From bd648f4359f17c5552df2ea715097ccbe8016da4 Mon Sep 17 00:00:00 2001 From: koko2pp Date: Fri, 13 Oct 2023 19:29:03 +0800 Subject: [PATCH 1/8] style: Apply Clang format to modified files and restore patch files. --- WORKSPACE | 4 +- build.sh | 10 +- conf/chunkserver.conf | 144 +- conf/chunkserver.conf.example | 145 +- conf/client.conf | 122 +- conf/cs_client.conf | 116 +- conf/mds.conf | 170 +- conf/py_client.conf | 112 +- conf/snap_client.conf | 116 +- conf/snapshot_clone_server.conf | 80 +- conf/tools.conf | 10 +- curve-ansible/client.ini | 2 +- .../wait_copysets_status_healthy.yml | 2 +- curve-ansible/group_vars/mds.yml | 2 +- .../roles/generate_config/defaults/main.yml | 20 +- .../templates/chunkserver.conf.j2 | 138 +- .../generate_config/templates/client.conf.j2 | 116 +- .../generate_config/templates/mds.conf.j2 | 166 +- .../templates/nebd-client.conf.j2 | 22 +- .../templates/nebd-server.conf.j2 | 10 +- .../templates/snapshot_clone_server.conf.j2 | 80 +- .../generate_config/templates/tools.conf.j2 | 10 +- .../install_package/files/disk_uuid_repair.py | 103 +- .../templates/chunkserver_ctl.sh.j2 | 24 +- .../templates/chunkserver_deploy.sh.j2 | 32 +- .../templates/etcd-daemon.sh.j2 | 44 +- .../templates/mds-daemon.sh.j2 | 52 +- .../install_package/templates/nebd-daemon.j2 | 8 +- .../templates/snapshot-daemon.sh.j2 | 52 +- .../roles/install_package/vars/main.yml | 2 +- .../roles/restart_service/defaults/main.yml | 2 +- .../tasks/include/restart_mds.yml | 2 +- .../tasks/include/restart_snapshotclone.yml | 2 +- .../roles/restart_service/tasks/main.yml | 2 +- .../roles/restart_service/vars/main.yml | 2 +- .../vars/main.yml | 2 +- .../tasks/include/start_chunkserver.yml | 2 +- .../roles/start_service/tasks/main.yml | 2 +- .../roles/stop_service/tasks/main.yml | 2 +- curve-ansible/rolling_update_curve.yml | 14 +- curve-ansible/server.ini | 14 +- curvefs/conf/curvebs_client.conf | 120 +- curvefs/monitor/grafana-report.py | 48 +- .../grafana/provisioning/dashboards/mds.json | 8 +- .../metaserverclient/metaserver_client.cpp | 81 +- .../src/metaserver/copyset/conf_epoch_file.h | 36 +- curvefs/src/metaserver/inflight_throttle.h | 14 +- .../test/mds/schedule/coordinator_test.cpp | 112 +- .../test/mds/schedule/operatorStep_test.cpp | 72 +- .../mds/schedule/recoverScheduler_test.cpp | 36 +- .../mds/schedule/scheduleMetrics_test.cpp | 40 +- .../scheduleService/scheduleService_test.cpp | 15 +- curvefs/test/volume/bitmap_allocator_test.cpp | 7 +- curvefs_python/cbd_client.h | 12 +- curvefs_python/curve_type.h | 109 +- curvefs_python/curvefs_tool.py | 85 +- curvefs_python/libcurvefs.h | 21 +- curvefs_python/test.py | 7 +- curvesnapshot_python/libcurveSnapshot.cpp | 190 +- curvesnapshot_python/libcurveSnapshot.h | 246 +- .../local/chunkserver/conf/chunkserver.conf.0 | 4 +- .../local/chunkserver/conf/chunkserver.conf.1 | 4 +- .../local/chunkserver/conf/chunkserver.conf.2 | 4 +- include/chunkserver/chunkserver_common.h | 74 +- include/client/libcurve.h | 408 +- include/etcdclient/etcdclient.h | 133 +- .../nebd-package/etc/nebd/nebd-client.conf | 22 +- .../nebd-package/etc/nebd/nebd-server.conf | 10 +- mk-deb.sh | 18 +- mk-tar.sh | 18 +- monitor/grafana-report.py | 48 +- monitor/grafana/dashboards/chunkserver.json | 104 +- monitor/grafana/dashboards/client.json | 34 +- monitor/grafana/dashboards/etcd.json | 2 +- monitor/grafana/dashboards/mds.json | 80 +- monitor/grafana/dashboards/report.json | 50 +- .../dashboards/snapshotcloneserver.json | 26 +- .../provisioning/dashboards/chunkserver.json | 104 +- 
.../provisioning/dashboards/client.json | 34 +- .../grafana/provisioning/dashboards/etcd.json | 2 +- .../grafana/provisioning/dashboards/mds.json | 80 +- .../provisioning/dashboards/report.json | 50 +- .../dashboards/snapshotcloneserver.json | 26 +- nebd/etc/nebd/nebd-client.conf | 22 +- nebd/etc/nebd/nebd-server.conf | 10 +- nebd/nebd-package/usr/bin/nebd-daemon | 8 +- nebd/src/common/configuration.cpp | 68 +- nebd/src/common/configuration.h | 127 +- nebd/src/common/crc32.h | 32 +- nebd/src/common/file_lock.h | 15 +- nebd/src/common/name_lock.h | 50 +- nebd/src/common/stringstatus.h | 34 +- nebd/src/common/timeutility.h | 12 +- nebd/src/part1/async_request_closure.cpp | 22 +- nebd/src/part1/async_request_closure.h | 81 +- nebd/src/part1/heartbeat_manager.h | 30 +- nebd/src/part1/libnebd.cpp | 36 +- nebd/src/part1/libnebd.h | 150 +- nebd/src/part1/libnebd_file.h | 88 +- nebd/src/part1/nebd_client.cpp | 141 +- nebd/src/part1/nebd_client.h | 122 +- nebd/src/part1/nebd_common.h | 34 +- nebd/src/part1/nebd_metacache.h | 33 +- nebd/src/part2/define.h | 48 +- nebd/src/part2/file_entity.cpp | 87 +- nebd/src/part2/file_entity.h | 162 +- nebd/src/part2/file_manager.cpp | 12 +- nebd/src/part2/file_manager.h | 134 +- nebd/src/part2/heartbeat_manager.cpp | 23 +- nebd/src/part2/heartbeat_manager.h | 77 +- nebd/src/part2/main.cpp | 11 +- nebd/src/part2/metafile_manager.cpp | 70 +- nebd/src/part2/metafile_manager.h | 50 +- nebd/src/part2/nebd_server.cpp | 47 +- nebd/src/part2/nebd_server.h | 60 +- nebd/src/part2/request_executor.h | 16 +- nebd/src/part2/request_executor_curve.h | 70 +- nebd/src/part2/util.h | 6 +- nebd/test/common/configuration_test.cpp | 27 +- nebd/test/common/test_name_lock.cpp | 36 +- .../test/part1/heartbeat_manager_unittest.cpp | 32 +- nebd/test/part1/nebd_client_unittest.cpp | 144 +- nebd/test/part2/file_manager_unittest.cpp | 239 +- .../test/part2/heartbeat_manager_unittest.cpp | 57 +- nebd/test/part2/heartbeat_service_test.cpp | 20 +- nebd/test/part2/metafile_manager_test.cpp | 101 +- nebd/test/part2/test_nebd_server.cpp | 34 +- .../part2/test_request_executor_curve.cpp | 141 +- proto/chunk.proto | 70 +- proto/cli.proto | 6 +- proto/cli2.proto | 16 +- proto/common.proto | 12 +- proto/copyset.proto | 42 +- proto/heartbeat.proto | 72 +- proto/nameserver2.proto | 114 +- proto/schedule.proto | 2 +- proto/topology.proto | 2 +- robot/Resources/keywords/deploy.py | 501 +- robot/Resources/keywords/fault_inject.py | 1518 +++--- robot/Resources/keywords/snapshot_operate.py | 76 +- robot/curve_choas.txt | 10 +- robot/curve_robot.txt | 38 +- src/chunkserver/chunk_closure.cpp | 28 +- src/chunkserver/chunk_closure.h | 55 +- src/chunkserver/chunk_service.cpp | 303 +- src/chunkserver/chunk_service.h | 103 +- src/chunkserver/chunk_service_closure.cpp | 103 +- src/chunkserver/chunk_service_closure.h | 65 +- src/chunkserver/chunkserver.cpp | 633 ++- src/chunkserver/chunkserver.h | 97 +- src/chunkserver/chunkserver_helper.cpp | 20 +- src/chunkserver/chunkserver_main.cpp | 2 +- src/chunkserver/chunkserver_metrics.cpp | 121 +- src/chunkserver/chunkserver_metrics.h | 355 +- src/chunkserver/cli.h | 54 +- src/chunkserver/cli2.cpp | 132 +- src/chunkserver/cli2.h | 83 +- src/chunkserver/clone_copyer.h | 85 +- src/chunkserver/clone_core.cpp | 319 +- src/chunkserver/clone_core.h | 155 +- src/chunkserver/clone_manager.cpp | 24 +- src/chunkserver/clone_manager.h | 59 +- src/chunkserver/clone_task.h | 28 +- src/chunkserver/conf_epoch_file.cpp | 38 +- src/chunkserver/conf_epoch_file.h | 63 +- 
src/chunkserver/config_info.h | 96 +- src/chunkserver/copyset_node.cpp | 503 +- src/chunkserver/copyset_node.h | 337 +- src/chunkserver/copyset_node_manager.cpp | 195 +- src/chunkserver/copyset_node_manager.h | 200 +- src/chunkserver/copyset_service.cpp | 92 +- src/chunkserver/copyset_service.h | 39 +- src/chunkserver/heartbeat.cpp | 231 +- src/chunkserver/heartbeat.h | 110 +- src/chunkserver/heartbeat_helper.cpp | 68 +- src/chunkserver/heartbeat_helper.h | 71 +- src/chunkserver/inflight_throttle.h | 17 +- src/chunkserver/op_request.cpp | 494 +- src/chunkserver/op_request.h | 341 +- src/chunkserver/passive_getfn.h | 112 +- .../raftsnapshot/curve_file_adaptor.h | 7 +- .../raftsnapshot/curve_file_service.cpp | 75 +- .../raftsnapshot/curve_filesystem_adaptor.cpp | 86 +- .../raftsnapshot/curve_filesystem_adaptor.h | 145 +- .../curve_snapshot_attachment.cpp | 21 +- .../raftsnapshot/curve_snapshot_attachment.h | 57 +- .../raftsnapshot/curve_snapshot_copier.cpp | 126 +- .../raftsnapshot/curve_snapshot_copier.h | 8 +- .../raftsnapshot/curve_snapshot_file_reader.h | 41 +- src/chunkserver/raftsnapshot/define.h | 9 +- src/chunkserver/register.cpp | 28 +- src/chunkserver/register.h | 22 +- src/chunkserver/trash.cpp | 119 +- src/chunkserver/trash.h | 150 +- src/client/chunk_closure.cpp | 488 +- src/client/chunk_closure.h | 150 +- src/client/client_common.h | 126 +- src/client/client_metric.h | 181 +- src/client/config_info.h | 204 +- src/client/copyset_client.cpp | 198 +- src/client/copyset_client.h | 246 +- src/client/file_instance.cpp | 82 +- src/client/file_instance.h | 122 +- src/client/inflight_controller.h | 57 +- src/client/io_condition_varaiable.h | 35 +- src/client/io_tracker.cpp | 119 +- src/client/io_tracker.h | 253 +- src/client/iomanager.h | 37 +- src/client/iomanager4chunk.h | 168 +- src/client/iomanager4file.cpp | 44 +- src/client/iomanager4file.h | 187 +- src/client/lease_executor.cpp | 15 +- src/client/lease_executor.h | 138 +- src/client/libcurve_file.cpp | 310 +- src/client/libcurve_file.h | 257 +- src/client/libcurve_snapshot.h | 547 ++- src/client/mds_client.cpp | 553 ++- src/client/mds_client.h | 584 +-- src/client/mds_client_base.h | 591 ++- src/client/metacache.cpp | 105 +- src/client/metacache.h | 238 +- src/client/metacache_struct.h | 119 +- src/client/request_closure.h | 81 +- src/client/request_context.h | 53 +- src/client/request_scheduler.cpp | 58 +- src/client/request_scheduler.h | 122 +- src/client/request_sender.h | 210 +- src/client/request_sender_manager.cpp | 9 +- src/client/request_sender_manager.h | 25 +- src/client/service_helper.cpp | 80 +- src/client/service_helper.h | 81 +- src/client/splitor.h | 140 +- src/client/unstable_helper.cpp | 8 +- src/client/unstable_helper.h | 39 +- src/common/authenticator.h | 29 +- src/common/bitmap.cpp | 139 +- src/common/bitmap.h | 171 +- src/common/channel_pool.h | 22 +- .../concurrent/bounded_blocking_queue.h | 38 +- src/common/concurrent/concurrent.h | 51 +- src/common/concurrent/count_down_event.h | 50 +- src/common/concurrent/task_thread_pool.h | 67 +- src/common/configuration.cpp | 134 +- src/common/configuration.h | 175 +- src/common/crc32.h | 32 +- src/common/curve_define.h | 39 +- src/common/define.h | 69 +- src/common/fs_util.h | 10 +- src/common/interruptible_sleeper.h | 22 +- src/common/location_operator.cpp | 32 +- src/common/location_operator.h | 46 +- src/common/net_common.h | 20 +- src/common/s3_adapter.cpp | 191 +- src/common/s3_adapter.h | 201 +- .../snapshotclone/snapshotclone_define.cpp | 12 +- 
.../snapshotclone/snapshotclone_define.h | 74 +- src/common/stringstatus.h | 35 +- src/common/timeutility.h | 22 +- src/common/uuid.h | 39 +- src/common/wait_interval.h | 19 +- src/fs/ext4_filesystem_impl.cpp | 117 +- src/fs/local_filesystem.h | 209 +- src/kvstorageclient/etcd_client.h | 100 +- src/leader_election/leader_election.cpp | 48 +- src/leader_election/leader_election.h | 42 +- src/mds/nameserver2/clean_core.cpp | 98 +- src/mds/nameserver2/clean_core.h | 39 +- src/mds/nameserver2/clean_manager.h | 33 +- src/mds/nameserver2/clean_task.h | 90 +- src/mds/nameserver2/clean_task_manager.cpp | 38 +- src/mds/nameserver2/clean_task_manager.h | 54 +- src/snapshotcloneserver/clone/clone_core.cpp | 674 ++- src/snapshotcloneserver/clone/clone_core.h | 560 +-- .../clone/clone_service_manager.cpp | 341 +- .../clone/clone_service_manager.h | 411 +- src/snapshotcloneserver/clone/clone_task.h | 127 +- .../clone/clone_task_manager.cpp | 97 +- .../clone/clone_task_manager.h | 115 +- src/snapshotcloneserver/common/config.h | 49 +- .../common/curvefs_client.h | 595 +-- .../common/snapshotclone_info.h | 413 +- .../common/snapshotclone_meta_store.h | 122 +- .../common/snapshotclone_meta_store_etcd.h | 59 +- .../common/snapshotclone_metric.h | 95 +- src/snapshotcloneserver/common/task.h | 32 +- src/snapshotcloneserver/common/task_info.h | 88 +- src/snapshotcloneserver/common/thread_pool.h | 36 +- src/snapshotcloneserver/main.cpp | 19 +- .../snapshot/snapshot_core.cpp | 450 +- .../snapshot/snapshot_core.h | 391 +- .../snapshot/snapshot_data_store.cpp | 42 +- .../snapshot/snapshot_data_store.h | 282 +- .../snapshot/snapshot_data_store_s3.h | 81 +- .../snapshot/snapshot_service_manager.cpp | 192 +- .../snapshot/snapshot_service_manager.h | 264 +- .../snapshot/snapshot_task.cpp | 95 +- .../snapshot/snapshot_task.h | 208 +- .../snapshot/snapshot_task_manager.cpp | 37 +- .../snapshot/snapshot_task_manager.h | 96 +- .../snapshotclone_server.cpp | 229 +- .../snapshotclone_server.h | 70 +- .../snapshotclone_service.cpp | 486 +- .../snapshotclone_service.h | 73 +- src/tools/chunkserver_client.cpp | 30 +- src/tools/chunkserver_client.h | 56 +- src/tools/chunkserver_tool_factory.h | 15 +- src/tools/common.cpp | 6 +- src/tools/common.h | 9 +- src/tools/consistency_check.cpp | 78 +- src/tools/consistency_check.h | 131 +- src/tools/copyset_check.cpp | 129 +- src/tools/copyset_check.h | 99 +- src/tools/copyset_check_core.cpp | 325 +- src/tools/copyset_check_core.h | 486 +- src/tools/curve_cli.cpp | 154 +- src/tools/curve_cli.h | 88 +- src/tools/curve_format_main.cpp | 112 +- src/tools/curve_meta_tool.cpp | 37 +- src/tools/curve_meta_tool.h | 44 +- src/tools/curve_tool_define.h | 21 +- src/tools/curve_tool_factory.h | 32 +- src/tools/curve_tool_main.cpp | 53 +- src/tools/etcd_client.h | 29 +- src/tools/mds_client.cpp | 312 +- src/tools/mds_client.h | 513 +- src/tools/metric_client.cpp | 25 +- src/tools/metric_client.h | 51 +- src/tools/metric_name.h | 65 +- src/tools/namespace_tool.cpp | 143 +- src/tools/namespace_tool.h | 71 +- src/tools/namespace_tool_core.cpp | 59 +- src/tools/namespace_tool_core.h | 158 +- src/tools/raft_log_tool.cpp | 90 +- src/tools/raft_log_tool.h | 86 +- src/tools/schedule_tool.cpp | 54 +- src/tools/schedule_tool.h | 30 +- src/tools/snapshot_check.h | 42 +- src/tools/snapshot_clone_client.cpp | 41 +- src/tools/snapshot_clone_client.h | 61 +- src/tools/status_tool.cpp | 335 +- src/tools/status_tool.h | 145 +- src/tools/version_tool.cpp | 20 +- src/tools/version_tool.h | 107 +- 
test/chunkserver/braft_cli_service2_test.cpp | 195 +- test/chunkserver/braft_cli_service_test.cpp | 80 +- test/chunkserver/chunk_service_test.cpp | 78 +- test/chunkserver/chunk_service_test2.cpp | 145 +- test/chunkserver/chunkserver_helper_test.cpp | 10 +- test/chunkserver/chunkserver_service_test.cpp | 30 +- .../chunkserver/chunkserver_snapshot_test.cpp | 936 ++-- test/chunkserver/chunkserver_test_util.cpp | 208 +- test/chunkserver/chunkserver_test_util.h | 192 +- test/chunkserver/cli2_test.cpp | 352 +- test/chunkserver/cli_test.cpp | 238 +- test/chunkserver/client.cpp | 59 +- test/chunkserver/clone/clone_copyer_test.cpp | 142 +- test/chunkserver/clone/clone_core_test.cpp | 389 +- test/chunkserver/clone/clone_manager_test.cpp | 81 +- test/chunkserver/clone/op_request_test.cpp | 743 ++- test/chunkserver/copyset_epoch_test.cpp | 101 +- .../chunkserver/copyset_node_manager_test.cpp | 122 +- test/chunkserver/copyset_node_test.cpp | 521 +- test/chunkserver/copyset_service_test.cpp | 114 +- .../datastore/datastore_mock_unittest.cpp | 2865 ++++------- .../datastore/file_helper_unittest.cpp | 35 +- .../datastore/filepool_mock_unittest.cpp | 534 +- .../datastore/filepool_unittest.cpp | 25 +- test/chunkserver/fake_datastore.h | 73 +- test/chunkserver/heartbeat_helper_test.cpp | 75 +- test/chunkserver/heartbeat_test.cpp | 104 +- test/chunkserver/heartbeat_test_common.cpp | 127 +- test/chunkserver/heartbeat_test_common.h | 149 +- test/chunkserver/heartbeat_test_main.cpp | 27 +- test/chunkserver/inflight_throttle_test.cpp | 9 +- test/chunkserver/metrics_test.cpp | 175 +- ...curve_filesystem_adaptor_mock_unittest.cpp | 86 +- .../curve_filesystem_adaptor_unittest.cpp | 62 +- .../curve_snapshot_attachment_test.cpp | 40 +- ...raftsnapshot_chunkfilepool_integration.cpp | 295 +- test/chunkserver/server.cpp | 29 +- test/chunkserver/trash_test.cpp | 98 +- test/client/client_common_unittest.cpp | 19 +- .../client_mdsclient_metacache_unittest.cpp | 587 ++- test/client/client_metric_test.cpp | 69 +- test/client/client_session_unittest.cpp | 65 +- test/client/client_unstable_helper_test.cpp | 62 +- test/client/client_userinfo_unittest.cpp | 228 +- test/client/copyset_client_test.cpp | 2287 ++++----- test/client/fake/client_workflow_test.cpp | 73 +- .../client/fake/client_workflow_test4snap.cpp | 52 +- test/client/fake/fakeChunkserver.h | 174 +- test/client/fake/fakeMDS.h | 649 ++- test/client/inflight_rpc_control_test.cpp | 30 +- test/client/iotracker_splitor_unittest.cpp | 268 +- test/client/lease_executor_test.cpp | 11 +- test/client/libcbd_libcurve_test.cpp | 45 +- test/client/libcurve_interface_unittest.cpp | 197 +- test/client/mds_failover_test.cpp | 145 +- test/client/mock/mock_chunkservice.h | 163 +- test/client/request_scheduler_test.cpp | 153 +- test/client/request_sender_test.cpp | 17 +- test/common/bitmap_test.cpp | 30 +- test/common/channel_pool_test.cpp | 12 +- test/common/configuration_test.cpp | 40 +- test/common/count_down_event_test.cpp | 35 +- test/common/lru_cache_test.cpp | 66 +- test/common/task_thread_pool_test.cpp | 50 +- test/common/test_name_lock.cpp | 36 +- test/failpoint/failpoint_test.cpp | 51 +- test/fs/ext4_filesystem_test.cpp | 238 +- .../chunkserver/chunkserver_basic_test.cpp | 276 +- .../chunkserver/chunkserver_clone_recover.cpp | 391 +- .../chunkserver_concurrent_test.cpp | 868 ++-- .../datastore/datastore_basic_test.cpp | 108 +- .../datastore/datastore_clone_case_test.cpp | 194 +- .../datastore/datastore_concurrency_test.cpp | 17 +- .../datastore/datastore_exception_test.cpp | 
697 +-- .../datastore/datastore_integration_base.h | 32 +- .../datastore/datastore_integration_test.cpp | 252 +- .../datastore/datastore_restart_test.cpp | 397 +- .../datastore_snapshot_case_test.cpp | 211 +- .../datastore/datastore_stress_test.cpp | 24 +- .../client/chunkserver_exception_test.cpp | 370 +- .../client/common/file_operation.cpp | 18 +- .../client/common/file_operation.h | 13 +- .../integration/client/mds_exception_test.cpp | 655 +-- .../unstable_chunkserver_exception_test.cpp | 141 +- test/integration/cluster_common/cluster.cpp | 171 +- test/integration/cluster_common/cluster.h | 369 +- .../cluster_common/cluster_basic_test.cpp | 190 +- .../integration/cluster_common/mds.basic.conf | 158 +- test/integration/common/chunkservice_op.cpp | 152 +- test/integration/common/chunkservice_op.h | 223 +- test/integration/common/config_generator.h | 2 +- test/integration/common/peer_cluster.cpp | 432 +- test/integration/common/peer_cluster.h | 391 +- test/integration/heartbeat/common.cpp | 50 +- test/integration/heartbeat/common.h | 307 +- .../heartbeat/heartbeat_basic_test.cpp | 4360 ++++++++--------- .../heartbeat/heartbeat_exception_test.cpp | 111 +- .../raft/raft_config_change_test.cpp | 2308 +++------ .../raft/raft_log_replication_test.cpp | 1202 ++--- test/integration/raft/raft_snapshot_test.cpp | 565 +-- test/integration/raft/raft_vote_test.cpp | 1394 ++---- .../fake_curvefs_client.cpp | 213 +- .../snapshotcloneserver/fake_curvefs_client.h | 186 +- .../snapshotcloneserver_common_test.cpp | 517 +- .../snapshotcloneserver_concurrent_test.cpp | 183 +- .../snapshotcloneserver_exception_test.cpp | 999 ++-- .../snapshotcloneserver_module.cpp | 48 +- .../snapshotcloneserver_recover_test.cpp | 758 ++- .../snapshotcloneserver_test.cpp | 34 +- test/kvstorageclient/etcdclient_test.cpp | 123 +- .../chunkserver_healthy_checker_test.cpp | 85 +- test/mds/heartbeat/heartbeat_manager_test.cpp | 90 +- .../alloc_statistic_helper_test.cpp | 73 +- .../allocstatistic/alloc_statistic_test.cpp | 22 +- test/mds/nameserver2/clean_core_test.cpp | 120 +- test/mds/nameserver2/curvefs_test.cpp | 2879 ++++++----- test/mds/nameserver2/file_lock_test.cpp | 64 +- test/mds/nameserver2/file_record_test.cpp | 34 +- .../nameserver2/namespace_service_test.cpp | 419 +- test/mds/schedule/coordinator_test.cpp | 314 +- test/mds/schedule/leaderScheduler_test.cpp | 261 +- test/mds/schedule/operatorStep_test.cpp | 106 +- .../mds/schedule/rapidLeaderSheduler_test.cpp | 71 +- test/mds/schedule/recoverScheduler_test.cpp | 129 +- test/mds/schedule/scheduleMetrics_test.cpp | 284 +- .../scheduleService/scheduleService_test.cpp | 37 +- .../schedule/schedulerPOC/scheduler_poc.cpp | 420 +- test/mds/schedule/scheduler_helper_test.cpp | 248 +- test/mds/server/mds_test.cpp | 63 +- test/mds/topology/test_topology.cpp | 1323 ++--- .../test_topology_chunk_allocator.cpp | 367 +- test/mds/topology/test_topology_metric.cpp | 265 +- test/resources.list | 48 +- test/snapshotcloneserver/test_clone_core.cpp | 802 ++- .../test_curvefs_client.cpp | 66 +- .../test_snapshot_core.cpp | 1519 ++---- .../test_snapshot_service_manager.cpp | 593 +-- test/tools/chunkserver_client_test.cpp | 51 +- test/tools/config/data_check.conf | 102 +- test/tools/copyset_check_core_test.cpp | 489 +- test/tools/copyset_check_test.cpp | 164 +- test/tools/curve_cli_test.cpp | 249 +- test/tools/curve_meta_tool_test.cpp | 52 +- test/tools/data_consistency_check_test.cpp | 147 +- test/tools/etcd_client_test.cpp | 53 +- test/tools/mds_client_test.cpp | 872 ++-- 
test/tools/metric_client_test.cpp | 95 +-
test/tools/namespace_tool_core_test.cpp | 198 +-
test/tools/namespace_tool_test.cpp | 193 +-
test/tools/raft_log_tool_test.cpp | 36 +-
test/tools/segment_parser_test.cpp | 68 +-
test/tools/snapshot_clone_client_test.cpp | 83 +-
test/tools/status_tool_test.cpp | 510 +-
test/tools/version_tool_test.cpp | 241 +-
test/util/config_generator.h | 46 +-
thirdparties/etcdclient/etcdclient.go | 20 +-
tools/curvefsTool.cpp | 351 +-
tools/snaptool/queryclone.py | 30 +-
501 files changed, 40530 insertions(+), 47143 deletions(-)

diff --git a/WORKSPACE b/WORKSPACE
index 70772b05f5..ea0bc6de23 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -96,8 +96,8 @@ bind(
actual = "@com_google_googletest//:gtest",
)
-#Import the glog files.
-# brpc内BUILD文件在依赖glog时, 直接指定的依赖是"@com_github_google_glog//:glog"
+# Import the glog files.
+# When the BUILD file in brpc depends on glog, the directly specified dependency is "@com_github_google_glog//:glog"
git_repository(
name = "com_github_google_glog",
remote = "https://github.com/google/glog",

diff --git a/build.sh b/build.sh
index 9d714c28d6..f9e880d131 100644
--- a/build.sh
+++ b/build.sh
@@ -17,7 +17,7 @@
#
dir=`pwd`
-#step1 清除生成的目录和文件
+# step1 Clear generated directories and files
bazel clean
rm -rf curvefs_python/BUILD
rm -rf curvefs_python/tmplib/
@@ -29,8 +29,8 @@ then
exit
fi
-#step2 获取tag版本和git提交版本信息
-#获取tag版本
+# step2 Obtain the tag version and git commit version information
+# Get the tag version
tag_version=`git status | grep -w "HEAD detached at" | awk '{print $NF}' | awk -F"v" '{print $2}'`
if [ -z ${tag_version} ]
then
tag_version=9.9.9
fi
-#获取git提交版本信息
+# Obtain the git commit version information
commit_id=`git show --abbrev-commit HEAD|head -n 1|awk '{print $2}'`
if [ "$1" = "debug" ]
then
debug="+debug"
else
debug=""
fi
curve_version=${tag_version}+${commit_id}${debug}
-#step3 执行编译
+# step3 Run the build
# check bazel verion, bazel vesion must = 4.2.2
bazel_version=`bazel version | grep "Build label" | awk '{print $3}'`
if [ -z ${bazel_version} ]

diff --git a/conf/chunkserver.conf b/conf/chunkserver.conf
index 0cfc27b544..edb0380408 100644
--- a/conf/chunkserver.conf
+++ b/conf/chunkserver.conf
@@ -1,17 +1,17 @@
#
# Global settings
#
-# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
global.ip=127.0.0.1 # __CURVEADM_TEMPLATE__ ${service_addr} __CURVEADM_TEMPLATE__
global.port=8200 # __CURVEADM_TEMPLATE__ ${service_port} __CURVEADM_TEMPLATE__
global.subnet=127.0.0.0/24
global.enable_external_server=true
global.external_ip=127.0.0.1 # __CURVEADM_TEMPLATE__ ${service_external_addr} __CURVEADM_TEMPLATE__
global.external_subnet=127.0.0.0/24
-# chunk大小,一般16MB
+# Chunk size, usually 16MB
# it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true
global.chunk_size=16777216
-# chunk 元数据页大小,一般4KB
+# Chunk metadata page size, usually 4KB
# it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true
global.meta_page_size=4096
# chunk's block size, IO requests must align with it, supported value is |512| and |4096|
# it should consist with `block_size` in chunkfilepool.meta_path and `mds.volume.blockSize` in MDS's configurations
# for clone chunk and snapshot chunk, it's also the minimum granularity that each bit represents
# it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true
global.block_size=4096
-# clone chunk允许的最长location长度
+# The maximum allowed location length for clone chunks
global.location_limit=3000
#
# MDS settings
#
-#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777
+# Multiple MDS addresses are supported, separated by commas: 127.0.0.1:6666,127.0.0.1:7777
mds.listen.addr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__
-# 向mds注册的最大重试次数
+# Maximum number of retries for registering with MDS
mds.register_retries=100
-# 向mds注册的rpc超时时间,一般1000ms
+# RPC timeout for registering with MDS, typically 1000ms
mds.register_timeout=1000
-# 向mds发送心跳的间隔,一般10s
+# Interval between heartbeats sent to MDS, usually 10s
mds.heartbeat_interval=10
-# 向mds发送心跳的rpc超时间,一般1000ms
+# RPC timeout for heartbeats sent to MDS, usually 1000ms
mds.heartbeat_timeout=5000
#
# Chunkserver settings
#
-# chunkserver主目录
+# Chunkserver home directory
chunkserver.stor_uri=local://./0/ # __CURVEADM_TEMPLATE__ local://${prefix}/data __CURVEADM_TEMPLATE__
-# chunkserver元数据文件
+# Chunkserver metadata file
chunkserver.meta_uri=local://./0/chunkserver.dat # __CURVEADM_TEMPLATE__ local://${prefix}/data/chunkserver.dat __CURVEADM_TEMPLATE__
-# disk类型
+# Disk type
chunkserver.disk_type=nvme
-# raft内部install snapshot带宽上限,一般20MB
+# Bandwidth limit for raft's internal install snapshot, usually 20MB
chunkserver.snapshot_throttle_throughput_bytes=20971520
-# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB,
-# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个
-# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而
-# 不是20MB的带宽
+# Check cycles allow finer-grained bandwidth control. With snapshotThroughputBytes=100MB
+# and check cycles=10, for example, the bandwidth is 10MB per 1/10 second and does not
+# accumulate: the first 1/10 second gets 10MB, and any unused budget expires, so the
+# second 1/10 second can still use only 10MB of bandwidth, not 20MB
chunkserver.snapshot_throttle_check_cycles=4
-# 限制inflight io数量,一般是5000
+# Limit on the number of inflight IO requests, usually 5000
chunkserver.max_inflight_requests=5000
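To make the check-cycle arithmetic above concrete, here is a minimal worked sketch in C++ (illustrative only; the variable names are hypothetical and this is not the chunkserver's actual throttle code):

    #include <cstdint>
    #include <iostream>

    int main() {
        // Values from the config above: 20MB/s total budget, 4 check cycles.
        const uint64_t throttle_bytes_per_sec = 20971520;
        const uint64_t check_cycles = 4;

        // Each cycle lasts 1/check_cycles of a second and receives an independent,
        // non-accumulating budget of throttle_bytes_per_sec / check_cycles bytes.
        const uint64_t budget_per_cycle = throttle_bytes_per_sec / check_cycles;
        std::cout << "cycle length: " << 1000 / check_cycles << " ms, "
                  << "budget per cycle: " << budget_per_cycle << " bytes\n";
        // Unused budget expires when a cycle ends: the next cycle still starts
        // with budget_per_cycle, never 2 * budget_per_cycle.
        return 0;
    }

With the values in this file, each 250 ms cycle gets an independent budget of 5242880 bytes (5MB).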
#
@@ -70,43 +70,43 @@ test.testcopyset_conf=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0
#
# lease read switch, default is true(open lease read)
# if false, all requests will propose to raft(log read)
-# 启用lease read,一般开启,否则将退化为log read形式
+# Enable lease read; usually enabled, otherwise reads degenerate to log reads
copyset.enable_lease_read=true
-# 是否检查任期,一般检查
+# Whether to check the raft term; usually enabled
copyset.check_term=true
-# 是否关闭raft配置变更的服务,一般不关闭
+# Whether to disable the raft configuration change service; usually not disabled
copyset.disable_cli=false
copyset.log_applied_task=false
-# raft选举超时时间,一般是5000ms
+# Raft election timeout, usually 5000ms
copyset.election_timeout_ms=1000
-# raft打快照间隔,一般是1800s,也就是30分钟
+# Raft snapshot interval, usually 1800s, i.e. 30 minutes
copyset.snapshot_interval_s=1800
-# add一个节点,add的节点首先以类似learner的角色拷贝数据
-# 在跟leader差距catchup_margin个entry的时候,leader
-# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定
-# 会commit&apply,catchup_margin较小可以大概率确保learner
-# 后续很快可以加入复制组
+# Add a node, and the added node first acts like a learner to copy data.
+# When the gap with the leader is down to catchup_margin entries, the leader
+# will attempt to commit the configuration change entry (generally, the committed entry
+# will definitely be committed and applied). A small catchup_margin can
+# ensure that the learner can join the replication group quickly.
copyset.catchup_margin=1000
-# copyset chunk数据目录
+# Copyset chunk data directory
copyset.chunk_data_uri=local://./0/copysets # __CURVEADM_TEMPLATE__ local://${prefix}/data/copysets __CURVEADM_TEMPLATE__
-# raft wal log目录
+# Raft WAL log directory
copyset.raft_log_uri=curve://./0/copysets # __CURVEADM_TEMPLATE__ curve://${prefix}/data/copysets __CURVEADM_TEMPLATE__
-# raft元数据目录
+# Raft metadata directory
copyset.raft_meta_uri=local://./0/copysets # __CURVEADM_TEMPLATE__ local://${prefix}/data/copysets __CURVEADM_TEMPLATE__
-# raft snapshot目录
+# Raft snapshot directory
copyset.raft_snapshot_uri=curve://./0/copysets # __CURVEADM_TEMPLATE__ curve://${prefix}/data/copysets __CURVEADM_TEMPLATE__
-# copyset回收目录
+# Copyset recycle directory
copyset.recycler_uri=local://./0/recycler # __CURVEADM_TEMPLATE__ local://${prefix}/data/recycler __CURVEADM_TEMPLATE__
-# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制
+# Concurrency threshold for loading copysets at chunkserver startup; 0 means no limit
copyset.load_concurrency=10
# chunkserver use how many threads to use copyset complete sync.
copyset.sync_concurrency=20
-# 检查copyset是否加载完成出现异常时的最大重试次数
+# Maximum number of retries when an exception occurs while checking whether copyset loading has finished
copyset.check_retrytimes=3
-# 当前peer的applied_index与leader上的committed_index差距小于该值
-# 则判定copyset已经加载完成
+# If the gap between the current peer's applied_index and the leader's committed_index
+# is smaller than this value, the copyset is considered fully loaded
copyset.finishload_margin=2000
-# 循环判定copyset是否加载完成的内部睡眠时间
+# Internal sleep time of the loop that checks whether copysets have finished loading
copyset.check_loadmargin_interval_ms=1000
# scan copyset interval
copyset.scan_interval_sec=5
@@ -132,26 +132,26 @@ copyset.check_syncing_interval_ms=500
#
# Clone settings
#
-# 禁止使用curveclient
+# Disable the curve client
clone.disable_curve_client=false
-# 禁止使用s3adapter
+# Disable the s3 adapter
clone.disable_s3_adapter=false
-# 克隆的分片大小,一般1MB
+# Slice size for cloning, usually 1MB
clone.slice_size=1048576
-# 读clone chunk时是否需要paste到本地
-# 该配置对recover chunk请求类型无效
+# Whether the data read from a clone chunk should be pasted to the local chunk
+# This option has no effect on recover chunk requests
clone.enable_paste=false
-# 克隆的线程数量
+# Number of clone threads
clone.thread_num=10
-# 克隆的队列深度
+# Queue depth for cloning
clone.queue_depth=6000
-# curve用户名
+# Curve username
curve.root_username=root
-# curve密码
+# Curve password
curve.root_password=root_password
-# client配置文件
+# Client configuration file
curve.config_path=conf/cs_client.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/cs_client.conf __CURVEADM_TEMPLATE__
-# s3配置文件
+# S3 configuration file
s3.config_path=conf/s3.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/s3.conf __CURVEADM_TEMPLATE__
# Curve File time to live
curve.curve_file_timeout_s=30
@@ -159,7 +159,7 @@ curve.curve_file_timeout_s=30
#
# Local FileSystem settings
#
-# 是否开启使用renameat2,ext4内核3.15以后开始支持
+# Whether to enable renameat2, supported by ext4 since kernel 3.15
fs.enable_renameat2=true
#
@@ -179,27 +179,27 @@ storeng.sync_write=false
#
# Concurrent apply module
-# 并发模块写线程的并发度,一般是10
+# Concurrency of the concurrent-apply write threads, usually 10
wconcurrentapply.size=10
-# 并发模块写线程的队列深度
+# Queue depth of the concurrent-apply write threads
wconcurrentapply.queuedepth=1
-# 并发模块读线程的并发度,一般是5
+# Concurrency of the concurrent-apply read threads, usually 5
rconcurrentapply.size=5
-# 并发模块读线程的队列深度
+# Queue depth of the concurrent-apply read threads
rconcurrentapply.queuedepth=1
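The wconcurrentapply/rconcurrentapply knobs above describe fixed-size worker pools draining bounded queues; the sketch below shows the general shape under that assumption (a generic pool, not Curve's actual ConcurrentApplyModule):

    #include <condition_variable>
    #include <cstddef>
    #include <functional>
    #include <mutex>
    #include <queue>
    #include <thread>
    #include <vector>

    // Minimal bounded task pool: 'size' workers share one queue capped at 'depth'.
    class ApplyPool {
     public:
      ApplyPool(int size, size_t depth) : depth_(depth) {
        for (int i = 0; i < size; ++i) workers_.emplace_back([this] { Run(); });
      }
      // Blocks while the queue is full, mirroring a bounded queuedepth.
      void Push(std::function<void()> task) {
        std::unique_lock<std::mutex> lk(mu_);
        not_full_.wait(lk, [this] { return q_.size() < depth_ || stop_; });
        q_.push(std::move(task));
        not_empty_.notify_one();
      }
      ~ApplyPool() {
        { std::lock_guard<std::mutex> lk(mu_); stop_ = true; }
        not_empty_.notify_all();
        not_full_.notify_all();
        for (auto& w : workers_) w.join();
      }
     private:
      void Run() {
        for (;;) {
          std::function<void()> task;
          {
            std::unique_lock<std::mutex> lk(mu_);
            not_empty_.wait(lk, [this] { return !q_.empty() || stop_; });
            if (q_.empty()) return;  // stopped and fully drained
            task = std::move(q_.front());
            q_.pop();
            not_full_.notify_one();
          }
          task();  // apply one IO request
        }
      }
      std::mutex mu_;
      std::condition_variable not_empty_, not_full_;
      std::queue<std::function<void()>> q_;
      size_t depth_;
      bool stop_ = false;
      std::vector<std::thread> workers_;
    };
    // e.g. ApplyPool write_pool(10, 1);  // wconcurrentapply.size / queuedepth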
#
# Chunkfile pool
#
-# 是否开启从chunkfilepool获取chunk,一般是true
+# Whether to fetch chunks from the chunkfilepool, usually true
chunkfilepool.enable_get_chunk_from_pool=true
-# chunkfilepool目录
+# chunkfilepool directory
chunkfilepool.chunk_file_pool_dir=./0/ # __CURVEADM_TEMPLATE__ ${prefix}/data __CURVEADM_TEMPLATE__
-# chunkfilepool meta文件路径
+# chunkfilepool meta file path
chunkfilepool.meta_path=./chunkfilepool.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/chunkfilepool.meta __CURVEADM_TEMPLATE__
-# chunkfilepool meta文件大小
+# chunkfilepool meta file size
chunkfilepool.cpmeta_file_size=4096
-# chunkfilepool get chunk最大重试次数
+# Maximum number of retries for getting a chunk from the chunkfilepool
chunkfilepool.retry_times=5
# Enable clean chunk
chunkfilepool.clean.enable=true
@@ -211,36 +211,36 @@ chunkfilepool.clean.throttle_iops=500
#
# WAL file pool
#
-# walpool是否共用chunkfilepool,如果为true,从第三条开始配置无效
+# Whether the walpool shares the chunkfilepool; if true, the settings from the third entry onward are ignored
walfilepool.use_chunk_file_pool=true
-# WALpool和ChunkFilePool共用时启用,在容量分配时会预留walpool的空间
+# Enabled when the WALpool and ChunkFilePool are shared; space for the walpool is reserved during capacity allocation
walfilepool.use_chunk_file_pool_reserve=15
-# 是否开启从walfilepool获取chunk,一般是true
+# Whether to fetch chunks from the walfilepool, usually true
walfilepool.enable_get_segment_from_pool=true
-# walpool目录
+# Walpool directory
walfilepool.file_pool_dir=./0/ # __CURVEADM_TEMPLATE__ ${prefix}/data/walfilepool.meta __CURVEADM_TEMPLATE__
-# walpool meta文件路径
+# Walpool meta file path
walfilepool.meta_path=./walfilepool.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/walfilepool.meta __CURVEADM_TEMPLATE__
-# walpool meta文件大小
+# Walpool meta file size
walfilepool.segment_size=8388608
-# WAL metapage大小
+# WAL metapage size
walfilepool.metapage_size=4096
-# WAL filepool 元数据文件大小
+# WAL filepool metadata file size
walfilepool.meta_file_size=4096
-# WAL filepool get chunk最大重试次数
+# Maximum number of retries for getting a chunk from the WAL filepool
walfilepool.retry_times=5
#
# trash settings
#
-# chunkserver回收数据彻底删除的过期时间
+# Expiration time after which recycled data is permanently deleted by the chunkserver
trash.expire_afterSec=300
-# chunkserver检查回收数据过期时间的周期
+# Interval at which the chunkserver checks recycled data for expiration
trash.scan_periodSec=120
# common option
#
-# chunkserver 日志存放文件夹
+# Chunkserver log directory
chunkserver.common.logDir=./ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
-# 单元测试情况下
+# In the case of unit testing
# chunkserver.common.logDir=./runlog/

diff --git a/conf/chunkserver.conf.example b/conf/chunkserver.conf.example
index eb664c2fd6..c478b3dc7f 100644
--- a/conf/chunkserver.conf.example
+++ b/conf/chunkserver.conf.example
@@ -1,18 +1,18 @@
#
# Global settings
#
-# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log levels: INFO=0/WARNING=1/ERROR=2/FATAL=3
global.ip=127.0.0.1
global.port=8200
global.subnet=127.0.0.0/24
global.enable_external_server=false
global.external_ip=127.0.0.1
global.external_subnet=127.0.0.0/24
-# chunk大小,一般16MB
+# Chunk size, typically 16MB
global.chunk_size=16777216
-# chunk 元数据页大小,一般4KB
+# Chunk metadata page size, typically 4KB
global.meta_page_size=4096
-# clone chunk允许的最长location长度
+# Maximum length allowed for the location of a clone chunk
# chunk's block size, IO requests must align with it, supported value is |512| and |4096|
# it should consist with `block_size` in chunkfilepool.meta_path and `mds.volume.blockSize` in MDS's configurations
# for clone chunk and snapshot chunk, it's also the minimum granularity that each bit represents
# it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true
@@ -23,34 +23,35 @@ global.location_limit=3000
#
# MDS settings
#
-#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777
+# Support for multiple addresses for MDS, separated by commas: 127.0.0.1:6666,127.0.0.1:7777
mds.listen.addr=127.0.0.1:6666
-# 向mds注册的最大重试次数
+# Maximum retry count for registering with MDS
mds.register_retries=100
-# 向mds注册的rpc超时时间,一般1000ms
+# RPC timeout for MDS registration, typically 1000ms
mds.register_timeout=1000
-# 向mds发送心跳的间隔,一般10s
+# Interval for sending heartbeats to MDS, usually 10s
mds.heartbeat_interval=10
-# 向mds发送心跳的rpc超时间,一般1000ms
+# RPC timeout for sending heartbeats to MDS, typically 1000ms
mds.heartbeat_timeout=5000
#
# Chunkserver settings
#
-# chunkserver主目录
+# Main directory for chunkserver
chunkserver.stor_uri=local://./0/
-# chunkserver元数据文件
+# Metadata file for chunkserver
chunkserver.meta_uri=local://./0/chunkserver.dat
-# disk类型
+# Disk type
chunkserver.disk_type=nvme
-# raft内部install snapshot带宽上限,一般20MB
+# Raft internal install snapshot bandwidth limit, usually 20MB
chunkserver.snapshot_throttle_throughput_bytes=20971520
-# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB,
-# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个
-# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而
-# 不是20MB的带宽
+# Throttle check cycles are for finer-grained bandwidth control. For example,
+# with snapshotThroughputBytes=100MB and check cycles=10, it ensures that
+# the bandwidth is 10MB every 1/10 second, without accumulation. For instance,
+# the bandwidth is 10MB for the first 1/10 second, but it expires after that.
+# In the second 1/10 second, the bandwidth remains 10MB, not 20MB.
chunkserver.snapshot_throttle_check_cycles=4
-# 限制inflight io数量,一般是5000
+# Limit for the number of inflight IO requests, usually 5000
chunkserver.max_inflight_requests=5000
#
@@ -64,41 +65,41 @@ test.testcopyset_conf=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0
#
# Copyset settings
#
-# 是否检查任期,一般检查
+# Whether to check the term, usually checked
copyset.check_term=true
-# 是否关闭raft配置变更的服务,一般不关闭
+# Whether to disable the service for raft configuration changes, generally not disabled
copyset.disable_cli=false
copyset.log_applied_task=false
-# raft选举超时时间,一般是5000ms
+# Raft election timeout, usually 5000ms
copyset.election_timeout_ms=1000
-# raft打快照间隔,一般是1800s,也就是30分钟
+# Raft snapshot interval, usually 1800s, i.e., 30 minutes
copyset.snapshot_interval_s=1800
-# add一个节点,add的节点首先以类似learner的角色拷贝数据
-# 在跟leader差距catchup_margin个entry的时候,leader
-# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定
-# 会commit&apply,catchup_margin较小可以大概率确保learner
-# 后续很快可以加入复制组
+# When adding a node, the added node first copies data in a role similar to a learner.
+# When there is a difference of catchup_margin entries from the leader, the leader
+# will attempt to commit and apply the configuration change entry (usually the committed
+# entry will certainly be committed and applied). A smaller catchup_margin can ensure
+# that the learner can quickly join the replication group.
copyset.catchup_margin=1000
-# copyset chunk数据目录
+# Copyset chunk data directory
copyset.chunk_data_uri=local://./0/copysets
-# raft wal log目录
+# Raft WAL log directory
copyset.raft_log_uri=curve://./0/copysets
-# raft元数据目录
+# Raft metadata directory
copyset.raft_meta_uri=local://./0/copysets
-# raft snapshot目录
+# Raft snapshot directory
copyset.raft_snapshot_uri=curve://./0/copysets
-# copyset回收目录
+# Copyset recycling directory
copyset.recycler_uri=local://./0/recycler
-# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制
+# Concurrency threshold for loading copysets at chunkserver startup; 0 means no limit
copyset.load_concurrency=10
-# chunkserver use how many threads to use copyset complete sync.
+# Number of threads used by chunk server for copyset complete synchronization.
copyset.sync_concurrency=20
-# 检查copyset是否加载完成出现异常时的最大重试次数
+# Maximum retry times when checking for exceptions during copyset loading.
copyset.check_retrytimes=3
-# 当前peer的applied_index与leader上的committed_index差距小于该值
-# 则判定copyset已经加载完成
+# If the difference between the applied_index of the current peer and the committed_index
+# on the leader is less than this value, the copyset is considered loaded.
copyset.finishload_margin=2000
-# 循环判定copyset是否加载完成的内部睡眠时间
+# Internal sleep time for cyclically determining if the copyset is loaded.
copyset.check_loadmargin_interval_ms=1000
# scan copyset interval
copyset.scan_interval_sec=5
@@ -124,26 +125,26 @@ copyset.check_syncing_interval_ms=500
#
# Clone settings
#
-# 禁止使用curveclient
+# Disable the curve client
clone.disable_curve_client=false
-# 禁止使用s3adapter
+# Disable the s3 adapter
clone.disable_s3_adapter=false
-# 克隆的分片大小,一般1MB
+# The shard size of the clone, usually 1MB
clone.slice_size=1048576
-# 读clone chunk时是否需要paste到本地
-# 该配置对recover chunk请求类型无效
+# Whether the data read from a clone chunk should be pasted to the local chunk
+# This configuration is not valid for the recover chunk request type
clone.enable_paste=false
-# 克隆的线程数量
+# Number of clone threads
clone.thread_num=10
-# 克隆的队列深度
+# Queue depth for cloning
clone.queue_depth=6000
-# curve用户名
+# Curve username
curve.root_username=root
-# curve密码
+# Curve password
curve.root_password=root_password
-# client配置文件
+# Client configuration file
curve.config_path=conf/cs_client.conf
-# s3配置文件
+# S3 configuration file
s3.config_path=conf/s3.conf
# Curve File time to live
curve.curve_file_timeout_s=30
@@ -151,7 +152,7 @@ curve.curve_file_timeout_s=30
#
# Local FileSystem settings
#
-# 是否开启使用renameat2,ext4内核3.15以后开始支持
+# Whether to enable the use of renameat2, supported by ext4 since kernel 3.15
fs.enable_renameat2=true
#
@@ -171,27 +172,27 @@ storeng.sync_write=false
#
# Concurrent apply module
-# 并发模块写线程的并发度,一般是10
+# The concurrency of concurrent module writing threads is generally 10
wconcurrentapply.size=10
-# 并发模块写线程的队列深度
+# Queue depth of concurrent module write threads
wconcurrentapply.queuedepth=1
-# 并发模块读线程的并发度,一般是5
+# The concurrency of concurrent module read threads is generally 5
rconcurrentapply.size=5
-# 并发模块读线程的队列深度
+# Queue depth of concurrent module read threads
rconcurrentapply.queuedepth=1
#
# Chunkfile pool
#
-# 是否开启从chunkfilepool获取chunk,一般是true
+# Whether to enable obtaining chunks from chunkfilepool, usually true
chunkfilepool.enable_get_chunk_from_pool=true
-# chunkfilepool目录
+# chunkfilepool directory
chunkfilepool.chunk_file_pool_dir=./0/
-# chunkfilepool meta文件路径
+# chunkfilepool meta file path
#chunkfilepool.meta_path=./chunkfilepool.meta
-# chunkfilepool meta文件大小
+# chunkfilepool meta file size
chunkfilepool.cpmeta_file_size=4096
-# chunkfilepool get chunk最大重试次数
+# Maximum number of retries for getting a chunk from the chunkfilepool
chunkfilepool.retry_times=5
# Enable clean chunk
chunkfilepool.clean.enable=true
@@ -203,36 +204,36 @@ chunkfilepool.clean.throttle_iops=500
#
# WAL file pool
#
-# walpool是否共用chunkfilepool,如果为true,从第三条开始配置无效
+# Whether the walpool shares the chunkfilepool; if true, the settings from the third entry onward are ignored
walfilepool.use_chunk_file_pool=true
-# WALpool和ChunkFilePool共用时启用,在容量分配时会预留walpool的空间
+# Enabled when the WALpool and ChunkFilePool are shared; space for the walpool is reserved during capacity allocation
walfilepool.use_chunk_file_pool_reserve=15
-# 是否开启从walfilepool获取chunk,一般是true
+# Whether to enable obtaining chunks from walfilepool, usually true
walfilepool.enable_get_segment_from_pool=true
-# walpool目录
+# Walpool directory
walfilepool.file_pool_dir=./0/
-# walpool meta文件路径
+# Walpool meta file path
walfilepool.meta_path=./walfilepool.meta
-# walpool meta文件大小
+# Walpool meta file size
walfilepool.segment_size=8388608
-# WAL metapage大小
+# WAL metapage size
walfilepool.metapage_size=4096
-# WAL filepool 元数据文件大小
+# WAL filepool metadata file size
walfilepool.meta_file_size=4096
-# WAL filepool get chunk最大重试次数
+# Maximum number of retries for getting a chunk from the WAL filepool
walfilepool.retry_times=5
#
# trash settings
#
-# chunkserver回收数据彻底删除的过期时间
+# Expiration time after which recycled data is permanently deleted by the chunkserver
trash.expire_afterSec=300
-# chunkserver检查回收数据过期时间的周期
+# Interval at which the chunkserver checks recycled data for expiration
trash.scan_periodSec=120
# common option
#
-# chunkserver 日志存放文件夹
+# Chunkserver log directory
chunkserver.common.logDir=./
-# 单元测试情况下
+# In the case of unit testing
# chunkserver.common.logDir=./runlog/

diff --git a/conf/client.conf b/conf/client.conf
index bac0dc1108..22345400d5 100644
--- a/conf/client.conf
+++ b/conf/client.conf
@@ -1,29 +1,29 @@
#
-################### mds一侧配置信息 ##################
+################### MDS-side configuration ##################
#
-# mds的地址信息,对于mds集群,地址以逗号隔开
+# MDS address list; for an MDS cluster, addresses are separated by commas
mds.listen.addr=127.0.0.1:6666
-# 初始化阶段向mds注册开关,默认为开
+# Whether to register with MDS during initialization; enabled by default
mds.registerToMDS=true
-# 与mds通信的rpc超时时间
+# RPC timeout for communication with MDS
mds.rpcTimeoutMS=500
-# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值
+# Maximum RPC timeout for communication with MDS; the exponential-backoff timeout cannot exceed this value
mds.maxRPCTimeoutMS=2000
-# 与mds通信重试总时间
+# Total retry time for communication with MDS
mds.maxRetryMS=8000
-# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数
+# Switch to another MDS when consecutive retries on the current one exceed this limit; the failure count includes timeout retries
mds.maxFailedTimesBeforeChangeMDS=2
-# 与MDS一侧保持一个lease时间内多少次续约
+# Number of lease renewals within one lease period with MDS
mds.refreshTimesPerLease=4
-# mds RPC接口每次重试之前需要先睡眠一段时间
+# Sleep for a while before each retry of an MDS RPC
mds.rpcRetryIntervalUS=100000
# The normal retry times for trigger wait strategy
@@ -36,84 +36,84 @@ mds.maxRetryMsInIOPath=86400000
mds.waitSleepMs=10000
#
-################# metacache配置信息 ################
+################# Metacache configuration ################
#
-# 获取leader的rpc超时时间
+# RPC timeout for getting the leader
metacache.getLeaderTimeOutMS=500
-# 获取leader的重试次数
+# Number of retries for getting the leader
metacache.getLeaderRetry=5
-# 获取leader接口每次重试之前需要先睡眠一段时间
+# Sleep for a while before each retry of the get-leader interface
metacache.rpcRetryIntervalUS=100000
#
-############### 调度层的配置信息 #############
+############### Scheduling layer configuration #############
#
-# 调度层队列大小,每个文件对应一个队列
-# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。
+# Scheduling layer queue size, with one queue per file
+# The depth of the scheduling queue affects the overall client throughput; this queue stores asynchronous IO tasks.
schedule.queueCapacity=1000000
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of worker threads for the queue
+# A worker simply dequeues an IO task, sends it to the network, and moves on to the next task.
+# From dequeue to finishing the RPC send takes roughly 20us-100us: 20us in the normal case where
+# no leader lookup is needed, and around 100us when the leader must be fetched during the send.
+# A single thread sustains roughly 100k-500k requests, which already meets the performance requirements
schedule.threadpoolSize=2
-# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程
-# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回,
-# 这样libcurve不占用qemu的线程,不阻塞其异步调用
+# A task queue introduced to isolate the QEMU-side thread, since QEMU has only one IO thread
+# When the QEMU side calls the AIO interface, the call is simply pushed to the task queue and returns,
+# so libcurve does not occupy QEMU's thread and does not block its asynchronous calls
isolation.taskQueueCapacity=1000000
-# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程
+# Thread pool size of the task queue isolating the QEMU thread; default is 1 thread
isolation.taskThreadPoolSize=1
#
-################ 与chunkserver通信相关配置 #############
+################ Configuration for communication with chunkserver #############
#
-# 读写接口失败的OP之间重试睡眠
+# Sleep interval between retries of failed read/write OPs
chunkserver.opRetryIntervalUS=100000
-# 失败的OP重试次数
+# Number of retries for failed OPs
chunkserver.opMaxRetry=2500000
-# 与chunkserver通信的rpc超时时间
+# RPC timeout for communication with chunkserver
chunkserver.rpcTimeoutMS=1000
-# 重试请求之间睡眠最长时间
-# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间
-# 这个时间最大为maxRetrySleepIntervalUs
+# Maximum sleep time between retried requests
+# When the network is congested or a chunkserver is overloaded, the sleep time is increased,
+# up to maxRetrySleepIntervalUs
chunkserver.maxRetrySleepIntervalUS=8000000
-# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略
-# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间
-# 这个时间最大为maxTimeoutMS
+# Maximum RPC timeout for retried requests; the timeout follows an exponential backoff strategy
+# When timeouts occur due to network congestion, the RPC timeout is increased,
+# up to maxTimeoutMS
chunkserver.maxRPCTimeoutMS=8000
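The two comment blocks above describe capped exponential backoff for both the retry sleep and the RPC timeout; a minimal sketch of that policy in C++ (an assumed shape, not the libcurve implementation):

    #include <algorithm>
    #include <cstdint>

    // Doubles the base value per retry, capped at 'max'; mirrors the
    // maxRetrySleepIntervalUS / maxRPCTimeoutMS comments above.
    uint64_t BackoffValue(uint64_t base, uint64_t max, int retry) {
        uint64_t v = base;
        for (int i = 0; i < retry && v < max; ++i) v *= 2;
        return std::min(v, max);
    }
    // e.g. sleep:       BackoffValue(100000, 8000000, n) microseconds
    //      rpc timeout: BackoffValue(1000, 8000, n) milliseconds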
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# If this value is exceeded, a health check is performed; if the health check fails, the chunkserver is marked unstable
chunkserver.maxStableTimeoutTimes=10
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# Timeout of the health-check request issued after consecutive RPC timeouts on a chunkserver
chunkserver.checkHealthTimeoutMs=100
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on the same server exceeds this value,
+# all chunkservers on that server are marked unstable
chunkserver.serverStableThreshold=3
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang
-# 真正宕机的情况下,请求重试一定次数后会处理完成
-# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避
+# Heavy pressure on the underlying chunkserver may also trigger the unstable state
+# Because the copyset leader may change, the request timeout is reset to the default value, which can lead to IO hangs
+# If the node is really down, the request completes after a certain number of retries
+# If retries keep going, it is not a downtime case, and the timeout should still enter the exponential backoff logic
+# Once the retry count of a request exceeds this value, its timeout always enters exponential backoff
chunkserver.minRetryTimesForceTimeoutBackoff=5
-# ** 已废弃,不再使用,请使用 `chunkserver.slowRequestThresholdMS` **
-# ** dreprecated, use `chunkserver.slowRequestThresholdMS` instead **
-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# ** Deprecated, no longer in use, please use `chunkserver.slowRequestThresholdMS` **
+# ** Deprecated, use `chunkserver.slowRequestThresholdMS` instead **
+# When an RPC retry exceeds the maxRetryTimesBeforeConsiderSuspend count
+# it is marked as a suspended IO, and the metric will trigger an alert.
chunkserver.maxRetryTimesBeforeConsiderSuspend=20
# 请求重试时间超过该阈值后,会标记为slow request
@@ -122,41 +122,41 @@ chunkserver.maxRetryTimesBeforeConsiderSuspend=20
chunkserver.slowRequestThresholdMS=45000
#
-################# 文件级别配置项 #############
+################# File-level configuration #############
#
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; each file's inflight RPCs are counted independently
global.fileMaxInFlightRPCNum=128
-# 文件IO下发到底层chunkserver最大的分片KB
+# Maximum slice size (KB) into which file IO is split before being sent to the underlying chunkserver
global.fileIOSplitMaxSizeKB=64
#
-################# log相关配置 ###############
+################# Log configuration ###############
#
# enable logging or not
global.logging.enable=True
#
-# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
global.logLevel=0
-# 设置log的路径
+# Set the log path
global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ /curvebs/client/logs __CURVEADM_TEMPLATE__
-# 单元测试情况下
+# In the case of unit testing
# logpath=./runlog/
#
-################# 读源卷相关配置 ###############
+################# Source volume read configuration ###############
#
-# 读取源卷时打开的fd超时关闭时间300s
+# FDs opened for reading the source volume are closed after a 300s timeout
closefd.timeout=300
-# 读取源卷时打开的fd后台线程每600s扫描一遍fdMap,关闭超时fd
+# A background thread scans the fdMap every 600s and closes timed-out FDs opened for reading the source volume
closefd.timeInterval=600
#
-############### metric 配置信息 #############
+############### Metric configuration #############
#
global.metricDummyServerStartPort=9000
-# 是否关闭健康检查: true/关闭 false/不关闭
+# Whether to disable the health check: true = disable, false = keep enabled
global.turnOffHealthCheck=true
# minimal open file limit
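Taken together, maxStableTimeoutTimes, checkHealthTimeoutMs and serverStableThreshold above imply roughly the following flow; the C++ sketch below uses hypothetical names (HealthCheckOk, UnstableTracker), not Curve's actual UnstableHelper:

    #include <map>
    #include <string>

    // Stub health check: in the real client this would be a probe with a
    // checkHealthTimeoutMs deadline; here it is a placeholder that always succeeds.
    static bool HealthCheckOk(const std::string& /*cs_addr*/, int /*timeout_ms*/) {
        return true;
    }

    struct UnstableTracker {
        int max_stable_timeout_times = 10;    // chunkserver.maxStableTimeoutTimes
        int check_health_timeout_ms = 100;    // chunkserver.checkHealthTimeoutMs
        std::map<std::string, int> timeouts;  // consecutive timeouts per chunkserver

        // Called on every RPC timeout; true means "mark this chunkserver unstable".
        bool OnRpcTimeout(const std::string& cs_addr) {
            if (++timeouts[cs_addr] < max_stable_timeout_times) return false;
            timeouts[cs_addr] = 0;
            return !HealthCheckOk(cs_addr, check_health_timeout_ms);
        }
    };

Per the serverStableThreshold comment, once enough chunkservers on one server are marked this way, the remaining chunkservers on that server are marked unstable as well.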
diff --git a/conf/cs_client.conf b/conf/cs_client.conf
index 09d567d8f7..5bd674e417 100644
--- a/conf/cs_client.conf
+++ b/conf/cs_client.conf
@@ -1,29 +1,29 @@
#
-################### mds一侧配置信息 ##################
+################### MDS-side configuration ##################
#
-# mds的地址信息,对于mds集群,地址以逗号隔开
+# MDS address list; for an MDS cluster, addresses are separated by commas
mds.listen.addr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__
-# 初始化阶段向mds注册开关,默认为开
+# Whether to register with MDS during initialization; enabled by default
mds.registerToMDS=false
-# 与mds通信的rpc超时时间
+# RPC timeout for communication with MDS
mds.rpcTimeoutMS=500
-# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值
+# Maximum RPC timeout for communication with MDS; the exponential-backoff timeout cannot exceed this value
mds.maxRPCTimeoutMS=2000
-# 与mds通信重试总时间
+# Total retry time for communication with MDS
mds.maxRetryMS=8000
-# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数
+# Switch to another MDS when consecutive retries on the current one exceed this limit; the failure count includes timeout retries
mds.maxFailedTimesBeforeChangeMDS=2
-# 与MDS一侧保持一个lease时间内多少次续约
+# Number of lease renewals within one lease period with MDS
mds.refreshTimesPerLease=4
-# mds RPC接口每次重试之前需要先睡眠一段时间
+# Sleep for a while before each retry of an MDS RPC
mds.rpcRetryIntervalUS=100000
# The normal retry times for trigger wait strategy
@@ -36,111 +36,111 @@ mds.maxRetryMsInIOPath=86400000
mds.waitSleepMs=10000
#
-################# metacache配置信息 ################
+################# Metacache configuration ################
#
-# 获取leader的rpc超时时间
+# RPC timeout for getting the leader
metacache.getLeaderTimeOutMS=500
-# 获取leader的backup request超时时间
+# Backup-request timeout for getting the leader
metacache.getLeaderBackupRequestMS=100
-# getleaer backup request使用的load balancer方法
+# Load balancer method used by the get-leader backup request
metacache.getLeaderBackupRequestLbName=rr
-# 获取leader的重试次数
+# Number of retries for getting the leader
metacache.getLeaderRetry=5
-# 获取leader接口每次重试之前需要先睡眠一段时间
+# Sleep for a while before each retry of the get-leader interface
metacache.rpcRetryIntervalUS=100000
#
-############### 调度层的配置信息 #############
+############### Scheduling layer configuration #############
#
-# 调度层队列大小,每个文件对应一个队列
-# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。
+# Scheduling layer queue size, with one queue per file
+# The depth of the scheduling queue affects the overall client throughput; this queue stores asynchronous IO tasks.
schedule.queueCapacity=1000000
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of execution threads for the queue
+# The task of execution threads is to fetch IO and then send it over the network before moving on to the next network task.
+# The time taken for a task, from retrieval from the queue to sending the RPC request, is typically between 20 and 100 microseconds; 20 microseconds is the normal case when leader acquisition is not needed during the send operation.
+# If leader acquisition is required during sending, the time can be around 100 microseconds. The throughput of one thread ranges from 100,000 to 500,000 operations per second.
+# The performance meets the requirements.
schedule.threadpoolSize=1
-# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程
-# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回,
-# 这样libcurve不占用qemu的线程,不阻塞其异步调用
+# A task queue introduced to isolate the QEMU-side thread, since QEMU has only one IO thread
+# When the QEMU side calls the AIO interface, the call is simply pushed to the task queue and returns,
+# so libcurve does not occupy QEMU's thread and does not block its asynchronous calls
isolation.taskQueueCapacity=1000000
-# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程
+# Thread pool size of the task queue isolating the QEMU thread; default is 1 thread
isolation.taskThreadPoolSize=1
#
-################ 与chunkserver通信相关配置 #############
+################ Configuration for communication with chunkserver #############
#
-# 读写接口失败的OP之间重试睡眠
+# Sleep interval between retries of failed read/write OPs
chunkserver.opRetryIntervalUS=100000
-# 失败的OP重试次数
+# Number of retries for failed OPs
chunkserver.opMaxRetry=3
-# 与chunkserver通信的rpc超时时间
+# RPC timeout for communication with chunkserver
chunkserver.rpcTimeoutMS=1000
-# 重试请求之间睡眠最长时间
-# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间
-# 这个时间最大为maxRetrySleepIntervalUs
+# Maximum sleep time between retried requests
+# When the network is congested or a chunkserver is overloaded, the sleep time is increased,
+# up to maxRetrySleepIntervalUs
chunkserver.maxRetrySleepIntervalUS=8000000
-# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略
-# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间
-# 这个时间最大为maxTimeoutMS
+# Maximum RPC timeout for retried requests; the timeout follows an exponential backoff strategy
+# When timeouts occur due to network congestion, the RPC timeout is increased,
+# up to maxTimeoutMS
chunkserver.maxRPCTimeoutMS=8000
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# If this value is exceeded, a health check is conducted, and if the health check fails, the chunkserver is marked as unstable
chunkserver.maxStableTimeoutTimes=64
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# Timeout of the health-check request issued after consecutive RPC timeouts on a chunkserver
chunkserver.checkHealthTimeoutMs=100
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on the same server exceeds this value,
+# all chunkservers on that server are marked unstable
chunkserver.serverStableThreshold=3
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang
-# 真正宕机的情况下,请求重试一定次数后会处理完成
-# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避
+# Heavy pressure on the underlying chunkserver may also trigger the unstable state
+# Because the copyset leader may change, the request timeout is reset to the default value, which can lead to IO hangs
+# If the node is really down, the request completes after a certain number of retries
+# If retries keep going, it is not a downtime case, and the timeout should still enter the exponential backoff logic
+# Once the retry count of a request exceeds this value, its timeout always enters exponential backoff
chunkserver.minRetryTimesForceTimeoutBackoff=5
-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# When an RPC retry exceeds the maxRetryTimesBeforeConsiderSuspend count,
+# it is recorded as a suspended IO and the metric raises an alarm
chunkserver.maxRetryTimesBeforeConsiderSuspend=20
#
-################# 文件级别配置项 #############
+################# File-level configuration #############
#
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; each file's inflight RPCs are counted independently
global.fileMaxInFlightRPCNum=64
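fileMaxInFlightRPCNum above caps the number of outstanding RPCs per file; a counting-semaphore sketch of such a limiter in C++ (an assumed shape, not libcurve's actual inflight controller):

    #include <condition_variable>
    #include <mutex>

    // Per-file inflight limiter: acquire before sending an RPC, release in its callback.
    class InflightLimiter {
     public:
      explicit InflightLimiter(int max) : max_(max) {}
      void Acquire() {
        std::unique_lock<std::mutex> lk(mu_);
        cv_.wait(lk, [this] { return inflight_ < max_; });
        ++inflight_;
      }
      void Release() {
        { std::lock_guard<std::mutex> lk(mu_); --inflight_; }
        cv_.notify_one();
      }
     private:
      std::mutex mu_;
      std::condition_variable cv_;
      int max_;
      int inflight_ = 0;
    };
    // e.g. InflightLimiter limiter(64);  // global.fileMaxInFlightRPCNum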
-################# 文件级别配置项 #############
+################# File level configuration items #############
#
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; each file's inflight RPCs are counted independently
global.fileMaxInFlightRPCNum=64
-# 文件IO下发到底层chunkserver最大的分片KB
+# Maximum split size in KB when file IO is dispatched to the underlying chunkservers
global.fileIOSplitMaxSizeKB=64
#
-################# log相关配置 ###############
+################# Log related configuration ###############
#
-# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
global.logLevel=0
-# 设置log的路径
+# Path of the log files
global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
-# 单元测试情况下
+# For unit tests
# logpath=./runlog/
#
-############### metric 配置信息 #############
+############### Metric configuration #############
#
global.metricDummyServerStartPort=9000
@@ -149,7 +149,7 @@ global.metricDummyServerStartPort=9000
global.minOpenFileLimit=0
#
-# session map文件,存储打开文件的filename到path的映射
+# Session map file, which stores the filename-to-path mapping of open files
#
global.sessionMapPath=./session_map.json
diff --git a/conf/mds.conf b/conf/mds.conf
index 1e1a7eb273..95d7eca3bb 100644
--- a/conf/mds.conf
+++ b/conf/mds.conf
@@ -1,5 +1,5 @@
#
-# mds服务端口
+# MDS service port
#
mds.listen.addr=127.0.0.1:6666 #__CURVEADM_TEMPLATE__ ${service_addr}:${service_port} __CURVEADM_TEMPLATE__
mds.dummy.listen.port=6667 # __CURVEADM_TEMPLATE__ ${service_dummy_port} __CURVEADM_TEMPLATE__
@@ -7,15 +7,15 @@ global.subnet=127.0.0.0/24
global.port=6666 # __CURVEADM_TEMPLATE__ ${service_port} __CURVEADM_TEMPLATE__
#
-# etcd相关配置
+# Etcd related configuration
#
-# etcd地址
+# Etcd address
mds.etcd.endpoint=127.0.0.1:2379 # __CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEADM_TEMPLATE__
-# client建立连接的超时时间
+# Timeout for the client to establish a connection
mds.etcd.dailtimeoutMs=5000
-# client进行put/get/txn等操作的超时时间
+# Timeout for client put/get/txn and other operations
mds.etcd.operation.timeoutMs=5000
-# client操作失败可以重试的次数
+# Number of times a failed client operation can be retried
mds.etcd.retry.times=3
# wait dlock timeout
mds.etcd.dlock.timeoutMs=10000
@@ -23,68 +23,68 @@ mds.etcd.dlock.timeoutMs=10000
mds.etcd.dlock.ttlSec=10
#
-# segment分配量统计相关配置
+# Configuration related to segment allocation statistics
#
-# 将内存中的数据持久化到etcd的间隔, 单位ms
+# Interval for persisting in-memory data to etcd, in ms
mds.segment.alloc.periodic.persistInterMs=10000
-# 出错情况下的重试间隔,单位ms
+# Retry interval on error, in ms
mds.segment.alloc.retryInterMs=1000
mds.segment.discard.scanIntevalMs=5000
-# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s)
-# 该值和etcd集群election timeout相关.
-# etcd的server端限制了该值最小为1.5 * election timeout
-# 建议设置etcd集群election timeout为1s
+# A session is created during leader election; the unit is seconds (the go-side interface for this value also uses seconds)
+# This value is related to the etcd cluster's election timeout.
+# The etcd server requires it to be at least 1.5 * election timeout
+# It is recommended to set the etcd cluster's election timeout to 1s
mds.leader.sessionInterSec=5
-# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间
-# 内未当选leader会返回错误
+# Timeout of the leader election: if it is 0 and the election fails, it blocks forever; if it is greater than 0,
+# an error is returned when no leader is elected within electionTimeoutMs
mds.leader.electionTimeoutMs=0
#
-# scheduler相关配置
+# Scheduler related configuration
#
-# copysetScheduler开关
+# copysetScheduler switch
mds.enable.copyset.scheduler=true
-# leaderScheduler开关
+# leaderScheduler switch
mds.enable.leader.scheduler=true
-# recoverScheduler开关
+# recoverScheduler switch
mds.enable.recover.scheduler=true
-# replicaScheduler开关
+# replicaScheduler switch
mds.enable.replica.scheduler=true
# Scan scheduler switch
mds.enable.scan.scheduler=true
-# copysetScheduler 轮次间隔,单位是s
+# copysetScheduler round interval, in seconds
mds.copyset.scheduler.intervalSec=5
-# replicaScheduler 轮次间隔,单位是s
+# replicaScheduler round interval, in seconds
mds.replica.scheduler.intervalSec=5
-# leaderScheduler 轮次间隔,单位是s
+# leaderScheduler round interval, in seconds
mds.leader.scheduler.intervalSec=30
-# recoverScheduler 轮次间隔,单位是s
+# recoverScheduler round interval, in seconds
mds.recover.scheduler.intervalSec=5
# Scan scheduler run interval (seconds)
mds.scan.scheduler.intervalSec=60
-# 每块磁盘上operator的并发度
+# Concurrency of operators on each disk
mds.schduler.operator.concurrent=1
-# leader变更超时时间, 超时后mds从内存移除该operator
+# Timeout for a leader transfer; on timeout, mds removes the operator from memory
mds.schduler.transfer.limitSec=60
-# 减一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for removing a replica; on timeout, mds removes the operator from memory
mds.scheduler.remove.limitSec=300
-# 增加一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for adding a replica; on timeout, mds removes the operator from memory
mds.scheduler.add.limitSec=1800
-# change一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for changing a replica; on timeout, mds removes the operator from memory
mds.scheduler.change.limitSec=1800
# Scan operator timeout (seconds)
mds.scheduler.scan.limitSec=180
-# copyset数量极差不能超过均值的百分比
+# The range (max - min) of copyset counts must not exceed this percentage of the mean
mds.scheduler.copysetNumRangePercent=0.05
-# chunkserver上copyset的scatte-rwidth不能超过最小值的百分比
+# The scatter-width of copysets on a chunkserver must not exceed the minimum value by more than this percentage
mds.schduler.scatterWidthRangePerent=0.2
-# 一个server上超过一定数量的chunkserver offline, 不做恢复
+# If more than this number of chunkservers on one server are offline, no recovery is performed
mds.chunkserver.failure.tolerance=3
-# chunkserver启动coolingTimeSec_后才可以作为target leader, 单位是s
-# TODO(lixiaocui): 续得一定程度上与快照的时间间隔方面做到相关
+# A chunkserver can become a target leader only after it has been up for coolingTimeSec_, in seconds
+# TODO(lixiaocui): this should to some extent be correlated with the snapshot interval
mds.scheduler.chunkserver.cooling.timeSec=1800
# ScanScheduler: scan start hour in one day ([0-23])
mds.scheduler.scan.startHour=0
@@ -98,104 +98,104 @@ mds.scheduler.scan.concurrent.per.pool=10
mds.scheduler.scan.concurrent.per.chunkserver=1
#
-# 心跳相关配置,单位为ms
+# Heartbeat related configuration, in ms
#
-# chunkserver和mds的心跳间隔
+# Heartbeat interval between chunkserver and mds
mds.heartbeat.intervalMs=10000
-# chunkserver和mds间心跳miss的时间
+# Time after which a heartbeat between chunkserver and mds is considered missed
mds.heartbeat.misstimeoutMs=30000
-# mds在心跳miss后offlinetimeout被标记为offline
+# After a heartbeat miss, mds marks the chunkserver offline once offlinetimeout elapses
mds.heartbeat.offlinetimeoutMs=1800000
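The three heartbeat values above only make sense in a strict order, which the comments imply but never state: a heartbeat is sent every intervalMs, declared missed after misstimeoutMs, and the chunkserver is marked offline after offlinetimeoutMs. A hypothetical sanity check under that assumption (not something mds actually runs):

```cpp
#include <cassert>
#include <cstdint>

// Assumed ordering of the heartbeat settings; CheckHeartbeatConfig is a
// hypothetical helper, not part of the mds code.
void CheckHeartbeatConfig(uint64_t intervalMs, uint64_t misstimeoutMs,
                          uint64_t offlinetimeoutMs) {
    assert(intervalMs < misstimeoutMs);        // e.g. 10000 < 30000
    assert(misstimeoutMs < offlinetimeoutMs);  // e.g. 30000 < 1800000
}
```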
-# mds启动后延迟一定时间开始指导chunkserver删除物理数据
-# 需要延迟删除的原因在代码中备注
+# After mds starts, it waits for a period of time before instructing chunkservers to delete physical data
+# The reason for the delayed deletion is noted in the code
mds.heartbeat.clean_follower_afterMs=1200000
#
-# namespace cache相关
+# Namespace cache related
#
-# namestorage的缓存大小,为0表示不缓存
-# 按照每个文件最小10GB的空间预算。算上超售(2倍)
-# 文件数量 = 5PB/10GB ~= 524288 个文件
-# sizeof(namespace对象) * 524288 ~= 89Byte *524288 ~= 44MB 空间
-# 16MB chunk大小, 1个segment 1GB
-# sizeof(segment 对象) * 2621440 ~=(32 + (1024/16)*12)* 2621440 ~= 1.95 GB
-# 数据量:3GB左右
-# 记录数量:524288+2621440 ~= 300w左右
+# Cache size of namestorage; 0 means no caching
+# Based on a minimum space budget of 10GB per file, with 2x overselling:
+# number of files = 5PB / 10GB ~= 524288 files
+# sizeof(namespace object) * 524288 ~= 89Byte * 524288 ~= 44MB
+# with 16MB chunks and 1GB segments:
+# sizeof(segment object) * 2621440 ~= (32 + (1024/16)*12) * 2621440 ~= 1.95GB
+# data volume: about 3GB
+# record count: 524288 + 2621440 ~= about 3 million
mds.cache.count=100000
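The sizing estimate in the comment can be checked mechanically. The snippet below merely reproduces the comment's own arithmetic; every constant is an assumption taken from the comment itself, not a value read from the config:

```cpp
#include <cstdio>

int main() {
    // 5PB / 10GB per file = 524288 files (the comment's space budget)
    const unsigned long long files = (5ULL << 50) / (10ULL << 30);
    // 89-byte namespace objects -> ~44MB
    const unsigned long long namespaceBytes = 89ULL * files;
    // segment count and per-object size taken directly from the comment
    const unsigned long long segments = 2621440ULL;
    const unsigned long long segmentBytes = (32 + (1024 / 16) * 12) * segments;
    std::printf("files=%llu namespaceMB=%llu segmentMB=%llu records=%llu\n",
                files, namespaceBytes >> 20, segmentBytes >> 20,
                files + segments);  // ~3 million records in total
    return 0;
}
```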
#
# mds file record settings
#
-# mds file记录过期时间,单位us
+# Expiration time of mds file records, in us
mds.file.expiredTimeUs=5000000
-# mds后台扫描线程扫描file记录间隔时间,单位us
+# Interval at which the mds background scan thread scans file records, in us
mds.file.scanIntevalTimeUs=500000
#
# auth settings
#
-# root用户密码
+# Root user password
mds.auth.rootUserName=root
mds.auth.rootPassword=root_password
#
# file lock setting
#
-# mds的文件锁桶大小
+# File lock bucket size of mds
mds.filelock.bucketNum=8
#
# topology config
#
-# Toplogy 定期刷新入数据库的时间间隔
+# Interval at which topology is periodically flushed to the database
mds.topology.TopologyUpdateToRepoSec=60
-# 请求chunkserver上创建全部copyset的超时时间
+# Timeout of the request to create all copysets on a chunkserver
mds.topology.CreateCopysetRpcTimeoutMs=10000
-# 请求chunkserver上创建copyset重试次数
+# Number of retries for the create-copyset request to a chunkserver
mds.topology.CreateCopysetRpcRetryTimes=20
-# 请求chunkserver上创建copyset重试间隔
+# Retry interval for the create-copyset request to a chunkserver
mds.topology.CreateCopysetRpcRetrySleepTimeMs=1000
-# Topology模块刷新metric时间间隔
+# Interval at which the topology module refreshes metrics
mds.topology.UpdateMetricIntervalSec=10
-#和mds.chunkserver.failure.tolerance设置有关,一个zone 标准配置20台节点,如果允许3台节点failover,
-#那么剩余17台机器需要承载原先20台机器的空间,17/20=0.85,即使用量超过这个值即不再往这个池分配,
-#具体分为来两种情况, 当不使用chunkfilepool,物理池限制使用百分比,当使用 chunkfilepool 进行chunkfilepool分配时需预留failover空间,
+# Related to the mds.chunkserver.failure.tolerance setting. A zone is typically configured with 20 nodes; if 3 nodes are allowed to fail over,
+# the remaining 17 machines must carry the space of the original 20, i.e. 17/20 = 0.85. Once usage exceeds this value, no more space is allocated from this pool.
+# There are two cases: when chunkfilepool is not used, this limits the usage percentage of the physical pool; when chunkfilepool is used, failover space must be reserved at chunkfilepool allocation time.
mds.topology.PoolUsagePercentLimit=85
-# 多pool选pool策略 0:Random, 1:Weight
+# Pool selection policy when there are multiple pools 0:Random, 1:Weight
mds.topology.choosePoolPolicy=0
# enable LogicalPool ALLOW/DENY status
mds.topology.enableLogicalPoolStatus=false
#
# copyset config
-# 默认值,为0时不启用
+# Default values; 0 means disabled
#
-# 生成copyset重试次数
+# Number of retries for generating copysets
mds.copyset.copysetRetryTimes=10
-# 所有chunkserver的scatterWidth需满足的最大方差
+# Maximum variance that the scatterWidth of all chunkservers must satisfy
mds.copyset.scatterWidthVariance=0
-# 所有chunkserver的scatterWidth需满足的最大标准差
+# Maximum standard deviation that the scatterWidth of all chunkservers must satisfy
mds.copyset.scatterWidthStandardDevation=0
-# 所有chunkserver的scatterWidth需满足的最大极差
+# Maximum range that the scatterWidth of all chunkservers must satisfy
mds.copyset.scatterWidthRange=0
-# 所有chunkserver的scatterWidth偏离均值的百分比
-# scatterwidth偏移百分比设置过大,导致部分机器scatterwidth过小,影响机器恢复时间,恢复
-# 时间会导致集群的可靠性降低;其次,会导致部分机器scatterwith过大,某些chunkserver上的
-# copyset散落在各机器上,其他机器一旦写入,这些scatter-with较大的机器成为热点
-# scatterwidth偏移百分比设置过小,导致scatterwidth平均程度要求更大,copyset算法要求越高,
-# 导致算法可能算不出理想结果,建议设置值为20
+# Percentage by which the scatterWidth of all chunkservers may deviate from the mean.
+# If set too large, some machines end up with a scatterWidth that is too small, lengthening machine recovery time and reducing cluster reliability;
+# it also leaves some machines with an excessively large scatterWidth, scattering certain chunkservers' copysets across many machines,
+# which become hotspots once the other machines start writing.
+# If set too small, the scatterWidth must be more uniform and the copyset algorithm is held to a higher bar,
+# so it may fail to find a good placement. The recommended value is 20.
mds.copyset.scatterWidthFloatingPercentage=20
#
# curvefs config
#
-# curvefs的默认chunk size大小,16MB = 16*1024*1024 = 16777216
+# Default chunk size of curvefs, 16MB = 16*1024*1024 = 16777216
mds.curvefs.defaultChunkSize=16777216
-# curvefs的默认segment size大小,1GB = 1*1024*1024*1024 = 1073741824
+# Default segment size of curvefs, 1GB = 1*1024*1024*1024 = 1073741824
mds.curvefs.defaultSegmentSize=1073741824
-# curvefs的默认最小文件大小,10GB = 10*1024*1024*1024 = 10737418240
+# Default minimum file size of curvefs, 10GB = 10*1024*1024*1024 = 10737418240
mds.curvefs.minFileLength=10737418240
-# curvefs的默认最大文件大小,20TB = 20*1024*1024*1024*1024 = 21990232555520
+# Default maximum file size of curvefs, 20TB = 20*1024*1024*1024*1024 = 21990232555520
mds.curvefs.maxFileLength=21990232555520
# smallest read/write unit for volume, support |512| and |4096|
mds.curvefs.blockSize=4096
@@ -203,29 +203,29 @@ mds.curvefs.blockSize=4096
#
# chunkseverclient config
#
-# rpc 超时时间
+# RPC timeout
mds.chunkserverclient.rpcTimeoutMs=500
-# rpc 重试次数
+# Number of RPC retries
mds.chunkserverclient.rpcRetryTimes=5
-# rpc 重试时间间隔
+# RPC retry interval
mds.chunkserverclient.rpcRetryIntervalMs=500
-# 从copyset的每个chunkserver getleader的重试的最大轮次
+# Maximum number of getleader retry rounds against each chunkserver in a copyset
mds.chunkserverclient.updateLeaderRetryTimes=5
-# 从copyset的每个chunkserver getleader的每一轮的间隔,需大于raft选主的时间
+# Interval between getleader rounds against each chunkserver in a copyset; must be longer than a raft leader election
mds.chunkserverclient.updateLeaderRetryIntervalMs=5000
#
# snapshotclone config
#
-# snapshot clone server 地址
+# Snapshot clone server address
mds.snapshotcloneclient.addr=127.0.0.1:5555 # __CURVEADM_TEMPLATE__ ${cluster_snapshotclone_proxy_addr} __CURVEADM_TEMPLATE__
#
# common options
#
-# 日志存放文件夹
+# Log directory
mds.common.logDir=./ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
-# 单元测试情况下
+# For unit tests
# mds.common.logDir=./runlog/
#
diff --git a/conf/py_client.conf b/conf/py_client.conf
index cb7999c5e4..5460949092 100644
--- a/conf/py_client.conf
+++ b/conf/py_client.conf
@@ -1,29 +1,29 @@
#
-################### mds一侧配置信息 ##################
+################### MDS side configuration ##################
#
-# mds的地址信息,对于mds集群,地址以逗号隔开
+# Address of the mds; for an mds cluster, addresses are separated by commas
mds.listen.addr=127.0.0.1:6666
-# 初始化阶段向mds注册开关,默认为开
+# Switch for registering with mds during initialization; on by default
mds.registerToMDS=false
-# 与mds通信的rpc超时时间
+# RPC timeout for communication with mds
mds.rpcTimeoutMS=500
-# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值
+# Maximum RPC timeout for communication with mds; the exponential backoff timeout cannot exceed this value
mds.maxRPCTimeoutMS=2000
-# 与mds通信重试总时间
+# Total retry time for communication with mds
mds.maxRetryMS=8000
-# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数
+# Switch to another mds when consecutive retries on the current one exceed this limit; this failure count includes timeout retries
mds.maxFailedTimesBeforeChangeMDS=2
-# 与MDS一侧保持一个lease时间内多少次续约
+# Number of lease renewals with the MDS within one lease period
mds.refreshTimesPerLease=4
-# mds RPC接口每次重试之前需要先睡眠一段时间
+# The mds RPC interface sleeps for a while before each retry
mds.rpcRetryIntervalUS=100000
# The normal retry times for trigger wait strategy
@@ -36,91 +36,91 @@ mds.maxRetryMsInIOPath=86400000
mds.waitSleepMs=10000
#
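The failover policy behind mds.maxFailedTimesBeforeChangeMDS can be pictured with a small sketch; the class below is hypothetical and only illustrates the rule stated in the comment (rotate to the next address after N consecutive failures, timeouts included), not libcurve's actual client code:

```cpp
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// Hypothetical illustration of mds failover; names are not from libcurve.
class MdsEndpointSelector {
 public:
    MdsEndpointSelector(std::vector<std::string> addrs, uint32_t maxFailed)
        : addrs_(std::move(addrs)), maxFailed_(maxFailed) {}

    const std::string& Current() const { return addrs_[index_]; }

    // Called after every RPC attempt; rotates to the next mds on repeated failure.
    void OnRpcResult(bool ok) {
        if (ok) { failed_ = 0; return; }
        if (++failed_ >= maxFailed_) {
            index_ = (index_ + 1) % addrs_.size();
            failed_ = 0;
        }
    }

 private:
    std::vector<std::string> addrs_;
    uint32_t maxFailed_;
    uint32_t failed_ = 0;
    std::size_t index_ = 0;
};
```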
-################# metacache配置信息 ################
+################# Metacache configuration ################
#
-# 获取leader的rpc超时时间
+# RPC timeout for getting the leader
metacache.getLeaderTimeOutMS=500
-# 获取leader的重试次数
+# Number of retries for getting the leader
metacache.getLeaderRetry=5
-# 获取leader接口每次重试之前需要先睡眠一段时间
+# The get-leader interface sleeps for a while before each retry
metacache.rpcRetryIntervalUS=100000
#
-############### 调度层的配置信息 #############
+############### Scheduling layer configuration #############
#
-# 调度层队列大小,每个文件对应一个队列
-# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。
+# Scheduling layer queue size; each file has its own queue
+# The depth of the scheduling queue affects overall client throughput; the queue holds asynchronous IO tasks.
schedule.queueCapacity=1000000
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of executor threads for the queue
+# An executor thread simply takes an IO task off the queue, sends it to the network, and returns for the next task.
+# From dequeue to finishing the RPC send takes roughly 20us-100us: 20us in the normal case with no leader lookup,
+# about 100us when a leader must be fetched during the send. A single thread sustains roughly 100k-500k,
+# which already meets the performance requirements
schedule.threadpoolSize=1
-# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程
-# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回,
-# 这样libcurve不占用qemu的线程,不阻塞其异步调用
+# Task queue introduced to isolate the QEMU-side thread, since QEMU has only one IO thread
+# When QEMU calls the AIO interface, the call is simply pushed onto the task queue and returns immediately,
+# so libcurve does not occupy QEMU's thread and never blocks its asynchronous calls
isolation.taskQueueCapacity=1000000
-# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程
+# Thread pool size of the task queue that isolates QEMU threads; defaults to 1 thread
isolation.taskThreadPoolSize=1
#
-################ 与chunkserver通信相关配置 #############
+################ Configuration for communication with chunkserver #############
#
-# 读写接口失败的OP之间重试睡眠
+# Sleep interval between retries of failed read/write OPs
chunkserver.opRetryIntervalUS=100000
-# 失败的OP重试次数
+# Number of retries for a failed OP
chunkserver.opMaxRetry=2500000
-# 与chunkserver通信的rpc超时时间
+# RPC timeout for communication with chunkserver
chunkserver.rpcTimeoutMS=1000
-# 重试请求之间睡眠最长时间
-# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间
-# 这个时间最大为maxRetrySleepIntervalUs
+# Maximum sleep time between retried requests
+# When the network is congested or a chunkserver is overloaded, the sleep time needs to grow
+# It is capped at maxRetrySleepIntervalUs
chunkserver.maxRetrySleepIntervalUS=8000000
-# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略
-# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间
-# 这个时间最大为maxTimeoutMS
+# Maximum RPC timeout for retried requests; the timeout follows an exponential backoff policy
+# When timeouts occur due to network congestion, the RPC timeout needs to grow
+# It is capped at maxTimeoutMS
chunkserver.maxRPCTimeoutMS=8000
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# If this value is exceeded, a health check is triggered; if the check fails, the chunkserver is marked as unstable
chunkserver.maxStableTimeoutTimes=10
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# Timeout of the health check request sent after consecutive RPC timeouts on a chunkserver
chunkserver.checkHealthTimeoutMs=100
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on the same server exceeds this value,
+# all chunkservers on that server are marked as unstable
chunkserver.serverStableThreshold=3
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang
-# 真正宕机的情况下,请求重试一定次数后会处理完成
-# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避
+# When the underlying chunkserver is under heavy pressure, it may also be marked unstable
+# Since the copyset leader may change, the request timeout would be reset to the default value, causing IO to hang
+# In a real downtime case, the request completes after a certain number of retries
+# If retries keep going, it is not a downtime case, and the timeout should still enter the exponential backoff logic
+# Once the number of retries of a request exceeds this value, its timeout always enters exponential backoff
chunkserver.minRetryTimesForceTimeoutBackoff=5
-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# When an RPC has been retried more than maxRetryTimesBeforeConsiderSuspend times,
+# it is recorded as suspended IO and the metric raises an alarm
chunkserver.maxRetryTimesBeforeConsiderSuspend=20
#
-################# 文件级别配置项 #############
+################# File level configuration items #############
#
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; each file's inflight RPCs are counted independently
global.fileMaxInFlightRPCNum=64
-# 文件IO下发到底层chunkserver最大的分片KB
+# Maximum split size in KB when file IO is dispatched to the underlying chunkservers
global.fileIOSplitMaxSizeKB=64
# minimal open file limit
@@ -128,22 +128,22 @@ global.fileIOSplitMaxSizeKB=64
global.minOpenFileLimit=0
#
-################# log相关配置 ###############
+################# Log related configuration ###############
#
-# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
global.logLevel=0
-# 设置log的路径
+# Path of the log files
global.logPath=/data/log/curve/
-# 单元测试情况下
+# For unit tests
# logpath=./runlog/
#
-############### metric 配置信息 #############
+############### Metric configuration #############
#
global.metricDummyServerStartPort=10000
#
-# session map文件,存储打开文件的filename到path的映射
+# Session map file, which stores the filename-to-path mapping of open files
#
global.sessionMapPath=./session_map.json
diff --git a/conf/snap_client.conf b/conf/snap_client.conf
index a643e44461..427f521663 100644
--- a/conf/snap_client.conf
+++ b/conf/snap_client.conf
@@ -1,29 +1,29 @@
#
-################### mds一侧配置信息 ##################
+################### MDS side configuration ##################
#
-# mds的地址信息,对于mds集群,地址以逗号隔开
+# Address of the mds; for an mds cluster, addresses are separated by commas
mds.listen.addr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__
-# 初始化阶段向mds注册开关,默认为开
+# Switch for registering with mds during initialization; on by default
mds.registerToMDS=false
-# 与mds通信的rpc超时时间
+# RPC timeout for communication with mds
mds.rpcTimeoutMS=500
-# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值
+# Maximum RPC timeout for communication with mds; the exponential backoff timeout cannot exceed this value
mds.maxRPCTimeoutMS=2000
-# 与mds通信重试总时间
+# Total retry time for communication with mds
mds.maxRetryMS=8000
-# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数
+# Switch to another mds when consecutive retries on the current one exceed this limit; this failure count includes timeout retries
mds.maxFailedTimesBeforeChangeMDS=2
-# 与MDS一侧保持一个lease时间内多少次续约
+# Number of lease renewals with the MDS within one lease period
mds.refreshTimesPerLease=4
-# mds RPC接口每次重试之前需要先睡眠一段时间
+# The mds RPC interface sleeps for a while before each retry
mds.rpcRetryIntervalUS=100000
# The normal retry times for trigger wait strategy
@@ -36,111 +36,111 @@ mds.maxRetryMsInIOPath=86400000
mds.waitSleepMs=10000
#
-################# metacache配置信息 ################
+################# Metacache configuration ################
#
-# 获取leader的rpc超时时间
+# RPC timeout for getting the leader
metacache.getLeaderTimeOutMS=500
-# 获取leader的backup request超时时间
+# Backup request timeout for getting the leader
metacache.getLeaderBackupRequestMS=100
-# getleaer backup request使用的load balancer方法
+# Load balancer method used by the getleader backup request
metacache.getLeaderBackupRequestLbName=rr
-# 获取leader的重试次数
+# Number of retries for getting the leader
metacache.getLeaderRetry=5
-# 获取leader接口每次重试之前需要先睡眠一段时间
+# The get-leader interface sleeps for a while before each retry
metacache.rpcRetryIntervalUS=100000
#
-############### 调度层的配置信息 #############
+############### Scheduling layer configuration #############
#
-# 调度层队列大小,每个文件对应一个队列
-# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。
+# Scheduling layer queue size; each file has its own queue
+# The depth of the scheduling queue affects overall client throughput; the queue holds asynchronous IO tasks.
schedule.queueCapacity=1000000
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of executor threads for the queue
+# An executor thread simply takes an IO task off the queue, sends it to the network, and returns for the next task.
+# From dequeue to finishing the RPC send takes roughly 20us-100us: 20us in the normal case with no leader lookup,
+# about 100us when a leader must be fetched during the send. A single thread sustains roughly 100k-500k,
+# which already meets the performance requirements
schedule.threadpoolSize=1
-# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程
-# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回,
-# 这样libcurve不占用qemu的线程,不阻塞其异步调用
+# Task queue introduced to isolate the QEMU-side thread, since QEMU has only one IO thread
+# When QEMU calls the AIO interface, the call is simply pushed onto the task queue and returns immediately,
+# so libcurve does not occupy QEMU's thread and never blocks its asynchronous calls
isolation.taskQueueCapacity=1000000
-# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程
+# Thread pool size of the task queue that isolates QEMU threads; defaults to 1 thread
isolation.taskThreadPoolSize=1
#
-################ 与chunkserver通信相关配置 #############
+################ Configuration for communication with chunkserver #############
#
-# 读写接口失败的OP之间重试睡眠
+# Sleep interval between retries of failed read/write OPs
chunkserver.opRetryIntervalUS=100000
-# 失败的OP重试次数
+# Number of retries for a failed OP
chunkserver.opMaxRetry=50
-# 与chunkserver通信的rpc超时时间
+# RPC timeout for communication with chunkserver
chunkserver.rpcTimeoutMS=1000
-# 重试请求之间睡眠最长时间
-# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间
-# 这个时间最大为maxRetrySleepIntervalUs
+# Maximum sleep time between retried requests
+# When the network is congested or a chunkserver is overloaded, the sleep time needs to grow
+# It is capped at maxRetrySleepIntervalUs
chunkserver.maxRetrySleepIntervalUS=8000000
-# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略
-# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间
-# 这个时间最大为maxTimeoutMS
+# Maximum RPC timeout for retried requests; the timeout follows an exponential backoff policy
+# When timeouts occur due to network congestion, the RPC timeout needs to grow
+# It is capped at maxTimeoutMS
chunkserver.maxRPCTimeoutMS=16000
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# If this value is exceeded, a health check is triggered; if the check fails, the chunkserver is marked as unstable
chunkserver.maxStableTimeoutTimes=64
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# Timeout of the health check request sent after consecutive RPC timeouts on a chunkserver
chunkserver.checkHealthTimeoutMs=100
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on the same server exceeds this value,
+# all chunkservers on that server are marked as unstable
chunkserver.serverStableThreshold=3
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang
-# 真正宕机的情况下,请求重试一定次数后会处理完成
-# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避
+# When the underlying chunkserver is under heavy pressure, it may also be marked unstable
+# Since the copyset leader may change, the request timeout would be reset to the default value, causing IO to hang
+# In a real downtime case, the request completes after a certain number of retries
+# If retries keep going, it is not a downtime case, and the timeout should still enter the exponential backoff logic
+# Once the number of retries of a request exceeds this value, its timeout always enters exponential backoff
chunkserver.minRetryTimesForceTimeoutBackoff=5
-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# When an RPC has been retried more than maxRetryTimesBeforeConsiderSuspend times,
+# it is recorded as suspended IO and the metric raises an alarm
chunkserver.maxRetryTimesBeforeConsiderSuspend=20
#
-################# 文件级别配置项 #############
+################# File level configuration items #############
#
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; each file's inflight RPCs are counted independently
global.fileMaxInFlightRPCNum=64
-# 文件IO下发到底层chunkserver最大的分片KB
+# Maximum split size in KB when file IO is dispatched to the underlying chunkservers
global.fileIOSplitMaxSizeKB=64
#
-################# log相关配置 ###############
+################# Log related configuration ###############
#
-# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
global.logLevel=0
-# 设置log的路径
+# Path of the log files
global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
-# 单元测试情况下
+# For unit tests
# logpath=./runlog/
#
-############### metric 配置信息 #############
+############### Metric configuration #############
#
global.metricDummyServerStartPort=9000
@@ -149,7 +149,7 @@ global.metricDummyServerStartPort=9000
global.minOpenFileLimit=0
#
-# session map文件,存储打开文件的filename到path的映射
+# Session map file, which stores the filename-to-path mapping of open files
#
global.sessionMapPath=./session_map.json
diff --git a/conf/snapshot_clone_server.conf b/conf/snapshot_clone_server.conf
index d4fdf2b64c..01d2ca9158 100644
--- a/conf/snapshot_clone_server.conf
+++ b/conf/snapshot_clone_server.conf
@@ -1,18 +1,18 @@
#
# curvefs client options
#
-# client配置文件位置
+# Path of the client configuration file
client.config_path=conf/cs_client.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/snap_client.conf __CURVEADM_TEMPLATE__
-# mds root 用户名
+# mds root username
mds.rootUser=root
-# mds root 密码
+# mds root password
mds.rootPassword=root_password
-# 调用client方法的重试总时间
+# Total retry time for calling client methods
client.methodRetryTimeSec=300
-# 调用client方法重试间隔时间
+# Retry interval for calling client methods
client.methodRetryIntervalMs=5000
-# 日志文件位置
+# Log file location
log.dir=./ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
#
@@ -26,61 +26,61 @@ s3.config_path=./conf/s3.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/s3.conf __
server.address=127.0.0.1:5556 # __CURVEADM_TEMPLATE__ ${service_addr}:${service_port} __CURVEADM_TEMPLATE__
server.subnet=127.0.0.0/24
server.port=5556 # __CURVEADM_TEMPLATE__ ${service_port} __CURVEADM_TEMPLATE__
-# 调用client异步方法重试总时间
+# Total retry time for calling asynchronous client methods
server.clientAsyncMethodRetryTimeSec=300
-# 调用client异步方法重试时间间隔
+# Retry interval for calling asynchronous client methods
server.clientAsyncMethodRetryIntervalMs=5000
-# 快照工作线程数
+# Number of snapshot worker threads
server.snapshotPoolThreadNum=256
-# 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms)
+# Period at which the snapshot background thread scans the waiting queue and working queue (unit: ms)
server.snapshotTaskManagerScanIntervalMs=1000
-# 转储chunk分片大小
+# Shard size for dumping a chunk
# for nos, pls set to 1048576
server.chunkSplitSize=8388608
-# CheckSnapShotStatus调用间隔
+# CheckSnapShotStatus call interval
server.checkSnapshotStatusIntervalMs=1000
-# 最大快照数
+# Maximum number of snapshots
server.maxSnapshotLimit=1024
-# 同时执行转储的线程数
+# Number of threads performing dumps concurrently
server.snapshotCoreThreadNum=64
-# mds session 时间
+# mds session time
server.mdsSessionTimeUs=5000000
-# 每个线程同时进行ReadChunkSnapshot和转储的快照分片数量
+# Number of snapshot shards per thread on which ReadChunkSnapshot and dumping run concurrently
server.readChunkSnapshotConcurrency=16
# for clone
-# 用于Lazy克隆元数据部分的线程池线程数
+# Number of thread pool threads for the metadata stage of lazy clone
server.stage1PoolThreadNum=256
-# 用于Lazy克隆数据部分的线程池线程数
+# Number of thread pool threads for the data stage of lazy clone
server.stage2PoolThreadNum=256
-# 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池线程数
+# Number of thread pool threads for non-lazy clone, clone deletion, and other control plane requests
server.commonPoolThreadNum=256
-# CloneTaskManager 后台线程扫描间隔
+# Scan interval of the CloneTaskManager background thread
server.cloneTaskManagerScanIntervalMs=1000
-# clone chunk分片大小
+# Clone chunk shard size
# for nos, pls set to 65536
server.cloneChunkSplitSize=1048576
-# 克隆临时目录
+# Temporary directory for clones
server.cloneTempDir=/clone
-# CreateCloneChunk同时进行的异步请求数量
+# Number of concurrent asynchronous CreateCloneChunk requests
server.createCloneChunkConcurrency=64
-# RecoverChunk同时进行的异步请求数量
+# Number of concurrent asynchronous RecoverChunk requests
server.recoverChunkConcurrency=64
-# CloneServiceManager引用计数后台扫描每条记录间隔
+# Interval between records in the CloneServiceManager reference count background scan
server.backEndReferenceRecordScanIntervalMs=500
-# CloneServiceManager引用计数后台扫描每轮记录间隔
+# Interval between rounds of the CloneServiceManager reference count background scan
server.backEndReferenceFuncScanIntervalMs=3600000
#
-# etcd相关配置
+# Etcd related configuration
#
-# etcd地址
+# Etcd address
etcd.endpoint=127.0.0.1:2379 # __CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEADM_TEMPLATE__
-# client建立连接的超时时间
+# Timeout for the client to establish a connection
etcd.dailtimeoutMs=5000
-# client进行put/get/txn等操作的超时时间
+# Timeout for client put/get/txn and other operations
etcd.operation.timeoutMs=5000
-# client操作失败可以重试的次数
+# Number of times a failed client operation can be retried
etcd.retry.times=3
# wait dlock timeout
etcd.dlock.timeoutMs=10000
@@ -88,20 +88,20 @@ etcd.dlock.timeoutMs=10000
etcd.dlock.ttlSec=10
#
-# leader选举相关参数
+# Leader election related parameters
#
-# leader lock名称
+# Leader lock name
leader.campagin.prefix=snapshotcloneserverleaderlock
-# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s)
-# 该值和etcd集群election timeout相关.
-# etcd的server端限制了该值最小为1.5 * election timeout
-# 建议设置etcd集群election timeout为1s
+# A session is created during leader election; the unit is seconds (the go-side interface for this value also uses seconds)
+# This value is related to the etcd cluster's election timeout.
+# The etcd server requires it to be at least 1.5 * election timeout
+# It is recommended to set the etcd cluster's election timeout to 1s
leader.session.intersec=5
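Given the comment's constraint that etcd rejects session TTLs below 1.5x the cluster's election timeout, a deployment could pre-validate the pair of values; the check below only illustrates the stated rule and is not something the snapshot clone server performs:

```cpp
#include <cassert>

// With the recommended election timeout of 1s, the configured 5s session
// interval satisfies 5000 >= 1.5 * 1000. Hypothetical helper.
void CheckSessionTtl(int sessionInterSec, int etcdElectionTimeoutMs) {
    assert(sessionInterSec * 1000 >= 1.5 * etcdElectionTimeoutMs);
}
```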
-# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间
-# 内未当选leader会返回错误
+# Timeout of the leader election: if it is 0 and the election fails, it blocks forever; if it is greater than 0,
+# an error is returned when no leader is elected within electionTimeoutMs
leader.election.timeoutms=0
#
-# dummyserver相关配置
+# Dummy server related configuration
#
server.dummy.listen.port=8081 # __CURVEADM_TEMPLATE__ ${service_dummy_port} __CURVEADM_TEMPLATE__
diff --git a/conf/tools.conf b/conf/tools.conf
index 545297d92c..42be38e27c 100644
--- a/conf/tools.conf
+++ b/conf/tools.conf
@@ -1,16 +1,16 @@
-# mds地址
+# mds address
mdsAddr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__
# mds dummy port
mdsDummyPort=6700 # __CURVEADM_TEMPLATE__ ${cluster_mds_dummy_port} __CURVEADM_TEMPLATE__
-# 发送rpc的超时时间
+# Timeout for sending RPCs
rpcTimeout=500
-# rpc重试次数
+# Number of RPC retries
rpcRetryTimes=5
# the rpc concurrency to chunkserver
rpcConcurrentNum=10
-# etcd地址
+# etcd address
etcdAddr=127.0.0.1:2379 # __CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEADM_TEMPLATE__
-# snapshot clone server 地址
+# Snapshot clone server address
snapshotCloneAddr= # __CURVEADM_TEMPLATE__ ${cluster_snapshotclone_addr} __CURVEADM_TEMPLATE__
# snapshot clone server dummy port
snapshotCloneDummyPort= # __CURVEADM_TEMPLATE__ ${cluster_snapshotclone_dummy_port} __CURVEADM_TEMPLATE__
diff --git a/curve-ansible/client.ini b/curve-ansible/client.ini
index 8eacc6270c..ecf308581d 100644
--- a/curve-ansible/client.ini
+++ b/curve-ansible/client.ini
@@ -1,7 +1,7 @@
[client]
localhost ansible_ssh_host=127.0.0.1
-# 仅用于生成配置中的mds地址
+# Only used to generate the mds addresses in the configuration
[mds]
localhost ansible_ssh_host=127.0.0.1
diff --git a/curve-ansible/common_tasks/wait_copysets_status_healthy.yml b/curve-ansible/common_tasks/wait_copysets_status_healthy.yml
index 7121b28042..8200229894 100644
--- a/curve-ansible/common_tasks/wait_copysets_status_healthy.yml
+++ b/curve-ansible/common_tasks/wait_copysets_status_healthy.yml
@@ -15,7 +15,7 @@
# limitations under the License.
#
-# 等待copyset健康,每个一段时间检查一次,一共检查若干次,成功则break,如果一直不健康则报错
+# Wait for copysets to become healthy: check once per interval for a fixed number of times; break on success, and report an error if they stay unhealthy
- name: check copysets status until healthy
shell: curve_ops_tool copysets-status --confPath={{ curve_ops_tool_config }} | grep "{{ defined_copysets_status }}"
register: result
diff --git a/curve-ansible/group_vars/mds.yml b/curve-ansible/group_vars/mds.yml
index f575cb79d5..689b1414eb 100644
--- a/curve-ansible/group_vars/mds.yml
+++ b/curve-ansible/group_vars/mds.yml
@@ -15,7 +15,7 @@
# limitations under the License.
#
-# 集群拓扑信息
+# Cluster topology information
cluster_map:
servers:
- name: server1
diff --git a/curve-ansible/roles/generate_config/defaults/main.yml b/curve-ansible/roles/generate_config/defaults/main.yml
index 4d7dfe5514..36d14e676b 100644
--- a/curve-ansible/roles/generate_config/defaults/main.yml
+++ b/curve-ansible/roles/generate_config/defaults/main.yml
@@ -15,7 +15,7 @@
# limitations under the License.
#
-# 通用配置
+# General configuration
curve_root_username: root
curve_root_password: root_password
curve_file_timeout_s: 30
@@ -25,7 +25,7 @@ min_file_length: 10737418240
max_file_length: 21990232555520
file_expired_time_us: 5000000
-# mds配置默认值
+# Default values for mds configuration
mds_etcd_dailtimeout_ms: 5000
mds_etcd_operation_timeout_ms: 5000
mds_etcd_retry_times: 3
@@ -94,7 +94,7 @@ throttle_bps_min_in_MB: 120
throttle_bps_max_in_MB: 260
throttle_bps_per_GB_in_MB: 0.3
-# chunkserver配置默认值
+# Default values for chunkserver configuration
chunkserver_enable_external_server: true
chunkserver_meta_page_size: 4096
chunkserver_location_limit: 3000
@@ -165,7 +165,7 @@ chunkserver_trash_expire_after_sec: 300
chunkserver_trash_scan_period_sec: 120
chunkserver_common_log_dir: ./runlog/
-# 快照克隆配置默认值
+# Default values for snapshot clone configuration
snap_client_config_path: /etc/curve/snap_client.conf
snap_client_method_retry_time_sec: 120
snap_client_method_retry_interval_ms: 5000
@@ -201,7 +201,7 @@ snap_leader_session_inter_sec: 5
snap_leader_election_timeout_ms: 0
snap_nginx_addr: 127.0.0.1:5555
-# client配置默认值
+# Default values for client configuration
client_register_to_mds: true
client_mds_rpc_timeout_ms: 500
client_mds_max_rpc_timeout_ms: 2000
@@ -244,7 +244,7 @@ client_discard_enable: true
client_discard_granularity: 4096
client_discard_task_delay_ms: 60000
-# nebd默认配置
+# Default nebd configuration
client_config_path: /etc/curve/client.conf
nebd_client_sync_rpc_retry_times: 50
nebd_client_rpc_retry_inverval_us: 100000
@@ -259,7 +259,7 @@ nebd_server_heartbeat_timeout_s: 30
nebd_server_heartbeat_check_interval_ms: 3000
nebd_server_response_return_rpc_when_io_error: false
-# s3配置默认值
+# Default values for s3 configuration
s3_http_scheme: 0
s3_verify_ssl: false
s3_user_agent_conf: S3 Browser
@@ -276,15 +276,15 @@ s3_throttle_bpsTotalLimit: 1280
s3_throttle_bpsReadLimit: 1280
s3_throttle_bpsWriteLimit: 1280
-# 运维工具默认值
+# Default values for the operations tools
tool_rpc_timeout: 500
tool_rpc_retry_times: 5
tool_rpc_concurrent_num: 10
-# snapshotclone_nginx配置
+# snapshotclone_nginx configuration
nginx_docker_internal_port: 80
-# etcd默认配置
+# Default etcd configuration
etcd_snapshot_count: 10000
etcd_heartbeat_interval: 100
etcd_election_timeout: 1000
diff --git a/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2 b/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2
index 0e7e65e9cc..ae43478df7 100644
--- a/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2
@@ -1,24 +1,24 @@
#
# Global settings
#
-# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
global.ip={{ ansible_ssh_host }}
global.port={{ chunkserver_base_port }}
global.subnet={{ chunkserver_subnet }}
global.enable_external_server={{ chunkserver_enable_external_server }}
global.external_ip={{ ansible_ssh_host }}
global.external_subnet={{ chunkserver_external_subnet }}
-# chunk大小,一般16MB
+# Chunk size, usually 16MB
global.chunk_size={{ chunk_size }}
-# chunk 元数据页大小,一般4KB
+# Chunk metadata page size, usually 4KB
global.meta_page_size={{ chunkserver_meta_page_size }}
-# clone chunk允许的最长location长度
+# Maximum location length allowed for a clone chunk
global.location_limit={{ chunkserver_location_limit }}
#
# MDS settings
#
-#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777
+# Multiple mds addresses are supported, separated by commas: 127.0.0.1:6666,127.0.0.1:7777
{% set mds_address=[] -%}
{% for host in groups.mds -%}
{% set mds_ip = hostvars[host].ansible_ssh_host -%}
@@ -26,30 +26,30 @@ global.location_limit={{ chunkserver_location_limit }}
{% set _ = mds_address.append("%s:%s" % (mds_ip, mds_port)) -%}
{% endfor -%}
mds.listen.addr={{ mds_address | join(',') }}
-# 向mds注册的最大重试次数
+# Maximum number of retries for registering with mds
mds.register_retries={{ chunkserver_register_retries }}
-# 向mds注册的rpc超时时间,一般1000ms
+# RPC timeout for registering with mds, typically 1000ms
mds.register_timeout={{ chunkserver_register_timeout }}
-# 向mds发送心跳的间隔,一般10s
+# Interval between heartbeats sent to mds, usually 10s
mds.heartbeat_interval={{ chunkserver_heartbeat_interval }}
-# 向mds发送心跳的rpc超时间,一般1000ms
+# RPC timeout of heartbeats sent to mds, usually 1000ms
mds.heartbeat_timeout={{ chunkserver_heartbeat_timeout }}
#
# Chunkserver settings
#
-# chunkserver主目录
+# Chunkserver home directory
chunkserver.stor_uri={{ chunkserver_stor_uri }}
-# chunkserver元数据文件
+# Chunkserver metadata file
chunkserver.meta_uri={{ chunkserver_meta_uri }}
-# disk类型
+# Disk type
chunkserver.disk_type={{ chunkserver_disk_type }}
-# raft内部install snapshot带宽上限,一般20MB
+# Bandwidth limit for raft's internal install snapshot, usually 20MB
chunkserver.snapshot_throttle_throughput_bytes={{ chunkserver_snapshot_throttle_throughput_bytes }}
-# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB,
-# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个
-# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而
-# 不是20MB的带宽
+# Check cycles allow finer-grained bandwidth control. With snapshotThroughputBytes=100MB
+# and check cycles=10, the bandwidth in each 1/10 second is guaranteed to be 10MB and does not
+# accumulate: the first 1/10 second gets 10MB which then expires, so the second 1/10 second
+# can still only use 10MB, not 20MB
chunkserver.snapshot_throttle_check_cycles={{ chunkserver_snapshot_throttle_check_cycles }}
chunkserver.max_inflight_requests={{ chunkserver_max_inflight_requests }}
@@ -64,39 +64,39 @@ test.testcopyset_conf={{ chunkserver_test_testcopyset_conf }}
#
# Copyset settings
#
-# 是否检查任期,一般检查
+# Whether to check the raft term; usually enabled
copyset.check_term={{ chunkserver_copyset_check_term }}
-# 是否关闭raft配置变更的服务,一般不关闭
+# Whether to disable the service for raft configuration changes; usually not disabled
copyset.disable_cli={{ chunkserver_copyset_disable_cli }}
copyset.log_applied_task={{ chunkserver_copyset_log_applied_task }}
-# raft选举超时时间,一般是5000ms
+# Raft election timeout, usually 5000ms
copyset.election_timeout_ms={{ chunkserver_copyset_election_timeout_ms }}
-# raft打快照间隔,一般是1800s,也就是30分钟
+# Raft snapshot interval, usually 1800s, i.e. 30 minutes
copyset.snapshot_interval_s={{ chunkserver_copyset_snapshot_interval_s }}
-# add一个节点,add的节点首先以类似learner的角色拷贝数据
-# 在跟leader差距catchup_margin个entry的时候,leader
-# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定
-# 会commit&apply,catchup_margin较小可以大概率确保learner
-# 后续很快可以加入复制组
+# When a node is added, it first copies data in a learner-like role.
+# Once it is within catchup_margin entries of the leader, the leader
+# tries to commit the configuration change entry (a committed entry will
+# almost certainly be committed & applied); a small catchup_margin makes it
+# very likely that the learner can join the replication group quickly
copyset.catchup_margin={{ chunkserver_copyset_catchup_margin }}
-# copyset chunk数据目录
+# Copyset chunk data directory
copyset.chunk_data_uri={{ chunkserver_copyset_chunk_data_uri }}
-# raft wal log目录
+# Raft wal log directory
copyset.raft_log_uri={{ chunkserver_copyset_raft_log_uri }}
-# raft元数据目录
+# Raft metadata directory
copyset.raft_meta_uri={{ chunkserver_copyset_raft_meta_uri }}
-# raft snapshot目录
+# Raft snapshot directory
copyset.raft_snapshot_uri={{ chunkserver_copyset_raft_snapshot_uri }}
-# copyset回收目录
+# Copyset recycle directory
copyset.recycler_uri={{ chunkserver_copyset_recycler_uri }}
-# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制
+# Threshold for concurrent copyset loading at chunkserver startup; 0 means no limit
copyset.load_concurrency={{ chunkserver_copyset_load_concurrency }}
-# 检查copyset是否加载完成出现异常时的最大重试次数
+# Maximum number of retries when checking whether copyset loading has finished hits an exception
copyset.check_retrytimes={{ chunkserver_copyset_check_retrytimes }}
-# 当前peer的applied_index与leader上的committed_index差距小于该值
-# 则判定copyset已经加载完成
+# When the gap between the current peer's applied_index and the leader's committed_index is smaller than this value,
+# the copyset is considered fully loaded
copyset.finishload_margin={{ chunkserver_copyset_finishload_margin }}
-# 循环判定copyset是否加载完成的内部睡眠时间
+# Internal sleep time of the loop that checks whether copysets have finished loading
copyset.check_loadmargin_interval_ms={{ chunkserver_copyset_check_loadmargin_interval_ms }}
# scan copyset interval
copyset.scan_interval_sec={{ chunkserver_copyset_scan_interval_sec }}
@@ -115,26 +115,26 @@ copyset.check_syncing_interval_ms={{ chunkserver_copyset_check_syncing_interval_
#
# Clone settings
#
-# 禁止使用curveclient
+# Disable the use of curveclient
clone.disable_curve_client={{ disable_snapshot_clone }}
-# 禁止使用s3adapter
+# Disable the use of s3adapter
clone.disable_s3_adapter={{ disable_snapshot_clone }}
-# 克隆的分片大小,一般1MB
+# Clone slice size, usually 1MB
clone.slice_size={{ chunkserver_clone_slice_size }}
-# 读clone chunk时是否需要paste到本地
-# 该配置对recover chunk请求类型无效
+# Whether reading a clone chunk should paste the data locally
+# This option has no effect on recover chunk requests
clone.enable_paste={{ chunkserver_clone_enable_paste }}
-# 克隆的线程数量
+# Number of clone threads
clone.thread_num={{ chunkserver_clone_thread_num }}
-# 克隆的队列深度
+# Clone queue depth
clone.queue_depth={{ chunkserver_clone_queue_depth }}
-# curve用户名
+# Curve username
curve.root_username={{ curve_root_username }}
-# curve密码
+# Curve password
curve.root_password={{ curve_root_password }}
-# client配置文件
+# Client configuration file
curve.config_path={{ chunkserver_client_config_path }}
-# s3配置文件
+# S3 configuration file
s3.config_path={{ chunkserver_s3_config_path }}
# Curve File time to live
curve.curve_file_timeout_s={{ curve_file_timeout_s }}
@@ -142,7 +142,7 @@ curve.curve_file_timeout_s={{ curve_file_timeout_s }}
#
# Local FileSystem settings
#
-# 是否开启使用renameat2,ext4内核3.15以后开始支持
+# Whether to enable renameat2; supported for ext4 since kernel 3.15
fs.enable_renameat2={{ chunkserver_fs_enable_renameat2 }}
#
@@ -163,27 +163,27 @@ storeng.sync_write={{ chunkserver_storeng_sync_write }}
#
# Concurrent apply module
#
-# 并发模块的并发度,一般是10
+# Concurrency of the concurrent apply module, usually 10
wconcurrentapply.size={{ chunkserver_wconcurrentapply_size }}
-# 并发模块线程的队列深度
+# Queue depth of the concurrent apply module threads
wconcurrentapply.queuedepth={{ chunkserver_wconcurrentapply_queuedepth }}
-# 并发模块读线程的并发度,一般是5
+# Concurrency of the concurrent apply module's read threads, usually 5
rconcurrentapply.size={{ chunkserver_rconcurrentapply_size }}
-# 并发模块读线程的队列深度
+# Queue depth of the concurrent apply module's read threads
rconcurrentapply.queuedepth={{ chunkserver_rconcurrentapply_queuedepth }}
#
# Chunkfile pool
#
-# 是否开启从chunkfilepool获取chunk,一般是true
+# Whether to obtain chunks from the chunkfilepool, usually true
chunkfilepool.enable_get_chunk_from_pool={{ chunkserver_format_disk }}
-# chunkfilepool目录
+# Chunkfilepool directory
chunkfilepool.chunk_file_pool_dir={{ chunkserver_chunkfilepool_chunk_file_pool_dir }}
-# chunkfilepool meta文件路径
+# Chunkfilepool meta file path
#chunkfilepool.meta_path=./chunkfilepool.meta
-# chunkfilepool meta文件大小
+# Chunkfilepool meta file size
chunkfilepool.cpmeta_file_size={{ chunkserver_chunkfilepool_cpmeta_file_size }}
-# chunkfilepool get chunk最大重试次数
+# Maximum number of retries for getting a chunk from the chunkfilepool
chunkfilepool.retry_times=5
# Enable clean chunk
chunkfilepool.clean.enable={{ chunkserver_chunkfilepool_clean_enable }}
@@ -195,34 +195,34 @@ chunkfilepool.clean.throttle_iops={{ chunkserver_chunkfilepool_clean_throttle_io
#
# WAL file pool
#
-# walpool是否共用chunkfilepool,如果为true,则以下配置无效
+# Whether the walpool shares the chunkfilepool; if true, the settings below have no effect
walfilepool.use_chunk_file_pool={{ walfilepool_use_chunk_file_pool }}
-# 是否开启从walfilepool获取chunk,一般是true
+# Whether to obtain chunks from the walfilepool, usually true
walfilepool.enable_get_segment_from_pool={{ chunkserver_format_disk }}
-# walpool目录
+# Walpool directory
walfilepool.file_pool_dir={{ chunkserver_walfilepool_file_pool_dir }}
-# walpool meta文件路径
+# Walpool meta file path
walfilepool.meta_path={{ chunkserver_walfilepool_meta_path }}
-# walpool meta文件大小
+# Walpool meta file size
walfilepool.segment_size={{ chunkserver_walfilepool_segment_size }}
-# WAL metapage大小
+# WAL metapage size
walfilepool.metapage_size={{ chunkserver_walfilepool_metapage_size }}
-# WAL filepool 元数据文件大小
+# WAL filepool metadata file size
walfilepool.meta_file_size={{ chunkserver_walfilepool_meta_file_size }}
-# WAL filepool get chunk最大重试次数
+# Maximum number of retries for getting a chunk from the WAL filepool
walfilepool.retry_times={{ chunkserver_walfilepool_retry_times }}
#
# trash settings
#
-# chunkserver回收数据彻底删除的过期时间
+# Expiration time after which recycled chunkserver data is permanently deleted
trash.expire_afterSec={{ chunkserver_trash_expire_after_sec }}
-# chunkserver检查回收数据过期时间的周期
+# Period at which chunkserver checks recycled data for expiration
trash.scan_periodSec={{ chunkserver_trash_scan_period_sec }}
# common option
#
-# chunkserver 日志存放文件夹
+# Chunkserver log directory
chunkserver.common.logDir={{ chunkserver_common_log_dir }}
-# 单元测试情况下
+# For unit tests
# chunkserver.common.logDir=./runlog/
diff --git a/curve-ansible/roles/generate_config/templates/client.conf.j2 b/curve-ansible/roles/generate_config/templates/client.conf.j2
index 08d4413780..492ac270bf 100644
--- a/curve-ansible/roles/generate_config/templates/client.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/client.conf.j2
@@ -1,8 +1,8 @@
#
-################### mds一侧配置信息 ##################
+################### MDS side configuration ##################
#
-# mds的地址信息,对于mds集群,地址以逗号隔开
+# Address of the mds; for an mds cluster, addresses are separated by commas
{% set mds_address=[] -%}
{% for host in groups.mds -%}
{% set mds_ip = hostvars[host].ansible_ssh_host -%}
@@ -11,25 +11,25 @@
{% endfor -%}
mds.listen.addr={{ mds_address | join(',') }}
-# 初始化阶段向mds注册开关,默认为开
+# Switch for registering with mds during initialization; on by default
mds.registerToMDS={{ client_register_to_mds }}
-# 与mds通信的rpc超时时间
+# RPC timeout for communication with mds
mds.rpcTimeoutMS={{ client_mds_rpc_timeout_ms }}
-# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值
+# Maximum RPC timeout for communication with mds; the exponential backoff timeout cannot exceed this value
mds.maxRPCTimeoutMS={{ client_mds_max_rpc_timeout_ms }}
-# 与mds通信重试总时间
+# Total retry time for communication with mds
mds.maxRetryMS={{ client_mds_max_retry_ms }}
-# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数
+# Switch to another mds when consecutive retries on the current one exceed this limit; this failure count includes timeout retries
mds.maxFailedTimesBeforeChangeMDS={{ client_mds_max_failed_times_before_change_mds }}
-# 与MDS一侧保持一个lease时间内多少次续约
+# Number of lease renewals with the MDS within one lease period
mds.refreshTimesPerLease={{ client_mds_refresh_times_per_lease }}
-# mds RPC接口每次重试之前需要先睡眠一段时间
+# The mds RPC interface sleeps for a while before each retry
mds.rpcRetryIntervalUS={{ client_mds_rpc_retry_interval_us }}
# The normal retry times for trigger wait strategy
@@ -42,104 +42,104 @@ mds.maxRetryMsInIOPath={{ client_mds_max_retry_ms_in_io_path }}
mds.waitSleepMs={{ client_mds_wait_sleep_ms }}
#
-################# metacache配置信息 ################
+################# Metacache configuration ################
#
-# 获取leader的rpc超时时间
+# RPC timeout for getting the leader
metacache.getLeaderTimeOutMS={{ client_metacache_get_leader_timeout_ms }}
-# 获取leader的重试次数
+# Number of retries for getting the leader
metacache.getLeaderRetry={{ client_metacache_get_leader_retry }}
-# 获取leader接口每次重试之前需要先睡眠一段时间
+# The get-leader interface sleeps for a while before each retry
metacache.rpcRetryIntervalUS={{ client_metacache_rpc_retry_interval_us }}
#
-############### 调度层的配置信息 #############
+############### Scheduling layer configuration #############
#
-# 调度层队列大小,每个文件对应一个队列
-# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。
+# Scheduling layer queue size; each file has its own queue
+# The depth of the scheduling queue affects overall client throughput; the queue holds asynchronous IO tasks.
schedule.queueCapacity={{ client_schedule_queue_capacity }}
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of executor threads for the queue
+# An executor thread simply takes an IO task off the queue, sends it to the network, and returns for the next task.
+# From dequeue to finishing the RPC send takes roughly 20us-100us: 20us in the normal case with no leader lookup,
+# about 100us when a leader must be fetched during the send. A single thread sustains roughly 100k-500k,
+# which already meets the performance requirements
schedule.threadpoolSize={{ client_schedule_threadpool_size }}
-# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程
-# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回,
-# 这样libcurve不占用qemu的线程,不阻塞其异步调用
+# Task queue introduced to isolate the QEMU-side thread, since QEMU has only one IO thread
+# When QEMU calls the AIO interface, the call is simply pushed onto the task queue and returns immediately,
+# so libcurve does not occupy QEMU's thread and never blocks its asynchronous calls
isolation.taskQueueCapacity={{ client_isolation_task_queue_capacity }}
-# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程
+# Thread pool size of the task queue that isolates QEMU threads; defaults to 1 thread
isolation.taskThreadPoolSize={{ client_isolation_task_thread_pool_size }}
#
-################ 与chunkserver通信相关配置 #############
+################ Configuration for communication with chunkserver #############
#
-# 读写接口失败的OP之间重试睡眠
+# Sleep interval between retries of failed read/write OPs
chunkserver.opRetryIntervalUS={{ client_chunkserver_op_retry_interval_us }}
-# 失败的OP重试次数
+# Number of retries for a failed OP
chunkserver.opMaxRetry={{ client_chunkserver_op_max_retry }}
-# 与chunkserver通信的rpc超时时间
+# RPC timeout for communication with chunkserver
chunkserver.rpcTimeoutMS={{ client_chunkserver_rpc_timeout_ms }}
-# 开启基于appliedindex的读,用于性能优化
+# Enable appliedindex-based reads for performance optimization
chunkserver.enableAppliedIndexRead={{ client_chunkserver_enable_applied_index_read }}
-# 重试请求之间睡眠最长时间
-# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间
-# 这个时间最大为maxRetrySleepIntervalUs
+# Maximum sleep time between retried requests
+# When the network is congested or a chunkserver is overloaded, the sleep time needs to grow
+# It is capped at maxRetrySleepIntervalUs
chunkserver.maxRetrySleepIntervalUS={{ client_chunkserver_max_retry_sleep_interval_us }}
-# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略
-# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间
-# 这个时间最大为maxTimeoutMS
+# Maximum RPC timeout for retried requests; the timeout follows an exponential backoff policy
+# When timeouts occur due to network congestion, the RPC timeout needs to grow
+# It is capped at maxTimeoutMS
chunkserver.maxRPCTimeoutMS={{ client_chunkserver_max_rpc_timeout_ms }}
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# If this value is exceeded, a health check is triggered; if the check fails, the chunkserver is marked as unstable
chunkserver.maxStableTimeoutTimes={{ client_chunkserver_max_stable_timeout_times }}
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# Timeout of the health check request sent after consecutive RPC timeouts on a chunkserver
chunkserver.checkHealthTimeoutMs={{ client_chunkserver_check_health_timeout_ms }}
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on the same server exceeds this value,
+# all chunkservers on that server are marked as unstable
chunkserver.serverStableThreshold={{ client_chunkserver_server_stable_threshold }}
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang
-# 真正宕机的情况下,请求重试一定次数后会处理完成
-# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避
+# When the underlying chunkserver is under heavy pressure, unstable may also be triggered
+# Because the copyset leader may change, the request timeout can be reset to the default value, which leads to IO hangs
+# In the case of a real outage, the request completes after a certain number of retries
+# If retries keep happening, it is not an outage; in that case the timeout should still enter the exponential backoff logic
+# Once the number of retries for a request exceeds this value, its timeout always enters exponential backoff
 chunkserver.minRetryTimesForceTimeoutBackoff={{ client_chunkserver_min_retry_times_force_timeout_backoff }}

-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# When an RPC has been retried more than maxRetryTimesBeforeConsiderSuspend times
+# it is recorded as suspended IO and the metric raises an alarm
 chunkserver.maxRetryTimesBeforeConsiderSuspend={{ client_chunkserver_max_retry_times_before_consider_suspend }}

 #
-################# 文件级别配置项 #############
+################# File level configuration items #############
 #

-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of inflight (unreturned) RPCs allowed by libcurve's underlying RPC scheduling; each file's inflight RPCs are counted independently
 global.fileMaxInFlightRPCNum={{ client_file_max_inflight_rpc_num }}

-# 文件IO下发到底层chunkserver最大的分片KB
+# Maximum split size, in KB, when file IO is dispatched to the underlying chunkserver
 global.fileIOSplitMaxSizeKB={{ client_file_io_split_max_size_kb }}

 #
-################# log相关配置 ###############
+################# Log related configuration ###############
 #
-# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
 global.logLevel={{ client_log_level }}
-# 设置log的路径
+# Set the log path
 global.logPath={{ client_log_path }}
-# 单元测试情况下
+# In the case of unit testing
 # logpath=./runlog/

 #
@@ -151,15 +151,15 @@ closefd.timeout={{ client_closefd_timeout_sec }}
 closefd.timeInterval={{ client_closefd_time_interval_sec }}

 #
-############### metric 配置信息 #############
+############### Metric configuration #############
 #
 global.metricDummyServerStartPort={{ client_metric_dummy_server_start_port }}

-# 是否关闭健康检查: true/关闭 false/不关闭
+# Whether to disable the health check: true = disable, false = enable
 global.turnOffHealthCheck={{ client_turn_off_health_check }}

 #
-# session map文件,存储打开文件的filename到path的映射
+# Session map file, storing the filename-to-path mapping of opened files
 #
 global.sessionMapPath={{ client_session_map_path }}
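The client options above describe a retry policy where both the sleep between retries and the retried RPC timeout grow under congestion, each up to a cap. Here is a small sketch of such a capped exponential backoff; it assumes doubling per retry, which is an assumption of this illustration rather than the exact growth factor used by libcurve, and the input values are examples only:

    def backoff(retry, base_sleep_us, base_timeout_ms, max_sleep_us, max_timeout_ms):
        # Both values double per retry and are capped, per the comments above
        sleep_us = min(base_sleep_us * (2 ** retry), max_sleep_us)
        timeout_ms = min(base_timeout_ms * (2 ** retry), max_timeout_ms)
        return sleep_us, timeout_ms

    # e.g. opRetryIntervalUS=100000, rpcTimeoutMS=1000,
    #      maxRetrySleepIntervalUS=8000000, maxRPCTimeoutMS=8000:
    for r in range(5):
        print(backoff(r, 100000, 1000, 8000000, 8000))
    # sleeps 0.1s, 0.2s, 0.4s, 0.8s, 1.6s (capped at 8s);
    # timeouts 1s, 2s, 4s, 8s, then held at the 8s cap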
diff --git a/curve-ansible/roles/generate_config/templates/mds.conf.j2 b/curve-ansible/roles/generate_config/templates/mds.conf.j2
index 13040fa9ea..7e9b8f39b1 100644
--- a/curve-ansible/roles/generate_config/templates/mds.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/mds.conf.j2
@@ -1,5 +1,5 @@
 #
-# mds服务端口
+# MDS service port
 #
 mds.listen.addr={{ ansible_ssh_host }}:{{ mds_port }}

@@ -8,9 +8,9 @@ global.subnet={{ mds_subnet }}
 global.port={{ mds_port }}

 #
-# etcd相关配置
+# etcd related configuration
 #
-# etcd地址
+# etcd address
{% set etcd_address=[] -%}
{% for host in groups.etcd -%}
{% set etcd_ip = hostvars[host].ansible_ssh_host -%}
{% endfor -%}
mds.etcd.endpoint={{ etcd_address | join(',') }}

-# client建立连接的超时时间
+# Timeout for the client to establish a connection
mds.etcd.dailtimeoutMs={{ mds_etcd_dailtimeout_ms }}
-# client进行put/get/txn等操作的超时时间
+# Timeout for client put/get/txn and other operations
 mds.etcd.operation.timeoutMs={{ mds_etcd_operation_timeout_ms }}
-# client操作失败可以重试的次数
+# Number of times a failed client operation can be retried
 mds.etcd.retry.times={{ mds_etcd_retry_times }}
 # wait dlock timeout
 mds.etcd.dlock.timeoutMs={{ mds_etcd_dlock_timeout_ms }}
@@ -31,68 +31,68 @@ mds.etcd.dlock.ttlSec={{ mds_etcd_dlock_ttl_sec }}

 #
-# segment分配量统计相关配置
+# Configuration related to segment allocation statistics
 #
-# 将内存中的数据持久化到etcd的间隔, 单位ms
+# Interval for persisting in-memory data to etcd, in ms
 mds.segment.alloc.periodic.persistInterMs={{ mds_segment_alloc_periodic_persist_inter_ms }}
-# 出错情况下的重试间隔,单位ms
+# Retry interval in case of an error, in ms
 mds.segment.alloc.retryInterMs={{ mds_segment_alloc_retry_inter_ms }}
 mds.segment.discard.scanIntevalMs={{ mds_segment_discard_scan_interval_ms }}
-# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s)
-# 该值和etcd集群election timeout相关.
-# etcd的server端限制了该值最小为1.5 * election timeout
-# 建议设置etcd集群election timeout为1s
+# A session is created during leader election; the unit is seconds (the Go-side interface takes this value in seconds)
+# This value is related to the etcd cluster's election timeout
+# The etcd server requires it to be at least 1.5 * election timeout
+# It is recommended to set the etcd cluster election timeout to 1s
 mds.leader.sessionInterSec={{ mds_leader_session_inter_sec }}
-# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间
-# 内未当选leader会返回错误
+# Timeout for the leader election. If it is 0, an unsuccessful election blocks indefinitely.
+# If it is greater than 0, an error is returned when no leader is elected within electionTimeoutMs
 mds.leader.electionTimeoutMs={{ mds_leader_election_timeout_ms }}

 #
-# scheduler相关配置
+# Scheduler related configuration
 #
-# copysetScheduler开关
+# copysetScheduler switch
 mds.enable.copyset.scheduler={{ mds_enable_copyset_scheduler }}
-# leaderScheduler开关
+# leaderScheduler switch
 mds.enable.leader.scheduler={{ mds_enable_leader_scheduler }}
-# recoverScheduler开关
+# recoverScheduler switch
 mds.enable.recover.scheduler={{ mds_enable_recover_scheduler }}
-# replicaScheduler开关
+# replicaScheduler switch
 mds.enable.replica.scheduler={{ mds_enable_replica_scheduler }}
 # Scan scheduler switch
 mds.enable.scan.scheduler={{ mds_enable_scan_scheduler }}
-# copysetScheduler 轮次间隔,单位是s
+# copysetScheduler round interval, in seconds
 mds.copyset.scheduler.intervalSec={{ mds_copyset_scheduler_interval_sec }}
-# replicaScheduler 轮次间隔,单位是s
+# replicaScheduler round interval, in seconds
 mds.replica.scheduler.intervalSec={{ mds_replica_scheduler_interval_sec }}
-# leaderScheduler 轮次间隔,单位是s
+# leaderScheduler round interval, in seconds
 mds.leader.scheduler.intervalSec={{ mds_leader_scheduler_interval_sec }}
-# recoverScheduler 轮次间隔,单位是s
+# recoverScheduler round interval, in seconds
 mds.recover.scheduler.intervalSec={{ mds_recover_scheduler_interval_sec }}
 # Scan scheduler run interval (seconds)
 mds.scan.scheduler.intervalSec={{ mds_scan_scheduler_interval_sec }}
-# 每块磁盘上operator的并发度
+# Concurrency of operators on each disk
 mds.schduler.operator.concurrent={{ mds_schduler_operator_concurrent }}
-# leader变更超时时间, 超时后mds从内存移除该operator
+# Timeout for a leader transfer; after the timeout, mds removes the operator from memory
 mds.schduler.transfer.limitSec={{ mds_schduler_transfer_limit_sec }}
-# 减一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for removing a replica; after the timeout, mds removes the operator from memory
 mds.scheduler.remove.limitSec={{ mds_scheduler_remove_limit_sec }}
-# 增加一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for adding a replica; after the timeout, mds removes the operator from memory
 mds.scheduler.add.limitSec={{ mds_scheduler_add_limit_sec }}
-# change一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for changing a replica; after the timeout, mds removes the operator from memory
 mds.scheduler.change.limitSec={{ mds_scheduler_change_limit_sec }}
 # Scan operator timeout (seconds)
 mds.scheduler.scan.limitSec={{ mds_scheduler_scan_limit_sec }}
-# copyset数量极差不能超过均值的百分比
+# The range (max - min) of copyset counts must not exceed this percentage of the mean
 mds.scheduler.copysetNumRangePercent={{ mds_scheduler_copyset_mum_range_percent }}
-# chunkserver上copyset的scatte-rwidth不能超过最小值的百分比
+# The scatter-width range of copysets across chunkservers must not exceed this percentage of the minimum
 mds.schduler.scatterWidthRangePerent={{ mds_schduler_scatterwidth_range_percent }}
-# 一个server上超过一定数量的chunkserver offline, 不做恢复
+# If more than this number of chunkservers on one server are offline, no recovery is performed
 mds.chunkserver.failure.tolerance={{ mds_chunkserver_failure_tolerance }}
-# chunkserver启动coolingTimeSec_后才可以作为target leader, 单位是s
-# TODO(lixiaocui): 续得一定程度上与快照的时间间隔方面做到相关
+# A chunkserver can be chosen as a target leader only coolingTimeSec_ seconds after it starts
+# TODO(lixiaocui): this should be correlated to some extent with the snapshot interval
 mds.scheduler.chunkserver.cooling.timeSec={{ mds_scheduler_chunkserver_cooling_time_sec }}
 # ScanScheduler: scan start hour in one day ([0-23])
 mds.scheduler.scan.startHour={{ mds_scheduler_scan_start_hour }}
@@ -106,129 +106,129 @@ mds.scheduler.scan.concurrent.per.pool={{ mds_scheduler_scan_concurrent_per_pool
 mds.scheduler.scan.concurrent.per.chunkserver={{ mds_scheduler_scan_concurrent_per_chunkserver }}

 #
-# 心跳相关配置,单位为ms
+# Heartbeat related configuration, in ms
 #
-# chunkserver和mds的心跳间隔
+# Heartbeat interval between chunkserver and mds
 mds.heartbeat.intervalMs={{ mds_heartbeat_interval_ms }}
-# chunkserver和mds间心跳miss的时间
+# Time after which a heartbeat between chunkserver and mds counts as missed
 mds.heartbeat.misstimeoutMs={{ mds_heartbeat_misstimeout_ms }}
-# mds在心跳miss后offlinetimeout被标记为offline
+# After heartbeats have been missing for offlinetimeout, mds marks the chunkserver as offline
 mds.heartbeat.offlinetimeoutMs={{ mds_heartbeat_offlinet_imeout_ms }}
-# mds启动后延迟一定时间开始指导chunkserver删除物理数据
-# 需要延迟删除的原因在代码中备注
+# After mds starts, it delays for a period of time before instructing chunkservers to delete physical data
+# The reason for the delayed deletion is noted in the code
 mds.heartbeat.clean_follower_afterMs={{ mds_heartbeat_clean_follower_after_ms }}

 #
-# namespace cache相关
+# Namespace cache related
 #
-# namestorage的缓存大小,为0表示不缓存
-# 按照每个文件最小10GB的空间预算。算上超售(2倍)
-# 文件数量 = 5PB/10GB ~= 524288 个文件
-# sizeof(namespace对象) * 524288 ~= 89Byte *524288 ~= 44MB 空间
-# 16MB chunk大小, 1个segment 1GB
-# sizeof(segment 对象) * 2621440 ~=(32 + (1024/16)*12)* 2621440 ~= 1.95 GB
-# 数据量:3GB左右
-# 记录数量:524288+2621440 ~= 300w左右
+# Cache size of namestorage; 0 means no caching
+# Based on a minimum space budget of 10GB per file, counting 2x overselling
+# Number of files = 5PB / 10GB ~= 524288 files
+# sizeof(namespace object) * 524288 ~= 89 bytes * 524288 ~= 44MB of space
+# 16MB chunk size, 1GB per segment
+# sizeof(segment object) * 2621440 ~= (32 + (1024/16) * 12) * 2621440 ~= 1.95 GB
+# Data volume: about 3GB
+# Record count: 524288 + 2621440 ~= about 3 million
 mds.cache.count={{ mds_cache_count }}

 #
 # mds file record settings
 #
-# mds file记录过期时间,单位us
+# Expiration time of mds file records, in us
 mds.file.expiredTimeUs={{ file_expired_time_us }}
-# mds后台扫描线程扫描file记录间隔时间,单位us
+# Interval at which the mds background scan thread scans file records, in us
 mds.file.scanIntevalTimeUs={{ mds_file_scan_inteval_time_us }}

 #
 # auth settings
 #
-# root用户密码
+# Root user password
 mds.auth.rootUserName={{ curve_root_username }}
 mds.auth.rootPassword={{ curve_root_password }}

 #
 # file lock setting
 #
-# mds的文件锁桶大小
+# Bucket count of the mds file lock
 mds.filelock.bucketNum={{ mds_filelock_bucket_num }}

 #
 # topology config
 #
-# Toplogy 定期刷新入数据库的时间间隔
+# Interval at which topology is periodically flushed to the database
 mds.topology.TopologyUpdateToRepoSec={{ mds_topology_topology_update_to_repo_sec }}
-# 请求chunkserver上创建全部copyset的超时时间
+# Timeout for requesting creation of all copysets on a chunkserver
 mds.topology.CreateCopysetRpcTimeoutMs={{ mds_topology_create_copyset_rpc_timeout_ms }}
-# 请求chunkserver上创建copyset重试次数
+# Number of retries for requesting copyset creation on a chunkserver
 mds.topology.CreateCopysetRpcRetryTimes={{ mds_topology_create_copyset_rpc_retry_times }}
-# 请求chunkserver上创建copyset重试间隔
+# Retry interval for requesting copyset creation on a chunkserver
 mds.topology.CreateCopysetRpcRetrySleepTimeMs={{ mds_topology_create_copyset_rpc_retry_sleep_time_ms }}
-# Topology模块刷新metric时间间隔
+# Interval at which the topology module refreshes metrics
 mds.topology.UpdateMetricIntervalSec={{ mds_topology_update_metric_interval_sec }}
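As a sanity check, the sizing arithmetic in the mds.cache.count comment above can be re-derived in a few lines. The decomposition of the 2621440 segment count (10 GB per file at 1 GB per segment, halved by the 2x overselling) is my reading of that comment, not a statement from the source:

    files = (5 * 2**50) // (10 * 2**30)              # 5 PB / 10 GB = 524288 files
    segments = files * (10 // 2)                     # ~2621440 segments (see note above)
    ns_bytes = 89 * files                            # ~44 MB of namespace objects
    seg_bytes = (32 + (1024 // 16) * 12) * segments  # 800 B each -> ~1.95 GB
    print(files, segments, ns_bytes // 2**20, round(seg_bytes / 2**30, 2))
    # -> 524288 2621440 44 1.95
    print(files + segments)                          # ~3.1 million records in total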
-# 物理池使用百分比,即使用量超过这个值即不再往这个池分配
+# Physical pool usage percentage; once usage exceeds this value, no more space is allocated from this pool
 mds.topology.PoolUsagePercentLimit={{ mds_topology_pool_usage_percent_limit }}
-# 多pool选pool策略 0:Random, 1:Weight
+# Pool selection policy when there are multiple pools. 0: Random, 1: Weight
 mds.topology.choosePoolPolicy={{ mds_topology_choose_pool_policy }}
 # enable LogicalPool ALLOW/DENY status
 mds.topology.enableLogicalPoolStatus={{ mds_topology_enable_logicalpool_status}}

 #
 # copyset config
-# 默认值,为0时不启用
+# Default values; a value of 0 disables the option
 #
-# 生成copyset重试次数
+# Number of retries for generating copysets
 mds.copyset.copysetRetryTimes={{ mds_copyset_copyset_retry_times }}
-# 所有chunkserver的scatterWidth需满足的最大方差
+# Maximum variance that the scatterWidth of all chunkservers must satisfy
 mds.copyset.scatterWidthVariance={{ mds_copyset_scatterwidth_variance }}
-# 所有chunkserver的scatterWidth需满足的最大标准差
+# Maximum standard deviation that the scatterWidth of all chunkservers must satisfy
 mds.copyset.scatterWidthStandardDevation={{ mds_copyset_scatterwidth_standard_devation }}
-# 所有chunkserver的scatterWidth需满足的最大极差
+# Maximum range that the scatterWidth of all chunkservers must satisfy
 mds.copyset.scatterWidthRange={{ mds_copyset_scatterwidth_range }}
-# 所有chunkserver的scatterWidth偏离均值的百分比
-# scatterwidth偏移百分比设置过大,导致部分机器scatterwidth过小,影响机器恢复时间,恢复
-# 时间会导致集群的可靠性降低;其次,会导致部分机器scatterwith过大,某些chunkserver上的
-# copyset散落在各机器上,其他机器一旦写入,这些scatter-with较大的机器成为热点
-# scatterwidth偏移百分比设置过小,导致scatterwidth平均程度要求更大,copyset算法要求越高,
-# 导致算法可能算不出理想结果,建议设置值为20
+# Percentage by which the scatterWidth of any chunkserver may deviate from the mean.
+# Setting this percentage too large lets some machines end up with a scatterWidth that is too small,
+# which prolongs their recovery time and thereby reduces the cluster's reliability; it also lets some
+# machines end up with a scatterWidth that is too large, so their copysets are scattered across many
+# machines and they become hotspots once those machines start writing.
+# Setting the percentage too small demands a more uniform scatterWidth, which places higher demands
+# on the copyset algorithm and may leave it unable to produce an ideal result. The recommended value is 20.
 mds.copyset.scatterWidthFloatingPercentage={{ mds_copyset_scatterwidth_floating_percentage }}
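The four scatterWidth constraints above can be read as a single acceptance test over the per-chunkserver scatter-width distribution. A hypothetical checker follows; the config names in the comments are from this file, but the function itself is only an illustration, not Curve's placement algorithm:

    from statistics import pvariance, pstdev

    def scatter_width_ok(widths, max_var, max_std, max_range, float_pct):
        mean = sum(widths) / len(widths)
        return (pvariance(widths) <= max_var                  # scatterWidthVariance
                and pstdev(widths) <= max_std                 # scatterWidthStandardDevation
                and max(widths) - min(widths) <= max_range    # scatterWidthRange
                # every chunkserver stays within float_pct% of the mean
                and all(abs(w - mean) <= mean * float_pct / 100 for w in widths))

    print(scatter_width_ok([18, 20, 21, 22], 100, 10, 40, 20))  # True
    print(scatter_width_ok([10, 20, 30, 40], 100, 10, 40, 20))  # False: too spread out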
 #
 # curvefs config
 #
-# curvefs的默认chunk size大小,16MB = 16*1024*1024 = 16777216
+# Default chunk size for curvefs, 16MB = 16*1024*1024 = 16777216
 mds.curvefs.defaultChunkSize={{ chunk_size }}
-# curvefs的默认segment size大小,1GB = 1*1024*1024*1024 = 1073741824
+# Default segment size for curvefs, 1GB = 1*1024*1024*1024 = 1073741824
 mds.curvefs.defaultSegmentSize={{ segment_size }}
-# curvefs的默认最小文件大小,10GB = 10*1024*1024*1024 = 10737418240
+# Default minimum file size for curvefs, 10GB = 10*1024*1024*1024 = 10737418240
 mds.curvefs.minFileLength={{ min_file_length }}
-# curvefs的默认最大文件大小,20TB = 20*1024*1024*1024*1024 = 21990232555520
+# Default maximum file size for curvefs, 20TB = 20*1024*1024*1024*1024 = 21990232555520
 mds.curvefs.maxFileLength={{ max_file_length }}

 #
 # chunkseverclient config
 #
-# rpc 超时时间
+# RPC timeout
 mds.chunkserverclient.rpcTimeoutMs={{ mds_chunkserverclient_rpc_timeout_ms }}
-# rpc 重试次数
+# RPC retry count
 mds.chunkserverclient.rpcRetryTimes={{ mds_chunkserverclient_rpc_retry_times }}
-# rpc 重试时间间隔
+# RPC retry interval
 mds.chunkserverclient.rpcRetryIntervalMs={{ mds_chunkserverclient_rpc_retry_interval_ms }}
-# 从copyset的每个chunkserver getleader的重试的最大轮次
+# Maximum number of getleader retry rounds across the chunkservers of a copyset
 mds.chunkserverclient.updateLeaderRetryTimes={{ mds_chunkserverclient_update_leader_retry_times }}
-# 从copyset的每个chunkserver getleader的每一轮的间隔,需大于raft选主的时间
+# Interval between getleader rounds over the chunkservers of a copyset; must be longer than a raft leader election
 mds.chunkserverclient.updateLeaderRetryIntervalMs={{ mds_chunkserverclient_update_leader_retry_interval_ms }}

 # snapshotclone config
 #
-# snapshot clone server 地址
+# Snapshot clone server address
 mds.snapshotcloneclient.addr={{ snapshot_nginx_vip }}:{{ nginx_docker_external_port }}

 #
 # common options
 #
-# 日志存放文件夹
+# Log storage folder
 mds.common.logDir={{ mds_common_log_dir }}
-# 单元测试情况下
+# In the case of unit testing
 # mds.common.logDir=./runlog/
 #

diff --git a/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2 b/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2
index d7121c6dad..eadcb92bd7 100644
--- a/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2
@@ -1,28 +1,28 @@
 # part2 socket file address
 nebdserver.serverAddress={{ nebd_data_dir }}/nebd.sock

-# 文件锁路径
+# File lock path
 metacache.fileLockPath={{ nebd_data_dir }}/lock

-# 同步rpc的最大重试次数
+# Maximum number of retries for synchronous rpc
 request.syncRpcMaxRetryTimes={{ nebd_client_sync_rpc_retry_times }}
-# rpc请求的重试间隔
+# Retry interval for rpc requests
 request.rpcRetryIntervalUs={{ nebd_client_rpc_retry_inverval_us }}
-# rpc请求的最大重试间隔
+# Maximum retry interval for rpc requests
 request.rpcRetryMaxIntervalUs={{ nebd_client_rpc_retry_max_inverval_us }}
-# rpc hostdown情况下的重试时间
+# Retry interval in the rpc hostdown case
 request.rpcHostDownRetryIntervalUs={{ nebd_client_rpc_hostdown_retry_inverval_us }}
-# brpc的健康检查周期时间,单位s
+# Health check cycle of brpc, in seconds
 request.rpcHealthCheckIntervalS={{ nebd_client_health_check_internal_s }}
-# brpc从rpc失败到进行健康检查的最大时间间隔,单位ms
+# Maximum interval between an rpc failure and the brpc health check, in ms
 request.rpcMaxDelayHealthCheckIntervalMs={{ nebd_client_delay_health_check_internal_ms }}
-# rpc发送执行队列个数
+# Number of RPC send execution queues
 request.rpcSendExecQueueNum={{ nebd_client_rpc_send_exec_queue_num }}

-# heartbeat间隔
+# Heartbeat interval
 heartbeat.intervalS={{ nebd_client_heartbeat_inverval_s }}
-# heartbeat rpc超时时间
+# Heartbeat RPC timeout
 heartbeat.rpcTimeoutMs={{ nebd_client_heartbeat_rpc_timeout_ms }}

-# 日志路径
+# Log path
 log.path={{ nebd_log_dir }}/client

diff --git a/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2 b/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2
index 5262d0af37..7cd700b2db 100644
--- a/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2
@@ -1,16 +1,16 @@
-# curve-client配置文件地址
+# curve-client configuration file path
 curveclient.confPath={{ client_config_path }}

-#brpc server监听端口
+# brpc server listening port
 listen.address={{ nebd_data_dir }}/nebd.sock

-#元数据文件地址,包含文件名
+# Metadata file path, including the file name
 meta.file.path={{ nebd_data_dir }}/nebdserver.meta

-#心跳超时时间
+# Heartbeat timeout
 heartbeat.timeout.sec={{ nebd_server_heartbeat_timeout_s }}

-#文件超时检测时间间隔
+# File timeout detection interval
 heartbeat.check.interval.ms={{ nebd_server_heartbeat_check_interval_ms }}

 # return rpc when io error

diff --git a/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2 b/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2
index ca52b19925..00c20160a0 100644
--- a/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2
@@ -1,18 +1,18 @@
 #
 # curvefs client options
 #
-# client配置文件位置
+# Client configuration file location
 client.config_path={{ snap_client_config_path }}
-# mds root 用户名
+# mds root username
 mds.rootUser={{ curve_root_username }}
-# mds root 密码
+# mds root password
 mds.rootPassword={{ curve_root_password }}
-# 调用client方法的重试总时间
+# Total retry time for calling client methods
 client.methodRetryTimeSec={{ snap_client_method_retry_time_sec }}
-# 调用client方法重试间隔时间
+# Retry interval for calling client methods
 client.methodRetryIntervalMs={{ snap_client_method_retry_interval_ms }}
-# 日志文件位置
+# Log file location
 log.dir={{ snap_log_dir }}

@@ -26,53 +26,53 @@ s3.config_path={{ snap_s3_config_path }}
 server.address={{ ansible_ssh_host }}:{{ snapshot_port }}
 server.subnet={{ snapshot_subnet }}
 server.port={{ snapshot_port }}
-# 调用client异步方法重试总时间
+# Total retry time for calling client asynchronous methods
 server.clientAsyncMethodRetryTimeSec={{ snap_client_async_method_retry_time_sec }}
-# 调用client异步方法重试时间间隔
+# Retry interval for calling client asynchronous methods
 server.clientAsyncMethodRetryIntervalMs={{ snap_client_async_method_retry_interval_ms }}
-# 快照工作线程数
+# Number of snapshot worker threads
 server.snapshotPoolThreadNum={{ snap_snapshot_pool_thread_num }}
-# 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms)
+# Scan interval (ms) of the snapshot background thread over the waiting queue and working queue
 server.snapshotTaskManagerScanIntervalMs={{ snap_snapshot_task_manager_scan_interval_ms }}
-# 转储chunk分片大小
+# Chunk split size for dumping
 server.chunkSplitSize={{ snap_chunk_split_size }}
-# CheckSnapShotStatus调用间隔
+# CheckSnapShotStatus call interval
 server.checkSnapshotStatusIntervalMs={{ snap_check_snapshot_status_interval_ms }}
-# 最大快照数
+# Maximum number of snapshots
 server.maxSnapshotLimit={{ snap_max_snapshot_limit }}
-# 同时执行转储的线程数
+# Number of threads executing dumps concurrently
 server.snapshotCoreThreadNum={{ snap_snapshot_core_thread_num }}
-# mds session 时间
+# mds session time, in us
 server.mdsSessionTimeUs={{ file_expired_time_us }}
-# 每个线程同时进行ReadChunkSnapshot和转储的快照分片数量
+# Number of snapshot slices per thread on which ReadChunkSnapshot and dumping run concurrently
 server.readChunkSnapshotConcurrency={{ snap_read_chunk_snapshot_concurrency }}

 # for clone
-# 用于Lazy克隆元数据部分的线程池线程数
+# Number of thread pool threads used for the metadata stage of lazy clone
 server.stage1PoolThreadNum={{ snap_stage1_pool_thread_num }}
-# 用于Lazy克隆数据部分的线程池线程数
+# Number of thread pool threads used for the data stage of lazy clone
 server.stage2PoolThreadNum={{ snap_stage2_pool_thread_num }}
-# 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池线程数
+# Number of thread pool threads used for other control-plane requests such as non-lazy clone and clone deletion
 server.commonPoolThreadNum={{ snap_common_pool_thread_num }}
-# CloneTaskManager 后台线程扫描间隔
+# CloneTaskManager background thread scan interval
 server.cloneTaskManagerScanIntervalMs={{ snap_clone_task_manager_scan_interval_ms }}
-# clone chunk分片大小
+# Clone chunk split size
 server.cloneChunkSplitSize={{ snap_clone_chunk_split_size }}
-# 克隆临时目录
+# Clone temporary directory
 server.cloneTempDir={{ snap_clone_temp_dir }}
-# CreateCloneChunk同时进行的异步请求数量
+# Number of concurrent asynchronous CreateCloneChunk requests
 server.createCloneChunkConcurrency={{ snap_create_clone_chunk_concurrency }}
-# RecoverChunk同时进行的异步请求数量
+# Number of concurrent asynchronous RecoverChunk requests
 server.recoverChunkConcurrency={{ snap_recover_chunk_concurrency }}
-# CloneServiceManager引用计数后台扫描每条记录间隔
+# Interval between records in the CloneServiceManager reference-count background scan
 server.backEndReferenceRecordScanIntervalMs={{ snap_clone_backend_ref_record_scan_interval_ms }}
-# CloneServiceManager引用计数后台扫描每轮记录间隔
+# Interval between rounds of the CloneServiceManager reference-count background scan
 server.backEndReferenceFuncScanIntervalMs={{ snap_clone_backend_ref_func_scan_interval_ms }}

 #
-# etcd相关配置
+# etcd related configuration
 #
-# etcd地址
+# etcd address
{% set etcd_address=[] -%}
{% for host in groups.etcd -%}
{% set etcd_ip = hostvars[host].ansible_ssh_host -%}
@@ -81,11 +81,11 @@ server.backEndReferenceFuncScanIntervalMs={{ snap_clone_backend_ref_func_scan_in
{% endfor -%}
 etcd.endpoint={{ etcd_address | join(',') }}

-# client建立连接的超时时间
+# Timeout for the client to establish a connection
 etcd.dailtimeoutMs={{ snap_etcd_dailtimeout_ms }}
-# client进行put/get/txn等操作的超时时间
+# Timeout for client put/get/txn and other operations
 etcd.operation.timeoutMs={{ snap_etcd_operation_timeout_ms }}
-# client操作失败可以重试的次数
+# Number of times a failed client operation can be retried
 etcd.retry.times={{ snap_etcd_retry_times }}
 # wait dlock timeout
 etcd.dlock.timeoutMs={{ snap_etcd_dlock_timeout_ms }}
@@ -93,20 +93,20 @@ etcd.dlock.ttlSec={{ snap_etcd_dlock_ttl_sec }}

 #
-# leader选举相关参数
+# Leader election related parameters
 #
-# leader lock名称
+# Leader lock name
 leader.campagin.prefix={{ snap_leader_campagin_prefix }}
-# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s)
-# 该值和etcd集群election timeout相关.
-# etcd的server端限制了该值最小为1.5 * election timeout
-# 建议设置etcd集群election timeout为1s
+# A session is created during leader election; the unit is seconds (the Go-side interface takes this value in seconds)
+# This value is related to the etcd cluster's election timeout
+# The etcd server requires it to be at least 1.5 * election timeout
+# It is recommended to set the etcd cluster election timeout to 1s
 leader.session.intersec={{ snap_leader_session_inter_sec }}
-# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间
-# 内未当选leader会返回错误
+# Timeout for the leader election. If it is 0, an unsuccessful election blocks indefinitely.
+# If it is greater than 0, an error is returned when no leader is elected within electionTimeoutMs
 leader.election.timeoutms={{ snap_leader_election_timeout_ms }}

 #
-# dummyserver相关配置
+# Dummy server related configuration
 #
 server.dummy.listen.port={{ snapshot_dummy_port }}

diff --git a/curve-ansible/roles/generate_config/templates/tools.conf.j2 b/curve-ansible/roles/generate_config/templates/tools.conf.j2
index 6207e8a4ef..b630b3dfe3 100644
--- a/curve-ansible/roles/generate_config/templates/tools.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/tools.conf.j2
@@ -1,4 +1,4 @@
-# mds地址
+# mds address
{% set mds_address=[] -%}
{% for host in groups.mds -%}
{% set mds_ip = hostvars[host].ansible_ssh_host -%}
@@ -8,13 +8,13 @@
 mdsAddr={{ mds_address | join(',') }}
 # mds dummy port
 mdsDummyPort={{ hostvars[groups.mds[0]].mds_dummy_port }}
-# 发送rpc的超时时间
+# Timeout for sending rpc
 rpcTimeout={{ tool_rpc_timeout }}
-# rpc重试次数
+# RPC retry count
 rpcRetryTimes={{ tool_rpc_retry_times }}
 # the rpc concurrency to chunkserver
 rpcConcurrentNum={{ tool_rpc_concurrent_num }}
-# etcd地址
+# etcd address
{% set etcd_address=[] -%}
{% for host in groups.etcd -%}
{% set etcd_ip = hostvars[host].ansible_ssh_host -%}
@@ -23,7 +23,7 @@ rpcConcurrentNum={{ tool_rpc_concurrent_num }}
{% endfor -%}
 etcdAddr={{ etcd_address | join(',') }}
{% if groups.snapshotclone is defined and groups.snapshotclone[0] is defined %}
-# snapshot clone server 地址
+# Snapshot clone server address
{% set snap_address=[] -%}
{% for host in groups.snapshotclone -%}
{% set snap_ip = hostvars[host].ansible_ssh_host -%}

diff --git a/curve-ansible/roles/install_package/files/disk_uuid_repair.py b/curve-ansible/roles/install_package/files/disk_uuid_repair.py
index eb48728e2e..cfa5a32ac3 100644
--- a/curve-ansible/roles/install_package/files/disk_uuid_repair.py
+++ b/curve-ansible/roles/install_package/files/disk_uuid_repair.py
@@ -17,30 +17,34 @@
 # limitations under the License.
# -# 检测磁盘上disk.meta中记录的uuid与当前磁盘的实际uuid是否相符合 -# 如果不符合, 更新为当前的uuid +# Check if the uuid recorded in disk.meta on the disk matches the actual uuid of the current disk +# If not, update to the current uuid import os import hashlib import sys import subprocess + def __get_umount_disk_list(): - # 获取需要挂载的设备 + # Obtain devices that need to be mounted cmd = "lsblk -O|grep ATA|awk '{print $1}'" - out_msg = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) + out_msg = subprocess.check_output( + cmd, shell=True, stderr=subprocess.STDOUT) devlist = out_msg.splitlines() - # 查看当前设备的挂载状况 + # View the mounting status of the current device umount = [] for dev in devlist: cmd = "lsblk|grep " + dev + "|awk '{print $7}'" - out_msg = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) + out_msg = subprocess.check_output( + cmd, shell=True, stderr=subprocess.STDOUT) if len(out_msg.replace('\n', '')) == 0: umount.append(dev) return umount + def __uninit(): try: cmd = "grep curvefs /etc/fstab" @@ -49,6 +53,7 @@ def __uninit(): except subprocess.CalledProcessError: return True + def __analyse_uuid(kv): uuid = "" uuidkv = kv[0].split("=") @@ -64,25 +69,27 @@ def __analyse_uuid(kv): return "" else: uuidmd5 = uuidmd5kv[1].replace("\n", "") - # 校验 + # Verification if (hashlib.md5(uuid).hexdigest() != uuidmd5): print("uuid[%s] not match uuidmd5[%s]" % (uuid, uuidmd5)) return "" return uuid + def __get_recorduuid(disk): uuid = "" - # 将磁盘挂载到临时目录 + # Mount the disk to a temporary directory cmd = "mkdir -p /data/tmp; mount " + disk + " /data/tmp" retCode = subprocess.call(cmd, shell=True) if retCode != 0: print("Get record uuid in %s fail." % disk) return False, uuid - # 挂载成功,获取记录的uuid + # Successfully mounted, obtaining the recorded uuid try: cmd = "cat /data/tmp/disk.meta" - out_msg = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) + out_msg = subprocess.check_output( + cmd, shell=True, stderr=subprocess.STDOUT) kv = out_msg.splitlines() if len(kv) != 2: @@ -94,7 +101,7 @@ def __get_recorduuid(disk): except subprocess.CalledProcessError as e: print("Get file disk.meta from %s fail, reason: %s." % (disk, e)) - # 卸载磁盘 + # Unmount Disk cmd = "umount " + disk + "; rm -fr /data/tmp" retCode = subprocess.call(cmd, shell=True) if retCode != 0: @@ -103,75 +110,81 @@ def __get_recorduuid(disk): return True, uuid + def __get_actualuuid(disk): uuid = "" try: cmd = "ls -l /dev/disk/by-uuid/|grep " + disk + "|awk '{print $9}'" - uuid = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) + uuid = subprocess.check_output( + cmd, shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print("Get actual uuid of %s fail, reason: %s." 
% (disk, e))
     return uuid


 def __cmp_recorduuid_with_actual(umountDisk):
     recordList = {}
     actualList = {}
     for disk in umountDisk:
-        # 获取当前disk上记录的uuid
-        diskFullName = "/dev/" + disk
-        opRes, recorduuid = __get_recorduuid(diskFullName)
-        if opRes != True or len(recorduuid) == 0:
-            return False, recordList, actualList
-
-        # 获取disk的实际uuid
-        actualuuid = __get_actualuuid(disk).replace("\n", "")
-
-        # 比较记录的和实际的是否相同
-        if actualuuid != recorduuid:
-            recordList[disk] = recorduuid
-            actualList[disk] = actualuuid
-        else:
+        # Obtain the uuid recorded on the current disk
+        diskFullName = "/dev/" + disk
+        opRes, recorduuid = __get_recorduuid(diskFullName)
+        if opRes != True or len(recorduuid) == 0:
+            return False, recordList, actualList
+
+        # Obtain the actual uuid of the disk
+        actualuuid = __get_actualuuid(disk).replace("\n", "")
+
+        # Compare whether the recorded and actual uuids are the same
+        if actualuuid != recorduuid:
+            recordList[disk] = recorduuid
+            actualList[disk] = actualuuid
+        else:
             return False, recordList, actualList
     return True, recordList, actualList


 def __mount_with_atual_uuid(diskPath, record, actual):
     print("%s uuid change from [%s] to [%s]." % (diskPath, record, actual))

-    # 从/etc/fstab中获取对应的挂载目录
+    # Obtain the corresponding mount directory from /etc/fstab
     mntdir = ""
     try:
         cmd = "grep " + record + " /etc/fstab | awk -F \" \" '{print $2}'"
-        mntdir = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).replace("\n", "")
+        mntdir = subprocess.check_output(
+            cmd, shell=True, stderr=subprocess.STDOUT).replace("\n", "")
     except subprocess.CalledProcessError as e:
         print("Get mount dir for %s fail. error: %s." % (diskPath, e))
         return False

-    # 将actual挂载到相应的目录下
+    # Mount the device to the corresponding directory using the actual uuid
     cmd = "mount " + diskPath + " " + mntdir
     retCode = subprocess.call(cmd, shell=True)
-    if retCode !=0:
+    if retCode != 0:
         print("exec [%s] fail." % cmd)
         return False
     print("mount %s to %s success." % (diskPath, mntdir))

     replaceCmd = "sed -i \"s/" + record + "/" + actual + "/g\""
-    # 将新的uuid写入到fstab
+    # Write the new uuid to fstab
     cmd = "cp /etc/fstab /etc/fstab.bak;" + replaceCmd + " /etc/fstab > /dev/null"
     retCode = subprocess.call(cmd, shell=True)
-    if retCode !=0:
+    if retCode != 0:
         print("exec [%s] fail." % cmd)
         return False
     print("modify actual uuid to /etc/fstab for disk %s success." % diskPath)

-    # 将新的uuid写入到diskmeta
+    # Write the new uuid to diskmeta
     fileFullName = mntdir + "/disk.meta"
     filebakName = fileFullName + ".bak"
     cpcmd = "cp " + fileFullName + " " + filebakName
     uuidcmd = "echo uuid=" + actual + " > " + fileFullName
-    uuidmd5cmd = "echo uuidmd5=" + hashlib.md5(actual).hexdigest() + " >> " + fileFullName
+    uuidmd5cmd = "echo uuidmd5=" + \
+        hashlib.md5(actual).hexdigest() + " >> " + fileFullName
     cmd = cpcmd + ";" + uuidcmd + ";" + uuidmd5cmd
     retCode = subprocess.call(cmd, shell=True)
-    if retCode !=0:
+    if retCode != 0:
         print("exec [%s] fail." % cmd)
         return False
     print("modify actual uuid to %s success." % fileFullName)
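For reference, the disk.meta validation performed by this script boils down to "uuidmd5 must be the md5 of uuid". A standalone Python 3 sketch of that check follows; the script above is Python 2, so the .encode() call and the example uuid value here are adaptations for illustration:

    import hashlib

    def parse_disk_meta(text):
        """Return the uuid if the two-line uuid=/uuidmd5= content verifies, else ""."""
        lines = text.strip().splitlines()
        if len(lines) != 2:
            return ""
        uuid = lines[0].split("=", 1)[1]
        uuidmd5 = lines[1].split("=", 1)[1]
        if hashlib.md5(uuid.encode()).hexdigest() != uuidmd5:
            return ""
        return uuid

    uuid = "0f0188cd-f4b5-4352-b924-11dd87e5fc4c"   # hypothetical example value
    meta = "uuid=%s\nuuidmd5=%s" % (uuid, hashlib.md5(uuid.encode()).hexdigest())
    print(parse_disk_meta(meta))                    # prints the uuid; "" on mismatch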
@@ -182,29 +195,32 @@ def __mount_with_atual_uuid(diskPath, record, actual):
 def __handle_inconsistent(umountDisk, record, actual):
     for disk in umountDisk:
         if disk not in record:
-            print("record uuid and actual uuid of %s is same, please check other reason" % disk)
+            print(
+                "record uuid and actual uuid of %s is same, please check other reason" % disk)
             continue
-        # 按照actual uuid做挂载
-        res = __mount_with_atual_uuid("/dev/" + disk, record[disk], actual[disk])
+        # Mount according to the actual uuid
+        res = __mount_with_atual_uuid(
+            "/dev/" + disk, record[disk], actual[disk])
         if res:
             continue
         else:
             return False
     return True

+
 if __name__ == "__main__":
-    # 查看未挂载成功的磁盘设备列表
+    # Get the list of disk devices that were not mounted successfully
     umountDisk = __get_umount_disk_list()
     if len(umountDisk) == 0:
         print("All disk mount success.")
         exit(0)

-    # 查看是否之前已经挂载过
+    # Check whether the environment has been initialized before
     if __uninit():
         print("Please init env with chunkserver_ctl.sh first.")
         exit(0)

-    # 查看当前未挂载成功的磁盘设备记录的uuid和实际uuid
+    # Get the recorded uuid and the actual uuid of the disks that failed to mount
     cmpRes, record, actual = __cmp_recorduuid_with_actual(umountDisk)
     if cmpRes == False:
         print("Compare record uuid with actual uuid fail.")
@@ -213,13 +229,10 @@ def __handle_inconsistent(umountDisk, record, actual):
         print("Record uuid with actual uuid all consistent.")
         exit(0)

-    # 将不一致的磁盘按照当前的uuid重新挂载
+    # Remount inconsistent disks using the current uuid
     if __handle_inconsistent(umountDisk, record, actual):
         print("fix uuid-changed disk[%s] success." % umountDisk)
         exit(0)
     else:
         print("fxi uuid-changed disk[%s] fail." % umountDisk)
         exit(-1)
-
-
-

diff --git a/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2 b/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2
index cba41adfcd..d44a03c682 100644
--- a/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2
+++ b/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2
@@ -6,7 +6,7 @@ dataDir={{ chunkserver_data_dir }}
 raftLogProtocol={{ chunkserver_raft_log_procotol }}
 source ./chunkserver_deploy.sh

-# 使用方式
+# Usage
 function help() {
     echo "COMMANDS:"
     echo "    start   : start chunkserver"
@@ -50,18 +50,18 @@ function ip_value() {
     }'
 }

-# 从subnet获取ip
+# Obtain the IP from the subnet
 function get_ip_from_subnet() {
     subnet=$1
     prefix=$(ip_value $subnet)
     mod=`echo $subnet|awk -F/ '{print $2}'`
     mask=$((2**32-2**(32-$mod)))
-    # 对prefix再取一次模,为了支持10.182.26.50/22这种格式
+    # Mask the prefix once more, to support formats like 10.182.26.50/22
     prefix=$(($prefix&$mask))
     ip=
     for i in `/sbin/ifconfig -a|grep inet|grep -v inet6|awk '{print $2}'|tr -d "addr:"`
     do
-        # 把ip转换成整数
+        # Convert the IP to an integer
         ip_int=$(ip_value $i)
         if [ $(($ip_int&$mask)) -eq $prefix ]
         then
@@ -76,7 +76,7 @@ function get_ip_from_subnet() {
     fi
 }

-# 启动chunkserver
+# Start chunkserver
 function start() {
     if [ $# -lt 1 ]
     then
@@ -87,7 +87,7 @@ function start() {
     then
         confPath=$3
     fi
-    # 检查配置文件
+    # Check the configuration file
     if [ ! -f ${confPath} ]
     then
         echo "confPath $confPath not exist!"
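The get_ip_from_subnet helper above selects the local IP whose masked value equals the masked subnet prefix; re-masking the prefix is what allows non-canonical forms such as 10.182.26.50/22. The same arithmetic expressed in Python, purely for illustration:

    def ip_value(ip):
        a, b, c, d = (int(x) for x in ip.split("."))
        return (a << 24) | (b << 16) | (c << 8) | d

    def in_subnet(ip, subnet):
        prefix, mod = subnet.split("/")
        mask = 2**32 - 2**(32 - int(mod))
        # Mask the prefix itself too, so 10.182.26.50/22 behaves like 10.182.24.0/22
        return ip_value(ip) & mask == ip_value(prefix) & mask

    print(in_subnet("10.182.25.7", "10.182.26.50/22"))   # True
    print(in_subnet("10.182.28.1", "10.182.26.50/22"))   # False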
@@ -104,7 +104,7 @@ function start() {
     get_ip_from_subnet $external_subnet
     external_ip=$ip
     enableExternalServer=true
-    # external ip和internal ip一致或external ip为127.0.0.1时不启动external server
+    # Do not start the external server when the external IP and internal IP are the same or when the external IP is 127.0.0.1
     if [ $internal_ip = $external_ip -o $external_ip = "127.0.0.1" ]
     then
         enableExternalServer=false
@@ -148,7 +148,7 @@ function start_one() {
     fi

     jemallocpath={{ jemalloc_path }}
-    # 检查jemalloc库文件
+    # Check the jemalloc library file
     if [ ! -f ${jemallocpath} ]
     then
         echo "Not found jemalloc library, Path is ${jemallocpath}"
@@ -230,7 +230,7 @@ function restart() {
 }

 function wait_stop() {
-    # wait 3秒钟让它退出
+    # Wait 3 seconds for it to exit
     retry_times=0
     while [ $retry_times -le 3 ]
     do
@@ -244,7 +244,7 @@ function wait_stop() {
             break
         fi
     done
-    # 如果进程还在,就kill -9
+    # If the process is still alive, kill -9
     ps -efl|grep -E "curve-chunkserver .*${dataDir}/chunkserver$1 "|grep -v grep > /dev/null 2>&1
     if [ $? -eq 0 ]
     then
@@ -325,12 +325,12 @@ function deploy() {
 }

 function format() {
-    # 格式化chunkfile pool
+    # Format the chunkfile pool
     curve-format $*
 }

 function recordmeta() {
-    # 将当前的磁盘的uuid及其md5备份到磁盘的disk.meta文件中
+    # Back up the current disk's uuid and its md5 to the disk's disk.meta file
     meta_record;
 }

diff --git a/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2 b/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2
index db8566728a..7f84ccd28f 100644
--- a/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2
+++ b/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2
@@ -1,5 +1,5 @@
 #!/bin/bash
-#confirm提示,防止误操作
+# Confirm prompt to prevent misoperation
 dataDir={{ chunkserver_data_dir }}
 function do_confirm {
     echo "This deployment script will format the disk and delete all the data."
@@ -24,14 +24,14 @@ diskList="{{ dlist | join('\n') }}"
{% endif %}

 function deploy_prep {
-#清理/etc/fstab残留信息
+# Clean up residual /etc/fstab entries
 grep curvefs /etc/fstab
 if [ $? -eq 0 ]
 then
     sed -i '/curvefs/d' /etc/fstab
     sed -i '/chunkserver/d' /etc/fstab
 fi
-#将数据盘挂载的目录都卸载掉,为下一步格式化磁盘做准备
+# Unmount all directories mounted on the data disks, in preparation for formatting the disks in the next step
 for i in `{{ get_disk_list_cmd }}`
 do
     mntdir=`lsblk|grep $i|awk '{print $7}'`
@@ -49,7 +49,7 @@ function deploy_prep {
     fi
 done
 }
-#记录磁盘的盘符信息和磁盘的wwn信息,将信息持久化到diskinfo文件
+# Record each disk's device name and wwn, and persist the information to the diskinfo file
 declare -A disk_map
 diskinfo=./diskinfo
 function record_diskinfo {
@@ -69,7 +69,7 @@
 done
 }

-#根据磁盘数量创建数据目录和日志目录,目前的数据目录格式统一是$dataDir/chunkserver+num,日志目录在$dataDir/log/chunkserver+num
+# Create data and log directories based on the number of disks; the data directory format is uniformly $dataDir/chunkserver+num, and the log directory is $dataDir/log/chunkserver+num
 function chunk_dir_prep {
     if [ -d ${dataDir} ]
     then
@@ -90,7 +90,7 @@
     mkdir -p ${dataDir}/log/chunkserver$i
     done
 }
-#格式化磁盘文件系统
+# Format the disk file systems
 function disk_format {
     for disk in ${!disk_map[@]}
     do
@@ -99,7 +99,7 @@
 }

-#将创建好的数据目录按照顺序挂载到格式化好的磁盘上,并记录挂载信息到mount.info
+# Mount the created data directories onto the formatted disks in order, and record the mounting information to mount.info
 function mount_dir {
     while [ 1 ]
     do
@@ -128,7 +128,7 @@
     lsblk > ./mount.info
 }

-#持久化挂载信息到fstab文件,防止系统重启后丢失
+# Persist the mounting information to the fstab file so it is not lost after a system restart
 function fstab_record {
     grep curvefs /etc/fstab
     if [ $? -ne 0 ]
@@ -141,7 +141,7 @@
     fi
 }

-#将当前的uuid持久化到磁盘上做备份,防止系统重启后uuid发生变化
+# Persist the current uuid to disk as a backup, in case the uuid changes after a system restart
 function meta_record {
     grep curvefs /etc/fstab
     if [ $? -eq 0 ]
@@ -158,7 +158,7 @@
     fi
 }

-#初始化chunkfile pool
+# Initialize the chunkfile pool
 function chunkfile_pool_prep {
     ret=`lsblk|grep chunkserver|wc -l`
     for i in `seq 0 $((${ret}-1))`
@@ -224,20 +224,20 @@ function deploy_all {
 function deploy_one {
     local diskname=$1
     local dirname=$2
-    #目录不存在
+    # The directory does not exist
     if [ ! -d $dirname ]
     then
         echo "$dirname is not exist!"
         exit 1
     fi
-    #磁盘正在挂载使用
+    # The disk is currently mounted and in use
     mount | grep -w $diskname
     if [ $? -eq 0 ]
     then
         echo "$diskname is being used"
         exit 1
     fi
-    #目录正在挂载使用
+    # The directory is currently mounted and in use
     mount | grep -w $dirname
     if [ $? -eq 0 ]
     then
@@ -265,7 +265,7 @@ function deploy_one {
     done
     mount $diskname $dirname
     lsblk > ./mount.info
-    #更新fstab
+    # Update fstab
     short_diskname=`echo $diskname|awk -F"/" '{print $3}'`
     ls -l /dev/disk/by-uuid|grep -w $short_diskname
     if [ $? -ne 0 ]
@@ -275,12 +275,12 @@ function deploy_one {
     fi
     uuid=`ls -l /dev/disk/by-uuid/|grep -w ${short_diskname}|awk '{print $9}'`
     echo "UUID=$uuid $dirname ext4 rw,errors=remount-ro 0 0" >> /etc/fstab
-    # 将uuid及其md5写到diskmeta中
+    # Write the uuid and its md5 to diskmeta
     uuidmd5=`echo -n $uuid | md5sum | cut -d ' ' -f1`
     touch $dirname/disk.meta
     echo "uuid=$uuid" > $dirname/disk.meta
     echo "uuidmd5=$uuidmd5" >> $dirname/disk.meta
-    #格式化chunkfile pool
+    # Format the chunkfile pool
     curve-format -allocatePercent={{ chunk_alloc_percent }} \
     -filePoolDir=$dirname/chunkfilepool \

diff --git a/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2 b/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2
index 6c0b36c932..9aadcb311f 100644
--- a/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2
+++ b/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2
@@ -1,12 +1,12 @@
 #!/bin/bash

-# 默认配置文件
+# Default configuration file
 confPath={{ etcd_config_path }}

-# 日志文件目录
+# Log file directory
 logDir={{ etcd_log_dir }}

-# 日志文件路径
+# Log file path
 logPath=${logDir}/etcd.log

 # pidfile
@@ -15,9 +15,9 @@ pidFile=${HOME}/etcd.pid
 # daemon log
 daemonLog=${logDir}/daemon-etcd.log

-# 启动etcd
+# Start etcd
 function start_etcd() {
-    # 创建logDir
+    # Create logDir
     mkdir -p ${logDir} > /dev/null 2>&1
     if [ $? -ne 0 ]
     then
@@ -25,14 +25,14 @@ function start_etcd() {
         exit 1
     fi

-    # 检查logPath是否有写权限
+    # Check if logDir has write permission
    if [ ! -w ${logDir} ]
     then
         echo "Write permission denied: ${logDir}"
         exit 1
     fi

-    # 检查logPath是否可写或者是否能够创建
+    # Check if logPath is writable or can be created
     touch ${logPath} > /dev/null 2>&1
     if [ $? -ne 0 ]
     then
@@ -40,7 +40,7 @@ function start_etcd() {
         exit
     fi

-    # 检查daemonLog是否可写或者是否能够创建
+    # Check if daemonLog is writable or can be created
     touch ${daemonLog} > /dev/null 2>&1
     if [ $? -ne 0 ]
     then
@@ -48,28 +48,28 @@ function start_etcd() {
         exit
     fi

-    # 检查daemon
+    # Check for the daemon tool
     if ! type daemon &> /dev/null
     then
         echo "No daemon installed"
         exit
     fi

-    # 检查是否安装etcd
+    # Check if etcd is installed
     if [ -z `command -v etcd` ]
     then
         echo "No etcd installed"
         exit
     fi

-    # 检查配置文件
+    # Check the configuration file
     if [ ! -f ${confPath} ]
     then
         echo "Not found confFile, Path is ${confPath}"
         exit
     fi

-    # 判断是否已经通过daemon启动了etcd
+    # Check whether etcd has already been started via daemon
     daemon --name etcd --pidfile ${pidFile} --running
     if [ $? -eq 0 ]
     then
@@ -77,8 +77,8 @@ function start_etcd() {
         exit
     fi

-    # pidfile不存在 或 daemon进程不存在
-    # 启动daemon,切换路径,并启动etcd
+    # The pidfile does not exist or the daemon process does not exist
+    # Start the daemon, switch paths, and start etcd
     daemon --name etcd --core \
@@ -90,9 +90,9 @@
            -- {{ install_etcd_dir }}/etcd --config-file ${confPath}
 }

-# 停止daemon进程和etcd
+# Stop the daemon process and etcd
 function stop_etcd() {
-    # 判断是否已经通过daemon启动了etcd
+    # Check whether etcd has already been started via daemon
     daemon --name etcd --pidfile ${pidFile} --running
     if [ $? -ne 0 ]
     then
@@ -112,7 +112,7 @@ function stop_etcd() {

 # restart
 function restart_etcd() {
-    # 判断是否已经通过daemon启动了etcd
+    # Check whether etcd has already been started via daemon
     daemon --name etcd --pidfile ${pidFile} --running
     if [ $? -ne 0 ]
     then
@@ -127,7 +127,7 @@ function restart_etcd() {
     fi
 }

-# 使用方式
+# Usage
 function usage() {
     echo "Usage:"
     echo "  etcd-daemon start -- start deamon process and watch on etcd process"
@@ -139,7 +139,7 @@ function usage() {
     echo "  etcd-daemon start -c /etcd/etcd.conf.yml -l ${HOME}/etcd.log"
 }

-# 检查参数启动参数,最少1个
+# Check the startup parameters; at least 1 is required
 if [ $# -lt 1 ]
 then
     usage
@@ -150,7 +150,7 @@ case $1 in
     "start")
         shift # pass first argument

-        # 解析参数
+        # Parse the parameters
         while [[ $# -gt 1 ]]
         do
             key=$1
@@ -176,11 +176,11 @@ case $1 in
         start_etcd
         ;;
     "stop")
-        # 停止daemon和etcd进程
+        # Stop the daemon and etcd processes
         stop_etcd
         ;;
     "restart")
-        # 重启etcd
+        # Restart etcd
         restart_etcd
         ;;
     *)

diff --git a/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2 b/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2
index 6d69e6d47d..81f55b7ed7 100644
--- a/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2
+++ b/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2
@@ -1,12 +1,12 @@
 #!/bin/bash

-# curve-mds路径
+# curve-mds path
 curveBin={{ curve_bin_dir }}/curve-mds

-# 默认配置文件
+# Default configuration file
 confPath={{ mds_config_path }}

-# 日志文件路径
+# Log file path
 logPath={{ mds_log_dir }}

 # mdsAddr
@@ -28,30 +28,30 @@ function ip_value() {
     }'
 }

-# 启动mds
+# Start mds
 function start_mds() {
-    # 检查daemon
+    # Check for the daemon tool
     if ! type daemon &> /dev/null
     then
         echo "No daemon installed"
         exit 1
     fi

-    # 检查curve-mds
+    # Check curve-mds
     if [ ! -f ${curveBin} ]
     then
         echo "No curve-mds installed"
         exit 1
     fi

-    # 检查配置文件
+    # Check the configuration file
    if [ ! -f ${confPath} ]
     then
         echo "Not found mds.conf, Path is ${confPath}"
         exit 1
     fi

-    # 判断是否已经通过daemon启动了curve-mds
+    # Check whether curve-mds has already been started via daemon
     daemon --name curve-mds --pidfile ${pidFile} --running
     if [ $? -eq 0 ]
     then
@@ -59,7 +59,7 @@ function start_mds() {
         exit
     fi

-    # 创建logPath
+    # Create logPath
     mkdir -p ${logPath} > /dev/null 2>&1
     if [ $? -ne 0 ]
     then
@@ -67,14 +67,14 @@ function start_mds() {
         exit 1
     fi

-    # 检查logPath是否有写权限
+    # Check if logPath has write permission
     if [ ! -w ${logPath} ]
     then
         echo "Write permission denied: ${logPath}"
         exit 1
     fi

-    # 检查consoleLog是否可写或者是否能够创建
+    # Check if consoleLog is writable or can be created
     touch ${consoleLog} > /dev/null 2>&1
     if [ $? -ne 0 ]
     then
@@ -82,7 +82,7 @@ function start_mds() {
         exit 1
     fi

-    # 检查daemonLog是否可写或者是否能够创建
+    # Check if daemonLog is writable or can be created
     touch ${daemonLog} > /dev/null 2>&1
     if [ $? -ne 0 ]
     then
@@ -90,7 +90,7 @@ function start_mds() {
         exit 1
     fi

-    # 未指定mdsAddr, 从配置文件中解析出网段
+    # No mdsAddr specified; resolve the subnet from the configuration file
     if [ -z ${mdsAddr} ]
     then
         subnet=`cat $confPath|grep global.subnet|awk -F"=" '{print $2}'`
@@ -101,11 +101,11 @@ function start_mds() {
         ip=
         echo "subnet: $subnet"
         echo "port: $port"
-        # 对prefix再取一次模,为了支持10.182.26.50/22这种格式
+        # Mask the prefix once more, to support formats like 10.182.26.50/22
         prefix=$(($prefix&$mask))
         for i in `/sbin/ifconfig -a|grep inet|grep -v inet6|awk '{print $2}'|tr -d "addr:"`
         do
-            # 把ip转换成整数
+            # Convert the IP to an integer
             ip_int=$(ip_value $i)
             if [ $(($ip_int&$mask)) -eq $prefix ]
             then
@@ -132,9 +132,9 @@ function start_mds() {
     show_status
 }

-# 停止daemon进程,且停止curve-mds
+# Stop the daemon process and stop curve-mds
 function stop_mds() {
-    # 判断是否已经通过daemon启动了curve-mds
+    # Check whether curve-mds has already been started via daemon
     daemon --name curve-mds --pidfile ${pidFile} --running
     if [ $? -ne 0 ]
     then
@@ -154,7 +154,7 @@ function stop_mds() {

 # restart
 function restart_mds() {
-    # 判断是否已经通过daemon启动了curve-mds
+    # Check whether curve-mds has already been started via daemon
     daemon --name curve-mds --pidfile ${pidFile} --running
     if [ $? -ne 0 ]
     then
@@ -171,7 +171,7 @@ function restart_mds() {

 # show status
 function show_status() {
-    # 判断是否已经通过daemon启动了curve-mds
+    # Check whether curve-mds has already been started via daemon
     daemon --name curve-mds --pidfile ${pidFile} --running
     if [ $? -ne 0 ]
     then
@@ -179,11 +179,11 @@ function show_status() {
         exit 1
     fi

-    # 查询leader的IP
+    # Query the leader's IP
     leaderAddr=`tac ${consoleLog}|grep -a -m 1 -B 1000000 "Logging before InitGoogleLogging()"|grep "leader"|grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}"|head -n1`

-    # 如果load mds configuration之后的日志,没有leader相关日志
-    # 那么leaderAddr为空, mds应该没有起来
+    # If there are no leader-related entries in the log after "load mds configuration"
+    # then leaderAddr is empty and mds has probably not come up
     if [ -z ${leaderAddr} ]
     then
         echo "MDS may not start successfully, check log"
@@ -194,7 +194,7 @@ function show_status() {
     then
         echo "Current MDS is LEADER"
     else
-        # 查询是否和自身ip相等
+        # Check whether the leader address equals one of this host's IPs
         for ip in `(hostname -I)`
         do
             if [ ${leaderAddr} = ${ip} ]
@@ -208,7 +208,7 @@ function show_status() {
     fi
 }

-# 使用方式
+# Usage
 function usage() {
     echo "Usage:"
     echo "  ./mds-daemon.sh start -- start deamon process and watch on curve-mds process"
@@ -222,7 +222,7 @@ function usage() {
     echo "  ./mds-daemon.sh start -c /etc/curve/mds.conf -l ${HOME}/ -a 127.0.0.1:6666"
 }

-# 检查参数启动参数,最少1个
+# Check the startup parameters; at least 1 is required
 if [ $# -lt 1 ]
 then
     usage
@@ -233,7 +233,7 @@ case $1 in
     "start")
         shift # pass first argument

-        # 解析参数
+        # Parse the parameters
         while [[ $# -gt 1 ]]
         do
             key=$1

diff --git a/curve-ansible/roles/install_package/templates/nebd-daemon.j2 b/curve-ansible/roles/install_package/templates/nebd-daemon.j2
index 50bdc2a07e..d170963075 100644
--- a/curve-ansible/roles/install_package/templates/nebd-daemon.j2
+++ b/curve-ansible/roles/install_package/templates/nebd-daemon.j2
@@ -133,7 +133,7 @@ function stop_one() {
         return
     fi

-    # 判断是否已经通过daemon启动了nebd-server
+    # Check whether nebd-server has already been started via daemon
     daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running
     if [ $? -ne 0 ]; then
         echo "$1: didn't start nebd-server by daemon"
@@ -174,7 +174,7 @@ function restart_one() {
         return
     fi

-    # 判断是否已经通过daemon启动了nebd-server
+    # Check whether nebd-server has already been started via daemon
     daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running
     if [ $? -ne 0 ]; then
         echo "$1: didn't start nebd-server by daemon"
@@ -262,7 +262,7 @@ function status() {
     done
 }

-# 使用方式
+# Usage
 function usage() {
     echo "Usage:"
     echo "  nebd-daemon start -- start deamon process and watch on nebd-server process for all instance"
@@ -278,7 +278,7 @@ function usage() {
     echo "  nebd-daemon status-one -- show if the nebd-server is running by daemon for current user's instance"
 }

-# 检查参数启动参数,最少1个
+# Check the startup parameters; at least 1 is required
 if [ $# -lt 1 ]; then
     usage
     exit

diff --git a/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2 b/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2
index 4d7edae130..169ff2b84d 100644
--- a/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2
+++ b/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2
@@ -1,12 +1,12 @@
 #!/bin/bash

-# curve-snapshotcloneserver路径
+# curve-snapshotcloneserver path
 curveBin={{ curve_bin_dir }}/curve-snapshotcloneserver

-# 默认配置文件
+# Default configuration file
 confPath={{ snapshot_config_path }}

-# 日志文件路径
+# Log file path
 logPath={{ snapshot_clone_server_log_dir }}

 # serverAddr
@@ -28,30 +28,30 @@ function ip_value() {
     }'
 }

-# 启动snapshotcloneserver
+# Start snapshotcloneserver
 function start_server() {
-    # 检查daemon
+    # Check for the daemon tool
     if ! type daemon &> /dev/null
     then
         echo "No daemon installed"
         exit
     fi

-    # 检查curve-snapshotcloneserver
+    # Check curve-snapshotcloneserver
     if [ ! -f ${curveBin} ]
     then
         echo "No curve-snapshotcloneserver installed, Path is ${curveBin}"
         exit
     fi

-    # 检查配置文件
+    # Check the configuration file
     if [ ! -f ${confPath} ]
     then
         echo "Not found snapshot_clone_server.conf, Path is ${confPath}"
         exit
     fi

-    # 判断是否已经通过daemon启动了curve-snapshotcloneserver
+    # Check whether curve-snapshotcloneserver has already been started via daemon
     daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running
     if [ $? -eq 0 ]
     then
@@ -59,7 +59,7 @@ function start_server() {
         exit
     fi

-    # 创建logPath
+    # Create logPath
     mkdir -p ${logPath} > /dev/null 2>&1
     if [ $? -ne 0 ]
     then
@@ -67,14 +67,14 @@ function start_server() {
         exit
     fi

-    # 检查logPath是否有写权限
+    # Check if logPath has write permission
     if [ ! -w ${logPath} ]
     then
         echo "Write permission denied: ${logPath}"
         exit 1
     fi

-    # 检查consoleLog是否可写或者能否创建,初始化glog之前的日志存放在这里
+    # Check if consoleLog is writable or can be created; logs produced before glog is initialized are stored here
     touch ${consoleLog} > /dev/null 2>&1
     if [ $? -ne 0 ]
     then
@@ -82,7 +82,7 @@ function start_server() {
         exit
     fi

-    # 检查daemonLog是否可写或者是否能够创建
+    # Check if daemonLog is writable or can be created
     touch ${daemonLog} > /dev/null 2>&1
     if [ $? -ne 0 ]
     then
@@ -90,7 +90,7 @@ function start_server() {
         exit
     fi

-    # 未指定serverAddr, 从配置文件中解析出网段
+    # No serverAddr specified; resolve the subnet from the configuration file
     if [ -z ${serverAddr} ]
     then
         subnet=`cat $confPath|grep server.subnet|awk -F"=" '{print $2}'`
@@ -101,11 +101,11 @@ function start_server() {
         ip=
         echo "subnet: $subnet"
         echo "port: $port"
-        # 对prefix再取一次模,为了支持10.182.26.50/22这种格式
+        # Mask the prefix once more, to support formats like 10.182.26.50/22
         prefix=$(($prefix&$mask))
         for i in `/sbin/ifconfig -a|grep inet|grep -v inet6|awk '{print $2}'|tr -d "addr:"`
         do
-            # 把ip转换成整数
+            # Convert the IP to an integer
             ip_int=$(ip_value $i)
             if [ $(($ip_int&$mask)) -eq $prefix ]
             then
@@ -132,9 +132,9 @@ function start_server() {
     show_status
 }

-# 停止daemon进程和curve-snapshotcloneserver
+# Stop the daemon process and curve-snapshotcloneserver
 function stop_server() {
-    # 判断是否已经通过daemon启动了curve-snapshotcloneserver
+    # Check whether curve-snapshotcloneserver has already been started via daemon
     daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running
     if [ $? -ne 0 ]
     then
@@ -154,7 +154,7 @@ function stop_server() {

 # restart
 function restart_server() {
-    # 判断是否已经通过daemon启动了curve-snapshotcloneserver
+    # Check whether curve-snapshotcloneserver has already been started via daemon
     daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running
     if [ $? -ne 0 ]
     then
@@ -171,7 +171,7 @@ function restart_server() {

 # show status
 function show_status() {
-    # 判断是否已经通过daemon启动了curve-snapshotcloneserver
+    # Check whether curve-snapshotcloneserver has already been started via daemon
     daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running
     if [ $? -ne 0 ]
     then
@@ -179,11 +179,11 @@ function show_status() {
         exit 1
     fi

-    # 查询leader的IP
+    # Query the leader's IP
     leaderAddr=`tac ${consoleLog}|grep -a -m 1 -B 1000000 "Logging before InitGoogleLogging()"|grep "leader"|grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}"|head -n1`

-    # 如果load configuration之后的日志,没有leader相关日志
-    # 那么leaderAddr为空, snapshotcloneserver应该没有起来
+    # If there are no leader-related entries in the log after "load configuration"
+    # then leaderAddr is empty and snapshotcloneserver has probably not come up
     if [ -z ${leaderAddr} ]
     then
         echo "SnapshotClone may not start successfully, check log"
@@ -194,7 +194,7 @@ function show_status() {
     then
         echo "Current SnapshotClone is LEADER"
     else
-        # 查询是否和自身ip相等
+        # Check whether the leader address equals one of this host's IPs
        for ip in `(hostname -I)`
        do
            if [ ${leaderAddr} = ${ip} ]
@@ -208,7 +208,7 @@ function show_status() {
     fi
 }

-# 使用方式
+# Usage
 function usage() {
     echo "Usage:"
     echo "  snapshot-daemon start -- start deamon process and watch on curve-snapshotcloneserver process"
@@ -222,7 +222,7 @@ function usage() {
     echo "  snapshot-daemon start -c /etc/curve/snapshot_clone_server.conf -l ${HOME}/ -a 127.0.0.1:5555"
 }

-# 检查参数启动参数,最少1个
+# Check the startup parameters; at least 1 is required
 if [ $# -lt 1 ]
 then
     usage
@@ -233,7 +233,7 @@ case $1 in
     "start")
         shift # pass first argument

-        # 解析参数
+        # Parse the parameters
         while [[ $# -gt 1 ]]
         do
             key=$1

diff --git a/curve-ansible/roles/install_package/vars/main.yml b/curve-ansible/roles/install_package/vars/main.yml
index ee545c1d7b..8967883b7c 100644
--- a/curve-ansible/roles/install_package/vars/main.yml
+++ b/curve-ansible/roles/install_package/vars/main.yml
@@ -15,7 +15,7 @@
 # limitations under the License.
 #

-# 包的名称
+# Name of the package
 package_name:
 package_version:
 lib_installed: false

diff --git a/curve-ansible/roles/restart_service/defaults/main.yml b/curve-ansible/roles/restart_service/defaults/main.yml
index 061c32a4ec..0051d42ecc 100644
--- a/curve-ansible/roles/restart_service/defaults/main.yml
+++ b/curve-ansible/roles/restart_service/defaults/main.yml
@@ -16,7 +16,7 @@
 #

 check_health: False
-# 启动一个chunkserver需要的最大时间
+# Maximum time required to start a chunkserver
 restart_chunkserver_async: 100
 restart_chunkserver_check_delay: 5
 restart_chunkserver_check_times: 20

diff --git a/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml b/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml
index d74b05abc7..6b3050bb01 100644
--- a/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml
+++ b/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml
@@ -17,7 +17,7 @@

 - name: Determine need restart or not
   block:
-    # 获取mds的版本
+    # Obtain the version of mds
    - name: get curve version
      vars:
        metric_port: "{{ mds_dummy_port }}"

diff --git a/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml b/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml
index 73f6bcf636..966d9b95d6 100644
--- a/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml
+++ b/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml
@@ -17,7 +17,7 @@

 - name: Determine need restart or not
   block:
-    # 获取snapshotclone的版本
+    # Obtain the version of snapshotclone
    - name: get snapshotclone version
      vars:
        metric_port: "{{ snapshot_dummy_port }}"

diff --git a/curve-ansible/roles/restart_service/tasks/main.yml b/curve-ansible/roles/restart_service/tasks/main.yml
index befb68b5b3..a8b077a3a4 100644
--- a/curve-ansible/roles/restart_service/tasks/main.yml
diff --git a/curve-ansible/roles/install_package/vars/main.yml b/curve-ansible/roles/install_package/vars/main.yml index ee545c1d7b..8967883b7c 100644 --- a/curve-ansible/roles/install_package/vars/main.yml +++ b/curve-ansible/roles/install_package/vars/main.yml @@ -15,7 +15,7 @@ # limitations under the License. # -# 包的名称 +# Name of the package package_name: package_version: lib_installed: false diff --git a/curve-ansible/roles/restart_service/defaults/main.yml b/curve-ansible/roles/restart_service/defaults/main.yml index 061c32a4ec..0051d42ecc 100644 --- a/curve-ansible/roles/restart_service/defaults/main.yml +++ b/curve-ansible/roles/restart_service/defaults/main.yml @@ -16,7 +16,7 @@ # check_health: False -# 启动一个chunkserver需要的最大时间 +# Maximum time needed to start a chunkserver restart_chunkserver_async: 100 restart_chunkserver_check_delay: 5 restart_chunkserver_check_times: 20 diff --git a/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml b/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml index d74b05abc7..6b3050bb01 100644 --- a/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml +++ b/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml @@ -17,7 +17,7 @@ - name: Determine need restart or not block: - # 获取mds的版本 + # Get the version of mds - name: get curve version vars: metric_port: "{{ mds_dummy_port }}" diff --git a/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml b/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml index 73f6bcf636..966d9b95d6 100644 --- a/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml +++ b/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml @@ -17,7 +17,7 @@ - name: Determine need restart or not block: - # 获取snapshotclone的版本 + # Get the version of snapshotclone - name: get snapshotclone version vars: metric_port: "{{ snapshot_dummy_port }}" diff --git a/curve-ansible/roles/restart_service/tasks/main.yml b/curve-ansible/roles/restart_service/tasks/main.yml index befb68b5b3..a8b077a3a4 100644 --- a/curve-ansible/roles/restart_service/tasks/main.yml +++ b/curve-ansible/roles/restart_service/tasks/main.yml @@ -15,6 +15,6 @@ # limitations under the License. # -# 重启对应的服务 +# Restart the corresponding service - name: restart_service include_tasks: "include/restart_{{ service_name }}.yml" diff --git a/curve-ansible/roles/restart_service/vars/main.yml b/curve-ansible/roles/restart_service/vars/main.yml index 94f0bad0c6..44f7d6797e 100644 --- a/curve-ansible/roles/restart_service/vars/main.yml +++ b/curve-ansible/roles/restart_service/vars/main.yml @@ -15,7 +15,7 @@ # limitations under the License. # -# 服务的名称 +# Name of the service service_name: need_restart: true sudo: "" diff --git a/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml b/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml index 82478df03e..f2a67fdba1 100644 --- a/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml +++ b/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml @@ -1,4 +1,4 @@ -# 服务的名称 +# Name of the service service_name: leader_ip: all_ip: diff --git a/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml b/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml index 25fecb2337..32602a56cd 100644 --- a/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml +++ b/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml @@ -27,7 +27,7 @@ poll: "{{ service_poll }}" failed_when: start_chunkserver_res.rc != 0 or "down" in start_chunkserver_res.stdout -# 打印控制台输出 +# Print console output - name: print console output debug: var: start_chunkserver_res.stdout_lines diff --git a/curve-ansible/roles/start_service/tasks/main.yml b/curve-ansible/roles/start_service/tasks/main.yml index 483dfd5d9a..be93405394 100644 --- a/curve-ansible/roles/start_service/tasks/main.yml +++ b/curve-ansible/roles/start_service/tasks/main.yml @@ -15,6 +15,6 @@ # limitations under the License. # -# 启动对应的服务 +# Start the corresponding service - name: start_service include_tasks: "include/start_{{ service_name }}.yml" diff --git a/curve-ansible/roles/stop_service/tasks/main.yml b/curve-ansible/roles/stop_service/tasks/main.yml index 0b2bbb486e..d3b8cbd018 100644 --- a/curve-ansible/roles/stop_service/tasks/main.yml +++ b/curve-ansible/roles/stop_service/tasks/main.yml @@ -15,6 +15,6 @@ # limitations under the License. # -# 启动对应的服务 +# Stop the corresponding service - name: stop_service include_tasks: "include/stop_{{ service_name }}.yml" diff --git a/curve-ansible/rolling_update_curve.yml b/curve-ansible/rolling_update_curve.yml index fddd6832bf..61949f9f8f 100644 --- a/curve-ansible/rolling_update_curve.yml +++ b/curve-ansible/rolling_update_curve.yml @@ -83,7 +83,7 @@ - { role: generate_config, template_name: topo.json, conf_path: "{{ topo_file_path }}", tags: ["generate_config", "generage_topo_json"] } -# 获取leader节点和follower节点 +# Get the leader and follower nodes - name: set mds leader and follower list hosts: mds gather_facts: no @@ -95,7 +95,7 @@ roles: - { role: set_leader_and_follower_list, service_name: mds }
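The follower-first ordering used by the next play can be made explicit with a one-line Python sketch (host names here are hypothetical):

# Roll out to the followers first and the leader last, as the play below does.
def update_order(all_hosts, leader_ip):
    return [h for h in all_hosts if h != leader_ip] + [leader_ip]

print(update_order(["10.0.0.1", "10.0.0.2", "10.0.0.3"], "10.0.0.2"))
# -> ['10.0.0.1', '10.0.0.3', '10.0.0.2']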
-# 按顺序先升级follower节点,再升级leader节点 +# Upgrade the follower nodes first, then the leader node - name: update follower and leader server in sequence hosts: mds_servers_followers, mds_servers_leader any_errors_fatal: true @@ -110,14 +110,14 @@ - pause: prompt: "Confirm restart mds in {{ inventory_hostname }}. ENTER to continue or CTRL-C A to quit" when: need_confirm | bool - # 重启mds + # Restart mds roles: - { role: restart_service, service_name: mds, expected_curve_version: "{{ mds_package_version }}", command_need_sudo: "{{ mds_need_sudo | bool }}"} ############################## rolling update chunkserver ############################## -# 1. 更新各节点上的配置 +# 1. Update the configuration on each node - name: prepare chunkserver hosts: chunkservers any_errors_fatal: true @@ -136,8 +136,8 @@ - { role: generate_config, template_name: s3.conf, conf_path: "{{ chunkserver_s3_config_path }}", tags: ["generate_config", "generage_cs_s3_conf"] } -# 逐个重启chunkserver,每重启完一个需要等待copyset健康 -# 继续操作下一个的的时候还需要一个命令行交互确认 +# Restart the chunkservers one by one, waiting for the copysets to become healthy after each restart +# A command-line confirmation is required before moving on to the next one (see the sketch below) - name: restart chunkserver and wait healthy hosts: chunkservers any_errors_fatal: true @@ -203,7 +203,7 @@ - pause: prompt: "Confirm restart snapshotclone in {{ inventory_hostname }}. ENTER to continue or CTRL-C A to quit" when: need_confirm | bool - # 重启snapshot clone + # Restart snapshot clone roles: - { role: restart_service, service_name: snapshotclone, expected_curve_version: "{{ snapshot_package_version }}", command_need_sudo: "{{ snapshot_need_sudo | bool }}" }
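A rough Python equivalent of the wait-for-healthy step referenced above. It is only a sketch: it assumes a curve_ops_tool status command whose output contains the defined_healthy_status string configured in server.ini, and the delay/times defaults mirror the restart_chunkserver_check_* values:

import subprocess
import time

# Poll cluster status after restarting one chunkserver; proceed only once
# the expected healthy marker appears in the tool output.
def wait_copysets_healthy(expect="cluster is healthy", delay=5, times=20):
    for _ in range(times):
        out = subprocess.run(["curve_ops_tool", "status"],
                             capture_output=True, text=True).stdout
        if expect in out:
            return True
        time.sleep(delay)
    return False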
diff --git a/curve-ansible/server.ini b/curve-ansible/server.ini index eaca5a4515..7e06fbe105 100644 --- a/curve-ansible/server.ini +++ b/curve-ansible/server.ini @@ -14,8 +14,8 @@ localhost ansible_ssh_host=127.0.0.1 [zone1] localhost ansible_ssh_host=127.0.0.1 -# 请确保zone内机器数量一致,如果有多个zone,则在上面根据zone1格式增加zone2,zone3...即可。 -# 如果zone下面有多个机器,则换行一起列出来即可。比如: +# Please make sure every zone contains the same number of machines. For multiple zones, add zone2, zone3... above, following the zone1 format. +# If a zone contains several machines, list them on separate lines. For example: # [zone1] # localhost ansible_ssh_host=127.0.0.1 # localhost2 ansible_ssh_host=127.0.0.2 @@ -32,7 +32,7 @@ mds_subnet=127.0.0.1/22 defined_healthy_status="cluster is healthy" mds_package_version="0.0.6.1+160be351" tool_package_version="0.0.6.1+160be351" -# 启动命令是否用sudo +# Whether to use sudo in the startup command mds_need_sudo=True mds_config_path=/etc/curve/mds.conf mds_log_dir=/data/log/curve/mds @@ -90,7 +90,7 @@ chunkserver_subnet=127.0.0.1/22 global_enable_external_server=True chunkserver_external_subnet=127.0.0.1/22 chunkserver_s3_config_path=/etc/curve/cs_s3.conf -# chunkserver使用的client相关的配置 +# Client-related configuration used by the chunkserver chunkserver_client_config_path=/etc/curve/cs_client.conf client_register_to_mds=False client_chunkserver_op_max_retry=3 @@ -149,10 +149,10 @@ sudo_or_not=True ansible_become_user=curve ansible_become_flags=-iu curve update_config_with_puppet=False -# 启动服务要用到ansible的异步操作,否则ansible退出后chunkserver也会退出 -# 异步等待结果的总时间 +# Starting a service uses ansible's async operation, otherwise the chunkserver would exit together with ansible +# Total time to wait asynchronously for the result service_async=5 -# 异步查询结果的间隔 +# Interval between asynchronous polls of the result service_poll=1 install_with_deb=False restart_directly=False diff --git a/curvefs/conf/curvebs_client.conf b/curvefs/conf/curvebs_client.conf index e0eb4d70f2..23fc37b087 100644 --- a/curvefs/conf/curvebs_client.conf +++ b/curvefs/conf/curvebs_client.conf @@ -1,29 +1,29 @@ # -################### mds一侧配置信息 ################## +################### MDS-side configuration ################## # -# mds的地址信息,对于mds集群,地址以逗号隔开 +# Addresses of the mds; for an mds cluster, separate the addresses with commas mds.listen.addr=127.0.0.1:6666 -# 初始化阶段向mds注册开关,默认为开 +# Whether to register with the mds during initialization; on by default mds.registerToMDS=true -# 与mds通信的rpc超时时间 +# RPC timeout for communication with the mds mds.rpcTimeoutMS=500 -# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值 +# Maximum rpc timeout for communication with the mds; exponential-backoff timeouts cannot exceed this value mds.maxRPCTimeoutMS=2000 -# 与mds通信重试总时间 +# Total retry time for communication with the mds mds.maxRetryMS=8000 -# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数 +# Switch to another mds once consecutive retries on the current one exceed this limit; the count includes timeout retries mds.maxFailedTimesBeforeChangeMDS=2 -# 与MDS一侧保持一个lease时间内多少次续约 +# Number of lease renewals within one lease period with the MDS mds.refreshTimesPerLease=4 -# mds RPC接口每次重试之前需要先睡眠一段时间 +# Sleep for a while before each retry of an mds RPC mds.rpcRetryIntervalUS=100000 # The normal retry times for trigger wait strategy @@ -36,123 +36,123 @@ mds.maxRetryMsInIOPath=86400000 mds.waitSleepMs=10000 # -################# metacache配置信息 ################ +################# metacache configuration ################ # -# 获取leader的rpc超时时间 +# RPC timeout for getting the leader metacache.getLeaderTimeOutMS=500 -# 获取leader的重试次数 +# Number of retries for getting the leader metacache.getLeaderRetry=5 -# 获取leader接口每次重试之前需要先睡眠一段时间 +# Sleep for a while before each retry of the get-leader call metacache.rpcRetryIntervalUS=100000 # -############### 调度层的配置信息 ############# +############### Scheduling-layer configuration ############# # -# 调度层队列大小,每个文件对应一个队列 -# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。 +# Scheduling-layer queue size; each file has its own queue +# The queue depth affects the client's overall throughput; the queue holds asynchronous IO tasks. schedule.queueCapacity=1000000 -# 队列的执行线程数量 -# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从 -# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候 -# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w -# 性能已经满足需求 +# Number of execution threads serving the queue +# An execution thread simply dequeues an IO task, sends it to the network and returns for the next task. From dequeue +# to finishing the rpc send takes roughly 20us-100us: 20us in the normal case where no leader lookup is needed, +# around 100us when the leader has to be fetched while sending. One thread therefore sustains roughly 100k-500k (10w-50w) ops, +# which already meets the performance requirements schedule.threadpoolSize=2 -# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程 -# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回, -# 这样libcurve不占用qemu的线程,不阻塞其异步调用 +# Task queue introduced to isolate the QEMU-side thread, since QEMU has only a single IO thread +# When QEMU calls the AIO interface, the call is simply pushed onto the task queue and returns, +# so libcurve neither occupies QEMU's thread nor blocks its asynchronous calls isolation.taskQueueCapacity=1000000 -# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程 +# Thread-pool size of the task queue isolating the QEMU thread; defaults to 1 thread isolation.taskThreadPoolSize=1 # -################ 与chunkserver通信相关配置 ############# +################ Configuration for communication with chunkservers ############# # -# 读写接口失败的OP之间重试睡眠 +# Sleep between retries of failed read/write OPs chunkserver.opRetryIntervalUS=100000 -# 失败的OP重试次数 +# Number of retries for a failed OP chunkserver.opMaxRetry=2500000 -# 与chunkserver通信的rpc超时时间 +# RPC timeout for communication with the chunkserver chunkserver.rpcTimeoutMS=1000 -# 开启基于appliedindex的读,用于性能优化 +# Enable reads based on appliedindex, for performance optimization chunkserver.enableAppliedIndexRead=1 -# 重试请求之间睡眠最长时间 -# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间 -# 这个时间最大为maxRetrySleepIntervalUs +# Maximum sleep time between retried requests +# When the network is congested or a chunkserver is overloaded, the sleep time needs to grow; +# it is capped at maxRetrySleepIntervalUs chunkserver.maxRetrySleepIntervalUS=8000000 -# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略 -# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间 -# 这个时间最大为maxTimeoutMS +# Maximum rpc timeout for retried requests; the timeout follows an exponential-backoff strategy +# When timeouts are caused by network congestion, the rpc timeout needs to grow; +# it is capped at maxTimeoutMS chunkserver.maxRPCTimeoutMS=8000 -# 同一个chunkserver连续超时上限次数 -# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable +# Maximum number of consecutive timeouts for the same chunkserver +# Beyond this value a health check is performed; if the health check fails, the chunkserver is marked as unstable chunkserver.maxStableTimeoutTimes=10 -# chunkserver上rpc连续超时后,健康检查请求的超时间 +# Timeout of the health-check request issued after consecutive rpc timeouts on a chunkserver chunkserver.checkHealthTimeoutMs=100 -# 同一个server上unstable的chunkserver数量超过这个值之后 -# 所有的chunkserver都会标记为unstable +# Once the number of unstable chunkservers on the same server exceeds this value, +# all chunkservers on that server are marked as unstable chunkserver.serverStableThreshold=3
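The retry policy described by the two capped settings above is plain exponential backoff. A minimal Python sketch; the constants mirror this file, the function name is mine:

# Both the rpc timeout and the retry sleep double per retry, clamped to the
# configured ceilings (cf. chunkserver.maxRPCTimeoutMS and
# chunkserver.maxRetrySleepIntervalUS above).
RPC_TIMEOUT_MS = 1000
MAX_RPC_TIMEOUT_MS = 8000
RETRY_SLEEP_US = 100000
MAX_RETRY_SLEEP_US = 8000000

def backoff(retry):
    timeout_ms = min(RPC_TIMEOUT_MS * (2 ** retry), MAX_RPC_TIMEOUT_MS)
    sleep_us = min(RETRY_SLEEP_US * (2 ** retry), MAX_RETRY_SLEEP_US)
    return timeout_ms, sleep_us

for attempt in range(7):
    print(attempt, backoff(attempt))  # timeout caps at 8000 ms, sleep at 8 s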
-# 当底层chunkserver压力大时,可能也会触发unstable -# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang -# 真正宕机的情况下,请求重试一定次数后会处理完成 -# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑 -# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避 +# When the underlying chunkservers are under heavy pressure, unstable may also be triggered +# Since the copyset leader may change, the request timeout would be reset to the default value, causing IO hangs +# In a real-downtime case, the request completes after a certain number of retries +# If it keeps retrying, it is not a downtime case, and the timeout should still follow the exponential-backoff logic +# Once the number of retries of a request exceeds this value, its timeout always enters exponential backoff chunkserver.minRetryTimesForceTimeoutBackoff=5 -# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候 -# 记为悬挂IO,metric会报警 +# When an rpc has been retried more than maxRetryTimesBeforeConsiderSuspend times +# it is recorded as suspended IO and the metric raises an alarm chunkserver.maxRetryTimesBeforeConsiderSuspend=20 # -################# 文件级别配置项 ############# +################# File-level configuration ############# # -# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立 +# Maximum number of outstanding rpcs allowed by libcurve's underlying rpc scheduling; each file's inflight RPCs are counted independently global.fileMaxInFlightRPCNum=128 -# 文件IO下发到底层chunkserver最大的分片KB +# Maximum split size (KB) when file IO is dispatched to the underlying chunkserver global.fileIOSplitMaxSizeKB=64 # -################# log相关配置 ############### +################# Log-related configuration ############### # # enable logging or not global.logging.enable=True # -# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.logLevel=0 -# 设置log的路径 +# Path of the log global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ /curvebs/client/logs __CURVEADM_TEMPLATE__ -# 单元测试情况下 +# For unit tests # logpath=./runlog/ # -################# 读源卷相关配置 ############### +################# Configuration for reading the source volume ############### # -# 读取源卷时打开的fd超时关闭时间300s +# An fd opened for reading the source volume is closed after a 300s timeout closefd.timeout=300 -# 读取源卷时打开的fd后台线程每600s扫描一遍fdMap,关闭超时fd +# A background thread scans the fdMap every 600s and closes the fds that have timed out closefd.timeInterval=600 # -############### metric 配置信息 ############# +############### Metric configuration ############# # global.metricDummyServerStartPort=9000 -# 是否关闭健康检查: true/关闭 false/不关闭 +# Whether to turn off the health check: true = off, false = on global.turnOffHealthCheck=true #
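The closefd pair above describes a simple timed fd reaper. A hedged Python sketch of that behaviour; the map and the eviction are stand-ins for the client's internals:

import threading
import time

fd_map = {}  # fd -> last-use timestamp, maintained by the read path
TIMEOUT_S, SCAN_INTERVAL_S = 300, 600  # closefd.timeout / closefd.timeInterval

def scan_fd_map():
    # Walk the map every SCAN_INTERVAL_S seconds and evict idle fds.
    while True:
        now = time.time()
        for fd, last_used in list(fd_map.items()):
            if now - last_used > TIMEOUT_S:
                fd_map.pop(fd, None)  # the real client would close the fd here
        time.sleep(SCAN_INTERVAL_S)

threading.Thread(target=scan_fd_map, daemon=True).start()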
diff --git a/curvefs/monitor/grafana-report.py b/curvefs/monitor/grafana-report.py index 016473a509..16f8ce65cd 100644 --- a/curvefs/monitor/grafana-report.py +++ b/curvefs/monitor/grafana-report.py @@ -13,17 +13,18 @@ sender = 'Grafana' to_address = ['xxxxxxxxx@163.com'] username = 'xxxxxxxxx@163.com' -password = 'xxxxxxxxx' # SMTP授权码 +password = 'xxxxxxxxx' # SMTP authorization code smtpserver = 'xxxx.163.com:1234' -sourcefile= '/etc/curvefs/monitor/grafana/report/report.tex' -imagedir= '/etc/curvefs/monitor/grafana/report/images/' -pdfpath= '/etc/curvefs/monitor/grafana/report/report.pdf' +sourcefile = '/etc/curvefs/monitor/grafana/report/report.tex' +imagedir = '/etc/curvefs/monitor/grafana/report/images/' +pdfpath = '/etc/curvefs/monitor/grafana/report/report.pdf' clustername = '【CURVE】xxxxxxxxx' grafanauri = '127.0.0.1:3000' reporteruri = '127.0.0.1:8686' dashboardid = 'xxxxxxxxx' apitoken = 'xxxxxxxxx' + def get_images(): image_name_list = [] file = open(sourcefile, 'r') @@ -32,16 +33,17 @@ def get_images(): # print (line) prefix_image_name = re.findall(r'image\d+', line) if prefix_image_name: - print (prefix_image_name) + print(prefix_image_name) image_name_list.append(prefix_image_name[0]) line = file.readline() file.close() return image_name_list + def getMsgImage(image_name): file_name = imagedir+image_name+'.png' - print (file_name) + print(file_name) fp = open(file_name, 'rb') msgImage = MIMEImage(fp.read()) fp.close() @@ -49,6 +51,7 @@ msgImage.add_header("Content-Disposition", "inline", filename=file_name) return msgImage +
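getMsgImage relies on the standard inline-image mechanism of MIME mail: the HTML body references cid:NAME and the image part carries a matching Content-ID header. A self-contained sketch with placeholder bytes instead of a real PNG:

from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart('related')
# The HTML body refers to the image part by its Content-ID.
msg.attach(MIMEText('<img src="cid:image1">', 'html', 'utf-8'))
img = MIMEImage(b'not-a-real-png', _subtype='png')  # placeholder bytes
img.add_header('Content-ID', '<image1>')
img.add_header('Content-Disposition', 'inline', filename='image1.png')
msg.attach(img)
print(msg.as_string()[:200])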

def attach_body(msgRoot): image_list = get_images() @@ -57,36 +60,41 @@ def attach_body(msgRoot): image_body += ('%s' % (image, image)) msgRoot.attach(getMsgImage(image)) - html_str = '%s' % (image_body) + html_str = '%s' % ( + image_body) mailMsg = """
-可点击如下链接在grafana面板中查看(若显示混乱,请在附件pdf中查看)
-grafana链接
+You can click the following link to view the dashboard in Grafana (if displayed incorrectly, please check the attached PDF).
+grafana link
""" % (grafanauri) mailMsg += html_str print(mailMsg) - content = MIMEText(mailMsg,'html','utf-8') + content = MIMEText(mailMsg, 'html', 'utf-8') msgRoot.attach(content) -# 发送dashboard日报邮件 +# Sending dashboard daily report email + + def send_mail(): time_now = int(Time.time()) time_local = Time.localtime(time_now) - dt = Time.strftime("%Y%m%d",time_local) + dt = Time.strftime("%Y%m%d", time_local) msgRoot = MIMEMultipart('related') - msgRoot['Subject'] = '%s集群监控日报-%s' % (clustername, dt) + msgRoot['Subject'] = '%sCluster Monitoring Daily Report-%s' % ( + clustername, dt) msgRoot['From'] = sender - msgRoot['To'] = ",".join( to_address ) # 发给多人 + msgRoot['To'] = ",".join(to_address) # Send to multiple recipients - # 添加pdf附件 + # Add PDF attachment pdf_attach = MIMEText(open(pdfpath, 'rb').read(), 'base64', 'utf-8') pdf_attach["Content-Type"] = 'application/octet-stream' - # 这里的filename可以任意写,写什么名字,邮件中显示什么名字 - pdf_attach["Content-Disposition"] = 'attachment; filename="reporter-{}.pdf"'.format(dt) + # The filename here can be anything, whatever name is written will be displayed in the email + pdf_attach["Content-Disposition"] = 'attachment; filename="reporter-{}.pdf"'.format( + dt) msgRoot.attach(pdf_attach) - # 添加正文 + # Add the body attach_body(msgRoot) smtp = smtplib.SMTP_SSL(smtpserver) @@ -94,11 +102,13 @@ def send_mail(): smtp.sendmail(sender, to_address, msgRoot.as_string()) smtp.quit() + def clear(): shutil.rmtree(imagedir) os.mkdir(imagedir) os.chmod(imagedir, 0777) + def generate_report(): downloadcmd = ( "wget -O %s " @@ -108,10 +118,12 @@ def generate_report(): print(downloadcmd) os.system(downloadcmd) + def main(): generate_report() send_mail() clear() + if __name__ == '__main__': main() diff --git a/curvefs/monitor/grafana/provisioning/dashboards/mds.json b/curvefs/monitor/grafana/provisioning/dashboards/mds.json index 09de6b31f7..a90a8f13c0 100644 --- a/curvefs/monitor/grafana/provisioning/dashboards/mds.json +++ b/curvefs/monitor/grafana/provisioning/dashboards/mds.json @@ -290,7 +290,7 @@ { "columns": [], "datasource": null, - "description": "mds的配置", + "description": "Configuration of MDS", "fieldConfig": { "defaults": { "custom": { @@ -336,7 +336,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -352,7 +352,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -368,7 +368,7 @@ "unit": "short" }, { - "alias": "配置值", + "alias": "Configuration value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", diff --git a/curvefs/src/mds/metaserverclient/metaserver_client.cpp b/curvefs/src/mds/metaserverclient/metaserver_client.cpp index 739704f62a..f9b1278562 100644 --- a/curvefs/src/mds/metaserverclient/metaserver_client.cpp +++ b/curvefs/src/mds/metaserverclient/metaserver_client.cpp @@ -21,6 +21,7 @@ */ #include "curvefs/src/mds/metaserverclient/metaserver_client.h" + #include #include @@ -28,30 +29,30 @@ namespace curvefs { namespace mds { -using curvefs::metaserver::Time; -using curvefs::metaserver::CreateRootInodeRequest; -using curvefs::metaserver::CreateRootInodeResponse; -using curvefs::metaserver::CreateManageInodeRequest; -using curvefs::metaserver::CreateManageInodeResponse; +using curvefs::mds::topology::BuildPeerIdWithAddr; +using curvefs::mds::topology::SplitPeerId; using curvefs::metaserver::CreateDentryRequest; using curvefs::metaserver::CreateDentryResponse; +using curvefs::metaserver::CreateManageInodeRequest; 
+using curvefs::metaserver::CreateManageInodeResponse; +using curvefs::metaserver::CreateRootInodeRequest; +using curvefs::metaserver::CreateRootInodeResponse; using curvefs::metaserver::DeleteDentryRequest; using curvefs::metaserver::DeleteDentryResponse; using curvefs::metaserver::DeleteInodeRequest; using curvefs::metaserver::DeleteInodeResponse; +using curvefs::metaserver::Dentry; using curvefs::metaserver::MetaServerService_Stub; using curvefs::metaserver::MetaStatusCode; -using curvefs::metaserver::Dentry; +using curvefs::metaserver::Time; using curvefs::metaserver::copyset::COPYSET_OP_STATUS; using curvefs::metaserver::copyset::CopysetService_Stub; -using curvefs::mds::topology::SplitPeerId; -using curvefs::mds::topology::BuildPeerIdWithAddr; template FSStatusCode MetaserverClient::SendRpc2MetaServer( - Request *request, Response *response, const LeaderCtx &ctx, - void (T::*func)(google::protobuf::RpcController *, const Request *, - Response *, google::protobuf::Closure *)) { + Request* request, Response* response, const LeaderCtx& ctx, + void (T::*func)(google::protobuf::RpcController*, const Request*, Response*, + google::protobuf::Closure*)) { bool refreshLeader = true; uint32_t maxRetry = options_.rpcRetryTimes; @@ -110,14 +111,14 @@ FSStatusCode MetaserverClient::SendRpc2MetaServer( } } -FSStatusCode MetaserverClient::GetLeader(const LeaderCtx &ctx, - std::string *leader) { +FSStatusCode MetaserverClient::GetLeader(const LeaderCtx& ctx, + std::string* leader) { GetLeaderRequest2 request; GetLeaderResponse2 response; request.set_poolid(ctx.poolId); request.set_copysetid(ctx.copysetId); - for (const std::string &item : ctx.addrs) { + for (const std::string& item : ctx.addrs) { LOG(INFO) << "GetLeader from " << item; if (channel_.Init(item.c_str(), nullptr) != 0) { LOG(ERROR) << "Init channel to metaserver: " << item << " failed!"; @@ -162,7 +163,7 @@ FSStatusCode MetaserverClient::GetLeader(const LeaderCtx &ctx, FSStatusCode MetaserverClient::CreateRootInode( uint32_t fsId, uint32_t poolId, uint32_t copysetId, uint32_t partitionId, uint32_t uid, uint32_t gid, uint32_t mode, - const std::set &addrs) { + const std::set& addrs) { CreateRootInodeRequest request; CreateRootInodeResponse response; request.set_poolid(poolId); @@ -213,7 +214,7 @@ FSStatusCode MetaserverClient::CreateRootInode( FSStatusCode MetaserverClient::CreateManageInode( uint32_t fsId, uint32_t poolId, uint32_t copysetId, uint32_t partitionId, uint32_t uid, uint32_t gid, uint32_t mode, ManageInodeType manageType, - const std::set &addrs) { + const std::set& addrs) { CreateManageInodeRequest request; CreateManageInodeResponse response; request.set_poolid(poolId); @@ -259,14 +260,14 @@ FSStatusCode MetaserverClient::CreateManageInode( FSStatusCode MetaserverClient::CreateDentry( uint32_t fsId, uint32_t poolId, uint32_t copysetId, uint32_t partitionId, - uint64_t parentInodeId, const std::string &name, uint64_t inodeId, - const std::set &addrs) { + uint64_t parentInodeId, const std::string& name, uint64_t inodeId, + const std::set& addrs) { CreateDentryRequest request; CreateDentryResponse response; request.set_poolid(poolId); request.set_copysetid(copysetId); request.set_partitionid(partitionId); - Dentry *d = new Dentry; + Dentry* d = new Dentry; d->set_fsid(fsId); d->set_inodeid(inodeId); d->set_parentinodeid(parentInodeId); @@ -276,7 +277,7 @@ FSStatusCode MetaserverClient::CreateDentry( request.set_allocated_dentry(d); struct timespec now; clock_gettime(CLOCK_REALTIME, &now); - Time *tm = new Time(); + Time* tm = 
new Time(); tm->set_sec(now.tv_sec); tm->set_nsec(now.tv_nsec); request.set_allocated_create(tm); @@ -309,11 +310,10 @@ FSStatusCode MetaserverClient::CreateDentry( } } -FSStatusCode -MetaserverClient::DeleteDentry(uint32_t poolId, uint32_t copysetId, - uint32_t partitionId, uint32_t fsId, - uint64_t parentInodeId, const std::string &name, - const std::set &addrs) { +FSStatusCode MetaserverClient::DeleteDentry( + uint32_t poolId, uint32_t copysetId, uint32_t partitionId, uint32_t fsId, + uint64_t parentInodeId, const std::string& name, + const std::set& addrs) { DeleteDentryRequest request; DeleteDentryResponse response; request.set_poolid(poolId); @@ -342,13 +342,14 @@ MetaserverClient::DeleteDentry(uint32_t poolId, uint32_t copysetId, return ret; } else { switch (response.statuscode()) { - case MetaStatusCode::OK: - return FSStatusCode::OK; - default: - LOG(ERROR) << "DeleteDentry failed, request = " - << request.ShortDebugString() - << ", response statuscode = " << response.statuscode(); - return FSStatusCode::DELETE_DENTRY_FAIL; + case MetaStatusCode::OK: + return FSStatusCode::OK; + default: + LOG(ERROR) << "DeleteDentry failed, request = " + << request.ShortDebugString() + << ", response statuscode = " + << response.statuscode(); + return FSStatusCode::DELETE_DENTRY_FAIL; } } } @@ -372,7 +373,7 @@ FSStatusCode MetaserverClient::DeleteInode(uint32_t fsId, uint64_t inodeId) { request.set_partitionid(0); request.set_fsid(fsId); request.set_inodeid(inodeId); - // TODO(@威姐): 适配新的proto + // TODO(@ Wei Jie): Adapt to the new proto request.set_copysetid(1); request.set_poolid(1); request.set_partitionid(1); @@ -398,10 +399,10 @@ FSStatusCode MetaserverClient::DeleteInode(uint32_t fsId, uint64_t inodeId) { FSStatusCode MetaserverClient::CreatePartition( uint32_t fsId, uint32_t poolId, uint32_t copysetId, uint32_t partitionId, - uint64_t idStart, uint64_t idEnd, const std::set &addrs) { + uint64_t idStart, uint64_t idEnd, const std::set& addrs) { curvefs::metaserver::CreatePartitionRequest request; curvefs::metaserver::CreatePartitionResponse response; - PartitionInfo *partition = request.mutable_partition(); + PartitionInfo* partition = request.mutable_partition(); partition->set_fsid(fsId); partition->set_poolid(poolId); partition->set_copysetid(copysetId); @@ -448,7 +449,7 @@ FSStatusCode MetaserverClient::CreatePartition( FSStatusCode MetaserverClient::DeletePartition( uint32_t poolId, uint32_t copysetId, uint32_t partitionId, - const std::set &addrs) { + const std::set& addrs) { curvefs::metaserver::DeletePartitionRequest request; curvefs::metaserver::DeletePartitionResponse response; request.set_poolid(poolId); @@ -489,8 +490,8 @@ FSStatusCode MetaserverClient::DeletePartition( } } -FSStatusCode MetaserverClient::CreateCopySet(uint32_t poolId, - uint32_t copysetId, const std::set &addrs) { +FSStatusCode MetaserverClient::CreateCopySet( + uint32_t poolId, uint32_t copysetId, const std::set& addrs) { CreateCopysetRequest request; CreateCopysetResponse response; auto copyset = request.add_copysets(); @@ -500,7 +501,7 @@ FSStatusCode MetaserverClient::CreateCopySet(uint32_t poolId, copyset->add_peers()->set_address(BuildPeerIdWithAddr(item)); } - for (const std::string &item : addrs) { + for (const std::string& item : addrs) { if (channel_.Init(item.c_str(), nullptr) != 0) { LOG(ERROR) << "Init channel to metaserver: " << item << " failed!"; return FSStatusCode::RPC_ERROR; @@ -544,7 +545,7 @@ FSStatusCode MetaserverClient::CreateCopySet(uint32_t poolId, } FSStatusCode 
MetaserverClient::CreateCopySetOnOneMetaserver( - uint32_t poolId, uint32_t copysetId, const std::string &addr) { + uint32_t poolId, uint32_t copysetId, const std::string& addr) { CreateCopysetRequest request; CreateCopysetResponse response; diff --git a/curvefs/src/metaserver/copyset/conf_epoch_file.h b/curvefs/src/metaserver/copyset/conf_epoch_file.h index abe14f2f8b..ff3953b080 100644 --- a/curvefs/src/metaserver/copyset/conf_epoch_file.h +++ b/curvefs/src/metaserver/copyset/conf_epoch_file.h @@ -41,28 +41,30 @@ class ConfEpochFile { explicit ConfEpochFile(curve::fs::LocalFileSystem* fs) : fs_(fs) {} /** - * 加载快照文件中的配置版本 - * @param path:文件路径 - * @param logicPoolID:逻辑池id - * @param copysetID:复制组id - * @param epoch:配置版本,出参,返回读取的epoch值 - * @return 0,成功; -1失败 + * Load the configuration version from the snapshot file + * @param path: File path + * @param logicPoolID: Logical pool ID + * @param copysetID: Copyset ID + * @param epoch: Configuration version; output parameter that returns the + * epoch value read + * @return 0 on success; -1 on failure */ int Load(const std::string& path, PoolId* poolId, CopysetId* copysetId, uint64_t* epoch); /** - * 保存配置版本信息到快照文件中序列化的格式如下,处理head表示长度,使用二 - * 进制,其它都是文本格式,便于必要的时候能够直接用查看,sync保证数据落盘 - * | head | 配置版本信息 | - * | 8 bytes size_t | uint32_t | 变 长文本 | - * | length | crc32 | logic pool id | copyset id | epoch | - * 上面的持久化使用 ‘:’ 分隔 - * @param path:文件路径 - * @param logicPoolID:逻辑池id - * @param copysetID:复制组id - * @param epoch:配置版本 - * @return 0成功; -1失败 + * Serialize the configuration version information and save it to the + * snapshot file. The format is as follows: the head (the length) is + * binary, everything else is text, so the file can be inspected directly + * when necessary; sync guarantees the data reaches the disk. + * | head | configuration version information | + * | 8 bytes size_t | uint32_t | variable-length text | + * | length | crc32 | logic pool id | copyset id | epoch | + * The persisted text fields above are separated by ':' + * @param path: File path + * @param logicPoolID: Logical pool ID + * @param copysetID: Copyset ID + * @param epoch: Configuration version + * @return 0 on success; -1 on failure */ int Save(const std::string& path, const PoolId poolId, const CopysetId copysetId, const uint64_t epoch);
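To make the on-disk layout documented above concrete, here is a hedged Python sketch of an encoder for it. The byte order, and exactly which bytes the crc covers, are assumptions for illustration only:

import struct
import zlib

# head = 8-byte length + uint32 crc32, followed by the ':'-separated text
# "logicpoolid:copysetid:epoch" (little-endian assumed here).
def encode_conf_epoch(pool_id, copyset_id, epoch):
    data = ("%d:%d:%d" % (pool_id, copyset_id, epoch)).encode()
    head = struct.pack("<Q", len(data)) + struct.pack("<I", zlib.crc32(data))
    return head + data

print(encode_conf_epoch(1, 100, 42))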
diff --git a/curvefs/src/metaserver/inflight_throttle.h b/curvefs/src/metaserver/inflight_throttle.h index fb670b6161..dfbe50bebf 100644 --- a/curvefs/src/metaserver/inflight_throttle.h +++ b/curvefs/src/metaserver/inflight_throttle.h @@ -30,7 +30,7 @@ namespace curvefs { namespace metaserver { /** - * 负责控制最大inflight request数量 + * Controls the maximum number of inflight requests */ class InflightThrottle { public: @@ -40,8 +40,8 @@ class InflightThrottle { ~InflightThrottle() = default; /** - * @brief: 判断是否过载 - * @return true,过载,false没有过载 + * @brief: Check whether the server is overloaded + * @return true if overloaded, false otherwise */ bool IsOverLoad() { if (maxInflightRequest_ >= @@ -53,23 +53,23 @@ class InflightThrottle { } /** - * @brief: inflight request计数加1 + * @brief: Increase the inflight request count by 1 */ void Increment() { inflightRequestCount_.fetch_add(1, std::memory_order_relaxed); } /** - * @brief: inflight request计数减1 + * @brief: Decrease the inflight request count by 1 */ void Decrement() { inflightRequestCount_.fetch_sub(1, std::memory_order_relaxed); } private: - // 当前inflight request数量 + // Current number of inflight requests std::atomic inflightRequestCount_; - // 最大的inflight request数量 + // Maximum number of inflight requests const uint64_t maxInflightRequest_; }; diff --git a/curvefs/test/mds/schedule/coordinator_test.cpp b/curvefs/test/mds/schedule/coordinator_test.cpp index e759da89ed..a5dd3736de 100644 --- a/curvefs/test/mds/schedule/coordinator_test.cpp +++ b/curvefs/test/mds/schedule/coordinator_test.cpp @@ -21,22 +21,24 @@ */ #include "curvefs/src/mds/schedule/coordinator.h" + #include + #include "curvefs/src/mds/common/mds_define.h" +#include "curvefs/test/mds/mock/mock_topoAdapter.h" #include "curvefs/test/mds/mock/mock_topology.h" #include "curvefs/test/mds/schedule/common.h" -#include "curvefs/test/mds/mock/mock_topoAdapter.h" -using ::curvefs::mds::topology::MockTopology; using ::curvefs::mds::schedule::ScheduleOption; +using ::curvefs::mds::topology::MockTopology; using ::curvefs::mds::topology::TopologyIdGenerator; -using ::curvefs::mds::topology::TopologyTokenGenerator; using ::curvefs::mds::topology::TopologyStorage; +using ::curvefs::mds::topology::TopologyTokenGenerator; using ::std::chrono::steady_clock; +using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; -using ::testing::_; using ::curvefs::mds::topology::UNINITIALIZE_ID; @@ -51,7 +53,7 @@ class CoordinatorTest : public ::testing::Test { void SetUp() override { topo_ = std::make_shared(idGenerator_, tokenGenerator_, - storage_); + storage_); metric_ = std::make_shared(topo_); topoAdapter_ = std::make_shared(); coordinator_ = std::make_shared(topoAdapter_); @@ -132,7 +134,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { // 2. test copySet has operator and not execute @@ -149,20 +151,20 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { Operator opRes; ASSERT_TRUE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); - // 第一次下发配置 + // Deliver the configuration for the first time ASSERT_EQ(4, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ(ConfigChangeType::ADD_PEER, res.type()); - // 第二次获取metaserver失败 + // Getting the metaserver fails the second time ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + // 3.
Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter_, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -174,19 +176,19 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); Operator opRes; ASSERT_FALSE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到metaserver的信息 + // Unable to obtain information on metaserver ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(_, _)) .WillOnce(Return(false)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); ASSERT_FALSE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); } @@ -204,7 +206,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - info.configChangeInfo, &res)); + info.configChangeInfo, &res)); } { @@ -217,7 +219,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(testOperator)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { @@ -228,7 +230,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { @@ -237,7 +239,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(Return(false)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } } @@ -270,7 +272,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { // 2. test copySet has operator and not execute @@ -289,21 +291,21 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { Operator opRes; ASSERT_TRUE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); - // 第一次下发配置 + // First configuration distribution ASSERT_EQ(4, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ("127.0.0.1:9001:0", res.oldpeer().address()); ASSERT_EQ(ConfigChangeType::CHANGE_PEER, res.type()); - // 第二次获取metaserver失败 + // Failed to obtain metaserver for the second time ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + // 3. 
Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter_, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -315,19 +317,19 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); Operator opRes; ASSERT_FALSE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到metaserver的信息 + // Unable to obtain information on metaserver ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(_, _)) .WillOnce(Return(false)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); ASSERT_FALSE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); } @@ -345,7 +347,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - info.configChangeInfo, &res)); + info.configChangeInfo, &res)); } { @@ -358,7 +360,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(testOperator)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { @@ -369,7 +371,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { @@ -378,7 +380,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(Return(false)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } } @@ -386,15 +388,16 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { ScheduleOption scheduleOption; scheduleOption.operatorConcurrent = 4; coordinator_->InitScheduler(scheduleOption, - std::make_shared(topo_)); + std::make_shared(topo_)); { - // 1. copyset上没有要变更的operator + // 1. There are no operators to change on the copyset ASSERT_FALSE(coordinator_->MetaserverGoingToAdd(1, CopySetKey{1, 1})); } { - // 2. copyset上有leader变更,并且目的leader为metaserver-1 + // 2. There is a leader change on the copyset and the target leader is + // metaserver-1 Operator testOperator( 1, CopySetKey{1, 1}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(2, 1)); @@ -403,7 +406,7 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 3. copyset上有remove peer操作 + // 3. There is a remove peer operation on the copyset Operator testOperator( 1, CopySetKey{1, 2}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(1)); @@ -412,7 +415,8 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 4. copyset上有add peer操作, target不是1 + // 4. There is an add peer operation on the copyset, but the target is + // not 1 Operator testOperator( 1, CopySetKey{1, 3}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(2)); @@ -421,7 +425,7 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 5. copyset上有add peer操作, target是1 + // 5. 
There is an add peer operation on the copyset, with a target of 1 Operator testOperator( 1, CopySetKey{1, 4}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(1)); @@ -430,7 +434,8 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 6. copyset上有change peer操作,target不是1 + // 6. There is a change peer operation on the copyset, but the target is + // not 1 Operator testOperator( 1, CopySetKey{1, 5}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(4, 2)); @@ -439,7 +444,8 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 7. copyset上有change peer操作,target是1 + // 7. There is a change peer operation on the copyset, with a target of + // 1 Operator testOperator( 1, CopySetKey{1, 6}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(4, 1)); @@ -449,7 +455,7 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } TEST_F(CoordinatorTest, test_SchedulerSwitch) { - ScheduleOption scheduleOption = GetTrueScheduleOption(); + ScheduleOption scheduleOption = GetTrueScheduleOption(); scheduleOption.copysetSchedulerIntervalSec = 0; scheduleOption.leaderSchedulerIntervalSec = 0; scheduleOption.recoverSchedulerIntervalSec = 0; @@ -459,7 +465,7 @@ TEST_F(CoordinatorTest, test_SchedulerSwitch) { EXPECT_CALL(*topoAdapter_, Getpools()).Times(0); EXPECT_CALL(*topoAdapter_, GetMetaServerInfos()).Times(0); - // 设置flag都为false + // Set flags to false gflags::SetCommandLineOption("enableCopySetScheduler", "false"); gflags::SetCommandLineOption("enableRecoverScheduler", "false"); gflags::SetCommandLineOption("enableLeaderScheduler", "false"); @@ -471,18 +477,18 @@ TEST_F(CoordinatorTest, test_SchedulerSwitch) { TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { /* - 场景: - metaserver1: offline 有恢复op - metaserver2: offline 没有恢复op,没有candidate,有其他op - metaserver3: offline 有candidate + Scenario: + metaserver1: offline has recovery op + metaserver2: offline has no recovery op, no candidate, and other op + metaserver3: offline has a candidate metaserver4: online metaserver4: online */ - // 获取option + // Get option ScheduleOption scheduleOption = GetFalseScheduleOption(); coordinator_->InitScheduler(scheduleOption, metric_); - // 构造metaserver + // Construct metaserver std::vector metaserverInfos; std::vector peerInfos; for (int i = 1; i <= 6; i++) { @@ -497,7 +503,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { peerInfos.emplace_back(peer); } - // 构造op + // Construct op Operator opForCopySet1(1, CopySetKey{1, 1}, OperatorPriority::HighPriority, steady_clock::now(), std::make_shared(1, 4)); @@ -508,7 +514,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { steady_clock::now(), std::make_shared(2, 4)); ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(opForCopySet2)); - // 构造copyset + // Construct a copyset std::vector peersFor2({peerInfos[1], peerInfos[3], peerInfos[4]}); CopySetInfo copyset2(CopySetKey{1, 2}, 1, 4, peersFor2, ConfigChangeInfo{}); @@ -523,7 +529,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { CopySetInfo copyset3(CopySetKey{1, 3}, 1, 4, peersFor3, configChangeInfoForCS3); - // 1. 查询所有metaserver + // 1. Query all metaservers { EXPECT_CALL(*topoAdapter_, GetMetaServerInfos()) .WillOnce(Return(metaserverInfos)); @@ -545,7 +551,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { ASSERT_FALSE(statusMap[6]); } - // 2. 查询指定metaserver, 但metaserver不存在 + // 2. 
Query specified metaserver, but metaserver does not exist { EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(7, _)) .WillOnce(Return(false)); @@ -556,7 +562,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { std::vector{7}, &statusMap)); } - // 3. 查询指定metaserver, 不在恢复中 + // 3. Query specified metaserver, not in recovery { EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(6, _)) .WillOnce( diff --git a/curvefs/test/mds/schedule/operatorStep_test.cpp b/curvefs/test/mds/schedule/operatorStep_test.cpp index d6378bb927..821d97fac7 100644 --- a/curvefs/test/mds/schedule/operatorStep_test.cpp +++ b/curvefs/test/mds/schedule/operatorStep_test.cpp @@ -22,6 +22,7 @@ #include #include + #include "curvefs/test/mds/schedule/common.h" namespace curvefs { @@ -29,8 +30,8 @@ namespace mds { namespace schedule { TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { auto originCopySetInfo = GetCopySetInfoForTest(); - std::shared_ptr transferLeader - = std::make_shared(1, 2); + std::shared_ptr transferLeader = + std::make_shared(1, 2); auto testCopySetInfo = originCopySetInfo; ApplyStatus applyStatus; @@ -48,21 +49,21 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { // 2. transfer leader complete testCopySetInfo.leader = 2; ASSERT_EQ(ApplyStatus::Finished, - transferLeader->Apply(testCopySetInfo, ©SetConf)); + transferLeader->Apply(testCopySetInfo, ©SetConf)); } { // 3. report leader is not record old/target leader in operator testCopySetInfo.leader = 3; ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, ©SetConf)); + transferLeader->Apply(testCopySetInfo, ©SetConf)); } { // 4. transfer leader fail testCopySetInfo.leader = 1; - CandidateError *candidateError = new CandidateError(); - std::string *errMsg = new std::string("transfer leader err"); + CandidateError* candidateError = new CandidateError(); + std::string* errMsg = new std::string("transfer leader err"); candidateError->set_allocated_errmsg(errMsg); candidateError->set_errtype(1); testCopySetInfo.candidatePeerInfo = PeerInfo(2, 1, 1, "", 9000); @@ -75,7 +76,7 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, ©SetConf)); + transferLeader->Apply(testCopySetInfo, ©SetConf)); } { @@ -89,14 +90,14 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.release_err(); ASSERT_EQ(ApplyStatus::OnGoing, - transferLeader->Apply(testCopySetInfo, ©SetConf)); + transferLeader->Apply(testCopySetInfo, ©SetConf)); } { // 6. tarnfer leader type not complete testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, ©SetConf)); + transferLeader->Apply(testCopySetInfo, ©SetConf)); } { @@ -109,7 +110,7 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { ConfigChangeType::TRANSFER_LEADER); testCopySetInfo.configChangeInfo.set_allocated_peer(replica); ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, ©SetConf)); + transferLeader->Apply(testCopySetInfo, ©SetConf)); } } @@ -126,8 +127,7 @@ TEST(OperatorSepTest, OperatorSepTest_AddPeer_Test) { // 2. 
add peer complete auto testCopySetInfo = originCopySetInfo; - testCopySetInfo.peers.emplace_back( - PeerInfo(4, 3, 4, "192.168.10.4", 9000)); + testCopySetInfo.peers.emplace_back(PeerInfo(4, 3, 4, "192.168.10.4", 9000)); ASSERT_EQ(ApplyStatus::Finished, addPeer->Apply(testCopySetInfo, ©SetConf)); @@ -140,8 +140,8 @@ TEST(OperatorSepTest, OperatorSepTest_AddPeer_Test) { testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); testCopySetInfo.configChangeInfo.set_finished(false); - std::string *errMsg = new std::string("add peer failed"); - CandidateError *candidateError = new CandidateError(); + std::string* errMsg = new std::string("add peer failed"); + CandidateError* candidateError = new CandidateError(); candidateError->set_errtype(2); candidateError->set_allocated_errmsg(errMsg); testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); @@ -157,7 +157,7 @@ TEST(OperatorSepTest, OperatorSepTest_AddPeer_Test) { // 5. add peer type not complete testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::REMOVE_PEER); ASSERT_EQ(ApplyStatus::Failed, - addPeer->Apply(testCopySetInfo, ©SetConf)); + addPeer->Apply(testCopySetInfo, ©SetConf)); // 6. config change item do not match testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); @@ -173,8 +173,7 @@ TEST(OperatorSepTest, OperatorSepTest_AddPeer_Test) { TEST(OperatorStepTest, OperatorStepTest_RemovePeer_Test) { auto originCopySetInfo = GetCopySetInfoForTest(); - std::shared_ptr - removePeer = std::make_shared(3); + std::shared_ptr removePeer = std::make_shared(3); // 1. remove peer haven't started CopySetConf copySetConf; @@ -198,13 +197,12 @@ TEST(OperatorStepTest, OperatorStepTest_RemovePeer_Test) { replica->set_address("192.10.12.4:9000:0"); testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::REMOVE_PEER); - std::string *errMsg = new std::string("remove peer err"); - CandidateError *candidateError = new CandidateError(); + std::string* errMsg = new std::string("remove peer err"); + CandidateError* candidateError = new CandidateError(); candidateError->set_errtype(2); candidateError->set_allocated_errmsg(errMsg); testCopySetInfo.configChangeInfo.set_finished(false); - testCopySetInfo.configChangeInfo.set_allocated_err( - candidateError); + testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); ASSERT_EQ(ApplyStatus::Failed, removePeer->Apply(testCopySetInfo, ©SetConf)); @@ -217,7 +215,7 @@ TEST(OperatorStepTest, OperatorStepTest_RemovePeer_Test) { // 5. remove peer type not complete testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); ASSERT_EQ(ApplyStatus::Failed, - removePeer->Apply(testCopySetInfo, ©SetConf)); + removePeer->Apply(testCopySetInfo, ©SetConf)); // 5. config change item do not match testCopySetInfo.candidatePeerInfo = PeerInfo(10, 1, 1, "", 9000); @@ -233,31 +231,31 @@ TEST(OperatorStepTest, OperatorStepTest_RemovePeer_Test) { TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { auto originCopySetInfo = GetCopySetInfoForTest(); - std::shared_ptr - changePeer = std::make_shared(3, 4); + std::shared_ptr changePeer = + std::make_shared(3, 4); CopySetConf copySetConf; - // 1. change peer还未开始 + // 1. 
The change peer has not yet started { ASSERT_EQ(ApplyStatus::Ordered, - changePeer->Apply(originCopySetInfo, ©SetConf)); + changePeer->Apply(originCopySetInfo, ©SetConf)); ASSERT_EQ(4, copySetConf.configChangeItem); ASSERT_EQ(3, copySetConf.oldOne); ASSERT_EQ(ConfigChangeType::CHANGE_PEER, copySetConf.type); } auto testCopySetInfo = originCopySetInfo; - // 2. change peer完成 + // 2. change peer completed { auto testCopySetInfo = originCopySetInfo; testCopySetInfo.peers.erase(testCopySetInfo.peers.begin() + 2); testCopySetInfo.peers.emplace_back( - PeerInfo(4, 3, 4, "192.168.10.4", 9000)); + PeerInfo(4, 3, 4, "192.168.10.4", 9000)); ASSERT_EQ(ApplyStatus::Finished, - changePeer->Apply(testCopySetInfo, ©SetConf)); + changePeer->Apply(testCopySetInfo, ©SetConf)); } - // 3. change peer失败 + // 3. change peer failed { testCopySetInfo = originCopySetInfo; testCopySetInfo.candidatePeerInfo = PeerInfo(4, 1, 1, "", 9000); @@ -268,24 +266,24 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { testCopySetInfo.configChangeInfo.set_type( ConfigChangeType::CHANGE_PEER); testCopySetInfo.configChangeInfo.set_finished(false); - std::string *errMsg = new std::string("add peer failed"); - CandidateError *candidateError = new CandidateError(); + std::string* errMsg = new std::string("add peer failed"); + CandidateError* candidateError = new CandidateError(); candidateError->set_errtype(2); candidateError->set_allocated_errmsg(errMsg); testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); ASSERT_EQ(ApplyStatus::Failed, - changePeer->Apply(testCopySetInfo, ©SetConf)); + changePeer->Apply(testCopySetInfo, ©SetConf)); } - // 4. 上报未完成 + // 4. Reporting incomplete { testCopySetInfo.configChangeInfo.set_finished(false); testCopySetInfo.configChangeInfo.release_err(); ASSERT_EQ(ApplyStatus::OnGoing, - changePeer->Apply(testCopySetInfo, ©SetConf)); + changePeer->Apply(testCopySetInfo, ©SetConf)); } - // 5. 上报的变更类型和mds中的oprator不相符合 + // 5. 
The reported change type does not match the operator in mds { testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); testCopySetInfo.configChangeInfo.set_finished(true); replica = new ::curvefs::common::Peer(); replica->set_id(4); replica->set_address("192.10.12.5:9000:0"); testCopySetInfo.configChangeInfo.set_allocated_peer(replica); ASSERT_EQ(ApplyStatus::Failed, - changePeer->Apply(testCopySetInfo, ©SetConf)); + changePeer->Apply(testCopySetInfo, ©SetConf)); } } } // namespace schedule diff --git a/curvefs/test/mds/schedule/recoverScheduler_test.cpp b/curvefs/test/mds/schedule/recoverScheduler_test.cpp index d48c6a9ee1..32c6e88d18 100644 --- a/curvefs/test/mds/schedule/recoverScheduler_test.cpp +++ b/curvefs/test/mds/schedule/recoverScheduler_test.cpp @@ -21,26 +21,27 @@ */ #include + #include "curvefs/src/mds/common/mds_define.h" #include "curvefs/src/mds/schedule/operatorController.h" #include "curvefs/src/mds/schedule/scheduleMetrics.h" #include "curvefs/src/mds/schedule/scheduler.h" #include "curvefs/src/mds/topology/topology_id_generator.h" +#include "curvefs/test/mds/mock/mock_topoAdapter.h" #include "curvefs/test/mds/mock/mock_topology.h" #include "curvefs/test/mds/schedule/common.h" -#include "curvefs/test/mds/mock/mock_topoAdapter.h" using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; -using ::curvefs::mds::topology::TopologyIdGenerator; -using ::curvefs::mds::topology::MockTopology; using ::curvefs::mds::topology::MockIdGenerator; -using ::curvefs::mds::topology::MockTokenGenerator; using ::curvefs::mds::topology::MockStorage; +using ::curvefs::mds::topology::MockTokenGenerator; +using ::curvefs::mds::topology::MockTopology; +using ::curvefs::mds::topology::TopologyIdGenerator; using ::std::chrono::steady_clock; namespace curvefs { namespace mds { @@ -172,7 +173,7 @@ TEST_F(TestRecoverSheduler, recoverScheduler_->Schedule(); Operator op; ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } @@ -196,7 +197,7 @@ TEST_F(TestRecoverSheduler, test_all_metaServer_online_offline) { EXPECT_CALL(*topoAdapter_, GetAvgScatterWidthInPool(_)) .WillRepeatedly(Return(90)); { - // 1. 所有metaserveronline + // 1. All metaservers online EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id1, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id2, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo2), Return(true))); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id3, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo3), Return(true))); recoverScheduler_->Schedule(); ASSERT_EQ(0, opController_->GetOperators().size()); } { - // 2. 副本数量大于标准,leader挂掉 + // 2. The number of replicas exceeds the standard and the leader goes + // down csInfo1.state = OnlineState::OFFLINE; EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id1, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInPool(_)) .WillRepeatedly(Return(2)); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } { - // 3.
The number of replicas exceeds the standard and the follower goes down opController_->RemoveOperator(op.copysetID); csInfo1.state = OnlineState::ONLINE; csInfo2.state = OnlineState::OFFLINE; @@ -232,12 +235,13 @@ EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id1, _)) .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id2, _)) .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo2), Return(true))); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } { - // 4. 副本数目等于标准, follower挂掉 + // 4. The number of replicas equals the standard and the follower goes + // down opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInPool(_)) .WillRepeatedly(Return(3)); EXPECT_CALL(*topoAdapter_, GetMetaServersInPool(_)) .WillOnce(Return(std::vector{})); EXPECT_CALL(*topoAdapter_, CreateCopySetAtMetaServer(_, _)) .WillOnce(Return(true)); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(1000), op.timeLimit); } { - // 5. 选不出替换metaserver + // 5. Unable to select a replacement metaserver opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetMetaServersInPool(_)) .WillOnce(Return(std::vector{})); recoverScheduler_->Schedule(); ASSERT_EQ(0, opController_->GetOperators().size()); } { - // 6. 在metaserver上创建copyset失败 + // 6. Failed to create the copyset on the metaserver EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInPool(_)) .WillRepeatedly(Return(3)); std::vector metaserverList( diff --git a/curvefs/test/mds/schedule/scheduleMetrics_test.cpp b/curvefs/test/mds/schedule/scheduleMetrics_test.cpp index 1041519eb6..0a7036ce15 100644 --- a/curvefs/test/mds/schedule/scheduleMetrics_test.cpp +++ b/curvefs/test/mds/schedule/scheduleMetrics_test.cpp @@ -21,23 +21,25 @@ */ #include "curvefs/src/mds/schedule/scheduleMetrics.h" + #include #include #include + #include "curvefs/src/mds/schedule/operatorController.h" #include "curvefs/test/mds/mock/mock_topology.h" -using ::curvefs::mds::topology::MockTopology; using ::curvefs::mds::topology::CopySetKey; +using ::curvefs::mds::topology::MockTopology; using ::curvefs::mds::topology::TopologyIdGenerator; -using ::curvefs::mds::topology::TopologyTokenGenerator; using ::curvefs::mds::topology::TopologyStorage; +using ::curvefs::mds::topology::TopologyTokenGenerator; using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curvefs { namespace mds { @@ -82,7 +84,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { ::curvefs::mds::topology::CopySetInfo addCsInfo(1, 1); addCsInfo.SetCopySetMembers(std::set{1, 2}); { - // 1. 增加normal级别/add类型的operator + // 1. Add a normal-priority operator of add type EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 1}, _)) .WillOnce(DoAll(SetArgPointee<1>(addCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)) @@ -150,7 +152,7 @@ } { - // 2.
Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(addOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->addOpNum.get_value()); @@ -167,11 +169,10 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { rmCsInfo.SetLeader(1); { - // 1. 增加high级别/remove类型的operator + // 1. Add high level/remove type operators EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 2}, _)) .WillOnce(DoAll(SetArgPointee<1>(rmCsInfo), Return(true))); - EXPECT_CALL(*topo, GetHostNameAndPortById(_)) - .WillOnce(Return("haha")); + EXPECT_CALL(*topo, GetHostNameAndPortById(_)).WillOnce(Return("haha")); EXPECT_CALL(*topo, GetMetaServer(1, _)) .WillOnce(DoAll(SetArgPointee<1>(GetMetaServer(1)), Return(true))); @@ -245,7 +246,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { } { - // 2. 移除 1中的operator + // 2. Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(rmOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->removeOpNum.get_value()); @@ -263,7 +264,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { transCsInfo.SetLeader(1); { - // 1. 增加normal级别/transferleader类型的operator + // 1. Increase the operator of the normal level/transferleader type EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)) @@ -329,7 +330,6 @@ TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { std::string("Normal\",\"opType\":\"TransferLeader\",\"poolId") + std::string("\":\"1\",\"startEpoch\":\"1\"}"); - ASSERT_EQ(res, scheduleMetrics->operators[transferOp.copysetID].JsonBody()); LOG(INFO) @@ -338,14 +338,15 @@ TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { } { - // 2. 移除 1中的operator + // 2. Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(transferOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->transferOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // There should be no problem removing metrics that do not exist in the + // map scheduleMetrics->UpdateRemoveMetric(transferOp); } } @@ -358,7 +359,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_changeOp) { changeCsInfo.SetLeader(1); { - // 1. 增加normal级别/changePeer类型的operator + // 1. Increase operator of normal level/changePeer type EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 4}, _)) .WillOnce(DoAll(SetArgPointee<1>(changeCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)) @@ -426,14 +427,15 @@ TEST_F(ScheduleMetricsTest, test_add_rm_changeOp) { } { - // 2. 移除 1中的operator + // 2. 
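The add/remove metric tests above also pin down an edge case: UpdateRemoveMetric() must stay harmless when the operator was never added, or was already removed. A sketch of one way to get that idempotence, with plain integers standing in for the bvar counters the real class uses:

#include <cstdint>
#include <map>
#include <string>

struct ScheduleMetricsSketch {
    int64_t operatorNum = 0;
    std::map<uint64_t, std::string> operators;  // copysetId -> JSON body

    void UpdateAddMetric(uint64_t copysetId, const std::string& json) {
        // Only count the operator if it was not already tracked.
        if (operators.emplace(copysetId, json).second) {
            ++operatorNum;
        }
    }

    void UpdateRemoveMetric(uint64_t copysetId) {
        // Erasing a missing key is a no-op, so a second remove cannot
        // drive the counter negative.
        if (operators.erase(copysetId) == 1) {
            --operatorNum;
        }
    }
};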
Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(changeOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->changeOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // There should be no problem removing metrics that do not exist in the + // map scheduleMetrics->UpdateRemoveMetric(changeOp); } } @@ -446,7 +448,7 @@ TEST_F(ScheduleMetricsTest, test_abnormal) { transCsInfo.SetCopySetMembers(std::set{1, 2, 3}); transCsInfo.SetLeader(1); - // 获取copyset失败 + // Failed to obtain copyset EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)).WillOnce(Return(false)); scheduleMetrics->UpdateAddMetric(transferOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -459,7 +461,7 @@ TEST_F(ScheduleMetricsTest, test_abnormal) { << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); scheduleMetrics->UpdateRemoveMetric(transferOp); - // 获取metaserver 或者 server失败 + // Failed to obtain metaserver or server EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)).WillOnce(Return(false)); diff --git a/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp b/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp index 04241d0209..a8c91d7617 100644 --- a/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp +++ b/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp @@ -20,6 +20,8 @@ * @Author: chenwei */ +#include "curvefs/src/mds/schedule/scheduleService/scheduleService.h" + #include #include #include @@ -27,17 +29,16 @@ #include #include "curvefs/proto/schedule.pb.h" -#include "curvefs/src/mds/schedule/scheduleService/scheduleService.h" #include "curvefs/test/mds/mock/mock_coordinator.h" namespace curvefs { namespace mds { namespace schedule { -using ::testing::Return; using ::testing::_; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; class TestScheduleService : public ::testing::Test { protected: @@ -45,7 +46,7 @@ class TestScheduleService : public ::testing::Test { server_ = new brpc::Server(); coordinator_ = std::make_shared(); - ScheduleServiceImpl *scheduleService = + ScheduleServiceImpl* scheduleService = new ScheduleServiceImpl(coordinator_); ASSERT_EQ( 0, server_->AddService(scheduleService, brpc::SERVER_OWNS_SERVICE)); @@ -63,7 +64,7 @@ class TestScheduleService : public ::testing::Test { protected: std::shared_ptr coordinator_; butil::EndPoint listenAddr_; - brpc::Server *server_; + brpc::Server* server_; }; TEST_F(TestScheduleService, test_QueryMetaServerRecoverStatus) { @@ -75,7 +76,7 @@ TEST_F(TestScheduleService, test_QueryMetaServerRecoverStatus) { request.add_metaserverid(1); QueryMetaServerRecoverStatusResponse response; - // 1. 查询metaserver恢复状态返回成功 + // 1. Querying metaserver recovery status returned success { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryMetaServerRecoverStatus( @@ -91,7 +92,7 @@ TEST_F(TestScheduleService, test_QueryMetaServerRecoverStatus) { ASSERT_TRUE(response.recoverstatusmap().begin()->second); } - // 2. 传入的metaserverid不合法 + // 2. 
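The scheduleService test that follows runs its mock coordinator behind an in-process brpc server. The harness pattern reduces to the sketch below (the brpc calls are real, the service argument is a placeholder for any protobuf-generated service implementation):

#include <brpc/server.h>
#include <google/protobuf/service.h>

int ServeOwned(google::protobuf::Service* service) {
    brpc::Server server;
    // SERVER_OWNS_SERVICE: the server deletes the service when it is
    // destroyed, mirroring the ownership used in the fixture below.
    if (server.AddService(service, brpc::SERVER_OWNS_SERVICE) != 0) {
        return -1;
    }
    if (server.Start(0 /* any free port */, nullptr) != 0) {
        return -1;
    }
    // Tests then point a brpc::Channel at server.listen_address(), and
    // tear down with server.Stop(0) followed by server.Join().
    server.RunUntilAskedToQuit();
    return 0;
}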
The metaserverid passed in is illegal { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryMetaServerRecoverStatus( diff --git a/curvefs/test/volume/bitmap_allocator_test.cpp b/curvefs/test/volume/bitmap_allocator_test.cpp index 3eca470fec..88c324e9e4 100644 --- a/curvefs/test/volume/bitmap_allocator_test.cpp +++ b/curvefs/test/volume/bitmap_allocator_test.cpp @@ -18,9 +18,8 @@ #include -#include "curvefs/test/volume/common.h" - #include "absl/memory/memory.h" +#include "curvefs/test/volume/common.h" namespace curvefs { namespace volume { @@ -100,7 +99,7 @@ TEST_F(BitmapAllocatorTest, AllocFromBitmap) { Extents expected = { Extent(opt_.startOffset + opt_.length * opt_.smallAllocProportion, - allocSize)}; + allocSize)}; ASSERT_EQ(expected, exts); @@ -225,7 +224,7 @@ TEST_F(BitmapAllocatorTest, TestMarkUsedRandom) { uint64_t off = opt_.startOffset; uint64_t usedSize = 0; - // 对于每一个 size per bit,随机其中一部分设置 + // For each size per bit, randomly set a portion of it auto select = [this, &usedSize](uint64_t startOffset) { auto off = rand_r(&seed) * 4096 % opt_.sizePerBit; auto len = rand_r(&seed) * 4096 % opt_.sizePerBit; diff --git a/curvefs_python/cbd_client.h b/curvefs_python/cbd_client.h index 64109ef8e5..a5415b26e3 100644 --- a/curvefs_python/cbd_client.h +++ b/curvefs_python/cbd_client.h @@ -56,15 +56,17 @@ class CBDClient { int Rename(UserInfo_t* info, const char* oldpath, const char* newpath); int Extend(const char* filename, UserInfo_t* info, uint64_t size); - // 同步读写 - int Read(int fd, char* buf, unsigned long offset, unsigned long length); // NOLINT - int Write(int fd, const char* buf, unsigned long offset, unsigned long length); // NOLINT + // Synchronous read and write + int Read(int fd, char* buf, unsigned long offset, + unsigned long length); // NOLINT + int Write(int fd, const char* buf, unsigned long offset, + unsigned long length); // NOLINT - // 异步读写 + // Asynchronous read and write int AioRead(int fd, AioContext* aioctx); int AioWrite(int fd, AioContext* aioctx); - // 获取文件的基本信息 + // Obtain basic information about the file int StatFile(const char* filename, UserInfo_t* info, FileInfo_t* finfo); int ChangeOwner(const char* filename, const char* owner, UserInfo_t* info); diff --git a/curvefs_python/curve_type.h b/curvefs_python/curve_type.h index d6603e238d..5382401d72 100644 --- a/curvefs_python/curve_type.h +++ b/curvefs_python/curve_type.h @@ -34,65 +34,65 @@ #define CURVEINODE_APPENDFILE 2 #define CURVE_INODE_APPENDECFILE 3 -#define CURVE_ERROR_OK 0 -// 文件或者目录已存在 +#define CURVE_ERROR_OK 0 +// The file or directory already exists #define CURVE_ERROR_EXISTS 1 -// 操作失败 +// Operation failed #define CURVE_ERROR_FAILED 2 -// 禁止IO +// Prohibit IO #define CURVE_ERROR_DISABLEIO 3 -// 认证失败 +// Authentication failed #define CURVE_ERROR_AUTHFAIL 4 -// 正在删除 +// Removing #define CURVE_ERROR_DELETING 5 -// 文件不存在 +// File does not exist #define CURVE_ERROR_NOTEXIST 6 -// 快照中 +// In the snapshot #define CURVE_ERROR_UNDER_SNAPSHOT 7 -// 非快照期间 +// During non snapshot periods #define CURVE_ERROR_NOT_UNDERSNAPSHOT 8 -// 删除错误 +// Delete Error #define CURVE_ERROR_DELETE_ERROR 9 -// segment未分配 +// Segment not allocated #define CURVE_ERROR_NOT_ALLOCATE 10 -// 操作不支持 +// Operation not supported #define CURVE_ERROR_NOT_SUPPORT 11 -// 目录非空 +// Directory is not empty #define CURVE_ERROR_NOT_EMPTY 12 -// 禁止缩容 +// Prohibit shrinkage #define CURVE_ERROR_NO_SHRINK_BIGGER_FILE 13 -// session不存在 +// Session does not exist #define CURVE_ERROR_SESSION_NOTEXISTS 14 -// 文件被占用 +// File occupied #define 
CURVE_ERROR_FILE_OCCUPIED 15 -// 参数错误 +// Parameter error #define CURVE_ERROR_PARAM_ERROR 16 -// MDS一侧存储错误 +// MDS side storage error #define CURVE_ERROR_INTERNAL_ERROR 17 -// crc检查错误 +// CRC check error #define CURVE_ERROR_CRC_ERROR 18 -// request参数存在问题 +// There is an issue with the request parameter #define CURVE_ERROR_INVALID_REQUEST 19 -// 磁盘存在问题 +// There is a problem with the disk #define CURVE_ERROR_DISK_FAIL 20 -// 空间不足 +// Insufficient space #define CURVE_ERROR_NO_SPACE 21 -// IO未对齐 +// IO misalignment #define CURVE_ERROR_NOT_ALIGNED 22 -// 文件被关闭,fd不可用 +// File closed, fd not available #define CURVE_ERROR_BAD_FD 23 -// 文件长度不支持 +// File length not supported #define CURVE_ERROR_LENGTH_NOT_SUPPORT 24 -// 文件状态 -#define CURVE_FILE_CREATED 0 -#define CURVE_FILE_DELETING 1 -#define CURVE_FILE_CLONING 2 +// File Status +#define CURVE_FILE_CREATED 0 +#define CURVE_FILE_DELETING 1 +#define CURVE_FILE_CLONING 2 #define CURVE_FILE_CLONEMETAINSTALLED 3 -#define CURVE_FILE_CLONED 4 -#define CURVE_FILE_BEINGCLONED 5 +#define CURVE_FILE_CLONED 4 +#define CURVE_FILE_BEINGCLONED 5 -// 未知错误 +// Unknown error #define CURVE_ERROR_UNKNOWN 100 #define CURVE_OP_READ 0 @@ -100,11 +100,10 @@ #define CLUSTERIDMAX 256 - typedef void (*AioCallBack)(struct AioContext* context); typedef struct AioContext { - unsigned long offset; //NOLINT - unsigned long length; //NOLINT + unsigned long offset; // NOLINT + unsigned long length; // NOLINT int ret; int op; AioCallBack cb; @@ -117,32 +116,32 @@ typedef struct UserInfo { } UserInfo_t; typedef struct FileInfo { - uint64_t id; - uint64_t parentid; - int filetype; - uint64_t length; - uint64_t ctime; - char filename[256]; - char owner[256]; - int fileStatus; - uint64_t stripeUnit; - uint64_t stripeCount; + uint64_t id; + uint64_t parentid; + int filetype; + uint64_t length; + uint64_t ctime; + char filename[256]; + char owner[256]; + int fileStatus; + uint64_t stripeUnit; + uint64_t stripeCount; } FileInfo_t; typedef struct DirInfos { - char* dirpath; - UserInfo_t* userinfo; - uint64_t dirsize; - FileInfo_t* fileinfo; + char* dirpath; + UserInfo_t* userinfo; + uint64_t dirsize; + FileInfo_t* fileinfo; } DirInfos_t; struct CreateContext { - std::string name; - size_t length; - UserInfo user; - std::string poolset; - uint64_t stripeUnit; - uint64_t stripeCount; + std::string name; + size_t length; + UserInfo user; + std::string poolset; + uint64_t stripeUnit; + uint64_t stripeCount; }; #endif // CURVEFS_PYTHON_CURVE_TYPE_H_ diff --git a/curvefs_python/curvefs_tool.py b/curvefs_python/curvefs_tool.py index f2fb582214..7a0cf73e92 100644 --- a/curvefs_python/curvefs_tool.py +++ b/curvefs_python/curvefs_tool.py @@ -21,61 +21,65 @@ import parser import time -fileType = ["INODE_DIRECTORY", "INODE_PAGEFILE", "INODE_APPENDFILE", "INODE_APPENDECFILE", "INODE_SNAPSHOT_PAGEFILE"] -fileStatus = ["Created", "Deleting", "Cloning", "CloneMetaInstalled", "Cloned", "BeingCloned"] +fileType = ["INODE_DIRECTORY", "INODE_PAGEFILE", "INODE_APPENDFILE", + "INODE_APPENDECFILE", "INODE_SNAPSHOT_PAGEFILE"] +fileStatus = ["Created", "Deleting", "Cloning", + "CloneMetaInstalled", "Cloned", "BeingCloned"] kGB = 1024 * 1024 * 1024 kUnitializedFileID = 0 -# 参照curve/include/client/libcurve.h -retCode = { 0 : "OK", - 1 : "EXISTS", - 2 : "FAILED", - 3 : "DISABLEIO", - 4 : "AUTHFAIL", - 5 : "DELETING", - 6 : "NOTEXIST", - 7 : "UNDER_SNAPSHOT", - 8 : "NOT_UNDERSNAPSHOT", - 9 : "DELETE_ERROR", - 10 : "NOT_ALLOCATE", - 11 : "NOT_SUPPORT", - 12 : "NOT_EMPTY", - 13 : "NO_SHRINK_BIGGER_FILE", - 14 : 
"SESSION_NOTEXISTS", - 15 : "FILE_OCCUPIED", - 16 : "PARAM_ERROR", - 17 : "INTERNAL_ERROR", - 18 : "CRC_ERROR", - 19 : "INVALID_REQUEST", - 20 : "DISK_FAIL", - 21 : "NO_SPACE", - 22 : "NOT_ALIGNED", - 23 : "BAD_FD", - 24 : "LENGTH_NOT_SUPPORT", - 25 : "SESSION_NOT_EXIST", - 26 : "STATUS_NOT_MATCH", - 27 : "DELETE_BEING_CLONED", - 28 : "CLIENT_NOT_SUPPORT_SNAPSHOT", - 29 : "SNAPSTHO_FROZEN", - 100 : "UNKNOWN"} +# Refer to curve/include/client/libcurve.h +retCode = {0: "OK", + 1: "EXISTS", + 2: "FAILED", + 3: "DISABLEIO", + 4: "AUTHFAIL", + 5: "DELETING", + 6: "NOTEXIST", + 7: "UNDER_SNAPSHOT", + 8: "NOT_UNDERSNAPSHOT", + 9: "DELETE_ERROR", + 10: "NOT_ALLOCATE", + 11: "NOT_SUPPORT", + 12: "NOT_EMPTY", + 13: "NO_SHRINK_BIGGER_FILE", + 14: "SESSION_NOTEXISTS", + 15: "FILE_OCCUPIED", + 16: "PARAM_ERROR", + 17: "INTERNAL_ERROR", + 18: "CRC_ERROR", + 19: "INVALID_REQUEST", + 20: "DISK_FAIL", + 21: "NO_SPACE", + 22: "NOT_ALIGNED", + 23: "BAD_FD", + 24: "LENGTH_NOT_SUPPORT", + 25: "SESSION_NOT_EXIST", + 26: "STATUS_NOT_MATCH", + 27: "DELETE_BEING_CLONED", + 28: "CLIENT_NOT_SUPPORT_SNAPSHOT", + 29: "SNAPSTHO_FROZEN", + 100: "UNKNOWN"} + def getRetCodeMsg(ret): - if retCode.has_key(-ret) : + if retCode.has_key(-ret): return retCode[-ret] return "Unknown Error Code" + if __name__ == '__main__': - # 参数解析 + # Parameter parsing args = parser.get_parser().parse_args() - # 初始化client + # Initialize client cbd = curvefs.CBDClient() ret = cbd.Init(args.confpath) if ret != 0: print "init fail" exit(1) - # 获取文件user信息 + # Obtain file user information user = curvefs.UserInfo_t() user.owner = args.user if args.password: @@ -85,7 +89,8 @@ def getRetCodeMsg(ret): if args.optype == "create": if args.stripeUnit or args.stripeCount: - ret = cbd.Create2(args.filename, user, args.length * kGB, args.stripeUnit, args.stripeCount) + ret = cbd.Create2(args.filename, user, args.length * + kGB, args.stripeUnit, args.stripeCount) else: ret = cbd.Create(args.filename, user, args.length * kGB) elif args.optype == "delete": @@ -116,7 +121,7 @@ def getRetCodeMsg(ret): ret = cbd.Mkdir(args.dirname, user) elif args.optype == "rmdir": ret = cbd.Rmdir(args.dirname, user) - elif args.optype == "list" : + elif args.optype == "list": dir = cbd.Listdir(args.dirname, user) for i in dir: print i diff --git a/curvefs_python/libcurvefs.h b/curvefs_python/libcurvefs.h index 55c6bf55fe..069c4542f4 100644 --- a/curvefs_python/libcurvefs.h +++ b/curvefs_python/libcurvefs.h @@ -19,13 +19,14 @@ * File Created: Tuesday, 25th September 2018 2:07:05 pm * Author: */ -#ifndef CURVE_LIBCURVE_INTERFACE_H //NOLINT +#ifndef CURVE_LIBCURVE_INTERFACE_H // NOLINT #define CURVE_LIBCURVE_INTERFACE_H -#include #include -#include +#include + #include +#include #include "curvefs_python/curve_type.h" @@ -38,15 +39,17 @@ int Open4Qemu(const char* filename); int Open(const char* filename, UserInfo_t* info); int Create(const char* filename, UserInfo_t* info, size_t size); -// 同步读写 -int Read(int fd, char* buf, unsigned long offset, unsigned long length); //NOLINT -int Write(int fd, const char* buf, unsigned long offset, unsigned long length); //NOLINT +// Synchronous read and write +int Read(int fd, char* buf, unsigned long offset, + unsigned long length); // NOLINT +int Write(int fd, const char* buf, unsigned long offset, + unsigned long length); // NOLINT -// 异步读写 +// Asynchronous read and write int AioRead(int fd, AioContext* aioctx); int AioWrite(int fd, AioContext* aioctx); -// 获取文件的基本信息 +// Obtain basic information about the file int StatFile4Qemu(const char* 
filename, FileInfo_t* finfo); int StatFile(const char* filename, UserInfo_t* info, FileInfo_t* finfo); int ChangeOwner(const char* filename, const char* owner, UserInfo_t* info); @@ -59,7 +62,7 @@ int Recover(const char* filename, UserInfo_t* info, uint64_t fileId); int DeleteForce(const char* filename, UserInfo_t* info); DirInfos_t* OpenDir(const char* dirpath, UserInfo_t* userinfo); void CloseDir(DirInfos_t* dirinfo); -int Listdir(DirInfos_t *dirinfo); +int Listdir(DirInfos_t* dirinfo); int Mkdir(const char* dirpath, UserInfo_t* info); int Rmdir(const char* dirpath, UserInfo_t* info); diff --git a/curvefs_python/test.py b/curvefs_python/test.py index 0f0045fa62..eb77fd7f9e 100644 --- a/curvefs_python/test.py +++ b/curvefs_python/test.py @@ -19,11 +19,12 @@ import os + def exec_cmd(cmd): ret = os.system(cmd) if ret == 0: print cmd + " exec success" - else : + else: print cmd + " exec fail, ret = " + str(ret) @@ -37,10 +38,10 @@ def exec_cmd(cmd): exec_cmd(cmd) cmd = "curve list --user k8s --dirname /k8s" exec_cmd(cmd) -# 不是root,失败 +# Not root, failed cmd = "curve list --user k8s --dirname /" exec_cmd(cmd) -# root没有传入密码,失败 +# Root did not pass in password, failed cmd = "curve list --user root --dirname /" exec_cmd(cmd) cmd = "curve list --user root --dirname / --password root_password" diff --git a/curvesnapshot_python/libcurveSnapshot.cpp b/curvesnapshot_python/libcurveSnapshot.cpp index 5cdce45219..97588ba58c 100644 --- a/curvesnapshot_python/libcurveSnapshot.cpp +++ b/curvesnapshot_python/libcurveSnapshot.cpp @@ -20,60 +20,57 @@ * Author: tongguangxun */ +#include "curvesnapshot_python/libcurveSnapshot.h" + #include -#include #include +#include -#include "curvesnapshot_python/libcurveSnapshot.h" -#include "src/client/libcurve_snapshot.h" -#include "src/client/client_config.h" #include "include/client/libcurve.h" #include "src/client/client_common.h" +#include "src/client/client_config.h" +#include "src/client/libcurve_snapshot.h" #include "src/common/concurrent/concurrent.h" -using curve::client::UserInfo; using curve::client::ClientConfig; -using curve::client::SnapshotClient; -using curve::client::SnapCloneClosure; -using curve::client::FileServiceOption; using curve::client::ClientConfigOption; -using curve::common::Mutex; +using curve::client::FileServiceOption; +using curve::client::SnapCloneClosure; +using curve::client::SnapshotClient; +using curve::client::UserInfo; using curve::common::ConditionVariable; +using curve::common::Mutex; class TaskTracker { public: - TaskTracker() - : concurrent_(0), - lastErr_(0) {} + TaskTracker() : concurrent_(0), lastErr_(0) {} /** - * @brief 增加一个追踪任务 + * @brief Add a tracking task */ - void AddOneTrace() { - concurrent_.fetch_add(1, std::memory_order_acq_rel); - } + void AddOneTrace() { concurrent_.fetch_add(1, std::memory_order_acq_rel); } /** - * @brief 获取任务数量 + * @brief Get the number of tasks * - * @return 任务数量 + * @return Number of tasks */ - uint32_t GetTaskNum() const { - return concurrent_; - } + uint32_t GetTaskNum() const { return concurrent_; } /** - * @brief 处理任务返回值 + * @brief processing task return value * - * @param retCode 返回值 + * @param retCode return value */ void HandleResponse(int retCode) { if (retCode < 0) { lastErr_ = retCode; } if (1 == concurrent_.fetch_sub(1, std::memory_order_acq_rel)) { - // 最后一次需拿锁再发信号,防止先发信号后等待导致死锁 + // The last time you need to take the lock and send the signal + // again, to prevent deadlock caused by waiting after sending the + // signal first std::unique_lock lk(cv_m); cv_.notify_all(); } else { 
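The comment in this HandleResponse hunk is the load-bearing detail: the last finisher must take the mutex before notifying, otherwise the waiter can evaluate the predicate, get preempted before it sleeps, miss the only notify_all(), and block forever. The same pattern condensed to standard library pieces (CountdownTracker is a stand-in name for the TaskTracker shown in these hunks):

#include <atomic>
#include <condition_variable>
#include <mutex>

class CountdownTracker {
 public:
    void AddOne() { pending_.fetch_add(1, std::memory_order_acq_rel); }

    void Done() {
        // fetch_sub returns the previous value, so 1 means "last finisher".
        if (1 == pending_.fetch_sub(1, std::memory_order_acq_rel)) {
            // Lock before notify: the waiter's predicate runs under this
            // mutex, so the notify cannot fall between check and sleep.
            std::lock_guard<std::mutex> lk(mu_);
            cv_.notify_all();
        }
    }

    void Wait() {
        std::unique_lock<std::mutex> lk(mu_);
        cv_.wait(lk, [this] {
            return pending_.load(std::memory_order_acquire) == 0;
        });
    }

 private:
    std::mutex mu_;
    std::condition_variable cv_;
    std::atomic<int> pending_{0};
};

Call sites then follow the shape ReadChunkSnapshot uses later in this file: AddOne() before issuing the async request, Done() from the completion closure, and Wait() before inspecting the result.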
@@ -82,30 +79,29 @@ class TaskTracker { } /** - * @brief 等待追踪的所有任务完成 + * @brief Waiting for all tracked tasks to be completed */ void Wait() { std::unique_lock lk(cv_m); - cv_.wait(lk, [this](){ - return concurrent_.load(std::memory_order_acquire) == 0;}); + cv_.wait(lk, [this]() { + return concurrent_.load(std::memory_order_acquire) == 0; + }); } /** - * @brief 获取最后一个错误 + * @brief Get Last Error * - * @return 错误码 + * @return error code */ - int GetResult() { - return lastErr_; - } + int GetResult() { return lastErr_; } private: - // 等待的条件变量 + // Waiting condition variable ConditionVariable cv_; Mutex cv_m; - // 并发数量 + // Concurrent quantity std::atomic concurrent_; - // 错误码 + // Error code int lastErr_; }; @@ -162,32 +158,26 @@ void LocalInfo2ChunkIDInfo(const CChunkIDInfo& localinfo, idinfo->lpid_ = localinfo.lpid_.value; } -int CreateSnapShot(const char* filename, - const CUserInfo_t userinfo, +int CreateSnapShot(const char* filename, const CUserInfo_t userinfo, type_uInt64_t* seq) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; } int ret = globalSnapshotclient->CreateSnapShot( - filename, - UserInfo(userinfo.owner, userinfo.password), - &seq->value); - LOG(INFO) << "create snapshot ret = " << ret - << ", seq = " << seq->value; + filename, UserInfo(userinfo.owner, userinfo.password), &seq->value); + LOG(INFO) << "create snapshot ret = " << ret << ", seq = " << seq->value; return ret; } -int DeleteSnapShot(const char* filename, - const CUserInfo_t userinfo, +int DeleteSnapShot(const char* filename, const CUserInfo_t userinfo, type_uInt64_t seq) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; } - return globalSnapshotclient->DeleteSnapShot(filename, - UserInfo(userinfo.owner, userinfo.password), - seq.value); + return globalSnapshotclient->DeleteSnapShot( + filename, UserInfo(userinfo.owner, userinfo.password), seq.value); } int GetSnapShot(const char* filename, const CUserInfo_t userinfo, @@ -198,10 +188,9 @@ int GetSnapShot(const char* filename, const CUserInfo_t userinfo, } curve::client::FInfo_t fileinfo; - int ret = globalSnapshotclient->GetSnapShot(filename, - UserInfo(userinfo.owner, userinfo.password), - seq.value, - &fileinfo); + int ret = globalSnapshotclient->GetSnapShot( + filename, UserInfo(userinfo.owner, userinfo.password), seq.value, + &fileinfo); if (ret == LIBCURVE_ERROR::OK) { snapinfo->id.value = fileinfo.id; snapinfo->parentid.value = fileinfo.parentid; @@ -224,22 +213,18 @@ int GetSnapShot(const char* filename, const CUserInfo_t userinfo, return ret; } -int GetSnapshotSegmentInfo(const char* filename, - const CUserInfo_t userinfo, - type_uInt64_t seq, - type_uInt64_t offset, - CSegmentInfo *segInfo) { +int GetSnapshotSegmentInfo(const char* filename, const CUserInfo_t userinfo, + type_uInt64_t seq, type_uInt64_t offset, + CSegmentInfo* segInfo) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; } curve::client::SegmentInfo seg; - int ret = globalSnapshotclient->GetSnapshotSegmentInfo(filename, - UserInfo(userinfo.owner, userinfo.password), - seq.value, - offset.value, - &seg); + int ret = globalSnapshotclient->GetSnapshotSegmentInfo( + filename, UserInfo(userinfo.owner, userinfo.password), seq.value, + offset.value, &seg); if (ret == LIBCURVE_ERROR::OK) { segInfo->segmentsize.value = seg.segmentsize; segInfo->chunksize.value = seg.chunksize; @@ -259,12 +244,10 @@ int GetSnapshotSegmentInfo(const char* filename, 
return ret; } -int GetOrAllocateSegmentInfo(const char* filename, - type_uInt64_t offset, - type_uInt64_t segmentsize, - type_uInt64_t chunksize, - const CUserInfo_t userinfo, - CSegmentInfo *segInfo) { +int GetOrAllocateSegmentInfo(const char* filename, type_uInt64_t offset, + type_uInt64_t segmentsize, type_uInt64_t chunksize, + const CUserInfo_t userinfo, + CSegmentInfo* segInfo) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; @@ -274,14 +257,12 @@ int GetOrAllocateSegmentInfo(const char* filename, fileinfo.segmentsize = segmentsize.value; fileinfo.chunksize = chunksize.value; fileinfo.fullPathName = std::string(filename); - fileinfo.filename = std::string(filename); + fileinfo.filename = std::string(filename); fileinfo.userinfo = UserInfo(userinfo.owner, userinfo.password); curve::client::SegmentInfo seg; - int ret = globalSnapshotclient->GetOrAllocateSegmentInfo(false, - offset.value, - &fileinfo, - &seg); + int ret = globalSnapshotclient->GetOrAllocateSegmentInfo( + false, offset.value, &fileinfo, &seg); segInfo->segmentsize.value = seg.segmentsize; segInfo->chunksize.value = seg.chunksize; segInfo->startoffset.value = seg.startoffset; @@ -300,11 +281,8 @@ int GetOrAllocateSegmentInfo(const char* filename, return ret; } -int ReadChunkSnapshot(CChunkIDInfo cidinfo, - type_uInt64_t seq, - type_uInt64_t offset, - type_uInt64_t len, - char *buf) { +int ReadChunkSnapshot(CChunkIDInfo cidinfo, type_uInt64_t seq, + type_uInt64_t offset, type_uInt64_t len, char* buf) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; @@ -313,12 +291,11 @@ int ReadChunkSnapshot(CChunkIDInfo cidinfo, curve::client::ChunkIDInfo idinfo; LocalInfo2ChunkIDInfo(cidinfo, &idinfo); auto tracker = std::make_shared(); - SnapCloneTestClosure *cb = new SnapCloneTestClosure(tracker); + SnapCloneTestClosure* cb = new SnapCloneTestClosure(tracker); tracker->AddOneTrace(); - int ret = globalSnapshotclient->ReadChunkSnapshot(idinfo, seq.value, - offset.value, len.value, - buf, cb); + int ret = globalSnapshotclient->ReadChunkSnapshot( + idinfo, seq.value, offset.value, len.value, buf, cb); tracker->Wait(); if (ret < 0) { return ret; @@ -340,13 +317,12 @@ int DeleteChunkSnapshotOrCorrectSn(CChunkIDInfo cidinfo, curve::client::ChunkIDInfo idinfo; LocalInfo2ChunkIDInfo(cidinfo, &idinfo); - int ret = globalSnapshotclient->DeleteChunkSnapshotOrCorrectSn(idinfo, - correctedSeq.value); + int ret = globalSnapshotclient->DeleteChunkSnapshotOrCorrectSn( + idinfo, correctedSeq.value); return ret; } - -int GetChunkInfo(CChunkIDInfo cidinfo, CChunkInfoDetail *chunkInfo) { +int GetChunkInfo(CChunkIDInfo cidinfo, CChunkInfoDetail* chunkInfo) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; @@ -364,31 +340,23 @@ int GetChunkInfo(CChunkIDInfo cidinfo, CChunkInfoDetail *chunkInfo) { return ret; } - -int CheckSnapShotStatus(const char* filename, - const CUserInfo_t userinfo, - type_uInt64_t seq, - type_uInt32_t* filestatus) { +int CheckSnapShotStatus(const char* filename, const CUserInfo_t userinfo, + type_uInt64_t seq, type_uInt32_t* filestatus) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; } curve::client::FileStatus fs; - int ret = globalSnapshotclient->CheckSnapShotStatus(filename, - UserInfo(userinfo.owner, userinfo.password), - seq.value, - &fs); + int ret = globalSnapshotclient->CheckSnapShotStatus( + filename, UserInfo(userinfo.owner, 
userinfo.password), seq.value, &fs); filestatus->value = static_cast(fs); return ret; } - -int CreateCloneChunk(const char* location, - const CChunkIDInfo chunkidinfo, - type_uInt64_t sn, - type_uInt64_t correntSn, - type_uInt64_t chunkSize) { +int CreateCloneChunk(const char* location, const CChunkIDInfo chunkidinfo, + type_uInt64_t sn, type_uInt64_t correntSn, + type_uInt64_t chunkSize) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; @@ -397,13 +365,11 @@ int CreateCloneChunk(const char* location, curve::client::ChunkIDInfo idinfo; LocalInfo2ChunkIDInfo(chunkidinfo, &idinfo); auto tracker = std::make_shared(); - SnapCloneTestClosure *cb = new SnapCloneTestClosure(tracker); + SnapCloneTestClosure* cb = new SnapCloneTestClosure(tracker); tracker->AddOneTrace(); - int ret = globalSnapshotclient->CreateCloneChunk(location, idinfo, - sn.value, correntSn.value, - chunkSize.value, - cb); + int ret = globalSnapshotclient->CreateCloneChunk( + location, idinfo, sn.value, correntSn.value, chunkSize.value, cb); tracker->Wait(); if (ret < 0) { return ret; @@ -412,10 +378,8 @@ int CreateCloneChunk(const char* location, } } - -int RecoverChunk(const CChunkIDInfo chunkidinfo, - type_uInt64_t offset, - type_uInt64_t len) { +int RecoverChunk(const CChunkIDInfo chunkidinfo, type_uInt64_t offset, + type_uInt64_t len) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; @@ -423,13 +387,11 @@ int RecoverChunk(const CChunkIDInfo chunkidinfo, curve::client::ChunkIDInfo idinfo; LocalInfo2ChunkIDInfo(chunkidinfo, &idinfo); auto tracker = std::make_shared(); - SnapCloneTestClosure *cb = new SnapCloneTestClosure(tracker); + SnapCloneTestClosure* cb = new SnapCloneTestClosure(tracker); tracker->AddOneTrace(); - int ret = globalSnapshotclient->RecoverChunk(idinfo, - offset.value, - len.value, - cb); + int ret = + globalSnapshotclient->RecoverChunk(idinfo, offset.value, len.value, cb); tracker->Wait(); if (ret < 0) { return ret; diff --git a/curvesnapshot_python/libcurveSnapshot.h b/curvesnapshot_python/libcurveSnapshot.h index bb45a02f57..7db41cf7c3 100644 --- a/curvesnapshot_python/libcurveSnapshot.h +++ b/curvesnapshot_python/libcurveSnapshot.h @@ -24,6 +24,7 @@ #define CURVESNAPSHOT_PYTHON_LIBCURVESNAPSHOT_H_ #include + #include #ifdef __cplusplus @@ -52,42 +53,36 @@ enum CFileType { }; typedef struct FileInfo { - type_uInt64_t id; - type_uInt64_t parentid; - int filetype; - type_uInt64_t length; - type_uInt64_t ctime; + type_uInt64_t id; + type_uInt64_t parentid; + int filetype; + type_uInt64_t length; + type_uInt64_t ctime; } FileInfo_t; -enum CFileStatus { - Created = 0, - Deleting, - Cloning, - CloneMetaInstalled, - Cloned -}; +enum CFileStatus { Created = 0, Deleting, Cloning, CloneMetaInstalled, Cloned }; typedef struct CChunkIDInfo { - type_uInt64_t cid_; - type_uInt32_t cpid_; - type_uInt32_t lpid_; + type_uInt64_t cid_; + type_uInt32_t cpid_; + type_uInt32_t lpid_; } CChunkIDInfo_t; -// 保存每个chunk对应的版本信息 +// Save the version information corresponding to each chunk typedef struct CChunkInfoDetail { type_uInt64_t snSize; std::vector chunkSn; } CChunkInfoDetail_t; - -// 保存logicalpool中segment对应的copysetid信息 +// Save the copysetid information corresponding to the segment in the +// logicalpool typedef struct CLogicalPoolCopysetIDInfo { type_uInt32_t lpid; type_uInt32_t cpidVecSize; std::vector cpidVec; } LogicalPoolCopysetIDInfo_t; -// 保存每个segment的基本信息 +// Save basic information for each segment typedef struct 
CSegmentInfo { type_uInt32_t segmentsize; type_uInt32_t chunksize; @@ -98,154 +93,153 @@ typedef struct CSegmentInfo { } CSegmentInfo_t; typedef struct CFInfo { - type_uInt64_t id; - type_uInt64_t parentid; - CFileType filetype; - type_uInt32_t chunksize; - type_uInt32_t segmentsize; - type_uInt64_t length; - type_uInt64_t ctime; - type_uInt64_t seqnum; - char owner[256]; - char filename[256]; - CFileStatus filestatus; + type_uInt64_t id; + type_uInt64_t parentid; + CFileType filetype; + type_uInt32_t chunksize; + type_uInt32_t segmentsize; + type_uInt64_t length; + type_uInt64_t ctime; + type_uInt64_t seqnum; + char owner[256]; + char filename[256]; + CFileStatus filestatus; } CFInfo_t; int Init(const char* path); /** - * 创建快照 - * @param: userinfo是用户信息 - * @param: filename为要创建快照的文件名 - * @param: seq是出参,获取该文件的版本信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Create a snapshot + * @param: userinfo is the user information + * @param: filename is the file name to create the snapshot + * @param: seq is the output parameter to obtain the version information of the + * file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED */ -int CreateSnapShot(const char* filename, - const CUserInfo_t userinfo, +int CreateSnapShot(const char* filename, const CUserInfo_t userinfo, type_uInt64_t* seq); /** - * 删除快照 - * @param: userinfo是用户信息 - * @param: filename为要删除的文件名 - * @param: seq该文件的版本信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Delete snapshot + * @param: userinfo is the user information + * @param: filename is the file name to be deleted + * @param: seq The version information of this file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED */ -int DeleteSnapShot(const char* filename, - const CUserInfo_t userinfo, +int DeleteSnapShot(const char* filename, const CUserInfo_t userinfo, type_uInt64_t seq); /** - * 获取快照对应的文件信息 - * @param: userinfo是用户信息 - * @param: filename为对应的文件名 - * @param: seq为该文件打快照时对应的版本信息 - * @param: snapinfo是出参,保存当前文件的基础信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Obtain file information corresponding to the snapshot + * @param: userinfo is the user information + * @param: filename is the corresponding file name + * @param: seq corresponds to the version information when taking a snapshot of + * the file + * @param: snapinfo is a parameter that saves the basic information of the + * current file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED */ int GetSnapShot(const char* fname, const CUserInfo_t userinfo, type_uInt64_t seq, CFInfo_t* snapinfo); /** - * 获取快照数据segment信息 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seq是文件版本号信息 - * @param: offset是文件的偏移 - * @param:segInfo是出参,保存当前文件的快照segment信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Obtain snapshot data segment information + * @param: userinfo is the user information + * @param: filenam file name + * @param: seq is the file version number information + * @param: offset is the offset of the file + * @param: segInfo is a parameter that saves the snapshot segment information of + * the current file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED */ -int GetSnapshotSegmentInfo(const char* filename, - const CUserInfo_t userinfo, - type_uInt64_t seq, - type_uInt64_t offset, - CSegmentInfo *segInfo); +int GetSnapshotSegmentInfo(const char* filename, const CUserInfo_t userinfo, 
+ type_uInt64_t seq, type_uInt64_t offset, + CSegmentInfo* segInfo); /** - * 读取seq版本号的快照数据 - * @param: cidinfo是当前chunk对应的id信息 - * @param: seq是快照版本号 - * @param: offset是快照内的offset - * @param: len是要读取的长度 - * @param: buf是读取缓冲区 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Read snapshot data of seq version number + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: seq is the snapshot version number + * @param: offset is the offset within the snapshot + * @param: len is the length to be read + * @param: buf is a read buffer + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED */ -int ReadChunkSnapshot(CChunkIDInfo cidinfo, - type_uInt64_t seq, - type_uInt64_t offset, - type_uInt64_t len, - char *buf); +int ReadChunkSnapshot(CChunkIDInfo cidinfo, type_uInt64_t seq, + type_uInt64_t offset, type_uInt64_t len, char* buf); /** - * 删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param: cidinfo是当前chunk对应的id信息 - * @param: correctedSeq是chunk需要修正的版本 + * Delete snapshots generated during this dump or left over from history + * If no snapshot is generated during the dump process, modify the correctedSn + * of the chunk + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: correctedSeq is the version of chunk that needs to be corrected */ int DeleteChunkSnapshotOrCorrectSn(CChunkIDInfo cidinfo, type_uInt64_t correctedSeq); /** - * 获取chunk的版本信息,chunkInfo是出参 - * @param: cidinfo是当前chunk对应的id信息 - * @param: chunkInfo是快照的详细信息 + * Obtain the version information of the chunk, where chunkInfo is the output + * parameter + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: chunkInfo is the detailed information of the snapshot */ -int GetChunkInfo(CChunkIDInfo cidinfo, CChunkInfoDetail *chunkInfo); +int GetChunkInfo(CChunkIDInfo cidinfo, CChunkInfoDetail* chunkInfo); /** - * 获取快照状态 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seq是文件版本号信息 + * Get snapshot status + * @param: userinfo is the user information + * @param: filenam file name + * @param: seq is the file version number information */ -int CheckSnapShotStatus(const char* filename, - const CUserInfo_t userinfo, - type_uInt64_t seq, - type_uInt32_t* filestatus); +int CheckSnapShotStatus(const char* filename, const CUserInfo_t userinfo, + type_uInt64_t seq, type_uInt32_t* filestatus); /** - * 获取快照分配信息 - * @param: filename是当前文件名 - * @param: offset是当前的文件偏移 - * @param: segmentsize为segment大小 + * Obtain snapshot allocation information + * @param: filename is the current file name + * @param: offset is the current file offset + * @param: segmentsize is the segment size * @param: chunksize - * @param: userinfo是用户信息 - * @param[out]: segInfo是出参 + * @param: userinfo is the user information + * @param[out]: segInfo is the output parameter */ -int GetOrAllocateSegmentInfo(const char* filename, - type_uInt64_t offset, - type_uInt64_t segmentsize, - type_uInt64_t chunksize, - const CUserInfo_t userinfo, - CSegmentInfo *segInfo); +int GetOrAllocateSegmentInfo(const char* filename, type_uInt64_t offset, + type_uInt64_t segmentsize, type_uInt64_t chunksize, + const CUserInfo_t userinfo, CSegmentInfo* segInfo); /** - * @brief lazy 创建clone chunk + * @brief lazy Create clone chunk * @detail - * - location的格式定义为 A@B的形式。 - * - 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址; - * - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs + * - The format of 'location' is defined as A@B. 
+ * - If the source data is on S3, the 'location' format is uri@s3, where 'uri' + * is the actual address of the chunk object. + * - If the source data is on CurveFS, the 'location' format is + * /filename/chunkindex@cs. * - * @param:location 数据源的url - * @param:chunkidinfo 目标chunk - * @param:sn chunk的序列号 - * @param:chunkSize chunk的大小 - * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn + * @param: location The URL of the data source + * @param: chunkidinfo The target chunk + * @param: sn The sequence number of the chunk + * @param: chunkSize The size of the chunk + * @param: correntSn Used for modifying the 'correctedSn' when creating the + * clone chunk * - * @return 错误码 + * @return error code */ -int CreateCloneChunk(const char* location, - const CChunkIDInfo chunkidinfo, - type_uInt64_t sn, - type_uInt64_t correntSn, - type_uInt64_t chunkSize); +int CreateCloneChunk(const char* location, const CChunkIDInfo chunkidinfo, + type_uInt64_t sn, type_uInt64_t correntSn, + type_uInt64_t chunkSize); /** - * @brief 实际恢复chunk数据 + * @brief Actual recovery chunk data * - * @param:chunkidinfo chunkidinfo - * @param:offset 偏移 - * @param:len 长度 + * @param: chunkidinfo chunkidinfo + * @param: offset offset + * @param: len length * - * @return 错误码 + * @return error code */ -int RecoverChunk(const CChunkIDInfo chunkidinfo, - type_uInt64_t offset, - type_uInt64_t len); +int RecoverChunk(const CChunkIDInfo chunkidinfo, type_uInt64_t offset, + type_uInt64_t len); /** - * 析构,回收资源 + * Deconstruct and recycle resources */ void UnInit(); diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.0 b/deploy/local/chunkserver/conf/chunkserver.conf.0 index 1525855ebe..b6b0010c83 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.0 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.0 @@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./0/chunkserver.dat chunkserver.disk_type=nvme chunkserver.snapshot_throttle_throughput_bytes=41943040 chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit the number of inflight io, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -145,7 +145,7 @@ chunkfilepool.retry_times=5 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# Does walpool share chunkfilepool? If true, the following configuration is invalid walfilepool.use_chunk_file_pool=true walfilepool.enable_get_segment_from_pool=false walfilepool.file_pool_dir=./0/walfilepool/ diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.1 b/deploy/local/chunkserver/conf/chunkserver.conf.1 index d14fa15bb6..62719e0c30 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.1 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.1 @@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./1/chunkserver.dat chunkserver.disk_type=nvme chunkserver.snapshot_throttle_throughput_bytes=41943040 chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit the number of inflight io, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -143,7 +143,7 @@ chunkfilepool.retry_times=5 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# Does walpool share chunkfilepool? 
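Given the A@B convention documented for CreateCloneChunk above, splitting a location into its data source and origin is a small helper worth writing carefully; rfind is used here on the assumption that the source path itself could contain '@'. A hypothetical helper, not part of the library API:

#include <cstddef>
#include <string>
#include <utility>

// Splits "A@B" into {data source, origin}, e.g. "uri@s3" or
// "/filename/chunkindex@cs" per the CreateCloneChunk comment above.
std::pair<std::string, std::string> ParseLocation(const std::string& location) {
    const std::size_t at = location.rfind('@');
    if (at == std::string::npos) {
        return {location, ""};  // malformed: no origin suffix
    }
    return {location.substr(0, at), location.substr(at + 1)};
}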
If true, the following configuration is invalid walfilepool.use_chunk_file_pool=true walfilepool.enable_get_segment_from_pool=false walfilepool.file_pool_dir=./1/walfilepool/ diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.2 b/deploy/local/chunkserver/conf/chunkserver.conf.2 index 2604423d6f..edc5750db7 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.2 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.2 @@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./2/chunkserver.dat chunkserver.disk_type=nvme chunkserver.snapshot_throttle_throughput_bytes=41943040 chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit the number of inflight io, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -142,7 +142,7 @@ chunkfilepool.retry_times=5 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# Does walpool share chunkfilepool? If true, the following configuration is invalid walfilepool.use_chunk_file_pool=true walfilepool.enable_get_segment_from_pool=false walfilepool.file_pool_dir=./2/walfilepool/ diff --git a/include/chunkserver/chunkserver_common.h b/include/chunkserver/chunkserver_common.h index c483dbea82..62aaf9fce7 100644 --- a/include/chunkserver/chunkserver_common.h +++ b/include/chunkserver/chunkserver_common.h @@ -24,9 +24,9 @@ #define INCLUDE_CHUNKSERVER_CHUNKSERVER_COMMON_H_ #include +#include #include #include -#include #include #include @@ -35,16 +35,16 @@ namespace curve { namespace chunkserver { /* for IDs */ -using LogicPoolID = uint32_t; -using CopysetID = uint32_t; -using ChunkID = uint64_t; -using SnapshotID = uint64_t; -using SequenceNum = uint64_t; +using LogicPoolID = uint32_t; +using CopysetID = uint32_t; +using ChunkID = uint64_t; +using SnapshotID = uint64_t; +using SequenceNum = uint64_t; using ChunkSizeType = uint32_t; -using PageSizeType = uint32_t; +using PageSizeType = uint32_t; -using GroupNid = uint64_t; +using GroupNid = uint64_t; using ChunkServerID = uint32_t; // braft @@ -60,57 +60,55 @@ using PosixFileSystemAdaptor = braft::PosixFileSystemAdaptor; using SnapshotThrottle = braft::SnapshotThrottle; using ThroughputSnapshotThrottle = braft::ThroughputSnapshotThrottle; - -// TODO(lixiaocui): 考虑一下后续的单元测试或者校验要怎么做 +// TODO(lixiaocui): Consider how to proceed with subsequent unit testing or +// validation /* - * IO性能统计复合metric类型 + * IO performance statistics composite metric type */ struct IoPerfMetric { - uint64_t readCount; - uint64_t writeCount; - uint64_t readBytes; - uint64_t writeBytes; - uint64_t readIops; - uint64_t writeIops; - uint64_t readBps; - uint64_t writeBps; + uint64_t readCount; + uint64_t writeCount; + uint64_t readBytes; + uint64_t writeBytes; + uint64_t readIops; + uint64_t writeIops; + uint64_t readBps; + uint64_t writeBps; }; /** - * 将(LogicPoolID, CopysetID)二元组转换成数字格式的复制组ID,格式如下: - * | group id | - * | 32 | 32 | + * Convert the (LogicPoolID, CopysetID) binary into a copy group ID in numerical + * format, as follows: | group id | | 32 | 32 | * | logic pool id | copyset id | */ -inline GroupNid ToGroupNid(const LogicPoolID &logicPoolId, - const CopysetID ©setId) { +inline GroupNid ToGroupNid(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { return (static_cast(logicPoolId) << 32) | copysetId; } /** - * 将(LogicPoolID, CopysetID)二元组转换成字符串格式的复制组ID + *Convert the (LogicPoolID, CopysetID) binary to a copy group ID in string + *format */ -inline GroupId ToGroupId(const LogicPoolID &logicPoolId, - const CopysetID ©setId) { +inline GroupId ToGroupId(const LogicPoolID& 
logicPoolId, + const CopysetID& copysetId) { return std::to_string(ToGroupNid(logicPoolId, copysetId)); } -#define ToBraftGroupId ToGroupId +#define ToBraftGroupId ToGroupId /** - * 从数字格式的复制组ID中解析LogicPoolID + *Parsing LogicPoolID from Copy Group ID in Numeric Format */ -inline LogicPoolID GetPoolID(const GroupNid &groupId) { - return groupId >> 32; -} +inline LogicPoolID GetPoolID(const GroupNid& groupId) { return groupId >> 32; } /** - * 从数字格式的复制组ID中解析CopysetID + *Parsing CopysetID from Copy Group ID in Numeric Format */ -inline CopysetID GetCopysetID(const GroupNid &groupId) { +inline CopysetID GetCopysetID(const GroupNid& groupId) { return groupId & (((uint64_t)1 << 32) - 1); } -/* 格式输出 group id 的 字符串 (logicPoolId, copysetId) */ -inline std::string ToGroupIdString(const LogicPoolID &logicPoolId, - const CopysetID ©setId) { +/*Format output string for group ID (logicPoolId, copysetId)*/ +inline std::string ToGroupIdString(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { std::string groupIdString; groupIdString.append("("); groupIdString.append(std::to_string(logicPoolId)); @@ -121,7 +119,7 @@ inline std::string ToGroupIdString(const LogicPoolID &logicPoolId, groupIdString.append(")"); return groupIdString; } -#define ToGroupIdStr ToGroupIdString +#define ToGroupIdStr ToGroupIdString // Meta page is header of chunkfile, and is used to store meta data of // chunkfile. diff --git a/include/client/libcurve.h b/include/client/libcurve.h index 58459c8bb2..92fa097295 100644 --- a/include/client/libcurve.h +++ b/include/client/libcurve.h @@ -23,17 +23,18 @@ #ifndef INCLUDE_CLIENT_LIBCURVE_H_ #define INCLUDE_CLIENT_LIBCURVE_H_ -#include #include -#include +#include + #include #include +#include #include "libcurve_define.h" // NOLINT #define IO_ALIGNED_BLOCK_SIZE 4096 -#define PATH_MAX_SIZE 4096 -#define NAME_MAX_SIZE 256 +#define PATH_MAX_SIZE 4096 +#define NAME_MAX_SIZE 256 enum FileType { INODE_DIRECTORY = 0, @@ -44,38 +45,38 @@ enum FileType { }; typedef struct FileStatInfo { - uint64_t id; - uint64_t parentid; - FileType filetype; - uint64_t length; - uint64_t ctime; - char filename[NAME_MAX_SIZE]; - char owner[NAME_MAX_SIZE]; - int fileStatus; - uint64_t stripeUnit; - uint64_t stripeCount; - uint32_t blocksize; + uint64_t id; + uint64_t parentid; + FileType filetype; + uint64_t length; + uint64_t ctime; + char filename[NAME_MAX_SIZE]; + char owner[NAME_MAX_SIZE]; + int fileStatus; + uint64_t stripeUnit; + uint64_t stripeCount; + uint32_t blocksize; } FileStatInfo_t; -// 存储用户信息 +// Storing User Information typedef struct C_UserInfo { - // 当前执行的owner信息, owner信息需要以'\0'结尾 + // The current owner information needs to end with'\0' char owner[NAME_MAX_SIZE]; - // 当owner="root"的时候,需要提供password作为计算signature的key - // password信息需要以'\0'结尾 + // When owner="root", password needs to be provided as the key for + // calculating the signature password information needs to end with '\0' char password[NAME_MAX_SIZE]; } C_UserInfo_t; typedef struct DirInfo { - // 当前listdir的目录路径 - char* dirpath; - // 当前listdir操作的用户信息 - C_UserInfo_t* userinfo; - // 当前dir大小,也就是文件数量 - uint64_t dirSize; - // 当前dir的内的文件信息内容,是一个数组 - // fileStat是这个数组的头,数组大小为dirSize - FileStatInfo_t* fileStat; + // The directory path of the current listdir + char* dirpath; + // User information for the current listdir operation + C_UserInfo_t* userinfo; + // The current dir size, which is the number of files + uint64_t dirSize; + // The file information content within the current dir is an array + // fileStat is the header of this array, 
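The chunkserver_common.h helpers above pack the (logicPoolId, copysetId) pair into a single 64-bit group id, pool id in the high 32 bits and copyset id in the low 32. A round-trip check of exactly that encoding:

#include <cassert>
#include <cstdint>

int main() {
    const uint32_t logicPoolId = 7;
    const uint32_t copysetId = 42;

    // High 32 bits: logic pool id; low 32 bits: copyset id (ToGroupNid).
    const uint64_t groupNid =
        (static_cast<uint64_t>(logicPoolId) << 32) | copysetId;

    assert((groupNid >> 32) == logicPoolId);                      // GetPoolID
    assert((groupNid & ((uint64_t{1} << 32) - 1)) == copysetId);  // GetCopysetID
    return 0;
}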
with an array size of dirSize + FileStatInfo_t* fileStat; } DirInfo_t; #ifdef __cplusplus @@ -85,21 +86,20 @@ extern "C" { const char* LibCurveErrorName(LIBCURVE_ERROR err); /** - * 初始化系统 - * @param: path为配置文件路径 - * @return: 成功返回0,否则返回-1. + * Initialize the system + * @param: path is the configuration file path + * @return: Successfully returns 0, otherwise returns -1 */ int Init(const char* path); /** - * 打开文件,qemu打开文件的方式 - * @param: filename文件名, filename中包含用户信息 - * 例如:/1.img_userinfo_ - * @return: 返回文件fd + * Open a file , the way qemu to open a file + * @param: filename File name, which contains user information + * For example:/1.img_userinfo_ + * @return: Return the file fd */ int Open4Qemu(const char* filename); - /** * increase epoch * @param: filename, filename include userinfo @@ -109,41 +109,43 @@ int Open4Qemu(const char* filename); int IncreaseEpoch(const char* filename); /** - * 打开文件,非qemu场景 - * @param: filename文件名 - * @param: userinfo为要打开的文件的用户信息 - * @return: 返回文件fd + * Open file, non qemu scene + * @param: filename File name + * @param: userinfo is the user information of the file to be opened + * @return: Return the file fd */ int Open(const char* filename, const C_UserInfo_t* userinfo); /** - * 创建文件 - * @param: filename文件名 - * @param: userinfo是当前打开或创建时携带的user信息 - * @param: size文件长度,当create为true的时候以size长度创建文件 - * @return: 成功返回 0, 失败返回小于0,可能有多种可能,比如内部错误,或者文件已存在 + * Create File + * @param: filename File name + * @param: userinfo is the user information that is currently carried when + * opening or creating + * @param: size file length. When create is true, create a file with size length + * @return: Success returns 0, failure returns less than 0, and there may be + * multiple possibilities, such as internal errors or the file already exists */ -int Create(const char* filename, - const C_UserInfo_t* userinfo, - size_t size); +int Create(const char* filename, const C_UserInfo_t* userinfo, size_t size); /** - * 同步模式读 - * @param: fd为当前open返回的文件描述符 - * @param: buf为当前待读取的缓冲区 - * @param:offset文件内的偏移 - * @parma:length为待读取的长度 - * @return: 成功返回读取长度, 否则-LIBCURVE_ERROR::FAILED等 + * Synchronous mode reading + * @param: fd is the file descriptor returned by the current open + * @param: buf is the current buffer to be read + * @param: Offset The offset within the file + * @param: length is the length to be read + * @return: Successfully returned the read length, otherwise + * -LIBCURVE_ERROR::FAILED, etc */ int Read(int fd, char* buf, off_t offset, size_t length); /** - * 同步模式写 - * @param: fd为当前open返回的文件描述符 - * @param: buf为当前待写入的缓冲区 - * @param:offset文件内的偏移 - * @parma:length为待读取的长度 - * @return: 成功返回 写入长度,否则-LIBCURVE_ERROR::FAILED等 + * Synchronous mode write + * @param: fd is the file descriptor returned by the current open + * @param: buf is the current buffer to be written + * @param: Offset The offset within the file + * @parma: length is the length to be read + * @return: Successfully returned the write length, otherwise - + * LIBCURVE_ERROR::FAILED, etc */ int Write(int fd, const char* buf, off_t offset, size_t length); @@ -158,18 +160,20 @@ int Write(int fd, const char* buf, off_t offset, size_t length); int Discard(int fd, off_t offset, size_t length); /** - * 异步模式读 - * @param: fd为当前open返回的文件描述符 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 - * @return: 成功返回 0,否则-LIBCURVE_ERROR::FAILED + * Asynchronous mode read + * @param: fd is the file descriptor returned by the current open + * @param: aioctx is an asynchronous read/write IO context that stores basic IO + * information + * @return: 
Successfully returns 0, otherwise - LIBCURVE_ERROR::FAILED */ int AioRead(int fd, CurveAioContext* aioctx); /** - * 异步模式写 - * @param: fd为当前open返回的文件描述符 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 - * @return: 成功返回 0,否则-LIBCURVE_ERROR::FAILED + * Asynchronous mode write + * @param: fd is the file descriptor returned by the current open + * @param: aioctx is an asynchronous read/write IO context that stores basic IO + * information + * @return: Successfully returns 0, otherwise -LIBCURVE_ERROR::FAILED */ int AioWrite(int fd, CurveAioContext* aioctx); @@ -182,51 +186,58 @@ int AioWrite(int fd, CurveAioContext* aioctx); int AioDiscard(int fd, CurveAioContext* aioctx); /** - * 重命名文件 - * @param: userinfo是用户信息 - * @param: oldpath源路径 - * @param: newpath目标路径 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Rename File + * @param: userinfo is the user information + * @param: oldpath source path + * @param: newpath Target Path + * @return: Successfully returned 0, + * Otherwise, it may return to -LIBCURVE_ERROR::FAILED, + * -LIBCURVE_ERROR::AUTHFAILED, etc */ -int Rename(const C_UserInfo_t* userinfo, const char* oldpath, const char* newpath); // NOLINT +int Rename(const C_UserInfo_t* userinfo, const char* oldpath, + const char* newpath); // NOLINT /** - * 扩展文件 - * @param: userinfo是用户信息 - * @param: filename文件名 - * @param: newsize新的size - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Extension file + * @param: userinfo is the user information + * @param: filename File name + * @param: newsize New size + * @return: Successfully returned 0, + * Otherwise, it may return to -LIBCURVE_ERROR::FAILED, + * -LIBCURVE_ERROR::AUTHFAILED, etc */ -int Extend(const char* filename, const C_UserInfo_t* userinfo, uint64_t newsize); // NOLINT +int Extend(const char* filename, const C_UserInfo_t* userinfo, + uint64_t newsize); // NOLINT /** - * 扩展文件,Qemu场景在线扩容 - * @param: filename文件名 - * @param: newsize新的size - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Expanding files, Qemu scene online expansion + * @param: filename File name + * @param: newsize New size + * @return: Successfully returned 0, + * Otherwise, it may return to -LIBCURVE_ERROR::FAILED, + * -LIBCURVE_ERROR::AUTHFAILED, etc */ -int Extend4Qemu(const char* filename, int64_t newsize); // NOLINT - +int Extend4Qemu(const char* filename, int64_t newsize); // NOLINT /** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Delete files + * @param: userinfo is the user information + * @param: filename The file name to be deleted + * @return: Successfully returned 0, + * Otherwise, it may return to -LIBCURVE_ERROR::FAILED, + * -LIBCURVE_ERROR::AUTHFAILED, etc */ int Unlink(const char* filename, const C_UserInfo_t* userinfo); /** - * 强制删除文件, unlink删除文件在mds一侧并不是真正的删除, - * 而是放到了垃圾回收站,当使用DeleteForce接口删除的时候是直接删除 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Forced deletion of files, unlink deletion of files on the mds side is not a + * true deletion, Instead, it was placed in the garbage collection bin, and when + * deleted using the DeleteForce interface, it was directly deleted + * @param: userinfo is the user information + * @param: filename The file name to be deleted + * @return: Successfully returned 0, + * Otherwise, it may return to -LIBCURVE_ERROR::FAILED, + 
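AioRead()/AioWrite() here return once the request is submitted and report completion through the context callback. The C API takes a CurveAioContext, whose definition is not part of these hunks, so the sketch below uses the python binding's AioContext from curvefs_python/curve_type.h shown earlier in this patch (the buf member is assumed; it is not visible in that hunk):

#include <atomic>
#include <cstdio>

#include "curvefs_python/curve_type.h"  // AioContext, CURVE_OP_READ
#include "curvefs_python/libcurvefs.h"  // AioRead

static std::atomic<bool> g_done{false};

static void OnAioDone(AioContext* ctx) {
    // ret carries the completion status of the request.
    std::printf("aio finished, ret = %d\n", ctx->ret);
    g_done.store(true, std::memory_order_release);
}

void ReadOneBlock(int fd, char* buf4k) {
    AioContext ctx{};
    ctx.offset = 0;
    ctx.length = 4096;       // keep IO_ALIGNED_BLOCK_SIZE alignment
    ctx.op = CURVE_OP_READ;
    ctx.cb = OnAioDone;
    ctx.buf = buf4k;         // assumed member, see note above
    if (AioRead(fd, &ctx) != 0) {
        std::printf("submit failed\n");
        return;
    }
    while (!g_done.load(std::memory_order_acquire)) {
        // Busy-wait for brevity; real callers block on a tracker like the
        // TaskTracker shown above.
    }
}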
* -LIBCURVE_ERROR::AUTHFAILED, etc */ int DeleteForce(const char* filename, const C_UserInfo_t* userinfo); @@ -239,96 +250,107 @@ int DeleteForce(const char* filename, const C_UserInfo_t* userinfo); * -LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED and so on */ int Recover(const char* filename, const C_UserInfo_t* userinfo, - uint64_t fileId); + uint64_t fileId); /** - * 在获取目录内容之前先打开文件夹 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @return: 成功返回一个非空的DirInfo_t指针,否则返回一个空指针 + * Open the folder before obtaining directory content + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @return: Successfully returned a non empty DirInfo_ Pointer t, otherwise + * return a null pointer */ DirInfo_t* OpenDir(const char* dirpath, const C_UserInfo_t* userinfo); /** - * 枚举目录内容, 用户OpenDir成功之后才能list - * @param[in][out]: dirinfo为OpenDir返回的指针, 内部会将mds返回的信息放入次结构中 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Enumerate directory contents, only after the user OpenDir is successful can + * they be listed + * @param[in][out]: dirinfo is the pointer returned by OpenDir, which internally + * places the information returned by mds into the substructures + * @return: Successfully returned 0, + * Otherwise, it may return to -LIBCURVE_ERROR::FAILED, + * -LIBCURVE_ERROR::AUTHFAILED, etc */ int Listdir(DirInfo_t* dirinfo); /** - * 关闭打开的文件夹 - * @param: dirinfo为opendir返回的dir信息 + * Close Open Folder + * @param: dirinfo is the dir information returned by opendir */ void CloseDir(DirInfo_t* dirinfo); /** - * 创建目录 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Create directory + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @return: Successfully returned 0, + * Otherwise, it may return to -LIBCURVE_ERROR::FAILED, + * -LIBCURVE_ERROR::AUTHFAILED, etc */ int Mkdir(const char* dirpath, const C_UserInfo_t* userinfo); /** - * 删除目录 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Delete directory + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @return: Successfully returned 0, + * Otherwise, it may return to -LIBCURVE_ERROR::FAILED, + * -LIBCURVE_ERROR::AUTHFAILED, etc */ int Rmdir(const char* dirpath, const C_UserInfo_t* userinfo); /** - * 获取文件信息 - * @param: filename文件名 - * @param: userinfo是用户信息 - * @param: finfo是出参,携带当前文件的基础信息 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Obtain file information + * @param: filename File name + * @param: userinfo is the user information + * @param: finfo is an output parameter that carries the basic information of + * the current file + * @return: Successfully returned 0, + * Otherwise, it may return to -LIBCURVE_ERROR::FAILED, + * -LIBCURVE_ERROR::AUTHFAILED, etc */ -int StatFile(const char* filename, - const C_UserInfo_t* userinfo, +int StatFile(const char* filename, const C_UserInfo_t* userinfo, FileStatInfo* finfo); /** - * 获取文件信息 - * @param: filename文件名 - * @param: finfo是出参,携带当前文件的基础信息 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Obtain file information + * @param: filename File name + * @param: finfo is an output parameter that carries the basic information of + * the current file + * @return: Successfully returned 0, + * Otherwise, it may return to 
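The OpenDir/Listdir/CloseDir trio above is the whole directory-listing lifecycle: Listdir() fills the fileStat array of dirSize entries inside the DirInfo_t that OpenDir() returned. A minimal usage sketch (the owner name is illustrative):

#include <cstdint>
#include <cstdio>
#include <cstring>

#include "include/client/libcurve.h"

int ListOneDir(const char* path) {
    C_UserInfo_t user;
    std::memset(&user, 0, sizeof(user));
    std::snprintf(user.owner, sizeof(user.owner), "%s", "test_user");

    DirInfo_t* dir = OpenDir(path, &user);
    if (dir == nullptr) {
        return -1;                    // OpenDir failed
    }
    if (Listdir(dir) != 0) {          // fills dir->fileStat / dir->dirSize
        CloseDir(dir);
        return -1;
    }
    // fileStat is the head of an array of dirSize entries (see DirInfo above).
    for (uint64_t i = 0; i < dir->dirSize; ++i) {
        std::printf("%s\n", dir->fileStat[i].filename);
    }
    CloseDir(dir);
    return 0;
}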
 /**
- * 变更owner
- * @param: filename待变更的文件名
- * @param: newOwner新的owner信息
- * @param: userinfo执行此操作的user信息,只有root用户才能执行变更
- * @return: 成功返回0,
- *          否则返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Change the owner
+ * @param: filename is the name of the file to be changed
+ * @param: newOwner is the new owner information
+ * @param: userinfo is the user information of the operator; only the root
+ * user can perform the change
+ * @return: returns 0 on success;
+ *          otherwise it may return -LIBCURVE_ERROR::FAILED,
+ *          -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
-int ChangeOwner(const char* filename,
-                const char* newOwner,
+int ChangeOwner(const char* filename, const char* newOwner,
                 const C_UserInfo_t* userinfo);

 /**
- * close通过fd找到对应的instance进行删除
- * @param: fd为当前open返回的文件描述符
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * close finds the corresponding instance through fd and removes it
+ * @param: fd is the file descriptor returned by the current open
+ * @return: returns 0 on success;
+ *          otherwise it may return -LIBCURVE_ERROR::FAILED,
+ *          -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
 int Close(int fd);

 void UnInit();

 /**
- * @brief: 获取集群id, id用UUID标识
- * @param: buf存放集群id
- * @param: buf的长度
- * @return: 成功返回0, 否则返回-LIBCURVE_ERROR::FAILED
+ * @brief: Obtain the cluster id; the id is a UUID
+ * @param: buf stores the cluster id
+ * @param: the length of buf
+ * @return: returns 0 on success, otherwise returns -LIBCURVE_ERROR::FAILED
 */
 int GetClusterId(char* buf, int len);

@@ -343,24 +365,23 @@ class FileClient;

 enum class UserDataType {
     RawBuffer,  // char*
-    IOBuffer    // butil::IOBuf*
+    IOBuffer    // butil::IOBuf*
 };

-// 存储用户信息
+// Stores user information
 typedef struct UserInfo {
-    // 当前执行的owner信息
+    // Owner information of the current operation
     std::string owner;
-    // 当owner=root的时候,需要提供password作为计算signature的key
+    // When owner is root, a password must be provided as the key for
+    // computing the signature
     std::string password;

     UserInfo() = default;

     UserInfo(const std::string& own, const std::string& pwd = "")
-        : owner(own), password(pwd) {}
+        : owner(own), password(pwd) {}

-    bool Valid() const {
-        return !owner.empty();
-    }
+    bool Valid() const { return !owner.empty(); }
 } UserInfo_t;

 inline bool operator==(const UserInfo& lhs, const UserInfo& rhs) {
@@ -380,14 +401,14 @@ class CurveClient {
     virtual ~CurveClient();

     /**
-     * 初始化
-     * @param configPath 配置文件路径
-     * @return 返回错误码
+     * Initialize
+     * @param configPath the configuration file path
+     * @return an error code
      */
     virtual int Init(const std::string& configPath);

     /**
-     * 反初始化
+     * Deinitialize
      */
     virtual void UnInit();

@@ -400,62 +421,59 @@ class CurveClient {
     virtual int IncreaseEpoch(const std::string& filename);

     /**
-     * 打开文件
-     * @param filename 文件名,格式为:文件名_用户名_
+     * Open a file
+     * @param filename the file name, in the format: filename_username_
      * @param[out] sessionId session Id
-     * @return 成功返回fd,失败返回-1
+     * @return the fd on success, -1 on failure
      */
-    virtual int Open(const std::string& filename,
-                     const OpenFlags& openflags);
+    virtual int Open(const std::string& filename, const OpenFlags& openflags);

     /**
-     * 重新打开文件
-     * @param filename 文件名,格式为:文件名_用户名_
+     * Reopen a file
+     * @param filename the file name, in the format: filename_username_
      * @param sessionId session Id
-     * @param[out] newSessionId reOpen之后的新sessionId
-     * @return 成功返回fd,失败返回-1
+     * @param[out] newSessionId the new sessionId after reOpen
+     * @return the fd on success, -1 on failure
      */
-    virtual int ReOpen(const std::string& filename,
-                       const OpenFlags& openflags);
+    virtual int ReOpen(const std::string& filename, const OpenFlags& openflags);
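The CurveClient class gathers these file-level operations behind a single object: Init loads the configuration, Open/ReOpen yield an fd, and the Close/UnInit calls declared just below release it. A minimal lifecycle sketch, assuming OpenFlags is default-constructible; the configuration path and volume name are placeholders.

#include <string>

#include "include/client/libcurve.h"

int CurveClientExample() {
    CurveClient client;
    if (client.Init("/etc/curve/client.conf") != 0) {  // load client config
        return -1;
    }
    OpenFlags flags;  // assumed: default flags are acceptable
    // Documented name format: filename_username_
    int fd = client.Open("/vol1_someuser_", flags);
    if (fd < 0) {
        client.UnInit();
        return -1;
    }
    // ... AioRead/AioWrite with a CurveAioContext would go here ...
    client.Close(fd);
    client.UnInit();
    return 0;
}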
     /**
-     * 关闭文件
-     * @param fd 文件fd
-     * @return 返回错误码
+     * Close a file
+     * @param fd the file fd
+     * @return an error code
      */
     virtual int Close(int fd);

     /**
-     * 扩展文件
-     * @param filename 文件名,格式为:文件名_用户名_
-     * @param newsize 扩展后的大小
-     * @return 返回错误码
+     * Extend a file
+     * @param filename the file name, in the format: filename_username_
+     * @param newsize the size after extension
+     * @return an error code
      */
-    virtual int Extend(const std::string& filename,
-                       int64_t newsize);
+    virtual int Extend(const std::string& filename, int64_t newsize);

     /**
-     * 获取文件大小
-     * @param fd 文件fd
-     * @return 返回错误码
+     * Get the file size
+     * @param fd the file fd
+     * @return an error code
      */
     virtual int64_t StatFile(int fd, FileStatInfo* fileStat);

     /**
-     * 异步读
-     * @param fd 文件fd
-     * @param aioctx 异步读写的io上下文
+     * Asynchronous read
+     * @param fd the file fd
+     * @param aioctx the asynchronous read/write IO context
      * @param dataType type of user buffer
-     * @return 返回错误码
+     * @return an error code
      */
     virtual int AioRead(int fd, CurveAioContext* aioctx, UserDataType dataType);

     /**
-     * 异步写
-     * @param fd 文件fd
-     * @param aioctx 异步读写的io上下文
+     * Asynchronous write
+     * @param fd the file fd
+     * @param aioctx the asynchronous read/write IO context
      * @param dataType type of user buffer
-     * @return 返回错误码
+     * @return an error code
      */
     virtual int AioWrite(int fd, CurveAioContext* aioctx, UserDataType dataType);

@@ -469,8 +487,8 @@ class CurveClient {
     virtual int AioDiscard(int fd, CurveAioContext* aioctx);

     /**
-     * 测试使用,设置fileclient
-     * @param client 需要设置的fileclient
+     * For test use: set the fileclient
+     * @param client the fileclient to set
      */
     void SetFileClient(FileClient* client);

diff --git a/include/etcdclient/etcdclient.h b/include/etcdclient/etcdclient.h
index 42f63a7436..b3ce392aba 100644
--- a/include/etcdclient/etcdclient.h
+++ b/include/etcdclient/etcdclient.h
@@ -18,7 +18,6 @@

 /* package command-line-arguments */

-
 #line 1 "cgo-builtin-export-prolog"

 #include <stddef.h>  /* for ptrdiff_t below */

@@ -27,21 +26,22 @@
 #define GO_CGO_EXPORT_PROLOGUE_H

 #ifndef GO_CGO_GOSTRING_TYPEDEF
-typedef struct { const char *p; ptrdiff_t n; } _GoString_;
+typedef struct {
+    const char* p;
+    ptrdiff_t n;
+} _GoString_;
 #endif

 #endif

 /* Start of preamble from import "C" comments. */

-
 #line 19 "etcdclient.go"

 #include <stdlib.h>

-enum EtcdErrCode
-{
-    // grpc errCode, 具体的含义见:
+enum EtcdErrCode {
+    // grpc errCode; for the specific meanings, see:
     // https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes#ErrGRPCNoSpace
     // https://godoc.org/google.golang.org/grpc/codes#Code
     EtcdOK = 0,
@@ -62,7 +62,7 @@ enum EtcdErrCode
     EtcdDataLoss = 15,
     EtcdUnauthenticated = 16,

-    // 自定义错误码
+    // Custom error codes
     EtcdTxnUnkownOp = 17,
     EtcdObjectNotExist = 18,
     EtcdErrObjectType = 19,
@@ -79,31 +79,26 @@ enum EtcdErrCode
     EtcdObjectLenNotEnough = 30,
 };

-enum OpType {
-    OpPut = 1,
-    OpDelete = 2
-};
+enum OpType { OpPut = 1, OpDelete = 2 };

 struct EtcdConf {
-    char *Endpoints;
+    char* Endpoints;
     int len;
     int DialTimeout;
 };

 struct Operation {
     enum OpType opType;
-    char *key;
-    char *value;
+    char* key;
+    char* value;
     int keyLen;
     int valueLen;
 };

 #line 1 "cgo-generated-wrapper"

-
 /* End of preamble from import "C" comments. */

-
 /* Start of boilerplate cgo prologue.
*/ #line 1 "cgo-gcc-export-header-prolog" @@ -130,15 +125,23 @@ typedef double _Complex GoComplex128; static assertion to make sure the file is being used on architecture at least with matching size of GoInt. */ -typedef char _check_for_64_bit_pointer_matching_GoInt[sizeof(void*)==64/8 ? 1:-1]; +typedef char + _check_for_64_bit_pointer_matching_GoInt[sizeof(void*) == 64 / 8 ? 1 : -1]; #ifndef GO_CGO_GOSTRING_TYPEDEF typedef _GoString_ GoString; #endif -typedef void *GoMap; -typedef void *GoChan; -typedef struct { void *t; void *v; } GoInterface; -typedef struct { void *data; GoInt len; GoInt cap; } GoSlice; +typedef void* GoMap; +typedef void* GoChan; +typedef struct { + void* t; + void* v; +} GoInterface; +typedef struct { + void* data; + GoInt len; + GoInt cap; +} GoSlice; #endif @@ -148,8 +151,7 @@ typedef struct { void *data; GoInt len; GoInt cap; } GoSlice; extern "C" { #endif - -// TODO(lixiaocui): 日志打印看是否需要glog +// TODO(lixiaocui): Log printing to see if glog is required extern GoUint32 NewEtcdClientV3(struct EtcdConf p0); @@ -159,66 +161,77 @@ extern GoUint32 EtcdClientPut(int p0, char* p1, char* p2, int p3, int p4); /* Return type for EtcdClientPutRewtihRevision */ struct EtcdClientPutRewtihRevision_return { - GoUint32 r0; - GoInt64 r1; + GoUint32 r0; + GoInt64 r1; }; -extern struct EtcdClientPutRewtihRevision_return EtcdClientPutRewtihRevision(int p0, char* p1, char* p2, int p3, int p4); +extern struct EtcdClientPutRewtihRevision_return EtcdClientPutRewtihRevision( + int p0, char* p1, char* p2, int p3, int p4); /* Return type for EtcdClientGet */ struct EtcdClientGet_return { - GoUint32 r0; - char* r1; - GoInt r2; - GoInt64 r3; + GoUint32 r0; + char* r1; + GoInt r2; + GoInt64 r3; }; extern struct EtcdClientGet_return EtcdClientGet(int p0, char* p1, int p2); /* Return type for EtcdClientList */ struct EtcdClientList_return { - GoUint32 r0; - GoUint64 r1; - GoInt64 r2; + GoUint32 r0; + GoUint64 r1; + GoInt64 r2; }; -// TODO(lixiaocui): list可能需要有长度限制 +// TODO(lixiaocui): list may require a length limit -extern struct EtcdClientList_return EtcdClientList(int p0, char* p1, char* p2, int p3, int p4); +extern struct EtcdClientList_return EtcdClientList(int p0, char* p1, char* p2, + int p3, int p4); /* Return type for EtcdClientListWithLimitAndRevision */ struct EtcdClientListWithLimitAndRevision_return { - GoUint32 r0; - GoUint64 r1; - GoInt r2; - GoInt64 r3; + GoUint32 r0; + GoUint64 r1; + GoInt r2; + GoInt64 r3; }; -extern struct EtcdClientListWithLimitAndRevision_return EtcdClientListWithLimitAndRevision(unsigned int p0, char* p1, char* p2, int p3, int p4, GoInt64 p5, GoInt64 p6); +extern struct EtcdClientListWithLimitAndRevision_return +EtcdClientListWithLimitAndRevision(unsigned int p0, char* p1, char* p2, int p3, + int p4, GoInt64 p5, GoInt64 p6); extern GoUint32 EtcdClientDelete(int p0, char* p1, int p2); /* Return type for EtcdClientDeleteRewithRevision */ struct EtcdClientDeleteRewithRevision_return { - GoUint32 r0; - GoInt64 r1; + GoUint32 r0; + GoInt64 r1; }; -extern struct EtcdClientDeleteRewithRevision_return EtcdClientDeleteRewithRevision(int p0, char* p1, int p2); +extern struct EtcdClientDeleteRewithRevision_return +EtcdClientDeleteRewithRevision(int p0, char* p1, int p2); -extern GoUint32 EtcdClientTxn2(int p0, struct Operation p1, struct Operation p2); +extern GoUint32 EtcdClientTxn2(int p0, struct Operation p1, + struct Operation p2); -extern GoUint32 EtcdClientTxn3(int p0, struct Operation p1, struct Operation p2, struct Operation p3); +extern GoUint32 
EtcdClientTxn3(int p0, struct Operation p1, struct Operation p2, + struct Operation p3); -extern GoUint32 EtcdClientCompareAndSwap(int p0, char* p1, char* p2, char* p3, int p4, int p5, int p6); +extern GoUint32 EtcdClientCompareAndSwap(int p0, char* p1, char* p2, char* p3, + int p4, int p5, int p6); /* Return type for EtcdElectionCampaign */ struct EtcdElectionCampaign_return { - GoUint32 r0; - GoUint64 r1; + GoUint32 r0; + GoUint64 r1; }; -extern struct EtcdElectionCampaign_return EtcdElectionCampaign(char* p0, int p1, char* p2, int p3, GoUint32 p4, GoUint32 p5); +extern struct EtcdElectionCampaign_return EtcdElectionCampaign(char* p0, int p1, + char* p2, int p3, + GoUint32 p4, + GoUint32 p5); extern GoUint32 EtcdLeaderObserve(GoUint64 p0, char* p1, int p2); @@ -226,23 +239,25 @@ extern GoUint32 EtcdLeaderResign(GoUint64 p0, GoUint64 p1); /* Return type for EtcdClientGetSingleObject */ struct EtcdClientGetSingleObject_return { - GoUint32 r0; - char* r1; - GoInt r2; + GoUint32 r0; + char* r1; + GoInt r2; }; -extern struct EtcdClientGetSingleObject_return EtcdClientGetSingleObject(GoUint64 p0); +extern struct EtcdClientGetSingleObject_return EtcdClientGetSingleObject( + GoUint64 p0); /* Return type for EtcdClientGetMultiObject */ struct EtcdClientGetMultiObject_return { - GoUint32 r0; - char* r1; - GoInt r2; - char* r3; - GoInt r4; + GoUint32 r0; + char* r1; + GoInt r2; + char* r3; + GoInt r4; }; -extern struct EtcdClientGetMultiObject_return EtcdClientGetMultiObject(GoUint64 p0, GoInt p1); +extern struct EtcdClientGetMultiObject_return EtcdClientGetMultiObject( + GoUint64 p0, GoInt p1); extern void EtcdClientRemoveObject(GoUint64 p0); diff --git a/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf b/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf index 71ca380f13..8bc37cb542 100644 --- a/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf +++ b/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf @@ -1,28 +1,28 @@ # part2 socket file address nebdserver.serverAddress=/var/lib/nebd/nebd.sock -# 文件锁路径 +# File lock path metacache.fileLockPath=/var/lib/nebd/lock -# 同步rpc的最大重试次数 +# Maximum number of retries for synchronous rpc request.syncRpcMaxRetryTimes=50 -# rpc请求的重试间隔 +# The retry interval for rpc requests request.rpcRetryIntervalUs=100000 -# rpc请求的最大重试间隔 +# Maximum retry interval for rpc requests request.rpcRetryMaxIntervalUs=64000000 -# rpc hostdown情况下的重试时间 +# The retry time in the case of rpc hostdown request.rpcHostDownRetryIntervalUs=10000 -# brpc的健康检查周期时间,单位s +# The health check cycle time of brpc, in seconds request.rpcHealthCheckIntervalS=1 -# brpc从rpc失败到进行健康检查的最大时间间隔,单位ms +# The maximum time interval from rpc failure to health check in ms for brpc request.rpcMaxDelayHealthCheckIntervalMs=100 -# rpc发送执行队列个数 +# Number of RPC send execution queues request.rpcSendExecQueueNum=2 -# heartbeat间隔 +# heartbeat interval heartbeat.intervalS=5 -# heartbeat rpc超时时间 +# heartbeat RPC timeout heartbeat.rpcTimeoutMs=500 -# 日志路径 +# Log Path log.path=/var/log/nebd/client diff --git a/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf b/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf index b03e7a25c6..4dcb28c7e6 100644 --- a/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf +++ b/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf @@ -1,14 +1,14 @@ -# curve-client配置文件地址 +# curve-client configuration file address curveclient.confPath=/etc/curve/client.conf -#brpc server监听端口 +# brpc server listening port listen.address=/var/lib/nebd/nebd.sock -#元数据文件地址,包含文件名 +# Metadata file address, including file name 
meta.file.path=/var/lib/nebd/nebdserver.meta

-#心跳超时时间
+# Heartbeat timeout
 heartbeat.timeout.sec=30

-#文件超时检测时间间隔
+# File timeout detection interval
 heartbeat.check.interval.ms=3000
\ No newline at end of file
diff --git a/mk-deb.sh b/mk-deb.sh
index 5e3e3a7935..074b4e35ac 100755
--- a/mk-deb.sh
+++ b/mk-deb.sh
@@ -24,7 +24,7 @@ set -o errexit
 dir=$(pwd)

-# step1 清除生成的目录和文件
+# Step1 Clean up generated directories and files
 bazel clean

 cleandir=(
@@ -42,15 +42,15 @@ rm -rf "${cleandir[@]}"

 git submodule update --init

-# step2 获取tag版本和git提交版本信息
-# 获取tag版本
+# Step2 Obtain the tag version and git commit information
+# Get the tag version
 tag_version=$(git status | grep -Ew "HEAD detached at|On branch" | awk '{print $NF}' | awk -F"v" '{print $2}')
 if [ -z ${tag_version} ]; then
     echo "not found version info, set version to 9.9.9"
     tag_version=9.9.9
 fi

-# 获取git提交版本信息
+# Obtain the git commit information
 commit_id=$(git rev-parse --short HEAD)
 if [ "$1" = "debug" ]; then
     debug="+debug"
@@ -129,7 +129,7 @@ function build_curvefs_python() {
     done
 }

-# step3 执行编译
+# Step3 Run the compilation
 bazel_version=$(bazel version | grep "Build label" | awk '{print $3}')
 if [ -z ${bazel_version} ]; then
     echo "please install bazel 4.2.2 first"
@@ -224,7 +224,7 @@ else
 fi
 echo "end compile"

-#step4 创建临时目录,拷贝二进制、lib库和配置模板
+# Step4 Create a temporary directory and copy binaries, libraries, and configuration templates
 mkdir build
 cp -r curve-mds build/
 cp -r curve-chunkserver build/
@@ -315,7 +315,7 @@ cp -r k8s/nbd/nbd-package build/k8s-nbd-package
 mkdir -p build/k8s-nbd-package/usr/bin
 cp bazel-bin/nbd/src/curve-nbd build/k8s-nbd-package/usr/bin

-#step5 记录到debian包的配置文件,打包debian包
+# Step5 Write the Debian package control files and build the Debian packages
 debian_version=$(grep VERSION_ID /etc/os-release | cut -d '=' -f 2 | tr -d '"')
 version="Version: ${curve_version}+deb${debian_version}"

@@ -343,10 +343,10 @@ dpkg-deb -b build/k8s-nebd-package .
 dpkg-deb -b build/nbd-package .
 dpkg-deb -b build/k8s-nbd-package .
-# step6 清理libetcdclient.so编译出现的临时文件
+# Step6 Clean up the temporary files produced by the libetcdclient.so build
 cd ${dir}/thirdparties/etcdclient
 make clean
 cd ${dir}

-# step7 打包python wheel
+# Step7 Build the python wheel
 build_curvefs_python $1
diff --git a/mk-tar.sh b/mk-tar.sh
index 0bb25540c2..fb5588b98e 100755
--- a/mk-tar.sh
+++ b/mk-tar.sh
@@ -18,7 +18,7 @@
 dir=$(pwd)

-# step1 清除生成的目录和文件
+# Step1 Clean up generated directories and files
 bazel clean

 cleandir=(
@@ -36,15 +36,15 @@ rm -rf "${cleandir[@]}"

 git submodule update --init

-# step2 获取tag版本和git提交版本信息
-# 获取tag版本
+# Step2 Obtain the tag version and git commit information
+# Get the tag version
 tag_version=$(git status | grep -Ew "HEAD detached at|On branch" | awk '{print $NF}' | awk -F"v" '{print $2}')
 if [ -z ${tag_version} ]; then
     echo "not found version info, set version to 9.9.9"
     tag_version=9.9.9
 fi

-# 获取git提交版本信息
+# Obtain the git commit information
 commit_id=$(git rev-parse --short HEAD)
 if [ "$1" = "debug" ]; then
     debug="+debug"
@@ -123,7 +123,7 @@ function build_curvefs_python() {
     done
 }

-# step3 执行编译
+# Step3 Run the compilation
 bazel_version=$(bazel version | grep "Build label" | awk '{print $3}')
 if [ -z ${bazel_version} ]; then
     echo "please install bazel 4.2.2 first"
@@ -218,7 +218,7 @@ else
 fi
 echo "end compile"

-#step4 创建临时目录,拷贝二进制、lib库和配置模板
+# Step4 Create a temporary directory and copy binaries, libraries, and configuration templates
 echo "start copy"
 mkdir -p build/curve/
 # curve-mds
@@ -299,7 +299,7 @@ cp nbd/nbd-package/usr/bin/map_curve_disk.sh build/nbd-package/bin
 cp nbd/nbd-package/etc/curve/curvetab build/nbd-package/etc
 cp nbd/nbd-package/etc/systemd/system/map_curve_disk.service build/nbd-package/etc

-# step5 打包tar包
+# Step5 Build the tar packages
 echo "start make tarball"
 cd ${dir}/build
 curve_name="curve_${curve_version}.tar.gz"
@@ -320,14 +320,14 @@ tar zcf ${nbd_name} nbd-package
 cp ${nbd_name} $dir
 echo "end make tarball"

-# step6 清理libetcdclient.so编译出现的临时文件
+# Step6 Clean up the temporary files produced by the libetcdclient.so build
 echo "start clean etcd"
 cd ${dir}/thirdparties/etcdclient
 make clean
 cd ${dir}
 echo "end clean etcd"

-# step7 打包python wheel
+# Step7 Build the python wheel
 echo "start make python wheel"
 build_curvefs_python $1
 echo "end make python wheel"
diff --git a/monitor/grafana-report.py b/monitor/grafana-report.py
index a400263e8c..0170470996 100644
--- a/monitor/grafana-report.py
+++ b/monitor/grafana-report.py
@@ -13,17 +13,18 @@
 sender = 'Grafana'
 to_address = ['xxxxxxxxx@163.com']
 username = 'xxxxxxxxx@163.com'
-password = 'xxxxxxxxx' # SMTP授权码
+password = 'xxxxxxxxx'  # SMTP authorization code
 smtpserver = 'xxxx.163.com:1234'
-sourcefile= '/etc/curve/monitor/grafana/report/report.tex'
-imagedir= '/etc/curve/monitor/grafana/report/images/'
-pdfpath= '/etc/curve/monitor/grafana/report/report.pdf'
+sourcefile = '/etc/curve/monitor/grafana/report/report.tex'
+imagedir = '/etc/curve/monitor/grafana/report/images/'
+pdfpath = '/etc/curve/monitor/grafana/report/report.pdf'
 clustername = '【CURVE】xxxxxxxxx'
 grafanauri = '127.0.0.1:3000'
 reporteruri = '127.0.0.1:8686'
 dashboardid = 'xxxxxxxxx'
 apitoken = 'xxxxxxxxx'

+
 def get_images():
     image_name_list = []
     file = open(sourcefile, 'r')
@@ -32,16 +33,17 @@ def get_images():
         # print (line)
         prefix_image_name = re.findall(r'image\d+', line)
         if prefix_image_name:
-            print (prefix_image_name)
+            print(prefix_image_name)
             image_name_list.append(prefix_image_name[0])
         line = file.readline()
     file.close()

     return
image_name_list + def getMsgImage(image_name): file_name = imagedir+image_name+'.png' - print (file_name) + print(file_name) fp = open(file_name, 'rb') msgImage = MIMEImage(fp.read()) fp.close() @@ -49,6 +51,7 @@ def getMsgImage(image_name): msgImage.add_header("Content-Disposition", "inline", filename=file_name) return msgImage + def attach_body(msgRoot): image_list = get_images() @@ -57,36 +60,41 @@ def attach_body(msgRoot): image_body += ('%s' % (image, image)) msgRoot.attach(getMsgImage(image)) - html_str = '%s' % (image_body) + html_str = '%s' % ( + image_body) mailMsg = """ -

-可点击如下链接在grafana面板中查看(若显示混乱,请在附件pdf中查看)
-
-grafana链接
-
+
+You can click the link below to view the report in the Grafana dashboard (if the layout looks broken, please check the attached PDF instead)
+
+grafana link

""" % (grafanauri) mailMsg += html_str print(mailMsg) - content = MIMEText(mailMsg,'html','utf-8') + content = MIMEText(mailMsg, 'html', 'utf-8') msgRoot.attach(content) -# 发送dashboard日报邮件 +# Send dashboard daily email + + def send_mail(): time_now = int(Time.time()) time_local = Time.localtime(time_now) - dt = Time.strftime("%Y%m%d",time_local) + dt = Time.strftime("%Y%m%d", time_local) msgRoot = MIMEMultipart('related') - msgRoot['Subject'] = '%s集群监控日报-%s' % (clustername, dt) + msgRoot['Subject'] = '%sCluster Monitoring Daily Report-%s' % ( + clustername, dt) msgRoot['From'] = sender - msgRoot['To'] = ",".join( to_address ) # 发给多人 + msgRoot['To'] = ",".join(to_address) # Send to multiple people - # 添加pdf附件 + # Add PDF attachment pdf_attach = MIMEText(open(pdfpath, 'rb').read(), 'base64', 'utf-8') pdf_attach["Content-Type"] = 'application/octet-stream' - # 这里的filename可以任意写,写什么名字,邮件中显示什么名字 - pdf_attach["Content-Disposition"] = 'attachment; filename="reporter-{}.pdf"'.format(dt) + # The file name here can be written arbitrarily, including the name you want to write and the name displayed in the email + pdf_attach["Content-Disposition"] = 'attachment; filename="reporter-{}.pdf"'.format( + dt) msgRoot.attach(pdf_attach) - # 添加正文 + # Add Body attach_body(msgRoot) smtp = smtplib.SMTP_SSL(smtpserver) @@ -94,11 +102,13 @@ def send_mail(): smtp.sendmail(sender, to_address, msgRoot.as_string()) smtp.quit() + def clear(): shutil.rmtree(imagedir) os.mkdir(imagedir) os.chmod(imagedir, 0777) + def generate_report(): downloadcmd = ( "wget -O %s " @@ -108,10 +118,12 @@ def generate_report(): print(downloadcmd) os.system(downloadcmd) + def main(): generate_report() send_mail() clear() + if __name__ == '__main__': main() diff --git a/monitor/grafana/dashboards/chunkserver.json b/monitor/grafana/dashboards/chunkserver.json index 2770cd2802..e48e7a0721 100644 --- a/monitor/grafana/dashboards/chunkserver.json +++ b/monitor/grafana/dashboards/chunkserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process running time", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -218,7 +218,7 @@ } } ], - "title": "进程资源占用", + "title": "Process resource usage", "type": "row" }, { @@ -237,7 +237,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver上所有rpc的每秒处理成功的请求个数", + "description": "The number of successful requests processed per second for all RPCs on the chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -410,7 +410,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在rpc层面的错误个数", + "description": "The number of errors per second at the RPC level for the read_chunk operation", "fill": 1, "gridPos": { "h": 6, @@ -675,7 +675,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "write_chunk每秒在rpc层面的错误个数", + "description": "The number of errors per second at the RPC level for the write_chunk operation", "fill": 1, "gridPos": { "h": 6, @@ -1027,7 +1027,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "rpc层面read chunk延时的分位值", + "description": "Percentile values of RPC-level read chunk latency", "fill": 1, "gridPos": { "h": 7, @@ -1281,7 +1281,7 @@ } } ], - "title": "rpc层读写指标", + "title": "RPC layer read-write metrics", "type": "row" }, { @@ -1300,7 +1300,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在chunk service层面的错误个数", + "description": "Number 
of errors per second for read_chunk at the chunk service layer", "fill": 1, "gridPos": { "h": 7, @@ -1392,7 +1392,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "Number of read_chunk operations successfully processed per second at the chunk service layer", "fill": 1, "gridPos": { "h": 7, @@ -1484,7 +1484,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的read_chunk请求个数", + "description": "Number of read_chunk requests received per second at the chunk service layer.", "fill": 1, "gridPos": { "h": 7, @@ -1576,7 +1576,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面write_chunk每秒返回错误的请求个数", + "description": "The number of requests per second that the chunk service level write_chunk returns errors", "fill": 1, "gridPos": { "h": 7, @@ -1668,7 +1668,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -1762,7 +1762,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的write_chunk请求个数", + "description": "The number of Write_chunk requests received by the chunk service layer per second", "fill": 1, "gridPos": { "h": 7, @@ -1854,7 +1854,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk延时的分位值", + "description": "The percentile value of read chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -1965,7 +1965,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面write chunk延时的分位值", + "description": "The percentile value of write chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2076,7 +2076,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面的read chunk的平均延时", + "description": "Average latency of read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2166,7 +2166,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面的write chunk的平均延时", + "description": "Average latency of write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2256,7 +2256,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2346,7 +2346,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2436,7 +2436,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk的io大小的分位值", + "description": "The quantile value of the IO size of the read chunk at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2541,7 +2541,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面write 
chunk的io大小的分位值", + "description": "The quantile value of IO size for write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2642,7 +2642,7 @@ } ], "repeat": null, - "title": "chunkserver层读写指标", + "title": "Chunkserver layer read and write metrics", "type": "row" }, { @@ -2664,7 +2664,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的数量", + "description": "Number of selected copysets", "format": "none", "gauge": { "maxValue": 100, @@ -2747,7 +2747,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的chunk数量的总和", + "description": "The total number of chunks in the selected copyset", "format": "none", "gauge": { "maxValue": 100, @@ -2828,7 +2828,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "各copyset上已分配的chunk的数量", + "description": "The number of allocated chunks on each copyset", "fill": 1, "gridPos": { "h": 6, @@ -2920,7 +2920,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的read chunk请求个数", + "description": "The number of read chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3012,7 +3012,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7, @@ -3104,7 +3104,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的read chunk请求个数", + "description": "The number of read chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7, @@ -3196,7 +3196,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的write chunk请求个数", + "description": "The number of write chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3288,7 +3288,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7, @@ -3380,7 +3380,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的write chunk请求个数", + "description": "The number of write chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7, @@ -3472,7 +3472,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3564,7 +3564,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3656,7 +3656,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的read chunk的平均延时", + "description": "Average latency of read chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3746,7 +3746,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的write chunk的平均延时", + "description": "Average latency of write chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3831,7 +3831,7 @@ } } ], - 
"title": "copyset指标", + "title": "Copyset metric", "type": "row" }, { @@ -3850,7 +3850,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -3942,7 +3942,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -4033,7 +4033,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上copyset的数量", + "description": "Number of copysets on chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -4119,7 +4119,7 @@ } } ], - "title": "chunkserver关键指标", + "title": "Chunkserver Key Metrics", "type": "row" }, { @@ -4773,7 +4773,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -4860,7 +4860,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, @@ -5121,7 +5121,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "follower正在下载快照的任务数量(一个copyset最多一个任务)", + "description": "Number of tasks being downloaded by the follower (one copyset can only have one task)", "fill": 1, "gridPos": { "h": 8, @@ -5204,7 +5204,7 @@ } } ], - "title": "Raft关键指标", + "title": "Raft Key Metrics", "type": "row" }, { @@ -5228,7 +5228,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "chunkserver上bthread worker的数量", + "description": "Number of bthread workers on chunkserver", "format": "none", "gauge": { "maxValue": 100, @@ -5311,7 +5311,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上正在被使用的工作线程个数", + "description": "The number of worker threads currently in use on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5399,7 +5399,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上bthread的数量", + "description": "Number of bthreads on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5487,7 +5487,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上execution queue的数量", + "description": "Number of execution queues on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5585,7 +5585,7 @@ } } ], - "title": "线程指标", + "title": "Thread metrics", "type": "row" } ], @@ -5605,7 +5605,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"([[ip:pipe]]):[[port:regex]]\"}", "hide": 2, "includeAll": false, - "label": "实例", + "label": "Instance", "multi": true, "name": "instance", "options": [], @@ -5630,7 +5630,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": true, "name": "ip", "options": [], @@ -5655,7 +5655,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"$ip.*\"}", "hide": 0, "includeAll": false, - "label": "端口号", + "label": "Port", "multi": true, "name": "port", "options": [], diff --git a/monitor/grafana/dashboards/client.json b/monitor/grafana/dashboards/client.json index a7274595c3..6efc67c597 100644 --- a/monitor/grafana/dashboards/client.json +++ b/monitor/grafana/dashboards/client.json @@ -101,7 +101,7 @@ "thresholds": "1,2", 
"timeFrom": null, "timeShift": null, - "title": "客户端运行时间", + "title": "Client runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -200,7 +200,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -215,7 +215,7 @@ "panels": [ { "columns": [], - "description": "客户端的配置情况", + "description": "Configuration of the client", "fontSize": "100%", "gridPos": { "h": 8, @@ -235,7 +235,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -251,7 +251,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -267,7 +267,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -309,12 +309,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "客户端配置", + "title": "Client Configuration", "transform": "table", "type": "table" } ], - "title": "客户端配置", + "title": "Client Configuration", "type": "row" }, { @@ -681,7 +681,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -769,7 +769,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒写入字节数", + "description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1129,7 +1129,7 @@ } } ], - "title": "用户接口层指标", + "title": "User Interface Layer Metrics", "type": "row" }, { @@ -1236,7 +1236,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1323,7 +1323,7 @@ } } ], - "title": "中间业务层指标", + "title": "Intermediate Business Layer Indicators", "type": "row" }, { @@ -1982,7 +1982,7 @@ } } ], - "title": "rpc层指标", + "title": "Rpc layer metrics", "type": "row" }, { @@ -2085,7 +2085,7 @@ } } ], - "title": "与MDS通信指标", + "title": "Communication metrics with MDS", "type": "row" } ], @@ -2108,7 +2108,7 @@ "definition": "label_values({__name__=~\"curve_client.*\", instance=~\".*:90.*\"}, instance)", "hide": 0, "includeAll": true, - "label": "客户端", + "label": "Client", "multi": true, "name": "client", "options": [], @@ -2136,7 +2136,7 @@ "definition": "{__name__=~\"curve_client.*write_qps\", instance=~\"$client\"}", "hide": 0, "includeAll": true, - "label": "文件", + "label": "File", "multi": true, "name": "file", "options": [], diff --git a/monitor/grafana/dashboards/etcd.json b/monitor/grafana/dashboards/etcd.json index 82869aa08a..d1a87934bc 100644 --- a/monitor/grafana/dashboards/etcd.json +++ b/monitor/grafana/dashboards/etcd.json @@ -2464,7 +2464,7 @@ "definition": "etcd_server_has_leader", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/dashboards/mds.json b/monitor/grafana/dashboards/mds.json index c226cf398d..9704ae6e32 100644 --- a/monitor/grafana/dashboards/mds.json +++ b/monitor/grafana/dashboards/mds.json @@ -115,7 +115,7 @@ "panels": [ { "columns": [], - "description": "mds的配置", + "description": "Configuration of mds", "fontSize": "100%", "gridPos": { "h": 11, @@ -135,7 +135,7 @@ }, "styles": [ { - "alias": "实例", + "alias": 
"Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -151,7 +151,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -167,7 +167,7 @@ "unit": "short" }, { - "alias": "配置值", + "alias": "Configuration Values", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -208,12 +208,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "mds配置", + "title": "Mds configuration", "transform": "table", "type": "table" } ], - "title": "mds配置", + "title": "Mds configuration", "type": "row" }, { @@ -228,7 +228,7 @@ "panels": [ { "cacheTimeout": null, - "description": "磁盘剩余容量", + "description": "Disk remaining capacity", "gridPos": { "h": 7, "w": 4, @@ -283,7 +283,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配的磁盘容量,物理空间", + "description": "Cluster allocated disk capacity, physical space", "gridPos": { "h": 7, "w": 4, @@ -338,7 +338,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配容量,逻辑空间", + "description": "Cluster allocated capacity, logical space", "gridPos": { "h": 7, "w": 4, @@ -393,7 +393,7 @@ }, { "cacheTimeout": null, - "description": "集群总容量", + "description": "Total Cluster Capacity", "gridPos": { "h": 7, "w": 4, @@ -510,7 +510,7 @@ "type": "gauge" } ], - "title": "集群信息", + "title": "Cluster Information", "type": "row" }, { @@ -523,7 +523,7 @@ }, "id": 22, "panels": [], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -598,7 +598,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -679,7 +679,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "进程cpu使用情况", + "title": "Process CPU Usage", "tooltip": { "shared": true, "sort": 0, @@ -763,7 +763,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "内存占用", + "title": "Memory usage", "tooltip": { "shared": true, "sort": 0, @@ -847,7 +847,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "mds元数据缓存大小", + "title": "Mds metadata cache size", "tooltip": { "shared": true, "sort": 0, @@ -896,7 +896,7 @@ "panels": [ { "columns": [], - "description": "逻辑池监控指标", + "description": "Logical Pool Monitoring Metrics", "fontSize": "100%", "gridPos": { "h": 8, @@ -916,7 +916,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -932,7 +932,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -978,7 +978,7 @@ "type": "table" } ], - "title": "逻辑池状态", + "title": "Logical Pool Status", "type": "row" }, { @@ -1082,7 +1082,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的leader count", + "description": "The current leader count of all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1173,7 +1173,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的copyset数量", + "description": "The current number of copysets for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1264,7 +1264,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的scatterwidth", + "description": "The current scatterwidth of all chunkservers", "fill": 1, "gridPos": { "h": 11, @@ -1355,7 +1355,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": 
"所有chunkserver当前的rpc层写请求速率", + "description": "Current RPC layer write request rate for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1446,7 +1446,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层写请求iops", + "description": "Current rpc layer write requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1537,7 +1537,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求速率", + "description": "Current RPC layer read request rate for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1628,7 +1628,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求iops", + "description": "Current rpc layer read requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1714,7 +1714,7 @@ } } ], - "title": "chunkserver状态", + "title": "Chunkserver Status", "type": "row" }, { @@ -2233,7 +2233,7 @@ } } ], - "title": "调度监控", + "title": "Scheduling Monitoring", "type": "row" }, { @@ -2251,7 +2251,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "所有rpc请求的qps", + "description": "QPS for all rpc requests", "fill": 1, "gridPos": { "h": 8, @@ -2338,7 +2338,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "当前MDS上所有inflight的rpc请求个数", + "description": "The number of rpc requests for all inflight on the current MDS", "fill": 1, "gridPos": { "h": 8, @@ -2431,7 +2431,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -2519,7 +2519,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -2604,7 +2604,7 @@ } } ], - "title": "RPC层指标", + "title": "RPC Layer Metrics", "type": "row" }, { @@ -2622,7 +2622,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "每秒成功处理的heartbeat个数", + "description": "The number of heartbeat successfully processed per second", "fill": 1, "gridPos": { "h": 8, @@ -2709,7 +2709,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat当前inflight的请求个数", + "description": "The current number of inflight requests for heartbeat", "fill": 1, "gridPos": { "h": 8, @@ -2803,7 +2803,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求平均延时", + "description": "Average latency of heartbeat requests", "fill": 1, "gridPos": { "h": 7, @@ -2891,7 +2891,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求延时分位图", + "description": "Heartbeat Request Delay Bitmap", "fill": 1, "gridPos": { "h": 7, @@ -2974,7 +2974,7 @@ } } ], - "title": "HeartBeat指标", + "title": "HeartBeat metric", "type": "row" } ], @@ -2994,7 +2994,7 @@ "definition": "rpc_server_6666_curve_mds_curve_fsservice_create_file", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/dashboards/report.json b/monitor/grafana/dashboards/report.json index 4e26169ddb..f6539e34c4 100644 --- a/monitor/grafana/dashboards/report.json +++ b/monitor/grafana/dashboards/report.json @@ -224,7 +224,7 @@ }, { "columns": [], - "description": "copyset数量监控指标", + "description": "Copyset quantity monitoring indicator", "fontSize": 
"100%", "gridPos": { "h": 8, @@ -244,7 +244,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "pattern": "__name__", @@ -252,7 +252,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -299,7 +299,7 @@ }, { "columns": [], - "description": "leader数量监控指标", + "description": "Leader quantity monitoring indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -319,7 +319,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -335,7 +335,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -382,7 +382,7 @@ }, { "columns": [], - "description": "scatterwidth指标", + "description": "Scatterwidth indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -402,7 +402,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, @@ -411,7 +411,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -470,7 +470,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -559,7 +559,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -834,7 +834,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1095,7 +1095,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒写入字节数", + "description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1268,7 +1268,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "gridPos": { "h": 8, @@ -1444,7 +1444,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "client单位大小的平均latency", + "description": "Average latency of client unit size", "fill": 1, "gridPos": { "h": 8, @@ -1747,7 +1747,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -1839,7 +1839,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -1938,7 +1938,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2032,7 +2032,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2209,7 +2209,7 @@ 
"dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2301,7 +2301,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2478,7 +2478,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver read chunk 单位大小内的平均延时", + "description": "Average latency per chunkserver read chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -2572,7 +2572,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver write chunk 单位大小内的平均延时", + "description": "Average latency in chunkserver write chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -3023,7 +3023,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -3203,7 +3203,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, diff --git a/monitor/grafana/dashboards/snapshotcloneserver.json b/monitor/grafana/dashboards/snapshotcloneserver.json index 3382ca3c84..7eaab10890 100644 --- a/monitor/grafana/dashboards/snapshotcloneserver.json +++ b/monitor/grafana/dashboards/snapshotcloneserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -161,7 +161,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "cpu使用率", + "title": "CPU usage rate", "tooltip": { "shared": true, "sort": 0, @@ -199,7 +199,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -212,12 +212,12 @@ }, "id": 12, "panels": [], - "title": "任务信息", + "title": "Task Information", "type": "row" }, { "columns": [], - "description": "当前快照任务的信息", + "description": "Information about the current snapshot task", "fontSize": "100%", "gridPos": { "h": 9, @@ -470,13 +470,13 @@ ], "timeFrom": null, "timeShift": null, - "title": "快照任务表", + "title": "Snapshot Task Table", "transform": "table", "type": "table" }, { "columns": [], - "description": "当前克隆任务的信息", + "description": "Information about the current cloning task", "fontSize": "100%", "gridPos": { "h": 9, @@ -800,7 +800,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "克隆任务表", + "title": "Clone Task Table", "transform": "table", "type": "table" }, @@ -810,7 +810,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -856,7 +856,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "快照数量统计", + "title": "Number of Snapshots Statistics", "tooltip": { "shared": true, "sort": 0, @@ -901,7 +901,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -947,7 +947,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "克隆数量统计", + "title": "Clone Count Statistics", "tooltip": 
{ "shared": true, "sort": 0, @@ -1002,7 +1002,7 @@ "definition": "{__name__=~\"snapshotcloneserver_.*\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": false, "name": "instance", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/chunkserver.json b/monitor/grafana/provisioning/dashboards/chunkserver.json index 2770cd2802..89ce686aa7 100644 --- a/monitor/grafana/provisioning/dashboards/chunkserver.json +++ b/monitor/grafana/provisioning/dashboards/chunkserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -218,7 +218,7 @@ } } ], - "title": "进程资源占用", + "title": "Process resource usage", "type": "row" }, { @@ -237,7 +237,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver上所有rpc的每秒处理成功的请求个数", + "description": "The number of successfully processed requests per second for all RPCs on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -410,7 +410,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在rpc层面的错误个数", + "description": "The number of errors per second at the rpc level in read_chunk", "fill": 1, "gridPos": { "h": 6, @@ -675,7 +675,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "write_chunk每秒在rpc层面的错误个数", + "description": "Write_chunk The number of errors per second at the rpc level", "fill": 1, "gridPos": { "h": 6, @@ -1027,7 +1027,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "rpc层面read chunk延时的分位值", + "description": "The quantile value of read chunk delay at the rpc level", "fill": 1, "gridPos": { "h": 7, @@ -1281,7 +1281,7 @@ } } ], - "title": "rpc层读写指标", + "title": "RPC layer read and write metrics", "type": "row" }, { @@ -1300,7 +1300,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在chunk service层面的错误个数", + "description": "The number of read_chunk errors per second at the chunk service level", "fill": 1, "gridPos": { "h": 7, @@ -1392,7 +1392,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -1484,7 +1484,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的read_chunk请求个数", + "description": "The number of read_chunk requests received by the chunk service layer per second", "fill": 1, "gridPos": { "h": 7, @@ -1576,7 +1576,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面write_chunk每秒返回错误的请求个数", + "description": "The number of requests per second that the chunk service level write_chunk returns errors", "fill": 1, "gridPos": { "h": 7, @@ -1668,7 +1668,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -1762,7 +1762,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的write_chunk请求个数", + "description": "The number of Write_chunk requests received by the chunk service layer per second", "fill": 1, "gridPos": { 
"h": 7, @@ -1854,7 +1854,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk延时的分位值", + "description": "The percentile value of read chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -1965,7 +1965,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面write chunk延时的分位值", + "description": "The percentile value of write chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2076,7 +2076,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面的read chunk的平均延时", + "description": "Average latency of read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2166,7 +2166,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面的write chunk的平均延时", + "description": "Average latency of write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2256,7 +2256,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2346,7 +2346,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2436,7 +2436,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk的io大小的分位值", + "description": "The quantile value of the IO size of the read chunk at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2541,7 +2541,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面write chunk的io大小的分位值", + "description": "The quantile value of IO size for write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2642,7 +2642,7 @@ } ], "repeat": null, - "title": "chunkserver层读写指标", + "title": "Chunkserver layer read and write metrics", "type": "row" }, { @@ -2664,7 +2664,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的数量", + "description": "Number of selected copysets", "format": "none", "gauge": { "maxValue": 100, @@ -2747,7 +2747,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的chunk数量的总和", + "description": "The total number of chunks in the selected copyset", "format": "none", "gauge": { "maxValue": 100, @@ -2828,7 +2828,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "各copyset上已分配的chunk的数量", + "description": "The number of allocated chunks on each copyset", "fill": 1, "gridPos": { "h": 6, @@ -2920,7 +2920,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的read chunk请求个数", + "description": "The number of read chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3012,7 +3012,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7, @@ -3104,7 +3104,7 @@ "dashLength": 
10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的read chunk请求个数", + "description": "The number of read chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7, @@ -3196,7 +3196,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的write chunk请求个数", + "description": "The number of write chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3288,7 +3288,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7, @@ -3380,7 +3380,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的write chunk请求个数", + "description": "The number of write chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7, @@ -3472,7 +3472,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3564,7 +3564,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3656,7 +3656,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的read chunk的平均延时", + "description": "Average latency of read chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3746,7 +3746,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的write chunk的平均延时", + "description": "Average latency of write chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3831,7 +3831,7 @@ } } ], - "title": "copyset指标", + "title": "Copyset metric", "type": "row" }, { @@ -3850,7 +3850,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -3942,7 +3942,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -4033,7 +4033,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上copyset的数量", + "description": "Number of copysets on chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -4119,7 +4119,7 @@ } } ], - "title": "chunkserver关键指标", + "title": "Chunkserver Key Metrics", "type": "row" }, { @@ -4773,7 +4773,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -4860,7 +4860,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, @@ -5121,7 +5121,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "follower正在下载快照的任务数量(一个copyset最多一个任务)", + "description": "Number of tasks being downloaded by the follower (one copyset can only have one task)", "fill": 1, "gridPos": { "h": 8, @@ -5204,7 +5204,7 @@ 
} } ], - "title": "Raft关键指标", + "title": "Raft Key Metrics", "type": "row" }, { @@ -5228,7 +5228,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "chunkserver上bthread worker的数量", + "description": "Number of bthread workers on chunkserver", "format": "none", "gauge": { "maxValue": 100, @@ -5311,7 +5311,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上正在被使用的工作线程个数", + "description": "The number of worker threads currently in use on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5399,7 +5399,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上bthread的数量", + "description": "Number of bthreads on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5487,7 +5487,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上execution queue的数量", + "description": "Number of execution queues on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5585,7 +5585,7 @@ } } ], - "title": "线程指标", + "title": "Thread metrics", "type": "row" } ], @@ -5605,7 +5605,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"([[ip:pipe]]):[[port:regex]]\"}", "hide": 2, "includeAll": false, - "label": "实例", + "label": "Instance", "multi": true, "name": "instance", "options": [], @@ -5630,7 +5630,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": true, "name": "ip", "options": [], @@ -5655,7 +5655,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"$ip.*\"}", "hide": 0, "includeAll": false, - "label": "端口号", + "label": "Port", "multi": true, "name": "port", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/client.json b/monitor/grafana/provisioning/dashboards/client.json index a7274595c3..6efc67c597 100644 --- a/monitor/grafana/provisioning/dashboards/client.json +++ b/monitor/grafana/provisioning/dashboards/client.json @@ -101,7 +101,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "客户端运行时间", + "title": "Client runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -200,7 +200,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -215,7 +215,7 @@ "panels": [ { "columns": [], - "description": "客户端的配置情况", + "description": "Configuration of the client", "fontSize": "100%", "gridPos": { "h": 8, @@ -235,7 +235,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -251,7 +251,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -267,7 +267,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -309,12 +309,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "客户端配置", + "title": "Client Configuration", "transform": "table", "type": "table" } ], - "title": "客户端配置", + "title": "Client Configuration", "type": "row" }, { @@ -681,7 +681,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -769,7 +769,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒写入字节数", + 
"description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1129,7 +1129,7 @@ } } ], - "title": "用户接口层指标", + "title": "User Interface Layer Metrics", "type": "row" }, { @@ -1236,7 +1236,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1323,7 +1323,7 @@ } } ], - "title": "中间业务层指标", + "title": "Intermediate Business Layer Indicators", "type": "row" }, { @@ -1982,7 +1982,7 @@ } } ], - "title": "rpc层指标", + "title": "Rpc layer metrics", "type": "row" }, { @@ -2085,7 +2085,7 @@ } } ], - "title": "与MDS通信指标", + "title": "Communication metrics with MDS", "type": "row" } ], @@ -2108,7 +2108,7 @@ "definition": "label_values({__name__=~\"curve_client.*\", instance=~\".*:90.*\"}, instance)", "hide": 0, "includeAll": true, - "label": "客户端", + "label": "Client", "multi": true, "name": "client", "options": [], @@ -2136,7 +2136,7 @@ "definition": "{__name__=~\"curve_client.*write_qps\", instance=~\"$client\"}", "hide": 0, "includeAll": true, - "label": "文件", + "label": "File", "multi": true, "name": "file", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/etcd.json b/monitor/grafana/provisioning/dashboards/etcd.json index 82869aa08a..d1a87934bc 100644 --- a/monitor/grafana/provisioning/dashboards/etcd.json +++ b/monitor/grafana/provisioning/dashboards/etcd.json @@ -2464,7 +2464,7 @@ "definition": "etcd_server_has_leader", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/mds.json b/monitor/grafana/provisioning/dashboards/mds.json index c226cf398d..9704ae6e32 100644 --- a/monitor/grafana/provisioning/dashboards/mds.json +++ b/monitor/grafana/provisioning/dashboards/mds.json @@ -115,7 +115,7 @@ "panels": [ { "columns": [], - "description": "mds的配置", + "description": "Configuration of mds", "fontSize": "100%", "gridPos": { "h": 11, @@ -135,7 +135,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -151,7 +151,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -167,7 +167,7 @@ "unit": "short" }, { - "alias": "配置值", + "alias": "Configuration Values", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -208,12 +208,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "mds配置", + "title": "Mds configuration", "transform": "table", "type": "table" } ], - "title": "mds配置", + "title": "Mds configuration", "type": "row" }, { @@ -228,7 +228,7 @@ "panels": [ { "cacheTimeout": null, - "description": "磁盘剩余容量", + "description": "Disk remaining capacity", "gridPos": { "h": 7, "w": 4, @@ -283,7 +283,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配的磁盘容量,物理空间", + "description": "Cluster allocated disk capacity, physical space", "gridPos": { "h": 7, "w": 4, @@ -338,7 +338,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配容量,逻辑空间", + "description": "Cluster allocated capacity, logical space", "gridPos": { "h": 7, "w": 4, @@ -393,7 +393,7 @@ }, { "cacheTimeout": null, - "description": "集群总容量", + "description": "Total Cluster Capacity", "gridPos": { "h": 7, "w": 4, @@ -510,7 +510,7 @@ "type": "gauge" } ], - "title": "集群信息", + "title": "Cluster 
Information", "type": "row" }, { @@ -523,7 +523,7 @@ }, "id": 22, "panels": [], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -598,7 +598,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -679,7 +679,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "进程cpu使用情况", + "title": "Process CPU Usage", "tooltip": { "shared": true, "sort": 0, @@ -763,7 +763,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "内存占用", + "title": "Memory usage", "tooltip": { "shared": true, "sort": 0, @@ -847,7 +847,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "mds元数据缓存大小", + "title": "Mds metadata cache size", "tooltip": { "shared": true, "sort": 0, @@ -896,7 +896,7 @@ "panels": [ { "columns": [], - "description": "逻辑池监控指标", + "description": "Logical Pool Monitoring Metrics", "fontSize": "100%", "gridPos": { "h": 8, @@ -916,7 +916,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -932,7 +932,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -978,7 +978,7 @@ "type": "table" } ], - "title": "逻辑池状态", + "title": "Logical Pool Status", "type": "row" }, { @@ -1082,7 +1082,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的leader count", + "description": "The current leader count of all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1173,7 +1173,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的copyset数量", + "description": "The current number of copysets for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1264,7 +1264,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的scatterwidth", + "description": "The current scatterwidth of all chunkservers", "fill": 1, "gridPos": { "h": 11, @@ -1355,7 +1355,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层写请求速率", + "description": "Current RPC layer write request rate for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1446,7 +1446,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层写请求iops", + "description": "Current rpc layer write requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1537,7 +1537,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求速率", + "description": "Current RPC layer read request rate for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1628,7 +1628,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求iops", + "description": "Current rpc layer read requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1714,7 +1714,7 @@ } } ], - "title": "chunkserver状态", + "title": "Chunkserver Status", "type": "row" }, { @@ -2233,7 +2233,7 @@ } } ], - "title": "调度监控", + "title": "Scheduling Monitoring", "type": "row" }, { @@ -2251,7 +2251,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "所有rpc请求的qps", + "description": "QPS for all rpc requests", "fill": 1, "gridPos": { "h": 8, @@ -2338,7 +2338,7 @@ "dashLength": 10, "dashes": false, "datasource": 
"Prometheus", - "description": "当前MDS上所有inflight的rpc请求个数", + "description": "The number of rpc requests for all inflight on the current MDS", "fill": 1, "gridPos": { "h": 8, @@ -2431,7 +2431,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -2519,7 +2519,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -2604,7 +2604,7 @@ } } ], - "title": "RPC层指标", + "title": "RPC Layer Metrics", "type": "row" }, { @@ -2622,7 +2622,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "每秒成功处理的heartbeat个数", + "description": "The number of heartbeat successfully processed per second", "fill": 1, "gridPos": { "h": 8, @@ -2709,7 +2709,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat当前inflight的请求个数", + "description": "The current number of inflight requests for heartbeat", "fill": 1, "gridPos": { "h": 8, @@ -2803,7 +2803,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求平均延时", + "description": "Average latency of heartbeat requests", "fill": 1, "gridPos": { "h": 7, @@ -2891,7 +2891,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求延时分位图", + "description": "Heartbeat Request Delay Bitmap", "fill": 1, "gridPos": { "h": 7, @@ -2974,7 +2974,7 @@ } } ], - "title": "HeartBeat指标", + "title": "HeartBeat metric", "type": "row" } ], @@ -2994,7 +2994,7 @@ "definition": "rpc_server_6666_curve_mds_curve_fsservice_create_file", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/report.json b/monitor/grafana/provisioning/dashboards/report.json index 4e26169ddb..f6539e34c4 100644 --- a/monitor/grafana/provisioning/dashboards/report.json +++ b/monitor/grafana/provisioning/dashboards/report.json @@ -224,7 +224,7 @@ }, { "columns": [], - "description": "copyset数量监控指标", + "description": "Copyset quantity monitoring indicator", "fontSize": "100%", "gridPos": { "h": 8, @@ -244,7 +244,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "pattern": "__name__", @@ -252,7 +252,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -299,7 +299,7 @@ }, { "columns": [], - "description": "leader数量监控指标", + "description": "Leader quantity monitoring indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -319,7 +319,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -335,7 +335,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -382,7 +382,7 @@ }, { "columns": [], - "description": "scatterwidth指标", + "description": "Scatterwidth indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -402,7 +402,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, @@ -411,7 +411,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -470,7 +470,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - 
"description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -559,7 +559,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -834,7 +834,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1095,7 +1095,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒写入字节数", + "description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1268,7 +1268,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "gridPos": { "h": 8, @@ -1444,7 +1444,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "client单位大小的平均latency", + "description": "Average latency of client unit size", "fill": 1, "gridPos": { "h": 8, @@ -1747,7 +1747,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -1839,7 +1839,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -1938,7 +1938,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2032,7 +2032,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2209,7 +2209,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2301,7 +2301,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2478,7 +2478,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver read chunk 单位大小内的平均延时", + "description": "Average latency per chunkserver read chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -2572,7 +2572,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver write chunk 单位大小内的平均延时", + "description": "Average latency in chunkserver write chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -3023,7 +3023,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -3203,7 +3203,7 @@ "bars": false, "dashLength": 10, 
"dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, diff --git a/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json b/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json index 3382ca3c84..7eaab10890 100644 --- a/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json +++ b/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -161,7 +161,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "cpu使用率", + "title": "CPU usage rate", "tooltip": { "shared": true, "sort": 0, @@ -199,7 +199,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -212,12 +212,12 @@ }, "id": 12, "panels": [], - "title": "任务信息", + "title": "Task Information", "type": "row" }, { "columns": [], - "description": "当前快照任务的信息", + "description": "Information about the current snapshot task", "fontSize": "100%", "gridPos": { "h": 9, @@ -470,13 +470,13 @@ ], "timeFrom": null, "timeShift": null, - "title": "快照任务表", + "title": "Snapshot Task Table", "transform": "table", "type": "table" }, { "columns": [], - "description": "当前克隆任务的信息", + "description": "Information about the current cloning task", "fontSize": "100%", "gridPos": { "h": 9, @@ -800,7 +800,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "克隆任务表", + "title": "Clone Task Table", "transform": "table", "type": "table" }, @@ -810,7 +810,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -856,7 +856,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "快照数量统计", + "title": "Number of Snapshots Statistics", "tooltip": { "shared": true, "sort": 0, @@ -901,7 +901,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -947,7 +947,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "克隆数量统计", + "title": "Clone Count Statistics", "tooltip": { "shared": true, "sort": 0, @@ -1002,7 +1002,7 @@ "definition": "{__name__=~\"snapshotcloneserver_.*\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": false, "name": "instance", "options": [], diff --git a/nebd/etc/nebd/nebd-client.conf b/nebd/etc/nebd/nebd-client.conf index 1207e5bbd0..6baa9c2a51 100644 --- a/nebd/etc/nebd/nebd-client.conf +++ b/nebd/etc/nebd/nebd-client.conf @@ -1,28 +1,28 @@ # part2 socket file address nebdserver.serverAddress=/data/nebd/nebd.sock # __CURVEADM_TEMPLATE__ ${prefix}/data/nebd.sock __CURVEADM_TEMPLATE__ -# 文件锁路径 +# File lock path metacache.fileLockPath=/data/nebd/lock # __CURVEADM_TEMPLATE__ ${prefix}/data/lock __CURVEADM_TEMPLATE__ -# 同步rpc的最大重试次数 +# Maximum number of retries for synchronous rpc request.syncRpcMaxRetryTimes=50 -# rpc请求的重试间隔 +# The retry interval for rpc requests request.rpcRetryIntervalUs=100000 -# rpc请求的最大重试间隔 +# Maximum retry interval for rpc requests request.rpcRetryMaxIntervalUs=64000000 -# rpc hostdown情况下的重试时间 +# The retry time in the case of rpc hostdown request.rpcHostDownRetryIntervalUs=10000 -# brpc的健康检查周期时间,单位s +# The health check cycle time of brpc, in seconds request.rpcHealthCheckIntervalS=1 -# 
brpc从rpc失败到进行健康检查的最大时间间隔,单位ms +# The maximum time interval from rpc failure to health check in ms for brpc request.rpcMaxDelayHealthCheckIntervalMs=100 -# rpc发送执行队列个数 +# Number of RPC send execution queues request.rpcSendExecQueueNum=2 -# heartbeat间隔 +# heartbeat interval heartbeat.intervalS=5 -# heartbeat rpc超时时间 +# heartbeat RPC timeout heartbeat.rpcTimeoutMs=500 -# 日志路径 +# Log Path log.path=/data/log/nebd/client # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__ diff --git a/nebd/etc/nebd/nebd-server.conf b/nebd/etc/nebd/nebd-server.conf index a6d2fbe534..1ef0966cc6 100644 --- a/nebd/etc/nebd/nebd-server.conf +++ b/nebd/etc/nebd/nebd-server.conf @@ -1,16 +1,16 @@ -# curve-client配置文件地址 +# curve-client configuration file address curveclient.confPath=/etc/curve/client.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/client.conf __CURVEADM_TEMPLATE__ -#brpc server监听端口 +# brpc server listening port listen.address=/data/nebd/nebd.sock # __CURVEADM_TEMPLATE__ ${prefix}/data/nebd.sock __CURVEADM_TEMPLATE__ -#元数据文件地址,包含文件名 +# Metadata file address, including file name meta.file.path=/data/nebd/nebdserver.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/nebdserver.meta __CURVEADM_TEMPLATE__ -#心跳超时时间 +# Heartbeat timeout heartbeat.timeout.sec=30 -#文件超时检测时间间隔 +# File timeout detection interval heartbeat.check.interval.ms=3000 # return rpc when io error diff --git a/nebd/nebd-package/usr/bin/nebd-daemon b/nebd/nebd-package/usr/bin/nebd-daemon index fb8242d1dc..3204bc8732 100755 --- a/nebd/nebd-package/usr/bin/nebd-daemon +++ b/nebd/nebd-package/usr/bin/nebd-daemon @@ -138,7 +138,7 @@ function stop_one() { return fi - # 判断是否已经通过daemon启动了nebd-server + # Determine if nebd-server has been started through daemon daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running if [ $? -ne 0 ]; then echo "$1: didn't start nebd-server by daemon" @@ -179,7 +179,7 @@ function restart_one() { return fi - # 判断是否已经通过daemon启动了nebd-server + # Determine if nebd-server has been started through daemon daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running if [ $? 
-ne 0 ]; then echo "$1: didn't start nebd-server by daemon" @@ -267,7 +267,7 @@ function status() { done } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " nebd-daemon start -- start deamon process and watch on nebd-server process for all instance" @@ -283,7 +283,7 @@ function usage() { echo " nebd-daemon status-one -- show if the nebd-server is running by daemon for current user's instance" } -# 检查参数启动参数,最少1个 +# Check parameter startup parameters, at least 1 if [ $# -lt 1 ]; then usage exit diff --git a/nebd/src/common/configuration.cpp b/nebd/src/common/configuration.cpp index 69a23ebe43..3c331c7cee 100644 --- a/nebd/src/common/configuration.cpp +++ b/nebd/src/common/configuration.cpp @@ -22,10 +22,10 @@ #include "nebd/src/common/configuration.h" -#include +#include #include +#include #include -#include namespace nebd { namespace common { @@ -54,8 +54,10 @@ bool Configuration::LoadConfig() { } bool Configuration::SaveConfig() { - // 当前先只保存配置,原文件的注释等内容先忽略 - // TODO(yyk): 后续考虑改成原文件格式不变,只修改配置值 + // Currently, only the configuration is saved, and the comments and other + // contents of the original file are ignored + // TODO(yyk): In the future, consider changing to the original file format + // without changing, only modifying the configuration values std::ofstream wStream(confFile_); if (wStream.is_open()) { for (auto& pair : config_) { @@ -73,38 +75,33 @@ std::string Configuration::DumpConfig() { return ""; } - std::map Configuration::ListConfig() const { return config_; } -void Configuration::SetConfigPath(const std::string &path) { - confFile_ = path; -} +void Configuration::SetConfigPath(const std::string& path) { confFile_ = path; } -std::string Configuration::GetConfigPath() { - return confFile_; -} +std::string Configuration::GetConfigPath() { return confFile_; } -std::string Configuration::GetStringValue(const std::string &key) { +std::string Configuration::GetStringValue(const std::string& key) { return GetValue(key); } -bool Configuration::GetStringValue(const std::string &key, std::string *out) { +bool Configuration::GetStringValue(const std::string& key, std::string* out) { return GetValue(key, out); } -void Configuration::SetStringValue(const std::string &key, - const std::string &value) { +void Configuration::SetStringValue(const std::string& key, + const std::string& value) { SetValue(key, value); } -int Configuration::GetIntValue(const std::string &key, uint64_t defaultvalue) { +int Configuration::GetIntValue(const std::string& key, uint64_t defaultvalue) { std::string value = GetValue(key); return (value == "") ? 
defaultvalue : std::stoi(value); } -bool Configuration::GetIntValue(const std::string &key, int *out) { +bool Configuration::GetIntValue(const std::string& key, int* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoi(res); @@ -113,7 +110,7 @@ bool Configuration::GetIntValue(const std::string &key, int *out) { return false; } -bool Configuration::GetUInt32Value(const std::string &key, uint32_t *out) { +bool Configuration::GetUInt32Value(const std::string& key, uint32_t* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoul(res); @@ -122,7 +119,7 @@ bool Configuration::GetUInt32Value(const std::string &key, uint32_t *out) { return false; } -bool Configuration::GetUInt64Value(const std::string &key, uint64_t *out) { +bool Configuration::GetUInt64Value(const std::string& key, uint64_t* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoull(res); @@ -141,18 +138,17 @@ bool Configuration::GetInt64Value(const std::string& key, int64_t* out) { return false; } -void Configuration::SetIntValue(const std::string &key, const int value) { +void Configuration::SetIntValue(const std::string& key, const int value) { SetValue(key, std::to_string(value)); } -double Configuration::GetDoubleValue( - const std::string &key, - double defaultvalue) { +double Configuration::GetDoubleValue(const std::string& key, + double defaultvalue) { std::string value = GetValue(key); return (value == "") ? defaultvalue : std::stod(value); } -bool Configuration::GetDoubleValue(const std::string &key, double *out) { +bool Configuration::GetDoubleValue(const std::string& key, double* out) { std::string res; if (GetValue(key, &res)) { *out = std::stod(res); @@ -161,18 +157,17 @@ bool Configuration::GetDoubleValue(const std::string &key, double *out) { return false; } -void Configuration::SetDoubleValue(const std::string &key, const double value) { +void Configuration::SetDoubleValue(const std::string& key, const double value) { SetValue(key, std::to_string(value)); } - -double Configuration::GetFloatValue( - const std::string &key, float defaultvalue) { +double Configuration::GetFloatValue(const std::string& key, + float defaultvalue) { std::string value = GetValue(key); return (value == "") ? 
defaultvalue : std::stof(value); } -bool Configuration::GetFloatValue(const std::string &key, float *out) { +bool Configuration::GetFloatValue(const std::string& key, float* out) { std::string res; if (GetValue(key, &res)) { *out = std::stof(res); @@ -181,11 +176,11 @@ bool Configuration::GetFloatValue(const std::string &key, float *out) { return false; } -void Configuration::SetFloatValue(const std::string &key, const float value) { +void Configuration::SetFloatValue(const std::string& key, const float value) { SetValue(key, std::to_string(value)); } -bool Configuration::GetBoolValue(const std::string &key, bool defaultvalue) { +bool Configuration::GetBoolValue(const std::string& key, bool defaultvalue) { std::string svalue = config_[key]; transform(svalue.begin(), svalue.end(), svalue.begin(), ::tolower); @@ -195,7 +190,7 @@ bool Configuration::GetBoolValue(const std::string &key, bool defaultvalue) { return ret; } -bool Configuration::GetBoolValue(const std::string &key, bool *out) { +bool Configuration::GetBoolValue(const std::string& key, bool* out) { std::string res; if (GetValue(key, &res)) { transform(res.begin(), res.end(), res.begin(), ::tolower); @@ -215,16 +210,15 @@ bool Configuration::GetBoolValue(const std::string &key, bool *out) { return false; } - -void Configuration::SetBoolValue(const std::string &key, const bool value) { +void Configuration::SetBoolValue(const std::string& key, const bool value) { SetValue(key, std::to_string(value)); } -std::string Configuration::GetValue(const std::string &key) { +std::string Configuration::GetValue(const std::string& key) { return config_[key]; } -bool Configuration::GetValue(const std::string &key, std::string *out) { +bool Configuration::GetValue(const std::string& key, std::string* out) { if (config_.find(key) != config_.end()) { *out = config_[key]; return true; @@ -233,7 +227,7 @@ bool Configuration::GetValue(const std::string &key, std::string *out) { return false; } -void Configuration::SetValue(const std::string &key, const std::string &value) { +void Configuration::SetValue(const std::string& key, const std::string& value) { config_[key] = value; } diff --git a/nebd/src/common/configuration.h b/nebd/src/common/configuration.h index 95df251e80..642d3be2ad 100644 --- a/nebd/src/common/configuration.h +++ b/nebd/src/common/configuration.h @@ -20,8 +20,8 @@ * Author: hzchenwei7 */ -#include #include +#include #ifndef NEBD_SRC_COMMON_CONFIGURATION_H_ #define NEBD_SRC_COMMON_CONFIGURATION_H_ @@ -39,79 +39,80 @@ class Configuration { std::string DumpConfig(); std::map ListConfig() const; - void SetConfigPath(const std::string &path); + void SetConfigPath(const std::string& path); std::string GetConfigPath(); - std::string GetStringValue(const std::string &key); + std::string GetStringValue(const std::string& key); /* - * @brief GetStringValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetStringValue(const std::string &key, std::string *out); - void SetStringValue(const std::string &key, const std::string &value); - - int GetIntValue(const std::string &key, uint64_t defaultvalue = 0); + * @brief GetStringValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetStringValue(const std::string& key, std::string* out); + void SetStringValue(const std::string& key, const std::string& value); + + int 
GetIntValue(const std::string& key, uint64_t defaultvalue = 0); /* - * @brief GetIntValue/GetUInt32Value/GetUInt64Value 获取指定配置项的值 //NOLINT - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetIntValue(const std::string &key, int *out); - bool GetUInt32Value(const std::string &key, uint32_t *out); - bool GetUInt64Value(const std::string &key, uint64_t *out); + * @brief GetIntValue/GetUInt32Value/GetUInt64Value Get the value of the + * specified configuration item//NOLINT + * + * @param[in] key configuration item name + * @param[out] outThe value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetIntValue(const std::string& key, int* out); + bool GetUInt32Value(const std::string& key, uint32_t* out); + bool GetUInt64Value(const std::string& key, uint64_t* out); bool GetInt64Value(const std::string& key, int64_t* out); - void SetIntValue(const std::string &key, const int value); + void SetIntValue(const std::string& key, const int value); - double GetDoubleValue(const std::string &key, double defaultvalue = 0.0); + double GetDoubleValue(const std::string& key, double defaultvalue = 0.0); /* - * @brief GetDoubleValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetDoubleValue(const std::string &key, double *out); - void SetDoubleValue(const std::string &key, const double value); - - double GetFloatValue(const std::string &key, float defaultvalue = 0.0); + * @brief GetDoubleValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetDoubleValue(const std::string& key, double* out); + void SetDoubleValue(const std::string& key, const double value); + + double GetFloatValue(const std::string& key, float defaultvalue = 0.0); /* - * @brief GetFloatValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetFloatValue(const std::string &key, float *out); - void SetFloatValue(const std::string &key, const float value); - - bool GetBoolValue(const std::string &key, bool defaultvalue = false); + * @brief GetFloatValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetFloatValue(const std::string& key, float* out); + void SetFloatValue(const std::string& key, const float value); + + bool GetBoolValue(const std::string& key, bool defaultvalue = false); /* - * @brief GetBoolValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetBoolValue(const std::string &key, bool *out); - void SetBoolValue(const std::string &key, const bool value); - - std::string GetValue(const std::string &key); - bool GetValue(const std::string &key, std::string *out); - void SetValue(const std::string &key, const std::string &value); + * @brief GetBoolValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetBoolValue(const std::string& key, bool* out); + void SetBoolValue(const std::string& key, const bool value); + + std::string GetValue(const std::string& key); + 
bool GetValue(const std::string& key, std::string* out); + void SetValue(const std::string& key, const std::string& value); private: - std::string confFile_; - std::map config_; + std::string confFile_; + std::map config_; }; } // namespace common diff --git a/nebd/src/common/crc32.h b/nebd/src/common/crc32.h index 627218fcbd..238b1ce4fc 100644 --- a/nebd/src/common/crc32.h +++ b/nebd/src/common/crc32.h @@ -23,34 +23,36 @@ #ifndef NEBD_SRC_COMMON_CRC32_H_ #define NEBD_SRC_COMMON_CRC32_H_ +#include #include #include -#include - namespace nebd { namespace common { /** - * 计算数据的CRC32校验码(CRC32C),基于brpc的crc32库进行封装 - * @param pData 待计算的数据 - * @param iLen 待计算的数据长度 - * @return 32位的数据CRC32校验码 + * Calculate the CRC32 checksum (CRC32C) of the data and encapsulate it based on + * the crc32 library of brpc + * @param pData The data to be calculated + * @param iLen The length of data to be calculated + * @return 32-bit data CRC32 checksum */ -inline uint32_t CRC32(const char *pData, size_t iLen) { +inline uint32_t CRC32(const char* pData, size_t iLen) { return butil::crc32c::Value(pData, iLen); } /** - * 计算数据的CRC32校验码(CRC32C),基于brpc的crc32库进行封装. 此函数支持继承式 - * 计算,以支持对SGL类型的数据计算单个CRC校验码。满足如下约束: - * CRC32("hello world", 11) == CRC32(CRC32("hello ", 6), "world", 5) - * @param crc 起始的crc校验码 - * @param pData 待计算的数据 - * @param iLen 待计算的数据长度 - * @return 32位的数据CRC32校验码 + * Calculate the CRC32 checksum (CRC32C) of the data and encapsulate it based on + * the crc32 library of brpc This function supports inheritance Calculate to + * support the calculation of a single CRC checksum for SGL type data. Meet the + * following constraints: CRC32("hello world", 11) == CRC32(CRC32("hello ", 6), + * "world", 5) + * @param crc starting crc checksum + * @param pData The data to be calculated + * @param iLen The length of data to be calculated + * @return 32-bit data CRC32 checksum */ -inline uint32_t CRC32(uint32_t crc, const char *pData, size_t iLen) { +inline uint32_t CRC32(uint32_t crc, const char* pData, size_t iLen) { return butil::crc32c::Extend(crc, pData, iLen); } diff --git a/nebd/src/common/file_lock.h b/nebd/src/common/file_lock.h index 277cfebcf7..dfd644b98b 100644 --- a/nebd/src/common/file_lock.h +++ b/nebd/src/common/file_lock.h @@ -28,31 +28,30 @@ namespace nebd { namespace common { -// 文件锁 +// File lock class FileLock { public: explicit FileLock(const std::string& fileName) - : fileName_(fileName), fd_(-1) {} + : fileName_(fileName), fd_(-1) {} FileLock() : fileName_(""), fd_(-1) {} ~FileLock() = default; /** - * @brief 获取文件锁 - * @return 成功返回0,失败返回-1 + * @brief Get file lock + * @return returns 0 for success, -1 for failure */ int AcquireFileLock(); - /** - * @brief 释放文件锁 + * @brief Release file lock */ void ReleaseFileLock(); private: - // 锁文件的文件名 + // Lock the file name of the file std::string fileName_; - // 锁文件的fd + // Lock file fd int fd_; }; diff --git a/nebd/src/common/name_lock.h b/nebd/src/common/name_lock.h index ae34c182a9..e179c4272d 100644 --- a/nebd/src/common/name_lock.h +++ b/nebd/src/common/name_lock.h @@ -23,12 +23,12 @@ #ifndef NEBD_SRC_COMMON_NAME_LOCK_H_ #define NEBD_SRC_COMMON_NAME_LOCK_H_ +#include +#include +#include // NOLINT #include #include #include -#include -#include -#include // NOLINT #include "nebd/src/common/uncopyable.h" @@ -40,29 +40,28 @@ class NameLock : public Uncopyable { explicit NameLock(int bucketNum = 256); /** - * @brief 对指定string加锁 + * @brief locks the specified string * - * @param lockStr 被加锁的string + * @param lockStr locked string */ - void Lock(const std::string 
&lockStr); + void Lock(const std::string& lockStr); /** - * @brief 尝试指定sting加锁 + * @brief Attempt to specify sting lock * - * @param lockStr 被加锁的string + * @param lockStr locked string * - * @retval 成功 - * @retval 失败 + * @retval succeeded + * @retval failed */ - bool TryLock(const std::string &lockStr); + bool TryLock(const std::string& lockStr); /** - * @brief 对指定string解锁 + * @brief unlocks the specified string * - * @param lockStr 被加锁的string + * @param lockStr locked string */ - void Unlock(const std::string &lockStr); - + void Unlock(const std::string& lockStr); private: struct LockEntry { @@ -77,7 +76,7 @@ class NameLock : public Uncopyable { }; using LockBucketPtr = std::shared_ptr; - int GetBucketOffset(const std::string &lockStr); + int GetBucketOffset(const std::string& lockStr); private: std::vector locks_; @@ -85,24 +84,21 @@ class NameLock : public Uncopyable { class NameLockGuard : public Uncopyable { public: - NameLockGuard(NameLock &lock, const std::string &lockStr) : //NOLINT - lock_(lock), - lockStr_(lockStr) { + NameLockGuard(NameLock& lock, const std::string& lockStr) + : // NOLINT + lock_(lock), + lockStr_(lockStr) { lock_.Lock(lockStr_); } - ~NameLockGuard() { - lock_.Unlock(lockStr_); - } + ~NameLockGuard() { lock_.Unlock(lockStr_); } private: - NameLock &lock_; + NameLock& lock_; std::string lockStr_; }; - -} // namespace common -} // namespace nebd - +} // namespace common +} // namespace nebd #endif // NEBD_SRC_COMMON_NAME_LOCK_H_ diff --git a/nebd/src/common/stringstatus.h b/nebd/src/common/stringstatus.h index fc4c9a6364..db47e08933 100644 --- a/nebd/src/common/stringstatus.h +++ b/nebd/src/common/stringstatus.h @@ -20,28 +20,28 @@ * Author: lixiaocui */ - -#ifndef NEBD_SRC_COMMON_STRINGSTATUS_H_ -#define NEBD_SRC_COMMON_STRINGSTATUS_H_ +#ifndef NEBD_SRC_COMMON_STRINGSTATUS_H_ +#define NEBD_SRC_COMMON_STRINGSTATUS_H_ #include -#include + #include +#include namespace nebd { namespace common { class StringStatus { public: /** - * @brief ExposeAs 用于初始化bvar + * @brief ExposeAs is used to initialize bvar * - * @param[in] prefix, 前缀 - * @param[in] name, 名字 + * @param[in] prefix, prefix + * @param[in] name, first name */ - void ExposeAs(const std::string &prefix, const std::string &name); + void ExposeAs(const std::string& prefix, const std::string& name); /** - * @brief Set 设置每项key-value信息 + * @brief Set sets the key-value information for each item * * @param[in] key * @param[in] value @@ -49,26 +49,28 @@ class StringStatus { void Set(const std::string& key, const std::string& value); /** - * @brief Update 把当前key-value map中的键值对以json string的形式设置到status中 //NOLINT + * @brief Update sets the key-value pairs in the current key value map to + * status as JSON strings// NOLINT */ void Update(); /** - * @brief GetValueByKey 获取指定key对应的value + * @brief GetValueByKey Get the value corresponding to the specified key * - * @param[in] key 指定key + * @param[in] key Specify the key */ - std::string GetValueByKey(const std::string &key); + std::string GetValueByKey(const std::string& key); /** - * @brief JsonBody 获取当前key-value map对应的json形式字符串 + * @brief JsonBody obtains the JSON format string corresponding to the + * current key-value map */ std::string JsonBody(); private: - // 需要导出的结构体的key-value map + // The key-value map of the structure to be exported std::map kvs_; - // 该导出项对应的status + // The status corresponding to the exported item bvar::Status status_; }; } // namespace common diff --git a/nebd/src/common/timeutility.h b/nebd/src/common/timeutility.h index a80afb61b5..9e454f15a7 
100644 --- a/nebd/src/common/timeutility.h +++ b/nebd/src/common/timeutility.h @@ -26,9 +26,10 @@ #include #include #include + +#include #include #include -#include namespace nebd { namespace common { @@ -53,7 +54,8 @@ class TimeUtility { return tm.tv_sec; } - // 时间戳转成标准时间输出在standard里面,时间戳单位为秒 + // Convert the timestamp to standard time and output it in standard, with + // the timestamp unit in seconds static inline void TimeStampToStandard(time_t timeStamp, std::string* standard) { char now[64]; @@ -64,7 +66,7 @@ class TimeUtility { } }; -} // namespace common -} // namespace nebd +} // namespace common +} // namespace nebd -#endif // NEBD_SRC_COMMON_TIMEUTILITY_H_ +#endif // NEBD_SRC_COMMON_TIMEUTILITY_H_ diff --git a/nebd/src/part1/async_request_closure.cpp b/nebd/src/part1/async_request_closure.cpp index 94d1a9f50f..c9ab8e873e 100644 --- a/nebd/src/part1/async_request_closure.cpp +++ b/nebd/src/part1/async_request_closure.cpp @@ -22,8 +22,8 @@ #include "nebd/src/part1/async_request_closure.h" -#include #include +#include #include #include @@ -40,11 +40,10 @@ void AsyncRequestClosure::Run() { int64_t sleepUs = GetRpcRetryIntervalUs(aioCtx->retryCount); LOG_EVERY_SECOND(WARNING) << OpTypeToString(aioCtx->op) << " rpc failed" - << ", error = " << cntl.ErrorText() - << ", fd = " << fd + << ", error = " << cntl.ErrorText() << ", fd = " << fd << ", log id = " << cntl.log_id() - << ", retryCount = " << aioCtx->retryCount - << ", sleep " << (sleepUs / 1000) << " ms"; + << ", retryCount = " << aioCtx->retryCount << ", sleep " + << (sleepUs / 1000) << " ms"; bthread_usleep(sleepUs); Retry(); } else { @@ -52,7 +51,7 @@ void AsyncRequestClosure::Run() { if (nebd::client::RetCode::kOK == retCode) { DVLOG(6) << OpTypeToString(aioCtx->op) << " success, fd = " << fd; - // 读请求复制数据 + // Read Request Copy Data if (aioCtx->op == LIBAIO_OP::LIBAIO_OP_READ) { cntl.response_attachment().copy_to( aioCtx->buf, cntl.response_attachment().size()); @@ -73,8 +72,8 @@ void AsyncRequestClosure::Run() { } int64_t AsyncRequestClosure::GetRpcRetryIntervalUs(int64_t retryCount) const { - // EHOSTDOWN: 找不到可用的server。 - // server可能停止服务了,也可能正在退出中(返回了ELOGOFF) + // EHOSTDOWN: Unable to find an available server. 
+ // The server may have stopped serving or may be exiting (returning ELOGOFF) if (cntl.ErrorCode() == EHOSTDOWN) { return requestOption_.rpcHostDownRetryIntervalUs; } @@ -83,10 +82,9 @@ int64_t AsyncRequestClosure::GetRpcRetryIntervalUs(int64_t retryCount) const { return requestOption_.rpcRetryIntervalUs; } - return std::max( - requestOption_.rpcRetryIntervalUs, - std::min(requestOption_.rpcRetryIntervalUs * retryCount, - requestOption_.rpcRetryMaxIntervalUs)); + return std::max(requestOption_.rpcRetryIntervalUs, + std::min(requestOption_.rpcRetryIntervalUs * retryCount, + requestOption_.rpcRetryMaxIntervalUs)); } void AsyncRequestClosure::Retry() const { diff --git a/nebd/src/part1/async_request_closure.h b/nebd/src/part1/async_request_closure.h index 27ab7f613d..0df2f03172 100644 --- a/nebd/src/part1/async_request_closure.h +++ b/nebd/src/part1/async_request_closure.h @@ -32,12 +32,9 @@ namespace nebd { namespace client { struct AsyncRequestClosure : public google::protobuf::Closure { - AsyncRequestClosure(int fd, - NebdClientAioContext* ctx, + AsyncRequestClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : fd(fd), - aioCtx(ctx), - requestOption_(option) {} + : fd(fd), aioCtx(ctx), requestOption_(option) {} void Run() override; @@ -47,94 +44,70 @@ struct AsyncRequestClosure : public google::protobuf::Closure { void Retry() const; - // 请求fd + // Request fd int fd; - // 请求上下文信息 + // Request Context Information NebdClientAioContext* aioCtx; - // brpc请求的controller + // Controller requested by brpc brpc::Controller cntl; RequestOption requestOption_; }; struct AioWriteClosure : public AsyncRequestClosure { - AioWriteClosure(int fd, - NebdClientAioContext* ctx, + AioWriteClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} WriteResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; struct AioReadClosure : public AsyncRequestClosure { - AioReadClosure(int fd, - NebdClientAioContext* ctx, + AioReadClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} ReadResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; struct AioDiscardClosure : public AsyncRequestClosure { - AioDiscardClosure(int fd, - NebdClientAioContext* ctx, + AioDiscardClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} DiscardResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; struct AioFlushClosure : public AsyncRequestClosure { - AioFlushClosure(int fd, - NebdClientAioContext* ctx, + AioFlushClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} FlushResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; inline const char* OpTypeToString(LIBAIO_OP opType) { switch 
(opType) { - case LIBAIO_OP::LIBAIO_OP_READ: - return "Read"; - case LIBAIO_OP::LIBAIO_OP_WRITE: - return "Write"; - case LIBAIO_OP::LIBAIO_OP_DISCARD: - return "Discard"; - case LIBAIO_OP::LIBAIO_OP_FLUSH: - return "Flush"; - default: - return "Unknown"; + case LIBAIO_OP::LIBAIO_OP_READ: + return "Read"; + case LIBAIO_OP::LIBAIO_OP_WRITE: + return "Write"; + case LIBAIO_OP::LIBAIO_OP_DISCARD: + return "Discard"; + case LIBAIO_OP::LIBAIO_OP_FLUSH: + return "Flush"; + default: + return "Unknown"; } } diff --git a/nebd/src/part1/heartbeat_manager.h b/nebd/src/part1/heartbeat_manager.h index 13289cb2d0..c9020e84cc 100644 --- a/nebd/src/part1/heartbeat_manager.h +++ b/nebd/src/part1/heartbeat_manager.h @@ -25,52 +25,52 @@ #include -#include // NOLINT #include #include +#include // NOLINT +#include "nebd/src/common/interrupt_sleep.h" #include "nebd/src/part1/nebd_common.h" #include "nebd/src/part1/nebd_metacache.h" -#include "nebd/src/common/interrupt_sleep.h" namespace nebd { namespace client { -// Heartbeat 管理类 -// 定期向nebd-server发送已打开文件的心跳信息 +// Heartbeat Management Class +// Regularly send heartbeat information of opened files to nebd-server class HeartbeatManager { public: explicit HeartbeatManager(std::shared_ptr metaCache); - ~HeartbeatManager() { - Stop(); - } + ~HeartbeatManager() { Stop(); } /** - * @brief: 启动心跳线程 + * @brief: Start heartbeat thread */ void Run(); /** - * @brief: 停止心跳线程 + * @brief: Stop heartbeat thread */ void Stop(); /** - * @brief 初始化 - * @param heartbeatOption heartbeat 配置项 - * @return 0 初始化成功 / -1 初始化失败 + * @brief initialization + * @param heartbeatOption heartbeat configuration item + * @return 0 initialization successful/-1 initialization failed */ int Init(const HeartbeatOption& option); private: /** - * @brief: 心跳线程执行函数,定期发送心跳消息 + * @brief: Heartbeat thread execution function, sending heartbeat messages + * regularly */ void HeartBetaThreadFunc(); /** - * @brief: 向part2发送心跳消息,包括当前已打开的卷信息 + * @brief: Send a heartbeat message to part2, including information about + * the currently opened volume */ void SendHeartBeat(); @@ -79,7 +79,7 @@ class HeartbeatManager { HeartbeatOption heartbeatOption_; - std::shared_ptr metaCache_; + std::shared_ptr metaCache_; std::thread heartbeatThread_; nebd::common::InterruptibleSleeper sleeper_; diff --git a/nebd/src/part1/libnebd.cpp b/nebd/src/part1/libnebd.cpp index ab6093e415..dc254c9286 100644 --- a/nebd/src/part1/libnebd.cpp +++ b/nebd/src/part1/libnebd.cpp @@ -21,12 +21,14 @@ */ #include "nebd/src/part1/libnebd.h" + #include "nebd/src/part1/libnebd_file.h" extern "C" { bool g_inited = false; -// Note: 配置文件路径是否有上层传下来比较合适,评估是否要修改 +// Note: It is more appropriate to pass down the configuration file path from +// the upper level, and evaluate whether it needs to be modified const char* confpath = "/etc/nebd/nebd-client.conf"; int nebd_lib_init() { if (g_inited) { @@ -67,17 +69,13 @@ int nebd_lib_uninit() { return 0; } -int nebd_lib_open(const char* filename) { - return Open4Nebd(filename, nullptr); -} +int nebd_lib_open(const char* filename) { return Open4Nebd(filename, nullptr); } int nebd_lib_open_with_flags(const char* filename, const NebdOpenFlags* flags) { return Open4Nebd(filename, flags); } -int nebd_lib_close(int fd) { - return Close4Nebd(fd); -} +int nebd_lib_close(int fd) { return Close4Nebd(fd); } int nebd_lib_pread(int fd, void* buf, off_t offset, size_t length) { (void)fd; @@ -114,32 +112,20 @@ int nebd_lib_sync(int fd) { return 0; } -int64_t nebd_lib_filesize(int fd) { - return GetFileSize4Nebd(fd); -} +int64_t 
nebd_lib_filesize(int fd) { return GetFileSize4Nebd(fd); } -int64_t nebd_lib_blocksize(int fd) { - return GetBlockSize4Nebd(fd); -} +int64_t nebd_lib_blocksize(int fd) { return GetBlockSize4Nebd(fd); } -int nebd_lib_resize(int fd, int64_t size) { - return Extend4Nebd(fd, size); -} +int nebd_lib_resize(int fd, int64_t size) { return Extend4Nebd(fd, size); } int nebd_lib_flush(int fd, NebdClientAioContext* context) { return Flush4Nebd(fd, context); } -int64_t nebd_lib_getinfo(int fd) { - return GetInfo4Nebd(fd); -} +int64_t nebd_lib_getinfo(int fd) { return GetInfo4Nebd(fd); } -int nebd_lib_invalidcache(int fd) { - return InvalidCache4Nebd(fd); -} +int nebd_lib_invalidcache(int fd) { return InvalidCache4Nebd(fd); } -void nebd_lib_init_open_flags(NebdOpenFlags* flags) { - flags->exclusive = 1; -} +void nebd_lib_init_open_flags(NebdOpenFlags* flags) { flags->exclusive = 1; } } // extern "C" diff --git a/nebd/src/part1/libnebd.h b/nebd/src/part1/libnebd.h index 380776d71b..8a39ee3977 100644 --- a/nebd/src/part1/libnebd.h +++ b/nebd/src/part1/libnebd.h @@ -27,19 +27,19 @@ extern "C" { #endif +#include +#include +#include #include #include -#include -#include #include -#include +#include #include -#include -// 文件路径最大的长度,单位字节 -#define NEBD_MAX_FILE_PATH_LEN 1024 +// The maximum length of the file path, in bytes +#define NEBD_MAX_FILE_PATH_LEN 1024 -// nebd异步请求的类型 +// Types of nebd asynchronous requests typedef enum LIBAIO_OP { LIBAIO_OP_READ, LIBAIO_OP_WRITE, @@ -55,139 +55,147 @@ void nebd_lib_init_open_flags(NebdOpenFlags* flags); struct NebdClientAioContext; -// nebd回调函数的类型 +// The type of nebd callback function typedef void (*LibAioCallBack)(struct NebdClientAioContext* context); struct NebdClientAioContext { - off_t offset; // 请求的offset - size_t length; // 请求的length - int ret; // 记录异步返回的返回值 - LIBAIO_OP op; // 异步请求的类型,详见定义 - LibAioCallBack cb; // 异步请求的回调函数 - void* buf; // 请求的buf - unsigned int retryCount; // 记录异步请求的重试次数 + off_t offset; // Requested offset + size_t length; // Requested length + int ret; // Record the return value returned asynchronously + LIBAIO_OP + op; // The type of asynchronous request, as defined in the definition + LibAioCallBack cb; // Callback function for asynchronous requests + void* buf; // Buf requested + unsigned int + retryCount; // Record the number of retries for asynchronous requests }; // int nebd_lib_fini(void); /** - * @brief 初始化nebd,仅在第一次调用的时候真正执行初始化逻辑 - * @param none - * @return 成功返回0,失败返回-1 + * @brief initializes nebd and only executes the initialization logic on the + * first call + * @param none + * @return returns 0 for success, -1 for failure */ int nebd_lib_init(void); int nebd_lib_init_with_conf(const char* confPath); /** - * @brief 反初始化nebd - * @param none - * @return 成功返回0,失败返回-1 + * @brief uninitialize nebd + * @param none + * @return returns 0 for success, -1 for failure */ int nebd_lib_uninit(void); /** - * @brief open文件 - * @param filename:文件名 - * @return 成功返回文件fd,失败返回错误码 + * @brief open file + * @param filename: File name + * @return successfully returned the file fd, but failed with an error code */ int nebd_lib_open(const char* filename); int nebd_lib_open_with_flags(const char* filename, const NebdOpenFlags* openflags); /** - * @brief close文件 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief close file + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int nebd_lib_close(int fd); /** - * @brief 同步读文件 - * @param fd:文件的fd - * buf:存放读取data的buf - * offset:读取的位置offset - * length:读取的长度 - * @return 
成功返回0,失败返回错误码 + * @brief Synchronize file reading + * @param fd: fd of the file + * buf: Store and read data buf + * offset: The position read offset + * length: The length read + * @return success returns 0, failure returns error code */ int nebd_lib_pread(int fd, void* buf, off_t offset, size_t length); /** - * @brief 同步写文件 - * @param fd:文件的fd - * buf:存放写入data的buf - * offset:写入的位置offset - * length:写入的长度 - * @return 成功返回0,失败返回错误码 + * @brief Synchronize file writing + * @param fd: fd of the file + * buf: Store and read data buf + * offset: The position read offset + * length: The length read + * @return success returns 0, failure returns error code */ int nebd_lib_pwrite(int fd, const void* buf, off_t offset, size_t length); /** - * @brief discard文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief discard file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int nebd_lib_discard(int fd, struct NebdClientAioContext* context); /** - * @brief 读文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief Read file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int nebd_lib_aio_pread(int fd, struct NebdClientAioContext* context); /** - * @brief 写文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief write file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int nebd_lib_aio_pwrite(int fd, struct NebdClientAioContext* context); /** - * @brief sync文件 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief sync file + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int nebd_lib_sync(int fd); /** - * @brief 获取文件size - * @param fd:文件的fd - * @return 成功返回文件size,失败返回错误码 + * @brief Get file size + * @param fd: fd of the file + * @return successfully returned the file size, but failed with an error code */ int64_t nebd_lib_filesize(int fd); int64_t nebd_lib_blocksize(int fd); /** - * @brief resize文件 - * @param fd:文件的fd - * size:调整后的文件size - * @return 成功返回0,失败返回错误码 + * @brief resize file + * @param fd: fd of the file + * size: adjusted file size + * @return success returns 0, failure returns error code */ int nebd_lib_resize(int fd, int64_t size); /** - * @brief flush文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief flush file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int nebd_lib_flush(int fd, struct NebdClientAioContext* context); /** - * @brief 获取文件info - * @param fd:文件的fd - * @return 成功返回文件对象size,失败返回错误码 + * @brief Get file information + * @param fd: fd of the file + * @return successfully returned the file object size, but failed with an error + * code */ int64_t nebd_lib_getinfo(int fd); /** - * @brief 刷新cache,等所有异步请求返回 - * 
@param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief refresh cache, wait for all asynchronous requests to return + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int nebd_lib_invalidcache(int fd); diff --git a/nebd/src/part1/libnebd_file.h b/nebd/src/part1/libnebd_file.h index 6361094ab2..33e39a58c2 100644 --- a/nebd/src/part1/libnebd_file.h +++ b/nebd/src/part1/libnebd_file.h @@ -26,83 +26,89 @@ #include "nebd/src/part1/libnebd.h" /** - * @brief 初始化nebd,仅在第一次调用的时候真正执行初始化逻辑 - * @param none - * @return 成功返回0,失败返回-1 + * @brief initializes nebd and only executes the initialization logic on the + * first call + * @param none + * @return returns 0 for success, -1 for failure */ int Init4Nebd(const char* confpath); /** - * @brief 反初始化nebd - * @param none - * @return 成功返回0,失败返回-1 + * @brief uninitialize nebd + * @param none + * @return returns 0 for success, -1 for failure */ void Uninit4Nebd(); /** - * @brief open文件 - * @param filename:文件名 - * @return 成功返回文件fd,失败返回错误码 + * @brief open file + * @param filename: File name + * @return successfully returned the file fd, but failed with an error code */ int Open4Nebd(const char* filename, const NebdOpenFlags* flags); /** - * @brief close文件 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief close file + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int Close4Nebd(int fd); /** - * @brief resize文件 - * @param fd:文件的fd - * size:调整后的文件size - * @return 成功返回0,失败返回错误码 + * @brief resize file + * @param fd: fd of the file + * size: adjusted file size + * @return success returns 0, failure returns error code */ int Extend4Nebd(int fd, int64_t newsize); /** - * @brief 获取文件size - * @param fd:文件的fd - * @return 成功返回文件size,失败返回错误码 + * @brief Get file size + * @param fd: fd of the file + * @return successfully returned the file size, but failed with an error code */ int64_t GetFileSize4Nebd(int fd); int64_t GetBlockSize4Nebd(int fd); /** - * @brief discard文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief discard file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int Discard4Nebd(int fd, NebdClientAioContext* aioctx); /** - * @brief 读文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief Read file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int AioRead4Nebd(int fd, NebdClientAioContext* aioctx); /** - * @brief 写文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief write file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int AioWrite4Nebd(int fd, NebdClientAioContext* aioctx); /** - * @brief flush文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief flush file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the 
callback
+ * @return success returns 0, failure returns error code
 */
int Flush4Nebd(int fd, NebdClientAioContext* aioctx);
/**
- * @brief 获取文件info
- * @param fd:文件的fd
- * @return 成功返回文件对象size,失败返回错误码
+ * @brief Get info of the file
+ * @param fd: fd of the file
+ * @return on success, the file object size; on failure, an error code
 */
int64_t GetInfo4Nebd(int fd);
/**
- * @brief 刷新cache,等所有异步请求返回
- * @param fd:文件的fd
- * @return 成功返回0,失败返回错误码
+ * @brief Refresh the cache, waiting for all asynchronous requests to return
+ * @param fd: fd of the file
+ * @return 0 on success, an error code on failure
 */
int InvalidCache4Nebd(int fd);
diff --git a/nebd/src/part1/nebd_client.cpp b/nebd/src/part1/nebd_client.cpp
index bd1a2202ea..7f9ec811fd 100644
--- a/nebd/src/part1/nebd_client.cpp
+++ b/nebd/src/part1/nebd_client.cpp
@@ -22,30 +22,42 @@
 #include "nebd/src/part1/nebd_client.h"
-#include
-#include
-#include
 #include
-#include
-#include
 #include
+#include
+#include
+#include
+#include
+#include
+
 #include
-#include "nebd/src/part1/async_request_closure.h"
 #include "nebd/src/common/configuration.h"
+#include "nebd/src/part1/async_request_closure.h"
-#define RETURN_IF_FALSE(val) if (val == false) { return -1; }
+#define RETURN_IF_FALSE(val) \
+    if (val == false) {      \
+        return -1;           \
+    }
-// 修改brpc的health_check_interval参数,这个参数用来控制健康检查的周期
-// ## 健康检查
-// 连接断开的server会被暂时隔离而不会被负载均衡算法选中,brpc会定期连接被隔离的server,以检查他们是否恢复正常,间隔由参数-health_check_interval控制: // NOLINT
-// | Name | Value | Description | Defined At | // NOLINT
-// | ------------------------- | ----- | ---------------------------------------- | ----------------------- | // NOLINT
-// | health_check_interval (R) | 3 | seconds between consecutive health-checkings | src/brpc/socket_map.cpp | // NOLINT
-// 一旦server被连接上,它会恢复为可用状态。如果在隔离过程中,server从命名服务中删除了,brpc也会停止连接尝试。 // NOLINT
+// Modify brpc's health_check_interval parameter, which controls the period of
+// health checks.
+// ## Health Check
+// A disconnected server is temporarily isolated and will not be selected by
+// the load balancing algorithm. brpc periodically reconnects to isolated
+// servers to check whether they have recovered; the interval is controlled by
+// the -health_check_interval parameter:                                                          // NOLINT
+// | Name                      | Value | Description                                   | Defined At              | // NOLINT
+// | ------------------------- | ----- | --------------------------------------------- | ----------------------- | // NOLINT
+// | health_check_interval (R) | 3     | seconds between consecutive health-checkings | src/brpc/socket_map.cpp | // NOLINT
+// Once the server is connected, it will return to an available state.
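+// (These brpc gflags are overridden in NebdClient::InitChannel() below, using
+// the request.rpcHealthCheckIntervalS and
+// request.rpcMaxDelayHealthCheckIntervalMs configuration items.)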
If +// the server is removed from the naming service during the isolation process, +// brpc will also stop connection attempts// NOLINT namespace brpc { - DECLARE_int32(health_check_interval); - DECLARE_int32(circuit_breaker_max_isolation_duration_ms); +DECLARE_int32(health_check_interval); +DECLARE_int32(circuit_breaker_max_isolation_duration_ms); } // namespace brpc namespace nebd { @@ -53,7 +65,7 @@ namespace client { using nebd::common::FileLock; -NebdClient &nebdClient = NebdClient::GetInstance(); +NebdClient& nebdClient = NebdClient::GetInstance(); constexpr int32_t kBufSize = 128; @@ -98,8 +110,7 @@ int NebdClient::Init(const char* confpath) { } metaCache_ = std::make_shared(); - heartbeatMgr_ = std::make_shared( - metaCache_); + heartbeatMgr_ = std::make_shared(metaCache_); ret = heartbeatMgr_->Init(heartbeatOption); if (ret != 0) { @@ -139,7 +150,7 @@ void NebdClient::Uninit() { } int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) { - // 加文件锁 + // Add file lock std::string fileLockName = option_.fileLockPath + "/" + ReplaceSlash(filename); FileLock fileLock(fileLockName); @@ -150,8 +161,7 @@ int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) { return -1; } - auto task = [&](brpc::Controller* cntl, - brpc::Channel* channel, + auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { NebdFileService_Stub stub(channel); OpenFileRequest request; @@ -168,8 +178,7 @@ int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) { *rpcFailed = cntl->Failed(); if (*rpcFailed) { - LOG(WARNING) << "OpenFile rpc failed, error = " - << cntl->ErrorText() + LOG(WARNING) << "OpenFile rpc failed, error = " << cntl->ErrorText() << ", filename = " << filename << ", log id = " << cntl->log_id(); return -1; @@ -177,7 +186,7 @@ int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) { if (response.retcode() != RetCode::kOK) { LOG(ERROR) << "OpenFile failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() + << ", retmsg = " << response.retmsg() << ", filename = " << filename << ", log id = " << cntl->log_id(); return -1; @@ -199,8 +208,7 @@ int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) { } int NebdClient::Close(int fd) { - auto task = [&](brpc::Controller* cntl, - brpc::Channel* channel, + auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { NebdFileService_Stub stub(channel); CloseFileRequest request; @@ -219,7 +227,7 @@ int NebdClient::Close(int fd) { if (response.retcode() != RetCode::kOK) { LOG(ERROR) << "CloseFile failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() + << ", retmsg = " << response.retmsg() << ", fd = " << fd << ", log id = " << cntl->log_id(); } @@ -240,8 +248,7 @@ int NebdClient::Close(int fd) { } int NebdClient::Extend(int fd, int64_t newsize) { - auto task = [&](brpc::Controller* cntl, - brpc::Channel* channel, + auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { (void)channel; nebd::client::NebdFileService_Stub stub(&channel_); @@ -255,17 +262,15 @@ int NebdClient::Extend(int fd, int64_t newsize) { *rpcFailed = cntl->Failed(); if (*rpcFailed) { - LOG(WARNING) << "Resize RPC failed, error = " - << cntl->ErrorText() + LOG(WARNING) << "Resize RPC failed, error = " << cntl->ErrorText() << ", log id = " << cntl->log_id(); return -1; } else { if (response.retcode() != nebd::client::RetCode::kOK) { LOG(ERROR) << 
"ExtendFile failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() - << ", fd = " << fd - << ", newsize = " << newsize + << ", retmsg = " << response.retmsg() + << ", fd = " << fd << ", newsize = " << newsize << ", log id = " << cntl->log_id(); return -1; } else { @@ -276,15 +281,13 @@ int NebdClient::Extend(int fd, int64_t newsize) { int64_t ret = ExecuteSyncRpc(task); if (ret < 0) { - LOG(ERROR) << "Extend failed, fd = " << fd - << ", newsize = " << newsize; + LOG(ERROR) << "Extend failed, fd = " << fd << ", newsize = " << newsize; } return ret; } int64_t NebdClient::GetFileSize(int fd) { - auto task = [&](brpc::Controller* cntl, - brpc::Channel* channel, + auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { nebd::client::NebdFileService_Stub stub(channel); nebd::client::GetInfoRequest request; @@ -295,15 +298,14 @@ int64_t NebdClient::GetFileSize(int fd) { *rpcFailed = cntl->Failed(); if (*rpcFailed) { - LOG(WARNING) << "GetFileSize failed, error = " - << cntl->ErrorText() + LOG(WARNING) << "GetFileSize failed, error = " << cntl->ErrorText() << ", log id = " << cntl->log_id(); return -1; } else { if (response.retcode() != nebd::client::RetCode::kOK) { LOG(ERROR) << "GetFileSize failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() + << ", retmsg = " << response.retmsg() << ", fd = " << fd << ", log id = " << cntl->log_id(); return -1; @@ -366,8 +368,8 @@ int NebdClient::Discard(int fd, NebdClientAioContext* aioctx) { request.set_offset(aioctx->offset); request.set_size(aioctx->length); - AioDiscardClosure* done = new(std::nothrow) AioDiscardClosure( - fd, aioctx, option_.requestOption); + AioDiscardClosure* done = new (std::nothrow) + AioDiscardClosure(fd, aioctx, option_.requestOption); done->cntl.set_timeout_ms(-1); done->cntl.set_log_id(logId_.fetch_add(1, std::memory_order_relaxed)); stub.Discard(&done->cntl, &request, &done->response, done); @@ -386,8 +388,8 @@ int NebdClient::AioRead(int fd, NebdClientAioContext* aioctx) { request.set_offset(aioctx->offset); request.set_size(aioctx->length); - AioReadClosure* done = new(std::nothrow) AioReadClosure( - fd, aioctx, option_.requestOption); + AioReadClosure* done = new (std::nothrow) + AioReadClosure(fd, aioctx, option_.requestOption); done->cntl.set_timeout_ms(-1); done->cntl.set_log_id(logId_.fetch_add(1, std::memory_order_relaxed)); stub.Read(&done->cntl, &request, &done->response, done); @@ -398,9 +400,7 @@ int NebdClient::AioRead(int fd, NebdClientAioContext* aioctx) { return 0; } -static void EmptyDeleter(void* m) { - (void)m; -} +static void EmptyDeleter(void* m) { (void)m; } int NebdClient::AioWrite(int fd, NebdClientAioContext* aioctx) { auto task = [this, fd, aioctx]() { @@ -410,8 +410,8 @@ int NebdClient::AioWrite(int fd, NebdClientAioContext* aioctx) { request.set_offset(aioctx->offset); request.set_size(aioctx->length); - AioWriteClosure* done = new(std::nothrow) AioWriteClosure( - fd, aioctx, option_.requestOption); + AioWriteClosure* done = new (std::nothrow) + AioWriteClosure(fd, aioctx, option_.requestOption); done->cntl.set_timeout_ms(-1); done->cntl.set_log_id(logId_.fetch_add(1, std::memory_order_relaxed)); @@ -431,8 +431,8 @@ int NebdClient::Flush(int fd, NebdClientAioContext* aioctx) { nebd::client::FlushRequest request; request.set_fd(fd); - AioFlushClosure* done = new(std::nothrow) AioFlushClosure( - fd, aioctx, option_.requestOption); + AioFlushClosure* done = new (std::nothrow) + AioFlushClosure(fd, 
aioctx, option_.requestOption); done->cntl.set_timeout_ms(-1); done->cntl.set_log_id(logId_.fetch_add(1, std::memory_order_relaxed)); stub.Flush(&done->cntl, &request, &done->response, done); @@ -444,8 +444,7 @@ int NebdClient::Flush(int fd, NebdClientAioContext* aioctx) { } int64_t NebdClient::GetInfo(int fd) { - auto task = [&](brpc::Controller* cntl, - brpc::Channel* channel, + auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { nebd::client::NebdFileService_Stub stub(channel); nebd::client::GetInfoRequest request; @@ -456,15 +455,14 @@ int64_t NebdClient::GetInfo(int fd) { *rpcFailed = cntl->Failed(); if (*rpcFailed) { - LOG(WARNING) << "GetInfo rpc failed, error = " - << cntl->ErrorText() + LOG(WARNING) << "GetInfo rpc failed, error = " << cntl->ErrorText() << ", log id = " << cntl->log_id(); return -1; } else { if (response.retcode() != nebd::client::RetCode::kOK) { LOG(ERROR) << "GetInfo failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() + << ", retmsg = " << response.retmsg() << ", fd = " << fd << ", log id = " << cntl->log_id(); return -1; @@ -482,8 +480,7 @@ int64_t NebdClient::GetInfo(int fd) { } int NebdClient::InvalidCache(int fd) { - auto task = [&](brpc::Controller* cntl, - brpc::Channel* channel, + auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { nebd::client::NebdFileService_Stub stub(channel); nebd::client::InvalidateCacheRequest request; @@ -502,7 +499,7 @@ int NebdClient::InvalidCache(int fd) { if (response.retcode() != nebd::client::RetCode::kOK) { LOG(ERROR) << "InvalidCache failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() + << ", retmsg = " << response.retmsg() << ", fd = " << fd << ", log id = " << cntl->log_id(); return -1; @@ -526,8 +523,7 @@ int NebdClient::InitNebdClientOption(Configuration* conf) { LOG_IF(ERROR, ret != true) << "Load nebdserver.serverAddress failed"; RETURN_IF_FALSE(ret); - ret = conf->GetStringValue("metacache.fileLockPath", - &option_.fileLockPath); + ret = conf->GetStringValue("metacache.fileLockPath", &option_.fileLockPath); LOG_IF(ERROR, ret != true) << "Load metacache.fileLockPath failed"; RETURN_IF_FALSE(ret); @@ -550,7 +546,8 @@ int NebdClient::InitNebdClientOption(Configuration* conf) { ret = conf->GetInt64Value("request.rpcHostDownRetryIntervalUs", &requestOption.rpcHostDownRetryIntervalUs); - LOG_IF(ERROR, ret != true) << "Load request.rpcHostDownRetryIntervalUs failed"; // NOLINT + LOG_IF(ERROR, ret != true) + << "Load request.rpcHostDownRetryIntervalUs failed"; // NOLINT RETURN_IF_FALSE(ret); ret = conf->GetInt64Value("request.rpcHealthCheckIntervalS", @@ -560,7 +557,8 @@ int NebdClient::InitNebdClientOption(Configuration* conf) { ret = conf->GetInt64Value("request.rpcMaxDelayHealthCheckIntervalMs", &requestOption.rpcMaxDelayHealthCheckIntervalMs); - LOG_IF(ERROR, ret != true) << "Load request.rpcMaxDelayHealthCheckIntervalMs failed"; // NOLINT + LOG_IF(ERROR, ret != true) + << "Load request.rpcMaxDelayHealthCheckIntervalMs failed"; // NOLINT RETURN_IF_FALSE(ret); ret = conf->GetUInt32Value("request.rpcSendExecQueueNum", @@ -581,8 +579,8 @@ int NebdClient::InitNebdClientOption(Configuration* conf) { int NebdClient::InitHeartBeatOption(Configuration* conf, HeartbeatOption* heartbeatOption) { - bool ret = conf->GetInt64Value("heartbeat.intervalS", - &heartbeatOption->intervalS); + bool ret = + conf->GetInt64Value("heartbeat.intervalS", &heartbeatOption->intervalS); 
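     // Each option below follows the same pattern: read the key with
     // Configuration::Get*Value(), log an error if it is missing, and abort
     // initialization via RETURN_IF_FALSE instead of falling back to a silent
     // default. In nebd-client.conf the heartbeat section would look like the
     // following sketch (values are illustrative; heartbeat.intervalS is the
     // key loaded here, heartbeat.rpcTimeoutMs is assumed from
     // HeartbeatOption::rpcTimeoutMs):
     //   heartbeat.intervalS=5
     //   heartbeat.rpcTimeoutMs=500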
LOG_IF(ERROR, ret != true) << "Load heartbeat.intervalS failed"; RETURN_IF_FALSE(ret); @@ -604,8 +602,7 @@ int NebdClient::InitChannel() { option_.requestOption.rpcHealthCheckIntervalS; brpc::FLAGS_circuit_breaker_max_isolation_duration_ms = option_.requestOption.rpcMaxDelayHealthCheckIntervalMs; - int ret = channel_.InitWithSockFile( - option_.serverAddress.c_str(), nullptr); + int ret = channel_.InitWithSockFile(option_.serverAddress.c_str(), nullptr); if (ret != 0) { LOG(ERROR) << "Init Channel failed, socket addr = " << option_.serverAddress; @@ -652,7 +649,6 @@ std::string NebdClient::ReplaceSlash(const std::string& str) { return ret; } - void NebdClient::InitLogger(const LogOption& logOption) { static const char* kProcessName = "nebd-client"; @@ -661,8 +657,9 @@ void NebdClient::InitLogger(const LogOption& logOption) { google::InitGoogleLogging(kProcessName); } -int NebdClient::ExecAsyncRpcTask(void* meta, - bthread::TaskIterator& iter) { // NOLINT +int NebdClient::ExecAsyncRpcTask( + void* meta, + bthread::TaskIterator& iter) { // NOLINT (void)meta; if (iter.is_queue_stopped()) { return 0; diff --git a/nebd/src/part1/nebd_client.h b/nebd/src/part1/nebd_client.h index c814f9f711..815c4c7fe7 100644 --- a/nebd/src/part1/nebd_client.h +++ b/nebd/src/part1/nebd_client.h @@ -27,30 +27,28 @@ #include #include -#include #include +#include #include -#include "nebd/src/part1/nebd_common.h" -#include "nebd/src/common/configuration.h" +#include "include/curve_compiler_specific.h" #include "nebd/proto/client.pb.h" -#include "nebd/src/part1/libnebd.h" +#include "nebd/src/common/configuration.h" #include "nebd/src/part1/heartbeat_manager.h" +#include "nebd/src/part1/libnebd.h" +#include "nebd/src/part1/nebd_common.h" #include "nebd/src/part1/nebd_metacache.h" -#include "include/curve_compiler_specific.h" - namespace nebd { namespace client { -using RpcTask = std::function; +using RpcTask = std::function; using nebd::common::Configuration; class NebdClient { public: - static NebdClient &GetInstance() { + static NebdClient& GetInstance() { static NebdClient client; return client; } @@ -58,93 +56,100 @@ class NebdClient { ~NebdClient() = default; /** - * @brief 初始化nebd,仅在第一次调用的时候真正执行初始化逻辑 - * @param none - * @return 成功返回0,失败返回-1 + * @brief initializes nebd and only executes the initialization logic on the + * first call + * @param none + * @return returns 0 for success, -1 for failure */ int Init(const char* confpath); /** - * @brief 反初始化nebd - * @param none - * @return 成功返回0,失败返回-1 + * @brief uninitialize nebd + * @param none + * @return returns 0 for success, -1 for failure */ void Uninit(); /** - * @brief open文件 - * @param filename:文件名 - * @return 成功返回文件fd,失败返回错误码 + * @brief open file + * @param filename: File name + * @return successfully returned the file fd, but failed with an error code */ int Open(const char* filename, const NebdOpenFlags* flags); /** - * @brief close文件 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief close file + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int Close(int fd); /** - * @brief resize文件 - * @param fd:文件的fd - * size:调整后的文件size - * @return 成功返回0,失败返回错误码 + * @brief resize file + * @param fd: fd of the file + *Size: adjusted file size + * @return success returns 0, failure returns error code */ int Extend(int fd, int64_t newsize); /** - * @brief 获取文件size - * @param fd:文件的fd - * @return 成功返回文件size,失败返回错误码 + * @brief Get file size + * @param fd: fd of the file + * @return successfully returned the file size, but 
failed with an error + * code */ int64_t GetFileSize(int fd); int64_t GetBlockSize(int fd); /** - * @brief discard文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief discard file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int Discard(int fd, NebdClientAioContext* aioctx); /** - * @brief 读文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief Read file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int AioRead(int fd, NebdClientAioContext* aioctx); /** - * @brief 写文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief write file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int AioWrite(int fd, NebdClientAioContext* aioctx); /** - * @brief flush文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief flush file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int Flush(int fd, NebdClientAioContext* aioctx); /** - * @brief 获取文件info - * @param fd:文件的fd - * @return 成功返回文件对象size,失败返回错误码 + * @brief Get file information + * @param fd: fd of the file + * @return successfully returned the file object size, but failed with an + * error code */ int64_t GetInfo(int fd); /** - * @brief 刷新cache,等所有异步请求返回 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief refresh cache, wait for all asynchronous requests to return + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int InvalidCache(int fd); @@ -159,17 +164,17 @@ class NebdClient { void InitLogger(const LogOption& logOption); /** - * @brief 替换字符串中的 '/' 为 '+' + * @brief replaces'/'with'+'in the string * - * @param str 需要替换的字符串 - * @return 替换后的字符串 + * @param str The string that needs to be replaced + * @return The replaced string */ std::string ReplaceSlash(const std::string& str); int64_t ExecuteSyncRpc(RpcTask task); - // 心跳管理模块 + // Heartbeat management module std::shared_ptr heartbeatMgr_; - // 缓存模块 + // Cache module std::shared_ptr metaCache_; NebdClientOption option_; @@ -183,7 +188,8 @@ class NebdClient { std::vector> rpcTaskQueues_; - static int ExecAsyncRpcTask(void* meta, bthread::TaskIterator& iter); // NOLINT + static int ExecAsyncRpcTask( + void* meta, bthread::TaskIterator& iter); // NOLINT void PushAsyncTask(const AsyncRpcTask& task) { static thread_local unsigned int seed = time(nullptr); @@ -197,7 +203,7 @@ class NebdClient { } }; -extern NebdClient &nebdClient; +extern NebdClient& nebdClient; } // namespace client } // namespace nebd diff --git a/nebd/src/part1/nebd_common.h b/nebd/src/part1/nebd_common.h index 432f24534f..7c03839178 100644 --- a/nebd/src/part1/nebd_common.h +++ b/nebd/src/part1/nebd_common.h @@ -25,49 +25,49 @@ #include -// rpc request配置项 +// 
rpc request configuration item struct RequestOption { - // 同步rpc的最大重试次数 + // Maximum number of retries for synchronous rpc int64_t syncRpcMaxRetryTimes; - // rpc请求的重试间隔 + // The retry interval for rpc requests int64_t rpcRetryIntervalUs; - // rpc请求的最大重试间隔 + // Maximum retry interval for rpc requests int64_t rpcRetryMaxIntervalUs; - // rpc hostdown情况下的重试时间 + // The retry time in the case of rpc hostdown int64_t rpcHostDownRetryIntervalUs; - // brpc的健康检查周期时间 + // Health check cycle time for brpc int64_t rpcHealthCheckIntervalS; - // brpc从rpc失败到进行健康检查的最大时间间隔 + // The maximum time interval between RPC failure and health check in BRPC int64_t rpcMaxDelayHealthCheckIntervalMs; - // rpc发送执行队列个数 + // Number of RPC send execution queues uint32_t rpcSendExecQueueNum = 2; }; -// 日志配置项 +// Log Configuration Item struct LogOption { - // 日志存放目录 + // Log storage directory std::string logPath; }; -// nebd client配置项 +// nebd client configuration item struct NebdClientOption { // part2 socket file address std::string serverAddress; - // 文件锁路径 + // File lock path std::string fileLockPath; - // rpc request配置项 + // rpc request configuration item RequestOption requestOption; - // 日志配置项 + // Log Configuration Item LogOption logOption; }; -// heartbeat配置项 +// heartbeat configuration item struct HeartbeatOption { // part2 socket file address std::string serverAddress; - // heartbeat间隔 + // heartbeat interval int64_t intervalS; - // heartbeat rpc超时时间 + // heartbeat RPC timeout int64_t rpcTimeoutMs; }; diff --git a/nebd/src/part1/nebd_metacache.h b/nebd/src/part1/nebd_metacache.h index 3b596bdf62..5435e3af5f 100644 --- a/nebd/src/part1/nebd_metacache.h +++ b/nebd/src/part1/nebd_metacache.h @@ -23,9 +23,9 @@ #ifndef NEBD_SRC_PART1_NEBD_METACACHE_H_ #define NEBD_SRC_PART1_NEBD_METACACHE_H_ +#include #include #include -#include #include "nebd/src/common/file_lock.h" #include "nebd/src/common/rw_lock.h" @@ -42,16 +42,13 @@ struct NebdClientFileInfo { NebdClientFileInfo() = default; - NebdClientFileInfo( - int fd, const std::string& fileName, - const FileLock& fileLock) - : fd(fd), - fileName(fileName), - fileLock(fileLock) {} + NebdClientFileInfo(int fd, const std::string& fileName, + const FileLock& fileLock) + : fd(fd), fileName(fileName), fileLock(fileLock) {} }; /** - * @brief: 保存当前已打开文件信息 + * @brief: Save the information of the currently opened file */ class NebdClientMetaCache { public: @@ -59,33 +56,33 @@ class NebdClientMetaCache { ~NebdClientMetaCache() = default; /** - * @brief: 添加文件信息 - * @param: fileInfo 文件信息 + * @brief: Add file information + * @param: fileInfo: file information */ void AddFileInfo(const NebdClientFileInfo& fileInfo); /** - * @brief: 删除文件信息 - * @param: fd 文件描述符 + * @brief: Delete file information + * @param: fd: file descriptor */ void RemoveFileInfo(int fd); /** - * @brief: 获取对应fd的文件信息 - * @param: fd 文件fd + * @brief: Obtain the file information of the corresponding fd + * @param: fd: file fd * @param[out]: fileInfo - * @return: 0 成功 / -1 返回 + * @return: 0 succeeded/-1 returned */ int GetFileInfo(int fd, NebdClientFileInfo* fileInfo) const; /** - * @brief: 获取当前已打开文件信息 - * @return: 当前已打开文件信息 + * @brief: Get information about currently opened files + * @return: Currently opened file information */ std::vector GetAllFileInfo() const; private: - // 当前已打开文件信息 + // Currently opened file information std::unordered_map fileinfos_; mutable nebd::common::RWLock rwLock_; }; diff --git a/nebd/src/part2/define.h b/nebd/src/part2/define.h index 4c2fc54022..8a66854c59 100644 --- a/nebd/src/part2/define.h 
+++ b/nebd/src/part2/define.h @@ -25,24 +25,25 @@ #include #include -#include -#include + #include +#include +#include #include "nebd/src/common/rw_lock.h" namespace nebd { namespace server { -using nebd::common::RWLock; -using ::google::protobuf::Message; using ::google::protobuf::Closure; +using ::google::protobuf::Message; using ::google::protobuf::RpcController; +using nebd::common::RWLock; const char CURVE_PREFIX[] = "cbd"; const char TEST_PREFIX[] = "test"; -// nebd异步请求的类型 +// Types of nebd asynchronous requests enum class LIBAIO_OP { LIBAIO_OP_READ, LIBAIO_OP_WRITE, @@ -70,54 +71,55 @@ using RWLockPtr = std::shared_ptr; struct NebdServerAioContext; -// nebd回调函数的类型 +// The type of nebd callback function typedef void (*NebdAioCallBack)(struct NebdServerAioContext* context); -// nebd server端异步请求的上下文 -// 记录请求的类型、参数、返回信息、rpc信息 +// Context of Nebd server-side asynchronous requests +// Record the type, parameters, return information, and rpc information of the +// request struct NebdServerAioContext { - // 请求的offset + // Requested offset off_t offset = 0; - // 请求的size + // Requested size size_t size = 0; - // 记录异步返回的返回值 + // Record the return value returned asynchronously int ret = -1; - // 异步请求的类型,详见定义 + // The type of asynchronous request, as defined in the definition LIBAIO_OP op = LIBAIO_OP::LIBAIO_OP_UNKNOWN; - // 异步请求结束时调用的回调函数 + // Callback function called at the end of asynchronous request NebdAioCallBack cb; - // 请求的buf + // Buf requested void* buf = nullptr; - // rpc请求的相应内容 + // The corresponding content of the rpc request Message* response = nullptr; - // rpc请求的回调函数 - Closure *done = nullptr; - // rpc请求的controller + // Callback function for rpc requests + Closure* done = nullptr; + // Controller for rpc requests RpcController* cntl = nullptr; // return rpc when io error bool returnRpcWhenIoError = false; }; struct NebdFileInfo { - // 文件大小 + // File size uint64_t size; - // object/chunk大小 + // object/chunk size uint64_t obj_size; - // object数量 + // Number of objects uint64_t num_objs; // block size uint32_t block_size; }; using ExtendAttribute = std::map; -// nebd server 端文件持久化的元数据信息 +// Metadata information for file persistence on the Nebd server side struct NebdFileMeta { int fd; std::string fileName; ExtendAttribute xattr; }; -// part2配置项 +// part2 Configuration Item const char LISTENADDRESS[] = "listen.address"; const char METAFILEPATH[] = "meta.file.path"; const char HEARTBEATTIMEOUTSEC[] = "heartbeat.timeout.sec"; diff --git a/nebd/src/part2/file_entity.cpp b/nebd/src/part2/file_entity.cpp index 0899472c72..272e761ace 100644 --- a/nebd/src/part2/file_entity.cpp +++ b/nebd/src/part2/file_entity.cpp @@ -57,13 +57,13 @@ std::ostream& operator<<(std::ostream& os, const OpenFlags* flags) { } NebdFileEntity::NebdFileEntity() - : fd_(0) - , fileName_("") - , status_(NebdFileStatus::CLOSED) - , timeStamp_(0) - , fileInstance_(nullptr) - , executor_(nullptr) - , metaFileManager_(nullptr) {} + : fd_(0), + fileName_(""), + status_(NebdFileStatus::CLOSED), + timeStamp_(0), + fileInstance_(nullptr), + executor_(nullptr), + metaFileManager_(nullptr) {} NebdFileEntity::~NebdFileEntity() {} @@ -117,8 +117,7 @@ int NebdFileEntity::Open(const OpenFlags* openflags) { return -1; } LOG(INFO) << "Open file success. " - << "fd: " << fd_ - << ", filename: " << fileName_; + << "fd: " << fd_ << ", filename: " << fileName_; if (openflags) { openFlags_.reset(new OpenFlags{*openflags}); @@ -157,26 +156,28 @@ int NebdFileEntity::Reopen(const ExtendAttribute& xattr) { } LOG(INFO) << "Reopen file success. 
" - << "fd: " << fd_ - << ", filename: " << fileName_; + << "fd: " << fd_ << ", filename: " << fileName_; return fd_; } int NebdFileEntity::Close(bool removeMeta) { CHECK(executor_ != nullptr) << "file entity is not inited. " << "filename: " << fileName_; - // 用于和其他用户请求互斥,避免文件被close后,请求发到后端导致返回失败 + // This is used to prevent conflicts with other user requests to ensure that + // a file is not closed, and requests sent to the backend after the file has + // been closed result in failures. WriteLockGuard writeLock(rwLock_); - // 这里的互斥锁是为了跟open请求互斥,以下情况可能导致close和open并发 - // part2重启,导致文件被reopen,然后由于超时,文件准备被close - // 此时用户发送了挂载卷请求对文件进行open + // The mutex lock here is to prevent conflicts with open requests. The + // following scenarios may lead to concurrent close and open operations: + // part2 restarts, causing the file to be reopened. Due to a timeout, the + // file is about to be closed. At this point, a user sends a request to + // mount a volume, which involves opening the file. std::unique_lock lock(fileStatusMtx_); if (status_ == NebdFileStatus::OPENED) { int ret = executor_->Close(fileInstance_.get()); if (ret < 0) { LOG(ERROR) << "Close file failed. " - << "fd: " << fd_ - << ", filename: " << fileName_; + << "fd: " << fd_ << ", filename: " << fileName_; return -1; } status_ = NebdFileStatus::CLOSED; @@ -186,15 +187,13 @@ int NebdFileEntity::Close(bool removeMeta) { int ret = metaFileManager_->RemoveFileMeta(fileName_); if (ret != 0) { LOG(ERROR) << "Remove file record failed. " - << "fd: " << fd_ - << ", filename: " << fileName_; + << "fd: " << fd_ << ", filename: " << fileName_; return -1; } status_ = NebdFileStatus::DESTROYED; } LOG(INFO) << "Close file success. " - << "fd: " << fd_ - << ", filename: " << fileName_ + << "fd: " << fd_ << ", filename: " << fileName_ << ", meta removed? " << (removeMeta ? "yes" : "no"); return 0; } @@ -204,8 +203,7 @@ int NebdFileEntity::Discard(NebdServerAioContext* aioctx) { int ret = executor_->Discard(fileInstance_.get(), aioctx); if (ret < 0) { LOG(ERROR) << "Discard file failed. " - << "fd: " << fd_ - << ", fileName: " << fileName_ + << "fd: " << fd_ << ", fileName: " << fileName_ << ", context: " << *aioctx; return -1; } @@ -219,8 +217,7 @@ int NebdFileEntity::AioRead(NebdServerAioContext* aioctx) { int ret = executor_->AioRead(fileInstance_.get(), aioctx); if (ret < 0) { LOG(ERROR) << "AioRead file failed. " - << "fd: " << fd_ - << ", fileName: " << fileName_ + << "fd: " << fd_ << ", fileName: " << fileName_ << ", context: " << *aioctx; return -1; } @@ -234,8 +231,7 @@ int NebdFileEntity::AioWrite(NebdServerAioContext* aioctx) { int ret = executor_->AioWrite(fileInstance_.get(), aioctx); if (ret < 0) { LOG(ERROR) << "AioWrite file failed. " - << "fd: " << fd_ - << ", fileName: " << fileName_ + << "fd: " << fd_ << ", fileName: " << fileName_ << ", context: " << *aioctx; return -1; } @@ -249,8 +245,7 @@ int NebdFileEntity::Flush(NebdServerAioContext* aioctx) { int ret = executor_->Flush(fileInstance_.get(), aioctx); if (ret < 0) { LOG(ERROR) << "Flush file failed. " - << "fd: " << fd_ - << ", fileName: " << fileName_ + << "fd: " << fd_ << ", fileName: " << fileName_ << ", context: " << *aioctx; return -1; } @@ -264,8 +259,7 @@ int NebdFileEntity::Extend(int64_t newsize) { int ret = executor_->Extend(fileInstance_.get(), newsize); if (ret < 0) { LOG(ERROR) << "Extend file failed. 
" - << "fd: " << fd_ - << ", newsize: " << newsize + << "fd: " << fd_ << ", newsize: " << newsize << ", fileName" << fileName_; return -1; } @@ -279,8 +273,7 @@ int NebdFileEntity::GetInfo(NebdFileInfo* fileInfo) { int ret = executor_->GetInfo(fileInstance_.get(), fileInfo); if (ret < 0) { LOG(ERROR) << "Get file info failed. " - << "fd: " << fd_ - << ", fileName" << fileName_; + << "fd: " << fd_ << ", fileName" << fileName_; return -1; } return 0; @@ -293,8 +286,7 @@ int NebdFileEntity::InvalidCache() { int ret = executor_->InvalidCache(fileInstance_.get()); if (ret < 0) { LOG(ERROR) << "Invalid cache failed. " - << "fd: " << fd_ - << ", fileName" << fileName_; + << "fd: " << fd_ << ", fileName" << fileName_; return -1; } return 0; @@ -318,8 +310,7 @@ int NebdFileEntity::ProcessSyncRequest(ProcessTask task) { int ret = task(); if (ret < 0) { LOG(ERROR) << "Process sync request failed. " - << "fd: " << fd_ - << ", fileName" << fileName_; + << "fd: " << fd_ << ", fileName" << fileName_; return -1; } return 0; @@ -340,18 +331,19 @@ int NebdFileEntity::ProcessAsyncRequest(ProcessTask task, return -1; } - // 对于异步请求,将此closure传给aiocontext,从而在请求返回时释放读锁 + // For asynchronous requests, pass this closure to aiocontext to release the + // read lock when the request returns done->SetClosure(aioctx->done); aioctx->done = doneGuard.release(); int ret = task(); if (ret < 0) { - // 如果请求失败,这里要主动释放锁,并将aiocontext还原回去 + // If the request fails, the lock should be actively released here and + // the aiocontext should be restored back brpc::ClosureGuard doneGuard(done); aioctx->done = done->GetClosure(); done->SetClosure(nullptr); LOG(ERROR) << "Process async request failed. " - << "fd: " << fd_ - << ", fileName" << fileName_; + << "fd: " << fd_ << ", fileName" << fileName_; return -1; } return 0; @@ -381,11 +373,11 @@ int NebdFileEntity::UpdateFileStatus(NebdFileInstancePtr fileInstance) { } bool NebdFileEntity::GuaranteeFileOpened() { - // 文件如果已经被用户close了,就不允许后面请求再自动打开进行操作了 + // If the file has already been closed by the user, subsequent requests for + // automatic opening for operation are not allowed if (status_ == NebdFileStatus::DESTROYED) { LOG(ERROR) << "File has been destroyed. " - << "filename: " << fileName_ - << ", fd: " << fd_; + << "filename: " << fileName_ << ", fd: " << fd_; return false; } @@ -393,8 +385,7 @@ bool NebdFileEntity::GuaranteeFileOpened() { int ret = Open(openFlags_.get()); if (ret != fd_) { LOG(ERROR) << "Get opened file failed. 
" - << "filename: " << fileName_ - << ", fd: " << fd_ + << "filename: " << fileName_ << ", fd: " << fd_ << ", ret: " << ret; return false; } @@ -404,8 +395,8 @@ bool NebdFileEntity::GuaranteeFileOpened() { std::ostream& operator<<(std::ostream& os, const NebdFileEntity& entity) { std::string standardTime; - TimeUtility::TimeStampToStandard( - entity.GetFileTimeStamp() / 1000, &standardTime); + TimeUtility::TimeStampToStandard(entity.GetFileTimeStamp() / 1000, + &standardTime); os << "[filename: " << entity.GetFileName() << ", fd: " << entity.GetFd() << ", status: " << NebdFileStatus2Str(entity.GetFileStatus()) << ", timestamp: " << standardTime << "]"; diff --git a/nebd/src/part2/file_entity.h b/nebd/src/part2/file_entity.h index fb1e1448d8..c57d90e2ad 100644 --- a/nebd/src/part2/file_entity.h +++ b/nebd/src/part2/file_entity.h @@ -25,42 +25,44 @@ #include #include -#include -#include + #include +#include +#include #include // NOLINT +#include #include -#include +#include "nebd/proto/client.pb.h" #include "nebd/src/common/rw_lock.h" #include "nebd/src/common/timeutility.h" #include "nebd/src/part2/define.h" -#include "nebd/src/part2/util.h" -#include "nebd/src/part2/request_executor.h" #include "nebd/src/part2/metafile_manager.h" -#include "nebd/proto/client.pb.h" +#include "nebd/src/part2/request_executor.h" +#include "nebd/src/part2/util.h" namespace nebd { namespace server { using nebd::common::BthreadRWLock; -using nebd::common::WriteLockGuard; using nebd::common::ReadLockGuard; using nebd::common::TimeUtility; +using nebd::common::WriteLockGuard; using OpenFlags = nebd::client::ProtoOpenFlags; class NebdFileInstance; class NebdRequestExecutor; using NebdFileInstancePtr = std::shared_ptr; -// 处理用户请求时需要加读写锁,避免close时仍有用户IO未处理完成 -// 对于异步IO来说,只有返回时才能释放读锁,所以封装成Closure -// 在发送异步请求前,将closure赋值给NebdServerAioContext +// When processing user requests, it is necessary to add a read write lock to +// avoid user IO still not being processed when closing For asynchronous IO, the +// read lock can only be released on return, so it is encapsulated as a Closure +// Assign the closure value to NebdServerAioContext before sending an +// asynchronous request class NebdRequestReadLockClosure : public Closure { public: explicit NebdRequestReadLockClosure(BthreadRWLock& rwLock) // NOLINT - : rwLock_(rwLock) - , done_(nullptr) { + : rwLock_(rwLock), done_(nullptr) { rwLock_.RDLock(); } ~NebdRequestReadLockClosure() {} @@ -71,13 +73,9 @@ class NebdRequestReadLockClosure : public Closure { rwLock_.Unlock(); } - void SetClosure(Closure* done) { - done_ = done; - } + void SetClosure(Closure* done) { done_ = done; } - Closure* GetClosure() { - return done_; - } + Closure* GetClosure() { return done_; } private: BthreadRWLock& rwLock_; @@ -96,134 +94,132 @@ class NebdFileEntity : public std::enable_shared_from_this { virtual ~NebdFileEntity(); /** - * 初始化文件实体 - * @param option: 初始化参数 - * @return 成功返回0, 失败返回-1 + * Initialize File Entity + * @param option: Initialize parameters + * @return returns 0 for success, -1 for failure */ virtual int Init(const NebdFileEntityOption& option); /** - * 打开文件 - * @return 成功返回fd,失败返回-1 + * Open File + * @return successfully returns fd, failure returns -1 */ virtual int Open(const OpenFlags* openflags); /** - * 重新open文件,如果之前的后端存储的连接还存在则复用之前的连接 - * 否则与后端存储建立新的连接 - * @param xattr: 文件reopen需要的信息 - * @return 成功返回fd,失败返回-1 + * Reopen the file and reuse the previous backend storage connection if it + * still exists Otherwise, establish a new connection with the backend + * storage + * 
@param xattr: Information required for file reopening + * @return successfully returns fd, failure returns -1 */ virtual int Reopen(const ExtendAttribute& xattr); /** - * 关闭文件 - * @param removeMeta: 是否要移除文件元数据记录,true表示移除,false表示不移除 - * 如果是part1传过来的close请求,此参数为true - * 如果是heartbeat manager发起的close请求,此参数为false - * @return 成功返回0,失败返回-1 + *Close File + * @param removeMeta: Do you want to remove the file metadata record? True + *means remove, false means not remove If it is a close request passed from + *part1, this parameter is true If it is a close request initiated by the + *heartbeat manager, this parameter is false + * @return returns 0 for success, -1 for failure */ virtual int Close(bool removeMeta); /** - * 给文件扩容 - * @param newsize: 新的文件大小 - * @return 成功返回0,失败返回-1 + * Expand file capacity + * @param newsize: New file size + * @return returns 0 for success, -1 for failure */ virtual int Extend(int64_t newsize); /** - * 获取文件信息 - * @param fileInfo[out]: 文件信息 - * @return 成功返回0,失败返回-1 + * Obtain file information + * @param fileInfo[out]: File information + * @return returns 0 for success, -1 for failure */ virtual int GetInfo(NebdFileInfo* fileInfo); /** - * 异步请求,回收指定区域空间 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to reclaim the specified area space + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int Discard(NebdServerAioContext* aioctx); /** - * 异步请求,读取指定区域内容 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to read the content of the specified area + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int AioRead(NebdServerAioContext* aioctx); /** - * 异步请求,写数据到指定区域 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request, writing data to a specified area + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int AioWrite(NebdServerAioContext* aioctx); /** - * 异步请求,flush文件缓存 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous requests, flush file caching + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int Flush(NebdServerAioContext* aioctx); /** - * 使指定文件缓存失效 - * @return 成功返回0,失败返回-1 + * Invalidate the specified file cache + * @return returns 0 for success, -1 for failure */ virtual int InvalidCache(); - virtual std::string GetFileName() const { - return fileName_; - } + virtual std::string GetFileName() const { return fileName_; } - virtual int GetFd() const { - return fd_; - } + virtual int GetFd() const { return fd_; } virtual void UpdateFileTimeStamp(uint64_t timestamp) { timeStamp_.store(timestamp); } - virtual uint64_t GetFileTimeStamp() const { - return timeStamp_.load(); - } + virtual uint64_t GetFileTimeStamp() const { return timeStamp_.load(); } - virtual NebdFileStatus GetFileStatus() const { - return status_.load(); - } + virtual NebdFileStatus GetFileStatus() const { return status_.load(); } private: /** - * 更新文件状态,包括元信息文件和内存状态 - * @param fileInstancea: open或reopen返回的文件上下文信息 - * @return: 成功返回0,失败返回-1 + * Update file status, including meta information files and memory status + * @param fileInstancea: The file context information returned by open or + * reopen + * @return: Success returns 0, failure returns -1 */ int UpdateFileStatus(NebdFileInstancePtr fileInstance); /** - * 请求统一处理函数 - * @param task: 实际请求执行的函数体 - * @return: 成功返回0,失败返回-1 + * Request Unified 
Processing Function + * @param task: The actual request to execute the function body + * @return: Success returns 0, failure returns -1 */ using ProcessTask = std::function; int ProcessSyncRequest(ProcessTask task); int ProcessAsyncRequest(ProcessTask task, NebdServerAioContext* aioctx); - // 确保文件处于opened状态,如果不是则尝试进行open - // 无法open或者open失败,则返回false, - // 如果文件处于open状态,则返回true + // Ensure that the file is in an open state, and if not, attempt to open it + // Unable to open or failed to open, returns false, + // If the file is in the open state, return true bool GuaranteeFileOpened(); private: - // 文件读写锁,处理请求前加读锁,close文件的时候加写锁 - // 避免close时还有请求未处理完 + // File read/write lock, apply read lock before processing requests, and + // apply write lock when closing files Avoiding pending requests during + // close BthreadRWLock rwLock_; - // 互斥锁,用于open、close之间的互斥 + // Mutex lock, used for mutual exclusion between open and close bthread::Mutex fileStatusMtx_; - // nebd server为该文件分配的唯一标识符 + // The unique identifier assigned by the nebd server to this file int fd_; - // 文件名称 + // File Name std::string fileName_; std::unique_ptr openFlags_; - // 文件当前状态,opened表示文件已打开,closed表示文件已关闭 + // The current state of the file, where 'opened' indicates that the file is + // open and 'closed' indicates that the file is closed std::atomic status_; - // 该文件上一次收到心跳时的时间戳 + // The timestamp of the last time the file received a heartbeat std::atomic timeStamp_; - // 文件在executor open时返回上下文信息,用于后续文件的请求处理 + // When the file is opened by the executor, contextual information is + // returned for subsequent file request processing NebdFileInstancePtr fileInstance_; - // 文件对应的executor的指针 + // Pointer to the executor corresponding to the file NebdRequestExecutor* executor_; - // 元数据持久化管理 + // Metadata Persistence Management MetaFileManagerPtr metaFileManager_; }; using NebdFileEntityPtr = std::shared_ptr; diff --git a/nebd/src/part2/file_manager.cpp b/nebd/src/part2/file_manager.cpp index 5c1dc2a15c..d139829f4f 100644 --- a/nebd/src/part2/file_manager.cpp +++ b/nebd/src/part2/file_manager.cpp @@ -34,8 +34,7 @@ namespace nebd { namespace server { NebdFileManager::NebdFileManager(MetaFileManagerPtr metaFileManager) - : isRunning_(false) - , metaFileManager_(metaFileManager) {} + : isRunning_(false), metaFileManager_(metaFileManager) {} NebdFileManager::~NebdFileManager() {} @@ -62,14 +61,14 @@ int NebdFileManager::Fini() { } int NebdFileManager::Load() { - // 从元数据文件中读取持久化的文件信息 + // Reading persistent file information from metadata files std::vector fileMetas; int ret = metaFileManager_->ListFileMeta(&fileMetas); if (ret < 0) { LOG(ERROR) << "Load file metas failed."; return ret; } - // 根据持久化的信息重新open文件 + // Reopen files based on persistent information int maxFd = 0; for (auto& fileMeta : fileMetas) { NebdFileEntityPtr entity = @@ -174,8 +173,7 @@ int NebdFileManager::InvalidCache(int fd) { return entity->InvalidCache(); } -NebdFileEntityPtr -NebdFileManager::GetFileEntity(int fd) { +NebdFileEntityPtr NebdFileManager::GetFileEntity(int fd) { ReadLockGuard readLock(rwLock_); auto iter = fileMap_.find(fd); if (iter == fileMap_.end()) { @@ -221,7 +219,7 @@ NebdFileEntityPtr NebdFileManager::GenerateFileEntity( } } - // 检测是否存在冲突的文件记录 + // Detect for conflicting file records auto iter = fileMap_.find(fd); if (iter != fileMap_.end()) { LOG(ERROR) << "File entity conflict. 
" diff --git a/nebd/src/part2/file_manager.h b/nebd/src/part2/file_manager.h index bac54fd1fa..f81a3d72d0 100644 --- a/nebd/src/part2/file_manager.h +++ b/nebd/src/part2/file_manager.h @@ -25,27 +25,28 @@ #include #include + #include +#include // NOLINT #include #include -#include // NOLINT #include -#include "nebd/src/common/rw_lock.h" +#include "nebd/proto/client.pb.h" #include "nebd/src/common/name_lock.h" +#include "nebd/src/common/rw_lock.h" #include "nebd/src/part2/define.h" -#include "nebd/src/part2/util.h" #include "nebd/src/part2/file_entity.h" #include "nebd/src/part2/metafile_manager.h" -#include "nebd/proto/client.pb.h" +#include "nebd/src/part2/util.h" namespace nebd { namespace server { using nebd::common::NameLock; using nebd::common::NameLockGuard; -using nebd::common::WriteLockGuard; using nebd::common::ReadLockGuard; +using nebd::common::WriteLockGuard; using OpenFlags = nebd::client::ProtoOpenFlags; using FileEntityMap = std::unordered_map; @@ -54,119 +55,124 @@ class NebdFileManager { explicit NebdFileManager(MetaFileManagerPtr metaFileManager); virtual ~NebdFileManager(); /** - * 停止FileManager并释放FileManager资源 - * @return 成功返回0,失败返回-1 + * Stop FileManager and release FileManager resources + * @return returns 0 for success, -1 for failure */ virtual int Fini(); /** - * 启动FileManager - * @return 成功返回0,失败返回-1 + * Start FileManager + * @return returns 0 for success, -1 for failure */ virtual int Run(); /** - * 打开文件 - * @param filename: 文件的filename - * @return 成功返回fd,失败返回-1 + * Open File + * @param filename: The filename of the file + * @return successfully returns fd, failure returns -1 */ virtual int Open(const std::string& filename, const OpenFlags* flags); /** - * 关闭文件 - * @param fd: 文件的fd - * @param removeRecord: 是否要移除文件记录,true表示移除,false表示不移除 - * 如果是part1传过来的close请求,此参数为true - * 如果是heartbeat manager发起的close请求,此参数为false - * @return 成功返回0,失败返回-1 + * Close File + * @param fd: fd of the file + * @param removeRecord: Do you want to remove the file record? 
True means + * remove, false means not remove If it is a close request passed from + * part1, this parameter is true If it is a close request initiated by the + * heartbeat manager, this parameter is false + * @return returns 0 for success, -1 for failure */ virtual int Close(int fd, bool removeRecord); /** - * 给文件扩容 - * @param fd: 文件的fd - * @param newsize: 新的文件大小 - * @return 成功返回0,失败返回-1 + * Expand file capacity + * @param fd: fd of the file + * @param newsize: New file size + * @return returns 0 for success, -1 for failure */ virtual int Extend(int fd, int64_t newsize); /** - * 获取文件信息 - * @param fd: 文件的fd - * @param fileInfo[out]: 文件信息 - * @return 成功返回0,失败返回-1 + * Obtain file information + * @param fd: fd of the file + * @param fileInfo[out]: File information + * @return returns 0 for success, -1 for failure */ virtual int GetInfo(int fd, NebdFileInfo* fileInfo); /** - * 异步请求,回收指定区域空间 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to reclaim the specified area space + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int Discard(int fd, NebdServerAioContext* aioctx); /** - * 异步请求,读取指定区域内容 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to read the content of the specified area + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int AioRead(int fd, NebdServerAioContext* aioctx); /** - * 异步请求,写数据到指定区域 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request, writing data to a specified area + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int AioWrite(int fd, NebdServerAioContext* aioctx); /** - * 异步请求,flush文件缓存 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous requests, flush file caching + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int Flush(int fd, NebdServerAioContext* aioctx); /** - * 使指定文件缓存失效 - * @param fd: 文件的fd - * @return 成功返回0,失败返回-1 + * Invalidate the specified file cache + * @param fd: fd of the file + * @return returns 0 for success, -1 for failure */ virtual int InvalidCache(int fd); - // 根据fd从map中获取指定的entity - // 如果entity已存在,返回entity指针,否则返回nullptr + // Obtain the specified entity from the map based on fd + // If entity already exists, return entity pointer; otherwise, return + // nullptr virtual NebdFileEntityPtr GetFileEntity(int fd); virtual FileEntityMap GetFileEntityMap(); - // 将所有文件状态输出到字符串 + // Output all file states to a string std::string DumpAllFileStatus(); // set public for test - // 启动时从metafile加载文件记录,并reopen文件 + // Load file records from metafile at startup and reopen the file int Load(); private: - // 分配新的可用的fd,fd不允许和已经存在的重复 - // 成功返回的可用fd,失败返回-1 + // Assign new available fds, fds are not allowed to duplicate existing ones + // Successfully returned available fd, failed returned -1 int GenerateValidFd(); - // 根据文件名获取file entity - // 如果entity存在,直接返回entity指针 - // 如果entity不存在,则创建新的entity,并插入map,然后返回 + // Obtain file entity based on file name + // If entity exists, directly return the entity pointer + // If the entity does not exist, create a new entity, insert a map, and then + // return NebdFileEntityPtr GetOrCreateFileEntity(const 
std::string& fileName); - // 根据fd和文件名生成file entity, - // 如果fd对于的entity已存在,直接返回entity指针 - // 如果entity不存在,则生成新的entity,并插入map,然后返回 + // Generate file entity based on fd and file name, + // If fd already exists for entity, directly return the entity pointer + // If the entity does not exist, generate a new entity, insert a map, and + // then return NebdFileEntityPtr GenerateFileEntity(int fd, const std::string& fileName); - // 删除指定fd对应的entity + // Delete the entity corresponding to the specified fd void RemoveEntity(int fd); private: - // 当前filemanager的运行状态,true表示正在运行,false标为未运行 + // The current running status of the filemanager, where true indicates + // running and false indicates not running std::atomic isRunning_; - // 文件名锁,对同名文件加锁 + // File name lock, lock files with the same name NameLock nameLock_; - // fd分配器 + // Fd distributor FdAllocator fdAlloc_; - // nebd server 文件记录管理 + // nebd server file record management MetaFileManagerPtr metaFileManager_; - // file map 读写保护锁 + // file map read write protection lock RWLock rwLock_; - // 文件fd和文件实体的映射 + // Mapping of file fd and file entities FileEntityMap fileMap_; }; using NebdFileManagerPtr = std::shared_ptr; diff --git a/nebd/src/part2/heartbeat_manager.cpp b/nebd/src/part2/heartbeat_manager.cpp index 4516874807..739bf586a7 100644 --- a/nebd/src/part2/heartbeat_manager.cpp +++ b/nebd/src/part2/heartbeat_manager.cpp @@ -20,11 +20,12 @@ * Author: yangyaokai */ -#include +#include "nebd/src/part2/heartbeat_manager.h" + #include +#include #include "nebd/src/common/timeutility.h" -#include "nebd/src/part2/heartbeat_manager.h" namespace nebd { namespace server { @@ -69,7 +70,7 @@ void HeartbeatManager::UpdateNebdClientInfo(int pid, const std::string& version, const auto& iter = nebdClients_.find(pid); if (iter == nebdClients_.end()) { nebdClients_[pid] = - std::make_shared(pid, version, timestamp); + std::make_shared(pid, version, timestamp); nebdClientNum_ << 1; } else { nebdClients_[pid]->timeStamp = timestamp; @@ -79,8 +80,8 @@ void HeartbeatManager::UpdateNebdClientInfo(int pid, const std::string& version, } void HeartbeatManager::CheckTimeoutFunc() { - while (sleeper_.wait_for( - std::chrono::milliseconds(checkTimeoutIntervalMs_))) { + while ( + sleeper_.wait_for(std::chrono::milliseconds(checkTimeoutIntervalMs_))) { LOG_EVERY_N(INFO, 60 * 1000 / checkTimeoutIntervalMs_) << "Checking timeout, file status: " << fileManager_->DumpAllFileStatus(); @@ -107,24 +108,24 @@ void HeartbeatManager::CheckTimeoutFunc() { bool HeartbeatManager::CheckNeedClosed(NebdFileEntityPtr entity) { uint64_t curTime = TimeUtility::GetTimeofDayMs(); uint64_t interval = curTime - entity->GetFileTimeStamp(); - // 文件如果是opened状态,并且已经超时,则需要调用close - bool needClose = entity->GetFileStatus() == NebdFileStatus::OPENED - && interval > (uint64_t)1000 * heartbeatTimeoutS_; + // If the file is in an open state and has timed out, you need to call close + bool needClose = entity->GetFileStatus() == NebdFileStatus::OPENED && + interval > (uint64_t)1000 * heartbeatTimeoutS_; return needClose; } std::ostream& operator<<(std::ostream& os, NebdClientInfo* info) { std::string standardTime; TimeUtility::TimeStampToStandard(info->timeStamp / 1000, &standardTime); - os << "pid: " << info->pid << ", version: " - << info->version.GetValueByKey(kVersion) + os << "pid: " << info->pid + << ", version: " << info->version.GetValueByKey(kVersion) << ", last time received heartbeat: " << standardTime; return os; } void HeartbeatManager::RemoveTimeoutNebdClient() { WriteLockGuard writeLock(rwLock_); 
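     // Sweep the registered client records under the write lock and drop any
     // entry whose last heartbeat is older than the configured timeout.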
- auto iter = nebdClients_.begin(); + auto iter = nebdClients_.begin(); while (iter != nebdClients_.end()) { uint64_t curTime = TimeUtility::GetTimeofDayMs(); uint64_t interval = curTime - iter->second->timeStamp; diff --git a/nebd/src/part2/heartbeat_manager.h b/nebd/src/part2/heartbeat_manager.h index 73943bc4bc..69b4c3eed2 100644 --- a/nebd/src/part2/heartbeat_manager.h +++ b/nebd/src/part2/heartbeat_manager.h @@ -24,32 +24,34 @@ #define NEBD_SRC_PART2_HEARTBEAT_MANAGER_H_ #include -#include // NOLINT + #include -#include #include +#include #include +#include // NOLINT #include "nebd/src/common/interrupt_sleep.h" #include "nebd/src/common/rw_lock.h" #include "nebd/src/common/stringstatus.h" -#include "nebd/src/part2/file_manager.h" #include "nebd/src/part2/define.h" +#include "nebd/src/part2/file_manager.h" namespace nebd { namespace server { using nebd::common::InterruptibleSleeper; +using nebd::common::ReadLockGuard; using nebd::common::RWLock; using nebd::common::WriteLockGuard; -using nebd::common::ReadLockGuard; struct HeartbeatManagerOption { - // 文件心跳超时时间(单位:秒) + // File heartbeat timeout (in seconds) uint32_t heartbeatTimeoutS; - // 心跳超时检测线程的检测间隔(时长:毫秒) + // Check interval of the heartbeat timeout detection thread (in + // milliseconds) uint32_t checkTimeoutIntervalMs; - // filemanager 对象指针 + // filemanager object pointer NebdFileManagerPtr fileManager; }; @@ -57,42 +59,44 @@ const char kNebdClientMetricPrefix[] = "nebd_client_pid_"; const char kVersion[] = "version"; struct NebdClientInfo { - NebdClientInfo(int pid2, const std::string& version2, - uint64_t timeStamp2) : - pid(pid2), timeStamp(timeStamp2) { + NebdClientInfo(int pid2, const std::string& version2, uint64_t timeStamp2) + : pid(pid2), timeStamp(timeStamp2) { version.ExposeAs(kNebdClientMetricPrefix, - std::to_string(pid2) + "_version"); + std::to_string(pid2) + "_version"); version.Set(kVersion, version2); version.Update(); } - // nebd client的进程号 + // Process id of the nebd client int pid; - // nebd version的metric + // Metric of the nebd version nebd::common::StringStatus version; - // 上次心跳的时间戳 + // Timestamp of the last heartbeat uint64_t timeStamp; }; -// 负责文件心跳超时管理 +// Responsible for managing file heartbeat timeout class HeartbeatManager { public: explicit HeartbeatManager(HeartbeatManagerOption option) - : isRunning_(false) - , heartbeatTimeoutS_(option.heartbeatTimeoutS) - , checkTimeoutIntervalMs_(option.checkTimeoutIntervalMs) - , fileManager_(option.fileManager) { + : isRunning_(false), + heartbeatTimeoutS_(option.heartbeatTimeoutS), + checkTimeoutIntervalMs_(option.checkTimeoutIntervalMs), + fileManager_(option.fileManager) { nebdClientNum_.expose("nebd_client_num"); } virtual ~HeartbeatManager() {} - // 启动心跳检测线程 + // Start the heartbeat detection thread virtual int Run(); - // 停止心跳检测线程 + // Stop the heartbeat detection thread virtual int Fini(); - // part2收到心跳后,会通过该接口更新心跳中包含的文件在内存中记录的时间戳 - // 心跳检测线程会根据该时间戳判断是否需要关闭文件 + // After part2 receives a heartbeat, it updates, through this interface, + // the in-memory timestamps of the files carried by the heartbeat. + // The heartbeat detection thread uses these timestamps to decide whether + // a file needs to be closed virtual bool UpdateFileTimestamp(int fd, uint64_t timestamp); - // part2收到心跳后,会通过该接口更新part1的时间戳 + // After receiving a heartbeat, part2 updates part1's timestamp through + // this interface virtual void UpdateNebdClientInfo(int pid, const std::string& version, uint64_t timestamp); std::map> GetNebdClients() { @@ -101,31
+105,32 @@ class HeartbeatManager { } private: - // 心跳检测线程的函数执行体 + // Body of the heartbeat detection thread void CheckTimeoutFunc(); - // 判断文件是否需要close + // Determine if the file needs to be closed bool CheckNeedClosed(NebdFileEntityPtr entity); - // 从内存中删除已经超时的nebdClientInfo + // Delete nebdClientInfo that has timed out from memory void RemoveTimeoutNebdClient(); private: - // 当前heartbeatmanager的运行状态,true表示正在运行,false标为未运行 + // The current running status of the heartbeatmanager, where true indicates + // running and false indicates not running std::atomic isRunning_; - // 文件心跳超时时长 + // File heartbeat timeout duration uint32_t heartbeatTimeoutS_; - // 心跳超时检测线程的检测时间间隔 + // Check interval of the heartbeat timeout detection thread uint32_t checkTimeoutIntervalMs_; - // 心跳检测线程 + // Heartbeat detection thread std::thread checkTimeoutThread_; - // 心跳检测线程的sleeper + // Sleeper of the heartbeat detection thread InterruptibleSleeper sleeper_; - // filemanager 对象指针 + // filemanager object pointer NebdFileManagerPtr fileManager_; - // nebd client的信息 + // Information on nebd clients std::map> nebdClients_; - // nebdClient的计数器 + // Counter of nebdClients bvar::Adder nebdClientNum_; - // file map 读写保护锁 + // Read-write lock protecting the file map RWLock rwLock_; }; diff --git a/nebd/src/part2/main.cpp b/nebd/src/part2/main.cpp index f8c742fe9a..ef1381bcc1 100644 --- a/nebd/src/part2/main.cpp +++ b/nebd/src/part2/main.cpp @@ -20,29 +20,30 @@ * Author: hzwuhongsong */ +#include #include #include -#include + #include "nebd/src/part2/nebd_server.h" DEFINE_string(confPath, "/etc/nebd/nebd-server.conf", "nebd server conf path"); int main(int argc, char* argv[]) { - // 解析参数 + // Parse arguments google::ParseCommandLineFlags(&argc, &argv, false); google::InitGoogleLogging(argv[0]); std::string confPath = FLAGS_confPath.c_str(); - // 启动nebd server + // Start nebd server auto server = std::make_shared<::nebd::server::NebdServer>(); int initRes = server->Init(confPath); if (initRes < 0) { - LOG(ERROR) << "init nebd server fail"; + LOG(ERROR) << "init nebd server fail"; return -1; } server->RunUntilAskedToQuit(); - // 停止nebd server + // Stop nebd server server->Fini(); google::ShutdownGoogleLogging(); diff --git a/nebd/src/part2/metafile_manager.cpp b/nebd/src/part2/metafile_manager.cpp index 6fcdc5c94b..03c5f1d366 100644 --- a/nebd/src/part2/metafile_manager.cpp +++ b/nebd/src/part2/metafile_manager.cpp @@ -20,19 +20,18 @@ * Author: charisu */ +#include "nebd/src/part2/metafile_manager.h" + #include #include -#include "nebd/src/part2/metafile_manager.h" #include "nebd/src/part2/request_executor.h" namespace nebd { namespace server { NebdMetaFileManager::NebdMetaFileManager() - : metaFilePath_("") - , wrapper_(nullptr) - , parser_(nullptr) {} + : metaFilePath_(""), wrapper_(nullptr), parser_(nullptr) {} NebdMetaFileManager::~NebdMetaFileManager() {} @@ -52,9 +51,10 @@ int NebdMetaFileManager::Init(const NebdMetaFileManagerOption& option) { int NebdMetaFileManager::UpdateFileMeta(const std::string& fileName, const NebdFileMeta& fileMeta) { WriteLockGuard writeLock(rwLock_); - bool needUpdate = metaCache_.find(fileName) == metaCache_.end() - || fileMeta != metaCache_[fileName]; - // 如果元数据信息没发生变更,则不需要写文件 + bool needUpdate = metaCache_.find(fileName) == metaCache_.end() || + fileMeta != metaCache_[fileName]; + // If the metadata has not changed, there is no need to write the + // file if (!needUpdate) { return 0; } @@ -105,29 +105,29 @@ int NebdMetaFileManager::UpdateMetaFile(const FileMetaMap&
fileMetas) { } int NebdMetaFileManager::AtomicWriteFile(const Json::Value& root) { - // 写入tmp文件 + // Write the tmp file std::string tmpFilePath = metaFilePath_ + ".tmp"; - int fd = wrapper_->open(tmpFilePath.c_str(), O_CREAT|O_RDWR, 0644); - // open文件失败 + int fd = wrapper_->open(tmpFilePath.c_str(), O_CREAT | O_RDWR, 0644); + // Failed to open the file if (fd <= 0) { LOG(ERROR) << "Open tmp file " << tmpFilePath << " fail"; return -1; } - // 写入 + // Write std::string jsonString = root.toStyledString(); - int writeSize = wrapper_->pwrite(fd, jsonString.c_str(), - jsonString.size(), 0); + int writeSize = + wrapper_->pwrite(fd, jsonString.c_str(), jsonString.size(), 0); wrapper_->close(fd); if (writeSize != static_cast(jsonString.size())) { LOG(ERROR) << "Write tmp file " << tmpFilePath << " fail"; return -1; } - // 重命名 + // Rename int res = wrapper_->rename(tmpFilePath.c_str(), metaFilePath_.c_str()); if (res != 0) { - LOG(ERROR) << "rename file " << tmpFilePath << " to " - << metaFilePath_ << " fail"; + LOG(ERROR) << "rename file " << tmpFilePath << " to " << metaFilePath_ + << " fail"; return -1; } return 0; @@ -138,7 +138,8 @@ int NebdMetaFileManager::LoadFileMeta() { FileMetaMap tempMetas; std::ifstream in(metaFilePath_, std::ios::binary); if (!in) { - // 这里不应该返回错误,第一次初始化的时候文件可能还未创建 + // No error should be returned here; the file may not have been created + // yet during the first initialization LOG(WARNING) << "File not exist: " << metaFilePath_; return 0; } @@ -149,8 +150,7 @@ int NebdMetaFileManager::LoadFileMeta() { bool ok = Json::parseFromStream(reader, in, &root, &errs); in.close(); if (!ok) { - LOG(ERROR) << "Parse meta file " << metaFilePath_ - << " fail: " << errs; + LOG(ERROR) << "Parse meta file " << metaFilePath_ << " fail: " << errs; return -1; } @@ -173,31 +173,28 @@ int NebdMetaFileManager::ListFileMeta(std::vector* fileMetas) { return 0; } -int NebdMetaFileParser::Parse(Json::Value root, - FileMetaMap* fileMetas) { +int NebdMetaFileParser::Parse(Json::Value root, FileMetaMap* fileMetas) { if (!fileMetas) { LOG(ERROR) << "the argument fileMetas is null pointer"; return -1; } fileMetas->clear(); - // 检验crc + // Check crc if (root[kCRC].isNull()) { - LOG(ERROR) << "Parse json: " << root - << " fail, no crc"; + LOG(ERROR) << "Parse json: " << root << " fail, no crc"; return -1; } uint32_t crcValue = root[kCRC].asUInt(); root.removeMember(kCRC); std::string jsonString = root.toStyledString(); - uint32_t crcCalc = nebd::common::CRC32(jsonString.c_str(), - jsonString.size()); + uint32_t crcCalc = + nebd::common::CRC32(jsonString.c_str(), jsonString.size()); if (crcValue != crcCalc) { - LOG(ERROR) << "Parse json: " << root - << " fail, crc not match"; + LOG(ERROR) << "Parse json: " << root << " fail, crc not match"; return -1; } - // 没有volume字段 + // No volume field const auto& volumes = root[kVolumes]; if (volumes.isNull()) { LOG(WARNING) << "No volumes in json: " << root; @@ -208,22 +205,21 @@ int NebdMetaFileParser::Parse(Json::Value root, NebdFileMeta meta; if (volume[kFileName].isNull()) { - LOG(ERROR) << "Parse json: " << root - << " fail, no filename"; + LOG(ERROR) << "Parse json: " << root << " fail, no filename"; return -1; } else { meta.fileName = volume[kFileName].asString(); } if (volume[kFd].isNull()) { - LOG(ERROR) << "Parse json: " << root - << " fail, no fd"; + LOG(ERROR) << "Parse json: " << root << " fail, no fd"; return -1; } else { meta.fd = volume[kFd].asInt(); } - // 除了filename和fd的部分统一放到xattr里面 + // Fields other than filename and fd are uniformly placed
in + // xattr Json::Value::Members mem = volume.getMemberNames(); ExtendAttribute xattr; for (auto iter = mem.begin(); iter != mem.end(); iter++) { @@ -238,13 +234,13 @@ int NebdMetaFileParser::Parse(Json::Value root, } Json::Value NebdMetaFileParser::ConvertFileMetasToJson( - const FileMetaMap& fileMetas) { + const FileMetaMap& fileMetas) { Json::Value volumes; for (const auto& meta : fileMetas) { Json::Value volume; volume[kFileName] = meta.second.fileName; volume[kFd] = meta.second.fd; - for (const auto &item : meta.second.xattr) { + for (const auto& item : meta.second.xattr) { volume[item.first] = item.second; } volumes.append(volume); @@ -252,7 +248,7 @@ Json::Value NebdMetaFileParser::ConvertFileMetasToJson( Json::Value root; root[kVolumes] = volumes; - // 计算crc + // Calculate crc std::string jsonString = root.toStyledString(); uint32_t crc = nebd::common::CRC32(jsonString.c_str(), jsonString.size()); root[kCRC] = crc; diff --git a/nebd/src/part2/metafile_manager.h b/nebd/src/part2/metafile_manager.h index a46255a467..35200fa9bc 100644 --- a/nebd/src/part2/metafile_manager.h +++ b/nebd/src/part2/metafile_manager.h @@ -24,16 +24,17 @@ #define NEBD_SRC_PART2_METAFILE_MANAGER_H_ #include -#include -#include -#include + #include +#include // NOLINT +#include #include // NOLINT -#include // NOLINT +#include +#include -#include "nebd/src/common/rw_lock.h" -#include "nebd/src/common/posix_wrapper.h" #include "nebd/src/common/crc32.h" +#include "nebd/src/common/posix_wrapper.h" +#include "nebd/src/common/rw_lock.h" #include "nebd/src/part2/define.h" #include "nebd/src/part2/util.h" @@ -41,9 +42,9 @@ namespace nebd { namespace server { using nebd::common::PosixWrapper; +using nebd::common::ReadLockGuard; using nebd::common::RWLock; using nebd::common::WriteLockGuard; -using nebd::common::ReadLockGuard; using FileMetaMap = std::unordered_map; const char kVolumes[] = "volumes"; @@ -53,17 +54,15 @@ const char kCRC[] = "crc"; class NebdMetaFileParser { public: - int Parse(Json::Value root, - FileMetaMap* fileMetas); + int Parse(Json::Value root, FileMetaMap* fileMetas); Json::Value ConvertFileMetasToJson(const FileMetaMap& fileMetas); }; struct NebdMetaFileManagerOption { std::string metaFilePath = ""; - std::shared_ptr wrapper - = std::make_shared(); - std::shared_ptr parser - = std::make_shared(); + std::shared_ptr wrapper = std::make_shared(); + std::shared_ptr parser = + std::make_shared(); }; class NebdMetaFileManager { @@ -71,37 +70,38 @@ class NebdMetaFileManager { NebdMetaFileManager(); virtual ~NebdMetaFileManager(); - // 初始化,主要从文件读取元数据信息并加载到内存 + // Initialization, mainly reading metadata information from files and + // loading it into memory virtual int Init(const NebdMetaFileManagerOption& option); - // 列出文件记录 + // List file records virtual int ListFileMeta(std::vector* fileMetas); - // 更新文件元数据 + // Update file metadata virtual int UpdateFileMeta(const std::string& fileName, const NebdFileMeta& fileMeta); - // 删除文件元数据 + // Delete file metadata virtual int RemoveFileMeta(const std::string& fileName); private: - // 原子写文件 + // Atomic writing file int AtomicWriteFile(const Json::Value& root); - // 更新元数据文件并更新内存缓存 + // Update metadata files and update memory cache int UpdateMetaFile(const FileMetaMap& fileMetas); - // 初始化从持久化文件读取到内存 + // Initialize reading from persistent files to memory int LoadFileMeta(); private: - // 元数据文件路径 + // Meta Data File Path std::string metaFilePath_; - // 文件系统操作封装 + // File system operation encapsulation std::shared_ptr wrapper_; - // 用于解析Json格式的元数据 + // 
Used to parse metadata in Json format std::shared_ptr parser_; - // MetaFileManager 线程安全读写锁 + // Thread-safe read-write lock of MetaFileManager RWLock rwLock_; - // meta文件内存缓存 + // In-memory cache of the meta file FileMetaMap metaCache_; }; using MetaFileManagerPtr = std::shared_ptr; diff --git a/nebd/src/part2/nebd_server.cpp b/nebd/src/part2/nebd_server.cpp index 74e5e2329d..89baaad537 100644 --- a/nebd/src/part2/nebd_server.cpp +++ b/nebd/src/part2/nebd_server.cpp @@ -20,19 +20,22 @@ * Author: lixiaocui */ +#include "nebd/src/part2/nebd_server.h" + #include + #include + #include "nebd/src/common/file_lock.h" -#include "nebd/src/part2/nebd_server.h" +#include "nebd/src/common/nebd_version.h" #include "nebd/src/part2/file_service.h" #include "nebd/src/part2/heartbeat_service.h" -#include "nebd/src/common/nebd_version.h" namespace nebd { namespace server { -int NebdServer::Init(const std::string &confPath, - std::shared_ptr curveClient) { +int NebdServer::Init(const std::string& confPath, + std::shared_ptr curveClient) { if (isRunning_) { LOG(WARNING) << "NebdServer is inited"; return -1; @@ -75,7 +78,7 @@ int NebdServer::Init(const std::string &confPath, LOG(INFO) << "NebdServer init heartbeatManager ok"; LOG(INFO) << "NebdServer init ok"; - // 暴露版本信息 + // Expose version information LOG(INFO) << "nebd version: " << nebd::common::NebdVersion(); nebd::common::ExposeNebdVersion(); return 0; @@ -100,7 +103,7 @@ } if (curveClient_ != nullptr) { - curveClient_ ->UnInit(); + curveClient_->UnInit(); } if (heartbeatManager_ != nullptr) { @@ -110,7 +113,7 @@ return 0; } -bool NebdServer::LoadConfFromFile(const std::string &confPath) { +bool NebdServer::LoadConfFromFile(const std::string& confPath) { conf_.SetConfigPath(confPath); return conf_.LoadConfig(); } @@ -172,16 +175,16 @@ MetaFileManagerPtr NebdServer::InitMetaFileManager() { return metaFileManager; } -bool NebdServer::InitHeartbeatManagerOption(HeartbeatManagerOption *opt) { - bool getOk = conf_.GetUInt32Value( - HEARTBEATTIMEOUTSEC, &opt->heartbeatTimeoutS); +bool NebdServer::InitHeartbeatManagerOption(HeartbeatManagerOption* opt) { + bool getOk = + conf_.GetUInt32Value(HEARTBEATTIMEOUTSEC, &opt->heartbeatTimeoutS); if (false == getOk) { LOG(ERROR) << "NebdServer get heartbeat.timeout.sec fail"; return false; } - getOk = conf_.GetUInt32Value( - HEARTBEATCHECKINTERVALMS, &opt->checkTimeoutIntervalMs); + getOk = conf_.GetUInt32Value(HEARTBEATCHECKINTERVALMS, + &opt->checkTimeoutIntervalMs); if (false == getOk) { LOG(ERROR) << "NebdServer get heartbeat.check.interval.ms fail"; return false; @@ -212,24 +215,24 @@ bool NebdServer::InitHeartbeatManager() { bool NebdServer::StartServer() { // add service bool returnRpcWhenIoError; - bool ret = conf_.GetBoolValue(RESPONSERETURNRPCWHENIOERROR, - &returnRpcWhenIoError); + bool ret = + conf_.GetBoolValue(RESPONSERETURNRPCWHENIOERROR, &returnRpcWhenIoError); if (false == ret) { LOG(ERROR) << "get " << RESPONSERETURNRPCWHENIOERROR << " fail"; return false; } NebdFileServiceImpl fileService(fileManager_, returnRpcWhenIoError); - int addFileServiceRes = server_.AddService( - &fileService, brpc::SERVER_DOESNT_OWN_SERVICE); + int addFileServiceRes = + server_.AddService(&fileService, brpc::SERVER_DOESNT_OWN_SERVICE); if (0 != addFileServiceRes) { LOG(ERROR) << "NebdServer add file service fail"; return false; } NebdHeartbeatServiceImpl heartbeatService(heartbeatManager_); - addFileServiceRes = server_.AddService( - &heartbeatService, brpc::SERVER_DOESNT_OWN_SERVICE); + 
addFileServiceRes = + server_.AddService(&heartbeatService, brpc::SERVER_DOESNT_OWN_SERVICE); if (0 != addFileServiceRes) { LOG(ERROR) << "NebdServer add heartbeat service fail"; return false; @@ -238,17 +241,17 @@ bool NebdServer::StartServer() { // start brcp server brpc::ServerOptions option; option.idle_timeout_sec = -1; - // 获取文件锁 + // Acquire the file lock common::FileLock fileLock(listenAddress_ + ".lock"); if (fileLock.AcquireFileLock() != 0) { LOG(ERROR) << "Address already in use"; return -1; } - int startBrpcServerRes = server_.StartAtSockFile( - listenAddress_.c_str(), &option); + int startBrpcServerRes = + server_.StartAtSockFile(listenAddress_.c_str(), &option); if (0 != startBrpcServerRes) { LOG(ERROR) << "NebdServer start brpc server fail, res=" - << startBrpcServerRes; + << startBrpcServerRes; return false; } diff --git a/nebd/src/part2/nebd_server.h b/nebd/src/part2/nebd_server.h index c4ee40f23e..8a1275d23e 100644 --- a/nebd/src/part2/nebd_server.h +++ b/nebd/src/part2/nebd_server.h @@ -24,8 +24,10 @@ #define NEBD_SRC_PART2_NEBD_SERVER_H_ #include -#include + #include +#include + #include "nebd/src/common/configuration.h" #include "nebd/src/part2/file_manager.h" #include "nebd/src/part2/heartbeat_manager.h" @@ -34,17 +36,17 @@ namespace nebd { namespace server { -using ::nebd::common::Configuration; using ::curve::client::CurveClient; +using ::nebd::common::Configuration; class NebdServer { public: NebdServer() {} virtual ~NebdServer() {} - int Init(const std::string &confPath, - std::shared_ptr curveClient = - std::make_shared()); + int Init(const std::string& confPath, + std::shared_ptr curveClient = + std::make_shared()); int RunUntilAskedToQuit(); @@ -52,62 +54,64 @@ class NebdServer { private: /** - * @brief 从配置文件加载配置项 - * @param[in] confPath 配置文件路径 - * @return false-加载配置文件失败 true-加载配置文件成功 + * @brief Load configuration items from the configuration file + * @param[in] confPath Configuration file path + * @return false-Failed to load configuration file, true-Successfully loaded + * configuration file */ - bool LoadConfFromFile(const std::string &confPath); + bool LoadConfFromFile(const std::string& confPath); /** - * @brief 初始化NebdFileManager - * @return false-初始化失败 true-初始化成功 + * @brief Initialize NebdFileManager + * @return false-initialization failed, true-initialization successful */ bool InitFileManager(); /** - * @brief 初始化request_executor_curve - * @return false-初始化失败 true-初始化成功 + * @brief Initialize request_executor_curve + * @return false-initialization failed, true-initialization successful */ bool InitCurveRequestExecutor(); /** - * @brief 初始化NebdMetaFileManager - * @return nullptr-初始化不成功 否则表示初始化成功 + * @brief Initialize NebdMetaFileManager + * @return nullptr - initialization failed; otherwise, it indicates + * successful initialization */ MetaFileManagerPtr InitMetaFileManager(); /** - * @brief 初始化HeartbeatManagerOption + * @brief Initialize HeartbeatManagerOption * @param[out] opt - * @return false-初始化失败 true-初始化成功 + * @return false-initialization failed, true-initialization successful */ - bool InitHeartbeatManagerOption(HeartbeatManagerOption *opt); + bool InitHeartbeatManagerOption(HeartbeatManagerOption* opt); /** - * @brief 初始化HeartbeatManager - * @return false-初始化失败 true-初始化成功 + * @brief Initialize HeartbeatManager + * @return false-initialization failed, true-initialization successful */ bool InitHeartbeatManager(); /** - * @brief 启动brpc service - * @return false-启动service失败 true-启动service成功 + * @brief Start brpc service + * @return false-Failed to
start service, true-Successfully started service */ bool StartServer(); private: - // 配置项 + // Configuration items Configuration conf_; - // NebdServer监听地址 + // NebdServer listening address std::string listenAddress_; - // NebdServer是否处于running状态 - bool isRunning_ = false; + // Whether NebdServer is running + bool isRunning_ = false; // brpc server brpc::Server server_; - // 用于接受和处理client端的各种请求 + // Used to accept and process various requests from the client side std::shared_ptr fileManager_; - // 负责文件心跳超时处理 + // Responsible for handling file heartbeat timeout std::shared_ptr heartbeatManager_; // curveclient std::shared_ptr curveClient_; diff --git a/nebd/src/part2/request_executor.h b/nebd/src/part2/request_executor.h index 0d69e3c9c8..2098ca87a4 100644 --- a/nebd/src/part2/request_executor.h +++ b/nebd/src/part2/request_executor.h @@ -24,8 +24,9 @@ #define NEBD_SRC_PART2_REQUEST_EXECUTOR_H_ #include -#include #include +#include + #include "nebd/src/part2/define.h" namespace nebd { @@ -41,14 +42,16 @@ class CurveRequestExecutor; using OpenFlags = nebd::client::ProtoOpenFlags; -// 具体RequestExecutor中会用到的文件实例上下文信息 -// RequestExecutor需要用到的文件上下文信息都记录到FileInstance内 +// File instance context information used by the concrete RequestExecutor. +// All file context information a RequestExecutor needs is recorded in +// FileInstance class NebdFileInstance { public: NebdFileInstance() {} virtual ~NebdFileInstance() {} - // 需要持久化到文件的内容,以kv形式返回,例如curve open时返回的sessionid - // 文件reopen的时候也会用到该内容 + // Content that needs to be persisted to the file, returned in kv form, + // such as the sessionid returned by curve open. It is also used when the + // file is reopened ExtendAttribute xattr; }; @@ -65,7 +68,8 @@ class NebdRequestExecutor { virtual int GetInfo(NebdFileInstance* fd, NebdFileInfo* fileInfo) = 0; virtual int Discard(NebdFileInstance* fd, NebdServerAioContext* aioctx) = 0; virtual int AioRead(NebdFileInstance* fd, NebdServerAioContext* aioctx) = 0; - virtual int AioWrite(NebdFileInstance* fd, NebdServerAioContext* aioctx) = 0; // NOLINT + virtual int AioWrite(NebdFileInstance* fd, + NebdServerAioContext* aioctx) = 0; // NOLINT virtual int Flush(NebdFileInstance* fd, NebdServerAioContext* aioctx) = 0; virtual int InvalidCache(NebdFileInstance* fd) = 0; }; diff --git a/nebd/src/part2/request_executor_curve.h b/nebd/src/part2/request_executor_curve.h index 11606d1bb1..a96409e5c4 100644 --- a/nebd/src/part2/request_executor_curve.h +++ b/nebd/src/part2/request_executor_curve.h @@ -23,12 +23,13 @@ #ifndef NEBD_SRC_PART2_REQUEST_EXECUTOR_CURVE_H_ #define NEBD_SRC_PART2_REQUEST_EXECUTOR_CURVE_H_ -#include #include +#include #include -#include "nebd/src/part2/request_executor.h" -#include "nebd/src/part2/define.h" + #include "include/client/libcurve.h" +#include "nebd/src/part2/define.h" +#include "nebd/src/part2/request_executor.h" namespace nebd { namespace server { @@ -54,17 +55,22 @@ void CurveAioCallback(struct CurveAioContext* curveCtx); class FileNameParser { public: /** - * @brief 解析fileName - * 一般格式: - * qemu "cbd:pool1//cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_:/etc/curve/client.conf" //NOLINT - * nbd "cbd:pool1//cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_" // NOLINT + * @brief Parse fileName + * General format: + * qemu + * "cbd:pool1//cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_:/etc/curve/client.conf" + * //NOLINT nbd + * "cbd:pool1//cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_" + * // NOLINT * @param[in]
fileName - * @return 解析结果 - * qemu "/cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_", "/etc/curve/client.conf" //NOLINT - * nbd "/cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_", "" //NOLINT + * @return Parsing Result + * qemu "/cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_", + * "/etc/curve/client.conf" //NOLINT nbd + * "/cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_", "" + * //NOLINT */ - static std::pair - Parse(const std::string& fileName); + static std::pair Parse( + const std::string& fileName); }; class CurveRequestExecutor : public NebdRequestExecutor { @@ -74,7 +80,7 @@ class CurveRequestExecutor : public NebdRequestExecutor { return executor; } ~CurveRequestExecutor() {} - void Init(const std::shared_ptr &client); + void Init(const std::shared_ptr& client); std::shared_ptr Open(const std::string& filename, const OpenFlags* openflags) override; std::shared_ptr Reopen( @@ -90,40 +96,42 @@ class CurveRequestExecutor : public NebdRequestExecutor { private: /** - * @brief 构造函数 + * @brief constructor */ CurveRequestExecutor() {} /** - * @brief 从NebdFileInstance中解析出curve_client需要的fd - * @param[in] fd NebdFileInstance类型 - * @return 返回curve_client中文件的fd, 如果小于0,表示解析结果错误 + * @brief Parse the fd needed by curve_client from NebdFileInstance. + * @param[in] fd NebdFileInstance type. + * @return Returns the fd of the file in curve_client. If less than 0, it + * indicates an error in the parsing result. */ int GetCurveFdFromNebdFileInstance(NebdFileInstance* fd); /** - * @brief 从NebdFileInstance中解析出curbe_client需要的filename - * @param[in] fd NebdFileInstance类型 - * @return 返回curve_client中的filename, 如果为空,表示解析出错 + * @brief Parse the filename needed by curve_client from NebdFileInstance. + * @param[in] fd NebdFileInstance type. + * @return Returns the filename in curve_client. If empty, it indicates an + * error in the parsing. 
*/ std::string GetFileNameFromNebdFileInstance(NebdFileInstance* fd); /** - * @brief 将NebdServerAioContext类型转换为CurveAioContext类型 - * @param[in] nebdCtx NebdServerAioContext类型 - * @param[out] curveCtx CurveAioContext类型 - * @return -1转换失败,0转换成功 + * @brief Convert NebdServerAioContext type to CurveAioContext type + * @param[in] nebdCtx NebdServerAioContext type + * @param[out] curveCtx CurveAioContext type + * @return -1 conversion failed, 0 conversion succeeded */ - int FromNebdCtxToCurveCtx( - NebdServerAioContext *nebdCtx, CurveAioContext *curveCtx); + int FromNebdCtxToCurveCtx(NebdServerAioContext* nebdCtx, + CurveAioContext* curveCtx); /** - * @brief 将LIBAIO_OP类型转换为curve_client中LIBCURVE_OP类型 - * @param[in] op LIBAIO_OP类型 - * @param[out] out LIBCURVE_OP类型 - * @return -1转换失败,0转换成功 + * @brief Convert LIBAIO_OP types to LIBCURVE_OP types in the curve_client + * @param[in] op LIBAIO_OP type + * @param[out] out LIBCURVE_OP type + * @return -1 conversion failed, 0 conversion succeeded */ - int FromNebdOpToCurveOp(LIBAIO_OP op, LIBCURVE_OP *out); + int FromNebdOpToCurveOp(LIBAIO_OP op, LIBCURVE_OP* out); private: std::shared_ptr<::curve::client::CurveClient> client_; diff --git a/nebd/src/part2/util.h b/nebd/src/part2/util.h index f733a04577..0894d69ebe 100644 --- a/nebd/src/part2/util.h +++ b/nebd/src/part2/util.h @@ -23,9 +23,9 @@ #ifndef NEBD_SRC_PART2_UTIL_H_ #define NEBD_SRC_PART2_UTIL_H_ -#include #include // NOLINT #include +#include #include "nebd/src/part2/define.h" @@ -51,9 +51,9 @@ class FdAllocator { FdAllocator() : fd_(0) {} ~FdAllocator() {} - // fd的有效值范围为[1, INT_MAX] + // The valid range of values for fd is [1, INT_MAX] int GetNext(); - // 初始化fd的值 + // Initialize the value of fd void InitFd(int fd); private: diff --git a/nebd/test/common/configuration_test.cpp b/nebd/test/common/configuration_test.cpp index 4c9e7b7c21..ef24eeb42a 100644 --- a/nebd/test/common/configuration_test.cpp +++ b/nebd/test/common/configuration_test.cpp @@ -21,15 +21,15 @@ * 2018/11/23 Wenyu Zhou Initial version */ -#include +#include "nebd/src/common/configuration.h" + #include +#include -#include -#include #include +#include #include - -#include "nebd/src/common/configuration.h" +#include namespace nebd { namespace common { @@ -86,9 +86,7 @@ class ConfigurationTest : public ::testing::Test { cFile << confItem; } - void TearDown() { - ASSERT_EQ(0, unlink(confFile_.c_str())); - } + void TearDown() { ASSERT_EQ(0, unlink(confFile_.c_str())); } std::string confFile_; }; @@ -136,10 +134,10 @@ TEST_F(ConfigurationTest, ListConfig) { std::map configs; configs = conf.ListConfig(); ASSERT_NE(0, configs.size()); - // 抽几个key来校验以下 + // Pick a few keys for validation. 
ASSERT_EQ(configs["test.int1"], "12345"); ASSERT_EQ(configs["test.bool1"], "0"); - // 如果key不存在,返回为空 + // If the key does not exist, an empty value is returned ASSERT_EQ(configs["xxx"], ""); } @@ -148,18 +146,19 @@ TEST_F(ConfigurationTest, SaveConfig) { Configuration conf; conf.SetConfigPath(confFile_); - // 自定义配置项并保存 + // Set a custom configuration item and save it conf.SetStringValue("test.str1", "new"); ret = conf.SaveConfig(); ASSERT_EQ(ret, true); - // 重新加载配置项 + // Reload configuration items Configuration conf2; conf2.SetConfigPath(confFile_); ret = conf2.LoadConfig(); ASSERT_EQ(ret, true); - // 可以读取自定义配置项,原有配置项被覆盖,读取不到 + // The custom configuration item can be read; the original items were + // overwritten and can no longer be read ASSERT_EQ(conf2.GetValue("test.str1"), "new"); ASSERT_EQ(conf2.GetValue("test.int1"), ""); } @@ -301,7 +300,7 @@ } // namespace common } // namespace nebd -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); int ret = RUN_ALL_TESTS(); diff --git a/nebd/test/common/test_name_lock.cpp b/nebd/test/common/test_name_lock.cpp index 1f79ec5800..574667ad8b 100644 --- a/nebd/test/common/test_name_lock.cpp +++ b/nebd/test/common/test_name_lock.cpp @@ -21,6 +21,7 @@ */ #include + #include #include // NOLINT @@ -32,29 +33,27 @@ namespace common { TEST(TestNameLock, TestNameLockBasic) { NameLock lock1, lock2, lock3; - // lock测试 + // Lock test lock1.Lock("str1"); - // 同锁不同str可lock不死锁 + // Same lock but different strs can lock without deadlock lock1.Lock("str2"); - // 不同锁同str可lock不死锁 + // Different locks with the same str can lock without deadlock lock2.Lock("str1"); - - - // 同锁同str TryLock失败 + // TryLock on the same lock with the same str fails ASSERT_FALSE(lock1.TryLock("str1")); - // 同锁不同str TryLock成功 + // TryLock on the same lock with a different str succeeds ASSERT_TRUE(lock1.TryLock("str3")); - // 不同锁同str TryLock成功 + // TryLock on a different lock with the same str succeeds ASSERT_TRUE(lock3.TryLock("str1")); - // unlock测试 + // Unlock test lock1.Unlock("str1"); lock1.Unlock("str2"); lock1.Unlock("str3"); lock2.Unlock("str1"); lock3.Unlock("str1"); - // 未锁unlock ok + // Unlocking a name that is not locked is OK lock2.Unlock("str2"); } @@ -64,12 +63,13 @@ TEST(TestNameLock, TestNameLockGuardBasic) { NameLockGuard guard1(lock1, "str1"); NameLockGuard guard2(lock1, "str2"); NameLockGuard guard3(lock2, "str1"); - // 作用域内加锁成功,不可再加锁 + // Locked within the scope; cannot be locked again ASSERT_FALSE(lock1.TryLock("str1")); ASSERT_FALSE(lock1.TryLock("str2")); ASSERT_FALSE(lock2.TryLock("str1")); } - // 作用域外自动解锁,可再加锁 + // Automatically unlocked outside the scope; can be locked + // again ASSERT_TRUE(lock1.TryLock("str1")); ASSERT_TRUE(lock1.TryLock("str2")); ASSERT_TRUE(lock2.TryLock("str1")); @@ -80,14 +80,14 @@ TEST(TestNameLock, TestNameLockGuardBasic) { TEST(TestNameLock, TestNameLockConcurrent) { NameLock lock1; - auto worker = [&] (const std::string &str) { + auto worker = [&](const std::string& str) { for (int i = 0; i < 10000; i++) { NameLockGuard guard(lock1, str); } }; std::vector threadpool; - for (auto &t : threadpool) { + for (auto& t : threadpool) { std::string str1 = "aaaa"; std::string str2 = "bbbb"; std::srand(std::time(nullptr)); t = std::thread(worker, rstr); } - for (auto &t : threadpool) { + for (auto& t : threadpool) { t.join(); } } - - -} // namespace common -} // namespace nebd +} // 
namespace common +} // namespace nebd diff --git a/nebd/test/part1/heartbeat_manager_unittest.cpp b/nebd/test/part1/heartbeat_manager_unittest.cpp index 72de6802d4..3d95f9adf4 100644 --- a/nebd/test/part1/heartbeat_manager_unittest.cpp +++ b/nebd/test/part1/heartbeat_manager_unittest.cpp @@ -20,14 +20,15 @@ * Author: hzchenwei7 */ -#include -#include +#include "nebd/src/part1/heartbeat_manager.h" + #include +#include +#include #include #include // NOLINT -#include "nebd/src/part1/heartbeat_manager.h" #include "nebd/src/part1/nebd_metacache.h" #include "nebd/test/part1/fake_heartbeat_service.h" @@ -66,24 +67,20 @@ class HeartbeatManagerTest : public testing::Test { HeartbeatOption option; }; -TEST_F(HeartbeatManagerTest, InitTest) { - ASSERT_EQ(0, manager->Init( - option)); -} +TEST_F(HeartbeatManagerTest, InitTest) { ASSERT_EQ(0, manager->Init(option)); } TEST_F(HeartbeatManagerTest, InvokeTimesTest) { - ASSERT_EQ(0, manager->Init( - option)); + ASSERT_EQ(0, manager->Init(option)); manager->Run(); - // metaCache中数据为空,不发送心跳消息 + // The data in metaCache is empty and no heartbeat message will be sent for (int i = 0; i < 10; ++i) { ASSERT_EQ(0, fakeHeartBeatService.GetInvokeTimes()); std::this_thread::sleep_for(std::chrono::seconds(1)); } - // 添加数据 + // Add data NebdClientFileInfo fileInfo(1, "/test1", FileLock("/test1.lock")); metaCache->AddFileInfo(fileInfo); @@ -91,7 +88,7 @@ TEST_F(HeartbeatManagerTest, InvokeTimesTest) { int times = fakeHeartBeatService.GetInvokeTimes(); ASSERT_TRUE(times >= 9 && times <= 11); - // 清空metaCache数据 + // Clear MetaCache data metaCache->RemoveFileInfo(1); std::this_thread::sleep_for(std::chrono::seconds(2)); @@ -103,13 +100,12 @@ TEST_F(HeartbeatManagerTest, InvokeTimesTest) { } TEST_F(HeartbeatManagerTest, RequestValidTest) { - ASSERT_EQ(0, manager->Init( - option)); + ASSERT_EQ(0, manager->Init(option)); manager->Run(); std::vector currentFileInfos; - // 添加一个文件 + // Add a file NebdClientFileInfo fileInfo(1, "/test1", FileLock("/test1.lock")); metaCache->AddFileInfo(fileInfo); HeartbeatFileInfo info; @@ -126,7 +122,7 @@ TEST_F(HeartbeatManagerTest, RequestValidTest) { ASSERT_EQ(currentFileInfos[i].name(), latestFileInfos[i].name()); } - // 添加第二个文件 + // Add second file fileInfo = NebdClientFileInfo(2, "/test2", FileLock("/test2.lock")); metaCache->AddFileInfo(fileInfo); info.set_fd(2); @@ -147,7 +143,7 @@ TEST_F(HeartbeatManagerTest, RequestValidTest) { ASSERT_EQ(currentFileInfos[i].name(), latestFileInfos[i].name()); } - // 删除第一个文件 + // Delete the first file metaCache->RemoveFileInfo(1); currentFileInfos.erase(currentFileInfos.begin()); @@ -166,7 +162,7 @@ TEST_F(HeartbeatManagerTest, RequestValidTest) { } // namespace client } // namespace nebd -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/nebd/test/part1/nebd_client_unittest.cpp b/nebd/test/part1/nebd_client_unittest.cpp index 6822947653..2f3e18910f 100644 --- a/nebd/test/part1/nebd_client_unittest.cpp +++ b/nebd/test/part1/nebd_client_unittest.cpp @@ -20,18 +20,18 @@ * Author: wuhanqing */ -#include -#include +#include "nebd/src/part1/nebd_client.h" + #include +#include +#include -#include // NOLINT -#include // NOLINT #include +#include // NOLINT +#include // NOLINT -#include "nebd/src/part1/nebd_client.h" #include "nebd/src/part1/libnebd.h" #include "nebd/src/part1/libnebd_file.h" - #include "nebd/test/part1/fake_file_service.h" #include "nebd/test/part1/mock_file_service.h" #include 
"nebd/test/utils/config_generator.h" @@ -79,16 +79,14 @@ void AioRpcFailCallBack(NebdClientAioContext* ctx) { template void MockClientFunc(google::protobuf::RpcController* cntl_base, - const Request* request, - Response* response, + const Request* request, Response* response, google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); } template void MockClientRpcFailedFunc(google::protobuf::RpcController* cntl_base, - const Request* request, - Response* response, + const Request* request, Response* response, google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); static int invokeTimes = 0; @@ -110,20 +108,20 @@ class NebdFileClientTest : public ::testing::Test { void TearDown() override {} void AddFakeService() { - ASSERT_EQ(0, server.AddService( - &fakeService, - brpc::SERVER_DOESNT_OWN_SERVICE)) << "Add service failed"; + ASSERT_EQ( + 0, server.AddService(&fakeService, brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Add service failed"; } void AddMockService() { - ASSERT_EQ(0, server.AddService( - &mockService, - brpc::SERVER_DOESNT_OWN_SERVICE)) << "Add service failed"; + ASSERT_EQ( + 0, server.AddService(&mockService, brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Add service failed"; } void StartServer(const std::string& address = kNebdServerTestAddress) { - ASSERT_EQ(0, server.StartAtSockFile( - address.c_str(), nullptr)) << "Start server failed"; + ASSERT_EQ(0, server.StartAtSockFile(address.c_str(), nullptr)) + << "Start server failed"; } void StopServer() { @@ -137,15 +135,15 @@ class NebdFileClientTest : public ::testing::Test { }; using ::testing::_; -using ::testing::Invoke; -using ::testing::Return; using ::testing::AnyNumber; +using ::testing::AtLeast; using ::testing::DoAll; -using ::testing::SetArgPointee; -using ::testing::SetArgReferee; using ::testing::InSequence; -using ::testing::AtLeast; +using ::testing::Invoke; +using ::testing::Return; using ::testing::SaveArgPointee; +using ::testing::SetArgPointee; +using ::testing::SetArgReferee; TEST_F(NebdFileClientTest, AioRpcFailTest) { AddMockService(); @@ -167,7 +165,8 @@ TEST_F(NebdFileClientTest, AioRpcFailTest) { EXPECT_CALL(mockService, Write(_, _, _, _)) .Times(10) .WillRepeatedly( - Invoke(MockClientRpcFailedFunc)); // NOLINT + Invoke(MockClientRpcFailedFunc)); // NOLINT aioOpReturn = false; auto start = std::chrono::system_clock::now(); @@ -177,9 +176,11 @@ TEST_F(NebdFileClientTest, AioRpcFailTest) { cond.wait(ulk, []() { return aioOpReturn.load(); }); ASSERT_TRUE(aioOpReturn.load()); auto end = std::chrono::system_clock::now(); - auto elpased = std::chrono::duration_cast(end - start).count(); // NOLINT + auto elpased = + std::chrono::duration_cast(end - start) + .count(); // NOLINT - // 重试睡眠时间: 100ms + 200ms + ... + 900ms = 4500ms + // Retrying sleep time: 100ms + 200ms + ... 
+ 900ms = 4500ms ASSERT_TRUE(elpased >= 4000 && elpased <= 5000); } @@ -196,7 +197,8 @@ TEST_F(NebdFileClientTest, AioRpcFailTest) { EXPECT_CALL(mockService, Read(_, _, _, _)) .Times(10) .WillRepeatedly( - Invoke(MockClientRpcFailedFunc)); // NOLINT + Invoke(MockClientRpcFailedFunc)); // NOLINT aioOpReturn = false; ASSERT_EQ(0, AioRead4Nebd(1, ctx)); @@ -218,7 +220,8 @@ TEST_F(NebdFileClientTest, AioRpcFailTest) { EXPECT_CALL(mockService, Discard(_, _, _, _)) .Times(10) .WillRepeatedly( - Invoke(MockClientRpcFailedFunc)); // NOLINT + Invoke(MockClientRpcFailedFunc)); // NOLINT aioOpReturn = false; ASSERT_EQ(0, Discard4Nebd(1, ctx)); @@ -240,7 +243,8 @@ TEST_F(NebdFileClientTest, AioRpcFailTest) { EXPECT_CALL(mockService, Flush(_, _, _, _)) .Times(10) .WillRepeatedly( - Invoke(MockClientRpcFailedFunc)); // NOLINT + Invoke(MockClientRpcFailedFunc)); // NOLINT aioOpReturn = false; ASSERT_EQ(0, Flush4Nebd(1, ctx)); @@ -261,10 +265,12 @@ TEST_F(NebdFileClientTest, NoNebdServerTest) { auto start = std::chrono::system_clock::now(); ASSERT_EQ(-1, Open4Nebd(kFileName, nullptr)); auto end = std::chrono::system_clock::now(); - auto elapsed = std::chrono::duration_cast( - end - start).count(); + auto elapsed = + std::chrono::duration_cast(end - start) + .count(); - // rpc failed的清空下,睡眠100ms后继续重试,共重试10次 + // In the rpc-failure case, sleep 100ms and then retry; 10 retries in + // total ASSERT_TRUE(elapsed >= 900 && elapsed <= 1100); } ASSERT_EQ(-1, Extend4Nebd(1, kFileSize)); @@ -380,8 +386,8 @@ TEST_F(NebdFileClientTest, ReOpenTest) { int fd = Open4Nebd(kFileName, nullptr); ASSERT_GT(fd, 0); - // 文件已经被打开,并占用文件锁 - // 再次打开时,获取文件锁失败,直接返回 + // The file is already open and holds the file lock. + // Reopening fails to acquire the file lock and returns directly ASSERT_EQ(-1, Open4Nebd(kFileName, nullptr)); ASSERT_EQ(0, Close4Nebd(fd)); @@ -406,9 +412,10 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, OpenFile(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); // NOLINT ASSERT_EQ(-1, Open4Nebd(kFileName, nullptr)); } @@ -417,9 +424,10 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, CloseFile(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); // NOLINT ASSERT_EQ(0, Close4Nebd(0)); } @@ -428,9 +436,9 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, ResizeFile(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); ASSERT_EQ(-1, Extend4Nebd(1, kFileSize)); } @@ -439,9 +447,10 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, GetInfo(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); // NOLINT ASSERT_EQ(-1, GetFileSize4Nebd(1)); } @@ -450,9 +459,9 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, GetInfo(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - 
SetArgPointee<2>(response), - Invoke(MockClientFunc))); + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); ASSERT_EQ(-1, GetBlockSize4Nebd(1)); } @@ -461,9 +470,10 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, GetInfo(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); // NOLINT ASSERT_EQ(-1, GetInfo4Nebd(1)); } @@ -474,7 +484,8 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { .Times(1) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + Invoke(MockClientFunc))); // NOLINT ASSERT_EQ(-1, InvalidCache4Nebd(1)); } @@ -496,7 +507,8 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { .Times(1) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + Invoke( + MockClientFunc))); // NOLINT aioOpReturn = false; ASSERT_EQ(0, AioWrite4Nebd(1, ctx)); std::unique_lock ulk(mtx); @@ -518,9 +530,8 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, Read(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); aioOpReturn = false; ASSERT_EQ(0, AioRead4Nebd(1, ctx)); std::unique_lock ulk(mtx); @@ -542,9 +553,10 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, Discard(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); // NOLINT aioOpReturn = false; ASSERT_EQ(0, Discard4Nebd(1, ctx)); std::unique_lock ulk(mtx); @@ -566,9 +578,9 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, Flush(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); aioOpReturn = false; ASSERT_EQ(0, Flush4Nebd(1, ctx)); std::unique_lock ulk(mtx); @@ -596,14 +608,12 @@ TEST_F(NebdFileClientTest, InitAndUninitTest) { } // namespace client } // namespace nebd - int main(int argc, char* argv[]) { - std::vector nebdConfig { + std::vector nebdConfig{ std::string("nebdserver.serverAddress=") + kNebdServerTestAddress, std::string("metacache.fileLockPath=/tmp"), std::string("request.syncRpcMaxRetryTimes=10"), - std::string("log.path=.") - }; + std::string("log.path=.")}; nebd::common::NebdClientConfigGenerator generator; generator.SetConfigPath(kNebdClientConf); diff --git a/nebd/test/part2/file_manager_unittest.cpp b/nebd/test/part2/file_manager_unittest.cpp index 0d13a7b18c..0b59f918aa 100644 --- a/nebd/test/part2/file_manager_unittest.cpp +++ b/nebd/test/part2/file_manager_unittest.cpp @@ -20,15 +20,17 @@ * Author: yangyaokai */ -#include +#include "nebd/src/part2/file_manager.h" + #include -#include +#include + #include +#include -#include "nebd/src/part2/file_manager.h" #include "nebd/src/part2/file_entity.h" -#include "nebd/test/part2/mock_request_executor.h" #include "nebd/test/part2/mock_metafile_manager.h" +#include "nebd/test/part2/mock_request_executor.h" namespace nebd { namespace server { @@ -38,11 +40,11 @@ const char testFile2[] = "test:/cinder/222"; const char unknownFile[] = "un:/cinder/666"; 
using ::testing::_; -using ::testing::Return; -using ::testing::NotNull; using ::testing::DoAll; -using ::testing::ReturnArg; using ::testing::ElementsAre; +using ::testing::NotNull; +using ::testing::Return; +using ::testing::ReturnArg; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; @@ -66,12 +68,10 @@ class FileManagerTest : public ::testing::Test { metaFileManager_ = std::make_shared(); fileManager_ = std::make_shared(metaFileManager_); } - void TearDown() { - delete aioContext_; - } + void TearDown() { delete aioContext_; } using TestTask = std::function; - // 构造初始环境 + // Construct initial environment void InitEnv() { NebdFileMeta meta; meta.fd = 1; @@ -80,18 +80,14 @@ class FileManagerTest : public ::testing::Test { fileMetas.emplace_back(meta); EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) - .WillOnce(DoAll(SetArgPointee<0>(fileMetas), - Return(0))); - EXPECT_CALL(*executor_, Reopen(_, _)) - .WillOnce(Return(mockInstance_)); + .WillOnce(DoAll(SetArgPointee<0>(fileMetas), Return(0))); + EXPECT_CALL(*executor_, Reopen(_, _)).WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(_, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ASSERT_EQ(fileManager_->Run(), 0); } - void UnInitEnv() { - ASSERT_EQ(fileManager_->Fini(), 0); - } + void UnInitEnv() { ASSERT_EQ(fileManager_->Fini(), 0); } void ExpectCallRequest(RequestType type, int ret) { switch (type) { @@ -125,20 +121,19 @@ class FileManagerTest : public ::testing::Test { ASSERT_NE(nullptr, entity1); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // 文件状态为OPENED + // The file status is OPENED ExpectCallRequest(type, 0); ASSERT_EQ(0, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); ASSERT_EQ(entity1->Close(false), 0); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 文件状态为CLOSED + // The file status is CLOSED EXPECT_CALL(*executor_, Open(testFile1, _)) - .WillOnce(Return(mockInstance_)); + .WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ExpectCallRequest(type, 0); ASSERT_EQ(0, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); @@ -147,53 +142,47 @@ class FileManagerTest : public ::testing::Test { void RequestFailTest(RequestType type, TestTask task) { InitEnv(); - // 将文件close + // Close the file NebdFileEntityPtr entity1 = fileManager_->GetFileEntity(1); ASSERT_NE(nullptr, entity1); - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); ASSERT_EQ(entity1->Close(false), 0); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // open文件失败 - EXPECT_CALL(*executor_, Open(testFile1, _)) - .WillOnce(Return(nullptr)); - EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) - .Times(0); + // Open file failed + EXPECT_CALL(*executor_, Open(testFile1, _)).WillOnce(Return(nullptr)); + EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)).Times(0); ASSERT_EQ(-1, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 更新元数据文件失败 + // Failed to update metadata file EXPECT_CALL(*executor_, Open(testFile1, _)) - .WillOnce(Return(mockInstance_)); + .WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*executor_, 
Close(NotNull())) - .WillOnce(Return(0)); + .WillOnce(Return(-1)); + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); ASSERT_EQ(-1, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 执行处理函数失败 + // Failed to execute processing function EXPECT_CALL(*executor_, Open(testFile1, _)) - .WillOnce(Return(mockInstance_)); + .WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ExpectCallRequest(type, -1); ASSERT_EQ(-1, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // 将文件状态置为DESTROYED - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); + // Set the file status to DESTROYED + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ASSERT_EQ(entity1->Close(true), 0); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::DESTROYED); - EXPECT_CALL(*executor_, Open(testFile1, _)) - .Times(0); + EXPECT_CALL(*executor_, Open(testFile1, _)).Times(0); ASSERT_EQ(-1, task(1)); - // 直接将文件删除 + // Delete the file directly ASSERT_EQ(0, fileManager_->Close(1, true)); ASSERT_EQ(nullptr, fileManager_->GetFileEntity(1)); ASSERT_EQ(-1, task(1)); @@ -216,17 +205,14 @@ TEST_F(FileManagerTest, RunTest) { fileMetas.emplace_back(meta); EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) - .WillOnce(DoAll(SetArgPointee<0>(fileMetas), - Return(0))); - EXPECT_CALL(*executor_, Reopen(_, _)) - .WillOnce(Return(mockInstance_)); - EXPECT_CALL(*metaFileManager_, UpdateFileMeta(_, _)) - .WillOnce(Return(0)); + .WillOnce(DoAll(SetArgPointee<0>(fileMetas), Return(0))); + EXPECT_CALL(*executor_, Reopen(_, _)).WillOnce(Return(mockInstance_)); + EXPECT_CALL(*metaFileManager_, UpdateFileMeta(_, _)).WillOnce(Return(0)); ASSERT_EQ(fileManager_->Run(), 0); - // 重复run返回失败 + // A repeated Run returns failure ASSERT_EQ(fileManager_->Run(), -1); - // 校验结果 + // Verify the results FileEntityMap entityMap = fileManager_->GetFileEntityMap(); ASSERT_EQ(1, entityMap.size()); ASSERT_NE(nullptr, entityMap[meta.fd]); @@ -239,44 +225,36 @@ TEST_F(FileManagerTest, RunFailTest) { std::vector fileMetas; fileMetas.emplace_back(meta); - // list file meta失败 - EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) - .WillOnce(Return(-1)); + // List file meta failed + EXPECT_CALL(*metaFileManager_, ListFileMeta(_)).WillOnce(Return(-1)); ASSERT_EQ(fileManager_->Run(), -1); - // reopen失败不影响Run成功 + // Reopen failure does not affect Run success EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) - .WillOnce(DoAll(SetArgPointee<0>(fileMetas), - Return(0))); - EXPECT_CALL(*executor_, Reopen(_, _)) - .WillOnce(Return(nullptr)); + .WillOnce(DoAll(SetArgPointee<0>(fileMetas), Return(0))); + EXPECT_CALL(*executor_, Reopen(_, _)).WillOnce(Return(nullptr)); ASSERT_EQ(fileManager_->Run(), 0); ASSERT_EQ(fileManager_->Fini(), 0); - // 更新metafile失败不影响Run成功 + // Failure to update the metafile does not affect the success of Run EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) - .WillOnce(DoAll(SetArgPointee<0>(fileMetas), - Return(0))); - EXPECT_CALL(*executor_, Reopen(_, _)) - .WillOnce(Return(mockInstance_)); + .WillOnce(DoAll(SetArgPointee<0>(fileMetas), Return(0))); + EXPECT_CALL(*executor_, Reopen(_, _)).WillOnce(Return(mockInstance_)); + EXPECT_CALL(*metaFileManager_, UpdateFileMeta(_,
_)).WillOnce(Return(-1)); + EXPECT_CALL(*executor_, Close(NotNull())).Times(1); ASSERT_EQ(fileManager_->Run(), 0); } TEST_F(FileManagerTest, OpenTest) { InitEnv(); - // open一个不存在的文件 + // Open a non-existent file EXPECT_CALL(*executor_, Open(testFile2, _)).WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); int fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, 2); - // 重复open + // Repeated open fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, 2); @@ -288,15 +266,13 @@ TEST_F(FileManagerTest, OpenTest) { ASSERT_NE(entity2, nullptr); ASSERT_EQ(entity2->GetFileStatus(), NebdFileStatus::OPENED); - EXPECT_CALL(*executor_, Close(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*executor_, Close(_)).WillOnce(Return(0)); ASSERT_EQ(entity2->Close(false), 0); ASSERT_EQ(entity2->GetFileStatus(), NebdFileStatus::CLOSED); - // open 已经close的文件, fd不变 + // Opening an already closed file keeps the fd unchanged EXPECT_CALL(*executor_, Open(testFile2, _)).WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, 2); ASSERT_EQ(entity2->GetFileStatus(), NebdFileStatus::OPENED); @@ -304,79 +280,67 @@ TEST_F(FileManagerTest, OpenFailTest) { InitEnv(); - // 调用后端open接口时出错 + // Error calling the backend open interface EXPECT_CALL(*executor_, Open(testFile2, _)).WillOnce(Return(nullptr)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)).Times(0); int fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, -1); - // 持久化元数据信息失败 + // Persisting metadata information failed EXPECT_CALL(*executor_, Open(testFile2, _)).WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) - .WillOnce(Return(-1)); + .WillOnce(Return(-1)); EXPECT_CALL(*executor_, Close(_)).Times(1); fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, -1); - // Open一个非法的filename + // Open an illegal filename EXPECT_CALL(*executor_, Open(_, _)).Times(0); fd = fileManager_->Open(unknownFile, nullptr); ASSERT_EQ(fd, -1); } TEST_F(FileManagerTest, CloseTest) { InitEnv(); - // 指定的fd不存在,直接返回成功 + // The specified fd does not exist; return success directly ASSERT_EQ(nullptr, fileManager_->GetFileEntity(2)); ASSERT_EQ(0, fileManager_->Close(2, true)); NebdFileEntityPtr entity1 = fileManager_->GetFileEntity(1); ASSERT_NE(nullptr, entity1); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // 文件存在,且文件状态为OPENED,removeRecord为false + // The file exists and its status is OPENED, while removeRecord is false EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)).Times(0); ASSERT_EQ(0, fileManager_->Close(1, false));
ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 文件存在,文件状态为CLOSED,removeRecord为false - EXPECT_CALL(*executor_, Close(NotNull())) - .Times(0); - EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .Times(0); + // File exists, file status is CLOSED, removeRecord is false + EXPECT_CALL(*executor_, Close(NotNull())).Times(0); + EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)).Times(0); ASSERT_EQ(0, fileManager_->Close(1, false)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 文件存在,文件状态为CLOSED,removeRecord为true - EXPECT_CALL(*executor_, Close(NotNull())) - .Times(0); + // The file exists, the file status is CLOSED, and removeRecord is true + EXPECT_CALL(*executor_, Close(NotNull())).Times(0); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ASSERT_EQ(0, fileManager_->Close(1, true)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::DESTROYED); ASSERT_EQ(nullptr, fileManager_->GetFileEntity(1)); - EXPECT_CALL(*executor_, Open(testFile2, _)) - .WillOnce(Return(mockInstance_)); + EXPECT_CALL(*executor_, Open(testFile2, _)).WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); int fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, 2); NebdFileEntityPtr entity2 = fileManager_->GetFileEntity(2); ASSERT_NE(entity2, nullptr); ASSERT_EQ(entity2->GetFileStatus(), NebdFileStatus::OPENED); - // 文件存在,文件状态为OPENED,removeRecord为true - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); + // File exists, file status is OPENED, removeRecord is true + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile2)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ASSERT_EQ(0, fileManager_->Close(fd, true)); ASSERT_EQ(nullptr, fileManager_->GetFileEntity(1)); } @@ -387,36 +351,31 @@ TEST_F(FileManagerTest, CloseFailTest) { ASSERT_NE(nullptr, entity1); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // executor close 失败 - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(-1)); - EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .Times(0); + // Executor close failed + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(-1)); + EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)).Times(0); ASSERT_EQ(-1, fileManager_->Close(1, true)); ASSERT_NE(nullptr, fileManager_->GetFileEntity(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // remove file meta 失败 - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); + // Remove file meta failed + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .WillOnce(Return(-1)); + .WillOnce(Return(-1)); ASSERT_EQ(-1, fileManager_->Close(1, true)); ASSERT_NE(nullptr, fileManager_->GetFileEntity(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); } TEST_F(FileManagerTest, ExtendTest) { - auto task = [&](int fd)->int { - return fileManager_->Extend(fd, 4096); - }; + auto task = [&](int fd) -> int { return fileManager_->Extend(fd, 4096); }; RequestSuccssTest(RequestType::EXTEND, task); RequestFailTest(RequestType::EXTEND, task); } TEST_F(FileManagerTest, GetInfoTest) { NebdFileInfo fileInfo; - auto task = [&](int fd)->int { + auto task = [&](int fd) -> int { return fileManager_->GetInfo(fd, &fileInfo); }; RequestSuccssTest(RequestType::GETINFO, task); @@ 
-424,16 +383,14 @@ TEST_F(FileManagerTest, GetInfoTest) { } TEST_F(FileManagerTest, InvalidCacheTest) { - auto task = [&](int fd)->int { - return fileManager_->InvalidCache(fd); - }; + auto task = [&](int fd) -> int { return fileManager_->InvalidCache(fd); }; RequestSuccssTest(RequestType::INVALIDCACHE, task); RequestFailTest(RequestType::INVALIDCACHE, task); } TEST_F(FileManagerTest, AioReadTest) { NebdServerAioContext aioContext; - auto task = [&](int fd)->int { + auto task = [&](int fd) -> int { int ret = fileManager_->AioRead(fd, &aioContext); if (ret < 0) { if (aioContext.done != nullptr) { @@ -457,7 +414,7 @@ TEST_F(FileManagerTest, AioReadTest) { TEST_F(FileManagerTest, AioWriteTest) { NebdServerAioContext aioContext; - auto task = [&](int fd)->int { + auto task = [&](int fd) -> int { int ret = fileManager_->AioWrite(fd, &aioContext); if (ret < 0) { if (aioContext.done != nullptr) { @@ -481,7 +438,7 @@ TEST_F(FileManagerTest, AioWriteTest) { TEST_F(FileManagerTest, DiscardTest) { NebdServerAioContext aioContext; - auto task = [&](int fd)->int { + auto task = [&](int fd) -> int { int ret = fileManager_->Discard(fd, &aioContext); if (ret < 0) { if (aioContext.done != nullptr) { @@ -505,7 +462,7 @@ TEST_F(FileManagerTest, DiscardTest) { TEST_F(FileManagerTest, FlushTest) { NebdServerAioContext aioContext; - auto task = [&](int fd)->int { + auto task = [&](int fd) -> int { int ret = fileManager_->Flush(fd, &aioContext); if (ret < 0) { if (aioContext.done != nullptr) { @@ -544,7 +501,7 @@ TEST_F(FileManagerTest, UpdateTimestampTest) { } // namespace server } // namespace nebd -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); return RUN_ALL_TESTS(); diff --git a/nebd/test/part2/heartbeat_manager_unittest.cpp b/nebd/test/part2/heartbeat_manager_unittest.cpp index 2ae0e8d221..9d1e0eaabb 100644 --- a/nebd/test/part2/heartbeat_manager_unittest.cpp +++ b/nebd/test/part2/heartbeat_manager_unittest.cpp @@ -20,10 +20,12 @@ * Author: yangyaokai */ +#include "nebd/src/part2/heartbeat_manager.h" + #include + #include -#include "nebd/src/part2/heartbeat_manager.h" #include "nebd/test/part2/mock_file_entity.h" #include "nebd/test/part2/mock_file_manager.h" @@ -35,11 +37,11 @@ namespace server { using ::testing::_; using ::testing::AtLeast; -using ::testing::Return; -using ::testing::NotNull; using ::testing::DoAll; -using ::testing::ReturnArg; using ::testing::ElementsAre; +using ::testing::NotNull; +using ::testing::Return; +using ::testing::ReturnArg; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; @@ -53,16 +55,16 @@ class HeartbeatManagerTest : public ::testing::Test { option.fileManager = fileManager_; heartbeatManager_ = std::make_shared(option); } - std::shared_ptr fileManager_; + std::shared_ptr fileManager_; std::shared_ptr heartbeatManager_; }; TEST_F(HeartbeatManagerTest, CheckTimeoutTest) { ASSERT_EQ(heartbeatManager_->Run(), 0); - // 已经在run了不允许重复Run或者Init + // It is already running, and duplicate Run or Init is not allowed ASSERT_EQ(heartbeatManager_->Run(), -1); - // 构造file entity + // Construct file entity uint64_t curTime = TimeUtility::GetTimeofDayMs(); std::shared_ptr entity1 = std::make_shared(); @@ -71,51 +73,44 @@ TEST_F(HeartbeatManagerTest, CheckTimeoutTest) { std::shared_ptr entity3 = std::make_shared(); EXPECT_CALL(*entity1, GetFileTimeStamp()) - .WillRepeatedly(Return(curTime - 2 * 10 * 1000)); + .WillRepeatedly(Return(curTime - 2 * 10 * 1000)); 
EXPECT_CALL(*entity1, GetFileStatus()) - .WillRepeatedly(Return(NebdFileStatus::OPENED)); + .WillRepeatedly(Return(NebdFileStatus::OPENED)); EXPECT_CALL(*entity2, GetFileTimeStamp()) - .WillRepeatedly(Return(curTime - 2 * 10 * 1000)); + .WillRepeatedly(Return(curTime - 2 * 10 * 1000)); EXPECT_CALL(*entity2, GetFileStatus()) - .WillRepeatedly(Return(NebdFileStatus::CLOSED)); + .WillRepeatedly(Return(NebdFileStatus::CLOSED)); - EXPECT_CALL(*entity3, GetFileTimeStamp()) - .WillRepeatedly(Return(curTime)); + EXPECT_CALL(*entity3, GetFileTimeStamp()).WillRepeatedly(Return(curTime)); EXPECT_CALL(*entity3, GetFileStatus()) - .WillRepeatedly(Return(NebdFileStatus::OPENED)); + .WillRepeatedly(Return(NebdFileStatus::OPENED)); - // 构造file map + // Construct a file map FileEntityMap entityMap; entityMap.emplace(1, entity1); entityMap.emplace(2, entity2); entityMap.emplace(3, entity3); EXPECT_CALL(*fileManager_, GetFileEntityMap()) - .WillRepeatedly(Return(entityMap)); + .WillRepeatedly(Return(entityMap)); - // 预期结果 - EXPECT_CALL(*entity1, Close(false)) - .Times(AtLeast(1)); - EXPECT_CALL(*entity2, Close(false)) - .Times(0); - EXPECT_CALL(*entity3, Close(false)) - .Times(0); + // Expected results + EXPECT_CALL(*entity1, Close(false)).Times(AtLeast(1)); + EXPECT_CALL(*entity2, Close(false)).Times(0); + EXPECT_CALL(*entity3, Close(false)).Times(0); ::sleep(2); ASSERT_EQ(heartbeatManager_->Fini(), 0); - // 重复Fini,也返回成功 + // A repeated Fini also returns success ASSERT_EQ(heartbeatManager_->Fini(), 0); } TEST_F(HeartbeatManagerTest, UpdateTimeStampTest) { std::shared_ptr entity = std::make_shared(); - EXPECT_CALL(*fileManager_, GetFileEntity(1)) - .WillOnce(Return(entity)); - EXPECT_CALL(*entity, UpdateFileTimeStamp(100)) - .Times(1); + EXPECT_CALL(*fileManager_, GetFileEntity(1)).WillOnce(Return(entity)); + EXPECT_CALL(*entity, UpdateFileTimeStamp(100)).Times(1); ASSERT_TRUE(heartbeatManager_->UpdateFileTimestamp(1, 100)); - EXPECT_CALL(*fileManager_, GetFileEntity(1)) - .WillOnce(Return(nullptr)); + EXPECT_CALL(*fileManager_, GetFileEntity(1)).WillOnce(Return(nullptr)); ASSERT_FALSE(heartbeatManager_->UpdateFileTimestamp(1, 100)); } @@ -136,7 +131,7 @@ TEST_F(HeartbeatManagerTest, UpdateNebdClientInfo) { } // namespace server } // namespace nebd -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
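The CheckTimeoutTest above pins down the sweep rule: only entities that are still OPENED and whose last heartbeat is older than the timeout get Close(false). A minimal sketch of that predicate, with illustrative names — the real logic lives in nebd/src/part2/heartbeat_manager.cpp:

    // Sketch: the per-entity check the heartbeat manager's sweep presumably makes.
    // 'timeoutMs' corresponds to heartbeat.timeout.sec from the server config.
    bool ShouldForceClose(const NebdFileEntityPtr& entity,
                          uint64_t nowMs, uint64_t timeoutMs) {
        // entity2 in the test is CLOSED, so it is skipped even though stale.
        if (entity->GetFileStatus() != NebdFileStatus::OPENED) {
            return false;
        }
        // entity1 (20s stale) is closed; entity3 (fresh) is left alone.
        return nowMs - entity->GetFileTimeStamp() > timeoutMs;
    }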
diff --git a/nebd/test/part2/heartbeat_service_test.cpp b/nebd/test/part2/heartbeat_service_test.cpp index 7d60ce6981..7e29edd10c 100644 --- a/nebd/test/part2/heartbeat_service_test.cpp +++ b/nebd/test/part2/heartbeat_service_test.cpp @@ -20,13 +20,15 @@ * Author: charisu */ -#include +#include "nebd/src/part2/heartbeat_service.h" + #include #include +#include + #include #include "nebd/proto/heartbeat.pb.h" -#include "nebd/src/part2/heartbeat_service.h" #include "nebd/test/part2/mock_heartbeat_manager.h" using ::testing::_; using ::testing::Return; @@ -41,15 +43,15 @@ class HeartbeatServiceTest : public ::testing::Test { void SetUp() override { heartbeatManager_ = std::make_shared(); } - std::shared_ptr heartbeatManager_; + std::shared_ptr heartbeatManager_; }; TEST_F(HeartbeatServiceTest, KeepAlive) { - // 启动server + // Start the server brpc::Server server; NebdHeartbeatServiceImpl heartbeatService(heartbeatManager_); ASSERT_EQ(0, server.AddService(&heartbeatService, - brpc::SERVER_DOESNT_OWN_SERVICE)); + brpc::SERVER_DOESNT_OWN_SERVICE)); brpc::ServerOptions option; option.idle_timeout_sec = -1; ASSERT_EQ(0, server.StartAtSockFile(kSockFile_.c_str(), &option)); @@ -68,7 +70,7 @@ TEST_F(HeartbeatServiceTest, KeepAlive) { nebd::client::NebdHeartbeatService_Stub stub(&channel); brpc::Controller cntl; - // 正常情况 + // Normal situation EXPECT_CALL(*heartbeatManager_, UpdateFileTimestamp(_, _)) .Times(3) .WillRepeatedly(Return(true)); @@ -76,7 +78,7 @@ ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(nebd::client::RetCode::kOK, response.retcode()); - // 有文件更新时间戳失败 + // Some file failed to update its timestamp EXPECT_CALL(*heartbeatManager_, UpdateFileTimestamp(_, _)) .Times(3) .WillOnce(Return(false)) @@ -86,14 +88,14 @@ ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(nebd::client::RetCode::kNoOK, response.retcode()); - // 停止server + // Stop the server server.Stop(0); server.Join(); } } // namespace server } // namespace nebd -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/nebd/test/part2/metafile_manager_test.cpp b/nebd/test/part2/metafile_manager_test.cpp index 7027cb9da6..dbde2d4ee3 100644 --- a/nebd/test/part2/metafile_manager_test.cpp +++ b/nebd/test/part2/metafile_manager_test.cpp @@ -20,11 +20,13 @@ * Author: charisu */ +#include "nebd/src/part2/metafile_manager.h" + #include #include + #include -#include "nebd/src/part2/metafile_manager.h" #include "nebd/test/part2/mock_posix_wrapper.h" using ::testing::_; @@ -37,8 +39,7 @@ const char metaPath[] = "/tmp/nebd-test-metafilemanager.meta"; void FillCrc(Json::Value* root) { std::string jsonString = root->toStyledString(); - uint32_t crc = nebd::common::CRC32(jsonString.c_str(), - jsonString.size()); + uint32_t crc = nebd::common::CRC32(jsonString.c_str(), jsonString.size()); (*root)[kCRC] = crc; } @@ -61,19 +62,19 @@ TEST_F(MetaFileManagerTest, nomaltest) { NebdMetaFileManager metaFileManager; ASSERT_EQ(metaFileManager.Init(option), 0); std::vector fileMetas; - // 文件不存在 + // File does not exist ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_TRUE(fileMetas.empty()); - // 添加两条记录,curve和test各一 + // Add two records, one for curve and one for test NebdFileMeta fileMeta1; fileMeta1.fileName = "test:volume1"; fileMeta1.fd = 1; ASSERT_EQ(0, metaFileManager.UpdateFileMeta(fileMeta1.fileName, fileMeta1)); - // 使用相同的内容Update + // Update using the same content ASSERT_EQ(0, metaFileManager.UpdateFileMeta(fileMeta1.fileName, fileMeta1)); - // 插入不同的meta + // Insert a different meta NebdFileMeta fileMeta2; fileMeta2.fileName = "cbd:volume2"; fileMeta2.fd = 2; @@ -89,9 +90,9 @@ // remove meta ASSERT_EQ(0, metaFileManager.RemoveFileMeta(fileMeta2.fileName)); - // remove 不存在的meta + // remove a non-existent meta ASSERT_EQ(0, metaFileManager.RemoveFileMeta("unknown")); - // 校验结果 + // Verify the results fileMetas.clear(); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); @@ -111,35 +112,28 @@ TEST_F(MetaFileManagerTest, UpdateMetaFailTest) { fileMetaMap.emplace(fileMeta.fileName, fileMeta); std::vector fileMetas; - // open临时文件失败 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(-1)); + // Failed to open the temporary file + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, metaFileManager.UpdateFileMeta(fileMeta.fileName, fileMeta)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(0, fileMetas.size()); - // 写入临时文件失败 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(1)); -
EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) - .WillOnce(Return(0)); - EXPECT_CALL(*wrapper_, close(_)) - .Times(1); + // Failed to write temporary file + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(1)); + EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)).WillOnce(Return(0)); + EXPECT_CALL(*wrapper_, close(_)).Times(1); ASSERT_EQ(-1, metaFileManager.UpdateFileMeta(fileMeta.fileName, fileMeta)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(0, fileMetas.size()); - // rename失败 + // Rename failed NebdMetaFileParser parser; Json::Value root = parser.ConvertFileMetasToJson(fileMetaMap); - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(1)); EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) .WillOnce(Return(root.toStyledString().size())); - EXPECT_CALL(*wrapper_, close(_)) - .Times(1); - EXPECT_CALL(*wrapper_, rename(_, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper_, close(_)).Times(1); + EXPECT_CALL(*wrapper_, rename(_, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, metaFileManager.UpdateFileMeta(fileMeta.fileName, fileMeta)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(0, fileMetas.size()); @@ -160,15 +154,12 @@ TEST_F(MetaFileManagerTest, RemoveMetaFailTest) { NebdMetaFileParser parser; Json::Value root = parser.ConvertFileMetasToJson(fileMetaMap); - // 先插入一条数据 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(1)); + // Insert a piece of data first + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(1)); EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) .WillOnce(Return(root.toStyledString().size())); - EXPECT_CALL(*wrapper_, close(_)) - .Times(1); - EXPECT_CALL(*wrapper_, rename(_, _)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper_, close(_)).Times(1); + EXPECT_CALL(*wrapper_, rename(_, _)).WillOnce(Return(0)); ASSERT_EQ(0, metaFileManager.UpdateFileMeta(fileMeta.fileName, fileMeta)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); @@ -176,33 +167,26 @@ TEST_F(MetaFileManagerTest, RemoveMetaFailTest) { fileMetaMap.erase(fileMeta.fileName); root = parser.ConvertFileMetasToJson(fileMetaMap); - // open临时文件失败 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(-1)); + // Open temporary file failed + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, metaFileManager.RemoveFileMeta(fileMeta.fileName)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); - // 写入临时文件失败 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) - .WillOnce(Return(0)); - EXPECT_CALL(*wrapper_, close(_)) - .Times(1); + // Failed to write temporary file + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(1)); + EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)).WillOnce(Return(0)); + EXPECT_CALL(*wrapper_, close(_)).Times(1); ASSERT_EQ(-1, metaFileManager.RemoveFileMeta(fileMeta.fileName)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); - // rename失败 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(1)); + // Rename failed + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(1)); EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) .WillOnce(Return(root.toStyledString().size())); - EXPECT_CALL(*wrapper_, close(_)) - .Times(1); - EXPECT_CALL(*wrapper_, rename(_, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper_, close(_)).Times(1); + EXPECT_CALL(*wrapper_, rename(_, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, 
metaFileManager.RemoveFileMeta(fileMeta.fileName)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); @@ -215,7 +199,7 @@ TEST(MetaFileParserTest, Parse) { Json::Value volumes; FileMetaMap fileMetas; - // 正常情况 + // Normal situation volume[kFileName] = "cbd:volume1"; volume[kFd] = 1; volumes.append(volume); @@ -225,18 +209,19 @@ FillCrc(&root); ASSERT_EQ(0, parser.Parse(root, &fileMetas)); - // 空指针 + // Null pointer ASSERT_EQ(-1, parser.Parse(root, nullptr)); - // crc校验不正确 + // Incorrect CRC, the check should fail root[kCRC] = root[kCRC].asUInt() + 1; ASSERT_EQ(-1, parser.Parse(root, &fileMetas)); - // 没有crc字段 + // No crc field root.removeMember(kCRC); ASSERT_EQ(-1, parser.Parse(root, &fileMetas)); - // 没有volumes字段或volumes字段是null,不应该报错 + // There is no volumes field or the volumes field is null, and an error + // should not be reported root.clear(); root["key"] = "value"; FillCrc(&root); @@ -249,7 +234,7 @@ ASSERT_EQ(0, parser.Parse(root, &fileMetas)); ASSERT_TRUE(fileMetas.empty()); - // 记录中没有filename + // There is no filename in the record volume.clear(); volumes.clear(); root.clear(); @@ -259,7 +244,7 @@ FillCrc(&root); ASSERT_EQ(-1, parser.Parse(root, &fileMetas)); - // 记录中没有fd + // The record does not contain an 'fd'. volume.clear(); volumes.clear(); root.clear(); @@ -273,7 +258,7 @@ } // namespace server } // namespace nebd -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/nebd/test/part2/test_nebd_server.cpp b/nebd/test/part2/test_nebd_server.cpp index 1f6f8ef112..effcdc05b3 100644 --- a/nebd/test/part2/test_nebd_server.cpp +++ b/nebd/test/part2/test_nebd_server.cpp @@ -21,27 +21,28 @@ */ #include + #include "nebd/src/part2/nebd_server.h" #include "nebd/test/part2/mock_curve_client.h" namespace nebd { namespace server { -using ::testing::Return; using ::testing::_; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; TEST(TestNebdServer, test_Init_Run_Fini) { NebdServer server; auto curveClient = std::make_shared(); std::string confPath; - // 1. 配置文件不存在, init失败 + // 1. Configuration file does not exist, init fails confPath = "./nebd.conf"; ASSERT_EQ(-1, server.Init(confPath)); - // 2. 配置文件存在, 监听端口未设置 + // 2. Configuration file exists, listening address not set confPath = "./nebd/test/part2/nebd-server-err.conf"; Configuration conf; conf.SetBoolValue("response.returnRpcWhenIoError", false); @@ -49,55 +50,54 @@ conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath)); - // 3、配置文件中没有client配置 + // 3. There is no client configuration in the configuration file conf.SetStringValue("listen.address", "/tmp/nebd-server.sock"); conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath)); - // 4. curveclient init失败 + // 4. Curveclient init fails conf.SetStringValue("curveclient.confPath", "/etc/curve/client.conf"); conf.SaveConfig(); EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(-1)); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 5、初始化fileManager失败 + // 5. Failed to initialize the fileManager EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); ASSERT_EQ(-1, server.Init(confPath, curveClient));
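As a reading aid, the configuration that test_Init_Run_Fini assembles step by step ends up roughly like the sketch below; the keys and values are exactly the ones the test sets, while the file layout itself is illustrative:

    # nebd-server-err.conf, as built up by the test
    response.returnRpcWhenIoError=false
    listen.address=/tmp/nebd-server.sock
    curveclient.confPath=/etc/curve/client.conf
    meta.file.path=./nebd-server-test.meta
    heartbeat.timeout.sec=30
    heartbeat.check.interval.ms=3000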
- // 6、没有heartbeat.timeout字段 + // 6. There is no heartbeat.timeout field EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); conf.SetStringValue("meta.file.path", "./nebd-server-test.meta"); conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 7、没有heartbeat.check.interval.ms字段 + // 7. No heartbeat.check.interval.ms field EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); conf.SetIntValue("heartbeat.timeout.sec", 30); conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - - // 8. 初始化成功 + // 8. Initialized successfully EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); conf.SetIntValue("heartbeat.check.interval.ms", 3000); conf.SaveConfig(); ASSERT_EQ(0, server.Init(confPath, curveClient)); - // 9. run成功 + // 9. Run succeeds EXPECT_CALL(*curveClient, UnInit()).Times(2); std::thread nebdServerThread(&NebdServer::RunUntilAskedToQuit, &server); sleep(1); - // 10、再次Run会失败 + // 10. Running again will fail ASSERT_EQ(-1, server.RunUntilAskedToQuit()); - // 11、Run之后Init会失败 + // 11. Init will fail after Run ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 7. stop成功 + // 12. Stop succeeds ASSERT_EQ(0, server.Fini()); - // 8. 再次stop不会重复释放资源 + // 13. Stopping again does not release resources twice ASSERT_EQ(0, server.Fini()); nebdServerThread.join(); } @@ -105,7 +105,7 @@ } // namespace server } // namespace nebd -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/nebd/test/part2/test_request_executor_curve.cpp b/nebd/test/part2/test_request_executor_curve.cpp index 2b749d0615..8d8c3811f2 100644 --- a/nebd/test/part2/test_request_executor_curve.cpp +++ b/nebd/test/part2/test_request_executor_curve.cpp @@ -21,36 +21,30 @@ */ #include -#include "nebd/src/part2/request_executor_curve.h" -#include "nebd/test/part2/mock_curve_client.h" #include "nebd/proto/client.pb.h" #include "nebd/proto/heartbeat.pb.h" #include "nebd/src/part2/file_service.h" +#include "nebd/src/part2/request_executor_curve.h" +#include "nebd/test/part2/mock_curve_client.h" namespace nebd { namespace server { -using ::testing::Return; using ::testing::_; -using ::testing::SetArgPointee; using ::testing::DoAll; -using ::testing::SaveArg; using ::testing::Invoke; +using ::testing::Return; +using ::testing::SaveArg; +using ::testing::SetArgPointee; class TestReuqestExecutorCurveClosure : public google::protobuf::Closure { public: TestReuqestExecutorCurveClosure() : runned_(false) {} ~TestReuqestExecutorCurveClosure() {} - void Run() { - runned_ = true; - } - bool IsRunned() { - return runned_; - } - void Reset() { - runned_ = false; - } + void Run() { runned_ = true; } + bool IsRunned() { return runned_; } + void Reset() { runned_ = false; } private: bool runned_; @@ -60,7 +54,7 @@ void NebdUnitTestCallback(NebdServerAioContext* context) { std::cout << "callback" << std::endl; } -class TestReuqestExecutorCurve : public ::testing::Test { +class TestReuqestExecutorCurve : public ::testing::Test { protected: void SetUp() { curveClient_ = std::make_shared(); @@ -77,7 +71,7 @@ TEST_F(TestReuqestExecutorCurve, test_Open) { std::string fileName("cbd:pool1//cinder/volume-1234_cinder_:/client.conf"); std::string curveFileName("/cinder/volume-1234_cinder_"); - // 1.
Failed to parse the passed-in fileName { std::string errFileName("cbd:pool1/:"); EXPECT_CALL(*curveClient_, Open(fileName, _)).Times(0); std::shared_ptr ret = executor.Open(errFileName, nullptr); ASSERT_TRUE(nullptr == ret); } - // 2. curveclient open失败 + // 2. Curveclient open failed { - EXPECT_CALL(*curveClient_, Open(curveFileName, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*curveClient_, Open(curveFileName, _)).WillOnce(Return(-1)); std::shared_ptr ret = executor.Open(fileName, nullptr); ASSERT_TRUE(nullptr == ret); } - // 3. open成功 + // 3. Open successful { - EXPECT_CALL(*curveClient_, Open(curveFileName, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*curveClient_, Open(curveFileName, _)).WillOnce(Return(1)); std::shared_ptr ret = executor.Open(fileName, nullptr); ASSERT_TRUE(nullptr != ret); - auto *curveIns = dynamic_cast(ret.get()); + auto* curveIns = dynamic_cast(ret.get()); ASSERT_TRUE(nullptr != curveIns); ASSERT_EQ(curveFileName, curveIns->fileName); ASSERT_EQ(1, curveIns->fd); @@ -117,16 +109,16 @@ TEST_F(TestReuqestExecutorCurve, test_ReOpen) { std::string fileName("cbd:pool1//cinder/volume-1234_cinder_:/client.conf"); std::string curveFileName("/cinder/volume-1234_cinder_"); - // 1. 传入的fileName解析失败 + // 1. Failed to parse the passed-in fileName { std::string errFileName("cbd:pool1/:"); EXPECT_CALL(*curveClient_, Open(_, _)).Times(0); - std::shared_ptr ret = executor.Reopen( - errFileName, xattr); + std::shared_ptr ret = + executor.Reopen(errFileName, xattr); ASSERT_TRUE(nullptr == ret); } - // 2. repoen失败 + // 2. reopen failed { EXPECT_CALL(*curveClient_, ReOpen(curveFileName, _)) .WillOnce(Return(-1)); @@ -135,14 +127,14 @@ ASSERT_TRUE(nullptr == ret); } - // 3. reopen成功 + // 3. reopen successful { EXPECT_CALL(*curveClient_, ReOpen(curveFileName, _)) .WillOnce(Return(1)); - std::shared_ptr ret = + std::shared_ptr ret = executor.Reopen(fileName, xattr); ASSERT_TRUE(nullptr != ret); - auto *curveIns = dynamic_cast(ret.get()); + auto* curveIns = dynamic_cast(ret.get()); ASSERT_TRUE(nullptr != curveIns); ASSERT_EQ(curveFileName, curveIns->fileName); ASSERT_EQ(1, curveIns->fd); @@ -153,14 +145,14 @@ TEST_F(TestReuqestExecutorCurve, test_Close) { auto executor = CurveRequestExecutor::GetInstance(); - // 1. nebdFileIns不是CurveFileInstance类型, close失败 + // 1. nebdFileIns is not of type CurveFileInstance, close failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, Close(_)).Times(0); ASSERT_EQ(-1, executor.Close(nebdFileIns)); } - // 2. nebdFileIns中的fd<0, close失败 + // 2. fd<0 in nebdFileIns, close failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = -1; @@ -168,7 +160,7 @@ ASSERT_EQ(-1, executor.Close(curveFileIns)); } - // 3. 调用curveclient的close接口失败, close失败 + // 3. Calling the close interface of curveclient failed, close failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; @@ -177,7 +169,7 @@ ASSERT_EQ(-1, executor.Close(curveFileIns)); } - // 4. close成功 + // 4. close successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; @@ -191,21 +183,21 @@ TEST_F(TestReuqestExecutorCurve, test_Extend) { auto executor = CurveRequestExecutor::GetInstance(); std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1.
nebdFileIns is not of type CurveFileInstance, extend failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, Extend(_, _)).Times(0); ASSERT_EQ(-1, executor.Extend(nebdFileIns, 1)); } - // 2. nebdFileIns中的fileName为空, extend失败 + // 2. FileName in nebdFileIns is empty, extend failed { auto curveFileIns = new CurveFileInstance(); EXPECT_CALL(*curveClient_, Extend(_, _)).Times(0); ASSERT_EQ(-1, executor.Extend(curveFileIns, 1)); } - // 3. 调用curveclient的extend接口失败, extend失败 + // 3. Calling the extend interface of curveclient failed, extend failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fileName = curveFilename; @@ -214,7 +206,7 @@ TEST_F(TestReuqestExecutorCurve, test_Extend) { ASSERT_EQ(-1, executor.Extend(curveFileIns, 1)); } - // 4. extend成功 + // 4. extend successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fileName = curveFilename; @@ -229,43 +221,40 @@ TEST_F(TestReuqestExecutorCurve, test_GetInfo) { NebdFileInfo fileInfo; int curveFd = 123; - // 1. nebdFileIns不是CurveFileInstance类型, stat失败 + // 1. nebdFileIns is not of type CurveFileInstance, stat failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, StatFile(curveFd, _)).Times(0); ASSERT_EQ(-1, executor.GetInfo(nebdFileIns, &fileInfo)); } - // 2. nebdFileIns中的fd为空, stat失败 + // 2. Fd in nebdFileIns is empty, stat failed { auto curveFileIns = new CurveFileInstance(); EXPECT_CALL(*curveClient_, StatFile(curveFd, _)).Times(0); ASSERT_EQ(-1, executor.GetInfo(curveFileIns, &fileInfo)); } - - // 3. 调用curveclient的stat接口失败, stat失败 + // 3. Calling the stat interface of curveclient failed, stat failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = curveFd; - EXPECT_CALL(*curveClient_, StatFile(curveFd, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*curveClient_, StatFile(curveFd, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, executor.GetInfo(curveFileIns, &fileInfo)); } - // 4. stat成功 + // 4. stat successful { const uint64_t size = 10ull * 1024 * 1024 * 1024; const uint32_t blocksize = 4096; auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = curveFd; EXPECT_CALL(*curveClient_, StatFile(curveFd, _)) - .WillOnce(Invoke( - [size, blocksize](int /*fd*/, FileStatInfo* info) { - info->length = size; - info->blocksize = blocksize; - return 0; - })); + .WillOnce(Invoke([size, blocksize](int /*fd*/, FileStatInfo* info) { + info->length = size; + info->blocksize = blocksize; + return 0; + })); ASSERT_EQ(0, executor.GetInfo(curveFileIns, &fileInfo)); ASSERT_EQ(size, fileInfo.size); ASSERT_EQ(blocksize, fileInfo.block_size); @@ -278,14 +267,14 @@ TEST_F(TestReuqestExecutorCurve, test_AioRead) { aiotcx.cb = NebdUnitTestCallback; std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1. nebdFileIns不是CurveFileInstance类型, 异步读失败 + // 1. nebdFileIns is not of type CurveFileInstance, asynchronous read failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, AioRead(_, _, _)).Times(0); ASSERT_EQ(-1, executor.AioRead(nebdFileIns, &aiotcx)); } - // 2. nebdFileIns中的fd<0, 异步读失败 + // 2. fd<0 in nebdFileIns, asynchronous read failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = -1; @@ -293,7 +282,8 @@ TEST_F(TestReuqestExecutorCurve, test_AioRead) { ASSERT_EQ(-1, executor.AioRead(curveFileIns, &aiotcx)); } - // 3. 调用curveclient的AioRead接口失败, 异步读失败 + // 3. 
Calling the AioRead interface of curveclient failed, asynchronous read + // failed { auto curveFileIns = new CurveFileInstance(); aiotcx.size = 1; @@ -307,15 +297,14 @@ ASSERT_EQ(-1, executor.AioRead(curveFileIns, &aiotcx)); } - // 4. 异步读取成功 + // 4. Asynchronous read successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; curveFileIns->fileName = curveFilename; CurveAioContext* curveCtx; EXPECT_CALL(*curveClient_, AioRead(1, _, _)) - .WillOnce(DoAll(SaveArg<1>(&curveCtx), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SaveArg<1>(&curveCtx), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(0, executor.AioRead(curveFileIns, &aiotcx)); curveCtx->cb(curveCtx); } @@ -327,14 +316,15 @@ TEST_F(TestReuqestExecutorCurve, test_AioWrite) { NebdServerAioContext aiotcx; aiotcx.cb = NebdUnitTestCallback; std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1. nebdFileIns不是CurveFileInstance类型, 异步写失败 + // 1. nebdFileIns is not of type CurveFileInstance, asynchronous write + // failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, AioWrite(_, _, _)).Times(0); ASSERT_EQ(-1, executor.AioWrite(nebdFileIns, &aiotcx)); } - // 2. nebdFileIns中的fd<0, 异步写失败 + // 2. fd<0 in nebdFileIns, asynchronous write failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = -1; @@ -342,7 +332,8 @@ ASSERT_EQ(-1, executor.AioWrite(curveFileIns, &aiotcx)); } - // 3. 调用curveclient的AioWrite接口失败, 异步写失败 + // 3. Calling the AioWrite interface of curveclient failed, asynchronous + // write failed { auto curveFileIns = new CurveFileInstance(); aiotcx.size = 1; @@ -356,15 +347,14 @@ ASSERT_EQ(-1, executor.AioWrite(curveFileIns, &aiotcx)); } - // 4. 异步写入成功 + // 4. Asynchronous write successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; curveFileIns->fileName = curveFilename; CurveAioContext* curveCtx; EXPECT_CALL(*curveClient_, AioWrite(1, _, _)) - .WillOnce(DoAll(SaveArg<1>(&curveCtx), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SaveArg<1>(&curveCtx), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(0, executor.AioWrite(curveFileIns, &aiotcx)); curveCtx->cb(curveCtx); } @@ -379,8 +369,7 @@ TEST_F(TestReuqestExecutorCurve, test_Discard) { // 1. not a curve volume { std::unique_ptr nebdFileIns(new NebdFileInstance()); - EXPECT_CALL(*curveClient_, AioDiscard(_, _)) - .Times(0); + EXPECT_CALL(*curveClient_, AioDiscard(_, _)).Times(0); ASSERT_EQ(-1, executor.Discard(nebdFileIns.get(), &aioctx)); } @@ -389,8 +378,7 @@ std::unique_ptr curveFileIns( new CurveFileInstance()); curveFileIns->fd = -1; - EXPECT_CALL(*curveClient_, AioDiscard(_, _)) - .Times(0); + EXPECT_CALL(*curveClient_, AioDiscard(_, _)).Times(0); ASSERT_EQ(-1, executor.Discard(curveFileIns.get(), &aioctx)); } @@ -419,8 +407,7 @@ curveFileIns->fileName = curveFilename; CurveAioContext* curveCtx; EXPECT_CALL(*curveClient_, AioDiscard(_, _)) - .WillOnce(DoAll(SaveArg<1>(&curveCtx), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SaveArg<1>(&curveCtx), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(0, executor.Discard(curveFileIns.get(), &aioctx)); curveCtx->cb(curveCtx); }
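The asynchronous cases above all use the same gmock idiom: capture the context pointer handed to the mocked client with SaveArg, return success, then fire the callback by hand to complete the I/O. A condensed, self-contained version of the pattern — the mock and context types below are stand-ins for MockCurveClient and CurveAioContext:

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    using ::testing::_;
    using ::testing::DoAll;
    using ::testing::Return;
    using ::testing::SaveArg;

    struct AioCtx {                       // stand-in for CurveAioContext
        void (*cb)(AioCtx*) = nullptr;
    };

    class MockClient {
     public:
        MOCK_METHOD(int, AioRead, (int fd, AioCtx* ctx));
    };

    TEST(SaveArgPattern, CompleteAioByHand) {
        MockClient client;
        AioCtx ctx;
        ctx.cb = [](AioCtx*) { /* completion logic under test */ };
        AioCtx* captured = nullptr;
        // Capture the context passed to the mock and report success.
        EXPECT_CALL(client, AioRead(1, _))
            .WillOnce(DoAll(SaveArg<1>(&captured), Return(0)));
        ASSERT_EQ(0, client.AioRead(1, &ctx));
        captured->cb(captured);           // drive the callback, as the tests do
    }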
@@ -448,13 +435,13 @@ TEST_F(TestReuqestExecutorCurve, test_InvalidCache) { auto executor = CurveRequestExecutor::GetInstance(); std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1. nebdFileIns不是CurveFileInstance类型, 不合法 + // 1. nebdFileIns is not of type CurveFileInstance, illegal { auto nebdFileIns = new NebdFileInstance(); ASSERT_EQ(-1, executor.InvalidCache(nebdFileIns)); } - // 2. fd<0, 不合法 + // 2. fd<0, illegal { auto curveFileIns = new CurveFileInstance(); curveFileIns->fileName = curveFilename; @@ -462,14 +449,14 @@ ASSERT_EQ(-1, executor.InvalidCache(curveFileIns)); } - // 3. filename为空,不合法 + // 3. The filename is empty, which is illegal { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; ASSERT_EQ(-1, executor.InvalidCache(curveFileIns)); } - // 4. 合法 + // 4. Legal { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; @@ -478,11 +465,10 @@ } } - TEST(TestFileNameParser, test_Parse) { std::string fileName("cbd:pool1//cinder/volume-1234_cinder_:/client.conf"); - std::pair res( - "/cinder/volume-1234_cinder_", "/client.conf"); + std::pair res("/cinder/volume-1234_cinder_", + "/client.conf"); ASSERT_EQ(res, FileNameParser::Parse(fileName)); fileName = "cbd:pool1//cinder/volume-1234_cinder_"; @@ -500,11 +486,10 @@ ASSERT_EQ(res, FileNameParser::Parse(fileName)); } - } // namespace server } // namespace nebd -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); return RUN_ALL_TESTS(); diff --git a/proto/chunk.proto b/proto/chunk.proto index af5cd3fb5a..c19303c854 100755 --- a/proto/chunk.proto +++ b/proto/chunk.proto @@ -20,7 +20,7 @@ package curve.chunkserver; option cc_generic_services = true; option go_package = "proto/chunk"; -// Qos 参数 +// Qos parameters message QosRequestParas { optional uint32 clientId = 1; optional int32 dmclockDelta = 2; @@ -28,38 +28,38 @@ } message QosResponseParas { - optional int32 phase = 1; // 0: 代表 reservation 阶段; 1: 代表 priority 阶段 + optional int32 phase = 1; // 0: the reservation phase; 1: the priority phase optional int32 cost = 2; // } // For chunk enum CHUNK_OP_TYPE { - CHUNK_OP_DELETE = 0; // 删除 chunk - CHUNK_OP_READ = 1; // 读 chunk - CHUNK_OP_WRITE = 2; // 写 chunk + CHUNK_OP_DELETE = 0; // Delete chunk + CHUNK_OP_READ = 1; // Read chunk + CHUNK_OP_WRITE = 2; // Write chunk CHUNK_OP_READ_SNAP = 3; // read chunk snapshot - // TODO(wudemiao): 后期替换成CHUNK_OP_DELETE_SNAP_OR_CORRECT_SN, - // 保证和chunkserver的接口一致 + // TODO(wudemiao): replace with CHUNK_OP_DELETE_SNAP_OR_CORRECT_SN later + // to keep consistency with the chunkserver interface CHUNK_OP_DELETE_SNAP = 4; // delete chunk snapshot - CHUNK_OP_CREATE_CLONE = 5; // 创建clone chunk - CHUNK_OP_RECOVER = 6; // 恢复clone chunk - CHUNK_OP_PASTE = 7; // paste chunk 内部请求 + CHUNK_OP_CREATE_CLONE = 5; // Create clone chunk + CHUNK_OP_RECOVER = 6; // Recover clone chunk + CHUNK_OP_PASTE = 7; // paste chunk internal request CHUNK_OP_UNKNOWN = 8; // unknown Op CHUNK_OP_SCAN = 9; // scan oprequest }; -// read/write 的实际数据在 rpc 的 attachment 中 +// The actual data for read/write is carried in the rpc attachment message ChunkRequest { required CHUNK_OP_TYPE opType = 1; // for all - required uint32 logicPoolId = 2; // for all // logicPoolId 实际上 uint16,但是 proto 没有 uint16 + required uint32 logicPoolId = 2; // for all // logicPoolId is actually uint16, but proto does not have uint16 required uint32 copysetId = 3; // for all required uint64 chunkId = 4; // for all optional uint64 appliedIndex =
5; // for read optional uint32 offset = 6; // for read/write - optional uint32 size = 7; // for read/write/clone 读取数据大小/写入数据大小/创建快照请求中表示请求创建的chunk大小 + optional uint32 size = 7; // for read/write/clone: read size / write size; in a create-clone request, the size of the chunk to create optional QosRequestParas deltaRho = 8; // for read/write - optional uint64 sn = 9; // for write/read snapshot 写请求中表示文件当前版本号,读快照请求中表示请求的chunk的版本号 + optional uint64 sn = 9; // for write/read snapshot: in a write request, the current version number of the file; in a read-snapshot request, the version number of the requested chunk - optional uint64 correctedSn = 10; // for CreateCloneChunk/DeleteChunkSnapshotOrCorrectedSn 用于修改chunk的correctedSn + optional uint64 correctedSn = 10; // for CreateCloneChunk/DeleteChunkSnapshotOrCorrectedSn, used to modify the correctedSn of a chunk optional string location = 11; // for CreateCloneChunk optional string cloneFileSource = 12; // for write/read optional uint64 cloneFileOffset = 13; // for write/read @@ -72,28 +72,28 @@ }; enum CHUNK_OP_STATUS { - CHUNK_OP_STATUS_SUCCESS = 0; // 成功 - CHUNK_OP_STATUS_REDIRECTED = 1; // 不是 leader,重定向 - CHUNK_OP_STATUS_DISK_FAIL = 2; // 磁盘返回错误 - CHUNK_OP_STATUS_CRC_FAIL = 3; // CRC 校验失败 - CHUNK_OP_STATUS_INVALID_REQUEST = 4; // 请求参数不对 - CHUNK_OP_STATUS_NOSPACE = 5; // 空间不够 - CHUNK_OP_STATUS_COPYSET_NOTEXIST = 6; // copyset 不存在 - CHUNK_OP_STATUS_CHUNK_NOTEXIST = 7; // chunk或其快照文件不存在 - CHUNK_OP_STATUS_FAILURE_UNKNOWN = 8; // 其他错误 - CHUNK_OP_STATUS_OVERLOAD = 9; // 过载,表示服务端有过多请求未处理返回 - CHUNK_OP_STATUS_BACKWARD = 10; // 请求的版本落后当前chunk的版本 - CHUNK_OP_STATUS_CHUNK_EXIST = 11; // chunk已存在 + CHUNK_OP_STATUS_SUCCESS = 0; // Success + CHUNK_OP_STATUS_REDIRECTED = 1; // Not the leader, redirect + CHUNK_OP_STATUS_DISK_FAIL = 2; // Disk returned an error + CHUNK_OP_STATUS_CRC_FAIL = 3; // CRC check failed + CHUNK_OP_STATUS_INVALID_REQUEST = 4; // Invalid request parameters + CHUNK_OP_STATUS_NOSPACE = 5; // Insufficient space + CHUNK_OP_STATUS_COPYSET_NOTEXIST = 6; // Copyset does not exist + CHUNK_OP_STATUS_CHUNK_NOTEXIST = 7; // Chunk or its snapshot file does not exist + CHUNK_OP_STATUS_FAILURE_UNKNOWN = 8; // Other errors + CHUNK_OP_STATUS_OVERLOAD = 9; // Overloaded: the server has too many pending, unanswered requests + CHUNK_OP_STATUS_BACKWARD = 10; // The requested version falls behind the current chunk version + CHUNK_OP_STATUS_CHUNK_EXIST = 11; // Chunk already exists CHUNK_OP_STATUS_EPOCH_TOO_OLD = 12; // request epoch too old }; message ChunkResponse { required CHUNK_OP_STATUS status = 1; - optional string redirect = 2; // 自己不是 leader,重定向给 leader - optional uint64 appliedIndex = 3; // 返回当前最新的 committedIndex, 注意 read 和 write 都要返回 + optional string redirect = 2; // Not the leader, redirect to the leader + optional uint64 appliedIndex = 3; // Returns the latest committedIndex; note that it is returned for both read and write optional QosResponseParas phaseCost = 4; // for read/write - optional uint64 chunkSn = 5; // for GetChunkInfo 表示chunk文件版本号,0表示不存在 - optional uint64 snapSn = 6; // for GetChunkInfo 表示chunk文件快照的版本号,0表示不存在 + optional uint64 chunkSn = 5; // for GetChunkInfo: version number of the chunk file; 0 means it does not exist + optional uint64 snapSn = 6; // for GetChunkInfo: version number of the chunk file snapshot; 0 means it does not exist };
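The redirect comments above imply the client-side retry rule: on CHUNK_OP_STATUS_REDIRECTED, re-issue the request to the leader named in the response. A sketch over the protobuf-generated C++ bindings — the `send` callable is a hypothetical stand-in for the actual brpc call, which is omitted here:

    #include <functional>
    #include <string>
    #include "proto/chunk.pb.h"  // generated from the messages above

    using curve::chunkserver::ChunkRequest;
    using curve::chunkserver::ChunkResponse;

    // Re-issues a request while the responding peer says it is not the leader.
    ChunkResponse SendWithRedirect(
        std::string peer, const ChunkRequest& request,
        const std::function<ChunkResponse(const std::string&,
                                          const ChunkRequest&)>& send) {
        ChunkResponse resp = send(peer, request);
        while (resp.status() == curve::chunkserver::CHUNK_OP_STATUS_REDIRECTED &&
               resp.has_redirect()) {
            peer = resp.redirect();  // leader address from the response
            resp = send(peer, request);
        }
        return resp;
    }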
message GetChunkInfoRequest { @@ -104,8 +104,8 @@ message GetChunkInfoResponse { required CHUNK_OP_STATUS status = 1; - optional string redirect = 2; // 自己不是 leader,重定向给 leader - repeated uint64 chunkSn = 3; // chunk 版本号 和 snapshot 版本号 + optional string redirect = 2; // Not the leader, redirect to the leader + repeated uint64 chunkSn = 3; // Chunk version number and snapshot version number }; message GetChunkHashRequest { @@ -118,7 +118,7 @@ message GetChunkHashResponse { required CHUNK_OP_STATUS status = 1; - optional string hash = 2; // 能标志chunk数据状态的hash值,一般是crc32c + optional string hash = 2; // A hash value that identifies the state of the chunk data, usually crc32c }; message CreateS3CloneChunkRequest { @@ -131,7 +131,7 @@ message CreateS3CloneChunkResponse { required CHUNK_OP_STATUS status = 1; - optional string redirect = 2; // 自己不是 leader,重定向给 leader + optional string redirect = 2; // Not the leader, redirect to the leader }; message UpdateEpochRequest { diff --git a/proto/cli.proto b/proto/cli.proto index 46981c967d..5a0bdd89ff 100755 --- a/proto/cli.proto +++ b/proto/cli.proto @@ -20,12 +20,12 @@ package curve.chunkserver; option cc_generic_services = true; option go_package = "proto/cli"; -// 这里都用 logicPoolId, copysetId,进入 rpc service 之后,会转换成 string -// 类型的 groupId,在传给 raft +// logicPoolId and copysetId are used here; inside the rpc service they are +// converted into a string-typed groupId and passed to raft // | groupId | // | logicPoolId | copysetId | message AddPeerRequest { - required uint32 logicPoolId = 1; // logicPoolId 实际上 uint16,但是 proto 没有 uint16 + required uint32 logicPoolId = 1; // logicPoolId is actually uint16, but proto does not have uint16 required uint32 copysetId = 2; required string leader_id = 3; required string peer_id = 4; diff --git a/proto/cli2.proto b/proto/cli2.proto index 76416f7a9f..b41d00c322 100755 --- a/proto/cli2.proto +++ b/proto/cli2.proto @@ -23,17 +23,17 @@ package curve.chunkserver; option cc_generic_services = true; option go_package = "proto/cli2"; -// cli.proto 供老的使用保证 +// cli.proto is kept for legacy use message AddPeerRequest2 { - required uint32 logicPoolId = 1; // 逻辑池id - required uint32 copysetId = 2; // 复制组id + required uint32 logicPoolId = 1; // Logical pool ID + required uint32 copysetId = 2; // Copyset ID required common.Peer leader = 3; // leader - required common.Peer addPeer = 4; // 新增peer + required common.Peer addPeer = 4; // The peer to add } message AddPeerResponse2 { - repeated common.Peer oldPeers = 1; // 老配置 - repeated common.Peer newPeers = 2; // 新配置 + repeated common.Peer oldPeers = 1; // Old configuration + repeated common.Peer newPeers = 2; // New configuration } message RemovePeerRequest2 { @@ -87,11 +87,11 @@ message SnapshotAllResponse { message GetLeaderRequest2 { required uint32 logicPoolId = 1; required uint32 copysetId = 2; - optional common.Peer peer = 3; // 可以不指定peer查leader + optional common.Peer peer = 3; // The leader can be queried without specifying a peer } message GetLeaderResponse2 { - required common.Peer leader = 1; // 通过peer判空来判断是否返回leader + required common.Peer leader = 1; // Whether a leader was returned is indicated by whether the peer is empty } message ResetPeerRequest2 { diff --git a/proto/common.proto b/proto/common.proto index 3cae9f9e65..0dc409b609 100644 --- a/proto/common.proto +++ b/proto/common.proto @@ -21,13 +21,13 @@ package curve.common; option cc_generic_services = true;
option go_package = "proto/common"; -// 1. braft场景: id不使用,address为braft里面的PeerId,格式为{ip}:{port}:{index} -// 2. curve-raft场景:id是peer id,address为{ip}:{port} -// 当前chunkserver id就是peer id +// 1. In the braft scenario: 'id' is not used, and 'address' is the PeerId within braft, in the format {ip}:{port}:{index}. +// 2. In the curve-raft scenario: 'id' represents the peer id, and 'address' is in the format {ip}:{port}. +// The current chunkserver id is the peer id. message Peer { - optional uint64 id = 1; // peer id,全局唯一 -// optional bool isLearner = 2; // 是否是learner (暂时不支持) - optional string address = 3; // peer的地址信息 + optional uint64 id = 1; // Peer ID, globally unique +// optional bool isLearner = 2; // Whether it is a learner (not supported for now) + optional string address = 3; // Address information of the peer } message CopysetInfo { diff --git a/proto/copyset.proto b/proto/copyset.proto index fe3d271d53..10aab0485c 100755 --- a/proto/copyset.proto +++ b/proto/copyset.proto @@ -23,7 +23,7 @@ package curve.chunkserver; option cc_generic_services = true; option go_package = "proto/copyset"; -// copyset epoch message,用于epoch序列化和反序列化 +// copyset epoch message, for epoch serialization and deserialization message ConfEpoch { required uint32 logicPoolId = 1; required uint32 copysetId = 2; @@ -32,15 +32,15 @@ } message CopysetRequest { - // logicPoolId 实际上 uint16,但是 proto 没有 uint16 + // logicPoolId is actually uint16, but proto does not have uint16 required uint32 logicPoolId = 1; required uint32 copysetId = 2; - repeated string peerid = 3; // 当前复制组配置,可以为空 + repeated string peerid = 3; // Current replication group configuration; may be empty }; enum COPYSET_OP_STATUS { COPYSET_OP_STATUS_SUCCESS = 0; - COPYSET_OP_STATUS_EXIST = 1; // copyset node 已经存在 + COPYSET_OP_STATUS_EXIST = 1; // copyset node already exists COPYSET_OP_STATUS_COPYSET_NOTEXIST = 2; COPYSET_OP_STATUS_FAILURE_UNKNOWN = 3; COPYSET_OP_STATUS_COPYSET_IS_HEALTHY = 4; @@ -48,7 +48,7 @@ enum COPYSET_OP_STATUS { message CopysetResponse { optional COPYSET_OP_STATUS status = 1; - optional string redirect = 2; // 自己不是 leader,重定向给 leader + optional string redirect = 2; // If not the leader, redirect to the leader. };
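To make the fields above concrete, this is roughly how a CopysetRequest is populated through the protobuf-generated C++ API; the ids and addresses are made up, and the {ip}:{port}:{index} peer format follows the groupId comment in cli.proto:

    #include "proto/copyset.pb.h"  // generated from the messages above

    curve::chunkserver::CopysetRequest MakeCreateCopysetRequest() {
        curve::chunkserver::CopysetRequest request;
        request.set_logicpoolid(1);   // uint32 on the wire, uint16 in practice
        request.set_copysetid(100001);
        // Current replication group configuration; may be left empty.
        request.add_peerid("127.0.0.1:8200:0");
        request.add_peerid("127.0.0.1:8201:0");
        request.add_peerid("127.0.0.1:8202:0");
        return request;
    }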
message Copyset { @@ -69,27 +69,27 @@ message CopysetStatusRequest { required uint32 logicPoolId = 1; required uint32 copysetId = 2; required common.Peer peer = 3; - required bool queryHash = 4; // 考虑到计算copyset hash值是一个非常耗时的操作,所以设置一个bool变量可以选择不查 + required bool queryHash = 4; // Computing the copyset hash is very time-consuming, so this flag lets the caller skip it } -// 大部分字段只能是optional,因为copyset node可能不存在 +// Most fields can only be optional, as the copyset node may not exist message CopysetStatusResponse { - required COPYSET_OP_STATUS status = 1; // op状态 - optional uint32 state = 2; // copyset状态 + required COPYSET_OP_STATUS status = 1; // Op status + optional uint32 state = 2; // Copyset status optional common.Peer peer = 3; // peer optional common.Peer leader = 4; // leader - optional bool readOnly = 5; // 是否只读 - optional int64 term = 6; // 当前任期 - optional int64 committedIndex = 7; // 当前的committed index - optional int64 knownAppliedIndex = 8; // 当前copyset已知的applied index,当前peer可能未apply - optional int64 pendingIndex = 9; // 当前副本未决的op log index起始index - optional int64 pendingQueueSize = 10; // 当前副本未决的op log queue的长度 - optional int64 applyingIndex = 11; // 当前副本正在apply的op log index - optional int64 firstIndex = 12; // 当前副本第一条op log index(包括盘和memory) - optional int64 lastIndex = 13; // 当前副本最后一条op log index(包括盘和memory) - optional int64 diskIndex = 14; // 当前副本已经持久化的最大op log index(不包含memory) - optional uint64 epoch = 15; // 当前copyset配置版本 - optional string hash = 16; // 当前copyset的数据hash值 + optional bool readOnly = 5; // Whether the copyset is read-only + optional int64 term = 6; // Current term + optional int64 committedIndex = 7; // Current committed index + optional int64 knownAppliedIndex = 8; // Applied index known to the copyset; the current peer may not have applied it yet + optional int64 pendingIndex = 9; // Start index of the pending op log entries on the current replica + optional int64 pendingQueueSize = 10; // Length of the pending op log queue on the current replica + optional int64 applyingIndex = 11; // Op log index currently being applied by this replica + optional int64 firstIndex = 12; // The first op log index of the current replica (including disk and memory) + optional int64 lastIndex = 13; // The last op log index of the current replica (including disk and memory) + optional int64 diskIndex = 14; // The maximum op log index that the current replica has persisted (excluding memory) + optional uint64 epoch = 15; // Current copyset configuration version + optional string hash = 16; // The data hash value of the current copyset } service CopysetService { diff --git a/proto/heartbeat.proto b/proto/heartbeat.proto index d54723dfb8..292331defa 100644 --- a/proto/heartbeat.proto +++ b/proto/heartbeat.proto @@ -33,13 +33,13 @@ message CopySetInfo { required uint32 copysetId = 2; // copyset replicas, IP:PORT:ID, e.g. 127.0.0.1:8200:0 repeated common.Peer peers = 3; - // epoch, 用来标记配置变更,每变更一次,epoch会增加 + // epoch is used to mark configuration changes.
Every time a change is made, the epoch increases required uint64 epoch = 4; - // 该复制组的leader + // The leader of this replication group required common.Peer leaderPeer = 5; - // 配置变更相关信息 + // Configuration change related information optional ConfigChangeInfo configChangeInfo = 6; - // copyset的性能信息 + // Performance information of the copyset optional CopysetStatistics stats = 7; // whether the current copyset is on scaning optional bool scaning = 8; @@ -51,11 +51,11 @@ message ConfigChangeInfo { required common.Peer peer = 1; - // 配置变更的类型 + // Type of the configuration change required ConfigChangeType type = 2; - // 配置变更是否成功 + // Whether the configuration change was successful required bool finished = 3; - // 变更的error信息 + // Error information for the change optional CandidateError err = 4; }; @@ -81,13 +81,13 @@ message ChunkServerStatisticInfo { required uint32 writeRate = 2; required uint32 readIOPS = 3; required uint32 writeIOPS = 4; - // 已使用的chunk占用的磁盘空间 + // Disk space occupied by used chunks required uint64 chunkSizeUsedBytes = 5; - // chunkfilepool中未使用的chunk占用的磁盘空间 + // Disk space occupied by unused chunks in chunkfilepool required uint64 chunkSizeLeftBytes = 6; - // 回收站中chunk占用的磁盘空间 + // Disk space occupied by chunks in the recycle bin required uint64 chunkSizeTrashedBytes = 7; - // chunkfilepool的大小 + // The size of chunkfilepool optional uint64 chunkFilepoolSize = 8; }; @@ -100,27 +100,27 @@ message ChunkServerHeartbeatRequest { required DiskState diskState = 6; required uint64 diskCapacity = 7; required uint64 diskUsed = 8; - // 返回该chunk上所有copyset的信息 + // Returns information about all copysets on this chunkserver repeated CopySetInfo copysetInfos = 9; - // 时间窗口内该chunkserver上leader的个数 + // The number of leaders on this chunkserver within the time window required uint32 leaderCount = 10; - // 时间窗口内该chunkserver上copyset的个数 + // The number of copysets on this chunkserver within the time window required uint32 copysetCount = 11; - // chunkServer相关的统计信息 + // ChunkServer related statistical information optional ChunkServerStatisticInfo stats = 12; optional string version = 13; }; enum ConfigChangeType { - // 配置变更命令: leader转换 + // Configuration change command: leader transfer TRANSFER_LEADER = 1; - // 配置变更命令: 复制组增加一个成员 + // Configuration change command: Add a member to the replication group ADD_PEER = 2; - // 配置变更命令: 复制组删除一个成员 + // Configuration change command: Remove a member from the replication group REMOVE_PEER = 3; - // 配置变更命令: 没有配置变更 + // Configuration change command: No configuration change NONE = 4; - // 配置变更命令:change复制组一个成员 + // Configuration change command: change a member of the replication group CHANGE_PEER = 5; // start scan on the peer START_SCAN_PEER = 6; @@ -134,40 +134,40 @@ message CopySetConf { repeated common.Peer peers = 3; required uint64 epoch = 4; optional ConfigChangeType type = 5; - // configchangeItem 是目标节点 - // 对于TRANSFER_LEADER: 表示目标节点; 对于ADD_PEER: 表示待加入节点 - // 对于REMOVE_PEER: 表示待删除节点; 对于CHANGE_PEER: 表示待加入节点 + // configchangeItem is the target node + // For TRANSFER_LEADER: the target node; For ADD_PEER: the node to be added + // For REMOVE_PEER: the node to be deleted; For CHANGE_PEER: the node to be added // SCAN_PEER: to scan the node optional common.Peer configchangeItem = 6; - // oldPeer, 这个只在ConfigChangeType=对于CHANGE_PEER的情况下会赋值, - // 表示待删除节点。 - // chunkserver收到CHANGE_PEER,根据peers,configchangeItem,oldPeer拼出新的conf + // oldPeer is only assigned when ConfigChangeType is CHANGE_PEER, + // and represents the node to be deleted. + // On receiving CHANGE_PEER, the chunkserver assembles the new conf from peers, configchangeItem and oldPeer optional common.Peer oldPeer = 7; };
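The oldPeer comment above reduces CHANGE_PEER handling to a simple set operation: the new conf is the reported peers with oldPeer removed and configchangeItem added. A sketch of that assembly, with peers represented as address strings for brevity (the real code operates on common.Peer messages inside the chunkserver heartbeat handling):

    #include <string>
    #include <vector>

    // Assemble the new conf for CHANGE_PEER from the fields of CopySetConf.
    std::vector<std::string> AssembleNewConf(
        const std::vector<std::string>& peers,
        const std::string& oldPeer,             // the node to be deleted
        const std::string& configchangeItem) {  // the node to be added
        std::vector<std::string> newConf;
        for (const auto& p : peers) {
            if (p != oldPeer) {
                newConf.push_back(p);
            }
        }
        newConf.push_back(configchangeItem);
        return newConf;
    }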
enum HeartbeatStatusCode { - // 正常返回 + // Normal return hbOK = 0; - // 必要的参数为初始化 + // Required parameters are not initialized hbParamUnInitialized = 1; - // chunkserver不在topology中 + // Chunkserver is not in the topology hbChunkserverUnknown = 2; - // chunkserver状态为retired + // Chunkserver status is retired hbChunkserverRetired = 3; - // chunkserver的ip和port与topology中的不匹配 + // The IP and port of the chunkserver do not match those in the topology hbChunkserverIpPortNotMatch = 4; - // chunkserver的token不匹配 + // Chunkserver token mismatch hbChunkserverTokenNotMatch = 5; - // 无copyset上报 + // No copyset reported hbRequestNoCopyset = 6; - // copyset转换为topology格式失败 + // Copyset conversion to topology format failed hbAnalyseCopysetError = 7; } message ChunkServerHeartbeatResponse { - // 返回需要进行变更的copyset的信息 + // Returns the copysets that need to be changed repeated CopySetConf needUpdateCopysets = 1; - // 错误码 + // Error code optional HeartbeatStatusCode statusCode = 2; }; diff --git a/proto/nameserver2.proto b/proto/nameserver2.proto index 85947d96ad..57b8a80c3d 100644 --- a/proto/nameserver2.proto +++ b/proto/nameserver2.proto @@ -31,18 +31,18 @@ enum FileType { }; enum FileStatus { - // 文件创建完成 + // File creation completed kFileCreated = 0; - // 文件删除中 + // The file is being deleted kFileDeleting = 1; - // 文件正在克隆 + // File clone in progress kFileCloning = 2; - // 文件元数据安装完毕 + // File metadata installation completed kFileCloneMetaInstalled = 3; - // 文件克隆完成 + // File cloning completed kFileCloned = 4; - // 文件正在被克隆 + // The file is being cloned (as a clone source) kFileBeingCloned = 5; } @@ -78,15 +78,15 @@ message FileInfo { optional uint64 ctime = 9; optional uint64 seqNum = 10; optional FileStatus fileStatus = 11; - //用于文件转移到回收站的情况下恢复场景下的使用, - //RecycleBin(回收站)目录下使用/其他场景下不使用 + // Used to restore a file that has been moved to the recycle bin; + // set only under the RecycleBin directory, unused elsewhere optional string originalFullPathName = 12; - // cloneSource 当前用于存放克隆源(当前主要用于curvefs) - // 后期可以考虑存放 s3相关信息 + // cloneSource currently stores the clone source (mainly used by curvefs); + // storing s3-related information can be considered later optional string cloneSource = 13; - // cloneLength 克隆源文件的长度,用于clone过程中进行extent + // cloneLength: length of the clone source file, used to extend the file during cloning optional uint64 cloneLength = 14; optional uint64 stripeUnit = 15; optional uint64 stripeCount = 16; @@ -99,68 +99,68 @@ // status code enum StatusCode { - // 执行成功 + // Execution successful kOK = 0; - // 文件已存在 + // File already exists kFileExists = 101; - // 文件不存在 + // File does not exist kFileNotExists = 102; - // 非目录类型 + // Not a directory kNotDirectory = 103; - // 传入参数错误 + // Invalid parameter kParaError = 104; - // 缩小文件,目前不支持缩小文件 + // Shrinking a file is not supported yet kShrinkBiggerFile = 105; - // 扩容单位错误,非segment size整数倍 + // Extend unit error: not an integer multiple of the segment size kExtentUnitError = 106; - // segment未分配 + // Segment not allocated kSegmentNotAllocated = 107; - // segment分配失败 + // Segment allocation failed kSegmentAllocateError = 108; - // 目录不存在 + // Directory does not exist kDirNotExist = 109; - // 功能不支持 + // Function not supported kNotSupported = 110; - // owner认证失败 + // Owner authentication failed kOwnerAuthFail = 111; - //
diff --git a/proto/nameserver2.proto b/proto/nameserver2.proto
index 85947d96ad..57b8a80c3d 100644
--- a/proto/nameserver2.proto
+++ b/proto/nameserver2.proto
@@ -31,18 +31,18 @@ enum FileType {
};

enum FileStatus {
-    // 文件创建完成
+    // File creation completed
    kFileCreated = 0;
-    // 文件删除中
+    // File is being deleted
    kFileDeleting = 1;
-    // 文件正在克隆
+    // File is being cloned
    kFileCloning = 2;
-    // 文件元数据安装完毕
+    // File metadata installation completed
    kFileCloneMetaInstalled = 3;
-    // 文件克隆完成
+    // File cloning completed
    kFileCloned = 4;
-    // 文件正在被克隆
+    // The file is being cloned (used as a clone source)
    kFileBeingCloned = 5;
}

@@ -78,15 +78,15 @@ message FileInfo {
    optional uint64 ctime = 9;
    optional uint64 seqNum = 10;
    optional FileStatus fileStatus = 11;
-    //用于文件转移到回收站的情况下恢复场景下的使用,
-    //RecycleBin(回收站)目录下使用/其他场景下不使用
+    // Used when recovering a file that has been moved to the recycle bin;
+    // only set for files under the RecycleBin directory, unused in other scenarios
    optional string originalFullPathName = 12;

-    // cloneSource 当前用于存放克隆源(当前主要用于curvefs)
-    // 后期可以考虑存放 s3相关信息
+    // cloneSource currently stores the clone source (mainly used for curvefs);
+    // storing s3-related information may be considered later
    optional string cloneSource = 13;

-    // cloneLength 克隆源文件的长度,用于clone过程中进行extent
+    // cloneLength: the length of the clone source file, used to extend the file during cloning
    optional uint64 cloneLength = 14;

    optional uint64 stripeUnit = 15;
    optional uint64 stripeCount = 16;
@@ -99,68 +99,68 @@ message FileInfo {

// status code
enum StatusCode {
-    // 执行成功
+    // Execution successful
    kOK = 0;
-    // 文件已存在
+    // File already exists
    kFileExists = 101;
-    // 文件不存在
+    // File does not exist
    kFileNotExists = 102;
-    // 非目录类型
+    // Not a directory
    kNotDirectory = 103;
-    // 传入参数错误
+    // Invalid parameter
    kParaError = 104;
-    // 缩小文件,目前不支持缩小文件
+    // Shrinking a file is currently not supported
    kShrinkBiggerFile = 105;
-    // 扩容单位错误,非segment size整数倍
+    // Invalid extend unit: not an integer multiple of the segment size
    kExtentUnitError = 106;
-    // segment未分配
+    // Segment not allocated
    kSegmentNotAllocated = 107;
-    // segment分配失败
+    // Segment allocation failed
    kSegmentAllocateError = 108;
-    // 目录不存在
+    // Directory does not exist
    kDirNotExist = 109;
-    // 功能不支持
+    // Operation not supported
    kNotSupported = 110;
-    // owner认证失败
+    // Owner authentication failed
    kOwnerAuthFail = 111;
-    // 目录非空
+    // Directory is not empty
    kDirNotEmpty = 112;
-    // 文件已处于快照中
+    // The file is already under snapshot
    kFileUnderSnapShot = 120;
-    // 文件不在快照中
+    // The file is not under snapshot
    kFileNotUnderSnapShot = 121;
-    // 快照删除中
+    // Snapshot deletion in progress
    kSnapshotDeleting = 122;
-    // 快照文件不存在
+    // The snapshot file does not exist
    kSnapshotFileNotExists = 123;
-    // 快照文件删除失败
+    // Snapshot file deletion failed
    kSnapshotFileDeleteError = 124;
-    // session不存在
+    // Session does not exist
    kSessionNotExist = 125;
-    // 文件已被占用
+    // The file is already in use
    kFileOccupied = 126;
    kCloneFileNameIllegal = 127;
    kCloneStatusNotMatch = 128;
-    // 文件删除失败
+    // File deletion failed
    kCommonFileDeleteError = 129;
-    // 文件id不匹配
+    // File ID mismatch
    kFileIdNotMatch = 130;
-    // 文件在删除中
+    // The file is being deleted
    kFileUnderDeleting = 131;
-    // 文件长度不符合要求
+    // The file length does not meet the requirements
    kFileLengthNotSupported = 132;
-    // 文件正在被克隆
+    // The file to be deleted is being cloned
    kDeleteFileBeingCloned = 133;
-    // client版本不匹配
+    // Client version mismatch
    kClientVersionNotMatch = 134;
-    // snapshot功能禁用中
+    // The snapshot feature is disabled
    kSnapshotFrozen = 135;
-    // 快照克隆服务连不上
+    // Failed to connect to the snapshot clone service
    kSnapshotCloneConnectFail = 136;
-    // 快照克隆服务未初始化
+    // The snapshot clone service is not initialized
    kSnapshotCloneServerNotInit = 137;
    // recover file status is CloneMetaInstalled
    kRecoverFileCloneMetaInstalled = 138;
@@ -170,9 +170,9 @@ enum StatusCode {
    kEpochTooOld = 140;
    // poolset doesn't exist
    kPoolsetNotExist = 141;
-    // 元数据存储错误
+    // Metadata storage error
    kStorageError = 501;
-    // 内部错误
+    // Internal error
    KInternalError = 502;
};

@@ -311,20 +311,20 @@ message ExtendFileResponse {
}

message ChangeOwnerRequest {
-    // 需要变更owner的文件的fileName
+    // The fileName of the file whose owner is to be changed
    required string fileName = 1;
-    // 希望文件owner变更后的新的owner
+    // The expected new owner after the owner change
    required string newOwner = 2;
-    // ChangerOwner接口只能通过root权限进行调用,需要传入root权限的owner
+    // The ChangeOwner interface can only be called with root permission, so an owner with root permission must be passed in
    required string rootOwner = 3;
-    // 对root身份进行校验的的signature
+    // The signature for verifying the root identity
    required string signature = 4;
-    // 用来在mds端重新计算signature
+    // Used to recalculate the signature on the mds side
    required uint64 date = 5;
}

-// 返回ChangeOwner的执行结果,成功返回statusCode::kOK
-// 失败可能返回kFileNotExists、kOwnerAuthFail、kFileOccupied、kStorageError等,可能返回的错误码将来继续补充
+// Returns the execution result of ChangeOwner; statusCode::kOK on success.
+// On failure it may return kFileNotExists, kOwnerAuthFail, kFileOccupied, kStorageError, etc.; more error codes may be added in the future
message ChangeOwnerResponse {
    required StatusCode statusCode = 1;
}
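A hedged sketch (outside the diff) of the ChangeOwner contract the comments above describe: the caller supplies a root-privileged owner plus a date and a signature that the mds can recompute. Here nameserver2_pb2 is assumed to be the protoc-generated module, and compute_root_signature is a hypothetical placeholder, since the actual signing scheme is defined elsewhere in the codebase:

    import time
    import nameserver2_pb2  # assumption: module generated by protoc from nameserver2.proto

    def compute_root_signature(root_owner, date):
        # hypothetical placeholder: the real signature is derived from the root
        # credentials so that the mds can recompute and verify it
        raise NotImplementedError

    def build_change_owner_request(file_name, new_owner):
        req = nameserver2_pb2.ChangeOwnerRequest()
        req.fileName = file_name        # file whose owner is to be changed
        req.newOwner = new_owner        # expected owner after the change
        req.rootOwner = "root"          # must be an owner with root permission
        req.date = int(time.time() * 1000000)
        req.signature = compute_root_signature(req.rootOwner, req.date)
        return req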
@@ -395,8 +395,8 @@ message CheckSnapShotStatusRequest {
    required uint64 date = 5;
}

-// statusCode为kOK时,fileStatus和progress才会赋值
-// 只有fileStatus是kFileDeleting时,progress表示快照文件删除进度,否则progress返回0
+// fileStatus and progress are only set when statusCode is kOK
+// progress indicates the snapshot file deletion progress only when fileStatus is kFileDeleting; otherwise progress returns 0
message CheckSnapShotStatusResponse {
    required StatusCode statusCode = 1;
    optional FileStatus fileStatus = 2;
@@ -431,7 +431,7 @@ message OpenFileRequest {
    optional string clientVersion = 5;
};

-// statusCode返回值,详见StatusCode定义:
+// For possible statusCode values, see the StatusCode definition:
// StatusCode::kOK
// StatusCode::kFileNotExists
// StatusCode::kStorageError
@@ -456,7 +456,7 @@ message CloseFileRequest {
    optional uint32 clientPort = 7;
};

-// statusCode返回值,详见StatusCode定义:
+// For possible statusCode values, see the StatusCode definition:
// StatusCode::kOK
// StatusCode::kFileNotExists
// StatusCode::kStorageError
@@ -478,7 +478,7 @@ message ReFreshSessionRequest {
    optional uint32 clientPort = 8;
}

-// statusCode返回值,详见StatusCode定义:
+// For possible statusCode values, see the StatusCode definition:
// StatusCode::kOK
// StatusCode::kFileNotExists
// StatusCode::kStorageError
@@ -531,9 +531,9 @@ message GetAllocatedSizeRequest {

message GetAllocatedSizeResponse {
    required StatusCode statusCode = 1;
-    // 文件或目录的分配大小
+    // Allocated size of the file or directory
    optional uint64 allocatedSize = 2;
-    // key是逻辑池id,value是分配大小
+    // Key is the logical pool id, value is the allocated size
    map<uint32, uint64> allocSizeMap = 3;
}

@@ -543,7 +543,7 @@ message GetFileSizeRequest {

message GetFileSizeResponse {
    required StatusCode statusCode = 1;
-    // 文件或目录的file length
+    // The file length of the file or directory
    optional uint64 fileSize = 2;
}

diff --git a/proto/schedule.proto b/proto/schedule.proto
index 2dde693556..9c92bb4ef5 100644
--- a/proto/schedule.proto
+++ b/proto/schedule.proto
@@ -34,7 +34,7 @@ message RapidLeaderScheduleResponse {
    required sint32 statusCode = 1;
}

-// 如果chunkServerID为空,则返回所有chunkserver的恢复状态
+// If chunkServerID is empty, return the recovery status of all chunkservers
message QueryChunkServerRecoverStatusRequest {
    repeated uint32 chunkServerID = 1;
}

diff --git a/proto/topology.proto b/proto/topology.proto
index 2057cafe2a..9e002f2c3d 100644
--- a/proto/topology.proto
+++ b/proto/topology.proto
@@ -415,7 +415,7 @@ message CreateLogicalPoolRequest {
    required LogicalPoolType type = 4;
    required bytes redundanceAndPlaceMentPolicy = 5;    //json body
    required bytes userPolicy = 6;    //json body
-    optional uint32 scatterWidth = 7;    //生成copyset依据的scatterWidth平均值
+    optional uint32 scatterWidth = 7;    // The average scatterWidth on which copyset generation is based
    optional AllocateStatus status = 8;
}

diff --git a/robot/Resources/keywords/deploy.py b/robot/Resources/keywords/deploy.py
index 93d7926a45..0a556c7021 100644
--- a/robot/Resources/keywords/deploy.py
+++ b/robot/Resources/keywords/deploy.py
@@ -9,6 +9,7 @@
import random
import time

+
def add_config():
    etcd = []
    for host in config.etcd_list:
@@ -16,168 +17,183 @@ def add_config():
    etcd_addrs = ",".join(etcd)
    # add mds config
    for host in config.mds_list:
-        ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
+        ssh = 
shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo rm *.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/mds.conf %s:~/"%\ - (config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/mds.conf %s:~/" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) - ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s:6666/g' mds.conf"%host + ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s:6666/g' mds.conf" % host rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host - #change offline time - ori_cmd = R"sed -i 's/mds.heartbeat.offlinetimeoutMs=.*/mds.heartbeat.offlinetimeoutMs=%d/g' mds.conf"%(config.offline_timeout*1000) + assert rs[3] == 0, "change host %s mds config fail" % host + # change offline time + ori_cmd = R"sed -i 's/mds.heartbeat.offlinetimeoutMs=.*/mds.heartbeat.offlinetimeoutMs=%d/g' mds.conf" % ( + config.offline_timeout*1000) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host - #change clean_follower_afterMs time - ori_cmd = R"sed -i 's/mds.heartbeat.clean_follower_afterMs=.*/mds.heartbeat.clean_follower_afterMs=%d/g' mds.conf"%(300000) + assert rs[3] == 0, "change host %s mds config fail" % host + # change clean_follower_afterMs time + ori_cmd = R"sed -i 's/mds.heartbeat.clean_follower_afterMs=.*/mds.heartbeat.clean_follower_afterMs=%d/g' mds.conf" % ( + 300000) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host - #change scheduler time + assert rs[3] == 0, "change host %s mds config fail" % host + # change scheduler time ori_cmd = R"sed -i 's/mds.copyset.scheduler.intervalSec=.*/mds.copyset.scheduler.intervalSec=0/g' mds.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host + assert rs[3] == 0, "change host %s mds config fail" % host ori_cmd = R"sed -i 's/mds.replica.scheduler.intervalSec=.*/mds.replica.scheduler.intervalSec=0/g' mds.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host + assert rs[3] == 0, "change host %s mds config fail" % host # ori_cmd = R"sed -i 's/mds.recover.scheduler.intervalSec=.*/mds.recover.scheduler.intervalSec=0/g' mds.conf" # rs = shell_operator.ssh_exec(ssh, ori_cmd) # assert rs[3] == 0,"change host %s mds config fail"%host ori_cmd = R"sed -i 's/mds.leader.scheduler.intervalSec=.*/mds.leader.scheduler.intervalSec=5/g' mds.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host + assert rs[3] == 0, "change host %s mds config fail" % host # change topology update time ori_cmd = R"sed -i 's/mds.topology.TopologyUpdateToRepoSec=.*/mds.topology.TopologyUpdateToRepoSec=1/g' mds.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host - #add mysql conf - ori_cmd = R"sed -i 's/mds.DbUrl=localhost/mds.DbUrl=%s/g' mds.conf"%(config.abnormal_db_host) + assert rs[3] == 0, "change host %s mds config fail" % host + # add mysql conf + ori_cmd = R"sed -i 's/mds.DbUrl=localhost/mds.DbUrl=%s/g' mds.conf" % ( + config.abnormal_db_host) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host - #add etcd conf - ori_cmd = R"sed -i 's/mds.etcd.endpoint=127.0.0.1:2379/mds.etcd.endpoint=%s/g' 
mds.conf"%(etcd_addrs) + assert rs[3] == 0, "change host %s mds config fail" % host + # add etcd conf + ori_cmd = R"sed -i 's/mds.etcd.endpoint=127.0.0.1:2379/mds.etcd.endpoint=%s/g' mds.conf" % ( + etcd_addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host + assert rs[3] == 0, "change host %s mds config fail" % host ori_cmd = "sudo mv mds.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s mds conf fail"%host + assert rs[3] == 0, "mv %s mds conf fail" % host # add client config mds_addrs = [] for host in config.mds_list: mds_addrs.append(host + ":6666") addrs = ",".join(mds_addrs) for host in config.client_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo rm *.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/client.conf %s:~/"%\ - (config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/client.conf %s:~/" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) - ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' client.conf"%(addrs) + ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' client.conf" % ( + addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s client config fail"%host -#将client.conf配置成py_client.conf(主机用),方便client复现死锁问题 + assert rs[3] == 0, "change host %s client config fail" % host + # Configure client.conf to py_client.conf(for the host) to facilitate client replication of deadlock issues ori_cmd = "sudo mv client.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "sudo cp /etc/curve/client.conf /etc/curve/py_client.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s client conf fail"%host + assert rs[3] == 0, "mv %s client conf fail" % host # add chunkserver config addrs = ",".join(mds_addrs) for host in config.chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo rm *.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/chunkserver.conf.example %s:~/chunkserver.conf"%\ - (config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/chunkserver.conf.example %s:~/chunkserver.conf" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) - #change global ip - ori_cmd = R"sed -i 's/global.ip=127.0.0.1/global.ip=%s/g' chunkserver.conf"%host + # change global ip + ori_cmd = R"sed -i 's/global.ip=127.0.0.1/global.ip=%s/g' chunkserver.conf" % host rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - #change global subnet - subnet=host+"/24" - ori_cmd = R"sed -i 's#global.subnet=127.0.0.0/24#global.subnet=%s#g' chunkserver.conf"%subnet + assert rs[3] == 0, "change host %s chunkserver config fail" % host + # change global subnet + subnet = host+"/24" + ori_cmd = R"sed -i 's#global.subnet=127.0.0.0/24#global.subnet=%s#g' chunkserver.conf" % subnet rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - #change mds ip - ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' 
chunkserver.conf"%(addrs) + assert rs[3] == 0, "change host %s chunkserver config fail" % host + # change mds ip + ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' chunkserver.conf" % ( + addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - + assert rs[3] == 0, "change host %s chunkserver config fail" % host + ori_cmd = R"sed -i 's/chunkserver.snapshot_throttle_throughput_bytes=.*/chunkserver.snapshot_throttle_throughput_bytes=104857600/g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - + assert rs[3] == 0, "change host %s chunkserver config fail" % host + ori_cmd = R"sed -i 's/trash.expire_afterSec=.*/trash.expire_afterSec=0/g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - + assert rs[3] == 0, "change host %s chunkserver config fail" % host + ori_cmd = R"sed -i 's/trash.scan_periodSec=.*/trash.scan_periodSec=10/g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - #open use snapshot + assert rs[3] == 0, "change host %s chunkserver config fail" % host + # open use snapshot ori_cmd = R"sed -i 's/clone.disable_curve_client=true/clone.disable_curve_client=false/g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host + assert rs[3] == 0, "change host %s chunkserver config fail" % host ori_cmd = R"sed -i 's/clone.disable_s3_adapter=true/clone.disable_s3_adapter=false/g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host + assert rs[3] == 0, "change host %s chunkserver config fail" % host ori_cmd = R"sed -i 's#curve.config_path=conf/cs_client.conf#curve.config_path=/etc/curve/conf/cs_client.conf#g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host + assert rs[3] == 0, "change host %s chunkserver config fail" % host ori_cmd = R"sed -i 's#s3.config_path=conf/s3.conf#s3.config_path=/etc/curve/conf/s3.conf#g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host + assert rs[3] == 0, "change host %s chunkserver config fail" % host ori_cmd = "sudo mv chunkserver.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s chunkserver conf fail"%host + assert rs[3] == 0, "mv %s chunkserver conf fail" % host # add s3 and client conf\cs_client conf client_host = random.choice(config.client_list) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s:/etc/curve/client.conf ."%\ - (config.pravie_key_path,client_host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s:/etc/curve/client.conf ." 
%\ + (config.pravie_key_path, client_host) shell_operator.run_exec2(cmd) for host in config.chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 client.conf conf/cs_client.conf %s:~/"%\ - (config.pravie_key_path,host) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 client.conf conf/cs_client.conf %s:~/" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) - ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' cs_client.conf"%(addrs) + ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' cs_client.conf" % ( + addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s cs_client config fail"%host + assert rs[3] == 0, "change host %s cs_client config fail" % host ori_cmd = "sudo mv client.conf /etc/curve/conf && sudo mv cs_client.conf /etc/curve/conf/" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s client conf fail"%host + assert rs[3] == 0, "mv %s client conf fail" % host for host in config.snap_server_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 client.conf conf/snapshot_clone_server.conf conf/snap_client.conf %s:~/"%\ - (config.pravie_key_path,host) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 client.conf conf/snapshot_clone_server.conf conf/snap_client.conf %s:~/" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) ori_cmd = "sed -i \"s/client.config_path=\S*/client.config_path=\/etc\/curve\/snap_client.conf/\" snapshot_clone_server.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s snapshot config fail"%host - #修改snapshot_clone_server.conf etcd配置 - ori_cmd = "sed -i \"s/etcd.endpoint=\S*/etcd.endpoint=%s/g\" snapshot_clone_server.conf"%(etcd_addrs) + assert rs[3] == 0, "change host %s snapshot config fail" % host + # Modify snapshot_clone_server.conf etcd configuration + ori_cmd = "sed -i \"s/etcd.endpoint=\S*/etcd.endpoint=%s/g\" snapshot_clone_server.conf" % ( + etcd_addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s snapshot config fail"%host - #修改数据库配置项 - ori_cmd = R"sed -i 's/metastore.db_address=\S*/metastore.db_address=%s/g' snapshot_clone_server.conf"%(config.abnormal_db_host) + assert rs[3] == 0, "change host %s snapshot config fail" % host + # Modifying Database Configuration Items + ori_cmd = R"sed -i 's/metastore.db_address=\S*/metastore.db_address=%s/g' snapshot_clone_server.conf" % ( + config.abnormal_db_host) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s snapshot clone server config fail"%host + assert rs[3] == 0, "change host %s snapshot clone server config fail" % host ori_cmd = "sed -i \"s/s3.config_path=\S*/s3.config_path=\/etc\/curve\/s3.conf/\" snapshot_clone_server.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s snapshot config fail"%host - ori_cmd = "sed -i \"s/server.address=\S*/server.address=%s:5556/g\" snapshot_clone_server.conf"%host + assert rs[3] == 0, "change host %s snapshot config fail" % host + ori_cmd = "sed -i \"s/server.address=\S*/server.address=%s:5556/g\" snapshot_clone_server.conf" % host rs = shell_operator.ssh_exec(ssh, ori_cmd) - 
assert rs[3] == 0,"change host %s snapshot config fail"%host -#change snap_client.conf - ori_cmd = "sed -i \"s/mds.listen.addr=\S*/mds.listen.addr=%s/g\" snap_client.conf"%(addrs) + assert rs[3] == 0, "change host %s snapshot config fail" % host +# change snap_client.conf + ori_cmd = "sed -i \"s/mds.listen.addr=\S*/mds.listen.addr=%s/g\" snap_client.conf" % ( + addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s snapshot config fail"%host + assert rs[3] == 0, "change host %s snapshot config fail" % host ori_cmd = "sudo mv snapshot_clone_server.conf /etc/curve/ && sudo mv snap_client.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s snapshot_clone_server conf fail"%host + assert rs[3] == 0, "mv %s snapshot_clone_server conf fail" % host ori_cmd = "sudo mv client.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) @@ -187,29 +203,32 @@ def add_config(): snap_addrs_list.append(host + ":5556") snap_addrs = ",".join(snap_addrs_list) for host in config.mds_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, ori_cmd) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/tools.conf %s:~/"%\ - (config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/tools.conf %s:~/" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) - ori_cmd = R"sed -i 's/mdsAddr=127.0.0.1:6666/mdsAddr=%s/g' tools.conf"%addrs + ori_cmd = R"sed -i 's/mdsAddr=127.0.0.1:6666/mdsAddr=%s/g' tools.conf" % addrs rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s tools config fail"%host - ori_cmd = R"sed -i 's/etcdAddr=127.0.0.1:2379/etcdAddr=%s/g' tools.conf"%etcd_addrs + assert rs[3] == 0, "change host %s tools config fail" % host + ori_cmd = R"sed -i 's/etcdAddr=127.0.0.1:2379/etcdAddr=%s/g' tools.conf" % etcd_addrs rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s tools config fail"%host - ori_cmd = R"sed -i 's/snapshotCloneAddr=127.0.0.1:5555/snapshotCloneAddr=%s/g' tools.conf"%snap_addrs + assert rs[3] == 0, "change host %s tools config fail" % host + ori_cmd = R"sed -i 's/snapshotCloneAddr=127.0.0.1:5555/snapshotCloneAddr=%s/g' tools.conf" % snap_addrs rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s tools config fail"%host + assert rs[3] == 0, "change host %s tools config fail" % host ori_cmd = "sudo mv tools.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s tools conf fail"%host + assert rs[3] == 0, "mv %s tools conf fail" % host + def clean_env(): - host_list = config.client_list + config.mds_list + config.chunkserver_list + host_list = config.client_list + config.mds_list + config.chunkserver_list host_list = list(set(host_list)) for host in host_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd1 = "sudo tc qdisc del dev bond0.106 root" shell_operator.ssh_exec(ssh, ori_cmd1) ori_cmd2 = "ps -ef|grep -v grep | grep memtester | awk '{print $2}'| sudo xargs kill -9" @@ -217,33 +236,42 @@ def clean_env(): ori_cmd3 = "ps -ef|grep -v grep | grep cpu_stress.py | awk '{print $2}'| sudo xargs kill -9" shell_operator.ssh_exec(ssh, ori_cmd3) + def destroy_mds(): for host in config.mds_list: - ssh = 
shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep -v sudo | grep curve-mds | awk '{print $2}' | sudo xargs kill -9" shell_operator.ssh_exec(ssh, ori_cmd) + def destroy_etcd(): for host in config.etcd_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep etcd | awk '{print $2}' | sudo xargs kill -9" shell_operator.ssh_exec(ssh, ori_cmd) + def destroy_snapshotclone_server(): for host in config.snap_server_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep |grep -v sudo | grep snapshotcloneserver | awk '{print $2}' | sudo xargs kill -9" shell_operator.ssh_exec(ssh, ori_cmd) + def stop_nebd(): for host in config.client_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep nebd | awk '{print $2}' | sudo xargs kill -9" rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[3] != 0: logger.debug("snapshotcloneserver not up") continue - + + def initial_chunkserver(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) try: @@ -257,22 +285,24 @@ def initial_chunkserver(host): assert rs[1] == [], "kill chunkserver fail" ori_cmd = "sudo find /data/ -name chunkserver.dat -exec rm -rf {} \;" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.debug("delete dat ,return is %s"%rs[1]) - assert rs[3] == 0,"rm %s dat fail"%host + logger.debug("delete dat ,return is %s" % rs[1]) + assert rs[3] == 0, "rm %s dat fail" % host ori_cmd = "sh recycle_chunks.sh -d /data -chunks chunkfilepool -wals chunkfilepool" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.debug("recycle chunk ,return is %s"%rs[1]) - assert rs[3] == 0,"recycle %s chunk fail"%host + logger.debug("recycle chunk ,return is %s" % rs[1]) + assert rs[3] == 0, "recycle %s chunk fail" % host ssh.close() except Exception as e: logger.error("%s" % e) raise return 0 + def recycle_chunk(): cmd = "ansible-playbook -i curve/curve-ansible/server.ini curve/curve-ansible/clean_curve.yml --tags chunkserver" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"ansible clean chunk fail" + assert ret == 0, "ansible clean chunk fail" + def drop_all_chunkserver_dat(): thread = [] @@ -286,34 +316,39 @@ def drop_all_chunkserver_dat(): logger.debug("drop cs dat get result is %d" % t.get_result()) assert t.get_result() == 0 + def destroy_test_env(): try: cmd = "cp robot/init_env.sh . 
&& bash init_env.sh" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"init env fail" + assert ret == 0, "init env fail" host = config.client_list[0] except Exception: logger.error("init env fail.") raise + def change_cfg(): try: - cmd = "bash %s/change_cfg.sh"%config.fs_cfg_path + cmd = "bash %s/change_cfg.sh" % config.fs_cfg_path ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"change fs cfg fail" + assert ret == 0, "change fs cfg fail" except Exception: logger.error("change fs cfg fail.") raise + def destroy_curvefs(): try: test_client = config.fs_test_client[0] - ssh = shell_operator.create_ssh_connect(test_client, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + test_client, 1046, config.abnormal_user) cmd = "/home/nbs/.curveadm/bin/curveadm cluster checkout citest" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"checkout fail" + assert ret == 0, "checkout fail" for mountpoint in config.fs_mount_dir: - cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s"%(config.fs_mount_path,mountpoint) + cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s" % ( + config.fs_mount_path, mountpoint) shell_operator.ssh_exec(ssh, cmd) cmd = "echo 'yes' | /home/nbs/.curveadm/bin/curveadm stop" ret = shell_operator.run_exec(cmd) @@ -323,186 +358,218 @@ def destroy_curvefs(): logger.error("destroy curvefs fail.") raise + def use_ansible_deploy(): try: cmd = "cp robot/ansible_deploy.sh . && bash ansible_deploy.sh" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"ansible deploy fail" + assert ret == 0, "ansible deploy fail" host = config.client_list[0] - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s:/etc/curve/client.conf ."%\ - (config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s:/etc/curve/client.conf ." 
%\ + (config.pravie_key_path, host) ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"cp client.conf fail" + assert ret == 0, "cp client.conf fail" except Exception: logger.error("deploy curve fail.") raise + def deploy_all_servers(): try: cmd = "/home/nbs/.curveadm/bin/curveadm cluster checkout citest" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"checkout fail" + assert ret == 0, "checkout fail" cmd = "/home/nbs/.curveadm/bin/curveadm deploy" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"deploy mds\etcd\metaserver fail" + assert ret == 0, "deploy mds\etcd\metaserver fail" except Exception: logger.error("deploy curvefs fail.") raise -def remk_test_dir(): + +def remk_test_dir(): try: test_client = config.fs_test_client[0] - ssh = shell_operator.create_ssh_connect(test_client, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + test_client, 1046, config.abnormal_user) for test_dir in config.fs_mount_dir: - ori_cmd = "rm -rf %s/%s"%(config.fs_mount_path,test_dir) + ori_cmd = "rm -rf %s/%s" % (config.fs_mount_path, test_dir) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"rm test dir %s fail,error is %s"%(test_dir,rs[1]) - ori_cmd = "mkdir %s/%s"%(config.fs_mount_path,test_dir) + assert rs[3] == 0, "rm test dir %s fail,error is %s" % ( + test_dir, rs[1]) + ori_cmd = "mkdir %s/%s" % (config.fs_mount_path, test_dir) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mkdir %s fail,error is %s"%(test_dir,rs[1]) + assert rs[3] == 0, "mkdir %s fail,error is %s" % (test_dir, rs[1]) except Exception: logger.error(" remk test dir fail.") raise -def mount_test_dir(mountpoint="",mountfile=""): + +def mount_test_dir(mountpoint="", mountfile=""): try: test_client = config.fs_test_client[0] - ssh = shell_operator.create_ssh_connect(test_client, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + test_client, 1046, config.abnormal_user) if mountpoint == "": for mountpoint in config.fs_mount_dir: if config.fs_use_curvebs: cmd = "/home/nbs/.curveadm/bin/curveadm mount %s %s%s -c client-bs-%s.yaml \ - --fstype volume"%(mountpoint,config.fs_mount_path,mountpoint,mountpoint) - else: + --fstype volume" % (mountpoint, config.fs_mount_path, mountpoint, mountpoint) + else: cmd = "/home/nbs/.curveadm/bin/curveadm mount %s %s%s -c client-%s.yaml\ - "%(mountpoint,config.fs_mount_path,mountpoint,mountpoint) + " % (mountpoint, config.fs_mount_path, mountpoint, mountpoint) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"mount %s dir fail,error is %s"%(mountpoint,rs[2]) + assert rs[3] == 0, "mount %s dir fail,error is %s" % ( + mountpoint, rs[2]) else: if mountfile == "": mountfile = mountpoint if config.fs_use_curvebs: cmd = "/home/nbs/.curveadm/bin/curveadm mount %s %s%s -c client-bs-%s.yaml \ - --fstype volume"%(mountpoint,config.fs_mount_path,mountfile,mountfile) + --fstype volume" % (mountpoint, config.fs_mount_path, mountfile, mountfile) else: cmd = "/home/nbs/.curveadm/bin/curveadm mount %s %s%s -c client-%s.yaml\ - "%(mountpoint,config.fs_mount_path,mountfile,mountfile) + " % (mountpoint, config.fs_mount_path, mountfile, mountfile) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"mount %s dir fail,error is %s"%(mountpoint,rs[2]) + assert rs[3] == 0, "mount %s dir fail,error is %s" % ( + mountpoint, rs[2]) except Exception: logger.error("mount dir fail.") raise + def umount_test_dir(mountpoint=""): try: test_client = config.fs_test_client[0] - ssh = shell_operator.create_ssh_connect(test_client, 1046, 
config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + test_client, 1046, config.abnormal_user) if mountpoint == "": for mountpoint in config.fs_mount_dir: - cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s"%(config.fs_mount_path,mountpoint) + cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s" % ( + config.fs_mount_path, mountpoint) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"umount %s dir fail,error is %s"%(mountpoint,rs[2]) + assert rs[3] == 0, "umount %s dir fail,error is %s" % ( + mountpoint, rs[2]) else: - cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s"%(config.fs_mount_path,mountpoint) + cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s" % ( + config.fs_mount_path, mountpoint) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"umount %s dir fail,error is %s"%(mountpoint,rs[2]) + assert rs[3] == 0, "umount %s dir fail,error is %s" % ( + mountpoint, rs[2]) except Exception: logger.error("umount dir fail.") raise + def install_deb(): try: -# mkdeb_url = config.curve_workspace + "mk-deb.sh" -# exec_mkdeb = "bash %s"%mkdeb_url -# shell_operator.run_exec2(exec_mkdeb) - cmd = "ls %scurve-mds*.deb"%config.curve_workspace + # mkdeb_url = config.curve_workspace + "mk-deb.sh" + # exec_mkdeb = "bash %s"%mkdeb_url + # shell_operator.run_exec2(exec_mkdeb) + cmd = "ls %scurve-mds*.deb" % config.curve_workspace mds_deb = shell_operator.run_exec2(cmd) version = mds_deb.split('+')[1] for host in config.mds_list: - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s*.deb %s:~/"%\ - (config.pravie_key_path,config.curve_workspace,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s*.deb %s:~/" %\ + (config.pravie_key_path, config.curve_workspace, host) shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - ori_cmd = "sudo dpkg -i --force-overwrite *%s* aws-sdk_1.0_amd64.deb"%version + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + ori_cmd = "sudo dpkg -i --force-overwrite *%s* aws-sdk_1.0_amd64.deb" % version rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mds install deb fail,error is %s %s"%(rs[1],rs[2]) - rm_deb = "rm *%s*"%version + assert rs[3] == 0, "mds install deb fail,error is %s %s" % ( + rs[1], rs[2]) + rm_deb = "rm *%s*" % version shell_operator.ssh_exec(ssh, rm_deb) - + for host in config.client_list: - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s*.deb %s:~/"%\ - (config.pravie_key_path,config.curve_workspace,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s*.deb %s:~/" %\ + (config.pravie_key_path, config.curve_workspace, host) shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - ori_cmd = "sudo dpkg -i --force-overwrite curve-sdk*%s*"%version + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + ori_cmd = "sudo dpkg -i --force-overwrite curve-sdk*%s*" % version rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"sdk install deb fail,error is %s %s"%(rs[1],rs[2]) - rm_deb = "rm *%s*"%version + assert rs[3] == 0, "sdk install deb fail,error is %s %s" % ( + rs[1], rs[2]) + rm_deb = "rm *%s*" % version shell_operator.ssh_exec(ssh, rm_deb) for host in config.chunkserver_list: cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s*.deb %s:~/" %\ - (config.pravie_key_path,config.curve_workspace,host) + (config.pravie_key_path, config.curve_workspace, host) shell_operator.run_exec2(cmd) - ssh = 
shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - ori_cmd = "sudo dpkg -i --force-overwrite curve-chunkserver*%s* curve-tools*%s* aws-sdk_1.0_amd64.deb"%(version,version) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + ori_cmd = "sudo dpkg -i --force-overwrite curve-chunkserver*%s* curve-tools*%s* aws-sdk_1.0_amd64.deb" % ( + version, version) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0, "chunkserver install deb fail,error is %s %s"%(rs[1],rs[2]) - rm_deb = "rm *%s*"%version + assert rs[3] == 0, "chunkserver install deb fail,error is %s %s" % ( + rs[1], rs[2]) + rm_deb = "rm *%s*" % version shell_operator.ssh_exec(ssh, rm_deb) except Exception: logger.error("install deb fail.") raise + def start_nebd(): - cmd = "ls nebd/nebd*.deb" - nebd_deb = shell_operator.run_exec2(cmd) - version = nebd_deb.split('+')[1] - assert nebd_deb != "","can not get nebd deb" - for host in config.client_list: - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %snebd/*.deb %s:~/"%\ - (config.pravie_key_path,config.curve_workspace,host) - shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - ori_cmd = "sudo dpkg -i --force-overwrite nebd_*%s"%version - rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"install nebd deb fail,error is %s"%rs - rm_deb = "rm nebd_*%s"%version - shell_operator.ssh_exec(ssh, rm_deb) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 nebd/etc/nebd/*.conf %s:~/"%\ - (config.pravie_key_path,host) - shell_operator.run_exec2(cmd) - ori_cmd = "sudo cp nebd-client.conf nebd-server.conf /etc/nebd/" - rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"cp %s nebd conf fail"%host - ori_cmd = "sudo nebd-daemon start" - rs = shell_operator.ssh_exec(ssh, ori_cmd) - if rs[3] != 0: - logger.debug("nebd start fail,error is %s"%rs[1]) - ori_cmd == "sudo nebd-daemon restart" - rs2 = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs2[3] == 0,"restart nebd fail, return is %s"%rs2[1] - time.sleep(5) - ori_cmd = "ps -ef|grep nebd-server | grep -v daemon |grep -v grep |awk '{print $2}'" - rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[1] != "","start nebd fail!" 
+ cmd = "ls nebd/nebd*.deb" + nebd_deb = shell_operator.run_exec2(cmd) + version = nebd_deb.split('+')[1] + assert nebd_deb != "", "can not get nebd deb" + for host in config.client_list: + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %snebd/*.deb %s:~/" %\ + (config.pravie_key_path, config.curve_workspace, host) + shell_operator.run_exec2(cmd) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + ori_cmd = "sudo dpkg -i --force-overwrite nebd_*%s" % version + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "install nebd deb fail,error is %s" % rs + rm_deb = "rm nebd_*%s" % version + shell_operator.ssh_exec(ssh, rm_deb) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 nebd/etc/nebd/*.conf %s:~/" %\ + (config.pravie_key_path, host) + shell_operator.run_exec2(cmd) + ori_cmd = "sudo cp nebd-client.conf nebd-server.conf /etc/nebd/" + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "cp %s nebd conf fail" % host + ori_cmd = "sudo nebd-daemon start" + rs = shell_operator.ssh_exec(ssh, ori_cmd) + if rs[3] != 0: + logger.debug("nebd start fail,error is %s" % rs[1]) + ori_cmd == "sudo nebd-daemon restart" + rs2 = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs2[3] == 0, "restart nebd fail, return is %s" % rs2[1] + time.sleep(5) + ori_cmd = "ps -ef|grep nebd-server | grep -v daemon |grep -v grep |awk '{print $2}'" + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[1] != "", "start nebd fail!" + def add_config_file(): for host in config.mds_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo cp -r /etc/curve-bak /etc/curve" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"add host %s config fail,error is %s"%(host,rs[2]) + assert rs[3] == 0, "add host %s config fail,error is %s" % ( + host, rs[2]) for host in config.chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo cp -r /etc/curve-bak /etc/curve" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"add host %s config fail,error is %s"%(host,rs[2]) + assert rs[3] == 0, "add host %s config fail,error is %s" % ( + host, rs[2]) + def start_abnormal_test_services(): try: for host in config.etcd_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo rm -rf /etcd/default.etcd" shell_operator.ssh_exec(ssh, ori_cmd) etcd_cmd = "cd etcdrun && sudo nohup ./run.sh new &" @@ -510,52 +577,59 @@ def start_abnormal_test_services(): ori_cmd = "ps -ef|grep -v grep | grep -w etcd | awk '{print $2}'" time.sleep(2) rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.debug("etcd pid is %s"%rs[1]) + logger.debug("etcd pid is %s" % rs[1]) assert rs[1] != [], "up etcd fail" for host in config.mds_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) mds_cmd = "sudo nohup /usr/bin/curve-mds --confPath=/etc/curve/mds.conf &" shell_operator.ssh_background_exec2(ssh, mds_cmd) time.sleep(1) ori_cmd = "ps -ef|grep -v grep | grep -v curve-mds.log | grep -v sudo | grep -w curve-mds | awk '{print $2}'" rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[1] != [], "up mds fail" - 
logger.debug("mds pid is %s"%rs[1]) + logger.debug("mds pid is %s" % rs[1]) for host in config.snap_server_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "cd snapshot/temp && sudo nohup curve-snapshotcloneserver -conf=/etc/curve/snapshot_clone_server.conf &" shell_operator.ssh_background_exec2(ssh, ori_cmd) except Exception: logger.error("up servers fail.") raise + def create_pool(): - ssh = shell_operator.create_ssh_connect(config.mds_list[0], 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + config.mds_list[0], 1046, config.abnormal_user) mds = [] mds_addrs = "" for mds_host in config.mds_list: mds.append(mds_host + ":6666") mds_addrs = ",".join(mds) physical_pool = "curve-tool -cluster_map=topo.json -mds_addr=%s\ - -op=create_physicalpool"%(mds_addrs) + -op=create_physicalpool" % (mds_addrs) rs = shell_operator.ssh_exec(ssh, physical_pool) if rs[3] == 0: logger.info("create physical pool sucess") else: - assert False,"create physical fail ,msg is %s"%rs[2] + assert False, "create physical fail ,msg is %s" % rs[2] for host in config.chunkserver_list: - ssh2 = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh2 = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo nohup ./chunkserver_ctl.sh start all &" shell_operator.ssh_background_exec2(ssh2, ori_cmd) time.sleep(60) logical_pool = "curve-tool -cluster_map=topo.json -mds_addr=%s\ - -op=create_logicalpool"%(mds_addrs) + -op=create_logicalpool" % (mds_addrs) rs = shell_operator.ssh_exec(ssh, logical_pool) time.sleep(180) + def restart_cinder_server(): for client_host in config.client_list: - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) ori_cmd = "sudo cp /usr/curvefs/curvefs.py /srv/stack/cinder/lib/python2.7/site-packages/" rs = shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "sudo cp /usr/curvefs/_curvefs.so /srv/stack/cinder/lib/python2.7/site-packages/" @@ -563,21 +637,22 @@ def restart_cinder_server(): time.sleep(2) ori_cmd = "sudo service cinder-volume restart" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[1] == [],"rs is %s"%rs + assert rs[1] == [], "rs is %s" % rs + def wait_cinder_server_up(): cinder_host = config.nova_host - ssh = shell_operator.create_ssh_connect(cinder_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + cinder_host, 1046, config.abnormal_user) ori_cmd = R"source OPENRC && cinder get-host-list --all-services | grep pool1 | grep curve2 | awk '{print $16}'" i = 0 while i < 360: - rs = shell_operator.ssh_exec(ssh, ori_cmd) - status = "".join(rs[1]).strip() - if status == "up": - break - i = i + 5 - time.sleep(5) - assert status == "up","up curve2 cinder service fail,please check" + rs = shell_operator.ssh_exec(ssh, ori_cmd) + status = "".join(rs[1]).strip() + if status == "up": + break + i = i + 5 + time.sleep(5) + assert status == "up", "up curve2 cinder service fail,please check" if status == "up": - time.sleep(10) - + time.sleep(10) diff --git a/robot/Resources/keywords/fault_inject.py b/robot/Resources/keywords/fault_inject.py index 48e95382c4..507b5af8cf 100644 --- a/robot/Resources/keywords/fault_inject.py +++ b/robot/Resources/keywords/fault_inject.py @@ -15,6 +15,7 @@ import string import types + def block_ip(chain): ori_cmd = "iptables -I %s 2>&1" % 
chain cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, @@ -30,6 +31,7 @@ def cancel_block_ip(chain): print cmd # rc = shell_operator.run_exec(cmd) + def net_work_delay(dev, time): ori_cmd = "tc qdisc add dev %s root netem delay %dms 2>&1" % (dev, time) cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, @@ -37,37 +39,45 @@ def net_work_delay(dev, time): print cmd # rc = shell_operator.run_exec(cmd) -def package_loss_all(ssh,dev, percent): - ori_cmd = "sudo tc qdisc add dev %s root netem loss %d%% 2>&1" % (dev, percent) + +def package_loss_all(ssh, dev, percent): + ori_cmd = "sudo tc qdisc add dev %s root netem loss %d%% 2>&1" % ( + dev, percent) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"error is %s"%rs[2] + assert rs[3] == 0, "error is %s" % rs[2] # rc = shell_operator.run_exec(cmd) -def package_delay_all(ssh, dev,ms): + +def package_delay_all(ssh, dev, ms): ori_cmd = "sudo tc qdisc add dev %s root netem delay %dms" % (dev, ms) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"error is %s"%rs[2] + assert rs[3] == 0, "error is %s" % rs[2] # rc = shell_operator.run_exec(cmd) -def cancel_tc_inject(ssh,dev): + +def cancel_tc_inject(ssh, dev): ori_cmd = "sudo tc qdisc del dev %s root" % dev rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"error is %s"%rs[2] + assert rs[3] == 0, "error is %s" % rs[2] # rc = shell_operator.run_exec(cmd) -def show_tc_inject(ssh,dev): + +def show_tc_inject(ssh, dev): ori_cmd = "sudo tc qdisc show dev %s " % dev rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"error is %s"%rs[2] + assert rs[3] == 0, "error is %s" % rs[2] # rc = shell_operator.run_exec(cmd) + def package_reorder_all(dev, ms, percent1, percent2): - ori_cmd = "tc qdisc change dev %s root netem delay %s reorder %d%% %d%%" % (dev, ms, percent1, percent2) + ori_cmd = "tc qdisc change dev %s root netem delay %s reorder %d%% %d%%" % ( + dev, ms, percent1, percent2) cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, sudo_flag=True, sudo_way="") print cmd # rc = shell_operator.run_exec(cmd) + def package_duplicate_all(dev, percent): ori_cmd = "tc qdisc add dev %s root netem duplicate %d%%" % (dev, percent) cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, @@ -77,7 +87,8 @@ def package_duplicate_all(dev, percent): def eth_down_for_a_monent(dev, time): - ori_cmd = "ip link set %s down 2>&1 && sleep %d 2>&1 && ip link set %s up 2>&1" % (dev, time) + ori_cmd = "ip link set %s down 2>&1 && sleep %d 2>&1 && ip link set %s up 2>&1" % ( + dev, time) cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, sudo_flag=True, sudo_way="") print cmd @@ -86,110 +97,125 @@ def eth_down_for_a_monent(dev, time): def add_rate_limit(dev, downlink, uplink): ori_cmd = "wget -N -P /tmp nos.netease.com/nfit-software/taaslimit.sh 2>&1 && chmod a+rx /tmp/taaslimit.sh 2>&1 " \ - "&& mv /tmp/taaslimit.sh /sbin/taaslimit 2>&1 && chown root:root /sbin/taaslimit && taaslimit %s %d %d 2>&1" % (dev, downlink, uplink) + "&& mv /tmp/taaslimit.sh /sbin/taaslimit 2>&1 && chown root:root /sbin/taaslimit && taaslimit %s %d %d 2>&1" % ( + dev, downlink, uplink) cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, sudo_flag=True, sudo_way="") print cmd # rc = 
shell_operator.run_exec(cmd)

+
def del_rate_limit(dev):
-    ori_cmd = "taaslimit clear %s 2>&1" %(dev)
+    ori_cmd = "taaslimit clear %s 2>&1" % (dev)
    cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd,
                                        sudo_flag=True, sudo_way="")
    print cmd
    # rc = shell_operator.run_exec(cmd)

-def inject_cpu_stress(ssh,stress=50):
-    cmd = "sudo nohup python cpu_stress.py %d &"%stress
-    shell_operator.ssh_background_exec2(ssh,cmd)
+
+def inject_cpu_stress(ssh, stress=50):
+    cmd = "sudo nohup python cpu_stress.py %d &" % stress
+    shell_operator.ssh_background_exec2(ssh, cmd)
    time.sleep(5)
    cmd = "ps -ef|grep -v grep | grep cpu_stress.py | awk '{print $2}'"
-    rs = shell_operator.ssh_exec(ssh,cmd)
-    assert rs[1] != [],"up cpu stress fail"
+    rs = shell_operator.ssh_exec(ssh, cmd)
+    assert rs[1] != [], "up cpu stress fail"

+
def del_cpu_stress(ssh):
    cmd = "ps -ef|grep -v grep | grep cpu_stress.py | awk '{print $2}'"
-    rs = shell_operator.ssh_exec(ssh,cmd)
+    rs = shell_operator.ssh_exec(ssh, cmd)
    if rs[1] == []:
        logger.info("no cpu stress running")
        return
    cmd = "ps -ef|grep -v grep | grep cpu_stress.py | awk '{print $2}'| sudo xargs kill -9"
-    rs = shell_operator.ssh_exec(ssh,cmd)
-    assert rs[3] == 0,"stop cpu stess fail"
+    rs = shell_operator.ssh_exec(ssh, cmd)
+    assert rs[3] == 0, "stop cpu stress fail"
+
-def inject_mem_stress(ssh,stress):
-    cmd = "sudo nohup /usr/local/stress/memtester/bin/memtester %dG > memtest.log &"%stress
-    shell_operator.ssh_background_exec2(ssh,cmd)
+def inject_mem_stress(ssh, stress):
+    cmd = "sudo nohup /usr/local/stress/memtester/bin/memtester %dG > memtest.log &" % stress
+    shell_operator.ssh_background_exec2(ssh, cmd)
    time.sleep(5)
    cmd = "ps -ef|grep -v grep | grep memtester | awk '{print $2}'"
-    rs = shell_operator.ssh_exec(ssh,cmd)
-    assert rs[1] != [],"up memster stress fail"
+    rs = shell_operator.ssh_exec(ssh, cmd)
+    assert rs[1] != [], "up memtester stress fail"

+
def del_mem_stress(ssh):
    cmd = "ps -ef|grep -v grep | grep memtester | awk '{print $2}'"
-    rs = shell_operator.ssh_exec(ssh,cmd)
+    rs = shell_operator.ssh_exec(ssh, cmd)
    if rs[1] == []:
        logger.info("no memtester stress running")
        return
    cmd = "ps -ef|grep -v grep | grep memtester | awk '{print $2}'| sudo xargs kill -9"
-    rs = shell_operator.ssh_exec(ssh,cmd)
-    assert rs[3] == 0,"stop memtester stess fail"
+    rs = shell_operator.ssh_exec(ssh, cmd)
+    assert rs[3] == 0, "stop memtester stress fail"

-def inject_clock_offset(ssh,time):
+
+def inject_clock_offset(ssh, time):
    cmd = "sudo date -s `date -d \"+%d min\" | awk \'{print $4}\'`" % time
    rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"inject clock offet fail,return is %s"%rs[2]
+    assert rs[3] == 0, "inject clock offset fail,return is %s" % rs[2]
+
-def del_clock_offset(ssh,time):
+def del_clock_offset(ssh, time):
    cmd = "sudo date -s `date -d \"-%d min\" | awk \'{print $4}\'`" % time
    rs = shell_operator.ssh_exec(ssh, cmd)
    assert rs[3] == 0, "del clock offset fail,return is %s" % rs[2]
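The inject_*/del_* helpers above are meant to be used in symmetric pairs so a fault is always rolled back even if a check fails. A small usage sketch (not in the original file; run_case is a hypothetical test body):

    def clock_offset_case(host, minutes=10):
        ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
        inject_clock_offset(ssh, minutes)   # skew the clock forward
        try:
            run_case()                      # hypothetical test body
        finally:
            del_clock_offset(ssh, minutes)  # always restore the clock
            ssh.close()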
+
def listen_network_stress(ip):
    ori_cmd = "iperf -s"
    ssh = shell_operator.create_ssh_connect(ip, 1046, config.abnormal_user)
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
    logger.info("exec cmd %s" % ori_cmd)
    # assert rs[3] == 0,"up iperf fail: %s"%rs[1]

+
def inject_network_stress(ip):
-    ori_cmd = "iperf -c %s -b 20000M -t 10 -p 5001"%ip
+    ori_cmd = "iperf -c %s -b 20000M -t 10 -p 5001" % ip
    ssh = shell_operator.create_ssh_connect(ip, 1046, config.abnormal_user)
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
    logger.info("exec cmd %s" % ori_cmd)
-    assert rs[3] == 0,"inject iperf fail: %s"%rs[2]
+    assert rs[3] == 0, "inject iperf fail: %s" % rs[2]

+
def stop_network_stress(ip):
    ori_cmd = "ps -ef|grep iperf |grep -v grep| awk '{print $2}' | sudo xargs kill -9"
    ssh = shell_operator.create_ssh_connect(ip, 1046, config.abnormal_user)
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[3] == 0,"stop iperf fail: %s"%rs[2]
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "stop iperf fail: %s" % rs[2]
    ori_cmd = "ps -ef|grep iperf |grep -v grep"
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[1] == [],"stop iperf fail,pid %s"%rs[1]
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[1] == [], "stop iperf fail,pid %s" % rs[1]

+
def ipmitool_cycle_restart_host(ssh):
    ori_cmd = "sudo ipmitool chassis power cycle"
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[3] == 0,"cycle restart host fail,return is %s"%rs
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "cycle restart host fail,return is %s" % rs

+
def ipmitool_reset_restart_host(ssh):
    ori_cmd = "sudo ipmitool chassis power reset"
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[3] == 0,"reset restart host fail,return is %s"%rs
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "reset restart host fail,return is %s" % rs

-def get_hostip_dev(ssh,hostip):
-    ori_cmd = "ip a|grep %s | awk '{print $7}'"%hostip
+
+def get_hostip_dev(ssh, hostip):
+    ori_cmd = "ip a|grep %s | awk '{print $7}'" % hostip
    rs = shell_operator.ssh_exec(ssh, ori_cmd)
-    assert rs[3] == 0,"error is %s"%rs[2]
+    assert rs[3] == 0, "error is %s" % rs[2]
    return "".join(rs[1]).strip()

+
def clear_RecycleBin():
    host = random.choice(config.mds_list)
    ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
    ori_cmd = "curve_ops_tool clean-recycle --isTest"
    rs = shell_operator.ssh_exec(ssh, ori_cmd)
-    assert rs[3] == 0,"clean RecyclenBin fail,msg is %s"%rs[1]
+    assert rs[3] == 0, "clean RecycleBin fail,msg is %s" % rs[1]
    starttime = time.time()
    ori_cmd = "curve_ops_tool list -fileName=/RecycleBin |grep Total"
    while time.time() - starttime < 180:
@@ -199,9 +225,10 @@ def clear_RecycleBin():
        else:
            logger.debug("deleting")
        if rs[3] != 0:
-            logger.debug("list /RecycleBin fail,error is %s"%rs[1])
-        time.sleep(3)
-    assert rs[3] == 0,"delete /RecycleBin fail,error is %s"%rs[1]
+            logger.debug("list /RecycleBin fail,error is %s" % rs[1])
+        time.sleep(3)
+    assert rs[3] == 0, "delete /RecycleBin fail,error is %s" % rs[1]
+
def loop_map_unmap_file():
    thread = []
@@ -209,7 +236,7 @@ def loop_map_unmap_file():
        filename = "nbdthrash" + str(i)
        t = mythread.runThread(test_curve_stability_nbd.nbd_all, filename)
        thread.append(t)
-        logger.debug("thrash map unmap %s" %filename)
+        logger.debug("thrash map unmap %s" % filename)
    config.thrash_thread = thread

    for t in thread:
@@ -217,29 +244,32 @@ def loop_map_unmap_file():
        # logger.debug("get result is %d" % t.get_result())
        # assert t.get_result() == 0

+
def stop_map_unmap():
    try:
        if config.thrash_thread == []:
-            assert False,"map umap not up"
+            assert False, "map umap not up"
        thread = config.thrash_thread
        config.thrash_map = False
        logger3.info("set thrash_map to false")
        time = 0
        for t in thread:
-            assert t.exitcode == 0,"map/umap thread error"
+            assert t.exitcode == 0, "map/umap thread error"
            result = t.get_result()
-            logger.debug("thrash map/umap time is %d"%result)
-            assert result > 
0,"map/umap thread error" + logger.debug("thrash map/umap time is %d" % result) + assert result > 0, "map/umap thread error" time = time + result - logger.info("map/umap all time is %d"%time) + logger.info("map/umap all time is %d" % time) except: - raise + raise + def stop_rwio(): - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) ori_cmd = "sudo supervisorctl stop all" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"stop rwio fail,rs is %s"%rs[1] + assert rs[3] == 0, "stop rwio fail,rs is %s" % rs[1] ori_cmd = "ps -ef|grep -v grep | grep randrw | awk '{print $2}'| sudo xargs kill -9" rs = shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "ps -ef|grep -v grep | grep -w /home/nbs/vdbench50406/profile | awk '{print $2}'| sudo xargs kill -9" @@ -247,114 +277,133 @@ def stop_rwio(): time.sleep(3) ssh.close() + def run_rwio(): - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) - ori_cmd = "lsblk |grep nbd0 | awk '{print $1}'" + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) + ori_cmd = "lsblk |grep nbd0 | awk '{print $1}'" rs = shell_operator.ssh_exec(ssh, ori_cmd) output = "".join(rs[1]).strip() if output != "nbd0": logger.error("map is error") - assert False,"output is %s"%output - ori_cmd = "lsblk |grep nbd1 | awk '{print $1}'" + assert False, "output is %s" % output + ori_cmd = "lsblk |grep nbd1 | awk '{print $1}'" rs = shell_operator.ssh_exec(ssh, ori_cmd) output = "".join(rs[1]).strip() if output != "nbd1": logger.error("map is error") - assert False,"output is %s"%output + assert False, "output is %s" % output ori_cmd = "sudo supervisorctl stop all && sudo supervisorctl reload" rs = shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "sudo nohup /home/nbs/vdbench50406/vdbench -jn -f /home/nbs/vdbench50406/profile &" rs = shell_operator.ssh_background_exec2(ssh, ori_cmd) - #write 60s io + # write 60s io time.sleep(60) # assert rs[3] == 0,"start rwio fail" ssh.close() + def init_recover_disk(fio_size): - ori_cmd = "sudo fio -name=/dev/nbd2 -direct=1 -iodepth=32 -rw=write -ioengine=libaio -bs=1024k -size=%dG -numjobs=1 -time_based"%int(fio_size) - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) + ori_cmd = "sudo fio -name=/dev/nbd2 -direct=1 -iodepth=32 -rw=write -ioengine=libaio -bs=1024k -size=%dG -numjobs=1 -time_based" % int( + fio_size) + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"write fio fail" + assert rs[3] == 0, "write fio fail" cmd = "sudo curve-nbd unmap cbd:pool1//recover_test_ >/dev/null 2>&1" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"unmap recover fail:%s"%rs[2] + assert rs[3] == 0, "unmap recover fail:%s" % rs[2] md5 = test_curve_stability_nbd.get_vol_md5("recover") config.recover_vol_md5 = md5 cmd = "curve delete --filename /recover --user test" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"delete /recover fail:%s"%rs[2] + assert rs[3] == 0, "delete /recover fail:%s" % rs[2] + def recover_disk(): cmd = "sudo curve recover --user test --filename /recover" - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) rs = 
shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"recover file fail:%s"%rs[2]
+    assert rs[3] == 0, "recover file fail:%s" % rs[2]
     md5 = test_curve_stability_nbd.get_vol_md5("recover")
-    assert md5 == config.recover_vol_md5,"Data is inconsistent after translation,md5 is %s,recover md5 is %s"%(config.recover_vol_md5,md5)
-
+    assert md5 == config.recover_vol_md5, "Data is inconsistent after recovery,md5 is %s,recover md5 is %s" % (
+        config.recover_vol_md5, md5)
+
+
 def get_chunkserver_list():
     client_host = config.client_list[0]
     logger.info("|------begin get chunkserver list------|")
     cmd = "curve_ops_tool chunkserver-list > cs_list"
-    ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user)
+    ssh = shell_operator.create_ssh_connect(
+        client_host, 1046, config.abnormal_user)
     rs = shell_operator.ssh_exec(ssh, cmd)
 
-def get_chunkserver_id(host,cs_id):
+
+def get_chunkserver_id(host, cs_id):
     client_host = config.client_list[0]
-    logger.info("|------begin get chunkserver %s id %d------|"%(host,cs_id))
-    cmd = "cat cs_list | grep %s |grep -w chunkserver%d"%(host,cs_id)
+    logger.info("|------begin get chunkserver %s id %d------|" % (host, cs_id))
+    cmd = "cat cs_list | grep %s |grep -w chunkserver%d" % (host, cs_id)
+    ssh = shell_operator.create_ssh_connect(
+        client_host, 1046, config.abnormal_user)
     rs = shell_operator.ssh_exec(ssh, cmd)
     chunkserver_info = "".join(rs[1]).strip().split(',')
-    chunkserver_id = re.findall(r"\d+",chunkserver_info[0])
+    chunkserver_id = re.findall(r"\d+", chunkserver_info[0])
     if chunkserver_id != []:
         return int(chunkserver_id[0])
     else:
         return -1
 
-def get_cs_copyset_num(host,cs_id):
+
+def get_cs_copyset_num(host, cs_id):
     client_host = config.client_list[0]
     cs_number = int(cs_id) + 8200
-    cmd = "curve_ops_tool check-chunkserver -chunkserverAddr=%s:%d |grep 'total copysets'"%(host,cs_number)
-    ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user)
+    cmd = "curve_ops_tool check-chunkserver -chunkserverAddr=%s:%d |grep 'total copysets'" % (
+        host, cs_number)
+    ssh = shell_operator.create_ssh_connect(
+        client_host, 1046, config.abnormal_user)
     rs = shell_operator.ssh_exec(ssh, cmd)
     chunkserver_info = "".join(rs[1]).strip().split(',')
-    chunkserver_id = re.findall(r"\d+",chunkserver_info[0])
+    chunkserver_id = re.findall(r"\d+", chunkserver_info[0])
     if chunkserver_id != []:
         return int(chunkserver_id[0])
     else:
-        return -1
+        return -1
 
-def stop_vm(ssh,uuid):
-    stop_cmd = "source OPENRC && nova stop %s"%uuid
+
+def stop_vm(ssh, uuid):
+    stop_cmd = "source OPENRC && nova stop %s" % uuid
     rs = shell_operator.ssh_exec(ssh, stop_cmd)
-    assert rs[3] == 0,"stop vm fail,error is %s"%rs[2]
+    assert rs[3] == 0, "stop vm fail,error is %s" % rs[2]
     time.sleep(5)
 
-def start_vm(ssh,uuid):
-    start_cmd = "source OPENRC && nova start %s"%uuid
+
+def start_vm(ssh, uuid):
+    start_cmd = "source OPENRC && nova start %s" % uuid
     rs = shell_operator.ssh_exec(ssh, start_cmd)
-    assert rs[3] == 0,"start vm fail,error is %s"%rs[2]
+    assert rs[3] == 0, "start vm fail,error is %s" % rs[2]
 
-def restart_vm(ssh,uuid):
-    restart_cmd = "source OPENRC && nova reboot %s"%uuid
+
+def restart_vm(ssh, uuid):
+    restart_cmd = "source OPENRC && nova reboot %s" % uuid
     rs = shell_operator.ssh_exec(ssh, restart_cmd)
-    assert rs[3] == 0,"reboot vm fail,error is %s"%rs[2]
+    assert rs[3] == 0, "reboot vm fail,error is %s" % rs[2]
+
 
-def check_vm_status(ssh,uuid):
-    ori_cmd = "source OPENRC && nova list|grep %s|awk '{print $6}'"%uuid
+def check_vm_status(ssh, uuid):
+    ori_cmd = "source OPENRC && nova list|grep %s|awk '{print $6}'" % uuid
     i = 0
     while i < 
180: - rs = shell_operator.ssh_exec(ssh, ori_cmd) - if "".join(rs[1]).strip() == "ACTIVE": - return True - elif "".join(rs[1]).strip() == "ERROR": - return False - else: - time.sleep(5) - i = i + 5 - assert False,"start vm fail" - -def check_vm_vd(ip,nova_ssh,uuid): + rs = shell_operator.ssh_exec(ssh, ori_cmd) + if "".join(rs[1]).strip() == "ACTIVE": + return True + elif "".join(rs[1]).strip() == "ERROR": + return False + else: + time.sleep(5) + i = i + 5 + assert False, "start vm fail" + + +def check_vm_vd(ip, nova_ssh, uuid): i = 0 while i < 300: try: @@ -363,19 +412,21 @@ def check_vm_vd(ip,nova_ssh,uuid): rs = shell_operator.ssh_exec(ssh, ori_cmd) output = "".join(rs[1]).strip() if output == "vdc": - ori_cmd = "source OPENRC && nova reboot %s --hard"%uuid - shell_operator.ssh_exec(nova_ssh,ori_cmd) + ori_cmd = "source OPENRC && nova reboot %s --hard" % uuid + shell_operator.ssh_exec(nova_ssh, ori_cmd) elif output == "": break except: i = i + 5 time.sleep(5) - assert rs[3] == 0,"start vm fail,ori_cmd is %s" % rs[1] + assert rs[3] == 0, "start vm fail,ori_cmd is %s" % rs[1] + def init_vm(): - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - ori_cmd = "source OPENRC && nova list|grep %s | awk '{print $2}'"%config.vm_host - ori_cmd2 = "source OPENRC && nova list|grep %s | awk '{print $2}'"%config.vm_stability_host + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + ori_cmd = "source OPENRC && nova list|grep %s | awk '{print $2}'" % config.vm_host + ori_cmd2 = "source OPENRC && nova list|grep %s | awk '{print $2}'" % config.vm_stability_host try: rs = shell_operator.ssh_exec(ssh, ori_cmd) rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) @@ -384,23 +435,23 @@ def init_vm(): uuid = "".join(rs[1]).strip() uuid2 = "".join(rs2[1]).strip() - for i in range(1,10): + for i in range(1, 10): ori_cmd = "bash curve_test.sh delete" shell_operator.ssh_exec(ssh, ori_cmd) - ori_cmd = "source OPENRC && nova reboot %s --hard"%uuid - ori_cmd2 = "source OPENRC && nova reboot %s --hard"%uuid2 - rs = shell_operator.ssh_exec(ssh,ori_cmd) - rs2 = shell_operator.ssh_exec(ssh,ori_cmd2) + ori_cmd = "source OPENRC && nova reboot %s --hard" % uuid + ori_cmd2 = "source OPENRC && nova reboot %s --hard" % uuid2 + rs = shell_operator.ssh_exec(ssh, ori_cmd) + rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) time.sleep(60) - rs1 = check_vm_status(ssh,uuid) - rs2 = check_vm_status(ssh,uuid2) + rs1 = check_vm_status(ssh, uuid) + rs2 = check_vm_status(ssh, uuid2) if rs1 == True and rs2 == True: break - assert rs1 == True,"hard reboot vm fail" - assert rs2 == True,"hard reboot vm fail" + assert rs1 == True, "hard reboot vm fail" + assert rs2 == True, "hard reboot vm fail" - check_vm_vd(config.vm_host,ssh,uuid) - check_vm_vd(config.vm_stability_host,ssh,uuid2) + check_vm_vd(config.vm_host, ssh, uuid) + check_vm_vd(config.vm_stability_host, ssh, uuid2) except: logger.error("init vm error") raise @@ -408,42 +459,49 @@ def init_vm(): def remove_vm_key(): - cmd = "ssh-keygen -f ~/.ssh/known_hosts -R %s"%config.vm_host + cmd = "ssh-keygen -f ~/.ssh/known_hosts -R %s" % config.vm_host shell_operator.run_exec(cmd) print cmd -def attach_new_vol(fio_size,vdbench_size): - ori_cmd = "bash curve_test.sh create %d %d"%(int(fio_size),int(vdbench_size)) - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + +def attach_new_vol(fio_size, vdbench_size): + ori_cmd = "bash curve_test.sh create %d %d" % ( + 
int(fio_size), int(vdbench_size)) + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.info("exec cmd %s" % ori_cmd) + assert rs[3] == 0, "attach vol fail,return is %s" % rs[2] logger.info("exec cmd %s" % ori_cmd) - assert rs[3] == 0,"attach vol fail,return is %s"%rs[2] - logger.info("exec cmd %s"%ori_cmd) get_vol_uuid() ssh.close() + def detach_vol(): stop_rwio() ori_cmd = "bash curve_test.sh delete" - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.info("exec cmd %s" % ori_cmd) + assert rs[3] == 0, "retcode is %d,error is %s" % (rs[3], rs[2]) logger.info("exec cmd %s" % ori_cmd) - assert rs[3] == 0,"retcode is %d,error is %s"%(rs[3],rs[2]) - logger.info("exec cmd %s"%ori_cmd) ssh.close() + def clean_nbd(): for client_ip in config.client_list: - logger.info("|------begin test clean client %s------|"%(client_ip)) + logger.info("|------begin test clean client %s------|" % (client_ip)) cmd = "sudo curve-nbd list-mapped |grep nbd" - ssh = shell_operator.create_ssh_connect(client_ip, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + client_ip, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) if rs[1] != []: for nbd_info in rs[1]: - nbd = re.findall("/dev/nbd\d+",nbd_info) + nbd = re.findall("/dev/nbd\d+", nbd_info) cmd = "sudo curve-nbd unmap " + nbd[0] rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"unmap %s fail,error is %s"%(nbd,rs[2]) + assert rs[3] == 0, "unmap %s fail,error is %s" % (nbd, rs[2]) cmd = "ps -ef|grep curve-nbd|grep -v grep | awk '{print $2}' | sudo xargs kill -9" rs = shell_operator.ssh_exec(ssh, cmd) return @@ -451,159 +509,174 @@ def clean_nbd(): def map_nbd(): client_host = config.client_list[0] - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) - stripeUnit = [524288,1048576,2097152,4194304] - stripeCount = [1,2,4,8,16] - cmd = "curve create --filename /fiofile --length 10 --user test --stripeUnit %d --stripeCount %d"%(random.choice(stripeUnit),random.choice(stripeCount)) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) + stripeUnit = [524288, 1048576, 2097152, 4194304] + stripeCount = [1, 2, 4, 8, 16] + cmd = "curve create --filename /fiofile --length 10 --user test --stripeUnit %d --stripeCount %d" % ( + random.choice(stripeUnit), random.choice(stripeCount)) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"create /fiofile fail:%s"%rs[2] - cmd = "curve create --filename /vdbenchfile --length 10 --user test --stripeUnit %d --stripeCount %d"%(random.choice(stripeUnit),random.choice(stripeCount)) + assert rs[3] == 0, "create /fiofile fail:%s" % rs[2] + cmd = "curve create --filename /vdbenchfile --length 10 --user test --stripeUnit %d --stripeCount %d" % ( + random.choice(stripeUnit), random.choice(stripeCount)) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"create /vdbenchfile fail:%s"%rs[2] - #test recover recyclebin file - cmd = "curve create --filename /recover --length 10 --user test --stripeUnit %d --stripeCount %d"%(random.choice(stripeUnit),random.choice(stripeCount)) + assert rs[3] == 0, "create /vdbenchfile fail:%s" % rs[2] + # test recover recyclebin file + cmd = "curve create --filename /recover --length 10 --user 
test --stripeUnit %d --stripeCount %d" % ( + random.choice(stripeUnit), random.choice(stripeCount)) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"create /recover fail:%s"%rs[2] + assert rs[3] == 0, "create /recover fail:%s" % rs[2] time.sleep(3) cmd = "sudo curve-nbd map cbd:pool1//fiofile_test_ >/dev/null 2>&1" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"map fiofile fail:%s"%rs[2] + assert rs[3] == 0, "map fiofile fail:%s" % rs[2] cmd = "sudo curve-nbd map cbd:pool1//vdbenchfile_test_ >/dev/null 2>&1" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"map vdbenchfile fail:%s"%rs[2] + assert rs[3] == 0, "map vdbenchfile fail:%s" % rs[2] cmd = "sudo curve-nbd map cbd:pool1//recover_test_ >/dev/null 2>&1" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"map recover fail:%s"%rs[2] + assert rs[3] == 0, "map recover fail:%s" % rs[2] + def delete_nbd(): client_host = config.client_list[0] - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) cmd = "curve delete --filename /fiofile --user test" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"delete /fiofile fail:%s"%rs[2] + assert rs[3] == 0, "delete /fiofile fail:%s" % rs[2] cmd = "curve delete --filename /vdbenchfile --user test" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"delete /vdbenchfile fail:%s"%rs[2] + assert rs[3] == 0, "delete /vdbenchfile fail:%s" % rs[2] cmd = "curve delete --filename /recover --user test" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"delete /recover fail:%s"%rs[2] + assert rs[3] == 0, "delete /recover fail:%s" % rs[2] + def check_host_connect(ip): - cmd = "ping %s -w3"%ip + cmd = "ping %s -w3" % ip status = shell_operator.run_exec(cmd) if status == 0: return True else: return False + def get_chunkserver_status(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) grep_cmd = "bash /home/nbs/chunkserver_ctl.sh status all" - rs = shell_operator.ssh_exec(ssh,grep_cmd) + rs = shell_operator.ssh_exec(ssh, grep_cmd) chunkserver_lines = rs[1] - logger.debug("get lines is %s"%chunkserver_lines) - up_cs = [int(i.split()[0][11:]) for i in filter(lambda x: "active" in x, chunkserver_lines)] - down_cs = [int(i.split()[0][11:]) for i in filter(lambda x: "down" in x, chunkserver_lines)] - return {'up':up_cs, 'down':down_cs} + logger.debug("get lines is %s" % chunkserver_lines) + up_cs = [int(i.split()[0][11:]) + for i in filter(lambda x: "active" in x, chunkserver_lines)] + down_cs = [int(i.split()[0][11:]) + for i in filter(lambda x: "down" in x, chunkserver_lines)] + return {'up': up_cs, 'down': down_cs} ssh.close() -def kill_mult_cs_process(host,num): + +def kill_mult_cs_process(host, num): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) operate_cs = [] - for i in range(0,num): + for i in range(0, num): try: - cs_status = get_chunkserver_status(host) - up_cs = cs_status["up"] - if up_cs == []: - raise Exception("no chunkserver up") + cs_status = get_chunkserver_status(host) + up_cs = cs_status["up"] + if up_cs == []: + raise Exception("no chunkserver up") except Exception as e: - logger.debug("cs_status is %s"%cs_status) - logger.error("%s"%e) - raise AssertionError() - logger.debug("cs_status is %s"%cs_status) + logger.debug("cs_status is %s" % cs_status) + logger.error("%s" % e) + raise AssertionError() + logger.debug("cs_status is %s" % cs_status) cs = 
random.choice(up_cs) ori_cmd = "ps -ef|grep -v grep | grep -w chunkserver%d | awk '{print $2}' && \ - ps -ef|grep -v grep | grep -w /etc/curve/chunkserver.conf.%d |grep -v sudo | awk '{print $2}'"%(cs,cs) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - logger.debug("exec %s"%ori_cmd) + ps -ef|grep -v grep | grep -w /etc/curve/chunkserver.conf.%d |grep -v sudo | awk '{print $2}'" % (cs, cs) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.debug("exec %s" % ori_cmd) pid_chunkserver = "".join(rs[1]).strip() - logger.info("test kill host %s chunkserver %s"%(host,cs)) - kill_cmd = "sudo kill -9 %s"%pid_chunkserver - rs = shell_operator.ssh_exec(ssh,kill_cmd) - logger.debug("exec %s,stdout is %s"%(kill_cmd,"".join(rs[2]))) - assert rs[3] == 0,"kill chunkserver fail" + logger.info("test kill host %s chunkserver %s" % (host, cs)) + kill_cmd = "sudo kill -9 %s" % pid_chunkserver + rs = shell_operator.ssh_exec(ssh, kill_cmd) + logger.debug("exec %s,stdout is %s" % (kill_cmd, "".join(rs[2]))) + assert rs[3] == 0, "kill chunkserver fail" up_cs.remove(cs) operate_cs.append(cs) ssh.close() return operate_cs -def start_mult_cs_process(host,num): + +def start_mult_cs_process(host, num): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) operate_cs = [] - for i in range(0,num): + for i in range(0, num): try: - cs_status = get_chunkserver_status(host) - down_cs = cs_status["down"] - if down_cs == []: - raise Exception("no chunkserver down") + cs_status = get_chunkserver_status(host) + down_cs = cs_status["down"] + if down_cs == []: + raise Exception("no chunkserver down") except Exception as e: - logger.error("%s"%e) - assert False - #raise AssertionError() - logger.debug("cs_status is %s"%cs_status) + logger.error("%s" % e) + assert False + #raise AssertionError() + logger.debug("cs_status is %s" % cs_status) cs = random.choice(down_cs) - if get_cs_copyset_num(host,cs) == 0: - ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat"%(cs) + if get_cs_copyset_num(host, cs) == 0: + ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat" % (cs) rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0 - ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d"%cs - logger.debug("exec %s"%ori_cmd) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"start chunkserver fail,error is %s"%rs[1] + ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d" % cs + logger.debug("exec %s" % ori_cmd) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "start chunkserver fail,error is %s" % rs[1] time.sleep(2) ori_cmd = "ps -ef|grep -v grep | grep -w chunkserver%d | awk '{print $2}' && \ ps -ef|grep -v grep | grep -w /etc/curve/chunkserver.conf.%d |grep -v sudo | awk '{print $2}'" % (cs, cs) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] == []: - assert False,"up chunkserver fail" + assert False, "up chunkserver fail" down_cs.remove(cs) operate_cs.append(cs) ssh.close() return operate_cs + def up_all_cs(): operate_cs = [] for host in config.chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) try: - cs_status = get_chunkserver_status(host) - down_cs = cs_status["down"] - if down_cs == []: - continue + cs_status = get_chunkserver_status(host) + down_cs = cs_status["down"] + if down_cs == []: + continue except Exception as e: - logger.error("%s"%e) - assert False - #raise AssertionError() - 
logger.debug("cs_status is %s"%cs_status) + logger.error("%s" % e) + assert False + #raise AssertionError() + logger.debug("cs_status is %s" % cs_status) cs = random.choice(down_cs) for cs in down_cs: - if get_cs_copyset_num(host,cs) == 0: + if get_cs_copyset_num(host, cs) == 0: ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat;sudo rm -rf /data/chunkserver%d/copysets;\ - sudo rm -rf /data/chunkserver%d/recycler"%(cs,cs,cs) + sudo rm -rf /data/chunkserver%d/recycler" % (cs, cs, cs) rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0 - ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d"%cs - logger.debug("exec %s"%ori_cmd) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"start chunkserver fail" + ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d" % cs + logger.debug("exec %s" % ori_cmd) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "start chunkserver fail" time.sleep(2) ori_cmd = "ps -ef|grep -v grep | grep -w chunkserver%d | awk '{print $2}' && \ ps -ef|grep -v grep | grep -w /etc/curve/chunkserver.conf.%d |grep -v sudo | awk '{print $2}'" % (cs, cs) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] == []: - assert False,"up chunkserver fail" + assert False, "up chunkserver fail" ssh.close() + def stop_host_cs_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) try: @@ -612,17 +685,18 @@ def stop_host_cs_process(host): if up_cs == []: raise Exception("no chunkserver up") except Exception as e: - logger.error("%s"%e) + logger.error("%s" % e) raise AssertionError() - logger.debug("cs_status is %s"%cs_status) + logger.debug("cs_status is %s" % cs_status) ori_cmd = "ps -ef|grep -v grep | grep -w curve-chunkserver |grep -v sudo | awk '{print $2}' | sudo xargs kill -9" - rs = shell_operator.ssh_exec(ssh,ori_cmd) - logger.debug("exec %s"%ori_cmd) - print "test kill host %s chunkserver %s"%(host,up_cs) - assert rs[3] == 0,"kill chunkserver fail" + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.debug("exec %s" % ori_cmd) + print "test kill host %s chunkserver %s" % (host, up_cs) + assert rs[3] == 0, "kill chunkserver fail" ssh.close() -def start_host_cs_process(host,csid=-1): + +def start_host_cs_process(host, csid=-1): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) cs_status = get_chunkserver_status(host) down_cs = cs_status["down"] @@ -636,17 +710,19 @@ def start_host_cs_process(host,csid=-1): if csid == -1: ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start all" else: - if get_cs_copyset_num(host,csid) == 0: - ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat"%(csid) + if get_cs_copyset_num(host, csid) == 0: + ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat" % ( + csid) rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0 - ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d" %csid - print "test up host %s chunkserver %s"%(host, down_cs) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"start chunkserver fail,error is %s"%rs[1] + ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d" % csid + print "test up host %s chunkserver %s" % (host, down_cs) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "start chunkserver fail,error is %s" % rs[1] ssh.close() -def restart_mult_cs_process(host,num): + +def restart_mult_cs_process(host, num): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) for i in range(0, num): try: @@ -680,6 +756,7 @@ def 
restart_mult_cs_process(host,num): assert False, "up chunkserver fail" up_cs.remove(cs) + def kill_mds_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep -v sudo | grep curve-mds | awk '{print $2}'" @@ -689,10 +766,11 @@ def kill_mds_process(host): return for pid in pids[1]: pid = pid.strip() - kill_cmd = "sudo kill -9 %s"%pid - rs = shell_operator.ssh_exec(ssh,kill_cmd) - logger.debug("exec %s,stdout is %s"%(kill_cmd,"".join(rs[1]))) - assert rs[3] == 0,"kill mds fail,process is %s"%pid + kill_cmd = "sudo kill -9 %s" % pid + rs = shell_operator.ssh_exec(ssh, kill_cmd) + logger.debug("exec %s,stdout is %s" % (kill_cmd, "".join(rs[1]))) + assert rs[3] == 0, "kill mds fail,process is %s" % pid + def start_mds_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) @@ -703,12 +781,13 @@ def start_mds_process(host): return up_cmd = "sudo nohup /usr/bin/curve-mds --confPath=/etc/curve/mds.conf &" shell_operator.ssh_background_exec2(ssh, up_cmd) - logger.debug("exec %s"%(up_cmd)) + logger.debug("exec %s" % (up_cmd)) time.sleep(2) rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] == []: assert False, "mds up fail" + def kill_etcd_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep etcd | awk '{print $2}'" @@ -718,35 +797,37 @@ def kill_etcd_process(host): return for pid in pids[1]: pid = pid.strip() - kill_cmd = "sudo kill -9 %s"%pid - rs = shell_operator.ssh_exec(ssh,kill_cmd) - logger.debug("exec %s,stdout is %s"%(kill_cmd,"".join(rs[1]))) - assert rs[3] == 0,"kill etcd fail" + kill_cmd = "sudo kill -9 %s" % pid + rs = shell_operator.ssh_exec(ssh, kill_cmd) + logger.debug("exec %s,stdout is %s" % (kill_cmd, "".join(rs[1]))) + assert rs[3] == 0, "kill etcd fail" + def start_etcd_process(host): -# ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) -# ori_cmd = "ps -ef|grep -v grep | grep etcd | awk '{print $2}'" -# rs = shell_operator.ssh_exec(ssh, ori_cmd) -# if rs[1] != []: -# logger.debug("etcd already up") -# return -# mkdir_cmd = "sudo rm -rf /etcd/default.etcd" -# rs = shell_operator.ssh_exec(ssh, mkdir_cmd) -# up_cmd = " cd etcdrun && sudo nohup ./run.sh existing &" + # ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + # ori_cmd = "ps -ef|grep -v grep | grep etcd | awk '{print $2}'" + # rs = shell_operator.ssh_exec(ssh, ori_cmd) + # if rs[1] != []: + # logger.debug("etcd already up") + # return + # mkdir_cmd = "sudo rm -rf /etcd/default.etcd" + # rs = shell_operator.ssh_exec(ssh, mkdir_cmd) + # up_cmd = " cd etcdrun && sudo nohup ./run.sh existing &" # shell_operator.ssh_background_exec2(ssh, up_cmd) -# logger.debug("exec %s"%(up_cmd)) -# time.sleep(2) -# rs = shell_operator.ssh_exec(ssh, ori_cmd) -# if rs[1] == []: -# assert False, "etcd up fail" + # logger.debug("exec %s"%(up_cmd)) + # time.sleep(2) + # rs = shell_operator.ssh_exec(ssh, ori_cmd) + # if rs[1] == []: + # assert False, "etcd up fail" try: - cmd = "ansible-playbook -i curve/curve-ansible/server.ini curve/curve-ansible/start_curve.yml --tags etcd" - ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"ansible start etcd fail" + cmd = "ansible-playbook -i curve/curve-ansible/server.ini curve/curve-ansible/start_curve.yml --tags etcd" + ret = shell_operator.run_exec(cmd) + assert ret == 0, "ansible start etcd fail" except Exception: - logger.error("ansible start etcd fail.") - raise - + 
logger.error("ansible start etcd fail.") + raise + + def stop_mysql_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep mysql" @@ -756,8 +837,9 @@ def stop_mysql_process(host): return ori_cmd = "sudo killall mysqld" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.debug("exec %s,stdout is %s"%(ori_cmd,"".join(rs[1]))) - assert rs[3] == 0,"stop mysql fail" + logger.debug("exec %s,stdout is %s" % (ori_cmd, "".join(rs[1]))) + assert rs[3] == 0, "stop mysql fail" + def start_mysql_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) @@ -774,73 +856,82 @@ def start_mysql_process(host): if rs[1] == []: assert False, "mysql up fail" + def get_cluster_iops(): return 100 + def exec_deleteforce(): client_list = config.client_list host = random.choice(client_list) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 robot/Resources/keywords/deleteforce-test.py %s:~/"%(config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 robot/Resources/keywords/deleteforce-test.py %s:~/" % ( + config.pravie_key_path, host) shell_operator.run_exec2(cmd) ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) ori_cmd = "sudo cp ~/deleteforce-test.py /usr/curvefs/" shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "sudo python /usr/curvefs/deleteforce-test.py" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.info("exec deleteforce return is %s"%rs[1]) - assert rs[3] == 0,"rc is %d"%rs[3] - + logger.info("exec deleteforce return is %s" % rs[1]) + assert rs[3] == 0, "rc is %d" % rs[3] + + def get_all_chunk_num(): chunkserver_list = config.chunkserver_list num = 0 for host in chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) cs_status = get_chunkserver_status(host) cs_list = cs_status["up"] + cs_status["down"] for cs in cs_list: - ori_cmd = "ls /data/chunkserver%d/chunkfilepool/ |wc -l"%cs + ori_cmd = "ls /data/chunkserver%d/chunkfilepool/ |wc -l" % cs rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0 num = num + int("".join(rs[1]).strip()) - logger.info("now num is %d"%(num)) + logger.info("now num is %d" % (num)) return num def check_nbd_iops(limit_iops=3000): - ssh = shell_operator.create_ssh_connect(config.client_list[0],1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) ori_cmd = "iostat -d nb0 3 2 |grep nb0 | awk 'END {print $6}'" rs = shell_operator.ssh_exec(ssh, ori_cmd) kb_wrtn = "".join(rs[1]).strip() iops = int(kb_wrtn) / int(config.fio_iosize) - logger.info("now nbd0 iops is %d with 4k randrw"%iops) - assert iops >= limit_iops,"vm iops not ok,is %d"%iops + logger.info("now nbd0 iops is %d with 4k randrw" % iops) + assert iops >= limit_iops, "vm iops not ok,is %d" % iops + def check_chunkserver_online(num=120): host = random.choice(config.mds_list) ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) ori_cmd = "curve_ops_tool chunkserver-status | grep chunkserver" - + starttime = time.time() i = 0 while time.time() - starttime < 300: rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[3] != 0: - logger.debug("get chunkserver status fail,rs is %s"%rs[1]) + logger.debug("get chunkserver status fail,rs is %s" % rs[1]) time.sleep(10) continue status = "".join(rs[1]).strip() - online_num = re.findall(r'(?<=online = )\d+',status) - 
logger.info("chunkserver online num is %s"%online_num) + online_num = re.findall(r'(?<=online = )\d+', status) + logger.info("chunkserver online num is %s" % online_num) if int(online_num[0]) != num: - logger.debug("chunkserver online num is %s"%online_num) + logger.debug("chunkserver online num is %s" % online_num) time.sleep(10) else: break if int(online_num[0]) != num: ori_cmd = "curve_ops_tool chunkserver-list -checkHealth=false -checkCSAlive | grep OFFLINE" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.error("chunkserver offline list is %s"%rs[1]) - assert int(online_num[0]) == num,"chunkserver online num is %s"%online_num + logger.error("chunkserver offline list is %s" % rs[1]) + assert int( + online_num[0]) == num, "chunkserver online num is %s" % online_num + def wait_health_ok(): host = random.choice(config.mds_list) @@ -858,9 +949,10 @@ def wait_health_ok(): ori_cmd2 = "curve_ops_tool copysets-status -detail | grep \"unhealthy copysets statistic\"" rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) health = rs2[1] - logger.debug("copysets status is %s"%health) + logger.debug("copysets status is %s" % health) time.sleep(10) - assert check == 1,"cluster is not healthy in %d s"%config.recover_time + assert check == 1, "cluster is not healthy in %d s" % config.recover_time + def rapid_leader_schedule(): host = random.choice(config.mds_list) @@ -877,12 +969,12 @@ def rapid_leader_schedule(): else: ori_cmd2 = "curve_ops_tool check-operator -opName=change_peer" rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) - logger.debug("operator status is %s"%rs2[1]) + logger.debug("operator status is %s" % rs2[1]) time.sleep(10) - assert check == 1,"change operator num is not 0 in %d s"%config.recover_time + assert check == 1, "change operator num is not 0 in %d s" % config.recover_time ori_cmd = "curve_ops_tool rapid-leader-schedule" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"rapid leader schedule not ok" + assert rs[3] == 0, "rapid leader schedule not ok" ori_cmd = "curve_ops_tool check-operator -opName=transfer_leader -leaderOpInterval=1| grep \"Operator num is\"" starttime = time.time() while time.time() - starttime < 60: @@ -893,6 +985,7 @@ def rapid_leader_schedule(): else: time.sleep(1) + def wait_cluster_healthy(limit_iops=8000): check_chunkserver_online() host = random.choice(config.mds_list) @@ -912,46 +1005,53 @@ def wait_cluster_healthy(limit_iops=8000): ori_cmd2 = "curve_ops_tool status" rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) cluster_status = "".join(rs2[1]).strip() - logger.debug("cluster status is %s"%cluster_status) + logger.debug("cluster status is %s" % cluster_status) ori_cmd2 = "curve_ops_tool copysets-status -detail" rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) copysets_status = "".join(rs2[1]).strip() - logger.debug("copysets status is %s"%copysets_status) - assert check == 1,"cluster is not healthy in %d s,cluster status is:\n %s,copysets status is:\n %s"%(config.recover_time,cluster_status,copysets_status) - rapid_leader_schedule() - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) + logger.debug("copysets status is %s" % copysets_status) + assert check == 1, "cluster is not healthy in %d s,cluster status is:\n %s,copysets status is:\n %s" % ( + config.recover_time, cluster_status, copysets_status) + rapid_leader_schedule() + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) i = 0 while i < 300: ori_cmd = "iostat -d nb0 1 2 |grep nb0 | awk 'END {print $6}'" rs = 
shell_operator.ssh_exec(ssh, ori_cmd) kb_wrtn = "".join(rs[1]).strip() iops = int(kb_wrtn) / int(config.fio_iosize) - logger.info("vm iops is %d"%iops) + logger.info("vm iops is %d" % iops) if iops >= limit_iops: break i = i + 2 time.sleep(2) - assert iops >= limit_iops,"vm iops not ok in 300s" + assert iops >= limit_iops, "vm iops not ok in 300s" + def clean_kernel_log(): for host in config.client_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo logrotate -vf /etc/logrotate.d/rsyslog" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0," rollback log fail, %s"%rs[1] + assert rs[3] == 0, " rollback log fail, %s" % rs[1] ssh.close() + def check_io_error(): for host in config.client_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo grep \'I/O error\' /var/log/kern.log -R | grep -v nbd2" rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] != []: ori_cmd = "sudo logrotate -vf /etc/logrotate.d/rsyslog" shell_operator.ssh_exec(ssh, ori_cmd) - assert False," rwio error,log is %s"%rs[1] + assert False, " rwio error,log is %s" % rs[1] ssh.close() + def check_copies_consistency(): host = random.choice(config.mds_list) ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) @@ -966,15 +1066,16 @@ def check_copies_consistency(): rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[3] == 0: break - logger.info("check_hash false return is %s,return code is %d"%(rs[1],rs[3])) + logger.info( + "check_hash false return is %s,return code is %d" % (rs[1], rs[3])) time.sleep(3) i = i + 3 if rs[3] != 0: - assert False,"exec check_hash false fail,return is %s"%rs[1] + assert False, "exec check_hash false fail,return is %s" % rs[1] check_hash = "true" ori_cmd = ori_cmdpri + check_hash - rs = shell_operator.ssh_exec(ssh,ori_cmd) - logger.debug("exec %s,stdout is %s"%(ori_cmd,"".join(rs[1]))) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.debug("exec %s,stdout is %s" % (ori_cmd, "".join(rs[1]))) if rs[3] == 0: print "check consistency ok!" 
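# A minimal sketch of the two-phase consistency check driven above, assuming
# the base command (ori_cmdpri in this file) is a curve_ops_tool
# check-consistency invocation that takes a trailing check_hash value; the
# helper name run_consistency_check, the retry count, and the interval are
# illustrative, not an existing API in this module.
def run_consistency_check(ssh, base_cmd, check_hash, retries=10, interval=3):
    # Phase one (check_hash="false") only compares copyset metadata such as
    # the applied index, and is retried while replicas catch up; phase two
    # (check_hash="true") compares the actual chunk data hashes.
    for _ in range(retries):
        rs = shell_operator.ssh_exec(ssh, base_cmd + check_hash)
        if rs[3] == 0:
            return True
        time.sleep(interval)
    return False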
else: @@ -983,15 +1084,18 @@ def check_copies_consistency(): chunkID = message["chunkID"] hosts = message["hosts"] chunkservers = message["chunkservers"] - for i in range(0,3): + for i in range(0, 3): host = hosts[i] chunkserver = chunkservers[i] - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - ori_cmd = "sudo cp /data/%s/copysets/%s/data/chunk_%s /data/log/%s"%(chunkserver,groupId,chunkID,chunkserver) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + ori_cmd = "sudo cp /data/%s/copysets/%s/data/chunk_%s /data/log/%s" % ( + chunkserver, groupId, chunkID, chunkserver) rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[3] != 0: - logger.error("cp chunk fail,is %s"%rs[1]) - assert False,"checkconsistecny fail,error is %s"%("".join(rs[1]).strip()) + logger.error("cp chunk fail,is %s" % rs[1]) + assert False, "checkconsistecny fail,error is %s" % ( + "".join(rs[1]).strip()) # check_data_consistency() except: logger.error("check consistency error") @@ -999,132 +1103,151 @@ def check_copies_consistency(): raise # run_rwio() + def check_data_consistency(): try: - #wait run 60s io - #time.sleep(60) - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) + # wait run 60s io + # time.sleep(60) + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) ori_cmd = "grep \"Data Validation error\" /home/nbs/output/ -R && \ grep \"Data Validation error\" /home/nbs/nohup.out" rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] != []: t = time.time() - ori_cmd = "mv /home/nbs/output /home/nbs/vdbench-output/output-%d && mv /home/nbs/nohup.out /home/nbs/nohup-%d"%(int(t),int(t)) + ori_cmd = "mv /home/nbs/output /home/nbs/vdbench-output/output-%d && mv /home/nbs/nohup.out /home/nbs/nohup-%d" % ( + int(t), int(t)) rs = shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "mkdir output && touch nohup.out" rs = shell_operator.ssh_exec(ssh, ori_cmd) # logger.error("find error in %s"%rs[1]) - assert False,"find data consistency error,save log to vm /root/vdbench-output/output-%d"%int(t) + assert False, "find data consistency error,save log to vm /root/vdbench-output/output-%d" % int( + t) except Exception as e: ssh.close() raise ssh.close() + def test_kill_chunkserver_num(num): start_iops = get_cluster_iops() chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test kill chunkserver num %d,host %s------|"%(num,chunkserver_host)) + logger.info("|------begin test kill chunkserver num %d,host %s------|" % + (num, chunkserver_host)) try: -# check_chunkserver_status(chunkserver_host) - kill_mult_cs_process(chunkserver_host,num) + # check_chunkserver_status(chunkserver_host) + kill_mult_cs_process(chunkserver_host, num) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - logger.error("error:%s"%e) - start_mult_cs_process(chunkserver_host,num) - raise + logger.error("error:%s" % e) + start_mult_cs_process(chunkserver_host, num) + raise return chunkserver_host -def test_start_chunkserver_num(num,host=None): + +def test_start_chunkserver_num(num, host=None): start_iops = get_cluster_iops() if host == None: - chunkserver_host = random.choice(config.chunkserver_list) + chunkserver_host = random.choice(config.chunkserver_list) else: chunkserver_host = host - 
logger.info("|------begin test start chunkserver num %d,host %s------|"%(num,chunkserver_host)) + logger.info("|------begin test start chunkserver num %d,host %s------|" % + (num, chunkserver_host)) try: - start_mult_cs_process(chunkserver_host,num) + start_mult_cs_process(chunkserver_host, num) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - raise + raise + def test_outcs_recover_copyset(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test out one chunkserver,host %s------|"%(chunkserver_host)) + logger.info("|------begin test out one chunkserver,host %s------|" % + (chunkserver_host)) try: - cs_list = kill_mult_cs_process(chunkserver_host,1) - begin_num = get_cs_copyset_num(chunkserver_host,cs_list[0]) - #time.sleep(config.recover_time) + cs_list = kill_mult_cs_process(chunkserver_host, 1) + begin_num = get_cs_copyset_num(chunkserver_host, cs_list[0]) + # time.sleep(config.recover_time) i = 0 time.sleep(5) while i < config.recover_time: check_nbd_iops() i = i + 60 - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) + num = get_cs_copyset_num(chunkserver_host, cs_list[0]) time.sleep(60) if num == 0: break - logger.info("cs copyset num is %d"%num) + logger.info("cs copyset num is %d" % num) if num != 0: - # assert num != 0 - raise Exception("host %s chunkserver %d not recover to 0 in %d,now is %d"%(chunkserver_host,cs_list[0],config.recover_time,num)) + # assert num != 0 + raise Exception("host %s chunkserver %d not recover to 0 in %d,now is %d" % ( + chunkserver_host, cs_list[0], config.recover_time, num)) except Exception as e: -# raise AssertionError() - logger.error("error is %s"%e) - cs_list = start_host_cs_process(chunkserver_host,cs_list[0]) + # raise AssertionError() + logger.error("error is %s" % e) + cs_list = start_host_cs_process(chunkserver_host, cs_list[0]) raise - return chunkserver_host,begin_num + return chunkserver_host, begin_num + -def test_upcs_recover_copyset(host,copyset_num): +def test_upcs_recover_copyset(host, copyset_num): if host == None: chunkserver_host = random.choice(config.chunkserver_list) else: chunkserver_host = host - logger.info("|------begin test up one chunkserver,host %s------|"%(chunkserver_host)) + logger.info("|------begin test up one chunkserver,host %s------|" % + (chunkserver_host)) try: - cs_list = start_mult_cs_process(chunkserver_host,1) + cs_list = start_mult_cs_process(chunkserver_host, 1) time.sleep(10) - #time.sleep(config.recover_time) + # time.sleep(config.recover_time) i = 0 while i < config.recover_time: check_nbd_iops() i = i + 60 time.sleep(60) - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) - logger.info("cs copyset num is %d"%num) + num = get_cs_copyset_num(chunkserver_host, cs_list[0]) + logger.info("cs copyset num is %d" % num) if abs(num - copyset_num) <= 10: break if abs(num - copyset_num) > 10: - logger.error("get host %s chunkserver %d copyset num is %d"%(chunkserver_host,cs_list[0],num)) + logger.error("get host %s chunkserver %d copyset num is %d" % + (chunkserver_host, cs_list[0], num)) raise Exception( - "host %s chunkserver %d not recover to %d in %d,now is %d" % \ - (chunkserver_host, cs_list[0],copyset_num,config.recover_time,num)) + "host %s chunkserver %d not recover to %d in %d,now is %d" % + (chunkserver_host, cs_list[0], copyset_num, config.recover_time, num)) except 
Exception as e: - logger.error("error is :%s"%e) - raise + logger.error("error is :%s" % e) + raise return chunkserver_host + def stop_all_cs_not_recover(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test stop all chunkserver,host %s------|"%(chunkserver_host)) + logger.info("|------begin test stop all chunkserver,host %s------|" % + (chunkserver_host)) try: stop_host_cs_process(chunkserver_host) list = get_chunkserver_status(chunkserver_host) down_list = list["down"] dict = {} for cs in down_list: - num = get_cs_copyset_num(chunkserver_host,cs) + num = get_cs_copyset_num(chunkserver_host, cs) dict[cs] = num time.sleep(config.offline_timeout + 10) check_nbd_iops() for cs in dict: - num = get_cs_copyset_num(chunkserver_host,cs) + num = get_cs_copyset_num(chunkserver_host, cs) if num != dict[cs]: - # assert num != 0 - raise Exception("stop all chunkserver not recover fail,cs id %d,copysets num from %d to %d" % (cs,dict[cs],num)) + # assert num != 0 + raise Exception("stop all chunkserver not recover fail,cs id %d,copysets num from %d to %d" % ( + cs, dict[cs], num)) except Exception as e: # raise AssertionError() logger.error("error is %s" % e) @@ -1132,11 +1255,15 @@ def stop_all_cs_not_recover(): raise start_host_cs_process(chunkserver_host) + def pendding_all_cs_recover(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test pendding all chunkserver,host %s------|"%(chunkserver_host)) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - ssh_mds = shell_operator.create_ssh_connect(config.mds_list[0], 1046, config.abnormal_user) + logger.info( + "|------begin test pendding all chunkserver,host %s------|" % (chunkserver_host)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + ssh_mds = shell_operator.create_ssh_connect( + config.mds_list[0], 1046, config.abnormal_user) try: stop_host_cs_process(chunkserver_host) list = get_chunkserver_status(chunkserver_host) @@ -1149,13 +1276,14 @@ def pendding_all_cs_recover(): mds_addrs = ",".join(mds) get_chunkserver_list() for cs in down_list: - chunkserver_id = get_chunkserver_id(chunkserver_host,cs) + chunkserver_id = get_chunkserver_id(chunkserver_host, cs) assert chunkserver_id != -1 csid_list.append(chunkserver_id) pendding_cmd = "sudo curve-tool -mds_addr=%s -op=set_chunkserver \ - -chunkserver_id=%d -chunkserver_status=pendding"%(mds_addrs,chunkserver_id) - rs = shell_operator.ssh_exec(ssh_mds,pendding_cmd) - assert rs[3] == 0,"pendding chunkserver %d fail,rs is %s"%(cs,rs) + -chunkserver_id=%d -chunkserver_status=pendding" % (mds_addrs, chunkserver_id) + rs = shell_operator.ssh_exec(ssh_mds, pendding_cmd) + assert rs[3] == 0, "pendding chunkserver %d fail,rs is %s" % ( + cs, rs) time.sleep(180) test_kill_mds(2) i = 0 @@ -1164,13 +1292,14 @@ def pendding_all_cs_recover(): i = i + 60 time.sleep(60) for cs in down_list: - num = get_cs_copyset_num(chunkserver_host,cs) + num = get_cs_copyset_num(chunkserver_host, cs) if num != 0: break if num == 0: break if num != 0: - logger.error("exist chunkserver %d copyset %d"%(chunkserver_id,num)) + logger.error("exist chunkserver %d copyset %d" % + (chunkserver_id, num)) raise Exception("pendding chunkserver fail") except Exception as e: # raise AssertionError() @@ -1180,7 +1309,7 @@ def pendding_all_cs_recover(): raise test_start_mds() for cs in down_list: - start_host_cs_process(chunkserver_host,cs) + start_host_cs_process(chunkserver_host, cs) 
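# The recover loops in this file all share one shape: poll
# get_cs_copyset_num until the count reaches an expected value or
# config.recover_time runs out. A small illustrative helper capturing that
# pattern (not an existing function in this module; the 60s interval is an
# assumption matching the loops above):
def wait_copyset_num(host, cs, expect, interval=60):
    deadline = time.time() + config.recover_time
    while time.time() < deadline:
        num = get_cs_copyset_num(host, cs)
        logger.info("host %s chunkserver %d copyset num is %d" %
                    (host, cs, num))
        if num == expect:
            return True
        time.sleep(interval)
    return False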
time.sleep(60) list = get_chunkserver_status(chunkserver_host) up_list = list["up"] @@ -1189,23 +1318,28 @@ def pendding_all_cs_recover(): while i < config.recover_time: i = i + 10 time.sleep(10) - num = get_cs_copyset_num(chunkserver_host,cs) - logger.info("cs copyset num is %d"%num) + num = get_cs_copyset_num(chunkserver_host, cs) + logger.info("cs copyset num is %d" % num) if num > 0: break if num == 0: - logger.error("get host %s chunkserver %d copyset num is %d"%(chunkserver_host,cs,num)) + logger.error("get host %s chunkserver %d copyset num is %d" % + (chunkserver_host, cs, num)) raise Exception( - "host %s chunkserver %d not recover to %d in %d,now is %d" % \ - (chunkserver_host, cs,1,config.recover_time,num)) + "host %s chunkserver %d not recover to %d in %d,now is %d" % + (chunkserver_host, cs, 1, config.recover_time, num)) + def pendding_all_cs_recover_online(): cs_host = list(config.chunkserver_list) chunkserver_host = random.choice(config.cs_list) cs_host.remove(chunkserver_host) - logger.info("|------begin test pendding all chunkserver online,host %s------|"%(chunkserver_host)) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - ssh_mds = shell_operator.create_ssh_connect(config.mds_list[0], 1046, config.abnormal_user) + logger.info( + "|------begin test pendding all chunkserver online,host %s------|" % (chunkserver_host)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + ssh_mds = shell_operator.create_ssh_connect( + config.mds_list[0], 1046, config.abnormal_user) try: list = get_chunkserver_status(chunkserver_host) up_list = list["up"] @@ -1216,13 +1350,14 @@ def pendding_all_cs_recover_online(): mds_addrs = ",".join(mds) get_chunkserver_list() for cs in up_list: - chunkserver_id = get_chunkserver_id(chunkserver_host,cs) + chunkserver_id = get_chunkserver_id(chunkserver_host, cs) assert chunkserver_id != -1 csid_list.append(chunkserver_id) pendding_cmd = "sudo curve-tool -mds_addr=%s -op=set_chunkserver \ - -chunkserver_id=%d -chunkserver_status=pendding"%(mds_addrs,chunkserver_id) - rs = shell_operator.ssh_exec(ssh_mds,pendding_cmd) - assert rs[3] == 0,"pendding chunkserver %d fail,rs is %s"%(cs,rs) + -chunkserver_id=%d -chunkserver_status=pendding" % (mds_addrs, chunkserver_id) + rs = shell_operator.ssh_exec(ssh_mds, pendding_cmd) + assert rs[3] == 0, "pendding chunkserver %d fail,rs is %s" % ( + cs, rs) time.sleep(180) test_kill_mds(2) chunkserver_host2 = random.choice(config.cs_list) @@ -1236,7 +1371,7 @@ def pendding_all_cs_recover_online(): i = i + 60 time.sleep(60) for cs in up_list: - num = get_cs_copyset_num(chunkserver_host,cs) + num = get_cs_copyset_num(chunkserver_host, cs) if num != 0: break if num == 0: @@ -1244,7 +1379,8 @@ def pendding_all_cs_recover_online(): stop_host_cs_process(chunkserver_host) wait_health_ok() if num != 0: - logger.error("exist chunkserver %d copyset %d"%(chunkserver_id,num)) + logger.error("exist chunkserver %d copyset %d" % + (chunkserver_id, num)) raise Exception("online pendding chunkserver fail") except Exception as e: # raise AssertionError() @@ -1254,7 +1390,7 @@ def pendding_all_cs_recover_online(): raise test_start_mds() for cs in up_list: - start_host_cs_process(chunkserver_host,cs) + start_host_cs_process(chunkserver_host, cs) time.sleep(60) list = get_chunkserver_status(chunkserver_host) up_list = list["up"] @@ -1263,146 +1399,162 @@ def pendding_all_cs_recover_online(): while i < config.recover_time: i = i + 10 time.sleep(10) - num = 
get_cs_copyset_num(chunkserver_host,cs) - logger.info("cs copyset num is %d"%num) + num = get_cs_copyset_num(chunkserver_host, cs) + logger.info("cs copyset num is %d" % num) if num > 0: break if num == 0: - logger.error("get host %s chunkserver %d copyset num is %d"%(chunkserver_host,cs,num)) + logger.error("get host %s chunkserver %d copyset num is %d" % + (chunkserver_host, cs, num)) raise Exception( - "host %s chunkserver %d not recover to %d in %d,now is %d" % \ - (chunkserver_host, cs,1,config.recover_time,num)) + "host %s chunkserver %d not recover to %d in %d,now is %d" % + (chunkserver_host, cs, 1, config.recover_time, num)) + def test_suspend_recover_copyset(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test suspend recover,host %s------|"%(chunkserver_host)) + logger.info("|------begin test suspend recover,host %s------|" % + (chunkserver_host)) try: - cs_list = kill_mult_cs_process(chunkserver_host,1) - begin_num = get_cs_copyset_num(chunkserver_host,cs_list[0]) - #time.sleep(config.recover_time) + cs_list = kill_mult_cs_process(chunkserver_host, 1) + begin_num = get_cs_copyset_num(chunkserver_host, cs_list[0]) + # time.sleep(config.recover_time) i = 0 time.sleep(config.offline_timeout - 5) while i < config.recover_time: check_nbd_iops() i = i + 1 - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) + num = get_cs_copyset_num(chunkserver_host, cs_list[0]) time.sleep(1) - logger.info("now cs copyset num is %d,begin_num is %d"%(num,begin_num)) - if num > 0 and abs(begin_num - num) > 10 : + logger.info("now cs copyset num is %d,begin_num is %d" % + (num, begin_num)) + if num > 0 and abs(begin_num - num) > 10: break elif num == 0: - cs_list = start_host_cs_process(chunkserver_host,cs_list[0]) - assert False,"copyset is 0" + cs_list = start_host_cs_process(chunkserver_host, cs_list[0]) + assert False, "copyset is 0" start_host_cs_process(chunkserver_host) i = 0 while i < config.recover_time: check_nbd_iops() i = i + 60 - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) + num = get_cs_copyset_num(chunkserver_host, cs_list[0]) time.sleep(60) - logger.info("cs copyset num is %d"%num) + logger.info("cs copyset num is %d" % num) if abs(num - begin_num) < 10: break if abs(num - begin_num) > 10: raise Exception( - "host %s chunkserver %d not recover to %d in %d,now is %d" % \ - (chunkserver_host, cs_list[0],begin_num,config.recover_time,num)) + "host %s chunkserver %d not recover to %d in %d,now is %d" % + (chunkserver_host, cs_list[0], begin_num, config.recover_time, num)) except Exception as e: -# raise AssertionError() - logger.error("error is %s"%e) + # raise AssertionError() + logger.error("error is %s" % e) cs_list = start_host_cs_process(chunkserver_host) raise + def test_suspend_delete_recover_copyset(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test suspend delete recover,host %s------|"%(chunkserver_host)) + logger.info( + "|------begin test suspend delete recover,host %s------|" % (chunkserver_host)) try: - cs_list = kill_mult_cs_process(chunkserver_host,1) - begin_num = get_cs_copyset_num(chunkserver_host,cs_list[0]) - #time.sleep(config.recover_time) + cs_list = kill_mult_cs_process(chunkserver_host, 1) + begin_num = get_cs_copyset_num(chunkserver_host, cs_list[0]) + # time.sleep(config.recover_time) i = 0 time.sleep(10) while i < config.recover_time: check_nbd_iops() i = i + 1 - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) + num = get_cs_copyset_num(chunkserver_host, 
cs_list[0]) time.sleep(1) - logger.info("now cs copyset num is %d,begin_num is %d"%(num,begin_num)) - if num > 0 and abs(begin_num - num) > 10 : + logger.info("now cs copyset num is %d,begin_num is %d" % + (num, begin_num)) + if num > 0 and abs(begin_num - num) > 10: break elif num == 0: - cs_list = start_host_cs_process(chunkserver_host,cs_list[0]) - assert False,"copyset is 0" - start_host_cs_process(chunkserver_host,cs_list[0]) + cs_list = start_host_cs_process(chunkserver_host, cs_list[0]) + assert False, "copyset is 0" + start_host_cs_process(chunkserver_host, cs_list[0]) time.sleep(300) i = 0 while i < config.recover_time: check_nbd_iops() i = i + 60 - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) + num = get_cs_copyset_num(chunkserver_host, cs_list[0]) time.sleep(60) - logger.info("cs copyset num is %d"%num) + logger.info("cs copyset num is %d" % num) if abs(num - begin_num) < 10: break if abs(num - begin_num) > 10: raise Exception( - "host %s chunkserver %d not recover to %d in %d,now is %d" % \ - (chunkserver_host, cs_list[0],begin_num,config.recover_time,num)) + "host %s chunkserver %d not recover to %d in %d,now is %d" % + (chunkserver_host, cs_list[0], begin_num, config.recover_time, num)) except Exception as e: -# raise AssertionError() - logger.error("error is %s"%e) + # raise AssertionError() + logger.error("error is %s" % e) cs_list = start_host_cs_process(chunkserver_host) raise + def test_kill_mds(num=1): start_iops = get_cluster_iops() - logger.info("|------begin test kill mds num %d------|"%(num)) + logger.info("|------begin test kill mds num %d------|" % (num)) mds_ips = list(config.mds_list) try: - for i in range(0,num): + for i in range(0, num): mds_host = random.choice(mds_ips) - logger.info("mds ip is %s"%mds_host) + logger.info("mds ip is %s" % mds_host) kill_mds_process(mds_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) mds_ips.remove(mds_host) except Exception as e: - logger.error("kill mds %s fail"%mds_host) - raise + logger.error("kill mds %s fail" % mds_host) + raise return mds_host + def test_start_mds(): start_iops = get_cluster_iops() try: - logger.info("mds list is %s"%config.mds_list) + logger.info("mds list is %s" % config.mds_list) for mds_host in config.mds_list: start_mds_process(mds_host) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - raise + raise + def test_start_snap(): start_iops = get_cluster_iops() try: - logger.info("snap list is %s"%config.snap_server_list) + logger.info("snap list is %s" % config.snap_server_list) for snap_host in config.snap_server_list: start_snap_process(snap_host) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - raise + raise + def test_start_nginx(): client_host = config.client_list[0] - logger.info("|------begin start nginx,host %s------|"%(client_host)) + logger.info("|------begin start nginx,host %s------|" % (client_host)) cmd = "sudo docker start 5ac540f1608d" - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + 
ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"start nginx docker fail %s"%rs[1] + assert rs[3] == 0, "start nginx docker fail %s" % rs[1] + def start_snap_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) @@ -1413,12 +1565,13 @@ def start_snap_process(host): return up_cmd = "cd snapshot/temp && sudo nohup curve-snapshotcloneserver -conf=/etc/curve/snapshot_clone_server.conf &" shell_operator.ssh_background_exec2(ssh, up_cmd) - logger.debug("exec %s"%(up_cmd)) + logger.debug("exec %s" % (up_cmd)) time.sleep(2) rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] == []: assert False, "snap up fail" + def test_round_restart_mds(): logger.info("|------begin test round restart mds------|") start_iops = get_cluster_iops() @@ -1430,29 +1583,33 @@ def test_round_restart_mds(): start_mds_process(mds_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - logger.error("round restart mds %s fail"%mds_host) + logger.error("round restart mds %s fail" % mds_host) raise + def test_kill_etcd(num=1): - logger.info("|------begin test kill etcd num %d------|"%(num)) + logger.info("|------begin test kill etcd num %d------|" % (num)) start_iops = get_cluster_iops() etcd_ips = list(config.etcd_list) try: - for i in range(0,num): + for i in range(0, num): etcd_host = random.choice(etcd_ips) - logger.info("etcd ip is %s"%etcd_host) + logger.info("etcd ip is %s" % etcd_host) kill_etcd_process(etcd_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) etcd_ips.remove(etcd_host) except Exception as e: - logger.error("kill etcd %s fail"%etcd_host) + logger.error("kill etcd %s fail" % etcd_host) raise return etcd_host + def test_start_etcd(): start_iops = get_cluster_iops() try: @@ -1460,9 +1617,11 @@ def test_start_etcd(): start_etcd_process(etcd_host) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - raise + raise + def test_round_restart_etcd(): logger.info("|------begin test round restart etcd------|") @@ -1475,11 +1634,13 @@ def test_round_restart_etcd(): start_etcd_process(etcd_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - logger.error("round restart etcd %s fail"%etcd_host) + logger.error("round restart etcd %s fail" % etcd_host) raise + def test_kill_mysql(): logger.info("|------begin test kill mysql------|") start_iops = get_cluster_iops() @@ -1488,12 +1649,14 @@ def test_kill_mysql(): stop_mysql_process(mysql_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: start_mysql_process(mysql_host) raise return mysql_host + def 
test_start_mysql(host): start_iops = get_cluster_iops() mysql_host = host @@ -1501,69 +1664,84 @@ def test_start_mysql(host): start_mysql_process(mysql_host) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: raise + def test_stop_chunkserver_host(): start_iops = get_cluster_iops() chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test stop chunkserver host %s------|"%(chunkserver_host)) + logger.info("|------begin test stop chunkserver host %s------|" % + (chunkserver_host)) try: stop_host_cs_process(chunkserver_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: start_host_cs_process(chunkserver_host) raise e return chunkserver_host + def test_start_chunkserver_host(host=None): start_iops = get_cluster_iops() if host == None: - chunkserver_host = random.choice(config.chunkserver_list) + chunkserver_host = random.choice(config.chunkserver_list) else: chunkserver_host = host try: start_host_cs_process(chunkserver_host) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: raise e + def test_restart_chunkserver_num(num): start_iops = get_cluster_iops() chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test restart chunkserver num %d,host %s------|"%(num,chunkserver_host)) + logger.info("|------begin test restart chunkserver num %d,host %s------|" % + (num, chunkserver_host)) try: - restart_mult_cs_process(chunkserver_host,num) + restart_mult_cs_process(chunkserver_host, num) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: raise e + def stop_scheduler(): - ssh = shell_operator.create_ssh_connect(config.mds_list[0], 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + config.mds_list[0], 1046, config.abnormal_user) for mds_host in config.mds_list: - logger.info("|------begin stop copyset scheduler %s------|"%(mds_host)) - cmd = "curl -L %s:6666/flags/enableCopySetScheduler?setvalue=false"%mds_host - rs = shell_operator.ssh_exec(ssh,cmd) + logger.info("|------begin stop copyset scheduler %s------|" % + (mds_host)) + cmd = "curl -L %s:6666/flags/enableCopySetScheduler?setvalue=false" % mds_host + rs = shell_operator.ssh_exec(ssh, cmd) time.sleep(180) + def test_start_all_chunkserver(): start_iops = get_cluster_iops() try: for chunkserver_host in config.chunkserver_list: - start_host_cs_process(chunkserver_host) - end_iops = get_cluster_iops() - if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + start_host_cs_process(chunkserver_host) + end_iops = get_cluster_iops() + if float(end_iops) / float(start_iops) < 0.9: + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: raise e + def test_stop_all_chunkserver(): start_iops = 
get_cluster_iops() logger.info("|------begin test stop all chunkserver------|") @@ -1572,18 +1750,21 @@ def test_stop_all_chunkserver(): stop_host_cs_process(chunkserver_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: test_start_all_chunkserver() raise e + def test_kill_diff_host_chunkserver(): start_iops = get_cluster_iops() chunkserver_list = list(config.chunkserver_list) chunkserver_host1 = random.choice(chunkserver_list) chunkserver_list.remove(chunkserver_host1) chunkserver_host2 = random.choice(chunkserver_list) - logger.info("|------begin test kill diff host chunkserver,host1 %s,host2 %s------|"%(chunkserver_host1,chunkserver_host2)) + logger.info("|------begin test kill diff host chunkserver,host1 %s,host2 %s------|" % + (chunkserver_host1, chunkserver_host2)) try: kill_mult_cs_process(chunkserver_host1, 1) kill_mult_cs_process(chunkserver_host2, 1) @@ -1602,44 +1783,52 @@ def test_kill_diff_host_chunkserver(): start_mult_cs_process(chunkserver_host1, 1) start_mult_cs_process(chunkserver_host2, 1) + def test_reboot_nebd(): client_host = random.choice(config.client_list) - logger.info("|------begin test reboot nebd %s------|"%(client_host)) + logger.info("|------begin test reboot nebd %s------|" % (client_host)) cmd = "sudo nebd-daemon restart" - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"reboot nebd daemon fail,return is %s"%rs[1] + assert rs[3] == 0, "reboot nebd daemon fail,return is %s" % rs[1] + def test_cs_loss_package(percent): start_iops = get_cluster_iops() chunkserver_list = config.chunkserver_list chunkserver_host = random.choice(chunkserver_list) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - dev = get_hostip_dev(ssh,chunkserver_host) - logger.info("|------begin test host %s dev %s loss package------|"%(chunkserver_host,dev)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + dev = get_hostip_dev(ssh, chunkserver_host) + logger.info("|------begin test host %s dev %s loss package------|" % + (chunkserver_host, dev)) try: package_loss_all(ssh, dev, percent) - show_tc_inject(ssh,dev) + show_tc_inject(ssh, dev) check_nbd_iops(1) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.1: raise Exception("client io slow op more than 5s") except Exception as e: - raise + raise finally: time.sleep(60) - cancel_tc_inject(ssh,dev) + cancel_tc_inject(ssh, dev) + def test_mds_loss_package(percent): start_iops = get_cluster_iops() mds_list = config.mds_list mds_host = random.choice(mds_list) - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) - dev = get_hostip_dev(ssh,mds_host) - logger.info("|------begin test host %s dev %s loss package------|"%(mds_host,dev)) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) + dev = get_hostip_dev(ssh, mds_host) + logger.info("|------begin test host %s dev %s loss package------|" % + (mds_host, dev)) try: package_loss_all(ssh, dev, percent) - show_tc_inject(ssh,dev) + show_tc_inject(ssh, dev) check_nbd_iops(1) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.1: @@ -1648,18 +1837,21 @@ 
def test_mds_loss_package(percent): raise finally: time.sleep(60) - cancel_tc_inject(ssh,dev) + cancel_tc_inject(ssh, dev) + def test_cs_delay_package(ms): start_iops = get_cluster_iops() chunkserver_list = config.chunkserver_list chunkserver_host = random.choice(chunkserver_list) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - dev = get_hostip_dev(ssh,chunkserver_host) - logger.info("|------begin test host %s dev %s delay package------|"%(chunkserver_host,dev)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + dev = get_hostip_dev(ssh, chunkserver_host) + logger.info("|------begin test host %s dev %s delay package------|" % + (chunkserver_host, dev)) try: package_delay_all(ssh, dev, ms) - show_tc_inject(ssh,dev) + show_tc_inject(ssh, dev) check_nbd_iops(1) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.1: @@ -1668,18 +1860,21 @@ def test_cs_delay_package(ms): raise finally: time.sleep(60) - cancel_tc_inject(ssh,dev) + cancel_tc_inject(ssh, dev) + def test_mds_delay_package(ms): start_iops = get_cluster_iops() mds_list = config.mds_list mds_host = random.choice(mds_list) - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) - dev = get_hostip_dev(ssh,mds_host) - logger.info("|------begin test host %s dev %s delay package------|"%(mds_host,dev)) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) + dev = get_hostip_dev(ssh, mds_host) + logger.info("|------begin test host %s dev %s delay package------|" % + (mds_host, dev)) try: package_delay_all(ssh, dev, ms) - show_tc_inject(ssh,dev) + show_tc_inject(ssh, dev) # check_nbd_iops(1) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.1: @@ -1688,75 +1883,93 @@ def test_mds_delay_package(ms): raise finally: time.sleep(60) - cancel_tc_inject(ssh,dev) + cancel_tc_inject(ssh, dev) + def test_chunkserver_cpu_stress(stress=50): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test chunkserver cpu stress,host %s------|"%(chunkserver_host)) + logger.info( + "|------begin test chunkserver cpu stress,host %s------|" % (chunkserver_host)) cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 robot/Resources/keywords/cpu_stress.py \ - %s:~/"%(config.pravie_key_path,chunkserver_host) + %s:~/" % (config.pravie_key_path, chunkserver_host) shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - inject_cpu_stress(ssh,stress) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + inject_cpu_stress(ssh, stress) return ssh - + + def test_mds_cpu_stress(stress=50): mds_host = random.choice(config.mds_list) - logger.info("|------begin test mds cpu stress,host %s------|"%(mds_host)) + logger.info("|------begin test mds cpu stress,host %s------|" % (mds_host)) cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 robot/Resources/keywords/cpu_stress.py \ - %s:~/"%(config.pravie_key_path,mds_host) + %s:~/" % (config.pravie_key_path, mds_host) shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) - inject_cpu_stress(ssh,stress) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) + inject_cpu_stress(ssh, stress) return ssh + def test_client_cpu_stress(stress=50): -# client_host = random.choice(config.client_list) + # client_host = random.choice(config.client_list) 
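# A minimal sketch of the IOPS health check that the tests above keep
# reflowing: sample cluster IOPS again after a fault and fail when throughput
# drops below 90% of the pre-fault baseline. The helper name and the threshold
# parameter are illustrative only; the suite inlines this check in each test
# and relies on its own get_cluster_iops() helper.
def assert_io_not_degraded(start_iops, threshold=0.9):
    # Re-sample cluster IOPS and compare against the pre-fault baseline.
    end_iops = get_cluster_iops()
    if float(end_iops) / float(start_iops) < threshold:
        raise Exception("client io is slow, = %d more than 5s" % end_iops)
    return end_iops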
client_host = config.client_list[0] - logger.info("|------begin test client cpu stress,host %s------|"%(client_host)) + logger.info("|------begin test client cpu stress,host %s------|" % + (client_host)) cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 robot/Resources/keywords/cpu_stress.py \ - %s:~/"%(config.pravie_key_path,client_host) + %s:~/" % (config.pravie_key_path, client_host) shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) - inject_cpu_stress(ssh,stress) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) + inject_cpu_stress(ssh, stress) return ssh + def test_chunkserver_mem_stress(stress=50): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test chunkserver mem stress,host %s------|"%(chunkserver_host)) + logger.info( + "|------begin test chunkserver mem stress,host %s------|" % (chunkserver_host)) cmd = "free -g |grep Mem|awk \'{print $2}\'" - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) all_mem = int("".join(rs[1]).strip()) stress = all_mem * stress / 100 - inject_mem_stress(ssh,stress) + inject_mem_stress(ssh, stress) return ssh + def test_mds_mem_stress(stress=50): mds_host = random.choice(config.mds_list) - logger.info("|------begin test mds mem stress,host %s------|"%(mds_host)) + logger.info("|------begin test mds mem stress,host %s------|" % (mds_host)) cmd = "free -g |grep Mem|awk \'{print $2}\'" - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) all_mem = int("".join(rs[1]).strip()) stress = all_mem * stress / 100 - inject_mem_stress(ssh,stress) + inject_mem_stress(ssh, stress) return ssh + def test_client_mem_stress(stress=50): client_host = config.client_list[0] - logger.info("|------begin test client mem stress,host %s------|"%(client_host)) + logger.info("|------begin test client mem stress,host %s------|" % + (client_host)) cmd = "free -g |grep Mem|awk \'{print $2}\'" - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) all_mem = int("".join(rs[1]).strip()) stress = all_mem * stress / 100 - inject_mem_stress(ssh,stress) + inject_mem_stress(ssh, stress) return ssh + def test_chunkserver_network_stress(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test chunkserver network stress,host %s------|"%(chunkserver_host)) + logger.info( + "|------begin test chunkserver network stress,host %s------|" % (chunkserver_host)) t1 = mythread.runThread(listen_network_stress, chunkserver_host) t2 = mythread.runThread(inject_network_stress, chunkserver_host) t1.start() @@ -1764,9 +1977,11 @@ def test_chunkserver_network_stress(): t2.start() return chunkserver_host + def test_mds_network_stress(): mds_host = random.choice(config.mds_list) - logger.info("|------begin test mds network stress,host %s------|"%(mds_host)) + logger.info( + "|------begin test mds network stress,host %s------|" % (mds_host)) t1 = mythread.runThread(listen_network_stress, mds_host) t2 = mythread.runThread(inject_network_stress, mds_host) t1.start() @@ -1774,9 
+1989,11 @@ def test_mds_network_stress(): t2.start() return mds_host + def test_client_network_stress(): client_host = config.client_list[0] - logger.info("|------begin test client network stress,host %s------|"%(client_host)) + logger.info( + "|------begin test client network stress,host %s------|" % (client_host)) t1 = mythread.runThread(listen_network_stress, client_host) t2 = mythread.runThread(inject_network_stress, client_host) t1.start() @@ -1784,23 +2001,31 @@ def test_client_network_stress(): t2.start() return client_host + def test_chunkserver_clock_offset(offset): chunkserver_host = random.choice(config.chunkserver_list) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - inject_clock_offset(ssh,offset) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + inject_clock_offset(ssh, offset) return ssh + def test_mds_clock_offset(offset): mds_host = random.choice(config.mds_list) - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) - inject_clock_offset(ssh,offset) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) + inject_clock_offset(ssh, offset) return ssh -#使用cycle会从掉电到上电有1秒钟的间隔 +# There is a 1-second interval from power down to power up when using cycle + + def test_ipmitool_restart_chunkserver(): chunkserver_host = random.choice(config.chunkserver_reset_list) - logger.info("|------begin test chunkserver ipmitool cycle,host %s------|"%(chunkserver_host)) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) + logger.info( + "|------begin test chunkserver ipmitool cycle,host %s------|" % (chunkserver_host)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) ipmitool_cycle_restart_host(ssh) time.sleep(60) starttime = time.time() @@ -1812,13 +2037,16 @@ def test_ipmitool_restart_chunkserver(): else: logger.debug("wait host up") time.sleep(5) - assert status,"restart host %s fail"%chunkserver_host + assert status, "restart host %s fail" % chunkserver_host start_host_cs_process(chunkserver_host) + def test_ipmitool_restart_client(): client_host = config.client_list[0] - logger.info("|------begin test client ipmitool cycle,host %s------|"%(client_host)) - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + logger.info( + "|------begin test client ipmitool cycle,host %s------|" % (client_host)) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) ipmitool_cycle_restart_host(ssh) time.sleep(60) starttime = time.time() @@ -1830,13 +2058,17 @@ def test_ipmitool_restart_client(): else: logger.debug("wait host up") time.sleep(5) - assert status,"restart host %s fail"%client_host + assert status, "restart host %s fail" % client_host + +# There is no interval between power-off and power-on when using reset + -#使用reset从掉电到上电没有间隔 def test_ipmitool_reset_chunkserver(): chunkserver_host = random.choice(config.chunkserver_reset_list) - logger.info("|------begin test chunkserver ipmitool reset,host %s------|"%(chunkserver_host)) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) + logger.info( + "|------begin test chunkserver ipmitool reset,host %s------|" % (chunkserver_host)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) ipmitool_reset_restart_host(ssh) time.sleep(60) starttime = time.time() @@ -1848,13 +2080,16 @@ def 
test_ipmitool_reset_chunkserver(): else: logger.debug("wait host up") time.sleep(5) - assert status,"restart host %s fail"%chunkserver_host + assert status, "restart host %s fail" % chunkserver_host start_host_cs_process(chunkserver_host) + def test_ipmitool_restart_mds(): mds_host = random.choice(config.mds_reset_list) - logger.info("|------begin test mds ipmitool cycle,host %s------|"%(mds_host)) - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) + logger.info( + "|------begin test mds ipmitool cycle,host %s------|" % (mds_host)) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) ipmitool_cycle_restart_host(ssh) time.sleep(60) starttime = time.time() @@ -1866,11 +2101,12 @@ def test_ipmitool_restart_mds(): else: logger.debug("wait host up") time.sleep(5) - assert status,"restart host %s fail"%mds_host + assert status, "restart host %s fail" % mds_host start_mds_process(mds_host) start_etcd_process(mds_host) start_host_cs_process(mds_host) + def clean_last_data(): ssh = shell_operator.create_ssh_connect(config.vm_host, 22, config.vm_user) ori_cmd = "rm /root/perf/test-ssd/fiodata/* && rm /root/perf/test-ssd/cfg/*" @@ -1879,19 +2115,20 @@ def clean_last_data(): ori_cmd = "rm /root/perf/fiodata -rf" rs = shell_operator.ssh_exec(ssh, ori_cmd) + def analysis_data(ssh): ori_cmd = "cd /root/perf/ && python gen_randrw_data.py" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"gen randrw data fail,error is %s"%rs[1] + assert rs[3] == 0, "gen randrw data fail,error is %s" % rs[1] ori_cmd = "cat /root/perf/test.csv" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"get data fail,error is %s"%rs[1] + assert rs[3] == 0, "get data fail,error is %s" % rs[1] for line in rs[1]: if 'randread,4k' in line: randr_4k_iops = line.split(',')[4] elif 'randwrite,4k' in line: randw_4k_iops = line.split(',')[8] - elif 'write,512k' in line: + elif 'write,512k' in line: write_512k_iops = line.split(',')[8] elif 'read,512k' in line: read_512k_iops = line.split(',')[4] @@ -1900,24 +2137,29 @@ def analysis_data(ssh): read_512k_BW = float(read_512k_iops)*1000/2 write_512k_BW = float(write_512k_iops)*1000/2 logger.info("get one volume Basic data:-------------------------------") - logger.info("4k rand read iops is %d/s"%int(randr_4k_iops)) - logger.info("4k rand write iops is %d/s"%int(randw_4k_iops)) - logger.info("512k read BW is %d MB/s"%int(read_512k_BW)) - logger.info("512k write BW is %d MB/s"%int(write_512k_BW)) + logger.info("4k rand read iops is %d/s" % int(randr_4k_iops)) + logger.info("4k rand write iops is %d/s" % int(randw_4k_iops)) + logger.info("512k read BW is %d MB/s" % int(read_512k_BW)) + logger.info("512k write BW is %d MB/s" % int(write_512k_BW)) filename = "onevolume_perf.txt" - with open(filename,'w') as f: - f.write("4k randwrite %d/s 56000\n"%int(randw_4k_iops)) - f.write("4k randread %d/s 75000\n"%int(randr_4k_iops)) - f.write("512k write %dMB/s 135\n"%int(write_512k_BW)) - f.write("512k read %dMB/s 450\n"%int(read_512k_BW)) + with open(filename, 'w') as f: + f.write("4k randwrite %d/s 56000\n" % int(randw_4k_iops)) + f.write("4k randread %d/s 75000\n" % int(randr_4k_iops)) + f.write("512k write %dMB/s 135\n" % int(write_512k_BW)) + f.write("512k read %dMB/s 450\n" % int(read_512k_BW)) if randr_4k_iops < 75000: - assert float(75000 - randr_4k_iops)/75000 < 0.02,"4k_randr_iops did not meet expectations,expect more than 75000" + assert float(75000 - randr_4k_iops) / \ + 75000 < 0.02, "4k_randr_iops did not meet 
expectations,expect more than 75000" if randw_4k_iops < 56000: - assert float(56000 - randw_4k_iops)/56000 < 0.02,"4k_randw_iops did not meet expectations,expect more than 56000" + assert float(56000 - randw_4k_iops) / \ + 56000 < 0.02, "4k_randw_iops did not meet expectations,expect more than 56000" if read_512k_BW < 450: - assert float(450 - read_512k_BW)/450 < 0.02,"512k_read_bw did not meet expectations,expect more than 450" + assert float(450 - read_512k_BW) / \ + 450 < 0.02, "512k_read_bw did not meet expectations,expect more than 450" if write_512k_BW < 135: - assert float(135 - write_512k_BW)/135 < 0.02,"512k_write_bw did not meet expectations,expect more than 135" + assert float(135 - write_512k_BW) / \ + 135 < 0.02, "512k_write_bw did not meet expectations,expect more than 135" + def perf_test(): ssh = shell_operator.create_ssh_connect(config.vm_host, 22, config.vm_user) @@ -1929,7 +2171,7 @@ def perf_test(): -bs=4k -size=200G -runtime=300 -numjobs=1 -time_based" shell_operator.ssh_exec(ssh, init_io) start_test = "cd /root/perf && nohup python /root/perf/io_test.py &" - shell_operator.ssh_background_exec2(ssh,start_test) + shell_operator.ssh_background_exec2(ssh, start_test) time.sleep(60) final = 0 starttime = time.time() @@ -1942,123 +2184,134 @@ def perf_test(): else: logger.debug("wait io test finally") time.sleep(60) - assert final == 1,"io test have not finall" + assert final == 1, "io test have not finall" ori_cmd = "cp -r /root/perf/test-ssd/fiodata /root/perf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"cp fiodata fail,error is %s"%rs[1] + assert rs[3] == 0, "cp fiodata fail,error is %s" % rs[1] analysis_data(ssh) + def add_data_disk(): ori_cmd = "bash attach_thrash.sh" - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"attach thrash vol fail,rs is %s"%rs[1] + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "attach thrash vol fail,rs is %s" % rs[1] ori_cmd = "cat thrash_vm" - rs = shell_operator.ssh_exec(ssh,ori_cmd) - logger.info("rs is %s"%rs[1]) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.info("rs is %s" % rs[1]) vm_list = [] for i in rs[1]: - logger.info("uuid is %s"%i) - vm_list.append(i.strip()) + logger.info("uuid is %s" % i) + vm_list.append(i.strip()) vm_ip_list = [] for vm in vm_list: - ori_cmd = "source OPENRC && nova list|grep %s"%vm - rs = shell_operator.ssh_exec(ssh,ori_cmd) + ori_cmd = "source OPENRC && nova list|grep %s" % vm + rs = shell_operator.ssh_exec(ssh, ori_cmd) ret = "".join(rs[1]).strip() - ip = re.findall(r'\d+\.\d+\.\d+\.\d+',ret) - logger.info("get vm %s ip %s"%(vm,ip)) + ip = re.findall(r'\d+\.\d+\.\d+\.\d+', ret) + logger.info("get vm %s ip %s" % (vm, ip)) vm_ip_list.append(ip[0]) ssh.close() ssh = shell_operator.create_ssh_connect(config.vm_host, 22, config.vm_user) for ip in vm_ip_list: - ori_cmd = "ssh %s -o StrictHostKeyChecking=no "%ip + "\"" + " supervisorctl reload && supervisorctl start all " + "\"" + ori_cmd = "ssh %s -o StrictHostKeyChecking=no " % ip + "\"" + \ + " supervisorctl reload && supervisorctl start all " + "\"" logger.info("exec cmd %s" % ori_cmd) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"start supervisor fail,rs is %s"%rs[1] + assert rs[3] == 0, "start supervisor fail,rs is %s" % rs[1] ssh.close() def create_vm_image(vm_name): - ssh = 
shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - ori_cmd = "source OPENRC && nova list |grep %s | awk '{print $2}'"%(vm_name) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + ori_cmd = "source OPENRC && nova list |grep %s | awk '{print $2}'" % ( + vm_name) + rs = shell_operator.ssh_exec(ssh, ori_cmd) logger.info("vm uuid is %s" % rs[1]) thrash_vm_uuid = "".join(rs[1]).strip() - ori_cmd = "source OPENRC && nova image-create %s image-%s"%(thrash_vm_uuid,vm_name) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"create vm %s image fail"%(thrash_vm_uuid) + ori_cmd = "source OPENRC && nova image-create %s image-%s" % ( + thrash_vm_uuid, vm_name) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "create vm %s image fail" % (thrash_vm_uuid) starttime = time.time() - ori_cmd = "source OPENRC && nova image-list|grep image-%s|awk '{print $6}'"%vm_name + ori_cmd = "source OPENRC && nova image-list|grep image-%s|awk '{print $6}'" % vm_name while time.time() - starttime < 600: rs = shell_operator.ssh_exec(ssh, ori_cmd) if "".join(rs[1]).strip() == "ACTIVE": break elif "".join(rs[1]).strip() == "ERROR": - assert False,"create vm image image-%s fail"%(vm_name) + assert False, "create vm image image-%s fail" % (vm_name) else: time.sleep(10) if "".join(rs[1]).strip() != "ACTIVE": - assert False,"wait image create image-%s fail"%(vm_name) - ori_cmd = "source OPENRC && nova image-list|grep image-%s|awk '{print $2}'"%vm_name + assert False, "wait image create image-%s fail" % (vm_name) + ori_cmd = "source OPENRC && nova image-list|grep image-%s|awk '{print $2}'" % vm_name rs = shell_operator.ssh_exec(ssh, ori_cmd) return "".join(rs[1]).strip() + def get_all_curvevm_active_num(num): - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) starttime = time.time() while time.time() - starttime < 600: - ori_cmd = "source OPENRC && nova list |grep %s | grep ACTIVE | wc -l"%config.vm_prefix - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"get vm status fail" + ori_cmd = "source OPENRC && nova list |grep %s | grep ACTIVE | wc -l" % config.vm_prefix + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "get vm status fail" if int("".join(rs[1]).strip()) == num: break else: time.sleep(10) active_num = "".join(rs[1]).strip() - ori_cmd = "source OPENRC && nova list |grep %s | awk '{print $2}'"%config.vm_prefix - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"get vm uuid fail" + ori_cmd = "source OPENRC && nova list |grep %s | awk '{print $2}'" % config.vm_prefix + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "get vm uuid fail" for uuid in rs[1]: uuid = uuid.strip() status = "up" cmd = "source OPENRC && nova show %s |grep os-server-status |awk \'{print $4}\'" % uuid st = shell_operator.ssh_exec(ssh, cmd) status = "".join(st[1]).strip() - assert status == "up","get vm status fail,not up.is %s,current vm id is %s"%(status,uuid) + assert status == "up", "get vm status fail,not up.is %s,current vm id is %s" % ( + status, uuid) return active_num + def init_create_curve_vm(num): image_id = config.image_id salt = ''.join(random.sample(string.ascii_letters + string.digits, 8)) - logger.info("vm name is thrash-%s"%salt) - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) + 
logger.info("vm name is thrash-%s" % salt) + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) ori_cmd = "source OPENRC && nova boot --flavor 400 --image %s --vnc-password 000000 --availability-zone %s \ --key-name cyh --nic vpc-net=ff89c80a-585d-4b19-992a-462f4d2ddd27:77a410be-1cf4-4992-8894-0c0bc67f5e48 \ - --meta use-vpc=True --meta instance_image_type=curve thrash-%s"%(config.image_id,config.avail_zone,salt) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + --meta use-vpc=True --meta instance_image_type=curve thrash-%s" % (config.image_id, config.avail_zone, salt) + rs = shell_operator.ssh_exec(ssh, ori_cmd) logger.info("exec cmd %s" % ori_cmd) - assert rs[3] == 0,"create vm fail,return is %s"%rs[1] - vm_name = "thrash-%s"%salt + assert rs[3] == 0, "create vm fail,return is %s" % rs[1] + vm_name = "thrash-%s" % salt starttime = time.time() - ori_cmd = "source OPENRC && nova list|grep %s|awk '{print $6}'"%vm_name + ori_cmd = "source OPENRC && nova list|grep %s|awk '{print $6}'" % vm_name while time.time() - starttime < 600: rs = shell_operator.ssh_exec(ssh, ori_cmd) if "".join(rs[1]).strip() == "ACTIVE": break elif "".join(rs[1]).strip() == "ERROR": - assert False,"create vm %s fail"%(vm_name) + assert False, "create vm %s fail" % (vm_name) else: time.sleep(10) if "".join(rs[1]).strip() != "ACTIVE": - assert False,"wait vm ok %s fail"%(vm_name) + assert False, "wait vm ok %s fail" % (vm_name) new_image_id = create_vm_image(vm_name) config.vm_prefix = vm_name - for i in range(1,num): + for i in range(1, num): ori_cmd = "source OPENRC && nova boot --flavor 400 --image %s --vnc-password 000000 --availability-zone %s \ --key-name cyh --nic vpc-net=ff89c80a-585d-4b19-992a-462f4d2ddd27:77a410be-1cf4-4992-8894-0c0bc67f5e48 \ - --meta use-vpc=True --meta instance_image_type=curve thrash-%s-%d"%(new_image_id,config.avail_zone,salt,i) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"create vm fail,return is %s"%rs[1] + --meta use-vpc=True --meta instance_image_type=curve thrash-%s-%d" % (new_image_id, config.avail_zone, salt, i) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "create vm fail,return is %s" % rs[1] starttime = time.time() while time.time() - starttime < 300: active_num = int(get_all_curvevm_active_num(num)) @@ -2067,28 +2320,32 @@ def init_create_curve_vm(num): break else: time.sleep(10) - assert active_num == num,"some vm are abnormal,%d is acitve"%active_num + assert active_num == num, "some vm are abnormal,%d is acitve" % active_num + def reboot_curve_vm(): - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - ori_cmd = "vm=`source OPENRC && nova list |grep %s |awk '{print $2}'`;source OPENRC;for i in $vm;do nova reboot $i;done "%config.vm_prefix - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"reboot curve vm fail" + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + ori_cmd = "vm=`source OPENRC && nova list |grep %s |awk '{print $2}'`;source OPENRC;for i in $vm;do nova reboot $i;done " % config.vm_prefix + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "reboot curve vm fail" + def clean_curve_data(): ori_cmd = "bash detach_thrash.sh" - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"detach thrash vol fail,rs is %s"%rs[1] - ori_cmd = "vm=`source OPENRC && nova list|grep %s | awk '{print $2}'`;source OPENRC;for i in 
$vm;do nova delete $i;done"%config.vm_prefix - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"delete vm fail,rs is %s"%rs[1] - ori_cmd = "source OPENRC && nova image-list |grep image-%s | awk '{print $2}'"%config.vm_prefix - rs = shell_operator.ssh_exec(ssh,ori_cmd) + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "detach thrash vol fail,rs is %s" % rs[1] + ori_cmd = "vm=`source OPENRC && nova list|grep %s | awk '{print $2}'`;source OPENRC;for i in $vm;do nova delete $i;done" % config.vm_prefix + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "delete vm fail,rs is %s" % rs[1] + ori_cmd = "source OPENRC && nova image-list |grep image-%s | awk '{print $2}'" % config.vm_prefix + rs = shell_operator.ssh_exec(ssh, ori_cmd) image_id = "".join(rs[1]).strip() - ori_cmd = "source OPENRC && nova image-delete %s"%(image_id) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"delete image fail,rs is %s"%rs + ori_cmd = "source OPENRC && nova image-delete %s" % (image_id) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "delete image fail,rs is %s" % rs time.sleep(30) ori_cmd = "curve_ops_tool list -fileName=/nova |grep Total" rs = shell_operator.ssh_exec(ssh, ori_cmd) @@ -2097,46 +2354,53 @@ def clean_curve_data(): else: ori_cmd = "curve_ops_tool list -fileName=/nova" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.error("No deleted files: %s"%rs[1]) - assert False,"vm or image not be deleted" + logger.error("No deleted files: %s" % rs[1]) + assert False, "vm or image not be deleted" + def do_thrasher(action): - #start level1 + # start level1 if type(action) is types.StringType: - logger.debug("开始启动故障XXXXXXXXXXXXXXXXXXX %s XXXXXXXXXXXXXXXXXXXXXXXXX"%action) + logger.debug( + "Starting fault injection XXXXXXXXXXXXXXXXXXX %s XXXXXXXXXXXXXXXXXXXXXXXXX" % action) globals()[action]() else: - logger.debug("开始启动故障XXXXXXXXXXXXXXXXXXX %s,%s XXXXXXXXXXXXXXXXXXXXXX"%(action[0],str(action[1]))) + logger.debug("Starting fault injection XXXXXXXXXXXXXXXXXXX %s,%s XXXXXXXXXXXXXXXXXXXXXX" % ( + action[0], str(action[1]))) globals()[action[0]](action[1]) + def start_retired_and_down_chunkservers(): for host in config.chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) try: - cs_status = get_chunkserver_status(host) - down_cs = cs_status["down"] - if down_cs == []: - continue - logger.debug("down_cs is %s"%down_cs) - for cs in down_cs: - if get_cs_copyset_num(host,cs) == 0: - ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat"%(cs) - rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"rm chunkserver%d chunkserver.dat fail"%cs - ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d"%cs - logger.debug("exec %s"%ori_cmd) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"start chunkserver fail,error is %s"%rs[1] - time.sleep(2) - ori_cmd = "ps -ef|grep -v grep | grep -w chunkserver%d | awk '{print $2}' && \ + cs_status = get_chunkserver_status(host) + down_cs = cs_status["down"] + if down_cs == []: + continue + logger.debug("down_cs is %s" % down_cs) + for cs in down_cs: + if get_cs_copyset_num(host, cs) == 0: + ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat" % ( + cs) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "rm chunkserver%d chunkserver.dat fail" % cs + ori_cmd = "sudo 
/home/nbs/chunkserver_ctl.sh start %d" % cs + logger.debug("exec %s" % ori_cmd) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "start chunkserver fail,error is %s" % rs[1] + time.sleep(2) + ori_cmd = "ps -ef|grep -v grep | grep -w chunkserver%d | awk '{print $2}' && \ ps -ef|grep -v grep | grep -w /etc/curve/chunkserver.conf.%d |grep -v sudo | awk '{print $2}'" % (cs, cs) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - if rs[1] == []: - assert False,"up chunkserver fail" + rs = shell_operator.ssh_exec(ssh, ori_cmd) + if rs[1] == []: + assert False, "up chunkserver fail" except: raise ssh.close() + def get_level_list(level): if level == "level1": return config.level1 diff --git a/robot/Resources/keywords/snapshot_operate.py b/robot/Resources/keywords/snapshot_operate.py index f21c2be296..d902cd0737 100644 --- a/robot/Resources/keywords/snapshot_operate.py +++ b/robot/Resources/keywords/snapshot_operate.py @@ -18,8 +18,9 @@ def create_curve_file_for_snapshot(file_name=config.snapshot_file_name, user_nam else: return rc + def create_curve_file_for_snapshot_delete(file_name="/lc-delete", user_name=config.user_name, size=config.size, - pass_word=config.pass_word): + pass_word=config.pass_word): curvefs = swig_operate.LibCurve() rc = curvefs.libcurve_create(file_name, user_name, size, pass_word) if rc != 0: @@ -28,9 +29,11 @@ def create_curve_file_for_snapshot_delete(file_name="/lc-delete", user_name=conf else: return rc + def delete_curve_file_for_shanpshot(): curvefs = swig_operate.LibCurve() - rc = curvefs.libcurve_delete(config.snapshot_file_name, config.user_name, config.pass_word) + rc = curvefs.libcurve_delete( + config.snapshot_file_name, config.user_name, config.pass_word) if rc != 0: logger.info("delete_curve_file_for_shanpshot file fail. rc = %s" % rc) return rc @@ -44,21 +47,25 @@ def write_curve_file_for_snapshot(file_name=config.snapshot_file_name, user_name buf=config.buf, offset=config.offset, length=config.length): curvefs = swig_operate.LibCurve() fd = curvefs.libcurve_open(file_name, user_name, pass_word) - logger.info("fd=%s, buf=%s, offset=%s, length=%s" % (fd, buf, offset, length)) + logger.info("fd=%s, buf=%s, offset=%s, length=%s" % + (fd, buf, offset, length)) rs = curvefs.libcurve_write(fd, buf, offset, length) if rs < 0: - logger.error("write_curve_file_for_snapshot libcurve_write file fail. rc = %s" % rs) + logger.error( + "write_curve_file_for_snapshot libcurve_write file fail. rc = %s" % rs) return rs raise AssertionError rc = curvefs.libcurve_close(fd) if rc != 0: - logger.info("write_curve_file_for_snapshot close libcurve file fail. rc = %s" % rc) + logger.info( + "write_curve_file_for_snapshot close libcurve file fail. 
rc = %s" % rc) return rc def read_4k_length_curve_file(): curvefs = swig_operate.LibCurve() - fd = curvefs.libcurve_open(config.snapshot_file_name, config.user_name, config.pass_word) + fd = curvefs.libcurve_open( + config.snapshot_file_name, config.user_name, config.pass_word) content = curvefs.libcurve_read(fd, "", 0, 4096) return content @@ -68,22 +75,26 @@ def modify_curve_file_for_snapshot(file_name=config.snapshot_file_name, user_nam curvefs = swig_operate.LibCurve() fd = curvefs.libcurve_open(file_name, user_name, pass_word) buf = "tttttttt" * 512 - logger.info("fd=%s, buf=%s, offset=%s, length=%s" % (fd, buf, offset, length)) + logger.info("fd=%s, buf=%s, offset=%s, length=%s" % + (fd, buf, offset, length)) rs = curvefs.libcurve_write(fd, buf, offset, length) if rs < 0: - logger.error("write_curve_file_for_snapshot libcurve_write file fail. rc = %s" % rs) + logger.error( + "write_curve_file_for_snapshot libcurve_write file fail. rc = %s" % rs) return rs raise AssertionError rc = curvefs.libcurve_close(fd) if rc != 0: - logger.info("write_curve_file_for_snapshot close libcurve file fail. rc = %s" % rc) + logger.info( + "write_curve_file_for_snapshot close libcurve file fail. rc = %s" % rc) return rc def snapshot_normal_create(file_name=config.snapshot_file_name, user_name=config.user_name, password=config.pass_word): client = snapshot_client.CurveSnapshot() seq = client.create_snapshot(file_name, user_name, password) - logger.info("create_curve_file_for_snapshot file and return seq.value = %s" % seq) + logger.info( + "create_curve_file_for_snapshot file and return seq.value = %s" % seq) return seq @@ -93,7 +104,8 @@ def snapshot_create_with_not_exist_file(file_name="/notexistfile", user_name=con rc = client.create_snapshot(file_name, user_name, password) logger.info("snapshot_create_with_not_exist_file , return rc = %s" % rc) if rc != 0: - logger.error("snapshot_create_with_not_exist_file file fail. rc = %s" % rc) + logger.error( + "snapshot_create_with_not_exist_file file fail. rc = %s" % rc) return rc @@ -103,25 +115,28 @@ def snapshot_create_with_empty_str_file(file_name=" ", user_name=config.user_nam rc = client.create_snapshot(file_name, user_name, password) logger.info("snapshot_create_with_empty_str_file , return rc = %s" % rc) if rc != 0: - logger.error("snapshot_create_with_empty_str_file file fail. rc = %s" % rc) + logger.error( + "snapshot_create_with_empty_str_file file fail. rc = %s" % rc) return rc -# "特殊字符`-=[];',./ ~!@#$%^&*()_+{}|:\"<>?" -# "特殊 字符`-=[]\\;',./ ~!@#$%^&*()_+{}|:\"<>?" +# "Special Characters`-=[];',./ ~!@#$%^&*()_+{}|:\"<>?" +# "Special Characters`-=[]\\;',./ ~!@#$%^&*()_+{}|:\"<>?" def snapshot_create_with_special_file_name(file_name="/特殊 字符`-=[]\\;',./ ~!@#$%^&*()_+{}|:\"<>?", user_name=config.user_name, password=config.pass_word): client = snapshot_client.CurveSnapshot() rc = client.create_snapshot(file_name, user_name, password) logger.info("snapshot_create_with_special_file_name , return rc = %s" % rc) if rc != 0: - logger.error("snapshot_create_with_special_file_name file fail. rc = %s" % rc) + logger.error( + "snapshot_create_with_special_file_name file fail. 
rc = %s" % rc) return rc def get_sanpshot_info(seq): client = snapshot_client.CurveSnapshot() - finfo = client.get_snapshot(config.snapshot_file_name, config.user_name, config.pass_word, seq) + finfo = client.get_snapshot( + config.snapshot_file_name, config.user_name, config.pass_word, seq) # logger.info("get_sanpshot_info , file snapshot info.status = %s, owner = %s, filename = %s, " # "length = %s, chunksize = %s, seqnum = %s, segmentsize = %s , parentid = %s, " # "filetype = %s, ctime = %s" % ( @@ -131,25 +146,28 @@ def get_sanpshot_info(seq): return finfo -# 创建并获取快照文件信息 +# Create and obtain snapshot file information def create_snapshot_and_get_snapshot_info(file_name=config.snapshot_file_name, user_name=config.user_name, password=config.pass_word): client = snapshot_client.CurveSnapshot() seq = client.create_snapshot(file_name, user_name, password) - logger.info("create_snapshot_and_get_snapshot_info create snapshot success. seq = %s" % seq.value) + logger.info( + "create_snapshot_and_get_snapshot_info create snapshot success. seq = %s" % seq.value) finfo = client.get_snapshot(file_name, user_name, password, seq) return finfo -# 正常获取快照文件分配信息 +# Obtain snapshot file allocation information normally def get_normal_snapshot_segment_info(file_name=config.snapshot_file_name, user_name=config.user_name, password=config.pass_word): seq = snapshot_normal_create(file_name, user_name, password) client = snapshot_client.CurveSnapshot() offset = curvesnapshot.type_uInt64_t() offset.value = 0 - seginfo = client.get_snapshot_SegmentInfo(file_name, user_name, password, seq, offset) - logger.info("get_normal_snapshot_segment_info seq = %s, seginfo = %s" % seq % seginfo) + seginfo = client.get_snapshot_SegmentInfo( + file_name, user_name, password, seq, offset) + logger.info( + "get_normal_snapshot_segment_info seq = %s, seginfo = %s" % seq % seginfo) return seginfo @@ -159,7 +177,7 @@ def get_normal_chunk_info(file_name=config.snapshot_file_name, user_name=config. 
client = snapshot_client.CurveSnapshot() chunkinfo = client.get_chunk_Info(seginfo.chunkvec[0]) logger.info("get_normal_chunkInfo chunkInfo info = %s" % chunkinfo) - return chunkinfo # 可以对chunInfo.chunkSn进行断言验证 + return chunkinfo # Can perform assertion validation on chunInfo.chunkSn def get_chunk_info_with_chunk_id_info(idinfo): @@ -175,7 +193,7 @@ def get_snapshot_first_segment_info(seq): offset = curvesnapshot.type_uInt64_t() offset.value = 0 seginfo = client.get_snapshot_SegmentInfo(config.snapshot_file_name, config.user_name, config.pass_word, seq, - offset) + offset) # logger.info( # "get_snapshot_first_segment_info seq = %s, segmsize = %s, chunksize = %s, startoffset = %s, chunkvecsize = %s, " # % ( @@ -220,7 +238,8 @@ def read_chunk_snapshot(idinfo, seq): buf = "tttttttt" * 512 rc = client.read_chunk_snapshot(idinfo, seq, offset, len, buf) if rc != len.value: - logger.info("read_chunk_snapshot fail , expect len = %s, real len = %s" % (len.value, rc)) + logger.info( + "read_chunk_snapshot fail , expect len = %s, real len = %s" % (len.value, rc)) return rc logger.info("read_chunk_snapshot ,return buf = %s" % buf) return buf @@ -228,14 +247,16 @@ def read_chunk_snapshot(idinfo, seq): def check_snapshot_status(seq): client = snapshot_client.CurveSnapshot() - status = client.check_snapshot_status(config.snapshot_file_name, config.user_name, config.pass_word, seq) + status = client.check_snapshot_status( + config.snapshot_file_name, config.user_name, config.pass_word, seq) logger.info("check_snapshot_status rc = %s " % status) return status def delete_file_snapshot(seq): client = snapshot_client.CurveSnapshot() - rc = client.delete_snapshot(config.snapshot_file_name, config.user_name, config.pass_word, seq) + rc = client.delete_snapshot( + config.snapshot_file_name, config.user_name, config.pass_word, seq) return rc @@ -253,7 +274,8 @@ def create_clone_chunk_with_s3_object(chunkidinfo): seq.value = 1 correctseq = curvesnapshot.type_uInt64_t() correctseq.value = 0 - rc = client.create_clone_chunk(config.snapshot_s3_object_location, idinfo, seq, correctseq, chunksize) + rc = client.create_clone_chunk( + config.snapshot_s3_object_location, idinfo, seq, correctseq, chunksize) return rc diff --git a/robot/curve_choas.txt b/robot/curve_choas.txt index ff39c335e5..0f9b389152 100644 --- a/robot/curve_choas.txt +++ b/robot/curve_choas.txt @@ -37,7 +37,7 @@ test one volume perf stop rwio perf test -#启动大压力情况下的混沌测试:分等级进行随机故障注入。每次注入完成后恢复集群所有业务,目前设置100次的全流程注入 +# Conduct chaos testing under high stress: Inject faults of various levels randomly. Restore all cluster operations after each injection. 
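Each round draws one fault at random from each of the three severity pools (choas_level1/2/3 below) and dispatches it through the do thrasher keyword, so a single round can stack three independent failures. 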
Currently set for 100 rounds of full injection inject cluster chaos test [Tags] P2 chaos longtime @@ -47,17 +47,17 @@ inject cluster chaos test ${num} evaluate int(10) init create curve vm ${num} :FOR ${i} IN RANGE 10 - log "启动第"${i}"轮故障" + log "Starting Round "${i}" of Fault Injection" ${choas1} evaluate random.choice($choas_level1) random - log "开始启动一级故障" + log "Starting Level 1 Fault" do thrasher ${choas1} sleep 30 ${choas2} evaluate random.choice($choas_level2) random - log "开始启动二级故障" + log "Starting Level 2 Fault" do thrasher ${choas2} sleep 30 ${choas3} evaluate random.choice($choas_level3) random - log "开始启动三级故障" + log "Starting Level 3 Fault" do thrasher ${choas3} sleep 30 clean env diff --git a/robot/curve_robot.txt b/robot/curve_robot.txt index 8709a96b6e..9f49ca2caa 100644 --- a/robot/curve_robot.txt +++ b/robot/curve_robot.txt @@ -1628,7 +1628,7 @@ test kill chunkserver one check loop read ${new_fd} [Teardown] file clean ${new_fd} -# create snapshot 相关用例 +# Create snapshot related use cases create snapshot with notexist file [Tags] P0 base first release test-snapshot @@ -1698,7 +1698,7 @@ create snapshot with nomal file and check first chunk snapshot [Teardown] delete curve file for shanpshot -# 创建文件->写文件->创建快照->修改文件->读快照验证(修改前数据)->删除重新快照->验证快照数据(修改后数据) +# Create file -> write file -> create snapshot -> modify file -> read snapshot to verify (pre-modification data) -> delete the snapshot and snapshot again -> verify snapshot data (post-modification data) create snapshot and check chunk snapshot after cow [Tags] P0 base first release test-snapshot ${rc} create curve file for snapshot @@ -1744,7 +1744,7 @@ create snapshot repeat should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# getsnapshot info 用例 +# Getsnapshot info use cases get empty file snapshot info [Tags] P0 base first release test-snapshot @@ -1871,7 +1871,7 @@ delete snapshoting curve file should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# getsegmentinfo 相关用例 +# Use cases related to getsegmentinfo check snapshot segmentinfo after modify file [Tags] P0 base first release test-snapshot @@ -1981,7 +1981,7 @@ get empty file snapshot segmentinfo should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# snapshot chunkinfo 用例验证 +# Snapshot chunkinfo use case validation check empty file snapshot chunkinfo after modify file [Tags] P0 base first release test-snapshot @@ -2038,10 +2038,10 @@ get snapshot chunkinfo with notexist chunidinfo write curve file for snapshot ${seq} snapshot normal create ${seginfo} get snapshot first segment info ${seq} - # 修改chunkidinfo + # Modify chunkidinfo ${seginfo.chunkvec[0].cpid_.value} evaluate int(66) ${chunkinfo} get chunk info with chunk id info ${seginfo.chunkvec[0]} - #TODO: 此处需要判断错误,当前是死循环,不停轮询查询id信息 + # TODO: This should be detected as an error; the client currently spins in a dead loop, endlessly polling for the chunk ID information ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} @@ -2063,7 +2063,7 @@ check snapshot chunkinfo after delete snapshot ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} - # 此处应该再重新获取下segmentinfo, chunkvec[0]应该不存在 + # The segmentinfo should be fetched again here; chunkvec[0] should no longer exist ${chunkinfo} get chunk info with chunk id info ${seginfo.chunkvec[0]} should be equal ${chunkinfo.snSize.value} ${expect_size} should be equal ${chunkinfo.chunkSn[0]} ${expect_first_sn} @@ -2071,7 +2071,7 @@ check snapshot chunkinfo after delete snapshot [Teardown] delete curve file for shanpshot -# read snapshot chunk 用例 CLDCFS-1249 +# Read snapshot chunk use cases CLDCFS-1249 read snapshot chunk with notexist idinfo [Tags] P0 base first release no-need @@ -2081,10 +2081,10 @@ read snapshot chunk with notexist idinfo write curve file for snapshot ${seq} snapshot normal create ${seginfo} get snapshot first segment info ${seq} - # 修改chunkidinfo + # Modify chunkidinfo ${seginfo.chunkvec[0].cpid_.value} evaluate int(66) ${content} read chunk snapshot ${seginfo.chunkvec[0]} ${seq} - # TODO:当前客户端死循环打印错误,此处校验结果应该返回错误 + # TODO: The client currently loops forever printing errors; this check should return an error ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} @@ -2101,7 +2101,7 @@ read snapshot chunk with error seq ${seginfo} get snapshot first segment info ${seq} ${seq.value} evaluate int(8) ${content} read chunk snapshot ${seginfo.chunkvec[0]} ${seq} - # 此处校验结果应该返回错误 + # This check should return an error here ${expect_rst} evaluate int(-6) should be equal ${content} ${expect_rst} ${seq.value} evaluate int(1) @@ -2110,7 +2110,7 @@ read snapshot chunk with error seq should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# 查询快照状态用例 +# Query snapshot status use cases check empty file snapshot status [Tags] P0 base first release test-snapshot ${rc} create curve file for snapshot @@ -2197,7 +2197,7 @@ check snapshot status use error seq [Teardown] delete curve file for shanpshot -# 删除快照相关用例 +# Delete snapshot related use cases repeat delete snapshot [Tags] P0 base first release test-snapshot ${rc} create curve file for snapshot @@ -2230,7 +2230,7 @@ delete snapshot use error seq should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# 删除chunk快照(当前无限重试,需要调用方设置重试次数) CLDCFS-1254 +# Delete chunk snapshot (currently retries infinitely; the caller needs to set a retry count) CLDCFS-1254 delete chunk snapshot with snapshot seq [Tags] P0 base first release no-need ${rc} create curve file for snapshot @@ -2243,7 +2243,7 @@ delete chunk snapshot with snapshot seq ${rc} delete chunk snapshot with correct sn ${seginfo.chunkvec[0]} ${seq} should be equal ${rc} ${expect_rc} ${content} read chunk snapshot ${seginfo.chunkvec[0]} ${seq} - # 此处判断返回结果是否为错误 + # Check here whether the returned result is an error ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} @@ -2291,8 +2291,8 @@ repeat delete chunk snapshot [Teardown] delete curve file for shanpshot -# 创建clone&recover -# 步骤:创建文件、写文件、创建快照记录seq,触发cow,获取快照信息(版本号),createclonechunk(指定s3上对象,correctedseq=快照seq),恢复快照,验证chunk数据是否为s3数据 +# Create clone&recover +# Steps: create a file, write it, create a snapshot and record its seq, trigger COW, obtain the snapshot info (version number), create a clone chunk (specifying an object on s3, with correctedseq = the snapshot seq), recover the snapshot, then verify that the chunk data is the s3 data
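# A minimal Python sketch of the steps above, using helpers that appear in
# robot/Resources/keywords/snapshot_operate.py; recover_chunk_data() stands in
# for the `recover chunk data` keyword and is hypothetical here.
# rc = create_curve_file_for_snapshot()                    # create the file
# write_curve_file_for_snapshot()                          # write data
# seq = snapshot_normal_create()                           # snapshot, record seq
# modify_curve_file_for_snapshot()                         # modify -> trigger COW
# seginfo = get_snapshot_first_segment_info(seq)           # snapshot/segment info
# rc = create_clone_chunk_with_s3_object(seginfo.chunkvec[0])
# rc = recover_chunk_data(seginfo.chunkvec[0])             # hypothetical helper
# content = read_chunk_snapshot(seginfo.chunkvec[0], seq)  # expect the s3 data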
create clone and recover chunk [Tags] P0 base first release test-snapshot ${rc} create curve file for snapshot @@ -2311,7 +2311,7 @@ create clone and recover chunk should be equal ${rc} ${expect_rc} ${rc} recover chunk data ${seginfo.chunkvec[0]} should be equal ${rc} ${expect_rc} - # check数据 + # Check data ${content} read chunk snapshot ${seginfo.chunkvec[0]} ${seq} ${expect_content} evaluate str("aaaaaaaa")*512 should be equal ${content} ${expect_content} diff --git a/src/chunkserver/chunk_closure.cpp b/src/chunkserver/chunk_closure.cpp index 09b259ae7e..ae00f97a66 100644 --- a/src/chunkserver/chunk_closure.cpp +++ b/src/chunkserver/chunk_closure.cpp @@ -21,6 +21,7 @@ */ #include "src/chunkserver/chunk_closure.h" + #include namespace curve { @@ -28,21 +29,22 @@ namespace chunkserver { void ChunkClosure::Run() { /** - * 在Run结束之后,自动析构自己,这样可以避免 - * 析构函数漏调 + * Destroys itself automatically after Run() finishes, so the destructor + * call is never missed */ std::unique_ptr selfGuard(this); /** - * 确保done能够被调用,目的是保证rpc一定会返回 + * Make sure done is invoked, so that the rpc is guaranteed to return */ brpc::ClosureGuard doneGuard(request_->Closure()); /** - * 尽管在request propose给copyset的之前已经 - * 对leader身份进行了确认,但是在copyset处理 - * request的时候,当前copyset的身份还是有可能 - * 变成非leader,所以需要判断ChunkClosure被调 - * 用的时候,request的status,如果 ok,说明是 - * 正常的apply处理,否则将请求转发 + * Although the leader identity is confirmed before the request is + * proposed to the copyset, the copyset may still step down to a + * non-leader while it is processing the request. So when ChunkClosure + * is invoked we must check the request status: if it is ok, this is a + * normal apply; otherwise the request is forwarded */ if (status().ok()) { return; @@ -61,13 +63,13 @@ void ScanChunkClosure::Run() { case CHUNK_OP_STATUS_CHUNK_NOTEXIST: LOG(WARNING) << "scan chunk failed, read chunk not exist. " << request_->ShortDebugString(); - break; + break; case CHUNK_OP_STATUS_FAILURE_UNKNOWN: LOG(ERROR) << "scan chunk failed, read chunk unknown failure. " << request_->ShortDebugString(); - break; - default: - break; + break; + default: + break; } } diff --git a/src/chunkserver/chunk_closure.h b/src/chunkserver/chunk_closure.h index e2d76b7174..6700527c26 100755 --- a/src/chunkserver/chunk_closure.h +++ b/src/chunkserver/chunk_closure.h @@ -24,20 +24,23 @@ #define SRC_CHUNKSERVER_CHUNK_CLOSURE_H_ #include + #include -#include "src/chunkserver/op_request.h" #include "proto/chunk.pb.h" +#include "src/chunkserver/op_request.h" namespace curve { namespace chunkserver { /** - * 携带op request的所有上下文的closure,通过braft::Task传递给raft处理, - * 调用会有两个地方: - * 1.op request正常的被raft处理,最后on apply的时候会调用返回 - * 2.op request被打包给raft处理之后,但是还没有来得及处理就出错了,例如leader - * step down变为了非leader,那么会明确的提前向client返回错误 + * A closure that carries the full context of an op request; it is handed to + * raft for processing via a braft::Task. It is invoked in two places: + * 1. The op request is processed by raft normally, and the closure is + * invoked when the request is finally applied + * 2. The op request was handed to raft, but an error occurred before it + * could be processed (e.g. the leader stepped down and became a non-leader); + * in that case an error is explicitly returned to the client ahead of time */ class ChunkClosure : public braft::Closure { public: @@ -49,37 +52,37 @@ class ChunkClosure : public braft::Closure { void Run() override; public: - // 包含了op request 的上下文信息 + // Context of the op request std::shared_ptr request_; }; class ScanChunkClosure : public google::protobuf::Closure { public: - ScanChunkClosure(ChunkRequest *request, ChunkResponse *response) : - request_(request), response_(response) {} + ScanChunkClosure(ChunkRequest* request, ChunkResponse* response) + : request_(request), response_(response) {} ~ScanChunkClosure() = default; void Run() override; public: - ChunkRequest *request_; - ChunkResponse *response_; + ChunkRequest* request_; + ChunkResponse* response_; }; class SendScanMapClosure : public google::protobuf::Closure { public: - SendScanMapClosure(FollowScanMapRequest * request, - FollowScanMapResponse *response, - uint64_t timeout, - uint32_t retry, - uint64_t retryIntervalUs, - brpc::Controller* cntl, - brpc::Channel *channel) : - request_(request), response_(response), - rpcTimeoutMs_(timeout), retry_(retry), - retryIntervalUs_(retryIntervalUs), - cntl_(cntl), channel_(channel) {} + SendScanMapClosure(FollowScanMapRequest* request, + FollowScanMapResponse* response, uint64_t timeout, + uint32_t retry, uint64_t retryIntervalUs, + brpc::Controller* cntl, brpc::Channel* channel) + : request_(request), + response_(response), + rpcTimeoutMs_(timeout), + retry_(retry), + retryIntervalUs_(retryIntervalUs), + cntl_(cntl), + channel_(channel) {} ~SendScanMapClosure() = default; @@ -89,13 +92,13 @@ class SendScanMapClosure : public google::protobuf::Closure { void Guard(); public: - FollowScanMapRequest *request_; - FollowScanMapResponse *response_; + FollowScanMapRequest* request_; + FollowScanMapResponse* response_; uint64_t rpcTimeoutMs_; uint32_t retry_; uint64_t retryIntervalUs_; - brpc::Controller *cntl_; - brpc::Channel *channel_; + brpc::Controller* cntl_; + brpc::Channel* channel_; }; } // namespace chunkserver diff --git a/src/chunkserver/chunk_service.cpp b/src/chunkserver/chunk_service.cpp index d6e9034641..85d3d241a5 100755 --- a/src/chunkserver/chunk_service.cpp +++ b/src/chunkserver/chunk_service.cpp @@ -22,31 +22,30 @@ #include "src/chunkserver/chunk_service.h" -#include #include #include +#include -#include #include +#include #include +#include "include/curve_compiler_specific.h" +#include "src/chunkserver/chunk_service_closure.h" +#include "src/chunkserver/chunkserver_metrics.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/chunkserver_metrics.h" #include "src/chunkserver/op_request.h" -#include "src/chunkserver/chunk_service_closure.h" #include "src/common/fast_align.h" -#include "include/curve_compiler_specific.h" - namespace curve { namespace chunkserver { using ::curve::common::is_aligned; ChunkServiceImpl::ChunkServiceImpl( - const ChunkServiceOptions& chunkServiceOptions, - const std::shared_ptr& epochMap) + const ChunkServiceOptions& chunkServiceOptions, + const std::shared_ptr& epochMap) : chunkServiceOptions_(chunkServiceOptions), copysetNodeManager_(chunkServiceOptions.copysetNodeManager), inflightThrottle_(chunkServiceOptions.inflightThrottle), @@ -55,15 +54,11 @@ ChunkServiceImpl::ChunkServiceImpl( maxChunkSize_ = copysetNodeManager_->GetCopysetNodeOptions().maxChunkSize; }
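// The RPC handlers below all share one request-processing shape: allocate a
// ChunkServiceClosure bound to inflightThrottle_ (CHECK-ed, since allocation
// failure is fatal), wrap it in a brpc::ClosureGuard so the done callback
// always runs, validate the request and look up the copyset, then delegate
// to the matching OpRequest subclass via Process().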
copysetNodeManager_->GetCopysetNodeOptions().maxChunkSize; } -void ChunkServiceImpl::DeleteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::DeleteChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -76,7 +71,7 @@ void ChunkServiceImpl::DeleteChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -86,24 +81,17 @@ void ChunkServiceImpl::DeleteChunk(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::WriteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::WriteChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -116,11 +104,11 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); DVLOG(9) << "Get write I/O request, op: " << request->optype() - << " offset: " << request->offset() - << " size: " << request->size() << " buf header: " - << *(unsigned int *) cntl->request_attachment().to_string().c_str() + << " offset: " << request->offset() << " size: " << request->size() + << " buf header: " + << *(unsigned int*)cntl->request_attachment().to_string().c_str() << " attachement size " << cntl->request_attachment().size(); if (request->has_epoch()) { @@ -134,7 +122,7 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, } } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); DVLOG(9) << "I/O request, op: " << request->optype() @@ -144,7 +132,7 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -154,24 +142,18 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + response, doneGuard.release()); req->Process(); } -void 
ChunkServiceImpl::CreateCloneChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::CreateCloneChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, + Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -184,7 +166,8 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - // 请求创建的chunk大小和copyset配置的大小不一致 + // The chunk size requested for creation does not match the size configured + // for copyset if (request->size() != maxChunkSize_) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); DVLOG(9) << "Invalid chunk size: " << request->optype() @@ -193,7 +176,7 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -203,19 +186,15 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared( + nodePtr, controller, request, response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::CreateS3CloneChunk(RpcController* controller, - const CreateS3CloneChunkRequest* request, - CreateS3CloneChunkResponse* response, - Closure* done) { +void ChunkServiceImpl::CreateS3CloneChunk( + RpcController* controller, const CreateS3CloneChunkRequest* request, + CreateS3CloneChunkResponse* response, Closure* done) { (void)controller; (void)request; brpc::ClosureGuard doneGuard(done); @@ -223,15 +202,11 @@ void ChunkServiceImpl::CreateS3CloneChunk(RpcController* controller, LOG(INFO) << "Invalid request, serverSide Not implement yet"; } -void ChunkServiceImpl::ReadChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::ReadChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -244,7 +219,7 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "I/O request, op: " << request->optype() @@ -254,7 +229,7 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -264,25 +239,17 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, 
return; } - std::shared_ptr req = - std::make_shared(nodePtr, - chunkServiceOptions_.cloneManager, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = std::make_shared( + nodePtr, chunkServiceOptions_.cloneManager, controller, request, + response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::RecoverChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::RecoverChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -295,7 +262,7 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "I/O request, op: " << request->optype() @@ -305,7 +272,7 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -315,26 +282,19 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // RecoverChunk请求和ReadChunk请求共用ReadChunkRequest - std::shared_ptr req = - std::make_shared(nodePtr, - chunkServiceOptions_.cloneManager, - controller, - request, - response, - doneGuard.release()); + // RecoverChunk request and ReadChunk request share ReadChunkRequest + std::shared_ptr req = std::make_shared( + nodePtr, chunkServiceOptions_.cloneManager, controller, request, + response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::ReadChunkSnapshot(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, + Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -347,13 +307,13 @@ void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -363,25 +323,17 @@ void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + 
response, doneGuard.release()); req->Process(); }
void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn( - RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); + RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -401,7 +353,7 @@ void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn( return; }
- // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -412,31 +364,26 @@ void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn( return; }
- std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + response, doneGuard.release()); req->Process(); }
/** - * 当前GetChunkInfo在rpc service层定义和Chunk Service分离的, - * 且其并不经过QoS或者raft一致性协议,所以这里没有让其继承 - * OpRequest或者QoSRequest来重新封装,而是直接原地处理掉了 + * GetChunkInfo is currently defined at the rpc service layer, separate + * from Chunk Service, and it goes through neither QoS nor the raft + * consistency protocol. So instead of inheriting from OpRequest or + * QoSRequest and being re-encapsulated, it is handled directly in place */ -void ChunkServiceImpl::GetChunkInfo(RpcController *controller, - const GetChunkInfoRequest *request, - GetChunkInfoResponse *response, - Closure *done) { +void ChunkServiceImpl::GetChunkInfo(RpcController* controller, + const GetChunkInfoRequest* request, + GetChunkInfoResponse* response, + Closure* done) { (void)controller; - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - nullptr, - nullptr, - done); + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, nullptr, nullptr, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -449,10 +396,9 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, return; }
- // 判断copyset是否存在 - auto nodePtr = - copysetNodeManager_->GetCopysetNode(request->logicpoolid(), - request->copysetid()); + // Determine if the copyset exists + auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), + request->copysetid()); if (nullptr == nodePtr) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); LOG(WARNING) << "GetChunkInfo failed, copyset node is not found: " @@ -460,7 +406,7 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, return; }
- // 检查任期和自己是不是Leader + // Check the term and whether this node is the leader if (!nodePtr->IsLeaderTerm()) { PeerId leader = nodePtr->GetLeaderId(); if (!leader.is_empty()) { @@ -476,16 +422,15 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, ret = nodePtr->GetDataStore()->GetChunkInfo(request->chunkid(), &chunkInfo); if (CSErrorCode::Success == ret) { - // 1.成功,此时chunk文件肯定存在 + // 1.
Success, the chunk file must exist at this time response->add_chunksn(chunkInfo.curSn); - if (chunkInfo.snapSn > 0) - response->add_chunksn(chunkInfo.snapSn); + if (chunkInfo.snapSn > 0) response->add_chunksn(chunkInfo.snapSn); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - // 2.chunk文件不存在,返回的版本集合为空 + // 2. Chunk file does not exist, returned version set is empty response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else { - // 3.其他错误 + // 3. Other errors LOG(ERROR) << "get chunk info failed, " << " logic pool id: " << request->logicpoolid() << " copyset id: " << request->copysetid() @@ -497,14 +442,14 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, } } -void ChunkServiceImpl::GetChunkHash(RpcController *controller, - const GetChunkHashRequest *request, - GetChunkHashResponse *response, - Closure *done) { +void ChunkServiceImpl::GetChunkHash(RpcController* controller, + const GetChunkHashRequest* request, + GetChunkHashResponse* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->length())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "GetChunkHash illegal parameter:" @@ -517,10 +462,9 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, return; } - // 判断copyset是否存在 - auto nodePtr = - copysetNodeManager_->GetCopysetNode(request->logicpoolid(), - request->copysetid()); + // Determine if the copyset exists + auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), + request->copysetid()); if (nullptr == nodePtr) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); LOG(WARNING) << "GetChunkHash failed, copyset node is not found: " @@ -531,21 +475,19 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, CSErrorCode ret; std::string hash; - ret = nodePtr->GetDataStore()->GetChunkHash(request->chunkid(), - request->offset(), - request->length(), - &hash); + ret = nodePtr->GetDataStore()->GetChunkHash( + request->chunkid(), request->offset(), request->length(), &hash); if (CSErrorCode::Success == ret) { - // 1.成功 + // 1. Success response->set_hash(hash); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - // 2.chunk文件不存在,返回0的hash值 + // 2. Chunk file does not exist, return a hash value of 0 response->set_hash("0"); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else { - // 3.其他错误 + // 3. 
Other errors LOG(ERROR) << "get chunk hash failed, " << " logic pool id: " << request->logicpoolid() << " copyset id: " << request->copysetid() @@ -557,18 +499,17 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, } } -void ChunkServiceImpl::UpdateEpoch(RpcController *controller, - const UpdateEpochRequest *request, - UpdateEpochResponse *response, - Closure *done) { +void ChunkServiceImpl::UpdateEpoch(RpcController* controller, + const UpdateEpochRequest* request, + UpdateEpochResponse* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); bool success = epochMap_->UpdateEpoch(request->fileid(), request->epoch()); if (success) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); LOG(INFO) << "Update fileId: " << request->fileid() - << " to epoch: " << request->epoch() - << " success."; + << " to epoch: " << request->epoch() << " success."; } else { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD); LOG(WARNING) << "Update fileId: " << request->fileid() @@ -579,7 +520,7 @@ void ChunkServiceImpl::UpdateEpoch(RpcController *controller, bool ChunkServiceImpl::CheckRequestOffsetAndLength(uint32_t offset, uint32_t len) const { - // 检查offset+len是否越界 + // Check if offset+len is out of range if (CURVE_UNLIKELY(offset + len > maxChunkSize_)) { return false; } diff --git a/src/chunkserver/chunk_service.h b/src/chunkserver/chunk_service.h index e15aea389b..04e37feac9 100755 --- a/src/chunkserver/chunk_service.h +++ b/src/chunkserver/chunk_service.h @@ -23,9 +23,9 @@ #ifndef SRC_CHUNKSERVER_CHUNK_SERVICE_H_ #define SRC_CHUNKSERVER_CHUNK_SERVICE_H_ -#include #include #include +#include #include "proto/chunk.pb.h" #include "src/chunkserver/config_info.h" @@ -34,84 +34,71 @@ namespace curve { namespace chunkserver { -using ::google::protobuf::RpcController; using ::google::protobuf::Closure; +using ::google::protobuf::RpcController; class CopysetNodeManager; class ChunkServiceImpl : public ChunkService { public: explicit ChunkServiceImpl(const ChunkServiceOptions& chunkServiceOptions, - const std::shared_ptr &epochMap); + const std::shared_ptr& epochMap); ~ChunkServiceImpl() {} - void DeleteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void ReadChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void WriteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void ReadChunkSnapshot(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void DeleteChunkSnapshotOrCorrectSn(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void CreateCloneChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); + void DeleteChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void ReadChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void WriteChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void ReadChunkSnapshot(RpcController* controller, + const ChunkRequest* request, ChunkResponse* response, + Closure* done); + + void DeleteChunkSnapshotOrCorrectSn(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, 
Closure* done); + + void CreateCloneChunk(RpcController* controller, + const ChunkRequest* request, ChunkResponse* response, + Closure* done); void CreateS3CloneChunk(RpcController* controller, - const CreateS3CloneChunkRequest* request, - CreateS3CloneChunkResponse* response, - Closure* done); - void RecoverChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void GetChunkInfo(RpcController *controller, - const GetChunkInfoRequest *request, - GetChunkInfoResponse *response, - Closure *done); - - void GetChunkHash(RpcController *controller, - const GetChunkHashRequest *request, - GetChunkHashResponse *response, - Closure *done); - - void UpdateEpoch(RpcController *controller, - const UpdateEpochRequest *request, - UpdateEpochResponse *response, - Closure *done); + const CreateS3CloneChunkRequest* request, + CreateS3CloneChunkResponse* response, + Closure* done); + void RecoverChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void GetChunkInfo(RpcController* controller, + const GetChunkInfoRequest* request, + GetChunkInfoResponse* response, Closure* done); + + void GetChunkHash(RpcController* controller, + const GetChunkHashRequest* request, + GetChunkHashResponse* response, Closure* done); + + void UpdateEpoch(RpcController* controller, + const UpdateEpochRequest* request, + UpdateEpochResponse* response, Closure* done); private:
/** - * 验证op request的offset和length是否越界和对齐 + * Verify that the offset and length of the op request are within bounds + * and aligned * @param offset[in]: op request' offset * @param len[in]: op request' length - * @return true,说明合法,否则返回false + * @return true if valid, otherwise false */ bool CheckRequestOffsetAndLength(uint32_t offset, uint32_t len) const;
private: ChunkServiceOptions chunkServiceOptions_; - CopysetNodeManager *copysetNodeManager_; + CopysetNodeManager* copysetNodeManager_; std::shared_ptr inflightThrottle_; - uint32_t maxChunkSize_; + uint32_t maxChunkSize_; std::shared_ptr epochMap_; uint32_t blockSize_;
diff --git a/src/chunkserver/chunk_service_closure.cpp b/src/chunkserver/chunk_service_closure.cpp index d680b37d93..fca11199f5 100644 --- a/src/chunkserver/chunk_service_closure.cpp +++ b/src/chunkserver/chunk_service_closure.cpp @@ -21,6 +21,7 @@ */ #include "src/chunkserver/chunk_service_closure.h" + #include #include "src/chunkserver/chunkserver_metrics.h" @@ -30,55 +31,52 @@ namespace chunkserver { void ChunkServiceClosure::Run() {
/** - * 在Run结束之后,自动析构自己,这样可以避免 - * 析构函数漏调 + * Automatically destruct itself after Run finishes, so that a missed + * destructor call is avoided */ std::unique_ptr selfGuard(this);
{ - // 所有brpcDone_调用之前要做的操作都放到这个生命周期内 + // Everything that must be done before brpcDone_ is invoked is placed + // within this scope brpc::ClosureGuard doneGuard(brpcDone_); - // 记录请求处理结果,收集到metric中 + // Record the result of the request and collect it into the metrics OnResonse(); }
- // closure调用的时候减1,closure创建的什么加1 - // 这一行必须放在brpcDone_调用之后,ut里需要测试inflightio超过限制时的表现 - // 会在传进来的closure里面加一个sleep来控制inflightio个数 + // The closure increments the counter when created and decrements it when + // invoked. This line must come after the brpcDone_ call: the unit tests + // exercise the behavior when inflight I/O exceeds the limit by adding a + // sleep to the closure passed in, which controls the number of inflight + // requests if (nullptr != inflightThrottle_) { inflightThrottle_->Decrement(); } }
void
ChunkServiceClosure::OnRequest() { - // 如果request或者response为空就不统计metric - if (request_ == nullptr || response_ == nullptr) - return; + // If request or response is empty, no metric is collected + if (request_ == nullptr || response_ == nullptr) return;
- // 根据request类型统计请求数量 + // Count the number of requests by request type ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); switch (request_->optype()) { case CHUNK_OP_TYPE::CHUNK_OP_READ: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::READ_CHUNK); break; } case CHUNK_OP_TYPE::CHUNK_OP_WRITE: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::WRITE_CHUNK); break; } case CHUNK_OP_TYPE::CHUNK_OP_RECOVER: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::RECOVER_CHUNK); break; } case CHUNK_OP_TYPE::CHUNK_OP_PASTE: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::PASTE_CHUNK); break; } @@ -88,62 +86,51 @@ void ChunkServiceClosure::OnRequest() { }
void ChunkServiceClosure::OnResonse() { - // 如果request或者response为空就不统计metric - if (request_ == nullptr || response_ == nullptr) - return; + // If request or response is empty, no metric is collected + if (request_ == nullptr || response_ == nullptr) return;
- // 可以根据response中的返回值来统计此次请求的处理结果 + // The outcome of this request can be determined from the return value + // in the response ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); bool hasError = false; uint64_t latencyUs = common::TimeUtility::GetTimeofDayUs() - receivedTimeUs_;
switch (request_->optype()) { case CHUNK_OP_TYPE::CHUNK_OP_READ: { - // 如果是read请求,返回CHUNK_OP_STATUS_CHUNK_NOTEXIST也认为是正确的 + // For a read request, a return of CHUNK_OP_STATUS_CHUNK_NOTEXIST + // is also considered correct - hasError = (response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) && - (response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); + hasError = (response_->status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) && + (response_->status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST);
- metric->OnResponse(request_->logicpoolid(), - request_->copysetid(), - CSIOMetricType::READ_CHUNK, - request_->size(), - latencyUs, - hasError); + metric->OnResponse(request_->logicpoolid(), request_->copysetid(), + CSIOMetricType::READ_CHUNK, request_->size(), + latencyUs, hasError); break; } case CHUNK_OP_TYPE::CHUNK_OP_WRITE: { - hasError = response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; - metric->OnResponse(request_->logicpoolid(), - request_->copysetid(), - CSIOMetricType::WRITE_CHUNK, - request_->size(), - latencyUs, - hasError); + hasError = + response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; + metric->OnResponse(request_->logicpoolid(), request_->copysetid(), + CSIOMetricType::WRITE_CHUNK, request_->size(), + latencyUs, hasError); break; } case CHUNK_OP_TYPE::CHUNK_OP_RECOVER: { - hasError = response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; - metric->OnResponse(request_->logicpoolid(), - request_->copysetid(), - CSIOMetricType::RECOVER_CHUNK, - request_->size(), - latencyUs, - hasError); + hasError = +
response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; + metric->OnResponse(request_->logicpoolid(), request_->copysetid(), + CSIOMetricType::RECOVER_CHUNK, request_->size(), + latencyUs, hasError); break; } case CHUNK_OP_TYPE::CHUNK_OP_PASTE: { - hasError = response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; - metric->OnResponse(request_->logicpoolid(), - request_->copysetid(), - CSIOMetricType::PASTE_CHUNK, - request_->size(), - latencyUs, - hasError); + hasError = + response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; + metric->OnResponse(request_->logicpoolid(), request_->copysetid(), + CSIOMetricType::PASTE_CHUNK, request_->size(), + latencyUs, hasError); break; } default:
diff --git a/src/chunkserver/chunk_service_closure.h b/src/chunkserver/chunk_service_closure.h index b6dc7d4d65..48c418033c 100755 --- a/src/chunkserver/chunk_service_closure.h +++ b/src/chunkserver/chunk_service_closure.h @@ -24,66 +24,71 @@ #define SRC_CHUNKSERVER_CHUNK_SERVICE_CLOSURE_H_ #include + #include #include "proto/chunk.pb.h" -#include "src/chunkserver/op_request.h" #include "src/chunkserver/inflight_throttle.h" +#include "src/chunkserver/op_request.h" #include "src/common/timeutility.h" namespace curve { namespace chunkserver {
-// chunk service层的闭包,对rpc的闭包再做一层封装,用于请求返回时统计metric信息 +// The closure of the chunk service layer wraps the rpc closure in one more +// layer; it is used to collect metric information when the request returns class ChunkServiceClosure : public braft::Closure { public: explicit ChunkServiceClosure( - std::shared_ptr inflightThrottle, - const ChunkRequest *request, - ChunkResponse *response, - google::protobuf::Closure *done) - : inflightThrottle_(inflightThrottle) , request_(request) , response_(response) , brpcDone_(done) , receivedTimeUs_(common::TimeUtility::GetTimeofDayUs()) { - // closure创建的什么加1,closure调用的时候减1 - if (nullptr != inflightThrottle_) { - inflightThrottle_->Increment(); - } - // 统计请求数量 - OnRequest(); + std::shared_ptr inflightThrottle, + const ChunkRequest* request, ChunkResponse* response, + google::protobuf::Closure* done) + : inflightThrottle_(inflightThrottle), + request_(request), + response_(response), + brpcDone_(done), + receivedTimeUs_(common::TimeUtility::GetTimeofDayUs()) { + // Increment by 1 when the closure is created; decrement by 1 when + // it is invoked + if (nullptr != inflightThrottle_) { + inflightThrottle_->Increment(); } + // Count the number of requests + OnRequest(); + }
~ChunkServiceClosure() = default;
/** - * 该闭包的guard生命周期结束时会调用该函数 - * 该函数内目前主要是对读写请求返回结果的一些metric统计 - * 后面如果有类似的场景(在service请求结束时做一些处理)可以在内部添加逻辑 + * This function is called when the guard holding this closure reaches the + * end of its lifetime. It currently performs metric statistics on the + * results returned by read and write requests. If similar scenarios arise + * later (doing some processing when a service request finishes), the + * logic can be added here */ void Run() override;
private: /** - * 统计请求数量和速率 + * Count the number and rate of requests */ void OnRequest(); /** - * 记录请求处理的结果,例如请求是否出错、请求的延时等 + * Record the result of request processing, e.g. whether the request + * failed, the latency of the request, etc. */ void OnResonse();
private: - // inflight流控 + // inflight flow control std::shared_ptr inflightThrottle_; - // rpc请求的request - const ChunkRequest *request_; - // rpc请求的response - ChunkResponse *response_; - // rpc请求回调 - google::protobuf::Closure *brpcDone_; - // 接受到请求的时间 + // The request of the rpc + const ChunkRequest* request_; + // The response of the rpc + ChunkResponse* response_; + // The rpc callback + google::protobuf::Closure* brpcDone_; + // The time when the request was received uint64_t receivedTimeUs_; };
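[Review note] The closure lifecycle documented above, condensed into a sketch (names match this patch; control flow simplified for illustration):

    // 1. Construction: inflightThrottle_->Increment() and OnRequest() count the request.
    // 2. The service processes the op and fills in `response_`.
    // 3. Run(): OnResonse() records latency and errors, the brpcDone_ guard
    //    returns the rpc, and inflightThrottle_->Decrement() frees the slot.
    ChunkServiceClosure* closure = new (std::nothrow)
        ChunkServiceClosure(inflightThrottle, request, response, done);
    CHECK(nullptr != closure) << "new chunk service closure failed";
    // ... hand `closure` to the op request; its Run() fires when the op completes.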
diff --git a/src/chunkserver/chunkserver.cpp b/src/chunkserver/chunkserver.cpp index 07f1f48d5f..18205a05d1 100644 --- a/src/chunkserver/chunkserver.cpp +++ b/src/chunkserver/chunkserver.cpp @@ -20,37 +20,37 @@ * Author: lixiaocui */ -#include +#include "src/chunkserver/chunkserver.h" -#include #include #include #include +#include +#include #include -#include "src/chunkserver/chunkserver.h" -#include "src/chunkserver/chunkserver_metrics.h" -#include "src/chunkserver/chunkserver_service.h" -#include "src/chunkserver/copyset_service.h" -#include "src/chunkserver/chunk_service.h" #include "src/chunkserver/braft_cli_service.h" #include "src/chunkserver/braft_cli_service2.h" +#include "src/chunkserver/chunk_service.h" #include "src/chunkserver/chunkserver_helper.h" -#include "src/common/concurrent/task_thread_pool.h" -#include "src/common/uri_parser.h" -#include "src/chunkserver/raftsnapshot/curve_snapshot_attachment.h" +#include "src/chunkserver/chunkserver_metrics.h" +#include "src/chunkserver/chunkserver_service.h" +#include "src/chunkserver/copyset_service.h" +#include "src/chunkserver/raftlog/curve_segment_log_storage.h" #include "src/chunkserver/raftsnapshot/curve_file_service.h" +#include "src/chunkserver/raftsnapshot/curve_snapshot_attachment.h" #include "src/chunkserver/raftsnapshot/curve_snapshot_storage.h" -#include "src/chunkserver/raftlog/curve_segment_log_storage.h" +#include "src/common/concurrent/task_thread_pool.h" #include "src/common/curve_version.h" +#include "src/common/uri_parser.h" +using ::curve::chunkserver::concurrent::ConcurrentApplyModule; +using ::curve::common::UriParser; +using ::curve::fs::FileSystemType; using ::curve::fs::LocalFileSystem; using ::curve::fs::LocalFileSystemOption; using ::curve::fs::LocalFsFactory; -using ::curve::fs::FileSystemType; -using ::curve::chunkserver::concurrent::ConcurrentApplyModule; -using ::curve::common::UriParser;
DEFINE_string(conf, "ChunkServer.conf", "Path of configuration file"); DEFINE_string(chunkServerIp, "127.0.0.1", "chunkserver ip"); @@ -58,15 +58,15 @@ DEFINE_bool(enableExternalServer, false, "start external server or not"); DEFINE_string(chunkServerExternalIp, "127.0.0.1", "chunkserver external ip"); DEFINE_int32(chunkServerPort, 8200, "chunkserver port"); DEFINE_string(chunkServerStoreUri, "local://./0/", "chunkserver store uri"); -DEFINE_string(chunkServerMetaUri, - "local://./0/chunkserver.dat", "chunkserver meta uri"); +DEFINE_string(chunkServerMetaUri, "local://./0/chunkserver.dat", + "chunkserver meta uri"); DEFINE_string(copySetUri, "local://./0/copysets", "copyset data uri"); DEFINE_string(raftSnapshotUri, "curve://./0/copysets", "raft snapshot uri"); DEFINE_string(raftLogUri, "curve://./0/copysets", "raft log uri"); -DEFINE_string(recycleUri, "local://./0/recycler" , "recycle uri"); +DEFINE_string(recycleUri, "local://./0/recycler", "recycle uri"); DEFINE_string(chunkFilePoolDir, "./0/", "chunk file pool location"); -DEFINE_string(chunkFilePoolMetaPath, - "./chunkfilepool.meta", "chunk file pool meta path"); +DEFINE_string(chunkFilePoolMetaPath, "./chunkfilepool.meta", + "chunk file pool meta path"); DEFINE_string(logPath, "./0/chunkserver.log-", "log file path"); DEFINE_string(mdsListenAddr, "127.0.0.1:6666", "mds listen addr"); DEFINE_bool(enableChunkfilepool, true,
"enable chunkfilepool"); @@ -74,8 +74,7 @@ DEFINE_uint32(copysetLoadConcurrency, 5, "copyset load concurrency"); DEFINE_bool(enableWalfilepool, true, "enable WAL filepool"); DEFINE_string(walFilePoolDir, "./0/", "WAL filepool location"); DEFINE_string(walFilePoolMetaPath, "./walfilepool.meta", - "WAL filepool meta path"); - + "WAL filepool meta path"); const char* kProtocalCurve = "curve"; @@ -87,57 +86,58 @@ int ChunkServer::Run(int argc, char** argv) { RegisterCurveSegmentLogStorageOrDie(); - // ==========================加载配置项===============================// + // ==========================Load Configuration + // Items===============================// LOG(INFO) << "Loading Configuration."; common::Configuration conf; conf.SetConfigPath(FLAGS_conf.c_str()); - // 在从配置文件获取 + // Obtaining from the configuration file LOG_IF(FATAL, !conf.LoadConfig()) << "load chunkserver configuration fail, conf path = " << conf.GetConfigPath(); - // 命令行可以覆盖配置文件中的参数 + // The command line can override parameters in the configuration file LoadConfigFromCmdline(&conf); - // 初始化日志模块 + // Initialize Log Module google::InitGoogleLogging(argv[0]); - // 打印参数 + // Print parameters conf.PrintConfig(); conf.ExposeMetric("chunkserver_config"); curve::common::ExposeCurveVersion(); - // ============================初始化各模块==========================// + // ============================nitialize each + // module==========================// LOG(INFO) << "Initializing ChunkServer modules"; - // 优先初始化 metric 收集模块 + // Prioritize initializing the metric collection module ChunkServerMetricOptions metricOptions; InitMetricOptions(&conf, &metricOptions); ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); LOG_IF(FATAL, metric->Init(metricOptions) != 0) << "Failed to init chunkserver metric."; - // 初始化并发持久模块 + // Initialize concurrent persistence module ConcurrentApplyModule concurrentapply; ConcurrentApplyOption concurrentApplyOptions; InitConcurrentApplyOptions(&conf, &concurrentApplyOptions); LOG_IF(FATAL, false == concurrentapply.Init(concurrentApplyOptions)) << "Failed to initialize concurrentapply module!"; - // 初始化本地文件系统 + // Initialize local file system std::shared_ptr fs( LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); LocalFileSystemOption lfsOption; - LOG_IF(FATAL, !conf.GetBoolValue( - "fs.enable_renameat2", &lfsOption.enableRenameat2)); + LOG_IF(FATAL, !conf.GetBoolValue("fs.enable_renameat2", + &lfsOption.enableRenameat2)); LOG_IF(FATAL, 0 != fs->Init(lfsOption)) << "Failed to initialize local filesystem module!"; - // 初始化chunk文件池 + // Initialize chunk file pool FilePoolOptions chunkFilePoolOptions; InitChunkFilePoolOptions(&conf, &chunkFilePoolOptions); - std::shared_ptr chunkfilePool = - std::make_shared(fs); + std::shared_ptr chunkfilePool = std::make_shared(fs); LOG_IF(FATAL, false == chunkfilePool->Initialize(chunkFilePoolOptions)) << "Failed to init chunk file pool"; @@ -150,9 +150,8 @@ int ChunkServer::Run(int argc, char** argv) { bool useChunkFilePoolAsWalPool = true; uint32_t useChunkFilePoolAsWalPoolReserve = 15; if (raftLogProtocol == kProtocalCurve) { - LOG_IF(FATAL, !conf.GetBoolValue( - "walfilepool.use_chunk_file_pool", - &useChunkFilePoolAsWalPool)); + LOG_IF(FATAL, !conf.GetBoolValue("walfilepool.use_chunk_file_pool", + &useChunkFilePoolAsWalPool)); if (!useChunkFilePoolAsWalPool) { FilePoolOptions walFilePoolOptions; @@ -164,20 +163,20 @@ int ChunkServer::Run(int argc, char** argv) { } else { walFilePool = chunkfilePool; LOG_IF(FATAL, !conf.GetUInt32Value( - 
"walfilepool.use_chunk_file_pool_reserve", - &useChunkFilePoolAsWalPoolReserve)); + "walfilepool.use_chunk_file_pool_reserve", + &useChunkFilePoolAsWalPoolReserve)); LOG(INFO) << "initialize to use chunkfilePool as walpool success."; } } - // 远端拷贝管理模块选项 + // Remote Copy Management Module Options CopyerOptions copyerOptions; InitCopyerOptions(&conf, ©erOptions); auto copyer = std::make_shared(); LOG_IF(FATAL, copyer->Init(copyerOptions) != 0) << "Failed to initialize clone copyer."; - // 克隆管理模块初始化 + // Clone Management Module Initialization CloneOptions cloneOptions; InitCloneOptions(&conf, &cloneOptions); uint32_t sliceSize; @@ -189,11 +188,11 @@ int ChunkServer::Run(int argc, char** argv) { LOG_IF(FATAL, cloneManager_.Init(cloneOptions) != 0) << "Failed to initialize clone manager."; - // 初始化注册模块 + // Initialize registration module RegisterOptions registerOptions; InitRegisterOptions(&conf, ®isterOptions); registerOptions.useChunkFilePoolAsWalPoolReserve = - useChunkFilePoolAsWalPoolReserve; + useChunkFilePoolAsWalPoolReserve; registerOptions.useChunkFilePoolAsWalPool = useChunkFilePoolAsWalPool; registerOptions.fs = fs; registerOptions.chunkFilepool = chunkfilePool; @@ -202,40 +201,39 @@ int ChunkServer::Run(int argc, char** argv) { Register registerMDS(registerOptions); ChunkServerMetadata metadata; ChunkServerMetadata localMetadata; - // 从本地获取meta - std::string metaPath = UriParser::GetPathFromUri( - registerOptions.chunkserverMetaUri); + // Get Meta from Local + std::string metaPath = + UriParser::GetPathFromUri(registerOptions.chunkserverMetaUri); auto epochMap = std::make_shared(); if (fs->FileExists(metaPath)) { LOG_IF(FATAL, GetChunkServerMetaFromLocal( - registerOptions.chunserverStoreUri, - registerOptions.chunkserverMetaUri, - registerOptions.fs, &localMetadata) != 0) + registerOptions.chunserverStoreUri, + registerOptions.chunkserverMetaUri, + registerOptions.fs, &localMetadata) != 0) << "Failed to GetChunkServerMetaFromLocal."; - LOG_IF(FATAL, registerMDS.RegisterToMDS( - &localMetadata, &metadata, epochMap) != 0) + LOG_IF(FATAL, registerMDS.RegisterToMDS(&localMetadata, &metadata, + epochMap) != 0) << "Failed to register to MDS."; } else { - // 如果本地获取不到,向mds注册 - LOG(INFO) << "meta file " - << metaPath << " do not exist, register to mds"; - LOG_IF(FATAL, registerMDS.RegisterToMDS( - nullptr, &metadata, epochMap) != 0) + // If it cannot be obtained locally, register with MDS + LOG(INFO) << "meta file " << metaPath + << " do not exist, register to mds"; + LOG_IF(FATAL, + registerMDS.RegisterToMDS(nullptr, &metadata, epochMap) != 0) << "Failed to register to MDS."; } - // trash模块初始化 + // Trash module initialization TrashOptions trashOptions; InitTrashOptions(&conf, &trashOptions); trashOptions.localFileSystem = fs; trashOptions.chunkFilePool = chunkfilePool; trashOptions.walPool = walFilePool; trash_ = std::make_shared(); - LOG_IF(FATAL, trash_->Init(trashOptions) != 0) - << "Failed to init Trash"; + LOG_IF(FATAL, trash_->Init(trashOptions) != 0) << "Failed to init Trash"; - // 初始化复制组管理模块 + // Initialize replication group management module CopysetNodeOptions copysetNodeOptions; InitCopysetNodeOptions(&conf, ©setNodeOptions); copysetNodeOptions.concurrentapply = &concurrentapply; @@ -256,23 +254,25 @@ int ChunkServer::Run(int argc, char** argv) { } } - // install snapshot的带宽限制 + // Bandwidth limitation of install snapshot int snapshotThroughputBytes; LOG_IF(FATAL, !conf.GetIntValue("chunkserver.snapshot_throttle_throughput_bytes", &snapshotThroughputBytes)); /** - * 
checkCycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB, - * checkCycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个 - * 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而 - * 不是20MB的带宽 + * checkCycles exists for finer-grained bandwidth control: with + * snapshotThroughputBytes=100MB and checkCycles=10, for example, it + * guarantees 10MB of bandwidth per 1/10 second, without accumulation. + * The first 1/10 second gets 10MB and any unused part simply expires, + * so the second 1/10 second can still only use 10MB of bandwidth, not + * 20MB */ int checkCycles; LOG_IF(FATAL, !conf.GetIntValue("chunkserver.snapshot_throttle_check_cycles", &checkCycles)); - scoped_refptr snapshotThrottle - = new ThroughputSnapshotThrottle(snapshotThroughputBytes, checkCycles); + scoped_refptr snapshotThrottle = + new ThroughputSnapshotThrottle(snapshotThroughputBytes, checkCycles); snapshotThrottle_ = snapshotThrottle; copysetNodeOptions.snapshotThrottle = &snapshotThrottle_;
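[Review note] The throttle arithmetic above as a worked sketch (illustrative only; the helper below is hypothetical and not part of this patch):

    // Per-cycle budget = throughput / checkCycles, and unused budget expires:
    // 100 MB/s with checkCycles = 10 gives 10 MB per 100 ms window, so an
    // idle first window does NOT allow 20 MB in the second window.
    uint64_t BytesPerCycle(uint64_t snapshotThroughputBytes, int checkCycles) {
        return snapshotThroughputBytes / checkCycles;
    }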
@@ -282,7 +282,7 @@ int ChunkServer::Run(int argc, char** argv) { return -1; } butil::EndPoint endPoint = butil::EndPoint(ip, copysetNodeOptions.port); - // 注册curve snapshot storage + // Register curve snapshot storage RegisterCurveSnapshotStorageOrDie(); CurveSnapshotStorage::set_server_addr(endPoint); copysetNodeManager_ = &CopysetNodeManager::GetInstance(); @@ -296,7 +296,7 @@ int ChunkServer::Run(int argc, char** argv) { LOG_IF(FATAL, scanManager_.Init(scanOpts) != 0) << "Failed to init scan manager.";
- // 心跳模块初始化 + // Heartbeat module initialization HeartbeatOptions heartbeatOptions; InitHeartbeatOptions(&conf, &heartbeatOptions); heartbeatOptions.copysetNodeManager = copysetNodeManager_; @@ -308,7 +308,7 @@ int ChunkServer::Run(int argc, char** argv) { LOG_IF(FATAL, heartbeat_.Init(heartbeatOptions) != 0) << "Failed to init Heartbeat manager.";
- // 监控部分模块的metric指标 + // Monitor the metrics of some modules metric->MonitorTrash(trash_.get()); metric->MonitorChunkFilePool(chunkfilePool.get()); if (raftLogProtocol == kProtocalCurve && !useChunkFilePoolAsWalPool) { @@ -316,8 +316,8 @@ } metric->ExposeConfigMetric(&conf);
- // ========================添加rpc服务===============================// - // TODO(lixiaocui): rpc中各接口添加上延迟metric + // ======================== Add RPC services ===============================// + // TODO(lixiaocui): Add a latency metric to each rpc interface brpc::Server server; brpc::Server externalServer; // We need call braft::add_service to add endPoint to braft::NodeManager
// copyset service CopysetServiceImpl copysetService(copysetNodeManager_); - int ret = server.AddService(&copysetService, - brpc::SERVER_DOESNT_OWN_SERVICE); + int ret = + server.AddService(&copysetService, brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add CopysetService";
// inflight throttle int maxInflight; - LOG_IF(FATAL, - !conf.GetIntValue("chunkserver.max_inflight_requests", - &maxInflight)); - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); + LOG_IF(FATAL, !conf.GetIntValue("chunkserver.max_inflight_requests", + &maxInflight)); + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); CHECK(nullptr != inflightThrottle) << "new inflight throttle failed";
// chunk service ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = copysetNodeManager_; chunkServiceOptions.cloneManager = &cloneManager_; chunkServiceOptions.inflightThrottle = inflightThrottle; ChunkServiceImpl chunkService(chunkServiceOptions, epochMap); - ret = server.AddService(&chunkService, - brpc::SERVER_DOESNT_OWN_SERVICE); + ret = server.AddService(&chunkService, brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add ChunkService";
// We need to replace braft::CliService with our own implementation @@ -354,14 +352,12 @@ ret = server.RemoveService(service); CHECK(0 == ret) << "Fail to remove braft::CliService"; BRaftCliServiceImpl braftCliService; - ret = server.AddService(&braftCliService, - brpc::SERVER_DOESNT_OWN_SERVICE); + ret = server.AddService(&braftCliService, brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add BRaftCliService";
// braftclient service BRaftCliServiceImpl2 braftCliService2; - ret = server.AddService(&braftCliService2, - brpc::SERVER_DOESNT_OWN_SERVICE); + ret = server.AddService(&braftCliService2, brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add BRaftCliService2";
// We need to replace braft::FileServiceImpl with our own implementation @@ -369,51 +365,52 @@ ret = server.RemoveService(service); CHECK(0 == ret) << "Fail to remove braft::FileService"; kCurveFileService.set_snapshot_attachment(new CurveSnapshotAttachment(fs)); - ret = server.AddService(&kCurveFileService, - brpc::SERVER_DOESNT_OWN_SERVICE); + ret = + server.AddService(&kCurveFileService, brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add CurveFileService";
// chunkserver service ChunkServerServiceImpl chunkserverService(copysetNodeManager_); - ret = server.AddService(&chunkserverService, - brpc::SERVER_DOESNT_OWN_SERVICE); + ret = + server.AddService(&chunkserverService, brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add ChunkServerService";
// scan copyset service ScanServiceImpl scanCopysetService(&scanManager_); - ret = server.AddService(&scanCopysetService, - brpc::SERVER_DOESNT_OWN_SERVICE); + ret = + server.AddService(&scanCopysetService, brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add ScanCopysetService";
- // 启动rpc service + // Start the rpc services LOG(INFO) << "Internal server is going to serve on: " << copysetNodeOptions.ip << ":" << copysetNodeOptions.port; if (server.Start(endPoint, NULL) != 0) { LOG(ERROR) << "Fail to start Internal Server"; return -1; }
- /* 启动external server - external server用于向client和工具等外部提供服务 - 区别于mds和chunkserver之间的通信*/ + /* Start the external server + The external server provides services to external clients, tools, + etc., as distinct from the communication between mds and chunkserver */ if (registerOptions.enableExternalServer) { ret = externalServer.AddService(&copysetService, - brpc::SERVER_DOESNT_OWN_SERVICE); + brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add CopysetService at external server"; ret = externalServer.AddService(&chunkService, - brpc::SERVER_DOESNT_OWN_SERVICE); + brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add ChunkService at external server"; ret = externalServer.AddService(&braftCliService, - brpc::SERVER_DOESNT_OWN_SERVICE); + brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add BRaftCliService at external server"; ret = externalServer.AddService(&braftCliService2, - brpc::SERVER_DOESNT_OWN_SERVICE); + brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add BRaftCliService2 at external server"; braft::RaftStatImpl raftStatService; ret = externalServer.AddService(&raftStatService, - brpc::SERVER_DOESNT_OWN_SERVICE); + brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail
to add RaftStatService at external server"; - std::string externalAddr = registerOptions.chunkserverExternalIp + ":" + - std::to_string(registerOptions.chunkserverPort); + std::string externalAddr = + registerOptions.chunkserverExternalIp + ":" + + std::to_string(registerOptions.chunkserverPort); LOG(INFO) << "External server is going to serve on: " << externalAddr; if (externalServer.Start(externalAddr.c_str(), NULL) != 0) { LOG(ERROR) << "Fail to start External Server"; @@ -421,30 +418,31 @@ } }
- // =======================启动各模块==================================// + // ======================= Start modules ==================================// LOG(INFO) << "ChunkServer starts.";
/** - * 将模块启动放到rpc 服务启动后面,主要是为了解决内存增长的问题 - * 控制并发恢复的copyset数量,copyset恢复需要依赖rpc服务先启动 + * Module startup is placed after the rpc services have started, mainly + * to address memory growth: it limits the number of copysets recovering + * concurrently, and copyset recovery depends on the rpc services being + * up first */ - LOG_IF(FATAL, trash_->Run() != 0) - << "Failed to start trash."; - LOG_IF(FATAL, cloneManager_.Run() != 0) - << "Failed to start clone manager."; + LOG_IF(FATAL, trash_->Run() != 0) << "Failed to start trash."; + LOG_IF(FATAL, cloneManager_.Run() != 0) << "Failed to start clone manager."; LOG_IF(FATAL, heartbeat_.Run() != 0) << "Failed to start heartbeat manager."; LOG_IF(FATAL, copysetNodeManager_->Run() != 0) << "Failed to start CopysetNodeManager."; - LOG_IF(FATAL, scanManager_.Run() != 0) - << "Failed to start scan manager."; + LOG_IF(FATAL, scanManager_.Run() != 0) << "Failed to start scan manager."; LOG_IF(FATAL, !chunkfilePool->StartCleaning()) << "Failed to start file pool clean worker.";
- // =======================等待进程退出==================================// + // ======================= Wait for the process to exit ===================// while (!brpc::IsAskedToQuit()) { bthread_usleep(1000000L); }
- // scanmanager stop maybe need a little while, so stop it first before stop service NOLINT + // scanmanager stop maybe need a little while, so stop it first before stop + // service NOLINT LOG(INFO) << "ChunkServer is going to quit."; LOG_IF(ERROR, scanManager_.Fini() != 0) << "Failed to shutdown scan manager."; @@ -463,10 +461,8 @@ << "Failed to shutdown CopysetNodeManager."; LOG_IF(ERROR, cloneManager_.Fini() != 0) << "Failed to shutdown clone manager."; - LOG_IF(ERROR, copyer->Fini() != 0) - << "Failed to shutdown clone copyer."; - LOG_IF(ERROR, trash_->Fini() != 0) - << "Failed to shutdown trash."; + LOG_IF(ERROR, copyer->Fini() != 0) << "Failed to shutdown clone copyer."; + LOG_IF(ERROR, trash_->Fini() != 0) << "Failed to shutdown trash."; LOG_IF(ERROR, !chunkfilePool->StopCleaning()) << "Failed to shutdown file pool clean worker."; concurrentapply.Stop(); @@ -475,16 +471,12 @@ return 0; }
-void ChunkServer::Stop() { - brpc::AskToQuit(); -} - - +void ChunkServer::Stop() { brpc::AskToQuit(); }
void ChunkServer::InitChunkFilePoolOptions( - common::Configuration *conf, FilePoolOptions *chunkFilePoolOptions) { + common::Configuration* conf, FilePoolOptions* chunkFilePoolOptions) { LOG_IF(FATAL, !conf->GetUInt32Value("global.chunk_size", - &chunkFilePoolOptions->fileSize)); + &chunkFilePoolOptions->fileSize)); LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", &chunkFilePoolOptions->metaPageSize)) @@ -495,34 +487,34 @@ void
ChunkServer::InitChunkFilePoolOptions( << "Not found `global.block_size` in config file"; LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.cpmeta_file_size", - &chunkFilePoolOptions->metaFileSize)); - LOG_IF(FATAL, !conf->GetBoolValue( - "chunkfilepool.enable_get_chunk_from_pool", - &chunkFilePoolOptions->getFileFromPool)); + &chunkFilePoolOptions->metaFileSize)); + LOG_IF(FATAL, + !conf->GetBoolValue("chunkfilepool.enable_get_chunk_from_pool", + &chunkFilePoolOptions->getFileFromPool)); if (chunkFilePoolOptions->getFileFromPool == false) { std::string chunkFilePoolUri; - LOG_IF(FATAL, !conf->GetStringValue( - "chunkfilepool.chunk_file_pool_dir", &chunkFilePoolUri)); - ::memcpy(chunkFilePoolOptions->filePoolDir, - chunkFilePoolUri.c_str(), + LOG_IF(FATAL, !conf->GetStringValue("chunkfilepool.chunk_file_pool_dir", + &chunkFilePoolUri)); + ::memcpy(chunkFilePoolOptions->filePoolDir, chunkFilePoolUri.c_str(), chunkFilePoolUri.size()); } else { std::string metaUri; - LOG_IF(FATAL, !conf->GetStringValue( - "chunkfilepool.meta_path", &metaUri)); - ::memcpy( - chunkFilePoolOptions->metaPath, metaUri.c_str(), metaUri.size()); + LOG_IF(FATAL, + !conf->GetStringValue("chunkfilepool.meta_path", &metaUri)); + ::memcpy(chunkFilePoolOptions->metaPath, metaUri.c_str(), + metaUri.size()); LOG_IF(FATAL, !conf->GetBoolValue("chunkfilepool.clean.enable", - &chunkFilePoolOptions->needClean)); - LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.clean.bytes_per_write", // NOLINT - &chunkFilePoolOptions->bytesPerWrite)); + &chunkFilePoolOptions->needClean)); + LOG_IF(FATAL, !conf->GetUInt32Value( + "chunkfilepool.clean.bytes_per_write", // NOLINT + &chunkFilePoolOptions->bytesPerWrite)); LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.clean.throttle_iops", - &chunkFilePoolOptions->iops4clean)); + &chunkFilePoolOptions->iops4clean)); - if (0 == chunkFilePoolOptions->bytesPerWrite - || chunkFilePoolOptions->bytesPerWrite > 1 * 1024 * 1024 - || 0 != chunkFilePoolOptions->bytesPerWrite % 4096) { + if (0 == chunkFilePoolOptions->bytesPerWrite || + chunkFilePoolOptions->bytesPerWrite > 1 * 1024 * 1024 || + 0 != chunkFilePoolOptions->bytesPerWrite % 4096) { LOG(FATAL) << "The bytesPerWrite must be in [1, 1048576] " << "and should be aligned to 4K, " << "but now is: " << chunkFilePoolOptions->bytesPerWrite; @@ -530,129 +522,132 @@ void ChunkServer::InitChunkFilePoolOptions( } } -void ChunkServer::InitConcurrentApplyOptions(common::Configuration *conf, - ConcurrentApplyOption *concurrentApplyOptions) { - LOG_IF(FATAL, !conf->GetIntValue( - "rconcurrentapply.size", &concurrentApplyOptions->rconcurrentsize)); - LOG_IF(FATAL, !conf->GetIntValue( - "wconcurrentapply.size", &concurrentApplyOptions->wconcurrentsize)); - LOG_IF(FATAL, !conf->GetIntValue( - "rconcurrentapply.queuedepth", &concurrentApplyOptions->rqueuedepth)); - LOG_IF(FATAL, !conf->GetIntValue( - "wconcurrentapply.queuedepth", &concurrentApplyOptions->wqueuedepth)); +void ChunkServer::InitConcurrentApplyOptions( + common::Configuration* conf, + ConcurrentApplyOption* concurrentApplyOptions) { + LOG_IF(FATAL, !conf->GetIntValue("rconcurrentapply.size", + &concurrentApplyOptions->rconcurrentsize)); + LOG_IF(FATAL, !conf->GetIntValue("wconcurrentapply.size", + &concurrentApplyOptions->wconcurrentsize)); + LOG_IF(FATAL, !conf->GetIntValue("rconcurrentapply.queuedepth", + &concurrentApplyOptions->rqueuedepth)); + LOG_IF(FATAL, !conf->GetIntValue("wconcurrentapply.queuedepth", + &concurrentApplyOptions->wqueuedepth)); } -void 
ChunkServer::InitWalFilePoolOptions( - common::Configuration *conf, FilePoolOptions *walPoolOptions) { +void ChunkServer::InitWalFilePoolOptions(common::Configuration* conf, + FilePoolOptions* walPoolOptions) { LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.segment_size", - &walPoolOptions->fileSize)); + &walPoolOptions->fileSize)); LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.metapage_size", - &walPoolOptions->metaPageSize)); + &walPoolOptions->metaPageSize)); LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.meta_file_size", - &walPoolOptions->metaFileSize)); - LOG_IF(FATAL, !conf->GetBoolValue( - "walfilepool.enable_get_segment_from_pool", - &walPoolOptions->getFileFromPool)); + &walPoolOptions->metaFileSize)); + LOG_IF(FATAL, + !conf->GetBoolValue("walfilepool.enable_get_segment_from_pool", + &walPoolOptions->getFileFromPool)); if (walPoolOptions->getFileFromPool == false) { std::string filePoolUri; - LOG_IF(FATAL, !conf->GetStringValue( - "walfilepool.file_pool_dir", &filePoolUri)); - ::memcpy(walPoolOptions->filePoolDir, - filePoolUri.c_str(), + LOG_IF(FATAL, !conf->GetStringValue("walfilepool.file_pool_dir", + &filePoolUri)); + ::memcpy(walPoolOptions->filePoolDir, filePoolUri.c_str(), filePoolUri.size()); } else { std::string metaUri; - LOG_IF(FATAL, !conf->GetStringValue( - "walfilepool.meta_path", &metaUri)); - ::memcpy( - walPoolOptions->metaPath, metaUri.c_str(), metaUri.size()); + LOG_IF(FATAL, !conf->GetStringValue("walfilepool.meta_path", &metaUri)); + ::memcpy(walPoolOptions->metaPath, metaUri.c_str(), metaUri.size()); } } void ChunkServer::InitCopysetNodeOptions( - common::Configuration *conf, CopysetNodeOptions *copysetNodeOptions) { + common::Configuration* conf, CopysetNodeOptions* copysetNodeOptions) { LOG_IF(FATAL, !conf->GetStringValue("global.ip", ©setNodeOptions->ip)); - LOG_IF(FATAL, !conf->GetUInt32Value( - "global.port", ©setNodeOptions->port)); + LOG_IF(FATAL, + !conf->GetUInt32Value("global.port", ©setNodeOptions->port)); if (copysetNodeOptions->port <= 0 || copysetNodeOptions->port >= 65535) { LOG(FATAL) << "Invalid server port provided: " << copysetNodeOptions->port; } LOG_IF(FATAL, !conf->GetIntValue("copyset.election_timeout_ms", - ©setNodeOptions->electionTimeoutMs)); + ©setNodeOptions->electionTimeoutMs)); LOG_IF(FATAL, !conf->GetIntValue("copyset.snapshot_interval_s", - ©setNodeOptions->snapshotIntervalS)); + ©setNodeOptions->snapshotIntervalS)); bool ret = conf->GetBoolValue("copyset.enable_lease_read", - ©setNodeOptions->enbaleLeaseRead); + ©setNodeOptions->enbaleLeaseRead); LOG_IF(WARNING, ret == false) << "config no copyset.enable_lease_read info, using default value " << copysetNodeOptions->enbaleLeaseRead; LOG_IF(FATAL, !conf->GetIntValue("copyset.catchup_margin", - ©setNodeOptions->catchupMargin)); + ©setNodeOptions->catchupMargin)); LOG_IF(FATAL, !conf->GetStringValue("copyset.chunk_data_uri", - ©setNodeOptions->chunkDataUri)); + ©setNodeOptions->chunkDataUri)); LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_log_uri", - ©setNodeOptions->logUri)); + ©setNodeOptions->logUri)); LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_meta_uri", - ©setNodeOptions->raftMetaUri)); + ©setNodeOptions->raftMetaUri)); LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_snapshot_uri", - ©setNodeOptions->raftSnapshotUri)); + ©setNodeOptions->raftSnapshotUri)); LOG_IF(FATAL, !conf->GetStringValue("copyset.recycler_uri", - ©setNodeOptions->recyclerUri)); + ©setNodeOptions->recyclerUri)); LOG_IF(FATAL, !conf->GetUInt32Value("global.chunk_size", - 
&copysetNodeOptions->maxChunkSize)); + &copysetNodeOptions->maxChunkSize)); LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", - &copysetNodeOptions->metaPageSize)); + &copysetNodeOptions->metaPageSize)); LOG_IF(FATAL, !conf->GetUInt32Value("global.block_size", - &copysetNodeOptions->blockSize)); + &copysetNodeOptions->blockSize)); LOG_IF(FATAL, !conf->GetUInt32Value("global.location_limit", - &copysetNodeOptions->locationLimit)); + &copysetNodeOptions->locationLimit)); LOG_IF(FATAL, !conf->GetUInt32Value("copyset.load_concurrency", - &copysetNodeOptions->loadConcurrency)); + &copysetNodeOptions->loadConcurrency)); LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_retrytimes", - &copysetNodeOptions->checkRetryTimes)); + &copysetNodeOptions->checkRetryTimes)); LOG_IF(FATAL, !conf->GetUInt32Value("copyset.finishload_margin", - &copysetNodeOptions->finishLoadMargin)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_loadmargin_interval_ms", - &copysetNodeOptions->checkLoadMarginIntervalMs)); + &copysetNodeOptions->finishLoadMargin)); + LOG_IF(FATAL, !conf->GetUInt32Value( + "copyset.check_loadmargin_interval_ms", + &copysetNodeOptions->checkLoadMarginIntervalMs)); LOG_IF(FATAL, !conf->GetUInt32Value("copyset.sync_concurrency", - &copysetNodeOptions->syncConcurrency)); + &copysetNodeOptions->syncConcurrency)); LOG_IF(FATAL, !conf->GetBoolValue( - "copyset.enable_odsync_when_open_chunkfile", - &copysetNodeOptions->enableOdsyncWhenOpenChunkFile)); + "copyset.enable_odsync_when_open_chunkfile", + &copysetNodeOptions->enableOdsyncWhenOpenChunkFile)); if (!copysetNodeOptions->enableOdsyncWhenOpenChunkFile) { - LOG_IF(FATAL, !conf->GetUInt64Value("copyset.sync_chunk_limits", - &copysetNodeOptions->syncChunkLimit)); - LOG_IF(FATAL, !conf->GetUInt64Value("copyset.sync_threshold", - &copysetNodeOptions->syncThreshold)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_syncing_interval_ms", - &copysetNodeOptions->checkSyncingIntervalMs)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.sync_trigger_seconds", - &copysetNodeOptions->syncTriggerSeconds)); + LOG_IF(FATAL, + !conf->GetUInt64Value("copyset.sync_chunk_limits", + &copysetNodeOptions->syncChunkLimit)); + LOG_IF(FATAL, + !conf->GetUInt64Value("copyset.sync_threshold", + &copysetNodeOptions->syncThreshold)); + LOG_IF(FATAL, !conf->GetUInt32Value( + "copyset.check_syncing_interval_ms", + &copysetNodeOptions->checkSyncingIntervalMs)); + LOG_IF(FATAL, + !conf->GetUInt32Value("copyset.sync_trigger_seconds", + &copysetNodeOptions->syncTriggerSeconds)); } } -void ChunkServer::InitCopyerOptions( - common::Configuration *conf, CopyerOptions *copyerOptions) { +void ChunkServer::InitCopyerOptions(common::Configuration* conf, + CopyerOptions* copyerOptions) { LOG_IF(FATAL, !conf->GetStringValue("curve.root_username", - &copyerOptions->curveUser.owner)); + &copyerOptions->curveUser.owner)); LOG_IF(FATAL, !conf->GetStringValue("curve.root_password", - &copyerOptions->curveUser.password)); + &copyerOptions->curveUser.password)); LOG_IF(FATAL, !conf->GetStringValue("curve.config_path", - &copyerOptions->curveConf)); + &copyerOptions->curveConf)); LOG_IF(FATAL, - !conf->GetStringValue("s3.config_path", &copyerOptions->s3Conf)); + !conf->GetStringValue("s3.config_path", &copyerOptions->s3Conf)); bool disableCurveClient = false; bool disableS3Adapter = false; LOG_IF(FATAL, !conf->GetBoolValue("clone.disable_curve_client", - &disableCurveClient)); - LOG_IF(FATAL, !conf->GetBoolValue("clone.disable_s3_adapter", - &disableS3Adapter)); + &disableCurveClient)); + LOG_IF(FATAL, + !conf->GetBoolValue("clone.disable_s3_adapter", &disableS3Adapter)); LOG_IF(FATAL, 
!conf->GetUInt64Value("curve.curve_file_timeout_s", - ©erOptions->curveFileTimeoutSec)); + ©erOptions->curveFileTimeoutSec)); if (disableCurveClient) { copyerOptions->curveClient = nullptr; @@ -667,105 +662,105 @@ void ChunkServer::InitCopyerOptions( } } -void ChunkServer::InitCloneOptions( - common::Configuration *conf, CloneOptions *cloneOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value("clone.thread_num", - &cloneOptions->threadNum)); +void ChunkServer::InitCloneOptions(common::Configuration* conf, + CloneOptions* cloneOptions) { + LOG_IF(FATAL, + !conf->GetUInt32Value("clone.thread_num", &cloneOptions->threadNum)); LOG_IF(FATAL, !conf->GetUInt32Value("clone.queue_depth", - &cloneOptions->queueCapacity)); + &cloneOptions->queueCapacity)); } -void ChunkServer::InitScanOptions( - common::Configuration *conf, ScanManagerOptions *scanOptions) { +void ChunkServer::InitScanOptions(common::Configuration* conf, + ScanManagerOptions* scanOptions) { LOG_IF(FATAL, !conf->GetUInt32Value("copyset.scan_interval_sec", - &scanOptions->intervalSec)); + &scanOptions->intervalSec)); LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_size_byte", - &scanOptions->scanSize)); + &scanOptions->scanSize)); LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", - &scanOptions->chunkMetaPageSize)); + &scanOptions->chunkMetaPageSize)); LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_rpc_timeout_ms", - &scanOptions->timeoutMs)); + &scanOptions->timeoutMs)); LOG_IF(FATAL, !conf->GetUInt32Value("copyset.scan_rpc_retry_times", - &scanOptions->retry)); + &scanOptions->retry)); LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_rpc_retry_interval_us", - &scanOptions->retryIntervalUs)); + &scanOptions->retryIntervalUs)); } -void ChunkServer::InitHeartbeatOptions( - common::Configuration *conf, HeartbeatOptions *heartbeatOptions) { +void ChunkServer::InitHeartbeatOptions(common::Configuration* conf, + HeartbeatOptions* heartbeatOptions) { LOG_IF(FATAL, !conf->GetStringValue("chunkserver.stor_uri", - &heartbeatOptions->storeUri)); + &heartbeatOptions->storeUri)); LOG_IF(FATAL, !conf->GetStringValue("global.ip", &heartbeatOptions->ip)); - LOG_IF(FATAL, !conf->GetUInt32Value("global.port", - &heartbeatOptions->port)); + LOG_IF(FATAL, + !conf->GetUInt32Value("global.port", &heartbeatOptions->port)); LOG_IF(FATAL, !conf->GetStringValue("mds.listen.addr", - &heartbeatOptions->mdsListenAddr)); + &heartbeatOptions->mdsListenAddr)); LOG_IF(FATAL, !conf->GetUInt32Value("mds.heartbeat_interval", - &heartbeatOptions->intervalSec)); + &heartbeatOptions->intervalSec)); LOG_IF(FATAL, !conf->GetUInt32Value("mds.heartbeat_timeout", - &heartbeatOptions->timeout)); + &heartbeatOptions->timeout)); } -void ChunkServer::InitRegisterOptions( - common::Configuration *conf, RegisterOptions *registerOptions) { +void ChunkServer::InitRegisterOptions(common::Configuration* conf, + RegisterOptions* registerOptions) { LOG_IF(FATAL, !conf->GetStringValue("mds.listen.addr", - ®isterOptions->mdsListenAddr)); - LOG_IF(FATAL, !conf->GetStringValue("global.ip", - ®isterOptions->chunkserverInternalIp)); + ®isterOptions->mdsListenAddr)); + LOG_IF(FATAL, !conf->GetStringValue( + "global.ip", ®isterOptions->chunkserverInternalIp)); LOG_IF(FATAL, !conf->GetBoolValue("global.enable_external_server", - ®isterOptions->enableExternalServer)); - LOG_IF(FATAL, !conf->GetStringValue("global.external_ip", - ®isterOptions->chunkserverExternalIp)); + ®isterOptions->enableExternalServer)); + LOG_IF(FATAL, + !conf->GetStringValue("global.external_ip", + 
&registerOptions->chunkserverExternalIp)); LOG_IF(FATAL, !conf->GetIntValue("global.port", - &registerOptions->chunkserverPort)); + &registerOptions->chunkserverPort)); LOG_IF(FATAL, !conf->GetStringValue("chunkserver.stor_uri", - &registerOptions->chunserverStoreUri)); + &registerOptions->chunserverStoreUri)); LOG_IF(FATAL, !conf->GetStringValue("chunkserver.meta_uri", - &registerOptions->chunkserverMetaUri)); + &registerOptions->chunkserverMetaUri)); LOG_IF(FATAL, !conf->GetStringValue("chunkserver.disk_type", - &registerOptions->chunkserverDiskType)); + &registerOptions->chunkserverDiskType)); LOG_IF(FATAL, !conf->GetIntValue("mds.register_retries", - &registerOptions->registerRetries)); + &registerOptions->registerRetries)); LOG_IF(FATAL, !conf->GetIntValue("mds.register_timeout", - &registerOptions->registerTimeout)); + &registerOptions->registerTimeout)); } -void ChunkServer::InitTrashOptions( - common::Configuration *conf, TrashOptions *trashOptions) { - LOG_IF(FATAL, !conf->GetStringValue( - "copyset.recycler_uri", &trashOptions->trashPath)); - LOG_IF(FATAL, !conf->GetIntValue( - "trash.expire_afterSec", &trashOptions->expiredAfterSec)); - LOG_IF(FATAL, !conf->GetIntValue( - "trash.scan_periodSec", &trashOptions->scanPeriodSec)); +void ChunkServer::InitTrashOptions(common::Configuration* conf, + TrashOptions* trashOptions) { + LOG_IF(FATAL, !conf->GetStringValue("copyset.recycler_uri", + &trashOptions->trashPath)); + LOG_IF(FATAL, !conf->GetIntValue("trash.expire_afterSec", + &trashOptions->expiredAfterSec)); + LOG_IF(FATAL, !conf->GetIntValue("trash.scan_periodSec", + &trashOptions->scanPeriodSec)); } -void ChunkServer::InitMetricOptions( - common::Configuration *conf, ChunkServerMetricOptions *metricOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value( - "global.port", &metricOptions->port)); - LOG_IF(FATAL, !conf->GetStringValue( - "global.ip", &metricOptions->ip)); - LOG_IF(FATAL, !conf->GetBoolValue( - "metric.onoff", &metricOptions->collectMetric)); +void ChunkServer::InitMetricOptions(common::Configuration* conf, + ChunkServerMetricOptions* metricOptions) { + LOG_IF(FATAL, !conf->GetUInt32Value("global.port", &metricOptions->port)); + LOG_IF(FATAL, !conf->GetStringValue("global.ip", &metricOptions->ip)); + LOG_IF(FATAL, + !conf->GetBoolValue("metric.onoff", &metricOptions->collectMetric)); } -void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { - // 如果命令行有设置, 命令行覆盖配置文件中的字段 +void ChunkServer::LoadConfigFromCmdline(common::Configuration* conf) { + // If there are settings on the command line, the command line overwrites + // the fields in the configuration file google::CommandLineFlagInfo info; if (GetCommandLineFlagInfo("chunkServerIp", &info) && !info.is_default) { conf->SetStringValue("global.ip", FLAGS_chunkServerIp); } else { LOG(FATAL) - << "chunkServerIp must be set when run chunkserver in command."; + << "chunkServerIp must be set when run chunkserver in command."; } if (GetCommandLineFlagInfo("enableExternalServer", &info) && - !info.is_default) { - conf->SetBoolValue( - "global.enable_external_server", FLAGS_enableExternalServer); + !info.is_default) { + conf->SetBoolValue("global.enable_external_server", + FLAGS_enableExternalServer); } if (GetCommandLineFlagInfo("chunkServerExternalIp", &info) && - !info.is_default) { + !info.is_default) { conf->SetStringValue("global.external_ip", FLAGS_chunkServerExternalIp); } @@ -773,23 +768,23 @@ void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { conf->SetIntValue("global.port", FLAGS_chunkServerPort); } else { LOG(FATAL) - << 
"chunkServerPort must be set when run chunkserver in command."; + << "chunkServerPort must be set when run chunkserver in command."; } if (GetCommandLineFlagInfo("chunkServerStoreUri", &info) && !info.is_default) { conf->SetStringValue("chunkserver.stor_uri", FLAGS_chunkServerStoreUri); } else { - LOG(FATAL) - << "chunkServerStoreUri must be set when run chunkserver in command."; + LOG(FATAL) << "chunkServerStoreUri must be set when run chunkserver in " + "command."; } if (GetCommandLineFlagInfo("chunkServerMetaUri", &info) && !info.is_default) { conf->SetStringValue("chunkserver.meta_uri", FLAGS_chunkServerMetaUri); } else { - LOG(FATAL) - << "chunkServerMetaUri must be set when run chunkserver in command."; + LOG(FATAL) << "chunkServerMetaUri must be set when run chunkserver in " + "command."; } if (GetCommandLineFlagInfo("copySetUri", &info) && !info.is_default) { @@ -798,75 +793,68 @@ void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { conf->SetStringValue("copyset.raft_snapshot_uri", FLAGS_copySetUri); conf->SetStringValue("copyset.raft_meta_uri", FLAGS_copySetUri); } else { - LOG(FATAL) - << "copySetUri must be set when run chunkserver in command."; + LOG(FATAL) << "copySetUri must be set when run chunkserver in command."; } if (GetCommandLineFlagInfo("raftSnapshotUri", &info) && !info.is_default) { - conf->SetStringValue( - "copyset.raft_snapshot_uri", FLAGS_raftSnapshotUri); + conf->SetStringValue("copyset.raft_snapshot_uri", + FLAGS_raftSnapshotUri); } else { LOG(FATAL) - << "raftSnapshotUri must be set when run chunkserver in command."; + << "raftSnapshotUri must be set when run chunkserver in command."; } if (GetCommandLineFlagInfo("raftLogUri", &info) && !info.is_default) { - conf->SetStringValue( - "copyset.raft_log_uri", FLAGS_raftLogUri); + conf->SetStringValue("copyset.raft_log_uri", FLAGS_raftLogUri); } else { - LOG(FATAL) - << "raftLogUri must be set when run chunkserver in command."; + LOG(FATAL) << "raftLogUri must be set when run chunkserver in command."; } - if (GetCommandLineFlagInfo("recycleUri", &info) && - !info.is_default) { + if (GetCommandLineFlagInfo("recycleUri", &info) && !info.is_default) { conf->SetStringValue("copyset.recycler_uri", FLAGS_recycleUri); } else { - LOG(FATAL) - << "recycleUri must be set when run chunkserver in command."; + LOG(FATAL) << "recycleUri must be set when run chunkserver in command."; } - if (GetCommandLineFlagInfo("chunkFilePoolDir", &info) && - !info.is_default) { - conf->SetStringValue( - "chunkfilepool.chunk_file_pool_dir", FLAGS_chunkFilePoolDir); + if (GetCommandLineFlagInfo("chunkFilePoolDir", &info) && !info.is_default) { + conf->SetStringValue("chunkfilepool.chunk_file_pool_dir", + FLAGS_chunkFilePoolDir); } else { LOG(FATAL) - << "chunkFilePoolDir must be set when run chunkserver in command."; + << "chunkFilePoolDir must be set when run chunkserver in command."; } if (GetCommandLineFlagInfo("chunkFilePoolMetaPath", &info) && !info.is_default) { - conf->SetStringValue( - "chunkfilepool.meta_path", FLAGS_chunkFilePoolMetaPath); + conf->SetStringValue("chunkfilepool.meta_path", + FLAGS_chunkFilePoolMetaPath); } else { - LOG(FATAL) - << "chunkFilePoolMetaPath must be set when run chunkserver in command."; + LOG(FATAL) << "chunkFilePoolMetaPath must be set when run chunkserver " + "in command."; } - if (GetCommandLineFlagInfo("walFilePoolDir", &info) && - !info.is_default) { - conf->SetStringValue( - "walfilepool.file_pool_dir", FLAGS_walFilePoolDir); + if (GetCommandLineFlagInfo("walFilePoolDir", &info) && 
!info.is_default) { conf->SetStringValue("walfilepool.file_pool_dir", FLAGS_walFilePoolDir); } else { LOG(FATAL) - << "walFilePoolDir must be set when run chunkserver in command."; + << "walFilePoolDir must be set when run chunkserver in command."; } if (GetCommandLineFlagInfo("walFilePoolMetaPath", &info) && !info.is_default) { - conf->SetStringValue( - "walfilepool.meta_path", FLAGS_walFilePoolMetaPath); + conf->SetStringValue("walfilepool.meta_path", + FLAGS_walFilePoolMetaPath); } else { - LOG(FATAL) - << "walFilePoolMetaPath must be set when run chunkserver in command."; + LOG(FATAL) << "walFilePoolMetaPath must be set when run chunkserver in " + "command."; } if (GetCommandLineFlagInfo("mdsListenAddr", &info) && !info.is_default) { conf->SetStringValue("mds.listen.addr", FLAGS_mdsListenAddr); } - // 设置日志存放文件夹 + // Set the log storage directory if (FLAGS_log_dir.empty()) { - if (!conf->GetStringValue("chunkserver.common.logDir", &FLAGS_log_dir)) { // NOLINT + if (!conf->GetStringValue("chunkserver.common.logDir", + &FLAGS_log_dir)) { // NOLINT LOG(WARNING) << "no chunkserver.common.logDir in " << FLAGS_conf << ", will log to /tmp"; } @@ -875,42 +863,40 @@ void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { if (GetCommandLineFlagInfo("enableChunkfilepool", &info) && !info.is_default) { conf->SetBoolValue("chunkfilepool.enable_get_chunk_from_pool", - FLAGS_enableChunkfilepool); + FLAGS_enableChunkfilepool); } if (GetCommandLineFlagInfo("enableWalfilepool", &info) && !info.is_default) { conf->SetBoolValue("walfilepool.enable_get_segment_from_pool", - FLAGS_enableWalfilepool); + FLAGS_enableWalfilepool); } if (GetCommandLineFlagInfo("copysetLoadConcurrency", &info) && !info.is_default) { conf->SetIntValue("copyset.load_concurrency", - FLAGS_copysetLoadConcurrency); + FLAGS_copysetLoadConcurrency); } } int ChunkServer::GetChunkServerMetaFromLocal( - const std::string &storeUri, - const std::string &metaUri, - const std::shared_ptr &fs, - ChunkServerMetadata *metadata) { + const std::string& storeUri, const std::string& metaUri, + const std::shared_ptr& fs, ChunkServerMetadata* metadata) { std::string proto = UriParser::GetProtocolFromUri(storeUri); if (proto != "local") { LOG(ERROR) << "Datastore protocal " << proto << " is not supported yet"; return -1; } - // 从配置文件中获取chunkserver元数据的文件路径 + // Obtain the file path for chunkserver metadata from the configuration file proto = UriParser::GetProtocolFromUri(metaUri); if (proto != "local") { - LOG(ERROR) << "Chunkserver meta protocal " - << proto << " is not supported yet"; + LOG(ERROR) << "Chunkserver meta protocal " << proto + << " is not supported yet"; return -1; } - // 元数据文件已经存在 + // The metadata file already exists if (fs->FileExists(UriParser::GetPathFromUri(metaUri).c_str())) { - // 获取文件内容 + // Get the file content if (ReadChunkServerMeta(fs, metaUri, metadata) != 0) { LOG(ERROR) << "Fail to read persisted chunkserver meta data"; return -1; @@ -924,8 +910,9 @@ int ChunkServer::GetChunkServerMetaFromLocal( return -1; } -int ChunkServer::ReadChunkServerMeta(const std::shared_ptr &fs, - const std::string &metaUri, ChunkServerMetadata *metadata) { +int ChunkServer::ReadChunkServerMeta(const std::shared_ptr& fs, + const std::string& metaUri, + ChunkServerMetadata* metadata) { int fd; std::string metaFile = UriParser::GetPathFromUri(metaUri); @@ -935,7 +922,7 @@ int ChunkServer::ReadChunkServerMeta(const std::shared_ptr &fs, return -1; } - #define METAFILE_MAX_SIZE 4096 +#define METAFILE_MAX_SIZE 4096 int size; char 
json[METAFILE_MAX_SIZE] = {0}; diff --git a/src/chunkserver/chunkserver.h b/src/chunkserver/chunkserver.h index b9e9005545..6698281fec 100644 --- a/src/chunkserver/chunkserver.h +++ b/src/chunkserver/chunkserver.h @@ -23,18 +23,19 @@ #ifndef SRC_CHUNKSERVER_CHUNKSERVER_H_ #define SRC_CHUNKSERVER_CHUNKSERVER_H_ -#include #include -#include "src/common/configuration.h" +#include + +#include "src/chunkserver/chunkserver_metrics.h" +#include "src/chunkserver/clone_manager.h" +#include "src/chunkserver/concurrent_apply/concurrent_apply.h" #include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/heartbeat.h" -#include "src/chunkserver/scan_manager.h" -#include "src/chunkserver/clone_manager.h" #include "src/chunkserver/register.h" -#include "src/chunkserver/trash.h" -#include "src/chunkserver/chunkserver_metrics.h" -#include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "src/chunkserver/scan_manager.h" #include "src/chunkserver/scan_service.h" +#include "src/chunkserver/trash.h" +#include "src/common/configuration.h" using ::curve::chunkserver::concurrent::ConcurrentApplyOption; @@ -43,81 +44,84 @@ namespace chunkserver { class ChunkServer { public: /** - * @brief 初始化Chunkserve各子模块 + * @brief Initialize the Chunkserver submodules * - * @param[in] argc 命令行参数总数 - * @param[in] argv 命令行参数列表 + * @param[in] argc Total number of command line arguments + * @param[in] argv Command line argument list * - * @return 0表示成功,非0失败 + * @return 0 indicates success, non-0 indicates failure */ int Run(int argc, char** argv); /** - * @brief 停止chunkserver,结束各子模块 + * @brief Stop the chunkserver and shut down each submodule */ void Stop(); private: - void InitChunkFilePoolOptions(common::Configuration *conf, - FilePoolOptions *chunkFilePoolOptions); + void InitChunkFilePoolOptions(common::Configuration* conf, + FilePoolOptions* chunkFilePoolOptions); - void InitWalFilePoolOptions(common::Configuration *conf, - FilePoolOptions *walPoolOption); + void InitWalFilePoolOptions(common::Configuration* conf, + FilePoolOptions* walPoolOption); - void InitConcurrentApplyOptions(common::Configuration *conf, - ConcurrentApplyOption *concurrentApplyOption); + void InitConcurrentApplyOptions( + common::Configuration* conf, + ConcurrentApplyOption* concurrentApplyOption); - void InitCopysetNodeOptions(common::Configuration *conf, - CopysetNodeOptions *copysetNodeOptions); + void InitCopysetNodeOptions(common::Configuration* conf, + CopysetNodeOptions* copysetNodeOptions); - void InitCopyerOptions(common::Configuration *conf, - CopyerOptions *copyerOptions); + void InitCopyerOptions(common::Configuration* conf, + CopyerOptions* copyerOptions); - void InitCloneOptions(common::Configuration *conf, - CloneOptions *cloneOptions); + void InitCloneOptions(common::Configuration* conf, + CloneOptions* cloneOptions); - void InitScanOptions(common::Configuration *conf, - ScanManagerOptions *scanOptions); + void InitScanOptions(common::Configuration* conf, + ScanManagerOptions* scanOptions); - void InitHeartbeatOptions(common::Configuration *conf, - HeartbeatOptions *heartbeatOptions); + void InitHeartbeatOptions(common::Configuration* conf, + HeartbeatOptions* heartbeatOptions); - void InitRegisterOptions(common::Configuration *conf, - RegisterOptions *registerOptions); + void InitRegisterOptions(common::Configuration* conf, + RegisterOptions* registerOptions); - void InitTrashOptions(common::Configuration *conf, - TrashOptions *trashOptions); + void InitTrashOptions(common::Configuration* conf, + TrashOptions* 
trashOptions); - void InitMetricOptions(common::Configuration *conf, - ChunkServerMetricOptions *metricOptions); + void InitMetricOptions(common::Configuration* conf, + ChunkServerMetricOptions* metricOptions); - void LoadConfigFromCmdline(common::Configuration *conf); + void LoadConfigFromCmdline(common::Configuration* conf); - int GetChunkServerMetaFromLocal(const std::string &storeUri, - const std::string &metaUri, - const std::shared_ptr &fs, - ChunkServerMetadata *metadata); + int GetChunkServerMetaFromLocal(const std::string& storeUri, + const std::string& metaUri, + const std::shared_ptr& fs, + ChunkServerMetadata* metadata); - int ReadChunkServerMeta(const std::shared_ptr &fs, - const std::string &metaUri, ChunkServerMetadata *metadata); + int ReadChunkServerMeta(const std::shared_ptr& fs, + const std::string& metaUri, + ChunkServerMetadata* metadata); private: - // copysetNodeManager_ 管理chunkserver上所有copysetNode + // copysetNodeManager_ manages all copysetNodes on the chunkserver CopysetNodeManager* copysetNodeManager_; - // cloneManager_ 管理克隆任务 + // cloneManager_ manages clone tasks CloneManager cloneManager_; // scan copyset manager ScanManager scanManager_; - // heartbeat_ 负责向mds定期发送心跳,并下发心跳中任务 + // heartbeat_ periodically sends heartbeats to the MDS and dispatches the + // tasks carried in the heartbeat response Heartbeat heartbeat_; - // trash_ 定期回收垃圾站中的物理空间 + // trash_ periodically reclaims the physical space in the trash std::shared_ptr trash_; - // install snapshot流控 + // install snapshot flow control scoped_refptr snapshotThrottle_; }; @@ -125,4 +129,3 @@ class ChunkServer { } // namespace curve #endif // SRC_CHUNKSERVER_CHUNKSERVER_H_ - diff --git a/src/chunkserver/chunkserver_helper.cpp b/src/chunkserver/chunkserver_helper.cpp index cf12df7f67..96afcf39e8 100644 --- a/src/chunkserver/chunkserver_helper.cpp +++ b/src/chunkserver/chunkserver_helper.cpp @@ -20,19 +20,20 @@ * Author: lixiaocui */ -#include -#include +#include "src/chunkserver/chunkserver_helper.h" + #include +#include +#include #include "src/common/crc32.h" -#include "src/chunkserver/chunkserver_helper.h" namespace curve { namespace chunkserver { const uint64_t DefaultMagic = 0x6225929368674118; bool ChunkServerMetaHelper::EncodeChunkServerMeta( - const ChunkServerMetadata &meta, std::string *out) { + const ChunkServerMetadata& meta, std::string* out) { if (!out->empty()) { LOG(ERROR) << "out string must empty!"; return false; @@ -50,8 +51,8 @@ bool ChunkServerMetaHelper::EncodeChunkServerMeta( return true; } -bool ChunkServerMetaHelper::DecodeChunkServerMeta( - const std::string &meta, ChunkServerMetadata *out) { +bool ChunkServerMetaHelper::DecodeChunkServerMeta(const std::string& meta, + ChunkServerMetadata* out) { std::string jsonStr(meta); std::string err; json2pb::Json2PbOptions opt; @@ -63,7 +64,7 @@ bool ChunkServerMetaHelper::DecodeChunkServerMeta( return false; } - // 验证meta是否正确 + // Verify that the metadata is correct uint32_t crc = MetadataCrc(*out); if (crc != out->checksum()) { LOG(ERROR) << "ChunkServer persisted metadata CRC dismatch." 
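// The helper pair above is symmetric around the checksum: Decode recomputes
// MetadataCrc over the parsed fields and compares it with the embedded
// checksum(). A rough round-trip sketch, assuming the usual protobuf setters
// on ChunkServerMetadata and that MetadataCrc is callable from the caller's
// context (field values are illustrative only):
//
//   ChunkServerMetadata meta;
//   meta.set_version(1);
//   meta.set_id(42);
//   meta.set_token("some-token");
//   meta.set_checksum(ChunkServerMetaHelper::MetadataCrc(meta));
//   std::string out;
//   ChunkServerMetaHelper::EncodeChunkServerMeta(meta, &out);  // pb -> JSON
//   ChunkServerMetadata decoded;
//   bool ok = ChunkServerMetaHelper::DecodeChunkServerMeta(out, &decoded);
//   // ok is false if any CRC-covered field was corrupted in between.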
@@ -75,8 +76,7 @@ bool ChunkServerMetaHelper::DecodeChunkServerMeta( return true; } -uint32_t ChunkServerMetaHelper::MetadataCrc( - const ChunkServerMetadata &meta) { +uint32_t ChunkServerMetaHelper::MetadataCrc(const ChunkServerMetadata& meta) { uint32_t crc = 0; uint32_t ver = meta.version(); uint32_t id = meta.id(); @@ -87,7 +87,7 @@ uint32_t ChunkServerMetaHelper::MetadataCrc( crc = curve::common::CRC32(crc, reinterpret_cast(&id), sizeof(id)); crc = curve::common::CRC32(crc, token, meta.token().size()); crc = curve::common::CRC32(crc, reinterpret_cast(&magic), - sizeof(magic)); + sizeof(magic)); return crc; } diff --git a/src/chunkserver/chunkserver_main.cpp b/src/chunkserver/chunkserver_main.cpp index 5bc4cb8736..4c13bf64d8 100644 --- a/src/chunkserver/chunkserver_main.cpp +++ b/src/chunkserver/chunkserver_main.cpp @@ -26,7 +26,7 @@ int main(int argc, char* argv[]) { butil::AtExitManager atExitManager; ::curve::chunkserver::ChunkServer chunkserver; LOG(INFO) << "ChunkServer starting."; - // 这里不能用fork创建守护进程,bvar会存在一些问题 + // Do not use fork to create a daemon here, as bvar would have some issues // https://github.com/apache/incubator-brpc/issues/697 // https://github.com/apache/incubator-brpc/issues/208 chunkserver.Run(argc, argv); diff --git a/src/chunkserver/chunkserver_metrics.cpp b/src/chunkserver/chunkserver_metrics.cpp index 339ecbbe66..f8a361d94e 100644 --- a/src/chunkserver/chunkserver_metrics.cpp +++ b/src/chunkserver/chunkserver_metrics.cpp @@ -21,8 +21,9 @@ */ #include "src/chunkserver/chunkserver_metrics.h" -#include + #include +#include #include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/passive_getfn.h" @@ -31,13 +32,15 @@ namespace curve { namespace chunkserver { IOMetric::IOMetric() - : rps_(&reqNum_, 1), iops_(&ioNum_, 1), eps_(&errorNum_, 1), + : rps_(&reqNum_, 1), + iops_(&ioNum_, 1), + eps_(&errorNum_, 1), bps_(&ioBytes_, 1) {} IOMetric::~IOMetric() {} -int IOMetric::Init(const std::string &prefix) { - // 暴露所有的metric +int IOMetric::Init(const std::string& prefix) { + // Expose all metrics if (reqNum_.expose_as(prefix, "request_num") != 0) { LOG(ERROR) << "expose request num failed."; return -1; @@ -94,9 +97,8 @@ void IOMetric::OnResponse(size_t size, int64_t latUs, bool hasError) { } } - -int CSIOMetric::Init(const std::string &prefix) { - // 初始化io统计项metric +int CSIOMetric::Init(const std::string& prefix) { + // Initialize the IO statistics metrics std::string readPrefix = prefix + "_read"; std::string writePrefix = prefix + "_write"; std::string recoverPrefix = prefix + "_recover"; @@ -161,30 +163,30 @@ void CSIOMetric::OnResponse(CSIOMetricType type, size_t size, int64_t latUs, IOMetricPtr CSIOMetric::GetIOMetric(CSIOMetricType type) { IOMetricPtr result = nullptr; switch (type) { - case CSIOMetricType::READ_CHUNK: - result = readMetric_; - break; - case CSIOMetricType::WRITE_CHUNK: - result = writeMetric_; - break; - case CSIOMetricType::RECOVER_CHUNK: - result = recoverMetric_; - break; - case CSIOMetricType::PASTE_CHUNK: - result = pasteMetric_; - break; - case CSIOMetricType::DOWNLOAD: - result = downloadMetric_; - break; - default: - result = nullptr; - break; + case CSIOMetricType::READ_CHUNK: + result = readMetric_; + break; + case CSIOMetricType::WRITE_CHUNK: + result = writeMetric_; + break; + case CSIOMetricType::RECOVER_CHUNK: + result = recoverMetric_; + break; + case CSIOMetricType::PASTE_CHUNK: + result = pasteMetric_; + break; + case CSIOMetricType::DOWNLOAD: + result = downloadMetric_; 
+ break; } return result; } -int CSCopysetMetric::Init(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +int CSCopysetMetric::Init(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { logicPoolId_ = logicPoolId; copysetId_ = copysetId; int ret = ioMetrics_.Init(Prefix()); @@ -196,7 +198,7 @@ int CSCopysetMetric::Init(const LogicPoolID &logicPoolId, return 0; } -void CSCopysetMetric::MonitorDataStore(CSDataStore *datastore) { +void CSCopysetMetric::MonitorDataStore(CSDataStore* datastore) { std::string chunkCountPrefix = Prefix() + "_chunk_count"; std::string snapshotCountPrefix = Prefix() + "snapshot_count"; std::string cloneChunkCountPrefix = Prefix() + "_clonechunk_count"; @@ -209,30 +211,36 @@ void CSCopysetMetric::MonitorDataStore(CSDataStore *datastore) { } void CSCopysetMetric::MonitorCurveSegmentLogStorage( - CurveSegmentLogStorage *logStorage) { + CurveSegmentLogStorage* logStorage) { std::string walSegmentCountPrefix = Prefix() + "_walsegment_count"; walSegmentCount_ = std::make_shared>( walSegmentCountPrefix, GetLogStorageWalSegmentCountFunc, logStorage); } ChunkServerMetric::ChunkServerMetric() - : hasInited_(false), leaderCount_(nullptr), chunkLeft_(nullptr), - walSegmentLeft_(nullptr), chunkTrashed_(nullptr), chunkCount_(nullptr), - walSegmentCount_(nullptr), snapshotCount_(nullptr), + : hasInited_(false), + leaderCount_(nullptr), + chunkLeft_(nullptr), + walSegmentLeft_(nullptr), + chunkTrashed_(nullptr), + chunkCount_(nullptr), + walSegmentCount_(nullptr), + snapshotCount_(nullptr), cloneChunkCount_(nullptr) {} -ChunkServerMetric *ChunkServerMetric::self_ = nullptr; +ChunkServerMetric* ChunkServerMetric::self_ = nullptr; -ChunkServerMetric *ChunkServerMetric::GetInstance() { - // chunkserver metric 在chunkserver启动时初始化创建 - // 因此创建的时候不会存在竞争,不需要锁保护 +ChunkServerMetric* ChunkServerMetric::GetInstance() { + // The chunkserver metric is created when the chunkserver starts, + // so there is no contention during creation and no lock + // protection is needed if (self_ == nullptr) { self_ = new ChunkServerMetric; } return self_; } -int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { +int ChunkServerMetric::Init(const ChunkServerMetricOptions& option) { if (hasInited_) { LOG(WARNING) << "chunkserver metric has inited."; return 0; @@ -245,14 +253,14 @@ int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { return 0; } - // 初始化io统计项metric + // Initialize the IO statistics metrics int ret = ioMetrics_.Init(Prefix()); if (ret < 0) { LOG(ERROR) << "Init chunkserver metric failed."; return -1; } - // 初始化资源统计 + // Initialize resource statistics std::string leaderCountPrefix = Prefix() + "_leader_count"; leaderCount_ = std::make_shared>(leaderCountPrefix); @@ -278,7 +286,7 @@ int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { } int ChunkServerMetric::Fini() { - // 释放资源,从而将暴露的metric从全局的map中移除 + // Release resources to remove exposed metrics from the global map ioMetrics_.Fini(); leaderCount_ = nullptr; chunkLeft_ = nullptr; @@ -293,8 +301,8 @@ int ChunkServerMetric::Fini() { return 0; } -int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { if (!option_.collectMetric) { return 0; } @@ -321,9 +329,8 @@ int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID &logicPoolId, return 0; } -CopysetMetricPtr -ChunkServerMetric::GetCopysetMetric(const 
LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +CopysetMetricPtr ChunkServerMetric::GetCopysetMetric( + const LogicPoolID& logicPoolId, const CopysetID& copysetId) { if (!option_.collectMetric) { return nullptr; } @@ -332,18 +339,18 @@ ChunkServerMetric::GetCopysetMetric(const LogicPoolID &logicPoolId, return copysetMetricMap_.Get(groupId); } -int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { GroupId groupId = ToGroupId(logicPoolId, copysetId); - // 这里先保存copyset metric,等remove后再去释放 - // 防止在读写锁里面去操作metric,导致死锁 + // Save the copyset metric here first and release it after the removal, + // to avoid operating on the metric inside the read-write lock and deadlocking auto metric = copysetMetricMap_.Get(groupId); copysetMetricMap_.Remove(groupId); return 0; } -void ChunkServerMetric::OnRequest(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, +void ChunkServerMetric::OnRequest(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, CSIOMetricType type) { if (!option_.collectMetric) { return; @@ -356,8 +363,8 @@ void ChunkServerMetric::OnRequest(const LogicPoolID &logicPoolId, ioMetrics_.OnRequest(type); } -void ChunkServerMetric::OnResponse(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, +void ChunkServerMetric::OnResponse(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, CSIOMetricType type, size_t size, int64_t latUs, bool hasError) { if (!option_.collectMetric) { @@ -371,7 +378,7 @@ void ChunkServerMetric::OnResponse(const LogicPoolID &logicPoolId, ioMetrics_.OnResponse(type, size, latUs, hasError); } -void ChunkServerMetric::MonitorChunkFilePool(FilePool *chunkFilePool) { +void ChunkServerMetric::MonitorChunkFilePool(FilePool* chunkFilePool) { if (!option_.collectMetric) { return; } @@ -381,7 +388,7 @@ void ChunkServerMetric::MonitorChunkFilePool(FilePool *chunkFilePool) { chunkLeftPrefix, GetChunkLeftFunc, chunkFilePool); } -void ChunkServerMetric::MonitorWalFilePool(FilePool *walFilePool) { +void ChunkServerMetric::MonitorWalFilePool(FilePool* walFilePool) { if (!option_.collectMetric) { return; } @@ -391,7 +398,7 @@ void ChunkServerMetric::MonitorWalFilePool(FilePool *walFilePool) { walSegmentLeftPrefix, GetWalSegmentLeftFunc, walFilePool); } -void ChunkServerMetric::MonitorTrash(Trash *trash) { +void ChunkServerMetric::MonitorTrash(Trash* trash) { if (!option_.collectMetric) { return; } @@ -417,7 +424,7 @@ void ChunkServerMetric::DecreaseLeaderCount() { *leaderCount_ << -1; } -void ChunkServerMetric::ExposeConfigMetric(common::Configuration *conf) { +void ChunkServerMetric::ExposeConfigMetric(common::Configuration* conf) { if (!option_.collectMetric) { return; } diff --git a/src/chunkserver/chunkserver_metrics.h b/src/chunkserver/chunkserver_metrics.h index d4354d196f..b91fbf0f6e 100644 --- a/src/chunkserver/chunkserver_metrics.h +++ b/src/chunkserver/chunkserver_metrics.h @@ -23,18 +23,19 @@ #ifndef SRC_CHUNKSERVER_CHUNKSERVER_METRICS_H_ #define SRC_CHUNKSERVER_CHUNKSERVER_METRICS_H_ -#include #include +#include + +#include #include #include -#include #include #include "include/chunkserver/chunkserver_common.h" -#include "src/common/uncopyable.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/common/concurrent/rw_lock.h" #include "src/common/configuration.h" -#include "src/chunkserver/datastore/file_pool.h" +#include "src/common/uncopyable.h" using 
curve::common::Configuration; using curve::common::ReadLockGuard; @@ -54,57 +55,59 @@ class Trash; template using PassiveStatusPtr = std::shared_ptr>; -template using AdderPtr = std::shared_ptr>; +template +using AdderPtr = std::shared_ptr>; -// 使用LatencyRecorder的实现来统计读写请求的size情况 -// 可以统计分位值、最大值、中位数、平均值等情况 +// Uses the LatencyRecorder implementation to track the size of read and +// write requests; it can report quantiles, the maximum, the median, the +// mean, and other statistics using IOSizeRecorder = bvar::LatencyRecorder; -// io 相关的统计项 +// IO-related statistics class IOMetric { public: IOMetric(); virtual ~IOMetric(); /** - * 初始化 io metric - * 主要用于曝光各metric指标 - * @param prefix: 用于bvar曝光时使用的前缀 - * @return 成功返回0,失败返回-1 + * Initialize the io metric, + * mainly exposing the various metric indicators + * @param prefix: The prefix used for bvar exposure + * @return 0 on success, -1 on failure */ - int Init(const std::string &prefix); + int Init(const std::string& prefix); /** - * IO请求到来时统计requestNum + * Increment requestNum when an IO request arrives */ void OnRequest(); /** - * IO 完成以后,记录该次IO的指标 - * 错误的io不会计入iops和bps统计 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * After an IO completes, record its metrics. + * Failed IOs are not counted in the iops and bps statistics + * @param size: The size of this IO's data + * @param latUs: The latency of this IO + * @param hasError: Whether an error occurred during this IO */ void OnResponse(size_t size, int64_t latUs, bool hasError); public: - // io请求的数量 + // Number of IO requests bvar::Adder reqNum_; - // 成功io的数量 + // Number of successful IOs bvar::Adder ioNum_; - // 失败的io个数 + // Number of failed IOs bvar::Adder errorNum_; - // 所有io的数据量 + // Total bytes of all IOs bvar::Adder ioBytes_; - // io的延时情况(分位值、最大值、中位数、平均值) + // IO latency (quantiles, maximum, median, average) bvar::LatencyRecorder latencyRecorder_; - // io大小的情况(分位值、最大值、中位数、平均值) + // IO size (quantiles, maximum, median, average) IOSizeRecorder sizeRecorder_; - // 最近1秒请求的IO数量 + // Number of IO requests in the last 1 second bvar::PerSecond> rps_; - // 最近1秒的iops + // iops in the last 1 second bvar::PerSecond> iops_; - // 最近1秒的出错IO数量 + // Number of IO errors in the last 1 second bvar::PerSecond> eps_; - // 最近1秒的数据量 + // Data volume in the last 1 second bvar::PerSecond> bps_; }; using IOMetricPtr = std::shared_ptr; @@ -120,100 +123,109 @@ enum class CSIOMetricType { class CSIOMetric { public: CSIOMetric() - : readMetric_(nullptr), writeMetric_(nullptr), recoverMetric_(nullptr), - pasteMetric_(nullptr), downloadMetric_(nullptr) {} + : readMetric_(nullptr), + writeMetric_(nullptr), + recoverMetric_(nullptr), + pasteMetric_(nullptr), + downloadMetric_(nullptr) {} ~CSIOMetric() {} /** - * 执行请求前记录metric - * @param type: 请求对应的metric类型 + * Record metric before executing the request + * @param type: The corresponding metric type of the request */ void OnRequest(CSIOMetricType type); /** - * 执行请求后记录metric - * 错误的io不会计入iops和bps统计 - * @param type: 请求对应的metric类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record metric after executing the request + * Failed IOs are not counted in the iops and bps statistics + * @param type: The corresponding metric type of the request + * @param size: The size of this IO's data + * @param latUs: The latency of this IO + * @param hasError: Whether an error occurred during this IO */ void 
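// For intuition, one serviced IO drives the counters above like this
// (a minimal sketch; the prefix is arbitrary):
//
//   IOMetric m;
//   m.Init("test_read");             // exposes test_read_request_num, ...
//   m.OnRequest();                   // reqNum_ +1, feeds rps_
//   // ... perform the IO ...
//   m.OnResponse(4096, 120, false);  // ioNum_ +1, ioBytes_ +4096, recorders fed
//   m.OnResponse(4096, 120, true);   // only errorNum_ +1: failed IOs are
//                                    // deliberately excluded from iops/bps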
OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError); /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return the IOMetric pointer corresponding to the specified type, + * or nullptr if the type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type); /** - * 初始化各项op的metric统计项 - * @return 成功返回0,失败返回-1 + * Initialize metric statistics for each op + * @return 0 on success, -1 on failure */ - int Init(const std::string &prefix); + int Init(const std::string& prefix); /** - * 释放各项op的metric资源 + * Release the metric resources of each op */ void Fini(); protected: - // ReadChunk统计 + // ReadChunk statistics IOMetricPtr readMetric_; - // WriteChunk统计 + // WriteChunk statistics IOMetricPtr writeMetric_; - // RecoverChunk统计 + // RecoverChunk statistics IOMetricPtr recoverMetric_; - // PasteChunk信息 + // PasteChunk statistics IOMetricPtr pasteMetric_; - // Download统计 + // Download statistics IOMetricPtr downloadMetric_; }; class CSCopysetMetric { public: CSCopysetMetric() - : logicPoolId_(0), copysetId_(0), chunkCount_(nullptr), - walSegmentCount_(nullptr), snapshotCount_(nullptr), + : logicPoolId_(0), + copysetId_(0), + chunkCount_(nullptr), + walSegmentCount_(nullptr), + snapshotCount_(nullptr), cloneChunkCount_(nullptr) {} ~CSCopysetMetric() {} /** - * 初始化copyset级别的metric统计项 - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1 + * Initialize metric statistics at the copyset level + * @param logicPoolId: The ID of the logical pool to which the copyset + * belongs + * @param copysetId: The ID of the copyset + * @return 0 on success, -1 on failure */ - int Init(const LogicPoolID &logicPoolId, const CopysetID &copysetId); + int Init(const LogicPoolID& logicPoolId, const CopysetID& copysetId); /** - * 监控DataStore指标,主要包括chunk的数量、快照的数量等 - * @param datastore: 该copyset下的datastore指针 + * Monitor DataStore indicators, mainly including the number of chunks, + * number of snapshots, etc. + * @param datastore: The datastore pointer under this copyset */ - void MonitorDataStore(CSDataStore *datastore); + void MonitorDataStore(CSDataStore* datastore); /** * @brief: Monitor log storage's metric, like the number of WAL segment file * @param logStorage: The pointer to CurveSegmentLogStorage */ - void MonitorCurveSegmentLogStorage(CurveSegmentLogStorage *logStorage); + void MonitorCurveSegmentLogStorage(CurveSegmentLogStorage* logStorage); /** - * 执行请求前记录metric - * @param type: 请求对应的metric类型 + * Record metric before executing the request + * @param type: The corresponding metric type of the request */ void OnRequest(CSIOMetricType type) { ioMetrics_.OnRequest(type); } /** - * 执行请求后记录metric - * 错误的io不会计入iops和bps统计 - * @param type: 请求对应的metric类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record metric after executing the request + * Failed IOs are not counted in the iops and bps statistics + * @param type: The corresponding metric type of the request + * @param size: The size of this IO's data + * @param latUs: The latency of this IO + * @param hasError: Whether an error occurred during this IO */ void OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError) { @@ -221,9 +233,10 @@ class CSCopysetMetric { } /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 
返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return the IOMetric pointer corresponding to the specified type, + * or nullptr if the type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type) { return ioMetrics_.GetIOMetric(type); @@ -264,27 +277,27 @@ class CSCopysetMetric { } private: - // 逻辑池id + // Logical Pool ID LogicPoolID logicPoolId_; // copyset id CopysetID copysetId_; - // copyset上的 chunk 的数量 + // Number of chunks on copyset PassiveStatusPtr chunkCount_; // The total number of WAL segment in copyset PassiveStatusPtr walSegmentCount_; - // copyset上的 快照文件 的数量 + // Number of snapshot files on copyset PassiveStatusPtr snapshotCount_; - // copyset上的 clone chunk 的数量 + // The number of clone chunks on the copyset PassiveStatusPtr cloneChunkCount_; - // copyset上的IO类型的metric统计 + // Metric statistics of IO types on the copyset CSIOMetric ioMetrics_; }; struct ChunkServerMetricOptions { bool collectMetric; - // chunkserver的ip + // Chunkserver IP std::string ip; - // chunkserver的端口号 + // The port number of the chunkserver uint32_t port; ChunkServerMetricOptions() : collectMetric(false), ip("127.0.0.1"), port(8888) {} @@ -344,173 +357,175 @@ class CopysetMetricMap { } private: - // 保护复制组metric map的读写锁 + // Read-write lock protecting the replication group metric map RWLock rwLock_; - // 各复制组metric的映射表,用GroupId作为key + // Mapping table for each replication group metric, using GroupId as the key std::unordered_map map_; }; class ChunkServerMetric : public Uncopyable { public: - // 实现单例 - static ChunkServerMetric *GetInstance(); + // Singleton implementation + static ChunkServerMetric* GetInstance(); /** - * 初始化chunkserver统计项 - * @pa)ram option: 初始化配置项 - * @return 成功返回0,失败返回-1 + * Initialize chunkserver statistics + * @param option: The initialization options + * @return 0 on success, -1 on failure */ - int Init(const ChunkServerMetricOptions &option); + int Init(const ChunkServerMetricOptions& option); /** - * 释放metric资源 - * @return 成功返回0,失败返回-1 + * Release metric resources + * @return 0 on success, -1 on failure */ int Fini(); /** - * 请求前记录metric - * @param logicPoolId: 此次io操作所在的逻辑池id - * @param copysetId: 此次io操作所在的copysetid - * @param type: 请求类型 + * Record metric before request + * @param logicPoolId: The logical pool ID where this io operation is + * located + * @param copysetId: The copysetID where this io operation is located + * @param type: Request type */ - void OnRequest(const LogicPoolID &logicPoolId, const CopysetID &copysetId, + void OnRequest(const LogicPoolID& logicPoolId, const CopysetID& copysetId, CSIOMetricType type); /** - * 请求结束时记录该次IO指标 - * 错误的io不会计入iops和bps统计 - * @param logicPoolId: 此次io操作所在的逻辑池id - * @param copysetId: 此次io操作所在的copysetid - * @param type: 请求类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record the IO metric at the end of the request + * Failed IOs are not counted in the iops and bps statistics + * @param logicPoolId: The logical pool ID where this io operation is + * located + * @param copysetId: The copysetID where this io operation is located + * @param type: Request type + * @param size: The size of this IO's data + * @param latUs: The latency of this IO + * @param hasError: Whether an error occurred during this IO */ - void OnResponse(const LogicPoolID &logicPoolId, const CopysetID &copysetId, + void OnResponse(const LogicPoolID& logicPoolId, const CopysetID& copysetId, 
CSIOMetricType type, size_t size, int64_t latUs, bool hasError); /** - * 创建指定copyset的metric - * 如果collectMetric为false,返回0,但实际并不会创建 - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1,如果指定metric已存在返回失败 + * Create a metric for the specified copyset + * If collectMetric is false, it returns 0, but it is not actually created + * @param logicPoolId: The ID of the logical pool to which the copyset + * belongs + * @param copysetId: The ID of the copyset + * @return 0 on success, -1 on failure; also fails if the + * specified metric already exists */ - int CreateCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + int CreateCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 获取指定copyset的metric - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回指定的copyset metric,失败返回nullptr + * Obtain the metric of the specified copyset + * @param logicPoolId: The ID of the logical pool to which the copyset + * belongs + * @param copysetId: The ID of the copyset + * @return the specified copyset metric on success, nullptr on + * failure */ - CopysetMetricPtr GetCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + CopysetMetricPtr GetCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 删除指定copyset的metric - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1 + * Delete the metric for the specified copyset + * @param logicPoolId: The ID of the logical pool to which the copyset + * belongs + * @param copysetId: The ID of the copyset + * @return 0 on success, -1 on failure */ - int RemoveCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + int RemoveCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 监视chunk分配池,主要监视池中chunk的数量 - * @param chunkFilePool: chunkfilePool的对象指针 + * Monitor the chunk allocation pool, mainly monitoring the number of chunks + * in the pool + * @param chunkFilePool: Object pointer to chunkfilePool */ - void MonitorChunkFilePool(FilePool *chunkFilePool); + void MonitorChunkFilePool(FilePool* chunkFilePool); /** - * 监视wal segment分配池,主要监视池中segment的数量 - * @param walFilePool: walfilePool的对象指针 + * Monitor the wal segment allocation pool, mainly monitoring the number + * of segments in the pool + * @param walFilePool: Object pointer to walfilePool */ - void MonitorWalFilePool(FilePool *walFilePool); + void MonitorWalFilePool(FilePool* walFilePool); /** - * 监视回收站 - * @param trash: trash的对象指针 + * Monitor the recycle bin + * @param trash: Object pointer to trash */ - void MonitorTrash(Trash *trash); + void MonitorTrash(Trash* trash); /** - * 增加 leader count 计数 + * Increase the leader count */ void IncreaseLeaderCount(); /** - * 减少 leader count 计数 + * Decrease the leader count */ void DecreaseLeaderCount(); /** - * 更新配置项数据 - * @param conf: 配置内容 + * Update configuration item data + * @param conf: Configuration content */ - void ExposeConfigMetric(common::Configuration *conf); + void ExposeConfigMetric(common::Configuration* conf); /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return the IOMetric pointer corresponding to the specified type, + * or nullptr if the type does not exist */ IOMetricPtr 
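// Putting the per-copyset API together, the expected call order over a
// copyset's lifetime looks roughly like this (sketch; lpid/cpid/size/latUs
// are placeholder values):
//
//   auto* metric = ChunkServerMetric::GetInstance();
//   metric->CreateCopysetMetric(lpid, cpid);   // when the copyset is created
//   metric->OnRequest(lpid, cpid, CSIOMetricType::WRITE_CHUNK);
//   metric->OnResponse(lpid, cpid, CSIOMetricType::WRITE_CHUNK,
//                      size, latUs, false);    // once per completed IO
//   metric->RemoveCopysetMetric(lpid, cpid);   // when the copyset is removed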
GetIOMetric(CSIOMetricType type) { return ioMetrics_.GetIOMetric(type); } - CopysetMetricMap *GetCopysetMetricMap() { return &copysetMetricMap_; } + CopysetMetricMap* GetCopysetMetricMap() { return &copysetMetricMap_; } uint32_t GetCopysetCount() { return copysetMetricMap_.Size(); } uint32_t GetLeaderCount() const { - if (leaderCount_ == nullptr) - return 0; + if (leaderCount_ == nullptr) return 0; return leaderCount_->get_value(); } uint32_t GetTotalChunkCount() { - if (chunkCount_ == nullptr) - return 0; + if (chunkCount_ == nullptr) return 0; return chunkCount_->get_value(); } uint32_t GetTotalSnapshotCount() { - if (snapshotCount_ == nullptr) - return 0; + if (snapshotCount_ == nullptr) return 0; return snapshotCount_->get_value(); } uint32_t GetTotalCloneChunkCount() { - if (cloneChunkCount_ == nullptr) - return 0; + if (cloneChunkCount_ == nullptr) return 0; return cloneChunkCount_->get_value(); } uint32_t GetTotalWalSegmentCount() { - if (nullptr == walSegmentCount_) - return 0; + if (nullptr == walSegmentCount_) return 0; return walSegmentCount_->get_value(); } uint32_t GetChunkLeftCount() const { - if (chunkLeft_ == nullptr) - return 0; + if (chunkLeft_ == nullptr) return 0; return chunkLeft_->get_value(); } uint32_t GetWalSegmentLeftCount() const { - if (nullptr == walSegmentLeft_) - return 0; + if (nullptr == walSegmentLeft_) return 0; return walSegmentLeft_->get_value(); } uint32_t GetChunkTrashedCount() const { - if (chunkTrashed_ == nullptr) - return 0; + if (chunkTrashed_ == nullptr) return 0; return chunkTrashed_->get_value(); } @@ -522,32 +537,32 @@ class ChunkServerMetric : public Uncopyable { } private: - // 初始化标志 + // Initialization flag bool hasInited_; - // 配置项 + // Configuration items ChunkServerMetricOptions option_; - // leader 的数量 + // Number of leaders AdderPtr leaderCount_; - // chunkfilepool 中剩余的 chunk 的数量 + // The number of remaining chunks in the chunkfilepool PassiveStatusPtr chunkLeft_; - // walfilepool 中剩余的 wal segment 的数量 + // The number of remaining wal segments in the walfilepool PassiveStatusPtr walSegmentLeft_; - // trash 中的 chunk 的数量 + // Number of chunks in trash PassiveStatusPtr chunkTrashed_; - // chunkserver上的 chunk 的数量 + // Number of chunks on chunkserver PassiveStatusPtr chunkCount_; // The total number of WAL segment in chunkserver PassiveStatusPtr walSegmentCount_; - // chunkserver上的 快照文件 的数量 + // Number of snapshot files on chunkserver PassiveStatusPtr snapshotCount_; - // chunkserver上的 clone chunk 的数量 + // Number of clone chunks on chunkserver PassiveStatusPtr cloneChunkCount_; - // 各复制组metric的映射表,用GroupId作为key + // Mapping table for each replication group metric, using GroupId as the key CopysetMetricMap copysetMetricMap_; - // chunkserver上的IO类型的metric统计 + // Metric statistics of IO types on the chunkserver CSIOMetric ioMetrics_; - // 用于单例模式的自指指针 - static ChunkServerMetric *self_; + // Self pointer for the singleton instance + static ChunkServerMetric* self_; }; } // namespace chunkserver diff --git a/src/chunkserver/cli.h b/src/chunkserver/cli.h index 3c8ecc6997..ed048dc460 100644 --- a/src/chunkserver/cli.h +++ b/src/chunkserver/cli.h @@ -33,41 +33,37 @@ namespace curve { namespace chunkserver { /** - * Cli就是配置变更相关接口的封装,方便使用,避免直接操作RPC + * Cli encapsulates the configuration-change interfaces, making them + * convenient to use and avoiding direct RPC operations */ -// 获取leader -butil::Status GetLeader(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - PeerId *leaderId); +// Get the leader +butil::Status 
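// A typical configuration change through these wrappers resolves the leader
// and then issues the request, e.g. (sketch; error handling elided, and the
// IDs/peers are placeholders):
//
//   braft::cli::CliOptions opt;
//   opt.timeout_ms = 1000;
//   PeerId leader;
//   butil::Status st = GetLeader(lpid, cpid, conf, &leader);
//   if (st.ok()) {
//       st = AddPeer(lpid, cpid, conf, newPeerId, opt);  // re-resolves the
//   }                                                    // leader internally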
GetLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + PeerId* leaderId); -// 增加一个peer -butil::Status AddPeer(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - const PeerId &peer_id, - const braft::cli::CliOptions &options); +// Add a peer +butil::Status AddPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const PeerId& peer_id, + const braft::cli::CliOptions& options); -// 移除一个peer -butil::Status RemovePeer(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - const PeerId &peer_id, - const braft::cli::CliOptions &options); +// Remove a peer +butil::Status RemovePeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const PeerId& peer_id, + const braft::cli::CliOptions& options); -// 转移leader -butil::Status TransferLeader(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - const PeerId &peer, - const braft::cli::CliOptions &options); +// Transfer leader +butil::Status TransferLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf, const PeerId& peer, + const braft::cli::CliOptions& options); -// 触发快照 -butil::Status Snapshot(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const PeerId &peer, - const braft::cli::CliOptions &options); +// Trigger snapshot +butil::Status Snapshot(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const PeerId& peer, + const braft::cli::CliOptions& options); } // namespace chunkserver } // namespace curve diff --git a/src/chunkserver/cli2.cpp b/src/chunkserver/cli2.cpp index 5328724316..ba779bb8d7 100644 --- a/src/chunkserver/cli2.cpp +++ b/src/chunkserver/cli2.cpp @@ -22,10 +22,10 @@ #include "src/chunkserver/cli2.h" -#include -#include #include #include +#include +#include #include @@ -34,16 +34,14 @@ namespace curve { namespace chunkserver { -butil::Status GetLeader(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - Peer *leader) { +butil::Status GetLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + Peer* leader) { if (conf.empty()) { return butil::Status(EINVAL, "Empty group configuration"); } - butil::Status st(-1, - "Fail to get leader of copyset node %s", + butil::Status st(-1, "Fail to get leader of copyset node %s", ToGroupIdString(logicPoolId, copysetId).c_str()); PeerId leaderId; Configuration::const_iterator iter = conf.begin(); @@ -53,7 +51,7 @@ butil::Status GetLeader(const LogicPoolID &logicPoolId, return butil::Status(-1, "Fail to init channel to %s", iter->to_string().c_str()); } - Peer *peer = new Peer(); + Peer* peer = new Peer(); CliService2_Stub stub(&channel); GetLeaderRequest2 request; GetLeaderResponse2 response; @@ -84,11 +82,9 @@ butil::Status GetLeader(const LogicPoolID &logicPoolId, return butil::Status::OK(); } -butil::Status AddPeer(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options) { +butil::Status AddPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st); @@ -101,10 +97,10 @@ butil::Status AddPeer(const LogicPoolID &logicPoolId, 
AddPeerRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); request.set_allocated_leader(leaderPeer); *leaderPeer = leader; - Peer *addPeer = new Peer(); + Peer* addPeer = new Peer(); request.set_allocated_addpeer(addPeer); *addPeer = peer; AddPeerResponse2 response; @@ -128,17 +124,15 @@ butil::Status AddPeer(const LogicPoolID &logicPoolId, new_conf.add_peer(peer); } LOG(INFO) << "Configuration of replication group ` " - << ToGroupIdString(logicPoolId, copysetId) - << " ' changed from " << old_conf - << " to " << new_conf; + << ToGroupIdString(logicPoolId, copysetId) << " ' changed from " + << old_conf << " to " << new_conf; return butil::Status::OK(); } -butil::Status RemovePeer(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options) { +butil::Status RemovePeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, + const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st); @@ -151,10 +145,10 @@ butil::Status RemovePeer(const LogicPoolID &logicPoolId, RemovePeerRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); request.set_allocated_leader(leaderPeer); *leaderPeer = leader; - Peer *removePeer = new Peer(); + Peer* removePeer = new Peer(); request.set_allocated_removepeer(removePeer); *removePeer = peer; RemovePeerResponse2 response; @@ -179,17 +173,15 @@ butil::Status RemovePeer(const LogicPoolID &logicPoolId, new_conf.add_peer(peer); } LOG(INFO) << "Configuration of replication group ` " - << ToGroupIdString(logicPoolId, copysetId) - << " ' changed from " << old_conf - << " to " << new_conf; + << ToGroupIdString(logicPoolId, copysetId) << " ' changed from " + << old_conf << " to " << new_conf; return butil::Status::OK(); } -butil::Status ChangePeers(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - const Configuration &newPeers, - const braft::cli::CliOptions &options) { +butil::Status ChangePeers(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Configuration& newPeers, + const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st); @@ -203,11 +195,11 @@ butil::Status ChangePeers(const LogicPoolID &logicPoolId, ChangePeersRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); *leaderPeer = leader; request.set_allocated_leader(leaderPeer); - for (Configuration::const_iterator - iter = newPeers.begin(); iter != newPeers.end(); ++iter) { + for (Configuration::const_iterator iter = newPeers.begin(); + iter != newPeers.end(); ++iter) { request.add_newpeers()->set_address(iter->to_string()); } ChangePeersResponse2 response; @@ -229,17 +221,15 @@ butil::Status ChangePeers(const LogicPoolID &logicPoolId, new_conf.add_peer(response.newpeers(i).address()); } LOG(INFO) << "Configuration of replication group `" - << ToGroupIdString(logicPoolId, copysetId) - << "' changed from " << old_conf - << " to " << new_conf; + << ToGroupIdString(logicPoolId, copysetId) << "' changed from " + 
<< old_conf << " to " << new_conf; return butil::Status::OK(); } -butil::Status TransferLeader(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options) { +butil::Status TransferLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf, const Peer& peer, + const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st);
@@ -256,10 +246,10 @@ butil::Status TransferLeader(const LogicPoolID &logicPoolId, TransferLeaderRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); request.set_allocated_leader(leaderPeer); *leaderPeer = leader; - Peer *transfereePeer = new Peer(); + Peer* transfereePeer = new Peer(); request.set_allocated_transferee(transfereePeer); *transfereePeer = peer; TransferLeaderResponse2 response;
@@ -274,18 +264,23 @@ butil::Status TransferLeader(const LogicPoolID &logicPoolId, return butil::Status::OK(); } -// reset peer不走一致性协议,直接将peers重置,因此存在一定的风险 -// 应用场景:大多数节点挂掉的极端情况。在这种情况下,该copyset将无法写入,直 -// 到半小时后mds将挂掉的副本上的copyset迁移,因此有一段时间不可用,为了应对这种场景,引入了 -// reset peer工具,直接将复制组成员reset成只包含存活的副本。 -// 注意事项: -// 1、reset peer之前,需要通过check-copyset工具确认复制组中的大多数副本确实挂掉 -// 2、reset peer的时候,要确保剩下的副本有最新的数据,不然存在丢数据的风险 -// 3、reset peer适用于其他两个副本不能恢复的情况,不然可能会扰乱集群 +// Reset peer bypasses the consistency protocol and resets the peer list +// directly, so it carries a certain risk. +// Application scenario: the extreme case where most nodes are down. In that +// case the copyset can no longer be written; only half an hour later will MDS +// migrate the copysets on the dead replicas, so the copyset stays unavailable +// for a while. The reset peer tool was introduced for this scenario: it +// resets the replication group membership directly to contain only the +// surviving replicas. +// Precautions: +// 1. Before resetting peers, use the check-copyset tool to confirm that the +// majority of replicas in the replication group are indeed down +// 2. When resetting peers, make sure the remaining replicas hold the latest +// data, otherwise there is a risk of losing data +// 3. Reset peer is only for cases where the other replicas cannot be +// recovered; otherwise it may disrupt the cluster -butil::Status ResetPeer(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration& newPeers, - const Peer& requestPeer, +butil::Status ResetPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& newPeers, const Peer& requestPeer, const braft::cli::CliOptions& options) { if (newPeers.empty()) { return butil::Status(EINVAL, "new_conf is empty"); }
@@ -294,7 +289,7 @@ butil::Status ResetPeer(const LogicPoolID &logicPoolId, brpc::Channel channel; if (channel.Init(requestPeerId.addr, NULL) != 0) { return butil::Status(-1, "Fail to init channel to %s", - requestPeerId.to_string().c_str()); + requestPeerId.to_string().c_str()); } brpc::Controller cntl; cntl.set_timeout_ms(options.timeout_ms);
@@ -302,11 +297,11 @@ butil::Status ResetPeer(const LogicPoolID &logicPoolId, ResetPeerRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *requestPeerPtr = new Peer(); + Peer* requestPeerPtr = new Peer(); *requestPeerPtr = requestPeer; request.set_allocated_requestpeer(requestPeerPtr); - for (Configuration::const_iterator - iter = newPeers.begin(); iter != newPeers.end(); ++iter) { + for (Configuration::const_iterator iter = newPeers.begin(); + iter != newPeers.end(); ++iter) { request.add_newpeers()->set_address(iter->to_string()); } ResetPeerResponse2 response;
@@ -318,15 +313,14 @@ butil::Status ResetPeer(const LogicPoolID &logicPoolId, return butil::Status::OK(); } -butil::Status Snapshot(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Peer& peer, +butil::Status Snapshot(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Peer& peer, const braft::cli::CliOptions& options) { brpc::Channel channel; PeerId peerId(peer.address()); if (channel.Init(peerId.addr, NULL) != 0) { return butil::Status(-1, "Fail to init channel to %s", - peerId.to_string().c_str()); + peerId.to_string().c_str()); } brpc::Controller cntl; cntl.set_timeout_ms(options.timeout_ms);
@@ -334,7 +328,7 @@ butil::Status Snapshot(const LogicPoolID &logicPoolId, SnapshotRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *peerPtr = new Peer(peer); + Peer* peerPtr = new Peer(peer); request.set_allocated_peer(peerPtr); SnapshotResponse2 response; CliService2_Stub stub(&channel);
@@ -351,7 +345,7 @@ butil::Status SnapshotAll(const Peer& peer, PeerId peerId(peer.address()); if (channel.Init(peerId.addr, NULL) != 0) { return butil::Status(-1, "Fail to init channel to %s", - peerId.to_string().c_str()); + peerId.to_string().c_str()); } brpc::Controller cntl; cntl.set_timeout_ms(options.timeout_ms);
diff --git a/src/chunkserver/cli2.h b/src/chunkserver/cli2.h index ba60e057e7..512850b747 100644 --- a/src/chunkserver/cli2.h +++ b/src/chunkserver/cli2.h
@@ -33,57 +33,50 @@ namespace curve { namespace chunkserver { /** - * Cli就是配置变更相关接口的封装,方便使用,避免直接操作RPC + * Cli encapsulates the configuration-change interfaces for convenient use, + * avoiding direct RPC operations */ -// 获取leader -butil::Status GetLeader(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - Peer *leader); - -// 增加一个peer -butil::Status AddPeer(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options); - -// 移除一个peer -butil::Status RemovePeer(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options); -
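To make the reset-peer precautions above concrete, here is a minimal, hypothetical sketch (not part of this patch) of how a management tool might drive these wrappers. It assumes the cli2.h declarations shown here and a caller-supplied list of surviving peer addresses; the check-copyset verification described above must already have been performed.

#include <string>
#include <vector>

#include "src/chunkserver/cli2.h"

namespace curve {
namespace chunkserver {

// Reset a copyset's membership to the surviving replicas only.
butil::Status ResetToSurvivors(const LogicPoolID& lpid, const CopysetID& cpid,
                               const std::vector<std::string>& survivors) {
    braft::cli::CliOptions opt;
    opt.timeout_ms = 1500;  // illustrative values
    opt.max_retry = 3;

    Configuration newConf;
    for (const auto& addr : survivors) {
        newConf.add_peer(PeerId(addr));  // e.g. "10.0.0.1:8200:0"
    }

    // The request is sent directly to one surviving peer; ResetPeer does
    // not go through the consistency protocol (see the warning above).
    Peer requestPeer;
    requestPeer.set_address(survivors.front());
    return ResetPeer(lpid, cpid, newConf, requestPeer, opt);
}

}  // namespace chunkserver
}  // namespace curve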
-// 变更配置 -butil::Status ChangePeers(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - const Configuration &newPeers, - const braft::cli::CliOptions &options); -// 转移leader -butil::Status TransferLeader(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options); -// 重置复制组 -butil::Status ResetPeer(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration& newPeers, - const Peer& requestPeer, +// Get the leader +butil::Status GetLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + Peer* leader); + +// Add a peer +butil::Status AddPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, const braft::cli::CliOptions& options); + +// Remove a peer +butil::Status RemovePeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, + const braft::cli::CliOptions& options); + +// Change configuration +butil::Status ChangePeers(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Configuration& newPeers, + const braft::cli::CliOptions& options); + +// Transfer leader +butil::Status TransferLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf, const Peer& peer, + const braft::cli::CliOptions& options); + +// Reset replication group +butil::Status ResetPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& newPeers, const Peer& requestPeer, const braft::cli::CliOptions& options); -// 触发快照 -butil::Status Snapshot(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Peer& peer, +// Trigger snapshot +butil::Status Snapshot(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Peer& peer, const braft::cli::CliOptions& options); -// 给chunkserver上全部copyset副本触发快照 +// Trigger a snapshot for all copyset replicas on the chunkserver butil::Status SnapshotAll(const Peer& peer, const braft::cli::CliOptions& options);
diff --git a/src/chunkserver/clone_copyer.h b/src/chunkserver/clone_copyer.h index 6ccb7d7dc1..3c640f4693 100644 --- a/src/chunkserver/clone_copyer.h +++ b/src/chunkserver/clone_copyer.h
@@ -24,56 +24,57 @@ #define SRC_CHUNKSERVER_CLONE_COPYER_H_ #include + +#include #include -#include #include -#include #include "include/chunkserver/chunkserver_common.h" -#include "src/common/location_operator.h" +#include "include/client/libcurve.h" +#include "src/client/client_common.h" #include "src/client/config_info.h" #include "src/client/libcurve_file.h" -#include "src/client/client_common.h" -#include "include/client/libcurve.h" +#include "src/common/location_operator.h" #include "src/common/s3_adapter.h" namespace curve { namespace chunkserver { -using curve::common::S3Adapter; using curve::client::FileClient; using curve::client::UserInfo; -using curve::common::LocationOperator; -using curve::common::OriginType; using curve::common::GetObjectAsyncCallBack; using curve::common::GetObjectAsyncContext; +using curve::common::LocationOperator; +using curve::common::OriginType; +using curve::common::S3Adapter; using std::string; class DownloadClosure; struct CopyerOptions { - // curvefs上的root用户信息 + // Root user information on curvefs UserInfo curveUser; - // curvefs 的配置文件路径 + // Configuration file path for curvefs std::string curveConf; - // s3adapter 的配置文件路径 + // Configuration file path for s3adapter std::string s3Conf; - // curve client的对象指针 + // Object pointer to curve client std::shared_ptr curveClient; - // s3 adapter的对象指针 + // Object pointer to s3 adapter std::shared_ptr s3Client; // curve file's time to live uint64_t curveFileTimeoutSec; }; struct AsyncDownloadContext { - // 源chunk的位置信息 + // Location information of the source chunk string location; - // 请求下载数据在对象中的相对偏移 + // Relative offset within the object of the data to download off_t offset; - // 请求下载数据的的长度 + // Length of the data to download size_t size; - // 存放下载数据的缓冲区 + // Buffer for storing the downloaded data char* buf; };
@@ -85,9 +86,9 @@ struct CurveOpenTimestamp { // lastest use time, using seconds int64_t lastUsedSec; // Init functions - CurveOpenTimestamp(): fd(-1), fileName(""), lastUsedSec(0) {} - CurveOpenTimestamp(int _fd, string _file, uint64_t _lastUsedSec): - fd(_fd), fileName(_file), lastUsedSec(_lastUsedSec) {} + CurveOpenTimestamp() : fd(-1), fileName(""), lastUsedSec(0) {} + CurveOpenTimestamp(int _fd, string _file, uint64_t _lastUsedSec) + : fd(_fd), fileName(_file), lastUsedSec(_lastUsedSec) {} }; std::ostream& operator<<(std::ostream& out, const AsyncDownloadContext& rhs);
@@ -98,40 +99,34 @@ class OriginCopyer { public: virtual ~OriginCopyer() = default; /** - * 初始化资源 - * @param options: 配置信息 - * @return: 成功返回0,失败返回-1 + * Initialize resources + * @param options: configuration information + * @return: returns 0 on success, -1 on failure */ virtual int Init(const CopyerOptions& options); /** - * 释放资源 - * @return: 成功返回0,失败返回-1 + * Release resources + * @return: returns 0 on success, -1 on failure */ virtual int Fini(); /** - * 异步地从源端拷贝数据 - * @param done:包含下载请求的上下文信息, - * 数据下载完成后执行该closure进行回调 + * Asynchronously copy data from the source + * @param done: carries the context of the download request; the closure + * is executed as the callback once the download finishes */ virtual void DownloadAsync(DownloadClosure* done); private: - void DownloadFromS3(const string& objectName, - off_t off, - size_t size, - char* buf, - DownloadClosure* done); - void DownloadFromCurve(const string& fileName, - off_t off, - size_t size, - char* buf, - DownloadClosure* done); + void DownloadFromS3(const string& objectName, off_t off, size_t size, + char* buf, DownloadClosure* done); + void DownloadFromCurve(const string& fileName, off_t off, size_t size, + char* buf, DownloadClosure* done); static void DeleteExpiredCurveCache(void* arg); private: - // curvefs上的root用户信息 + // Root user information on curvefs UserInfo curveUser_; // mutex for protect curveOpenTime_ std::mutex timeMtx_;
@@ -139,13 +134,13 @@ class OriginCopyer { std::list curveOpenTime_; // curve file's time to live uint64_t curveFileTimeoutSec_; - // 负责跟curve交互 + // Responsible for interacting with curve std::shared_ptr curveClient_; - // 负责跟s3交互 - std::shared_ptr s3Client_; - // 保护fdMap_的互斥锁 - std::mutex mtx_; - // 文件名->文件fd 的映射 + // Responsible for interacting with s3 + std::shared_ptr s3Client_; + // Mutex protecting fdMap_ + std::mutex mtx_; + // Mapping from file name to file fd std::unordered_map fdMap_; // Timer for clean expired curve file bthread::TimerThread timer_;
diff --git a/src/chunkserver/clone_core.cpp b/src/chunkserver/clone_core.cpp index b3efe70f36..af05a01646 100644 --- a/src/chunkserver/clone_core.cpp +++ b/src/chunkserver/clone_core.cpp
@@ -20,15 +20,16 @@ * Author: yangyaokai */ -#include +#include "src/chunkserver/clone_core.h" + #include +#include -#include 
"src/common/bitmap.h" -#include "src/chunkserver/clone_core.h" -#include "src/chunkserver/op_request.h" -#include "src/chunkserver/copyset_node.h" #include "src/chunkserver/chunk_service_closure.h" +#include "src/chunkserver/copyset_node.h" #include "src/chunkserver/datastore/chunkserver_datastore.h" +#include "src/chunkserver/op_request.h" +#include "src/common/bitmap.h" #include "src/common/timeutility.h" namespace curve { @@ -37,26 +38,23 @@ namespace chunkserver { using curve::common::Bitmap; using curve::common::TimeUtility; -static void ReadBufferDeleter(void* ptr) { - delete[] static_cast(ptr); -} +static void ReadBufferDeleter(void* ptr) { delete[] static_cast(ptr); } DownloadClosure::DownloadClosure(std::shared_ptr readRequest, std::shared_ptr cloneCore, AsyncDownloadContext* downloadCtx, Closure* done) - : isFailed_(false) - , beginTime_(TimeUtility::GetTimeofDayUs()) - , downloadCtx_(downloadCtx) - , cloneCore_(cloneCore) - , readRequest_(readRequest) - , done_(done) { - // 记录初始metric + : isFailed_(false), + beginTime_(TimeUtility::GetTimeofDayUs()), + downloadCtx_(downloadCtx), + cloneCore_(cloneCore), + readRequest_(readRequest), + done_(done) { + // Record initial metric if (readRequest_ != nullptr) { const ChunkRequest* request = readRequest_->GetChunkRequest(); ChunkServerMetric* csMetric = ChunkServerMetric::GetInstance(); - csMetric->OnRequest(request->logicpoolid(), - request->copysetid(), + csMetric->OnRequest(request->logicpoolid(), request->copysetid(), CSIOMetricType::DOWNLOAD); } } @@ -66,60 +64,56 @@ void DownloadClosure::Run() { std::unique_ptr contextGuard(downloadCtx_); brpc::ClosureGuard doneGuard(done_); butil::IOBuf copyData; - copyData.append_user_data( - downloadCtx_->buf, downloadCtx_->size, ReadBufferDeleter); + copyData.append_user_data(downloadCtx_->buf, downloadCtx_->size, + ReadBufferDeleter); CHECK(readRequest_ != nullptr) << "read request is nullptr."; - // 记录结束metric + // Record End Metric const ChunkRequest* request = readRequest_->GetChunkRequest(); ChunkServerMetric* csMetric = ChunkServerMetric::GetInstance(); uint64_t latencyUs = TimeUtility::GetTimeofDayUs() - beginTime_; - csMetric->OnResponse(request->logicpoolid(), - request->copysetid(), - CSIOMetricType::DOWNLOAD, - downloadCtx_->size, - latencyUs, - isFailed_); - - // 从源端拷贝数据失败 + csMetric->OnResponse(request->logicpoolid(), request->copysetid(), + CSIOMetricType::DOWNLOAD, downloadCtx_->size, + latencyUs, isFailed_); + + // Copying data from the source failed if (isFailed_) { LOG(ERROR) << "download origin data failed: " - << " logic pool id: " << request->logicpoolid() - << " copyset id: " << request->copysetid() - << " chunkid: " << request->chunkid() - << " AsyncDownloadContext: " << *downloadCtx_; + << " logic pool id: " << request->logicpoolid() + << " copyset id: " << request->copysetid() + << " chunkid: " << request->chunkid() + << " AsyncDownloadContext: " << *downloadCtx_; cloneCore_->SetResponse( readRequest_, CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); return; } if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) { - // release doneGuard,将closure交给paste请求处理 - cloneCore_->PasteCloneData(readRequest_, - ©Data, - downloadCtx_->offset, - downloadCtx_->size, + // Release doneGuard, hand over the closure to the pass request for + // processing + cloneCore_->PasteCloneData(readRequest_, ©Data, + downloadCtx_->offset, downloadCtx_->size, doneGuard.release()); } else if (CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype()) { - // 出错或处理结束调用closure返回给用户 + // Error or end of 
processing call closure returned to user cloneCore_->SetReadChunkResponse(readRequest_, ©Data); - // paste clone data是异步操作,很快就能处理完 - cloneCore_->PasteCloneData(readRequest_, - ©Data, - downloadCtx_->offset, - downloadCtx_->size, + // Paste clone data is an asynchronous operation that can be processed + // quickly + cloneCore_->PasteCloneData(readRequest_, ©Data, + downloadCtx_->offset, downloadCtx_->size, nullptr); } } void CloneClosure::Run() { - // 释放资源 + // Release resources std::unique_ptr selfGuard(this); std::unique_ptr requestGuard(request_); std::unique_ptr responseGuard(response_); brpc::ClosureGuard doneGuard(done_); - // 如果userResponse不为空,需要将response_中的相关内容赋值给userResponse + // If userResponse is not empty, you need to set the response_ Assign the + // relevant content in to userResponse if (userResponse_ != nullptr) { if (response_->has_status()) { userResponse_->set_status(response_->status()); @@ -134,8 +128,8 @@ void CloneClosure::Run() { } int CloneCore::CloneReadByLocalInfo( - std::shared_ptr readRequest, - const CSChunkInfo &chunkInfo, Closure* done) { + std::shared_ptr readRequest, const CSChunkInfo& chunkInfo, + Closure* done) { brpc::ClosureGuard doneGuard(done); const ChunkRequest* request = readRequest->request_; off_t offset = request->offset(); @@ -148,8 +142,7 @@ int CloneCore::CloneReadByLocalInfo( << " logic pool id: " << request->logicpoolid() << " copyset id: " << request->copysetid() << " chunkid: " << request->chunkid() - << " offset: " << offset - << " length: " << length + << " offset: " << offset << " length: " << length << " block size: " << blockSize; SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); @@ -159,104 +152,103 @@ int CloneCore::CloneReadByLocalInfo( uint32_t beginIndex = offset / blockSize; uint32_t endIndex = (offset + length - 1) / blockSize; - // 请求提交到CloneManager的时候,chunk一定是clone chunk - // 但是由于有其他请求操作相同的chunk,此时chunk有可能已经被遍写过了 - // 所以此处要先判断chunk是否是clone chunk,如果是再判断是否要拷贝数据 + // When submitting a request to CloneManager, the chunk must be a clone + // chunk However, due to other requests for the same chunk, it is possible + // that the chunk has already been overwritten at this time So here we need + // to first determine whether the chunk is a clone chunk, and then determine + // whether to copy the data if so bool needClone = chunkInfo.isClone && - (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex) - != Bitmap::NO_POS); + (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex) != + Bitmap::NO_POS); if (needClone) { - // TODO(yyk) 这一块可以优化,但是优化方法判断条件可能比较复杂 - // 目前只根据是否存在未写过的page来决定是否要触发拷贝 - // chunk中请求读取范围内的数据存在page未被写过,则需要从源端拷贝数据 + // The TODO(yyk) block can be optimized, but the optimization method may + // determine complex conditions Currently, the decision to trigger + // copying is only based on whether there are unwritten pages If the + // data within the requested read range in the chunk has a page that has + // not been written, it is necessary to copy the data from the source + // side AsyncDownloadContext* downloadCtx = new (std::nothrow) AsyncDownloadContext; downloadCtx->location = chunkInfo.location; downloadCtx->offset = offset; downloadCtx->size = length; downloadCtx->buf = new (std::nothrow) char[length]; - DownloadClosure* downloadClosure = - new (std::nothrow) DownloadClosure(readRequest, - shared_from_this(), - downloadCtx, - doneGuard.release()); + DownloadClosure* downloadClosure = new (std::nothrow) DownloadClosure( + readRequest, shared_from_this(), downloadCtx, doneGuard.release()); 
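The needClone test above reduces to block-index arithmetic over the chunk's bitmap. A self-contained sketch of the same computation, with std::vector<bool> standing in for curve::common::Bitmap and its NextClearBit lookup:

#include <cstdint>
#include <vector>

// One bit per block; set = block already written. A copy from the source
// is needed only if the chunk is still a clone chunk and some block in
// [offset, offset + length) has not been written yet.
static bool NeedCloneSketch(bool isCloneChunk, const std::vector<bool>& bitmap,
                            uint64_t offset, uint64_t length,
                            uint32_t blockSize) {
    if (!isCloneChunk || length == 0) return false;
    uint64_t beginIndex = offset / blockSize;
    uint64_t endIndex = (offset + length - 1) / blockSize;
    for (uint64_t i = beginIndex; i <= endIndex; ++i) {
        if (!bitmap[i]) return true;  // equivalent of NextClearBit != NO_POS
    }
    return false;
}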
- // 执行到这一步说明不需要拷贝数据,如果是recover请求可以直接返回成功 - // 如果是ReadChunk请求,则直接读chunk并返回 + // Reaching this point means no data needs to be copied. A recover request + // can return success directly; a ReadChunk request reads the chunk + // directly and returns if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) { SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype()) { - // 出错或处理结束调用closure返回给用户 + // On error or when processing ends, the closure is invoked to return + // the result to the user return ReadChunk(readRequest); } return 0; } -void CloneCore::CloneReadByRequestInfo(std::shared_ptr - readRequest, Closure* done) { +void CloneCore::CloneReadByRequestInfo( + std::shared_ptr readRequest, Closure* done) { brpc::ClosureGuard doneGuard(done); - const ChunkRequest* chunkRequest = readRequest->request_; + const ChunkRequest* chunkRequest = readRequest->request_; auto func = ::curve::common::LocationOperator::GenerateCurveLocation; - std::string location = func(chunkRequest->clonefilesource(), - chunkRequest->clonefileoffset()); + std::string location = + func(chunkRequest->clonefilesource(), chunkRequest->clonefileoffset()); - AsyncDownloadContext* downloadCtx = - new (std::nothrow) AsyncDownloadContext; + AsyncDownloadContext* downloadCtx = new (std::nothrow) AsyncDownloadContext; downloadCtx->location = location; downloadCtx->offset = chunkRequest->offset(); downloadCtx->size = chunkRequest->size(); downloadCtx->buf = new (std::nothrow) char[chunkRequest->size()]; - DownloadClosure* downloadClosure = - new (std::nothrow) DownloadClosure(readRequest, - shared_from_this(), - downloadCtx, - doneGuard.release()); + DownloadClosure* downloadClosure = new (std::nothrow) DownloadClosure( + readRequest, shared_from_this(), downloadCtx, doneGuard.release()); copyer_->DownloadAsync(downloadClosure); return; } -int CloneCore::HandleReadRequest( - std::shared_ptr readRequest, - Closure* done) { +int CloneCore::HandleReadRequest(std::shared_ptr readRequest, + Closure* done) { brpc::ClosureGuard doneGuard(done); const ChunkRequest* request = readRequest->request_; - // 获取chunk信息 + // Obtain chunk information CSChunkInfo chunkInfo; ChunkID id = readRequest->ChunkId(); std::shared_ptr dataStore = readRequest->datastore_; CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo); /* - * chunk存在:按照查看分析bitmap判断是否可以本地读 - * chunk不存在:如包含clone信息则从clonesource读,否则返回错误 - * 因为上层ReadChunkRequest::OnApply已经处理了NoExist - * 并且cloneinfo不存在的情况 - */ + * Chunk exists: check the bitmap to determine whether the read can be + * served locally. + * Chunk does not exist: if the request carries clone information, read + * from the clone source; otherwise return an error, since the upper-level + * ReadChunkRequest::OnApply has already handled the case where the chunk + * does not exist and no clone info is present. + */ switch (errorCode) { - case CSErrorCode::Success: - return CloneReadByLocalInfo(readRequest, chunkInfo, - doneGuard.release()); - case CSErrorCode::ChunkNotExistError: - if (existCloneInfo(request)) { - CloneReadByRequestInfo(readRequest, doneGuard.release()); - return 0; - } - // 否则fallthrough直接返回错误 - FALLTHROUGH_INTENDED; - default: - LOG(ERROR) << "get chunkinfo failed: " - << " logic pool id: " << request->logicpoolid() - << " copyset id: " << request->copysetid() - << " chunkid: " << request->chunkid() - << " error code: " << errorCode; - SetResponse(readRequest, -
CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - return -1; + case CSErrorCode::Success: + return CloneReadByLocalInfo(readRequest, chunkInfo, + doneGuard.release()); + case CSErrorCode::ChunkNotExistError: + if (existCloneInfo(request)) { + CloneReadByRequestInfo(readRequest, doneGuard.release()); + return 0; + } + // Otherwise, fall through and directly return an error + FALLTHROUGH_INTENDED; + default: + LOG(ERROR) << "get chunkinfo failed: " + << " logic pool id: " << request->logicpoolid() + << " copyset id: " << request->copysetid() + << " chunkid: " << request->chunkid() + << " error code: " << errorCode; + SetResponse(readRequest, + CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + return -1; } }
@@ -267,29 +259,25 @@ int CloneCore::ReadChunk(std::shared_ptr readRequest) { std::unique_ptr chunkData(new char[length]); std::shared_ptr dataStore = readRequest->datastore_; CSErrorCode errorCode; - errorCode = dataStore->ReadChunk(request->chunkid(), - request->sn(), - chunkData.get(), - offset, - length); + errorCode = dataStore->ReadChunk(request->chunkid(), request->sn(), + chunkData.get(), offset, length); if (CSErrorCode::Success != errorCode) { SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); LOG(ERROR) << "read chunk failed: " - << " logic pool id: " << request->logicpoolid() - << " copyset id: " << request->copysetid() - << " chunkid: " << request->chunkid() - << " read offset: " << offset - << " read length: " << length - << " error code: " << errorCode; + << " logic pool id: " << request->logicpoolid() + << " copyset id: " << request->copysetid() + << " chunkid: " << request->chunkid() + << " read offset: " << offset << " read length: " << length + << " error code: " << errorCode; return -1; } - // 读成功后需要更新 apply index + // After a successful read, update the apply index readRequest->node_->UpdateAppliedIndex(readRequest->applyIndex); - // Return 完成数据读取后可以将结果返回给用户 - readRequest->cntl_->response_attachment().append( - chunkData.get(), length); + // After the data has been read, the result can be returned to the user + readRequest->cntl_->response_attachment().append(chunkData.get(), length); SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); return 0; }
@@ -303,14 +291,19 @@ int CloneCore::SetReadChunkResponse( std::shared_ptr dataStore = readRequest->datastore_; CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo); - // 如果chunk不存在,需要判断请求是否带源chunk的信息 - // 如果带了源chunk信息,说明用了lazy分配chunk机制,可以直接返回clone data - // 有一种情况,当请求的chunk是lazy allocate的,请求时chunk在本地是存在的, - // 并且请求读取的部分区域已经被写过,在从源端拷贝数据的时候,chunk又被删除了 - // 这种情况下会被当成正常请求返回,但是返回的数据不符合预期 - // 由于当前我们的curve file都是延迟删除的,文件真正删除时能够确保没有用户IO - // 如果后续添加了一些改动触发到这个问题,则需要进行修复 - // TODO(yyk) fix it + // If the chunk does not exist, check whether the request carries the + // source chunk's information. If it does, the lazy chunk allocation + // mechanism is in use and the clone data can be returned directly. + // There is one corner case: the requested chunk is lazily allocated and + // exists locally at request time, part of the requested range has already + // been written, and the chunk is deleted while data is being copied from + // the source. Such a request would be answered as a normal one, but the + // returned data would not be what is expected. Since curve files are + // currently deleted lazily, it is guaranteed that no user IO is in flight + // when a file is actually deleted; if later changes trigger this issue, + // it needs to be fixed + // TODO(yyk) fix it bool expect = errorCode == CSErrorCode::Success || (errorCode == CSErrorCode::ChunkNotExistError && existCloneInfo(request));
@@ -327,11 +320,11 @@ size_t length = request->size(); butil::IOBuf responseData; - // 如果chunk存在,则要从chunk中读取已经写过的区域合并后返回 + // If the chunk exists, read the regions that have already been written + // from the chunk and merge them into the returned data if (errorCode == CSErrorCode::Success) { char* chunkData = new (std::nothrow) char[length]; - int ret = ReadThenMerge( - readRequest, chunkInfo, cloneData, chunkData); + int ret = ReadThenMerge(readRequest, chunkInfo, cloneData, chunkData); responseData.append_user_data(chunkData, length, ReadBufferDeleter); if (ret < 0) { SetResponse(readRequest,
@@ -343,7 +336,7 @@ } readRequest->cntl_->response_attachment().append(responseData); - // 读成功后需要更新 apply index + // After a successful read, update the apply index readRequest->node_->UpdateAppliedIndex(readRequest->applyIndex); SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); return 0;
@@ -351,8 +344,7 @@ int CloneCore::ReadThenMerge(std::shared_ptr readRequest, const CSChunkInfo& chunkInfo, - const butil::IOBuf* cloneData, - char* chunkData) { + const butil::IOBuf* cloneData, char* chunkData) { const ChunkRequest* request = readRequest->request_; std::shared_ptr dataStore = readRequest->datastore_;
@@ -361,13 +353,11 @@ uint32_t blockSize = chunkInfo.blockSize; uint32_t beginIndex = offset / blockSize; uint32_t endIndex = (offset + length - 1) / blockSize; - // 获取chunk文件已经写过和未被写过的区域 + // Obtain the regions of the chunk file that have and have not been written std::vector copiedRanges; std::vector uncopiedRanges; if (chunkInfo.isClone) { - chunkInfo.bitmap->Divide(beginIndex, - endIndex, - &uncopiedRanges, + chunkInfo.bitmap->Divide(beginIndex, endIndex, &uncopiedRanges, &copiedRanges); } else { BitRange range;
@@ -376,23 +366,22 @@ copiedRanges.push_back(range); } - // 需要读取的起始位置在chunk中的偏移 + // The offset within the chunk of the starting position to be read off_t readOff; - // 读取到的数据要拷贝到缓冲区中的相对偏移 + // The relative offset in the buffer to which the read data is copied off_t relativeOff; - // 每次从chunk读取的数据长度 + // The length of data read from the chunk each time size_t readSize; - // 1.Read 对于已写过的区域,从chunk文件中读取 + // 1. Read: for regions that have already been written, read from the + // chunk file CSErrorCode errorCode; for (auto& range : copiedRanges) { readOff = range.beginIndex * blockSize; readSize = (range.endIndex - range.beginIndex + 1) * blockSize; relativeOff = readOff - offset; - errorCode = dataStore->ReadChunk(request->chunkid(), - request->sn(), - chunkData + relativeOff, - readOff, - readSize); + errorCode = + dataStore->ReadChunk(request->chunkid(), request->sn(), + chunkData + relativeOff, readOff, readSize); if (CSErrorCode::Success != errorCode) { LOG(ERROR) << "read chunk failed: " << " logic pool id: " << request->logicpoolid()
@@ -405,7 +394,8 @@ } }
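Before the merge step that follows, the whole read-then-merge idea can be seen in isolation. A self-contained sketch under simplified assumptions (block-aligned offset and length, plain buffers instead of the datastore and IOBuf, a plain bitmap instead of Bitmap::Divide):

#include <cstdint>
#include <cstring>
#include <vector>

// Fill the response window [offset, offset + length): written blocks come
// from the local chunk buffer, unwritten blocks from the cloned data.
// Both source buffers are assumed to already cover the same window.
static void ReadThenMergeSketch(const std::vector<bool>& written,
                                uint32_t blockSize, uint64_t offset,
                                uint64_t length, const char* local,
                                const char* cloned, char* out) {
    uint64_t beginIndex = offset / blockSize;
    uint64_t endIndex = (offset + length - 1) / blockSize;
    for (uint64_t i = beginIndex; i <= endIndex; ++i) {
        uint64_t relativeOff = i * blockSize - offset;  // offset in buffers
        const char* src = written[i] ? local : cloned;
        std::memcpy(out + relativeOff, src + relativeOff, blockSize);
    }
}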
- // 2.Merge 对于未写过的区域,从源端下载的区域中拷贝出来进行merge + // 2. Merge: for regions that have not been written, copy the data out of + // the area downloaded from the source and merge it in for (auto& range : uncopiedRanges) { readOff = range.beginIndex * blockSize; readSize = (range.endIndex - range.beginIndex + 1) * blockSize;
@@ -416,16 +406,15 @@ } void CloneCore::PasteCloneData(std::shared_ptr readRequest, - const butil::IOBuf* cloneData, - off_t offset, - size_t cloneDataSize, - Closure* done) { + const butil::IOBuf* cloneData, off_t offset, + size_t cloneDataSize, Closure* done) { const ChunkRequest* request = readRequest->request_; - bool dontPaste = CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype() - && !enablePaste_; + bool dontPaste = + CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype() && !enablePaste_; if (dontPaste) return; - // 数据拷贝完成以后,需要将产生PaseChunkRequest将数据Paste到chunk文件 + // After the data copy is completed, a PasteChunkRequest needs to be + // generated to paste the data into the chunk file ChunkRequest* pasteRequest = new ChunkRequest(); pasteRequest->set_optype(curve::chunkserver::CHUNK_OP_TYPE::CHUNK_OP_PASTE); pasteRequest->set_logicpoolid(request->logicpoolid());
@@ -440,22 +429,18 @@ closure->SetRequest(pasteRequest); closure->SetResponse(pasteResponse); closure->SetClosure(done); - // 如果是recover chunk的请求,需要将paste的结果通过rpc返回 + // If this is a recover-chunk request, the result of the paste needs to + // be returned through rpc if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) { closure->SetUserResponse(readRequest->response_); } - ChunkServiceClosure* pasteClosure = - new (std::nothrow) ChunkServiceClosure(nullptr, - pasteRequest, - pasteResponse, - closure); - - req = std::make_shared(readRequest->node_, - pasteRequest, - pasteResponse, - cloneData, - pasteClosure); + ChunkServiceClosure* pasteClosure = new (std::nothrow) + ChunkServiceClosure(nullptr, pasteRequest, pasteResponse, closure); + + req = std::make_shared( + readRequest->node_, pasteRequest, pasteResponse, cloneData, + pasteClosure); req->Process(); }
diff --git a/src/chunkserver/clone_core.h b/src/chunkserver/clone_core.h index c91183feb3..3f3eb2ef69 100644 --- a/src/chunkserver/clone_core.h +++ b/src/chunkserver/clone_core.h
@@ -23,25 +23,26 @@ #ifndef SRC_CHUNKSERVER_CLONE_CORE_H_ #define SRC_CHUNKSERVER_CLONE_CORE_H_ +#include #include #include #include -#include + #include -#include "proto/chunk.pb.h" #include "include/chunkserver/chunkserver_common.h" -#include "src/common/timeutility.h" +#include "proto/chunk.pb.h" #include "src/chunkserver/clone_copyer.h" #include "src/chunkserver/datastore/define.h" +#include "src/common/timeutility.h" namespace curve { namespace chunkserver { +using common::TimeUtility; +using curve::chunkserver::CSChunkInfo; using ::google::protobuf::Closure; using ::google::protobuf::Message; -using curve::chunkserver::CSChunkInfo; -using common::TimeUtility; class ReadChunkRequest; class PasteChunkInternalRequest;
@@ -51,151 +52,147 @@ class DownloadClosure : public Closure { public: DownloadClosure(std::shared_ptr readRequest, std::shared_ptr cloneCore, - AsyncDownloadContext* downloadCtx, - Closure *done); + AsyncDownloadContext* downloadCtx, Closure* done); void Run(); - void SetFailed() { - isFailed_ = true; - } + void SetFailed() { isFailed_ = true; } - AsyncDownloadContext* GetDownloadContext() { - return downloadCtx_; - } + AsyncDownloadContext* GetDownloadContext() { return downloadCtx_; } protected:
- // 下载是否出错出错 + // Whether the download failed bool isFailed_; - // 请求开始的时间 + // Request start time uint64_t beginTime_; - // 下载请求上下文信息 + // Download request context information AsyncDownloadContext* downloadCtx_; - // clone core对象 + // Clone core object std::shared_ptr cloneCore_; - // read chunk请求对象 + // Read chunk request object std::shared_ptr readRequest_; - // DownloadClosure生命周期结束后需要执行的回调 + // Callback to be executed when the DownloadClosure's lifecycle ends Closure* done_; }; class CloneClosure : public Closure { public: - CloneClosure() : request_(nullptr) - , response_(nullptr) - , userResponse_(nullptr) - , done_(nullptr) {} + CloneClosure() + : request_(nullptr), + response_(nullptr), + userResponse_(nullptr), + done_(nullptr) {} void Run(); - void SetClosure(Closure *done) { - done_ = done; - } + void SetClosure(Closure* done) { done_ = done; } void SetRequest(Message* request) { - request_ = dynamic_cast(request); + request_ = dynamic_cast(request); } void SetResponse(Message* response) { - response_ = dynamic_cast(response); + response_ = dynamic_cast(response); } void SetUserResponse(Message* response) { - userResponse_ = dynamic_cast(response); + userResponse_ = dynamic_cast(response); } private: - // paste chunk的请求结构体 - ChunkRequest *request_; - // paste chunk的响应结构体 - ChunkResponse *response_; - // 真正要返回给用户的响应结构体 - ChunkResponse *userResponse_; - // CloneClosure生命周期结束后需要执行的回调 - Closure *done_; + // Request structure for the paste chunk + ChunkRequest* request_; + // Response structure of the paste chunk + ChunkResponse* response_; + // The response structure that is actually returned to the user + ChunkResponse* userResponse_; + // Callback to be executed when the CloneClosure's lifecycle ends + Closure* done_; }; class CloneCore : public std::enable_shared_from_this { friend class DownloadClosure; + public: CloneCore(uint32_t sliceSize, bool enablePaste, std::shared_ptr copyer) - : sliceSize_(sliceSize) - , enablePaste_(enablePaste) - , copyer_(copyer) {} + : sliceSize_(sliceSize), enablePaste_(enablePaste), copyer_(copyer) {} virtual ~CloneCore() {} /** - * 处理读请求的逻辑 - * @param readRequest[in]:读请求信息 - * @param done[in]:任务完成后要执行的closure - * @return: 成功返回0,失败返回-1 + * Logic for processing read requests + * @param readRequest[in]: read request information + * @param done[in]: the closure to be executed after the task is completed + * @return: returns 0 on success, -1 on failure */ int HandleReadRequest(std::shared_ptr readRequest, Closure* done); protected: /** - * 本地chunk文件存在情况下,按照本地记录的clone和bitmap信息进行数据读取 - * 会涉及读取远程文件结合本地文件进行merge返回结果 - * @param[in/out] readRequest: 用户请求&响应上下文 - * @param[in] chunkInfo: 对应本地的chunkinfo - * @return 成功返回0,失败返回负数 + * When the local chunk file exists, read data according to the locally + * recorded clone and bitmap information. This may involve reading the + * remote file and merging it with the local file to produce the result + * @param[in/out] readRequest: user request & response context + * @param[in] chunkInfo: the corresponding local chunkinfo + * @return returns 0 on success, a negative number on failure */ int CloneReadByLocalInfo(std::shared_ptr readRequest, - const CSChunkInfo &chunkInfo, Closure* done); + const CSChunkInfo& chunkInfo, Closure* done);
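The closure handling in these classes relies on brpc::ClosureGuard to guarantee the callback runs exactly once. A minimal sketch of the hand-off pattern used throughout this file (DispatchAsync is a hypothetical stand-in for the asynchronous stage):

#include <brpc/closure_guard.h>
#include <google/protobuf/stubs/callback.h>

// Hypothetical asynchronous stage; it takes ownership of the closure and
// must eventually invoke Run() on it exactly once.
void DispatchAsync(::google::protobuf::Closure* done);

void HandleStage(::google::protobuf::Closure* done, bool goAsync) {
    brpc::ClosureGuard doneGuard(done);
    if (!goAsync) {
        // Early return: the guard's destructor invokes done->Run() here.
        return;
    }
    // Transfer ownership to the async stage instead of running it now,
    // mirroring the doneGuard.release() calls in this file.
    DispatchAsync(doneGuard.release());
}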
/** - * 本地chunk文件不存在情况下,按照用户请求上下文中带的clonesource信息进行数据读取 - * 不涉及merge本地结果 - * @param[in/out] readRequest: 用户请求&响应上下文 + * When the local chunk file does not exist, read the data according to the + * clonesource information carried in the user request context. No merging + * with local results is involved + * @param[in/out] readRequest: user request & response context */ void CloneReadByRequestInfo(std::shared_ptr readRequest, - Closure* done); + Closure* done); /** - * 从本地chunk中读取请求的区域,然后设置response - * @param readRequest: 用户的ReadRequest - * @return: 成功返回0,失败返回-1 + * Read the requested area from the local chunk and set the response + * @param readRequest: the user's ReadRequest + * @return: returns 0 on success, -1 on failure */ int ReadChunk(std::shared_ptr readRequest); /** - * 设置read chunk类型的response,包括返回的数据和其他返回参数 - * 从本地chunk中读取已被写过的区域,未写过的区域从克隆下来的数据中获取 - * 然后将数据在内存中merge - * @param readRequest: 用户的ReadRequest - * @param cloneData: 从源端拷贝下来的数据,数据起始偏移同请求中的偏移 - * @return: 成功返回0,失败返回-1 + * Set the response for a read-chunk request, including the returned data + * and the other response fields. The already-written regions are read + * from the local chunk, the unwritten regions are taken from the cloned + * data, and the two are then merged in memory + * @param readRequest: the user's ReadRequest + * @param cloneData: the data copied from the source; its starting offset + * is the same as the offset in the request + * @return: returns 0 on success, -1 on failure */ int SetReadChunkResponse(std::shared_ptr readRequest, const butil::IOBuf* cloneData); - // 从本地chunk中读取已经写过的区域合并到clone data中 + // Read the previously written regions from the local chunk and merge them + // into the clone data int ReadThenMerge(std::shared_ptr readRequest, const CSChunkInfo& chunkInfo, - const butil::IOBuf* cloneData, - char* chunkData); + const butil::IOBuf* cloneData, char* chunkData); /** - * 将从源端下载下来的数据paste到本地chunk文件中 - * @param readRequest: 用户的ReadRequest - * @param cloneData: 从源端下载的数据 - * @param offset: 下载的数据在chunk文件中的偏移 - * @param cloneDataSize: 下载的数据长度 - * @param done:任务完成后要执行的closure + * Paste the data downloaded from the source into the local chunk file + * @param readRequest: the user's ReadRequest + * @param cloneData: data downloaded from the source + * @param offset: the offset of the downloaded data in the chunk file + * @param cloneDataSize: length of the downloaded data + * @param done: the closure to be executed after the task is completed */ void PasteCloneData(std::shared_ptr readRequest, - const butil::IOBuf* cloneData, - off_t offset, - size_t cloneDataSize, - Closure* done); + const butil::IOBuf* cloneData, off_t offset, + size_t cloneDataSize, Closure* done); inline void SetResponse(std::shared_ptr readRequest, CHUNK_OP_STATUS status); private: - // 每次拷贝的slice的大小 + // The size of each copied slice uint32_t sliceSize_; - // 判断read chunk类型的请求是否需要paste, true需要paste,false表示不需要 + // Whether a read-chunk request should paste the downloaded data: true + // means paste, false means no paste bool enablePaste_; - // 负责从源端下载数据 + // Responsible for downloading data from the source std::shared_ptr copyer_; };
diff --git a/src/chunkserver/clone_manager.cpp b/src/chunkserver/clone_manager.cpp index 6fc428bdba..c41d844500 100644 --- a/src/chunkserver/clone_manager.cpp +++ b/src/chunkserver/clone_manager.cpp
@@ -28,8 +28,7 @@ namespace chunkserver { CloneManager::CloneManager() : isRunning_(false) {} CloneManager::~CloneManager() { - if (isRunning_.load(std::memory_order_acquire)) - Fini(); + if (isRunning_.load(std::memory_order_acquire)) Fini(); } int CloneManager::Init(const CloneOptions& options) {
@@ -38,9 +37,8 @@ } int CloneManager::Run() { - if (isRunning_.load(std::memory_order_acquire)) - return 0; - // 启动线程池 + if (isRunning_.load(std::memory_order_acquire)) return 0; + // Start the thread pool LOG(INFO) << "Begin to run clone manager."; tp_ = std::make_shared>(); int ret = tp_->Start(options_.threadNum, options_.queueCapacity);
@@ -56,8 +54,7 @@ } int CloneManager::Fini() { - if (!isRunning_.load(std::memory_order_acquire)) - return 0; + if (!isRunning_.load(std::memory_order_acquire)) return 0; LOG(INFO) << "Begin to stop clone manager."; isRunning_.store(false, std::memory_order_release);
@@ -69,10 +66,9 @@ std::shared_ptr CloneManager::GenerateCloneTask( std::shared_ptr request, - ::google::protobuf::Closure *done) { - // 如果core是空的,任务无法被处理,所以返回空 - if (options_.core == nullptr) - return nullptr; + ::google::protobuf::Closure* done) { + // If core is null, the task cannot be processed, so return nullptr + if (options_.core == nullptr) return nullptr; std::shared_ptr cloneTask = std::make_shared(request, options_.core, done);
@@ -80,11 +76,9 @@ } bool CloneManager::IssueCloneTask(std::shared_ptr cloneTask) { - if (!isRunning_.load(std::memory_order_acquire)) - return false; + if (!isRunning_.load(std::memory_order_acquire)) return false; - if (cloneTask == nullptr) - return false; + if (cloneTask == nullptr) return false; tp_->Enqueue(cloneTask->Closure());
diff --git a/src/chunkserver/clone_manager.h b/src/chunkserver/clone_manager.h index 01f7088218..96e489d5c1 100644 --- a/src/chunkserver/clone_manager.h +++ b/src/chunkserver/clone_manager.h
@@ -25,16 +25,17 @@ #include #include -#include // NOLINT -#include // NOLINT + #include -#include +#include // NOLINT #include +#include // NOLINT +#include #include "include/chunkserver/chunkserver_common.h" -#include "src/common/concurrent/task_thread_pool.h" -#include "src/chunkserver/clone_task.h" #include "src/chunkserver/clone_core.h" +#include "src/chunkserver/clone_task.h" +#include "src/common/concurrent/task_thread_pool.h" namespace curve { namespace chunkserver {
@@ -44,18 +45,16 @@ using curve::common::TaskThreadPool; class ReadChunkRequest; struct CloneOptions { - // 核心逻辑处理类 + // Core logic processing class std::shared_ptr core; - // 最大线程数 + // Maximum number of threads uint32_t threadNum; - // 最大队列深度 + // Maximum queue depth uint32_t queueCapacity; - // 任务状态检查的周期,单位ms + // Interval of task status checks, in ms uint32_t checkPeriod; - CloneOptions() : core(nullptr) - , threadNum(10) - , queueCapacity(100) - , checkPeriod(5000) {} + CloneOptions() + : core(nullptr), threadNum(10), queueCapacity(100), checkPeriod(5000) {} };
class CloneManager { public: CloneManager(); virtual ~CloneManager(); /** - * 初始化 + * Initialize * - * @param options[in]:初始化参数 - * @return 错误码 + * @param options[in]: initialization parameters + * @return error code */ virtual int Init(const CloneOptions& options); /** - * 启动所有线程 + * Start all threads * - * @return 成功返回0,失败返回-1 + * @return returns 0 on success, -1 on failure */ virtual int Run(); /** - * 停止所有线程 + * Stop all threads * - * @return 成功返回0,失败返回-1 + * @return returns 0 on success, -1 on failure */ virtual int Fini(); /** - * 生成克隆任务 - * @param request[in]:请求信息 - * @return:返回生成的克隆任务,如果生成失败,返回nullptr + * Generate a clone task + * @param request[in]: request information + * @return: returns the generated clone task; if generation fails, + * returns nullptr */ virtual std::shared_ptr GenerateCloneTask( std::shared_ptr request, ::google::protobuf::Closure* done); /** - * 发布克隆任务,产生克隆任务放到线程池中处理 - * @param task[in]:克隆任务 - * @return 成功返回true,失败返回false + * Issue a clone task: the generated task is placed into the thread pool + * for processing + * @param task[in]: the clone task + * @return returns true on success, false on failure */ virtual bool IssueCloneTask(std::shared_ptr cloneTask); private: - // 克隆任务管理相关的选项,调Init的时候初始化 + // Options related to clone task management, initialized when Init is + // called CloneOptions options_; - // 处理克隆任务的异步线程池 + // Asynchronous thread pool for processing clone tasks std::shared_ptr> tp_; - // 当前线程池是否处于工作状态 + // Whether the thread pool is currently running std::atomic isRunning_; };
diff --git a/src/chunkserver/clone_task.h b/src/chunkserver/clone_task.h index 48766bce9a..cd55f0b439 100644 --- a/src/chunkserver/clone_task.h +++ b/src/chunkserver/clone_task.h
@@ -25,37 +25,33 @@ #include #include + #include #include #include "include/chunkserver/chunkserver_common.h" -#include "src/common/uncopyable.h" #include "src/chunkserver/clone_copyer.h" #include "src/chunkserver/clone_core.h" +#include "src/common/uncopyable.h" namespace curve { namespace chunkserver { using curve::common::Uncopyable; -class CloneTask : public Uncopyable - , public std::enable_shared_from_this{ +class CloneTask : public Uncopyable, + public std::enable_shared_from_this { public: CloneTask(std::shared_ptr request, std::shared_ptr core, ::google::protobuf::Closure* done) - : core_(core) - , readRequest_(request) - , done_(done) - , isComplete_(false) {} + : core_(core), readRequest_(request), done_(done), isComplete_(false) {} virtual ~CloneTask() {} virtual std::function Closure() { auto sharedThis = shared_from_this(); - return [sharedThis] () { - sharedThis->Run(); - }; + return [sharedThis]() { sharedThis->Run(); }; } virtual void Run() {
@@ -65,18 +61,16 @@ isComplete_ = true; } - virtual bool IsComplete() { - return isComplete_; - } + virtual bool IsComplete() { return isComplete_; } protected: - // 克隆核心逻辑 + // Clone core logic std::shared_ptr core_; - // 此次任务相关信息 + // Information related to this task std::shared_ptr readRequest_; - // 任务结束后要执行的Closure + // Closure to be executed after the task is completed ::google::protobuf::Closure* done_; - // 任务是否结束 + // Whether the task has completed bool isComplete_; };
diff --git a/src/chunkserver/conf_epoch_file.cpp b/src/chunkserver/conf_epoch_file.cpp index 6a39c6ce3e..aa8fa0077c 100644 --- a/src/chunkserver/conf_epoch_file.cpp +++ b/src/chunkserver/conf_epoch_file.cpp
@@ -22,20 +22,20 @@ #include "src/chunkserver/conf_epoch_file.h" -#include #include +#include #include "src/common/crc32.h" namespace curve { namespace chunkserver { -// conf.epoch文件最大长度 +// Maximum 
length of conf.epoch file const uint32_t kConfEpochFileMaxSize = 4096; const uint64_t kConfEpochFileMagic = 0x6225929368674119; -int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, - CopysetID *copysetID, uint64_t *epoch) { +int ConfEpochFile::Load(const std::string& path, LogicPoolID* logicPoolID, + CopysetID* copysetID, uint64_t* epoch) { int fd = fs_->Open(path.c_str(), O_RDWR); if (0 > fd) { LOG(ERROR) << "LoadConfEpoch failed open file " << path @@ -47,7 +47,7 @@ int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, char json[kConfEpochFileMaxSize] = {0}; int size = 0; - // 1. read数据 + // 1. Read data size = fs_->Read(fd, json, 0, kConfEpochFileMaxSize); if (size <= 0) { LOG(ERROR) << "LoadConfEpoch read failed: " << path @@ -58,7 +58,7 @@ int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, } fs_->Close(fd); - // 2.反序列化 + // 2. Deserialization ConfEpoch confEpoch; std::string jsonStr(json); std::string err; @@ -71,7 +71,7 @@ int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, return -1; } - // 3. 验证crc + // 3. Verify CRC uint32_t crc32c = ConfEpochCrc(confEpoch); if (crc32c != confEpoch.checksum()) { LOG(ERROR) << "conf epoch crc error: " << jsonStr; @@ -89,15 +89,15 @@ int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, return 0; } -int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, +int ConfEpochFile::Save(const std::string& path, const LogicPoolID logicPoolID, const CopysetID copysetID, const uint64_t epoch) { - // 1. 转换成conf message + // 1. Convert to conf message ConfEpoch confEpoch; confEpoch.set_logicpoolid(logicPoolID); confEpoch.set_copysetid(copysetID); confEpoch.set_epoch(epoch); - // 计算crc + // Calculate crc uint32_t crc32c = ConfEpochCrc(confEpoch); confEpoch.set_checksum(crc32c); @@ -113,7 +113,7 @@ int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, return -1; } - // 2. open文件 + // 2. Open file int fd = fs_->Open(path.c_str(), O_RDWR | O_CREAT); if (0 > fd) { LOG(ERROR) << "LoadConfEpoch failed open file " << path @@ -122,7 +122,7 @@ int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, return -1; } - // 3. write文件 + // 3. Write file if (static_cast(out.size()) != fs_->Write(fd, out.c_str(), 0, out.size())) { LOG(ERROR) << "SaveConfEpoch write failed, path: " << path @@ -132,7 +132,7 @@ int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, return -1; } - // 4. 落盘 + // 4. 
Flush to disk if (0 != fs_->Fsync(fd)) { LOG(ERROR) << "SaveConfEpoch sync failed, path: " << path << ", errno: " << errno
@@ -145,20 +145,20 @@ return 0; } -uint32_t ConfEpochFile::ConfEpochCrc(const ConfEpoch &confEpoch) { +uint32_t ConfEpochFile::ConfEpochCrc(const ConfEpoch& confEpoch) { uint32_t crc32c = 0; uint32_t logicPoolId = confEpoch.logicpoolid(); uint32_t copysetId = confEpoch.copysetid(); uint64_t epoch = confEpoch.epoch(); uint64_t magic = kConfEpochFileMagic; - crc32c = curve::common::CRC32( - crc32c, reinterpret_cast(&logicPoolId), sizeof(logicPoolId)); - crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&copysetId), + crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&logicPoolId), + sizeof(logicPoolId)); + crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&copysetId), sizeof(copysetId)); - crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&epoch), + crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&epoch), sizeof(epoch)); - crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&magic), + crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&magic), sizeof(magic)); return crc32c;
diff --git a/src/chunkserver/conf_epoch_file.h b/src/chunkserver/conf_epoch_file.h index 91ee27ec6b..4d2513fc2b 100644 --- a/src/chunkserver/conf_epoch_file.h +++ b/src/chunkserver/conf_epoch_file.h
@@ -23,13 +23,13 @@ #ifndef SRC_CHUNKSERVER_CONF_EPOCH_FILE_H_ #define SRC_CHUNKSERVER_CONF_EPOCH_FILE_H_ -#include #include +#include -#include "src/fs/local_filesystem.h" -#include "src/fs/fs_common.h" #include "include/chunkserver/chunkserver_common.h" #include "proto/copyset.pb.h" +#include "src/fs/fs_common.h" +#include "src/fs/local_filesystem.h" namespace curve { namespace chunkserver {
@@ -38,47 +38,44 @@ using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; /** - * 配置版本序列化和反序列化的工具类 - * TODO(wudemiao): 后期替换采用json编码 + * Utility class for serializing and deserializing the configuration version + * TODO(wudemiao): replace with JSON encoding later */ class ConfEpochFile { public: - explicit ConfEpochFile(std::shared_ptr fs) - : fs_(fs) {} + explicit ConfEpochFile(std::shared_ptr fs) : fs_(fs) {} /** - * 加载快照文件中的配置版本 - * @param path:文件路径 - * @param logicPoolID:逻辑池id - * @param copysetID:复制组id - * @param epoch:配置版本,出参,返回读取的epoch值 - * @return 0,成功; -1失败 + * Load the configuration version from the snapshot file + * @param path: file path + * @param logicPoolID: logical pool ID + * @param copysetID: copyset (replication group) ID + * @param epoch: configuration version, output parameter; returns the + * epoch value that was read + * @return 0 on success; -1 on failure */ - int Load(const std::string &path, - LogicPoolID *logicPoolID, - CopysetID *copysetID, - uint64_t *epoch); + int Load(const std::string& path, LogicPoolID* logicPoolID, + CopysetID* copysetID, uint64_t* epoch); /** - * 保存配置版本信息到快照文件中序列化的格式如下,处理head表示长度,使用二 - * 进制,其它都是文本格式,便于必要的时候能够直接用查看,sync保证数据落盘 - * | head | 配置版本信息 | - * | 8 bytes size_t | uint32_t | 变 长文本 | - * | length | crc32 | logic pool id | copyset id | epoch | - * 上面的持久化使用 ':' 分隔 - * @param path:文件路径 - * @param logicPoolID:逻辑池id - * @param copysetID:复制组id - * @param epoch:配置版本 - * @return 0成功; -1失败 + * Serialize the configuration version information and save it to the + * snapshot file. The format is as follows: the 'head' part expresses the + * length in binary, everything else is text so the file can be inspected + * directly when necessary; sync guarantees the data reaches the disk. + * | head | Configuration version information | + * | 8 bytes size_t | uint32_t | Variable-length text | + * | length | crc32 | logic pool id | copyset id | epoch | + * The persisted fields above are separated by ':' + * @param path: file path + * @param logicPoolID: logical pool ID + * @param copysetID: copyset (replication group) ID + * @param epoch: configuration version + * @return 0 on success; -1 on failure */ - int Save(const std::string &path, - const LogicPoolID logicPoolID, - const CopysetID copysetID, - const uint64_t epoch); + int Save(const std::string& path, const LogicPoolID logicPoolID, + const CopysetID copysetID, const uint64_t epoch); private: - static uint32_t ConfEpochCrc(const ConfEpoch &confEpoch); + static uint32_t ConfEpochCrc(const ConfEpoch& confEpoch); std::shared_ptr fs_; };
diff --git a/src/chunkserver/config_info.h b/src/chunkserver/config_info.h index 67c3f57524..c00809413f 100644 --- a/src/chunkserver/config_info.h +++ b/src/chunkserver/config_info.h
@@ -23,33 +23,34 @@ #ifndef SRC_CHUNKSERVER_CONFIG_INFO_H_ #define SRC_CHUNKSERVER_CONFIG_INFO_H_ -#include #include +#include -#include "src/fs/local_filesystem.h" -#include "src/chunkserver/trash.h" -#include "src/chunkserver/inflight_throttle.h" -#include "src/chunkserver/concurrent_apply/concurrent_apply.h" #include "include/chunkserver/chunkserver_common.h" +#include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "src/chunkserver/inflight_throttle.h" +#include "src/chunkserver/trash.h" +#include "src/fs/local_filesystem.h" namespace curve { namespace chunkserver { -using curve::fs::LocalFileSystem; using curve::chunkserver::concurrent::ConcurrentApplyModule; +using curve::fs::LocalFileSystem; class FilePool; class CopysetNodeManager; class CloneManager; /** - * copyset node的配置选项 + * Configuration options for copyset node */ struct CopysetNodeOptions { - // follower to candidate 超时时间,单位ms,默认是1000ms + // follower to candidate timeout, in ms, defaults to 1000ms int electionTimeoutMs; - // 定期打快照的时间间隔,默认3600s,也就是1小时 + // The interval between periodic snapshots, 3600s (one hour) by default int snapshotIntervalS; // If true, read requests will be invoked in current lease leader node. 
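For reference, the checksum field order that Save writes and Load verifies above (logic pool id, copyset id, epoch, magic) can be restated as a standalone sketch; the trivial rolling hash below is only a placeholder for curve::common::CRC32:

#include <cstddef>
#include <cstdint>

// Placeholder for curve::common::CRC32, for illustration only.
static uint32_t Crc32Stub(uint32_t init, const char* data, size_t len) {
    uint32_t crc = init;
    for (size_t i = 0; i < len; ++i)
        crc = (crc << 5) ^ (crc >> 27) ^ static_cast<unsigned char>(data[i]);
    return crc;
}

// Mirrors ConfEpochFile::ConfEpochCrc: the fields are hashed in a fixed
// order, and Load must recompute exactly the same sequence.
static uint32_t ConfEpochCrcSketch(uint32_t logicPoolId, uint32_t copysetId,
                                   uint64_t epoch, uint64_t magic) {
    uint32_t crc = 0;
    crc = Crc32Stub(crc, reinterpret_cast<const char*>(&logicPoolId),
                    sizeof(logicPoolId));
    crc = Crc32Stub(crc, reinterpret_cast<const char*>(&copysetId),
                    sizeof(copysetId));
    crc = Crc32Stub(crc, reinterpret_cast<const char*>(&epoch), sizeof(epoch));
    crc = Crc32Stub(crc, reinterpret_cast<const char*>(&magic), sizeof(magic));
    return crc;
}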
@@ -57,79 +58,86 @@ struct CopysetNodeOptions { // Default: true bool enbaleLeaseRead; - // 如果follower和leader日志相差超过catchupMargin, - // 就会执行install snapshot进行恢复,默认: 1000 + // If the difference between the follower and leader logs exceeds + // catchupMargin, Will execute install snapshot for recovery, default: 1000 int catchupMargin; - // 是否开启pthread执行用户代码,默认false + // Enable pthread to execute user code, default to false bool usercodeInPthread; - // 所有uri个格式: ${protocol}://${绝对或者相对路径} - // eg: - // posix: local - // bluestore: bluestore + // All uri formats: ${protocol}://${absolute or relative path} + // eg: + // posix: local + // bluestore: bluestore - // raft log uri, 默认raft_log + // Raft log uri, default raft_log std::string logUri; - // raft meta uri, 默认raft_meta + // Raft meta uri, default raft_meta std::string raftMetaUri; - // raft snapshot uri,默认raft_snpashot + // Raft snapshot uri, default raft_snpashot std::string raftSnapshotUri; - // chunk data uri,默认data + // Chunk data uri, default data std::string chunkDataUri; - // chunk snapshot uri,默认snapshot + // Chunk snapshot uri, default snapshot std::string chunkSnapshotUri; - // copyset data recycling uri,默认recycler + // Copyset data recycling uri, default recycler std::string recyclerUri; std::string ip; uint32_t port; - // chunk文件的大小 + // Chunk file size uint32_t maxChunkSize; // WAL segment file size uint32_t maxWalSegmentSize; - // chunk文件的page大小 + // The page size of the chunk file uint32_t metaPageSize; // alignment for I/O request uint32_t blockSize; - // clone chunk的location长度限制 + // Location length limit for clone chunks uint32_t locationLimit; - // 并发模块 - ConcurrentApplyModule *concurrentapply; - // Chunk file池子 + // Concurrent module + ConcurrentApplyModule* concurrentapply; + // Chunk file pool std::shared_ptr chunkFilePool; // WAL file pool std::shared_ptr walFilePool; - // 文件系统适配层 + // File System Adaptation Layer std::shared_ptr localFileSystem; - // 回收站, 心跳模块判断该chunkserver不在copyset配置组时, - // 通知copysetManager将copyset目录移动至回收站 - // 一段时间后实际回收物理空间 + // When the recycle bin and heartbeat module determine that the chunkserver + // is not in the copyset configuration group, Notify the copysetManager to + // move the copyset directory to the recycle bin Actual recovery of physical + // space after a period of time std::shared_ptr trash; - // snapshot流控 - scoped_refptr *snapshotThrottle; + // Snapshot flow control + scoped_refptr* snapshotThrottle; - // 限制chunkserver启动时copyset并发恢复加载的数量,为0表示不限制 + // Limit the number of copyset concurrent recovery loads during chunkserver + // startup, with a value of 0 indicating no limit uint32_t loadConcurrency = 0; // chunkserver sync_thread_pool number of threads. uint32_t syncConcurrency = 20; // copyset trigger sync timeout uint32_t syncTriggerSeconds = 25; - // 检查copyset是否加载完成出现异常时的最大重试次数 - // 可能的异常:1.当前大多数副本还没起来;2.网络问题等导致无法获取leader - // 3.其他的原因导致无法获取到leader的committed index + // Check if the copyset has completed loading and the maximum number of + // retries when an exception occurs Possible exceptions: 1. Currently, most + // replicas have not yet been restored; 2. Network issues and other issues + // preventing the acquisition of leaders + // 3. 
+    // 3. the leader's committed index cannot be obtained for other reasons
     uint32_t checkRetryTimes = 3;

-    // 当前peer的applied_index与leader上的committed_index差距小于该值
-    // 则判定copyset已经加载完成
+    // If the difference between the current peer's applied_index and the
+    // leader's committed_index is less than this value, the copyset is
+    // considered fully loaded
     uint32_t finishLoadMargin = 2000;

-    // 循环判定copyset是否加载完成的内部睡眠时间
+    // Sleep interval inside the loop that checks whether the copyset has
+    // finished loading
     uint32_t checkLoadMarginIntervalMs = 1000;

     // enable O_DSYNC when open chunkfile
@@ -145,11 +153,11 @@ struct CopysetNodeOptions {
 };

 /**
- * ChunkServiceManager 的依赖项
+ * Dependencies of ChunkServiceManager
 */
 struct ChunkServiceOptions {
-    CopysetNodeManager *copysetNodeManager;
-    CloneManager *cloneManager;
+    CopysetNodeManager* copysetNodeManager;
+    CloneManager* cloneManager;
     std::shared_ptr<InflightThrottle> inflightThrottle;
 };

diff --git a/src/chunkserver/copyset_node.cpp b/src/chunkserver/copyset_node.cpp
index a00f7aaf9a..87e8d70135 100755
--- a/src/chunkserver/copyset_node.cpp
+++ b/src/chunkserver/copyset_node.cpp
@@ -22,33 +22,34 @@

 #include "src/chunkserver/copyset_node.h"

-#include
-#include
-#include
 #include
-#include
 #include
-#include
-#include
+#include
+#include
+#include
+#include
+
 #include
 #include
-#include
-#include
-#include
 #include
 #include
+#include
+#include
+#include
+#include
+#include

-#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h"
 #include "src/chunkserver/chunk_closure.h"
-#include "src/chunkserver/op_request.h"
-#include "src/common/concurrent/task_thread_pool.h"
-#include "src/fs/fs_common.h"
 #include "src/chunkserver/copyset_node_manager.h"
-#include "src/chunkserver/datastore/define.h"
 #include "src/chunkserver/datastore/datastore_file_helper.h"
-#include "src/common/uri_parser.h"
+#include "src/chunkserver/datastore/define.h"
+#include "src/chunkserver/op_request.h"
+#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h"
+#include "src/common/concurrent/task_thread_pool.h"
 #include "src/common/crc32.h"
 #include "src/common/fs_util.h"
+#include "src/common/uri_parser.h"
+#include "src/fs/fs_common.h"

 namespace braft {
 DECLARE_bool(raft_enable_leader_lease);
@@ -59,37 +60,36 @@ namespace chunkserver {

 using curve::fs::FileSystemInfo;

-const char *kCurveConfEpochFilename = "conf.epoch";
+const char* kCurveConfEpochFilename = "conf.epoch";

 uint32_t CopysetNode::syncTriggerSeconds_ = 25;
-std::shared_ptr<TaskThreadPool<>>
-    CopysetNode::copysetSyncPool_ = nullptr;
-
-CopysetNode::CopysetNode(const LogicPoolID &logicPoolId,
-                         const CopysetID &copysetId,
-                         const Configuration &initConf) :
-    logicPoolId_(logicPoolId),
-    copysetId_(copysetId),
-    conf_(initConf),
-    epoch_(0),
-    peerId_(),
-    nodeOptions_(),
-    raftNode_(nullptr),
-    chunkDataApath_(),
-    chunkDataRpath_(),
-    appliedIndex_(0),
-    leaderTerm_(-1),
-    configChange_(std::make_shared<ConfigurationChange>()),
-    lastSnapshotIndex_(0),
-    scaning_(false),
-    lastScanSec_(0),
-    enableOdsyncWhenOpenChunkFile_(false),
-    isSyncing_(false),
-    checkSyncingIntervalMs_(500) {
-}
+std::shared_ptr<TaskThreadPool<>> CopysetNode::copysetSyncPool_ =
+    nullptr;
+
+CopysetNode::CopysetNode(const LogicPoolID& logicPoolId,
+                         const CopysetID& copysetId,
+                         const Configuration& initConf)
+    : logicPoolId_(logicPoolId),
+      copysetId_(copysetId),
+      conf_(initConf),
+      epoch_(0),
+      peerId_(),
+      nodeOptions_(),
+      raftNode_(nullptr),
+      chunkDataApath_(),
+      chunkDataRpath_(),
+      appliedIndex_(0),
+      leaderTerm_(-1),
+      configChange_(std::make_shared<ConfigurationChange>()),
+      lastSnapshotIndex_(0),
+      scaning_(false),
+      lastScanSec_(0),
+      enableOdsyncWhenOpenChunkFile_(false),
+      isSyncing_(false),
+      checkSyncingIntervalMs_(500) {}

 CopysetNode::~CopysetNode() {
-    // 移除 copyset的metric
+    // Remove the copyset's metric
     ChunkServerMetric::GetInstance()->RemoveCopysetMetric(logicPoolId_,
                                                           copysetId_);
     metric_ = nullptr;
@@ -98,17 +98,16 @@ CopysetNode::~CopysetNode() {
         delete nodeOptions_.snapshot_file_system_adaptor;
         nodeOptions_.snapshot_file_system_adaptor = nullptr;
     }
-    LOG(INFO) << "release copyset node: "
-              << GroupIdString();
+    LOG(INFO) << "release copyset node: " << GroupIdString();
 }

-int CopysetNode::Init(const CopysetNodeOptions &options) {
+int CopysetNode::Init(const CopysetNodeOptions& options) {
     std::string groupId = GroupId();

     std::string protocol = curve::common::UriParser::ParseUri(
         options.chunkDataUri, &copysetDirPath_);
     if (protocol.empty()) {
-        // TODO(wudemiao): 增加必要的错误码并返回
+        // TODO(wudemiao): add the necessary error codes and return them
         LOG(ERROR) << "not support chunk data uri's protocol"
                    << " error chunkDataDir is: " << options.chunkDataUri
                    << ". Copyset: " << GroupIdString();
@@ -135,12 +134,11 @@ int CopysetNode::Init(const CopysetNodeOptions &options) {
     dsOptions.locationLimit = options.locationLimit;
     dsOptions.enableOdsyncWhenOpenChunkFile =
         options.enableOdsyncWhenOpenChunkFile;
-    dataStore_ = std::make_shared<CSDataStore>(options.localFileSystem,
-                                               options.chunkFilePool,
-                                               dsOptions);
+    dataStore_ = std::make_shared<CSDataStore>(
+        options.localFileSystem, options.chunkFilePool, dsOptions);
     CHECK(nullptr != dataStore_);
     if (false == dataStore_->Initialize()) {
-        // TODO(wudemiao): 增加必要的错误码并返回
+        // TODO(wudemiao): add the necessary error codes and return them
         LOG(ERROR) << "data store init failed. "
                    << "Copyset: " << GroupIdString();
         return -1;
@@ -150,10 +148,10 @@ int CopysetNode::Init(const CopysetNodeOptions &options) {
         syncThread_.Init(this);
         dataStore_->SetCacheCondPtr(syncThread_.cond_);
         dataStore_->SetCacheLimits(options.syncChunkLimit,
-                               options.syncThreshold);
+                                   options.syncThreshold);
         LOG(INFO) << "init sync thread success limit = "
-                  << options.syncChunkLimit <<
-                  "syncthreshold = " << options.syncThreshold;
+                  << options.syncChunkLimit
+                  << "syncthreshold = " << options.syncThreshold;
     }

     recyclerUri_ = options.recyclerUri;
@@ -166,21 +164,21 @@ int CopysetNode::Init(const CopysetNodeOptions &options) {
     // initialize raft node options corresponding to the copy set node
     InitRaftNodeOptions(options);

-    /* 初始化 peer id */
+    /* Initialize the peer id */
     butil::ip_t ip;
     butil::str2ip(options.ip.c_str(), &ip);
     butil::EndPoint addr(ip, options.port);
     /**
-     * idx默认是零,在chunkserver不允许一个进程有同一个copyset的多副本,
-     * 这一点注意和不让braft区别开来
+     * The default idx is zero; chunkserver does not allow one process to hold
+     * multiple replicas of the same copyset. Note that this is a deliberate
+     * difference from braft.
     */
     peerId_ = PeerId(addr, 0);
     raftNode_ = std::make_shared<RaftNode>(groupId, peerId_);
     concurrentapply_ = options.concurrentapply;
-
     /*
-     * 初始化copyset性能metrics
+     * Initialize the copyset performance metrics
     */
     int ret = ChunkServerMetric::GetInstance()->CreateCopysetMetric(
         logicPoolId_, copysetId_);
@@ -189,10 +187,11 @@ int CopysetNode::Init(const CopysetNodeOptions &options) {
                    << "Copyset: " << GroupIdString();
         return -1;
     }
-    metric_ = ChunkServerMetric::GetInstance()->GetCopysetMetric(
-        logicPoolId_, copysetId_);
+    metric_ = ChunkServerMetric::GetInstance()->GetCopysetMetric(logicPoolId_,
+                                                                 copysetId_);
     if (metric_ != nullptr) {
-        // TODO(yyk) 后续考虑添加datastore层面的io metric
+        // TODO(yyk): consider adding datastore-level io metrics later
         metric_->MonitorDataStore(dataStore_.get());
     }

@@ -213,7 +212,7 @@ int CopysetNode::Init(const CopysetNodeOptions &options) {
 }

 int CopysetNode::Run() {
-    // raft node的初始化实际上让起run起来
+    // Initializing the raft node actually makes it run
     if (0 != raftNode_->init(nodeOptions_)) {
         LOG(ERROR) << "Fail to init raft node. "
                    << "Copyset: " << GroupIdString();
@@ -237,19 +236,20 @@ void CopysetNode::Fini() {
     WaitSnapshotDone();

     if (nullptr != raftNode_) {
-        // 关闭所有关于此raft node的服务
+        // Close all services related to this raft node
         raftNode_->shutdown(nullptr);
-        // 等待所有的正在处理的task结束
+        // Wait for all in-flight tasks to finish
         raftNode_->join();
     }
     if (nullptr != concurrentapply_) {
-        // 将未刷盘的数据落盘,如果不刷盘
-        // 迁移copyset时,copyset移除后再去执行WriteChunk操作可能出错
+        // Flush unflushed data to disk; otherwise, when a copyset is migrated,
+        // executing WriteChunk after the copyset has been removed may fail
         concurrentapply_->Flush();
     }
 }

-void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions &options) {
+void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions& options) {
     auto groupId = GroupId();
     nodeOptions_.initial_conf = conf_;
     nodeOptions_.election_timeout_ms = options.electionTimeoutMs;
@@ -257,20 +257,19 @@ void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions &options) {
     nodeOptions_.node_owns_fsm = false;
     nodeOptions_.snapshot_interval_s = options.snapshotIntervalS;
     nodeOptions_.log_uri = options.logUri;
-    nodeOptions_.log_uri.append("/").append(groupId)
-        .append("/").append(RAFT_LOG_DIR);
+    nodeOptions_.log_uri.append("/").append(groupId).append("/").append(
+        RAFT_LOG_DIR);
     nodeOptions_.raft_meta_uri = options.raftMetaUri;
-    nodeOptions_.raft_meta_uri.append("/").append(groupId)
-        .append("/").append(RAFT_META_DIR);
+    nodeOptions_.raft_meta_uri.append("/").append(groupId).append("/").append(
+        RAFT_META_DIR);
     nodeOptions_.snapshot_uri = options.raftSnapshotUri;
-    nodeOptions_.snapshot_uri.append("/").append(groupId)
-        .append("/").append(RAFT_SNAP_DIR);
+    nodeOptions_.snapshot_uri.append("/").append(groupId).append("/").append(
+        RAFT_SNAP_DIR);
     nodeOptions_.usercode_in_pthread = options.usercodeInPthread;
     nodeOptions_.snapshot_throttle = options.snapshotThrottle;

-    CurveFilesystemAdaptor* cfa =
-        new CurveFilesystemAdaptor(options.chunkFilePool,
-                                   options.localFileSystem);
+    CurveFilesystemAdaptor* cfa = new CurveFilesystemAdaptor(
+        options.chunkFilePool, options.localFileSystem);
     std::vector<std::string> filterList;
     std::string snapshotMeta(BRAFT_SNAPSHOT_META_FILE);
     filterList.push_back(kCurveConfEpochFilename);
@@ -282,47 +281,52 @@ void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions &options) {
         new scoped_refptr<braft::FileSystemAdaptor>(cfa);
 }

-void CopysetNode::on_apply(::braft::Iterator &iter) {
+void CopysetNode::on_apply(::braft::Iterator& iter) {
     for (; iter.valid(); iter.next()) {
-        // 放在bthread中异步执行,避免阻塞当前状态机的执行
+        // Executed asynchronously in a bthread to avoid blocking the current
+        // state machine
         braft::AsyncClosureGuard doneGuard(iter.done());

         /**
-         * 获取向braft提交任务时候传递的ChunkClosure,里面包含了
-         * Op的所有上下文 ChunkOpRequest
+         * Obtain the ChunkClosure passed when the task was submitted to braft;
+         * it contains the ChunkOpRequest, i.e. the full context of the Op
        */
-        braft::Closure *closure = iter.done();
+        braft::Closure* closure = iter.done();

         if (nullptr != closure) {
             /**
-             * 1.closure不是null,那么说明当前节点正常,直接从内存中拿到Op
-             * context进行apply
+             * 1. The closure is not null: the current node is healthy, so the
+             * Op context is taken directly from memory and applied
             */
-            ChunkClosure
-                *chunkClosure = dynamic_cast<ChunkClosure *>(iter.done());
+            ChunkClosure* chunkClosure =
+                dynamic_cast<ChunkClosure*>(iter.done());
             CHECK(nullptr != chunkClosure)
                 << "ChunkClosure dynamic cast failed";
             std::shared_ptr<ChunkOpRequest>& opRequest = chunkClosure->request_;
-            concurrentapply_->Push(opRequest->ChunkId(), ChunkOpRequest::Schedule(opRequest->OpType()),  // NOLINT
-                                   &ChunkOpRequest::OnApply, opRequest,
-                                   iter.index(), doneGuard.release());
+            concurrentapply_->Push(
+                opRequest->ChunkId(),
+                ChunkOpRequest::Schedule(opRequest->OpType()),  // NOLINT
+                &ChunkOpRequest::OnApply, opRequest, iter.index(),
+                doneGuard.release());
         } else {
-            // 获取log entry
+            // Obtain the log entry
             butil::IOBuf log = iter.data();
             /**
-             * 2.closure是null,有两种情况:
-             * 2.1. 节点重启,回放apply,这里会将Op log entry进行反序列化,
-             *      然后获取Op信息进行apply
-             * 2.2. follower apply
+             * 2. The closure is null; there are two cases:
+             * 2.1. The node restarted and is replaying the log: the Op log
+             *      entry is deserialized and the Op information is applied
+             * 2.2. A follower apply
            */
             ChunkRequest request;
             butil::IOBuf data;
             auto opReq = ChunkOpRequest::Decode(log, &request, &data,
                                                 iter.index(), GetLeaderId());
             auto chunkId = request.chunkid();
-            concurrentapply_->Push(chunkId, ChunkOpRequest::Schedule(request.optype()),  // NOLINT
-                                   &ChunkOpRequest::OnApplyFromLog, opReq,
-                                   dataStore_, std::move(request), data);
+            concurrentapply_->Push(
+                chunkId, ChunkOpRequest::Schedule(request.optype()),  // NOLINT
+                &ChunkOpRequest::OnApplyFromLog, opReq, dataStore_,
+                std::move(request), data);
         }
     }
 }
@@ -331,11 +335,11 @@ void CopysetNode::on_shutdown() {
     LOG(INFO) << GroupIdString() << " is shutdown";
 }

-void CopysetNode::on_snapshot_save(::braft::SnapshotWriter *writer,
-                                   ::braft::Closure *done) {
+void CopysetNode::on_snapshot_save(::braft::SnapshotWriter* writer,
+                                   ::braft::Closure* done) {
     snapshotFuture_ =
-        std::async(std::launch::async,
-                   &CopysetNode::save_snapshot_background, this, writer, done);
+        std::async(std::launch::async, &CopysetNode::save_snapshot_background,
+                   this, writer, done);
 }

 void CopysetNode::WaitSnapshotDone() {
@@ -345,12 +349,12 @@ void CopysetNode::WaitSnapshotDone() {
     }
 }

-void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer,
-                                           ::braft::Closure *done) {
+void CopysetNode::save_snapshot_background(::braft::SnapshotWriter* writer,
+                                           ::braft::Closure* done) {
     brpc::ClosureGuard doneGuard(done);

     /**
-     * 1.flush I/O to disk,确保数据都落盘
+     * 1. Flush I/O to disk to ensure that all data is persisted
     */
     concurrentapply_->Flush();

@@ -359,37 +363,41 @@ void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer,
     }

     /**
-     * 2.保存配置版本: conf.epoch,注意conf.epoch是存放在data目录下
+     * 2. Save the configuration version in conf.epoch; note that conf.epoch
+     * is stored in the data directory
     */
-    std::string
-        filePathTemp = writer->get_path() + "/" + kCurveConfEpochFilename;
+    std::string filePathTemp =
+        writer->get_path() + "/" + kCurveConfEpochFilename;
     if (0 != SaveConfEpoch(filePathTemp)) {
         done->status().set_error(errno, "invalid: %s", strerror(errno));
         LOG(ERROR) << "SaveConfEpoch failed. "
-                   << "Copyset: " << GroupIdString()
-                   << ", errno: " << errno << ", "
+                   << "Copyset: " << GroupIdString() << ", errno: " << errno
+                   << ", "
                    << ", error message: " << strerror(errno);
         return;
     }

     /**
-     * 3.保存chunk文件名的列表到快照元数据文件中
+     * 3. Save the list of chunk file names into the snapshot metadata file
     */
     std::vector<std::string> files;
     if (0 == fs_->List(chunkDataApath_, &files)) {
         for (const auto& fileName : files) {
-            // raft保存快照时,meta信息中不用保存快照文件列表
-            // raft下载快照的时候,在下载完chunk以后,会单独获取snapshot列表
+            // When raft saves a snapshot, the list of snapshot files does not
+            // need to be stored in the meta information; when raft downloads a
+            // snapshot, it fetches the snapshot list separately after the
+            // chunks have been downloaded
             bool isSnapshot = DatastoreFileHelper::IsSnapshotFile(fileName);
             if (isSnapshot) {
                 continue;
             }
             std::string chunkApath;
-            // 通过绝对路径,算出相对于快照目录的路径
+            // Derive the path relative to the snapshot directory from the
+            // absolute path
             chunkApath.append(chunkDataApath_);
             chunkApath.append("/").append(fileName);
-            std::string filePath = curve::common::CalcRelativePath(
-                writer->get_path(), chunkApath);
+            std::string filePath =
+                curve::common::CalcRelativePath(writer->get_path(), chunkApath);
             writer->add_file(filePath);
         }
     } else {
@@ -401,16 +409,16 @@ void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer,
     }

     /**
-     * 4. 保存conf.epoch文件到快照元数据文件中
+     * 4. Save the conf.epoch file into the snapshot metadata file
     */
-    writer->add_file(kCurveConfEpochFilename);
+    writer->add_file(kCurveConfEpochFilename);
 }

-int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) {
+int CopysetNode::on_snapshot_load(::braft::SnapshotReader* reader) {
     /**
-     * 1. 加载快照数据
+     * 1. Load the snapshot data
     */
-    // 打开的 snapshot path: /mnt/sda/1-10001/raft_snapshot/snapshot_0043
+    // Opened snapshot path: /mnt/sda/1-10001/raft_snapshot/snapshot_0043
     std::string snapshotPath = reader->get_path();

     // /mnt/sda/1-10001/raft_snapshot/snapshot_0043/data
@@ -419,15 +427,19 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) {
     snapshotChunkDataDir.append("/").append(chunkDataRpath_);
     LOG(INFO) << "load snapshot data path: " << snapshotChunkDataDir
               << ", Copyset: " << GroupIdString();
-    // 如果数据目录不存在,那么说明 load snapshot 数据部分就不需要处理
+    // If the data directory does not exist, the snapshot data part needs no
+    // processing
     if (fs_->DirExists(snapshotChunkDataDir)) {
-        // 加载快照数据前,要先清理copyset data目录下的文件
-        // 否则可能导致快照加载以后存在一些残留的数据
-        // 如果delete_file失败或者rename失败,当前node状态会置为ERROR
-        // 如果delete_file或者rename期间进程重启,copyset起来后会加载快照
-        // 由于rename可以保证原子性,所以起来加载快照后,data目录一定能还原
-        bool ret = nodeOptions_.snapshot_file_system_adaptor->get()->
-                                delete_file(chunkDataApath_, true);
+        // Before loading the snapshot data, clean the files under the copyset
+        // data directory first; otherwise some residual data may remain after
+        // the snapshot is loaded. If delete_file or rename fails, the current
+        // node status is set to ERROR. If the process restarts during
+        // delete_file or rename, the snapshot is loaded again after the
+        // copyset comes up. Since rename is atomic, the data directory can
+        // always be restored once the snapshot is loaded.
+        bool ret =
+            nodeOptions_.snapshot_file_system_adaptor->get()->delete_file(
+                chunkDataApath_, true);
         if (!ret) {
             LOG(ERROR) << "delete chunk data dir failed. "
                        << "Copyset: " << GroupIdString()
@@ -437,8 +449,8 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) {
         LOG(INFO) << "delete chunk data dir success. 
" << "Copyset: " << GroupIdString() << ", path: " << chunkDataApath_; - ret = nodeOptions_.snapshot_file_system_adaptor->get()-> - rename(snapshotChunkDataDir, chunkDataApath_); + ret = nodeOptions_.snapshot_file_system_adaptor->get()->rename( + snapshotChunkDataDir, chunkDataApath_); if (!ret) { LOG(ERROR) << "rename snapshot data dir " << snapshotChunkDataDir << "to chunk data dir " << chunkDataApath_ << " failed. " @@ -449,13 +461,13 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { << "to chunk data dir " << chunkDataApath_ << " success. " << "Copyset: " << GroupIdString(); } else { - LOG(INFO) << "load snapshot data path: " - << snapshotChunkDataDir << " not exist. " + LOG(INFO) << "load snapshot data path: " << snapshotChunkDataDir + << " not exist. " << "Copyset: " << GroupIdString(); } /** - * 2. 加载配置版本文件 + * 2. Load Configuration Version File */ std::string filePath = reader->get_path() + "/" + kCurveConfEpochFilename; if (fs_->FileExists(filePath)) { @@ -468,20 +480,25 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { } /** - * 3.重新init data store,场景举例: + * 3. Reinitializing the data store, with examples: * - * (1) 例如一个add peer,之后立马read这个时候data store会返回chunk - * not exist,因为这个新增的peer在刚开始起来的时候,没有任何数据,这 - * 个时候data store init了,那么新增的peer在leader恢复了数据之后, - * data store并不感知; + * (1) For instance, when adding a new peer and immediately reading data, + * the data store may return "chunk not exist." This is because the newly + * added peer initially has no data, and when the data store is initialized, + * it is not aware of the data that the new peer receives after the leader + * recovers its data. * - * (2) peer通过install snapshot恢复了所有的数据是通过rename操作的, - * 如果某个file之前被data store打开了,那么rename能成功,但是老的 - * 文件只有等data store close老的文件才能删除,所以需要重新init data - * store,并且close的文件的fd,然后重新open新的文件,不然data store - * 会一直是操作的老的文件,而一旦data store close相应的fd一次之后, - * 后面的write的数据就会丢,除此之外,如果 datastore init没有重新open - * 文件,也将导致read不到恢复过来的数据,而是read到老的数据。 + * (2) When a peer recovers all of its data through an install snapshot + * operation, it is performed through a rename operation. If a file was + * previously open in the data store, the rename operation can succeed, but + * the old file can only be deleted after the data store closes it. + * Therefore, it is necessary to reinitialize the data store, close the + * file's file descriptor (fd), and then reopen the new file. Otherwise, the + * data store will continue to operate on the old file. Once the data store + * closes, the corresponding fd, any subsequent write operations will be + * lost. Additionally, if the datastore is not reinitialized and the new + * file is not reopened, it may result in reading the old data rather than + * the recovered data. */ if (!dataStore_->Initialize()) { LOG(ERROR) << "data store init failed in on snapshot load. " @@ -490,8 +507,9 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { } /** - * 4.如果snapshot中存 conf,那么加载初始化,保证不需要以来 - * on_configuration_committed。需要注意的是这里会忽略joint stage的日志。 + * 4. If conf is stored in the snapshot, load initialization to ensure that + * there is no need for on_configuration_committed. It should be noted that + * the log of the joint stage will be ignored here. */ braft::SnapshotMeta meta; reader->load_meta(&meta); @@ -510,7 +528,7 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { void CopysetNode::on_leader_start(int64_t term) { /* - * Invoke order in on_leader_start: + * Invoke order in on_leader_start: * 1. 
flush concurrent apply queue. * 2. set term in states machine. * @@ -536,7 +554,7 @@ void CopysetNode::on_leader_start(int64_t term) { << " become leader, term is: " << leaderTerm_; } -void CopysetNode::on_leader_stop(const butil::Status &status) { +void CopysetNode::on_leader_stop(const butil::Status& status) { (void)status; leaderTerm_.store(-1, std::memory_order_release); ChunkServerMetric::GetInstance()->DecreaseLeaderCount(); @@ -544,7 +562,7 @@ void CopysetNode::on_leader_stop(const butil::Status &status) { << ", peer id: " << peerId_.to_string() << " stepped down"; } -void CopysetNode::on_error(const ::braft::Error &e) { +void CopysetNode::on_error(const ::braft::Error& e) { LOG(FATAL) << "Copyset: " << GroupIdString() << ", peer id: " << peerId_.to_string() << " meet raft error: " << e; @@ -556,7 +574,7 @@ void CopysetNode::on_configuration_committed(const Configuration& conf, // Loading snapshot should not increase epoch. When loading // snapshot, the index is equal with lastSnapshotIndex_. LOG(INFO) << "index: " << index - << ", lastSnapshotIndex_: " << lastSnapshotIndex_; + << ", lastSnapshotIndex_: " << lastSnapshotIndex_; if (index != lastSnapshotIndex_) { std::unique_lock lock_guard(confLock_); conf_ = conf; @@ -569,63 +587,47 @@ void CopysetNode::on_configuration_committed(const Configuration& conf, << ", epoch: " << epoch_.load(std::memory_order_acquire); } -void CopysetNode::on_stop_following(const ::braft::LeaderChangeContext &ctx) { +void CopysetNode::on_stop_following(const ::braft::LeaderChangeContext& ctx) { LOG(INFO) << "Copyset: " << GroupIdString() - << ", peer id: " << peerId_.to_string() - << " stops following" << ctx; + << ", peer id: " << peerId_.to_string() << " stops following" + << ctx; } -void CopysetNode::on_start_following(const ::braft::LeaderChangeContext &ctx) { +void CopysetNode::on_start_following(const ::braft::LeaderChangeContext& ctx) { LOG(INFO) << "Copyset: " << GroupIdString() - << ", peer id: " << peerId_.to_string() - << "start following" << ctx; + << ", peer id: " << peerId_.to_string() << "start following" + << ctx; } -LogicPoolID CopysetNode::GetLogicPoolId() const { - return logicPoolId_; -} +LogicPoolID CopysetNode::GetLogicPoolId() const { return logicPoolId_; } -CopysetID CopysetNode::GetCopysetId() const { - return copysetId_; -} +CopysetID CopysetNode::GetCopysetId() const { return copysetId_; } -void CopysetNode::SetScan(bool scan) { - scaning_ = scan; -} +void CopysetNode::SetScan(bool scan) { scaning_ = scan; } -bool CopysetNode::GetScan() const { - return scaning_; -} +bool CopysetNode::GetScan() const { return scaning_; } -void CopysetNode::SetLastScan(uint64_t time) { - lastScanSec_ = time; -} +void CopysetNode::SetLastScan(uint64_t time) { lastScanSec_ = time; } -uint64_t CopysetNode::GetLastScan() const { - return lastScanSec_; -} +uint64_t CopysetNode::GetLastScan() const { return lastScanSec_; } std::vector& CopysetNode::GetFailedScanMap() { return failedScanMaps_; } -std::string CopysetNode::GetCopysetDir() const { - return copysetDirPath_; -} +std::string CopysetNode::GetCopysetDir() const { return copysetDirPath_; } uint64_t CopysetNode::GetConfEpoch() const { std::lock_guard lockguard(confLock_); return epoch_.load(std::memory_order_relaxed); } -int CopysetNode::LoadConfEpoch(const std::string &filePath) { +int CopysetNode::LoadConfEpoch(const std::string& filePath) { LogicPoolID loadLogicPoolID = 0; CopysetID loadCopysetID = 0; uint64_t loadEpoch = 0; - int ret = epochFile_->Load(filePath, - &loadLogicPoolID, - 
&loadCopysetID, + int ret = epochFile_->Load(filePath, &loadLogicPoolID, &loadCopysetID, &loadEpoch); if (0 == ret) { if (logicPoolId_ != loadLogicPoolID || copysetId_ != loadCopysetID) { @@ -643,7 +645,7 @@ int CopysetNode::LoadConfEpoch(const std::string &filePath) { return ret; } -int CopysetNode::SaveConfEpoch(const std::string &filePath) { +int CopysetNode::SaveConfEpoch(const std::string& filePath) { return epochFile_->Save(filePath, logicPoolId_, copysetId_, epoch_); } @@ -678,17 +680,17 @@ void CopysetNode::SetCopysetNode(std::shared_ptr node) { raftNode_ = node; } -void CopysetNode::SetSnapshotFileSystem(scoped_refptr *fs) { +void CopysetNode::SetSnapshotFileSystem(scoped_refptr* fs) { nodeOptions_.snapshot_file_system_adaptor = fs; } bool CopysetNode::IsLeaderTerm() const { - if (0 < leaderTerm_.load(std::memory_order_acquire)) - return true; + if (0 < leaderTerm_.load(std::memory_order_acquire)) return true; return false; } -bool CopysetNode::IsLeaseLeader(const braft::LeaderLeaseStatus &lease_status) const { // NOLINT +bool CopysetNode::IsLeaseLeader( + const braft::LeaderLeaseStatus& lease_status) const { // NOLINT /* * Why not use lease_status.state==LEASE_VALID directly to judge? * @@ -707,13 +709,12 @@ bool CopysetNode::IsLeaseLeader(const braft::LeaderLeaseStatus &lease_status) co return term > 0 && term == lease_status.term; } -bool CopysetNode::IsLeaseExpired(const braft::LeaderLeaseStatus &lease_status) const { // NOLINT +bool CopysetNode::IsLeaseExpired( + const braft::LeaderLeaseStatus& lease_status) const { // NOLINT return lease_status.state == braft::LEASE_EXPIRED; } -PeerId CopysetNode::GetLeaderId() const { - return raftNode_->leader_id(); -} +PeerId CopysetNode::GetLeaderId() const { return raftNode_->leader_id(); } butil::Status CopysetNode::TransferLeader(const Peer& peer) { butil::Status status; @@ -722,15 +723,15 @@ butil::Status CopysetNode::TransferLeader(const Peer& peer) { if (raftNode_->leader_id() == peerId) { butil::Status status = butil::Status::OK(); DVLOG(6) << "Skipped transferring leader to leader itself. 
" - << "peerid: " << peerId - << ", Copyset: " << GroupIdString(); + << "peerid: " << peerId << ", Copyset: " << GroupIdString(); return status; } int rc = raftNode_->transfer_leadership_to(peerId); if (rc != 0) { - status = butil::Status(rc, "Failed to transfer leader of copyset " + status = butil::Status(rc, + "Failed to transfer leader of copyset " "%s to peer %s, error: %s", GroupIdString().c_str(), peerId.to_string().c_str(), berror(rc)); @@ -741,9 +742,8 @@ butil::Status CopysetNode::TransferLeader(const Peer& peer) { transferee_ = peer; status = butil::Status::OK(); - LOG(INFO) << "Transferred leader of copyset " - << GroupIdString() - << " to peer " << peerId; + LOG(INFO) << "Transferred leader of copyset " << GroupIdString() + << " to peer " << peerId; return status; } @@ -761,14 +761,13 @@ butil::Status CopysetNode::AddPeer(const Peer& peer) { if (peer == peerId) { butil::Status status = butil::Status::OK(); DVLOG(6) << peerId << " is already a member of copyset " - << GroupIdString() - << ", skip adding peer"; + << GroupIdString() << ", skip adding peer"; return status; } } ConfigurationChangeDone* addPeerDone = - new ConfigurationChangeDone(configChange_); + new ConfigurationChangeDone(configChange_); ConfigurationChange expectedCfgChange(ConfigChangeType::ADD_PEER, peer); addPeerDone->expectedCfgChange = expectedCfgChange; raftNode_->add_peer(peerId, addPeerDone); @@ -797,13 +796,13 @@ butil::Status CopysetNode::RemovePeer(const Peer& peer) { if (!peerValid) { butil::Status status = butil::Status::OK(); - DVLOG(6) << peerId << " is not a member of copyset " - << GroupIdString() << ", skip removing"; + DVLOG(6) << peerId << " is not a member of copyset " << GroupIdString() + << ", skip removing"; return status; } ConfigurationChangeDone* removePeerDone = - new ConfigurationChangeDone(configChange_); + new ConfigurationChangeDone(configChange_); ConfigurationChange expectedCfgChange(ConfigChangeType::REMOVE_PEER, peer); removePeerDone->expectedCfgChange = expectedCfgChange; raftNode_->remove_peer(peerId, removePeerDone); @@ -831,7 +830,7 @@ butil::Status CopysetNode::ChangePeer(const std::vector& newPeers) { return st; } ConfigurationChangeDone* changePeerDone = - new ConfigurationChangeDone(configChange_); + new ConfigurationChangeDone(configChange_); ConfigurationChange expectedCfgChange; expectedCfgChange.type = ConfigChangeType::CHANGE_PEER; expectedCfgChange.alterPeer.set_address(adding.begin()->to_string()); @@ -845,18 +844,22 @@ butil::Status CopysetNode::ChangePeer(const std::vector& newPeers) { void CopysetNode::UpdateAppliedIndex(uint64_t index) { uint64_t curIndex = appliedIndex_.load(std::memory_order_acquire); - // 只更新比自己大的 index + // Only update indexes larger than oneself if (index > curIndex) { /** - * compare_exchange_strong解释: - * 首先比较curIndex是不是等于appliedIndex,如果是,那么说明没有人 - * 修改appliedindex,那么用index去修改appliedIndex,更新成功,完成; - * 如果不等于,说明有人更新了appliedindex,那么通过curIndex返回当前 - * 的appliedindex,并且返回false。整个过程都是原子的 + * Explanation of compare_exchange_strong: + * First, it compares whether curIndex is equal to appliedIndex. If it + * is equal, it means that no one has modified appliedindex. In this + * case, it tries to update appliedIndex with the value of index, and if + * the update is successful, it's done. If curIndex is not equal to + * appliedindex, it indicates that someone else has updated appliedIndex + * in the meantime. In this case, it returns the current value of + * appliedindex through curIndex and returns false. This entire process + * is atomic. 
*/ - while (!appliedIndex_.compare_exchange_strong(curIndex, - index, - std::memory_order_acq_rel)) { //NOLINT + while (!appliedIndex_.compare_exchange_strong( + curIndex, index, + std::memory_order_acq_rel)) { // NOLINT if (index <= curIndex) { break; } @@ -876,27 +879,29 @@ CurveSegmentLogStorage* CopysetNode::GetLogStorage() const { return logStorage_; } -ConcurrentApplyModule *CopysetNode::GetConcurrentApplyModule() const { +ConcurrentApplyModule* CopysetNode::GetConcurrentApplyModule() const { return concurrentapply_; } -void CopysetNode::Propose(const braft::Task &task) { - raftNode_->apply(task); -} +void CopysetNode::Propose(const braft::Task& task) { raftNode_->apply(task); } -int CopysetNode::GetConfChange(ConfigChangeType *type, - Configuration *oldConf, - Peer *alterPeer) { +int CopysetNode::GetConfChange(ConfigChangeType* type, Configuration* oldConf, + Peer* alterPeer) { /** - * 避免new leader当选leader之后,提交noop entry之前,epoch和 - * 配置可能不一致的情况。考虑如下情形: + * To prevent inconsistencies between the epoch and configuration before + * a new leader is elected and a noop entry is committed, consider the + * following scenario: * - * 三个成员的复制组{ABC},当前epoch=5,A是leader,收到配置配置+D, - * 假设B收到了{ABC+D}的配置变更日志,然后leader A挂了,B当选为了 - * new leader,在B提交noop entry之前,B上查询到的epoch值最大可能为5, - * 而查询到的配置确实{ABCD}了,所以这里在new leader B在提交noop entry - * 之前,也就是实现隐公提交配置变更日志{ABC+D}之前,不允许向用户返回 - * 配置和配置变更信息,避免epoch和配置信息不一致 + * In a replication group with three members {ABC}, the current epoch is 5, + * and A is the leader. A receives a configuration change log that adds D, + * and assume that B also receives the configuration change log {ABC+D}. + * Then, leader A crashes, and B is elected as the new leader. Before B + * commits the noop entry, the maximum epoch value it can query on B is + * still 5, but the queried configuration is {ABCD}. Therefore, here, before + * the new leader B commits the noop entry, which is effectively committing + * the hidden configuration change log {ABC+D}, it does not allow returning + * the configuration and configuration change information to the user to + * avoid epoch and configuration information inconsistency. 
*/ if (leaderTerm_.load(std::memory_order_acquire) <= 0) { *type = ConfigChangeType::NONE; @@ -922,9 +927,9 @@ uint64_t CopysetNode::LeaderTerm() const { return leaderTerm_.load(std::memory_order_acquire); } -int CopysetNode::GetHash(std::string *hash) { +int CopysetNode::GetHash(std::string* hash) { int ret = 0; - int fd = 0; + int fd = 0; int len = 0; uint32_t crc32c = 0; std::vector files; @@ -934,7 +939,8 @@ int CopysetNode::GetHash(std::string *hash) { return -1; } - // 计算所有chunk文件crc需要保证计算的顺序是一样的 + // Calculating all chunk files' crc requires ensuring that the order of + // calculations is the same std::sort(files.begin(), files.end()); for (std::string file : files) { @@ -953,7 +959,7 @@ int CopysetNode::GetHash(std::string *hash) { } len = fileInfo.st_size; - char *buff = new (std::nothrow) char[len]; + char* buff = new (std::nothrow) char[len]; if (nullptr == buff) { return -1; } @@ -974,15 +980,15 @@ int CopysetNode::GetHash(std::string *hash) { return 0; } -void CopysetNode::GetStatus(NodeStatus *status) { +void CopysetNode::GetStatus(NodeStatus* status) { raftNode_->get_status(status); } -void CopysetNode::GetLeaderLeaseStatus(braft::LeaderLeaseStatus *status) { +void CopysetNode::GetLeaderLeaseStatus(braft::LeaderLeaseStatus* status) { raftNode_->get_leader_lease_status(status); } -bool CopysetNode::GetLeaderStatus(NodeStatus *leaderStaus) { +bool CopysetNode::GetLeaderStatus(NodeStatus* leaderStaus) { NodeStatus status; GetStatus(&status); if (status.leader_id.is_empty()) { @@ -997,16 +1003,15 @@ bool CopysetNode::GetLeaderStatus(NodeStatus *leaderStaus) { brpc::Controller cntl; cntl.set_timeout_ms(500); brpc::Channel channel; - if (channel.Init(status.leader_id.addr, nullptr) !=0) { - LOG(WARNING) << "can not create channel to " - << status.leader_id.addr + if (channel.Init(status.leader_id.addr, nullptr) != 0) { + LOG(WARNING) << "can not create channel to " << status.leader_id.addr << ", copyset " << GroupIdString(); return false; } CopysetStatusRequest request; CopysetStatusResponse response; - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address(status.leader_id.to_string()); request.set_logicpoolid(logicPoolId_); request.set_copysetid(copysetId_); @@ -1016,16 +1021,15 @@ bool CopysetNode::GetLeaderStatus(NodeStatus *leaderStaus) { CopysetService_Stub stub(&channel); stub.GetCopysetStatus(&cntl, &request, &response, nullptr); if (cntl.Failed()) { - LOG(WARNING) << "get leader status failed: " - << cntl.ErrorText() + LOG(WARNING) << "get leader status failed: " << cntl.ErrorText() << ", copyset " << GroupIdString(); return false; } if (response.status() != COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS) { LOG(WARNING) << "get leader status failed" - << ", status: " << response.status() - << ", copyset " << GroupIdString(); + << ", status: " << response.status() << ", copyset " + << GroupIdString(); return false; } @@ -1078,9 +1082,8 @@ void CopysetNode::SyncAllChunks() { CSErrorCode r = dataStore_->SyncChunk(chunk); if (r != CSErrorCode::Success) { LOG(FATAL) << "Sync Chunk failed in Copyset: " - << GroupIdString() - << ", chunkid: " << chunk - << " data store return: " << r; + << GroupIdString() << ", chunkid: " << chunk + << " data store return: " << r; } }); } @@ -1093,11 +1096,11 @@ void SyncChunkThread::Init(CopysetNode* node) { } void SyncChunkThread::Run() { - syncThread_ = std::thread([this](){ + syncThread_ = std::thread([this]() { while (running_) { std::unique_lock lock(mtx_); - 
cond_->wait_for(lock, - std::chrono::seconds(CopysetNode::syncTriggerSeconds_)); + cond_->wait_for( + lock, std::chrono::seconds(CopysetNode::syncTriggerSeconds_)); node_->SyncAllChunks(); } }); @@ -1111,9 +1114,7 @@ void SyncChunkThread::Stop() { } } -SyncChunkThread::~SyncChunkThread() { - Stop(); -} +SyncChunkThread::~SyncChunkThread() { Stop(); } } // namespace chunkserver } // namespace curve diff --git a/src/chunkserver/copyset_node.h b/src/chunkserver/copyset_node.h index cf7a34aeec..74033cbc80 100755 --- a/src/chunkserver/copyset_node.h +++ b/src/chunkserver/copyset_node.h @@ -23,53 +23,53 @@ #ifndef SRC_CHUNKSERVER_COPYSET_NODE_H_ #define SRC_CHUNKSERVER_COPYSET_NODE_H_ -#include #include #include +#include +#include #include +#include +#include #include #include -#include -#include -#include +#include "proto/chunk.pb.h" +#include "proto/common.pb.h" +#include "proto/heartbeat.pb.h" +#include "proto/scan.pb.h" +#include "src/chunkserver/chunkserver_metrics.h" #include "src/chunkserver/concurrent_apply/concurrent_apply.h" -#include "src/chunkserver/datastore/chunkserver_datastore.h" #include "src/chunkserver/conf_epoch_file.h" #include "src/chunkserver/config_info.h" -#include "src/chunkserver/chunkserver_metrics.h" +#include "src/chunkserver/datastore/chunkserver_datastore.h" +#include "src/chunkserver/raft_node.h" #include "src/chunkserver/raftlog/curve_segment_log_storage.h" -#include "src/chunkserver/raftsnapshot/define.h" #include "src/chunkserver/raftsnapshot/curve_snapshot_writer.h" -#include "src/common/string_util.h" +#include "src/chunkserver/raftsnapshot/define.h" #include "src/common/concurrent/task_thread_pool.h" -#include "src/chunkserver/raft_node.h" -#include "proto/heartbeat.pb.h" -#include "proto/chunk.pb.h" -#include "proto/common.pb.h" -#include "proto/scan.pb.h" +#include "src/common/string_util.h" namespace curve { namespace chunkserver { -using ::google::protobuf::RpcController; -using ::google::protobuf::Closure; -using ::curve::mds::heartbeat::ConfigChangeType; using ::curve::common::Peer; using ::curve::common::TaskThreadPool; +using ::curve::mds::heartbeat::ConfigChangeType; +using ::google::protobuf::Closure; +using ::google::protobuf::RpcController; class CopysetNodeManager; -extern const char *kCurveConfEpochFilename; +extern const char* kCurveConfEpochFilename; struct ConfigurationChange { ConfigChangeType type; Peer alterPeer; ConfigurationChange() : type(ConfigChangeType::NONE) {} - ConfigurationChange(const ConfigChangeType& type2, const Peer& alterPeer2) : - type(type2), alterPeer(alterPeer2) {} + ConfigurationChange(const ConfigChangeType& type2, const Peer& alterPeer2) + : type(type2), alterPeer(alterPeer2) {} bool IsEmpty() { return type == ConfigChangeType::NONE && !alterPeer.has_address(); } @@ -79,7 +79,7 @@ struct ConfigurationChange { } bool operator==(const ConfigurationChange& rhs) { return type == rhs.type && - alterPeer.address() == rhs.alterPeer.address(); + alterPeer.address() == rhs.alterPeer.address(); } ConfigurationChange& operator=(const ConfigurationChange& rhs) { type = rhs.type; @@ -92,17 +92,18 @@ class ConfigurationChangeDone : public braft::Closure { public: void Run() { if (!expectedCfgChange.IsEmpty() && - *curCfgChange == expectedCfgChange) { + *curCfgChange == expectedCfgChange) { curCfgChange->Reset(); } delete this; } explicit ConfigurationChangeDone( - std::shared_ptr cfgChange) - : curCfgChange(cfgChange) {} - // copyset node中当前的配置变更信息 + std::shared_ptr cfgChange) + : curCfgChange(cfgChange) {} + // Current 
configuration change information in the copyset node std::shared_ptr curCfgChange; - // 这次配置变更对应的配置变更信息 + // The configuration change information corresponding to this configuration + // change ConfigurationChange expectedCfgChange; }; @@ -116,6 +117,7 @@ class SyncChunkThread : public curve::common::Uncopyable { void Run(); void Init(CopysetNode* node); void Stop(); + private: bool running_; std::mutex mtx_; @@ -125,7 +127,7 @@ class SyncChunkThread : public curve::common::Uncopyable { }; /** - * 一个Copyset Node就是一个复制组的副本 + * A Copyset Node is a replica of a replication group */ class CopysetNode : public braft::StateMachine, public std::enable_shared_from_this { @@ -133,38 +135,37 @@ class CopysetNode : public braft::StateMachine, // for ut mock CopysetNode() = default; - CopysetNode(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &initConf); + CopysetNode(const LogicPoolID& logicPoolId, const CopysetID& copysetId, + const Configuration& initConf); virtual ~CopysetNode(); /** - * 初始化copyset node配置 + * Initialize copyset node configuration * @param options - * @return 0,成功,-1失败 + * @return 0, successful, -1 failed */ - virtual int Init(const CopysetNodeOptions &options); + virtual int Init(const CopysetNodeOptions& options); /** - * Raft Node init,使得Raft Node运行起来 + * Raft Node init to make Raft Node run * @return */ virtual int Run(); /** - * 关闭copyset node + * Close copyset node */ virtual void Fini(); /** - * 返回复制组的逻辑池ID + * Returns the logical pool ID of the replication group * @return */ LogicPoolID GetLogicPoolId() const; /** - * 返回复制组的复制组ID + * Returns the replication group ID of the replication group * @return */ CopysetID GetCopysetId() const; @@ -180,13 +181,13 @@ class CopysetNode : public braft::StateMachine, virtual std::vector& GetFailedScanMap(); /** - * 返回复制组数据目录 + * Return to the replication group data directory * @return */ std::string GetCopysetDir() const; /** - * 返回当前副本是否在leader任期 + * Returns whether the current replica is in the leader's tenure * @return */ virtual bool IsLeaderTerm() const; @@ -195,111 +196,115 @@ class CopysetNode : public braft::StateMachine, * check if current node is in lease leader * @return */ - virtual bool IsLeaseLeader(const braft::LeaderLeaseStatus &lease_status) const; // NOLINT + virtual bool IsLeaseLeader( + const braft::LeaderLeaseStatus& lease_status) const; // NOLINT /** * check if current node is expired * @return */ - virtual bool IsLeaseExpired(const braft::LeaderLeaseStatus &lease_status) const; // NOLINT + virtual bool IsLeaseExpired( + const braft::LeaderLeaseStatus& lease_status) const; // NOLINT /** - * 返回当前的任期 - * @return 当前的任期 + * Return to current tenure + * @return Current tenure */ virtual uint64_t LeaderTerm() const; /** - * 返回leader id + * Return leader id * @return */ virtual PeerId GetLeaderId() const; /** - * @brief 切换复制组的Leader - * @param[in] peerId 目标Leader的成员ID - * @return 心跳任务的引用 + * @brief Switch the leader of the replication group + * @param[in] peerId The member ID of the target leader + * @return Reference to Heartbeat Task */ butil::Status TransferLeader(const Peer& peer); /** - * @brief 复制组添加新成员 - * @param[in] peerId 新成员的ID - * @return 心跳任务的引用 + * @brief Add new members to the replication group + * @param[in] peerId The ID of the new member + * @return Reference to Heartbeat Task */ butil::Status AddPeer(const Peer& peer); /** - * @brief 复制组删除成员 - * @param[in] peerId 将要删除成员的ID - * @return 心跳任务的引用 + * @brief Copy Group Delete Members + * @param[in] peerId The ID of the member 
to be deleted + * @return Reference to Heartbeat Task */ butil::Status RemovePeer(const Peer& peer); /** - * @brief 变更复制组成员 - * @param[in] newPeers 新的复制组成员 - * @return 心跳任务的引用 + * @brief Change replication group members + * @param[in] newPeers New replication group member + * @return Reference to Heartbeat Task */ butil::Status ChangePeer(const std::vector& newPeers); /** - * 返回copyset的配置版本 + * Returns the configuration version of the copyset * @return */ virtual uint64_t GetConfEpoch() const; /** - * 更新applied index,只有比它大的才更新 + * Update the applied index, only those larger than it will be updated * @param index */ virtual void UpdateAppliedIndex(uint64_t index); /** - * 返回当前最新的applied index + * Returns the current latest applied index * @return */ virtual uint64_t GetAppliedIndex() const; /** - * @brief: 查询配置变更的状态 - * @param type[out]: 配置变更类型 - * @param oldConf[out]: 老的配置 - * @param alterPeer[out]: 变更的peer - * @return 0查询成功,-1查询异常失败 + * @brief: Query the status of configuration changes + * @param type[out]: Configuration change type + * @param oldConf[out]: Old configuration + * @param alterPeer[out]: Changed Peer + * @return 0 query successful, -1 query exception failed */ - virtual int GetConfChange(ConfigChangeType *type, - Configuration *oldConf, - Peer *alterPeer); + virtual int GetConfChange(ConfigChangeType* type, Configuration* oldConf, + Peer* alterPeer); /** - * @brief: 获取copyset node的状态值,用于比较多个副本的数据一致性 - * @param hash[out]: copyset node状态值 - * @return 0成功,-1失败 + * @brief: Obtain the status value of the copyset node for comparing data + * consistency across multiple replicas + * @param hash[out]: copyset node status value + * @return 0 succeeded, -1 failed */ - virtual int GetHash(std::string *hash); + virtual int GetHash(std::string* hash); /** - * @brief: 获取copyset node的status,实际调用的raft node的get_status接口 + * @brief: Get the status of the copyset node, actually calling the + * get_status interface of the Raft node * @param status[out]: copyset node status */ - virtual void GetStatus(NodeStatus *status); + virtual void GetStatus(NodeStatus* status); /** * @brief: get raft node leader lease status * @param status[out]: raft node leader lease status */ - virtual void GetLeaderLeaseStatus(braft::LeaderLeaseStatus *status); + virtual void GetLeaderLeaseStatus(braft::LeaderLeaseStatus* status); /** - * 获取此copyset的leader上的status + * Obtain the status on the leader of this copyset * @param leaderStaus[out]: leader copyset node status - * @return 获取成功返回true,获取失败返回false + * @return returns true for successful acquisition, false for failed + * acquisition */ - virtual bool GetLeaderStatus(NodeStatus *leaderStaus); + virtual bool GetLeaderStatus(NodeStatus* leaderStaus); /** - * 返回data store指针 + * Return data store pointer * @return */ virtual std::shared_ptr GetDataStore() const; @@ -311,19 +316,19 @@ class CopysetNode : public braft::StateMachine, virtual CurveSegmentLogStorage* GetLogStorage() const; /** - * 返回ConcurrentApplyModule + * Returning ConcurrentApplyModule */ virtual ConcurrentApplyModule* GetConcurrentApplyModule() const; /** - * 向copyset node propose一个op request + * Propose an op request to the copyset node * @param task */ - virtual void Propose(const braft::Task &task); + virtual void Propose(const braft::Task& task); /** - * 获取复制组成员 - * @param peers:返回的成员列表(输出参数) + * Get replication group members + * @param peers: List of returned members (output parameters) * @return */ virtual void ListPeers(std::vector* peers); @@ -333,87 +338,95 @@ class CopysetNode : public 
braft::StateMachine, * @param options * @return */ - void InitRaftNodeOptions(const CopysetNodeOptions &options); + void InitRaftNodeOptions(const CopysetNodeOptions& options); /** - * 下面的接口都是继承StateMachine实现的接口 + * The following interfaces are all interfaces that inherit the + * implementation of StateMachine */ public: /** - * op log apply的时候回调函数 - * @param iter:可以batch的访问已经commit的log entries + * Callback function when applying op log + * @param iter: Allows batch access to already committed log entries. */ - void on_apply(::braft::Iterator &iter) override; + void on_apply(::braft::Iterator& iter) override; /** - * 复制关闭的时候调用此回调 + * Call this callback when replication is closed */ void on_shutdown() override; /** - * raft snapshot相关的接口,仅仅保存raft snapshot meta - * 和snapshot文件的list,这里并没有拷贝实际的数据,因为 - * 在块存储场景所有操作是幂等,所以,并不真实的拷贝数据 + * Interfaces related to raft snapshot, which only store raft snapshot meta + * and a list of snapshot files. Actual data is not copied here because + * in the context of block storage, all operations are idempotent, so there + * is no need to actually copy the data. */ - void on_snapshot_save(::braft::SnapshotWriter *writer, - ::braft::Closure *done) override; + void on_snapshot_save(::braft::SnapshotWriter* writer, + ::braft::Closure* done) override; /** - * load日志有两种情况: - * 1. Follower节点Install snapshot追赶leader,这个时候 - * snapshot目录下面有chunk数据和snapshot数据 - * 2. 节点重启,会执行snapshot load,然后回放日志,这个时 - * 候snapshot目录下面没有数据,什么都不用做 - * TODO(wudemiao): install snapshot的时候会存在空间 - * double的可能性,考虑如下场景,follower落后,然后通过从 - * leader install snapshot恢复数据,其首先会从leader将 - * 所有数据下载过来,然后在调用snapshot load加载快照,这个 - * 期间空间占用了就double了;后期需要通过控制单盘参与install - * snapshot的数量 + * There are two scenarios for loading logs: + * 1. Follower nodes catch up with the leader by installing a snapshot. In + * this case, there are chunk data and snapshot data under the snapshot + * directory. + * 2. When a node restarts, it performs a snapshot load and then replays + * the logs. In this case, there is no data under the snapshot directory, so + * nothing needs to be done. + * TODO(wudemiao): When installing a snapshot, there is a possibility of + * doubling the space usage. Consider the following scenario: a follower + * lags behind and then recovers data by installing a snapshot from the + * leader. It will first download all the data from the leader and then call + * snapshot load to load the snapshot. During this period, the space usage + * doubles. Later, we need to control the number of disks participating in + * the installation of snapshots. */ - int on_snapshot_load(::braft::SnapshotReader *reader) override; + int on_snapshot_load(::braft::SnapshotReader* reader) override; /** - * new leader在apply noop之后会调用此接口,表示此 leader可 - * 以提供read/write服务了。 - * @param term:当前leader任期 + * The new leader will call this interface after applying noop, indicating + * that this leader can provide read/write services. 
+     * @param term: Current leader term
    */
    void on_leader_start(int64_t term) override;

    /**
-     * leader step down的时候调用
-     * @param status:复制组的状态
+     * Called when the leader steps down
+     * @param status: The status of the replication group
    */
-    void on_leader_stop(const butil::Status &status) override;
+    void on_leader_stop(const butil::Status& status) override;

    /**
-     * 复制组发生错误的时候调用
-     * @param e:具体的 error
+     * Called when an error occurs in the replication group
+     * @param e: The specific error
    */
-    void on_error(const ::braft::Error &e) override;
+    void on_error(const ::braft::Error& e) override;

    /**
-     * 配置变更日志entry apply的时候会调用此函数,目前会利用此接口
-     * 更新配置epoch值
-     * @param conf:当前复制组最新的配置
+     * Called when a configuration-change log entry is applied; currently this
+     * interface is used to update the configuration epoch value
+     * @param conf: The latest configuration of the current replication group
     * @param index log index
    */
-    void on_configuration_committed(const Configuration& conf, int64_t index) override;  //NOLINT
+    void on_configuration_committed(const Configuration& conf,
+                                    int64_t index) override;  // NOLINT

    /**
-     * 当follower停止following主的时候调用
-     * @param ctx:可以获取stop following的原因
+     * Called when the follower stops following the leader
+     * @param ctx: Provides the reason for stopping following
    */
-    void on_stop_following(const ::braft::LeaderChangeContext &ctx) override;
+    void on_stop_following(const ::braft::LeaderChangeContext& ctx) override;

    /**
-     * Follower或者Candidate发现新的leader后调用
-     * @param ctx:leader变更上下,可以获取new leader和start following的原因
+     * Called after a Follower or Candidate discovers a new leader
+     * @param ctx: Leader change context; provides the new leader and the
+     * reason for starting to follow
    */
-    void on_start_following(const ::braft::LeaderChangeContext &ctx) override;
+    void on_start_following(const ::braft::LeaderChangeContext& ctx) override;

    /**
-     * 用于测试注入mock依赖
+     * Used to inject mock dependencies for testing
    */
 public:
    void SetCSDateStore(std::shared_ptr<CSDataStore> datastore);
@@ -435,22 +448,22 @@ class CopysetNode : public braft::StateMachine,
    // shared to sync pool
    static std::shared_ptr<TaskThreadPool<>> copysetSyncPool_;
    /**
-     * 从文件中解析copyset配置版本信息
-     * @param filePath:文件路径
-     * @return 0: successs, -1 failed
+     * Parse the copyset configuration version information from a file
+     * @param filePath: File path
+     * @return 0: success, -1: failure
    */
-    int LoadConfEpoch(const std::string &filePath);
+    int LoadConfEpoch(const std::string& filePath);

    /**
-     * 保存copyset配置版本信息到文件中
-     * @param filePath:文件路径
-     * @return 0 成功,-1 failed
+     * Save the copyset configuration version information to a file
+     * @param filePath: File path
+     * @return 0: success, -1: failure
    */
-    int SaveConfEpoch(const std::string &filePath);
+    int SaveConfEpoch(const std::string& filePath);

 public:
-    void save_snapshot_background(::braft::SnapshotWriter *writer,
-                                  ::braft::Closure *done);
+    void save_snapshot_background(::braft::SnapshotWriter* writer,
+                                  ::braft::Closure* done);

    void ShipToSync(ChunkID chunkId) {
        if (enableOdsyncWhenOpenChunkFile_) {
@@ -470,58 +483,58 @@ class CopysetNode : public braft::StateMachine,
    void WaitSnapshotDone();

 private:
-    inline std::string GroupId() {
-        return ToGroupId(logicPoolId_, copysetId_);
-    }
+    inline std::string GroupId() { return ToGroupId(logicPoolId_, copysetId_); }

    inline std::string GroupIdString() {
        return ToGroupIdString(logicPoolId_, copysetId_);
    }

 private:
-    // 逻辑池 id
+    // Logical pool ID
    LogicPoolID logicPoolId_;
-    // 复制组 id
+    // Copyset ID
    CopysetID copysetId_;
-    // 复制组的配置
-    Configuration conf_;
-    // 复制组的配置操作锁
-    mutable std::mutex confLock_;
-    // 复制组的配置版本
+    // Configuration of the replication group
+    Configuration conf_;
+    // Lock protecting the replication group configuration
+    mutable std::mutex confLock_;
+    // Configuration epoch of the replication group
     std::atomic<uint64_t> epoch_;
-    // 复制组副本的peer id
+    // Peer ID of this replica of the replication group
     PeerId peerId_;
-    // braft Node的配置参数
+    // Configuration parameters of the braft Node
     NodeOptions nodeOptions_;
-    // CopysetNode对应的braft Node
+    // The braft Node corresponding to this CopysetNode
     std::shared_ptr<RaftNode> raftNode_;
-    // chunk file的绝对目录
+    // Absolute directory of chunk files
     std::string chunkDataApath_;
-    // chunk file的相对目录
+    // Relative directory of chunk files
     std::string chunkDataRpath_;
-    // copyset绝对路径
+    // Absolute path of the copyset
     std::string copysetDirPath_;
-    // 文件系统适配器
+    // File system adapter
     std::shared_ptr<LocalFileSystem> fs_;
-    // Chunk持久化操作接口
+    // Chunk persistence interface
     std::shared_ptr<CSDataStore> dataStore_;
     // The log storage for braft
     CurveSegmentLogStorage* logStorage_;
-    // 并发模块
-    ConcurrentApplyModule *concurrentapply_ = nullptr;
-    // 配置版本持久化工具接口
+    // Concurrency module
+    ConcurrentApplyModule* concurrentapply_ = nullptr;
+    // Interface of the configuration epoch persistence tool
     std::unique_ptr<ConfEpochFile> epochFile_;
-    // 复制组的apply index
+    // Applied index of the replication group
     std::atomic<uint64_t> appliedIndex_;
-    // 复制组当前任期,如果<=0表明不是leader
+    // Current term of the replica; <= 0 means it is not the leader
     std::atomic<int64_t> leaderTerm_;
-    // 复制组数据回收站目录
+    // Recycle bin directory for copyset data
     std::string recyclerUri_;
-    // 复制组的metric信息
+    // Metric information of the copyset
     CopysetMetricPtr metric_;
-    // 正在进行中的配置变更
+    // Configuration change currently in progress
     std::shared_ptr<ConfigurationChange> configChange_;
-    // transfer leader的目标,状态为TRANSFERRING时有效
+    // Target of transfer leader; valid when the status is TRANSFERRING
     Peer transferee_;
     int64_t lastSnapshotIndex_;
     // scan status
diff --git a/src/chunkserver/copyset_node_manager.cpp b/src/chunkserver/copyset_node_manager.cpp
index 78f4afec89..9c856ccb50 100755
--- a/src/chunkserver/copyset_node_manager.cpp
+++ b/src/chunkserver/copyset_node_manager.cpp
@@ -22,27 +22,26 @@

 #include "src/chunkserver/copyset_node_manager.h"

-#include
 #include
 #include
+#include

-#include
 #include
 #include
+#include
+
+#include "src/chunkserver/braft_cli_service.h"
+#include "src/chunkserver/braft_cli_service2.h"
+#include "src/chunkserver/chunk_service.h"
 #include "src/chunkserver/config_info.h"
 #include "src/chunkserver/copyset_node.h"
+#include "src/chunkserver/copyset_service.h"
+#include "src/chunkserver/op_request.h"
+#include "src/chunkserver/raftsnapshot/curve_file_service.h"
 #include "src/common/concurrent/task_thread_pool.h"
 #include "src/common/string_util.h"
 #include "src/common/timeutility.h"
-#include "src/chunkserver/chunk_service.h"
-#include "src/chunkserver/op_request.h"
-#include "src/chunkserver/copyset_service.h"
-#include "src/chunkserver/braft_cli_service.h"
-#include "src/chunkserver/braft_cli_service2.h"
 #include "src/common/uri_parser.h"
-#include "src/chunkserver/raftsnapshot/curve_file_service.h"
-
 namespace curve {
 namespace chunkserver {

@@ -51,7 +50,7 @@ using curve::common::TimeUtility;

 std::once_flag addServiceFlag;

-int CopysetNodeManager::Init(const CopysetNodeOptions &copysetNodeOptions) {
+int CopysetNodeManager::Init(const CopysetNodeOptions& copysetNodeOptions) {
    copysetNodeOptions_ = copysetNodeOptions;
    CopysetNode::syncTriggerSeconds_ = 
copysetNodeOptions.syncTriggerSeconds; CopysetNode::copysetSyncPool_ = @@ -71,10 +70,9 @@ int CopysetNodeManager::Run() { CopysetNode::copysetSyncPool_->Start(copysetNodeOptions_.syncConcurrency); assert(copysetNodeOptions_.syncConcurrency > 0); int ret = 0; - // 启动线程池 + // Start Thread Pool if (copysetLoader_ != nullptr) { - ret = copysetLoader_->Start( - copysetNodeOptions_.loadConcurrency); + ret = copysetLoader_->Start(copysetNodeOptions_.loadConcurrency); if (ret < 0) { LOG(ERROR) << "CopysetLoadThrottle start error. ThreadNum: " << copysetNodeOptions_.loadConcurrency; @@ -82,7 +80,7 @@ int CopysetNodeManager::Run() { } } - // 启动加载已有的copyset + // Start loading existing copyset ret = ReloadCopysets(); if (ret == 0) { loadFinished_.exchange(true, std::memory_order_acq_rel); @@ -141,28 +139,26 @@ int CopysetNodeManager::ReloadCopysets() { } uint64_t poolId = GetPoolID(groupId); uint64_t copysetId = GetCopysetID(groupId); - LOG(INFO) << "Parsed groupid " << groupId - << " as " << ToGroupIdString(poolId, copysetId); + LOG(INFO) << "Parsed groupid " << groupId << " as " + << ToGroupIdString(poolId, copysetId); if (copysetLoader_ == nullptr) { LoadCopyset(poolId, copysetId, false); } else { - copysetLoader_->Enqueue( - std::bind(&CopysetNodeManager::LoadCopyset, - this, - poolId, - copysetId, - true)); + copysetLoader_->Enqueue(std::bind(&CopysetNodeManager::LoadCopyset, + this, poolId, copysetId, true)); } } - // 如果加载成功,则等待所有copyset加载完成,关闭线程池 + // If loading is successful, wait for all copysets to load and close the + // thread pool if (copysetLoader_ != nullptr) { while (copysetLoader_->QueueSize() != 0) { ::sleep(1); } - // queue size为0,但是线程池中的线程仍然可能还在执行 - // stop内部会去join thread,以此保证所有任务执行完以后再退出 + // Even when the queue size is 0, the threads in the thread pool may + // still be executing. The 'stop' function internally performs thread + // joining to ensure that all tasks are completed before exiting. copysetLoader_->Stop(); copysetLoader_ = nullptr; } @@ -174,8 +170,8 @@ bool CopysetNodeManager::LoadFinished() { return loadFinished_.load(std::memory_order_acquire); } -void CopysetNodeManager::LoadCopyset(const LogicPoolID &logicPoolId, - const CopysetID ©setId, +void CopysetNodeManager::LoadCopyset(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, bool needCheckLoadFinished) { LOG(INFO) << "Begin to load copyset " << ToGroupIdString(logicPoolId, copysetId) @@ -183,8 +179,9 @@ void CopysetNodeManager::LoadCopyset(const LogicPoolID &logicPoolId, << (needCheckLoadFinished ? "Yes." 
: "No."); uint64_t beginTime = TimeUtility::GetTimeofDayMs(); - // chunkserver启动加载copyset阶段,会拒绝外部的创建copyset请求 - // 因此不会有其他线程加载或者创建相同copyset,此时不需要加锁 + // chunkserver starts the loading copyset phase and will reject external + // requests to create copysets Therefore, no other threads will load or + // create the same copyset, and locking is not necessary at this time Configuration conf; std::shared_ptr copysetNode = CreateCopysetNodeUnlocked(logicPoolId, copysetId, conf); @@ -205,7 +202,7 @@ void CopysetNodeManager::LoadCopyset(const LogicPoolID &logicPoolId, } LOG(INFO) << "Load copyset " << ToGroupIdString(logicPoolId, copysetId) << " end, time used (ms): " - << TimeUtility::GetTimeofDayMs() - beginTime; + << TimeUtility::GetTimeofDayMs() - beginTime; } bool CopysetNodeManager::CheckCopysetUntilLoadFinished( @@ -224,9 +221,12 @@ bool CopysetNodeManager::CheckCopysetUntilLoadFinished( } NodeStatus leaderStaus; bool getSuccess = node->GetLeaderStatus(&leaderStaus); - // 获取leader状态失败一般是由于还没选出leader或者leader心跳还未发送到当前节点 - // 正常通过几次重试可以获取到leader信息,如果重试多次都未获取到 - // 则认为copyset当前可能无法选出leader,直接退出 + // Failure to obtain leader status is usually because a leader has not + // been elected yet, or the leader's heartbeat has not been received by + // the current node. Typically, leader information can be obtained + // through several retries. If multiple retries fail to obtain the + // information, it is assumed that the copyset may not be able to elect + // a leader at the moment, and the operation exits directly. if (!getSuccess) { ++retryTimes; ::usleep(1000 * copysetNodeOptions_.electionTimeoutMs); @@ -235,8 +235,10 @@ bool CopysetNodeManager::CheckCopysetUntilLoadFinished( NodeStatus status; node->GetStatus(&status); - // 当前副本的最后一个日志落后于leader上保存的第一个日志 - // 这种情况下此副本会通过安装快照恢复,可以忽略避免阻塞检查线程 + // When the last log of the current replica lags behind the first log + // saved on the leader, in this situation, the replica will recover by + // installing a snapshot, and it can be safely ignored to avoid blocking + // the checking thread. 
bool mayInstallSnapshot = leaderStaus.first_index > status.last_index; if (mayInstallSnapshot) { LOG(WARNING) << "Copyset " @@ -250,73 +252,73 @@ bool CopysetNodeManager::CheckCopysetUntilLoadFinished( return false; } - // 判断当前副本已经apply的日志是否接近已经committed的日志 - int64_t margin = leaderStaus.committed_index - - status.known_applied_index; - bool catchupLeader = margin - < (int64_t)copysetNodeOptions_.finishLoadMargin; + // Check whether the logs applied on the current replica are close to + // the logs already committed + int64_t margin = + leaderStaus.committed_index - status.known_applied_index; + bool catchupLeader = + margin < (int64_t)copysetNodeOptions_.finishLoadMargin; if (catchupLeader) { LOG(INFO) << "Load copyset " << ToGroupIdString(logicPoolId, copysetId) << " finished, " << "leader CommittedIndex: " << leaderStaus.committed_index - << ", node appliedIndex: " - << status.known_applied_index; + << ", node appliedIndex: " << status.known_applied_index; return true; } retryTimes = 0; ::usleep(1000 * copysetNodeOptions_.checkLoadMarginIntervalMs); } - LOG(WARNING) << "check copyset " - << ToGroupIdString(logicPoolId, copysetId) + LOG(WARNING) << "check copyset " << ToGroupIdString(logicPoolId, copysetId) << " failed."; return false; } std::shared_ptr CopysetNodeManager::GetCopysetNode( - const LogicPoolID &logicPoolId, const CopysetID &copysetId) const { - /* 加读锁 */ + const LogicPoolID& logicPoolId, const CopysetID& copysetId) const { + /* Read lock */ ReadLockGuard readLockGuard(rwLock_); GroupId groupId = ToGroupId(logicPoolId, copysetId); auto it = copysetNodeMap_.find(groupId); - if (copysetNodeMap_.end() != it) - return it->second; + if (copysetNodeMap_.end() != it) return it->second; return nullptr; } void CopysetNodeManager::GetAllCopysetNodes( - std::vector *nodes) const { - /* 加读锁 */ + std::vector* nodes) const { + /* Read lock */ ReadLockGuard readLockGuard(rwLock_); for (auto it = copysetNodeMap_.begin(); it != copysetNodeMap_.end(); ++it) { nodes->push_back(it->second); } } -bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf) { +bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf) { GroupId groupId = ToGroupId(logicPoolId, copysetId); - // 如果本地copyset还未全部加载完成,不允许外部创建copyset + // If the local copysets have not all been loaded yet, external copyset + // creation is not allowed if (!loadFinished_.load(std::memory_order_acquire)) { LOG(WARNING) << "Create copyset failed: load unfinished " << ToGroupIdString(logicPoolId, copysetId); return false; } - // copysetnode析构的时候会去调shutdown,可能导致协程切出 - // 所以创建copysetnode失败的时候,不能占着写锁,等写锁释放后再析构 + // When a copyset node is destructed it calls shutdown, which may cause + // the current coroutine to be switched out. So when creating a copyset + // node fails, it must not be destructed while holding the write lock; + // destruct it after the write lock has been released. std::shared_ptr copysetNode = nullptr; - /* 加写锁 */ + /* Write lock */ WriteLockGuard writeLockGuard(rwLock_); if (copysetNodeMap_.end() == copysetNodeMap_.find(groupId)) { - copysetNode = std::make_shared(logicPoolId, - copysetId, - conf); + copysetNode = + std::make_shared(logicPoolId, copysetId, conf); if (0 != copysetNode->Init(copysetNodeOptions_)) { LOG(ERROR) << "Copyset " << ToGroupIdString(logicPoolId, copysetId) - << " init failed"; + << " init failed"; return false; } if (0 != copysetNode->Run()) { @@ -325,8 +327,7 @@
bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, return false; } copysetNodeMap_.insert(std::pair>( - groupId, - copysetNode)); + groupId, copysetNode)); LOG(INFO) << "Create copyset success " << ToGroupIdString(logicPoolId, copysetId); return true; @@ -336,8 +337,8 @@ bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, return false; } -bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, +bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const std::vector peers) { Configuration conf; for (Peer peer : peers) { @@ -348,13 +349,10 @@ bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, } std::shared_ptr CopysetNodeManager::CreateCopysetNodeUnlocked( - const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf) { + const LogicPoolID& logicPoolId, const CopysetID& copysetId, + const Configuration& conf) { std::shared_ptr copysetNode = - std::make_shared(logicPoolId, - copysetId, - conf); + std::make_shared(logicPoolId, copysetId, conf); if (0 != copysetNode->Init(copysetNodeOptions_)) { LOG(ERROR) << "Copyset " << ToGroupIdString(logicPoolId, copysetId) << " init failed"; @@ -369,13 +367,13 @@ std::shared_ptr CopysetNodeManager::CreateCopysetNodeUnlocked( return copysetNode; } -int CopysetNodeManager::AddService(brpc::Server *server, - const butil::EndPoint &listenAddress) { +int CopysetNodeManager::AddService(brpc::Server* server, + const butil::EndPoint& listenAddress) { int ret = 0; uint64_t maxInflight = 100; - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); - CopysetNodeManager *copysetNodeManager = this; + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); + CopysetNodeManager* copysetNodeManager = this; ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = copysetNodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle; @@ -401,7 +399,7 @@ int CopysetNodeManager::AddService(brpc::Server *server, ret = server->RemoveService(service); CHECK(0 == ret) << "Fail to remove braft::FileService"; ret = server->AddService(&kCurveFileService, - brpc::SERVER_DOESNT_OWN_SERVICE); + brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add CurveFileService"; // add other services @@ -413,70 +411,71 @@ int CopysetNodeManager::AddService(brpc::Server *server, brpc::SERVER_OWNS_SERVICE); CHECK(0 == ret) << "Fail to add CopysetService"; auto epochMap = std::make_shared(); - ret = server->AddService(new ChunkServiceImpl( - chunkServiceOptions, epochMap), - brpc::SERVER_OWNS_SERVICE); + ret = server->AddService( + new ChunkServiceImpl(chunkServiceOptions, epochMap), + brpc::SERVER_OWNS_SERVICE); CHECK(0 == ret) << "Fail to add ChunkService"; } while (false); return ret; } -bool CopysetNodeManager::DeleteCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +bool CopysetNodeManager::DeleteCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { bool ret = false; GroupId groupId = ToGroupId(logicPoolId, copysetId); { - // 加读锁 + // Read lock ReadLockGuard readLockGuard(rwLock_); auto it = copysetNodeMap_.find(groupId); if (copysetNodeMap_.end() != it) { - // TODO(yyk) 这部分可能存在死锁的风险,后续需要评估 + // TODO(yyk) There may be a risk of deadlock, which needs to be + // evaluated in the future it->second->Fini(); ret = true; } } { - // 加写锁 + // Write lock WriteLockGuard writeLockGuard(rwLock_); auto it =
copysetNodeMap_.find(groupId); if (copysetNodeMap_.end() != it) { copysetNodeMap_.erase(it); ret = true; LOG(INFO) << "Delete copyset " - << ToGroupIdString(logicPoolId, copysetId) - <<" success."; + << ToGroupIdString(logicPoolId, copysetId) << " success."; } } return ret; } -bool CopysetNodeManager::PurgeCopysetNodeData(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +bool CopysetNodeManager::PurgeCopysetNodeData(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { bool ret = false; GroupId groupId = ToGroupId(logicPoolId, copysetId); { - // 加读锁 + // Read lock ReadLockGuard readLockGuard(rwLock_); auto it = copysetNodeMap_.find(groupId); if (copysetNodeMap_.end() != it) { - // TODO(yyk) 这部分可能存在死锁的风险,后续需要评估 + // TODO(yyk) There may be a risk of deadlock, which needs to be + // evaluated in the future it->second->Fini(); ret = true; } } { - // 加写锁 + // Write lock WriteLockGuard writeLockGuard(rwLock_); auto it = copysetNodeMap_.find(groupId); if (copysetNodeMap_.end() != it) { if (0 != copysetNodeOptions_.trash->RecycleCopySet( - it->second->GetCopysetDir())) { + it->second->GetCopysetDir())) { LOG(ERROR) << "Failed to remove copyset " << ToGroupIdString(logicPoolId, copysetId) << " persistently."; @@ -519,18 +518,18 @@ bool CopysetNodeManager::DeleteBrokenCopyset(const LogicPoolID& poolId, return true; } -bool CopysetNodeManager::IsExist(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { - /* 加读锁 */ +bool CopysetNodeManager::IsExist(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { + /* Read lock */ ReadLockGuard readLockGuard(rwLock_); GroupId groupId = ToGroupId(logicPoolId, copysetId); return copysetNodeMap_.end() != copysetNodeMap_.find(groupId); } bool CopysetNodeManager::InsertCopysetNodeIfNotExist( - const LogicPoolID &logicPoolId, const CopysetID &copysetId, + const LogicPoolID& logicPoolId, const CopysetID& copysetId, std::shared_ptr node) { - /* 加写锁 */ + /* Write lock */ WriteLockGuard writeLockGuard(rwLock_); GroupId groupId = ToGroupId(logicPoolId, copysetId); auto it = copysetNodeMap_.find(groupId); diff --git a/src/chunkserver/copyset_node_manager.h b/src/chunkserver/copyset_node_manager.h index 8294b21e0f..5336025227 100755 --- a/src/chunkserver/copyset_node_manager.h +++ b/src/chunkserver/copyset_node_manager.h @@ -23,209 +23,215 @@ #ifndef SRC_CHUNKSERVER_COPYSET_NODE_MANAGER_H_ #define SRC_CHUNKSERVER_COPYSET_NODE_MANAGER_H_ -#include //NOLINT -#include #include +#include //NOLINT #include +#include #include "src/chunkserver/copyset_node.h" #include "src/common/concurrent/rw_lock.h" -#include "src/common/uncopyable.h" #include "src/common/concurrent/task_thread_pool.h" +#include "src/common/uncopyable.h" namespace curve { namespace chunkserver { using curve::common::BthreadRWLock; using curve::common::ReadLockGuard; -using curve::common::WriteLockGuard; using curve::common::TaskThreadPool; +using curve::common::WriteLockGuard; class ChunkOpRequest; /** - * Copyset Node的管理者 + * Manager of copyset nodes */ class CopysetNodeManager : public curve::common::Uncopyable { public: using CopysetNodePtr = std::shared_ptr; - // 单例,仅仅在 c++11或者更高版本下正确 - static CopysetNodeManager &GetInstance() { + // Singleton; only guaranteed correct under C++11 or later + static CopysetNodeManager& GetInstance() { static CopysetNodeManager instance; return instance; } virtual ~CopysetNodeManager() = default; - int Init(const CopysetNodeOptions &copysetNodeOptions); + int Init(const CopysetNodeOptions& copysetNodeOptions); int Run(); int Fini(); /** - * @brief
加载目录下的所有copyset + * @brief Load all copysets in the directory * - * @return 0表示加载成功,非0表示加载失败 + * @return 0 on success, non-zero on failure */ int ReloadCopysets(); /** - * 创建copyset node,两种情况需要创建copyset node - * TODO(wudemiao): 后期替换之后删除掉 - * 1.集群初始化,创建copyset - * 2.恢复的时候add peer + * Create a copyset node; it is needed in two situations: + * TODO(wudemiao): Delete after later replacement + * 1. Cluster initialization: create the copyset + * 2. Adding a peer during recovery */ - bool CreateCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf); + bool CreateCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf); /** - * 都是创建copyset,目前两个同时存在,后期仅仅保留一个 + * Both overloads create a copyset; they currently coexist, and only one + * will be kept in the future */ - bool CreateCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, + bool CreateCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const std::vector peers); /** - * 删除copyset node内存实例(停止copyset, 销毁copyset内存实例并从copyset - * manager的copyset表中清除copyset表项,并不影响盘上的copyset持久化数据) - * @param logicPoolId:逻辑池id - * @param copysetId:复制组id - * @return true 成功,false失败 + * Delete the in-memory copyset node instance (stops the copyset, destroys + * the in-memory instance, and removes the entry from the copyset manager's + * copyset table; the copyset's persisted data on disk is not affected) + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copyset ID + * @return true succeeded, false failed */ - bool DeleteCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + bool DeleteCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 彻底删除copyset node内存数据(停止copyset, 销毁copyset内存实例并从 - * copyset manager的copyset表中清除copyset表项,并将copyset持久化数据从盘 - * 上彻底删除) - * @param logicPoolId:逻辑池id - * @param copysetId:复制组id - * @return true 成功,false失败 + * Completely delete the copyset node (stops the copyset, destroys the + * in-memory instance, removes the entry from the copyset manager's copyset + * table, and permanently deletes the copyset's persisted data from disk) + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copyset ID + * @return true succeeded, false failed */ - bool PurgeCopysetNodeData(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + bool PurgeCopysetNodeData(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** * @brief Delete broken copyset * @param[in] poolId logical pool id * @param[in] copysetId copyset id * @return true if delete success, else return false - */ + */ bool DeleteBrokenCopyset(const LogicPoolID& poolId, const CopysetID& copysetId); /** - * 判断指定的copyset是否存在 - * @param logicPoolId:逻辑池子id - * @param copysetId:复制组id - * @return true存在,false不存在 + * Determine whether the specified copyset exists + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copyset ID + * @return true exists, false does not exist */ - bool IsExist(const LogicPoolID &logicPoolId, const CopysetID &copysetId); + bool IsExist(const LogicPoolID& logicPoolId, const CopysetID& copysetId); /** - * 获取指定的copyset - * @param logicPoolId:逻辑池子id - * @param copysetId:复制组id - * @return nullptr则为没查询到 + * Get the specified copyset + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copyset
ID + * @return nullptr if the copyset is not found */ - virtual CopysetNodePtr GetCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) const; + virtual CopysetNodePtr GetCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) const; /** - * 查询所有的copysets - * @param nodes:出参,返回所有的copyset + * Query all copysets + * @param nodes: output parameter that returns all copyset nodes */ - void GetAllCopysetNodes(std::vector *nodes) const; + void GetAllCopysetNodes(std::vector* nodes) const; /** - * 添加RPC service - * TODO(wudemiao): 目前仅仅用于测试,后期完善了会删除掉 - * @param server:rpc Server - * @param listenAddress:监听的地址 - * @return 0成功,-1失败 + * Add RPC service + * TODO(wudemiao): Currently only used for testing, and will be removed + * after later refinement + * @param server: rpc Server + * @param listenAddress: The address to listen on + * @return 0 succeeded, -1 failed */ - int AddService(brpc::Server *server, - const butil::EndPoint &listenAddress); + int AddService(brpc::Server* server, const butil::EndPoint& listenAddress); - virtual const CopysetNodeOptions &GetCopysetNodeOptions() const { + virtual const CopysetNodeOptions& GetCopysetNodeOptions() const { return copysetNodeOptions_; } /** * @brief: Only for test */ - void SetCopysetNodeOptions( - const CopysetNodeOptions& copysetNodeOptions) { + void SetCopysetNodeOptions(const CopysetNodeOptions& copysetNodeOptions) { copysetNodeOptions_ = copysetNodeOptions; } /** - * 加载copyset,包括新建一个copyset或者重启一个copyset - * @param logicPoolId: 逻辑池id + * Load a copyset: either create a new one or restart an existing one + * @param logicPoolId: Logical Pool ID * @param copysetId: copyset id - * @param needCheckLoadFinished: 是否需要判断copyset加载完成 + * @param needCheckLoadFinished: whether to check that the copyset has + * finished loading */ - void LoadCopyset(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, + void LoadCopyset(const LogicPoolID& logicPoolId, const CopysetID& copysetId, bool needCheckLoadFinished); /** - * 检测指定的copyset状态,直到copyset加载完成或出现异常 - * @param node: 指定的copyset node - * @return true表示加载成功,false表示检测过程中出现异常 + * Check the state of the specified copyset until it finishes loading or an + * exception occurs + * @param node: The specified copyset node + * @return true indicates successful loading, while false indicates an + * exception occurred during the check */ bool CheckCopysetUntilLoadFinished(std::shared_ptr node); /** - * 获取copysetNodeManager加载copyset的状态 - * @return false-copyset未加载完成 true-copyset已加载完成 + * Get the copyset-loading status of the copysetNodeManager + * @return false - copysets not fully loaded, true - loading complete */ virtual bool LoadFinished(); protected: CopysetNodeManager() - : copysetLoader_(nullptr) - , running_(false) - , loadFinished_(false) {} + : copysetLoader_(nullptr), running_(false), loadFinished_(false) {} private: /** - * 如果指定copyset不存在,则将copyset插入到map当中(线程安全) - * @param logicPoolId:逻辑池id - * @param copysetId:复制组id - * @param node:要插入的copysetnode - * @return copyset不存在,则插入到map并返回true; - * copyset如果存在,则返回false + * If the specified copyset does not exist, insert it into the map + * (thread-safe) + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copyset ID + * @param node: The copyset node to be inserted + * @return If the copyset does not exist, insert it into the map and return + * true; if the copyset exists, return false */ - bool InsertCopysetNodeIfNotExist(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, + bool
InsertCopysetNodeIfNotExist(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, std::shared_ptr node); /** - * 创建一个新的copyset或加载一个已存在的copyset(非线程安全) - * @param logicPoolId:逻辑池id - * @param copysetId:复制组id - * @param conf:此copyset的配置成员 - * @return 创建或加载成功返回copysetnode,否则返回nullptr + * Create a new copyset or load an existing one (not thread-safe) + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copyset ID + * @param conf: The configuration members of this copyset + * @return the copyset node on success, nullptr otherwise */ std::shared_ptr CreateCopysetNodeUnlocked( - const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf); + const LogicPoolID& logicPoolId, const CopysetID& copysetId, + const Configuration& conf); private: - using CopysetNodeMap = std::unordered_map>; - // 保护复制组 map的读写锁 + using CopysetNodeMap = + std::unordered_map>; + // Read-write lock protecting the copyset map mutable BthreadRWLock rwLock_; - // 复制组map + // Copyset map CopysetNodeMap copysetNodeMap_; - // 复制组配置选项 + // Copyset configuration options CopysetNodeOptions copysetNodeOptions_; - // 控制copyset并发启动的数量 + // Controls the number of copysets started concurrently std::shared_ptr> copysetLoader_; - // 表示copyset node manager当前是否正在运行 + // Indicates whether the copyset node manager is currently running Atomic running_; - // 表示copyset node manager当前是否已经完成加载 + // Indicates whether the copyset node manager has finished loading Atomic loadFinished_; }; diff --git a/src/chunkserver/copyset_service.cpp b/src/chunkserver/copyset_service.cpp index e09516c0ad..9082024b4c 100755 --- a/src/chunkserver/copyset_service.cpp +++ b/src/chunkserver/copyset_service.cpp @@ -20,36 +20,36 @@ * Author: wudemiao */ +#include "src/chunkserver/copyset_service.h" + #include #include -#include #include +#include -#include "src/chunkserver/copyset_service.h" #include "src/chunkserver/copyset_node_manager.h" namespace curve { namespace chunkserver { -void CopysetServiceImpl::CreateCopysetNode(RpcController *controller, - const CopysetRequest *request, - CopysetResponse *response, - Closure *done) { +void CopysetServiceImpl::CreateCopysetNode(RpcController* controller, + const CopysetRequest* request, + CopysetResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); LOG(INFO) << "Received create copyset request: " << ToGroupIdString(request->logicpoolid(), request->copysetid()); - // 解析request中的peers + // Parse the peers in the request Configuration conf; for (int i = 0; i < request->peerid_size(); ++i) { PeerId peer; int ret = peer.parse(request->peerid(i)); if (ret != 0) { - cntl->SetFailed(EINVAL, - "Fail to parse peer id %s", + cntl->SetFailed(EINVAL, "Fail to parse peer id %s", request->peerid(i).c_str()); return; } @@ -59,12 +59,9 @@ void CopysetServiceImpl::CreateCopysetNode(RpcController *controller, LogicPoolID logicPoolID = request->logicpoolid(); CopysetID copysetID = request->copysetid(); GroupId groupId = ToGroupId(logicPoolID, copysetID); - if (false == copysetNodeManager_->IsExist(logicPoolID, - copysetID)) { - if (true == - copysetNodeManager_->CreateCopysetNode(logicPoolID, - copysetID, - conf)) { + if (false == copysetNodeManager_->IsExist(logicPoolID, copysetID)) { + if (true == copysetNodeManager_->CreateCopysetNode(logicPoolID, + copysetID, conf)) {
response->set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } else { response->set_status( @@ -80,10 +77,10 @@ void CopysetServiceImpl::CreateCopysetNode(RpcController *controller, << COPYSET_OP_STATUS_Name(response->status()); } -void CopysetServiceImpl::CreateCopysetNode2(RpcController *controller, - const CopysetRequest2 *request, - CopysetResponse2 *response, - Closure *done) { +void CopysetServiceImpl::CreateCopysetNode2(RpcController* controller, + const CopysetRequest2* request, + CopysetResponse2* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); @@ -103,31 +100,32 @@ void CopysetServiceImpl::CreateCopysetNode2(RpcController *controller, if (true == copysetNodeManager_->IsExist(copyset.logicpoolid(), copyset.copysetid())) { response->set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST); - LOG(WARNING) << "Create copyset " - << ToGroupIdString(copyset.logicpoolid(), - copyset.copysetid()) - << " failed, response code: " - << COPYSET_OP_STATUS_Name(COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST); //NOLINT + LOG(WARNING) + << "Create copyset " + << ToGroupIdString(copyset.logicpoolid(), + copyset.copysetid()) + << " failed, response code: " + << COPYSET_OP_STATUS_Name( + COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST); // NOLINT return; } - if (false == - copysetNodeManager_->CreateCopysetNode(copyset.logicpoolid(), - copyset.copysetid(), - peers)) { + if (false == copysetNodeManager_->CreateCopysetNode( + copyset.logicpoolid(), copyset.copysetid(), peers)) { response->set_status( COPYSET_OP_STATUS::COPYSET_OP_STATUS_FAILURE_UNKNOWN); LOG(ERROR) << "Create copyset " << ToGroupIdString(copyset.logicpoolid(), copyset.copysetid()) << " failed, response code: " - << COPYSET_OP_STATUS_Name(COPYSET_OP_STATUS::COPYSET_OP_STATUS_FAILURE_UNKNOWN); //NOLINT + << COPYSET_OP_STATUS_Name( + COPYSET_OP_STATUS:: + COPYSET_OP_STATUS_FAILURE_UNKNOWN); // NOLINT return; } LOG(INFO) << "Create copyset " - << ToGroupIdString(copyset.logicpoolid(), - copyset.copysetid()) + << ToGroupIdString(copyset.logicpoolid(), copyset.copysetid()) << " success."; } @@ -151,7 +149,7 @@ void CopysetServiceImpl::DeleteBrokenCopyset(RpcController* controller, // if copyset node exist in the manager means its data is complete if (copysetNodeManager_->IsExist(poolId, copysetId)) { response->set_status(COPYSET_OP_STATUS_COPYSET_IS_HEALTHY); - LOG(WARNING) << "Delete broken copyset, " << groupId << " is healthy"; + LOG(WARNING) << "Delete broken copyset, " << groupId << " is healthy"; } else if (!copysetNodeManager_->DeleteBrokenCopyset(poolId, copysetId)) { response->set_status(COPYSET_OP_STATUS_FAILURE_UNKNOWN); LOG(ERROR) << "Delete broken copyset " << groupId << " failed"; @@ -161,17 +159,17 @@ void CopysetServiceImpl::DeleteBrokenCopyset(RpcController* controller, } } -void CopysetServiceImpl::GetCopysetStatus(RpcController *controller, - const CopysetStatusRequest *request, - CopysetStatusResponse *response, - Closure *done) { +void CopysetServiceImpl::GetCopysetStatus(RpcController* controller, + const CopysetStatusRequest* request, + CopysetStatusResponse* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); LOG(INFO) << "Received GetCopysetStatus request: " << ToGroupIdString(request->logicpoolid(), request->copysetid()); - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -183,14 +181,14 @@ void 
CopysetServiceImpl::GetCopysetStatus(RpcController *controller, return; } - // 获取raft node status + // Get the raft node status NodeStatus status; nodePtr->GetStatus(&status); response->set_state(status.state); - Peer *peer = new Peer(); + Peer* peer = new Peer(); response->set_allocated_peer(peer); peer->set_address(status.peer_id.to_string()); - Peer *leader = new Peer(); + Peer* leader = new Peer(); response->set_allocated_leader(leader); leader->set_address(status.leader_id.to_string()); response->set_readonly(status.readonly); @@ -204,13 +202,14 @@ void CopysetServiceImpl::GetCopysetStatus(RpcController *controller, response->set_lastindex(status.last_index); response->set_diskindex(status.disk_index); - // 获取配置的版本 + // Get the configuration version (epoch) response->set_epoch(nodePtr->GetConfEpoch()); /** - * 考虑到query hash需要读取copyset的所有chunk数据,然后计算hash值 - * 是一个非常耗时的操作,所以在request会设置query hash字段,如果 - * 为false,那么就不需要查询copyset的hash值 + * Since query hash requires reading all of the copyset's chunk data and + * then computing the hash value, which is a very time-consuming + * operation, the request carries a "query hash" field; if it is set to + * false, the copyset's hash value does not need to be queried. */ if (request->queryhash()) { std::string hash; @@ -228,8 +227,7 @@ void CopysetServiceImpl::GetCopysetStatus(RpcController *controller, response->set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); LOG(INFO) << "GetCopysetStatus success: " - << ToGroupIdString(request->logicpoolid(), - request->copysetid()); + << ToGroupIdString(request->logicpoolid(), request->copysetid()); } } // namespace chunkserver diff --git a/src/chunkserver/copyset_service.h b/src/chunkserver/copyset_service.h index fabf6df8fc..7025b6e9dd 100755 --- a/src/chunkserver/copyset_service.h +++ b/src/chunkserver/copyset_service.h @@ -28,51 +28,48 @@ namespace curve { namespace chunkserver { -using ::google::protobuf::RpcController; using ::google::protobuf::Closure; +using ::google::protobuf::RpcController; class CopysetNodeManager; /** - * 复制组管理的Rpc服务,目前仅有创建复制组 + * RPC service for replication group management; currently it only supports + * creating replication groups */ class CopysetServiceImpl : public CopysetService { public: - explicit CopysetServiceImpl(CopysetNodeManager* copysetNodeManager) : - copysetNodeManager_(copysetNodeManager) {} + explicit CopysetServiceImpl(CopysetNodeManager* copysetNodeManager) + : copysetNodeManager_(copysetNodeManager) {} ~CopysetServiceImpl() {} /** - * 创建复制组,一次只能创建一个 + * Create a replication group; only one can be created per request */ - void CreateCopysetNode(RpcController *controller, - const CopysetRequest *request, - CopysetResponse *response, - Closure *done); + void CreateCopysetNode(RpcController* controller, + const CopysetRequest* request, + CopysetResponse* response, Closure* done); /* - * 创建复制组,一次可以创建多个 + * Create replication groups; multiple can be created in one request */ - void CreateCopysetNode2(RpcController *controller, - const CopysetRequest2 *request, - CopysetResponse2 *response, - Closure *done); + void CreateCopysetNode2(RpcController* controller, + const CopysetRequest2* request, + CopysetResponse2* response, Closure* done); /** * @brief Delete broken copyset */ void DeleteBrokenCopyset(RpcController* controller, const CopysetRequest* request, - CopysetResponse* response, - Closure* done); + CopysetResponse* response, Closure* done); - void GetCopysetStatus(RpcController *controller, - const CopysetStatusRequest *request, - CopysetStatusResponse
*response, - Closure *done); + void GetCopysetStatus(RpcController* controller, + const CopysetStatusRequest* request, + CopysetStatusResponse* response, Closure* done); private: - // 复制组管理者 + // Replication group manager CopysetNodeManager* copysetNodeManager_; }; diff --git a/src/chunkserver/heartbeat.cpp b/src/chunkserver/heartbeat.cpp index 0e756b29c6..8bba11ecae 100644 --- a/src/chunkserver/heartbeat.cpp +++ b/src/chunkserver/heartbeat.cpp @@ -21,21 +21,22 @@ * 2018/12/20 Wenyu Zhou Initial version */ -#include -#include +#include "src/chunkserver/heartbeat.h" + +#include #include #include -#include +#include +#include -#include #include +#include -#include "src/fs/fs_common.h" -#include "src/common/timeutility.h" -#include "src/chunkserver/heartbeat.h" -#include "src/common/uri_parser.h" #include "src/chunkserver/heartbeat_helper.h" #include "src/common/curve_version.h" +#include "src/common/timeutility.h" +#include "src/common/uri_parser.h" +#include "src/fs/fs_common.h" using curve::fs::FileSystemInfo; @@ -55,7 +56,7 @@ TaskStatus Heartbeat::PurgeCopyset(LogicPoolID poolId, CopysetID copysetId) { return TaskStatus::OK(); } -int Heartbeat::Init(const HeartbeatOptions &options) { +int Heartbeat::Init(const HeartbeatOptions& options) { toStop_.store(false, std::memory_order_release); options_ = options; @@ -68,13 +69,13 @@ int Heartbeat::Init(const HeartbeatOptions &options) { csEp_ = butil::EndPoint(csIp, options_.port); LOG(INFO) << "Chunkserver address: " << options_.ip << ":" << options_.port; - // mdsEps不能为空 + // mdsEps cannot be empty ::curve::common::SplitString(options_.mdsListenAddr, ",", &mdsEps_); if (mdsEps_.empty()) { LOG(ERROR) << "Invalid mds ip provided: " << options_.mdsListenAddr; return -1; } - // 检查每个地址的合法性 + // Check the validity of each address for (auto addr : mdsEps_) { butil::EndPoint endpt; if (butil::str2endpoint(addr.c_str(), &endpt) < 0) { @@ -88,10 +89,10 @@ int Heartbeat::Init(const HeartbeatOptions &options) { copysetMan_ = options.copysetNodeManager; - // 初始化timer + // Initialize the timer waitInterval_.Init(options_.intervalSec * 1000); - // 获取当前unix时间戳 + // Get the current Unix timestamp startUpTime_ = ::curve::common::TimeUtility::GetTimeofDaySec(); // init scanManager @@ -157,7 +158,7 @@ int Heartbeat::BuildCopysetInfo(curve::mds::heartbeat::CopySetInfo* info, } auto failedScanMaps = copyset->GetFailedScanMap(); if (!failedScanMaps.empty()) { - for (auto &map : failedScanMaps) { + for (auto& map : failedScanMaps) { info->add_scanmap()->CopyFrom(map); } } @@ -209,7 +210,7 @@ int Heartbeat::BuildCopysetInfo(curve::mds::heartbeat::CopySetInfo* info, } ConfigChangeInfo* confChxInfo = new ConfigChangeInfo(); - replica = new(std::nothrow) ::curve::common::Peer(); + replica = new (std::nothrow)::curve::common::Peer(); if (replica == nullptr) { LOG(ERROR) << "apply memory error"; return -1; @@ -237,7 +238,7 @@ int Heartbeat::BuildRequest(HeartbeatRequest* req) { * is ready */ curve::mds::heartbeat::DiskState* diskState = - new curve::mds::heartbeat::DiskState(); + new curve::mds::heartbeat::DiskState(); diskState->set_errtype(0); diskState->set_errmsg(""); req->set_allocated_diskstate(diskState); @@ -256,21 +257,21 @@ int Heartbeat::BuildRequest(HeartbeatRequest* req) { CopysetNodeOptions opt = copysetMan_->GetCopysetNodeOptions(); uint64_t chunkFileSize = opt.maxChunkSize; uint64_t walSegmentFileSize = opt.maxWalSegmentSize; - uint64_t usedChunkSize = metric->GetTotalSnapshotCount() * chunkFileSize - + metric->GetTotalChunkCount() * chunkFileSize; - uint64_t
usedWalSegmentSize = metric->GetTotalWalSegmentCount() - * walSegmentFileSize; + uint64_t usedChunkSize = metric->GetTotalSnapshotCount() * chunkFileSize + + metric->GetTotalChunkCount() * chunkFileSize; + uint64_t usedWalSegmentSize = + metric->GetTotalWalSegmentCount() * walSegmentFileSize; uint64_t trashedChunkSize = metric->GetChunkTrashedCount() * chunkFileSize; uint64_t leftChunkSize = metric->GetChunkLeftCount() * chunkFileSize; // leftWalSegmentSize will be 0 when CHUNK and WAL share file pool - uint64_t leftWalSegmentSize = metric->GetWalSegmentLeftCount() - * walSegmentFileSize; + uint64_t leftWalSegmentSize = + metric->GetWalSegmentLeftCount() * walSegmentFileSize; uint64_t chunkPoolSize = options_.chunkFilePool->Size() * - options_.chunkFilePool->GetFilePoolOpt().fileSize; + options_.chunkFilePool->GetFilePoolOpt().fileSize; stats->set_chunkfilepoolsize(chunkPoolSize); - stats->set_chunksizeusedbytes(usedChunkSize+usedWalSegmentSize); - stats->set_chunksizeleftbytes(leftChunkSize+leftWalSegmentSize); + stats->set_chunksizeusedbytes(usedChunkSize + usedWalSegmentSize); + stats->set_chunksizeleftbytes(leftChunkSize + leftWalSegmentSize); stats->set_chunksizetrashedbytes(trashedChunkSize); req->set_allocated_stats(stats); @@ -297,7 +298,7 @@ int Heartbeat::BuildRequest(HeartbeatRequest* req) { if (ret != 0) { LOG(ERROR) << "Failed to build heartbeat information of copyset " << ToGroupIdStr(copyset->GetLogicPoolId(), - copyset->GetCopysetId()); + copyset->GetCopysetId()); continue; } if (copyset->IsLeaderTerm()) { @@ -311,17 +312,16 @@ int Heartbeat::BuildRequest(HeartbeatRequest* req) { } void Heartbeat::DumpHeartbeatRequest(const HeartbeatRequest& request) { - DVLOG(6) << "Heartbeat request: Chunkserver ID: " - << request.chunkserverid() + DVLOG(6) << "Heartbeat request: Chunkserver ID: " << request.chunkserverid() << ", IP: " << request.ip() << ", port: " << request.port() << ", copyset count: " << request.copysetcount() << ", leader count: " << request.leadercount(); - for (int i = 0; i < request.copysetinfos_size(); i ++) { + for (int i = 0; i < request.copysetinfos_size(); i++) { const curve::mds::heartbeat::CopySetInfo& info = request.copysetinfos(i); std::string peersStr = ""; - for (int j = 0; j < info.peers_size(); j ++) { + for (int j = 0; j < info.peers_size(); j++) { peersStr += info.peers(j).address() + ","; } @@ -345,23 +345,24 @@ void Heartbeat::DumpHeartbeatResponse(const HeartbeatResponse& response) { int count = response.needupdatecopysets_size(); if (count > 0) { LOG(INFO) << "Received " << count << " config change commands:"; - for (int i = 0; i < count; i ++) { + for (int i = 0; i < count; i++) { CopySetConf conf = response.needupdatecopysets(i); int type = (conf.has_type()) ? conf.type() : 0; - std::string item = (conf.has_configchangeitem()) ? - conf.configchangeitem().address() : ""; + std::string item = (conf.has_configchangeitem()) + ? 
conf.configchangeitem().address() + : ""; std::string peersStr = ""; - for (int j = 0; j < conf.peers_size(); j ++) { + for (int j = 0; j < conf.peers_size(); j++) { peersStr += conf.peers(j).address(); } LOG(INFO) << "Config change " << i << ": " - << "Copyset < " << conf.logicalpoolid() - << ", " << conf.copysetid() << ">, epoch: " - << conf.epoch() << ", Peers: " << peersStr - << ", type: " << type << ", item: " << item; + << "Copyset < " << conf.logicalpoolid() << ", " + << conf.copysetid() << ">, epoch: " << conf.epoch() + << ", Peers: " << peersStr << ", type: " << type + << ", item: " << item; } } else { LOG(INFO) << "Received no config change command."; @@ -386,8 +387,7 @@ int Heartbeat::SendHeartbeat(const HeartbeatRequest& request, stub.ChunkServerHeartbeat(&cntl, &request, response, nullptr); if (cntl.Failed()) { - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == ETIMEDOUT || + if (cntl.ErrorCode() == EHOSTDOWN || cntl.ErrorCode() == ETIMEDOUT || cntl.ErrorCode() == brpc::ELOGOFF || cntl.ErrorCode() == brpc::ERPCTIMEDOUT) { LOG(WARNING) << "current mds: " << mdsEps_[inServiceIndex_] @@ -397,10 +397,10 @@ int Heartbeat::SendHeartbeat(const HeartbeatRequest& request, << mdsEps_[inServiceIndex_]; } else { LOG(ERROR) << csEp_.ip << ":" << csEp_.port - << " Fail to send heartbeat to MDS " - << mdsEps_[inServiceIndex_] << "," - << " cntl errorCode: " << cntl.ErrorCode() - << " cntl error: " << cntl.ErrorText(); + << " Fail to send heartbeat to MDS " + << mdsEps_[inServiceIndex_] << "," + << " cntl errorCode: " << cntl.ErrorCode() + << " cntl error: " << cntl.ErrorText(); } return -1; } else { @@ -412,59 +412,61 @@ int Heartbeat::SendHeartbeat(const HeartbeatRequest& request, int Heartbeat::ExecTask(const HeartbeatResponse& response) { int count = response.needupdatecopysets_size(); - for (int i = 0; i < count; i ++) { + for (int i = 0; i < count; i++) { CopySetConf conf = response.needupdatecopysets(i); - CopysetNodePtr copyset = copysetMan_->GetCopysetNode( - conf.logicalpoolid(), conf.copysetid()); + CopysetNodePtr copyset = + copysetMan_->GetCopysetNode(conf.logicalpoolid(), conf.copysetid()); - // 判断copyconf是否合法 + // Determine whether copyconf is legal if (!HeartbeatHelper::CopySetConfValid(conf, copyset)) { continue; } - // 解析该chunkserver上的copyset是否需要删除 - // 需要删除则清理copyset + // Resolve whether the copyset on the chunkserver needs to be deleted + // If deletion is required, clean the copyset if (HeartbeatHelper::NeedPurge(csEp_, conf, copyset)) { LOG(INFO) << "Clean peer " << csEp_ << " of copyset(" - << conf.logicalpoolid() << "," << conf.copysetid() - << "), groupId: " - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); + << conf.logicalpoolid() << "," << conf.copysetid() + << "), groupId: " + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); PurgeCopyset(conf.logicalpoolid(), conf.copysetid()); continue; } - // 解析是否有配置变更需要执行 + // Resolve if there are any configuration changes that need to be + // executed if (!conf.has_type()) { LOG(INFO) << "Failed to parse task for copyset(" - << conf.logicalpoolid() << "," << conf.copysetid() - << "), groupId: " - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); + << conf.logicalpoolid() << "," << conf.copysetid() + << "), groupId: " + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); continue; } - // 如果有配置变更需要执行,下发变更到copyset + // If there are configuration changes that need to be executed, issue + // the changes to the copyset if (!HeartbeatHelper::PeerVaild(conf.configchangeitem().address())) { 
continue; } if (conf.epoch() != copyset->GetConfEpoch()) { LOG(WARNING) << "Config change epoch:" << conf.epoch() - << " is not same as current:" << copyset->GetConfEpoch() - << " on copyset(" - << conf.logicalpoolid() << "," << conf.copysetid() - << "), groupId: " - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) - << ", refuse change"; + << " is not same as current:" + << copyset->GetConfEpoch() << " on copyset(" + << conf.logicalpoolid() << "," << conf.copysetid() + << "), groupId: " + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) + << ", refuse change"; continue; } - // 根据不同的变更类型下发配置 + // Distribute configurations based on different change types switch (conf.type()) { - case curve::mds::heartbeat::TRANSFER_LEADER: - { + case curve::mds::heartbeat::TRANSFER_LEADER: { if (!HeartbeatHelper::ChunkServerLoadCopySetFin( conf.configchangeitem().address())) { - LOG(INFO) << "Transfer leader to " + LOG(INFO) + << "Transfer leader to " << conf.configchangeitem().address() << " on copyset" << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) << " reject. target chunkserver is loading copyset"; @@ -472,45 +474,48 @@ int Heartbeat::ExecTask(const HeartbeatResponse& response) { } LOG(INFO) << "Transfer leader to " - << conf.configchangeitem().address() << " on copyset" - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); + << conf.configchangeitem().address() << " on copyset" + << ToGroupIdStr(conf.logicalpoolid(), + conf.copysetid()); copyset->TransferLeader(conf.configchangeitem()); break; } - case curve::mds::heartbeat::ADD_PEER: - LOG(INFO) << "Adding peer " << conf.configchangeitem().address() - << " to copyset" - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); - copyset->AddPeer(conf.configchangeitem()); - break; - - case curve::mds::heartbeat::REMOVE_PEER: - LOG(INFO) << "Removing peer " << conf.configchangeitem().address() - << " from copyset" - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); - copyset->RemovePeer(conf.configchangeitem()); - break; - - case curve::mds::heartbeat::CHANGE_PEER: - { + case curve::mds::heartbeat::ADD_PEER: + LOG(INFO) << "Adding peer " << conf.configchangeitem().address() + << " to copyset" + << ToGroupIdStr(conf.logicalpoolid(), + conf.copysetid()); + copyset->AddPeer(conf.configchangeitem()); + break; + + case curve::mds::heartbeat::REMOVE_PEER: + LOG(INFO) << "Removing peer " + << conf.configchangeitem().address() + << " from copyset" + << ToGroupIdStr(conf.logicalpoolid(), + conf.copysetid()); + copyset->RemovePeer(conf.configchangeitem()); + break; + + case curve::mds::heartbeat::CHANGE_PEER: { std::vector newPeers; if (HeartbeatHelper::BuildNewPeers(conf, &newPeers)) { - LOG(INFO) << "Change peer from " - << conf.oldpeer().address() << " to " - << conf.configchangeitem().address() << " on copyset" + LOG(INFO) + << "Change peer from " << conf.oldpeer().address() + << " to " << conf.configchangeitem().address() + << " on copyset" << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); copyset->ChangePeer(newPeers); } else { - LOG(ERROR) << "Build new peer for copyset" + LOG(ERROR) + << "Build new peer for copyset" << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) << " failed"; } - } - break; + } break; - case curve::mds::heartbeat::START_SCAN_PEER: - { + case curve::mds::heartbeat::START_SCAN_PEER: { ConfigChangeType type; Configuration tmpConf; Peer peer; @@ -522,41 +527,37 @@ int Heartbeat::ExecTask(const HeartbeatResponse& response) { LOG(ERROR) << "Failed to get config change state of copyset" << 
ToGroupIdStr(poolId, copysetId); return ret; - } else if (type != curve::mds::heartbeat::NONE) { + } else if (type != curve::mds::heartbeat::NONE) { LOG(INFO) << "drop scan peer request to copyset: " << ToGroupIdStr(poolId, copysetId) << " because exist config" << " ConfigChangeType: " << type; - } else { - LOG(INFO) << "Scan peer " - << conf.configchangeitem().address() - << "to copyset " - << ToGroupIdStr(poolId, copysetId); + } else { + LOG(INFO) + << "Scan peer " << conf.configchangeitem().address() + << "to copyset " << ToGroupIdStr(poolId, copysetId); scanMan_->Enqueue(poolId, copysetId); } - } - break; + } break; - case curve::mds::heartbeat::CANCEL_SCAN_PEER: - { + case curve::mds::heartbeat::CANCEL_SCAN_PEER: { // todo Abnormal scenario LogicPoolID poolId = conf.logicalpoolid(); CopysetID copysetId = conf.copysetid(); int ret = scanMan_->CancelScanJob(poolId, copysetId); if (ret < 0) { - LOG(ERROR) << "cancel scan peer failed, " - << "peer address: " - << conf.configchangeitem().address() - << "copyset groupId: " - << ToGroupIdStr(poolId, copysetId); + LOG(ERROR) + << "cancel scan peer failed, " + << "peer address: " << conf.configchangeitem().address() + << "copyset groupId: " + << ToGroupIdStr(poolId, copysetId); } return ret; - } - break; + } break; - default: - LOG(ERROR) << "Invalid configchange type: " << conf.type(); - break; + default: + LOG(ERROR) << "Invalid configchange type: " << conf.type(); + break; } } @@ -569,7 +570,7 @@ void Heartbeat::HeartbeatWorker() { LOG(INFO) << "Starting Heartbeat worker thread."; - // 处理配置等于0等异常情况 + // Handling abnormal situations such as configuration equal to 0 if (options_.intervalSec <= 4) { errorIntervalSec = 2; } else { diff --git a/src/chunkserver/heartbeat.h b/src/chunkserver/heartbeat.h index df86d8e88a..16d5c1a1fa 100644 --- a/src/chunkserver/heartbeat.h +++ b/src/chunkserver/heartbeat.h @@ -24,58 +24,58 @@ #ifndef SRC_CHUNKSERVER_HEARTBEAT_H_ #define SRC_CHUNKSERVER_HEARTBEAT_H_ +#include // NodeImpl #include -#include // NodeImpl -#include -#include -#include #include +#include #include +#include #include //NOLINT +#include #include "include/chunkserver/chunkserver_common.h" -#include "src/chunkserver/copyset_node_manager.h" -#include "src/common/wait_interval.h" -#include "src/common/concurrent/concurrent.h" -#include "src/chunkserver/scan_manager.h" #include "proto/heartbeat.pb.h" #include "proto/scan.pb.h" +#include "src/chunkserver/copyset_node_manager.h" +#include "src/chunkserver/scan_manager.h" +#include "src/common/concurrent/concurrent.h" +#include "src/common/wait_interval.h" using ::curve::common::Thread; namespace curve { namespace chunkserver { -using HeartbeatRequest = curve::mds::heartbeat::ChunkServerHeartbeatRequest; +using HeartbeatRequest = curve::mds::heartbeat::ChunkServerHeartbeatRequest; using HeartbeatResponse = curve::mds::heartbeat::ChunkServerHeartbeatResponse; -using ConfigChangeInfo = curve::mds::heartbeat::ConfigChangeInfo; -using CopySetConf = curve::mds::heartbeat::CopySetConf; -using CandidateError = curve::mds::heartbeat::CandidateError; -using TaskStatus = butil::Status; -using CopysetNodePtr = std::shared_ptr; +using ConfigChangeInfo = curve::mds::heartbeat::ConfigChangeInfo; +using CopySetConf = curve::mds::heartbeat::CopySetConf; +using CandidateError = curve::mds::heartbeat::CandidateError; +using TaskStatus = butil::Status; +using CopysetNodePtr = std::shared_ptr; /** - * 心跳子系统选项 + * Heartbeat subsystem options */ struct HeartbeatOptions { - ChunkServerID chunkserverId; - std::string 
chunkserverToken; - std::string storeUri; - std::string mdsListenAddr; - std::string ip; - uint32_t port; - uint32_t intervalSec; - uint32_t timeout; - CopysetNodeManager* copysetNodeManager; - ScanManager* scanManager; + ChunkServerID chunkserverId; + std::string chunkserverToken; + std::string storeUri; + std::string mdsListenAddr; + std::string ip; + uint32_t port; + uint32_t intervalSec; + uint32_t timeout; + CopysetNodeManager* copysetNodeManager; + ScanManager* scanManager; std::shared_ptr fs; std::shared_ptr chunkFilePool; }; /** - * 心跳子系统处理模块 + * Heartbeat subsystem processing module */ class Heartbeat { public: @@ -83,110 +83,110 @@ class Heartbeat { ~Heartbeat() {} /** - * @brief 初始化心跳子系统 - * @param[in] options 心跳子系统选项 - * @return 0:成功,非0失败 + * @brief Initialize the heartbeat subsystem + * @param[in] options Heartbeat subsystem options + * @return 0 on success, non-zero on failure */ int Init(const HeartbeatOptions& options); /** - * @brief 清理心跳子系统 - * @return 0:成功,非0失败 + * @brief Clean up the heartbeat subsystem + * @return 0 on success, non-zero on failure */ int Fini(); /** - * @brief 启动心跳子系统 - * @return 0:成功,非0失败 + * @brief Start the heartbeat subsystem + * @return 0 on success, non-zero on failure */ int Run(); private: /** - * @brief 停止心跳子系统 - * @return 0:成功,非0失败 + * @brief Stop the heartbeat subsystem + * @return 0 on success, non-zero on failure */ int Stop(); /* - * 心跳工作线程 + * Heartbeat worker thread */ void HeartbeatWorker(); /* - * 获取Chunkserver存储空间信息 + * Obtain Chunkserver storage space information */ int GetFileSystemSpaces(size_t* capacity, size_t* free); /* - * 构建心跳消息的Copyset信息项 + * Build the copyset information items of a heartbeat message */ int BuildCopysetInfo(curve::mds::heartbeat::CopySetInfo* info, CopysetNodePtr copyset); /* - * 构建心跳请求 + * Build a heartbeat request */ int BuildRequest(HeartbeatRequest* request); /* - * 发送心跳消息 + * Send a heartbeat message */ int SendHeartbeat(const HeartbeatRequest& request, HeartbeatResponse* response); /* - * 执行心跳任务 + * Execute heartbeat tasks */ int ExecTask(const HeartbeatResponse& response); /* - * 输出心跳请求信息 + * Output heartbeat request information */ void DumpHeartbeatRequest(const HeartbeatRequest& request); /* - * 输出心跳回应信息 + * Output heartbeat response information */ void DumpHeartbeatResponse(const HeartbeatResponse& response); /* - * 清理复制组实例及持久化数据 + * Clean up the replication group instance and its persisted data */ TaskStatus PurgeCopyset(LogicPoolID poolId, CopysetID copysetId); private: - // 心跳线程 + // Heartbeat thread Thread hbThread_; - // 控制心跳模块运行或停止 + // Controls whether the heartbeat module runs or stops std::atomic toStop_; - // 使用定时器 + // Timer for the heartbeat interval ::curve::common::WaitInterval waitInterval_; - // Copyset管理模块 + // Copyset management module CopysetNodeManager* copysetMan_; - // ChunkServer目录 + // ChunkServer directory std::string storePath_; - // 心跳选项 + // Heartbeat options HeartbeatOptions options_; - // MDS的地址 + // MDS addresses std::vector mdsEps_; - // 当前供服务的mds + // Index of the mds currently in service int inServiceIndex_; - // ChunkServer本身的地址 + // ChunkServer's own address butil::EndPoint csEp_; - // 模块初始化时间, unix时间 + // Module initialization time (unix timestamp) uint64_t startUpTime_; - ScanManager *scanMan_; + ScanManager* scanMan_; }; } // namespace chunkserver diff --git a/src/chunkserver/heartbeat_helper.cpp b/src/chunkserver/heartbeat_helper.cpp index 02a2fc65c9..bc9bbd3708 100644 --- a/src/chunkserver/heartbeat_helper.cpp +++ b/src/chunkserver/heartbeat_helper.cpp @@ -20,34 +20,37 @@ * Author: lixiaocui */ -#include "src/chunkserver/heartbeat_helper.h" + #include
#include +#include + #include -#include "src/chunkserver/heartbeat_helper.h" + #include "include/chunkserver/chunkserver_common.h" #include "proto/chunkserver.pb.h" namespace curve { namespace chunkserver { -bool HeartbeatHelper::BuildNewPeers( - const CopySetConf &conf, std::vector *newPeers) { - // 检验目标节点和待删除节点是否有效 +bool HeartbeatHelper::BuildNewPeers(const CopySetConf& conf, + std::vector* newPeers) { + // Verify if the target node and the node to be deleted are valid std::string target(conf.configchangeitem().address()); std::string old(conf.oldpeer().address()); if (!PeerVaild(target) || !PeerVaild(old)) { return false; } - // 生成newPeers + // Generate newPeers for (int i = 0; i < conf.peers_size(); i++) { std::string peer = conf.peers(i).address(); - // 检验conf中的peer是否有效 + // Verify if the peer in conf is valid if (!PeerVaild(peer)) { return false; } - // newPeers中不包含old副本 + // newPeers does not contain the old replica if (conf.peers(i).address() != old) { newPeers->emplace_back(conf.peers(i)); } @@ -57,49 +60,51 @@ bool HeartbeatHelper::BuildNewPeers( return true; } -bool HeartbeatHelper::PeerVaild(const std::string &peer) { +bool HeartbeatHelper::PeerVaild(const std::string& peer) { PeerId peerId; return 0 == peerId.parse(peer); } -bool HeartbeatHelper::CopySetConfValid( - const CopySetConf &conf, const CopysetNodePtr &copyset) { - // chunkserver中不存在需要变更的copyset, 报警 +bool HeartbeatHelper::CopySetConfValid(const CopySetConf& conf, + const CopysetNodePtr& copyset) { + // The copyset to be changed does not exist on the chunkserver; raise an + // alarm if (copyset == nullptr) { - LOG(ERROR) << "Failed to find copyset(" << conf.logicalpoolid() - << "," << conf.copysetid() << "), groupId: " - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); + LOG(ERROR) << "Failed to find copyset(" << conf.logicalpoolid() << "," + << conf.copysetid() << "), groupId: " + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); return false; } - // 下发的变更epoch < copyset实际的epoch,报错 + // The issued change epoch is smaller than the actual epoch of the copyset; + // report an error if (conf.epoch() < copyset->GetConfEpoch()) { LOG(WARNING) << "Config change epoch:" << conf.epoch() - << " is smaller than current:" << copyset->GetConfEpoch() - << " on copyset(" - << conf.logicalpoolid() << "," << conf.copysetid() - << "), groupId: " - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) - << ", refuse change"; + << " is smaller than current:" << copyset->GetConfEpoch() + << " on copyset(" << conf.logicalpoolid() << "," + << conf.copysetid() << "), groupId: " + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) + << ", refuse change"; return false; } return true; } -bool HeartbeatHelper::NeedPurge(const butil::EndPoint &csEp, - const CopySetConf &conf, const CopysetNodePtr &copyset) { +bool HeartbeatHelper::NeedPurge(const butil::EndPoint& csEp, + const CopySetConf& conf, + const CopysetNodePtr& copyset) { (void)copyset; - // CLDCFS-1004 bug-fix: mds下发epoch为0, 配置为空的copyset + // CLDCFS-1004 bug-fix: mds issued a copyset with epoch 0 and empty + // configuration if (0 == conf.epoch() && conf.peers().empty()) { LOG(INFO) << "Clean copyset " - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) - << "in peer " << csEp - << ", witch is not exist in mds record"; + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) + << "in peer " << csEp << ", which does not exist in mds record"; return true; } - // 该chunkserrver不在copyset的配置中,需要清理 + // The chunkserver is not in the copyset's configuration and needs to be + // cleaned up std::string chunkserverEp = std::string(butil::endpoint2str(csEp).c_str()); for (int i = 0; i < conf.peers_size(); i++) { if (conf.peers(i).address().find(chunkserverEp) != std::string::npos) { return false; } } return true; } bool HeartbeatHelper::ChunkServerLoadCopySetFin(const std::string peerId) { PeerId peer; peer.parse(peerId); - const char *ip = butil::ip2str(peer.addr.ip).c_str(); + const char* ip = butil::ip2str(peer.addr.ip).c_str(); int port = peer.addr.port; brpc::Channel channel; if (channel.Init(ip, port, NULL) != 0) { @@ -133,7 +138,7 @@ bool HeartbeatHelper::ChunkServerLoadCopySetFin(const std::string peerId) { stub.ChunkServerStatus(&cntl, &req, &rep, nullptr); if (cntl.Failed()) { LOG(WARNING) << "Send ChunkServerStatusRequest failed, cntl.errorText =" - << cntl.ErrorText(); + << cntl.ErrorText(); return false; } @@ -142,4 +147,3 @@ bool HeartbeatHelper::ChunkServerLoadCopySetFin(const std::string peerId) { } // namespace chunkserver } // namespace curve - diff --git a/src/chunkserver/heartbeat_helper.h b/src/chunkserver/heartbeat_helper.h index 43ada5f6ea..c06fedb61b 100644 --- a/src/chunkserver/heartbeat_helper.h +++ b/src/chunkserver/heartbeat_helper.h @@ -24,74 +24,83 @@ #define SRC_CHUNKSERVER_HEARTBEAT_HELPER_H_ #include -#include + #include #include +#include + #include "proto/heartbeat.pb.h" #include "src/chunkserver/copyset_node.h" namespace curve { namespace chunkserver { -using ::curve::mds::heartbeat::CopySetConf; using ::curve::common::Peer; +using ::curve::mds::heartbeat::CopySetConf; using CopysetNodePtr = std::shared_ptr; class HeartbeatHelper { public: /** - * 根据mds下发的conf构建出指定复制组的新配置,给ChangePeer使用 + * Build a new configuration for the specified replication group based on + * the conf issued by mds, for use by ChangePeer * - * @param[in] conf mds下发的变更命令needupdatecopyset[i] - * @param[out] newPeers 指定复制组的目标配置 + * @param[in] conf the change command needupdatecopyset[i] issued by mds + * @param[out] newPeers the target configuration of the specified + * replication group * - * @return false-生成newpeers失败 true-生成newpeers成功 + * @return false - failed to generate newPeers; true - succeeded */ - static bool BuildNewPeers( - const CopySetConf &conf, std::vector *newPeers); + static bool BuildNewPeers(const CopySetConf& conf, + std::vector* newPeers); /** - * 判断字符串peer(正确的形式为: ip:port:0)是否有效 + * Determine whether the string peer (correct form: ip:port:0) is valid * - * @param[in] peer 指定字符串 + * @param[in] peer the string to check * - * @return false-无效 true-有效 + * @return false - invalid, true - valid */ - static bool PeerVaild(const std::string &peer); + static bool PeerVaild(const std::string& peer); /** - * 判断mds下发过来的copysetConf是否合法,以下两种情况不合法: - * 1. chunkserver中不存在该copyset - * 2. mds下发的copyset中记录的epoch小于chunkserver上copyset此时的epoch + * Determine whether the copysetConf sent by mds is valid; the following + * two situations are invalid: + * 1. The copyset does not exist in chunkserver + * 2.
The epoch recorded in the copyset issued by mds is smaller than the + * epoch recorded in the copyset on chunkserver at this time * - * @param[in] conf mds下发的变更命令needupdatecopyset[i] - * @param[in] copyset chunkserver上对应的copyset + * @param[in] conf the change command needupdatecopyset[i] issued by mds + * @param[in] copyset The corresponding copyset on chunkserver * - * @return false-copysetConf不合法,true-copysetConf合法 + * @return false-copysetConf is illegal, true-copysetConf is legal */ - static bool CopySetConfValid( - const CopySetConf &conf, const CopysetNodePtr &copyset); + static bool CopySetConfValid(const CopySetConf& conf, + const CopysetNodePtr& copyset); /** - * 判断chunkserver(csEp)中指定copyset是否需要删除 + * Determine whether the specified copyset in chunkserver(csEp) needs to be + * deleted * - * @param[in] csEp 该chunkserver的ip:port - * @param[in] conf mds下发的变更命令needupdatecopyset[i] - * @param[in] copyset chunkserver上对应的copyset + * @param[in] csEp The ip:port of this chunkserver + * @param[in] conf the change command needupdatecopyset[i] issued by mds + * @param[in] copyset The corresponding copyset on chunkserver * - * @return false-该chunkserver上的copyset无需清理; - * true-该chunkserver上的copyset需要清理 + * @return false - the copyset on this chunkserver does not need to be + * cleaned up; true - the copyset on this chunkserver needs to be cleaned up */ - static bool NeedPurge(const butil::EndPoint &csEp, const CopySetConf &conf, - const CopysetNodePtr &copyset); + static bool NeedPurge(const butil::EndPoint& csEp, const CopySetConf& conf, + const CopysetNodePtr& copyset); /** - * 判断指定chunkserver copyset是否已经加载完毕 + * Determine whether the specified chunkserver copyset has finished loading * - * @return false-copyset加载完毕 true-copyset未加载完成 + * @return false - copyset loading has finished; true - copyset loading has + * not finished yet */ static bool ChunkServerLoadCopySetFin(const std::string ipPort); }; } // namespace chunkserver } // namespace curve #endif // SRC_CHUNKSERVER_HEARTBEAT_HELPER_H_ - diff --git a/src/chunkserver/inflight_throttle.h b/src/chunkserver/inflight_throttle.h index 86af93daf7..71462b5e97 100644 --- a/src/chunkserver/inflight_throttle.h +++ b/src/chunkserver/inflight_throttle.h @@ -30,18 +30,17 @@ namespace curve { namespace chunkserver { /** - * 负责控制最大inflight request数量 + * Responsible for controlling the maximum number of inflight requests */ class InflightThrottle { public: explicit InflightThrottle(uint64_t maxInflight) - : inflightRequestCount_(0), - kMaxInflightRequest_(maxInflight) { } + : inflightRequestCount_(0), kMaxInflightRequest_(maxInflight) {} virtual ~InflightThrottle() = default; /** - * @brief: 判断是否过载 - * @return true,过载,false没有过载 + * @brief: Determine whether the server is overloaded + * @return true if overloaded, false otherwise */ inline bool IsOverLoad() { if (kMaxInflightRequest_ >= @@ -53,23 +52,23 @@ class InflightThrottle { } /** - * @brief: inflight request计数加1 + * @brief: Increase the inflight request count by 1 */ inline void Increment() { inflightRequestCount_.fetch_add(1, std::memory_order_relaxed); } /** - * @brief: inflight request计数减1 + * @brief: Decrease the inflight request count by 1 */ inline void Decrement() { inflightRequestCount_.fetch_sub(1, std::memory_order_relaxed); } private: - // 当前inflight request数量 + // Current number of inflight requests std::atomic inflightRequestCount_; - // 最大的inflight request数量 + // Maximum number of inflight requests const uint64_t kMaxInflightRequest_; };
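As a reading aid for the throttle class above, here is a minimal usage sketch; the handler name is hypothetical and not part of this patch:

    // Hypothetical caller of InflightThrottle (illustrative only).
    void HandleRequest(InflightThrottle* throttle) {
        if (throttle->IsOverLoad()) {
            return;  // reject early instead of queueing more work
        }
        throttle->Increment();
        // ... process the request ...
        throttle->Decrement();  // must run on every path, success or failure
    }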
diff --git a/src/chunkserver/op_request.cpp b/src/chunkserver/op_request.cpp index 817e65c79f..e03c079341 100755 --- a/src/chunkserver/op_request.cpp +++ b/src/chunkserver/op_request.cpp @@ -22,44 +22,41 @@ #include "src/chunkserver/op_request.h" -#include +#include #include #include -#include +#include #include #include #include -#include "src/chunkserver/copyset_node.h" #include "src/chunkserver/chunk_closure.h" #include "src/chunkserver/clone_manager.h" #include "src/chunkserver/clone_task.h" +#include "src/chunkserver/copyset_node.h" namespace curve { namespace chunkserver { -ChunkOpRequest::ChunkOpRequest() : - datastore_(nullptr), - node_(nullptr), - cntl_(nullptr), - request_(nullptr), - response_(nullptr), - done_(nullptr) { -} +ChunkOpRequest::ChunkOpRequest() + : datastore_(nullptr), + node_(nullptr), + cntl_(nullptr), + request_(nullptr), + response_(nullptr), + done_(nullptr) {} ChunkOpRequest::ChunkOpRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - datastore_(nodePtr->GetDataStore()), - node_(nodePtr), - cntl_(dynamic_cast(cntl)), - request_(request), - response_(response), - done_(done) { -} + RpcController* cntl, const ChunkRequest* request, + ChunkResponse* response, + ::google::protobuf::Closure* done) + : datastore_(nodePtr->GetDataStore()), + node_(nodePtr), + cntl_(dynamic_cast(cntl)), + request_(request), + response_(response), + done_(done) {} void ChunkOpRequest::Process() { brpc::ClosureGuard doneGuard(done_); @@ -71,18 +68,19 @@ void ChunkOpRequest::Process() { } /** - * 如果propose成功,说明request成功交给了raft处理, - * 那么done_就不能被调用,只有propose失败了才需要提前返回 + * If Propose succeeds, the request has been handed over to raft for + * processing, so done_ must not be called here; only when Propose fails + * do we need to return early */ - if (0 == Propose(request_, cntl_ ? &cntl_->request_attachment() : - nullptr)) { + if (0 == + Propose(request_, cntl_ ? &cntl_->request_attachment() : nullptr)) { doneGuard.release(); } }
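The ownership rule described in the comment above can be condensed into a short standalone sketch (illustrative only; names are hypothetical, and it relies on brpc::ClosureGuard semantics exactly as this file uses them):

    #include <brpc/closure_guard.h>
    #include <google/protobuf/stubs/callback.h>

    // Sketch: the guard owns done; ownership moves to raft only on success.
    void RunOnce(::google::protobuf::Closure* done, bool proposeOk) {
        brpc::ClosureGuard doneGuard(done);
        if (proposeOk) {
            doneGuard.release();  // raft invokes done after the entry applies
        }
        // if the propose failed, the guard invokes done when it goes out of
        // scope, which is the early-return path
    }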
-int ChunkOpRequest::Propose(const ChunkRequest *request, - const butil::IOBuf *data) { - // 打包op request为task +int ChunkOpRequest::Propose(const ChunkRequest* request, + const butil::IOBuf* data) { + // Pack the op request as a task braft::Task task; butil::IOBuf log; if (0 != Encode(request, data, &log)) { @@ -93,10 +91,13 @@ int ChunkOpRequest::Propose(const ChunkRequest *request, task.data = &log; task.done = new ChunkClosure(shared_from_this()); /** - * 由于apply是异步的,有可能某个节点在term1是leader,apply了一条log, - * 但是中间发生了主从切换,在很短的时间内这个节点又变为term3的leader, - * 之前apply的日志才开始进行处理,这种情况下要实现严格意义上的复制状态 - * 机,需要解决这种ABA问题,可以在apply的时候设置leader当时的term + * Because apply is asynchronous, a node may have been the leader in + * term1 and proposed a log entry, then lost leadership, and shortly + * afterwards become the leader again in term3, with the earlier entry + * only then being processed. To implement a replicated state machine in + * the strict sense, this ABA problem must be solved; this is done by + * recording the leader's term at propose time */ task.expected_term = node_->LeaderTerm(); @@ -106,8 +107,8 @@ int ChunkOpRequest::Propose(const ChunkRequest *request, } void ChunkOpRequest::RedirectChunkRequest() { - // 编译时加上 --copt -DUSE_BTHREAD_MUTEX - // 否则可能发生死锁: CLDCFS-1120 + // Compile with --copt -DUSE_BTHREAD_MUTEX + // Otherwise, a deadlock may occur: CLDCFS-1120 // PeerId leader = node_->GetLeaderId(); // if (!leader.is_empty()) { // response_->set_redirect(leader.to_string()); @@ -115,9 +116,8 @@ void ChunkOpRequest::RedirectChunkRequest() { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); } -int ChunkOpRequest::Encode(const ChunkRequest *request, - const butil::IOBuf *data, - butil::IOBuf *log) { +int ChunkOpRequest::Encode(const ChunkRequest* request, + const butil::IOBuf* data, butil::IOBuf* log) { // 1.append request length const uint32_t metaSize = butil::HostToNet32(request->ByteSize()); log->append(&metaSize, sizeof(uint32_t)); @@ -135,8 +135,8 @@ int ChunkOpRequest::Encode(const ChunkRequest *request, } std::shared_ptr ChunkOpRequest::Decode(butil::IOBuf log, - ChunkRequest *request, - butil::IOBuf *data, + ChunkRequest* request, + butil::IOBuf* data, uint64_t index, PeerId leaderId) { uint32_t metaSize = 0; @@ -171,35 +171,35 @@ std::shared_ptr ChunkOpRequest::Decode(butil::IOBuf log, return std::make_shared(); case CHUNK_OP_TYPE::CHUNK_OP_SCAN: return std::make_shared(index, leaderId); - default:LOG(ERROR) << "Unknown chunk op"; + default: + LOG(ERROR) << "Unknown chunk op"; return nullptr; } } ApplyTaskType ChunkOpRequest::Schedule(CHUNK_OP_TYPE opType) { switch (opType) { - case CHUNK_OP_READ: - case CHUNK_OP_RECOVER: - return ApplyTaskType::READ; - default: - return ApplyTaskType::WRITE; + case CHUNK_OP_READ: + case CHUNK_OP_RECOVER: + return ApplyTaskType::READ; + default: + return ApplyTaskType::WRITE; } } namespace { uint64_t MaxAppliedIndex( - const std::shared_ptr& node, - uint64_t current) { + const std::shared_ptr& node, + uint64_t current) { return std::max(current, node->GetAppliedIndex()); } } // namespace void DeleteChunkRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - auto ret = datastore_->DeleteChunk(request_->chunkid(), - request_->sn()); + auto ret = datastore_->DeleteChunk(request_->chunkid(), request_->sn()); if
(CSErrorCode::Success == ret) { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); node_->UpdateAppliedIndex(index); @@ -211,21 +211,19 @@ void DeleteChunkRequest::OnApply(uint64_t index, LOG(ERROR) << "delete chunk failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } response_->set_appliedindex(MaxAppliedIndex(node_, index)); } void DeleteChunkRequest::OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) { + const ChunkRequest& request, + const butil::IOBuf& data) { (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - auto ret = datastore->DeleteChunk(request.chunkid(), - request.sn()); - if (CSErrorCode::Success == ret) - return; + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing + auto ret = datastore->DeleteChunk(request.chunkid(), request.sn()); + if (CSErrorCode::Success == ret) return; if (CSErrorCode::InternalError == ret) { LOG(FATAL) << "delete failed: " @@ -239,16 +237,14 @@ void DeleteChunkRequest::OnApplyFromLog(std::shared_ptr datastore, } ReadChunkRequest::ReadChunkRequest(std::shared_ptr nodePtr, - CloneManager* cloneMgr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, cntl, request, response, done), - cloneMgr_(cloneMgr), - concurrentApplyModule_(nodePtr->GetConcurrentApplyModule()), - applyIndex(0) { -} + CloneManager* cloneMgr, RpcController* cntl, + const ChunkRequest* request, + ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, cntl, request, response, done), + cloneMgr_(cloneMgr), + concurrentApplyModule_(nodePtr->GetConcurrentApplyModule()), + applyIndex(0) {} void ReadChunkRequest::Process() { brpc::ClosureGuard doneGuard(done_); @@ -267,21 +263,20 @@ void ReadChunkRequest::Process() { * extend from std::enable_shared_from_this, * use shared_from_this() to return a shared_ptr */ - auto thisPtr - = std::dynamic_pointer_cast(shared_from_this()); + auto thisPtr = + std::dynamic_pointer_cast(shared_from_this()); /* * why push read requests to concurrent layer: * 1. all I/O operators including read and write requests are executed * in concurrent layer, we can separate disk I/O from other logic. * 2. ensure linear consistency of read semantics. 
*/ - auto task = std::bind(&ReadChunkRequest::OnApply, - thisPtr, - node_->GetAppliedIndex(), - doneGuard.release()); - concurrentApplyModule_->Push(request_->chunkid(), - ChunkOpRequest::Schedule(request_->optype()), // NOLINT - task); + auto task = std::bind(&ReadChunkRequest::OnApply, thisPtr, + node_->GetAppliedIndex(), doneGuard.release()); + concurrentApplyModule_->Push( + request_->chunkid(), + ChunkOpRequest::Schedule(request_->optype()), // NOLINT + task); return; } @@ -298,16 +293,19 @@ void ReadChunkRequest::Process() { } void ReadChunkRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { - // 先清除response中的status,以保证CheckForward后的判断的正确性 + ::google::protobuf::Closure* done) { + // Clear the status in the response first to ensure the correctness of the + // judgment after CheckForward response_->clear_status(); CSChunkInfo chunkInfo; - CSErrorCode errorCode = datastore_->GetChunkInfo(request_->chunkid(), - &chunkInfo); + CSErrorCode errorCode = + datastore_->GetChunkInfo(request_->chunkid(), &chunkInfo); do { bool needLazyClone = false; - // 如果需要Read的chunk不存在,但是请求包含Clone源信息,则尝试从Clone源读取数据 + // If the chunk that needs to be read does not exist, but the request + // contains Clone source information, try reading data from the Clone + // source if (CSErrorCode::ChunkNotExistError == errorCode) { if (existCloneInfo(request_)) { needLazyClone = true; @@ -324,14 +322,15 @@ void ReadChunkRequest::OnApply(uint64_t index, CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); break; } - // 如果需要从源端拷贝数据,需要将请求转发给clone manager处理 - if ( needLazyClone || NeedClone(chunkInfo) ) { + // If you need to copy data from the source, you need to forward the + // request to the clone manager for processing + if (needLazyClone || NeedClone(chunkInfo)) { applyIndex = index; - std::shared_ptr cloneTask = - cloneMgr_->GenerateCloneTask( + std::shared_ptr cloneTask = cloneMgr_->GenerateCloneTask( std::dynamic_pointer_cast(shared_from_this()), done); - // TODO(yyk) 尽量不能阻塞队列,后面要具体考虑 + // TODO(yyk) should try not to block the queue, and specific + // considerations should be taken later bool result = cloneMgr_->IssueCloneTask(cloneTask); if (!result) { LOG(ERROR) << "issue clone task failed: " @@ -340,14 +339,16 @@ void ReadChunkRequest::OnApply(uint64_t index, CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); break; } - // 如果请求成功转发给了clone manager就可以直接返回了 + // If the request is successfully forwarded to the clone manager, it + // can be returned directly return; } - // 如果是ReadChunk请求还需要从本地读取数据 + // If it is a ReadChunk request, data needs to be read locally if (request_->optype() == CHUNK_OP_TYPE::CHUNK_OP_READ) { ReadChunk(); } - // 如果是recover请求,说明请求区域已经被写过了,可以直接返回成功 + // If it is a recover request, it indicates that the request area has + // been written and can directly return success if (request_->optype() == CHUNK_OP_TYPE::CHUNK_OP_RECOVER) { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } @@ -362,57 +363,51 @@ void ReadChunkRequest::OnApply(uint64_t index, } void ReadChunkRequest::OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) { + const ChunkRequest& request, + const butil::IOBuf& data) { (void)datastore; (void)request; (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - // read什么都不用做 + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing Read doesn't need to do anything } bool ReadChunkRequest::NeedClone(const CSChunkInfo& chunkInfo) { - // 如果不是 clone chunk,就不需要拷贝 + // If 
it's not a clone chunk, there's no need to copy it if (chunkInfo.isClone) { off_t offset = request_->offset(); size_t length = request_->size(); uint32_t blockSize = chunkInfo.blockSize; uint32_t beginIndex = offset / blockSize; uint32_t endIndex = (offset + length - 1) / blockSize; - // 如果是clone chunk,且存在未被写过的page,就需要拷贝 + // If it is a clone chunk and there are unwritten pages, it needs to be + // copied if (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex) - != Bitmap::NO_POS) { + if (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex) != + Bitmap::NO_POS) { return true; } } return false; } -static void ReadBufferDeleter(void* ptr) { - delete[] static_cast(ptr); -} +static void ReadBufferDeleter(void* ptr) { delete[] static_cast(ptr); } void ReadChunkRequest::ReadChunk() { - char *readBuffer = nullptr; + char* readBuffer = nullptr; size_t size = request_->size(); - readBuffer = new(std::nothrow)char[size]; - CHECK(nullptr != readBuffer) - << "new readBuffer failed " << strerror(errno); + readBuffer = new (std::nothrow) char[size]; + CHECK(nullptr != readBuffer) << "new readBuffer failed " << strerror(errno); - auto ret = datastore_->ReadChunk(request_->chunkid(), - request_->sn(), - readBuffer, - request_->offset(), - size); + auto ret = datastore_->ReadChunk(request_->chunkid(), request_->sn(), + readBuffer, request_->offset(), size); butil::IOBuf wrapper; wrapper.append_user_data(readBuffer, size, ReadBufferDeleter); if (CSErrorCode::Success == ret) { cntl_->response_attachment().append(wrapper); response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); } else if (CSErrorCode::InternalError == ret) { LOG(FATAL) << "read failed: " << " data store return: " << ret @@ -421,50 +416,47 @@ void ReadChunkRequest::ReadChunk() { LOG(ERROR) << "read failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } }
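ReadChunk above hands its heap buffer to the IOBuf without copying, and the deleter reclaims the memory once the last reference is dropped. A self-contained sketch of that pattern (the function name is hypothetical):

    #include <new>
    #include <butil/iobuf.h>

    static void BufferDeleter(void* ptr) { delete[] static_cast<char*>(ptr); }

    bool FillAttachment(butil::IOBuf* out, size_t size) {
        char* buf = new (std::nothrow) char[size];
        if (buf == nullptr) return false;
        // ... fill buf, e.g. from the datastore ...
        // The IOBuf takes ownership; BufferDeleter runs when the last
        // reference to this block is released.
        out->append_user_data(buf, size, BufferDeleter);
        return true;
    }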
void WriteChunkRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); uint32_t cost; - std::string cloneSourceLocation; + std::string cloneSourceLocation; if (existCloneInfo(request_)) { auto func = ::curve::common::LocationOperator::GenerateCurveLocation; - cloneSourceLocation = func(request_->clonefilesource(), - request_->clonefileoffset()); + cloneSourceLocation = + func(request_->clonefilesource(), request_->clonefileoffset()); } - auto ret = datastore_->WriteChunk(request_->chunkid(), - request_->sn(), - cntl_->request_attachment(), - request_->offset(), - request_->size(), - &cost, - cloneSourceLocation); + auto ret = datastore_->WriteChunk( + request_->chunkid(), request_->sn(), cntl_->request_attachment(), + request_->offset(), request_->size(), &cost, cloneSourceLocation); if (CSErrorCode::Success == ret) { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); node_->UpdateAppliedIndex(index); } else if (CSErrorCode::BackwardRequestError == ret) { - // 打快照那一刻是有可能出现旧版本的请求 - // 返回错误给客户端,让客户端带新版本来重试 + // At the moment a snapshot is taken, requests carrying an older + // version may still arrive. Return an error to the client so that it + // retries with the new version LOG(WARNING) << "write failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD); } else if (CSErrorCode::InternalError == ret || CSErrorCode::CrcCheckError == ret || CSErrorCode::FileFormatError == ret) { /** - * internalerror一般是磁盘错误,为了防止副本不一致,让进程退出 - * TODO(yyk): 当前遇到write错误直接fatal退出整个 - * ChunkServer后期考虑仅仅标坏这个copyset,保证较好的可用性 - */ + * An internal error is usually a disk error. To prevent replica + * inconsistency, the process is forced to exit. + * TODO(yyk): currently any write error causes a fatal exit of the + * entire ChunkServer; later, consider only marking this copyset bad + * to provide better availability + */ LOG(FATAL) << "write failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); @@ -472,8 +464,7 @@ void WriteChunkRequest::OnApply(uint64_t index, LOG(ERROR) << "write failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } response_->set_appliedindex(MaxAppliedIndex(node_, index)); @@ -481,27 +472,24 @@ } void WriteChunkRequest::OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) { - // NOTE: 处理过程中优先使用参数传入的datastore/request + const ChunkRequest& request, + const butil::IOBuf& data) { + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing uint32_t cost; - std::string cloneSourceLocation; + std::string cloneSourceLocation; if (existCloneInfo(&request)) { auto func = ::curve::common::LocationOperator::GenerateCurveLocation; - cloneSourceLocation = func(request.clonefilesource(), - request.clonefileoffset()); + cloneSourceLocation = + func(request.clonefilesource(), request.clonefileoffset()); } - auto ret = datastore->WriteChunk(request.chunkid(), - request.sn(), - data, - request.offset(), - request.size(), - &cost, + auto ret = datastore->WriteChunk(request.chunkid(), request.sn(), data, + request.offset(), request.size(), &cost, cloneSourceLocation); - if (CSErrorCode::Success == ret) { - return; - } else if (CSErrorCode::BackwardRequestError == ret) { + if (CSErrorCode::Success == ret) { + return; + } else if (CSErrorCode::BackwardRequestError == ret) { LOG(WARNING) << "write failed: " << " data store return: " << ret << ", request: " << request.ShortDebugString(); @@ -519,24 +507,22 @@ void WriteChunkRequest::OnApplyFromLog(std::shared_ptr datastore, } void ReadSnapshotRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - char *readBuffer = nullptr; + char* readBuffer = nullptr; uint32_t size = request_->size(); - readBuffer = new(std::nothrow)char[size]; - CHECK(nullptr != readBuffer) << "new readBuffer failed, " - << errno << ":" << strerror(errno); + readBuffer = new (std::nothrow) char[size]; + CHECK(nullptr != readBuffer) + << "new readBuffer failed, " << errno << ":" << strerror(errno); + auto ret = datastore_->ReadSnapshotChunk( + request_->chunkid(), request_->sn(),
readBuffer, request_->offset(), + request_->size()); butil::IOBuf wrapper; wrapper.append_user_data(readBuffer, size, ReadBufferDeleter); do { /** - * 1.成功 + * 1. Success */ if (CSErrorCode::Success == ret) { cntl_->response_attachment().append(wrapper); @@ -548,7 +534,8 @@ void ReadSnapshotRequest::OnApply(uint64_t index, * 2.chunk not exist */ if (CSErrorCode::ChunkNotExistError == ret) { - response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); //NOLINT + response_->set_status( + CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); // NOLINT break; } /** @@ -560,30 +547,29 @@ void ReadSnapshotRequest::OnApply(uint64_t index, << ", request: " << request_->ShortDebugString(); } /** - * 4.其他错误 + * 4. Other errors */ LOG(ERROR) << "read snapshot failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } while (0); response_->set_appliedindex(MaxAppliedIndex(node_, index)); } void ReadSnapshotRequest::OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) { + const ChunkRequest& request, + const butil::IOBuf& data) { (void)datastore; (void)request; (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - // read什么都不用做 + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing; read does not need to do anything here } void DeleteSnapshotRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); CSErrorCode ret = datastore_->DeleteSnapshotChunkOrCorrectSn( request_->chunkid(), request_->correctedsn()); @@ -594,8 +580,7 @@ void DeleteSnapshotRequest::OnApply(uint64_t index, LOG(WARNING) << "delete snapshot or correct sn failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD); } else if (CSErrorCode::InternalError == ret) { LOG(FATAL) << "delete snapshot or correct sn failed: " << " data store return: " << ret @@ -604,20 +589,20 @@ void DeleteSnapshotRequest::OnApply(uint64_t index, LOG(ERROR) << "delete snapshot or correct sn failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } response_->set_appliedindex(MaxAppliedIndex(node_, index)); } -void DeleteSnapshotRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT - const ChunkRequest &request, - const butil::IOBuf &data) { +void DeleteSnapshotRequest::OnApplyFromLog( + std::shared_ptr datastore, // NOLINT + const ChunkRequest& request, const butil::IOBuf& data) { (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - auto ret = datastore->DeleteSnapshotChunkOrCorrectSn( - request.chunkid(), request.correctedsn()); + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing + auto ret = datastore->DeleteSnapshotChunkOrCorrectSn(request.chunkid(), + request.correctedsn()); if (CSErrorCode::Success == ret) { return; } else if (CSErrorCode::BackwardRequestError == ret) { @@ -636,14 +621,12 @@ }
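The OnApply bodies in this file all follow one policy when mapping a CSErrorCode to a response status. Summarized here as a hypothetical helper, purely as a reading aid and not a function this patch introduces:

    // Hypothetical summary of the mapping used throughout op_request.cpp.
    CHUNK_OP_STATUS ToOpStatus(CSErrorCode code) {
        switch (code) {
            case CSErrorCode::Success:
                return CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS;
            case CSErrorCode::BackwardRequestError:
                return CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD;
            case CSErrorCode::ChunkNotExistError:
                return CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST;
            default:
                // InternalError/CrcCheckError/FileFormatError LOG(FATAL)
                // before reaching here, so they never produce a response.
                return CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN;
        }
    }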
void CreateCloneChunkRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - auto ret = datastore_->CreateCloneChunk(request_->chunkid(), - request_->sn(), - request_->correctedsn(), - request_->size(), - request_->location()); + auto ret = datastore_->CreateCloneChunk( + request_->chunkid(), request_->sn(), request_->correctedsn(), + request_->size(), request_->location()); if (CSErrorCode::Success == ret) { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); @@ -652,44 +635,41 @@ void CreateCloneChunkRequest::OnApply(uint64_t index, CSErrorCode::CrcCheckError == ret || CSErrorCode::FileFormatError == ret) { /** - * TODO(yyk): 当前遇到createclonechunk错误直接fatal退出整个 - * ChunkServer后期考虑仅仅标坏这个copyset,保证较好的可用性 + * TODO(yyk): currently a createclonechunk error causes a fatal exit + * of the entire ChunkServer; later, consider only marking this + * copyset bad to provide better availability */ LOG(FATAL) << "create clone failed: " << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } else if (CSErrorCode::ChunkConflictError == ret) { LOG(WARNING) << "create clone chunk exist: " - << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST); + << ", request: " << request_->ShortDebugString(); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST); } else { LOG(ERROR) << "create clone failed: " << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } response_->set_appliedindex(MaxAppliedIndex(node_, index)); } -void CreateCloneChunkRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT - const ChunkRequest &request, - const butil::IOBuf &data) { +void CreateCloneChunkRequest::OnApplyFromLog( + std::shared_ptr datastore, // NOLINT + const ChunkRequest& request, const butil::IOBuf& data) { (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing + auto ret = datastore->CreateCloneChunk(request.chunkid(), request.sn(), request.correctedsn(), - request.size(), - request.location()); + request.size(), request.location()); - if (CSErrorCode::Success == ret) - return; + if (CSErrorCode::Success == ret) return; if (CSErrorCode::ChunkConflictError == ret) { LOG(WARNING) << "create clone chunk exist: " - << ", request: " << request.ShortDebugString(); + << ", request: " << request.ShortDebugString(); return; } @@ -714,8 +694,9 @@ void PasteChunkInternalRequest::Process() { } /** - * 如果propose成功,说明request成功交给了raft处理, - * 那么done_就不能被调用,只有propose失败了才需要提前返回 + * If Propose succeeds, the request has been handed over to raft for + * processing, so done_ must not be called here; only when Propose fails + * do we need to return early */ if (0 == Propose(request_, &data_)) { doneGuard.release(); } } void PasteChunkInternalRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done)
{ brpc::ClosureGuard doneGuard(done); auto ret = datastore_->PasteChunk(request_->chunkid(), - data_.to_string().c_str(), //NOLINT - request_->offset(), - request_->size()); + data_.to_string().c_str(), // NOLINT + request_->offset(), request_->size()); if (CSErrorCode::Success == ret) { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); @@ -746,16 +726,15 @@ void PasteChunkInternalRequest::OnApply(uint64_t index, response_->set_appliedindex(MaxAppliedIndex(node_, index)); } -void PasteChunkInternalRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT - const ChunkRequest &request, - const butil::IOBuf &data) { - // NOTE: 处理过程中优先使用参数传入的datastore/request - auto ret = datastore->PasteChunk(request.chunkid(), - data.to_string().c_str(), - request.offset(), - request.size()); - if (CSErrorCode::Success == ret) - return; +void PasteChunkInternalRequest::OnApplyFromLog( + std::shared_ptr datastore, // NOLINT + const ChunkRequest& request, const butil::IOBuf& data) { + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing + auto ret = + datastore->PasteChunk(request.chunkid(), data.to_string().c_str(), + request.offset(), request.size()); + if (CSErrorCode::Success == ret) return; if (CSErrorCode::InternalError == ret) { LOG(FATAL) << "paste chunk failed: " @@ -767,27 +746,22 @@ void PasteChunkInternalRequest::OnApplyFromLog(std::shared_ptr data } void ScanChunkRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); // read and calculate crc, build scanmap uint32_t crc = 0; size_t size = request_->size(); - std::unique_ptr readBuffer(new(std::nothrow)char[size]); - CHECK(nullptr != readBuffer) - << "new readBuffer failed " << strerror(errno); + std::unique_ptr readBuffer(new (std::nothrow) char[size]); + CHECK(nullptr != readBuffer) << "new readBuffer failed " << strerror(errno); // scan chunk metapage or user data auto ret = 0; if (request_->has_readmetapage() && request_->readmetapage()) { - ret = datastore_->ReadChunkMetaPage(request_->chunkid(), - request_->sn(), + ret = datastore_->ReadChunkMetaPage(request_->chunkid(), request_->sn(), readBuffer.get()); } else { - ret = datastore_->ReadChunk(request_->chunkid(), - request_->sn(), - readBuffer.get(), - request_->offset(), - size); + ret = datastore_->ReadChunk(request_->chunkid(), request_->sn(), + readBuffer.get(), request_->offset(), size); } if (CSErrorCode::Success == ret) { @@ -808,39 +782,32 @@ void ScanChunkRequest::OnApply(uint64_t index, scanManager_->GenScanJobs(jobKey); response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); } else if (CSErrorCode::InternalError == ret) { LOG(FATAL) << "scan chunk failed, read chunk internal error" << ", request: " << request_->ShortDebugString(); } else { - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } } -void ScanChunkRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT - const ChunkRequest &request, - const butil::IOBuf &data) { +void ScanChunkRequest::OnApplyFromLog( + std::shared_ptr datastore, // NOLINT + const ChunkRequest& request, const butil::IOBuf& data) { (void)data; uint32_t crc = 0; size_t size = request.size(); 
- std::unique_ptr readBuffer(new(std::nothrow)char[size]); - CHECK(nullptr != readBuffer) - << "new readBuffer failed " << strerror(errno); + std::unique_ptr readBuffer(new (std::nothrow) char[size]); + CHECK(nullptr != readBuffer) << "new readBuffer failed " << strerror(errno); // scan chunk metapage or user data auto ret = 0; if (request.has_readmetapage() && request.readmetapage()) { - ret = datastore->ReadChunkMetaPage(request.chunkid(), - request.sn(), - readBuffer.get()); + ret = datastore->ReadChunkMetaPage(request.chunkid(), request.sn(), + readBuffer.get()); } else { - ret = datastore->ReadChunk(request.chunkid(), - request.sn(), - readBuffer.get(), - request.offset(), - size); + ret = datastore->ReadChunk(request.chunkid(), request.sn(), + readBuffer.get(), request.offset(), size); } if (CSErrorCode::Success == ret) { @@ -861,10 +828,10 @@ void ScanChunkRequest::OnApplyFromLog(std::shared_ptr datastore, / } } -void ScanChunkRequest::BuildAndSendScanMap(const ChunkRequest &request, +void ScanChunkRequest::BuildAndSendScanMap(const ChunkRequest& request, uint64_t index, uint32_t crc) { // send rpc to leader - brpc::Channel *channel = new brpc::Channel(); + brpc::Channel* channel = new brpc::Channel(); if (channel->Init(peer_.addr, NULL) != 0) { LOG(ERROR) << "Fail to init channel to chunkserver for send scanmap: " << peer_; @@ -873,7 +840,7 @@ void ScanChunkRequest::BuildAndSendScanMap(const ChunkRequest &request, } // build scanmap - ScanMap *scanMap = new ScanMap(); + ScanMap* scanMap = new ScanMap(); scanMap->set_logicalpoolid(request.logicpoolid()); scanMap->set_copysetid(request.copysetid()); scanMap->set_chunkid(request.chunkid()); @@ -882,20 +849,17 @@ void ScanChunkRequest::BuildAndSendScanMap(const ChunkRequest &request, scanMap->set_offset(request.offset()); scanMap->set_len(request.size()); - FollowScanMapRequest *scanMapRequest = new FollowScanMapRequest(); + FollowScanMapRequest* scanMapRequest = new FollowScanMapRequest(); scanMapRequest->set_allocated_scanmap(scanMap); ScanService_Stub stub(channel); brpc::Controller* cntl = new brpc::Controller(); cntl->set_timeout_ms(request.sendscanmaptimeoutms()); - FollowScanMapResponse *scanMapResponse = new FollowScanMapResponse(); - SendScanMapClosure *done = new SendScanMapClosure( - scanMapRequest, - scanMapResponse, - request.sendscanmaptimeoutms(), - request.sendscanmapretrytimes(), - request.sendscanmapretryintervalus(), - cntl, channel); + FollowScanMapResponse* scanMapResponse = new FollowScanMapResponse(); + SendScanMapClosure* done = new SendScanMapClosure( + scanMapRequest, scanMapResponse, request.sendscanmaptimeoutms(), + request.sendscanmapretrytimes(), request.sendscanmapretryintervalus(), + cntl, channel); LOG(INFO) << "logid = " << cntl->log_id() << " Sending scanmap: " << scanMap->ShortDebugString() << " to leader: " << peer_.addr; diff --git a/src/chunkserver/op_request.h b/src/chunkserver/op_request.h index c29484f79b..d83a7ab827 100755 --- a/src/chunkserver/op_request.h +++ b/src/chunkserver/op_request.h @@ -23,21 +23,21 @@ #ifndef SRC_CHUNKSERVER_OP_REQUEST_H_ #define SRC_CHUNKSERVER_OP_REQUEST_H_ -#include -#include #include +#include +#include #include -#include "proto/chunk.pb.h" #include "include/chunkserver/chunkserver_common.h" +#include "proto/chunk.pb.h" #include "src/chunkserver/concurrent_apply/concurrent_apply.h" #include "src/chunkserver/datastore/define.h" #include "src/chunkserver/scan_manager.h" -using ::google::protobuf::RpcController; -using 
::curve::chunkserver::concurrent::ConcurrentApplyModule; using ::curve::chunkserver::concurrent::ApplyTaskType; +using ::curve::chunkserver::concurrent::ConcurrentApplyModule; +using ::google::protobuf::RpcController; namespace curve { namespace chunkserver { @@ -49,12 +49,10 @@ class CloneCore; class CloneTask; class ScanManager; - -inline bool existCloneInfo(const ChunkRequest *request) { +inline bool existCloneInfo(const ChunkRequest* request) { if (request != nullptr) { - if (request->has_clonefilesource() && - request->has_clonefileoffset()) { - return true; + if (request->has_clonefilesource() && request->has_clonefileoffset()) { + return true; } } return false; @@ -63,97 +61,104 @@ inline bool existCloneInfo(const ChunkRequest *request) { class ChunkOpRequest : public std::enable_shared_from_this { public: ChunkOpRequest(); - ChunkOpRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done); + ChunkOpRequest(std::shared_ptr nodePtr, RpcController* cntl, + const ChunkRequest* request, ChunkResponse* response, + ::google::protobuf::Closure* done); virtual ~ChunkOpRequest() = default; /** - * 处理request,实际上是Propose给相应的copyset + * Processing a request actually involves proposing it to the + * corresponding copyset */ virtual void Process(); /** - * request正常情况从内存中获取上下文on apply逻辑 - * @param index:此op log entry的index - * @param done:对应的ChunkClosure + * In the normal case, the request obtains its context from memory and + * runs the on-apply logic + * @param index: The index of this op log entry + * @param done: corresponding ChunkClosure */ - virtual void OnApply(uint64_t index, - ::google::protobuf::Closure *done) = 0; + virtual void OnApply(uint64_t index, ::google::protobuf::Closure* done) = 0; /** - * NOTE: 子类实现过程中优先使用参数传入的datastore/request - * 从log entry反序列之后得到request详细信息进行处理,request - * 相关的上下文和依赖的data store都是从参数传递进去的 - * 1.重启回放日志,从磁盘读取op log entry然后执行on apply逻辑 - * 2. follower执行on apply的逻辑 - * @param datastore:chunk数据持久化层 - * @param request:反序列化后得到的request 细信息 - * @param data:反序列化后得到的request要处理的数据 + * NOTE: In subclass implementations, prefer the datastore/request passed + * in as parameters. The detailed request is obtained by deserializing the + * log entry, and both the related context and the data store it depends + * on are passed in as parameters. This path is used when: + * 1. replaying the log after a restart: read the op log entry from disk, + * then execute the on-apply logic + * 2.
a follower executes the on-apply logic + * @param datastore: chunk data persistence layer + * @param request: The detailed request information obtained after + * deserialization + * @param data: The data to be processed by the request, obtained after + * deserialization */ virtual void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) = 0; + const ChunkRequest& request, + const butil::IOBuf& data) = 0; /** - * 返回request的done成员 + * Return the done member of the request */ - ::google::protobuf::Closure *Closure() { return done_; } + ::google::protobuf::Closure* Closure() { return done_; } /** - * 返回chunk id + * Return chunk id */ ChunkID ChunkId() { return request_->chunkid(); } /** - * 返回请求类型 + * Return request type */ CHUNK_OP_TYPE OpType() { return request_->optype(); } /** - * 返回请求大小 + * Return request size */ uint32_t RequestSize() { return request_->size(); } /** - * 转发request给leader + * Forward request to leader */ virtual void RedirectChunkRequest(); public: /** - * Op序列化工具函数 + * Op serialization utility function * | data | * | op meta | op data | * | op request length | op request | * | 32 bit | .... | - * 各个字段解释如下: - * data: encode之后的数据,实际上就是一条op log entry的data - * op meta: 就是op的元数据,这里是op request部分的长度 - * op data: 就是request通过protobuf序列化后的数据 - * @param request:Chunk Request - * @param data:请求中包含的数据内容 - * @param log:出参,存放序列化好的数据,用户自己保证data!=nullptr - * @return 0成功,-1失败 + * The fields are explained as follows: + * data: the encoded result, i.e. the data of one op log entry + * op meta: the metadata of the op, here the length of the op request + * section + * op data: the request serialized through protobuf + * @param request: Chunk Request + * @param data: The data content contained in the request + * @param log: Output parameter that stores the serialized data. Callers + * are responsible for ensuring that data != nullptr. + * @return 0 on success, -1 on failure */ - static int Encode(const ChunkRequest *request, - const butil::IOBuf *data, - butil::IOBuf *log); + static int Encode(const ChunkRequest* request, const butil::IOBuf* data, + butil::IOBuf* log);
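The layout documented above is a 32-bit big-endian length prefix, followed by the serialized ChunkRequest and then the raw payload. A condensed sketch consistent with that format (an illustration, not a verbatim copy of this file's Encode):

    // Sketch of the documented log-entry layout:
    //   [ 32-bit request length | serialized ChunkRequest | payload ]
    int EncodeSketch(const ChunkRequest* request, const butil::IOBuf* data,
                     butil::IOBuf* log) {
        const uint32_t metaSize = butil::HostToNet32(request->ByteSize());
        log->append(&metaSize, sizeof(uint32_t));      // op meta: length
        butil::IOBufAsZeroCopyOutputStream wrapper(log);
        if (!request->SerializeToZeroCopyStream(&wrapper)) {
            return -1;                                 // op request
        }
        if (data != nullptr) {
            log->append(*data);                        // op data: payload
        }
        return 0;
    }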
/** - * 反序列化,从log entry得到ChunkOpRequest,当前反序列出的ChunkRequest和data - * 都会从出参传出去,而不会放在ChunkOpRequest的成员变量里面 - * @param log:op log entry - * @param request: 出参,存放反序列上下文 - * @param data:出参,op操作的数据 - * @return nullptr,失败,否则返回相应的ChunkOpRequest + * Deserialize: obtain a ChunkOpRequest from a log entry. The deserialized + * ChunkRequest and data are passed out through the output parameters + * rather than stored in ChunkOpRequest member variables + * @param log: op log entry + * @param request: output parameter that stores the deserialized request + * @param data: output parameter, the data the op operates on + * @return nullptr on failure, otherwise the corresponding ChunkOpRequest */ static std::shared_ptr Decode(butil::IOBuf log, - ChunkRequest *request, - butil::IOBuf *data, + ChunkRequest* request, + butil::IOBuf* data, uint64_t index, PeerId leaderId); @@ -161,49 +166,43 @@ protected: /** - * 打包request为braft::task,propose给相应的复制组 - * @param request:Chunk Request - * @param data:请求中包含的数据内容 - * @return 0成功,-1失败 + * Package the request as a braft::Task and propose it to the + * corresponding replication group + * @param request: Chunk Request + * @param data: The data content contained in the request + * @return 0 on success, -1 on failure */ - int Propose(const ChunkRequest *request, - const butil::IOBuf *data); + int Propose(const ChunkRequest* request, const butil::IOBuf* data); protected: - // chunk持久化接口 + // chunk persistence interface std::shared_ptr datastore_; - // 复制组 + // replication group std::shared_ptr node_; // rpc controller - brpc::Controller *cntl_; - // rpc 请求 + brpc::Controller* cntl_; - const ChunkRequest *request_; - // rpc 返回 - ChunkResponse *response_; + // rpc request + const ChunkRequest* request_; + // rpc response + ChunkResponse* response_; // rpc done closure - ::google::protobuf::Closure *done_; + ::google::protobuf::Closure* done_; }; class DeleteChunkRequest : public ChunkOpRequest { public: - DeleteChunkRequest() : - ChunkOpRequest() {} + DeleteChunkRequest() : ChunkOpRequest() {} DeleteChunkRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - cntl, - request, - response, - done) {} + RpcController* cntl, const ChunkRequest* request, + ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, cntl, request, response, done) {} virtual ~DeleteChunkRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; }; class ReadChunkRequest : public ChunkOpRequest { @@ -211,154 +210,118 @@ class ReadChunkRequest : public ChunkOpRequest { friend class PasteChunkInternalRequest; public: - ReadChunkRequest() : - ChunkOpRequest() {} + ReadChunkRequest() : ChunkOpRequest() {} ReadChunkRequest(std::shared_ptr nodePtr, - CloneManager* cloneMgr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, -
::google::protobuf::Closure *done); + CloneManager* cloneMgr, RpcController* cntl, + const ChunkRequest* request, ChunkResponse* response, + ::google::protobuf::Closure* done); virtual ~ReadChunkRequest() = default; void Process() override; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; - const ChunkRequest* GetChunkRequest() { - return request_; - } + const ChunkRequest* GetChunkRequest() { return request_; } private: - // 根据chunk信息判断是否需要拷贝数据 + // Determine whether to copy data based on chunk information bool NeedClone(const CSChunkInfo& chunkInfo); - // 从chunk文件中读数据 + // Reading data from chunk file void ReadChunk(); private: CloneManager* cloneMgr_; - // 并发模块 + // Concurrent module ConcurrentApplyModule* concurrentApplyModule_; - // 保存 apply index + // Save the apply index uint64_t applyIndex; }; class WriteChunkRequest : public ChunkOpRequest { public: - WriteChunkRequest() : - ChunkOpRequest() {} - WriteChunkRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - cntl, - request, - response, - done) {} + WriteChunkRequest() : ChunkOpRequest() {} + WriteChunkRequest(std::shared_ptr nodePtr, RpcController* cntl, + const ChunkRequest* request, ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, cntl, request, response, done) {} virtual ~WriteChunkRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done); + void OnApply(uint64_t index, ::google::protobuf::Closure* done); void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; }; class ReadSnapshotRequest : public ChunkOpRequest { public: - ReadSnapshotRequest() : - ChunkOpRequest() {} + ReadSnapshotRequest() : ChunkOpRequest() {} ReadSnapshotRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - cntl, - request, - response, - done) {} + RpcController* cntl, const ChunkRequest* request, + ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, cntl, request, response, done) {} virtual ~ReadSnapshotRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; }; class DeleteSnapshotRequest : public ChunkOpRequest { public: - DeleteSnapshotRequest() : - ChunkOpRequest() {} + DeleteSnapshotRequest() : ChunkOpRequest() {} DeleteSnapshotRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - cntl, - request, - response, - done) {} + RpcController* cntl, const ChunkRequest* request, + ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, 
cntl, request, response, done) {} virtual ~DeleteSnapshotRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; }; class CreateCloneChunkRequest : public ChunkOpRequest { public: - CreateCloneChunkRequest() : - ChunkOpRequest() {} + CreateCloneChunkRequest() : ChunkOpRequest() {} CreateCloneChunkRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - cntl, - request, - response, - done) {} + RpcController* cntl, const ChunkRequest* request, + ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, cntl, request, response, done) {} virtual ~CreateCloneChunkRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; }; class PasteChunkInternalRequest : public ChunkOpRequest { public: - PasteChunkInternalRequest() : - ChunkOpRequest() {} + PasteChunkInternalRequest() : ChunkOpRequest() {} PasteChunkInternalRequest(std::shared_ptr nodePtr, - const ChunkRequest *request, - ChunkResponse *response, - const butil::IOBuf* data, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - nullptr, - request, - response, - done) { - if (data != nullptr) { - data_ = *data; - } + const ChunkRequest* request, + ChunkResponse* response, const butil::IOBuf* data, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, nullptr, request, response, done) { + if (data != nullptr) { + data_ = *data; } + } virtual ~PasteChunkInternalRequest() = default; void Process() override; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; private: butil::IOBuf data_; @@ -366,28 +329,22 @@ class PasteChunkInternalRequest : public ChunkOpRequest { class ScanChunkRequest : public ChunkOpRequest { public: - ScanChunkRequest(uint64_t index, PeerId peer) : - ChunkOpRequest(), index_(index), peer_(peer) {} + ScanChunkRequest(uint64_t index, PeerId peer) + : ChunkOpRequest(), index_(index), peer_(peer) {} ScanChunkRequest(std::shared_ptr nodePtr, - ScanManager* scanManager, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - nullptr, - request, - response, - done), - scanManager_(scanManager) {} + ScanManager* scanManager, const ChunkRequest* request, + ChunkResponse* response, ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, nullptr, request, response, done), + scanManager_(scanManager) {} virtual ~ScanChunkRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void 
OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; private: - void BuildAndSendScanMap(const ChunkRequest &request, uint64_t index, + void BuildAndSendScanMap(const ChunkRequest& request, uint64_t index, uint32_t crc); ScanManager* scanManager_; uint64_t index_; diff --git a/src/chunkserver/passive_getfn.h b/src/chunkserver/passive_getfn.h index ac6655d1b2..56b6cd01eb 100644 --- a/src/chunkserver/passive_getfn.h +++ b/src/chunkserver/passive_getfn.h @@ -23,70 +23,70 @@ #ifndef SRC_CHUNKSERVER_PASSIVE_GETFN_H_ #define SRC_CHUNKSERVER_PASSIVE_GETFN_H_ -#include "src/chunkserver/trash.h" #include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/datastore/file_pool.h" #include "src/chunkserver/raftlog/curve_segment_log_storage.h" +#include "src/chunkserver/trash.h" namespace curve { namespace chunkserver { - /** - * 获取datastore中chunk文件的数量 - * @param arg: datastore的对象指针 - */ - uint32_t GetDatastoreChunkCountFunc(void* arg); - /** - * @brief: Get the number of WAL segment in CurveSegmentLogStorage - * @param arg: The pointer to CurveSegmentLogStorage - */ - uint32_t GetLogStorageWalSegmentCountFunc(void* arg); - /** - * 获取datastore中快照chunk的数量 - * @param arg: datastore的对象指针 - */ - uint32_t GetDatastoreSnapshotCountFunc(void* arg); - /** - * 获取datastore中clone chunk的数量 - * @param arg: datastore的对象指针 - */ - uint32_t GetDatastoreCloneChunkCountFunc(void* arg); - /** - * 获取chunkserver上chunk文件的数量 - * @param arg: nullptr - */ - uint32_t GetTotalChunkCountFunc(void* arg); - /** - * @brief: Get the total number of WAL segment in chunkserver - * @param arg: The pointer to ChunkServerMetric - */ - uint32_t GetTotalWalSegmentCountFunc(void* arg); +/** + * Obtain the number of chunk files in the datastore + * @param arg: Object pointer to datastore + */ +uint32_t GetDatastoreChunkCountFunc(void* arg); +/** + * @brief: Get the number of WAL segment in CurveSegmentLogStorage + * @param arg: The pointer to CurveSegmentLogStorage + */ +uint32_t GetLogStorageWalSegmentCountFunc(void* arg); +/** + * Obtain the number of snapshot chunks in the datastore + * @param arg: Object pointer to datastore + */ +uint32_t GetDatastoreSnapshotCountFunc(void* arg); +/** + * Obtain the number of clone chunks in the datastore + * @param arg: Object pointer to datastore + */ +uint32_t GetDatastoreCloneChunkCountFunc(void* arg); +/** + * Obtain the number of chunk files on the chunkserver + * @param arg: nullptr + */ +uint32_t GetTotalChunkCountFunc(void* arg); +/** + * @brief: Get the total number of WAL segment in chunkserver + * @param arg: The pointer to ChunkServerMetric + */ +uint32_t GetTotalWalSegmentCountFunc(void* arg); - /** - * 获取chunkserver上快照chunk的数量 - * @param arg: nullptr - */ - uint32_t GetTotalSnapshotCountFunc(void* arg); - /** - * 获取chunkserver上clone chunk的数量 - * @param arg: nullptr - */ - uint32_t GetTotalCloneChunkCountFunc(void* arg); - /** - * 获取chunkfilepool中剩余chunk的数量 - * @param arg: chunkfilepool的对象指针 - */ - uint32_t GetChunkLeftFunc(void* arg); - /** - * 获取walfilepool中剩余chunk的数量 - * @param arg: walfilepool的对象指针 - */ - uint32_t GetWalSegmentLeftFunc(void* arg); - /** - * 获取trash中chunk的数量 - * @param arg: trash的对象指针 - */ - uint32_t GetChunkTrashedFunc(void* arg); +/** + * Obtain the number of snapshot chunks on the chunkserver + * @param arg: nullptr + */ +uint32_t GetTotalSnapshotCountFunc(void* arg); +/** + * Obtain the number of clone chunks on the chunkserver 
+ * @param arg: nullptr + */ +uint32_t GetTotalCloneChunkCountFunc(void* arg); +/** + * Obtain the number of remaining chunks in the chunkfilepool + * @param arg: Object pointer to chunkfilepool + */ +uint32_t GetChunkLeftFunc(void* arg); +/** + * Obtain the number of remaining chunks in the walfilepool + * @param arg: Object pointer to walfilepool + */ +uint32_t GetWalSegmentLeftFunc(void* arg); +/** + * Obtain the number of chunks in the trash + * @param arg: Object pointer to trash + */ +uint32_t GetChunkTrashedFunc(void* arg); } // namespace chunkserver } // namespace curve diff --git a/src/chunkserver/raftsnapshot/curve_file_adaptor.h b/src/chunkserver/raftsnapshot/curve_file_adaptor.h index 2f6b23ec0b..b4467bb268 100644 --- a/src/chunkserver/raftsnapshot/curve_file_adaptor.h +++ b/src/chunkserver/raftsnapshot/curve_file_adaptor.h @@ -31,10 +31,9 @@ namespace chunkserver { class CurveFileAdaptor : public braft::PosixFileAdaptor { public: explicit CurveFileAdaptor(int fd) : PosixFileAdaptor(fd) {} - // close之前必须先sync,保证数据落盘,其他逻辑不变 - bool close() override { - return sync() && braft::PosixFileAdaptor::close(); - } + // sync must be called before close to ensure the data has reached the + // disk; other logic remains unchanged + bool close() override { return sync() && braft::PosixFileAdaptor::close(); } }; } // namespace chunkserver diff --git a/src/chunkserver/raftsnapshot/curve_file_service.cpp b/src/chunkserver/raftsnapshot/curve_file_service.cpp index f1d5d931e0..4395234d6f 100644 --- a/src/chunkserver/raftsnapshot/curve_file_service.cpp +++ b/src/chunkserver/raftsnapshot/curve_file_service.cpp @@ -36,15 +36,17 @@ // Authors: Zhangyi Chen(chenzhangyi01@baidu.com) -#include -#include -#include -#include +#include "src/chunkserver/raftsnapshot/curve_file_service.h" + +#include #include #include -#include +#include +#include +#include +#include + #include -#include "src/chunkserver/raftsnapshot/curve_file_service.h" namespace curve { namespace chunkserver { @@ -52,9 +54,9 @@ namespace chunkserver { CurveFileService& kCurveFileService = CurveFileService::GetInstance(); void CurveFileService::get_file(::google::protobuf::RpcController* controller, - const ::braft::GetFileRequest* request, - ::braft::GetFileResponse* response, - ::google::protobuf::Closure* done) { + const ::braft::GetFileRequest* request, + ::braft::GetFileResponse* response, + ::google::protobuf::Closure* done) { scoped_refptr reader; brpc::ClosureGuard done_gurad(done); brpc::Controller* cntl = (brpc::Controller*)controller; @@ -63,21 +65,23 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, if (iter == _reader_map.end()) { lck.unlock(); /** - * 为了和文件不存在的错误区分开来,且考虑到install snapshot - * 的uri format为:remote://ip:port/reader_id,所以使用ENXIO - * 代表reader id不存在的错误 + * In order to distinguish this from the error of a non-existent file, + * and considering that the uri format for installing a snapshot is + * remote://ip:port/reader_id, ENXIO is used to represent the error of a + * non-existent reader id.
diff --git a/src/chunkserver/raftsnapshot/curve_file_service.cpp b/src/chunkserver/raftsnapshot/curve_file_service.cpp
index f1d5d931e0..4395234d6f 100644
--- a/src/chunkserver/raftsnapshot/curve_file_service.cpp
+++ b/src/chunkserver/raftsnapshot/curve_file_service.cpp
@@ -36,15 +36,17 @@
 // Authors: Zhangyi Chen(chenzhangyi01@baidu.com)

-#include
-#include
-#include
-#include
+#include "src/chunkserver/raftsnapshot/curve_file_service.h"
+
+#include
 #include
 #include
-#include
+#include
+#include
+#include
+#include
+
 #include
-#include "src/chunkserver/raftsnapshot/curve_file_service.h"

 namespace curve {
 namespace chunkserver {
@@ -52,9 +54,9 @@ namespace chunkserver {
 CurveFileService& kCurveFileService = CurveFileService::GetInstance();

 void CurveFileService::get_file(::google::protobuf::RpcController* controller,
-                              const ::braft::GetFileRequest* request,
-                              ::braft::GetFileResponse* response,
-                              ::google::protobuf::Closure* done) {
+                                const ::braft::GetFileRequest* request,
+                                ::braft::GetFileResponse* response,
+                                ::google::protobuf::Closure* done) {
     scoped_refptr reader;
     brpc::ClosureGuard done_gurad(done);
     brpc::Controller* cntl = (brpc::Controller*)controller;
@@ -63,21 +65,23 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller,
     if (iter == _reader_map.end()) {
         lck.unlock();
         /**
-         * 为了和文件不存在的错误区分开来,且考虑到install snapshot
-         * 的uri format为:remote://ip:port/reader_id,所以使用ENXIO
-         * 代表reader id不存在的错误
+         * To distinguish this from the file-not-found error, and given that
+         * the install snapshot uri format is remote://ip:port/reader_id,
+         * ENXIO is used to represent a non-existent reader id.
         */
        cntl->SetFailed(ENXIO, "Fail to find reader=%" PRId64,
-                       request->reader_id());
+                        request->reader_id());
         return;
     }
     // Don't touch iter ever after
     reader = iter->second;
     lck.unlock();
-    LOG(INFO) << "get_file for " << cntl->remote_side() << " path="
-              << reader->path() << " filename=" << request->filename()
-              << " offset=" << request->offset() << " count="
-              << request->count();
+    LOG(INFO) << "get_file for " << cntl->remote_side()
+              << " path=" << reader->path()
+              << " filename=" << request->filename()
+              << " offset=" << request->offset()
+              << " count=" << request->count();

     if (request->count() <= 0 || request->offset() < 0) {
         cntl->SetFailed(brpc::EREQUEST, "Invalid request=%s",
@@ -88,10 +92,11 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller,
     butil::IOBuf buf;
     bool is_eof = false;
     size_t read_count = 0;
-    // 1. 如果是read attch meta file
+    // 1. If this is a read of the attach meta file
     if (request->filename() == BRAFT_SNAPSHOT_ATTACH_META_FILE) {
-        // 如果没有设置snapshot attachment,那么read文件的长度为零
-        // 表示没有 snapshot attachment文件列表
+        // If no snapshot attachment is set, the read returns a zero-length
+        // file, indicating that there is no snapshot attachment file list.
         bool snapshotAttachmentExist = false;
         {
             std::unique_lock lck(_mutex);
@@ -104,7 +109,7 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller,
             }
         }
         if (snapshotAttachmentExist) {
-            // 否则获取snapshot attachment file list
+            // Otherwise, obtain the snapshot attachment file list
            std::vector files;
            _snapshot_attachment->list_attach_files(&files, reader->path());
            CurveSnapshotAttachMetaTable attachMetaTable;
@@ -121,7 +126,7 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller,
                        request->reader_id());
                 return;
             }
-            CurveSnapshotFileReader *reader =
+            CurveSnapshotFileReader* reader =
                 dynamic_cast(it->second.get());
             if (reader != nullptr) {
                 reader->set_attach_meta_table(attachMetaTable);
@@ -135,11 +140,11 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller,
             }

             if (0 != attachMetaTable.save_to_iobuf_as_remote(&buf)) {
-                // 内部错误: EINTERNAL
+                // Internal error: EINTERNAL
                 LOG(ERROR) << "Fail to serialize "
-                    "LocalSnapshotAttachMetaTable as iobuf";
+                              "LocalSnapshotAttachMetaTable as iobuf";
                 cntl->SetFailed(brpc::EINTERNAL,
-                    "serialize snapshot attach meta table fail");
+                                "serialize snapshot attach meta table fail");
                 return;
             } else {
                 LOG(INFO) << "LocalSnapshotAttachMetaTable encode buf length = "
@@ -149,17 +154,15 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller,
                 read_count = buf.size();
             }
         } else {
-        // 2. 否则其它文件下载继续走raft原先的文件下载流程
+        // 2.
Otherwise, other file downloads keep following raft's
+        // original file download flow
         const int rc = reader->read_file(
-            &buf, request->filename(),
-            request->offset(), request->count(),
-            request->read_partly(),
-            &read_count,
-            &is_eof);
+            &buf, request->filename(), request->offset(), request->count(),
+            request->read_partly(), &read_count, &is_eof);
         if (rc != 0) {
             cntl->SetFailed(rc, "Fail to read from path=%s filename=%s : %s",
-                            reader->path().c_str(),
-                            request->filename().c_str(), berror(rc));
+                            reader->path().c_str(), request->filename().c_str(),
+                            berror(rc));
             return;
         }
     }
@@ -177,13 +180,13 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller,
 }

 void CurveFileService::set_snapshot_attachment(
-    SnapshotAttachment *snapshot_attachment) {
+    SnapshotAttachment* snapshot_attachment) {
     _snapshot_attachment = snapshot_attachment;
 }

 CurveFileService::CurveFileService() {
-    _next_id = ((int64_t)getpid() << 45) |
-        (butil::gettimeofday_us() << 17 >> 17);
+    _next_id =
+        ((int64_t)getpid() << 45) | (butil::gettimeofday_us() << 17 >> 17);
 }

 int CurveFileService::add_reader(braft::FileReader* reader,
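One detail of get_file() above worth keeping in mind: when the requested
filename is BRAFT_SNAPSHOT_ATTACH_META_FILE and no snapshot attachment is set,
the service answers with a zero-length buffer rather than an error. A minimal
sketch of how the peer side can interpret that, using a simplified stand-in
(not brpc's real butil::IOBuf) and a hypothetical helper name:

    #include <cstddef>
    #include <vector>

    // Stand-in for butil::IOBuf; only size() matters for this sketch.
    struct FakeIOBuf {
        std::vector<char> bytes;
        size_t size() const { return bytes.size(); }
    };

    // Hypothetical helper: *hasAttachments tells whether an attach meta
    // table actually needs to be deserialized.
    void InterpretAttachMetaRead(const FakeIOBuf& buf, bool* hasAttachments) {
        // A zero-length read means "no attachment list", not a failure.
        *hasAttachments = (buf.size() != 0);
    }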
diff --git a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp
index 18479b26a6..d46a7f18b9 100644
--- a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp
+++ b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp
@@ -20,16 +20,17 @@
 * Author: tongguangxun
 */

+#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h"
+
 #include
-#include
-#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h"
+#include

 namespace curve {
 namespace chunkserver {

 CurveFilesystemAdaptor::CurveFilesystemAdaptor(
-            std::shared_ptr chunkFilePool,
-            std::shared_ptr lfs) {
+    std::shared_ptr chunkFilePool,
+    std::shared_ptr lfs) {
     lfs_ = lfs;
     chunkFilePool_ = chunkFilePool;
     uint64_t metapageSize = chunkFilePool->GetFilePoolOpt().metaPageSize;
@@ -39,8 +40,7 @@ CurveFilesystemAdaptor::CurveFilesystemAdaptor(
 }

 CurveFilesystemAdaptor::CurveFilesystemAdaptor()
-    : tempMetaPageContent(nullptr) {
-}
+    : tempMetaPageContent(nullptr) {}

 CurveFilesystemAdaptor::~CurveFilesystemAdaptor() {
     if (tempMetaPageContent != nullptr) {
@@ -50,14 +50,14 @@ CurveFilesystemAdaptor::~CurveFilesystemAdaptor() {
     LOG(INFO) << "release raftsnapshot filesystem adaptor!";
 }

-braft::FileAdaptor* CurveFilesystemAdaptor::open(const std::string& path,
-    int oflag, const ::google::protobuf::Message* file_meta,
-    butil::File::Error* e) {
-    (void) file_meta;
+braft::FileAdaptor* CurveFilesystemAdaptor::open(
+    const std::string& path, int oflag,
+    const ::google::protobuf::Message* file_meta, butil::File::Error* e) {
+    (void)file_meta;

     static std::once_flag local_s_check_cloexec_once;
     static bool local_s_support_cloexec_on_open = false;
-    std::call_once(local_s_check_cloexec_once, [&](){
+    std::call_once(local_s_check_cloexec_once, [&]() {
         int fd = lfs_->Open("/dev/zero", O_RDONLY | O_CLOEXEC);
         local_s_support_cloexec_on_open = (fd != -1);
         if (fd != -1) {
@@ -69,18 +69,21 @@ braft::FileAdaptor* CurveFilesystemAdaptor::open(const std::string& path,
     if (cloexec && !local_s_support_cloexec_on_open) {
         oflag &= (~O_CLOEXEC);
     }
-    // Open就使用sync标志是为了避免集中在close一次性sync,对于16MB的chunk文件可能会造成抖动
+    // Opening with the sync flag avoids one big sync concentrated at close
+    // time, which could cause jitter for 16MB chunk files
     oflag |= O_SYNC;

-    // 先判断当前文件是否需要过滤,如果需要过滤,就直接走下面逻辑,不走chunkfilepool
-    // 如果open操作携带create标志,则从chunkfilepool取,否则保持原来语意
-    // 如果待打开的文件已经存在,则直接使用原有语意
-    if (!NeedFilter(path) &&
-        (oflag & O_CREAT) &&
+    // First check whether the current file needs to be filtered: filtered
+    // files bypass the chunkfilepool and go straight through the logic below.
+    // If the open operation carries the create flag, the file is taken from
+    // the chunkfilepool; otherwise the original semantics are kept.
+    // If the file to be opened already exists, the original semantics are
+    // used directly.
+    if (!NeedFilter(path) && (oflag & O_CREAT) &&
        false == lfs_->FileExists(path)) {
-        // 从chunkfile pool中取出chunk返回
+        // Take a chunk out of the chunkfile pool and return it
        int rc = chunkFilePool_->GetFile(path, tempMetaPageContent);
-        // 如果从FilePool中取失败,返回错误。
+        // If retrieving from FilePool fails, an error is returned.
        if (rc != 0) {
            LOG(ERROR) << "get chunk from chunkfile pool failed!";
            return NULL;
@@ -93,17 +96,17 @@ braft::FileAdaptor* CurveFilesystemAdaptor::open(const std::string& path,
     int fd = lfs_->Open(path.c_str(), oflag);
     if (e) {
         *e = (fd < 0) ? butil::File::OSErrorToFileError(errno)
-                     : butil::File::FILE_OK;
+                      : butil::File::FILE_OK;
     }

     if (fd < 0) {
         if (oflag & O_CREAT) {
             LOG(ERROR) << "snapshot create chunkfile failed, filename = "
-                   << path.c_str() << ", errno = " << errno;
+                       << path.c_str() << ", errno = " << errno;
         } else {
             LOG(WARNING) << "snapshot open chunkfile failed,"
-                     << "may be deleted by user, filename = "
-                     << path.c_str() << ",errno = " << errno;
+                         << "may be deleted by user, filename = "
+                         << path.c_str() << ",errno = " << errno;
         }
         return NULL;
     }
@@ -115,10 +118,12 @@ braft::FileAdaptor* CurveFilesystemAdaptor::open(const std::string& path,
 }

 bool CurveFilesystemAdaptor::delete_file(const std::string& path,
-    bool recursive) {
-    // 1. 如果是目录且recursive=true,那么遍历目录内容回收
-    // 2. 如果是目录且recursive=false,那么判断目录内容是否为空,不为空返回false
-    // 3. 如果是文件直接回收
+                                         bool recursive) {
+    // 1. If it is a directory and recursive=true, traverse the directory
+    // contents and recycle them
+    // 2. If it is a directory and recursive=false, check whether the
+    // directory is empty; return false if it is not empty
+    // 3. If it is a file, recycle it directly
    if (lfs_->DirExists(path)) {
        std::vector dircontent;
        lfs_->List(path, &dircontent);
@@ -130,20 +135,21 @@ bool CurveFilesystemAdaptor::delete_file(const std::string& path,
        }
    } else {
        if (lfs_->FileExists(path)) {
-            // 如果在过滤名单里,就直接删除
-            if (NeedFilter(path)) {
-                return lfs_->Delete(path) == 0;
-            } else {
-                // chunkfilepool内部会检查path对应文件合法性,如果不符合就直接删除
+            // If it is on the filter list, delete it directly
+            if (NeedFilter(path)) {
+                return lfs_->Delete(path) == 0;
+            } else {
+                // The chunkfilepool internally checks the validity of the
+                // file at the given path, and deletes it directly if it does
+                // not conform
                return chunkFilePool_->RecycleFile(path) == 0;
-            }
+            }
        }
    }
    return true;
 }

-bool CurveFilesystemAdaptor::RecycleDirRecursive(
-    const std::string& path) {
+bool CurveFilesystemAdaptor::RecycleDirRecursive(const std::string& path) {
     std::vector dircontent;
     lfs_->List(path, &dircontent);
     bool rc = true;
@@ -152,7 +158,7 @@ bool CurveFilesystemAdaptor::RecycleDirRecursive(
         if (lfs_->DirExists(todeletePath)) {
             RecycleDirRecursive(todeletePath);
         } else {
-            // 如果在过滤名单里,就直接删除
+            // If it is on the filter list, delete it directly
             if (NeedFilter(todeletePath)) {
                 if (lfs_->Delete(todeletePath) != 0) {
                     LOG(ERROR) << "delete " << todeletePath << ", failed!";
@@ -173,16 +179,18 @@ bool CurveFilesystemAdaptor::RecycleDirRecursive(
 }

 bool CurveFilesystemAdaptor::rename(const std::string& old_path,
-                                        const std::string& new_path) {
+                                    const std::string& new_path) {
     if (!NeedFilter(new_path) && lfs_->FileExists(new_path)) {
-        // chunkfilepool内部会检查path对应文件合法性,如果不符合就直接删除
+        // The chunkfilepool internally checks the validity of the file at
+        // the given path, and deletes it directly if it does not conform
         chunkFilePool_->RecycleFile(new_path);
     }
     return lfs_->Rename(old_path, new_path) == 0;
 }

 void CurveFilesystemAdaptor::SetFilterList(
-        const std::vector& filter) {
+    const std::vector& filter) {
     filterList_.assign(filter.begin(), filter.end());
 }
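The open() path above takes a pre-formatted chunk from the pool only when
three conditions hold at once. A condensed, self-contained restatement of that
predicate (ShouldTakeFromPool is a hypothetical name used purely for
illustration):

    #include <fcntl.h>
    #include <cassert>

    // Mirrors: !NeedFilter(path) && (oflag & O_CREAT) && !FileExists(path)
    bool ShouldTakeFromPool(bool needFilter, int oflag, bool exists) {
        return !needFilter && (oflag & O_CREAT) && !exists;
    }

    int main() {
        assert(ShouldTakeFromPool(false, O_CREAT | O_RDWR, false));  // new chunk
        assert(!ShouldTakeFromPool(true, O_CREAT, false));   // filtered file
        assert(!ShouldTakeFromPool(false, O_RDWR, false));   // no create flag
        assert(!ShouldTakeFromPool(false, O_CREAT, true));   // already exists
    }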
diff --git a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h
index 4e6737b8d4..b29a0948a8 100644
--- a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h
+++ b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h
@@ -33,53 +33,59 @@
 #include "src/chunkserver/raftsnapshot/curve_file_adaptor.h"

 /**
- * RaftSnapshotFilesystemAdaptor目的是为了接管braft
- * 内部snapshot创建chunk文件的逻辑,目前curve内部
- * 会从chunkfilepool中直接取出已经格式化好的chunk文件
- * 但是braft内部由于install snapshot也会创建chunk文件
- * 这个创建文件不感知chunkfilepool,因此我们希望install
- * snapshot也能从chunkfilepool中直接取出chunk文件,因此
- * 我们对install snapshot流程中的文件系统做了一层hook,在
- * 创建及删除文件操作上直接使用curve提供的文件系统接口即可。
+ * The purpose of RaftSnapshotFilesystemAdaptor is to take over the logic of
+ * creating chunk files for internal snapshots in braft. Currently, within
+ * Curve, we directly retrieve pre-formatted chunk files from the chunk file
+ * pool. However, within braft, the creation of chunk files during an install
+ * snapshot process does not interact with the chunk file pool. Therefore, we
+ * want the install snapshot process to also be able to retrieve chunk files
+ * directly from the chunk file pool. To achieve this, we have implemented a
+ * hook in the file system operations within the install snapshot process. This
+ * hook allows us to use the file system interface provided by Curve for file
+ * creation and deletion.
*/ -using curve::fs::LocalFileSystem; using curve::chunkserver::FilePool; +using curve::fs::LocalFileSystem; namespace curve { namespace chunkserver { /** - * CurveFilesystemAdaptor继承raft的PosixFileSystemAdaptor类,在raft - * 内部其快照使用PosixFileSystemAdaptor类进行文件操作,因为我们只希望在其创建文件 - * 或者删除文件的时候使用chunkfilepool提供的getchunk和recyclechunk接口,所以这里 - * 我们只实现了open和delete_file两个接口。其他接口在调用的时候仍然使用原来raft的内部 - * 的接口。 + * CurveFilesystemAdaptor inherits from Raft's PosixFileSystemAdaptor class. + * Within the Raft framework, it uses the PosixFileSystemAdaptor class for file + * operations during snapshots. However, we only want to use the `getchunk` and + * `recyclechunk` interfaces provided by the chunkfilepool when creating or + * deleting files. Therefore, in this context, we have only implemented the + * `open` and `delete_file` interfaces. Other interfaces are still used with the + * original internal Raft interfaces when called. */ class CurveFilesystemAdaptor : public braft::PosixFileSystemAdaptor { public: /** - * 构造函数 - * @param: chunkfilepool用于获取和回收chunk文件 - * @param: lfs用于进行一些文件操作,比如打开或者删除目录 + * Constructor + * @param: chunkfilepool is used to retrieve and recycle chunk files + * @param: lfs is used for some file operations, such as opening or deleting + * directories */ CurveFilesystemAdaptor(std::shared_ptr filePool, - std::shared_ptr lfs); + std::shared_ptr lfs); CurveFilesystemAdaptor(); virtual ~CurveFilesystemAdaptor(); /** - * 打开文件,在raft内部使用open来创建一个文件,并返回FileAdaptor结构 - * @param: path是当前待打开的路径 - * @param: oflag为打开文件参数 - * @param: file_meta是当前文件的meta信息,这个参数内部未使用 - * @param: e为打开文件是的错误码 - * @return: FileAdaptor是raft内部封装fd的一个类,fd是open打开path的返回值 - * 后续所有对于该文件的读写都是通过该FileAdaptor指针进行的,其内部封装了 - * 读写操作,其内部定义如下。 - * class PosixFileAdaptor : public FileAdaptor { - * friend class PosixFileSystemAdaptor; - * public: - * PosixFileAdaptor(int fd) : _fd(fd) {} + * Open the file, use open inside the raft to create a file, and return the + * FileAdaptor structure + * @param: path is the current path to be opened + * @param: oflag is the parameter for opening a file + * @param: file_meta is the meta information of the current file, which is + * not used internally + * @param: e is the error code for opening the file + * @return: FileAdaptor is a class within Raft that encapsulates a file + * descriptor (fd). After opening a path with the `open` call, all + * subsequent read and write operations on that file are performed through a + * pointer to this FileAdaptor class. 
It internally defines the following + * operations: class PosixFileAdaptor : public FileAdaptor { friend class + * PosixFileSystemAdaptor; public: PosixFileAdaptor(int fd) : _fd(fd) {} * virtual ~PosixFileAdaptor(); * * virtual ssize_t write(const butil::IOBuf& data, @@ -94,61 +100,70 @@ class CurveFilesystemAdaptor : public braft::PosixFileSystemAdaptor { * int _fd; * }; */ - virtual braft::FileAdaptor* open(const std::string& path, int oflag, - const ::google::protobuf::Message* file_meta, - butil::File::Error* e); + virtual braft::FileAdaptor* open( + const std::string& path, int oflag, + const ::google::protobuf::Message* file_meta, butil::File::Error* e); /** - * 删除path对应的文件或目录 - * @param: path是待删除的文件路径 - * @param: recursive是否递归删除 - * @return: 成功返回true,否则返回false + * Delete the file or directory corresponding to the path + * @param: path is the file path to be deleted + * @param: Recursive whether to recursively delete + * @return: Successfully returns true, otherwise returns false */ virtual bool delete_file(const std::string& path, bool recursive); /** - * rename到新路径 - * 为什么要重载rename? - * 由于raft内部使用的是本地文件系统的rename,如果目标new path - * 已经存在文件,那么就会覆盖该文件。这样raft内部会创建temp_snapshot_meta - * 文件,这个是为了保证原子修改snapshot_meta文件而设置的,然后通过rename保证 - * 修改snapshot_meta文件修改的原子性。如果这个temp_snapshot_meta是从chunkfilpool - * 取的,那么如果直接rename,这个temp_snapshot_meta文件所占用的chunk文件 - * 就永远收不回来了,这种情况下会消耗大量的预分配chunk,所以这里重载rename,先 - * 回收new path,然后再rename, - * @param: old_path旧文件路径 - * @param: new_path新文件路径 + * Rename to a new path. + * Why override the rename function? + * Raft internally uses the rename function of the local file system. If the + * target new path already exists as a file, it will overwrite that file. + * This behavior leads to the creation of a 'temp_snapshot_meta' file, which + * is set up to ensure the atomic modification of the 'snapshot_meta' file. + * Using rename helps ensure the atomicity of modifying the 'snapshot_meta' + * file. However, if the 'temp_snapshot_meta' file is allocated from the + * chunk file pool and renamed directly, the chunk file used by the + * 'temp_snapshot_meta' file will never be released. In this situation, a + * significant number of pre-allocated chunks can be consumed. Therefore, + * the rename function is overridden here to first release the resources + * associated with the new path, and then perform the rename operation. 
+     * @param: old_path - The old file path
+     * @param: new_path - The new file path
     */
     virtual bool rename(const std::string& old_path,
-                            const std::string& new_path);
+                        const std::string& new_path);

-    // 设置过滤哪些文件,这些文件不从chunkfilepool取
-    // 回收的时候也直接删除这些文件,不进入chunkfilepool
+    // Set which files to filter; filtered files are not taken from the
+    // chunkfilepool, and during recycling they are deleted directly instead
+    // of going back into the chunkfilepool
    void SetFilterList(const std::vector& filter);

 private:
-   /**
-    * 递归回收目录内容
-    * @param: path为待回收的目录路径
-    * @return: 成功返回true,否则返回false
-    */
+    /**
+     * Recursively recycle the contents of a directory
+     * @param: path is the directory path to be recycled
+     * @return: returns true on success, false otherwise
+     */
    bool RecycleDirRecursive(const std::string& path);

    /**
-    * 查看文件是否需要过滤
+     * Check whether the file needs to be filtered
     */
    bool NeedFilter(const std::string& filename);

 private:
-    // 由于chunkfile pool获取新的chunk时需要传入metapage信息
-    // 这里创建一个临时的metapage,其内容无关紧要,因为快照会覆盖这部分内容
-    char* tempMetaPageContent;
-    // 我们自己的文件系统,这里文件系统会做一些打开及删除目录操作
+    // Because metapage information must be passed in when taking a new chunk
+    // from the chunkfile pool, a temporary metapage is created here; its
+    // content is irrelevant, since the snapshot will overwrite it
+    char* tempMetaPageContent;
+    // Our own file system, used here for operations such as opening and
+    // deleting directories
    std::shared_ptr lfs_;
-    // 操作chunkfilepool的指针,这个FilePool_与copysetnode的
-    // chunkfilepool_应该是全局唯一的,保证操作chunkfilepool的原子性
+    // Pointer used to operate the chunkfilepool; this chunkFilePool_ and the
+    // copyset node's chunkfilepool_ should be one globally unique instance,
+    // which guarantees the atomicity of chunkfilepool operations
    std::shared_ptr chunkFilePool_;
-    // 过滤名单,在当前vector中的文件名,都不从chunkfilepool中取文件
-    // 回收的时候也直接删除这些文件,不进入chunkfilepool
+    // Filter list: files whose names are in this vector are never taken from
+    // the chunkfilepool, and during recycling they are deleted directly
+    // instead of going back into the chunkfilepool
    std::vector filterList_;
 };
 }  // namespace chunkserver
 }  // namespace curve
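The rename() override documented above exists to keep pool-allocated temp
files (such as temp_snapshot_meta) from leaking when an atomic rename
overwrites an existing destination. A toy model of the recycle-then-rename
ordering, with in-memory stand-ins for FilePool and LocalFileSystem (none of
these types are the real Curve classes):

    #include <iostream>
    #include <map>
    #include <string>

    struct FakePool {
        int recycled = 0;
        void Recycle(const std::string&) { ++recycled; }
    };
    struct FakeFs {
        std::map<std::string, std::string> files;
        bool Exists(const std::string& p) const { return files.count(p) > 0; }
        bool Rename(const std::string& a, const std::string& b) {
            files[b] = files[a];
            files.erase(a);
            return true;
        }
    };

    // Recycle new_path back to the pool before renaming over it, mirroring
    // the behavior described for non-filtered files.
    bool RenameWithRecycle(FakeFs* fs, FakePool* pool,
                           const std::string& oldp, const std::string& newp) {
        if (fs->Exists(newp)) pool->Recycle(newp);
        return fs->Rename(oldp, newp);
    }

    int main() {
        FakeFs fs;
        FakePool pool;
        fs.files["snapshot_meta.tmp"] = "new meta";
        fs.files["snapshot_meta"] = "old meta";
        RenameWithRecycle(&fs, &pool, "snapshot_meta.tmp", "snapshot_meta");
        std::cout << "recycled=" << pool.recycled << "\n";  // prints 1
    }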
diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp
index 93d4a7c324..cbd77403da 100644
--- a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp
+++ b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp
@@ -21,6 +21,7 @@
 */

 #include "src/chunkserver/raftsnapshot/curve_snapshot_attachment.h"
+
 #include "src/common/fs_util.h"

 namespace curve {
@@ -31,11 +32,11 @@ CurveSnapshotAttachment::CurveSnapshotAttachment(
     : fileHelper_(fs) {}

 void CurveSnapshotAttachment::list_attach_files(
-    std::vector *files, const std::string& raftSnapshotPath) {
+    std::vector* files, const std::string& raftSnapshotPath) {
     std::string raftBaseDir =
-                getCurveRaftBaseDir(raftSnapshotPath, RAFT_SNAP_DIR);
+        getCurveRaftBaseDir(raftSnapshotPath, RAFT_SNAP_DIR);
     std::string dataDir;
-    if (raftBaseDir[raftBaseDir.length()-1] != '/') {
+    if (raftBaseDir[raftBaseDir.length() - 1] != '/') {
         dataDir = raftBaseDir + "/" + RAFT_DATA_DIR;
     } else {
         dataDir = raftBaseDir + RAFT_DATA_DIR;
@@ -43,23 +44,23 @@ void CurveSnapshotAttachment::list_attach_files(
     std::vector snapFiles;
     int rc = fileHelper_.ListFiles(dataDir, nullptr, &snapFiles);

-    // list出错一般认为就是磁盘出现问题了,这种情况直接让进程挂掉
-    // Attention: 这里还需要更仔细考虑
+    // A List error is generally taken to mean a disk problem, in which case
+    // the process is simply allowed to crash.
+    // Attention: this still needs more careful consideration here.
     CHECK(rc == 0) << "List dir failed.";

     files->clear();
-    // 文件路径格式与snapshot_meta中的格式要相同
+    // The file path format must match the format used in snapshot_meta
     for (const auto& snapFile : snapFiles) {
         std::string snapApath;
-        // 添加绝对路径
+        // Add the absolute path
         snapApath.append(dataDir);
         snapApath.append("/").append(snapFile);
-        std::string filePath = curve::common::CalcRelativePath(
-            raftSnapshotPath, snapApath);
+        std::string filePath =
+            curve::common::CalcRelativePath(raftSnapshotPath, snapApath);
         files->emplace_back(filePath);
     }
 }
-
 }  // namespace chunkserver
 }  // namespace curve
diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h
index 10e2172673..94b6009714 100644
--- a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h
+++ b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h
@@ -23,62 +23,71 @@
 #define SRC_CHUNKSERVER_RAFTSNAPSHOT_CURVE_SNAPSHOT_ATTACHMENT_H_

 #include
+
+#include
 #include
 #include
-#include

-#include "src/chunkserver/raftsnapshot/define.h"
 #include "src/chunkserver/datastore/datastore_file_helper.h"
+#include "src/chunkserver/raftsnapshot/define.h"

 namespace curve {
 namespace chunkserver {

 /**
- * 用于获取snapshot attachment files的接口,一般用于一些下载
- * 快照获取需要额外下载的文件list
+ * Interface for obtaining snapshot attachment files; generally used to get
+ * the list of extra files that must be downloaded when fetching a snapshot
 */
-class SnapshotAttachment :
-    public butil::RefCountedThreadSafe {
+class SnapshotAttachment
+    : public butil::RefCountedThreadSafe {
 public:
    SnapshotAttachment() = default;
    virtual ~SnapshotAttachment() = default;

    /**
-    * 获取snapshot attachment文件列表
-    * @param files[out]: attachment文件列表
-    * @param snapshotPath[in]: braft快照的路径
+     * Obtain the list of snapshot attachment files
+     * @param files[out]: attachment file list
+     * @param snapshotPath[in]: path to the braft snapshot
     */
-    virtual void list_attach_files(std::vector *files,
-        const std::string& raftSnapshotPath) = 0;
+    virtual void list_attach_files(std::vector* files,
+                                   const std::string& raftSnapshotPath) = 0;
 };

-// SnapshotAttachment接口的实现,用于raft加载快照时,获取chunk快照文件列表
+// Implementation of the SnapshotAttachment interface, used to obtain the list
+// of chunk snapshot files when raft loads a snapshot
 class CurveSnapshotAttachment : public SnapshotAttachment {
 public:
    explicit CurveSnapshotAttachment(std::shared_ptr fs);
    virtual ~CurveSnapshotAttachment() = default;
    /**
-    * 获取raft snapshot的attachment,这里就是获取chunk的快照文件列表
-    * @param files[out]: data目录下的chunk快照文件列表
-    * @param raftSnapshotPath: braft快照的路径
-    * 返回的文件路径使用 绝对路径:相对路径 的格式,相对路径包含data目录
+     * Obtain the attachment of the raft snapshot, i.e. the list of chunk
+     * snapshot files
+     * @param files[out]: list of chunk snapshot files in the data directory
+     * @param raftSnapshotPath: path to the braft snapshot
+     * The returned file paths use the "absolute path:relative path" format,
+     * where the relative path includes the data directory
     */
-    void list_attach_files(std::vector *files,
+    void list_attach_files(std::vector* files,
                            const std::string& raftSnapshotPath) override;
+
 private:
    DatastoreFileHelper fileHelper_;
 };

 /*
-* @brif 通过具体的某个raft的snapshot实例地址获取raft实例基础地址
-* @param[in] specificSnapshotDir 某个具体snapshot的目录
-  比如/data/chunkserver1/copysets/4294967812/raft_snapshot/snapshot_805455/
-* @param[in] raftSnapshotRelativeDir 上层业务指的所有snapshot的相对基地址
-  比如raft_snapshot
-* @return 返回raft实例的绝对基地址,/data/chunkserver1/copysets/4294967812/
+* @brief obtains the base directory of a raft instance from the snapshot
+instance directory of a specific raft
+* @param[in] specificSnapshotDir the directory of a specific snapshot,
+for example /data/chunkserver1/copysets/4294967812/raft_snapshot/snapshot_805455/
+* @param[in] raftSnapshotRelativeDir the relative base directory of all
+snapshots as referred to by the upper-level business, for example raft_snapshot
+* @return returns the absolute base directory of the raft instance,
+/data/chunkserver1/copysets/4294967812/
obtains the base address of a raft instance through the snapshot +instance address of a specific raft +* @param[in] specificSnapshotDir The directory of a specific snapshot + For +example,/data/chunkserver1/copysets/4294967812/raft_snapshot/snapshot_805455/ +* @param[in] raftSnapshotRelativeDir The relative base addresses of all +snapshots referred to by the upper level business For example, raft_ Snapshot +* @return returns the absolute base address of the raft +instance,/data/chunkserver1/copysets/4294967812/ */ inline std::string getCurveRaftBaseDir(std::string specificSnapshotDir, - std::string raftSnapshotRelativeDir) { + std::string raftSnapshotRelativeDir) { std::string::size_type m = specificSnapshotDir.find(raftSnapshotRelativeDir); if (m == std::string::npos) { diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp b/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp index 6a996695bd..5cceb37171 100644 --- a/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp +++ b/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp @@ -48,22 +48,19 @@ CurveSnapshotCopier::CurveSnapshotCopier(CurveSnapshotStorage* storage, bool filter_before_copy_remote, braft::FileSystemAdaptor* fs, braft::SnapshotThrottle* throttle) - : _tid(INVALID_BTHREAD) - , _cancelled(false) - , _filter_before_copy_remote(filter_before_copy_remote) - , _fs(fs) - , _throttle(throttle) - , _writer(NULL) - , _storage(storage) - , _reader(NULL) - , _cur_session(NULL) -{} - -CurveSnapshotCopier::~CurveSnapshotCopier() { - CHECK(!_writer); -} - -void *CurveSnapshotCopier::start_copy(void* arg) { + : _tid(INVALID_BTHREAD), + _cancelled(false), + _filter_before_copy_remote(filter_before_copy_remote), + _fs(fs), + _throttle(throttle), + _writer(NULL), + _storage(storage), + _reader(NULL), + _cur_session(NULL) {} + +CurveSnapshotCopier::~CurveSnapshotCopier() { CHECK(!_writer); } + +void* CurveSnapshotCopier::start_copy(void* arg) { CurveSnapshotCopier* c = reinterpret_cast(arg); c->copy(); return NULL; @@ -71,7 +68,7 @@ void *CurveSnapshotCopier::start_copy(void* arg) { void CurveSnapshotCopier::copy() { do { - // 下载snapshot meta中记录的文件 + // Download the files recorded in the snapshot meta load_meta_table(); if (!ok()) { break; @@ -86,7 +83,7 @@ void CurveSnapshotCopier::copy() { copy_file(files[i]); } - // 下载snapshot attachment文件 + // Download snapshot attachment file load_attach_meta_table(); if (!ok()) { break; @@ -99,8 +96,8 @@ void CurveSnapshotCopier::copy() { } while (0); if (!ok() && _writer && _writer->ok()) { LOG(WARNING) << "Fail to copy, error_code " << error_code() - << " error_msg " << error_cstr() - << " writer path " << _writer->get_path(); + << " error_msg " << error_cstr() << " writer path " + << _writer->get_path(); _writer->set_error(error_code(), error_cstr()); } if (_writer) { @@ -123,9 +120,9 @@ void CurveSnapshotCopier::load_meta_table() { set_error(ECANCELED, "%s", berror(ECANCELED)); return; } - scoped_refptr session - = _copier.start_to_copy_to_iobuf(BRAFT_SNAPSHOT_META_FILE, - &meta_buf, NULL); + scoped_refptr session = + _copier.start_to_copy_to_iobuf(BRAFT_SNAPSHOT_META_FILE, &meta_buf, + NULL); _cur_session = session.get(); lck.unlock(); session->join(); @@ -153,9 +150,9 @@ void CurveSnapshotCopier::load_attach_meta_table() { set_error(ECANCELED, "%s", berror(ECANCELED)); return; } - scoped_refptr session - = _copier.start_to_copy_to_iobuf(BRAFT_SNAPSHOT_ATTACH_META_FILE, - &meta_buf, NULL); + scoped_refptr session = + 
_copier.start_to_copy_to_iobuf(BRAFT_SNAPSHOT_ATTACH_META_FILE, + &meta_buf, NULL); _cur_session = session.get(); lck.unlock(); session->join(); @@ -169,21 +166,22 @@ void CurveSnapshotCopier::load_attach_meta_table() { return; } - // 如果attach meta table为空,那么说明没有snapshot attachment files + // If the attach_meta_table is empty, then there are no snapshot attachment + // files if (0 == meta_buf.size()) { return; } - if (_remote_snapshot._attach_meta_table.load_from_iobuf_as_remote(meta_buf) - != 0) { + if (_remote_snapshot._attach_meta_table.load_from_iobuf_as_remote( + meta_buf) != 0) { LOG(WARNING) << "Bad attach_meta_table format"; set_error(-1, "Bad attach_meta_table format"); return; } } -int CurveSnapshotCopier::filter_before_copy(CurveSnapshotWriter* writer, - braft::SnapshotReader* last_snapshot) { +int CurveSnapshotCopier::filter_before_copy( + CurveSnapshotWriter* writer, braft::SnapshotReader* last_snapshot) { std::vector existing_files; writer->list_files(&existing_files); std::vector to_remove; @@ -200,8 +198,7 @@ int CurveSnapshotCopier::filter_before_copy(CurveSnapshotWriter* writer, for (size_t i = 0; i < remote_files.size(); ++i) { const std::string& filename = remote_files[i]; braft::LocalFileMeta remote_meta; - CHECK_EQ(0, _remote_snapshot.get_file_meta( - filename, &remote_meta)); + CHECK_EQ(0, _remote_snapshot.get_file_meta(filename, &remote_meta)); if (!remote_meta.has_checksum()) { // Redownload file if this file doen't have checksum writer->remove_file(filename); @@ -214,8 +211,8 @@ int CurveSnapshotCopier::filter_before_copy(CurveSnapshotWriter* writer, if (local_meta.has_checksum() && local_meta.checksum() == remote_meta.checksum()) { LOG(INFO) << "Keep file=" << filename - << " checksum=" << remote_meta.checksum() - << " in " << writer->get_path(); + << " checksum=" << remote_meta.checksum() << " in " + << writer->get_path(); continue; } // Remove files from writer so that the file is to be copied from @@ -232,21 +229,20 @@ int CurveSnapshotCopier::filter_before_copy(CurveSnapshotWriter* writer, continue; } if (!local_meta.has_checksum() || - local_meta.checksum() != remote_meta.checksum()) { + local_meta.checksum() != remote_meta.checksum()) { continue; } LOG(INFO) << "Found the same file=" << filename << " checksum=" << remote_meta.checksum() << " in last_snapshot=" << last_snapshot->get_path(); if (local_meta.source() == braft::FILE_SOURCE_LOCAL) { - std::string source_path = last_snapshot->get_path() + '/' - + filename; - std::string dest_path = writer->get_path() + '/' - + filename; + std::string source_path = + last_snapshot->get_path() + '/' + filename; + std::string dest_path = writer->get_path() + '/' + filename; _fs->delete_file(dest_path, false); if (!_fs->link(source_path, dest_path)) { - PLOG(ERROR) << "Fail to link " << source_path - << " to " << dest_path; + PLOG(ERROR) + << "Fail to link " << source_path << " to " << dest_path; continue; } // Don't delete linked file @@ -272,8 +268,8 @@ int CurveSnapshotCopier::filter_before_copy(CurveSnapshotWriter* writer, } void CurveSnapshotCopier::filter() { - _writer = reinterpret_cast(_storage->create( - !_filter_before_copy_remote)); + _writer = reinterpret_cast( + _storage->create(!_filter_before_copy_remote)); if (_writer == NULL) { set_error(EIO, "Fail to create snapshot writer"); return; @@ -283,12 +279,13 @@ void CurveSnapshotCopier::filter() { braft::SnapshotReader* reader = _storage->open(); if (filter_before_copy(_writer, reader) != 0) { LOG(WARNING) << "Fail to filter writer before copying" - ", 
path: " << _writer->get_path() + ", path: " + << _writer->get_path() << ", destroy and create a new writer"; _writer->set_error(-1, "Fail to filter"); _storage->close(_writer, false); - _writer = reinterpret_cast( - _storage->create(true)); + _writer = + reinterpret_cast(_storage->create(true)); } if (reader) { _storage->close(reader); @@ -319,16 +316,16 @@ void CurveSnapshotCopier::copy_file(const std::string& filename, bool attch) { butil::File::Error e; bool rc = false; if (braft::FLAGS_raft_create_parent_directories) { - butil::FilePath sub_dir = butil::FilePath( - _writer->get_path()).Append(sub_path.DirName()); + butil::FilePath sub_dir = + butil::FilePath(_writer->get_path()).Append(sub_path.DirName()); rc = _fs->create_directory(sub_dir.value(), &e, true); } else { - rc = create_sub_directory( - _writer->get_path(), sub_path.DirName().value(), _fs, &e); + rc = create_sub_directory(_writer->get_path(), + sub_path.DirName().value(), _fs, &e); } if (!rc) { - LOG(ERROR) << "Fail to create directory for " << file_path - << " : " << butil::File::ErrorToString(e); + LOG(ERROR) << "Fail to create directory for " << file_path << " : " + << butil::File::ErrorToString(e); set_error(braft::file_error_to_os_error(e), "Fail to create directory"); } @@ -340,8 +337,8 @@ void CurveSnapshotCopier::copy_file(const std::string& filename, bool attch) { set_error(ECANCELED, "%s", berror(ECANCELED)); return; } - scoped_refptr session - = _copier.start_to_copy_to_file(filename, file_path, NULL); + scoped_refptr session = + _copier.start_to_copy_to_file(filename, file_path, NULL); if (session == NULL) { LOG(WARNING) << "Fail to copy " << filename << " path: " << _writer->get_path(); @@ -355,14 +352,13 @@ void CurveSnapshotCopier::copy_file(const std::string& filename, bool attch) { _cur_session = NULL; lck.unlock(); if (!session->status().ok()) { - // 如果是文件不存在,那么删除刚开始open的文件 + // If the file does not exist, delete the file that was just opened if (session->status().error_code() == ENOENT) { bool rc = _fs->delete_file(file_path, false); if (!rc) { - LOG(ERROR) << "Fail to delete file" << file_path - << " : " << ::berror(errno); - set_error(errno, - "Fail to create delete file " + file_path); + LOG(ERROR) << "Fail to delete file" << file_path << " : " + << ::berror(errno); + set_error(errno, "Fail to create delete file " + file_path); } return; } @@ -371,7 +367,8 @@ void CurveSnapshotCopier::copy_file(const std::string& filename, bool attch) { session->status().error_cstr()); return; } - // 如果是attach file,那么不需要持久化file meta信息 + // If it is an attach file, then there is no need to persist the file meta + // information if (!attch && _writer->add_file(filename, &meta) != 0) { set_error(EIO, "Fail to add file to writer"); return; @@ -394,16 +391,13 @@ std::string CurveSnapshotCopier::get_rfilename(const std::string& filename) { } void CurveSnapshotCopier::start() { - if (bthread_start_background( - &_tid, NULL, start_copy, this) != 0) { + if (bthread_start_background(&_tid, NULL, start_copy, this) != 0) { PLOG(ERROR) << "Fail to start bthread"; copy(); } } -void CurveSnapshotCopier::join() { - bthread_join(_tid, NULL); -} +void CurveSnapshotCopier::join() { bthread_join(_tid, NULL); } void CurveSnapshotCopier::cancel() { BAIDU_SCOPED_LOCK(_mutex); diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_copier.h b/src/chunkserver/raftsnapshot/curve_snapshot_copier.h index 1c991720b0..fdc1ef960a 100644 --- a/src/chunkserver/raftsnapshot/curve_snapshot_copier.h +++ 
b/src/chunkserver/raftsnapshot/curve_snapshot_copier.h
@@ -43,8 +43,10 @@
 #define SRC_CHUNKSERVER_RAFTSNAPSHOT_CURVE_SNAPSHOT_COPIER_H_

 #include
-#include
+
 #include
+#include
+
 #include "src/chunkserver/raftsnapshot/curve_snapshot.h"
 #include "src/chunkserver/raftsnapshot/curve_snapshot_storage.h"

@@ -75,7 +77,9 @@ class CurveSnapshotCopier : public braft::SnapshotCopier {
                            braft::SnapshotReader* last_snapshot);
     void filter();
     void copy_file(const std::string& filename, bool attach = false);
-    // 这里的filename是相对于快照目录的路径,为了先把文件下载到临时目录,需要把前面的..去掉
+    // The filename here is a path relative to the snapshot directory. In
+    // order to download the file to the temporary directory first, the
+    // leading ".." needs to be removed
     std::string get_rfilename(const std::string& filename);

     braft::raft_mutex_t _mutex;
diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h b/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h
index 97c553661c..5221a0df8a 100644
--- a/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h
+++ b/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h
@@ -44,10 +44,12 @@

 #include
 #include
+
+#include
+#include
 #include
 #include
-#include
-#include
+
 #include "proto/curve_storage.pb.h"
 #include "src/chunkserver/raftsnapshot/define.h"

@@ -55,9 +57,10 @@ namespace curve {
 namespace chunkserver {

 /**
- * snapshot attachment文件元数据表,同上面的
- * CurveSnapshotAttachMetaTable接口,主要提供attach文件元数据信息
- * 的查询、序列化和反序列等接口
+ * Snapshot attachment file metadata table, analogous to the snapshot meta
+ * table above. It mainly provides interfaces for querying, serializing, and
+ * deserializing attachment file metadata
 */
 class CurveSnapshotAttachMetaTable {
 public:
@@ -70,7 +73,7 @@ class CurveSnapshotAttachMetaTable {
     int get_attach_file_meta(const std::string& filename,
                              braft::LocalFileMeta* file_meta) const;
     // list files in the attach meta table
-    void list_files(std::vector *files) const;
+    void list_files(std::vector* files) const;
     // deserialize
     int load_from_iobuf_as_remote(const butil::IOBuf& buf);
     // serialize
@@ -79,39 +82,31 @@ class CurveSnapshotAttachMetaTable {
 private:
     typedef std::map Map;
     // file -> file meta
-    Map    _file_map;
+    Map _file_map;
 };

 class CurveSnapshotFileReader : public braft::LocalDirReader {
 public:
    CurveSnapshotFileReader(braft::FileSystemAdaptor* fs,
-                        const std::string& path,
-                        braft::SnapshotThrottle* snapshot_throttle)
-        : LocalDirReader(fs, path),
-        _snapshot_throttle(snapshot_throttle)
-    {}
+                            const std::string& path,
+                            braft::SnapshotThrottle* snapshot_throttle)
+        : LocalDirReader(fs, path), _snapshot_throttle(snapshot_throttle) {}

    virtual ~CurveSnapshotFileReader() = default;

-    void set_meta_table(const braft::LocalSnapshotMetaTable &meta_table) {
+    void set_meta_table(const braft::LocalSnapshotMetaTable& meta_table) {
        _meta_table = meta_table;
    }

    void set_attach_meta_table(
-        const CurveSnapshotAttachMetaTable &attach_meta_table) {
+        const CurveSnapshotAttachMetaTable& attach_meta_table) {
        _attach_meta_table = attach_meta_table;
    }

-    int read_file(butil::IOBuf* out,
-                  const std::string &filename,
-                  off_t offset,
-                  size_t max_count,
-                  bool read_partly,
-                  size_t* read_count,
+    int read_file(butil::IOBuf* out, const std::string& filename, off_t offset,
+                  size_t max_count, bool read_partly, size_t* read_count,
                   bool* is_eof) const override;

-    braft::LocalSnapshotMetaTable get_meta_table() {
-        return _meta_table;
-    }
+    braft::LocalSnapshotMetaTable get_meta_table() { return _meta_table; }

 private:
braft::LocalSnapshotMetaTable _meta_table; diff --git a/src/chunkserver/raftsnapshot/define.h b/src/chunkserver/raftsnapshot/define.h index 012da7f1ba..79b1dcf355 100644 --- a/src/chunkserver/raftsnapshot/define.h +++ b/src/chunkserver/raftsnapshot/define.h @@ -29,12 +29,13 @@ namespace chunkserver { const char RAFT_DATA_DIR[] = "data"; const char RAFT_META_DIR[] = "raft_meta"; -// TODO(all:fix it): RAFT_SNAP_DIR注意当前这个目录地址不能修改 -// 与当前外部依赖curve-braft代码强耦合(两边硬编码耦合) +// TODO(all:fix it): Note that the RAFT_SNAP_DIR directory address should not be +// modified at this time. This is tightly coupled with the current external +// dependency on curve-braft code (hardcoded coupling on both sides). const char RAFT_SNAP_DIR[] = "raft_snapshot"; -const char RAFT_LOG_DIR[] = "log"; +const char RAFT_LOG_DIR[] = "log"; #define BRAFT_SNAPSHOT_PATTERN "snapshot_%020" PRId64 -#define BRAFT_SNAPSHOT_META_FILE "__raft_snapshot_meta" +#define BRAFT_SNAPSHOT_META_FILE "__raft_snapshot_meta" #define BRAFT_SNAPSHOT_ATTACH_META_FILE "__raft_snapshot_attach_meta" #define BRAFT_PROTOBUF_FILE_TEMP ".tmp" diff --git a/src/chunkserver/register.cpp b/src/chunkserver/register.cpp index 1616800c55..edbf2a27f7 100644 --- a/src/chunkserver/register.cpp +++ b/src/chunkserver/register.cpp @@ -20,29 +20,30 @@ * Author: lixiaocui */ -#include -#include +#include "src/chunkserver/register.h" + #include #include +#include +#include #include #include +#include "proto/topology.pb.h" +#include "src/chunkserver/chunkserver_helper.h" #include "src/common/crc32.h" #include "src/common/string_util.h" -#include "src/chunkserver/register.h" #include "src/common/uri_parser.h" -#include "src/chunkserver/chunkserver_helper.h" -#include "proto/topology.pb.h" namespace curve { namespace chunkserver { -Register::Register(const RegisterOptions &ops) { +Register::Register(const RegisterOptions& ops) { this->ops_ = ops; - // 解析mds的多个地址 + // Parsing multiple addresses of mds ::curve::common::SplitString(ops.mdsListenAddr, ",", &mdsEps_); - // 检验每个地址的合法性 + // Verify the legality of each address for (auto addr : mdsEps_) { butil::EndPoint endpt; if (butil::str2endpoint(addr.c_str(), &endpt) < 0) { @@ -52,9 +53,9 @@ Register::Register(const RegisterOptions &ops) { inServiceIndex_ = 0; } -int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, - ChunkServerMetadata *metadata, - const std::shared_ptr &epochMap) { +int Register::RegisterToMDS(const ChunkServerMetadata* localMetadata, + ChunkServerMetadata* metadata, + const std::shared_ptr& epochMap) { ::curve::mds::topology::ChunkServerRegistRequest req; ::curve::mds::topology::ChunkServerRegistResponse resp; req.set_disktype(ops_.chunkserverDiskType); @@ -105,7 +106,8 @@ int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, curve::mds::topology::TopologyService_Stub stub(&channel); stub.RegistChunkServer(&cntl, &req, &resp, nullptr); - // TODO(lixiaocui): 后续错误码和mds共享后改成枚举类型 + // TODO(lixiaocui): Change to enumeration type after sharing error codes + // and mds in the future if (!cntl.Failed() && resp.statuscode() == 0) { break; } else { @@ -158,7 +160,7 @@ int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, return 0; } -int Register::PersistChunkServerMeta(const ChunkServerMetadata &metadata) { +int Register::PersistChunkServerMeta(const ChunkServerMetadata& metadata) { int fd; std::string metaFile = curve::common::UriParser::GetPathFromUri(ops_.chunkserverMetaUri); diff --git a/src/chunkserver/register.h b/src/chunkserver/register.h index 
f89683087d..d45a15fdf5 100644 --- a/src/chunkserver/register.h +++ b/src/chunkserver/register.h @@ -23,13 +23,14 @@ #ifndef SRC_CHUNKSERVER_REGISTER_H_ #define SRC_CHUNKSERVER_REGISTER_H_ -#include #include +#include #include -#include "src/fs/local_filesystem.h" + #include "proto/chunkserver.pb.h" -#include "src/chunkserver/epoch_map.h" #include "src/chunkserver/datastore/file_pool.h" +#include "src/chunkserver/epoch_map.h" +#include "src/fs/local_filesystem.h" using ::curve::fs::LocalFileSystem; @@ -37,7 +38,7 @@ namespace curve { namespace chunkserver { const uint32_t CURRENT_METADATA_VERSION = 0x01; -// register配置选项 +// Register Configuration Options struct RegisterOptions { std::string mdsListenAddr; std::string chunkserverInternalIp; @@ -61,7 +62,7 @@ struct RegisterOptions { class Register { public: - explicit Register(const RegisterOptions &ops); + explicit Register(const RegisterOptions& ops); ~Register() {} /** @@ -71,16 +72,16 @@ class Register { * @param[out] metadata chunkserver meta * @param[in,out] epochMap epochMap to update */ - int RegisterToMDS(const ChunkServerMetadata *localMetadata, - ChunkServerMetadata *metadata, - const std::shared_ptr &epochMap); + int RegisterToMDS(const ChunkServerMetadata* localMetadata, + ChunkServerMetadata* metadata, + const std::shared_ptr& epochMap); /** - * @brief 持久化ChunkServer元数据 + * @brief Persisting ChunkServer metadata * * @param[in] metadata */ - int PersistChunkServerMeta(const ChunkServerMetadata &metadata); + int PersistChunkServerMeta(const ChunkServerMetadata& metadata); private: RegisterOptions ops_; @@ -92,4 +93,3 @@ class Register { } // namespace curve #endif // SRC_CHUNKSERVER_REGISTER_H_ - diff --git a/src/chunkserver/trash.cpp b/src/chunkserver/trash.cpp index 2941261240..511ad103f0 100644 --- a/src/chunkserver/trash.cpp +++ b/src/chunkserver/trash.cpp @@ -20,21 +20,24 @@ * Author: lixiaocui */ -#include +#include "src/chunkserver/trash.h" + #include +#include + #include -#include "src/chunkserver/trash.h" -#include "src/common/string_util.h" -#include "src/chunkserver/datastore/filename_operator.h" -#include "src/chunkserver/copyset_node.h" + #include "include/chunkserver/chunkserver_common.h" -#include "src/common/uri_parser.h" +#include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/datastore/filename_operator.h" #include "src/chunkserver/raftlog/define.h" +#include "src/common/string_util.h" +#include "src/common/uri_parser.h" using ::curve::chunkserver::RAFT_DATA_DIR; +using ::curve::chunkserver::RAFT_LOG_DIR; using ::curve::chunkserver::RAFT_META_DIR; using ::curve::chunkserver::RAFT_SNAP_DIR; -using ::curve::chunkserver::RAFT_LOG_DIR; namespace curve { namespace chunkserver { @@ -60,13 +63,13 @@ int Trash::Init(TrashOptions options) { walPool_ = options.walPool; chunkNum_.store(0); - // 读取trash目录下的所有目录 + // Read all directories under the trash directory std::vector files; localFileSystem_->List(trashPath_, &files); - // 遍历trash下的文件 - for (auto &file : files) { - // 如果不是copyset目录,跳过 + // Traverse through files under trash + for (auto& file : files) { + // If it is not a copyset directory, skip if (!IsCopysetInTrash(file)) { continue; } @@ -100,8 +103,8 @@ int Trash::Fini() { return 0; } -int Trash::RecycleCopySet(const std::string &dirPath) { - // 回收站目录不存在,需要创建 +int Trash::RecycleCopySet(const std::string& dirPath) { + // The recycle bin directory does not exist and needs to be created if (!localFileSystem_->DirExists(trashPath_)) { LOG(INFO) << "Copyset recyler directory " << trashPath_ << " does not 
exist, creating it";
         }
     }

-    // 如果回收站已存在该目录,本次删除失败
-    std::string dst = trashPath_ + "/" +
-        dirPath.substr(dirPath.find_last_of('/', dirPath.length()) + 1) +
-        '.' + std::to_string(std::time(nullptr));
+    // If the directory already exists in the recycle bin, this recycle
+    // attempt fails
+    std::string dst =
+        trashPath_ + "/" +
+        dirPath.substr(dirPath.find_last_of('/', dirPath.length()) + 1) + '.' +
+        std::to_string(std::time(nullptr));
     if (localFileSystem_->DirExists(dst)) {
         LOG(WARNING) << "recycle error: " << dst
                      << " already exist in " << trashPath_;
@@ -137,28 +141,28 @@ int Trash::RecycleCopySet(const std::string &dirPath) {
 }

 void Trash::DeleteEligibleFileInTrashInterval() {
-     while (sleeper_.wait_for(std::chrono::seconds(scanPeriodSec_))) {
-         // 扫描回收站
-         DeleteEligibleFileInTrash();
-     }
+    while (sleeper_.wait_for(std::chrono::seconds(scanPeriodSec_))) {
+        // Scan the recycle bin
+        DeleteEligibleFileInTrash();
+    }
 }

 void Trash::DeleteEligibleFileInTrash() {
-    // trash目录暂不存在
+    // The trash directory does not currently exist
     if (!localFileSystem_->DirExists(trashPath_)) {
         return;
     }

-    // 读取trash目录下的所有目录
+    // Read all directories under the trash directory
     std::vector files;
     if (0 != localFileSystem_->List(trashPath_, &files)) {
         LOG(ERROR) << "Trash failed list files in " << trashPath_;
         return;
     }

-    // 遍历trash下的文件
-    for (auto &file : files) {
-        // 如果不是copyset目录,跳过
+    // Traverse the files under trash
+    for (auto& file : files) {
+        // If it is not a copyset directory, skip it
         if (!IsCopysetInTrash(file)) {
             continue;
         }
@@ -172,7 +176,7 @@ void Trash::DeleteEligibleFileInTrash() {
             continue;
         }

-        // 删除copyset目录
+        // Delete the copyset directory
         if (0 != localFileSystem_->Delete(copysetDir)) {
             LOG(ERROR) << "Trash fail to delete " << copysetDir;
             return;
@@ -180,10 +184,10 @@ void Trash::DeleteEligibleFileInTrash() {
     }
 }

-bool Trash::IsCopysetInTrash(const std::string &dirName) {
-    // 合法的copyset目录: 高32位PoolId(>0)组成, 低32位由copysetId(>0)组成
-    // 目录是十进制形式
-    // 例如:2860448220024 (poolId: 666, copysetId: 888)
+bool Trash::IsCopysetInTrash(const std::string& dirName) {
+    // A legal copyset directory name is a decimal number whose high 32 bits
+    // are the PoolId (>0) and whose low 32 bits are the copysetId (>0).
+    // For example: 2860448220024 (poolId: 666, copysetId: 888)
     uint64_t groupId;
     auto n = dirName.find(".");
     if (n == std::string::npos) {
@@ -196,7 +200,7 @@ bool Trash::IsCopysetInTrash(const std::string &dirName) {
     return GetPoolID(groupId) >= 1 && GetCopysetID(groupId) >= 1;
 }

-bool Trash::NeedDelete(const std::string &copysetDir) {
+bool Trash::NeedDelete(const std::string& copysetDir) {
     int fd = localFileSystem_->Open(copysetDir, O_RDONLY);
     if (0 > fd) {
         LOG(ERROR) << "Trash fail open " << copysetDir;
@@ -219,15 +223,15 @@ bool Trash::NeedDelete(const std::string &copysetDir) {
     return true;
 }

-bool Trash::IsChunkOrSnapShotFile(const std::string &chunkName) {
+bool Trash::IsChunkOrSnapShotFile(const std::string& chunkName) {
     return FileNameOperator::FileType::UNKNOWN !=
-        FileNameOperator::ParseFileName(chunkName).type;
+           FileNameOperator::ParseFileName(chunkName).type;
 }

-bool Trash::RecycleChunksAndWALInDir(
-    const std::string &copysetPath, const std::string &filename) {
+bool Trash::RecycleChunksAndWALInDir(const std::string& copysetPath,
+                                     const std::string& filename) {
     bool isDir = localFileSystem_->DirExists(copysetPath);
-    // 是文件看是否需要回收
+    // If it is a file, check whether it needs to be recycled
     if (!isDir) {
         if (IsChunkOrSnapShotFile(filename)) {
return RecycleChunkfile(copysetPath, filename); @@ -238,18 +242,18 @@ bool Trash::RecycleChunksAndWALInDir( } } - // 是目录,继续list + // It's a directory, continue with the list std::vector files; if (0 != localFileSystem_->List(copysetPath, &files)) { LOG(ERROR) << "Trash failed to list files in " << copysetPath; return false; } - // 遍历子文件 + // Traverse sub files bool ret = true; - for (auto &file : files) { + for (auto& file : files) { std::string filePath = copysetPath + "/" + file; - // recycle 失败不应该中断其他文件的recycle + // recycle failure should not interrupt the recycle of other files if (!RecycleChunksAndWALInDir(filePath, file)) { ret = false; } @@ -257,13 +261,13 @@ bool Trash::RecycleChunksAndWALInDir( return ret; } -bool Trash::RecycleChunkfile( - const std::string &filepath, const std::string &filename) { +bool Trash::RecycleChunkfile(const std::string& filepath, + const std::string& filename) { (void)filename; LockGuard lg(mtx_); if (0 != chunkFilePool_->RecycleFile(filepath)) { LOG(ERROR) << "Trash failed recycle chunk " << filepath - << " to FilePool"; + << " to FilePool"; return false; } @@ -271,13 +275,12 @@ bool Trash::RecycleChunkfile( return true; } -bool Trash::RecycleWAL( - const std::string &filepath, const std::string &filename) { +bool Trash::RecycleWAL(const std::string& filepath, + const std::string& filename) { (void)filename; LockGuard lg(mtx_); if (walPool_ != nullptr && 0 != walPool_->RecycleFile(filepath)) { - LOG(ERROR) << "Trash failed recycle WAL " << filepath - << " to WALPool"; + LOG(ERROR) << "Trash failed recycle WAL " << filepath << " to WALPool"; return false; } @@ -285,12 +288,12 @@ bool Trash::RecycleWAL( return true; } -bool Trash::IsWALFile(const std::string &fileName) { +bool Trash::IsWALFile(const std::string& fileName) { int match = 0; int64_t first_index = 0; int64_t last_index = 0; - match = sscanf(fileName.c_str(), CURVE_SEGMENT_CLOSED_PATTERN, - &first_index, &last_index); + match = sscanf(fileName.c_str(), CURVE_SEGMENT_CLOSED_PATTERN, &first_index, + &last_index); if (match == 2) { LOG(INFO) << "recycle closed segment wal file, path: " << fileName << " first_index: " << first_index @@ -298,8 +301,7 @@ bool Trash::IsWALFile(const std::string &fileName) { return true; } - match = sscanf(fileName.c_str(), CURVE_SEGMENT_OPEN_PATTERN, - &first_index); + match = sscanf(fileName.c_str(), CURVE_SEGMENT_OPEN_PATTERN, &first_index); if (match == 1) { LOG(INFO) << "recycle open segment wal file, path: " << fileName << " first_index: " << first_index; @@ -308,7 +310,7 @@ bool Trash::IsWALFile(const std::string &fileName) { return false; } -uint32_t Trash::CountChunkNumInCopyset(const std::string ©setPath) { +uint32_t Trash::CountChunkNumInCopyset(const std::string& copysetPath) { std::vector files; if (0 != localFileSystem_->List(copysetPath, &files)) { LOG(ERROR) << "Trash failed to list files in " << copysetPath; @@ -317,15 +319,14 @@ uint32_t Trash::CountChunkNumInCopyset(const std::string ©setPath) { // Traverse subdirectories uint32_t chunkNum = 0; - for (auto &file : files) { + for (auto& file : files) { std::string filePath = copysetPath + "/" + file; bool isDir = localFileSystem_->DirExists(filePath); if (!isDir) { // valid: chunkfile, snapshotfile, walfile - if (!(IsChunkOrSnapShotFile(file) || - IsWALFile(file))) { - LOG(WARNING) << "Trash find a illegal file:" - << file << " in " << copysetPath; + if (!(IsChunkOrSnapShotFile(file) || IsWALFile(file))) { + LOG(WARNING) << "Trash find a illegal file:" << file << " in " + << copysetPath; continue; } 
++chunkNum;
diff --git a/src/chunkserver/trash.h b/src/chunkserver/trash.h
index ff037db8a4..8733fbbe4e 100644
--- a/src/chunkserver/trash.h
+++ b/src/chunkserver/trash.h
@@ -25,25 +25,27 @@
 #include
 #include

-#include "src/fs/local_filesystem.h"
+
 #include "src/chunkserver/datastore/file_pool.h"
 #include "src/common/concurrent/concurrent.h"
 #include "src/common/interruptible_sleeper.h"
+#include "src/fs/local_filesystem.h"

-using ::curve::common::Thread;
 using ::curve::common::Atomic;
-using ::curve::common::Mutex;
-using ::curve::common::LockGuard;
 using ::curve::common::InterruptibleSleeper;
+using ::curve::common::LockGuard;
+using ::curve::common::Mutex;
+using ::curve::common::Thread;

 namespace curve {
 namespace chunkserver {

-struct TrashOptions{
-    // copyset的trash路径
+struct TrashOptions {
+    // The trash path for copysets
    std::string trashPath;
-    // 文件在放入trash中expiredAfteSec秒后,可以被物理回收
+    // A file can be physically recycled after it has been in the trash for
+    // expiredAfterSec seconds
    int expiredAfterSec;
-    // 扫描trash目录的时间间隔
+    // Time interval for scanning the trash directory
    int scanPeriodSec;

    std::shared_ptr localFileSystem;
@@ -60,70 +62,74 @@ class Trash {
    int Fini();

    /*
-    * @brief DeleteEligibleFileInTrash 回收trash目录下的物理空间
-    */
+     * @brief DeleteEligibleFileInTrash reclaims the physical space in the
+     * trash directory
+     */
    void DeleteEligibleFileInTrash();

-    int RecycleCopySet(const std::string &dirPath);
+    int RecycleCopySet(const std::string& dirPath);

    /*
-    * @brief 获取回收站中chunk的个数
-    *
-    * @return chunk个数
-    */
-    uint32_t GetChunkNum() {return chunkNum_.load();}
+     * @brief Get the number of chunks in the recycle bin
+     *
+     * @return number of chunks
+     */
+    uint32_t GetChunkNum() { return chunkNum_.load(); }

 private:
    /*
-    * @brief DeleteEligibleFileInTrashInterval 每隔一段时间进行trash物理空间回收
-    */
+     * @brief DeleteEligibleFileInTrashInterval reclaims trash physical space
+     * at regular intervals
+     */
    void DeleteEligibleFileInTrashInterval();

    /*
-    * @brief NeedDelete 文件是否需要删除,放入trash的时间大于
-    *        trash中expiredAfterSec可以删除
-    *
-    * @param[in] copysetDir copyset的目录路径
-    *
-    * @return true-可以被删除
-    */
-    bool NeedDelete(const std::string &copysetDir);
+     * @brief NeedDelete whether the file should be deleted; it may be
+     * deleted once it has been in the trash longer than expiredAfterSec
+     *
+     * @param[in] copysetDir copyset directory path
+     *
+     * @return true - it can be deleted
+     */
+    bool NeedDelete(const std::string& copysetDir);

    /*
-    * @brief IsCopysetInTrash 是否为回收站中的copyset的目录
-    *
-    * @param[in] dirName 文目录路径
-    *
-    * @return true-符合copyset目录命名规则
-    */
-    bool IsCopysetInTrash(const std::string &dirName);
+     * @brief IsCopysetInTrash whether the directory is a copyset directory
+     * in the recycle bin
+     *
+     * @param[in] dirName directory path
+     *
+     * @return true - complies with the copyset directory naming rules
+     */
+    bool IsCopysetInTrash(const std::string& dirName);

    /*
-    * @brief IsChunkOrSnapShotFile 是否为chunk或snapshot文件
-    *
-    * @param[in] chunkName 文件名
-    *
-    * @return true-符合chunk或snapshot文件命名规则
-    */
-    bool IsChunkOrSnapShotFile(const std::string &chunkName);
+     * @brief IsChunkOrSnapShotFile whether the file is a chunk or snapshot
+     * file
+     *
+     * @param[in] chunkName file name
+     *
+     * @return true - complies with the chunk or snapshot file naming rules
+     */
+    bool IsChunkOrSnapShotFile(const std::string& chunkName);

    /*
-     * @brief Recycle Chunkfile and wal file in Copyset
-     *
-     * @param[in] copysetDir copyset dir
-     * @param[in] filename filename
-     */
-    bool RecycleChunksAndWALInDir(
-        const std::string &copysetDir, const std::string &filename);
+    /*
+     * @brief Recycle Chunkfile and wal file in Copyset
+     *
+     * @param[in] copysetDir copyset dir
+     * @param[in] filename filename
+     */
+    bool RecycleChunksAndWALInDir(const std::string& copysetDir,
+                                  const std::string& filename);

    /*
-     * @brief Recycle Chunkfile
-     *
-     * @param[in] filepath 文件路径
-     * @param[in] filename 文件名
-     */
-    bool RecycleChunkfile(
-        const std::string &filepath, const std::string &filename);
+     * @brief Recycle Chunkfile
+     *
+     * @param[in] filepath file path
+     * @param[in] filename file name
+     */
+    bool RecycleChunkfile(const std::string& filepath,
+                          const std::string& filename);

    /**
     * @brief Recycle WAL
     *
     * @param[in] filepath file path
     * @param[in] filename file name
     *
     * @retval true  success
     * @retval false failure
     */
-    bool RecycleWAL(
-        const std::string &filepath, const std::string &filename);
-
+    bool RecycleWAL(const std::string& filepath, const std::string& filename);

    /**
     * @brief is WAL or not ?
     *
     * @param[in] fileName file name
     *
     * @retval true  yes
     * @retval false no
     */
-    bool IsWALFile(const std::string &fileName);
+    bool IsWALFile(const std::string& fileName);

    /*
-    * @brief 统计copyset目录中的chunk个数
-    *
-    * @param[in] copysetPath chunk所在目录
-    * @return 返回chunk个数
-    */
-    uint32_t CountChunkNumInCopyset(const std::string &copysetPath);
+     * @brief counts the number of chunks in the copyset directory
+     *
+     * @param[in] copysetPath directory where the chunks live
+     * @return returns the number of chunks
+     */
+    uint32_t CountChunkNumInCopyset(const std::string& copysetPath);

 private:
-    // 文件在放入trash中expiredAfteSec秒后,可以被物理回收
+    // A file can be physically recycled after it has been in the trash for
+    // expiredAfterSec seconds
    int expiredAfterSec_;

-    // 扫描trash目录的时间间隔
+    // Time interval for scanning the trash directory
    int scanPeriodSec_;

-    // 回收站中chunk的个数
+    // Number of chunks in the recycle bin
    Atomic chunkNum_;

    Mutex mtx_;

-    // 本地文件系统
+    // Local file system
    std::shared_ptr localFileSystem_;

-    // chunk池子
+    // Chunk pool
    std::shared_ptr chunkFilePool_;

    // wal pool
    std::shared_ptr walPool_;

-    // 回收站全路径
+    // Full path of the recycle bin
    std::string trashPath_;

-    // 后台清理回收站的线程
+    // Thread that cleans the recycle bin in the background
    Thread recycleThread_;

-    // false-开始后台任务,true-停止后台任务
+    // false - start the background task, true - stop the background task
    Atomic isStop_;

    InterruptibleSleeper sleeper_;
@@ -192,4 +197,3 @@ class Trash {
 }  // namespace curve

 #endif  // SRC_CHUNKSERVER_TRASH_H_
-
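The copyset directory names handled by trash (see IsCopysetInTrash in
trash.cpp above) encode the pool id in the high 32 bits of the decimal group
id and the copyset id in the low 32 bits. A standalone sketch of that
decoding, reproducing the 2860448220024 example from the comment (the helper
names here are illustrative, not Curve's real GetPoolID/GetCopysetID):

    #include <cstdint>
    #include <iostream>

    uint32_t PoolIdOf(uint64_t groupId)    { return groupId >> 32; }
    uint32_t CopysetIdOf(uint64_t groupId) { return groupId & 0xFFFFFFFFull; }

    int main() {
        uint64_t groupId = 2860448220024ULL;  // example from the comment
        std::cout << PoolIdOf(groupId) << " "           // prints 666
                  << CopysetIdOf(groupId) << "\n";      // prints 888
    }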
diff --git a/src/client/chunk_closure.cpp b/src/client/chunk_closure.cpp
index 592e9d2a06..d2345e85fc 100644
--- a/src/client/chunk_closure.cpp
+++ b/src/client/chunk_closure.cpp
@@ -22,48 +22,57 @@

 #include "src/client/chunk_closure.h"

-#include
-#include
 #include
+#include
+#include

 #include "src/client/client_common.h"
 #include "src/client/copyset_client.h"
+#include "src/client/io_tracker.h"
 #include "src/client/metacache.h"
 #include "src/client/request_closure.h"
 #include "src/client/request_context.h"
 #include "src/client/service_helper.h"
-#include "src/client/io_tracker.h"

-// TODO(tongguangxun) :优化重试逻辑,将重试逻辑与RPC返回逻辑拆开
+// TODO(tongguangxun): Optimize retry logic by separating the retry logic from
+// the RPC return logic
 namespace curve {
 namespace client {

-ClientClosure::BackoffParam ClientClosure::backoffParam_;
-FailureRequestOption ClientClosure::failReqOpt_;
+ClientClosure::BackoffParam ClientClosure::backoffParam_;
+FailureRequestOption ClientClosure::failReqOpt_;

 void ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) {
     RequestClosure* reqDone = static_cast<RequestClosure*>(done_);

-    // 如果对应的cooysetId leader可能发生变更
-    // 那么设置这次重试请求超时时间为默认值
-    // 这是为了尽快重试这次请求
-    // 从copysetleader迁移到client GetLeader获取到新的leader会有1~2s的延迟
-    // 对于一个请求来说,GetLeader仍然可能返回旧的Leader
-    // rpc timeout时间可能会被设置成2s/4s,等到超时后再去获取leader信息
-    // 为了尽快在新的Leader上重试请求,将rpc timeout时间设置为默认值
+    // If the leader of the corresponding copysetId may have changed, set the
+    // timeout of this retry request to the default value so that the request
+    // is retried as soon as possible.
+    // After a copyset leader migration it takes about 1~2s before the
+    // client's GetLeader obtains the new leader, and for a given request
+    // GetLeader may still return the old leader. The rpc timeout may already
+    // have been backed off to 2s/4s, and the leader would only be fetched
+    // again after that timeout expires. To retry the request on the new
+    // leader as soon as possible, the rpc timeout is reset to the default
+    // value.
     if (cntlstatus == brpc::ERPCTIMEDOUT || cntlstatus == ETIMEDOUT) {
         uint64_t nextTimeout = 0;
         uint64_t retriedTimes = reqDone->GetRetriedTimes();
         bool leaderMayChange = metaCache_->IsLeaderMayChange(
             chunkIdInfo_.lpid_, chunkIdInfo_.cpid_);

-        // 当某一个IO重试超过一定次数后,超时时间一定进行指数退避
-        // 当底层chunkserver压力大时,可能也会触发unstable
-        // 由于copyset leader may change,会导致请求超时时间设置为默认值
-        // 而chunkserver在这个时间内处理不了,导致IO hang
-        // 真正宕机的情况下,请求重试一定次数后会处理完成
-        // 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-        if (retriedTimes < failReqOpt_.chunkserverMinRetryTimesForceTimeoutBackoff && // NOLINT
+        // Once an IO has been retried more than a certain number of times,
+        // the timeout must back off exponentially.
+        // Unstable may also be triggered when the underlying chunkserver is
+        // under heavy pressure: because the copyset leader may change, the
+        // request timeout is reset to the default value, and if the
+        // chunkserver cannot finish within that time, IO hangs.
+        // When the server is really down, the request completes after a
+        // certain number of retries; if it keeps being retried, it is not a
+        // crash, and the timeout should still enter the exponential backoff
+        // logic.
+        if (retriedTimes <
+                failReqOpt_
+                    .chunkserverMinRetryTimesForceTimeoutBackoff &&  // NOLINT
             leaderMayChange) {
             nextTimeout = failReqOpt_.chunkserverRPCTimeoutMS;
         } else {
@@ -71,25 +80,23 @@ void ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) {
         }

         reqDone->SetNextTimeOutMS(nextTimeout);
-        LOG(WARNING) << "rpc timeout, next timeout = " << nextTimeout
-                     << ", " << *reqCtx_
-                     << ", retried times = " << reqDone->GetRetriedTimes()
-                     << ", IO id = " << reqDone->GetIOTracker()->GetID()
-                     << ", request id = " << reqCtx_->id_
-                     << ", remote side = "
-                     << butil::endpoint2str(cntl_->remote_side()).c_str();
+        LOG(WARNING) << "rpc timeout, next timeout = " << nextTimeout << ", "
+                     << *reqCtx_
+                     << ", retried times = " << reqDone->GetRetriedTimes()
+                     << ", IO id = " << reqDone->GetIOTracker()->GetID()
+                     << ", request id = " << reqCtx_->id_ << ", remote side = "
+                     << butil::endpoint2str(cntl_->remote_side()).c_str();
         return;
     }
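The branch above reduces to a single decision, sketched here with assumed names: while the leader may still have changed and the forced-backoff retry budget is not exhausted, keep the default timeout so the retry reaches the new leader quickly; otherwise apply the timeout backoff.

```cpp
#include <cstdint>

// Illustrative sketch, not the actual client code; the backoff policy is
// passed in as a function pointer to keep the example self-contained.
uint64_t NextRpcTimeoutMs(uint64_t retriedTimes, bool leaderMayChange,
                          uint64_t defaultTimeoutMs,
                          uint64_t minRetriesForceBackoff,
                          uint64_t (*timeoutBackOff)(uint64_t)) {
    if (retriedTimes < minRetriesForceBackoff && leaderMayChange) {
        return defaultTimeoutMs;  // retry quickly on the (possibly new) leader
    }
    return timeoutBackOff(retriedTimes);  // exponential backoff otherwise
}
```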
    if (rpcstatus == CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD) {
        uint64_t nextsleeptime = OverLoadBackOff(reqDone->GetRetriedTimes());
        LOG(WARNING) << "chunkserver overload, sleep(us) = " << nextsleeptime
-                     << ", " << *reqCtx_
-                     << ", retried times = " << reqDone->GetRetriedTimes()
-                     << ", IO id = " << reqDone->GetIOTracker()->GetID()
-                     << ", request id = " << reqCtx_->id_
-                     << ", remote side = "
-                     << butil::endpoint2str(cntl_->remote_side()).c_str();
+                     << ", " << *reqCtx_
+                     << ", retried times = " << reqDone->GetRetriedTimes()
+                     << ", IO id = " << reqDone->GetIOTracker()->GetID()
+                     << ", request id = " << reqCtx_->id_ << ", remote side = "
+                     << butil::endpoint2str(cntl_->remote_side()).c_str();
        bthread_usleep(nextsleeptime);
        return;
    }
@@ -103,19 +110,19 @@ void ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) {
        }
    }

-    LOG(WARNING)
-        << "Rpc failed "
-        << (retryDirectly_ ? "retry directly, "
-                           : "sleep " + std::to_string(nextSleepUS) + " us, ")
-        << *reqCtx_ << ", cntl status = " << cntlstatus
-        << ", response status = "
-        << curve::chunkserver::CHUNK_OP_STATUS_Name(
-               static_cast<CHUNK_OP_STATUS>(rpcstatus))
-        << ", retried times = " << reqDone->GetRetriedTimes()
-        << ", IO id = " << reqDone->GetIOTracker()->GetID()
-        << ", request id = " << reqCtx_->id_
-        << ", remote side = "
-        << butil::endpoint2str(cntl_->remote_side()).c_str();
+    LOG(WARNING) << "Rpc failed "
+                 << (retryDirectly_
+                         ? "retry directly, "
+                         : "sleep " + std::to_string(nextSleepUS) + " us, ")
+                 << *reqCtx_ << ", cntl status = " << cntlstatus
+                 << ", response status = "
+                 << curve::chunkserver::CHUNK_OP_STATUS_Name(
+                        static_cast<CHUNK_OP_STATUS>(rpcstatus))
+                 << ", retried times = " << reqDone->GetRetriedTimes()
+                 << ", IO id = " << reqDone->GetIOTracker()->GetID()
+                 << ", request id = " << reqCtx_->id_ << ", remote side = "
+                 << butil::endpoint2str(cntl_->remote_side()).c_str();

    if (nextSleepUS != 0) {
        bthread_usleep(nextSleepUS);
@@ -134,8 +141,11 @@ uint64_t ClientClosure::OverLoadBackOff(uint64_t currentRetryTimes) {
    random_time -= nextsleeptime / 10;
    nextsleeptime += random_time;

-    nextsleeptime = std::min(nextsleeptime, failReqOpt_.chunkserverMaxRetrySleepIntervalUS); // NOLINT
-    nextsleeptime = std::max(nextsleeptime, failReqOpt_.chunkserverOPRetryIntervalUS); // NOLINT
+    nextsleeptime =
+        std::min(nextsleeptime,
+                 failReqOpt_.chunkserverMaxRetrySleepIntervalUS);  // NOLINT
+    nextsleeptime = std::max(
+        nextsleeptime, failReqOpt_.chunkserverOPRetryIntervalUS);  // NOLINT

    return nextsleeptime;
}
@@ -153,10 +163,11 @@ uint64_t ClientClosure::TimeoutBackOff(uint64_t currentRetryTimes) {
    return nextTimeout;
}
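OverLoadBackOff above combines exponential growth, roughly ±10% random jitter (the `rand()`-based lines visible in the hunk) and clamping to the configured interval bounds. A self-contained sketch of the same shape, with placeholder constants rather than the configured defaults:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Illustrative sketch of the overload backoff: exponential growth with
// +/-10% jitter, clamped to [baseUs, maxUs].
uint64_t OverloadSleepUs(uint64_t retryTimes, uint64_t baseUs, uint64_t maxUs) {
    uint64_t sleepUs = baseUs << std::min<uint64_t>(retryTimes, 8);  // grow
    uint64_t jitter = static_cast<uint64_t>(rand()) % (sleepUs / 5 + 1);
    sleepUs = sleepUs - sleepUs / 10 + jitter;  // shift jitter to +/-10%
    return std::min(std::max(sleepUs, baseUs), maxUs);
}

int main() {
    for (uint64_t retry = 0; retry < 6; ++retry) {
        std::printf("retry %llu -> sleep %llu us\n",
                    static_cast<unsigned long long>(retry),
                    static_cast<unsigned long long>(
                        OverloadSleepUs(retry, 100000, 8000000)));
    }
    return 0;
}
```

The jitter keeps the many clients that hit the same overloaded chunkserver from retrying in lockstep.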
-// 统一请求回调函数入口
-// 整体处理逻辑与之前相同
-// 针对不同的请求类型和返回状态码,进行相应的处理
-// 各子类需要实现SendRetryRequest,进行重试请求
+// Unified entry point for request callbacks.
+// The overall processing logic is the same as before: each request type and
+// response status code is handled accordingly. Every subclass needs to
+// implement SendRetryRequest to issue its retry request.
 void ClientClosure::Run() {
     std::unique_ptr<ClientClosure> selfGuard(this);
     std::unique_ptr<brpc::Controller> cntlGuard(cntl_);
@@ -176,80 +187,81 @@ void ClientClosure::Run() {
         needRetry = true;
         OnRpcFailed();
     } else {
-        // 只要rpc正常返回,就清空超时计数器
-        metaCache_->GetUnstableHelper().ClearTimeout(
-            chunkserverID_, chunkserverEndPoint_);
+        // As long as the rpc returns normally, clear the timeout counter
+        metaCache_->GetUnstableHelper().ClearTimeout(chunkserverID_,
+                                                     chunkserverEndPoint_);

        status_ = GetResponseStatus();

        switch (status_) {
-        // 1. 请求成功
-        case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS:
-            OnSuccess();
-            break;
-
-        // 2.1 不是leader
-        case CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED:
-            MetricHelper::IncremRedirectRPCCount(fileMetric_, reqCtx_->optype_);
-            needRetry = true;
-            OnRedirected();
-            break;
-
-        // 2.2 Copyset不存在,大概率都是配置变更了
-        case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST:
-            needRetry = true;
-            OnCopysetNotExist();
-            break;
-
-        // 2.3 chunk not exist,直接返回,不用重试
-        case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST:
-            OnChunkNotExist();
-            break;
-
-        // 2.4 非法参数,直接返回,不用重试
-        case CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST:
-            OnInvalidRequest();
-            break;
+            // 1. Request successful
+            case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS:
+                OnSuccess();
+                break;
+
+            // 2.1 Not the leader
+            case CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED:
+                MetricHelper::IncremRedirectRPCCount(fileMetric_,
+                                                     reqCtx_->optype_);
+                needRetry = true;
+                OnRedirected();
+                break;

-        // 2.5 返回backward
-        case CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD:
-            if (reqCtx_->optype_ == OpType::WRITE) {
+            // 2.2 The copyset does not exist, most likely because the
+            // configuration changed
+            case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST:
                needRetry = true;
-                OnBackward();
-            } else {
-                LOG(ERROR) << OpTypeToString(reqCtx_->optype_)
-                           << " return backward, "
-                           << *reqCtx_
-                           << ", status=" << status_
+                OnCopysetNotExist();
+                break;
+
+            // 2.3 The chunk does not exist; return directly without retrying
+            case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST:
+                OnChunkNotExist();
+                break;
+
+            // 2.4 Illegal parameter; return directly without retrying
+            case CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST:
+                OnInvalidRequest();
+                break;
+
+            // 2.5 BACKWARD returned
+            case CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD:
+                if (reqCtx_->optype_ == OpType::WRITE) {
+                    needRetry = true;
+                    OnBackward();
+                } else {
+                    LOG(ERROR)
+                        << OpTypeToString(reqCtx_->optype_)
+                        << " return backward, " << *reqCtx_
+                        << ", status=" << status_
+                        << ", retried times = " << reqDone_->GetRetriedTimes()
+                        << ", IO id = " << reqDone_->GetIOTracker()->GetID()
+                        << ", request id = " << reqCtx_->id_
+                        << ", remote side = "
+                        << butil::endpoint2str(cntl_->remote_side()).c_str();
+                }
+                break;
+
+            // 2.6 CHUNK_EXIST returned; return directly without retrying
+            case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST:
+                OnChunkExist();
+                break;
+
+            case CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD:
+                OnEpochTooOld();
+                break;
+
+            default:
+                needRetry = true;
+                LOG(WARNING)
+                    << OpTypeToString(reqCtx_->optype_)
+                    << " failed for UNKNOWN reason, " << *reqCtx_ << ", status="
+                    << curve::chunkserver::CHUNK_OP_STATUS_Name(
+                           static_cast<CHUNK_OP_STATUS>(status_))
                    << ", retried times = " << reqDone_->GetRetriedTimes()
                    << ", IO id = " << reqDone_->GetIOTracker()->GetID()
-                    << ", request id = " << reqCtx_->id_
-                    << ", remote side = "
+                    << ", request id = " << reqCtx_->id_ << ", remote side = "
                    << butil::endpoint2str(cntl_->remote_side()).c_str();
-            }
-            break;
-
-        // 2.6 返回chunk exist,直接返回,不用重试
-        case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST:
-            OnChunkExist();
-            break;
-
-        case CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD:
-            OnEpochTooOld();
-            break;
-
-        default:
-            needRetry = true;
-            LOG(WARNING) << OpTypeToString(reqCtx_->optype_)
-                         << " failed for UNKNOWN reason, " << *reqCtx_
-                         << ", status="
-                         << curve::chunkserver::CHUNK_OP_STATUS_Name(
-                             static_cast<CHUNK_OP_STATUS>(status_))
-                         << ", retried times = " << reqDone_->GetRetriedTimes()
-                         << ", IO id = " << reqDone_->GetIOTracker()->GetID()
-                         << ", request id = " << reqCtx_->id_
-                         << ", remote side 
= " - << butil::endpoint2str(cntl_->remote_side()).c_str(); } } @@ -264,22 +276,22 @@ void ClientClosure::OnRpcFailed() { status_ = cntl_->ErrorCode(); - // 如果连接失败,再等一定时间再重试 + // If the connection fails, wait for a certain amount of time before trying + // again if (cntlstatus_ == brpc::ERPCTIMEDOUT) { - // 如果RPC超时, 对应的chunkserver超时请求次数+1 + // If RPC times out, the corresponding number of chunkserver timeout + // requests+1 metaCache_->GetUnstableHelper().IncreTimeout(chunkserverID_); MetricHelper::IncremTimeOutRPCCount(fileMetric_, reqCtx_->optype_); } - LOG_EVERY_SECOND(WARNING) << OpTypeToString(reqCtx_->optype_) - << " failed, error code: " - << cntl_->ErrorCode() - << ", error: " << cntl_->ErrorText() - << ", " << *reqCtx_ + LOG_EVERY_SECOND(WARNING) + << OpTypeToString(reqCtx_->optype_) + << " failed, error code: " << cntl_->ErrorCode() + << ", error: " << cntl_->ErrorText() << ", " << *reqCtx_ << ", retried times = " << reqDone_->GetRetriedTimes() << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " + << ", request id = " << reqCtx_->id_ << ", remote side = " << butil::endpoint2str(cntl_->remote_side()).c_str(); ProcessUnstableState(); @@ -291,26 +303,27 @@ void ClientClosure::ProcessUnstableState() { chunkserverID_, chunkserverEndPoint_); switch (state) { - case UnstableState::ServerUnstable: { - std::string ip = butil::ip2str(chunkserverEndPoint_.ip).c_str(); - int ret = metaCache_->SetServerUnstable(ip); - if (ret != 0) { - LOG(WARNING) << "Set server(" << ip << ") unstable failed, " - << "now set chunkserver(" << chunkserverID_ << ") unstable"; + case UnstableState::ServerUnstable: { + std::string ip = butil::ip2str(chunkserverEndPoint_.ip).c_str(); + int ret = metaCache_->SetServerUnstable(ip); + if (ret != 0) { + LOG(WARNING) + << "Set server(" << ip << ") unstable failed, " + << "now set chunkserver(" << chunkserverID_ << ") unstable"; + metaCache_->SetChunkserverUnstable(chunkserverID_); + } + break; + } + case UnstableState::ChunkServerUnstable: { metaCache_->SetChunkserverUnstable(chunkserverID_); + break; } - break; - } - case UnstableState::ChunkServerUnstable: { - metaCache_->SetChunkserverUnstable(chunkserverID_); - break; - } - case UnstableState::NoUnstable: { - RefreshLeader(); - break; - } - default: - break; + case UnstableState::NoUnstable: { + RefreshLeader(); + break; + } + default: + break; } } @@ -319,64 +332,58 @@ void ClientClosure::OnSuccess() { auto duration = cntl_->latency_us(); MetricHelper::LatencyRecord(fileMetric_, duration, reqCtx_->optype_); - MetricHelper::IncremRPCQPSCount( - fileMetric_, reqCtx_->rawlength_, reqCtx_->optype_); + MetricHelper::IncremRPCQPSCount(fileMetric_, reqCtx_->rawlength_, + reqCtx_->optype_); } void ClientClosure::OnChunkNotExist() { reqDone_->SetFailed(status_); - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " not exists, " << *reqCtx_ - << ", status=" << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " not exists, " + << *reqCtx_ << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); auto duration = 
cntl_->latency_us(); MetricHelper::LatencyRecord(fileMetric_, duration, reqCtx_->optype_); - MetricHelper::IncremRPCQPSCount( - fileMetric_, reqCtx_->rawlength_, reqCtx_->optype_); + MetricHelper::IncremRPCQPSCount(fileMetric_, reqCtx_->rawlength_, + reqCtx_->optype_); } void ClientClosure::OnChunkExist() { reqDone_->SetFailed(status_); - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " exists, " << *reqCtx_ - << ", status=" << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " exists, " << *reqCtx_ + << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); } void ClientClosure::OnEpochTooOld() { reqDone_->SetFailed(status_); LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " epoch too old, reqCtx: " << *reqCtx_ - << ", status: " << status_ - << ", retried times: " << reqDone_->GetRetriedTimes() - << ", IO id: " << reqDone_->GetIOTracker()->GetID() - << ", request id: " << reqCtx_->id_ - << ", remote side: " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << " epoch too old, reqCtx: " << *reqCtx_ + << ", status: " << status_ + << ", retried times: " << reqDone_->GetRetriedTimes() + << ", IO id: " << reqDone_->GetIOTracker()->GetID() + << ", request id: " << reqCtx_->id_ << ", remote side: " + << butil::endpoint2str(cntl_->remote_side()).c_str(); } void ClientClosure::OnRedirected() { LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " redirected, " - << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", redirect leader is " - << (response_->has_redirect() ? response_->redirect() : "empty") - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", redirect leader is " + << (response_->has_redirect() ? 
response_->redirect() + : "empty") + << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); if (response_->has_redirect()) { int ret = UpdateLeaderWithRedirectInfo(response_->redirect()); @@ -390,13 +397,11 @@ void ClientClosure::OnRedirected() { void ClientClosure::OnCopysetNotExist() { LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " copyset not exists, " - << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); RefreshLeader(); } @@ -443,23 +448,20 @@ void ClientClosure::RefreshLeader() { << ", IO id = " << reqDone_->GetIOTracker()->GetID() << ", request id = " << reqCtx_->id_; } else { - // 如果refresh leader获取到了新的leader信息 - // 则重试之前不进行睡眠 + // If the refresh leader obtains new leader information + // Do not sleep before retrying retryDirectly_ = (leaderId != chunkserverID_); } } void ClientClosure::OnBackward() { const auto latestSn = metaCache_->GetLatestFileSn(); - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " return BACKWARD, " - << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " return BACKWARD, " + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); reqCtx_->seq_ = latestSn; } @@ -467,38 +469,26 @@ void ClientClosure::OnBackward() { void ClientClosure::OnInvalidRequest() { reqDone_->SetFailed(status_); LOG(ERROR) << OpTypeToString(reqCtx_->optype_) - << " failed for invalid format, " << *reqCtx_ - << ", status=" << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << " failed for invalid format, " << *reqCtx_ + << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); MetricHelper::IncremFailRPCCount(fileMetric_, reqCtx_->optype_); } void WriteChunkClosure::SendRetryRequest() { - client_->WriteChunk(reqCtx_->idinfo_, - reqCtx_->fileId_, - reqCtx_->epoch_, - reqCtx_->seq_, - reqCtx_->writeData_, - reqCtx_->offset_, - reqCtx_->rawlength_, - reqCtx_->sourceInfo_, - done_); + client_->WriteChunk(reqCtx_->idinfo_, reqCtx_->fileId_, reqCtx_->epoch_, + reqCtx_->seq_, reqCtx_->writeData_, reqCtx_->offset_, + reqCtx_->rawlength_, reqCtx_->sourceInfo_, done_); } -void WriteChunkClosure::OnSuccess() { - ClientClosure::OnSuccess(); -} +void WriteChunkClosure::OnSuccess() { ClientClosure::OnSuccess(); } void 
ReadChunkClosure::SendRetryRequest() { - client_->ReadChunk(reqCtx_->idinfo_, reqCtx_->seq_, - reqCtx_->offset_, - reqCtx_->rawlength_, - reqCtx_->sourceInfo_, - done_); + client_->ReadChunk(reqCtx_->idinfo_, reqCtx_->seq_, reqCtx_->offset_, + reqCtx_->rawlength_, reqCtx_->sourceInfo_, done_); } void ReadChunkClosure::OnSuccess() { @@ -516,9 +506,7 @@ void ReadChunkClosure::OnChunkNotExist() { void ReadChunkSnapClosure::SendRetryRequest() { client_->ReadChunkSnapshot(reqCtx_->idinfo_, reqCtx_->seq_, - reqCtx_->offset_, - reqCtx_->rawlength_, - done_); + reqCtx_->offset_, reqCtx_->rawlength_, done_); } void ReadChunkSnapClosure::OnSuccess() { @@ -528,10 +516,8 @@ void ReadChunkSnapClosure::OnSuccess() { } void DeleteChunkSnapClosure::SendRetryRequest() { - client_->DeleteChunkSnapshotOrCorrectSn( - reqCtx_->idinfo_, - reqCtx_->correctedSeq_, - done_); + client_->DeleteChunkSnapshotOrCorrectSn(reqCtx_->idinfo_, + reqCtx_->correctedSeq_, done_); } void GetChunkInfoClosure::SendRetryRequest() { @@ -548,17 +534,16 @@ void GetChunkInfoClosure::OnSuccess() { } void GetChunkInfoClosure::OnRedirected() { - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " redirected, " << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", redirect leader is " - << (chunkinforesponse_->has_redirect() ? chunkinforesponse_->redirect() - : "empty") - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " redirected, " + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", redirect leader is " + << (chunkinforesponse_->has_redirect() + ? 
chunkinforesponse_->redirect() + : "empty") + << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); if (chunkinforesponse_->has_redirect()) { int ret = UpdateLeaderWithRedirectInfo(chunkinforesponse_->redirect()); @@ -571,19 +556,14 @@ void GetChunkInfoClosure::OnRedirected() { } void CreateCloneChunkClosure::SendRetryRequest() { - client_->CreateCloneChunk(reqCtx_->idinfo_, - reqCtx_->location_, - reqCtx_->seq_, - reqCtx_->correctedSeq_, - reqCtx_->chunksize_, - done_); + client_->CreateCloneChunk(reqCtx_->idinfo_, reqCtx_->location_, + reqCtx_->seq_, reqCtx_->correctedSeq_, + reqCtx_->chunksize_, done_); } void RecoverChunkClosure::SendRetryRequest() { - client_->RecoverChunk(reqCtx_->idinfo_, - reqCtx_->offset_, - reqCtx_->rawlength_, - done_); + client_->RecoverChunk(reqCtx_->idinfo_, reqCtx_->offset_, + reqCtx_->rawlength_, done_); } int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { @@ -601,7 +581,7 @@ int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { ret = metaCache_->UpdateLeader(lpId, cpId, leaderAddr.addr_); if (ret != 0) { LOG(WARNING) << "Update leader of copyset (" << lpId << ", " << cpId - << ") in metaCache fail"; + << ") in metaCache fail"; return -1; } @@ -609,7 +589,7 @@ int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { ret = metaCache_->GetLeader(lpId, cpId, &leaderId, &leaderEp); if (ret != 0) { LOG(INFO) << "Get leader of copyset (" << lpId << ", " << cpId - << ") from metaCache fail"; + << ") from metaCache fail"; return -1; } @@ -617,5 +597,5 @@ int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { return 0; } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/chunk_closure.h b/src/client/chunk_closure.h index f5d9acd220..3ca5a609df 100644 --- a/src/client/chunk_closure.h +++ b/src/client/chunk_closure.h @@ -23,15 +23,16 @@ #ifndef SRC_CLIENT_CHUNK_CLOSURE_H_ #define SRC_CLIENT_CHUNK_CLOSURE_H_ -#include #include #include +#include + #include #include #include "proto/chunk.pb.h" -#include "src/client/client_config.h" #include "src/client/client_common.h" +#include "src/client/client_config.h" #include "src/client/client_metric.h" #include "src/client/request_closure.h" #include "src/common/math_util.h" @@ -42,15 +43,15 @@ namespace client { using curve::chunkserver::CHUNK_OP_STATUS; using curve::chunkserver::ChunkResponse; using curve::chunkserver::GetChunkInfoResponse; -using ::google::protobuf::Message; using ::google::protobuf::Closure; +using ::google::protobuf::Message; class MetaCache; class CopysetClient; /** - * ClientClosure,负责保存Rpc上下文, - * 包含cntl和response已经重试次数 + * ClientClosure, responsible for saving Rpc context, + * Contains cntl and response retries */ class ClientClosure : public Closure { public: @@ -59,67 +60,59 @@ class ClientClosure : public Closure { virtual ~ClientClosure() = default; - void SetCntl(brpc::Controller* cntl) { - cntl_ = cntl; - } + void SetCntl(brpc::Controller* cntl) { cntl_ = cntl; } virtual void SetResponse(Message* response) { response_.reset(static_cast(response)); } - void SetChunkServerID(ChunkServerID csid) { - chunkserverID_ = csid; - } + void SetChunkServerID(ChunkServerID csid) { chunkserverID_ = csid; } - ChunkServerID GetChunkServerID() const { - return chunkserverID_; - } + ChunkServerID GetChunkServerID() const { return chunkserverID_; } void SetChunkServerEndPoint(const butil::EndPoint& endPoint) { 
chunkserverEndPoint_ = endPoint;
    }

-    EndPoint GetChunkServerEndPoint() const {
-        return chunkserverEndPoint_;
-    }
+    EndPoint GetChunkServerEndPoint() const { return chunkserverEndPoint_; }

-    // 统一Run函数入口
+    // Unified Run() entry point
    void Run() override;

-    // 重试请求
+    // Retry the request
    void OnRetry();

-    // Rpc Failed 处理函数
+    // Handler for rpc failure
    void OnRpcFailed();

-    // 返回成功 处理函数
+    // Handler for a successful response
    virtual void OnSuccess();

-    // 返回重定向 处理函数
+    // Handler for a redirect response
    virtual void OnRedirected();

-    // copyset不存在
+    // The copyset does not exist
    void OnCopysetNotExist();

-    // 返回backward
+    // Handler for a BACKWARD response
    void OnBackward();

-    // 返回chunk不存在 处理函数
+    // Handler for a "chunk does not exist" response
    virtual void OnChunkNotExist();

-    // 返回chunk存在 处理函数
+    // Handler for a "chunk already exists" response
    void OnChunkExist();

    // handle epoch too old
    void OnEpochTooOld();

-    // 非法参数
+    // Illegal parameter
    void OnInvalidRequest();

-    // 发送重试请求
+    // Send the retry request
    virtual void SendRetryRequest() = 0;

-    // 获取response返回的状态码
+    // Obtain the status code carried in the response
    virtual CHUNK_OP_STATUS GetResponseStatus() const {
        return response_->status();
    }
@@ -132,45 +125,43 @@ class ClientClosure : public Closure {
        SetBackoffParam();

        DVLOG(9) << "Client clousre conf info: "
-              << "chunkserverOPRetryIntervalUS = "
-              << failReqOpt_.chunkserverOPRetryIntervalUS
-              << ", chunkserverOPMaxRetry = "
-              << failReqOpt_.chunkserverOPMaxRetry;
+                 << "chunkserverOPRetryIntervalUS = "
+                 << failReqOpt_.chunkserverOPRetryIntervalUS
+                 << ", chunkserverOPMaxRetry = "
+                 << failReqOpt_.chunkserverOPMaxRetry;
    }

-    Closure* GetClosure() const {
-        return done_;
-    }
+    Closure* GetClosure() const { return done_; }

-    // 测试使用,设置closure
-    void SetClosure(Closure* done) {
-        done_ = done;
-    }
+    // For testing: set the closure
+    void SetClosure(Closure* done) { done_ = done; }

-    static FailureRequestOption GetFailOpt() {
-        return failReqOpt_;
-    }
+    static FailureRequestOption GetFailOpt() { return failReqOpt_; }

    /**
-     * 在重试之前根据返回值进行预处理
-     * 场景1: rpc timeout,那么这时候会指数增加当前rpc的超时时间,然后直接进行重试
-     * 场景2:底层OVERLOAD,那么需要在重试之前睡眠一段时间,睡眠时间根据重试次数指数增长
-     * @param: rpcstatue为rpc返回值
-     * @param: cntlstatus为本次rpc controller返回值
+     * Preprocess according to the return values before retrying.
+     * Scenario 1: rpc timeout. The current rpc timeout is increased
+     * exponentially and the request is retried directly.
+     * Scenario 2: the underlying chunkserver returns OVERLOAD. The client
+     * must sleep for a while before retrying, and the sleep time grows
+     * exponentially with the number of retries.
+     * @param: rpcstatue is the rpc return value
+     * @param: cntlstatus is the return value of this rpc controller
     */
    void PreProcessBeforeRetry(int rpcstatue, int cntlstatus);
    /**
-     * 底层chunkserver overload之后需要根据重试次数进行退避
-     * @param: currentRetryTimes为当前已重试的次数
-     * @return: 返回当前的需要睡眠的时间
+     * After the underlying chunkserver reports overload, back off according
+     * to the number of retries
+     * @param: currentRetryTimes is the number of retries so far
+     * @return: the time to sleep before the next retry
     */
    static uint64_t OverLoadBackOff(uint64_t currentRetryTimes);

    /**
-     * rpc timeout之后需要根据重试次数进行退避
-     * @param: currentRetryTimes为当前已重试的次数
-     * @return: 返回下一次RPC 超时时间
+     * After an rpc timeout, back off according to the number of retries
+     * @param: currentRetryTimes is the number of retries so far
+     * @return: the next rpc timeout
     */
    static uint64_t TimeoutBackOff(uint64_t currentRetryTimes);
@@
-207,32 +198,33 @@ class ClientClosure : public Closure { void RefreshLeader(); - static FailureRequestOption failReqOpt_; - - brpc::Controller* cntl_; - std::unique_ptr response_; - CopysetClient* client_; - Closure* done_; - // 这里保存chunkserverID,是为了区别当前这个rpc是发给哪个chunkserver的 - // 这样方便在rpc closure里直接找到,当前是哪个chunkserver返回的失败 - ChunkServerID chunkserverID_; - butil::EndPoint chunkserverEndPoint_; - - // 记录当前请求的相关信息 - MetaCache* metaCache_; - RequestClosure* reqDone_; - FileMetric* fileMetric_; - RequestContext* reqCtx_; - ChunkIDInfo chunkIdInfo_; - - // 发送重试请求前是否睡眠 + static FailureRequestOption failReqOpt_; + + brpc::Controller* cntl_; + std::unique_ptr response_; + CopysetClient* client_; + Closure* done_; + // The chunkserverID is saved here to distinguish which chunkserver the + // current rpc is sent to This makes it easy to directly find which + // chunkserver is currently returning the failure in the rpc closure + ChunkServerID chunkserverID_; + butil::EndPoint chunkserverEndPoint_; + + // Record relevant information for the current request + MetaCache* metaCache_; + RequestClosure* reqDone_; + FileMetric* fileMetric_; + RequestContext* reqCtx_; + ChunkIDInfo chunkIdInfo_; + + // Whether to sleep before sending a retry request bool retryDirectly_ = false; - // response 状态码 - int status_; + // response status code + int status_; - // rpc 状态码 - int cntlstatus_; + // rpc status code + int cntlstatus_; }; class WriteChunkClosure : public ClientClosure { @@ -308,7 +300,7 @@ class RecoverChunkClosure : public ClientClosure { void SendRetryRequest() override; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_CHUNK_CLOSURE_H_ diff --git a/src/client/client_common.h b/src/client/client_common.h index 8620f050d1..97598a7038 100644 --- a/src/client/client_common.h +++ b/src/client/client_common.h @@ -28,8 +28,8 @@ #include #include -#include #include +#include #include "include/client/libcurve.h" #include "src/common/throttle.h" @@ -53,7 +53,7 @@ constexpr uint64_t KiB = 1024; constexpr uint64_t MiB = 1024 * KiB; constexpr uint64_t GiB = 1024 * MiB; -// 操作类型 +// Operation type enum class OpType { READ = 0, WRITE, @@ -67,7 +67,7 @@ enum class OpType { }; /** - * 与nameserver.proto中的FileStatus一一对应 + * Corresponds one-to-one with FileStatus in nameserver.proto */ enum class FileStatus { Created = 0, @@ -90,12 +90,10 @@ typedef struct ChunkIDInfo { ChunkIDInfo(ChunkID cid, LogicPoolID lpid, CopysetID cpid) : cid_(cid), cpid_(cpid), lpid_(lpid) {} - bool Valid() const { - return lpid_ > 0 && cpid_ > 0; - } + bool Valid() const { return lpid_ > 0 && cpid_ > 0; } } ChunkIDInfo_t; -// 保存每个chunk对应的版本信息 +// Save the version information corresponding to each chunk typedef struct ChunkInfoDetail { std::vector chunkSn; } ChunkInfoDetail_t; @@ -106,7 +104,8 @@ typedef struct LeaseSession { uint64_t createTime; } LeaseSession_t; -// 保存logicalpool中segment对应的copysetid信息 +// Save the copysetid information corresponding to the segment in the +// logicalpool typedef struct LogicalPoolCopysetIDInfo { LogicPoolID lpid; std::vector cpidVec; @@ -117,7 +116,7 @@ typedef struct LogicalPoolCopysetIDInfo { } } LogicalPoolCopysetIDInfo_t; -// 保存每个segment的基本信息 +// Save basic information for each segment typedef struct SegmentInfo { uint32_t segmentsize; uint32_t chunksize; @@ -147,9 +146,9 @@ typedef struct FInfo { uint64_t length; uint64_t ctime; uint64_t seqnum; - // userinfo是当前操作这个文件的用户信息 + // userinfo is the user information currently operating on this file 
UserInfo_t userinfo; - // owner是当前文件所属信息 + // owner is the information to which the current file belongs std::string owner; std::string filename; std::string fullPathName; @@ -162,7 +161,7 @@ typedef struct FInfo { uint64_t stripeCount; std::string poolset; - OpenFlags openflags; + OpenFlags openflags; common::ReadWriteThrottleParams throttleParams; FInfo() { @@ -187,10 +186,10 @@ typedef struct FileEpoch { } } FileEpoch_t; -// PeerAddr 代表一个copyset group里的一个chunkserver节点 -// 与braft中的PeerID对应 +// PeerAddr represents a chunkserver node in a copyset group +// Corresponds to PeerID in braft struct PeerAddr { - // 节点的地址信息 + // Address information of nodes EndPoint addr_; PeerAddr() = default; @@ -198,17 +197,17 @@ struct PeerAddr { bool IsEmpty() const { return (addr_.ip == butil::IP_ANY && addr_.port == 0) && - addr_.socket_file.empty(); + addr_.socket_file.empty(); } - // 重置当前地址信息 + // Reset current address information void Reset() { addr_.ip = butil::IP_ANY; addr_.port = 0; } - // 从字符串中将地址信息解析出来 - int Parse(const std::string &str) { + // Parse address information from a string + int Parse(const std::string& str) { int idx; char ip_str[64]; if (2 > sscanf(str.c_str(), "%[^:]%*[:]%d%*[:]%d", ip_str, &addr_.port, @@ -224,8 +223,9 @@ struct PeerAddr { return 0; } - // 将该节点地址信息转化为字符串形式 - // 在get leader调用中可以将该值直接传入request + // Convert the node address information into a string format + // In the get leader call, this value can be directly passed into the + // request std::string ToString() const { char str[128]; snprintf(str, sizeof(str), "%s:%d", butil::endpoint2str(addr_).c_str(), @@ -233,32 +233,32 @@ struct PeerAddr { return std::string(str); } - bool operator==(const PeerAddr &other) const { + bool operator==(const PeerAddr& other) const { return addr_ == other.addr_; } }; -inline const char *OpTypeToString(OpType optype) { +inline const char* OpTypeToString(OpType optype) { switch (optype) { - case OpType::READ: - return "Read"; - case OpType::WRITE: - return "Write"; - case OpType::READ_SNAP: - return "ReadSnapshot"; - case OpType::DELETE_SNAP: - return "DeleteSnapshot"; - case OpType::CREATE_CLONE: - return "CreateCloneChunk"; - case OpType::RECOVER_CHUNK: - return "RecoverChunk"; - case OpType::GET_CHUNK_INFO: - return "GetChunkInfo"; - case OpType::DISCARD: - return "Discard"; - case OpType::UNKNOWN: - default: - return "Unknown"; + case OpType::READ: + return "Read"; + case OpType::WRITE: + return "Write"; + case OpType::READ_SNAP: + return "ReadSnapshot"; + case OpType::DELETE_SNAP: + return "DeleteSnapshot"; + case OpType::CREATE_CLONE: + return "CreateCloneChunk"; + case OpType::RECOVER_CHUNK: + return "RecoverChunk"; + case OpType::GET_CHUNK_INFO: + return "GetChunkInfo"; + case OpType::DISCARD: + return "Discard"; + case OpType::UNKNOWN: + default: + return "Unknown"; } } @@ -279,16 +279,14 @@ class SnapCloneClosure : public google::protobuf::Closure { class ClientDummyServerInfo { public: - static ClientDummyServerInfo &GetInstance() { + static ClientDummyServerInfo& GetInstance() { static ClientDummyServerInfo clientInfo; return clientInfo; } - void SetIP(const std::string &ip) { localIP_ = ip; } + void SetIP(const std::string& ip) { localIP_ = ip; } - std::string GetIP() const { - return localIP_; - } + std::string GetIP() const { return localIP_; } void SetPort(uint32_t port) { localPort_ = port; } @@ -309,22 +307,22 @@ class ClientDummyServerInfo { inline void TrivialDeleter(void*) {} -inline const char *FileStatusToName(FileStatus status) { +inline const char* 
FileStatusToName(FileStatus status) { switch (status) { - case FileStatus::Created: - return "Created"; - case FileStatus::Deleting: - return "Deleting"; - case FileStatus::Cloning: - return "Cloning"; - case FileStatus::CloneMetaInstalled: - return "CloneMetaInstalled"; - case FileStatus::Cloned: - return "Cloned"; - case FileStatus::BeingCloned: - return "BeingCloned"; - default: - return "Unknown"; + case FileStatus::Created: + return "Created"; + case FileStatus::Deleting: + return "Deleting"; + case FileStatus::Cloning: + return "Cloning"; + case FileStatus::CloneMetaInstalled: + return "CloneMetaInstalled"; + case FileStatus::Cloned: + return "Cloned"; + case FileStatus::BeingCloned: + return "BeingCloned"; + default: + return "Unknown"; } } @@ -359,7 +357,7 @@ struct CreateFileContext { std::string poolset; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_CLIENT_COMMON_H_ diff --git a/src/client/client_metric.h b/src/client/client_metric.h index ba841e8b80..79959f319b 100644 --- a/src/client/client_metric.h +++ b/src/client/client_metric.h @@ -28,9 +28,9 @@ #include #include -#include "src/common/timeutility.h" #include "src/client/client_common.h" #include "src/common/string_util.h" +#include "src/common/timeutility.h" using curve::common::TimeUtility; @@ -48,11 +48,11 @@ struct SlowRequestMetric { : count(prefix, name + "_total") {} }; -// 秒级信息统计 +// Second-level information statistics struct PerSecondMetric { - // 当前persecond计数总数 + // Current total number of second counts bvar::Adder count; - // persecond真实数据,这个数据依赖于count + // persecond real data depends on the count bvar::PerSecond> value; PerSecondMetric(const std::string& prefix, const std::string& name) @@ -60,21 +60,21 @@ struct PerSecondMetric { value(prefix, name, &count, 1) {} }; -// 接口统计信息metric信息统计 +// Interface statistics information metric information statistics struct InterfaceMetric { - // 接口统计信息调用qps + // Call qps for interface statistics information PerSecondMetric qps; // error request persecond PerSecondMetric eps; // receive request persecond PerSecondMetric rps; - // 调用吞吐 + // Call throughput PerSecondMetric bps; - // 调用超时次数qps + // Call timeout count qps PerSecondMetric timeoutQps; - // 调用redirect次数qps + // Number of calls to redirect qps PerSecondMetric redirectQps; - // 调用latency + // Call latency bvar::LatencyRecorder latency; InterfaceMetric(const std::string& prefix, const std::string& name) @@ -100,33 +100,36 @@ struct DiscardMetric { bvar::Adder pending; }; -// 文件级别metric信息统计 +// File level metric information statistics struct FileMetric { const std::string prefix = "curve_client"; - // 当前metric归属于哪个文件 + // Which file does the current metric belong to std::string filename; - // 当前文件inflight io数量 + // Current file inflight io quantity bvar::Adder inflightRPCNum; - // 当前文件请求的最大请求字节数,这种统计方式可以很方便的看到最大值,分位值 + // The maximum number of request bytes for the current file request, which + // is a convenient statistical method to see the maximum and quantile values bvar::LatencyRecorder readSizeRecorder; bvar::LatencyRecorder writeSizeRecorder; bvar::LatencyRecorder discardSizeRecorder; - // libcurve最底层read rpc接口统计信息metric统计 + // Libcurve's lowest level read rpc interface statistics information metric + // statistics InterfaceMetric readRPC; - // libcurve最底层write rpc接口统计信息metric统计 + // Libcurve's lowest level write rpc interface statistics information metric + // statistics InterfaceMetric writeRPC; - // 用户读请求qps、eps、rps + // User Read Request QPS, 
EPS, RPS InterfaceMetric userRead; - // 用户写请求qps、eps、rps + // User write request QPS, EPS, RPS InterfaceMetric userWrite; // user's discard request InterfaceMetric userDiscard; - // get leader失败重试qps + // Get leader failed and retry qps PerSecondMetric getLeaderRetryQPS; // Number of slow requests @@ -151,52 +154,52 @@ struct FileMetric { discardMetric(prefix + filename) {} }; -// 用于全局mds接口统计信息调用信息统计 +// Used for global mds interface statistics, call information statistics struct MDSClientMetric { std::string prefix; - // mds的地址信息 + // Address information of mds std::string metaserverAddr; bvar::PassiveStatus metaserverAddress; - // openfile接口统计信息 + // openfile interface statistics InterfaceMetric openFile; - // createFile接口统计信息 + // createFile interface statistics InterfaceMetric createFile; - // closeFile接口统计信息 + // closeFile interface statistics InterfaceMetric closeFile; - // getFileInfo接口统计信息 + // GetFileInfo interface statistics InterfaceMetric getFile; - // RefreshSession接口统计信息 + // RefreshSession Interface Statistics InterfaceMetric refreshSession; - // GetServerList接口统计信息 + // GetServerList interface statistics InterfaceMetric getServerList; - // GetOrAllocateSegment接口统计信息 + // GetOrAllocateSegment interface statistics InterfaceMetric getOrAllocateSegment; - // DeAllocateSegment接口统计信息 + // DeAllocateSegment Interface Statistics InterfaceMetric deAllocateSegment; - // RenameFile接口统计信息 + // RenameFile Interface Statistics InterfaceMetric renameFile; - // Extend接口统计信息 + // Extend Interface Statistics InterfaceMetric extendFile; - // DeleteFile接口统计信息 + // deleteFile interface statistics InterfaceMetric deleteFile; // RecoverFile interface metric InterfaceMetric recoverFile; - // changeowner接口统计信息 + // changeowner Interface Statistics InterfaceMetric changeOwner; - // listdir接口统计信息 + // Listdir interface statistics InterfaceMetric listDir; - // register接口统计信息 + // Register Interface Statistics InterfaceMetric registerClient; - // GetChunkServerID接口统计 + // GetChunkServerID interface statistics InterfaceMetric getChunkServerId; - // ListChunkServerInServer接口统计 + // ListChunkServerInServer Interface Statistics InterfaceMetric listChunkserverInServer; // IncreaseEpoch InterfaceMetric increaseEpoch; - // 切换mds server总次数 + // Total number of switching MDS server bvar::Adder mdsServerChangeTimes; explicit MDSClientMetric(const std::string& prefix_ = "") @@ -243,8 +246,8 @@ struct LatencyGuard { class MetricHelper { public: /** - * 统计getleader重试次数 - * @param: fm为当前文件的metric指针 + * Count the number of retries for getleader + * @param: fm is the metric pointer of the current file */ static void IncremGetLeaderRetryTime(FileMetric* fm) { if (fm != nullptr) { @@ -253,13 +256,14 @@ class MetricHelper { } /** - * 统计用户当前读写请求次数,用于qps计算 - * @param: fm为当前文件的metric指针 - * @param: length为当前请求大小 - * @param: read为当前操作是读操作还是写操作 + * Count the current number of read and write requests from users for QPS + * calculation + * @param: fm is the metric pointer of the current file + * @param: length is the current request size + * @param: read is whether the current operation is a read or write + * operation */ - static void IncremUserQPSCount(FileMetric* fm, - uint64_t length, + static void IncremUserQPSCount(FileMetric* fm, uint64_t length, OpType type) { if (fm != nullptr) { switch (type) { @@ -284,9 +288,11 @@ class MetricHelper { } /** - * 统计用户当前读写请求失败次数,用于eps计算 - * @param: fm为当前文件的metric指针 - * @param: read为当前操作是读操作还是写操作 + * Count the current number of failed read/write requests by users for EPS + * calculation + 
* @param: fm is the metric pointer of the current file
+     * @param: type indicates whether the current operation is a read or a
+     * write
     */
    static void IncremUserEPSCount(FileMetric* fm, OpType type) {
        if (fm != nullptr) {
@@ -306,13 +312,18 @@
        }
    }

    /**
-     * 统计用户当前接收到的读写请求次数,用于rps计算
-     * rps: receive request persecond, 就是当前接口每秒接收到的请求数量
-     * qps: query request persecond, 就是当前接口每秒处理的请求数量
-     * eps: error request persecond, 就是当前接口每秒出错的请求数量
-     * rps减去qps就是当前client端每秒钟等待的请求数量,这部分请求会持久占用当前一秒内的内存
-     * @param: fm为当前文件的metric指针
-     * @param: read为当前操作是读操作还是写操作
+     * Count the read/write requests currently received from the user, used
+     * for the rps calculation.
+     * rps: receive requests per second, the number of requests this
+     * interface receives per second
+     * qps: query requests per second, the number of requests this interface
+     * processes per second
+     * eps: error requests per second, the number of requests that fail per
+     * second on this interface
+     * rps minus qps is the number of requests the client is waiting on each
+     * second; those requests keep occupying memory during that second
+     * @param: fm is the metric pointer of the current file
+     * @param: type indicates whether the current operation is a read or a
+     * write
     */
    static void IncremUserRPSCount(FileMetric* fm, OpType type) {
        if (fm != nullptr) {
@@ -332,9 +343,10 @@
        }
    }

    /**
-     * 统计当前rpc失败次数,用于eps计算
-     * @param: fm为当前文件的metric指针
-     * @param: read为当前操作是读操作还是写操作
+     * Count the current number of rpc failures, used for the eps calculation
+     * @param: fm is the metric pointer of the current file
+     * @param: type indicates whether the current operation is a read or a
+     * write
     */
    static void IncremFailRPCCount(FileMetric* fm, OpType type) {
        if (fm != nullptr) {
@@ -352,9 +364,11 @@
        }
    }

    /**
-     * 统计用户当前读写请求超时次数,用于timeoutQps计算
-     * @param: fm为当前文件的metric指针
-     * @param: read为当前操作是读操作还是写操作
+     * Count how many times the user's read/write requests have timed out,
+     * used for the timeoutQps calculation
+     * @param: fm is the metric pointer of the current file
+     * @param: type indicates whether the current operation is a read or a
+     * write
     */
    static void IncremTimeOutRPCCount(FileMetric* fm, OpType type) {
        if (fm != nullptr) {
@@ -372,9 +386,9 @@
        }
    }

    /**
-     * 统计请求被redirect的次数
-     * @param fileMetric 当前文件的metric指针
-     * @param opType 请求类型
+     * Count the number of times requests have been redirected
+     * @param fileMetric the metric pointer of the current file
+     * @param opType the request type
     */
    static void IncremRedirectRPCCount(FileMetric* fileMetric, OpType opType) {
        if (fileMetric) {
@@ -392,13 +406,14 @@
        }
    }

    /**
-     * 统计读写RPC接口统计信息请求次数及带宽统计,用于qps及bps计算
-     * @param: fm为当前文件的metric指针
-     * @param: length为当前请求大小
-     * @param: read为当前操作是读操作还是写操作
+     * Count the read/write rpc requests and bandwidth, used for the qps and
+     * bps calculations
+     * @param: fm is the metric pointer of the current file
+     * @param: length is the current request size
+     * @param: type indicates whether the current operation is a read or a
+     * write
     */
-    static void IncremRPCQPSCount(FileMetric* fm,
-                                  uint64_t length,
+    static void IncremRPCQPSCount(FileMetric* fm, uint64_t length,
                                  OpType type) {
        if (fm != nullptr) {
            switch (type) {
@@ -417,13 +432,14 @@
    }
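The qps/eps/rps helpers above all feed `PerSecondMetric` instances declared earlier in this header: a `bvar::Adder` as the raw counter plus a `bvar::PerSecond` window over it. A minimal sketch of that pattern (the prefix and metric names here are illustrative):

```cpp
#include <bvar/bvar.h>
#include <cstdint>

struct RequestCounters {
    bvar::Adder<uint64_t> received;  // rps numerator: bumped on arrival
    bvar::Adder<uint64_t> handled;   // qps numerator: bumped on completion
    bvar::Adder<uint64_t> failed;    // eps numerator: bumped on error
    bvar::PerSecond<bvar::Adder<uint64_t>> rps{"demo", "rps", &received, 1};
    bvar::PerSecond<bvar::Adder<uint64_t>> qps{"demo", "qps", &handled, 1};
    bvar::PerSecond<bvar::Adder<uint64_t>> eps{"demo", "eps", &failed, 1};
};

// Usage: received << 1 on arrival, handled << 1 on success, failed << 1 on
// error; rps minus qps then approximates the requests waiting in the client.
```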
    /**
-     * 统计读写RPC接口统计信息请求次数及带宽统计,用于rps计算
-     * @param: fm为当前文件的metric指针
-     * @param: length为当前请求大小
-     * @param: read为当前操作是读操作还是写操作
+     * Count the read/write rpc requests and bandwidth, used for the rps
+     * calculation
+     * @param: fm is the metric pointer of the current file
+     * @param: length is the current request size
+     * @param: type indicates whether the current operation is a read or a
+     * write
     */
-    static void IncremRPCRPSCount(FileMetric* fm,
-                                  OpType type) {
+    static void IncremRPCRPSCount(FileMetric* fm, OpType type) {
        if (fm != nullptr) {
            switch (type) {
            case OpType::READ:
@@ -438,9 +454,7 @@
        }
    }

-    static void LatencyRecord(FileMetric* fm,
-                              uint64_t duration,
-                              OpType type) {
+    static void LatencyRecord(FileMetric* fm, uint64_t duration, OpType type) {
        if (fm != nullptr) {
            switch (type) {
            case OpType::READ:
@@ -455,8 +469,7 @@
        }
    }

-    static void UserLatencyRecord(FileMetric* fm,
-                                  uint64_t duration,
+    static void UserLatencyRecord(FileMetric* fm, uint64_t duration,
                                  OpType type) {
        if (fm != nullptr) {
            switch (type) {
@@ -500,7 +513,7 @@
        }
    }
};
-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve

 #endif  // SRC_CLIENT_CLIENT_METRIC_H_
diff --git a/src/client/config_info.h b/src/client/config_info.h
index 620d464eae..e324a6e8ba 100644
--- a/src/client/config_info.h
+++ b/src/client/config_info.h
@@ -24,6 +24,7 @@
 #define SRC_CLIENT_CONFIG_INFO_H_

 #include
+
 #include
 #include

@@ -31,9 +32,9 @@ namespace curve {
 namespace client {

 /**
- * log的基本配置信息
- * @logLevel: 是log打印等级
- * @logPath: log打印位置
+ * Basic log configuration
+ * @logLevel: the log print level
+ * @logPath: the log output path
 */
 struct LogInfo {
     int logLevel = 2;
@@ -41,8 +42,9 @@
 };

 /**
- * in flight IO控制信息
- * @fileMaxInFlightRPCNum: 为一个文件中最大允许的inflight IO数量
+ * In-flight IO control information
+ * @fileMaxInFlightRPCNum: the maximum number of inflight IOs allowed for a
+ * single file
 */
 struct InFlightIOCntlInfo {
     uint64_t fileMaxInFlightRPCNum = 2048;
 };
@@ -78,27 +80,29 @@ struct MetaServerOption {
 };

 /**
- * 租约基本配置
- * @mdsRefreshTimesPerLease: 一个租约内续约次数,client与mds之间通过租约保持心跳
- *                           如果双方约定租约有效期为10s,那么client会在这10s内
- *                           发送mdsRefreshTimesPerLease次心跳,如果连续失败,
- *                           那么client认为当前mds存在异常,会阻塞后续的IO,直到
- *                           续约成功。
+ * Basic lease configuration
+ * @mdsRefreshTimesPerLease: the number of renewals within one lease. The
+ * client and the mds keep their heartbeat through the lease: if both sides
+ * agree on a lease term of 10s, the client sends mdsRefreshTimesPerLease
+ * heartbeats within those 10s. If renewals fail consecutively, the client
+ * assumes the current mds is abnormal and blocks subsequent IO until a
+ * renewal succeeds.
*/ struct LeaseOption { uint32_t mdsRefreshTimesPerLease = 5; }; /** - * rpc超时,判断是否unstable的参数 + * RPC timeout, parameter to determine if it is unstable * @maxStableChunkServerTimeoutTimes: - * 一个chunkserver连续超时请求的阈值, 超过之后会检查健康状态, - * 如果不健康,则标记为unstable + * The threshold for a chunkserver to continuously timeout requests, after + * which the health status will be checked, If not healthy, mark as unstable * @checkHealthTimeoutMS: - * 检查chunkserver是否健康的http请求超时时间 + * Check if chunkserver is healthy HTTP request timeout * @serverUnstableThreashold: - * 一个server上超过serverUnstableThreashold个chunkserver都标记为unstable, - * 整个server上的所有chunkserver都标记为unstable + * More than serverUnstableThreashold chunkservers on a server are marked + * as unstable, All chunkservers on the entire server are marked as unstable */ struct ChunkServerUnstableOption { uint32_t maxStableChunkServerTimeoutTimes = 64; @@ -107,37 +111,40 @@ struct ChunkServerUnstableOption { }; /** - * 发送失败的chunk request处理 + * Handling of failed chunk request: * @chunkserverOPMaxRetry: - * 最大重试次数,一个RPC下发到底层chunkserver,最大允许的失败 - * 次数,超限之后会向上返回用户。 + * Maximum retry count allowed for an RPC sent to the underlying chunk server. + * If exceeded, it will be propagated to the user. * @chunkserverOPRetryIntervalUS: - * 相隔多久再重试,一个rpc失败之后client会根据其返回 - * 状态决定是否需要睡眠一段时间再重试,目前除了 - * TIMEOUT、REDIRECTED,这两种返回值,其他返回值都是需要 - * 先睡眠一段时间再重试。 - * @chunkserverRPCTimeoutMS: 为每个rpc发送时,其rpc controller配置的超时时间 + * Time interval between retries. After a failed RPC, the client will sleep for + * a period determined by the RPC response status before retrying. Currently, + * except for TIMEOUT and REDIRECTED, all other response + * values require sleeping for some time before retrying. + * @chunkserverRPCTimeoutMS: Timeout configured for each RPC sent when creating + * its RPC controller. * @chunkserverMaxRPCTimeoutMS: - * 在底层chunkserver返回TIMEOUT时,说明当前请求在底层 - * 无法及时得到处理,原因可能是底层的排队任务太多,这时候如果 - * 以相同的rpc - * 超时时间再去发送请求,很有可能最后还是超时, - * 所以为了避免底层处理请求时,rpc在client一侧已经超时的这种 - * 状况,为rpc超时时间增加了指数退避逻辑,超时时间会逐渐增加, - * 最大不能超过该值。 + * When the underlying chunkserver returns TIMEOUT, it means the current request + * cannot be processed promptly, possibly due to a large number of queued tasks. + * In such cases, sending requests with the same RPC timeout again may still + * result in timeouts. To avoid this, exponential backoff logic is applied to + * increase the timeout gradually, but it cannot exceed this maximum value. * @chunkserverMaxRetrySleepIntervalUS: - * 在底层返回OVERLOAD时,表明当前chunkserver - * 压力过大,这时候睡眠时间会进行指数退避,睡眠时间会加长,这样 - * 能够保证client的请求不会将底层chunkserver打满,但是睡眠时间 - * 最长不能超过该值。 - * @chunkserverMaxStableTimeoutTimes: 一个chunkserver连续超时请求的阈值, - * 超过之后 会标记为unstable。因为一个chunkserver所在的server如果宕机 - * 那么发向该chunkserver的请求都会超时,如果同一个chunkserver - * 的rpc连续超时超过该阈值,那么client就认为这个chunkserver - * 所在的server可能宕机了,就将该server上的所有leader - * copyset 标记为unstable,促使其下次发送rpc前,先去getleader。 + * When the underlying chunk server returns OVERLOAD, indicating excessive + * pressure, the sleep interval is exponentially extended to ensure that client + * requests do not overwhelm the underlying chunk server. + * However, the maximum sleep time cannot exceed this value. + * @chunkserverMaxStableTimeoutTimes: + * Threshold for consecutive timeouts on an RPC from a chunk server. If + * exceeded, the chunk server is marked as unstable. This is because if a server + * where a chunk server resides crashes, requests sent to + * that chunk server will all time out. 
If the same chunk server's RPCs + * consecutively timeout beyond this threshold, the client assumes that the + * server where it resides may have crashed and marks all leader copysets on + * that server as unstable, prompting a leader retrieval before sending any + * RPCs. * @chunkserverMinRetryTimesForceTimeoutBackoff: - * 当一个请求重试次数超过阈值时,还在重试 使其超时时间进行指数退避 + * When a request exceeds the retry count threshold, it continues to retry with + * exponential backoff for its timeout duration. */ struct FailureRequestOption { uint32_t chunkserverOPMaxRetry = 3; @@ -154,9 +161,11 @@ struct FailureRequestOption { }; /** - * 发送rpc给chunkserver的配置 - * @inflightOpt: 一个文件向chunkserver发送请求时的inflight 请求控制配置 - * @failRequestOpt: rpc发送失败之后,需要进行rpc重试的相关配置 + * Configuration for sending rpc to chunkserver + * @inflightOpt: Configuration of inflight request control when a file sends a + * request to chunkserver + * @failRequestOpt: After rpc sending fails, relevant configuration for rpc + * retry needs to be carried out */ struct IOSenderOption { InFlightIOCntlInfo inflightOpt; @@ -164,10 +173,12 @@ struct IOSenderOption { }; /** - * scheduler模块基本配置信息,schedule模块是用于分发用户请求,每个文件有自己的schedule - * 线程池,线程池中的线程各自配置一个队列 - * @scheduleQueueCapacity: schedule模块配置的队列深度 - * @scheduleThreadpoolSize: schedule模块线程池大小 + Basic Configuration Information for the Scheduler Module + * The scheduler module is used for distributing user requests. Each file has + its own scheduler thread pool, and each thread in the pool is configured with + its own queue. + * @scheduleQueueCapacity: The queue depth configured by the schedule module + * @scheduleThreadpoolSize: schedule module thread pool size */ struct RequestScheduleOption { uint32_t scheduleQueueCapacity = 1024; @@ -176,26 +187,29 @@ struct RequestScheduleOption { }; /** - * metaccache模块配置信息 + * MetaCache Module Configuration * @metacacheGetLeaderRetry: - * 获取leader重试次数,一个rpc发送到chunkserver之前需要先 - * 获取当前copyset的leader,如果metacache中没有这个信息, - * 就向copyset的peer发送getleader请求,如果getleader失败, - * 需要重试,最大重试次数为该值。 + * Number of retries to get the leader. Before an RPC is sent to the + * chunkserver, it needs to first obtain the leader for the current copyset. If + * this information is not available in the metacache, a getleader request is + * sent to a copyset's peers. If getleader fails, it needs to be retried, with a + * maximum retry count defined by this value. * @metacacheRPCRetryIntervalUS: - * 如上所述,如果getleader请求失败,会发起重试,但是并 - * 不会立即进行重试,而是选择先睡眠一段时间在重试。该值代表 - * 睡眠长度。 - * @metacacheGetLeaderRPCTimeOutMS: 发送getleader rpc请求的rpc - * controller最大超时时间 + * As mentioned above, if a getleader request fails, it will be retried, but not + * immediately. Instead, there will be a delay before the retry. This value + * represents the length of that delay. + * @metacacheGetLeaderRPCTimeOutMS: The maximum timeout duration for the RPC + * controller when sending a 'getleader' RPC request * @metacacheGetLeaderBackupRequestMS: - * 因为一个copyset有三个或者更多的peer,getleader - * 会以backuprequest的方式向这些peer发送rpc,在brpc内部 - * 会串行发送,如果第一个请求超过一定时间还没返回,就直接向 - * 下一个peer发送请求,而不用等待上一次请求返回或超时,这个触发 - * backup request的时间就为该值。 - * @metacacheGetLeaderBackupRequestLbName: 为getleader backup rpc - * 选择底层服务节点的策略 + * Since a copyset has three or more peers, getleader requests are + * sent to these peers in a backuprequest manner. + * Internally, in brpc, these requests are sent + * serially. 
If the first request takes too long to return, the next request is + * sent to the next peer without waiting for the previous one to return or time + * out. The time at which backup requests are triggered is determined by this + * value. + * @metacacheGetLeaderBackupRequestLbName: Strategy for selecting the underlying + * service nodes for getleader backup RPCs. */ struct MetaCacheOption { uint32_t metacacheGetLeaderRetry = 3; @@ -208,21 +222,23 @@ struct MetaCacheOption { }; /** - * IO 拆分模块配置信息 + * IO Split Module Configuration * @fileIOSplitMaxSizeKB: - * 用户下发IO大小client没有限制,但是client会将用户的IO进行拆分, - * 发向同一个chunkserver的请求锁携带的数据大小不能超过该值。 + * The size of user-issued IOs is not restricted by the client. However, the + * client will split the user's IOs, and the data size carried by requests sent + * to the same chunkserver cannot exceed this value. */ struct IOSplitOption { uint64_t fileIOSplitMaxSizeKB = 64; }; /** - * 线程隔离任务队列配置信息 - * 线程隔离主要是为了上层做异步接口调用时,直接将其调用任务推到线程池中而不是让其阻塞到放入 - * 分发队列线程池。 - * @isolationTaskQueueCapacity: 隔离线程池的队列深度 - * @isolationTaskThreadPoolSize: 隔离线程池容量 + * Configuration information for thread-isolated task queues. + * Thread isolation is primarily used to push asynchronous interface calls + * directly into the thread pool instead of blocking them until they are placed + * in the dispatch queue thread pool. + * @isolationTaskQueueCapacity: The queue depth of the isolation thread pool. + * @isolationTaskThreadPoolSize: The capacity of the isolation thread pool. */ struct TaskThreadOption { uint64_t isolationTaskQueueCapacity = 500000; @@ -250,7 +266,8 @@ struct ThrottleOption { }; /** - * IOOption存储了当前io 操作所需要的所有配置信息 + * IOOption stores all the configuration information required for the current IO + * operation */ struct IOOption { IOSplitOption ioSplitOpt; @@ -264,11 +281,12 @@ struct IOOption { }; /** - * client一侧常规的共同的配置信息 - * @mdsRegisterToMDS: 是否向mds注册client信息,因为client需要通过dummy - * server导出 metric信息,为了配合普罗米修斯的自动服务发现机制,会将其监听的 - * ip和端口信息发送给mds。 - * @turnOffHealthCheck: 是否关闭健康检查 + * Common client-side configuration options: + * @mdsRegisterToMDS: Whether to register client information with the MDS. Since + * the client needs to export metric information through a dummy server to + * support Prometheus's automatic service discovery mechanism, it sends its + * listening IP and port information to the MDS. + * @turnOffHealthCheck: Whether to disable health checks. 
*/ struct CommonConfigOpt { bool mdsRegisterToMDS = false; @@ -284,7 +302,8 @@ struct CommonConfigOpt { }; /** - * ClientConfigOption是外围快照系统需要设置的配置信息 + * ClientConfigOption is the configuration information that needs to be set for + * the peripheral snapshot system */ struct ClientConfigOption { LogInfo loginfo; @@ -296,25 +315,24 @@ struct ClientConfigOption { struct ChunkServerBroadCasterOption { uint32_t broadCastMaxNum; - ChunkServerBroadCasterOption() - : broadCastMaxNum(200) {} + ChunkServerBroadCasterOption() : broadCastMaxNum(200) {} }; struct ChunkServerClientRetryOptions { - uint32_t rpcTimeoutMs; - uint32_t rpcMaxTry; - uint32_t rpcIntervalUs; - uint32_t rpcMaxTimeoutMs; + uint32_t rpcTimeoutMs; + uint32_t rpcMaxTry; + uint32_t rpcIntervalUs; + uint32_t rpcMaxTimeoutMs; ChunkServerClientRetryOptions() - : rpcTimeoutMs(500), - rpcMaxTry(3), - rpcIntervalUs(100000), - rpcMaxTimeoutMs(8000) {} + : rpcTimeoutMs(500), + rpcMaxTry(3), + rpcIntervalUs(100000), + rpcMaxTimeoutMs(8000) {} }; /** - * FileServiceOption是QEMU侧总体配置信息 + * FileServiceOption is the overall configuration information on the QEMU side */ struct FileServiceOption { LogInfo loginfo; diff --git a/src/client/copyset_client.cpp b/src/client/copyset_client.cpp index 964929d18f..9211070715 100644 --- a/src/client/copyset_client.cpp +++ b/src/client/copyset_client.cpp @@ -24,21 +24,21 @@ #include #include + #include #include -#include "src/client/request_sender.h" -#include "src/client/metacache.h" #include "src/client/client_config.h" -#include "src/client/request_scheduler.h" +#include "src/client/metacache.h" #include "src/client/request_closure.h" +#include "src/client/request_scheduler.h" +#include "src/client/request_sender.h" namespace curve { namespace client { -int CopysetClient::Init(MetaCache *metaCache, - const IOSenderOption& ioSenderOpt, RequestScheduler* scheduler, - FileMetric* fileMetric) { +int CopysetClient::Init(MetaCache* metaCache, const IOSenderOption& ioSenderOpt, + RequestScheduler* scheduler, FileMetric* fileMetric) { if (nullptr == metaCache || scheduler == nullptr) { LOG(ERROR) << "metacache or scheduler is null!"; return -1; @@ -47,7 +47,7 @@ int CopysetClient::Init(MetaCache *metaCache, metaCache_ = metaCache; scheduler_ = scheduler; fileMetric_ = fileMetric; - senderManager_ = new(std::nothrow) RequestSenderManager(); + senderManager_ = new (std::nothrow) RequestSenderManager(); if (nullptr == senderManager_) { return -1; } @@ -63,30 +63,33 @@ int CopysetClient::Init(MetaCache *metaCache, return 0; } bool CopysetClient::FetchLeader(LogicPoolID lpid, CopysetID cpid, - ChunkServerID* leaderid, butil::EndPoint* leaderaddr) { - // 1. 先去当前metacache中拉取leader信息 - if (0 == metaCache_->GetLeader(lpid, cpid, leaderid, - leaderaddr, false, fileMetric_)) { + ChunkServerID* leaderid, + butil::EndPoint* leaderaddr) { + // 1. First, pull the leader information from the current metacache + if (0 == metaCache_->GetLeader(lpid, cpid, leaderid, leaderaddr, false, + fileMetric_)) { return true; } - // 2. 如果metacache中leader信息拉取失败,就发送RPC请求获取新leader信息 - if (-1 == metaCache_->GetLeader(lpid, cpid, leaderid, - leaderaddr, true, fileMetric_)) { + // 2. If the pull of leader information in the metacache fails, send an RPC + // request to obtain new leader information + if (-1 == metaCache_->GetLeader(lpid, cpid, leaderid, leaderaddr, true, + fileMetric_)) { LOG(WARNING) << "Get leader address form cache failed, but " - << "also refresh leader address failed from mds." 
- << "(<" << lpid << ", " << cpid << ">)"; + << "also refresh leader address failed from mds." + << "(<" << lpid << ", " << cpid << ">)"; return false; } return true; } -// 因为这里的CopysetClient::ReadChunk(会在两个逻辑里调用 -// 1. 从request scheduler下发的新的请求 -// 2. clientclosure再重试逻辑里调用copyset client重试 -// 这两种状况都会调用该接口,因为对于重试的RPC有可能需要重新push到队列中 -// 非重试的RPC如果重新push到队列中会导致死锁。 +// Because the CopysetClient::ReadChunk (will be called in two logics) here +// 1. New requests issued from the request scheduler +// 2. Calling copyset client to retry in the clientclosure retry logic +// Both of these situations will call the interface, as retrying RPCs may +// require re pushing to the queue If non retrying RPC is pushed back into the +// queue, it will cause a deadlock. int CopysetClient::ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset, size_t length, const RequestSourceInfo& sourceInfo, @@ -94,26 +97,31 @@ int CopysetClient::ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, RequestClosure* reqclosure = static_cast(done); brpc::ClosureGuard doneGuard(done); - // session过期情况下重试有两种场景: - // 1. 正常重试过程,非文件关闭状态,这时候RPC直接重新push到scheduler队列头部 - // 重试调用是在brpc的线程里,所以这里不会卡住重试的RPC,这样 - // 不会阻塞brpc线程,因为brpc线程是所有文件公用的。避免影响其他文件 - // 因为session续约失败可能只是网络问题,等待续约成功之后IO其实还可以 - // 正常下发,所以不能直接向上返回失败,在底层hang住,等续约成功之后继续发送 - // 2. 在关闭文件过程中exitFlag_=true,重试rpc会直接向上通过closure返回给用户 - // return调用之后doneguard会调用closure的run,会释放inflight rpc计数, - // 然后closure向上返回给用户。 + // There are two scenarios for retrying when a session expires: + // 1. During the normal retry process, if the file is not in a closed state, + // RPC will directly re push to the scheduler queue header + // The retry call is in the brpc thread, so there will be no blocking of + // the retry RPC here Will not block the brpc thread as it is common to + // all files. Avoid affecting other files Because the session renewal + // failure may only be a network issue, IO is actually still possible + // after the renewal is successful Normal distribution, so failure cannot + // be directly returned upwards. Hang on at the bottom and continue + // sending after the renewal is successful + // 2. exitFlag_=true during file closing, retrying rpc will directly return + // to the user through closure + // After the return call, doneguard will call the run of the closure, + // releasing the inflight rpc count, Then the closure is returned to the + // user upwards. if (sessionNotValid_ == true) { if (exitFlag_) { LOG(WARNING) << " return directly for session not valid at exit!" 
- << ", copyset id = " << idinfo.cpid_ - << ", logical pool id = " << idinfo.lpid_ - << ", chunk id = " << idinfo.cid_ - << ", offset = " << offset - << ", len = " << length; + << ", copyset id = " << idinfo.cpid_ + << ", logical pool id = " << idinfo.lpid_ + << ", chunk id = " << idinfo.cid_ + << ", offset = " << offset << ", len = " << length; return 0; } else { - // session过期之后需要重新push到队列 + // After the session expires, it needs to be re pushed to the queue LOG(WARNING) << "session not valid, read rpc ReSchedule!"; doneGuard.release(); reqclosure->ReleaseInflightRPCToken(); @@ -123,20 +131,17 @@ int CopysetClient::ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, } auto task = [&](Closure* done, std::shared_ptr senderPtr) { - ReadChunkClosure *readDone = new ReadChunkClosure(this, done); - senderPtr->ReadChunk(idinfo, sn, offset, - length, sourceInfo, readDone); + ReadChunkClosure* readDone = new ReadChunkClosure(this, done); + senderPtr->ReadChunk(idinfo, sn, offset, length, sourceInfo, readDone); }; return DoRPCTask(idinfo, task, doneGuard.release()); } -int CopysetClient::WriteChunk(const ChunkIDInfo& idinfo, - uint64_t fileId, - uint64_t epoch, - uint64_t sn, - const butil::IOBuf& data, - off_t offset, size_t length, +int CopysetClient::WriteChunk(const ChunkIDInfo& idinfo, uint64_t fileId, + uint64_t epoch, uint64_t sn, + const butil::IOBuf& data, off_t offset, + size_t length, const RequestSourceInfo& sourceInfo, google::protobuf::Closure* done) { std::shared_ptr senderPtr = nullptr; @@ -146,23 +151,28 @@ int CopysetClient::WriteChunk(const ChunkIDInfo& idinfo, brpc::ClosureGuard doneGuard(done); - // session过期情况下重试有两种场景: - // 1. 正常重试过程,非文件关闭状态,这时候RPC直接重新push到scheduler队列头部 - // 重试调用是在brpc的线程里,所以这里不会卡住重试的RPC,这样 - // 不会阻塞brpc线程,因为brpc线程是所有文件公用的。避免影响其他文件 - // 因为session续约失败可能只是网络问题,等待续约成功之后IO其实还可以 - // 正常下发,所以不能直接向上返回失败,在底层hang住,等续约成功之后继续发送 - // 2. 在关闭文件过程中exitFlag_=true,重试rpc会直接向上通过closure返回给用户 - // return调用之后doneguard会调用closure的run,会释放inflight rpc计数, - // 然后closure向上返回给用户。 + // There are two scenarios for retrying when a session expires: + // 1. During the normal retry process, if the file is not in a closed state, + // RPC will directly re push to the scheduler queue header + // The retry call is in the brpc thread, so there will be no blocking of + // the retry RPC here Will not block the brpc thread as it is common to + // all files. Avoid affecting other files Because the session renewal + // failure may only be a network issue, IO is actually still possible + // after the renewal is successful Normal distribution, so failure cannot + // be directly returned upwards. Hang on at the bottom and continue + // sending after the renewal is successful + // 2. exitFlag_=true during file closing, retrying rpc will directly return + // to the user through closure + // After the return call, doneguard will call the run of the closure, + // releasing the inflight rpc count, Then the closure is returned to the + // user upwards. if (sessionNotValid_ == true) { if (exitFlag_) { LOG(WARNING) << " return directly for session not valid at exit!" 
- << ", copyset id = " << idinfo.cpid_ - << ", logical pool id = " << idinfo.lpid_ - << ", chunk id = " << idinfo.cid_ - << ", offset = " << offset - << ", len = " << length; + << ", copyset id = " << idinfo.cpid_ + << ", logical pool id = " << idinfo.lpid_ + << ", chunk id = " << idinfo.cid_ + << ", offset = " << offset << ", len = " << length; return 0; } else { LOG(WARNING) << "session not valid, write rpc ReSchedule!"; @@ -175,19 +185,18 @@ int CopysetClient::WriteChunk(const ChunkIDInfo& idinfo, auto task = [&](Closure* done, std::shared_ptr senderPtr) { WriteChunkClosure* writeDone = new WriteChunkClosure(this, done); - senderPtr->WriteChunk(idinfo, fileId, epoch, sn, - data, offset, length, sourceInfo, - writeDone); + senderPtr->WriteChunk(idinfo, fileId, epoch, sn, data, offset, length, + sourceInfo, writeDone); }; return DoRPCTask(idinfo, task, doneGuard.release()); } -int CopysetClient::ReadChunkSnapshot(const ChunkIDInfo& idinfo, - uint64_t sn, off_t offset, size_t length, Closure *done) { - +int CopysetClient::ReadChunkSnapshot(const ChunkIDInfo& idinfo, uint64_t sn, + off_t offset, size_t length, + Closure* done) { auto task = [&](Closure* done, std::shared_ptr senderPtr) { - ReadChunkSnapClosure *readDone = new ReadChunkSnapClosure(this, done); + ReadChunkSnapClosure* readDone = new ReadChunkSnapClosure(this, done); senderPtr->ReadChunkSnapshot(idinfo, sn, offset, length, readDone); }; @@ -195,21 +204,22 @@ int CopysetClient::ReadChunkSnapshot(const ChunkIDInfo& idinfo, } int CopysetClient::DeleteChunkSnapshotOrCorrectSn(const ChunkIDInfo& idinfo, - uint64_t correctedSn, Closure *done) { - + uint64_t correctedSn, + Closure* done) { auto task = [&](Closure* done, std::shared_ptr senderPtr) { - DeleteChunkSnapClosure *deleteDone = new DeleteChunkSnapClosure( - this, done); - senderPtr->DeleteChunkSnapshotOrCorrectSn(idinfo, - correctedSn, deleteDone); + DeleteChunkSnapClosure* deleteDone = + new DeleteChunkSnapClosure(this, done); + senderPtr->DeleteChunkSnapshotOrCorrectSn(idinfo, correctedSn, + deleteDone); }; return DoRPCTask(idinfo, task, done); } -int CopysetClient::GetChunkInfo(const ChunkIDInfo& idinfo, Closure *done) { +int CopysetClient::GetChunkInfo(const ChunkIDInfo& idinfo, Closure* done) { auto task = [&](Closure* done, std::shared_ptr senderPtr) { - GetChunkInfoClosure *chunkInfoDone = new GetChunkInfoClosure(this, done); // NOLINT + GetChunkInfoClosure* chunkInfoDone = + new GetChunkInfoClosure(this, done); // NOLINT senderPtr->GetChunkInfo(idinfo, chunkInfoDone); }; @@ -217,9 +227,9 @@ int CopysetClient::GetChunkInfo(const ChunkIDInfo& idinfo, Closure *done) { } int CopysetClient::CreateCloneChunk(const ChunkIDInfo& idinfo, - const std::string& location, uint64_t sn, - uint64_t correntSn, uint64_t chunkSize, - Closure* done) { + const std::string& location, uint64_t sn, + uint64_t correntSn, uint64_t chunkSize, + Closure* done) { auto task = [&](Closure* done, std::shared_ptr senderPtr) { CreateCloneChunkClosure* createCloneDone = new CreateCloneChunkClosure(this, done); @@ -230,22 +240,22 @@ int CopysetClient::CreateCloneChunk(const ChunkIDInfo& idinfo, return DoRPCTask(idinfo, task, done); } -int CopysetClient::RecoverChunk(const ChunkIDInfo& idinfo, - uint64_t offset, +int CopysetClient::RecoverChunk(const ChunkIDInfo& idinfo, uint64_t offset, uint64_t len, Closure* done) { auto task = [&](Closure* done, std::shared_ptr senderPtr) { RecoverChunkClosure* recoverChunkDone = new RecoverChunkClosure(this, done); - senderPtr->RecoverChunk(idinfo, recoverChunkDone, 
offset, - len); + senderPtr->RecoverChunk(idinfo, recoverChunkDone, offset, len); }; return DoRPCTask(idinfo, task, done); } -int CopysetClient::DoRPCTask(const ChunkIDInfo& idinfo, - std::function senderptr)> task, Closure *done) { +int CopysetClient::DoRPCTask( + const ChunkIDInfo& idinfo, + std::function senderptr)> + task, + Closure* done) { RequestClosure* reqclosure = static_cast(done); ChunkServerID leaderId; @@ -253,30 +263,30 @@ int CopysetClient::DoRPCTask(const ChunkIDInfo& idinfo, brpc::ClosureGuard doneGuard(done); while (reqclosure->GetRetriedTimes() < - iosenderopt_.failRequestOpt.chunkserverOPMaxRetry) { + iosenderopt_.failRequestOpt.chunkserverOPMaxRetry) { reqclosure->IncremRetriedTimes(); - if (false == FetchLeader(idinfo.lpid_, idinfo.cpid_, - &leaderId, &leaderAddr)) { + if (false == + FetchLeader(idinfo.lpid_, idinfo.cpid_, &leaderId, &leaderAddr)) { bthread_usleep( - iosenderopt_.failRequestOpt.chunkserverOPRetryIntervalUS); + iosenderopt_.failRequestOpt.chunkserverOPRetryIntervalUS); continue; } - auto senderPtr = senderManager_->GetOrCreateSender(leaderId, - leaderAddr, iosenderopt_); + auto senderPtr = senderManager_->GetOrCreateSender(leaderId, leaderAddr, + iosenderopt_); if (nullptr != senderPtr) { task(doneGuard.release(), senderPtr); break; } else { LOG(WARNING) << "create or reset sender failed, " - << ", leaderId = " << leaderId; + << ", leaderId = " << leaderId; bthread_usleep( - iosenderopt_.failRequestOpt.chunkserverOPRetryIntervalUS); + iosenderopt_.failRequestOpt.chunkserverOPRetryIntervalUS); continue; } } return 0; } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/copyset_client.h b/src/client/copyset_client.h index 3dc1fc66f7..0881a7ffac 100644 --- a/src/client/copyset_client.h +++ b/src/client/copyset_client.h @@ -23,11 +23,11 @@ #ifndef SRC_CLIENT_COPYSET_CLIENT_H_ #define SRC_CLIENT_COPYSET_CLIENT_H_ -#include #include +#include -#include #include +#include #include "include/curve_compiler_specific.h" #include "src/client/client_common.h" @@ -43,12 +43,14 @@ namespace client { using curve::common::Uncopyable; using ::google::protobuf::Closure; -// TODO(tongguangxun) :后续除了read、write的接口也需要调整重试逻辑 +// TODO(tongguangxun): In addition to the read and write interfaces, the retry +// logic needs to be adjusted in the future class MetaCache; class RequestScheduler; /** - * 负责管理 ChunkServer 的链接,向上层提供访问 - * 指定 copyset 的 chunk 的 read/write 等接口 + * Responsible for managing connections to ChunkServers and providing + * upper-layer access to read/write interfaces for specific chunks within a + * copyset. 
*/
class CopysetClient {
 public:
@@ -68,120 +70,101 @@ class CopysetClient {
 senderManager_ = nullptr;
 }

- int Init(MetaCache *metaCache,
- const IOSenderOption& ioSenderOpt,
+ int Init(MetaCache* metaCache, const IOSenderOption& ioSenderOpt,
 RequestScheduler* scheduler = nullptr,
 FileMetric* fileMetic = nullptr);
 /**
- * 返回依赖的Meta Cache
+ * Return the dependent MetaCache
 */
- MetaCache* GetMetaCache() {
- return metaCache_;
- }
+ MetaCache* GetMetaCache() { return metaCache_; }

 /**
- * 读Chunk
- * @param idinfo为chunk相关的id信息
- * @param sn:文件版本号
- * @param offset:读的偏移
- * @param length:读的长度
- * @param souceInfo chunk克隆源信息
- * @param done:上一层异步回调的closure
+ * Read a chunk
+ * @param idinfo: ID information of the chunk
+ * @param sn: File version number
+ * @param offset: Read offset
+ * @param length: Read length
+ * @param sourceInfo: The chunk's clone source information
+ * @param done: closure for the upper layer's asynchronous callback
 */
- int ReadChunk(const ChunkIDInfo& idinfo,
- uint64_t sn,
- off_t offset,
- size_t length,
- const RequestSourceInfo& sourceInfo,
- google::protobuf::Closure *done);
+ int ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset,
+ size_t length, const RequestSourceInfo& sourceInfo,
+ google::protobuf::Closure* done);

 /**
- * 写Chunk
- * @param idinfo为chunk相关的id信息
- * @param fileId: file id
- * @param epoch: file epoch
- * @param sn:文件版本号
- * @param writeData:要写入的数据
- *@param offset:写的偏移
- * @param length:写的长度
- * @param sourceInfo chunk克隆源信息
- * @param done:上一层异步回调的closure
- */
- int WriteChunk(const ChunkIDInfo& idinfo,
- uint64_t fileId,
- uint64_t epoch,
- uint64_t sn,
- const butil::IOBuf& writeData,
- off_t offset,
- size_t length,
- const RequestSourceInfo& sourceInfo,
- Closure *done);
+ * Write a chunk
+ * @param idinfo: ID information of the chunk
+ * @param fileId: file id
+ * @param epoch: file epoch
+ * @param sn: File version number
+ * @param writeData: The data to be written
+ * @param offset: Write offset
+ * @param length: Write length
+ * @param sourceInfo: The chunk's clone source information
+ * @param done: closure for the upper layer's asynchronous callback
+ */
+ int WriteChunk(const ChunkIDInfo& idinfo, uint64_t fileId, uint64_t epoch,
+ uint64_t sn, const butil::IOBuf& writeData, off_t offset,
+ size_t length, const RequestSourceInfo& sourceInfo,
+ Closure* done);

 /**
- * 读Chunk快照文件
- * @param idinfo为chunk相关的id信息
- * @param sn:文件版本号
- * @param offset:读的偏移
- * @param length:读的长度
- * @param done:上一层异步回调的closure
+ * Read a chunk snapshot file
+ * @param idinfo: ID information of the chunk
+ * @param sn: File version number
+ * @param offset: Read offset
+ * @param length: Read length
+ * @param done: closure for the upper layer's asynchronous callback
 */
- int ReadChunkSnapshot(const ChunkIDInfo& idinfo,
- uint64_t sn,
- off_t offset,
- size_t length,
- Closure *done);
+ int ReadChunkSnapshot(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset,
+ size_t length, Closure* done);

 /**
- * 删除此次转储时产生的或者历史遗留的快照
- * 如果转储过程中没有产生快照,则修改chunk的correctedSn
- * @param idinfo为chunk相关的id信息
- * @param correctedSn:需要修正的版本号
- * @param done:上一层异步回调的closure
+ * Delete snapshots generated during this dump or left over from history.
+ * If no snapshot is generated during the dump process, modify the
+ * correctedSn of the chunk
+ * @param idinfo: ID information of the chunk
+ * @param correctedSn: Version number that needs to be corrected
+ * @param done: closure for the upper layer's asynchronous callback
 */
 int
DeleteChunkSnapshotOrCorrectSn(const ChunkIDInfo& idinfo,
- uint64_t correctedSn,
- Closure *done);
+ uint64_t correctedSn, Closure* done);

 /**
- * 获取chunk文件的信息
- * @param idinfo为chunk相关的id信息
- * @param done:上一层异步回调的closure
+ * Obtain the chunk file's information
+ * @param idinfo: ID information of the chunk
+ * @param done: closure for the upper layer's asynchronous callback
 */
- int GetChunkInfo(const ChunkIDInfo& idinfo,
- Closure *done);
+ int GetChunkInfo(const ChunkIDInfo& idinfo, Closure* done);

 /**
- * @brief lazy 创建clone chunk
- * @param idinfo为chunk相关的id信息
- * @param:location 数据源的url
- * @param:sn chunk的序列号
- * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn
- * @param:chunkSize chunk的大小
- * @param done:上一层异步回调的closure
- * @return 错误码
- */
- int CreateCloneChunk(const ChunkIDInfo& idinfo,
- const std::string &location,
- uint64_t sn,
- uint64_t correntSn,
- uint64_t chunkSize,
- Closure *done);
-
- /**
- * @brief 实际恢复chunk数据
- * @param idinfo为chunk相关的id信息
- * @param:offset 偏移
- * @param:len 长度
- * @param done:上一层异步回调的closure
- * @return 错误码
- */
- int RecoverChunk(const ChunkIDInfo& idinfo,
- uint64_t offset,
- uint64_t len,
- Closure *done);
+ * @brief Lazily create a clone chunk
+ * @param idinfo: ID information of the chunk
+ * @param location: URL of the data source
+ * @param sn: The chunk's sequence number
+ * @param correntSn: used to correct the chunk's correctedSn during
+ * CreateCloneChunk
+ * @param chunkSize: Chunk size
+ * @param done: closure for the upper layer's asynchronous callback
+ * @return error code
+ */
+ int CreateCloneChunk(const ChunkIDInfo& idinfo, const std::string& location,
+ uint64_t sn, uint64_t correntSn, uint64_t chunkSize,
+ Closure* done);
+
+ /**
+ * @brief Actually recover chunk data
+ * @param idinfo: ID information of the chunk
+ * @param offset: offset
+ * @param len: length
+ * @param done: closure for the upper layer's asynchronous callback
+ * @return error code
+ */
+ int RecoverChunk(const ChunkIDInfo& idinfo, uint64_t offset, uint64_t len,
+ Closure* done);

 /**
- * @brief 如果csId对应的RequestSender不健康,就进行重置
+ * @brief If the RequestSender corresponding to csId is not healthy, reset
+ * it
 * @param csId chunkserver id
 */
 void ResetSenderIfNotHealth(const ChunkServerID& csId) {
@@ -189,24 +172,21 @@ class CopysetClient {
 }

 /**
- * session过期,需要将重试RPC停住
+ * When the session expires, retried RPCs need to be stopped
 */
- void StartRecycleRetryRPC() {
- sessionNotValid_ = true;
- }
+ void StartRecycleRetryRPC() { sessionNotValid_ = true; }

 /**
- * session恢复通知不再回收重试的RPC
+ * Notification that the session has recovered and retried RPCs are no
+ * longer recycled
 */
- void ResumeRPCRetry() {
- sessionNotValid_ = false;
- }
+ void ResumeRPCRetry() { sessionNotValid_ = false; }

 /**
- * 在文件关闭的时候接收上层关闭通知, 根据session有效状态
- * 置位exitFlag, 如果sessio无效状态下再有rpc超时返回,这
- * 些RPC会直接错误返回,如果session正常,则将继续正常下发
- * RPC,直到重试次数结束或者成功返回
+ * Receive the upper layer's close notification when the file is closed.
+ * Set the exitFlag based on the session's validity status. If there are RPC
+ * timeouts under an invalid session state, these RPCs will return errors
+ * directly. If the session is valid, RPCs will continue to be issued until
+ * the retry limit is reached or they return successfully.
*/
 void ResetExitFlag() {
 if (sessionNotValid_) {
@@ -218,47 +198,49 @@ class CopysetClient {
 friend class WriteChunkClosure;
 friend class ReadChunkClosure;

- // 拉取新的leader信息
- bool FetchLeader(LogicPoolID lpid,
- CopysetID cpid,
- ChunkServerID* leaderid,
+ // Pull new leader information
+ bool FetchLeader(LogicPoolID lpid, CopysetID cpid, ChunkServerID* leaderid,
 butil::EndPoint* leaderaddr);

 /**
- * 执行发送rpc task,并进行错误重试
- * @param[in]: idinfo为当前rpc task的id信息
- * @param[in]: task为本次要执行的rpc task
- * @param[in]: done是本次rpc 任务的异步回调
- * @return: 成功返回0, 否则-1
+ * Execute the RPC send task and retry on error
+ * @param[in]: idinfo is the ID information of the current rpc task
+ * @param[in]: task is the rpc task executed this time
+ * @param[in]: done is the asynchronous callback for this RPC task
+ * @return: Returns 0 on success, otherwise -1
 */
- int DoRPCTask(const ChunkIDInfo& idinfo,
+ int DoRPCTask(
+ const ChunkIDInfo& idinfo,
 std::function)> task,
- Closure *done);
+ Closure* done);

 private:
- // 元数据缓存
- MetaCache *metaCache_;
- // 所有ChunkServer的链接管理者
- RequestSenderManager *senderManager_;
- // 配置
+ // Metadata cache
+ MetaCache* metaCache_;
+ // Connection manager for all ChunkServers
+ RequestSenderManager* senderManager_;
+ // Configuration
 IOSenderOption iosenderopt_;

- // session是否有效,如果session无效那么需要将重试的RPC停住
- // RPC停住通过将这个rpc重新push到request scheduler队列,这样不会
- // 阻塞brpc内部的线程,防止一个文件的操作影响到其他文件
+ // Whether the session is valid. If the session is invalid, retried RPCs
+ // must be paused by re-pushing them into the request scheduler queue. This
+ // ensures that they do not block the internal threads of brpc and prevents
+ // operations on one file from affecting other files.
 bool sessionNotValid_;

- // request 调度器,在session过期的时候重新将RPC push到调度队列
+ // Request scheduler, used to push RPCs back to the scheduling queue when
+ // the session expires
 RequestScheduler* scheduler_;

- // 当前copyset client对应的文件metric
+ // The file metric corresponding to the current copyset client
 FileMetric* fileMetric_;

- // 是否在停止状态中,如果是在关闭过程中且session失效,需要将rpc直接返回不下发
+ // Whether the client is exiting: if the file is being closed and the
+ // session is invalid, RPCs must be returned directly instead of issued
 bool exitFlag_;
};

-} // namespace client
-} // namespace curve
+} // namespace client
+} // namespace curve

 #endif // SRC_CLIENT_COPYSET_CLIENT_H_
diff --git a/src/client/file_instance.cpp b/src/client/file_instance.cpp
index 63836653de..343b6cd5f8 100644
--- a/src/client/file_instance.cpp
+++ b/src/client/file_instance.cpp
@@ -24,21 +24,22 @@

 #include
 #include
+
 #include

 #include "src/client/iomanager4file.h"
 #include "src/client/mds_client.h"
-#include "src/common/timeutility.h"
 #include "src/common/curve_define.h"
 #include "src/common/fast_align.h"
+#include "src/common/timeutility.h"

namespace curve {
namespace client {

using curve::client::ClientConfig;
+using curve::common::is_aligned;
using curve::common::TimeUtility;
using curve::mds::SessionStatus;
-using curve::common::is_aligned;

bool CheckAlign(off_t off, size_t length, size_t blocksize) {
 return is_aligned(off, blocksize) && is_aligned(length, blocksize);
@@ -105,18 +106,16 @@ void FileInstance::UnInitialize() {
int FileInstance::Read(char* buf, off_t offset, size_t length) {
 if (CURVE_UNLIKELY(!CheckAlign(offset, length, blocksize_))) {
 LOG(ERROR) << "IO not aligned, off: " << offset
- << ", length: " << length
- << ", block size: " << blocksize_;
+ << ", length: " << length << ", block size: " << blocksize_;
 return -LIBCURVE_ERROR::NOT_ALIGNED;
 }

- DLOG_EVERY_SECOND(INFO) << "begin Read "<< finfo_.fullPathName
- << ", offset = " << offset
- << ", len = " << length;
+ DLOG_EVERY_SECOND(INFO) << "begin Read " << finfo_.fullPathName
+ << ", offset = " << offset << ", len = " << length;
 return iomanager4file_.Read(buf, offset, length, mdsclient_.get());
}

-int FileInstance::Write(const char *buf, off_t offset, size_t len) {
+int FileInstance::Write(const char* buf, off_t offset, size_t len) {
 if (CURVE_UNLIKELY(readonly_)) {
 DVLOG(9) << "open with read only, do not support write!";
 return -1;
@@ -135,7 +134,7 @@ int FileInstance::Write(const char *buf, off_t offset, size_t len) {

int FileInstance::AioRead(CurveAioContext* aioctx, UserDataType dataType) {
 if (CURVE_UNLIKELY(
- !CheckAlign(aioctx->offset, aioctx->length, blocksize_))) {
+ !CheckAlign(aioctx->offset, aioctx->length, blocksize_))) {
 LOG(ERROR) << "IO not aligned, off: " << aioctx->offset
 << ", length: " << aioctx->length
 << ", block size: " << blocksize_;
@@ -144,20 +143,20 @@ int FileInstance::AioRead(CurveAioContext* aioctx, UserDataType dataType) {
 return -LIBCURVE_ERROR::NOT_ALIGNED;
 }

- DLOG_EVERY_SECOND(INFO) << "begin AioRead " << finfo_.fullPathName
- << ", offset = " << aioctx->offset
- << ", len = " << aioctx->length;
+ DLOG_EVERY_SECOND(INFO)
+ << "begin AioRead " << finfo_.fullPathName
+ << ", offset = " << aioctx->offset << ", len = " << aioctx->length;

 return iomanager4file_.AioRead(aioctx, mdsclient_.get(), dataType);
}

-int FileInstance::AioWrite(CurveAioContext *aioctx, UserDataType dataType) {
+int FileInstance::AioWrite(CurveAioContext* aioctx, UserDataType dataType) {
 if (CURVE_UNLIKELY(readonly_)) {
 DVLOG(9) << "open with read only, do not support write!";
 return -1;
 }

 if (CURVE_UNLIKELY(
- !CheckAlign(aioctx->offset, aioctx->length, blocksize_))) {
+ !CheckAlign(aioctx->offset, aioctx->length, blocksize_))) {
 LOG(ERROR) << "IO not aligned, off: " << aioctx->offset
 << ", length: " << aioctx->length
 << ", block size: " << blocksize_;
@@ -166,9 +165,9 @@ int FileInstance::AioWrite(CurveAioContext *aioctx, UserDataType
dataType) {
 return -LIBCURVE_ERROR::NOT_ALIGNED;
 }

- DLOG_EVERY_SECOND(INFO) << "begin AioWrite " << finfo_.fullPathName
- << ", offset = " << aioctx->offset
- << ", len = " << aioctx->length;
+ DLOG_EVERY_SECOND(INFO)
+ << "begin AioWrite " << finfo_.fullPathName
+ << ", offset = " << aioctx->offset << ", len = " << aioctx->length;

 return iomanager4file_.AioWrite(aioctx, mdsclient_.get(), dataType);
}
@@ -181,7 +180,7 @@ int FileInstance::Discard(off_t offset, size_t length) {
 return -1;
}

-int FileInstance::AioDiscard(CurveAioContext *aioctx) {
+int FileInstance::AioDiscard(CurveAioContext* aioctx) {
 if (CURVE_LIKELY(!readonly_)) {
 return iomanager4file_.AioDiscard(aioctx, mdsclient_.get());
 }
@@ -190,16 +189,23 @@ int FileInstance::AioDiscard(CurveAioContext *aioctx) {
 return -1;
}

-// 两种场景会造成在Open的时候返回LIBCURVE_ERROR::FILE_OCCUPIED
-// 1. 强制重启qemu不会调用close逻辑,然后启动的时候原来的文件sessio还没过期.
-// 导致再次去发起open的时候,返回被占用,这种情况可以通过load sessionmap
-// 拿到已有的session,再去执行refresh。
-// 2. 由于网络原因,导致open rpc超时,然后再去重试的时候就会返回FILE_OCCUPIED
-// 这时候当前还没有成功打开,所以还没有存储该session信息,所以无法通过refresh
-// 再去打开,所以这时候需要获取mds一侧session lease时长,然后在client这一侧
-// 等待一段时间再去Open,如果依然失败,就向上层返回失败。
+// Two scenarios can lead to returning LIBCURVE_ERROR::FILE_OCCUPIED when
+// opening:
+// 1. Forcibly restarting QEMU does not trigger the close logic, and when
+//    starting, the original file session has not expired yet, so the next
+//    open attempt returns "occupied". This situation can be resolved by
+//    loading the session map, obtaining the existing session, and then
+//    performing a refresh.
+// 2. Due to network issues, the open RPC times out, and when retrying, it
+//    returns FILE_OCCUPIED.
+//    At this point, the file hasn't been successfully opened yet, so the
+//    session information isn't stored, and it's impossible to open it through
+//    refresh. In this case, you need to obtain the session lease duration on
+//    the MDS side, then wait for a period on the client side before attempting
+//    to Open again. If it still fails, return a failure to the upper layer.
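[Editor's note, not part of the patch] To illustrate scenario 2 above, a minimal sketch of the open-retry pattern the comment describes is shown below. The names kFileOccupied and TryOpen are hypothetical stand-ins for LIBCURVE_ERROR::FILE_OCCUPIED and FileInstance::Open(); the lease length must come from the MDS-side configuration.

    #include <chrono>
    #include <thread>

    // Hypothetical error code mirroring -LIBCURVE_ERROR::FILE_OCCUPIED.
    constexpr int kFileOccupied = -1;

    // Stand-in for FileInstance::Open(); provided elsewhere.
    int TryOpen();

    int OpenWithOccupiedRetry(int maxRetry, int sessionLeaseMs) {
        for (int i = 0; i < maxRetry; ++i) {
            int ret = TryOpen();
            if (ret != kFileOccupied) {
                return ret;  // success, or an error unrelated to occupation
            }
            // The stale session may still be alive on the MDS; wait about one
            // lease length so it can expire before the next attempt.
            std::this_thread::sleep_for(
                std::chrono::milliseconds(sessionLeaseMs));
        }
        return kFileOccupied;  // still occupied after all retries
    }
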
int FileInstance::Open(std::string* sessionId) { - LeaseSession_t lease; + LeaseSession_t lease; int ret = LIBCURVE_ERROR::FAILED; FileEpoch fEpoch; @@ -218,8 +224,8 @@ int FileInstance::Open(std::string* sessionId) { return -ret; } -int FileInstance::GetFileInfo(const std::string &filename, FInfo_t *fi, - FileEpoch_t *fEpoch) { +int FileInstance::GetFileInfo(const std::string& filename, FInfo_t* fi, + FileEpoch_t* fEpoch) { LIBCURVE_ERROR ret = mdsclient_->GetFileInfo(filename, finfo_.userinfo, fi, fEpoch); return -ret; @@ -240,12 +246,12 @@ int FileInstance::Close() { FileInstance* FileInstance::NewInitedFileInstance( const FileServiceOption& fileServiceOption, - const std::shared_ptr& mdsClient, - const std::string& filename, + const std::shared_ptr& mdsClient, const std::string& filename, const UserInfo& userInfo, - const OpenFlags& openflags, // TODO(all): maybe we can put userinfo and readonly into openflags // NOLINT + const OpenFlags& openflags, // TODO(all): maybe we can put userinfo and + // readonly into openflags // NOLINT bool readonly) { - FileInstance *instance = new (std::nothrow) FileInstance(); + FileInstance* instance = new (std::nothrow) FileInstance(); if (instance == nullptr) { LOG(ERROR) << "Create FileInstance failed, filename: " << filename; return nullptr; @@ -266,10 +272,8 @@ FileInstance* FileInstance::NewInitedFileInstance( } FileInstance* FileInstance::Open4Readonly( - const FileServiceOption& opt, - const std::shared_ptr& mdsclient, - const std::string& filename, - const UserInfo& userInfo, + const FileServiceOption& opt, const std::shared_ptr& mdsclient, + const std::string& filename, const UserInfo& userInfo, const OpenFlags& openflags) { FileInstance* instance = FileInstance::NewInitedFileInstance( opt, mdsclient, filename, userInfo, openflags, true); @@ -279,8 +283,8 @@ FileInstance* FileInstance::Open4Readonly( } FileEpoch_t fEpoch; - int ret = mdsclient->GetFileInfo(filename, userInfo, &instance->finfo_, - &fEpoch); + int ret = + mdsclient->GetFileInfo(filename, userInfo, &instance->finfo_, &fEpoch); if (ret != 0) { LOG(ERROR) << "Get file info failed!"; instance->UnInitialize(); diff --git a/src/client/file_instance.h b/src/client/file_instance.h index 432a3402e4..952fc7e3d4 100644 --- a/src/client/file_instance.h +++ b/src/client/file_instance.h @@ -25,13 +25,13 @@ #include #include -#include "src/client/mds_client.h" #include "include/client/libcurve.h" #include "include/curve_compiler_specific.h" #include "src/client/client_common.h" -#include "src/client/service_helper.h" #include "src/client/iomanager4file.h" #include "src/client/lease_executor.h" +#include "src/client/mds_client.h" +#include "src/client/service_helper.h" namespace curve { namespace client { @@ -42,55 +42,61 @@ class CURVE_CACHELINE_ALIGNMENT FileInstance { ~FileInstance() = default; /** - * 初始化 - * @param: filename文件名用于初始化iomanager的metric信息 - * @param: mdsclient为全局的mds client - * @param: userinfo为user信息 - * @param: fileservicopt fileclient的配置选项 - * @param: clientMetric为client端要统计的metric信息 - * @param: readonly是否以只读方式打开 - * @return: 成功返回true、否则返回false + * Initialize + * @param: filename The filename used to initialize the iomanager's metric + * information. + * @param: mdsclient The global mds client. + * @param: userinfo User information. + * @param: fileservicopt The configuration options for the fileclient. + * @param: clientMetric Metric information to be collected on the client + * side. + * @param: readonly Whether to open in read-only mode. 
+ * @return: Returns true on success, otherwise returns false.
 */
 bool Initialize(const std::string& filename,
 const std::shared_ptr& mdsclient,
- const UserInfo& userinfo,
- const OpenFlags& openflags,
+ const UserInfo& userinfo, const OpenFlags& openflags,
 const FileServiceOption& fileservicopt,
 bool readonly = false);

 /**
- * 打开文件
- * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED
+ * Open the file
+ * @return: Returns LIBCURVE_ERROR::OK on success, otherwise
+ * LIBCURVE_ERROR::FAILED
 */
 int Open(std::string* sessionId = nullptr);

 /**
- * 同步模式读
- * @param: buf为当前待读取的缓冲区
- * @param:offset文件内的便宜
- * @parma:length为待读取的长度
- * @return: 成功返回读取真实长度,-1为失败
+ * Synchronous mode read
+ * @param: buf The current buffer to be read into
+ * @param: offset The offset within the file
+ * @param: length The length to be read
+ * @return: Returns the actual length read on success, -1 indicates
+ * failure
 */
 int Read(char* buf, off_t offset, size_t length);

 /**
- * 同步模式写
- * @param: buf为当前待写入的缓冲区
- * @param:offset文件内的便宜
- * @parma:length为待读取的长度
- * @return: 成功返回写入真实长度,-1为失败
+ * Synchronous mode write
+ * @param: buf The current buffer to be written
+ * @param: offset The offset within the file
+ * @param: length The length to be written
+ * @return: Returns the actual length written on success, -1 indicates
+ * failure
 */
 int Write(const char* buf, off_t offset, size_t length);

 /**
- * 异步模式读
- * @param: aioctx为异步读写的io上下文,保存基本的io信息
+ * Asynchronous mode read.
+ * @param: aioctx The I/O context for asynchronous read/write, which holds
+ * basic I/O information
 * @param: dataType type of user buffer
- * @return: 0为成功,小于0为失败
+ * @return: 0 on success, less than 0 on failure
 */
 int AioRead(CurveAioContext* aioctx, UserDataType dataType);

 /**
- * 异步模式写
- * @param: aioctx为异步读写的io上下文,保存基本的io信息
+ * Asynchronous mode write.
+ * @param: aioctx The I/O context for asynchronous read/write, which holds
+ * basic I/O information
 * @param: dataType type of user buffer
- * @return: 0为成功,小于0为失败
+ * @return: 0 indicates success, less than 0 indicates failure
 */
 int AioWrite(CurveAioContext* aioctx, UserDataType dataType);
@@ -113,69 +119,61 @@ class CURVE_CACHELINE_ALIGNMENT FileInstance {

 void UnInitialize();

- IOManager4File* GetIOManager4File() {
- return &iomanager4file_;
- }
+ IOManager4File* GetIOManager4File() { return &iomanager4file_; }

 /**
- * 获取lease, 测试代码使用
+ * Obtain the lease; used by test code
 */
- LeaseExecutor* GetLeaseExecutor() const {
- return leaseExecutor_.get();
- }
+ LeaseExecutor* GetLeaseExecutor() const { return leaseExecutor_.get(); }

- int GetFileInfo(const std::string& filename,
- FInfo_t* fi, FileEpoch_t *fEpoch);
+ int GetFileInfo(const std::string& filename, FInfo_t* fi,
+ FileEpoch_t* fEpoch);

- void UpdateFileEpoch(const FileEpoch_t &fEpoch) {
+ void UpdateFileEpoch(const FileEpoch_t& fEpoch) {
 iomanager4file_.UpdateFileEpoch(fEpoch);
 }

 /**
- * @brief 获取当前instance对应的文件信息
+ * @brief Get the file information corresponding to the current instance
 *
- * @return 当前instance对应文件的信息
+ * @return The information of the file corresponding to the current instance
 */
- FInfo GetCurrentFileInfo() const {
- return finfo_;
- }
+ FInfo GetCurrentFileInfo() const { return finfo_; }

 static FileInstance* NewInitedFileInstance(
 const FileServiceOption& fileServiceOption,
 const std::shared_ptr& mdsclient,
- const std::string& filename,
- const UserInfo& userInfo,
- const OpenFlags& openflags,
- bool readonly);
+ const std::string& filename, const UserInfo& userInfo,
+ const OpenFlags& openflags, bool readonly);

 static FileInstance* Open4Readonly(
 const FileServiceOption& opt,
 const std::shared_ptr& mdsclient,
- const std::string& filename,
- const UserInfo& userInfo,
+ const std::string& filename, const UserInfo& userInfo,
 const OpenFlags& openflags = DefaultReadonlyOpenFlags());

 private:
 void StopLease();

 private:
- // 保存当前file的文件信息
+ // Save file information for the current file
 FInfo finfo_;

- // 当前FileInstance的初始化配置信息
- FileServiceOption fileopt_;
+ // The initialization configuration information of the current FileInstance
+ FileServiceOption fileopt_;

- // MDSClient是FileInstance与mds通信的唯一出口
+ // MDSClient is the only channel through which FileInstance communicates
+ // with the MDS
 std::shared_ptr mdsclient_;

- // 每个文件都持有与MDS通信的lease,LeaseExecutor是续约执行者
+ // Each file holds a lease for communication with the MDS, and the
+ // LeaseExecutor is the renewal executor
 std::unique_ptr leaseExecutor_;

- // IOManager4File用于管理所有向chunkserver端发送的IO
- IOManager4File iomanager4file_;
+ // IOManager4File is used to manage all IO sent to the chunkserver side
+ IOManager4File iomanager4file_;

- // 是否为只读方式
- bool readonly_ = false;
+ // Whether to open in read-only mode
+ bool readonly_ = false;

 // offset and length must align with `blocksize_`
 // 4096 for backward compatibility
@@ -184,7 +182,7 @@ class CURVE_CACHELINE_ALIGNMENT FileInstance {

bool CheckAlign(off_t off, size_t length, size_t blocksize);

-} // namespace client
-} // namespace curve
+} // namespace client
+} // namespace curve

 #endif // SRC_CLIENT_FILE_INSTANCE_H_
diff --git a/src/client/inflight_controller.h b/src/client/inflight_controller.h
index 5c59f4edce..ddef520d0d 100644
--- a/src/client/inflight_controller.h
+++ b/src/client/inflight_controller.h
@@ -28,8 +28,8 @@
namespace curve {
namespace client {

-using curve::common::Mutex;
using curve::common::ConditionVariable;
+using
curve::common::Mutex;

class InflightControl {
 public:
@@ -40,8 +40,8 @@ class InflightControl {
 }

 /**
- * @brief 调用该接口等待inflight全部回来,这段期间是hang的,
- * 在close文件时调用
+ * @brief Wait for all inflight IOs to come back; the call hangs during
+ * this period. Called when closing a file.
 */
 void WaitInflightAllComeBack() {
 LOG(INFO) << "wait inflight to complete, count = " << curInflightIONum_;
@@ -53,7 +53,8 @@ class InflightControl {
 }

 /**
- * @brief 调用该接口等待inflight回来,这段期间是hang的
+ * @brief Wait for inflight IOs to come back; the call hangs during this
+ * period.
 */
 void WaitInflightComeBack() {
 if (curInflightIONum_.load(std::memory_order_acquire) >=
@@ -67,14 +68,14 @@ class InflightControl {
 }

 /**
- * @brief 递增inflight num
+ * @brief Increment the inflight num
 */
 void IncremInflightNum() {
 curInflightIONum_.fetch_add(1, std::memory_order_release);
 }

 /**
- * @brief 递减inflight num
+ * @brief Decrement the inflight num
 */
 void DecremInflightNum() {
 std::lock_guard lk(inflightComeBackmtx_);
@@ -90,24 +91,30 @@ class InflightControl {
 }

 /**
- * WaitInflightComeBack会检查当前未返回的io数量是否超过我们限制的最大未返回inflight数量
- * 但是真正的inflight数量与上层并发调用的线程数有关。
- * 假设我们设置的maxinflight=100,上层有三个线程在同时调用GetInflightToken,
- * 如果这个时候inflight数量为99,那么并发状况下这3个线程在WaitInflightComeBack
- * 都会通过然后向下并发执行IncremInflightNum,这个时候真正的inflight为102,
- * 下一个下发的时候需要等到inflight数量小于100才能继续,也就是等至少3个IO回来才能继续
- * 下发。这个误差是可以接受的,他与scheduler一侧并发度有关,误差有上限。
- * 如果想要精确控制inflight数量,就需要在接口处加锁,让原本可以并发的逻辑变成了
- * 串行,这样得不偿失。因此我们这里选择容忍一定误差范围。
+ * WaitInflightComeBack checks if the current number of pending IOs exceeds
+ * our maximum allowed inflight limit. However, the actual inflight count is
+ * influenced by concurrent calls from upper-layer threads. Suppose we set
+ * maxinflight to 100, and there are three upper-layer threads
+ * simultaneously calling GetInflightToken. If, at this moment, the inflight
+ * count is 99, then in a concurrent scenario, all three threads in
+ * WaitInflightComeBack will pass and proceed to concurrently execute
+ * IncremInflightNum. Consequently, the actual inflight count becomes 102.
+ * The next dispatch operation will need to wait until the inflight count is
+ * less than 100 to proceed, which means it needs at least 3 IOs to return
+ * before proceeding. This margin of error is acceptable and is related to
+ * the concurrency level on the scheduler side, with a defined upper limit.
+ * If precise control over the inflight count is required, it would
+ * necessitate adding locks at the interface level, converting originally
+ * concurrent logic into serial, which would not be a cost-effective
+ * solution. Therefore, we choose to tolerate a certain margin of error in
+ * this scenario.
*/ void GetInflightToken() { WaitInflightComeBack(); IncremInflightNum(); } - void ReleaseInflightToken() { - DecremInflightNum(); - } + void ReleaseInflightToken() { DecremInflightNum(); } /** * @brief Get current inflight io num, only use in test code @@ -117,16 +124,16 @@ class InflightControl { } private: - uint64_t maxInflightNum_ = 0; + uint64_t maxInflightNum_ = 0; std::atomic curInflightIONum_{0}; - Mutex inflightComeBackmtx_; - ConditionVariable inflightComeBackcv_; - Mutex inflightAllComeBackmtx_; - ConditionVariable inflightAllComeBackcv_; + Mutex inflightComeBackmtx_; + ConditionVariable inflightComeBackcv_; + Mutex inflightAllComeBackmtx_; + ConditionVariable inflightAllComeBackcv_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_INFLIGHT_CONTROLLER_H_ diff --git a/src/client/io_condition_varaiable.h b/src/client/io_condition_varaiable.h index a220168db3..9b721bd60f 100644 --- a/src/client/io_condition_varaiable.h +++ b/src/client/io_condition_varaiable.h @@ -23,12 +23,13 @@ #ifndef SRC_CLIENT_IO_CONDITION_VARAIABLE_H_ #define SRC_CLIENT_IO_CONDITION_VARAIABLE_H_ -#include //NOLINT -#include //NOLINT +#include //NOLINT +#include //NOLINT namespace curve { namespace client { -// IOConditionVariable是用户同步IO场景下IO等待条件变量 +// IOConditionVariable is the IO waiting condition variable in the user +// synchronous IO scenario class IOConditionVariable { public: IOConditionVariable() : retCode_(-1), done_(false), mtx_(), cv_() {} @@ -36,9 +37,10 @@ class IOConditionVariable { ~IOConditionVariable() = default; /** - * 条件变量唤醒函数,因为底层的RPC request是异步的,所以用户下发同步IO的时候需要 - * 在发送读写请求的时候暂停等待IO返回。 - * @param: retcode是当前IO的返回值 + * Condition variable wakeup function. Since the underlying RPC requests are + * asynchronous, when users initiate synchronous IO, they need to pause and + * wait for the IO to return while sending read/write requests. + * @param: retcode is the return value of the current IO. 
*/ void Complete(int retcode) { std::unique_lock lk(mtx_); @@ -48,7 +50,8 @@ class IOConditionVariable { } /** - * 是用户IO需要等待时候调用的函数,这个函数会在Complete被调用的时候返回 + * This is a function called when user IO needs to wait, and this function + * will return when Complete is called */ int Wait() { std::unique_lock lk(mtx_); @@ -58,20 +61,20 @@ class IOConditionVariable { } private: - // 当前IO的返回值 - int retCode_; + // The return value of the current IO + int retCode_; - // 当前IO是否完成 - bool done_; + // Is the current IO completed + bool done_; - // 条件变量使用的锁 - std::mutex mtx_; + // Locks used by conditional variables + std::mutex mtx_; - // 条件变量用于等待 + // Condition variable used for waiting std::condition_variable cv_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_IO_CONDITION_VARAIABLE_H_ diff --git a/src/client/io_tracker.cpp b/src/client/io_tracker.cpp index 85d6dae911..b835ebf503 100644 --- a/src/client/io_tracker.cpp +++ b/src/client/io_tracker.cpp @@ -20,21 +20,22 @@ * Author: tongguangxun */ +#include "src/client/io_tracker.h" + #include #include #include #include -#include "src/client/splitor.h" +#include "src/client/discard_task.h" #include "src/client/iomanager.h" -#include "src/client/io_tracker.h" -#include "src/client/request_scheduler.h" +#include "src/client/metacache_struct.h" #include "src/client/request_closure.h" -#include "src/common/timeutility.h" +#include "src/client/request_scheduler.h" #include "src/client/source_reader.h" -#include "src/client/metacache_struct.h" -#include "src/client/discard_task.h" +#include "src/client/splitor.h" +#include "src/common/timeutility.h" namespace curve { namespace client { @@ -44,24 +45,22 @@ using curve::chunkserver::CHUNK_OP_STATUS; std::atomic IOTracker::tracekerID_(1); DiscardOption IOTracker::discardOption_; -IOTracker::IOTracker(IOManager* iomanager, - MetaCache* mc, - RequestScheduler* scheduler, - FileMetric* clientMetric, +IOTracker::IOTracker(IOManager* iomanager, MetaCache* mc, + RequestScheduler* scheduler, FileMetric* clientMetric, bool disableStripe) : mc_(mc), scheduler_(scheduler), iomanager_(iomanager), fileMetric_(clientMetric), disableStripe_(disableStripe) { - id_ = tracekerID_.fetch_add(1, std::memory_order_relaxed); - scc_ = nullptr; - aioctx_ = nullptr; - data_ = nullptr; - type_ = OpType::UNKNOWN; - errcode_ = LIBCURVE_ERROR::OK; - offset_ = 0; - length_ = 0; + id_ = tracekerID_.fetch_add(1, std::memory_order_relaxed); + scc_ = nullptr; + aioctx_ = nullptr; + data_ = nullptr; + type_ = OpType::UNKNOWN; + errcode_ = LIBCURVE_ERROR::OK; + offset_ = 0; + length_ = 0; reqlist_.clear(); reqcount_.store(0, std::memory_order_release); opStartTimePoint_ = curve::common::TimeUtility::GetTimeofDayUs(); @@ -162,8 +161,7 @@ int IOTracker::ReadFromSource(const std::vector& reqCtxVec, void IOTracker::StartWrite(const void* buf, off_t offset, size_t length, MDSClient* mdsclient, const FInfo_t* fileInfo, - const FileEpoch* fEpoch, - Throttle* throttle) { + const FileEpoch* fEpoch, Throttle* throttle) { data_ = const_cast(buf); offset_ = offset; length_ = length; @@ -190,8 +188,7 @@ void IOTracker::StartAioWrite(CurveAioContext* ctx, MDSClient* mdsclient, } void IOTracker::DoWrite(MDSClient* mdsclient, const FInfo_t* fileInfo, - const FileEpoch* fEpoch, - Throttle* throttle) { + const FileEpoch* fEpoch, Throttle* throttle) { if (nullptr == data_) { ReturnOnFail(); return; @@ -199,8 +196,7 @@ void IOTracker::DoWrite(MDSClient* mdsclient, const FInfo_t* fileInfo, switch 
(userDataType_) { case UserDataType::RawBuffer: - writeData_.append_user_data(data_, length_, - TrivialDeleter); + writeData_.append_user_data(data_, length_, TrivialDeleter); break; case UserDataType::IOBuffer: writeData_ = *reinterpret_cast(data_); @@ -211,9 +207,9 @@ void IOTracker::DoWrite(MDSClient* mdsclient, const FInfo_t* fileInfo, throttle->Add(false, length_); } - int ret = Splitor::IO2ChunkRequests(this, mc_, &reqlist_, &writeData_, - offset_, length_, - mdsclient, fileInfo, fEpoch); + int ret = + Splitor::IO2ChunkRequests(this, mc_, &reqlist_, &writeData_, offset_, + length_, mdsclient, fileInfo, fEpoch); if (ret == 0) { uint32_t subIoIndex = 0; @@ -284,14 +280,14 @@ void IOTracker::DoDiscard(MDSClient* mdsClient, const FInfo* fileInfo, Done(); } -void IOTracker::ReadSnapChunk(const ChunkIDInfo &cinfo, - uint64_t seq, uint64_t offset, uint64_t len, - char *buf, SnapCloneClosure* scc) { - scc_ = scc; - data_ = buf; +void IOTracker::ReadSnapChunk(const ChunkIDInfo& cinfo, uint64_t seq, + uint64_t offset, uint64_t len, char* buf, + SnapCloneClosure* scc) { + scc_ = scc; + data_ = buf; offset_ = offset; length_ = len; - type_ = OpType::READ_SNAP; + type_ = OpType::READ_SNAP; int ret = -1; do { @@ -316,8 +312,8 @@ void IOTracker::ReadSnapChunk(const ChunkIDInfo &cinfo, } } -void IOTracker::DeleteSnapChunkOrCorrectSn(const ChunkIDInfo &cinfo, - uint64_t correctedSeq) { +void IOTracker::DeleteSnapChunkOrCorrectSn(const ChunkIDInfo& cinfo, + uint64_t correctedSeq) { type_ = OpType::DELETE_SNAP; int ret = -1; @@ -343,8 +339,8 @@ void IOTracker::DeleteSnapChunkOrCorrectSn(const ChunkIDInfo &cinfo, } } -void IOTracker::GetChunkInfo(const ChunkIDInfo &cinfo, - ChunkInfoDetail *chunkInfo) { +void IOTracker::GetChunkInfo(const ChunkIDInfo& cinfo, + ChunkInfoDetail* chunkInfo) { type_ = OpType::GET_CHUNK_INFO; int ret = -1; @@ -384,10 +380,10 @@ void IOTracker::CreateCloneChunk(const std::string& location, break; } - newreqNode->seq_ = sn; - newreqNode->chunksize_ = chunkSize; - newreqNode->location_ = location; - newreqNode->correctedSeq_ = correntSn; + newreqNode->seq_ = sn; + newreqNode->chunksize_ = chunkSize; + newreqNode->location_ = location; + newreqNode->correctedSeq_ = correntSn; FillCommonFields(cinfo, newreqNode); reqlist_.push_back(newreqNode); @@ -415,8 +411,8 @@ void IOTracker::RecoverChunk(const ChunkIDInfo& cinfo, uint64_t offset, break; } - newreqNode->rawlength_ = len; - newreqNode->offset_ = offset; + newreqNode->rawlength_ = len; + newreqNode->offset_ = offset; FillCommonFields(cinfo, newreqNode); reqlist_.push_back(newreqNode); @@ -433,8 +429,8 @@ void IOTracker::RecoverChunk(const ChunkIDInfo& cinfo, uint64_t offset, } void IOTracker::FillCommonFields(ChunkIDInfo idinfo, RequestContext* req) { - req->optype_ = type_; - req->idinfo_ = idinfo; + req->optype_ = type_; + req->idinfo_ = idinfo; req->done_->SetIOTracker(this); } @@ -459,9 +455,7 @@ void IOTracker::InitDiscardOption(const DiscardOption& opt) { discardOption_ = opt; } -int IOTracker::Wait() { - return iocv_.Wait(); -} +int IOTracker::Wait() { return iocv_.Wait(); } void IOTracker::Done() { if (type_ == OpType::READ || type_ == OpType::WRITE) { @@ -510,15 +504,15 @@ void IOTracker::Done() { MetricHelper::IncremUserEPSCount(fileMetric_, type_); if (type_ == OpType::READ || type_ == OpType::WRITE) { if (LIBCURVE_ERROR::EPOCH_TOO_OLD == errcode_) { - LOG(WARNING) << "file [" << fileMetric_->filename << "]" - << ", epoch too old, OpType = " << OpTypeToString(type_) - << ", offset = " << offset_ - << ", length = " << 
length_;
+ LOG(WARNING)
+ << "file [" << fileMetric_->filename << "]"
+ << ", epoch too old, OpType = " << OpTypeToString(type_)
+ << ", offset = " << offset_ << ", length = " << length_;
 } else {
 LOG(ERROR) << "file [" << fileMetric_->filename << "]"
- << ", IO Error, OpType = " << OpTypeToString(type_)
- << ", offset = " << offset_
- << ", length = " << length_;
+ << ", IO Error, OpType = " << OpTypeToString(type_)
+ << ", offset = " << offset_
+ << ", length = " << length_;
 }
 } else {
 if (OpType::CREATE_CLONE == type_ &&
@@ -533,13 +527,13 @@ void IOTracker::Done() {

 DestoryRequestList();

- // scc_和aioctx都为空的时候肯定是个同步调用
+ // When both scc_ and aioctx_ are null, this must be a synchronous call.
 if (scc_ == nullptr && aioctx_ == nullptr) {
 iocv_.Complete(ToReturnCode());
 return;
 }

- // 异步函数调用,在此处发起回调
+ // For an asynchronous call, the callback is invoked here
 if (aioctx_ != nullptr) {
 aioctx_->ret = ToReturnCode();
 aioctx_->cb(aioctx_);
@@ -548,7 +542,7 @@ void IOTracker::Done() {
 scc_->Run();
 }

- // 回收当前io tracker
+ // Recycle the current io tracker
 iomanager_->HandleAsyncIOResponse(this);
}

@@ -565,12 +559,13 @@ void IOTracker::ReturnOnFail() {
}

void IOTracker::ChunkServerErr2LibcurveErr(CHUNK_OP_STATUS errcode,
- LIBCURVE_ERROR* errout) {
+ LIBCURVE_ERROR* errout) {
 switch (errcode) {
 case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS:
 *errout = LIBCURVE_ERROR::OK;
 break;
- // chunk或者copyset对于用户来说是透明的,所以直接返回错误
+ // Chunks and copysets are transparent to users, so the error is
+ // returned directly
 case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST:
 case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST:
 *errout = LIBCURVE_ERROR::NOTEXIST;
@@ -599,5 +594,5 @@ void IOTracker::ChunkServerErr2LibcurveErr(CHUNK_OP_STATUS errcode,
 }
}

-} // namespace client
-} // namespace curve
+} // namespace client
+} // namespace curve
diff --git a/src/client/io_tracker.h b/src/client/io_tracker.h
index 6369410ae3..e87ffcc23b 100644
--- a/src/client/io_tracker.h
+++ b/src/client/io_tracker.h
@@ -49,44 +49,45 @@ class IOManager;
class FileSegment;
class DiscardTaskManager;

-// IOTracker用于跟踪一个用户IO,因为一个用户IO可能会跨chunkserver,
-// 因此在真正下发的时候会被拆分成多个小IO并发的向下发送,因此我们需要
-// 跟踪发送的request的执行情况。
+// IOTracker is used to track a single user IO. Because a user IO may span
+// multiple chunkservers, it is split into several small IOs that are sent
+// down concurrently when actually issued, so we need to track the execution
+// status of every request sent.
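[Editor's note, not part of the patch] The IOTracker comment above describes a fan-out/fan-in pattern: one user IO is split into N sub-requests and completes when the last one returns. A minimal sketch of that counting pattern, independent of the Curve types (SubRequestTracker and its callback are illustrative names, mirroring reqcount_ in IOTracker):

    #include <atomic>
    #include <functional>

    // Minimal fan-in tracker: fires onAllDone exactly once, when the last of
    // `total` sub-requests has reported back.
    class SubRequestTracker {
     public:
        SubRequestTracker(int total, std::function<void()> onAllDone)
            : remaining_(total), onAllDone_(std::move(onAllDone)) {}

        // Called from each sub-request's completion callback, possibly from
        // different threads.
        void HandleOneResponse() {
            if (remaining_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
                onAllDone_();  // last response: return the whole IO upwards
            }
        }

     private:
        std::atomic<int> remaining_;
        std::function<void()> onAllDone_;
    };
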
class CURVE_CACHELINE_ALIGNMENT IOTracker {
 friend class Splitor;

 public:
- IOTracker(IOManager* iomanager,
- MetaCache* mc,
- RequestScheduler* scheduler,
- FileMetric* clientMetric = nullptr,
- bool disableStripe = false);
+ IOTracker(IOManager* iomanager, MetaCache* mc, RequestScheduler* scheduler,
+ FileMetric* clientMetric = nullptr, bool disableStripe = false);

 ~IOTracker() = default;

 /**
- * @brief StartRead同步读
- * @param buf 读缓冲区
- * @param offset 读偏移
- * @param length 读长度
- * @param mdsclient 透传给splitor,与mds通信
- * @param fileInfo 当前io对应文件的基本信息
+ * @brief StartRead: synchronous read
+ * @param buf read buffer
+ * @param offset read offset
+ * @param length read length
+ * @param mdsclient passed through to the Splitor to communicate with the
+ * MDS
+ * @param fileInfo Basic information of the file corresponding to the
+ * current io
 */
 void StartRead(void* buf, off_t offset, size_t length, MDSClient* mdsclient,
 const FInfo_t* fileInfo, Throttle* throttle = nullptr);

 /**
- * @brief StartWrite同步写
- * @param buf 写缓冲区
- * @param offset 写偏移
- * @param length 写长度
- * @param mdsclient 透传给splitor,与mds通信
- * @param fileInfo 当前io对应文件的基本信息
+ * @brief StartWrite: synchronous write
+ * @param buf write buffer
+ * @param offset write offset
+ * @param length write length
+ * @param mdsclient passed through to the Splitor to communicate with the
+ * MDS
+ * @param fileInfo Basic information of the file corresponding to the
+ * current io
 */
 void StartWrite(const void* buf, off_t offset, size_t length,
 MDSClient* mdsclient, const FInfo_t* fileInfo,
- const FileEpoch* fEpoch,
- Throttle* throttle = nullptr);
+ const FileEpoch* fEpoch, Throttle* throttle = nullptr);

 /**
 * @brief start an async read operation
@@ -105,8 +106,8 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker {
 * @param fEpoch file epoch info
 */
 void StartAioWrite(CurveAioContext* ctx, MDSClient* mdsclient,
- const FInfo_t* fileInfo,
- const FileEpoch* fEpoch, Throttle* throttle = nullptr);
+ const FInfo_t* fileInfo, const FileEpoch* fEpoch,
+ Throttle* throttle = nullptr);

 void StartDiscard(off_t offset, size_t length, MDSClient* mdsclient,
 const FInfo_t* fileInfo, DiscardTaskManager* taskManager);
@@ -116,46 +117,44 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker {
 DiscardTaskManager* taskManager);

 /**
- * chunk相关接口是提供给snapshot使用的,上层的snapshot和file
- * 接口是分开的,在IOTracker这里会将其统一,这样对下层来说不用
- * 感知上层的接口类别。
+ * The chunk-related interfaces are intended for use by snapshots. The
+ * upper-level snapshot and file interfaces are separate. However, in the
+ * IOTracker, they are unified so that the lower levels do not need to be
+ * aware of the upper-level interface category.
+ * @param: chunkidinfo The target chunk
+ * @param: seq is the snapshot version number
+ * @param: offset is the offset within the snapshot
+ * @param: len is the length to be read
+ * @param: buf is the read buffer
+ * @param: scc is the asynchronous callback
 */
- void ReadSnapChunk(const ChunkIDInfo &cinfo,
- uint64_t seq,
- uint64_t offset,
- uint64_t len,
- char *buf,
- SnapCloneClosure* scc);
+ void ReadSnapChunk(const ChunkIDInfo& cinfo, uint64_t seq, uint64_t offset,
+ uint64_t len, char* buf, SnapCloneClosure* scc);

 /**
- * 删除此次转储时产生的或者历史遗留的快照
- * 如果转储过程中没有产生快照,则修改chunk的correctedSn
- * @param:chunkidinfo 目标chunk
- * @param: seq是需要修正的版本号
+ * Delete snapshots generated during this dump or left over from history.
+ * If no snapshot is generated during the dump process, modify the
+ * correctedSn of the chunk
+ * @param: chunkidinfo is the target chunk
+ * @param: seq is the version number that needs to be corrected
 */
- void DeleteSnapChunkOrCorrectSn(const ChunkIDInfo &cinfo,
- uint64_t correctedSeq);
+ void DeleteSnapChunkOrCorrectSn(const ChunkIDInfo& cinfo,
+ uint64_t correctedSeq);

 /**
- * 获取chunk的版本信息,chunkInfo是出参
- * @param:chunkidinfo 目标chunk
- * @param: chunkInfo是快照的详细信息
+ * Obtain the version information of the chunk, where chunkInfo is the
+ * output parameter
+ * @param: chunkidinfo target chunk
+ * @param: chunkInfo is the detailed information of the snapshot
 */
- void GetChunkInfo(const ChunkIDInfo &cinfo,
- ChunkInfoDetail *chunkInfo);
+ void GetChunkInfo(const ChunkIDInfo& cinfo, ChunkInfoDetail* chunkInfo);

 /**
- * @brief lazy 创建clone chunk
- * @param:location 数据源的url
- * @param:chunkidinfo 目标chunk
- * @param:sn chunk的序列号
- * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn
- * @param:chunkSize chunk的大小
- * @param: scc是异步回调
+ * @brief Lazily create a clone chunk
+ * @param: location is the URL of the data source
+ * @param: chunkidinfo target chunk
+ * @param: sn The chunk's sequence number
+ * @param: correntSn used to correct the chunk's correctedSn during
+ * CreateCloneChunk
+ * @param: chunkSize chunk size
+ * @param: scc is an asynchronous callback
 */
 void CreateCloneChunk(const std::string& location,
 const ChunkIDInfo& chunkidinfo, uint64_t sn,
@@ -163,47 +162,51 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker {
 SnapCloneClosure* scc);

 /**
- * @brief 实际恢复chunk数据
- * @param:chunkidinfo chunkidinfo
- * @param:offset 偏移
- * @param:len 长度
- * @param:chunkSize chunk的大小
- * @param: scc是异步回调
+ * @brief Actually recover chunk data
+ * @param: chunkidinfo chunkidinfo
+ * @param: offset offset
+ * @param: len length
+ * @param: chunkSize chunk size
+ * @param: scc is an asynchronous callback
 */
 void RecoverChunk(const ChunkIDInfo& chunkIdInfo, uint64_t offset,
 uint64_t len, SnapCloneClosure* scc);

 /**
- * Wait用于同步接口等待,因为用户下来的IO被client内部线程接管之后
- * 调用就可以向上返回了,但是用户的同步IO语意是要等到结果返回才能向上
- * 返回的,因此这里的Wait会让用户线程等待。
- * @return: 返回读写信息,异步IO的时候返回0或-1.0代表成功,-1代表失败
- * 同步IO返回length或-1,length代表真实读写长度,-1代表读写失败
+ * Wait is used for synchronous interface waiting. Once the user's IO is
+ * taken over by client internal threads, the call could return to the
+ * upper layer, but the user's synchronous IO semantics require waiting for
+ * the result before returning, so Wait here makes the user thread wait.
+ * @return: Returns read/write information. For asynchronous IO, it returns
+ * 0 or -1: 0 means success, -1 means failure. For synchronous IO, it
+ * returns the length or -1: 'length' represents the actual read/write
+ * length, and -1 represents read/write failure.
     */
    int Wait();

    /**
-     * 每个request都要有自己的OP类型,这里提供接口可以在io拆分的时候获取类型
+     * Each request has its own OP type; this interface returns the type
+     * during IO splitting
     */
-    OpType Optype() {return type_;}
+    OpType Optype() { return type_; }

-    // 设置操作类型,测试使用
+    // Set the operation type, for testing
    void SetOpType(OpType type) { type_ = type; }

    /**
-     * 因为client的IO都是异步发送的,且一个IO被拆分成多个Request,因此在异步
-     * IO返回后就应该告诉IOTracker当前request已经返回,这样tracker可以处理
-     * 返回的request。
-     * @param: 待处理的异步request
+     * Client IOs are all sent asynchronously, and a single IO is split into
+     * multiple requests; when an asynchronous IO returns, it informs the
+     * IOTracker that the request has come back, so the tracker can process
+     * the returned request.
+     * @param: the asynchronous request to be processed
     */
    void HandleResponse(RequestContext* reqctx);

    /**
-     * 获取当前tracker id信息
+     * Obtain the ID of the current tracker
     */
-    uint64_t GetID() const {
-        return id_;
-    }
+    uint64_t GetID() const { return id_; }

    // set user data type
    void SetUserDataType(const UserDataType dataType) {
@@ -222,9 +225,7 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker {
        readDatas_[subIoIndex] = data;
    }

-    bool IsStripeDisabled() const {
-        return disableStripe_;
-    }
+    bool IsStripeDisabled() const { return disableStripe_; }

    static void InitDiscardOption(const DiscardOption& opt);

@@ -232,38 +233,40 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker {
    void ReleaseAllSegmentLocks();

    /**
-     * 当IO返回的时候调用done,由done负责向上返回
+     * Called when the IO completes; Done is responsible for returning the
+     * result upwards
     */
    void Done();

    /**
-     * 在io拆分或者,io分发失败的时候需要调用,设置返回状态,并向上返回
+     * Called when IO splitting or IO dispatching fails; sets the return
+     * status and returns upwards
     */
    void ReturnOnFail();

    /**
-     * 用户下来的大IO会被拆分成多个子IO,这里在返回之前将子IO资源回收
+     * A large user IO is split into multiple sub IOs; the sub IO resources
+     * are reclaimed here before returning
     */
    void DestoryRequestList();

    /**
-     * 填充request context common字段
-     * @param: idinfo为chunk的id信息
-     * @param: req为待填充的request context
+     * Fill in the common fields of the request context
+     * @param: idinfo is the ID information of the chunk
+     * @param: req is the request context to be filled in
     */
    void FillCommonFields(ChunkIDInfo idinfo, RequestContext* req);

    /**
-     * chunkserver errcode转化为libcurve client的errode
-     * @param: errcode为chunkserver侧的errode
-     * @param[out]: errout为libcurve自己的errode
+     * Convert a chunkserver errcode into a libcurve client errcode
+     * @param: errcode is the error code on the chunkserver side
+     * @param[out]: errout is libcurve's own error code
     */
    void ChunkServerErr2LibcurveErr(curve::chunkserver::CHUNK_OP_STATUS errcode,
                                    LIBCURVE_ERROR* errout);

    /**
-     * 获取一个初始化后的RequestContext
-     * return: 如果分配失败或者初始化失败,返回nullptr
-     * 反之,返回一个指针
+     * Obtain an initialized RequestContext
+     * @return: returns nullptr if allocation or initialization fails;
+     * otherwise, returns a valid pointer
     */
    RequestContext* GetInitedRequestContext() const;

@@ -283,8 +286,7 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker {

    // perform write operation
    void DoWrite(MDSClient* mdsclient, const FInfo_t* fileInfo,
-                 const FileEpoch* fEpoch,
-                 Throttle* throttle);
+                 const FileEpoch* fEpoch, Throttle* throttle);

    void DoDiscard(MDSClient* mdsclient, const FInfo_t* fileInfo,
                   DiscardTaskManager* taskManager);

@@ -296,12 +298,13 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker {
    }

 private:
-    // io 类型
-    OpType type_;
+    // IO type
+    OpType type_;

-    // 当前IO的数据内容,data是读写数据的buffer
-    off_t offset_;
-    uint64_t
length_;
+    // The data content of the current IO; data is the buffer for the
+    // read/write data
+    off_t offset_;
+    uint64_t length_;

    // user data pointer
    void* data_;
@@ -315,48 +318,52 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker {
    // save read data
    std::vector readDatas_;

-    // 当用户下发的是同步IO的时候,其需要在上层进行等待,因为client的
-    // IO发送流程全部是异步的,因此这里需要用条件变量等待,待异步IO返回
-    // 之后才将这个等待的条件变量唤醒,然后向上返回。
-    IOConditionVariable iocv_;
+    // When the user issues a synchronous IO, it has to wait in the upper
+    // layer, because the client's IO sending flow is entirely asynchronous.
+    // A condition variable is used to wait here; once the asynchronous IO
+    // returns, the waiting condition variable is woken up and the call
+    // returns upwards.
+    IOConditionVariable iocv_;

-    // 异步IO的context,在异步IO返回时,通过调用aioctx
-    // 的异步回调进行返回。
+    // Context of the asynchronous IO; when the asynchronous IO completes,
+    // the result is returned via aioctx's asynchronous callback.
    CurveAioContext* aioctx_;

-    // 当前IO的errorcode
+    // The errorcode of the current IO
    LIBCURVE_ERROR errcode_;

-    // 当前IO被拆分成reqcount_个小IO
+    // The current IO is split into reqcount_ sub IOs
    std::atomic reqcount_;

-    // 大IO被拆分成多个request,这些request放在reqlist中国保存
-    std::vector reqlist_;
+    // A large IO is split into multiple requests, which are stored in
+    // reqlist_
+    std::vector reqlist_;

    // store segment indices that can be discarded
    std::unordered_set discardSegments_;

-    // metacache为当前fileinstance的元数据信息
+    // metacache holds the metadata of the current fileinstance
    MetaCache* mc_;

-    // scheduler用来将用户线程与client自己的线程切分
-    // 大IO被切分之后,将切分的reqlist传给scheduler向下发送
+    // The scheduler separates user threads from the client's own threads.
+    // After a large IO is split, the resulting reqlist is passed to the
+    // scheduler to be sent downwards
    RequestScheduler* scheduler_;

-    // 对于异步IO,Tracker需要向上层通知当前IO已经处理结束
-    // iomanager可以将该tracker释放
+    // For asynchronous IO, the tracker notifies the upper layer that the
+    // current IO has finished, so the iomanager can release the tracker
    IOManager* iomanager_;

-    // 发起时间
+    // IO start time
    uint64_t opStartTimePoint_;

-    // client端的metric统计信息
+    // Metric statistics on the client side
    FileMetric* fileMetric_;

-    // 当前tracker的id
+    // The ID of the current tracker
    uint64_t id_;

-    // 快照克隆系统异步调用回调指针
+    // Asynchronous callback pointer for the snapshot/clone system
    SnapCloneClosure* scc_;

    bool disableStripe_;
@@ -365,11 +372,11 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker {
    // so store corresponding segment lock and release after operations finished
    std::vector segmentLocks_;

-    // id生成器
+    // ID generator
    static std::atomic tracekerID_;

    static DiscardOption discardOption_;
};

-} // namespace client
-} // namespace curve
+}  // namespace client
+}  // namespace curve

#endif  // SRC_CLIENT_IO_TRACKER_H_
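The iocv_ comment above describes a sync-over-async wait: the user thread blocks until the asynchronous callback reports completion. A minimal sketch of that pattern with standard condition variables (IOConditionVariable's real definition is not part of this patch, so the names below are illustrative only):

    #include <condition_variable>
    #include <mutex>

    // Illustrative stand-in for IOTracker's Wait()/Done() handshake.
    class SyncWait {
     public:
        int Wait() {
            std::unique_lock<std::mutex> lk(mtx_);
            cv_.wait(lk, [this] { return done_; });
            return result_;  // actual length on success, -1 on failure
        }
        void Complete(int result) {  // invoked from the async IO callback
            std::lock_guard<std::mutex> lk(mtx_);
            result_ = result;
            done_ = true;
            cv_.notify_one();
        }
     private:
        std::mutex mtx_;
        std::condition_variable cv_;
        bool done_ = false;
        int result_ = -1;
    };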
diff --git a/src/client/iomanager.h b/src/client/iomanager.h
index e985b1527f..04664fe870 100644
--- a/src/client/iomanager.h
+++ b/src/client/iomanager.h
@@ -23,8 +23,8 @@
 #ifndef SRC_CLIENT_IOMANAGER_H_
 #define SRC_CLIENT_IOMANAGER_H_

-#include "src/client/io_tracker.h"
 #include "src/client/client_common.h"
+#include "src/client/io_tracker.h"
 #include "src/common/concurrent/concurrent.h"

 namespace curve {
 namespace client {

 using curve::common::Atomic;

 class IOManager {
  public:
-    IOManager() {
-        id_ = idRecorder_.fetch_add(1, std::memory_order_relaxed);
-    }
+    IOManager() { id_ = idRecorder_.fetch_add(1, std::memory_order_relaxed); }

    virtual ~IOManager() = default;

    /**
-     * @brief 获取当前iomanager的ID信息
+     * @brief Get the ID of the current iomanager
     */
-    virtual IOManagerID ID() const {
-        return id_;
-    }
+    virtual IOManagerID ID() const { return id_; }

    /**
-     * @brief 获取rpc发送令牌
+     * @brief Get an RPC send token
     */
-    virtual void GetInflightRpcToken() {
-        return;
-    }
+    virtual void GetInflightRpcToken() { return; }

    /**
-     * @brief 释放rpc发送令牌
+     * @brief Release an RPC send token
     */
-    virtual void ReleaseInflightRpcToken() {
-        return;
-    }
+    virtual void ReleaseInflightRpcToken() { return; }

    /**
-     * @brief 处理异步返回的response
-     * @param: iotracker是当前reponse的归属
+     * @brief Handle an asynchronously returned response
+     * @param: iotracker is the owner of the current response
     */
    virtual void HandleAsyncIOResponse(IOTracker* iotracker) = 0;

 protected:
-    // iomanager id目的是为了让底层RPC知道自己归属于哪个iomanager
+    // The iomanager id lets the underlying RPC know which iomanager it
+    // belongs to
    IOManagerID id_;

 private:
    // global id recorder
-    static Atomic idRecorder_;
+    static Atomic idRecorder_;
};

-} // namespace client
-} // namespace curve
+}  // namespace client
+}  // namespace curve

#endif  // SRC_CLIENT_IOMANAGER_H_
diff --git a/src/client/iomanager4chunk.h b/src/client/iomanager4chunk.h
index f9cedeca02..209829f3ef 100644
--- a/src/client/iomanager4chunk.h
+++ b/src/client/iomanager4chunk.h
@@ -24,15 +24,15 @@
 #define SRC_CLIENT_IOMANAGER4CHUNK_H_

 #include
-#include   // NOLINT
+#include   // NOLINT
+#include   // NOLINT
 #include
-#include   // NOLINT

-#include "src/client/metacache.h"
-#include "src/client/iomanager.h"
+#include "include/curve_compiler_specific.h"
 #include "src/client/client_common.h"
+#include "src/client/iomanager.h"
+#include "src/client/metacache.h"
 #include "src/client/request_scheduler.h"
-#include "include/curve_compiler_specific.h"

 namespace curve {
 namespace client {
 class IOManager4Chunk : public IOManager {
  public:
    IOManager4Chunk();
    ~IOManager4Chunk() = default;
-    bool Initialize(IOOption ioOpt, MDSClient* mdsclient);
+    bool Initialize(IOOption ioOpt, MDSClient* mdsclient);

-    /**
-     * 读取seq版本号的快照数据
-     * @param:chunkidinfo 目标chunk
-     * @param: seq是快照版本号
-     * @param: offset是快照内的offset
-     * @param: len是要读取的长度
-     * @param: buf是读取缓冲区
-     * @param: scc是异步回调
-     * @return:成功返回真实读取长度,失败为-1
-     */
-    int ReadSnapChunk(const ChunkIDInfo &chunkidinfo,
-                      uint64_t seq,
-                      uint64_t offset,
-                      uint64_t len,
-                      char *buf,
-                      SnapCloneClosure* scc);
-    /**
-     * 删除此次转储时产生的或者历史遗留的快照
-     * 如果转储过程中没有产生快照,则修改chunk的correctedSn
-     * @param:chunkidinfo 目标chunk
-     * @param: correctedSeq是需要修正的版本号
-     */
-    int DeleteSnapChunkOrCorrectSn(const ChunkIDInfo &chunkidinfo,
+    /**
+     * Read snapshot data with version number seq
+     * @param: chunkidinfo target chunk
+     * @param: seq is the snapshot version number
+     * @param: offset is the offset within the snapshot
+     * @param: len is the length to be read
+     * @param: buf is the read buffer
+     * @param: scc is the asynchronous callback
+     * @return: on success, the actual read length; on failure, -1
+     */
+    int ReadSnapChunk(const ChunkIDInfo& chunkidinfo, uint64_t seq,
+                      uint64_t offset, uint64_t len, char* buf,
+                      SnapCloneClosure* scc);
+    /**
+     * Delete the snapshot generated during this dump or left over from
+     * history. If no snapshot was generated during the dump, modify the
+     * chunk's correctedSn instead.
+     * @param: chunkidinfo target chunk
+     * @param: correctedSeq is the version number that needs to be corrected
+     */
+    int DeleteSnapChunkOrCorrectSn(const ChunkIDInfo& chunkidinfo,
                                   uint64_t correctedSeq);
-    /**
-     * 获取chunk的版本信息,chunkInfo是出参
-     *
@param:chunkidinfo 目标chunk
-     * @param: chunkInfo是快照的详细信息
-     */
-    int GetChunkInfo(const ChunkIDInfo &chunkidinfo,
-                     ChunkInfoDetail *chunkInfo);
+    /**
+     * Obtain the version information of the chunk; chunkInfo is the output
+     * parameter
+     * @param: chunkidinfo target chunk
+     * @param: chunkInfo is the detailed snapshot information
+     */
+    int GetChunkInfo(const ChunkIDInfo& chunkidinfo,
+                     ChunkInfoDetail* chunkInfo);

-    /**
-     * @brief lazy 创建clone chunk
-     * @detail
-     *  - location的格式定义为 A@B的形式。
-     *  - 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址;
-     *  - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs
-     *
-     * @param:location 数据源的url
-     * @param:chunkidinfo 目标chunk
-     * @param:sn chunk的序列号
-     * @param:chunkSize chunk的大小
-     * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn
-     * @param: scc是异步回调
-     * @return 成功返回0, 否则-1
-     */
-    int CreateCloneChunk(const std::string &location,
-                         const ChunkIDInfo &chunkidinfo,
-                         uint64_t sn,
-                         uint64_t correntSn,
-                         uint64_t chunkSize,
-                         SnapCloneClosure* scc);
+    /**
+     * @brief Lazily create a clone chunk
+     * @detail
+     *  - The format of the location is defined as A@B.
+     *  - If the source data is on S3, the location format is uri@s3, where
+     *    uri is the actual address of the chunk object.
+     *  - If the source data is on CurveFS, the location format is
+     *    /filename/chunkindex@cs.
+     *
+     * @param: location URL of the data source
+     * @param: chunkidinfo target chunk
+     * @param: sn chunk's sequence number
+     * @param: chunkSize chunk size
+     * @param: correntSn used to set the chunk's correctedSn during
+     *         CreateCloneChunk
+     * @param: scc is the asynchronous callback
+     * @return returns 0 on success, -1 otherwise
+     */
+    int CreateCloneChunk(const std::string& location,
+                         const ChunkIDInfo& chunkidinfo, uint64_t sn,
+                         uint64_t correntSn, uint64_t chunkSize,
+                         SnapCloneClosure* scc);

    /**
-     * @brief 实际恢复chunk数据
+     * @brief Actually recover chunk data
     * @param chunkidinfo chunkidinfo
-     * @param offset 偏移
-     * @param len 长度
-     * @param scc 异步回调
-     * @return 成功返回0, 否则-1
+     * @param offset offset
+     * @param len length
+     * @param scc asynchronous callback
+     * @return returns 0 on success, -1 otherwise
     */
    int RecoverChunk(const ChunkIDInfo& chunkIdInfo, uint64_t offset,
                     uint64_t len, SnapCloneClosure* scc);

    /**
-     * 因为curve client底层都是异步IO,每个IO会分配一个IOtracker跟踪IO
-     * 当这个IO做完之后,底层需要告知当前io manager来释放这个IOTracker,
-     * HandleAsyncIOResponse负责释放IOTracker
-     * @param: 是异步返回的io
+     * The bottom layer of the curve client is all asynchronous IO, and each
+     * IO is assigned an IOTracker to track it. Once an IO is done, the
+     * bottom layer informs the current io manager to release that
+     * IOTracker; HandleAsyncIOResponse is responsible for releasing it.
+     * @param: the IO returned asynchronously
     */
    void HandleAsyncIOResponse(IOTracker* iotracker) override;

-    /**
-     * 析构,回收资源
-     */
+    /**
+     * Destroy and reclaim resources
+     */
    void UnInitialize();

-    /**
-     * 获取metacache,测试代码使用
-     */
-    MetaCache* GetMetaCache() {return &mc_;}
-    /**
-     * 设置scahuler,测试代码使用
-     */
+    /**
+     * Get the metacache, for testing
+     */
+    MetaCache* GetMetaCache() { return &mc_; }
+    /**
+     * Set the scheduler, for testing
+     */
    void SetRequestScheduler(RequestScheduler* scheduler) {
-        scheduler_ = scheduler;
+        scheduler_ = scheduler;
    }

 private:
-    // 每个IOManager都有其IO配置,保存在iooption里
+    // Each IOManager has its own IO configuration, saved in the iooption
    IOOption ioopt_;

-    // metacache存储当前snapshot client元数据信息
-    MetaCache mc_;
+    // metacache stores the metadata of the current snapshot client
+    MetaCache mc_;

-    // IO最后由schedule模块向chunkserver端分发,scheduler由IOManager创建和释放
+    // IO is ultimately dispatched to the chunkserver side by the schedule
+    // module; the scheduler is created and released by the IOManager
    RequestScheduler* scheduler_;
};

-} // namespace client
-} // namespace curve
+}  // namespace client
+}  // namespace curve
#endif  // SRC_CLIENT_IOMANAGER4CHUNK_H_
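The location argument of CreateCloneChunk follows the A@B convention documented above. Two hypothetical example values, one per source kind (the object and file names are made up for illustration):

    // Source data on S3: "<uri>@s3", where <uri> addresses the chunk object.
    const std::string s3Location = "clone-src-chunk-000001@s3";
    // Source data on CurveFS: "/<filename>/<chunkindex>@cs".
    const std::string csLocation = "/cloneSourceFile/0@cs";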
diff --git a/src/client/iomanager4file.cpp b/src/client/iomanager4file.cpp
index b6f1b09527..992554264d 100644
--- a/src/client/iomanager4file.cpp
+++ b/src/client/iomanager4file.cpp
@@ -20,14 +20,15 @@
  * Author: tongguangxun
 */

+#include "src/client/iomanager4file.h"
+
 #include

-#include   // NOLINT
+#include   // NOLINT

-#include "src/client/metacache.h"
-#include "src/client/iomanager4file.h"
 #include "src/client/file_instance.h"
 #include "src/client/io_tracker.h"
+#include "src/client/metacache.h"
 #include "src/client/splitor.h"

 namespace curve {
@@ -36,8 +37,7 @@ Atomic IOManager::idRecorder_(1);
 IOManager4File::IOManager4File() : scheduler_(nullptr), exit_(false) {}

 bool IOManager4File::Initialize(const std::string& filename,
-                                const IOOption& ioOpt,
-                                MDSClient* mdsclient) {
+                                const IOOption& ioOpt, MDSClient* mdsclient) {
    ioopt_ = ioOpt;

    disableStripe_ = false;
@@ -55,8 +55,9 @@ bool IOManager4File::Initialize(const std::string& filename,
        return false;
    }

-    // IO Manager中不控制inflight IO数量,所以传入UINT64_MAX
-    // 但是IO Manager需要控制所有inflight IO在关闭的时候都被回收掉
+    // The IO Manager does not control the number of inflight IOs, so UINT64_MAX
+    // is passed. However, the IO Manager needs to ensure that all inflight IOs
+    // are reclaimed upon shutdown.
    inflightCntl_.SetMaxInflightNum(UINT64_MAX);

    scheduler_ = new (std::nothrow) RequestScheduler();
@@ -114,7 +115,7 @@ void IOManager4File::UnInitialize() {

    {
        std::unique_lock lk(exitMtx);
-        exitCv.wait(lk, [&](){ return exitFlag; });
+        exitCv.wait(lk, [&]() { return exitFlag; });
    }

    taskPool_.Stop();
@@ -128,8 +129,9 @@ void IOManager4File::UnInitialize() {
    discardTaskManager_->Stop();

    {
-        // 这个锁保证设置exit_和delete scheduler_是原子的
-        // 这样保证在scheduler_被析构的时候lease线程不会使用scheduler_
+        // This lock ensures that setting exit_ and deleting scheduler_ are
+        // atomic. This ensures that the lease thread won't use scheduler_ when
+        // it is being destructed.
        std::unique_lock lk(exitMtx_);
        exit_ = true;
@@ -140,8 +142,8 @@ void IOManager4File::UnInitialize() {
    }
}

-int IOManager4File::Read(char* buf, off_t offset,
-                         size_t length, MDSClient* mdsclient) {
+int IOManager4File::Read(char* buf, off_t offset, size_t length,
+                         MDSClient* mdsclient) {
    MetricHelper::IncremUserRPSCount(fileMetric_, OpType::READ);
    FlightIOGuard guard(this);
@@ -162,9 +164,7 @@ int IOManager4File::Read(char* buf, off_t offset,
    }
}

-int IOManager4File::Write(const char* buf,
-                          off_t offset,
-                          size_t length,
+int IOManager4File::Write(const char* buf, off_t offset, size_t length,
                           MDSClient* mdsclient) {
    MetricHelper::IncremUserRPSCount(fileMetric_, OpType::WRITE);
    FlightIOGuard guard(this);
@@ -175,8 +175,7 @@ int IOManager4File::Write(const char* buf,
    IOTracker temp(this, &mc_, scheduler_, fileMetric_, disableStripe_);
    temp.SetUserDataType(UserDataType::IOBuffer);
    temp.StartWrite(&data, offset, length, mdsclient, this->GetFileInfo(),
-                    this->GetFileEpoch(),
-                    throttle_.get());
+                    this->GetFileEpoch(), throttle_.get());

    int rc = temp.Wait();
    return rc;
@@ -223,8 +222,7 @@ int IOManager4File::AioWrite(CurveAioContext* ctx, MDSClient* mdsclient,
    inflightCntl_.IncremInflightNum();
    auto task = [this, ctx, mdsclient, temp]() {
        temp->StartAioWrite(ctx, mdsclient, this->GetFileInfo(),
-                            this->GetFileEpoch(),
-                            throttle_.get());
+                            this->GetFileEpoch(), throttle_.get());
    };

    taskPool_.Enqueue(task);
@@ -286,9 +284,7 @@ void IOManager4File::UpdateFileThrottleParams(
    }
}

-void IOManager4File::SetDisableStripe() {
-    disableStripe_ = true;
-}
+void IOManager4File::SetDisableStripe() { disableStripe_ = true; }

void IOManager4File::HandleAsyncIOResponse(IOTracker* iotracker) {
    inflightCntl_.DecremInflightNum();
@@ -330,5 +326,5 @@ void IOManager4File::GetInflightRpcToken() {
    inflightRpcCntl_.GetInflightToken();
}

-} // namespace client
-} // namespace curve
+}  // namespace client
+}  // namespace curve
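The synchronous Read and Write paths above share one shape: build a stack-allocated IOTracker, start the operation, then block in Wait() until every split sub-request has completed. A condensed sketch of that flow (error handling, metrics, throttling, and data-type setup omitted; not a drop-in replacement for the code above):

    // Simplified from IOManager4File::Read in this patch.
    int SyncReadSketch(IOManager4File* mgr, MetaCache* mc,
                       RequestScheduler* sched, char* buf, off_t offset,
                       size_t length, MDSClient* mdsclient) {
        IOTracker tracker(mgr, mc, sched);
        tracker.StartRead(buf, offset, length, mdsclient,
                          mgr->GetFileInfo(), nullptr);
        return tracker.Wait();  // actual length on success, -1 on failure
    }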
diff --git a/src/client/iomanager4file.h b/src/client/iomanager4file.h
index eaecc8497f..9571a3845d 100644
--- a/src/client/iomanager4file.h
+++ b/src/client/iomanager4file.h
@@ -28,12 +28,13 @@

 #include
 #include   // NOLINT
-#include   // NOLINT
-#include
 #include
+#include   // NOLINT
+#include

 #include "include/curve_compiler_specific.h"
 #include "src/client/client_common.h"
+#include "src/client/discard_task.h"
 #include "src/client/inflight_controller.h"
 #include "src/client/iomanager.h"
 #include "src/client/mds_client.h"
@@ -42,7 +43,6 @@
 #include "src/common/concurrent/concurrent.h"
 #include "src/common/concurrent/task_thread_pool.h"
 #include "src/common/throttle.h"
-#include "src/client/discard_task.h"

 namespace curve {
 namespace client {
@@ -57,14 +57,13 @@ class IOManager4File : public IOManager {
    ~IOManager4File() = default;

    /**
-     * 初始化函数
-     * @param: filename为当前iomanager服务的文件名
-     * @param: ioopt为当前iomanager的配置信息
-     * @param: mdsclient向下透传给metacache
-     * @return: 成功true,失败false
+     * Initialization function
+     * @param: filename is the name of the file this iomanager serves
+     * @param: ioopt is the configuration of the current iomanager
+     * @param: mdsclient is passed down to the metacache
+     * @return: true on success, false on failure
     */
-    bool Initialize(const std::string& filename,
-                    const IOOption& ioOpt,
+    bool Initialize(const std::string& filename, const IOOption& ioOpt,
                    MDSClient* mdsclient);

    /**
@@ -73,39 +72,47 @@ class IOManager4File : public IOManager {
     */
    void UnInitialize();

    /**
-     * 同步模式读
-     * @param: buf为当前待读取的缓冲区
-     * @param:offset文件内的便宜
-     * @parma:length为待读取的长度
-     * @param: mdsclient透传给底层,在必要的时候与mds通信
-     * @return: 成功返回读取真实长度,-1为失败
+     * Synchronous read
+     * @param: buf is the buffer to read into
+     * @param: offset is the offset within the file
+     * @param: length is the length to be read
+     * @param: mdsclient is passed through to the lower layers and
+     * communicates with the mds when necessary
+     * @return: on success, the actual read length; -1 indicates failure
     */
    int Read(char* buf, off_t offset, size_t length, MDSClient* mdsclient);

    /**
-     * 同步模式写
-     * @param: mdsclient透传给底层,在必要的时候与mds通信
-     * @param: buf为当前待写入的缓冲区
-     * @param:offset文件内的便宜
-     * @param:length为待读取的长度
-     * @return: 成功返回写入真实长度,-1为失败
+     * Synchronous write
+     * @param: mdsclient is passed through to the lower layers and
+     * communicates with the mds when necessary
+     * @param: buf is the buffer to be written
+     * @param: offset is the offset within the file
+     * @param: length is the length to be written
+     * @return: on success, the actual written length; -1 indicates failure
     */
    int Write(const char* buf, off_t offset, size_t length,
              MDSClient* mdsclient);

    /**
-     * 异步模式读
-     * @param: mdsclient透传给底层,在必要的时候与mds通信
-     * @param: aioctx为异步读写的io上下文,保存基本的io信息
+     * Asynchronous read
+     * @param: mdsclient is passed through to the lower layers and
+     * communicates with the mds when necessary
+     * @param: aioctx is the IO context of the asynchronous read/write,
+     * holding the basic IO information
     * @param dataType type of aioctx->buf
-     * @return: 0为成功,小于0为失败
+     * @return: 0 indicates success, less than 0 indicates failure
     */
    int AioRead(CurveAioContext* aioctx, MDSClient* mdsclient,
                UserDataType dataType);

    /**
-     * 异步模式写
-     * @param: mdsclient透传给底层,在必要的时候与mds通信
-     * @param: aioctx为异步读写的io上下文,保存基本的io信息
+     * Asynchronous write
+     * @param: mdsclient is passed through to the lower layers and
+     * communicates with the mds when necessary
+     * @param: aioctx is the IO context of the asynchronous read/write,
+     * holding the basic IO information
     * @param dataType type of aioctx->buf
-     * @return: 0为成功,小于0为失败
+     * @return: 0 indicates success, less than 0 indicates failure
     */
    int AioWrite(CurveAioContext* aioctx, MDSClient* mdsclient,
                 UserDataType dataType);
@@ -128,88 +135,71 @@ class IOManager4File : public IOManager {
    int AioDiscard(CurveAioContext* aioctx, MDSClient* mdsclient);

    /**
-     * @brief 获取rpc发送令牌
+     * @brief Get an RPC send token
     */
    void GetInflightRpcToken() override;

    /**
-     * @brief 释放rpc发送令牌
+     * @brief Release an RPC send token
     */
    void ReleaseInflightRpcToken() override;

    /**
-     * 获取metacache,测试代码使用
+     * Get the metacache, for testing
     */
-    MetaCache* GetMetaCache() {
-        return &mc_;
-    }
+    MetaCache* GetMetaCache() { return &mc_; }

    /**
-     * 设置scheduler,测试代码使用
+     * Set the scheduler, for testing
     */
    void SetRequestScheduler(RequestScheduler* scheduler) {
        scheduler_ = scheduler;
    }

    /**
-     * 获取metric信息,测试代码使用
+     * Get the metric information, for testing
     */
-    FileMetric* GetMetric() {
-        return fileMetric_;
-    }
+    FileMetric* GetMetric() { return fileMetric_; }

    /**
-     * 重新设置io配置信息,测试使用
+     * Reset the IO configuration, for testing
     */
-    void SetIOOpt(const IOOption& opt) {
-        ioopt_ = opt;
-    }
+    void SetIOOpt(const IOOption& opt) { ioopt_ = opt; }

    /**
-     * 测试使用,获取request scheduler
+     * Get the request scheduler, for testing
     */
-    RequestScheduler* GetScheduler() {
-        return scheduler_;
-    }
+    RequestScheduler* GetScheduler() { return scheduler_; }

    /**
-     * lease excutor在检查到版本更新的时候,需要通知iomanager更新文件版本信息
-     * @param: fi为当前需要更新的文件信息
+     * When the lease executor detects a version update, it needs to notify
+     * the iomanager to update the file version information
+     * @param: fi is the file information that needs to be updated
     */
    void UpdateFileInfo(const FInfo_t& fi);

-    const FInfo* GetFileInfo() const {
-        return mc_.GetFileInfo();
-    }
+    const FInfo* GetFileInfo() const { return mc_.GetFileInfo(); }

    void UpdateFileEpoch(const FileEpoch& fEpoch) {
        mc_.UpdateFileEpoch(fEpoch);
    }

-    const FileEpoch* GetFileEpoch() const {
-        return mc_.GetFileEpoch();
-    }
+    const FileEpoch* GetFileEpoch() const { return mc_.GetFileEpoch(); }

    /**
-     * 返回文件最新版本号
+     * Return the latest version number of the file
     */
-    uint64_t GetLatestFileSn() const {
-        return mc_.GetLatestFileSn();
-    }
+    uint64_t GetLatestFileSn() const { return mc_.GetLatestFileSn(); }

    /**
-     * 更新文件最新版本号
+     * Update the latest version number of the file
     */
-    void SetLatestFileSn(uint64_t newSn) {
-        mc_.SetLatestFileSn(newSn);
-    }
+    void SetLatestFileSn(uint64_t newSn) { mc_.SetLatestFileSn(newSn); }

    /**
     * @brief get current file inodeid
     * @return file inodeid
     */
-    uint64_t InodeId() const {
-        return mc_.InodeId();
-    }
+    uint64_t InodeId() const { return mc_.InodeId(); }

    void UpdateFileThrottleParams(
        const common::ReadWriteThrottleParams& params);
@@ -220,26 +210,30 @@ class IOManager4File : public IOManager {
    friend class LeaseExecutor;
    friend class FlightIOGuard;

    /**
-     * lease相关接口,当LeaseExecutor续约失败的时候,调用LeaseTimeoutDisableIO
-     * 将新下发的IO全部失败返回
+     * Lease-related interface. When the LeaseExecutor fails to renew the
+     * lease, this is called to fail and return all newly issued IO
     */
    void LeaseTimeoutBlockIO();

    /**
-     * 当lease又续约成功的时候,LeaseExecutor调用该接口恢复IO
+     * When the lease is successfully renewed again, the LeaseExecutor calls
+     * this interface to resume IO
     */
    void ResumeIO();

    /**
-     * 当lesaeexcutor发现版本变更,调用该接口开始等待inflight回来,这段期间IO是hang的
+     * When the lease executor detects a version change, it calls this
+     * interface and waits for the inflight IO to come back.
+     * IO hangs during this period.
     */
    void BlockIO();

    /**
-     * 因为curve client底层都是异步IO,每个IO会分配一个IOtracker跟踪IO
-     * 当这个IO做完之后,底层需要告知当前io manager来释放这个IOTracker,
-     * HandleAsyncIOResponse负责释放IOTracker
-     * @param: iotracker是返回的异步io
+     * The bottom layer of the curve client is all asynchronous IO, and each
+     * IO is assigned an IOTracker to track it. Once an IO is done, the
+     * bottom layer informs the current io manager to release that
+     * IOTracker; HandleAsyncIOResponse is responsible for releasing it.
+     * @param: iotracker is the asynchronous IO that returned
     */
    void HandleAsyncIOResponse(IOTracker* iotracker) override;

@@ -250,9 +244,7 @@ class IOManager4File : public IOManager {
            iomanager->inflightCntl_.IncremInflightNum();
        }

-        ~FlightIOGuard() {
-            iomanager->inflightCntl_.DecremInflightNum();
-        }
+        ~FlightIOGuard() { iomanager->inflightCntl_.DecremInflightNum(); }

     private:
        IOManager4File* iomanager;
    };

    bool IsNeedDiscard(size_t len) const;

 private:
-    // 每个IOManager都有其IO配置,保存在iooption里
+    // Each IOManager has its own IO configuration, saved in the iooption
    IOOption ioopt_;

-    // metacache存储当前文件的所有元数据信息
+    // metacache stores all metadata of the current file
    MetaCache mc_;

-    // IO最后由schedule模块向chunkserver端分发,scheduler由IOManager创建和释放
+    // IO is ultimately dispatched to the chunkserver side by the schedule
+    // module; the scheduler is created and released by the IOManager
    RequestScheduler* scheduler_;

-    // client端metric统计信息
+    // Metric statistics on the client side
    FileMetric* fileMetric_;

-    // task thread pool为了将qemu线程与curve线程隔离
+    // The task thread pool isolates the QEMU threads from the curve threads
    curve::common::TaskThreadPool taskPool_;

-    // inflight IO控制
+    // inflight IO control
    InflightControl inflightCntl_;

-    // inflight rpc控制
+    // inflight rpc control
    InflightControl inflightRpcCntl_;

    std::unique_ptr throttle_;

-    // 是否退出
+    // Whether the iomanager has exited
    bool exit_;

-    // lease续约线程与qemu一侧线程调用是并发的
-    // qemu在调用close的时候会关闭iomanager及其对应
-    // 资源。lease续约线程在续约成功或失败的时候会通知iomanager的
-    // scheduler线程现在需要block IO或者resume IO,所以
-    // 如果在lease续约线程需要通知iomanager的时候,这时候
-    // 如果iomanager的资源scheduler已经被释放了,就会
-    // 导致crash,所以需要对这个资源加一把锁,在退出的时候
-    // 不会有并发的情况,保证在资源被析构的时候lease续约
-    // 线程不会再用到这些资源.
+    // The lease renewal thread and the QEMU-side thread run concurrently.
+    // When QEMU calls close, it shuts down the iomanager and its
+    // corresponding resources. The lease renewal thread notifies the
+    // iomanager's scheduler thread on renewal success or failure that IO
+    // now needs to be blocked or resumed. If the lease renewal thread needs
+    // to notify the iomanager after the iomanager's scheduler resource has
+    // already been released, that would cause a crash. A lock therefore
+    // protects this resource, so there is no concurrency during exit; this
+    // guarantees the lease renewal thread never uses these resources while
+    // they are being destructed.
    std::mutex exitMtx_;

    // enable/disable stripe for read/write of stripe file
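The exitMtx_ comment above reduces to a small mutual-exclusion protocol between shutdown and the lease thread: both sides take the same lock, and the lease side re-checks the exit flag before touching the scheduler. A hypothetical, self-contained reduction of that protocol (the scheduler is stood in by a plain pointer; none of this code is in the patch itself):

    #include <mutex>

    struct ExitProtocol {
        std::mutex exitMtx;
        bool exited = false;
        int* scheduler = new int(0);   // stand-in for RequestScheduler*

        void Shutdown() {              // called from UnInitialize()
            std::lock_guard<std::mutex> lk(exitMtx);
            exited = true;             // flag and deletion are atomic together
            delete scheduler;
            scheduler = nullptr;
        }
        void OnLeaseEvent() {          // called from the lease renewal thread
            std::lock_guard<std::mutex> lk(exitMtx);
            if (exited) return;        // scheduler already released; do nothing
            *scheduler += 1;           // stand-in for blocking/resuming IO
        }
    };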
diff --git a/src/client/lease_executor.cpp b/src/client/lease_executor.cpp
index c8db8ddd30..797c0f0075 100644
--- a/src/client/lease_executor.cpp
+++ b/src/client/lease_executor.cpp
@@ -19,11 +19,12 @@
 * File Created: Saturday, 23rd February 2019 1:41:31 pm
 * Author: tongguangxun
 */
+#include "src/client/lease_executor.h"
+
 #include

-#include "src/common/timeutility.h"
-#include "src/client/lease_executor.h"
 #include "src/client/service_helper.h"
+#include "src/common/timeutility.h"

 using curve::common::TimeUtility;
@@ -145,9 +146,7 @@ void LeaseExecutor::Stop() {
    }
}

-bool LeaseExecutor::LeaseValid() {
-    return isleaseAvaliable_.load();
-}
+bool LeaseExecutor::LeaseValid() { return isleaseAvaliable_.load(); }

void LeaseExecutor::IncremRefreshFailed() {
    failedrefreshcount_.fetch_add(1);
@@ -190,7 +189,7 @@ void LeaseExecutor::ResetRefreshSessionTask() {
        return;
    }

-    // 等待前一个任务退出
+    // Wait for the previous task to exit
    task_->Stop();
    task_->WaitTaskExit();

@@ -203,5 +202,5 @@ void LeaseExecutor::ResetRefreshSessionTask() {
    isleaseAvaliable_.store(true);
}

-} // namespace client
-} // namespace curve
+}  // namespace client
+}  // namespace curve
diff --git a/src/client/lease_executor.h b/src/client/lease_executor.h
index 2236dc9982..829d264adc 100644
--- a/src/client/lease_executor.h
+++ b/src/client/lease_executor.h
@@ -41,16 +41,13 @@ namespace client {
 class RefreshSessionTask;

 /**
- * lease refresh结果,session如果不存在就不需要再续约
- * 如果session存在但是lease续约失败,继续续约
- * 续约成功了FInfo_t中才会有对应的文件信息
+ * Lease refresh result. If the session does not exist, there is no need to
+ * renew it. If the session exists but the lease renewal fails, keep
+ * renewing. Only after a successful renewal does FInfo_t contain the
+ * corresponding file information.
 */
struct LeaseRefreshResult {
-    enum class Status {
-        OK,
-        FAILED,
-        NOT_EXIST
-    };
+    enum class Status { OK, FAILED, NOT_EXIST };
    Status status;
    FInfo_t finfo;
};
@@ -62,19 +59,22 @@ class LeaseExecutorBase {
};

/**
- * 每个vdisk对应的fileinstance都会与mds保持心跳
- * 心跳通过LeaseExecutor实现,LeaseExecutor定期
- * 去mds续约,同时将mds端当前file最新的版本信息带回来
- * 然后检查版本信息是否变更,如果变更就需要通知iomanager
- * 更新版本。如果续约失败,就需要将用户新发下来的io直接错误返回
+ * The fileinstance corresponding to each vdisk keeps a heartbeat with the
+ * mds. The heartbeat is implemented by the LeaseExecutor, which
+ * periodically renews the lease with the mds and brings back the latest
+ * version information of the current file on the mds side. It then checks
+ * whether the version information has changed and, if so, notifies the
+ * iomanager to update the version. If the renewal fails, newly issued user
+ * IO is returned directly as an error
 */
class LeaseExecutor : public LeaseExecutorBase {
 public:
    /**
-     * 构造函数
-     * @param: leaseopt为当前lease续约的option配置
-     * @param: mdsclient是与mds续约的client
-     * @param: iomanager会在续约失败或者版本变更的时候进行io调度
+     * Constructor
+     * @param: leaseopt is the option configuration for the current lease
+     * renewal
+     * @param: mdsclient is the client used to renew the lease with the mds
+     * @param: iomanager schedules IO when renewal fails or the version
+     * changes
     */
    LeaseExecutor(const LeaseOption& leaseOpt, const UserInfo& userinfo,
                  MDSClient* mdscllent, IOManager4File* iomanager);

    ~LeaseExecutor();

    /**
-     * LeaseExecutor需要finfo保存filename
-     * LeaseSession_t是当前leaeexcutor的执行配置
-     * @param: fi为当前需要续约的文件版本信息
-     * @param: lease为续约的lease信息
-     * @return: 成功返回true,否则返回false
+     * LeaseExecutor needs finfo to save the filename.
+     * LeaseSession_t is the execution configuration of the current lease
+     * executor.
+     * @param: fi is the version information of the file to be renewed
+     * @param: lease is the lease information for renewal
+     * @return: returns true on success, otherwise false
     */
-    bool  Start(const FInfo_t& fi, const LeaseSession_t&  lease);
+    bool Start(const FInfo_t& fi, const LeaseSession_t& lease);

    /**
-     * 停止续约
+     * Stop renewing the lease
     */
    void Stop();

    /**
-     * 当前lease如果续约失败则通知iomanagerdisable io
+     * If the current lease fails to renew, the iomanager is notified to
+     * disable IO
     */
    bool LeaseValid();

    /**
-     * 测试使用,主动失效增加刷新失败
+     * For testing: actively invalidate the lease by adding refresh failures
     */
    void InvalidLease() {
        for (uint32_t i = 0; i <= leaseoption_.mdsRefreshTimesPerLease; i++) {
@@ -110,20 +111,21 @@ class LeaseExecutor : public LeaseExecutorBase {
    }

    /**
-     * @brief 续约任务执行者
-     * @return 是否继续执行refresh session任务
+     * @brief Renewal task executor
+     * @return whether to continue executing the refresh session task
     */
    bool RefreshLease() override;

    /**
-     * @brief 测试使用,重置refresh session task
+     * @brief Reset the refresh session task, for testing
     */
    void ResetRefreshSessionTask();

 private:
    /**
-     * 一个lease期间会续约rfreshTimesPerLease次,每次续约失败就递增
-     * 当连续续约rfreshTimesPerLease次失败的时候,则disable IO
+     * During one lease period the lease is refreshed refreshTimesPerLease
+     * times; the counter increments on every failed renewal. When
+     * refreshTimesPerLease consecutive renewals fail, IO is disabled
     */
    void IncremRefreshFailed();

@@ -135,44 +137,46 @@ class LeaseExecutor : public LeaseExecutorBase {
    void CheckNeedUpdateFileInfo(const FInfo& fileInfo);

 private:
-    // 与mds进行lease续约的文件名
-    std::string             fullFileName_;
+    // File name used for lease renewal with the mds
+    std::string fullFileName_;

-    // 用于续约的client
-    MDSClient*              mdsclient_;
+    // client used for renewal
+    MDSClient* mdsclient_;

-    // 用于发起refression的user信息
-    UserInfo_t              userinfo_;
+    // User information used to initiate the refresh
+    UserInfo_t userinfo_;

-    // IO管理者,当文件需要更新版本信息或者disable io的时候调用其接口
-    IOManager4File*         iomanager_;
+    // IO manager; its interface is called when the file needs to update
+    // version information or disable IO
+    IOManager4File* iomanager_;

-    // 当前lease执行的配置信息
-    LeaseOption          leaseoption_;
+    // Configuration of the current lease execution
+    LeaseOption leaseoption_;

-    // mds端传过来的lease信息,包含当前文件的lease时长,及sessionid
-    LeaseSession_t          leasesession_;
+    // Lease information sent from the mds side, including the lease
+    // duration of the current file and the sessionid
+    LeaseSession_t leasesession_;

-    // 记录当前lease是否可用
-    std::atomic isleaseAvaliable_;
+    // Record whether the current lease is available
+    std::atomic isleaseAvaliable_;

-    // 记录当前连续续约失败的次数
-    std::atomic failedrefreshcount_;
+    // Record the current number of consecutive renewal failures
+    std::atomic failedrefreshcount_;

-    // refresh session定时任务,会间隔固定时间执行一次
+    // The refresh session periodic task runs once per fixed interval
    std::unique_ptr task_;
};

-// RefreshSessin定期任务
-// 利用brpc::PeriodicTaskManager进行管理
-// 定时器触发时调用OnTriggeringTask,根据返回值决定是否继续定时触发
-// 如果不再继续触发,调用OnDestroyingTask进行清理操作
+// RefreshSession periodic task
+// Managed by brpc::PeriodicTaskManager
+// OnTriggeringTask is called when the timer fires; its return value decides
+// whether the task keeps being triggered periodically. If not,
+// OnDestroyingTask is called for cleanup
class RefreshSessionTask : public brpc::PeriodicTask {
 public:
    using Task = std::function;

-    RefreshSessionTask(LeaseExecutorBase* leaseExecutor,
-                       uint64_t intervalUs)
+    RefreshSessionTask(LeaseExecutorBase* leaseExecutor, uint64_t intervalUs)
        : leaseExecutor_(leaseExecutor),
          refreshIntervalUs_(intervalUs),
          stopped_(false),
@@ -193,10 +197,10 @@ class RefreshSessionTask : public brpc::PeriodicTask {
    virtual ~RefreshSessionTask() = default;

    /**
-     * @brief 定时器超时后执行当前函数
-     * @param next_abstime 任务下次执行的绝对时间
-     * @return true 继续定期执行当前任务
-     *         false 停止执行当前任务
+     * @brief Executed after the timer expires
+     * @param next_abstime Absolute time of the task's next execution
+     * @return true  Keep executing the task periodically
+     *         false Stop executing the task
     */
    bool OnTriggeringTask(timespec* next_abstime) override {
        std::lock_guard lk(stopMtx_);
@@ -209,7 +213,7 @@ class RefreshSessionTask : public brpc::PeriodicTask {
    }

    /**
-     * @brief 停止再次执行当前任务
+     * @brief Stop the task from being triggered again
     */
    void Stop() {
        std::lock_guard lk(stopMtx_);
@@ -217,7 +221,7 @@ class RefreshSessionTask : public brpc::PeriodicTask {
    }

    /**
-     * @brief 任务停止后调用
+     * @brief Called after the task stops
     */
    void OnDestroyingTask() override {
        std::unique_lock lk(terminatedMtx_);
@@ -226,7 +230,7 @@ class RefreshSessionTask : public brpc::PeriodicTask {
    }

    /**
-     * @brief 等待任务退出
+     * @brief Wait for the task to exit
     */
    void WaitTaskExit() {
        std::unique_lock lk(terminatedMtx_);
@@ -236,12 +240,10 @@ class RefreshSessionTask : public brpc::PeriodicTask {
    }

    /**
-     * @brief 获取refresh session时间间隔(us)
-     * @return refresh session任务时间间隔(us)
+     * @brief Get the refresh session interval (us)
+     * @return the refresh session task interval (us)
     */
-    uint64_t RefreshIntervalUs() const {
-        return refreshIntervalUs_;
-    }
+    uint64_t RefreshIntervalUs() const { return refreshIntervalUs_; }

 private:
    LeaseExecutorBase* leaseExecutor_;
@@ -255,7 +257,7 @@ class RefreshSessionTask : public brpc::PeriodicTask {
    bthread::ConditionVariable terminatedCv_;
};

-} // namespace client
-} // namespace curve
+}  // namespace client
+}  // namespace curve

#endif  // SRC_CLIENT_LEASE_EXECUTOR_H_
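The renewal bookkeeping described in the header above reduces to a failure counter: IO is disabled after a run of consecutive refresh failures, and any success clears the counter and re-validates the lease. A paraphrased, self-contained sketch (member names are stand-ins; the real logic lives in lease_executor.cpp, which this hunk only partially shows):

    #include <atomic>
    #include <cstdint>

    struct LeaseState {                        // illustrative stand-ins
        std::atomic<bool> leaseValid{true};
        std::atomic<uint64_t> failedCount{0};
        uint64_t refreshTimesPerLease = 5;     // assumed config value

        void OnRefreshFailed() {
            if (failedCount.fetch_add(1) + 1 >= refreshTimesPerLease) {
                leaseValid.store(false);       // new IO is now rejected
            }
        }
        void OnRefreshSucceeded() {
            failedCount.store(0);
            leaseValid.store(true);            // IO resumes
        }
    };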
diff --git a/src/client/libcurve_file.cpp b/src/client/libcurve_file.cpp
index 06273c5d0b..4c4d3fb632 100644
--- a/src/client/libcurve_file.cpp
+++ b/src/client/libcurve_file.cpp
@@ -49,7 +49,7 @@
 #include "src/common/uuid.h"

 bool globalclientinited_ = false;
-curve::client::FileClient *globalclient = nullptr;
+curve::client::FileClient* globalclient = nullptr;

 using curve::client::UserInfo;
@@ -71,9 +71,9 @@ char g_processname[kProcessNameMax];

 class LoggerGuard {
 private:
-    friend void InitLogging(const
std::string& confPath); - explicit LoggerGuard(const std::string &confpath) { + explicit LoggerGuard(const std::string& confpath) { InitInternal(confpath); } @@ -83,13 +83,13 @@ class LoggerGuard { } } - void InitInternal(const std::string &confpath); + void InitInternal(const std::string& confpath); private: bool needShutdown_ = false; }; -void LoggerGuard::InitInternal(const std::string &confPath) { +void LoggerGuard::InitInternal(const std::string& confPath) { curve::common::Configuration conf; conf.SetConfigPath(confPath); @@ -127,14 +127,18 @@ void LoggerGuard::InitInternal(const std::string &confPath) { needShutdown_ = true; } -void InitLogging(const std::string &confPath) { +void InitLogging(const std::string& confPath) { static LoggerGuard guard(confPath); } } // namespace FileClient::FileClient() - : rwlock_(), fdcount_(0), fileserviceMap_(), clientconfig_(), mdsClient_(), + : rwlock_(), + fdcount_(0), + fileserviceMap_(), + clientconfig_(), + mdsClient_(), csClient_(std::make_shared()), csBroadCaster_(std::make_shared(csClient_)), inited_(false), @@ -214,8 +218,8 @@ void FileClient::UnInit() { inited_ = false; } -int FileClient::Open(const std::string &filename, const UserInfo_t &userinfo, - const OpenFlags &openflags) { +int FileClient::Open(const std::string& filename, const UserInfo_t& userinfo, + const OpenFlags& openflags) { LOG(INFO) << "Opening filename: " << filename << ", flags: " << openflags; ClientConfig clientConfig; if (openflags.confPath.empty()) { @@ -235,7 +239,7 @@ int FileClient::Open(const std::string &filename, const UserInfo_t &userinfo, return -LIBCURVE_ERROR::FAILED; } - FileInstance *fileserv = FileInstance::NewInitedFileInstance( + FileInstance* fileserv = FileInstance::NewInitedFileInstance( clientConfig.GetFileServiceOption(), mdsClient, filename, userinfo, openflags, false); if (fileserv == nullptr) { @@ -266,9 +270,9 @@ int FileClient::Open(const std::string &filename, const UserInfo_t &userinfo, return fd; } -int FileClient::Open4ReadOnly(const std::string &filename, - const UserInfo_t &userinfo, bool disableStripe) { - FileInstance *instance = FileInstance::Open4Readonly( +int FileClient::Open4ReadOnly(const std::string& filename, + const UserInfo_t& userinfo, bool disableStripe) { + FileInstance* instance = FileInstance::Open4Readonly( clientconfig_.GetFileServiceOption(), mdsClient_, filename, userinfo); if (instance == nullptr) { @@ -293,8 +297,8 @@ int FileClient::Open4ReadOnly(const std::string &filename, return fd; } -int FileClient::IncreaseEpoch(const std::string &filename, - const UserInfo_t &userinfo) { +int FileClient::IncreaseEpoch(const std::string& filename, + const UserInfo_t& userinfo) { LOG(INFO) << "IncreaseEpoch, filename: " << filename; FInfo_t fi; FileEpoch_t fEpoch; @@ -324,8 +328,7 @@ int FileClient::IncreaseEpoch(const std::string &filename, return ret2; } -int FileClient::Create(const std::string& filename, - const UserInfo& userinfo, +int FileClient::Create(const std::string& filename, const UserInfo& userinfo, size_t size) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { @@ -350,8 +353,8 @@ int FileClient::Create2(const CreateFileContext& context) { if (mdsClient_ != nullptr) { ret = mdsClient_->CreateFile(context); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) - << "Create file failed, filename: " << context.name - << ", ret: " << ret; + << "Create file failed, filename: " << context.name + << ", ret: " << ret; } else { LOG(ERROR) << "global mds client not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -359,8 +362,8 @@ int 
FileClient::Create2(const CreateFileContext& context) { return -ret; } -int FileClient::Read(int fd, char *buf, off_t offset, size_t len) { - // 长度为0,直接返回,不做任何操作 +int FileClient::Read(int fd, char* buf, off_t offset, size_t len) { + // Length is 0, returns directly without any operation if (len == 0) { return -LIBCURVE_ERROR::OK; } @@ -374,8 +377,8 @@ int FileClient::Read(int fd, char *buf, off_t offset, size_t len) { return fileserviceMap_[fd]->Read(buf, offset, len); } -int FileClient::Write(int fd, const char *buf, off_t offset, size_t len) { - // 长度为0,直接返回,不做任何操作 +int FileClient::Write(int fd, const char* buf, off_t offset, size_t len) { + // Length is 0, returns directly without any operation if (len == 0) { return -LIBCURVE_ERROR::OK; } @@ -400,9 +403,9 @@ int FileClient::Discard(int fd, off_t offset, size_t length) { return iter->second->Discard(offset, length); } -int FileClient::AioRead(int fd, CurveAioContext *aioctx, +int FileClient::AioRead(int fd, CurveAioContext* aioctx, UserDataType dataType) { - // 长度为0,直接返回,不做任何操作 + // Length is 0, returns directly without any operation if (aioctx->length == 0) { return -LIBCURVE_ERROR::OK; } @@ -420,9 +423,9 @@ int FileClient::AioRead(int fd, CurveAioContext *aioctx, return ret; } -int FileClient::AioWrite(int fd, CurveAioContext *aioctx, +int FileClient::AioWrite(int fd, CurveAioContext* aioctx, UserDataType dataType) { - // 长度为0,直接返回,不做任何操作 + // Length is 0, returns directly without any operation if (aioctx->length == 0) { return -LIBCURVE_ERROR::OK; } @@ -440,7 +443,7 @@ int FileClient::AioWrite(int fd, CurveAioContext *aioctx, return ret; } -int FileClient::AioDiscard(int fd, CurveAioContext *aioctx) { +int FileClient::AioDiscard(int fd, CurveAioContext* aioctx) { ReadLockGuard lk(rwlock_); auto iter = fileserviceMap_.find(fd); if (CURVE_UNLIKELY(iter == fileserviceMap_.end())) { @@ -451,8 +454,8 @@ int FileClient::AioDiscard(int fd, CurveAioContext *aioctx) { } } -int FileClient::Rename(const UserInfo_t &userinfo, const std::string &oldpath, - const std::string &newpath) { +int FileClient::Rename(const UserInfo_t& userinfo, const std::string& oldpath, + const std::string& newpath) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->RenameFile(userinfo, oldpath, newpath); @@ -466,7 +469,7 @@ int FileClient::Rename(const UserInfo_t &userinfo, const std::string &oldpath, return -ret; } -int FileClient::Extend(const std::string &filename, const UserInfo_t &userinfo, +int FileClient::Extend(const std::string& filename, const UserInfo_t& userinfo, uint64_t newsize) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { @@ -481,7 +484,7 @@ int FileClient::Extend(const std::string &filename, const UserInfo_t &userinfo, return -ret; } -int FileClient::Unlink(const std::string &filename, const UserInfo_t &userinfo, +int FileClient::Unlink(const std::string& filename, const UserInfo_t& userinfo, bool deleteforce) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { @@ -496,7 +499,7 @@ int FileClient::Unlink(const std::string &filename, const UserInfo_t &userinfo, return -ret; } -int FileClient::Recover(const std::string &filename, const UserInfo_t &userinfo, +int FileClient::Recover(const std::string& filename, const UserInfo_t& userinfo, uint64_t fileId) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { @@ -510,7 +513,7 @@ int FileClient::Recover(const std::string &filename, const UserInfo_t &userinfo, return -ret; } -int FileClient::StatFile(int fd, FileStatInfo *finfo) { +int FileClient::StatFile(int fd, FileStatInfo* finfo) { 
FInfo_t fi; { ReadLockGuard lk(rwlock_); @@ -519,7 +522,7 @@ int FileClient::StatFile(int fd, FileStatInfo *finfo) { LOG(ERROR) << "StatFile failed not found fd = " << fd; return -LIBCURVE_ERROR::FAILED; } - FileInstance *instance = fileserviceMap_[fd]; + FileInstance* instance = fileserviceMap_[fd]; fi = instance->GetCurrentFileInfo(); } BuildFileStatInfo(fi, finfo); @@ -527,8 +530,8 @@ int FileClient::StatFile(int fd, FileStatInfo *finfo) { return LIBCURVE_ERROR::OK; } -int FileClient::StatFile(const std::string &filename, - const UserInfo_t &userinfo, FileStatInfo *finfo) { +int FileClient::StatFile(const std::string& filename, + const UserInfo_t& userinfo, FileStatInfo* finfo) { FInfo_t fi; FileEpoch_t fEpoch; int ret; @@ -548,8 +551,8 @@ int FileClient::StatFile(const std::string &filename, return -ret; } -int FileClient::Listdir(const std::string &dirpath, const UserInfo_t &userinfo, - std::vector *filestatVec) { +int FileClient::Listdir(const std::string& dirpath, const UserInfo_t& userinfo, + std::vector* filestatVec) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->Listdir(dirpath, userinfo, filestatVec); @@ -563,7 +566,7 @@ int FileClient::Listdir(const std::string &dirpath, const UserInfo_t &userinfo, return -ret; } -int FileClient::Mkdir(const std::string &dirpath, const UserInfo_t &userinfo) { +int FileClient::Mkdir(const std::string& dirpath, const UserInfo_t& userinfo) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { CreateFileContext context; @@ -588,7 +591,7 @@ int FileClient::Mkdir(const std::string &dirpath, const UserInfo_t &userinfo) { return -ret; } -int FileClient::Rmdir(const std::string &dirpath, const UserInfo_t &userinfo) { +int FileClient::Rmdir(const std::string& dirpath, const UserInfo_t& userinfo) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->DeleteFile(dirpath, userinfo); @@ -601,9 +604,9 @@ int FileClient::Rmdir(const std::string &dirpath, const UserInfo_t &userinfo) { return -ret; } -int FileClient::ChangeOwner(const std::string &filename, - const std::string &newOwner, - const UserInfo_t &userinfo) { +int FileClient::ChangeOwner(const std::string& filename, + const std::string& newOwner, + const UserInfo_t& userinfo) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->ChangeOwner(filename, newOwner, userinfo); @@ -651,7 +654,7 @@ int FileClient::Close(int fd) { return -LIBCURVE_ERROR::FAILED; } -int FileClient::GetClusterId(char *buf, int len) { +int FileClient::GetClusterId(char* buf, int len) { std::string result = GetClusterId(); if (result.empty()) { @@ -685,7 +688,7 @@ std::string FileClient::GetClusterId() { return {}; } -int FileClient::GetFileInfo(int fd, FInfo *finfo) { +int FileClient::GetFileInfo(int fd, FInfo* finfo) { int ret = -LIBCURVE_ERROR::FAILED; ReadLockGuard lk(rwlock_); @@ -707,11 +710,11 @@ std::vector FileClient::ListPoolset() { const auto ret = mdsClient_->ListPoolset(&out); LOG_IF(WARNING, ret != LIBCURVE_ERROR::OK) - << "Failed to list poolset, error: " << ret; + << "Failed to list poolset, error: " << ret; return out; } -void FileClient::BuildFileStatInfo(const FInfo_t &fi, FileStatInfo *finfo) { +void FileClient::BuildFileStatInfo(const FInfo_t& fi, FileStatInfo* finfo) { finfo->id = fi.id; finfo->parentid = fi.parentid; finfo->ctime = fi.ctime; @@ -722,9 +725,9 @@ void FileClient::BuildFileStatInfo(const FInfo_t &fi, FileStatInfo *finfo) { finfo->stripeCount = fi.stripeCount; memcpy(finfo->filename, fi.filename.c_str(), - std::min(sizeof(finfo->filename), 
fi.filename.size() + 1)); + std::min(sizeof(finfo->filename), fi.filename.size() + 1)); memcpy(finfo->owner, fi.owner.c_str(), - std::min(sizeof(finfo->owner), fi.owner.size() + 1)); + std::min(sizeof(finfo->owner), fi.owner.size() + 1)); finfo->fileStatus = static_cast(fi.filestatus); } @@ -758,7 +761,7 @@ bool FileClient::StartDummyServer() { return false; } - // 获取本地ip + // Obtain local IP std::string ip; if (!common::NetCommon::GetLocalIP(&ip)) { LOG(ERROR) << "Get local ip failed!"; @@ -775,14 +778,13 @@ bool FileClient::StartDummyServer() { } // namespace client } // namespace curve - -// 全局初始化与反初始化 -int GlobalInit(const char *configpath); +// Global initialization and deinitialization +int GlobalInit(const char* configpath); void GlobalUnInit(); -int Init(const char *path) { return GlobalInit(path); } +int Init(const char* path) { return GlobalInit(path); } -int Open4Qemu(const char *filename) { +int Open4Qemu(const char* filename) { curve::client::UserInfo_t userinfo; std::string realname; bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( @@ -800,7 +802,7 @@ int Open4Qemu(const char *filename) { return globalclient->Open(realname, userinfo); } -int IncreaseEpoch(const char *filename) { +int IncreaseEpoch(const char* filename) { curve::client::UserInfo_t userinfo; std::string realname; bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( @@ -818,7 +820,7 @@ int IncreaseEpoch(const char *filename) { return globalclient->IncreaseEpoch(realname, userinfo); } -int Extend4Qemu(const char *filename, int64_t newsize) { +int Extend4Qemu(const char* filename, int64_t newsize) { curve::client::UserInfo_t userinfo; std::string realname; bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( @@ -840,7 +842,7 @@ int Extend4Qemu(const char *filename, int64_t newsize) { static_cast(newsize)); } -int Open(const char *filename, const C_UserInfo_t *userinfo) { +int Open(const char* filename, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -850,7 +852,7 @@ int Open(const char *filename, const C_UserInfo_t *userinfo) { UserInfo(userinfo->owner, userinfo->password)); } -int Read(int fd, char *buf, off_t offset, size_t length) { +int Read(int fd, char* buf, off_t offset, size_t length) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -859,7 +861,7 @@ int Read(int fd, char *buf, off_t offset, size_t length) { return globalclient->Read(fd, buf, offset, length); } -int Write(int fd, const char *buf, off_t offset, size_t length) { +int Write(int fd, const char* buf, off_t offset, size_t length) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -877,7 +879,7 @@ int Discard(int fd, off_t offset, size_t length) { return globalclient->Discard(fd, offset, length); } -int AioRead(int fd, CurveAioContext *aioctx) { +int AioRead(int fd, CurveAioContext* aioctx) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -888,7 +890,7 @@ int AioRead(int fd, CurveAioContext *aioctx) { return globalclient->AioRead(fd, aioctx); } -int AioWrite(int fd, CurveAioContext *aioctx) { +int AioWrite(int fd, CurveAioContext* aioctx) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -896,11 +898,11 @@ int AioWrite(int fd, CurveAioContext *aioctx) { DVLOG(9) << "offset: " << aioctx->offset << " length: " << aioctx->length << " op: " << 
aioctx->op - << " buf: " << *(unsigned int *)aioctx->buf; + << " buf: " << *(unsigned int*)aioctx->buf; return globalclient->AioWrite(fd, aioctx); } -int AioDiscard(int fd, CurveAioContext *aioctx) { +int AioDiscard(int fd, CurveAioContext* aioctx) { if (globalclient == nullptr) { LOG(ERROR) << "Not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -909,7 +911,7 @@ int AioDiscard(int fd, CurveAioContext *aioctx) { return globalclient->AioDiscard(fd, aioctx); } -int Create(const char *filename, const C_UserInfo_t *userinfo, size_t size) { +int Create(const char* filename, const C_UserInfo_t* userinfo, size_t size) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -919,8 +921,8 @@ int Create(const char *filename, const C_UserInfo_t *userinfo, size_t size) { filename, UserInfo(userinfo->owner, userinfo->password), size); } -int Rename(const C_UserInfo_t* userinfo, - const char* oldpath, const char* newpath) { +int Rename(const C_UserInfo_t* userinfo, const char* oldpath, + const char* newpath) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -930,7 +932,7 @@ int Rename(const C_UserInfo_t* userinfo, oldpath, newpath); } -int Extend(const char *filename, const C_UserInfo_t *userinfo, +int Extend(const char* filename, const C_UserInfo_t* userinfo, uint64_t newsize) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; @@ -941,7 +943,7 @@ int Extend(const char *filename, const C_UserInfo_t *userinfo, filename, UserInfo(userinfo->owner, userinfo->password), newsize); } -int Unlink(const char *filename, const C_UserInfo_t *userinfo) { +int Unlink(const char* filename, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -951,7 +953,7 @@ int Unlink(const char *filename, const C_UserInfo_t *userinfo) { UserInfo(userinfo->owner, userinfo->password)); } -int DeleteForce(const char *filename, const C_UserInfo_t *userinfo) { +int DeleteForce(const char* filename, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -961,7 +963,7 @@ int DeleteForce(const char *filename, const C_UserInfo_t *userinfo) { filename, UserInfo(userinfo->owner, userinfo->password), true); } -int Recover(const char *filename, const C_UserInfo_t *userinfo, +int Recover(const char* filename, const C_UserInfo_t* userinfo, uint64_t fileId) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; @@ -972,21 +974,21 @@ int Recover(const char *filename, const C_UserInfo_t *userinfo, filename, UserInfo(userinfo->owner, userinfo->password), fileId); } -DirInfo_t *OpenDir(const char *dirpath, const C_UserInfo_t *userinfo) { +DirInfo_t* OpenDir(const char* dirpath, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return nullptr; } - DirInfo_t *dirinfo = new (std::nothrow) DirInfo_t; - dirinfo->dirpath = const_cast(dirpath); - dirinfo->userinfo = const_cast(userinfo); + DirInfo_t* dirinfo = new (std::nothrow) DirInfo_t; + dirinfo->dirpath = const_cast(dirpath); + dirinfo->userinfo = const_cast(userinfo); dirinfo->fileStat = nullptr; return dirinfo; } -int Listdir(DirInfo_t *dirinfo) { +int Listdir(DirInfo_t* dirinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1027,7 +1029,7 @@ int Listdir(DirInfo_t *dirinfo) { return ret; } -void CloseDir(DirInfo_t *dirinfo) { +void CloseDir(DirInfo_t* dirinfo) { if 
(dirinfo != nullptr) { if (dirinfo->fileStat != nullptr) { delete[] dirinfo->fileStat; @@ -1037,7 +1039,7 @@ void CloseDir(DirInfo_t *dirinfo) { } } -int Mkdir(const char *dirpath, const C_UserInfo_t *userinfo) { +int Mkdir(const char* dirpath, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1047,7 +1049,7 @@ int Mkdir(const char *dirpath, const C_UserInfo_t *userinfo) { UserInfo(userinfo->owner, userinfo->password)); } -int Rmdir(const char *dirpath, const C_UserInfo_t *userinfo) { +int Rmdir(const char* dirpath, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1066,7 +1068,7 @@ int Close(int fd) { return globalclient->Close(fd); } -int StatFile4Qemu(const char *filename, FileStatInfo *finfo) { +int StatFile4Qemu(const char* filename, FileStatInfo* finfo) { curve::client::UserInfo_t userinfo; std::string realname; bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( @@ -1084,8 +1086,8 @@ int StatFile4Qemu(const char *filename, FileStatInfo *finfo) { return globalclient->StatFile(realname, userinfo, finfo); } -int StatFile(const char *filename, const C_UserInfo_t *cuserinfo, - FileStatInfo *finfo) { +int StatFile(const char* filename, const C_UserInfo_t* cuserinfo, + FileStatInfo* finfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1095,8 +1097,8 @@ int StatFile(const char *filename, const C_UserInfo_t *cuserinfo, return globalclient->StatFile(filename, userinfo, finfo); } -int ChangeOwner(const char *filename, const char *newOwner, - const C_UserInfo_t *cuserinfo) { +int ChangeOwner(const char* filename, const char* newOwner, + const C_UserInfo_t* cuserinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1108,7 +1110,7 @@ int ChangeOwner(const char *filename, const char *newOwner, void UnInit() { GlobalUnInit(); } -int GetClusterId(char *buf, int len) { +int GetClusterId(char* buf, int len) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1117,7 +1119,7 @@ int GetClusterId(char *buf, int len) { return globalclient->GetClusterId(buf, len); } -int GlobalInit(const char *path) { +int GlobalInit(const char* path) { int ret = 0; if (globalclientinited_) { LOG(INFO) << "global cient already inited!"; @@ -1154,74 +1156,74 @@ void GlobalUnInit() { } } -const char *LibCurveErrorName(LIBCURVE_ERROR err) { +const char* LibCurveErrorName(LIBCURVE_ERROR err) { switch (err) { - case LIBCURVE_ERROR::OK: - return "OK"; - case LIBCURVE_ERROR::EXISTS: - return "EXISTS"; - case LIBCURVE_ERROR::FAILED: - return "FAILED"; - case LIBCURVE_ERROR::DISABLEIO: - return "DISABLEIO"; - case LIBCURVE_ERROR::AUTHFAIL: - return "AUTHFAIL"; - case LIBCURVE_ERROR::DELETING: - return "DELETING"; - case LIBCURVE_ERROR::NOTEXIST: - return "NOTEXIST"; - case LIBCURVE_ERROR::UNDER_SNAPSHOT: - return "UNDER_SNAPSHOT"; - case LIBCURVE_ERROR::NOT_UNDERSNAPSHOT: - return "NOT_UNDERSNAPSHOT"; - case LIBCURVE_ERROR::DELETE_ERROR: - return "DELETE_ERROR"; - case LIBCURVE_ERROR::NOT_ALLOCATE: - return "NOT_ALLOCATE"; - case LIBCURVE_ERROR::NOT_SUPPORT: - return "NOT_SUPPORT"; - case LIBCURVE_ERROR::NOT_EMPTY: - return "NOT_EMPTY"; - case LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE: - return "NO_SHRINK_BIGGER_FILE"; - case LIBCURVE_ERROR::SESSION_NOTEXISTS: - return "SESSION_NOTEXISTS"; - case 
LIBCURVE_ERROR::FILE_OCCUPIED: - return "FILE_OCCUPIED"; - case LIBCURVE_ERROR::PARAM_ERROR: - return "PARAM_ERROR"; - case LIBCURVE_ERROR::INTERNAL_ERROR: - return "INTERNAL_ERROR"; - case LIBCURVE_ERROR::CRC_ERROR: - return "CRC_ERROR"; - case LIBCURVE_ERROR::INVALID_REQUEST: - return "INVALID_REQUEST"; - case LIBCURVE_ERROR::DISK_FAIL: - return "DISK_FAIL"; - case LIBCURVE_ERROR::NO_SPACE: - return "NO_SPACE"; - case LIBCURVE_ERROR::NOT_ALIGNED: - return "NOT_ALIGNED"; - case LIBCURVE_ERROR::BAD_FD: - return "BAD_FD"; - case LIBCURVE_ERROR::LENGTH_NOT_SUPPORT: - return "LENGTH_NOT_SUPPORT"; - case LIBCURVE_ERROR::SESSION_NOT_EXIST: - return "SESSION_NOT_EXIST"; - case LIBCURVE_ERROR::STATUS_NOT_MATCH: - return "STATUS_NOT_MATCH"; - case LIBCURVE_ERROR::DELETE_BEING_CLONED: - return "DELETE_BEING_CLONED"; - case LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT: - return "CLIENT_NOT_SUPPORT_SNAPSHOT"; - case LIBCURVE_ERROR::SNAPSTHO_FROZEN: - return "SNAPSTHO_FROZEN"; - case LIBCURVE_ERROR::RETRY_UNTIL_SUCCESS: - return "RETRY_UNTIL_SUCCESS"; - case LIBCURVE_ERROR::EPOCH_TOO_OLD: - return "EPOCH_TOO_OLD"; - case LIBCURVE_ERROR::UNKNOWN: - break; + case LIBCURVE_ERROR::OK: + return "OK"; + case LIBCURVE_ERROR::EXISTS: + return "EXISTS"; + case LIBCURVE_ERROR::FAILED: + return "FAILED"; + case LIBCURVE_ERROR::DISABLEIO: + return "DISABLEIO"; + case LIBCURVE_ERROR::AUTHFAIL: + return "AUTHFAIL"; + case LIBCURVE_ERROR::DELETING: + return "DELETING"; + case LIBCURVE_ERROR::NOTEXIST: + return "NOTEXIST"; + case LIBCURVE_ERROR::UNDER_SNAPSHOT: + return "UNDER_SNAPSHOT"; + case LIBCURVE_ERROR::NOT_UNDERSNAPSHOT: + return "NOT_UNDERSNAPSHOT"; + case LIBCURVE_ERROR::DELETE_ERROR: + return "DELETE_ERROR"; + case LIBCURVE_ERROR::NOT_ALLOCATE: + return "NOT_ALLOCATE"; + case LIBCURVE_ERROR::NOT_SUPPORT: + return "NOT_SUPPORT"; + case LIBCURVE_ERROR::NOT_EMPTY: + return "NOT_EMPTY"; + case LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE: + return "NO_SHRINK_BIGGER_FILE"; + case LIBCURVE_ERROR::SESSION_NOTEXISTS: + return "SESSION_NOTEXISTS"; + case LIBCURVE_ERROR::FILE_OCCUPIED: + return "FILE_OCCUPIED"; + case LIBCURVE_ERROR::PARAM_ERROR: + return "PARAM_ERROR"; + case LIBCURVE_ERROR::INTERNAL_ERROR: + return "INTERNAL_ERROR"; + case LIBCURVE_ERROR::CRC_ERROR: + return "CRC_ERROR"; + case LIBCURVE_ERROR::INVALID_REQUEST: + return "INVALID_REQUEST"; + case LIBCURVE_ERROR::DISK_FAIL: + return "DISK_FAIL"; + case LIBCURVE_ERROR::NO_SPACE: + return "NO_SPACE"; + case LIBCURVE_ERROR::NOT_ALIGNED: + return "NOT_ALIGNED"; + case LIBCURVE_ERROR::BAD_FD: + return "BAD_FD"; + case LIBCURVE_ERROR::LENGTH_NOT_SUPPORT: + return "LENGTH_NOT_SUPPORT"; + case LIBCURVE_ERROR::SESSION_NOT_EXIST: + return "SESSION_NOT_EXIST"; + case LIBCURVE_ERROR::STATUS_NOT_MATCH: + return "STATUS_NOT_MATCH"; + case LIBCURVE_ERROR::DELETE_BEING_CLONED: + return "DELETE_BEING_CLONED"; + case LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT: + return "CLIENT_NOT_SUPPORT_SNAPSHOT"; + case LIBCURVE_ERROR::SNAPSTHO_FROZEN: + return "SNAPSTHO_FROZEN"; + case LIBCURVE_ERROR::RETRY_UNTIL_SUCCESS: + return "RETRY_UNTIL_SUCCESS"; + case LIBCURVE_ERROR::EPOCH_TOO_OLD: + return "EPOCH_TOO_OLD"; + case LIBCURVE_ERROR::UNKNOWN: + break; } static thread_local char message[64]; diff --git a/src/client/libcurve_file.h b/src/client/libcurve_file.h index 1f1202bbbb..cd24b8afc6 100644 --- a/src/client/libcurve_file.h +++ b/src/client/libcurve_file.h @@ -24,19 +24,20 @@ #define SRC_CLIENT_LIBCURVE_FILE_H_ #include + #include +#include #include #include #include -#include 
#include "include/client/libcurve.h" +#include "src/client/chunkserver_broadcaster.h" #include "src/client/client_common.h" #include "src/client/file_instance.h" #include "src/common/concurrent/rw_lock.h" -#include "src/client/chunkserver_broadcaster.h" -// TODO(tongguangxun) :添加关键函数trace功能 +// TODO(tongguangxun): Add key function trace function namespace curve { namespace client { @@ -48,28 +49,28 @@ class FileClient { virtual ~FileClient() = default; /** - * file对象初始化函数 - * @param: 配置文件路径 + * file object initialization function + * @param: Configuration file path */ virtual int Init(const std::string& configpath); /** - * 打开或创建文件 - * @param: filename文件名 - * @param: userinfo是操作文件的用户信息 - * @return: 返回文件fd + * Open or create a file + * @param: filename File name + * @param: userinfo is the user information for operating the file + * @return: Return the file fd */ - virtual int Open(const std::string& filename, - const UserInfo_t& userinfo, + virtual int Open(const std::string& filename, const UserInfo_t& userinfo, const OpenFlags& openflags = {}); /** - * 打开文件,这个打开只是创建了一个fd,并不与mds交互,没有session续约 - * 这个Open接口主要是提供给快照克隆镜像系统做数据拷贝使用 - * @param: filename文件名 - * @param: userinfo当前用户信息 + * Open the file. This only creates an fd and does not interact with mds. + * There is no session renewal This Open interface is mainly provided for + * data copying in snapshot clone image systems + * @param: filename File name + * @param: userinfo Current user information * @param disableStripe enable/disable stripe feature for a stripe file - * @return: 返回文件fd + * @return: Return the file fd */ virtual int Open4ReadOnly(const std::string& filename, const UserInfo_t& userinfo, @@ -83,19 +84,19 @@ class FileClient { * * @return 0 for success, -1 for fail */ - int IncreaseEpoch(const std::string& filename, - const UserInfo_t& userinfo); + int IncreaseEpoch(const std::string& filename, const UserInfo_t& userinfo); /** - * 创建文件 - * @param: filename文件名 - * @param: userinfo是当前打开或创建时携带的user信息 - * @param: size文件长度,当create为true的时候以size长度创建文件 - * @return: 成功返回0, 失败可能有多种可能 - * 比如内部错误,或者文件已存在 + * Create File + * @param: filename File name + * @param: userinfo is the user information that is currently carried when + * opening or creating + * @param: size file length. 
When create is true, create a file with size + * length + * @return: Success returns 0, failure may have multiple possibilities + * For example, internal errors or files that already exist */ - virtual int Create(const std::string& filename, - const UserInfo_t& userinfo, + virtual int Create(const std::string& filename, const UserInfo_t& userinfo, size_t size); /** @@ -105,22 +106,24 @@ class FileClient { virtual int Create2(const CreateFileContext& context); /** - * 同步模式读 - * @param: fd为当前open返回的文件描述符 - * @param: buf为当前待读取的缓冲区 - * @param:offset文件内的便宜 - * @parma:length为待读取的长度 - * @return: 成功返回读取字节数,否则返回小于0的错误码 + * Synchronous mode reading + * @param: fd is the file descriptor returned by the current open + * @param: buf is the current buffer to be read + * @param: offset within the file + * @parma: length is the length to be read + * @return: Successfully returned the number of bytes read, otherwise an + * error code less than 0 will be returned */ virtual int Read(int fd, char* buf, off_t offset, size_t length); /** - * 同步模式写 - * @param: fd为当前open返回的文件描述符 - * @param: buf为当前待写入的缓冲区 - * @param:offset文件内的便宜 - * @parma:length为待读取的长度 - * @return: 成功返回写入字节数,否则返回小于0的错误码 + * Synchronous mode write + * @param: fd is the file descriptor returned by the current open + * @param: buf is the current buffer to be written + * @param: offset within the file + * @parma: length is the length to be read + * @return: Successfully returns the number of bytes written, otherwise + * returns an error code less than 0 */ virtual int Write(int fd, const char* buf, off_t offset, size_t length); @@ -135,21 +138,25 @@ class FileClient { virtual int Discard(int fd, off_t offset, size_t length); /** - * 异步模式读 - * @param: fd为当前open返回的文件描述符 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * Asynchronous mode read + * @param: fd is the file descriptor returned by the current open + * @param: aioctx is an asynchronous read/write IO context that stores basic + * IO information * @param dataType type of aioctx->buf, default is `UserDataType::RawBuffer` - * @return: 成功返回读取字节数,否则返回小于0的错误码 + * @return: Successfully returned the number of bytes read, otherwise an + * error code less than 0 will be returned */ virtual int AioRead(int fd, CurveAioContext* aioctx, UserDataType dataType = UserDataType::RawBuffer); /** - * 异步模式写 - * @param: fd为当前open返回的文件描述符 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * Asynchronous mode write + * @param: fd is the file descriptor returned by the current open + * @param: aioctx is an asynchronous read/write IO context that stores basic + * IO information * @param dataType type of aioctx->buf, default is `UserDataType::RawBuffer` - * @return: 成功返回写入字节数,否则返回小于0的错误码 + * @return: Successfully returns the number of bytes written, otherwise + * returns an error code less than 0 */ virtual int AioWrite(int fd, CurveAioContext* aioctx, UserDataType dataType = UserDataType::RawBuffer); @@ -163,33 +170,31 @@ class FileClient { virtual int AioDiscard(int fd, CurveAioContext* aioctx); /** - * 重命名文件 - * @param: userinfo是用户信息 - * @param: oldpath源路劲 - * @param: newpath目标路径 + * Rename File + * @param: userinfo is the user information + * @param: oldpath Yuanlujin + * @param: newpath Target Path */ - virtual int Rename(const UserInfo_t& userinfo, - const std::string& oldpath, + virtual int Rename(const UserInfo_t& userinfo, const std::string& oldpath, const std::string& newpath); /** - * 扩展文件 - * @param: userinfo是用户信息 - * @param: filename文件名 - * @param: newsize新的size + * Extension file + * @param: userinfo is the user 
information + * @param: filename File name + * @param: newsize New size */ - virtual int Extend(const std::string& filename, - const UserInfo_t& userinfo, + virtual int Extend(const std::string& filename, const UserInfo_t& userinfo, uint64_t newsize); /** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @param: deleteforce=true只能用于从回收站删除,false为放入垃圾箱 + * Delete files + * @param: userinfo is the user information + * @param: filename The file name to be deleted + * @param: deleteforce=true can only be used to delete from the recycle bin, + * false means to put it in the trash can */ - virtual int Unlink(const std::string& filename, - const UserInfo_t& userinfo, + virtual int Unlink(const std::string& filename, const UserInfo_t& userinfo, bool deleteforce = false); /** @@ -198,96 +203,98 @@ class FileClient { * @param: filename * @param: fileId */ - virtual int Recover(const std::string& filename, - const UserInfo_t& userinfo, + virtual int Recover(const std::string& filename, const UserInfo_t& userinfo, uint64_t fileId); /** - * 枚举目录内容 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @param[out]: filestatVec当前文件夹内的文件信息 + * Enumerate directory contents + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @param[out]: filestatVec File information in the current folder */ - virtual int Listdir(const std::string& dirpath, - const UserInfo_t& userinfo, + virtual int Listdir(const std::string& dirpath, const UserInfo_t& userinfo, std::vector* filestatVec); /** - * 创建目录 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 + * Create directory + * @param: userinfo is the user information + * @param: dirpath is the directory path */ virtual int Mkdir(const std::string& dirpath, const UserInfo_t& userinfo); /** - * 删除目录 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 + * Delete directory + * @param: userinfo is the user information + * @param: dirpath is the directory path */ virtual int Rmdir(const std::string& dirpath, const UserInfo_t& userinfo); /** - * 获取文件信息 - * @param: filename文件名 - * @param: userinfo是用户信息 - * @param: finfo是出参,携带当前文件的基础信息 - * @return: 成功返回int::OK,否则返回小于0的错误码 + * Obtain file information + * @param: filename File name + * @param: userinfo is the user information + * @param: finfo is an output parameter that carries the basic information + * of the current file + * @return: Success returns int::OK, otherwise an error code less than 0 + * will be returned */ virtual int StatFile(const std::string& filename, - const UserInfo_t& userinfo, - FileStatInfo* finfo); - - /** - * stat file - * @param: fd is file descriptor. - * @param: finfo is an output para, carry the base info of current file. - * @return: returns int::ok if success, - * otherwise returns an error code less than 0 - */ + const UserInfo_t& userinfo, FileStatInfo* finfo); + + /** + * stat file + * @param: fd is file descriptor. + * @param: finfo is an output para, carry the base info of current file. 
+ * @return: returns int::ok if success, + * otherwise returns an error code less than 0 + */ virtual int StatFile(int fd, FileStatInfo* finfo); /** - * 变更owner - * @param: filename待变更的文件名 - * @param: newOwner新的owner信息 - * @param: userinfo执行此操作的user信息,只有root用户才能执行变更 - * @return: 成功返回0, - * 否则返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Change owner + * @param: filename The file name to be changed + * @param: newOwner New owner information + * @param: userinfo The user information for performing this operation, only + * the root user can perform changes + * @return: Returns 0 on success, + * otherwise returns + * -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc. */ virtual int ChangeOwner(const std::string& filename, const std::string& newOwner, const UserInfo_t& userinfo); /** - * close通过fd找到对应的instance进行删除 - * @param: fd为当前open返回的文件描述符 - * @return: 成功返回int::OK,否则返回小于0的错误码 + * Close finds the corresponding instance through fd and deletes it + * @param: fd is the file descriptor returned by the current open + * @return: Returns int::OK on success, otherwise an error code less than 0 + * is returned */ virtual int Close(int fd); /** - * 析构,回收资源 + * Deinitialize and reclaim resources */ virtual void UnInit(); /** - * @brief: 获取集群id - * @param: buf存放集群id - * @param: buf的长度 - * @return: 成功返回0, 失败返回-LIBCURVE_ERROR::FAILED + * @brief: Obtain cluster ID + * @param: buf is the buffer that stores the cluster ID + * @param: len is the length of buf + * @return: Returns 0 on success, -LIBCURVE_ERROR::FAILED on failure */ int GetClusterId(char* buf, int len); /** - * @brief 获取集群id - * @return 成功返回集群id,失败返回空 + * @brief Get cluster ID + * @return the cluster ID on success, an empty string on failure */ std::string GetClusterId(); /** - * @brief 获取文件信息,测试使用 - * @param fd 文件句柄 - * @param[out] finfo 文件信息 - * @return 成功返回0,失败返回-LIBCURVE_ERROR::FAILED + * @brief Obtain file information, for testing + * @param fd file handle + * @param[out] finfo file information + * @return returns 0 on success, -LIBCURVE_ERROR::FAILED on failure */ int GetFileInfo(int fd, FInfo* finfo); @@ -295,33 +302,33 @@ class FileClient { std::vector<std::string> ListPoolset(); /** - * 测试使用,获取当前挂载文件数量 - * @return 返回当前挂载文件数量 + * For testing: obtain the current number of mounted files + * @return the current number of mounted files */ - uint64_t GetOpenedFileNum() const { - return openedFileNum_.get_value(); - } + uint64_t GetOpenedFileNum() const { return openedFileNum_.get_value(); } private: - static void BuildFileStatInfo(const FInfo_t &fi, FileStatInfo *finfo); + static void BuildFileStatInfo(const FInfo_t& fi, FileStatInfo* finfo); bool StartDummyServer(); private: BthreadRWLock rwlock_; - // 向上返回的文件描述符,对于QEMU来说,一个vdisk对应一个文件描述符 + // The file descriptor returned to the upper layer; for QEMU, one vdisk + // corresponds to one file descriptor std::atomic<uint64_t> fdcount_; - // 每个vdisk都有一个FileInstance,通过返回的fd映射到对应的instance + // Each vdisk has a FileInstance, which is mapped to the corresponding + // instance through the returned fd std::unordered_map<int, FileInstance*> fileserviceMap_; // std::unordered_map fileserviceFileNameMap_; - // FileClient配置 + // FileClient configuration ClientConfig clientconfig_; - // fileclient对应的全局mdsclient + // Global mdsclient corresponding to fileclient std::shared_ptr<MDSClient> mdsClient_; // chunkserver client @@ -329,10 +336,10 @@ class FileClient { // chunkserver broadCaster std::shared_ptr<ChunkServerBroadCaster> csBroadCaster_; - // 是否初始化成功 + // Whether initialization succeeded bool inited_; - // 挂载文件数量 + // Number of mounted files bvar::Adder<uint64_t> openedFileNum_; };
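As context for the translated FileClient comments above: typical usage is Init with a config path, Open to obtain an fd, Read or Write against that fd, then Close and UnInit. A minimal sketch under those assumptions; the config path, owner string, and error handling below are illustrative and not part of this patch:

#include <string>

#include "src/client/libcurve_file.h"

// Read the first 4 KiB of a volume through FileClient (illustrative only).
int ReadFirst4K(const std::string& confpath, const std::string& filename) {
    curve::client::FileClient client;
    if (client.Init(confpath) != 0) {
        return -1;  // configuration load failed
    }

    curve::client::UserInfo_t user;
    user.owner = "example_owner";  // hypothetical owner

    int fd = client.Open(filename, user);  // default OpenFlags
    if (fd < 0) {
        client.UnInit();
        return fd;
    }

    char buf[4096];
    int nread = client.Read(fd, buf, /*offset=*/0, sizeof(buf));

    client.Close(fd);  // releases the FileInstance mapped to this fd
    client.UnInit();
    return nread < 0 ? nread : 0;
}

diff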
--git a/src/client/libcurve_snapshot.h b/src/client/libcurve_snapshot.h index d8b2ce841a..24f9d2f163 100644 --- a/src/client/libcurve_snapshot.h +++ b/src/client/libcurve_snapshot.h @@ -27,305 +27,304 @@ #include #include -#include "src/client/mds_client.h" -#include "src/client/config_info.h" #include "src/client/client_common.h" +#include "src/client/config_info.h" #include "src/client/iomanager4chunk.h" +#include "src/client/mds_client.h" namespace curve { namespace client { -// SnapshotClient为外围快照系统与MDS和Chunkserver通信的出口 +// SnapshotClient is the gateway through which the peripheral snapshot system +// communicates with MDS and Chunkserver class SnapshotClient { public: - SnapshotClient(); - ~SnapshotClient() = default; - /** - * 初始化函数,外围系统直接传入配置选项 - * @param: opt为外围配置选项 - * @return:0为成功,-1为失败 - */ - int Init(const ClientConfigOption& opt); + SnapshotClient(); + ~SnapshotClient() = default; + /** + * Initialization function, the peripheral system directly passes in + * configuration options + * @param: opt is the peripheral configuration option + * @return: 0 indicates success, -1 indicates failure + */ + int Init(const ClientConfigOption& opt); - /** - * file对象初始化函数 - * @param: 配置文件路径 - */ - int Init(const std::string& configpath); + /** + * file object initialization function + * @param: Configuration file path + */ + int Init(const std::string& configpath); - /** - * 创建快照 - * @param: userinfo是用户信息 - * @param: filename为要创建快照的文件名 - * @param: seq是出参,获取该文件的版本信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int CreateSnapShot(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t* seq); - /** - * 删除快照 - * @param: userinfo是用户信息 - * @param: filename为要删除的文件名 - * @param: seq该文件的版本信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int DeleteSnapShot(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t seq); - /** - * 获取快照对应的文件信息 - * @param: userinfo是用户信息 - * @param: filename为对应的文件名 - * @param: seq为该文件打快照时对应的版本信息 - * @param: snapinfo是出参,保存当前文件的基础信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int GetSnapShot(const std::string& fname, - const UserInfo_t& userinfo, - uint64_t seq, - FInfo* snapinfo); - /** - * 列出当前文件对应版本列表的文件信息 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seqvec是当前文件的版本列表 - * @param: snapif是出参,获取多个seq号的文件信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int ListSnapShot(const std::string& filename, - const UserInfo_t& userinfo, - const std::vector<uint64_t>* seqvec, - std::map<uint64_t, FInfo>* snapif); - /** - * 获取快照数据segment信息 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seq是文件版本号信息 - * @param: offset是文件的偏移 - * @param:segInfo是出参,保存当前文件的快照segment信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int GetSnapshotSegmentInfo(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t seq, - uint64_t offset, - SegmentInfo *segInfo); + /** + * Create a snapshot + * @param: userinfo is the user information + * @param: filename is the file name to create the snapshot + * @param: seq is the output parameter to obtain the version information of + * the file + * @return: Returns LIBCURVE_ERROR::OK on success, otherwise + * LIBCURVE_ERROR::FAILED + */ + int CreateSnapShot(const std::string& filename, const UserInfo_t& userinfo, + uint64_t* seq); + /** + * Delete snapshot + * @param: userinfo is the user information + * @param: filename is the file name to be deleted + * @param: seq The version information of this file + * @return: Returns
LIBCURVE_ERROR::OK on success, otherwise + * LIBCURVE_ERROR::FAILED + */ + int DeleteSnapShot(const std::string& filename, const UserInfo_t& userinfo, + uint64_t seq); + /** + * Obtain file information corresponding to the snapshot + * @param: userinfo is the user information + * @param: filename is the corresponding file name + * @param: seq corresponds to the version information when taking a snapshot + * of the file + * @param: snapinfo is an output parameter that saves the basic information + * of the current file + * @return: Returns LIBCURVE_ERROR::OK on success, otherwise + * LIBCURVE_ERROR::FAILED + */ + int GetSnapShot(const std::string& fname, const UserInfo_t& userinfo, + uint64_t seq, FInfo* snapinfo); + /** + * List the file information corresponding to the version list of the + * current file + * @param: userinfo is the user information + * @param: filename file name + * @param: seqvec is the version list of the current file + * @param: snapif is an output parameter that obtains file information for + * multiple seq numbers + * @return: Returns LIBCURVE_ERROR::OK on success, otherwise + * LIBCURVE_ERROR::FAILED + */ + int ListSnapShot(const std::string& filename, const UserInfo_t& userinfo, + const std::vector<uint64_t>* seqvec, + std::map<uint64_t, FInfo>* snapif); + /** + * Obtain snapshot data segment information + * @param: userinfo is the user information + * @param: filename file name + * @param: seq is the file version number information + * @param: offset is the offset of the file + * @param: segInfo is an output parameter that saves the snapshot segment + * information of the current file + * @return: Returns LIBCURVE_ERROR::OK on success, otherwise + * LIBCURVE_ERROR::FAILED + */ + int GetSnapshotSegmentInfo(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, + uint64_t offset, SegmentInfo* segInfo); - /** - * 读取seq版本号的快照数据 - * @param: cidinfo是当前chunk对应的id信息 - * @param: seq是快照版本号 - * @param: offset是快照内的offset - * @param: len是要读取的长度 - * @param: buf是读取缓冲区 - * @param: scc是异步回调 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, - uint64_t len, char *buf, SnapCloneClosure* scc); - /** - * 删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param: cidinfo是当前chunk对应的id信息 - * @param: correctedSeq是chunk需要修正的版本 - */ - int DeleteChunkSnapshotOrCorrectSn(ChunkIDInfo cidinfo, - uint64_t correctedSeq); - /** - * 获取chunk的版本信息,chunkInfo是出参 - * @param: cidinfo是当前chunk对应的id信息 - * @param: chunkInfo是快照的详细信息 - */ - int GetChunkInfo(ChunkIDInfo cidinfo, ChunkInfoDetail *chunkInfo); - /** - * 获取快照状态 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seq是文件版本号信息 - */ - int CheckSnapShotStatus(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t seq, - FileStatus* filestatus); - /** - * @brief 创建clone文件 - * @detail - * - 若是clone,sn重置为初始值 - * - 若是recover,sn不变 - * - * @param source clone源文件名 - * @param: destination clone目标文件名 - * @param: userinfo 用户信息 - * @param: size 文件大小 - * @param: sn 版本号 - * @param: chunksize是要创建文件的chunk大小 - * @param stripeUnit stripe size - * @param stripeCount stripe count - * @param poolset poolset of destination file - * @param[out] fileinfo 创建的目标文件的文件信息 - * - * @return 错误码 - */ - int CreateCloneFile(const std::string& source, - const std::string& destination, - const UserInfo_t& userinfo, - uint64_t size, - uint64_t sn, - uint32_t chunksize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - FInfo* fileinfo); + /** + * Read snapshot data of seq
version number + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: seq is the snapshot version number + * @param: offset is the offset within the snapshot + * @param: len is the length to be read + * @param: buf is a read buffer + * @param: scc is an asynchronous callback + * @return: Returns LIBCURVE_ERROR::OK on success, otherwise + * LIBCURVE_ERROR::FAILED + */ + int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc); + /** + * Delete snapshots generated during this dump or left over from history. + * If no snapshot was generated during the dump, modify the correctedSn + * of the chunk + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: correctedSeq is the version of chunk that needs to be corrected + */ + int DeleteChunkSnapshotOrCorrectSn(ChunkIDInfo cidinfo, + uint64_t correctedSeq); + /** + * Obtain the version information of the chunk, where chunkInfo is the + * output parameter + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: chunkInfo is the detailed information of the snapshot + */ + int GetChunkInfo(ChunkIDInfo cidinfo, ChunkInfoDetail* chunkInfo); + /** + * Get snapshot status + * @param: userinfo is the user information + * @param: filename file name + * @param: seq is the file version number information + */ + int CheckSnapShotStatus(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, + FileStatus* filestatus); + /** + * @brief Create clone file + * @detail + * - If clone, reset sn to initial value + * - If recover, sn remains unchanged + * + * @param source clone source file name + * @param: destination clone destination file name + * @param: userinfo User information + * @param: size File size + * @param: sn version number + * @param: chunksize is the chunk size of the file to be created + * @param stripeUnit stripe size + * @param stripeCount stripe count + * @param poolset poolset of destination file + * @param[out] fileinfo The file information of the target file created + * + * @return error code + */ + int CreateCloneFile(const std::string& source, + const std::string& destination, + const UserInfo_t& userinfo, uint64_t size, uint64_t sn, + uint32_t chunksize, uint64_t stripeUnit, + uint64_t stripeCount, const std::string& poolset, + FInfo* fileinfo); - /** - * @brief lazy 创建clone chunk - * @param:location 数据源的url - * @param:chunkidinfo 目标chunk - * @param:sn chunk的序列号 - * @param:chunkSize chunk的大小 - * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn - * @param: scc是异步回调 - * - * @return 错误码 - */ - int CreateCloneChunk(const std::string &location, - const ChunkIDInfo &chunkidinfo, uint64_t sn, - uint64_t correntSn, uint64_t chunkSize, - SnapCloneClosure* scc); + /** + * @brief Lazily create a clone chunk + * @param: location URL of the data source + * @param: chunkidinfo target chunk + * @param: sn chunk's serial number + * @param: chunkSize Chunk size + * @param: correntSn is used to modify the chunk's correctedSn in + * CreateCloneChunk + * @param: scc is an asynchronous callback + * + * @return error code + */ + int CreateCloneChunk(const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, + uint64_t correntSn, uint64_t chunkSize, + SnapCloneClosure* scc); - /** - * @brief 实际恢复chunk数据 - * - * @param:chunkidinfo chunkidinfo - * @param:offset 偏移 - * @param:len 长度 - * @param: scc是异步回调 - * - * @return 错误码 - */ - int RecoverChunk(const ChunkIDInfo
&chunkidinfo, - uint64_t offset, uint64_t len, - SnapCloneClosure* scc); + /** + * @brief Recover the actual chunk data + * + * @param: chunkidinfo chunkidinfo + * @param: offset offset + * @param: len length + * @param: scc is an asynchronous callback + * + * @return error code + */ + int RecoverChunk(const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc); - /** - * @brief 通知mds完成Clone Meta - * - * @param:destination 目标文件 - * @param:userinfo用户信息 - * - * @return 错误码 - */ - int CompleteCloneMeta(const std::string &destination, - const UserInfo_t& userinfo); + /** + * @brief Notify mds to complete Clone Meta + * + * @param: destination target file + * @param: userinfo User information + * + * @return error code + */ + int CompleteCloneMeta(const std::string& destination, + const UserInfo_t& userinfo); - /** - * @brief 通知mds完成Clone Chunk - * - * @param:destination 目标文件 - * @param:userinfo用户信息 - * - * @return 错误码 - */ - int CompleteCloneFile(const std::string &destination, - const UserInfo_t& userinfo); + /** + * @brief Notify mds to complete Clone Chunk + * + * @param: destination target file + * @param: userinfo User information + * + * @return error code + */ + int CompleteCloneFile(const std::string& destination, + const UserInfo_t& userinfo); - /** - * 设置clone文件状态 - * @param: filename 目标文件 - * @param: filestatus为要设置的目标状态 - * @param: userinfo用户信息 - * @param: fileId为文件ID信息,非必填 - * - * @return 错误码 - */ - int SetCloneFileStatus(const std::string &filename, - const FileStatus& filestatus, - const UserInfo_t& userinfo, - uint64_t fileID = 0); + /** + * Set clone file status + * @param: filename Target file + * @param: filestatus is the target state to be set + * @param: userinfo User information + * @param: fileId is the file ID information, optional + * + * @return error code + */ + int SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const UserInfo_t& userinfo, uint64_t fileID = 0); - /** - * @brief 获取文件信息 - * - * @param:filename 文件名 - * @param:userinfo 用户信息 - * @param[out] fileInfo 文件信息 - * - * @return 错误码 - */ - int GetFileInfo(const std::string &filename, - const UserInfo_t& userinfo, - FInfo* fileInfo); + /** + * @brief Get file information + * + * @param: filename File name + * @param: userinfo User information + * @param[out] fileInfo file information + * + * @return error code + */ + int GetFileInfo(const std::string& filename, const UserInfo_t& userinfo, + FInfo* fileInfo); - /** - * @brief 查询或分配文件segment信息 - * - * @param:userinfo 用户信息 - * @param:offset 偏移值 - * @param:segInfo segment信息 - * - * @return 错误码 - */ - int GetOrAllocateSegmentInfo(bool allocate, - uint64_t offset, - const FInfo_t* fi, - SegmentInfo *segInfo); + /** + * @brief Query or allocate file segment information + * + * @param: userinfo User information + * @param: offset offset value + * @param: segInfo segment information + * + * @return error code + */ + int GetOrAllocateSegmentInfo(bool allocate, uint64_t offset, + const FInfo_t* fi, SegmentInfo* segInfo); - /** - * @brief 为recover rename复制的文件 - * - * @param:userinfo 用户信息 - * @param:originId 被恢复的原始文件Id - * @param:destinationId 克隆出的目标文件Id - * @param:origin 被恢复的原始文件名 - * @param:destination 克隆出的目标文件 - * - * @return 错误码 - */ - int RenameCloneFile(const UserInfo_t& userinfo, - uint64_t originId, - uint64_t destinationId, - const std::string &origin, - const std::string &destination); + /** + * @brief Rename the file cloned for recover + * + * @param: userinfo User information + * @param: originId The
original file ID that was restored + * @param: destinationId The cloned target file ID + * @param: origin The original file name of the recovered file + * @param: destination The cloned target file + * + * @return error code + */ + int RenameCloneFile(const UserInfo_t& userinfo, uint64_t originId, + uint64_t destinationId, const std::string& origin, + const std::string& destination); - /** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @param: id为文件id,默认值为0,如果用户不指定该值,不会传id到mds - */ - int DeleteFile(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t id = 0); + /** + * Delete a file + * @param: userinfo is the user information + * @param: filename The file name to be deleted + * @param: id is the file id, with a default value of 0. If the user does + * not specify this value, the id will not be passed to mds + */ + int DeleteFile(const std::string& filename, const UserInfo_t& userinfo, + uint64_t id = 0); - /** - * 析构,回收资源 - */ - void UnInit(); - /** - * 获取iomanager信息,测试代码使用 - */ - IOManager4Chunk* GetIOManager4Chunk() {return &iomanager4chunk_;} + /** + * Deinitialize and reclaim resources + */ + void UnInit(); + /** + * Obtain the iomanager, used by test code + */ + IOManager4Chunk* GetIOManager4Chunk() { return &iomanager4chunk_; } private: - /** - * 获取logicalpool中copyset的serverlist - * @param: lpid是逻辑池id - * @param: csid是逻辑池中的copysetid数据集 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int GetServerList(const LogicPoolID& lpid, - const std::vector<CopysetID>& csid); + /** + * Obtain the serverlist of copyset in the logicalpool + * @param: lpid is the logical pool id + * @param: csid is the copysetid dataset in the logical pool + * @return: Returns LIBCURVE_ERROR::OK on success, otherwise + * LIBCURVE_ERROR::FAILED + */ + int GetServerList(const LogicPoolID& lpid, + const std::vector<CopysetID>& csid); private: - // MDSClient负责与Metaserver通信,所有通信都走这个接口 - MDSClient mdsclient_; + // MDSClient is responsible for communicating with Metaserver, and all + // communication goes through this interface + MDSClient mdsclient_; - // IOManager4Chunk用于管理发向chunkserver端的IO - IOManager4Chunk iomanager4chunk_; + // IOManager4Chunk is used to manage IO sent to the chunkserver end + IOManager4Chunk iomanager4chunk_; - // 用于client 配置读取 - ClientConfig clientconfig_; + // Used for client configuration reading + ClientConfig clientconfig_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_LIBCURVE_SNAPSHOT_H_ diff --git a/src/client/mds_client.cpp b/src/client/mds_client.cpp index 9ace95e823..e8d8a35f6d 100644 --- a/src/client/mds_client.cpp +++ b/src/client/mds_client.cpp @@ -21,11 +21,11 @@ */ #include "src/client/mds_client.h" -#include #include +#include -#include #include +#include #include "src/client/lease_executor.h" #include "src/common/net_common.h" @@ -35,6 +35,7 @@ namespace curve { namespace client { +using curve::common::ChunkServerLocation; using curve::common::NetCommon; using curve::common::TimeUtility; using curve::mds::FileInfo; @@ -42,24 +43,23 @@ using curve::mds::PageFileChunkInfo; using curve::mds::PageFileSegment; using curve::mds::ProtoSession; using curve::mds::StatusCode; -using curve::common::ChunkServerLocation; using curve::mds::topology::CopySetServerInfo; -// rpc发送和mds地址切换状态机 +// RPC sending and MDS address switching state machine int RPCExcutorRetryPolicy::DoRPCTask(RPCFunc rpctask, uint64_t maxRetryTimeMS) { - // 记录上一次正在服务的mds index + // Record the last serving mds
index int lastWorkingMDSIndex = currentWorkingMDSAddrIndex_; - // 记录当前正在使用的mds index + // Record the currently used mds index int curRetryMDSIndex = currentWorkingMDSAddrIndex_; - // 记录当前mds重试的次数 + // Record the number of current mds retries uint64_t currentMDSRetryCount = 0; - // 执行起始时间点 + // Execution start time point uint64_t startTime = TimeUtility::GetTimeofDayMs(); - // rpc超时时间 + // RPC timeout uint64_t rpcTimeOutMS = retryOpt_.rpcTimeoutMs; // The count of normal retry @@ -68,16 +68,18 @@ int RPCExcutorRetryPolicy::DoRPCTask(RPCFunc rpctask, uint64_t maxRetryTimeMS) { int retcode = -1; bool retryUnlimit = (maxRetryTimeMS == 0); while (GoOnRetry(startTime, maxRetryTimeMS)) { - // 1. 创建当前rpc需要使用的channel和controller,执行rpc任务 + // 1. Create the channels and controllers required for the current RPC + // and execute the RPC task retcode = ExcuteTask(curRetryMDSIndex, rpcTimeOutMS, rpctask); - // 2. 根据rpc返回值进行预处理 + // 2. Preprocessing based on rpc return value if (retcode < 0) { curRetryMDSIndex = PreProcessBeforeRetry( retcode, retryUnlimit, &normalRetryCount, ¤tMDSRetryCount, curRetryMDSIndex, &lastWorkingMDSIndex, &rpcTimeOutMS); continue; - // 3. 此时rpc是正常返回的,更新当前正在服务的mds地址index + // 3. At this point, rpc returns normally and updates the index of + // the currently serving mds address } else { currentWorkingMDSAddrIndex_.store(curRetryMDSIndex); break; @@ -98,11 +100,11 @@ bool RPCExcutorRetryPolicy::GoOnRetry(uint64_t startTimeMS, } int RPCExcutorRetryPolicy::PreProcessBeforeRetry(int status, bool retryUnlimit, - uint64_t *normalRetryCount, - uint64_t *curMDSRetryCount, + uint64_t* normalRetryCount, + uint64_t* curMDSRetryCount, int curRetryMDSIndex, - int *lastWorkingMDSIndex, - uint64_t *timeOutMS) { + int* lastWorkingMDSIndex, + uint64_t* timeOutMS) { int nextMDSIndex = 0; bool rpcTimeout = false; bool needChangeMDS = false; @@ -115,44 +117,48 @@ int RPCExcutorRetryPolicy::PreProcessBeforeRetry(int status, bool retryUnlimit, bthread_usleep(retryOpt_.waitSleepMs * 1000); } - // 1. 访问存在的IP地址,但无人监听:ECONNREFUSED - // 2. 正常发送RPC情况下,对端进程挂掉了:EHOSTDOWN - // 3. 对端server调用了Stop:ELOGOFF - // 4. 对端链接已关闭:ECONNRESET - // 5. 在一个mds节点上rpc失败超过限定次数 - // 在这几种场景下,主动切换mds。 + // 1. Access to an existing IP address, but no one is listening: + // ECONNREFUSED + // 2. In the normal RPC scenario, the remote process has crashed: + // EHOSTDOWN + // 3. The remote server called Stop: ELOGOFF + // 4. The remote connection has been closed: ECONNRESET + // 5. RPC failures on a single MDS node exceed the specified limit. + // In these scenarios, actively switch the MDS. } else if (status == -EHOSTDOWN || status == -ECONNRESET || status == -ECONNREFUSED || status == -brpc::ELOGOFF || *curMDSRetryCount >= retryOpt_.maxFailedTimesBeforeChangeAddr) { needChangeMDS = true; - // 在开启健康检查的情况下,在底层tcp连接失败时 - // rpc请求会本地直接返回 EHOSTDOWN - // 这种情况下,增加一些睡眠时间,避免大量的重试请求占满bthread - // TODO(wuhanqing): 关闭健康检查 + // When health checks are enabled, in the event of a failure in the + // underlying TCP connection, RPC requests will directly return + // EHOSTDOWN locally. In this situation, add some sleep time to avoid a + // large number of retry requests overwhelming bthread. + // TODO(wuhanqing): Disable health checks. 
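The branch that follows doubles the RPC timeout on -brpc::ERPCTIMEDOUT or -ETIMEDOUT and clamps it between the configured base and ceiling. A standalone sketch of that back-off rule; the function name and the example bounds are illustrative, not code from this patch:

#include <algorithm>
#include <cstdint>

// Next RPC timeout after a timeout error: double it, then clamp it into
// [rpcTimeoutMs, maxRPCTimeoutMS], mirroring the *timeOutMS updates below.
uint64_t NextRpcTimeoutMs(uint64_t currentMs, uint64_t baseMs, uint64_t maxMs) {
    uint64_t next = currentMs * 2;   // exponential backoff
    next = std::min(next, maxMs);    // never exceed the ceiling
    return std::max(next, baseMs);   // never drop below the base
}

// With baseMs = 500 and maxMs = 8000 the progression is
// 500 -> 1000 -> 2000 -> 4000 -> 8000 -> 8000 ...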
if (status == -EHOSTDOWN) { bthread_usleep(retryOpt_.rpcRetryIntervalUS); } } else if (status == -brpc::ERPCTIMEDOUT || status == -ETIMEDOUT) { rpcTimeout = true; needChangeMDS = false; - // 触发超时指数退避 + // Trigger exponential backoff of the timeout *timeOutMS *= 2; *timeOutMS = std::min(*timeOutMS, retryOpt_.maxRPCTimeoutMS); *timeOutMS = std::max(*timeOutMS, retryOpt_.rpcTimeoutMs); } - // 获取下一次需要重试的mds索引 + // Obtain the mds index that needs to be retried next time nextMDSIndex = GetNextMDSIndex(needChangeMDS, curRetryMDSIndex, lastWorkingMDSIndex); // NOLINT - // 更新curMDSRetryCount和rpctimeout + // Update curMDSRetryCount and rpctimeout if (nextMDSIndex != curRetryMDSIndex) { *curMDSRetryCount = 0; *timeOutMS = retryOpt_.rpcTimeoutMs; } else { ++(*curMDSRetryCount); - // 还是在当前mds上重试,且rpc不是超时错误,就进行睡眠,然后再重试 + // Still retrying on the current mds; if the rpc error is not a timeout, + // sleep before retrying if (!rpcTimeout) { bthread_usleep(retryOpt_.rpcRetryIntervalUS); } @@ -161,20 +167,21 @@ int RPCExcutorRetryPolicy::PreProcessBeforeRetry(int status, bool retryUnlimit, return nextMDSIndex; } /** - * 根据输入状态获取下一次需要重试的mds索引,mds切换逻辑: - * 记录三个状态:curRetryMDSIndex、lastWorkingMDSIndex、 - * currentWorkingMDSIndex - * 1. 开始的时候curRetryMDSIndex = currentWorkingMDSIndex + * Obtain the next MDS index to retry based on the input state. The MDS + * switching logic is as follows: Record three states: curRetryMDSIndex, + * lastWorkingMDSIndex, currentWorkingMDSIndex + * 1. At the beginning, curRetryMDSIndex = currentWorkingMDSIndex * lastWorkingMDSIndex = currentWorkingMDSIndex - * 2. 如果rpc失败,会触发切换curRetryMDSIndex,如果这时候lastWorkingMDSIndex - * 与currentWorkingMDSIndex相等,这时候会顺序切换到下一个mds索引, - * 如果lastWorkingMDSIndex与currentWorkingMDSIndex不相等,那么 - * 说明有其他接口更新了currentWorkingMDSAddrIndex_,那么本次切换 - * 直接切换到currentWorkingMDSAddrIndex_ + * 2. If an RPC fails, it triggers a switch of curRetryMDSIndex. If at this + * point, lastWorkingMDSIndex is equal to currentWorkingMDSIndex, then + * sequentially switch to the next MDS index. If lastWorkingMDSIndex is not + * equal to currentWorkingMDSIndex, it means that another interface has updated + * currentWorkingMDSAddrIndex_, so this time, switch directly to + * currentWorkingMDSAddrIndex_. */ int RPCExcutorRetryPolicy::GetNextMDSIndex(bool needChangeMDS, int currentRetryIndex, - int *lastWorkingindex) { + int* lastWorkingindex) { int nextMDSIndex = 0; if (std::atomic_compare_exchange_strong( &currentWorkingMDSAddrIndex_, lastWorkingindex, @@ -194,13 +201,14 @@ int RPCExcutorRetryPolicy::ExcuteTask(int mdsindex, uint64_t rpcTimeOutMS, assert(mdsindex >= 0 && mdsindex < static_cast<int>(retryOpt_.addrs.size())); - const std::string &mdsaddr = retryOpt_.addrs[mdsindex]; + const std::string& mdsaddr = retryOpt_.addrs[mdsindex]; brpc::Channel channel; int ret = channel.Init(mdsaddr.c_str(), nullptr); if (ret != 0) { LOG(WARNING) << "Init channel failed!
addr = " << mdsaddr; - // 返回EHOSTDOWN给上层调用者,促使其切换mds + // Return EHOSTDOWN to the upper level caller, prompting them to switch + // mds return -EHOSTDOWN; } @@ -211,14 +219,15 @@ int RPCExcutorRetryPolicy::ExcuteTask(int mdsindex, uint64_t rpcTimeOutMS, return task(mdsindex, rpcTimeOutMS, &channel, &cntl); } - -MDSClient::MDSClient(const std::string &metricPrefix) - : inited_(false), metaServerOpt_(), mdsClientMetric_(metricPrefix), +MDSClient::MDSClient(const std::string& metricPrefix) + : inited_(false), + metaServerOpt_(), + mdsClientMetric_(metricPrefix), rpcExcutor_() {} MDSClient::~MDSClient() { UnInitialize(); } -LIBCURVE_ERROR MDSClient::Initialize(const MetaServerOption &metaServerOpt) { +LIBCURVE_ERROR MDSClient::Initialize(const MetaServerOption& metaServerOpt) { if (inited_) { LOG(INFO) << "MDSClient already started!"; return LIBCURVE_ERROR::OK; @@ -229,7 +238,7 @@ LIBCURVE_ERROR MDSClient::Initialize(const MetaServerOption &metaServerOpt) { rpcExcutor_.SetOption(metaServerOpt.rpcRetryOpt); std::ostringstream oss; - for (const auto &addr : metaServerOpt_.rpcRetryOpt.addrs) { + for (const auto& addr : metaServerOpt_.rpcRetryOpt.addrs) { oss << " " << addr; } @@ -238,19 +247,15 @@ LIBCURVE_ERROR MDSClient::Initialize(const MetaServerOption &metaServerOpt) { return LIBCURVE_ERROR::OK; } - -void MDSClient::UnInitialize() { - inited_ = false; -} +void MDSClient::UnInitialize() { inited_ = false; } #define RPCTaskDefine \ [&](CURVE_UNUSED int addrindex, CURVE_UNUSED uint64_t rpctimeoutMS, \ brpc::Channel* channel, brpc::Controller* cntl) -> int -LIBCURVE_ERROR MDSClient::OpenFile(const std::string &filename, - const UserInfo_t &userinfo, FInfo_t *fi, - FileEpoch_t *fEpoch, - LeaseSession *lease) { +LIBCURVE_ERROR MDSClient::OpenFile(const std::string& filename, + const UserInfo_t& userinfo, FInfo_t* fi, + FileEpoch_t* fEpoch, LeaseSession* lease) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -278,12 +283,12 @@ LIBCURVE_ERROR MDSClient::OpenFile(const std::string &filename, bool flag = response.has_protosession() && response.has_fileinfo(); if (flag) { - const ProtoSession &leasesession = response.protosession(); + const ProtoSession& leasesession = response.protosession(); lease->sessionID = leasesession.sessionid(); lease->leaseTime = leasesession.leasetime(); lease->createTime = leasesession.createtime(); - const curve::mds::FileInfo &protoFileInfo = response.fileinfo(); + const curve::mds::FileInfo& protoFileInfo = response.fileinfo(); LOG(INFO) << "OpenFile succeeded, filename: " << filename << ", file info " << protoFileInfo.DebugString(); ServiceHelper::ProtoFileInfo2Local(protoFileInfo, fi, fEpoch); @@ -349,9 +354,9 @@ LIBCURVE_ERROR MDSClient::CreateFile(const CreateFileContext& context) { rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::CloseFile(const std::string &filename, - const UserInfo_t &userinfo, - const std::string &sessionid) { +LIBCURVE_ERROR MDSClient::CloseFile(const std::string& filename, + const UserInfo_t& userinfo, + const std::string& sessionid) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -385,9 +390,9 @@ LIBCURVE_ERROR MDSClient::CloseFile(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::GetFileInfo(const std::string &filename, - const UserInfo_t &uinfo, FInfo_t *fi, - FileEpoch_t *fEpoch) { +LIBCURVE_ERROR MDSClient::GetFileInfo(const std::string& filename, + const UserInfo_t& uinfo, FInfo_t* fi, + 
FileEpoch_t* fEpoch) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -421,19 +426,17 @@ LIBCURVE_ERROR MDSClient::GetFileInfo(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::IncreaseEpoch(const std::string& filename, - const UserInfo_t& userinfo, - FInfo_t* fi, - FileEpoch_t *fEpoch, - std::list> *csLocs) { +LIBCURVE_ERROR MDSClient::IncreaseEpoch( + const std::string& filename, const UserInfo_t& userinfo, FInfo_t* fi, + FileEpoch_t* fEpoch, std::list>* csLocs) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; IncreaseFileEpochResponse response; mdsClientMetric_.increaseEpoch.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.increaseEpoch.latency); - MDSClientBase::IncreaseEpoch( - filename, userinfo, &response, cntl, channel); + MDSClientBase::IncreaseEpoch(filename, userinfo, &response, cntl, + channel); if (cntl->Failed()) { mdsClientMetric_.increaseEpoch.eps.count << 1; @@ -445,10 +448,10 @@ LIBCURVE_ERROR MDSClient::IncreaseEpoch(const std::string& filename, LIBCURVE_ERROR retcode; MDSStatusCode2LibcurveError(stcode, &retcode); LOG(ERROR) << "IncreaseEpoch: filename = " << filename - << ", owner = " << userinfo.owner - << ", errocde = " << retcode - << ", error msg = " << StatusCode_Name(stcode) - << ", log id = " << cntl->log_id(); + << ", owner = " << userinfo.owner + << ", errocde = " << retcode + << ", error msg = " << StatusCode_Name(stcode) + << ", log id = " << cntl->log_id(); return retcode; } @@ -466,12 +469,12 @@ LIBCURVE_ERROR MDSClient::IncreaseEpoch(const std::string& filename, csinfo.peerID = response.cslocs(i).chunkserverid(); EndPoint internal; butil::str2endpoint(response.cslocs(i).hostip().c_str(), - response.cslocs(i).port(), &internal); + response.cslocs(i).port(), &internal); EndPoint external; const bool hasExternalIp = response.cslocs(i).has_externalip(); if (hasExternalIp) { butil::str2endpoint(response.cslocs(i).externalip().c_str(), - response.cslocs(i).port(), &external); + response.cslocs(i).port(), &external); } csinfo.internalAddr = PeerAddr(internal); csinfo.externalAddr = PeerAddr(external); @@ -508,10 +511,10 @@ LIBCURVE_ERROR MDSClient::CreateSnapShot(const std::string& filename, if ((stcode == StatusCode::kOK || stcode == StatusCode::kFileUnderSnapShot) && hasinfo) { - FInfo_t *fi = new (std::nothrow) FInfo_t; + FInfo_t* fi = new (std::nothrow) FInfo_t; FileEpoch_t fEpoch; - ServiceHelper::ProtoFileInfo2Local(response.snapshotfileinfo(), - fi, &fEpoch); + ServiceHelper::ProtoFileInfo2Local(response.snapshotfileinfo(), fi, + &fEpoch); *seq = fi->seqnum; delete fi; if (stcode == StatusCode::kOK) { @@ -527,8 +530,8 @@ LIBCURVE_ERROR MDSClient::CreateSnapShot(const std::string& filename, if (hasinfo) { FInfo_t fi; FileEpoch_t fEpoch; - ServiceHelper::ProtoFileInfo2Local(response.snapshotfileinfo(), - &fi, &fEpoch); // NOLINT + ServiceHelper::ProtoFileInfo2Local(response.snapshotfileinfo(), &fi, + &fEpoch); // NOLINT *seq = fi.seqnum; } @@ -545,8 +548,8 @@ LIBCURVE_ERROR MDSClient::CreateSnapShot(const std::string& filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::DeleteSnapShot(const std::string &filename, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::DeleteSnapShot(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq) { auto task = RPCTaskDefine { (void)addrindex; @@ -578,10 +581,10 @@ LIBCURVE_ERROR MDSClient::DeleteSnapShot(const std::string &filename, 
rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string &filename, - const UserInfo_t &userinfo, - const std::vector *seq, - std::map *snapif) { +LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string& filename, + const UserInfo_t& userinfo, + const std::vector* seq, + std::map* snapif) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -612,8 +615,8 @@ LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string &filename, for (int i = 0; i < response.fileinfo_size(); i++) { FInfo_t tempInfo; FileEpoch_t fEpoch; - ServiceHelper::ProtoFileInfo2Local(response.fileinfo(i), - &tempInfo, &fEpoch); + ServiceHelper::ProtoFileInfo2Local(response.fileinfo(i), &tempInfo, + &fEpoch); snapif->insert(std::make_pair(tempInfo.seqnum, tempInfo)); } @@ -628,10 +631,10 @@ LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::GetSnapshotSegmentInfo(const std::string &filename, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::GetSnapshotSegmentInfo(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, uint64_t offset, - SegmentInfo *segInfo) { + SegmentInfo* segInfo) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -692,11 +695,11 @@ LIBCURVE_ERROR MDSClient::GetSnapshotSegmentInfo(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::RefreshSession(const std::string &filename, - const UserInfo_t &userinfo, - const std::string &sessionid, - LeaseRefreshResult *resp, - LeaseSession *lease) { +LIBCURVE_ERROR MDSClient::RefreshSession(const std::string& filename, + const UserInfo_t& userinfo, + const std::string& sessionid, + LeaseRefreshResult* resp, + LeaseSession* lease) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -728,40 +731,39 @@ LIBCURVE_ERROR MDSClient::RefreshSession(const std::string &filename, } switch (stcode) { - case StatusCode::kSessionNotExist: - case StatusCode::kFileNotExists: - resp->status = LeaseRefreshResult::Status::NOT_EXIST; - break; - case StatusCode::kOwnerAuthFail: - resp->status = LeaseRefreshResult::Status::FAILED; - return LIBCURVE_ERROR::AUTHFAIL; - break; - case StatusCode::kOK: - if (response.has_fileinfo()) { - FileEpoch_t fEpoch; - ServiceHelper::ProtoFileInfo2Local(response.fileinfo(), - &resp->finfo, - &fEpoch); - resp->status = LeaseRefreshResult::Status::OK; - } else { - LOG(WARNING) << "session response has no fileinfo!"; - return LIBCURVE_ERROR::FAILED; - } - if (nullptr != lease) { - if (!response.has_protosession()) { - LOG(WARNING) << "session response has no protosession"; + case StatusCode::kSessionNotExist: + case StatusCode::kFileNotExists: + resp->status = LeaseRefreshResult::Status::NOT_EXIST; + break; + case StatusCode::kOwnerAuthFail: + resp->status = LeaseRefreshResult::Status::FAILED; + return LIBCURVE_ERROR::AUTHFAIL; + break; + case StatusCode::kOK: + if (response.has_fileinfo()) { + FileEpoch_t fEpoch; + ServiceHelper::ProtoFileInfo2Local(response.fileinfo(), + &resp->finfo, &fEpoch); + resp->status = LeaseRefreshResult::Status::OK; + } else { + LOG(WARNING) << "session response has no fileinfo!"; return LIBCURVE_ERROR::FAILED; } - ProtoSession leasesession = response.protosession(); - lease->sessionID = leasesession.sessionid(); - lease->leaseTime = leasesession.leasetime(); - lease->createTime = leasesession.createtime(); - } - break; - 
default: - resp->status = LeaseRefreshResult::Status::FAILED; - return LIBCURVE_ERROR::FAILED; - break; + if (nullptr != lease) { + if (!response.has_protosession()) { + LOG(WARNING) << "session response has no protosession"; + return LIBCURVE_ERROR::FAILED; + } + ProtoSession leasesession = response.protosession(); + lease->sessionID = leasesession.sessionid(); + lease->leaseTime = leasesession.leasetime(); + lease->createTime = leasesession.createtime(); + } + break; + default: + resp->status = LeaseRefreshResult::Status::FAILED; + return LIBCURVE_ERROR::FAILED; + break; } return LIBCURVE_ERROR::OK; }; @@ -769,10 +771,10 @@ LIBCURVE_ERROR MDSClient::RefreshSession(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::CheckSnapShotStatus(const std::string &filename, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::CheckSnapShotStatus(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, - FileStatus *filestatus) { + FileStatus* filestatus) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -807,9 +809,9 @@ LIBCURVE_ERROR MDSClient::CheckSnapShotStatus(const std::string &filename, } LIBCURVE_ERROR -MDSClient::GetServerList(const LogicPoolID &logicalpooid, - const std::vector ©setidvec, - std::vector> *cpinfoVec) { +MDSClient::GetServerList(const LogicPoolID& logicalpooid, + const std::vector& copysetidvec, + std::vector>* cpinfoVec) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -875,7 +877,7 @@ MDSClient::GetServerList(const LogicPoolID &logicalpooid, return ReturnError(rpcExcutor_.DoRPCTask(task, 0)); } -LIBCURVE_ERROR MDSClient::GetClusterInfo(ClusterContext *clsctx) { +LIBCURVE_ERROR MDSClient::GetClusterInfo(ClusterContext* clsctx) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -927,19 +929,14 @@ LIBCURVE_ERROR MDSClient::ListPoolset(std::vector* out) { }; return ReturnError( - rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); + rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::CreateCloneFile(const std::string& source, - const std::string& destination, - const UserInfo_t& userinfo, - uint64_t size, - uint64_t sn, - uint32_t chunksize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - FInfo* fileinfo) { +LIBCURVE_ERROR MDSClient::CreateCloneFile( + const std::string& source, const std::string& destination, + const UserInfo_t& userinfo, uint64_t size, uint64_t sn, uint32_t chunksize, + uint64_t stripeUnit, uint64_t stripeCount, const std::string& poolset, + FInfo* fileinfo) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -970,8 +967,8 @@ LIBCURVE_ERROR MDSClient::CreateCloneFile(const std::string& source, if (stcode == StatusCode::kOK) { FileEpoch_t fEpoch; - ServiceHelper::ProtoFileInfo2Local(response.fileinfo(), - fileinfo, &fEpoch); + ServiceHelper::ProtoFileInfo2Local(response.fileinfo(), fileinfo, + &fEpoch); fileinfo->sourceInfo.name = response.fileinfo().clonesource(); fileinfo->sourceInfo.length = response.fileinfo().clonelength(); } @@ -982,20 +979,20 @@ LIBCURVE_ERROR MDSClient::CreateCloneFile(const std::string& source, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::CompleteCloneMeta(const std::string &destination, - const UserInfo_t &userinfo) { +LIBCURVE_ERROR MDSClient::CompleteCloneMeta(const std::string& destination, + const UserInfo_t& userinfo) { return 
SetCloneFileStatus(destination, FileStatus::CloneMetaInstalled, userinfo); } -LIBCURVE_ERROR MDSClient::CompleteCloneFile(const std::string &destination, - const UserInfo_t &userinfo) { +LIBCURVE_ERROR MDSClient::CompleteCloneFile(const std::string& destination, + const UserInfo_t& userinfo) { return SetCloneFileStatus(destination, FileStatus::Cloned, userinfo); } -LIBCURVE_ERROR MDSClient::SetCloneFileStatus(const std::string &filename, - const FileStatus &filestatus, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const UserInfo_t& userinfo, uint64_t fileID) { auto task = RPCTaskDefine { (void)addrindex; @@ -1028,9 +1025,9 @@ LIBCURVE_ERROR MDSClient::SetCloneFileStatus(const std::string &filename, } LIBCURVE_ERROR MDSClient::GetOrAllocateSegment(bool allocate, uint64_t offset, - const FInfo_t *fi, - const FileEpoch_t *fEpoch, - SegmentInfo *segInfo) { + const FInfo_t* fi, + const FileEpoch_t* fEpoch, + SegmentInfo* segInfo) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -1050,23 +1047,23 @@ LIBCURVE_ERROR MDSClient::GetOrAllocateSegment(bool allocate, uint64_t offset, auto statuscode = response.statuscode(); switch (statuscode) { - case StatusCode::kParaError: - LOG(WARNING) << "GetOrAllocateSegment: error param!"; - return LIBCURVE_ERROR::FAILED; - case StatusCode::kOwnerAuthFail: - LOG(WARNING) << "GetOrAllocateSegment: auth failed!"; - return LIBCURVE_ERROR::AUTHFAIL; - case StatusCode::kFileNotExists: - LOG(WARNING) << "GetOrAllocateSegment: file not exists!"; - return LIBCURVE_ERROR::FAILED; - case StatusCode::kSegmentNotAllocated: - LOG(WARNING) << "GetOrAllocateSegment: segment not allocated!"; - return LIBCURVE_ERROR::NOT_ALLOCATE; - case StatusCode::kEpochTooOld: - LOG(WARNING) << "GetOrAllocateSegment return epoch too old!"; - return LIBCURVE_ERROR::EPOCH_TOO_OLD; - default: - break; + case StatusCode::kParaError: + LOG(WARNING) << "GetOrAllocateSegment: error param!"; + return LIBCURVE_ERROR::FAILED; + case StatusCode::kOwnerAuthFail: + LOG(WARNING) << "GetOrAllocateSegment: auth failed!"; + return LIBCURVE_ERROR::AUTHFAIL; + case StatusCode::kFileNotExists: + LOG(WARNING) << "GetOrAllocateSegment: file not exists!"; + return LIBCURVE_ERROR::FAILED; + case StatusCode::kSegmentNotAllocated: + LOG(WARNING) << "GetOrAllocateSegment: segment not allocated!"; + return LIBCURVE_ERROR::NOT_ALLOCATE; + case StatusCode::kEpochTooOld: + LOG(WARNING) << "GetOrAllocateSegment return epoch too old!"; + return LIBCURVE_ERROR::EPOCH_TOO_OLD; + default: + break; } PageFileSegment pfs = response.pagefilesegment(); @@ -1094,7 +1091,7 @@ LIBCURVE_ERROR MDSClient::GetOrAllocateSegment(bool allocate, uint64_t offset, return ReturnError(rpcExcutor_.DoRPCTask(task, 0)); } -LIBCURVE_ERROR MDSClient::DeAllocateSegment(const FInfo *fileInfo, +LIBCURVE_ERROR MDSClient::DeAllocateSegment(const FInfo* fileInfo, uint64_t offset) { auto task = RPCTaskDefine { (void)addrindex; @@ -1133,9 +1130,9 @@ LIBCURVE_ERROR MDSClient::DeAllocateSegment(const FInfo *fileInfo, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::RenameFile(const UserInfo_t &userinfo, - const std::string &origin, - const std::string &destination, +LIBCURVE_ERROR MDSClient::RenameFile(const UserInfo_t& userinfo, + const std::string& origin, + const std::string& destination, uint64_t originId, uint64_t destinationId) { auto task = RPCTaskDefine { @@ -1177,8 +1174,8 @@ LIBCURVE_ERROR 
MDSClient::RenameFile(const UserInfo_t &userinfo, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::Extend(const std::string &filename, - const UserInfo_t &userinfo, uint64_t newsize) { +LIBCURVE_ERROR MDSClient::Extend(const std::string& filename, + const UserInfo_t& userinfo, uint64_t newsize) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -1211,8 +1208,8 @@ LIBCURVE_ERROR MDSClient::Extend(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::DeleteFile(const std::string &filename, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::DeleteFile(const std::string& filename, + const UserInfo_t& userinfo, bool deleteforce, uint64_t fileid) { auto task = RPCTaskDefine { (void)addrindex; @@ -1251,8 +1248,8 @@ LIBCURVE_ERROR MDSClient::DeleteFile(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::RecoverFile(const std::string &filename, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::RecoverFile(const std::string& filename, + const UserInfo_t& userinfo, uint64_t fileid) { auto task = RPCTaskDefine { (void)addrindex; @@ -1285,9 +1282,9 @@ LIBCURVE_ERROR MDSClient::RecoverFile(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::ChangeOwner(const std::string &filename, - const std::string &newOwner, - const UserInfo_t &userinfo) { +LIBCURVE_ERROR MDSClient::ChangeOwner(const std::string& filename, + const std::string& newOwner, + const UserInfo_t& userinfo) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -1326,9 +1323,9 @@ LIBCURVE_ERROR MDSClient::ChangeOwner(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::Listdir(const std::string &dirpath, - const UserInfo_t &userinfo, - std::vector *filestatVec) { +LIBCURVE_ERROR MDSClient::Listdir(const std::string& dirpath, + const UserInfo_t& userinfo, + std::vector* filestatVec) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -1379,8 +1376,8 @@ LIBCURVE_ERROR MDSClient::Listdir(const std::string &dirpath, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, - CopysetPeerInfo *chunkserverInfo) { +LIBCURVE_ERROR MDSClient::GetChunkServerInfo( + const PeerAddr& csAddr, CopysetPeerInfo* chunkserverInfo) { if (!chunkserverInfo) { LOG(ERROR) << "chunkserverInfo pointer is null!"; return LIBCURVE_ERROR::FAILED; @@ -1403,7 +1400,7 @@ LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, std::vector strs; curve::common::SplitString(csAddr.ToString(), ":", &strs); - const std::string &ip = strs[0]; + const std::string& ip = strs[0]; uint64_t port; bool succ = curve::common::StringToUll(strs[1], &port); @@ -1428,7 +1425,7 @@ LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, << ", log id = " << cntl->log_id(); if (statusCode == 0) { - const auto &csInfo = response.chunkserverinfo(); + const auto& csInfo = response.chunkserverinfo(); ChunkServerID csId = csInfo.chunkserverid(); std::string internalIp = csInfo.hostip(); std::string externalIp = internalIp; @@ -1440,9 +1437,8 @@ LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, butil::str2endpoint(internalIp.c_str(), port, &internal); EndPoint external; butil::str2endpoint(externalIp.c_str(), port, &external); 
- *chunkserverInfo = - CopysetPeerInfo(csId, PeerAddr(internal), - PeerAddr(external)); + *chunkserverInfo = CopysetPeerInfo( + csId, PeerAddr(internal), PeerAddr(external)); return LIBCURVE_ERROR::OK; } else { return LIBCURVE_ERROR::FAILED; @@ -1453,8 +1449,8 @@ LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, } LIBCURVE_ERROR -MDSClient::ListChunkServerInServer(const std::string &serverIp, - std::vector *csIds) { +MDSClient::ListChunkServerInServer(const std::string& serverIp, + std::vector* csIds) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -1497,82 +1493,81 @@ MDSClient::ListChunkServerInServer(const std::string &serverIp, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -void MDSClient::MDSStatusCode2LibcurveError(const StatusCode &status, - LIBCURVE_ERROR *errcode) { +void MDSClient::MDSStatusCode2LibcurveError(const StatusCode& status, + LIBCURVE_ERROR* errcode) { switch (status) { - case StatusCode::kOK: - *errcode = LIBCURVE_ERROR::OK; - break; - case StatusCode::kFileExists: - *errcode = LIBCURVE_ERROR::EXISTS; - break; - case StatusCode::kSnapshotFileNotExists: - case StatusCode::kFileNotExists: - case StatusCode::kDirNotExist: - case StatusCode::kPoolsetNotExist: - *errcode = LIBCURVE_ERROR::NOTEXIST; - break; - case StatusCode::kSegmentNotAllocated: - *errcode = LIBCURVE_ERROR::NOT_ALLOCATE; - break; - case StatusCode::kShrinkBiggerFile: - *errcode = LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE; - break; - case StatusCode::kNotSupported: - *errcode = LIBCURVE_ERROR::NOT_SUPPORT; - break; - case StatusCode::kOwnerAuthFail: - *errcode = LIBCURVE_ERROR::AUTHFAIL; - break; - case StatusCode::kSnapshotFileDeleteError: - *errcode = LIBCURVE_ERROR::DELETE_ERROR; - break; - case StatusCode::kFileUnderSnapShot: - *errcode = LIBCURVE_ERROR::UNDER_SNAPSHOT; - break; - case StatusCode::kFileNotUnderSnapShot: - *errcode = LIBCURVE_ERROR::NOT_UNDERSNAPSHOT; - break; - case StatusCode::kSnapshotDeleting: - *errcode = LIBCURVE_ERROR::DELETING; - break; - case StatusCode::kDirNotEmpty: - *errcode = LIBCURVE_ERROR::NOT_EMPTY; - break; - case StatusCode::kFileOccupied: - *errcode = LIBCURVE_ERROR::FILE_OCCUPIED; - break; - case StatusCode::kSessionNotExist: - *errcode = LIBCURVE_ERROR::SESSION_NOT_EXIST; - break; - case StatusCode::kParaError: - *errcode = LIBCURVE_ERROR::PARAM_ERROR; - break; - case StatusCode::kStorageError: - *errcode = LIBCURVE_ERROR::INTERNAL_ERROR; - break; - case StatusCode::kFileLengthNotSupported: - *errcode = LIBCURVE_ERROR::LENGTH_NOT_SUPPORT; - break; - case ::curve::mds::StatusCode::kCloneStatusNotMatch: - *errcode = LIBCURVE_ERROR::STATUS_NOT_MATCH; - break; - case ::curve::mds::StatusCode::kDeleteFileBeingCloned: - *errcode = LIBCURVE_ERROR::DELETE_BEING_CLONED; - break; - case ::curve::mds::StatusCode::kClientVersionNotMatch: - *errcode = LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT; - break; - case ::curve::mds::StatusCode::kSnapshotFrozen: - *errcode = LIBCURVE_ERROR::SNAPSTHO_FROZEN; - break; - default: - *errcode = LIBCURVE_ERROR::UNKNOWN; - break; + case StatusCode::kOK: + *errcode = LIBCURVE_ERROR::OK; + break; + case StatusCode::kFileExists: + *errcode = LIBCURVE_ERROR::EXISTS; + break; + case StatusCode::kSnapshotFileNotExists: + case StatusCode::kFileNotExists: + case StatusCode::kDirNotExist: + case StatusCode::kPoolsetNotExist: + *errcode = LIBCURVE_ERROR::NOTEXIST; + break; + case StatusCode::kSegmentNotAllocated: + *errcode = LIBCURVE_ERROR::NOT_ALLOCATE; + break; + case 
StatusCode::kShrinkBiggerFile: + *errcode = LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE; + break; + case StatusCode::kNotSupported: + *errcode = LIBCURVE_ERROR::NOT_SUPPORT; + break; + case StatusCode::kOwnerAuthFail: + *errcode = LIBCURVE_ERROR::AUTHFAIL; + break; + case StatusCode::kSnapshotFileDeleteError: + *errcode = LIBCURVE_ERROR::DELETE_ERROR; + break; + case StatusCode::kFileUnderSnapShot: + *errcode = LIBCURVE_ERROR::UNDER_SNAPSHOT; + break; + case StatusCode::kFileNotUnderSnapShot: + *errcode = LIBCURVE_ERROR::NOT_UNDERSNAPSHOT; + break; + case StatusCode::kSnapshotDeleting: + *errcode = LIBCURVE_ERROR::DELETING; + break; + case StatusCode::kDirNotEmpty: + *errcode = LIBCURVE_ERROR::NOT_EMPTY; + break; + case StatusCode::kFileOccupied: + *errcode = LIBCURVE_ERROR::FILE_OCCUPIED; + break; + case StatusCode::kSessionNotExist: + *errcode = LIBCURVE_ERROR::SESSION_NOT_EXIST; + break; + case StatusCode::kParaError: + *errcode = LIBCURVE_ERROR::PARAM_ERROR; + break; + case StatusCode::kStorageError: + *errcode = LIBCURVE_ERROR::INTERNAL_ERROR; + break; + case StatusCode::kFileLengthNotSupported: + *errcode = LIBCURVE_ERROR::LENGTH_NOT_SUPPORT; + break; + case ::curve::mds::StatusCode::kCloneStatusNotMatch: + *errcode = LIBCURVE_ERROR::STATUS_NOT_MATCH; + break; + case ::curve::mds::StatusCode::kDeleteFileBeingCloned: + *errcode = LIBCURVE_ERROR::DELETE_BEING_CLONED; + break; + case ::curve::mds::StatusCode::kClientVersionNotMatch: + *errcode = LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT; + break; + case ::curve::mds::StatusCode::kSnapshotFrozen: + *errcode = LIBCURVE_ERROR::SNAPSTHO_FROZEN; + break; + default: + *errcode = LIBCURVE_ERROR::UNKNOWN; + break; } } - LIBCURVE_ERROR MDSClient::ReturnError(int retcode) { // logic error if (retcode >= 0) { @@ -1581,12 +1576,12 @@ LIBCURVE_ERROR MDSClient::ReturnError(int retcode) { // rpc error or special defined error switch (retcode) { - case -LIBCURVE_ERROR::NOT_SUPPORT: - return LIBCURVE_ERROR::NOT_SUPPORT; - case -LIBCURVE_ERROR::FILE_OCCUPIED: - return LIBCURVE_ERROR::FILE_OCCUPIED; - default: - return LIBCURVE_ERROR::FAILED; + case -LIBCURVE_ERROR::NOT_SUPPORT: + return LIBCURVE_ERROR::NOT_SUPPORT; + case -LIBCURVE_ERROR::FILE_OCCUPIED: + return LIBCURVE_ERROR::FILE_OCCUPIED; + default: + return LIBCURVE_ERROR::FAILED; } } diff --git a/src/client/mds_client.h b/src/client/mds_client.h index 36822fa31c..10c4a682cd 100644 --- a/src/client/mds_client.h +++ b/src/client/mds_client.h @@ -26,10 +26,10 @@ #include #include +#include #include #include #include -#include #include "include/client/libcurve.h" #include "proto/nameserver2.pb.h" @@ -48,28 +48,28 @@ class RPCExcutorRetryPolicy { RPCExcutorRetryPolicy() : retryOpt_(), currentWorkingMDSAddrIndex_(0), cntlID_(1) {} - void SetOption(const MetaServerOption::RpcRetryOption &option) { + void SetOption(const MetaServerOption::RpcRetryOption& option) { retryOpt_ = option; } using RPCFunc = std::function<int(int addrindex, uint64_t rpctimeoutMS, - brpc::Channel *, brpc::Controller *)>; + brpc::Channel*, brpc::Controller*)>; /** - * 将client与mds的重试相关逻辑抽离 - * @param: task为当前要进行的具体rpc任务 - * @param: maxRetryTimeMS是当前执行最大的重试时间 - * @return: 返回当前RPC的结果 + * Encapsulates the retry logic between the client and MDS + * @param: task is the specific RPC task to execute + * @param: maxRetryTimeMS is the maximum total retry time for this call + * @return: Returns the result of the current RPC */ int DoRPCTask(RPCFunc task, uint64_t maxRetryTimeMS); /** - * 测试使用: 设置当前正在服务的mdsindex + * For testing: set the index of the MDS currently in service */ void SetCurrentWorkIndex(int index) {
currentWorkingMDSAddrIndex_.store(index); } /** - * 测试使用:获取当前正在服务的mdsindex + * For testing: get the index of the MDS currently in service */ int GetCurrentWorkIndex() const { return currentWorkingMDSAddrIndex_.load(); @@ -77,105 +77,117 @@ class RPCExcutorRetryPolicy { private: /** - * rpc失败需要重试,根据cntl返回的不同的状态,确定应该做什么样的预处理。 - * 主要做了以下几件事: - * 1. 如果上一次的RPC是超时返回,那么执行rpc 超时指数退避逻辑 - * 2. 如果上一次rpc返回not connect等返回值,会主动触发切换mds地址重试 - * 3. 更新重试信息,比如在当前mds上连续重试的次数 - * @param[in]: status为当前rpc的失败返回的状态 - * @param normalRetryCount The total count of normal retry - * @param[in][out]: curMDSRetryCount当前mds节点上的重试次数,如果切换mds - * 该值会被重置为1. - * @param[in]: curRetryMDSIndex代表当前正在重试的mds索引 - * @param[out]: lastWorkingMDSIndex上一次正在提供服务的mds索引 - * @param[out]: timeOutMS根据status对rpctimeout进行调整 + * When an RPC fails and a retry is needed, the preprocessing to perform is + * determined by the status returned in `cntl`. The main tasks are: + * 1. If the last RPC timed out, execute the RPC timeout exponential + * backoff logic. + * 2. If the last RPC returned a value such as "not connect", actively + * trigger an MDS address switch and retry. + * 3. Update retry bookkeeping, such as the number of consecutive retries + * on the current MDS. + * @param[in]: status is the failure status returned by the current RPC. + * @param[in]: normalRetryCount is the total count of normal retries. + * @param[in][out]: curMDSRetryCount is the number of retries on the current + * MDS node; if the MDS is switched, this value is reset to 1. + * @param[in]: curRetryMDSIndex is the index of the MDS currently being + * retried. + * @param[out]: lastWorkingMDSIndex is the index of the MDS that was + * providing service in the last attempt. + * @param[out]: timeOutMS is adjusted based on the status to control the RPC + * timeout. * - * @return: 返回下一次重试的mds索引 + * @return: Returns the MDS index for the next retry. */ int PreProcessBeforeRetry(int status, bool retryUnlimit, - uint64_t *normalRetryCount, - uint64_t *curMDSRetryCount, int curRetryMDSIndex, - int *lastWorkingMDSIndex, uint64_t *timeOutMS); + uint64_t* normalRetryCount, + uint64_t* curMDSRetryCount, int curRetryMDSIndex, + int* lastWorkingMDSIndex, uint64_t* timeOutMS); /** - * 执行rpc发送任务 - * @param[in]: mdsindex为mds对应的地址索引 - * @param[in]: rpcTimeOutMS是rpc超时时间 - * @param[in]: task为待执行的任务 - * @return: channel获取成功则返回0,否则-1 + * Executes one RPC send task + * @param[in]: mdsindex is the address index of the target MDS + * @param[in]: rpcTimeOutMS is the RPC timeout in milliseconds + * @param[in]: task is the task to execute + * @return: Returns 0 if the channel is obtained successfully, otherwise -1 */ int ExcuteTask(int mdsindex, uint64_t rpcTimeOutMS, RPCExcutorRetryPolicy::RPCFunc task);
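    // Illustrative usage sketch for DoRPCTask: the lambda matches the RPCFunc
    // alias declared above; the service stub, the option field, and the
    // 10-second budget are assumptions, not part of this interface.
    //
    //   RPCExcutorRetryPolicy executor;
    //   executor.SetOption(metaServerOpt.rpcRetryOpt);  // assumed field name
    //   int ret = executor.DoRPCTask(
    //       [&](int addrindex, uint64_t rpctimeoutMS, brpc::Channel* channel,
    //           brpc::Controller* cntl) -> int {
    //           (void)addrindex;
    //           cntl->set_timeout_ms(rpctimeoutMS);
    //           curve::mds::topology::GetClusterInfoRequest request;
    //           curve::mds::topology::GetClusterInfoResponse response;
    //           curve::mds::topology::TopologyService_Stub stub(channel);
    //           stub.GetClusterInfo(cntl, &request, &response, nullptr);
    //           return cntl->Failed() ? -cntl->ErrorCode() : 0;
    //       },
    //       10 * 1000);  // retry budget: 10 seconds in total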
/** - * 根据输入状态获取下一次需要重试的mds索引,mds切换逻辑: - * 记录三个状态:curRetryMDSIndex、lastWorkingMDSIndex、 - * currentWorkingMDSIndex - * 1. 开始的时候curRetryMDSIndex = currentWorkingMDSIndex - * lastWorkingMDSIndex = currentWorkingMDSIndex - * 2. - * 如果rpc失败,会触发切换curRetryMDSIndex,如果这时候lastWorkingMDSIndex - * 与currentWorkingMDSIndex相等,这时候会顺序切换到下一个mds索引, - * 如果lastWorkingMDSIndex与currentWorkingMDSIndex不相等,那么 - * 说明有其他接口更新了currentWorkingMDSAddrIndex_,那么本次切换 - * 直接切换到currentWorkingMDSAddrIndex_ - * @param[in]: needChangeMDS表示当前外围需不需要切换mds,这个值由 - * PreProcessBeforeRetry函数确定 - * @param[in]: currentRetryIndex为当前正在重试的mds索引 - * @param[in][out]: - * lastWorkingindex为上一次正在服务的mds索引,正在重试的mds - * 与正在服务的mds索引可能是不同的mds。 - * @return: 返回下一次要重试的mds索引 + * Gets the next MDS index to retry based on the input state. MDS switching + * logic: three states are tracked: curRetryMDSIndex, lastWorkingMDSIndex, + * and currentWorkingMDSIndex. + * 1. Initially, curRetryMDSIndex = currentWorkingMDSIndex and + * lastWorkingMDSIndex = currentWorkingMDSIndex. + * 2. An RPC failure triggers a switch of curRetryMDSIndex. If at that point + * lastWorkingMDSIndex equals currentWorkingMDSIndex, the switch moves + * sequentially to the next MDS index. If they are not equal, another + * interface has updated currentWorkingMDSAddrIndex_, so this switch goes + * directly to currentWorkingMDSAddrIndex_. + * @param[in]: needChangeMDS indicates whether the caller needs to switch + * MDS; this value is determined by the PreProcessBeforeRetry function. + * @param[in]: currentRetryIndex is the index of the MDS currently being + * retried. + * @param[in][out]: lastWorkingindex is the index of the MDS that was in + * service last time; the MDS being retried and the MDS in service may be + * different MDS instances. + * @return: Returns the next MDS index to retry. */ int GetNextMDSIndex(bool needChangeMDS, int currentRetryIndex, - int *lastWorkingindex); - /** - * 根据输入参数,决定是否继续重试,重试退出条件是重试时间超出最大允许时间 - * IO路径上和非IO路径上的重试时间不一样,非IO路径的重试时间由配置文件的 - * mdsMaxRetryMS参数指定,IO路径为无限循环重试。 + int* lastWorkingindex); + /** + * Based on the input parameters, decides whether to continue retrying; + * retrying stops once the retry time exceeds the maximum allowed time. The + * retry time differs between the IO path and non-IO paths: for non-IO paths + * it is specified by the mdsMaxRetryMS parameter in the configuration file, + * while the IO path retries in an infinite loop. * @param[in]: startTimeMS - * @param[in]: maxRetryTimeMS为最大重试时间 - * @return:需要继续重试返回true, 否则返回false + * @param[in]: maxRetryTimeMS is the maximum retry time + * @return: Returns true if retrying should continue, otherwise false */ bool GoOnRetry(uint64_t startTimeMS, uint64_t maxRetryTimeMS);
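    // A minimal sketch of the timeout exponential backoff mentioned above;
    // the maxRPCTimeoutMS field name is an assumption:
    //
    //   uint64_t NextTimeoutMS(uint64_t currentTimeoutMS) const {
    //       // Double the timeout after a timed-out RPC, bounded by the cap.
    //       return std::min(currentTimeoutMS * 2, retryOpt_.maxRPCTimeoutMS);
    //   }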
/** - * 递增controller id并返回id + * Increments the controller id and returns it */ uint64_t GetLogId() { return cntlID_.fetch_add(1, std::memory_order_relaxed); } private: - // 执行rpc时必要的配置信息 + // Configuration needed when executing RPCs MetaServerOption::RpcRetryOption retryOpt_; - // 记录上一次重试过的leader信息 + // Records the leader information from the last retry std::atomic<int> currentWorkingMDSAddrIndex_; - // controller id,用于trace整个rpc IO链路 - // 这里直接用uint64即可,在可预测的范围内,不会溢出 + // controller ID, used to trace the entire RPC IO path + // A plain uint64 suffices here; it will not overflow within any predictable + // range std::atomic<uint64_t> cntlID_; }; - struct LeaseRefreshResult; -// MDSClient是client与MDS通信的唯一窗口 +// MDSClient is the client's sole gateway for communicating with the MDS class MDSClient : public MDSClientBase, public std::enable_shared_from_this<MDSClient> { public: - explicit MDSClient(const std::string &metricPrefix = ""); + explicit MDSClient(const std::string& metricPrefix = ""); virtual ~MDSClient(); - LIBCURVE_ERROR Initialize(const MetaServerOption &metaopt); + LIBCURVE_ERROR Initialize(const MetaServerOption& metaopt); /** - * 创建文件 - * @param: context创建文件信息 - * @return: 成功返回LIBCURVE_ERROR::OK - * 文件已存在返回LIBCURVE_ERROR::EXIST - * 否则返回LIBCURVE_ERROR::FAILED - * 如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, + * Create a file + * @param: context is the file-creation information + * @return: LIBCURVE_ERROR::OK on success; + * LIBCURVE_ERROR::EXIST if the file already exists; + * LIBCURVE_ERROR::AUTHFAIL if authentication fails; + * otherwise LIBCURVE_ERROR::FAILED */ LIBCURVE_ERROR CreateFile(const CreateFileContext& context); /** @@ -190,29 +202,31 @@ class MDSClient : public MDSClientBase, * return LIBCURVE_ERROR::AUTHFAIL for auth fail, * otherwise return LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR OpenFile(const std::string &filename, - const UserInfo_t &userinfo, FInfo_t *fi, - FileEpoch_t *fEpoch, - LeaseSession *lease); + LIBCURVE_ERROR OpenFile(const std::string& filename, + const UserInfo_t& userinfo, FInfo_t* fi, + FileEpoch_t* fEpoch, LeaseSession* lease); /** - * 获取copysetid对应的serverlist信息并更新到metacache - * @param: logicPoolId逻辑池信息 - * @param: csid为要获取的copyset列表 - * @param: cpinfoVec保存获取到的server信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则返回LIBCURVE_ERROR::FAILED + * Obtains the server list for the given copysets and updates it into the + * metacache + * @param: logicPoolId is the logical pool information + * @param: csid is the list of copysets to query + * @param: cpinfoVec stores the obtained server information + * @return: LIBCURVE_ERROR::OK on success, otherwise LIBCURVE_ERROR::FAILED */ LIBCURVE_ERROR - GetServerList(const LogicPoolID &logicPoolId, - const std::vector &csid, - std::vector> *cpinfoVec); + GetServerList(const LogicPoolID& logicPoolId, + const std::vector& csid, + std::vector>* cpinfoVec); /** - * 获取当前mds所属的集群信息 - * @param[out]: clsctx 为要获取的集群信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则返回LIBCURVE_ERROR::FAILED + * Obtains the information of the cluster that the current MDS belongs to + * @param[out]: clsctx is the cluster information to obtain + * @return: LIBCURVE_ERROR::OK on success, otherwise LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR GetClusterInfo(ClusterContext *clsctx); + LIBCURVE_ERROR GetClusterInfo(ClusterContext* clsctx);
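    // Illustrative usage sketch; the option field and the clusterId member
    // are assumptions:
    //
    //   auto mdsClient = std::make_shared<MDSClient>();
    //   MetaServerOption opt;
    //   opt.rpcRetryOpt.addrs = {"127.0.0.1:6666"};  // assumed field name
    //   if (mdsClient->Initialize(opt) == LIBCURVE_ERROR::OK) {
    //       ClusterContext ctx;
    //       if (mdsClient->GetClusterInfo(&ctx) == LIBCURVE_ERROR::OK) {
    //           LOG(INFO) << "cluster id: " << ctx.clusterId;  // assumed member
    //       }
    //   }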
LIBCURVE_ERROR ListPoolset(std::vector* out); @@ -229,9 +243,9 @@ class MDSClient : public MDSClientBase, * otherwise return LIBCURVE_ERROR::FAILED */ LIBCURVE_ERROR GetOrAllocateSegment(bool allocate, uint64_t offset, - const FInfo_t *fi, - const FileEpoch_t *fEpoch, - SegmentInfo *segInfo); + const FInfo_t* fi, + const FileEpoch_t* fEpoch, + SegmentInfo* segInfo); /** * @brief Send DeAllocateSegment request to current working MDS * @param offset segment start offset * @return LIBCURVE_ERROR::OK means success, other value means fail */ - virtual LIBCURVE_ERROR DeAllocateSegment(const FInfo *fileInfo, + virtual LIBCURVE_ERROR DeAllocateSegment(const FInfo* fileInfo, uint64_t offset); /** @@ -253,10 +267,9 @@ class MDSClient : public MDSClientBase, * return LIBCURVE_ERROR::AUTHFAIL for auth fail, * otherwise return LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR GetFileInfo(const std::string &filename, - const UserInfo_t &userinfo, - FInfo_t *fi, - FileEpoch_t *fEpoch); + LIBCURVE_ERROR GetFileInfo(const std::string& filename, + const UserInfo_t& userinfo, FInfo_t* fi, + FileEpoch_t* fEpoch); /** * @brief Increase epoch and return chunkserver locations * * @param[in] filename file name * @param[in] userinfo user info * @param[out] fi file info * @param[out] fEpoch file epoch info * @param[out] csLocs chunkserver locations * * @return LIBCURVE_ERROR::OK for success, LIBCURVE_ERROR::FAILED for fail. */ - LIBCURVE_ERROR IncreaseEpoch(const std::string& filename, - const UserInfo_t& userinfo, - FInfo_t* fi, - FileEpoch_t *fEpoch, - std::list> *csLocs); + LIBCURVE_ERROR IncreaseEpoch( + const std::string& filename, const UserInfo_t& userinfo, FInfo_t* fi, + FileEpoch_t* fEpoch, std::list>* csLocs); /** - * 扩展文件 - * @param: userinfo是用户信息 - * @param: filename文件名 - * @param: newsize新的size + * Extend a file + * @param: userinfo is the user information + * @param: filename is the file name + * @param: newsize is the new size */ - LIBCURVE_ERROR Extend(const std::string &filename, - const UserInfo_t &userinfo, uint64_t newsize); + LIBCURVE_ERROR Extend(const std::string& filename, + const UserInfo_t& userinfo, uint64_t newsize); /** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @param: deleteforce是否强制删除而不放入垃圾回收站 - * @param: id为文件id,默认值为0,如果用户不指定该值,不会传id到mds + * Delete a file + * @param: userinfo is the user information + * @param: filename is the name of the file to delete + * @param: deleteforce indicates whether to delete the file permanently + * instead of moving it to the recycle bin + * @param: id is the file id, with a default value of 0.
If the user does + * not specify this value, the id will not be passed to MDS */ - LIBCURVE_ERROR DeleteFile(const std::string &filename, - const UserInfo_t &userinfo, + LIBCURVE_ERROR DeleteFile(const std::string& filename, + const UserInfo_t& userinfo, bool deleteforce = false, uint64_t id = 0); /** @@ -300,253 +313,266 @@ class MDSClient : public MDSClientBase, * @param: filename * @param: fileId is inodeid,default 0 */ - LIBCURVE_ERROR RecoverFile(const std::string &filename, - const UserInfo_t &userinfo, uint64_t fileId); + LIBCURVE_ERROR RecoverFile(const std::string& filename, + const UserInfo_t& userinfo, uint64_t fileId); /** - * 创建版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要创建快照的文件名 - * @param: seq是出参,返回创建快照时文件的版本信息 + * Create a snapshot with version number seq + * @param: userinfo is the user information + * @param: filename is the name of the file to snapshot + * @param: seq is an output parameter that returns the file's version + * information at snapshot creation * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * LIBCURVE_ERROR::OK on success; LIBCURVE_ERROR::AUTHFAIL if + * authentication fails; otherwise LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR CreateSnapShot(const std::string &filename, - const UserInfo_t &userinfo, uint64_t *seq); + LIBCURVE_ERROR CreateSnapShot(const std::string& filename, + const UserInfo_t& userinfo, uint64_t* seq); /** - * 删除版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要快照的文件名 - * @param: seq是创建快照时文件的版本信息 + * Delete the snapshot with version number seq + * @param: userinfo is the user information + * @param: filename is the name of the snapshotted file + * @param: seq is the file's version information at snapshot creation * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * LIBCURVE_ERROR::OK on success; LIBCURVE_ERROR::AUTHFAIL if + * authentication fails; otherwise LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR DeleteSnapShot(const std::string &filename, - const UserInfo_t &userinfo, uint64_t seq); + LIBCURVE_ERROR DeleteSnapShot(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq); /** - * 以列表的形式获取版本号为seq的snapshot文件信息,snapif是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param: snapif是出参,保存文件的基本信息 + * Obtains, as a list, the snapshot file information for version seq; + * snapif is the output parameter + * @param: filename is the name of the snapshotted file + * @param: userinfo is the user information + * @param: seq is the file's version information at snapshot creation + * @param: snapif is an output parameter that holds the files' basic + * information * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED - */ - LIBCURVE_ERROR ListSnapShot(const std::string &filename, - const UserInfo_t &userinfo, - const std::vector *seq, - std::map *snapif); - /** - * 获取快照的chunk信息并更新到metacache,segInfo是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param: offset是文件内的偏移 - * @param: segInfo是出参,保存chunk信息 + * LIBCURVE_ERROR::OK on success; LIBCURVE_ERROR::AUTHFAIL if + * authentication fails; otherwise LIBCURVE_ERROR::FAILED + */ + LIBCURVE_ERROR ListSnapShot(const std::string& filename, + const UserInfo_t& userinfo, + const std::vector* seq,
std::map* snapif); + /** + * Obtains the snapshot's chunk information and updates it into the + * metacache; segInfo is the output parameter + * @param: filename is the name of the snapshotted file + * @param: userinfo is the user information + * @param: seq is the file's version information at snapshot creation + * @param: offset is the offset within the file + * @param: segInfo is the output parameter that holds the chunk information * @return: - 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - 否则返回LIBCURVE_ERROR::FAILED + * LIBCURVE_ERROR::OK on success; LIBCURVE_ERROR::AUTHFAIL if + * authentication fails; otherwise LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR GetSnapshotSegmentInfo(const std::string &filename, - const UserInfo_t &userinfo, + LIBCURVE_ERROR GetSnapshotSegmentInfo(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, uint64_t offset, - SegmentInfo *segInfo); - /** - * 获取快照状态 - * @param: filenam文件名 - * @param: userinfo是用户信息 - * @param: seq是文件版本号信息 - * @param[out]: filestatus为快照状态 + SegmentInfo* segInfo); + /** + * Get the snapshot status + * @param: filename is the file name + * @param: userinfo is the user information + * @param: seq is the file version number information + * @param[out]: filestatus is the snapshot status */ + LIBCURVE_ERROR CheckSnapShotStatus(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, + FileStatus* filestatus); + + /** + * The file interface must keep a heartbeat with the MDS while a file is + * open, and refresh is used to renew the lease. The renewal result is + * returned to the calling layer through LeaseRefreshResult* resp + * @param: filename is the name of the file whose lease is renewed + * @param: sessionid is the session information of the file + * @param: resp is the lease information returned from the MDS side + * @param[out]: lease is the session information of the current file * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED - */ - LIBCURVE_ERROR RefreshSession(const std::string &filename, - const UserInfo_t &userinfo, - const std::string &sessionid, - LeaseRefreshResult *resp, - LeaseSession *lease = nullptr); - /** - * 关闭文件,需要携带sessionid,这样mds端会在数据库删除该session信息 - * @param: filename是要续约的文件名 - * @param: sessionid是文件的session信息 + * LIBCURVE_ERROR::OK on success; LIBCURVE_ERROR::AUTHFAIL if + * authentication fails; otherwise LIBCURVE_ERROR::FAILED + */ + LIBCURVE_ERROR RefreshSession(const std::string& filename, + const UserInfo_t& userinfo, + const std::string& sessionid, + LeaseRefreshResult* resp, + LeaseSession* lease = nullptr); + /** + * Closing a file must carry the sessionid, so that the MDS side can delete + * the session information from its database + * @param: filename is the name of the file to close + * @param: sessionid is the session information of the file * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * LIBCURVE_ERROR::OK on success; LIBCURVE_ERROR::AUTHFAIL if + * authentication fails; otherwise LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR CloseFile(const std::string &filename, - const UserInfo_t &userinfo, - const std::string &sessionid); + LIBCURVE_ERROR CloseFile(const std::string& filename, + const UserInfo_t& userinfo, + const std::string& sessionid);
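    // Illustrative sketch of the lease-renewal loop described above; the
    // scheduling policy shown is an assumption:
    //
    //   LeaseRefreshResult result;
    //   LIBCURVE_ERROR ret =
    //       mdsClient->RefreshSession(filename, userinfo, sessionid, &result);
    //   if (ret == LIBCURVE_ERROR::OK &&
    //       result.status == LeaseRefreshResult::Status::OK) {
    //       // Lease renewed; schedule the next refresh well before the lease
    //       // expires, e.g. at one third of the lease period.
    //   } else {
    //       // Renewal failed: stop issuing IO, then retry or close the file.
    //   }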
/** - * @brief 创建clone文件 + * @brief Create a clone file * @detail - * - 若是clone,sn重置为初始值 - * - 若是recover,sn不变 + * - For clone, sn is reset to the initial value + * - For recover, sn remains unchanged * - * @param source 克隆源文件名 - * @param:destination clone目标文件名 - * @param:userinfo 用户信息 - * @param:size 文件大小 - * @param:sn 版本号 - * @param:chunksize是创建文件的chunk大小 + * @param source is the clone source file name + * @param: destination is the clone destination file name + * @param: userinfo is the user information + * @param: size is the file size + * @param: sn is the version number + * @param: chunksize is the chunk size of the created file * @param stripeUnit stripe size * @param stripeCount stripe count - * @param[out] destFileId 创建的目标文件的Id + * @param[out] destFileId is the ID of the created target file * - * @return 错误码 + * @return error code */ - LIBCURVE_ERROR CreateCloneFile(const std::string &source, - const std::string &destination, - const UserInfo_t &userinfo, uint64_t size, + LIBCURVE_ERROR CreateCloneFile(const std::string& source, + const std::string& destination, + const UserInfo_t& userinfo, uint64_t size, uint64_t sn, uint32_t chunksize, uint64_t stripeUnit, uint64_t stripeCount, - const std::string& poolset, - FInfo *fileinfo); + const std::string& poolset, FInfo* fileinfo); /** - * @brief 通知mds完成Clone Meta + * @brief Notify MDS that Clone Meta is complete * - * @param:destination 目标文件 - * @param:userinfo用户信息 + * @param: destination is the target file + * @param: userinfo is the user information * - * @return 错误码 + * @return error code */ - LIBCURVE_ERROR CompleteCloneMeta(const std::string &destination, - const UserInfo_t &userinfo); + LIBCURVE_ERROR CompleteCloneMeta(const std::string& destination, + const UserInfo_t& userinfo); /** - * @brief 通知mds完成Clone Chunk + * @brief Notify MDS that Clone Chunk is complete * - * @param:destination 目标文件 - * @param:userinfo用户信息 + * @param: destination is the target file + * @param: userinfo is the user information * - * @return 错误码 + * @return error code */ - LIBCURVE_ERROR CompleteCloneFile(const std::string &destination, - const UserInfo_t &userinfo); + LIBCURVE_ERROR CompleteCloneFile(const std::string& destination, + const UserInfo_t& userinfo); /** - * @brief 通知mds完成Clone Meta + * @brief Set the clone file status * - * @param: filename 目标文件 - * @param: filestatus为要设置的目标状态 - * @param: userinfo用户信息 - * @param: fileId为文件ID信息,非必填 + * @param: filename is the target file + * @param: filestatus is the target status to set + * @param: userinfo is the user information + * @param: fileId is the file ID, optional * - * @return 错误码 + * @return error code */ - LIBCURVE_ERROR SetCloneFileStatus(const std::string &filename, - const FileStatus &filestatus, - const UserInfo_t &userinfo, + LIBCURVE_ERROR SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const UserInfo_t& userinfo, uint64_t fileID = 0); /** - * @brief 重名文件 + * @brief Rename a file * - * @param:userinfo 用户信息 - * @param:originId 被恢复的原始文件Id - * @param:destinationId 克隆出的目标文件Id - * @param:origin 被恢复的原始文件名 - * @param:destination 克隆出的目标文件 + * @param: userinfo is the user information + * @param: originId is the ID of the original file being recovered + * @param: destinationId is the ID of the cloned target file + * @param: origin is the name of the original file being recovered + * @param: destination is the cloned target file
* - * @return 错误码 + * @return error code */ - LIBCURVE_ERROR RenameFile(const UserInfo_t &userinfo, - const std::string &origin, - const std::string &destination, + LIBCURVE_ERROR RenameFile(const UserInfo_t& userinfo, + const std::string& origin, + const std::string& destination, uint64_t originId = 0, uint64_t destinationId = 0); /** - * 变更owner - * @param: filename待变更的文件名 - * @param: newOwner新的owner信息 - * @param: userinfo执行此操作的user信息,只有root用户才能执行变更 - * @return: 成功返回0, - * 否则返回LIBCURVE_ERROR::FAILED,LIBCURVE_ERROR::AUTHFAILED等 + * Change the owner + * @param: filename is the file whose owner is to be changed + * @param: newOwner is the new owner information + * @param: userinfo is the user performing this operation; only the root + * user can change the owner + * @return: 0 on success; otherwise LIBCURVE_ERROR::FAILED, + * LIBCURVE_ERROR::AUTHFAILED, etc. */ - LIBCURVE_ERROR ChangeOwner(const std::string &filename, - const std::string &newOwner, - const UserInfo_t &userinfo); + LIBCURVE_ERROR ChangeOwner(const std::string& filename, + const std::string& newOwner, + const UserInfo_t& userinfo); /** - * 枚举目录内容 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @param[out]: filestatVec当前文件夹内的文件信息 + * Enumerate directory contents + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @param[out]: filestatVec is the file information in the current directory */ - LIBCURVE_ERROR Listdir(const std::string &dirpath, - const UserInfo_t &userinfo, - std::vector *filestatVec); + LIBCURVE_ERROR Listdir(const std::string& dirpath, + const UserInfo_t& userinfo, + std::vector* filestatVec); /** - * 向mds注册client metric监听的地址和端口 - * @param: ip客户端ip - * @param: dummyServerPort为监听端口 - * @return: 成功返回0, - * 否则返回LIBCURVE_ERROR::FAILED,LIBCURVE_ERROR::AUTHFAILED等 + * Register the address and port that the client's metric service listens + * on with the MDS + * @param: ip is the client IP + * @param: dummyServerPort is the listening port + * @return: 0 on success; otherwise LIBCURVE_ERROR::FAILED, + * LIBCURVE_ERROR::AUTHFAILED, etc. */ - LIBCURVE_ERROR Register(const std::string &ip, uint16_t port); + LIBCURVE_ERROR Register(const std::string& ip, uint16_t port); /** - * 获取chunkserver信息 - * @param[in] addr chunkserver地址信息 - * @param[out] chunkserverInfo 待获取的信息 - * @return:成功返回ok + * Obtain chunkserver information + * @param[in] addr is the chunkserver address information + * @param[out] chunkserverInfo is the information to obtain + * @return: LIBCURVE_ERROR::OK on success */ LIBCURVE_ERROR - GetChunkServerInfo(const PeerAddr &addr, - CopysetPeerInfo *chunkserverInfo); + GetChunkServerInfo(const PeerAddr& addr, + CopysetPeerInfo* chunkserverInfo); /** - * 获取server上所有chunkserver的id - * @param[in]: ip为server的ip地址 - * @param[out]: csIds用于保存chunkserver的id - * @return: 成功返回LIBCURVE_ERROR::OK,失败返回LIBCURVE_ERROR::FAILED + * Obtain the IDs of all chunkservers on a server + * @param[in]: ip is the IP address of the server + * @param[out]: csIds is used to save the chunkserver IDs + * @return: LIBCURVE_ERROR::OK on success, LIBCURVE_ERROR::FAILED on failure */ - LIBCURVE_ERROR ListChunkServerInServer(const std::string &ip, - std::vector *csIds); + LIBCURVE_ERROR ListChunkServerInServer(const std::string& ip, + std::vector* csIds); /** - * 析构,回收资源 + * Tear down and release resources */ void UnInitialize();
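    // Illustrative sketch of how the mapping below is typically used once an
    // RPC response arrives; the response variable is assumed:
    //
    //   LIBCURVE_ERROR retcode;
    //   MDSStatusCode2LibcurveError(response.statuscode(), &retcode);
    //   LOG_IF(WARNING, retcode != LIBCURVE_ERROR::OK)
    //       << "mds request failed, errcode = " << static_cast<int>(retcode);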
/** - * 将mds侧错误码对应到libcurve错误码 - * @param: statecode为mds一侧错误码 - * @param[out]: 出参errcode为libcurve一侧的错误码 + * Map an MDS-side error code to the libcurve error code + * @param: statcode is the error code on the MDS side + * @param[out]: errcode is the corresponding error code on the libcurve + * side */ - void MDSStatusCode2LibcurveError(const ::curve::mds::StatusCode &statcode, - LIBCURVE_ERROR *errcode); + void MDSStatusCode2LibcurveError(const ::curve::mds::StatusCode& statcode, + LIBCURVE_ERROR* errcode); LIBCURVE_ERROR ReturnError(int retcode); private: - // 初始化标志,放置重复初始化 + // Initialization flag, to prevent duplicate initialization bool inited_ = false; - // 当前模块的初始化option配置 + // Initialization options for the current module MetaServerOption metaServerOpt_; - // client与mds通信的metric统计 + // Metrics for client-MDS communication MDSClientMetric mdsClientMetric_; RPCExcutorRetryPolicy rpcExcutor_; diff --git a/src/client/mds_client_base.h b/src/client/mds_client_base.h index 64178e43e9..6cb3340231 100644 --- a/src/client/mds_client_base.h +++ b/src/client/mds_client_base.h @@ -38,120 +38,120 @@ namespace curve { namespace client { -using curve::mds::OpenFileRequest; -using curve::mds::OpenFileResponse; -using curve::mds::CreateFileRequest; -using curve::mds::CreateFileResponse; +using curve::mds::ChangeOwnerRequest; +using curve::mds::ChangeOwnerResponse; +using curve::mds::CheckSnapShotStatusRequest; +using curve::mds::CheckSnapShotStatusResponse; using curve::mds::CloseFileRequest; using curve::mds::CloseFileResponse; -using curve::mds::RenameFileRequest; -using curve::mds::RenameFileResponse; -using curve::mds::ExtendFileRequest; -using curve::mds::ExtendFileResponse; +using curve::mds::CreateCloneFileRequest; +using curve::mds::CreateCloneFileResponse; +using curve::mds::CreateFileRequest; +using curve::mds::CreateFileResponse; +using curve::mds::CreateSnapShotRequest; +using curve::mds::CreateSnapShotResponse; +using curve::mds::DeAllocateSegmentRequest; +using curve::mds::DeAllocateSegmentResponse; using curve::mds::DeleteFileRequest; using curve::mds::DeleteFileResponse; -using curve::mds::RecoverFileRequest; -using curve::mds::RecoverFileResponse; +using curve::mds::DeleteSnapShotRequest; +using curve::mds::DeleteSnapShotResponse; +using curve::mds::ExtendFileRequest; +using curve::mds::ExtendFileResponse; using curve::mds::GetFileInfoRequest; using curve::mds::GetFileInfoResponse; +using curve::mds::GetOrAllocateSegmentRequest; +using curve::mds::GetOrAllocateSegmentResponse; +using curve::mds::IncreaseFileEpochRequest; using curve::mds::IncreaseFileEpochResponse; -using curve::mds::DeleteSnapShotRequest; -using curve::mds::DeleteSnapShotResponse; -using curve::mds::ReFreshSessionRequest; -using curve::mds::ReFreshSessionResponse; using curve::mds::ListDirRequest; using curve::mds::ListDirResponse; -using curve::mds::ChangeOwnerRequest; -using curve::mds::ChangeOwnerResponse; -using curve::mds::CreateSnapShotRequest; -using curve::mds::CreateSnapShotResponse; -using curve::mds::CreateCloneFileRequest; -using curve::mds::CreateCloneFileResponse; -using curve::mds::SetCloneFileStatusRequest; -using curve::mds::SetCloneFileStatusResponse; -using curve::mds::GetOrAllocateSegmentRequest; -using curve::mds::GetOrAllocateSegmentResponse; -using curve::mds::DeAllocateSegmentRequest; -using curve::mds::DeAllocateSegmentResponse; -using curve::mds::CheckSnapShotStatusRequest; -using curve::mds::CheckSnapShotStatusResponse; using curve::mds::ListSnapShotFileInfoRequest; using curve::mds::ListSnapShotFileInfoResponse; -using curve::mds::GetOrAllocateSegmentRequest; -using curve::mds::GetOrAllocateSegmentResponse;
+using curve::mds::OpenFileRequest; +using curve::mds::OpenFileResponse; +using curve::mds::RecoverFileRequest; +using curve::mds::RecoverFileResponse; +using curve::mds::ReFreshSessionRequest; +using curve::mds::ReFreshSessionResponse; +using curve::mds::RenameFileRequest; +using curve::mds::RenameFileResponse; +using curve::mds::SetCloneFileStatusRequest; +using curve::mds::SetCloneFileStatusResponse; +using curve::mds::topology::GetChunkServerInfoResponse; using curve::mds::topology::GetChunkServerListInCopySetsRequest; using curve::mds::topology::GetChunkServerListInCopySetsResponse; using curve::mds::topology::GetClusterInfoRequest; using curve::mds::topology::GetClusterInfoResponse; -using curve::mds::topology::GetChunkServerInfoResponse; using curve::mds::topology::ListChunkServerResponse; -using curve::mds::IncreaseFileEpochRequest; -using curve::mds::IncreaseFileEpochResponse; using curve::mds::topology::ListPoolsetRequest; using curve::mds::topology::ListPoolsetResponse; extern const char* kRootUserName; -// MDSClientBase将所有与mds的RPC接口抽离,与业务逻辑解耦 -// 这里只负责rpc的发送,具体的业务处理逻辑通过reponse和controller向上 -// 返回给调用者,有调用者处理 +// MDSClientBase isolates all RPC interfaces to the MDS and decouples them +// from business logic. It is responsible only for sending RPCs; the concrete +// business handling is returned to the caller through the response and the +// controller and is processed there. class MDSClientBase { public: /** - * 打开文件 - * @param: filename是文件名 - * @param: userinfo为user信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Open a file + * @param: filename is the file name + * @param: userinfo is the user information + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void OpenFile(const std::string& filename, - const UserInfo_t& userinfo, - OpenFileResponse* response, - brpc::Controller* cntl, + void OpenFile(const std::string& filename, const UserInfo_t& userinfo, + OpenFileResponse* response, brpc::Controller* cntl, brpc::Channel* channel);
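    // Illustrative sketch of the send-only calling pattern described above;
    // the timeout value and error handling are assumptions:
    //
    //   brpc::Controller cntl;
    //   cntl.set_timeout_ms(500);
    //   OpenFileResponse response;
    //   mdsClientBase.OpenFile(filename, userinfo, &response, &cntl, channel);
    //   if (cntl.Failed()) {
    //       LOG(WARNING) << "OpenFile rpc failed: " << cntl.ErrorText();
    //   } else if (response.statuscode() != curve::mds::StatusCode::kOK) {
    //       // Business-level error: map it with MDSStatusCode2LibcurveError.
    //   }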
/** - * 创建文件 - * @param: filename创建文件的文件名 - * @param: userinfo为user信息 - * @param: size文件长度 - * @param: normalFile表示创建的是普通文件还是目录文件,如果是目录则忽略size - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Create a file + * @param: filename is the name of the file to create + * @param: userinfo is the user information + * @param: size is the file length + * @param: normalFile indicates whether a regular file or a directory is + * created; if it is a directory, size is ignored + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void CreateFile(const CreateFileContext& context, - CreateFileResponse* response, - brpc::Controller* cntl, + CreateFileResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** - * 关闭文件,需要携带sessionid,这样mds端会在数据库删除该session信息 - * @param: filename是要续约的文件名 - * @param: userinfo为user信息 - * @param: sessionid是文件的session信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Closing a file must carry the sessionid, so that the MDS side can delete + * the session information from its database + * @param: filename is the name of the file to close + * @param: userinfo is the user information + * @param: sessionid is the session information of the file + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void CloseFile(const std::string& filename, - const UserInfo_t& userinfo, - const std::string& sessionid, - CloseFileResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void CloseFile(const std::string& filename, const UserInfo_t& userinfo, + const std::string& sessionid, CloseFileResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取文件信息,fi是出参 - * @param: filename是文件名 - * @param: userinfo为user信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain file information; fi is the output parameter + * @param: filename is the file name + * @param: userinfo is the user information + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void GetFileInfo(const std::string& filename, - const UserInfo_t& userinfo, - GetFileInfoResponse* response, - brpc::Controller* cntl, + void GetFileInfo(const std::string& filename, const UserInfo_t& userinfo, + GetFileInfoResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** @@ -164,175 +164,177 @@ class MDSClientBase { * @param[in] channel rpc channel * */ - void IncreaseEpoch(const std::string& filename, - const UserInfo_t& userinfo, - IncreaseFileEpochResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void IncreaseEpoch(const std::string& filename, const UserInfo_t& userinfo, + IncreaseFileEpochResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 创建版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要创建快照的文件名 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Create a snapshot with version number seq + * @param: userinfo is the user information + * @param: filename is the name of the file to snapshot + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS
*/ - void CreateSnapShot(const std::string& filename, - const UserInfo_t& userinfo, + void CreateSnapShot(const std::string& filename, const UserInfo_t& userinfo, CreateSnapShotResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 删除版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要快照的文件名 - * @param: seq是创建快照时文件的版本信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Delete the snapshot with version number seq + * @param: userinfo is the user information + * @param: filename is the name of the snapshotted file + * @param: seq is the file's version information at snapshot creation + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void DeleteSnapShot(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t seq, - DeleteSnapShotResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void DeleteSnapShot(const std::string& filename, const UserInfo_t& userinfo, + uint64_t seq, DeleteSnapShotResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 以列表的形式获取版本号为seq的snapshot文件信息,snapif是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtains, as a list, the snapshot file information for version seq; + * snapif is the output parameter + * @param: filename is the name of the snapshotted file + * @param: userinfo is the user information + * @param: seq is the file's version information at snapshot creation + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void ListSnapShot(const std::string& filename, - const UserInfo_t& userinfo, + void ListSnapShot(const std::string& filename, const UserInfo_t& userinfo, const std::vector* seq, ListSnapShotFileInfoResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取快照的chunk信息并更新到metacache,segInfo是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param: offset是文件内的偏移 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtains the snapshot's chunk information and updates it into the + * metacache; segInfo is the output parameter + * @param: filename is the name of the snapshotted file + * @param: userinfo is the user information + * @param: seq is the file's version information at snapshot creation + * @param: offset is the offset within the file + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void GetSnapshotSegmentInfo(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t seq, + const UserInfo_t& userinfo, uint64_t seq,
uint64_t offset, GetOrAllocateSegmentResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 文件接口在打开文件的时候需要与mds保持心跳,refresh用来续约 - * 续约结果将会通过LeaseRefreshResult* resp返回给调用层 - * @param: filename是要续约的文件名 - * @param: sessionid是文件的session信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * The file interface must keep a heartbeat with the MDS while a file is + * open, and refresh is used to renew the lease. The renewal result is + * returned to the calling layer through LeaseRefreshResult* resp + * @param: filename is the name of the file whose lease is renewed + * @param: sessionid is the session information of the file + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void RefreshSession(const std::string& filename, - const UserInfo_t& userinfo, + void RefreshSession(const std::string& filename, const UserInfo_t& userinfo, const std::string& sessionid, ReFreshSessionResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取快照状态 - * @param: filenam文件名 - * @param: userinfo是用户信息 - * @param: seq是文件版本号信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Get the snapshot status + * @param: filename is the file name + * @param: userinfo is the user information + * @param: seq is the file version number information + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void CheckSnapShotStatus(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t seq, + const UserInfo_t& userinfo, uint64_t seq, CheckSnapShotStatusResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取copysetid对应的serverlist信息并更新到metacache - * @param: logicPoolId逻辑池信息 - * @param: copysetidvec为要获取的copyset列表 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtains the server list for the given copysets and updates it into the + * metacache + * @param: logicPoolId is the logical pool information + * @param: copysetidvec is the list of copysets to query + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void GetServerList(const LogicPoolID& logicalpooid, const std::vector& copysetidvec, GetChunkServerListInCopySetsResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取mds对应的cluster id - * @param[out]: response为该rpc的respoonse,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]: channel是当前与mds建立的通道 + * Obtain the cluster ID corresponding to the MDS + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl
is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void GetClusterInfo(GetClusterInfoResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); - void ListPoolset(ListPoolsetResponse* response, - brpc::Controller* cntl, + void ListPoolset(ListPoolsetResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** - * 创建clone文件 - * @param source 克隆源文件名 - * @param:destination clone目标文件名 - * @param:userinfo 用户信息 - * @param:size 文件大小 - * @param:sn 版本号 - * @param:chunksize是创建文件的chunk大小 + * Create clone file + * @param source Clone source file name + * @param: destination clone Destination file name + * @param: userinfo User Information + * @param: size File size + * @param: sn version number + * @param: chunksize is the chunk size of the created file * @param stripeUnit stripe size * @param stripeCount stripe count - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void CreateCloneFile(const std::string& source, const std::string& destination, - const UserInfo_t& userinfo, - uint64_t size, - uint64_t sn, - uint32_t chunksize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, + const UserInfo_t& userinfo, uint64_t size, uint64_t sn, + uint32_t chunksize, uint64_t stripeUnit, + uint64_t stripeCount, const std::string& poolset, CreateCloneFileResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * @brief 通知mds完成Clone Meta - * @param: filename 目标文件 - * @param: filestatus为要设置的目标状态 - * @param: userinfo用户信息 - * @param: fileId为文件ID信息,非必填 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * @brief Notify mds to complete Clone Meta + * @param: filename Target file + * @param: filestatus is the target state to be set + * @param: userinfo User information + * @param: fileId is the file ID information, not required + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void SetCloneFileStatus(const std::string& filename, const FileStatus& filestatus, - const UserInfo_t& userinfo, - uint64_t fileID, + const UserInfo_t& userinfo, uint64_t fileID, SetCloneFileStatusResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** * Get or Alloc SegmentInfo,and update to Metacache @@ -344,68 +346,63 @@ class MDSClientBase { * @param[in|out]: cntl rpc controller * @param[in]:channel rpc channel */ - void GetOrAllocateSegment(bool allocate, - uint64_t offset, - const FInfo_t* fi, - const FileEpoch_t *fEpoch, + void GetOrAllocateSegment(bool allocate, uint64_t offset, const FInfo_t* fi, + const FileEpoch_t* fEpoch, GetOrAllocateSegmentResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); void DeAllocateSegment(const FInfo* fileInfo, uint64_t 
segmentOffset, DeAllocateSegmentResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** - * @brief 重名文件 - * @param:userinfo 用户信息 - * @param:originId 被恢复的原始文件Id - * @param:destinationId 克隆出的目标文件Id - * @param:origin 被恢复的原始文件名 - * @param:destination 克隆出的目标文件 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * @brief Rename file + * @param: userinfo User information + * @param: originId The ID of the original file being restored + * @param: destinationId The ID of the cloned destination file + * @param: origin The name of the original file being restored + * @param: destination The cloned destination file + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void RenameFile(const UserInfo_t& userinfo, - const std::string &origin, - const std::string &destination, - uint64_t originId, - uint64_t destinationId, - RenameFileResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void RenameFile(const UserInfo_t& userinfo, const std::string& origin, + const std::string& destination, uint64_t originId, + uint64_t destinationId, RenameFileResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 扩展文件 - * @param: userinfo是用户信息 - * @param: filename文件名 - * @param: newsize新的size - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Extend a file + * @param: userinfo is the user information + * @param: filename File name + * @param: newsize New size + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void Extend(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t newsize, - ExtendFileResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void Extend(const std::string& filename, const UserInfo_t& userinfo, + uint64_t newsize, ExtendFileResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @param: deleteforce是否强制删除而不放入垃圾回收站 - * @param: id为文件id,默认值为0,如果用户不指定该值,不会传id到mds - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Delete a file + * @param: userinfo is the user information + * @param: filename The file name to be deleted + * @param: deleteforce indicates whether to delete the file forcibly instead + * of moving it to the recycle bin + * @param: id is the file id; it defaults to 0, and if the user does not
 specify it, no id is passed to mds + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void DeleteFile(const std::string& filename, - const UserInfo_t& userinfo, - bool deleteforce, - uint64_t fileid, - DeleteFileResponse* response, - brpc::Controller* cntl, + void DeleteFile(const std::string& filename, const UserInfo_t& userinfo, + bool deleteforce, uint64_t fileid, + DeleteFileResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** @@ -417,61 +414,59 @@ class MDSClientBase { * @param[in|out]: cntl, return RPC status * @param[in]:channel */ - void RecoverFile(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t fileid, - RecoverFileResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void RecoverFile(const std::string& filename, const UserInfo_t& userinfo, + uint64_t fileid, RecoverFileResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 变更owner - * @param: filename待变更的文件名 - * @param: newOwner新的owner信息 - * @param: userinfo执行此操作的user信息,只有root用户才能执行变更 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Change owner + * @param: filename The file name to be changed + * @param: newOwner New owner information + * @param: userinfo The user information for performing this operation; only + * the root user can perform this change + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void ChangeOwner(const std::string& filename, - const std::string& newOwner, - const UserInfo_t& userinfo, - ChangeOwnerResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void ChangeOwner(const std::string& filename, const std::string& newOwner, + const UserInfo_t& userinfo, ChangeOwnerResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 枚举目录内容 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 - */ - void Listdir(const std::string& dirpath, - const UserInfo_t& userinfo, - ListDirResponse* response, - brpc::Controller* cntl, + * Enumerate directory contents + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS + */ + void Listdir(const std::string& dirpath, const UserInfo_t& userinfo, + ListDirResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取chunkserverID信息 - * @param[in]: ip为当前client的监听地址 - * @param[in]: port为监听端口 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain chunkserverID information + * @param[in]: ip is the listening address of the current client + * @param[in]: port is the listening port + * @param[out]: response is the response of the rpc, provided for external
+ * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void GetChunkServerInfo(const std::string& ip, - uint16_t port, + void GetChunkServerInfo(const std::string& ip, uint16_t port, GetChunkServerInfoResponse* reponse, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取server上的所有chunkserver的id - * @param[in]: ip为当前server的地址 - * @param[out]: response是当前rpc调用的response,返回给外部处理 - * @param[in|out]: cntl既是入参也是出参 - * @param[in]: channel是当前与mds建立的通道 + * Obtain the IDs of all chunkservers on the server + * @param[in]: ip is the address of the current server + * @param[out]: response is the response of the current rpc call, returned + * to external processing + * @param[in|out]: cntl is both an input and output parameter + * @param[in]: channel is the current channel established with MDS */ void ListChunkServerInServer(const std::string& ip, ListChunkServerResponse* response, @@ -480,8 +475,8 @@ class MDSClientBase { private: /** - * 为不同的request填充user信息 - * @param: request是待填充的变量指针 + * Fill in user information for different requests + * @param: request is the pointer to the variable to be filled in */ template void FillUserInfo(T* request, const UserInfo_t& userinfo) { @@ -499,7 +494,7 @@ class MDSClientBase { std::string CalcSignature(const UserInfo& userinfo, uint64_t date) const; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_MDS_CLIENT_BASE_H_ diff --git a/src/client/metacache.cpp b/src/client/metacache.cpp index 7c0a25a262..2265f6b6dd 100644 --- a/src/client/metacache.cpp +++ b/src/client/metacache.cpp @@ -19,27 +19,26 @@ * File Created: Tuesday, 25th September 2018 2:06:35 pm * Author: tongguangxun */ -#include +#include "src/client/metacache.h" #include +#include +#include #include #include -#include #include "proto/cli.pb.h" - -#include "src/client/metacache.h" -#include "src/client/mds_client.h" #include "src/client/client_common.h" +#include "src/client/mds_client.h" #include "src/common/concurrent/concurrent.h" namespace curve { namespace client { -using curve::common::WriteLockGuard; -using curve::common::ReadLockGuard; using curve::client::ClientConfig; +using curve::common::ReadLockGuard; +using curve::common::WriteLockGuard; void MetaCache::Init(const MetaCacheOption& metaCacheOpt, MDSClient* mdsclient) { @@ -87,12 +86,9 @@ bool MetaCache::IsLeaderMayChange(LogicPoolID logicPoolId, return flag; } -int MetaCache::GetLeader(LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkServerID* serverId, - EndPoint* serverAddr, - bool refresh, - FileMetric* fm) { +int MetaCache::GetLeader(LogicPoolID logicPoolId, CopysetID copysetId, + ChunkServerID* serverId, EndPoint* serverAddr, + bool refresh, FileMetric* fm) { const auto key = CalcLogicPoolCopysetID(logicPoolId, copysetId); CopysetInfo targetInfo; @@ -123,7 +119,8 @@ int MetaCache::GetLeader(LogicPoolID logicPoolId, << "logicpool id = " << logicPoolId << ", copyset id = " << copysetId; - // 重试失败,这时候需要向mds重新拉取最新的copyset信息了 + // The retry failed; at this point the latest copyset information + // needs to be re-fetched from mds ret = UpdateCopysetInfoFromMDS(logicPoolId, copysetId); if (ret == 0) { continue; @@ -135,8 +132,8 @@ if (ret == -1) { LOG(WARNING) << "get leader failed after retry!"
- << ", copyset id = " << copysetId - << ", logicpool id = " << logicPoolId; + << ", copyset id = " << copysetId + << ", logicpool id = " << logicPoolId; return -1; } @@ -148,25 +145,24 @@ int MetaCache::UpdateLeaderInternal(LogicPoolID logicPoolId, CopysetInfo* toupdateCopyset, FileMetric* fm) { ChunkServerID csid = 0; - PeerAddr leaderaddr; + PeerAddr leaderaddr; GetLeaderRpcOption rpcOption(metacacheopt_.metacacheGetLeaderRPCTimeOutMS); - GetLeaderInfo getLeaderInfo(logicPoolId, - copysetId, toupdateCopyset->csinfos_, - toupdateCopyset->GetCurrentLeaderIndex(), - rpcOption); - int ret = ServiceHelper::GetLeader( - getLeaderInfo, &leaderaddr, &csid, fm); + GetLeaderInfo getLeaderInfo( + logicPoolId, copysetId, toupdateCopyset->csinfos_, + toupdateCopyset->GetCurrentLeaderIndex(), rpcOption); + int ret = ServiceHelper::GetLeader(getLeaderInfo, &leaderaddr, &csid, fm); if (ret == -1) { LOG(WARNING) << "get leader failed!" - << ", copyset id = " << copysetId - << ", logicpool id = " << logicPoolId; + << ", copyset id = " << copysetId + << ", logicpool id = " << logicPoolId; return -1; } ret = toupdateCopyset->UpdateLeaderInfo(leaderaddr); - // 如果更新失败,说明leader地址不在当前配置组中,从mds获取chunkserver的信息 + // If the update fails, it indicates that the leader address is not in the + // current configuration group. Obtain chunkserver information from MDS if (ret == -1 && !leaderaddr.IsEmpty()) { CopysetPeerInfo csInfo; ret = mdsclient_->GetChunkServerInfo(leaderaddr, &csInfo); @@ -177,8 +173,8 @@ int MetaCache::UpdateLeaderInternal(LogicPoolID logicPoolId, return -1; } - UpdateCopysetInfoIfMatchCurrentLeader( - logicPoolId, copysetId, leaderaddr); + UpdateCopysetInfoIfMatchCurrentLeader(logicPoolId, copysetId, + leaderaddr); *toupdateCopyset = GetCopysetinfo(logicPoolId, copysetId); ret = toupdateCopyset->UpdateLeaderInfo(leaderaddr, csInfo); } @@ -201,18 +197,16 @@ int MetaCache::UpdateCopysetInfoFromMDS(LogicPoolID logicPoolId, return -1; } - // 更新chunkserverid到copyset映射关系 + // Update chunkserverid to copyset mapping relationship UpdateChunkserverCopysetInfo(logicPoolId, copysetInfos[0]); - // 更新logicpool和copysetid到copysetinfo的映射 + // Update the mapping of logicpool and copysetid to copysetinfo UpdateCopysetInfo(logicPoolId, copysetId, copysetInfos[0]); return 0; } void MetaCache::UpdateCopysetInfoIfMatchCurrentLeader( - LogicPoolID logicPoolId, - CopysetID copysetId, - const PeerAddr& leaderAddr) { + LogicPoolID logicPoolId, CopysetID copysetId, const PeerAddr& leaderAddr) { std::vector> copysetInfos; (void)mdsclient_->GetServerList(logicPoolId, {copysetId}, ©setInfos); @@ -224,15 +218,15 @@ void MetaCache::UpdateCopysetInfoIfMatchCurrentLeader( << ", copyset id = " << copysetId << ", current leader = " << leaderAddr.ToString(); - // 更新chunkserverid到copyset的映射关系 + // Update the mapping relationship between chunkserverid and copyset UpdateChunkserverCopysetInfo(logicPoolId, copysetInfos[0]); - // 更新logicpool和copysetid到copysetinfo的映射 + // Update the mapping of logicpool and copysetid to copysetinfo UpdateCopysetInfo(logicPoolId, copysetId, copysetInfos[0]); } } CopysetInfo MetaCache::GetServerList(LogicPoolID logicPoolId, - CopysetID copysetId) { + CopysetID copysetId) { const auto key = CalcLogicPoolCopysetID(logicPoolId, copysetId); CopysetInfo ret; @@ -250,8 +244,7 @@ CopysetInfo MetaCache::GetServerList(LogicPoolID logicPoolId, * the copyset client will call UpdateLeader. 
* return the ChunkServerID to invoker */ -int MetaCache::UpdateLeader(LogicPoolID logicPoolId, - CopysetID copysetId, +int MetaCache::UpdateLeader(LogicPoolID logicPoolId, CopysetID copysetId, const EndPoint& leaderAddr) { const auto key = CalcLogicPoolCopysetID(logicPoolId, copysetId); @@ -329,11 +322,13 @@ void MetaCache::SetChunkserverUnstable(ChunkServerID csid) { ChunkServerID leaderid; if (cpinfo->second.GetCurrentLeaderID(&leaderid)) { if (leaderid == csid) { - // 只设置leaderid为当前serverid的Lcopyset + // Only mark the copysets whose leader is the given + // chunkserver cpinfo->second.SetLeaderUnstableFlag(); } } else { - // 当前copyset集群信息未知,直接设置LeaderUnStable + // The membership of the current copyset is unknown, so set + // LeaderUnStable directly cpinfo->second.SetLeaderUnstableFlag(); } } @@ -346,24 +341,24 @@ void MetaCache::AddCopysetIDInfo(ChunkServerID csid, chunkserverCopysetIDMap_[csid].emplace(cpidinfo); } -void MetaCache::UpdateChunkserverCopysetInfo(LogicPoolID lpid, - const CopysetInfo& cpinfo) { +void MetaCache::UpdateChunkserverCopysetInfo( + LogicPoolID lpid, const CopysetInfo& cpinfo) { ReadLockGuard rdlk(rwlock4CopysetInfo_); const auto key = CalcLogicPoolCopysetID(lpid, cpinfo.cpid_); - // 先获取原来的chunkserver到copyset映射 + // First, obtain the original chunkserver to copyset mapping auto previouscpinfo = lpcsid2CopsetInfoMap_.find(key); if (previouscpinfo != lpcsid2CopsetInfoMap_.end()) { std::vector newID; std::vector changedID; - // 先判断当前copyset有没有变更chunkserverid + // Determine whether any chunkserver id of the current copyset has + // changed for (auto iter : previouscpinfo->second.csinfos_) { changedID.push_back(iter.peerID); } for (auto iter : cpinfo.csinfos_) { - auto it = std::find(changedID.begin(), changedID.end(), - iter.peerID); + auto it = + std::find(changedID.begin(), changedID.end(), iter.peerID); if (it != changedID.end()) { changedID.erase(it); } else { @@ -371,7 +366,7 @@ void MetaCache::UpdateChunkserverCopysetInfo(LogicPoolID lpid, } } - // 删除变更的copyset信息 + // Delete changed copyset information for (auto chunkserverid : changedID) { { WriteLockGuard wrlk(rwlock4CSCopysetIDMap_); @@ -382,7 +377,7 @@ } } - // 更新新的copyset信息到chunkserver + // Update new copyset information to chunkserver for (auto chunkserverid : newID) { WriteLockGuard wrlk(rwlock4CSCopysetIDMap_); chunkserverCopysetIDMap_[chunkserverid].emplace(lpid, cpinfo.cpid_); @@ -390,8 +385,8 @@ } } -CopysetInfo MetaCache::GetCopysetinfo( - LogicPoolID lpid, CopysetID csid) { +CopysetInfo MetaCache::GetCopysetinfo(LogicPoolID lpid, + CopysetID csid) { ReadLockGuard rdlk(rwlock4CopysetInfo_); const auto key = CalcLogicPoolCopysetID(lpid, csid); auto cpinfo = lpcsid2CopsetInfoMap_.find(key); @@ -412,10 +407,8 @@ FileSegment* MetaCache::GetFileSegment(SegmentIndex segmentIndex) { WriteLockGuard lk(rwlock4Segments_); auto ret = segments_.emplace( - std::piecewise_construct, - std::forward_as_tuple(segmentIndex), - std::forward_as_tuple(segmentIndex, - fileInfo_.segmentsize, + std::piecewise_construct, std::forward_as_tuple(segmentIndex), + std::forward_as_tuple(segmentIndex, fileInfo_.segmentsize, metacacheopt_.discardGranularity)); return &(ret.first->second); @@ -435,5 +428,5 @@ void MetaCache::CleanChunksInSegment(SegmentIndex segmentIndex) { } } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/metacache.h
b/src/client/metacache.h index a9a6e1fab7..1340a3eb25 100644 --- a/src/client/metacache.h +++ b/src/client/metacache.h @@ -61,69 +61,78 @@ class MetaCache { virtual ~MetaCache() = default; /** - * 初始化函数 - * @param: metacacheopt为当前metacache的配置option信息 - * @param: mdsclient为与mds通信的指针。 - * 为什么这里需要把mdsclient传进来? - * 因为首先metacache充当的角色就是对于MDS一侧的信息缓存 - * 所以对于底层想使用metacache的copyset client或者chunk closure - * 来说,他只需要知道metacache就可以了,不需要再去向mds查询信息, - * 在copyset client或者chunk closure发送IO失败之后会重新获取leader - * 然后再重试,如果leader获取不成功,需要向mds一侧查询当前copyset的最新信息, - * 这里将查询mds封装在内部了,这样copyset client和chunk closure就不感知mds了 + * Initialization function + * @param: metacacheopt is the configuration option information for the + * current Metacache + * @param: mdsclient is the pointer used to communicate with mds. + * Why does mdsclient need to be passed in here? + * The primary role of the Metacache is to cache information from the MDS + * side, so the lower layers that use the Metacache (the copyset client or + * a chunk closure) only need to know the Metacache and never have to query + * MDS themselves. After the copyset client or chunk closure fails to send + * IO, it fetches the leader again and retries; if the leader cannot be + * obtained, the latest information of the current copyset has to be + * queried from the MDS side. That MDS query is encapsulated here, so the + * copyset client and chunk closure stay unaware of MDS */ - void Init(const MetaCacheOption &metaCacheOpt, MDSClient *mdsclient); + void Init(const MetaCacheOption& metaCacheOpt, MDSClient* mdsclient); /** - * 通过chunk index获取chunkid信息 - * @param: chunkidx以index查询chunk对应的id信息 - * @param: chunkinfo是出参,存储chunk的版本信息 - * @param: 成功返回OK, 否则返回UNKNOWN_ERROR + * Obtain chunk information through chunk index + * @param: chunkidx queries the ID information corresponding to chunks using + * index + * @param: chunkinfo is an outgoing parameter that stores the version + * information of the chunk + * @return: OK on success, otherwise UNKNOWN_ERROR */ virtual MetaCacheErrorType GetChunkInfoByIndex(ChunkIndex chunkidx, - ChunkIDInfo_t *chunkinfo); + ChunkIDInfo_t* chunkinfo); /** * @brief Update cached chunk info by chunk index */ virtual void UpdateChunkInfoByIndex(ChunkIndex cindex, - const ChunkIDInfo &chunkinfo); + const ChunkIDInfo& chunkinfo); /** - * sender发送数据的时候需要知道对应的leader然后发送给对应的chunkserver - * 如果get不到的时候,外围设置refresh为true,然后向chunkserver端拉取最新的 - * server信息,然后更新metacache。 - * 如果当前copyset的leaderMayChange置位的时候,即使refresh为false,也需要 - * 先去拉取新的leader信息,才能继续下发IO. - * @param: lpid逻辑池id - * @param: cpid是copysetid - * @param: serverId对应chunkserver的id信息,是出参 - * @param: serverAddr为serverid对应的ip信息 - * @param: refresh,如果get不到的时候,外围设置refresh为true, - * 然后向chunkserver端拉取最新的 - * @param: fm用于统计metric - * @param: 成功返回0, 否则返回-1 + * When the sender sends data, it needs to know the corresponding leader and + * send it to the corresponding chunkserver. If it cannot retrieve the + * leader, and the external setting has "refresh" set to true, it will then + * fetch the latest server information from the chunkserver side and update + * the metacache. If the "leaderMayChange" flag of the current copyset is + * set, even if "refresh" is set to false, it is still necessary to fetch + * the new leader information before continuing with IO operations.
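+ *
+ * A minimal caller-side sketch (hypothetical code; `cache`, `SendIO`,
+ * `lpid`, `cpid` and `fm` are placeholders, not part of this header):
+ * @code
+ *   ChunkServerID leaderId = 0;
+ *   butil::EndPoint leaderAddr;
+ *   // Try the cached leader first; after an RPC failure, call again with
+ *   // refresh = true so the stale leader entry is re-fetched before the
+ *   // IO is re-sent.
+ *   if (cache->GetLeader(lpid, cpid, &leaderId, &leaderAddr) != 0 ||
+ *       SendIO(leaderAddr) != 0) {
+ *       cache->GetLeader(lpid, cpid, &leaderId, &leaderAddr, true, fm);
+ *   }
+ * @endcode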
+ * @param: lpid Logical Pool ID + * @param: cpid is copysetid + * @param: serverId is the ID of the corresponding chunkserver, passed as + * an output parameter + * @param: serverAddr is the IP information corresponding to serverid + * @param: refresh. If the leader cannot be obtained, the caller sets + * refresh to true, and the latest information is then pulled from the + * chunkserver side + * @param: fm for statistical metrics + * @return: 0 on success, otherwise -1 */ virtual int GetLeader(LogicPoolID logicPoolId, CopysetID copysetId, - ChunkServerID *serverId, butil::EndPoint *serverAddr, - bool refresh = false, FileMetric *fm = nullptr); + ChunkServerID* serverId, butil::EndPoint* serverAddr, + bool refresh = false, FileMetric* fm = nullptr); /** - * 更新某个copyset的leader信息 - * @param logicPoolId 逻辑池id - * @param copysetId 复制组id - * @param leaderAddr leader地址 - * @return: 成功返回0, 否则返回-1 + * Update the leader information of a copyset + * @param logicPoolId Logical Pool ID + * @param copysetId Replication group ID + * @param leaderAddr leader address + * @return: 0 on success, otherwise -1 */ virtual int UpdateLeader(LogicPoolID logicPoolId, CopysetID copysetId, - const butil::EndPoint &leaderAddr); + const butil::EndPoint& leaderAddr); /** - * 更新copyset数据信息,包含serverlist - * @param: lpid逻辑池id - * @param: cpid是copysetid - * @param: csinfo是要更新的copyset info + * Update copyset data information, including serverlist + * @param: lpid Logical Pool ID + * @param: cpid is copysetid + * @param: csinfo is the copyset info to be updated */ virtual void UpdateCopysetInfo(LogicPoolID logicPoolId, CopysetID copysetId, - const CopysetInfo &csinfo); + const CopysetInfo& csinfo); // Add copysets info to cache, and skip already copyset void AddCopysetsInfo( @@ -131,26 +140,26 @@ class MetaCache { std::vector>&& copysetsInfo); /** - * 通过chunk id更新chunkid信息 - * @param: cid为chunkid - * @param: cidinfo为当前chunk对应的id信息 + * Update chunk information through chunk id + * @param: cid is chunkid + * @param: cidinfo is the ID information corresponding to the current chunk */ - virtual void UpdateChunkInfoByID(ChunkID cid, const ChunkIDInfo &cidinfo); + virtual void UpdateChunkInfoByID(ChunkID cid, const ChunkIDInfo& cidinfo); /** - * 获取当前copyset的server list信息 - * @param: lpid逻辑池id - * @param: cpid是copysetid - * @return: 当前copyset的copysetinfo信息 + * Obtain the server list information for the current copyset + * @param: lpid Logical Pool ID + * @param: cpid is copysetid + * @return: The copysetinfo information of the current copyset */ virtual CopysetInfo GetServerList(LogicPoolID logicPoolId, CopysetID copysetId); /** - * 将ID转化为cache的key - * @param: lpid逻辑池id - * @param: cpid是copysetid - * @return: 为当前的key + * Convert the IDs into the cache key + * @param: lpid Logical Pool ID + * @param: cpid is copysetid + * @return: the cache key for this pair */ static LogicPoolCopysetID CalcLogicPoolCopysetID(LogicPoolID logicPoolId, CopysetID copysetId) { @@ -159,45 +168,45 @@ } /** - * @brief: 标记整个server上的所有chunkserver为unstable状态 + * @brief: Mark all chunkservers on the entire server as unstable * - * @param: serverIp server的ip地址 - * @return: 0 设置成功 / -1 设置失败 + * @param: serverIp The IP address of the server + * @return: 0 on success / -1 on failure */ - virtual int SetServerUnstable(const std::string &endPoint); + virtual int SetServerUnstable(const std::string& endPoint); /** - * 如果leader所在的chunkserver出现问题了,导致RPC失败。这时候这个 - * chunkserver上的其他leader copyset也会存在同样的问题,所以需要 - * 通知当前chunkserver上的leader copyset.
主要是通过设置这个copyset - * 的leaderMayChange标志,当该copyset的再次下发IO的时候会查看这个 - * 状态,当这个标志位置位的时候,IO下发需要先进行leader refresh, - * 如果leaderrefresh成功,leaderMayChange会被reset。 - * SetChunkserverUnstable就会遍历当前chunkserver上的所有copyset - * 并设置这个chunkserver的leader copyset的leaderMayChange标志。 - * @param: csid是当前不稳定的chunkserver ID + * If the chunkserver where the leader is located encounters a problem, + * leading to RPC failures, then other leader copysets on this chunkserver + * will also face the same issue. Therefore, it is necessary to notify the + * leader copysets on the current chunkserver. This is primarily done by + * setting the "leaderMayChange" flag for these copysets. When IO is issued + * again for a copyset with this flag set, the system will check this + * status. When this flag is set, IO issuance will first perform a leader + * refresh. If the leader refresh is successful, the "leaderMayChange" flag + * will be reset. The "SetChunkserverUnstable" operation will iterate + * through all the copysets on the current chunkserver and set the + * "leaderMayChange" flag for the leader copysets of that chunkserver. + * @param: csid is the currently unstable chunkserver ID */ virtual void SetChunkserverUnstable(ChunkServerID csid); /** - * 向map中添加对应chunkserver的copyset信息 - * @param: csid为当前chunkserverid - * @param: cpid为当前copyset的id信息 + * Add copyset information for the corresponding chunkserver to the map + * @param: csid is the current chunkserverid + * @param: cpid is the ID information of the current copyset */ virtual void AddCopysetIDInfo(ChunkServerID csid, - const CopysetIDInfo &cpid); + const CopysetIDInfo& cpid); - virtual void - UpdateChunkserverCopysetInfo(LogicPoolID lpid, - const CopysetInfo &cpinfo); + virtual void UpdateChunkserverCopysetInfo( + LogicPoolID lpid, const CopysetInfo& cpinfo); - void UpdateFileInfo(const FInfo &fileInfo) { fileInfo_ = fileInfo; } + void UpdateFileInfo(const FInfo& fileInfo) { fileInfo_ = fileInfo; } - const FInfo *GetFileInfo() const { return &fileInfo_; } + const FInfo* GetFileInfo() const { return &fileInfo_; } - void UpdateFileEpoch(const FileEpoch& fEpoch) { - fEpoch_ = fEpoch; - } + void UpdateFileEpoch(const FileEpoch& fEpoch) { fEpoch_ = fEpoch; } const FileEpoch* GetFileEpoch() const { return &fEpoch_; } @@ -212,26 +221,26 @@ class MetaCache { } /** - * 获取对应的copyset的LeaderMayChange标志 + * Get the LeaderMayChange flag of the corresponding copyset */ virtual bool IsLeaderMayChange(LogicPoolID logicpoolId, CopysetID copysetId); /** - * 测试使用 - * 获取copysetinfo信息 + * Test Usage + * Obtain copysetinfo information */ virtual CopysetInfo GetCopysetinfo(LogicPoolID lpid, CopysetID csid); - UnstableHelper &GetUnstableHelper() { return unstableHelper_; } + UnstableHelper& GetUnstableHelper() { return unstableHelper_; } uint64_t InodeId() const { return fileInfo_.id; } /** * @brief Get file segment info about the segmentIndex */ - FileSegment *GetFileSegment(SegmentIndex segmentIndex); + FileSegment* GetFileSegment(SegmentIndex segmentIndex); /** * @brief Clean chunks of this segment @@ -240,68 +249,71 @@ class MetaCache { private: /** - * @brief 从mds更新copyset复制组信息 - * @param logicPoolId 逻辑池id - * @param copysetId 复制组id - * @return 0 成功 / -1 失败 + * @brief Update copyset replication group information from mds + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @return 0 successful/-1 failed */ int UpdateCopysetInfoFromMDS(LogicPoolID logicPoolId, CopysetID copysetId); /** - * 更新copyset的leader信息 - * @param[in]: logicPoolId逻辑池信息 - * @param[in]: 
copysetId复制组信息 - * @param[out]: toupdateCopyset为metacache中待更新的copyset信息指针 + * Update the leader information of the copyset + * @param[in]: logicPoolId Logical Pool Information + * @param[in]: copysetId Replication group information + * @param[out]: toupdateCopyset is the pointer to the copyset information to + * be updated in the metacache */ int UpdateLeaderInternal(LogicPoolID logicPoolId, CopysetID copysetId, - CopysetInfo *toupdateCopyset, - FileMetric *fm = nullptr); + CopysetInfo* toupdateCopyset, + FileMetric* fm = nullptr); /** - * 从mds拉去复制组信息,如果当前leader在复制组中 - * 则更新本地缓存,反之则不更新 - * @param: logicPoolId 逻辑池id - * @param: copysetId 复制组id - * @param: leaderAddr 当前的leader address + * Pull replication group information from MDS; if the current leader is in + * the replication group, update the local cache, otherwise do not update + * @param: logicPoolId Logical Pool ID + * @param: copysetId Replication group ID + * @param: leaderAddr The current leader address */ void UpdateCopysetInfoIfMatchCurrentLeader(LogicPoolID logicPoolId, CopysetID copysetId, - const PeerAddr &leaderAddr); + const PeerAddr& leaderAddr); private: - MDSClient *mdsclient_; + MDSClient* mdsclient_; MetaCacheOption metacacheopt_; - // chunkindex到chunkidinfo的映射表 + // Mapping table from chunkindex to chunkidinfo CURVE_CACHELINE_ALIGNMENT ChunkIndexInfoMap chunkindex2idMap_; CURVE_CACHELINE_ALIGNMENT RWLock rwlock4Segments_; CURVE_CACHELINE_ALIGNMENT std::unordered_map segments_; // NOLINT - // logicalpoolid和copysetid到copysetinfo的映射表 + // Mapping table for logicalpoolid and copysetid to copysetinfo CURVE_CACHELINE_ALIGNMENT CopysetInfoMap lpcsid2CopsetInfoMap_; - // chunkid到chunkidinfo的映射表 + // chunkid to chunkidinfo mapping table CURVE_CACHELINE_ALIGNMENT ChunkInfoMap chunkid2chunkInfoMap_; - // 三个读写锁分别保护上述三个映射表 + // Three read-write locks, one protecting each of the three mapping tables + // above CURVE_CACHELINE_ALIGNMENT RWLock rwlock4chunkInfoMap_; CURVE_CACHELINE_ALIGNMENT RWLock rwlock4ChunkInfo_; CURVE_CACHELINE_ALIGNMENT RWLock rwlock4CopysetInfo_; - // chunkserverCopysetIDMap_存放当前chunkserver到copyset的映射 - // 当rpc closure设置SetChunkserverUnstable时,会设置该chunkserver - // 的所有copyset处于leaderMayChange状态,后续copyset需要判断该值来看 - // 是否需要刷新leader + // chunkserverCopysetIDMap_ stores the mapping of the current chunkserver to + // copysets. When an RPC closure sets SetChunkserverUnstable, it sets all + // the copysets of that chunkserver to the leaderMayChange state. Subsequent + // copyset operations will check this value to determine whether a leader + // refresh is needed.
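+ //
+ // An illustrative lookup against these members (a simplified sketch of
+ // the behavior described above, not additional logic in this class):
+ //   const auto key = CalcLogicPoolCopysetID(lpid, cpid);
+ //   auto it = lpcsid2CopsetInfoMap_.find(key);
+ //   if (it != lpcsid2CopsetInfoMap_.end() && it->second.leaderMayChange_) {
+ //       // refresh the leader before issuing IO on this copyset
+ //   }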
- // chunkserverid到copyset的映射 + // Mapping chunkserverid to copyset std::unordered_map> chunkserverCopysetIDMap_; // NOLINT - // 读写锁保护unStableCSMap + // Read-write lock protecting unStableCSMap CURVE_CACHELINE_ALIGNMENT RWLock rwlock4CSCopysetIDMap_; - // 当前文件信息 + // Current file information FInfo fileInfo_; // epoch info diff --git a/src/client/metacache_struct.h b/src/client/metacache_struct.h index f283687f3c..4b17893a51 100644 --- a/src/client/metacache_struct.h +++ b/src/client/metacache_struct.h @@ -43,24 +43,25 @@ using curve::common::ReadLockGuard; using curve::common::SpinLock; using curve::common::WriteLockGuard; -// copyset内的chunkserver节点的基本信息 -// 包含当前chunkserver的id信息,以及chunkserver的地址信息 +// Basic information of chunkserver nodes in the copyset +// Contains the ID information of the current chunkserver and the address +// information of the chunkserver template struct CopysetPeerInfo { - // 当前chunkserver节点的ID + // The ID of the current chunkserver node T peerID = 0; - // 当前chunkserver节点的内部地址 + // The internal address of the current chunkserver node PeerAddr internalAddr; - // 当前chunkserver节点的外部地址 + // The external address of the current chunkserver node PeerAddr externalAddr; CopysetPeerInfo() = default; - CopysetPeerInfo(const T &cid, const PeerAddr &internal, - const PeerAddr &external) + CopysetPeerInfo(const T& cid, const PeerAddr& internal, + const PeerAddr& external) : peerID(cid), internalAddr(internal), externalAddr(external) {} - bool operator==(const CopysetPeerInfo &other) const { + bool operator==(const CopysetPeerInfo& other) const { return this->internalAddr == other.internalAddr && this->externalAddr == other.externalAddr; } @@ -72,7 +73,7 @@ }; template -inline std::ostream &operator<<(std::ostream &os, const CopysetPeerInfo &c) { +inline std::ostream& operator<<(std::ostream& os, const CopysetPeerInfo& c) { os << "peer id : " << c.peerID << ", internal address : " << c.internalAddr.ToString() << ", external address : " << c.externalAddr.ToString(); @@ -81,23 +82,25 @@ } // copyset's informations including peer and leader information -template struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { - // leader存在变更可能标志位 +template +struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { + // Flag indicating that the leader may have changed bool leaderMayChange_ = false; - // 当前copyset的节点信息 + // Node information of the current copyset std::vector> csinfos_; - // leader在本copyset信息中的索引,用于后面避免重复尝试同一个leader + // The index of the leader within this copyset's peer info, used later to + // avoid repeatedly retrying the same leader int16_t leaderindex_ = -1; - // 当前copyset的id信息 + // The ID information of the current copyset CopysetID cpid_ = 0; LogicPoolID lpid_ = 0; - // 用于保护对copyset信息的修改 + // Used to protect modifications to copyset information SpinLock spinlock_; CopysetInfo() = default; ~CopysetInfo() = default; - CopysetInfo &operator=(const CopysetInfo &other) { + CopysetInfo& operator=(const CopysetInfo& other) { this->cpid_ = other.cpid_; this->lpid_ = other.lpid_; this->csinfos_ = other.csinfos_; @@ -106,9 +109,11 @@ return *this; } - CopysetInfo(const CopysetInfo &other) - : leaderMayChange_(other.leaderMayChange_), csinfos_(other.csinfos_), - leaderindex_(other.leaderindex_), cpid_(other.cpid_), + CopysetInfo(const CopysetInfo& other) + : leaderMayChange_(other.leaderMayChange_), + csinfos_(other.csinfos_), +
leaderindex_(other.leaderindex_), + cpid_(other.cpid_), lpid_(other.lpid_) {} CopysetInfo(CopysetInfo&& other) noexcept @@ -142,11 +147,11 @@ } /** - * 获取当前leader的索引 + * Get the index of the current leader */ int16_t GetCurrentLeaderIndex() const { return leaderindex_; } - bool GetCurrentLeaderID(T *id) const { + bool GetCurrentLeaderID(T* id) const { if (leaderindex_ >= 0) { if (static_cast(csinfos_.size()) < leaderindex_) { return false; @@ -160,10 +165,11 @@ } /** - * 更新leaderindex,如果leader不在当前配置组中,则返回-1 - * @param: addr为新的leader的地址信息 + * Update the leaderindex; if the leader is not in the current configuration + * group, return -1 + * @param: addr is the address information of the new leader */ - int UpdateLeaderInfo(const PeerAddr &addr, + int UpdateLeaderInfo(const PeerAddr& addr, CopysetPeerInfo csInfo = CopysetPeerInfo()) { VLOG(3) << "update leader info, pool " << lpid_ << ", copyset " << cpid_ << ", current leader " << addr.ToString(); @@ -179,7 +185,8 @@ tempindex++; } - // 新的addr不在当前copyset内,如果csInfo不为空,那么将其插入copyset + // The new addr is not within the current copyset. If csInfo is not + // empty, insert it into the copyset if (!exists && !csInfo.IsEmpty()) { csinfos_.push_back(csInfo); } else if (exists == false) { @@ -198,8 +205,10 @@ * @param[out]: peer id * @param[out]: ep */ - int GetLeaderInfo(T *peerid, EndPoint *ep) { - // 第一次获取leader,如果当前leader信息没有确定,返回-1,由外部主动发起更新leader + int GetLeaderInfo(T* peerid, EndPoint* ep) { + // When obtaining the leader for the first time, if the current leader + // information has not been determined, return -1 and let the caller + // actively trigger a leader update if (leaderindex_ < 0 || leaderindex_ >= static_cast(csinfos_.size())) { LOG(INFO) << "GetLeaderInfo pool " << lpid_ << ", copyset " << cpid_ @@ -219,32 +228,32 @@ } /** - * 添加copyset的peerinfo - * @param: csinfo为待添加的peer信息 + * Add peerinfo for the copyset + * @param: csinfo is the peer information to be added */ - void AddCopysetPeerInfo(const CopysetPeerInfo &csinfo) { + void AddCopysetPeerInfo(const CopysetPeerInfo& csinfo) { spinlock_.Lock(); csinfos_.push_back(csinfo); spinlock_.UnLock(); } /** - * 当前CopysetInfo是否合法 + * Whether the current CopysetInfo is valid */ bool IsValid() const { return !csinfos_.empty(); } /** - * 更新leaderindex + * Update leaderindex */ void UpdateLeaderIndex(int index) { leaderindex_ = index; } /** - * 当前copyset是否存在对应的chunkserver address - * @param: addr需要检测的chunkserver - * @return: true存在;false不存在 + * Whether the current copyset contains the given chunkserver address + * @param: addr The chunkserver address to check + * @return: true if it exists; false otherwise */ - bool HasPeerInCopyset(const PeerAddr &addr) const { - for (const auto &peer : csinfos_) { + bool HasPeerInCopyset(const PeerAddr& addr) const { + for (const auto& peer : csinfos_) { if (peer.internalAddr == addr || peer.externalAddr == addr) { return true; } @@ -255,13 +264,13 @@ }; template -inline std::ostream &operator<<(std::ostream &os, - const CopysetInfo &copyset) { +inline std::ostream& operator<<(std::ostream& os, + const CopysetInfo& copyset) { os << "pool id : " << copyset.lpid_ << ", copyset id : " << copyset.cpid_ << ", leader index : " << copyset.leaderindex_
<< ", leader may change : " << copyset.leaderMayChange_ << ", peers : "; - for (auto &p : copyset.csinfos_) { + for (auto& p : copyset.csinfos_) { os << p << " "; } @@ -276,13 +285,13 @@ struct CopysetIDInfo { : lpid(logicpoolid), cpid(copysetid) {} }; -inline bool operator<(const CopysetIDInfo &cpidinfo1, - const CopysetIDInfo &cpidinfo2) { +inline bool operator<(const CopysetIDInfo& cpidinfo1, + const CopysetIDInfo& cpidinfo2) { return cpidinfo1.lpid <= cpidinfo2.lpid && cpidinfo1.cpid < cpidinfo2.cpid; } -inline bool operator==(const CopysetIDInfo &cpidinfo1, - const CopysetIDInfo &cpidinfo2) { +inline bool operator==(const CopysetIDInfo& cpidinfo1, + const CopysetIDInfo& cpidinfo2) { return cpidinfo1.cpid == cpidinfo2.cpid && cpidinfo1.lpid == cpidinfo2.lpid; } @@ -290,9 +299,12 @@ class FileSegment { public: FileSegment(SegmentIndex segmentIndex, uint32_t segmentSize, uint32_t discardGranularity) - : segmentIndex_(segmentIndex), segmentSize_(segmentSize), - discardGranularity_(discardGranularity), rwlock_(), - discardBitmap_(segmentSize_ / discardGranularity_), chunks_() {} + : segmentIndex_(segmentIndex), + segmentSize_(segmentSize), + discardGranularity_(discardGranularity), + rwlock_(), + discardBitmap_(segmentSize_ / discardGranularity_), + chunks_() {} /** * @brief Confirm if all bit was discarded @@ -312,7 +324,7 @@ class FileSegment { * @brief Get internal bitmap for unit-test * @return Internal bitmap */ - Bitmap &GetBitmap() { return discardBitmap_; } + Bitmap& GetBitmap() { return discardBitmap_; } void SetBitmap(const uint64_t offset, const uint64_t length); void ClearBitmap(const uint64_t offset, const uint64_t length); @@ -370,14 +382,15 @@ inline void FileSegment::ClearBitmap(const uint64_t offset, enum class FileSegmentLockType { Read, Write }; -template class FileSegmentLockGuard { +template +class FileSegmentLockGuard { public: - explicit FileSegmentLockGuard(FileSegment *segment) : segment_(segment) { + explicit FileSegmentLockGuard(FileSegment* segment) : segment_(segment) { Lock(); } - FileSegmentLockGuard(const FileSegmentLockGuard &) = delete; - FileSegmentLockGuard &operator=(const FileSegmentLockGuard &) = delete; + FileSegmentLockGuard(const FileSegmentLockGuard&) = delete; + FileSegmentLockGuard& operator=(const FileSegmentLockGuard&) = delete; ~FileSegmentLockGuard() { UnLock(); } @@ -392,7 +405,7 @@ template class FileSegmentLockGuard { void UnLock() { segment_->ReleaseLock(); } private: - FileSegment *segment_; + FileSegment* segment_; }; using FileSegmentReadLockGuard = diff --git a/src/client/request_closure.h b/src/client/request_closure.h index 326f76e10b..753f16aea4 100644 --- a/src/client/request_closure.h +++ b/src/client/request_closure.h @@ -63,83 +63,60 @@ class CURVE_CACHELINE_ALIGNMENT RequestClosure /** * @brief Get error code */ - virtual int GetErrorCode() { - return errcode_; - } + virtual int GetErrorCode() { return errcode_; } /** * @brief Set error code, 0 means success */ - virtual void SetFailed(int errorCode) { - errcode_ = errorCode; - } + virtual void SetFailed(int errorCode) { errcode_ = errorCode; } /** - * @brief 获取当前closure属于哪个request + * @brief to obtain which request the current closure belongs to */ - virtual RequestContext* GetReqCtx() { - return reqCtx_; - } + virtual RequestContext* GetReqCtx() { return reqCtx_; } /** - * @brief 获取当前request属于哪个iotracker + * @brief: Obtain which iotracker the current request belongs to */ - virtual IOTracker* GetIOTracker() { - return tracker_; - } + virtual IOTracker* GetIOTracker() { 
return tracker_; } /** - * @brief 设置当前属于哪一个iotracker + * @brief Set the iotracker this closure belongs to */ - void SetIOTracker(IOTracker* ioTracker) { - tracker_ = ioTracker; - } + void SetIOTracker(IOTracker* ioTracker) { tracker_ = ioTracker; } /** - * @brief 设置所属的iomanager + * @brief Set the iomanager to which it belongs */ - void SetIOManager(IOManager* ioManager) { - ioManager_ = ioManager; - } + void SetIOManager(IOManager* ioManager) { ioManager_ = ioManager; } /** - * @brief 设置当前closure重试次数 + * @brief Increment the retry count of the current closure */ - void IncremRetriedTimes() { - retryTimes_++; - } + void IncremRetriedTimes() { retryTimes_++; } - uint64_t GetRetriedTimes() const { - return retryTimes_; - } + uint64_t GetRetriedTimes() const { return retryTimes_; } /** - * 设置metric + * Set metric */ - void SetFileMetric(FileMetric* fm) { - metric_ = fm; - } + void SetFileMetric(FileMetric* fm) { metric_ = fm; } /** - * 获取metric指针 + * Get metric pointer */ - FileMetric* GetMetric() const { - return metric_; - } + FileMetric* GetMetric() const { return metric_; } /** - * 获取下一次rpc超时时间, rpc超时时间实现了指数退避的策略 + * Get the next RPC timeout; the RPC timeout implements an exponential + * backoff strategy */ - uint64_t GetNextTimeoutMS() const { - return nextTimeoutMS_; - } + uint64_t GetNextTimeoutMS() const { return nextTimeoutMS_; } /** - * 设置下次重试超时时间 + * Set the timeout for the next retry */ - void SetNextTimeOutMS(uint64_t timeout) { - nextTimeoutMS_ = timeout; - } + void SetNextTimeOutMS(uint64_t timeout) { nextTimeoutMS_ = timeout; } bool IsSlowRequest() const { return slowRequest_; } @@ -153,25 +130,25 @@ // whether own inflight count bool ownInflight_ = false; - // 当前request的错误码 + // The error code of the current request int errcode_ = -1; - // 当前request的tracker信息 + // Tracker information for the current request IOTracker* tracker_ = nullptr; - // closure的request信息 + // The request information of this closure RequestContext* reqCtx_ = nullptr; - // metric信息 + // Metric information FileMetric* metric_ = nullptr; - // 重试次数 + // Number of retries uint64_t retryTimes_ = 0; - // 当前closure属于的iomanager + // The iomanager to which the current closure belongs IOManager* ioManager_ = nullptr; - // 下一次rpc超时时间 + // Next RPC timeout uint64_t nextTimeoutMS_ = 0; // create time of this closure(in millisecond) diff --git a/src/client/request_context.h b/src/client/request_context.h index 0b7c9db649..76d2acf4c9 100644 --- a/src/client/request_context.h +++ b/src/client/request_context.h @@ -28,9 +28,9 @@ #include #include +#include "include/curve_compiler_specific.h" #include "src/client/client_common.h" #include "src/client/request_closure.h" -#include "include/curve_compiler_specific.h" namespace curve { namespace client { @@ -73,13 +73,14 @@ struct CURVE_CACHELINE_ALIGNMENT RequestContext { done_ = nullptr; } - // chunk的ID信息,sender在发送rpc的时候需要附带其ID信息 - ChunkIDInfo idinfo_; + // The ID information of the chunk, which the sender needs to include when + // sending rpc + ChunkIDInfo idinfo_; - // 用户IO被拆分之后,其小IO有自己的offset和length - off_t offset_ = 0; - OpType optype_ = OpType::UNKNOWN; - size_t rawlength_ = 0; + // After user IO is split, its small IO has its own offset and length + off_t offset_ = 0; + OpType optype_ = OpType::UNKNOWN; + size_t rawlength_ = 0; // user's single io request will split into several requests // subIoIndex_ is an index of serveral requests @@ -91,29 +92,31 @@ // write data of current request butil::IOBuf
writeData_; - // 因为RPC都是异步发送,因此在一个Request结束时,RPC回调调用当前的done - // 来告知当前的request结束了 - RequestClosure* done_ = nullptr; + // Because RPCs are sent asynchronously, when a request finishes, the RPC + // callback invokes the current done to signal that the request is over + RequestClosure* done_ = nullptr; // file id uint64_t fileId_; // file epoch uint64_t epoch_; - // request的版本信息 - uint64_t seq_ = 0; + // Version information of the request + uint64_t seq_ = 0; - // 这个对应的GetChunkInfo的出参 - ChunkInfoDetail* chunkinfodetail_ = nullptr; + // The output parameter of the corresponding GetChunkInfo call + ChunkInfoDetail* chunkinfodetail_ = nullptr; - // clone chunk请求需要携带源chunk的location及所需要创建的chunk的大小 - uint32_t chunksize_ = 0; - std::string location_; - RequestSourceInfo sourceInfo_; - // create clone chunk时候用于修改chunk的correctedSn - uint64_t correctedSeq_ = 0; + // The clone chunk request needs to carry the location of the source chunk + // and the size of the chunk that needs to be created + uint32_t chunksize_ = 0; + std::string location_; + RequestSourceInfo sourceInfo_; + // CorrectedSn used to modify a chunk when creating a clone chunk + uint64_t correctedSeq_ = 0; - // 当前request context id - uint64_t id_ = 0; + // Current request context id + uint64_t id_ = 0; static RequestContext* NewInitedRequestContext() { RequestContext* ctx = new (std::nothrow) RequestContext(); @@ -139,10 +142,8 @@ inline std::ostream& operator<<(std::ostream& os, os << "logicpool id = " << reqCtx.idinfo_.lpid_ << ", copyset id = " << reqCtx.idinfo_.cpid_ << ", chunk id = " << reqCtx.idinfo_.cid_ - << ", offset = " << reqCtx.offset_ - << ", length = " << reqCtx.rawlength_ - << ", sub-io index = " << reqCtx.subIoIndex_ - << ", sn = " << reqCtx.seq_ + << ", offset = " << reqCtx.offset_ << ", length = " << reqCtx.rawlength_ + << ", sub-io index = " << reqCtx.subIoIndex_ << ", sn = " << reqCtx.seq_ << ", source info = " << reqCtx.sourceInfo_; return os; diff --git a/src/client/request_scheduler.cpp b/src/client/request_scheduler.cpp index e723126235..939115e210 100.644 --- a/src/client/request_scheduler.cpp +++ b/src/client/request_scheduler.cpp @@ -25,9 +25,9 @@ #include #include -#include "src/client/request_context.h" -#include "src/client/request_closure.h" #include "src/client/chunk_closure.h" +#include "src/client/request_closure.h" +#include "src/client/request_context.h" namespace curve { namespace client { @@ -35,8 +35,7 @@ RequestScheduler::~RequestScheduler() {} int RequestScheduler::Init(const RequestScheduleOption& reqSchdulerOpt, - MetaCache* metaCache, - FileMetric* fm) { + MetaCache* metaCache, FileMetric* fm) { blockIO_.store(false); reqschopt_ = reqSchdulerOpt; @@ -58,8 +57,7 @@ } LOG(INFO) << "RequestScheduler conf info: " - << "scheduleQueueCapacity = " - << reqschopt_.scheduleQueueCapacity + << "scheduleQueueCapacity = " << reqschopt_.scheduleQueueCapacity << ", scheduleThreadpoolSize = " << reqschopt_.scheduleThreadpoolSize; return 0; @@ -77,7 +75,7 @@ int RequestScheduler::Fini() { if (running_.exchange(false, std::memory_order_acq_rel)) { for (int i = 0; i < threadPool_.NumOfThreads(); ++i) { // notify the wait thread - BBQItem stopReq(nullptr, true); + BBQItem stopReq(nullptr, true); queue_.PutBack(stopReq); } threadPool_.Stop(); @@ -89,7 +87,7 @@ int RequestScheduler::ScheduleRequest( const std::vector& requests) { if (running_.load(std::memory_order_acquire)) { - /* 
TODO(wudemiao): 后期考虑 qos */ + /* TODO(wudemiao): Consider QoS in the later stage */ for (auto it : requests) { // skip the fake request if (!it->idinfo_.chunkExist) { @@ -99,7 +97,7 @@ continue; } - BBQItem req(it); + BBQItem req(it); queue_.PutBack(req); } return 0; @@ -107,18 +105,18 @@ return -1; } -int RequestScheduler::ScheduleRequest(RequestContext *request) { +int RequestScheduler::ScheduleRequest(RequestContext* request) { if (running_.load(std::memory_order_acquire)) { - BBQItem req(request); + BBQItem req(request); queue_.PutBack(req); return 0; } return -1; } -int RequestScheduler::ReSchedule(RequestContext *request) { +int RequestScheduler::ReSchedule(RequestContext* request) { if (running_.load(std::memory_order_acquire)) { - BBQItem req(request); + BBQItem req(request); queue_.PutFront(req); return 0; } @@ -126,14 +124,17 @@ } void RequestScheduler::WakeupBlockQueueAtExit() { - // 在scheduler退出的时候要把队列的内容清空, 通知copyset client - // 当前操作是退出状态,copyset client会针对inflight RPC做响应处理 - // 正常情况下队列内容一定会在Fini调用结束之后全部清空 - // 但是在session刷新失败的时候,scheduler无法继续下发 - // RPC请求,所以需要设置blockingQueue_标志,告知scheduler - // 把队列里内容统统扔到copyset client,因为在session - // 续约失败后copyset client会将IO全部失败返回,scheduler - // 模块不需要处理具体RPC请求,由copyset client负责。 + // When the scheduler exits, the contents of the queue must be drained and + // the copyset client notified that we are in the exit state; the copyset + // client then handles the inflight RPCs accordingly. Normally the queue is + // fully drained once the Fini call completes. But when the session refresh + // fails, the scheduler cannot continue issuing RPC requests, so the + // blockingQueue_ flag has to be set to tell the scheduler to hand + // everything in the queue over to the copyset client: after a session + // renewal failure the copyset client fails all IO back to the caller, so + // the scheduler module does not need to handle the individual RPC + // requests; the copyset client is responsible for them.
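+ //
+ // In outline, the code below does the following (a restatement of the
+ // comment above, added for clarity; the wake-up itself relies on the
+ // lease condition variable used by WaitValidSession()):
+ //   1. reset the copyset client's exit flag so inflight IO fails fast;
+ //   2. clear blockingQueue_ so Process() no longer parks on the lease;
+ //   3. wake any thread blocked in WaitValidSession() so it drains the
+ //      queue into the copyset client.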
client_.ResetExitFlag(); blockingQueue_ = false; std::atomic_thread_fence(std::memory_order_acquire); @@ -151,8 +152,8 @@ void RequestScheduler::Process() { ProcessOne(req); } else { /** - * 一旦遇到stop item,所有线程都可以退出,因为此时 - * queue里面所有的request都被处理完了 + * Once a stop item is encountered, all threads can exit because at + * this point all requests in the queue have been processed */ stop_.store(true, std::memory_order_release); } @@ -172,8 +173,8 @@ void RequestScheduler::ProcessOne(RequestContext* ctx) { case OpType::WRITE: ctx->done_->GetInflightRPCToken(); client_.WriteChunk(ctx->idinfo_, ctx->fileId_, ctx->epoch_, - ctx->seq_, ctx->writeData_, - ctx->offset_, ctx->rawlength_, ctx->sourceInfo_, + ctx->seq_, ctx->writeData_, ctx->offset_, + ctx->rawlength_, ctx->sourceInfo_, guard.release()); break; case OpType::READ_SNAP: @@ -197,11 +198,12 @@ guard.release()); break; default: - /* TODO(wudemiao) 后期整个链路错误发统一了在处理 */ + /* TODO(wudemiao): error handling for the whole IO path will be + * unified later */ ctx->done_->SetFailed(-1); LOG(ERROR) << "unknown op type: OpType::UNKNOWN"; } } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/request_scheduler.h b/src/client/request_scheduler.h index 752f72bcb0..f00ded5bc1 100644 --- a/src/client/request_scheduler.h +++ b/src/client/request_scheduler.h @@ -25,88 +25,86 @@ #include -#include "src/common/uncopyable.h" +#include "include/curve_compiler_specific.h" +#include "src/client/client_common.h" #include "src/client/config_info.h" +#include "src/client/copyset_client.h" #include "src/common/concurrent/bounded_blocking_queue.h" #include "src/common/concurrent/thread_pool.h" -#include "src/client/client_common.h" -#include "src/client/copyset_client.h" -#include "include/curve_compiler_specific.h" +#include "src/common/uncopyable.h" namespace curve { namespace client { -using curve::common::ThreadPool; -using curve::common::BoundedBlockingDeque; using curve::common::BBQItem; +using curve::common::BBQItem; +using curve::common::BoundedBlockingDeque; +using curve::common::ThreadPool; using curve::common::Uncopyable; struct RequestContext; /** - * 请求调度器,上层拆分的I/O会交给Scheduler的线程池 - * 分发到具体的ChunkServer,后期QoS也会放在这里处理 + * Request scheduler: I/O split by the upper layer is handed to the + * Scheduler's thread pool and dispatched to the specific ChunkServers. + * QoS will also be handled here in the future */ class RequestScheduler : public Uncopyable { public: RequestScheduler() - : running_(false), - stop_(true), - client_(), - blockingQueue_(true) {} + : running_(false), stop_(true), client_(), blockingQueue_(true) {} virtual ~RequestScheduler(); /** - * 初始化 - * @param: reqSchdulerOpt为scheduler的配置选项 - * @param: metacache为meta信息 - * @param: filematric为文件的metric信息 + * Initialize + * @param: reqSchdulerOpt is the configuration option for the scheduler + * @param: metacache is the meta information + * @param: filematric is the metric information of the file */ virtual int Init(const RequestScheduleOption& reqSchdulerOpt, - MetaCache *metaCache, - FileMetric* fileMetric = nullptr); + MetaCache* metaCache, FileMetric* fileMetric = nullptr); /** - * 启动Scheduler的线程池开始处理request - * 启动之后才能push request,除此之外,只有当 - * queue里面的任务都被处理完了,才会Scheduler - * 的 thread pool里面的所有线程都退出 - * @return 0成功,-1失败 + * Start the Scheduler's thread pool to begin processing requests. + * Requests can only be pushed after starting.
Furthermore, only when + * all tasks in the queue have been processed will all threads in the + * Scheduler's thread pool exit. + * @return 0 for success, -1 for failure */ virtual int Run(); /** - * Stop Scheduler,一旦调用了Fini,那么 - * 此Scheduler不再接收新的request - * @return 0成功,-1失败 + * Stop the Scheduler; once Fini has been called, this scheduler no + * longer accepts new requests + * @return 0 succeeded, -1 failed */ virtual int Fini(); /** - * 将request push到Scheduler处理 - * @param requests:请求列表 - * @return 0成功,-1失败 + * Push the requests to the scheduler for processing + * @param requests: request list + * @return 0 succeeded, -1 failed */ virtual int ScheduleRequest(const std::vector& requests); /** - * 将request push到Scheduler处理 - * @param request:一个request - * @return 0成功,-1失败 + * Push the request to the scheduler for processing + * @param request: a request + * @return 0 succeeded, -1 failed */ - virtual int ScheduleRequest(RequestContext *request); + virtual int ScheduleRequest(RequestContext* request); /** - * 对于需要重新入队的RPC将其放在头部 + * RPCs that need to be requeued are put at the front of the queue */ - virtual int ReSchedule(RequestContext *request); + virtual int ReSchedule(RequestContext* request); /** - * 关闭scheduler之前如果队列在sessionnotvalid睡眠就将其唤醒 + * Before the scheduler is shut down, wake the queue up if it is sleeping + * because the session is not valid */ virtual void WakeupBlockQueueAtExit(); /** - * 当LeaseExecutor续约失败的时候,调用LeaseTimeoutDisableIO - * 后续的IO调度会被阻塞 + * When LeaseExecutor renewal fails, LeaseTimeoutDisableIO is called and + * subsequent IO scheduling is blocked */ void LeaseTimeoutBlockIO() { std::unique_lock lk(leaseRefreshmtx_); @@ -115,8 +113,8 @@ } /** - * 当lease又续约成功的时候,LeaseExecutor调用该接口恢复IO, - * IO调度被恢复 + * When the lease is successfully renewed, the LeaseExecutor calls this + * interface to resume IO, and IO scheduling is restored */ void ResumeIO() { std::unique_lock lk(leaseRefreshmtx_); @@ -126,7 +124,7 @@ } /** - * 测试使用,获取队列 + * For testing purposes, get the queue.
*/ BoundedBlockingDeque>* GetQueue() { return &queue_; } private: /** - * Thread pool的运行函数,会从queue中取request进行处理 + * The thread pool's run function; it takes requests from the queue and + * processes them */ void Process(); void ProcessOne(RequestContext* ctx); void WaitValidSession() { - // lease续约失败的时候需要阻塞IO直到续约成功 + // When the lease renewal fails, IO needs to be blocked until the + // renewal is successful if (blockIO_.load(std::memory_order_acquire) && blockingQueue_) { std::unique_lock lk(leaseRefreshmtx_); leaseRefreshcv_.wait(lk, [&]() -> bool { @@ -151,32 +151,34 @@ } private: - // 线程池和queue容量的配置参数 + // Configuration parameters for thread pool and queue capacity RequestScheduleOption reqschopt_; - // 存放 request 的队列 - BoundedBlockingDeque> queue_; - // 处理 request 的线程池 + // Queue for storing requests + BoundedBlockingDeque> queue_; + // Thread pool for processing requests ThreadPool threadPool_; - // Scheduler 运行标记,只有运行了,才接收 request + // The running flag of the Scheduler; requests are accepted only while it + // is running std::atomic running_; - // stop thread pool 标记,当调用 Scheduler Fini - // 之后且 queue 里面的 request 都处理完了,就可以 - // 让所有处理线程退出了 + // Stop-thread-pool flag: once Scheduler Fini has been called and all + // requests in the queue have been processed, all processing threads are + // allowed to exit std::atomic stop_; - // 访问复制组Chunk的客户端 + // Client accessing replication group Chunk CopysetClient client_; - // 续约失败,卡住IO + // Lease renewal failed; IO is blocked std::atomic blockIO_; - // 此锁与LeaseRefreshcv_条件变量配合使用 - // 在leasee续约失败的时候,所有新下发的IO被阻塞直到续约成功 + // This lock is used together with the leaseRefreshcv_ condition variable: + // when lease renewal fails, all newly issued IO is blocked until the + // renewal succeeds + std::mutex leaseRefreshmtx_; - // 条件变量,用于唤醒和hang IO + // Condition variable used to wake up and hang IO std::condition_variable leaseRefreshcv_; - // 阻塞队列 + // Blocking queue bool blockingQueue_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_REQUEST_SCHEDULER_H_ diff --git a/src/client/request_sender.h b/src/client/request_sender.h index f288160267..99bc94b2e3 100644 --- a/src/client/request_sender.h +++ b/src/client/request_sender.h @@ -29,23 +29,22 @@ #include -#include "src/client/client_config.h" -#include "src/client/client_common.h" -#include "src/client/chunk_closure.h" +#include "include/curve_compiler_specific.h" +#include "src/client/chunk_closure.h" +#include "src/client/client_common.h" +#include "src/client/client_config.h" #include "src/client/request_context.h" namespace curve { namespace client { /** - * 一个RequestSender负责管理一个ChunkServer的所有 - * connection,目前一个ChunkServer仅有一个connection + * A RequestSender is responsible for managing all connections of a single + * ChunkServer; currently a ChunkServer has only one connection */ class RequestSender { public: - RequestSender(ChunkServerID chunkServerId, - butil::EndPoint serverEndPoint) + RequestSender(ChunkServerID chunkServerId, butil::EndPoint serverEndPoint) : chunkServerId_(chunkServerId), serverEndPoint_(serverEndPoint), channel_() {} @@ -54,125 +53,111 @@ int Init(const IOSenderOption& ioSenderOpt); /** - * 读Chunk - * @param idinfo为chunk相关的id信息 - * @param sn:文件版本号 - * @param offset:读的偏移 - * @param length:读的长度 - * @param sourceInfo 数据源信息 - * @param
done:上一层异步回调的closure + * Read a chunk + * @param idinfo: ID information of the chunk + * @param sn: file version number + * @param offset: read offset + * @param length: read length + * @param sourceInfo: data source information + * @param done: closure for the upper layer's asynchronous callback */ - int ReadChunk(const ChunkIDInfo& idinfo, - uint64_t sn, - off_t offset, - size_t length, - const RequestSourceInfo& sourceInfo, - ClientClosure *done); + int ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset, + size_t length, const RequestSourceInfo& sourceInfo, + ClientClosure* done); /** - * 写Chunk - * @param idinfo为chunk相关的id信息 - * @param fileId: file id - * @param epoch: file epoch - * @param sn:文件版本号 - * @param data 要写入的数据 - *@param offset:写的偏移 - * @param length:写的长度 - * @param sourceInfo 数据源信息 - * @param done:上一层异步回调的closure - */ - int WriteChunk(const ChunkIDInfo& idinfo, - uint64_t fileId, - uint64_t epoch, - uint64_t sn, - const butil::IOBuf& data, - off_t offset, - size_t length, - const RequestSourceInfo& sourceInfo, - ClientClosure *done); + * Write a chunk + * @param idinfo: ID information of the chunk + * @param fileId: file id + * @param epoch: file epoch + * @param sn: file version number + * @param data: the data to be written + * @param offset: write offset + * @param length: write length + * @param sourceInfo: data source information + * @param done: closure for the upper layer's asynchronous callback + */ + int WriteChunk(const ChunkIDInfo& idinfo, uint64_t fileId, uint64_t epoch, + uint64_t sn, const butil::IOBuf& data, off_t offset, + size_t length, const RequestSourceInfo& sourceInfo, + ClientClosure* done); /** - * 读Chunk快照文件 - * @param idinfo为chunk相关的id信息 - * @param sn:文件版本号 - * @param offset:读的偏移 - * @param length:读的长度 - * @param done:上一层异步回调的closure + * Read a chunk snapshot file + * @param idinfo: ID information of the chunk + * @param sn: file version number + * @param offset: read offset + * @param length: read length + * @param done: closure for the upper layer's asynchronous callback */ - int ReadChunkSnapshot(const ChunkIDInfo& idinfo, - uint64_t sn, - off_t offset, - size_t length, - ClientClosure *done); + int ReadChunkSnapshot(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset, + size_t length, ClientClosure* done); /** - * 删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param idinfo为chunk相关的id信息 - * @param correctedSn:chunk需要修正的版本号 - * @param done:上一层异步回调的closure + * Delete snapshots generated during this dump or left over from history; + * if no snapshot was generated during the dump, modify the chunk's + * correctedSn + * @param idinfo: ID information of the chunk + * @param correctedSn: the version number of the chunk that needs to be + * corrected + * @param done: closure for the upper layer's asynchronous callback */ int DeleteChunkSnapshotOrCorrectSn(const ChunkIDInfo& idinfo, - uint64_t correctedSn, - ClientClosure *done); + uint64_t correctedSn, + ClientClosure* done); /** - * 获取chunk文件的信息 - * @param idinfo为chunk相关的id信息 - * @param done:上一层异步回调的closure - * @param retriedTimes:已经重试了几次 + * Get information about a chunk file + * @param idinfo: ID information of the chunk + * @param done: closure for the upper layer's asynchronous callback + * @param retriedTimes: number of retries so far */ - int GetChunkInfo(const ChunkIDInfo& idinfo, - ClientClosure *done); + int GetChunkInfo(const ChunkIDInfo& idinfo, ClientClosure* done); /** - * @brief lazy 创建clone chunk 
- * @detail - * - location的格式定义为 A@B的形式。 - * - 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址; - * - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs - * - * @param idinfo为chunk相关的id信息 - * @param done:上一层异步回调的closure - * @param:location 数据源的url - * @param:sn chunk的序列号 - * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn - * @param:chunkSize chunk的大小 - * @param retriedTimes:已经重试了几次 - * - * @return 错误码 - */ - int CreateCloneChunk(const ChunkIDInfo& idinfo, - ClientClosure *done, - const std::string &location, - uint64_t sn, - uint64_t correntSn, - uint64_t chunkSize); - - /** - * @brief 实际恢复chunk数据 - * @param idinfo为chunk相关的id信息 - * @param done:上一层异步回调的closure - * @param:offset 偏移 - * @param:len 长度 - * @param retriedTimes:已经重试了几次 - * - * @return 错误码 - */ - int RecoverChunk(const ChunkIDInfo& idinfo, - ClientClosure* done, uint64_t offset, uint64_t len); + * @brief Lazily create a clone chunk + * @detail + * - A location is defined in the form A@B. + * - If the source data is on s3, the location format is uri@s3, where uri + * is the address of the actual chunk object; + * - If the source data is on curvefs, the location format is + * /filename/chunkindex@cs + * + * @param idinfo: ID information of the chunk + * @param done: closure for the upper layer's asynchronous callback + * @param: location, URL of the data source + * @param: sn, sequence number of the chunk + * @param: correntSn, used to correct the chunk's correctedSn when creating + * the clone chunk + * @param: chunkSize, size of the chunk + * @param retriedTimes: number of retries so far + * + * @return error code + */ + int CreateCloneChunk(const ChunkIDInfo& idinfo, ClientClosure* done, + const std::string& location, uint64_t sn, + uint64_t correntSn, uint64_t chunkSize); + + /** + * @brief Actually recover chunk data + * @param idinfo: ID information of the chunk + * @param done: closure for the upper layer's asynchronous callback + * @param: offset: offset + * @param: len: length + * @param retriedTimes: number of retries so far + * + * @return error code + */ + int RecoverChunk(const ChunkIDInfo& idinfo, ClientClosure* done, + uint64_t offset, uint64_t len); /** - * 重置和Chunk Server的链接 - * @param chunkServerId:Chunk Server唯一标识 - * @param serverEndPoint:Chunk Server - * @return 0成功,-1失败 + * Reset the connection to a Chunk Server + * @param chunkServerId: unique identifier of the Chunk Server + * @param serverEndPoint: Chunk Server + * @return 0 on success, -1 on failure */ int ResetSender(ChunkServerID chunkServerId, butil::EndPoint serverEndPoint); - bool IsSocketHealth() { - return channel_.CheckHealth() == 0; - } + bool IsSocketHealth() { return channel_.CheckHealth() == 0; } private: void UpdateRpcRPS(ClientClosure* done, OpType type) const; @@ -181,16 +166,17 @@ class RequestSender { google::protobuf::Message* rpcResponse) const; private: - // Rpc stub配置 + // Rpc stub configuration IOSenderOption iosenderopt_; - // ChunkServer 的唯一标识 id + // Unique ID of the ChunkServer ChunkServerID chunkServerId_; - // ChunkServer 的地址 + // Address of the ChunkServer butil::EndPoint serverEndPoint_; - brpc::Channel channel_; /* TODO(wudemiao): 后期会维护多个 channel */ + brpc::Channel channel_; /* TODO(wudemiao): multiple channels will be + maintained later */ }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_REQUEST_SENDER_H_ diff --git a/src/client/request_sender_manager.cpp b/src/client/request_sender_manager.cpp index a5c77a793f..a5f7264e4b 100644 --- a/src/client/request_sender_manager.cpp +++ 
b/src/client/request_sender_manager.cpp @@ -30,8 +30,7 @@ namespace curve { namespace client { RequestSenderManager::SenderPtr RequestSenderManager::GetOrCreateSender( - const ChunkServerID& leaderId, - const butil::EndPoint& leaderAddr, + const ChunkServerID& leaderId, const butil::EndPoint& leaderAddr, const IOSenderOption& senderopt) { { curve::common::ReadLockGuard guard(rwlock_); @@ -66,7 +65,7 @@ void RequestSenderManager::ResetSenderIfNotHealth(const ChunkServerID& csId) { return; } - // 检查是否健康 + // Check whether it is healthy if (iter->second->IsSocketHealth()) { return; } @@ -74,5 +73,5 @@ void RequestSenderManager::ResetSenderIfNotHealth(const ChunkServerID& csId) { senderPool_.erase(iter); } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/request_sender_manager.h b/src/client/request_sender_manager.h index 530d8c1c82..189fab3cc8 100644 --- a/src/client/request_sender_manager.h +++ b/src/client/request_sender_manager.h @@ -38,8 +38,8 @@ using curve::common::Uncopyable; class RequestSender; /** - * 所有Chunk Server的request sender管理者, - * 可以理解为Chunk Server的链接管理者 + * Manager of the request senders for all Chunk Servers; + * it can be thought of as the connection manager for Chunk Servers */ class RequestSenderManager : public Uncopyable { public: @@ -47,30 +47,31 @@ class RequestSenderManager : public Uncopyable { RequestSenderManager() : rwlock_(), senderPool_() {} /** - * 获取指定leader id的sender,如果没有则根据leader - * 地址,创建新的 sender并返回 - * @param leaderId:leader的id - * @param leaderAddr:leader的地址 - * @return nullptr:get或者create失败,否则成功 + * Get the sender for the specified leader id; if none exists, create a + * new sender based on the leader address and return it + * @param leaderId: the id of the leader + * @param leaderAddr: the address of the leader + * @return nullptr if get or create failed, otherwise the sender */ SenderPtr GetOrCreateSender(const ChunkServerID& leaderId, const butil::EndPoint& leaderAddr, const IOSenderOption& senderopt); /** - * @brief 如果csId对应的RequestSender不健康,就进行重置 + * @brief If the RequestSender corresponding to csId is not healthy, reset + * it * @param csId chunkserver id */ void ResetSenderIfNotHealth(const ChunkServerID& csId); private: - // 读写锁,保护senderPool_ + // Read-write lock protecting senderPool_ curve::common::BthreadRWLock rwlock_; - // 请求发送链接的map,以ChunkServer ID为key + // Map of request-sending connections, keyed by ChunkServer ID std::unordered_map senderPool_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_REQUEST_SENDER_MANAGER_H_ diff --git a/src/client/service_helper.cpp b/src/client/service_helper.cpp index 70a7be6e34..3c8fbee5da 100644 --- a/src/client/service_helper.cpp +++ b/src/client/service_helper.cpp @@ -28,6 +28,7 @@ #include #include #include + #include "src/client/client_config.h" #include "src/client/client_metric.h" #include "src/common/curve_define.h" @@ -164,6 +165,7 @@ void ServiceHelper::ProtoCloneSourceInfo2Local( class GetLeaderProxy : public std::enable_shared_from_this { friend struct GetLeaderClosure; + public: GetLeaderProxy() : proxyId_(getLeaderProxyId.fetch_add(1, std::memory_order_relaxed)), success_(false) {} /** - * @brief 等待GetLeader返回结果 - * @param[out] leaderId leader的id - * @param[out] leaderAddr leader的ip地址 - * @return 0 成功 / -1 失败 + * @brief Wait for GetLeader to return a result + * @param[out] leaderId The ID of the leader + 
* @param[out] leaderAddr The IP address of the leader + * @return 0 on success / -1 on failure */ int Wait(ChunkServerID* leaderId, PeerAddr* leaderAddr) { { @@ -212,11 +214,11 @@ } /** - * @brief 发起GetLeader请求 - * @param peerAddresses 除当前leader以外的peer地址 - * @param logicPoolId getleader请求的logicpool id - * @param copysetId getleader请求的copyset id - * @param fileMetric metric统计 + * @brief Initiate a GetLeader request + * @param peerAddresses Peer addresses other than the current leader + * @param logicPoolId logicpool id of the GetLeader request + * @param copysetId copyset id of the GetLeader request + * @param fileMetric metric statistics */ void StartGetLeader(const std::unordered_set& peerAddresses, const GetLeaderRpcOption& rpcOption, @@ -270,10 +272,10 @@ } /** - * @brief 处理异步请求结果 - * @param callId rpc请求id - * @param success rpc请求是否成功 - * @param peer rpc请求返回的leader信息 + * @brief Process the result of an asynchronous request + * @param callId rpc request id + * @param success whether the rpc request succeeded + * @param peer the leader information returned by the rpc request */ void HandleResponse(brpc::CallId callId, bool success, const curve::common::Peer& peer) { @@ -289,7 +291,8 @@ continue; } - // cancel以后,后续的rpc请求回调仍然会执行,但是会标记为失败 + // After canceling, subsequent rpc request callbacks will still + // be executed, but will be marked as failed brpc::StartCancel(id); } @@ -301,10 +304,11 @@ success_ = true; finishCv_.notify_one(); } else { - // 删除当前call id + // Delete the current call id callIds_.erase(callId); - // 如果为空,说明是最后一个rpc返回,需要标记请求失败,并向上返回 + // If it is empty, this is the last rpc to return, so the request + // needs to be marked as failed and propagated upwards if (callIds_.empty()) { std::lock_guard ulk(finishMtx_); finish_ = true; @@ -317,24 +321,25 @@ private: uint64_t proxyId_; - // 是否完成请求 - // 1. 其中一个请求成功 - // 2. 最后一个请求返回 - // 都会标记为true + // Whether the request has finished; it is marked true when either + // 1. one of the requests succeeds, or + // 2. the last request returns bool finish_; bthread::ConditionVariable finishCv_; bthread::Mutex finishMtx_; - // 记录cntl id + // Records the cntl ids std::set callIds_; - // 请求是否成功 + // Whether the request succeeded bool success_; - // leader信息 + // Leader information curve::common::Peer leader_; - // 保护callIds_/success_,避免异步rpc回调同时操作 + // Protects callIds_/success_ to prevent concurrent operations from + // asynchronous rpc callbacks bthread::Mutex mtx_; LogicPoolID logicPooldId_; @@ -367,17 +372,16 @@ void GetLeaderClosure::Run() { } else { success = true; LOG(INFO) << "GetLeader returned from " << cntl.remote_side() - << ", logicpool id = " << logicPoolId - << ", copyset id = " << copysetId - << ", proxy id = " << proxy->proxyId_ - << ", leader = " << response.DebugString(); + << ", logicpool id = " << logicPoolId + << ", copyset id = " << copysetId + << ", proxy id = " << proxy->proxyId_ + << ", leader = " << response.DebugString(); } proxy->HandleResponse(cntl.call_id(), success, response.leader()); } int ServiceHelper::GetLeader(const GetLeaderInfo& getLeaderInfo, - PeerAddr* leaderAddr, - ChunkServerID* leaderId, + PeerAddr* leaderAddr, ChunkServerID* leaderId, FileMetric* fileMetric) { const auto& peerInfo = getLeaderInfo.copysetPeerInfo; @@ -423,8 +427,8 @@ bool ServiceHelper::GetUserInfoFromFilename(const std::string& filename, return true; } -int ServiceHelper::CheckChunkServerHealth( - const butil::EndPoint& endPoint, int32_t requestTimeoutMs) { +int ServiceHelper::CheckChunkServerHealth(const butil::EndPoint& endPoint, + int32_t requestTimeoutMs) { brpc::Controller cntl; brpc::Channel httpChannel; brpc::ChannelOptions options; @@ -437,22 +441,22 @@ int ServiceHelper::CheckChunkServerHealth( return -1; } - // 访问 ip:port/health + // Access ip:port/health cntl.http_request().uri() = ipPort + "/health"; cntl.set_timeout_ms(requestTimeoutMs); httpChannel.CallMethod(nullptr, &cntl, nullptr, nullptr, nullptr); if (cntl.Failed()) { LOG(WARNING) << "CheckChunkServerHealth failed, " << cntl.ErrorText() - << ", url = " << cntl.http_request().uri(); + << ", url = " << cntl.http_request().uri(); return -1; } else { LOG(INFO) << "CheckChunkServerHealth success, " - << cntl.response_attachment() - << ", url = " << cntl.http_request().uri(); + << cntl.response_attachment() + << ", url = " << cntl.http_request().uri(); return 0; } } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/service_helper.h b/src/client/service_helper.h index 279c6a17f5..4de48afbf3 100644 --- a/src/client/service_helper.h +++ b/src/client/service_helper.h @@ -25,12 +25,13 @@ #include #include - #include -#include + +#include #include #include -#include +#include + #include "proto/cli2.pb.h" #include "proto/nameserver2.pb.h" #include "src/client/client_common.h" @@ -40,7 +41,7 @@ namespace curve { namespace client { -// GetLeader请求rpc参数信息 +// Rpc parameter information for a GetLeader request struct GetLeaderRpcOption { uint32_t rpcTimeoutMs; @@ -48,29 +49,30 @@ struct GetLeaderRpcOption { : rpcTimeoutMs(rpcTimeoutMs) {} }; -// GetLeader请求对应的copyset信息及rpc相关参数信息 +// Copyset information and rpc-related parameters corresponding to a +// GetLeader request struct GetLeaderInfo { LogicPoolID logicPoolId; - CopysetID copysetId; + CopysetID copysetId; std::vector> copysetPeerInfo; - int16_t currentLeaderIndex; + int16_t currentLeaderIndex; GetLeaderRpcOption rpcOption; - GetLeaderInfo(const LogicPoolID& logicPoolId, - const CopysetID& 
copysetId, - const std::vector>& copysetPeerInfo, //NOLINT + GetLeaderInfo(const LogicPoolID& logicPoolId, const CopysetID& copysetId, + const std::vector>& + copysetPeerInfo, // NOLINT int16_t currentLeaderIndex, const GetLeaderRpcOption& rpcOption = GetLeaderRpcOption()) - : logicPoolId(logicPoolId), - copysetId(copysetId), - copysetPeerInfo(copysetPeerInfo), - currentLeaderIndex(currentLeaderIndex), - rpcOption(rpcOption) {} + : logicPoolId(logicPoolId), + copysetId(copysetId), + copysetPeerInfo(copysetPeerInfo), + currentLeaderIndex(currentLeaderIndex), + rpcOption(rpcOption) {} }; class GetLeaderProxy; -// GetLeader异步请求回调 +// Callback for the asynchronous GetLeader request struct GetLeaderClosure : public google::protobuf::Closure { GetLeaderClosure(LogicPoolID logicPoolId, CopysetID copysetId, std::shared_ptr proxy) @@ -86,7 +88,7 @@ struct GetLeaderClosure : public google::protobuf::Closure { curve::chunkserver::GetLeaderResponse2 response; }; -// ServiceHelper是client端RPC服务的一些工具 +// ServiceHelper provides utilities for the client-side RPC services class ServiceHelper { public: /** @@ -103,38 +105,41 @@ class ServiceHelper { CloneSourceInfo* info); /** - * 从chunkserver端获取最新的leader信息 - * @param[in]: getLeaderInfo为对应copyset的信息 - * @param[out]: leaderAddr是出参,返回当前copyset的leader信息 - * @param[out]: leaderId是出参,返回当前leader的id信息 - * @param[in]: fileMetric是用于metric的记录 - * @return: 成功返回0,否则返回-1 + * Obtain the latest leader information from the chunkserver side + * @param[in]: getLeaderInfo is the information of the corresponding copyset + * @param[out]: leaderAddr is the output parameter that returns the leader + * information of the current copyset + * @param[out]: leaderId is the output parameter, returning the ID + * information of the current leader + * @param[in]: fileMetric is used for metric recording + * @return: returns 0 on success, otherwise -1 */ static int GetLeader(const GetLeaderInfo& getLeaderInfo, - PeerAddr *leaderAddr, + PeerAddr* leaderAddr, ChunkServerID* leaderId = nullptr, FileMetric* fileMetric = nullptr); /** - * 从文件名中获取user信息. - * 用户的user信息需要夹在文件名中,比如文件名为temp,用户名为user, - * 那么其完整的文件信息是:temp_user_。 - * 如果文件名为: /temp_temp_,那么完整文件名为/temp_temp__user_。 - * @param[in]: filename为用户传下来的文件名 - * @param[out]:realfilename是真正文件名 - * @param[out]: user信息,出参 - * @return: 获取到user信息为true,否则false + * Obtain user information from the file name. + * The user information is embedded in the file name: for a file named + * temp owned by user, the full file name is temp_user_. If the file name + * is /temp_temp_, the full file name is /temp_temp__user_. 
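+ * Example (illustrative): + *   std::string realname, user; + *   GetUserInfoFromFilename("temp_user_", &realname, &user); + *   // realname == "temp", user == "user"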
+ * @param[in]: filename is the file name passed down by the user + * @param[out]: realfilename is the real file name + * @param[out]: user, the user information (output parameter) + * @return: true if the user information was obtained, otherwise false */ static bool GetUserInfoFromFilename(const std::string& fname, std::string* realfilename, std::string* user); /** - * @brief: 发送http请求,判断chunkserver是否健康 + * @brief: Send an HTTP request to determine whether the chunkserver is + * healthy * - * @param: endPoint chunkserver的ip:port - * @param: http请求的超时时间 + * @param: endPoint, the chunkserver's ip:port + * @param: HTTP request timeout * - * @return: 0 表示健康,-1表示不健康 + * @return: 0 means healthy, -1 means unhealthy */ static int CheckChunkServerHealth(const butil::EndPoint& endPoint, int32_t requestTimeoutMs); @@ -147,6 +152,6 @@ class ServiceHelper { common::ReadWriteThrottleParams* localParams); }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_SERVICE_HELPER_H_ diff --git a/src/client/splitor.h b/src/client/splitor.h index eaffa27a62..7e923cb1ea 100644 --- a/src/client/splitor.h +++ b/src/client/splitor.h @@ -46,53 +46,51 @@ class Splitor { static void Init(const IOSplitOption& ioSplitOpt); /** - * 用户IO拆分成Chunk级别的IO - * @param: iotracker大IO上下文信息 - * @param: metaCache是io拆分过程中需要使用的缓存信息 - * @param: targetlist大IO被拆分之后的小IO存储列表 - * @param: data 是待写的数据 - * @param: offset用户下发IO的其实偏移 - * @param: length数据长度 - * @param: mdsclient在查找metacahe失败时,通过mdsclient查找信息 - * @param: fi存储当前IO的一些基本信息,比如chunksize等 - * @param: FileEpoch_t file epoch info + * Split user IO into chunk-level IO + * @param: iotracker, context information of the large IO + * @param: metaCache is the cache information used during the IO + * splitting process + * @param: targetlist, list storing the small IOs after the large IO is + * split + * @param: data is the data to be written + * @param: offset, starting offset of the IO issued by the user + * @param: length, data length + * @param: mdsclient, used to look up information when the metacache + * lookup fails + * @param: fi, stores basic information about the current IO, such as + * chunksize + * @param: fEpoch, file epoch information */ - static int IO2ChunkRequests(IOTracker* iotracker, - MetaCache* metaCache, - std::vector* targetlist, - butil::IOBuf* data, - off_t offset, - size_t length, - MDSClient* mdsclient, - const FInfo_t* fi, - const FileEpoch_t* fEpoch); + static int IO2ChunkRequests(IOTracker* iotracker, MetaCache* metaCache, + std::vector* targetlist, + butil::IOBuf* data, off_t offset, size_t length, + MDSClient* mdsclient, const FInfo_t* fi, + const FileEpoch_t* fEpoch); /** - * 对单ChunkIO进行细粒度拆分 - * @param: iotracker大IO上下文信息 - * @param: metaCache是io拆分过程中需要使用的缓存信息 - * @param: targetlist大IO被拆分之后的小IO存储列表 - * @param: cid是当前chunk的ID信息 - * @param: data是待写的数据 - * @param: offset是当前chunk内的偏移 - * @param: length数据长度 - * @param: seq是当前chunk的版本号 + * Fine-grained splitting of a single chunk IO + * @param: iotracker, context information of the large IO + * @param: metaCache is the cache information used during the IO + * splitting process + * @param: targetlist, list storing the small IOs after the large IO is + * split + * @param: cid is the ID information of the current chunk + * @param: data is the data to be written + * @param: offset is the offset within the current chunk + * @param: length, data length + * @param: seq is the version number of the current chunk */ - static int 
SingleChunkIO2ChunkRequests(IOTracker* iotracker, - MetaCache* metaCache, - std::vector* targetlist, - const ChunkIDInfo& cid, - butil::IOBuf* data, - off_t offset, - size_t length, - uint64_t seq); + static int SingleChunkIO2ChunkRequests( + IOTracker* iotracker, MetaCache* metaCache, + std::vector* targetlist, const ChunkIDInfo& cid, + butil::IOBuf* data, off_t offset, size_t length, uint64_t seq); /** - * @brief 计算请求的location信息 - * @param ioTracker io上下文信息 - * @param metaCache 文件缓存信息 - * @param chunkIdx 当前chunk信息 - * @return source信息 + * @brief calculates the location information of the request + * @param ioTracker io Context Information + * @param metaCache file cache information + * @param chunkIdx Current chunk information + * @return source information */ static RequestSourceInfo CalcRequestSourceInfo(IOTracker* ioTracker, MetaCache* metaCache, @@ -105,34 +103,33 @@ class Splitor { private: /** - * IO2ChunkRequests内部会调用这个函数,进行真正的拆分操作 - * @param: iotracker大IO上下文信息 - * @param: mc是io拆分过程中需要使用的缓存信息 - * @param: targetlist大IO被拆分之后的小IO存储列表 - * @param: data 是待写的数据 - * @param: offset用户下发IO的其实偏移 - * @param: length数据长度 - * @param: mdsclient在查找metacahe失败时,通过mdsclient查找信息 - * @param: fi存储当前IO的一些基本信息,比如chunksize等 - * @param: chunkidx是当前chunk在vdisk中的索引值 + * IO2ChunkRequests will internally call this function for actual splitting + * operations + * @param: iotracker Big IO Context Information + * @param: mc is the cache information that needs to be used during IO + * splitting process + * @param: targetlist The storage list of small IO after the large IO is + * split + * @param: Data is the data to be written + * @param: offset The actual offset of IO issued by the user + * @param: length Data length + * @param: mdsclient searches for information through mdsclient when + * searching for metaahe fails + * @param: fi stores some basic information about the current IO, such as + * chunksize, etc + * @param: chunkidx is the index value of the current chunk in the vdisk */ - static bool AssignInternal(IOTracker* iotracker, - MetaCache* metaCache, - std::vector* targetlist, - butil::IOBuf* data, - off_t offset, - uint64_t length, - MDSClient* mdsclient, - const FInfo_t* fi, - const FileEpoch_t* fEpoch, - ChunkIndex chunkidx); - - static bool GetOrAllocateSegment(bool allocateIfNotExist, - uint64_t offset, - MDSClient* mdsClient, - MetaCache* metaCache, + static bool AssignInternal(IOTracker* iotracker, MetaCache* metaCache, + std::vector* targetlist, + butil::IOBuf* data, off_t offset, + uint64_t length, MDSClient* mdsclient, + const FInfo_t* fi, const FileEpoch_t* fEpoch, + ChunkIndex chunkidx); + + static bool GetOrAllocateSegment(bool allocateIfNotExist, uint64_t offset, + MDSClient* mdsClient, MetaCache* metaCache, const FInfo* fileInfo, - const FileEpoch_t *fEpoch, + const FileEpoch_t* fEpoch, ChunkIndex chunkidx); static int SplitForNormal(IOTracker* iotracker, MetaCache* metaCache, @@ -149,14 +146,13 @@ class Splitor { static bool MarkDiscardBitmap(IOTracker* iotracker, FileSegment* fileSegment, - SegmentIndex segmentIndex, - uint64_t offset, + SegmentIndex segmentIndex, uint64_t offset, uint64_t len); private: - // IO拆分模块所使用的配置信息 + // Configuration information used for IO split modules static IOSplitOption iosplitopt_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_SPLITOR_H_ diff --git a/src/client/unstable_helper.cpp b/src/client/unstable_helper.cpp index 5cc99945fe..ae330b1294 100644 --- a/src/client/unstable_helper.cpp +++ 
b/src/client/unstable_helper.cpp @@ -24,13 +24,13 @@ namespace curve { namespace client { -UnstableState -UnstableHelper::GetCurrentUnstableState(ChunkServerID csId, - const butil::EndPoint &csEndPoint) { +UnstableState UnstableHelper::GetCurrentUnstableState( + ChunkServerID csId, const butil::EndPoint& csEndPoint) { std::string ip = butil::ip2str(csEndPoint.ip).c_str(); mtx_.lock(); - // 如果当前ip已经超过阈值,则直接返回chunkserver unstable + // If the current IP has already exceeded the threshold, directly return + // chunkserver unstable uint32_t unstabled = serverUnstabledChunkservers_[ip].size(); if (unstabled >= option_.serverUnstableThreshold) { serverUnstabledChunkservers_[ip].emplace(csId); diff --git a/src/client/unstable_helper.h b/src/client/unstable_helper.h index 89cc22be8e..47c9be6a25 100644 --- a/src/client/unstable_helper.h +++ b/src/client/unstable_helper.h @@ -35,20 +35,17 @@ namespace curve { namespace client { -enum class UnstableState { - NoUnstable, - ChunkServerUnstable, - ServerUnstable -}; - -// 如果chunkserver宕机或者网络不可达, 发往对应chunkserver的rpc会超时 -// 返回之后, 回去refresh leader然后再去发送请求 -// 这种情况下不同copyset上的请求,总会先rpc timedout然后重新refresh leader -// 为了避免一次多余的rpc timedout -// 记录一下发往同一个chunkserver上超时请求的次数 -// 如果超过一定的阈值,会发送http请求检查chunkserver是否健康 -// 如果不健康,则通知所有leader在这台chunkserver上的copyset -// 主动去refresh leader,而不是根据缓存的leader信息直接发送rpc +enum class UnstableState { NoUnstable, ChunkServerUnstable, ServerUnstable }; + +// If a chunkserver goes down or the network is unreachable, rpc requests sent +// to that chunkserver will time out; after they return, the client refreshes +// the leader and then resends the request. In this case, requests on +// different copysets always hit an rpc timeout first and then refresh the +// leader again. To avoid one redundant rpc timeout, the number of timed-out +// requests sent to the same chunkserver is recorded; once it exceeds a +// certain threshold, an HTTP request is sent to check whether the chunkserver +// is healthy. If it is unhealthy, all copysets whose leader is on this +// chunkserver are notified to proactively refresh their leader instead of +// sending rpc directly based on cached leader information class UnstableHelper { public: UnstableHelper() = default; @@ -56,9 +53,7 @@ class UnstableHelper { UnstableHelper(const UnstableHelper&) = delete; UnstableHelper& operator=(const UnstableHelper&) = delete; - void Init(const ChunkServerUnstableOption& opt) { - option_ = opt; - } + void Init(const ChunkServerUnstableOption& opt) { option_ = opt; } void IncreTimeout(ChunkServerID csId) { std::unique_lock guard(mtx_); @@ -78,10 +73,10 @@ class UnstableHelper { private: /** - * @brief 检查chunkserver状态 + * @brief Check chunkserver status * - * @param: endPoint chunkserver的ip:port地址 - * @return: true 健康 / false 不健康 + * @param: endPoint, ip:port address of the chunkserver + * @return: true if healthy / false if unhealthy */ bool CheckChunkServerHealth(const butil::EndPoint& endPoint) const { return ServiceHelper::CheckChunkServerHealth( @@ -92,10 +87,10 @@ class UnstableHelper { bthread::Mutex mtx_; - // 同一chunkserver连续超时请求次数 + // Number of consecutive timeout requests for the same chunkserver std::unordered_map timeoutTimes_; - // 同一server上unstable chunkserver的id + // IDs of unstable chunkservers on the same server std::unordered_map> serverUnstabledChunkservers_; }; diff --git a/src/common/authenticator.h b/src/common/authenticator.h index 7d9ba319c3..f52560379a 100644 --- a/src/common/authenticator.h +++ b/src/common/authenticator.h @@ -30,31 +30,30 @@ namespace common { class Authenticator { 
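// A brief usage sketch of the interfaces declared below (hypothetical date // and owner values; secretKey is supplied by the caller): //   std::string s2s = Authenticator::GetString2Signature(1700000000, "owner"); //   std::string sig = Authenticator::CalcString2Signature(s2s, secretKey);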
public: /** - * bref: 获取要进行签名的字符串 - * @param: date, 当前的时间 - * @param: owner, 文件所有者 - * @return: 返回需要进行加密的字符串 + * brief: Get the string to be signed + * @param: date, current time + * @param: owner, file owner + * @return: the string to be encrypted */ static std::string GetString2Signature(uint64_t date, - const std::string& owner); + const std::string& owner); /** - * bref: 为字符串计算签名 - * @param: String2Signature, 需要进行签名计算的字符串 - * @param: secretKey, 为计算的秘钥 - * @return: 返回需要进行签名过后的字符串 + * brief: Calculate the signature for a string + * @param: String2Signature, the string for which the signature is + * calculated + * @param: secretKey, the secret key used for the calculation + * @return: the signed string */ static std::string CalcString2Signature(const std::string& String2Signature, const std::string& secretKey); private: - static int HMacSha256(const void* key, int key_size, - const void* data, int data_size, - void* digest); + static int HMacSha256(const void* key, int key_size, const void* data, + int data_size, void* digest); - static std::string Base64(const unsigned char *src, size_t sz); + static std::string Base64(const unsigned char* src, size_t sz); }; -} // namespace common -} // namespace curve +} // namespace common +} // namespace curve #endif // SRC_COMMON_AUTHENTICATOR_H_ diff --git a/src/common/bitmap.cpp b/src/common/bitmap.cpp index dbff32702c..50d33181d9 100644 --- a/src/common/bitmap.cpp +++ b/src/common/bitmap.cpp @@ -20,20 +20,22 @@ * Author: yangyaokai */ +#include "src/common/bitmap.h" + #include #include -#include + #include -#include "src/common/bitmap.h" +#include namespace curve { namespace common { -std::string BitRangeVecToString(const std::vector &ranges) { +std::string BitRangeVecToString(const std::vector& ranges) { std::stringstream ss; for (uint32_t i = 0; i < ranges.size(); ++i) { if (i != 0) { - ss << ", "; + ss << ", "; } ss << "(" << ranges[i].beginIndex << "," << ranges[i].endIndex << ")"; } @@ -44,14 +46,14 @@ const uint32_t Bitmap::NO_POS = 0xFFFFFFFF; Bitmap::Bitmap(uint32_t bits) : bits_(bits) { int count = unitCount(); - bitmap_ = new(std::nothrow) char[count]; + bitmap_ = new (std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; memset(bitmap_, 0, count); } Bitmap::Bitmap(uint32_t bits, const char* bitmap) : bits_(bits) { int count = unitCount(); - bitmap_ = new(std::nothrow) char[count]; + bitmap_ = new (std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; if (bitmap != nullptr) { memcpy(bitmap_, bitmap, count); @@ -64,7 +66,7 @@ Bitmap::Bitmap(uint32_t bits, char* bitmap, bool transfer) : bits_(bits) { int count = unitCount(); if (!transfer) { - bitmap_ = new(std::nothrow) char[count]; + bitmap_ = new (std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; if (bitmap != nullptr) { memcpy(bitmap_, bitmap, count); @@ -87,18 +89,17 @@ Bitmap::~Bitmap() { Bitmap::Bitmap(const Bitmap& bitmap) { bits_ = bitmap.Size(); int count = unitCount(); - bitmap_ = new(std::nothrow) char[count]; + bitmap_ = new (std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; memcpy(bitmap_, bitmap.GetBitmap(), count); } -Bitmap& Bitmap::operator = (const Bitmap& bitmap) { - if (this == &bitmap) - return *this; +Bitmap& Bitmap::operator=(const Bitmap& bitmap) { - if (this == &bitmap) return *this; delete[] bitmap_; bits_ = bitmap.Size(); int count = unitCount(); - bitmap_ = new 
(std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; memcpy(bitmap_, bitmap.GetBitmap(), count); return *this; @@ -118,23 +119,19 @@ Bitmap& Bitmap::operator=(Bitmap&& other) noexcept { return *this; } -bool Bitmap::operator == (const Bitmap& bitmap) const { - if (bits_ != bitmap.Size()) - return false; +bool Bitmap::operator==(const Bitmap& bitmap) const { + if (bits_ != bitmap.Size()) return false; return 0 == memcmp(bitmap_, bitmap.GetBitmap(), unitCount()); } -bool Bitmap::operator != (const Bitmap& bitmap) const { +bool Bitmap::operator!=(const Bitmap& bitmap) const { return !(*this == bitmap); } -void Bitmap::Set() { - memset(bitmap_, 0xff, unitCount()); -} +void Bitmap::Set() { memset(bitmap_, 0xff, unitCount()); } void Bitmap::Set(uint32_t index) { - if (index < bits_) - bitmap_[indexOfUnit(index)] |= mask(index); + if (index < bits_) bitmap_[indexOfUnit(index)] |= mask(index); } void Bitmap::Set(uint32_t startIndex, uint32_t endIndex) { @@ -144,13 +141,10 @@ void Bitmap::Set(uint32_t startIndex, uint32_t endIndex) { } } -void Bitmap::Clear() { - memset(bitmap_, 0, unitCount()); -} +void Bitmap::Clear() { memset(bitmap_, 0, unitCount()); } void Bitmap::Clear(uint32_t index) { - if (index < bits_) - bitmap_[indexOfUnit(index)] &= ~mask(index); + if (index < bits_) bitmap_[indexOfUnit(index)] &= ~mask(index); } void Bitmap::Clear(uint32_t startIndex, uint32_t endIndex) { @@ -169,106 +163,93 @@ bool Bitmap::Test(uint32_t index) const { uint32_t Bitmap::NextSetBit(uint32_t index) const { for (; index < bits_; ++index) { - if (Test(index)) - break; + if (Test(index)) break; } - if (index >= bits_) - index = NO_POS; + if (index >= bits_) index = NO_POS; return index; } uint32_t Bitmap::NextSetBit(uint32_t startIndex, uint32_t endIndex) const { uint32_t index = startIndex; - // bitmap中最后一个bit的index值 + // The index value of the last bit in the bitmap uint32_t lastIndex = bits_ - 1; - // endIndex值不能超过lastIndex - if (endIndex > lastIndex) - endIndex = lastIndex; + // The endIndex value cannot exceed lastIndex + if (endIndex > lastIndex) endIndex = lastIndex; for (; index <= endIndex; ++index) { - if (Test(index)) - break; + if (Test(index)) break; } - if (index > endIndex) - index = NO_POS; + if (index > endIndex) index = NO_POS; return index; } uint32_t Bitmap::NextClearBit(uint32_t index) const { for (; index < bits_; ++index) { - if (!Test(index)) - break; + if (!Test(index)) break; } - if (index >= bits_) - index = NO_POS; + if (index >= bits_) index = NO_POS; return index; } uint32_t Bitmap::NextClearBit(uint32_t startIndex, uint32_t endIndex) const { uint32_t index = startIndex; uint32_t lastIndex = bits_ - 1; - // endIndex值不能超过lastIndex - if (endIndex > lastIndex) - endIndex = lastIndex; + // The endIndex value cannot exceed lastIndex + if (endIndex > lastIndex) endIndex = lastIndex; for (; index <= endIndex; ++index) { - if (!Test(index)) - break; + if (!Test(index)) break; } - if (index > endIndex) - index = NO_POS; + if (index > endIndex) index = NO_POS; return index; } -void Bitmap::Divide(uint32_t startIndex, - uint32_t endIndex, +void Bitmap::Divide(uint32_t startIndex, uint32_t endIndex, vector* clearRanges, vector* setRanges) const { - // endIndex的值不能小于startIndex - if (endIndex < startIndex) - return; + // The value of endIndex cannot be less than startIndex + if (endIndex < startIndex) return; - // endIndex值不能超过lastIndex + // The endIndex value cannot exceed lastIndex uint32_t lastIndex = bits_ - 1; - if (endIndex > lastIndex) - endIndex = 
lastIndex; + if (endIndex > lastIndex) endIndex = lastIndex; BitRange clearRange; BitRange setRange; vector tmpClearRanges; vector tmpSetRanges; - // 下一个位为0的index + // Next index with 0 bits uint32_t nextClearIndex; - // 下一个位为1的index + // Next index with bit 1 uint32_t nextSetIndex; - // 划分所有range + // Divide all ranges while (startIndex != NO_POS) { nextClearIndex = NextClearBit(startIndex, endIndex); - // 1.存放当前clear index之前的 set range - // nextClearIndex如果等于startIndex说明前面没有 set range + // 1. Store the set range before the current clear index + // If nextClearIndex is equal to startIndex, it indicates that there + // is no set range before it if (nextClearIndex != startIndex) { setRange.beginIndex = startIndex; - // nextClearIndex等于NO_POS说明已经找到末尾 - // 最后一块连续区域是 set range - setRange.endIndex = nextClearIndex == NO_POS - ? endIndex - : nextClearIndex - 1; + // nextClearIndex equals NO_POS description has found the end + // The last continuous area is set range + setRange.endIndex = + nextClearIndex == NO_POS ? endIndex : nextClearIndex - 1; tmpSetRanges.push_back(setRange); } - if (nextClearIndex == NO_POS) - break; + if (nextClearIndex == NO_POS) break; nextSetIndex = NextSetBit(nextClearIndex, endIndex); - // 2.存放当前set index之前的 clear range - // 能到这一步说明前面肯定存在clear range,所以不用像第1步一样做判断 + // 2. Store the clear range before the current set index + // Being able to reach this step indicates that there must be a clear + // range ahead, so there is no need to make a judgment like in step 1 clearRange.beginIndex = nextClearIndex; - clearRange.endIndex = nextSetIndex == NO_POS - ? endIndex - : nextSetIndex - 1; + clearRange.endIndex = + nextSetIndex == NO_POS ? endIndex : nextSetIndex - 1; tmpClearRanges.push_back(clearRange); startIndex = nextSetIndex; } - // 根据参数中的clearRanges和setRanges指针是否为空返回结果 + // Returns a result based on whether the clearRanges and setRanges pointers + // in the parameters are empty if (clearRanges != nullptr) { *clearRanges = std::move(tmpClearRanges); } @@ -277,13 +258,9 @@ void Bitmap::Divide(uint32_t startIndex, } } -uint32_t Bitmap::Size() const { - return bits_; -} +uint32_t Bitmap::Size() const { return bits_; } -const char* Bitmap::GetBitmap() const { - return bitmap_; -} +const char* Bitmap::GetBitmap() const { return bitmap_; } } // namespace common } // namespace curve diff --git a/src/common/bitmap.h b/src/common/bitmap.h index e7a0e1270d..f4b6f76ce7 100644 --- a/src/common/bitmap.h +++ b/src/common/bitmap.h @@ -24,8 +24,9 @@ #define SRC_COMMON_BITMAP_H_ #include -#include + #include +#include namespace curve { namespace common { @@ -36,30 +37,30 @@ const int BITMAP_UNIT_SIZE = 8; const int ALIGN_FACTOR = 3; // 2 ^ ALIGN_FACTOR = BITMAP_UNIT_SIZE /** - * 表示bitmap中的一段连续区域,为闭区间 + * Represents a continuous region in a bitmap, which is a closed interval */ struct BitRange { - // 连续区域起始位置在bitmap中的索引 + // Index of the starting position of a continuous region in Bitmap uint32_t beginIndex; - // 连续区域结束位置在bitmap中的索引 + // Index of the end position of a continuous region in Bitmap uint32_t endIndex; }; - -std::string BitRangeVecToString(const std::vector &ranges); +std::string BitRangeVecToString(const std::vector& ranges); class Bitmap { public: /** - * 新建bitmap时的构造函数 - * @param bits: 要构造的bitmap的位数 + * Constructor when creating a new bitmap + * @param bits: The number of bits to construct the bitmap */ explicit Bitmap(uint32_t bits); /** - * 从已有的快照文件初始化时的构造函数 - * 构造函数内部会再new一个新的bitmap,然后从参数中的bitmap memcpy过去 - * @param bits: bitmap的位数 - * @param bitmap: 外部提供的用于初始化的bitmap + 
* Constructor when initializing from an existing snapshot file + * The constructor allocates a new bitmap internally and then memcpy's + * from the bitmap passed in the parameters + * @param bits: number of bits in the bitmap + * @param bitmap: an externally provided bitmap used for initialization */ explicit Bitmap(uint32_t bits, const char* bitmap); @@ -70,142 +71,158 @@ class Bitmap { ~Bitmap(); /** - * 拷贝构造,使用深拷贝 - * @param bitmap:从该对象拷贝内容 + * Copy constructor, using deep copy + * @param bitmap: the object to copy content from */ Bitmap(const Bitmap& bitmap); /** - * 赋值函数,使用深拷贝 - * @param bitmap:从该对象拷贝内容 - * @reutrn:返回拷贝后对象引用 + * Assignment operator, using deep copy + * @param bitmap: the object to copy content from + * @return: reference to the assigned object */ - Bitmap& operator = (const Bitmap& bitmap); + Bitmap& operator=(const Bitmap& bitmap); Bitmap(Bitmap&& other) noexcept; Bitmap& operator=(Bitmap&& other) noexcept; /** - * 比较两个bitmap是否相同 - * @param bitmap:待比较的bitmap - * @return:如果相同返回true,如果不同返回false + * Compare whether two bitmaps are the same + * @param bitmap: the bitmap to compare with + * @return: true if the same, false if different */ - bool operator == (const Bitmap& bitmap) const; /** - * 比较两个bitmap是否不同 - * @param bitmap:待比较的bitmap - * @return:如果不同返回true,如果相同返回false + * Compare whether two bitmaps are different + * @param bitmap: the bitmap to compare with + * @return: true if different, false if the same */ - bool operator != (const Bitmap& bitmap) const; + bool operator!=(const Bitmap& bitmap) const; /** - * 将所有位置1 + * Set all bits to 1 */ void Set(); /** - * 将指定位置1 - * @param index: 指定位的位置 + * Set the specified bit to 1 + * @param index: position of the bit */ void Set(uint32_t index); /** - * 将指定范围的位置为1 - * @param startIndex: 范围起始位置,包括此位置 - * @param endIndex: 范围结束位置,包括此位置 + * Set the bits in the specified range to 1 + * @param startIndex: start position of the range, inclusive + * @param endIndex: end position of the range, inclusive */ void Set(uint32_t startIndex, uint32_t endIndex); /** - * 将所有位置0 + * Clear all bits to 0 */ void Clear(); /** - * 将指定位置0 - * @param index: 指定位的位置 + * Clear the specified bit to 0 + * @param index: position of the bit */ void Clear(uint32_t index); /** - * 将指定范围的位置为0 - * @param startIndex: 范围起始位置,包括此位置 - * @param endIndex: 范围结束位置,包括此位置 + * Clear the bits in the specified range to 0 + * @param startIndex: start position of the range, inclusive + * @param endIndex: end position of the range, inclusive */ void Clear(uint32_t startIndex, uint32_t endIndex); /** - * 获取指定位置位的状态 - * @param index: 指定位的位置 - * @return: true表示当前位状态为1,false表示为0 + * Get the state of the bit at the specified position + * @param index: position of the bit + * @return: true if the bit is 1, false if it is 0 */ bool Test(uint32_t index) const; /** - * 获取指定位置及之后的首个位为1的位置 - * @param index: 指定位的位置,包含此位置 - * @return: 首个位为1的位置,如果不存在返回NO_POS + * Get the first position whose bit is 1, at or after the specified + * position + * @param index: position of the bit, inclusive + * @return: the first position whose bit is 1. 
If it does not exist, + * return NO_POS */ uint32_t NextSetBit(uint32_t index) const; /** - * 获取指定起始位置到结束位置之间的的首个位为1的位置 - * @param startIndex: 起始位置,包含此位置 - * @param endIndex: 结束位置,包含此位置 - * @return: 首个位为1的位置,如果指定范围内不存在则返回NO_POS + * Gets the position where the first bit between the specified start + * position and end position is 1 + * @param startIndex: The starting position, including this position + * @param endIndex: End position, including this position + * @return: The position where the first bit is 1. If it does not exist + * within the specified range, return NO_POS */ uint32_t NextSetBit(uint32_t startIndex, uint32_t endIndex) const; /** - * 获取指定位置及之后的首个位为0的位置 - * @param index: 指定位的位置,包含此位置 - * @return: 首个位为0的位置,如果不存在返回NO_POS + * Obtain the specified position and the position after which the first bit + * is 0 + * @param index: Refers to the location of the positioning, including this + * location + * @return: The position where the first bit is 0. If it does not exist, + * return NO_POS */ uint32_t NextClearBit(uint32_t index) const; /** - * 获取指定起始位置到结束位置之间的的首个位为0的位置 - * @param startIndex: 起始位置,包含此位置 - * @param endIndex: 结束位置,包含此位置 - * @return: 首个位为0的位置,如果指定范围内不存在则返回NO_POS + * Gets the position where the first bit between the specified start + * position and end position is 0 + * @param startIndex: The starting position, including this position + * @param endIndex: End position, including this position + * @return: The position where the first bit is 0. If it does not exist + * within the specified range, return NO_POS */ uint32_t NextClearBit(uint32_t startIndex, uint32_t endIndex) const; /** - * 将bitmap的指定区域分割成若干连续区域,划分依据为位状态,连续区域内的位状态一致 - * 例如:00011100会被划分为三个区域,[0,2]、[3,5]、[6,7] - * @param startIndex: 指定区域的起始索引 - * @param endIndex: 指定范围的结束索引 - * @param clearRanges: 存放位状态为0的连续区域的向量,可以指定为nullptr - * @param setRanges: 存放位状态为1的连续区域的向量,可以指定为nullptr - */ - void Divide(uint32_t startIndex, - uint32_t endIndex, + * Divide the designated area of the bitmap into several continuous areas + * based on bit states, with consistent bit states within the continuous + * areas For example, 00011100 will be divided into three regions: [0,2], + * [3,5], [6,7] + * @param startIndex: The starting index of the specified region + * @param endIndex: The end index of the specified range + * @param clearRanges: A vector that stores a continuous region with a bit + * state of 0, which can be specified as nullptr + * @param setRanges: A vector that stores a continuous region with a bit + * state of 1, which can be specified as nullptr + */ + void Divide(uint32_t startIndex, uint32_t endIndex, vector* clearRanges, vector* setRanges) const; /** - * bitmap的有效位数 - * @return: 返回位数 + * Bitmap's significant digits + * @return: Returns the number of digits */ uint32_t Size() const; /** - * 获取bitmap的内存指针,用于持久化bitmap - * @return: bitmap的内存指针 + * Obtain a memory pointer to Bitmap for persisting Bitmap + * @return: Memory pointer to bitmap */ const char* GetBitmap() const; private: - // bitmap的字节数 + // Bytes of bitmap int unitCount() const { - // 同 (bits_ + BITMAP_UNIT_SIZE - 1) / BITMAP_UNIT_SIZE + // Same as (bits_ + BITMAP_UNIT_SIZE - 1) / BITMAP_UNIT_SIZE return (bits_ + BITMAP_UNIT_SIZE - 1) >> ALIGN_FACTOR; } - // 指定位置的bit在其所在字节中的偏移 + // The offset of the bit at the specified position in its byte int indexOfUnit(uint32_t index) const { - // 同 index / BITMAP_UNIT_SIZE + // Same as index / BITMAP_UNIT_SIZE return index >> ALIGN_FACTOR; } - // 逻辑计算掩码值 + // Logical calculation mask value char mask(uint32_t index) 
const { - int indexInUnit = index % BITMAP_UNIT_SIZE; + int indexInUnit = index % BITMAP_UNIT_SIZE; char mask = 0x01 << indexInUnit; return mask; } public: - // 表示不存在的位置,值为0xffffffff + // Represents a non-existent position, with a value of 0xffffffff static const uint32_t NO_POS; private: - uint32_t bits_; - char* bitmap_; + uint32_t bits_; + char* bitmap_; }; } // namespace common diff --git a/src/common/channel_pool.h b/src/common/channel_pool.h index 458baa33d3..fb549023e9 100644 --- a/src/common/channel_pool.h +++ b/src/common/channel_pool.h @@ -24,9 +24,10 @@ #define SRC_COMMON_CHANNEL_POOL_H_ #include -#include -#include + #include +#include +#include #include #include "src/common/concurrent/concurrent.h" @@ -39,18 +40,18 @@ namespace common { class ChannelPool { public: /** - * @brief 从channelMap获取或创建并Init到指定地址的channel + * @brief Obtain or create a channel from channelMap and Init it to the + * specified address * - * @param addr 对端的地址 - * @param[out] channelPtr 到指定地址的channel + * @param addr The address of the opposite end + * @param[out] channelPtr to the specified channel address * - * @return 成功返回0,失败返回-1 + * @return returns 0 for success, -1 for failure */ - int GetOrInitChannel(const std::string& addr, - ChannelPtr* channelPtr); + int GetOrInitChannel(const std::string& addr, ChannelPtr* channelPtr); /** - * @brief 清空map + * @brief Clear map */ void Clear(); @@ -62,5 +63,4 @@ class ChannelPool { } // namespace common } // namespace curve -#endif // SRC_COMMON_CHANNEL_POOL_H_ - +#endif // SRC_COMMON_CHANNEL_POOL_H_ diff --git a/src/common/concurrent/bounded_blocking_queue.h b/src/common/concurrent/bounded_blocking_queue.h index 56c59fcfc1..7d8449d812 100644 --- a/src/common/concurrent/bounded_blocking_queue.h +++ b/src/common/concurrent/bounded_blocking_queue.h @@ -23,12 +23,12 @@ #ifndef SRC_COMMON_CONCURRENT_BOUNDED_BLOCKING_QUEUE_H_ #define SRC_COMMON_CONCURRENT_BOUNDED_BLOCKING_QUEUE_H_ +#include #include +#include //NOLINT #include -#include //NOLINT #include -#include //NOLINT -#include +#include //NOLINT #include #include "src/common/uncopyable.h" @@ -36,18 +36,17 @@ namespace curve { namespace common { -template +template class BBQItem { public: - explicit BBQItem(const T &t, bool stop = false) - : item_(t) { + explicit BBQItem(const T& t, bool stop = false) : item_(t) { stop_.store(stop, std::memory_order_release); } - BBQItem(const BBQItem &bbqItem) { + BBQItem(const BBQItem& bbqItem) { item_ = bbqItem.item_; stop_.store(bbqItem.stop_, std::memory_order_release); } - BBQItem &operator=(const BBQItem &bbqItem) { + BBQItem& operator=(const BBQItem& bbqItem) { if (&bbqItem == this) { return *this; } @@ -56,13 +55,9 @@ class BBQItem { return *this; } - bool IsStop() const { - return stop_.load(std::memory_order_acquire); - } + bool IsStop() const { return stop_.load(std::memory_order_acquire); } - T Item() { - return item_; - } + T Item() { return item_; } private: T item_; @@ -70,18 +65,13 @@ class BBQItem { }; /** - * 有 capacity 限制的阻塞队列,线程安全 + * Blocking queues with capacity restrictions, thread safe */ -template +template class BoundedBlockingDeque : public Uncopyable { public: BoundedBlockingDeque() - : mutex_(), - notEmpty_(), - notFull_(), - deque_(), - capacity_(0) { - } + : mutex_(), notEmpty_(), notFull_(), deque_(), capacity_(0) {} int Init(const int capacity) { if (0 >= capacity) { @@ -91,7 +81,7 @@ class BoundedBlockingDeque : public Uncopyable { return 0; } - void PutBack(const T &x) { + void PutBack(const T& x) { std::unique_lock guard(mutex_); while 
(deque_.size() == capacity_) { notFull_.wait(guard); @@ -100,7 +90,7 @@ class BoundedBlockingDeque : public Uncopyable { notEmpty_.notify_one(); } - void PutFront(const T &x) { + void PutFront(const T& x) { std::unique_lock guard(mutex_); while (deque_.size() == capacity_) { notFull_.wait(guard); diff --git a/src/common/concurrent/concurrent.h b/src/common/concurrent/concurrent.h index df79ea8ec8..9d87996f2e 100644 --- a/src/common/concurrent/concurrent.h +++ b/src/common/concurrent/concurrent.h @@ -24,39 +24,38 @@ #define SRC_COMMON_CONCURRENT_CONCURRENT_H_ #include -#include // NOLINT -#include // NOLINT +#include // NOLINT +#include // NOLINT #include "src/common/concurrent/count_down_event.h" -#include "src/common/concurrent/spinlock.h" #include "src/common/concurrent/rw_lock.h" - -#include "src/common/concurrent/thread_pool.h" +#include "src/common/concurrent/spinlock.h" #include "src/common/concurrent/task_queue.h" #include "src/common/concurrent/task_thread_pool.h" +#include "src/common/concurrent/thread_pool.h" namespace curve { namespace common { -// curve公共组件命名空间替换 -template -using Atomic = std::atomic; -using Mutex = std::mutex; -using Thread = std::thread; -using LockGuard = std::lock_guard; -using UniqueLock = std::unique_lock; -using ConditionVariable = std::condition_variable; - -// curve内部定义的锁组件 -using RWLock = RWLock; -using SpinLock = SpinLock; -using ReadLockGuard = ReadLockGuard; -using WriteLockGuard = WriteLockGuard; - -// curve内部定义的线程组件 -using TaskQueue = TaskQueue; -using ThreadPool = ThreadPool; - -} // namespace common -} // namespace curve +// curve public component namespace replacement +template +using Atomic = std::atomic; +using Mutex = std::mutex; +using Thread = std::thread; +using LockGuard = std::lock_guard; +using UniqueLock = std::unique_lock; +using ConditionVariable = std::condition_variable; + +// Lock components defined internally in curve +using RWLock = RWLock; +using SpinLock = SpinLock; +using ReadLockGuard = ReadLockGuard; +using WriteLockGuard = WriteLockGuard; + +// Thread components defined internally in curve +using TaskQueue = TaskQueue; +using ThreadPool = ThreadPool; + +} // namespace common +} // namespace curve #endif // SRC_COMMON_CONCURRENT_CONCURRENT_H_ diff --git a/src/common/concurrent/count_down_event.h b/src/common/concurrent/count_down_event.h index bfce259351..404fc32681 100644 --- a/src/common/concurrent/count_down_event.h +++ b/src/common/concurrent/count_down_event.h @@ -23,36 +23,30 @@ #ifndef SRC_COMMON_CONCURRENT_COUNT_DOWN_EVENT_H_ #define SRC_COMMON_CONCURRENT_COUNT_DOWN_EVENT_H_ -#include //NOLINT -#include //NOLINT -#include //NOLINT +#include //NOLINT +#include //NOLINT +#include //NOLINT namespace curve { namespace common { /** - * 用于线程间同步,CountDownEvent是通过一个计数器来实现的,计数器的 - * 初始值initCnt为需要等待event的总数,通过接口Wait等待。每当一个 - * event发生,就会调用Signal接口,让计数器的值就会减 1。当计数器值到 - * 达0时,则Wait等待就会结束。一般用于等待一些事件发生 + * Used for inter-thread synchronization, CountDownEvent is implemented using a + * counter with an initial value (initCnt) representing the total number of + * events to wait for. Threads can wait for events using the Wait interface. + * Each time an event occurs, the Signal interface is called, decrementing the + * counter by 1. When the counter reaches 0, the waiting in Wait will conclude. + * It is typically used to wait for certain events to occur. 
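+ * A minimal usage sketch (based only on the interfaces declared below): + *   CountDownEvent ev(2);                  // wait for two events + *   std::thread t([&ev] { ev.Signal(); }); + *   ev.Signal(); + *   ev.Wait();                             // returns once the count reaches 0 + *   t.join();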
*/ class CountDownEvent { public: - CountDownEvent() : - mutex_(), - cond_(), - count_() { - } + CountDownEvent() : mutex_(), cond_(), count_() {} - explicit CountDownEvent(int initCnt) : - mutex_(), - cond_(), - count_(initCnt) { - } + explicit CountDownEvent(int initCnt) : mutex_(), cond_(), count_(initCnt) {} /** - * 重新设置event计数 - * @param eventCount:事件计数 + * Reset the event count + * @param eventCount: event count */ void Reset(int eventCount) { std::unique_lock guard(mutex_); @@ -60,7 +54,7 @@ class CountDownEvent { } /** - * 通知wait event发生了一次,计数减1 + * Notify that a waited-for event has occurred once; the count is + * decremented by 1 */ void Signal() { std::unique_lock guard(mutex_); @@ -71,7 +65,7 @@ class CountDownEvent { } /** - * 等待initCnt的event发生之后,再唤醒 + * Wait until initCnt events have occurred before waking up */ void Wait() { std::unique_lock guard(mutex_); @@ -81,9 +75,9 @@ class CountDownEvent { } /** - * 等待initCnt的event发生,或者指定时长 - * @param waitMs: 等待的ms数 - * @return:如果所有等待的event都发生,那么就返回true,否则false + * Wait until initCnt events have occurred, or for the specified duration + * @param waitMs: number of milliseconds to wait + * @return: true if all waited-for events occurred, otherwise false */ bool WaitFor(int waitMs) { std::unique_lock guard(mutex_); @@ -92,11 +86,11 @@ class CountDownEvent { while (count_ > 0) { auto now = std::chrono::high_resolution_clock::now(); std::chrono::duration elapsed = now - start; - // 计算还剩余多少时间 + // Calculate how much time is left int leftMs = waitMs - static_cast(elapsed.count()); if (leftMs > 0) { - auto ret = cond_.wait_for(guard, - std::chrono::milliseconds(leftMs)); + auto ret = + cond_.wait_for(guard, std::chrono::milliseconds(leftMs)); (void)ret; } else { break; } @@ -113,7 +107,7 @@ class CountDownEvent { private: mutable std::mutex mutex_; std::condition_variable cond_; - // 需要等待的事件计数 + // Count of events to wait for int count_; }; diff --git a/src/common/concurrent/task_thread_pool.h b/src/common/concurrent/task_thread_pool.h index b9b23eebe3..cfd9524024 100644 --- a/src/common/concurrent/task_thread_pool.h +++ b/src/common/concurrent/task_thread_pool.h @@ -23,27 +23,26 @@ #ifndef SRC_COMMON_CONCURRENT_TASK_THREAD_POOL_H_ #define SRC_COMMON_CONCURRENT_TASK_THREAD_POOL_H_ -#include -#include //NOLINT -#include -#include -#include //NOLINT #include -#include //NOLINT #include +#include //NOLINT +#include +#include #include #include +#include //NOLINT +#include //NOLINT #include +#include #include "src/common/uncopyable.h" namespace curve { namespace common { - using Task = std::function; -// 异步运行回调的线程池 +// Thread pool for asynchronously running callbacks template class TaskThreadPool : public Uncopyable { @@ -58,9 +57,10 @@ } /** - * 启动一个线程池 - * @param numThreads 线程池的线程数量,必须大于 0,不设置就是 INT_MAX (不推荐) - * @param queueCapacity queue 的容量,必须大于 0 + * Start the thread pool + * @param numThreads: the number of threads in the pool, must be greater + * than 0; if not set it defaults to INT_MAX (not recommended) + * @param queueCapacity: the capacity of the queue, must be greater than 0 * @return */ int Start(int numThreads, int queueCapacity = INT_MAX) { @@ -86,7 +86,7 @@ } /** - * 关闭线程池 + 
diff --git a/src/common/concurrent/task_thread_pool.h b/src/common/concurrent/task_thread_pool.h index b9b23eebe3..cfd9524024 100644 --- a/src/common/concurrent/task_thread_pool.h +++ b/src/common/concurrent/task_thread_pool.h
@@ -23,27 +23,26 @@ #ifndef SRC_COMMON_CONCURRENT_TASK_THREAD_POOL_H_ #define SRC_COMMON_CONCURRENT_TASK_THREAD_POOL_H_ -#include -#include //NOLINT -#include -#include -#include //NOLINT #include -#include //NOLINT #include +#include //NOLINT +#include +#include #include #include +#include //NOLINT +#include //NOLINT #include +#include #include "src/common/uncopyable.h" namespace curve { namespace common { - using Task = std::function; -// 异步运行回调的线程池 +// Thread pool for asynchronously running callbacks template class TaskThreadPool : public Uncopyable {
@@ -58,9 +57,10 @@ class TaskThreadPool : public Uncopyable { } /** - * 启动一个线程池 - * @param numThreads 线程池的线程数量,必须大于 0,不设置就是 INT_MAX (不推荐) - * @param queueCapacity queue 的容量,必须大于 0 + * Start a thread pool + * @param numThreads The number of threads in the thread pool; must be + * greater than 0. Left unset it defaults to INT_MAX (not recommended) + * @param queueCapacity The capacity of the queue; must be greater than 0 * @return */ int Start(int numThreads, int queueCapacity = INT_MAX) {
@@ -86,7 +86,7 @@ class TaskThreadPool : public Uncopyable { } /** - * 关闭线程池 + * Shut down the thread pool */ void Stop() { if (running_.exchange(false, std::memory_order_acq_rel)) {
@@ -101,10 +101,12 @@ class TaskThreadPool : public Uncopyable { } /** - * push 一个 task 给线程池处理,如果队列满,线程阻塞,直到 task push 进去 - * 需要注意的是用户自己需要保证 task 的有效的。除此之外,此 TaskThreadPool - * 并没有提供获取 f 的返回值,所以如果需要获取运行 f 的一些额外信息,需要用户 - * 自己在 f 内部逻辑添加 + * Push a task to the thread pool for processing. If the queue is full, the + * calling thread blocks until the task has been pushed in. Note that users + * must themselves ensure the task stays valid. In addition, this + * TaskThreadPool does not expose the return value of f, so if extra + * information about the run of f is needed, the user has to add that logic + * inside f * @tparam F * @tparam Args * @param f * @param args */
@@ -121,40 +123,39 @@ class TaskThreadPool : public Uncopyable { notEmpty_.notify_one(); } - /* 返回线程池 queue 的容量 */ - int QueueCapacity() const { - return capacity_; - } + /* Returns the capacity of the thread pool queue */ + int QueueCapacity() const { return capacity_; } - /* 返回线程池当前 queue 中的 task 数量,线程安全 */ + /* Returns the number of tasks currently in the thread pool queue; + * thread-safe */ int QueueSize() const { std::lock_guard guard(mutex_); return queue_.size(); } - /* 返回线程池的线程数 */ - int ThreadOfNums() const { - return threads_.size(); - } + /* Returns the number of threads in the thread pool */ + int ThreadOfNums() const { return threads_.size(); } protected: - /*线程工作时执行的函数*/ + /* The function executed by the worker threads */ virtual void ThreadFunc() { while (running_.load(std::memory_order_acquire)) { Task task(Take()); - /* ThreadPool 退出的时候,queue 为空,那么会返回无效的 task */ + /* When the ThreadPool exits and the queue is empty, an invalid + * task is returned */ if (task) { task(); } } } - /* 判断线程池 queue 是否已经满了, 非线程安全,私有内部使用 */ + /* Checks whether the thread pool queue is full; not thread-safe, for + * private internal use */ bool IsFullUnlock() const { return queue_.size() >= static_cast(capacity_); } - /* 从线程池的 queue 中取一个 task 线程安全 */ + /* Takes a task from the thread pool queue; thread-safe */ Task Take() { std::unique_lock guard(mutex_); while (queue_.empty() && running_.load(std::memory_order_acquire)) {
@@ -170,13 +171,13 @@ class TaskThreadPool : public Uncopyable { } protected: - mutable MutexT mutex_; + mutable MutexT mutex_; CondVarT notEmpty_; CondVarT notFull_; std::vector> threads_; - std::deque queue_; - int capacity_; - std::atomic running_; + std::deque queue_; + int capacity_; + std::atomic running_; }; } // namespace common
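A minimal sketch of how the pool is driven (not part of the patch; it assumes the default template parameters and that the push method is named Enqueue, as elsewhere in this repository):

#include <atomic>
#include "src/common/concurrent/task_thread_pool.h"

int main() {
    curve::common::TaskThreadPool<> pool;
    if (pool.Start(4, 128) != 0) {  // 4 workers, queue capacity 128
        return -1;
    }
    std::atomic<int> sum{0};
    for (int i = 0; i < 100; ++i) {
        // Enqueue blocks while the queue is full; any result must be
        // reported from inside the callable itself (see the comment above).
        pool.Enqueue([&sum, i] { sum.fetch_add(i); });
    }
    pool.Stop();  // wakes the workers and joins them
    return 0;
}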
diff --git a/src/common/configuration.cpp b/src/common/configuration.cpp index 0956423a3c..b4b5eda6e2 100644 --- a/src/common/configuration.cpp +++ b/src/common/configuration.cpp
@@ -24,10 +24,11 @@ #include "src/common/configuration.h" #include -#include + +#include #include +#include #include -#include namespace curve { namespace common {
@@ -46,13 +47,13 @@ bool Configuration::LoadConfig() { int delimiterPos = line.find("="); std::string key = line.substr(0, delimiterPos); int commentPos = line.find("#"); - std::string value = line.substr(delimiterPos + 1, - commentPos - delimiterPos - 1); + std::string value = + line.substr(delimiterPos + 1, commentPos - delimiterPos - 1); SetValue(key, value); } } else { - LOG(ERROR) << "Open config file '" << confFile_ << "' failed: " - << strerror(errno); + LOG(ERROR) << "Open config file '" << confFile_ + << "' failed: " << strerror(errno); return false; }
@@ -60,8 +61,10 @@ bool Configuration::LoadConfig() { } bool Configuration::SaveConfig() { - // 当前先只保存配置,原文件的注释等内容先忽略 - // TODO(yyk): 后续考虑改成原文件格式不变,只修改配置值 + // For now only the configuration is saved; comments and other contents of + // the original file are ignored + // TODO(yyk): Later, consider keeping the original file format unchanged + // and modifying only the configuration values std::ofstream wStream(confFile_); if (wStream.is_open()) { for (auto& pair : config_) {
@@ -76,14 +79,13 @@ bool Configuration::SaveConfig() { void Configuration::PrintConfig() { LOG(INFO) << std::string(30, '=') << "BEGIN" << std::string(30, '='); - for (auto &item : config_) { + for (auto& item : config_) { LOG(INFO) << item.first << std::string(60 - item.first.size(), ' ') << ": " << item.second; } LOG(INFO) << std::string(31, '=') << "END" << std::string(31, '='); } - void Configuration::ExposeMetric(const std::string& exposeName) { if (!exposeName_.empty()) { LOG(WARNING) << "Config metric has been exposed.";
@@ -96,20 +98,20 @@ void Configuration::ExposeMetric(const std::string& exposeName) { } } -void Configuration::UpdateMetricIfExposed(const std::string &key, - const std::string &value) { +void Configuration::UpdateMetricIfExposed(const std::string& key, + const std::string& value) { if (exposeName_.empty()) { return; } auto it = configMetric_.find(key); - // 如果配置项不存在,则新建配置项 + // If the configuration item does not exist, create a new one if (it == configMetric_.end()) { ConfigItemPtr configItem = std::make_shared(); configItem->ExposeAs(exposeName_, key); configMetric_[key] = configItem; } - // 更新配置项 + // Update the configuration item configMetric_[key]->Set("conf_name", key); configMetric_[key]->Set("conf_value", value); configMetric_[key]->Update();
@@ -119,33 +121,29 @@ std::map Configuration::ListConfig() const { return config_; } -void Configuration::SetConfigPath(const std::string &path) { - confFile_ = path; -} +void Configuration::SetConfigPath(const std::string& path) { confFile_ = path; } -std::string Configuration::GetConfigPath() { - return confFile_; -} +std::string Configuration::GetConfigPath() { return confFile_; } -std::string Configuration::GetStringValue(const std::string &key) { +std::string Configuration::GetStringValue(const std::string& key) { return GetValue(key); } -bool Configuration::GetStringValue(const std::string &key, std::string *out) { +bool Configuration::GetStringValue(const std::string& key, std::string* out) { return GetValue(key, out); } -void Configuration::SetStringValue(const std::string &key, - const std::string &value) { +void Configuration::SetStringValue(const std::string& key, + const std::string& value) { SetValue(key, value); } -int Configuration::GetIntValue(const std::string &key, uint64_t defaultvalue) { +int Configuration::GetIntValue(const std::string& key, uint64_t defaultvalue) { std::string value = GetValue(key); return (value == "") ?
defaultvalue : std::stoi(value); } -bool Configuration::GetIntValue(const std::string &key, int *out) { +bool Configuration::GetIntValue(const std::string& key, int* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoi(res); @@ -154,7 +152,7 @@ bool Configuration::GetIntValue(const std::string &key, int *out) { return false; } -bool Configuration::GetUInt32Value(const std::string &key, uint32_t *out) { +bool Configuration::GetUInt32Value(const std::string& key, uint32_t* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoul(res); @@ -163,7 +161,7 @@ bool Configuration::GetUInt32Value(const std::string &key, uint32_t *out) { return false; } -bool Configuration::GetUInt64Value(const std::string &key, uint64_t *out) { +bool Configuration::GetUInt64Value(const std::string& key, uint64_t* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoull(res); @@ -172,17 +170,16 @@ bool Configuration::GetUInt64Value(const std::string &key, uint64_t *out) { return false; } - -void Configuration::SetIntValue(const std::string &key, const int value) { +void Configuration::SetIntValue(const std::string& key, const int value) { SetValue(key, std::to_string(value)); } -void Configuration::SetUInt64Value( - const std::string &key, const uint64_t value) { +void Configuration::SetUInt64Value(const std::string& key, + const uint64_t value) { SetValue(key, std::to_string(value)); } -void Configuration::SetUInt32Value(const std::string &key, +void Configuration::SetUInt32Value(const std::string& key, const uint32_t value) { SetValue(key, std::to_string(value)); } @@ -201,14 +198,13 @@ void Configuration::SetInt64Value(const std::string& key, const int64_t value) { SetValue(key, std::to_string(value)); } -double Configuration::GetDoubleValue( - const std::string &key, - double defaultvalue) { +double Configuration::GetDoubleValue(const std::string& key, + double defaultvalue) { std::string value = GetValue(key); return (value == "") ? defaultvalue : std::stod(value); } -bool Configuration::GetDoubleValue(const std::string &key, double *out) { +bool Configuration::GetDoubleValue(const std::string& key, double* out) { std::string res; if (GetValue(key, &res)) { *out = std::stod(res); @@ -217,18 +213,17 @@ bool Configuration::GetDoubleValue(const std::string &key, double *out) { return false; } -void Configuration::SetDoubleValue(const std::string &key, const double value) { +void Configuration::SetDoubleValue(const std::string& key, const double value) { SetValue(key, std::to_string(value)); } - -double Configuration::GetFloatValue( - const std::string &key, float defaultvalue) { +double Configuration::GetFloatValue(const std::string& key, + float defaultvalue) { std::string value = GetValue(key); return (value == "") ? 
defaultvalue : std::stof(value); } -bool Configuration::GetFloatValue(const std::string &key, float *out) { +bool Configuration::GetFloatValue(const std::string& key, float* out) { std::string res; if (GetValue(key, &res)) { *out = std::stof(res); @@ -237,11 +232,11 @@ bool Configuration::GetFloatValue(const std::string &key, float *out) { return false; } -void Configuration::SetFloatValue(const std::string &key, const float value) { +void Configuration::SetFloatValue(const std::string& key, const float value) { SetValue(key, std::to_string(value)); } -bool Configuration::GetBoolValue(const std::string &key, bool defaultvalue) { +bool Configuration::GetBoolValue(const std::string& key, bool defaultvalue) { std::string svalue = config_[key]; transform(svalue.begin(), svalue.end(), svalue.begin(), ::tolower); @@ -251,7 +246,7 @@ bool Configuration::GetBoolValue(const std::string &key, bool defaultvalue) { return ret; } -bool Configuration::GetBoolValue(const std::string &key, bool *out) { +bool Configuration::GetBoolValue(const std::string& key, bool* out) { std::string res; if (GetValue(key, &res)) { transform(res.begin(), res.end(), res.begin(), ::tolower); @@ -271,16 +266,15 @@ bool Configuration::GetBoolValue(const std::string &key, bool *out) { return false; } - -void Configuration::SetBoolValue(const std::string &key, const bool value) { +void Configuration::SetBoolValue(const std::string& key, const bool value) { SetValue(key, std::to_string(value)); } -std::string Configuration::GetValue(const std::string &key) { +std::string Configuration::GetValue(const std::string& key) { return config_[key]; } -bool Configuration::GetValue(const std::string &key, std::string *out) { +bool Configuration::GetValue(const std::string& key, std::string* out) { if (config_.find(key) != config_.end()) { *out = config_[key]; return true; @@ -289,51 +283,47 @@ bool Configuration::GetValue(const std::string &key, std::string *out) { return false; } -void Configuration::SetValue(const std::string &key, const std::string &value) { +void Configuration::SetValue(const std::string& key, const std::string& value) { config_[key] = value; UpdateMetricIfExposed(key, value); } -void Configuration::GetValueFatalIfFail(const std::string& key, - int* value) { - LOG_IF(FATAL, !GetIntValue(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; +void Configuration::GetValueFatalIfFail(const std::string& key, int* value) { + LOG_IF(FATAL, !GetIntValue(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } void Configuration::GetValueFatalIfFail(const std::string& key, std::string* value) { - LOG_IF(FATAL, !GetStringValue(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; + LOG_IF(FATAL, !GetStringValue(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } -void Configuration::GetValueFatalIfFail(const std::string& key, - bool* value) { - LOG_IF(FATAL, !GetBoolValue(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; +void Configuration::GetValueFatalIfFail(const std::string& key, bool* value) { + LOG_IF(FATAL, !GetBoolValue(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } void Configuration::GetValueFatalIfFail(const std::string& key, uint32_t* value) { - LOG_IF(FATAL, !GetUInt32Value(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; + LOG_IF(FATAL, !GetUInt32Value(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } void Configuration::GetValueFatalIfFail(const std::string& key, 
uint64_t* value) { - LOG_IF(FATAL, !GetUInt64Value(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; + LOG_IF(FATAL, !GetUInt64Value(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } -void Configuration::GetValueFatalIfFail(const std::string& key, - float* value) { - LOG_IF(FATAL, !GetFloatValue(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; +void Configuration::GetValueFatalIfFail(const std::string& key, float* value) { + LOG_IF(FATAL, !GetFloatValue(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } -void Configuration::GetValueFatalIfFail(const std::string& key, - double* value) { - LOG_IF(FATAL, !GetDoubleValue(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; +void Configuration::GetValueFatalIfFail(const std::string& key, double* value) { + LOG_IF(FATAL, !GetDoubleValue(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } } // namespace common
diff --git a/src/common/configuration.h b/src/common/configuration.h index d546995ade..e3a5144e61 100644 --- a/src/common/configuration.h +++ b/src/common/configuration.h
@@ -22,9 +22,10 @@ */ #include -#include + #include #include +#include #include #include "src/common/stringstatus.h"
@@ -36,7 +37,7 @@ namespace curve { namespace common { using ConfigItemPtr = std::shared_ptr; -using ConfigMetricMap = std::unordered_map; +using ConfigMetricMap = std::unordered_map; class Configuration { public:
@@ -45,94 +46,96 @@ class Configuration { void PrintConfig(); std::map ListConfig() const; /** - * 暴露config的metric供采集 - * 如果metric已经暴露,则直接返回 - * @param exposeName: 对外暴露的metric的名字 + * Expose the config metrics for collection + * If the metric has already been exposed, return directly + * @param exposeName: the name under which the metric is exposed */ void ExposeMetric(const std::string& exposeName); - void SetConfigPath(const std::string &path); + void SetConfigPath(const std::string& path); std::string GetConfigPath(); - std::string GetStringValue(const std::string &key); + std::string GetStringValue(const std::string& key); /* - * @brief GetStringValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetStringValue(const std::string &key, std::string *out); - void SetStringValue(const std::string &key, const std::string &value); - - int GetIntValue(const std::string &key, uint64_t defaultvalue = 0); + * @brief GetStringValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out the value obtained + * + * @return false - not obtained, true - obtained successfully + */ + bool GetStringValue(const std::string& key, std::string* out); + void SetStringValue(const std::string& key, const std::string& value); + + int GetIntValue(const std::string& key, uint64_t defaultvalue = 0); /* - * @brief GetIntValue/GetUInt32Value/GetUInt64Value 获取指定配置项的值 //NOLINT - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetIntValue(const std::string &key, int *out); - bool GetUInt32Value(const std::string &key, uint32_t *out); - bool GetUInt64Value(const std::string &key, uint64_t *out); - void SetIntValue(const std::string &key, const int value); - void SetUInt32Value(const std::string &key, const uint32_t value); - void SetUInt64Value(const std::string &key, const uint64_t value); + * @brief GetIntValue/GetUInt32Value/GetUInt64Value Get the value of the + * specified configuration item //NOLINT
+ * + * @param[in] key configuration item name + * @param[out] out the value obtained + * + * @return false - not obtained, true - obtained successfully + */ + bool GetIntValue(const std::string& key, int* out); + bool GetUInt32Value(const std::string& key, uint32_t* out); + bool GetUInt64Value(const std::string& key, uint64_t* out); + void SetIntValue(const std::string& key, const int value); + void SetUInt32Value(const std::string& key, const uint32_t value); + void SetUInt64Value(const std::string& key, const uint64_t value); bool GetInt64Value(const std::string& key, int64_t* out); void SetInt64Value(const std::string& key, const int64_t value); - double GetDoubleValue(const std::string &key, double defaultvalue = 0.0); + double GetDoubleValue(const std::string& key, double defaultvalue = 0.0); /* - * @brief GetDoubleValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetDoubleValue(const std::string &key, double *out); - void SetDoubleValue(const std::string &key, const double value); - - double GetFloatValue(const std::string &key, float defaultvalue = 0.0); + * @brief GetDoubleValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out the value obtained + * + * @return false - not obtained, true - obtained successfully + */ + bool GetDoubleValue(const std::string& key, double* out); + void SetDoubleValue(const std::string& key, const double value); + + double GetFloatValue(const std::string& key, float defaultvalue = 0.0); /* - * @brief GetFloatValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetFloatValue(const std::string &key, float *out); - void SetFloatValue(const std::string &key, const float value); - - bool GetBoolValue(const std::string &key, bool defaultvalue = false); + * @brief GetFloatValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out the value obtained + * + * @return false - not obtained, true - obtained successfully + */ + bool GetFloatValue(const std::string& key, float* out); + void SetFloatValue(const std::string& key, const float value); + + bool GetBoolValue(const std::string& key, bool defaultvalue = false); /* - * @brief GetBoolValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetBoolValue(const std::string &key, bool *out); - void SetBoolValue(const std::string &key, const bool value); - - std::string GetValue(const std::string &key); - bool GetValue(const std::string &key, std::string *out); - void SetValue(const std::string &key, const std::string &value); + * @brief GetBoolValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out the value obtained + * + * @return false - not obtained, true - obtained successfully + */ + bool GetBoolValue(const std::string& key, bool* out); + void SetBoolValue(const std::string& key, const bool value); + + std::string GetValue(const std::string& key); + bool GetValue(const std::string& key, std::string* out); + void SetValue(const std::string& key, const std::string& value); /* - * @brief GetValueFatalIfFail 获取指定配置项的值,失败打FATAL日志 - * - * @param[in] key 配置项名称 - * @param[out] value 获取的值 - * - * @return 无 - */ + * @brief GetValueFatalIfFail Get the value of the specified configuration + * item; log FATAL on failure
+ * + * @param[in] key configuration item name + * @param[out] value the value obtained + * + * @return None + */ void GetValueFatalIfFail(const std::string& key, int* value); void GetValueFatalIfFail(const std::string& key, std::string* value); void GetValueFatalIfFail(const std::string& key, bool* value);
@@ -141,11 +144,11 @@ class Configuration { void GetValueFatalIfFail(const std::string& key, float* value); void GetValueFatalIfFail(const std::string& key, double* value); - bool GetValue(const std::string &key, int *value) { + bool GetValue(const std::string& key, int* value) { return GetIntValue(key, value); } - bool GetValue(const std::string &key, uint32_t *value) { + bool GetValue(const std::string& key, uint32_t* value) { return GetUInt32Value(key, value); }
@@ -171,19 +174,19 @@ class Configuration { private: /** - * 更新新的配置到metric - * @param 要更新的metric + * Update the new configuration value to its metric + * @param the metric to update */ - void UpdateMetricIfExposed(const std::string &key, - const std::string &value); + void UpdateMetricIfExposed(const std::string& key, + const std::string& value); private: - std::string confFile_; - std::map config_; - // metric对外暴露的名字 - std::string exposeName_; - // 每一个配置项使用单独的一个metric,用map管理 - ConfigMetricMap configMetric_; + std::string confFile_; + std::map config_; + // The name under which the metric is exposed + std::string exposeName_; + // Each configuration item uses its own metric, managed in a map + ConfigMetricMap configMetric_; }; } // namespace common
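A minimal usage sketch of the Configuration API above (not part of the patch; the file path and key name are illustrative):

#include "src/common/configuration.h"

int main() {
    curve::common::Configuration conf;
    conf.SetConfigPath("./conf/mds.conf");
    if (!conf.LoadConfig()) {
        return -1;  // LoadConfig() already logged the reason
    }
    uint32_t port = 0;
    if (!conf.GetUInt32Value("mds.listen.port", &port)) {
        port = 6666;  // fall back when the key is absent
    }
    conf.SetUInt32Value("mds.listen.port", port);
    conf.ExposeMetric("mds_config");  // expose every item for collection
    return 0;
}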
Meet the + * following constraints: CRC32("hello world", 11) == CRC32(CRC32("hello ", 6), + * "world", 5) + * @param crc starting crc checksum + * @param pData The data to be calculated + * @param iLen The length of data to be calculated + * @return 32-bit data CRC32 checksum */ -inline uint32_t CRC32(uint32_t crc, const char *pData, size_t iLen) { +inline uint32_t CRC32(uint32_t crc, const char* pData, size_t iLen) { return butil::crc32c::Extend(crc, pData, iLen); } diff --git a/src/common/curve_define.h b/src/common/curve_define.h index 04d07ad5ec..1bea28e298 100644 --- a/src/common/curve_define.h +++ b/src/common/curve_define.h @@ -28,34 +28,35 @@ #include #ifndef DLOG_EVERY_SECOND -#define DLOG_EVERY_SECOND(severity) \ +#define DLOG_EVERY_SECOND(severity) \ BAIDU_LOG_IF_EVERY_SECOND_IMPL(DLOG_IF, severity, true) #endif namespace curve { namespace common { -// curve系统中共用的定义,对于各模块自己独有的放在各模块自己的define中 -using ChunkID = uint64_t; -using CopysetID = uint32_t; -using ChunkIndex = uint32_t; -using LogicPoolID = uint32_t; -using ChunkServerID = uint32_t; -using SnapshotID = uint64_t; -using SequenceNum = uint64_t; +// The definition shared in the curve system is unique to each module and placed +// in its own definition +using ChunkID = uint64_t; +using CopysetID = uint32_t; +using ChunkIndex = uint32_t; +using LogicPoolID = uint32_t; +using ChunkServerID = uint32_t; +using SnapshotID = uint64_t; +using SequenceNum = uint64_t; -using FileSeqType = uint64_t; -using PageSizeType = uint32_t; -using ChunkSizeType = uint32_t; -using SegmentSizeType = uint32_t; +using FileSeqType = uint64_t; +using PageSizeType = uint32_t; +using ChunkSizeType = uint32_t; +using SegmentSizeType = uint32_t; -using Status = butil::Status; -using EndPoint = butil::EndPoint; +using Status = butil::Status; +using EndPoint = butil::EndPoint; -const uint32_t kKB = 1024; -const uint32_t kMB = 1024*kKB; -const uint32_t kGB = 1024*kMB; +const uint32_t kKB = 1024; +const uint32_t kMB = 1024 * kKB; +const uint32_t kGB = 1024 * kMB; -// maigic number用于FilePool_meta file计算crc +// maigic number for FilePool_meta file calculation of crc const char kFilePoolMagic[3] = "01"; constexpr uint32_t kDefaultBlockSize = 4096; diff --git a/src/common/define.h b/src/common/define.h index e3f90d7bd0..6001e48120 100644 --- a/src/common/define.h +++ b/src/common/define.h @@ -23,13 +23,13 @@ #ifndef SRC_COMMON_DEFINE_H_ #define SRC_COMMON_DEFINE_H_ -#include #include +#include namespace curve { namespace snapshotcloneserver { -// snapshotcloneservice字符串常量定义 +// snapshotcloneservice string constant definition extern const char* kServiceName; // action extern const char* kCreateSnapshotAction; @@ -67,72 +67,67 @@ extern const char* kTotalCountStr; extern const char* kSnapshotsStr; extern const char* kTaskInfosStr; - typedef std::string UUID; using TaskIdType = UUID; -enum class CloneTaskType { - kClone = 0, - kRecover -}; +enum class CloneTaskType { kClone = 0, kRecover }; -// 未初始序列号 +// Uninitialized serial number const uint64_t kUnInitializeSeqNum = 0; -// 初始序列号 +// Initial serial number const uint64_t kInitializeSeqNum = 1; -// 错误码:执行成功 +// Error code: Execution successful const int kErrCodeSuccess = 0; -// 错误码: 内部错误 +// Error code: Internal error const int kErrCodeInternalError = -1; -// 错误码:服务器初始化失败 +// Error code: Server initialization failed const int kErrCodeServerInitFail = -2; -// 错误码:服务器启动失败 +// Error code: Server startup failed const int kErrCodeServerStartFail = -3; -// 错误码:服务已停止 +// Error code: Service stopped const int 
diff --git a/src/common/curve_define.h b/src/common/curve_define.h index 04d07ad5ec..1bea28e298 100644 --- a/src/common/curve_define.h +++ b/src/common/curve_define.h
@@ -28,34 +28,35 @@ #include #ifndef DLOG_EVERY_SECOND -#define DLOG_EVERY_SECOND(severity) \ +#define DLOG_EVERY_SECOND(severity) \ BAIDU_LOG_IF_EVERY_SECOND_IMPL(DLOG_IF, severity, true) #endif namespace curve { namespace common { -// curve系统中共用的定义,对于各模块自己独有的放在各模块自己的define中 +// Definitions shared across the curve system; definitions specific to a single +// module belong in that module's own define file using ChunkID = uint64_t; using CopysetID = uint32_t; using ChunkIndex = uint32_t; using LogicPoolID = uint32_t; using ChunkServerID = uint32_t; using SnapshotID = uint64_t; using SequenceNum = uint64_t; -using FileSeqType = uint64_t; -using PageSizeType = uint32_t; -using ChunkSizeType = uint32_t; -using SegmentSizeType = uint32_t; +using FileSeqType = uint64_t; +using PageSizeType = uint32_t; +using ChunkSizeType = uint32_t; +using SegmentSizeType = uint32_t; -using Status = butil::Status; -using EndPoint = butil::EndPoint; +using Status = butil::Status; +using EndPoint = butil::EndPoint; -const uint32_t kKB = 1024; -const uint32_t kMB = 1024*kKB; -const uint32_t kGB = 1024*kMB; +const uint32_t kKB = 1024; +const uint32_t kMB = 1024 * kKB; +const uint32_t kGB = 1024 * kMB; -// maigic number用于FilePool_meta file计算crc +// magic number used for the crc calculation of the FilePool_meta file const char kFilePoolMagic[3] = "01"; constexpr uint32_t kDefaultBlockSize = 4096;
diff --git a/src/common/define.h b/src/common/define.h index e3f90d7bd0..6001e48120 100644 --- a/src/common/define.h +++ b/src/common/define.h
@@ -23,13 +23,13 @@ #ifndef SRC_COMMON_DEFINE_H_ #define SRC_COMMON_DEFINE_H_ -#include #include +#include namespace curve { namespace snapshotcloneserver { -// snapshotcloneservice字符串常量定义 +// snapshotcloneservice string constant definitions extern const char* kServiceName; // action extern const char* kCreateSnapshotAction;
@@ -67,72 +67,67 @@ extern const char* kTotalCountStr; extern const char* kSnapshotsStr; extern const char* kTaskInfosStr; - typedef std::string UUID; using TaskIdType = UUID; -enum class CloneTaskType { - kClone = 0, - kRecover -}; +enum class CloneTaskType { kClone = 0, kRecover }; -// 未初始序列号 +// Uninitialized sequence number const uint64_t kUnInitializeSeqNum = 0; -// 初始序列号 +// Initial sequence number const uint64_t kInitializeSeqNum = 1; -// 错误码:执行成功 +// Error code: Execution successful const int kErrCodeSuccess = 0; -// 错误码: 内部错误 +// Error code: Internal error const int kErrCodeInternalError = -1; -// 错误码:服务器初始化失败 +// Error code: Server initialization failed const int kErrCodeServerInitFail = -2; -// 错误码:服务器启动失败 +// Error code: Server startup failed const int kErrCodeServerStartFail = -3; -// 错误码:服务已停止 +// Error code: Service stopped const int kErrCodeServiceIsStop = -4; -// 错误码:非法请求 +// Error code: Illegal request const int kErrCodeInvalidRequest = -5; -// 错误码:任务已存在 +// Error code: Task already exists const int kErrCodeTaskExist = -6; -// 错误码:非法的用户 +// Error code: Illegal user const int kErrCodeInvalidUser = -7; -// 错误码:文件不存在 +// Error code: File does not exist const int kErrCodeFileNotExist = -8; -// 错误码:文件状态异常 +// Error code: File status abnormal const int kErrCodeFileStatusInvalid = -9; -// 错误码:chunk大小未按chunk分片大小对齐 +// Error code: Chunk size not aligned to the chunk split size const int kErrCodeChunkSizeNotAligned = -10; -// 错误码:文件名不匹配 +// Error code: File name mismatch const int kErrCodeFileNameNotMatch = -11; -// 错误码: 不能删除未完成的快照 +// Error code: Unable to delete an unfinished snapshot const int kErrCodeSnapshotCannotDeleteUnfinished = -12; -// 错误码: 不能对存在异常快照的文件打快照,或不能对存在错误的目标文件克隆/恢复 +// Error code: Cannot take a snapshot of a file with abnormal snapshots, or +// clone/recover to a target file with errors const int kErrCodeSnapshotCannotCreateWhenError = -13; -// 错误码:取消的快照已完成 +// Error code: The snapshot being canceled has already completed const int kErrCodeCannotCancelFinished = -14; -// 错误码:不能从未完成或存在错误的快照克隆 +// Error code: Cannot clone from an unfinished snapshot or a snapshot with +// errors const int kErrCodeInvalidSnapshot = -15; -// 错误码:不能删除正在克隆的快照 +// Error code: Unable to delete a snapshot that is being cloned const int kErrCodeSnapshotCannotDeleteCloning = -16; -// 错误码:不能清理未完成的克隆 +// Error code: Unable to clean up an unfinished clone const int kErrCodeCannotCleanCloneUnfinished = -17; -// 错误码:快照到达上限 +// Error code: The snapshot count has reached the upper limit const int kErrCodeSnapshotCountReachLimit = -18; -// 错误码:文件已存在 +// Error code: File already exists const int kErrCodeFileExist = -19; -// 错误码:克隆任务已满 +// Error code: Clone task is full const int kErrCodeTaskIsFull = -20; -// 错误码:不支持 +// Error code: Not supported const int kErrCodeNotSupport = -21; extern std::map code2Msg; -std::string BuildErrorMessage( - int errCode, - const std::string &requestId, - const std::string &uuid = ""); - +std::string BuildErrorMessage(int errCode, const std::string& requestId, + const std::string& uuid = ""); // clone progress constexpr uint32_t kProgressCloneStart = 0;
@@ -144,8 +139,6 @@ constexpr uint32_t kProgressRecoverChunkBegin = kProgressMetaInstalled; constexpr uint32_t kProgressRecoverChunkEnd = 95; constexpr uint32_t kProgressCloneComplete = 100; - - } // namespace snapshotcloneserver } // namespace curve
diff --git a/src/common/fs_util.h b/src/common/fs_util.h index 3e591fd5ca..6b23b9558c 100644 --- a/src/common/fs_util.h +++ b/src/common/fs_util.h
@@ -24,16 +24,18 @@ #define SRC_COMMON_FS_UTIL_H_ #include + #include #include + #include "src/common/string_util.h" namespace curve { namespace common { -// 计算path2相对于path1的相对路径 -inline std::string CalcRelativePath(const std::string &path1, - const std::string &path2) { +// Calculate the relative path of path2 with respect to path1 +inline std::string CalcRelativePath(const std::string& path1, + const std::string& path2) { if (path1.empty() || path2.empty()) { return ""; }
@@ -66,7 +68,7 @@ inline std::string CalcRelativePath(const std::string &path1, } // Check whether path2 is a subpath of path1 -inline bool IsSubPath(const std::string &path1, const std::string &path2) { +inline bool IsSubPath(const std::string& path1, const std::string& path2) { return StringStartWith(CalcRelativePath(path1, path2), "./"); }
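A small sketch of the two helpers above (not part of the patch; it assumes, as IsSubPath's implementation implies, that CalcRelativePath yields "./"-prefixed results exactly for genuine subpaths):

#include <cassert>
#include "src/common/fs_util.h"

int main() {
    using curve::common::CalcRelativePath;
    using curve::common::IsSubPath;
    assert(IsSubPath("/data/chunkserver", "/data/chunkserver/copysets"));
    assert(!IsSubPath("/data/chunkserver", "/data/mds"));
    // Either argument being empty yields an empty relative path
    assert(CalcRelativePath("", "/data").empty());
    return 0;
}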
diff --git a/src/common/interruptible_sleeper.h b/src/common/interruptible_sleeper.h index 73c2cba645..7f0f641674 100644 --- a/src/common/interruptible_sleeper.h +++ b/src/common/interruptible_sleeper.h
@@ -24,32 +24,35 @@ #define SRC_COMMON_INTERRUPTIBLE_SLEEPER_H_ #include // NOLINT + #include "src/common/concurrent/concurrent.h" namespace curve { namespace common { /** - * InterruptibleSleeper 实现可 interruptible 的 sleep 功能. - * 正常情况下 wait_for 超时, 接收到退出信号之后, 程序会立即被唤醒, - * 退出 while 循环, 并执行 cleanup 代码. + * InterruptibleSleeper implements an interruptible sleep. + * Normally wait_for simply times out; once an exit signal is received, the + * program is woken up immediately, exits the while loop, and executes the + * cleanup code. */ class InterruptibleSleeper { public: /** - * @brief wait_for 等待指定时间,如果接受到退出信号立刻返回 + * @brief wait_for Wait for the specified time, returning immediately if an + * exit signal is received * - * @param[in] time 指定wait时长 + * @param[in] time specifies the wait duration * - * @return false-收到退出信号 true-超时后退出 + * @return false - an exit signal was received, true - exited after timeout */ - template + template bool wait_for(std::chrono::duration const& time) { UniqueLock lock(m); - return !cv.wait_for(lock, time, [&]{return terminate;}); + return !cv.wait_for(lock, time, [&] { return terminate; }); } /** - * @brief interrupt 给当前wait发送退出信号 + * @brief interrupt Send an exit signal to the current wait */ void interrupt() { UniqueLock lock(m);
@@ -72,4 +75,3 @@ class InterruptibleSleeper { } // namespace curve #endif // SRC_COMMON_INTERRUPTIBLE_SLEEPER_H_ -
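A minimal sketch of the wait/interrupt protocol described above (not part of the patch; the include path is assumed from the repository layout):

#include <chrono>
#include <thread>
#include "src/common/interruptible_sleeper.h"

int main() {
    curve::common::InterruptibleSleeper sleeper;
    std::thread worker([&sleeper] {
        // Periodic work every 10s; wait_for returns true on timeout
        // and false once interrupt() has been called.
        while (sleeper.wait_for(std::chrono::seconds(10))) {
            // ... periodic work ...
        }
        // cleanup code runs here, right after the exit signal
    });
    sleeper.interrupt();  // worker wakes immediately instead of sleeping 10s
    worker.join();
    return 0;
}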
false; + if (file.empty() || offStr.empty()) return false; if (fileName != nullptr) { *fileName = file; diff --git a/src/common/location_operator.h b/src/common/location_operator.h index a86b33d158..2669beb4c3 100644 --- a/src/common/location_operator.h +++ b/src/common/location_operator.h @@ -43,43 +43,45 @@ enum class OriginType { class LocationOperator { public: /** - * 生成s3的location - * location格式:${objectname}@s3 - * @param objectName:s3上object的名称 - * @return:生成的location + * Generate location for s3 + * location format: ${objectname}@s3 + * @param objectName: The name of the object on s3 + * @return: Generated location */ static std::string GenerateS3Location(const std::string& objectName); /** - * 生成curve的location - * location格式:${filename}:${offset}@cs + * Generate the location of the curve + * location format: ${filename}:${offset}@cs */ static std::string GenerateCurveLocation(const std::string& fileName, off_t offset); /** - * 解析数据源的位置信息 - * location格式: - * s3示例:${objectname}@s3 - * curve示例:${filename}:${offset}@cs + * Parsing the location information of data sources + * location format: + * example of s3: ${objectname}@s3 + * curve example: ${filename}:${offset}@cs * - * @param location[in]:数据源的位置,其格式为originPath@originType - * @param originPath[out]:表示数据源在源端的路径 - * @return:返回OriginType,表示源数据的源端类型是s3还是curve - * 如果路径格式不正确或者originType无法识别,则返回InvalidOrigin + * @param location[in]: The location of the data source, in the format + * originPath@originType + * @param originPath[out]: represents the path of the data source on the + * source side + * @return: Returns OriginType, indicating whether the source side type of + * the source data is s3 or curve If the path format is incorrect or the + * originType is not recognized, InvalidOrigin is returned */ static OriginType ParseLocation(const std::string& location, std::string* originPath); /** - * 解析curvefs的originPath - * 格式:${filename}:${offset} - * @param originPath[in]:数据源在curvefs上的路径 - * @param fileName[out]:数据源所属文件名 - * @param offset[out]:数据源在文件中的偏移 - * @return: 解析成功返回true,失败返回false + * Parsing the originPath of curves + * Format: ${filename}:${offset} + * @param originPath[in]: The path of the data source on curves + * @param fileName[out]: The file name to which the data source belongs + * @param offset[out]: The offset of the data source in the file + * @return: Successful parsing returns true, while failure returns false */ static bool ParseCurveChunkPath(const std::string& originPath, - std::string* fileName, - off_t* offset); + std::string* fileName, off_t* offset); }; } // namespace common diff --git a/src/common/net_common.h b/src/common/net_common.h index 8bf058e134..c31cb7b770 100644 --- a/src/common/net_common.h +++ b/src/common/net_common.h @@ -23,27 +23,27 @@ #ifndef SRC_COMMON_NET_COMMON_H_ #define SRC_COMMON_NET_COMMON_H_ -#include -#include -#include // in_addr -#include // inet_pton, inet_ntop +#include // inet_pton, inet_ntop #include +#include +#include // in_addr +#include + #include namespace curve { namespace common { class NetCommon { public: - // addr形式为"ip:port" + // The form of addr is "ip:port" static bool CheckAddressValid(const std::string& addr) { std::string ip; uint32_t port; return SplitAddrToIpPort(addr, &ip, &port); } - // addr形式为"ip:port" - static bool SplitAddrToIpPort(const std::string& addr, - std::string* ipstr, + // The form of addr is "ip:port" + static bool SplitAddrToIpPort(const std::string& addr, std::string* ipstr, uint32_t* port) { size_t splitpos = addr.find(":"); if (splitpos == 
std::string::npos) { @@ -91,7 +91,7 @@ class NetCommon { return true; } }; -} // namespace common -} // namespace curve +} // namespace common +} // namespace curve #endif // SRC_COMMON_NET_COMMON_H_ diff --git a/src/common/s3_adapter.cpp b/src/common/s3_adapter.cpp index 96c7c87e6b..85b5449da3 100644 --- a/src/common/s3_adapter.cpp +++ b/src/common/s3_adapter.cpp @@ -47,12 +47,12 @@ namespace { // https://github.com/aws/aws-sdk-cpp/issues/1430 class PreallocatedIOStream : public Aws::IOStream { public: - PreallocatedIOStream(char *buf, size_t size) + PreallocatedIOStream(char* buf, size_t size) : Aws::IOStream(new Aws::Utils::Stream::PreallocatedStreamBuf( - reinterpret_cast(buf), size)) {} + reinterpret_cast(buf), size)) {} - PreallocatedIOStream(const char *buf, size_t size) - : PreallocatedIOStream(const_cast(buf), size) {} + PreallocatedIOStream(const char* buf, size_t size) + : PreallocatedIOStream(const_cast(buf), size) {} ~PreallocatedIOStream() { // corresponding new in constructor @@ -83,28 +83,28 @@ void InitS3AdaptorOptionExceptS3InfoOption(Configuration* conf, LOG_IF(FATAL, !conf->GetIntValue("s3.http_scheme", &s3Opt->scheme)); LOG_IF(FATAL, !conf->GetBoolValue("s3.verify_SSL", &s3Opt->verifySsl)); LOG_IF(FATAL, !conf->GetStringValue("s3.user_agent", &s3Opt->userAgent)); - LOG_IF(FATAL, !conf->GetIntValue("s3.maxConnections", - &s3Opt->maxConnections)); - LOG_IF(FATAL, !conf->GetIntValue("s3.connectTimeout", - &s3Opt->connectTimeout)); - LOG_IF(FATAL, !conf->GetIntValue("s3.requestTimeout", - &s3Opt->requestTimeout)); - LOG_IF(FATAL, !conf->GetIntValue("s3.asyncThreadNum", - &s3Opt->asyncThreadNum)); + LOG_IF(FATAL, + !conf->GetIntValue("s3.maxConnections", &s3Opt->maxConnections)); + LOG_IF(FATAL, + !conf->GetIntValue("s3.connectTimeout", &s3Opt->connectTimeout)); + LOG_IF(FATAL, + !conf->GetIntValue("s3.requestTimeout", &s3Opt->requestTimeout)); + LOG_IF(FATAL, + !conf->GetIntValue("s3.asyncThreadNum", &s3Opt->asyncThreadNum)); LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.iopsTotalLimit", - &s3Opt->iopsTotalLimit)); + &s3Opt->iopsTotalLimit)); LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.iopsReadLimit", - &s3Opt->iopsReadLimit)); + &s3Opt->iopsReadLimit)); LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.iopsWriteLimit", - &s3Opt->iopsWriteLimit)); - LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.bpsTotalMB", - &s3Opt->bpsTotalMB)); - LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.bpsReadMB", - &s3Opt->bpsReadMB)); - LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.bpsWriteMB", - &s3Opt->bpsWriteMB)); + &s3Opt->iopsWriteLimit)); + LOG_IF(FATAL, + !conf->GetUInt64Value("s3.throttle.bpsTotalMB", &s3Opt->bpsTotalMB)); + LOG_IF(FATAL, + !conf->GetUInt64Value("s3.throttle.bpsReadMB", &s3Opt->bpsReadMB)); + LOG_IF(FATAL, + !conf->GetUInt64Value("s3.throttle.bpsWriteMB", &s3Opt->bpsWriteMB)); LOG_IF(FATAL, !conf->GetBoolValue("s3.useVirtualAddressing", - &s3Opt->useVirtualAddressing)); + &s3Opt->useVirtualAddressing)); LOG_IF(FATAL, !conf->GetStringValue("s3.region", &s3Opt->region)); if (!conf->GetUInt64Value("s3.maxAsyncRequestInflightBytes", @@ -134,7 +134,7 @@ void S3Adapter::InitExceptFsS3Option(const std::string& path) { Init(option); } -void S3Adapter::Init(const S3AdapterOption &option) { +void S3Adapter::Init(const S3AdapterOption& option) { auto initSDK = [&]() { AWS_SDK_OPTIONS.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel(option.loglevel); @@ -161,11 +161,10 @@ void S3Adapter::Init(const S3AdapterOption &option) { clientCfg_->executor = 
Aws::MakeShared( "S3Adapter.S3Client", asyncThreadNum); - s3Client_ = Aws::New(AWS_ALLOCATE_TAG, - Aws::Auth::AWSCredentials(s3Ak_, s3Sk_), - *clientCfg_, - Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, - option.useVirtualAddressing); + s3Client_ = Aws::New( + AWS_ALLOCATE_TAG, Aws::Auth::AWSCredentials(s3Ak_, s3Sk_), *clientCfg_, + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, + option.useVirtualAddressing); ReadWriteThrottleParams params; params.iopsTotal.limit = option.iopsTotalLimit; @@ -198,15 +197,12 @@ void S3Adapter::Deinit() { delete throttle_; throttle_ = nullptr; } - if (inflightBytesThrottle_ != nullptr) - inflightBytesThrottle_.release(); + if (inflightBytesThrottle_ != nullptr) inflightBytesThrottle_.release(); } void S3Adapter::Shutdown() { // one program should only call once - auto shutdownSDK = [&]() { - Aws::ShutdownAPI(AWS_SDK_OPTIONS); - }; + auto shutdownSDK = [&]() { Aws::ShutdownAPI(AWS_SDK_OPTIONS); }; std::call_once(S3SHUTDOWN_FLAG, shutdownSDK); } @@ -232,17 +228,15 @@ int S3Adapter::CreateBucket() { request.SetBucket(bucketName_); Aws::S3::Model::CreateBucketConfiguration conf; conf.SetLocationConstraint( - Aws::S3::Model::BucketLocationConstraint::us_east_1); + Aws::S3::Model::BucketLocationConstraint::us_east_1); request.SetCreateBucketConfiguration(conf); auto response = s3Client_->CreateBucket(request); if (response.IsSuccess()) { return 0; } else { - LOG(ERROR) << "CreateBucket error:" - << bucketName_ - << "--" - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); + LOG(ERROR) << "CreateBucket error:" << bucketName_ << "--" + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); return -1; } } @@ -254,11 +248,9 @@ int S3Adapter::DeleteBucket() { if (response.IsSuccess()) { return 0; } else { - LOG(ERROR) << "DeleteBucket error:" - << bucketName_ - << "--" - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); + LOG(ERROR) << "DeleteBucket error:" << bucketName_ << "--" + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); return -1; } } @@ -270,16 +262,14 @@ bool S3Adapter::BucketExist() { if (response.IsSuccess()) { return true; } else { - LOG(ERROR) << "HeadBucket error:" - << bucketName_ - << "--" - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); + LOG(ERROR) << "HeadBucket error:" << bucketName_ << "--" + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); return false; } } -int S3Adapter::PutObject(const Aws::String &key, const char *buffer, +int S3Adapter::PutObject(const Aws::String& key, const char* buffer, const size_t bufferSize) { Aws::S3::Model::PutObjectRequest request; request.SetBucket(bucketName_); @@ -303,7 +293,7 @@ int S3Adapter::PutObject(const Aws::String &key, const char *buffer, } } -int S3Adapter::PutObject(const Aws::String &key, const std::string &data) { +int S3Adapter::PutObject(const Aws::String& key, const std::string& data) { return PutObject(key, data.data(), data.size()); } /* @@ -351,12 +341,11 @@ void S3Adapter::PutObjectAsync(std::shared_ptr context) { }; Aws::S3::PutObjectResponseReceivedHandler handler = - [context]( - const Aws::S3::S3Client * /*client*/, - const Aws::S3::Model::PutObjectRequest & /*request*/, - const Aws::S3::Model::PutObjectOutcome &response, - const std::shared_ptr - &awsCtx) { + [context](const Aws::S3::S3Client* /*client*/, + const Aws::S3::Model::PutObjectRequest& /*request*/, + const 
Aws::S3::Model::PutObjectOutcome& response, + const std::shared_ptr& + awsCtx) { std::shared_ptr ctx = std::const_pointer_cast( std::dynamic_pointer_cast( @@ -382,14 +371,13 @@ void S3Adapter::PutObjectAsync(std::shared_ptr context) { s3Client_->PutObjectAsync(request, handler, context); } -int S3Adapter::GetObject(const Aws::String &key, - std::string *data) { +int S3Adapter::GetObject(const Aws::String& key, std::string* data) { Aws::S3::Model::GetObjectRequest request; request.SetBucket(bucketName_); request.SetKey(key); std::stringstream ss; if (throttle_) { - throttle_->Add(true, 1); + throttle_->Add(true, 1); } auto response = s3Client_->GetObject(request); if (response.IsSuccess()) { @@ -398,15 +386,13 @@ int S3Adapter::GetObject(const Aws::String &key, return 0; } else { LOG(ERROR) << "GetObject error: " - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); return -1; } } -int S3Adapter::GetObject(const std::string &key, - char *buf, - off_t offset, +int S3Adapter::GetObject(const std::string& key, char* buf, off_t offset, size_t len) { Aws::S3::Model::GetObjectRequest request; request.SetBucket(bucketName_); @@ -425,8 +411,8 @@ int S3Adapter::GetObject(const std::string &key, return 0; } else { LOG(ERROR) << "GetObject error: " - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); return -1; } } @@ -453,11 +439,11 @@ void S3Adapter::GetObjectAsync(std::shared_ptr context) { }; Aws::S3::GetObjectResponseReceivedHandler handler = - [this](const Aws::S3::S3Client * /*client*/, - const Aws::S3::Model::GetObjectRequest & /*request*/, - const Aws::S3::Model::GetObjectOutcome &response, - const std::shared_ptr - &awsCtx) { + [this](const Aws::S3::S3Client* /*client*/, + const Aws::S3::Model::GetObjectRequest& /*request*/, + const Aws::S3::Model::GetObjectOutcome& response, + const std::shared_ptr& + awsCtx) { std::shared_ptr ctx = std::const_pointer_cast( std::dynamic_pointer_cast( @@ -481,7 +467,7 @@ void S3Adapter::GetObjectAsync(std::shared_ptr context) { s3Client_->GetObjectAsync(request, handler, context); } -bool S3Adapter::ObjectExist(const Aws::String &key) { +bool S3Adapter::ObjectExist(const Aws::String& key) { Aws::S3::Model::HeadObjectRequest request; request.SetBucket(bucketName_); request.SetKey(key); @@ -489,18 +475,14 @@ bool S3Adapter::ObjectExist(const Aws::String &key) { if (response.IsSuccess()) { return true; } else { - LOG(ERROR) << "HeadObject error:" - << bucketName_ - << "--" - << key - << "--" - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); + LOG(ERROR) << "HeadObject error:" << bucketName_ << "--" << key << "--" + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); return false; } } -int S3Adapter::DeleteObject(const Aws::String &key) { +int S3Adapter::DeleteObject(const Aws::String& key) { Aws::S3::Model::DeleteObjectRequest request; request.SetBucket(bucketName_); request.SetKey(key); @@ -508,13 +490,9 @@ int S3Adapter::DeleteObject(const Aws::String &key) { if (response.IsSuccess()) { return 0; } else { - LOG(ERROR) << "DeleteObject error:" - << bucketName_ - << "--" - << key - << "--" - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); + LOG(ERROR) << "DeleteObject error:" << bucketName_ << "--" << key + << "--" << 
response.GetError().GetExceptionName() + << response.GetError().GetMessage(); return -1; } }
@@ -554,10 +532,10 @@ int S3Adapter::DeleteObjects(const std::list& keyList) { return 0; } /* - // object元数据单独更新还有问题,需要单独的s3接口来支持 -int S3Adapter::UpdateObjectMeta(const Aws::String &key, - const Aws::Map &meta) { - Aws::S3::Model::PutObjectRequest request; +// Updating object metadata on its own still has issues; a separate s3 +// interface is needed to support it +int S3Adapter::UpdateObjectMeta(const Aws::String &key, const Aws::Map &meta) { Aws::S3::Model::PutObjectRequest request; request.SetBucket(bucketName_); request.SetKey(key); auto input_data =
@@ -594,7 +572,7 @@ int S3Adapter::GetObjectMeta(const Aws::String &key, } } */ -Aws::String S3Adapter::MultiUploadInit(const Aws::String &key) { +Aws::String S3Adapter::MultiUploadInit(const Aws::String& key) { Aws::S3::Model::CreateMultipartUploadRequest request; request.WithBucket(bucketName_).WithKey(key); auto response = s3Client_->CreateMultipartUpload(request);
@@ -602,17 +580,14 @@ Aws::String S3Adapter::MultiUploadInit(const Aws::String &key) { return response.GetResult().GetUploadId(); } else { LOG(ERROR) << "CreateMultipartUploadRequest error: " - << response.GetError().GetMessage(); + << response.GetError().GetMessage(); return ""; } } Aws::S3::Model::CompletedPart S3Adapter::UploadOnePart( - const Aws::String &key, - const Aws::String &uploadId, - int partNum, - int partSize, - const char* buf) { + const Aws::String& key, const Aws::String& uploadId, int partNum, + int partSize, const char* buf) { Aws::S3::Model::UploadPartRequest request; request.SetBucket(bucketName_); request.SetKey(key);
@@ -629,16 +604,18 @@ Aws::S3::Model::CompletedPart S3Adapter::UploadOnePart( auto result = s3Client_->UploadPart(request); if (result.IsSuccess()) { return Aws::S3::Model::CompletedPart() - .WithETag(result.GetResult().GetETag()).WithPartNumber(partNum); + .WithETag(result.GetResult().GetETag()) + .WithPartNumber(partNum); } else { return Aws::S3::Model::CompletedPart() - .WithETag("errorTag").WithPartNumber(-1); + .WithETag("errorTag") + .WithPartNumber(-1); } } -int S3Adapter::CompleteMultiUpload(const Aws::String &key, - const Aws::String &uploadId, - const Aws::Vector &cp_v) { +int S3Adapter::CompleteMultiUpload( + const Aws::String& key, const Aws::String& uploadId, + const Aws::Vector& cp_v) { Aws::S3::Model::CompleteMultipartUploadRequest request; request.WithBucket(bucketName_); request.SetKey(key);
@@ -650,14 +627,14 @@ int S3Adapter::CompleteMultiUpload(const Aws::String &key, return 0; } else { LOG(ERROR) << "CompleteMultiUpload error: " - << response.GetError().GetMessage(); + << response.GetError().GetMessage(); this->AbortMultiUpload(key, uploadId); return -1; } } -int S3Adapter::AbortMultiUpload(const Aws::String &key, - const Aws::String &uploadId) { +int S3Adapter::AbortMultiUpload(const Aws::String& key, + const Aws::String& uploadId) { Aws::S3::Model::AbortMultipartUploadRequest request; request.WithBucket(bucketName_); request.SetKey(key);
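The four calls above form one multipart-upload flow; a hedged sketch of the sequence (not part of the patch; the helper name and part size are illustrative):

#include <algorithm>
#include "src/common/s3_adapter.h"

int UploadInParts(curve::common::S3Adapter* s3, const Aws::String& key,
                  const char* data, size_t len, size_t partSize) {
    Aws::String uploadId = s3->MultiUploadInit(key);
    if (uploadId.empty()) {
        return -1;  // CreateMultipartUpload failed
    }
    Aws::Vector<Aws::S3::Model::CompletedPart> parts;
    int partNum = 1;  // part numbers start from 1
    for (size_t off = 0; off < len; off += partSize, ++partNum) {
        size_t n = std::min(partSize, len - off);
        auto part = s3->UploadOnePart(key, uploadId, partNum,
                                      static_cast<int>(n), data + off);
        if (part.GetPartNumber() == -1) {  // the error marker used above
            s3->AbortMultiUpload(key, uploadId);
            return -1;
        }
        parts.push_back(part);
    }
    // CompleteMultiUpload already aborts the task itself on failure
    return s3->CompleteMultiUpload(key, uploadId, parts);
}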
diff --git a/src/common/s3_adapter.h b/src/common/s3_adapter.h index 67be6f7949..0670b1b0cb 100644 --- a/src/common/s3_adapter.h +++ b/src/common/s3_adapter.h
@@ -105,10 +105,10 @@ struct S3InfoOption { uint32_t objectPrefix; }; -void InitS3AdaptorOptionExceptS3InfoOption(Configuration *conf, - S3AdapterOption *s3Opt); +void InitS3AdaptorOptionExceptS3InfoOption(Configuration* conf, + S3AdapterOption* s3Opt); -void InitS3AdaptorOption(Configuration *conf, S3AdapterOption *s3Opt); +void InitS3AdaptorOption(Configuration* conf, S3AdapterOption* s3Opt); using GetObjectAsyncCallBack = std::function&)>;
@@ -183,27 +183,27 @@ class S3Adapter { } virtual ~S3Adapter() { Deinit(); } /** - * 初始化S3Adapter + * Initialize S3Adapter */ - virtual void Init(const std::string &path); + virtual void Init(const std::string& path); /** - * 初始化S3Adapter - * 但不包括 S3InfoOption + * Initialize S3Adapter, + * but without the S3InfoOption */ - virtual void InitExceptFsS3Option(const std::string &path); + virtual void InitExceptFsS3Option(const std::string& path); /** - * 初始化S3Adapter + * Initialize S3Adapter */ - virtual void Init(const S3AdapterOption &option); + virtual void Init(const S3AdapterOption& option); /** * @brief * * @details */ - virtual void SetS3Option(const S3InfoOption &fsS3Opt); + virtual void SetS3Option(const S3InfoOption& fsS3Opt); /** - * 释放S3Adapter资源 + * Release S3Adapter resources */ virtual void Deinit(); /**
@@ -213,7 +213,7 @@ class S3Adapter { /** * reinit s3client with new AWSCredentials */ - virtual void Reinit(const S3AdapterOption &option); + virtual void Reinit(const S3AdapterOption& option); /** * get s3 ak */
@@ -227,39 +227,40 @@ */ virtual std::string GetS3Endpoint(); /** - * 创建存储快照数据的桶(桶名称由配置文件指定,需要全局唯一) - * @return: 0 创建成功/ -1 创建失败 + * Create a bucket for storing snapshot data (the bucket name is specified + * by the configuration file and needs to be globally unique) + * @return: 0 successfully created/-1 failed to create */ virtual int CreateBucket(); /** - * 删除桶 - * @return 0 删除成功/-1 删除失败 + * Delete the bucket + * @return 0 deleted successfully/-1 deletion failed */ virtual int DeleteBucket(); /** - * 判断快照数据的桶是否存在 - * @return true 桶存在/ false 桶不存在 + * Determine whether the bucket of snapshot data exists + * @return true bucket exists/false bucket does not exist */ virtual bool BucketExist(); /** - * 上传数据到对象存储 - * @param 对象名 - * @param 数据内容 - * @param 数据内容大小 - * @return:0 上传成功/ -1 上传失败 + * Upload data to object storage + * @param object name + * @param data content + * @param data content size + * @return: 0 upload successful/-1 upload failed */ - virtual int PutObject(const Aws::String &key, const char *buffer, + virtual int PutObject(const Aws::String& key, const char* buffer, const size_t bufferSize); // Get object to buffer[bufferSize] // int GetObject(const Aws::String &key, void *buffer, // const int bufferSize); /** - * 上传数据到对象存储 - * @param 对象名 - * @param 数据内容 - * @return:0 上传成功/ -1 上传失败 + * Upload data to object storage + * @param object name + * @param data content + * @return: 0 upload successful/-1 upload failed */ - virtual int PutObject(const Aws::String &key, const std::string &data); + virtual int PutObject(const Aws::String& key, const std::string& data); virtual void PutObjectAsync(std::shared_ptr context); /** * Get object from s3, * @param pointer which contains the data * @return 0 success / -1 fail */ - virtual int GetObject(const Aws::String &key, std::string *data); + virtual int GetObject(const Aws::String& key, std::string* data); /** - * 从对象存储读取数据 - * @param 对象名 - * @param[out] 返回读取的数据 - * @param 读取的偏移 - * @param 读取的长度 + * Read data from object storage + * @param object name + * @param[out] returns the data read + * @param the offset to read from + * @param the length to read */ - virtual int GetObject(const std::string &key, char *buf, off_t offset, + virtual int GetObject(const std::string& key, char* buf, off_t offset, size_t len); // NOLINT /**
- * @brief 异步从对象存储读取数据 + * @brief Asynchronously read data from object storage * - * @param context 异步上下文 + * @param context asynchronous context */ virtual void GetObjectAsync(std::shared_ptr context); /** - * 删除对象 - * @param 对象名 - * @return: 0 删除成功/ - + * Delete an object + * @param object name + * @return: 0 successfully deleted/- */ - virtual int DeleteObject(const Aws::String &key); + virtual int DeleteObject(const Aws::String& key); - virtual int DeleteObjects(const std::list &keyList); + virtual int DeleteObjects(const std::list& keyList); /** - * 判断对象是否存在 - * @param 对象名 - * @return: true 对象存在/ false 对象不存在 + * Determine whether the object exists + * @param object name + * @return: true object exists/false object does not exist */ - virtual bool ObjectExist(const Aws::String &key); + virtual bool ObjectExist(const Aws::String& key); /* // Update object meta content - // Todo 接口还有问题 need fix + // TODO: this interface still has issues, needs fixing virtual int UpdateObjectMeta(const Aws::String &key, const Aws::Map &meta); // Get object meta content
@@ -312,51 +313,53 @@ Aws::Map *meta); */ /** - * 初始化对象的分片上传任务 - * @param 对象名 - * @return 任务名 + * Initialize a multipart upload task for the object + * @param object name + * @return the upload task id */ - virtual Aws::String MultiUploadInit(const Aws::String &key); + virtual Aws::String MultiUploadInit(const Aws::String& key); /** - * 增加一个分片到分片上传任务中 - * @param 对象名 - * @param 任务名 - * @param 第几个分片(从1开始) - * @param 分片大小 - * @param 分片的数据内容 - * @return: 分片任务管理对象 + * Add one part to the multipart upload task + * @param object name + * @param the upload task id + * @param the part number (starting from 1) + * @param the part size + * @param the data content of the part + * @return: the object that manages the completed part */ - virtual Aws::S3::Model::CompletedPart - UploadOnePart(const Aws::String &key, const Aws::String &uploadId, - int partNum, int partSize, const char *buf); + virtual Aws::S3::Model::CompletedPart UploadOnePart( + const Aws::String& key, const Aws::String& uploadId, int partNum, + int partSize, const char* buf); /** - * 完成分片上传任务 - * @param 对象名 - * @param 分片上传任务id - * @管理分片上传任务的vector - * @return 0 任务完成/ -1 任务失败 + * Complete the multipart upload task + * @param object name + * @param the upload task id + * @param the vector of completed parts of the upload task + * @return 0 task completed/-1 task failed */ - virtual int - CompleteMultiUpload(const Aws::String &key, const Aws::String &uploadId, - const Aws::Vector &cp_v); + virtual int CompleteMultiUpload( + const Aws::String& key, const Aws::String& uploadId, + const Aws::Vector& cp_v); /** - * 终止一个对象的分片上传任务 - * @param 对象名 - * @param 任务id - * @return 0 终止成功/ -1 终止失败 + * Abort the multipart upload task of an object + * @param object name + * @param the upload task id + * @return 0 aborted successfully/-1 abort failed */ - virtual int AbortMultiUpload(const Aws::String &key, - const Aws::String &uploadId); - void SetBucketName(const Aws::String &name) { bucketName_ = name; } + virtual int AbortMultiUpload(const Aws::String& key, + const Aws::String& uploadId); + void SetBucketName(const Aws::String& name) { bucketName_ = name; } Aws::String GetBucketName() { return bucketName_; } - Aws::Client::ClientConfiguration *GetConfig() { return clientCfg_; } + Aws::Client::ClientConfiguration* GetConfig() { return clientCfg_; } private: class AsyncRequestInflightBytesThrottle { public: explicit AsyncRequestInflightBytesThrottle(uint64_t maxInflightBytes) - : maxInflightBytes_(maxInflightBytes), inflightBytes_(0), mtx_(), + :
private: class AsyncRequestInflightBytesThrottle { public: explicit AsyncRequestInflightBytesThrottle(uint64_t maxInflightBytes) - : maxInflightBytes_(maxInflightBytes), inflightBytes_(0), mtx_(), + : maxInflightBytes_(maxInflightBytes), + inflightBytes_(0), + mtx_(), cond_() {} void OnStart(uint64_t len); @@ -371,19 +374,20 @@ class S3Adapter { }; private: - // S3服务器地址 + // S3 server address Aws::String s3Address_; - // 用于用户认证的AK/SK,需要从对象存储的用户管理中申请 + // AK/SK used for user authentication; they need to be applied for from + // the user management of the object storage Aws::String s3Ak_; Aws::String s3Sk_; - // 对象的桶名 + // The bucket name of the object Aws::String bucketName_; - // aws sdk的配置 - Aws::Client::ClientConfiguration *clientCfg_; - Aws::S3::S3Client *s3Client_; + // Configuration of the AWS SDK + Aws::Client::ClientConfiguration* clientCfg_; + Aws::S3::S3Client* s3Client_; Configuration conf_; - Throttle *throttle_; + Throttle* throttle_; std::unique_ptr inflightBytesThrottle_; }; @@ -395,7 +399,7 @@ class FakeS3Adapter : public S3Adapter { bool BucketExist() override { return true; } - int PutObject(const Aws::String &key, const char *buffer, + int PutObject(const Aws::String& key, const char* buffer, const size_t bufferSize) override { (void)key; (void)buffer; @@ -403,20 +407,20 @@ return 0; } - int PutObject(const Aws::String &key, const std::string &data) override { + int PutObject(const Aws::String& key, const std::string& data) override { (void)key; (void)data; return 0; } - void - PutObjectAsync(std::shared_ptr context) override { + void PutObjectAsync( + std::shared_ptr context) override { context->retCode = 0; context->timer.stop(); context->cb(context); } - int GetObject(const Aws::String &key, std::string *data) override { + int GetObject(const Aws::String& key, std::string* data) override { (void)key; (void)data; // just return 4M data @@ -424,7 +428,7 @@ return 0; } - int GetObject(const std::string &key, char *buf, off_t offset, + int GetObject(const std::string& key, char* buf, off_t offset, size_t len) override { (void)key; (void)offset; @@ -433,30 +437,29 @@ return 0; } - void - GetObjectAsync(std::shared_ptr context) override { + void GetObjectAsync( + std::shared_ptr context) override { memset(context->buf, '1', context->len); context->retCode = 0; context->cb(this, context); } - int DeleteObject(const Aws::String &key) override { + int DeleteObject(const Aws::String& key) override { (void)key; return 0; } - int DeleteObjects(const std::list &keyList) override { + int DeleteObjects(const std::list& keyList) override { (void)keyList; return 0; } - bool ObjectExist(const Aws::String &key) override { + bool ObjectExist(const Aws::String& key) override { (void)key; return true; } }; - } // namespace common } // namespace curve #endif // SRC_COMMON_S3_ADAPTER_H_ diff --git a/src/common/snapshotclone/snapshotclone_define.cpp index b3b08f8d74..9e2ba8a0a6 100644 --- a/src/common/snapshotclone/snapshotclone_define.cpp +++ b/src/common/snapshotclone/snapshotclone_define.cpp @@ -20,14 +20,14 @@ * Author: xuchaojie */ -#include - #include "src/common/snapshotclone/snapshotclone_define.h" +#include + namespace curve { namespace snapshotcloneserver { -// 字符串常量定义 +// String constant definitions const char* kServiceName = "SnapshotCloneService"; const char* kCreateSnapshotAction = "CreateSnapshot"; const char* kDeleteSnapshotAction = "DeleteSnapshot"; @@ -92,10 +92,8 @@ std::map code2Msg = { {kErrCodeNotSupport, "Not support."}, }; -std::string BuildErrorMessage( - int errCode, - const std::string &requestId, - const
std::string &uuid) { +std::string BuildErrorMessage(int errCode, const std::string& requestId, + const std::string& uuid) { Json::Value mainObj; mainObj[kCodeStr] = std::to_string(errCode); mainObj[kMessageStr] = code2Msg[errCode];
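For reference, the helper above stringifies the numeric code and pairs it with the message mapped in code2Msg; a minimal call-site sketch follows, in which the request id is a placeholder and the concrete JSON key names behind kCodeStr/kMessageStr are defined elsewhere in the file (not shown in this hunk):

// Hypothetical call site for BuildErrorMessage.
std::string body = BuildErrorMessage(kErrCodeFileNotExist, "req-123");
// body is a JSON object carrying the stringified code ("-8"), the message
// mapped in code2Msg, the request id, and, when supplied, the uuid.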
diff --git a/src/common/snapshotclone/snapshotclone_define.h index ffa5428a6e..558fa15f97 100644 --- a/src/common/snapshotclone/snapshotclone_define.h +++ b/src/common/snapshotclone/snapshotclone_define.h @@ -23,13 +23,13 @@ #ifndef SRC_COMMON_SNAPSHOTCLONE_SNAPSHOTCLONE_DEFINE_H_ #define SRC_COMMON_SNAPSHOTCLONE_SNAPSHOTCLONE_DEFINE_H_ -#include #include +#include namespace curve { namespace snapshotcloneserver { -// snapshotcloneservice字符串常量定义 +// snapshotcloneservice string constant definitions extern const char* kServiceName; // action extern const char* kCreateSnapshotAction; @@ -74,74 +74,66 @@ extern const char* kCloneFileInfoStr; typedef std::string UUID; using TaskIdType = UUID; -enum class CloneTaskType { - kClone = 0, - kRecover -}; +enum class CloneTaskType { kClone = 0, kRecover }; -enum class CloneRefStatus { - kNoRef = 0, - kHasRef = 1, - kNeedCheck = 2 -}; +enum class CloneRefStatus { kNoRef = 0, kHasRef = 1, kNeedCheck = 2 }; -// 未初始序列号 +// Uninitialized sequence number const uint64_t kUnInitializeSeqNum = 0; -// 初始序列号 +// Initial sequence number const uint64_t kInitializeSeqNum = 1; -// 错误码:执行成功 +// Error code: Execution successful const int kErrCodeSuccess = 0; -// 错误码: 内部错误 +// Error code: Internal error const int kErrCodeInternalError = -1; -// 错误码:服务器初始化失败 +// Error code: Server initialization failed const int kErrCodeServerInitFail = -2; -// 错误码:服务器启动失败 +// Error code: Server startup failed const int kErrCodeServerStartFail = -3; -// 错误码:服务已停止 +// Error code: Service stopped const int kErrCodeServiceIsStop = -4; -// 错误码:非法请求 +// Error code: Invalid request const int kErrCodeInvalidRequest = -5; -// 错误码:任务已存在 +// Error code: Task already exists const int kErrCodeTaskExist = -6; -// 错误码:非法的用户 +// Error code: Invalid user const int kErrCodeInvalidUser = -7; -// 错误码:文件不存在 +// Error code: File does not exist const int kErrCodeFileNotExist = -8; -// 错误码:文件状态异常 +// Error code: Abnormal file status const int kErrCodeFileStatusInvalid = -9; -// 错误码:chunk大小未按chunk分片大小对齐 +// Error code: Chunk size not aligned with the chunk slice size const int kErrCodeChunkSizeNotAligned = -10; -// 错误码:文件名不匹配 +// Error code: File name mismatch const int kErrCodeFileNameNotMatch = -11; -// 错误码: 不能删除未完成的快照 +// Error code: Cannot delete an unfinished snapshot const int kErrCodeSnapshotCannotDeleteUnfinished = -12; -// 错误码: 不能对存在异常快照的文件打快照,或不能对存在错误的目标文件克隆/恢复 +// Error code: Cannot take a snapshot of a file with abnormal snapshots, or +// clone/recover a target file with errors const int kErrCodeSnapshotCannotCreateWhenError = -13; -// 错误码:取消的快照已完成 +// Error code: The snapshot to cancel has already completed const int kErrCodeCannotCancelFinished = -14; -// 错误码:不能从未完成或存在错误的快照克隆 +// Error code: Cannot clone from an unfinished snapshot or a snapshot with +// errors const int kErrCodeInvalidSnapshot = -15; -// 错误码:不能删除正在克隆的快照 +// Error code: Cannot delete a snapshot that is being cloned const int kErrCodeSnapshotCannotDeleteCloning = -16; -// 错误码:不能清理未完成的克隆 +// Error code: Cannot clean up an unfinished clone const int kErrCodeCannotCleanCloneUnfinished = -17; -// 错误码:快照到达上限 +// Error code: The number of snapshots has reached the upper limit const int kErrCodeSnapshotCountReachLimit = -18; -// 错误码:文件已存在 +// Error code: File already exists const int kErrCodeFileExist = -19; -// 错误码:克隆任务已满 +// Error code: The clone task queue is full const int kErrCodeTaskIsFull = -20; -// 错误码:不支持 +// Error code: Not supported const int kErrCodeNotSupport = -21; extern std::map code2Msg; -std::string BuildErrorMessage( - int errCode, - const std::string &requestId, - const std::string &uuid = ""); - +std::string BuildErrorMessage(int errCode, const std::string& requestId, + const std::string& uuid = ""); // clone progress constexpr uint32_t kProgressCloneStart = 0; @@ -153,8 +145,6 @@ constexpr uint32_t kProgressRecoverChunkBegin = kProgressMetaInstalled; constexpr uint32_t kProgressRecoverChunkEnd = 95; constexpr uint32_t kProgressCloneComplete = 100; - - } // namespace snapshotcloneserver } // namespace curve diff --git a/src/common/stringstatus.h index 203b851bfc..a8ca00e1d8 100644 --- a/src/common/stringstatus.h +++ b/src/common/stringstatus.h @@ -20,28 +20,28 @@ * Author: lixiaocui */ - -#ifndef SRC_COMMON_STRINGSTATUS_H_ -#define SRC_COMMON_STRINGSTATUS_H_ +#ifndef SRC_COMMON_STRINGSTATUS_H_ +#define SRC_COMMON_STRINGSTATUS_H_ #include -#include + #include +#include namespace curve { namespace common { class StringStatus { public: /** - * @brief ExposeAs 用于初始化bvar + * @brief ExposeAs is used to initialize the bvar * - * @param[in] prefix, 前缀 - * @param[in] name, 名字 + * @param[in] prefix, prefix + * @param[in] name, name */ - void ExposeAs(const std::string &prefix, const std::string &name); + void ExposeAs(const std::string& prefix, const std::string& name); /** - * @brief Set 设置每项key-value信息 + * @brief Set sets the key-value information for each item * * @param[in] key * @param[in] value @@ -49,30 +49,31 @@ class StringStatus { void Set(const std::string& key, const std::string& value); /** - * @brief Update 把当前key-value map中的键值对以json string的形式设置到status中 //NOLINT + * @brief Update writes the key-value pairs of the current map into status + * as a JSON string //NOLINT */ void Update(); /** - * @brief GetValueByKey 获取指定key对应的value + * @brief GetValueByKey Get the value corresponding to the specified key * - * @param[in] key 指定key + * @param[in] key The specified key */ - std::string GetValueByKey(const std::string &key); + std::string GetValueByKey(const std::string& key); /** - * @brief JsonBody 获取当前key-value map对应的json形式字符串 + * @brief JsonBody obtains the JSON string corresponding to the current + * key-value map */ std::string JsonBody(); private: - // 需要导出的结构体的key-value map + // The key-value map of the structure to be exported std::map kvs_; - // 该导出项对应的status + // The status corresponding to the exported item bvar::Status status_; }; } // namespace common } // namespace curve #endif // SRC_COMMON_STRINGSTATUS_H_ - diff --git a/src/common/timeutility.h index 1ba3483d34..d3fc2d244c 100644 --- a/src/common/timeutility.h +++ b/src/common/timeutility.h @@ -25,9 +25,10 @@ #include #include #include + +#include #include #include -#include namespace curve { namespace common { @@ -57,7 +58,8 @@ class TimeUtility { return localtime(&now)->tm_hour; } - // 时间戳转成标准时间输出在standard里面,时间戳单位为秒 + // Convert the timestamp to standard time and output it in standard; the + // timestamp unit is seconds static inline void TimeStampToStandard(time_t timeStamp, std::string* standard) { char now[64]; @@ -67,7 +69,7 @@ class TimeUtility { *standard = std::string(now); } - // 时间戳转成标准时间并返回,时间戳单位为秒 + // Convert the timestamp (in seconds) to standard time and return it static inline std::string
TimeStampToStandard(time_t timeStamp) { char now[64]; struct tm p; @@ -85,13 +87,9 @@ class ExpiredTime { public: ExpiredTime() : startUs_(TimeUtility::GetTimeofDayUs()) {} - double ExpiredSec() const { - return ExpiredUs() / 1000000; - } + double ExpiredSec() const { return ExpiredUs() / 1000000; } - double ExpiredMs() const { - return ExpiredUs() / 1000; - } + double ExpiredMs() const { return ExpiredUs() / 1000; } double ExpiredUs() const { return TimeUtility::GetTimeofDayUs() - startUs_; @@ -101,7 +99,7 @@ class ExpiredTime { uint64_t startUs_; }; -} // namespace common -} // namespace curve +} // namespace common +} // namespace curve -#endif // SRC_COMMON_TIMEUTILITY_H_ +#endif // SRC_COMMON_TIMEUTILITY_H_ diff --git a/src/common/uuid.h index 8fbc41f61c..996704c987 100644 --- a/src/common/uuid.h +++ b/src/common/uuid.h @@ -27,26 +27,29 @@ extern "C" { void uuid_generate(uuid_t out); void uuid_generate_random(uuid_t out); void uuid_generate_time(uuid_t out); -// 指明由uuid_generate_time生成的uuid是否使用了时间同步机制,不进行封装。 +// Indicates whether the UUID generated by uuid_generate_time used a time +// synchronization mechanism; this function is not wrapped int uuid_generate_time_safe(uuid_t out); } #include #define BUFF_LEN 36 namespace curve { namespace common { -// 生成uuid的生成器 +// UUID generator class UUIDGenerator { public: UUIDGenerator() {} /** - * @brief 生成uuid,优先采用的算法 - * 如果存在一个高质量的随机数生成器(/dev/urandom), - * UUID将基于其生成的随机数产生。 - * 备用算法:在高质量的随机数生成器不可用的情况下,如果可以获取到MAC地址, - * 则将利用由随机数生成器产生的随机数、当前时间、MAC地址生成UUID。 + * @brief Generate a UUID with a prioritized algorithm selection + * If a high-quality random number generator (/dev/urandom) is available, + * the UUID will be generated based on the random numbers it generates. + * Backup algorithm: If a high-quality random number generator is not + * available and if the MAC address can be obtained, the UUID will be + * generated using a combination of random numbers, current time, and the + * MAC address. * @param : - * @return 生成的uuid + * @return The generated UUID */ std::string GenerateUUID() { uuid_t out; @@ -58,11 +61,14 @@ class UUIDGenerator { } /** - * @brief 生成uuid - * 使用全局时钟、MAC地址。有MAC地址泄露风险。为了保证唯一性还使用的时间同步机制, - * 如果,时间同步机制不可用,多台机器上生成的uuid可能会重复。 + * @brief Generate a UUID + * using the global clock and the MAC address; there is a risk of MAC + * address leakage. To ensure uniqueness, it also employs a time + * synchronization mechanism. However, if the time synchronization mechanism + * is not available, there is a possibility of UUID duplication when + * generated on multiple machines. * @param : - * @return 生成的uuid + * @return The generated UUID */ std::string GenerateUUIDTime() { uuid_t out; @@ -74,10 +80,11 @@ class UUIDGenerator { } /** - * @brief 生成uuid - * 强制完全使用随机数,优先使用(/dev/urandom),备用(伪随机数生成器)。 - * 在使用伪随机数生成器的情况下,uuid有重复的风险。 - * @return 生成的uuid + * @brief Generate a UUID + * Forcefully utilize random numbers, with a preference for (/dev/urandom) + * and a fallback to pseudo-random number generation. When using the + * pseudo-random number generator, there is a risk of UUID duplication. + * @return The generated UUID */ std::string GenerateUUIDRandom() { uuid_t out;
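A short usage sketch of the three generator variants documented above; the trade-offs are exactly the ones the comments spell out:

// Minimal sketch of UUIDGenerator usage.
curve::common::UUIDGenerator gen;
std::string id = gen.GenerateUUID();          // preferred: /dev/urandom based
std::string timeId = gen.GenerateUUIDTime();  // clock + MAC, may leak the MAC
std::string randId = gen.GenerateUUIDRandom();  // random only; the PRNG
                                                // fallback risks duplication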
diff --git a/src/common/wait_interval.h index 69c82143c2..bbf8b21b49 100644 --- a/src/common/wait_interval.h +++ b/src/common/wait_interval.h @@ -20,36 +20,37 @@ * Author: lixiaocui */ -#ifndef SRC_COMMON_WAIT_INTERVAL_H_ -#define SRC_COMMON_WAIT_INTERVAL_H_ +#ifndef SRC_COMMON_WAIT_INTERVAL_H_ +#define SRC_COMMON_WAIT_INTERVAL_H_ #include "src/common/interruptible_sleeper.h" namespace curve { namespace common { -class WaitInterval { +class WaitInterval { public: /** - * Init 初始化任务的执行间隔 + * Init initializes the execution interval of the task * - * @param[in] intervalMs 执行间隔单位是ms + * @param[in] intervalMs The execution interval, in ms */ void Init(uint64_t intervalMs); /** - * WaitForNextExcution 根据最近一次的执行时间点和周期确定需要等待多久之后再执行 + * WaitForNextExcution determines how long to wait before executing based on + * the last execution time and the period */ void WaitForNextExcution(); /** - * StopWait 退出sleep等待 + * StopWait exits the sleep wait */ void StopWait(); private: - // 最近一次的执行时间 + // Last execution time uint64_t lastSend_; - // 任务的执行周期 + // Task execution period uint64_t intevalMs_; InterruptibleSleeper sleeper_;
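The WaitInterval helper above is intended for fixed-period background loops; a minimal sketch of the call pattern, where the 1000 ms period and the running flag are illustrative assumptions:

// Hypothetical periodic task driven by WaitInterval.
curve::common::WaitInterval waitInterval;
waitInterval.Init(1000);                 // run roughly once per second
while (running) {                        // `running` is an assumed loop flag
    DoPeriodicWork();                    // hypothetical task body
    waitInterval.WaitForNextExcution();  // sleeps out the rest of the period
}
// On shutdown, another thread calls waitInterval.StopWait() so the sleeper
// wakes immediately instead of waiting for the period to elapse.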
diff --git a/src/fs/ext4_filesystem_impl.cpp index f4cd6cfcdb..d649b68ce7 100644 --- a/src/fs/ext4_filesystem_impl.cpp +++ b/src/fs/ext4_filesystem_impl.cpp @@ -20,14 +20,15 @@ * Author: yangyaokai */ +#include "src/fs/ext4_filesystem_impl.h" + +#include #include -#include -#include #include -#include +#include +#include #include "src/common/string_util.h" -#include "src/fs/ext4_filesystem_impl.h" #include "src/fs/wrap_posix.h" #define MIN_KERNEL_VERSION KERNEL_VERSION(3, 15, 0) @@ -40,13 +41,11 @@ std::mutex Ext4FileSystemImpl::mutex_; Ext4FileSystemImpl::Ext4FileSystemImpl( std::shared_ptr posixWrapper) - : posixWrapper_(posixWrapper) - , enableRenameat2_(false) { + : posixWrapper_(posixWrapper), enableRenameat2_(false) { CHECK(posixWrapper_ != nullptr) << "PosixWrapper is null"; } -Ext4FileSystemImpl::~Ext4FileSystemImpl() { -} +Ext4FileSystemImpl::~Ext4FileSystemImpl() {} std::shared_ptr Ext4FileSystemImpl::getInstance() { std::lock_guard lock(mutex_); @@ -54,13 +53,14 @@ std::shared_ptr Ext4FileSystemImpl::getInstance() { std::shared_ptr wrapper = std::make_shared(); self_ = std::shared_ptr( - new(std::nothrow) Ext4FileSystemImpl(wrapper)); + new (std::nothrow) Ext4FileSystemImpl(wrapper)); CHECK(self_ != nullptr) << "Failed to new ext4 local fs."; } return self_; } -void Ext4FileSystemImpl::SetPosixWrapper(std::shared_ptr wrapper) { //NOLINT +void Ext4FileSystemImpl::SetPosixWrapper( + std::shared_ptr wrapper) { // NOLINT CHECK(wrapper != nullptr) << "PosixWrapper is null"; posixWrapper_ = wrapper; } @@ -71,16 +71,17 @@ bool Ext4FileSystemImpl::CheckKernelVersion() { ret = posixWrapper_->uname(&kernel_info); if (ret != 0) { - LOG(ERROR) << "Get kernel info failed."; - return false; + LOG(ERROR) << "Get kernel info failed."; + return false; } LOG(INFO) << "Kernel version: " << kernel_info.release; LOG(INFO) << "System version: " << kernel_info.version; LOG(INFO) << "Machine: " << kernel_info.machine; - // 通过uname获取的版本字符串格式可能为a.b.c-xxx - // a为主版本号,b为此版本号,c为修正号 + // The version string obtained through uname may have the format a.b.c-xxx, + // where a is the major version number, b is the minor version number, and + // c is the revision number vector elements; ::curve::common::SplitString(kernel_info.release, "-", &elements); if (elements.size() == 0) { @@ -90,7 +91,8 @@ bool Ext4FileSystemImpl::CheckKernelVersion() { vector numbers; ::curve::common::SplitString(elements[0], ".", &numbers); - // 有些系统可能版本格式前面部分是a.b.c.d,但是a.b.c是不变的 + // Some systems may use a version format whose leading part is a.b.c.d, + // but a.b.c stays the same if (numbers.size() < 3) { LOG(ERROR) << "parse kenel version failed."; return false; @@ -99,11 +101,10 @@ bool Ext4FileSystemImpl::CheckKernelVersion() { int major = std::stoi(numbers[0]); int minor = std::stoi(numbers[1]); int revision = std::stoi(numbers[2]); - LOG(INFO) << "major: " << major - << ", minor: " << minor + LOG(INFO) << "major: " << major << ", minor: " << minor << ", revision: " << revision; - // 内核版本必须大于3.15,用于支持renameat2 + // The kernel version must be greater than 3.15 to support renameat2 if (KERNEL_VERSION(major, minor, revision) < MIN_KERNEL_VERSION) { LOG(ERROR) << "Kernel older than 3.15 is not supported."; return false; @@ -114,14 +115,13 @@ int Ext4FileSystemImpl::Init(const LocalFileSystemOption& option) { enableRenameat2_ = option.enableRenameat2; if (enableRenameat2_) { - if (!CheckKernelVersion()) - return -1; + if (!CheckKernelVersion()) return -1; } return 0; } int Ext4FileSystemImpl::Statfs(const string& path, - struct FileSystemInfo *info) { + struct FileSystemInfo* info) { struct statfs diskInfo; int rc = posixWrapper_->statfs(path.c_str(), &diskInfo); if (rc < 0) { @@ -157,7 +157,8 @@ int Ext4FileSystemImpl::Close(int fd) { int Ext4FileSystemImpl::Delete(const string& path) { int rc = 0; - // 如果删除对象是目录的话,需要先删除目录下的子对象 + // If the object to delete is a directory, the sub-objects under the + // directory need to be deleted first if (DirExists(path)) { vector names; rc = List(path, &names); @@ -165,9 +166,9 @@ LOG(WARNING) << "List " << path << " failed."; return rc; } - for (auto &name : names) { + for (auto& name : names) { string subPath = path + "/" + name; - // 递归删除子对象 + // Recursively delete sub-objects rc = Delete(subPath); if (rc < 0) { LOG(WARNING) << "Delete " << subPath << " failed."; @@ -189,20 +190,19 @@ int Ext4FileSystemImpl::Mkdir(const string& dirName) { ::curve::common::SplitString(dirName, "/", &names); // root dir must exists - if (0 == names.size()) - return 0; + if (0 == names.size()) return 0; string path; for (size_t i = 0; i < names.size(); ++i) { - if (0 == i && dirName[0] != '/') // 相对路径 + if (0 == i && dirName[0] != '/') // Relative path path = path + names[i]; else path = path + "/" + names[i]; - if (DirExists(path)) - continue; - // 目录需要755权限,不然会出现“Permission denied” + if (DirExists(path)) continue; + // The directory requires 755 permissions, otherwise 'Permission + // denied' will appear if (posixWrapper_->mkdir(path.c_str(), 0755) < 0) { - LOG(WARNING) << "mkdir " << path << " failed. "<< strerror(errno); + LOG(WARNING) << "mkdir " << path << " failed. " << strerror(errno); return -errno; } } @@ -226,8 +226,7 @@ bool Ext4FileSystemImpl::FileExists(const string& filePath) { return false; } -int Ext4FileSystemImpl::DoRename(const string& oldPath, - const string& newPath, +int Ext4FileSystemImpl::DoRename(const string& oldPath, const string& newPath, unsigned int flags) { int rc = 0; if (enableRenameat2_) { @@ -237,8 +236,7 @@ int Ext4FileSystemImpl::DoRename(const string& oldPath, } if (rc < 0) { LOG(WARNING) << "rename failed: " << strerror(errno) - << ". old path: " << oldPath - << ", new path: " << newPath + << ". 
old path: " << oldPath << ", new path: " << newPath << ", flag: " << flags; return -errno; } @@ -246,21 +244,22 @@ int Ext4FileSystemImpl::DoRename(const string& oldPath, } int Ext4FileSystemImpl::List(const string& dirName, - vector *names) { - DIR *dir = posixWrapper_->opendir(dirName.c_str()); + vector* names) { + DIR* dir = posixWrapper_->opendir(dirName.c_str()); if (nullptr == dir) { LOG(WARNING) << "opendir:" << dirName << " failed:" << strerror(errno); return -errno; } - struct dirent *dirIter; + struct dirent* dirIter; errno = 0; - while ((dirIter=posixWrapper_->readdir(dir)) != nullptr) { - if (strcmp(dirIter->d_name, ".") == 0 - || strcmp(dirIter->d_name, "..") == 0) + while ((dirIter = posixWrapper_->readdir(dir)) != nullptr) { + if (strcmp(dirIter->d_name, ".") == 0 || + strcmp(dirIter->d_name, "..") == 0) continue; names->push_back(dirIter->d_name); } - // 可能存在其他携程改变了errno,但是只能通过此方式判断readdir是否成功 + // There may be other Ctrip changes to errno, but this is the only way to + // determine whether readdir is successful if (errno != 0) { LOG(WARNING) << "readdir failed: " << strerror(errno); } @@ -268,19 +267,14 @@ int Ext4FileSystemImpl::List(const string& dirName, return -errno; } -int Ext4FileSystemImpl::Read(int fd, - char *buf, - uint64_t offset, - int length) { +int Ext4FileSystemImpl::Read(int fd, char* buf, uint64_t offset, int length) { int remainLength = length; int relativeOffset = 0; int retryTimes = 0; while (remainLength > 0) { - int ret = posixWrapper_->pread(fd, - buf + relativeOffset, - remainLength, + int ret = posixWrapper_->pread(fd, buf + relativeOffset, remainLength, offset); - // 如果offset大于文件长度,pread会返回0 + // If the offset is greater than the file length, pread will return 0 if (ret == 0) { LOG(WARNING) << "pread returns zero." 
<< "offset: " << offset @@ -304,17 +298,13 @@ int Ext4FileSystemImpl::Read(int fd, return length - remainLength; } -int Ext4FileSystemImpl::Write(int fd, - const char *buf, - uint64_t offset, +int Ext4FileSystemImpl::Write(int fd, const char* buf, uint64_t offset, int length) { int remainLength = length; int relativeOffset = 0; int retryTimes = 0; while (remainLength > 0) { - int ret = posixWrapper_->pwrite(fd, - buf + relativeOffset, - remainLength, + int ret = posixWrapper_->pwrite(fd, buf + relativeOffset, remainLength, offset); if (ret < 0) { if (errno == EINTR && retryTimes < MAX_RETYR_TIME) { @@ -333,9 +323,7 @@ int Ext4FileSystemImpl::Write(int fd, return length; } -int Ext4FileSystemImpl::Write(int fd, - butil::IOBuf buf, - uint64_t offset, +int Ext4FileSystemImpl::Write(int fd, butil::IOBuf buf, uint64_t offset, int length) { if (length != static_cast(buf.size())) { LOG(ERROR) << "IOBuf::pcut_into_file_descriptor failed, fd: " << fd @@ -376,9 +364,7 @@ int Ext4FileSystemImpl::Sync(int fd) { return 0; } -int Ext4FileSystemImpl::Append(int fd, - const char *buf, - int length) { +int Ext4FileSystemImpl::Append(int fd, const char* buf, int length) { (void)fd; (void)buf; (void)length; @@ -386,10 +372,7 @@ int Ext4FileSystemImpl::Append(int fd, return 0; } -int Ext4FileSystemImpl::Fallocate(int fd, - int op, - uint64_t offset, - int length) { +int Ext4FileSystemImpl::Fallocate(int fd, int op, uint64_t offset, int length) { int rc = posixWrapper_->fallocate(fd, op, offset, length); if (rc < 0) { LOG(ERROR) << "fallocate failed: " << strerror(errno); @@ -398,7 +381,7 @@ int Ext4FileSystemImpl::Fallocate(int fd, return 0; } -int Ext4FileSystemImpl::Fstat(int fd, struct stat *info) { +int Ext4FileSystemImpl::Fstat(int fd, struct stat* info) { int rc = posixWrapper_->fstat(fd, info); if (rc < 0) { LOG(ERROR) << "fstat failed: " << strerror(errno); diff --git a/src/fs/local_filesystem.h b/src/fs/local_filesystem.h index 3072867807..075e273a29 100644 --- a/src/fs/local_filesystem.h +++ b/src/fs/local_filesystem.h @@ -23,22 +23,23 @@ #ifndef SRC_FS_LOCAL_FILESYSTEM_H_ #define SRC_FS_LOCAL_FILESYSTEM_H_ -#include #include -#include #include -#include -#include -#include -#include +#include +#include + #include +#include +#include #include // NOLINT +#include +#include #include "src/fs/fs_common.h" -using std::vector; using std::map; using std::string; +using std::vector; namespace curve { namespace fs { @@ -50,123 +51,130 @@ struct LocalFileSystemOption { class LocalFileSystem { public: - LocalFileSystem() {} + LocalFileSystem() {} virtual ~LocalFileSystem() {} /** - * 初始化文件系统 - * 如果文件系统还未格式化,首先会格式化, - * 然后挂载文件系统, - * 已经格式化或者已经挂载的文件系统不会重复格式化或挂载 - * @param option:初始化参数 + * Initialize file system + * If the file system has not been formatted yet, it will be formatted + * first, Then mount the file system, Formatted or mounted file systems will + * not be repeatedly formatted or mounted + * @param option: initialization parameters */ virtual int Init(const LocalFileSystemOption& option) = 0; /** - * 获取文件或目录所在的文件系统状态信息 - * @param path: 要获取的文件系统下的文件或目录路径 - * @param info[out]: 文件系统状态信息 - * @return 成功返回0 + * Obtain the file system status information where the file or directory is + * located + * @param path: The file or directory path under the file system to obtain + * @param info[out]: File system status information + * @return Successfully returned 0 */ virtual int Statfs(const string& path, struct FileSystemInfo* info) = 0; /** - * 打开文件句柄 - * @param path:文件路径 - * @param flags:操作文件方式的flag - * 
此flag使用POSIX文件系统的定义 - * @return 成功返回文件句柄id,失败返回负值 + * Open a file handle + * @param path: File path + * @param flags: flags controlling how the file is opened + * The flags follow the POSIX file system definitions + * @return the file handle id on success, or a negative value on + * failure */ virtual int Open(const string& path, int flags) = 0; /** - * 关闭文件句柄 - * @param fd: 文件句柄id - * @return 成功返回0 + * Close a file handle + * @param fd: file handle id + * @return 0 on success */ virtual int Close(int fd) = 0; /** - * 删除文件或目录 - * 如果删除对象为目录,会删除目录下的文件或子目录 - * @param path:文件或目录的路径 - * return 成功返回0 + * Delete a file or directory + * If the object to delete is a directory, the files or subdirectories + * under it will be deleted as well + * @param path: The path to the file or directory + * @return 0 on success */ virtual int Delete(const string& path) = 0; /** - * 创建目录 - * @param dirPath: 目录路径 - * @return 成功返回0 + * Create a directory + * @param dirPath: Directory path + * @return 0 on success */ virtual int Mkdir(const string& dirPath) = 0; /** - * 判断目录是否存在 - * @param dirPath:目录路径 - * @return 存在返回true,否则返回false + * Determine whether the directory exists + * @param dirPath: Directory path + * @return true if it exists, otherwise false */ virtual bool DirExists(const string& dirPath) = 0; /** - * 判断文件是否存在 - * @param dirPath:目录路径 - * @return 存在返回true,否则返回false + * Determine whether the file exists + * @param filePath: File path + * @return true if it exists, otherwise false */ virtual bool FileExists(const string& filePath) = 0; /** - * 重命名文件/目录 - * 将文件或目录重命名或者移到其他路径,不会覆盖已存在的文件 - * @param oldPath:原文件或目录路径 - * @param newPath:新的文件或目录路径 - * 新的文件或目录在重命名之前不存在,否则返回错误 - * @param flags:重命名使用的模式,默认值为0 - * 可选择RENAME_EXCHANGE、RENAME_EXCHANGE、RENAME_WHITEOUT三种模式 + * Rename a file/directory + * Renames or moves a file or directory to another path; it will not + * overwrite an existing file + * @param oldPath: Path of the original file or directory + * @param newPath: New file or directory path + * The new file or directory must not exist before renaming, otherwise an + * error is returned + * @param flags: The mode used for renaming, with a default value of 0 + * The RENAME_NOREPLACE, RENAME_EXCHANGE and RENAME_WHITEOUT modes can be + * selected * https://manpages.debian.org/testing/manpages-dev/renameat2.2.en.html - * @return 成功返回0 + * @return 0 on success */ - virtual int Rename(const string& oldPath, - const string& newPath, + virtual int Rename(const string& oldPath, const string& newPath, unsigned int flags = 0) { return DoRename(oldPath, newPath, flags); } /** - * 列举指定路径下的所有文件和目录名 - * @param dirPath:目录路径 - * @param name[out]:目录下的所有目录和文件名 - * @return 成功返回0 + * List all file and directory names under the specified path + * @param dirPath: Directory path + * @param name[out]: All directory and file names under the directory + * @return 0 on success */ virtual int List(const string& dirPath, vector* names) = 0; /** - * 从文件指定区域读取数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:接收读取数据的buffer - * @param offset:读取区域的起始偏移 - * @param length:读取数据的长度 - * @return 返回成功读取到的数据长度,失败返回-1 + * Read data from the specified area of the file + * @param fd: File handle id, obtained through the Open interface + * @param buf: buffer that receives the read data + * @param offset: The starting offset of the read area + * @param length: The length of the data to read + * @return the length of the data successfully read, or -1 on + * failure */ virtual int Read(int fd, char* buf, uint64_t offset, int length) = 0;
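To make the contract above concrete, a minimal sketch that opens, reads, and renames through the interface; lfs stands for any LocalFileSystem implementation and the paths are placeholders:

// Sketch only: `lfs` is an assumed LocalFileSystem instance.
int fd = lfs->Open("/data/chunk_001", O_RDWR);  // POSIX open flags
if (fd >= 0) {
    char buf[4096];
    int nread = lfs->Read(fd, buf, 0 /* offset */, sizeof(buf));
    if (nread < 0) {
        // read failed
    }
    lfs->Close(fd);
}
// Rename never overwrites an existing target; flags default to 0 and follow
// the renameat2 modes listed above.
lfs->Rename("/data/chunk_001", "/data/chunk_001.bak");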
/** - * 向文件指定区域写入数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:待写入数据的buffer - * @param offset:写入区域的起始偏移 - * @param length:写入数据的长度 - * @return 返回成功写入的数据长度,失败返回-1 + * Write data to the specified area of the file + * @param fd: File handle id, obtained through the Open interface + * @param buf: The buffer holding the data to be written + * @param offset: The starting offset of the write area + * @param length: The length of the data to write + * @return the length of the data successfully written, or -1 on + * failure */ virtual int Write(int fd, const char* buf, uint64_t offset, int length) = 0; /** - * 向文件指定区域写入数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:待写入数据 - * @param offset:写入区域的起始偏移 - * @param length:写入数据的长度 - * @return 返回成功写入的数据长度,失败返回-1 + * Write data to the specified area of the file + * @param fd: File handle id, obtained through the Open interface + * @param buf: Data to be written + * @param offset: The starting offset of the write area + * @param length: The length of the data to write + * @return the length of the data successfully written, or -1 on + * failure */ virtual int Write(int fd, butil::IOBuf buf, uint64_t offset, int length) = 0; @@ -181,59 +189,62 @@ class LocalFileSystem { virtual int Sync(int fd) = 0; /** - * 向文件末尾追加数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:待追加数据的buffer - * @param length:追加数据的长度 - * @return 返回成功追加的数据长度,失败返回-1 + * Append data to the end of the file + * @param fd: File handle id, obtained through the Open interface + * @param buf: buffer holding the data to append + * @param length: The length of the data to append + * @return the length of the data successfully appended, or -1 on + * failure */ virtual int Append(int fd, const char* buf, int length) = 0; /** - * 文件预分配/挖洞(未实现) - * @param fd:文件句柄id,通过Open接口获取 - * @param op:指定操作类型,预分配还是挖洞 - * @param offset:操作区域的起始偏移 - * @param length:操作区域的长度 - * @return 成功返回0 + * File preallocation/hole punching (not implemented) + * @param fd: File handle id, obtained through the Open interface + * @param op: Specifies the operation type, preallocation or hole punching + * @param offset: The starting offset of the operation area + * @param length: The length of the operation area + * @return 0 on success */ virtual int Fallocate(int fd, int op, uint64_t offset, int length) = 0; /** - * 获取指定文件状态信息 - * @param fd:文件句柄id,通过Open接口获取 - * @param info[out]:文件系统的信息 - * stat结构同POSIX接口中使用的stat - * @return 成功返回0 + * Obtain the status information of the specified file + * @param fd: File handle id, obtained through the Open interface + * @param info[out]: File status information + * The stat structure is the same as the stat used in the POSIX interface + * @return 0 on success */ virtual int Fstat(int fd, struct stat* info) = 0; /** - * 将文件数据和元数据刷新到磁盘 - * @param fd:文件句柄id,通过Open接口获取 - * @return 成功返回0 + * Flush file data and metadata to disk + * @param fd: File handle id, obtained through the Open interface + * @return 0 on success */ virtual int Fsync(int fd) = 0; private: virtual int DoRename(const string& /* oldPath */, const string& /* newPath */, - unsigned int /* flags */) { return -1; } + unsigned int /* flags */) { + return -1; + } }; - class LocalFsFactory { public: /** - * 创建文件系统对象 - * 本地文件系统的工厂方法,根据传入的类型,创建相应的对象 - * 由该接口创建的文件系统会自动进行初始化 - * @param type:文件系统类型 - * @param deviceID: 设备的编号 - * @return 返回本地文件系统对象指针 + * Create a file system object + * The factory method of the local file 
system; it creates the corresponding + * object based on the type passed in. The file system created by this + * interface is initialized automatically + * @param type: File system type + * @param deviceID: Device number + * @return a pointer to the local file system object */ - static std::shared_ptr CreateFs(FileSystemType type, - const std::string& deviceID); + static std::shared_ptr CreateFs( + FileSystemType type, const std::string& deviceID); }; } // namespace fs diff --git a/src/kvstorageclient/etcd_client.h index 16aec44e6a..b9c2266d83 100644 --- a/src/kvstorageclient/etcd_client.h +++ b/src/kvstorageclient/etcd_client.h @@ -24,9 +24,10 @@ #define SRC_KVSTORAGECLIENT_ETCD_CLIENT_H_ #include + #include -#include #include +#include namespace curve { namespace kvstorage { @@ -43,7 +44,7 @@ class KVStorageClient { * * @return error code EtcdErrCode */ - virtual int Put(const std::string &key, const std::string &value) = 0; + virtual int Put(const std::string& key, const std::string& value) = 0; /** * @brief PutRewithRevision store key-value @@ -54,8 +55,9 @@ * * @return error code EtcdErrCode */ - virtual int PutRewithRevision(const std::string &key, - const std::string &value, int64_t *revision) = 0; + virtual int PutRewithRevision(const std::string& key, + const std::string& value, + int64_t* revision) = 0; /** * @brief Get Get the value of the specified key @@ -65,7 +67,7 @@ * * @return error code */ - virtual int Get(const std::string &key, std::string *out) = 0; + virtual int Get(const std::string& key, std::string* out) = 0; /** * @brief List Get all the values ​​between [startKey, endKey) @@ -76,15 +78,16 @@ * * @return error code */ - virtual int List(const std::string &startKey, const std::string &endKey, - std::vector *values) = 0; + virtual int List(const std::string& startKey, const std::string& endKey, + std::vector* values) = 0; /** * @brief List all the key and values between [startKey, endKey) * * @param[in] startKey * @param[in] endKey - * @param[out] out store key/value pairs that key is between [startKey, endKey) + * @param[out] out store key/value pairs that key is between [startKey, + * endKey) * * @return error code */ @@ -98,7 +101,7 @@ * * @return error code */ - virtual int Delete(const std::string &key) = 0; + virtual int Delete(const std::string& key) = 0; /** * @brief DeleteRewithRevision Delete the value of the specified key @@ -108,17 +111,18 @@ * * @return error code */ - virtual int DeleteRewithRevision( - const std::string &key, int64_t *revision) = 0; + virtual int DeleteRewithRevision(const std::string& key, + int64_t* revision) = 0; /* - * @brief TxnN Operate transactions in the order of ops[0] ops[1] ..., currently 2 and 3 operations are supported //NOLINT - * - * @param[in] ops Operation set - * - * @return error code - */ - virtual int TxnN(const std::vector &ops) = 0; + * @brief TxnN Operate transactions in the order of ops[0] ops[1] ..., + * currently 2 and 3 operations are supported //NOLINT + * + * @param[in] ops Operation set + * + * @return error code + */ + virtual int TxnN(const std::vector& ops) = 0; /** * @brief CompareAndSwap Transaction, to achieve CAS @@ * * @return error code */ - virtual int CompareAndSwap(const std::string &key, const std::string &preV, - const std::string &target) = 0; + virtual int CompareAndSwap(const std::string& key, const std::string& preV, + const std::string& target) = 0; };
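As a usage note for the transaction primitives above, CompareAndSwap only writes the target value when the stored value still matches the expected one; a minimal sketch with a placeholder key, placeholder values, and storageCli standing for any KVStorageClient implementation:

// Hypothetical CAS update through a KVStorageClient implementation.
int ret = storageCli->CompareAndSwap("/config/version", "v1", "v2");
// The concrete EtcdErrCode values are defined elsewhere; when the compare
// fails because another writer got there first, the usual reaction is to
// re-read the key and retry the swap.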
// encapsulate the c header file of etcd generated by go compilation class EtcdClientImp : public KVStorageClient { public: EtcdClientImp() {} - ~EtcdClientImp() { - CloseClient(); - } + ~EtcdClientImp() { CloseClient(); } /** * @brief Init init the etcdclient, a global var in go @@ void CloseClient(); - int Put(const std::string &key, const std::string &value) override; + int Put(const std::string& key, const std::string& value) override; - int PutRewithRevision(const std::string &key, const std::string &value, - int64_t *revision) override; + int PutRewithRevision(const std::string& key, const std::string& value, + int64_t* revision) override; - int Get(const std::string &key, std::string *out) override; + int Get(const std::string& key, std::string* out) override; - int List(const std::string &startKey, - const std::string &endKey, std::vector *values) override; + int List(const std::string& startKey, const std::string& endKey, + std::vector* values) override; int List(const std::string& startKey, const std::string& endKey, - std::vector >* out) override; + std::vector>* out) override; - int Delete(const std::string &key) override; + int Delete(const std::string& key) override; - int DeleteRewithRevision( - const std::string &key, int64_t *revision) override; + int DeleteRewithRevision(const std::string& key, + int64_t* revision) override; - int TxnN(const std::vector &ops) override; + int TxnN(const std::vector& ops) override; - int CompareAndSwap(const std::string &key, const std::string &preV, - const std::string &target) override; + int CompareAndSwap(const std::string& key, const std::string& preV, + const std::string& target) override; - virtual int GetCurrentRevision(int64_t *revision); + virtual int GetCurrentRevision(int64_t* revision); /** * @brief ListWithLimitAndRevision @@ * @param[out] values the value vector of all the key-value pairs * @param[out] lastKey the last key of the vector */ - virtual int ListWithLimitAndRevision(const std::string &startKey, - const std::string &endKey, int64_t limit, int64_t revision, - std::vector *values, std::string *lastKey); + virtual int ListWithLimitAndRevision(const std::string& startKey, + const std::string& endKey, + int64_t limit, int64_t revision, + std::vector* values, + std::string* lastKey); /** * @brief CampaignLeader Leader campaign through etcd, return directly if @@ * leader when the session expired after * client offline. 
* @param[in] electionTimeoutMs the timeout,0 will block always - * @param[out] leaderOid leader的objectId,recorded in objectManager + * @param[out] leaderOid leader's objectId,recorded in objectManager * * @return EtcdErrCode::EtcdCampaignLeaderSuccess success,others fail */ - virtual int CampaignLeader( - const std::string &pfx, const std::string &leaderName, - uint32_t sessionInterSec, uint32_t electionTimeoutMs, - uint64_t *leaderOid); + virtual int CampaignLeader(const std::string& pfx, + const std::string& leaderName, + uint32_t sessionInterSec, + uint32_t electionTimeoutMs, uint64_t* leaderOid); /** * @brief LeaderObserve @@ -228,8 +232,8 @@ class EtcdClientImp : public KVStorageClient { * * @return if returned, the session between mds and etcd expired */ - virtual int LeaderObserve( - uint64_t leaderOid, const std::string &leaderName); + virtual int LeaderObserve(uint64_t leaderOid, + const std::string& leaderName); /** * @brief LeaderResign the leader resigns initiatively, the other peers @@ -241,7 +245,7 @@ class EtcdClientImp : public KVStorageClient { * @return EtcdErrCode::EtcdLeaderResiginSuccess resign seccess * EtcdErrCode::EtcdLeaderResiginErr resign fail */ - virtual int LeaderResign(uint64_t leaderOid, uint64_t timeoutMs); + virtual int LeaderResign(uint64_t leaderOid, uint64_t timeoutMs); // for test void SetTimeout(int time); diff --git a/src/leader_election/leader_election.cpp b/src/leader_election/leader_election.cpp index 76884e0b9c..de2a86c743 100644 --- a/src/leader_election/leader_election.cpp +++ b/src/leader_election/leader_election.cpp @@ -20,11 +20,14 @@ * Author: lixiaocui1 */ +#include "src/leader_election/leader_election.h" + #include -#include -#include //NOLINT + #include -#include "src/leader_election/leader_election.h" +#include +#include //NOLINT + #include "src/common/concurrent/concurrent.h" using ::curve::common::Thread; @@ -32,23 +35,21 @@ using ::curve::common::Thread; namespace curve { namespace election { int LeaderElection::CampaignLeader() { - LOG(INFO) << opt_.leaderUniqueName << " start campaign leader prefix: " - << realPrefix_; + LOG(INFO) << opt_.leaderUniqueName + << " start campaign leader prefix: " << realPrefix_; int resCode = opt_.etcdCli->CampaignLeader( - realPrefix_, - opt_.leaderUniqueName, - opt_.sessionInterSec, - opt_.electionTimeoutMs, - &leaderOid_); + realPrefix_, opt_.leaderUniqueName, opt_.sessionInterSec, + opt_.electionTimeoutMs, &leaderOid_); if (resCode == EtcdErrCode::EtcdCampaignLeaderSuccess) { - LOG(INFO) << opt_.leaderUniqueName << " campaign leader prefix:" - << realPrefix_ << " success"; + LOG(INFO) << opt_.leaderUniqueName + << " campaign leader prefix:" << realPrefix_ << " success"; return 0; } - LOG(WARNING) << opt_.leaderUniqueName << " campaign leader prefix:" - << realPrefix_ << " err: " << resCode; + LOG(WARNING) << opt_.leaderUniqueName + << " campaign leader prefix:" << realPrefix_ + << " err: " << resCode; return -1; } @@ -61,28 +62,29 @@ int LeaderElection::LeaderResign() { int res = opt_.etcdCli->LeaderResign(leaderOid_, 1000 * opt_.sessionInterSec); if (EtcdErrCode::EtcdLeaderResiginSuccess == res) { - LOG(INFO) << opt_.leaderUniqueName << " resign leader prefix:" - << realPrefix_ << " ok"; + LOG(INFO) << opt_.leaderUniqueName + << " resign leader prefix:" << realPrefix_ << " ok"; return 0; } - LOG(WARNING) << opt_.leaderUniqueName << " resign leader prefix:" - << realPrefix_ << " err: " << res; + LOG(WARNING) << opt_.leaderUniqueName + << " resign leader prefix:" << realPrefix_ << " err: " << res; 
return -1; } int LeaderElection::ObserveLeader() { - LOG(INFO) << opt_.leaderUniqueName << " start observe for prefix:" - << realPrefix_; + LOG(INFO) << opt_.leaderUniqueName + << " start observe for prefix:" << realPrefix_; int resCode = opt_.etcdCli->LeaderObserve(leaderOid_, opt_.leaderUniqueName); - LOG(ERROR) << opt_.leaderUniqueName << " leader observe for prefix:" - << realPrefix_ << " occur error, errcode: " << resCode; + LOG(ERROR) << opt_.leaderUniqueName + << " leader observe for prefix:" << realPrefix_ + << " occur error, errcode: " << resCode; // for test fiu_return_on("src/mds/leaderElection/observeLeader", -1); - // 退出当前进程 + // Exit the current process LOG(INFO) << "mds is existing due to the error of leader observation"; raise(SIGTERM); diff --git a/src/leader_election/leader_election.h index 70a28722ec..2188950cf7 100644 --- a/src/leader_election/leader_election.h +++ b/src/leader_election/leader_election.h @@ -24,32 +24,33 @@ #define SRC_LEADER_ELECTION_LEADER_ELECTION_H_ #include + #include #include -#include "src/kvstorageclient/etcd_client.h" #include "src/common/namespace_define.h" +#include "src/kvstorageclient/etcd_client.h" namespace curve { namespace election { -using ::curve::kvstorage::EtcdClientImp; using ::curve::common::LEADERCAMPAIGNNPFX; +using ::curve::kvstorage::EtcdClientImp; struct LeaderElectionOptions { - // etcd客户端 + // etcd client std::shared_ptr etcdCli; - // 带ttl的session,ttl超时时间内 + // session with ttl, kept alive within the ttl timeout uint32_t sessionInterSec; - // 竞选leader的超时时间 + // Timeout for campaigning for leader uint32_t electionTimeoutMs; - // leader名称,建议使用ip+port以示区分 + // leader name, it is recommended to use ip+port for differentiation std::string leaderUniqueName; - // 需要竞选的key + // key to campaign for std::string campaginPrefix; }; @@ -61,33 +62,35 @@ class LeaderElection { } /** - * @brief CampaignLeader 竞选leader + * @brief CampaignLeader campaigns to become the leader * - * @return 0表示竞选成功 -1表示竞选失败 + * @return 0 indicates a successful election, -1 indicates a failed election */ int CampaignLeader(); /** - * @brief StartObserverLeader 启动leader节点监测线程 + * @brief StartObserverLeader starts the leader node monitoring thread */ void StartObserverLeader(); /** - * @brief LeaderResign leader主动卸任leader,卸任成功后其他节点可以竞选leader + * @brief LeaderResign Leader proactively resigns from its leadership + * position. After successful resignation, other nodes can compete to become + * the new leader */ int LeaderResign(); /** - * @brief 返回leader name + * @brief returns the leader name */ - const std::string& GetLeaderName() { - return opt_.leaderUniqueName; - } + const std::string& GetLeaderName() { return opt_.leaderUniqueName; } public: /** - * @brief ObserveLeader 监测在etcd中创建的leader节点,正常情况下一直block, - * 退出表示leader change或者从client端角度看etcd异常,进程退出 + * @brief Monitor the leader node created in etcd. Under normal + * circumstances, this function continuously blocks. Exiting indicates a
Exiting indicates a + * leader change or, from the client's perspective, an abnormality in etcd, + * which leads to process termination */ int ObserveLeader(); @@ -95,14 +98,13 @@ class LeaderElection { // option LeaderElectionOptions opt_; - // realPrefix_ = leader竞选公共prefix + 自定义prefix + // realPrefix_ = leader campaign public prefix + custom prefix std::string realPrefix_; - // 竞选leader之后记录在objectManager中的id号 + // The ID number recorded in the object manager after leader election uint64_t leaderOid_; }; } // namespace election } // namespace curve #endif // SRC_LEADER_ELECTION_LEADER_ELECTION_H_ - diff --git a/src/mds/nameserver2/clean_core.cpp b/src/mds/nameserver2/clean_core.cpp index 54f743c300..de7b0ae432 100644 --- a/src/mds/nameserver2/clean_core.cpp +++ b/src/mds/nameserver2/clean_core.cpp @@ -24,28 +24,27 @@ namespace curve { namespace mds { -StatusCode CleanCore::CleanSnapShotFile(const FileInfo & fileInfo, +StatusCode CleanCore::CleanSnapShotFile(const FileInfo& fileInfo, TaskProgress* progress) { if (fileInfo.segmentsize() == 0) { LOG(ERROR) << "cleanSnapShot File Error, segmentsize = 0"; return StatusCode::KInternalError; } - uint32_t segmentNum = fileInfo.length() / fileInfo.segmentsize(); + uint32_t segmentNum = fileInfo.length() / fileInfo.segmentsize(); uint64_t segmentSize = fileInfo.segmentsize(); for (uint32_t i = 0; i < segmentNum; i++) { // load segment PageFileSegment segment; StoreStatus storeRet = storage_->GetSegment(fileInfo.parentid(), - i * segmentSize, - &segment); + i * segmentSize, &segment); if (storeRet == StoreStatus::KeyNotExist) { continue; - } else if (storeRet != StoreStatus::OK) { + } else if (storeRet != StoreStatus::OK) { LOG(ERROR) << "cleanSnapShot File Error: " - << "GetSegment Error, inodeid = " << fileInfo.id() - << ", filename = " << fileInfo.filename() - << ", offset = " << i * segmentSize - << ", sequenceNum = " << fileInfo.seqnum(); + << "GetSegment Error, inodeid = " << fileInfo.id() + << ", filename = " << fileInfo.filename() + << ", offset = " << i * segmentSize + << ", sequenceNum = " << fileInfo.seqnum(); progress->SetStatus(TaskStatus::FAILED); return StatusCode::kSnapshotFileDeleteError; } @@ -54,40 +53,40 @@ StatusCode CleanCore::CleanSnapShotFile(const FileInfo & fileInfo, LogicalPoolID logicalPoolID = segment.logicalpoolid(); uint32_t chunkNum = segment.chunks_size(); for (uint32_t j = 0; j != chunkNum; j++) { - // 删除快照时如果chunk不存在快照,则需要修改chunk的correctedSn - // 防止删除快照后,后续的写触发chunk的快照 - // correctSn为创建快照后文件的版本号,也就是快照版本号+1 + // When deleting a snapshot, if the chunk does not have a snapshot, + // the correctedSn of the chunk needs to be modified Prevent + // subsequent writes from triggering Chunk snapshots after deleting + // snapshots CorrectSn is the version number of the file after + // creating the snapshot, which is the snapshot version number+1 SeqNum correctSn = fileInfo.seqnum() + 1; int ret = copysetClient_->DeleteChunkSnapshotOrCorrectSn( - logicalPoolID, - segment.chunks()[j].copysetid(), - segment.chunks()[j].chunkid(), - correctSn); + logicalPoolID, segment.chunks()[j].copysetid(), + segment.chunks()[j].chunkid(), correctSn); if (ret != 0) { LOG(ERROR) << "CleanSnapShotFile Error: " - << "DeleteChunkSnapshotOrCorrectSn Error" - << ", ret = " << ret - << ", inodeid = " << fileInfo.id() - << ", filename = " << fileInfo.filename() - << ", correctSn = " << correctSn; + << "DeleteChunkSnapshotOrCorrectSn Error" + << ", ret = " << ret + << ", inodeid = " << fileInfo.id() + << ", filename = " << fileInfo.filename() + << ", 
correctSn = " << correctSn; progress->SetStatus(TaskStatus::FAILED); return StatusCode::kSnapshotFileDeleteError; } } - progress->SetProgress(100 * (i+1) / segmentNum); + progress->SetProgress(100 * (i + 1) / segmentNum); } // delete the storage - StoreStatus ret = storage_->DeleteSnapshotFile(fileInfo.parentid(), - fileInfo.filename()); + StoreStatus ret = + storage_->DeleteSnapshotFile(fileInfo.parentid(), fileInfo.filename()); if (ret != StoreStatus::OK) { LOG(INFO) << "delete snapshotfile error, retCode = " << ret; progress->SetStatus(TaskStatus::FAILED); return StatusCode::kSnapshotFileDeleteError; } else { LOG(INFO) << "inodeid = " << fileInfo.id() - << ", filename = " << fileInfo.filename() - << ", seq = " << fileInfo.seqnum() << ", deleted"; + << ", filename = " << fileInfo.filename() + << ", seq = " << fileInfo.seqnum() << ", deleted"; } progress->SetProgress(100); @@ -95,27 +94,27 @@ StatusCode CleanCore::CleanSnapShotFile(const FileInfo & fileInfo, return StatusCode::kOK; } -StatusCode CleanCore::CleanFile(const FileInfo & commonFile, +StatusCode CleanCore::CleanFile(const FileInfo& commonFile, TaskProgress* progress) { if (commonFile.segmentsize() == 0) { LOG(ERROR) << "Clean commonFile File Error, segmentsize = 0"; return StatusCode::KInternalError; } - int segmentNum = commonFile.length() / commonFile.segmentsize(); + int segmentNum = commonFile.length() / commonFile.segmentsize(); uint64_t segmentSize = commonFile.segmentsize(); for (int i = 0; i != segmentNum; i++) { // load segment PageFileSegment segment; - StoreStatus storeRet = storage_->GetSegment(commonFile.id(), - i * segmentSize, &segment); + StoreStatus storeRet = + storage_->GetSegment(commonFile.id(), i * segmentSize, &segment); if (storeRet == StoreStatus::KeyNotExist) { continue; - } else if (storeRet != StoreStatus::OK) { + } else if (storeRet != StoreStatus::OK) { LOG(ERROR) << "Clean common File Error: " - << "GetSegment Error, inodeid = " << commonFile.id() - << ", filename = " << commonFile.filename() - << ", offset = " << i * segmentSize; + << "GetSegment Error, inodeid = " << commonFile.id() + << ", filename = " << commonFile.filename() + << ", offset = " << i * segmentSize; progress->SetStatus(TaskStatus::FAILED); return StatusCode::kCommonFileDeleteError; } @@ -123,8 +122,7 @@ StatusCode CleanCore::CleanFile(const FileInfo & commonFile, int ret = DeleteChunksInSegment(segment, commonFile.seqnum()); if (ret != 0) { LOG(ERROR) << "Clean common File Error: " - << ", ret = " << ret - << ", inodeid = " << commonFile.id() + << ", ret = " << ret << ", inodeid = " << commonFile.id() << ", filename = " << commonFile.filename() << ", sequenceNum = " << commonFile.seqnum(); progress->SetStatus(TaskStatus::FAILED); @@ -133,33 +131,33 @@ StatusCode CleanCore::CleanFile(const FileInfo & commonFile, // delete segment int64_t revision; - storeRet = storage_->DeleteSegment( - commonFile.id(), i * segmentSize, &revision); + storeRet = storage_->DeleteSegment(commonFile.id(), i * segmentSize, + &revision); if (storeRet != StoreStatus::OK) { LOG(ERROR) << "Clean common File Error: " - << "DeleteSegment Error, inodeid = " << commonFile.id() - << ", filename = " << commonFile.filename() - << ", offset = " << i * segmentSize - << ", sequenceNum = " << commonFile.seqnum(); + << "DeleteSegment Error, inodeid = " << commonFile.id() + << ", filename = " << commonFile.filename() + << ", offset = " << i * segmentSize + << ", sequenceNum = " << commonFile.seqnum(); progress->SetStatus(TaskStatus::FAILED); return 
StatusCode::kCommonFileDeleteError; } allocStatistic_->DeAllocSpace(segment.logicalpoolid(), - segment.segmentsize(), revision); + segment.segmentsize(), revision); progress->SetProgress(100 * (i + 1) / segmentNum); } // delete the storage - StoreStatus ret = storage_->DeleteFile(commonFile.parentid(), - commonFile.filename()); + StoreStatus ret = + storage_->DeleteFile(commonFile.parentid(), commonFile.filename()); if (ret != StoreStatus::OK) { LOG(INFO) << "delete common file error, retCode = " << ret; progress->SetStatus(TaskStatus::FAILED); return StatusCode::kCommonFileDeleteError; } else { LOG(INFO) << "inodeid = " << commonFile.id() - << ", filename = " << commonFile.filename() - << ", seq = " << commonFile.seqnum() << ", deleted"; + << ", filename = " << commonFile.filename() + << ", seq = " << commonFile.seqnum() << ", deleted"; } progress->SetProgress(100); @@ -223,10 +221,8 @@ int CleanCore::DeleteChunksInSegment(const PageFileSegment& segment, const LogicalPoolID logicalPoolId = segment.logicalpoolid(); for (int i = 0; i < segment.chunks_size(); ++i) { int ret = copysetClient_->DeleteChunk( - logicalPoolId, - segment.chunks()[i].copysetid(), - segment.chunks()[i].chunkid(), - seq); + logicalPoolId, segment.chunks()[i].copysetid(), + segment.chunks()[i].chunkid(), seq); if (ret != 0) { LOG(ERROR) << "DeleteChunk failed, ret = " << ret diff --git a/src/mds/nameserver2/clean_core.h index 0cb4f3f8ab..8011d10ee8 100644 --- a/src/mds/nameserver2/clean_core.h +++ b/src/mds/nameserver2/clean_core.h @@ -25,12 +25,13 @@ #include #include -#include "src/mds/nameserver2/namespace_storage.h" + +#include "src/mds/chunkserverclient/copyset_client.h" #include "src/mds/common/mds_define.h" +#include "src/mds/nameserver2/allocstatistic/alloc_statistic.h" +#include "src/mds/nameserver2/namespace_storage.h" #include "src/mds/nameserver2/task_progress.h" -#include "src/mds/chunkserverclient/copyset_client.h" #include "src/mds/topology/topology.h" -#include "src/mds/nameserver2/allocstatistic/alloc_statistic.h" using ::curve::mds::chunkserverclient::CopysetClient; using ::curve::mds::topology::Topology; @@ -41,30 +42,32 @@ namespace mds { class CleanCore { public: CleanCore(std::shared_ptr storage, - std::shared_ptr copysetClient, - std::shared_ptr allocStatistic) + std::shared_ptr copysetClient, + std::shared_ptr allocStatistic) : storage_(storage), copysetClient_(copysetClient), allocStatistic_(allocStatistic) {} /** - * @brief 删除快照文件,更新task状态 - * @param snapShotFile: 需要清理的snapshot文件 - * @param progress: CleanSnapShotFile接口属于时间较长的偏异步任务 - * 这里传入进度进行跟踪反馈 + * @brief Delete the snapshot file and update the task status + * @param snapShotFile: The snapshot file that needs to be cleaned + * @param progress: CleanSnapShotFile is a long-running, mostly + * asynchronous task; progress is passed in for tracking and feedback */ - StatusCode CleanSnapShotFile(const FileInfo & snapShotFile, + StatusCode CleanSnapShotFile(const FileInfo& snapShotFile, TaskProgress* progress); /** - * @brief 删除普通文件,更新task状态 - * @param commonFile: 需要清理的普通文件 - * @param progress: CleanFile接口属于时间较长的偏异步任务 - * 这里传入进度进行跟踪反馈 - * @return 是否执行成功,成功返回StatusCode::kOK + * @brief Delete a regular file and update the task status + * @param commonFile: The regular file that needs to be cleaned + * @param progress: CleanFile is a long-running, mostly asynchronous + * task; progress is passed in for tracking and feedback + * @return whether the execution was successful; returns StatusCode::kOK + * on success */ - StatusCode CleanFile(const FileInfo & commonFile, - TaskProgress* progress); + StatusCode CleanFile(const FileInfo& commonFile, TaskProgress* progress); /** * @brief clean discarded segment and chunks @@ -85,4 +88,4 @@ } // namespace mds } // namespace curve -#endif // SRC_MDS_NAMESERVER2_CLEAN_CORE_H_ +#endif // SRC_MDS_NAMESERVER2_CLEAN_CORE_H_ diff --git a/src/mds/nameserver2/clean_manager.h index 86dbbd3474..223203952a 100644 --- a/src/mds/nameserver2/clean_manager.h +++ b/src/mds/nameserver2/clean_manager.h @@ -26,18 +26,19 @@ #include #include #include + #include "proto/nameserver2.pb.h" -#include "src/mds/nameserver2/clean_task_manager.h" -#include "src/mds/nameserver2/clean_core.h" -#include "src/mds/nameserver2/namespace_storage.h" -#include "src/mds/nameserver2/async_delete_snapshot_entity.h" #include "src/common/concurrent/concurrent.h" #include "src/common/concurrent/dlock.h" +#include "src/mds/nameserver2/async_delete_snapshot_entity.h" +#include "src/mds/nameserver2/clean_core.h" +#include "src/mds/nameserver2/clean_task_manager.h" +#include "src/mds/nameserver2/namespace_storage.h" using curve::common::DLock; using curve::common::DLockOpts; -namespace curve { +namespace curve { namespace mds { class CleanDiscardSegmentTask; @@ -45,8 +46,8 @@ class CleanManagerInterface { public: virtual ~CleanManagerInterface() {} - virtual bool SubmitDeleteSnapShotFileJob(const FileInfo&, - std::shared_ptr entity) = 0; + virtual bool SubmitDeleteSnapShotFileJob( + const FileInfo&, std::shared_ptr entity) = 0; virtual std::shared_ptr GetTask(TaskIDType id) = 0; virtual bool SubmitDeleteCommonFileJob(const FileInfo&) = 0; @@ -56,24 +57,26 @@ curve::common::CountDownEvent* counter) = 0; }; /** - * CleanManager 用于异步清理 删除快照对应的数据 - * 1. 接收在线的删除快照请求 - * 2. 线程池异步处理实际的chunk删除任务 + * CleanManager is used for asynchronously cleaning up and deleting the data + * corresponding to snapshots. + * 1. Receives online requests for snapshot deletion. + * 2. Asynchronously processes the actual chunk deletion tasks in a thread + * pool. **/
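Before the class itself, a sketch of how a caller drives it: deletion requests become tasks that the worker pool drains asynchronously; cleanManager and fileInfo below are placeholders for an assembled CleanManager and the FileInfo of the file being removed:

// Hypothetical submission of an asynchronous file deletion.
if (!cleanManager->SubmitDeleteCommonFileJob(fileInfo)) {
    LOG(ERROR) << "failed to submit clean job, inodeid = " << fileInfo.id();
}
// Progress can be polled later through the task manager, e.g. via GetTask().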
diff --git a/src/mds/nameserver2/clean_manager.h b/src/mds/nameserver2/clean_manager.h index 86dbbd3474..223203952a 100644 --- a/src/mds/nameserver2/clean_manager.h +++ b/src/mds/nameserver2/clean_manager.h @@ -26,18 +26,19 @@ #include #include #include + #include "proto/nameserver2.pb.h" -#include "src/mds/nameserver2/clean_task_manager.h" -#include "src/mds/nameserver2/clean_core.h" -#include "src/mds/nameserver2/namespace_storage.h" -#include "src/mds/nameserver2/async_delete_snapshot_entity.h" #include "src/common/concurrent/concurrent.h" #include "src/common/concurrent/dlock.h" +#include "src/mds/nameserver2/async_delete_snapshot_entity.h" +#include "src/mds/nameserver2/clean_core.h" +#include "src/mds/nameserver2/clean_task_manager.h" +#include "src/mds/nameserver2/namespace_storage.h" using curve::common::DLock; using curve::common::DLockOpts; -namespace curve { +namespace curve { namespace mds { class CleanDiscardSegmentTask; @@ -45,8 +46,8 @@ class CleanDiscardSegmentTask; class CleanManagerInterface { public: virtual ~CleanManagerInterface() {} - virtual bool SubmitDeleteSnapShotFileJob(const FileInfo&, - std::shared_ptr entity) = 0; + virtual bool SubmitDeleteSnapShotFileJob( + const FileInfo&, std::shared_ptr entity) = 0; virtual std::shared_ptr GetTask(TaskIDType id) = 0; virtual bool SubmitDeleteCommonFileJob(const FileInfo&) = 0; @@ -56,24 +57,26 @@ class CleanManagerInterface { curve::common::CountDownEvent* counter) = 0; }; /** - * CleanManager 用于异步清理 删除快照对应的数据 - * 1. 接收在线的删除快照请求 - * 2. 线程池异步处理实际的chunk删除任务 + * CleanManager asynchronously cleans up and deletes the data + * corresponding to snapshots. + * 1. Receives online requests for snapshot deletion. + * 2. Asynchronously processes the actual chunk deletion tasks in a thread pool. **/ class CleanManager : public CleanManagerInterface { public: explicit CleanManager(std::shared_ptr core, - std::shared_ptr taskMgr, - std::shared_ptr storage); + std::shared_ptr taskMgr, + std::shared_ptr storage); bool Start(void); bool Stop(void); - bool SubmitDeleteSnapShotFileJob(const FileInfo &fileInfo, - std::shared_ptr entity) override; + bool SubmitDeleteSnapShotFileJob( + const FileInfo& fileInfo, + std::shared_ptr entity) override; - bool SubmitDeleteCommonFileJob(const FileInfo&fileInfo) override; + bool SubmitDeleteCommonFileJob(const FileInfo& fileInfo) override; bool SubmitCleanDiscardSegmentJob( const std::string& cleanSegmentKey, diff --git a/src/mds/nameserver2/clean_task.h b/src/mds/nameserver2/clean_task.h index 9001312870..c865ff6271 100644 --- a/src/mds/nameserver2/clean_task.h +++ b/src/mds/nameserver2/clean_task.h @@ -23,24 +23,26 @@ #ifndef SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ #define SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ +#include //NOLINT +#include //NOLINT + #include #include //NOLINT #include -#include //NOLINT -#include //NOLINT + #include "proto/nameserver2.pb.h" -#include "src/mds/nameserver2/task_progress.h" -#include "src/mds/nameserver2/clean_core.h" -#include "src/mds/nameserver2/async_delete_snapshot_entity.h" -#include "src/common/concurrent/dlock.h" #include "src/common/concurrent/count_down_event.h" +#include "src/common/concurrent/dlock.h" +#include "src/mds/nameserver2/async_delete_snapshot_entity.h" +#include "src/mds/nameserver2/clean_core.h" +#include "src/mds/nameserver2/task_progress.h" using curve::common::DLock; namespace curve { namespace mds { -typedef uint64_t TaskIDType; +typedef uint64_t TaskIDType; // default clean task retry times const uint32_t kDefaultTaskRetryTimes = 5; @@ -52,56 +54,40 @@ class Task { virtual void Run(void) = 0; std::function Closure() { - return [this] () { - Run(); - }; + return [this]() { Run(); }; } - TaskProgress GetTaskProgress(void) const { - return progress_; - } + TaskProgress GetTaskProgress(void) const { return progress_; } - void SetTaskProgress(TaskProgress progress) { - progress_ = progress; - } + void SetTaskProgress(TaskProgress progress) { progress_ = progress; } - TaskProgress* GetMutableTaskProgress(void) { - return &progress_; - } + TaskProgress* GetMutableTaskProgress(void) { return &progress_; } - void SetTaskID(TaskIDType taskID) { - taskID_ = taskID; - } + void SetTaskID(TaskIDType taskID) { taskID_ = taskID; } - TaskIDType GetTaskID(void) const { - return taskID_; - } + TaskIDType GetTaskID(void) const { return taskID_; } - void SetRetryTimes(uint32_t retry) { - retry_ = retry; - } + void SetRetryTimes(uint32_t retry) { retry_ = retry; } void Retry() { retry_--; progress_ = TaskProgress(); } - bool RetryTimesExceed() { - return retry_ == 0; - } + bool RetryTimesExceed() { return retry_ == 0; } protected: TaskIDType taskID_; TaskProgress progress_; - // 任务最大重试次数 + // Maximum number of task retries uint32_t retry_; }; -class SnapShotCleanTask: public Task { +class SnapShotCleanTask : public Task { public: - SnapShotCleanTask(TaskIDType taskID, std::shared_ptr core, - FileInfo fileInfo, - std::shared_ptr entity = nullptr) { + SnapShotCleanTask( + TaskIDType taskID, std::shared_ptr core, FileInfo fileInfo, + std::shared_ptr entity = nullptr) { cleanCore_ = core; fileInfo_ = fileInfo; SetTaskProgress(TaskProgress()); @@ -110,29 +96,29 @@ class SnapShotCleanTask: public Task { SetRetryTimes(kDefaultTaskRetryTimes); } void Run(void) override { - StatusCode ret = 
cleanCore_->CleanSnapShotFile(fileInfo_, - GetMutableTaskProgress()); + StatusCode ret = + cleanCore_->CleanSnapShotFile(fileInfo_, GetMutableTaskProgress()); if (asyncEntity_ != nullptr) { brpc::ClosureGuard doneGuard(asyncEntity_->GetClosure()); brpc::Controller* cntl = static_cast(asyncEntity_->GetController()); - DeleteSnapShotResponse *response = - asyncEntity_->GetDeleteResponse(); - const DeleteSnapShotRequest *request = - asyncEntity_->GetDeleteRequest(); + DeleteSnapShotResponse* response = + asyncEntity_->GetDeleteResponse(); + const DeleteSnapShotRequest* request = + asyncEntity_->GetDeleteRequest(); response->set_statuscode(ret); if (ret != StatusCode::kOK) { LOG(ERROR) << "logid = " << cntl->log_id() - << ", CleanSnapShotFile fail, filename = " - << request->filename() - << ", sequencenum = " << request->seq() - << ", statusCode = " << ret; + << ", CleanSnapShotFile fail, filename = " + << request->filename() + << ", sequencenum = " << request->seq() + << ", statusCode = " << ret; } else { LOG(INFO) << "logid = " << cntl->log_id() - << ", CleanSnapShotFile ok, filename = " - << request->filename() - << ", sequencenum = " << request->seq(); + << ", CleanSnapShotFile ok, filename = " + << request->filename() + << ", sequencenum = " << request->seq(); } } return; @@ -144,10 +130,10 @@ class SnapShotCleanTask: public Task { std::shared_ptr asyncEntity_; }; -class CommonFileCleanTask: public Task { +class CommonFileCleanTask : public Task { public: CommonFileCleanTask(TaskIDType taskID, std::shared_ptr core, - FileInfo fileInfo) { + FileInfo fileInfo) { cleanCore_ = core; fileInfo_ = fileInfo; SetTaskProgress(TaskProgress()); @@ -211,4 +197,4 @@ class SegmentCleanTask : public Task { } // namespace mds } // namespace curve -#endif // SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ +#endif // SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ diff --git a/src/mds/nameserver2/clean_task_manager.cpp b/src/mds/nameserver2/clean_task_manager.cpp index 2a73ff87b9..3aadf6694c 100644 --- a/src/mds/nameserver2/clean_task_manager.cpp +++ b/src/mds/nameserver2/clean_task_manager.cpp @@ -19,16 +19,17 @@ * Created Date: Wednesday December 19th 2018 * Author: hzsunjianliang */ -#include -#include #include "src/mds/nameserver2/clean_task_manager.h" +#include +#include + namespace curve { namespace mds { CleanTaskManager::CleanTaskManager(std::shared_ptr channelPool, - int threadNum, int checkPeriod) - : channelPool_(channelPool) { + int threadNum, int checkPeriod) + : channelPool_(channelPool) { threadNum_ = threadNum; checkPeriod_ = checkPeriod; stopFlag_ = true; @@ -43,30 +44,29 @@ void CleanTaskManager::CheckCleanResult(void) { auto taskProgress = iter->second->GetTaskProgress(); if (taskProgress.GetStatus() == TaskStatus::SUCCESS) { LOG(INFO) << "going to remove task, taskID = " - << iter->second->GetTaskID(); + << iter->second->GetTaskID(); iter = cleanTasks_.erase(iter); continue; } else if (taskProgress.GetStatus() == TaskStatus::FAILED) { iter->second->Retry(); if (!iter->second->RetryTimesExceed()) { - LOG(WARNING) << "CleanTaskManager find Task Failed," - << " retry," - << " taskID = " - << iter->second->GetTaskID(); + LOG(WARNING) + << "CleanTaskManager find Task Failed," + << " retry," + << " taskID = " << iter->second->GetTaskID(); cleanWorkers_->Enqueue(iter->second->Closure()); } else { LOG(ERROR) << "CleanTaskManager find Task Failed," - << " retry times exceed," - << " going to remove task," - << " taskID = " - << iter->second->GetTaskID(); + << " retry times exceed," + << " going to remove task," + << " 
taskID = " << iter->second->GetTaskID(); iter = cleanTasks_.erase(iter); continue; } } ++iter; } - // clean task为空,清空channelPool + // Clean task is empty, clear channelPool if (cleanTasks_.empty() && notEmptyBefore) { LOG(INFO) << "All tasks completed, clear channel pool"; channelPool_->Clear(); @@ -81,7 +81,7 @@ bool CleanTaskManager::Start(void) { stopFlag_ = false; // start worker thread - cleanWorkers_ = new ::curve::common::TaskThreadPool<>(); + cleanWorkers_ = new ::curve::common::TaskThreadPool<>(); if (cleanWorkers_->Start(threadNum_) != 0) { LOG(ERROR) << "thread pool start error"; @@ -89,8 +89,8 @@ bool CleanTaskManager::Start(void) { } // start check thread - checkThread_ = new common::Thread(&CleanTaskManager::CheckCleanResult, - this); + checkThread_ = + new common::Thread(&CleanTaskManager::CheckCleanResult, this); LOG(INFO) << "TaskManger check thread started"; return true; } @@ -117,7 +117,7 @@ bool CleanTaskManager::PushTask(std::shared_ptr task) { common::LockGuard lck(mutex_); if (stopFlag_) { LOG(ERROR) << "task manager not started, taskID = " - << task->GetTaskID(); + << task->GetTaskID(); return false; } if (cleanTasks_.find(task->GetTaskID()) != cleanTasks_.end()) { @@ -137,7 +137,7 @@ std::shared_ptr CleanTaskManager::GetTask(TaskIDType id) { auto iter = cleanTasks_.begin(); if ((iter = cleanTasks_.find(id)) == cleanTasks_.end()) { - LOG(INFO) << "taskid = "<< id << ", not found"; + LOG(INFO) << "taskid = " << id << ", not found"; return nullptr; } else { return iter->second; diff --git a/src/mds/nameserver2/clean_task_manager.h b/src/mds/nameserver2/clean_task_manager.h index 9673a0b1c4..409b9df5b8 100644 --- a/src/mds/nameserver2/clean_task_manager.h +++ b/src/mds/nameserver2/clean_task_manager.h @@ -22,20 +22,21 @@ #ifndef SRC_MDS_NAMESERVER2_CLEAN_TASK_MANAGER_H_ #define SRC_MDS_NAMESERVER2_CLEAN_TASK_MANAGER_H_ -#include -#include //NOLINT -#include //NOLINT #include +#include //NOLINT +#include //NOLINT +#include + +#include "src/common/channel_pool.h" +#include "src/common/concurrent/concurrent.h" #include "src/common/concurrent/task_thread_pool.h" #include "src/common/interruptible_sleeper.h" -#include "src/common/concurrent/concurrent.h" -#include "src/common/channel_pool.h" #include "src/mds/common/mds_define.h" #include "src/mds/nameserver2/clean_task.h" using ::curve::common::Atomic; -using ::curve::common::InterruptibleSleeper; using ::curve::common::ChannelPool; +using ::curve::common::InterruptibleSleeper; namespace curve { namespace mds { @@ -43,40 +44,40 @@ namespace mds { class CleanTaskManager { public: /** - * @brief 初始化TaskManager - * @param channelPool: 连接池 - * @param threadNum: worker线程的数量 - * @param checkPeriod: 周期性任务检查线程时间, ms + * @brief Initialize TaskManager + * @param channelPool: Connection Pool + * @param threadNum: Number of worker threads + * @param checkPeriod: Periodic task check thread time, ms */ explicit CleanTaskManager(std::shared_ptr channelPool, int threadNum = 10, int checkPeriod = 10000); - ~CleanTaskManager() { - Stop(); - } + ~CleanTaskManager() { Stop(); } /** - * @brief 启动worker线程池、启动检查线程 + * @brief: Start worker thread pool, start check thread * */ bool Start(void); /** - * @brief 停止worker线程池、启动检查线程 + * @brief: Stop worker thread pool, start check thread * */ bool Stop(void); /** - * @brief 向线程池推送task - * @param task: 对应的工作任务 - * @return 推送task是否成功,如已存在对应的任务,推送是吧 + * @brief Push task to thread pool + * @param task: corresponding work task + * @return: Is the task successfully pushed? 
diff --git a/src/mds/nameserver2/clean_task_manager.h b/src/mds/nameserver2/clean_task_manager.h index 9673a0b1c4..409b9df5b8 100644 --- a/src/mds/nameserver2/clean_task_manager.h +++ b/src/mds/nameserver2/clean_task_manager.h @@ -22,20 +22,21 @@ #ifndef SRC_MDS_NAMESERVER2_CLEAN_TASK_MANAGER_H_ #define SRC_MDS_NAMESERVER2_CLEAN_TASK_MANAGER_H_ -#include -#include //NOLINT -#include //NOLINT #include +#include //NOLINT +#include //NOLINT +#include + +#include "src/common/channel_pool.h" +#include "src/common/concurrent/concurrent.h" #include "src/common/concurrent/task_thread_pool.h" #include "src/common/interruptible_sleeper.h" -#include "src/common/concurrent/concurrent.h" -#include "src/common/channel_pool.h" #include "src/mds/common/mds_define.h" #include "src/mds/nameserver2/clean_task.h" using ::curve::common::Atomic; -using ::curve::common::InterruptibleSleeper; using ::curve::common::ChannelPool; +using ::curve::common::InterruptibleSleeper; namespace curve { namespace mds { @@ -43,40 +44,40 @@ namespace mds { class CleanTaskManager { public: /** - * @brief 初始化TaskManager - * @param channelPool: 连接池 - * @param threadNum: worker线程的数量 - * @param checkPeriod: 周期性任务检查线程时间, ms + * @brief Initialize the TaskManager + * @param channelPool: Connection pool + * @param threadNum: Number of worker threads + * @param checkPeriod: Interval of the periodic check thread, in ms */ explicit CleanTaskManager(std::shared_ptr channelPool, int threadNum = 10, int checkPeriod = 10000); - ~CleanTaskManager() { - Stop(); - } + ~CleanTaskManager() { Stop(); } /** - * @brief 启动worker线程池、启动检查线程 + * @brief Start the worker thread pool and the check thread * */ bool Start(void); /** - * @brief 停止worker线程池、启动检查线程 + * @brief Stop the worker thread pool and the check thread * */ bool Stop(void); /** - * @brief 向线程池推送task - * @param task: 对应的工作任务 - * @return 推送task是否成功,如已存在对应的任务,推送是吧 + * @brief Push a task to the thread pool + * @param task: the work task to run + * @return whether the task was pushed successfully; false is returned + * if a task with the same ID already exists */ bool PushTask(std::shared_ptr task); /** - * @brief 获取当前的task - * @param id: 对应任务的相关文件InodeID - * @return 返回对应task的shared_ptr 或者 不存在返回nullptr + * @brief Get the current task + * @param id: the file InodeID associated with the task + * @return the shared_ptr of the corresponding task, or nullptr if it + * does not exist */ std::shared_ptr GetTask(TaskIDType id); @@ -85,20 +86,21 @@ class CleanTaskManager { private: int threadNum_; - ::curve::common::TaskThreadPool<> *cleanWorkers_; + ::curve::common::TaskThreadPool<>* cleanWorkers_; // for period check snapshot delete status std::unordered_map> cleanTasks_; common::Mutex mutex_; - common::Thread *checkThread_; + common::Thread* checkThread_; int checkPeriod_; Atomic stopFlag_; InterruptibleSleeper sleeper_; - // 连接池,和chunkserverClient共享,没有任务在执行时清空 + // Connection pool, shared with chunkserverClient; cleared when no + // task is executing std::shared_ptr channelPool_; }; -} // namespace mds -} // namespace curve +} // namespace mds +} // namespace curve #endif // SRC_MDS_NAMESERVER2_CLEAN_TASK_MANAGER_H_
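PushTask, as documented above, refuses a task whose ID is already present in the task map, and GetTask is a plain lookup. A minimal sketch of that keyed-deduplication idea; the TaskRegistry type below is a simplified stand-in, not the real CleanTaskManager:

    #include <cstdint>
    #include <iostream>
    #include <memory>
    #include <unordered_map>

    using TaskIDType = uint64_t;
    struct Task { TaskIDType id; };

    class TaskRegistry {
     public:
        // Returns false if a task with the same ID already exists.
        bool PushTask(std::shared_ptr<Task> task) {
            TaskIDType id = task->id;
            return tasks_.emplace(id, std::move(task)).second;
        }
        std::shared_ptr<Task> GetTask(TaskIDType id) {
            auto iter = tasks_.find(id);
            return iter == tasks_.end() ? nullptr : iter->second;
        }
     private:
        std::unordered_map<TaskIDType, std::shared_ptr<Task>> tasks_;
    };

    int main() {
        TaskRegistry registry;
        registry.PushTask(std::make_shared<Task>(Task{1}));
        std::cout << std::boolalpha
                  << registry.PushTask(std::make_shared<Task>(Task{1}))  // false
                  << '\n';
    }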
diff --git a/src/snapshotcloneserver/clone/clone_core.cpp b/src/snapshotcloneserver/clone/clone_core.cpp index 2974ed06c8..021da6b359 100644 --- a/src/snapshotcloneserver/clone/clone_core.cpp +++ b/src/snapshotcloneserver/clone/clone_core.cpp @@ -22,28 +22,27 @@ #include "src/snapshotcloneserver/clone/clone_core.h" +#include #include #include #include -#include -#include "src/snapshotcloneserver/clone/clone_task.h" +#include "src/common/concurrent/name_lock.h" #include "src/common/location_operator.h" #include "src/common/uuid.h" -#include "src/common/concurrent/name_lock.h" +#include "src/snapshotcloneserver/clone/clone_task.h" -using ::curve::common::UUIDGenerator; using ::curve::common::LocationOperator; using ::curve::common::NameLock; using ::curve::common::NameLockGuard; +using ::curve::common::UUIDGenerator; namespace curve { namespace snapshotcloneserver { int CloneCoreImpl::Init() { int ret = client_->Mkdir(cloneTempDir_, mdsRootUser_); - if (ret != LIBCURVE_ERROR::OK && - ret != -LIBCURVE_ERROR::EXISTS) { + if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::EXISTS) { LOG(ERROR) << "Mkdir fail, ret = " << ret << ", dirpath = " << cloneTempDir_; return kErrCodeServerInitFail; @@ -51,22 +50,20 @@ return kErrCodeSuccess; } -int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, - const std::string &user, - const std::string &destination, - bool lazyFlag, - CloneTaskType taskType, - std::string poolset, - CloneInfo *cloneInfo) { - // 查询数据库中是否有任务正在执行 +int CloneCoreImpl::CloneOrRecoverPre(const UUID& source, + const std::string& user, + const std::string& destination, + bool lazyFlag, CloneTaskType taskType, + std::string poolset, + CloneInfo* cloneInfo) { + // Check whether any task is already executing in the database std::vector cloneInfoList; metaStore_->GetCloneInfoByFileName(destination, &cloneInfoList); bool needJudgeFileExist = false; std::vector existCloneInfos; - for (auto &info : cloneInfoList) { + for (auto& info : cloneInfoList) { LOG(INFO) << "CloneOrRecoverPre find same clone task" - << ", source = " << source - << ", user = " << user + << ", source = " << source << ", user = " << user << ", destination = " << destination << ", poolset = " << poolset << ", Exist CloneInfo : " << info; @@ -74,40 +71,42 @@ if (taskType == CloneTaskType::kClone) { if (info.GetStatus() == CloneStatus::cloning || info.GetStatus() == CloneStatus::retrying) { - if ((info.GetUser() == user) && - (info.GetSrc() == source) && + if ((info.GetUser() == user) && (info.GetSrc() == source) && (info.GetIsLazy() == lazyFlag) && (info.GetTaskType() == taskType)) { - // 视为同一个clone + // Treated as the same clone *cloneInfo = info; return kErrCodeTaskExist; } else { - // 视为不同的克隆,那么文件实际上已被占用,返回文件已存在 + // Treated as a different clone: the file is actually + // occupied, so return that the file already exists return kErrCodeFileExist; } } else if (info.GetStatus() == CloneStatus::done || info.GetStatus() == CloneStatus::error || info.GetStatus() == CloneStatus::metaInstalled) { - // 可能已经删除,需要再判断文件存不存在, - // 在已删除的条件下,允许再克隆 + // The file may have been deleted, so check again whether it + // exists; cloning again is allowed once it has been deleted existCloneInfos.push_back(info); needJudgeFileExist = true; } else { - // 此时,有个相同的克隆任务正在删除中, 返回文件被占用 + // At this point an identical clone task is being deleted, so + // return that the file is occupied return kErrCodeFileExist; } } else { // is recover if (info.GetStatus() == CloneStatus::recovering || info.GetStatus() == CloneStatus::retrying) { - if ((info.GetUser() == user) && - (info.GetSrc() == source) && + if ((info.GetUser() == user) && (info.GetSrc() == source) && (info.GetIsLazy() == lazyFlag) && (info.GetTaskType() == taskType)) { - // 视为同一个clone,返回任务已存在 + // Treated as the same clone; return that the task already + // exists *cloneInfo = info; return kErrCodeTaskExist; } else { - // 视为不同的克隆,那么文件实际上已被占用,返回文件已存在 + // Treated as a different clone: the file is actually + // occupied, so return that the file already exists return kErrCodeFileExist; } } else if (info.GetStatus() == CloneStatus::done || @@ -115,13 +114,15 @@ info.GetStatus() == CloneStatus::metaInstalled) { // nothing } else { - // 此时,有个相同的任务正在删除中, 返回文件被占用 + // At this point an identical task is being deleted, so return + // that the file is occupied return kErrCodeFileExist; } } } - // 目标文件已存在不能clone, 不存在不能recover + // A clone requires that the target file not exist; a recover requires + // that it exist FInfo destFInfo; int ret = client_->GetFileInfo(destination, mdsRootUser_, &destFInfo); switch (ret) { @@ -129,7 +130,7 @@ if (CloneTaskType::kClone == taskType) { if (needJudgeFileExist) { bool match = false; - // 找出inodeid匹配的cloneInfo + // Find the cloneInfo whose inodeid matches for (auto& existInfo : existCloneInfos) { if (destFInfo.id == existInfo.GetDestId()) { *cloneInfo = existInfo; @@ -140,27 +141,29 @@ if (match) { return kErrCodeTaskExist; } else { - // 如果没找到,那么dest file都不是这些clone任务创建的, - // 意味着文件重名了 - LOG(ERROR) << "Clone dest file exist, " - << "but task not match! " - << "source = " << source - << ", user = " << user - << ", destination = " << destination - << ", poolset = " << poolset; + // If not found, none of these clone tasks created the + // dest file, which means the file name is duplicated + LOG(ERROR) + << "Clone dest file exist, " + << "but task not match! 
" + << "source = " << source << ", user = " << user + << ", destination = " << destination + << ", poolset = " << poolset; return kErrCodeFileExist; } } else { - // 没有对应的cloneInfo,意味着文件重名了 + // There is no corresponding cloneInfo, which means the file + // has a duplicate name LOG(ERROR) << "Clone dest file must not exist" - << ", source = " << source - << ", user = " << user + << ", source = " << source << ", user = " << user << ", destination = " << destination << ", poolset = " << poolset; return kErrCodeFileExist; } } else if (CloneTaskType::kRecover == taskType) { - // recover任务,卷的poolset信息不变 + // The recover task keeps the poolset information of the volume + // unchanged poolset = destFInfo.poolset; } else { assert(false); @@ -169,21 +172,19 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, case -LIBCURVE_ERROR::NOTEXIST: if (CloneTaskType::kRecover == taskType) { LOG(ERROR) << "Recover dest file must exist" - << ", source = " << source - << ", user = " << user + << ", source = " << source << ", user = " << user << ", destination = " << destination; return kErrCodeFileNotExist; } break; default: LOG(ERROR) << "GetFileInfo encounter an error" - << ", ret = " << ret - << ", source = " << source + << ", ret = " << ret << ", source = " << source << ", user = " << user; return kErrCodeInternalError; } - // 是否为快照 + // Is it a snapshot SnapshotInfo snapInfo; CloneFileType fileType; @@ -204,8 +205,7 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, } if (snapInfo.GetUser() != user) { LOG(ERROR) << "Clone snapshot by invalid user" - << ", source = " << source - << ", user = " << user + << ", source = " << source << ", user = " << user << ", destination = " << destination << ", poolset = " << poolset << ", snapshot.user = " << snapInfo.GetUser(); @@ -225,15 +225,13 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, case -LIBCURVE_ERROR::NOTEXIST: case -LIBCURVE_ERROR::PARAM_ERROR: LOG(ERROR) << "Clone source file not exist" - << ", source = " << source - << ", user = " << user + << ", source = " << source << ", user = " << user << ", destination = " << destination << ", poolset = " << poolset; return kErrCodeFileNotExist; default: LOG(ERROR) << "GetFileInfo encounter an error" - << ", ret = " << ret - << ", source = " << source + << ", ret = " << ret << ", source = " << source << ", user = " << user; return kErrCodeInternalError; } @@ -245,27 +243,26 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, return kErrCodeFileStatusInvalid; } - // TODO(镜像克隆的用户认证待完善) + // TODO (User authentication for mirror cloning to be improved) } UUID uuid = UUIDGenerator().GenerateUUID(); - CloneInfo info(uuid, user, taskType, - source, destination, poolset, fileType, lazyFlag); + CloneInfo info(uuid, user, taskType, source, destination, poolset, fileType, + lazyFlag); if (CloneTaskType::kClone == taskType) { info.SetStatus(CloneStatus::cloning); } else { info.SetStatus(CloneStatus::recovering); } - // 这里必须先AddCloneInfo, 因为如果先SetCloneFileStatus,然后AddCloneInfo, - // 如果AddCloneInfo失败又意外重启,将没人知道SetCloneFileStatus调用过,造成 - // 镜像无法删除 + // Here, you must first AddCloneInfo because if you first set + // CloneFileStatus and then AddCloneInfo, If AddCloneInfo fails and + // unexpectedly restarts, no one will know that SetCloneFileStatus has been + // called, causing Mirror cannot be deleted ret = metaStore_->AddCloneInfo(info); if (ret < 0) { LOG(ERROR) << "AddCloneInfo error" - << ", ret = " << ret - << ", taskId = " << uuid - << ", user = " << user - << ", source = " << source + << 
", ret = " << ret << ", taskId = " << uuid + << ", user = " << user << ", source = " << source << ", destination = " << destination << ", poolset = " << poolset; if (CloneFileType::kSnapshot == fileType) { @@ -275,20 +272,19 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, } if (CloneFileType::kFile == fileType) { NameLockGuard lockGuard(cloneRef_->GetLock(), source); - ret = client_->SetCloneFileStatus(source, - FileStatus::BeingCloned, - mdsRootUser_); + ret = client_->SetCloneFileStatus(source, FileStatus::BeingCloned, + mdsRootUser_); if (ret < 0) { - // 这里不处理SetCloneFileStatus的错误, - // 因为SetCloneFileStatus失败的所有结果都是可接受的, - // 相比于处理SetCloneFileStatus失败的情况更直接: - // 比如调用DeleteCloneInfo删除任务, - // 一旦DeleteCloneInfo失败,给用户返回error之后, - // 重启服务将造成Clone继续进行, - // 跟用户结果返回的结果不一致,造成用户的困惑 + // The SetCloneFileStatus error is not handled here, + // Because all results of SetCloneFileStatus failure are acceptable, + // Compared to handling SetCloneFileStatus failure, it is more + // direct: For example, calling DeleteCloneInfo to delete a task, + // Once DeleteCloneInfo fails and an error is returned to the user, + // Restarting the service will cause Clone to continue, + // Inconsistency with the results returned by the user, causing + // confusion for the user LOG(WARNING) << "SetCloneFileStatus encounter an error" - << ", ret = " << ret - << ", source = " << source + << ", ret = " << ret << ", source = " << source << ", user = " << user; } cloneRef_->IncrementRef(source); @@ -298,10 +294,8 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, return kErrCodeSuccess; } -int CloneCoreImpl::FlattenPre( - const std::string &user, - const TaskIdType &taskId, - CloneInfo *cloneInfo) { +int CloneCoreImpl::FlattenPre(const std::string& user, const TaskIdType& taskId, + CloneInfo* cloneInfo) { (void)user; int ret = metaStore_->GetCloneInfo(taskId, cloneInfo); if (ret < 0) { @@ -311,7 +305,8 @@ int CloneCoreImpl::FlattenPre( case CloneStatus::done: case CloneStatus::cloning: case CloneStatus::recovering: { - // 已经完成的或正在进行中返回task exist, 表示不需要处理 + // A task exists is returned for completed or in progress, + // indicating that it does not need to be processed return kErrCodeTaskExist; } case CloneStatus::metaInstalled: { @@ -362,7 +357,8 @@ void CloneCoreImpl::HandleCloneOrRecoverTask( } } - // 在kCreateCloneMeta以后的步骤还需更新CloneChunkInfo信息中的chunkIdInfo + // In the steps after kCreateCloneMeta, it is necessary to update the + // chunkIdInfo in the CloneChunkInfo information if (NeedUpdateCloneMeta(task)) { ret = CreateOrUpdateCloneMeta(task, &newFileInfo, &segInfos); if (ret < 0) { @@ -451,9 +447,8 @@ void CloneCoreImpl::HandleCloneOrRecoverTask( } int CloneCoreImpl::BuildFileInfoFromSnapshot( - std::shared_ptr task, - FInfo *newFileInfo, - CloneSegmentMap *segInfos) { + std::shared_ptr task, FInfo* newFileInfo, + CloneSegmentMap* segInfos) { segInfos->clear(); UUID source = task->GetCloneInfo().GetSrc(); @@ -477,8 +472,8 @@ int CloneCoreImpl::BuildFileInfoFromSnapshot( return kErrCodeInternalError; } newFileInfo->poolset = !task->GetCloneInfo().GetPoolset().empty() - ? task->GetCloneInfo().GetPoolset() - : snapInfo.GetPoolset(); + ? 
task->GetCloneInfo().GetPoolset() + : snapInfo.GetPoolset(); if (IsRecover(task)) { FInfo fInfo; @@ -504,34 +499,33 @@ int CloneCoreImpl::BuildFileInfoFromSnapshot( << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } - // 从快照恢复的destinationId为目标文件的id + // The destinationId recovered from the snapshot is the ID of the target + // file task->GetCloneInfo().SetDestId(fInfo.id); - // 从快照恢复seqnum+1 + // Restore seqnum+1 from snapshot newFileInfo->seqnum = fInfo.seqnum + 1; } else { newFileInfo->seqnum = kInitializeSeqNum; } newFileInfo->owner = task->GetCloneInfo().GetUser(); - ChunkIndexDataName indexName(snapInfo.GetFileName(), - snapInfo.GetSeqNum()); + ChunkIndexDataName indexName(snapInfo.GetFileName(), snapInfo.GetSeqNum()); ChunkIndexData snapMeta; ret = dataStore_->GetChunkIndexData(indexName, &snapMeta); if (ret < 0) { - LOG(ERROR) << "GetChunkIndexData error" - << ", fileName = " << snapInfo.GetFileName() - << ", seqNum = " << snapInfo.GetSeqNum() - << ", taskid = " << task->GetTaskId(); - return ret; + LOG(ERROR) << "GetChunkIndexData error" + << ", fileName = " << snapInfo.GetFileName() + << ", seqNum = " << snapInfo.GetSeqNum() + << ", taskid = " << task->GetTaskId(); + return ret; } uint64_t segmentSize = snapInfo.GetSegmentSize(); uint64_t chunkSize = snapInfo.GetChunkSize(); uint64_t chunkPerSegment = segmentSize / chunkSize; - std::vector chunkIndexs = - snapMeta.GetAllChunkIndex(); - for (auto &chunkIndex : chunkIndexs) { + std::vector chunkIndexs = snapMeta.GetAllChunkIndex(); + for (auto& chunkIndex : chunkIndexs) { ChunkDataName chunkDataName; snapMeta.GetChunkDataName(chunkIndex, &chunkDataName); uint64_t segmentIndex = chunkIndex / chunkPerSegment; @@ -556,10 +550,9 @@ int CloneCoreImpl::BuildFileInfoFromSnapshot( return kErrCodeSuccess; } -int CloneCoreImpl::BuildFileInfoFromFile( - std::shared_ptr task, - FInfo *newFileInfo, - CloneSegmentMap *segInfos) { +int CloneCoreImpl::BuildFileInfoFromFile(std::shared_ptr task, + FInfo* newFileInfo, + CloneSegmentMap* segInfos) { segInfos->clear(); UUID source = task->GetCloneInfo().GetSrc(); std::string user = task->GetCloneInfo().GetUser(); @@ -568,13 +561,11 @@ int CloneCoreImpl::BuildFileInfoFromFile( int ret = client_->GetFileInfo(source, mdsRootUser_, &fInfo); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "GetFileInfo fail" - << ", ret = " << ret - << ", source = " << source - << ", user = " << user - << ", taskid = " << task->GetTaskId(); + << ", ret = " << ret << ", source = " << source + << ", user = " << user << ", taskid = " << task->GetTaskId(); return kErrCodeFileNotExist; } - // GetOrAllocateSegment依赖fullPathName + // GetOrAllocateSegment depends on fullPathName fInfo.fullPathName = source; newFileInfo->chunksize = fInfo.chunksize; @@ -591,8 +582,8 @@ int CloneCoreImpl::BuildFileInfoFromFile( return kErrCodeInternalError; } newFileInfo->poolset = !task->GetCloneInfo().GetPoolset().empty() - ? task->GetCloneInfo().GetPoolset() - : fInfo.poolset; + ? 
task->GetCloneInfo().GetPoolset() + : fInfo.poolset; uint64_t fileLength = fInfo.length; uint64_t segmentSize = fInfo.segmentsize; @@ -603,33 +594,31 @@ int CloneCoreImpl::BuildFileInfoFromFile( << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } - if (fileLength%segmentSize != 0) { + if (fileLength % segmentSize != 0) { LOG(ERROR) << "GetFileInfo return invalid fileInfo, " << "fileLength is not align to SegmentSize" << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } - for (uint64_t i = 0; i< fileLength/segmentSize; i++) { + for (uint64_t i = 0; i < fileLength / segmentSize; i++) { uint64_t offset = i * segmentSize; SegmentInfo segInfoOut; - ret = client_->GetOrAllocateSegmentInfo( - false, offset, &fInfo, mdsRootUser_, &segInfoOut); - if (ret != LIBCURVE_ERROR::OK && - ret != -LIBCURVE_ERROR::NOT_ALLOCATE) { + ret = client_->GetOrAllocateSegmentInfo(false, offset, &fInfo, + mdsRootUser_, &segInfoOut); + if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOT_ALLOCATE) { LOG(ERROR) << "GetOrAllocateSegmentInfo fail" - << ", ret = " << ret - << ", filename = " << source - << ", user = " << user - << ", offset = " << offset - << ", allocateIfNotExist = " << "false" + << ", ret = " << ret << ", filename = " << source + << ", user = " << user << ", offset = " << offset + << ", allocateIfNotExist = " + << "false" << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } if (segInfoOut.chunkvec.size() != 0) { CloneSegmentInfo segInfo; for (std::vector::size_type j = 0; - j < segInfoOut.chunkvec.size(); j++) { + j < segInfoOut.chunkvec.size(); j++) { CloneChunkInfo info; info.location = std::to_string(offset + j * chunkSize); info.seqNum = kInitializeSeqNum; @@ -642,10 +631,8 @@ int CloneCoreImpl::BuildFileInfoFromFile( return kErrCodeSuccess; } - -int CloneCoreImpl::CreateCloneFile( - std::shared_ptr task, - const FInfo &fInfo) { +int CloneCoreImpl::CreateCloneFile(std::shared_ptr task, + const FInfo& fInfo) { std::string fileName = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); std::string user = fInfo.owner; @@ -657,47 +644,43 @@ int CloneCoreImpl::CreateCloneFile( const auto& poolset = fInfo.poolset; std::string source = ""; - // 只有从文件克隆才带clone source + // Clone source is only available when cloning from a file if (CloneFileType::kFile == task->GetCloneInfo().GetFileType()) { source = task->GetCloneInfo().GetSrc(); } FInfo fInfoOut; - int ret = client_->CreateCloneFile(source, fileName, - mdsRootUser_, fileLength, seqNum, chunkSize, + int ret = client_->CreateCloneFile( + source, fileName, mdsRootUser_, fileLength, seqNum, chunkSize, stripeUnit, stripeCount, poolset, &fInfoOut); if (ret == LIBCURVE_ERROR::OK) { // nothing } else if (ret == -LIBCURVE_ERROR::EXISTS) { - ret = client_->GetFileInfo(fileName, - mdsRootUser_, &fInfoOut); + ret = client_->GetFileInfo(fileName, mdsRootUser_, &fInfoOut); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "GetFileInfo fail" - << ", ret = " << ret - << ", fileName = " << fileName + << ", ret = " << ret << ", fileName = " << fileName << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } } else { LOG(ERROR) << "CreateCloneFile file" - << ", ret = " << ret - << ", destination = " << fileName - << ", user = " << user - << ", fileLength = " << fileLength - << ", seqNum = " << seqNum - << ", chunkSize = " << chunkSize + << ", ret = " << ret << ", destination = " << fileName + << ", user = " << user << ", fileLength = " << fileLength + << ", seqNum = " << seqNum << ", chunkSize = " 
<< chunkSize << ", return fileId = " << fInfoOut.id << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } task->GetCloneInfo().SetOriginId(fInfoOut.id); if (IsClone(task)) { - // 克隆情况下destinationId = originId; + // In the case of cloning, destinationId = originId; task->GetCloneInfo().SetDestId(fInfoOut.id); } task->GetCloneInfo().SetTime(fInfoOut.ctime); - // 如果是lazy&非快照,先不要createCloneMeta,createCloneChunk - // 等后面stage2阶段recoveryChunk之前去createCloneMeta,createCloneChunk + // For a lazy clone from a file (not from a snapshot), skip + // createCloneMeta and createCloneChunk for now; they run in stage 2, + // right before recoverChunk if (IsLazy(task) && IsFile(task)) { task->GetCloneInfo().SetNextStep(CloneStep::kCompleteCloneMeta); } else { @@ -707,17 +690,14 @@ ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after CreateCloneFile error." - << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return ret; } return kErrCodeSuccess; } -int CloneCoreImpl::CreateCloneMeta( - std::shared_ptr task, - FInfo *fInfo, - CloneSegmentMap *segInfos) { +int CloneCoreImpl::CreateCloneMeta(std::shared_ptr task, + FInfo* fInfo, CloneSegmentMap* segInfos) { int ret = CreateOrUpdateCloneMeta(task, fInfo, segInfos); if (ret < 0) { return ret; @@ -728,29 +708,28 @@ ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after CreateCloneMeta error." - << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return ret; } return kErrCodeSuccess; } -int CloneCoreImpl::CreateCloneChunk( - std::shared_ptr task, - const FInfo &fInfo, - CloneSegmentMap *segInfos) { +int CloneCoreImpl::CreateCloneChunk(std::shared_ptr task, + const FInfo& fInfo, + CloneSegmentMap* segInfos) { int ret = kErrCodeSuccess; uint32_t chunkSize = fInfo.chunksize; uint32_t correctSn = 0; - // 克隆时correctSn为0,恢复时为新产生的文件版本 + // correctSn is 0 when cloning; when recovering it is the newly + // generated file version if (IsClone(task)) { correctSn = 0; } else { correctSn = fInfo.seqnum; } auto tracker = std::make_shared(); - for (auto & cloneSegmentInfo : *segInfos) { - for (auto & cloneChunkInfo : cloneSegmentInfo.second) { + for (auto& cloneSegmentInfo : *segInfos) { + for (auto& cloneChunkInfo : cloneSegmentInfo.second) { std::string location; if (IsSnapshot(task)) { location = LocationOperator::GenerateS3Location( @@ -790,13 +769,13 @@ } } } - // 最后剩余数量不足的任务 + // Wait out the remaining tasks at the end, which are fewer than one + // full batch do { tracker->WaitSome(1); std::list results = tracker->PopResultContexts(); if (0 == results.size()) { - // 已经完成,没有新的结果了 + // All requests have completed; there are no new results break; } ret = HandleCreateCloneChunkResultsAndRetry(task, tracker, results); @@ -813,8 +792,7 @@ ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after CreateCloneChunk error." 
- << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } return kErrCodeSuccess; @@ -824,33 +802,26 @@ int CloneCoreImpl::StartAsyncCreateCloneChunk( std::shared_ptr task, std::shared_ptr tracker, std::shared_ptr context) { - CreateCloneChunkClosure *cb = - new CreateCloneChunkClosure(tracker, context); + CreateCloneChunkClosure* cb = new CreateCloneChunkClosure(tracker, context); tracker->AddOneTrace(); LOG(INFO) << "Doing CreateCloneChunk" << ", location = " << context->location << ", logicalPoolId = " << context->cidInfo.lpid_ << ", copysetId = " << context->cidInfo.cpid_ << ", chunkId = " << context->cidInfo.cid_ - << ", seqNum = " << context->sn - << ", csn = " << context->csn + << ", seqNum = " << context->sn << ", csn = " << context->csn << ", taskid = " << task->GetTaskId(); - int ret = client_->CreateCloneChunk(context->location, - context->cidInfo, - context->sn, - context->csn, - context->chunkSize, - cb); + int ret = client_->CreateCloneChunk(context->location, context->cidInfo, + context->sn, context->csn, + context->chunkSize, cb); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "CreateCloneChunk fail" - << ", ret = " << ret - << ", location = " << context->location + << ", ret = " << ret << ", location = " << context->location << ", logicalPoolId = " << context->cidInfo.lpid_ << ", copysetId = " << context->cidInfo.cpid_ << ", chunkId = " << context->cidInfo.cid_ - << ", seqNum = " << context->sn - << ", csn = " << context->csn + << ", seqNum = " << context->sn << ", csn = " << context->csn << ", taskid = " << task->GetTaskId(); return ret; } @@ -860,7 +831,7 @@ int CloneCoreImpl::StartAsyncCreateCloneChunk( int CloneCoreImpl::HandleCreateCloneChunkResultsAndRetry( std::shared_ptr task, std::shared_ptr tracker, - const std::list &results) { + const std::list& results) { int ret = kErrCodeSuccess; for (auto context : results) { if (context->retCode == -LIBCURVE_ERROR::EXISTS) { @@ -878,11 +849,9 @@ int CloneCoreImpl::HandleCreateCloneChunkResultsAndRetry( if (nowTime - context->startTime < context->clientAsyncMethodRetryTimeSec) { // retry - std::this_thread::sleep_for( - std::chrono::milliseconds( - clientAsyncMethodRetryIntervalMs_)); - ret = StartAsyncCreateCloneChunk( - task, tracker, context); + std::this_thread::sleep_for(std::chrono::milliseconds( + clientAsyncMethodRetryIntervalMs_)); + ret = StartAsyncCreateCloneChunk(task, tracker, context); if (ret < 0) { return kErrCodeInternalError; } @@ -897,45 +866,37 @@ int CloneCoreImpl::HandleCreateCloneChunkResultsAndRetry( return ret; } -int CloneCoreImpl::CompleteCloneMeta( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos) { +int CloneCoreImpl::CompleteCloneMeta(std::shared_ptr task, + const FInfo& fInfo, + const CloneSegmentMap& segInfos) { (void)fInfo; (void)segInfos; - std::string origin = - cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); std::string user = task->GetCloneInfo().GetUser(); int ret = client_->CompleteCloneMeta(origin, mdsRootUser_); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "CompleteCloneMeta fail" - << ", ret = " << ret - << ", filename = " << origin - << ", user = " << user - << ", taskid = " << task->GetTaskId(); + << ", ret = " << ret << ", filename = " << origin + << ", user = " << user << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } if (IsLazy(task)) { - 
task->GetCloneInfo().SetNextStep( - CloneStep::kChangeOwner); + task->GetCloneInfo().SetNextStep(CloneStep::kChangeOwner); } else { - task->GetCloneInfo().SetNextStep( - CloneStep::kRecoverChunk); + task->GetCloneInfo().SetNextStep(CloneStep::kRecoverChunk); } ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after CompleteCloneMeta error." - << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return ret; } return kErrCodeSuccess; } -int CloneCoreImpl::RecoverChunk( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos) { +int CloneCoreImpl::RecoverChunk(std::shared_ptr task, + const FInfo& fInfo, + const CloneSegmentMap& segInfos) { int ret = kErrCodeSuccess; uint32_t chunkSize = fInfo.chunksize; @@ -945,8 +906,7 @@ double progressPerData = static_cast(totalProgress) / segNum; uint32_t index = 0; - if (0 == cloneChunkSplitSize_ || - chunkSize % cloneChunkSplitSize_ != 0) { + if (0 == cloneChunkSplitSize_ || chunkSize % cloneChunkSplitSize_ != 0) { LOG(ERROR) << "chunk is not align to cloneChunkSplitSize" << ", taskid = " << task->GetTaskId(); return kErrCodeChunkSizeNotAligned; @@ -954,24 +914,25 @@ auto tracker = std::make_shared(); uint64_t workingChunkNum = 0; - // 为避免发往同一个chunk碰撞,异步请求不同的chunk - for (auto & cloneSegmentInfo : segInfos) { - for (auto & cloneChunkInfo : cloneSegmentInfo.second) { + // To avoid collisions on the same chunk, issue asynchronous requests to + // different chunks + for (auto& cloneSegmentInfo : segInfos) { + for (auto& cloneChunkInfo : cloneSegmentInfo.second) { if (!cloneChunkInfo.second.needRecover) { continue; } - // 当前并发工作的chunk数已大于要求的并发数时,先消化一部分 + // When the number of chunks being worked on concurrently reaches + // the configured concurrency, wait for some of them to finish + // first while (workingChunkNum >= recoverChunkConcurrency_) { uint64_t completeChunkNum = 0; - ret = ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd(task, - tracker, - &completeChunkNum); + ret = ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( + task, tracker, &completeChunkNum); if (ret < 0) { return kErrCodeInternalError; } workingChunkNum -= completeChunkNum; } - // 加入新的工作的chunk + // Add a new working chunk workingChunkNum++; auto context = std::make_shared(); context->cidInfo = cloneChunkInfo.second.chunkIdInfo; @@ -984,29 +945,27 @@ clientAsyncMethodRetryTimeSec_; LOG(INFO) << "RecoverChunk start" - << ", logicalPoolId = " - << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", chunkId = " << context->cidInfo.cid_ - << ", len = " << context->partSize - << ", taskid = " << task->GetTaskId(); + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", len = " << context->partSize + << ", taskid = " << task->GetTaskId(); ret = StartAsyncRecoverChunkPart(task, tracker, context); if (ret < 0) { return kErrCodeInternalError; } } - task->SetProgress(static_cast( - kProgressRecoverChunkBegin + index * progressPerData)); + task->SetProgress(static_cast(kProgressRecoverChunkBegin + + index * progressPerData)); task->UpdateMetric(); index++; } while (workingChunkNum > 0) { uint64_t completeChunkNum = 0; - ret = ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd(task, - tracker, - &completeChunkNum); + ret = 
ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( + task, tracker, &completeChunkNum); if (ret < 0) { return kErrCodeInternalError; } @@ -1017,8 +976,7 @@ int CloneCoreImpl::RecoverChunk( ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after RecoverChunk error." - << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } return kErrCodeSuccess; @@ -1028,30 +986,25 @@ int CloneCoreImpl::StartAsyncRecoverChunkPart( std::shared_ptr task, std::shared_ptr tracker, std::shared_ptr context) { - RecoverChunkClosure *cb = new RecoverChunkClosure(tracker, context); + RecoverChunkClosure* cb = new RecoverChunkClosure(tracker, context); tracker->AddOneTrace(); uint64_t offset = context->partIndex * context->partSize; LOG_EVERY_SECOND(INFO) << "Doing RecoverChunk" - << ", logicalPoolId = " - << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", chunkId = " << context->cidInfo.cid_ - << ", offset = " << offset - << ", len = " << context->partSize - << ", taskid = " << task->GetTaskId(); - int ret = client_->RecoverChunk(context->cidInfo, - offset, - context->partSize, - cb); + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", offset = " << offset + << ", len = " << context->partSize + << ", taskid = " << task->GetTaskId(); + int ret = + client_->RecoverChunk(context->cidInfo, offset, context->partSize, cb); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "RecoverChunk fail" << ", ret = " << ret - << ", logicalPoolId = " - << context->cidInfo.lpid_ + << ", logicalPoolId = " << context->cidInfo.lpid_ << ", copysetId = " << context->cidInfo.cpid_ << ", chunkId = " << context->cidInfo.cid_ - << ", offset = " << offset - << ", len = " << context->partSize + << ", offset = " << offset << ", len = " << context->partSize << ", taskid = " << task->GetTaskId(); return ret; } @@ -1061,20 +1014,18 @@ int CloneCoreImpl::StartAsyncRecoverChunkPart( int CloneCoreImpl::ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( std::shared_ptr task, std::shared_ptr tracker, - uint64_t *completeChunkNum) { + uint64_t* completeChunkNum) { *completeChunkNum = 0; tracker->WaitSome(1); - std::list results = - tracker->PopResultContexts(); + std::list results = tracker->PopResultContexts(); for (auto context : results) { if (context->retCode != LIBCURVE_ERROR::OK) { uint64_t nowTime = TimeUtility::GetTimeofDaySec(); if (nowTime - context->startTime < context->clientAsyncMethodRetryTimeSec) { // retry - std::this_thread::sleep_for( - std::chrono::milliseconds( - clientAsyncMethodRetryIntervalMs_)); + std::this_thread::sleep_for(std::chrono::milliseconds( + clientAsyncMethodRetryIntervalMs_)); int ret = StartAsyncRecoverChunkPart(task, tracker, context); if (ret < 0) { return ret; @@ -1086,7 +1037,7 @@ int CloneCoreImpl::ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( return context->retCode; } } else { - // 启动一个新的分片,index++,并重置开始时间 + // Start a new shard, index++, and reset the start time context->partIndex++; context->startTime = TimeUtility::GetTimeofDaySec(); if (context->partIndex < context->totalPartNum) { @@ -1096,12 +1047,11 @@ int CloneCoreImpl::ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( } } else { LOG(INFO) << "RecoverChunk Complete" - << ", logicalPoolId = " - << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", 
chunkId = " << context->cidInfo.cid_ - << ", len = " << context->partSize - << ", taskid = " << task->GetTaskId(); + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", len = " << context->partSize + << ", taskid = " << task->GetTaskId(); (*completeChunkNum)++; } } @@ -1109,19 +1059,16 @@ int CloneCoreImpl::ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( return kErrCodeSuccess; } -int CloneCoreImpl::ChangeOwner( - std::shared_ptr task, - const FInfo &fInfo) { +int CloneCoreImpl::ChangeOwner(std::shared_ptr task, + const FInfo& fInfo) { (void)fInfo; std::string user = task->GetCloneInfo().GetUser(); - std::string origin = - cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); int ret = client_->ChangeOwner(origin, user); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "ChangeOwner fail, ret = " << ret - << ", fileName = " << origin - << ", newOwner = " << user + << ", fileName = " << origin << ", newOwner = " << user << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } @@ -1130,31 +1077,25 @@ int CloneCoreImpl::ChangeOwner( ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after ChangeOwner error." - << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } return kErrCodeSuccess; } -int CloneCoreImpl::RenameCloneFile( - std::shared_ptr task, - const FInfo &fInfo) { +int CloneCoreImpl::RenameCloneFile(std::shared_ptr task, + const FInfo& fInfo) { std::string user = fInfo.owner; uint64_t originId = task->GetCloneInfo().GetOriginId(); uint64_t destinationId = task->GetCloneInfo().GetDestId(); - std::string origin = - cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); std::string destination = task->GetCloneInfo().GetDest(); - // 先rename - int ret = client_->RenameCloneFile(mdsRootUser_, - originId, - destinationId, - origin, - destination); + // Rename first + int ret = client_->RenameCloneFile(mdsRootUser_, originId, destinationId, + origin, destination); if (-LIBCURVE_ERROR::NOTEXIST == ret) { - // 有可能是已经rename过了 + // It is possible that it has already been renamed FInfo destFInfo; ret = client_->GetFileInfo(destination, mdsRootUser_, &destFInfo); if (ret != LIBCURVE_ERROR::OK) { @@ -1174,10 +1115,8 @@ int CloneCoreImpl::RenameCloneFile( } } else if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "RenameCloneFile fail" - << ", ret = " << ret - << ", user = " << user - << ", originId = " << originId - << ", origin = " << origin + << ", ret = " << ret << ", user = " << user + << ", originId = " << originId << ", origin = " << origin << ", destination = " << destination << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; @@ -1196,25 +1135,22 @@ int CloneCoreImpl::RenameCloneFile( ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after RenameCloneFile error." 
- << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return ret; } return kErrCodeSuccess; } -int CloneCoreImpl::CompleteCloneFile( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos) { +int CloneCoreImpl::CompleteCloneFile(std::shared_ptr task, + const FInfo& fInfo, + const CloneSegmentMap& segInfos) { (void)fInfo; (void)segInfos; std::string fileName; if (IsLazy(task)) { fileName = task->GetCloneInfo().GetDest(); } else { - fileName = - cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + fileName = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); } std::string user = task->GetCloneInfo().GetUser(); int ret = client_->CompleteCloneFile(fileName, mdsRootUser_); @@ -1224,15 +1160,13 @@ int CloneCoreImpl::CompleteCloneFile( case -LIBCURVE_ERROR::NOTEXIST: LOG(ERROR) << "CompleteCloneFile " << "find dest file not exist, maybe deleted" - << ", ret = " << ret - << ", destination = " << fileName + << ", ret = " << ret << ", destination = " << fileName << ", user = " << user << ", taskid = " << task->GetTaskId(); return kErrCodeFileNotExist; default: LOG(ERROR) << "CompleteCloneFile fail" - << ", ret = " << ret - << ", fileName = " << fileName + << ", ret = " << ret << ", fileName = " << fileName << ", user = " << user << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; @@ -1245,8 +1179,7 @@ int CloneCoreImpl::CompleteCloneFile( ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after CompleteCloneFile error." - << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return ret; } return kErrCodeSuccess; @@ -1271,8 +1204,8 @@ void CloneCoreImpl::HandleCloneSuccess(std::shared_ptr task) { cloneRef_->DecrementRef(source); NameLockGuard lockGuard(cloneRef_->GetLock(), source); if (cloneRef_->GetRef(source) == 0) { - int ret = client_->SetCloneFileStatus(source, - FileStatus::Created, mdsRootUser_); + int ret = client_->SetCloneFileStatus(source, FileStatus::Created, + mdsRootUser_); if (ret < 0) { task->GetCloneInfo().SetStatus(CloneStatus::error); int ret2 = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); @@ -1282,8 +1215,7 @@ void CloneCoreImpl::HandleCloneSuccess(std::shared_ptr task) { << ", uuid = " << task->GetTaskId(); } LOG(ERROR) << "Task Fail cause by SetCloneFileStatus fail" - << ", ret = " << ret - << ", TaskInfo : " << *task; + << ", ret = " << ret << ", TaskInfo : " << *task; task->Finish(); return; } @@ -1293,8 +1225,7 @@ void CloneCoreImpl::HandleCloneSuccess(std::shared_ptr task) { ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo Task Success Fail!" 
- << " ret = " << ret - << ", uuid = " << task->GetTaskId(); + << " ret = " << ret << ", uuid = " << task->GetTaskId(); } task->SetProgress(kProgressCloneComplete); @@ -1305,7 +1236,7 @@ void CloneCoreImpl::HandleCloneSuccess(std::shared_ptr task) { } void CloneCoreImpl::HandleCloneError(std::shared_ptr task, - int retCode) { + int retCode) { int ret = kErrCodeSuccess; if (NeedRetry(task, retCode)) { HandleCloneToRetry(task); @@ -1322,8 +1253,8 @@ void CloneCoreImpl::HandleCloneError(std::shared_ptr task, cloneRef_->DecrementRef(source); NameLockGuard lockGuard(cloneRef_->GetLock(), source); if (cloneRef_->GetRef(source) == 0) { - ret = client_->SetCloneFileStatus(source, - FileStatus::Created, mdsRootUser_); + ret = client_->SetCloneFileStatus(source, FileStatus::Created, + mdsRootUser_); if (ret < 0) { LOG(ERROR) << "SetCloneFileStatus fail, ret = " << ret << ", taskid = " << task->GetTaskId(); @@ -1334,8 +1265,7 @@ void CloneCoreImpl::HandleCloneError(std::shared_ptr task, ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo Task error Fail!" - << " ret = " << ret - << ", uuid = " << task->GetTaskId(); + << " ret = " << ret << ", uuid = " << task->GetTaskId(); } LOG(ERROR) << "Task Fail" << ", TaskInfo : " << *task; @@ -1348,8 +1278,7 @@ void CloneCoreImpl::HandleCloneToRetry(std::shared_ptr task) { int ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo Task retrying Fail!" - << " ret = " << ret - << ", uuid = " << task->GetTaskId(); + << " ret = " << ret << ", uuid = " << task->GetTaskId(); } LOG(WARNING) << "Task Fail, Retrying" << ", TaskInfo : " << *task; @@ -1362,8 +1291,7 @@ void CloneCoreImpl::HandleCleanSuccess(std::shared_ptr task) { int ret = metaStore_->DeleteCloneInfo(taskId); if (ret < 0) { LOG(ERROR) << "DeleteCloneInfo failed" - << ", ret = " << ret - << ", taskId = " << taskId; + << ", ret = " << ret << ", taskId = " << taskId; } else { LOG(INFO) << "Clean Task Success" << ", TaskInfo : " << *task; @@ -1380,8 +1308,7 @@ void CloneCoreImpl::HandleCleanError(std::shared_ptr task) { int ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo Task error Fail!" 
- << " ret = " << ret - << ", uuid = " << task->GetTaskId(); + << " ret = " << ret << ", uuid = " << task->GetTaskId(); } LOG(ERROR) << "Clean Task Fail" << ", TaskInfo : " << *task; @@ -1389,17 +1316,17 @@ void CloneCoreImpl::HandleCleanError(std::shared_ptr task) { return; } -int CloneCoreImpl::GetCloneInfoList(std::vector *taskList) { +int CloneCoreImpl::GetCloneInfoList(std::vector* taskList) { metaStore_->GetCloneInfoList(taskList); return kErrCodeSuccess; } -int CloneCoreImpl::GetCloneInfo(TaskIdType taskId, CloneInfo *cloneInfo) { +int CloneCoreImpl::GetCloneInfo(TaskIdType taskId, CloneInfo* cloneInfo) { return metaStore_->GetCloneInfo(taskId, cloneInfo); } -int CloneCoreImpl::GetCloneInfoByFileName( - const std::string &fileName, std::vector *list) { +int CloneCoreImpl::GetCloneInfoByFileName(const std::string& fileName, + std::vector* list) { return metaStore_->GetCloneInfoByFileName(fileName, list); } @@ -1423,26 +1350,24 @@ inline bool CloneCoreImpl::IsClone(std::shared_ptr task) { return CloneTaskType::kClone == task->GetCloneInfo().GetTaskType(); } -bool CloneCoreImpl::NeedUpdateCloneMeta( - std::shared_ptr task) { +bool CloneCoreImpl::NeedUpdateCloneMeta(std::shared_ptr task) { bool ret = true; CloneStep step = task->GetCloneInfo().GetNextStep(); if (CloneStep::kCreateCloneFile == step || - CloneStep::kCreateCloneMeta == step || - CloneStep::kEnd == step) { + CloneStep::kCreateCloneMeta == step || CloneStep::kEnd == step) { ret = false; } return ret; } bool CloneCoreImpl::NeedRetry(std::shared_ptr task, - int retCode) { + int retCode) { if (IsLazy(task)) { CloneStep step = task->GetCloneInfo().GetNextStep(); if (CloneStep::kRecoverChunk == step || - CloneStep::kCompleteCloneFile == step || - CloneStep::kEnd == step) { - // 文件不存在的场景下不需要再重试,因为可能已经被删除了 + CloneStep::kCompleteCloneFile == step || CloneStep::kEnd == step) { + // In scenarios where the file does not exist, there is no need to + // retry as it may have been deleted if (retCode != kErrCodeFileNotExist) { return true; } @@ -1451,10 +1376,9 @@ bool CloneCoreImpl::NeedRetry(std::shared_ptr task, return false; } -int CloneCoreImpl::CreateOrUpdateCloneMeta( - std::shared_ptr task, - FInfo *fInfo, - CloneSegmentMap *segInfos) { +int CloneCoreImpl::CreateOrUpdateCloneMeta(std::shared_ptr task, + FInfo* fInfo, + CloneSegmentMap* segInfos) { std::string newFileName = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); std::string user = fInfo->owner; @@ -1463,7 +1387,7 @@ int CloneCoreImpl::CreateOrUpdateCloneMeta( if (LIBCURVE_ERROR::OK == ret) { // nothing } else if (-LIBCURVE_ERROR::NOTEXIST == ret) { - // 可能已经rename过了 + // Perhaps it has already been renamed newFileName = task->GetCloneInfo().GetDest(); ret = client_->GetFileInfo(newFileName, mdsRootUser_, &fInfoOut); if (ret != LIBCURVE_ERROR::OK) { @@ -1474,7 +1398,7 @@ int CloneCoreImpl::CreateOrUpdateCloneMeta( << ", taskid = " << task->GetTaskId(); return kErrCodeFileNotExist; } - // 如果是已经rename过,那么id应该一致 + // If it has already been renamed, then the id should be consistent uint64_t originId = task->GetCloneInfo().GetOriginId(); if (fInfoOut.id != originId) { LOG(ERROR) << "File is missing, fileId not equal, " @@ -1487,34 +1411,32 @@ int CloneCoreImpl::CreateOrUpdateCloneMeta( } } else { LOG(ERROR) << "GetFileInfo fail" - << ", ret = " << ret - << ", filename = " << newFileName - << ", user = " << user - << ", taskid = " << task->GetTaskId(); + << ", ret = " << ret << ", filename = " << newFileName + << ", user = " << user << ", taskid = " << task->GetTaskId(); 
return kErrCodeInternalError; } - // 更新fInfo + // Update fInfo *fInfo = fInfoOut; - // GetOrAllocateSegment 依赖fullPathName,需要在此处更新 + // GetOrAllocateSegment depends on fullPathName, which must be updated + // here fInfo->fullPathName = newFileName; uint32_t segmentSize = fInfo->segmentsize; - for (auto &segInfo : *segInfos) { + for (auto& segInfo : *segInfos) { SegmentInfo segInfoOut; uint64_t offset = segInfo.first * segmentSize; - ret = client_->GetOrAllocateSegmentInfo( - true, offset, fInfo, mdsRootUser_, &segInfoOut); + ret = client_->GetOrAllocateSegmentInfo(true, offset, fInfo, + mdsRootUser_, &segInfoOut); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "GetOrAllocateSegmentInfo fail" << ", newFileName = " << newFileName - << ", user = " << user - << ", offset = " << offset - << ", allocateIfNotExist = " << "true" + << ", user = " << user << ", offset = " << offset + << ", allocateIfNotExist = " + << "true" << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } - for (auto &cloneChunkInfo : segInfo.second) { + for (auto& cloneChunkInfo : segInfo.second) { if (cloneChunkInfo.first > segInfoOut.chunkvec.size()) { LOG(ERROR) << "can not find chunkIndexInSeg = " << cloneChunkInfo.first @@ -1535,12 +1457,13 @@ return kErrCodeSuccess; } -int CloneCoreImpl::CleanCloneOrRecoverTaskPre(const std::string &user, - const TaskIdType &taskId, - CloneInfo *cloneInfo) { +int CloneCoreImpl::CleanCloneOrRecoverTaskPre(const std::string& user, + const TaskIdType& taskId, + CloneInfo* cloneInfo) { int ret = metaStore_->GetCloneInfo(taskId, cloneInfo); if (ret < 0) { - // 不存在时直接返回成功,使接口幂等 + // If the record does not exist, return success directly so that the + // interface is idempotent return kErrCodeSuccess; } if (cloneInfo->GetUser() != user) { @@ -1567,8 +1490,7 @@ ret = metaStore_->UpdateCloneInfo(*cloneInfo); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo fail" - << ", ret = " << ret - << ", taskId = " << taskId; + << ", ret = " << ret << ", taskId = " << taskId; return ret; } return kErrCodeSuccess; @@ -1576,16 +1498,17 @@ void CloneCoreImpl::HandleCleanCloneOrRecoverTask( std::shared_ptr task) { - // 只有错误的clone/recover任务才清理临时文件 + // Only clone/recover tasks that ended in error clean up temporary files if (CloneStatus::errorCleaning == task->GetCloneInfo().GetStatus()) { - // 错误情况下可能未清除镜像被克隆标志 + // In the error case the source image's being-cloned flag may not + // have been cleared if (IsFile(task)) { - // 重新发送 + // Set the status again std::string source = task->GetCloneInfo().GetSrc(); NameLockGuard lockGuard(cloneRef_->GetLock(), source); if (cloneRef_->GetRef(source) == 0) { - int ret = client_->SetCloneFileStatus(source, - FileStatus::Created, mdsRootUser_); + int ret = client_->SetCloneFileStatus( + source, FileStatus::Created, mdsRootUser_); if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOTEXIST) { LOG(ERROR) << "SetCloneFileStatus fail, ret = " << ret @@ -1598,16 +1521,12 @@ std::string tempFileName = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); uint64_t fileId = task->GetCloneInfo().GetOriginId(); - std::string user = - task->GetCloneInfo().GetUser(); + std::string user = task->GetCloneInfo().GetUser(); int ret = client_->DeleteFile(tempFileName, mdsRootUser_, fileId); - if (ret != LIBCURVE_ERROR::OK && - ret != -LIBCURVE_ERROR::NOTEXIST) { + if (ret != 
@@ -1576,16 +1498,17 @@ int CloneCoreImpl::CleanCloneOrRecoverTaskPre(const std::string &user, void CloneCoreImpl::HandleCleanCloneOrRecoverTask( std::shared_ptr task) { - // 只有错误的clone/recover任务才清理临时文件 + // Only failed clone/recover tasks need their temporary files cleaned if (CloneStatus::errorCleaning == task->GetCloneInfo().GetStatus()) { - // 错误情况下可能未清除镜像被克隆标志 + // In the error case the source image's being-cloned flag may not + // have been cleared if (IsFile(task)) { - // 重新发送 + // Resend std::string source = task->GetCloneInfo().GetSrc(); NameLockGuard lockGuard(cloneRef_->GetLock(), source); if (cloneRef_->GetRef(source) == 0) { int ret = client_->SetCloneFileStatus( + source, FileStatus::Created, mdsRootUser_); if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOTEXIST) { LOG(ERROR) << "SetCloneFileStatus fail, ret = " << ret @@ -1598,16 +1521,12 @@ void CloneCoreImpl::HandleCleanCloneOrRecoverTask( std::string tempFileName = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); uint64_t fileId = task->GetCloneInfo().GetOriginId(); - std::string user = - task->GetCloneInfo().GetUser(); + std::string user = task->GetCloneInfo().GetUser(); int ret = client_->DeleteFile(tempFileName, mdsRootUser_, fileId); - if (ret != LIBCURVE_ERROR::OK && - ret != -LIBCURVE_ERROR::NOTEXIST) { + if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOTEXIST) { LOG(ERROR) << "DeleteFile failed" - << ", ret = " << ret - << ", fileName = " << tempFileName - << ", user = " << user - << ", fileId = " << fileId + << ", ret = " << ret << ", fileName = " << tempFileName + << ", user = " << user << ", fileId = " << fileId << ", taskid = " << task->GetTaskId(); HandleCleanError(task); return; @@ -1623,8 +1542,7 @@ int CloneCoreImpl::HandleRemoveCloneOrRecoverTask( int ret = metaStore_->DeleteCloneInfo(taskId); if (ret < 0) { LOG(ERROR) << "DeleteCloneInfo failed" - << ", ret = " << ret - << ", taskId = " << taskId; + << ", ret = " << ret << ", taskId = " << taskId; return kErrCodeInternalError; } @@ -1635,12 +1553,11 @@ int CloneCoreImpl::HandleRemoveCloneOrRecoverTask( cloneRef_->DecrementRef(source); NameLockGuard lockGuard(cloneRef_->GetLock(), source); if (cloneRef_->GetRef(source) == 0) { - int ret = client_->SetCloneFileStatus(source, - FileStatus::Created, mdsRootUser_); + int ret = client_->SetCloneFileStatus(source, FileStatus::Created, + mdsRootUser_); if (ret < 0) { LOG(ERROR) << "Task Fail cause by SetCloneFileStatus fail" - << ", ret = " << ret - << ", TaskInfo : " << *task; + << ", ret = " << ret << ", TaskInfo : " << *task; return kErrCodeInternalError; } } @@ -1649,8 +1566,8 @@ return kErrCodeSuccess; } -int CloneCoreImpl::CheckFileExists(const std::string &filename, - uint64_t inodeId) { +int CloneCoreImpl::CheckFileExists(const std::string& filename, + uint64_t inodeId) { FInfo destFInfo; int ret = client_->GetFileInfo(filename, mdsRootUser_, &destFInfo); if (ret == LIBCURVE_ERROR::OK) { @@ -1668,10 +1585,13 @@ int CloneCoreImpl::CheckFileExists(const std::string &filename, return kErrCodeInternalError; } -// 加减引用计数的时候,接口里面会对引用计数map加锁; -// 加引用计数、处理引用计数减到0的时候,需要额外对修改的那条记录加锁。 -int CloneCoreImpl::HandleDeleteCloneInfo(const CloneInfo &cloneInfo) { - // 先减引用计数,如果是从镜像克隆且引用计数减到0,需要修改源镜像的状态为created +// When a reference count is added or subtracted, the interface locks the +// reference count map; when adding a reference count, or when handling a +// reference count that drops to 0, the record being modified must +// additionally be locked. +int CloneCoreImpl::HandleDeleteCloneInfo(const CloneInfo& cloneInfo) { + // First decrement the reference count. For a clone from an image whose + // reference count drops to 0, the status of the source image must be set + // back to 'created' std::string source = cloneInfo.GetSrc(); if (cloneInfo.GetFileType() == CloneFileType::kSnapshot) { snapshotRef_->DecrementSnapshotRef(source); @@ -1679,12 +1599,12 @@ cloneRef_->DecrementRef(source); NameLockGuard lockGuard(cloneRef_->GetLock(), source); if (cloneRef_->GetRef(source) == 0) { - int ret = client_->SetCloneFileStatus(source, - FileStatus::Created, mdsRootUser_); + int ret = client_->SetCloneFileStatus(source, FileStatus::Created, + mdsRootUser_); if (ret == -LIBCURVE_ERROR::NOTEXIST) { LOG(WARNING) << "SetCloneFileStatus, file not exist, filename: " << source; - } else if (ret != LIBCURVE_ERROR::OK) { + } else if (ret != LIBCURVE_ERROR::OK) { cloneRef_->IncrementRef(source); LOG(ERROR) << "SetCloneFileStatus fail" << ", ret = " << ret @@ -1694,7 +1614,8 @@ } } - // 删除这条记录,如果删除失败,把前面已经减掉的引用计数加回去 + // Delete this record. If the deletion fails, add back the reference + // count that was subtracted above int ret = metaStore_->DeleteCloneInfo(cloneInfo.GetTaskId()); if (ret != 0) { if (cloneInfo.GetFileType() == CloneFileType::kSnapshot) { @@ -1706,8 +1627,7 @@ int CloneCoreImpl::HandleDeleteCloneInfo(const CloneInfo &cloneInfo) { cloneRef_->IncrementRef(source); } LOG(ERROR) << "DeleteCloneInfo failed" - << ", ret = " << ret - << ", CloneInfo = " << cloneInfo; + << ", ret = " << ret << ", CloneInfo = " << cloneInfo; return kErrCodeInternalError; }
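HandleDeleteCloneInfo above follows a decrement-then-rollback discipline: the source's reference count is dropped first, and restored if deleting the task record fails, so the count never drifts. A condensed sketch of that discipline under a single map lock; RefCounter and the deleteRecord callback are illustrative stand-ins, not the patch's real types:

#include <functional>
#include <map>
#include <mutex>
#include <string>

// Sketch of the decrement-then-rollback pattern: decrement the source's
// reference count first; if deleting the task record then fails, restore
// the count so it stays consistent with the surviving record.
class RefCounter {
 public:
    // deleteRecord returns 0 on success, nonzero on failure.
    int DeleteCloneRecord(const std::string& source,
                          const std::function<int()>& deleteRecord) {
        std::lock_guard<std::mutex> lock(mapLock_);  // lock the ref map
        --refs_[source];
        if (deleteRecord() != 0) {
            ++refs_[source];  // roll back the decrement on failure
            return -1;
        }
        return 0;
    }

 private:
    std::mutex mapLock_;
    std::map<std::string, int> refs_;
};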
diff --git a/src/snapshotcloneserver/clone/clone_core.h b/src/snapshotcloneserver/clone/clone_core.h index 19c1c20c9d..f33e2f8d5c 100644 --- a/src/snapshotcloneserver/clone/clone_core.h +++ b/src/snapshotcloneserver/clone/clone_core.h @@ -23,20 +23,20 @@ #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_CORE_H_ #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_CORE_H_ +#include +#include #include #include #include -#include -#include -#include "src/snapshotcloneserver/common/curvefs_client.h" +#include "src/common/concurrent/name_lock.h" #include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" -#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" -#include "src/snapshotcloneserver/common/snapshot_reference.h" #include "src/snapshotcloneserver/clone/clone_reference.h" +#include "src/snapshotcloneserver/common/curvefs_client.h" +#include "src/snapshotcloneserver/common/snapshot_reference.h" +#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" #include "src/snapshotcloneserver/common/thread_pool.h" -#include "src/common/concurrent/name_lock.h" +#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" using ::curve::common::NameLock; @@ -51,359 +51,334 @@ class CloneCore { virtual ~CloneCore() {} /** - * @brief 克隆或恢复任务前置 + * @brief Pre-step for a clone or recover task * - * @param source 克隆或恢复源 - * @param user 用户名 - * @param destination 克隆或恢复的目标文件名 - * @param lazyFlag 是否lazy - * @param taskType 克隆或恢复 - * @param poolset 克隆时目标文件的poolset - * @param[out] info 克隆或恢复任务信息 + * @param source Clone or recover source + * @param user User name + * @param destination The target file name for the clone or recovery + * @param lazyFlag Whether lazy mode is used + * @param taskType Clone or recover + * @param poolset The poolset of the target file when cloning + * @param[out] info Clone or recover task information * - * @return 错误码 + * @return error code */ - virtual int CloneOrRecoverPre(const UUID &source, - const std::string &user, - const std::string &destination, - bool lazyFlag, - CloneTaskType taskType, - std::string poolset, - CloneInfo *info) = 0; + virtual int CloneOrRecoverPre(const UUID& source, const std::string& user, + const std::string& destination, bool lazyFlag, + CloneTaskType taskType, std::string poolset, + CloneInfo* info) = 0; /** - * @brief 处理克隆或恢复任务 + * @brief Handle a clone or recover task * - * @param task 克隆或恢复任务 + * @param task Clone or recover task */ virtual void HandleCloneOrRecoverTask( std::shared_ptr task) = 0; /** - * @brief 清理克隆或恢复任务前置 + * @brief Pre-step for cleaning a clone or recover task * - * @param user 用户名 - * @param taskId 任务Id - * @param[out] cloneInfo 克隆或恢复信息 + * @param user User name + * @param taskId Task Id + * @param[out] cloneInfo Clone or recover information * - * @return 错误码 + * @return error code */ - virtual int CleanCloneOrRecoverTaskPre(const std::string &user, - const TaskIdType &taskId, - CloneInfo *cloneInfo) = 0; - + virtual int CleanCloneOrRecoverTaskPre(const
std::string& user, + const TaskIdType& taskId, + CloneInfo* cloneInfo) = 0; /** - * @brief 异步处理清理克隆或恢复任务 + * @brief Asynchronously handle the cleaning of a clone or recover task * - * @param task 克隆或恢复任务 + * @param task Clone or recover task */ virtual void HandleCleanCloneOrRecoverTask( std::shared_ptr task) = 0; /** - * @brief 安装克隆文件数据的前置工作 - * - 进行一些必要的检查 - * - 获取并返回克隆信息 - * - 更新数据库状态 + * @brief Pre work for installing the clone file data + * - Perform the necessary checks + * - Obtain and return the clone information + * - Update the database status * - * @param user 用户名 - * @param taskId 任务Id - * @param[out] cloneInfo 克隆信息 + * @param user User name + * @param taskId Task Id + * @param[out] cloneInfo Clone information * - * @return 错误码 + * @return error code */ - virtual int FlattenPre( - const std::string &user, - const TaskIdType &taskId, - CloneInfo *cloneInfo) = 0; + virtual int FlattenPre(const std::string& user, const TaskIdType& taskId, + CloneInfo* cloneInfo) = 0; /** - * @brief 获取全部克隆/恢复任务列表,用于重启后恢复执行 + * @brief Get the list of all clone/recover tasks, used to resume execution + * after a restart * - * @param[out] cloneInfos 克隆/恢复任务列表 + * @param[out] cloneInfos Clone/recover task list * - * @return 错误码 + * @return error code */ - virtual int GetCloneInfoList(std::vector *cloneInfos) = 0; + virtual int GetCloneInfoList(std::vector* cloneInfos) = 0; /** - * @brief 获取指定id的克隆/恢复任务 + * @brief Get the clone/recover task with the specified ID * - * @param taskId  任务id - * @param cloneInfo 克隆/恢复任务 + * @param taskId Task ID + * @param cloneInfo Clone/recover task * - * @retVal 0 获取成功 - * @retVal -1 获取失败 + * @retval 0 successfully obtained + * @retval -1 failed to obtain */ - virtual int GetCloneInfo(TaskIdType taskId, CloneInfo *cloneInfo) = 0; + virtual int GetCloneInfo(TaskIdType taskId, CloneInfo* cloneInfo) = 0; /** - * @brief 获取指定文件名的克隆/恢复任务 + * @brief Get the clone/recover tasks for the specified file name * - * @param fileName  文件名 - * @param list 克隆/恢复任务列表 + * @param fileName File name + * @param list Clone/recover task list * - * @retVal 0 获取成功 - * @retVal -1 获取失败 + * @retval 0 successfully obtained + * @retval -1 failed to obtain */ - virtual int GetCloneInfoByFileName( - const std::string &fileName, std::vector *list) = 0; + virtual int GetCloneInfoByFileName(const std::string& fileName, + std::vector* list) = 0; /** - * @brief 获取快照引用管理模块 + * @brief Get the snapshot reference management module * - * @return 快照引用管理模块 + * @return Snapshot reference management module */ virtual std::shared_ptr GetSnapshotRef() = 0; - /** - * @brief 获取镜像引用管理模块 + * @brief Get the image reference management module * - * @return 镜像引用管理模块 + * @return Image reference management module */ virtual std::shared_ptr GetCloneRef() = 0; - /** - * @brief 移除克隆/恢复任务 + * @brief Remove a clone/recover task * - * @param task 克隆任务 + * @param task Clone task * - * @return 错误码 + * @return error code */ virtual int HandleRemoveCloneOrRecoverTask( std::shared_ptr task) = 0; /** - * @brief 检查文件是否存在 + * @brief Check whether the file exists * - * @param filename 文件名 + * @param filename File name * - * @return 错误码 + * @return error code */ - virtual int CheckFileExists(const std::string &filename, + virtual int CheckFileExists(const std::string& filename, uint64_t inodeId) = 0; /** - * @brief 删除cloneInfo + * @brief Delete the cloneInfo * - * @param cloneInfo 待删除的cloneInfo + * @param cloneInfo The cloneInfo to be deleted * - * @return 错误码 + * @return error code */ - virtual int HandleDeleteCloneInfo(const CloneInfo &cloneInfo) = 0; + virtual int
HandleDeleteCloneInfo(const CloneInfo& cloneInfo) = 0; }; /** - * @brief 克隆/恢复所需chunk信息 + * @brief Chunk information required for cloning/restoring */ struct CloneChunkInfo { - // 该chunk的id信息 + // The ID information of the chunk ChunkIDInfo chunkIdInfo; - // 位置信息,如果在s3上,是objectName,否则在curvefs上,则是offset + // Location information: if on s3, it is the objectName; if on curvefs, + // it is the offset std::string location; - // 该chunk的版本号 + // The version number of the chunk uint64_t seqNum; - // chunk是否需要recover + // Whether the chunk needs recover bool needRecover; }; -// 克隆/恢复所需segment信息,key是ChunkIndex In Segment, value是chunk信息 +// The segment information required for cloning/recovery, where key is +// ChunkIndex In Segment and value is chunk information using CloneSegmentInfo = std::map; -// 克隆/恢复所需segment信息表,key是segmentIndex +// The segment information table required for cloning/recovery, where the key is +// segmentIndex using CloneSegmentMap = std::map; class CloneCoreImpl : public CloneCore { public: - static const std::string kCloneTempDir; + static const std::string kCloneTempDir; public: - CloneCoreImpl( - std::shared_ptr client, - std::shared_ptr metaStore, - std::shared_ptr dataStore, - std::shared_ptr snapshotRef, - std::shared_ptr cloneRef, - const SnapshotCloneServerOptions option) - : client_(client), - metaStore_(metaStore), - dataStore_(dataStore), - snapshotRef_(snapshotRef), - cloneRef_(cloneRef), - cloneChunkSplitSize_(option.cloneChunkSplitSize), - cloneTempDir_(option.cloneTempDir), - mdsRootUser_(option.mdsRootUser), - createCloneChunkConcurrency_(option.createCloneChunkConcurrency), - recoverChunkConcurrency_(option.recoverChunkConcurrency), - clientAsyncMethodRetryTimeSec_(option.clientAsyncMethodRetryTimeSec), - clientAsyncMethodRetryIntervalMs_( - option.clientAsyncMethodRetryIntervalMs) {} - - ~CloneCoreImpl() { - } + CloneCoreImpl(std::shared_ptr client, + std::shared_ptr metaStore, + std::shared_ptr dataStore, + std::shared_ptr snapshotRef, + std::shared_ptr cloneRef, + const SnapshotCloneServerOptions option) + : client_(client), + metaStore_(metaStore), + dataStore_(dataStore), + snapshotRef_(snapshotRef), + cloneRef_(cloneRef), + cloneChunkSplitSize_(option.cloneChunkSplitSize), + cloneTempDir_(option.cloneTempDir), + mdsRootUser_(option.mdsRootUser), + createCloneChunkConcurrency_(option.createCloneChunkConcurrency), + recoverChunkConcurrency_(option.recoverChunkConcurrency), + clientAsyncMethodRetryTimeSec_(option.clientAsyncMethodRetryTimeSec), + clientAsyncMethodRetryIntervalMs_( + option.clientAsyncMethodRetryIntervalMs) {} + + ~CloneCoreImpl() {} int Init(); - int CloneOrRecoverPre(const UUID &source, - const std::string &user, - const std::string &destination, - bool lazyFlag, - CloneTaskType taskType, - std::string poolset, - CloneInfo *info) override; + int CloneOrRecoverPre(const UUID& source, const std::string& user, + const std::string& destination, bool lazyFlag, + CloneTaskType taskType, std::string poolset, + CloneInfo* info) override; void HandleCloneOrRecoverTask(std::shared_ptr task) override; - int CleanCloneOrRecoverTaskPre(const std::string &user, - const TaskIdType &taskId, - CloneInfo *cloneInfo) override; + int CleanCloneOrRecoverTaskPre(const std::string& user, + const TaskIdType& taskId, + CloneInfo* cloneInfo) override; void HandleCleanCloneOrRecoverTask( std::shared_ptr task) override; - int FlattenPre( - const std::string &user, - const std::string &fileName, - CloneInfo *cloneInfo) override; + int FlattenPre(const std::string& user,
const std::string& fileName, + CloneInfo* cloneInfo) override; - int GetCloneInfoList(std::vector *taskList) override; - int GetCloneInfo(TaskIdType taskId, CloneInfo *cloneInfo) override; + int GetCloneInfoList(std::vector* taskList) override; + int GetCloneInfo(TaskIdType taskId, CloneInfo* cloneInfo) override; - int GetCloneInfoByFileName( - const std::string &fileName, std::vector *list) override; + int GetCloneInfoByFileName(const std::string& fileName, + std::vector* list) override; - std::shared_ptr GetSnapshotRef() { - return snapshotRef_; - } + std::shared_ptr GetSnapshotRef() { return snapshotRef_; } - std::shared_ptr GetCloneRef() { - return cloneRef_; - } + std::shared_ptr GetCloneRef() { return cloneRef_; } int HandleRemoveCloneOrRecoverTask( std::shared_ptr task) override; - int CheckFileExists(const std::string &filename, - uint64_t inodeId) override; - int HandleDeleteCloneInfo(const CloneInfo &cloneInfo) override; + int CheckFileExists(const std::string& filename, uint64_t inodeId) override; + int HandleDeleteCloneInfo(const CloneInfo& cloneInfo) override; private: /** - * @brief 从快照构建克隆/恢复的文件信息 + * @brief Build the clone/recover file information from a snapshot * - * @param task 任务信息 - * @param[out] newFileInfo 新构建的文件信息 - * @param[out] segInfos 新构建文件的segment信息 + * @param task Task information + * @param[out] newFileInfo Newly constructed file information + * @param[out] segInfos The segment information of the newly constructed + * file * - * @return 错误码 + * @return error code */ - int BuildFileInfoFromSnapshot( - std::shared_ptr task, - FInfo *newFileInfo, - CloneSegmentMap *segInfos); + int BuildFileInfoFromSnapshot(std::shared_ptr task, + FInfo* newFileInfo, + CloneSegmentMap* segInfos); /** - * @brief 从源文件构建克隆/恢复的文件信息 + * @brief Build the clone/recover file information from the source file * - * @param task 任务信息 - * @param[out] newFileInfo 新构建的文件信息 - * @param[out] segInfos 新构建文件的segment信息 + * @param task Task information + * @param[out] newFileInfo Newly constructed file information + * @param[out] segInfos The segment information of the newly constructed + * file * - * @return 错误码 + * @return error code */ - int BuildFileInfoFromFile( - std::shared_ptr task, - FInfo *newFileInfo, - CloneSegmentMap *segInfos); - + int BuildFileInfoFromFile(std::shared_ptr task, + FInfo* newFileInfo, CloneSegmentMap* segInfos); /** - * @brief 判断是否需要更新CloneChunkInfo信息中的chunkIdInfo + * @brief Determine whether the chunkIdInfo in the CloneChunkInfo needs to + * be updated * - * @param task 任务信息 + * @param task Task information * - * @retVal true 需要更新 - * @retVal false 不需要更新 + * @retval true Update required + * @retval false No update required */ - bool NeedUpdateCloneMeta( - std::shared_ptr task); + bool NeedUpdateCloneMeta(std::shared_ptr task); /** - * @brief 判断clone失败后是否需要重试 + * @brief Determine whether a retry is needed after a clone failure * - * @param task 任务信息 - * @param retCode 错误码 + * @param task Task information + * @param retCode Error code * - * @retVal true 需要 - * @retVal false 不需要 + * @retval true Retry needed + * @retval false No retry needed */ - bool NeedRetry(std::shared_ptr task, - int retCode); + bool NeedRetry(std::shared_ptr task, int retCode); /** - * @brief 创建clone的元数据信息或更新元数据信息 + * @brief Create or update the metadata information of the clone * - * @param task 任务信息 - * @param[int][out] fInfo 新创建的文件信息 - * @param[int][out] segInfos 文件的segment信息 + * @param task Task information + * @param[in][out] fInfo Newly created file information + * @param[in][out] segInfos The segment information of the file * - * @return 错误码 + * @return error code */ - int CreateOrUpdateCloneMeta( - std::shared_ptr task, - FInfo *fInfo, - CloneSegmentMap *segInfos); + int CreateOrUpdateCloneMeta(std::shared_ptr task, + FInfo* fInfo, CloneSegmentMap* segInfos); /** - * @brief 创建新clone文件 + * @brief Create a new clone file * - * @param task 任务信息 - * @param fInfo 需创建的文件信息 + * @param task Task information + * @param fInfo File information to be created * - * @return 错误码 + * @return error code */ - int CreateCloneFile( - std::shared_ptr task, - const FInfo &fInfo); + int CreateCloneFile(std::shared_ptr task, + const FInfo& fInfo); /** - * @brief 创建新文件的源信息(创建segment) + * @brief Create the source information for the new file (create segments) * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task Task information + * @param fInfo File information of the new file + * @param segInfos The segment information required by the new file * - * @return 错误码 + * @return error code */ - int CreateCloneMeta( - std::shared_ptr task, - FInfo *fInfo, - CloneSegmentMap *segInfos); + int CreateCloneMeta(std::shared_ptr task, FInfo* fInfo, + CloneSegmentMap* segInfos); /** - * @brief 创建新clone文件的chunk + * @brief Create the chunks of the new clone file * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task Task information + * @param fInfo File information of the new file + * @param segInfos The segment information required by the new file * - * @return 错误码 + * @return error code */ - int CreateCloneChunk( - std::shared_ptr task, - const FInfo &fInfo, - CloneSegmentMap *segInfos); + int CreateCloneChunk(std::shared_ptr task, + const FInfo& fInfo, CloneSegmentMap* segInfos); /** - * @brief 开始CreateCloneChunk的异步请求 + * @brief Start the asynchronous CreateCloneChunk requests * - * @param task 任务信息 - * @param tracker CreateCloneChunk任务追踪器 - * @param context CreateCloneChunk上下文 + * @param task Task information + * @param tracker CreateCloneChunk task tracker + * @param context CreateCloneChunk context * - * @return 错误码 + * @return error code */ int StartAsyncCreateCloneChunk( std::shared_ptr task, @@ -411,55 +386,51 @@ class CloneCoreImpl : public CloneCore { std::shared_ptr context); /** - * @brief 处理CreateCloneChunk的结果并重试 + * @brief Handle the results of CreateCloneChunk and retry if needed * - * @param task 任务信息 - * @param tracker CreateCloneChunk任务追踪器 - * @param results CreateCloneChunk结果列表 + * @param task Task information + * @param tracker CreateCloneChunk task tracker + * @param results CreateCloneChunk result list * - * @return 错误码 + * @return error code */ int HandleCreateCloneChunkResultsAndRetry( std::shared_ptr task, std::shared_ptr tracker, - const std::list &results); + const std::list& results); /** - * @brief 通知mds完成源数据创建步骤 + * @brief Notify mds that the source data creation step is complete * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task Task information + * @param fInfo File information of the new file + * @param segInfos The segment information required by the new file * - * @return 错误码 + * @return error code */ - int CompleteCloneMeta( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos); + int CompleteCloneMeta(std::shared_ptr task, + const FInfo& fInfo, const CloneSegmentMap& segInfos); /** - * @brief 恢复chunk,即通知chunkserver拷贝数据 + * @brief Recover the chunks, i.e. notify the chunkserver to copy the data * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * 
@param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information for new files + * @param segInfos The segment information required for a new file * - * @return 错误码 + * @return error code */ - int RecoverChunk( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos); + int RecoverChunk(std::shared_ptr task, const FInfo& fInfo, + const CloneSegmentMap& segInfos); /** - * @brief 开始RecoverChunk的异步请求 + * @brief Start asynchronous request for RecoverChunk * - * @param task 任务信息 - * @param tracker RecoverChunk异步任务跟踪器 - * @param context RecoverChunk上下文 + * @param task task information + * @param tracker RecoverChunk Asynchronous task tracker + * @param context RecoverChunk Context * - * @return 错误码 + * @return error code */ int StartAsyncRecoverChunkPart( std::shared_ptr task, @@ -467,110 +438,103 @@ class CloneCoreImpl : public CloneCore { std::shared_ptr context); /** - * @brief 继续RecoverChunk的其他部分的请求以及等待完成某些RecoverChunk + * @brief Continue requests for other parts of the RecoverChunk and wait for + * certain RecoverChunks to be completed * - * @param task 任务信息 - * @param tracker RecoverChunk异步任务跟踪者 - * @param[out] completeChunkNum 完成的chunk数 + * @param task task information + * @param tracker RecoverChunk Asynchronous task tracker + * @param[out] completeChunkNum Number of chunks completed * - * @return 错误码 + * @return error code */ int ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( std::shared_ptr task, std::shared_ptr tracker, - uint64_t *completeChunkNum); + uint64_t* completeChunkNum); /** - * @brief 修改克隆文件的owner + * @brief Modify the owner of the cloned file * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 + * @param task task information + * @param fInfo File information for new files * - * @return 错误码 + * @return error code */ - int ChangeOwner( - std::shared_ptr task, - const FInfo &fInfo); + int ChangeOwner(std::shared_ptr task, const FInfo& fInfo); /** - * @brief 重命名克隆文件 + * @brief Rename clone file * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 + * @param task task information + * @param fInfo File information for new files * - * @return 错误码 + * @return error code */ - int RenameCloneFile( - std::shared_ptr task, - const FInfo &fInfo); + int RenameCloneFile(std::shared_ptr task, + const FInfo& fInfo); /** - * @brief 通知mds完成数据创建 + * @brief Notify mds to complete data creation * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information for new files + * @param segInfos The segment information required for a new file * - * @return 错误码 + * @return error code */ - int CompleteCloneFile( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos); + int CompleteCloneFile(std::shared_ptr task, + const FInfo& fInfo, const CloneSegmentMap& segInfos); /** - * @brief 从快照克隆时,更新快照状态,通知克隆完成 + * @brief: When cloning from a snapshot, update the snapshot status and + * notify the clone to complete * - * @param task 任务信息 + * @param task task information * - * @return 错误码 + * @return error code */ - int UpdateSnapshotStatus( - std::shared_ptr task); + int UpdateSnapshotStatus(std::shared_ptr task); /** - * @brief 处理Lazy克隆/恢复阶段一结束 + * @brief Processing Lazy Clone/Restore Phase 1 End * - * @param task 任务信息 + * @param task task information */ - void HandleLazyCloneStage1Finish( - std::shared_ptr task); + void HandleLazyCloneStage1Finish(std::shared_ptr task); /** - * @brief 处理克隆/恢复成功 + * @brief Successfully processed clone/restore 
* - * @param task 任务信息 + * @param task Task information */ void HandleCloneSuccess(std::shared_ptr task); - /** - * @brief 处理克隆或恢复失败 + * @brief Handle a clone or recover failure * - * @param task 任务信息 - * @param retCode 待处理的错误码 + * @param task Task information + * @param retCode The error code to be handled */ - void HandleCloneError(std::shared_ptr task, - int retCode); + void HandleCloneError(std::shared_ptr task, int retCode); /** - * @brief Lazy Clone 情况下处理Clone任务失败重试 + * @brief Handle the retry of a failed clone task in the lazy clone case * - * @param task 任务信息 + * @param task Task information */ void HandleCloneToRetry(std::shared_ptr task); /** - * @brief 处理清理克隆或恢复任务成功 + * @brief Handle a successful clean of a clone or recover task * - * @param task 任务信息 + * @param task Task information */ void HandleCleanSuccess(std::shared_ptr task); /** - * @brief 处理清理克隆或恢复任务失败 + * @brief Handle a failed clean of a clone or recover task * - * @param task 任务信息 + * @param task Task information */ void HandleCleanError(std::shared_ptr task); @@ -587,19 +551,19 @@ class CloneCoreImpl : public CloneCore { std::shared_ptr snapshotRef_; std::shared_ptr cloneRef_; - // clone chunk分片大小 + // Clone chunk split size uint64_t cloneChunkSplitSize_; - // 克隆临时目录 + // Clone temporary directory std::string cloneTempDir_; // mds root user std::string mdsRootUser_; - // CreateCloneChunk同时进行的异步请求数量 + // Number of concurrent asynchronous CreateCloneChunk requests uint32_t createCloneChunkConcurrency_; - // RecoverChunk同时进行的异步请求数量 + // Number of concurrent asynchronous RecoverChunk requests uint32_t recoverChunkConcurrency_; - // client异步请求重试时间 + // Retry duration for client asynchronous requests uint64_t clientAsyncMethodRetryTimeSec_; - // 调用client异步方法重试时间间隔 + // Retry interval for calling client asynchronous methods uint64_t clientAsyncMethodRetryIntervalMs_; };
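The clone_service_manager.cpp hunks that follow reformat CloneFile and RecoverFile, which rely on two easily missed invariants: a per-destination name lock serializes concurrent requests for the same target file, and a task that already exists is reported as success so that retried RPCs stay idempotent. A minimal sketch of that shape; CloneRequestGate and its members are hypothetical stand-ins, where the real code uses NameLock and the clone meta store:

#include <mutex>
#include <set>
#include <string>

// Sketch: serialize requests per destination name and treat an already
// existing task as success, so a retried CloneFile call is idempotent.
struct CloneRequestGate {
    std::mutex mu;                      // stands in for the per-name NameLock
    std::set<std::string> activeTasks;  // stands in for the meta store

    // Returns 0 both for a newly created task and for a duplicate request.
    int Submit(const std::string& destination) {
        std::lock_guard<std::mutex> lock(mu);
        if (!activeTasks.insert(destination).second) {
            return 0;  // task already exists: succeed, create nothing new
        }
        // ... build the task and push it to the task manager ...
        return 0;
    }
};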
diff --git a/src/snapshotcloneserver/clone/clone_service_manager.cpp b/src/snapshotcloneserver/clone/clone_service_manager.cpp index 9b7439fecf..98cf730c25 100644 --- a/src/snapshotcloneserver/clone/clone_service_manager.cpp +++ b/src/snapshotcloneserver/clone/clone_service_manager.cpp @@ -24,19 +24,19 @@ #include -#include #include +#include #include -#include "src/snapshotcloneserver/common/snapshotclone_metric.h" +#include "include/curve_compiler_specific.h" #include "src/common/snapshotclone/snapshotclone_define.h" #include "src/common/string_util.h" -#include "include/curve_compiler_specific.h" +#include "src/snapshotcloneserver/common/snapshotclone_metric.h" namespace curve { namespace snapshotcloneserver { -int CloneServiceManager::Init(const SnapshotCloneServerOptions &option) { +int CloneServiceManager::Init(const SnapshotCloneServerOptions& option) { dlockOpts_ = std::make_shared(option.dlockOpts); std::shared_ptr stage1Pool = std::make_shared(option.stage1PoolThreadNum); @@ -45,8 +45,8 @@ int CloneServiceManager::Init(const SnapshotCloneServerOptions &option) { std::shared_ptr commonPool = std::make_shared(option.commonPoolThreadNum); cloneServiceManagerBackend_->Init( - option.backEndReferenceRecordScanIntervalMs, - option.backEndReferenceFuncScanIntervalMs); + option.backEndReferenceRecordScanIntervalMs, + option.backEndReferenceFuncScanIntervalMs); return cloneTaskMgr_->Init(stage1Pool, stage2Pool, commonPool, option); } @@ -60,38 +60,34 @@ void CloneServiceManager::Stop() { cloneServiceManagerBackend_->Stop(); } -int CloneServiceManager::CloneFile(const UUID &source, - const std::string &user, - const std::string &destination, - const std::string &poolset, - bool lazyFlag, - std::shared_ptr closure, - TaskIdType *taskId) { - // 加锁防止并发 +int CloneServiceManager::CloneFile(const UUID& source, const std::string& user, + const std::string& destination, + const std::string& poolset, bool lazyFlag, + std::shared_ptr closure, + TaskIdType* taskId) { + // Lock to prevent concurrent requests for the same destination NameLockGuard lockDestFileGuard(*destFileLock_, destination); brpc::ClosureGuard guard(closure.get()); closure->SetDestFileLock(destFileLock_); closure->SetDestFileName(destination); lockDestFileGuard.Release(); CloneInfo cloneInfo; - int ret = cloneCore_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, poolset, &cloneInfo); + int ret = cloneCore_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, poolset, + &cloneInfo); if (ret < 0) { if (kErrCodeTaskExist == ret) { - // 任务已存在的情况下返回成功,使接口幂等 + // Return success if the task already exists, keeping the interface + // idempotent *taskId = cloneInfo.GetTaskId(); closure->SetTaskId(*taskId); closure->SetErrCode(kErrCodeSuccess); return kErrCodeSuccess; } LOG(ERROR) << "CloneOrRecoverPre error" - << ", ret = " << ret - << ", source = " << source - << ", user = " << user - << ", destination = " << destination - << ", lazyFlag = " << lazyFlag - << ", poolset = " << poolset; + << ", ret = " << ret << ", source = " << source + << ", user = " << user << ", destination = " << destination + << ", lazyFlag = " << lazyFlag << ", poolset = " << poolset; closure->SetErrCode(ret); return ret; } @@ -106,35 +102,31 @@ int CloneServiceManager::CloneFile(const UUID &source, return ret; } -int CloneServiceManager::RecoverFile(const UUID &source, - const std::string &user, - const std::string &destination, - bool lazyFlag, - std::shared_ptr closure, - TaskIdType *taskId) { - // 加锁防止并发 +int CloneServiceManager::RecoverFile( + const UUID& source, const std::string& user, const std::string& destination, + bool lazyFlag, std::shared_ptr closure, TaskIdType* taskId) { + // Lock to prevent concurrent requests for the same destination NameLockGuard lockDestFileGuard(*destFileLock_, destination); brpc::ClosureGuard guard(closure.get()); closure->SetDestFileLock(destFileLock_); closure->SetDestFileName(destination); lockDestFileGuard.Release(); CloneInfo cloneInfo; - int ret = cloneCore_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kRecover, "", &cloneInfo); + int ret = + cloneCore_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kRecover, "", &cloneInfo); if (ret < 0) { if (kErrCodeTaskExist == ret) { - // 任务已存在的情况下返回成功,使接口幂等 + // Return success if the task already exists, keeping the interface + // idempotent *taskId = cloneInfo.GetTaskId(); closure->SetTaskId(*taskId); closure->SetErrCode(kErrCodeSuccess); return kErrCodeSuccess; } LOG(ERROR) << "CloneOrRecoverPre error" - << ", ret = " << ret - << ", source = " << source - << ", user = " << user - << ", destination = " << destination + << ", ret = " << ret << ", source = " << source + << ", user = " << user << ", destination = " << destination << ", lazyFlag = " << lazyFlag; closure->SetErrCode(ret); return ret; @@ -151,29 +143,23 @@ int CloneServiceManager::RecoverFile(const UUID &source, } int CloneServiceManager::BuildAndPushCloneOrRecoverLazyTask( - CloneInfo cloneInfo, - std::shared_ptr closure) { + CloneInfo cloneInfo, std::shared_ptr closure) { brpc::ClosureGuard guard(closure.get()); TaskIdType taskId = cloneInfo.GetTaskId(); - auto cloneInfoMetric = - std::make_shared(taskId); + auto cloneInfoMetric
= std::make_shared(taskId); closure->SetTaskId(taskId); std::shared_ptr taskInfo = - std::make_shared(cloneInfo, - cloneInfoMetric, closure); + std::make_shared(cloneInfo, cloneInfoMetric, closure); taskInfo->UpdateMetric(); std::shared_ptr task = - std::make_shared( - taskId, taskInfo, cloneCore_); + std::make_shared(taskId, taskInfo, cloneCore_); int ret = cloneTaskMgr_->PushStage1Task(task); if (ret < 0) { LOG(ERROR) << "CloneTaskMgr Push Task error" - << ", ret = " << ret - << ", going to remove task info."; - int ret2 = cloneCore_->HandleRemoveCloneOrRecoverTask( - taskInfo); + << ", ret = " << ret << ", going to remove task info."; + int ret2 = cloneCore_->HandleRemoveCloneOrRecoverTask(taskInfo); if (ret2 < 0) { LOG(ERROR) << "CloneServiceManager has encouter an internal error," << "remove taskInfo fail !"; @@ -186,29 +172,23 @@ int CloneServiceManager::BuildAndPushCloneOrRecoverLazyTask( } int CloneServiceManager::BuildAndPushCloneOrRecoverNotLazyTask( - CloneInfo cloneInfo, - std::shared_ptr closure) { + CloneInfo cloneInfo, std::shared_ptr closure) { brpc::ClosureGuard guard(closure.get()); TaskIdType taskId = cloneInfo.GetTaskId(); - auto cloneInfoMetric = - std::make_shared(taskId); + auto cloneInfoMetric = std::make_shared(taskId); closure->SetTaskId(taskId); std::shared_ptr taskInfo = - std::make_shared(cloneInfo, - cloneInfoMetric, nullptr); + std::make_shared(cloneInfo, cloneInfoMetric, nullptr); taskInfo->UpdateMetric(); std::shared_ptr task = - std::make_shared( - taskId, taskInfo, cloneCore_); + std::make_shared(taskId, taskInfo, cloneCore_); int ret = cloneTaskMgr_->PushCommonTask(task); if (ret < 0) { LOG(ERROR) << "CloneTaskMgr Push Task error" - << ", ret = " << ret - << ", going to remove task info."; - int ret2 = cloneCore_->HandleRemoveCloneOrRecoverTask( - taskInfo); + << ", ret = " << ret << ", going to remove task info."; + int ret2 = cloneCore_->HandleRemoveCloneOrRecoverTask(taskInfo); if (ret2 < 0) { LOG(ERROR) << "CloneServiceManager has encouter an internal error," << "remove taskInfo fail !"; @@ -218,17 +198,15 @@ int CloneServiceManager::BuildAndPushCloneOrRecoverNotLazyTask( return kErrCodeSuccess; } -int CloneServiceManager::Flatten( - const std::string &user, - const TaskIdType &taskId) { +int CloneServiceManager::Flatten(const std::string& user, + const TaskIdType& taskId) { CloneInfo cloneInfo; int ret = cloneCore_->FlattenPre(user, taskId, &cloneInfo); if (kErrCodeTaskExist == ret) { return kErrCodeSuccess; } else if (ret < 0) { LOG(ERROR) << "FlattenPre error" - << ", ret = " << ret - << ", user = " << user + << ", ret = " << ret << ", user = " << user << ", taskId = " << taskId; return ret; } @@ -240,10 +218,10 @@ int CloneServiceManager::Flatten( dlock_ = std::make_shared(*dlockOpts_); if (0 == dlock_->Init()) { LOG(ERROR) << "Init DLock error" - << ", pfx = " << dlockOpts_->pfx - << ", retryTimes = " << dlockOpts_->retryTimes - << ", timeout = " << dlockOpts_->ctx_timeoutMS - << ", ttl = " << dlockOpts_->ttlSec; + << ", pfx = " << dlockOpts_->pfx + << ", retryTimes = " << dlockOpts_->retryTimes + << ", timeout = " << dlockOpts_->ctx_timeoutMS + << ", ttl = " << dlockOpts_->ttlSec; return kErrCodeInternalError; } } @@ -253,11 +231,9 @@ int CloneServiceManager::Flatten( closure->SetDLock(dlock_); std::shared_ptr taskInfo = - std::make_shared( - cloneInfo, cloneInfoMetric, closure); - std::shared_ptr task = - std::make_shared( - cloneInfo.GetTaskId(), taskInfo, cloneCore_); + std::make_shared(cloneInfo, cloneInfoMetric, closure); + std::shared_ptr 
task = std::make_shared( + cloneInfo.GetTaskId(), taskInfo, cloneCore_); ret = cloneTaskMgr_->PushStage2Task(task); if (ret < 0) { LOG(ERROR) << "CloneTaskMgr Push Stage2 Task error" @@ -267,8 +243,8 @@ int CloneServiceManager::Flatten( return kErrCodeSuccess; } -int CloneServiceManager::GetCloneTaskInfo(const std::string &user, - std::vector *info) { +int CloneServiceManager::GetCloneTaskInfo(const std::string& user, + std::vector* info) { std::vector cloneInfos; int ret = cloneCore_->GetCloneInfoList(&cloneInfos); if (ret < 0) { @@ -280,16 +256,14 @@ int CloneServiceManager::GetCloneTaskInfo(const std::string &user, } int CloneServiceManager::GetCloneTaskInfoById( - const std::string &user, - const TaskIdType &taskId, - std::vector *info) { + const std::string& user, const TaskIdType& taskId, + std::vector* info) { std::vector cloneInfos; CloneInfo cloneInfo; int ret = cloneCore_->GetCloneInfo(taskId, &cloneInfo); if (ret < 0) { LOG(ERROR) << "GetCloneInfo fail" - << ", ret = " << ret - << ", taskId = " << taskId; + << ", ret = " << ret << ", taskId = " << taskId; return kErrCodeFileNotExist; } if (cloneInfo.GetUser() != user) { @@ -300,23 +274,20 @@ int CloneServiceManager::GetCloneTaskInfoById( } int CloneServiceManager::GetCloneTaskInfoByName( - const std::string &user, - const std::string &fileName, - std::vector *info) { + const std::string& user, const std::string& fileName, + std::vector* info) { std::vector cloneInfos; int ret = cloneCore_->GetCloneInfoByFileName(fileName, &cloneInfos); if (ret < 0) { LOG(ERROR) << "GetCloneInfoByFileName fail" - << ", ret = " << ret - << ", fileName = " << fileName; + << ", ret = " << ret << ", fileName = " << fileName; return kErrCodeFileNotExist; } return GetCloneTaskInfoInner(cloneInfos, user, info); } int CloneServiceManager::GetCloneTaskInfoByFilter( - const CloneFilterCondition &filter, - std::vector *info) { + const CloneFilterCondition& filter, std::vector* info) { std::vector cloneInfos; int ret = cloneCore_->GetCloneInfoList(&cloneInfos); if (ret < 0) { @@ -327,9 +298,9 @@ int CloneServiceManager::GetCloneTaskInfoByFilter( return GetCloneTaskInfoInner(cloneInfos, filter, info); } -int CloneServiceManager::GetCloneRefStatus(const std::string &src, - CloneRefStatus *refStatus, - std::vector *needCheckFiles) { +int CloneServiceManager::GetCloneRefStatus( + const std::string& src, CloneRefStatus* refStatus, + std::vector* needCheckFiles) { std::vector cloneInfos; int ret = cloneCore_->GetCloneInfoList(&cloneInfos); if (ret < 0) { @@ -338,10 +309,10 @@ int CloneServiceManager::GetCloneRefStatus(const std::string &src, } *refStatus = CloneRefStatus::kNoRef; - for (auto &cloneInfo : cloneInfos) { + for (auto& cloneInfo : cloneInfos) { if (cloneInfo.GetSrc() == src) { switch (cloneInfo.GetStatus()) { - case CloneStatus::done : + case CloneStatus::done: case CloneStatus::error: { break; } @@ -370,14 +341,13 @@ int CloneServiceManager::GetCloneRefStatus(const std::string &src, } int CloneServiceManager::GetCloneTaskInfoInner( - std::vector cloneInfos, - CloneFilterCondition filter, - std::vector *info) { + std::vector cloneInfos, CloneFilterCondition filter, + std::vector* info) { int ret = kErrCodeSuccess; - for (auto &cloneInfo : cloneInfos) { + for (auto& cloneInfo : cloneInfos) { if (filter.IsMatchCondition(cloneInfo)) { switch (cloneInfo.GetStatus()) { - case CloneStatus::done : { + case CloneStatus::done: { info->emplace_back(cloneInfo, kProgressCloneComplete); break; } @@ -395,7 +365,7 @@ int CloneServiceManager::GetCloneTaskInfoInner( 
cloneTaskMgr_->GetTask(taskId); if (task != nullptr) { info->emplace_back(cloneInfo, - task->GetTaskInfo()->GetProgress()); + task->GetTaskInfo()->GetProgress()); } else { TaskCloneInfo tcInfo; ret = GetFinishedCloneTask(taskId, &tcInfo); @@ -421,14 +391,13 @@ int CloneServiceManager::GetCloneTaskInfoInner( } int CloneServiceManager::GetCloneTaskInfoInner( - std::vector cloneInfos, - const std::string &user, - std::vector *info) { + std::vector cloneInfos, const std::string& user, + std::vector* info) { int ret = kErrCodeSuccess; - for (auto &cloneInfo : cloneInfos) { + for (auto& cloneInfo : cloneInfos) { if (cloneInfo.GetUser() == user) { switch (cloneInfo.GetStatus()) { - case CloneStatus::done : { + case CloneStatus::done: { info->emplace_back(cloneInfo, kProgressCloneComplete); break; } @@ -446,7 +415,7 @@ int CloneServiceManager::GetCloneTaskInfoInner( cloneTaskMgr_->GetTask(taskId); if (task != nullptr) { info->emplace_back(cloneInfo, - task->GetTaskInfo()->GetProgress()); + task->GetTaskInfo()->GetProgress()); } else { TaskCloneInfo tcInfo; ret = GetFinishedCloneTask(taskId, &tcInfo); @@ -471,7 +440,7 @@ int CloneServiceManager::GetCloneTaskInfoInner( return kErrCodeSuccess; } -bool CloneFilterCondition::IsMatchCondition(const CloneInfo &cloneInfo) { +bool CloneFilterCondition::IsMatchCondition(const CloneInfo& cloneInfo) { if (user_ != nullptr && *user_ != cloneInfo.GetUser()) { return false; } @@ -489,45 +458,39 @@ bool CloneFilterCondition::IsMatchCondition(const CloneInfo &cloneInfo) { } int status; - if (status_ != nullptr - && common::StringToInt(*status_, &status) == false) { + if (status_ != nullptr && common::StringToInt(*status_, &status) == false) { return false; } - if (status_ != nullptr - && common::StringToInt(*status_, &status) == true - && status != static_cast(cloneInfo.GetStatus())) { + if (status_ != nullptr && common::StringToInt(*status_, &status) == true && + status != static_cast(cloneInfo.GetStatus())) { return false; } int type; - if (type_ != nullptr - && common::StringToInt(*type_, &type) == false) { + if (type_ != nullptr && common::StringToInt(*type_, &type) == false) { return false; } - if (type_ != nullptr - && common::StringToInt(*type_, &type) == true - && type != static_cast(cloneInfo.GetTaskType())) { + if (type_ != nullptr && common::StringToInt(*type_, &type) == true && + type != static_cast(cloneInfo.GetTaskType())) { return false; } return true; } -int CloneServiceManager::GetFinishedCloneTask( - const TaskIdType &taskId, - TaskCloneInfo *taskCloneInfoOut) { +int CloneServiceManager::GetFinishedCloneTask(const TaskIdType& taskId, + TaskCloneInfo* taskCloneInfoOut) { CloneInfo newInfo; int ret = cloneCore_->GetCloneInfo(taskId, &newInfo); if (ret < 0) { LOG(ERROR) << "GetCloneInfo fail" - << ", ret = " << ret - << ", taskId = " << taskId; + << ", ret = " << ret << ", taskId = " << taskId; return ret; } switch (newInfo.GetStatus()) { - case CloneStatus::done : { + case CloneStatus::done: { taskCloneInfoOut->SetCloneInfo(newInfo); taskCloneInfoOut->SetCloneProgress(kProgressCloneComplete); break; @@ -544,32 +507,29 @@ int CloneServiceManager::GetFinishedCloneTask( } default: LOG(ERROR) << "can not reach here!" 
- << " status = " << static_cast( - newInfo.GetStatus()); - // 当更新数据库失败时,有可能进入这里 + << " status = " << static_cast(newInfo.GetStatus()); + // When updating the database fails, it is possible to enter here return kErrCodeInternalError; } return kErrCodeSuccess; } -int CloneServiceManager::CleanCloneTask(const std::string &user, - const TaskIdType &taskId) { +int CloneServiceManager::CleanCloneTask(const std::string& user, + const TaskIdType& taskId) { CloneInfo cloneInfo; int ret = cloneCore_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfo); if (kErrCodeTaskExist == ret) { return kErrCodeSuccess; } else if (ret < 0) { LOG(ERROR) << "CleanCloneOrRecoverTaskPre fail" - << ", ret = " << ret - << ", user = " << user + << ", ret = " << ret << ", user = " << user << ", taskid = " << taskId; return ret; } std::shared_ptr taskInfo = std::make_shared(cloneInfo, nullptr, nullptr); - std::shared_ptr task = - std::make_shared( - cloneInfo.GetTaskId(), taskInfo, cloneCore_); + std::shared_ptr task = std::make_shared( + cloneInfo.GetTaskId(), taskInfo, cloneCore_); ret = cloneTaskMgr_->PushCommonTask(task); if (ret < 0) { LOG(ERROR) << "Push Task error, " @@ -579,40 +539,40 @@ int CloneServiceManager::CleanCloneTask(const std::string &user, return kErrCodeSuccess; } -int CloneServiceManager::RecoverCloneTaskInternal(const CloneInfo &cloneInfo) { +int CloneServiceManager::RecoverCloneTaskInternal(const CloneInfo& cloneInfo) { auto cloneInfoMetric = std::make_shared(cloneInfo.GetTaskId()); auto closure = std::make_shared(); std::shared_ptr taskInfo = - std::make_shared( - cloneInfo, cloneInfoMetric, closure); + std::make_shared(cloneInfo, cloneInfoMetric, closure); taskInfo->UpdateMetric(); - std::shared_ptr task = - std::make_shared( - cloneInfo.GetTaskId(), taskInfo, cloneCore_); + std::shared_ptr task = std::make_shared( + cloneInfo.GetTaskId(), taskInfo, cloneCore_); bool isLazy = cloneInfo.GetIsLazy(); int ret = kErrCodeSuccess; - // Lazy 克隆/恢复 + // Lazy Clone/Restore if (isLazy) { CloneStep step = cloneInfo.GetNextStep(); - // 处理kRecoverChunk,kCompleteCloneFile,kEnd这三个阶段的Push到stage2Pool - // 如果克隆source类型是file,阶段为kCreateCloneChunk和kCreateCloneMeta也需要push到stage2Pool // NOLINT + // Process the Push to stage2Pool for the three stages of + // kRecoverChunk,kCompleteCloneFile, and kEnd If the clone source type + // is file and the stages are kCreateCloneChunk and kCreateCloneMeta, + // they also need to be pushed to stage2Pool// NOLINT if (CloneStep::kRecoverChunk == step || - CloneStep::kCompleteCloneFile == step || - CloneStep::kEnd == step || - (CloneStep::kCreateCloneChunk == step - && cloneInfo.GetFileType() == CloneFileType::kFile) || - (CloneStep::kCreateCloneMeta == step - && cloneInfo.GetFileType() == CloneFileType::kFile)) { + CloneStep::kCompleteCloneFile == step || CloneStep::kEnd == step || + (CloneStep::kCreateCloneChunk == step && + cloneInfo.GetFileType() == CloneFileType::kFile) || + (CloneStep::kCreateCloneMeta == step && + cloneInfo.GetFileType() == CloneFileType::kFile)) { ret = cloneTaskMgr_->PushStage2Task(task); if (ret < 0) { LOG(ERROR) << "CloneTaskMgr Push Stage2 Task error" << ", ret = " << ret; return ret; } - // 否则push到stage1Pool + // Otherwise, push to stage1Pool } else { - // stage1的task包含了异步的请求的返回,需要加锁 + // The task of stage1 contains the return of asynchronous requests + // that require locking std::string destination = cloneInfo.GetDest(); NameLockGuard lockDestFileGuard(*destFileLock_, destination); closure->SetDestFileLock(destFileLock_); @@ -625,7 +585,7 @@ int 
CloneServiceManager::RecoverCloneTaskInternal(const CloneInfo &cloneInfo) { return ret; } } - // 非Lazy 克隆/恢复push到commonPool + // Non-lazy clone/recover tasks are pushed to the commonPool } else { ret = cloneTaskMgr_->PushCommonTask(task); if (ret < 0) { LOG(ERROR) << "CloneTaskMgr Push Task error" @@ -637,13 +597,11 @@ int CloneServiceManager::RecoverCloneTaskInternal(const CloneInfo &cloneInfo) { return kErrCodeSuccess; } -int CloneServiceManager::RecoverCleanTaskInternal(const CloneInfo &cloneInfo) { +int CloneServiceManager::RecoverCleanTaskInternal(const CloneInfo& cloneInfo) { std::shared_ptr taskInfo = - std::make_shared( - cloneInfo, nullptr, nullptr); - std::shared_ptr task = - std::make_shared( - cloneInfo.GetTaskId(), taskInfo, cloneCore_); + std::make_shared(cloneInfo, nullptr, nullptr); + std::shared_ptr task = std::make_shared( + cloneInfo.GetTaskId(), taskInfo, cloneCore_); int ret = cloneTaskMgr_->PushCommonTask(task); if (ret < 0) { LOG(ERROR) << "CloneTaskMgr Push Task error" @@ -660,26 +618,26 @@ int CloneServiceManager::RecoverCloneTask() { LOG(ERROR) << "GetCloneInfoList fail"; return ret; } - for (auto &cloneInfo : list) { + for (auto& cloneInfo : list) { switch (cloneInfo.GetStatus()) { case CloneStatus::retrying: { - // 重置重试任务的状态 + // Reset the status of the retrying task if (cloneInfo.GetTaskType() == CloneTaskType::kClone) { cloneInfo.SetStatus(CloneStatus::cloning); } else { cloneInfo.SetStatus(CloneStatus::recovering); } } - FALLTHROUGH_INTENDED; + FALLTHROUGH_INTENDED; case CloneStatus::cloning: case CloneStatus::recovering: { - // 建立快照或镜像的引用关系 + // Establish the reference relationship for the snapshot or + // image if (CloneFileType::kSnapshot == cloneInfo.GetFileType()) { cloneCore_->GetSnapshotRef()->IncrementSnapshotRef( cloneInfo.GetSrc()); } else { - cloneCore_->GetCloneRef()->IncrementRef( - cloneInfo.GetSrc()); + cloneCore_->GetCloneRef()->IncrementRef(cloneInfo.GetSrc()); } ret = RecoverCloneTaskInternal(cloneInfo); if (ret < 0) { @@ -696,13 +654,13 @@ int CloneServiceManager::RecoverCloneTask() { break; } case CloneStatus::metaInstalled: { - // metaInstalled 状态下的克隆对文件仍然有依赖,需要建立引用关系 + // A clone in metaInstalled state still depends on the source + // file, so the reference relationship must be established if (CloneFileType::kSnapshot == cloneInfo.GetFileType()) { cloneCore_->GetSnapshotRef()->IncrementSnapshotRef( cloneInfo.GetSrc()); } else { - cloneCore_->GetCloneRef()->IncrementRef( - cloneInfo.GetSrc()); + cloneCore_->GetCloneRef()->IncrementRef(cloneInfo.GetSrc()); } break; } @@ -713,52 +671,59 @@ return kErrCodeSuccess; } -// 当clone处于matainstall状态,且克隆卷已经删除的情况下,原卷的引用计数没有减。 -// 这个后台线程处理函数周期性的检查这个场景,如果发现有clone处于metaintalled状态 -// 且克隆卷已经删除,就去删除这条无效的clone信息,并减去原卷的引用计数。 -// 如果原卷是镜像且引用计数减为0,还需要去mds把原卷的状态改为created。 +// When a clone is in metaInstalled state and the clone volume has already been +// deleted, the reference count of the source volume is never decremented. This +// background thread function periodically checks for this scenario: if a clone +// in metaInstalled state is found whose clone volume has been deleted, the +// stale clone record is removed and the reference count of the source volume +// is decremented. If the source volume is an image and its reference count +// drops to 0, MDS must also be asked to change the status of the source volume +// back to created.
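The Func() implementation that follows performs this scan; condensed to its per-record decision rule it looks roughly like the sketch below, where Record, exists and gc are stand-ins for CloneInfo, CheckFileExists and HandleDeleteCloneInfo:

#include <string>

enum class Status { metaInstalled, other };

struct Record {  // stand-in for CloneInfo
    Status status;
    bool isLazy;
    std::string dest;
};

// Per-record decision rule of the background scan: only a lazy clone stuck
// in metaInstalled whose destination volume is gone is garbage-collected.
template <typename ExistsFn, typename GcFn>
void ScanOne(const Record& r, ExistsFn exists, GcFn gc) {
    if (r.status != Status::metaInstalled || !r.isLazy) {
        return;  // task still running or already terminal: not a candidate
    }
    if (exists(r.dest)) {
        return;  // destination still present, the reference is still valid
    }
    // Re-check the record just before deleting (it may have progressed),
    // then drop the record and the source's reference count.
    gc(r);
}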
void CloneServiceManagerBackendImpl::Func() { LOG(INFO) << "CloneServiceManager BackEndReferenceScanFunc start"; while (!isStop_.load()) { std::vector cloneInfos; int ret = cloneCore_->GetCloneInfoList(&cloneInfos); if (ret < 0) { - LOG(WARNING) << "GetCloneInfoList fail" << ", ret = " << ret; + LOG(WARNING) << "GetCloneInfoList fail" + << ", ret = " << ret; } int deleteCount = 0; - for (auto &it : cloneInfos) { - if (it.GetStatus() == CloneStatus::metaInstalled - && it.GetIsLazy() == true) { - // 检查destination在不在 + for (auto& it : cloneInfos) { + if (it.GetStatus() == CloneStatus::metaInstalled && + it.GetIsLazy() == true) { + // Check whether the destination still exists if (it.GetTaskType() == CloneTaskType::kClone) { ret = cloneCore_->CheckFileExists(it.GetDest(), - it.GetDestId()); + it.GetDestId()); } else { - // rename时,inodeid恢复成 + // After the rename, the inodeid is restored to the origin id ret = cloneCore_->CheckFileExists(it.GetDest(), - it.GetOriginId()); + it.GetOriginId()); } if (ret == kErrCodeFileNotExist) { - // 如果克隆卷是metaInstalled状态,且destination文件不存在, - // 删除这条cloneInfo,并减引用计数 + // If the cloned volume is in metaInstalled state and the + // destination file does not exist, delete this cloneInfo + // and decrement the reference count TaskIdType taskId = it.GetTaskId(); CloneInfo cloneInfo; ret = cloneCore_->GetCloneInfo(taskId, &cloneInfo); if (ret != kErrCodeSuccess) { - // cloneInfo已经不存在了 + // The cloneInfo no longer exists continue; } - // 再次检查cloneInfo是否是metaInstalled状态 + // Check again whether the cloneInfo is still in the + // metaInstalled state if (cloneInfo.GetStatus() != CloneStatus::metaInstalled) { continue; } ret = cloneCore_->HandleDeleteCloneInfo(cloneInfo); if (ret != kErrCodeSuccess) { - LOG(WARNING) << "HandleDeleteCloneInfo fail, ret = " - << ret << ", cloneInfo = " << cloneInfo; + LOG(WARNING) + << "HandleDeleteCloneInfo fail, ret = " << ret + << ", cloneInfo = " << cloneInfo; } else { deleteCount++; } @@ -771,14 +736,14 @@ void CloneServiceManagerBackendImpl::Func() { LOG(INFO) << "backend scan list, size = " << cloneInfos.size() << ", delete clone record count = " << deleteCount; - // 控制每轮扫描间隔 + // Control the interval between scan rounds roundWaitInterval_.WaitForNextExcution(); } LOG(INFO) << "CloneServiceManager BackEndReferenceScanFunc exit"; } void CloneServiceManagerBackendImpl::Init(uint32_t recordIntevalMs, - uint32_t roundIntevalMs) { + uint32_t roundIntevalMs) { recordWaitInterval_.Init(recordIntevalMs); roundWaitInterval_.Init(roundIntevalMs); diff --git a/src/snapshotcloneserver/clone/clone_service_manager.h b/src/snapshotcloneserver/clone/clone_service_manager.h index 0cd66e9d09..70268a9942 100644 --- a/src/snapshotcloneserver/clone/clone_service_manager.h +++ b/src/snapshotcloneserver/clone/clone_service_manager.h @@ -22,18 +22,18 @@ #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_SERVICE_MANAGER_H_ #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_SERVICE_MANAGER_H_ +#include #include #include -#include +#include "src/common/concurrent/dlock.h" +#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/common/wait_interval.h" +#include "src/snapshotcloneserver/clone/clone_closure.h" #include "src/snapshotcloneserver/clone/clone_core.h" #include "src/snapshotcloneserver/clone/clone_task.h" #include "src/snapshotcloneserver/clone/clone_task_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/config.h" -#include "src/snapshotcloneserver/clone/clone_closure.h" -#include "src/common/concurrent/dlock.h" namespace curve
{ namespace snapshotcloneserver { @@ -44,26 +44,16 @@ class TaskCloneInfo { public: TaskCloneInfo() = default; - TaskCloneInfo(const CloneInfo &cloneInfo, - uint32_t progress) - : cloneInfo_(cloneInfo), - cloneProgress_(progress) {} + TaskCloneInfo(const CloneInfo& cloneInfo, uint32_t progress) + : cloneInfo_(cloneInfo), cloneProgress_(progress) {} - void SetCloneInfo(const CloneInfo &cloneInfo) { - cloneInfo_ = cloneInfo; - } + void SetCloneInfo(const CloneInfo& cloneInfo) { cloneInfo_ = cloneInfo; } - CloneInfo GetCloneInfo() const { - return cloneInfo_; - } + CloneInfo GetCloneInfo() const { return cloneInfo_; } - void SetCloneProgress(uint32_t progress) { - cloneProgress_ = progress; - } + void SetCloneProgress(uint32_t progress) { cloneProgress_ = progress; } - uint32_t GetCloneProgress() const { - return cloneProgress_; - } + uint32_t GetCloneProgress() const { return cloneProgress_; } Json::Value ToJsonObj() const { Json::Value cloneTaskObj; @@ -72,88 +62,76 @@ class TaskCloneInfo { cloneTaskObj["User"] = info.GetUser(); cloneTaskObj["File"] = info.GetDest(); cloneTaskObj["Src"] = info.GetSrc(); - cloneTaskObj["TaskType"] = static_cast ( - info.GetTaskType()); - cloneTaskObj["TaskStatus"] = static_cast ( - info.GetStatus()); + cloneTaskObj["TaskType"] = static_cast(info.GetTaskType()); + cloneTaskObj["TaskStatus"] = static_cast(info.GetStatus()); cloneTaskObj["IsLazy"] = info.GetIsLazy(); - cloneTaskObj["NextStep"] = static_cast (info.GetNextStep()); + cloneTaskObj["NextStep"] = static_cast(info.GetNextStep()); cloneTaskObj["Time"] = info.GetTime(); cloneTaskObj["Progress"] = GetCloneProgress(); - cloneTaskObj["FileType"] = static_cast (info.GetFileType()); + cloneTaskObj["FileType"] = static_cast(info.GetFileType()); return cloneTaskObj; } - void LoadFromJsonObj(const Json::Value &jsonObj) { + void LoadFromJsonObj(const Json::Value& jsonObj) { CloneInfo info; info.SetTaskId(jsonObj["UUID"].asString()); info.SetUser(jsonObj["User"].asString()); info.SetDest(jsonObj["File"].asString()); info.SetSrc(jsonObj["Src"].asString()); - info.SetTaskType(static_cast( - jsonObj["TaskType"].asInt())); - info.SetStatus(static_cast( - jsonObj["TaskStatus"].asInt())); + info.SetTaskType( + static_cast(jsonObj["TaskType"].asInt())); + info.SetStatus(static_cast(jsonObj["TaskStatus"].asInt())); info.SetIsLazy(jsonObj["IsLazy"].asBool()); info.SetNextStep(static_cast(jsonObj["NextStep"].asInt())); info.SetTime(jsonObj["Time"].asUInt64()); - info.SetFileType(static_cast( - jsonObj["FileType"].asInt())); + info.SetFileType( + static_cast(jsonObj["FileType"].asInt())); SetCloneInfo(info); } private: - CloneInfo cloneInfo_; - uint32_t cloneProgress_; + CloneInfo cloneInfo_; + uint32_t cloneProgress_; }; class CloneFilterCondition { public: CloneFilterCondition() - : uuid_(nullptr), - source_(nullptr), - destination_(nullptr), - user_(nullptr), - status_(nullptr), - type_(nullptr) {} - - CloneFilterCondition(const std::string *uuid, const std::string *source, - const std::string *destination, const std::string *user, - const std::string *status, const std::string *type) - : uuid_(uuid), - source_(source), - destination_(destination), - user_(user), - status_(status), - type_(type) {} - bool IsMatchCondition(const CloneInfo &cloneInfo); - - void SetUuid(const std::string *uuid) { - uuid_ = uuid; - } - void SetSource(const std::string *source) { - source_ = source; - } - void SetDestination(const std::string *destination) { + : uuid_(nullptr), + source_(nullptr), + destination_(nullptr), + user_(nullptr), + 
status_(nullptr), + type_(nullptr) {} + + CloneFilterCondition(const std::string* uuid, const std::string* source, + const std::string* destination, + const std::string* user, const std::string* status, + const std::string* type) + : uuid_(uuid), + source_(source), + destination_(destination), + user_(user), + status_(status), + type_(type) {} + bool IsMatchCondition(const CloneInfo& cloneInfo); + + void SetUuid(const std::string* uuid) { uuid_ = uuid; } + void SetSource(const std::string* source) { source_ = source; } + void SetDestination(const std::string* destination) { destination_ = destination; } - void SetUser(const std::string *user) { - user_ = user; - } - void SetStatus(const std::string *status) { - status_ = status; - } - void SetType(const std::string *type) { - type_ = type; - } + void SetUser(const std::string* user) { user_ = user; } + void SetStatus(const std::string* status) { status_ = status; } + void SetType(const std::string* type) { type_ = type; } private: - const std::string *uuid_; - const std::string *source_; - const std::string *destination_; - const std::string *user_; - const std::string *status_; - const std::string *type_; + const std::string* uuid_; + const std::string* source_; + const std::string* destination_; + const std::string* user_; + const std::string* status_; + const std::string* type_; }; class CloneServiceManagerBackend { public: @@ -161,7 +139,8 @@ class CloneServiceManagerBackend { virtual ~CloneServiceManagerBackend() {} /** - * @brief 后台扫描线程执行函数,扫描克隆卷是否存在 + * @brief Background scan thread execution function to scan for the + * existence of cloned volumes * */ virtual void Func() = 0; @@ -177,12 +156,9 @@ class CloneServiceManagerBackendImpl : public CloneServiceManagerBackend { public: explicit CloneServiceManagerBackendImpl( std::shared_ptr cloneCore) - : cloneCore_(cloneCore), - isStop_(true) { - } + : cloneCore_(cloneCore), isStop_(true) {} - ~CloneServiceManagerBackendImpl() { - } + ~CloneServiceManagerBackendImpl() {} void Func() override; void Init(uint32_t recordIntevalMs, uint32_t roundIntevalMs) override; @@ -191,13 +167,14 @@ class CloneServiceManagerBackendImpl : public CloneServiceManagerBackend { private: std::shared_ptr cloneCore_; - // 后台扫描线程,扫描clone卷是否存在 + // Background scan thread to check if clone volume exists std::thread backEndReferenceScanThread_; - // 当前后台扫描是否停止,用于支持start,stop功能 + // Is the current background scanning stopped? 
Used to support the start and
+    // stop functions
    std::atomic_bool isStop_;
-    // 后台扫描线程记录使用定时器
+    // Timer used by the background scan thread between records
    common::WaitInterval recordWaitInterval_;
-    // 后台扫描线程每轮使用定时器
+    // Timer used by the background scan thread between rounds
    common::WaitInterval roundWaitInterval_;
};

@@ -207,250 +184,242 @@ class CloneServiceManager {
        std::shared_ptr<CloneTaskManager> cloneTaskMgr,
        std::shared_ptr<CloneCore> cloneCore,
        std::shared_ptr<CloneServiceManagerBackend> cloneServiceManagerBackend)
-        : cloneTaskMgr_(cloneTaskMgr),
-          cloneCore_(cloneCore),
-          cloneServiceManagerBackend_(cloneServiceManagerBackend) {
+        : cloneTaskMgr_(cloneTaskMgr),
+          cloneCore_(cloneCore),
+          cloneServiceManagerBackend_(cloneServiceManagerBackend) {
        destFileLock_ = std::make_shared();
    }

    virtual ~CloneServiceManager() {}

    /**
-     * @brief 初始化
+     * @brief Initialization
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int Init(const SnapshotCloneServerOptions &option);
+    virtual int Init(const SnapshotCloneServerOptions& option);

    /**
-     * @brief 启动服务
+     * @brief Start the service
     *
-     * @return 错误码
+     * @return error code
     */
    virtual int Start();

    /**
-     * @brief 停止服务
+     * @brief Stop the service
     *
     */
    virtual void Stop();

    /**
-     * @brief 从文件或快照克隆出一个文件
+     * @brief Clone a file from a file or snapshot
     *
-     * @param source 文件或快照的uuid
-     * @param user 文件或快照的用户
-     * @param destination 目标文件
-     * @param lazyFlag 是否lazy模式
-     * @param closure 异步回调实体
-     * @param[out] taskId 任务ID
+     * @param source UUID of the source file or snapshot
+     * @param user User of the source file or snapshot
+     * @param destination Destination file name
+     * @param poolset Poolset of the destination file
+     * @param lazyFlag Whether to clone in lazy mode
+     * @param closure Asynchronous callback entity
+     * @param[out] taskId Task ID
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int CloneFile(const UUID &source,
-                          const std::string &user,
-                          const std::string &destination,
-                          const std::string &poolset,
-                          bool lazyFlag,
-                          std::shared_ptr<CloneClosure> closure,
-                          TaskIdType *taskId);
+    virtual int CloneFile(const UUID& source, const std::string& user,
+                          const std::string& destination,
+                          const std::string& poolset, bool lazyFlag,
+                          std::shared_ptr<CloneClosure> closure,
+                          TaskIdType* taskId);

    /**
-     * @brief 从文件或快照恢复一个文件
+     * @brief Recover a file from a file or snapshot
     *
-     * @param source 文件或快照的uuid
-     * @param user 文件或快照的用户
-     * @param destination 目标文件名
-     * @param lazyFlag 是否lazy模式
-     * @param closure 异步回调实体
-     * @param[out] taskId 任务ID
+     * @param source UUID of the source file or snapshot
+     * @param user User of the source file or snapshot
+     * @param destination Destination file name
+     * @param lazyFlag Whether to recover in lazy mode
+     * @param closure Asynchronous callback entity
+     * @param[out] taskId Task ID
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int RecoverFile(const UUID &source,
-                            const std::string &user,
-                            const std::string &destination,
-                            bool lazyFlag,
-                            std::shared_ptr<CloneClosure> closure,
-                            TaskIdType *taskId);
+    virtual int RecoverFile(const UUID& source, const std::string& user,
+                            const std::string& destination, bool lazyFlag,
+                            std::shared_ptr<CloneClosure> closure,
+                            TaskIdType* taskId);

    /**
-     * @brief 安装克隆文件的数据,用于Lazy克隆
+     * @brief Install the data of a cloned file, used for lazy clone
     *
-     * @param user 用户
-     * @param taskId 任务ID
+     * @param user User name
+     * @param taskId Task ID
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int Flatten(
-        const std::string &user,
-        const TaskIdType &taskId);
+    virtual int Flatten(const std::string& user, const TaskIdType& taskId);
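+    /**
+     * Illustrative sketch only (not part of this interface): one plausible
+     * way a caller could drive a lazy clone and then install its data,
+     * assuming a CloneServiceManager pointer `mgr`, an already-constructed
+     * CloneClosure `closure`, and hypothetical argument values.
+     * @code
+     * TaskIdType taskId;
+     * bool lazy = true;
+     * int ret = mgr->CloneFile("snapshot-uuid", "alice", "/clone/dest",
+     *                          "default-poolset", lazy, closure, &taskId);
+     * if (ret == kErrCodeSuccess) {
+     *     // Metadata is ready; trigger data installation for the lazy clone.
+     *     ret = mgr->Flatten("alice", taskId);
+     * }
+     * @endcode
+     */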
    /**
-     * @brief 查询某个用户的克隆/恢复任务信息
+     * @brief Query the clone/recover task information of a given user
     *
-     * @param user 用户名
-     * @param info 克隆/恢复任务信息
+     * @param user User name
+     * @param info Clone/recover task information
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int GetCloneTaskInfo(const std::string &user,
-                                 std::vector<TaskCloneInfo> *info);
+    virtual int GetCloneTaskInfo(const std::string& user,
+                                 std::vector<TaskCloneInfo>* info);

    /**
-     * @brief 通过Id查询某个用户的克隆/恢复任务信息
+     * @brief Query the clone/recover task information of a given user by
+     * task ID
     *
-     * @param user 用户名
-     * @param taskId 指定的任务Id
-     * @param info 克隆/恢复任务信息
+     * @param user User name
+     * @param taskId Specified task ID
+     * @param info Clone/recover task information
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int GetCloneTaskInfoById(
-        const std::string &user,
-        const TaskIdType &taskId,
-        std::vector<TaskCloneInfo> *info);
+    virtual int GetCloneTaskInfoById(const std::string& user,
+                                     const TaskIdType& taskId,
+                                     std::vector<TaskCloneInfo>* info);

    /**
-     * @brief 通过文件名查询某个用户的克隆/恢复任务信息
+     * @brief Query the clone/recover task information of a given user by
+     * file name
     *
-     * @param user 用户名
-     * @param fileName 指定的文件名
-     * @param info 克隆/恢复任务信息
+     * @param user User name
+     * @param fileName Specified file name
+     * @param info Clone/recover task information
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int GetCloneTaskInfoByName(
-        const std::string &user,
-        const std::string &fileName,
-        std::vector<TaskCloneInfo> *info);
+    virtual int GetCloneTaskInfoByName(const std::string& user,
+                                       const std::string& fileName,
+                                       std::vector<TaskCloneInfo>* info);

    /**
-     * @brief 通过过滤条件查询某个用户的克隆/恢复任务信息
+     * @brief Query a user's clone/recover task information by filter
+     * conditions
     *
-     * @param filter 过滤条件
-     * @param info 克隆/恢复任务信息
+     * @param filter Filter conditions
+     * @param info Clone/recover task information
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int GetCloneTaskInfoByFilter(const CloneFilterCondition &filter,
-                                         std::vector<TaskCloneInfo> *info);
+    virtual int GetCloneTaskInfoByFilter(const CloneFilterCondition& filter,
+                                         std::vector<TaskCloneInfo>* info);

    /**
-     * @brief 查询src是否有依赖
+     * @brief Check whether src has dependencies
     *
-     * @param src 指定的文件名
-     * @param refStatus 0表示没有依赖,1表示有依赖,2表示需要进一步确认
-     * @param needCheckFiles 需要进一步确认的文件列表
+     * @param src Specified file name
+     * @param refStatus 0 indicates no dependencies, 1 indicates dependencies,
+     * and 2 indicates further confirmation is needed
+     * @param needCheckFiles List of files that require further confirmation
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int GetCloneRefStatus(const std::string &src,
-                                  CloneRefStatus *refStatus,
-                                  std::vector<std::string> *needCheckFiles);
+    virtual int GetCloneRefStatus(const std::string& src,
+                                  CloneRefStatus* refStatus,
+                                  std::vector<std::string>* needCheckFiles);

    /**
-     * @brief 清除失败的clone/Recover任务、状态、文件
+     * @brief Clean up a failed clone/recover task, its status and files
     *
-     * @param user 用户名
-     * @param taskId 任务Id
+     * @param user User name
+     * @param taskId Task ID
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int CleanCloneTask(const std::string &user,
-                               const TaskIdType &taskId);
+    virtual int CleanCloneTask(const std::string& user,
+                               const TaskIdType& taskId);

    /**
-     * @brief 重启后恢复未完成clone和recover任务
+     * @brief Resume unfinished clone and recover tasks after a restart
     *
-     * @return 错误码
+     * @return error code
     */
    virtual int RecoverCloneTask();

    // for test
-    void SetDLock(std::shared_ptr<DLock> dlock) {
-        dlock_ = dlock;
-    }
+    void SetDLock(std::shared_ptr<DLock> dlock) { dlock_ = dlock; }

 private:
    /**
-     * @brief 从给定的任务列表中获取指定用户的任务集
+     * @brief Get the task set of a specified user from the given task list
     *
-     * @param cloneInfos 克隆/恢复信息
-     * @param user 用户信息
-     * @param[out] info 克隆/恢复任务信息
+     * @param cloneInfos Clone/recover information
+     * @param user User information
+     * @param[out] info Clone/recover task information
     *
-     * @return 错误码
+     * @return error code
     */
    int GetCloneTaskInfoInner(std::vector<CloneInfo> cloneInfos,
-                              const std::string &user,
-                              std::vector<TaskCloneInfo> *info);
+                              const std::string& user,
+                              std::vector<TaskCloneInfo>* info);

    /**
-     * @brief 从给定的任务列表中获取符合过滤条件的任务集
+     * @brief Get the task set matching the filter conditions from the given
+     * task list
     *
-     * @param cloneInfos 克隆/恢复信息
-     * @param filter 过滤条件
-     * @param[out] info 克隆/恢复任务信息
+     * @param cloneInfos Clone/recover information
+     * @param filter Filter conditions
+     * @param[out] info Clone/recover task information
     *
-     * @return 错误码
+     * @return error code
     */
    int GetCloneTaskInfoInner(std::vector<CloneInfo> cloneInfos,
-                              CloneFilterCondition filter,
-                              std::vector<TaskCloneInfo> *info);
+                              CloneFilterCondition filter,
+                              std::vector<TaskCloneInfo>* info);

    /**
-     * @brief 获取已经完成任务信息
+     * @brief Get the information of a finished task
     *
-     * @param taskId 任务ID
-     * @param taskCloneInfoOut 克隆任务信息
+     * @param taskId Task ID
+     * @param taskCloneInfoOut Clone task information
     *
-     * @return 错误码
+     * @return error code
     */
-    int GetFinishedCloneTask(
-        const TaskIdType &taskId,
-        TaskCloneInfo *taskCloneInfoOut);
+    int GetFinishedCloneTask(const TaskIdType& taskId,
+                             TaskCloneInfo* taskCloneInfoOut);

    /**
-     * @brief 根据克隆任务信息恢复克隆任务
+     * @brief Resume a clone task based on its clone task information
     *
-     * @param cloneInfo 克隆任务信息
+     * @param cloneInfo Clone task information
     *
-     * @return 错误码
+     * @return error code
     */
-    int RecoverCloneTaskInternal(const CloneInfo &cloneInfo);
+    int RecoverCloneTaskInternal(const CloneInfo& cloneInfo);

    /**
-     * @brief 根据克隆任务信息恢复清除克隆任务
+     * @brief Resume a clean task based on its clone task information
     *
-     * @param cloneInfo 克隆任务信息
+     * @param cloneInfo Clone task information
     *
-     * @return 错误码
+     * @return error code
     */
-    int RecoverCleanTaskInternal(const CloneInfo &cloneInfo);
+    int RecoverCleanTaskInternal(const CloneInfo& cloneInfo);

    /**
-     * @brief 构建和push Lazy的任务
+     * @brief Build and push a lazy task
     *
-     * @param cloneInfo 克隆任务信息
-     * @param closure 异步回调实体
+     * @param cloneInfo Clone task information
+     * @param closure Asynchronous callback entity
     *
-     * @return 错误码
+     * @return error code
     */
    int BuildAndPushCloneOrRecoverLazyTask(
-        CloneInfo cloneInfo,
-        std::shared_ptr<CloneClosure> closure);
+        CloneInfo cloneInfo, std::shared_ptr<CloneClosure> closure);

    /**
-     * @brief 构建和push 非Lazy的任务
+     * @brief Build and push a non-lazy task
     *
-     * @param cloneInfo 克隆任务信息
-     * @param closure 异步回调实体
+     * @param cloneInfo Clone task information
+     * @param closure Asynchronous callback entity
     *
-     * @return 错误码
+     * @return error code
     */
    int BuildAndPushCloneOrRecoverNotLazyTask(
-        CloneInfo cloneInfo,
-        std::shared_ptr<CloneClosure> closure);
+        CloneInfo cloneInfo, std::shared_ptr<CloneClosure> closure);

 private:
    std::shared_ptr<DLockOpts> dlockOpts_;
@@ -461,8 +430,6 @@ class CloneServiceManager {
    std::shared_ptr<CloneServiceManagerBackend> cloneServiceManagerBackend_;
};

-
-
}  // namespace snapshotcloneserver
}  // namespace curve

diff --git a/src/snapshotcloneserver/clone/clone_task.h b/src/snapshotcloneserver/clone/clone_task.h
index 8ea5c6be51..2ddc10976e 100644
--- a/src/snapshotcloneserver/clone/clone_task.h
+++ b/src/snapshotcloneserver/clone/clone_task.h
@@ -23,17 +23,17 @@
 #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_H_
 #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_H_

-#include
 #include
+#include

-#include "src/snapshotcloneserver/clone/clone_core.h"
+#include "src/common/concurrent/dlock.h"
 #include "src/common/snapshotclone/snapshotclone_define.h"
+#include "src/snapshotcloneserver/clone/clone_closure.h" +#include "src/snapshotcloneserver/clone/clone_core.h" +#include "src/snapshotcloneserver/common/curvefs_client.h" +#include "src/snapshotcloneserver/common/snapshotclone_metric.h" #include "src/snapshotcloneserver/common/task.h" #include "src/snapshotcloneserver/common/task_info.h" -#include "src/snapshotcloneserver/common/snapshotclone_metric.h" -#include "src/snapshotcloneserver/common/curvefs_client.h" -#include "src/snapshotcloneserver/clone/clone_closure.h" -#include "src/common/concurrent/dlock.h" using ::curve::common::DLock; @@ -42,33 +42,23 @@ namespace snapshotcloneserver { class CloneTaskInfo : public TaskInfo { public: - CloneTaskInfo(const CloneInfo &cloneInfo, - std::shared_ptr metric, - std::shared_ptr closure) + CloneTaskInfo(const CloneInfo& cloneInfo, + std::shared_ptr metric, + std::shared_ptr closure) : TaskInfo(), cloneInfo_(cloneInfo), metric_(metric), closure_(closure) {} - CloneInfo& GetCloneInfo() { - return cloneInfo_; - } + CloneInfo& GetCloneInfo() { return cloneInfo_; } - const CloneInfo& GetCloneInfo() const { - return cloneInfo_; - } + const CloneInfo& GetCloneInfo() const { return cloneInfo_; } - TaskIdType GetTaskId() const { - return cloneInfo_.GetTaskId(); - } + TaskIdType GetTaskId() const { return cloneInfo_.GetTaskId(); } - void UpdateMetric() { - metric_->Update(this); - } + void UpdateMetric() { metric_->Update(this); } - std::shared_ptr GetClosure() { - return closure_; - } + std::shared_ptr GetClosure() { return closure_; } private: CloneInfo cloneInfo_; @@ -76,20 +66,16 @@ class CloneTaskInfo : public TaskInfo { std::shared_ptr closure_; }; -std::ostream& operator<<(std::ostream& os, const CloneTaskInfo &taskInfo); +std::ostream& operator<<(std::ostream& os, const CloneTaskInfo& taskInfo); class CloneTaskBase : public Task { public: - CloneTaskBase(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) - : Task(taskId), - taskInfo_(taskInfo), - core_(core) {} - - std::shared_ptr GetTaskInfo() const { - return taskInfo_; - } + CloneTaskBase(const TaskIdType& taskId, + std::shared_ptr taskInfo, + std::shared_ptr core) + : Task(taskId), taskInfo_(taskInfo), core_(core) {} + + std::shared_ptr GetTaskInfo() const { return taskInfo_; } protected: std::shared_ptr taskInfo_; @@ -98,9 +84,8 @@ class CloneTaskBase : public Task { class CloneTask : public CloneTaskBase { public: - CloneTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) + CloneTask(const TaskIdType& taskId, std::shared_ptr taskInfo, + std::shared_ptr core) : CloneTaskBase(taskId, taskInfo, core) {} void Run() override { @@ -121,17 +106,14 @@ class CloneTask : public CloneTaskBase { } }; - class CloneCleanTask : public CloneTaskBase { public: - CloneCleanTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) + CloneCleanTask(const TaskIdType& taskId, + std::shared_ptr taskInfo, + std::shared_ptr core) : CloneTaskBase(taskId, taskInfo, core) {} - void Run() override { - core_->HandleCleanCloneOrRecoverTask(taskInfo_); - } + void Run() override { core_->HandleCleanCloneOrRecoverTask(taskInfo_); } }; struct SnapCloneCommonClosure : public SnapCloneClosure { @@ -145,9 +127,9 @@ struct SnapCloneCommonClosure : public SnapCloneClosure { }; struct CreateCloneChunkContext { - // 数据源 + // Data source std::string location; - // chunkid 信息 + // Chunkid information ChunkIDInfo cidInfo; // seqNum uint64_t sn; @@ -155,16 +137,16 @@ struct CreateCloneChunkContext { 
    uint64_t csn;
    // chunk size
    uint64_t chunkSize;
-    // 返回值
+    // Return value
    int retCode;
    // taskid
    TaskIdType taskid;
-    // 异步请求开始时间
+    // Start time of the asynchronous request
    uint64_t startTime;
-    // 异步请求重试总时间
+    // Total retry time of the asynchronous request
    uint64_t clientAsyncMethodRetryTimeSec;
-    // chunk信息
-    struct CloneChunkInfo *cloneChunkInfo;
+    // Chunk information
+    struct CloneChunkInfo* cloneChunkInfo;
};

using CreateCloneChunkContextPtr = std::shared_ptr<CreateCloneChunkContext>;

@@ -173,21 +155,20 @@ struct CreateCloneChunkClosure : public SnapCloneClosure {
    CreateCloneChunkClosure(
        std::shared_ptr<TaskTracker> tracker,
        CreateCloneChunkContextPtr context)
-        : tracker_(tracker),
-          context_(context) {}
+        : tracker_(tracker), context_(context) {}
    void Run() {
        std::unique_ptr<CreateCloneChunkClosure> self_guard(this);
        context_->retCode = GetRetCode();
        if (context_->retCode < 0) {
            LOG(WARNING) << "CreateCloneChunkClosure return fail"
-                << ", ret = " << context_->retCode
-                << ", location = " << context_->location
-                << ", logicalPoolId = " << context_->cidInfo.lpid_
-                << ", copysetId = " << context_->cidInfo.cpid_
-                << ", chunkId = " << context_->cidInfo.cid_
-                << ", seqNum = " << context_->sn
-                << ", csn = " << context_->csn
-                << ", taskid = " << context_->taskid;
+                         << ", ret = " << context_->retCode
+                         << ", location = " << context_->location
+                         << ", logicalPoolId = " << context_->cidInfo.lpid_
+                         << ", copysetId = " << context_->cidInfo.cpid_
+                         << ", chunkId = " << context_->cidInfo.cid_
+                         << ", seqNum = " << context_->sn
+                         << ", csn = " << context_->csn
+                         << ", taskid = " << context_->taskid;
        }
        tracker_->PushResultContext(context_);
        tracker_->HandleResponse(context_->retCode);
@@ -197,21 +178,21 @@
};

struct RecoverChunkContext {
-    // chunkid 信息
+    // Chunk ID information
    ChunkIDInfo cidInfo;
-    // chunk的分片index
+    // Index of this part within the chunk
    uint64_t partIndex;
-    // 总的chunk分片数
+    // Total number of parts in the chunk
    uint64_t totalPartNum;
-    // 分片大小
+    // Part size
    uint64_t partSize;
-    // 返回值
+    // Return value
    int retCode;
    // taskid
    TaskIdType taskid;
-    // 异步请求开始时间
+    // Start time of the asynchronous request
    uint64_t startTime;
-    // 异步请求重试总时间
+    // Total retry time of the asynchronous request
    uint64_t clientAsyncMethodRetryTimeSec;
};

@@ -219,17 +200,15 @@ using RecoverChunkContextPtr = std::shared_ptr<RecoverChunkContext>;

struct RecoverChunkClosure : public SnapCloneClosure {
    RecoverChunkClosure(std::shared_ptr<TaskTracker> tracker,
-                        RecoverChunkContextPtr context)
-        : tracker_(tracker),
-          context_(context) {}
+                        RecoverChunkContextPtr context)
+        : tracker_(tracker), context_(context) {}
    void Run() {
        std::unique_ptr<RecoverChunkClosure> self_guard(this);
        context_->retCode = GetRetCode();
        if (context_->retCode < 0) {
            LOG(WARNING) << "RecoverChunkClosure return fail"
                         << ", ret = " << context_->retCode
-                         << ", logicalPoolId = "
-                         << context_->cidInfo.lpid_
+                         << ", logicalPoolId = " << context_->cidInfo.lpid_
                         << ", copysetId = " << context_->cidInfo.cpid_
                         << ", chunkId = " << context_->cidInfo.cid_
                         << ", partIndex = " << context_->partIndex
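The two closures above share one pattern: each owns a context, records the RPC
return code into it, hands the context back to a shared tracker, and lets the
tracker aggregate completions. A minimal self-contained sketch of that pattern
(DemoTracker/DemoClosure are illustrative stand-ins, not types from this patch):

    #include <iostream>
    #include <memory>
    #include <mutex>
    #include <vector>

    struct DemoContext {
        int retCode = 0;
    };

    // Aggregates per-request results, as the trackers above do.
    class DemoTracker {
     public:
        void PushResultContext(const std::shared_ptr<DemoContext>& ctx) {
            std::lock_guard<std::mutex> lk(mtx_);
            results_.push_back(ctx);
        }
        void HandleResponse(int retCode) {
            std::lock_guard<std::mutex> lk(mtx_);
            if (retCode < 0) ++failed_;
        }
        int FailedCount() const { return failed_; }

     private:
        std::mutex mtx_;
        std::vector<std::shared_ptr<DemoContext>> results_;
        int failed_ = 0;
    };

    // Mirrors the closure shape: record the result, hand it to the tracker.
    struct DemoClosure {
        std::shared_ptr<DemoTracker> tracker;
        std::shared_ptr<DemoContext> context;
        void Run(int rpcRet) {
            context->retCode = rpcRet;
            tracker->PushResultContext(context);
            tracker->HandleResponse(context->retCode);
        }
    };

    int main() {
        auto tracker = std::make_shared<DemoTracker>();
        DemoClosure c{tracker, std::make_shared<DemoContext>()};
        c.Run(-1);  // simulate a failed asynchronous reply
        std::cout << "failed: " << tracker->FailedCount() << "\n";
        return 0;
    }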
diff --git a/src/snapshotcloneserver/clone/clone_task_manager.cpp b/src/snapshotcloneserver/clone/clone_task_manager.cpp
index be14fc5db6..559c22b315 100644
--- a/src/snapshotcloneserver/clone/clone_task_manager.cpp
+++ b/src/snapshotcloneserver/clone/clone_task_manager.cpp
@@ -21,8 +21,8 @@
 */

 #include "src/snapshotcloneserver/clone/clone_task_manager.h"
-#include "src/common/snapshotclone/snapshotclone_define.h"
+
+#include "src/common/snapshotclone/snapshotclone_define.h"

 namespace curve {
 namespace snapshotcloneserver {

@@ -48,9 +48,8 @@ int CloneTaskManager::Start() {
            return ret;
        }
        isStop_.store(false);
-        // isStop_标志先置,防止backEndThread先退出
-        backEndThread =
-            std::thread(&CloneTaskManager::BackEndThreadFunc, this);
+        // Set the isStop_ flag first to prevent backEndThread from exiting
+        // prematurely
+        backEndThread = std::thread(&CloneTaskManager::BackEndThreadFunc, this);
    }
    return kErrCodeSuccess;
}

@@ -66,10 +65,8 @@ void CloneTaskManager::Stop() {
 }

 int CloneTaskManager::PushCommonTask(std::shared_ptr<CloneTaskBase> task) {
-    int ret = PushTaskInternal(task,
-                               &commonTaskMap_,
-                               &commonTasksLock_,
-                               commonPool_);
+    int ret =
+        PushTaskInternal(task, &commonTaskMap_, &commonTasksLock_, commonPool_);
    if (ret >= 0) {
        cloneMetric_->UpdateBeforeTaskBegin(
            task->GetTaskInfo()->GetCloneInfo().GetTaskType());
@@ -80,10 +77,8 @@ int CloneTaskManager::PushCommonTask(std::shared_ptr<CloneTaskBase> task) {

 int CloneTaskManager::PushStage1Task(std::shared_ptr<CloneTaskBase> task) {
-    int ret = PushTaskInternal(task,
-                               &stage1TaskMap_,
-                               &stage1TasksLock_,
-                               stage1Pool_);
+    int ret =
+        PushTaskInternal(task, &stage1TaskMap_, &stage1TasksLock_, stage1Pool_);
    if (ret >= 0) {
        cloneMetric_->UpdateBeforeTaskBegin(
            task->GetTaskInfo()->GetCloneInfo().GetTaskType());
@@ -93,12 +88,9 @@ int CloneTaskManager::PushStage1Task(std::shared_ptr<CloneTaskBase> task) {
    return ret;
}

-int CloneTaskManager::PushStage2Task(
-    std::shared_ptr<CloneTaskBase> task) {
-    int ret = PushTaskInternal(task,
-                               &stage2TaskMap_,
-                               &stage2TasksLock_,
-                               stage2Pool_);
+int CloneTaskManager::PushStage2Task(std::shared_ptr<CloneTaskBase> task) {
+    int ret =
+        PushTaskInternal(task, &stage2TaskMap_, &stage2TasksLock_, stage2Pool_);
    if (ret >= 0) {
        cloneMetric_->UpdateFlattenTaskBegin();
        LOG(INFO) << "Push Task Into Stage2 Pool for data install success,"
@@ -107,13 +99,13 @@ int CloneTaskManager::PushStage2Task(
    return ret;
}

-int CloneTaskManager::PushTaskInternal(std::shared_ptr<CloneTaskBase> task,
-    std::map<std::string, std::shared_ptr<CloneTaskBase>> *taskMap,
-    Mutex *taskMapMutex,
-    std::shared_ptr<ThreadPool> taskPool) {
-    // 同一个clone的Stage1的Task和Stage2的Task的任务ID是一样的,
-    // clean task的ID也是一样的,
-    // 触发一次扫描,将已完成的任务Flush出去
+int CloneTaskManager::PushTaskInternal(
+    std::shared_ptr<CloneTaskBase> task,
+    std::map<std::string, std::shared_ptr<CloneTaskBase>>* taskMap,
+    Mutex* taskMapMutex, std::shared_ptr<ThreadPool> taskPool) {
+    // Stage1 and Stage2 tasks of the same clone share one task ID, and the
+    // corresponding clean task uses that ID as well. Trigger a scan here to
+    // flush out the completed tasks first.
    ScanStage2Tasks();
    ScanStage1Tasks();
    ScanCommonTasks();
@@ -124,12 +116,9 @@ int CloneTaskManager::PushTaskInternal(std::shared_ptr<CloneTaskBase> task,
    WriteLockGuard taskMapWlock(cloneTaskMapLock_);
    LockGuard workingTasksLockGuard(*taskMapMutex);

-    std::string destination =
-        task->GetTaskInfo()->GetCloneInfo().GetDest();
+    std::string destination = task->GetTaskInfo()->GetCloneInfo().GetDest();

-    auto ret = taskMap->emplace(
-        destination,
-        task);
+    auto ret = taskMap->emplace(destination, task);
    if (!ret.second) {
        LOG(ERROR) << "CloneTaskManager::PushTaskInternal fail, "
                   << "same destination exist, "
@@ -152,7 +141,7 @@ int CloneTaskManager::PushTaskInternal(std::shared_ptr<CloneTaskBase> task,
 }

 std::shared_ptr<CloneTaskBase> CloneTaskManager::GetTask(
-    const TaskIdType &taskId) const {
+    const TaskIdType& taskId) const {
    ReadLockGuard taskMapRlock(cloneTaskMapLock_);
    auto it = cloneTaskMap_.find(taskId);
    if (it != cloneTaskMap_.end()) {
@@ -174,16 +163,13 @@ void CloneTaskManager::BackEndThreadFunc() {

 void CloneTaskManager::ScanCommonTasks() {
    WriteLockGuard taskMapWlock(cloneTaskMapLock_);
    LockGuard workingTasksLock(commonTasksLock_);
-    for (auto it = commonTaskMap_.begin();
it != commonTaskMap_.end();) { auto taskInfo = it->second->GetTaskInfo(); - // 处理已完成的任务 + // Process completed tasks if (taskInfo->IsFinish()) { - CloneTaskType taskType = - taskInfo->GetCloneInfo().GetTaskType(); - CloneStatus status = - taskInfo->GetCloneInfo().GetStatus(); - // 移除任务并更新metric + CloneTaskType taskType = taskInfo->GetCloneInfo().GetTaskType(); + CloneStatus status = taskInfo->GetCloneInfo().GetStatus(); + // Remove task and update metric cloneMetric_->UpdateAfterTaskFinish(taskType, status); LOG(INFO) << "common task {" << " TaskInfo : " << *taskInfo @@ -200,15 +186,12 @@ void CloneTaskManager::ScanStage1Tasks() { WriteLockGuard taskMapWlock(cloneTaskMapLock_); LockGuard workingTasksLock(stage1TasksLock_); LockGuard workingTasksLockGuard(stage2TasksLock_); - for (auto it = stage1TaskMap_.begin(); - it != stage1TaskMap_.end();) { + for (auto it = stage1TaskMap_.begin(); it != stage1TaskMap_.end();) { auto taskInfo = it->second->GetTaskInfo(); - // 处理已完成的任务 + // Process completed tasks if (taskInfo->IsFinish()) { - CloneTaskType taskType = - taskInfo->GetCloneInfo().GetTaskType(); - CloneStatus status = - taskInfo->GetCloneInfo().GetStatus(); + CloneTaskType taskType = taskInfo->GetCloneInfo().GetTaskType(); + CloneStatus status = taskInfo->GetCloneInfo().GetStatus(); cloneMetric_->UpdateAfterTaskFinish(taskType, status); LOG(INFO) << "stage1 task {" << " TaskInfo : " << *taskInfo @@ -224,27 +207,22 @@ void CloneTaskManager::ScanStage1Tasks() { void CloneTaskManager::ScanStage2Tasks() { WriteLockGuard taskMapWlock(cloneTaskMapLock_); LockGuard workingTasksLockGuard(stage2TasksLock_); - for (auto it = stage2TaskMap_.begin(); - it != stage2TaskMap_.end();) { + for (auto it = stage2TaskMap_.begin(); it != stage2TaskMap_.end();) { auto taskInfo = it->second->GetTaskInfo(); - // 处理完成的任务 + // Process completed tasks if (taskInfo->IsFinish()) { - CloneTaskType taskType = - taskInfo->GetCloneInfo().GetTaskType(); - CloneStatus status = - taskInfo->GetCloneInfo().GetStatus(); - // retrying 状态的任务需要重试 + CloneTaskType taskType = taskInfo->GetCloneInfo().GetTaskType(); + CloneStatus status = taskInfo->GetCloneInfo().GetStatus(); + // Tasks in the retrying state need to be retried if (CloneStatus::retrying == status) { if (CloneTaskType::kClone == taskType) { - taskInfo->GetCloneInfo(). - SetStatus(CloneStatus::cloning); + taskInfo->GetCloneInfo().SetStatus(CloneStatus::cloning); } else { - taskInfo->GetCloneInfo(). 
-                        SetStatus(CloneStatus::recovering);
+                    taskInfo->GetCloneInfo().SetStatus(CloneStatus::recovering);
                }
                taskInfo->Reset();
                stage2Pool_->PushTask(it->second);
-                // 其他任务结束更新metric
+                // For other finished tasks, update the metric
            } else {
                cloneMetric_->UpdateAfterFlattenTaskFinish(status);
                LOG(INFO) << "stage2 task {"
@@ -261,4 +239,3 @@
 }  // namespace snapshotcloneserver
 }  // namespace curve
-

diff --git a/src/snapshotcloneserver/clone/clone_task_manager.h b/src/snapshotcloneserver/clone/clone_task_manager.h
index d9607ccedc..916d25deae 100644
--- a/src/snapshotcloneserver/clone/clone_task_manager.h
+++ b/src/snapshotcloneserver/clone/clone_task_manager.h
@@ -23,50 +23,46 @@
 #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_MANAGER_H_
 #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_MANAGER_H_

-#include
-#include
 #include
-#include
 #include
+#include
+#include
+#include
 #include  // NOLINT

-#include "src/snapshotcloneserver/clone/clone_task.h"
-#include "src/snapshotcloneserver/common/thread_pool.h"
 #include "src/common/concurrent/rw_lock.h"
 #include "src/common/snapshotclone/snapshotclone_define.h"
+#include "src/snapshotcloneserver/clone/clone_core.h"
+#include "src/snapshotcloneserver/clone/clone_task.h"
 #include "src/snapshotcloneserver/common/config.h"
 #include "src/snapshotcloneserver/common/snapshotclone_metric.h"
-#include "src/snapshotcloneserver/clone/clone_core.h"
+#include "src/snapshotcloneserver/common/thread_pool.h"

-using ::curve::common::RWLock;
+using ::curve::common::LockGuard;
+using ::curve::common::Mutex;
 using ::curve::common::ReadLockGuard;
+using ::curve::common::RWLock;
 using ::curve::common::WriteLockGuard;
-using ::curve::common::Mutex;
-using ::curve::common::LockGuard;

 namespace curve {
 namespace snapshotcloneserver {

 class CloneTaskManager {
 public:
-    explicit CloneTaskManager(
-        std::shared_ptr<CloneCore> core,
-        std::shared_ptr<CloneMetric> cloneMetric)
+    explicit CloneTaskManager(std::shared_ptr<CloneCore> core,
+                              std::shared_ptr<CloneMetric> cloneMetric)
        : isStop_(true),
          core_(core),
          cloneMetric_(cloneMetric),
          cloneTaskManagerScanIntervalMs_(0) {}

-    ~CloneTaskManager() {
-        Stop();
-    }
+    ~CloneTaskManager() { Stop(); }

    int Init(std::shared_ptr<ThreadPool> stage1Pool,
-             std::shared_ptr<ThreadPool> stage2Pool,
-             std::shared_ptr<ThreadPool> commonPool,
-             const SnapshotCloneServerOptions &option) {
-        cloneTaskManagerScanIntervalMs_ =
-            option.cloneTaskManagerScanIntervalMs;
+             std::shared_ptr<ThreadPool> stage2Pool,
+             std::shared_ptr<ThreadPool> commonPool,
+             const SnapshotCloneServerOptions& option) {
+        cloneTaskManagerScanIntervalMs_ = option.cloneTaskManagerScanIntervalMs;
        stage1Pool_ = stage1Pool;
        stage2Pool_ = stage2Pool;
        commonPool_ = commonPool;
@@ -78,40 +74,39 @@ class CloneTaskManager {
    void Stop();

    /**
-     * @brief 往任务管理器中加入任务
+     * @brief Add a task to the task manager
     *
-     * 用于非Lazy克隆及其他删除克隆等管控面的请求
+     * Used for control-plane requests such as non-lazy clone and clone
+     * deletion
     *
-     * @param task 任务
+     * @param task Task to add
     *
-     * @return 错误码
+     * @return error code
     */
-    int PushCommonTask(
-        std::shared_ptr<CloneTaskBase> task);
+    int PushCommonTask(std::shared_ptr<CloneTaskBase> task);

    /**
-     * @brief 往任务管理器中加入LazyClone阶段一的的任务
+     * @brief Add a LazyClone stage-1 task to the task manager
     *
-     * @param task 任务
+     * @param task Task to add
     *
-     * @return 错误码
+     * @return error code
     */
-    int PushStage1Task(
-        std::shared_ptr<CloneTaskBase> task);
+    int PushStage1Task(std::shared_ptr<CloneTaskBase> task);

    /**
-     * @brief 往任务管理器中加入LazyClone阶段二的的任务
+     * @brief Add a LazyClone stage-2 task to the task manager
     *
-     * 目前只用于重启恢复时,将Lazy克隆恢复克隆数据阶段的任务加入任务管理器
+     * Currently only used during restart recovery, to re-add the tasks of
+     * the lazy-clone data-recovery stage to the task manager
     *
-     * @param task 任务
+     * @param task Task to add
     *
-     * @return 错误码
+     * @return error code
     */
-    int PushStage2Task(
-        std::shared_ptr<CloneTaskBase> task);
+    int PushStage2Task(std::shared_ptr<CloneTaskBase> task);

-    std::shared_ptr<CloneTaskBase> GetTask(const TaskIdType &taskId) const;
+    std::shared_ptr<CloneTaskBase> GetTask(const TaskIdType& taskId) const;
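+    /**
+     * Illustrative sketch only (not part of this interface): a plausible
+     * bring-up sequence for this manager, assuming already-constructed
+     * `core`, `cloneMetric`, thread pools, `options`, and a task `task`.
+     * @code
+     * CloneTaskManager mgr(core, cloneMetric);
+     * mgr.Init(stage1Pool, stage2Pool, commonPool, options);
+     * mgr.Start();               // starts the background scan thread
+     * mgr.PushCommonTask(task);  // non-lazy clone / clean requests
+     * auto running = mgr.GetTask(task->GetTaskId());
+     * mgr.Stop();
+     * @endcode
+     */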
 private:
    void BackEndThreadFunc();
    void ScanCommonTasks();
    void ScanStage1Tasks();
    void ScanStage2Tasks();

    /**
-     * @brief 往对应线程池和map中push任务
+     * @brief Push a task to the corresponding thread pool and task map
     *
-     * @param task 任务
-     * @param taskMap 任务表
-     * @param taskMapMutex 任务表和线程池的锁
-     * @param taskPool 线程池
+     * @param task Task to push
+     * @param taskMap Task table
+     * @param taskMapMutex Lock protecting the task table and thread pool
+     * @param taskPool Thread pool
     *
-     * @return 错误码
+     * @return error code
     */
    int PushTaskInternal(
        std::shared_ptr<CloneTaskBase> task,
-        std::map<std::string, std::shared_ptr<CloneTaskBase>> *taskMap,
-        Mutex *taskMapMutex,
-        std::shared_ptr<ThreadPool> taskPool);
+        std::map<std::string, std::shared_ptr<CloneTaskBase>>* taskMap,
+        Mutex* taskMapMutex, std::shared_ptr<ThreadPool> taskPool);

 private:
-    // 后端线程
+    // Background thread
    std::thread backEndThread;

-    // id->克隆任务表
+    // Task ID -> clone task table
    std::map<TaskIdType, std::shared_ptr<CloneTaskBase>> cloneTaskMap_;
    mutable RWLock cloneTaskMapLock_;

-    // 存放stage1Pool_池的当前任务,key为destination
+    // Current tasks of the stage1Pool_ pool, keyed by destination
    std::map<std::string, std::shared_ptr<CloneTaskBase>> stage1TaskMap_;
    mutable Mutex stage1TasksLock_;

-    // 存放stage1Poo2_池的当前任务,key为destination
+    // Current tasks of the stage2Pool_ pool, keyed by destination
    std::map<std::string, std::shared_ptr<CloneTaskBase>> stage2TaskMap_;
    mutable Mutex stage2TasksLock_;

-    // 存放commonPool_池的当前任务
+    // Current tasks of the commonPool_ pool
    std::map<std::string, std::shared_ptr<CloneTaskBase>> commonTaskMap_;
    mutable Mutex commonTasksLock_;

-    // 用于Lazy克隆元数据部分的线程池
+    // Thread pool for the metadata stage of lazy clone
    std::shared_ptr<ThreadPool> stage1Pool_;

-    // 用于Lazy克隆数据部分的线程池
+    // Thread pool for the data stage of lazy clone
    std::shared_ptr<ThreadPool> stage2Pool_;

-    // 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池
+    // Thread pool for non-lazy clones and other control-plane requests such
+    // as clone deletion
    std::shared_ptr<ThreadPool> commonPool_;

-    // 当前任务管理是否停止,用于支持start,stop功能
+    // Whether task management is currently stopped.
Used to support start and stop + // functions std::atomic_bool isStop_; // clone core @@ -173,16 +169,11 @@ class CloneTaskManager { // metric std::shared_ptr cloneMetric_; - // CloneTaskManager 后台线程扫描间隔 + // CloneTaskManager backend thread scan interval uint32_t cloneTaskManagerScanIntervalMs_; }; } // namespace snapshotcloneserver } // namespace curve - - - - - #endif // SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_MANAGER_H_ diff --git a/src/snapshotcloneserver/common/config.h b/src/snapshotcloneserver/common/config.h index d5e93a24c1..3c8cc13263 100644 --- a/src/snapshotcloneserver/common/config.h +++ b/src/snapshotcloneserver/common/config.h @@ -23,9 +23,9 @@ #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_CONFIG_H_ #define SRC_SNAPSHOTCLONESERVER_COMMON_CONFIG_H_ - -#include +#include #include + #include "src/common/concurrent/dlock.h" namespace curve { @@ -41,58 +41,61 @@ struct CurveClientOptions { std::string mdsRootUser; // mds root password std::string mdsRootPassword; - // 调用client方法的重试总时间 + // The total retry time for calling the client method uint64_t clientMethodRetryTimeSec; - // 调用client方法重试间隔时间 + // Call client method retry interval uint64_t clientMethodRetryIntervalMs; }; // snapshotcloneserver options struct SnapshotCloneServerOptions { // snapshot&clone server address - std::string addr; - // 调用client异步方法重试总时间 + std::string addr; + // Total retry time for calling client asynchronous methods uint64_t clientAsyncMethodRetryTimeSec; - // 调用client异步方法重试时间间隔 + // Call client asynchronous method retry interval uint64_t clientAsyncMethodRetryIntervalMs; - // 快照工作线程数 + // Number of snapshot worker threads int snapshotPoolThreadNum; - // 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms) + // Scanning cycle of snapshot background thread scanning waiting queue and + // work queue (unit: ms) uint32_t snapshotTaskManagerScanIntervalMs; - // 转储chunk分片大小 + // Dump chunk shard size uint64_t chunkSplitSize; - // CheckSnapShotStatus调用间隔 + // CheckSnapShotStatus call interval uint32_t checkSnapshotStatusIntervalMs; - // 最大快照数 + // Maximum Snapshots uint32_t maxSnapshotLimit; // snapshotcore threadpool threadNum uint32_t snapshotCoreThreadNum; // mdsSessionTimeUs uint32_t mdsSessionTimeUs; - // ReadChunkSnapshot同时进行的异步请求数量 + // The number of asynchronous requests simultaneously processed by + // ReadChunkSnapshot uint32_t readChunkSnapshotConcurrency; - // 用于Lazy克隆元数据部分的线程池线程数 + // Number of thread pool threads used for Lazy clone metadata section int stage1PoolThreadNum; - // 用于Lazy克隆数据部分的线程池线程数 + // Number of thread pool threads used for Lazy clone data section int stage2PoolThreadNum; - // 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池线程数 + // Number of thread pool threads used for requests for non Lazy clones and + // deletion of clones and other control surfaces int commonPoolThreadNum; - // CloneTaskManager 后台线程扫描间隔 + // CloneTaskManager backend thread scan interval uint32_t cloneTaskManagerScanIntervalMs; - // clone chunk分片大小 + // Clone chunk shard size uint64_t cloneChunkSplitSize; - // 克隆临时目录 + // Clone temporary directory std::string cloneTempDir; // mds root user std::string mdsRootUser; - // CreateCloneChunk同时进行的异步请求数量 + // Number of asynchronous requests made simultaneously by CreateCloneChunk uint32_t createCloneChunkConcurrency; - // RecoverChunk同时进行的异步请求数量 + // Number of asynchronous requests simultaneously made by RecoverChunk uint32_t recoverChunkConcurrency; - // 引用计数后台扫描每条记录间隔 + // Reference Count Background Scan Each Record Interval uint32_t backEndReferenceRecordScanIntervalMs; - // 引用计数后台扫描每轮间隔 + // Reference Count 
background scan interval per round
    uint32_t backEndReferenceFuncScanIntervalMs;
    // dlock options
    DLockOpts dlockOpts;

diff --git a/src/snapshotcloneserver/common/curvefs_client.h b/src/snapshotcloneserver/common/curvefs_client.h
index 131f01659c..72db5e3009 100644
--- a/src/snapshotcloneserver/common/curvefs_client.h
+++ b/src/snapshotcloneserver/common/curvefs_client.h
@@ -15,42 +15,41 @@
 */

 /*************************************************************************
- > File Name: curvefs_client.h
- > Author:
- > Created Time: Wed Nov 21 11:33:46 2018
+ > File Name: curvefs_client.h
+ > Author:
+ > Created Time: Wed Nov 21 11:33:46 2018
 ************************************************************************/

 #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_CURVEFS_CLIENT_H_
 #define SRC_SNAPSHOTCLONESERVER_COMMON_CURVEFS_CLIENT_H_

-
-#include
-#include
-#include
 #include  //NOLINT
+#include
+#include
 #include  //NOLINT
+#include

-#include "proto/nameserver2.pb.h"
-#include "proto/chunk.pb.h"
+#include "proto/chunk.pb.h"
+#include "proto/nameserver2.pb.h"
 #include "src/client/client_common.h"
-#include "src/client/libcurve_snapshot.h"
 #include "src/client/libcurve_file.h"
+#include "src/client/libcurve_snapshot.h"
 #include "src/common/snapshotclone/snapshotclone_define.h"
-#include "src/snapshotcloneserver/common/config.h"
 #include "src/common/timeutility.h"
+#include "src/snapshotcloneserver/common/config.h"

-using ::curve::client::SegmentInfo;
-using ::curve::client::LogicPoolID;
-using ::curve::client::CopysetID;
 using ::curve::client::ChunkID;
-using ::curve::client::ChunkInfoDetail;
 using ::curve::client::ChunkIDInfo;
-using ::curve::client::FInfo;
+using ::curve::client::ChunkInfoDetail;
+using ::curve::client::CopysetID;
+using ::curve::client::FileClient;
 using ::curve::client::FileStatus;
+using ::curve::client::FInfo;
+using ::curve::client::LogicPoolID;
+using ::curve::client::SegmentInfo;
 using ::curve::client::SnapCloneClosure;
-using ::curve::client::UserInfo;
 using ::curve::client::SnapshotClient;
-using ::curve::client::FileClient;
+using ::curve::client::UserInfo;

 namespace curve {
 namespace snapshotcloneserver {

 using RetryMethod = std::function;
 using RetryCondition = std::function;

 class RetryHelper {
 public:
-    RetryHelper(const RetryMethod &retryMethod,
-                const RetryCondition &condition) {
+    RetryHelper(const RetryMethod& retryMethod,
+                const RetryCondition& condition) {
        retryMethod_ = retryMethod;
        condition_ = condition;
    }

-    int RetryTimeSecAndReturn(
-        uint64_t retryTimeSec,
-        uint64_t retryIntervalMs) {
+    int RetryTimeSecAndReturn(uint64_t retryTimeSec, uint64_t retryIntervalMs) {
        int ret = -LIBCURVE_ERROR::FAILED;
        uint64_t startTime = TimeUtility::GetTimeofDaySec();
        uint64_t nowTime = startTime;
@@ -85,7 +82,7 @@ class RetryHelper {
    }

 private:
-    RetryMethod  retryMethod_;
+    RetryMethod retryMethod_;
    RetryCondition condition_;
};
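+// Illustrative sketch only (not part of this header): one way RetryHelper
+// could be driven, assuming RetryMethod is an int() callable, the condition
+// returns true once the result should be accepted, and DoRpc() is a
+// hypothetical operation that fails transiently.
+// @code
+// RetryHelper helper(
+//     []() { return DoRpc(); },
+//     [](int ret) { return ret != -LIBCURVE_ERROR::FAILED; });
+// // Retry DoRpc() for up to 30 seconds, sleeping 500 ms between attempts.
+// int ret = helper.RetryTimeSecAndReturn(30, 500);
+// @endcode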
class CurveFsClient {
 public:
    virtual ~CurveFsClient() {}

    /**
-     * @brief client 初始化
+     * @brief Client initialization
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int Init(const CurveClientOptions &options) = 0;
+    virtual int Init(const CurveClientOptions& options) = 0;

    /**
-     * @brief client 资源回收
+     * @brief Client resource cleanup
     *
-     * @return 错误码
+     * @return error code
     */
    virtual int UnInit() = 0;

    /**
-     * @brief 创建快照
+     * @brief Create a snapshot
     *
-     * @param filename 文件名
-     * @param user 用户信息
-     * @param[out] seq 快照版本号
+     * @param filename File name
+     * @param user User information
+     * @param[out] seq Snapshot version number
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int CreateSnapshot(const std::string &filename,
-                               const std::string &user,
-                               uint64_t *seq) = 0;
+    virtual int CreateSnapshot(const std::string& filename,
+                               const std::string& user, uint64_t* seq) = 0;

    /**
-     * @brief 删除快照
+     * @brief Delete a snapshot
     *
-     * @param filename 文件名
-     * @param user 用户信息
-     * @param seq 快照版本号
+     * @param filename File name
+     * @param user User information
+     * @param seq Snapshot version number
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int DeleteSnapshot(const std::string &filename,
-                               const std::string &user,
-                               uint64_t seq) = 0;
+    virtual int DeleteSnapshot(const std::string& filename,
+                               const std::string& user, uint64_t seq) = 0;

    /**
-     * @brief 获取快照文件信息
+     * @brief Get snapshot file information
     *
-     * @param filename 文件名
-     * @param user 用户名
-     * @param seq 快照版本号
-     * @param[out] snapInfo 快照文件信息
+     * @param filename File name
+     * @param user User name
+     * @param seq Snapshot version number
+     * @param[out] snapInfo Snapshot file information
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int GetSnapshot(const std::string &filename,
-                            const std::string &user,
-                            uint64_t seq, FInfo* snapInfo) = 0;
+    virtual int GetSnapshot(const std::string& filename,
+                            const std::string& user, uint64_t seq,
+                            FInfo* snapInfo) = 0;

    /**
-     * @brief 查询快照文件segment信息
+     * @brief Query snapshot file segment information
     *
-     * @param filename 文件名
-     * @param user 用户信息
-     * @param seq 快照版本号
-     * @param offset 偏移值
-     * @param segInfo segment信息
+     * @param filename File name
+     * @param user User information
+     * @param seq Snapshot version number
+     * @param offset Offset value
+     * @param segInfo Segment information
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int GetSnapshotSegmentInfo(const std::string &filename,
-                                       const std::string &user,
-                                       uint64_t seq,
-                                       uint64_t offset,
-                                       SegmentInfo *segInfo) = 0;
+    virtual int GetSnapshotSegmentInfo(const std::string& filename,
+                                       const std::string& user, uint64_t seq,
+                                       uint64_t offset,
+                                       SegmentInfo* segInfo) = 0;

    /**
-     * @brief 读取snapshot chunk的数据
+     * @brief Read snapshot chunk data
     *
-     * @param cidinfo chunk ID 信息
-     * @param seq 快照版本号
-     * @param offset 偏移值
-     * @param len 长度
-     * @param[out] buf buffer指针
-     * @param: scc是异步回调
+     * @param cidinfo Chunk ID information
+     * @param seq Snapshot version number
+     * @param offset Offset value
+     * @param len Length
+     * @param[out] buf Buffer pointer
+     * @param scc Asynchronous callback
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int ReadChunkSnapshot(ChunkIDInfo cidinfo,
-                                  uint64_t seq,
-                                  uint64_t offset,
-                                  uint64_t len,
-                                  char *buf,
-                                  SnapCloneClosure* scc) = 0;
+    virtual int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq,
+                                  uint64_t offset, uint64_t len, char* buf,
+                                  SnapCloneClosure* scc) = 0;

    /**
-     *获取快照状态
-     * @param: userinfo是用户信息
-     * @param: filenam文件名
-     * @param: seq是文件版本号信息
-     * @param: filestatus 快照文件状态
+     * Get snapshot status
+     * @param user User information
+     * @param filename File name
+     * @param seq File version number
+     * @param filestatus Snapshot file status
     */
-    virtual int CheckSnapShotStatus(std::string filename,
-                                    std::string user,
-                                    uint64_t seq,
-                                    FileStatus* filestatus) = 0;
+    virtual int CheckSnapShotStatus(std::string filename, std::string user,
+                                    uint64_t seq, FileStatus* filestatus) = 0;

    /**
-     * @brief 获取chunk的版本号信息
+     * @brief Obtain the version information of a chunk
     *
-     * @param cidinfo chunk ID 信息
-     * @param chunkInfo chunk详细信息
+     * @param cidinfo Chunk ID information
+     * 
@param chunkInfo chunk Details * - * @return 错误码 + * @return error code */ - virtual int GetChunkInfo(const ChunkIDInfo &cidinfo, - ChunkInfoDetail *chunkInfo) = 0; + virtual int GetChunkInfo(const ChunkIDInfo& cidinfo, + ChunkInfoDetail* chunkInfo) = 0; /** - * @brief 创建clone文件 + * @brief Create clone file * @detail - * - 若是clone,sn重置为初始值 - * - 若是recover,sn不变 - * - * @param source clone源文件名 - * @param filename clone目标文件名 - * @param user 用户信息 - * @param size 文件大小 - * @param sn 版本号 - * @param chunkSize chunk大小 + * - If clone, reset sn to initial value + * - If recover, sn remains unchanged + * + * @param source clone Source file name + * @param filename clone Target filename + * @param user user information + * @param size File size + * @param sn version number + * @param chunkSize chunk size * @param stripeUnit stripe size * @param stripeCount stripe count - * @param[out] fileInfo 文件信息 + * @param[out] fileInfo file information * - * @return 错误码 + * @return error code */ - virtual int CreateCloneFile( - const std::string &source, - const std::string &filename, - const std::string &user, - uint64_t size, - uint64_t sn, - uint32_t chunkSize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - FInfo* fileInfo) = 0; + virtual int CreateCloneFile(const std::string& source, + const std::string& filename, + const std::string& user, uint64_t size, + uint64_t sn, uint32_t chunkSize, + uint64_t stripeUnit, uint64_t stripeCount, + const std::string& poolset, + FInfo* fileInfo) = 0; /** - * @brief lazy 创建clone chunk + * @brief lazy creation of a clone chunk * @detail - * - location的格式定义为 A@B的形式。 - * - 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址; - * - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs - * - * @param location 数据源的url - * @param chunkidinfo 目标chunk - * @param sn chunk的序列号 - * @param csn correct sn - * @param chunkSize chunk的大小 - * @param: scc是异步回调 - * - * @return 错误码 + * - The location format is defined as A@B. + * - If the source data is on S3, the location format is uri@s3, where uri + * is the actual chunk object's address. + * - If the source data is on CurveFS, the location format is + * /filename/chunkindex@cs. 
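+     *   For illustration (hypothetical values, not from this patch): a chunk
+     *   object "snapuuid-1-0" stored on S3 yields the location
+     *   "snapuuid-1-0@s3", while chunk index 0 of the CurveFS file
+     *   "/clonesource" yields "/clonesource/0@cs".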
+ * + * @param location URL of the data source + * @param chunkidinfo Target chunk + * @param sn chunk's sequence number + * @param csn correct sequence number + * @param chunkSize Size of the chunk + * @param scc Asynchronous callback + * + * @return Error code */ - virtual int CreateCloneChunk( - const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure* scc) = 0; - + virtual int CreateCloneChunk(const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, + uint64_t csn, uint64_t chunkSize, + SnapCloneClosure* scc) = 0; /** - * @brief 实际恢复chunk数据 + * @brief Actual recovery chunk data * * @param chunkidinfo chunkidinfo - * @param offset 偏移 - * @param len 长度 - * @param: scc是异步回调 + * @param offset offset + * @param len length + * @param: scc is an asynchronous callback * - * @return 错误码 + * @return error code */ - virtual int RecoverChunk( - const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure* scc) = 0; + virtual int RecoverChunk(const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc) = 0; /** - * @brief 通知mds完成Clone Meta + * @brief Notify mds to complete Clone Meta * - * @param filename 目标文件名 - * @param user 用户名 + * @param filename Target file name + * @param user username * - * @return 错误码 + * @return error code */ - virtual int CompleteCloneMeta( - const std::string &filename, - const std::string &user) = 0; + virtual int CompleteCloneMeta(const std::string& filename, + const std::string& user) = 0; /** - * @brief 通知mds完成Clone Chunk + * @brief Notify mds to complete Clone Chunk * - * @param filename 目标文件名 - * @param user 用户名 + * @param filename Target file name + * @param user username * - * @return 错误码 + * @return error code */ - virtual int CompleteCloneFile( - const std::string &filename, - const std::string &user) = 0; + virtual int CompleteCloneFile(const std::string& filename, + const std::string& user) = 0; /** - * @brief 设置clone文件状态 + * @brief Set clone file status * - * @param filename 文件名 - * @param filestatus 要设置的目标状态 - * @param user 用户名 + * @param filename File name + * @param filestatus The target state to be set + * @param user username * - * @return 错误码 + * @return error code */ - virtual int SetCloneFileStatus( - const std::string &filename, - const FileStatus& filestatus, - const std::string &user) = 0; + virtual int SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const std::string& user) = 0; /** - * @brief 获取文件信息 + * @brief Get file information * - * @param filename 文件名 - * @param user 用户名 - * @param[out] fileInfo 文件信息 + * @param filename File name + * @param user username + * @param[out] fileInfo file information * - * @return 错误码 + * @return error code */ - virtual int GetFileInfo( - const std::string &filename, - const std::string &user, - FInfo* fileInfo) = 0; + virtual int GetFileInfo(const std::string& filename, + const std::string& user, FInfo* fileInfo) = 0; /** - * @brief 查询或分配文件segment信息 + * @brief Query or allocate file segment information * - * @param allocate 是否分配 - * @param offset 偏移值 - * @param fileInfo 文件信息 - * @param user 用户名 - * @param segInfo segment信息 + * @param allocate whether to allocate + * @param offset offset value + * @param fileInfo file information + * @param user username + * @param segInfo segment information * - * @return 错误码 + * @return error code */ - virtual int GetOrAllocateSegmentInfo( - bool allocate, - uint64_t offset, - FInfo* fileInfo, - 
const std::string &user,
-                                         SegmentInfo *segInfo) = 0;
+    virtual int GetOrAllocateSegmentInfo(bool allocate, uint64_t offset,
+                                         FInfo* fileInfo,
+                                         const std::string& user,
+                                         SegmentInfo* segInfo) = 0;

    /**
-     * @brief 为recover rename复制的文件
+     * @brief Rename the file cloned for recover
     *
-     * @param user 用户信息
-     * @param originId 被恢复的原始文件Id
-     * @param destinationId 克隆出的目标文件Id
-     * @param origin 被恢复的原始文件名
-     * @param destination 克隆出的目标文件
+     * @param user User information
+     * @param originId ID of the original file being recovered
+     * @param destinationId ID of the cloned destination file
+     * @param origin Name of the original file being recovered
+     * @param destination Cloned destination file
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int RenameCloneFile(
-        const std::string &user,
-        uint64_t originId,
-        uint64_t destinationId,
-        const std::string &origin,
-        const std::string &destination) = 0;
-
+    virtual int RenameCloneFile(const std::string& user, uint64_t originId,
+                                uint64_t destinationId,
+                                const std::string& origin,
+                                const std::string& destination) = 0;
    /**
-     * @brief 删除文件
+     * @brief Delete a file
     *
-     * @param fileName 文件名
-     * @param user 用户名
-     * @param fileId 删除文件的inodeId
+     * @param fileName File name
+     * @param user User name
+     * @param fileId Inode ID of the file to delete
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int DeleteFile(
-        const std::string &fileName,
-        const std::string &user,
-        uint64_t fileId) = 0;
+    virtual int DeleteFile(const std::string& fileName, const std::string& user,
+                           uint64_t fileId) = 0;

    /**
-     * @brief 创建目录
+     * @brief Create a directory
     *
-     * @param dirpath 目录名
-     * @param user 用户名
+     * @param dirpath Directory name
+     * @param user User name
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int Mkdir(const std::string& dirpath,
-                      const std::string &user) = 0;
+    virtual int Mkdir(const std::string& dirpath, const std::string& user) = 0;

    /**
-     * @brief 变更文件的owner
+     * @brief Change the owner of a file
     *
-     * @param filename 文件名
-     * @param newOwner 新的owner
+     * @param filename File name
+     * @param newOwner New owner
     *
-     * @return 错误码
+     * @return error code
     */
    virtual int ChangeOwner(const std::string& filename,
-                            const std::string& newOwner) = 0;
+                            const std::string& newOwner) = 0;
};

class CurveFsClientImpl : public CurveFsClient {
 public:
    CurveFsClientImpl(std::shared_ptr<SnapshotClient> snapClient,
-                      std::shared_ptr<FileClient> fileClient) :
-        snapClient_(snapClient), fileClient_(fileClient) {}
+                      std::shared_ptr<FileClient> fileClient)
+        : snapClient_(snapClient), fileClient_(fileClient) {}

    virtual ~CurveFsClientImpl() {}

-    // 以下接口定义见CurveFsClient接口注释
-    int Init(const CurveClientOptions &options) override;
+    // See the CurveFsClient interface comments above for the definitions of
+    // the interfaces below
+    int Init(const CurveClientOptions& options) override;

    int UnInit() override;

-    int CreateSnapshot(const std::string &filename,
-                       const std::string &user,
-                       uint64_t *seq) override;
-
-    int DeleteSnapshot(const std::string &filename,
-                       const std::string &user,
-                       uint64_t seq) override;
-
-    int GetSnapshot(const std::string &filename,
-                    const std::string &user,
-                    uint64_t seq,
-                    FInfo* snapInfo) override;
-
-    int GetSnapshotSegmentInfo(const std::string &filename,
-                               const std::string &user,
-                               uint64_t seq,
-                               uint64_t offset,
-                               SegmentInfo *segInfo) override;
-
-    int ReadChunkSnapshot(ChunkIDInfo cidinfo,
-                          uint64_t seq,
-                          uint64_t offset,
-                          uint64_t len,
-                          char *buf,
-                          SnapCloneClosure* scc) override;
-
-    int CheckSnapShotStatus(std::string filename,
-                            std::string user,
-                            uint64_t seq,
-
FileStatus* filestatus) override; - - int GetChunkInfo(const ChunkIDInfo &cidinfo, - ChunkInfoDetail *chunkInfo) override; - - int CreateCloneFile( - const std::string &source, - const std::string &filename, - const std::string &user, - uint64_t size, - uint64_t sn, - uint32_t chunkSize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - FInfo* fileInfo) override; - - int CreateCloneChunk( - const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure* scc) override; - - int RecoverChunk( - const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure* scc) override; - - int CompleteCloneMeta( - const std::string &filename, - const std::string &user) override; - - int CompleteCloneFile( - const std::string &filename, - const std::string &user) override; - - int SetCloneFileStatus( - const std::string &filename, - const FileStatus& filestatus, - const std::string &user) override; - - int GetFileInfo( - const std::string &filename, - const std::string &user, - FInfo* fileInfo) override; - - int GetOrAllocateSegmentInfo( - bool allocate, - uint64_t offset, - FInfo* fileInfo, - const std::string &user, - SegmentInfo *segInfo) override; - - int RenameCloneFile( - const std::string &user, - uint64_t originId, - uint64_t destinationId, - const std::string &origin, - const std::string &destination) override; - - int DeleteFile( - const std::string &fileName, - const std::string &user, - uint64_t fileId) override; - - int Mkdir(const std::string& dirpath, - const std::string &user) override; + int CreateSnapshot(const std::string& filename, const std::string& user, + uint64_t* seq) override; + + int DeleteSnapshot(const std::string& filename, const std::string& user, + uint64_t seq) override; + + int GetSnapshot(const std::string& filename, const std::string& user, + uint64_t seq, FInfo* snapInfo) override; + + int GetSnapshotSegmentInfo(const std::string& filename, + const std::string& user, uint64_t seq, + uint64_t offset, SegmentInfo* segInfo) override; + + int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, + SnapCloneClosure* scc) override; + + int CheckSnapShotStatus(std::string filename, std::string user, + uint64_t seq, FileStatus* filestatus) override; + + int GetChunkInfo(const ChunkIDInfo& cidinfo, + ChunkInfoDetail* chunkInfo) override; + + int CreateCloneFile(const std::string& source, const std::string& filename, + const std::string& user, uint64_t size, uint64_t sn, + uint32_t chunkSize, uint64_t stripeUnit, + uint64_t stripeCount, const std::string& poolset, + FInfo* fileInfo) override; + + int CreateCloneChunk(const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, + uint64_t csn, uint64_t chunkSize, + SnapCloneClosure* scc) override; + + int RecoverChunk(const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc) override; + + int CompleteCloneMeta(const std::string& filename, + const std::string& user) override; + + int CompleteCloneFile(const std::string& filename, + const std::string& user) override; + + int SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const std::string& user) override; + + int GetFileInfo(const std::string& filename, const std::string& user, + FInfo* fileInfo) override; + + int GetOrAllocateSegmentInfo(bool allocate, uint64_t offset, + FInfo* fileInfo, const std::string& user, + SegmentInfo* segInfo) 
override; + + int RenameCloneFile(const std::string& user, uint64_t originId, + uint64_t destinationId, const std::string& origin, + const std::string& destination) override; + + int DeleteFile(const std::string& fileName, const std::string& user, + uint64_t fileId) override; + + int Mkdir(const std::string& dirpath, const std::string& user) override; int ChangeOwner(const std::string& filename, const std::string& newOwner) override; private: - UserInfo GetUserInfo(const std::string &user) { + UserInfo GetUserInfo(const std::string& user) { if (user == mdsRootUser_) { return UserInfo(mdsRootUser_, mdsRootPassword_); } else { diff --git a/src/snapshotcloneserver/common/snapshotclone_info.h b/src/snapshotcloneserver/common/snapshotclone_info.h index 766ae00e05..fb7804d1f6 100644 --- a/src/snapshotcloneserver/common/snapshotclone_info.h +++ b/src/snapshotcloneserver/common/snapshotclone_info.h @@ -23,10 +23,10 @@ #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_INFO_H_ #define SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_INFO_H_ -#include -#include #include #include +#include +#include #include "src/common/snapshotclone/snapshotclone_define.h" @@ -44,10 +44,7 @@ enum class CloneStatus { metaInstalled = 7, }; -enum class CloneFileType { - kFile = 0, - kSnapshot = 1 -}; +enum class CloneFileType { kFile = 0, kSnapshot = 1 }; enum class CloneStep { kCreateCloneFile = 0, @@ -61,10 +58,10 @@ enum class CloneStep { kEnd }; -// 数据库中clone/recover任务信息 +// Clone/recover task information in the database class CloneInfo { public: - CloneInfo() + CloneInfo() : type_(CloneTaskType::kClone), originId_(0), destinationId_(0), @@ -74,14 +71,10 @@ class CloneInfo { nextStep_(CloneStep::kCreateCloneFile), status_(CloneStatus::error) {} - CloneInfo(const TaskIdType &taskId, - const std::string &user, - CloneTaskType type, - const std::string &source, - const std::string &destination, - const std::string &poolset, - CloneFileType fileType, - bool isLazy) + CloneInfo(const TaskIdType& taskId, const std::string& user, + CloneTaskType type, const std::string& source, + const std::string& destination, const std::string& poolset, + CloneFileType fileType, bool isLazy) : taskId_(taskId), user_(user), type_(type), @@ -96,19 +89,12 @@ class CloneInfo { nextStep_(CloneStep::kCreateCloneFile), status_(CloneStatus::cloning) {} - CloneInfo(const TaskIdType &taskId, - const std::string &user, - CloneTaskType type, - const std::string &source, - const std::string &destination, - const std::string &poolset, - uint64_t originId, - uint64_t destinationId, - uint64_t time, - CloneFileType fileType, - bool isLazy, - CloneStep nextStep, - CloneStatus status) + CloneInfo(const TaskIdType& taskId, const std::string& user, + CloneTaskType type, const std::string& source, + const std::string& destination, const std::string& poolset, + uint64_t originId, uint64_t destinationId, uint64_t time, + CloneFileType fileType, bool isLazy, CloneStep nextStep, + CloneStatus status) : taskId_(taskId), user_(user), type_(type), @@ -123,146 +109,94 @@ class CloneInfo { nextStep_(nextStep), status_(status) {} - TaskIdType GetTaskId() const { - return taskId_; - } + TaskIdType GetTaskId() const { return taskId_; } - void SetTaskId(const TaskIdType &taskId) { - taskId_ = taskId; - } + void SetTaskId(const TaskIdType& taskId) { taskId_ = taskId; } - std::string GetUser() const { - return user_; - } + std::string GetUser() const { return user_; } - void SetUser(const std::string &user) { - user_ = user; - } + void SetUser(const std::string& user) { 
user_ = user; } - CloneTaskType GetTaskType() const { - return type_; - } + CloneTaskType GetTaskType() const { return type_; } - void SetTaskType(CloneTaskType type) { - type_ = type; - } + void SetTaskType(CloneTaskType type) { type_ = type; } - std::string GetSrc() const { - return source_; - } + std::string GetSrc() const { return source_; } - void SetSrc(const std::string &source) { - source_ = source; - } + void SetSrc(const std::string& source) { source_ = source; } - std::string GetDest() const { - return destination_; - } + std::string GetDest() const { return destination_; } - void SetDest(const std::string &dest) { - destination_ = dest; - } + void SetDest(const std::string& dest) { destination_ = dest; } - std::string GetPoolset() const { - return poolset_; - } + std::string GetPoolset() const { return poolset_; } - void SetPoolset(const std::string &poolset) { - poolset_ = poolset; - } + void SetPoolset(const std::string& poolset) { poolset_ = poolset; } - uint64_t GetOriginId() const { - return originId_; - } + uint64_t GetOriginId() const { return originId_; } - void SetOriginId(uint64_t originId) { - originId_ = originId; - } + void SetOriginId(uint64_t originId) { originId_ = originId; } - uint64_t GetDestId() const { - return destinationId_; - } + uint64_t GetDestId() const { return destinationId_; } - void SetDestId(uint64_t destId) { - destinationId_ = destId; - } + void SetDestId(uint64_t destId) { destinationId_ = destId; } - uint64_t GetTime() const { - return time_; - } + uint64_t GetTime() const { return time_; } - void SetTime(uint64_t time) { - time_ = time; - } + void SetTime(uint64_t time) { time_ = time; } - CloneFileType GetFileType() const { - return fileType_; - } + CloneFileType GetFileType() const { return fileType_; } - void SetFileType(CloneFileType fileType) { - fileType_ = fileType; - } + void SetFileType(CloneFileType fileType) { fileType_ = fileType; } - bool GetIsLazy() const { - return isLazy_; - } + bool GetIsLazy() const { return isLazy_; } - void SetIsLazy(bool flag) { - isLazy_ = flag; - } + void SetIsLazy(bool flag) { isLazy_ = flag; } - CloneStep GetNextStep() const { - return nextStep_; - } + CloneStep GetNextStep() const { return nextStep_; } - void SetNextStep(CloneStep nextStep) { - nextStep_ = nextStep; - } - CloneStatus GetStatus() const { - return status_; - } + void SetNextStep(CloneStep nextStep) { nextStep_ = nextStep; } + CloneStatus GetStatus() const { return status_; } - void SetStatus(CloneStatus status) { - status_ = status; - } + void SetStatus(CloneStatus status) { status_ = status; } - bool SerializeToString(std::string *value) const; + bool SerializeToString(std::string* value) const; - bool ParseFromString(const std::string &value); + bool ParseFromString(const std::string& value); private: - // 任务Id - TaskIdType taskId_; - // 用户 + // Task Id + TaskIdType taskId_; + // Users std::string user_; - // 克隆或恢复 + // Clone or Restore CloneTaskType type_; - // 源文件或快照uuid + // Source file or snapshot uuid std::string source_; - // 目标文件名 + // Destination File Name std::string destination_; - // 目标文件所在的poolset + // The poolset where the target file is located std::string poolset_; - // 被恢复的原始文件id, 仅用于恢复 + // The original file ID that has been restored, for recovery purposes only uint64_t originId_; - // 目标文件id + // Target file id uint64_t destinationId_; - // 创建时间 + // Creation time uint64_t time_; - // 克隆/恢复的文件类型 + // Clone/Restore File Types CloneFileType fileType_; - // 是否lazy + // Lazy or not bool isLazy_; - // 克隆进度, 下一个步骤 + // 
Clone progress, next step CloneStep nextStep_; - // 处理的状态 + // Processing status CloneStatus status_; }; -std::ostream& operator<<(std::ostream& os, const CloneInfo &cloneInfo); +std::ostream& operator<<(std::ostream& os, const CloneInfo& cloneInfo); -// 快照处理状态 -enum class Status{ +// Snapshot processing status +enum class Status { done = 0, pending, deleting, @@ -271,187 +205,127 @@ enum class Status{ error }; -// 快照信息 +// Snapshot Information class SnapshotInfo { public: SnapshotInfo() - :uuid_(), - seqNum_(kUnInitializeSeqNum), - chunkSize_(0), - segmentSize_(0), - fileLength_(0), - stripeUnit_(0), - stripeCount_(0), - time_(0), - status_(Status::pending) {} - - SnapshotInfo(UUID uuid, - const std::string &user, - const std::string &fileName, - const std::string &snapshotName) - :uuid_(uuid), - user_(user), - fileName_(fileName), - snapshotName_(snapshotName), - seqNum_(kUnInitializeSeqNum), - chunkSize_(0), - segmentSize_(0), - fileLength_(0), - stripeUnit_(0), - stripeCount_(0), - time_(0), - status_(Status::pending) {} - SnapshotInfo(UUID uuid, - const std::string &user, - const std::string &fileName, - const std::string &desc, - uint64_t seqnum, - uint32_t chunksize, - uint64_t segmentsize, - uint64_t filelength, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - uint64_t time, - Status status) - :uuid_(uuid), - user_(user), - fileName_(fileName), - snapshotName_(desc), - seqNum_(seqnum), - chunkSize_(chunksize), - segmentSize_(segmentsize), - fileLength_(filelength), - stripeUnit_(stripeUnit), - stripeCount_(stripeCount), - poolset_(poolset), - time_(time), - status_(status) {} - - void SetUuid(const UUID &uuid) { - uuid_ = uuid; - } + : uuid_(), + seqNum_(kUnInitializeSeqNum), + chunkSize_(0), + segmentSize_(0), + fileLength_(0), + stripeUnit_(0), + stripeCount_(0), + time_(0), + status_(Status::pending) {} - UUID GetUuid() const { - return uuid_; - } + SnapshotInfo(UUID uuid, const std::string& user, + const std::string& fileName, const std::string& snapshotName) + : uuid_(uuid), + user_(user), + fileName_(fileName), + snapshotName_(snapshotName), + seqNum_(kUnInitializeSeqNum), + chunkSize_(0), + segmentSize_(0), + fileLength_(0), + stripeUnit_(0), + stripeCount_(0), + time_(0), + status_(Status::pending) {} + SnapshotInfo(UUID uuid, const std::string& user, + const std::string& fileName, const std::string& desc, + uint64_t seqnum, uint32_t chunksize, uint64_t segmentsize, + uint64_t filelength, uint64_t stripeUnit, uint64_t stripeCount, + const std::string& poolset, uint64_t time, Status status) + : uuid_(uuid), + user_(user), + fileName_(fileName), + snapshotName_(desc), + seqNum_(seqnum), + chunkSize_(chunksize), + segmentSize_(segmentsize), + fileLength_(filelength), + stripeUnit_(stripeUnit), + stripeCount_(stripeCount), + poolset_(poolset), + time_(time), + status_(status) {} - void SetUser(const std::string &user) { - user_ = user; - } + void SetUuid(const UUID& uuid) { uuid_ = uuid; } - std::string GetUser() const { - return user_; - } + UUID GetUuid() const { return uuid_; } - void SetFileName(const std::string &fileName) { - fileName_ = fileName; - } + void SetUser(const std::string& user) { user_ = user; } - std::string GetFileName() const { - return fileName_; - } + std::string GetUser() const { return user_; } + + void SetFileName(const std::string& fileName) { fileName_ = fileName; } + + std::string GetFileName() const { return fileName_; } - void SetSnapshotName(const std::string &snapshotName) { + void SetSnapshotName(const 
std::string& snapshotName) { snapshotName_ = snapshotName; } - std::string GetSnapshotName() const { - return snapshotName_; - } + std::string GetSnapshotName() const { return snapshotName_; } - void SetSeqNum(uint64_t seqNum) { - seqNum_ = seqNum; - } + void SetSeqNum(uint64_t seqNum) { seqNum_ = seqNum; } - uint64_t GetSeqNum() const { - return seqNum_; - } + uint64_t GetSeqNum() const { return seqNum_; } - void SetChunkSize(uint32_t chunkSize) { - chunkSize_ = chunkSize; - } + void SetChunkSize(uint32_t chunkSize) { chunkSize_ = chunkSize; } - uint32_t GetChunkSize() const { - return chunkSize_; - } + uint32_t GetChunkSize() const { return chunkSize_; } - void SetSegmentSize(uint64_t segmentSize) { - segmentSize_ = segmentSize; - } + void SetSegmentSize(uint64_t segmentSize) { segmentSize_ = segmentSize; } - uint64_t GetSegmentSize() const { - return segmentSize_; - } + uint64_t GetSegmentSize() const { return segmentSize_; } - void SetFileLength(uint64_t fileLength) { - fileLength_ = fileLength; - } + void SetFileLength(uint64_t fileLength) { fileLength_ = fileLength; } - uint64_t GetFileLength() const { - return fileLength_; - } + uint64_t GetFileLength() const { return fileLength_; } - void SetStripeUnit(uint64_t stripeUnit) { - stripeUnit_ = stripeUnit; - } + void SetStripeUnit(uint64_t stripeUnit) { stripeUnit_ = stripeUnit; } - uint64_t GetStripeUnit() const { - return stripeUnit_; - } + uint64_t GetStripeUnit() const { return stripeUnit_; } - void SetStripeCount(uint64_t stripeCount) { - stripeCount_ = stripeCount; - } + void SetStripeCount(uint64_t stripeCount) { stripeCount_ = stripeCount; } - uint64_t GetStripeCount() const { - return stripeCount_; - } + uint64_t GetStripeCount() const { return stripeCount_; } - void SetPoolset(const std::string& poolset) { - poolset_ = poolset; - } + void SetPoolset(const std::string& poolset) { poolset_ = poolset; } - const std::string& GetPoolset() const { - return poolset_; - } + const std::string& GetPoolset() const { return poolset_; } - void SetCreateTime(uint64_t createTime) { - time_ = createTime; - } + void SetCreateTime(uint64_t createTime) { time_ = createTime; } - uint64_t GetCreateTime() const { - return time_; - } + uint64_t GetCreateTime() const { return time_; } - void SetStatus(Status status) { - status_ = status; - } + void SetStatus(Status status) { status_ = status; } - Status GetStatus() const { - return status_; - } + Status GetStatus() const { return status_; } - bool SerializeToString(std::string *value) const; + bool SerializeToString(std::string* value) const; - bool ParseFromString(const std::string &value); + bool ParseFromString(const std::string& value); private: - // 快照uuid + // Snapshot uuid UUID uuid_; - // 租户信息 + // Tenant Information std::string user_; - // 快照目标文件名 + // Snapshot Destination File Name std::string fileName_; - // 快照名 + // Snapshot Name std::string snapshotName_; - // 快照版本号 + // Snapshot version number uint64_t seqNum_; - // 文件的chunk大小 + // Chunk size of the file uint32_t chunkSize_; - // 文件的segment大小 + // The segment size of the file uint64_t segmentSize_; - // 文件大小 + // File size uint64_t fileLength_; // stripe size uint64_t stripeUnit_; @@ -459,16 +333,15 @@ class SnapshotInfo { uint64_t stripeCount_; // poolset std::string poolset_; - // 快照创建时间 + // Snapshot creation time uint64_t time_; - // 快照处理的状态 + // Status of snapshot processing Status status_; }; -std::ostream& operator<<(std::ostream& os, const SnapshotInfo &snapshotInfo); +std::ostream& operator<<(std::ostream& os, const 
SnapshotInfo& snapshotInfo); } // namespace snapshotcloneserver } // namespace curve - #endif // SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_INFO_H_ diff --git a/src/snapshotcloneserver/common/snapshotclone_meta_store.h b/src/snapshotcloneserver/common/snapshotclone_meta_store.h index ff550f5fc7..9e15692eb2 100644 --- a/src/snapshotcloneserver/common/snapshotclone_meta_store.h +++ b/src/snapshotcloneserver/common/snapshotclone_meta_store.h @@ -23,15 +23,15 @@ #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_META_STORE_H_ #define SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_META_STORE_H_ -#include -#include #include #include -#include //NOLINT +#include //NOLINT +#include +#include +#include "src/common/concurrent/concurrent.h" #include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/config.h" -#include "src/common/concurrent/concurrent.h" #include "src/snapshotcloneserver/common/snapshotclone_info.h" namespace curve { @@ -43,25 +43,25 @@ class SnapshotCloneMetaStore { public: SnapshotCloneMetaStore() {} virtual ~SnapshotCloneMetaStore() {} - // 添加一条快照信息记录 + // Add a snapshot information record /** - * 添加一条快照记录到metastore中 - * @param 快照信息结构体 - * @return: 0 插入成功/ -1 插入失败 + * Add a snapshot record to metastore + * @param snapshot information structure + * @return: 0 insertion successful/-1 insertion failed */ - virtual int AddSnapshot(const SnapshotInfo &snapinfo) = 0; + virtual int AddSnapshot(const SnapshotInfo& snapinfo) = 0; /** - * 从metastore删除一条快照记录 - * @param 快照任务的uuid,全局唯一 - * @return 0 删除成功/ -1 删除失败 + * Delete a snapshot record from metastore + * @param The uuid of the snapshot task, globally unique + * @return 0 successfully deleted/-1 failed to delete */ - virtual int DeleteSnapshot(const UUID &uuid) = 0; + virtual int DeleteSnapshot(const UUID& uuid) = 0; /** - * 更新快照记录 - * @param 快照信息结构体 - * @return: 0 更新成功/ -1 更新失败 + * Update snapshot records + * @param snapshot information structure + * @return: 0 successfully updated/-1 failed to update */ - virtual int UpdateSnapshot(const SnapshotInfo &snapinfo) = 0; + virtual int UpdateSnapshot(const SnapshotInfo& snapinfo) = 0; /** * @brief Compare and set snapshot @@ -75,76 +75,76 @@ class SnapshotCloneMetaStore { virtual int CASSnapshot(const UUID& uuid, CASFunc cas) = 0; /** - * 获取指定快照的快照信息 - * @param 快照的uuid - * @param 保存快照信息的指针 - * @return 0 获取成功/ -1 获取失败 + * Obtain snapshot information for the specified snapshot + * @param uuid of snapshot + * @param pointer to save snapshot information + * @return 0 successfully obtained/-1 failed to obtain */ - virtual int GetSnapshotInfo(const UUID &uuid, SnapshotInfo *info) = 0; + virtual int GetSnapshotInfo(const UUID& uuid, SnapshotInfo* info) = 0; /** - * 获取指定文件的快照信息列表 - * @param 文件名 - * @param 保存快照信息的vector指针 - * @return 0 获取成功/ -1 获取失败 + * Obtain a list of snapshot information for the specified file + * @param file name + * @param vector pointer to save snapshot information + * @return 0 successfully obtained/-1 failed to obtain */ - virtual int GetSnapshotList(const std::string &filename, - std::vector *v) = 0; + virtual int GetSnapshotList(const std::string& filename, + std::vector* v) = 0; /** - * 获取全部的快照信息列表 - * @param 保存快照信息的vector指针 - * @return: 0 获取成功/ -1 获取失败 + * Obtain a list of all snapshot information + * @param vector pointer to save snapshot information + * @return: 0 successfully obtained/-1 failed to obtain */ - virtual int GetSnapshotList(std::vector *list) = 0; + virtual int GetSnapshotList(std::vector* list) = 0; /** - * @brief 
获取快照总数 + * @brief Get the total number of snapshots * - * @return 快照总数 + * @return Total number of snapshots */ virtual uint32_t GetSnapshotCount() = 0; /** - * @brief 插入一条clone任务记录到metastore - * @param clone记录信息 - * @return: 0 插入成功/ -1 插入失败 + * @brief Insert a clone task record into metastore + * @param cloneInfo Clone record information + * @return: 0 insertion successful/-1 insertion failed */ - virtual int AddCloneInfo(const CloneInfo &cloneInfo) = 0; + virtual int AddCloneInfo(const CloneInfo& cloneInfo) = 0; /** - * @brief 从metastore删除一条clone任务记录 - * @param clone任务的任务id - * @return: 0 删除成功/ -1 删除失败 + * @brief Delete a clone task record from metastore + * @param taskID Task ID of the clone task + * @return: 0 successfully deleted/-1 failed to delete */ - virtual int DeleteCloneInfo(const std::string &taskID) = 0; + virtual int DeleteCloneInfo(const std::string& taskID) = 0; /** - * @brief 更新一条clone任务记录 - * @param clone记录信息 - * @return: 0 更新成功/ -1 更新失败 + * @brief Update a clone task record + * @param cloneInfo Clone record information + * @return: 0 successfully updated/-1 failed to update */ - virtual int UpdateCloneInfo(const CloneInfo &cloneInfo) = 0; + virtual int UpdateCloneInfo(const CloneInfo& cloneInfo) = 0; /** - * @brief 获取指定task id的clone任务信息 - * @param clone任务id - * @param[out] clone记录信息的指针 - * @return: 0 获取成功/ -1 获取失败 + * @brief Get clone task information for the specified task ID + * @param taskID Clone task ID + * @param[out] info Pointer to the clone record + * @return: 0 successfully obtained/-1 failed to obtain */ - virtual int GetCloneInfo(const std::string &taskID, CloneInfo *info) = 0; + virtual int GetCloneInfo(const std::string& taskID, CloneInfo* info) = 0; /** - * @brief 获取指定文件的clone任务信息 + * @brief Get clone task information for the specified file * - * @param fileName 文件名 - * @param[out] clone记录信息的指针 - * @return: 0 获取成功/ -1 获取失败 + * @param fileName File name + * @param[out] list Pointer to a vector to receive the clone records + * @return: 0 successfully obtained/-1 failed to obtain */ - virtual int GetCloneInfoByFileName( - const std::string &fileName, std::vector *list) = 0; + virtual int GetCloneInfoByFileName(const std::string& fileName, + std::vector* list) = 0; /** - * @brief 获取所有clone任务的信息列表 - * @param[out] 只想clone任务vector指针 - * @return: 0 获取成功/ -1 获取失败 + * @brief Get a list of information for all clone tasks + * @param[out] list Pointer to a vector to receive all clone task records + * @return: 0 successfully obtained/-1 failed to obtain */ - virtual int GetCloneInfoList(std::vector *list) = 0; + virtual int GetCloneInfoList(std::vector* list) = 0; }; } // namespace snapshotcloneserver
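For orientation, a minimal sketch of how a caller might drive this interface (illustrative only: it assumes a concrete store such as the etcd-backed implementation below, that TaskIdType accepts a string literal, and it elides real error handling; the ids, user and paths are made-up values):

    // Sketch: record a clone task in the metastore, then read it back.
    int RecordCloneTask(SnapshotCloneMetaStore* store) {
        CloneInfo info("task-001", "alice", CloneTaskType::kClone,
                       "/vol/src", "/vol/dst", "poolset1",
                       CloneFileType::kFile, /*isLazy=*/true);
        if (store->AddCloneInfo(info) < 0) {
            return -1;  // insertion failed
        }
        CloneInfo readBack;
        return store->GetCloneInfo("task-001", &readBack);  // 0 on success
    }

The eight-argument CloneInfo constructor used here is the one defined in snapshotclone_info.h above.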
"src/snapshotcloneserver/common/snapshotclonecodec.h" -using ::curve::kvstorage::KVStorageClient; -using ::curve::common::RWLock; using ::curve::common::ReadLockGuard; +using ::curve::common::RWLock; using ::curve::common::WriteLockGuard; +using ::curve::kvstorage::KVStorageClient; namespace curve { namespace snapshotcloneserver { @@ -45,54 +45,53 @@ namespace snapshotcloneserver { class SnapshotCloneMetaStoreEtcd : public SnapshotCloneMetaStore { public: SnapshotCloneMetaStoreEtcd(std::shared_ptr client, - std::shared_ptr codec) - : client_(client), - codec_(codec) {} + std::shared_ptr codec) + : client_(client), codec_(codec) {} int Init(); - int AddSnapshot(const SnapshotInfo &info) override; + int AddSnapshot(const SnapshotInfo& info) override; - int DeleteSnapshot(const UUID &uuid) override; + int DeleteSnapshot(const UUID& uuid) override; - int UpdateSnapshot(const SnapshotInfo &info) override; + int UpdateSnapshot(const SnapshotInfo& info) override; int CASSnapshot(const UUID& uuid, CASFunc cas) override; - int GetSnapshotInfo(const UUID &uuid, SnapshotInfo *info) override; + int GetSnapshotInfo(const UUID& uuid, SnapshotInfo* info) override; - int GetSnapshotList(const std::string &filename, - std::vector *v) override; + int GetSnapshotList(const std::string& filename, + std::vector* v) override; - int GetSnapshotList(std::vector *list) override; + int GetSnapshotList(std::vector* list) override; uint32_t GetSnapshotCount() override; - int AddCloneInfo(const CloneInfo &info) override; + int AddCloneInfo(const CloneInfo& info) override; - int DeleteCloneInfo(const std::string &uuid) override; + int DeleteCloneInfo(const std::string& uuid) override; - int UpdateCloneInfo(const CloneInfo &info) override; + int UpdateCloneInfo(const CloneInfo& info) override; - int GetCloneInfo(const std::string &uuid, CloneInfo *info) override; + int GetCloneInfo(const std::string& uuid, CloneInfo* info) override; - int GetCloneInfoByFileName( - const std::string &fileName, std::vector *list) override; + int GetCloneInfoByFileName(const std::string& fileName, + std::vector* list) override; - int GetCloneInfoList(std::vector *list) override; + int GetCloneInfoList(std::vector* list) override; private: /** - * @brief 加载快照信息 + * @brief Load snapshot information * - * @return 0 加载成功/ -1 加载失败 + * @return 0 successfully loaded/ -1 failed to load */ int LoadSnapshotInfos(); /** - * @brief 加载克隆信息 + * @brief Load clone information * - * @return 0 加载成功/ -1 加载失败 + * @return 0 successfully loaded/ -1 failed to load */ int LoadCloneInfos(); @@ -100,11 +99,11 @@ class SnapshotCloneMetaStoreEtcd : public SnapshotCloneMetaStore { std::shared_ptr client_; std::shared_ptr codec_; - // key is UUID, map 需要考虑并发保护 + // Key is UUID, map needs to consider concurrency protection std::map snapInfos_; // snap info lock RWLock snapInfos_mutex; - // key is TaskIdType, map 需要考虑并发保护 + // Key is TaskIdType, map needs to consider concurrency protection std::map cloneInfos_; // clone info map lock RWLock cloneInfos_lock_; diff --git a/src/snapshotcloneserver/common/snapshotclone_metric.h b/src/snapshotcloneserver/common/snapshotclone_metric.h index 410d9b19f9..e4fd013334 100644 --- a/src/snapshotcloneserver/common/snapshotclone_metric.h +++ b/src/snapshotcloneserver/common/snapshotclone_metric.h @@ -24,9 +24,11 @@ #define SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_METRIC_H_ #include -#include + #include #include +#include + #include "src/common/stringstatus.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" @@ 
diff --git a/src/snapshotcloneserver/common/snapshotclone_metric.h b/src/snapshotcloneserver/common/snapshotclone_metric.h index 410d9b19f9..e4fd013334 100644 --- a/src/snapshotcloneserver/common/snapshotclone_metric.h +++ b/src/snapshotcloneserver/common/snapshotclone_metric.h @@ -24,9 +24,11 @@ #define SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_METRIC_H_ #include -#include + #include #include +#include + #include "src/common/stringstatus.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" @@ -39,8 +41,8 @@ namespace snapshotcloneserver { class SnapshotTaskInfo; class CloneTaskInfo; -static uint32_t GetSnapshotTotalNum(void *arg) { - SnapshotCloneMetaStore *metaStore = +static uint32_t GetSnapshotTotalNum(void* arg) { + SnapshotCloneMetaStore* metaStore = reinterpret_cast(arg); uint32_t snapshotCount = 0; if (metaStore != nullptr) { @@ -53,27 +55,27 @@ struct SnapshotMetric { const std::string SnapshotMetricPrefix = "snapshotcloneserver_snapshot_metric_"; - // 正在进行的快照数量 + // Number of snapshots in progress bvar::Adder snapshotDoing; - // 正在等待的快照数量 + // Number of waiting snapshots bvar::Adder snapshotWaiting; - // 累计成功的快照数量 + // Cumulative number of successful snapshots bvar::Adder snapshotSucceed; - // 累计失败的快照数量 + // Cumulative number of failed snapshots bvar::Adder snapshotFailed; std::shared_ptr metaStore_; - // 系统内快照总量 + // Total number of snapshots within the system bvar::PassiveStatus snapshotNum; - explicit SnapshotMetric(std::shared_ptr metaStore) : - snapshotDoing(SnapshotMetricPrefix, "snapshot_doing"), - snapshotWaiting(SnapshotMetricPrefix, "snapshot_waiting"), - snapshotSucceed(SnapshotMetricPrefix, "snapshot_succeed"), - snapshotFailed(SnapshotMetricPrefix, "snapshot_failed"), - metaStore_(metaStore), - snapshotNum(SnapshotMetricPrefix + "snapshot_total_num", - GetSnapshotTotalNum, metaStore_.get()) {} + explicit SnapshotMetric(std::shared_ptr metaStore) + : snapshotDoing(SnapshotMetricPrefix, "snapshot_doing"), + snapshotWaiting(SnapshotMetricPrefix, "snapshot_waiting"), + snapshotSucceed(SnapshotMetricPrefix, "snapshot_succeed"), + snapshotFailed(SnapshotMetricPrefix, "snapshot_failed"), + metaStore_(metaStore), + snapshotNum(SnapshotMetricPrefix + "snapshot_total_num", + GetSnapshotTotalNum, metaStore_.get()) {} }; struct SnapshotInfoMetric { @@ -81,60 +83,56 @@ "snapshotcloneserver_snapshotInfo_metric_"; StringStatus metric; - explicit SnapshotInfoMetric(const std::string &snapshotId) { + explicit SnapshotInfoMetric(const std::string& snapshotId) { metric.ExposeAs(SnapshotInfoMetricPrefix, snapshotId); } - void Update(SnapshotTaskInfo *taskInfo); + void Update(SnapshotTaskInfo* taskInfo); };
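These counters follow a doing/succeed/failed pattern. A sketch of how task code is expected to drive them (illustrative only, not a call site from this patch, and assuming the Adder value type is signed so that << -1 undoes an earlier increment):

    // Illustrative: account for one snapshot task that just finished.
    void RecordSnapshotOutcome(SnapshotMetric* metric, bool succeeded) {
        metric->snapshotDoing << -1;  // bvar::Adder counts via operator<<
        if (succeeded) {
            metric->snapshotSucceed << 1;
        } else {
            metric->snapshotFailed << 1;
        }
    }

snapshotNum needs no explicit updates: it is a PassiveStatus that calls GetSnapshotTotalNum, and thus the metastore, each time it is read.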
struct CloneMetric { - const std::string CloneMetricPrefix = - "snapshotcloneserver_clone_metric_"; + const std::string CloneMetricPrefix = "snapshotcloneserver_clone_metric_"; - // 正在执行的克隆任务数量 + // Number of clone tasks being executed bvar::Adder cloneDoing; - // 累计成功的克隆任务数量 + // Cumulative number of successful clone tasks bvar::Adder cloneSucceed; - // 累计失败的克隆任务数量 + // Cumulative number of failed clone tasks bvar::Adder cloneFailed; - // 正在执行的恢复任务数量 + // Number of recover tasks being executed bvar::Adder recoverDoing; - // 累计成功的恢复任务数量 + // Cumulative number of successful recover tasks bvar::Adder recoverSucceed; - // 累计失败的恢复任务数量 + // Cumulative number of failed recover tasks bvar::Adder recoverFailed; - // 正在执行的Flatten任务数量 + // Number of Flatten tasks being executed bvar::Adder flattenDoing; - // 累计成功的Flatten任务数量 + // Cumulative number of successful Flatten tasks bvar::Adder flattenSucceed; - // 累计失败的Flatten任务数量 + // Cumulative number of failed Flatten tasks bvar::Adder flattenFailed; - CloneMetric() : - cloneDoing(CloneMetricPrefix, "clone_doing"), - cloneSucceed(CloneMetricPrefix, "clone_succeed"), - cloneFailed(CloneMetricPrefix, "clone_failed"), - recoverDoing(CloneMetricPrefix, "recover_doing"), - recoverSucceed(CloneMetricPrefix, "recover_succeed"), - recoverFailed(CloneMetricPrefix, "recover_failed"), - flattenDoing(CloneMetricPrefix, "flatten_doing"), - flattenSucceed(CloneMetricPrefix, "flatten_succeed"), - flattenFailed(CloneMetricPrefix, "flatten_failed") {} + CloneMetric() + : cloneDoing(CloneMetricPrefix, "clone_doing"), + cloneSucceed(CloneMetricPrefix, "clone_succeed"), + cloneFailed(CloneMetricPrefix, "clone_failed"), + recoverDoing(CloneMetricPrefix, "recover_doing"), + recoverSucceed(CloneMetricPrefix, "recover_succeed"), + recoverFailed(CloneMetricPrefix, "recover_failed"), + flattenDoing(CloneMetricPrefix, "flatten_doing"), + flattenSucceed(CloneMetricPrefix, "flatten_succeed"), + flattenFailed(CloneMetricPrefix, "flatten_failed") {} - void UpdateBeforeTaskBegin( - const CloneTaskType &taskType); + void UpdateBeforeTaskBegin(const CloneTaskType& taskType); - void UpdateAfterTaskFinish( - const CloneTaskType &taskType, - const CloneStatus &status); + void UpdateAfterTaskFinish(const CloneTaskType& taskType, + const CloneStatus& status); void UpdateFlattenTaskBegin(); - void UpdateAfterFlattenTaskFinish( - const CloneStatus &status); + void UpdateAfterFlattenTaskFinish(const CloneStatus& status); }; struct CloneInfoMetric { @@ -142,14 +140,13 @@ "snapshotcloneserver_cloneInfo_metric_"; StringStatus metric; - explicit CloneInfoMetric(const std::string &cloneTaskId) { + explicit CloneInfoMetric(const std::string& cloneTaskId) { metric.ExposeAs(CloneInfoMetricPrefix, cloneTaskId); } - void Update(CloneTaskInfo *taskInfo); + void Update(CloneTaskInfo* taskInfo); }; - } // namespace snapshotcloneserver } // namespace curve diff --git a/src/snapshotcloneserver/common/task.h b/src/snapshotcloneserver/common/task.h index bc0faa4178..0034230311 100644 --- a/src/snapshotcloneserver/common/task.h +++ b/src/snapshotcloneserver/common/task.h @@ -25,6 +25,7 @@ #include #include + #include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/task_tracker.h" @@ -33,8 +34,7 @@ namespace snapshotcloneserver { class Task { public: - explicit Task(const TaskIdType &taskId) - : taskId_(taskId) {} + explicit Task(const TaskIdType& taskId) : taskId_(taskId) {} virtual ~Task() {} @@ -44,47 +44,40 @@ Task& operator=(Task&&) = default; /** - * @brief 获取快照任务执行体闭包 + * @brief Get the closure that executes this snapshot task * - * @return 快照任务执行体 + * @return the task's executable closure */ virtual std::function clousre() { - return [this] () { - Run(); - }; + return [this]() { Run(); }; } /** - * @brief 获取快照任务id + * @brief Get the snapshot task id * - * @return 快照任务id + * @return the snapshot task id */ - TaskIdType GetTaskId() const { - return taskId_; - } + TaskIdType GetTaskId() const { return taskId_; } /** - * @brief 快照执行函数接口 + * @brief Task execution entry point */ virtual void Run() = 0; private: - // 快照id + // Snapshot task id TaskIdType taskId_; }; class TrackerTask : public Task { public: - explicit TrackerTask(const TaskIdType &taskId) - : Task(taskId) {} + explicit TrackerTask(const TaskIdType& taskId) : Task(taskId) {} void SetTracker(std::shared_ptr tracker) { tracker_ = tracker; } - std::shared_ptr GetTracker() { - return tracker_; - } + std::shared_ptr GetTracker() { return tracker_; } private: std::shared_ptr tracker_; @@ -93,5 +86,4 @@ } // namespace snapshotcloneserver } // namespace curve - #endif // SRC_SNAPSHOTCLONESERVER_COMMON_TASK_H_
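Putting Task together with the ThreadPool declared further below, a minimal usage sketch (illustrative only; it assumes TaskIdType accepts a string literal and that the pool was created and started elsewhere):

    // Sketch: a trivial task and how it would be queued on the pool.
    class LogTask : public Task {
     public:
        explicit LogTask(const TaskIdType& taskId) : Task(taskId) {}
        void Run() override { LOG(INFO) << "task " << GetTaskId() << " ran"; }
    };

    // ThreadPool::PushTask enqueues exactly task->clousre().
    pool.PushTask(std::make_shared<LogTask>("task-42"));

Note that the closure accessor is spelled clousre() in the source, so callers must match that spelling.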
diff --git a/src/snapshotcloneserver/common/task_info.h b/src/snapshotcloneserver/common/task_info.h index cc72201d52..2faf6cb1b7 100644 --- a/src/snapshotcloneserver/common/task_info.h +++ b/src/snapshotcloneserver/common/task_info.h @@ -23,11 +23,10 @@ #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_TASK_INFO_H_ #define SRC_SNAPSHOTCLONESERVER_COMMON_TASK_INFO_H_ - -#include -#include -#include //NOLINT #include +#include +#include //NOLINT +#include #include "src/common/concurrent/concurrent.h" @@ -36,10 +35,7 @@ namespace snapshotcloneserver { class TaskInfo { public: - TaskInfo() - : progress_(0), - isFinish_(false), - isCanceled_(false) {} + TaskInfo() : progress_(0), isFinish_(false), isCanceled_(false) {} virtual ~TaskInfo() {} TaskInfo(const TaskInfo&) = delete; @@ -48,59 +44,47 @@ TaskInfo& operator=(TaskInfo&&) = default; /** - * @brief 设置任务完成度百分比 + * @brief Set the task completion percentage * - * @param persent 任务完成度百分比 + * @param persent Task completion percentage */ - void SetProgress(uint32_t persent) { - progress_ = persent; - } + void SetProgress(uint32_t persent) { progress_ = persent; } /** - * @brief 获取任务完成度百分比 + * @brief Get the task completion percentage * - * @return 任务完成度百分比 + * @return Task completion percentage */ - uint32_t GetProgress() const { - return progress_; - } + uint32_t GetProgress() const { return progress_; } /** - * @brief 完成任务 + * @brief Complete the task */ - void Finish() { - isFinish_.store(true); - } + void Finish() { isFinish_.store(true); } /** - * @brief 获取任务是否完成 + * @brief Check whether the task has finished * - * @retval true 任务完成 - * @retval false 任务未完成 + * @retval true Task finished + * @retval false Task not finished */ - bool IsFinish() const { - return isFinish_.load(); - } + bool IsFinish() const { return isFinish_.load(); } /** - * @brief 取消任务 + * @brief Cancel the task */ - void Cancel() { - isCanceled_ = true; - } + void Cancel() { isCanceled_ = true; } /** - * @brief 获取任务是否取消 + * @brief Check whether the task has been canceled * - * @retval true 任务已取消 - * @retval false 任务未取消 + * @retval true The task has been canceled + * @retval false The task was not canceled */ - bool IsCanceled() const { - return isCanceled_; - } + bool IsCanceled() const { return isCanceled_; } /** - * @brief 重置任务 + * @brief Reset the task state */ void Reset() { isFinish_.store(false); @@ -108,26 +92,24 @@ } /** - * @brief 获取任务锁的引用,以便使用LockGuard加锁解锁 + * @brief Obtain a reference to the task lock so it can be locked and + * unlocked via LockGuard * - * 用于同步任务完成和取消功能 - * 1. 任务完成前,先锁定任务,然后判断任务是否取消, - * 若已取消,则释放锁, - * 否则执行任务完成逻辑之后释放锁。 - * 2. 任务取消前,先锁定任务,然后判断任务是否完成, - * 若已完成,则释放锁, - * 否则执行任务取消逻辑之后释放锁。 + * Used to synchronize task completion and cancellation: + * 1. Before completing the task, lock it, then check whether it has been + * canceled: if so, release the lock; otherwise run the completion logic + * and then release the lock. + * 2. Before canceling the task, lock it, then check whether it has + * finished: if so, release the lock; otherwise run the cancellation + * logic and then release the lock.
*/ - curve::common::Mutex& GetLockRef() { - return lock_; - } + curve::common::Mutex& GetLockRef() { return lock_; } private: - // 任务完成度百分比 + // Task completion percentage uint32_t progress_; - // 任务任务是否结束 + // Is the task completed std::atomic_bool isFinish_; - // 任务是否被取消 + // Has the task been canceled bool isCanceled_; mutable curve::common::Mutex lock_; }; diff --git a/src/snapshotcloneserver/common/thread_pool.h b/src/snapshotcloneserver/common/thread_pool.h index 1f7b4ea697..1e5c664f15 100644 --- a/src/snapshotcloneserver/common/thread_pool.h +++ b/src/snapshotcloneserver/common/thread_pool.h @@ -24,6 +24,7 @@ #define SRC_SNAPSHOTCLONESERVER_COMMON_THREAD_POOL_H_ #include + #include "src/common/concurrent/task_thread_pool.h" #include "src/snapshotcloneserver/common/task.h" @@ -31,52 +32,49 @@ namespace curve { namespace snapshotcloneserver { /** - * @brief 快照线程池 + * @brief snapshot thread pool */ class ThreadPool { public: - /** - * @brief 构造函数 - * - * @param threadNum 最大线程数 - */ - explicit ThreadPool(int threadNum) - : threadNum_(threadNum) {} /** - * @brief 启动线程池 + * @brief constructor + * + * @param threadNum maximum number of threads + */ + explicit ThreadPool(int threadNum) : threadNum_(threadNum) {} + /** + * @brief Start Thread Pool */ int Start(); /** - * @brief 停止线程池 + * @brief Stop thread pool */ void Stop(); /** - * @brief 添加快照任务 + * @brief Add snapshot task * - * @param task 快照任务 + * @param task snapshot task */ void PushTask(std::shared_ptr task) { threadPool_.Enqueue(task->clousre()); } /** - * @brief 添加快照任务 + * @brief Add snapshot task * - * @param task 快照任务 + * @param task snapshot task */ - void PushTask(Task* task) { - threadPool_.Enqueue(task->clousre()); - } + void PushTask(Task* task) { threadPool_.Enqueue(task->clousre()); } private: /** - * @brief 通用线程池 + * @brief Universal Thread Pool */ curve::common::TaskThreadPool<> threadPool_; /** - * @brief 线程数 + * @brief Number of threads */ int threadNum_; }; diff --git a/src/snapshotcloneserver/main.cpp b/src/snapshotcloneserver/main.cpp index b44468b857..725a0c12f3 100644 --- a/src/snapshotcloneserver/main.cpp +++ b/src/snapshotcloneserver/main.cpp @@ -19,23 +19,26 @@ * Created Date: Fri Dec 14 2018 * Author: xuchaojie */ -#include #include +#include + #include "src/snapshotcloneserver/snapshotclone_server.h" -DEFINE_string(conf, "conf/snapshot_clone_server.conf", "snapshot&clone server config file path"); //NOLINT +DEFINE_string(conf, "conf/snapshot_clone_server.conf", + "snapshot&clone server config file path"); // NOLINT DEFINE_string(addr, "127.0.0.1:5555", "snapshotcloneserver address"); using Configuration = ::curve::common::Configuration; using SnapShotCloneServer = ::curve::snapshotcloneserver::SnapShotCloneServer; -void LoadConfigFromCmdline(Configuration *conf) { - // 如果命令行有设置, 命令行覆盖配置文件中的字段 +void LoadConfigFromCmdline(Configuration* conf) { + // If there are settings on the command line, the command line overwrites + // the fields in the configuration file google::CommandLineFlagInfo info; if (GetCommandLineFlagInfo("addr", &info) && !info.is_default) { conf->SetStringValue("server.address", FLAGS_addr); } - // 设置日志存放文件夹 + // Set log storage folder if (FLAGS_log_dir.empty()) { if (!conf->GetStringValue("log.dir", &FLAGS_log_dir)) { LOG(WARNING) << "no log.dir in " << FLAGS_conf @@ -68,13 +71,12 @@ int snapshotcloneserver_main(std::shared_ptr conf) { return 0; } -int main(int argc, char **argv) { +int main(int argc, char** argv) { google::ParseCommandLineFlags(&argc, &argv, true); std::shared_ptr conf = 
std::make_shared(); conf->SetConfigPath(FLAGS_conf); if (!conf->LoadConfig()) { - LOG(ERROR) << "Failed to open config file: " - << conf->GetConfigPath(); + LOG(ERROR) << "Failed to open config file: " << conf->GetConfigPath(); return -1; } LoadConfigFromCmdline(conf.get()); @@ -83,4 +85,3 @@ int main(int argc, char **argv) { google::InitGoogleLogging(argv[0]); snapshotcloneserver_main(conf); } - diff --git a/src/snapshotcloneserver/snapshot/snapshot_core.cpp b/src/snapshotcloneserver/snapshot/snapshot_core.cpp index 6abb94b5e9..ec541c4c80 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_core.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_core.cpp @@ -23,17 +23,17 @@ #include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include -#include + #include +#include #include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/snapshotcloneserver/snapshot/snapshot_task.h" - #include "src/common/uuid.h" +#include "src/snapshotcloneserver/snapshot/snapshot_task.h" -using ::curve::common::UUIDGenerator; -using ::curve::common::NameLockGuard; using ::curve::common::LockGuard; +using ::curve::common::NameLockGuard; +using ::curve::common::UUIDGenerator; namespace curve { namespace snapshotcloneserver { @@ -47,10 +47,10 @@ int SnapshotCoreImpl::Init() { return kErrCodeSuccess; } -int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, - const std::string &user, - const std::string &snapshotName, - SnapshotInfo *snapInfo) { +int SnapshotCoreImpl::CreateSnapshotPre(const std::string& file, + const std::string& user, + const std::string& snapshotName, + SnapshotInfo* snapInfo) { NameLockGuard lockGuard(snapshotNameLock_, file); std::vector fileInfo; metaStore_->GetSnapshotList(file, &fileInfo); @@ -60,11 +60,10 @@ int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, if ((snap.GetUser() == user) && (snap.GetSnapshotName() == snapshotName)) { LOG(INFO) << "CreateSnapshotPre find same snap task" - << ", file = " << file - << ", user = " << user + << ", file = " << file << ", user = " << user << ", snapshotName = " << snapshotName << ", Exist SnapInfo : " << snap; - // 视为同一个快照,返回任务已存在 + // Treat as the same snapshot, return task already exists *snapInfo = snap; return kErrCodeTaskExist; } @@ -85,20 +84,17 @@ int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, break; case -LIBCURVE_ERROR::NOTEXIST: LOG(ERROR) << "create snapshot file not exist" - << ", file = " << file - << ", user = " << user + << ", file = " << file << ", user = " << user << ", snapshotName = " << snapshotName; return kErrCodeFileNotExist; case -LIBCURVE_ERROR::AUTHFAIL: LOG(ERROR) << "create snapshot by invalid user" - << ", file = " << file - << ", user = " << user + << ", file = " << file << ", user = " << user << ", snapshotName = " << snapshotName; return kErrCodeInvalidUser; default: LOG(ERROR) << "GetFileInfo encounter an error" - << ", ret = " << ret - << ", file = " << file + << ", ret = " << ret << ", file = " << file << ", user = " << user; return kErrCodeInternalError; } @@ -117,8 +113,7 @@ int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, ret = metaStore_->AddSnapshot(info); if (ret < 0) { LOG(ERROR) << "AddSnapshot error," - << " ret = " << ret - << ", uuid = " << uuid + << " ret = " << ret << ", uuid = " << uuid << ", fileName = " << file << ", snapshotName = " << snapshotName; return ret; @@ -131,46 +126,56 @@ constexpr uint32_t kProgressCreateSnapshotOnCurvefsComplete = 5; constexpr uint32_t kProgressBuildChunkIndexDataComplete = 6; constexpr 
uint32_t kProgressBuildSnapshotMapComplete = 10; constexpr uint32_t kProgressTransferSnapshotDataStart = - kProgressBuildSnapshotMapComplete; + kProgressBuildSnapshotMapComplete; constexpr uint32_t kProgressTransferSnapshotDataComplete = 99; constexpr uint32_t kProgressComplete = 100; /** - * @brief 异步执行创建快照任务并更新任务进度 + * @brief Asynchronously execute the create-snapshot task and update its + * progress * - * 快照进度规划如下: + * The snapshot progress is planned as follows: * - * |CreateSnapshotOnCurvefs| BuildChunkIndexData | BuildSnapshotMap | TransferSnapshotData | UpdateSnapshot | //NOLINT - * | 5% | 6% | 10% | 10%~99% | 100% | //NOLINT + * |CreateSnapshotOnCurvefs| BuildChunkIndexData | BuildSnapshotMap | TransferSnapshotData | UpdateSnapshot | //NOLINT + * | 5% | 6% | 10% | 10%~99% | 100% | //NOLINT * * - * 异步执行期间发生error与cancel情况说明: - * 1. 发生error将导致整个异步任务直接中断,并且不做任何清理动作: - * 发生error时,一般系统存在异常,清理动作很可能不能完成, - * 因此,不进行任何清理,只置状态,待人工干预排除异常之后, - * 使用DeleteSnapshot功能去手动删除error状态的快照。 - * 2. 发生cancel时则以创建功能相反的顺序依次进行清理动作, - * 若清理过程发生error,则立即中断,之后同error过程。 + * Error and cancel behavior during asynchronous execution: + * 1. An error aborts the whole asynchronous task immediately, without any + * cleanup: when an error occurs the system is usually in an abnormal state + * and cleanup would likely fail, so nothing is cleaned up and only the + * status is set. After manual intervention has removed the fault, use + * DeleteSnapshot to delete the snapshot left in error status. + * 2. A cancel performs the cleanup actions in the reverse order of the + * creation steps; if an error occurs during cleanup, the task aborts + * immediately and then follows the same path as the error case. * - * @param task 快照任务 + * @param task Snapshot task */ void SnapshotCoreImpl::HandleCreateSnapshotTask( std::shared_ptr task) { std::string fileName = task->GetFileName(); - // 如果当前有失败的快照,需先清理失败的快照,否则快照会再次失败 + // If a failed snapshot exists, it must be cleaned up first, otherwise + // this snapshot will fail again int ret = ClearErrorSnapBeforeCreateSnapshot(task); if (ret < 0) { HandleCreateSnapshotError(task); return; } - // 为支持任务重启,这里有三种情况需要处理 - // 1. 没打过快照, 没有seqNum且curve上没有快照 - // 2. 打过快照, 有seqNum且curve上有快照 - // 3. 打过快照并已经转储完删除快照, 有seqNum但curve上没有快照 + // To support task restart, three situations must be handled: + // 1. No snapshot was ever taken: no seqNum and no snapshot on curve + // 2. A snapshot was taken: there is a seqNum and a snapshot on curve + // 3. A snapshot was taken, then dumped and deleted:
+ // There is seqNum, but there is no snapshot on the curve - SnapshotInfo *info = &(task->GetSnapshotInfo()); + SnapshotInfo* info = &(task->GetSnapshotInfo()); UUID uuid = task->GetUuid(); uint64_t seqNum = info->GetSeqNum(); bool existIndexData = false; @@ -178,8 +183,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( ret = CreateSnapshotOnCurvefs(fileName, info, task); if (ret < 0) { LOG(ERROR) << "CreateSnapshotOnCurvefs error, " - << " ret = " << ret - << ", fileName = " << fileName + << " ret = " << ret << ", fileName = " << fileName << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; @@ -188,9 +192,8 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( existIndexData = false; } else { FInfo snapInfo; - ret = client_->GetSnapshot(fileName, - info->GetUser(), - seqNum, &snapInfo); + ret = + client_->GetSnapshot(fileName, info->GetUser(), seqNum, &snapInfo); if (-LIBCURVE_ERROR::NOTEXIST == ret) { HandleCreateSnapshotSuccess(task); return; @@ -200,8 +203,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( existIndexData = dataStore_->ChunkIndexDataExist(name); } else { LOG(ERROR) << "GetSnapShot on curvefs fail, " - << " ret = " << ret - << ", fileName = " << fileName + << " ret = " << ret << ", fileName = " << fileName << ", user = " << info->GetUser() << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); @@ -224,8 +226,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( ret = dataStore_->GetChunkIndexData(name, &indexData); if (ret < 0) { LOG(ERROR) << "GetChunkIndexData error, " - << " ret = " << ret - << ", fileName = " << fileName + << " ret = " << ret << ", fileName = " << fileName << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); @@ -238,8 +239,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( ret = BuildSegmentInfo(*info, &segInfos); if (ret < 0) { LOG(ERROR) << "BuildSegmentInfo error," - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -247,8 +247,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( ret = BuildChunkIndexData(*info, &indexData, &segInfos, task); if (ret < 0) { LOG(ERROR) << "BuildChunkIndexData error, " - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -256,8 +255,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( ret = dataStore_->PutChunkIndexData(name, indexData); if (ret < 0) { LOG(ERROR) << "PutChunkIndexData error, " - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -271,14 +269,11 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( } FileSnapMap fileSnapshotMap; - ret = BuildSnapshotMap(fileName, - seqNum, - &fileSnapshotMap); + ret = BuildSnapshotMap(fileName, seqNum, &fileSnapshotMap); if (ret < 0) { LOG(ERROR) << "BuildSnapshotMap error, " << " fileName = " << task->GetFileName() - << ", seqNum = " << seqNum - << ", uuid = " << task->GetUuid(); + << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -286,26 +281,23 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( task->UpdateMetric(); if (existIndexData) { - ret = TransferSnapshotData(indexData, - *info, - segInfos, - [this] (const ChunkDataName &chunkDataName) { + ret = TransferSnapshotData( + indexData, *info, segInfos, + 
[this](const ChunkDataName& chunkDataName) { return dataStore_->ChunkDataExist(chunkDataName); }, task); } else { - ret = TransferSnapshotData(indexData, - *info, - segInfos, - [&fileSnapshotMap] (const ChunkDataName &chunkDataName) { + ret = TransferSnapshotData( + indexData, *info, segInfos, + [&fileSnapshotMap](const ChunkDataName& chunkDataName) { return fileSnapshotMap.IsExistChunk(chunkDataName); }, task); } if (ret < 0) { LOG(ERROR) << "TransferSnapshotData error, " - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -313,8 +305,8 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( task->UpdateMetric(); if (task->IsCanceled()) { - return CancelAfterTransferSnapshotData( - task, indexData, fileSnapshotMap); + return CancelAfterTransferSnapshotData(task, indexData, + fileSnapshotMap); } ret = DeleteSnapshotOnCurvefs(*info); @@ -327,8 +319,8 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( LockGuard lockGuard(task->GetLockRef()); if (task->IsCanceled()) { - return CancelAfterTransferSnapshotData( - task, indexData, fileSnapshotMap); + return CancelAfterTransferSnapshotData(task, indexData, + fileSnapshotMap); } HandleCreateSnapshotSuccess(task); @@ -347,9 +339,9 @@ int SnapshotCoreImpl::ClearErrorSnapBeforeCreateSnapshot( std::make_shared(snap, snapInfoMetric); taskInfo->GetSnapshotInfo().SetStatus(Status::errorDeleting); taskInfo->UpdateMetric(); - // 处理删除快照 + // Processing deletion of snapshots HandleDeleteSnapshotTask(taskInfo); - // 仍然失败,则本次快照失败 + // If it still fails, the current snapshot fails if (taskInfo->GetSnapshotInfo().GetStatus() != Status::done) { LOG(ERROR) << "Find error Snapshot and Delete Fail" << ", error snapshot Id = " << snap.GetUuid() @@ -362,15 +354,13 @@ int SnapshotCoreImpl::ClearErrorSnapBeforeCreateSnapshot( return kErrCodeSuccess; } -int SnapshotCoreImpl::StartCancel( - std::shared_ptr task) { - auto &snapInfo = task->GetSnapshotInfo(); +int SnapshotCoreImpl::StartCancel(std::shared_ptr task) { + auto& snapInfo = task->GetSnapshotInfo(); snapInfo.SetStatus(Status::canceling); int ret = metaStore_->UpdateSnapshot(snapInfo); if (ret < 0) { LOG(ERROR) << "UpdateSnapshot Task Cancel Fail!" 
- << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return kErrCodeInternalError; } @@ -378,18 +368,17 @@ int SnapshotCoreImpl::StartCancel( } void SnapshotCoreImpl::CancelAfterTransferSnapshotData( - std::shared_ptr task, - const ChunkIndexData &indexData, - const FileSnapMap &fileSnapshotMap) { + std::shared_ptr task, const ChunkIndexData& indexData, + const FileSnapMap& fileSnapshotMap) { LOG(INFO) << "Cancel After TransferSnapshotData" << ", uuid = " << task->GetUuid(); std::vector chunkIndexVec = indexData.GetAllChunkIndex(); - for (auto &chunkIndex : chunkIndexVec) { + for (auto& chunkIndex : chunkIndexVec) { ChunkDataName chunkDataName; indexData.GetChunkDataName(chunkIndex, &chunkDataName); if ((!fileSnapshotMap.IsExistChunk(chunkDataName)) && (dataStore_->ChunkDataExist(chunkDataName))) { - int ret = dataStore_->DeleteChunkData(chunkDataName); + int ret = dataStore_->DeleteChunkData(chunkDataName); if (ret < 0) { LOG(ERROR) << "DeleteChunkData error" << "while canceling CreateSnapshot, " @@ -410,19 +399,16 @@ void SnapshotCoreImpl::CancelAfterCreateChunkIndexData( std::shared_ptr task) { LOG(INFO) << "Cancel After CreateChunkIndexData" << ", uuid = " << task->GetUuid(); - SnapshotInfo &info = task->GetSnapshotInfo(); + SnapshotInfo& info = task->GetSnapshotInfo(); UUID uuid = task->GetUuid(); uint64_t seqNum = info.GetSeqNum(); - ChunkIndexDataName name(task->GetFileName(), - seqNum); + ChunkIndexDataName name(task->GetFileName(), seqNum); int ret = dataStore_->DeleteChunkIndexData(name); if (ret < 0) { LOG(ERROR) << "DeleteChunkIndexData error " << "while canceling CreateSnapshot, " - << " ret = " << ret - << ", fileName = " << task->GetFileName() - << ", seqNum = " << seqNum - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", fileName = " << task->GetFileName() + << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -433,7 +419,7 @@ void SnapshotCoreImpl::CancelAfterCreateSnapshotOnCurvefs( std::shared_ptr task) { LOG(INFO) << "Cancel After CreateSnapshotOnCurvefs" << ", uuid = " << task->GetUuid(); - SnapshotInfo &info = task->GetSnapshotInfo(); + SnapshotInfo& info = task->GetSnapshotInfo(); UUID uuid = task->GetUuid(); int ret = DeleteSnapshotOnCurvefs(info); @@ -452,13 +438,12 @@ void SnapshotCoreImpl::HandleClearSnapshotOnMateStore( if (ret < 0) { LOG(ERROR) << "MetaStore DeleteSnapshot error " << "while cancel CreateSnapshot, " - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); LOG(INFO) << "CancelSnapshot Task Success" << ", uuid = " << snapInfo.GetUuid() << ", fileName = " << snapInfo.GetFileName() @@ -472,13 +457,12 @@ void SnapshotCoreImpl::HandleClearSnapshotOnMateStore( void SnapshotCoreImpl::HandleCreateSnapshotSuccess( std::shared_ptr task) { - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); snapInfo.SetStatus(Status::done); int ret = metaStore_->UpdateSnapshot(snapInfo); if (ret < 0) { LOG(ERROR) << "UpdateSnapshot Task Success Fail!" 
- << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); } task->SetProgress(kProgressComplete); @@ -494,13 +478,12 @@ void SnapshotCoreImpl::HandleCreateSnapshotSuccess( void SnapshotCoreImpl::HandleCreateSnapshotError( std::shared_ptr task) { - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); snapInfo.SetStatus(Status::error); int ret = metaStore_->UpdateSnapshot(snapInfo); if (ret < 0) { LOG(ERROR) << "UpdateSnapshot Task Error Fail!" - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); } LOG(INFO) << "CreateSnapshot Task Fail" @@ -514,14 +497,11 @@ void SnapshotCoreImpl::HandleCreateSnapshotError( } int SnapshotCoreImpl::CreateSnapshotOnCurvefs( - const std::string &fileName, - SnapshotInfo *info, + const std::string& fileName, SnapshotInfo* info, std::shared_ptr task) { uint64_t seqNum = 0; - int ret = - client_->CreateSnapshot(fileName, info->GetUser(), &seqNum); - if (LIBCURVE_ERROR::OK == ret || - -LIBCURVE_ERROR::UNDER_SNAPSHOT == ret) { + int ret = client_->CreateSnapshot(fileName, info->GetUser(), &seqNum); + if (LIBCURVE_ERROR::OK == ret || -LIBCURVE_ERROR::UNDER_SNAPSHOT == ret) { // ok } else if (-LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT == ret) { LOG(ERROR) << "CreateSnapshot on curvefs fail, " @@ -530,23 +510,18 @@ int SnapshotCoreImpl::CreateSnapshotOnCurvefs( return kErrCodeNotSupport; } else { LOG(ERROR) << "CreateSnapshot on curvefs fail, " - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); return kErrCodeInternalError; } LOG(INFO) << "CreateSnapshot on curvefs success, seq = " << seqNum << ", uuid = " << task->GetUuid(); FInfo snapInfo; - ret = client_->GetSnapshot(fileName, - info->GetUser(), - seqNum, &snapInfo); + ret = client_->GetSnapshot(fileName, info->GetUser(), seqNum, &snapInfo); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "GetSnapShot on curvefs fail, " - << " ret = " << ret - << ", fileName = " << fileName - << ", user = " << info->GetUser() - << ", seqNum = " << seqNum + << " ret = " << ret << ", fileName = " << fileName + << ", user = " << info->GetUser() << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); return kErrCodeInternalError; } @@ -573,46 +548,38 @@ int SnapshotCoreImpl::CreateSnapshotOnCurvefs( ret = metaStore_->CASSnapshot(uuid, compareAndSet); if (ret < 0) { LOG(ERROR) << "CASSnapshot error, " - << " ret = " << ret - << ", fileName = " << fileName + << " ret = " << ret << ", fileName = " << fileName << ", uuid = " << task->GetUuid(); return ret; } - // 打完快照需等待2个session时间,以保证seq同步到所有client + // After taking a snapshot, you need to wait for 2 sessions to ensure that + // the seq is synchronized to all clients std::this_thread::sleep_for( std::chrono::microseconds(mdsSessionTimeUs_ * 2)); return kErrCodeSuccess; } -int SnapshotCoreImpl::DeleteSnapshotOnCurvefs(const SnapshotInfo &info) { +int SnapshotCoreImpl::DeleteSnapshotOnCurvefs(const SnapshotInfo& info) { std::string fileName = info.GetFileName(); std::string user = info.GetUser(); uint64_t seqNum = info.GetSeqNum(); - int ret = client_->DeleteSnapshot(fileName, - user, - seqNum); - if (ret != LIBCURVE_ERROR::OK && - ret != -LIBCURVE_ERROR::NOTEXIST && + int ret = client_->DeleteSnapshot(fileName, user, seqNum); + if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOTEXIST && ret != -LIBCURVE_ERROR::DELETING) { LOG(ERROR) << "DeleteSnapshot error, 
" - << " ret = " << ret - << ", fileName = " << fileName - << ", user = " << user - << ", seqNum = " << seqNum + << " ret = " << ret << ", fileName = " << fileName + << ", user = " << user << ", seqNum = " << seqNum << ", uuid = " << info.GetUuid(); return kErrCodeInternalError; } do { FileStatus status; - ret = client_->CheckSnapShotStatus(info.GetFileName(), - info.GetUser(), - seqNum, - &status); + ret = client_->CheckSnapShotStatus(info.GetFileName(), info.GetUser(), + seqNum, &status); LOG(INFO) << "Doing CheckSnapShotStatus, fileName = " - << info.GetFileName() - << ", user = " << info.GetUser() + << info.GetFileName() << ", user = " << info.GetUser() << ", seqNum = " << seqNum << ", status = " << static_cast(status) << ", uuid = " << info.GetUuid(); @@ -631,8 +598,7 @@ int SnapshotCoreImpl::DeleteSnapshotOnCurvefs(const SnapshotInfo &info) { } } else { LOG(ERROR) << "CheckSnapShotStatus fail" - << ", ret = " << ret - << ", uuid = " << info.GetUuid(); + << ", ret = " << ret << ", uuid = " << info.GetUuid(); return kErrCodeInternalError; } std::this_thread::sleep_for( @@ -642,9 +608,8 @@ int SnapshotCoreImpl::DeleteSnapshotOnCurvefs(const SnapshotInfo &info) { } int SnapshotCoreImpl::BuildChunkIndexData( - const SnapshotInfo &info, - ChunkIndexData *indexData, - std::map *segInfos, + const SnapshotInfo& info, ChunkIndexData* indexData, + std::map* segInfos, std::shared_ptr task) { std::string fileName = info.GetFileName(); std::string user = info.GetUser(); @@ -656,25 +621,19 @@ int SnapshotCoreImpl::BuildChunkIndexData( indexData->SetFileName(fileName); uint64_t chunkIndex = 0; - for (uint64_t i = 0; i < fileLength/segmentSize; i++) { + for (uint64_t i = 0; i < fileLength / segmentSize; i++) { uint64_t offset = i * segmentSize; SegmentInfo segInfo; - int ret = client_->GetSnapshotSegmentInfo( - fileName, - user, - seqNum, - offset, - &segInfo); + int ret = client_->GetSnapshotSegmentInfo(fileName, user, seqNum, + offset, &segInfo); if (LIBCURVE_ERROR::OK == ret) { segInfos->emplace(i, segInfo); for (std::vector::size_type j = 0; - j < segInfo.chunkvec.size(); - j++) { + j < segInfo.chunkvec.size(); j++) { ChunkInfoDetail chunkInfo; ChunkIDInfo cidInfo = segInfo.chunkvec[j]; - ret = client_->GetChunkInfo(cidInfo, - &chunkInfo); + ret = client_->GetChunkInfo(cidInfo, &chunkInfo); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "GetChunkInfo error, " << " ret = " << ret @@ -684,16 +643,19 @@ int SnapshotCoreImpl::BuildChunkIndexData( << ", uuid = " << task->GetUuid(); return kErrCodeInternalError; } - // 2个sn,小的是snap sn,大的是快照之后的写 - // 1个sn,有两种情况: - // 小于等于seqNum时为snap sn, 且快照之后未写过; - // 大于时, 表示打快照时为空,是快照之后首次写的版本(seqNum+1) - // 没有sn,从未写过 - // 大于2个sn,错误,报错 + // 2 Sns, the smaller one is the snap snap snap, and the larger + // one is the write after the snapshot 1 SN, there are two + // situations: + // When it is less than or equal to seqNum, it is a snap + // snap and has not been written since the snapshot; When + // greater than, it indicates that it was blank when taking + // a snapshot, and is the version written for the first + // time after the snapshot (seqNum+1) + // No sn, never written before + // Greater than 2 sns, error, error reported if (chunkInfo.chunkSn.size() == 2) { uint64_t seq = - std::min(chunkInfo.chunkSn[0], - chunkInfo.chunkSn[1]); + std::min(chunkInfo.chunkSn[0], chunkInfo.chunkSn[1]); chunkIndex = i * (segmentSize / chunkSize) + j; ChunkDataName chunkDataName(fileName, seq, chunkIndex); indexData->PutChunkDataName(chunkDataName); @@ -708,10 +670,10 @@ int 
SnapshotCoreImpl::BuildChunkIndexData( // nothing } else { // should not reach here - LOG(ERROR) << "GetChunkInfo return chunkInfo.chunkSn.size()" - << " invalid, size = " - << chunkInfo.chunkSn.size() - << ", uuid = " << task->GetUuid(); + LOG(ERROR) + << "GetChunkInfo return chunkInfo.chunkSn.size()" + << " invalid, size = " << chunkInfo.chunkSn.size() + << ", uuid = " << task->GetUuid(); return kErrCodeInternalError; } if (task->IsCanceled()) { @@ -722,10 +684,8 @@ int SnapshotCoreImpl::BuildChunkIndexData( // nothing } else { LOG(ERROR) << "GetSnapshotSegmentInfo error," - << " ret = " << ret - << ", fileName = " << fileName - << ", user = " << user - << ", seq = " << seqNum + << " ret = " << ret << ", fileName = " << fileName + << ", user = " << user << ", seq = " << seqNum << ", offset = " << offset << ", uuid = " << task->GetUuid(); return kErrCodeInternalError; @@ -736,25 +696,18 @@ int SnapshotCoreImpl::BuildChunkIndexData( } int SnapshotCoreImpl::BuildSegmentInfo( - const SnapshotInfo &info, - std::map *segInfos) { + const SnapshotInfo& info, std::map* segInfos) { int ret = kErrCodeSuccess; std::string fileName = info.GetFileName(); std::string user = info.GetUser(); uint64_t seq = info.GetSeqNum(); uint64_t fileLength = info.GetFileLength(); uint64_t segmentSize = info.GetSegmentSize(); - for (uint64_t i = 0; - i < fileLength/segmentSize; - i++) { + for (uint64_t i = 0; i < fileLength / segmentSize; i++) { uint64_t offset = i * segmentSize; SegmentInfo segInfo; - ret = client_->GetSnapshotSegmentInfo( - fileName, - user, - seq, - offset, - &segInfo); + ret = client_->GetSnapshotSegmentInfo(fileName, user, seq, offset, + &segInfo); if (LIBCURVE_ERROR::OK == ret) { segInfos->emplace(i, std::move(segInfo)); @@ -762,10 +715,8 @@ int SnapshotCoreImpl::BuildSegmentInfo( // nothing } else { LOG(ERROR) << "GetSnapshotSegmentInfo error," - << " ret = " << ret - << ", fileName = " << fileName - << ", user = " << user - << ", seq = " << seq + << " ret = " << ret << ", fileName = " << fileName + << ", user = " << user << ", seq = " << seq << ", offset = " << offset << ", uuid = " << info.GetUuid(); return kErrCodeInternalError; @@ -775,15 +726,14 @@ int SnapshotCoreImpl::BuildSegmentInfo( } int SnapshotCoreImpl::TransferSnapshotData( - const ChunkIndexData indexData, - const SnapshotInfo &info, - const std::map &segInfos, - const ChunkDataExistFilter &filter, + const ChunkIndexData indexData, const SnapshotInfo& info, + const std::map& segInfos, + const ChunkDataExistFilter& filter, std::shared_ptr task) { int ret = 0; uint64_t segmentSize = info.GetSegmentSize(); uint64_t chunkSize = info.GetChunkSize(); - uint64_t chunkPerSegment = segmentSize/chunkSize; + uint64_t chunkPerSegment = segmentSize / chunkSize; if (0 == chunkSplitSize_ || chunkSize % chunkSplitSize_ != 0) { LOG(ERROR) << "error!, ChunkSize is not align to chunkSplitSize" @@ -794,13 +744,13 @@ int SnapshotCoreImpl::TransferSnapshotData( std::vector chunkIndexVec = indexData.GetAllChunkIndex(); uint32_t totalProgress = kProgressTransferSnapshotDataComplete - - kProgressTransferSnapshotDataStart; + kProgressTransferSnapshotDataStart; uint32_t transferDataNum = chunkIndexVec.size(); double progressPerData = static_cast(totalProgress) / transferDataNum; uint32_t index = 0; - for (auto &chunkIndex : chunkIndexVec) { + for (auto& chunkIndex : chunkIndexVec) { uint64_t segNum = chunkIndex / chunkPerSegment; auto it = segInfos.find(segNum); @@ -818,17 +768,15 @@ int SnapshotCoreImpl::TransferSnapshotData( LOG(ERROR) << 
"TransferSnapshotData, " << "chunkIndexInSegment >= " << "segInfos[segNum].chunkvec.size()" - << ", chunkIndexInSegment = " - << chunkIndexInSegment - << ", size = " - << it->second.chunkvec.size() + << ", chunkIndexInSegment = " << chunkIndexInSegment + << ", size = " << it->second.chunkvec.size() << ", uuid = " << task->GetUuid(); return kErrCodeInternalError; } } auto tracker = std::make_shared(); - for (auto &chunkIndex : chunkIndexVec) { + for (auto& chunkIndex : chunkIndexVec) { ChunkDataName chunkDataName; indexData.GetChunkDataName(chunkIndex, &chunkDataName); uint64_t segNum = chunkIndex / chunkPerSegment; @@ -836,8 +784,7 @@ int SnapshotCoreImpl::TransferSnapshotData( auto it = segInfos.find(segNum); if (it != segInfos.end()) { - ChunkIDInfo cidInfo = - it->second.chunkvec[chunkIndexInSegment]; + ChunkIDInfo cidInfo = it->second.chunkvec[chunkIndexInSegment]; if (!filter(chunkDataName)) { auto taskInfo = std::make_shared( @@ -847,10 +794,7 @@ int SnapshotCoreImpl::TransferSnapshotData( readChunkSnapshotConcurrency_); UUID taskId = UUIDGenerator().GenerateUUID(); auto task = new TransferSnapshotDataChunkTask( - taskId, - taskInfo, - client_, - dataStore_); + taskId, taskInfo, client_, dataStore_); task->SetTracker(tracker); tracker->AddOneTrace(); threadPool_->PushTask(task); @@ -865,50 +809,45 @@ int SnapshotCoreImpl::TransferSnapshotData( ret = tracker->GetResult(); if (ret < 0) { LOG(ERROR) << "TransferSnapshotDataChunk tracker GetResult fail" - << ", ret = " << ret - << ", uuid = " << task->GetUuid(); + << ", ret = " << ret << ", uuid = " << task->GetUuid(); return ret; } task->SetProgress(static_cast( - kProgressTransferSnapshotDataStart + index * progressPerData)); + kProgressTransferSnapshotDataStart + index * progressPerData)); task->UpdateMetric(); index++; if (task->IsCanceled()) { return kErrCodeSuccess; } } - // 最后剩余数量不足的任务 + // Tasks with insufficient remaining quantity in the end tracker->Wait(); ret = tracker->GetResult(); if (ret < 0) { LOG(ERROR) << "TransferSnapshotDataChunk tracker GetResult fail" - << ", ret = " << ret - << ", uuid = " << task->GetUuid(); + << ", ret = " << ret << ", uuid = " << task->GetUuid(); return ret; } return kErrCodeSuccess; } - -int SnapshotCoreImpl::DeleteSnapshotPre( - UUID uuid, - const std::string &user, - const std::string &fileName, - SnapshotInfo *snapInfo) { +int SnapshotCoreImpl::DeleteSnapshotPre(UUID uuid, const std::string& user, + const std::string& fileName, + SnapshotInfo* snapInfo) { NameLockGuard lockSnapGuard(snapshotRef_->GetSnapshotLock(), uuid); int ret = metaStore_->GetSnapshotInfo(uuid, snapInfo); if (ret < 0) { - // 快照不存在时直接返回删除成功,使接口幂等 + // When the snapshot does not exist, it directly returns deletion + // success, making the interface idempotent return kErrCodeSuccess; } if (snapInfo->GetUser() != user) { LOG(ERROR) << "Can not delete snapshot by different user."; return kErrCodeInvalidUser; } - if ((!fileName.empty()) && - (fileName != snapInfo->GetFileName())) { + if ((!fileName.empty()) && (fileName != snapInfo->GetFileName())) { LOG(ERROR) << "Can not delete, fileName is not matched."; return kErrCodeFileNameNotMatch; } @@ -938,8 +877,7 @@ int SnapshotCoreImpl::DeleteSnapshotPre( ret = metaStore_->UpdateSnapshot(*snapInfo); if (ret < 0) { LOG(ERROR) << "UpdateSnapshot error," - << " ret = " << ret - << ", uuid = " << uuid; + << " ret = " << ret << ", uuid = " << uuid; return ret; } return kErrCodeSuccess; @@ -947,23 +885,24 @@ int SnapshotCoreImpl::DeleteSnapshotPre( constexpr uint32_t 
@@ -947,23 +885,24 @@ constexpr uint32_t kDelProgressBuildSnapshotMapComplete = 10; constexpr uint32_t kDelProgressDeleteChunkDataStart = - kDelProgressBuildSnapshotMapComplete; + kDelProgressBuildSnapshotMapComplete; constexpr uint32_t kDelProgressDeleteChunkDataComplete = 80; constexpr uint32_t kDelProgressDeleteChunkIndexDataComplete = 90; /** - * @brief 异步执行删除快照任务并更新任务进度 + * @brief Asynchronously execute the delete-snapshot task and update the + * task progress * - * 删除快照进度规划如下: + * The progress plan for deleting a snapshot is as follows: * * |BuildSnapshotMap|DeleteChunkData|DeleteChunkIndexData|DeleteSnapshot| * | 10% | 10%~80% | 90% | 100% | * - * @param task 快照任务 + * @param task snapshot task */ void SnapshotCoreImpl::HandleDeleteSnapshotTask( std::shared_ptr<SnapshotTaskInfo> task) { - SnapshotInfo &info = task->GetSnapshotInfo(); + SnapshotInfo& info = task->GetSnapshotInfo(); UUID uuid = task->GetUuid(); uint64_t seqNum = info.GetSeqNum(); FileSnapMap fileSnapshotMap; @@ -971,15 +910,13 @@ void SnapshotCoreImpl::HandleDeleteSnapshotTask( if (ret < 0) { LOG(ERROR) << "BuildSnapshotMap error, " << " fileName = " << task->GetFileName() - << ", seqNum = " << seqNum - << ", uuid = " << task->GetUuid(); + << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); HandleDeleteSnapshotError(task); return; } task->SetProgress(kDelProgressBuildSnapshotMapComplete); task->UpdateMetric(); - ChunkIndexDataName name(task->GetFileName(), - seqNum); + ChunkIndexDataName name(task->GetFileName(), seqNum); ChunkIndexData indexData; if (dataStore_->ChunkIndexDataExist(name)) { ret = dataStore_->GetChunkIndexData(name, &indexData); @@ -995,29 +932,28 @@ void SnapshotCoreImpl::HandleDeleteSnapshotTask( auto chunkIndexVec = indexData.GetAllChunkIndex(); uint32_t totalProgress = kDelProgressDeleteChunkDataComplete - - kDelProgressDeleteChunkDataStart; + kDelProgressDeleteChunkDataStart; uint32_t chunkDataNum = chunkIndexVec.size(); - double progressPerData = static_cast<double> (totalProgress) / - chunkDataNum; + double progressPerData = + static_cast<double>(totalProgress) / chunkDataNum; uint32_t index = 0; LOG(INFO) << "HandleDeleteSnapshotTask GetChunkIndexData success, " << "begin to DeleteChunkData, " << "chunkDataNum = " << chunkIndexVec.size(); - for (auto &chunkIndex : chunkIndexVec) { + for (auto& chunkIndex : chunkIndexVec) { ChunkDataName chunkDataName; indexData.GetChunkDataName(chunkIndex, &chunkDataName); if ((!fileSnapshotMap.IsExistChunk(chunkDataName)) && (dataStore_->ChunkDataExist(chunkDataName))) { - ret = dataStore_->DeleteChunkData(chunkDataName); + ret = dataStore_->DeleteChunkData(chunkDataName); if (ret < 0) { LOG(ERROR) << "DeleteChunkData error, " << " ret = " << ret << ", fileName = " << task->GetFileName() << ", seqNum = " << seqNum - << ", chunkIndex = " - << chunkDataName.chunkIndex_ + << ", chunkIndex = " << chunkDataName.chunkIndex_ << ", uuid = " << task->GetUuid(); HandleDeleteSnapshotError(task); return; @@ -1059,8 +995,7 @@ void SnapshotCoreImpl::HandleDeleteSnapshotTask( ret = metaStore_->DeleteSnapshot(uuid); if (ret < 0) { LOG(ERROR) << "DeleteSnapshot error, " - << " ret = " << ret - << ", uuid = " << uuid; + << " ret = " << ret << ", uuid = " << uuid; HandleDeleteSnapshotError(task); return; } @@ -1068,7 +1003,7 @@ void SnapshotCoreImpl::HandleDeleteSnapshotTask( task->SetProgress(kProgressComplete); task->GetSnapshotInfo().SetStatus(Status::done); - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); LOG(INFO) << "DeleteSnapshot Task Success" << ", uuid = " << snapInfo.GetUuid() << ", fileName = " <<
snapInfo.GetFileName() @@ -1079,19 +1014,17 @@ void SnapshotCoreImpl::HandleDeleteSnapshotTask( return; } - void SnapshotCoreImpl::HandleDeleteSnapshotError( std::shared_ptr task) { - SnapshotInfo &info = task->GetSnapshotInfo(); + SnapshotInfo& info = task->GetSnapshotInfo(); info.SetStatus(Status::error); int ret = metaStore_->UpdateSnapshot(info); if (ret < 0) { LOG(ERROR) << "UpdateSnapshot Task Error Fail!" - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); } - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); LOG(INFO) << "DeleteSnapshot Task Fail" << ", uuid = " << snapInfo.GetUuid() << ", fileName = " << snapInfo.GetFileName() @@ -1102,23 +1035,22 @@ void SnapshotCoreImpl::HandleDeleteSnapshotError( return; } -int SnapshotCoreImpl::GetFileSnapshotInfo(const std::string &file, - std::vector *info) { +int SnapshotCoreImpl::GetFileSnapshotInfo(const std::string& file, + std::vector* info) { metaStore_->GetSnapshotList(file, info); return kErrCodeSuccess; } -int SnapshotCoreImpl::GetSnapshotInfo(const UUID uuid, - SnapshotInfo *info) { +int SnapshotCoreImpl::GetSnapshotInfo(const UUID uuid, SnapshotInfo* info) { return metaStore_->GetSnapshotInfo(uuid, info); } -int SnapshotCoreImpl::BuildSnapshotMap(const std::string &fileName, - uint64_t seqNum, - FileSnapMap *fileSnapshotMap) { +int SnapshotCoreImpl::BuildSnapshotMap(const std::string& fileName, + uint64_t seqNum, + FileSnapMap* fileSnapshotMap) { std::vector snapInfos; int ret = metaStore_->GetSnapshotList(fileName, &snapInfos); - for (auto &snap : snapInfos) { + for (auto& snap : snapInfos) { if (snap.GetSeqNum() != seqNum && snap.GetSeqNum() != kUnInitializeSeqNum) { ChunkIndexDataName name(snap.GetFileName(), snap.GetSeqNum()); @@ -1127,10 +1059,11 @@ int SnapshotCoreImpl::BuildSnapshotMap(const std::string &fileName, if (ret < 0) { LOG(ERROR) << "GetChunkIndexData error, " << " ret = " << ret - << ", fileName = " << snap.GetFileName() + << ", fileName = " << snap.GetFileName() << ", seqNum = " << snap.GetSeqNum(); - // 此处不能返回错误, - // 否则一旦某个失败的快照没有indexdata,所有快照都无法删除 + // An error cannot be returned here, + // Otherwise, once a failed snapshot does not have indexdata, + // all snapshots cannot be deleted } else { fileSnapshotMap->maps.push_back(std::move(indexData)); } @@ -1139,19 +1072,18 @@ int SnapshotCoreImpl::BuildSnapshotMap(const std::string &fileName, return kErrCodeSuccess; } -int SnapshotCoreImpl::GetSnapshotList(std::vector *list) { +int SnapshotCoreImpl::GetSnapshotList(std::vector* list) { metaStore_->GetSnapshotList(list); return kErrCodeSuccess; } int SnapshotCoreImpl::HandleCancelUnSchduledSnapshotTask( std::shared_ptr task) { - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); int ret = metaStore_->DeleteSnapshot(snapInfo.GetUuid()); if (ret < 0) { LOG(ERROR) << "HandleCancelUnSchduledSnapshotTask fail, " - << " ret = " << ret - << ", uuid = " << snapInfo.GetUuid() + << " ret = " << ret << ", uuid = " << snapInfo.GetUuid() << ", fileName = " << snapInfo.GetFileName() << ", snapshotName = " << snapInfo.GetSnapshotName() << ", seqNum = " << snapInfo.GetSeqNum() @@ -1161,7 +1093,6 @@ int SnapshotCoreImpl::HandleCancelUnSchduledSnapshotTask( return kErrCodeSuccess; } - int SnapshotCoreImpl::HandleCancelScheduledSnapshotTask( std::shared_ptr task) { LockGuard lockGuard(task->GetLockRef()); @@ -1176,8 +1107,7 @@ int SnapshotCoreImpl::HandleCancelScheduledSnapshotTask( } else { auto& 
snapInfo = task->GetSnapshotInfo(); LOG(ERROR) << "HandleCancelSchduledSnapshotTask failed: " - << ", ret = " << ret - << ", uuid = " << snapInfo.GetUuid() + << ", ret = " << ret << ", uuid = " << snapInfo.GetUuid() << ", fileName = " << snapInfo.GetFileName() << ", snapshotName = " << snapInfo.GetSnapshotName() << ", seqNum = " << snapInfo.GetSeqNum() diff --git a/src/snapshotcloneserver/snapshot/snapshot_core.h b/src/snapshotcloneserver/snapshot/snapshot_core.h index 747e02ea2f..9667b64d39 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_core.h +++ b/src/snapshotcloneserver/snapshot/snapshot_core.h @@ -23,19 +23,19 @@ #ifndef SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_CORE_H_ #define SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_CORE_H_ +#include #include #include #include -#include -#include "src/snapshotcloneserver/common/curvefs_client.h" -#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" -#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" +#include "src/common/concurrent/name_lock.h" #include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/config.h" +#include "src/snapshotcloneserver/common/curvefs_client.h" #include "src/snapshotcloneserver/common/snapshot_reference.h" -#include "src/common/concurrent/name_lock.h" +#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" #include "src/snapshotcloneserver/common/thread_pool.h" +#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" using ::curve::common::NameLock; @@ -45,22 +45,23 @@ namespace snapshotcloneserver { class SnapshotTaskInfo; /** - * @brief 文件的快照索引块映射表 + * @brief Snapshot index block mapping table for file */ struct FileSnapMap { std::vector maps; /** - * @brief 获取当前映射表中是否存在当前chunk数据 + * @brief to obtain whether the current chunk data exists in the current + * mapping table * - * @param name chunk数据对象 + * @param name chunk data object * - * @retval true 存在 - * @retval false 不存在 + * @retval true exists + * @retval false does not exist */ - bool IsExistChunk(const ChunkDataName &name) const { + bool IsExistChunk(const ChunkDataName& name) const { bool find = false; - for (auto &v : maps) { + for (auto& v : maps) { find = v.IsExistChunkDataName(name); if (find) { break; @@ -71,7 +72,7 @@ struct FileSnapMap { }; /** - * @brief 快照核心模块 + * @brief snapshot core module */ class SnapshotCore { public: @@ -79,80 +80,76 @@ class SnapshotCore { virtual ~SnapshotCore() {} /** - * @brief 创建快照前置操作 + * @brief Create snapshot pre operation * - * @param file 文件名 - * @param user 用户名 - * @param snapshotName 快照名 - * @param[out] snapInfo 快照信息 + * @param file file name + * @param user username + * @param snapshotName SnapshotName + * @param[out] snapInfo snapshot information * - * @return 错误码 + * @return error code */ - virtual int CreateSnapshotPre(const std::string &file, - const std::string &user, - const std::string &snapshotName, - SnapshotInfo *snapInfo) = 0; + virtual int CreateSnapshotPre(const std::string& file, + const std::string& user, + const std::string& snapshotName, + SnapshotInfo* snapInfo) = 0; /** - * @brief 执行创建快照任务并更新progress - * 第一步,构建快照文件映射, put MateObj - * 第二步,从curvefs读取chunk文件,并put DataObj - * 第三步,删除curvefs中的临时快照 - * 第四步,update status + * @brief Execute the task of creating a snapshot and update the progress + * Step 1, build a snapshot file mapping and put MateObj + * Step 2, read the chunk file from curvefs and put DataObj + * Step 3, delete the temporary snapshot in curves + * Step 4, update status * - * @param task 
快照任务信息 + * @param task snapshot task information */ virtual void HandleCreateSnapshotTask( std::shared_ptr task) = 0; /** - * @brief 删除快照前置操作 - * 更新数据库中的快照记录为deleting状态 + * @brief Delete snapshot pre operation + * Update the snapshot records in the database to a deleting state * - * @param uuid 快照uuid - * @param user 用户名 - * @param fileName 文件名 - * @param[out] snapInfo 快照信息 + * @param uuid Snapshot uuid + * @param user username + * @param fileName File name + * @param[out] snapInfo snapshot information * - * @return 错误码 + * @return error code */ - virtual int DeleteSnapshotPre( - UUID uuid, - const std::string &user, - const std::string &fileName, - SnapshotInfo *snapInfo) = 0; + virtual int DeleteSnapshotPre(UUID uuid, const std::string& user, + const std::string& fileName, + SnapshotInfo* snapInfo) = 0; /** - * @brief 执行删除快照任务并更新progress + * @brief Execute the delete snapshot task and update the progress * - * @param task 快照任务信息 + * @param task snapshot task information */ virtual void HandleDeleteSnapshotTask( std::shared_ptr task) = 0; /** - * @brief 获取文件的快照信息 + * @brief Get snapshot information of files * - * @param file 文件名 - * @param info 快照信息列表 + * @param file file name + * @param info snapshot information list * - * @return 错误码 + * @return error code */ - virtual int GetFileSnapshotInfo(const std::string &file, - std::vector *info) = 0; + virtual int GetFileSnapshotInfo(const std::string& file, + std::vector* info) = 0; /** - * @brief 获取全部快照信息 + * @brief Get all snapshot information * - * @param list 快照信息列表 + * @param list snapshot information list * - * @return 错误码 + * @return error code */ - virtual int GetSnapshotList(std::vector *list) = 0; - + virtual int GetSnapshotList(std::vector* list) = 0; - virtual int GetSnapshotInfo(const UUID uuid, - SnapshotInfo *info) = 0; + virtual int GetSnapshotInfo(const UUID uuid, SnapshotInfo* info) = 0; virtual int HandleCancelUnSchduledSnapshotTask( std::shared_ptr task) = 0; @@ -170,66 +167,61 @@ class SnapshotCore { class SnapshotCoreImpl : public SnapshotCore { public: - /** - * @brief 构造函数 - * - * @param client curve客户端对象 - * @param metaStore meta存储对象 - * @param dataStore data存储对象 - */ - SnapshotCoreImpl( - std::shared_ptr client, - std::shared_ptr metaStore, - std::shared_ptr dataStore, - std::shared_ptr snapshotRef, - const SnapshotCloneServerOptions &option) - : client_(client), - metaStore_(metaStore), - dataStore_(dataStore), - snapshotRef_(snapshotRef), - chunkSplitSize_(option.chunkSplitSize), - checkSnapshotStatusIntervalMs_(option.checkSnapshotStatusIntervalMs), - maxSnapshotLimit_(option.maxSnapshotLimit), - snapshotCoreThreadNum_(option.snapshotCoreThreadNum), - mdsSessionTimeUs_(option.mdsSessionTimeUs), - clientAsyncMethodRetryTimeSec_(option.clientAsyncMethodRetryTimeSec), - clientAsyncMethodRetryIntervalMs_( - option.clientAsyncMethodRetryIntervalMs), - readChunkSnapshotConcurrency_(option.readChunkSnapshotConcurrency) { - threadPool_ = std::make_shared( - option.snapshotCoreThreadNum); + /** + * @brief constructor + * + * @param client curve client object + * @param metaStore MetaStorage Object + * @param dataStore data storage object + */ + SnapshotCoreImpl(std::shared_ptr client, + std::shared_ptr metaStore, + std::shared_ptr dataStore, + std::shared_ptr snapshotRef, + const SnapshotCloneServerOptions& option) + : client_(client), + metaStore_(metaStore), + dataStore_(dataStore), + snapshotRef_(snapshotRef), + chunkSplitSize_(option.chunkSplitSize), + 
checkSnapshotStatusIntervalMs_(option.checkSnapshotStatusIntervalMs), + maxSnapshotLimit_(option.maxSnapshotLimit), + snapshotCoreThreadNum_(option.snapshotCoreThreadNum), + mdsSessionTimeUs_(option.mdsSessionTimeUs), + clientAsyncMethodRetryTimeSec_(option.clientAsyncMethodRetryTimeSec), + clientAsyncMethodRetryIntervalMs_( + option.clientAsyncMethodRetryIntervalMs), + readChunkSnapshotConcurrency_(option.readChunkSnapshotConcurrency) { + threadPool_ = + std::make_shared(option.snapshotCoreThreadNum); } int Init(); - ~SnapshotCoreImpl() { - threadPool_->Stop(); - } + ~SnapshotCoreImpl() { threadPool_->Stop(); } - // 公有接口定义见SnapshotCore接口注释 - int CreateSnapshotPre(const std::string &file, - const std::string &user, - const std::string &snapshotName, - SnapshotInfo *snapInfo) override; + // Public interface definition can be found in the SnapshotCore interface + // annotation + int CreateSnapshotPre(const std::string& file, const std::string& user, + const std::string& snapshotName, + SnapshotInfo* snapInfo) override; void HandleCreateSnapshotTask( std::shared_ptr task) override; - int DeleteSnapshotPre(UUID uuid, - const std::string &user, - const std::string &fileName, - SnapshotInfo *snapInfo) override; + int DeleteSnapshotPre(UUID uuid, const std::string& user, + const std::string& fileName, + SnapshotInfo* snapInfo) override; void HandleDeleteSnapshotTask( std::shared_ptr task) override; - int GetFileSnapshotInfo(const std::string &file, - std::vector *info) override; + int GetFileSnapshotInfo(const std::string& file, + std::vector* info) override; - int GetSnapshotInfo(const UUID uuid, - SnapshotInfo *info) override; + int GetSnapshotInfo(const UUID uuid, SnapshotInfo* info) override; - int GetSnapshotList(std::vector *list) override; + int GetSnapshotList(std::vector* list) override; int HandleCancelUnSchduledSnapshotTask( std::shared_ptr task) override; @@ -239,201 +231,188 @@ class SnapshotCoreImpl : public SnapshotCore { private: /** - * @brief 构建快照文件映射 + * @brief Build snapshot file mapping * - * @param fileName 文件名 - * @param seqNum 快照版本号 - * @param fileSnapshotMap 快照文件映射表 + * @param fileName File name + * @param seqNum snapshot version number + * @param fileSnapshotMap snapshot file mapping table * - * @return 错误码 + * @return error code */ - int BuildSnapshotMap(const std::string &fileName, - uint64_t seqNum, - FileSnapMap *fileSnapshotMap); + int BuildSnapshotMap(const std::string& fileName, uint64_t seqNum, + FileSnapMap* fileSnapshotMap); /** - * @brief 构建Segment信息 + * @brief Build Segment Information * - * @param info 快照信息 - * @param segInfos Segment信息表 + * @param info snapshot information + * @param segInfos Segment Information Table * - * @return 错误码 + * @return error code */ - int BuildSegmentInfo( - const SnapshotInfo &info, - std::map *segInfos); + int BuildSegmentInfo(const SnapshotInfo& info, + std::map* segInfos); /** - * @brief 在curvefs上创建快照 + * @brief Create a snapshot on curves * - * @param fileName 文件名 - * @param info 快照信息 - * @param task 快照任务信息 + * @param fileName File name + * @param info snapshot information + * @param task snapshot task information * - * @return 错误码 + * @return error code */ - int CreateSnapshotOnCurvefs( - const std::string &fileName, - SnapshotInfo *info, - std::shared_ptr task); + int CreateSnapshotOnCurvefs(const std::string& fileName, SnapshotInfo* info, + std::shared_ptr task); /** - * @brief 删除curvefs上的快照 + * @brief Delete snapshot on curves * - * @param info 快照信息 + * @param info snapshot information * - * @return 错误码 + * @return 
error code */ - int DeleteSnapshotOnCurvefs(const SnapshotInfo &info); + int DeleteSnapshotOnCurvefs(const SnapshotInfo& info); /** - * @brief 构建索引块 + * @brief Build the index block * - * @param info 快照信息 - * @param[out] indexData 索引块 - * @param[out] segInfos Segment信息 - * @param task 快照任务信息 + * @param info snapshot information + * @param[out] indexData index block + * @param[out] segInfos Segment information + * @param task snapshot task information * - * @return 错误码 + * @return error code */ - int BuildChunkIndexData( - const SnapshotInfo &info, - ChunkIndexData *indexData, - std::map<uint64_t, SegmentInfo> *segInfos, - std::shared_ptr<SnapshotTaskInfo> task); + int BuildChunkIndexData(const SnapshotInfo& info, ChunkIndexData* indexData, + std::map<uint64_t, SegmentInfo>* segInfos, + std::shared_ptr<SnapshotTaskInfo> task); - using ChunkDataExistFilter = - std::function<bool(const ChunkDataName&)>; + using ChunkDataExistFilter = std::function<bool(const ChunkDataName&)>; /** - * @brief 转储快照过程 + * @brief Dump the snapshot data * - * @param indexData 索引块 - * @param info 快照信息 - * @param segInfos Segment信息 - * @param filter 转储数据块过滤器 - * @param task 快照任务信息 + * @param indexData index block + * @param info snapshot information + * @param segInfos Segment information + * @param filter filter for data chunks that need no dumping + * @param task snapshot task information * - * @return 错误码 + * @return error code */ - int TransferSnapshotData( - const ChunkIndexData indexData, - const SnapshotInfo &info, - const std::map<uint64_t, SegmentInfo> &segInfos, - const ChunkDataExistFilter &filter, - std::shared_ptr<SnapshotTaskInfo> task); + int TransferSnapshotData(const ChunkIndexData indexData, + const SnapshotInfo& info, + const std::map<uint64_t, SegmentInfo>& segInfos, + const ChunkDataExistFilter& filter, + std::shared_ptr<SnapshotTaskInfo> task); /** - * @brief 开始cancel,更新任务状态,更新数据库状态 + * @brief Start the cancel: update the task status and the database status * - * @param task 快照任务信息 + * @param task snapshot task information * - * @return 错误码 + * @return error code */ - int StartCancel( - std::shared_ptr<SnapshotTaskInfo> task); + int StartCancel(std::shared_ptr<SnapshotTaskInfo> task); /** - * @brief 转储数据之后取消快照过程 + * @brief Cancel the snapshot process after the data has been dumped * - * @param task 快照任务信息 - * @param indexData 索引块 - * @param fileSnapshotMap 快照文件映射表 + * @param task snapshot task information + * @param indexData index block + * @param fileSnapshotMap snapshot file mapping table */ - void CancelAfterTransferSnapshotData( - std::shared_ptr<SnapshotTaskInfo> task, - const ChunkIndexData &indexData, - const FileSnapMap &fileSnapshotMap); + void CancelAfterTransferSnapshotData(std::shared_ptr<SnapshotTaskInfo> task, + const ChunkIndexData& indexData, + const FileSnapMap& fileSnapshotMap); /** - * @brief 创建索引块之后取消快照过程 + * @brief Cancel the snapshot process after creating the index block * - * @param task 快照任务信息 + * @param task snapshot task information */ void CancelAfterCreateChunkIndexData( std::shared_ptr<SnapshotTaskInfo> task); /** - * @brief 在curvefs上创建快照之后取消快照过程 + * @brief Cancel the snapshot process after creating a snapshot on curvefs * - * @param task 快照任务信息 + * @param task snapshot task information */ void CancelAfterCreateSnapshotOnCurvefs( std::shared_ptr<SnapshotTaskInfo> task); /** - * @brief 在Mate数据存储在删除快照 + * @brief Delete the snapshot from the meta data store * - * @param task 快照任务信息 + * @param task snapshot task information */ - void HandleClearSnapshotOnMateStore( - std::shared_ptr<SnapshotTaskInfo> task); + void HandleClearSnapshotOnMateStore(std::shared_ptr<SnapshotTaskInfo> task); /** - * @brief 处理创建快照任务成功 + * @brief Handle a successful create-snapshot task * - * @param task 快照任务信息 + * @param task snapshot task information */ - void HandleCreateSnapshotSuccess( - std::shared_ptr<SnapshotTaskInfo> task); + void HandleCreateSnapshotSuccess(std::shared_ptr<SnapshotTaskInfo> task);
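The ChunkDataExistFilter declared above is just a predicate over ChunkDataName. Its actual call site is not part of this hunk, but a plausible construction (a sketch only, inside SnapshotCoreImpl and assuming the dataStore_ member shown in this class) is a lambda that asks the data store whether the chunk object already exists, so TransferSnapshotData can skip chunks that were already dumped:

    // Sketch: skip chunks whose data object is already in the data store.
    ChunkDataExistFilter filter = [this](const ChunkDataName& name) {
        return dataStore_->ChunkDataExist(name);  // true => already dumped, skip it
    };
    int ret = TransferSnapshotData(indexData, info, segInfos, filter, task);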
/** - * @brief 处理创建快照任务失败过程 + * @brief Handle a failed create-snapshot task * - * @param task 快照任务信息 + * @param task snapshot task information */ - void HandleCreateSnapshotError( - std::shared_ptr<SnapshotTaskInfo> task); + void HandleCreateSnapshotError(std::shared_ptr<SnapshotTaskInfo> task); /** - * @brief 处理删除快照任务失败过程 + * @brief Handle a failed delete-snapshot task * - * @param task 快照任务信息 + * @param task snapshot task information */ - void HandleDeleteSnapshotError( - std::shared_ptr<SnapshotTaskInfo> task); - + void HandleDeleteSnapshotError(std::shared_ptr<SnapshotTaskInfo> task); /** - * @brief 创建快照前尝试清理失败的快照,否则可能会再次失败 + * @brief Before creating a snapshot, try to clean up previously failed + * snapshots, otherwise the creation may fail again * - * @param task 快照任务信息 - * @return 错误码 + * @param task snapshot task information + * @return error code */ int ClearErrorSnapBeforeCreateSnapshot( std::shared_ptr<SnapshotTaskInfo> task); private: - // curvefs客户端对象 + // Curvefs client object std::shared_ptr<CurveFsClient> client_; - // meta数据存储 + // Metadata storage std::shared_ptr<SnapshotCloneMetaStore> metaStore_; - // data数据存储 + // Data storage std::shared_ptr<SnapshotDataStore> dataStore_; - // 快照引用计数管理模块 + // Snapshot reference count management module std::shared_ptr<SnapshotReference> snapshotRef_; - // 执行并发步骤的线程池 + // Thread pool for executing concurrent steps std::shared_ptr<ThreadPool> threadPool_; - // 锁住打快照的文件名,防止并发同时对其打快照,同一文件的快照需排队 + // Lock the file name being snapshotted to prevent concurrent snapshots + // of it; snapshots of the same file must be queued NameLock snapshotNameLock_; - // 转储chunk分片大小 + // Chunk split size used when dumping uint64_t chunkSplitSize_; - // CheckSnapShotStatus调用间隔 + // CheckSnapShotStatus call interval uint32_t checkSnapshotStatusIntervalMs_; - // 最大快照数 + // Maximum number of snapshots uint32_t maxSnapshotLimit_; - // 线程数 + // Number of threads uint32_t snapshotCoreThreadNum_; - // session超时时间 + // Session timeout uint32_t mdsSessionTimeUs_; - // client异步回调请求的重试总时间 + // Total retry time for client asynchronous callback requests uint64_t clientAsyncMethodRetryTimeSec_; - // 调用client异步方法重试时间间隔 + // Retry interval for client asynchronous method calls uint64_t clientAsyncMethodRetryIntervalMs_; - // 异步ReadChunkSnapshot的并发数 + // Concurrency of asynchronous ReadChunkSnapshot requests uint32_t readChunkSnapshotConcurrency_; }; diff --git a/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp b/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp index 8401af3b82..2c9fd2e28c 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp @@ -27,10 +27,10 @@ namespace curve { namespace snapshotcloneserver { -bool ToChunkDataName(const std::string &name, ChunkDataName *cName) { - // 逆向解析string,以支持文件名具有分隔字符的情况 - std::string::size_type pos = - name.find_last_of(kChunkDataNameSeprator); +bool ToChunkDataName(const std::string& name, ChunkDataName* cName) { + // Parse the string from the end, so that file names that contain the + // separator character are handled correctly + std::string::size_type pos = name.find_last_of(kChunkDataNameSeprator); std::string::size_type lastPos = std::string::npos; if (std::string::npos == pos) { LOG(ERROR) << "ToChunkDataName error, namestr = " << name; @@ -40,8 +40,7 @@ bool ToChunkDataName(const std::string &name,
ChunkDataName *cName) { return true; } -bool ChunkIndexData::Serialize(std::string *data) const { +bool ChunkIndexData::Serialize(std::string* data) const { ChunkMap map; - for (const auto &m : this->chunkMap_) { - map.mutable_indexmap()-> - insert({m.first, - ChunkDataName(fileName_, m.second, m.first). - ToDataChunkKey()}); + for (const auto& m : this->chunkMap_) { + map.mutable_indexmap()->insert( + {m.first, + ChunkDataName(fileName_, m.second, m.first).ToDataChunkKey()}); } - // Todo:可以转化为stream给adpater接口使用SerializeToOstream + // Todo: Can be converted into a stream for the adpater interface to use + // SerializeToOstream return map.SerializeToString(data); } -bool ChunkIndexData::Unserialize(const std::string &data) { - ChunkMap map; +bool ChunkIndexData::Unserialize(const std::string& data) { + ChunkMap map; if (map.ParseFromString(data)) { - for (const auto &m : map.indexmap()) { + for (const auto& m : map.indexmap()) { ChunkDataName chunkDataName; if (ToChunkDataName(m.second, &chunkDataName)) { this->fileName_ = chunkDataName.fileName_; - this->chunkMap_.emplace(m.first, - chunkDataName.chunkSeqNum_); + this->chunkMap_.emplace(m.first, chunkDataName.chunkSeqNum_); } else { return false; } @@ -89,7 +87,7 @@ bool ChunkIndexData::Unserialize(const std::string &data) { } bool ChunkIndexData::GetChunkDataName(ChunkIndexType index, - ChunkDataName* nameOut) const { + ChunkDataName* nameOut) const { auto it = chunkMap_.find(index); if (it != chunkMap_.end()) { *nameOut = ChunkDataName(fileName_, it->second, index); @@ -99,7 +97,7 @@ bool ChunkIndexData::GetChunkDataName(ChunkIndexType index, } } -bool ChunkIndexData::IsExistChunkDataName(const ChunkDataName &name) const { +bool ChunkIndexData::IsExistChunkDataName(const ChunkDataName& name) const { if (fileName_ != name.fileName_) { return false; } @@ -120,5 +118,5 @@ std::vector ChunkIndexData::GetAllChunkIndex() const { return ret; } -} // namespace snapshotcloneserver -} // namespace curve +} // namespace snapshotcloneserver +} // namespace curve diff --git a/src/snapshotcloneserver/snapshot/snapshot_data_store.h b/src/snapshotcloneserver/snapshot/snapshot_data_store.h index ae88b7694b..ed7d675450 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_data_store.h +++ b/src/snapshotcloneserver/snapshot/snapshot_data_store.h @@ -26,16 +26,16 @@ #include #include -#include -#include #include -#include +#include #include +#include +#include #include "src/common/concurrent/concurrent.h" -using ::curve::common::SpinLock; using ::curve::common::LockGuard; +using ::curve::common::SpinLock; namespace curve { namespace snapshotcloneserver { @@ -47,25 +47,19 @@ const char kChunkDataNameSeprator[] = "-"; class ChunkDataName { public: - ChunkDataName() - : chunkSeqNum_(0), - chunkIndex_(0) {} - ChunkDataName(const std::string &fileName, - SnapshotSeqType seq, + ChunkDataName() : chunkSeqNum_(0), chunkIndex_(0) {} + ChunkDataName(const std::string& fileName, SnapshotSeqType seq, ChunkIndexType chunkIndex) - : fileName_(fileName), - chunkSeqNum_(seq), - chunkIndex_(chunkIndex) {} + : fileName_(fileName), chunkSeqNum_(seq), chunkIndex_(chunkIndex) {} /** - * 构建datachunk对象的名称 文件名-chunk索引-版本号 - * @return: 对象名称字符串 + * Build the name of the datachunk object File name Chunk index Version + * number + * @return: Object name string */ std::string ToDataChunkKey() const { - return fileName_ - + kChunkDataNameSeprator - + std::to_string(this->chunkIndex_) - + kChunkDataNameSeprator - + std::to_string(this->chunkSeqNum_); + return fileName_ + 
kChunkDataNameSeprator + + std::to_string(this->chunkIndex_) + kChunkDataNameSeprator + + std::to_string(this->chunkSeqNum_); } std::string fileName_; @@ -73,45 +67,41 @@ class ChunkDataName { ChunkIndexType chunkIndex_; }; -inline bool operator==(const ChunkDataName &lhs, const ChunkDataName &rhs) { +inline bool operator==(const ChunkDataName& lhs, const ChunkDataName& rhs) { return (lhs.fileName_ == rhs.fileName_) && (lhs.chunkSeqNum_ == rhs.chunkSeqNum_) && (lhs.chunkIndex_ == rhs.chunkIndex_); } /** - * @brief 根据对象名称解析生成chunkdataname对象 + * @brief Generate chunkdataname object based on object name parsing * - * @param name 对象名 - * @param[out] cName chunkDataName对象 + * @param name Object name + * @param[out] cName chunkDataName object * - * @retVal true 成功 - * @retVal false 失败 + * @retval true succeeded + * @retval false failed */ -bool ToChunkDataName(const std::string &name, ChunkDataName *cName); +bool ToChunkDataName(const std::string& name, ChunkDataName* cName); class ChunkIndexDataName { public: - ChunkIndexDataName() - : fileSeqNum_(0) {} - ChunkIndexDataName(std::string filename, - SnapshotSeqType seq) { + ChunkIndexDataName() : fileSeqNum_(0) {} + ChunkIndexDataName(std::string filename, SnapshotSeqType seq) { fileName_ = filename; fileSeqNum_ = seq; } /** - * 构建索引chunk的名称 文件名+文件版本号 - * @return: 索引chunk的名称字符串 + * Build the name of the index chunk file name+file version number + * @return: The name string of the index chunk */ std::string ToIndexDataChunkKey() const { - return this->fileName_ - + "-" - + std::to_string(this->fileSeqNum_); + return this->fileName_ + "-" + std::to_string(this->fileSeqNum_); } - // 文件名 + // File name std::string fileName_; - // 文件版本号 + // File version number SnapshotSeqType fileSeqNum_; }; @@ -119,46 +109,41 @@ class ChunkIndexData { public: ChunkIndexData() {} /** - * 索引chunk数据序列化(使用protobuf实现) - * @param 保存序列化后数据的指针 - * @return: true 序列化成功/ false 序列化失败 + * Index chunk data serialization (implemented using protobuf) + * @param data Saves a pointer to serialized data + * @return: true Serialization succeeded/false Serialization failed */ - bool Serialize(std::string *data) const; + bool Serialize(std::string* data) const; /** - * 反序列化索引chunk的数据到map中 - * @param 索引chunk存储的数据 - * @return: true 反序列化成功/ false 反序列化失败 + * Deserialize the data of the index chunk into the map + * @param data The data stored in the index chunk + * @return: true Deserialization succeeded/false Deserialization failed */ - bool Unserialize(const std::string &data); + bool Unserialize(const std::string& data); - void PutChunkDataName(const ChunkDataName &name) { + void PutChunkDataName(const ChunkDataName& name) { chunkMap_.emplace(name.chunkIndex_, name.chunkSeqNum_); } bool GetChunkDataName(ChunkIndexType index, ChunkDataName* nameOut) const; - bool IsExistChunkDataName(const ChunkDataName &name) const; + bool IsExistChunkDataName(const ChunkDataName& name) const; std::vector GetAllChunkIndex() const; - void SetFileName(const std::string &fileName) { - fileName_ = fileName; - } + void SetFileName(const std::string& fileName) { fileName_ = fileName; } - std::string GetFileName() { - return fileName_; - } + std::string GetFileName() { return fileName_; } private: - // 文件名 + // File name std::string fileName_; - // 快照文件索引信息map + // Snapshot file index information map std::map chunkMap_; }; - -class ChunkData{ +class ChunkData { public: ChunkData() {} std::string data_; @@ -166,132 +151,131 @@ class ChunkData{ class TransferTask { public: - TransferTask() {} - std::string 
uploadId_; + TransferTask() {} + std::string uploadId_; - void AddPartInfo(int partNum, std::string etag) { - m_.Lock(); - partInfo_.emplace(partNum, etag); - m_.UnLock(); - } + void AddPartInfo(int partNum, std::string etag) { + m_.Lock(); + partInfo_.emplace(partNum, etag); + m_.UnLock(); + } - std::map GetPartInfo() { - return partInfo_; - } + std::map GetPartInfo() { return partInfo_; } private: - mutable SpinLock m_; - // partnumber <=> etag - std::map partInfo_; + mutable SpinLock m_; + // partnumber <=> etag + std::map partInfo_; }; class SnapshotDataStore { public: - SnapshotDataStore() {} + SnapshotDataStore() {} virtual ~SnapshotDataStore() {} /** - * 快照的datastore初始化,根据存储的类型有不同的实现 - * @param s3配置文件路径 - * @return 0 初始化成功/ -1 初始化失败 + * The datastore initialization of snapshots can be implemented differently + * depending on the type of storage + * @param s3 configuration file path + * @return 0 initialization successful/-1 initialization failed */ - virtual int Init(const std::string &confpath) = 0; + virtual int Init(const std::string& confpath) = 0; /** - * 存储快照文件的元数据信息到datastore中 - * @param 元数据对象名 - * @param 元数据对象的数据内容 - * @return 0 保存成功/ -1 保存失败 + * Store the metadata information of the snapshot file in the datastore + * @param name Metadata object name + * @param The data content of the metadata object + * @return 0 saved successfully/-1 failed to save */ - virtual int PutChunkIndexData(const ChunkIndexDataName &name, - const ChunkIndexData &meta) = 0; + virtual int PutChunkIndexData(const ChunkIndexDataName& name, + const ChunkIndexData& meta) = 0; /** - * 获取快照文件的元数据信息 - * @param 元数据对象名 - * @param 保存元数据数据内容的指针 - * return: 0 获取成功/ -1 获取失败 + * Obtain metadata information for snapshot files + * @param name Metadata object name + * @param Pointer to save metadata data content + * @return: 0 successfully obtained/-1 failed to obtain */ - virtual int GetChunkIndexData(const ChunkIndexDataName &name, - ChunkIndexData *meta) = 0; + virtual int GetChunkIndexData(const ChunkIndexDataName& name, + ChunkIndexData* meta) = 0; /** - * 删除快照文件的元数据 - * @param 元数据对象名 - * @return: 0 删除成功/ -1 删除失败 + * Delete metadata for snapshot files + * @param name Metadata object name + * @return: 0 successfully deleted/-1 failed to delete */ - virtual int DeleteChunkIndexData(const ChunkIndexDataName &name) = 0; - // 快照元数据chunk是否存在 + virtual int DeleteChunkIndexData(const ChunkIndexDataName& name) = 0; + // Does the snapshot metadata chunk exist /** - * 判断快照元数据是否存在 - * @param 元数据对象名 - * @return: true 存在/ false 不存在 + * Determine whether snapshot metadata exists + * @param name Metadata object name + * @return: true exists/false does not exist */ - virtual bool ChunkIndexDataExist(const ChunkIndexDataName &name) = 0; -/* - // 存储快照文件的数据信息到datastore - virtual int PutChunkData(const ChunkDataName &name, - const ChunkData &data) = 0; - - // 读取快照文件的数据信息 - virtual int GetChunkData(const ChunkDataName &name, - ChunkData *data) = 0; -*/ + virtual bool ChunkIndexDataExist(const ChunkIndexDataName& name) = 0; + /* + // Store the data information of the snapshot file in the datastore + virtual int PutChunkData(const ChunkDataName &name, + const ChunkData &data) = 0; + + // Reading data information from snapshot files + virtual int GetChunkData(const ChunkDataName &name, + ChunkData *data) = 0; + */ /** - * 删除快照的数据chunk - * @param 数据chunk名 - * @return: 0 删除成功/ -1 删除失败 + * Delete the data chunk of the snapshot + * @param name chunk data name + * @return: 0 successfully deleted/-1 failed to delete */ - virtual int 
DeleteChunkData(const ChunkDataName& name) = 0; /** - * 判断快照的数据chunk是否存在 - * @param 数据chunk名称 - * @return: true 存在/ false 不存在 + * Determine whether the data chunk of the snapshot exists + * @param name chunk data name + * @return: true exists/false does not exist */ - virtual bool ChunkDataExist(const ChunkDataName &name) = 0; - // 设置快照转储完成标志 -/* - virtual int SetSnapshotFlag(const ChunkIndexDataName &name, int flag) = 0; - // 获取快照转储完成标志 - virtual int GetSnapshotFlag(const ChunkIndexDataName &name) = 0; -*/ + virtual bool ChunkDataExist(const ChunkDataName& name) = 0; + // Set snapshot dump completion flag + /* + virtual int SetSnapshotFlag(const ChunkIndexDataName &name, int flag) = + 0; + // Get snapshot dump completion flag + virtual int GetSnapshotFlag(const ChunkIndexDataName &name) = 0; + */ /** - * 初始化数据库chunk的分片转储任务 - * @param 数据chunk名称 - * @param 管理转储任务的指针 - * @return 0 任务初始化成功/ -1 任务初始化失败 + * Initialize the sharded dump task of a data chunk + * @param name chunk data name + * @param task Pointer to the dump task management structure + * @return 0 Task initialization successful/-1 Task initialization failed */ - virtual int DataChunkTranferInit(const ChunkDataName &name, - std::shared_ptr<TransferTask> task) = 0; + virtual int DataChunkTranferInit(const ChunkDataName& name, + std::shared_ptr<TransferTask> task) = 0; /** - * 添加数据chunk的一个分片到转储任务中 - * @param 数据chunk名 - * @转储任务 - * @第几个分片 - * @分片大小 - * @分片的数据内容 - * @return: 0 添加成功/ -1 添加失败 + * Add one shard of the data chunk to the dump task. + * @param name chunk name + * @param task Dump task + * @param partNum Index of the shard + * @param partSize Shard size + * @param buf Shard data content + * @return: 0 for successful addition / -1 for failure to add */ - virtual int DataChunkTranferAddPart(const ChunkDataName &name, + virtual int DataChunkTranferAddPart(const ChunkDataName& name, std::shared_ptr<TransferTask> task, - int partNum, - int partSize, - const char* buf) = 0; + int partNum, int partSize, + const char* buf) = 0; /** - * 完成数据chunk的转储任务 - * @param 数据chunk名 - * @param 转储任务管理结构 - * @return: 0 转储任务完成/ 转储任务失败 -1 + * Complete the dump task of a data chunk + * @param name chunk data name + * @param task Dump task management structure + * @return: 0 if the dump task completed / -1 if it failed */ - virtual int DataChunkTranferComplete(const ChunkDataName &name, - std::shared_ptr<TransferTask> task) = 0; + virtual int DataChunkTranferComplete( + const ChunkDataName& name, std::shared_ptr<TransferTask> task) = 0; /** - * 终止数据chunk的分片转储任务 - * @param 数据chunk名 - * @param 转储任务管理结构 - * @return: 0 任务终止成功/ -1 任务终止失败 + * Abort the sharded dump task of a data chunk + * @param name chunk data name + * @param task Dump task management structure + * @return: 0 if the task was aborted successfully / -1 on failure */ - virtual int DataChunkTranferAbort(const ChunkDataName &name, + virtual int DataChunkTranferAbort(const ChunkDataName& name, std::shared_ptr<TransferTask> task) = 0; }; -} // namespace snapshotcloneserver -} // namespace curve +} // namespace snapshotcloneserver +} // namespace curve #endif // SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_DATA_STORE_H_
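The four DataChunkTranfer* methods above follow the usual multipart-upload shape: init, add each part, then complete, aborting on any failure. A minimal caller sketch, illustrative only (DumpChunk is a hypothetical helper; store, name, buf, chunkSize and partSize are assumed to be provided, with chunkSize a multiple of partSize as the chunkSplitSize_ alignment check earlier requires):

    #include <memory>

    // Sketch of the multipart dump protocol defined by the interface above.
    int DumpChunk(SnapshotDataStore* store, const ChunkDataName& name,
                  const char* buf, uint64_t chunkSize, int partSize) {
        auto task = std::make_shared<TransferTask>();
        if (store->DataChunkTranferInit(name, task) < 0) return -1;
        int partCount = static_cast<int>(chunkSize / partSize);
        for (int partNum = 0; partNum < partCount; partNum++) {
            if (store->DataChunkTranferAddPart(name, task, partNum, partSize,
                                               buf + partNum * partSize) < 0) {
                store->DataChunkTranferAbort(name, task);  // roll back on failure
                return -1;
            }
        }
        return store->DataChunkTranferComplete(name, task);
    }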
diff --git a/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h b/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h index d1324243e4..d43add3f96 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h +++ b/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h @@ -23,13 +23,14 @@ #ifndef SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_DATA_STORE_S3_H_ #define SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_DATA_STORE_S3_H_ -#include -#include #include -#include #include -#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" +#include +#include + #include "src/common/s3_adapter.h" +#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" using ::curve::common::S3Adapter; namespace curve { @@ -37,59 +38,53 @@ namespace snapshotcloneserver { class S3SnapshotDataStore : public SnapshotDataStore { public: - S3SnapshotDataStore() { s3Adapter4Meta_ = std::make_shared<S3Adapter>(); s3Adapter4Data_ = std::make_shared<S3Adapter>(); } ~S3SnapshotDataStore() {} - int Init(const std::string &path) override; - int PutChunkIndexData(const ChunkIndexDataName &name, - const ChunkIndexData &meta) override; - int GetChunkIndexData(const ChunkIndexDataName &name, - ChunkIndexData *meta) override; - int DeleteChunkIndexData(const ChunkIndexDataName &name) override; - bool ChunkIndexDataExist(const ChunkIndexDataName &name) override; + int Init(const std::string& path) override; + int PutChunkIndexData(const ChunkIndexDataName& name, + const ChunkIndexData& meta) override; + int GetChunkIndexData(const ChunkIndexDataName& name, + ChunkIndexData* meta) override; + int DeleteChunkIndexData(const ChunkIndexDataName& name) override; + bool ChunkIndexDataExist(const ChunkIndexDataName& name) override; // int PutChunkData(const ChunkDataName &name, // const ChunkData &data) override; // int GetChunkData(const ChunkDataName &name, // ChunkData *data) override; - int DeleteChunkData(const ChunkDataName &name) override; - bool ChunkDataExist(const ChunkDataName &name) override; -/* nos暂时不支持,后续增加 - int SetSnapshotFlag(const ChunkIndexDataName &name, int flag) override; - int GetSnapshotFlag(const ChunkIndexDataName &name) override; -*/ - int DataChunkTranferInit(const ChunkDataName &name, - std::shared_ptr<TransferTask> task) override; - int DataChunkTranferAddPart(const ChunkDataName &name, - std::shared_ptr<TransferTask> task, - int partNum, - int partSize, - const char* buf) override; - int DataChunkTranferComplete(const ChunkDataName &name, - std::shared_ptr<TransferTask> task) override; - int DataChunkTranferAbort(const ChunkDataName &name, - std::shared_ptr<TransferTask> task) override; + int DeleteChunkData(const ChunkDataName& name) override; + bool ChunkDataExist(const ChunkDataName& name) override; + /* NOS is currently not supported, but will be added in the future int SetSnapshotFlag(const ChunkIndexDataName &name, int flag) override; int GetSnapshotFlag(const ChunkIndexDataName &name) override; */ + int DataChunkTranferInit(const ChunkDataName& name, + std::shared_ptr<TransferTask> task) override; + int DataChunkTranferAddPart(const ChunkDataName& name, + std::shared_ptr<TransferTask> task, int partNum, + int partSize, const char* buf) override; + int DataChunkTranferComplete(const ChunkDataName& name, + std::shared_ptr<TransferTask> task) override; + int DataChunkTranferAbort(const ChunkDataName& name, + std::shared_ptr<TransferTask> task) override; - void SetMetaAdapter(std::shared_ptr<S3Adapter> adapter) { - s3Adapter4Meta_ = adapter; - } - std::shared_ptr<S3Adapter> GetMetaAdapter(void) { - return s3Adapter4Meta_; - } - void SetDataAdapter(std::shared_ptr<S3Adapter> adapter) { - s3Adapter4Data_ = adapter; - } - std::shared_ptr<S3Adapter> GetDataAdapter(void) { - return s3Adapter4Data_; - } + void SetMetaAdapter(std::shared_ptr<S3Adapter> adapter) { + s3Adapter4Meta_ = adapter; + } + std::shared_ptr<S3Adapter> GetMetaAdapter(void) { return s3Adapter4Meta_; } + void SetDataAdapter(std::shared_ptr<S3Adapter> adapter) { + s3Adapter4Data_ = adapter; + } + std::shared_ptr<S3Adapter> GetDataAdapter(void) { return
s3Adapter4Data_; } private: std::shared_ptr s3Adapter4Data_; std::shared_ptr s3Adapter4Meta_; }; -} // namespace snapshotcloneserver -} // namespace curve +} // namespace snapshotcloneserver +} // namespace curve #endif // SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_DATA_STORE_S3_H_ diff --git a/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp b/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp index 747b666350..6846b10e16 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp @@ -23,46 +23,39 @@ #include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" #include + #include "src/common/string_util.h" namespace curve { namespace snapshotcloneserver { -int SnapshotServiceManager::Init(const SnapshotCloneServerOptions &option) { +int SnapshotServiceManager::Init(const SnapshotCloneServerOptions& option) { std::shared_ptr pool = std::make_shared(option.snapshotPoolThreadNum); return taskMgr_->Init(pool, option); } -int SnapshotServiceManager::Start() { - return taskMgr_->Start(); -} +int SnapshotServiceManager::Start() { return taskMgr_->Start(); } -void SnapshotServiceManager::Stop() { - taskMgr_->Stop(); -} +void SnapshotServiceManager::Stop() { taskMgr_->Stop(); } -int SnapshotServiceManager::CreateSnapshot(const std::string &file, - const std::string &user, - const std::string &snapshotName, - UUID *uuid) { +int SnapshotServiceManager::CreateSnapshot(const std::string& file, + const std::string& user, + const std::string& snapshotName, + UUID* uuid) { SnapshotInfo snapInfo; int ret = core_->CreateSnapshotPre(file, user, snapshotName, &snapInfo); if (ret < 0) { if (kErrCodeTaskExist == ret) { - // 任务已存在的情况下返回成功,使接口幂等 + // Returns success if the task already exists, making the interface + // idempotent *uuid = snapInfo.GetUuid(); return kErrCodeSuccess; } LOG(ERROR) << "CreateSnapshotPre error, " - << " ret =" - << ret - << ", file = " - << file - << ", snapshotName = " - << snapshotName - << ", uuid = " - << snapInfo.GetUuid(); + << " ret =" << ret << ", file = " << file + << ", snapshotName = " << snapshotName + << ", uuid = " << snapInfo.GetUuid(); return ret; } *uuid = snapInfo.GetUuid(); @@ -72,30 +65,27 @@ int SnapshotServiceManager::CreateSnapshot(const std::string &file, std::make_shared(snapInfo, snapInfoMetric); taskInfo->UpdateMetric(); std::shared_ptr task = - std::make_shared( - snapInfo.GetUuid(), taskInfo, core_); + std::make_shared(snapInfo.GetUuid(), taskInfo, + core_); ret = taskMgr_->PushTask(task); if (ret < 0) { LOG(ERROR) << "Push Task error, " - << " ret = " - << ret; + << " ret = " << ret; return ret; } return kErrCodeSuccess; } -int SnapshotServiceManager::CancelSnapshot( - const UUID &uuid, - const std::string &user, - const std::string &file) { +int SnapshotServiceManager::CancelSnapshot(const UUID& uuid, + const std::string& user, + const std::string& file) { std::shared_ptr task = taskMgr_->GetTask(uuid); if (task != nullptr) { if (user != task->GetTaskInfo()->GetSnapshotInfo().GetUser()) { LOG(ERROR) << "Can not cancel snapshot by different user."; return kErrCodeInvalidUser; } - if ((!file.empty()) && - (file != task->GetTaskInfo()->GetFileName())) { + if ((!file.empty()) && (file != task->GetTaskInfo()->GetFileName())) { LOG(ERROR) << "Can not cancel, fileName is not matched."; return kErrCodeFileNameNotMatch; } @@ -104,35 +94,30 @@ int SnapshotServiceManager::CancelSnapshot( int ret = taskMgr_->CancelTask(uuid); if (ret < 0) { LOG(ERROR) 
<< "CancelSnapshot error, " - << " ret =" - << ret - << ", uuid = " - << uuid - << ", file =" - << file; + << " ret =" << ret << ", uuid = " << uuid + << ", file =" << file; return ret; } return kErrCodeSuccess; } -int SnapshotServiceManager::DeleteSnapshot( - const UUID &uuid, - const std::string &user, - const std::string &file) { +int SnapshotServiceManager::DeleteSnapshot(const UUID& uuid, + const std::string& user, + const std::string& file) { SnapshotInfo snapInfo; int ret = core_->DeleteSnapshotPre(uuid, user, file, &snapInfo); if (kErrCodeTaskExist == ret) { return kErrCodeSuccess; } else if (kErrCodeSnapshotCannotDeleteUnfinished == ret) { - // 转Cancel + // Transfer to Cancel ret = CancelSnapshot(uuid, user, file); if (kErrCodeCannotCancelFinished == ret) { - // 防止这一过程中又执行完了 + // To prevent the execution from completing again during this + // process ret = core_->DeleteSnapshotPre(uuid, user, file, &snapInfo); if (ret < 0) { LOG(ERROR) << "DeleteSnapshotPre fail" - << ", ret = " << ret - << ", uuid = " << uuid + << ", ret = " << ret << ", uuid = " << uuid << ", file =" << file; return ret; } @@ -141,8 +126,7 @@ int SnapshotServiceManager::DeleteSnapshot( } } else if (ret < 0) { LOG(ERROR) << "DeleteSnapshotPre fail" - << ", ret = " << ret - << ", uuid = " << uuid + << ", ret = " << ret << ", uuid = " << uuid << ", file =" << file; return ret; } @@ -151,8 +135,8 @@ int SnapshotServiceManager::DeleteSnapshot( std::make_shared(snapInfo, snapInfoMetric); taskInfo->UpdateMetric(); std::shared_ptr task = - std::make_shared( - snapInfo.GetUuid(), taskInfo, core_); + std::make_shared(snapInfo.GetUuid(), taskInfo, + core_); ret = taskMgr_->PushTask(task); if (ret < 0) { LOG(ERROR) << "Push Task error, " @@ -162,31 +146,28 @@ int SnapshotServiceManager::DeleteSnapshot( return kErrCodeSuccess; } -int SnapshotServiceManager::GetFileSnapshotInfo(const std::string &file, - const std::string &user, - std::vector *info) { +int SnapshotServiceManager::GetFileSnapshotInfo( + const std::string& file, const std::string& user, + std::vector* info) { std::vector snapInfos; int ret = core_->GetFileSnapshotInfo(file, &snapInfos); if (ret < 0) { LOG(ERROR) << "GetFileSnapshotInfo error, " - << " ret = " << ret - << ", file = " << file; + << " ret = " << ret << ", file = " << file; return ret; } return GetFileSnapshotInfoInner(snapInfos, user, info); } -int SnapshotServiceManager::GetFileSnapshotInfoById(const std::string &file, - const std::string &user, - const UUID &uuid, - std::vector *info) { +int SnapshotServiceManager::GetFileSnapshotInfoById( + const std::string& file, const std::string& user, const UUID& uuid, + std::vector* info) { std::vector snapInfos; SnapshotInfo snap; int ret = core_->GetSnapshotInfo(uuid, &snap); if (ret < 0) { LOG(ERROR) << "GetSnapshotInfo error, " - << " ret = " << ret - << ", file = " << file + << " ret = " << ret << ", file = " << file << ", uuid = " << uuid; return kErrCodeFileNotExist; } @@ -201,11 +182,10 @@ int SnapshotServiceManager::GetFileSnapshotInfoById(const std::string &file, } int SnapshotServiceManager::GetFileSnapshotInfoInner( - std::vector snapInfos, - const std::string &user, - std::vector *info) { + std::vector snapInfos, const std::string& user, + std::vector* info) { int ret = kErrCodeSuccess; - for (auto &snap : snapInfos) { + for (auto& snap : snapInfos) { if (snap.GetUser() == user) { Status st = snap.GetStatus(); switch (st) { @@ -226,15 +206,15 @@ int SnapshotServiceManager::GetFileSnapshotInfoInner( taskMgr_->GetTask(uuid); if (task != 
nullptr) { info->emplace_back(snap, - task->GetTaskInfo()->GetProgress()); + task->GetTaskInfo()->GetProgress()); } else { - // 刚刚完成 + // Just completed SnapshotInfo newInfo; ret = core_->GetSnapshotInfo(uuid, &newInfo); if (ret < 0) { - LOG(ERROR) << "GetSnapshotInfo fail" - << ", ret = " << ret - << ", uuid = " << uuid; + LOG(ERROR) + << "GetSnapshotInfo fail" + << ", ret = " << ret << ", uuid = " << uuid; return ret; } switch (newInfo.GetStatus()) { @@ -248,7 +228,8 @@ int SnapshotServiceManager::GetFileSnapshotInfoInner( } default: LOG(ERROR) << "can not reach here!"; - // 当更新数据库失败时,有可能进入这里 + // When updating the database fails, it is + // possible to enter here return kErrCodeInternalError; } } @@ -263,7 +244,7 @@ int SnapshotServiceManager::GetFileSnapshotInfoInner( return kErrCodeSuccess; } -bool SnapshotFilterCondition::IsMatchCondition(const SnapshotInfo &snapInfo) { +bool SnapshotFilterCondition::IsMatchCondition(const SnapshotInfo& snapInfo) { if (user_ != nullptr && *user_ != snapInfo.GetUser()) { return false; } @@ -277,14 +258,12 @@ bool SnapshotFilterCondition::IsMatchCondition(const SnapshotInfo &snapInfo) { } int status; - if (status_ != nullptr - && common::StringToInt(*status_, &status) == false) { + if (status_ != nullptr && common::StringToInt(*status_, &status) == false) { return false; } - if (status_ != nullptr - && common::StringToInt(*status_, &status) == true - && status != static_cast(snapInfo.GetStatus())) { + if (status_ != nullptr && common::StringToInt(*status_, &status) == true && + status != static_cast(snapInfo.GetStatus())) { return false; } @@ -292,11 +271,10 @@ bool SnapshotFilterCondition::IsMatchCondition(const SnapshotInfo &snapInfo) { } int SnapshotServiceManager::GetSnapshotListInner( - std::vector snapInfos, - SnapshotFilterCondition filter, - std::vector *info) { + std::vector snapInfos, SnapshotFilterCondition filter, + std::vector* info) { int ret = kErrCodeSuccess; - for (auto &snap : snapInfos) { + for (auto& snap : snapInfos) { if (filter.IsMatchCondition(snap)) { Status st = snap.GetStatus(); switch (st) { @@ -317,15 +295,15 @@ int SnapshotServiceManager::GetSnapshotListInner( taskMgr_->GetTask(uuid); if (task != nullptr) { info->emplace_back(snap, - task->GetTaskInfo()->GetProgress()); + task->GetTaskInfo()->GetProgress()); } else { - // 刚刚完成 + // Just completed SnapshotInfo newInfo; ret = core_->GetSnapshotInfo(uuid, &newInfo); if (ret < 0) { - LOG(ERROR) << "GetSnapshotInfo fail" - << ", ret = " << ret - << ", uuid = " << uuid; + LOG(ERROR) + << "GetSnapshotInfo fail" + << ", ret = " << ret << ", uuid = " << uuid; return ret; } switch (newInfo.GetStatus()) { @@ -339,7 +317,8 @@ int SnapshotServiceManager::GetSnapshotListInner( } default: LOG(ERROR) << "can not reach here!"; - // 当更新数据库失败时,有可能进入这里 + // When updating the database fails, it is + // possible to enter here return kErrCodeInternalError; } } @@ -355,8 +334,8 @@ int SnapshotServiceManager::GetSnapshotListInner( } int SnapshotServiceManager::GetSnapshotListByFilter( - const SnapshotFilterCondition &filter, - std::vector *info) { + const SnapshotFilterCondition& filter, + std::vector* info) { std::vector snapInfos; int ret = core_->GetSnapshotList(&snapInfos); if (ret < 0) { @@ -374,50 +353,44 @@ int SnapshotServiceManager::RecoverSnapshotTask() { LOG(ERROR) << "GetSnapshotList error"; return ret; } - for (auto &snap : list) { + for (auto& snap : list) { Status st = snap.GetStatus(); switch (st) { - case Status::pending : { + case Status::pending: { auto snapInfoMetric = 
std::make_shared(snap.GetUuid()); std::shared_ptr taskInfo = std::make_shared(snap, snapInfoMetric); taskInfo->UpdateMetric(); std::shared_ptr task = - std::make_shared( - snap.GetUuid(), - taskInfo, - core_); + std::make_shared(snap.GetUuid(), + taskInfo, core_); ret = taskMgr_->PushTask(task); if (ret < 0) { - LOG(ERROR) << "RecoverSnapshotTask push task error, ret = " - << ret - << ", uuid = " - << snap.GetUuid(); + LOG(ERROR) + << "RecoverSnapshotTask push task error, ret = " << ret + << ", uuid = " << snap.GetUuid(); return ret; } break; } - // 重启恢复的canceling等价于errorDeleting - case Status::canceling : - case Status::deleting : - case Status::errorDeleting : { + // canceling restart recovery is equivalent to errorDeleting + case Status::canceling: + case Status::deleting: + case Status::errorDeleting: { auto snapInfoMetric = std::make_shared(snap.GetUuid()); std::shared_ptr taskInfo = std::make_shared(snap, snapInfoMetric); taskInfo->UpdateMetric(); std::shared_ptr task = - std::make_shared( - snap.GetUuid(), - taskInfo, - core_); + std::make_shared(snap.GetUuid(), + taskInfo, core_); ret = taskMgr_->PushTask(task); if (ret < 0) { - LOG(ERROR) << "RecoverSnapshotTask push task error, ret = " - << ret - << ", uuid = " - << snap.GetUuid(); + LOG(ERROR) + << "RecoverSnapshotTask push task error, ret = " << ret + << ", uuid = " << snap.GetUuid(); return ret; } break; @@ -431,4 +404,3 @@ int SnapshotServiceManager::RecoverSnapshotTask() { } // namespace snapshotcloneserver } // namespace curve - diff --git a/src/snapshotcloneserver/snapshot/snapshot_service_manager.h b/src/snapshotcloneserver/snapshot/snapshot_service_manager.h index 1aa7143e9f..9c7944f17f 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_service_manager.h +++ b/src/snapshotcloneserver/snapshot/snapshot_service_manager.h @@ -27,49 +27,39 @@ #include #include +#include "json/json.h" +#include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/common/config.h" #include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include "src/snapshotcloneserver/snapshot/snapshot_task.h" #include "src/snapshotcloneserver/snapshot/snapshot_task_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/snapshotcloneserver/common/config.h" -#include "json/json.h" namespace curve { namespace snapshotcloneserver { /** - * @brief 文件单个快照信息 + * @brief file single snapshot information */ class FileSnapshotInfo { public: FileSnapshotInfo() = default; - /** - * @brief 构造函数 - * - * @param snapInfo 快照信息 - * @param snapProgress 快照完成度百分比 - */ - FileSnapshotInfo(const SnapshotInfo &snapInfo, - uint32_t snapProgress) - : snapInfo_(snapInfo), - snapProgress_(snapProgress) {} - - void SetSnapshotInfo(const SnapshotInfo &snapInfo) { - snapInfo_ = snapInfo; - } + /** + * @brief constructor + * + * @param snapInfo snapshot information + * @param snapProgress snapshot completion percentage + */ + FileSnapshotInfo(const SnapshotInfo& snapInfo, uint32_t snapProgress) + : snapInfo_(snapInfo), snapProgress_(snapProgress) {} - SnapshotInfo GetSnapshotInfo() const { - return snapInfo_; - } + void SetSnapshotInfo(const SnapshotInfo& snapInfo) { snapInfo_ = snapInfo; } - void SetSnapProgress(uint32_t progress) { - snapProgress_ = progress; - } + SnapshotInfo GetSnapshotInfo() const { return snapInfo_; } - uint32_t GetSnapProgress() const { - return snapProgress_; - } + void SetSnapProgress(uint32_t progress) { snapProgress_ = progress; } + + uint32_t GetSnapProgress() const { return snapProgress_; } 
Json::Value ToJsonObj() const { Json::Value fileSnapObj; @@ -86,7 +76,7 @@ class FileSnapshotInfo { return fileSnapObj; } - void LoadFromJsonObj(const Json::Value &jsonObj) { + void LoadFromJsonObj(const Json::Value& jsonObj) { SnapshotInfo snapInfo; snapInfo.SetUuid(jsonObj["UUID"].asString()); snapInfo.SetUser(jsonObj["User"].asString()); @@ -101,209 +91,185 @@ class FileSnapshotInfo { } private: - // 快照信息 + // Snapshot Information SnapshotInfo snapInfo_; - // 快照处理进度百分比 + // Snapshot processing progress percentage uint32_t snapProgress_; }; class SnapshotFilterCondition { public: SnapshotFilterCondition() - : uuid_(nullptr), - file_(nullptr), - user_(nullptr), - status_(nullptr) {} - - SnapshotFilterCondition(const std::string *uuid, const std::string *file, - const std::string *user, - const std::string *status) - : uuid_(uuid), - file_(file), - user_(user), - status_(status) {} - bool IsMatchCondition(const SnapshotInfo &snapInfo); - - void SetUuid(const std::string *uuid) { - uuid_ = uuid; - } + : uuid_(nullptr), file_(nullptr), user_(nullptr), status_(nullptr) {} - void SetFile(const std::string *file) { - file_ = file; - } + SnapshotFilterCondition(const std::string* uuid, const std::string* file, + const std::string* user, const std::string* status) + : uuid_(uuid), file_(file), user_(user), status_(status) {} + bool IsMatchCondition(const SnapshotInfo& snapInfo); - void SetUser(const std::string *user) { - user_ = user; - } + void SetUuid(const std::string* uuid) { uuid_ = uuid; } - void SetStatus(const std::string *status) { - status_ = status; - } + void SetFile(const std::string* file) { file_ = file; } + + void SetUser(const std::string* user) { user_ = user; } + void SetStatus(const std::string* status) { status_ = status; } private: - const std::string *uuid_; - const std::string *file_; - const std::string *user_; - const std::string *status_; + const std::string* uuid_; + const std::string* file_; + const std::string* user_; + const std::string* status_; }; class SnapshotServiceManager { public: - /** - * @brief 构造函数 - * - * @param taskMgr 快照任务管理类对象 - * @param core 快照核心模块 - */ - SnapshotServiceManager( - std::shared_ptr taskMgr, - std::shared_ptr core) - : taskMgr_(taskMgr), - core_(core) {} + /** + * @brief constructor + * + * @param taskMgr snapshot task management class object + * @param core snapshot core module + */ + SnapshotServiceManager(std::shared_ptr taskMgr, + std::shared_ptr core) + : taskMgr_(taskMgr), core_(core) {} virtual ~SnapshotServiceManager() {} /** - * @brief 初始化 + * @brief initialization * - * @return 错误码 + * @return error code */ - virtual int Init(const SnapshotCloneServerOptions &option); + virtual int Init(const SnapshotCloneServerOptions& option); /** - * @brief 启动服务 + * @brief Start Service * - * @return 错误码 + * @return error code */ virtual int Start(); /** - * @brief 停止服务 + * @brief Stop service * */ virtual void Stop(); /** - * @brief 创建快照服务 + * @brief Create snapshot service * - * @param file 文件名 - * @param user 文件所属用户 - * @param snapshotName 快照名 - * @param uuid 快照uuid + * @param file file name + * @param user The user to whom the file belongs + * @param snapshotName SnapshotName + * @param uuid Snapshot uuid * - * @return 错误码 + * @return error code */ - virtual int CreateSnapshot(const std::string &file, - const std::string &user, - const std::string &snapshotName, - UUID *uuid); + virtual int CreateSnapshot(const std::string& file, const std::string& user, + const std::string& snapshotName, UUID* uuid); /** - * @brief 删除快照服务 + * @brief 
Delete snapshot service * - * @param uuid 快照uuid - * @param user 快照文件的用户 - * @param file 快照所属文件的文件名 + * @param uuid Snapshot uuid + * @param user The user of the snapshot file + * @param file The file name of the file to which the snapshot belongs * - * @return 错误码 + * @return error code */ - virtual int DeleteSnapshot(const UUID &uuid, - const std::string &user, - const std::string &file); + virtual int DeleteSnapshot(const UUID& uuid, const std::string& user, + const std::string& file); /** - * @brief 取消快照服务 + * @brief Cancel snapshot service * - * @param uuid 快照的uuid - * @param user 快照的用户 - * @param file 快照所属文件的文件名 + * @param uuid The uuid of the snapshot + * @param user snapshot user + * @param file The file name of the file to which the snapshot belongs * - * @return 错误码 + * @return error code */ - virtual int CancelSnapshot(const UUID &uuid, - const std::string &user, - const std::string &file); + virtual int CancelSnapshot(const UUID& uuid, const std::string& user, + const std::string& file); /** - * @brief 获取文件的快照信息服务接口 + * @brief Gets the snapshot information service interface for files * - * @param file 文件名 - * @param user 用户名 - * @param info 快照信息列表 + * @param file file name + * @param user username + * @param info snapshot information list * - * @return 错误码 + * @return error code */ - virtual int GetFileSnapshotInfo(const std::string &file, - const std::string &user, - std::vector *info); + virtual int GetFileSnapshotInfo(const std::string& file, + const std::string& user, + std::vector* info); /** - * @brief 根据Id获取文件的快照信息 + * @brief Obtain snapshot information of the file based on the ID * - * @param file 文件名 - * @param user 用户名 - * @param uuid 快照Id - * @param info 快照信息列表 + * @param file file name + * @param user username + * @param uuid SnapshotId + * @param info snapshot information list * - * @return 错误码 + * @return error code */ - virtual int GetFileSnapshotInfoById(const std::string &file, - const std::string &user, - const UUID &uuid, - std::vector *info); + virtual int GetFileSnapshotInfoById(const std::string& file, + const std::string& user, + const UUID& uuid, + std::vector* info); /** - * @brief 获取快照列表 + * @brief Get snapshot list * - * @param filter 过滤条件 - * @param info 快照信息列表 + * @param filter filtering conditions + * @param info snapshot information list * - * @return 错误码 + * @return error code */ - virtual int GetSnapshotListByFilter(const SnapshotFilterCondition &filter, - std::vector *info); + virtual int GetSnapshotListByFilter(const SnapshotFilterCondition& filter, + std::vector* info); /** - * @brief 恢复快照任务接口 + * @brief Restore Snapshot Task Interface * - * @return 错误码 + * @return error code */ virtual int RecoverSnapshotTask(); private: /** - * @brief 根据快照信息获取快照任务信息 + * @brief Obtain snapshot task information based on snapshot information * - * @param snapInfos 快照信息 - * @param user 用户名 - * @param[out] info 快照任务信息 + * @param snapInfos snapshot information + * @param user username + * @param[out] info snapshot task information * - * @return 错误码 + * @return error code */ - int GetFileSnapshotInfoInner( - std::vector snapInfos, - const std::string &user, - std::vector *info); + int GetFileSnapshotInfoInner(std::vector snapInfos, + const std::string& user, + std::vector* info); /** - * @brief 根据快照信息获取快照任务信息 + * @brief Obtain snapshot task information based on snapshot information * - * @param snapInfos 快照信息 - * @param filter 过滤条件 - * @param[out] info 快照任务信息 + * @param snapInfos snapshot information + * @param filter filtering conditions + * @param[out] info 
snapshot task information * - * @return 错误码 + * @return error code */ - int GetSnapshotListInner( - std::vector snapInfos, - SnapshotFilterCondition filter, - std::vector *info); + int GetSnapshotListInner(std::vector snapInfos, + SnapshotFilterCondition filter, + std::vector* info); private: - // 快照任务管理类对象 + // Snapshot Task Management Class Object std::shared_ptr taskMgr_; - // 快照核心模块 + // Snapshot Core Module std::shared_ptr core_; }; diff --git a/src/snapshotcloneserver/snapshot/snapshot_task.cpp b/src/snapshotcloneserver/snapshot/snapshot_task.cpp index 179f2b4617..a66bf4c4ca 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_task.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_task.cpp @@ -20,10 +20,11 @@ * Author: xuchaojie */ +#include "src/snapshotcloneserver/snapshot/snapshot_task.h" + #include #include "src/common/timeutility.h" -#include "src/snapshotcloneserver/snapshot/snapshot_task.h" namespace curve { namespace snapshotcloneserver { @@ -46,18 +47,20 @@ void ReadChunkSnapshotClosure::Run() { } /** - * @brief 转储快照的单个chunk + * @brief Dump a single chunk of a snapshot * @detail - * 由于单个chunk过大,chunk转储分片进行,分片大小为chunkSplitSize_, - * 步骤如下: - * 1. 创建一个转储任务transferTask,并调用DataChunkTranferInit初始化 - * 2. 调用ReadChunkSnapshot从curvefs读取chunk的一个分片 - * 3. 调用DataChunkTranferAddPart转储一个分片 - * 4. 重复2、3直到所有分片转储完成,调用DataChunkTranferComplete结束转储任务 - * 5. 中间如有读取或转储发生错误,则调用DataChunkTranferAbort放弃转储, - * 并返回错误码 + * Since a single chunk is too large, chunk dumping is done in segments, with + * each segment size being chunkSplitSize_. The steps are as follows: + * 1. Create a dump task transferTask and initialize it using + * DataChunkTransferInit. + * 2. Call ReadChunkSnapshot to read a segment of the chunk from CurveFS. + * 3. Call DataChunkTransferAddPart to dump a segment. + * 4. Repeat steps 2 and 3 until all segments have been dumped, and then call + * DataChunkTransferComplete to end the dump task. + * 5. If there are any errors during reading or dumping in the process, call + * DataChunkTransferAbort to abandon the dump and return an error code. 
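 * A condensed, synchronous sketch of the flow above (illustrative only; the
 * real code below issues the reads asynchronously, batches the results, and
 * adds retry and error handling):
 *
 *   dataStore_->DataChunkTranferInit(name, transferTask);        // step 1
 *   for (uint64_t i = 0; i < chunkSize / chunkSplitSize; i++) {
 *       client_->ReadChunkSnapshot(cidInfo, seqNum,
 *                                  i * len, len, buf, cb);       // step 2
 *       dataStore_->DataChunkTranferAddPart(name, transferTask,
 *                                           i, len, buf);        // step 3
 *   }
 *   dataStore_->DataChunkTranferComplete(name, transferTask);    // step 4
 *   // on any failure: dataStore_->DataChunkTranferAbort(...)    // step 5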
* - * @return 错误码 + * @return Error code */ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { ChunkDataName name = taskInfo_->name_; @@ -67,8 +70,7 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { std::shared_ptr transferTask = std::make_shared(); - int ret = dataStore_->DataChunkTranferInit(name, - transferTask); + int ret = dataStore_->DataChunkTranferInit(name, transferTask); if (ret < 0) { LOG(ERROR) << "DataChunkTranferInit error, " << " ret = " << ret @@ -80,9 +82,7 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { } auto tracker = std::make_shared(); - for (uint64_t i = 0; - i < chunkSize / chunkSplitSize; - i++) { + for (uint64_t i = 0; i < chunkSize / chunkSplitSize; i++) { auto context = std::make_shared(); context->cidInfo = taskInfo_->cidInfo_; context->seqNum = taskInfo_->name_.chunkSeqNum_; @@ -101,8 +101,8 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { } std::list results = tracker->PopResultContexts(); - ret = HandleReadChunkSnapshotResultsAndRetry( - tracker, transferTask, results); + ret = HandleReadChunkSnapshotResultsAndRetry(tracker, transferTask, + results); if (ret < 0) { break; } @@ -113,18 +113,17 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { std::list results = tracker->PopResultContexts(); if (0 == results.size()) { - // 已经完成,没有新的结果了 + // Completed, no new results break; } - ret = HandleReadChunkSnapshotResultsAndRetry( - tracker, transferTask, results); + ret = HandleReadChunkSnapshotResultsAndRetry(tracker, transferTask, + results); if (ret < 0) { break; } } while (true); if (ret >= 0) { - ret = - dataStore_->DataChunkTranferComplete(name, transferTask); + ret = dataStore_->DataChunkTranferComplete(name, transferTask); if (ret < 0) { LOG(ERROR) << "DataChunkTranferComplete fail" << ", ret = " << ret @@ -136,18 +135,15 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { } } if (ret < 0) { - int ret2 = - dataStore_->DataChunkTranferAbort( - name, - transferTask); - if (ret2 < 0) { - LOG(ERROR) << "DataChunkTranferAbort fail" - << ", ret = " << ret2 - << ", chunkDataName = " << name.ToDataChunkKey() - << ", logicalPool = " << cidInfo.lpid_ - << ", copysetId = " << cidInfo.cpid_ - << ", chunkId = " << cidInfo.cid_; - } + int ret2 = dataStore_->DataChunkTranferAbort(name, transferTask); + if (ret2 < 0) { + LOG(ERROR) << "DataChunkTranferAbort fail" + << ", ret = " << ret2 + << ", chunkDataName = " << name.ToDataChunkKey() + << ", logicalPool = " << cidInfo.lpid_ + << ", copysetId = " << cidInfo.cpid_ + << ", chunkId = " << cidInfo.cid_; + } return ret; } return kErrCodeSuccess; @@ -156,7 +152,7 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { int TransferSnapshotDataChunkTask::StartAsyncReadChunkSnapshot( std::shared_ptr tracker, std::shared_ptr context) { - ReadChunkSnapshotClosure *cb = + ReadChunkSnapshotClosure* cb = new ReadChunkSnapshotClosure(tracker, context); tracker->AddOneTrace(); uint64_t offset = context->partIndex * context->len; @@ -166,13 +162,9 @@ int TransferSnapshotDataChunkTask::StartAsyncReadChunkSnapshot( << ", chunkId = " << context->cidInfo.cid_ << ", seqNum = " << context->seqNum << ", offset = " << offset; - int ret = client_->ReadChunkSnapshot( - context->cidInfo, - context->seqNum, - offset, - context->len, - context->buf.get(), - cb); + int ret = + client_->ReadChunkSnapshot(context->cidInfo, context->seqNum, offset, + context->len, context->buf.get(), cb); if (ret < 0) { LOG(ERROR) << "ReadChunkSnapshot 
error, " << " ret = " << ret @@ -189,7 +181,7 @@ int TransferSnapshotDataChunkTask::StartAsyncReadChunkSnapshot( int TransferSnapshotDataChunkTask::HandleReadChunkSnapshotResultsAndRetry( std::shared_ptr tracker, std::shared_ptr transferTask, - const std::list &results) { + const std::list& results) { int ret = kErrCodeSuccess; for (auto context : results) { if (context->retCode < 0) { @@ -197,9 +189,8 @@ int TransferSnapshotDataChunkTask::HandleReadChunkSnapshotResultsAndRetry( if (nowTime - context->startTime < context->clientAsyncMethodRetryTimeSec) { // retry - std::this_thread::sleep_for( - std::chrono::milliseconds( - taskInfo_->clientAsyncMethodRetryIntervalMs_)); + std::this_thread::sleep_for(std::chrono::milliseconds( + taskInfo_->clientAsyncMethodRetryIntervalMs_)); ret = StartAsyncReadChunkSnapshot(tracker, context); if (ret < 0) { return ret; @@ -212,15 +203,11 @@ int TransferSnapshotDataChunkTask::HandleReadChunkSnapshotResultsAndRetry( } } else { ret = dataStore_->DataChunkTranferAddPart( - taskInfo_->name_, - transferTask, - context->partIndex, - context->len, - context->buf.get()); + taskInfo_->name_, transferTask, context->partIndex, + context->len, context->buf.get()); if (ret < 0) { LOG(ERROR) << "DataChunkTranferAddPart fail" - << ", ret = " << ret - << ", chunkDataName = " + << ", ret = " << ret << ", chunkDataName = " << taskInfo_->name_.ToDataChunkKey() << ", index = " << context->partIndex; return ret; diff --git a/src/snapshotcloneserver/snapshot/snapshot_task.h b/src/snapshotcloneserver/snapshot/snapshot_task.h index bf53993a61..23102eb4f5 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_task.h +++ b/src/snapshotcloneserver/snapshot/snapshot_task.h @@ -23,172 +23,153 @@ #ifndef SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_TASK_H_ #define SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_TASK_H_ -#include -#include #include +#include +#include -#include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/common/snapshotclone_metric.h" #include "src/snapshotcloneserver/common/task.h" #include "src/snapshotcloneserver/common/task_info.h" -#include "src/snapshotcloneserver/common/snapshotclone_metric.h" #include "src/snapshotcloneserver/common/task_tracker.h" +#include "src/snapshotcloneserver/snapshot/snapshot_core.h" namespace curve { namespace snapshotcloneserver { /** - * @brief 快照任务信息 + * @brief snapshot task information */ class SnapshotTaskInfo : public TaskInfo { public: - /** - * @brief 构造函数 - * - * @param snapInfo 快照信息 - */ - explicit SnapshotTaskInfo(const SnapshotInfo &snapInfo, - std::shared_ptr metric) - : TaskInfo(), - snapshotInfo_(snapInfo), - metric_(metric) {} + /** + * @brief constructor + * + * @param snapInfo snapshot information + */ + explicit SnapshotTaskInfo(const SnapshotInfo& snapInfo, + std::shared_ptr metric) + : TaskInfo(), snapshotInfo_(snapInfo), metric_(metric) {} /** - * @brief 获取快照信息 + * @brief Get snapshot information * - * @return 快照信息 + * @return snapshot information */ - SnapshotInfo& GetSnapshotInfo() { - return snapshotInfo_; - } + SnapshotInfo& GetSnapshotInfo() { return snapshotInfo_; } /** - * @brief 获取快照uuid + * @brief Get snapshot uuid * - * @return 快照uuid + * @return snapshot uuid */ - UUID GetUuid() const { - return snapshotInfo_.GetUuid(); - } + UUID GetUuid() const { return snapshotInfo_.GetUuid(); } /** - * @brief 获取文件名 + * @brief Get file name * - * @return 文件名 + * @return file name */ - std::string GetFileName() const { - return 
snapshotInfo_.GetFileName(); - } + std::string GetFileName() const { return snapshotInfo_.GetFileName(); } - void UpdateMetric() { - metric_->Update(this); - } + void UpdateMetric() { metric_->Update(this); } private: - // 快照信息 + // Snapshot information SnapshotInfo snapshotInfo_; - // metric 信息 + // Metric information std::shared_ptr metric_; }; - class SnapshotTask : public Task { public: /** - * @brief 构造函数 - * - * @param taskId 快照任务id - * @param taskInfo 快照任务信息 - */ - SnapshotTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) - : Task(taskId), - taskInfo_(taskInfo), - core_(core) {} + * @brief constructor + * + * @param taskId Snapshot task ID + * @param taskInfo snapshot task information + */ + SnapshotTask(const TaskIdType& taskId, + std::shared_ptr taskInfo, + std::shared_ptr core) + : Task(taskId), taskInfo_(taskInfo), core_(core) {} /** - * @brief 获取快照任务信息对象指针 + * @brief Get snapshot task information object pointer * - * @return 快照任务信息对象指针 + * @return Snapshot task information object pointer */ - std::shared_ptr GetTaskInfo() const { - return taskInfo_; - } + std::shared_ptr GetTaskInfo() const { return taskInfo_; } protected: - // 快照任务信息 + // Snapshot task information std::shared_ptr taskInfo_; - // 快照核心逻辑对象 + // Snapshot core logic object std::shared_ptr core_; }; /** - * @brief 创建快照任务 + * @brief Create snapshot task */ class SnapshotCreateTask : public SnapshotTask { public: - /** - * @brief 构造函数 - * - * @param taskId 快照任务id - * @param taskInfo 快照任务信息 - * @param core 快照核心逻辑对象 - */ - SnapshotCreateTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) + /** + * @brief constructor + * + * @param taskId Snapshot task ID + * @param taskInfo snapshot task information + * @param core snapshot core logic object + */ + SnapshotCreateTask(const TaskIdType& taskId, + std::shared_ptr taskInfo, + std::shared_ptr core) : SnapshotTask(taskId, taskInfo, core) {} /** - * @brief 快照执行函数 + * @brief Snapshot execution function */ - void Run() override { - core_->HandleCreateSnapshotTask(taskInfo_); - } + void Run() override { core_->HandleCreateSnapshotTask(taskInfo_); } }; /** - * @brief 删除快照任务 + * @brief Delete snapshot task */ class SnapshotDeleteTask : public SnapshotTask { public: - /** - * @brief 构造函数 - * - * @param taskId 快照任务id - * @param taskInfo 快照任务信息 - * @param core 快照核心逻辑对象 - */ - SnapshotDeleteTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) + /** + * @brief constructor + * + * @param taskId Snapshot task ID + * @param taskInfo snapshot task information + * @param core snapshot core logic object + */ + SnapshotDeleteTask(const TaskIdType& taskId, + std::shared_ptr taskInfo, + std::shared_ptr core) : SnapshotTask(taskId, taskInfo, core) {} /** - * @brief 快照执行函数 + * @brief Snapshot execution function */ - void Run() override { - core_->HandleDeleteSnapshotTask(taskInfo_); - } + void Run() override { core_->HandleDeleteSnapshotTask(taskInfo_); } }; struct ReadChunkSnapshotContext { - // chunkid 信息 + // Chunk ID information ChunkIDInfo cidInfo; // seq uint64_t seqNum; - // 分片的索引 + // Index of the slice uint64_t partIndex; - // 分片的buffer + // Buffer for the slice std::unique_ptr buf; - // 分片长度 + // Length of the slice uint64_t len; - // 返回值 + // Return value int retCode; - // 异步请求开始时间 + // Asynchronous request start time uint64_t startTime; - // 异步请求重试总时间 + // Total retry time for asynchronous requests uint64_t clientAsyncMethodRetryTimeSec; }; @@ -200,8 +181,7 @@ struct ReadChunkSnapshotClosure : public
SnapCloneClosure { ReadChunkSnapshotClosure( std::shared_ptr tracker, std::shared_ptr context) - : tracker_(tracker), - context_(context) {} + : tracker_(tracker), context_(context) {} void Run() override; std::shared_ptr tracker_; std::shared_ptr context_; @@ -216,13 +196,13 @@ struct TransferSnapshotDataChunkTaskInfo : public TaskInfo { uint64_t clientAsyncMethodRetryIntervalMs_; uint32_t readChunkSnapshotConcurrency_; - TransferSnapshotDataChunkTaskInfo(const ChunkDataName &name, - uint64_t chunkSize, - const ChunkIDInfo &cidInfo, - uint64_t chunkSplitSize, - uint64_t clientAsyncMethodRetryTimeSec, - uint64_t clientAsyncMethodRetryIntervalMs, - uint32_t readChunkSnapshotConcurrency) + TransferSnapshotDataChunkTaskInfo(const ChunkDataName& name, + uint64_t chunkSize, + const ChunkIDInfo& cidInfo, + uint64_t chunkSplitSize, + uint64_t clientAsyncMethodRetryTimeSec, + uint64_t clientAsyncMethodRetryIntervalMs, + uint32_t readChunkSnapshotConcurrency) : name_(name), chunkSize_(chunkSize), cidInfo_(cidInfo), @@ -234,7 +214,8 @@ struct TransferSnapshotDataChunkTaskInfo : public TaskInfo { class TransferSnapshotDataChunkTask : public TrackerTask { public: - TransferSnapshotDataChunkTask(const TaskIdType &taskId, + TransferSnapshotDataChunkTask( + const TaskIdType& taskId, std::shared_ptr taskInfo, std::shared_ptr client, std::shared_ptr dataStore) @@ -255,37 +236,37 @@ class TransferSnapshotDataChunkTask : public TrackerTask { private: /** - * @brief 转储快照单个chunk + * @brief Dump snapshot single chunk * - * @return 错误码 + * @return error code */ int TransferSnapshotDataChunk(); /** - * @brief 开始异步ReadSnapshotChunk + * @brief Start asynchronous ReadSnapshotChunk * - * @param tracker 异步ReadSnapshotChunk追踪器 - * @param context ReadSnapshotChunk上下文 + * @param tracker asynchronous ReadSnapshotChunk tracker + * @param context ReadSnapshotChunk context * - * @return 错误码 + * @return error code */ int StartAsyncReadChunkSnapshot( std::shared_ptr tracker, std::shared_ptr context); /** - * @brief 处理ReadChunkSnapshot的结果并重试 + * @brief Process the results of ReadChunkSnapshot and try again * - * @param tracker 异步ReadSnapshotChunk追踪器 - * @param transferTask 转储任务 - * @param results ReadChunkSnapshot结果列表 + * @param tracker asynchronous ReadSnapshotChunk tracker + * @param transferTask Dump Task + * @param results ReadChunkSnapshot result list * - * @return 错误码 + * @return error code */ int HandleReadChunkSnapshotResultsAndRetry( std::shared_ptr tracker, std::shared_ptr transferTask, - const std::list &results); + const std::list& results); protected: std::shared_ptr taskInfo_; @@ -293,7 +274,6 @@ class TransferSnapshotDataChunkTask : public TrackerTask { std::shared_ptr dataStore_; }; - } // namespace snapshotcloneserver } // namespace curve diff --git a/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp b/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp index aa57505b9f..2c82ae1d0f 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp @@ -21,9 +21,9 @@ */ #include "src/snapshotcloneserver/snapshot/snapshot_task_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/common/concurrent/concurrent.h" +#include "src/common/concurrent/concurrent.h" +#include "src/common/snapshotclone/snapshotclone_define.h" using curve::common::LockGuard; @@ -39,7 +39,7 @@ int SnapshotTaskManager::Start() { return ret; } isStop_.store(false); - // isStop_标志先置,防止backEndThread先退出 + // isStop_ Flag set first 
to prevent backEndThread from exiting early backEndThread = std::thread(&SnapshotTaskManager::BackEndThreadFunc, this); } @@ -58,7 +58,7 @@ int SnapshotTaskManager::PushTask(std::shared_ptr task) { if (isStop_.load()) { return kErrCodeServiceIsStop; } - // 移除实际已完成的task,防止uuid冲突 + // Remove tasks that have actually completed, to prevent UUID conflicts ScanWorkingTask(); { @@ -73,13 +73,13 @@ int SnapshotTaskManager::PushTask(std::shared_ptr task) { } snapshotMetric_->snapshotWaiting << 1; - // 立即执行task + // Execute the task immediately ScanWaitingTask(); return kErrCodeSuccess; } std::shared_ptr SnapshotTaskManager::GetTask( - const TaskIdType &taskId) const { + const TaskIdType& taskId) const { ReadLockGuard taskMapRlock(taskMapLock_); auto it = taskMap_.find(taskId); if (it != taskMap_.end()) { @@ -88,14 +88,12 @@ std::shared_ptr SnapshotTaskManager::GetTask( return nullptr; } -int SnapshotTaskManager::CancelTask(const TaskIdType &taskId) { +int SnapshotTaskManager::CancelTask(const TaskIdType& taskId) { { - // 还在等待队列的Cancel直接移除 + // A task still in the waiting queue is removed directly on cancel WriteLockGuard taskMapWlock(taskMapLock_); LockGuard waitingTasksLock(waitingTasksLock_); - for (auto it = waitingTasks_.begin(); - it != waitingTasks_.end(); - it++) { + for (auto it = waitingTasks_.begin(); it != waitingTasks_.end(); it++) { if ((*it)->GetTaskId() == taskId) { int ret = core_->HandleCancelUnSchduledSnapshotTask( (*it)->GetTaskInfo()); @@ -131,12 +129,10 @@ void SnapshotTaskManager::BackEndThreadFunc() { void SnapshotTaskManager::ScanWaitingTask() { LockGuard waitingTasksLock(waitingTasksLock_); LockGuard workingTasksLock(workingTasksLock_); - for (auto it = waitingTasks_.begin(); - it != waitingTasks_.end();) { - if (workingTasks_.find((*it)->GetTaskInfo()->GetFileName()) - == workingTasks_.end()) { - workingTasks_.emplace((*it)->GetTaskInfo()->GetFileName(), - *it); + for (auto it = waitingTasks_.begin(); it != waitingTasks_.end();) { + if (workingTasks_.find((*it)->GetTaskInfo()->GetFileName()) == + workingTasks_.end()) { + workingTasks_.emplace((*it)->GetTaskInfo()->GetFileName(), *it); threadpool_->PushTask(*it); snapshotMetric_->snapshotDoing << 1; snapshotMetric_->snapshotWaiting << -1; @@ -150,13 +146,11 @@ void SnapshotTaskManager::ScanWaitingTask() { void SnapshotTaskManager::ScanWorkingTask() { WriteLockGuard taskMapWlock(taskMapLock_); LockGuard workingTasksLock(workingTasksLock_); - for (auto it = workingTasks_.begin(); - it != workingTasks_.end();) { + for (auto it = workingTasks_.begin(); it != workingTasks_.end();) { auto taskInfo = it->second->GetTaskInfo(); if (taskInfo->IsFinish()) { snapshotMetric_->snapshotDoing << -1; - if (taskInfo->GetSnapshotInfo().GetStatus() - != Status::done) { + if (taskInfo->GetSnapshotInfo().GetStatus() != Status::done) { snapshotMetric_->snapshotFailed << 1; } else { snapshotMetric_->snapshotSucceed << 1; @@ -171,4 +165,3 @@ void SnapshotTaskManager::ScanWorkingTask() { } // namespace snapshotcloneserver } // namespace curve - diff --git a/src/snapshotcloneserver/snapshot/snapshot_task_manager.h b/src/snapshotcloneserver/snapshot/snapshot_task_manager.h index a22eb0e2ae..c2cee2baa3 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_task_manager.h +++ b/src/snapshotcloneserver/snapshot/snapshot_task_manager.h @@ -23,54 +23,51 @@ #ifndef SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_TASK_MANAGER_H_ #define SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_TASK_MANAGER_H_ -#include -#include #include -#include #include +#include +#include +#include #include //
NOLINT -#include "src/snapshotcloneserver/snapshot/snapshot_task.h" -#include "src/snapshotcloneserver/common/thread_pool.h" #include "src/common/concurrent/rw_lock.h" #include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/config.h" #include "src/snapshotcloneserver/common/snapshotclone_metric.h" +#include "src/snapshotcloneserver/common/thread_pool.h" #include "src/snapshotcloneserver/snapshot/snapshot_core.h" +#include "src/snapshotcloneserver/snapshot/snapshot_task.h" -using ::curve::common::RWLock; +using ::curve::common::Mutex; using ::curve::common::ReadLockGuard; +using ::curve::common::RWLock; using ::curve::common::WriteLockGuard; -using ::curve::common::Mutex; namespace curve { namespace snapshotcloneserver { /** - * @brief 快照任务管理器类 + * @brief Snapshot task manager class */ class SnapshotTaskManager { public: - /** - * @brief 默认构造函数 - */ - SnapshotTaskManager( - std::shared_ptr core, - std::shared_ptr snapshotMetric) + /** + * @brief Constructor + */ + SnapshotTaskManager(std::shared_ptr core, + std::shared_ptr snapshotMetric) : isStop_(true), core_(core), snapshotMetric_(snapshotMetric), snapshotTaskManagerScanIntervalMs_(0) {} /** - * @brief 析构函数 + * @brief destructor */ - ~SnapshotTaskManager() { - Stop(); - } + ~SnapshotTaskManager() { Stop(); } int Init(std::shared_ptr pool, - const SnapshotCloneServerOptions &option) { + const SnapshotCloneServerOptions& option) { snapshotTaskManagerScanIntervalMs_ = option.snapshotTaskManagerScanIntervalMs; threadpool_ = pool; @@ -78,88 +75,92 @@ class SnapshotTaskManager { } /** - * @brief 启动 + * @brief Start * - * @return 错误码 + * @return error code */ int Start(); /** - * @brief 停止服务 + * @brief Stop service * */ void Stop(); /** - * @brief 添加任务 + * @brief Add a task * - * @param task 快照任务 + * @param task snapshot task * - * @return 错误码 + * @return error code */ int PushTask(std::shared_ptr task); /** - * @brief 获取任务 + * @brief Get a task * - * @param taskId 任务id + * @param taskId Task ID * - * @return 快照任务指针 + * @return snapshot task pointer */ - std::shared_ptr GetTask(const TaskIdType &taskId) const; + std::shared_ptr GetTask(const TaskIdType& taskId) const; /** - * @brief 取消任务 + * @brief Cancel a task * - * @param taskId 任务id + * @param taskId Task ID * - * @return 错误码 + * @return error code */ - int CancelTask(const TaskIdType &taskId); private: /** - * @brief 后台线程执行函数 + * @brief Background thread function * - * 定期执行扫描等待队列函数与扫描工作队列函数。 + * Periodically runs the waiting-queue scan function and the + * working-queue scan function */ void BackEndThreadFunc(); /** - * @brief 扫描等待任务队列函数 + * @brief Scan the waiting task queue * - * 扫描等待队列,判断工作队列中当前文件 - * 是否有正在执行的快照,若没有则放入工作队列 + * Scan the waiting queue and check whether the work queue already has a + * running snapshot task for the same file; if not, move the task into + * the work queue * */ void ScanWaitingTask(); /** - * @brief 扫描工作队列函数 + * @brief Scan the work queue * - * 扫描工作队列,判断工作队列中当前 - * 快照任务是否已完成,若完成则移出工作队列 + * Scan the work queue and check whether each snapshot task in it has + * finished; finished tasks are removed from the work queue * */ void ScanWorkingTask(); private: - // 后端线程 + // Backend thread std::thread backEndThread; - // id->快照任务表 + // Map from task ID to snapshot task std::map > taskMap_; mutable RWLock taskMapLock_; - // 快照等待队列 + // Snapshot waiting queue std::list > waitingTasks_; mutable Mutex waitingTasksLock_; - //
快照工作队列,实际是个map,其中key是文件名,以便于查询 + // The snapshot work queue is actually a map, where key is the file name for + // easy query std::map > workingTasks_; mutable Mutex workingTasksLock_; std::shared_ptr threadpool_; - // 当前任务管理是否停止,用于支持start,stop功能 + // Indicates whether the current task management is stopped, used to support + // start and stop functions. std::atomic_bool isStop_; // snapshot core @@ -168,7 +169,8 @@ class SnapshotTaskManager { // metric std::shared_ptr snapshotMetric_; - // 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms) + // Scanning cycle of snapshot background thread scanning waiting queue and + // work queue (unit: ms) int snapshotTaskManagerScanIntervalMs_; }; diff --git a/src/snapshotcloneserver/snapshotclone_server.cpp b/src/snapshotcloneserver/snapshotclone_server.cpp index 91a6c199e7..3bfedf2440 100644 --- a/src/snapshotcloneserver/snapshotclone_server.cpp +++ b/src/snapshotcloneserver/snapshotclone_server.cpp @@ -19,15 +19,17 @@ * Created Date: Monday March 9th 2020 * Author: hzsunjianliang */ -#include +#include "src/snapshotcloneserver/snapshotclone_server.h" + #include #include -#include +#include + #include +#include -#include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/snapshotcloneserver/snapshotclone_server.h" #include "src/common/curve_version.h" +#include "src/common/snapshotclone/snapshotclone_define.h" using LeaderElectionOptions = ::curve::election::LeaderElectionOptions; @@ -41,78 +43,72 @@ const char ACTIVE[] = "active"; const char STANDBY[] = "standby"; void InitClientOption(std::shared_ptr conf, - CurveClientOptions *clientOption) { - conf->GetValueFatalIfFail("client.config_path", - &clientOption->configPath); - conf->GetValueFatalIfFail("mds.rootUser", - &clientOption->mdsRootUser); + CurveClientOptions* clientOption) { + conf->GetValueFatalIfFail("client.config_path", &clientOption->configPath); + conf->GetValueFatalIfFail("mds.rootUser", &clientOption->mdsRootUser); conf->GetValueFatalIfFail("mds.rootPassword", - &clientOption->mdsRootPassword); + &clientOption->mdsRootPassword); conf->GetValueFatalIfFail("client.methodRetryTimeSec", - &clientOption->clientMethodRetryTimeSec); + &clientOption->clientMethodRetryTimeSec); conf->GetValueFatalIfFail("client.methodRetryIntervalMs", - &clientOption->clientMethodRetryIntervalMs); + &clientOption->clientMethodRetryIntervalMs); } void InitSnapshotCloneServerOptions(std::shared_ptr conf, - SnapshotCloneServerOptions *serverOption) { - conf->GetValueFatalIfFail("server.address", - &serverOption->addr); + SnapshotCloneServerOptions* serverOption) { + conf->GetValueFatalIfFail("server.address", &serverOption->addr); conf->GetValueFatalIfFail("server.clientAsyncMethodRetryTimeSec", - &serverOption->clientAsyncMethodRetryTimeSec); - conf->GetValueFatalIfFail( - "server.clientAsyncMethodRetryIntervalMs", - &serverOption->clientAsyncMethodRetryIntervalMs); + &serverOption->clientAsyncMethodRetryTimeSec); + conf->GetValueFatalIfFail("server.clientAsyncMethodRetryIntervalMs", + &serverOption->clientAsyncMethodRetryIntervalMs); conf->GetValueFatalIfFail("server.snapshotPoolThreadNum", - &serverOption->snapshotPoolThreadNum); - conf->GetValueFatalIfFail( - "server.snapshotTaskManagerScanIntervalMs", - &serverOption->snapshotTaskManagerScanIntervalMs); + &serverOption->snapshotPoolThreadNum); + conf->GetValueFatalIfFail("server.snapshotTaskManagerScanIntervalMs", + &serverOption->snapshotTaskManagerScanIntervalMs); conf->GetValueFatalIfFail("server.chunkSplitSize", - &serverOption->chunkSplitSize); - 
conf->GetValueFatalIfFail( - "server.checkSnapshotStatusIntervalMs", - &serverOption->checkSnapshotStatusIntervalMs); + &serverOption->chunkSplitSize); + conf->GetValueFatalIfFail("server.checkSnapshotStatusIntervalMs", + &serverOption->checkSnapshotStatusIntervalMs); conf->GetValueFatalIfFail("server.maxSnapshotLimit", - &serverOption->maxSnapshotLimit); + &serverOption->maxSnapshotLimit); conf->GetValueFatalIfFail("server.snapshotCoreThreadNum", - &serverOption->snapshotCoreThreadNum); + &serverOption->snapshotCoreThreadNum); conf->GetValueFatalIfFail("server.mdsSessionTimeUs", - &serverOption->mdsSessionTimeUs); + &serverOption->mdsSessionTimeUs); conf->GetValueFatalIfFail("server.readChunkSnapshotConcurrency", - &serverOption->readChunkSnapshotConcurrency); + &serverOption->readChunkSnapshotConcurrency); conf->GetValueFatalIfFail("server.stage1PoolThreadNum", - &serverOption->stage1PoolThreadNum); + &serverOption->stage1PoolThreadNum); conf->GetValueFatalIfFail("server.stage2PoolThreadNum", - &serverOption->stage2PoolThreadNum); + &serverOption->stage2PoolThreadNum); conf->GetValueFatalIfFail("server.commonPoolThreadNum", - &serverOption->commonPoolThreadNum); + &serverOption->commonPoolThreadNum); - conf->GetValueFatalIfFail( - "server.cloneTaskManagerScanIntervalMs", - &serverOption->cloneTaskManagerScanIntervalMs); + conf->GetValueFatalIfFail("server.cloneTaskManagerScanIntervalMs", + &serverOption->cloneTaskManagerScanIntervalMs); conf->GetValueFatalIfFail("server.cloneChunkSplitSize", - &serverOption->cloneChunkSplitSize); + &serverOption->cloneChunkSplitSize); conf->GetValueFatalIfFail("server.cloneTempDir", - &serverOption->cloneTempDir); - conf->GetValueFatalIfFail("mds.rootUser", - &serverOption->mdsRootUser); + &serverOption->cloneTempDir); + conf->GetValueFatalIfFail("mds.rootUser", &serverOption->mdsRootUser); conf->GetValueFatalIfFail("server.createCloneChunkConcurrency", - &serverOption->createCloneChunkConcurrency); + &serverOption->createCloneChunkConcurrency); conf->GetValueFatalIfFail("server.recoverChunkConcurrency", - &serverOption->recoverChunkConcurrency); - conf->GetValueFatalIfFail("server.backEndReferenceRecordScanIntervalMs", - &serverOption->backEndReferenceRecordScanIntervalMs); - conf->GetValueFatalIfFail("server.backEndReferenceFuncScanIntervalMs", - &serverOption->backEndReferenceFuncScanIntervalMs); + &serverOption->recoverChunkConcurrency); + conf->GetValueFatalIfFail( + "server.backEndReferenceRecordScanIntervalMs", + &serverOption->backEndReferenceRecordScanIntervalMs); + conf->GetValueFatalIfFail( + "server.backEndReferenceFuncScanIntervalMs", + &serverOption->backEndReferenceFuncScanIntervalMs); conf->GetValueFatalIfFail("etcd.retry.times", - &(serverOption->dlockOpts.retryTimes)); + &(serverOption->dlockOpts.retryTimes)); conf->GetValueFatalIfFail("etcd.dlock.timeoutMs", - &(serverOption->dlockOpts.ctx_timeoutMS)); + &(serverOption->dlockOpts.ctx_timeoutMS)); conf->GetValueFatalIfFail("etcd.dlock.ttlSec", - &(serverOption->dlockOpts.ttlSec)); + &(serverOption->dlockOpts.ttlSec)); } void InitEtcdConf(std::shared_ptr conf, EtcdConf* etcdConf) { @@ -128,35 +124,37 @@ void InitEtcdConf(std::shared_ptr conf, EtcdConf* etcdConf) { void SnapShotCloneServer::InitAllSnapshotCloneOptions(void) { InitClientOption(conf_, &(snapshotCloneServerOptions_.clientOptions)); InitSnapshotCloneServerOptions(conf_, - &(snapshotCloneServerOptions_.serverOption)); + &(snapshotCloneServerOptions_.serverOption)); InitEtcdConf(conf_, &(snapshotCloneServerOptions_.etcdConf)); 
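    // Illustrative pattern (the key and field below are hypothetical, for
    // example only): every option above is loaded through GetValueFatalIfFail,
    // which aborts startup when a key is missing, so wiring up a new option is
    // a single call:
    //
    //   conf_->GetValueFatalIfFail("server.someNewOption",
    //                              &(snapshotCloneServerOptions_.someNewOption));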
- conf_->GetValueFatalIfFail("etcd.operation.timeoutMs", + conf_->GetValueFatalIfFail( + "etcd.operation.timeoutMs", &(snapshotCloneServerOptions_.etcdClientTimeout)); conf_->GetValueFatalIfFail("etcd.retry.times", - &(snapshotCloneServerOptions_.etcdRetryTimes)); + &(snapshotCloneServerOptions_.etcdRetryTimes)); conf_->GetValueFatalIfFail("server.dummy.listen.port", - &(snapshotCloneServerOptions_.dummyPort)); + &(snapshotCloneServerOptions_.dummyPort)); conf_->GetValueFatalIfFail("leader.campagin.prefix", - &(snapshotCloneServerOptions_.campaginPrefix)); + &(snapshotCloneServerOptions_.campaginPrefix)); conf_->GetValueFatalIfFail("leader.session.intersec", - &(snapshotCloneServerOptions_.sessionInterSec)); + &(snapshotCloneServerOptions_.sessionInterSec)); - conf_->GetValueFatalIfFail("leader.election.timeoutms", + conf_->GetValueFatalIfFail( + "leader.election.timeoutms", &(snapshotCloneServerOptions_.electionTimeoutMs)); conf_->GetValueFatalIfFail("s3.config_path", - &(snapshotCloneServerOptions_.s3ConfPath)); + &(snapshotCloneServerOptions_.s3ConfPath)); } void SnapShotCloneServer::StartDummy() { // Expose conf and version and role(standby or active) LOG(INFO) << "snapshotCloneServer version: " - << curve::common::CurveVersion(); + << curve::common::CurveVersion(); curve::common::ExposeCurveVersion(); conf_->ExposeMetric(configMetricName); status_.expose(statusMetricName); @@ -173,38 +171,39 @@ void SnapShotCloneServer::StartDummy() { bool SnapShotCloneServer::InitEtcdClient(void) { etcdClient_ = std::make_shared(); auto res = etcdClient_->Init(snapshotCloneServerOptions_.etcdConf, - snapshotCloneServerOptions_.etcdClientTimeout, - snapshotCloneServerOptions_.etcdRetryTimes); + snapshotCloneServerOptions_.etcdClientTimeout, + snapshotCloneServerOptions_.etcdRetryTimes); if (res != EtcdErrCode::EtcdOK) { - LOG(ERROR) - << "init etcd client err! " - << "etcdaddr: " << snapshotCloneServerOptions_.etcdConf.Endpoints - << ", etcdaddr len: " << snapshotCloneServerOptions_.etcdConf.len - << ", etcdtimeout: " << snapshotCloneServerOptions_.etcdConf.DialTimeout - << ", operation timeout: " - << snapshotCloneServerOptions_.etcdClientTimeout - << ", etcd retrytimes: " - << snapshotCloneServerOptions_.etcdRetryTimes; + LOG(ERROR) << "init etcd client err! " + << "etcdaddr: " + << snapshotCloneServerOptions_.etcdConf.Endpoints + << ", etcdaddr len: " + << snapshotCloneServerOptions_.etcdConf.len + << ", etcdtimeout: " + << snapshotCloneServerOptions_.etcdConf.DialTimeout + << ", operation timeout: " + << snapshotCloneServerOptions_.etcdClientTimeout + << ", etcd retrytimes: " + << snapshotCloneServerOptions_.etcdRetryTimes; return false; } std::string out; res = etcdClient_->Get("test", &out); if (res != EtcdErrCode::EtcdOK && res != EtcdErrCode::EtcdKeyNotExist) { - LOG(ERROR) << - "Run snapsthotcloneserver err. Check if etcd is running."; + LOG(ERROR) << "Run snapsthotcloneserver err. Check if etcd is running."; return false; } LOG(INFO) << "init etcd client ok! 
" - << "etcdaddr: " << snapshotCloneServerOptions_.etcdConf.Endpoints - << ", etcdaddr len: " << snapshotCloneServerOptions_.etcdConf.len - << ", etcdtimeout: " << - snapshotCloneServerOptions_.etcdConf.DialTimeout - << ", operation timeout: " << - snapshotCloneServerOptions_.etcdClientTimeout - << ", etcd retrytimes: " << - snapshotCloneServerOptions_.etcdRetryTimes; + << "etcdaddr: " << snapshotCloneServerOptions_.etcdConf.Endpoints + << ", etcdaddr len: " << snapshotCloneServerOptions_.etcdConf.len + << ", etcdtimeout: " + << snapshotCloneServerOptions_.etcdConf.DialTimeout + << ", operation timeout: " + << snapshotCloneServerOptions_.etcdClientTimeout + << ", etcd retrytimes: " + << snapshotCloneServerOptions_.etcdRetryTimes; return true; } @@ -223,8 +222,7 @@ void SnapShotCloneServer::StartCompaginLeader(void) { // compagin leader and observe self then return while (0 != leaderElection_->CampaignLeader()) { - LOG(INFO) << option.leaderUniqueName - << " campaign for leader again"; + LOG(INFO) << option.leaderUniqueName << " campaign for leader again"; } LOG(INFO) << "Campain leader ok, I am the active member now"; status_.set_value(ACTIVE); @@ -233,8 +231,8 @@ void SnapShotCloneServer::StartCompaginLeader(void) { bool SnapShotCloneServer::Init() { snapClient_ = std::make_shared(); - fileClient_ = std::make_shared(); - client_ = std::make_shared(snapClient_, fileClient_); + fileClient_ = std::make_shared(); + client_ = std::make_shared(snapClient_, fileClient_); if (client_->Init(snapshotCloneServerOptions_.clientOptions) < 0) { LOG(ERROR) << "curvefs_client init fail."; @@ -242,8 +240,8 @@ bool SnapShotCloneServer::Init() { } auto codec = std::make_shared(); - metaStore_ = std::make_shared(etcdClient_, - codec); + metaStore_ = + std::make_shared(etcdClient_, codec); if (metaStore_->Init() < 0) { LOG(ERROR) << "metaStore init fail."; return false; @@ -255,25 +253,20 @@ bool SnapShotCloneServer::Init() { return false; } - snapshotRef_ = std::make_shared(); snapshotMetric_ = std::make_shared(metaStore_); - snapshotCore_ = std::make_shared( - client_, - metaStore_, - dataStore_, - snapshotRef_, - snapshotCloneServerOptions_.serverOption); + snapshotCore_ = std::make_shared( + client_, metaStore_, dataStore_, snapshotRef_, + snapshotCloneServerOptions_.serverOption); if (snapshotCore_->Init() < 0) { LOG(ERROR) << "SnapshotCore init fail."; return false; } - snapshotTaskManager_ = std::make_shared(snapshotCore_, - snapshotMetric_); - snapshotServiceManager_ = - std::make_shared(snapshotTaskManager_, - snapshotCore_); + snapshotTaskManager_ = + std::make_shared(snapshotCore_, snapshotMetric_); + snapshotServiceManager_ = std::make_shared( + snapshotTaskManager_, snapshotCore_); if (snapshotServiceManager_->Init( snapshotCloneServerOptions_.serverOption) < 0) { LOG(ERROR) << "SnapshotServiceManager init fail."; @@ -283,36 +276,29 @@ bool SnapShotCloneServer::Init() { cloneMetric_ = std::make_shared(); cloneRef_ = std::make_shared(); cloneCore_ = std::make_shared( - client_, - metaStore_, - dataStore_, - snapshotRef_, - cloneRef_, - snapshotCloneServerOptions_.serverOption); + client_, metaStore_, dataStore_, snapshotRef_, cloneRef_, + snapshotCloneServerOptions_.serverOption); if (cloneCore_->Init() < 0) { LOG(ERROR) << "CloneCore init fail."; return false; } - cloneTaskMgr_ = std::make_shared(cloneCore_, - cloneMetric_); + cloneTaskMgr_ = + std::make_shared(cloneCore_, cloneMetric_); cloneServiceManagerBackend_ = - std::make_shared(cloneCore_); + std::make_shared(cloneCore_); 
cloneServiceManager_ = std::make_shared( - cloneTaskMgr_, - cloneCore_, - cloneServiceManagerBackend_); + cloneTaskMgr_, cloneCore_, cloneServiceManagerBackend_); - if (cloneServiceManager_->Init( - snapshotCloneServerOptions_.serverOption) < 0) { + if (cloneServiceManager_->Init(snapshotCloneServerOptions_.serverOption) < + 0) { LOG(ERROR) << "CloneServiceManager init fail."; return false; } service_ = std::make_shared( - snapshotServiceManager_, - cloneServiceManager_); - server_ = std::make_shared(); - if (server_->AddService(service_.get(), - brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + snapshotServiceManager_, cloneServiceManager_); + server_ = std::make_shared(); + if (server_->AddService(service_.get(), brpc::SERVER_DOESNT_OWN_SERVICE) != + 0) { LOG(ERROR) << "Failed to add snapshot_service!\n"; return false; } @@ -320,7 +306,8 @@ bool SnapShotCloneServer::Init() { } bool SnapShotCloneServer::Start(void) { - // 先启动clone服务再启动snapshot服务,因为删除快照依赖是否有clone引用 + // Start the clone service before the snapshot service, because deleting a + // snapshot depends on whether it is still referenced by a clone int ret = cloneServiceManager_->Start(); if (ret < 0) { LOG(ERROR) << "cloneServiceManager start fail" @@ -349,16 +336,14 @@ bool SnapShotCloneServer::Start(void) { brpc::ServerOptions option; option.idle_timeout_sec = -1; if (server_->Start(snapshotCloneServerOptions_.serverOption.addr.c_str(), - &option) != 0) { + &option) != 0) { LOG(FATAL) << "snapshotclone rpc server start fail."; } LOG(INFO) << "snapshotclone service start ok ..."; return true; } -void SnapShotCloneServer::RunUntilQuit(void) { - server_->RunUntilAskedToQuit(); -} +void SnapShotCloneServer::RunUntilQuit(void) { server_->RunUntilAskedToQuit(); } void SnapShotCloneServer::Stop(void) { LOG(INFO) << "snapshotcloneserver stopping ..."; diff --git a/src/snapshotcloneserver/snapshotclone_server.h b/src/snapshotcloneserver/snapshotclone_server.h index 47163ddac4..ae33c61a6b 100644 --- a/src/snapshotcloneserver/snapshotclone_server.h +++ b/src/snapshotcloneserver/snapshotclone_server.h @@ -23,29 +23,26 @@ #ifndef SRC_SNAPSHOTCLONESERVER_SNAPSHOTCLONE_SERVER_H_ #define SRC_SNAPSHOTCLONESERVER_SNAPSHOTCLONE_SERVER_H_ -#include #include +#include +#include "src/client/libcurve_file.h" +#include "src/client/libcurve_snapshot.h" #include "src/common/configuration.h" +#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/leader_election/leader_election.h" - -#include "src/client/libcurve_snapshot.h" -#include "src/client/libcurve_file.h" - +#include "src/snapshotcloneserver/clone/clone_service_manager.h" #include "src/snapshotcloneserver/common/config.h" -#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/curvefs_client.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" +#include "src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h" #include "src/snapshotcloneserver/common/snapshotclone_metric.h" - +#include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" #include "src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" #include "src/snapshotcloneserver/snapshot/snapshot_task_manager.h" -#include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include "src/snapshotcloneserver/snapshotclone_service.h" -#include
"src/snapshotcloneserver/clone/clone_service_manager.h" -#include "src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h" namespace curve { namespace snapshotcloneserver { @@ -56,7 +53,6 @@ extern const char statusMetricName[]; extern const char ACTIVE[]; extern const char STANDBY[]; - using EtcdClientImp = ::curve::kvstorage::EtcdClientImp; using Configuration = ::curve::common::Configuration; using LeaderElection = ::curve::election::LeaderElection; @@ -78,46 +74,48 @@ struct SnapShotCloneServerOptions { int dummyPort; // s3 - std::string s3ConfPath; + std::string s3ConfPath; }; class SnapShotCloneServer { public: explicit SnapShotCloneServer(std::shared_ptr config) - :conf_(config) {} - /** - * @brief 通过配置初始化snapshotcloneserver所需要的所有配置 - */ + : conf_(config) {} + /** + * @brief: Initialize all configurations required for snapshotcloneserver + * through configuration + */ void InitAllSnapshotCloneOptions(void); /** - * @brief leader选举,未选中持续等待,选中情况下建立watch并返回 + * @brief leader election, if not selected, continue to wait. If selected, + * establish a watch and return */ void StartCompaginLeader(void); /** - * @brief 启动dummyPort 用于检查主备snapshotserver - * 存活和各种config metric 和版本信息 + * @brief: Start dummyPort to check the active and standby snapshotserver + * Survival and various configuration metrics and version information */ void StartDummy(void); /** - * @brief 初始化clone与snapshot 各种核心结构 + * @brief initializes various core structures of clone and snapshot */ bool Init(void); /** - * @brief 启动各个组件的逻辑和线程池 + * @brief: Start the logic and thread pool of each component */ bool Start(void); /** - * @brief 停止所有服务 + * @brief Stop all services */ void Stop(void); /** - * @brief 启动RPC服务直到外部kill + * @brief Start RPC service until external kill */ void RunUntilQuit(void); @@ -127,9 +125,9 @@ class SnapShotCloneServer { private: std::shared_ptr conf_; SnapShotCloneServerOptions snapshotCloneServerOptions_; - // 标记自己为active/standby + // Mark yourself as active/standby bvar::Status status_; - // 与etcd交互的client + // Client interacting with ETCD std::shared_ptr etcdClient_; std::shared_ptr leaderElection_; @@ -138,21 +136,21 @@ class SnapShotCloneServer { std::shared_ptr client_; std::shared_ptr metaStore_; - std::shared_ptr dataStore_; - std::shared_ptr snapshotRef_; - std::shared_ptr snapshotMetric_; - std::shared_ptr snapshotCore_; + std::shared_ptr dataStore_; + std::shared_ptr snapshotRef_; + std::shared_ptr snapshotMetric_; + std::shared_ptr snapshotCore_; std::shared_ptr snapshotTaskManager_; std::shared_ptr snapshotServiceManager_; - std::shared_ptr cloneMetric_; - std::shared_ptr cloneRef_; - std::shared_ptr cloneCore_; - std::shared_ptr cloneTaskMgr_; + std::shared_ptr cloneMetric_; + std::shared_ptr cloneRef_; + std::shared_ptr cloneCore_; + std::shared_ptr cloneTaskMgr_; std::shared_ptr cloneServiceManagerBackend_; - std::shared_ptr cloneServiceManager_; + std::shared_ptr cloneServiceManager_; std::shared_ptr service_; - std::shared_ptr server_; + std::shared_ptr server_; }; } // namespace snapshotcloneserver } // namespace curve diff --git a/src/snapshotcloneserver/snapshotclone_service.cpp b/src/snapshotcloneserver/snapshotclone_service.cpp index 72f6b04683..f8505b03fe 100644 --- a/src/snapshotcloneserver/snapshotclone_service.cpp +++ b/src/snapshotcloneserver/snapshotclone_service.cpp @@ -22,14 +22,14 @@ #include "src/snapshotcloneserver/snapshotclone_service.h" +#include #include #include -#include #include "json/json.h" #include "src/common/snapshotclone/snapshotclone_define.h" 
-#include "src/common/uuid.h" #include "src/common/string_util.h" +#include "src/common/uuid.h" #include "src/snapshotcloneserver/clone/clone_closure.h" using ::curve::common::UUIDGenerator; @@ -38,15 +38,14 @@ namespace curve { namespace snapshotcloneserver { void SnapshotCloneServiceImpl::default_method(RpcController* cntl, - const HttpRequest* req, - HttpResponse* resp, - Closure* done) { + const HttpRequest* req, + HttpResponse* resp, + Closure* done) { (void)req; (void)resp; brpc::ClosureGuard done_guard(done); - brpc::Controller* bcntl = - static_cast(cntl); - const std::string *action = + brpc::Controller* bcntl = static_cast(cntl); + const std::string* action = bcntl->http_request().uri().GetQuery(kActionStr); std::string requestId = UUIDGenerator().GenerateUUID(); @@ -91,39 +90,27 @@ void SnapshotCloneServiceImpl::default_method(RpcController* cntl, } LOG(INFO) << "SnapshotCloneServiceImpl Return : " - << "action = " << *action - << ", requestId = " << requestId - << ", context = " << bcntl->response_attachment(); + << "action = " << *action << ", requestId = " << requestId + << ", context = " << bcntl->response_attachment(); return; } void SnapshotCloneServiceImpl::HandleCreateSnapshotAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - const std::string *name = - bcntl->http_request().uri().GetQuery(kNameStr); - if ((version == nullptr) || - (user == nullptr) || - (file == nullptr) || - (name == nullptr) || - (version->empty()) || - (user->empty()) || - (file->empty()) || - (name->empty())) { + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + const std::string* name = bcntl->http_request().uri().GetQuery(kNameStr); + if ((version == nullptr) || (user == nullptr) || (file == nullptr) || + (name == nullptr) || (version->empty()) || (user->empty()) || + (file->empty()) || (name->empty())) { HandleBadRequestError(bcntl, requestId); return; } LOG(INFO) << "CreateSnapshot:" - << " Version = " << *version - << ", User = " << *user - << ", File = " << *file - << ", Name = " << *name + << " Version = " << *version << ", User = " << *user + << ", File = " << *file << ", Name = " << *name << ", requestId = " << requestId; UUID uuid; int ret = snapshotManager_->CreateSnapshot(*file, *user, *name, &uuid); @@ -146,22 +133,14 @@ void SnapshotCloneServiceImpl::HandleCreateSnapshotAction( } void SnapshotCloneServiceImpl::HandleDeleteSnapshotAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - if ((version == nullptr) || - (user == nullptr) || - (uuid == nullptr) || - (version->empty()) || - (user->empty()) || - (uuid->empty())) { + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* uuid = 
bcntl->http_request().uri().GetQuery(kUUIDStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + if ((version == nullptr) || (user == nullptr) || (uuid == nullptr) || + (version->empty()) || (user->empty()) || (uuid->empty())) { HandleBadRequestError(bcntl, requestId); return; } @@ -172,10 +151,8 @@ void SnapshotCloneServiceImpl::HandleDeleteSnapshotAction( fileName = *file; } LOG(INFO) << "DeleteSnapshot:" - << " Version = " << *version - << ", User = " << *user - << ", UUID = " << *uuid - << ", File = " << fileStr + << " Version = " << *version << ", User = " << *user + << ", UUID = " << *uuid << ", File = " << fileStr << ", requestId = " << requestId; int ret = snapshotManager_->DeleteSnapshot(*uuid, *user, fileName); if (ret < 0) { @@ -196,32 +173,21 @@ void SnapshotCloneServiceImpl::HandleDeleteSnapshotAction( } void SnapshotCloneServiceImpl::HandleCancelSnapshotAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - if ((version == nullptr) || - (user == nullptr) || - (uuid == nullptr) || - (file == nullptr) || - (version->empty()) || - (user->empty()) || - (uuid->empty()) || - (file->empty())) { + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + if ((version == nullptr) || (user == nullptr) || (uuid == nullptr) || + (file == nullptr) || (version->empty()) || (user->empty()) || + (uuid->empty()) || (file->empty())) { HandleBadRequestError(bcntl, requestId); return; } LOG(INFO) << "CancelSnapshot:" - << " Version = " << *version - << ", User = " << *user - << ", UUID = " << *uuid - << ", File = " << *file + << " Version = " << *version << ", User = " << *user + << ", UUID = " << *uuid << ", File = " << *file << ", requestId = " << requestId; int ret = snapshotManager_->CancelSnapshot(*uuid, *user, *file); if (ret < 0) { @@ -242,28 +208,21 @@ void SnapshotCloneServiceImpl::HandleCancelSnapshotAction( } void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - const std::string *limit = - bcntl->http_request().uri().GetQuery(kLimitStr); - const std::string *offset = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + const std::string* limit = bcntl->http_request().uri().GetQuery(kLimitStr); + const std::string* offset = bcntl->http_request().uri().GetQuery(kOffsetStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - if ((version == nullptr) || - (user == nullptr) || - (version->empty()) || + const std::string* uuid = 
bcntl->http_request().uri().GetQuery(kUUIDStr); + if ((version == nullptr) || (user == nullptr) || (version->empty()) || (user->empty())) { HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -271,7 +230,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { @@ -291,22 +250,18 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( fileName = *file; } LOG(INFO) << "GetFileSnapshotInfo:" - << " Version = " << *version - << ", User = " << *user - << ", File = " << fileStr - << ", Limit = " << limitNum - << ", Offset = " << offsetNum - << ", UUID = " << uuidStr + << " Version = " << *version << ", User = " << *user + << ", File = " << fileStr << ", Limit = " << limitNum + << ", Offset = " << offsetNum << ", UUID = " << uuidStr << ", requestId = " << requestId; std::vector info; int ret = kErrCodeSuccess; if (uuid != nullptr) { - ret = snapshotManager_->GetFileSnapshotInfoById( - fileName, *user, *uuid, &info); + ret = snapshotManager_->GetFileSnapshotInfoById(fileName, *user, *uuid, + &info); } else { - ret = snapshotManager_->GetFileSnapshotInfo( - fileName, *user, &info); + ret = snapshotManager_->GetFileSnapshotInfo(fileName, *user, &info); } if (ret < 0) { bcntl->http_response().set_status_code( @@ -323,8 +278,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( mainObj[kTotalCountStr] = info.size(); Json::Value listSnapObj; for (std::vector::size_type i = offsetNum; - i < info.size() && i < limitNum + offsetNum; - i++) { + i < info.size() && i < limitNum + offsetNum; i++) { Json::Value fileSnapObj = info[i].ToJsonObj(); listSnapObj.append(fileSnapObj); } @@ -334,32 +288,22 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( return; } -void SnapshotCloneServiceImpl::HandleCloneAction( - brpc::Controller* bcntl, - const std::string &requestId, - Closure* done) { +void SnapshotCloneServiceImpl::HandleCloneAction(brpc::Controller* bcntl, + const std::string& requestId, + Closure* done) { brpc::ClosureGuard done_guard(done); - const std::string *version = + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *source = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* source = bcntl->http_request().uri().GetQuery(kSourceStr); - const std::string *destination = + const std::string* destination = bcntl->http_request().uri().GetQuery(kDestinationStr); - const std::string *lazy = - bcntl->http_request().uri().GetQuery(kLazyStr); - const std::string *poolset = - bcntl->http_request().uri().GetQuery(kPoolset); - if ((version == nullptr) || - (user == nullptr) || - (source == nullptr) || - (destination == nullptr) || - (lazy == nullptr) || - (version->empty()) || - (user->empty()) || - (source->empty()) || - (destination->empty()) || + const std::string* lazy = bcntl->http_request().uri().GetQuery(kLazyStr); + const std::string* poolset = bcntl->http_request().uri().GetQuery(kPoolset); + if ((version == nullptr) || (user == nullptr) || (source == nullptr) || + (destination == nullptr) || (lazy == nullptr) || (version->empty()) || + 
(user->empty()) || (source->empty()) || (destination->empty()) || (lazy->empty()) || // poolset is optional, but if it exists, it should not be empty (poolset != nullptr && poolset->empty())) { @@ -381,15 +325,12 @@ void SnapshotCloneServiceImpl::HandleCloneAction( return; } LOG(INFO) << "Clone:" - << " Version = " << *version - << ", User = " << *user - << ", Source = " << *source - << ", Destination = " << *destination + << " Version = " << *version << ", User = " << *user + << ", Source = " << *source << ", Destination = " << *destination << ", Lazy = " << *lazy << ", Poolset = " << (poolset != nullptr ? *poolset : "") << ", requestId = " << requestId; - TaskIdType taskId; auto closure = std::make_shared(bcntl, done); closure->SetRequestId(requestId); @@ -400,30 +341,21 @@ void SnapshotCloneServiceImpl::HandleCloneAction( return; } -void SnapshotCloneServiceImpl::HandleRecoverAction( - brpc::Controller* bcntl, - const std::string &requestId, - Closure* done) { +void SnapshotCloneServiceImpl::HandleRecoverAction(brpc::Controller* bcntl, + const std::string& requestId, + Closure* done) { brpc::ClosureGuard done_guard(done); - const std::string *version = + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *source = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* source = bcntl->http_request().uri().GetQuery(kSourceStr); - const std::string *destination = + const std::string* destination = bcntl->http_request().uri().GetQuery(kDestinationStr); - const std::string *lazy = - bcntl->http_request().uri().GetQuery(kLazyStr); - if ((version == nullptr) || - (user == nullptr) || - (source == nullptr) || - (destination == nullptr) || - (lazy == nullptr) || - (version->empty()) || - (user->empty()) || - (source->empty()) || - (destination->empty()) || + const std::string* lazy = bcntl->http_request().uri().GetQuery(kLazyStr); + if ((version == nullptr) || (user == nullptr) || (source == nullptr) || + (destination == nullptr) || (lazy == nullptr) || (version->empty()) || + (user->empty()) || (source->empty()) || (destination->empty()) || (lazy->empty())) { HandleBadRequestError(bcntl, requestId); LOG(INFO) << "SnapshotCloneServiceImpl Return : " @@ -443,37 +375,27 @@ void SnapshotCloneServiceImpl::HandleRecoverAction( return; } LOG(INFO) << "Recover:" - << " Version = " << *version - << ", User = " << *user - << ", Source = " << *source - << ", Destination = " << *destination - << ", Lazy = " << *lazy - << ", requestId = " << requestId; + << " Version = " << *version << ", User = " << *user + << ", Source = " << *source << ", Destination = " << *destination + << ", Lazy = " << *lazy << ", requestId = " << requestId; TaskIdType taskId; auto closure = std::make_shared(bcntl, done); closure->SetRequestId(requestId); - cloneManager_->RecoverFile( - *source, *user, *destination, lazyFlag, closure, &taskId); + cloneManager_->RecoverFile(*source, *user, *destination, lazyFlag, closure, + &taskId); done_guard.release(); return; } void SnapshotCloneServiceImpl::HandleFlattenAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *taskId = - 
bcntl->http_request().uri().GetQuery(kUUIDStr); - if ((version == nullptr) || - (user == nullptr) || - (taskId == nullptr) || - (version->empty()) || - (user->empty()) || - (taskId->empty())) { + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* taskId = bcntl->http_request().uri().GetQuery(kUUIDStr); + if ((version == nullptr) || (user == nullptr) || (taskId == nullptr) || + (version->empty()) || (user->empty()) || (taskId->empty())) { HandleBadRequestError(bcntl, requestId); LOG(INFO) << "SnapshotCloneServiceImpl Return : " << "action = Flatten" @@ -482,10 +404,8 @@ void SnapshotCloneServiceImpl::HandleFlattenAction( return; } LOG(INFO) << "Flatten:" - << " Version = " << *version - << ", User = " << *user - << ", UUID = " << *taskId - << ", requestId = " << requestId; + << " Version = " << *version << ", User = " << *user + << ", UUID = " << *taskId << ", requestId = " << requestId; int ret = cloneManager_->Flatten(*user, *taskId); if (ret < 0) { bcntl->http_response().set_status_code( @@ -505,28 +425,21 @@ void SnapshotCloneServiceImpl::HandleFlattenAction( } void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *limit = - bcntl->http_request().uri().GetQuery(kLimitStr); - const std::string *offset = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* limit = bcntl->http_request().uri().GetQuery(kLimitStr); + const std::string* offset = bcntl->http_request().uri().GetQuery(kOffsetStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - if ((version == nullptr) || - (user == nullptr) || - (version->empty()) || + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + if ((version == nullptr) || (user == nullptr) || (version->empty()) || (user->empty())) { HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -534,7 +447,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { @@ -554,25 +467,21 @@ void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( } LOG(INFO) << "GetTasks:" - << " Version = " << *version - << ", User = " << *user - << ", Limit = " << limitNum - << ", Offset = " << offsetNum - << ", UUID = " << uuidStr - << ", File = " << fileStr + << " Version = " << *version << ", User = " << *user + << ", Limit = " << limitNum << ", Offset = " << offsetNum + << ", UUID = " << uuidStr << ", File = " << fileStr << ", requestId = " << requestId; std::vector cloneTaskInfos; int ret = kErrCodeSuccess; if (uuid != nullptr) { - ret = cloneManager_->GetCloneTaskInfoById( - *user, *uuid, &cloneTaskInfos); + ret = + cloneManager_->GetCloneTaskInfoById(*user, *uuid, &cloneTaskInfos); } else if (file != nullptr) { - ret = 
cloneManager_->GetCloneTaskInfoByName( - *user, *file, &cloneTaskInfos); + ret = cloneManager_->GetCloneTaskInfoByName(*user, *file, + &cloneTaskInfos); } else { - ret = cloneManager_->GetCloneTaskInfo( - *user, &cloneTaskInfos); + ret = cloneManager_->GetCloneTaskInfo(*user, &cloneTaskInfos); } if (ret < 0) { bcntl->http_response().set_status_code( @@ -589,8 +498,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( mainObj[kTotalCountStr] = cloneTaskInfos.size(); Json::Value listObj; for (std::vector::size_type i = offsetNum; - i < cloneTaskInfos.size() && i < limitNum + offsetNum; - i++) { + i < cloneTaskInfos.size() && i < limitNum + offsetNum; i++) { Json::Value cloneTaskObj = cloneTaskInfos[i].ToJsonObj(); listObj.append(cloneTaskObj); } @@ -601,16 +509,12 @@ void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( return; } -bool SnapshotCloneServiceImpl::CheckBoolParamter( - const std::string *param, bool *valueOut) { - if (*param == "true" || - *param == "True" || - *param == "TRUE" || +bool SnapshotCloneServiceImpl::CheckBoolParamter(const std::string* param, + bool* valueOut) { + if (*param == "true" || *param == "True" || *param == "TRUE" || *param == "1") { *valueOut = true; - } else if (*param == "false" || - *param == "False" || - *param == "FALSE" || + } else if (*param == "false" || *param == "False" || *param == "FALSE" || *param == "0") { *valueOut = false; } else { @@ -620,30 +524,20 @@ bool SnapshotCloneServiceImpl::CheckBoolParamter( } void SnapshotCloneServiceImpl::HandleCleanCloneTaskAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *taskId = - bcntl->http_request().uri().GetQuery(kUUIDStr); - if ((version == nullptr) || - (user == nullptr) || - (taskId == nullptr) || - (version->empty()) || - (user->empty()) || - (taskId->empty())) { + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* taskId = bcntl->http_request().uri().GetQuery(kUUIDStr); + if ((version == nullptr) || (user == nullptr) || (taskId == nullptr) || + (version->empty()) || (user->empty()) || (taskId->empty())) { HandleBadRequestError(bcntl, requestId); return; } LOG(INFO) << "CleanCloneTask:" - << ", Version = " << *version - << ", User = " << *user - << ", UUID = " << *taskId - << ", requestId = " << requestId; - + << ", Version = " << *version << ", User = " << *user + << ", UUID = " << *taskId << ", requestId = " << requestId; int ret = cloneManager_->CleanCloneTask(*user, *taskId); if (ret < 0) { @@ -664,27 +558,22 @@ void SnapshotCloneServiceImpl::HandleCleanCloneTaskAction( } void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( - brpc::Controller* bcntl, const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - const std::string *limit = - bcntl->http_request().uri().GetQuery(kLimitStr); - const std::string *offset = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* file = 
bcntl->http_request().uri().GetQuery(kFileStr); + const std::string* limit = bcntl->http_request().uri().GetQuery(kLimitStr); + const std::string* offset = bcntl->http_request().uri().GetQuery(kOffsetStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - const std::string *status = + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + const std::string* status = bcntl->http_request().uri().GetQuery(kStatusStr); - if ((version == nullptr) || - (version->empty())) { + if ((version == nullptr) || (version->empty())) { HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -692,7 +581,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { @@ -719,14 +608,10 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( } LOG(INFO) << "GetFileSnapshotInfo:" - << " Version = " << *version - << ", User = " << userStr - << ", File = " << fileStr - << ", Limit = " << limitNum - << ", Offset = " << offsetNum - << ", UUID = " << uuidStr - << ", Status = " << statusStr - << ", requestId = " << requestId; + << " Version = " << *version << ", User = " << userStr + << ", File = " << fileStr << ", Limit = " << limitNum + << ", Offset = " << offsetNum << ", UUID = " << uuidStr + << ", Status = " << statusStr << ", requestId = " << requestId; std::vector info; int ret = kErrCodeSuccess; @@ -748,8 +633,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( mainObj[kTotalCountStr] = info.size(); Json::Value listSnapObj; for (std::vector::size_type i = offsetNum; - i < info.size() && i < limitNum + offsetNum; - i++) { + i < info.size() && i < limitNum + offsetNum; i++) { Json::Value fileSnapObj = info[i].ToJsonObj(); listSnapObj.append(fileSnapObj); } @@ -760,31 +644,26 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( } void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( - brpc::Controller* bcntl, const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *limit = - bcntl->http_request().uri().GetQuery(kLimitStr); - const std::string *offset = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* limit = bcntl->http_request().uri().GetQuery(kLimitStr); + const std::string* offset = bcntl->http_request().uri().GetQuery(kOffsetStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - const std::string *source = + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + const std::string* source = bcntl->http_request().uri().GetQuery(kSourceStr); - const std::string *destination = + const std::string* destination = bcntl->http_request().uri().GetQuery(kDestinationStr); - const std::string *status = + const std::string* status = bcntl->http_request().uri().GetQuery(kStatusStr); - const std::string *type = - bcntl->http_request().uri().GetQuery(kTypeStr); - if ((version == nullptr) || - (version->empty())) { + const std::string* type = 
bcntl->http_request().uri().GetQuery(kTypeStr); + if ((version == nullptr) || (version->empty())) { HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -792,7 +671,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { @@ -832,15 +711,11 @@ void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( } LOG(INFO) << "GetTaskList:" - << " Version = " << *version - << ", User = " << userStr - << ", Limit = " << limitNum - << ", Offset = " << offsetNum - << ", UUID = " << uuidStr - << ", Source = " << sourceStr + << " Version = " << *version << ", User = " << userStr + << ", Limit = " << limitNum << ", Offset = " << offsetNum + << ", UUID = " << uuidStr << ", Source = " << sourceStr << ", Destination = " << destinationStr - << ", Status = " << statusStr - << ", Type = " << typeStr + << ", Status = " << statusStr << ", Type = " << typeStr << ", requestId = " << requestId; std::vector cloneTaskInfos; @@ -862,8 +737,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( mainObj[kTotalCountStr] = cloneTaskInfos.size(); Json::Value listObj; for (std::vector::size_type i = offsetNum; - i < cloneTaskInfos.size() && i < limitNum + offsetNum; - i++) { + i < cloneTaskInfos.size() && i < limitNum + offsetNum; i++) { Json::Value cloneTaskObj = cloneTaskInfos[i].ToJsonObj(); listObj.append(cloneTaskObj); } @@ -876,33 +750,26 @@ void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( } void SnapshotCloneServiceImpl::HandleGetCloneRefStatusAction( - brpc::Controller* bcntl, const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *source = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* source = bcntl->http_request().uri().GetQuery(kSourceStr); - if ((version == nullptr) || - (user == nullptr) || - (source == nullptr) || - (version->empty()) || - (source->empty()) || - (user->empty())) { + if ((version == nullptr) || (user == nullptr) || (source == nullptr) || + (version->empty()) || (source->empty()) || (user->empty())) { HandleBadRequestError(bcntl, requestId); return; } LOG(INFO) << "GetCloneRefStatus:" - << " Version = " << *version - << ", User = " << *user - << ", Source = " << *source - << ", requestId = " << requestId; + << " Version = " << *version << ", User = " << *user + << ", Source = " << *source << ", requestId = " << requestId; std::vector cloneInfos; CloneRefStatus refStatus; - int ret = cloneManager_->GetCloneRefStatus(*source, &refStatus, - &cloneInfos); + int ret = + cloneManager_->GetCloneRefStatus(*source, &refStatus, &cloneInfos); if (ret < 0) { bcntl->http_response().set_status_code( brpc::HTTP_STATUS_INTERNAL_SERVER_ERROR); @@ -916,7 +783,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneRefStatusAction( mainObj[kCodeStr] = std::to_string(kErrCodeSuccess); mainObj[kMessageStr] = code2Msg[kErrCodeSuccess]; mainObj[kRequestIdStr] = requestId; - mainObj[kRefStatusStr] = static_cast (refStatus); + mainObj[kRefStatusStr] = 
static_cast(refStatus); mainObj[kTotalCountStr] = 0; if (refStatus == CloneRefStatus::kNeedCheck) { mainObj[kTotalCountStr] = cloneInfos.size(); @@ -943,20 +810,19 @@ void SnapshotCloneServiceImpl::HandleGetCloneRefStatusAction( } void SnapshotCloneServiceImpl::SetErrorMessage(brpc::Controller* bcntl, - int errCode, - const std::string &requestId, - const std::string &uuid) { + int errCode, + const std::string& requestId, + const std::string& uuid) { butil::IOBufBuilder os; - std::string msg = BuildErrorMessage(errCode, - requestId, uuid); + std::string msg = BuildErrorMessage(errCode, requestId, uuid); os << msg; os.move_to(bcntl->response_attachment()); return; } -void SnapshotCloneServiceImpl::HandleBadRequestError(brpc::Controller* bcntl, - const std::string &requestId, - const std::string &uuid) { +void SnapshotCloneServiceImpl::HandleBadRequestError( + brpc::Controller* bcntl, const std::string& requestId, + const std::string& uuid) { bcntl->http_response().set_status_code(brpc::HTTP_STATUS_BAD_REQUEST); SetErrorMessage(bcntl, kErrCodeInvalidRequest, requestId, uuid); } diff --git a/src/snapshotcloneserver/snapshotclone_service.h b/src/snapshotcloneserver/snapshotclone_service.h index 6ba1f34f48..c9d15fc222 100644 --- a/src/snapshotcloneserver/snapshotclone_service.h +++ b/src/snapshotcloneserver/snapshotclone_service.h @@ -24,87 +24,82 @@ #define SRC_SNAPSHOTCLONESERVER_SNAPSHOTCLONE_SERVICE_H_ #include + #include #include #include "proto/snapshotcloneserver.pb.h" -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" #include "src/snapshotcloneserver/clone/clone_service_manager.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" namespace curve { namespace snapshotcloneserver { -using ::google::protobuf::RpcController; using ::google::protobuf::Closure; +using ::google::protobuf::RpcController; /** - * @brief 快照转储rpc服务实现 + * @brief snapshot dump rpc service implementation */ class SnapshotCloneServiceImpl : public SnapshotCloneService { public: - /** - * @brief 构造函数 - * - * @param manager 快照转储服务管理对象 - */ + /** + * @brief constructor + * + * @param manager snapshot dump service management object + */ SnapshotCloneServiceImpl( std::shared_ptr snapshotManager, std::shared_ptr cloneManager) - : snapshotManager_(snapshotManager), - cloneManager_(cloneManager) {} + : snapshotManager_(snapshotManager), cloneManager_(cloneManager) {} virtual ~SnapshotCloneServiceImpl() {} /** - * @brief http服务默认方法 + * @brief HTTP service default method * * @param cntl rpc controller - * @param req http请求报文 - * @param resp http回复报文 - * @param done http异步回调闭包 + * @param req HTTP request message + * @param resp HTTP reply message + * @param done HTTP asynchronous callback closure */ - void default_method(RpcController* cntl, - const HttpRequest* req, - HttpResponse* resp, - Closure* done); + void default_method(RpcController* cntl, const HttpRequest* req, + HttpResponse* resp, Closure* done); private: void HandleCreateSnapshotAction(brpc::Controller* bcntl, - const std::string &requestId); + const std::string& requestId); void HandleDeleteSnapshotAction(brpc::Controller* bcntl, - const std::string &requestId); + const std::string& requestId); void HandleCancelSnapshotAction(brpc::Controller* bcntl, - const std::string &requestId); + const std::string& requestId); void HandleGetFileSnapshotInfoAction(brpc::Controller* bcntl, - const std::string &requestId); + const std::string& requestId); void HandleCloneAction(brpc::Controller* bcntl, - const std::string &requestId, - 
Closure* done);
+    void HandleCloneAction(brpc::Controller* bcntl,
+                           const std::string& requestId, Closure* done);
     void HandleRecoverAction(brpc::Controller* bcntl,
-                             const std::string &requestId,
-                             Closure* done);
+                             const std::string& requestId, Closure* done);
     void HandleFlattenAction(brpc::Controller* bcntl,
-                             const std::string &requestId);
+                             const std::string& requestId);
     void HandleGetCloneTasksAction(brpc::Controller* bcntl,
-                                   const std::string &requestId);
+                                   const std::string& requestId);
     void HandleCleanCloneTaskAction(brpc::Controller* bcntl,
-                                    const std::string &requestId);
+                                    const std::string& requestId);
     void HandleGetFileSnapshotListAction(brpc::Controller* bcntl,
-                                         const std::string &requestId);
+                                         const std::string& requestId);
     void HandleGetCloneTaskListAction(brpc::Controller* bcntl,
-                                      const std::string &requestId);
+                                      const std::string& requestId);
     void HandleGetCloneRefStatusAction(brpc::Controller* bcntl,
-                                       const std::string &requestId);
-    bool CheckBoolParamter(
-        const std::string *param, bool *valueOut);
+                                       const std::string& requestId);
+    bool CheckBoolParamter(const std::string* param, bool* valueOut);
     void SetErrorMessage(brpc::Controller* bcntl, int errCode,
-                         const std::string &requestId,
-                         const std::string &uuid = "");
+                         const std::string& requestId,
+                         const std::string& uuid = "");
     void HandleBadRequestError(brpc::Controller* bcntl,
-                               const std::string &requestId,
-                               const std::string &uuid = "");
+                               const std::string& requestId,
+                               const std::string& uuid = "");

 private:
-    // 快照转储服务管理对象
+    // Snapshot dump service management objects
     std::shared_ptr snapshotManager_;
     std::shared_ptr cloneManager_;
 };
diff --git a/src/tools/chunkserver_client.cpp b/src/tools/chunkserver_client.cpp
index 69eb492d5c..8ecd7036cd 100644
--- a/src/tools/chunkserver_client.cpp
+++ b/src/tools/chunkserver_client.cpp
@@ -28,11 +28,10 @@ namespace curve {
 namespace tool {

 std::ostream& operator<<(std::ostream& os, const Chunk& chunk) {
-    uint64_t groupId = (static_cast<uint64_t>(chunk.logicPoolId) << 32) |
-                       chunk.copysetId;
+    uint64_t groupId =
+        (static_cast<uint64_t>(chunk.logicPoolId) << 32) | chunk.copysetId;
     os << "logicalPoolId:" << chunk.logicPoolId
-       << ",copysetId:" << chunk.copysetId
-       << ",groupId:" << groupId
+       << ",copysetId:" << chunk.copysetId << ",groupId:" << groupId
        << ",chunkId:" << chunk.chunkId;
     return os;
 }
@@ -40,8 +39,8 @@ std::ostream& operator<<(std::ostream& os, const Chunk& chunk) {
 int ChunkServerClient::Init(const std::string& csAddr) {
     csAddr_ = csAddr;
     if (channel_.Init(csAddr.c_str(), nullptr) != 0) {
-        std::cout << "Init channel to chunkserver: " << csAddr
-                  << " failed!" << std::endl;
+        std::cout << "Init channel to chunkserver: " << csAddr << " failed!"
+                  << std::endl;
         return -1;
     }
     return 0;
 }
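// [Editor's illustration, not part of the patch] The group id printed by
// operator<< above packs the pool id into the high 32 bits and the copyset
// id into the low 32 bits, so it can be unpacked the same way:
//   uint64_t groupId = (static_cast<uint64_t>(logicPoolId) << 32) | copysetId;
//   uint32_t poolId  = static_cast<uint32_t>(groupId >> 32);
//   uint32_t csId    = static_cast<uint32_t>(groupId & 0xFFFFFFFFull);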
+ << std::endl; return -1; } return 0; @@ -69,7 +68,7 @@ int ChunkServerClient::GetRaftStatus(butil::IOBuf* iobuf) { } retryTimes++; } - // 只打最后一次失败的原因 + // The reason for only the last defeat std::cout << "Send RPC to chunkserver fail, error content: " << cntl.ErrorText() << std::endl; return -1; @@ -97,9 +96,8 @@ bool ChunkServerClient::CheckChunkServerOnline() { return false; } -int ChunkServerClient::GetCopysetStatus( - const CopysetStatusRequest& request, - CopysetStatusResponse* response) { +int ChunkServerClient::GetCopysetStatus(const CopysetStatusRequest& request, + CopysetStatusResponse* response) { brpc::Controller cntl; curve::chunkserver::CopysetService_Stub stub(&channel_); uint64_t retryTimes = 0; @@ -112,17 +110,16 @@ int ChunkServerClient::GetCopysetStatus( continue; } if (response->status() != - COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS) { + COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS) { std::cout << "GetCopysetStatus fail, request: " << request.DebugString() - << ", errCode: " - << response->status() << std::endl; + << ", errCode: " << response->status() << std::endl; return -1; } else { return 0; } } - // 只打最后一次失败的原因 + // The reason for only the last defeat std::cout << "Send RPC to chunkserver fail, error content: " << cntl.ErrorText() << std::endl; return -1; @@ -151,15 +148,14 @@ int ChunkServerClient::GetChunkHash(const Chunk& chunk, if (response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) { std::cout << "GetCopysetStatus fail, request: " << request.DebugString() - << ", errCode: " - << response.status() << std::endl; + << ", errCode: " << response.status() << std::endl; return -1; } else { *chunkHash = response.hash(); return 0; } } - // 只打最后一次失败的原因 + // The reason for only the last defeat std::cout << "Send RPC to chunkserver fail, error content: " << cntl.ErrorText() << std::endl; return -1; diff --git a/src/tools/chunkserver_client.h b/src/tools/chunkserver_client.h index 5945737ae8..400755cb30 100644 --- a/src/tools/chunkserver_client.h +++ b/src/tools/chunkserver_client.h @@ -23,30 +23,30 @@ #ifndef SRC_TOOLS_CHUNKSERVER_CLIENT_H_ #define SRC_TOOLS_CHUNKSERVER_CLIENT_H_ -#include -#include #include +#include +#include -#include #include +#include #include "proto/chunk.pb.h" #include "proto/copyset.pb.h" #include "src/tools/curve_tool_define.h" +using curve::chunkserver::CHUNK_OP_STATUS; +using curve::chunkserver::COPYSET_OP_STATUS; using curve::chunkserver::CopysetStatusRequest; using curve::chunkserver::CopysetStatusResponse; -using curve::chunkserver::COPYSET_OP_STATUS; using curve::chunkserver::GetChunkHashRequest; using curve::chunkserver::GetChunkHashResponse; -using curve::chunkserver::CHUNK_OP_STATUS; namespace curve { namespace tool { struct Chunk { - Chunk(uint32_t poolId, uint32_t csId, uint64_t chunkId2) : - logicPoolId(poolId), copysetId(csId), chunkId(chunkId2) {} + Chunk(uint32_t poolId, uint32_t csId, uint64_t chunkId2) + : logicPoolId(poolId), copysetId(csId), chunkId(chunkId2) {} uint32_t logicPoolId; uint32_t copysetId; uint64_t chunkId; @@ -58,39 +58,43 @@ class ChunkServerClient { public: virtual ~ChunkServerClient() = default; /** - * @brief 初始化channel,对一个地址,初始化一次就好 - * @param csAddr chunkserver地址 - * @return 成功返回0,失败返回-1 - */ + * @brief initializes the channel. 
    /**
-     * @brief 调用braft的RaftStat接口获取复制组的详细信息,放到iobuf里面
-     * @param iobuf 复制组详细信息,返回值为0时有效
-     * @return 成功返回0,失败返回-1
-     */
+     * @brief: Call the RaftStat interface of braft to obtain detailed
+     * information about the replication group, and place it in iobuf
+     * @param iobuf replication group details, valid when the return value is 0
+     * @return returns 0 for success, -1 for failure
+     */
     virtual int GetRaftStatus(butil::IOBuf* iobuf);

    /**
-     * @brief 检查chunkserver是否在线,只检查controller,不检查response
-     * @return 在线返回true,不在线返回false
-     */
+     * @brief: Check if the chunkserver is online, only check the controller,
+     * not the response
+     * @return returns true if online, false if offline
+     */
     virtual bool CheckChunkServerOnline();

    /**
-     * @brief 调用chunkserver的GetCopysetStatus接口
-     & @param request 查询copyset的request
-     * @param response 返回的response,里面有复制组详细信息,返回值为0时有效
-     * @return 成功返回0,失败返回-1
+     * @brief calls the GetCopysetStatus interface of chunkserver
+     * @param request Query the request for the copyset
+     * @param response The response returned contains detailed information about
+     * the replication group, which is valid when the return value is 0
+     * @return returns 0 for success, -1 for failure
     */
     virtual int GetCopysetStatus(const CopysetStatusRequest& request,
                                  CopysetStatusResponse* response);

    /**
-     * @brief 从chunkserver获取chunk的hash值
-     & @param chunk 要查询的chunk
-     * @param[out] chunkHash chunk的hash值,返回值为0时有效
-     * @return 成功返回0,失败返回-1
+     * @brief Get the hash value of a chunk from chunkserver
+     * @param chunk The chunk to query
+     * @param[out] chunkHash The hash value of the chunk, valid when the
+     * return value is 0
+     * @return returns 0 for success, -1 for failure
     */
     virtual int GetChunkHash(const Chunk& chunk,
                              std::string* chunkHash);
diff --git a/src/tools/chunkserver_tool_factory.h b/src/tools/chunkserver_tool_factory.h
index 9a1e344b3c..55505eccf0 100644
--- a/src/tools/chunkserver_tool_factory.h
+++ b/src/tools/chunkserver_tool_factory.h
@@ -26,9 +26,9 @@
 #include
 #include

+#include "src/fs/ext4_filesystem_impl.h"
 #include "src/tools/curve_meta_tool.h"
 #include "src/tools/raft_log_tool.h"
-#include "src/fs/ext4_filesystem_impl.h"

 namespace curve {
 namespace tool {
@@ -38,20 +38,21 @@ using curve::fs::Ext4FileSystemImpl;
 class ChunkServerToolFactory {
 public:
     /**
-     * @brief 根据输入的command获取CurveTool对象
-     * @param command 要执行的命令的名称
-     * @return CurveTool实例
+     * @brief Retrieve the CurveTool object based on the input command
+     * @param command The name of the command to be executed
+     * @return CurveTool instance
     */
     static std::shared_ptr GenerateChunkServerTool(
-            const std::string& command);
+        const std::string& command);
+
 private:
     /**
-     * @brief 获取CurveMetaTool实例
+     * @brief Get CurveMetaTool instance
     */
     static std::shared_ptr GenerateCurveMetaTool();

    /**
-     * @brief 获取RaftLogMetaTool实例
+     * @brief Get RaftLogMetaTool instance
     */
     static std::shared_ptr GenerateRaftLogTool();
 };
diff --git a/src/tools/common.cpp b/src/tools/common.cpp
index 35f29bf738..cdcdc369ba 100644
--- a/src/tools/common.cpp
+++ b/src/tools/common.cpp
@@ -29,11 +29,11 @@ namespace curve {
 namespace tool {

 void TrimMetricString(std::string* str) {
-    // 去掉头部空格
+    // Remove leading spaces
     str->erase(0, str->find_first_not_of(" "));
-    // 去掉尾部回车
+    // Remove trailing carriage returns and newlines
     str->erase(str->find_last_not_of("\r\n") + 1);
-    // 去掉两边双引号
+    // Remove the double quotes on both sides
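    // [Editor's illustration, not part of the patch] For example, a raw
    // metric value of "  \"conf.value\"\r\n" becomes conf.value after the
    // three steps: leading spaces, then trailing \r\n, then the quotes.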
    str->erase(0, str->find_first_not_of("\""));
    str->erase(str->find_last_not_of("\"") + 1);
 }
diff --git a/src/tools/common.h b/src/tools/common.h
index 1465a76ac7..cea600eb5f 100644
--- a/src/tools/common.h
+++ b/src/tools/common.h
@@ -24,8 +24,9 @@
 #define SRC_TOOLS_COMMON_H_

 #include
-#include
+
 #include
+#include

 DECLARE_uint32(logicalPoolId);
 DECLARE_uint32(copysetId);
@@ -34,9 +35,9 @@ namespace curve {
 namespace tool {

 /**
- * @brief 格式化,从metric获取的string
- * 去掉string两边的双引号以及空格和回车
- * @param[out] str 要格式化的string
+ * @brief Format a string obtained from a metric:
+ * remove the double quotes, spaces, and carriage returns around it
+ * @param[out] str The string to format
 */
 void TrimMetricString(std::string* str);

diff --git a/src/tools/consistency_check.cpp b/src/tools/consistency_check.cpp
index e3a84366ae..7cc1d50ed1 100644
--- a/src/tools/consistency_check.cpp
+++ b/src/tools/consistency_check.cpp
@@ -20,16 +20,18 @@
 * Author: tongguangxun
 */

-#include
-
 #include "src/tools/consistency_check.h"

+#include
+
 DEFINE_string(filename, "", "filename to check consistency");
-DEFINE_bool(check_hash, true, R"(用户需要先确认copyset的applyindex一致之后
-    再去查copyset内容是不是一致。通常需要先设置
-    check_hash = false先检查copyset的applyindex是否一致
-    如果一致了再设置check_hash = true,
-    检查copyset内容是不是一致)");
+DEFINE_bool(
+    check_hash, true,
+    R"(Users need to confirm whether the apply index of the copyset is consistent
+    before checking if the copyset content is consistent. Usually, you should first set
+    check_hash = false to initially verify if the apply index of the copyset is consistent.
+    Once confirmed, then set check_hash = true,
+    to check if the copyset content is consistent)");
 DEFINE_uint32(chunkServerBasePort, 8200, "base port of chunkserver");
 DECLARE_string(mdsAddr);
@@ -48,8 +50,8 @@ std::ostream& operator<<(std::ostream& os, const CsAddrsType& csAddrs) {
     for (uint32_t i = 0; i < csAddrs.size(); ++i) {
         std::string ip;
         uint32_t port;
-        if (curve::common::NetCommon::SplitAddrToIpPort(csAddrs[i],
-                                                        &ip, &port)) {
+        if (curve::common::NetCommon::SplitAddrToIpPort(csAddrs[i], &ip,
+                                                        &port)) {
             uint32_t csSeq = port - FLAGS_chunkServerBasePort;
             ipVec.emplace_back(ip);
             seqVec.emplace_back(csSeq);
@@ -75,12 +77,11 @@ std::ostream& operator<<(std::ostream& os, const CsAddrsType& csAddrs) {
 }

 ConsistencyCheck::ConsistencyCheck(
-    std::shared_ptr nameSpaceToolCore,
-    std::shared_ptr csClient) :
-    nameSpaceToolCore_(nameSpaceToolCore),
-    csClient_(csClient),
-    inited_(false) {
-}
+    std::shared_ptr nameSpaceToolCore,
+    std::shared_ptr csClient)
+    : nameSpaceToolCore_(nameSpaceToolCore),
+      csClient_(csClient),
+      inited_(false) {}

 bool ConsistencyCheck::SupportCommand(const std::string& command) {
     return (command == kCheckConsistencyCmd);
 }
@@ -98,7 +99,7 @@ int ConsistencyCheck::Init() {
     return 0;
 }

-int ConsistencyCheck::RunCommand(const std::string &cmd) {
+int ConsistencyCheck::RunCommand(const std::string& cmd) {
     if (Init() != 0) {
         std::cout << "Init ConsistencyCheck failed" << std::endl;
         return -1;
@@ -131,13 +132,15 @@ int ConsistencyCheck::CheckFileConsistency(const std::string& fileName,
     return 0;
 }

-void ConsistencyCheck::PrintHelp(const std::string &cmd) {
+void ConsistencyCheck::PrintHelp(const std::string& cmd) {
     if (!SupportCommand(cmd)) {
         std::cout << "Command not supported!" << std::endl;
         return;
     }
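    // [Editor's note, not part of the patch] As the -check_hash help text
    // above describes, a typical consistency check runs in two passes:
    //   curve_ops_tool check-consistency -filename=/test -check_hash=false
    //   curve_ops_tool check-consistency -filename=/test -check_hash=true
    // first confirming the apply indices match, then comparing chunk hashes.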
<< std::endl; return; } std::cout << "Example: " << std::endl; - std::cout << "curve_ops_tool check-consistency -filename=/test [-check_hash=false]" << std::endl; // NOLINT + std::cout << "curve_ops_tool check-consistency -filename=/test " + "[-check_hash=false]" + << std::endl; // NOLINT } int ConsistencyCheck::FetchFileCopyset(const std::string& fileName, @@ -160,14 +163,11 @@ int ConsistencyCheck::FetchFileCopyset(const std::string& fileName, return 0; } -int ConsistencyCheck::CheckCopysetConsistency( - const CopySet copyset, - bool checkHash) { +int ConsistencyCheck::CheckCopysetConsistency(const CopySet copyset, + bool checkHash) { std::vector csLocs; int res = nameSpaceToolCore_->GetChunkServerListInCopySet( - copyset.first, - copyset.second, - &csLocs); + copyset.first, copyset.second, &csLocs); if (res != 0) { std::cout << "GetServerList info failed, exit consistency check!" << std::endl; @@ -180,9 +180,9 @@ int ConsistencyCheck::CheckCopysetConsistency( std::string csAddr = hostIp + ":" + std::to_string(port); csAddrs.emplace_back(csAddr); } - // 检查当前copyset的chunkserver内容是否一致 + // Check if the chunkserver content of the current copyset is consistent if (checkHash) { - // 先检查apply index是否一致 + // First, check if the application index is consistent res = CheckApplyIndex(copyset, csAddrs); if (res != 0) { std::cout << "Apply index not match when check hash!" << std::endl; @@ -195,17 +195,16 @@ int ConsistencyCheck::CheckCopysetConsistency( } int ConsistencyCheck::GetCopysetStatusResponse( - const std::string& csAddr, - const CopySet copyset, - CopysetStatusResponse* response) { + const std::string& csAddr, const CopySet copyset, + CopysetStatusResponse* response) { int res = csClient_->Init(csAddr); if (res != 0) { - std::cout << "Init chunkserverClient to " << csAddr - << " fail!" << std::endl; + std::cout << "Init chunkserverClient to " << csAddr << " fail!" + << std::endl; return -1; } CopysetStatusRequest request; - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address(csAddr); request.set_logicpoolid(copyset.first); request.set_copysetid(copyset.second); @@ -213,8 +212,8 @@ int ConsistencyCheck::GetCopysetStatusResponse( request.set_queryhash(false); res = csClient_->GetCopysetStatus(request, response); if (res != 0) { - std::cout << "GetCopysetStatus from " << csAddr - << " fail!" << std::endl; + std::cout << "GetCopysetStatus from " << csAddr << " fail!" + << std::endl; return -1; } return 0; @@ -226,8 +225,7 @@ int ConsistencyCheck::CheckCopysetHash(const CopySet& copyset, Chunk chunk(copyset.first, copyset.second, chunkId); int res = CheckChunkHash(chunk, csAddrs); if (res != 0) { - std::cout << "{" << chunk - << "," << csAddrs << "}" << std::endl; + std::cout << "{" << chunk << "," << csAddrs << "}" << std::endl; return -1; } } @@ -242,8 +240,8 @@ int ConsistencyCheck::CheckChunkHash(const Chunk& chunk, for (const auto& csAddr : csAddrs) { int res = csClient_->Init(csAddr); if (res != 0) { - std::cout << "Init chunkserverClient to " << csAddr - << " fail!" << std::endl; + std::cout << "Init chunkserverClient to " << csAddr << " fail!" 
+ << std::endl; return -1; } res = csClient_->GetChunkHash(chunk, &curHash); @@ -276,8 +274,8 @@ int ConsistencyCheck::CheckApplyIndex(const CopySet copyset, CopysetStatusResponse response; int res = GetCopysetStatusResponse(csAddr, copyset, &response); if (res != 0) { - std::cout << "GetCopysetStatusResponse from " << csAddr - << " fail" << std::endl; + std::cout << "GetCopysetStatusResponse from " << csAddr << " fail" + << std::endl; ret = -1; break; } diff --git a/src/tools/consistency_check.h b/src/tools/consistency_check.h index 12e12346b9..aad241306f 100644 --- a/src/tools/consistency_check.h +++ b/src/tools/consistency_check.h @@ -23,25 +23,25 @@ #ifndef SRC_TOOLS_CONSISTENCY_CHECK_H_ #define SRC_TOOLS_CONSISTENCY_CHECK_H_ -#include -#include #include #include +#include +#include -#include -#include #include +#include #include #include +#include #include -#include +#include #include "proto/copyset.pb.h" #include "src/common/net_common.h" -#include "src/tools/namespace_tool_core.h" #include "src/tools/chunkserver_client.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" +#include "src/tools/namespace_tool_core.h" DECLARE_string(filename); DECLARE_bool(check_hash); @@ -57,115 +57,118 @@ std::ostream& operator<<(std::ostream& os, const CsAddrsType& csAddrs); class ConsistencyCheck : public CurveTool { public: ConsistencyCheck(std::shared_ptr nameSpaceToolCore, - std::shared_ptr csClient); + std::shared_ptr csClient); ~ConsistencyCheck() = default; /** - * @brief 打印help信息 - * @param cmd:执行的命令 - * @return 无 + * @brief Print help information + * @param cmd: Command executed + * @return None */ - void PrintHelp(const std::string &cmd) override; + void PrintHelp(const std::string& cmd) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param cmd: Command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &cmd) override; + int RunCommand(const std::string& cmd) override; /** - * @brief 检查三副本一致性 - * @param fileName 要检查一致性的文件名 - * @param checkHash 是否检查hash,如果为false,检查apply index而不是hash - * @return 一致返回0,否则返回-1 + * @brief Check consistency of three replicas + * @param fileName The file name to check for consistency + * @param checkHash Does check hash? If false, check apply index instead of + * hash + * @return consistently returns 0, otherwise returns -1 */ int CheckFileConsistency(const std::string& fileName, bool checkHash); /** - * @brief 检查copyset的三副本一致性 - * @param copysetId 要检查的copysetId - * @param checkHash 是否检查hash,如果为false,检查apply index而不是hash - * @return 成功返回0,失败返回-1 + * @brief Check the consistency of the three copies of the copyset + * @param copysetId The copysetId to be checked + * @param checkHash Does check hash? 

    /**
-     * @brief 检查copyset的三副本一致性
-     * @param copysetId 要检查的copysetId
-     * @param checkHash 是否检查hash,如果为false,检查apply index而不是hash
-     * @return 成功返回0,失败返回-1
+     * @brief Check the consistency of the three copies of the copyset
+     * @param copysetId The copysetId to be checked
+     * @param checkHash Whether to check hash; if false, check apply index
+     * instead of hash
+     * @return returns 0 for success, -1 for failure
     */
-    int CheckCopysetConsistency(const CopySet copysetId,
-                                bool checkHash);
+    int CheckCopysetConsistency(const CopySet copysetId, bool checkHash);

    /**
-     * @brief 打印帮助信息
+     * @brief Print help information
     */
     void PrintHelp();

    /**
-     * @brief 返回是否支持该命令
-     * @param command:执行的命令
-     * @return true / false
+     * @brief returns whether the command is supported
+     * @param command: The command executed
+     * @return true/false
     */
     static bool SupportCommand(const std::string& command);

 private:
-    /**
-     * @brief 初始化
+    /**
+     * @brief initialization
     */
     int Init();

    /**
-     * @brief 从mds获取文件所在的copyset列表
-     * @param fileName 文件名
-     * @param[out] copysetIds copysetId的列表,返回值为0时有效
-     * @return 成功返回0,失败返回-1
+     * @brief Get the list of copysets where the file is located from mds
+     * @param fileName File name
+     * @param[out] copysetIds The list of copysetIds, valid when the return
+     * value is 0
+     * @return returns 0 for success, -1 for failure
     */
     int FetchFileCopyset(const std::string& fileName,
                          std::set* copysets);

    /**
-     * @brief 从chunkserver获取copyset的状态
-     * @param csAddr chunkserver地址
-     * @param copysetId 要获取的copysetId
-     * @param[out] response 返回的response
-     * @return 成功返回0,失败返回-1
+     * @brief Get the status of a copyset from chunkserver
+     * @param csAddr chunkserver address
+     * @param copysetId The copysetId to obtain
+     * @param[out] response The response returned
+     * @return returns 0 for success, -1 for failure
     */
     int GetCopysetStatusResponse(const std::string& csAddr,
                                  const CopySet copyset,
                                  CopysetStatusResponse* response);

    /**
-     * @brief 检查copyset中指定chunk的hash的一致性
-     * @param copysetId 要检查的copysetId
-     * @param csAddrs copyset对应的chunkserver的地址
-     * @return 一致返回0,否则返回-1
+     * @brief Check the consistency of the hash of the specified chunk in the
+     * copyset
+     * @param copysetId The copysetId to be checked
+     * @param csAddrs The address of the chunkserver corresponding to the
+     * copyset
+     * @return returns 0 if consistent, otherwise returns -1
     */
-    int CheckCopysetHash(const CopySet& copyset,
-                         const CsAddrsType& csAddrs);
+    int CheckCopysetHash(const CopySet& copyset, const CsAddrsType& csAddrs);

    /**
-     * @brief chunk在三个副本的hash的一致性
-     * @param chunk 要检查的chunk
-     * @param csAddrs copyset对应的chunkserver的地址
-     * @return 一致返回0,否则返回-1
+     * @brief Consistency of a chunk's hash across its three replicas
+     * @param chunk The chunk to be checked
+     * @param csAddrs The address of the chunkserver corresponding to the
+     * copyset
+     * @return returns 0 if consistent, otherwise returns -1
     */
-    int CheckChunkHash(const Chunk& chunk,
-                       const CsAddrsType& csAddrs);
+    int CheckChunkHash(const Chunk& chunk, const CsAddrsType& csAddrs);

    /**
-     * @brief 检查副本间applyindex的一致性
-     * @param copysetId 要检查的copysetId
-     * @param csAddrs copyset对应的chunkserver的地址
-     * @return 一致返回0,否则返回-1
+     * @brief Check the consistency of the applyindex between replicas
+     * @param copysetId The copysetId to be checked
+     * @param csAddrs The address of the chunkserver corresponding to the
+     * copyset
+     * @return returns 0 if consistent, otherwise returns -1
     */
-    int CheckApplyIndex(const CopySet copyset,
-                        const CsAddrsType& csAddrs);
+    int CheckApplyIndex(const CopySet copyset, const CsAddrsType& csAddrs);

 private:
-    // 文件所在的逻辑池id
-    PoolIdType lpid_;
-    // 用来与mds的nameservice接口交互
+    // The logical pool ID where the file is located
+    PoolIdType lpid_;
+    // Used to interact with the nameservice interface of mds
     std::shared_ptr nameSpaceToolCore_;
-    // 向chunkserver发送RPC的client
+    // Client sending RPC to chunkserver
     std::shared_ptr csClient_;
-    // copyset中需要检查hash的chunk
+    // Chunks in the copyset whose hash needs to be checked
     std::map> chunksInCopyset_;
-    // 是否初始化成功过
+    // Whether initialization has succeeded
     bool inited_;
 };
 }  // namespace tool
diff --git a/src/tools/copyset_check.cpp b/src/tools/copyset_check.cpp
index 06341e5291..1d38b1d691 100644
--- a/src/tools/copyset_check.cpp
+++ b/src/tools/copyset_check.cpp
@@ -20,42 +20,44 @@
 * Author: charisu
 */
 #include "src/tools/copyset_check.h"
+
 #include "src/tools/common.h"
 #include "src/tools/metric_name.h"

 DEFINE_bool(detail, false, "list the copyset detail or not");
 DEFINE_uint32(chunkserverId, 0, "chunkserver id");
-DEFINE_string(chunkserverAddr, "", "if specified, chunkserverId is not required");  // NOLINT
+DEFINE_string(chunkserverAddr, "",
+              "if specified, chunkserverId is not required");  // NOLINT
 DEFINE_uint32(serverId, 0, "server id");
 DEFINE_string(serverIp, "", "server ip");
 DEFINE_string(opName, curve::tool::kTotalOpName, "operator name");
 DECLARE_string(mdsAddr);
-DEFINE_uint64(opIntervalExceptLeader, 5, "Operator generation interval other "
-              "than transfer leader");
+DEFINE_uint64(opIntervalExceptLeader, 5,
+              "Operator generation interval other "
+              "than transfer leader");
 DEFINE_uint64(leaderOpInterval, 30,
-              "tranfer leader operator generation interval");
+              "transfer leader operator generation interval");

 namespace curve {
 namespace tool {

-#define CHECK_ONLY_ONE_SHOULD_BE_SPECIFIED(flagname1, flagname2) \
-    do { \
-        if ((FLAGS_ ## flagname1).empty() && (FLAGS_ ## flagname2) == 0) { \
-            std::cout << # flagname1 << " OR " << # flagname2 \
-                " should be secified!" << std::endl; \
-            return -1; \
-        } \
-        if (!(FLAGS_ ## flagname1).empty() && (FLAGS_ ## flagname2) != 0) { \
-            std::cout << "Only one of " # flagname1 << " OR " << # flagname2 \
-                " should be secified!" << std::endl; \
-            return -1; \
-        } \
-    } while (0); \
+#define CHECK_ONLY_ONE_SHOULD_BE_SPECIFIED(flagname1, flagname2)           \
+    do {                                                                   \
+        if ((FLAGS_##flagname1).empty() && (FLAGS_##flagname2) == 0) {     \
+            std::cout << #flagname1 << " OR "                              \
+                      << #flagname2 " should be specified!" << std::endl;  \
+            return -1;                                                     \
+        }                                                                  \
+        if (!(FLAGS_##flagname1).empty() && (FLAGS_##flagname2) != 0) {    \
+            std::cout << "Only one of " #flagname1 << " OR "               \
+                      << #flagname2 " should be specified!" << std::endl;  \
+            return -1;                                                     \
+        }                                                                  \
+    } while (0);

 bool CopysetCheck::SupportCommand(const std::string& command) {
-    return (command == kCheckCopysetCmd || command == kCheckChunnkServerCmd
-            || command == kCheckServerCmd || command == kCopysetsStatusCmd
-            || command == kCheckOperatorCmd
-            || command == kListMayBrokenVolumes);
+    return (command == kCheckCopysetCmd || command == kCheckChunnkServerCmd ||
+            command == kCheckServerCmd || command == kCopysetsStatusCmd ||
+            command == kCheckOperatorCmd || command == kListMayBrokenVolumes);
 }

 int CopysetCheck::Init() {
@@ -76,7 +78,7 @@ int CopysetCheck::RunCommand(const std::string& command) {
         return -1;
     }
     if (command == kCheckCopysetCmd) {
-        // 检查某个copyset的状态
+        // Check the status of a copyset
         if (FLAGS_logicalPoolId == 0 || FLAGS_copysetId == 0) {
             std::cout << "logicalPoolId AND copysetId should be specified!"
<< std::endl; @@ -84,7 +86,7 @@ int CopysetCheck::RunCommand(const std::string& command) { } return CheckCopyset(); } else if (command == kCheckChunnkServerCmd) { - // 检查某个chunkserver上的所有copyset + // Check all copysets on a certain chunkserver CHECK_ONLY_ONE_SHOULD_BE_SPECIFIED(chunkserverAddr, chunkserverId); return CheckChunkServer(); } else if (command == kCheckServerCmd) { @@ -159,8 +161,8 @@ int CopysetCheck::CheckServer() { if (FLAGS_detail) { PrintDetail(); std::ostream_iterator out(std::cout, ", "); - std::cout << "unhealthy chunkserver list (total: " - << unhealthyCs.size() <<"): {"; + std::cout << "unhealthy chunkserver list (total: " << unhealthyCs.size() + << "): {"; std::copy(unhealthyCs.begin(), unhealthyCs.end(), out); std::cout << "}" << std::endl; } @@ -188,11 +190,10 @@ int CopysetCheck::CheckOperator(const std::string& opName) { } else { res = core_->CheckOperator(opName, FLAGS_opIntervalExceptLeader); } - if (res < 0) { + if (res < 0) { std::cout << "Check operator fail!" << std::endl; } else { - std::cout << "Operator num is " - << res << std::endl; + std::cout << "Operator num is " << res << std::endl; res = 0; } return res; @@ -202,27 +203,33 @@ void CopysetCheck::PrintHelp(const std::string& command) { std::cout << "Example: " << std::endl << std::endl; if (command == kCheckCopysetCmd) { std::cout << "curve_ops_tool check-copyset -logicalPoolId=2 " - << "-copysetId=101 [-mdsAddr=127.0.0.1:6666] [-margin=1000] " - << "[-confPath=/etc/curve/tools.conf]" << std::endl << std::endl; // NOLINT + << "-copysetId=101 [-mdsAddr=127.0.0.1:6666] [-margin=1000] " + << "[-confPath=/etc/curve/tools.conf]" << std::endl + << std::endl; // NOLINT } else if (command == kCheckChunnkServerCmd) { - std::cout << "curve_ops_tool check-chunkserver " + std::cout + << "curve_ops_tool check-chunkserver " << "-chunkserverId=1 [-mdsAddr=127.0.0.1:6666] [-margin=1000] " << "[-confPath=/etc/curve/tools.conf]" << std::endl; std::cout << "curve_ops_tool check-chunkserver " - << "[-mdsAddr=127.0.0.1:6666] " - << "[-chunkserverAddr=127.0.0.1:8200] [-margin=1000] " - << "[-confPath=/etc/curve/tools.conf]" << std::endl << std::endl; // NOLINT + << "[-mdsAddr=127.0.0.1:6666] " + << "[-chunkserverAddr=127.0.0.1:8200] [-margin=1000] " + << "[-confPath=/etc/curve/tools.conf]" << std::endl + << std::endl; // NOLINT } else if (command == kCheckServerCmd) { std::cout << "curve_ops_tool check-server -serverId=1 " - << "[-mdsAddr=127.0.0.1:6666] [-margin=1000] " - << "[-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + << "[-mdsAddr=127.0.0.1:6666] [-margin=1000] " + << "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT std::cout << "curve_ops_tool check-server [-mdsAddr=127.0.0.1:6666] " - << "[-serverIp=127.0.0.1] [-margin=1000] " - << "[-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + << "[-serverIp=127.0.0.1] [-margin=1000] " + << "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT } else if (command == kCopysetsStatusCmd) { std::cout << "curve_ops_tool copysets-status [-mdsAddr=127.0.0.1:6666] " << "[-margin=1000] [-operatorMaxPeriod=30] [-checkOperator] " - << "[-confPath=/etc/curve/tools.conf]" << std::endl << std::endl; // NOLINT + << "[-confPath=/etc/curve/tools.conf]" << std::endl + << std::endl; // NOLINT } else if (command == kCheckOperatorCmd) { std::cout << "curve_ops_tool check-operator -opName=" << kTotalOpName << "/" << kChangeOpName << "/" << kAddOpName << "/" @@ -233,26 +240,32 @@ void CopysetCheck::PrintHelp(const std::string& command) { std::cout << 
"Command not supported!" << std::endl; } std::cout << std::endl; - std::cout << "Standard of healthy is no copyset in the following state:" << std::endl; // NOLINT + std::cout << "Standard of healthy is no copyset in the following state:" + << std::endl; // NOLINT std::cout << "1、copyset has no leader" << std::endl; std::cout << "2、number of replicas less than expected" << std::endl; std::cout << "3、some replicas not online" << std::endl; std::cout << "4、installing snapshot" << std::endl; std::cout << "5、gap of log index between peers exceed margin" << std::endl; - std::cout << "6、for check-cluster, it will also check whether the mds is scheduling if -checkOperator specified" // NOLINT - "(if no operators in operatorMaxPeriod, it considered healthy)" << std::endl; // NOLINT - std::cout << "By default, if the number of replicas is less than 3, it is considered unhealthy, " // NOLINT - "you can change it by specify -replicasNum" << std::endl; - std::cout << "The order is sorted by priority, if the former is satisfied, the rest will not be checked" << std::endl; // NOLINT + std::cout << "6、for check-cluster, it will also check whether the mds is " + "scheduling if -checkOperator specified" // NOLINT + "(if no operators in operatorMaxPeriod, it considered healthy)" + << std::endl; // NOLINT + std::cout << "By default, if the number of replicas is less than 3, it is " + "considered unhealthy, " // NOLINT + "you can change it by specify -replicasNum" + << std::endl; + std::cout << "The order is sorted by priority, if the former is satisfied, " + "the rest will not be checked" + << std::endl; // NOLINT } - void CopysetCheck::PrintStatistic() { const auto& statistics = core_->GetCopysetStatistics(); std::cout << "total copysets: " << statistics.totalNum << ", unhealthy copysets: " << statistics.unhealthyNum - << ", unhealthy_ratio: " - << statistics.unhealthyRatio * 100 << "%" << std::endl; + << ", unhealthy_ratio: " << statistics.unhealthyRatio * 100 << "%" + << std::endl; } void CopysetCheck::PrintDetail() { @@ -282,7 +295,7 @@ void CopysetCheck::PrintDetail() { PrintCopySet(item.second); } std::cout << std::endl; - // 打印有问题的chunkserver + // Printing problematic chunkservers PrintExcepChunkservers(); } @@ -300,32 +313,30 @@ void CopysetCheck::PrintCopySet(const std::set& set) { } PoolIdType lgId = GetPoolID(groupId); CopySetIdType csId = GetCopysetID(groupId); - std::cout << "(grouId: " << gid << ", logicalPoolId: " - << std::to_string(lgId) << ", copysetId: " - << std::to_string(csId) << ")"; + std::cout << "(grouId: " << gid + << ", logicalPoolId: " << std::to_string(lgId) + << ", copysetId: " << std::to_string(csId) << ")"; } std::cout << "}" << std::endl; } void CopysetCheck::PrintExcepChunkservers() { - auto serviceExceptionChunkServers = - core_->GetServiceExceptionChunkServer(); + auto serviceExceptionChunkServers = core_->GetServiceExceptionChunkServer(); if (!serviceExceptionChunkServers.empty()) { std::ostream_iterator out(std::cout, ", "); std::cout << "service-exception chunkservers (total: " << serviceExceptionChunkServers.size() << "): {"; std::copy(serviceExceptionChunkServers.begin(), - serviceExceptionChunkServers.end(), out); + serviceExceptionChunkServers.end(), out); std::cout << "}" << std::endl; } - auto copysetLoadExceptionCS = - core_->GetCopysetLoadExceptionChunkServer(); + auto copysetLoadExceptionCS = core_->GetCopysetLoadExceptionChunkServer(); if (!copysetLoadExceptionCS.empty()) { std::ostream_iterator out(std::cout, ", "); std::cout << "copyset-load-exception 
chunkservers (total: " << copysetLoadExceptionCS.size() << "): {"; - std::copy(copysetLoadExceptionCS.begin(), - copysetLoadExceptionCS.end(), out); + std::copy(copysetLoadExceptionCS.begin(), copysetLoadExceptionCS.end(), + out); std::cout << "}" << std::endl; } } diff --git a/src/tools/copyset_check.h b/src/tools/copyset_check.h index b4fa76c28f..54d5e46d36 100644 --- a/src/tools/copyset_check.h +++ b/src/tools/copyset_check.h @@ -25,23 +25,23 @@ #include -#include +#include #include +#include #include -#include -#include -#include #include -#include +#include +#include +#include #include "src/mds/common/mds_define.h" #include "src/tools/copyset_check_core.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" -using curve::mds::topology::PoolIdType; -using curve::mds::topology::CopySetIdType; using curve::mds::topology::ChunkServerIdType; +using curve::mds::topology::CopySetIdType; +using curve::mds::topology::PoolIdType; using curve::mds::topology::ServerIdType; namespace curve { @@ -49,94 +49,101 @@ namespace tool { class CopysetCheck : public CurveTool { public: - explicit CopysetCheck(std::shared_ptr core) : - core_(core), inited_(false) {} + explicit CopysetCheck(std::shared_ptr core) + : core_(core), inited_(false) {} ~CopysetCheck() = default; /** - * @brief 根据flag检查复制组健康状态 - * 复制组健康的标准,没有任何副本处于以下状态,下面的顺序按优先级排序, - * 即满足上面一条,就不会检查下面一条 - * 1、leader为空(复制组的信息以leader处的为准,没有leader无法检查) - * 2、配置中的副本数量不足 - * 3、有副本不在线 - * 4、有副本在安装快照 - * 5、副本间log index差距太大 - * 6、对于集群来说,还要判断一下chunkserver上的copyset数量和leader数量是否均衡, - * 避免后续会有调度使得集群不稳定 - * @param command 要执行的命令,目前有check-copyset,check-chunkserver, - * check-server,check-cluster等 - * @return 成功返回0,失败返回-1 + * @brief Check the health status of replication groups according to the + * flags. A replication group is healthy when no replica is in any of the + * following states; the checks are ordered by priority, and once one + * condition is met the remaining ones are not checked + * 1. The leader is empty (the replication group's information is taken + * from the leader, so nothing can be checked without a leader) + * 2. The number of replicas in the configuration is insufficient + * 3. Some replicas are not online + * 4. A replica is installing a snapshot + * 5. The log index gap between replicas is too large + * 6.
For a cluster, it is also necessary to check whether the number + * of copysets and the number of leaders on each chunkserver are balanced, + * to avoid later scheduling that could destabilize the cluster + * @param command The command to execute; currently includes check-copyset, + * check-chunkserver, check-server, check-cluster, etc. + * @return 0 for success, -1 for failure */ int RunCommand(const std::string& command) override; /** - * @brief 打印帮助信息 - * @param command 要执行的命令,目前有check-copyset,check-chunkserver, - * check-server,check-cluster等 + * @brief Print help information + * @param command The command to execute; currently includes check-copyset, + * check-chunkserver, check-server, check-cluster, etc. */ void PrintHelp(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief Returns whether the command is supported + * @param command: The command to check + * @return true / false */ static bool SupportCommand(const std::string& command); private: - /** - * @brief 初始化 + /** + * @brief Initialization */ int Init(); /** - * @brief 检查单个copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check a single copyset + * @return 0 if healthy, -1 otherwise */ int CheckCopyset(); /** - * @brief 检查chunkserver上所有copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check all copysets on a chunkserver + * @return 0 if healthy, -1 otherwise */ int CheckChunkServer(); /** - * @brief 检查server上所有copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check all copysets on a server + * @return 0 if healthy, -1 otherwise */ int CheckServer(); /** - * @brief 检查集群所有copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check all copysets in the cluster + * @return 0 if healthy, -1 otherwise */ int CheckCopysetsInCluster(); /** - * @brief 检查mds端的operator - * @return 无operator返回0,其他情况返回-1 + * @brief Check the operators on the mds side + * @return 0 if there is no operator, -1 otherwise */ int CheckOperator(const std::string& opName); - // 打印copyset检查的详细结果 + // Print the detailed results of the copyset check void PrintDetail(); void PrintCopySet(const std::set& set); - // 打印检查的结果,一共多少copyset,有多少不健康 + // Print the check summary: how many copysets there are in total and how + // many of them are unhealthy void PrintStatistic(); - // 打印有问题的chunkserver列表 + // Print the list of problematic chunkservers void PrintExcepChunkservers(); - // 打印大多数不在线的副本上面的卷 + // Print the volumes on copysets whose majority of replicas are offline int PrintMayBrokenVolumes(); private: - // 检查copyset的核心逻辑 + // Core logic for checking copysets std::shared_ptr core_; - // 是否初始化成功过 + // Whether initialization has ever succeeded bool inited_; }; } // namespace tool diff --git a/src/tools/copyset_check_core.cpp b/src/tools/copyset_check_core.cpp index f32a7a923d..8a7a3165d9 100644 --- a/src/tools/copyset_check_core.cpp +++ b/src/tools/copyset_check_core.cpp @@ -20,15 +20,19 @@ * Author: charisu */ #include "src/tools/copyset_check_core.h" + #include + #include DEFINE_uint64(margin, 1000, "The threshold of the gap between peers"); DEFINE_uint64(replicasNum, 3, "the number of replicas that required"); -DEFINE_uint64(operatorMaxPeriod, 30, "max period of operator generating, " - "if no operators in a period, it considered to be healthy"); -DEFINE_bool(checkOperator, false, "if true, the operator number of " - "mds will be considered"); +DEFINE_uint64(operatorMaxPeriod, 30, + "max period of operator generating, " + "if no operators in a period, it 
considered to be healthy"); +DEFINE_bool(checkOperator, false, + "if true, the operator number of " + "mds will be considered"); namespace curve { namespace tool { @@ -38,24 +42,22 @@ int CopysetCheckCore::Init(const std::string& mdsAddr) { } CopysetStatistics::CopysetStatistics(uint64_t total, uint64_t unhealthy) - : totalNum(total), unhealthyNum(unhealthy) { + : totalNum(total), unhealthyNum(unhealthy) { if (total != 0) { - unhealthyRatio = - static_cast(unhealthyNum) / totalNum; + unhealthyRatio = static_cast(unhealthyNum) / totalNum; } else { unhealthyRatio = 0; } } CheckResult CopysetCheckCore::CheckOneCopyset(const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId) { + const CopySetIdType& copysetId) { Clear(); std::vector chunkserverLocation; - int res = mdsClient_->GetChunkServerListInCopySet(logicalPoolId, - copysetId, &chunkserverLocation); + int res = mdsClient_->GetChunkServerListInCopySet(logicalPoolId, copysetId, + &chunkserverLocation); if (res != 0) { - std::cout << "GetChunkServerListInCopySet from mds fail!" - << std::endl; + std::cout << "GetChunkServerListInCopySet from mds fail!" << std::endl; return CheckResult::kOtherErr; } int majority = chunkserverLocation.size() / 2 + 1; @@ -69,7 +71,7 @@ CheckResult CopysetCheckCore::CheckOneCopyset(const PoolIdType& logicalPoolId, butil::IOBuf iobuf; int res = QueryChunkServer(csAddr, &iobuf); if (res != 0) { - // 如果查询chunkserver失败,认为不在线 + // If the query for chunkserver fails, it is considered offline serviceExceptionChunkServers_.emplace(csAddr); chunkserverCopysets_[csAddr] = {}; ++offlinePeers; @@ -92,7 +94,7 @@ CheckResult CopysetCheckCore::CheckOneCopyset(const PoolIdType& logicalPoolId, } } else { if (copysetInfo.count(kLeader) == 0 || - copysetInfo[kLeader] == kEmptyAddr) { + copysetInfo[kLeader] == kEmptyAddr) { checkRes = CheckResult::kOtherErr; } } @@ -106,20 +108,20 @@ CheckResult CopysetCheckCore::CheckOneCopyset(const PoolIdType& logicalPoolId, } int CopysetCheckCore::CheckCopysetsOnChunkServer( - const ChunkServerIdType& chunkserverId) { + const ChunkServerIdType& chunkserverId) { Clear(); return CheckCopysetsOnChunkServer(chunkserverId, ""); } int CopysetCheckCore::CheckCopysetsOnChunkServer( - const std::string& chunkserverAddr) { + const std::string& chunkserverAddr) { Clear(); return CheckCopysetsOnChunkServer(0, chunkserverAddr); } int CopysetCheckCore::CheckCopysetsOnChunkServer( - const ChunkServerIdType& chunkserverId, - const std::string& chunkserverAddr) { + const ChunkServerIdType& chunkserverId, + const std::string& chunkserverAddr) { curve::mds::topology::ChunkServerInfo csInfo; int res = 0; if (chunkserverId > 0) { @@ -131,7 +133,7 @@ int CopysetCheckCore::CheckCopysetsOnChunkServer( std::cout << "GetChunkServerInfo from mds fail!" << std::endl; return -1; } - // 如果chunkserver retired的话不发送请求 + // If chunkserver is redirected, do not send the request if (csInfo.status() == ChunkServerStatus::RETIRED) { std::cout << "ChunkServer is retired!" 
<< std::endl; return 0; @@ -139,7 +141,7 @@ int CopysetCheckCore::CheckCopysetsOnChunkServer( std::string hostIp = csInfo.hostip(); uint64_t port = csInfo.port(); std::string csAddr = hostIp + ":" + std::to_string(port); - // 向chunkserver发送RPC请求获取raft state + // Send an RPC request to the chunkserver to obtain the raft state ChunkServerHealthStatus csStatus = CheckCopysetsOnChunkServer(csAddr, {}); if (csStatus == ChunkServerHealthStatus::kHealthy) { return 0; @@ -149,11 +151,8 @@ int CopysetCheckCore::CheckCopysetsOnChunkServer( } ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( - const std::string& chunkserverAddr, - const std::set& groupIds, - bool queryLeader, - std::pair *record, - bool queryCs) { + const std::string& chunkserverAddr, const std::set& groupIds, + bool queryLeader, std::pair* record, bool queryCs) { bool isHealthy = true; int res = 0; butil::IOBuf iobuf; @@ -165,33 +164,38 @@ ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( } if (res != 0) { - // 如果查询chunkserver失败,认为不在线,把它上面所有的 - // copyset都添加到peerNotOnlineCopysets_里面 + // If querying the chunkserver fails, consider it offline and add all + // its copysets to peerNotOnlineCopysets_. UpdatePeerNotOnlineCopysets(chunkserverAddr); serviceExceptionChunkServers_.emplace(chunkserverAddr); chunkserverCopysets_[chunkserverAddr] = {}; return ChunkServerHealthStatus::kNotOnline; } - // 存储每一个copyset的详细信息 + // Store detailed information for each copyset CopySetInfosType copysetInfos; ParseResponseAttachment(groupIds, &iobuf, &copysetInfos); - // 只有查询全部chunkserver的时候才更新chunkServer上的copyset列表 + // Only update the chunkserver's copyset list when all of its copysets + // are queried if (groupIds.empty()) { UpdateChunkServerCopysets(chunkserverAddr, copysetInfos); } - // 对应的chunkserver上没有要找的leader的copyset,可能已经迁移出去了, - // 但是follower这边还没更新,这种情况也认为chunkserver不健康 + // The chunkserver does not have the copyset whose leader we are looking + // for; it may have been migrated away while the follower's view has not + // been updated yet. This case is also treated as an unhealthy chunkserver if (copysetInfos.empty() || - (!groupIds.empty() && copysetInfos.size() != groupIds.size())) { + (!groupIds.empty() && copysetInfos.size() != groupIds.size())) { std::cout << "Some copysets not found on chunkserver, may be tranfered" << std::endl; return ChunkServerHealthStatus::kNotHealthy; } - // 存储需要发送消息的chunkserver的地址和对应的groupId - // key是chunkserver地址,value是groupId的列表 + // Store the chunkservers that still need to be queried: the key is the + // chunkserver address, and the value is the list of groupIds to query + // on it std::map> csAddrMap; - // 存储没有leader的copyset对应的peers,key为groupId,value为配置 + // Store the peers of the copysets that have no leader, with the groupId + // as key and the configuration as value std::map> noLeaderCopysetsPeers; for (auto& copysetInfo : copysetInfos) { std::string groupId = copysetInfo[kGroupId]; @@ -228,17 +232,17 @@ ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( break; } } else if (state == kStateFollower) { - // 如果没有leader,检查是否是大多数不在线 - // 是的话标记为大多数不在线,否则标记为No leader + // If there is no leader, check whether a majority of peers are + // offline; if so, mark it as majority offline, otherwise mark it + // as no leader if (copysetInfo.count(kLeader) == 0 || - copysetInfo[kLeader] == kEmptyAddr) { + copysetInfo[kLeader] == kEmptyAddr) { std::vector peers; curve::common::SplitString(copysetInfo[kPeers], " ", &peers); noLeaderCopysetsPeers[groupId] = peers; continue; }
if (queryLeader) { - // 向leader发送rpc请求 + // Send an RPC request to the leader auto pos = copysetInfo[kLeader].rfind(":"); auto csAddr = copysetInfo[kLeader].substr(0, pos); csAddrMap[csAddr].emplace(groupId); @@ -247,25 +251,25 @@ ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( copysets_[kNoLeader].emplace(groupId); isHealthy = false; } else { - // 其他情况有ERROR,UNINITIALIZED,SHUTTING和SHUTDOWN,这种都认为不健康,统计到 - // copyset里面 + // The remaining states (ERROR, UNINITIALIZED, SHUTTING, and + // SHUTDOWN) are all considered unhealthy and are recorded in + // copysets_ std::string key = "state " + copysetInfo[kState]; copysets_[key].emplace(groupId); isHealthy = false; } } - // 遍历没有leader的copyset - bool health = CheckCopysetsNoLeader(chunkserverAddr, - noLeaderCopysetsPeers); + // Traverse the copysets without a leader + bool health = CheckCopysetsNoLeader(chunkserverAddr, noLeaderCopysetsPeers); if (!health) { isHealthy = false; } - // 遍历chunkserver发送请求 + // Traverse the chunkservers and send requests for (const auto& item : csAddrMap) { - ChunkServerHealthStatus res = CheckCopysetsOnChunkServer(item.first, - item.second); + ChunkServerHealthStatus res = + CheckCopysetsOnChunkServer(item.first, item.second); if (res != ChunkServerHealthStatus::kHealthy) { isHealthy = false; } @@ -277,10 +281,9 @@ ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( } } -bool CopysetCheckCore::CheckCopysetsNoLeader(const std::string& csAddr, - const std::map>& - copysetsPeers) { +bool CopysetCheckCore::CheckCopysetsNoLeader( + const std::string& csAddr, + const std::map>& copysetsPeers) { if (copysetsPeers.empty()) { return true; } @@ -296,13 +299,12 @@ bool CopysetCheckCore::CheckCopysetsNoLeader(const std::string& csAddr, return false; } for (const auto& item : result) { - // 如果在配置组中,检查是否是majority offline + // If it is in the configuration group, check whether a majority of + // peers are offline if (item.second) { isHealthy = false; std::string groupId = item.first; - CheckResult checkRes = CheckPeerOnlineStatus( - groupId, - copysetsPeers.at(item.first)); + CheckResult checkRes = + CheckPeerOnlineStatus(groupId, copysetsPeers.at(item.first)); if (checkRes == CheckResult::kMajorityPeerNotOnline) { copysets_[kMajorityPeerNotOnline].emplace(groupId); continue; @@ -313,9 +315,9 @@ bool CopysetCheckCore::CheckCopysetsNoLeader(const std::string& csAddr, return isHealthy; } -int CopysetCheckCore::CheckIfChunkServerInCopysets(const std::string& csAddr, - const std::set copysets, - std::map* result) { +int CopysetCheckCore::CheckIfChunkServerInCopysets( + const std::string& csAddr, const std::set copysets, + std::map* result) { PoolIdType logicPoolId; std::vector copysetIds; for (const auto& gId : copysets) { @@ -330,8 +332,8 @@ int CopysetCheckCore::CheckIfChunkServerInCopysets(const std::string& csAddr, } std::vector csServerInfos; - int res = mdsClient_->GetChunkServerListInCopySets(logicPoolId, - copysetIds, &csServerInfos); + int res = mdsClient_->GetChunkServerListInCopySets(logicPoolId, copysetIds, + &csServerInfos); if (res != 0) { std::cout << "GetChunkServerListInCopySets fail!" 
<< std::endl; return res; @@ -340,8 +342,8 @@ int CopysetCheckCore::CheckIfChunkServerInCopysets(const std::string& csAddr, CopySetIdType copysetId = info.copysetid(); std::string groupId = ToGroupId(logicPoolId, copysetId); for (const auto& csLoc : info.cslocs()) { - std::string addr = csLoc.hostip() + ":" - + std::to_string(csLoc.port()); + std::string addr = + csLoc.hostip() + ":" + std::to_string(csLoc.port()); if (addr == csAddr) { (*result)[groupId] = true; break; @@ -351,22 +353,23 @@ int CopysetCheckCore::CheckIfChunkServerInCopysets(const std::string& csAddr, return 0; } -int CopysetCheckCore::CheckCopysetsOnServer(const ServerIdType& serverId, - std::vector* unhealthyChunkServers) { +int CopysetCheckCore::CheckCopysetsOnServer( + const ServerIdType& serverId, + std::vector* unhealthyChunkServers) { Clear(); return CheckCopysetsOnServer(serverId, "", true, unhealthyChunkServers); } -int CopysetCheckCore::CheckCopysetsOnServer(const std::string& serverIp, - std::vector* unhealthyChunkServers) { +int CopysetCheckCore::CheckCopysetsOnServer( + const std::string& serverIp, + std::vector* unhealthyChunkServers) { Clear(); return CheckCopysetsOnServer(0, serverIp, true, unhealthyChunkServers); } void CopysetCheckCore::ConcurrentCheckCopysetsOnServer( - const std::vector &chunkservers, - uint32_t *index, std::map> *result) { + const std::vector& chunkservers, uint32_t* index, + std::map>* result) { while (1) { indexMutex.lock(); if (*index + 1 > chunkservers.size()) { @@ -386,11 +389,11 @@ void CopysetCheckCore::ConcurrentCheckCopysetsOnServer( } } -int CopysetCheckCore::CheckCopysetsOnServer(const ServerIdType& serverId, - const std::string& serverIp, bool queryLeader, - std::vector* unhealthyChunkServers) { +int CopysetCheckCore::CheckCopysetsOnServer( + const ServerIdType& serverId, const std::string& serverIp, bool queryLeader, + std::vector* unhealthyChunkServers) { bool isHealthy = true; - // 向mds发送RPC + // Send RPC to mds int res = 0; std::vector chunkservers; if (serverId > 0) { @@ -406,16 +409,15 @@ int CopysetCheckCore::CheckCopysetsOnServer(const ServerIdType& serverId, std::map> queryCsResult; uint32_t index = 0; for (uint64_t i = 0; i < FLAGS_rpcConcurrentNum; i++) { - threadpool.emplace_back(Thread( - &CopysetCheckCore::ConcurrentCheckCopysetsOnServer, - this, std::ref(chunkservers), &index, - &queryCsResult)); + threadpool.emplace_back( + Thread(&CopysetCheckCore::ConcurrentCheckCopysetsOnServer, this, + std::ref(chunkservers), &index, &queryCsResult)); } - for (auto &thread : threadpool) { + for (auto& thread : threadpool) { thread.join(); } - for (auto &record : queryCsResult) { + for (auto& record : queryCsResult) { std::string chunkserverAddr = record.first; auto res = CheckCopysetsOnChunkServer(chunkserverAddr, {}, queryLeader, &record.second, false); @@ -429,7 +431,7 @@ int CopysetCheckCore::CheckCopysetsOnServer(const ServerIdType& serverId, if (isHealthy) { return 0; - } else { + } else { return -1; } } @@ -450,18 +452,21 @@ int CopysetCheckCore::CheckCopysetsInCluster() { isHealthy = false; } } - // 检查从chunkserver上获取的copyset数量与mds记录的数量是否一致 + // Check if the number of copysets obtained from chunkserver matches the + // number of mds records res = CheckCopysetsWithMds(); if (res != 0) { std::cout << "CheckCopysetNumWithMds fail!" << std::endl; return -1; } - // 如果不健康,直接返回,如果健康,还需要对operator作出判断 + // If not healthy, return directly. 
If healthy, also check the + // operator if (!isHealthy) { return -1; } - // 默认不检查operator,在测试脚本之类的要求比较严格的地方才检查operator,不然 - // 每次执行命令等待30秒很不方便 + // By default the operator is not checked; it is only checked where the + // requirements are strict, such as in test scripts, since waiting 30 + // seconds every time a command is executed would be inconvenient if (FLAGS_checkOperator) { int res = CheckOperator(kTotalOpName, FLAGS_operatorMaxPeriod); if (res != 0) { @@ -482,21 +487,22 @@ int CopysetCheckCore::CheckCopysetsWithMds() { if (copysetsInMds.size() != copysets_[kTotal].size()) { std::cout << "Copyset numbers in chunkservers not consistent" " with mds, please check! copysets on chunkserver: " - << copysets_[kTotal].size() << ", copysets in mds: " - << copysetsInMds.size() << std::endl; + << copysets_[kTotal].size() + << ", copysets in mds: " << copysetsInMds.size() << std::endl; return -1; } std::set copysetsInMdsGid; for (const auto& copyset : copysetsInMds) { - std::string gId = ToGroupId(copyset.logicalpoolid(), - copyset.copysetid()); + std::string gId = + ToGroupId(copyset.logicalpoolid(), copyset.copysetid()); copysetsInMdsGid.insert(gId); } int ret = 0; std::vector copysetsInMdsNotInCs(10); - auto iter = std::set_difference(copysetsInMdsGid.begin(), - copysetsInMdsGid.end(), copysets_[kTotal].begin(), - copysets_[kTotal].end(), copysetsInMdsNotInCs.begin()); + auto iter = + std::set_difference(copysetsInMdsGid.begin(), copysetsInMdsGid.end(), + copysets_[kTotal].begin(), copysets_[kTotal].end(), + copysetsInMdsNotInCs.begin()); copysetsInMdsNotInCs.resize(iter - copysetsInMdsNotInCs.begin()); if (!copysetsInMdsNotInCs.empty()) { std::cout << "There are " << copysetsInMdsNotInCs.size() @@ -508,9 +514,10 @@ int CopysetCheckCore::CheckCopysetsWithMds() { ret = -1; } std::vector copysetsInCsNotInMds(10); - iter = std::set_difference(copysets_[kTotal].begin(), - copysets_[kTotal].end(), copysetsInMdsGid.begin(), - copysetsInMdsGid.end(), copysetsInCsNotInMds.begin()); + iter = + std::set_difference(copysets_[kTotal].begin(), copysets_[kTotal].end(), + copysetsInMdsGid.begin(), copysetsInMdsGid.end(), + copysetsInCsNotInMds.begin()); copysetsInCsNotInMds.resize(iter - copysetsInCsNotInMds.begin()); if (!copysetsInCsNotInMds.empty()) { std::cout << "There are " << copysetsInCsNotInMds.size() @@ -542,8 +549,8 @@ int CopysetCheckCore::CheckScanStatus( continue; } - auto groupId = ToGroupId(copysetInfo.logicalpoolid(), - copysetInfo.copysetid()); + auto groupId = + ToGroupId(copysetInfo.logicalpoolid(), copysetInfo.copysetid()); copysets_[kThreeCopiesInconsistent].emplace(groupId); count++; } @@ -565,37 +572,41 @@ int CopysetCheckCore::CheckOperator(const std::string& opName, if (opNum != 0) { return opNum; } - if (curve::common::TimeUtility::GetTimeofDaySec() - - startTime >= checkTimeSec) { + if (curve::common::TimeUtility::GetTimeofDaySec() - startTime >= + checkTimeSec) { break; } sleep(1); - } while (curve::common::TimeUtility::GetTimeofDaySec() - - startTime < checkTimeSec); + } while (curve::common::TimeUtility::GetTimeofDaySec() - startTime < + checkTimeSec); return 0; } -// 每个copyset的信息都会存储在一个map里面,map的key有 -// groupId: 复制组的groupId -// peer_id: 10.182.26.45:8210:0格式的peer id -// state: 节点的状态,LEADER,FOLLOWER,CANDIDATE等等 -// peers: 配置组里的成员,通过空格分隔 -// last_log_id: 最后一个log entry的index -// leader: state为LEADER时才存在这个key,指向复制组leader +// Information for each copyset is stored in a map. The map's keys include: +// - groupId: The groupId of the replication group. 
+// - peer_id: The peer id in the format 10.182.26.45:8210:0. +// - state: The node's state, which can be LEADER, FOLLOWER, CANDIDATE, etc. +// - peers: Members in the configuration group, separated by spaces. +// - last_log_id: The index of the last log entry. +// - leader: This key exists only when the state is LEADER and points to the +// leader of the replication group. // -// replicator_1: 第一个follower的复制状态,value如下: -// next_index=6349842 flying_append_entries_size=0 idle hc=1234 ac=123 ic=0 -// next_index为下一个要发送给该follower的index -// flying_append_entries_size是发出去还未确认的entry的数量 -// idle表明没有在安装快照,如果在安装快照的话是installing snapshot {12, 3}, -// 1234和3分别是快照包含的最后一个log entry的index和term -// hc,ac,ic分别是发向follower的heartbeat,append entry, -// 和install snapshot的rpc的数量 +// replicator_1: The replication status of the first follower, with values as +// follows: next_index=6349842 flying_append_entries_size=0 idle hc=1234 ac=123 +// ic=0 +// - next_index: The next index to be sent to this follower. +// - flying_append_entries_size: The number of entries that have been sent +// but not yet acknowledged. +// - idle: Indicates that no snapshot is being installed. While a snapshot +// is being installed, this field shows as "installing snapshot +// {1234, 3}" instead, +// where 1234 and 3 are the index and term of the last log entry +// included in the snapshot. +// - hc, ac, ic: The counts of RPCs sent to the follower for heartbeat, +// append entry, and install snapshot, respectively. void CopysetCheckCore::ParseResponseAttachment( - const std::set& gIds, - butil::IOBuf* iobuf, - CopySetInfosType* copysetInfos, - bool saveIobufStr) { + const std::set& gIds, butil::IOBuf* iobuf, + CopySetInfosType* copysetInfos, bool saveIobufStr) { butil::IOBuf copyset; iobuf->append("\r\n"); while (iobuf->cut_until(&copyset, "\r\n\r\n") == 0) { @@ -629,7 +640,7 @@ void CopysetCheckCore::ParseResponseAttachment( continue; } } - // 找到了copyset + // Found the copyset auto pos = line.npos; if (line.find(kReplicator) != line.npos) { pos = line.rfind(":"); @@ -640,7 +651,7 @@ void CopysetCheckCore::ParseResponseAttachment( continue; } std::string key = line.substr(0, pos); - // 如果是replicator,把key简化一下 + // If it is a replicator, simplify the key if (key.find(kReplicator) != key.npos) { key = kReplicator + std::to_string(i); ++i; @@ -660,10 +671,11 @@ void CopysetCheckCore::ParseResponseAttachment( } int CopysetCheckCore::QueryChunkServer(const std::string& chunkserverAddr, - butil::IOBuf* iobuf) { + butil::IOBuf* iobuf) { // unit test will set csClient_ to mock - auto csClient = (csClient_ == nullptr) ? - std::make_shared() : csClient_; + auto csClient = (csClient_ == nullptr) + ? std::make_shared() + : csClient_; int res = csClient->Init(chunkserverAddr); if (res != 0) { std::cout << "Init chunkserverClient fail!" << std::endl; @@ -673,8 +685,7 @@ int CopysetCheckCore::QueryChunkServer(const std::string& chunkserverAddr, } void CopysetCheckCore::UpdateChunkServerCopysets( - const std::string& csAddr, - const CopySetInfosType& copysetInfos) { + const std::string& csAddr, const CopySetInfosType& copysetInfos) { std::set copysetIds; for (const auto& copyset : copysetInfos) { copysetIds.emplace(copyset.at(kGroupId)); @@ -682,11 +693,12 @@ void CopysetCheckCore::UpdateChunkServerCopysets( chunkserverCopysets_[csAddr] = copysetIds; } -// 通过发送RPC检查chunkserver是否在线 +// Check if the chunkserver is online by sending an RPC bool CopysetCheckCore::CheckChunkServerOnline( - const std::string& chunkserverAddr) { - auto csClient = (csClient_ == nullptr) ? 
- std::make_shared() : csClient_; + const std::string& chunkserverAddr) { + auto csClient = (csClient_ == nullptr) + ? std::make_shared() + : csClient_; int res = csClient->Init(chunkserverAddr); if (res != 0) { std::cout << "Init chunkserverClient fail!" << std::endl; @@ -718,7 +730,7 @@ bool CopysetCheckCore::CheckCopySetOnline(const std::string& csAddr, butil::IOBuf iobuf; int res = QueryChunkServer(csAddr, &iobuf); if (res != 0) { - // 如果查询chunkserver失败,认为不在线 + // If the query for chunkserver fails, it is considered offline serviceExceptionChunkServers_.emplace(csAddr); chunkserverCopysets_[csAddr] = {}; return false; @@ -727,7 +739,7 @@ bool CopysetCheckCore::CheckCopySetOnline(const std::string& csAddr, ParseResponseAttachment({}, &iobuf, ©setInfos); UpdateChunkServerCopysets(csAddr, copysetInfos); bool online = (chunkserverCopysets_[csAddr].find(groupId) != - chunkserverCopysets_[csAddr].end()); + chunkserverCopysets_[csAddr].end()); if (!online) { copysetLoacExceptionChunkServers_.emplace(csAddr); } @@ -735,8 +747,7 @@ bool CopysetCheckCore::CheckCopySetOnline(const std::string& csAddr, } CheckResult CopysetCheckCore::CheckPeerOnlineStatus( - const std::string& groupId, - const std::vector& peers) { + const std::string& groupId, const std::vector& peers) { int notOnlineNum = 0; for (const auto& peer : peers) { auto pos = peer.rfind(":"); @@ -762,20 +773,20 @@ CheckResult CopysetCheckCore::CheckPeerOnlineStatus( } CheckResult CopysetCheckCore::CheckHealthOnLeader( - std::map* map) { - // 先判断peers是否小于3 + std::map* map) { + // First, determine if the peers are less than 3 std::vector peers; curve::common::SplitString((*map)[kPeers], " ", &peers); if (peers.size() < FLAGS_replicasNum) { return CheckResult::kPeersNoSufficient; } std::string groupId = (*map)[kGroupId]; - // 检查不在线peer的数量 + // Check the number of offline peers CheckResult checkRes = CheckPeerOnlineStatus(groupId, peers); if (checkRes != CheckResult::kHealthy) { return checkRes; } - // 根据replicator的情况判断log index之间的差距 + // Judging the gap between log indices based on the replicator's situation uint64_t lastLogId; std::string str = (*map)[kStorage]; auto pos1 = str.find("="); @@ -785,7 +796,7 @@ CheckResult CopysetCheckCore::CheckHealthOnLeader( return CheckResult::kParseError; } bool res = curve::common::StringToUll(str.substr(pos1 + 1, pos2 - pos1 - 1), - &lastLogId); + &lastLogId); if (!res) { std::cout << "parse last log id from string fail!" << std::endl; return CheckResult::kParseError; @@ -805,16 +816,15 @@ CheckResult CopysetCheckCore::CheckHealthOnLeader( } } if (info.substr(0, pos) == kNextIndex) { - res = curve::common::StringToUll( - info.substr(pos + 1), &nextIndex); + res = curve::common::StringToUll(info.substr(pos + 1), + &nextIndex); if (!res) { std::cout << "parse next index fail!" << std::endl; return CheckResult::kParseError; } } if (info.substr(0, pos) == "flying_append_entries_size") { - res = curve::common::StringToUll(info.substr(pos + 1), - &flying); + res = curve::common::StringToUll(info.substr(pos + 1), &flying); if (!res) { std::cout << "parse flying_size fail!" << std::endl; return CheckResult::kParseError; @@ -835,8 +845,8 @@ void CopysetCheckCore::UpdatePeerNotOnlineCopysets(const std::string& csAddr) { std::vector copysets; int res = mdsClient_->GetCopySetsInChunkServer(csAddr, ©sets); if (res != 0) { - std::cout << "GetCopySetsInChunkServer " << csAddr - << " fail!" << std::endl; + std::cout << "GetCopySetsInChunkServer " << csAddr << " fail!" 
+ << std::endl; return; } else if (copysets.empty()) { std::cout << "No copysets on chunkserver " << csAddr << std::endl; @@ -849,26 +859,24 @@ void CopysetCheckCore::UpdatePeerNotOnlineCopysets(const std::string& csAddr) { copysetIds.emplace_back(csInfo.copysetid()); } - // 获取每个copyset的成员 + // Get the members of each copyset std::vector csServerInfos; - res = mdsClient_->GetChunkServerListInCopySets(logicalPoolId, - copysetIds, + res = mdsClient_->GetChunkServerListInCopySets(logicalPoolId, copysetIds, &csServerInfos); if (res != 0) { std::cout << "GetChunkServerListInCopySets fail" << std::endl; return; } - // 遍历每个copyset + // Traverse each copyset for (const auto& info : csServerInfos) { std::vector peers; for (const auto& csLoc : info.cslocs()) { - std::string peer = csLoc.hostip() + ":" - + std::to_string(csLoc.port()) + ":0"; + std::string peer = + csLoc.hostip() + ":" + std::to_string(csLoc.port()) + ":0"; peers.emplace_back(peer); } CopySetIdType copysetId = info.copysetid(); - std::string groupId = ToGroupId(logicalPoolId, - copysetId); + std::string groupId = ToGroupId(logicalPoolId, copysetId); CheckResult checkRes = CheckPeerOnlineStatus(groupId, peers); if (checkRes == CheckResult::kMinorityPeerNotOnline) { copysets_[kMinorityPeerNotOnline].emplace(groupId); @@ -889,9 +897,8 @@ CopysetStatistics CopysetCheckCore::GetCopysetStatistics() { if (item.first == kTotal) { total = item.second.size(); } else { - // 求并集 - unhealthyCopysets.insert(item.second.begin(), - item.second.end()); + // Union + unhealthyCopysets.insert(item.second.begin(), item.second.end()); } } uint64_t unhealthyNum = unhealthyCopysets.size(); @@ -907,7 +914,7 @@ void CopysetCheckCore::Clear() { } int CopysetCheckCore::ListMayBrokenVolumes( - std::vector* fileNames) { + std::vector* fileNames) { int res = CheckCopysetsOnOfflineChunkServer(); if (res != 0) { std::cout << "CheckCopysetsOnOfflineChunkServer fail" << std::endl; @@ -928,10 +935,10 @@ int CopysetCheckCore::ListMayBrokenVolumes( } void CopysetCheckCore::GetCopysetInfos(const char* key, - std::vector* copysets) { + std::vector* copysets) { (void)key; for (auto iter = copysets_[kMajorityPeerNotOnline].begin(); - iter != copysets_[kMajorityPeerNotOnline].end(); ++iter) { + iter != copysets_[kMajorityPeerNotOnline].end(); ++iter) { std::string gid = *iter; uint64_t groupId; if (!curve::common::StringToUll(gid, &groupId)) { diff --git a/src/tools/copyset_check_core.h b/src/tools/copyset_check_core.h index 6e93a373c7..157ddf2458 100644 --- a/src/tools/copyset_check_core.h +++ b/src/tools/copyset_check_core.h @@ -25,38 +25,38 @@ #include #include -#include +#include #include +#include #include -#include -#include -#include #include -#include +#include +#include #include +#include +#include "include/chunkserver/chunkserver_common.h" #include "proto/topology.pb.h" -#include "src/mds/common/mds_define.h" +#include "src/common/concurrent/concurrent.h" #include "src/common/string_util.h" -#include "src/tools/mds_client.h" +#include "src/mds/common/mds_define.h" #include "src/tools/chunkserver_client.h" -#include "src/tools/metric_name.h" #include "src/tools/curve_tool_define.h" -#include "include/chunkserver/chunkserver_common.h" -#include "src/common/concurrent/concurrent.h" +#include "src/tools/mds_client.h" +#include "src/tools/metric_name.h" -using curve::mds::topology::PoolIdType; -using curve::mds::topology::CopySetIdType; -using curve::mds::topology::ChunkServerIdType; -using curve::mds::topology::ServerIdType; -using 
curve::mds::topology::kTopoErrCodeSuccess; -using curve::mds::topology::OnlineState; -using curve::mds::topology::ChunkServerStatus; -using curve::chunkserver::ToGroupId; -using curve::chunkserver::GetPoolID; using curve::chunkserver::GetCopysetID; +using curve::chunkserver::GetPoolID; +using curve::chunkserver::ToGroupId; using curve::common::Mutex; using curve::common::Thread; +using curve::mds::topology::ChunkServerIdType; +using curve::mds::topology::ChunkServerStatus; +using curve::mds::topology::CopySetIdType; +using curve::mds::topology::kTopoErrCodeSuccess; +using curve::mds::topology::OnlineState; +using curve::mds::topology::PoolIdType; +using curve::mds::topology::ServerIdType; namespace curve { namespace tool { @@ -65,32 +65,31 @@ using CopySet = std::pair; using CopySetInfosType = std::vector>; enum class CheckResult { - // copyset健康 + // The copyset is healthy kHealthy = 0, - // 解析结果失败 + // Failed to parse the result kParseError = -1, - // peer数量小于预期 - kPeersNoSufficient = -2, - // 副本间的index差距太大 + // The number of peers is less than expected + kPeersNoSufficient = -2, + // The log index gap between replicas is too large kLogIndexGapTooBig = -3, - // 有副本在安装快照 + // A replica is installing a snapshot kInstallingSnapshot = -4, - // 少数副本不在线 + // A minority of replicas are not online kMinorityPeerNotOnline = -5, - // 大多数副本不在线 + // A majority of replicas are not online kMajorityPeerNotOnline = -6, kOtherErr = -7 }; enum class ChunkServerHealthStatus { - kHealthy = 0, // chunkserver上所有copyset健康 - kNotHealthy = -1, // chunkserver上有copyset不健康 - kNotOnline = -2 // chunkserver不在线 + kHealthy = 0, // All copysets on the chunkserver are healthy + kNotHealthy = -1, // Some copysets on the chunkserver are unhealthy + kNotOnline = -2 // The chunkserver is not online }; struct CopysetStatistics { - CopysetStatistics() : - totalNum(0), unhealthyNum(0), unhealthyRatio(0) {} + CopysetStatistics() : totalNum(0), unhealthyNum(0), unhealthyRatio(0) {} CopysetStatistics(uint64_t total, uint64_t unhealthy); uint64_t totalNum; uint64_t unhealthyNum; @@ -109,102 +108,108 @@ const char kThreeCopiesInconsistent[] = "Three copies inconsistent"; class CopysetCheckCore { public: CopysetCheckCore(std::shared_ptr mdsClient, - std::shared_ptr csClient = nullptr) : - mdsClient_(mdsClient), csClient_(csClient) {} + std::shared_ptr csClient = nullptr) + : mdsClient_(mdsClient), csClient_(csClient) {} virtual ~CopysetCheckCore() = default; /** - * @brief 初始化mds client - * @param mdsAddr mds的地址,支持多地址,用","分隔 - * @return 成功返回0,失败返回-1 + * @brief Initialize the mds client + * @param mdsAddr Address of the mds; multiple addresses are supported, + * separated by ',' + * @return 0 for success, -1 for failure */ virtual int Init(const std::string& mdsAddr); /** - * @brief check health of one copyset - * - * @param logicalPoolId - * @param copysetId - * - * @return error code - */ + * @brief check health of one copyset + * + * @param logicalPoolId + * @param copysetId + * + * @return error code + */ virtual CheckResult CheckOneCopyset(const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId); + const CopySetIdType& copysetId); /** - * @brief 检查某个chunkserver上的所有copyset的健康状态 - * - * @param chunkserId chunkserverId - * - * @return 健康返回0,不健康返回-1 - */ + * @brief Check the health status of all copysets on a certain chunkserver + * + * @param chunkserverId The chunkserver ID + * + * @return 0 if healthy, -1 otherwise + */ virtual int CheckCopysetsOnChunkServer( - const ChunkServerIdType& chunkserverId); /** - * @brief 
检查某个chunkserver上的所有copyset的健康状态 - * - * @param chunkserAddr chunkserver地址 - * - * @return 健康返回0,不健康返回-1 - */ + * @brief Check the health status of all copysets on a certain chunkserver + * + * @param chunkserverAddr The chunkserver address + * + * @return 0 if healthy, -1 otherwise + */ virtual int CheckCopysetsOnChunkServer(const std::string& chunkserverAddr); /** - * @brief Check copysets on offline chunkservers - */ + * @brief Check copysets on offline chunkservers + */ virtual int CheckCopysetsOnOfflineChunkServer(); /** - * @brief 检查某个server上的所有copyset的健康状态 - * - * @param serverId server的id - * @param[out] unhealthyChunkServers 可选参数,server上copyset不健康的chunkserver的列表 - * - * @return 健康返回0,不健康返回-1 - */ - virtual int CheckCopysetsOnServer(const ServerIdType& serverId, - std::vector* unhealthyChunkServers = nullptr); + * @brief Check the health status of all copysets on a server + * + * @param serverId The server ID + * @param[out] unhealthyChunkServers Optional; the list of chunkservers on + * the server that have unhealthy copysets + * + * @return 0 if healthy, -1 otherwise + */ + virtual int CheckCopysetsOnServer( + const ServerIdType& serverId, + std::vector* unhealthyChunkServers = nullptr); /** - * @brief 检查某个server上的所有copyset的健康状态 - * - * @param serverId server的ip - * @param[out] unhealthyChunkServers 可选参数,server上copyset不健康的chunkserver的列表 - * - * @return 健康返回0,不健康返回-1 - */ - virtual int CheckCopysetsOnServer(const std::string& serverIp, - std::vector* unhealthyChunkServers = nullptr); + * @brief Check the health status of all copysets on a server + * + * @param serverIp The IP of the server + * @param[out] unhealthyChunkServers Optional; the list of chunkservers on + * the server that have unhealthy copysets + * + * @return 0 if healthy, -1 otherwise + */ + virtual int CheckCopysetsOnServer( + const std::string& serverIp, + std::vector* unhealthyChunkServers = nullptr); /** - * @brief 检查集群中所有copyset的健康状态 - * - * @return 健康返回0,不健康返回-1 - */ + * @brief Check the health status of all copysets in the cluster + * + * @return 0 if healthy, -1 otherwise + */ virtual int CheckCopysetsInCluster(); /** - * @brief 检查集群中的operator - * @param opName operator的名字 - * @param checkTimeSec 检查时间 - * @return 检查正常返回0,检查失败或存在operator返回-1 - */ - virtual int CheckOperator(const std::string& opName, - uint64_t checkTimeSec); + * @brief Check the operators in the cluster + * @param opName The name of the operator + * @param checkTimeSec How long to keep checking, in seconds + * @return 0 if the check passes, -1 if it fails or an operator is present + */ + virtual int CheckOperator(const std::string& opName, uint64_t checkTimeSec); /** - * @brief 计算不健康的copyset的比例,检查后调用 - * @return 不健康的copyset的比例 + * @brief Calculate the proportion of unhealthy copysets; call this after a + * check + * @return The proportion of unhealthy copysets */ virtual CopysetStatistics GetCopysetStatistics(); /** - * @brief 获取copyset的列表,通常检查后会调用,然后打印出来 - * @return copyset的列表 + * @brief Obtain the list of copysets, usually called after a check so the + * result can be printed + * @return The list of copysets */ virtual const std::map>& GetCopysetsRes() - const { + const { return copysets_; } @@ -212,112 +217,119 @@ class CopysetCheckCore { * @brief Get copysets info for specified copysets */ virtual void GetCopysetInfos(const char* key, - std::vector* copysets); + std::vector* copysets); /** - * @brief 获取copyset的详细信息 - * @return copyset的详细信息 + * @brief Get the detailed information of the copyset + * @return The copyset details */ 
virtual const std::string& GetCopysetDetail() const { return copysetsDetail_; } /** - * @brief 获取检查过程中服务异常的chunkserver列表,通常检查后会调用,然后打印出来 - * @return 服务异常的chunkserver的列表 + * @brief Obtain the list of chunkservers that had service exceptions + * during the check; usually called after the check so the result can be + * printed + * @return The list of chunkservers with service exceptions */ virtual const std::set& GetServiceExceptionChunkServer() - const { + const { return serviceExceptionChunkServers_; } /** - * @brief 获取检查过程中copyset寻找失败的chunkserver列表,通常检查后会调用,然后打印出来 - * @return copyset加载异常的chunkserver的列表 + * @brief Obtain the list of chunkservers that failed to load copysets + * during the check; usually called after the check so the result can be + * printed + * @return The list of chunkservers with copyset loading exceptions */ virtual const std::set& GetCopysetLoadExceptionChunkServer() - const { + const { return copysetLoacExceptionChunkServers_; } /** - * @brief 通过发送RPC检查chunkserver是否在线 - * - * @param chunkserverAddr chunkserver的地址 - * - * @return 在线返回true,不在线返回false - */ + * @brief Check if the chunkserver is online by sending an RPC + * + * @param chunkserverAddr Address of the chunkserver + * + * @return true if online, false otherwise + */ virtual bool CheckChunkServerOnline(const std::string& chunkserverAddr); /** - * @brief List volumes on majority peers offline copysets - * - * @param fileNames affected volumes - * - * @return return 0 when sucess, otherwise return -1 - */ + * @brief List volumes on copysets whose majority of peers are offline + * + * @param fileNames affected volumes + * + * @return 0 on success, -1 otherwise + */ virtual int ListMayBrokenVolumes(std::vector* fileNames); private: /** - * @brief 从iobuf分析出指定groupId的复制组的信息, - * 每个复制组的信息都放到一个map里面 - * - * @param gIds 要查询的复制组的groupId,为空的话全部查询 - * @param iobuf 要分析的iobuf - * @param[out] maps copyset信息的列表,每个copyset的信息都是一个map - * @param saveIobufStr 是否要把iobuf里的详细内容存下来 - * - */ + * @brief Parse the information of the replication groups with the + * specified groupIds from the iobuf; each replication group's information + * is placed in a map + * + * @param gIds: The groupIds of the replication groups to query; if empty, + * all of them are parsed + * @param iobuf The iobuf to analyze + * @param[out] maps A list of copyset information, where each copyset's + * information is a map + * @param saveIobufStr Whether to save the raw content of the iobuf + * + */ void ParseResponseAttachment(const std::set& gIds, - butil::IOBuf* iobuf, - CopySetInfosType* copysetInfos, - bool saveIobufStr = false); + butil::IOBuf* iobuf, + CopySetInfosType* copysetInfos, + bool saveIobufStr = false); /** - * @brief 检查某个chunkserver上的所有copyset的健康状态 - * - * @param chunkserId chunkserverId - * @param chunkserverAddr chunkserver的地址,两者指定一个就好 - * - * @return 健康返回0,不健康返回-1 - */ + * @brief Check the health status of all copysets on a certain chunkserver + * + * @param chunkserverId The chunkserver ID + * @param chunkserverAddr The chunkserver address; only one of the two + * needs to be specified + * + * @return 0 if healthy, -1 otherwise + */ int CheckCopysetsOnChunkServer(const ChunkServerIdType& chunkserverId, const std::string& chunkserverAddr); /** - * @brief check copysets' healthy status on chunkserver - * - * @param[in] chunkserAddr: chunkserver address - * @param[in] groupIds: groupId for check, default is null, check all the copysets - * @param[in] queryLeader: whether send rpc to chunkserver which copyset leader on. 
All the chunkserves will be check when check clusters status. - * @param[in] record: raft state rpc response from chunkserver - * @param[in] queryCs: whether send rpc to chunkserver - * - * @return error code - */ + * @brief Check the health status of the copysets on a chunkserver + * + * @param[in] chunkserverAddr: chunkserver address + * @param[in] groupIds: groupIds to check; if empty, check all the + * copysets + * @param[in] queryLeader: whether to send an RPC to the chunkserver that + * the copyset leader is on. All chunkservers are checked when checking + * cluster status. + * @param[in] record: raft state rpc response from chunkserver + * @param[in] queryCs: whether to send an RPC to the chunkserver + * + * @return error code + */ ChunkServerHealthStatus CheckCopysetsOnChunkServer( - const std::string& chunkserverAddr, - const std::set& groupIds, - bool queryLeader = true, - std::pair *record = nullptr, - bool queryCs = true); + const std::string& chunkserverAddr, + const std::set& groupIds, bool queryLeader = true, + std::pair* record = nullptr, bool queryCs = true); /** - * @brief 检查某个server上的所有copyset的健康状态 - * - * @param serverId server的id - * @param serverIp server的ip,serverId或serverIp指定一个就好 - * @param queryLeader 是否向leader所在的server发送RPC查询, - * 对于检查cluster来说,所有server都会遍历到,不用查询 - * - * @return 健康返回0,不健康返回-1 - */ - int CheckCopysetsOnServer(const ServerIdType& serverId, - const std::string& serverIp, - bool queryLeader = true, - std::vector* unhealthyChunkServers = nullptr); + * @brief Check the health status of all copysets on a server + * + * @param serverId The server ID + * @param serverIp The server's IP; only one of serverId and serverIp + * needs to be specified + * @param queryLeader Whether to send RPC queries to the server where the + * leader is located. When checking the whole cluster, every server is + * traversed anyway, so no extra query is needed + * + * @return 0 if healthy, -1 otherwise + */ + int CheckCopysetsOnServer( + const ServerIdType& serverId, const std::string& serverIp, + bool queryLeader = true, + std::vector* unhealthyChunkServers = nullptr); /** * @brief concurrent check copyset on server * @param[in] chunkservers: chunkservers on server * @param[in] index: the deal index of chunkserver * @param[in] result: rpc response from chunkserver */ void ConcurrentCheckCopysetsOnServer( - const std::vector &chunkservers, - uint32_t *index, - std::map> *result); + const std::vector& chunkservers, uint32_t* index, + std::map>* result); /** - * @brief 根据leader的map里面的copyset信息分析出copyset是否健康,健康返回0,否则 - * 否则返回错误码 - * - * @param map leader的copyset信息,以键值对的方式存储 - * - * @return 返回错误码 - */ + * @brief Analyze whether the copyset is healthy based on the copyset + * information in the leader's map, and return 0 if it is healthy; 
Otherwise + * Otherwise, an error code will be returned + * + * @param map The copyset information of the leader is stored as key value + * pairs + * + * @return returns an error code + */ CheckResult CheckHealthOnLeader(std::map* map); /** - * @brief 向chunkserver发起raft state rpc - * - * @param chunkserverAddr chunkserver的地址 - * @param[out] iobuf 返回的responseattachment,返回0的时候有效 - * - * @return 成功返回0,失败返回-1 - */ + * @brief Initiate raft state rpc to chunkserver + * + * @param chunkserverAddr Address of chunkserver + * @param[out] iobuf The responseattachment returned by is valid when 0 is + * returned + * + * @return returns 0 for success, -1 for failure + */ int QueryChunkServer(const std::string& chunkserverAddr, butil::IOBuf* iobuf); /** - * @brief 把chunkserver上所有的copyset更新到peerNotOnline里面 - * - * @param csAddr chunkserver的地址 - * - * @return 无 - */ + * @brief: Update all copysets on chunkserver to peerNotOnline + * + * @param csAddr chunkserver Address of + * + * @return None + */ void UpdatePeerNotOnlineCopysets(const std::string& csAddr); /** - * @brief 以mds中的copyset配置组为参照,检查chunkserver是否在copyset的配置组中 - * - * @param csAddr chunkserver的地址 - * @param copysets copyset列表 - * @param[out] result 检查结果,copyset到存在与否的映射 - * - * @return 包含返回true,否则返回false - */ + * @brief: Using the copyset configuration group in mds as a reference, + * check if chunkserver is in the copyset configuration group + * + * @param csAddr Address of chunkserver + * @param copysets copyset list + * @param[out] result check result, copyset mapping to presence or absence + * + * @return returns true, otherwise returns false + */ int CheckIfChunkServerInCopysets(const std::string& csAddr, const std::set copysets, std::map* result); /** - * @brief 检查没有leader的copyset是否健康 - * - * @param csAddr chunkserver 地址 - * @param copysetsPeers copyset的groupId到peers的映射 - * - * @return 健康返回true,不健康返回false - */ - bool CheckCopysetsNoLeader(const std::string& csAddr, - const std::map>& - copysetsPeers); + * @brief Check if the copyset without a leader is healthy + * + * @param csAddr chunkserver address + * @param copysetsPeers copyset's groupId to Peers mapping + * + * @return returns true if healthy, false if unhealthy + */ + bool CheckCopysetsNoLeader( + const std::string& csAddr, + const std::map>& copysetsPeers); /** - * @brief 清空统计信息 - * - * @return 无 - */ + * @brief Clear Statistics + * + * @return None + */ void Clear(); /** - * @brief 获取chunkserver上的copyset的在线状态 - * - * @param csAddr chunkserver地址 - * @param groupId copyset的groupId - * - * @return 在线返回true - */ + * @brief: Obtain the online status of the copyset on chunkserver + * + * @param csAddr chunkserver address + * @param groupId copyset's groupId + * + * @return returns true online + */ bool CheckCopySetOnline(const std::string& csAddr, const std::string& groupId); /** - * @brief 获取不在线的peer的数量 - * - * - * @param peers 副本peer的列表ip:port:id的形式 - * - * @return 返回错误码 - */ + * @brief: Obtain the number of offline peers + * + * + * @param peers The list of replica peers in the form of ip:port:id + * + * @return returns an error code + */ CheckResult CheckPeerOnlineStatus(const std::string& groupId, const std::vector& peers); /** - * @brief 更新chunkserver上的copyset的groupId列表 - * - * @param csAddr chunkserver地址 - * @param copysetInfos copyset信息列表 - */ + * @brief Update the groupId list of copyset on chunkserver + * + * @param csAddr chunkserver address + * @param copysetInfos copyset information list + */ void UpdateChunkServerCopysets(const std::string& csAddr, - const 
CopySetInfosType& copysetInfos); + const CopySetInfosType& copysetInfos); int CheckCopysetsWithMds(); int CheckScanStatus(const std::vector& copysetInfos); private: - // 向mds发送RPC的client + // Client for sending RPCs to mds std::shared_ptr mdsClient_; // for unittest mock csClient std::shared_ptr csClient_; - // 保存copyset的信息 + // Saves the information of the copysets std::map> copysets_; - // 用来保存发送RPC失败的那些chunkserver + // Records the chunkservers to which sending an RPC failed std::set serviceExceptionChunkServers_; - // 用来保存一些copyset加载有问题的chunkserver + // Records the chunkservers that had problems loading copysets std::set copysetLoacExceptionChunkServers_; - // 用来存放访问过的chunkserver上的copyset列表,避免重复RPC + // Stores the copyset list of each visited chunkserver to avoid duplicate + // RPCs std::map> chunkserverCopysets_; - // 查询单个copyset的时候,保存复制组的详细信息 + // When querying a single copyset, save the detailed information of the + // replication group std::string copysetsDetail_; const std::string kEmptyAddr = "0.0.0.0:0:0"; diff --git a/src/tools/curve_cli.cpp b/src/tools/curve_cli.cpp index 0dc5dcf46e..60bb516b86 100644 --- a/src/tools/curve_cli.cpp +++ b/src/tools/curve_cli.cpp @@ -20,23 +20,21 @@ * Author: wudemiao */ -#include #include "src/tools/curve_cli.h" + +#include + #include "src/tools/common.h" -DEFINE_int32(timeout_ms, - -1, "Timeout (in milliseconds) of the operation"); -DEFINE_int32(max_retry, - 3, "Max retry times of each operation"); -DEFINE_string(conf, - "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0", +DEFINE_int32(timeout_ms, -1, "Timeout (in milliseconds) of the operation"); +DEFINE_int32(max_retry, 3, "Max retry times of each operation"); +DEFINE_string(conf, "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0", "Initial configuration of the replication group"); -DEFINE_string(peer, - "", "Id of the operating peer"); -DEFINE_string(new_conf, - "", "new conf to reset peer"); -DEFINE_bool(remove_copyset, false, "Whether need to remove broken copyset " - "after remove peer (default: false)"); +DEFINE_string(peer, "", "Id of the operating peer"); +DEFINE_string(new_conf, "", "new conf to reset peer"); +DEFINE_bool(remove_copyset, false, + "Whether need to remove broken copyset " + "after remove peer (default: false)"); DEFINE_bool(affirm, true, "If true, command line interactive affirmation is required."
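The reflowed DEFINE_* declarations above (here and in copyset_check_core.cpp) are behavior-preserving because C++ concatenates adjacent string literals at compile time. A minimal standalone sketch of that property, assuming only the gflags library; the flag name example_flag is hypothetical and not part of this patch:

#include <gflags/gflags.h>

// Splitting a long help text across adjacent string literals, as
// clang-format does in the definitions above, registers exactly the same
// description as one single literal would.
DEFINE_bool(example_flag, false,
            "first half of the help text "
            "and the second half");

int main(int argc, char** argv) {
    gflags::ParseCommandLineFlags(&argc, &argv, true);
    // --help would print: "first half of the help text and the second half"
    return 0;
}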
@@ -45,26 +43,22 @@ DECLARE_string(mdsAddr); namespace curve { namespace tool { -#define CHECK_FLAG(flagname) \ - do { \ - if ((FLAGS_ ## flagname).empty()) { \ - std::cout << __FUNCTION__ << " requires --" # flagname \ - << std::endl; \ - return -1; \ - } \ - } while (0); \ - +#define CHECK_FLAG(flagname) \ + do { \ + if ((FLAGS_##flagname).empty()) { \ + std::cout << __FUNCTION__ << " requires --" #flagname \ + << std::endl; \ + return -1; \ + } \ + } while (0); bool CurveCli::SupportCommand(const std::string& command) { - return (command == kResetPeerCmd || command == kRemovePeerCmd - || command == kTransferLeaderCmd - || command == kDoSnapshot - || command == kDoSnapshotAll); + return (command == kResetPeerCmd || command == kRemovePeerCmd || + command == kTransferLeaderCmd || command == kDoSnapshot || + command == kDoSnapshotAll); } -int CurveCli::Init() { - return mdsClient_->Init(FLAGS_mdsAddr); -} +int CurveCli::Init() { return mdsClient_->Init(FLAGS_mdsAddr); } butil::Status CurveCli::DeleteBrokenCopyset(braft::PeerId peerId, const LogicPoolID& poolId, @@ -121,13 +115,13 @@ int CurveCli::RemovePeer() { } // STEP 1: remove peer - butil::Status status = curve::chunkserver::RemovePeer( - poolId, copysetId, conf, peer, opt); + butil::Status status = + curve::chunkserver::RemovePeer(poolId, copysetId, conf, peer, opt); auto succ = status.ok(); - std::cout << "Remove peer " << peerId << " for copyset(" - << poolId << ", " << copysetId << ") " - << (succ ? "success" : "fail") << ", original conf: " << conf - << ", status: " << status << std::endl; + std::cout << "Remove peer " << peerId << " for copyset(" << poolId << ", " + << copysetId << ") " << (succ ? "success" : "fail") + << ", original conf: " << conf << ", status: " << status + << std::endl; if (!succ || !FLAGS_remove_copyset) { return succ ? 0 : -1; @@ -138,8 +132,8 @@ int CurveCli::RemovePeer() { succ = status.ok(); std::cout << "Delete copyset(" << poolId << ", " << copysetId << ")" << " in " << peerId << (succ ? "success" : "fail") - << ", original conf: " << conf - << ", status: " << status << std::endl; + << ", original conf: " << conf << ", status: " << status + << std::endl; return succ ? 
0 : -1; } @@ -164,25 +158,19 @@ int CurveCli::TransferLeader() { opt.timeout_ms = FLAGS_timeout_ms; opt.max_retry = FLAGS_max_retry; butil::Status st = curve::chunkserver::TransferLeader( - FLAGS_logicalPoolId, - FLAGS_copysetId, - conf, - targetPeer, - opt); + FLAGS_logicalPoolId, FLAGS_copysetId, conf, targetPeer, opt); if (!st.ok()) { std::cout << "Transfer leader of copyset " - << "(" << FLAGS_logicalPoolId << ", " - << FLAGS_copysetId << ")" - << " to " << targetPeerId - << " fail, original conf: " << conf + << "(" << FLAGS_logicalPoolId << ", " << FLAGS_copysetId + << ")" + << " to " << targetPeerId << " fail, original conf: " << conf << ", detail: " << st << std::endl; return -1; } std::cout << "Transfer leader of copyset " - << "(" << FLAGS_logicalPoolId << ", " - << FLAGS_copysetId << ")" - << " to " << targetPeerId - << " success, original conf: " << conf << std::endl; + << "(" << FLAGS_logicalPoolId << ", " << FLAGS_copysetId << ")" + << " to " << targetPeerId << " success, original conf: " << conf + << std::endl; return 0; } @@ -217,13 +205,14 @@ int CurveCli::ResetPeer() { } curve::common::Peer requestPeer; requestPeer.set_address(requestPeerId.to_string()); - // 目前reset peer只支持reset为1一个副本,不支持增加副本, - // 因为不能通过工具在chunkserver上创建copyset + // Currently, reset peer only supports resetting to a single replica and + // cannot add replicas, because copysets cannot be created on a + // chunkserver through this tool if (newConf.size() != 1) { std::cout << "New conf can only specify one peer!" << std::endl; return -1; } - // 新的配置必须包含发送RPC的peer + // The new configuration must include the peer that sends the RPC if (*newConf.begin() != requestPeerId) { std::cout << "New conf must include the target peer!" << std::endl; return -1; } @@ -233,25 +222,20 @@ opt.max_retry = FLAGS_max_retry; butil::Status st = curve::chunkserver::ResetPeer( - FLAGS_logicalPoolId, - FLAGS_copysetId, - newConf, - requestPeer, - opt); + FLAGS_logicalPoolId, FLAGS_copysetId, newConf, requestPeer, opt); if (!st.ok()) { std::cout << "Reset peer of copyset " - << "(" << FLAGS_logicalPoolId << ", " - << FLAGS_copysetId << ")" + << "(" << FLAGS_logicalPoolId << ", " << FLAGS_copysetId + << ")" << " to " << newConf << " fail, requestPeer: " << requestPeerId << ", detail: " << st << std::endl; return -1; } std::cout << "Reset peer of copyset " - << "(" << FLAGS_logicalPoolId << ", " - << FLAGS_copysetId << ")" - << " to " << newConf - << " success, requestPeer: " << requestPeerId << std::endl; + << "(" << FLAGS_logicalPoolId << ", " << FLAGS_copysetId << ")" + << " to " << newConf << " success, requestPeer: " << requestPeerId + << std::endl; return 0; } @@ -274,15 +258,12 @@ int CurveCli::DoSnapshot(uint32_t lgPoolId, uint32_t copysetId, braft::cli::CliOptions opt; opt.timeout_ms = FLAGS_timeout_ms; opt.max_retry = FLAGS_max_retry; - butil::Status st = curve::chunkserver::Snapshot( - FLAGS_logicalPoolId, - FLAGS_copysetId, - peer, - opt); + butil::Status st = curve::chunkserver::Snapshot(FLAGS_logicalPoolId, + FLAGS_copysetId, peer, opt); if (!st.ok()) { std::cout << "Do snapshot of copyset " - << "(" << FLAGS_logicalPoolId << ", " - << FLAGS_copysetId << ")" + << "(" << FLAGS_logicalPoolId << ", " << FLAGS_copysetId + << ")" << " fail, requestPeer: " << peer.address() << ", detail: " << st << std::endl; return -1; } @@ -301,8 +282,8 @@ int CurveCli::DoSnapshotAll() { braft::cli::CliOptions opt; opt.timeout_ms = FLAGS_timeout_ms; opt.max_retry = FLAGS_max_retry; - std::string csAddr = 
chunkserver.hostip() + ":" + - std::to_string(chunkserver.port()); + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); curve::common::Peer peer; peer.set_address(csAddr); butil::Status st = curve::chunkserver::SnapshotAll(peer, opt); @@ -315,17 +296,27 @@ int CurveCli::DoSnapshotAll() { return res; } -void CurveCli::PrintHelp(const std::string &cmd) { +void CurveCli::PrintHelp(const std::string& cmd) { std::cout << "Example " << std::endl; if (cmd == kResetPeerCmd) { - std::cout << "curve_ops_tool " << cmd << " -logicalPoolId=1 -copysetId=10001 -peer=127.0.0.1:8080:0 " // NOLINT - "-new_conf=127.0.0.1:8080:0 -max_retry=3 -timeout_ms=100" << std::endl; // NOLINT + std::cout << "curve_ops_tool " << cmd + << " -logicalPoolId=1 -copysetId=10001 " + "-peer=127.0.0.1:8080:0 " // NOLINT + "-new_conf=127.0.0.1:8080:0 -max_retry=3 -timeout_ms=100" + << std::endl; // NOLINT } else if (cmd == kRemovePeerCmd || cmd == kTransferLeaderCmd) { - std::cout << "curve_ops_tool " << cmd << " -logicalPoolId=1 -copysetId=10001 -peer=127.0.0.1:8080:0 " // NOLINT - "-conf=127.0.0.1:8080:0,127.0.0.1:8081:0,127.0.0.1:8082:0 -max_retry=3 -timeout_ms=100 -remove_copyset=true/false" << std::endl; // NOLINT + std::cout << "curve_ops_tool " << cmd + << " -logicalPoolId=1 -copysetId=10001 " + "-peer=127.0.0.1:8080:0 " // NOLINT + "-conf=127.0.0.1:8080:0,127.0.0.1:8081:0,127.0.0.1:8082:0 " + "-max_retry=3 -timeout_ms=100 -remove_copyset=true/false" + << std::endl; // NOLINT } else if (cmd == kDoSnapshot) { - std::cout << "curve_ops_tool " << cmd << " -logicalPoolId=1 -copysetId=10001 -peer=127.0.0.1:8080:0 " // NOLINT - "-max_retry=3 -timeout_ms=100" << std::endl; + std::cout << "curve_ops_tool " << cmd + << " -logicalPoolId=1 -copysetId=10001 " + "-peer=127.0.0.1:8080:0 " // NOLINT + "-max_retry=3 -timeout_ms=100" + << std::endl; } else if (cmd == kDoSnapshotAll) { std::cout << "curve_ops_tool " << cmd << std::endl; } else { @@ -333,7 +324,7 @@ void CurveCli::PrintHelp(const std::string &cmd) { } } -int CurveCli::RunCommand(const std::string &cmd) { +int CurveCli::RunCommand(const std::string& cmd) { if (Init() != 0) { std::cout << "Init CurveCli tool failed" << std::endl; return -1; @@ -358,4 +349,3 @@ int CurveCli::RunCommand(const std::string &cmd) { } } // namespace tool } // namespace curve - diff --git a/src/tools/curve_cli.h b/src/tools/curve_cli.h index 24a4944cee..7267262893 100644 --- a/src/tools/curve_cli.h +++ b/src/tools/curve_cli.h @@ -23,64 +23,65 @@ #ifndef SRC_TOOLS_CURVE_CLI_H_ #define SRC_TOOLS_CURVE_CLI_H_ -#include -#include #include #include +#include +#include -#include -#include #include +#include #include +#include #include "include/chunkserver/chunkserver_common.h" -#include "src/chunkserver/copyset_node.h" +#include "proto/copyset.pb.h" #include "src/chunkserver/cli2.h" +#include "src/chunkserver/copyset_node.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" #include "src/tools/mds_client.h" -#include "proto/copyset.pb.h" namespace curve { namespace tool { -using ::curve::chunkserver::LogicPoolID; using ::curve::chunkserver::CopysetID; using ::curve::chunkserver::CopysetRequest; using ::curve::chunkserver::CopysetResponse; using ::curve::chunkserver::CopysetService_Stub; +using ::curve::chunkserver::LogicPoolID; +using ::curve::chunkserver::COPYSET_OP_STATUS:: + COPYSET_OP_STATUS_FAILURE_UNKNOWN; // NOLINT using ::curve::chunkserver::COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS; -using 
::curve::chunkserver::COPYSET_OP_STATUS::COPYSET_OP_STATUS_FAILURE_UNKNOWN;  // NOLINT
 
 class CurveCli : public CurveTool {
  public:
-    explicit CurveCli(std::shared_ptr<MDSClient> mdsClient) :
-        mdsClient_(mdsClient) {}
+    explicit CurveCli(std::shared_ptr<MDSClient> mdsClient)
+        : mdsClient_(mdsClient) {}
 
     /**
-     * @brief 初始化mds client
-     * @return 成功返回0,失败返回-1
+     * @brief Initialize the mds client
+     * @return 0 on success, -1 on failure
      */
     int Init();
 
     /**
-     * @brief 打印help信息
-     * @param 无
-     * @return 无
+     * @brief Print help information
+     * @param None
+     * @return None
      */
-    void PrintHelp(const std::string &cmd) override;
+    void PrintHelp(const std::string& cmd) override;
 
     /**
-     * @brief 执行命令
-     * @param cmd:执行的命令
-     * @return 成功返回0,失败返回-1
+     * @brief Execute the command
+     * @param cmd: the command to execute
+     * @return 0 on success, -1 on failure
      */
-    int RunCommand(const std::string &cmd) override;
+    int RunCommand(const std::string& cmd) override;
 
     /**
-     * @brief 返回是否支持该命令
-     * @param command:执行的命令
-     * @return true / false
+     * @brief Return whether the command is supported
+     * @param command: the command to check
+     * @return true/false
      */
     static bool SupportCommand(const std::string& command);
 
@@ -97,47 +98,48 @@ class CurveCli : public CurveTool {
                                     const CopysetID& copysetId);
 
     /**
-     * @brief 删除peer
-     * @param 无
-     * @return 成功返回0,失败返回-1
+     * @brief Delete a peer
+     * @param None
+     * @return 0 on success, -1 on failure
      */
     int RemovePeer();
 
     /**
-     * @brief 转移leader
-     * @param 无
-     * @return 成功返回0,失败返回-1
+     * @brief Transfer the leader
+     * @param None
+     * @return 0 on success, -1 on failure
      */
     int TransferLeader();
 
     /**
-     * @brief 触发打快照
-     * @param 无
-     * @return 成功返回0,失败返回-1
+     * @brief Trigger a snapshot
+     * @param None
+     * @return 0 on success, -1 on failure
      */
     int DoSnapshot();
 
     /**
-     * @brief 触发打快照
-     * @param lgPoolId 逻辑池id
-     * @param copysetId 复制组id
-     * @param peer 复制组成员
-     * @return 成功返回0,失败返回-1
+     * @brief Trigger a snapshot
+     * @param lgPoolId logical pool ID
+     * @param copysetId copyset ID
+     * @param peer the copyset member to snapshot
+     * @return 0 on success, -1 on failure
      */
     int DoSnapshot(uint32_t lgPoolId, uint32_t copysetId,
                    const curve::common::Peer& peer);
 
     /**
-     * @brief 给集群中全部copyset node触发打快照
-     * @param 无
-     * @return 成功返回0,失败返回-1
+     * @brief Trigger a snapshot on all copyset nodes in the cluster
+     * @param None
+     * @return 0 on success, -1 on failure
      */
     int DoSnapshotAll();
 
     /**
-     * @brief 重置配置组成员,目前只支持reset成一个成员
-     * @param 无
-     * @return 成功返回0,失败返回-1
+     * @brief Reset the configuration members; currently only resetting to a
+     * single member is supported
+     * @param None
+     * @return 0 on success, -1 on failure
      */
     int ResetPeer();
 
diff --git a/src/tools/curve_format_main.cpp b/src/tools/curve_format_main.cpp
index 08aa1f62ed..d5f30d9b7b 100644
--- a/src/tools/curve_format_main.cpp
+++ b/src/tools/curve_format_main.cpp
@@ -20,45 +20,41 @@
  * Author: tongguangxun
  */
 
-#include
+#include
 #include
+#include
 #include
-#include
-
-#include
-#include   // NOLINT
-#include   // NOLINT
 #include
+#include   // NOLINT
+#include
+#include   // NOLINT
 #include
 
-#include "src/fs/fs_common.h"
-#include "src/fs/local_filesystem.h"
-#include "src/common/crc32.h"
+#include "include/chunkserver/chunkserver_common.h"
+#include "src/chunkserver/datastore/file_pool.h"
 #include "src/common/bitmap.h"
+#include "src/common/crc32.h"
 #include "src/common/curve_define.h"
-#include "src/chunkserver/datastore/file_pool.h"
 #include "src/common/fast_align.h"
-
-#include "include/chunkserver/chunkserver_common.h"
+#include "src/fs/fs_common.h"
+#include "src/fs/local_filesystem.h"
 
 using ::curve::common::align_up;
 using ::curve::common::is_aligned;
 
 /**
- * chunkfile pool预分配工具,提供两种分配方式
- * 1. 以磁盘空间百分比方式,指定需要分配的百分比
- * 2. 指定以chunk数量分配
- * 默认的分配方式是以磁盘空间百分比作为分配方式,可以通过-allocateByPercent=false/true
- * 调整分配方式。
+ * chunkfile pool preallocation tool; it provides two allocation modes:
+ * 1. Allocate a given percentage of the disk space
+ * 2. Allocate a given number of chunks
+ * The default mode is allocation by percentage of disk space; the mode can
+ * be switched with -allocateByPercent=true/false.
  */
-DEFINE_bool(allocateByPercent,
-            true,
+DEFINE_bool(allocateByPercent, true,
             "allocate filePool by percent of disk size or by chunk num!");
 
-DEFINE_uint32(fileSize,
-              16 * 1024 * 1024,
-              "chunk size");
+DEFINE_uint32(fileSize, 16 * 1024 * 1024, "chunk size");
 
 DEFINE_uint32(blockSize, 4096, "minimum io alignment supported");
 
@@ -69,41 +65,34 @@ static bool ValidateBlockSize(const char* /*name*/, uint32_t blockSize) {
 
 DEFINE_validator(blockSize, &ValidateBlockSize);
 
-DEFINE_string(fileSystemPath,
-              "./",
-              "chunkserver disk path");
+DEFINE_string(fileSystemPath, "./", "chunkserver disk path");
 
-DEFINE_string(filePoolDir,
-              "./filePool/",
-              "chunkfile pool dir");
+DEFINE_string(filePoolDir, "./filePool/", "chunkfile pool dir");
 
-DEFINE_string(filePoolMetaPath,
-              "./filePool.meta",
+DEFINE_string(filePoolMetaPath, "./filePool.meta",
               "chunkfile pool meta info file path.");
 
-// preallocateNum仅在测试的时候使用,测试提前预分配固定数量的chunk
-// 当设置这个值的时候可以不用设置allocatepercent
-DEFINE_uint32(preAllocateNum,
-              0,
+// preAllocateNum is only used in tests to preallocate a fixed number of
+// chunks ahead of time. When this value is set, allocatePercent does not
+// need to be set.
+DEFINE_uint32(preAllocateNum, 0,
              "preallocate chunk nums, this is JUST for curve test");
 
-// 在系统初始化的时候,管理员需要预先格式化磁盘,并进行预分配
-// 这时候只需要指定allocatepercent,allocatepercent是占整个盘的空间的百分比
-DEFINE_uint32(allocatePercent,
-              80,
-              "preallocate storage percent of total disk");
+// During system initialization, the administrator needs to pre-format the
+// disk and preallocate space. At this point only allocatePercent needs to
+// be specified: allocatePercent is the percentage of the whole disk space
+// to preallocate.
+DEFINE_uint32(allocatePercent, 80, "preallocate storage percent of total disk");
 
-// 测试情况下置为false,加快测试速度
-DEFINE_bool(needWriteZero,
-            true,
-            "not write zero for test.");
+// Set to false in tests to speed them up
+DEFINE_bool(needWriteZero, true, "not write zero for test.");
 
-using curve::fs::FileSystemType;
-using curve::fs::LocalFsFactory;
+using curve::chunkserver::FilePoolMeta;
+using curve::common::kFilePoolMagic;
 using curve::fs::FileSystemInfo;
+using curve::fs::FileSystemType;
 using curve::fs::LocalFileSystem;
-using curve::common::kFilePoolMagic;
-using curve::chunkserver::FilePoolMeta;
+using curve::fs::LocalFsFactory;
 
 class CompareInternal {
  public:
@@ -128,7 +117,7 @@ struct AllocateStruct {
 
 static int AllocateFiles(AllocateStruct* allocatestruct) {
     const size_t actualFileSize = allocatestruct->actualFileSize;
-    char* data = new(std::nothrow)char[actualFileSize];
+    char* data = new (std::nothrow) char[actualFileSize];
     memset(data, 0, actualFileSize);
 
     uint64_t count = 0;
@@ -137,14 +126,13 @@
         {
             std::unique_lock
lk(*allocatestruct->mtx); allocatestruct->allocateChunknum->fetch_add(1); - filename = std::to_string( - allocatestruct->allocateChunknum->load()); + filename = std::to_string(allocatestruct->allocateChunknum->load()); } - std::string tmpchunkfilepath = FLAGS_filePoolDir + "/" - + filename + allocatestruct->cleanChunkSuffix; + std::string tmpchunkfilepath = FLAGS_filePoolDir + "/" + filename + + allocatestruct->cleanChunkSuffix; - int ret = allocatestruct->fsptr->Open(tmpchunkfilepath, - O_RDWR | O_CREAT); + int ret = + allocatestruct->fsptr->Open(tmpchunkfilepath, O_RDWR | O_CREAT); if (ret < 0) { *allocatestruct->checkwrong = true; LOG(ERROR) << "file open failed, " << tmpchunkfilepath; @@ -205,12 +193,12 @@ static bool CanBitmapFitInMetaPage() { constexpr size_t kMaximumBitmapBytes = 1024; auto bitmapBytes = - FLAGS_fileSize / FLAGS_blockSize / curve::common::BITMAP_UNIT_SIZE; + FLAGS_fileSize / FLAGS_blockSize / curve::common::BITMAP_UNIT_SIZE; LOG(INFO) << "bitmap bytes is " << bitmapBytes; return bitmapBytes <= kMaximumBitmapBytes; } -// TODO(tongguangxun) :添加单元测试 +// TODO(tongguangxun): Adding unit tests int main(int argc, char** argv) { google::ParseCommandLineFlags(&argc, &argv, false); google::InitGoogleLogging(argv[0]); @@ -247,7 +235,9 @@ int main(int argc, char** argv) { } tmpChunkSet_.insert(tmpvec.begin(), tmpvec.end()); - uint64_t size = tmpChunkSet_.size() ? atoi((*(--tmpChunkSet_.end())).c_str()) : 0; // NOLINT + uint64_t size = tmpChunkSet_.size() + ? atoi((*(--tmpChunkSet_.end())).c_str()) + : 0; // NOLINT allocateChunknum_.store(size + 1); FileSystemInfo finfo; @@ -278,7 +268,7 @@ int main(int argc, char** argv) { bool checkwrong = false; // two threads concurrent, can reach the bandwidth of disk. - uint64_t threadAllocateNum = preAllocateChunkNum/2; + uint64_t threadAllocateNum = preAllocateChunkNum / 2; std::vector thvec; AllocateStruct allocateStruct; allocateStruct.fsptr = fsptr; @@ -316,7 +306,7 @@ int main(int argc, char** argv) { return -1; } - // 读取meta文件,检查是否写入正确 + // Read the meta file and check if it is written correctly FilePoolMeta recordMeta; ret = curve::chunkserver::FilePoolHelper::DecodeMetaInfoFromMetaFile( fsptr, FLAGS_filePoolMetaPath, 4096, &recordMeta); @@ -345,8 +335,8 @@ int main(int argc, char** argv) { if (recordMeta.filePoolPath != FLAGS_filePoolDir) { LOG(ERROR) << "meta info persistency failed!" 
- << ", read chunkpath = " << recordMeta.filePoolPath - << ", real chunkpath = " << FLAGS_filePoolDir; + << ", read chunkpath = " << recordMeta.filePoolPath + << ", real chunkpath = " << FLAGS_filePoolDir; break; } diff --git a/src/tools/curve_meta_tool.cpp b/src/tools/curve_meta_tool.cpp index 5d9da78ec0..6a4bd0af6f 100644 --- a/src/tools/curve_meta_tool.cpp +++ b/src/tools/curve_meta_tool.cpp @@ -40,8 +40,7 @@ std::ostream& operator<<(std::ostream& os, const vector& ranges) { } uint64_t startOff = ranges[i].beginIndex * FLAGS_pageSize; uint64_t endOff = (ranges[i].endIndex + 1) * FLAGS_pageSize; - os << "[" << startOff << "," - << endOff << ")"; + os << "[" << startOff << "," << endOff << ")"; } return os; } @@ -105,26 +104,24 @@ int CurveMetaTool::RunCommand(const std::string& cmd) { } } - - int CurveMetaTool::PrintChunkMeta(const std::string& chunkFileName) { - // 打开chunk文件 - int fd = localFS_->Open(chunkFileName.c_str(), O_RDONLY|O_NOATIME); + // Open chunk file + int fd = localFS_->Open(chunkFileName.c_str(), O_RDONLY | O_NOATIME); if (fd < 0) { - std::cout << "Fail to open " << chunkFileName << ", " - << berror() << std::endl; + std::cout << "Fail to open " << chunkFileName << ", " << berror() + << std::endl; return -1; } - // 读取chunk头部 + // Read chunk header std::unique_ptr buf(new char[FLAGS_pageSize]); memset(buf.get(), 0, FLAGS_pageSize); int rc = localFS_->Read(fd, buf.get(), 0, FLAGS_pageSize); localFS_->Close(fd); if (rc != static_cast(FLAGS_pageSize)) { if (rc < 0) { - std::cout << "Fail to read metaPage from " - << chunkFileName << ", " << berror() << std::endl; + std::cout << "Fail to read metaPage from " << chunkFileName << ", " + << berror() << std::endl; } else { std::cout << "Read size not match, page size: " << FLAGS_pageSize << ", read size: " << rc << ", " << berror() << std::endl; @@ -138,29 +135,29 @@ int CurveMetaTool::PrintChunkMeta(const std::string& chunkFileName) { return -1; } - // 打印metaPage + // Print MetaPage std::cout << metaPage; return 0; } int CurveMetaTool::PrintSnapshotMeta(const std::string& snapFileName) { - // 打开快照文件 - int fd = localFS_->Open(snapFileName.c_str(), O_RDONLY|O_NOATIME); + // Open snapshot file + int fd = localFS_->Open(snapFileName.c_str(), O_RDONLY | O_NOATIME); if (fd < 0) { - std::cout << "Fail to open " << snapFileName << ", " - << berror() << std::endl; + std::cout << "Fail to open " << snapFileName << ", " << berror() + << std::endl; return -1; } - // 读取快照文件头部 + // Read snapshot file header std::unique_ptr buf(new char[FLAGS_pageSize]); memset(buf.get(), 0, FLAGS_pageSize); int rc = localFS_->Read(fd, buf.get(), 0, FLAGS_pageSize); localFS_->Close(fd); if (rc != static_cast(FLAGS_pageSize)) { if (rc < 0) { - std::cout << "Fail to read metaPage from " - << snapFileName << ", " << berror() << std::endl; + std::cout << "Fail to read metaPage from " << snapFileName << ", " + << berror() << std::endl; } else { std::cout << "Read size not match, page size: " << FLAGS_pageSize << ", read size: " << rc << std::endl; @@ -174,7 +171,7 @@ int CurveMetaTool::PrintSnapshotMeta(const std::string& snapFileName) { return -1; } - // 打印metaPage + // Print MetaPage std::cout << metaPage; return 0; } diff --git a/src/tools/curve_meta_tool.h b/src/tools/curve_meta_tool.h index fe2b040c58..2125679022 100644 --- a/src/tools/curve_meta_tool.h +++ b/src/tools/curve_meta_tool.h @@ -24,24 +24,26 @@ #define SRC_TOOLS_CURVE_META_TOOL_H_ #include + #include #include #include #include + +#include "src/chunkserver/datastore/chunkserver_chunkfile.h" #include 
"src/common/bitmap.h" #include "src/fs/local_filesystem.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" -#include "src/chunkserver/datastore/chunkserver_chunkfile.h" namespace curve { namespace tool { -using curve::common::BitRange; -using curve::fs::LocalFileSystem; using curve::chunkserver::ChunkFileMetaPage; -using curve::chunkserver::SnapshotMetaPage; using curve::chunkserver::CSErrorCode; +using curve::chunkserver::SnapshotMetaPage; +using curve::common::BitRange; +using curve::fs::LocalFileSystem; std::ostream& operator<<(std::ostream& os, const vector& ranges); std::ostream& operator<<(std::ostream& os, const ChunkFileMetaPage& metaPage); @@ -49,40 +51,40 @@ std::ostream& operator<<(std::ostream& os, const SnapshotMetaPage& metaPage); class CurveMetaTool : public CurveTool { public: - explicit CurveMetaTool(std::shared_ptr localFs) : - localFS_(localFs) {} + explicit CurveMetaTool(std::shared_ptr localFs) + : localFS_(localFs) {} /** - * @brief 执行命令 - * @param command 要执行的命令 - * @return 成功返回0,失败返回-1 - */ + * @brief Execute command + * @param command The command to be executed + * @return returns 0 for success, -1 for failure + */ int RunCommand(const std::string& command) override; /** - * @brief 打印帮助信息 - */ + * @brief Print help information + */ void PrintHelp(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); private: /** - * @brief 打印chunk文件元数据 - * @param chunkFileName chunk文件的文件名 - * @return 成功返回0,否则返回-1 + * @brief Print chunk file metadata + * @param chunkFileName The file name of the chunk file + * @return successfully returns 0, otherwise returns -1 */ int PrintChunkMeta(const std::string& chunkFileName); /** - * @brief 打印快照文件元数据 - * @param snapFileName 快照文件的文件名 - * @return 成功返回0,否则返回-1 + * @brief Print snapshot file metadata + * @param snapFileName The file name of the snapshot file + * @return successfully returns 0, otherwise returns -1 */ int PrintSnapshotMeta(const std::string& snapFileName); diff --git a/src/tools/curve_tool_define.h b/src/tools/curve_tool_define.h index a392b807bd..2ea50ebd5a 100644 --- a/src/tools/curve_tool_define.h +++ b/src/tools/curve_tool_define.h @@ -24,6 +24,7 @@ #define SRC_TOOLS_CURVE_TOOL_DEFINE_H_ #include + #include DECLARE_string(mdsAddr); @@ -40,10 +41,10 @@ DECLARE_string(password); namespace curve { namespace tool { -// 显示版本命令 +// Display Version Command const char kVersionCmd[] = "version"; -// StatusTool相关命令 +// StatusTool related commands const char kStatusCmd[] = "status"; const char kSpaceCmd[] = "space"; const char kChunkserverStatusCmd[] = "chunkserver-status"; @@ -58,7 +59,7 @@ const char kSnapshotCloneStatusCmd[] = "snapshot-clone-status"; const char kClusterStatusCmd[] = "cluster-status"; const char kScanStatusCmd[] = "scan-status"; -// NameSpaceTool相关命令 +// NameSpaceTool related commands const char kGetCmd[] = "get"; const char kListCmd[] = "list"; const char kSegInfoCmd[] = "seginfo"; @@ -70,7 +71,7 @@ const char kChunkLocatitonCmd[] = "chunk-location"; const char kUpdateThrottle[] = "update-throttle"; const char kListPoolsets[] = "list-poolsets"; -// CopysetCheck相关命令 +// CopysetCheck related commands const char kCheckCopysetCmd[] = "check-copyset"; const char kCheckChunnkServerCmd[] = "check-chunkserver"; const char kCheckServerCmd[] = "check-server"; @@ -78,13 
+79,13 @@ const char kCopysetsStatusCmd[] = "copysets-status"; const char kCheckOperatorCmd[] = "check-operator"; const char kListMayBrokenVolumes[] = "list-may-broken-vol"; -// CopysetTool相关命令 +// CopysetTool related commands const char kSetCopysetAvailFlag[] = "set-copyset-availflag"; -// 一致性检查命令 +// Consistency check command const char kCheckConsistencyCmd[] = "check-consistency"; -// 配置变更命令 +// Configuration change command const char kRemovePeerCmd[] = "remove-peer"; const char kTransferLeaderCmd[] = "transfer-leader"; const char kResetPeerCmd[] = "reset-peer"; @@ -95,18 +96,18 @@ const char kDoSnapshotAll[] = "do-snapshot-all"; const char kRapidLeaderSchedule[] = "rapid-leader-schedule"; const char kSetScanState[] = "set-scan-state"; -// curve文件meta相关的命令 +// Meta related commands for curve files const char kChunkMeta[] = "chunk-meta"; const char kSnapshotMeta[] = "snapshot-meta"; -// raft log相关命令 +// raft log related commands const char kRaftLogMeta[] = "raft-log-meta"; const char kOffline[] = "offline"; const char kVars[] = "/vars/"; const char kConfValue[] = "conf_value"; -// raft state 相关常量 +// raft state related constants const char kState[] = "state"; const char kStateLeader[] = "LEADER"; const char kStateFollower[] = "FOLLOWER"; diff --git a/src/tools/curve_tool_factory.h b/src/tools/curve_tool_factory.h index dc48778713..a863bce5fb 100644 --- a/src/tools/curve_tool_factory.h +++ b/src/tools/curve_tool_factory.h @@ -23,18 +23,18 @@ #ifndef SRC_TOOLS_CURVE_TOOL_FACTORY_H_ #define SRC_TOOLS_CURVE_TOOL_FACTORY_H_ -#include #include #include +#include -#include "src/tools/curve_tool.h" -#include "src/tools/status_tool.h" -#include "src/tools/namespace_tool.h" #include "src/tools/consistency_check.h" -#include "src/tools/curve_cli.h" #include "src/tools/copyset_check.h" -#include "src/tools/schedule_tool.h" #include "src/tools/copyset_tool.h" +#include "src/tools/curve_cli.h" +#include "src/tools/curve_tool.h" +#include "src/tools/namespace_tool.h" +#include "src/tools/schedule_tool.h" +#include "src/tools/status_tool.h" namespace curve { namespace tool { @@ -42,41 +42,41 @@ namespace tool { class CurveToolFactory { public: /** - * @brief 根据输入的command获取CurveTool对象 - * @param command 要执行的命令的名称 - * @return CurveTool实例 + * @brief Retrieve the CurveTool object based on the input command + * @param command The name of the command to be executed + * @return CurveTool instance */ static std::shared_ptr GenerateCurveTool( - const std::string& command); + const std::string& command); private: /** - * @brief 获取StatusTool实例 + * @brief Get StatusTool instance */ static std::shared_ptr GenerateStatusTool(); /** - * @brief 获取NameSpaceTool实例 + * @brief Get NameSpaceTool instance */ static std::shared_ptr GenerateNameSpaceTool(); /** - * @brief 获取ConsistencyCheck实例 + * @brief Get ConsistencyCheck instance */ static std::shared_ptr GenerateConsistencyCheck(); /** - * @brief 获取CurveCli实例 + * @brief Get CurveCli instance */ static std::shared_ptr GenerateCurveCli(); /** - * @brief 获取CopysetCheck实例 + * @brief Get CopysetCheck instance */ static std::shared_ptr GenerateCopysetCheck(); /** - * @brief 获取ScheduleTool实例 + * @brief to obtain a ScheduleTool instance */ static std::shared_ptr GenerateScheduleTool(); diff --git a/src/tools/curve_tool_main.cpp b/src/tools/curve_tool_main.cpp index 8e516dc0e7..5f57f718c1 100644 --- a/src/tools/curve_tool_main.cpp +++ b/src/tools/curve_tool_main.cpp @@ -21,12 +21,16 @@ */ #include + #include "src/common/curve_version.h" #include "src/tools/curve_tool_factory.h" 
-static const char* kHelpStr = "Usage: curve_ops_tool [Command] [OPTIONS...]\n" +static const char* + kHelpStr = + "Usage: curve_ops_tool [Command] [OPTIONS...]\n" "COMMANDS:\n" - "space : show curve all disk type space, include total space and used space\n" //NOLINT + "space : show curve all disk type space, include total space and used " + "space\n" // NOLINT "status : show the total status of the cluster\n" "chunkserver-status : show the chunkserver online status\n" "mds-status : show the mds status\n" @@ -35,22 +39,26 @@ static const char* kHelpStr = "Usage: curve_ops_tool [Command] [OPTIONS...]\n" "etcd-status : show the etcd status\n" "snapshot-clone-status : show the snapshot clone server status\n" "copysets-status : check the health state of all copysets\n" - "chunkserver-list : show curve chunkserver-list, list all chunkserver information\n" //NOLINT + "chunkserver-list : show curve chunkserver-list, list all chunkserver " + "information\n" // NOLINT "server-list : list all server information\n" "logical-pool-list : list all logical pool information\n" "cluster-status : show cluster status\n" "get : show the file info and the actual space of file\n" "list : list the file info of files in the directory\n" "seginfo : list the segments info of the file\n" - "delete : delete the file, to force delete, should specify the --forcedelete=true\n" //NOLINT + "delete : delete the file, to force delete, should specify the " + "--forcedelete=true\n" // NOLINT "clean-recycle : clean the RecycleBin\n" "create : create file, file length unit is GB\n" "extend : extend volume of file\n" - "chunk-location : query the location of the chunk corresponding to the offset\n" //NOLINT + "chunk-location : query the location of the chunk corresponding to the " + "offset\n" // NOLINT "check-consistency : check the consistency of three copies\n" "remove-peer : remove the peer from the copyset\n" - "transfer-leader : transfer the leader of the copyset to the peer\n" //NOLINT - "reset-peer : reset the configuration of copyset, only reset to one peer is supported\n" //NOLINT + "transfer-leader : transfer the leader of the copyset to the peer\n" // NOLINT + "reset-peer : reset the configuration of copyset, only reset to one " + "peer is supported\n" // NOLINT "do-snapshot : do snapshot of the peer of the copyset\n" "do-snapshot-all : do snapshot of all peers of all copysets\n" "check-chunkserver : check the health state of the chunkserver\n" @@ -60,11 +68,13 @@ static const char* kHelpStr = "Usage: curve_ops_tool [Command] [OPTIONS...]\n" "list-may-broken-vol: list all volumes on majority offline copysets\n" "set-copyset-availflag: set copysets available flags\n" "update-throttle: update file throttle params\n" - "rapid-leader-schedule: rapid leader schedule in cluster in logicalpool\n" //NOLINT + "rapid-leader-schedule: rapid leader schedule in cluster in " + "logicalpool\n" // NOLINT "set-scan-state: set scan state for specify logical pool\n" "scan-status: show scan status\n" "list-poolsets: list all poolsets in cluster\n\n" - "You can specify the config path by -confPath to avoid typing too many options\n"; //NOLINT + "You can specify the config path by -confPath to avoid typing too many " + "options\n"; // NOLINT DEFINE_bool(example, false, "print the example of usage"); DEFINE_string(confPath, "/etc/curve/tools.conf", "config file path of tools"); @@ -80,8 +90,10 @@ extern std::string rootUserPassword; } // namespace curve void UpdateFlagsFromConf(curve::common::Configuration* conf) { - // 
如果配置文件不存在的话不报错,以命令行为准,这是为了不强依赖配置
-    // 如果配置文件存在并且没有指定命令行的话,就以配置文件为准
+    // If the configuration file does not exist, no error is reported and the
+    // command line takes precedence; this avoids a hard dependency on the
+    // configuration file. If the file exists and an option is not given on
+    // the command line, the configuration file takes precedence.
     google::CommandLineFlagInfo info;
     if (GetCommandLineFlagInfo("mdsAddr", &info) && info.is_default) {
         conf->GetStringValue("mdsAddr", &FLAGS_mdsAddr);
@@ -98,27 +110,23 @@ void UpdateFlagsFromConf(curve::common::Configuration* conf) {
     if (GetCommandLineFlagInfo("rpcRetryTimes", &info) && info.is_default) {
         conf->GetUInt64Value("rpcRetryTimes", &FLAGS_rpcRetryTimes);
     }
-    if (GetCommandLineFlagInfo("rpcConcurrentNum", &info) &&
-        info.is_default) {
+    if (GetCommandLineFlagInfo("rpcConcurrentNum", &info) && info.is_default) {
         conf->GetUInt64Value("rpcConcurrentNum", &FLAGS_rpcConcurrentNum);
     }
-    if (GetCommandLineFlagInfo("snapshotCloneAddr", &info) &&
-        info.is_default) {
+    if (GetCommandLineFlagInfo("snapshotCloneAddr", &info) && info.is_default) {
         conf->GetStringValue("snapshotCloneAddr", &FLAGS_snapshotCloneAddr);
     }
     if (GetCommandLineFlagInfo("snapshotCloneDummyPort", &info) &&
-            info.is_default) {
+        info.is_default) {
         conf->GetStringValue("snapshotCloneDummyPort",
-                                &FLAGS_snapshotCloneDummyPort);
+                             &FLAGS_snapshotCloneDummyPort);
     }
-    if (GetCommandLineFlagInfo("userName", &info) &&
-        info.is_default) {
+    if (GetCommandLineFlagInfo("userName", &info) && info.is_default) {
         conf->GetStringValue("rootUserName", &FLAGS_userName);
     }
-    if (GetCommandLineFlagInfo("password", &info) &&
-        info.is_default) {
+    if (GetCommandLineFlagInfo("password", &info) && info.is_default) {
         conf->GetStringValue("rootUserPassword", &FLAGS_password);
     }
 }
@@ -168,7 +176,8 @@ int main(int argc, char** argv) {
 
     UpdateFlagsFromConf(&conf);
 
-    // 关掉健康检查,否则Not Connect to的时候重试没有意义
+    // Disable the health check; otherwise retrying while "Not Connect to"
+    // would be pointless
     brpc::FLAGS_health_check_interval = -1;
     auto curveTool = curve::tool::CurveToolFactory::GenerateCurveTool(command);
     if (!curveTool) {
diff --git a/src/tools/etcd_client.h b/src/tools/etcd_client.h
index b7d8f56964..5392a1c6b3 100644
--- a/src/tools/etcd_client.h
+++ b/src/tools/etcd_client.h
@@ -27,9 +27,9 @@
 
 #include
 #include
+#include
 #include
 #include
-#include
 
 #include "src/common/string_util.h"
 #include "src/tools/version_tool.h"
@@ -49,26 +49,29 @@ class EtcdClient {
     virtual ~EtcdClient() = default;
 
     /**
-     * @brief 初始化etcdAddrVec
-     * @param etcdAddr etcd的地址,支持多地址,用","分隔
-     * @return 成功返回0,失败返回-1
+     * @brief Initialize etcdAddrVec
+     * @param etcdAddr etcd addresses; multiple addresses are supported,
+     * separated by ','
+     * @return 0 on success, -1 on failure
      */
     virtual int Init(const std::string& etcdAddr);
 
     /**
-     * @brief 获取etcd集群的leader
-     * @param[out] leaderAddrVec etcd的leader的地址列表,返回值为0时有效
-     * @param[out] onlineState etcd集群中每个节点的在线状态,返回值为0时有效
-     * @return 成功返回0,失败返回-1
+     * @brief Get the leader of the etcd cluster
+     * @param[out] leaderAddrVec the address list of the etcd leader, valid
+     * when the return value is 0
+     * @param[out] onlineState the online state of each node in the etcd
+     * cluster, valid when the return value is 0
+     * @return 0 on success, -1 on failure
      */
     virtual int GetEtcdClusterStatus(std::vector* leaderAddrVec,
-            std::map* onlineState);
+                                     std::map* onlineState);
 
     /**
-     * @brief 获取etcd的版本并检查版本一致性
-     * @param[out] version 版本
-     * @param[out] failedList 查询version失败的地址列表
- * @return 成功返回0,失败返回-1 + * @brief Get the version of ETCD and check version consistency + * @param[out] version Version + * @param[out] failedList Query address list for version failure + * @return returns 0 for success, -1 for failure */ virtual int GetAndCheckEtcdVersion(std::string* version, std::vector* failedList); diff --git a/src/tools/mds_client.cpp b/src/tools/mds_client.cpp index 7a119c77bc..2823edabc4 100644 --- a/src/tools/mds_client.cpp +++ b/src/tools/mds_client.cpp @@ -34,12 +34,11 @@ int MDSClient::Init(const std::string& mdsAddr) { return Init(mdsAddr, std::to_string(kDefaultMdsDummyPort)); } -int MDSClient::Init(const std::string& mdsAddr, - const std::string& dummyPort) { +int MDSClient::Init(const std::string& mdsAddr, const std::string& dummyPort) { if (isInited_) { return 0; } - // 初始化channel + // Initialize channel curve::common::SplitString(mdsAddr, ",", &mdsAddrVec_); if (mdsAddrVec_.empty()) { std::cout << "Split mds address fail!" << std::endl; @@ -57,7 +56,7 @@ int MDSClient::Init(const std::string& mdsAddr, std::cout << "Init channel to " << mdsAddr << "fail!" << std::endl; continue; } - // 寻找哪个mds存活 + // Looking for which mds survived curve::mds::topology::ListPhysicalPoolRequest request; curve::mds::topology::ListPhysicalPoolResponse response; curve::mds::topology::TopologyService_Stub stub(&channel_); @@ -83,7 +82,7 @@ int MDSClient::InitDummyServerMap(const std::string& dummyPort) { std::cout << "split dummy server fail!" << std::endl; return -1; } - // 只指定了一个端口,对所有mds采用这个端口 + // Only one port has been specified, and this port is used for all mds if (dummyPortVec.size() == 1) { for (uint64_t i = 0; i < mdsAddrVec_.size() - 1; ++i) { dummyPortVec.emplace_back(dummyPortVec[0]); @@ -92,7 +91,8 @@ int MDSClient::InitDummyServerMap(const std::string& dummyPort) { if (dummyPortVec.size() != mdsAddrVec_.size()) { std::cout << "mds dummy port list must be correspond as" - " mds addr list" << std::endl; + " mds addr list" + << std::endl; return -1; } @@ -109,8 +109,7 @@ int MDSClient::InitDummyServerMap(const std::string& dummyPort) { return 0; } -int MDSClient::GetFileInfo(const std::string &fileName, - FileInfo* fileInfo) { +int MDSClient::GetFileInfo(const std::string& fileName, FileInfo* fileInfo) { assert(fileInfo != nullptr); curve::mds::GetFileInfoRequest request; curve::mds::GetFileInfoResponse response; @@ -123,13 +122,12 @@ int MDSClient::GetFileInfo(const std::string &fileName, std::cout << "GetFileInfo info from all mds fail!" 
<< std::endl; return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { fileInfo->CopyFrom(response.fileinfo()); return 0; } - std::cout << "GetFileInfo fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "GetFileInfo fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -150,19 +148,18 @@ int MDSClient::GetAllocatedSize(const std::string& fileName, *allocSize = response.allocatedsize(); if (allocMap) { for (auto it = response.allocsizemap().begin(); - it != response.allocsizemap().end(); ++it) { + it != response.allocsizemap().end(); ++it) { allocMap->emplace(it->first, it->second); } } return 0; } - std::cout << "GetAllocatedSize fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "GetAllocatedSize fail with errCode: " << response.statuscode() + << std::endl; return -1; } -int MDSClient::GetFileSize(const std::string& fileName, - uint64_t* fileSize) { +int MDSClient::GetFileSize(const std::string& fileName, uint64_t* fileSize) { assert(fileSize != nullptr); curve::mds::GetFileSizeRequest request; curve::mds::GetFileSizeResponse response; @@ -178,8 +175,8 @@ int MDSClient::GetFileSize(const std::string& fileName, *fileSize = response.filesize(); return 0; } - std::cout << "GetAllocatedSize fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "GetAllocatedSize fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -200,8 +197,7 @@ int MDSClient::ListDir(const std::string& dirName, std::cout << "ListDir from all mds fail!" << std::endl; return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { for (int i = 0; i < response.fileinfo_size(); ++i) { files->emplace_back(response.fileinfo(i)); } @@ -213,8 +209,8 @@ int MDSClient::ListDir(const std::string& dirName, } GetSegmentRes MDSClient::GetSegmentInfo(const std::string& fileName, - uint64_t offset, - PageFileSegment* segment) { + uint64_t offset, + PageFileSegment* segment) { if (!segment) { std::cout << "The argument is a null pointer!" << std::endl; return GetSegmentRes::kOtherError; @@ -260,13 +256,13 @@ int MDSClient::DeleteFile(const std::string& fileName, bool forcedelete) { } if (response.has_statuscode() && - (response.statuscode() == StatusCode::kOK || - response.statuscode() == StatusCode::kFileNotExists || - response.statuscode() == StatusCode::kFileUnderDeleting)) { + (response.statuscode() == StatusCode::kOK || + response.statuscode() == StatusCode::kFileNotExists || + response.statuscode() == StatusCode::kFileUnderDeleting)) { return 0; } - std::cout << "DeleteFile fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "DeleteFile fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -294,8 +290,7 @@ int MDSClient::CreateFile(const CreateFileContext& context) { return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { return 0; } std::cout << "CreateFile fail with errCode: " @@ -316,19 +311,18 @@ int MDSClient::ExtendVolume(const std::string& fileName, uint64_t newSize) { return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { - std::cout << "extendFile success!" 
<< std::endl; + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { + std::cout << "extendFile success!" << std::endl; return 0; } - std::cout << "extendFile fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "extendFile fail with errCode: " << response.statuscode() + << std::endl; return -1; } int MDSClient::ListVolumesOnCopyset( - const std::vector& copysets, - std::vector* fileNames) { + const std::vector& copysets, + std::vector* fileNames) { curve::mds::ListVolumesOnCopysetsRequest request; curve::mds::ListVolumesOnCopysetsResponse response; for (const auto& copyset : copysets) { @@ -343,8 +337,7 @@ int MDSClient::ListVolumesOnCopyset( return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { for (int i = 0; i < response.filenames_size(); ++i) { fileNames->emplace_back(response.filenames(i)); } @@ -373,31 +366,30 @@ int MDSClient::ListClient(std::vector* clientAddrs, return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { for (int i = 0; i < response.clientinfos_size(); ++i) { const auto& clientInfo = response.clientinfos(i); - std::string clientAddr = clientInfo.ip() + ":" + - std::to_string(clientInfo.port()); + std::string clientAddr = + clientInfo.ip() + ":" + std::to_string(clientInfo.port()); clientAddrs->emplace_back(clientAddr); } return 0; } - std::cout << "ListClient fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "ListClient fail with errCode: " << response.statuscode() + << std::endl; return -1; } -int MDSClient::GetChunkServerListInCopySet(const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId, - std::vector* csLocs) { +int MDSClient::GetChunkServerListInCopySet( + const PoolIdType& logicalPoolId, const CopySetIdType& copysetId, + std::vector* csLocs) { if (!csLocs) { std::cout << "The argument is a null pointer!" << std::endl; return -1; } std::vector csServerInfos; - int res = GetChunkServerListInCopySets(logicalPoolId, - {copysetId}, &csServerInfos); + int res = GetChunkServerListInCopySets(logicalPoolId, {copysetId}, + &csServerInfos); if (res != 0) { std::cout << "GetChunkServerListInCopySets fail" << std::endl; return -1; @@ -409,9 +401,10 @@ int MDSClient::GetChunkServerListInCopySet(const PoolIdType& logicalPoolId, return 0; } -int MDSClient::GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, - const std::vector& copysetIds, - std::vector* csServerInfos) { +int MDSClient::GetChunkServerListInCopySets( + const PoolIdType& logicalPoolId, + const std::vector& copysetIds, + std::vector* csServerInfos) { if (!csServerInfos) { std::cout << "The argument is a null pointer!" << std::endl; return -1; @@ -424,7 +417,8 @@ int MDSClient::GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, } curve::mds::topology::TopologyService_Stub stub(&channel_); - auto fp = &curve::mds::topology::TopologyService_Stub::GetChunkServerListInCopySets; // NOLINT + auto fp = &curve::mds::topology::TopologyService_Stub:: + GetChunkServerListInCopySets; // NOLINT if (SendRpcToMds(&request, &response, &stub, fp) != 0) { std::cout << "GetChunkServerListInCopySets from all mds fail!" 
<< std::endl; @@ -432,7 +426,7 @@ int MDSClient::GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.csinfo_size(); ++i) { csServerInfos->emplace_back(response.csinfo(i)); } @@ -444,7 +438,7 @@ int MDSClient::GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, } int MDSClient::ListPhysicalPoolsInCluster( - std::vector* pools) { + std::vector* pools) { if (!pools) { std::cout << "The argument is a null pointer!" << std::endl; return -1; @@ -455,20 +449,19 @@ int MDSClient::ListPhysicalPoolsInCluster( auto fp = &curve::mds::topology::TopologyService_Stub::ListPhysicalPool; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "ListPhysicalPool from all mds fail!" - << std::endl; + std::cout << "ListPhysicalPool from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.physicalpoolinfos_size(); ++i) { pools->emplace_back(response.physicalpoolinfos(i)); } return 0; } - std::cout << "ListPhysicalPool fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "ListPhysicalPool fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -481,8 +474,8 @@ int MDSClient::ListLogicalPoolsInCluster(std::vector* pools) { } for (const auto& phyPool : phyPools) { std::vector lgPools; - ret = ListLogicalPoolsInPhysicalPool(phyPool.physicalpoolid(), - &lgPools); + ret = + ListLogicalPoolsInPhysicalPool(phyPool.physicalpoolid(), &lgPools); if (ret != 0) { std::cout << "ListLogicalPoolsInPhysicalPool " << phyPool.physicalpoolid() << " fail" << std::endl; @@ -493,8 +486,8 @@ int MDSClient::ListLogicalPoolsInCluster(std::vector* pools) { return 0; } -int MDSClient::ListLogicalPoolsInPhysicalPool(const PoolIdType& id, - std::vector* pools) { +int MDSClient::ListLogicalPoolsInPhysicalPool( + const PoolIdType& id, std::vector* pools) { assert(pools != nullptr); curve::mds::topology::ListLogicalPoolRequest request; curve::mds::topology::ListLogicalPoolResponse response; @@ -503,20 +496,19 @@ int MDSClient::ListLogicalPoolsInPhysicalPool(const PoolIdType& id, auto fp = &curve::mds::topology::TopologyService_Stub::ListLogicalPool; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "ListLogicalPool from all mds fail!" - << std::endl; + std::cout << "ListLogicalPool from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.logicalpoolinfos_size(); ++i) { pools->emplace_back(response.logicalpoolinfos(i)); } return 0; } - std::cout << "ListLogicalPool fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "ListLogicalPool fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -530,20 +522,19 @@ int MDSClient::ListZoneInPhysicalPool(const PoolIdType& id, auto fp = &curve::mds::topology::TopologyService_Stub::ListPoolZone; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "ListPoolZone from all mds fail!" - << std::endl; + std::cout << "ListPoolZone from all mds fail!" 
<< std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.zones_size(); ++i) { zones->emplace_back(response.zones(i)); } return 0; } - std::cout << "ListPoolZone fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "ListPoolZone fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -557,55 +548,54 @@ int MDSClient::ListServersInZone(const ZoneIdType& id, auto fp = &curve::mds::topology::TopologyService_Stub::ListZoneServer; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "ListZoneServer from all mds fail!" - << std::endl; + std::cout << "ListZoneServer from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.serverinfo_size(); ++i) { servers->emplace_back(response.serverinfo(i)); } return 0; } - std::cout << "ListZoneServer fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "ListZoneServer fail with errCode: " << response.statuscode() + << std::endl; return -1; } -int MDSClient::ListChunkServersOnServer(const ServerIdType& id, - std::vector* chunkservers) { +int MDSClient::ListChunkServersOnServer( + const ServerIdType& id, std::vector* chunkservers) { assert(chunkservers != nullptr); curve::mds::topology::ListChunkServerRequest request; request.set_serverid(id); return ListChunkServersOnServer(&request, chunkservers); } -int MDSClient::ListChunkServersOnServer(const std::string& ip, - std::vector* chunkservers) { +int MDSClient::ListChunkServersOnServer( + const std::string& ip, std::vector* chunkservers) { assert(chunkservers != nullptr); curve::mds::topology::ListChunkServerRequest request; request.set_ip(ip); return ListChunkServersOnServer(&request, chunkservers); } -int MDSClient::ListChunkServersOnServer(ListChunkServerRequest* request, - std::vector* chunkservers) { +int MDSClient::ListChunkServersOnServer( + ListChunkServerRequest* request, + std::vector* chunkservers) { curve::mds::topology::ListChunkServerResponse response; curve::mds::topology::TopologyService_Stub stub(&channel_); auto fp = &curve::mds::topology::TopologyService_Stub::ListChunkServer; if (SendRpcToMds(request, &response, &stub, fp) != 0) { - std::cout << "ListChunkServer from all mds fail!" - << std::endl; + std::cout << "ListChunkServer from all mds fail!" 
<< std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.chunkserverinfos_size(); ++i) { const auto& chunkserver = response.chunkserverinfos(i); - // 跳过retired状态的chunkserver + // Skipping chunkserver in Retired State if (chunkserver.status() == ChunkServerStatus::RETIRED) { continue; } @@ -613,9 +603,9 @@ int MDSClient::ListChunkServersOnServer(ListChunkServerRequest* request, } return 0; } - std::cout << "ListChunkServer fail with errCode: " - << response.statuscode() << std::endl; - return -1; + std::cout << "ListChunkServer fail with errCode: " << response.statuscode() + << std::endl; + return -1; } int MDSClient::GetChunkServerInfo(const ChunkServerIdType& id, @@ -653,23 +643,22 @@ int MDSClient::GetChunkServerInfo(GetChunkServerInfoRequest* request, auto fp = &curve::mds::topology::TopologyService_Stub::GetChunkServer; if (SendRpcToMds(request, &response, &stub, fp) != 0) { - std::cout << "GetChunkServer from all mds fail!" - << std::endl; + std::cout << "GetChunkServer from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { chunkserver->CopyFrom(response.chunkserverinfo()); return 0; } - std::cout << "GetChunkServer fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "GetChunkServer fail with errCode: " << response.statuscode() + << std::endl; return -1; } int MDSClient::GetCopySetsInChunkServer(const ChunkServerIdType& id, - std::vector* copysets) { + std::vector* copysets) { assert(copysets != nullptr); curve::mds::topology::GetCopySetsInChunkServerRequest request; curve::mds::topology::GetCopySetsInChunkServerResponse response; @@ -678,7 +667,7 @@ int MDSClient::GetCopySetsInChunkServer(const ChunkServerIdType& id, } int MDSClient::GetCopySetsInChunkServer(const std::string& csAddr, - std::vector* copysets) { + std::vector* copysets) { assert(copysets != nullptr); curve::mds::topology::GetCopySetsInChunkServerRequest request; curve::mds::topology::GetCopySetsInChunkServerResponse response; @@ -697,7 +686,7 @@ int MDSClient::GetCopySetsInChunkServer(const std::string& csAddr, } int MDSClient::SetCopysetsAvailFlag(const std::vector copysets, - bool availFlag) { + bool availFlag) { curve::mds::topology::SetCopysetsAvailFlagRequest request; curve::mds::topology::SetCopysetsAvailFlagResponse response; curve::mds::topology::TopologyService_Stub stub(&channel_); @@ -708,13 +697,12 @@ int MDSClient::SetCopysetsAvailFlag(const std::vector copysets, request.set_availflag(availFlag); auto fp = &curve::mds::topology::TopologyService_Stub::SetCopysetsAvailFlag; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "SetCopysetsAvailFlag from all mds fail!" - << std::endl; + std::cout << "SetCopysetsAvailFlag from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { return 0; } std::cout << "SetCopysetsAvailFlag fail with errCode: " @@ -728,13 +716,12 @@ int MDSClient::ListUnAvailCopySets(std::vector* copysets) { curve::mds::topology::TopologyService_Stub stub(&channel_); auto fp = &curve::mds::topology::TopologyService_Stub::ListUnAvailCopySets; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "ListUnAvailCopySets from all mds fail!" 
- << std::endl; + std::cout << "ListUnAvailCopySets from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.copysets_size(); ++i) { copysets->emplace_back(response.copysets(i)); } @@ -746,21 +733,21 @@ int MDSClient::ListUnAvailCopySets(std::vector* copysets) { } int MDSClient::GetCopySetsInChunkServer( - GetCopySetsInChunkServerRequest* request, - std::vector* copysets) { + GetCopySetsInChunkServerRequest* request, + std::vector* copysets) { curve::mds::topology::GetCopySetsInChunkServerResponse response; curve::mds::topology::TopologyService_Stub stub(&channel_); - auto fp = &curve::mds::topology::TopologyService_Stub::GetCopySetsInChunkServer; // NOLINT + auto fp = &curve::mds::topology::TopologyService_Stub:: + GetCopySetsInChunkServer; // NOLINT if (SendRpcToMds(request, &response, &stub, fp) != 0) { - std::cout << "GetCopySetsInChunkServer from all mds fail!" - << std::endl; + std::cout << "GetCopySetsInChunkServer from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { - for (int i =0; i < response.copysetinfos_size(); ++i) { + response.statuscode() == kTopoErrCodeSuccess) { + for (int i = 0; i < response.copysetinfos_size(); ++i) { copysets->emplace_back(response.copysetinfos(i)); } return 0; @@ -783,14 +770,13 @@ int MDSClient::GetCopySetsInCluster(std::vector* copysets, auto fp = &curve::mds::topology::TopologyService_Stub::GetCopySetsInCluster; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "GetCopySetsInCluster from all mds fail!" - << std::endl; + std::cout << "GetCopySetsInCluster from all mds fail!" 
<< std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { - for (int i =0; i < response.copysetinfos_size(); ++i) { + response.statuscode() == kTopoErrCodeSuccess) { + for (int i = 0; i < response.copysetinfos_size(); ++i) { copysets->emplace_back(response.copysetinfos(i)); } return 0; @@ -800,9 +786,7 @@ int MDSClient::GetCopySetsInCluster(std::vector* copysets, return -1; } - -int MDSClient::GetCopyset(PoolIdType lpid, - CopySetIdType copysetId, +int MDSClient::GetCopyset(PoolIdType lpid, CopySetIdType copysetId, CopysetInfo* copysetInfo) { curve::mds::topology::GetCopysetRequest request; curve::mds::topology::GetCopysetResponse response; @@ -843,8 +827,8 @@ int MDSClient::ListServersInCluster(std::vector* servers) { } for (const auto& zone : zones) { if (ListServersInZone(zone.zoneid(), servers) != 0) { - std::cout << "ListServersInZone fail, zoneId :" - << zone.zoneid() << std::endl; + std::cout << "ListServersInZone fail, zoneId :" << zone.zoneid() + << std::endl; return -1; } } @@ -853,7 +837,7 @@ int MDSClient::ListServersInCluster(std::vector* servers) { } int MDSClient::ListChunkServersInCluster( - std::vector* chunkservers) { + std::vector* chunkservers) { assert(chunkservers != nullptr); std::vector servers; if (ListServersInCluster(&servers) != 0) { @@ -869,8 +853,8 @@ int MDSClient::ListChunkServersInCluster( return 0; } -int MDSClient::ListChunkServersInCluster(std::map>* chunkservers) { +int MDSClient::ListChunkServersInCluster( + std::map>* chunkservers) { assert(chunkservers != nullptr); std::vector servers; if (ListServersInCluster(&servers) != 0) { @@ -880,8 +864,8 @@ int MDSClient::ListChunkServersInCluster(std::map chunkserverList; - if (ListChunkServersOnServer(server.serverid(), - &chunkserverList) != 0) { + if (ListChunkServersOnServer(server.serverid(), &chunkserverList) != + 0) { std::cout << "ListChunkServersOnServer fail!" 
<< std::endl; return -1; } @@ -889,7 +873,7 @@ int MDSClient::ListChunkServersInCluster(std::mapfind(server.physicalpoolid()); if (iter != chunkservers->end()) { iter->second.insert(iter->second.end(), chunkserverList.begin(), - chunkserverList.end()); + chunkserverList.end()); } else { chunkservers->emplace(server.physicalpoolid(), chunkserverList); } @@ -900,8 +884,8 @@ int MDSClient::ListChunkServersInCluster(std::map* onlineStatus) { assert(onlineStatus != nullptr); onlineStatus->clear(); - for (const auto &item : dummyServerMap_) { + for (const auto& item : dummyServerMap_) { std::string listenAddr; int res = GetListenAddrFromDummyPort(item.second, &listenAddr); - // 如果获取到的监听地址与记录的mds地址不一致,也认为不在线 + // If the obtained listening address does not match the recorded MDS + // address, it is also considered offline if (res != 0 || listenAddr != item.first) { onlineStatus->emplace(item.first, false); continue; @@ -943,7 +928,7 @@ int MDSClient::GetMetric(const std::string& metricName, std::string* value) { while (changeTimeLeft >= 0) { brpc::Controller cntl; MetricRet res = metricClient_.GetMetric(mdsAddrVec_[currentMdsIndex_], - metricName, value); + metricName, value); if (res == MetricRet::kOK) { return 0; } @@ -962,8 +947,7 @@ bool MDSClient::ChangeMDServer() { if (currentMdsIndex_ > static_cast(mdsAddrVec_.size() - 1)) { currentMdsIndex_ = 0; } - if (channel_.Init(mdsAddrVec_[currentMdsIndex_].c_str(), - nullptr) != 0) { + if (channel_.Init(mdsAddrVec_[currentMdsIndex_].c_str(), nullptr) != 0) { return false; } return true; @@ -971,14 +955,14 @@ bool MDSClient::ChangeMDServer() { std::vector MDSClient::GetCurrentMds() { std::vector leaderAddrs; - for (const auto &item : dummyServerMap_) { - // 获取status来判断正在服务的地址 + for (const auto& item : dummyServerMap_) { + // Obtain status to determine the address being served std::string status; - MetricRet ret = metricClient_.GetMetric(item.second, - kMdsStatusMetricName, &status); + MetricRet ret = + metricClient_.GetMetric(item.second, kMdsStatusMetricName, &status); if (ret != MetricRet::kOK) { - std::cout << "Get status metric from " << item.second - << " fail" << std::endl; + std::cout << "Get status metric from " << item.second << " fail" + << std::endl; continue; } if (status == kMdsStatusLeader) { @@ -995,7 +979,8 @@ int MDSClient::RapidLeaderSchedule(PoolIdType lpoolId) { request.set_logicalpoolid(lpoolId); - auto fp = &::curve::mds::schedule::ScheduleService_Stub::RapidLeaderSchedule; // NOLINT + auto fp = &::curve::mds::schedule::ScheduleService_Stub:: + RapidLeaderSchedule; // NOLINT if (0 != SendRpcToMds(&request, &response, &stub, fp)) { std::cout << "RapidLeaderSchedule fail" << std::endl; return -1; @@ -1006,7 +991,7 @@ int MDSClient::RapidLeaderSchedule(PoolIdType lpoolId) { return 0; } std::cout << "RapidLeaderSchedule fail with errCode: " - << response.statuscode() << std::endl; + << response.statuscode() << std::endl; return -1; } @@ -1027,8 +1012,8 @@ int MDSClient::SetLogicalPoolScanState(PoolIdType lpid, bool scanEnable) { auto retCode = response.statuscode(); if (retCode != ::curve::mds::topology::kTopoErrCodeSuccess) { - std::cout << "SetLogicalPoolScanState fail with retCode: " - << retCode << std::endl; + std::cout << "SetLogicalPoolScanState fail with retCode: " << retCode + << std::endl; return -1; } @@ -1037,7 +1022,7 @@ int MDSClient::SetLogicalPoolScanState(PoolIdType lpid, bool scanEnable) { int MDSClient::QueryChunkServerRecoverStatus( const std::vector& cs, - std::map *statusMap) { + std::map* statusMap) { 
assert(statusMap != nullptr); ::curve::mds::schedule::QueryChunkServerRecoverStatusRequest request; ::curve::mds::schedule::QueryChunkServerRecoverStatusResponse response; @@ -1047,7 +1032,8 @@ int MDSClient::QueryChunkServerRecoverStatus( request.add_chunkserverid(id); } - auto fp = &::curve::mds::schedule::ScheduleService_Stub::QueryChunkServerRecoverStatus; // NOLINT + auto fp = &::curve::mds::schedule::ScheduleService_Stub:: + QueryChunkServerRecoverStatus; // NOLINT if (0 != SendRpcToMds(&request, &response, &stub, fp)) { std::cout << "QueryChunkServerRecoverStatus fail" << std::endl; return -1; @@ -1056,13 +1042,13 @@ int MDSClient::QueryChunkServerRecoverStatus( if (response.statuscode() == ::curve::mds::schedule::kScheduleErrCodeSuccess) { for (auto it = response.recoverstatusmap().begin(); - it != response.recoverstatusmap().end(); ++it) { + it != response.recoverstatusmap().end(); ++it) { (*statusMap)[it->first] = it->second; } return 0; } std::cout << "QueryChunkServerRecoverStatus fail with errCode: " - << response.statuscode() << std::endl; + << response.statuscode() << std::endl; return -1; } @@ -1095,21 +1081,22 @@ int MDSClient::UpdateFileThrottleParams( template int MDSClient::SendRpcToMds(Request* request, Response* response, T* obp, - void (T::*func)(google::protobuf::RpcController*, - const Request*, Response*, - google::protobuf::Closure*)) { + void (T::*func)(google::protobuf::RpcController*, + const Request*, Response*, + google::protobuf::Closure*)) { int changeTimeLeft = mdsAddrVec_.size() - 1; while (changeTimeLeft >= 0) { brpc::Controller cntl; cntl.set_timeout_ms(FLAGS_rpcTimeout); (obp->*func)(&cntl, request, response, nullptr); if (!cntl.Failed()) { - // 如果成功了,就返回0,对response的判断放到上一层 + // If successful, return 0; checking the response is left to the + // caller return 0; } - bool needRetry = (cntl.ErrorCode() != EHOSTDOWN && - cntl.ErrorCode() != ETIMEDOUT && - cntl.ErrorCode() != brpc::ELOGOFF); + bool needRetry = + (cntl.ErrorCode() != EHOSTDOWN && cntl.ErrorCode() != ETIMEDOUT && + cntl.ErrorCode() != brpc::ELOGOFF); uint64_t retryTimes = 0; while (needRetry && retryTimes < FLAGS_rpcRetryTimes) { cntl.Reset(); @@ -1120,10 +1107,13 @@ int MDSClient::SendRpcToMds(Request* request, Response* response, T* obp, } return 0; } - // 对于需要重试的错误,重试次数用完了还没成功就返回错误不切换 - // ERPCTIMEDOUT比较特殊,这种情况下,mds可能切换了也可能没切换,所以 - // 需要重试并且重试次数用完后切换 - // 只有不需要重试的,也就是mds不在线的才会去切换mds + // For errors that require retries, if the retry budget is exhausted + // without success, return an error and do not switch. ERPCTIMEDOUT is + // special: in that case the MDS may or may not have switched, so the + // RPC is retried and, once the retry budget is exhausted, the client + // switches MDS. Only errors that need no retry, i.e. the MDS being + // offline, make the client switch MDS immediately.
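 + // Concretely, for example: an RPC failing with EHOSTDOWN, ETIMEDOUT or + // brpc::ELOGOFF is not retried and the client moves on to the next mds + // (ChangeMDServer()); brpc::ERPCTIMEDOUT is retried up to + // FLAGS_rpcRetryTimes and switches only after the retries are used up; + // any other retryable error uses up its retries and then fails without + // switching.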
if (needRetry && cntl.ErrorCode() != brpc::ERPCTIMEDOUT) { std::cout << "Send RPC to mds fail, error content: " << cntl.ErrorText() << std::endl; diff --git a/src/tools/mds_client.h b/src/tools/mds_client.h index 05bac69cd5..fc62c4d3ec 100644 --- a/src/tools/mds_client.h +++ b/src/tools/mds_client.h @@ -23,55 +23,55 @@ #ifndef SRC_TOOLS_MDS_CLIENT_H_ #define SRC_TOOLS_MDS_CLIENT_H_ -#include #include +#include #include -#include #include -#include #include #include +#include #include +#include #include "proto/nameserver2.pb.h" -#include "proto/topology.pb.h" #include "proto/schedule.pb.h" +#include "proto/topology.pb.h" #include "src/common/authenticator.h" -#include "src/mds/common/mds_define.h" +#include "src/common/net_common.h" #include "src/common/string_util.h" #include "src/common/timeutility.h" -#include "src/common/net_common.h" -#include "src/tools/metric_name.h" -#include "src/tools/metric_client.h" +#include "src/mds/common/mds_define.h" #include "src/tools/common.h" #include "src/tools/curve_tool_define.h" +#include "src/tools/metric_client.h" +#include "src/tools/metric_name.h" +using curve::common::Authenticator; +using curve::common::ChunkServerLocation; +using curve::common::CopysetInfo; using curve::mds::FileInfo; +using curve::mds::PageFileChunkInfo; using curve::mds::PageFileSegment; using curve::mds::StatusCode; -using curve::mds::PageFileChunkInfo; -using curve::mds::topology::kTopoErrCodeSuccess; +using curve::mds::schedule::RapidLeaderScheduleRequst; +using curve::mds::schedule::RapidLeaderScheduleResponse; +using curve::mds::topology::ChunkServerIdType; using curve::mds::topology::ChunkServerInfo; -using curve::common::ChunkServerLocation; +using curve::mds::topology::ChunkServerStatus; +using curve::mds::topology::CopySetIdType; using curve::mds::topology::CopySetServerInfo; -using curve::mds::topology::ServerInfo; -using curve::mds::topology::ZoneInfo; -using curve::mds::topology::PhysicalPoolInfo; +using curve::mds::topology::GetChunkServerInfoRequest; +using curve::mds::topology::GetCopySetsInChunkServerRequest; +using curve::mds::topology::kTopoErrCodeSuccess; +using curve::mds::topology::ListChunkServerRequest; using curve::mds::topology::LogicalPoolInfo; -using curve::common::CopysetInfo; +using curve::mds::topology::PhysicalPoolInfo; +using curve::mds::topology::PoolIdType; using curve::mds::topology::ServerIdType; +using curve::mds::topology::ServerInfo; using curve::mds::topology::ZoneIdType; -using curve::mds::topology::PoolIdType; -using curve::mds::topology::CopySetIdType; -using curve::mds::topology::ChunkServerIdType; -using curve::mds::topology::ChunkServerStatus; -using curve::mds::topology::ListChunkServerRequest; -using curve::mds::topology::GetChunkServerInfoRequest; -using curve::mds::topology::GetCopySetsInChunkServerRequest; -using curve::mds::schedule::RapidLeaderScheduleRequst; -using curve::mds::schedule::RapidLeaderScheduleResponse; -using curve::common::Authenticator; +using curve::mds::topology::ZoneInfo; namespace curve { namespace tool { @@ -79,10 +79,10 @@ namespace tool { using curve::mds::topology::PoolsetInfo; enum class GetSegmentRes { - kOK = 0, // 获取segment成功 - kSegmentNotAllocated = -1, // segment不存在 - kFileNotExists = -2, // 文件不存在 - kOtherError = -3 // 其他错误 + kOK = 0, // Successfully obtained segment + kSegmentNotAllocated = -1, // segment does not exist + kFileNotExists = -2, // File does not exist + kOtherError = -3 // Other errors }; using AllocMap = std::unordered_map; @@ -98,93 +98,99 @@ struct CreateFileContext { class 
MDSClient { public: - MDSClient() : currentMdsIndex_(0), userName_(""), - password_(""), isInited_(false) {} + MDSClient() + : currentMdsIndex_(0), userName_(""), password_(""), isInited_(false) {} virtual ~MDSClient() = default; /** - * @brief 初始化channel - * @param mdsAddr mds的地址,支持多地址,用","分隔 - * @return 成功返回0,失败返回-1 + * @brief Initialize channel + * @param mdsAddr Address of mds, supporting multiple addresses separated by + * ',' + * @return returns 0 for success, -1 for failure */ virtual int Init(const std::string& mdsAddr); /** - * @brief 初始化channel - * @param mdsAddr mds的地址,支持多地址,用","分隔 - * @param dummyPort dummy port列表,只输入一个的话 - * 所有mds用同样的dummy port,用字符串分隔有多个的话 - * 为每个mds设置不同的dummy port - * @return 成功返回0,失败返回-1 + * @brief Initialize channel + * @param mdsAddr Address of mds, supporting multiple addresses separated by + * ',' + * @param dummyPort dummy port list; if only one port is given, all mds use + * the same dummy port; if several are given, separated by + * ',', each mds gets its own dummy port + * @return returns 0 for success, -1 for failure */ - virtual int Init(const std::string& mdsAddr, - const std::string& dummyPort); + virtual int Init(const std::string& mdsAddr, const std::string& dummyPort); /** - * @brief 获取文件fileInfo - * @param fileName 文件名 - * @param[out] fileInfo 文件fileInfo,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get file fileInfo + * @param fileName File name + * @param[out] fileInfo file fileInfo, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetFileInfo(const std::string& fileName, FileInfo* fileInfo); /** - * @brief 获取文件或目录分配大小 - * @param fileName 文件名 - * @param[out] allocSize 文件或目录分配大小,返回值为0时有效 - * @param[out] allocMap 文件在各个池子分配的情况 - * @return 成功返回0,失败返回-1 + * @brief Get file or directory allocation size + * @param fileName File name + * @param[out] allocSize file or directory allocation size, valid when the + * return value is 0 + * @param[out] allocMap Allocation of the file in each pool + * @return returns 0 for success, -1 for failure */ virtual int GetAllocatedSize(const std::string& fileName, uint64_t* allocSize, AllocMap* allocMap = nullptr); /** - * @brief 获取文件或目录的大小 - * @param fileName 文件名 - * @param[out] fileSize 文件或目录分配大小,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the size of a file or directory + * @param fileName File name + * @param[out] fileSize File or directory size, valid when the return value + * is 0 + * @return returns 0 for success, -1 for failure */ - virtual int GetFileSize(const std::string& fileName, - uint64_t* fileSize); + virtual int GetFileSize(const std::string& fileName, uint64_t* fileSize); /** - * @brief 将目录下所有的fileInfo列出来 - * @param dirName 目录名 - * @param[out] files 目录下的所有文件fileInfo,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief List all fileInfo in the directory + * @param dirName directory name + * @param[out] files fileInfo of all files in the directory, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListDir(const std::string& dirName, std::vector* files); /** - * @brief 获取指定偏移的segment放到segment里面 - * @param fileName 文件名 - * @param offset 偏移值 - * @param[out] segment 文件中指定偏移的segmentInfo,返回值为0时有效 - * @return 返回GetSegmentRes,区分segment未分配和其他错误 + * @brief Get the segment at the specified offset and place it in the + * segment + * @param fileName File name + * @param offset offset value + * @param[out] segment The segmentInfo of the specified offset in the file, + * valid when the return
value is 0 + * @return returns GetSegmentRes, distinguishing between unassigned segments + * and other errors */ virtual GetSegmentRes GetSegmentInfo(const std::string& fileName, - uint64_t offset, - PageFileSegment* segment); + uint64_t offset, + PageFileSegment* segment); /** - * @brief 删除文件 - * @param fileName 文件名 - * @param forcedelete 是否强制删除 - * @return 成功返回0,失败返回-1 + * @brief Delete file + * @param fileName File name + * @param forcedelete: Do you want to force deletion + * @return returns 0 for success, -1 for failure */ virtual int DeleteFile(const std::string& fileName, bool forcedelete = false); /** - * @brief create pageFile or directory - * @param fileName file name or dir name - * @param length 文件长度 - * @param normalFile is file or dir - * @param stripeUnit stripe unit size - * @param stripeCount the amount of stripes - * @return 成功返回0,失败返回-1 + * @brief create pageFile or directory + * @param fileName file name or dir name + * @param length File length + * @param normalFile is file or dir + * @param stripeUnit stripe unit size + * @param stripeCount the amount of stripes + * @return returns 0 for success, -1 for failure */ virtual int CreateFile(const CreateFileContext& context); @@ -195,144 +201,157 @@ class MDSClient { * @return return 0 when success, -1 when fail */ virtual int ListVolumesOnCopyset( - const std::vector& copysets, - std::vector* fileNames); + const std::vector& copysets, + std::vector* fileNames); /** - * @brief 扩容卷 - * @param fileName 文件名 - * @param newSize 扩容后的卷大小 - * @return 成功返回0,失败返回-1 + * @brief expansion volume + * @param fileName File name + * @param newSize The volume size after expansion + * @return returns 0 for success, -1 for failure */ virtual int ExtendVolume(const std::string& fileName, uint64_t newSize); /** - * @brief 列出client的dummyserver的地址 - * @param[out] clientAddrs client地址列表,返回0时有效 - * @param[out] listClientsInRepo 把数据库里的client也列出来 - * @return 成功返回0,失败返回-1 + * @brief List the address of the client's dummyserver + * @param[out] clientAddrs client address list, valid when 0 is returned + * @param[out] listClientsInRepo also lists the clients in the database + * @return returns 0 for success, -1 for failure */ virtual int ListClient(std::vector* clientAddrs, bool listClientsInRepo = false); /** - * @brief 获取copyset中的chunkserver列表 - * @param logicalPoolId 逻辑池id - * @param copysetId copyset id - * @param[out] csLocs chunkserver位置的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the list of chunkservers in the copyset + * @param logicalPoolId Logical Pool id + * @param copysetId copyset id + * @param[out] csLocs List of chunkserver locations, valid when the return + * value is 0 + * @return returns 0 for success, -1 for failure */ - virtual int GetChunkServerListInCopySet(const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId, - std::vector* csLocs); + virtual int GetChunkServerListInCopySet( + const PoolIdType& logicalPoolId, const CopySetIdType& copysetId, + std::vector* csLocs); /** - * @brief 获取copyset中的chunkserver列表 - * @param logicalPoolId 逻辑池id - * @param copysetIds 要查询的copysetId的列表 - * @param[out] csServerInfos copyset成员的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the list of chunkservers in the copyset + * @param logicalPoolId Logical Pool ID + * @param copysetIds List of copysetIds to query + * @param[out] csServerInfos A list of copyset members, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure */ - virtual int GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, 
- const std::vector& copysetIds, - std::vector* csServerInfos); + virtual int GetChunkServerListInCopySets( + const PoolIdType& logicalPoolId, + const std::vector& copysetIds, + std::vector* csServerInfos); /** - * @brief 获取集群中的物理池列表 - * @param[out] pools 物理池信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get a list of physical pools in the cluster + * @param[out] pools A list of physical pool information, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListPhysicalPoolsInCluster( - std::vector* pools); - + std::vector* pools); /** - * @brief 获取物理池中的逻辑池列表 - * @param id 物理池id - * @param[out] pools 逻辑池信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get a list of logical pools in the physical pool + * @param id Physical pool id + * @param[out] pools List of logical pool information, valid when the return + * value is 0 + * @return returns 0 for success, -1 for failure */ - virtual int ListLogicalPoolsInPhysicalPool(const PoolIdType& id, - std::vector* pools); + virtual int ListLogicalPoolsInPhysicalPool( + const PoolIdType& id, std::vector* pools); /** - * @brief 集群中的逻辑池列表 - * @param[out] pools 逻辑池信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief List of logical pools in the cluster + * @param[out] pools List of logical pool information, valid when the return + * value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListLogicalPoolsInCluster(std::vector* pools); /** - * @brief 获取物理池中的zone列表 - * @param id 物理池id - * @param[out] zones zone信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get a list of zones in the physical pool + * @param id Physical pool id + * @param[out] zones A list of zone information, valid when the return value + * is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListZoneInPhysicalPool(const PoolIdType& id, - std::vector* zones); + std::vector* zones); /** - * @brief 获取zone中的server列表 - * @param id zone id - * @param[out] servers server信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get a list of servers in the zone + * @param id zone id + * @param[out] servers List of server information, valid when the return + * value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListServersInZone(const ZoneIdType& id, - std::vector* servers); + std::vector* servers); /** - * @brief 获取server上的chunkserver的列表 - * @param id server id - * @param[out] chunkservers chunkserver信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get a list of chunkservers on the server + * @param id server id + * @param[out] chunkservers A list of chunkserver information, valid when + * the return value is 0 + * @return returns 0 for success, -1 for failure */ - virtual int ListChunkServersOnServer(const ServerIdType& id, - std::vector* chunkservers); + virtual int ListChunkServersOnServer( + const ServerIdType& id, std::vector* chunkservers); /** - * @brief 获取server上的chunkserver的列表 - * @param ip server ip - * @param[out] chunkservers chunkserver信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get a list of chunkservers on the server + * @param ip server ip + * @param[out] chunkservers A list of chunkserver information, valid when + * the return value is 0 + * @return returns 0 for success, -1 for failure */ - virtual int ListChunkServersOnServer(const std::string& ip, - std::vector* chunkservers); + virtual int ListChunkServersOnServer( + const std::string& ip, std::vector* chunkservers); /** - * @brief 获取chunkserver的详细信息 - * @param id chunkserver id - * @param[out]
chunkserver chunkserver的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get detailed information about chunkserver + * @param id chunkserver id + * @param[out] chunkserver The detailed information of chunkserver is valid + * when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetChunkServerInfo(const ChunkServerIdType& id, - ChunkServerInfo* chunkserver); + ChunkServerInfo* chunkserver); /** - * @brief 获取chunkserver的详细信息 - * @param csAddr chunkserver的地址,ip:port的格式 - * @param[out] chunkserver chunkserver的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get detailed information about chunkserver + * @param csAddr The address of chunkserver, in the format of ip:port + * @param[out] chunkserver The detailed information of chunkserver is valid + * when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetChunkServerInfo(const std::string& csAddr, - ChunkServerInfo* chunkserver); + ChunkServerInfo* chunkserver); /** - * @brief 获取chunkserver上的所有copyset - * @param id chunkserver的id - * @param[out] copysets chunkserver上copyset的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get all copysets on chunkserver + * @param id The id of chunkserver + * @param[out] copysets Details of copysets on chunkserver, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetCopySetsInChunkServer(const ChunkServerIdType& id, - std::vector* copysets); + std::vector* copysets); /** - * @brief 获取chunkserver上的所有copyset - * @param csAddr chunkserver的地址,ip:port的格式 - * @param[out] copysets chunkserver上copyset的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get all copysets on chunkserver + * @param csAddr The address of chunkserver, in the format of ip: port + * @param[out] copysets Details of copysets on chunkserver, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetCopySetsInChunkServer(const std::string& csAddr, - std::vector* copysets); + std::vector* copysets); /** * @brief Get all copysets in cluster @@ -350,32 +369,33 @@ class MDSClient { * @param[out] copysetInfo the copyset * @return 0 if success, else return -1 */ - virtual int GetCopyset(PoolIdType lpid, - CopySetIdType copysetId, + virtual int GetCopyset(PoolIdType lpid, CopySetIdType copysetId, CopysetInfo* copysetInfo); /** - * @brief 列出集群中的所有server - * @param[out] servers server信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief List all servers in the cluster + * @param[out] servers List of server information, valid when the return + * value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListServersInCluster(std::vector* servers); /** - * @brief 列出集群中的所有chunkserver - * @param[out] chunkservers server信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief List all chunkservers in the cluster + * @param[out] chunkservers A list of server information, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListChunkServersInCluster( - std::vector* chunkservers); + std::vector* chunkservers); /** * @brief list all the chunkservers with poolid in cluster * @param[out] chunkservers chunkserver info * @return succeed return 0; failed return -1; */ - virtual int ListChunkServersInCluster(std::map>* chunkservers); + virtual int ListChunkServersInCluster( + std::map>* chunkservers); /** * @brief set copysets available flag @@ -394,57 +414,54 @@ class MDSClient { virtual int ListUnAvailCopySets(std::vector* copysets); /** - 
* @brief 获取mds的某个metric的值 - * @param metricName metric的名字 - * @param[out] value metric的值,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the value of a metric for mds + * @param metricName The name of the metric + * @param[out] value The value of the metric, valid when the return value + * is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetMetric(const std::string& metricName, uint64_t* value); /** - * @brief 获取mds的某个metric的值 - * @param metricName metric的名子 - * @param[out] value metric的值,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the value of a metric for mds + * @param metricName The name of the metric + * @param[out] value The value of the metric, valid when the return value + * is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetMetric(const std::string& metricName, std::string* value); /** - * @brief 设置userName,访问namespace接口的时候调用 - * @param userName 用户名 + * @brief Set userName; called before accessing the namespace interfaces + * @param userName username */ - void SetUserName(const std::string& userName) { - userName_ = userName; - } + void SetUserName(const std::string& userName) { userName_ = userName; } /** - * @brief 设置password,访问namespace接口的时候调用 - * @param password 密码 + * @brief Set the password; called before accessing the namespace + * interfaces + * @param password password */ - void SetPassword(const std::string& password) { - password_ = password; - } + void SetPassword(const std::string& password) { password_ = password; } /** - * @brief 获取mds地址列表 - * @return mds地址的列表 + * @brief Get mds address list + * @return List of mds addresses */ virtual const std::vector& GetMdsAddrVec() const { return mdsAddrVec_; } virtual const std::map& GetDummyServerMap() - const { + const { return dummyServerMap_; } /** - * @brief 获取当前mds的地址 + * @brief Get the address of the current mds */ virtual std::vector GetCurrentMds(); /** - * @brief 向mds发送rpc触发快速leader均衡 + * @brief Send rpc to mds to trigger fast leader balancing */ virtual int RapidLeaderSchedule(PoolIdType lpid); @@ -457,23 +474,25 @@ class MDSClient { virtual int SetLogicalPoolScanState(PoolIdType lpid, bool scanEnable); /** - * @brief 获取mds在线状态, - * dummyserver在线且dummyserver记录的listen addr - * 与mds地址一致才认为在线 - * @param[out] onlineStatus mds在线状态,返回0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get mds online status; an mds is considered online only when + * its dummyserver is online and the listen addr recorded by the + * dummyserver matches the mds address + * @param[out] onlineStatus mds online status, valid when the return value + * is 0 + * @return returns 0 for success, -1 for failure */ virtual void GetMdsOnlineStatus(std::map* onlineStatus); /** - * @brief 获取指定chunkserver的恢复状态 - * @param[in] cs 需要查询的chunkserver列表 - * @param[out] statusMap 返回各chunkserver对应的恢复状态 - * @return 成功返回0,失败返回-1 + * @brief Get the recovery status of the specified chunkservers + * @param[in] cs List of chunkservers to query + * @param[out] statusMap returns the recovery status corresponding to each + * chunkserver + * @return returns 0 for success, -1 for failure */ int QueryChunkServerRecoverStatus( const std::vector& cs, - std::map *statusMap); + std::map* statusMap); virtual int UpdateFileThrottleParams( const std::string& fileName, const curve::mds::ThrottleParams& params); @@ -482,86 +501,88 @@ class MDSClient { private: /** - * @brief 切换mds - * @return 切换成功返回true,所有mds都失败则返回false + * @brief switch mds + * @return returns true if the switch is successful, and false if all mds + * fail */ bool ChangeMDServer(); /** -
* @brief 向mds发送RPC,为了复用代码 - * @param - * @return 成功返回0,失败返回-1 + * @brief Send RPC to mds; factored out for code reuse + * @param + * @return returns 0 for success, -1 for failure */ template int SendRpcToMds(Request* request, Response* response, T* obp, - void (T::*func)(google::protobuf::RpcController*, - const Request*, Response*, - google::protobuf::Closure*)); + void (T::*func)(google::protobuf::RpcController*, + const Request*, Response*, + google::protobuf::Closure*)); /** - * @brief 获取server上的chunkserver的列表 - * @param request 要发送的request - * @param[out] chunkservers chunkserver信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get a list of chunkservers on the server + * @param request The request to be sent + * @param[out] chunkservers A list of chunkserver information, valid when + * the return value is 0 + * @return returns 0 for success, -1 for failure */ int ListChunkServersOnServer(ListChunkServerRequest* request, std::vector* chunkservers); /** - * @brief 获取chunkserver的详细信息 - * @param request 要发送的request - * @param[out] chunkserver chunkserver的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get detailed information about chunkserver + * @param request The request to be sent + * @param[out] chunkserver The detailed information of chunkserver, valid + * when the return value is 0 + * @return returns 0 for success, -1 for failure */ int GetChunkServerInfo(GetChunkServerInfoRequest* request, - ChunkServerInfo* chunkserver); + ChunkServerInfo* chunkserver); /** - * @brief 获取chunkserver的详细信息 - * @param request 要发送的request - * @param[out] copysets chunkserver上copyset的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the copysets on a chunkserver + * @param request The request to be sent + * @param[out] copysets Details of copysets on chunkserver, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure */ - int GetCopySetsInChunkServer( - GetCopySetsInChunkServerRequest* request, - std::vector* copysets); + int GetCopySetsInChunkServer(GetCopySetsInChunkServerRequest* request, + std::vector* copysets); /** - * @brief 初始化dummy server地址 - * @param dummyPort dummy server端口列表 - * @return 成功返回0,失败返回-1 + * @brief Initialize dummy server address + * @param dummyPort dummy server port list + * @return returns 0 for success, -1 for failure */ int InitDummyServerMap(const std::string& dummyPort); /** - * @brief 通过dummyServer获取mds的监听地址 - * @param dummyAddr dummyServer的地址 - * @param[out] listenAddr mds的监听地址 - * @return 成功返回0,失败返回-1 + * @brief Obtain the listening address of mds through dummyServer + * @param dummyAddr Address of dummyServer + * @param[out] listenAddr mds listening address + * @return returns 0 for success, -1 for failure */ int GetListenAddrFromDummyPort(const std::string& dummyAddr, std::string* listenAddr); - - // 填充signature + // Fill in the signature template void FillUserInfo(T* request); - // 用于发送http请求的client + // Client used to send HTTP requests MetricClient metricClient_; - // 向mds发送RPC的channel + // Channel for sending RPCs to mds brpc::Channel channel_; - // 保存mds地址的vector + // Vector holding the mds addresses std::vector mdsAddrVec_; - // 保存mds地址对应的dummy server的地址 + // Save the address of the dummy server corresponding to the mds address std::map dummyServerMap_; - // 保存当前mds在mdsAddrVec_中的索引 + // Index of the current mds in mdsAddrVec_ int currentMdsIndex_; - // 用户名 + // User name std::string userName_; - // 密码 + // Password std::string password_; - // 避免重复初始化 + // Avoid duplicate initialization bool isInited_; }; } // namespace tool
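For orientation, a minimal sketch of how the MDSClient declared above is typically driven; the addresses, user name and file name are illustrative assumptions, not part of the patch, and only interfaces declared in this header are used:

    #include <iostream>
    #include "src/tools/mds_client.h"

    int main() {
        curve::tool::MDSClient client;
        // Comma-separated addresses; SendRpcToMds() fails over between them.
        if (client.Init("127.0.0.1:6666,127.0.0.1:6667") != 0) {
            std::cout << "Init mds client fail!" << std::endl;
            return -1;
        }
        client.SetUserName("test");  // set before namespace interfaces
        client.SetPassword("123");
        FileInfo fileInfo;  // curve::mds::FileInfo, per the using-declaration
        if (client.GetFileInfo("/test", &fileInfo) != 0) {
            std::cout << "GetFileInfo fail!" << std::endl;
            return -1;
        }
        std::cout << "file length: " << fileInfo.length() << std::endl;
        return 0;
    }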
diff --git a/src/tools/metric_client.cpp b/src/tools/metric_client.cpp index 776347f738..fc5012d58a 100644 --- a/src/tools/metric_client.cpp +++ b/src/tools/metric_client.cpp @@ -30,9 +30,9 @@ DECLARE_uint64(rpcRetryTimes); namespace curve { namespace tool { -MetricRet MetricClient::GetMetric(const std::string &addr, - const std::string &metricName, - std::string *value) { +MetricRet MetricClient::GetMetric(const std::string& addr, + const std::string& metricName, + std::string* value) { brpc::Channel httpChannel; brpc::ChannelOptions options; brpc::Controller cntl; @@ -70,15 +70,16 @@ MetricRet MetricClient::GetMetric(const std::string &addr, res = GetValueFromAttachment(attachment, value); return (res == 0) ? MetricRet::kOK : MetricRet::kOtherErr; } - // 这里不输出错误,因为对mds有切换的可能,把打印的处理交给外部 + // There is no output error here, as there is a possibility of switching + // between mds, and the printing process is handed over to external parties bool notExist = cntl.ErrorCode() == brpc::EHTTP && cntl.http_response().status_code() == kHttpCodeNotFound; return notExist ? MetricRet::kNotFound : MetricRet::kOtherErr; } -MetricRet MetricClient::GetMetricUint(const std::string &addr, - const std::string &metricName, - uint64_t *value) { +MetricRet MetricClient::GetMetricUint(const std::string& addr, + const std::string& metricName, + uint64_t* value) { std::string str; MetricRet res = GetMetric(addr, metricName, &str); if (res != MetricRet::kOK) { @@ -92,9 +93,9 @@ MetricRet MetricClient::GetMetricUint(const std::string &addr, return MetricRet::kOK; } -MetricRet MetricClient::GetConfValueFromMetric(const std::string &addr, - const std::string &metricName, - std::string *confValue) { +MetricRet MetricClient::GetConfValueFromMetric(const std::string& addr, + const std::string& metricName, + std::string* confValue) { std::string jsonString; brpc::Controller cntl; MetricRet res = GetMetric(addr, metricName, &jsonString); @@ -118,8 +119,8 @@ MetricRet MetricClient::GetConfValueFromMetric(const std::string &addr, return MetricRet::kOK; } -int MetricClient::GetValueFromAttachment(const std::string &attachment, - std::string *value) { +int MetricClient::GetValueFromAttachment(const std::string& attachment, + std::string* value) { auto pos = attachment.find(":"); if (pos == std::string::npos) { std::cout << "parse response attachment fail!" 
<< std::endl; diff --git a/src/tools/metric_client.h b/src/tools/metric_client.h index 94e29a545f..103f8da7f3 100644 --- a/src/tools/metric_client.h +++ b/src/tools/metric_client.h @@ -25,65 +25,68 @@ #include #include + #include #include -#include "src/tools/common.h" + #include "src/common/string_util.h" +#include "src/tools/common.h" #include "src/tools/curve_tool_define.h" namespace curve { namespace tool { enum class MetricRet { - // 成功 + // Success kOK = 0, - // metric未找到 + // Metric not found kNotFound = -1, - // 其他错误 - kOtherErr = -2, + // Other errors + kOtherErr = -2, }; const int kHttpCodeNotFound = 404; class MetricClient { public: - virtual ~MetricClient() {} + virtual ~MetricClient() {} - /** - * @brief 从指定地址获取metric - * @param addr 要访问的地址 - * @param metricName 要获取的metric name - * @param[out] value metric的值 - * @return 错误码 + /** + * @brief Get metric from specified address + * @param addr Address to access + * @param metricName The metric name to obtain + * @param[out] value The value of metric + * @return error code */ virtual MetricRet GetMetric(const std::string& addr, const std::string& metricName, std::string* value); - /** - * @brief 从指定地址获取metric,并转换成uint - * @param addr 要访问的地址 - * @param metricName 要获取的metric name - * @param[out] value metric的值 - * @return 错误码 + /** + * @brief retrieves metric from the specified address and converts it to + * uint + * @param addr Address to access + * @param metricName The metric name to obtain + * @param[out] value The value of metric + * @return error code */ virtual MetricRet GetMetricUint(const std::string& addr, const std::string& metricName, uint64_t* value); /** - * @brief 从metric获取配置的值 - * @param addr 要访问的地址 - * @param metricName 要获取的metric name - * @param[out] confValue metric中配置的值 - * @return 错误码 + * @brief Get the configured value from metric + * @param addr Address to access + * @param metricName The metric name to obtain + * @param[out] confValue The value configured in metric + * @return error code */ virtual MetricRet GetConfValueFromMetric(const std::string& addr, const std::string& metricName, std::string* confValue); private: - // 从response attachment解析出metric值 + // Parse the metric value from the response attachment int GetValueFromAttachment(const std::string& attachment, std::string* value); }; diff --git a/src/tools/metric_name.h b/src/tools/metric_name.h index d284694aba..48a2eb7d56 100644 --- a/src/tools/metric_name.h +++ b/src/tools/metric_name.h @@ -22,13 +22,12 @@ #include -#include #include +#include #ifndef SRC_TOOLS_METRIC_NAME_H_ #define SRC_TOOLS_METRIC_NAME_H_ - namespace curve { namespace tool { @@ -37,7 +36,7 @@ const char kCurveVersionMetricName[] = "curve_version"; // snapshot clone server metric name const char kSnapshotCloneConfMetricName[] = - "snapshotcloneserver_config_server_address"; + "snapshotcloneserver_config_server_address"; const char kSnapshotCloneStatusMetricName[] = "snapshotcloneserver_status"; const char kSnapshotCloneStatusActive[] = "active"; @@ -50,100 +49,92 @@ const char kSechduleOpMetricpPrefix[] = "mds_scheduler_metric_"; const char kMdsListenAddrMetricName[] = "mds_config_mds_listen_addr"; const char kMdsStatusMetricName[] = "mds_status"; const char kMdsStatusLeader[] = "leader"; -// operator名称 +// operator Name const char kTotalOpName[] = "operator"; const char kChangeOpName[] = "change_peer"; const char kAddOpName[] = "add_peer"; const char kRemoveOpName[] = "remove_peer"; const char kTransferOpName[] = "transfer_leader"; - -inline std::string GetPoolTotalChunkSizeName( 
- const std::string& poolName) { - std::string tmpName = kLogicalPoolMetricPrefix + - poolName + "_chunkSizeTotalBytes"; +inline std::string GetPoolTotalChunkSizeName(const std::string& poolName) { + std::string tmpName = + kLogicalPoolMetricPrefix + poolName + "_chunkSizeTotalBytes"; std::string metricName; bvar::to_underscored_name(&metricName, tmpName); return metricName; } -inline std::string GetPoolUsedChunkSizeName( - const std::string& poolName) { - std::string tmpName = kLogicalPoolMetricPrefix + - poolName + "_chunkSizeUsedBytes"; +inline std::string GetPoolUsedChunkSizeName(const std::string& poolName) { + std::string tmpName = + kLogicalPoolMetricPrefix + poolName + "_chunkSizeUsedBytes"; std::string metricName; bvar::to_underscored_name(&metricName, tmpName); return metricName; } -inline std::string GetPoolLogicalCapacityName( - const std::string& poolName) { - std::string tmpName = kLogicalPoolMetricPrefix + - poolName + "_logicalCapacity"; +inline std::string GetPoolLogicalCapacityName(const std::string& poolName) { + std::string tmpName = + kLogicalPoolMetricPrefix + poolName + "_logicalCapacity"; std::string metricName; bvar::to_underscored_name(&metricName, tmpName); return metricName; } -inline std::string GetPoolLogicalAllocName( - const std::string& poolName) { - std::string tmpName = kLogicalPoolMetricPrefix + - poolName + "_logicalAlloc"; +inline std::string GetPoolLogicalAllocName(const std::string& poolName) { + std::string tmpName = kLogicalPoolMetricPrefix + poolName + "_logicalAlloc"; std::string metricName; bvar::to_underscored_name(&metricName, tmpName); return metricName; } inline std::string GetCSLeftChunkName(const std::string& csAddr) { - std::string tmpName = kChunkServerMetricPrefix + - csAddr + "_chunkfilepool_left"; + std::string tmpName = + kChunkServerMetricPrefix + csAddr + "_chunkfilepool_left"; std::string metricName; bvar::to_underscored_name(&metricName, tmpName); return metricName; } inline std::string GetCSLeftWalSegmentName(const std::string& csAddr) { - std::string tmpName = kChunkServerMetricPrefix + - csAddr + "_walfilepool_left"; + std::string tmpName = + kChunkServerMetricPrefix + csAddr + "_walfilepool_left"; std::string metricName; bvar::to_underscored_name(&metricName, tmpName); return metricName; } inline std::string GetUseWalPoolName(const std::string& csAddr) { - std::string tmpName = kChunkServerMetricPrefix + - csAddr + "_config_copyset_raft_log_uri"; + std::string tmpName = + kChunkServerMetricPrefix + csAddr + "_config_copyset_raft_log_uri"; std::string metricName; bvar::to_underscored_name(&metricName, tmpName); return metricName; } inline std::string GetUseChunkFilePoolAsWalPoolName(const std::string& csAddr) { - std::string tmpName = kChunkServerMetricPrefix + - csAddr + "_config_walfilepool_use_chunk_file_pool"; + std::string tmpName = kChunkServerMetricPrefix + csAddr + + "_config_walfilepool_use_chunk_file_pool"; std::string metricName; bvar::to_underscored_name(&metricName, tmpName); return metricName; } inline std::string GetOpNumMetricName(const std::string& opName) { - std::string tmpName = kSechduleOpMetricpPrefix + - opName + "_num"; + std::string tmpName = kSechduleOpMetricpPrefix + opName + "_num"; std::string metricName; bvar::to_underscored_name(&metricName, tmpName); return metricName; } inline bool SupportOpName(const std::string& opName) { - return opName == kTotalOpName || opName == kChangeOpName - || opName == kAddOpName || opName == kRemoveOpName - || opName == kTransferOpName; + return opName == 
kTotalOpName || opName == kChangeOpName || + opName == kAddOpName || opName == kRemoveOpName || + opName == kTransferOpName; } inline void PrintSupportOpName() { - std::cout << kTotalOpName << ", " << kChangeOpName - << ", " << kAddOpName << ", " << kRemoveOpName - << ", " << kTransferOpName << std::endl; + std::cout << kTotalOpName << ", " << kChangeOpName << ", " << kAddOpName + << ", " << kRemoveOpName << ", " << kTransferOpName << std::endl; } } // namespace tool diff --git a/src/tools/namespace_tool.cpp b/src/tools/namespace_tool.cpp index 8d6119b75d..b0b039a835 100644 --- a/src/tools/namespace_tool.cpp +++ b/src/tools/namespace_tool.cpp @@ -28,8 +28,9 @@ DEFINE_string(fileName, "", "file name"); DEFINE_string(dirName, "", "directory name"); -DEFINE_string(expireTime, "7d", "Time for file in recyclebin exceed expire time " // NOLINT - "will be deleted (default: 7d)"); +DEFINE_string(expireTime, "7d", + "Time for file in recyclebin exceed expire time " // NOLINT + "will be deleted (default: 7d)"); DEFINE_bool(forcedelete, false, "force delete file or not"); DEFINE_uint64(fileLength, 20, "file length (GB)"); DEFINE_uint64(newSize, 30, "the new size of expanded volume(GB)"); @@ -37,11 +38,14 @@ DEFINE_string(poolset, "", "specify the poolset name"); DEFINE_bool(isTest, false, "is unit test or not"); DEFINE_uint64(offset, 0, "offset to query chunk location"); DEFINE_uint64(rpc_timeout, 3000, "millisecond for rpc timeout"); -DEFINE_bool(showAllocSize, true, "If specified, the allocated size will not be computed"); // NOLINT -DEFINE_bool(showFileSize, true, "If specified, the file size will not be computed"); // NOLINT +DEFINE_bool(showAllocSize, true, + "If specified, the allocated size will not be computed"); // NOLINT +DEFINE_bool(showFileSize, true, + "If specified, the file size will not be computed"); // NOLINT DECLARE_string(mdsAddr); -DEFINE_bool(showAllocMap, false, "If specified, the allocated size in each" - " logical pool will be print"); +DEFINE_bool(showAllocMap, false, + "If specified, the allocated size in each" + " logical pool will be print"); DEFINE_string(throttleType, "", "throttle type"); DEFINE_uint64(limit, 0, "throttle limit"); @@ -66,19 +70,15 @@ int NameSpaceTool::Init() { } bool NameSpaceTool::SupportCommand(const std::string& command) { - return (command == kGetCmd || command == kListCmd - || command == kSegInfoCmd - || command == kDeleteCmd - || command == kCreateCmd - || command == kExtendCmd - || command == kCleanRecycleCmd - || command == kChunkLocatitonCmd - || command == kUpdateThrottle - || command == kListPoolsets); + return (command == kGetCmd || command == kListCmd || + command == kSegInfoCmd || command == kDeleteCmd || + command == kCreateCmd || command == kExtendCmd || + command == kCleanRecycleCmd || command == kChunkLocatitonCmd || + command == kUpdateThrottle || command == kListPoolsets); } -// 根据命令行参数选择对应的操作 -int NameSpaceTool::RunCommand(const std::string &cmd) { +// Select the corresponding operation based on command line parameters +int NameSpaceTool::RunCommand(const std::string& cmd) { if (Init() != 0) { std::cout << "Init NameSpaceTool failed" << std::endl; return -1; @@ -92,12 +92,12 @@ int NameSpaceTool::RunCommand(const std::string &cmd) { } else if (cmd == kSegInfoCmd) { return PrintSegmentInfo(fileName); } else if (cmd == kDeleteCmd) { - // 单元测试不判断输入 + // Unit testing does not judge input if (FLAGS_isTest) { return core_->DeleteFile(fileName, FLAGS_forcedelete); } - std::cout << "Are you sure you want to delete " - << fileName << "?" 
<< "(yes/no)" << std::endl; + std::cout << "Are you sure you want to delete " << fileName << "?" + << "(yes/no)" << std::endl; std::string str; std::cin >> str; if (str == "yes") { @@ -163,29 +163,71 @@ int NameSpaceTool::RunCommand(const std::string &cmd) { } } -void NameSpaceTool::PrintHelp(const std::string &cmd) { +void NameSpaceTool::PrintHelp(const std::string& cmd) { std::cout << "Example: " << std::endl; if (cmd == kGetCmd || cmd == kListCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test [-mdsAddr=127.0.0.1:6666]" // NOLINT - " [-showAllocSize=false] [-showFileSize=false] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + std::cout << "curve_ops_tool " << cmd + << " -fileName=/test [-mdsAddr=127.0.0.1:6666]" // NOLINT + " [-showAllocSize=false] [-showFileSize=false] " + "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT } else if (cmd == kSegInfoCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + std::cout << "curve_ops_tool " << cmd + << " -fileName=/test [-mdsAddr=127.0.0.1:6666] " + "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT } else if (cmd == kCleanRecycleCmd) { - std::cout << "curve_ops_tool " << cmd << " [-fileName=/cinder] [-expireTime=1(s|m|h|d|M|y)] [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT - std::cout << "If -fileName is specified, delete the files in recyclebin that the original directory is fileName" << std::endl; // NOLINT - std::cout << "expireTime: s=second, m=minute, h=hour, d=day, M=month, y=year" << std::endl; // NOLINT + std::cout + << "curve_ops_tool " << cmd + << " [-fileName=/cinder] [-expireTime=1(s|m|h|d|M|y)] " + "[-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT + std::cout << "If -fileName is specified, delete the files in " + "recyclebin that the original directory is fileName" + << std::endl; // NOLINT + std::cout + << "expireTime: s=second, m=minute, h=hour, d=day, M=month, y=year" + << std::endl; // NOLINT } else if (cmd == kCreateCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test -userName=test -password=123 -fileLength=20 [--poolset=default] [-stripeUnit=32768] [-stripeCount=32] [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT - std::cout << "curve_ops_tool " << cmd << " -dirName=/dir -userName=test -password=123 [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT - std::cout << "The first example can create a volume and the second create a directory." << std::endl; // NOLINT + std::cout + << "curve_ops_tool " << cmd + << " -fileName=/test -userName=test -password=123 -fileLength=20 " + "[--poolset=default] [-stripeUnit=32768] [-stripeCount=32] " + "[-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT + std::cout + << "curve_ops_tool " << cmd + << " -dirName=/dir -userName=test -password=123 " + "[-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT + std::cout << "The first example can create a volume and the second " + "create a directory." 
+ << std::endl; // NOLINT } else if (cmd == kExtendCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test -userName=test -password=123 -newSize=30 [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + std::cout + << "curve_ops_tool " << cmd + << " -fileName=/test -userName=test -password=123 -newSize=30 " + "[-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT } else if (cmd == kDeleteCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test -userName=test -password=123 -forcedelete=true [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + std::cout << "curve_ops_tool " << cmd + << " -fileName=/test -userName=test -password=123 " + "-forcedelete=true [-mdsAddr=127.0.0.1:6666] " + "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT } else if (cmd == kChunkLocatitonCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test -offset=16777216 [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + std::cout + << "curve_ops_tool " << cmd + << " -fileName=/test -offset=16777216 [-mdsAddr=127.0.0.1:6666] " + "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT } else if (cmd == kUpdateThrottle) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test -throttleType=(IOPS_TOTAL|IOPS_READ|IOPS_WRITE|BPS_TOTAL|BPS_READ|BPS_WRITE) -limit=20000 [-burst=30000] [-burstLength=10]" << std::endl; // NOLINT + std::cout + << "curve_ops_tool " << cmd + << " -fileName=/test " + "-throttleType=(IOPS_TOTAL|IOPS_READ|IOPS_WRITE|BPS_TOTAL|BPS_" + "READ|BPS_WRITE) -limit=20000 [-burst=30000] [-burstLength=10]" + << std::endl; // NOLINT } else { std::cout << "command not found!" << std::endl; } @@ -204,7 +246,8 @@ int NameSpaceTool::PrintFileInfoAndActualSize(const std::string& fullName, const FileInfo& fileInfo) { PrintFileInfo(fileInfo); int ret = GetAndPrintAllocSize(fullName); - // 如果是目录的话,计算目录中的文件大小(用户创建时指定的) + // If it is a directory, calculate the file size in the directory (specified + // by the user when creating it) if (fileInfo.filetype() == curve::mds::FileType::INODE_DIRECTORY) { ret = GetAndPrintFileSize(fullName); } @@ -255,14 +298,14 @@ void NameSpaceTool::PrintFileInfo(const FileInfo& fileInfo) { curve::common::SplitString(fileInfoStr, "\n", &items); for (const auto& item : items) { if (item.compare(0, 5, "ctime") == 0) { - // ctime是微妙,打印的时候只打印到秒 + // ctime is in microseconds; only print it down to the second time_t ctime = fileInfo.ctime() / 1000000; std::string standard; curve::common::TimeUtility::TimeStampToStandard(ctime, &standard); std::cout << "ctime: " << standard << std::endl; continue; } - // 把length转换成GB + // Convert length to GB if (item.compare(0, 6, "length") == 0) { uint64_t length = fileInfo.length(); double fileSize = static_cast(length) / curve::mds::kGB; @@ -315,15 +358,15 @@ int NameSpaceTool::PrintPoolsets() { for (const auto& poolset : poolsets) { const std::string str = absl::StrFormat( - "id: %3d, name: %s, type: %s, desc: `%s`", poolset.poolsetid(), - poolset.poolsetname(), poolset.type(), poolset.desc()); + "id: %3d, name: %s, type: %s, desc: `%s`", poolset.poolsetid(), + poolset.poolsetname(), poolset.type(), poolset.desc()); std::cout << str << std::endl; } return 0; } -int NameSpaceTool::PrintSegmentInfo(const std::string &fileName) { +int NameSpaceTool::PrintSegmentInfo(const std::string& fileName) { std::vector segments; if (core_->GetFileSegments(fileName, &segments) != 0) { std::cout <<
"GetFileSegments fail!" << std::endl; @@ -358,14 +401,13 @@ void NameSpaceTool::PrintSegment(const PageFileSegment& segment) { if (segment.chunks(i).has_copysetid()) { copysetId = segment.chunks(i).copysetid(); } - std::cout << "chunkID: " << chunkId << ", copysetID: " - << copysetId << std::endl; + std::cout << "chunkID: " << chunkId << ", copysetID: " << copysetId + << std::endl; } } - int NameSpaceTool::PrintChunkLocation(const std::string& fileName, - uint64_t offset) { + uint64_t offset) { uint64_t chunkId; std::pair copyset; if (core_->QueryChunkCopyset(fileName, offset, &chunkId, ©set) != 0) { @@ -375,13 +417,12 @@ int NameSpaceTool::PrintChunkLocation(const std::string& fileName, uint32_t logicPoolId = copyset.first; uint32_t copysetId = copyset.second; uint64_t groupId = (static_cast(logicPoolId) << 32) | copysetId; - std::cout << "chunkId: " << chunkId - << ", logicalPoolId: " << logicPoolId - << ", copysetId: " << copysetId - << ", groupId: " << groupId << std::endl; + std::cout << "chunkId: " << chunkId << ", logicalPoolId: " << logicPoolId + << ", copysetId: " << copysetId << ", groupId: " << groupId + << std::endl; std::vector csLocs; - int res = core_->GetChunkServerListInCopySet(logicPoolId, - copysetId, &csLocs); + int res = + core_->GetChunkServerListInCopySet(logicPoolId, copysetId, &csLocs); if (res != 0) { std::cout << "GetChunkServerListInCopySet fail!" << std::endl; return -1; @@ -400,7 +441,7 @@ int NameSpaceTool::PrintChunkLocation(const std::string& fileName, } void NameSpaceTool::TrimEndingSlash(std::string* fileName) { - // 如果最后面有/,去掉 + // If there is/at the end, remove it if (fileName->size() > 1 && fileName->back() == '/') { fileName->pop_back(); } diff --git a/src/tools/namespace_tool.h b/src/tools/namespace_tool.h index 1af7f8ca8f..3594afafa6 100644 --- a/src/tools/namespace_tool.h +++ b/src/tools/namespace_tool.h @@ -26,22 +26,22 @@ #include #include -#include -#include -#include -#include #include #include +#include +#include +#include #include +#include #include "proto/nameserver2.pb.h" #include "proto/topology.pb.h" -#include "src/common/timeutility.h" #include "src/common/string_util.h" +#include "src/common/timeutility.h" #include "src/mds/common/mds_define.h" -#include "src/tools/namespace_tool_core.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" +#include "src/tools/namespace_tool_core.h" using curve::mds::FileInfo; using curve::mds::PageFileSegment; @@ -52,71 +52,72 @@ namespace tool { class NameSpaceTool : public CurveTool { public: - explicit NameSpaceTool(std::shared_ptr core) : - core_(core), inited_(false) {} + explicit NameSpaceTool(std::shared_ptr core) + : core_(core), inited_(false) {} /** - * @brief 打印用法 - * @param command:查询的命令 - * @return 无 + * @brief printing usage + * @param command: Query command + * @return None */ - void PrintHelp(const std::string &command) override; + void PrintHelp(const std::string& command) override; /** - * @brief 执行命令 - * @param command:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param command: The command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &command) override; + int RunCommand(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); private: - // 初始化 + // Initialize int 
Init(); - // 打印fileInfo和文件占用的实际空间 + // Print fileInfo and the actual space occupied by the file int PrintFileInfoAndActualSize(const std::string& fileName); - // 打印fileInfo和文件占用的实际空间 + // Print fileInfo and the actual space occupied by the file int PrintFileInfoAndActualSize(const std::string& fullName, const FileInfo& fileInfo); - // 打印目录中的文件信息 + // Print file information in the directory int PrintListDir(const std::string& dirName); - // 打印出文件的segment信息 - int PrintSegmentInfo(const std::string &fileName); + // Print out the segment information of the file + int PrintSegmentInfo(const std::string& fileName); - // 打印fileInfo,把时间转化为易读的格式输出 + // Print fileInfo and convert the time into a readable format for output void PrintFileInfo(const FileInfo& fileInfo); - // 打印PageFileSegment,把同一个chunk的信息打在同一行 + // Print PageFileSegment, putting the information of the same chunk on the + // same line void PrintSegment(const PageFileSegment& segment); - // 打印chunk的位置信息 - int PrintChunkLocation(const std::string& fileName, - uint64_t offset); + // Print the location information of the chunk + int PrintChunkLocation(const std::string& fileName, uint64_t offset); - // 打印文件的分配大小 + // Print the allocated size of the file int GetAndPrintAllocSize(const std::string& fileName); - // 打印目录的file size + // Print the file size of the directory int GetAndPrintFileSize(const std::string& fileName); - // 目前curve mds不支持/test/格式的文件名,需要把末尾的/去掉 + // Currently, curve mds does not support file names in the /test/ format, + // so the trailing '/' needs to be removed void TrimEndingSlash(std::string* fileName); int PrintPoolsets(); private: - // 核心逻辑 + // Core logic std::shared_ptr core_; - // 是否初始化成功过 + // Whether initialization has succeeded bool inited_; }; } // namespace tool diff --git a/src/tools/namespace_tool_core.cpp b/src/tools/namespace_tool_core.cpp index b69a6ecacc..4c1f8ff1a4 100644 --- a/src/tools/namespace_tool_core.cpp +++ b/src/tools/namespace_tool_core.cpp @@ -27,8 +27,8 @@ DEFINE_string(password, "", "password of administrator"); namespace curve { namespace tool { -NameSpaceToolCore::NameSpaceToolCore(std::shared_ptr client) : - client_(client) { +NameSpaceToolCore::NameSpaceToolCore(std::shared_ptr client) + : client_(client) { client_->SetUserName(FLAGS_userName); client_->SetPassword(FLAGS_password); } @@ -37,7 +37,7 @@ int NameSpaceToolCore::Init(const std::string& mdsAddr) { return client_->Init(mdsAddr); } -int NameSpaceToolCore::GetFileInfo(const std::string &fileName, +int NameSpaceToolCore::GetFileInfo(const std::string& fileName, FileInfo* fileInfo) { return client_->GetFileInfo(fileName, fileInfo); } @@ -48,11 +48,10 @@ int NameSpaceToolCore::ListDir(const std::string& dirName, } int NameSpaceToolCore::GetChunkServerListInCopySet( - const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId, - std::vector* csLocs) { - return client_->GetChunkServerListInCopySet(logicalPoolId, - copysetId, csLocs); + const PoolIdType& logicalPoolId, const CopySetIdType& copysetId, + std::vector* csLocs) { + return client_->GetChunkServerListInCopySet(logicalPoolId, copysetId, + csLocs); } int NameSpaceToolCore::DeleteFile(const std::string& fileName, @@ -65,7 +64,7 @@ int NameSpaceToolCore::CreateFile(const CreateFileContext& ctx) { } int NameSpaceToolCore::ExtendVolume(const std::string& fileName, - uint64_t newSize) { + uint64_t newSize) { return client_->ExtendVolume(fileName, newSize); } int NameSpaceToolCore::GetAllocatedSize(const std::string& fileName, @@ -85,7 +84,7 @@ int NameSpaceToolCore::GetFileSize(const
std::string& fileName, } int NameSpaceToolCore::GetFileSegments(const std::string& fileName, - std::vector* segments) { + std::vector* segments) { FileInfo fileInfo; int res = GetFileInfo(fileName, &fileInfo); if (res != 0) { @@ -96,28 +95,30 @@ int NameSpaceToolCore::GetFileSegments(const std::string& fileName, } int NameSpaceToolCore::GetFileSegments(const std::string& fileName, - const FileInfo& fileInfo, - std::vector* segments) { - // 只能获取page file的segment + const FileInfo& fileInfo, + std::vector* segments) { + // Only segments of page files can be obtained if (fileInfo.filetype() != curve::mds::FileType::INODE_PAGEFILE) { std::cout << "It is not a page file!" << std::endl; return -1; } - // 获取文件的segment数,并打印每个segment的详细信息 + // Obtain the number of segments in the file and print detailed information + // for each segment uint64_t segmentNum = fileInfo.length() / fileInfo.segmentsize(); uint64_t segmentSize = fileInfo.segmentsize(); for (uint64_t i = 0; i < segmentNum; i++) { // load segment PageFileSegment segment; - GetSegmentRes res = client_->GetSegmentInfo(fileName, - i * segmentSize, &segment); + GetSegmentRes res = + client_->GetSegmentInfo(fileName, i * segmentSize, &segment); if (res == GetSegmentRes::kOK) { segments->emplace_back(segment); } else if (res == GetSegmentRes::kSegmentNotAllocated) { continue; } else if (res == GetSegmentRes::kFileNotExists) { - // 查询过程中文件被删掉了,清空segment并返回0 + // During the query the file was deleted; clear the segments and + // return 0 segments->clear(); return 0; } else { @@ -137,8 +138,7 @@ int NameSpaceToolCore::CleanRecycleBin(const std::string& dirName, return -1; } - auto needDelete = [](const FileInfo &fileInfo, - uint64_t now, + auto needDelete = [](const FileInfo& fileInfo, uint64_t now, uint64_t expireTime) -> bool { auto filename = fileInfo.filename(); std::vector items; @@ -147,9 +147,9 @@ int NameSpaceToolCore::CleanRecycleBin(const std::string& dirName, uint64_t dtime; auto n = items.size(); auto id = std::to_string(fileInfo.id()); - if (n >= 2 && items[n - 2] == id - && ::curve::common::StringToUll(items[n - 1], &dtime) - && now - dtime < expireTime) { + if (n >= 2 && items[n - 2] == id && + ::curve::common::StringToUll(items[n - 1], &dtime) && + now - dtime < expireTime) { return false; } @@ -210,10 +210,9 @@ int NameSpaceToolCore::UpdateFileThrottle(const std::string& fileName, return client_->UpdateFileThrottleParams(fileName, params); } -int NameSpaceToolCore::QueryChunkCopyset(const std::string& fileName, - uint64_t offset, - uint64_t* chunkId, - std::pair* copyset) { +int NameSpaceToolCore::QueryChunkCopyset( + const std::string& fileName, uint64_t offset, uint64_t* chunkId, + std::pair* copyset) { if (!chunkId || !copyset) { std::cout << "The argument is a null pointer!" << std::endl; return -1; @@ -229,11 +228,11 @@ int NameSpaceToolCore::QueryChunkCopyset(const std::string& fileName, return -1; } uint64_t segmentSize = fileInfo.segmentsize(); - // segment对齐的offset + // segment-aligned offset uint64_t segOffset = (offset / segmentSize) * segmentSize; PageFileSegment segment; - GetSegmentRes segRes = client_->GetSegmentInfo(fileName, - segOffset, &segment); + GetSegmentRes segRes = + client_->GetSegmentInfo(fileName, segOffset, &segment); if (segRes != GetSegmentRes::kOK) { if (segRes == GetSegmentRes::kSegmentNotAllocated) { std::cout << "Chunk has not been allocated!"
<< std::endl; @@ -243,7 +242,7 @@ int NameSpaceToolCore::QueryChunkCopyset(const std::string& fileName, return -1; } } - // 在segment里面的chunk的索引 + // Index of chunk in segment if (segment.chunksize() == 0) { std::cout << "No chunks in segment!" << std::endl; return -1; diff --git a/src/tools/namespace_tool_core.h b/src/tools/namespace_tool_core.h index febf0882f8..60e702e3f7 100644 --- a/src/tools/namespace_tool_core.h +++ b/src/tools/namespace_tool_core.h @@ -26,28 +26,28 @@ #include #include -#include -#include -#include -#include #include #include +#include +#include +#include #include +#include #include "proto/nameserver2.pb.h" #include "proto/topology.pb.h" -#include "src/common/timeutility.h" -#include "src/common/string_util.h" #include "src/common/fs_util.h" +#include "src/common/string_util.h" +#include "src/common/timeutility.h" #include "src/mds/common/mds_define.h" #include "src/tools/mds_client.h" +using curve::common::ChunkServerLocation; using curve::mds::FileInfo; +using curve::mds::PageFileChunkInfo; using curve::mds::PageFileSegment; using curve::mds::StatusCode; -using curve::mds::PageFileChunkInfo; using curve::mds::topology::kTopoErrCodeSuccess; -using curve::common::ChunkServerLocation; namespace curve { namespace tool { @@ -60,107 +60,116 @@ class NameSpaceToolCore { virtual ~NameSpaceToolCore() = default; /** - * @brief 初始化mds client - * @param mdsAddr mds的地址,支持多地址,用","分隔 - * @return 成功返回0,失败返回-1 + * @brief Initialize mds client + * @param mdsAddr Address of mds, supporting multiple addresses separated by + * ',' + * @return returns 0 for success, -1 for failure */ virtual int Init(const std::string& mdsAddr); /** - * @brief 获取文件fileInfo - * @param fileName 文件名 - * @param[out] fileInfo 文件fileInfo,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get file fileInfo + * @param fileName File name + * @param[out] fileInfo file fileInfo, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetFileInfo(const std::string& fileName, FileInfo* fileInfo); /** - * @brief 将目录下所有的fileInfo列出来 - * @param dirName 目录名 - * @param[out] files 目录下的所有文件fileInfo,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief List all fileInfo in the directory + * @param dirName directory name + * @param[out] files All fileInfo in the directory, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListDir(const std::string& dirName, std::vector* files); /** - * @brief 获取copyset中的chunkserver列表 - * @param logicalPoolId 逻辑池id - * @param copysetId copyset id - * @param[out] csLocs chunkserver位置的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the list of chunkservers in the copyset + * @param logicalPoolId Logical Pool ID + * @param copysetId copyset ID + * @param[out] csLocs List of chunkserver locations, valid when the return + * value is 0 + * @return returns 0 for success, -1 for failure */ - virtual int GetChunkServerListInCopySet(const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId, - std::vector* csLocs); + virtual int GetChunkServerListInCopySet( + const PoolIdType& logicalPoolId, const CopySetIdType& copysetId, + std::vector* csLocs); /** - * @brief 删除文件 - * @param fileName 文件名 - * @param forcedelete 是否强制删除 - * @return 成功返回0,失败返回-1 + * @brief Delete file + * @param fileName File name + * @param forcedelete Whether to force deletion + * @return returns 0 for success, -1 for failure */ virtual int DeleteFile(const std::string& fileName, bool forcedelete = false); /** - * @brief create
pageFile or directory - * @param fileName file name or dir name - * @param length 文件长度 - * @param normalFile is file or dir - * @param stripeUnit stripe unit size - * @param stripeCount the amount of stripes - * @return 成功返回0,失败返回-1 + * @brief create pageFile or directory + * @param fileName file name or dir name + * @param length File length + * @param normalFile is file or dir + * @param stripeUnit stripe unit size + * @param stripeCount the amount of stripes + * @return returns 0 for success, -1 for failure */ virtual int CreateFile(const CreateFileContext& ctx); - /** - * @brief 扩容卷 - * @param fileName 文件名 - * @param newSize 扩容后的文件长度 - * @return 成功返回0,失败返回-1 + /** + * @brief Expand the volume + * @param fileName File name + * @param newSize The file length after expansion + * @return returns 0 for success, -1 for failure */ virtual int ExtendVolume(const std::string& fileName, uint64_t newSize); /** - * @brief 计算文件或目录实际分配的空间 - * @param fileName 文件名 - * @param[out] allocSize 文件或目录已分配大小,返回值为0是有效 - * @param[out] allocMap 在每个池子的分配量,返回值0时有效 - * @return 成功返回0,失败返回-1 + * @brief Calculate the actual allocated space of a file or directory + * @param fileName File name + * @param[out] allocSize Allocated size of the file or directory, valid + * when the return value is 0 + * @param[out] allocMap The allocated amount in each pool, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetAllocatedSize(const std::string& fileName, uint64_t* allocSize, AllocMap* allocMap = nullptr); /** - * @brief 返回文件或目录的中的文件的用户申请的大小 - * @param fileName 文件名 - * @param[out] fileSize 文件或目录中用户申请的大小,返回值为0是有效 - * @return 成功返回0,失败返回-1 + * @brief Returns the user requested size of files in a file or directory + * @param fileName File name + * @param[out] fileSize The size requested by the user in the file or + * directory, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetFileSize(const std::string& fileName, uint64_t* fileSize); /** - * @brief 获取文件的segment信息并输出到segments里面 - * @param fileName 文件名 - * @param[out] segments 文件segment的列表 - * @return 返回文件实际分配大小,失败则为-1 + * @brief Get the segment information of the file and output it to segments + * @param fileName File name + * @param[out] segments List of segments in the file + * @return returns the actual allocated size of the file, if it fails, it + * will be -1 */ virtual int GetFileSegments(const std::string& fileName, - std::vector* segments); + std::vector* segments); /** - * @brief 查询offset对应的chunk的id和所属的copyset - * @param fileName 文件名 - * @param offset 文件中的偏移 - * @param[out] chunkId chunkId,返回值为0时有效 - * @param[out] copyset chunk对应的copyset,是logicalPoolId和copysetId的pair - * @return 成功返回0,失败返回-1 + * @brief Query the ID of the chunk corresponding to the offset and the + * copyset it belongs to + * @param fileName File name + * @param offset Offset in file + * @param[out] chunkId chunkId, valid when the return value is 0 + * @param[out] copyset The copyset the chunk belongs to, a pair of + * logicalPoolId and copysetId + * @return returns 0 for success, -1 for failure */ virtual int QueryChunkCopyset(const std::string& fileName, uint64_t offset, - uint64_t* chunkId, - std::pair* copyset); + uint64_t* chunkId, + std::pair* copyset); /** * @brief clean recycle bin @@ -174,25 +183,24 @@ class NameSpaceToolCore { virtual int UpdateFileThrottle(const std::string& fileName, const std::string& throttleType, - const uint64_t limit, - const int64_t
burst, + const uint64_t limit, const int64_t burst, const int64_t burstLength); virtual int ListPoolset(std::vector* poolsets); private: /** - * @brief 获取文件的segment信息并输出到segments里面 - * @param fileName 文件名 - * @param fileInfo 文件的fileInfo - * @param[out] segments 文件segment的列表 - * @return 返回文件实际分配大小,失败则为-1 + * @brief Get the segment information of the file and output it to segments + * @param fileName File name + * @param fileInfo The fileInfo of the file + * @param[out] segments List of segments in the file + * @return returns the actual allocated size of the file, if it fails, it + * will be -1 */ - int GetFileSegments(const std::string& fileName, - const FileInfo& fileInfo, + int GetFileSegments(const std::string& fileName, const FileInfo& fileInfo, std::vector* segments); - // 向mds发送RPC的client + // Client sending RPC to mds std::shared_ptr client_; }; } // namespace tool diff --git a/src/tools/raft_log_tool.cpp b/src/tools/raft_log_tool.cpp index a4fb97e142..cbe40eb2b5 100644 --- a/src/tools/raft_log_tool.cpp +++ b/src/tools/raft_log_tool.cpp @@ -35,33 +35,31 @@ enum class CheckSumType { CHECKSUM_CRC32 = 1, }; -inline bool VerifyCheckSum(int type, - const char* data, size_t len, uint32_t value) { +inline bool VerifyCheckSum(int type, const char* data, size_t len, + uint32_t value) { CheckSumType checkSunType = static_cast(type); switch (checkSunType) { - case CheckSumType::CHECKSUM_MURMURHASH32: - return (value == braft::murmurhash32(data, len)); - case CheckSumType::CHECKSUM_CRC32: - return (value == braft::crc32(data, len)); - default: - std::cout << "Unknown checksum_type=" << type <Fstat(fd_, &stBuf) != 0) { - std::cout << "Fail to get the stat of " << fileName - << ", " << berror() << std::endl; + std::cout << "Fail to get the stat of " << fileName << ", " << berror() + << std::endl; localFS_->Close(fd_); return -1; } @@ -135,9 +133,7 @@ int SegmentParser::Init(const std::string& fileName) { return 0; } -void SegmentParser::UnInit() { - localFS_->Close(fd_); -} +void SegmentParser::UnInit() { localFS_->Close(fd_); } bool SegmentParser::GetNextEntryHeader(EntryHeader* head) { if (off_ >= fileLen_) { @@ -147,12 +143,11 @@ bool SegmentParser::GetNextEntryHeader(EntryHeader* head) { const ssize_t n = localFS_->Read(fd_, buf, off_, ENTRY_HEADER_SIZE); if (n != (ssize_t)ENTRY_HEADER_SIZE) { if (n < 0) { - std::cout << "read header from file, fd: " << fd_ << ", offset: " - << off_ << ", " << berror() << std::endl; + std::cout << "read header from file, fd: " << fd_ + << ", offset: " << off_ << ", " << berror() << std::endl; } else { std::cout << "Read size not match, header size: " - << ENTRY_HEADER_SIZE << ", read size: " - << n << std::endl; + << ENTRY_HEADER_SIZE << ", read size: " << n << std::endl; } return false; } @@ -162,19 +157,20 @@ bool SegmentParser::GetNextEntryHeader(EntryHeader* head) { uint32_t data_len = 0; uint32_t data_checksum = 0; uint32_t header_checksum = 0; - butil::RawUnpacker(buf).unpack64((uint64_t&)term) - .unpack32(meta_field) - .unpack32(data_len) - .unpack32(data_checksum) - .unpack32(header_checksum); + butil::RawUnpacker(buf) + .unpack64((uint64_t&)term) + .unpack32(meta_field) + .unpack32(data_len) + .unpack32(data_checksum) + .unpack32(header_checksum); EntryHeader tmp; tmp.term = term; tmp.type = meta_field >> 24; tmp.checksum_type = (meta_field << 8) >> 24; tmp.data_len = data_len; tmp.data_checksum = data_checksum; - if (!VerifyCheckSum(tmp.checksum_type, - buf, ENTRY_HEADER_SIZE - 4, header_checksum)) { + if (!VerifyCheckSum(tmp.checksum_type, buf, 
ENTRY_HEADER_SIZE - 4, + header_checksum)) { std::cout << "Found corrupted header at offset=" << off_ << ", header=" << tmp; return false; @@ -189,30 +185,28 @@ bool SegmentParser::GetNextEntryHeader(EntryHeader* head) { int RaftLogTool::ParseFirstIndexFromFileName(const std::string& fileName, int64_t* firstIndex) { int match = 0; - int64_t lastIndex = 0; + int64_t lastIndex = 0; std::string name; - auto pos = fileName.find_last_of("/"); + auto pos = fileName.find_last_of("/"); if (pos == std::string::npos) { name = fileName; } else { name = fileName.substr(pos + 1); } - match = sscanf(name.c_str(), BRAFT_SEGMENT_CLOSED_PATTERN, - firstIndex, &lastIndex); + match = sscanf(name.c_str(), BRAFT_SEGMENT_CLOSED_PATTERN, firstIndex, + &lastIndex); if (match == 2) { std::cout << "it is a closed segment, path: " << fileName << " first index: " << *firstIndex << " last index: " << lastIndex << std::endl; } else { - match = sscanf(name.c_str(), BRAFT_SEGMENT_OPEN_PATTERN, - firstIndex); + match = sscanf(name.c_str(), BRAFT_SEGMENT_OPEN_PATTERN, firstIndex); if (match == 1) { - std::cout << "it is a opening segment, path: " - << fileName + std::cout << "it is a opening segment, path: " << fileName << " first index: " << *firstIndex << std::endl; } else { - std::cout << "filename = " << fileName << - " is not a raft segment pattern!" << std::endl; + std::cout << "filename = " << fileName + << " is not a raft segment pattern!" << std::endl; return -1; } } diff --git a/src/tools/raft_log_tool.h b/src/tools/raft_log_tool.h index d056608bb9..d445b9a280 100644 --- a/src/tools/raft_log_tool.h +++ b/src/tools/raft_log_tool.h @@ -23,14 +23,16 @@ #ifndef SRC_TOOLS_RAFT_LOG_TOOL_H_ #define SRC_TOOLS_RAFT_LOG_TOOL_H_ -#include #include #include #include +#include + #include #include #include #include + #include "src/fs/local_filesystem.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" @@ -49,48 +51,46 @@ struct EntryHeader { uint32_t data_len; uint32_t data_checksum; - bool operator== (const EntryHeader& rhs) const; + bool operator==(const EntryHeader& rhs) const; }; std::ostream& operator<<(std::ostream& os, const EntryHeader& h); class SegmentParser { public: - explicit SegmentParser(std::shared_ptr localFS) : - localFS_(localFS) {} + explicit SegmentParser(std::shared_ptr localFS) + : localFS_(localFS) {} /** - * @brief 初始化 - * @param fileName segmnet文件的文件名 - * @return 获取成功返回0,失败返回-1 + * @brief Initialization + * @param fileName The file name of the segment file + * @return returns 0 if successful, -1 if unsuccessful */ virtual int Init(const std::string& fileName); /** - * @brief 反初始化 + * @brief Deinitialization */ virtual void UnInit(); /** - * @brief 获取下一个EntryHeader - * @param[out] header log entry header - * @return 获取成功返回true,失败返回false + * @brief Get the next EntryHeader + * @param[out] header log entry header + * @return returns true for success, false for failure */ virtual bool GetNextEntryHeader(EntryHeader* header); /** - * @brief 判断读取是否成功完成 + * @brief Determine if the read was successfully completed */ - virtual bool SuccessfullyFinished() { - return off_ >= fileLen_; - } + virtual bool SuccessfullyFinished() { return off_ >= fileLen_; } private: - // 文件描述符 + // File descriptor int fd_; - // 下一个Entry的偏移 + // Offset of the next Entry int64_t off_; - // 文件长度 + // File length int64_t fileLen_; std::shared_ptr localFS_; @@ -98,50 +98,52 @@ class SegmentParser { class RaftLogTool : public CurveTool { public: - explicit RaftLogTool(std::shared_ptr parser) : - parser_(parser)
{} + explicit RaftLogTool(std::shared_ptr parser) + : parser_(parser) {} /** - * @brief 执行命令 - * @param command 要执行的命令 - * @return 成功返回0,失败返回-1 - */ + * @brief Execute command + * @param command The command to be executed + * @return returns 0 for success, -1 for failure + */ int RunCommand(const std::string& command) override; /** - * @brief 打印帮助信息 - */ + * @brief Print help information + */ void PrintHelp(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); private: /** - * @brief 打印文件中所有raft log的头部信息 - * @param fileName raft log文件名 - * @return 成功返回0,否则返回-1 + * @brief Print the header information of all raft logs in the file + * @param fileName raft log file name + * @return successfully returns 0, otherwise returns -1 */ int PrintHeaders(const std::string& fileName); /** - * @brief 从文件解析出entry header - * @param fd 文件描述符 - * @param offset 文件中的偏移 - * @param[out] head entry头部信息,返回值为0时有效 - * @return 成功返回0,否则返回-1 + * @brief Parse the entry header from the file + * @param fd file descriptor + * @param offset Offset in file + * @param[out] head entry header information, valid when the return value is + * 0 + * @return successfully returns 0, otherwise returns -1 */ - int ParseEntryHeader(int fd, off_t offset, EntryHeader *head); + int ParseEntryHeader(int fd, off_t offset, EntryHeader* head); /** - * @brief 从文件名解析first index - * @param fileName raft log文件名 - * @param[out] firstIndex segment文件包含的log entry的第一个index - * @return 成功返回0,否则返回-1 + * @brief Parsing first index from file name + * @param fileName raft log file name + * @param[out] firstIndex The first index of the log entry contained in the + * segment file + * @return successfully returns 0, otherwise returns -1 */ int ParseFirstIndexFromFileName(const std::string& fileName, int64_t* firstIndex); diff --git a/src/tools/schedule_tool.cpp b/src/tools/schedule_tool.cpp index 25cd976382..2370bdd6ca 100644 --- a/src/tools/schedule_tool.cpp +++ b/src/tools/schedule_tool.cpp @@ -20,23 +20,26 @@ * Author: lixiaocui */ +#include "src/tools/schedule_tool.h" + #include + #include -#include "src/tools/schedule_tool.h" + #include "src/tools/curve_tool_define.h" DEFINE_uint32(logical_pool_id, 1, "logical pool"); DECLARE_string(mdsAddr); DEFINE_bool(scheduleAll, true, "schedule all logical pool or not"); -DEFINE_bool(scanEnable, true, "Enable(true)/Disable(false) scan " - "for specify logical pool"); +DEFINE_bool(scanEnable, true, + "Enable(true)/Disable(false) scan " + "for specify logical pool"); namespace curve { namespace tool { bool ScheduleTool::SupportCommand(const std::string& command) { - return command == kRapidLeaderSchedule || - command == kSetScanState; + return command == kRapidLeaderSchedule || command == kSetScanState; } void ScheduleTool::PrintHelp(const std::string& cmd) { @@ -50,31 +53,28 @@ void ScheduleTool::PrintHelp(const std::string& cmd) { } void ScheduleTool::PrintRapidLeaderScheduleHelp() { - std::cout << "Example :" << std::endl + std::cout + << "Example :" << std::endl << "curve_ops_tool " << kRapidLeaderSchedule << " -logical_pool_id=1 -scheduleAll=false [-mdsAddr=127.0.0.1:6666]" - << " [-confPath=/etc/curve/tools.conf]" - << std::endl; + << " [-confPath=/etc/curve/tools.conf]" << std::endl; std::cout << "curve_ops_tool " << kRapidLeaderSchedule - << " [-mdsAddr=127.0.0.1:6666]" - << " 
[-confPath=/etc/curve/tools.conf]" - << std::endl; + << " [-mdsAddr=127.0.0.1:6666]" + << " [-confPath=/etc/curve/tools.conf]" << std::endl; } void ScheduleTool::PrintSetScanStateHelp() { - std::cout - << "Example:" << std::endl - << " curve_ops_tool " << kSetScanState - << " -logical_pool_id=1 -scanEnable=true/false" - << " [-mdsAddr=127.0.0.1:6666]" - << " [-confPath=/etc/curve/tools.conf]" - << std::endl; + std::cout << "Example:" << std::endl + << " curve_ops_tool " << kSetScanState + << " -logical_pool_id=1 -scanEnable=true/false" + << " [-mdsAddr=127.0.0.1:6666]" + << " [-confPath=/etc/curve/tools.conf]" << std::endl; } -int ScheduleTool::RunCommand(const std::string &cmd) { +int ScheduleTool::RunCommand(const std::string& cmd) { if (kRapidLeaderSchedule == cmd) { return DoRapidLeaderSchedule(); - } else if (cmd == kSetScanState) { + } else if (cmd == kSetScanState) { return DoSetScanState(); } std::cout << "Command not supported!" << std::endl; @@ -90,14 +90,14 @@ int ScheduleTool::DoSetScanState() { auto lpid = FLAGS_logical_pool_id; auto scanEnable = FLAGS_scanEnable; auto retCode = mdsClient_->SetLogicalPoolScanState(lpid, scanEnable); - std::cout << (scanEnable ? "Enable" : "Disable") - << " scan for logicalpool(" << lpid << ")" - << (retCode == 0 ? " success" : " fail") << std::endl; + std::cout << (scanEnable ? "Enable" : "Disable") << " scan for logicalpool(" + << lpid << ")" << (retCode == 0 ? " success" : " fail") + << std::endl; return retCode; } int ScheduleTool::DoRapidLeaderSchedule() { - if (0 != mdsClient_->Init(FLAGS_mdsAddr)) { + if (0 != mdsClient_->Init(FLAGS_mdsAddr)) { std::cout << "Init mds client fail!" << std::endl; return -1; } @@ -109,11 +109,11 @@ int ScheduleTool::DoRapidLeaderSchedule() { } int ScheduleTool::ScheduleOne(PoolIdType lpoolId) { - // 给mds发送rpc + // Send rpc to mds int res = mdsClient_->RapidLeaderSchedule(lpoolId); if (res != 0) { - std::cout << "RapidLeaderSchedule pool " << lpoolId - << " fail" << std::endl; + std::cout << "RapidLeaderSchedule pool " << lpoolId << " fail" + << std::endl; return -1; } return 0; diff --git a/src/tools/schedule_tool.h b/src/tools/schedule_tool.h index edc9bf44dc..094475bafc 100644 --- a/src/tools/schedule_tool.h +++ b/src/tools/schedule_tool.h @@ -25,8 +25,9 @@ #include #include -#include "src/tools/mds_client.h" + #include "src/tools/curve_tool.h" +#include "src/tools/mds_client.h" namespace curve { namespace tool { @@ -39,36 +40,37 @@ class ScheduleTool : public CurveTool { : mdsClient_(mdsClient) {} /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); /** - * @brief 打印help信息 - * @param cmd:执行的命令 - * @return 无 + * @brief Print help information + * @param cmd: Command executed + * @return None */ - void PrintHelp(const std::string &command) override; + void PrintHelp(const std::string& command) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param cmd: Command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &command) override; + int RunCommand(const std::string& command) override; private: /** - * @brief PrintRapidLeaderSchedule 打印rapid-leader-schdule的help信息 + * @brief PrintRapidLeaderSchedule Print help information for + * rapid-leader-schedule */ void PrintRapidLeaderScheduleHelp(); void
PrintSetScanStateHelp(); /** - * @brief DoRapidLeaderSchedule 向mds发送rpc进行快速transfer leader + * @brief DoRapidLeaderSchedule sends rpc to mds for fast transfer leader */ int DoRapidLeaderSchedule(); diff --git a/src/tools/snapshot_check.h b/src/tools/snapshot_check.h index 87bf512758..0750cf5b50 100644 --- a/src/tools/snapshot_check.h +++ b/src/tools/snapshot_check.h @@ -25,60 +25,60 @@ #include #include + +#include #include #include #include -#include -#include "src/client/libcurve_file.h" #include "src/client/client_common.h" +#include "src/client/libcurve_file.h" #include "src/common/configuration.h" -#include "src/common/s3_adapter.h" #include "src/common/crc32.h" -#include "src/tools/snapshot_read.h" +#include "src/common/s3_adapter.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" +#include "src/tools/snapshot_read.h" namespace curve { namespace tool { class SnapshotCheck : public CurveTool { public: SnapshotCheck(std::shared_ptr client, - std::shared_ptr snapshot) : - client_(client), snapshot_(snapshot), inited_(false) {} + std::shared_ptr snapshot) + : client_(client), snapshot_(snapshot), inited_(false) {} ~SnapshotCheck(); - /** - * @brief 打印用法 - * @param command:查询的命令 - * @return 无 + * @brief Print usage + * @param command: The command to query + * @return None */ - void PrintHelp(const std::string &command) override; + void PrintHelp(const std::string& command) override; /** - * @brief 执行命令 - * @param command:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param command: The command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &command) override; + int RunCommand(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); /** - * @brief 比较文件和快照的一致性 - * @return 成功返回0,失败返回-1 + * @brief Compare file and snapshot consistency + * @return returns 0 for success, -1 for failure */ int Check(); private: /** - * 初始化 + * Initialize */ int Init(); diff --git a/src/tools/snapshot_clone_client.cpp b/src/tools/snapshot_clone_client.cpp index 2b8be3c739..847027aab3 100644 --- a/src/tools/snapshot_clone_client.cpp +++ b/src/tools/snapshot_clone_client.cpp @@ -48,7 +48,7 @@ int SnapshotCloneClient::InitDummyServerMap(const std::string& dummyPort) { std::cout << "split dummy server fail!"
<< std::endl; return -1; } - // 只指定了一个端口,对所有mds采用这个端口 + // Only one port has been specified, and this port is used for all mds if (dummyPortVec.size() == 1) { for (uint64_t i = 0; i < serverAddrVec_.size() - 1; ++i) { dummyPortVec.emplace_back(dummyPortVec[0]); } } if (dummyPortVec.size() != serverAddrVec_.size()) { std::cout << "snapshot clone server dummy port list must be correspond" - " as snapshot clone addr list" << std::endl; + " as snapshot clone addr list" + << std::endl; return -1; } @@ -76,23 +77,23 @@ int SnapshotCloneClient::InitDummyServerMap(const std::string& dummyPort) { std::vector SnapshotCloneClient::GetActiveAddrs() { std::vector activeAddrs; - for (const auto &item : dummyServerMap_) { - // 获取status来判断正在服务的地址 + for (const auto& item : dummyServerMap_) { + // Obtain status to determine the address being served std::string status; - MetricRet ret = metricClient_->GetMetric(item.second, - kSnapshotCloneStatusMetricName, &status); + MetricRet ret = metricClient_->GetMetric( + item.second, kSnapshotCloneStatusMetricName, &status); if (ret != MetricRet::kOK) { - std::cout << "Get status metric from " << item.second - << " fail" << std::endl; + std::cout << "Get status metric from " << item.second << " fail" + << std::endl; continue; } if (status == kSnapshotCloneStatusActive) { - // 如果是active状态,再访问一下服务端口 + // If it is active, query the service port again to confirm MetricRet ret = metricClient_->GetMetric( item.first, kSnapshotCloneStatusMetricName, &status); if (ret != MetricRet::kOK) { - std::cout << "Get status metric from " << item.first - << " fail" << std::endl; + std::cout << "Get status metric from " << item.first << " fail" + << std::endl; continue; } activeAddrs.emplace_back(item.first); @@ -102,12 +103,13 @@ std::vector SnapshotCloneClient::GetActiveAddrs() { } void SnapshotCloneClient::GetOnlineStatus( - std::map* onlineStatus) { + std::map* onlineStatus) { onlineStatus->clear(); - for (const auto &item : dummyServerMap_) { + for (const auto& item : dummyServerMap_) { std::string listenAddr; int res = GetListenAddrFromDummyPort(item.second, &listenAddr); - // 如果获取到的监听地址与记录的mds地址不一致,也认为不在线 + // If the obtained listening address does not match the recorded MDS + // address, it is also considered offline if (res != 0 || listenAddr != item.first) { onlineStatus->emplace(item.first, false); continue; } ... } @@ -117,10 +119,9 @@ void SnapshotCloneClient::GetOnlineStatus( } int SnapshotCloneClient::GetListenAddrFromDummyPort( - const std::string& dummyAddr, - std::string* listenAddr) { - MetricRet res = metricClient_->GetConfValueFromMetric(dummyAddr, - kSnapshotCloneConfMetricName, listenAddr); + const std::string& dummyAddr, std::string* listenAddr) { + MetricRet res = metricClient_->GetConfValueFromMetric( + dummyAddr, kSnapshotCloneConfMetricName, listenAddr); if (res != MetricRet::kOK) { return -1; } diff --git a/src/tools/snapshot_clone_client.h b/src/tools/snapshot_clone_client.h index 295134bd50..711952686a 100644 --- a/src/tools/snapshot_clone_client.h +++ b/src/tools/snapshot_clone_client.h @@ -23,10 +23,10 @@ #ifndef SRC_TOOLS_SNAPSHOT_CLONE_CLIENT_H_ #define SRC_TOOLS_SNAPSHOT_CLONE_CLIENT_H_ -#include -#include #include +#include #include +#include #include "src/tools/metric_client.h" #include "src/tools/metric_name.h" @@ -36,66 +36,69 @@ namespace tool { class
SnapshotCloneClient { public: - explicit SnapshotCloneClient(std::shared_ptr metricClient) : - metricClient_(metricClient) {} + explicit SnapshotCloneClient(std::shared_ptr metricClient) + : metricClient_(metricClient) {} virtual ~SnapshotCloneClient() = default; /** - * @brief 初始化,从字符串解析出地址和dummy port - * @param serverAddr snapshot clone server的地址,支持多地址,用","分隔 - * @param dummyPort dummy port列表,只输入一个的话 - * 所有server用同样的dummy port,用字符串分隔有多个的话 - * 为每个server设置不同的dummy port - * @return - * success: 0 - * failed: -1 - * no snapshot server: 1 + * @brief Initialization: parse the addresses and dummy ports from strings + * @param serverAddr Address of snapshot clone server, supporting multiple + * addresses separated by ',' + * @param dummyPort List of dummy ports; if only one is entered, all + * servers use the same dummy port; if multiple are given in the + * string, a different dummy port is set for each server + * @return + * Success: 0 + * Failed: -1 + * No snapshot server: 1 * */ virtual int Init(const std::string& serverAddr, const std::string& dummyPort); /** - * @brief 获取当前服务的snapshot clone server的地址 + * @brief Get the address of the snapshot clone server for the current + * service */ virtual std::vector GetActiveAddrs(); /** - * @brief 获取snapshot clone server的在线状态 - * dummyserver在线且dummyserver记录的listen addr - * 与服务地址一致才认为在线 - * @param[out] onlineStatus 每个节点的在线状态 + * @brief Get the online status of the snapshot clone servers; a server + * is considered online only if its dummyserver is online and the + * listen addr recorded by the dummyserver matches the service + * address + * @param[out] onlineStatus The online status of each node */ virtual void GetOnlineStatus(std::map* onlineStatus); virtual const std::map& GetDummyServerMap() - const { + const { return dummyServerMap_; } private: /** - * @brief 初始化dummy server地址 - * @param dummyPort dummy server端口列表 - * @return 成功返回0,失败返回-1 + * @brief Initialize dummy server address + * @param dummyPort dummy server port list + * @return returns 0 for success, -1 for failure */ int InitDummyServerMap(const std::string& dummyPort); /** - * @brief 通过dummyServer获取server的监听地址 - * @param dummyAddr dummyServer的地址 - * @param[out] listenAddr 服务地址 - * @return 成功返回0,失败返回-1 + * @brief Obtain the listening address of the server through dummyServer + * @param dummyAddr Address of dummyServer + * @param[out] listenAddr service address + * @return returns 0 for success, -1 for failure */ int GetListenAddrFromDummyPort(const std::string& dummyAddr, std::string* listenAddr); private: - // 用于获取metric + // Used to obtain metric std::shared_ptr metricClient_; - // 保存server地址的vector + // Vector holding the server addresses std::vector serverAddrVec_; - // 保存server地址对应的dummy server的地址 + // Save the address of the dummy server corresponding to the server address std::map dummyServerMap_; }; diff --git a/src/tools/status_tool.cpp b/src/tools/status_tool.cpp index 63cef36492..aa6a241c8c 100644 --- a/src/tools/status_tool.cpp +++ b/src/tools/status_tool.cpp @@ -20,17 +20,22 @@ * Author: charisu */ #include "src/tools/status_tool.h" + #include DEFINE_bool(offline, false, "if true, only list offline chunskervers"); -DEFINE_bool(unhealthy, false, "if true, only list chunkserver that unhealthy " "ratio greater than 0"); -DEFINE_bool(checkHealth, true, "if true, it will check the health " "state of chunkserver in chunkserver-list"); -DEFINE_bool(checkCSAlive, false, "if true, it will check the online state of " "chunkservers with rpc in chunkserver-list");
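For context, the DEFINE_bool flags in this hunk use the standard gflags pattern that these tools rely on: DEFINE_* creates a flag together with a FLAGS_<name> variable, DECLARE_* references a flag defined in another translation unit, and the reformatting above only re-wraps the help-text string literals. A minimal, self-contained sketch of the pattern (not part of this patch; the binary name and output are illustrative only):

    #include <gflags/gflags.h>
    #include <iostream>

    // Mirrors the "offline" flag in this hunk; gflags generates FLAGS_offline.
    DEFINE_bool(offline, false, "if true, only list offline chunkservers");

    int main(int argc, char* argv[]) {
        // Parses argv in place, so "./status_tool -offline=true" sets FLAGS_offline.
        gflags::ParseCommandLineFlags(&argc, &argv, true);
        std::cout << "offline = " << std::boolalpha << FLAGS_offline << std::endl;
        return 0;
    }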
-DEFINE_bool(listClientInRepo, true, "if true, list-client will list all clients" - " include that in repo"); +DEFINE_bool(unhealthy, false, + "if true, only list chunkserver that unhealthy " + "ratio greater than 0"); +DEFINE_bool(checkHealth, true, + "if true, it will check the health " + "state of chunkserver in chunkserver-list"); +DEFINE_bool(checkCSAlive, false, + "if true, it will check the online state of " + "chunkservers with rpc in chunkserver-list"); +DEFINE_bool(listClientInRepo, true, + "if true, list-client will list all clients" + " include that in repo"); DEFINE_uint64(walSegmentSize, 8388608, "wal segment size"); DECLARE_string(mdsAddr); DECLARE_string(etcdAddr); @@ -42,8 +47,7 @@ const char* kProtocalCurve = "curve"; namespace curve { namespace tool { -std::ostream& operator<<(std::ostream& os, - std::vector strs) { +std::ostream& operator<<(std::ostream& os, std::vector strs) { for (uint32_t i = 0; i < strs.size(); ++i) { if (i != 0) { os << ", "; @@ -54,11 +58,10 @@ std::ostream& operator<<(std::ostream& os, } std::string ToString(ServiceName name) { - static std::map serviceNameMap = - {{ServiceName::kMds, "mds"}, - {ServiceName::kEtcd, "etcd"}, - {ServiceName::kSnapshotCloneServer, - "snapshot-clone-server"}}; + static std::map serviceNameMap = { + {ServiceName::kMds, "mds"}, + {ServiceName::kEtcd, "etcd"}, + {ServiceName::kSnapshotCloneServer, "snapshot-clone-server"}}; return serviceNameMap[name]; } @@ -83,7 +86,7 @@ int StatusTool::Init(const std::string& command) { } if (CommandNeedSnapshotClone(command)) { int snapshotRet = snapshotClient_->Init(FLAGS_snapshotCloneAddr, - FLAGS_snapshotCloneDummyPort); + FLAGS_snapshotCloneDummyPort); switch (snapshotRet) { case 0: // success @@ -114,18 +117,13 @@ bool StatusTool::CommandNeedSnapshotClone(const std::string& command) { } bool StatusTool::SupportCommand(const std::string& command) { - return (command == kSpaceCmd || command == kStatusCmd - || command == kChunkserverListCmd - || command == kChunkserverStatusCmd - || command == kMdsStatusCmd - || command == kEtcdStatusCmd - || command == kClientStatusCmd - || command == kClientListCmd - || command == kSnapshotCloneStatusCmd - || command == kClusterStatusCmd - || command == kServerListCmd - || command == kLogicalPoolList - || command == kScanStatusCmd); + return (command == kSpaceCmd || command == kStatusCmd || + command == kChunkserverListCmd || + command == kChunkserverStatusCmd || command == kMdsStatusCmd || + command == kEtcdStatusCmd || command == kClientStatusCmd || + command == kClientListCmd || command == kSnapshotCloneStatusCmd || + command == kClusterStatusCmd || command == kServerListCmd || + command == kLogicalPoolList || command == kScanStatusCmd); } void StatusTool::PrintHelp(const std::string& cmd) { @@ -170,7 +168,7 @@ int StatusTool::SpaceCmd() { double physicalUsedRatio = 0; if (spaceInfo.totalChunkSize != 0) { physicalUsedRatio = static_cast(spaceInfo.usedChunkSize) / - spaceInfo.totalChunkSize; + spaceInfo.totalChunkSize; } double logicalUsedRatio = 0; @@ -179,28 +177,28 @@ int StatusTool::SpaceCmd() { double createdFileRatio = 0; if (spaceInfo.totalCapacity != 0) { logicalUsedRatio = static_cast(spaceInfo.allocatedSize) / - spaceInfo.totalCapacity; - logicalLeftRatio = static_cast( - spaceInfo.totalCapacity - spaceInfo.allocatedSize) / - spaceInfo.totalCapacity; + spaceInfo.totalCapacity; + logicalLeftRatio = static_cast(spaceInfo.totalCapacity - + spaceInfo.allocatedSize) / + spaceInfo.totalCapacity; createdFileRatio = 
static_cast(spaceInfo.currentFileSize) / - spaceInfo.totalCapacity; + spaceInfo.totalCapacity; } if (spaceInfo.allocatedSize != 0) { canBeRecycledRatio = static_cast(spaceInfo.recycleAllocSize) / - spaceInfo.allocatedSize; + spaceInfo.allocatedSize; } - std:: cout.setf(std::ios::fixed); + std::cout.setf(std::ios::fixed); std::cout << std::setprecision(2); std::cout << "Space info:" << std::endl; - std::cout << "physical: total = " - << spaceInfo.totalChunkSize / mds::kGB << "GB" - << ", used = " << spaceInfo.usedChunkSize / mds::kGB - << "GB(" << physicalUsedRatio * 100 << "%), left = " + std::cout << "physical: total = " << spaceInfo.totalChunkSize / mds::kGB + << "GB" + << ", used = " << spaceInfo.usedChunkSize / mds::kGB << "GB(" + << physicalUsedRatio * 100 << "%), left = " << (spaceInfo.totalChunkSize - spaceInfo.usedChunkSize) / mds::kGB << "GB(" << (1 - physicalUsedRatio) * 100 << "%)" << std::endl; - std::cout << "logical: total = " - << spaceInfo.totalCapacity / mds::kGB << "GB" + std::cout << "logical: total = " << spaceInfo.totalCapacity / mds::kGB + << "GB" << ", used = " << spaceInfo.allocatedSize / mds::kGB << "GB" << "(" << logicalUsedRatio * 100 << "%, can be recycled = " << spaceInfo.recycleAllocSize / mds::kGB << "GB(" @@ -209,18 +207,19 @@ int StatusTool::SpaceCmd() { << (spaceInfo.totalCapacity - spaceInfo.allocatedSize) / mds::kGB << "GB(" << logicalLeftRatio * 100 << "%)" << ", created file size = " - << spaceInfo.currentFileSize / mds::kGB - << "GB(" << createdFileRatio * 100 << "%)" << std::endl; + << spaceInfo.currentFileSize / mds::kGB << "GB(" + << createdFileRatio * 100 << "%)" << std::endl; std::cout << "Every Logicalpool Space info:" << std::endl; - for (const auto &i : spaceInfo.lpoolspaceinfo) { - std::cout << "logicalPool: name = "<< i.second.poolName - << ", poolid = " << i.first - << ", total = "<< i.second.totalCapacity / mds::kGB << "GB" - << ", used = " << i.second.allocatedSize / mds::kGB << "GB" - << ", left = " << (i.second.totalCapacity - - i.second.allocatedSize) / mds::kGB - << "GB"<< std::endl; + for (const auto& i : spaceInfo.lpoolspaceinfo) { + std::cout << "logicalPool: name = " << i.second.poolName + << ", poolid = " << i.first + << ", total = " << i.second.totalCapacity / mds::kGB << "GB" + << ", used = " << i.second.allocatedSize / mds::kGB << "GB" + << ", left = " + << (i.second.totalCapacity - i.second.allocatedSize) / + mds::kGB + << "GB" << std::endl; } return 0; } @@ -253,9 +252,9 @@ int StatusTool::ChunkServerListCmd() { double unhealthyRatio = 0.0; if (FLAGS_checkCSAlive) { - // 发RPC重置online状态 - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); + // Send RPC to reset online status + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); bool isOnline = copysetCheckCore_->CheckChunkServerOnline(csAddr); if (isOnline) { chunkserver.set_onlinestate(OnlineState::ONLINE); @@ -279,7 +278,7 @@ int StatusTool::ChunkServerListCmd() { if (FLAGS_checkHealth) { copysetCheckCore_->CheckCopysetsOnChunkServer(csId); const auto& statistics = - copysetCheckCore_->GetCopysetStatistics(); + copysetCheckCore_->GetCopysetStatistics(); unhealthyRatio = statistics.unhealthyRatio; if (FLAGS_unhealthy && unhealthyRatio == 0) { continue; @@ -298,8 +297,7 @@ int StatusTool::ChunkServerListCmd() { std::cout << "chunkServerID = " << csId << ", diskType = " << chunkserver.disktype() << ", hostIP = " << chunkserver.hostip() - << ", port = " << chunkserver.port() - << ", rwStatus = " + << ", 
port = " << chunkserver.port() << ", rwStatus = " << ChunkServerStatus_Name(chunkserver.status()) << ", diskState = " << DiskState_Name(chunkserver.diskstatus()) @@ -307,13 +305,13 @@ int StatusTool::ChunkServerListCmd() { << OnlineState_Name(chunkserver.onlinestate()) << ", copysetNum = " << copysets.size() << ", mountPoint = " << chunkserver.mountpoint() - << ", diskCapacity = " << chunkserver.diskcapacity() - / curve::mds::kGB << " GB" - << ", diskUsed = " << chunkserver.diskused() - / curve::mds::kGB << " GB"; + << ", diskCapacity = " + << chunkserver.diskcapacity() / curve::mds::kGB << " GB" + << ", diskUsed = " << chunkserver.diskused() / curve::mds::kGB + << " GB"; if (FLAGS_checkHealth) { - std::cout << ", unhealthyCopysetRatio = " - << unhealthyRatio * 100 << "%"; + std::cout << ", unhealthyCopysetRatio = " << unhealthyRatio * 100 + << "%"; } if (chunkserver.has_externalip()) { std::cout << ", externalIP = " << chunkserver.externalip(); @@ -322,7 +320,7 @@ int StatusTool::ChunkServerListCmd() { } std::cout << "total: " << total << ", online: " << online; if (!FLAGS_checkCSAlive) { - std::cout <<", unstable: " << unstable; + std::cout << ", unstable: " << unstable; } std::cout << ", offline: " << offline << std::endl; @@ -367,8 +365,8 @@ int StatusTool::LogicalPoolListCmd() { uint64_t total = 0; uint64_t allocSize; AllocMap allocMap; - res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, - &allocSize, &allocMap); + res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, &allocSize, + &allocMap); if (res != 0) { std::cout << "GetAllocatedSize of recycle bin fail!" << std::endl; return -1; @@ -406,15 +404,17 @@ int StatusTool::LogicalPoolListCmd() { << curve::mds::topology::LogicalPoolType_Name(lgPool.type()) << ", scanEnable = " << lgPool.scanenable() << ", allocateStatus = " - << curve::mds::topology:: - AllocateStatus_Name(lgPool.allocatestatus()) + << curve::mds::topology::AllocateStatus_Name( + lgPool.allocatestatus()) << ", total space = " << totalSize / curve::mds::kGB << "GB" << ", used space = " << usedSize / curve::mds::kGB << "GB" - << "(" << usedRatio * 100 << "%, can be recycled = " - << canBeRecycle / curve::mds::kGB << "GB" - << "(" << recycleRatio * 100 << "%))" << ", left space = " - << (totalSize - usedSize) / curve::mds::kGB - << "GB(" << (1 - usedRatio) * 100 << "%)" << std::endl; + << "(" << usedRatio * 100 + << "%, can be recycled = " << canBeRecycle / curve::mds::kGB + << "GB" + << "(" << recycleRatio * 100 << "%))" + << ", left space = " + << (totalSize - usedSize) / curve::mds::kGB << "GB(" + << (1 - usedRatio) * 100 << "%)" << std::endl; } std::cout << "total: " << total << std::endl; return 0; @@ -458,9 +458,7 @@ int StatusTool::StatusCmd() { } } -int StatusTool::ChunkServerStatusCmd() { - return PrintChunkserverStatus(false); -} +int StatusTool::ChunkServerStatusCmd() { return PrintChunkserverStatus(false); } int StatusTool::PrintClusterStatus() { int ret = 0; @@ -475,8 +473,8 @@ int StatusTool::PrintClusterStatus() { const auto& statistics = copysetCheckCore_->GetCopysetStatistics(); std::cout << "total copysets: " << statistics.totalNum << ", unhealthy copysets: " << statistics.unhealthyNum - << ", unhealthy_ratio: " - << statistics.unhealthyRatio * 100 << "%" << std::endl; + << ", unhealthy_ratio: " << statistics.unhealthyRatio * 100 << "%" + << std::endl; std::vector phyPools; std::vector lgPools; int res = GetPoolsInCluster(&phyPools, &lgPools); @@ -495,24 +493,24 @@ int StatusTool::PrintClusterStatus() { bool 
StatusTool::IsClusterHeatlhy() { bool ret = true; - // 1、检查copyset健康状态 + // 1. Check the health status of copyset int res = copysetCheckCore_->CheckCopysetsInCluster(); if (res != 0) { std::cout << "Copysets are not healthy!" << std::endl; ret = false; } - // 2、检查mds状态 + // 2. Check the mds status if (!CheckServiceHealthy(ServiceName::kMds)) { ret = false; } - // 3、检查etcd在线状态 + // 3. Check the online status of ETCD if (!CheckServiceHealthy(ServiceName::kEtcd)) { ret = false; } - // 4、检查snapshot clone server状态 + // 4. Check the status of the snapshot clone server if (!noSnapshotServer_ && !CheckServiceHealthy(ServiceName::kSnapshotCloneServer)) { ret = false; @@ -531,10 +529,10 @@ bool StatusTool::CheckServiceHealthy(const ServiceName& name) { break; } case ServiceName::kEtcd: { - int res = etcdClient_->GetEtcdClusterStatus(&leaderVec, - &onlineStatus); + int res = + etcdClient_->GetEtcdClusterStatus(&leaderVec, &onlineStatus); if (res != 0) { - std:: cout << "GetEtcdClusterStatus fail!" << std::endl; + std::cout << "GetEtcdClusterStatus fail!" << std::endl; return false; } break; @@ -568,8 +566,8 @@ bool StatusTool::CheckServiceHealthy(const ServiceName& name) { return ret; } -void StatusTool::PrintOnlineStatus(const std::string& name, - const std::map& onlineStatus) { +void StatusTool::PrintOnlineStatus( + const std::string& name, const std::map& onlineStatus) { std::vector online; std::vector offline; for (const auto& item : onlineStatus) { @@ -663,8 +661,8 @@ int StatusTool::PrintSnapshotCloneStatus() { } std::string version; std::vector failedList; - int res = versionTool_->GetAndCheckSnapshotCloneVersion(&version, - &failedList); + int res = + versionTool_->GetAndCheckSnapshotCloneVersion(&version, &failedList); int ret = 0; if (res != 0) { std::cout << "GetAndCheckSnapshotCloneVersion fail" << std::endl; @@ -699,7 +697,7 @@ int StatusTool::PrintClientStatus() { if (!first) { std::cout << ", "; } - std::cout << "version-" << item2.first << ": " + std::cout << "version-" << item2.first << ": " << item2.second.size(); first = false; } @@ -735,13 +733,12 @@ int StatusTool::ScanStatusCmd() { return -1; } - std::cout - << "Scan status for copyset(" - << lpid << "," << copysetId << "):" << std::endl - << " scaning=" << copysetInfo.scaning() - << " lastScanSec=" << copysetInfo.lastscansec() - << " lastScanConsistent=" << copysetInfo.lastscanconsistent() - << std::endl; + std::cout << "Scan status for copyset(" << lpid << "," << copysetId + << "):" << std::endl + << " scaning=" << copysetInfo.scaning() + << " lastScanSec=" << copysetInfo.lastscansec() + << " lastScanConsistent=" << copysetInfo.lastscanconsistent() + << std::endl; return 0; } @@ -758,8 +755,8 @@ int StatusTool::ScanStatusCmd() { if (count % 5 == 0) { std::cout << std::endl; } - std::cout << " (" << copysetInfo.logicalpoolid() - << "," << copysetInfo.copysetid() << ")"; + std::cout << " (" << copysetInfo.logicalpoolid() << "," + << copysetInfo.copysetid() << ")"; count++; } @@ -768,47 +765,47 @@ int StatusTool::ScanStatusCmd() { return 0; } -int CheckUseWalPool(const std::map> - &poolChunkservers, - bool *useWalPool, - bool *useChunkFilePoolAsWalPool, - std::shared_ptr metricClient) { +int CheckUseWalPool( + const std::map>& poolChunkservers, + bool* useWalPool, bool* useChunkFilePoolAsWalPool, + std::shared_ptr metricClient) { int ret = 0; if (!poolChunkservers.empty()) { ChunkServerInfo chunkserver = poolChunkservers.begin()->second[0]; - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); 
+ std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); // check whether use chunkfilepool std::string metricValue; std::string metricName = GetUseWalPoolName(csAddr); - MetricRet res = metricClient->GetConfValueFromMetric(csAddr, - metricName, &metricValue); + MetricRet res = metricClient->GetConfValueFromMetric(csAddr, metricName, + &metricValue); if (res != MetricRet::kOK) { - std::cout << "Get use chunkfilepool conf " - << csAddr << " fail!" << std::endl; + std::cout << "Get use chunkfilepool conf " << csAddr << " fail!" + << std::endl; ret = -1; } std::string raftLogProtocol = curve::common::UriParser ::GetProtocolFromUri(metricValue); - *useWalPool = kProtocalCurve == raftLogProtocol ? true : false; + *useWalPool = kProtocalCurve == raftLogProtocol ? true : false; - // check whether use chunkfilepool as walpool from chunkserver conf metric // NOLINT + // check whether use chunkfilepool as walpool from chunkserver conf + // metric // NOLINT metricName = GetUseChunkFilePoolAsWalPoolName(csAddr); res = metricClient->GetConfValueFromMetric(csAddr, metricName, - &metricValue); + &metricValue); if (res != MetricRet::kOK) { - std::cout << "Get use chunkfilepool as walpool conf " - << csAddr << " fail!" << std::endl; + std::cout << "Get use chunkfilepool as walpool conf " << csAddr + << " fail!" << std::endl; ret = -1; } - *useChunkFilePoolAsWalPool = StringToBool(metricValue, - useChunkFilePoolAsWalPool); + *useChunkFilePoolAsWalPool = + StringToBool(metricValue, useChunkFilePoolAsWalPool); } return ret; } int PrintChunkserverOnlineStatus( - const std::map> &poolChunkservers, + const std::map>& poolChunkservers, std::shared_ptr copysetCheckCore, std::shared_ptr mdsClient) { int ret = 0; @@ -819,8 +816,8 @@ int PrintChunkserverOnlineStatus( for (const auto& poolChunkserver : poolChunkservers) { for (const auto& chunkserver : poolChunkserver.second) { total++; - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); if (copysetCheckCore->CheckChunkServerOnline(csAddr)) { online++; } else { @@ -833,11 +830,11 @@ int PrintChunkserverOnlineStatus( std::vector offlineRecover; if (offlineCs.size() > 0) { std::map statusMap; - int res = mdsClient->QueryChunkServerRecoverStatus( - offlineCs, &statusMap); + int res = + mdsClient->QueryChunkServerRecoverStatus(offlineCs, &statusMap); if (res != 0) { std::cout << "query offlinne chunkserver recover status fail"; - ret = -1; + ret = -1; } else { // Distinguish between recovering and unrecovered for (auto it = statusMap.begin(); it != statusMap.end(); ++it) { @@ -847,14 +844,13 @@ int PrintChunkserverOnlineStatus( } } } - std::cout << "chunkserver: total num = " << total - << ", online = " << online - << ", offline = " << offline - << "(recoveringout = " << offlineRecover.size() - << ", chunkserverlist: ["; + std::cout << "chunkserver: total num = " << total << ", online = " << online + << ", offline = " << offline + << "(recoveringout = " << offlineRecover.size() + << ", chunkserverlist: ["; int i = 0; - for (ChunkServerIdType csId : offlineRecover) { + for (ChunkServerIdType csId : offlineRecover) { i++; if (i == static_cast(offlineRecover.size())) { std::cout << csId; @@ -867,26 +863,25 @@ int PrintChunkserverOnlineStatus( } int GetChunkserverLeftSize( - const std::map> &poolChunkservers, - std::map> *poolChunkLeftSize, - std::map> *poolWalSegmentLeftSize, - bool useWalPool, - bool 
useChunkFilePoolAsWalPool, + const std::map>& poolChunkservers, + std::map>* poolChunkLeftSize, + std::map>* poolWalSegmentLeftSize, + bool useWalPool, bool useChunkFilePoolAsWalPool, std::shared_ptr metricClient) { int ret = 0; for (const auto& poolChunkserver : poolChunkservers) { std::vector chunkLeftSize; std::vector walSegmentLeftSize; for (const auto& chunkserver : poolChunkserver.second) { - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); std::string metricName = GetCSLeftChunkName(csAddr); uint64_t chunkNum; - MetricRet res = metricClient->GetMetricUint(csAddr, - metricName, &chunkNum); + MetricRet res = + metricClient->GetMetricUint(csAddr, metricName, &chunkNum); if (res != MetricRet::kOK) { std::cout << "Get left chunk size of chunkserver " << csAddr - << " fail!" << std::endl; + << " fail!" << std::endl; ret = -1; continue; } @@ -898,10 +893,10 @@ int GetChunkserverLeftSize( metricName = GetCSLeftWalSegmentName(csAddr); uint64_t walSegmentNum; res = metricClient->GetMetricUint(csAddr, metricName, - &walSegmentNum); + &walSegmentNum); if (res != MetricRet::kOK) { std::cout << "Get left wal segment size of chunkserver " - << csAddr << " fail!" << std::endl; + << csAddr << " fail!" << std::endl; ret = -1; continue; } @@ -911,7 +906,7 @@ int GetChunkserverLeftSize( } poolChunkLeftSize->emplace(poolChunkserver.first, chunkLeftSize); poolWalSegmentLeftSize->emplace(poolChunkserver.first, - walSegmentLeftSize); + walSegmentLeftSize); } return ret; } @@ -921,8 +916,8 @@ int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { std::cout << "ChunkServer status:" << std::endl; std::string version; std::vector failedList; - int res = versionTool_->GetAndCheckChunkServerVersion(&version, - &failedList); + int res = + versionTool_->GetAndCheckChunkServerVersion(&version, &failedList); int ret = 0; if (res != 0) { std::cout << "GetAndCheckChunkserverVersion fail" << std::endl; @@ -943,8 +938,7 @@ int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { } // get chunkserver online status - ret = PrintChunkserverOnlineStatus(poolChunkservers, - copysetCheckCore_, + ret = PrintChunkserverOnlineStatus(poolChunkservers, copysetCheckCore_, mdsClient_); if (!checkLeftSize) { return ret; @@ -961,12 +955,9 @@ int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { // get chunkserver left size std::map> poolChunkLeftSize; std::map> poolWalSegmentLeftSize; - ret = GetChunkserverLeftSize(poolChunkservers, - &poolChunkLeftSize, - &poolWalSegmentLeftSize, - useWalPool, - useChunkFilePoolAsWalPool, - metricClient_); + ret = GetChunkserverLeftSize(poolChunkservers, &poolChunkLeftSize, + &poolWalSegmentLeftSize, useWalPool, + useChunkFilePoolAsWalPool, metricClient_); if (0 != ret) { return ret; } @@ -984,9 +975,9 @@ int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { return ret; } -void StatusTool::PrintCsLeftSizeStatistics(const std::string& name, - const std::map>& poolLeftSize) { +void StatusTool::PrintCsLeftSizeStatistics( + const std::string& name, + const std::map>& poolLeftSize) { if (poolLeftSize.empty()) { std::cout << "No " << name << " left size found!" 
<< std::endl; return; @@ -1015,19 +1006,19 @@ void StatusTool::PrintCsLeftSizeStatistics(const std::string& name, } double var = sum / leftSize.second.size(); - std:: cout.setf(std::ios::fixed); - std::cout<< std::setprecision(2); - std::cout<< "pool" << leftSize.first << " " << name; + std::cout.setf(std::ios::fixed); + std::cout << std::setprecision(2); + std::cout << "pool" << leftSize.first << " " << name; std::cout << " left size: min = " << min << "GB" - << ", max = " << max << "GB" - << ", average = " << avg << "GB" - << ", range = " << range << "GB" - << ", variance = " << var << std::endl; + << ", max = " << max << "GB" + << ", average = " << avg << "GB" + << ", range = " << range << "GB" + << ", variance = " << var << std::endl; } } int StatusTool::GetPoolsInCluster(std::vector* phyPools, - std::vector* lgPools) { + std::vector* lgPools) { int res = mdsClient_->ListPhysicalPoolsInCluster(phyPools); if (res != 0) { std::cout << "ListPhysicalPoolsInCluster fail!" << std::endl; @@ -1035,7 +1026,7 @@ int StatusTool::GetPoolsInCluster(std::vector* phyPools, } for (const auto& phyPool : *phyPools) { int res = mdsClient_->ListLogicalPoolsInPhysicalPool( - phyPool.physicalpoolid(), lgPools) != 0; + phyPool.physicalpoolid(), lgPools) != 0; if (res != 0) { std::cout << "ListLogicalPoolsInPhysicalPool fail!" << std::endl; return -1; @@ -1057,9 +1048,9 @@ int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) { std::cout << "Get root directory file size from mds fail!" << std::endl; return -1; } - // 从metric获取space信息 + // Obtain space information from metric for (const auto& lgPool : lgPools) { - LogicalpoolSpaceInfo lpinfo; + LogicalpoolSpaceInfo lpinfo; std::string poolName = lgPool.logicalpoolname(); lpinfo.poolName = poolName; std::string metricName = GetPoolTotalChunkSizeName(poolName); @@ -1070,7 +1061,7 @@ int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) { return -1; } spaceInfo->totalChunkSize += size; - lpinfo.totalChunkSize +=size; + lpinfo.totalChunkSize += size; metricName = GetPoolUsedChunkSizeName(poolName); res = mdsClient_->GetMetric(metricName, &size); if (res != 0) { @@ -1096,10 +1087,10 @@ int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) { spaceInfo->allocatedSize += size; lpinfo.allocatedSize += size; spaceInfo->lpoolspaceinfo.insert( - std::pair( - lgPool.logicalpoolid(), lpinfo)); + std::pair(lgPool.logicalpoolid(), + lpinfo)); } - // 获取RecycleBin的分配大小 + // Obtain the allocation size of RecycleBin res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, &spaceInfo->recycleAllocSize); if (res != 0) { @@ -1109,7 +1100,7 @@ int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) { return 0; } -int StatusTool::RunCommand(const std::string &cmd) { +int StatusTool::RunCommand(const std::string& cmd) { if (Init(cmd) != 0) { std::cout << "Init StatusTool failed" << std::endl; return -1; diff --git a/src/tools/status_tool.h b/src/tools/status_tool.h index 2b54d70943..16aeed906d 100644 --- a/src/tools/status_tool.h +++ b/src/tools/status_tool.h @@ -23,31 +23,33 @@ #ifndef SRC_TOOLS_STATUS_TOOL_H_ #define SRC_TOOLS_STATUS_TOOL_H_ +#include #include #include -#include -#include + #include -#include -#include -#include +#include #include +#include +#include #include +#include + #include "proto/topology.pb.h" #include "src/common/timeutility.h" +#include "src/common/uri_parser.h" #include "src/mds/common/mds_define.h" -#include "src/tools/mds_client.h" #include "src/tools/chunkserver_client.h" -#include "src/tools/namespace_tool_core.h" #include "src/tools/copyset_check_core.h" 
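The include reshuffling in this header (and in the others touched by this patch) is what clang-format's include regrouping produces: include blocks are merged, regrouped, and sorted alphabetically, with C system headers, C++ standard headers, and project headers separated by blank lines. A hypothetical .clang-format fragment that yields this kind of ordering; the repository's actual configuration may use different values:

    # Illustrative .clang-format fragment; the project's real settings may differ.
    BasedOnStyle: Google
    ColumnLimit: 80
    IndentWidth: 4
    SortIncludes: true
    IncludeBlocks: Regroup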
-#include "src/tools/etcd_client.h" -#include "src/tools/version_tool.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" +#include "src/tools/etcd_client.h" +#include "src/tools/mds_client.h" #include "src/tools/metric_client.h" #include "src/tools/metric_name.h" +#include "src/tools/namespace_tool_core.h" #include "src/tools/snapshot_clone_client.h" -#include "src/common/uri_parser.h" +#include "src/tools/version_tool.h" using curve::mds::topology::ChunkServerInfo; using curve::mds::topology::ChunkServerStatus; @@ -63,22 +65,22 @@ struct LogicalpoolSpaceInfo { std::string poolName = ""; uint64_t totalChunkSize = 0; uint64_t usedChunkSize = 0; - // 总体能容纳的文件大小 + // The overall file size that can be accommodated uint64_t totalCapacity = 0; - // 分配大小 + // Allocation size uint64_t allocatedSize = 0; }; struct SpaceInfo { uint64_t totalChunkSize = 0; uint64_t usedChunkSize = 0; - // 总体能容纳的文件大小 + // The overall file size that can be accommodated uint64_t totalCapacity = 0; - // 分配大小 + // Allocation size uint64_t allocatedSize = 0; - // recycleBin的分配大小 + // Allocation size of recycleBin uint64_t recycleAllocSize = 0; - // 系统中存在的文件大小 + // File size present in the system uint64_t currentFileSize = 0; std::unordered_map lpoolspaceinfo; }; @@ -100,49 +102,54 @@ class StatusTool : public CurveTool { std::shared_ptr versionTool, std::shared_ptr metricClient, std::shared_ptr snapshotClient) - : mdsClient_(mdsClient), copysetCheckCore_(copysetCheckCore), - etcdClient_(etcdClient), metricClient_(metricClient), - snapshotClient_(snapshotClient), versionTool_(versionTool), - mdsInited_(false), etcdInited_(false), noSnapshotServer_(false) {} + : mdsClient_(mdsClient), + copysetCheckCore_(copysetCheckCore), + etcdClient_(etcdClient), + metricClient_(metricClient), + snapshotClient_(snapshotClient), + versionTool_(versionTool), + mdsInited_(false), + etcdInited_(false), + noSnapshotServer_(false) {} ~StatusTool() = default; /** - * @brief 打印help信息 - * @param cmd:执行的命令 - * @return 无 + * @brief Print help information + * @param cmd: Command executed + * @return None */ - void PrintHelp(const std::string &command) override; + void PrintHelp(const std::string& command) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param cmd: Command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &command) override; + int RunCommand(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ - static bool SupportCommand(const std::string &command); + static bool SupportCommand(const std::string& command); /** - * @brief 判断集群是否健康 + * @brief to determine whether the cluster is healthy */ bool IsClusterHeatlhy(); private: - int Init(const std::string &command); + int Init(const std::string& command); int SpaceCmd(); int StatusCmd(); int ChunkServerListCmd(); int ServerListCmd(); int LogicalPoolListCmd(); int ChunkServerStatusCmd(); - int GetPoolsInCluster(std::vector *phyPools, - std::vector *lgPools); - int GetSpaceInfo(SpaceInfo *spaceInfo); + int GetPoolsInCluster(std::vector* phyPools, + std::vector* lgPools); + int GetSpaceInfo(SpaceInfo* spaceInfo); int PrintClusterStatus(); int PrintMdsStatus(); int PrintEtcdStatus(); @@ -151,67 +158,67 @@ class StatusTool : public CurveTool { int ClientListCmd(); int ScanStatusCmd(); void 
@@ -100,49 +102,54 @@ class StatusTool : public CurveTool { std::shared_ptr versionTool, std::shared_ptr metricClient, std::shared_ptr snapshotClient) - : mdsClient_(mdsClient), copysetCheckCore_(copysetCheckCore), - etcdClient_(etcdClient), metricClient_(metricClient), - snapshotClient_(snapshotClient), versionTool_(versionTool), - mdsInited_(false), etcdInited_(false), noSnapshotServer_(false) {} + : mdsClient_(mdsClient), + copysetCheckCore_(copysetCheckCore), + etcdClient_(etcdClient), + metricClient_(metricClient), + snapshotClient_(snapshotClient), + versionTool_(versionTool), + mdsInited_(false), + etcdInited_(false), + noSnapshotServer_(false) {} ~StatusTool() = default; /** - * @brief 打印help信息 - * @param cmd:执行的命令 - * @return 无 + * @brief Print help information + * @param cmd: The command to execute + * @return None */ - void PrintHelp(const std::string &command) override; + void PrintHelp(const std::string& command) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute the command + * @param cmd: The command to execute + * @return 0 on success, -1 on failure */ - int RunCommand(const std::string &command) override; + int RunCommand(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief Return whether the command is supported + * @param command: The command to execute + * @return true/false */ - static bool SupportCommand(const std::string &command); + static bool SupportCommand(const std::string& command); /** - * @brief 判断集群是否健康 + * @brief Determine whether the cluster is healthy */ bool IsClusterHeatlhy(); private: - int Init(const std::string &command); + int Init(const std::string& command); int SpaceCmd(); int StatusCmd(); int ChunkServerListCmd(); int ServerListCmd(); int LogicalPoolListCmd(); int ChunkServerStatusCmd(); - int GetPoolsInCluster(std::vector *phyPools, - std::vector *lgPools); - int GetSpaceInfo(SpaceInfo *spaceInfo); + int GetPoolsInCluster(std::vector* phyPools, + std::vector* lgPools); + int GetSpaceInfo(SpaceInfo* spaceInfo); int PrintClusterStatus(); int PrintMdsStatus(); int PrintEtcdStatus(); @@ -151,67 +158,67 @@ class StatusTool : public CurveTool { int ClientListCmd(); int ScanStatusCmd(); void PrintCsLeftSizeStatistics( - const std::string &name, - const std::map> &poolLeftSize); + const std::string& name, + const std::map>& poolLeftSize); int PrintSnapshotCloneStatus(); /** - * @brief 判断命令是否需要和etcd交互 - * @param command:执行的命令 - * @return 需要返回true,否则返回false + * @brief Determine whether the command needs to interact with etcd + * @param command: The command to execute + * @return true if etcd is needed, false otherwise */ - bool CommandNeedEtcd(const std::string &command); - + bool CommandNeedEtcd(const std::string& command); /** - * @brief 判断命令是否需要mds - * @param command:执行的命令 - * @return 需要返回true,否则返回false + * @brief Determine whether the command requires the mds + * @param command: The command to execute + * @return true if the mds is needed, false otherwise */ - bool CommandNeedMds(const std::string &command); + bool CommandNeedMds(const std::string& command); /** - * @brief 判断命令是否需要snapshot clone server - * @param command:执行的命令 - * @return 需要返回true,否则返回false + * @brief Determine whether the command requires the snapshot clone server + * @param command: The command to execute + * @return true if the snapshot clone server is needed, false otherwise */ - bool CommandNeedSnapshotClone(const std::string &command); + bool CommandNeedSnapshotClone(const std::string& command); /** - * @brief 打印在线状态 - * @param name : 在线状态对应的名字 - * @param onlineStatus 在线状态的map + * @brief Print online status + * @param name: The name corresponding to the online status + * @param onlineStatus Map of online statuses */ - void PrintOnlineStatus(const std::string &name, - const std::map &onlineStatus); + void PrintOnlineStatus(const std::string& name, + const std::map& onlineStatus); /** - * @brief 获取并打印mds version信息 + * @brief Get and print mds version information */ int GetAndPrintMdsVersion(); /** - * @brief 检查服务是否健康 - * @param name 服务名 + * @brief Check whether the service is healthy + * @param name Service name */ - bool CheckServiceHealthy(const ServiceName &name); + bool CheckServiceHealthy(const ServiceName& name); private: - // 向mds发送RPC的client + // Client for sending RPCs to the mds std::shared_ptr mdsClient_; - // Copyset检查工具,用于检查集群和chunkserver的健康状态 + // Copyset checking tool, used to check the health status of the cluster + // and chunkservers std::shared_ptr copysetCheckCore_; - // etcd client,用于调etcd API获取状态 + // etcd client, used to call the etcd API to obtain status std::shared_ptr etcdClient_; - // 用于获取metric + // Used to obtain metrics std::shared_ptr metricClient_; - // 用于获取snapshot clone的状态 + // Used to obtain the status of the snapshot clone server std::shared_ptr snapshotClient_; - // version client,用于获取version信息 + // Version client, used to obtain version information std::shared_ptr versionTool_; - // mds是否初始化过 + // Whether the mds has been initialized bool mdsInited_; - // etcd是否初始化过 + // Whether etcd has been initialized bool etcdInited_; // Is there a snapshot service or not bool noSnapshotServer_;
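A minimal usage sketch of the public interface declared above; the Dispatch wrapper is hypothetical, only SupportCommand(), PrintHelp(), and RunCommand() come from this header:

    // Dispatch one subcommand through StatusTool, falling back to help output.
    int Dispatch(StatusTool* tool, const std::string& cmd) {
        if (!StatusTool::SupportCommand(cmd)) {
            tool->PrintHelp(cmd);
            return -1;
        }
        return tool->RunCommand(cmd);  // 0 on success, -1 on failure
    }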
diff --git a/src/tools/version_tool.cpp b/src/tools/version_tool.cpp index 6e519bab4a..42b1d3e9a5 100644 --- a/src/tools/version_tool.cpp +++ b/src/tools/version_tool.cpp @@ -48,8 +48,8 @@ int VersionTool::GetAndCheckMdsVersion(std::string* version, return ret; } -int VersionTool::GetAndCheckChunkServerVersion(std::string* version, - std::vector* failedList) { +int VersionTool::GetAndCheckChunkServerVersion( + std::string* version, std::vector* failedList) { std::vector chunkServers; int res = mdsClient_->ListChunkServersInCluster(&chunkServers); if (res != 0) { @@ -78,8 +78,8 @@ int VersionTool::GetAndCheckChunkServerVersion(std::string* version, return ret; } -int VersionTool::GetAndCheckSnapshotCloneVersion(std::string* version, - std::vector* failedList) { +int VersionTool::GetAndCheckSnapshotCloneVersion( + std::string* version, std::vector* failedList) { const auto& dummyServerMap = snapshotClient_->GetDummyServerMap(); std::vector dummyServers; for (const auto& item : dummyServerMap) { @@ -123,9 +123,8 @@ void VersionTool::FetchClientProcessMap(const std::vector& addrVec, ProcessMapType* processMap) { for (const auto& addr : addrVec) { std::string cmd; - MetricRet res = metricClient_->GetMetric(addr, - kProcessCmdLineMetricName, - &cmd); + MetricRet res = + metricClient_->GetMetric(addr, kProcessCmdLineMetricName, &cmd); if (res != MetricRet::kOK) { continue; } @@ -156,10 +155,11 @@ void VersionTool::GetVersionMap(const std::vector& addrVec, failedList->clear(); for (const auto& addr : addrVec) { std::string version; - MetricRet res = metricClient_->GetMetric(addr, kCurveVersionMetricName, - &version); + MetricRet res = + metricClient_->GetMetric(addr, kCurveVersionMetricName, &version); if (res != MetricRet::kOK) { - // 0.0.5.2版本之前没有curve_version的metric,因此再判断一下 + // There is no curve_version metric before version 0.0.5.2, so + // treat kNotFound as an old version rather than a failure. if (res == MetricRet::kNotFound) { version = kOldVersion; } else {
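The consistency check in the GetAndCheck*Version methods above boils down to whether every reachable service reported the same version string. A sketch of that final step, assuming "consistent" means exactly one distinct version appears in the VersionMapType built by GetVersionMap():

    // Consistent iff all responding services agree on a single version.
    bool IsVersionConsistent(const VersionMapType& versionMap,
                             std::string* version) {
        if (versionMap.size() != 1) {
            return false;  // zero or several distinct versions were reported
        }
        *version = versionMap.begin()->first;
        return true;
    }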
diff --git a/src/tools/version_tool.h b/src/tools/version_tool.h index 9231d1e4fc..eb293433e6 100644 --- a/src/tools/version_tool.h +++ b/src/tools/version_tool.h @@ -23,13 +23,14 @@ #ifndef SRC_TOOLS_VERSION_TOOL_H_ #define SRC_TOOLS_VERSION_TOOL_H_ -#include #include -#include #include +#include +#include + +#include "src/common/string_util.h" #include "src/tools/mds_client.h" #include "src/tools/metric_client.h" -#include "src/common/string_util.h" #include "src/tools/snapshot_clone_client.h" namespace curve { namespace tool { @@ -49,95 +50,97 @@ class VersionTool { explicit VersionTool(std::shared_ptr mdsClient, std::shared_ptr metricClient, std::shared_ptr snapshotClient) - : mdsClient_(mdsClient), snapshotClient_(snapshotClient), + : mdsClient_(mdsClient), + snapshotClient_(snapshotClient), metricClient_(metricClient) {} virtual ~VersionTool() {} /** - * @brief 获取mds的版本并检查版本一致性 - * @param[out] version 版本 - * @return 成功返回0,失败返回-1 + * @brief Get the version of the mds and check version consistency + * @param[out] version The version + * @return 0 on success, -1 on failure */ - virtual int GetAndCheckMdsVersion(std::string *version, - std::vector *failedList); + virtual int GetAndCheckMdsVersion(std::string* version, + std::vector* failedList); /** - * @brief 获取chunkserver的版本并检查版本一致性 - * @param[out] version 版本 - * @return 成功返回0,失败返回-1 + * @brief Get the version of the chunkservers and check version consistency + * @param[out] version The version + * @return 0 on success, -1 on failure */ - virtual int - GetAndCheckChunkServerVersion(std::string *version, - std::vector *failedList); + virtual int GetAndCheckChunkServerVersion( + std::string* version, std::vector* failedList); /** - * @brief 获取snapshot clone server的版本 - * @param[out] version 版本 - * @return 成功返回0,失败返回-1 + * @brief Get the version of the snapshot clone server + * @param[out] version The version + * @return 0 on success, -1 on failure */ - virtual int - GetAndCheckSnapshotCloneVersion(std::string *version, - std::vector *failedList); + virtual int GetAndCheckSnapshotCloneVersion( + std::string* version, std::vector* failedList); /** - * @brief 获取client的版本 - * @param[out] versionMap process->版本->地址的映射表 - * @return 成功返回0,失败返回-1 + * @brief Get the version of the clients + * @param[out] versionMap Mapping table of process -> version -> addresses + * @return 0 on success, -1 on failure */ - virtual int GetClientVersion(ClientVersionMapType *versionMap); + virtual int GetClientVersion(ClientVersionMapType* versionMap); /** - * @brief 打印每个version对应的地址 - * @param versionMap version到地址列表的map + * @brief Print the addresses corresponding to each version + * @param versionMap Map from version to address list */ - static void PrintVersionMap(const VersionMapType &versionMap); + static void PrintVersionMap(const VersionMapType& versionMap); /** - * @brief 打印访问失败的地址 - * @param failedList 访问失败的地址列表 + * @brief Print the addresses that failed to be accessed + * @param failedList List of addresses that failed to be accessed */ - static void PrintFailedList(const std::vector &failedList); + static void PrintFailedList(const std::vector& failedList); private: /** - * @brief 获取addrVec对应地址的version,并把version和地址对应关系存在map中 - * @param addrVec 地址列表 - * @param[out] versionMap version到地址的map - * @param[out] failedList 查询version失败的地址列表 + * @brief Get the version at each address in addrVec and store the + * version-to-address mapping in the map + * @param addrVec Address list + * @param[out] versionMap Map from version to addresses + * @param[out] failedList List of addresses whose version query failed */ - void GetVersionMap(const std::vector &addrVec, - VersionMapType *versionMap, - std::vector *failedList); + void GetVersionMap(const std::vector& addrVec, + VersionMapType* versionMap, + std::vector* failedList); /** - * @brief 获取addrVec对应地址的version,并把version和地址对应关系存在map中 - * @param addrVec 地址列表 - * @param[out] processMap 不同的process对应的client的地址列表 + * @brief Fetch the start command line at each address in addrVec and + * group the client addresses by process in the map + * @param addrVec Address list + * @param[out] processMap Lists of client addresses grouped by process */ - void FetchClientProcessMap(const std::vector &addrVec, - ProcessMapType *processMap); + void FetchClientProcessMap(const std::vector& addrVec, + ProcessMapType* processMap); /** - * @brief 从启动server的命令行获取对应的程序的名字 - * 比如nebd的命令行为 + * @brief Get the program name from the command line that started the + * server. For example, nebd's command line is * process_cmdline : "/usr/bin/nebd-server * -confPath=/etc/nebd/nebd-server.conf * -log_dir=/data/log/nebd/server * -graceful_quit_on_sigterm=true * -stderrthreshold=3 * " - * 那么我们要解析出的名字是nebd-server - * @param addrVec 地址列表 - * @return 进程的名字 + * so the name we want to extract is nebd-server + * @param cmd The process's start command line + * @return The name of the process */ - std::string GetProcessNameFromCmd(const std::string &cmd); private: - // 向mds发送RPC的client + std::string GetProcessNameFromCmd(const std::string& cmd); private: + // Client for sending RPCs to the mds std::shared_ptr mdsClient_; - // 用于获取snapshotClone状态 + // Used to obtain snapshot clone status std::shared_ptr snapshotClient_; - // 获取metric的client + // Client for obtaining metrics std::shared_ptr metricClient_; }; diff --git a/test/chunkserver/braft_cli_service2_test.cpp b/test/chunkserver/braft_cli_service2_test.cpp index cc97980aa2..110a0923b8 100644 --- a/test/chunkserver/braft_cli_service2_test.cpp +++ b/test/chunkserver/braft_cli_service2_test.cpp @@ -20,25 +20,26 @@ * Author: wudemiao */ -#include -#include -#include -#include +#include "src/chunkserver/braft_cli_service2.h" + #include #include #include +#include +#include +#include +#include #include +#include
"proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/braft_cli_service2.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" -#include "src/common/uuid.h" #include "src/common/timeutility.h" +#include "src/common/uuid.h" #include "src/fs/local_filesystem.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -48,10 +49,12 @@ using curve::common::UUIDGenerator; class BraftCliService2Test : public testing::Test { protected: static void SetUpTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "SetUpTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "SetUpTestCase"; } static void TearDownTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "TearDownTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "TearDownTestCase"; } virtual void SetUp() { peer1.set_address("127.0.0.1:9310:0"); @@ -75,10 +78,10 @@ class BraftCliService2Test : public testing::Test { } public: - const char *ip = "127.0.0.1"; - int port = 9310; - const char *confs = "127.0.0.1:9310:0,127.0.0.1:9311:0,127.0.0.1:9312:0"; - int snapshotInterval = 3600; // 防止自动打快照 + const char* ip = "127.0.0.1"; + int port = 9310; + const char* confs = "127.0.0.1:9310:0,127.0.0.1:9311:0,127.0.0.1:9312:0"; + int snapshotInterval = 3600; // Prevent automatic snapshot taking int electionTimeoutMs = 3000; pid_t pid1; @@ -128,12 +131,8 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dirMap[peer1.address()]; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -143,12 +142,8 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dirMap[peer2.address()]; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -158,16 +153,12 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dirMap[peer3.address()]; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ WaitpidGuard waitpidGuard(pid1, pid2, pid3); ::usleep(1.2 * 1000 * electionTimeoutMs); @@ -182,15 +173,15 @@ TEST_F(BraftCliService2Test, basic2) { options.timeout_ms = 3000; options.max_retry = 3; - /* add peer - 非法copyset */ + /*Add peer - illegal copyset*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); AddPeerRequest2 request; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_leader(leaderPeer); @@ -210,10 +201,10 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* add peer - 
非法peerid */ + /*Add peer - illegal peer id*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -223,7 +214,7 @@ TEST_F(BraftCliService2Test, basic2) { request.set_allocated_leader(leaderPeer); *leaderPeer = gLeader; request.set_allocated_addpeer(peer); - // request中的peer id是非法的 + // The peer id in the request is illegal peer->set_address("127.0.0"); AddPeerResponse2 response; @@ -237,13 +228,14 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "add peer: " << cntl.ErrorText(); } - /* add peer - 发送给不是leader的peer */ + /*Add peer - sent to peers who are not leaders*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); PeerId leaderId; LOG(INFO) << "true leader is: " << gLeader.address(); - // 找一个不是leader的peer,然后将配置变更请求发送给它处理 + // Find a peer that is not a leader and send the configuration change + // request to it for processing if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { leaderId.parse(peer2.address()); *leaderPeer = peer2; @@ -274,15 +266,15 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* remove peer - 非法copyset */ + /*Remove peer - illegal copyset*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); RemovePeerRequest2 request; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_leader(leaderPeer); @@ -302,10 +294,10 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* remove peer - 非法peer id */ + /*Remove peer - illegal peer id*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -315,7 +307,7 @@ TEST_F(BraftCliService2Test, basic2) { request.set_allocated_leader(leaderPeer); *leaderPeer = gLeader; request.set_allocated_removepeer(peer); - // request中的peer id是非法的 + // The peer id in the request is illegal peer->set_address("127.0.0"); RemovePeerResponse2 response; @@ -329,15 +321,15 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "remove peer: " << cntl.ErrorText(); } - /* remove peer - 发送给不是leader的peer */ + /*Remove peer - sent to peers who are not leaders*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); PeerId leaderId; LOG(INFO) << "true leader is: " << gLeader.address(); - // 找一个不是leader的peer,然后将配置变更请求发送给它处理 - if (0 - == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { + // Find a peer that is not a leader and send the configuration change + // request to it for processing + if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { leaderId.parse(peer2.address()); *leaderPeer = peer2; } else { @@ -367,15 +359,15 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* transfer leader - 非法copyset */ + /* Transfer leader - illegal copyset*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + 
Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); TransferLeaderRequest2 request; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_leader(leaderPeer); @@ -395,8 +387,8 @@ TEST_F(BraftCliService2Test, basic2) { } /* transfer leader to leader */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -417,10 +409,10 @@ TEST_F(BraftCliService2Test, basic2) { stub.TransferLeader(&cntl, &request, &response, NULL); ASSERT_FALSE(cntl.Failed()); } - /* transfer leader - 非法peer */ + /*Transfer leader - illegal peer*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -430,7 +422,7 @@ TEST_F(BraftCliService2Test, basic2) { request.set_allocated_leader(leaderPeer); *leaderPeer = gLeader; request.set_allocated_transferee(peer); - // request中的peer id是非法的 + // The peer id in the request is illegal peer->set_address("127.0.0"); TransferLeaderResponse2 response; @@ -444,18 +436,17 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "Transfer leader peer: " << cntl.ErrorText(); } - /* get leader - 非法copyset */ + /*Get leader - illegal copyset*/ { PeerId leaderId = leaderId; brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); - GetLeaderRequest2 request; GetLeaderResponse2 response; brpc::Controller cntl; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); @@ -467,14 +458,13 @@ TEST_F(BraftCliService2Test, basic2) { /* remove peer then add peer */ { // 1 remove peer - Peer *removePeer = new Peer(); - Peer *leaderPeer1 = new Peer(); - Peer *leaderPeer2 = new Peer(); - Peer *addPeer = new Peer(); + Peer* removePeer = new Peer(); + Peer* leaderPeer1 = new Peer(); + Peer* leaderPeer2 = new Peer(); + Peer* addPeer = new Peer(); PeerId removePeerId; - // 找一个不是leader的peer,作为remove peer - if (0 - == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { + // Find a peer that is not a leader as a remove peer + if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { removePeerId.parse(peer2.address()); *removePeer = peer2; } else { @@ -508,7 +498,6 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_FALSE(cntl1.Failed()); ASSERT_EQ(0, cntl1.ErrorCode()); - // add peer AddPeerRequest2 request2; request2.set_logicpoolid(logicPoolId); @@ -529,17 +518,17 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_FALSE(cntl2.Failed()); ASSERT_EQ(0, cntl2.ErrorCode()); } - /* snapshot - 非法copyset */ + /*Snapshot - illegal copyset*/ { PeerId peer(peer1.address()); brpc::Channel channel; ASSERT_EQ(0, channel.Init(peer.addr, NULL)); SnapshotRequest2 request; - /* 非法 copyset */ + /*Illegal copyset*/ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); - Peer *peerPtr = new Peer(); + Peer* peerPtr = new Peer(); *peerPtr = peer1; request.set_allocated_peer(peerPtr); @@ -557,11 +546,12 @@ TEST_F(BraftCliService2Test, basic2) { } /* snapshot - normal */ { - // 初始状态快照不为空 + // The initial state snapshot is not empty std::string copysetDataDir = dirMap[gLeader.address()] + "/" + - ToGroupId(logicPoolId, 
copysetId) + "/" + RAFT_LOG_DIR; + ToGroupId(logicPoolId, copysetId) + "/" + + RAFT_LOG_DIR; std::shared_ptr fs( - LocalFsFactory::CreateFs(curve::fs::FileSystemType::EXT4, "")); + LocalFsFactory::CreateFs(curve::fs::FileSystemType::EXT4, "")); std::vector files; fs->List(copysetDataDir.c_str(), &files); ASSERT_GE(files.size(), 1); @@ -574,7 +564,7 @@ TEST_F(BraftCliService2Test, basic2) { SnapshotRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *peerPtr = new Peer(); + Peer* peerPtr = new Peer(); peerPtr->set_address(leaderId.to_string()); request.set_allocated_peer(peerPtr); @@ -586,19 +576,20 @@ TEST_F(BraftCliService2Test, basic2) { LOG(INFO) << "Start do snapshot"; CliService2_Stub stub(&channel); stub.Snapshot(&cntl, &request, &response, NULL); - ASSERT_FALSE(cntl.Failed()) << "Do snapshot fail, error: " - << cntl.ErrorText(); - // 需要连续打两次快照才能删除第一次快照时的log + ASSERT_FALSE(cntl.Failed()) + << "Do snapshot fail, error: " << cntl.ErrorText(); + // Two consecutive snapshots are required to delete the log from the + // first snapshot sleep(5); cntl.Reset(); LOG(INFO) << "Start do snapshot"; stub.Snapshot(&cntl, &request, &response, NULL); - ASSERT_FALSE(cntl.Failed()) << "Do snapshot fail, error: " - << cntl.ErrorText(); + ASSERT_FALSE(cntl.Failed()) + << "Do snapshot fail, error: " << cntl.ErrorText(); for (int i = 0; i < 60; ++i) { files.clear(); fs->List(copysetDataDir.c_str(), &files); - // 打完快照应该只剩下meta信息 + // After taking the snapshot, only meta information should be left if (files.size() == 1) { break; } @@ -619,18 +610,18 @@ TEST_F(BraftCliService2Test, basic2) { CliService2_Stub stub(&channel); stub.SnapshotAll(&cntl, &request, &response, NULL); - ASSERT_FALSE(cntl.Failed()) << "Do snapshot all fail, error: " - << cntl.ErrorText(); + ASSERT_FALSE(cntl.Failed()) + << "Do snapshot all fail, error: " << cntl.ErrorText(); } - /* reset peer - 非法 copyset */ + /*Reset peer - illegal copyset*/ { - Peer *targetPeer = new Peer(); + Peer* targetPeer = new Peer(); *targetPeer = peer1; PeerId peer(peer1.address()); brpc::Channel channel; ASSERT_EQ(0, channel.Init(peer.addr, NULL)); ResetPeerRequest2 request; - /* 非法 copyset */ + /*Illegal copyset*/ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_requestpeer(targetPeer); @@ -646,9 +637,9 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* reset peer - new peer为空 */ + /*Reset peer - new peer is empty*/ { - Peer *targetPeer = new Peer(); + Peer* targetPeer = new Peer(); *targetPeer = peer1; PeerId peer(peer1.address()); brpc::Channel channel; @@ -669,7 +660,7 @@ TEST_F(BraftCliService2Test, basic2) { } /* reset peer - normal */ { - Peer *targetPeer = new Peer(); + Peer* targetPeer = new Peer(); *targetPeer = peer1; PeerId peer(peer1.address()); brpc::Channel channel; diff --git a/test/chunkserver/braft_cli_service_test.cpp b/test/chunkserver/braft_cli_service_test.cpp index 50f04588af..63a83cfe9d 100644 --- a/test/chunkserver/braft_cli_service_test.cpp +++ b/test/chunkserver/braft_cli_service_test.cpp @@ -20,21 +20,22 @@ * Author: wudemiao */ -#include -#include -#include -#include +#include "src/chunkserver/braft_cli_service.h" + #include #include #include +#include +#include +#include +#include #include +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include 
"src/chunkserver/braft_cli_service.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" #include "test/chunkserver/chunkserver_test_util.h" namespace curve { @@ -43,10 +44,12 @@ namespace chunkserver { class BraftCliServiceTest : public testing::Test { protected: static void SetUpTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "SetUpTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "SetUpTestCase"; } static void TearDownTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "TearDownTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "TearDownTestCase"; } virtual void SetUp() { Exec("mkdir 6"); @@ -68,9 +71,9 @@ class BraftCliServiceTest : public testing::Test { butil::AtExitManager atExitManager; TEST_F(BraftCliServiceTest, basic) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9015; - const char *confs = "127.0.0.1:9015:0,127.0.0.1:9016:0,127.0.0.1:9017:0"; + const char* confs = "127.0.0.1:9015:0,127.0.0.1:9016:0,127.0.0.1:9017:0"; int snapshotInterval = 600; PeerId peer1("127.0.0.1:9015:0"); PeerId peer2("127.0.0.1:9016:0"); @@ -87,12 +90,8 @@ TEST_F(BraftCliServiceTest, basic) { std::cerr << "fork chunkserver 1 failed" << std::endl; ASSERT_TRUE(false); } else if (0 == pid1) { - const char *copysetdir = "local://./6"; - StartChunkserver(ip, - port + 0, - copysetdir, - confs, - snapshotInterval, + const char* copysetdir = "local://./6"; + StartChunkserver(ip, port + 0, copysetdir, confs, snapshotInterval, electionTimeoutMs); return; } @@ -102,12 +101,8 @@ TEST_F(BraftCliServiceTest, basic) { std::cerr << "fork chunkserver 2 failed" << std::endl; ASSERT_TRUE(false); } else if (0 == pid2) { - const char *copysetdir = "local://./7"; - StartChunkserver(ip, - port + 1, - copysetdir, - confs, - snapshotInterval, + const char* copysetdir = "local://./7"; + StartChunkserver(ip, port + 1, copysetdir, confs, snapshotInterval, electionTimeoutMs); return; } @@ -117,17 +112,13 @@ TEST_F(BraftCliServiceTest, basic) { std::cerr << "fork chunkserver 3 failed" << std::endl; ASSERT_TRUE(false); } else if (0 == pid3) { - const char *copysetdir = "local://./8"; - StartChunkserver(ip, - port + 2, - copysetdir, - confs, - snapshotInterval, + const char* copysetdir = "local://./8"; + StartChunkserver(ip, port + 2, copysetdir, confs, snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /* Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -144,6 +135,7 @@ TEST_F(BraftCliServiceTest, basic) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -166,7 +158,7 @@ TEST_F(BraftCliServiceTest, basic) { options.timeout_ms = 1500; options.max_retry = 3; - /* add peer - 非法 copyset */ + /* Add peer - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; @@ -188,7 +180,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* add peer - 非法 peerid */ + /* add peer - illegal peerid */ { PeerId leaderId = leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leaderId); @@ -210,12 +202,12 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "add peer: " << cntl.ErrorText(); } - /* add peer - 发送给不是leader的peer */ + /* add peer - sent to peers who are not leader */ { PeerId leaderId; LOG(INFO) << "true leader is: " << leader.to_string(); - if (0 - == strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { + if (0 
== + strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { leaderId = peer2; } else { leaderId = peer1; @@ -240,13 +232,13 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* remove peer - 非法 copyset */ + /* remove peer - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); RemovePeerRequest request; - /* 非法 copyset */ + /* Illegal copyset */ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_leader_id(leaderId.to_string()); @@ -261,7 +253,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* remove peer - 非法 peer id */ + /* remove peer - illegal peer id */ { PeerId leaderId = leader; brpc::Channel channel; @@ -281,12 +273,12 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EINVAL, cntl.ErrorCode()); } - /* remove peer - 发送给不是 leader 的 peer */ + /* remove peer - sent to peers who are not leaders */ { PeerId leaderId; LOG(INFO) << "true leader is: " << leader.to_string(); - if (0 - == strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { + if (0 == + strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { leaderId = peer2; } else { leaderId = peer1; @@ -309,7 +301,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* transfer leader - 非法 copyset */ + /* transfer leader - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; @@ -346,7 +338,7 @@ TEST_F(BraftCliServiceTest, basic) { stub.transfer_leader(&cntl, &request, &response, NULL); ASSERT_FALSE(cntl.Failed()); } - /* transfer leader - 非法 peer */ + /* transfer leader - illegal peer */ { PeerId leaderId = leader; brpc::Channel channel; @@ -365,7 +357,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EINVAL, cntl.ErrorCode()); } - /* get leader - 非法 copyset */ + /* get leader - illegal copyset */ { PeerId leaderId = leaderId; brpc::Channel channel; diff --git a/test/chunkserver/chunk_service_test.cpp b/test/chunkserver/chunk_service_test.cpp index 3968766d91..c1191bde5b 100644 --- a/test/chunkserver/chunk_service_test.cpp +++ b/test/chunkserver/chunk_service_test.cpp @@ -20,24 +20,24 @@ * Author: wudemiao */ +#include "src/chunkserver/chunk_service.h" -#include -#include -#include -#include -#include #include #include #include +#include +#include +#include +#include +#include #include "include/chunkserver/chunkserver_common.h" +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" -#include "src/chunkserver/chunk_service.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -75,11 +75,10 @@ class ChunkserverTest : public testing::Test { butil::AtExitManager atExitManager; - TEST_F(ChunkserverTest, normal_read_write_test) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9020; - const char *confs = "127.0.0.1:9020:0,127.0.0.1:9021:0,127.0.0.1:9022:0"; + const char* confs = "127.0.0.1:9020:0,127.0.0.1:9021:0,127.0.0.1:9022:0"; int rpcTimeoutMs = 3000; int snapshotInterval = 600; @@ -96,12 +95,8 @@ TEST_F(ChunkserverTest, normal_read_write_test) { 
ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -111,12 +106,8 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -126,16 +117,12 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -152,6 +139,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -313,7 +301,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* delete 一个不存在的 chunk(重复删除) */ + /*Delete a non-existent chunk (duplicate deletion)*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -329,7 +317,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response.status()); } - /* Read 一个不存在的 Chunk */ + /*Read a non-existent Chunk*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -347,7 +335,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, response.status()); } - /* Applied index Read 一个不存在的 Chunk */ + /*Applied index Read a non-existent Chunk*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -416,9 +404,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { request.set_copysetid(copysetId); request.set_chunkid(chunkId); request.set_correctedsn(sn); - stub.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -435,9 +421,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { request.set_copysetid(copysetId); request.set_chunkid(chunkId); request.set_correctedsn(sn); - stub.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, @@ -467,7 +451,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(0, channel.Init(leader.addr, NULL)); ChunkService_Stub stub(&channel); - // get hash : 访问不存在的chunk + // Get hash: Access non-existent chunks { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -485,7 +469,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_STREQ("0", response.hash().c_str()); } - // get hash : 非法的offset和length + // Get hash: illegal offset and length { brpc::Controller cntl; 
cntl.set_timeout_ms(rpcTimeoutMs); @@ -560,7 +544,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(1, response.chunksn().size()); } - // get hash : 访问存在的chunk + // Get hash: Access existing chunks { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -579,7 +563,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { } } - /* 多 chunk read/write/delete */ + /*Multi chunk read/write/delete*/ { brpc::Channel channel; if (channel.Init(leader.addr, NULL) != 0) { @@ -685,7 +669,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* delete 一个不存在的 chunk(重复删除) */ + /*Delete a non-existent chunk (duplicate deletion)*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -703,7 +687,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { } } } - /* read 一个不存在的 chunk */ + /*Read a non-existent chunk*/ { brpc::Channel channel; uint32_t requestSize = kOpRequestAlignSize; @@ -770,7 +754,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* read 一个不存在的 chunk */ + /*Read a non-existent chunk*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); diff --git a/test/chunkserver/chunk_service_test2.cpp b/test/chunkserver/chunk_service_test2.cpp index 674220d91a..9d3c136e14 100644 --- a/test/chunkserver/chunk_service_test2.cpp +++ b/test/chunkserver/chunk_service_test2.cpp @@ -20,24 +20,23 @@ * Author: wudemiao */ - -#include -#include -#include -#include -#include #include #include #include +#include +#include +#include +#include +#include #include "include/chunkserver/chunkserver_common.h" +#include "proto/copyset.pb.h" +#include "src/chunkserver/chunk_service.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" -#include "src/chunkserver/chunk_service.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -76,9 +75,9 @@ class ChunkService2Test : public testing::Test { butil::AtExitManager atExitManager; TEST_F(ChunkService2Test, illegial_parameters_test) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9023; - const char *confs = "127.0.0.1:9023:0,127.0.0.1:9024:0,127.0.0.1:9025:0"; + const char* confs = "127.0.0.1:9023:0,127.0.0.1:9024:0,127.0.0.1:9025:0"; int rpcTimeoutMs = 3000; int snapshotInterval = 600; @@ -95,12 +94,8 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -110,12 +105,8 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -125,16 +116,12 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; 
- StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -151,6 +138,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -177,13 +165,13 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); - /* 非法参数 request 测试 */ + /*Illegal parameter request test*/ brpc::Channel channel; if (channel.Init(leader.addr, NULL) != 0) { LOG(ERROR) << "Fail to init channel to " << leader; } ChunkService_Stub stub(&channel); - /* read 溢出 */ + /*Read overflow*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -201,7 +189,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read offset没对齐 */ + /*Read offset not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -219,7 +207,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read size没对齐 */ + /*Read size not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -237,7 +225,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read copyset 不存在 */ + /*Read copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -256,7 +244,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* read snapshot 溢出 */ + /*Read snapshot overflow*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -274,7 +262,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot offset没对齐 */ + /*Read snapshot offset not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -293,7 +281,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot size没对齐 */ + /*Read snapshot size not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -312,7 +300,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot copyset 不存在 */ + /*Read snapshot copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -331,7 +319,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* write 溢出 */ + /*Write overflow*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -350,7 +338,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write offset没对齐 */ + /*Write offset not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -369,7 +357,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { 
ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write size没对齐 */ + /*Write size not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -388,7 +376,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write copyset 不存在 */ + /*The write copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -407,7 +395,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* delete copyset 不存在*/ + /*Delete copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -423,7 +411,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* delete snapshot copyset 不存在*/ + /*Delete snapshot copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -434,9 +422,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { request.set_copysetid(copysetId + 1); request.set_chunkid(chunkId); request.set_correctedsn(sn); - stub.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, @@ -456,7 +442,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* 不是 leader */ + /*Not a leader*/ { PeerId peer1; PeerId peer2; @@ -562,13 +548,12 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { class ChunkServiceTestClosure : public ::google::protobuf::Closure { public: - explicit ChunkServiceTestClosure(int sleepUs = 0) : sleep_(sleepUs) { - } + explicit ChunkServiceTestClosure(int sleepUs = 0) : sleep_(sleepUs) {} virtual ~ChunkServiceTestClosure() = default; void Run() override { if (0 != sleep_) { - // 睡眠一会方面测试,overload + // Sleep test, overload ::usleep(sleep_); LOG(INFO) << "return rpc"; } @@ -580,13 +565,12 @@ class ChunkServiceTestClosure : public ::google::protobuf::Closure { class UpdateEpochTestClosure : public ::google::protobuf::Closure { public: - explicit UpdateEpochTestClosure(int sleepUs = 0) : sleep_(sleepUs) { - } + explicit UpdateEpochTestClosure(int sleepUs = 0) : sleep_(sleepUs) {} virtual ~UpdateEpochTestClosure() = default; void Run() override { if (0 != sleep_) { - // 睡眠一会方面测试,overload + // Sleep test, overload ::usleep(sleep_); LOG(INFO) << "return rpc"; } @@ -602,12 +586,12 @@ TEST_F(ChunkService2Test, overload_test) { // inflight throttle uint64_t maxInflight = 0; - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service - CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance(); + CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance(); ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = &nodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle; @@ -690,9 +674,7 @@ TEST_F(ChunkService2Test, overload_test) { request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + 
chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } @@ -750,12 +732,12 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { // inflight throttle uint64_t maxInflight = 10; - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service - CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance(); + CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance(); ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = &nodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle; @@ -780,17 +762,17 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { }; std::vector threads; - // 启动10个线程,将chunkserver压满 + // Start 10 threads to fully load the chunkserver for (int i = 0; i < 10; ++i) { std::thread t1(writeFunc); threads.push_back(std::move(t1)); } - // 等待进程启动起来 + // Waiting for the process to start ::usleep(500 * 1000); ASSERT_FALSE(inflightThrottle->IsOverLoad()); - // 压满之后chunkserver后面收到的request都会被拒绝 + // All requests received after the chunkserver is filled will be rejected // write chunk { brpc::Controller cntl; @@ -863,9 +845,7 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } @@ -916,7 +896,8 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } - // 等待request处理完成,之后chunkserver又重新可以接收新的request + // Wait for the request processing to complete, and then chunkserver can + // receive new requests again for (auto it = threads.begin(); it != threads.end(); ++it) { it->join(); } @@ -995,9 +976,7 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, &done); ASSERT_NE(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } @@ -1055,12 +1034,12 @@ TEST_F(ChunkService2Test, CheckEpochTest) { // inflight throttle uint64_t maxInflight = 10000; - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service - CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance(); + CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance(); ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = &nodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle; @@ -1083,7 +1062,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_chunkid(chunkId); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - response.status()); + response.status()); } // write chunk request have epoch, but epoch map have no epoch @@ -1100,7 +1079,7 @@ 
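The two overload tests above exercise InflightThrottle from opposite ends: with maxInflight == 0 every request is rejected immediately, while with maxInflight == 10 rejection only starts once ten writes are already in flight, and stops again after the worker threads drain. A minimal sketch of the admission pattern, where only the IsOverLoad() call is taken from these tests and the surrounding handler shape is an assumption for illustration:

    // Hedged sketch of request admission; the handler shape is assumed.
    if (inflightThrottle->IsOverLoad()) {
        response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD);
        done->Run();  // reject up front, as the assertions above expect
        return;
    }
    // ...otherwise the request proceeds into the copyset node...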
TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - response.status()); + response.status()); } // update epoch map to {(1, 1) , (2, 2)} { @@ -1130,7 +1109,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - response.status()); + response.status()); } // write chunk check epoch failed { @@ -1146,7 +1125,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD, - response.status()); + response.status()); } // update epoch map to {(1, 2) , (2, 2)} @@ -1174,7 +1153,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD, - response.status()); + response.status()); } } diff --git a/test/chunkserver/chunkserver_helper_test.cpp b/test/chunkserver/chunkserver_helper_test.cpp index e9d538bf0c..d401a22185 100644 --- a/test/chunkserver/chunkserver_helper_test.cpp +++ b/test/chunkserver/chunkserver_helper_test.cpp @@ -20,14 +20,16 @@ * Author: lixiaocui */ -#include #include "src/chunkserver/chunkserver_helper.h" + +#include + #include "src/chunkserver/register.h" namespace curve { namespace chunkserver { TEST(ChunkServerMeta, test_encode_and_decode) { - // 1. 正常编解码 + // 1. Normal encoding and decoding ChunkServerMetadata metadata; metadata.set_version(CURRENT_METADATA_VERSION); metadata.set_id(1); @@ -43,13 +45,13 @@ TEST(ChunkServerMeta, test_encode_and_decode) { ASSERT_EQ(metadata.id(), metaOut.id()); ASSERT_EQ(metadata.token(), metaOut.token()); - // 2. 编码异常 + // 2. Encoding anomaly metadata.clear_token(); strOut.clear(); ASSERT_FALSE( ChunkServerMetaHelper::EncodeChunkServerMeta(metadata, &strOut)); - // 3. 解码异常 + // 3. 
Decoding exception metadata.set_token("hello"); metadata.set_checksum(9999); ASSERT_TRUE( diff --git a/test/chunkserver/chunkserver_service_test.cpp b/test/chunkserver/chunkserver_service_test.cpp index 106501e267..4b834a5037 100644 --- a/test/chunkserver/chunkserver_service_test.cpp +++ b/test/chunkserver/chunkserver_service_test.cpp @@ -20,40 +20,41 @@ * Author: lixiaocui1 */ -#include -#include +#include "src/chunkserver/chunkserver_service.h" + #include +#include #include +#include #include -#include "src/chunkserver/chunkserver_service.h" -#include "test/chunkserver/mock_copyset_node_manager.h" + #include "proto/chunkserver.pb.h" +#include "test/chunkserver/mock_copyset_node_manager.h" namespace curve { namespace chunkserver { -using ::testing::Return; using ::testing::_; +using ::testing::Return; TEST(ChunkServerServiceImplTest, test_ChunkServerStatus) { - // 启动ChunkServerService + // Start ChunkServerService auto server = new brpc::Server(); MockCopysetNodeManager* copysetNodeManager = new MockCopysetNodeManager(); ChunkServerServiceImpl* chunkserverService = new ChunkServerServiceImpl(copysetNodeManager); - ASSERT_EQ(0, - server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE)); + ASSERT_EQ( + 0, server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE)); ASSERT_EQ(0, server->Start("127.0.0.1", {5900, 5999}, nullptr)); auto listenAddr = butil::endpoint2str(server->listen_address()).c_str(); - brpc::Channel channel; ASSERT_EQ(0, channel.Init(listenAddr, NULL)); ChunkServerService_Stub stub(&channel); ChunkServerStatusRequest request; ChunkServerStatusResponse response; - // 1. 指定chunkserver加载copyset完成 + // 1. Specify chunkserver to load copyset complete { EXPECT_CALL(*copysetNodeManager, LoadFinished()) .WillOnce(Return(false)); @@ -63,23 +64,22 @@ TEST(ChunkServerServiceImplTest, test_ChunkServerStatus) { ASSERT_FALSE(response.copysetloadfin()); } - // 2. 指定chunkserver加载copyset未完成 + // 2. The specified chunkserver loading copyset did not complete { - EXPECT_CALL(*copysetNodeManager, LoadFinished()) - .WillOnce(Return(true)); + EXPECT_CALL(*copysetNodeManager, LoadFinished()).WillOnce(Return(true)); brpc::Controller cntl; stub.ChunkServerStatus(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_TRUE(response.copysetloadfin()); } - // 停止chunkserver service + // Stop chunkserver service server->Stop(0); server->Join(); delete server; server = nullptr; - // 3. 未获取到指定chunkserver加载copyset状态 + // 3. 
Unable to obtain the specified chunkserver loading copyset status { brpc::Controller cntl; stub.ChunkServerStatus(&cntl, &request, &response, nullptr); diff --git a/test/chunkserver/chunkserver_snapshot_test.cpp b/test/chunkserver/chunkserver_snapshot_test.cpp index b534ca2ee3..a05a9e6498 100644 --- a/test/chunkserver/chunkserver_snapshot_test.cpp +++ b/test/chunkserver/chunkserver_snapshot_test.cpp @@ -21,25 +21,25 @@ */ #include -#include #include +#include #include -#include "test/chunkserver/chunkserver_test_util.h" -#include "src/chunkserver/copyset_node_manager.h" +#include "proto/common.pb.h" +#include "proto/copyset.pb.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "proto/common.pb.h" -#include "proto/copyset.pb.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; static constexpr uint32_t kOpRequestAlignSize = 4096; @@ -64,7 +64,7 @@ class ChunkServerSnapshotTest : public testing::Test { Exec(TestCluster::RemoveCopysetDirCmd(peer3).c_str()); Exec(TestCluster::RemoveCopysetDirCmd(peer4).c_str()); /* wait for process exit */ - ::usleep(100*1000); + ::usleep(100 * 1000); } public: @@ -77,26 +77,22 @@ class ChunkServerSnapshotTest : public testing::Test { }; /** - * TODO(wudemiao) 后期将发 I/O 和验证再抽象一下 + * TODO(wudemiao) will further abstract I/O and verification in the later stage */ /** - * 正常 I/O 验证,先写进去,再读出来验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * Normal I/O verification, write it in first, then read it out for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -static void WriteThenReadVerify(PeerId leaderId, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +static void WriteThenReadVerify(PeerId leaderId, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, + int length, char fillCh, int loop) { brpc::Channel channel; uint64_t sn = 1; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); @@ -112,14 +108,13 @@ static void WriteThenReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(sn); cntl.request_attachment().resize(length, fillCh); stub.WriteChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode() + << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); @@ -134,13 +129,12 @@ static void WriteThenReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); 
request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode() + << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); @@ -152,22 +146,18 @@ static void WriteThenReadVerify(PeerId leaderId, } /** - * 正常 I/O 验证,read 数据验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * Normal I/O verification, read data verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -static void ReadVerify(PeerId leaderId, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +static void ReadVerify(PeerId leaderId, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { brpc::Channel channel; uint64_t sn = 1; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); @@ -181,16 +171,14 @@ static void ReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); std::string expectRead(length, fillCh); ASSERT_STREQ(expectRead.c_str(), cntl.response_attachment().to_string().c_str()); @@ -198,22 +186,18 @@ static void ReadVerify(PeerId leaderId, } /** - * 异常 I/O 验证,验证集群是否处于不可用状态 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * Abnormal I/O verification to verify if the cluster is in an unavailable state + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -static void ReadVerifyNotAvailable(PeerId leaderId, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +static void ReadVerifyNotAvailable(PeerId leaderId, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, + int length, char fillCh, int loop) { brpc::Channel channel; uint64_t sn = 1; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); @@ -228,30 +212,29 @@ static void ReadVerifyNotAvailable(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + 
request.set_offset(length * i); request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); LOG(INFO) << "read: " << CHUNK_OP_STATUS_Name(response.status()); ASSERT_TRUE(cntl.Failed() || - response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response.status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } } /** - * 验证copyset status是否符合预期 + * Verify if the copyset status meets expectations * @param peerId: peer id - * @param logicPoolID: 逻辑池id - * @param copysetId: 复制组id - * @param expectResp: 期待的copyset status + * @param logicPoolID: Logical Pool ID + * @param copysetId: Copy group ID + * @param expectResp: Expected copyset status */ -static void CopysetStatusVerify(PeerId peerId, - LogicPoolID logicPoolID, +static void CopysetStatusVerify(PeerId peerId, LogicPoolID logicPoolID, CopysetID copysetId, - CopysetStatusResponse *expectResp) { + CopysetStatusResponse* expectResp) { brpc::Channel channel; ASSERT_EQ(0, channel.Init(peerId.addr, NULL)); CopysetService_Stub stub(&channel); @@ -261,7 +244,7 @@ static void CopysetStatusVerify(PeerId peerId, cntl.set_timeout_ms(5000); request.set_logicpoolid(logicPoolID); request.set_copysetid(copysetId); - Peer *peer = new Peer(); + Peer* peer = new Peer(); request.set_allocated_peer(peer); peer->set_address(peerId.to_string()); request.set_queryhash(true); @@ -279,14 +262,13 @@ static void CopysetStatusVerify(PeerId peerId, } /** - * 验证几个副本的copyset status是否一致 - * @param peerIds: 待验证的peers - * @param logicPoolID: 逻辑池id - * @param copysetId: 复制组id + * Verify if the copyset status of several replicas is consistent + * @param peerIds: Peers to be verified + * @param logicPoolID: Logical Pool ID + * @param copysetId: Copy group ID */ -static void CopysetStatusVerify(const std::vector &peerIds, - LogicPoolID logicPoolID, - CopysetID copysetId, +static void CopysetStatusVerify(const std::vector& peerIds, + LogicPoolID logicPoolID, CopysetID copysetId, uint64_t expectEpoch = 0) { std::vector resps; for (PeerId peerId : peerIds) { @@ -300,7 +282,7 @@ static void CopysetStatusVerify(const std::vector &peerIds, cntl.set_timeout_ms(5000); request.set_logicpoolid(logicPoolID); request.set_copysetid(copysetId); - Peer *peer = new Peer(); + Peer* peer = new Peer(); request.set_allocated_peer(peer); peer->set_address(peerId.to_string()); request.set_queryhash(true); @@ -309,7 +291,8 @@ static void CopysetStatusVerify(const std::vector &peerIds, ASSERT_FALSE(cntl.Failed()); LOG(INFO) << peerId.to_string() << "'s status is: \n" << response.DebugString(); - // 多个副本的state是不一样的,因为有leader,也有follower + // The states of multiple replicas are different because there are + // leaders and followers response.clear_state(); response.clear_peer(); response.clear_firstindex(); @@ -333,9 +316,11 @@ static void CopysetStatusVerify(const std::vector &peerIds, butil::AtExitManager atExitManager; /** - * 验证1个节点的复制组是否能够正常提供服务 - * 1. 创建一个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 + * Verify whether the replication group of one node can provide services + * normally + * 1. Create a replication group for a replica + * 2. 
Wait for the leader to generate, write the data, and then read it out for + * verification */ TEST_F(ChunkServerSnapshotTest, OneNode) { LogicPoolID logicPoolId = 2; @@ -355,23 +340,18 @@ TEST_F(ChunkServerSnapshotTest, OneNode) { ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); ASSERT_STREQ(peer1.to_string().c_str(), leaderId.to_string().c_str()); - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); CopysetStatusResponse expectResp; - // read、write、1次配置变更 + // read, write, 1 configuration change int64_t commitedIndex = loop + 1; expectResp.set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); expectResp.set_state(braft::STATE_LEADER); - Peer *peer = new Peer(); + Peer* peer = new Peer(); expectResp.set_allocated_peer(peer); peer->set_address(peer1.to_string()); - Peer *leader = new Peer(); + Peer* leader = new Peer(); expectResp.set_allocated_leader(leader); leader->set_address(peer1.to_string()); expectResp.set_readonly(false); @@ -390,12 +370,15 @@ TEST_F(ChunkServerSnapshotTest, OneNode) { } /** - * 验证1个节点的关闭 leader 后重启是否能够正常服务 - * 1. 创建1个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether the shutdown of the leader and restart of one node can provide + * normal service + * 1. Create a replication group for 1 replica + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written + * before the read + * 5. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, OneNodeShutdown) { LogicPoolID logicPoolId = 2; @@ -415,45 +398,30 @@ TEST_F(ChunkServerSnapshotTest, OneNodeShutdown) { ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); ASSERT_STREQ(peer1.to_string().c_str(), leaderId.to_string().c_str()); - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); ASSERT_EQ(0, cluster.ShutdownPeer(peer1)); - // 测试发现集群不可用 - ReadVerifyNotAvailable(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); + // Testing found that the cluster is not available + ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, chunkId, length, + ch, 1); ASSERT_EQ(0, cluster.StartPeer(peer1)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); ASSERT_STREQ(peer1.to_string().c_str(), leaderId.to_string().c_str()); ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); CopysetStatusResponse expectResp; int64_t commitedIndex = 2 * loop + 2; expectResp.set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); expectResp.set_state(braft::STATE_LEADER); - Peer *peer = new Peer(); + Peer* peer = new Peer(); expectResp.set_allocated_peer(peer); peer->set_address(peer1.to_string()); - Peer *leader = new Peer(); + Peer* leader = new Peer(); expectResp.set_allocated_leader(leader); leader->set_address(peer1.to_string()); expectResp.set_readonly(false); @@ -473,9 +441,10 @@ TEST_F(ChunkServerSnapshotTest, OneNodeShutdown) { } /** - * 验证2个节点是否能够正常提供服务 - * 1. 
创建2个副本的复制组
- * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍
+ * Verify whether two nodes can provide services normally
+ * 1. Create a replication group of 2 replicas
+ * 2. Wait for a leader to be elected, write data, then read it back for
+ * verification
 */
TEST_F(ChunkServerSnapshotTest, TwoNodes) {
    LogicPoolID logicPoolId = 2;
@@ -498,12 +467,7 @@ TEST_F(ChunkServerSnapshotTest, TwoNodes) {
     PeerId leaderId;
     ASSERT_EQ(0, cluster.WaitLeader(&leaderId));

-    WriteThenReadVerify(leaderId,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch,
+    WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch,
                         loop);

     ::usleep(2000 * 1000);
@@ -511,12 +475,15 @@
 }

 /**
- * 验证2个节点的关闭非 leader 节点 后重启是否能够正常服务
- * 1. 创建2个副本的复制组
- * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍
- * 3. shutdown 非 leader,然后再拉起来
- * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍
- * 5. 再 write 数据,再 read 出来验证一遍
+ * Verify that a 2-replica group still serves normally after a non-leader
+ * peer is shut down and restarted
+ * 1. Create a replication group of 2 replicas
+ * 2. Wait for a leader to be elected, write data, then read it back for
+ * verification
+ * 3. Shut down a non-leader peer, then restart it
+ * 4. Wait for a leader to be elected, then read back and verify the data
+ * written before
+ * 5. Write new data, then read it back for verification
 */
TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownOnePeer) {
    LogicPoolID logicPoolId = 2;
@@ -539,19 +506,14 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownOnePeer) {
     PeerId leaderId;
     ASSERT_EQ(0, cluster.WaitLeader(&leaderId));

-    // 发起 read/write
-    WriteThenReadVerify(leaderId,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch,
+    // Initiate read/write
+    WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch,
                         loop);

-    // shutdown 某个非 leader 的 peer
+    // Shut down a non-leader peer
     PeerId shutdownPeerid;
-    if (0 == ::strcmp(leaderId.to_string().c_str(),
-                      peer1.to_string().c_str())) {
+    if (0 ==
+        ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) {
         shutdownPeerid = peer2;
     } else {
         shutdownPeerid = peer1;
@@ -563,40 +525,33 @@

     ::usleep(2000 * electionTimeoutMs);

-    // 测试发现集群不可用
-    ReadVerifyNotAvailable(leaderId,
-                           logicPoolId,
-                           copysetId,
-                           chunkId,
-                           length,
-                           ch,
-                           1);
+    // Verify that the cluster is now unavailable
+    ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, chunkId, length,
+                           ch, 1);

     ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid));
     ASSERT_EQ(0, cluster.WaitLeader(&leaderId));

-    // 读出来验证一遍
+    // Read the data back and verify it
     ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop);
-    // 再次发起 read/write
-    WriteThenReadVerify(leaderId,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch + 1,
-                        loop);
+    // Initiate read/write again
+    WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length,
+                        ch + 1, loop);

     ::usleep(2000 * 1000);
     CopysetStatusVerify(peers, logicPoolId, copysetId, 1);
 }

 /**
- * 验证2个节点的关闭 leader 后重启是否能够正常服务
- * 1. 创建2个副本的复制组
- * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍
- * 3. shutdown leader,然后再拉起来
- * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍
- * 5. 再 write 数据,再 read 出来验证一遍
+ * Verify that a 2-replica group still serves normally after the leader is
+ * shut down and restarted
+ * 1. Create a replication group of 2 replicas
+ * 2. Wait for a leader to be elected, write data, then read it back for
+ * verification
+ * 3. Shut down the leader, then restart it
+ * 4. Wait for a leader to be elected, then read back and verify the data
+ * written before
+ * 5. Write new data, then read it back for verification
 */
TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownLeader) {
    LogicPoolID logicPoolId = 2;
@@ -619,48 +574,34 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownLeader) {
     PeerId leaderId;
     ASSERT_EQ(0, cluster.WaitLeader(&leaderId));

-    // 发起 read/write
-    WriteThenReadVerify(leaderId,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch,
+    // Initiate read/write
+    WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch,
                         loop);

     // shutdown leader
     ASSERT_EQ(0, cluster.ShutdownPeer(leaderId));
-    // 测试发现集群不可用
-    ReadVerifyNotAvailable(leaderId,
-                           logicPoolId,
-                           copysetId,
-                           chunkId,
-                           length,
-                           ch,
-                           1);
+    // Verify that the cluster is now unavailable
+    ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, chunkId, length,
+                           ch, 1);

     ASSERT_EQ(0, cluster.StartPeer(leaderId));
     ASSERT_EQ(0, cluster.WaitLeader(&leaderId));

-    // 读出来验证一遍
+    // Read the data back and verify it
     ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop);
-    // 再次发起 read/write
-    WriteThenReadVerify(leaderId,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch + 1,
-                        loop);
+    // Initiate read/write again
+    WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length,
+                        ch + 1, loop);

     ::usleep(2000 * 1000);
     CopysetStatusVerify(peers, logicPoolId, copysetId, 2);
 }

 /**
- * 验证3个节点是否能够正常提供服务
- * 1. 创建3个副本的复制组
- * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍
+ * Verify that a 3-replica group can provide services normally
+ * 1. Create a replication group of 3 replicas
+ * 2. Wait for a leader to be elected, write data, then read it back for
+ * verification
 */
TEST_F(ChunkServerSnapshotTest, ThreeNodes) {
    LogicPoolID logicPoolId = 2;
@@ -685,26 +626,24 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodes) {
     PeerId leaderId;
     ASSERT_EQ(0, cluster.WaitLeader(&leaderId));

-    // 再次发起 read/write
-    WriteThenReadVerify(leaderId,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch + 1,
-                        loop);
+    // Initiate read/write again
+    WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length,
+                        ch + 1, loop);

     ::usleep(2000 * 1000);
     CopysetStatusVerify(peers, logicPoolId, copysetId, 1);
 }

 /**
- * 验证3个节点的关闭非 leader 节点 后重启是否能够正常服务
- * 1. 创建3个副本的复制组
- * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍
- * 3. shutdown 非 leader,然后再拉起来
- * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍
- * 5. 再 write 数据,再 read 出来验证一遍
+ * Verify that a 3-replica group still serves normally after a non-leader
+ * peer is shut down and restarted
+ * 1. Create a replication group of 3 replicas
+ * 2. Wait for a leader to be elected, write data, then read it back for
+ * verification
+ * 3. Shut down a non-leader peer, then restart it
+ * 4. Wait for a leader to be elected, then read back and verify the data
+ * written before
+ * 5. 
Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownOnePeer) { LogicPoolID logicPoolId = 2; @@ -729,19 +668,14 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownOnePeer) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { shutdownPeerid = peer2; } else { shutdownPeerid = peer1; @@ -750,28 +684,26 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownOnePeer) { ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的关闭 leader 节点 后重启是否能够正常服务 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether the shutdown of the leader node and restart of three nodes can + * provide normal service + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written + * before the read + * 5. 
Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownLeader) { LogicPoolID logicPoolId = 2; @@ -796,62 +728,49 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownLeader) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // shutdown leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderId)); - // 测试发现集群暂时不可用 - ReadVerifyNotAvailable(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); + // Testing found that the cluster is temporarily unavailable + ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, chunkId, length, + ch, 1); ASSERT_EQ(0, cluster.StartPeer(leaderId)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 5. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 6. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 7. transfer leader 到shut down 的peer 上 - * 8. 在 read 之前写入的数据验证 - * 9. 再 write 数据,再 read 出来验证一遍 + * Verify the shutdown of non leader nodes on three nodes, restart, and control + * the recovery from install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown non leader + * 4. Then sleep exceeds one snapshot interval and write read data + * 5. Then sleep for more than one snapshot interval and write read data; 4,5 + * two-step It is to ensure that at least two snapshots are taken, so that when + * the node restarts again, it must pass the install snapshot, Because the log + * has been deleted + * 6. Wait for the leader to be generated, and then verify the data written + * before the read + * 7. transfer leader to shut down peer + * 8. Verification of data written before read + * 9. 
Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -877,19 +796,14 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { shutdownPeerid = peer2; } else { shutdownPeerid = peer1; @@ -900,40 +814,25 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { leaderId.to_string().c_str())); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid)); - // wait snapshot, 保证能够触发安装快照 - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 2, - loop); - - // restart, 需要从 install snapshot 恢复 + // Wait snapshot to ensure that the installation snapshot can be triggered + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 2, loop); + + // Restart, needs to be restored from install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 2, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 3, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 3, loop); // Wait shutdown peer recovery, and then transfer leader to it ::sleep(3); @@ -944,10 +843,7 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - shutdownPeerid, + status = TransferLeader(logicPoolId, copysetId, conf, shutdownPeerid, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderId); @@ -961,37 +857,35 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), shutdownPeerid.to_string().c_str())); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 3, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 4, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 4, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 
等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. read 之前 write 的数据验证一遍 - * 5. 再 write 数据,然后 read 出来验证一遍 - * 6. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 7. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 9. 删除 shutdown peer 的数据目录,然后再拉起来 - * 10. 然后 read 之前写入的数据验证一遍 - * 11. transfer leader 到shut down 的 peer 上 - * 12. 在 read 之前写入的数据验证 - * 13. 再 write 数据,再 read 出来验证一遍 + * Verify the shutdown of non leader nodes on three nodes, restart, and control + * the recovery from install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown non leader + * 4. Verify the data written before read + * 5. Write the data again, and then read it out for verification + * 6. Then sleep exceeds one snapshot interval and write read data + * 7. Then sleep for more than one snapshot interval and write read data; 4,5 + * two-step It is to ensure that at least two snapshots are taken, so that when + * the node restarts again, it must pass the install snapshot, Because the log + * has been deleted + * 9. Delete the data directory of the shutdown peer and then pull it up again + * 10. Then verify the data written before read + * 11. Transfer leader to shut down peer + * 12. Verification of data written before read + * 13. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { LogicPoolID logicPoolId = 2; @@ -1017,19 +911,14 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { shutdownPeerid = peer2; } else { shutdownPeerid = peer1; @@ -1040,54 +929,40 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { leaderId.to_string().c_str())); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); - - // wait snapshot, 保证能够触发安装快照 - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 2, - loop); - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 3, - loop); - - // 删除此 peer 的数据,然后重启 + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); + + // Wait snapshot to ensure that the installation snapshot can be triggered + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 2, loop); + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 3, loop); + + // 
Delete the data for this peer and restart it ASSERT_EQ(0, ::system(TestCluster::RemoveCopysetDirCmd(shutdownPeerid) - .c_str())); //NOLINT + .c_str())); // NOLINT LOG(INFO) << "remove data cmd: " << TestCluster::RemoveCopysetDirCmd(shutdownPeerid); - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); Exec(TestCluster::CopysetDirWithoutProtocol(shutdownPeerid).c_str()); LOG(INFO) << "remove data dir: " << TestCluster::CopysetDirWithoutProtocol(shutdownPeerid); - ASSERT_FALSE(fs->DirExists(TestCluster::CopysetDirWithoutProtocol( - shutdownPeerid).c_str())); //NOLINT + ASSERT_FALSE( + fs->DirExists(TestCluster::CopysetDirWithoutProtocol(shutdownPeerid) + .c_str())); // NOLINT ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 3, loop); // Wait shutdown peer recovery, and then transfer leader to it @@ -1099,10 +974,7 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - shutdownPeerid, + status = TransferLeader(logicPoolId, copysetId, conf, shutdownPeerid, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderId); @@ -1116,38 +988,36 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), shutdownPeerid.to_string().c_str())); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 3, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 4, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 4, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. read 之前 write 的数据验证一遍 - * 5. 再 write 数据,然后 read 出来验证一遍 - * 6. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 7. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 9. 通过配置变更 add peer - * 10. 然后 read 之前写入的数据验证一遍 - * 11. 在发起 write,再 read 读出来验证一遍 - * 12. transfer leader 到 add 的 peer 上 - * 13. 在 read 之前写入的数据验证 - * 14. 再 write 数据,再 read 出来验证一遍 + * Verify the shutdown of non leader nodes on three nodes, restart, and control + * the recovery from install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown non leader + * 4. Verify the data written before read + * 5. Write the data again, and then read it out for verification + * 6. Then sleep exceeds one snapshot interval and write read data + * 7. Then sleep for more than one snapshot interval and write read data; 4,5 + * two-step It is to ensure that at least two snapshots are taken, so that when + * the node restarts again, it must pass the install snapshot, Because the log + * has been deleted + * 9. Add peer through configuration changes + * 10. Then verify the data written before read + * 11. Initiate write and read again to verify + * 12. 
Transfer leader to add's peer + * 13. Verification of data written before read + * 14. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -1173,21 +1043,16 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write,多个 chunk file + // Initiate read/write, multiple chunk files for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch, loop); } - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { shutdownPeerid = peer2; } else { shutdownPeerid = peer1; @@ -1198,46 +1063,31 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { leaderId.to_string().c_str())); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid)); - // 读出来验证一遍 + // Read it out and verify it again for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 1, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 1, loop); } - // wait snapshot, 保证能够触发安装快照 - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Wait snapshot to ensure that the installation snapshot can be triggered + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 2, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 2, loop); } - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 3, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 3, loop); } - // add 一个 peer + // Add a peer { ASSERT_EQ(0, cluster.StartPeer(peer4, true)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); @@ -1245,26 +1095,18 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = 80000; - butil::Status status = AddPeer(logicPoolId, - copysetId, - cluster.CopysetConf(), - peer4, - options); + butil::Status status = AddPeer(logicPoolId, copysetId, + cluster.CopysetConf(), peer4, options); ASSERT_EQ(0, status.error_code()); } - // 读出来验证一遍 + // Read it out and verify it again for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 3, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 4, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 4, loop); } @@ -1277,11 +1119,7 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - 
copysetId, - conf, - peer4, - options); + status = TransferLeader(logicPoolId, copysetId, conf, peer4, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderId); if (leaderId == peer4) { @@ -1291,21 +1129,16 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { ::sleep(1); } - ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), - peer4.to_string().c_str())); + ASSERT_EQ( + 0, ::strcmp(leaderId.to_string().c_str(), peer4.to_string().c_str())); - // 读出来验证一遍 + // Read it out and verify it again for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 4, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 5, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 5, loop); } @@ -1321,20 +1154,23 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { } /** - * * 验证3个节点的 remove 一个节点,然后再 add 回来,并控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. 通过配置变更 remove 一个 非 leader - * 4. read 之前 write 的数据验证一遍 - * 5. 再 write 数据,然后 read 出来验证一遍 - * 6. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 7. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 9. 通过配置变更再将之前 remove 的 peer add 回来 - * 10. transfer leader 到此 peer - * 11. 在 read 之前写入的数据验证 - * 12. 再 write 数据,再 read 出来验证一遍 + * Verify the removal of one node from three nodes, then add it back and control + * it to recover from the install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Remove a non leader through configuration changes + * 4. Verify the data written before read + * 5. Write the data again, and then read it out for verification + * 6. Then sleep exceeds one snapshot interval and write read data + * 7. Then sleep for more than one snapshot interval and write read data; 4,5 + * two-step It is to ensure that at least two snapshots are taken, so that when + * the node restarts again, it must pass the install snapshot, Because the log + * has been deleted + * 9. Add the previously removed peer back through configuration changes + * 10. Transfer leader to this peer + * 11. Verification of data written before read + * 12. 
Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -1360,21 +1196,16 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write,多个 chunk file + // Initiate read/write, multiple chunk files for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch, loop); } - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId removePeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { removePeerid = peer2; } else { removePeerid = peer1; @@ -1383,70 +1214,51 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { LOG(INFO) << "leader peer: " << leaderId.to_string(); ASSERT_NE(0, ::strcmp(removePeerid.to_string().c_str(), leaderId.to_string().c_str())); - // remove 一个 peer + // Remove a peer { Configuration conf = cluster.CopysetConf(); braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = 8000; - butil::Status status = RemovePeer(logicPoolId, - copysetId, - cluster.CopysetConf(), - removePeerid, - options); + butil::Status status = + RemovePeer(logicPoolId, copysetId, cluster.CopysetConf(), + removePeerid, options); ASSERT_EQ(0, status.error_code()); } - // 读出来验证一遍 + // Read it out and verify it again for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 1, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 1, loop); } - // wait snapshot, 保证能够触发安装快照 - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Wait snapshot to ensure that the installation snapshot can be triggered + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 2, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 2, loop); } - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 3, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 3, loop); } - // add 回来 + // Add, come back { Configuration conf = cluster.CopysetConf(); braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = 80000; - butil::Status status = AddPeer(logicPoolId, - copysetId, - cluster.CopysetConf(), - removePeerid, - options); + butil::Status status = + AddPeer(logicPoolId, copysetId, cluster.CopysetConf(), removePeerid, + options); ASSERT_EQ(0, status.error_code()); } @@ -1459,11 +1271,8 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - removePeerid, - options); + status = + TransferLeader(logicPoolId, copysetId, conf, removePeerid, options); if (0 == 
status.error_code()) { cluster.WaitLeader(&leaderId); if (leaderId == removePeerid) { @@ -1476,18 +1285,13 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), removePeerid.to_string().c_str())); - // 读出来验证一遍 + // Read it out and verify it again for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 3, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 4, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 4, loop); } diff --git a/test/chunkserver/chunkserver_test_util.cpp b/test/chunkserver/chunkserver_test_util.cpp index cb2d020048..612594e9ac 100644 --- a/test/chunkserver/chunkserver_test_util.cpp +++ b/test/chunkserver/chunkserver_test_util.cpp @@ -22,27 +22,27 @@ #include "test/chunkserver/chunkserver_test_util.h" -#include -#include -#include #include #include #include +#include #include #include +#include +#include #include #include #include -#include "src/common/concurrent/task_thread_pool.h" -#include "src/common/crc32.h" +#include "src/chunkserver/cli.h" +#include "src/chunkserver/concurrent_apply/concurrent_apply.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "test/chunkserver/fake_datastore.h" +#include "src/common/concurrent/task_thread_pool.h" +#include "src/common/crc32.h" #include "src/common/uri_parser.h" -#include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "test/chunkserver/fake_datastore.h" using ::curve::chunkserver::concurrent::ConcurrentApplyOption; using ::curve::common::UriParser; @@ -50,25 +50,22 @@ using ::curve::common::UriParser; namespace curve { namespace chunkserver { -std::string Exec(const char *cmd) { - FILE *pipe = popen(cmd, "r"); +std::string Exec(const char* cmd) { + FILE* pipe = popen(cmd, "r"); if (!pipe) return "ERROR"; char buffer[4096]; std::string result = ""; while (!feof(pipe)) { - if (fgets(buffer, 1024, pipe) != NULL) - result += buffer; + if (fgets(buffer, 1024, pipe) != NULL) result += buffer; } pclose(pipe); return result; } -std::shared_ptr InitFilePool(std::shared_ptr fsptr, //NOLINT - int chunkfileCount, - int chunkfileSize, - int metaPageSize, - std::string poolpath, - std::string metaPath) { +std::shared_ptr InitFilePool( + std::shared_ptr fsptr, // NOLINT + int chunkfileCount, int chunkfileSize, int metaPageSize, + std::string poolpath, std::string metaPath) { auto filePoolPtr = std::make_shared(fsptr); if (filePoolPtr == nullptr) { LOG(FATAL) << "allocate chunkfile pool failed!"; @@ -76,10 +73,10 @@ std::shared_ptr InitFilePool(std::shared_ptr fsptr, int count = 1; std::string dirname = poolpath; while (count <= chunkfileCount) { - std::string filename = poolpath + std::to_string(count); + std::string filename = poolpath + std::to_string(count); fsptr->Mkdir(poolpath); int fd = fsptr->Open(filename.c_str(), O_RDWR | O_CREAT); - char *data = new char[chunkfileSize + 4096]; + char* data = new char[chunkfileSize + 4096]; memset(data, 'a', chunkfileSize + 4096); fsptr->Write(fd, data, 0, chunkfileSize + 4096); fsptr->Close(fd); @@ -87,7 +84,7 @@ std::shared_ptr InitFilePool(std::shared_ptr fsptr, delete[] data; } /** - * 持久化FilePool meta file + * Persisting FilePool meta file */ FilePoolMeta meta; @@ -107,11 +104,8 @@ std::shared_ptr 
InitFilePool(std::shared_ptr fsptr, return filePoolPtr; } -int StartChunkserver(const char *ip, - int port, - const char *copysetdir, - const char *confs, - const int snapshotInterval, +int StartChunkserver(const char* ip, int port, const char* copysetdir, + const char* confs, const int snapshotInterval, const int electionTimeoutMs) { LOG(INFO) << "Going to start chunk server"; @@ -123,13 +117,14 @@ int StartChunkserver(const char *ip, return -1; } if (server.Start(port, NULL) != 0) { - LOG(ERROR) << "Fail to start Server, port: " << port << ", errno: " - << errno << ", " << strerror(errno); + LOG(ERROR) << "Fail to start Server, port: " << port + << ", errno: " << errno << ", " << strerror(errno); return -1; } LOG(INFO) << "start rpc server success"; - std::shared_ptr fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT const uint32_t kMaxChunkSize = 16 * 1024 * 1024; CopysetNodeOptions copysetNodeOptions; copysetNodeOptions.ip = ip; @@ -188,12 +183,10 @@ int StartChunkserver(const char *ip, CopysetID copysetId = 100001; CopysetNodeManager::GetInstance().Init(copysetNodeOptions); CopysetNodeManager::GetInstance().Run(); - CHECK(CopysetNodeManager::GetInstance().CreateCopysetNode(logicPoolId, - copysetId, - peers)); + CHECK(CopysetNodeManager::GetInstance().CreateCopysetNode( + logicPoolId, copysetId, peers)); auto copysetNode = CopysetNodeManager::GetInstance().GetCopysetNode( - logicPoolId, - copysetId); + logicPoolId, copysetId); DataStoreOptions options; options.baseDir = "./test-temp"; options.chunkSize = 16 * 1024 * 1024; @@ -214,18 +207,16 @@ int StartChunkserver(const char *ip, return 0; } -butil::Status WaitLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - PeerId *leaderId, - int electionTimeoutMs) { +butil::Status WaitLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + PeerId* leaderId, int electionTimeoutMs) { butil::Status status; const int kMaxLoop = (5 * electionTimeoutMs) / 100; for (int i = 0; i < kMaxLoop; ++i) { status = GetLeader(logicPoolId, copysetId, conf, leaderId); if (status.ok()) { /** - * 等待 flush noop entry + * Waiting for flush noop entry */ ::usleep(electionTimeoutMs * 1000); return status; @@ -239,14 +230,14 @@ butil::Status WaitLeader(const LogicPoolID &logicPoolId, return status; } -TestCluster::TestCluster(const std::string &clusterName, +TestCluster::TestCluster(const std::string& clusterName, const LogicPoolID logicPoolID, const CopysetID copysetID, - const std::vector &peers) : - clusterName_(clusterName), - snapshotIntervalS_(1), - electionTimeoutMs_(1000), - catchupMargin_(10) { + const std::vector& peers) + : clusterName_(clusterName), + snapshotIntervalS_(1), + electionTimeoutMs_(1000), + catchupMargin_(10) { logicPoolID_ = logicPoolID; copysetID_ = copysetID; for (auto it = peers.begin(); it != peers.end(); ++it) { @@ -255,10 +246,8 @@ TestCluster::TestCluster(const std::string &clusterName, } } -int TestCluster::StartPeer(const PeerId &peerId, - const bool empty, - bool getChunkFromPool, - bool createChunkFilePool) { +int TestCluster::StartPeer(const PeerId& peerId, const bool empty, + bool getChunkFromPool, bool createChunkFilePool) { LOG(INFO) << "going start peer: " << peerId.to_string(); auto it = peersMap_.find(peerId.to_string()); if (it != peersMap_.end()) { @@ -299,30 +288,29 @@ int TestCluster::StartPeer(const PeerId &peerId, LOG(ERROR) << "start peer 
fork failed"; return -1; } else if (0 == pid) { - /* 在子进程起一个 ChunkServer */ - StartPeerNode(peer->options, peer->conf, - getChunkFromPool, createChunkFilePool); + /*Starting a ChunkServer in a child process*/ + StartPeerNode(peer->options, peer->conf, getChunkFromPool, + createChunkFilePool); exit(0); } LOG(INFO) << "Start peer success, pid: " << pid; peer->pid = pid; peer->state = PeerNodeState::RUNNING; - peersMap_.insert(std::pair>(peerId.to_string(), - std::move(peer))); + peersMap_.insert(std::pair>( + peerId.to_string(), std::move(peer))); return 0; } -int TestCluster::ShutdownPeer(const PeerId &peerId) { +int TestCluster::ShutdownPeer(const PeerId& peerId) { LOG(INFO) << "going to shutdown peer: " << peerId.to_string(); auto it = peersMap_.find(peerId.to_string()); if (it != peersMap_.end()) { int waitState; if (0 != kill(it->second->pid, SIGINT)) { LOG(ERROR) << "Stop peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } waitpid(it->second->pid, &waitState, 0); @@ -335,7 +323,7 @@ int TestCluster::ShutdownPeer(const PeerId &peerId) { } } -int TestCluster::StopPeer(const PeerId &peerId) { +int TestCluster::StopPeer(const PeerId& peerId) { auto it = peersMap_.find(peerId.to_string()); if (it != peersMap_.end()) { if (it->second->state != PeerNodeState::RUNNING) { @@ -345,8 +333,8 @@ int TestCluster::StopPeer(const PeerId &peerId) { } if (0 != kill(it->second->pid, SIGSTOP)) { LOG(ERROR) << "Hang peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } it->second->state = PeerNodeState::STOP; @@ -358,7 +346,7 @@ int TestCluster::StopPeer(const PeerId &peerId) { } } -int TestCluster::ContPeer(const PeerId &peerId) { +int TestCluster::ContPeer(const PeerId& peerId) { auto it = peersMap_.find(peerId.to_string()); if (it != peersMap_.end()) { if (it->second->state != PeerNodeState::STOP) { @@ -368,8 +356,8 @@ int TestCluster::ContPeer(const PeerId &peerId) { } if (0 != kill(it->second->pid, SIGCONT)) { LOG(ERROR) << "Cont peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } it->second->state = PeerNodeState::RUNNING; @@ -381,10 +369,10 @@ int TestCluster::ContPeer(const PeerId &peerId) { } } -int TestCluster::WaitLeader(PeerId *leaderId) { +int TestCluster::WaitLeader(PeerId* leaderId) { butil::Status status; /** - * 等待选举结束 + * Waiting for the election to end */ ::usleep(2 * electionTimeoutMs_ * 1000); const int kMaxLoop = (3 * electionTimeoutMs_) / 100; @@ -393,8 +381,10 @@ int TestCluster::WaitLeader(PeerId *leaderId) { status = GetLeader(logicPoolID_, copysetID_, conf_, leaderId); if (status.ok()) { /** - * 由于选举之后还需要提交应用 noop entry 之后才能提供服务, - * 所以这里需要等待 noop apply,这里等太短,可能容易失败,后期改进 + * Due to the need to submit the application noop entry after the + * election to provide services, So we need to wait for the noop + * application here. 
If the wait time is too short, it may be easy + * to fail, so we need to improve it later */ usleep(electionTimeoutMs_ * 1000); LOG(INFO) << "Wait leader success, leader is: " @@ -417,9 +407,7 @@ int TestCluster::StopAllPeers() { return 0; } -const Configuration TestCluster::CopysetConf() const { - return conf_; -} +const Configuration TestCluster::CopysetConf() const { return conf_; } int TestCluster::SetsnapshotIntervalS(int snapshotIntervalS) { snapshotIntervalS_ = snapshotIntervalS; @@ -441,7 +429,7 @@ int TestCluster::StartPeerNode(CopysetNodeOptions options, bool enableGetchunkFromPool, bool createChunkFilePool) { /** - * 用于注释,说明 cmd format + * Used for annotation to explain the cmd format */ std::string cmdFormat = R"( ./bazel-bin/test/chunkserver/server-test @@ -466,7 +454,7 @@ int TestCluster::StartPeerNode(CopysetNodeOptions options, confStr += it->to_string(); confStr += ","; } - // 去掉最后的逗号 + // Remove the last comma confStr.pop_back(); std::string cmd_dir("./bazel-bin/test/chunkserver/server-test"); @@ -478,28 +466,22 @@ int TestCluster::StartPeerNode(CopysetNodeOptions options, std::string confs; butil::string_printf(&confs, "-conf=%s", confStr.c_str()); std::string copyset_dir; - butil::string_printf(©set_dir, - "-copyset_dir=%s", + butil::string_printf(©set_dir, "-copyset_dir=%s", options.chunkDataUri.c_str()); std::string election_timeout_ms; - butil::string_printf(&election_timeout_ms, - "-election_timeout_ms=%d", + butil::string_printf(&election_timeout_ms, "-election_timeout_ms=%d", options.electionTimeoutMs); std::string snapshot_interval_s; - butil::string_printf(&snapshot_interval_s, - "-snapshot_interval_s=%d", + butil::string_printf(&snapshot_interval_s, "-snapshot_interval_s=%d", options.snapshotIntervalS); std::string catchup_margin; - butil::string_printf(&catchup_margin, - "-catchup_margin=%d", + butil::string_printf(&catchup_margin, "-catchup_margin=%d", options.catchupMargin); std::string getchunk_from_pool; - butil::string_printf(&getchunk_from_pool, - "-enable_getchunk_from_pool=%d", + butil::string_printf(&getchunk_from_pool, "-enable_getchunk_from_pool=%d", enableGetchunkFromPool); std::string create_pool; - butil::string_printf(&create_pool, - "-create_chunkfilepool=%d", + butil::string_printf(&create_pool, "-create_chunkfilepool=%d", createChunkFilePool); std::string logic_pool_id; butil::string_printf(&logic_pool_id, "-logic_pool_id=%d", logicPoolID_); @@ -508,59 +490,51 @@ int TestCluster::StartPeerNode(CopysetNodeOptions options, std::string raft_sync; butil::string_printf(&raft_sync, "-raft_sync=%s", "true"); - char *arg[] = { - const_cast(cmd.c_str()), - const_cast(ip.c_str()), - const_cast(port.c_str()), - const_cast(confs.c_str()), - const_cast(copyset_dir.c_str()), - const_cast(election_timeout_ms.c_str()), - const_cast(snapshot_interval_s.c_str()), - const_cast(catchup_margin.c_str()), - const_cast(logic_pool_id.c_str()), - const_cast(copyset_id.c_str()), - const_cast(getchunk_from_pool.c_str()), - const_cast(create_pool.c_str()), - NULL - }; + char* arg[] = {const_cast(cmd.c_str()), + const_cast(ip.c_str()), + const_cast(port.c_str()), + const_cast(confs.c_str()), + const_cast(copyset_dir.c_str()), + const_cast(election_timeout_ms.c_str()), + const_cast(snapshot_interval_s.c_str()), + const_cast(catchup_margin.c_str()), + const_cast(logic_pool_id.c_str()), + const_cast(copyset_id.c_str()), + const_cast(getchunk_from_pool.c_str()), + const_cast(create_pool.c_str()), + NULL}; ::execv(cmd_dir.c_str(), arg); return 0; } -const std::string 
TestCluster::CopysetDirWithProtocol(const PeerId &peerId) { +const std::string TestCluster::CopysetDirWithProtocol(const PeerId& peerId) { std::string copysetdir; - butil::string_printf(&copysetdir, - "local://./%s-%d-%d", + butil::string_printf(&copysetdir, "local://./%s-%d-%d", butil::ip2str(peerId.addr.ip).c_str(), - peerId.addr.port, - 0); + peerId.addr.port, 0); return copysetdir; } -const std::string TestCluster::CopysetDirWithoutProtocol(const PeerId &peerId) { +const std::string TestCluster::CopysetDirWithoutProtocol(const PeerId& peerId) { std::string copysetdir; - butil::string_printf(&copysetdir, - "./%s-%d-%d", + butil::string_printf(&copysetdir, "./%s-%d-%d", butil::ip2str(peerId.addr.ip).c_str(), - peerId.addr.port, - 0); + peerId.addr.port, 0); return copysetdir; } -const std::string TestCluster::RemoveCopysetDirCmd(const PeerId &peerId) { +const std::string TestCluster::RemoveCopysetDirCmd(const PeerId& peerId) { std::string cmd; - butil::string_printf(&cmd, - "rm -fr %s-%d-%d", + butil::string_printf(&cmd, "rm -fr %s-%d-%d", butil::ip2str(peerId.addr.ip).c_str(), - peerId.addr.port, - 0); + peerId.addr.port, 0); return cmd; } LogicPoolID TestCluster::logicPoolID_ = 0; -CopysetID TestCluster::copysetID_ = 0; +CopysetID TestCluster::copysetID_ = 0; } // namespace chunkserver } // namespace curve diff --git a/test/chunkserver/chunkserver_test_util.h b/test/chunkserver/chunkserver_test_util.h index b329e069cd..eaf423bbd4 100644 --- a/test/chunkserver/chunkserver_test_util.h +++ b/test/chunkserver/chunkserver_test_util.h @@ -26,188 +26,182 @@ #include #include -#include -#include -#include #include +#include +#include #include +#include -#include "src/chunkserver/datastore/file_pool.h" #include "include/chunkserver/chunkserver_common.h" -#include "src/fs/local_filesystem.h" #include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/datastore/file_pool.h" +#include "src/fs/local_filesystem.h" namespace curve { namespace chunkserver { using curve::fs::LocalFileSystem; -std::string Exec(const char *cmd); /** - * 当前FilePool需要事先格式化,才能使用,此函数用于事先格式化FilePool - * @param fsptr:本文文件系统指针 - * @param chunkfileSize:chunk文件的大小 - * @param metaPageSize:chunk文件的meta page大小 - * @param poolpath:文件池的路径,例如./chunkfilepool/ - * @param metaPath:meta文件路径,例如./chunkfilepool/chunkfilepool.meta - * @return 初始化成功返回FilePool指针,否则返回null + * The FilePool must be formatted before it can be used; this function + * formats the FilePool in advance + * @param fsptr: pointer to the local filesystem + * @param chunkfileSize: chunk file size + * @param metaPageSize: the meta page size of the chunk file + * @param poolpath: the path of the file pool, for example ./chunkfilepool/ + * @param metaPath: meta file path, for example + * ./chunkfilepool/chunkfilepool.meta + * @return the FilePool pointer on successful initialization; otherwise + * returns null */ -std::shared_ptr InitFilePool(std::shared_ptr fsptr, //NOLINT - int chunkfileCount, - int chunkfileSize, - int metaPageSize, - std::string poolpath, - std::string metaPath); - -int StartChunkserver(const char *ip, - int port, - const char *copysetdir, - const char *confs, - const int snapshotInterval, +std::shared_ptr InitFilePool( + std::shared_ptr fsptr, // NOLINT + int chunkfileCount, int chunkfileSize, int metaPageSize, + std::string poolpath, std::string metaPath); + +int StartChunkserver(const char* ip, int port, const char* copysetdir, + const char* confs, const int snapshotInterval, const int electionTimeoutMs); -butil::Status WaitLeader(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - PeerId *leaderId, - int electionTimeoutMs); +butil::Status WaitLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + PeerId* leaderId, int electionTimeoutMs); /** - * PeerNode 状态 - * 1. exit:未启动,或者被关闭 - * 2. running:正在运行 - * 3. stop:hang 住了 + * PeerNode status + * 1. exit: not started, or shut down + * 2. running: running + * 3. stop: hung */ enum class PeerNodeState { - EXIT = 0, // 退出 - RUNNING = 1, // 正在运行 - STOP = 2, // hang住 + EXIT = 0, // Exited + RUNNING = 1, // Running + STOP = 2, // Hung }; /** - * 一个 ChunkServer 进程,包含某个 Copyset 的某个副本 + * A ChunkServer process holding one replica of a copyset */ struct PeerNode { PeerNode() : pid(0), options(), state(PeerNodeState::EXIT) {} - // Peer对应的进程id + // Process ID corresponding to the peer pid_t pid; - // Peer的地址 + // Peer's address PeerId peerId; - // copyset的集群配置 + // Cluster configuration of the copyset Configuration conf; - // copyset的基本配置 + // Basic configuration of the copyset CopysetNodeOptions options; - // PeerNode的状态 + // Status of the PeerNode PeerNodeState state; }; /** - * 封装模拟 cluster 测试相关的接口 + * Encapsulates the interfaces for simulated cluster testing */ class TestCluster { public: - TestCluster(const std::string &clusterName, - const LogicPoolID logicPoolID, - const CopysetID copysetID, - const std::vector &peers); + TestCluster(const std::string& clusterName, const LogicPoolID logicPoolID, + const CopysetID copysetID, const std::vector& peers); virtual ~TestCluster() { StopAllPeers(); } public: /** - * 启动一个 Peer + * Start a peer * @param peerId - * @param empty 初始化配置是否为空 - * @param: get_chunk_from_pool是否从FilePool获取chunk - * @param: createFilePool是否创建FilePool,重启的情况下不需要 - * @return 0:成功,-1 失败 + * @param empty whether the initial configuration is empty + * @param: get_chunk_from_pool whether to obtain chunks from the FilePool + * @param: createFilePool whether to create a FilePool; not needed when + * restarting + * @return 0 on success, -1 on failure */ - int StartPeer(const PeerId &peerId, - const bool empty = false, - bool getChunkFrom_pool = false, - bool createFilePool = true); + int StartPeer(const PeerId& peerId, const bool empty = false, + bool getChunkFrom_pool = false, bool createFilePool = true); /** - * 关闭一个 peer,使用 SIGINT + * Shut down a peer using SIGINT * @param peerId - * @return 0:成功,-1 失败 + * @return 0 on success, -1 on failure */ - int ShutdownPeer(const PeerId &peerId); - + int ShutdownPeer(const PeerId& peerId); /** - * hang 住一个 peer,使用 SIGSTOP + * Hang a peer using SIGSTOP * @param peerId - * @return 0:成功,-1 失败 + * @return 0 on success, -1 on failure */ - int StopPeer(const PeerId &peerId); + int StopPeer(const PeerId& peerId); /** - * 恢复 hang 住的 peer,使用 SIGCONT - * @param peerId - * @return 0:成功,-1 失败 - */ - int ContPeer(const PeerId &peerId); + * Resume a hung peer using SIGCONT + * @param peerId + * @return 0 on success, -1 on failure + */ + int ContPeer(const PeerId& peerId); /** - * 反复重试直到等到新的 leader 产生 - * @param leaderId 出参,返回 leader id - * @return 0:成功,-1 失败 + * Retry repeatedly until a new leader is elected + * @param leaderId output parameter, returns the leader id + * @return 0 on success, -1 on failure */ - int WaitLeader(PeerId *leaderId); + int WaitLeader(PeerId* leaderId); /** - * Stop 所有的 peer - * @return 0:成功,-1 失败 + * Stop all peers + * @return 0 on success, -1 on failure */ int StopAllPeers(); public: - /* 返回集群当前的配置 */ + /* Returns the current configuration of the cluster */ const Configuration CopysetConf() const; - /* 修改 PeerNode 配置相关的接口,单位: s */ + /* Interfaces for modifying the PeerNode configuration, unit: s */ int SetsnapshotIntervalS(int snapshotIntervalS); int SetElectionTimeoutMs(int electionTimeoutMs); int SetCatchupMargin(int catchupMargin); static int StartPeerNode(CopysetNodeOptions options, - const Configuration conf, - bool from_chunkfile_pool = false, - bool createFilePool = true); + const Configuration conf, + bool from_chunkfile_pool = false, + bool createFilePool = true); public: /** - * 返回执行 peer 的 copyset 路径 with protocol, ex: local://./127.0.0.1:9101:0 - */ - static const std::string CopysetDirWithProtocol(const PeerId &peerId); + * Returns the copyset path of the peer, with protocol, ex: + * local://./127.0.0.1:9101:0 + */ + static const std::string CopysetDirWithProtocol(const PeerId& peerId); /** - * 返回执行 peer 的 copyset 路径 without protocol, ex: ./127.0.0.1:9101:0 + * Returns the copyset path of the peer, without protocol, ex: + * ./127.0.0.1:9101:0 */ - static const std::string CopysetDirWithoutProtocol(const PeerId &peerId); + static const std::string CopysetDirWithoutProtocol(const PeerId& peerId); /** * remove peer's copyset dir's cmd */ - static const std::string RemoveCopysetDirCmd(const PeerId &peerid); + static const std::string RemoveCopysetDirCmd(const PeerId& peerid); private: - // 集群名字 - std::string clusterName_; - // 集群的peer集合 - std::set peers_; - // peer集合的映射map + // Cluster name + std::string clusterName_; + // The peer set of the cluster + std::set peers_; + // Map from peer to its PeerNode std::unordered_map> peersMap_; - // 快照间隔 + // Snapshot interval int snapshotIntervalS_; - // 选举超时时间 + // Election timeout int electionTimeoutMs_; - // catchup margin配置 + // Catchup margin configuration int catchupMargin_; - // 集群成员配置 + // Cluster member configuration Configuration conf_; - // 逻辑池id - static LogicPoolID logicPoolID_; - // 复制组id - static CopysetID copysetID_; + // Logical
Pool ID + static LogicPoolID logicPoolID_; + // Copy Group ID + static CopysetID copysetID_; }; } // namespace chunkserver diff --git a/test/chunkserver/cli2_test.cpp b/test/chunkserver/cli2_test.cpp index d4d482d118..41d3b75ada 100644 --- a/test/chunkserver/cli2_test.cpp +++ b/test/chunkserver/cli2_test.cpp @@ -20,23 +20,24 @@ * Author: wudemiao */ -#include -#include -#include -#include -#include +#include "src/chunkserver/cli2.h" + #include #include #include +#include +#include +#include +#include +#include #include +#include "proto/copyset.pb.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli2.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -46,10 +47,12 @@ using curve::common::UUIDGenerator; class Cli2Test : public testing::Test { protected: static void SetUpTestCase() { - LOG(INFO) << "CliTest " << "SetUpTestCase"; + LOG(INFO) << "CliTest " + << "SetUpTestCase"; } static void TearDownTestCase() { - LOG(INFO) << "CliTest " << "TearDownTestCase"; + LOG(INFO) << "CliTest " + << "TearDownTestCase"; } virtual void SetUp() { UUIDGenerator uuidGenerator; @@ -84,13 +87,14 @@ class Cli2Test : public testing::Test { butil::AtExitManager atExitManager; TEST_F(Cli2Test, basic) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9033; - const char *confs = "127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:0"; + const char* confs = "127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:0"; int snapshotInterval = 600; /** - * 设置更大的默认选举超时时间,因为当前 ci 环境很容易出现超时 + * Set a larger default election timeout because the current CI environment + * is prone to timeout */ int electionTimeoutMs = 3000; @@ -103,12 +107,8 @@ TEST_F(Cli2Test, basic) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -118,12 +118,8 @@ TEST_F(Cli2Test, basic) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -133,12 +129,8 @@ TEST_F(Cli2Test, basic) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -148,16 +140,12 @@ TEST_F(Cli2Test, basic) { ASSERT_TRUE(false); } else if (0 == pid4) { std::string copysetdir = "local://./" + dir4; - StartChunkserver(ip, - port + 3, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 3, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3, pid_t pid4) { @@ -177,6 +165,7 @@ TEST_F(Cli2Test, basic) { kill(pid4_, SIGINT); waitpid(pid4_, &waitState, 0); } + 
private: pid_t pid1_; pid_t pid2_; pid_t pid3_; pid_t pid4_; }; @@ -197,11 +186,12 @@ TEST_F(Cli2Test, basic) { WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); ASSERT_TRUE(status.ok()); - /* 等待 transfer leader 成功 */ + /* Wait for transfer leader to succeed */ int waitTransferLeader = 3000 * 1000; /** - * 配置变更因为设置一条 log entry 的完成复制,所以设置较长的 timeout - * 时间,以免在 ci 环境偶尔会出现超时出错 + * A configuration change waits for one log entry to finish replicating, + * so set a longer timeout to avoid occasional timeout errors in the CI + * environment */ braft::cli::CliOptions opt; opt.timeout_ms = 6000; @@ -211,23 +201,18 @@ TEST_F(Cli2Test, basic) { { Peer peer; peer.set_address("127.0.0.1:9035:0"); - butil::Status st = curve::chunkserver::RemovePeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "remove peer: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::RemovePeer( + logicPoolId, copysetId, conf, peer, opt); + LOG(INFO) << "remove peer: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); - /* 可能移除的是 leader,如果移除的是 leader,那么需要等到新的 leader 产生, - * 否则下面的 add peer 测试就会失败, wait 较长时间,是为了保证 remove - * leader 之后新 leader 选举成功,切 become leader 的 flush config - * 完成 */ + /* The removed peer may be the leader. If the leader is removed, we + * must wait until a new leader is elected, otherwise the add peer + * test below will fail. The long wait guarantees that after the + * remove, the new leader wins the election and its become-leader + * flush of the current config completes */ ::usleep(1.5 * 1000 * electionTimeoutMs); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); ASSERT_TRUE(status.ok()); } @@ -237,28 +222,21 @@ TEST_F(Cli2Test, basic) { conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0"); Peer peer; peer.set_address("127.0.0.1:9035:0"); - butil::Status st = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "add peer: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::AddPeer(logicPoolId, copysetId, + conf, peer, opt); + LOG(INFO) << "add peer: " << st.error_code() << ", " << st.error_str(); ASSERT_TRUE(st.ok()); } - /* 重复 add 同一个 peer */ + /* Add the same peer repeatedly */ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0"); Peer peer; peer.set_address("127.0.0.1:9035:0"); - butil::Status st = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "add one peer repeat: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::AddPeer(logicPoolId, copysetId, + conf, peer, opt); + LOG(INFO) << "add one peer repeat: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); } /* transfer leader */ @@ -273,90 +251,70 @@ TEST_F(Cli2Test, basic) { peer3.set_address("127.0.0.1:9035:0"); { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer1, - opt); - LOG(INFO) << "transfer leader: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer1, opt); + LOG(INFO) << "transfer leader: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); - /* transfer leader 只是讲 rpc 发送给leader,并不会等 leader transfer - * 成功才返回,所以这里需要等,除此之外,并不能立马去查 leader,因为 - * leader transfer 之后,可能返回之前的 leader,除此之外 transfer - * leader 成功了之后,become leader 进行时,leader 已经可查,但是 - * become leader 会执行 flush 当前 conf 来充当 noop,如果这个时候 - * 立马进行下一个 transfer leader,会被组织,因为同时只能有一个配置 - * 变更在进行 */ + /* Transfer leader only sends an rpc to the leader; it does not + * wait for the transfer to succeed before returning, so we must + * wait here. Besides, we cannot query the leader immediately, + * because right after the transfer the previous leader may still + * be returned. Even after the transfer succeeds and the new leader + * is queryable, become leader flushes the current conf as a noop + * entry, so immediately starting the next transfer leader would be + * rejected: only one configuration change can be in progress at a + * time */ ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer1.address().c_str(), leader.to_string().c_str()); } { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer2, - opt); - LOG(INFO) << "transfer leader: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer2, opt); + LOG(INFO) << "transfer leader: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer2.address().c_str(), leader.to_string().c_str()); } { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer3, - opt); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer3, opt); LOG(INFO) << "transfer leader: " << st.error_str(); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer3.address().c_str(), leader.to_string().c_str()); } - /* transfer 给 leader 给 leader,仍然返回成功 */ + /* Transferring leadership to the current leader still returns success */ { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer3, - opt); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer3, opt);
ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer3.address().c_str(), leader.to_string().c_str()); } @@ -366,33 +324,29 @@ TEST_F(Cli2Test, basic) { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:0"); Configuration newConf; - newConf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9036:0"); // NOLINT + newConf.parse_from( + "127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9036:0"); // NOLINT - butil::Status st = curve::chunkserver::ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - opt); - LOG(INFO) << "change peers: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::ChangePeers( + logicPoolId, copysetId, conf, newConf, opt); + LOG(INFO) << "change peers: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); } /* reset peer */ { - // 等待change peer完成,否则用例会失败 + // Wait for the change peer to complete, otherwise the test case will + // fail sleep(3); Peer peer; peer.set_address("127.0.0.1:9033:0"); - butil::Status status = curve::chunkserver::ResetPeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "reset peer: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = curve::chunkserver::ResetPeer( + logicPoolId, copysetId, conf, peer, opt); + LOG(INFO) << "reset peer: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); } - /* 异常分支测试 */ + /*Abnormal Branch Test*/ /* get leader - conf empty */ { Configuration conf; @@ -401,127 +355,105 @@ TEST_F(Cli2Test, basic) { ASSERT_FALSE(status.ok()); ASSERT_EQ(EINVAL, status.error_code()); } - /* get leader - 非法的地址 */ + /*Get leader - illegal address*/ { Configuration conf; Peer leader; - conf.parse_from("127.0.0.1:65540:0,127.0.0.1:65541:0,127.0.0.1:65542:0"); //NOLINT + conf.parse_from( + "127.0.0.1:65540:0,127.0.0.1:65541:0,127.0.0.1:65542:0"); // NOLINT butil::Status status = GetLeader(logicPoolId, copysetId, conf, &leader); ASSERT_FALSE(status.ok()); ASSERT_EQ(-1, status.error_code()); } - /* add peer - 不存在的 peer */ + /*Add peer - non-existent peer*/ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:2"); - /* 添加一个根本不存在的节点 */ + /*Add a non-existent node*/ Peer peer; peer.set_address("127.0.0.1:9039:2"); - butil::Status status = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peer, - opt); + butil::Status status = curve::chunkserver::AddPeer( + logicPoolId, copysetId, conf, peer, opt); ASSERT_FALSE(status.ok()); LOG(INFO) << "add peer: " << status.error_code() << ", " << status.error_str(); } - /* transfer leader - 不存在的 peer */ + /*Transfer leader - non-existent peer*/ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:2"); Peer peer; peer.set_address("127.0.0.1:9039:0"); { - butil::Status - status = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer, - opt); + butil::Status status = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer, opt); ASSERT_FALSE(status.ok()); LOG(INFO) << "transfer leader: " << status.error_code() <<
", " << status.error_str(); } } - /* change peers - 不存在的 peer */ + /*Change peers - non-existent peers*/ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9036:0"); Configuration newConf; - newConf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9039:0"); // NOLINT - butil::Status status = curve::chunkserver::ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - opt); + newConf.parse_from( + "127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9039:0"); // NOLINT + butil::Status status = curve::chunkserver::ChangePeers( + logicPoolId, copysetId, conf, newConf, opt); ASSERT_FALSE(status.ok()); LOG(INFO) << "change peers: " << status.error_code() << ", " << status.error_str(); } - /* reset peer - newConf为空 */ + /*Reset peer - newConf is empty*/ { Configuration conf; Peer peer; peer.set_address("127.0.0.1:9033:0"); - butil::Status - status = curve::chunkserver::ResetPeer(logicPoolId, - copysetId, - conf, - peer, - opt); + butil::Status status = curve::chunkserver::ResetPeer( + logicPoolId, copysetId, conf, peer, opt); LOG(INFO) << "reset peer: " << status.error_code() << ", " << status.error_str(); ASSERT_EQ(EINVAL, status.error_code()); } - /* reset peer peer地址非法 */ + /*Illegal reset peer address*/ { Peer peer; peer.set_address("127.0.0.1:65540:0"); - butil::Status status = curve::chunkserver::ResetPeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "reset peer: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = curve::chunkserver::ResetPeer( + logicPoolId, copysetId, conf, peer, opt); + LOG(INFO) << "reset peer: " << status.error_code() << ", " + << status.error_str(); ASSERT_EQ(-1, status.error_code()); } - /* reset peer peer地址不存在 */ + /*Reset peer address does not exist*/ { Peer peer; peer.set_address("127.0.0.1:9040:0"); - butil::Status status = curve::chunkserver::ResetPeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "reset peer: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = curve::chunkserver::ResetPeer( + logicPoolId, copysetId, conf, peer, opt); + LOG(INFO) << "reset peer: " << status.error_code() << ", " + << status.error_str(); ASSERT_EQ(EHOSTDOWN, status.error_code()); } - /* snapshot peer地址非法 */ + /*Illegal snapshot peer address*/ { Peer peer; peer.set_address("127.0.0.1:65540:0"); - butil::Status status = curve::chunkserver::Snapshot(logicPoolId, - copysetId, - peer, - opt); - LOG(INFO) << "snapshot: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = + curve::chunkserver::Snapshot(logicPoolId, copysetId, peer, opt); + LOG(INFO) << "snapshot: " << status.error_code() << ", " + << status.error_str(); ASSERT_EQ(-1, status.error_code()); } - /* snapshot peer地址不存在 */ + /*The snapshot peer address does not exist*/ { Peer peer; peer.set_address("127.0.0.1:9040:0"); - butil::Status status = curve::chunkserver::Snapshot(logicPoolId, - copysetId, - peer, - opt); - LOG(INFO) << "snapshot: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = + curve::chunkserver::Snapshot(logicPoolId, copysetId, peer, opt); + LOG(INFO) << "snapshot: " << status.error_code() << ", " + << status.error_str(); ASSERT_EQ(EHOSTDOWN, status.error_code()); } /* snapshot all normal */ @@ -529,8 +461,8 @@ TEST_F(Cli2Test, basic) { Peer peer; peer.set_address("127.0.0.1:9040:0"); butil::Status status = curve::chunkserver::SnapshotAll(peer, opt); - LOG(INFO) << "snapshot: " - << status.error_code() << ", " << 
status.error_str(); + LOG(INFO) << "snapshot: " << status.error_code() << ", " + << status.error_str(); ASSERT_EQ(EHOSTDOWN, status.error_code()); } } diff --git a/test/chunkserver/cli_test.cpp b/test/chunkserver/cli_test.cpp index 111ec23773..7aa218a446 100644 --- a/test/chunkserver/cli_test.cpp +++ b/test/chunkserver/cli_test.cpp @@ -20,22 +20,23 @@ * Author: wudemiao */ -#include -#include -#include -#include +#include "src/chunkserver/cli.h" + #include #include #include +#include +#include +#include +#include #include +#include "proto/copyset.pb.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -45,10 +46,12 @@ using curve::common::UUIDGenerator; class CliTest : public testing::Test { protected: static void SetUpTestCase() { - LOG(INFO) << "CliTest " << "SetUpTestCase"; + LOG(INFO) << "CliTest " + << "SetUpTestCase"; } static void TearDownTestCase() { - LOG(INFO) << "CliTest " << "TearDownTestCase"; + LOG(INFO) << "CliTest " + << "TearDownTestCase"; } virtual void SetUp() { UUIDGenerator uuidGenerator; @@ -78,13 +81,14 @@ class CliTest : public testing::Test { butil::AtExitManager atExitManager; TEST_F(CliTest, basic) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9030; - const char *confs = "127.0.0.1:9030:0,127.0.0.1:9031:0,127.0.0.1:9032:0"; + const char* confs = "127.0.0.1:9030:0,127.0.0.1:9031:0,127.0.0.1:9032:0"; int snapshotInterval = 600; /** - * 设置更大的默认选举超时时间,因为当前 ci 环境很容易出现超时 + * Set a larger default election timeout because the current CI environment + * is prone to timeout */ int electionTimeoutMs = 3000; @@ -97,12 +101,8 @@ TEST_F(CliTest, basic) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -112,12 +112,8 @@ TEST_F(CliTest, basic) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -127,16 +123,12 @@ TEST_F(CliTest, basic) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -153,6 +145,7 @@ TEST_F(CliTest, basic) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -172,11 +165,12 @@ TEST_F(CliTest, basic) { WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); ASSERT_TRUE(status.ok()); - /* 等待 transfer leader 成功 */ + /* Waiting for transfer leader to succeed*/ int waitTransferLeader = 3000 * 1000; /** - * 配置变更因为设置一条 log entry 的完成复制,所以设置较长的 timeout - * 时间,以免在 ci 环境偶尔会出现超时出错 + * 
A configuration change waits for one log entry to finish replicating, + * so set a longer timeout to avoid occasional timeout errors in the CI + * environment */ braft::cli::CliOptions opt; opt.timeout_ms = 6000; @@ -185,23 +179,18 @@ TEST_F(CliTest, basic) { /* remove peer */ { PeerId peerId("127.0.0.1:9032:0"); - butil::Status st = curve::chunkserver::RemovePeer(logicPoolId, - copysetId, - conf, - peerId, - opt); - LOG(INFO) << "remove peer: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::RemovePeer( + logicPoolId, copysetId, conf, peerId, opt); + LOG(INFO) << "remove peer: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); - /* 可能移除的是 leader,如果移除的是 leader,那么需要等到新的 leader 产生, - * 否则下面的 add peer 测试就会失败, wait 较长时间,是为了保证 remove - * leader 之后新 leader 选举成功,切 become leader 的 flush config - * 完成 */ + /* The removed peer may be the leader. If the leader is removed, we + * must wait until a new leader is elected, otherwise the add peer + * test below will fail. The long wait guarantees that after the + * remove, the new leader wins the election and its become-leader + * flush of the current config completes */ ::usleep(1.5 * 1000 * electionTimeoutMs); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); ASSERT_TRUE(status.ok()); } @@ -210,27 +199,20 @@ TEST_F(CliTest, basic) { Configuration conf; conf.parse_from("127.0.0.1:9030:0,127.0.0.1:9031:0"); PeerId peerId("127.0.0.1:9032:0"); - butil::Status st = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peerId, - opt); - LOG(INFO) << "add peer: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::AddPeer(logicPoolId, copysetId, + conf, peerId, opt); + LOG(INFO) << "add peer: " << st.error_code() << ", " << st.error_str(); ASSERT_TRUE(st.ok()); } - /* 重复 add 同一个 peer */ + /* Add the same peer repeatedly */ { Configuration conf; conf.parse_from("127.0.0.1:9030:0,127.0.0.1:9031:0"); PeerId peerId("127.0.0.1:9032:0"); - butil::Status st = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peerId, - opt); - LOG(INFO) << "add one peer repeat: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::AddPeer(logicPoolId, copysetId, + conf, peerId, opt); + LOG(INFO) << "add one peer repeat: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); } /* transfer leader */ @@ -242,95 +224,75 @@ TEST_F(CliTest, basic) { PeerId peer3("127.0.0.1:9032:0"); { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer1, - opt); - LOG(INFO) << "transfer leader: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer1, opt); + LOG(INFO) << "transfer leader: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); - /* transfer leader 只是讲 rpc 发送给leader,并不会等 leader transfer - * 成功才返回,所以这里需要等,除此之外,并不能立马去查 leader,因为 - * leader transfer 之后,可能返回之前的 leader,除此之外 transfer - * leader 成功了之后,become leader 进行时,leader 已经可查,但是 - * become leader 会执行 flush 当前 conf 来充当 noop,如果这个时候 - * 立马进行下一个 transfer leader,会被组织,因为同时只能有一个配置 - * 变更在进行 */ + /* Transfer leader only sends an rpc to the leader; it does not + * wait for the transfer to succeed before returning, so we must + * wait here. Besides, we cannot query the leader immediately, + * because right after the transfer the previous leader may still + * be returned. Even after the transfer succeeds and the new leader + * is queryable, become leader flushes the current conf as a noop + * entry, so immediately starting the next transfer leader would be + * rejected: only one configuration change can be in progress at a + * time */ ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer1.to_string().c_str(), leader.to_string().c_str()); } { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer2, - opt); - LOG(INFO) << "transfer leader: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer2, opt); + LOG(INFO) << "transfer leader: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer2.to_string().c_str(), leader.to_string().c_str()); } { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer3, - opt); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer3, opt); LOG(INFO) << "transfer leader: " << st.error_str(); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer3.to_string().c_str(), leader.to_string().c_str()); } - /* transfer 给 leader 给 leader,仍然返回成功 */ + /* Transferring leadership to the current leader still returns success */ { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer3, - opt); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer3, opt); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", "
+ << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer3.to_string().c_str(), leader.to_string().c_str()); } } - /* 异常分支测试 */ + /*Abnormal Branch Test*/ /* get leader - conf empty */ { Configuration conf; @@ -338,41 +300,35 @@ TEST_F(CliTest, basic) { ASSERT_FALSE(status.ok()); ASSERT_EQ(EINVAL, status.error_code()); } - /* get leader - 非法的地址 */ + /*Get leader - illegal address*/ { Configuration conf; - conf.parse_from("127.0.0.1:65540:0,127.0.0.1:65541:0,127.0.0.1:65542:0"); //NOLINT + conf.parse_from( + "127.0.0.1:65540:0,127.0.0.1:65541:0,127.0.0.1:65542:0"); // NOLINT butil::Status status = GetLeader(logicPoolId, copysetId, conf, &leader); ASSERT_FALSE(status.ok()); ASSERT_EQ(-1, status.error_code()); } - /* add peer - 不存在的 peer */ + /*Add peer - non-existent peer*/ { Configuration conf; conf.parse_from("127.0.0.1:9030:0,127.0.0.1:9031:0,127.0.0.1:9030:2"); - /* 添加一个根本不存在的节点 */ + /*Add a non-existent node*/ PeerId peerId("127.0.0.1:9039:2"); - butil::Status status = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peerId, - opt); + butil::Status status = curve::chunkserver::AddPeer( + logicPoolId, copysetId, conf, peerId, opt); ASSERT_FALSE(status.ok()); LOG(INFO) << "add peer: " << status.error_code() << ", " << status.error_str(); } - /* transfer leader - 不存在的 peer */ + /*Transfer leader - non-existent peer*/ { Configuration conf; conf.parse_from("127.0.0.1:9030:0,127.0.0.1:9031:0,127.0.0.1:9032:0"); PeerId peer1("127.0.0.1:9039:0"); { - butil::Status - status = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer1, - opt); + butil::Status status = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer1, opt); ASSERT_FALSE(status.ok()); LOG(INFO) << "add peer: " << status.error_code() << ", " << status.error_str(); diff --git a/test/chunkserver/client.cpp b/test/chunkserver/client.cpp index 7f8c2e6243..1452c24e72 100644 --- a/test/chunkserver/client.cpp +++ b/test/chunkserver/client.cpp @@ -20,49 +20,47 @@ * Author: wudemiao */ -#include -#include -#include #include #include +#include +#include +#include -#include "src/chunkserver/copyset_node.h" #include "proto/chunk.pb.h" #include "proto/copyset.pb.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node.h" #include "test/chunkserver/chunkserver_test_util.h" DEFINE_int32(request_size, 10, "Size of each requst"); DEFINE_int32(timeout_ms, 500, "Timeout for each request"); DEFINE_int32(election_timeout_ms, 3000, "election timeout ms"); DEFINE_int32(write_percentage, 100, "Percentage of fetch_add"); -DEFINE_string(confs, - "127.0.0.1:18200:0,127.0.0.1:18201:0,127.0.0.1:18202:0", +DEFINE_string(confs, "127.0.0.1:18200:0,127.0.0.1:18201:0,127.0.0.1:18202:0", "Configuration of the raft group"); -using curve::chunkserver::CopysetRequest; -using curve::chunkserver::CopysetResponse; -using curve::chunkserver::CopysetService_Stub; +using curve::chunkserver::CHUNK_OP_STATUS; +using curve::chunkserver::CHUNK_OP_TYPE; using curve::chunkserver::ChunkRequest; using curve::chunkserver::ChunkResponse; using curve::chunkserver::ChunkService_Stub; -using curve::chunkserver::PeerId; -using curve::chunkserver::LogicPoolID; -using curve::chunkserver::CopysetID; using curve::chunkserver::Configuration; -using curve::chunkserver::CHUNK_OP_TYPE; -using curve::chunkserver::CHUNK_OP_STATUS; using curve::chunkserver::COPYSET_OP_STATUS; +using curve::chunkserver::CopysetID; +using curve::chunkserver::CopysetRequest; +using curve::chunkserver::CopysetResponse; +using 
curve::chunkserver::CopysetService_Stub; +using curve::chunkserver::LogicPoolID; +using curve::chunkserver::PeerId; -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { gflags::ParseCommandLineFlags(&argc, &argv, true); - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - uint64_t chunkId = 1; - uint64_t sn = 1; - char fillCh = 'a'; + CopysetID copysetId = 100001; + uint64_t chunkId = 1; + uint64_t sn = 1; + char fillCh = 'a'; PeerId leader; curve::chunkserver::Configuration conf; @@ -70,9 +68,7 @@ int main(int argc, char *argv[]) { LOG(FATAL) << "conf parse failed: " << FLAGS_confs; } - - - // 创建 copyset + // Create copyset { std::vector peers; conf.list_peers(&peers); @@ -105,8 +101,10 @@ int main(int argc, char *argv[]) { if (cntl.Failed()) { LOG(FATAL) << "create copyset fialed: " << cntl.ErrorText(); } - if (response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS //NOLINT - || response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { //NOLINT + if (response.status() == + COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS // NOLINT + || response.status() == + COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { // NOLINT LOG(INFO) << "create copyset success: " << response.status(); } else { LOG(FATAL) << "create copyset failed: "; @@ -116,11 +114,9 @@ int main(int argc, char *argv[]) { // wait leader ::usleep(1000 * FLAGS_election_timeout_ms); - butil::Status status = curve::chunkserver::WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - FLAGS_election_timeout_ms); //NOLINT + butil::Status status = + curve::chunkserver::WaitLeader(logicPoolId, copysetId, conf, &leader, + FLAGS_election_timeout_ms); // NOLINT LOG(INFO) << "leader is: " << leader.to_string(); if (0 != status.error_code()) { LOG(FATAL) << "Wait leader failed"; @@ -176,8 +172,5 @@ int main(int argc, char *argv[]) { } } - return 0; } - - diff --git a/test/chunkserver/clone/clone_copyer_test.cpp b/test/chunkserver/clone/clone_copyer_test.cpp index 3c15969d9a..033664c6a3 100644 --- a/test/chunkserver/clone/clone_copyer_test.cpp +++ b/test/chunkserver/clone/clone_copyer_test.cpp @@ -20,12 +20,13 @@ * Author: yangyaokai */ -#include -#include +#include "src/chunkserver/clone_copyer.h" + #include +#include +#include #include "include/client/libcurve.h" -#include "src/chunkserver/clone_copyer.h" #include "src/chunkserver/clone_core.h" #include "test/chunkserver/clone/clone_test_util.h" #include "test/client/mock/mock_file_client.h" @@ -46,21 +47,16 @@ const uint64_t EXPIRED_USE = 5; class MockDownloadClosure : public DownloadClosure { public: explicit MockDownloadClosure(AsyncDownloadContext* context) - : DownloadClosure(nullptr, nullptr, context, nullptr) - , isRun_(false) {} + : DownloadClosure(nullptr, nullptr, context, nullptr), isRun_(false) {} void Run() { CHECK(!isRun_) << "closure has been invoked."; isRun_ = true; } - bool IsFailed() { - return isFailed_; - } + bool IsFailed() { return isFailed_; } - bool IsRun() { - return isRun_; - } + bool IsRun() { return isRun_; } void Reset() { isFailed_ = false; @@ -71,16 +67,14 @@ class MockDownloadClosure : public DownloadClosure { bool isRun_; }; -class CloneCopyerTest : public testing::Test { +class CloneCopyerTest : public testing::Test { public: void SetUp() { curveClient_ = std::make_shared(); s3Client_ = std::make_shared(); Aws::InitAPI(awsOptions_); } - void TearDown() { - Aws::ShutdownAPI(awsOptions_); - } + void TearDown() { Aws::ShutdownAPI(awsOptions_); } protected: std::shared_ptr curveClient_; @@ -133,8 +127,8 @@ 
TEST_F(CloneCopyerTest, BasicTest) { ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 用例:读curve上的数据,读取成功 - * 预期:调用Open和Read读取数据 + /* Use case: Reading data on curve, successful reading + * Expected: Calling Open and Read to read data */ context.location = "test:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test", _, true)) @@ -151,12 +145,11 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_FALSE(closure.IsFailed()); closure.Reset(); - /* 用例:再次读前面的文件,但是ret值为-1 - * 预期:直接Read,返回失败 + /* Use case: Read the previous file again, but the ret value is -1 + * Expected: Direct Read, return failed */ context.location = "test:0@cs"; - EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)) - .Times(0); + EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)).Times(0); EXPECT_CALL(*curveClient_, AioRead(_, _, _)) .WillOnce(Invoke([](int fd, CurveAioContext* context, curve::client::UserDataType dataType) { @@ -169,21 +162,20 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 用例:读curve上的数据,Open的时候失败 - * 预期:返回-1 + /* Use case: Reading data on curve, failed during Open + * Expected: Return -1 */ context.location = "test2:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test2", _, true)) .WillOnce(Return(-1)); - EXPECT_CALL(*curveClient_, AioRead(_, _, _)) - .Times(0); + EXPECT_CALL(*curveClient_, AioRead(_, _, _)).Times(0); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 用例:读curve上的数据,Read的时候失败 - * 预期:返回-1 + /* Use case: Failed to read data on curve + * Expected: Return -1 */ context.location = "test2:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test2", _, true)) @@ -195,14 +187,13 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - - /* 用例:读s3上的数据,读取成功 - * 预期:返回0 + /* Use case: Reading data on s3, successful reading + * Expected: Return 0 */ context.location = "test@s3"; EXPECT_CALL(*s3Client_, GetObjectAsync(_)) .WillOnce(Invoke( - [&] (const std::shared_ptr& context) { + [&](const std::shared_ptr& context) { context->retCode = 0; context->cb(s3Client_.get(), context); })); @@ -211,13 +202,13 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_FALSE(closure.IsFailed()); closure.Reset(); - /* 用例:读s3上的数据,读取失败 - * 预期:返回-1 + /* Use case: Read data on s3, read failed + * Expected: Return -1 */ context.location = "test@s3"; EXPECT_CALL(*s3Client_, GetObjectAsync(_)) .WillOnce(Invoke( - [&] (const std::shared_ptr& context) { + [&](const std::shared_ptr& context) { context->retCode = -1; context->cb(s3Client_.get(), context); })); @@ -226,18 +217,14 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - delete [] buf; + delete[] buf; } // fini test { - EXPECT_CALL(*curveClient_, Close(1)) - .Times(1); - EXPECT_CALL(*curveClient_, Close(2)) - .Times(1); - EXPECT_CALL(*curveClient_, UnInit()) - .Times(1); - EXPECT_CALL(*s3Client_, Deinit()) - .Times(1); + EXPECT_CALL(*curveClient_, Close(1)).Times(1); + EXPECT_CALL(*curveClient_, Close(2)).Times(1); + EXPECT_CALL(*curveClient_, UnInit()).Times(1); + EXPECT_CALL(*s3Client_, Deinit()).Times(1); ASSERT_EQ(0, copyer.Fini()); } } @@ -250,16 +237,15 @@ TEST_F(CloneCopyerTest, DisableTest) { options.curveUser.owner = ROOT_OWNER; options.curveUser.password = ROOT_PWD; options.curveFileTimeoutSec = EXPIRED_USE; - // 禁用curveclient和s3adapter + // Disable curveclient and s3adapter options.curveClient = nullptr; options.s3Client = nullptr; // curvefs init success - 
EXPECT_CALL(*curveClient_, Init(_)) - .Times(0); + EXPECT_CALL(*curveClient_, Init(_)).Times(0); ASSERT_EQ(0, copyer.Init(options)); - // 从上s3或者curve请求下载数据会返回失败 + // Requesting data download from s3 or curve above will return a failure { char* buf = new char[4096]; AsyncDownloadContext context; @@ -268,30 +254,27 @@ TEST_F(CloneCopyerTest, DisableTest) { context.buf = buf; MockDownloadClosure closure(&context); - /* 用例:读curve上的数据,读取失败 + /* Use case: Read data on curve, read failed */ context.location = "test:0@cs"; - EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)) - .Times(0); - EXPECT_CALL(*curveClient_, AioRead(_, _, _)) - .Times(0); + EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)).Times(0); + EXPECT_CALL(*curveClient_, AioRead(_, _, _)).Times(0); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 用例:读s3上的数据,读取失败 + /* Use case: Read data on s3, read failed */ context.location = "test@s3"; - EXPECT_CALL(*s3Client_, GetObjectAsync(_)) - .Times(0); + EXPECT_CALL(*s3Client_, GetObjectAsync(_)).Times(0); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - delete [] buf; + delete[] buf; } - // fini 可以成功 + // Fini can succeed ASSERT_EQ(0, copyer.Fini()); } @@ -308,7 +291,7 @@ TEST_F(CloneCopyerTest, ExpiredTest) { // curvefs init success EXPECT_CALL(*curveClient_, Init(StrEq(CURVE_CONF))) - .WillOnce(Return(LIBCURVE_ERROR::OK)); + .WillOnce(Return(LIBCURVE_ERROR::OK)); ASSERT_EQ(0, copyer.Init(options)); { @@ -320,18 +303,18 @@ TEST_F(CloneCopyerTest, ExpiredTest) { MockDownloadClosure closure(&context); /* Case: Read the same chunk after it expired - * Expect: Re-Open the curve file - */ + * Expect: Re-Open the curve file + */ context.location = "test:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test", _, true)) - .WillOnce(Return(1)); + .WillOnce(Return(1)); EXPECT_CALL(*curveClient_, AioRead(_, _, _)) - .WillOnce(Invoke([](int fd, CurveAioContext* context, - curve::client::UserDataType dataType) { - context->ret = 1024; - context->cb(context); - return LIBCURVE_ERROR::OK; - })); + .WillOnce(Invoke([](int fd, CurveAioContext* context, + curve::client::UserDataType dataType) { + context->ret = 1024; + context->cb(context); + return LIBCURVE_ERROR::OK; + })); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); ASSERT_FALSE(closure.IsFailed()); @@ -341,26 +324,23 @@ TEST_F(CloneCopyerTest, ExpiredTest) { context.location = "test:0@cs"; std::this_thread::sleep_for(std::chrono::seconds(1)); EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)) - .WillOnce(Return(2)); + .WillOnce(Return(2)); EXPECT_CALL(*curveClient_, AioRead(_, _, _)) - .WillOnce(Invoke([](int fd, CurveAioContext* context, - curve::client::UserDataType dataType) { - context->ret = 1024; - context->cb(context); - return LIBCURVE_ERROR::OK; - })); + .WillOnce(Invoke([](int fd, CurveAioContext* context, + curve::client::UserDataType dataType) { + context->ret = 1024; + context->cb(context); + return LIBCURVE_ERROR::OK; + })); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); closure.Reset(); - delete [] buf; + delete[] buf; } // fini - EXPECT_CALL(*curveClient_, Close(2)) - .Times(1); - EXPECT_CALL(*curveClient_, UnInit()) - .Times(1); - EXPECT_CALL(*s3Client_, Deinit()) - .Times(1); + EXPECT_CALL(*curveClient_, Close(2)).Times(1); + EXPECT_CALL(*curveClient_, UnInit()).Times(1); + EXPECT_CALL(*s3Client_, Deinit()).Times(1); ASSERT_EQ(0, copyer.Fini()); } diff --git 
a/test/chunkserver/clone/clone_core_test.cpp b/test/chunkserver/clone/clone_core_test.cpp index 86d6a70898..2632acb635 100644 --- a/test/chunkserver/clone/clone_core_test.cpp +++ b/test/chunkserver/clone/clone_core_test.cpp @@ -20,21 +20,22 @@ * Author: yangyaokai */ -#include -#include +#include "src/chunkserver/clone_core.h" + #include +#include #include +#include #include -#include "src/chunkserver/clone_core.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/op_request.h" -#include "test/chunkserver/mock_copyset_node.h" +#include "src/fs/local_filesystem.h" #include "test/chunkserver/clone/clone_test_util.h" #include "test/chunkserver/clone/mock_clone_copyer.h" #include "test/chunkserver/datastore/mock_datastore.h" -#include "src/fs/local_filesystem.h" +#include "test/chunkserver/mock_copyset_node.h" namespace curve { namespace chunkserver { @@ -46,7 +47,7 @@ using curve::fs::LocalFsFactory; ACTION_TEMPLATE(SaveBraftTask, HAS_1_TEMPLATE_PARAMS(int, k), AND_1_VALUE_PARAMS(value)) { auto input = static_cast(::testing::get(args)); - auto output = static_cast(value); + auto output = static_cast(value); output->data->swap(*input.data); output->done = input.done; } @@ -83,18 +84,19 @@ class CloneCoreTest .WillRepeatedly(Return(LAST_INDEX)); } - std::shared_ptr - GenerateReadRequest(CHUNK_OP_TYPE optype, off_t offset, size_t length) { - ChunkRequest *readRequest = new ChunkRequest(); + std::shared_ptr GenerateReadRequest(CHUNK_OP_TYPE optype, + off_t offset, + size_t length) { + ChunkRequest* readRequest = new ChunkRequest(); readRequest->set_logicpoolid(LOGICPOOL_ID); readRequest->set_copysetid(COPYSET_ID); readRequest->set_chunkid(CHUNK_ID); readRequest->set_optype(optype); readRequest->set_offset(offset); readRequest->set_size(length); - brpc::Controller *cntl = new brpc::Controller(); - ChunkResponse *response = new ChunkResponse(); - FakeChunkClosure *closure = new FakeChunkClosure(); + brpc::Controller* cntl = new brpc::Controller(); + ChunkResponse* response = new ChunkResponse(); + FakeChunkClosure* closure = new FakeChunkClosure(); closure->SetCntl(cntl); closure->SetRequest(readRequest); closure->SetResponse(response); @@ -105,19 +107,19 @@ class CloneCoreTest } void SetCloneParam(std::shared_ptr readRequest) { - ChunkRequest *request = - const_cast(readRequest->GetChunkRequest()); + ChunkRequest* request = + const_cast(readRequest->GetChunkRequest()); request->set_clonefilesource("/test"); request->set_clonefileoffset(0); } - void CheckTask(const braft::Task &task, off_t offset, size_t length, - char *buf) { + void CheckTask(const braft::Task& task, off_t offset, size_t length, + char* buf) { butil::IOBuf data; ChunkRequest request; auto req = ChunkOpRequest::Decode(*task.data, &request, &data, 0, PeerId("127.0.0.1:8200:0")); - auto preq = dynamic_cast(req.get()); + auto preq = dynamic_cast(req.get()); ASSERT_TRUE(preq != nullptr); ASSERT_EQ(LOGICPOOL_ID, request.logicpoolid()); @@ -139,19 +141,20 @@ class CloneCoreTest }; /** - * 测试CHUNK_OP_READ类型请求,请求读取的chunk不是clone chunk - * result:不会从远端拷贝数据,直接从本地读取数据,结果返回成功 + * Test CHUNK_OP_READ type request, requesting to read a chunk that is not a + * clone chunk Result: Will not copy data from the remote end, directly read + * data from the local, and the result is returned as successful */ TEST_P(CloneCoreTest, ReadChunkTest1) { off_t offset = 0; size_t length = 5 * blocksize_; - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); - std::shared_ptr readRequest - = 
GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - // 不会从源端拷贝数据 + // Will not copy data from the source EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); - // 获取chunk信息 + // Obtain chunk information CSChunkInfo info; info.isClone = false; info.metaPageSize = pagesize_; @@ -159,16 +162,16 @@ TEST_P(CloneCoreTest, ReadChunkTest1) { info.blockSize = blocksize_; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(1); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 不会产生PasteChunkRequest + // No PasteChunkRequest will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->resContent_.status); } /** - * 测试CHUNK_OP_READ类型请求,请求读取的chunk是clone chunk - * case1:请求读取的区域全部被写过 - * result1:全部从本地chunk读取 - * case2:请求读取的区域都未被写过 - * result2:全部从源端读取,产生paste请求 - * case3:请求读取的区域有部分被写过,部分未被写过 - * result3:写过区域从本地chunk读取,未写过区域从源端读取,产生paste请求 - * case4:请求读取的区域部分被写过,请求的偏移未与pagesize对齐 - * result4:返回错误 + * Test CHUNK_OP_READ type request where the requested chunk is a clone chunk + * Case1: All regions requested for reading have been written + * Result1: Read everything from the local chunk + * Case2: None of the requested read area has been written + * Result2: Read everything from the source and generate a paste request + * Case3: The requested read area has been partially written and partially + * unwritten Result3: Written regions are read from the local chunk, + * unwritten regions are read from the source, generating a paste request + * Case4: The requested read area has been partially written, and the + * requested offset is not aligned with pagesize Result4: Error returned */ TEST_P(CloneCoreTest, ReadChunkTest2) { off_t offset = 0; @@ -195,32 +200,33 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { info.chunkSize = chunksize_; info.blockSize = blocksize_; info.bitmap = std::make_shared(chunksize_ / blocksize_); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); // case1 { info.bitmap->Set(); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each call to + // HandleReadRequest std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 - char *chunkData = new char[length]; + // Read the chunk file + char* chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), Return(CSErrorCode::Success))); - // 更新 applied index +
// Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 不会产生PasteChunkRequest + // No PasteChunkRequest will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -237,26 +243,27 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { // case2 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the + // closure std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) .WillRepeatedly( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -264,16 +271,17 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure will be handed over to the + // concurrency layer for processing, Since the node here is mock, it is + // necessary to proactively execute task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_EQ( @@ -289,33 +297,34 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { { info.bitmap->Clear(); info.bitmap->Set(0, 2); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the + // closure std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) .WillRepeatedly( DoAll(SetArgPointee<1>(info), 
Return(CSErrorCode::Success))); - // 读chunk文件 - char chunkData[pagesize_ + 2 * blocksize_]; // NOLINT(runtime/arrays) - memset(chunkData, 'a', pagesize_ + 2 * blocksize_); + // Reading Chunk Files + char chunkData[pagesize_ + 2 * blocksize_]; // NOLINT(runtime/arrays) + memset(chunkData, 'a', pagesize_ + 2 * blocksize_); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, 0, pagesize_ + 2 * blocksize_)) .WillOnce( DoAll(SetArrayArgument<2>( chunkData, chunkData + pagesize_ + 2 * blocksize_), Return(CSErrorCode::Success))); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -323,24 +332,30 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure is handed over to the + // concurrency layer for processing. Since the node here is a mock, + // task.done.Run must be executed manually to release the resources ASSERT_NE(nullptr, task.done); task.done->Run(); - ASSERT_EQ(memcmp(chunkData, - closure->resContent_.attachment.to_string().c_str(), //NOLINT - 3 * blocksize_), 0); + ASSERT_EQ( + memcmp( + chunkData, + closure->resContent_.attachment.to_string().c_str(), // NOLINT + 3 * blocksize_), + 0); ASSERT_EQ(memcmp(cloneData, - closure->resContent_.attachment.to_string().c_str() + 3 * blocksize_, //NOLINT - 2 * blocksize_), 0); + closure->resContent_.attachment.to_string().c_str() + + 3 * blocksize_, // NOLINT + 2 * blocksize_), + 0); } // case4 { @@ -349,7 +364,8 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { length = 4 * blocksize_; info.bitmap->Clear(); info.bitmap->Set(0, 2); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each call to + // HandleReadRequest std::shared_ptr<ReadChunkRequest> readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); @@ -357,18 +373,18 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(0); - // 不产生PasteChunkRequest + // Do not generate PasteChunkRequest braft::Task task; EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(-1, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, @@ -377,8 +393,9 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { } /** - * 测试CHUNK_OP_READ类型请求,请求读取的chunk不存在,但是请求中包含源端数据地址 - * 预期结果:从源端下载数据,产生paste请求 + * Test CHUNK_OP_READ type request, where the chunk requested for reading does + * not exist, but
the request contains the source data address. + * Expected result: data is downloaded from the source, generating a paste + * request */ TEST_P(CloneCoreTest, ReadChunkTest3) { off_t offset = 0; @@ -389,32 +406,33 @@ TEST_P(CloneCoreTest, ReadChunkTest3) { info.chunkSize = chunksize_; info.blockSize = blocksize_; info.bitmap = std::make_shared<Bitmap>(chunksize_ / pagesize_); - std::shared_ptr<CloneCore> core - = std::make_shared<CloneCore>(SLICE_SIZE, true, copyer_); + std::shared_ptr<CloneCore> core = + std::make_shared<CloneCore>(SLICE_SIZE, true, copyer_); // case1 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each call to + // HandleReadRequest std::shared_ptr<ReadChunkRequest> readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); SetCloneParam(readRequest); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) .WillRepeatedly(Return(CSErrorCode::ChunkNotExistError)); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -422,16 +440,17 @@ ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure is handed over to the + // concurrency layer for processing. Since the node here is a mock, + // task.done.Run must be executed manually to release the resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_EQ( @@ -445,13 +464,13 @@ TEST_P(CloneCoreTest, ReadChunkTest3) { } /** - * 执行HandleReadRequest过程中出现错误 - * case1:GetChunkInfo时出错 - * result1:返回-1,response状态改为CHUNK_OP_STATUS_FAILURE_UNKNOWN - * case2:Download时出错 - * result2:返回-1,response状态改为CHUNK_OP_STATUS_FAILURE_UNKNOWN - * case3:ReadChunk时出错 - * result3:返回-1,response状态改为CHUNK_OP_STATUS_FAILURE_UNKNOWN + * An error occurs during the execution of HandleReadRequest + * Case1: GetChunkInfo fails + * Result1: returns -1, and the response status is set to + * CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Case2: the download fails + * Result2: returns -1, and the response status is set to + * CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Case3: ReadChunk fails + * Result3: returns -1, and the response status is set to + * CHUNK_OP_STATUS_FAILURE_UNKNOWN */ TEST_P(CloneCoreTest, ReadChunkErrorTest) { off_t offset = 0; @@ -479,8 +498,8 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { ASSERT_EQ(-1, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure
*closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, @@ -494,10 +513,10 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); closure->SetFailed(); })); @@ -505,8 +524,8 @@ ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, @@ -522,17 +541,17 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { .Times(2) .WillRepeatedly( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) .WillOnce(Return(CSErrorCode::InternalError)); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -540,16 +559,17 @@ ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure is handed over to the + // concurrency layer for processing. Since the node here is a mock, + // task.done.Run must be executed manually to release the resources ASSERT_NE(nullptr, task.done); task.done->Run(); delete[] cloneData; @@ -557,19 +577,20 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { } /** - * 测试CHUNK_OP_RECOVER类型请求,请求的chunk不是clone chunk - * result:不会从远端拷贝数据,也不会从本地读取数据,直接返回成功 + * Test CHUNK_OP_RECOVER type request, where the requested chunk is not a + * clone chunk + * Result: no data is copied from the source and no data is read locally; + * success is returned directly */ TEST_P(CloneCoreTest, RecoverChunkTest1) { off_t offset = 0; size_t length = 5 * pagesize_; - std::shared_ptr<CloneCore> core - = std::make_shared<CloneCore>(SLICE_SIZE, true, copyer_); - std::shared_ptr<ReadChunkRequest> readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); - // 不会从源端拷贝数据 + std::shared_ptr<CloneCore>
core = + std::make_shared<CloneCore>(SLICE_SIZE, true, copyer_); + std::shared_ptr<ReadChunkRequest> readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); + // Will not copy data from the source EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); - // 获取chunk信息 + // Obtain chunk information CSChunkInfo info; info.isClone = false; info.metaPageSize = pagesize_; @@ -577,14 +598,14 @@ TEST_P(CloneCoreTest, RecoverChunkTest1) { info.blockSize = blocksize_; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); - // 不会产生PasteChunkRequest + // No PasteChunkRequest will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -592,11 +613,11 @@ TEST_P(CloneCoreTest, RecoverChunkTest1) { } /** - * 测试CHUNK_OP_RECOVER类型请求,请求的chunk是clone chunk - * case1:请求恢复的区域全部被写过 - * result1:不会拷贝数据,直接返回成功 - * case2:请求恢复的区域全部或部分未被写过 - * result2:从远端拷贝数据,并产生paste请求 + * Test CHUNK_OP_RECOVER type request, where the requested chunk is a clone + * chunk + * Case1: all areas requested for recovery have been written + * Result1: no data is copied; success is returned directly + * Case2: the area requested for recovery is wholly or partially unwritten + * Result2: data is copied from the source, generating a paste request */ TEST_P(CloneCoreTest, RecoverChunkTest2) { off_t offset = 0; @@ -607,26 +628,27 @@ TEST_P(CloneCoreTest, RecoverChunkTest2) { info.chunkSize = chunksize_; info.blockSize = blocksize_; info.bitmap = std::make_shared<Bitmap>(chunksize_ / blocksize_); - std::shared_ptr<CloneCore> core - = std::make_shared<CloneCore>(SLICE_SIZE, true, copyer_); + std::shared_ptr<CloneCore> core = + std::make_shared<CloneCore>(SLICE_SIZE, true, copyer_); // case1 { info.bitmap->Set(); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each call to + // HandleReadRequest std::shared_ptr<ReadChunkRequest> readRequest = GenerateReadRequest( CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不会读chunk文件 + // The chunk file will not be read EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); - // 不会产生PasteChunkRequest + // No PasteChunkRequest will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -636,23 +658,24 @@ TEST_P(CloneCoreTest, RecoverChunkTest2) { // case2 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each call to + // HandleReadRequest std::shared_ptr<ReadChunkRequest> readRequest = GenerateReadRequest( CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT - char *cloneData = new char[length];
+ char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不会读chunk文件 + // The chunk file will not be read EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -660,14 +683,16 @@ ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); - // closure被转交给PasteRequest处理,这里closure还未执行 + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); + // The closure has been forwarded to the PasteRequest for processing; + // it has not been executed yet ASSERT_FALSE(closure->isDone_); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure is handed over to the + // concurrency layer for processing. Since the node here is a mock, + // task.done.Run must be executed manually to release the resources ASSERT_NE(nullptr, task.done); task.done->Run(); @@ -678,8 +703,9 @@ } } -// case1: read chunk时,从远端拷贝数据,但是不会产生paste请求 -// case2: recover chunk时,从远端拷贝数据,会产生paste请求 +// Case1: when reading a chunk, data is copied from the source, but no paste +// request is generated +// Case2: when recovering a chunk, copying data from the source generates a +// paste request TEST_P(CloneCoreTest, DisablePasteTest) { off_t offset = 0; size_t length = 5 * blocksize_; @@ -689,39 +715,40 @@ info.chunkSize = chunksize_; info.blockSize = blocksize_; info.bitmap = std::make_shared<Bitmap>(chunksize_ / blocksize_); - std::shared_ptr<CloneCore> core - = std::make_shared<CloneCore>(SLICE_SIZE, false, copyer_); + std::shared_ptr<CloneCore> core = + std::make_shared<CloneCore>(SLICE_SIZE, false, copyer_); // case1 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each call to + // HandleReadRequest std::shared_ptr<ReadChunkRequest> readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) .WillRepeatedly( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 不会产生paste chunk请求 + // No paste chunk request will be generated EXPECT_CALL(*node_, Propose(_)).Times(0);
ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -732,23 +759,24 @@ TEST_P(CloneCoreTest, DisablePasteTest) { // case2 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each call to + // HandleReadRequest std::shared_ptr<ReadChunkRequest> readRequest = GenerateReadRequest( CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不会读chunk文件 + // The chunk file will not be read EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -756,14 +784,16 @@ ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); - // closure被转交给PasteRequest处理,这里closure还未执行 + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); + // The closure has been forwarded to the PasteRequest for processing; + // it has not been executed yet ASSERT_FALSE(closure->isDone_); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure is handed over to the + // concurrency layer for processing. Since the node here is a mock, + // task.done.Run must be executed manually to release the resources ASSERT_NE(nullptr, task.done); task.done->Run(); @@ -775,8 +805,7 @@ } INSTANTIATE_TEST_CASE_P( - CloneCoreTest, - CloneCoreTest, + CloneCoreTest, CloneCoreTest, ::testing::Values( // chunk size block size, metapagesize std::make_tuple(16U * 1024 * 1024, 4096U, 4096U), diff --git a/test/chunkserver/clone/clone_manager_test.cpp b/test/chunkserver/clone/clone_manager_test.cpp index f41bc1bed2..6b29058364 100644 --- a/test/chunkserver/clone/clone_manager_test.cpp +++ b/test/chunkserver/clone/clone_manager_test.cpp @@ -20,12 +20,14 @@ * Author: yangyaokai */ -#include -#include +#include "src/chunkserver/clone_manager.h" + #include +#include +#include + #include -#include "src/chunkserver/clone_manager.h" #include "src/chunkserver/op_request.h" namespace curve { namespace chunkserver { class UTCloneTask : public CloneTask { public: - UTCloneTask() : CloneTask(nullptr, nullptr, nullptr) - , sleepTime_(0) {} + UTCloneTask() : CloneTask(nullptr, nullptr, nullptr), sleepTime_(0) {} void Run() { - std::this_thread::sleep_for( - std::chrono::milliseconds(sleepTime_)); + std::this_thread::sleep_for(std::chrono::milliseconds(sleepTime_)); isComplete_ =
true; } - void SetSleepTime(uint32_t sleepTime) { - sleepTime_ = sleepTime; - } + void SetSleepTime(uint32_t sleepTime) { sleepTime_ = sleepTime; } private: uint32_t sleepTime_; }; -class CloneManagerTest : public testing::Test { +class CloneManagerTest : public testing::Test { public: void SetUp() {} void TearDown() {} @@ -58,32 +56,34 @@ TEST_F(CloneManagerTest, BasicTest) { CloneOptions options; options.checkPeriod = 100; CloneManager cloneMgr; - // 如果线程数设置为0,启动线程池失败 + // If the number of threads is set to 0, starting the thread pool fails { options.threadNum = 0; ASSERT_EQ(cloneMgr.Init(options), 0); ASSERT_EQ(cloneMgr.Run(), -1); } - // 队列深度为0,启动线程池会失败 + // If the queue depth is 0, starting the thread pool will fail { options.threadNum = 5; options.queueCapacity = 0; ASSERT_EQ(cloneMgr.Init(options), 0); ASSERT_EQ(cloneMgr.Run(), -1); } - // 线程数和队列深度都大于0,可以启动线程池 + // If the number of threads and the queue depth are both greater than 0, + // the thread pool can be started { options.threadNum = 5; options.queueCapacity = 100; ASSERT_EQ(cloneMgr.Init(options), 0); ASSERT_EQ(cloneMgr.Run(), 0); - // 线程池启动运行后,重复Run直接返回成功 + // Once the thread pool is running, calling Run again returns + // success directly ASSERT_EQ(cloneMgr.Run(), 0); } - // 通过Fini暂停任务 + // Pause tasks through Fini { ASSERT_EQ(cloneMgr.Fini(), 0); - // 重复Fini直接返回成功 + // Calling Fini again returns success directly ASSERT_EQ(cloneMgr.Fini(), 0); } } @@ -99,9 +99,9 @@ TEST_F(CloneManagerTest, TaskTest) { std::shared_ptr<ReadChunkRequest> req = std::make_shared<ReadChunkRequest>(); - // 测试 GenerateCloneTask 和 IssueCloneTask + // Testing GenerateCloneTask and IssueCloneTask { - // options.core为nullptr,则产生的任务也是nullptr + // If options.core is nullptr, the resulting task is also nullptr std::shared_ptr<CloneTask> task = cloneMgr.GenerateCloneTask(req, nullptr); ASSERT_EQ(task, nullptr); @@ -111,55 +111,58 @@ task = cloneMgr.GenerateCloneTask(req, nullptr); ASSERT_NE(task, nullptr); - // 自定义任务测试 + // Custom task testing task = std::make_shared<UTCloneTask>(); ASSERT_FALSE(task->IsComplete()); - // 如果clone manager还未启动,则无法发布任务 + // If the clone manager has not yet started, tasks cannot be + // issued ASSERT_FALSE(cloneMgr.IssueCloneTask(task)); - // 启动以后就可以发布任务 + // After startup, tasks can be issued ASSERT_EQ(cloneMgr.Run(), 0); ASSERT_TRUE(cloneMgr.IssueCloneTask(task)); - // 等待一点时间,任务执行完成,检查任务状态以及是否从队列中移除 + // Wait a moment for the task to finish, then check the task status + // and whether it has been removed from the queue std::this_thread::sleep_for(std::chrono::milliseconds(200)); ASSERT_TRUE(task->IsComplete()); - // 无法发布空任务 + // A null task cannot be issued ASSERT_FALSE(cloneMgr.IssueCloneTask(nullptr)); } - // 测试自定义的测试任务 + // Test custom test tasks { - // 初始化执行时间各不相同的任务 + // Initialize tasks with varying execution times std::shared_ptr<UTCloneTask> task1 = std::make_shared<UTCloneTask>(); std::shared_ptr<UTCloneTask> task2 = std::make_shared<UTCloneTask>(); std::shared_ptr<UTCloneTask> task3 = std::make_shared<UTCloneTask>(); task1->SetSleepTime(100); task2->SetSleepTime(300); task3->SetSleepTime(500); - // 同时发布所有任务 + // Issue all tasks simultaneously ASSERT_TRUE(cloneMgr.IssueCloneTask(task1)); ASSERT_TRUE(cloneMgr.IssueCloneTask(task2)); ASSERT_TRUE(cloneMgr.IssueCloneTask(task3)); - // 此时任务还在执行中,此时引用计数为2 + // At this point the tasks are still executing, and the reference + // count is 2 ASSERT_FALSE(task1->IsComplete()); ASSERT_FALSE(task2->IsComplete()); ASSERT_FALSE(task3->IsComplete()); - // 等待220ms,task1执行成功,其他还没完成;220ms基本可以保证task1执行完 -
std::this_thread::sleep_for( - std::chrono::milliseconds(220)); + // Wait 220ms: task1 has finished while the other tasks have not; + // 220ms is basically enough to guarantee that task1 completes + std::this_thread::sleep_for(std::chrono::milliseconds(220)); ASSERT_TRUE(task1->IsComplete()); ASSERT_FALSE(task2->IsComplete()); ASSERT_FALSE(task3->IsComplete()); - // 再等待200ms,task2执行成功,task3还在执行中 - std::this_thread::sleep_for( - std::chrono::milliseconds(200)); + // After another 200ms, task2 has finished and task3 is still + // running + std::this_thread::sleep_for(std::chrono::milliseconds(200)); ASSERT_TRUE(task1->IsComplete()); ASSERT_TRUE(task2->IsComplete()); ASSERT_FALSE(task3->IsComplete()); - // 再等待200ms,所有任务执行成功,任务全被移出队列 - std::this_thread::sleep_for( - std::chrono::milliseconds(200)); + // After another 200ms, all tasks have finished and have been moved + // out of the queue + std::this_thread::sleep_for(std::chrono::milliseconds(200)); ASSERT_TRUE(task1->IsComplete()); ASSERT_TRUE(task2->IsComplete()); ASSERT_TRUE(task3->IsComplete()); diff --git a/test/chunkserver/clone/op_request_test.cpp b/test/chunkserver/clone/op_request_test.cpp index 6746594097..1b509e4b0f 100644 --- a/test/chunkserver/clone/op_request_test.cpp +++ b/test/chunkserver/clone/op_request_test.cpp @@ -20,16 +20,18 @@ * Author: yangyaokai */ -#include -#include +#include "src/chunkserver/op_request.h" + #include +#include +#include + #include -#include "src/chunkserver/op_request.h" #include "test/chunkserver/clone/clone_test_util.h" #include "test/chunkserver/clone/mock_clone_manager.h" -#include "test/chunkserver/mock_copyset_node.h" #include "test/chunkserver/datastore/mock_datastore.h" +#include "test/chunkserver/mock_copyset_node.h" namespace curve { namespace chunkserver { @@ -67,28 +69,23 @@ class OpRequestTest FakeCopysetNode(); FakeCloneManager(); } - void TearDown() { - } + void TearDown() {} void FakeCopysetNode() { - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); - EXPECT_CALL(*node_, GetDataStore()) - .WillRepeatedly(Return(datastore_)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, GetDataStore()).WillRepeatedly(Return(datastore_)); EXPECT_CALL(*node_, GetConcurrentApplyModule()) .WillRepeatedly(Return(concurrentApplyModule_.get())); EXPECT_CALL(*node_, GetAppliedIndex()) .WillRepeatedly(Return(LAST_INDEX)); PeerId peer(PEER_STRING); - EXPECT_CALL(*node_, GetLeaderId()) - .WillRepeatedly(Return(peer)); + EXPECT_CALL(*node_, GetLeaderId()).WillRepeatedly(Return(peer)); } void FakeCloneManager() { EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) .WillRepeatedly(Return(nullptr)); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillRepeatedly(Return(true)); } protected: @@ -99,11 +96,11 @@ class OpRequestTest std::shared_ptr<MockCopysetNode> node_; std::shared_ptr<MockDataStore> datastore_; std::shared_ptr<MockCloneManager> cloneMgr_; - std::shared_ptr<ConcurrentApplyModule> concurrentApplyModule_; + std::shared_ptr<ConcurrentApplyModule> concurrentApplyModule_; }; TEST_P(OpRequestTest, CreateCloneTest) { - // 创建CreateCloneChunkRequest + // Create CreateCloneChunkRequest LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -118,20 +115,17 @@ request->set_location(location); request->set_size(size); request->set_sn(sn); - brpc::Controller *cntl = new brpc::Controller(); - ChunkResponse *response = new ChunkResponse();
- UnitTestClosure *closure = new UnitTestClosure(); + brpc::Controller* cntl = new brpc::Controller(); + ChunkResponse* response = new ChunkResponse(); + UnitTestClosure* closure = new UnitTestClosure(); closure->SetCntl(cntl); closure->SetRequest(request); closure->SetResponse(response); std::shared_ptr<CreateCloneChunkRequest> opReq = - std::make_shared<CreateCloneChunkRequest>(node_, - cntl, - request, - response, - closure); + std::make_shared<CreateCloneChunkRequest>(node_, cntl, request, + response, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -152,23 +146,22 @@ ASSERT_EQ(sn, request->sn()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: asks for the request to be forwarded, returning + * CHUNK_OP_STATUS_REDIRECTED */ { - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(false)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(false)); // PeerId leaderId(PEER_STRING); // EXPECT_CALL(*node_, GetLeaderId()) // .WillOnce(Return(leaderId)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); opReq->Process(); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, @@ -176,28 +169,27 @@ // ASSERT_STREQ(closure->response_->redirect().c_str(), PEER_STRING); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == true - * 预期: 会调用Propose,且不会调用closure + * Test Process + * Scenario: node_->IsLeaderTerm() == true + * Expected: Propose will be called and closure will not be called */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::Task task; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveArg<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveArg<0>(&task)); opReq->Process(); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(closure->response_->has_status()); - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // Since the node here is a mock, task.done.Run must be executed + // manually to release the resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_TRUE(closure->isDone_); @@ -251,8 +243,7 @@ // set expection EXPECT_CALL(*datastore_, CreateCloneChunk(_, _, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InvalidArgError)); - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(0); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(0); opReq->OnApply(3, closure); @@ -264,15 +255,15 @@ closure->response_->status()); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk成功 - * 预期:无返回 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk successful + * Expected: No return */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, CreateCloneChunk(_, _, _, _, _)) .WillOnce(Return(CSErrorCode::Success)); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回InternalError - * 预期:进程退出 + * Testing OnApplyFromLog
+ * Scenario: CreateCloneChunk failed, returning InternalError + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, CreateCloneChunk(_, _, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InternalError)); butil::IOBuf data; ASSERT_DEATH(opReq->OnApplyFromLog(datastore_, *request, data), ""); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回其他错误 - * 预期:进程退出 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk failed, returning another error + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, CreateCloneChunk(_, _, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InvalidArgError)); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } TEST_P(OpRequestTest, PasteChunkTest) { - // 生成临时的readrequest - ChunkResponse *response = new ChunkResponse(); + // Generate a temporary read request + ChunkResponse* response = new ChunkResponse(); std::shared_ptr<ReadChunkRequest> readChunkRequest = - std::make_shared<ReadChunkRequest>(node_, - nullptr, - nullptr, - nullptr, - response, - nullptr); - - // 创建PasteChunkRequest + std::make_shared<ReadChunkRequest>(node_, nullptr, nullptr, nullptr, + response, nullptr); + + // Create PasteChunkRequest LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -343,17 +330,14 @@ butil::IOBuf cloneData; cloneData.append(str); - UnitTestClosure *closure = new UnitTestClosure(); + UnitTestClosure* closure = new UnitTestClosure(); closure->SetRequest(request); closure->SetResponse(response); std::shared_ptr<PasteChunkInternalRequest> opReq = - std::make_shared<PasteChunkInternalRequest>(node_, - request, - response, - &cloneData, - closure); + std::make_shared<PasteChunkInternalRequest>(node_, request, response, + &cloneData, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -376,23 +360,22 @@ ASSERT_STREQ(str.c_str(), data.to_string().c_str()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: asks for the request to be forwarded, returning + * CHUNK_OP_STATUS_REDIRECTED */ { - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(false)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(false)); // PeerId leaderId(PEER_STRING); // EXPECT_CALL(*node_, GetLeaderId()) // .WillOnce(Return(leaderId)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); opReq->Process(); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, @@ -400,85 +383,83 @@ // ASSERT_STREQ(closure->response_->redirect().c_str(), PEER_STRING); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == true - * 预期: 会调用Propose,且不会调用closure + * Test Process + * Scenario: node_->IsLeaderTerm() == true + * Expected: Propose will be called and closure will not be called */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::Task task; - EXPECT_CALL(*node_, Propose(_)) -
.WillOnce(SaveArg<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveArg<0>(&task)); opReq->Process(); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(response->has_status()); - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // Since the node here is a mock, task.done.Run must be executed + // manually to release the resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_TRUE(closure->isDone_); } /** - * 测试OnApply - * 用例:CreateCloneChunk成功 - * 预期:返回 CHUNK_OP_STATUS_SUCCESS ,并更新apply index + * Test OnApply + * Scenario: PasteChunk successful + * Expected: return CHUNK_OP_STATUS_SUCCESS and update the apply index */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillOnce(Return(CSErrorCode::Success)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response->status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response->status()); } /** - * 测试OnApply - * 用例:CreateCloneChunk失败,返回InternalError - * 预期:进程退出 + * Test OnApply + * Scenario: PasteChunk failed, returning InternalError + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InternalError)); ASSERT_DEATH(opReq->OnApply(3, closure), ""); } /** - * 测试OnApply - * 用例:CreateCloneChunk失败,返回其他错误 - * 预期:进程退出 + * Test OnApply + * Scenario: PasteChunk failed, returning another error + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InvalidArgError)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -486,15 +467,15 @@ response->status()); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk成功 - * 预期:无返回 + * Testing OnApplyFromLog + * Scenario: PasteChunk successful + * Expected: No return */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillOnce(Return(CSErrorCode::Success)); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回InternalError - * 预期:进程退出 + * Testing OnApplyFromLog + * Scenario: PasteChunk failed, returning InternalError + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations
EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InternalError)); butil::IOBuf data; ASSERT_DEATH(opReq->OnApplyFromLog(datastore_, *request, data), ""); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回其他错误 - * 预期:进程退出 + * Testing OnApplyFromLog + * Scenario: PasteChunk failed, returning another error + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InvalidArgError)); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } TEST_P(OpRequestTest, ReadChunkTest) { - // 创建CreateCloneChunkRequest + // Create ReadChunkRequest LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -551,21 +532,17 @@ request->set_optype(CHUNK_OP_READ); request->set_offset(offset); request->set_size(length); - brpc::Controller *cntl = new brpc::Controller(); - ChunkResponse *response = new ChunkResponse(); - UnitTestClosure *closure = new UnitTestClosure(); + brpc::Controller* cntl = new brpc::Controller(); + ChunkResponse* response = new ChunkResponse(); + UnitTestClosure* closure = new UnitTestClosure(); closure->SetCntl(cntl); closure->SetRequest(request); closure->SetResponse(response); std::shared_ptr<ReadChunkRequest> opReq = - std::make_shared<ReadChunkRequest>(node_, - cloneMgr_.get(), - cntl, - request, - response, - closure); + std::make_shared<ReadChunkRequest>(node_, cloneMgr_.get(), cntl, + request, response, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -585,17 +562,16 @@ ASSERT_EQ(length, request->size()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: asks for the request to be forwarded, returning + * CHUNK_OP_STATUS_REDIRECTED */ { // set expection - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(false)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(false)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); opReq->Process(); @@ -616,19 +592,16 @@ closure->Reset(); // set expection - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::LeaderLeaseStatus status; status.state = braft::LEASE_EXPIRED; EXPECT_CALL(*node_, GetLeaderLeaseStatus(_)) .WillOnce(SetArgPointee<0>(status)); - EXPECT_CALL(*node_, IsLeaseLeader(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*node_, IsLeaseLeader(_)).WillOnce(Return(false)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); opReq->Process(); @@ -649,20 +622,17 @@ closure->Reset(); // set expection - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::LeaderLeaseStatus status; status.state = braft::LEASE_NOT_READY; EXPECT_CALL(*node_, GetLeaderLeaseStatus(_)) .WillOnce(SetArgPointee<0>(status)); - EXPECT_CALL(*node_, IsLeaseLeader(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*node_, IsLeaseLeader(_)).WillOnce(Return(false)); braft::Task task; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveArg<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveArg<0>(&task)); opReq->Process(); @@ -688,20 +658,17 @@ closure->Reset(); // set expection - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::LeaderLeaseStatus status; status.state = braft::LEASE_DISABLED; EXPECT_CALL(*node_, GetLeaderLeaseStatus(_)) .WillOnce(SetArgPointee<0>(status)); -
EXPECT_CALL(*node_, IsLeaseLeader(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*node_, IsLeaseLeader(_)).WillOnce(Return(false)); braft::Task task; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveArg<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveArg<0>(&task)); opReq->Process(); @@ -724,36 +691,34 @@ info.bitmap = std::make_shared<Bitmap>(chunksize_ / blocksize_); /** - * 测试Process - * 用例: node_->IsLeaderTerm() == true, - * 请求的 apply index 小于等于 node的 apply index - * 预期: 不会走一致性协议,请求提交给concurrentApplyModule_处理 + * Test Process + * Scenario: node_->IsLeaderTerm() == true, and the requested apply index + * is less than or equal to the node's apply index + * Expected: the consistency protocol is not involved; the request is + * submitted to concurrentApplyModule_ for handling */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::LeaderLeaseStatus status; status.state = braft::LEASE_VALID; EXPECT_CALL(*node_, GetLeaderLeaseStatus(_)) .WillOnce(SetArgPointee<0>(status)); - EXPECT_CALL(*node_, IsLeaseLeader(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*node_, IsLeaseLeader(_)).WillOnce(Return(true)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - char *chunkData = new char[length]; + char* chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), Return(CSErrorCode::Success))); ... opReq->Process(); ... } /** - * 测试OnApply - * 用例:请求的 chunk 不是 clone chunk - * 预期:从本地读chunk,返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is not a clone chunk + * Expected: the chunk is read locally, returning CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 - char *chunkData = new char[length]; + // Reading Chunk Files + char* chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), Return(CSErrorCode::Success))); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); ... delete[] chunkData; } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap都为1 - * 预期:从本地读chunk,返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: the requested chunk is a clone chunk, and the bits in the + * requested area of the bitmap are all 1 + * Expected: the chunk is read locally, returning CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Set(); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 - char *chunkData = new char[length]; + // Reading Chunk Files + char* chunkData = new
char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), Return(CSErrorCode::Success))); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->response_->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -850,31 +816,29 @@ } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0 - * 预期:将请求转发给clone manager处理 + * Test OnApply + * Scenario: the requested chunk is a clone chunk, and the bitmap of the + * requested area has a bit that is 0 + * Expected: the request is forwarded to the clone manager for processing */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Clear(1); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); - EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) - .Times(1); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillOnce(Return(true)); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Reading Chunk Files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); + EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)).Times(1); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillOnce(Return(true)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(closure->response_->has_status()); @@ -883,54 +847,50 @@ ASSERT_TRUE(closure->isDone_); } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 ChunkNotExistError - * 预期:请求失败,返回 CHUNK_OP_STATUS_CHUNK_NOTEXIST + * Test OnApply + * Scenario: GetChunkInfo returns ChunkNotExistError + * Expected: Request failed, returning CHUNK_OP_STATUS_CHUNK_NOTEXIST */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::ChunkNotExistError)); - // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + // The chunk file will not be read + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, response->status()); } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 ChunkNotExistError - * 但是请求中包含源chunk的信息 - * 预期:将请求转发给clone manager处理 + * Test OnApply + * Scenario: GetChunkInfo returns ChunkNotExistError, + * but the request contains information about the source chunk + * Expected: the request is forwarded to the clone manager for processing */ { - // 重置closure + // Reset closure closure->Reset(); request->set_clonefilesource("/test"); request->set_clonefileoffset(0); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::ChunkNotExistError)); - // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); + // Reading Chunk Files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); +
EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)).Times(1); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillOnce(Return(true)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(closure->response_->has_status()); @@ -939,137 +899,135 @@ ASSERT_TRUE(closure->isDone_); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap都为1 - * 请求中包含源chunk的信息 - * 预期:从本地读chunk,返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: the requested chunk is a clone chunk, the bits in the + * requested area of the bitmap are all 1, and the request contains + * information about the source chunk + * Expected: the chunk is read locally, returning CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); request->set_clonefilesource("/test"); request->set_clonefileoffset(0); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Set(); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件 - char *chunkData = new char[length]; + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Reading Chunk Files + char* chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .WillOnce(DoAll(SetArrayArgument<2>(chunkData, - chunkData + length), + .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), Return(CSErrorCode::Success))); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->response_->appliedindex()); ASSERT_TRUE(response->has_status()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->response_->status()); ASSERT_EQ(memcmp(chunkData, - closure->cntl_->response_attachment().to_string().c_str(), //NOLINT - length), 0); + closure->cntl_->response_attachment() + .to_string() + .c_str(), // NOLINT + length), + 0); delete[] chunkData; } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 非ChunkNotExistError错误 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: GetChunkInfo returns an error other than ChunkNotExistError + * Expected: Request failed, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::InternalError)); - // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + // The chunk file will not be read + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response->status()); } /** - * 测试OnApply - * 用例:读本地chunk的时候返回错误 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: Error returned when reading the local chunk + * Expected: Request failed, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件失败 + .WillRepeatedly( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Failed to read chunk file
EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillRepeatedly(Return(CSErrorCode::InternalError)); ASSERT_DEATH(opReq->OnApply(3, closure), ""); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0 - * 转发请求给clone manager时出错 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: the requested chunk is a clone chunk, the bitmap of the + * requested area has a bit that is 0, and forwarding the request to the + * clone manager fails + * Expected: Request failed, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Clear(1); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); - EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) - .Times(1); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillOnce(Return(false)); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Reading Chunk Files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); + EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)).Times(1); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillOnce(Return(false)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response->status()); } /** - * 测试 OnApplyFromLog - * 预期:啥也没做 + * Testing OnApplyFromLog + * Expected: Nothing done */ { - // 重置closure + // Reset closure closure->Reset(); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } TEST_P(OpRequestTest, RecoverChunkTest) { - // 创建CreateCloneChunkRequest + // Create a RecoverChunk request LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -1082,21 +1040,17 @@ request->set_optype(CHUNK_OP_RECOVER); request->set_offset(offset); request->set_size(length); - brpc::Controller *cntl = new brpc::Controller(); - ChunkResponse *response = new ChunkResponse(); - UnitTestClosure *closure = new UnitTestClosure(); + brpc::Controller* cntl = new brpc::Controller(); + ChunkResponse* response = new ChunkResponse(); + UnitTestClosure* closure = new UnitTestClosure(); closure->SetCntl(cntl); closure->SetRequest(request); closure->SetResponse(response); std::shared_ptr<ReadChunkRequest> opReq = - std::make_shared<ReadChunkRequest>(node_, - cloneMgr_.get(), - cntl, - request, - response, - closure); + std::make_shared<ReadChunkRequest>(node_, cloneMgr_.get(), cntl, + request, response, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -1116,23 +1070,22 @@ ASSERT_EQ(length, request->size()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: asks for the request to be forwarded, returning + * CHUNK_OP_STATUS_REDIRECTED */ { - // 设置预期 + // Set expectations - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(false)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(false)); // PeerId leaderId(PEER_STRING); // EXPECT_CALL(*node_, GetLeaderId()) // .WillOnce(Return(leaderId)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); opReq->Process();
- // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, @@ -1154,29 +1107,25 @@ * expect: don't propose to raft,request commit to concurrentApplyModule_ */ { - // 重置closure + // Reset closure closure->Reset(); info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 不读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Do not read chunk files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, Propose(_)).Times(0); braft::LeaderLeaseStatus status; status.state = braft::LEASE_VALID; EXPECT_CALL(*node_, GetLeaderLeaseStatus(_)) .WillOnce(SetArgPointee<0>(status)); - EXPECT_CALL(*node_, IsLeaseLeader(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*node_, IsLeaseLeader(_)).WillOnce(Return(true)); opReq->Process(); @@ -1193,54 +1142,52 @@ } /** - * 测试OnApply - * 用例:请求的 chunk 不是 clone chunk - * 预期:直接返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is not a clone chunk + * Expected: returns CHUNK_OP_STATUS_SUCCESS directly */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 不读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Do not read chunk files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response->status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response->status()); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap都为1 - * 预期:直接返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: the requested chunk is a clone chunk, and the bits in the + * requested area of the bitmap are all 1 + * Expected: returns CHUNK_OP_STATUS_SUCCESS directly */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Set(); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 不读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Do not read chunk files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->response_->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -1248,31 +1195,29 @@ closure->response_->status()); } /** - *
测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0 - * 预期:将请求转发给clone manager处理 + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and a bit in the bitmap + * of the requested area is 0 + * Expected: The request is forwarded to the clone manager for processing */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Clear(1); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); - EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) - .Times(1); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillOnce(Return(true)); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // The chunk file is not read + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); + EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)).Times(1); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillOnce(Return(true)); opReq->OnApply(3, closure); - // 验证结果 + // Verify results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(closure->response_->has_status()); @@ -1281,103 +1226,97 @@ TEST_P(OpRequestTest, RecoverChunkTest) { ASSERT_TRUE(closure->isDone_); } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 ChunkNotExistError - * 预期:请求失败,返回 CHUNK_OP_STATUS_CHUNK_NOTEXIST + * Test OnApply + * Scenario: GetChunkInfo returns ChunkNotExistError + * Expected: The request fails, returning CHUNK_OP_STATUS_CHUNK_NOTEXIST */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::ChunkNotExistError)); - // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + // The chunk file will not be read + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verify results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, response->status()); } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 非ChunkNotExistError错误 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: GetChunkInfo returns an error other than ChunkNotExistError + * Expected: The request fails, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::InternalError)); - // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + // The chunk file will not be read + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verify results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response->status()); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0 - * 转发请求给clone manager时出错 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: The requested chunk is a clone chunk, a bit in the bitmap of + * the requested area is 0, and an error occurs while forwarding the + * request to the clone manager + * Expected: The request fails, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true;
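// For context, the clone-chunk decision these cases exercise looks roughly
// like the sketch below. It is inferred from the expectations in this file;
// the bitmap accessor name and the exact call shapes are assumptions, not
// the real datastore/clone-manager API:
//
//   datastore->GetChunkInfo(chunkId, &info);
//   if (info.isClone && !info.bitmap->Test(pageIndex)) {
//       // Requested range not yet copied from the source chunk: hand the
//       // request off to the clone manager instead of reading locally.
//       auto task = cloneMgr->GenerateCloneTask(req, done);
//       if (!cloneMgr->IssueCloneTask(task)) {
//           // failure surfaces as CHUNK_OP_STATUS_FAILURE_UNKNOWN
//       }
//   } else {
//       datastore->ReadChunk(...);  // local data is complete
//   }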
info.bitmap->Clear(1); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); - EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) - .Times(1); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillOnce(Return(false)); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // The chunk file is not read + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); + EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)).Times(1); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillOnce(Return(false)); opReq->OnApply(3, closure); - // 验证结果 + // Verify results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response->status()); } /** - * 测试 OnApplyFromLog - * 预期:啥也没做 + * Test OnApplyFromLog + * Expected: Nothing is done */ { - // 重置closure + // Reset closure closure->Reset(); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } INSTANTIATE_TEST_CASE_P( - OpRequestTest, - OpRequestTest, + OpRequestTest, OpRequestTest, ::testing::Values( // chunk size block size, metapagesize std::make_tuple(16U * 1024 * 1024, 4096U, 4096U), diff --git a/test/chunkserver/copyset_epoch_test.cpp b/test/chunkserver/copyset_epoch_test.cpp index f9f80ad50f..810b9c3c5d 100644 --- a/test/chunkserver/copyset_epoch_test.cpp +++ b/test/chunkserver/copyset_epoch_test.cpp @@ -20,26 +20,25 @@ * Author: wudemiao */ - -#include -#include -#include -#include -#include #include #include #include +#include +#include +#include +#include +#include #include "include/chunkserver/chunkserver_common.h" -#include "src/chunkserver/copyset_node.h" -#include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" #include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" -#include "src/common/uuid.h" #include "src/chunkserver/chunk_service.h" +#include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/copyset_node_manager.h" +#include "src/common/concurrent/concurrent.h" +#include "src/common/uuid.h" #include "src/fs/fs_common.h" +#include "test/chunkserver/chunkserver_test_util.h" #define BRAFT_SNAPSHOT_PATTERN "snapshot_%020" PRId64 @@ -59,9 +58,7 @@ class CopysetEpochTest : public testing::Test { dir1 = uuidGenerator.GenerateUUID(); Exec(("mkdir " + dir1).c_str()); } - virtual void TearDown() { - Exec(("rm -fr " + dir1).c_str()); - } + virtual void TearDown() { Exec(("rm -fr " + dir1).c_str()); } public: std::string dir1; @@ -70,27 +67,23 @@ class CopysetEpochTest : public testing::Test { butil::AtExitManager atExitManager; TEST_F(CopysetEpochTest, DISABLED_basic) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9026; - const char *confs = "127.0.0.1:9026:0"; + const char* confs = "127.0.0.1:9026:0"; int snapshotInterval = 1; int electionTimeoutMs = 3000; - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); std::string snapshotPath = dir1 + "/4295067297/raft_snapshot"; uint64_t lastIncludeIndex = 0; /** - * 启动一个chunkserver + * Start a chunkserver */ std::string copysetdir = "local://./" + dir1; auto startChunkServerFunc = [&] { - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, -
snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); }; Thread t1(startChunkServerFunc); @@ -105,111 +98,95 @@ TEST_F(CopysetEpochTest, DISABLED_basic) { ::usleep(1000 * electionTimeoutMs); { - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); } - CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance(); + CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance(); auto node = nodeManager.GetCopysetNode(logicPoolId, copysetId); ASSERT_EQ(1, node->GetConfEpoch()); std::string confEpochPath1 = snapshotPath; - butil::string_appendf(&confEpochPath1, - "/" BRAFT_SNAPSHOT_PATTERN, + butil::string_appendf(&confEpochPath1, "/" BRAFT_SNAPSHOT_PATTERN, ++lastIncludeIndex); confEpochPath1.append("/"); confEpochPath1.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath1)); - // 等待生成快照 + // Waiting for snapshot generation ::sleep(2 * snapshotInterval); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be + // executed to load the epoch from the snapshot node->Fini(); node->Run(); { - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); } ASSERT_EQ(2, node->GetConfEpoch()); std::string confEpochPath2 = snapshotPath; - butil::string_appendf(&confEpochPath2, - "/" BRAFT_SNAPSHOT_PATTERN, + butil::string_appendf(&confEpochPath2, "/" BRAFT_SNAPSHOT_PATTERN, ++lastIncludeIndex); confEpochPath2.append("/"); confEpochPath2.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath2)); - // 等待生成快照 + // Waiting for snapshot generation ::sleep(2 * snapshotInterval); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be + // executed to load the epoch from the snapshot node->Fini(); node->Run(); { - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); } ASSERT_EQ(3, node->GetConfEpoch()); std::string confEpochPath3 = snapshotPath; - butil::string_appendf(&confEpochPath3, - "/" BRAFT_SNAPSHOT_PATTERN, + butil::string_appendf(&confEpochPath3, "/" BRAFT_SNAPSHOT_PATTERN, ++lastIncludeIndex); confEpochPath3.append("/"); confEpochPath3.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath3)); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be + // executed to load the epoch from the snapshot node->Fini(); node->Run(); { - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); } ASSERT_EQ(4, node->GetConfEpoch()); std::string confEpochPath4 = snapshotPath; - butil::string_appendf(&confEpochPath4, - "/" 
BRAFT_SNAPSHOT_PATTERN, + butil::string_appendf(&confEpochPath4, "/" BRAFT_SNAPSHOT_PATTERN, ++lastIncludeIndex); confEpochPath4.append("/"); confEpochPath4.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath4)); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be + // executed to load the epoch from the snapshot node->Fini(); node->Run(); { - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); } ASSERT_EQ(5, node->GetConfEpoch()); std::string confEpochPath5 = snapshotPath; - butil::string_appendf(&confEpochPath5, - "/" BRAFT_SNAPSHOT_PATTERN, + butil::string_appendf(&confEpochPath5, "/" BRAFT_SNAPSHOT_PATTERN, ++lastIncludeIndex); confEpochPath5.append("/"); confEpochPath5.append(kCurveConfEpochFilename); diff --git a/test/chunkserver/copyset_node_manager_test.cpp b/test/chunkserver/copyset_node_manager_test.cpp index 7103ba0697..fe4f0472e3 100644 --- a/test/chunkserver/copyset_node_manager_test.cpp +++ b/test/chunkserver/copyset_node_manager_test.cpp @@ -20,14 +20,15 @@ * Author: wudemiao */ +#include "src/chunkserver/copyset_node_manager.h" + +#include #include #include -#include #include #include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/copyset_node.h" #include "test/chunkserver/mock_copyset_node.h" @@ -35,10 +36,10 @@ namespace curve { namespace chunkserver { using ::testing::_; -using ::testing::Return; -using ::testing::NotNull; -using ::testing::Mock; using ::testing::DoAll; +using ::testing::Mock; +using ::testing::NotNull; +using ::testing::Return; using ::testing::ReturnArg; using ::testing::SetArgPointee; @@ -72,20 +73,19 @@ class CopysetNodeManagerTest : public ::testing::Test { LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); ASSERT_TRUE(nullptr != fs); defaultOptions_.localFileSystem = fs; - defaultOptions_.chunkFilePool = - std::make_shared(fs); + defaultOptions_.chunkFilePool = std::make_shared(fs); defaultOptions_.trash = std::make_shared(); } void TearDown() { - CopysetNodeManager *copysetNodeManager = + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); copysetNodeManager->Fini(); ::system("rm -rf node_manager_test"); } protected: - CopysetNodeOptions defaultOptions_; + CopysetNodeOptions defaultOptions_; ConcurrentApplyModule concurrentModule_; }; @@ -93,34 +93,32 @@ TEST_F(CopysetNodeManagerTest, ErrorOptionsTest) { LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; Configuration conf; - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); defaultOptions_.chunkDataUri = "//."; defaultOptions_.logUri = "//."; ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); - ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId, - conf)); + ASSERT_FALSE( + copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); } TEST_F(CopysetNodeManagerTest, ServiceNotStartTest) { LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; Configuration conf; - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); 
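// Lifecycle assumed by the assertions that follow (a sketch only, inferred
// from how these tests drive the manager rather than from its header):
//
//   CopysetNodeManager& mgr = CopysetNodeManager::GetInstance();
//   mgr.Init(options);     // 0 on success; nothing has been loaded yet
//   mgr.LoadFinished();    // false until Run() has reloaded local copysets
//   mgr.Run();             // 0 on success; reloads copysets from disk
//   // Per this test's name, CreateCopysetNode() is still expected to fail
//   // afterwards because the copyset service was never added to a running
//   // brpc::Server.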
ASSERT_FALSE(copysetNodeManager->LoadFinished()); ASSERT_EQ(0, copysetNodeManager->Run()); - ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId, - conf)); + ASSERT_FALSE( + copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); ASSERT_TRUE(copysetNodeManager->LoadFinished()); /* null server */ { - brpc::Server *server = nullptr; + brpc::Server* server = nullptr; int port = 9000; butil::EndPoint addr(butil::IP_ANY, port); ASSERT_EQ(-1, copysetNodeManager->AddService(server, addr)); @@ -131,7 +129,7 @@ TEST_F(CopysetNodeManagerTest, NormalTest) { LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; Configuration conf; - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); // start server brpc::Server server; @@ -143,21 +141,19 @@ TEST_F(CopysetNodeManagerTest, NormalTest) { ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); - // 本地 copyset 未加载完成,则无法创建新的copyset - ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId, - conf)); + // Cannot create a new copyset if the local copyset has not been loaded + // completely + ASSERT_FALSE( + copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); ASSERT_EQ(0, copysetNodeManager->Run()); - ASSERT_TRUE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId, - conf)); + ASSERT_TRUE( + copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); ASSERT_TRUE(copysetNodeManager->IsExist(logicPoolId, copysetId)); - // 重复创建 - ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId, - conf)); + // Duplicate creation + ASSERT_FALSE( + copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); auto copysetNode1 = - copysetNodeManager->GetCopysetNode(logicPoolId, copysetId); + copysetNodeManager->GetCopysetNode(logicPoolId, copysetId); ASSERT_TRUE(nullptr != copysetNode1); auto copysetNode2 = copysetNodeManager->GetCopysetNode(logicPoolId + 1, copysetId + 1); @@ -168,8 +164,7 @@ TEST_F(CopysetNodeManagerTest, NormalTest) { copysetNodeManager->GetAllCopysetNodes(©setNodes); ASSERT_EQ(1, copysetNodes.size()); - ASSERT_TRUE(copysetNodeManager->DeleteCopysetNode(logicPoolId, - copysetId)); + ASSERT_TRUE(copysetNodeManager->DeleteCopysetNode(logicPoolId, copysetId)); ASSERT_FALSE(copysetNodeManager->IsExist(logicPoolId, copysetId)); ASSERT_EQ(0, copysetNodeManager->Fini()); @@ -178,46 +173,49 @@ TEST_F(CopysetNodeManagerTest, NormalTest) { } TEST_F(CopysetNodeManagerTest, CheckCopysetTest) { - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); - std::shared_ptr mockNode - = std::make_shared(); + std::shared_ptr mockNode = + std::make_shared(); - // 测试copyset node manager还没运行 + // The test copyset node manager has not been run yet EXPECT_CALL(*mockNode, GetStatus(_)).Times(0); EXPECT_CALL(*mockNode, GetLeaderStatus(_)).Times(0); ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(mockNode)); - // 启动copyset node manager + // Start the copyset node manager ASSERT_EQ(0, copysetNodeManager->Run()); - // 测试node为空 + // Test node is empty EXPECT_CALL(*mockNode, GetStatus(_)).Times(0); EXPECT_CALL(*mockNode, GetLeaderStatus(_)).Times(0); ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(nullptr)); - // 测试无法获取到leader status的情况 + // Test the situation where the 
leader status cannot be obtained EXPECT_CALL(*mockNode, GetStatus(_)).Times(0); NodeStatus leaderStatus; EXPECT_CALL(*mockNode, GetLeaderStatus(_)) - .Times(defaultOptions_.checkRetryTimes) - .WillRepeatedly(DoAll(SetArgPointee<0>(leaderStatus), Return(false))); + .Times(defaultOptions_.checkRetryTimes) + .WillRepeatedly(DoAll(SetArgPointee<0>(leaderStatus), Return(false))); ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(mockNode)); leaderStatus.leader_id.parse("127.0.0.1:9043:0"); - // 测试leader first_index 大于 follower last_index的情况 + // Test the situation where the leader's first_index is greater than the + // follower's last_index leaderStatus.first_index = 1000; NodeStatus followerStatus; followerStatus.last_index = 999; - EXPECT_CALL(*mockNode, GetStatus(_)).Times(1) - .WillOnce(SetArgPointee<0>(followerStatus)); + EXPECT_CALL(*mockNode, GetStatus(_)) + .Times(1) + .WillOnce(SetArgPointee<0>(followerStatus)); EXPECT_CALL(*mockNode, GetLeaderStatus(_)) - .WillOnce(DoAll(SetArgPointee<0>(leaderStatus), Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(leaderStatus), Return(true))); ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(mockNode)); - // 测试可以获取到leader status,且follower当前不在安装快照 的情况 + // Test the case where the leader status can be obtained and the follower + // is currently not installing a snapshot leaderStatus.first_index = 1; leaderStatus.committed_index = 2000; NodeStatus status1; @@ -233,14 +231,14 @@ TEST_F(CopysetNodeManagerTest, CheckCopysetTest) { status4.last_index = 1666; status4.known_applied_index = 1001; EXPECT_CALL(*mockNode, GetStatus(_)) - .Times(4) - .WillOnce(SetArgPointee<0>(status1)) - .WillOnce(SetArgPointee<0>(status2)) - .WillOnce(SetArgPointee<0>(status3)) - .WillOnce(SetArgPointee<0>(status4)); + .Times(4) + .WillOnce(SetArgPointee<0>(status1)) + .WillOnce(SetArgPointee<0>(status2)) + .WillOnce(SetArgPointee<0>(status3)) + .WillOnce(SetArgPointee<0>(status4)); EXPECT_CALL(*mockNode, GetLeaderStatus(_)) - .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<0>(leaderStatus), Return(true))); + .Times(4) + .WillRepeatedly(DoAll(SetArgPointee<0>(leaderStatus), Return(true))); ASSERT_TRUE(copysetNodeManager->CheckCopysetUntilLoadFinished(mockNode)); } @@ -248,7 +246,7 @@ TEST_F(CopysetNodeManagerTest, ReloadTest) { LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; Configuration conf; - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); // start server brpc::Server server; @@ -258,15 +256,14 @@ TEST_F(CopysetNodeManagerTest, ReloadTest) { LOG(FATAL) << "Fail to start Server"; } - // 构造初始环境 + // Construct initial environment ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); ASSERT_EQ(0, copysetNodeManager->Run()); - // 创建多个copyset + // Create multiple copysets int copysetNum = 5; for (int i = 0; i < copysetNum; ++i) { ASSERT_TRUE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId + i, - conf)); + copysetId + i, conf)); } std::vector> copysetNodes; copysetNodeManager->GetAllCopysetNodes(©setNodes); @@ -276,11 +273,10 @@ TEST_F(CopysetNodeManagerTest, ReloadTest) { copysetNodeManager->GetAllCopysetNodes(©setNodes); ASSERT_EQ(0, copysetNodes.size()); - - // 本地 copyset 未加载完成,则无法创建新的copyset + // Cannot create a new copyset if the local copyset has not been loaded + // completely ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId + 5, - conf)); + copysetId + 5, conf)); // reload copysets when loadConcurrency < 
copysetNum std::cout << "Test ReloadCopysets when loadConcurrency=3" << std::endl; diff --git a/test/chunkserver/copyset_node_test.cpp b/test/chunkserver/copyset_node_test.cpp index 46ed6a4fdb..c81a4b9358 100644 --- a/test/chunkserver/copyset_node_test.cpp +++ b/test/chunkserver/copyset_node_test.cpp @@ -20,46 +20,46 @@ * Author: wudemiao */ +#include "src/chunkserver/copyset_node.h" + +#include #include #include -#include -#include #include -#include -#include #include +#include +#include +#include -#include "test/fs/mock_local_filesystem.h" -#include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/copyset_node.h" -#include "test/chunkserver/fake_datastore.h" -#include "test/chunkserver/mock_node.h" -#include "src/chunkserver/conf_epoch_file.h" #include "proto/heartbeat.pb.h" +#include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "src/chunkserver/conf_epoch_file.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/raftsnapshot/curve_snapshot_attachment.h" +#include "test/chunkserver/fake_datastore.h" #include "test/chunkserver/mock_curve_filesystem_adaptor.h" -#include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "test/chunkserver/mock_node.h" +#include "test/fs/mock_local_filesystem.h" namespace curve { namespace chunkserver { using ::testing::_; -using ::testing::Invoke; -using ::testing::Return; using ::testing::AnyNumber; -using ::testing::Matcher; +using ::testing::AtLeast; using ::testing::DoAll; -using ::testing::SetArgPointee; -using ::testing::SetArgReferee; using ::testing::InSequence; -using ::testing::AtLeast; +using ::testing::Invoke; +using ::testing::Matcher; +using ::testing::Return; using ::testing::SaveArgPointee; +using ::testing::SetArgPointee; +using ::testing::SetArgReferee; -using curve::fs::MockLocalFileSystem; +using curve::chunkserver::concurrent::ConcurrentApplyOption; using curve::fs::FileSystemType; using curve::fs::MockLocalFileSystem; -using curve::chunkserver::concurrent::ConcurrentApplyOption; const char copysetUri[] = "local://./copyset_node_test"; const int port = 9044; @@ -67,52 +67,36 @@ const int port = 9044; class FakeSnapshotReader : public braft::SnapshotReader { public: std::string get_path() { - /* 返回一个不存在的 path */ + /*Returns a non-existent path*/ return std::string("/1002093939/temp/238408034"); } - void list_files(std::vector *files) { - return; - } - int load_meta(braft::SnapshotMeta *meta) { - return 1; - } - std::string generate_uri_for_copy() { - return std::string(""); - } + void list_files(std::vector* files) { return; } + int load_meta(braft::SnapshotMeta* meta) { return 1; } + std::string generate_uri_for_copy() { return std::string(""); } }; class FakeSnapshotWriter : public braft::SnapshotWriter { public: std::string get_path() { - /* 返回一个不存在的 path */ + /*Returns a non-existent path*/ return std::string("."); } - void list_files(std::vector *files) { - return; - } - virtual int save_meta(const braft::SnapshotMeta &meta) { - return 0; - } + void list_files(std::vector* files) { return; } + virtual int save_meta(const braft::SnapshotMeta& meta) { return 0; } - virtual int add_file(const std::string &filename) { - return 0; - } + virtual int add_file(const std::string& filename) { return 0; } - virtual int add_file(const std::string &filename, - const ::google::protobuf::Message *file_meta) { + virtual int add_file(const std::string& filename, + const ::google::protobuf::Message* file_meta) { return 0; } - virtual int remove_file(const std::string 
&filename) { - return 0; - } + virtual int remove_file(const std::string& filename) { return 0; } }; class FakeClosure : public braft::Closure { public: - void Run() { - std::cerr << "FakeClosure run" << std::endl; - } + void Run() { std::cerr << "FakeClosure run" << std::endl; } }; class CopysetNodeTest : public ::testing::Test { @@ -140,24 +124,21 @@ class CopysetNodeTest : public ::testing::Test { LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); ASSERT_TRUE(nullptr != fs); defaultOptions_.localFileSystem = fs; - defaultOptions_.chunkFilePool = - std::make_shared(fs); + defaultOptions_.chunkFilePool = std::make_shared(fs); defaultOptions_.trash = std::make_shared(); defaultOptions_.enableOdsyncWhenOpenChunkFile = true; } - void TearDown() { - ::system("rm -rf copyset_node_test"); - } + void TearDown() { ::system("rm -rf copyset_node_test"); } protected: - CopysetNodeOptions defaultOptions_; + CopysetNodeOptions defaultOptions_; ConcurrentApplyModule concurrentModule_; }; TEST_F(CopysetNodeTest, error_test) { - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT std::string rmCmd("rm -f "); rmCmd += kCurveConfEpochFilename; @@ -170,23 +151,24 @@ TEST_F(CopysetNodeTest, error_test) { files.push_back("test-1.txt"); files.push_back("test-2.txt"); - const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT + const char* json = + "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":" + "774340440}"; // NOLINT std::string jsonStr(json); CopysetNode copysetNode(logicPoolID, copysetID, conf); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); FakeClosure closure; FakeSnapshotWriter writer; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs)); - + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)).Times(1) + EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)) + .Times(1) .WillOnce(Return(jsonStr.size())); EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0)); @@ -210,10 +192,10 @@ TEST_F(CopysetNodeTest, error_test) { ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); FakeClosure closure; FakeSnapshotWriter writer; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); @@ -232,26 +214,30 @@ TEST_F(CopysetNodeTest, error_test) { files.push_back("test-1.txt"); files.push_back("test-2.txt"); - const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT + const char* json = + "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":" + "774340440}"; // NOLINT std::string jsonStr(json); CopysetNode copysetNode(logicPoolID, copysetID, conf); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); FakeClosure closure; FakeSnapshotWriter writer; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + 
std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)).Times(1) + EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)) + .Times(1) .WillOnce(Return(jsonStr.size())); EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0)); - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); copysetNode.on_snapshot_save(&writer, &closure); @@ -267,7 +253,9 @@ TEST_F(CopysetNodeTest, error_test) { files.push_back("test-1.txt"); files.push_back("test-2.txt"); - const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT + const char* json = + "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":" + "774340440}"; // NOLINT std::string jsonStr(json); CopysetNode copysetNode(logicPoolID, copysetID, conf); @@ -278,19 +266,21 @@ TEST_F(CopysetNodeTest, error_test) { ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); FakeClosure closure; FakeSnapshotWriter writer; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)).Times(1) + EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)) + .Times(1) .WillOnce(Return(jsonStr.size())); EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0)); - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); copysetNode.on_snapshot_save(&writer, &closure); @@ -328,10 +318,10 @@ TEST_F(CopysetNodeTest, error_test) { CopysetNode copysetNode(logicPoolID, copysetID, conf); FakeClosure closure; FakeSnapshotReader reader; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); DataStoreOptions options; @@ -355,10 +345,10 @@ TEST_F(CopysetNodeTest, error_test) { CopysetID copysetID = 1345; Configuration conf; CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; FakeClosure closure; FakeSnapshotReader reader; copysetNode.SetLocalFileSystem(mockfs); @@ -387,10 +377,10 @@ TEST_F(CopysetNodeTest, error_test) { CopysetNode copysetNode(logicPoolID, copysetID, conf); FakeClosure closure; FakeSnapshotReader reader; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; 
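// The on_snapshot_load() failure cases in this region all walk one call
// sequence on the injected filesystem and snapshot adaptor mocks. Sketched
// below for orientation; argument names are placeholders inferred from the
// EXPECT_CALLs, not the real signatures:
//
//   fs->DirExists(dataDir);                 // stale data dir from a past run?
//   cfa->delete_file(dataDir, recursive);   // returns false => load fails (-1)
//   cfa->rename(snapshotDataDir, dataDir);  // returns false => load fails (-1)
//   fs->FileExists(confEpochPath);          // then reload the conf epoch file
//   fs->Open(confEpochPath, flags);         // returns -1 => load fails (-1)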
copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); @@ -409,19 +399,17 @@ TEST_F(CopysetNodeTest, error_test) { CopysetNode copysetNode(logicPoolID, copysetID, conf); FakeClosure closure; FakeSnapshotReader reader; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; - MockCurveFilesystemAdaptor* cfa = - new MockCurveFilesystemAdaptor(); + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + MockCurveFilesystemAdaptor* cfa = new MockCurveFilesystemAdaptor(); auto sfs = new scoped_refptr(cfa); copysetNode.SetSnapshotFileSystem(sfs); copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*cfa, - delete_file(_, _)).Times(1).WillOnce(Return(false)); + EXPECT_CALL(*cfa, delete_file(_, _)).Times(1).WillOnce(Return(false)); ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); } @@ -434,22 +422,19 @@ TEST_F(CopysetNodeTest, error_test) { CopysetNode copysetNode(logicPoolID, copysetID, conf); FakeClosure closure; FakeSnapshotReader reader; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; defaultOptions_.localFileSystem = mockfs; - MockCurveFilesystemAdaptor* cfa = - new MockCurveFilesystemAdaptor(); + MockCurveFilesystemAdaptor* cfa = new MockCurveFilesystemAdaptor(); auto sfs = new scoped_refptr(cfa); copysetNode.SetSnapshotFileSystem(sfs); copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*cfa, - delete_file(_, _)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*cfa, - rename(_, _)).Times(1).WillOnce(Return(false)); + EXPECT_CALL(*cfa, delete_file(_, _)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*cfa, rename(_, _)).Times(1).WillOnce(Return(false)); ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); } @@ -466,27 +451,21 @@ TEST_F(CopysetNodeTest, error_test) { CopysetNode copysetNode(logicPoolID, copysetID, conf); FakeClosure closure; FakeSnapshotReader reader; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; defaultOptions_.localFileSystem = mockfs; - MockCurveFilesystemAdaptor* cfa = - new MockCurveFilesystemAdaptor(); + MockCurveFilesystemAdaptor* cfa = new MockCurveFilesystemAdaptor(); auto sfs = new scoped_refptr(cfa); copysetNode.SetSnapshotFileSystem(sfs); copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); - EXPECT_CALL(*mockfs, DirExists(_)).Times(1) - .WillOnce(Return(true)); - EXPECT_CALL(*cfa, - delete_file(_, _)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*cfa, - rename(_, _)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*mockfs, FileExists(_)).Times(1) - .WillOnce(Return(true)); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*cfa, delete_file(_, _)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*cfa, rename(_, _)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*mockfs, 
FileExists(_)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); LOG(INFO) << "OK"; @@ -545,7 +524,7 @@ TEST_F(CopysetNodeTest, error_test) { copysetNode.Fini(); ::system(rmCmd.c_str()); } - /* load: logic pool id 错误 */ + /* Load: logic pool id error */ { LogicPoolID logicPoolID = 123; CopysetID copysetID = 1345; @@ -554,18 +533,15 @@ TEST_F(CopysetNodeTest, error_test) { CopysetNode copysetNode(logicPoolID, copysetID, conf); auto fs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); ConfEpochFile confEpochFile(fs); - ASSERT_EQ(0, - confEpochFile.Save(kCurveConfEpochFilename, - logicPoolID + 1, - copysetID, - epoch)); + ASSERT_EQ(0, confEpochFile.Save(kCurveConfEpochFilename, + logicPoolID + 1, copysetID, epoch)); defaultOptions_.localFileSystem = fs; ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename)); copysetNode.Fini(); ::system(rmCmd.c_str()); } - /* load: copyset id 错误 */ + /* Load: copyset id error */ { LogicPoolID logicPoolID = 123; CopysetID copysetID = 1345; @@ -575,11 +551,8 @@ TEST_F(CopysetNodeTest, error_test) { ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); auto fs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); ConfEpochFile confEpochFile(fs); - ASSERT_EQ(0, - confEpochFile.Save(kCurveConfEpochFilename, - logicPoolID, - copysetID + 1, - epoch)); + ASSERT_EQ(0, confEpochFile.Save(kCurveConfEpochFilename, logicPoolID, + copysetID + 1, epoch)); defaultOptions_.localFileSystem = fs; ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename)); @@ -589,8 +562,8 @@ TEST_F(CopysetNodeTest, error_test) { } TEST_F(CopysetNodeTest, get_conf_change) { - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT std::string rmCmd("rm -f "); rmCmd += kCurveConfEpochFilename; @@ -607,12 +580,11 @@ TEST_F(CopysetNodeTest, get_conf_change) { conf1.add_peer(peer1); conf2.add_peer(peer1); - // 当前没有在做配置变更 + // There are currently no configuration changes in progress { CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); copysetNode.SetCopysetNode(mockNode); @@ -628,12 +600,11 @@ TEST_F(CopysetNodeTest, get_conf_change) { EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer)); EXPECT_EQ(ConfigChangeType::NONE, type); } - // 当前正在Add Peer + // Currently adding Peer { CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); copysetNode.SetCopysetNode(mockNode); @@ -643,14 +614,12 @@ TEST_F(CopysetNodeTest, get_conf_change) { copysetNode.on_leader_start(8); - EXPECT_CALL(*mockNode, add_peer(_, _)) - .Times(1); + EXPECT_CALL(*mockNode, add_peer(_, _)).Times(1); EXPECT_CALL(*mockNode, remove_peer(_, _)) - .WillOnce( - Invoke([](const PeerId& peer, braft::Closure* done) { - done->status().set_error(-1, - "another config change is ongoing"); - })); + .WillOnce(Invoke([](const PeerId& peer, braft::Closure* done) { + done->status().set_error(-1, + "another config change 
is ongoing"); + })); Peer addPeer; addPeer.set_address("127.0.0.1:3202:0"); Peer removePeer; @@ -666,12 +635,11 @@ TEST_F(CopysetNodeTest, get_conf_change) { EXPECT_EQ(ConfigChangeType::ADD_PEER, type); EXPECT_EQ(addPeer.address(), alterPeer.address()); } - // 当前正在Remove Peer + // Currently removing Peer { CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); copysetNode.SetCopysetNode(mockNode); @@ -681,13 +649,12 @@ TEST_F(CopysetNodeTest, get_conf_change) { copysetNode.on_leader_start(8); - EXPECT_CALL(*mockNode, remove_peer(_, _)) - .Times(1); + EXPECT_CALL(*mockNode, remove_peer(_, _)).Times(1); EXPECT_CALL(*mockNode, add_peer(_, _)) .WillOnce( Invoke([](const braft::PeerId& peer, braft::Closure* done) { - done->status().set_error(-1, - "another config change is ongoing"); + done->status().set_error( + -1, "another config change is ongoing"); })); Peer addPeer1; addPeer1.set_address("127.0.0.1:3202:0"); @@ -704,12 +671,11 @@ TEST_F(CopysetNodeTest, get_conf_change) { EXPECT_EQ(ConfigChangeType::REMOVE_PEER, type); EXPECT_EQ(removePeer.address(), alterPeer.address()); } - // 当前正在Transfer leader + // Currently transferring leader { CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); copysetNode.SetCopysetNode(mockNode); @@ -742,12 +708,11 @@ TEST_F(CopysetNodeTest, get_conf_change) { EXPECT_EQ(ConfigChangeType::TRANSFER_LEADER, type); EXPECT_EQ(transferee1.address(), alterPeer.address()); } - // 当前正在Change Peer + // Currently changing Peer { CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); copysetNode.SetCopysetNode(mockNode); @@ -757,8 +722,7 @@ TEST_F(CopysetNodeTest, get_conf_change) { copysetNode.on_leader_start(8); - EXPECT_CALL(*mockNode, change_peers(_, _)) - .Times(1); + EXPECT_CALL(*mockNode, change_peers(_, _)).Times(1); Peer addPeer1; addPeer1.set_address("127.0.0.1:3201:0"); @@ -778,7 +742,7 @@ TEST_F(CopysetNodeTest, get_conf_change) { EXPECT_EQ(ConfigChangeType::CHANGE_PEER, type); EXPECT_EQ(addPeer1.address(), alterPeer.address()); } - // leader term小于0 + // leader term is less than 0 { CopysetNode copysetNode(logicPoolID, copysetID, conf); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); @@ -792,8 +756,8 @@ TEST_F(CopysetNodeTest, get_conf_change) { } TEST_F(CopysetNodeTest, get_hash) { - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT std::string rmCmd("rm -f "); rmCmd += kCurveConfEpochFilename; @@ -816,21 +780,26 @@ TEST_F(CopysetNodeTest, get_hash) { ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - // 生成多个有数据的文件 - ::system("echo \"abcddddddddd333\" >" - "copyset_node_test/8589934594/data/test-2.txt"); - ::system("echo \"mmmmmmmm\" >" - "copyset_node_test/8589934594/data/test-4.txt"); - ::system("dd if=/dev/zero of=" - "copyset_node_test/8589934594/data/test-3.txt bs=512 count=15"); // NOLINT - ::system("echo \"eeeeeeeeeee\" > " - 
"copyset_node_test/8589934594/data/test-5.txt"); + // Generate multiple files with data + ::system( + "echo \"abcddddddddd333\" >" + "copyset_node_test/8589934594/data/test-2.txt"); + ::system( + "echo \"mmmmmmmm\" >" + "copyset_node_test/8589934594/data/test-4.txt"); + ::system( + "dd if=/dev/zero of=" + "copyset_node_test/8589934594/data/test-3.txt bs=512 count=15"); // NOLINT + ::system( + "echo \"eeeeeeeeeee\" > " + "copyset_node_test/8589934594/data/test-5.txt"); ::system("touch copyset_node_test/8589934594/data/test-1.txt"); - ::system("echo \"wwwww\" > " - "copyset_node_test/8589934594/data/test-1.txt"); + ::system( + "echo \"wwwww\" > " + "copyset_node_test/8589934594/data/test-1.txt"); - // 获取hash + // Get hash ASSERT_EQ(0, copysetNode.GetHash(&hash)); ASSERT_STREQ(hashValue.c_str(), hash.c_str()); ::system("rm -fr copyset_node_test/8589934594"); @@ -838,26 +807,32 @@ TEST_F(CopysetNodeTest, get_hash) { { std::string hash; - // 使用不同的copyset id,让目录不一样 + // Using different copyset IDs to make the directory different CopysetNode copysetNode(logicPoolID, copysetID + 1, conf); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - // 生成多个有数据的文件,并且交换生成文件的顺序 + // Generate multiple files with data and exchange the order of generated + // files ::system("touch copyset_node_test/8589934595/data/test-1.txt"); - ::system("echo \"wwwww\" > " - "copyset_node_test/8589934595/data/test-1.txt"); - - ::system("echo \"mmmmmmmm\" > " - "copyset_node_test/8589934595/data/test-4.txt"); - ::system("echo \"eeeeeeeeeee\" > " - "copyset_node_test/8589934595/data/test-5.txt"); - ::system("dd if=/dev/zero of=" - "copyset_node_test/8589934595/data/test-3.txt bs=512 count=15"); // NOLINT - ::system("echo \"abcddddddddd333\" > " - "copyset_node_test/8589934595/data/test-2.txt"); - - // 获取hash + ::system( + "echo \"wwwww\" > " + "copyset_node_test/8589934595/data/test-1.txt"); + + ::system( + "echo \"mmmmmmmm\" > " + "copyset_node_test/8589934595/data/test-4.txt"); + ::system( + "echo \"eeeeeeeeeee\" > " + "copyset_node_test/8589934595/data/test-5.txt"); + ::system( + "dd if=/dev/zero of=" + "copyset_node_test/8589934595/data/test-3.txt bs=512 count=15"); // NOLINT + ::system( + "echo \"abcddddddddd333\" > " + "copyset_node_test/8589934595/data/test-2.txt"); + + // Get hash ASSERT_EQ(0, copysetNode.GetHash(&hash)); ASSERT_STREQ(hashValue.c_str(), hash.c_str()); ::system("rm -fr copyset_node_test/8589934595"); @@ -868,18 +843,17 @@ TEST_F(CopysetNodeTest, get_hash) { std::string hash; CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); + std::shared_ptr mockfs = + std::make_shared(); copysetNode.SetLocalFileSystem(mockfs); std::vector files; files.push_back("test-1.txt"); - - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetNode.GetHash(&hash)); } @@ -888,13 +862,14 @@ TEST_F(CopysetNodeTest, get_hash) { { std::string hash; CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); + std::shared_ptr mockfs = + std::make_shared(); copysetNode.SetLocalFileSystem(mockfs); std::vector files; - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); ASSERT_EQ(0, copysetNode.GetHash(&hash)); @@ 
-905,18 +880,17 @@ TEST_F(CopysetNodeTest, get_hash) { { std::string hash; CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); + std::shared_ptr mockfs = + std::make_shared(); copysetNode.SetLocalFileSystem(mockfs); std::vector files; files.push_back("test-1.txt"); - - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetNode.GetHash(&hash)); } @@ -925,20 +899,18 @@ TEST_F(CopysetNodeTest, get_hash) { { std::string hash; CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); + std::shared_ptr mockfs = + std::make_shared(); copysetNode.SetLocalFileSystem(mockfs); std::vector files; files.push_back("test-1.txt"); - - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(3)); - EXPECT_CALL(*mockfs, Fstat(_, _)).Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(3)); + EXPECT_CALL(*mockfs, Fstat(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetNode.GetHash(&hash)); } @@ -949,49 +921,49 @@ TEST_F(CopysetNodeTest, get_hash) { struct stat fileInfo; fileInfo.st_size = 1024; CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); + std::shared_ptr mockfs = + std::make_shared(); copysetNode.SetLocalFileSystem(mockfs); std::vector files; files.push_back("test-1.txt"); - - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(3)); - EXPECT_CALL(*mockfs, Fstat(_, _)).Times(1) + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(3)); + EXPECT_CALL(*mockfs, Fstat(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); - EXPECT_CALL(*mockfs, Read(_, _, _, _)).Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*mockfs, Read(_, _, _, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetNode.GetHash(&hash)); } // List success, open success, fstat success, read success { - char *buff = new (std::nothrow) char[1024]; + char* buff = new (std::nothrow) char[1024]; ::memset(buff, 'a', 1024); std::string hash; struct stat fileInfo; fileInfo.st_size = 1024; CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); + std::shared_ptr mockfs = + std::make_shared(); copysetNode.SetLocalFileSystem(mockfs); std::vector files; files.push_back("test-1.txt"); - - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(3)); - EXPECT_CALL(*mockfs, Fstat(_, _)).Times(1) + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(3)); + EXPECT_CALL(*mockfs, Fstat(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); - EXPECT_CALL(*mockfs, Read(_, _, _, _)).Times(1) + EXPECT_CALL(*mockfs, Read(_, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(*buff), Return(1024))); ASSERT_EQ(0, copysetNode.GetHash(&hash)); @@ -1002,40 +974,38 @@ 
TEST_F(CopysetNodeTest, get_leader_status) { LogicPoolID logicPoolID = 1; CopysetID copysetID = 1; Configuration conf; - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); CopysetNode copysetNode(logicPoolID, copysetID, conf); copysetNode.SetCopysetNode(mockNode); - // 当前peer不是leader,且当前无leader + // The current peer is not a leader, and there is currently no leader { NodeStatus status; EXPECT_CALL(*mockNode, get_status(_)) - .WillOnce(SetArgPointee<0>(status)); + .WillOnce(SetArgPointee<0>(status)); NodeStatus leaderStatus; ASSERT_FALSE(copysetNode.GetLeaderStatus(&leaderStatus)); } - // 当前peer为leader + // The current peer is the leader { NodeStatus status; status.leader_id.parse("127.0.0.1:3200:0"); status.peer_id = status.leader_id; status.committed_index = 6666; EXPECT_CALL(*mockNode, get_status(_)) - .WillOnce(SetArgPointee<0>(status)); + .WillOnce(SetArgPointee<0>(status)); NodeStatus leaderStatus; ASSERT_TRUE(copysetNode.GetLeaderStatus(&leaderStatus)); - ASSERT_EQ(status.committed_index, - leaderStatus.committed_index); + ASSERT_EQ(status.committed_index, leaderStatus.committed_index); } - // 存在leader,但不是当前peer + // There is a leader, but it is not the current peer { - // 模拟启动chunkserver - CopysetNodeManager* copysetNodeManager - = &CopysetNodeManager::GetInstance(); + // Simulate starting chunkserver + CopysetNodeManager* copysetNodeManager = + &CopysetNodeManager::GetInstance(); ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); ASSERT_EQ(0, copysetNodeManager->Run()); PeerId leader_peer("127.0.0.1:9044:0"); @@ -1044,17 +1014,15 @@ TEST_F(CopysetNodeTest, get_leader_status) { if (server.Start(port, NULL) != 0) { LOG(FATAL) << "Fail to start Server"; } - // 构造leader copyset + // Construct a leader copyset ASSERT_TRUE(copysetNodeManager->CreateCopysetNode(logicPoolID, - copysetID, - conf)); - auto leaderNode = copysetNodeManager->GetCopysetNode(logicPoolID, - copysetID); + copysetID, conf)); + auto leaderNode = + copysetNodeManager->GetCopysetNode(logicPoolID, copysetID); ASSERT_TRUE(nullptr != leaderNode); - // 设置预期值 - std::shared_ptr mockLeader - = std::make_shared(logicPoolID, - copysetID); + // Set expected values + std::shared_ptr mockLeader = + std::make_shared(logicPoolID, copysetID); leaderNode->SetCopysetNode(mockLeader); NodeStatus mockLeaderStatus; mockLeaderStatus.leader_id = leader_peer; @@ -1062,16 +1030,17 @@ TEST_F(CopysetNodeTest, get_leader_status) { mockLeaderStatus.committed_index = 10000; mockLeaderStatus.known_applied_index = 6789; EXPECT_CALL(*mockLeader, get_status(_)) - .WillRepeatedly(SetArgPointee<0>(mockLeaderStatus)); + .WillRepeatedly(SetArgPointee<0>(mockLeaderStatus)); - // 测试通过follower的node获取leader的committed index + // Test obtaining the committed index of the leader through the node of + // the follower NodeStatus followerStatus; followerStatus.leader_id = leader_peer; followerStatus.peer_id.parse("127.0.0.1:3201:0"); followerStatus.committed_index = 3456; followerStatus.known_applied_index = 3456; EXPECT_CALL(*mockNode, get_status(_)) - .WillOnce(SetArgPointee<0>(followerStatus)); + .WillOnce(SetArgPointee<0>(followerStatus)); NodeStatus leaderStatus; ASSERT_TRUE(copysetNode.GetLeaderStatus(&leaderStatus)); @@ -1086,9 +1055,8 @@ TEST_F(CopysetNodeTest, is_lease_leader) { LogicPoolID logicPoolID = 1; CopysetID copysetID = 1; Configuration conf; - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); + std::shared_ptr mockNode = + 
std::make_shared(logicPoolID, copysetID); CopysetNode copysetNode(logicPoolID, copysetID, conf); copysetNode.Init(defaultOptions_); copysetNode.SetCopysetNode(mockNode); @@ -1099,13 +1067,10 @@ TEST_F(CopysetNodeTest, is_lease_leader) { // not leader now { std::vector states = { - braft::LEASE_DISABLED, - braft::LEASE_VALID, - braft::LEASE_NOT_READY, - braft::LEASE_EXPIRED - }; + braft::LEASE_DISABLED, braft::LEASE_VALID, braft::LEASE_NOT_READY, + braft::LEASE_EXPIRED}; braft::LeaderLeaseStatus status; - for (auto &state : states) { + for (auto& state : states) { status.state = state; ASSERT_FALSE(copysetNode.IsLeaseLeader(status)); } diff --git a/test/chunkserver/copyset_service_test.cpp b/test/chunkserver/copyset_service_test.cpp index 973529366b..d456b2a361 100644 --- a/test/chunkserver/copyset_service_test.cpp +++ b/test/chunkserver/copyset_service_test.cpp @@ -20,35 +20,34 @@ * Author: wudemiao */ -#include -#include -#include #include #include #include +#include +#include +#include #include -#include "src/chunkserver/trash.h" +#include "proto/chunk.pb.h" +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "proto/chunk.pb.h" +#include "src/chunkserver/trash.h" namespace curve { namespace chunkserver { using curve::fs::FileSystemType; -static std::string Exec(const char *cmd) { - FILE *pipe = popen(cmd, "r"); +static std::string Exec(const char* cmd) { + FILE* pipe = popen(cmd, "r"); if (!pipe) return "ERROR"; char buffer[4096]; std::string result = ""; while (!feof(pipe)) { - if (fgets(buffer, 1024, pipe) != NULL) - result += buffer; + if (fgets(buffer, 1024, pipe) != NULL) result += buffer; } pclose(pipe); return result; @@ -72,9 +71,7 @@ class CopysetServiceTest : public testing::Test { trash_->Init(opt); } - void TearDown() { - Exec(rmCmd.c_str()); - } + void TearDown() { Exec(rmCmd.c_str()); } protected: std::string testDir; @@ -87,7 +84,7 @@ class CopysetServiceTest : public testing::Test { butil::AtExitManager atExitManager; TEST_F(CopysetServiceTest, basic) { - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); LogicPoolID logicPoolId = 1; CopysetID copysetId = 100002; std::string ip = "127.0.0.1"; @@ -99,7 +96,8 @@ TEST_F(CopysetServiceTest, basic) { ASSERT_EQ(0, copysetNodeManager->AddService(&server, addr)); ASSERT_EQ(0, server.Start(port, NULL)); - std::shared_ptr fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT ASSERT_TRUE(nullptr != fs); butil::string_printf(©setDir, copysetDirPattern.c_str(), port); @@ -115,8 +113,7 @@ TEST_F(CopysetServiceTest, basic) { copysetNodeOptions.raftSnapshotUri = copysetDir; copysetNodeOptions.concurrentapply = new ConcurrentApplyModule(); copysetNodeOptions.localFileSystem = fs; - copysetNodeOptions.chunkFilePool = - std::make_shared(fs); + copysetNodeOptions.chunkFilePool = std::make_shared(fs); copysetNodeOptions.trash = trash_; copysetNodeOptions.enableOdsyncWhenOpenChunkFile = true; ASSERT_EQ(0, copysetNodeManager->Init(copysetNodeOptions)); @@ -128,7 +125,7 @@ TEST_F(CopysetServiceTest, basic) { LOG(FATAL) << "Fail to init channel to " << peerId.addr; } - /* 测试创建一个新的 copyset */ + /* Test creating a new copyset */ CopysetService_Stub stub(&channel); { 
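// Shape of the CreateCopysetNode RPC driven in this block, sketched from the
// visible proto usage. The request-building lines are elided by the hunk
// boundary below, so the peer field name here is an assumption, not the
// authoritative copyset.proto definition:
//
//   brpc::Controller cntl;
//   cntl.set_timeout_ms(3000);
//   CopysetRequest request;
//   request.set_logicpoolid(logicPoolId);
//   request.set_copysetid(copysetId);
//   request.add_peerid("127.0.0.1:9040:0");  // assumed field name
//   CopysetResponse response;
//   stub.CreateCopysetNode(&cntl, &request, &response, nullptr);
//   // success <=> response.status() == COPYSET_OP_STATUS_SUCCESS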
brpc::Controller cntl; @@ -149,7 +146,7 @@ TEST_F(CopysetServiceTest, basic) { COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } - /* 测试创建一个重复 copyset */ + /* Test creating a duplicate copyset */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -169,7 +166,7 @@ TEST_F(CopysetServiceTest, basic) { response.status()); } - /* 非法参数测试 */ + /* Illegal parameter testing */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -213,8 +210,8 @@ TEST_F(CopysetServiceTest, basic) { ASSERT_EQ(response.status(), COPYSET_OP_STATUS_FAILURE_UNKNOWN); // CASE 3: delete broken copyset success - ASSERT_TRUE(copysetNodeManager-> - DeleteCopysetNode(logicPoolId, copysetId)); + ASSERT_TRUE( + copysetNodeManager->DeleteCopysetNode(logicPoolId, copysetId)); cntl.Reset(); request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); @@ -228,8 +225,8 @@ TEST_F(CopysetServiceTest, basic) { } TEST_F(CopysetServiceTest, basic2) { - /********************* 设置初始环境 ***********************/ - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + /********************* Set Up Initial Environment ***********************/ + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); LogicPoolID logicPoolId = 2; CopysetID copysetId = 100003; std::string ip = "127.0.0.1"; @@ -241,7 +238,8 @@ TEST_F(CopysetServiceTest, basic2) { ASSERT_EQ(0, copysetNodeManager->AddService(&server, addr)); ASSERT_EQ(0, server.Start(port, NULL)); - std::shared_ptr fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT ASSERT_TRUE(nullptr != fs); butil::string_printf(©setDir, copysetDirPattern.c_str(), port); @@ -257,8 +255,7 @@ TEST_F(CopysetServiceTest, basic2) { copysetNodeOptions.raftSnapshotUri = copysetDir; copysetNodeOptions.concurrentapply = new ConcurrentApplyModule(); copysetNodeOptions.localFileSystem = fs; - copysetNodeOptions.chunkFilePool = - std::make_shared(fs); + copysetNodeOptions.chunkFilePool = std::make_shared(fs); copysetNodeOptions.enableOdsyncWhenOpenChunkFile = true; ASSERT_EQ(0, copysetNodeManager->Init(copysetNodeOptions)); ASSERT_EQ(0, copysetNodeManager->Run()); @@ -269,9 +266,9 @@ TEST_F(CopysetServiceTest, basic2) { LOG(FATAL) << "Fail to init channel to " << peerId.addr; } - /********************** 跑测试cases ************************/ + /********************** Run Test Cases ************************/ - /* 测试创建一个新的 copyset */ + /* Test creating a new copyset */ CopysetService_Stub stub(&channel); { brpc::Controller cntl; @@ -279,15 +276,15 @@ TEST_F(CopysetServiceTest, basic2) { CopysetRequest2 request; CopysetResponse2 response; - Copyset *copyset; + Copyset* copyset; copyset = request.add_copysets(); copyset->set_logicpoolid(logicPoolId); copyset->set_copysetid(copysetId); - Peer *peer1 = copyset->add_peers(); + Peer* peer1 = copyset->add_peers(); peer1->set_address("127.0.0.1:9040:0"); - Peer *peer2 = copyset->add_peers(); + Peer* peer2 = copyset->add_peers(); peer2->set_address("127.0.0.1:9041:0"); - Peer *peer3 = copyset->add_peers(); + Peer* peer3 = copyset->add_peers(); peer3->set_address("127.0.0.1:9042:0"); stub.CreateCopysetNode2(&cntl, &request, &response, nullptr); @@ -298,22 +295,22 @@ TEST_F(CopysetServiceTest, basic2) { COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } - /* 测试创建一个重复 copyset */ + /* Test creating a duplicate copyset */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); CopysetRequest2 request; CopysetResponse2 response; - Copyset 
*copyset; + Copyset* copyset; copyset = request.add_copysets(); copyset->set_logicpoolid(logicPoolId); copyset->set_copysetid(copysetId); - Peer *peer1 = copyset->add_peers(); + Peer* peer1 = copyset->add_peers(); peer1->set_address("127.0.0.1:9040:0"); - Peer *peer2 = copyset->add_peers(); + Peer* peer2 = copyset->add_peers(); peer2->set_address("127.0.0.1:9041:0"); - Peer *peer3 = copyset->add_peers(); + Peer* peer3 = copyset->add_peers(); peer3->set_address("127.0.0.1:9042:0"); stub.CreateCopysetNode2(&cntl, &request, &response, nullptr); @@ -324,7 +321,7 @@ TEST_F(CopysetServiceTest, basic2) { response.status()); } - /* 创建多个copyset */ + /* Create multiple copysets */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -332,31 +329,31 @@ TEST_F(CopysetServiceTest, basic2) { CopysetRequest2 request; CopysetResponse2 response; - // 准备第1个copyset + // Prepare the first copyset { - Copyset *copyset; + Copyset* copyset; copyset = request.add_copysets(); copyset->set_logicpoolid(logicPoolId); copyset->set_copysetid(copysetId + 1); - Peer *peer1 = copyset->add_peers(); + Peer* peer1 = copyset->add_peers(); peer1->set_address("127.0.0.1:9040:0"); - Peer *peer2 = copyset->add_peers(); + Peer* peer2 = copyset->add_peers(); peer2->set_address("127.0.0.1:9041:0"); - Peer *peer3 = copyset->add_peers(); + Peer* peer3 = copyset->add_peers(); peer3->set_address("127.0.0.1:9042:0"); } - // 准备第2个copyset + // Prepare the second copyset { - Copyset *copyset; + Copyset* copyset; copyset = request.add_copysets(); copyset->set_logicpoolid(logicPoolId); copyset->set_copysetid(copysetId + 2); - Peer *peer1 = copyset->add_peers(); + Peer* peer1 = copyset->add_peers(); peer1->set_address("127.0.0.1:9040:0"); - Peer *peer2 = copyset->add_peers(); + Peer* peer2 = copyset->add_peers(); peer2->set_address("127.0.0.1:9041:0"); - Peer *peer3 = copyset->add_peers(); + Peer* peer3 = copyset->add_peers(); peer3->set_address("127.0.0.1:9042:0"); } @@ -370,18 +367,18 @@ TEST_F(CopysetServiceTest, basic2) { // get status { - // 创建一个copyset + // Create a copyset { brpc::Controller cntl; cntl.set_timeout_ms(3000); CopysetRequest2 request; CopysetResponse2 response; - Copyset *copyset; + Copyset* copyset; copyset = request.add_copysets(); copyset->set_logicpoolid(logicPoolId); copyset->set_copysetid(copysetId + 3); - Peer *peer1 = copyset->add_peers(); + Peer* peer1 = copyset->add_peers(); peer1->set_address("127.0.0.1:9040:0"); stub.CreateCopysetNode2(&cntl, &request, &response, nullptr); @@ -392,11 +389,11 @@ TEST_F(CopysetServiceTest, basic2) { COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } - // 睡眠等待leader产生 + // Sleep waiting for leader generation ::usleep(2 * 1000 * 1000); { - // query hash为false + // query hash is false std::string peerStr("127.0.0.1:9040:0"); brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -404,7 +401,7 @@ TEST_F(CopysetServiceTest, basic2) { CopysetStatusResponse response; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId + 3); - Peer *peer = new Peer(); + Peer* peer = new Peer(); request.set_allocated_peer(peer); peer->set_address(peerStr); request.set_queryhash(false); @@ -432,7 +429,7 @@ TEST_F(CopysetServiceTest, basic2) { ASSERT_FALSE(response.has_hash()); } { - // query hash为true + // query hash is true std::string peerStr("127.0.0.1:9040:0"); brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -440,7 +437,7 @@ TEST_F(CopysetServiceTest, basic2) { CopysetStatusResponse response; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId + 3); - Peer *peer = 
new Peer(); + Peer* peer = new Peer(); request.set_allocated_peer(peer); peer->set_address(peerStr); request.set_queryhash(true); @@ -476,4 +473,3 @@ TEST_F(CopysetServiceTest, basic2) { } // namespace chunkserver } // namespace curve - diff --git a/test/chunkserver/datastore/datastore_mock_unittest.cpp b/test/chunkserver/datastore/datastore_mock_unittest.cpp index 5910df808e..26cdd8fb9b 100644 --- a/test/chunkserver/datastore/datastore_mock_unittest.cpp +++ b/test/chunkserver/datastore/datastore_mock_unittest.cpp @@ -20,43 +20,44 @@ * Author: tongguangxun */ -#include #include -#include +#include + #include +#include #include #include "include/chunkserver/chunkserver_common.h" +#include "src/chunkserver/datastore/chunkserver_datastore.h" +#include "src/chunkserver/datastore/define.h" +#include "src/chunkserver/datastore/filename_operator.h" #include "src/common/bitmap.h" #include "src/common/crc32.h" #include "src/fs/local_filesystem.h" -#include "src/chunkserver/datastore/define.h" -#include "src/chunkserver/datastore/filename_operator.h" -#include "src/chunkserver/datastore/chunkserver_datastore.h" #include "test/chunkserver/datastore/mock_file_pool.h" #include "test/fs/mock_local_filesystem.h" +using curve::common::Bitmap; using curve::fs::LocalFileSystem; using curve::fs::MockLocalFileSystem; -using curve::common::Bitmap; using ::testing::_; +using ::testing::DoAll; +using ::testing::ElementsAre; using ::testing::Ge; using ::testing::Gt; -using ::testing::Return; -using ::testing::NotNull; +using ::testing::Invoke; using ::testing::Matcher; using ::testing::Mock; -using ::testing::Truly; -using ::testing::Invoke; -using ::testing::DoAll; +using ::testing::NotNull; +using ::testing::Return; using ::testing::ReturnArg; -using ::testing::ElementsAre; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; +using ::testing::Truly; -using std::shared_ptr; using std::make_shared; +using std::shared_ptr; using std::string; namespace curve { @@ -67,27 +68,21 @@ const char baseDir[] = "/home/chunkserver/copyset/data"; const char chunk1[] = "chunk_1"; const char chunk1Path[] = "/home/chunkserver/copyset/data/chunk_1"; const char chunk1snap1[] = "chunk_1_snap_1"; -const char chunk1snap1Path[] - = "/home/chunkserver/copyset/data/chunk_1_snap_1"; +const char chunk1snap1Path[] = "/home/chunkserver/copyset/data/chunk_1_snap_1"; const char chunk1snap2[] = "chunk_1_snap_2"; -const char chunk1snap2Path[] - = "/home/chunkserver/copyset/data/chunk_1_snap_2"; +const char chunk1snap2Path[] = "/home/chunkserver/copyset/data/chunk_1_snap_2"; const char chunk2[] = "chunk_2"; -const char chunk2Path[] - = "/home/chunkserver/copyset/data/chunk_2"; +const char chunk2Path[] = "/home/chunkserver/copyset/data/chunk_2"; const char chunk2snap1[] = "chunk_2_snap_1"; -const char chunk2snap1Path[] - = "/home/chunkserver/copyset/data/chunk_2_snap_1"; +const char chunk2snap1Path[] = "/home/chunkserver/copyset/data/chunk_2_snap_1"; const char temp1[] = "chunk_1_tmp"; -const char temp1Path[] - = "/home/chunkserver/copyset/data/chunk_1_tmp"; +const char temp1Path[] = "/home/chunkserver/copyset/data/chunk_1_tmp"; const char location[] = "/file1/0@curve"; const int UT_ERRNO = 1234; -bool hasCreatFlag(int flag) {return flag & O_CREAT;} +bool hasCreatFlag(int flag) { return flag & O_CREAT; } -ACTION_TEMPLATE(SetVoidArrayArgument, - HAS_1_TEMPLATE_PARAMS(int, k), +ACTION_TEMPLATE(SetVoidArrayArgument, HAS_1_TEMPLATE_PARAMS(int, k), AND_2_VALUE_PARAMS(first, last)) { auto output = reinterpret_cast(::testing::get(args)); 
auto input = first; @@ -100,163 +95,140 @@ class CSDataStore_test : public testing::TestWithParam< std::tuple> { public: - void SetUp() { - chunksize_ = std::get<0>(GetParam()); - blocksize_ = std::get<1>(GetParam()); - metapagesize_ = std::get<2>(GetParam()); - - chunk1MetaPage = new char[metapagesize_]; - chunk2MetaPage = new char[metapagesize_]; - chunk1SnapMetaPage = new char[metapagesize_]; - - lfs_ = std::make_shared(); - fpool_ = std::make_shared(lfs_); - DataStoreOptions options; - options.baseDir = baseDir; - options.chunkSize = chunksize_; - options.blockSize = blocksize_; - options.metaPageSize = metapagesize_; - options.locationLimit = kLocationLimit; - options.enableOdsyncWhenOpenChunkFile = true; - dataStore = std::make_shared(lfs_, - fpool_, - options); - fdMock = 100; - memset(chunk1MetaPage, 0, metapagesize_); - memset(chunk2MetaPage, 0, metapagesize_); - memset(chunk1SnapMetaPage, 0, metapagesize_); - } - - void TearDown() override { - delete[] chunk1MetaPage; - delete[] chunk2MetaPage; - delete[] chunk1SnapMetaPage; - } - - inline void FakeEncodeChunk(char* buf, - SequenceNum correctedSn, - SequenceNum sn, - shared_ptr bitmap = nullptr, - const std::string& location = "") { - ChunkFileMetaPage metaPage; - metaPage.version = FORMAT_VERSION; - metaPage.sn = sn; - metaPage.correctedSn = correctedSn; - metaPage.bitmap = bitmap; - metaPage.location = location; - metaPage.encode(buf); - } - - inline void FakeEncodeSnapshot(char* buf, - SequenceNum sn) { - uint32_t bits = chunksize_ / blocksize_; - SnapshotMetaPage metaPage; - metaPage.version = FORMAT_VERSION; - metaPage.sn = sn; - metaPage.bitmap = std::make_shared(bits); - metaPage.encode(buf); - } - - /** - * 构造初始环境 - * datastore存在两个chunk,分别为chunk1、chunk2 - * chunk1 和 chunk2的sn都为2,correctSn为0 - * chunk1存在快照文件,快照文件版本号为1 - * chunk2不存在快照文件 - */ - void FakeEnv() { - // fake DirExists - EXPECT_CALL(*lfs_, DirExists(baseDir)) - .WillRepeatedly(Return(true)); - // fake List - vector fileNames; - fileNames.push_back(chunk1); - fileNames.push_back(chunk1snap1); - fileNames.push_back(chunk2); - EXPECT_CALL(*lfs_, List(baseDir, NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - // fake FileExists - ON_CALL(*lfs_, FileExists(_)) - .WillByDefault(Return(false)); - EXPECT_CALL(*lfs_, FileExists(chunk1Path)) - .WillRepeatedly(Return(true)); - EXPECT_CALL(*lfs_, FileExists(chunk2Path)) - .WillRepeatedly(Return(true)); - // fake Open - ON_CALL(*lfs_, Open(_, _)) - .WillByDefault(Return(fdMock++)); - EXPECT_CALL(*lfs_, Open(_, Truly(hasCreatFlag))) - .Times(0); - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Open(chunk1Path, Truly(hasCreatFlag))) - .Times(0); - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillRepeatedly(Return(2)); - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, Truly(hasCreatFlag))) - .Times(0); - EXPECT_CALL(*lfs_, Open(chunk2Path, _)) - .WillRepeatedly(Return(3)); - EXPECT_CALL(*lfs_, Open(chunk2Path, Truly(hasCreatFlag))) - .Times(0); - // fake fpool->GetFile() - ON_CALL(*fpool_, GetFileImpl(_, NotNull())) - .WillByDefault(Return(0)); - EXPECT_CALL(*fpool_, RecycleFile(_)) - .WillRepeatedly(Return(0)); - // fake Close - ON_CALL(*lfs_, Close(_)) - .WillByDefault(Return(0)); - // fake Delete - ON_CALL(*lfs_, Delete(_)) - .WillByDefault(Return(0)); - // fake Fsync - ON_CALL(*lfs_, Fsync(_)) - .WillByDefault(Return(0)); - // fake Fstat - struct stat fileInfo; - fileInfo.st_size = chunksize_ + metapagesize_; - EXPECT_CALL(*lfs_, Fstat(_, _)) - 
.WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - // fake Read - ON_CALL(*lfs_, Read(Ge(1), NotNull(), Ge(0), Gt(0))) - .WillByDefault(ReturnArg<3>()); - // fake Write - ON_CALL(*lfs_, - Write(Ge(1), Matcher(NotNull()), Ge(0), Gt(0))) - .WillByDefault(ReturnArg<3>()); - ON_CALL(*lfs_, Write(Ge(1), Matcher(_), Ge(0), Gt(0))) - .WillByDefault(ReturnArg<3>()); - // fake read chunk1 metapage - FakeEncodeChunk(chunk1MetaPage, 0, 2); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) - .WillRepeatedly( - DoAll(SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); - // fake read chunk1's snapshot1 metapage - FakeEncodeSnapshot(chunk1SnapMetaPage, 1); - EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); - // fake read chunk2 metapage - FakeEncodeChunk(chunk2MetaPage, 0, 2); - EXPECT_CALL(*lfs_, Read(3, NotNull(), 0, metapagesize_)) - .WillRepeatedly( - DoAll(SetArrayArgument<1>(chunk2MetaPage, - chunk2MetaPage + metapagesize_), - Return(metapagesize_))); - } + void SetUp() { + chunksize_ = std::get<0>(GetParam()); + blocksize_ = std::get<1>(GetParam()); + metapagesize_ = std::get<2>(GetParam()); + + chunk1MetaPage = new char[metapagesize_]; + chunk2MetaPage = new char[metapagesize_]; + chunk1SnapMetaPage = new char[metapagesize_]; + + lfs_ = std::make_shared(); + fpool_ = std::make_shared(lfs_); + DataStoreOptions options; + options.baseDir = baseDir; + options.chunkSize = chunksize_; + options.blockSize = blocksize_; + options.metaPageSize = metapagesize_; + options.locationLimit = kLocationLimit; + options.enableOdsyncWhenOpenChunkFile = true; + dataStore = std::make_shared(lfs_, fpool_, options); + fdMock = 100; + memset(chunk1MetaPage, 0, metapagesize_); + memset(chunk2MetaPage, 0, metapagesize_); + memset(chunk1SnapMetaPage, 0, metapagesize_); + } + + void TearDown() override { + delete[] chunk1MetaPage; + delete[] chunk2MetaPage; + delete[] chunk1SnapMetaPage; + } + + inline void FakeEncodeChunk(char* buf, SequenceNum correctedSn, + SequenceNum sn, + shared_ptr bitmap = nullptr, + const std::string& location = "") { + ChunkFileMetaPage metaPage; + metaPage.version = FORMAT_VERSION; + metaPage.sn = sn; + metaPage.correctedSn = correctedSn; + metaPage.bitmap = bitmap; + metaPage.location = location; + metaPage.encode(buf); + } + + inline void FakeEncodeSnapshot(char* buf, SequenceNum sn) { + uint32_t bits = chunksize_ / blocksize_; + SnapshotMetaPage metaPage; + metaPage.version = FORMAT_VERSION; + metaPage.sn = sn; + metaPage.bitmap = std::make_shared(bits); + metaPage.encode(buf); + } + + /** + * Construct initial environment + * There are two chunks in the datastore, chunk1 and chunk2 + * The sn of chunk1 and chunk2 are both 2, and correctSn is 0 + * chunk1 has a snapshot file with version number 1 + * chunk2 does not have a snapshot file + */ + void FakeEnv() { + // fake DirExists + EXPECT_CALL(*lfs_, DirExists(baseDir)).WillRepeatedly(Return(true)); + // fake List + vector fileNames; + fileNames.push_back(chunk1); + fileNames.push_back(chunk1snap1); + fileNames.push_back(chunk2); + EXPECT_CALL(*lfs_, List(baseDir, NotNull())) + .WillRepeatedly(DoAll(SetArgPointee<1>(fileNames), Return(0))); + // fake FileExists + ON_CALL(*lfs_, FileExists(_)).WillByDefault(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk1Path)).WillRepeatedly(Return(true)); + EXPECT_CALL(*lfs_, 
FileExists(chunk2Path)).WillRepeatedly(Return(true)); + // fake Open + ON_CALL(*lfs_, Open(_, _)).WillByDefault(Return(fdMock++)); + EXPECT_CALL(*lfs_, Open(_, Truly(hasCreatFlag))).Times(0); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillRepeatedly(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, Truly(hasCreatFlag))).Times(0); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillRepeatedly(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, Truly(hasCreatFlag))).Times(0); + EXPECT_CALL(*lfs_, Open(chunk2Path, _)).WillRepeatedly(Return(3)); + EXPECT_CALL(*lfs_, Open(chunk2Path, Truly(hasCreatFlag))).Times(0); + // fake fpool->GetFile() + ON_CALL(*fpool_, GetFileImpl(_, NotNull())).WillByDefault(Return(0)); + EXPECT_CALL(*fpool_, RecycleFile(_)).WillRepeatedly(Return(0)); + // fake Close + ON_CALL(*lfs_, Close(_)).WillByDefault(Return(0)); + // fake Delete + ON_CALL(*lfs_, Delete(_)).WillByDefault(Return(0)); + // fake Fsync + ON_CALL(*lfs_, Fsync(_)).WillByDefault(Return(0)); + // fake Fstat + struct stat fileInfo; + fileInfo.st_size = chunksize_ + metapagesize_; + EXPECT_CALL(*lfs_, Fstat(_, _)) + .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + // fake Read + ON_CALL(*lfs_, Read(Ge(1), NotNull(), Ge(0), Gt(0))) + .WillByDefault(ReturnArg<3>()); + // fake Write + ON_CALL(*lfs_, + Write(Ge(1), Matcher(NotNull()), Ge(0), Gt(0))) + .WillByDefault(ReturnArg<3>()); + ON_CALL(*lfs_, Write(Ge(1), Matcher(_), Ge(0), Gt(0))) + .WillByDefault(ReturnArg<3>()); + // fake read chunk1 metapage + FakeEncodeChunk(chunk1MetaPage, 0, 2); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1MetaPage, + chunk1MetaPage + metapagesize_), + Return(metapagesize_))); + // fake read chunk1's snapshot1 metapage + FakeEncodeSnapshot(chunk1SnapMetaPage, 1); + EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); + // fake read chunk2 metapage + FakeEncodeChunk(chunk2MetaPage, 0, 2); + EXPECT_CALL(*lfs_, Read(3, NotNull(), 0, metapagesize_)) + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk2MetaPage, + chunk2MetaPage + metapagesize_), + Return(metapagesize_))); + } protected: int fdMock; std::shared_ptr lfs_; std::shared_ptr fpool_; - std::shared_ptr dataStore; + std::shared_ptr dataStore; char* chunk1MetaPage; char* chunk2MetaPage; char* chunk1SnapMetaPage; @@ -267,8 +239,8 @@ class CSDataStore_test }; /** * ConstructorTest - * case:测试构造参数为空的情况 - * 预期结果:进程退出 + * Case: Test the case where the construction parameter is empty + * Expected result: Process exited */ TEST_P(CSDataStore_test, ConstructorTest) { // null param test @@ -277,86 +249,66 @@ TEST_P(CSDataStore_test, ConstructorTest) { options.chunkSize = chunksize_; options.blockSize = blocksize_; options.metaPageSize = metapagesize_; - ASSERT_DEATH(std::make_shared(nullptr, - fpool_, - options), - ""); - ASSERT_DEATH(std::make_shared(lfs_, - nullptr, - options), - ""); + ASSERT_DEATH(std::make_shared(nullptr, fpool_, options), ""); + ASSERT_DEATH(std::make_shared(lfs_, nullptr, options), ""); options.baseDir = ""; - ASSERT_DEATH(std::make_shared(lfs_, - fpool_, - options), - ""); + ASSERT_DEATH(std::make_shared(lfs_, fpool_, options), ""); } /** * InitializeTest - * case:存在未知类型的文件 - * 预期结果:删除该文件,返回true + * Case: There is an unknown type of file + * Expected result: Delete the file and return true */ TEST_P(CSDataStore_test, InitializeTest1) { // test 
unknown file - EXPECT_CALL(*lfs_, DirExists(baseDir)) - .Times(1) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Mkdir(baseDir)) - .Times(0); + EXPECT_CALL(*lfs_, DirExists(baseDir)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Mkdir(baseDir)).Times(0); vector<string> fileNames; fileNames.push_back(temp1); EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); // unknown file will be deleted EXPECT_TRUE(dataStore->Initialize()); }
/** * InitializeTest - * case:存在快照文件,但是快照文件没有对应的chunk - * 预期结果:删除快照文件,返回true + * Case: A snapshot file exists but has no corresponding chunk + * Expected result: Delete the snapshot file and return true */
TEST_P(CSDataStore_test, InitializeTest2) { // test snapshot without chunk - EXPECT_CALL(*lfs_, DirExists(baseDir)) - .Times(1) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, DirExists(baseDir)).Times(1).WillOnce(Return(true)); vector<string> fileNames; fileNames.push_back(chunk2snap1); EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(chunk2Path)) - .WillRepeatedly(Return(false)); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(chunk2Path)).WillRepeatedly(Return(false)); EXPECT_TRUE(dataStore->Initialize()); }
/** * InitializeTest - * case:存在chunk文件,chunk文件存在快照文件 - * 预期结果:正常加载文件,返回true + * Case: A chunk file exists and has a snapshot file + * Expected result: The files are loaded normally, returns true */
TEST_P(CSDataStore_test, InitializeTest3) { // test chunk with snapshot FakeEnv(); EXPECT_TRUE(dataStore->Initialize()); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); }
/** * InitializeTest - * case:存在chunk文件,chunk文件存在snapshot文件, - * List的时候snapshot先于chunk文件被list - * 预期结果:返回true + * Case: A chunk file exists and has a snapshot file; + * the snapshot is listed before the chunk file + * Expected result: Returns true */
TEST_P(CSDataStore_test, InitializeTest4) { // test snapshot found before chunk file,
@@ -368,19 +320,16 @@ TEST_P(CSDataStore_test, InitializeTest4) { fileNames.push_back(chunk1); EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); EXPECT_TRUE(dataStore->Initialize()); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); }
/** * InitializeTest - * case:存在chunk文件,chunk文件存在两个冲突的快照文件 - * 预期结果:返回false + * Case: A chunk file exists with two conflicting snapshot files + * Expected result: returns false */
TEST_P(CSDataStore_test, InitializeTest5) { // test snapshot conflict
@@ -391,47 +340,35 @@ TEST_P(CSDataStore_test, InitializeTest5) { fileNames.push_back(chunk1snap2); EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); EXPECT_FALSE(dataStore->Initialize()); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_,
Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); } /** * InitializeErrorTest - * case:data目录不存在,创建目录时失败 - * 预期结果:返回false + * Case: The data directory does not exist, creating the directory failed + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest1) { // dir not exist and mkdir failed - EXPECT_CALL(*lfs_, DirExists(baseDir)) - .Times(1) - .WillOnce(Return(false)); - EXPECT_CALL(*lfs_, Mkdir(baseDir)) - .Times(1) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, DirExists(baseDir)).Times(1).WillOnce(Return(false)); + EXPECT_CALL(*lfs_, Mkdir(baseDir)).Times(1).WillOnce(Return(-UT_ERRNO)); // List should not be called - EXPECT_CALL(*lfs_, List(baseDir, _)) - .Times(0); + EXPECT_CALL(*lfs_, List(baseDir, _)).Times(0); EXPECT_FALSE(dataStore->Initialize()); } /** * InitializeErrorTest - * case:List目录时失败 - * 预期结果:返回false + * Case: List directory failed + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest2) { // List dir failed - EXPECT_CALL(*lfs_, DirExists(baseDir)) - .Times(1) - .WillOnce(Return(false)); - EXPECT_CALL(*lfs_, Mkdir(baseDir)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*lfs_, DirExists(baseDir)).Times(1).WillOnce(Return(false)); + EXPECT_CALL(*lfs_, Mkdir(baseDir)).Times(1).WillOnce(Return(0)); // List failed EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) @@ -441,220 +378,182 @@ TEST_P(CSDataStore_test, InitializeErrorTest2) { /** * InitializeErrorTest - * case:open chunk文件的时候出错 - * 预期结果:返回false + * Case: Error opening chunk file + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest3) { // test chunk open failed FakeEnv(); // set open chunk file failed - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); // open success - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(1)); // expect call close - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); // stat failed - EXPECT_CALL(*lfs_, Fstat(1, NotNull())) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Fstat(1, NotNull())).WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); // open success - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(1)); // expect call close - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); // stat success but file size not equal chunksize_ + metapagesize_ struct stat fileInfo; fileInfo.st_size = chunksize_; EXPECT_CALL(*lfs_, Fstat(1, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_FALSE(dataStore->Initialize()); // open success - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(1)); // expect call close - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(1, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage failed EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); 
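// Illustrative note (not part of the patch): each InitializeErrorTest uses
// the same gMock fault-injection pattern, overriding one default action so a
// single call in the load path fails exactly once, e.g.
//
//     EXPECT_CALL(*lfs_, Fstat(1, NotNull())).WillOnce(Return(-UT_ERRNO));
//
// after which the test asserts that Initialize() reports the failure as
// false rather than continuing with a half-loaded chunk.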
EXPECT_FALSE(dataStore->Initialize()); // open success - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(1)); // expect call close - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(1, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage success, but version incompatible uint8_t version = FORMAT_VERSION + 1; memcpy(chunk1MetaPage, &version, sizeof(uint8_t)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + .WillOnce(DoAll( + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_FALSE(dataStore->Initialize()); // open success - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(1)); // expect call close - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(1, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage success, but crc check failed version = FORMAT_VERSION; chunk1MetaPage[1] += 1; // change the page data memcpy(chunk1MetaPage, &version, sizeof(uint8_t)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + .WillOnce(DoAll( + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_FALSE(dataStore->Initialize()); } /** * InitializeErrorTest - * case:open 快照文件的时候出错 - * 预期结果:返回false + * Case: Error opening snapshot file + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest4) { // test chunk open failed FakeEnv(); // set open snapshot file failed - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); + // Each reinitialization will release the original resources and reload them + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); // open success - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(2)); // expect call close - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // stat failed - EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Fstat(2, NotNull())).WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); + // Each reinitialization will release the original resources and reload them + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); // open success - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(2)); // expect call close - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, 
Close(2)).Times(1); // stat success but file size not equal chunksize_ + metapagesize_ struct stat fileInfo; fileInfo.st_size = chunksize_; EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); + // Each reinitialization will release the original resources and reload them + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); // open success - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(2)); // expect call close - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage failed EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); + // Each reinitialization will release the original resources and reload them + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); // open success - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(2)); // expect call close - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage success, but version incompatible uint8_t version = FORMAT_VERSION + 1; memcpy(chunk1SnapMetaPage, &version, sizeof(uint8_t)); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillOnce(DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); + // Each reinitialization will release the original resources and reload them + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); // open success - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(2)); // expect call close - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage success, but crc check failed version = FORMAT_VERSION; chunk1SnapMetaPage[1] += 1; // change the page data memcpy(chunk1SnapMetaPage, &version, sizeof(uint8_t)); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillOnce(DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); 
EXPECT_FALSE(dataStore->Initialize()); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); } /** * InitializeErrorTest - * case:存在chunk文件,chunk文件存在snapshot文件, - * List的时候snapshot先于chunk文件被list - * open chunk文件的时候出错 - * 预期结果:返回false + * Case: There is a chunk file, and there is a snapshot file in the chunk file, + * When listing, snapshots are listed before chunk files + * Error opening chunk file + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest5) { // test snapshot founded before chunk file , @@ -666,18 +565,16 @@ TEST_P(CSDataStore_test, InitializeErrorTest5) { fileNames.push_back(chunk1); EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); // set open chunk file failed - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillRepeatedly(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillRepeatedly(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); } /** * Test - * case:chunk 不存在 - * 预期结果:创建chunk文件,并成功写入数据 + * Case: chunk does not exist + * Expected result: Create chunk file and successfully write data */ TEST_P(CSDataStore_test, WriteChunkTest1) { // initialize @@ -691,47 +588,34 @@ TEST_P(CSDataStore_test, WriteChunkTest1) { char* buf = new char[length]; memset(buf, 0, length); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); - - // 如果sn为0,返回InvalidArgError - EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->WriteChunk(id, - 0, - buf, - offset, - length, - nullptr)); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); + + // If sn is 0, returns InvalidArgError + EXPECT_EQ(CSErrorCode::InvalidArgError, + dataStore->WriteChunk(id, 0, buf, offset, length, nullptr)); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + .WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage char chunk3MetaPage[metapagesize_]; // NOLINT memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); FakeEncodeChunk(chunk3MetaPage, 0, 1); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), - Return(metapagesize_))); + .WillOnce(DoAll( + SetArrayArgument<1>(chunk3MetaPage, chunk3MetaPage + metapagesize_), + Return(metapagesize_))); // will write data EXPECT_CALL(*lfs_, Write(4, Matcher(_), metapagesize_ + offset, length)) .Times(1); - EXPECT_EQ(CSErrorCode::Success, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + EXPECT_EQ(CSErrorCode::Success, + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); - EXPECT_CALL(*lfs_, Sync(4)) - .WillOnce(Return(0)) - .WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Sync(4)).WillOnce(Return(0)).WillOnce(Return(-1)); // sync chunk success EXPECT_EQ(CSErrorCode::Success, dataStore->SyncChunk(id)); @@ -744,21 +628,17 @@ TEST_P(CSDataStore_test, WriteChunkTest1) { ASSERT_EQ(1, info.curSn); ASSERT_EQ(0, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - 
EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; }
/** * WriteChunkTest - * case:chunk存在,请求sn小于chunk的sn - * 预期结果:拒绝写入,返回BackwardRequestError + * Case: chunk exists, request sn smaller than the chunk's sn + * Expected result: The write is refused with BackwardRequestError */
TEST_P(CSDataStore_test, WriteChunkTest2) { // initialize
@@ -776,46 +656,28 @@ TEST_P(CSDataStore_test, WriteChunkTest2) { // sn<chunk.sn, sn>chunk.correctedsn EXPECT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // sn<chunk.sn, sn==chunk.correctedsn EXPECT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // sn<chunk.sn, sn<chunk.correctedsn EXPECT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; }
/** * WriteChunkTest - * case:chunk存在,请求sn小于chunk的correctedSn - * 预期结果:拒绝写入,返回BackwardRequestError + * Case: chunk exists, request sn less than the chunk's correctedSn + * Expected result: The write is refused with BackwardRequestError */
TEST_P(CSDataStore_test, WriteChunkTest3) { // initialize
@@ -824,9 +686,8 @@ TEST_P(CSDataStore_test, WriteChunkTest3) { FakeEncodeChunk(chunk2MetaPage, 4, 2); EXPECT_CALL(*lfs_, Read(3, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk2MetaPage, - chunk2MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk2MetaPage, chunk2MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 2;
@@ -838,47 +699,29 @@ TEST_P(CSDataStore_test, WriteChunkTest3) { // sn>chunk.sn, sn<chunk.correctedsn EXPECT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // sn==chunk.sn, sn<chunk.correctedsn EXPECT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // sn==chunk.sn, sn<chunk.correctedsn EXPECT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; }
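// Illustrative sketch (not part of the patch): WriteChunkTest2 and
// WriteChunkTest3 together pin down when a write is rejected as backward.
// A minimal sketch of the predicate, with hypothetical names inferred from
// the assertions above rather than taken from the datastore implementation:
//
//     bool IsBackwardRequest(SequenceNum sn, const CSChunkInfo& info) {
//         // older than the current version or the corrected version
//         return sn < info.curSn || sn < info.correctedSn;
//     }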
/** * WriteChunkTest - * case:chunk存在,请求sn等于chunk的sn且不小于correctSn - * chunk不存在快照 - * 预期结果:直接写数据到chunk文件 + * Case: chunk exists, request sn equals the chunk's sn and is not less than + * correctedSn; chunk has no snapshot + * Expected result: Data is written directly to the chunk file */
TEST_P(CSDataStore_test, WriteChunkTest4) { // initialize
@@ -898,12 +741,7 @@ TEST_P(CSDataStore_test, WriteChunkTest4) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn);
@@ -914,52 +752,33 @@ TEST_P(CSDataStore_test, WriteChunkTest4) { EXPECT_CALL(*lfs_, Write(3, Matcher<const char*>(NotNull()), _, _)) .Times(0); EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // return InvalidArgError if length not aligned offset = blocksize_; length = blocksize_ - 1; EXPECT_CALL(*lfs_, Write(3, Matcher<const char*>(NotNull()), _, _)) .Times(0); EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // return InvalidArgError if offset not aligned offset = blocksize_ + 1; length = blocksize_; EXPECT_CALL(*lfs_, Write(3, Matcher<const char*>(NotNull()), _, _)) .Times(0); EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; }
/** * WriteChunkTest - * case:chunk存在,请求sn大于chunk的sn,等于correctSn, - * chunk不存在快照 - * 预期结果:会更新metapage,然后写数据到chunk文件 + * Case: chunk exists, request sn is greater than the chunk's sn and equal to + * correctedSn; chunk has no snapshot + * Expected result: The metapage is updated, then data is written to the chunk + * file */
TEST_P(CSDataStore_test, WriteChunkTest6) { // initialize
@@ -968,9 +787,8 @@ TEST_P(CSDataStore_test, WriteChunkTest6) { FakeEncodeChunk(chunk2MetaPage, 3, 2); EXPECT_CALL(*lfs_, Read(3, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk2MetaPage, - chunk2MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk2MetaPage, chunk2MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 2;
@@ -989,32 +807,25 @@ TEST_P(CSDataStore_test, WriteChunkTest6) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); ASSERT_EQ(0, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; }
/** * WriteChunkTest - * case:chunk存在,请求sn大于chunk的sn以及correctSn, - * chunk不存在快照、 - * 预期结果:会创建快照文件,更新metapage, - * 写数据时先cow到snapshot,再写chunk文件 + * Case: chunk exists, request sn greater than the chunk's sn and correctedSn; + * chunk has no snapshot + * Expected result: A snapshot file is created and the metapage is updated; + * the write first copies the old data to the snapshot (COW), then writes the + * chunk file */
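// Illustrative sketch (not part of the patch): the case above is the
// copy-on-write path that WriteChunkTest7 drives through the mocks. Roughly,
// under assumed helper names:
//
//     if (sn > info.curSn) {               // a newer file version writes
//         EnsureSnapshotFile(info.curSn);  // create the snapshot if missing
//         if (!SnapshotBlockWritten(block))
//             CopyBlockToSnapshot(block);  // COW at most once per block
//         UpdateMetaPage(sn);              // then bump the chunk sn
//     }
//     WriteChunkFile(block, data);         // finally write the new data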
TEST_P(CSDataStore_test, WriteChunkTest7) { // initialize
@@ -1028,23 +839,19 @@ TEST_P(CSDataStore_test, WriteChunkTest7) { char* buf = new char[length]; memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 - string snapPath = string(baseDir) + "/" + - FileNameOperator::GenerateSnapshotName(id, 2); + string snapPath = + string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); // snapshot not exists - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); // expect call chunkfile pool GetFile - EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(4)); // will read snapshot metapage char metapage[metapagesize_]; // NOLINT(runtime/arrays) memset(metapage, 0, sizeof(metapage)); FakeEncodeSnapshot(metapage, 2); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(metapage, - metapage + metapagesize_), + .WillOnce(DoAll(SetArrayArgument<1>(metapage, metapage + metapagesize_), Return(metapagesize_))); // will update metapage EXPECT_CALL(*lfs_,
@@ -1066,54 +873,37 @@ TEST_P(CSDataStore_test, WriteChunkTest7) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); ASSERT_EQ(2, info.snapSn); - // 再次写同一个block的数据,不再进行cow,而是直接写入数据 + // Write the same block again: no further COW, the data is written + // directly EXPECT_CALL(*lfs_, Write(3, Matcher<butil::IOBuf>(_), metapagesize_ + offset, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - // sn - 1 < chunk.sn , 返回 BackwardRequestError + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + // sn - 1 < chunk.sn, returns BackwardRequestError EXPECT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn - 1, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + dataStore->WriteChunk(id, sn - 1, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; }
/** * WriteChunkTest - * case:chunk存在,请求sn等于chunk的sn且不小于correctSn - * chunk存在快照 - * 预期结果:先cow到snapshot,再写chunk文件 + * Case: chunk exists, request sn equals the chunk's sn and is not less than + * correctedSn; chunk has a snapshot + * Expected result: The write first copies the old data to the snapshot (COW), + * then writes the chunk file */
TEST_P(CSDataStore_test, WriteChunkTest9) { // initialize
@@ -1143,31 +933,23 @@ TEST_P(CSDataStore_test, WriteChunkTest9) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; }
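// Illustrative summary (not part of the patch): the (curSn, snapSn) pairs
// asserted in these tests show how a write moves the version markers:
//   WriteChunkTest7: sn=3 > curSn=2, no snapshot yet -> (3, 2) after write
//   WriteChunkTest9: sn=2 == curSn, snapshot sn=1    -> (2, 1) unchanged
// curSn follows the newest accepted sn, while snapSn records the sn of the
// snapshot file that COW writes are directed to.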
/** * WriteChunkTest - * case:chunk存在,请求sn大于chunk的sn,等于correctSn - * chunk存在快照 - * 预期结果:更新metapage,然后写chunk文件 + * Case: chunk exists, request sn is greater than the chunk's sn and equal to + * correctedSn; chunk has a snapshot + * Expected result: Update the metapage, then write the chunk file */
TEST_P(CSDataStore_test, WriteChunkTest10) { // initialize
@@ -1176,9 +958,8 @@ TEST_P(CSDataStore_test, WriteChunkTest10) { FakeEncodeChunk(chunk1MetaPage, 3, 2); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1;
@@ -1198,31 +979,24 @@ TEST_P(CSDataStore_test, WriteChunkTest10) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); ASSERT_EQ(1, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; }
/** * WriteChunkTest - * case:chunk存在,请求sn大于chunk的sn和correctSn - * chunk存在快照,snapsn<sn - * 预期结果:返回SnapshotConflictError + * Case: chunk exists, request sn greater than the chunk's sn and correctedSn; + * chunk has a snapshot with snapsn<sn + * Expected result: returns SnapshotConflictError */
TEST_P(CSDataStore_test, WriteChunkTest11) { // initialize FakeEnv(); // fake read chunk1 metapage FakeEncodeChunk(chunk1MetaPage, 0, 2); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1;
@@ -1245,38 +1018,29 @@ TEST_P(CSDataStore_test, WriteChunkTest11) { // sn>chunk.sn, sn>chunk.correctedsn EXPECT_EQ(CSErrorCode::SnapshotConflictError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; }
/** * WriteChunkTest - * 写clone chunk,模拟克隆 - * case1:clone chunk存在,写入区域之前未写过 - * 预期结果1:写入数据并更新bitmap - * case2:clone chunk存在,写入区域之前已写过 - * 预期结果2:写入数据但不会更新bitmap - * case3:chunk存在,且是clone chunk,部分区域已写过,部分未写过 - * 预期结果3:写入数据并更新bitmap - * case4:遍写整个chunk - * 预期结果4:写入数据,然后clone chunk会被转为普通chunk + * Write a clone chunk to simulate cloning + * Case1: clone chunk exists, the target region has not been written before + * Expected result 1: Write the data and update the bitmap + * Case2: clone chunk exists, the target region has been written before + * Expected result 2: Write the data without updating the bitmap + * Case3: chunk exists and is a clone chunk; some blocks have been written, + * others have not + * Expected result 3: Write the data and update the bitmap + * Case4: Overwrite the entire chunk + * Expected result 4: Write the data; the clone chunk is then converted to a + * regular chunk */
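// Illustrative sketch (not part of the patch): clone-chunk writes are
// tracked by a per-block bitmap, as the cases above describe. A rough sketch
// of the bookkeeping, with hypothetical names:
//
//     Bitmap written(chunksize_ / blocksize_);   // one bit per block
//     void OnCloneWrite(off_t offset, size_t length) {
//         uint32_t first = offset / blocksize_;
//         uint32_t last = (offset + length - 1) / blocksize_;
//         written.Set(first, last);              // mark the written blocks
//         if (written.NextClearBit(0) == Bitmap::NO_POS)
//             ConvertToNormalChunk();            // fully written chunks stop
//     }                                          // being clone chunks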
TEST_P(CSDataStore_test, WriteChunkTest13) { // initialize
@@ -1291,7 +1055,7 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { char* buf = new char[length]; // NOLINT memset(buf, 0, length); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { LOG(INFO) << "case 1"; char chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays)
@@ -1300,30 +1064,25 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { make_shared<Bitmap>(chunksize_ / blocksize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); } - // case1:chunk存在,且是clone chunk,写入区域之前未写过 + // Case1: chunk exists and is a clone chunk; the target region has not + // been written before { LOG(INFO) << "case 2"; id = 3; // not exist
@@ -1338,13 +1097,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { .Times(1); ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - // 检查paste后chunk的状态 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // Check the chunk's state after the paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0));
@@ -1352,7 +1106,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case2:chunk存在,且是clone chunk,写入区域之前已写过 + // Case2: chunk exists and is a clone chunk; the target region has been + // written before { LOG(INFO) << "case 3"; id = 3; // not exist
@@ -1366,13 +1121,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { .Times(0); ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - // paste后,chunk的状态不变 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // After the paste, the chunk's state is unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0));
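// Illustrative note (not part of the patch): the assertions above read the
// clone bitmap through NextSetBit. After writing block 1 only, NextSetBit(i)
// yields the first written block at or after index i, and Bitmap::NO_POS
// once none remains:
//
//     info.bitmap->NextSetBit(0) == 1               // first written block
//     info.bitmap->NextSetBit(3) == Bitmap::NO_POS  // none at or after 3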
@@ -1380,7 +1130,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { - // case3:chunk存在,且是clone chunk,部分区域已写过,部分未写过 + // Case3: chunk exists and is a clone chunk; some blocks have been written, + // while others have not { LOG(INFO) << "case 4"; id = 3; // not exist
@@ -1389,8 +1140,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { std::unique_ptr<char[]> buf(new char[length]); - // [2 * blocksize_, 4 * blocksize_)区域已写过 - // [0, metapagesize_)为metapage + // The [2 * blocksize_, 4 * blocksize_) range has been written + // [0, metapagesize_) is the metapage EXPECT_CALL(*lfs_, Write(4, Matcher<butil::IOBuf>(_), offset + metapagesize_, length)) .Times(1);
@@ -1398,14 +1149,10 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { Write(4, Matcher<const char*>(NotNull()), 0, metapagesize_)) .Times(1); - ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf.get(), - offset, - length, - nullptr)); - // paste后,chunk的状态不变 + ASSERT_EQ( + CSErrorCode::Success, + dataStore->WriteChunk(id, sn, buf.get(), offset, length, nullptr)); + // After the paste, the chunk's state is unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(0, info.bitmap->NextSetBit(0));
@@ -1413,7 +1160,7 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(4)); } - // case4:遍写整个chunk + // Case4: Overwrite the entire chunk { LOG(INFO) << "case 5"; id = 3; // not exist
@@ -1422,8 +1169,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { std::unique_ptr<char[]> buf(new char[length]); - // [blocksize_, 4 * blocksize_)区域已写过 - // [0, metapagesize_)为metapage + // The [blocksize_, 4 * blocksize_) range has been written + // [0, metapagesize_) is the metapage EXPECT_CALL(*lfs_, Write(4, Matcher<butil::IOBuf>(_), offset + metapagesize_, length)) .Times(1);
@@ -1431,41 +1178,33 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { Write(4, Matcher<const char*>(NotNull()), 0, metapagesize_)) .Times(1); - ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf.get(), - offset, - length, - nullptr)); - // paste后,chunk的状态不变 + ASSERT_EQ( + CSErrorCode::Success, + dataStore->WriteChunk(id, sn, buf.get(), offset, length, nullptr)); + // After the paste, the chunk's state is unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; }
/** * WriteChunkTest - * 写clone chunk,模拟恢复 - * case1:clone chunk存在,sn<chunk.sn,sn<chunk.correctedsn - * 预期结果1:返回BackwardRequestError - * case2:clone chunk存在,sn>chunk.sn,sn==chunk.correctedsn - * 预期结果2:写入数据并更新bitmap,更新chunk.sn为sn - * case3:clone chunk存在,sn==chunk.sn,sn==chunk.correctedsn - * 预期结果3:写入数据并更新bitmap - * case4:clone chunk 存在,sn>chunk.sn, sn>chunk.correctedsn - * 预期结果4:返回StatusConflictError + * Write a clone chunk to simulate recovery + * Case1: clone chunk exists, sn<chunk.sn, sn<chunk.correctedsn + * Expected result 1: Returns BackwardRequestError + * Case2: clone chunk exists, sn>chunk.sn, sn==chunk.correctedsn + * Expected result 2: Write the data and update the bitmap; update chunk.sn + * to sn + * Case3: clone chunk exists, sn==chunk.sn, sn==chunk.correctedsn + * Expected result 3: Write the data and update the bitmap + * Case4: clone chunk exists, sn>chunk.sn, sn>chunk.correctedsn + * Expected result 4: Returns StatusConflictError */
TEST_P(CSDataStore_test, WriteChunkTest14) { // initialize
@@ -1480,7 +1219,7 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { char* buf = new char[length]; // NOLINT memset(buf, 0, length); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { char
chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); @@ -1488,26 +1227,20 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { make_shared(chunksize_ / blocksize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(2, info.curSn); @@ -1518,32 +1251,22 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case1:clone chunk存在 + // Case1: clone chunk exists { LOG(INFO) << "case 1"; // sn == chunk.sn, sn < chunk.correctedSn sn = 2; ASSERT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // sn < chunk.sn, sn < chunk.correctedSn sn = 1; ASSERT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); } - // case2:chunk存在,且是clone chunk, + // Case2: chunk exists and is a clone chunk, { LOG(INFO) << "case 2"; id = 3; @@ -1559,13 +1282,8 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { .Times(2); ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - // 检查paste后chunk的状态 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(3, info.curSn); ASSERT_EQ(3, info.correctedSn); @@ -1576,7 +1294,7 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case3:chunk存在,且是clone chunk + // Case3: chunk exists and is a clone chunk // sn > chunk.sn;sn == correctedsn { LOG(INFO) << "case 3"; @@ -1585,8 +1303,8 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { std::unique_ptr buf(new char[length]); - // [2 * blocksize_, 4 * blocksize_)区域已写过 - // [0, blocksize_)为metapage + // The [2 * blocksize_, 4 * blocksize_) area has been written + // [0, blocksize_) is the metapage EXPECT_CALL(*lfs_, Write(4, Matcher(_), offset + metapagesize_, length)) .Times(1); @@ -1594,14 +1312,10 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(1); - ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf.get(), - offset, - length, - nullptr)); - // paste后,chunk的状态不变 + ASSERT_EQ( + 
CSErrorCode::Success, + dataStore->WriteChunk(id, sn, buf.get(), offset, length, nullptr)); + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(3, info.curSn); ASSERT_EQ(3, info.correctedSn); @@ -1612,25 +1326,20 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(4)); } - // case3:chunk存在,且是clone chunk + // Case3: chunk exists and is a clone chunk // sn > chunk.sn;sn > correctedsn { LOG(INFO) << "case 4"; sn = 4; - // 不会写数据 - EXPECT_CALL(*lfs_, Write(4, Matcher(_), _, _)) - .Times(0); + // No data will be written + EXPECT_CALL(*lfs_, Write(4, Matcher(_), _, _)).Times(0); std::unique_ptr buf(new char[length]); - ASSERT_EQ(CSErrorCode::StatusConflictError, - dataStore->WriteChunk(id, - sn, - buf.get(), - offset, - length, - nullptr)); - // chunk的状态不变 + ASSERT_EQ( + CSErrorCode::StatusConflictError, + dataStore->WriteChunk(id, sn, buf.get(), offset, length, nullptr)); + // The state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(3, info.curSn); ASSERT_EQ(3, info.correctedSn); @@ -1641,25 +1350,22 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(4)); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在, + * Case: chunk exists, * sn==chunk.sn * sn>chunk.correctedSn * chunk.sn<snap.sn * chunk has a snapshot * Expected result: will not cow, write directly to the chunk file */ TEST_P(CSDataStore_test, WriteChunkTest15) { // initialize FakeEnv(); // fake read chunk1 metapage FakeEncodeChunk(chunk1MetaPage, 0, 2); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); // fake read chunk1's snapshot1 metapage,chunk.sn<snap.sn FakeEncodeSnapshot(chunk1SnapMetaPage, 3); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -1696,30 +1401,23 @@ TEST_P(CSDataStore_test, WriteChunkTest15) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在, - * sn>chunk.sn - * sn>chunk.correctedSn - * chunk.sn==snap.sn - * chunk存在快照 - * 预期结果:先cow到snapshot,再写chunk文件 + * Case: chunk exists, + * sn>chunk.sn + * sn>chunk.correctedSn + * chunk.sn==snap.sn + * chunk has a snapshot + * Expected result: When writing data, first perform a Copy-On-Write operation + * to the snapshot, and then write to the chunk file */ TEST_P(CSDataStore_test, WriteChunkTest16) { // initialize @@ -1728,16 +1426,15 @@ TEST_P(CSDataStore_test, WriteChunkTest16) { FakeEncodeChunk(chunk1MetaPage, 0, 2); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_)));
SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); // fake read chunk1's snapshot1 metapage FakeEncodeSnapshot(chunk1SnapMetaPage, 3); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -1760,26 +1457,18 @@ TEST_P(CSDataStore_test, WriteChunkTest16) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** - * WriteChunkTest 异常测试 - * case:创建快照文件时出错 - * 预期结果:写失败,不会改变当前chunk状态 + * WriteChunkTest exception test + * Case: Error creating snapshot file + * Expected result: Write failed and will not change the current chunk state */ TEST_P(CSDataStore_test, WriteChunkErrorTest1) { // initialize @@ -1792,80 +1481,56 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest1) { size_t length = blocksize_; char* buf = new char[length]; // NOLINT memset(buf, 0, length); - string snapPath = string(baseDir) + "/" + - FileNameOperator::GenerateSnapshotName(id, 2); + string snapPath = + string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); // getchunk failed - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn); ASSERT_EQ(0, info.snapSn); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) - .WillOnce(Return(0)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); + EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())).WillOnce(Return(0)); // open snapshot failed - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn); ASSERT_EQ(0, info.snapSn); // open success but read snapshot metapage failed - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(4)); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); EXPECT_EQ(CSErrorCode::InternalError, - 
dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn); ASSERT_EQ(0, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** - * WriteChunkTest 异常测试 - * case:创建快照文件成功,更新metapage失败 - * 预期结果:写失败,产生快照文件,但是chunk版本号不会改变 - * 再次写入,不会生成新的快照文件 + * WriteChunkTest exception test + * Case: Snapshot file created successfully, but updating the metapage failed + * Expected result: The write fails and a snapshot file is produced, but the + * chunk version number does not change; writing again does not generate a new + * snapshot file */ TEST_P(CSDataStore_test, WriteChunkErrorTest2) { // initialize @@ -1879,22 +1544,18 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest2) { char* buf = new char[length]; memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 - string snapPath = string(baseDir) + "/" + - FileNameOperator::GenerateSnapshotName(id, 2); + string snapPath = + string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); // expect call chunk file pool GetFile - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); + EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(4)); // will read snapshot metapage char metapage[metapagesize_]; // NOLINT(runtime/arrays) memset(metapage, 0, sizeof(metapage)); FakeEncodeSnapshot(metapage, 2); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(metapage, - metapage + metapagesize_), + .WillOnce(DoAll(SetArrayArgument<1>(metapage, metapage + metapagesize_), Return(metapagesize_))); // write chunk metapage failed EXPECT_CALL(*lfs_, @@ -1902,34 +1563,26 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest2) { .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); // chunk sn not changed ASSERT_EQ(2, info.curSn); ASSERT_EQ(2, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /** - * WriteChunkTest 异常测试 - * case:创建快照文件成功,更新metapage成功,cow失败 - * 预期结果:写失败,产生快照文件,chunk版本号发生变更, - * 快照的bitmap未发生变化,再次写入,仍会进行cow + * WriteChunkTest exception test + * Case: Snapshot file created successfully, metapage updated successfully, but + * cow failed + * Expected result: The write fails, a snapshot file is generated, and the + * chunk version number changes; the snapshot's bitmap does not change, and + * writing again still triggers cow */ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { // initialize @@ -1943,22 +1596,18 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { char* buf = new char[length]; memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 - string snapPath = string(baseDir) + "/" + - FileNameOperator::GenerateSnapshotName(id, 2); + string snapPath = + string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); // expect call chunk file pool GetFile - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); + EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(4)); // will read snapshot metapage char metapage[metapagesize_]; // NOLINT(runtime/arrays) memset(metapage, 0, sizeof(metapage)); FakeEncodeSnapshot(metapage, 2); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(metapage, - metapage + metapagesize_), + .WillOnce(DoAll(SetArrayArgument<1>(metapage, metapage + metapagesize_), Return(metapagesize_))); // will update metapage EXPECT_CALL(*lfs_, @@ -1971,12 +1620,7 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); @@ -1991,12 +1635,7 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { metapagesize_ + offset, length)) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); ASSERT_EQ(2, info.snapSn); @@ -2014,17 +1653,12 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); ASSERT_EQ(2, info.snapSn); - // 再次写入仍会cow + // Writing again will still trigger cow // will copy on write LOG(INFO) << "case 4"; EXPECT_CALL(*lfs_, Read(3, NotNull(), metapagesize_ + offset, length)) @@ -2043,29 +1677,21 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { LOG(INFO) << "case 5"; EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /** - * WriteChunkTest 异常测试 - * case:创建快照文件成功,更新metapage成功,cow成功,写数据失败 - * 预期结果:写失败,产生快照文件,chunk版本号发生变更, - * 快照的bitmap发生变化,再次写入,直接写chunk文件 + * WriteChunkTest exception test + * Case: Snapshot file created successfully, metapage updated successfully, cow + * succeeded, but writing the data failed + * Expected result: The write fails, a snapshot file is generated, and the + * chunk version number changes; the snapshot's bitmap has changed, so writing + * again writes directly to the chunk file */ TEST_P(CSDataStore_test, WriteChunkErrorTest4) { // initialize @@ -2079,22 +1705,18 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest4) { char buf[length]; // NOLINT memset(buf, 0, sizeof(buf)); // will Open snapshot file, snap sn equals 2 - string snapPath = string(baseDir) + "/" + - FileNameOperator::GenerateSnapshotName(id, 2); + string snapPath = + string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); // expect call chunk file pool GetFile - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); + EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(4)); // will read snapshot metapage char metapage[metapagesize_]; // NOLINT(runtime/arrays) memset(metapage, 0, sizeof(metapage)); FakeEncodeSnapshot(metapage, 2); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(metapage, - metapage + metapagesize_), + .WillOnce(DoAll(SetArrayArgument<1>(metapage, metapage + metapagesize_), Return(metapagesize_))); // will update metapage EXPECT_CALL(*lfs_, @@ -2116,39 +1738,25 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest4) { .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - // 再次写入直接写chunk文件 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // Writing again writes directly to the chunk file // will write data EXPECT_CALL(*lfs_, Write(3, Matcher(_), metapagesize_ + offset, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); } /** * WriteChunkTest - * case:chunk 不存在 - * 预期结果:创建chunk文件的时候失败 + * Case: chunk does not exist + * Expected result: Creating the chunk file fails */ TEST_P(CSDataStore_test, WriteChunkErrorTest5) { // initialize @@ -2162,117 +1770,78 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest5) { char* buf = new char[length]; memset(buf, 0, length); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunk file pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(-UT_ERRNO)); - EXPECT_EQ(CSErrorCode::InternalError, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + EXPECT_EQ(CSErrorCode::InternalError, + dataStore->WriteChunk(id, sn, buf, offset, length,
nullptr)); // getchunk success - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); // set open chunk file failed - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .WillOnce(Return(-UT_ERRNO)); - EXPECT_EQ(CSErrorCode::InternalError, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).WillOnce(Return(-UT_ERRNO)); + EXPECT_EQ(CSErrorCode::InternalError, + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(true)); // open success - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).WillOnce(Return(4)); // expect call close - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); // stat failed - EXPECT_CALL(*lfs_, Fstat(4, NotNull())) - .WillOnce(Return(-UT_ERRNO)); - EXPECT_EQ(CSErrorCode::InternalError, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Fstat(4, NotNull())).WillOnce(Return(-UT_ERRNO)); + EXPECT_EQ(CSErrorCode::InternalError, + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(true)); // open success - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).WillOnce(Return(4)); // expect call close - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); // stat success but file size not equal chunksize_ + metapagesize_ struct stat fileInfo; fileInfo.st_size = chunksize_; EXPECT_CALL(*lfs_, Fstat(4, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_EQ(CSErrorCode::FileFormatError, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(true)); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_EQ(CSErrorCode::FileFormatError, + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(true)); // open success - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).WillOnce(Return(4)); // expect call close - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(4, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage failed EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); - EXPECT_EQ(CSErrorCode::InternalError, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_EQ(CSErrorCode::InternalError, + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /* * 
WriteChunkErrorTest - * 所写chunk为clone chunk - * case1:请求location过长,导致metapage size超出page size - * 预期结果1:create clone chunk失败 - * case2:写数据时失败 - * 预期结果2:返回InternalError,chunk状态不变 - * case3:更新metapage时失败 - * 预期结果3:返回InternalError,chunk状态不变 + * The chunk being written is a clone chunk + * Case1: The requested location is too long, causing the metapage size to + * exceed the page size + * Expected result 1: Creating the clone chunk fails + * Case2: Writing the data fails + * Expected result 2: InternalError returned, chunk status unchanged + * Case3: Updating the metapage fails + * Expected result 3: InternalError returned, chunk status unchanged */ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { // initialize @@ -2287,17 +1856,14 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { char buf[length]; // NOLINT memset(buf, 0, sizeof(buf)); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { - string longLocation(kLocationLimit+1, 'a'); + string longLocation(kLocationLimit + 1, 'a'); EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, longLocation)); } - // 创建 clone chunk + // Create clone chunk { char chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); @@ -2305,29 +1871,23 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { make_shared(chunksize_ / metapagesize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); } - // case1:写数据时失败 + // Case1: Failed to write data { id = 3; // not exist offset = blocksize_; @@ -2340,18 +1900,13 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(0); ASSERT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr));
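The two failure cases around here pin down an ordering invariant in the clone-chunk write path: the user data is written before the metapage bitmap is persisted, so a failure at either step leaves the observable chunk state (the isClone flag and the bitmap) untouched. A minimal sketch of that ordering, using hypothetical simplified types rather than the real CSChunkFile internals:

    #include <sys/types.h>
    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-ins for the datastore types (illustration only).
    enum class Err { Success, InternalError };

    struct CloneChunk {
        uint64_t bitmap = 0;   // one bit per block, persisted in the metapage
        size_t blockSize = 4096;
        // Stubs standing in for the real filesystem-backed operations.
        bool WriteData(const char*, off_t, size_t) { return true; }
        bool PersistMetaPage(uint64_t) { return true; }
    };

    Err WriteCloneChunk(CloneChunk* c, const char* buf, off_t off, size_t len) {
        // Step 1: write the data; if this fails, the metapage was never
        // touched, so GetChunkInfo still sees the old bitmap.
        if (!c->WriteData(buf, off, len)) return Err::InternalError;
        // Step 2: mark the written blocks and persist the metapage; if this
        // fails, the in-memory bitmap is not replaced either.
        uint64_t updated = c->bitmap;
        for (size_t b = off / c->blockSize;
             b <= (off + len - 1) / c->blockSize; ++b) {
            updated |= (uint64_t{1} << b);
        }
        if (!c->PersistMetaPage(updated)) return Err::InternalError;
        c->bitmap = updated;  // commit only after both steps succeed
        return Err::Success;
    }

Under this assumed ordering, a failed data write never reaches the metapage (the test expects the metapage Write with Times(0)), and a failed metapage write leaves NextSetBit(0) == Bitmap::NO_POS, which is exactly what the surrounding assertions check.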
- // 检查paste后chunk的状态 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); } /** * ReadChunkTest - * case:chunk不存在 - * 预期结果:返回ChunkNotExistError错误码 + * Case: chunk does not exist + * Expected result: ChunkNotExistError error code returned */ TEST_P(CSDataStore_test, ReadChunkTest1) { // initialize @@ -2404,24 +1950,17 @@ TEST_P(CSDataStore_test, ReadChunkTest1) { memset(buf, 0, sizeof(buf)); // test chunk not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * ReadChunkTest - * case:chunk存在,读取区域超过chunk大小或者offset和length未对齐 - * 预期结果:返回InvalidArgError错误码 + * Case: chunk exists, reading area exceeds chunk size or offset and length are + * not aligned Expected result: InvalidArgError error code returned */ TEST_P(CSDataStore_test, ReadChunkTest2) { // initialize @@ -2436,42 +1975,27 @@ TEST_P(CSDataStore_test, ReadChunkTest2) { memset(buf, 0, sizeof(buf)); // test read out of range EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); // return InvalidArgError if length not aligned offset = blocksize_; length = blocksize_ - 1; EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); // return InvalidArgError if offset not aligned offset = blocksize_ + 1; length = blocksize_; EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * ReadChunkTest - * case:正常读取存在的chunk - * 预期结果:读取成功 + * Case: Normal reading of existing chunks + * Expected result: read successfully */ TEST_P(CSDataStore_test, ReadChunkTest3) { // initialize @@ -2488,30 +2012,23 @@ TEST_P(CSDataStore_test, ReadChunkTest3) { EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + metapagesize_, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * ReadChunkTest - * 读取 clone chunk - * 
case1:读取区域未被写过 - * 预期结果:返回PageNerverWrittenError - * case2:读取区域部分被写过 - * 预期结果:返回PageNerverWrittenError - * case3:读取区域已被写过 - * 预期结果:返回Success,数据成功写入 + * Read clone chunk + * Case1: The read area has not been written + * Expected result: PageNerverWrittenError returned + * Case2: Part of the read area has been written + * Expected result: PageNerverWrittenError returned + * Case3: The read area has been written + * Expected result: Success returned, data successfully read */ TEST_P(CSDataStore_test, ReadChunkTest4) { // initialize @@ -2529,80 +2046,56 @@ TEST_P(CSDataStore_test, ReadChunkTest4) { FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), - Return(metapagesize_))); - EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, - location)); - - // case1: 读取未写过区域 + .WillOnce(DoAll( + SetArrayArgument<1>(chunk3MetaPage, chunk3MetaPage + metapagesize_), + Return(metapagesize_))); + EXPECT_EQ( + CSErrorCode::Success, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); + + // Case1: Read an unwritten area off_t offset = 1 * blocksize_; size_t length = blocksize_; char buf[2 * length]; // NOLINT memset(buf, 0, sizeof(buf)); - EXPECT_CALL(*lfs_, Read(_, _, _, _)) - .Times(0); + EXPECT_CALL(*lfs_, Read(_, _, _, _)).Times(0); EXPECT_EQ(CSErrorCode::PageNerverWrittenError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - // case2: 读取区域部分被写过 + // Case2: Part of the read area has been written offset = 0; length = 2 * blocksize_; - EXPECT_CALL(*lfs_, Read(_, _, _, _)) - .Times(0); + EXPECT_CALL(*lfs_, Read(_, _, _, _)).Times(0); EXPECT_EQ(CSErrorCode::PageNerverWrittenError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - // case3: 读取区域已写过 + // Case3: The read area has been written offset = 0; length = blocksize_; EXPECT_CALL(*lfs_, Read(4, NotNull(), offset + metapagesize_, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); } /** * ReadChunkErrorTest - * case:读chunk文件时出错 - * 预期结果:读取失败,返回InternalError + * Case: Error reading chunk file + * Expected result: Read failed, returned InternalError */ TEST_P(CSDataStore_test, ReadChunkErrorTest1) { // initialize @@
-2619,25 +2112,18 @@ TEST_P(CSDataStore_test, ReadChunkErrorTest1) { EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + metapagesize_, length)) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * ReadSnapshotChunkTest - * case:chunk不存在 - * 预期结果:返回ChunkNotExistError错误码 + * Case: chunk does not exist + * Expected result: ChunkNotExistError error code returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest1) { // initialize @@ -2652,25 +2138,18 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest1) { memset(buf, 0, length); // test chunk not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * ReadSnapshotChunkTest - * case:chunk存在,请求版本号等于chunk版本号 - * 预期结果:读chunk的数据 + * Case: chunk exists, request version number equal to Chunk version number + * Expected result: Read chunk data */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest2) { // initialize @@ -2685,54 +2164,35 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest2) { memset(buf, 0, length); // test out of range EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); // test offset not aligned offset = chunksize_ - 1; length = chunksize_; EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); // test length not aligned offset = chunksize_; length = chunksize_ + 1; EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); // test in range offset = blocksize_; length = 2 * blocksize_; EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + metapagesize_, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * ReadSnapshotChunkTest - * case:chunk存在,请求版本号等于snapshot版本号 - * 预期结果:读快照的数据 + * Case: chunk exists, request version number equal to snapshot version number + * Expected result: Read data from snapshot */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest3) { // initialize @@ -2760,12 +2220,7 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest3) { offset + metapagesize_, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - writeBuf, - offset, - length, - nullptr)); + 
dataStore->WriteChunk(id, sn, writeBuf, offset, length, nullptr)); // test out of range sn = 1; @@ -2774,16 +2229,11 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest3) { char* readBuf = new char[length]; memset(readBuf, 0, length); EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadSnapshotChunk(id, - sn, - readBuf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, readBuf, offset, length)); // test in range, read [0, 4*blocksize_) offset = 0; // read chunk in[0, blocksize_) and [3*blocksize_, 4*blocksize_) - EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_, blocksize_)) - .Times(1); + EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_, blocksize_)).Times(1); EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_ + 3 * blocksize_, blocksize_)) .Times(1); @@ -2792,26 +2242,19 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest3) { 2 * blocksize_)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->ReadSnapshotChunk(id, - sn, - readBuf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, readBuf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] writeBuf; delete[] readBuf; } /** * ReadSnapshotChunkTest - * case:chunk存在,但是请求的版本号不存在 - * 预期结果:返回ChunkNotExistError错误码 + * Case: chunk exists, but the requested version number does not exist + * Expected result: ChunkNotExistError error code returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest4) { // initialize @@ -2826,25 +2269,18 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest4) { memset(buf, 0, length); // test sn not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * ReadSnapshotChunkErrorTest - * case:读快照时失败 - * 预期结果:返回InternalError + * Case: Failed to read snapshot + * Expected result: InternalError returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest1) { // initialize @@ -2872,12 +2308,7 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest1) { offset + metapagesize_, length)) .Times(1); ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - writeBuf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, writeBuf, offset, length, nullptr)); // test in range, read [0, 4*blocksize_) sn = 1; @@ -2889,15 +2320,10 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest1) { EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_, blocksize_)) .WillOnce(Return(-UT_ERRNO)); ASSERT_EQ(CSErrorCode::InternalError, - dataStore->ReadSnapshotChunk(id, - sn, - readBuf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, readBuf, offset, length)); // read snapshot failed - EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_, blocksize_)) - .Times(1); + EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_, blocksize_)).Times(1); EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_ + 3 * blocksize_, blocksize_)) .Times(1); @@ -2907,20 +2333,17 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest1) { ASSERT_EQ(CSErrorCode::InternalError, 
dataStore->ReadSnapshotChunk(id, sn, readBuf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] writeBuf; delete[] readBuf; } /** * ReadSnapshotChunkErrorTest - * case:chunk存在,请求版本号等于chunk版本号,读数据时失败 - * 预期结果:返回InternalError + * Case: chunk exists, request version number is equal to Chunk version number, + * failed while reading data Expected result: InternalError returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest2) { // initialize @@ -2938,18 +2361,11 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest2) { EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + metapagesize_, length)) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } @@ -2971,12 +2387,9 @@ TEST_P(CSDataStore_test, ReadChunkMetaDataTest1) { EXPECT_EQ(CSErrorCode::ChunkNotExistError, dataStore->ReadChunkMetaPage(id, sn, buf)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** @@ -2994,24 +2407,18 @@ TEST_P(CSDataStore_test, ReadChunkMetaDataTest2) { char buf[blocksize_]; // NOLINT(runtime/arrays) memset(buf, 0, blocksize_); // test chunk exists - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) - .Times(1); - EXPECT_EQ(CSErrorCode::Success, - dataStore->ReadChunkMetaPage(id, sn, buf)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)).Times(1); + EXPECT_EQ(CSErrorCode::Success, dataStore->ReadChunkMetaPage(id, sn, buf)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } - /** * DeleteChunkTest - * case:chunk不存在 - * 预期结果:返回成功 + * Case: chunk does not exist + * Expected result: returned successfully */ TEST_P(CSDataStore_test, DeleteChunkTest1) { // initialize @@ -3022,21 +2429,17 @@ TEST_P(CSDataStore_test, DeleteChunkTest1) { SequenceNum sn = 2; // test chunk not exists - EXPECT_EQ(CSErrorCode::Success, - dataStore->DeleteChunk(id, sn)); + EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteChunk(id, sn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteChunkTest - * case:chunk存在快照文件 - * 预期结果:返回Success, chunk被删除,快照被删除 + * Case: Chunk has a snapshot file present + * Expected result: Success returned, chunk deleted, snapshot deleted */ TEST_P(CSDataStore_test, DeleteChunkTest2) { // initialize @@ -3046,25 +2449,21 @@ TEST_P(CSDataStore_test, DeleteChunkTest2) { ChunkID id = 1; SequenceNum sn = 2; - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); 
- EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // delete chunk with snapshot - EXPECT_EQ(CSErrorCode::Success, - dataStore->DeleteChunk(id, sn)); + EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteChunk(id, sn)); CSChunkInfo info; ASSERT_EQ(CSErrorCode::ChunkNotExistError, dataStore->GetChunkInfo(id, &info)); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** - * case:chunk存在,快照文件不存在 - * 预期结果:返回成功 + * Case: chunk exists, snapshot file does not exist + * Expected result: returned successfully */ TEST_P(CSDataStore_test, DeleteChunkTest3) { // initialize @@ -3075,27 +2474,22 @@ TEST_P(CSDataStore_test, DeleteChunkTest3) { SequenceNum sn = 2; // chunk will be closed - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) - .WillOnce(Return(0)); - EXPECT_EQ(CSErrorCode::Success, - dataStore->DeleteChunk(id, sn)); + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)).WillOnce(Return(0)); + EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteChunk(id, sn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); } /** * DeleteChunkTest - * chunk存在,快照文件不存在 - * case1: sn<chunkinfo.sn - * 预期结果1:返回BackwardRequestError - * case2: sn>chunkinfo.sn - * 预期结果2:返回成功 + * chunk exists, snapshot file does not exist + * Case1: sn<chunkinfo.sn + * Expected result 1: BackwardRequestError returned + * Case2: sn>chunkinfo.sn + * Expected result 2: Success returned */ TEST_P(CSDataStore_test, DeleteChunkTest4) { // initialize @@ -3107,37 +2501,30 @@ TEST_P(CSDataStore_test, DeleteChunkTest4) { // case1 { // chunk will be closed - EXPECT_CALL(*lfs_, Close(3)) - .Times(0); + EXPECT_CALL(*lfs_, Close(3)).Times(0); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) - .Times(0); + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)).Times(0); EXPECT_EQ(CSErrorCode::BackwardRequestError, - dataStore->DeleteChunk(id, 1)); + dataStore->DeleteChunk(id, 1)); } // case2 { // chunk will be closed - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) - .WillOnce(Return(0)); - EXPECT_EQ(CSErrorCode::Success, - dataStore->DeleteChunk(id, 3)); + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)).WillOnce(Return(0)); + EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteChunk(id, 3)); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); } /** * DeleteChunkErrorTest - * case:chunk存在,快照文件不存在,recyclechunk时出错 - * 预期结果:返回成功 + * Case: chunk exists, snapshot file does not exist, and recycling the chunk + * fails + * Expected result: InternalError returned */ TEST_P(CSDataStore_test, DeleteChunkErrorTest1) { // initialize @@ -3147,24 +2534,19 @@ TEST_P(CSDataStore_test, DeleteChunkErrorTest1) { ChunkID id = 2; SequenceNum sn = 2; // chunk will be closed - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) - .WillOnce(Return(-1)); - EXPECT_EQ(CSErrorCode::InternalError, - dataStore->DeleteChunk(id, sn)); + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)).WillOnce(Return(-1)); + EXPECT_EQ(CSErrorCode::InternalError, dataStore->DeleteChunk(id, sn)); - EXPECT_CALL(*lfs_, Close(1)) -
.Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk不存在 - * 预期结果:返回成功 + * Case: chunk does not exist + * Expected result: returned successfully */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest1) { // initialize @@ -3177,27 +2559,26 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest1) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } -// 对于DeleteSnapshotChunkOrCorrectSn来说,内部主要有两个操作 -// 一个是删除快照文件,一个是修改correctedSn -// 当存在快照文件时,fileSn>=chunk的sn是判断是否要删除快照的唯一条件 -// 对于correctedSn来说,fileSn大于chunk的sn以及correctedSn是判断 -// 是否要修改correctedSn的唯一条件 +// For DeleteSnapshotChunkOrCorrectSn, there are two main internal operations: +// one is to delete the snapshot file, the other is to modify correctedSn. +// When a snapshot file exists, fileSn >= the chunk's sn is the only condition +// for deciding whether to delete the snapshot. For correctedSn, fileSn being +// greater than both the chunk's sn and its correctedSn is the only condition +// for deciding whether to modify correctedSn. /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn >= chunk的sn - * fileSn == chunk的correctedSn - * chunk.sn>snap.sn - * 预期结果:删除快照,不会修改correctedSn,返回成功 + * Case: chunk exists, snapshot exists + * fileSn >= the chunk's sn + * fileSn == the chunk's correctedSn + * chunk.sn>snap.sn + * Expected result: The snapshot is deleted without modifying correctedSn, + * success returned */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest2) { // initialize @@ -3206,9 +2587,8 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest2) { FakeEncodeChunk(chunk1MetaPage, 3, 2); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -3216,11 +2596,9 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest2) { // fileSn == correctedSn SequenceNum fileSn = 3; // snapshot will be closed - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) - .Times(1); + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).Times(1); // chunk's metapage should not be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, metapagesize_)) @@ -3228,18 +2606,17 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest2) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn < chunk的sn - * 此时无论correctSn为何值都不会修改correctedSn - * 预期结果:返回成功,不会删除快照,不会修改correctedSn + * Case: chunk exists, snapshot exists + * fileSn < the chunk's sn + * In this case correctedSn is not modified, regardless of its value + * Expected result: Success returned, the snapshot is not deleted, and + * correctedSn is not modified */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) { // initialize @@ -3248,9 +2625,8 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) { FakeEncodeChunk(chunk1MetaPage, 0, 3); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -3258,8 +2634,7 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) { // 2 > correctedSn SequenceNum fileSn = 2; // snapshot should not be closed - EXPECT_CALL(*lfs_, Close(2)) - .Times(0); + EXPECT_CALL(*lfs_, Close(2)).Times(0); // chunk's metapage should not be updated EXPECT_CALL(*lfs_, Write(3, Matcher(NotNull()), 0, metapagesize_)) @@ -3267,17 +2642,14 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) { EXPECT_EQ(CSErrorCode::BackwardRequestError, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - // 下则用例用于补充DeleteSnapshotChunkOrCorrectSnTest2用例中 - // 当 fileSn == sn 时的边界情况 - // fileSn == sn - // fileSn > correctedSn + // The following case supplements DeleteSnapshotChunkOrCorrectSnTest2 with + // the boundary situation when fileSn == sn: + // fileSn == sn, fileSn > correctedSn fileSn = 3; // snapshot will be closed - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) - .Times(1); + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).Times(1); // chunk's metapage should not be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, metapagesize_)) @@ -3285,17 +2657,16 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn > chunk的sn以及correctedSn - * 预期结果:删除快照,并修改correctedSn,返回成功 + * Case: chunk exists, snapshot exists + * fileSn > the chunk's sn and correctedSn + * Expected result: The snapshot is deleted and correctedSn is modified, + * success returned */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest4) { // initialize @@ -3307,11 +2678,9 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest4) { // fileSn > correctedSn SequenceNum fileSn = 3; // snapshot will be closed - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) - .Times(1); + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).Times(1); // chunk's metapage will be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, metapagesize_)) @@ -3319,17 +2688,15 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest4) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - *
case:chunk存在,snapshot不存在 - * fileSn <= chunk的sn或correctedSn - * 预期结果:不会修改correctedSn,返回成功 + * Case: chunk exists, snapshot does not exist + * fileSn <= the chunk's sn or correctedSn + * Expected result: correctedSn is not modified, success returned */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest5) { // initialize @@ -3347,19 +2714,16 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest5) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot不存在 - * fileSn > chunk的sn及correctedSn - * 预期结果:修改correctedSn,返回成功 + * Case: chunk exists, snapshot does not exist + * fileSn > the chunk's sn and correctedSn + * Expected result: correctedSn is modified, success returned */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest6) { // initialize @@ -3377,18 +2741,15 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest6) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot不存在,chunk为clone chunk - * 预期结果:返回StatusConflictError + * Case: chunk exists, snapshot does not exist, chunk is a clone chunk + * Expected result: StatusConflictError returned */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest7) { // initialize @@ -3405,29 +2766,23 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest7) { FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), - Return(metapagesize_))); - EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, - location)); - - // 无论correctedSn为多少,都返回StatusConflictError + .WillOnce(DoAll( + SetArrayArgument<1>(chunk3MetaPage, chunk3MetaPage + metapagesize_), + Return(metapagesize_))); + EXPECT_EQ( + CSErrorCode::Success, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); + + // StatusConflictError is returned regardless of the value of correctedSn EXPECT_EQ(CSErrorCode::StatusConflictError, dataStore->DeleteSnapshotChunkOrCorrectSn(id, 1)); EXPECT_EQ(CSErrorCode::StatusConflictError, @@ -3439,23 +2794,20 @@
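Taken together, Test1 through Test9 trace out the decision ladder inside DeleteSnapshotChunkOrCorrectSn. A condensed sketch of that ladder, with illustrative helper names rather than the real CSDataStore/CSChunkFile code, and derived only from what the surrounding cases assert:

    // Illustrative reconstruction of the decision order implied by the tests.
    CSErrorCode DeleteSnapshotChunkOrCorrectSnSketch(Chunk* chunk,
                                                     SequenceNum fileSn) {
        if (chunk == nullptr) return CSErrorCode::Success;  // Test1
        // A clone chunk is still being populated; any request conflicts.
        if (chunk->isClone) return CSErrorCode::StatusConflictError;  // Test7
        if (fileSn < chunk->sn)
            return CSErrorCode::BackwardRequestError;  // Test3, first part
        // Only a snapshot older than the chunk is a cow leftover that can be
        // recycled; when snap.sn >= chunk.sn it is kept (Test8/Test9).
        if (chunk->snapshot != nullptr && chunk->snapshot->sn < chunk->sn)
            RecycleSnapshot(chunk);  // Test2, Test3 (fileSn == sn), Test4
        // Only a strictly newer fileSn bumps correctedSn (metapage update).
        if (fileSn > chunk->sn && fileSn > chunk->correctedSn) {
            chunk->correctedSn = fileSn;  // Test4, Test6, Test8
            if (PersistMetaPage(chunk) != 0)
                return CSErrorCode::InternalError;  // ErrorTest1
        }
        return CSErrorCode::Success;
    }

This is a sketch of observable behavior, not the actual implementation: Chunk, RecycleSnapshot, and PersistMetaPage are assumed placeholders, and the real code interleaves these steps with file handles and locking that the mocks above stand in for.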
EXPECT_EQ(CSErrorCode::StatusConflictError, dataStore->DeleteSnapshotChunkOrCorrectSn(id, 5)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn > chunk的sn - * fileSn > chunk的correctedSn + * Case: chunk exists, snapshot exists + * fileSn > chunk's sn + * fileSn > chunk's correctedSn * chunk.sn==snap.sn - * 预期结果:删除快照,不会修改correctedSn,返回成功 + * Expected result: Delete snapshot without modifying correctedSn, return + * success */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest8) { // initialize @@ -3464,16 +2816,15 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest8) { FakeEncodeChunk(chunk1MetaPage, 0, 2); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); // fake read chunk1's snapshot1 metapage,chunk.sn==snap.sn FakeEncodeSnapshot(chunk1SnapMetaPage, 2); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -3481,11 +2832,9 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest8) { // fileSn > correctedSn SequenceNum fileSn = 3; // snapshot will not be closed - EXPECT_CALL(*lfs_, Close(2)) - .Times(0); + EXPECT_CALL(*lfs_, Close(2)).Times(0); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) - .Times(0); + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).Times(0); // chunk's metapage should be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, metapagesize_)) @@ -3493,21 +2842,19 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest8) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn == chunk的sn - * fileSn == chunk的correctedSn - * chunk.sn(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); // fake read chunk1's snapshot1 metapage,chunk.sn==snap.sn FakeEncodeSnapshot(chunk1SnapMetaPage, 3); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -3533,11 +2879,9 @@ 
TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest9) { // fileSn == correctedSn SequenceNum fileSn = 2; // snapshot will not be closed - EXPECT_CALL(*lfs_, Close(2)) - .Times(0); + EXPECT_CALL(*lfs_, Close(2)).Times(0); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) - .Times(0); + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).Times(0); // chunk's metapage should not be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, metapagesize_)) @@ -3545,18 +2889,15 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest9) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnErrorTest - * case:修改correctedSn时失败 - * 预期结果:返回失败,correctedSn的值未改变 + * Case: Modifying correctedSn fails + * Expected result: Failure is returned; the value of correctedSn remains + * unchanged */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest1) { // initialize @@ -3582,18 +2923,15 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest1) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnErrorTest - * case:回收snapshot的chunk的时候失败 - * 预期结果:返回失败 + * Case: Recycling the snapshot's chunk fails + * Expected result: Failure is returned */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest2) { // initialize @@ -3605,11 +2943,9 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest2) { // fileSn > correctedSn SequenceNum fileSn = 3; // snapshot will be closed - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) - .WillOnce(Return(-1)); + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).WillOnce(Return(-1)); // chunk's metapage will be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, metapagesize_)) @@ -3617,26 +2953,26 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest2) { EXPECT_EQ(CSErrorCode::InternalError, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * CreateCloneChunkTest - * case1:指定的chunk不存在,输入错误的参数 - * 预期结果1:返回InvalidArgError - * case2:指定的chunk不存在,指定chunksize与配置一致 - * 预期结果2:创建成功 - * case3:指定的chunk存在,参数与原chunk一致 - * 预期结果3:返回成功 - * case4:指定的chunk存在,参数与原chunk不一致 - * 预期结果4:返回ChunkConflictError,不改变原chunk信息 - * case5:指定的chunk存在,指定chunksize与配置不一致 - * 预期结果5: 返回InvalidArgError,不改变原chunk信息 - * case6:指定的chunk存在,chunk不是clone chunk,参数与chunk信息一致 - * 预期结果:返回ChunkConflictError,不改变原chunk信息 + * Case1: The specified chunk does not exist, and incorrect parameters are + * passed in + * Expected result 1: InvalidArgError is returned + * Case2: The specified chunk does not exist, and the specified chunksize is + * consistent with the configuration + * Expected result 2: Creation succeeds + * Case3: The specified chunk exists, and the parameters are consistent with + * the original chunk + * Expected result 3: Success is returned + * Case4: The specified chunk exists, and the parameters are inconsistent + * with the original chunk + * Expected result 4: ChunkConflictError is returned; the original chunk + * information is not changed + * Case5: The specified chunk exists, but the specified chunksize is + * inconsistent with the configuration + * Expected result 5: InvalidArgError is returned; the original chunk + * information is not changed + * Case6: The specified chunk exists, the chunk is not a clone chunk, and the + * parameters are consistent with the chunk information + * Expected result 6: ChunkConflictError is returned; the original chunk + * information is not changed */ TEST_P(CSDataStore_test, CreateCloneChunkTest) { // initialize @@ -3652,58 +2988,44 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { shared_ptr bitmap = make_shared(chunksize_ / blocksize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); - // case1:输入错误的参数 + // Case1: Incorrect parameters are passed in { // size != chunksize EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - blocksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, blocksize_, location)); // sn == 0 EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->CreateCloneChunk(id, - 0, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, 0, correctedSn, chunksize_, location)); // location is empty - EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, - "")); + EXPECT_EQ( + CSErrorCode::InvalidArgError, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, "")); } - // case2:指定的chunk不存在,指定chunksize与配置一致 + // Case2: The specified chunk does not exist, and the specified chunksize + // is consistent with the configuration { // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); - // 检查生成的clone chunk信息 + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(id, info.chunkId); ASSERT_EQ(sn, info.curSn); @@ -3714,15 +3036,13 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case3:指定的chunk存在,参数与原chunk一致 + // Case3: The specified chunk exists, and the parameters are consistent + // with the original chunk { EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, 
sn, correctedSn, chunksize_, location)); - // 检查生成的clone chunk信息 + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(id, info.chunkId); ASSERT_EQ(sn, info.curSn); @@ -3733,31 +3053,23 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case4:指定的chunk存在,参数与原chunk不一致 - // 返回ChunkConflictError,但是不会改变原chunk信息 + // Case4: The specified chunk exists, and the parameters are inconsistent + // with the original chunk. ChunkConflictError is returned, but the + // original chunk information is not changed { - // 版本不一致 + // Inconsistent version EXPECT_EQ(CSErrorCode::ChunkConflictError, - dataStore->CreateCloneChunk(id, - sn + 1, - correctedSn, - chunksize_, - location)); - // correctedSn不一致 + dataStore->CreateCloneChunk(id, sn + 1, correctedSn, + chunksize_, location)); + // Inconsistent correctedSn EXPECT_EQ(CSErrorCode::ChunkConflictError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn + 1, - chunksize_, - location)); - // location不一致 + dataStore->CreateCloneChunk(id, sn, correctedSn + 1, + chunksize_, location)); + // Inconsistent location EXPECT_EQ(CSErrorCode::ChunkConflictError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, "temp")); - // 检查生成的clone chunk信息 + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(id, info.chunkId); ASSERT_EQ(sn, info.curSn); @@ -3768,16 +3080,15 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case5:指定的chunk存在,指定chunksize与配置不一致 - // 返回InvalidArgError,但是不会改变原chunk信息 + // Case5: The specified chunk exists, but the specified chunksize is + // inconsistent with the configuration. InvalidArgError is returned, but + // the original chunk information is not changed { - EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_ + metapagesize_, - location)); - // 检查生成的clone chunk信息 + EXPECT_EQ( + CSErrorCode::InvalidArgError, + dataStore->CreateCloneChunk(id, sn, correctedSn, + chunksize_ + metapagesize_, location)); + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(id, info.chunkId); ASSERT_EQ(sn, info.curSn); @@ -3788,39 +3099,33 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case6:已存在chunk,chunk不是clone chunk + // Case6: The chunk already exists, and it is not a clone chunk { - // location 为空 + // location is empty EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->CreateCloneChunk(1, // id 2, // sn 0, // correctedSn - chunksize_, - "")); + chunksize_, "")); - // location 不为空 + // location is not empty EXPECT_EQ(CSErrorCode::ChunkConflictError, dataStore->CreateCloneChunk(1, // id 2, // sn 0, // correctedSn - chunksize_, - location)); + chunksize_, location)); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); } /** * CreateCloneChunkErrorTest - * case:chunk不存在,调chunkFile->Open的时候失败 - * 预期结果:创建clone chunk失败 + * Case: The chunk does not exist, and the call to chunkFile->Open fails
* Expected result: Creating the clone chunk fails */ TEST_P(CSDataStore_test, CreateCloneChunkErrorTest) { // initialize @@ -3832,47 +3137,40 @@ TEST_P(CSDataStore_test, CreateCloneChunkErrorTest) { SequenceNum correctedSn = 2; CSChunkInfo info; // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunk file pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(-UT_ERRNO)); - EXPECT_EQ(CSErrorCode::InternalError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, - location)); - // 检查生成的clone chunk信息 + EXPECT_EQ( + CSErrorCode::InternalError, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::ChunkNotExistError, dataStore->GetChunkInfo(id, &info)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * PasteChunkTedt - * case1:chunk 不存在 - * 预期结果1:返回ChunkNotExistError - * case2:chunk存在,请求偏移超过chunk文件大小或偏移长度未对齐 - * 预期结果2:返回InvalidArgError - * case3:chunk存在,但不是clone chunk - * 预期结果3:返回成功 - * case4:chunk存在,且是clone chunk,写入区域之前未写过 - * 预期结果4:写入数据并更新bitmap - * case5:chunk存在,且是clone chunk,写入区域之前已写过 - * 预期结果5:无数据写入,且不会更新bitmap - * case6:chunk存在,且是clone chunk,部分区域已写过,部分未写过 - * 预期结果6:只写入未写过数据,并更新bitmap - * case7:遍写整个chunk - * 预期结果7:数据写入未写过区域,然后clone chunk会被转为普通chunk + * Case1: The chunk does not exist + * Expected result 1: ChunkNotExistError is returned + * Case2: The chunk exists, but the requested offset exceeds the chunk file + * size or the offset/length is not aligned + * Expected result 2: InvalidArgError is returned + * Case3: The chunk exists, but it is not a clone chunk + * Expected result 3: Success is returned + * Case4: The chunk exists and is a clone chunk, and the target region has + * not been written before + * Expected result 4: The data is written and the bitmap is updated + * Case5: The chunk exists and is a clone chunk, and the target region has + * been written before + * Expected result 5: No data is written and the bitmap is not updated + * Case6: The chunk exists and is a clone chunk; some regions have been + * written and some have not + * Expected result 6: Only the unwritten data is written, and the bitmap is + * updated + * Case7: The entire chunk is written over + * Expected result 7: The data is written to the unwritten regions, and the + * clone chunk is then converted to a normal chunk */ TEST_P(CSDataStore_test, PasteChunkTest1) { // initialize @@ -3887,7 +3185,7 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { char* buf = new char[length]; memset(buf, 0, length); CSChunkInfo info; - // 创建 clone chunk + // Create a clone chunk { char chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); @@ -3895,90 +3193,68 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { make_shared(chunksize_ / blocksize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); } - // case1:chunk 不存在 + // Case1: The chunk does not exist { id = 4; // not exist ASSERT_EQ(CSErrorCode::ChunkNotExistError, - dataStore->PasteChunk(id, - buf, - offset, - length)); + dataStore->PasteChunk(id, buf, offset, length)); } - // case2:chunk存在,请求偏移超过chunk文件大小或偏移长度未对齐 + // Case2: The chunk exists, but the requested offset exceeds the chunk + // file size or the offset/length is not aligned { id = 3; // not exist offset = chunksize_; ASSERT_EQ(CSErrorCode::InvalidArgError, - dataStore->PasteChunk(id, - buf, - offset, - length)); + dataStore->PasteChunk(id, buf, offset, length)); offset = blocksize_ - 1; length = blocksize_; ASSERT_EQ(CSErrorCode::InvalidArgError, - dataStore->PasteChunk(id, - buf, - offset, - length)); + dataStore->PasteChunk(id, buf, offset, length)); offset = blocksize_; length = blocksize_ + 1; ASSERT_EQ(CSErrorCode::InvalidArgError, - dataStore->PasteChunk(id, - buf, - offset, - length)); + dataStore->PasteChunk(id, buf, offset, length)); } - // case3:chunk存在,但不是clone chunk + // Case3: The chunk exists, but it is not a clone chunk { EXPECT_CALL(*lfs_, Write(_, Matcher(NotNull()), _, _)) .Times(0); - // 快照不存在 + // The snapshot does not exist id = 2; offset = 0; length = blocksize_; ASSERT_EQ(CSErrorCode::Success, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // 快照存在 + dataStore->PasteChunk(id, buf, offset, length)); + // The snapshot exists id = 1; offset = 0; ASSERT_EQ(CSErrorCode::Success, - dataStore->PasteChunk(id, - buf, - offset, - length)); + dataStore->PasteChunk(id, buf, offset, length)); } - // case4:chunk存在,且是clone chunk,写入区域之前未写过 + // Case4: The chunk exists and is a clone chunk, and the target region + // has not been written before { id = 3; // not exist offset 
= blocksize_; @@ -3991,11 +3267,8 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(1); ASSERT_EQ(CSErrorCode::Success, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // 检查paste后chunk的状态 + dataStore->PasteChunk(id, buf, offset, length)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0)); @@ -4003,7 +3276,8 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case5:chunk存在,且是clone chunk,写入区域之前已写过 + // Case5: chunk exists and is a clone chunk, which has been written before + // writing to the region { id = 3; // not exist offset = blocksize_; @@ -4015,23 +3289,22 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(0); ASSERT_EQ(CSErrorCode::Success, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // paste后,chunk的状态不变 + dataStore->PasteChunk(id, buf, offset, length)); + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0)); ASSERT_EQ(3, info.bitmap->NextClearBit(1)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case6:chunk存在,且是clone chunk,部分区域已写过,部分未写过 + // Case6: chunk exists and is a clone chunk. Some areas have been written, + // while others have not { id = 3; // not exist offset = 0; length = 4 * blocksize_; - // [2 * blocksize_, 4 * blocksize_)区域已写过,[0, blocksize_)为metapage + // [2 * blocksize_, 4 * blocksize_) area has been written, [0, + // blocksize_) is a metapage EXPECT_CALL(*lfs_, Write(4, Matcher(NotNull()), metapagesize_, blocksize_)) .Times(1); @@ -4043,21 +3316,21 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { .Times(1); ASSERT_EQ(CSErrorCode::Success, dataStore->PasteChunk(id, buf, offset, length)); - // paste后,chunk的状态不变 + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(4, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(4)); } - // case7:遍写整个chunk + // Case7: Overwrite the entire chunk { id = 3; // not exist offset = 0; length = chunksize_; - // [blocksize_, 4 * blocksize_)区域已写过,[0, blocksize_)为metapage - EXPECT_CALL(*lfs_, Write(4, - Matcher(NotNull()), + // [blocksize_, 4 * blocksize_) area has been written, [0, blocksize_) + // is a metapage + EXPECT_CALL(*lfs_, Write(4, Matcher(NotNull()), metapagesize_ + 4 * blocksize_, chunksize_ - 4 * blocksize_)) .Times(1); @@ -4065,33 +3338,26 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(1); ASSERT_EQ(CSErrorCode::Success, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // paste后,chunk的状态不变 + dataStore->PasteChunk(id, buf, offset, length)); + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, 
Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /* * PasteChunkErrorTest - * case1:写数据时失败 - * 预期结果1:返回InternalError,chunk状态不变 - * case2:更新metapage时失败 - * 预期结果2:返回InternalError,chunk状态不变 + * Case1: Failed to write data + * Expected result 1: InternalError returned, chunk status remains unchanged + * Case2: Failed to update metapage + * Expected result 2: InternalError returned, chunk status remains unchanged */ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { // initialize @@ -4106,7 +3372,7 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { char* buf = new char[length]; // NOLINT memset(buf, 0, length); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { char chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); @@ -4114,29 +3380,23 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { make_shared(chunksize_ / blocksize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); } - // case1:写数据时失败 + // Case1: Failed to write data { id = 3; // not exist offset = blocksize_; @@ -4149,16 +3409,13 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(0); ASSERT_EQ(CSErrorCode::InternalError, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // 检查paste后chunk的状态 + dataStore->PasteChunk(id, buf, offset, length)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case2:更新metapage时失败 + // Case2: Failed to update metapage { id = 3; // not exist offset = blocksize_; @@ -4171,29 +3428,22 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); ASSERT_EQ(CSErrorCode::InternalError, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // 检查paste后chunk的状态 + dataStore->PasteChunk(id, buf, offset, length)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + 
EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /* - * chunk不存在 + * Chunk does not exist */ TEST_P(CSDataStore_test, GetHashErrorTest1) { // initialize @@ -4205,21 +3455,15 @@ TEST_P(CSDataStore_test, GetHashErrorTest1) { // test chunk not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, - dataStore->GetChunkHash(id, - 0, - 4096, - &hash)); + dataStore->GetChunkHash(id, 0, 4096, &hash)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /* - * read报错 + * Read error */ TEST_P(CSDataStore_test, GetHashErrorTest2) { // initialize @@ -4231,23 +3475,16 @@ TEST_P(CSDataStore_test, GetHashErrorTest2) { off_t offset = 0; size_t length = metapagesize_ + chunksize_; // test read chunk failed - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, 4096)) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, 4096)).WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->GetChunkHash(id, - 0, - 4096, - &hash)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + dataStore->GetChunkHash(id, 0, 4096, &hash)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /* - * 获取datastore状态测试 + * Obtain Datastore Status Test */ TEST_P(CSDataStore_test, GetStatusTest) { // initialize @@ -4259,17 +3496,13 @@ TEST_P(CSDataStore_test, GetStatusTest) { ASSERT_EQ(2, status.chunkFileCount); // ASSERT_EQ(1, status.snapshotCount); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } INSTANTIATE_TEST_CASE_P( - CSDataStoreTest, - CSDataStore_test, + CSDataStoreTest, CSDataStore_test, ::testing::Values( // chunk size block size, metapagesize std::make_tuple(16U * 1024 * 1024, 4096U, 4096U), diff --git a/test/chunkserver/datastore/file_helper_unittest.cpp b/test/chunkserver/datastore/file_helper_unittest.cpp index 0f7ca39b95..359d7303d7 100644 --- a/test/chunkserver/datastore/file_helper_unittest.cpp +++ b/test/chunkserver/datastore/file_helper_unittest.cpp @@ -20,10 +20,11 @@ * Author: yangyaokai */ -#include #include -#include +#include + #include +#include #include "src/chunkserver/datastore/datastore_file_helper.h" #include "test/fs/mock_local_filesystem.h" @@ -32,17 +33,17 @@ using curve::fs::LocalFileSystem; using curve::fs::MockLocalFileSystem; using ::testing::_; +using ::testing::DoAll; +using ::testing::ElementsAre; using ::testing::Ge; using ::testing::Gt; -using ::testing::Return; -using ::testing::NotNull; using ::testing::Mock; -using ::testing::Truly; -using ::testing::DoAll; +using ::testing::NotNull; +using ::testing::Return; using ::testing::ReturnArg; -using ::testing::ElementsAre; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; +using ::testing::Truly; namespace curve { namespace chunkserver { @@ -54,6 +55,7 @@ class FileHelper_MockTest : public testing::Test { fileHelper_ = std::make_shared(fs_); } void TearDown() {} + protected: std::shared_ptr fs_; std::shared_ptr fileHelper_; @@ -64,29 +66,26 @@ TEST_F(FileHelper_MockTest, ListFilesTest) { vector chunkFiles; vector snapFiles; - // 
case1:List失败,返回-1 - EXPECT_CALL(*fs_, List(_, _)) - .WillOnce(Return(-1)); + // Case1: List failed, returned -1 + EXPECT_CALL(*fs_, List(_, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, fileHelper_->ListFiles(baseDir, &chunkFiles, &snapFiles)); - // 如果返回ENOENT错误,直接返回成功 - EXPECT_CALL(*fs_, List(_, _)) - .WillOnce(Return(-ENOENT)); + // If an ENOENT error is returned, success is returned directly + EXPECT_CALL(*fs_, List(_, _)).WillOnce(Return(-ENOENT)); ASSERT_EQ(0, fileHelper_->ListFiles(baseDir, &chunkFiles, &snapFiles)); vector files; string chunk1 = "chunk_1"; string chunk2 = "chunk_2"; string snap1 = "chunk_1_snap_1"; - string other = "chunk_1_S"; // 非法文件名 + string other = "chunk_1_S"; // Illegal file name files.emplace_back(chunk1); files.emplace_back(chunk2); files.emplace_back(snap1); files.emplace_back(other); EXPECT_CALL(*fs_, List(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); - // case2:List成功,返回chunk文件和snapshot文件 + // Case2: List successful, returning chunk file and snapshot file ASSERT_EQ(0, fileHelper_->ListFiles(baseDir, &chunkFiles, &snapFiles)); ASSERT_EQ(2, chunkFiles.size()); ASSERT_STREQ(chunk1.c_str(), chunkFiles[0].c_str()); @@ -94,7 +93,7 @@ TEST_F(FileHelper_MockTest, ListFilesTest) { ASSERT_EQ(1, snapFiles.size()); ASSERT_STREQ(snap1.c_str(), snapFiles[0].c_str()); - // case3:允许vector为空指针 + // Case3: Allow vector to be a null pointer ASSERT_EQ(0, fileHelper_->ListFiles(baseDir, nullptr, nullptr)); } diff --git a/test/chunkserver/datastore/filepool_mock_unittest.cpp b/test/chunkserver/datastore/filepool_mock_unittest.cpp index f9fc0502e1..2db8eba70d 100644 --- a/test/chunkserver/datastore/filepool_mock_unittest.cpp +++ b/test/chunkserver/datastore/filepool_mock_unittest.cpp @@ -20,36 +20,37 @@ * Author: yangyaokai */ +#include #include #include #include -#include + #include #include #include "include/chunkserver/chunkserver_common.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/common/crc32.h" #include "src/common/curve_define.h" -#include "src/chunkserver/datastore/file_pool.h" #include "test/fs/mock_local_filesystem.h" using ::testing::_; +using ::testing::DoAll; +using ::testing::ElementsAre; using ::testing::Ge; using ::testing::Gt; -using ::testing::Return; -using ::testing::NotNull; +using ::testing::Invoke; using ::testing::Matcher; using ::testing::Mock; -using ::testing::Truly; -using ::testing::DoAll; +using ::testing::NotNull; +using ::testing::Return; using ::testing::ReturnArg; -using ::testing::Invoke; -using ::testing::ElementsAre; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; +using ::testing::Truly; -using curve::fs::MockLocalFileSystem; using curve::common::kFilePoolMagic; +using curve::fs::MockLocalFileSystem; namespace curve { namespace chunkserver { @@ -59,10 +60,10 @@ const PageSizeType PAGE_SIZE = 4096; const uint32_t metaFileSize = 4096; const uint32_t blockSize = 4096; const uint32_t fileSize = CHUNK_SIZE + PAGE_SIZE; -const std::string poolDir = "./chunkfilepool_dat"; // NOLINT +const std::string poolDir = "./chunkfilepool_dat"; // NOLINT const std::string poolMetaPath = "./chunkfilepool_dat.meta"; // NOLINT -const std::string filePath1 = poolDir + "/1"; // NOLINT -const std::string targetPath = "./data/chunk_1"; // NOLINT +const std::string filePath1 = poolDir + "/1"; // NOLINT +const std::string targetPath = "./data/chunk_1"; // NOLINT const char* kChunkSize = "chunkSize"; const char* kMetaPageSize = "metaPageSize"; 
const char* kChunkFilePoolPath = "chunkfilepool_path"; @@ -71,14 +72,12 @@ const char* kBlockSize = "blockSize"; class CSChunkfilePoolMockTest : public testing::Test { public: - void SetUp() { - lfs_ = std::make_shared(); - } + void SetUp() { lfs_ = std::make_shared(); } void TearDown() {} static Json::Value GenerateMetaJson(bool hasBlockSize = false) { - // 正常的meta文件的json格式 + // JSON format for normal meta files FilePoolMeta meta; meta.chunkSize = CHUNK_SIZE; meta.metaPageSize = PAGE_SIZE; @@ -102,8 +101,7 @@ class CSChunkfilePoolMockTest : public testing::Test { } void FakeMetaFile() { - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(100)); + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(100)); EXPECT_CALL(*lfs_, Read(100, NotNull(), 0, metaFileSize)) .WillOnce(Invoke( [this](int /*fd*/, char* buf, uint64_t offset, int length) { @@ -116,12 +114,10 @@ class CSChunkfilePoolMockTest : public testing::Test { return metaFileSize; })); - EXPECT_CALL(*lfs_, Close(100)) - .Times(1); + EXPECT_CALL(*lfs_, Close(100)).Times(1); } - void FakePool(FilePool* pool, - const FilePoolOptions& options, + void FakePool(FilePool* pool, const FilePoolOptions& options, uint32_t fileNum) { if (options.getFileFromPool) { FakeMetaFile(); @@ -132,27 +128,20 @@ class CSChunkfilePoolMockTest : public testing::Test { std::string name = std::to_string(i); std::string filePath = poolDir + "/" + name; fileNames.push_back(name); - EXPECT_CALL(*lfs_, FileExists(filePath)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(filePath, _)) - .WillOnce(Return(i)); + EXPECT_CALL(*lfs_, FileExists(filePath)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(filePath, _)).WillOnce(Return(i)); EXPECT_CALL(*lfs_, Fstat(i, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(i)) - .Times(1); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_CALL(*lfs_, Close(i)).Times(1); } - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); ASSERT_EQ(true, pool->Initialize(options)); ASSERT_EQ(fileNum, pool->Size()); } else { - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); ASSERT_EQ(true, pool->Initialize(options)); } } @@ -161,7 +150,7 @@ class CSChunkfilePoolMockTest : public testing::Test { std::shared_ptr lfs_; }; -// PersistEnCodeMetaInfo接口的异常测试 +// Exception testing for PersistEnCodeMetaInfo interface TEST_F(CSChunkfilePoolMockTest, PersistEnCodeMetaInfoTest) { FilePoolMeta meta; meta.chunkSize = CHUNK_SIZE; @@ -169,81 +158,67 @@ TEST_F(CSChunkfilePoolMockTest, PersistEnCodeMetaInfoTest) { meta.hasBlockSize = false; meta.filePoolPath = poolDir; - // open失败 + // open failed { - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Write(_, Matcher(_), _, _)) - .Times(0); - EXPECT_CALL(*lfs_, Close(_)) - .Times(0); + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Write(_, Matcher(_), _, _)).Times(0); + EXPECT_CALL(*lfs_, Close(_)).Times(0); ASSERT_EQ(-1, FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta, poolMetaPath)); } - // open成功,write失败 + // open successful, write failed { - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(poolMetaPath, 
_)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, 4096)) .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); ASSERT_EQ(-1, FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta, poolMetaPath)); } - // open成功,write成功 + // open successful, write successful { - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, 4096)) .WillOnce(Return(4096)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); ASSERT_EQ( 0, FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta, poolMetaPath)); } } -// DecodeMetaInfoFromMetaFile接口的异常测试 +// Exception testing for DecodeMetaInfoFromMetaFile interface TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { FilePoolMeta meta; - // open失败 + // open failed { - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Read(_, _, _, _)) - .Times(0); - EXPECT_CALL(*lfs_, Close(_)) - .Times(0); + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Read(_, _, _, _)).Times(0); + EXPECT_CALL(*lfs_, Close(_)).Times(0); ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // read失败 + // read failed { - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // read成功,解析Json格式失败 + // read successful, parsing Json format failed { char buf[metaFileSize] = {0}; - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // 解析Json格式成功,chunksize为空 + // parsing Json format succeeded, chunksize is empty { char buf[metaFileSize] = {0}; Json::Value root = GenerateMetaJson(); @@ -251,17 +226,15 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { memcpy(buf, root.toStyledString().c_str(), root.toStyledString().size()); - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // 解析Json格式成功,metapagesize为空 + // parsing Json format succeeded, metapagesize is empty { char buf[metaFileSize] = {0}; Json::Value root = GenerateMetaJson(); @@ -269,17 +242,15 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { memcpy(buf, root.toStyledString().c_str(), root.toStyledString().size()); - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, 
Open(poolMetaPath, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // 解析Json格式成功,kFilePoolPath为空 + // parsing Json format succeeded, kFilePoolPath is empty { char buf[metaFileSize] = {0}; Json::Value root = GenerateMetaJson(); @@ -287,17 +258,15 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { memcpy(buf, root.toStyledString().c_str(), root.toStyledString().size()); - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // 解析Json格式成功,kCRC为空 + // Successfully parsed Json format, kCRC is empty { char buf[metaFileSize] = {0}; Json::Value root = GenerateMetaJson(); @@ -305,17 +274,15 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { memcpy(buf, root.toStyledString().c_str(), root.toStyledString().size()); - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // 解析Json格式成功,crc不匹配 + // Successfully parsed Json format, crc mismatch { char buf[metaFileSize] = {0}; Json::Value root = GenerateMetaJson(); @@ -323,55 +290,49 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { memcpy(buf, root.toStyledString().c_str(), root.toStyledString().size()); - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // 正常流程 + // Normal process { char buf[metaFileSize] = {0}; Json::Value root = GenerateMetaJson(); memcpy(buf, root.toStyledString().c_str(), root.toStyledString().size()); - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); ASSERT_EQ(0, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // 正常流程 + // Normal process { char buf[metaFileSize] = {0}; Json::Value root = GenerateMetaJson(true); memcpy(buf, root.toStyledString().c_str(), root.toStyledString().size()); - EXPECT_CALL(*lfs_, 
Open(poolMetaPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); ASSERT_EQ(0, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } } TEST_F(CSChunkfilePoolMockTest, InitializeTest) { - // 初始化options + // Initialize options FilePoolOptions options; options.getFileFromPool = true; memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); @@ -381,190 +342,154 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { options.metaFileSize = metaFileSize; options.retryTimes = 3; - /****************getFileFromPool为true**************/ - // checkvalid时失败 + /****************getFileFromPool is true**************/ + // Failure during checkvalid { - // DecodeMetaInfoFromMetaFile在上面已经单独测试过了 - // 这里选上面中的一组异常用例来检验即可 - // 解析json格式失败 + // DecodeMetaInfoFromMetaFile has already been tested separately above + // Here it is enough to pick one of the exception cases above to + // verify: parsing the JSON format fails FilePool pool(lfs_); char buf[metaFileSize] = {0}; - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); ASSERT_EQ(false, pool.Initialize(options)); } - // getFileFromPool为true,checkvalid成功,当前目录不存在 + // getFileFromPool is true, checkvalid succeeds, but the current directory + // does not exist { FilePool pool(lfs_); FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(false)); ASSERT_EQ(false, pool.Initialize(options)); } - // 当前目录存在,list目录失败 + // The current directory exists, but listing the directory fails { FilePool pool(lfs_); FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, List(_, _)).WillOnce(Return(-1)); ASSERT_EQ(false, pool.Initialize(options)); } - // list目录成功,文件名中包含非数字字符 + // Listing the directory succeeds, but a file name contains non-numeric + // characters { FilePool pool(lfs_); FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); std::vector fileNames; fileNames.push_back("aaa"); EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); ASSERT_EQ(false, pool.Initialize(options)); } - // list目录成功,目录中包含非普通文件类型的对象 + // Listing the directory succeeds, but it contains objects that are not + // regular files { FilePool pool(lfs_); FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); std::vector fileNames; fileNames.push_back("1"); EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(filePath1)) - .WillOnce(Return(false)); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(filePath1)).WillOnce(Return(false)); ASSERT_EQ(false, 
pool.Initialize(options)); } - // list目录成功,open文件时失败 + // list directory successful, open file failed { FilePool pool(lfs_); FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); std::vector fileNames; fileNames.push_back("1"); EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(filePath1)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(filePath1, _)) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(filePath1)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(filePath1, _)).WillOnce(Return(-1)); ASSERT_EQ(false, pool.Initialize(options)); } - // stat文件信息时失败 + // Failed to retrieve stat file information { FilePool pool(lfs_); FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); std::vector fileNames; fileNames.push_back("1"); EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(filePath1)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(filePath1, _)) - .WillOnce(Return(2)); - EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(filePath1)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(filePath1, _)).WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Fstat(2, NotNull())).WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Close(2)).Times(1); ASSERT_EQ(false, pool.Initialize(options)); } - // stat文件信息成功,文件大小不匹配 + // stat file information successful, file size mismatch { FilePool pool(lfs_); FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); std::vector fileNames; fileNames.push_back("1"); EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(filePath1)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(filePath1, _)) - .WillOnce(Return(2)); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(filePath1)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(filePath1, _)).WillOnce(Return(2)); struct stat fileInfo; fileInfo.st_size = CHUNK_SIZE; EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_CALL(*lfs_, Close(2)).Times(1); ASSERT_EQ(false, pool.Initialize(options)); } - // 文件信息匹配 + // File information matching { FilePool pool(lfs_); FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); std::vector fileNames; fileNames.push_back("1"); EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(filePath1)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(filePath1, _)) - .WillOnce(Return(2)); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(filePath1)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(filePath1, _)).WillOnce(Return(2)); struct stat fileInfo; fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE; EXPECT_CALL(*lfs_, Fstat(2, 
NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_CALL(*lfs_, Close(2)).Times(1); ASSERT_EQ(true, pool.Initialize(options)); ASSERT_EQ(1, pool.Size()); } - /****************getFileFromPool为false**************/ + /****************getFileFromPool is false**************/ options.getFileFromPool = false; - // 当前目录不存在,创建目录失败 + // The current directory does not exist, creating directory failed { FilePool pool(lfs_); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(false)); - EXPECT_CALL(*lfs_, Mkdir(_)) - .WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(false)); + EXPECT_CALL(*lfs_, Mkdir(_)).WillOnce(Return(-1)); ASSERT_EQ(false, pool.Initialize(options)); } - // 当前目录不存在,创建目录成功 + // The current directory does not exist, creating the directory succeeded { FilePool pool(lfs_); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(false)); - EXPECT_CALL(*lfs_, Mkdir(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(false)); + EXPECT_CALL(*lfs_, Mkdir(_)).WillOnce(Return(0)); ASSERT_EQ(true, pool.Initialize(options)); } - // 当前目录存在 + // The current directory exists { FilePool pool(lfs_); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); ASSERT_EQ(true, pool.Initialize(options)); } } TEST_F(CSChunkfilePoolMockTest, GetFileTest) { - // 初始化options + // Initialize options FilePoolOptions options; options.getFileFromPool = true; memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); @@ -577,26 +502,25 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { char metapage[PAGE_SIZE] = {0}; - /****************getFileFromPool为true**************/ - // 没有剩余chunk的情况 + /****************getFileFromPool is true**************/ + // There is no remaining chunk situation { FilePool pool(lfs_); FakePool(&pool, options, 0); ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } - // 存在chunk,open时失败 + // Chunk present, open failed { FilePool pool(lfs_); FakePool(&pool, options, 10); EXPECT_CALL(*lfs_, Open(_, _)) .Times(retryTimes) .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(_)) - .Times(0); + EXPECT_CALL(*lfs_, Close(_)).Times(0); ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); ASSERT_EQ(10 - retryTimes, pool.Size()); } - // 存在chunk,write时失败 + // Chunk exists, write failed { FilePool pool(lfs_); FakePool(&pool, options, 10); @@ -606,12 +530,11 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) .Times(retryTimes) .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes); + EXPECT_CALL(*lfs_, Close(1)).Times(retryTimes); ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); ASSERT_EQ(10 - retryTimes, pool.Size()); } - // 存在chunk,fsync时失败 + // Chunk present, fsync failed { FilePool pool(lfs_); FakePool(&pool, options, 10); @@ -624,12 +547,11 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { EXPECT_CALL(*lfs_, Fsync(1)) .Times(retryTimes) .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes); + EXPECT_CALL(*lfs_, Close(1)).Times(retryTimes); ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); ASSERT_EQ(10 - retryTimes, pool.Size()); } - // 存在chunk,close时失败 + // Chunk exists, closing failed { FilePool pool(lfs_); FakePool(&pool, options, 10); @@ -648,24 +570,20 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { ASSERT_EQ(-1, pool.GetFile(targetPath, 
metapage)); ASSERT_EQ(10 - retryTimes, pool.Size()); } - // 存在chunk,rename时返回EEXIST错误 + // A chunk exists, and rename returns an EEXIST error { FilePool pool(lfs_); FakePool(&pool, options, 10); - EXPECT_CALL(*lfs_, Open(_, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(_, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) .WillOnce(Return(PAGE_SIZE)); - EXPECT_CALL(*lfs_, Fsync(1)) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Rename(_, _, _)) - .WillOnce(Return(-EEXIST)); + EXPECT_CALL(*lfs_, Fsync(1)).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Rename(_, _, _)).WillOnce(Return(-EEXIST)); ASSERT_EQ(-EEXIST, pool.GetFile(targetPath, metapage)); ASSERT_EQ(9, pool.Size()); } - // 存在chunk,rename时返回非EEXIST错误 + // A chunk exists, and rename returns a non-EEXIST error { FilePool pool(lfs_); FakePool(&pool, options, 10); @@ -687,38 +605,33 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); ASSERT_EQ(10 - retryTimes, pool.Size()); } - // 存在chunk,rename成功 + // A chunk exists, and rename succeeds { FilePool pool(lfs_); FakePool(&pool, options, 10); - EXPECT_CALL(*lfs_, Open(_, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(_, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) .WillOnce(Return(PAGE_SIZE)); - EXPECT_CALL(*lfs_, Fsync(1)) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Rename(_, _, _)) - .WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Fsync(1)).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Rename(_, _, _)).WillOnce(Return(0)); ASSERT_EQ(0, pool.GetFile(targetPath, metapage)); ASSERT_EQ(9, pool.Size()); } options.getFileFromPool = false; - /****************getFileFromPool为false**************/ - // open 时失败 + /****************getFileFromPool is false**************/ + // Failure during open { FilePool pool(lfs_); FakePool(&pool, options, 0); EXPECT_CALL(*lfs_, Open(_, _)) .Times(retryTimes) .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(0); + EXPECT_CALL(*lfs_, Close(1)).Times(0); ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } - // fallocate 时失败 + // Failure during fallocate { FilePool pool(lfs_); FakePool(&pool, options, 0); @@ -728,11 +641,10 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize)) .Times(retryTimes) .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes); + EXPECT_CALL(*lfs_, Close(1)).Times(retryTimes); ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } - // write 时失败 + // Failure during write { FilePool pool(lfs_); FakePool(&pool, options, 0); @@ -746,11 +658,10 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { Write(1, Matcher(NotNull()), 0, fileSize)) .Times(retryTimes) .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes); + EXPECT_CALL(*lfs_, Close(1)).Times(retryTimes); ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } - // fsync 时失败 + // Failure during fsync { FilePool pool(lfs_); FakePool(&pool, options, 0); @@ -767,11 +678,10 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { EXPECT_CALL(*lfs_, Fsync(1)) .Times(retryTimes) .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes); + EXPECT_CALL(*lfs_, Close(1)).Times(retryTimes); ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } - // close 时失败 + // Failure during close { 
FilePool pool(lfs_); FakePool(&pool, options, 0); @@ -796,7 +706,7 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { } TEST_F(CSChunkfilePoolMockTest, RecycleFileTest) { - // 初始化options + // Initialize options FilePoolOptions options; options.getFileFromPool = true; memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); @@ -807,149 +717,119 @@ TEST_F(CSChunkfilePoolMockTest, RecycleFileTest) { int retryTimes = 3; options.retryTimes = retryTimes; - /****************getFileFromPool为false**************/ + /****************getFileFromPool is false**************/ options.getFileFromPool = false; - // delete文件时失败 + // Failure when deleting the file { FilePool pool(lfs_); FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Delete(filePath1)) - .WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Delete(filePath1)).WillOnce(Return(-1)); ASSERT_EQ(-1, pool.RecycleFile(filePath1)); } - // delete文件成功 + // Deleting the file succeeds { FilePool pool(lfs_); FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Delete(filePath1)) - .WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Delete(filePath1)).WillOnce(Return(0)); ASSERT_EQ(0, pool.RecycleFile(filePath1)); } /****************getFileFromPool is true**************/ options.getFileFromPool = true; - // open失败 + // open fails { FilePool pool(lfs_); FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(-1)); - // 失败直接Delete - EXPECT_CALL(*lfs_, Delete(targetPath)) - .WillOnce(Return(0)); - // Delete 成功就返回0 + EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(-1)); + // On failure, Delete is called directly + EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(0)); + // If Delete succeeds, 0 is returned ASSERT_EQ(0, pool.RecycleFile(targetPath)); - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(-1)); - // 失败直接Delete - EXPECT_CALL(*lfs_, Delete(targetPath)) - .WillOnce(Return(-1)); - // Delete 失败就返回错误码 + EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(-1)); + // On failure, Delete is called directly + EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(-1)); + // If Delete fails, the error code is returned ASSERT_EQ(-1, pool.RecycleFile(targetPath)); } - // Fstat失败 + // Fstat fails { FilePool pool(lfs_); FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Fstat(1, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - // 失败直接Delete - EXPECT_CALL(*lfs_, Delete(targetPath)) - .WillOnce(Return(0)); - // Delete 成功就返回0 + EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Fstat(1, _)).WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + // On failure, Delete is called directly + EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(0)); + // If Delete succeeds, 0 is returned ASSERT_EQ(0, pool.RecycleFile(targetPath)); - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Fstat(1, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - // 失败直接Delete - EXPECT_CALL(*lfs_, Delete(targetPath)) - .WillOnce(Return(-1)); - // Delete 失败就返回错误码 + EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Fstat(1, _)).WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + // On failure, Delete is called directly + EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(-1)); + // If Delete fails, the error code is returned ASSERT_EQ(-1, pool.RecycleFile(targetPath)); } - // Fstat成功,大小不匹配 + // Fstat succeeds, but the size does not match 
successful, size mismatch { FilePool pool(lfs_); FakePool(&pool, options, 0); struct stat fileInfo; fileInfo.st_size = CHUNK_SIZE; - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Fstat(1, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - // 失败直接Delete - EXPECT_CALL(*lfs_, Delete(targetPath)) - .WillOnce(Return(0)); - // Delete 成功就返回0 + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + // On failure, Delete directly + EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(0)); + // If Delete is successful, return 0 ASSERT_EQ(0, pool.RecycleFile(targetPath)); - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Fstat(1, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - // 失败直接Delete - EXPECT_CALL(*lfs_, Delete(targetPath)) - .WillOnce(Return(-1)); - // Delete 失败就返回错误码 + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + // On failure, Delete directly + EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(-1)); + // If Delete fails, an error code will be returned ASSERT_EQ(-1, pool.RecycleFile(targetPath)); } - // Fstat信息匹配,rename失败 + // Fstat information matches, rename failed { FilePool pool(lfs_); FakePool(&pool, options, 0); struct stat fileInfo; fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE; - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Fstat(1, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Rename(_, _, _)) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Rename(_, _, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, pool.RecycleFile(targetPath)); ASSERT_EQ(0, pool.Size()); } - // Fstat信息匹配,rename成功 + // Fstat information matches, rename successful { FilePool pool(lfs_); FakePool(&pool, options, 0); struct stat fileInfo; fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE; - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1)); EXPECT_CALL(*lfs_, Fstat(1, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Rename(_, _, _)) - .WillOnce(Return(0)); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Rename(_, _, _)).WillOnce(Return(0)); ASSERT_EQ(0, pool.RecycleFile(targetPath)); ASSERT_EQ(1, pool.Size()); } diff --git a/test/chunkserver/datastore/filepool_unittest.cpp b/test/chunkserver/datastore/filepool_unittest.cpp index 480f6da72a..c61db4b17c 100644 --- a/test/chunkserver/datastore/filepool_unittest.cpp +++ b/test/chunkserver/datastore/filepool_unittest.cpp @@ -51,9 +51,9 @@ using ::testing::StrEq; using curve::chunkserver::FilePool; using curve::chunkserver::FilePoolHelper; +using curve::chunkserver::FilePoolMeta; using curve::chunkserver::FilePoolOptions; using curve::chunkserver::FilePoolState; -using curve::chunkserver::FilePoolMeta; using curve::common::kFilePoolMagic; using curve::fs::FileSystemType;
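The mock-based GetFile and RecycleFile cases above all pin down FilePool's retry behaviour the same way: an expectation with .Times(retryTimes).WillRepeatedly(Return(-1)) asserts the failing call is attempted exactly retryTimes times, while a paired Close() expectation checks descriptors are only closed when one was actually opened. A minimal, self-contained sketch of that idiom follows; MockFs and RetryOpen are illustrative names for this sketch, not part of the patch.

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <string>

using ::testing::_;
using ::testing::Return;

// Hypothetical stand-in for the mocked LocalFileSystem used above.
class MockFs {
 public:
    MOCK_METHOD(int, Open, (const std::string&, int));
    MOCK_METHOD(int, Close, (int));
};

// Retry Open() up to retryTimes, mirroring the loop under test.
static int RetryOpen(MockFs* fs, int retryTimes) {
    int fd = -1;
    for (int i = 0; i < retryTimes && fd < 0; ++i) {
        fd = fs->Open("chunk", 0);
    }
    return fd;
}

TEST(RetryIdiomSketch, OpenFailsOnEveryAttempt) {
    const int retryTimes = 3;
    MockFs fs;
    // Exactly retryTimes attempts, each failing ...
    EXPECT_CALL(fs, Open(_, _)).Times(retryTimes).WillRepeatedly(Return(-1));
    // ... and no Close(), since no fd was ever produced.
    EXPECT_CALL(fs, Close(_)).Times(0);
    ASSERT_EQ(-1, RetryOpen(&fs, retryTimes));
}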
using curve::fs::LocalFileSystem; @@ -183,8 +183,9 @@ TEST_P(CSFilePool_test, InitializeTest) { // initialize ASSERT_TRUE(chunkFilePoolPtr_->Initialize(cfop)); ASSERT_EQ(100, chunkFilePoolPtr_->Size()); - // 初始化阶段会扫描FilePool内的所有文件,在扫描结束之后需要关闭这些文件 - // 防止过多的文件描述符被占用 + // During the initialization phase, all files in the FilePool will be + // scanned, and after the scan is completed, these files need to be closed + // to prevent too many file descriptors from being held open ASSERT_FALSE(CheckFileOpenOrNot(filePoolPath + "1")); ASSERT_FALSE(CheckFileOpenOrNot(filePoolPath + "2")); ASSERT_FALSE(CheckFileOpenOrNot(filePoolPath + "50.clean")); @@ -233,8 +234,7 @@ TEST_P(CSFilePool_test, GetFileTest) { ASSERT_EQ(-2, fsptr->Delete("test0")); // CASE 2: get dirty chunk - auto checkBytes = [this](const std::string& filename, - char byte, + auto checkBytes = [this](const std::string& filename, char byte, bool isCleaned = false) { ASSERT_TRUE(fsptr->FileExists(filename)); int fd = fsptr->Open(filename, O_RDWR); @@ -573,8 +573,7 @@ TEST_P(CSFilePool_test, CleanChunkTest) { } } -INSTANTIATE_TEST_CASE_P(CSFilePoolTest, - CSFilePool_test, +INSTANTIATE_TEST_CASE_P(CSFilePoolTest, CSFilePool_test, ::testing::Values(false, true)); TEST(CSFilePool, GetFileDirectlyTest) { @@ -583,8 +582,9 @@ TEST(CSFilePool, GetFileDirectlyTest) { fsptr = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); const std::string filePoolPath = FILEPOOL_DIR; // create chunkfile in chunkfile pool dir - // if chunkfile pool 的getFileFromPool开关关掉了,那么 - // FilePool的size是一直为0,不会从pool目录中找 + // If the getFileFromPool switch of the chunkfile pool is turned off, + // the size of FilePool stays at 0 and chunks are never looked up in the + // pool directory std::string filename = filePoolPath + "1000"; fsptr->Mkdir(filePoolPath); int fd = fsptr->Open(filename.c_str(), O_RDWR | O_CREAT); @@ -608,7 +608,8 @@ TEST(CSFilePool, GetFileDirectlyTest) { ASSERT_TRUE(chunkFilePoolPtr_->Initialize(cspopt)); ASSERT_EQ(0, chunkFilePoolPtr_->Size()); - // 测试获取chunk,chunkfile pool size不变一直为0 + // Test getting a chunk; the chunkfile pool size remains unchanged + // at 0 char metapage[4096]; memset(metapage, '1', 4096); @@ -625,12 +626,12 @@ TEST(CSFilePool, GetFileDirectlyTest) { ASSERT_EQ(buf[i], '1'); } - // 测试回收chunk,文件被删除,FilePool Size不受影响 + // Test recycling a chunk: the file is deleted, FilePool size is unaffected chunkFilePoolPtr_->RecycleFile("./new1"); ASSERT_EQ(0, chunkFilePoolPtr_->Size()); ASSERT_FALSE(fsptr->FileExists("./new1")); - // 删除测试文件及目录 + // Delete test files and directories ASSERT_EQ(0, fsptr->Close(fd)); ASSERT_EQ(0, fsptr->Delete(filePoolPath + "1000")); ASSERT_EQ(0, fsptr->Delete(filePoolPath)); diff --git a/test/chunkserver/fake_datastore.h b/test/chunkserver/fake_datastore.h index 75b5c80330..6d26815bc8 100644 --- a/test/chunkserver/fake_datastore.h +++ b/test/chunkserver/fake_datastore.h @@ -24,27 +24,25 @@ #define TEST_CHUNKSERVER_FAKE_DATASTORE_H_ #include -#include #include +#include #include -#include "src/chunkserver/datastore/chunkserver_datastore.h" #include "include/chunkserver/chunkserver_common.h" +#include "src/chunkserver/datastore/chunkserver_datastore.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; class FakeCSDataStore : public CSDataStore { public: FakeCSDataStore(DataStoreOptions options, - std::shared_ptr fs) : - CSDataStore(fs, - std::make_shared(fs), - options) { + std::shared_ptr fs) + :
CSDataStore(fs, std::make_shared(fs), options) { chunk_ = new (std::nothrow) char[options.chunkSize]; ::memset(chunk_, 0, options.chunkSize); sn_ = 0; @@ -93,10 +91,7 @@ class FakeCSDataStore : public CSDataStore { } } - CSErrorCode ReadChunk(ChunkID id, - SequenceNum sn, - char *buf, - off_t offset, + CSErrorCode ReadChunk(ChunkID id, SequenceNum sn, char* buf, off_t offset, size_t length) override { CSErrorCode errorCode = HasInjectError(); if (errorCode != CSErrorCode::Success) { @@ -105,18 +100,15 @@ class FakeCSDataStore : public CSDataStore { if (chunkIds_.find(id) == chunkIds_.end()) { return CSErrorCode::ChunkNotExistError; } - ::memcpy(buf, chunk_+offset, length); + ::memcpy(buf, chunk_ + offset, length); if (HasInjectError()) { return CSErrorCode::InternalError; } return CSErrorCode::Success; } - CSErrorCode ReadSnapshotChunk(ChunkID id, - SequenceNum sn, - char *buf, - off_t offset, - size_t length) override { + CSErrorCode ReadSnapshotChunk(ChunkID id, SequenceNum sn, char* buf, + off_t offset, size_t length) override { CSErrorCode errorCode = HasInjectError(); if (errorCode != CSErrorCode::Success) { return errorCode; @@ -124,32 +116,26 @@ class FakeCSDataStore : public CSDataStore { if (chunkIds_.find(id) == chunkIds_.end()) { return CSErrorCode::ChunkNotExistError; } - ::memcpy(buf, chunk_+offset, length); + ::memcpy(buf, chunk_ + offset, length); return CSErrorCode::Success; } - CSErrorCode WriteChunk(ChunkID id, - SequenceNum sn, - const butil::IOBuf& buf, - off_t offset, - size_t length, - uint32_t *cost, - const std::string & csl = "") override { + CSErrorCode WriteChunk(ChunkID id, SequenceNum sn, const butil::IOBuf& buf, + off_t offset, size_t length, uint32_t* cost, + const std::string& csl = "") override { CSErrorCode errorCode = HasInjectError(); if (errorCode != CSErrorCode::Success) { return errorCode; } - ::memcpy(chunk_+offset, buf.to_string().c_str(), length); + ::memcpy(chunk_ + offset, buf.to_string().c_str(), length); *cost = length; chunkIds_.insert(id); sn_ = sn; return CSErrorCode::Success; } - CSErrorCode CreateCloneChunk(ChunkID id, - SequenceNum sn, - SequenceNum correctedSn, - ChunkSizeType size, + CSErrorCode CreateCloneChunk(ChunkID id, SequenceNum sn, + SequenceNum correctedSn, ChunkSizeType size, const string& location) override { CSErrorCode errorCode = HasInjectError(); if (errorCode != CSErrorCode::Success) { @@ -160,9 +146,7 @@ class FakeCSDataStore : public CSDataStore { return CSErrorCode::Success; } - CSErrorCode PasteChunk(ChunkID id, - const char * buf, - off_t offset, + CSErrorCode PasteChunk(ChunkID id, const char* buf, off_t offset, size_t length) { CSErrorCode errorCode = HasInjectError(); if (errorCode != CSErrorCode::Success) { @@ -171,12 +155,11 @@ class FakeCSDataStore : public CSDataStore { if (chunkIds_.find(id) == chunkIds_.end()) { return CSErrorCode::ChunkNotExistError; } - ::memcpy(chunk_+offset, buf, length); + ::memcpy(chunk_ + offset, buf, length); return CSErrorCode::Success; } - CSErrorCode GetChunkInfo(ChunkID id, - CSChunkInfo* info) override { + CSErrorCode GetChunkInfo(ChunkID id, CSChunkInfo* info) override { CSErrorCode errorCode = HasInjectError(); if (errorCode != CSErrorCode::Success) { return errorCode; @@ -190,10 +173,8 @@ class FakeCSDataStore : public CSDataStore { } } - CSErrorCode GetChunkHash(ChunkID id, - off_t offset, - size_t length, - std::string *hash) { + CSErrorCode GetChunkHash(ChunkID id, off_t offset, size_t length, + std::string* hash) { uint32_t crc32c = 0; if (chunkIds_.find(id) != 
chunkIds_.end()) { crc32c = curve::common::CRC32(chunk_ + offset, length); @@ -213,14 +194,14 @@ class FakeCSDataStore : public CSDataStore { if (errorCode == CSErrorCode::Success) { return error_; } else { - // 注入错误自动恢复 + // Automatic recovery of injection errors error_ = CSErrorCode::Success; return errorCode; } } private: - char *chunk_; + char* chunk_; std::set chunkIds_; bool snapDeleteFlag_; SequenceNum sn_; @@ -234,14 +215,14 @@ class FakeFilePool : public FilePool { : FilePool(lfs) {} ~FakeFilePool() {} - bool Initialize(const FilePoolOptions &cfop) { + bool Initialize(const FilePoolOptions& cfop) { LOG(INFO) << "FakeFilePool init success"; return true; } - int GetChunk(const std::string &chunkpath, char *metapage) { return 0; } - int RecycleChunk(const std::string &chunkpath) { return 0; } + int GetChunk(const std::string& chunkpath, char* metapage) { return 0; } + int RecycleChunk(const std::string& chunkpath) { return 0; } size_t Size() { return 4; } - void UnInitialize() { } + void UnInitialize() {} }; } // namespace chunkserver diff --git a/test/chunkserver/heartbeat_helper_test.cpp b/test/chunkserver/heartbeat_helper_test.cpp index 7b9f9a9c6b..57d88c6c45 100644 --- a/test/chunkserver/heartbeat_helper_test.cpp +++ b/test/chunkserver/heartbeat_helper_test.cpp @@ -20,22 +20,24 @@ * Author: lixiaocui */ -#include -#include +#include "src/chunkserver/heartbeat_helper.h" + #include +#include #include -#include #include -#include "src/chunkserver/heartbeat_helper.h" +#include +#include + #include "src/chunkserver/chunkserver_service.h" #include "test/chunkserver/mock_copyset_node.h" #include "test/chunkserver/mock_copyset_node_manager.h" using ::testing::_; -using ::testing::Return; -using ::testing::SetArgPointee; using ::testing::DoAll; using ::testing::Mock; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace chunkserver { @@ -46,12 +48,12 @@ TEST(HeartbeatHelperTest, test_BuildNewPeers) { conf.set_epoch(2); std::vector newPeers; - // 1. 目标节点格式错误 + // 1. Destination node format error { - // 目标节点为空 + // The target node is empty ASSERT_FALSE(HeartbeatHelper::BuildNewPeers(conf, &newPeers)); - // 目标节点不为空但格式有误 + // The target node is not empty but has incorrect format auto replica = new ::curve::common::Peer(); replica->set_address("192.0.0.4"); conf.set_allocated_configchangeitem(replica); @@ -63,12 +65,12 @@ TEST(HeartbeatHelperTest, test_BuildNewPeers) { conf.set_allocated_configchangeitem(replica); } - // 2. 待删除节点格式错误 + // 2. The format of the node to be deleted is incorrect { - // 待删除节点为空 + // The node to be deleted is empty ASSERT_FALSE(HeartbeatHelper::BuildNewPeers(conf, &newPeers)); - // 待删除接节点不为空但格式有误 + // The node to be deleted is not empty but has incorrect format auto replica = new ::curve::common::Peer(); replica->set_address("192.0.0.1"); conf.set_allocated_oldpeer(replica); @@ -80,13 +82,13 @@ TEST(HeartbeatHelperTest, test_BuildNewPeers) { conf.set_allocated_oldpeer(replica); } - // 3. 生成新配置成功 + // 3. Successfully generated new configuration { for (int i = 0; i < 3; i++) { - auto replica = conf.add_peers(); - replica->set_id(i + 1); - replica->set_address( - "192.0.0." + std::to_string(i + 1) + ":8200:0"); + auto replica = conf.add_peers(); + replica->set_id(i + 1); + replica->set_address("192.0.0." 
+ std::to_string(i + 1) + + ":8200:0"); } ASSERT_TRUE(HeartbeatHelper::BuildNewPeers(conf, &newPeers)); ASSERT_EQ(3, newPeers.size()); @@ -110,19 +112,17 @@ TEST(HeartbeatHelperTest, test_CopySetConfValid) { std::shared_ptr copyset; - // 1. chunkserver中不存在需要变更的copyset - { - ASSERT_FALSE(HeartbeatHelper::CopySetConfValid(conf, copyset)); - } + // 1. There is no copyset that needs to be changed in chunkserver + { ASSERT_FALSE(HeartbeatHelper::CopySetConfValid(conf, copyset)); } - // 2. mds下发copysetConf的epoch是落后的 + // 2. The epoch of copysetConf issued by mds is outdated { copyset = std::make_shared(); EXPECT_CALL(*copyset, GetConfEpoch()).Times(2).WillOnce(Return(3)); ASSERT_FALSE(HeartbeatHelper::CopySetConfValid(conf, copyset)); } - // 3. mds下发copysetConf正常 + // 3. Mds sends copysetConf normally { EXPECT_CALL(*copyset, GetConfEpoch()).WillOnce(Return(2)); ASSERT_TRUE(HeartbeatHelper::CopySetConfValid(conf, copyset)); @@ -140,24 +140,24 @@ TEST(HeartbeatHelperTest, test_NeedPurge) { auto copyset = std::make_shared(); - // 1. mds下发空配置 + // 1. MDS issued empty configuration { conf.set_epoch(0); ASSERT_TRUE(HeartbeatHelper::NeedPurge(csEp, conf, copyset)); } - // 2. 该副本不在复制组中 + // 2. The replica is not in the replication group { conf.set_epoch(2); for (int i = 2; i <= 4; i++) { - auto replica = conf.add_peers(); - replica->set_id(i); - replica->set_address("192.0.0." + std::to_string(i) + ":8200:0"); + auto replica = conf.add_peers(); + replica->set_id(i); + replica->set_address("192.0.0." + std::to_string(i) + ":8200:0"); } ASSERT_TRUE(HeartbeatHelper::NeedPurge(csEp, conf, copyset)); } - // 3. 该副本在复制组中 + // 3. This replica is in the replication group { butil::str2endpoint("192.0.0.4:8200", &csEp); ASSERT_FALSE(HeartbeatHelper::NeedPurge(csEp, conf, copyset)); @@ -165,39 +165,37 @@ TEST(HeartbeatHelperTest, test_NeedPurge) { } TEST(HeartbeatHelperTest, test_ChunkServerLoadCopySetFin) { - // 1. peerId的格式不对 + // 1. The format of peerId is incorrect { std::string peerId = "127.0.0:5555:0"; ASSERT_FALSE(HeartbeatHelper::ChunkServerLoadCopySetFin(peerId)); } - // 2. 对端的chunkserver_service未起起来 + // 2. The peer's chunkserver_service is not started { std::string peerId = "127.0.0.1:8888:0"; ASSERT_FALSE(HeartbeatHelper::ChunkServerLoadCopySetFin(peerId)); } - auto server = new brpc::Server(); MockCopysetNodeManager* copysetNodeManager = new MockCopysetNodeManager(); ChunkServerServiceImpl* chunkserverService = new ChunkServerServiceImpl(copysetNodeManager); - ASSERT_EQ(0, - server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE)); + ASSERT_EQ( + 0, server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE)); ASSERT_EQ(0, server->Start("127.0.0.1", {5900, 5999}, nullptr)); string listenAddr(butil::endpoint2str(server->listen_address()).c_str()); - // 3. 对端copyset未加载完成 + // 3. Peer copyset loading not completed { EXPECT_CALL(*copysetNodeManager, LoadFinished()) .WillOnce(Return(false)); ASSERT_FALSE(HeartbeatHelper::ChunkServerLoadCopySetFin(listenAddr)); } - // 4. 对端copyset加载完成 + // 4.
Peer copyset loading completed { - EXPECT_CALL(*copysetNodeManager, LoadFinished()) - .WillOnce(Return(true)); + EXPECT_CALL(*copysetNodeManager, LoadFinished()).WillOnce(Return(true)); ASSERT_TRUE(HeartbeatHelper::ChunkServerLoadCopySetFin(listenAddr)); } @@ -210,4 +208,3 @@ TEST(HeartbeatHelperTest, test_ChunkServerLoadCopySetFin) { } // namespace chunkserver } // namespace curve - diff --git a/test/chunkserver/heartbeat_test.cpp b/test/chunkserver/heartbeat_test.cpp index fcfcae375a..eabadce0ee 100644 --- a/test/chunkserver/heartbeat_test.cpp +++ b/test/chunkserver/heartbeat_test.cpp @@ -20,25 +20,26 @@ * Author: lixiaocui */ -#include +#include "src/chunkserver/heartbeat.h" + #include #include +#include + #include -#include "test/chunkserver/heartbeat_test_common.h" #include "include/chunkserver/chunkserver_common.h" -#include "src/common/configuration.h" -#include "src/chunkserver/heartbeat.h" #include "src/chunkserver/cli.h" - +#include "src/common/configuration.h" +#include "test/chunkserver/heartbeat_test_common.h" #include "test/client/fake/fakeMDS.h" -std::string mdsMetaServerAddr = "127.0.0.1:9300"; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9300"; // NOLINT namespace curve { namespace chunkserver { -const LogicPoolID poolId = 666; -const CopysetID copysetId = 888; +const LogicPoolID poolId = 666; +const CopysetID copysetId = 888; class HeartbeatTest : public ::testing::Test { public: @@ -57,27 +58,27 @@ class HeartbeatTest : public ::testing::Test { hbtest_->UnInitializeMds(); } - protected: std::shared_ptr hbtest_; }; TEST_F(HeartbeatTest, TransferLeader) { - // 创建copyset - std::vector cslist{ - "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"}; + // Create copyset + std::vector cslist{"127.0.0.1:8200", "127.0.0.1:8201", + "127.0.0.1:8202"}; std::string confStr = "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0"; - std::string dest1 = "127.0.0.1:8200:0"; - std::string dest2 = "127.0.0.1:8201:0"; + std::string dest1 = "127.0.0.1:8200:0"; + std::string dest2 = "127.0.0.1:8201:0"; hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr); hbtest_->WaitCopysetReady(poolId, copysetId, confStr); - // 构造req中期望的CopySetInfo,expectleader是dst1 + // Construct the expected CopySetInfo for req, with the expected leader + // being dst1 ::curve::mds::heartbeat::CopySetInfo expect; expect.set_logicalpoolid(poolId); expect.set_copysetid(copysetId); - for (int j = 0; j < 3; j ++) { + for (int j = 0; j < 3; j++) { auto replica = expect.add_peers(); replica->set_address("127.0.0.1:820" + std::to_string(j) + ":0"); } @@ -86,11 +87,11 @@ TEST_F(HeartbeatTest, TransferLeader) { peer->set_address(dest1); expect.set_allocated_leaderpeer(peer); - // 构造resp中的CopySetConf, transfer到dst1 + // Construct CopySetConf in resp, transfer to dst1 CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); - for (int j = 0; j < 3; j ++) { + for (int j = 0; j < 3; j++) { auto replica = conf.add_peers(); replica->set_address("127.0.0.1:820" + std::to_string(j) + ":0"); } @@ -99,27 +100,28 @@ TEST_F(HeartbeatTest, TransferLeader) { conf.set_allocated_configchangeitem(peer); conf.set_type(curve::mds::heartbeat::TRANSFER_LEADER); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); - // 构造req中期望的CopySetInfo,expectleader是dst2 + // Construct the expected CopySetInfo for req, with the expected leader + // being dst2 peer = new ::curve::common::Peer(); peer->set_address(dest2); expect.set_allocated_leaderpeer(peer); -
// 构造resp中的CopySetConf, transfer到dst2 + // Construct CopySetConf in resp, transfer to dst2 peer = new ::curve::common::Peer(); peer->set_address(dest2); conf.set_allocated_configchangeitem(peer); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } TEST_F(HeartbeatTest, RemovePeer) { - // 创建copyset - std::vector cslist{ - "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"}; + // Create copyset + std::vector cslist{"127.0.0.1:8200", "127.0.0.1:8201", + "127.0.0.1:8202"}; std::string confStr = "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0"; std::string leaderPeer = "127.0.0.1:8200:0"; std::string destPeer = "127.0.0.1:8202:0"; @@ -128,21 +130,21 @@ TEST_F(HeartbeatTest, RemovePeer) { hbtest_->WaitCopysetReady(poolId, copysetId, confStr); hbtest_->TransferLeaderSync(poolId, copysetId, confStr, leaderPeer); - // 构造req中期望的CopySetInfo + // Construct the CopySetInfo expected in req ::curve::mds::heartbeat::CopySetInfo expect; expect.set_logicalpoolid(poolId); expect.set_copysetid(copysetId); - for (int j = 0; j < 2; j ++) { + for (int j = 0; j < 2; j++) { auto replica = expect.add_peers(); replica->set_address("127.0.0.1:820" + std::to_string(j) + ":0"); } expect.set_epoch(2); - // 构造resp中的CopySetConf + // Construct CopySetConf in resp CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); - for (int j = 0; j < 3; j ++) { + for (int j = 0; j < 3; j++) { auto replica = conf.add_peers(); replica->set_address("127.0.0.1:820" + std::to_string(j) + ":0"); } @@ -151,62 +153,62 @@ TEST_F(HeartbeatTest, RemovePeer) { conf.set_allocated_configchangeitem(peer); conf.set_type(curve::mds::heartbeat::REMOVE_PEER); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } TEST_F(HeartbeatTest, CleanPeer_after_Configchange) { - // 创建copyset + // Create copyset std::vector cslist{"127.0.0.1:8200"}; std::string confStr = "127.0.0.1:8200:0"; hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr); hbtest_->WaitCopysetReady(poolId, copysetId, confStr); - // 构造req中期望的CopySetInfo + // Construct the CopySetInfo expected in req ::curve::mds::heartbeat::CopySetInfo expect; - // 构造resp中的CopySetConf + // Construct CopySetConf in resp CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } TEST_F(HeartbeatTest, CleanPeer_not_exist_in_MDS) { - // 在chunkserver上创建一个copyset + // Create a copyset on chunkserver std::vector cslist{"127.0.0.1:8202"}; std::string confStr = "127.0.0.1:8202:0"; hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr); hbtest_->WaitCopysetReady(poolId, copysetId, confStr); - // 构造req中期望的CopySetInfo + // Construct the CopySetInfo expected in req ::curve::mds::heartbeat::CopySetInfo expect; - // 构造resp中的CopySetConf + // Construct CopySetConf in resp CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); conf.set_epoch(0); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } TEST_F(HeartbeatTest, AddPeer) { - // 创建copyset - std::vector cslist{ - "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"}; + // Create copyset + std::vector cslist{"127.0.0.1:8200", "127.0.0.1:8201", + "127.0.0.1:8202"}; std::string confStr = "127.0.0.1:8200:0,127.0.0.1:8201:0"; std::string addPeer = "127.0.0.1:8202:0"; 
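    // Note: like TransferLeader and RemovePeer above, this test follows the
    // same four-step pattern: create the copyset, wait for it to become
    // ready, build the expected CopySetInfo, then let WailForConfigChangeOk
    // replay the ADD_PEER command until the reported info matches.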
hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr); hbtest_->WaitCopysetReady(poolId, copysetId, confStr); - // 构造req中期望的CopySetInfo + // Construct the CopySetInfo expected in req ::curve::mds::heartbeat::CopySetInfo expect; expect.set_logicalpoolid(poolId); expect.set_copysetid(copysetId); @@ -216,11 +218,11 @@ TEST_F(HeartbeatTest, AddPeer) { } expect.set_epoch(2); - // 构造resp中的CopySetConf + // Construct CopySetConf in resp CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); - for (int j = 0; j < 2; j ++) { + for (int j = 0; j < 2; j++) { auto replica = conf.add_peers(); replica->set_address("127.0.0.1:820" + std::to_string(j) + ":0"); } @@ -229,14 +231,14 @@ TEST_F(HeartbeatTest, AddPeer) { conf.set_allocated_configchangeitem(peer); conf.set_type(curve::mds::heartbeat::ADD_PEER); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } TEST_F(HeartbeatTest, ChangePeer) { - // 创建copyset - std::vector cslist{ - "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"}; + // Create copyset + std::vector cslist{"127.0.0.1:8200", "127.0.0.1:8201", + "127.0.0.1:8202"}; std::string oldConf = "127.0.0.1:8200:0,127.0.0.1:8202:0"; std::string addOne = "127.0.0.1:8201:0"; std::string rmOne = "127.0.0.1:8202:0"; @@ -244,7 +246,7 @@ TEST_F(HeartbeatTest, ChangePeer) { hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, oldConf); hbtest_->WaitCopysetReady(poolId, copysetId, oldConf); - // 构造req中期望的CopySetInfo + // Construct the CopySetInfo expected in req ::curve::mds::heartbeat::CopySetInfo expect; expect.set_logicalpoolid(poolId); expect.set_copysetid(copysetId); @@ -254,7 +256,7 @@ TEST_F(HeartbeatTest, ChangePeer) { replica->set_address("127.0.0.1:8201:0"); expect.set_epoch(2); - // 构造resp中的CopySetConf + // Construct CopySetConf in resp CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); @@ -271,7 +273,7 @@ TEST_F(HeartbeatTest, ChangePeer) { conf.set_allocated_oldpeer(peer); conf.set_type(curve::mds::heartbeat::CHANGE_PEER); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } diff --git a/test/chunkserver/heartbeat_test_common.cpp b/test/chunkserver/heartbeat_test_common.cpp index 20d6b444f8..5a24d3dac9 100644 --- a/test/chunkserver/heartbeat_test_common.cpp +++ b/test/chunkserver/heartbeat_test_common.cpp @@ -23,8 +23,8 @@ #include "test/chunkserver/heartbeat_test_common.h" -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT static const char* confPath[3] = { "./8200/chunkserver.conf", @@ -37,12 +37,12 @@ namespace chunkserver { HeartbeatTestCommon* HeartbeatTestCommon::hbtestCommon_ = nullptr; -void HeartbeatTestCommon::CleanPeer( - LogicPoolID poolId, CopysetID copysetId, const std::string& peer) { - ::google::protobuf::RpcController* cntl; - ::google::protobuf::Closure* done; - const HeartbeatRequest* req; - HeartbeatResponse* resp; +void HeartbeatTestCommon::CleanPeer(LogicPoolID poolId, CopysetID copysetId, + const std::string& peer) { + ::google::protobuf::RpcController* cntl; + ::google::protobuf::Closure* done; + const HeartbeatRequest* req; + HeartbeatResponse* resp; LOG(INFO) << "Cleaning peer " << peer; @@ -52,16 +52,16 @@ void HeartbeatTestCommon::CleanPeer( GetHeartbeat(&cntl, &req, &resp, &done); brpc::ClosureGuard 
done_guard(done); - std::string sender = req->ip() + ":" + std::to_string(req->port()) - + ":0"; + std::string sender = + req->ip() + ":" + std::to_string(req->port()) + ":0"; if (sender != peer) { continue; } if (req->copysetinfos_size() >= 1) { int i = 0; - for (; i < req->copysetinfos_size(); i ++) { - if ( req->copysetinfos(i).logicalpoolid() == poolId && - req->copysetinfos(i).copysetid() == copysetId ) { + for (; i < req->copysetinfos_size(); i++) { + if (req->copysetinfos(i).logicalpoolid() == poolId && + req->copysetinfos(i).copysetid() == copysetId) { break; } } @@ -94,7 +94,7 @@ void HeartbeatTestCommon::CleanPeer( void HeartbeatTestCommon::CreateCopysetPeers( LogicPoolID poolId, CopysetID copysetId, - const std::vector &cslist, const std::string& confStr) { + const std::vector& cslist, const std::string& confStr) { braft::Configuration conf; ASSERT_EQ(0, conf.parse_from(confStr)); std::vector confPeers; @@ -113,8 +113,8 @@ void HeartbeatTestCommon::CreateCopysetPeers( cntl.set_timeout_ms(3000); request.set_logicpoolid(poolId); request.set_copysetid(copysetId); - for (auto peer = confPeers.begin(); - peer != confPeers.end(); peer++) { + for (auto peer = confPeers.begin(); peer != confPeers.end(); + peer++) { request.add_peerid(peer->to_string()); } @@ -122,11 +122,11 @@ void HeartbeatTestCommon::CreateCopysetPeers( copyset_stub.CreateCopysetNode(&cntl, &request, &response, nullptr); if (cntl.Failed()) { - LOG(ERROR) << "Creating copyset failed: " - << cntl.ErrorCode() << " " << cntl.ErrorText(); + LOG(ERROR) << "Creating copyset failed: " << cntl.ErrorCode() + << " " << cntl.ErrorText(); } else if (COPYSET_OP_STATUS_EXIST == response.status()) { - LOG(INFO) << "Skipped creating existed copyset <" - << poolId << ", " << copysetId << ">: " << conf + LOG(INFO) << "Skipped creating existed copyset <" << poolId + << ", " << copysetId << ">: " << conf << " on peer: " << *it; break; } else if (COPYSET_OP_STATUS_SUCCESS == response.status()) { @@ -141,8 +141,9 @@ void HeartbeatTestCommon::CreateCopysetPeers( } } -void HeartbeatTestCommon::WaitCopysetReady( - LogicPoolID poolId, CopysetID copysetId, const std::string& confStr) { +void HeartbeatTestCommon::WaitCopysetReady(LogicPoolID poolId, + CopysetID copysetId, + const std::string& confStr) { braft::PeerId peerId; butil::Status status; Configuration conf; @@ -160,9 +161,10 @@ void HeartbeatTestCommon::WaitCopysetReady( } } -void HeartbeatTestCommon::TransferLeaderSync( - LogicPoolID poolId, CopysetID copysetId, - const std::string& confStr, const std::string& newLeader) { +void HeartbeatTestCommon::TransferLeaderSync(LogicPoolID poolId, + CopysetID copysetId, + const std::string& confStr, + const std::string& newLeader) { braft::PeerId peerId; butil::Status status; Configuration conf; @@ -198,21 +200,18 @@ void HeartbeatTestCommon::ReleaseHeartbeat() { } void HeartbeatTestCommon::SetHeartbeatInfo( - ::google::protobuf::RpcController* cntl, - const HeartbeatRequest* request, - HeartbeatResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController* cntl, const HeartbeatRequest* request, + HeartbeatResponse* response, ::google::protobuf::Closure* done) { cntl_ = cntl; req_ = request; resp_ = response; done_ = done; } -void HeartbeatTestCommon::GetHeartbeat( - ::google::protobuf::RpcController** cntl, - const HeartbeatRequest** request, - HeartbeatResponse** response, - ::google::protobuf::Closure** done) { +void HeartbeatTestCommon::GetHeartbeat(::google::protobuf::RpcController** cntl, + const 
HeartbeatRequest** request, + HeartbeatResponse** response, + ::google::protobuf::Closure** done) { std::unique_lock lock(hbtestCommon_->GetMutex()); handlerReady_.store(true, std::memory_order_release); @@ -230,10 +229,8 @@ void HeartbeatTestCommon::GetHeartbeat( } void HeartbeatTestCommon::HeartbeatCallback( - ::google::protobuf::RpcController* cntl, - const HeartbeatRequest* request, - HeartbeatResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController* cntl, const HeartbeatRequest* request, + HeartbeatResponse* response, ::google::protobuf::Closure* done) { { std::unique_lock lock(hbtestCommon_->GetMutex()); if (!hbtestCommon_->GetReady().load(std::memory_order_acquire)) { @@ -250,8 +247,8 @@ void HeartbeatTestCommon::HeartbeatCallback( } bool HeartbeatTestCommon::SameCopySetInfo( - const ::curve::mds::heartbeat::CopySetInfo &orig, - const ::curve::mds::heartbeat::CopySetInfo &expect) { + const ::curve::mds::heartbeat::CopySetInfo& orig, + const ::curve::mds::heartbeat::CopySetInfo& expect) { if (!expect.IsInitialized()) { if (!orig.IsInitialized()) { return true; @@ -301,13 +298,12 @@ bool HeartbeatTestCommon::SameCopySetInfo( } bool HeartbeatTestCommon::WailForConfigChangeOk( - const ::curve::mds::heartbeat::CopySetConf &conf, - ::curve::mds::heartbeat::CopySetInfo expectedInfo, - int timeLimit) { - ::google::protobuf::RpcController* cntl; - ::google::protobuf::Closure* done; - const HeartbeatRequest* req; - HeartbeatResponse* resp; + const ::curve::mds::heartbeat::CopySetConf& conf, + ::curve::mds::heartbeat::CopySetInfo expectedInfo, int timeLimit) { + ::google::protobuf::RpcController* cntl; + ::google::protobuf::Closure* done; + const HeartbeatRequest* req; + HeartbeatResponse* resp; int64_t startTime = butil::monotonic_time_ms(); bool leaderPeerSet = expectedInfo.has_leaderpeer(); @@ -316,8 +312,8 @@ bool HeartbeatTestCommon::WailForConfigChangeOk( GetHeartbeat(&cntl, &req, &resp, &done); brpc::ClosureGuard done_guard(done); - // 获取当前copyset的leader - std::string sender = + // Get the leader of the current copyset + std::string sender = req->ip() + ":" + std::to_string(req->port()) + ":0"; if (1 == req->copysetinfos_size()) { leader = req->copysetinfos(0).leaderpeer().address(); @@ -333,8 +329,10 @@ bool HeartbeatTestCommon::WailForConfigChangeOk( } } - // 如果当前req是leader发送的,判断req中的内容是否符合要求 - // 如果符合要求,返回true; 如果不符合要求,设置resp中的内容 + // If the current req was sent by the leader, check whether its content + // meets the requirements. If it does, return true; if it does not, + // set the content in resp if (leader == sender) { if (!leaderPeerSet) { auto peer = new ::curve::common::Peer(); @@ -342,22 +340,23 @@ bool HeartbeatTestCommon::WailForConfigChangeOk( expectedInfo.set_allocated_leaderpeer(peer); } - // 判断req是否符合要求, 符合要求返回true + // Determine whether the req meets the requirements; return true + // if it does if (req->copysetinfos_size() == 1) { if (SameCopySetInfo(req->copysetinfos(0), expectedInfo)) { return true; } LOG(INFO) << "req->copysetinfos:" << req->copysetinfos(0).DebugString() << ", expectedInfo: " << expectedInfo.DebugString(); } else if (req->copysetinfos_size() == 0) { - if (SameCopySetInfo( - ::curve::mds::heartbeat::CopySetInfo{}, expectedInfo)) { + if (SameCopySetInfo(::curve::mds::heartbeat::CopySetInfo{}, + expectedInfo)) { return true; } } - //
不符合要求设置resp + // Requirements not met: set resp if (req->copysetinfos_size() == 1) { auto build = resp->add_needupdatecopysets(); if (!build->has_epoch()) { @@ -388,7 +387,7 @@ int RmDirData(std::string uri) { int RemovePeersData(bool rmChunkServerMeta) { common::Configuration conf; - for (int i = 0; i < 3; i ++) { + for (int i = 0; i < 3; i++) { conf.SetConfigPath(confPath[i]); CHECK(conf.LoadConfig()) << "load conf err"; LOG_IF(FATAL, !conf.GetStringValue("copyset.chunk_data_uri", &res)); if (RmDirData(res)) { LOG(ERROR) << "Failed to remove node " << i - << " data dir: " << strerror(errno); + << " data dir: " << strerror(errno); return -1; } LOG_IF(FATAL, !conf.GetStringValue("copyset.raft_log_uri", &res)); if (RmDirData(res)) { LOG(ERROR) << "Failed to remove node " << i - << " log dir: " << strerror(errno); + << " log dir: " << strerror(errno); return -1; } LOG_IF(FATAL, !conf.GetStringValue("copyset.raft_log_uri", &res)); if (RmDirData(res)) { LOG(ERROR) << "Failed to remove node " << i - << " raft meta dir: " << strerror(errno); + << " raft meta dir: " << strerror(errno); return -1; } LOG_IF(FATAL, !conf.GetStringValue("copyset.raft_snapshot_uri", &res)); if (RmDirData(res)) { LOG(ERROR) << "Failed to remove node " << i - << " raft snapshot dir: " << strerror(errno); + << " raft snapshot dir: " << strerror(errno); return -1; } LOG_IF(FATAL, !conf.GetStringValue("copyset.recycler_uri", &res)); if (RmDirData(res)) { LOG(ERROR) << "Failed to remove node " << i - << " raft recycler dir: " << strerror(errno); + << " raft recycler dir: " << strerror(errno); return -1; } @@ -432,7 +431,7 @@ if (rmChunkServerMeta) { if (RmFile(res)) { LOG(ERROR) << "Failed to remove node " << i - << " chunkserver meta file: " << strerror(errno); + << " chunkserver meta file: " << strerror(errno); return -1; } } diff --git a/test/chunkserver/heartbeat_test_common.h b/test/chunkserver/heartbeat_test_common.h index 433f7119eb..744dbe78d3 100644 --- a/test/chunkserver/heartbeat_test_common.h +++ b/test/chunkserver/heartbeat_test_common.h @@ -23,20 +23,20 @@ #ifndef TEST_CHUNKSERVER_HEARTBEAT_TEST_COMMON_H_ #define TEST_CHUNKSERVER_HEARTBEAT_TEST_COMMON_H_ -#include #include +#include +#include #include +#include //NOLINT #include -#include -#include //NOLINT #include "include/chunkserver/chunkserver_common.h" -#include "src/chunkserver/copyset_node_manager.h" #include "proto/heartbeat.pb.h" -#include "src/common/configuration.h" -#include "src/chunkserver/heartbeat.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" +#include "src/chunkserver/heartbeat.h" +#include "src/common/configuration.h" #include "src/common/uri_parser.h" #include "test/client/fake/fakeMDS.h" @@ -47,7 +47,7 @@ using ::curve::common::UriParser; class HeartbeatTestCommon { public: - explicit HeartbeatTestCommon(const std::string &filename) { + explicit HeartbeatTestCommon(const std::string& filename) { hbtestCommon_ = this; handlerReady_.store(false, std::memory_order_release); mds_->StartService(); } - std::atomic& GetReady() { - return handlerReady_; - } + std::atomic& GetReady() { return handlerReady_; } - std::mutex& GetMutex() { - return hbMtx_; - } + std::mutex& GetMutex() { return hbMtx_; } - std::condition_variable& GetCV() { - return hbCV_; - } + std::condition_variable& GetCV() { return hbCV_; } void UnInitializeMds() {
mds_->UnInitialize(); @@ -75,105 +69,110 @@ /** - * CleanPeer 清空peer上指定copyset数据 + * CleanPeer: Clear the specified copyset data on the peer * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] peer chunkserver ip + * @param[in] poolId Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] peer chunkserver IP */ - void CleanPeer( - LogicPoolID poolId, CopysetID copysetId, const std::string& peer); + void CleanPeer(LogicPoolID poolId, CopysetID copysetId, + const std::string& peer); /** - * CreateCopysetPeers 在指定chunkserverlist上创建指定配置的copyset + * CreateCopysetPeers: Create a copyset of the specified configuration on + * the specified chunkserverlist * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] cslist 待创建copyset的chunkserver列表 - * @param[in] conf 使用该配置作为初始配置创建copyset + * @param[in] poolId Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] cslist The chunkserver list for the copyset to be created + * @param[in] conf Use this configuration as the initial configuration to + * create a copyset */ void CreateCopysetPeers(LogicPoolID poolId, CopysetID copysetId, - const std::vector &cslist, const std::string& conf); + const std::vector& cslist, + const std::string& conf); /** - * WaitCopysetReady 等待指定copyset选出leader + * WaitCopysetReady: Wait for the specified copyset to elect a leader * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] conf 指定copyset复制组成员 + * @param[in] poolId Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] conf specifies the copyset replication group members */ - void WaitCopysetReady( - LogicPoolID poolId, CopysetID copysetId, const std::string& conf); + void WaitCopysetReady(LogicPoolID poolId, CopysetID copysetId, + const std::string& conf); /** - * TransferLeaderSync 触发transferleader并等待完成 + * TransferLeaderSync: Trigger transferleader and wait for completion * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] conf 指定copyset复制组成员 - * @param[in] newLeader 目标leader + * @param[in] poolId Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] conf specifies the copyset replication group members + * @param[in] newLeader Target Leader */ void TransferLeaderSync(LogicPoolID poolId, CopysetID copysetId, - const std::string& conf, const std::string& newLeader); + const std::string& conf, + const std::string& newLeader); /** - * WailForConfigChangeOk 指定时间内(timeLimitMs),chunkserver是否上报了 - * 符合预期的copyset信息 + * WailForConfigChangeOk: Determine whether the chunkserver has reported the + * expected copyset information within the specified time limit + * (timeLimitMs).
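+     * Only heartbeats sent by the current leader are compared against
+     * expectedInfo; a non-matching report gets the conf command written
+     * back into its response, so the change keeps being driven until it
+     * converges or the time limit expires.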
* - * @param[in] conf mds需要下发给指定copyset的变更命令 - * @param[in] expectedInfo 变更之后期望复制组配置 - * @param[in] timeLimitMs 等待时间 + * @param[in] conf the change command mds should issue to the specified + * copyset + * @param[in] expectedInfo expected replication group configuration after + * the change + * @param[in] timeLimitMs waiting time * - * @return false-指定时间内copyset配置未能达到预期, true-达到预期 + * @return false - Copyset configuration failed to meet expectations within + * the specified time, true - met expectations */ bool WailForConfigChangeOk( - const ::curve::mds::heartbeat::CopySetConf &conf, - ::curve::mds::heartbeat::CopySetInfo expectedInfo, - int timeLimitMs); + const ::curve::mds::heartbeat::CopySetConf& conf, + ::curve::mds::heartbeat::CopySetInfo expectedInfo, int timeLimitMs); /** - * SameCopySetInfo 比较两个copysetInfo是否一致 + * SameCopySetInfo: Compare two copysetInfo structures to check if they are + * identical. * - * @param[in] orig 待比较的copysetInfo - * @param[in] expect 期望copysetInfo + * @param[in] orig The copysetInfo to compare. + * @param[in] expect The expected copysetInfo for comparison. * - * @return true-一致 false-不一致 + * @return true if they are identical, false if they are not. */ - bool SameCopySetInfo( - const ::curve::mds::heartbeat::CopySetInfo &orig, - const ::curve::mds::heartbeat::CopySetInfo &expect); + bool SameCopySetInfo(const ::curve::mds::heartbeat::CopySetInfo& orig, + const ::curve::mds::heartbeat::CopySetInfo& expect); /** - * ReleaseHeartbeat heartbeat中的会掉设置为nullptr + * ReleaseHeartbeat: Set the callback in the heartbeat to nullptr. */ void ReleaseHeartbeat(); /** - * SetHeartbeatInfo 把mds接受到的cntl等信息复制到成员变量 + * SetHeartbeatInfo: Copy the cntl and other information received by mds to + * the member variables */ - void SetHeartbeatInfo( - ::google::protobuf::RpcController* cntl, - const HeartbeatRequest* request, - HeartbeatResponse* response, - ::google::protobuf::Closure* done); + void SetHeartbeatInfo(::google::protobuf::RpcController* cntl, + const HeartbeatRequest* request, + HeartbeatResponse* response, + ::google::protobuf::Closure* done); /** - * GetHeartbeat 把当前成员中的cntl等变量设置到rpc中 + * GetHeartbeat: Set the current member's cntl and other variables into the + * RPC.
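+     * It blocks until HeartbeatCallback captures the next heartbeat, then
+     * hands the cntl/request/response/done pointers back to the caller.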
*/ - void GetHeartbeat( - ::google::protobuf::RpcController** cntl, - const HeartbeatRequest** request, - HeartbeatResponse** response, - ::google::protobuf::Closure** done); + void GetHeartbeat(::google::protobuf::RpcController** cntl, + const HeartbeatRequest** request, + HeartbeatResponse** response, + ::google::protobuf::Closure** done); /** - * HeartbeatCallback heartbeat回掉 + * HeartbeatCallback: heartbeat callback */ - static void HeartbeatCallback( - ::google::protobuf::RpcController* controller, - const HeartbeatRequest* request, - HeartbeatResponse* response, - ::google::protobuf::Closure* done); + static void HeartbeatCallback(::google::protobuf::RpcController* controller, + const HeartbeatRequest* request, + HeartbeatResponse* response, + ::google::protobuf::Closure* done); private: FakeMDS* mds_; diff --git a/test/chunkserver/heartbeat_test_main.cpp b/test/chunkserver/heartbeat_test_main.cpp index de06bcc255..d2d517bfc4 100644 --- a/test/chunkserver/heartbeat_test_main.cpp +++ b/test/chunkserver/heartbeat_test_main.cpp @@ -21,9 +21,9 @@ * 2018/12/23 Wenyu Zhou Initial version */ -#include #include #include +#include #include "include/chunkserver/chunkserver_common.h" #include "src/chunkserver/chunkserver.h" @@ -31,7 +31,7 @@ #include "test/chunkserver/heartbeat_test_common.h" #include "test/integration/common/config_generator.h" -static const char *param[3][15] = { +static const char* param[3][15] = { { "heartbeat_test", "-chunkServerIp=127.0.0.1", @@ -89,7 +89,7 @@ using ::curve::chunkserver::ChunkServer; butil::AtExitManager atExitManager; -static int RunChunkServer(int i, int argc, char **argv) { +static int RunChunkServer(int i, int argc, char** argv) { auto chunkserver = new curve::chunkserver::ChunkServer(); if (chunkserver == nullptr) { LOG(ERROR) << "Failed to create chunkserver " << i; @@ -104,7 +104,7 @@ static int RunChunkServer(int i, int argc, char **argv) { return 0; } -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { int ret; pid_t pids[3]; testing::InitGoogleTest(&argc, argv); @@ -133,10 +133,11 @@ int main(int argc, char *argv[]) { LOG(FATAL) << "Failed to create chunkserver process 0"; } else if (pids[i] == 0) { /* - * RunChunkServer内部会调用LOG(), 有较低概率因不兼容fork()而卡死 + * RunChunkServer will call LOG() internally, with a low probability + * of getting stuck due to incompatible fork() */ - return RunChunkServer(i, sizeof(param[i]) / sizeof(char *), - const_cast(param[i])); + return RunChunkServer(i, sizeof(param[i]) / sizeof(char*), + const_cast(param[i])); } } @@ -148,8 +149,9 @@ LOG(FATAL) << "Failed to create test proccess"; } else if (pid == 0) { /* - * RUN_ALL_TESTS内部可能会调用LOG(), - * 有较低概率因不兼容fork()而卡死 + * LOG() may be called internally in RUN_ALL_TESTS; there is a + * low probability of getting stuck due to incompatible fork() */ ret = RUN_ALL_TESTS(); return ret; @@ -171,10 +173,11 @@ LOG(FATAL) << "Failed to restart chunkserver process 1"; } else if (pid == 0) { /* - * RunChunkServer内部会调用LOG(), 有较低概率因不兼容fork()而卡死 + * RunChunkServer will call LOG() internally, with a low probability + * of getting stuck due to incompatible fork() */ - ret = RunChunkServer(1, sizeof(param[1]) / sizeof(char *), - const_cast(param[1])); + ret = RunChunkServer(1, sizeof(param[1]) / sizeof(char*), + const_cast(param[1])); return ret; } sleep(2); diff --git a/test/chunkserver/inflight_throttle_test.cpp b/test/chunkserver/inflight_throttle_test.cpp index 8faa18d76e..333e1f6934 100644 ---
a/test/chunkserver/inflight_throttle_test.cpp +++ b/test/chunkserver/inflight_throttle_test.cpp @@ -20,10 +20,11 @@ * Author: wudemiao */ +#include "src/chunkserver/inflight_throttle.h" + #include #include "src/common/concurrent/concurrent.h" -#include "src/chunkserver/inflight_throttle.h" namespace curve { namespace chunkserver { @@ -31,7 +32,7 @@ namespace chunkserver { using curve::common::Thread; TEST(InflightThrottleTest, basic) { - // 基本测试 + // Basic testing { uint64_t maxInflight = 1; InflightThrottle inflightThrottle(maxInflight); @@ -45,7 +46,7 @@ TEST(InflightThrottleTest, basic) { ASSERT_FALSE(inflightThrottle.IsOverLoad()); } - // 并发加 + // Concurrent addition { uint64_t maxInflight = 10000; InflightThrottle inflightThrottle(maxInflight); @@ -78,7 +79,7 @@ TEST(InflightThrottleTest, basic) { ASSERT_FALSE(inflightThrottle.IsOverLoad()); } - // 并发减 + // Concurrent reduction { uint64_t maxInflight = 16; InflightThrottle inflightThrottle(maxInflight); diff --git a/test/chunkserver/metrics_test.cpp b/test/chunkserver/metrics_test.cpp index 282802336f..57c7a79c33 100644 --- a/test/chunkserver/metrics_test.cpp +++ b/test/chunkserver/metrics_test.cpp @@ -20,24 +20,25 @@ * Author: yangyaokai */ -#include -#include +#include +#include #include #include -#include -#include -#include -#include +#include +#include + #include +#include +#include #include -#include "src/common/configuration.h" #include "src/chunkserver/chunkserver_metrics.h" -#include "src/chunkserver/trash.h" #include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/datastore/file_pool.h" -#include "src/fs/local_filesystem.h" #include "src/chunkserver/raftlog/curve_segment_log_storage.h" +#include "src/chunkserver/trash.h" +#include "src/common/configuration.h" +#include "src/fs/local_filesystem.h" #include "test/chunkserver/datastore/filepool_helper.h" namespace curve { @@ -55,15 +56,14 @@ const PageSizeType PAGE_SIZE = 4 * 1024; const int chunkNum = 10; const LogicPoolID logicId = 1; -const string baseDir = "./data_csmetric"; // NOLINT -const string copysetDir = "local://./data_csmetric"; // NOLINT -const string logDir = "curve://./data_csmetric"; // NOLINT -const string chunkPoolDir = "./chunkfilepool_csmetric"; // NOLINT +const string baseDir = "./data_csmetric"; // NOLINT +const string copysetDir = "local://./data_csmetric"; // NOLINT +const string logDir = "curve://./data_csmetric"; // NOLINT +const string chunkPoolDir = "./chunkfilepool_csmetric"; // NOLINT const string chunkPoolMetaPath = "./chunkfilepool_csmetric.meta"; // NOLINT -const string walPoolDir = "./walfilepool_csmetric"; // NOLINT -const string walPoolMetaPath = "./walfilepool_csmetric.meta"; // NOLINT -const string trashPath = "./trash_csmetric"; // NOLINT - +const string walPoolDir = "./walfilepool_csmetric"; // NOLINT +const string walPoolMetaPath = "./walfilepool_csmetric.meta"; // NOLINT +const string trashPath = "./trash_csmetric"; // NOLINT class CSMetricTest : public ::testing::Test { public: @@ -90,8 +90,7 @@ class CSMetricTest : public ::testing::Test { cfop.blockSize = BLOCK_SIZE; memcpy(cfop.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); - if (lfs_->DirExists(poolDir)) - lfs_->Delete(poolDir); + if (lfs_->DirExists(poolDir)) lfs_->Delete(poolDir); allocateChunk(lfs_, chunkNum, poolDir, CHUNK_SIZE); ASSERT_TRUE(filePool->Initialize(cfop)); ASSERT_EQ(chunkNum, filePool->Size()); @@ -147,8 +146,8 @@ class CSMetricTest : public ::testing::Test { } void CreateConfigFile() { - confFile_ = "csmetric.conf"; - // 创建配置文件 + 
confFile_ = "csmetric.conf"; + // Create the configuration file std::string confItem; std::ofstream cFile(confFile_); ASSERT_TRUE(cFile.is_open()); @@ -210,18 +209,18 @@ TEST_F(CSMetricTest, CopysetMetricTest) { int rc = metric_->CreateCopysetMetric(logicId, copysetId); ASSERT_EQ(rc, 0); - // 如果copyset的metric已经存在,返回-1 + // If the metric for the copyset already exists, return -1 rc = metric_->CreateCopysetMetric(logicId, copysetId); ASSERT_EQ(rc, -1); - // 获取不存在的copyset metric,返回nullptr + // Getting a non-existent copyset metric returns nullptr CopysetMetricPtr copysetMetric = metric_->GetCopysetMetric(logicId, 2); ASSERT_EQ(copysetMetric, nullptr); copysetMetric = metric_->GetCopysetMetric(logicId, copysetId); ASSERT_NE(copysetMetric, nullptr); - // 删除copyset metric后,再去获取返回nullptr + // After deleting the copyset metric, fetching it again returns nullptr rc = metric_->RemoveCopysetMetric(logicId, copysetId); ASSERT_EQ(rc, 0); copysetMetric = metric_->GetCopysetMetric(logicId, copysetId); @@ -233,7 +232,8 @@ TEST_F(CSMetricTest, OnRequestTest) { int rc = metric_->CreateCopysetMetric(logicId, copysetId); ASSERT_EQ(rc, 0); - CopysetMetricPtr copysetMetric = metric_->GetCopysetMetric(logicId, copysetId); // NOLINT + CopysetMetricPtr copysetMetric = + metric_->GetCopysetMetric(logicId, copysetId); // NOLINT ASSERT_NE(copysetMetric, nullptr); const IOMetricPtr serverWriteMetric = @@ -257,7 +257,7 @@ TEST_F(CSMetricTest, OnRequestTest) { const IOMetricPtr cpDownloadMetric = copysetMetric->GetIOMetric(CSIOMetricType::DOWNLOAD); - // 统计写入成功的情况 + // Count successful writes metric_->OnRequest(logicId, copysetId, CSIOMetricType::WRITE_CHUNK); ASSERT_EQ(1, serverWriteMetric->reqNum_.get_value()); ASSERT_EQ(0, serverWriteMetric->ioNum_.get_value()); @@ -268,7 +268,7 @@ TEST_F(CSMetricTest, OnRequestTest) { ASSERT_EQ(0, cpWriteMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpWriteMetric->errorNum_.get_value()); - // 统计读取成功的情况 + // Statistics on successful reads metric_->OnRequest(logicId, copysetId, CSIOMetricType::READ_CHUNK); ASSERT_EQ(1, serverReadMetric->reqNum_.get_value()); ASSERT_EQ(0, serverReadMetric->ioNum_.get_value()); @@ -279,7 +279,7 @@ TEST_F(CSMetricTest, OnRequestTest) { ASSERT_EQ(0, cpReadMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpReadMetric->errorNum_.get_value()); - // 统计恢复成功的情况 + // Statistics on successful recovery metric_->OnRequest(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK); ASSERT_EQ(1, serverRecoverMetric->reqNum_.get_value()); ASSERT_EQ(0, serverRecoverMetric->ioNum_.get_value()); @@ -290,7 +290,7 @@ TEST_F(CSMetricTest, OnRequestTest) { ASSERT_EQ(0, cpRecoverMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpRecoverMetric->errorNum_.get_value()); - // 统计paste成功的情况 + // Count successful pastes metric_->OnRequest(logicId, copysetId, CSIOMetricType::PASTE_CHUNK); ASSERT_EQ(1, serverPasteMetric->reqNum_.get_value()); ASSERT_EQ(0, serverPasteMetric->ioNum_.get_value()); @@ -301,7 +301,7 @@ TEST_F(CSMetricTest, OnRequestTest) { ASSERT_EQ(0, cpPasteMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpPasteMetric->errorNum_.get_value()); - // 统计下载成功的情况 + // Statistics on successful downloads metric_->OnRequest(logicId, copysetId, CSIOMetricType::DOWNLOAD); ASSERT_EQ(1, serverDownloadMetric->reqNum_.get_value()); ASSERT_EQ(0, serverDownloadMetric->ioNum_.get_value()); @@ -318,7 +318,8 @@ TEST_F(CSMetricTest, OnResponseTest) { int rc = metric_->CreateCopysetMetric(logicId, copysetId); ASSERT_EQ(rc, 0); - CopysetMetricPtr copysetMetric = metric_->GetCopysetMetric(logicId, copysetId); //
NOLINT + CopysetMetricPtr copysetMetric = + metric_->GetCopysetMetric(logicId, copysetId); // NOLINT ASSERT_NE(copysetMetric, nullptr); const IOMetricPtr serverWriteMetric = @@ -345,9 +346,9 @@ TEST_F(CSMetricTest, OnResponseTest) { size_t size = PAGE_SIZE; int64_t latUs = 100; bool hasError = false; - // 统计写入成功的情况 - metric_->OnResponse( - logicId, copysetId, CSIOMetricType::WRITE_CHUNK, size, latUs, hasError); + // Count successful writes + metric_->OnResponse(logicId, copysetId, CSIOMetricType::WRITE_CHUNK, size, + latUs, hasError); ASSERT_EQ(0, serverWriteMetric->reqNum_.get_value()); ASSERT_EQ(1, serverWriteMetric->ioNum_.get_value()); ASSERT_EQ(PAGE_SIZE, serverWriteMetric->ioBytes_.get_value()); @@ -357,9 +358,9 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpWriteMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpWriteMetric->errorNum_.get_value()); - // 统计读取成功的情况 - metric_->OnResponse( - logicId, copysetId, CSIOMetricType::READ_CHUNK, size, latUs, hasError); + // Statistics on successful reads + metric_->OnResponse(logicId, copysetId, CSIOMetricType::READ_CHUNK, size, + latUs, hasError); ASSERT_EQ(0, serverReadMetric->reqNum_.get_value()); ASSERT_EQ(1, serverReadMetric->ioNum_.get_value()); ASSERT_EQ(PAGE_SIZE, serverReadMetric->ioBytes_.get_value()); @@ -369,9 +370,9 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpReadMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpReadMetric->errorNum_.get_value()); - // 统计恢复成功的情况 - metric_->OnResponse(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK, - size, latUs, hasError); + // Statistics on successful recovery + metric_->OnResponse(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK, size, + latUs, hasError); ASSERT_EQ(0, serverRecoverMetric->reqNum_.get_value()); ASSERT_EQ(1, serverRecoverMetric->ioNum_.get_value()); ASSERT_EQ(PAGE_SIZE, serverRecoverMetric->ioBytes_.get_value()); @@ -381,9 +382,9 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpRecoverMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpRecoverMetric->errorNum_.get_value()); - // 统计paste成功的情况 - metric_->OnResponse( - logicId, copysetId, CSIOMetricType::PASTE_CHUNK, size, latUs, hasError); + // Count successful pastes + metric_->OnResponse(logicId, copysetId, CSIOMetricType::PASTE_CHUNK, size, + latUs, hasError); ASSERT_EQ(0, serverPasteMetric->reqNum_.get_value()); ASSERT_EQ(1, serverPasteMetric->ioNum_.get_value()); ASSERT_EQ(PAGE_SIZE, serverPasteMetric->ioBytes_.get_value()); @@ -393,9 +394,9 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpPasteMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpPasteMetric->errorNum_.get_value()); - // 统计下载成功的情况 - metric_->OnResponse( - logicId, copysetId, CSIOMetricType::DOWNLOAD, size, latUs, hasError); + // Statistics on successful downloads + metric_->OnResponse(logicId, copysetId, CSIOMetricType::DOWNLOAD, size, + latUs, hasError); ASSERT_EQ(0, serverDownloadMetric->reqNum_.get_value()); ASSERT_EQ(1, serverDownloadMetric->ioNum_.get_value()); ASSERT_EQ(PAGE_SIZE, serverDownloadMetric->ioBytes_.get_value()); @@ -406,9 +407,10 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(0, cpDownloadMetric->errorNum_.get_value()); hasError = true; - // 统计写入失败的情况,错误数增加,其他不变 - metric_->OnResponse( - logicId, copysetId, CSIOMetricType::WRITE_CHUNK, size, latUs, hasError); + // On a failed write the error count increases and everything else + // stays unchanged + metric_->OnResponse(logicId, copysetId, CSIOMetricType::WRITE_CHUNK, size, + latUs, hasError); ASSERT_EQ(0,
    ASSERT_EQ(0, serverWriteMetric->reqNum_.get_value());
     ASSERT_EQ(1, serverWriteMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, serverWriteMetric->ioBytes_.get_value());
@@ -418,9 +420,10 @@ TEST_F(CSMetricTest, OnResponseTest) {
     ASSERT_EQ(PAGE_SIZE, cpWriteMetric->ioBytes_.get_value());
     ASSERT_EQ(1, cpWriteMetric->errorNum_.get_value());

-    // 统计读取失败的情况,错误数增加,其他不变
-    metric_->OnResponse(
-        logicId, copysetId, CSIOMetricType::READ_CHUNK, size, latUs, hasError);
+    // A failed read bumps only the error count; everything else is
+    // unchanged
+    metric_->OnResponse(logicId, copysetId, CSIOMetricType::READ_CHUNK, size,
+                        latUs, hasError);
     ASSERT_EQ(0, serverReadMetric->reqNum_.get_value());
     ASSERT_EQ(1, serverReadMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, serverReadMetric->ioBytes_.get_value());
@@ -430,9 +433,9 @@ TEST_F(CSMetricTest, OnResponseTest) {
     ASSERT_EQ(PAGE_SIZE, cpReadMetric->ioBytes_.get_value());
     ASSERT_EQ(1, cpReadMetric->errorNum_.get_value());

-    // 统计恢复失败的情况
-    metric_->OnResponse(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK,
-                        size, latUs, hasError);
+    // Count failed recovers
+    metric_->OnResponse(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK, size,
+                        latUs, hasError);
     ASSERT_EQ(0, serverRecoverMetric->reqNum_.get_value());
     ASSERT_EQ(1, serverRecoverMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, serverRecoverMetric->ioBytes_.get_value());
@@ -442,9 +445,9 @@ TEST_F(CSMetricTest, OnResponseTest) {
     ASSERT_EQ(PAGE_SIZE, cpRecoverMetric->ioBytes_.get_value());
     ASSERT_EQ(1, cpRecoverMetric->errorNum_.get_value());

-    // 统计paste失败的情况
-    metric_->OnResponse(
-        logicId, copysetId, CSIOMetricType::PASTE_CHUNK, size, latUs, hasError);
+    // Count failed pastes
+    metric_->OnResponse(logicId, copysetId, CSIOMetricType::PASTE_CHUNK, size,
+                        latUs, hasError);
     ASSERT_EQ(0, serverPasteMetric->reqNum_.get_value());
     ASSERT_EQ(1, serverPasteMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, serverPasteMetric->ioBytes_.get_value());
@@ -454,9 +457,9 @@ TEST_F(CSMetricTest, OnResponseTest) {
     ASSERT_EQ(PAGE_SIZE, cpPasteMetric->ioBytes_.get_value());
     ASSERT_EQ(1, cpPasteMetric->errorNum_.get_value());

-    // 统计下载失败的情况
-    metric_->OnResponse(
-        logicId, copysetId, CSIOMetricType::DOWNLOAD, size, latUs, hasError);
+    // Count failed downloads
+    metric_->OnResponse(logicId, copysetId, CSIOMetricType::DOWNLOAD, size,
+                        latUs, hasError);
     ASSERT_EQ(0, serverDownloadMetric->reqNum_.get_value());
     ASSERT_EQ(1, serverDownloadMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, serverDownloadMetric->ioBytes_.get_value());
@@ -468,19 +471,21 @@ TEST_F(CSMetricTest, OnResponseTest) {
 }

 TEST_F(CSMetricTest, CountTest) {
-    // 初始状态下,没有copyset,FilePool中有chunkNum个chunk
+    // Initially there are no copysets, and the FilePool holds chunkNum
+    // chunks
     ASSERT_EQ(0, metric_->GetCopysetCount());
     ASSERT_EQ(10, metric_->GetChunkLeftCount());
     // Shared with chunk file pool
     ASSERT_EQ(0, metric_->GetWalSegmentLeftCount());

-    // 创建copyset
+    // Create a copyset
     Configuration conf;
     CopysetID copysetId = 1;
     ASSERT_TRUE(copysetMgr_->CreateCopysetNode(logicId, copysetId, conf));
     ASSERT_EQ(1, metric_->GetCopysetCount());
-    // 此时copyset下面没有chunk和快照
-    CopysetMetricPtr copysetMetric = metric_->GetCopysetMetric(logicId, copysetId);  // NOLINT
+    // At this point there are no chunks or snapshots under the copyset
+    CopysetMetricPtr copysetMetric =
+        metric_->GetCopysetMetric(logicId, copysetId);  // NOLINT
     ASSERT_EQ(0, copysetMetric->GetChunkCount());
    ASSERT_EQ(0, copysetMetric->GetSnapshotCount());
     ASSERT_EQ(0, copysetMetric->GetCloneChunkCount());
@@ -522,7 +527,8 @@ TEST_F(CSMetricTest, CountTest) {
     ASSERT_EQ(10, metric_->GetWalSegmentLeftCount());
     ASSERT_TRUE(copysetMgr_->CreateCopysetNode(logicId, copysetId2, conf));
     ASSERT_EQ(2, metric_->GetCopysetCount());
-    CopysetMetricPtr copysetMetric2 = metric_->GetCopysetMetric(logicId, copysetId2);  // NOLINT
+    CopysetMetricPtr copysetMetric2 =
+        metric_->GetCopysetMetric(logicId, copysetId2);  // NOLINT
     ASSERT_EQ(0, copysetMetric2->GetWalSegmentCount());
     ASSERT_EQ(1, metric_->GetTotalWalSegmentCount());
@@ -534,7 +540,7 @@ TEST_F(CSMetricTest, CountTest) {
     ASSERT_EQ(1, copysetMetric2->GetWalSegmentCount());
     ASSERT_EQ(2, metric_->GetTotalWalSegmentCount());

-    // 写入数据生成chunk
+    // Write data to generate a chunk
     std::shared_ptr<CSDataStore> datastore =
         copysetMgr_->GetCopysetNode(logicId, copysetId)->GetDataStore();
     ChunkID id = 1;
@@ -553,7 +559,7 @@ TEST_F(CSMetricTest, CountTest) {
     ASSERT_EQ(0, metric_->GetTotalSnapshotCount());
     ASSERT_EQ(0, metric_->GetTotalCloneChunkCount());

-    // 增加版本号,生成快照
+    // Bump the version number to generate a snapshot
     seq = 2;
     ASSERT_EQ(CSErrorCode::Success,
               datastore->WriteChunk(id, seq, dataBuf, offset, length, nullptr));
     ASSERT_EQ(1, copysetMetric->GetSnapshotCount());
     ASSERT_EQ(0, copysetMetric->GetCloneChunkCount());

-    // 删除快照
+    // Delete the snapshot
     ASSERT_EQ(CSErrorCode::Success,
               datastore->DeleteSnapshotChunkOrCorrectSn(id, seq));
     ASSERT_EQ(1, copysetMetric->GetChunkCount());
     ASSERT_EQ(0, copysetMetric->GetSnapshotCount());
     ASSERT_EQ(0, copysetMetric->GetCloneChunkCount());

-    // 创建 clone chunk
+    // Create clone chunks
     ChunkID id2 = 2;
     ChunkID id3 = 3;
     std::string location = "test@cs";
@@ -580,7 +586,8 @@ TEST_F(CSMetricTest, CountTest) {
     ASSERT_EQ(0, copysetMetric->GetSnapshotCount());
     ASSERT_EQ(2, copysetMetric->GetCloneChunkCount());

-    // clone chunk被覆盖写一遍,clone chun转成普通chunk
+    // Overwrite the clone chunk completely, converting it into a regular
+    // chunk
     char* buf2 = new char[CHUNK_SIZE];
     butil::IOBuf dataBuf2;
     dataBuf2.append(buf2, CHUNK_SIZE);
@@ -591,15 +598,14 @@ TEST_F(CSMetricTest, CountTest) {
     ASSERT_EQ(0, copysetMetric->GetSnapshotCount());
     ASSERT_EQ(1, copysetMetric->GetCloneChunkCount());

-    // 删除上面的chunk
-    ASSERT_EQ(CSErrorCode::Success,
-              datastore->DeleteChunk(id2, 1));
+    // Delete the chunk above
+    ASSERT_EQ(CSErrorCode::Success, datastore->DeleteChunk(id2, 1));
     ASSERT_EQ(2, copysetMetric->GetChunkCount());
     ASSERT_EQ(0, copysetMetric->GetSnapshotCount());
     ASSERT_EQ(1, copysetMetric->GetCloneChunkCount());

-    // 模拟copyset重新加载datastore,重新初始化后,chunk数量不变
-    // for bug fix: CLDCFS-1473
+    // Simulate the copyset reloading the datastore: after reinitialization
+    // the chunk count stays unchanged (for bug fix: CLDCFS-1473)
     datastore->Initialize();
     ASSERT_EQ(2, copysetMetric->GetChunkCount());
     ASSERT_EQ(0, copysetMetric->GetSnapshotCount());
@@ -608,7 +614,7 @@ TEST_F(CSMetricTest, CountTest) {
     ASSERT_EQ(0, metric_->GetTotalSnapshotCount());
     ASSERT_EQ(1, metric_->GetTotalCloneChunkCount());

-    // 模拟copyset放入回收站测试
+    // Simulate moving the copysets into the recycle bin
     ASSERT_TRUE(copysetMgr_->PurgeCopysetNodeData(logicId, copysetId));
     ASSERT_TRUE(copysetMgr_->PurgeCopysetNodeData(logicId, copysetId2));
     ASSERT_EQ(nullptr, metric_->GetCopysetMetric(logicId, copysetId));
@@ -619,7 +625,7 @@ TEST_F(CSMetricTest, CountTest) {
     // copysetId2: 1(wal)
     ASSERT_EQ(4, metric_->GetChunkTrashedCount());

-    // 测试leader count计数
+    // Test the leader count
     ASSERT_EQ(0, metric_->GetLeaderCount());
     metric_->IncreaseLeaderCount();
     ASSERT_EQ(1, metric_->GetLeaderCount());
@@ -639,11 +645,11 @@ TEST_F(CSMetricTest, ConfigTest) {
                  "{\"conf_name\":\"chunksize\",\"conf_value\":\"1234\"}");
     ASSERT_STREQ(bvar::Variable::describe_exposed(prefix + "timeout").c_str(),
                  "{\"conf_name\":\"timeout\",\"conf_value\":\"100\"}");
-    // 修改新增配置信息
+    // Modify the newly added configuration items
     conf.SetStringValue("chunksize", "4321");
     conf.SetStringValue("port", "9999");
     metric_->ExposeConfigMetric(&conf);
-    // // 验证修改后信息
+    // Verify the modified configuration
     ASSERT_STREQ(bvar::Variable::describe_exposed(prefix + "chunksize").c_str(),
                  "{\"conf_name\":\"chunksize\",\"conf_value\":\"4321\"}");
     ASSERT_STREQ(bvar::Variable::describe_exposed(prefix + "timeout").c_str(),
@@ -657,7 +663,7 @@ TEST_F(CSMetricTest, OnOffTest) {
     ChunkServerMetricOptions metricOptions;
     metricOptions.port = PORT;
     metricOptions.ip = IP;
-    // 关闭metric开关后进行初始化
+    // Initialize after turning off the metric switch
     {
         metricOptions.collectMetric = false;
         ASSERT_EQ(0, metric_->Init(metricOptions));
@@ -669,7 +675,7 @@ TEST_F(CSMetricTest, OnOffTest) {
         ASSERT_EQ(ret, true);
         metric_->ExposeConfigMetric(&conf);
     }
-    // 初始化后获取所有指标项都为空
+    // After initialization, every metric getter returns null or zero
     {
         ASSERT_EQ(metric_->GetIOMetric(CSIOMetricType::READ_CHUNK), nullptr);
         ASSERT_EQ(metric_->GetIOMetric(CSIOMetricType::WRITE_CHUNK), nullptr);
@@ -685,7 +691,8 @@ TEST_F(CSMetricTest, OnOffTest) {
         ASSERT_EQ(metric_->GetTotalCloneChunkCount(), 0);
         ASSERT_EQ(metric_->GetTotalWalSegmentCount(), 0);
     }
-    // 创建copyset的metric返回成功,但实际并未创建
+    // Creating the copyset metric returns success, but nothing is
+    // actually created
     {
         CopysetID copysetId = 1;
         ASSERT_EQ(0, metric_->CreateCopysetMetric(logicId, copysetId));
         ASSERT_EQ(nullptr, metric_->GetCopysetMetric(logicId, copysetId));
         metric_->OnRequest(logicId, copysetId, CSIOMetricType::WRITE_CHUNK);
         metric_->OnResponse(logicId, copysetId, CSIOMetricType::WRITE_CHUNK,
                             PAGE_SIZE, 100, false);
         ASSERT_EQ(0, metric_->RemoveCopysetMetric(logicId, copysetId));
     }
-    // 增加leader count,但是实际未计数
+    // Increasing the leader count does not actually count anything
     {
         metric_->IncreaseLeaderCount();
         ASSERT_EQ(metric_->GetLeaderCount(), 0);
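OnOffTest encodes the behaviour of the collectMetric switch: initialization still succeeds with the switch off, but every getter returns null or zero and every update is silently dropped. A hedged sketch of that guard pattern — hypothetical types, not the real ChunkServerMetric implementation:

#include <cstdint>

struct MetricOptionsSketch {
    bool collectMetric = false;
};

class ChunkServerMetricSketch {
 public:
    int Init(const MetricOptionsSketch& opt) {
        enabled_ = opt.collectMetric;
        return 0;  // Init reports success even when collection is disabled
    }
    void IncreaseLeaderCount() {
        if (!enabled_) return;  // switch off: the update is a no-op
        ++leaderCount_;
    }
    uint32_t GetLeaderCount() const {
        return enabled_ ? leaderCount_ : 0;  // switch off: always zero
    }

 private:
    bool enabled_ = false;
    uint32_t leaderCount_ = 0;
};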
diff --git a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp
index b33d196d95..1329b919a6 100644
--- a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp
+++ b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp
@@ -20,38 +20,38 @@
 * Author: tongguangxun
 */

-#include
-#include
 #include
 #include
+#include
+#include
 #include

-#include "src/fs/local_filesystem.h"
-#include "test/fs/mock_local_filesystem.h"
 #include "src/chunkserver/datastore/file_pool.h"
 #include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h"
 #include "src/chunkserver/raftsnapshot/define.h"
+#include "src/fs/local_filesystem.h"
+#include "test/fs/mock_local_filesystem.h"

 using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::DoAll;
+using ::testing::ElementsAre;
 using ::testing::Ge;
 using ::testing::Gt;
 using ::testing::Mock;
-using ::testing::DoAll;
+using ::testing::NotNull;
 using ::testing::Return;
+using ::testing::ReturnArg;
 using ::testing::ReturnPointee;
-using ::testing::NotNull;
-using ::testing::StrEq;
-using ::testing::ElementsAre;
 using ::testing::SetArgPointee;
-using ::testing::ReturnArg;
 using ::testing::SetArgReferee;
-using ::testing::AtLeast;
+using ::testing::StrEq;

+using curve::chunkserver::FilePool;
 using curve::fs::FileSystemType;
 using curve::fs::LocalFileSystem;
 using curve::fs::LocalFsFactory;
-using curve::chunkserver::FilePool;
 using curve::fs::MockLocalFileSystem;

 namespace curve {
 namespace chunkserver {
@@ -63,7 +63,7 @@ class RaftSnapshotFilesystemAdaptorMockTest : public testing::Test {
 public:
    void SetUp() {
        fsptr = curve::fs::LocalFsFactory::CreateFs(
-                        curve::fs::FileSystemType::EXT4, "/dev/sda");
+            curve::fs::FileSystemType::EXT4, "/dev/sda");
        FilePoolPtr_ = std::make_shared<FilePool>(fsptr);
        ASSERT_TRUE(FilePoolPtr_);
@@ -146,32 +146,33 @@ class RaftSnapshotFilesystemAdaptorMockTest : public testing::Test {
    }

    scoped_refptr<CurveFilesystemAdaptor> fsadaptor;
-    std::shared_ptr<FilePool>  FilePoolPtr_;
-    std::shared_ptr<LocalFileSystem>  fsptr;
-    std::shared_ptr<MockLocalFileSystem>  lfs;
-    CurveFilesystemAdaptor*  rfa;
+    std::shared_ptr<FilePool> FilePoolPtr_;
+    std::shared_ptr<LocalFileSystem> fsptr;
+    std::shared_ptr<MockLocalFileSystem> lfs;
+    CurveFilesystemAdaptor* rfa;
 };

 TEST_F(RaftSnapshotFilesystemAdaptorMockTest, open_file_mock_test) {
-    // 1. open flag不带CREAT, open失败
+    // 1. Open flag without CREAT: open fails
     CreateChunkFile("./10");
     std::string path = "./10";
     butil::File::Error e;
     ASSERT_EQ(FilePoolPtr_->Size(), 3);
     EXPECT_CALL(*lfs, Open(_, _)).Times(AtLeast(1)).WillRepeatedly(Return(-1));
-    braft::FileAdaptor* fa = fsadaptor->open(path,
-                                             O_RDONLY | O_CLOEXEC,
-                                             nullptr,
-                                             &e);
+    braft::FileAdaptor* fa =
+        fsadaptor->open(path, O_RDONLY | O_CLOEXEC, nullptr, &e);
     ASSERT_EQ(FilePoolPtr_->Size(), 3);
     ASSERT_EQ(nullptr, fa);

-    // 2. open flag带CREAT, 从FilePool取文件,但是FilePool打开文件失败
-    // 所以还是走原有逻辑,本地创建文件成功
-    EXPECT_CALL(*lfs, Open(_, _)).Times(3).WillOnce(Return(-1))
-                                          .WillOnce(Return(-1))
-                                          .WillOnce(Return(-1));
+    // 2. Open flag with CREAT: a file is taken from the FilePool, but the
+    // FilePool fails to open it, so the original logic is used and the
+    // file is created locally
+    EXPECT_CALL(*lfs, Open(_, _))
+        .Times(3)
+        .WillOnce(Return(-1))
+        .WillOnce(Return(-1))
+        .WillOnce(Return(-1));
     EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(0));
     ASSERT_EQ(FilePoolPtr_->Size(), 3);
     path = "./11";
@@ -182,7 +183,8 @@ TEST_F(RaftSnapshotFilesystemAdaptorMockTest, open_file_mock_test) {
     ASSERT_FALSE(fsptr->FileExists("./10"));
     ASSERT_EQ(nullptr, fa);

-    // 3. 待创建文件在Filter中,但是直接本地创建该文件,创建成功
+    // 3. The file to be created is on the filter list, so it is created
+    // locally, and creation succeeds
     EXPECT_CALL(*lfs, Open(_, _)).Times(1).WillOnce(Return(0));
     EXPECT_CALL(*lfs, FileExists(_)).Times(0);
     path = BRAFT_SNAPSHOT_META_FILE;
@@ -191,14 +193,16 @@ TEST_F(RaftSnapshotFilesystemAdaptorMockTest, open_file_mock_test) {
 }

 TEST_F(RaftSnapshotFilesystemAdaptorMockTest, delete_file_mock_test) {
-    // 1. 删除文件,文件存在且在过滤名单里,但delete失败,返回false
+    // 1. Delete a file that exists and is on the filter list, but Delete
+    // fails, so false is returned
     EXPECT_CALL(*lfs, DirExists(_)).Times(1).WillRepeatedly(Return(false));
     EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(true));
     EXPECT_CALL(*lfs, Delete(_)).Times(1).WillRepeatedly(Return(-1));
     bool ret = fsadaptor->delete_file(BRAFT_SNAPSHOT_META_FILE, true);
     ASSERT_FALSE(ret);

-    // 2. 删除文件,文件存在且不在过滤名单里,但recycle chunk失败,返回false
+    // 2. Delete a file that exists and is not on the filter list, but
+    // recycling the chunk fails, so false is returned
     EXPECT_CALL(*lfs, Delete(_)).Times(1).WillRepeatedly(Return(-1));
     EXPECT_CALL(*lfs, DirExists(_)).Times(1).WillRepeatedly(Return(false));
     EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(true));
@@ -206,29 +210,35 @@ TEST_F(RaftSnapshotFilesystemAdaptorMockTest, delete_file_mock_test) {
     ret = fsadaptor->delete_file("temp", true);
     ASSERT_FALSE(ret);

-    // 3. 删除目录,文件存在且不在过滤名单里,但recycle chunk失败,返回false
+    // 3. Delete a directory that exists and is not on the filter list, but
+    // recycling the chunk fails, so false is returned
     std::vector<std::string> dircontent;
     dircontent.push_back("/2");
     dircontent.push_back("/1");
     dircontent.push_back(BRAFT_SNAPSHOT_META_FILE);
-    EXPECT_CALL(*lfs, DirExists(_)).Times(2).WillOnce(Return(true))
-                                            .WillOnce(Return(false));
+    EXPECT_CALL(*lfs, DirExists(_))
+        .Times(2)
+        .WillOnce(Return(true))
+        .WillOnce(Return(false));
     EXPECT_CALL(*lfs, Delete(_)).Times(1).WillRepeatedly(Return(-1));
     EXPECT_CALL(*lfs, Open(_, _)).Times(1).WillRepeatedly(Return(-1));
-    EXPECT_CALL(*lfs, List(_, _)).Times(2).WillRepeatedly(DoAll(
-                        SetArgPointee<1>(dircontent), Return(-1)));
+    EXPECT_CALL(*lfs, List(_, _))
+        .Times(2)
+        .WillRepeatedly(DoAll(SetArgPointee<1>(dircontent), Return(-1)));
     ret = fsadaptor->delete_file("1", true);
     ASSERT_FALSE(ret);
 }
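The expectations in these mock tests rely on two gMock idioms worth keeping apart: a WillOnce chain consumes one queued action per call, while WillRepeatedly answers every remaining call the same way. A tiny self-contained example with a hypothetical MockFs (not the project's MockLocalFileSystem):

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <string>

class Fs {
 public:
    virtual ~Fs() = default;
    virtual int Open(const std::string& path) = 0;
};

class MockFs : public Fs {
 public:
    MOCK_METHOD(int, Open, (const std::string&), (override));
};

TEST(GmockIdioms, SequencedVersusRepeated) {
    using ::testing::_;
    using ::testing::Return;

    MockFs fs;
    // Exactly two calls, answered in order: first fails, then succeeds.
    EXPECT_CALL(fs, Open(_))
        .Times(2)
        .WillOnce(Return(-1))
        .WillOnce(Return(0));
    EXPECT_EQ(-1, fs.Open("a"));
    EXPECT_EQ(0, fs.Open("b"));
}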

 TEST_F(RaftSnapshotFilesystemAdaptorMockTest, rename_mock_test) {
-    // 1. 重命名文件,文件存在且在过滤名单里,但Rename失败,返回false
+    // 1. Rename a file whose target is on the filter list, but Rename
+    // fails, so false is returned
     EXPECT_CALL(*lfs, Rename(_, _, _)).Times(1).WillRepeatedly(Return(-1));
     EXPECT_CALL(*lfs, FileExists(_)).Times(0);
     bool ret = fsadaptor->rename("1", BRAFT_SNAPSHOT_META_FILE);
     ASSERT_FALSE(ret);

-    // 2. 重命名文件,文件存在且不在过滤名单里,但Rename失败,返回false
+    // 2. Rename a file whose target exists and is not on the filter list:
+    // the old target is recycled first and the rename then succeeds
     EXPECT_CALL(*lfs, Rename(_, _, _)).Times(1).WillRepeatedly(Return(0));
     EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(true));
     EXPECT_CALL(*lfs, Open(_, _)).Times(1).WillRepeatedly(Return(0));
@@ -237,5 +247,5 @@ TEST_F(RaftSnapshotFilesystemAdaptorMockTest, rename_mock_test) {
     ASSERT_TRUE(ret);
 }

-} // namespace chunkserver
-} // namespace curve
+}  // namespace chunkserver
+}  // namespace curve
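Case 2 of open_file_mock_test above fixes a fallback order: with CREAT set, the adaptor first tries to take a pre-allocated file from the FilePool, and only when that fails does it fall back to creating the file through the plain filesystem. A compact sketch of that order, under stated assumptions (hypothetical callables, not the adaptor's real signatures — tryPool() stands in for taking a pooled chunk, createLocal() for the original local-create logic):

#include <functional>

// Returns true when the open ultimately succeeds.
bool OpenWithCreateSketch(const std::function<bool()>& tryPool,
                          const std::function<bool()>& createLocal) {
    if (tryPool()) {
        return true;       // preferred path: reuse a pooled chunk file
    }
    return createLocal();  // fallback: create the file locally
}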
diff --git a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp
index 926ccc76c5..a7de21c7fe 100644
--- a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp
+++ b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp
@@ -20,20 +20,21 @@
 * Author: tongguangxun
*/

-#include
+#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h"
+
 #include
 #include
+#include
 #include

-#include "src/fs/local_filesystem.h"
 #include "src/chunkserver/datastore/file_pool.h"
-#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h"
 #include "src/chunkserver/raftsnapshot/define.h"
+#include "src/fs/local_filesystem.h"

+using curve::chunkserver::FilePool;
 using curve::fs::FileSystemType;
 using curve::fs::LocalFileSystem;
-using curve::chunkserver::FilePool;

 namespace curve {
 namespace chunkserver {
@@ -44,7 +45,7 @@ class CurveFilesystemAdaptorTest : public testing::Test {
 public:
    void SetUp() {
        fsptr = curve::fs::LocalFsFactory::CreateFs(
-                        curve::fs::FileSystemType::EXT4, "/dev/sda");
+            curve::fs::FileSystemType::EXT4, "/dev/sda");
        chunkFilePoolPtr_ = std::make_shared<FilePool>(fsptr);
        ASSERT_TRUE(chunkFilePoolPtr_);
@@ -124,42 +125,39 @@ class CurveFilesystemAdaptorTest : public testing::Test {
    }

    scoped_refptr<CurveFilesystemAdaptor> fsadaptor;
-    std::shared_ptr<FilePool>  chunkFilePoolPtr_;
-    std::shared_ptr<LocalFileSystem>  fsptr;
-    CurveFilesystemAdaptor*  rfa;
+    std::shared_ptr<FilePool> chunkFilePoolPtr_;
+    std::shared_ptr<LocalFileSystem> fsptr;
+    CurveFilesystemAdaptor* rfa;
 };

 TEST_F(CurveFilesystemAdaptorTest, open_file_test) {
-    // 1. open flag不带CREAT
+    // 1. Open flag without CREAT
     std::string path = "./raftsnap/10";
     butil::File::Error e;
     ASSERT_EQ(chunkFilePoolPtr_->Size(), 3);
-    braft::FileAdaptor* fa = fsadaptor->open(path,
-                                             O_RDONLY | O_CLOEXEC,
-                                             nullptr,
-                                             &e);
+    braft::FileAdaptor* fa =
+        fsadaptor->open(path, O_RDONLY | O_CLOEXEC, nullptr, &e);
     ASSERT_EQ(chunkFilePoolPtr_->Size(), 3);
     ASSERT_FALSE(fsptr->FileExists("./raftsnap/10"));
     ASSERT_EQ(nullptr, fa);

-    // 2. open flag待CREAT, 从FilePool取文件
+    // 2. Open flag with CREAT: a file is taken from the FilePool
     ASSERT_EQ(chunkFilePoolPtr_->Size(), 3);
     fa = fsadaptor->open(path, O_RDONLY | O_CLOEXEC | O_CREAT, nullptr, &e);
     ASSERT_EQ(chunkFilePoolPtr_->Size(), 2);
     ASSERT_TRUE(fsptr->FileExists("./raftsnap/10"));
     ASSERT_NE(nullptr, fa);

-    // 3. open flag待CREAT,FilePool为空时,从FilePool取文件
+    // 3. Open flag with CREAT: when the FilePool is empty, taking a file
+    // from the FilePool fails
     ClearFilePool();
-    fa = fsadaptor->open("./raftsnap/11",
-                         O_RDONLY | O_CLOEXEC | O_CREAT,
-                         nullptr,
-                         &e);
+    fa = fsadaptor->open("./raftsnap/11", O_RDONLY | O_CLOEXEC | O_CREAT,
+                         nullptr, &e);
     ASSERT_EQ(nullptr, fa);
 }

 TEST_F(CurveFilesystemAdaptorTest, delete_file_test) {
-    // 1. 创建一个多层目录,且目录中含有chunk文件
+    // 1. Create a multi-level directory containing chunk files
     ASSERT_EQ(0, fsptr->Mkdir("./test_temp"));
     ASSERT_EQ(0, fsptr->Mkdir("./test_temp/test_temp1"));
     ASSERT_EQ(0, fsptr->Mkdir("./test_temp/test_temp1/test_temp2"));
@@ -169,11 +167,11 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) {
     CreateChunkFile("./test_temp/test_temp1/2");
     CreateChunkFile("./test_temp/test_temp1/test_temp2/1");
     CreateChunkFile("./test_temp/test_temp1/test_temp2/2");
-    // 非递归删除非空文件夹,返回false
+    // Non-recursive deletion of a non-empty folder returns false
     ASSERT_EQ(chunkFilePoolPtr_->Size(), 3);
     ASSERT_FALSE(fsadaptor->delete_file("./test_temp", false));
     ASSERT_EQ(chunkFilePoolPtr_->Size(), 3);
-    // 递归删除文件夹,chunk被回收到FilePool
+    // Recursively delete the folder; the chunks are recycled into the
+    // FilePool
     ASSERT_TRUE(fsadaptor->delete_file("./test_temp", true));
     ASSERT_EQ(chunkFilePoolPtr_->Size(), 9);
     ASSERT_FALSE(fsptr->DirExists("./test_temp"));
@@ -186,7 +184,7 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) {
     ASSERT_FALSE(fsptr->FileExists("./test_temp/test_temp1/test_temp2/1"));
     ASSERT_FALSE(fsptr->FileExists("./test_temp/test_temp1/test_temp2/2"));

-    // 2. 创建一个单层空目录
+    // 2. Create a single-level empty directory
     ASSERT_EQ(0, fsptr->Mkdir("./test_temp3"));
     ASSERT_TRUE(fsadaptor->delete_file("./test_temp3", false));
     ASSERT_EQ(0, fsptr->Mkdir("./test_temp4"));
@@ -195,7 +193,7 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) {
     ASSERT_FALSE(fsptr->DirExists("./test_temp3"));
     ASSERT_FALSE(fsptr->DirExists("./test_temp4"));

-    // 3. 删除一个常规chunk文件, 会被回收到FilePool
+    // 3. Deleting a regular chunk file recycles it into the FilePool
     ASSERT_EQ(0, fsptr->Mkdir("./test_temp5"));
     CreateChunkFile("./test_temp5/3");
     ASSERT_TRUE(fsadaptor->delete_file("./test_temp5/3", false));
@@ -211,8 +209,7 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) {
     ASSERT_EQ(0, fsptr->Delete("./test_temp5"));
     ASSERT_EQ(0, fsptr->Delete("./test_temp6"));

-
-    // 4. 删除一个非chunk大小的文件,会直接删除该文件
+    // 4. Deleting a file that is not chunk-sized deletes it directly
     ASSERT_EQ(0, fsptr->Mkdir("./test_temp7"));
     int fd = fsptr->Open("./test_temp7/5", O_RDWR | O_CREAT);
     char data[4096];
@@ -226,12 +223,13 @@
 }
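delete_file_test above pins the recycling rules down: a non-empty directory needs recursive deletion, a chunk-sized file goes back into the FilePool instead of being unlinked, and anything else is deleted outright. A self-contained sketch of that decision with hypothetical names (the real logic lives in CurveFilesystemAdaptor::delete_file):

#include <cstdint>

constexpr uint64_t kChunkSize = 16ULL * 1024 * 1024;  // assumed pool file size

enum class DeleteAction {
    kReject,   // non-empty dir, recursive deletion not requested
    kRecurse,  // walk the directory and handle each entry
    kRecycle,  // chunk-sized file: hand it back to the FilePool
    kUnlink,   // anything else: plain filesystem delete
};

DeleteAction ClassifySketch(bool isDir, bool dirEmpty, bool recursive,
                            uint64_t fileSize) {
    if (isDir) {
        if (!dirEmpty && !recursive) return DeleteAction::kReject;
        return DeleteAction::kRecurse;
    }
    return fileSize == kChunkSize ? DeleteAction::kRecycle
                                  : DeleteAction::kUnlink;
}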
 TEST_F(CurveFilesystemAdaptorTest, rename_test) {
-    // 1. 创建一个多层目录,且目录中含有chunk文件
+    // 1. Create a multi-level directory containing chunk files
     ASSERT_EQ(0, fsptr->Mkdir("./test_temp"));
     std::string filename = "./test_temp/";
     filename.append(BRAFT_SNAPSHOT_META_FILE);

-    // 目标文件size是chunksize,但是目标文件在过滤名单里,所以直接过滤
+    // The target file is chunk-sized but on the filter list, so it is
+    // filtered directly
     CreateChunkFile(filename);
     int poolSize = chunkFilePoolPtr_->Size();
     std::string temppath = "./temp";
@@ -243,7 +241,8 @@ TEST_F(CurveFilesystemAdaptorTest, rename_test) {
     ASSERT_EQ(poolSize - 1, chunkFilePoolPtr_->Size());
     ASSERT_EQ(0, fsptr->Delete(filename));

-    // 目标文件size是chunksize,但是目标文件不在过滤名单里,所以先回收再rename
+    // The target file is chunk-sized and not on the filter list, so it is
+    // recycled first and then renamed
     filename = "./test_temp/";
     filename.append("test");
     CreateChunkFile(filename);
@@ -254,9 +253,8 @@ TEST_F(CurveFilesystemAdaptorTest, rename_test) {
     ASSERT_TRUE(fsptr->FileExists(filename));
     ASSERT_EQ(0, fsptr->Delete(filename));

-
     ASSERT_EQ(0, fsptr->Delete("./test_temp"));
 }

-} // namespace chunkserver
-} // namespace curve
+}  // namespace chunkserver
+}  // namespace curve
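A detail both adaptor tests above rely on: names on the snapshot filter list (such as BRAFT_SNAPSHOT_META_FILE) bypass the FilePool entirely and are left to the plain filesystem, and the comparison is against the file's basename, so a filtered name still matches inside a subdirectory. A small sketch of that check (hypothetical helper, not the adaptor's real method):

#include <set>
#include <string>

bool IsFilteredSketch(const std::set<std::string>& filterList,
                      const std::string& path) {
    const auto pos = path.find_last_of('/');
    const std::string basename =
        (pos == std::string::npos) ? path : path.substr(pos + 1);
    return filterList.count(basename) > 0;  // filtered: skip recycling
}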
diff --git a/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp b/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp
index 9e3ca39605..8b72b7f84e 100644
--- a/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp
+++ b/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp
@@ -20,12 +20,14 @@
 * Author: yangyaokai
*/

-#include
+#include "src/chunkserver/raftsnapshot/curve_snapshot_attachment.h"
+
 #include
+#include
+
 #include
 #include

-#include "src/chunkserver/raftsnapshot/curve_snapshot_attachment.h"
 #include "test/fs/mock_local_filesystem.h"

 namespace curve {
@@ -34,11 +36,11 @@ namespace chunkserver {

 using curve::fs::MockLocalFileSystem;
 using ::testing::_;
-using ::testing::Return;
-using ::testing::Mock;
 using ::testing::DoAll;
-using ::testing::ReturnArg;
 using ::testing::ElementsAre;
+using ::testing::Mock;
+using ::testing::Return;
+using ::testing::ReturnArg;
 using ::testing::SetArgPointee;
 using ::testing::UnorderedElementsAre;

@@ -53,13 +55,14 @@ class CurveSnapshotAttachmentMockTest : public testing::Test {
            new CurveSnapshotAttachment(fs_));
    }
    void TearDown() {}
+
 protected:
    std::shared_ptr<MockLocalFileSystem> fs_;
    scoped_refptr<CurveSnapshotAttachment> attachment_;
 };

 TEST_F(CurveSnapshotAttachmentMockTest, ListTest) {
-    // 返回成功
+    // Return success
    vector<std::string> fileNames;
    fileNames.emplace_back("chunk_1");
    fileNames.emplace_back("chunk_1_snap_1");
@@ -69,24 +72,21 @@ TEST_F(CurveSnapshotAttachmentMockTest, ListTest) {
    vector<std::string> snapFiles;
    attachment_->list_attach_files(&snapFiles, kRaftSnapDir);

-    std::string snapPath1 =
-        "../../data/chunk_1_snap_1";
-    std::string snapPath2 =
-        "../../data/chunk_2_snap_1";
-    EXPECT_THAT(snapFiles, UnorderedElementsAre(snapPath1.c_str(),
-                                                snapPath2.c_str()));
+    std::string snapPath1 = "../../data/chunk_1_snap_1";
+    std::string snapPath2 = "../../data/chunk_2_snap_1";
+    EXPECT_THAT(snapFiles,
+                UnorderedElementsAre(snapPath1.c_str(), snapPath2.c_str()));

-    // 路径结尾添加反斜杠
+    // Append a trailing slash to the path
    EXPECT_CALL(*fs_, List(kDataDir, _))
        .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0)));
    attachment_->list_attach_files(&snapFiles, std::string(kRaftSnapDir) + "/");
-    EXPECT_THAT(snapFiles, UnorderedElementsAre(snapPath1.c_str(),
-                                                snapPath2.c_str()));
-    // 返回失败
-    EXPECT_CALL(*fs_, List(kDataDir, _))
-        .WillRepeatedly(Return(-1));
+    EXPECT_THAT(snapFiles,
+                UnorderedElementsAre(snapPath1.c_str(), snapPath2.c_str()));
+    // Return failure
+    EXPECT_CALL(*fs_, List(kDataDir, _)).WillRepeatedly(Return(-1));
    ASSERT_DEATH(attachment_->list_attach_files(&snapFiles, kRaftSnapDir), "");
 }

-} // namespace chunkserver
-} // namespace curve
+}  // namespace chunkserver
+}  // namespace curve
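ListTest above fixes the selection rule for attach files: from a data-directory listing, only snapshot chunks named like chunk_<id>_snap_<sn> are kept, and they are returned as paths relative to the raft snapshot directory. A runnable sketch of that filter (std::regex here is illustrative; the real CurveSnapshotAttachment may parse names differently):

#include <iostream>
#include <regex>
#include <string>
#include <vector>

std::vector<std::string> PickSnapshotChunks(
    const std::vector<std::string>& names) {
    static const std::regex kSnapChunk("chunk_\\d+_snap_\\d+");
    std::vector<std::string> out;
    for (const auto& name : names) {
        if (std::regex_match(name, kSnapChunk)) {
            out.push_back("../../data/" + name);  // relative to snapshot dir
        }
    }
    return out;
}

int main() {
    // Mirrors the listing used in ListTest: plain chunks are skipped.
    for (const auto& p :
         PickSnapshotChunks({"chunk_1", "chunk_1_snap_1", "chunk_2_snap_1"})) {
        std::cout << p << "\n";
    }
    return 0;
}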
diff --git a/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp b/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp
index 94bcc4d5a8..66891bc031 100644
--- a/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp
+++ b/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp
@@ -21,23 +21,23 @@
*/

 #include
-#include
 #include
+#include
 #include

-#include "test/chunkserver/chunkserver_test_util.h"
-#include "src/chunkserver/copyset_node_manager.h"
 #include "src/chunkserver/cli.h"
+#include "src/chunkserver/copyset_node_manager.h"
 #include "src/fs/fs_common.h"
 #include "src/fs/local_filesystem.h"
+#include "test/chunkserver/chunkserver_test_util.h"

 namespace curve {
 namespace chunkserver {

+using curve::fs::FileSystemType;
 using curve::fs::LocalFileSystem;
 using curve::fs::LocalFsFactory;
-using curve::fs::FileSystemType;

 static constexpr uint32_t kOpRequestAlignSize = 4096;

@@ -61,7 +61,7 @@ class RaftSnapFilePoolTest : public testing::Test {
        Exec(TestCluster::RemoveCopysetDirCmd(peer2).c_str());
        Exec(TestCluster::RemoveCopysetDirCmd(peer3).c_str());
        Exec(TestCluster::RemoveCopysetDirCmd(peer4).c_str());
-        ::usleep(100*1000);
+        ::usleep(100 * 1000);
    }

 public:
@@ -74,26 +74,22 @@ class RaftSnapFilePoolTest : public testing::Test {
 };

 /**
- * TODO(wudemiao) 后期将发 I/O 和验证再抽象一下
+ * TODO(wudemiao): abstract the I/O and verification further later on
 */

 /**
- * 正常 I/O 验证,先写进去,再读出来验证
- * @param leaderId 主的 id
- * @param logicPoolId 逻辑池 id
- * @param copysetId 复制组 id
- * @param chunkId chunk id
- * @param length 每次 IO 的 length
- * @param fillCh 每次 IO 填充的字符
- * @param loop 重复发起 IO 的次数
+ * Normal I/O verification: write the data in first, then read it back to
+ * verify it
+ * @param leaderId Leader ID
+ * @param logicPoolId Logical pool ID
+ * @param copysetId Replication group ID
+ * @param chunkId Chunk ID
+ * @param length Length of each IO
+ * @param fillCh Character each IO is filled with
+ * @param loop Number of times the IO is repeated
 */
-static void WriteThenReadVerify(PeerId leaderId,
-                                LogicPoolID logicPoolId,
-                                CopysetID copysetId,
-                                ChunkID chunkId,
-                                int length,
-                                char fillCh,
-                                int loop) {
+static void WriteThenReadVerify(PeerId leaderId, LogicPoolID logicPoolId,
+                                CopysetID copysetId, ChunkID chunkId,
+                                int length, char fillCh, int loop) {
    brpc::Channel* channel = new brpc::Channel;
    uint64_t sn = 1;
    ASSERT_EQ(0, channel->Init(leaderId.addr, NULL));
@@ -108,18 +104,16 @@ static void WriteThenReadVerify(PeerId leaderId,
        request.set_logicpoolid(logicPoolId);
        request.set_copysetid(copysetId);
        request.set_chunkid(chunkId);
-        request.set_offset(length*i);
+        request.set_offset(length * i);
        request.set_size(length);
        request.set_sn(sn);
        cntl.request_attachment().resize(length, fillCh);
        ChunkService_Stub stub(channel);
        stub.WriteChunk(&cntl, &request, &response, nullptr);
-        LOG_IF(INFO, cntl.Failed()) << "error msg: "
-                                    << cntl.ErrorCode() << " : "
-                                    << cntl.ErrorText();
+        LOG_IF(INFO, cntl.Failed())
+            << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText();
        ASSERT_FALSE(cntl.Failed());
-        if (response.status() ==
-            CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED) {
+        if (response.status() == CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED) {
            std::string redirect = response.redirect();
            leaderId.parse(redirect);
            delete channel;
@@ -127,8 +121,7 @@ static void WriteThenReadVerify(PeerId leaderId,
            ASSERT_EQ(0, channel->Init(leaderId.addr, NULL));
            continue;
        }
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  response.status());
+        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status());

        // read
        {
@@ -140,13 +133,12 @@ static void WriteThenReadVerify(PeerId leaderId,
            request.set_logicpoolid(logicPoolId);
            request.set_copysetid(copysetId);
            request.set_chunkid(chunkId);
-            request.set_offset(length*i);
+            request.set_offset(length * i);
            request.set_size(length);
            request.set_sn(sn);
            stub.ReadChunk(&cntl, &request, &response, nullptr);
-            LOG_IF(INFO, cntl.Failed()) << "error msg: "
-                                        << cntl.ErrorCode() << " : "
-                                        << cntl.ErrorText();
+            LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode()
+                                        << " : " << cntl.ErrorText();
            ASSERT_FALSE(cntl.Failed());
            ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
                      response.status());
@@ -158,22 +150,18 @@ static void WriteThenReadVerify(PeerId leaderId,
 }

 /**
- * 正常 I/O 验证,read 数据验证
- * @param leaderId 主的 id
- * @param logicPoolId 逻辑池 id
- * @param copysetId 复制组 id
+ * Normal I/O verification: read the data back and verify it
+ * @param leaderId Leader ID
+ * @param logicPoolId Logical pool ID
+ * @param copysetId Replication group ID
 * @param chunkId chunk id
- * @param length 每次 IO 的 length
- * @param fillCh 每次 IO 填充的字符
- * @param loop 重复发起 IO 的次数
+ * @param length Length of each IO
+ * @param fillCh Character each IO is filled with
+ * @param loop Number of times the IO is repeated
 */
-static void ReadVerify(PeerId leaderId,
-                       LogicPoolID logicPoolId,
-                       CopysetID copysetId,
-                       ChunkID chunkId,
-                       int length,
-                       char fillCh,
-                       int loop) {
+static void ReadVerify(PeerId leaderId, LogicPoolID logicPoolId,
+                       CopysetID copysetId, ChunkID chunkId, int length,
+                       char fillCh, int loop) {
    brpc::Channel channel;
    uint64_t sn = 1;
    ASSERT_EQ(0, channel.Init(leaderId.addr, NULL));
@@ -187,16 +175,14 @@ static void ReadVerify(PeerId leaderId,
        request.set_logicpoolid(logicPoolId);
        request.set_copysetid(copysetId);
        request.set_chunkid(chunkId);
-        request.set_offset(length*i);
+        request.set_offset(length * i);
        request.set_size(length);
        request.set_sn(sn);
        stub.ReadChunk(&cntl, &request, &response, nullptr);
-        LOG_IF(INFO, cntl.Failed()) << "error msg: "
-                                    << cntl.ErrorCode() << " : "
-                                    << cntl.ErrorText();
+        LOG_IF(INFO, cntl.Failed())
+            << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText();
        ASSERT_FALSE(cntl.Failed());
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  response.status());
+        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status());
        std::string expectRead(length, fillCh);
        ASSERT_STREQ(expectRead.c_str(),
                     cntl.response_attachment().to_string().c_str());
@@ -204,18 +190,23 @@ static void ReadVerify(PeerId leaderId,
 }

 /**
- * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复
- * 1. 创建3个副本的复制组
- * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍
- * 3. shutdown 非 leader
- * 4. 然后 sleep 超过一个 snapshot interval,write read 数据
- * 5. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步
- * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot,
- * 因为 log 已经被删除了, install snapshot的数据从FilePool中取文件
- * 6. 等待 leader 产生,然后 read 之前写入的数据验证一遍
- * 7. transfer leader 到shut down 的peer 上
- * 8. 在 read 之前写入的数据验证
- * 9. 
再 write 数据,再 read 出来验证一遍 + * Verify the shutdown and restart of non-leader nodes in a cluster of 3 nodes, + * and control them to recover from installing snapshots. + * 1. Create a replication group with 3 replicas. + * 2. Wait for the leader to emerge, write data, and then read to verify. + * 3. Shutdown a non-leader node. + * 4. Sleep for a duration longer than a snapshot interval, then write and read + * data. + * 5. Sleep for a duration longer than a snapshot interval again, then write and + * read data. Steps 4 and 5 are to ensure that at least two snapshots are taken. + * Therefore, when the node restarts, it must recover via an install snapshot + * because the log has already been deleted. The data for the install snapshot + * is retrieved from the FilePool. + * 6. Wait for the leader to emerge, then read the previously written data for + * verification. + * 7. Transfer leadership to the shut down peer. + * 8. Verify the data written before the transfer of leadership. + * 9. Write data again, then read it to verify. */ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -238,75 +229,67 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, cluster.StartPeer(peer2, false, true, true)); ASSERT_EQ(0, cluster.StartPeer(peer3, false, true, true)); - // 等待FilePool创建成功 + // Waiting for FilePool creation to succeed std::this_thread::sleep_for(std::chrono::seconds(60)); PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 获取三个chunkserver的FilePool的pool容量 - std::shared_ptr fs(LocalFsFactory::CreateFs( - FileSystemType::EXT4, "")); + // Obtain the pool capacity of FilePool for three chunkservers + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); std::vector Peer1ChunkPoolSize; std::vector Peer2ChunkPoolSize; std::vector Peer3ChunkPoolSize; std::string copysetdir1, copysetdir2, copysetdir3; - butil::string_printf(©setdir1, - "./%s-%d-%d", - butil::ip2str(peer1.addr.ip).c_str(), - peer1.addr.port, + butil::string_printf(©setdir1, "./%s-%d-%d", + butil::ip2str(peer1.addr.ip).c_str(), peer1.addr.port, 0); - butil::string_printf(©setdir2, - "./%s-%d-%d", - butil::ip2str(peer2.addr.ip).c_str(), - peer2.addr.port, + butil::string_printf(©setdir2, "./%s-%d-%d", + butil::ip2str(peer2.addr.ip).c_str(), peer2.addr.port, 0); - butil::string_printf(©setdir3, - "./%s-%d-%d", - butil::ip2str(peer3.addr.ip).c_str(), - peer3.addr.port, + butil::string_printf(©setdir3, "./%s-%d-%d", + butil::ip2str(peer3.addr.ip).c_str(), peer3.addr.port, 0); - fs->List(copysetdir1+"/chunkfilepool", &Peer1ChunkPoolSize); - fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); - fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); + fs->List(copysetdir1 + "/chunkfilepool", &Peer1ChunkPoolSize); + fs->List(copysetdir2 + "/chunkfilepool", &Peer2ChunkPoolSize); + fs->List(copysetdir3 + "/chunkfilepool", &Peer3ChunkPoolSize); - // 目前只有chunk文件才会从FilePool中取 - // raft snapshot meta 和 conf epoch文件直接从文件系统创建 + // Currently, only chunk files are retrieved from FilePool + // raft snapshot meta and conf epoch files are created directly from the + // file system ASSERT_EQ(20, Peer1ChunkPoolSize.size()); ASSERT_EQ(20, Peer2ChunkPoolSize.size()); ASSERT_EQ(20, Peer3ChunkPoolSize.size()); LOG(INFO) << "write 1 start"; - // 发起 read/write, 写数据会触发chunkserver从FilePool取chunk - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write, writing data will 
trigger the chunkserver to fetch
+    // chunks from the FilePool
-    WriteThenReadVerify(leaderId,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch,
+    WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch,
                         loop);
     LOG(INFO) << "write 1 end";
-    // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作
-    // 所以先睡一会,防止并发统计文件信息
-    ::sleep(1*snapshotTimeoutS);
+    // Operations across raft replicas are not fully synchronized and some
+    // replicas may lag behind, so sleep briefly first to avoid collecting
+    // file statistics while operations are still in flight
+    ::sleep(1 * snapshotTimeoutS);

     Peer1ChunkPoolSize.clear();
     Peer2ChunkPoolSize.clear();
     Peer3ChunkPoolSize.clear();
-    fs->List(copysetdir1+"/chunkfilepool", &Peer1ChunkPoolSize);
-    fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize);
-    fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize);
+    fs->List(copysetdir1 + "/chunkfilepool", &Peer1ChunkPoolSize);
+    fs->List(copysetdir2 + "/chunkfilepool", &Peer2ChunkPoolSize);
+    fs->List(copysetdir3 + "/chunkfilepool", &Peer3ChunkPoolSize);

-    // 写完数据后,ChunkFilePool容量少一个
+    // After writing the data, the ChunkFilePool holds one fewer chunk
     ASSERT_EQ(19, Peer1ChunkPoolSize.size());
     ASSERT_EQ(19, Peer2ChunkPoolSize.size());
     ASSERT_EQ(19, Peer3ChunkPoolSize.size());

-    // shutdown 某个非 leader 的 peer
+    // Shut down a non-leader peer
     PeerId shutdownPeerid;
-    if (0 == ::strcmp(leaderId.to_string().c_str(),
-                      peer1.to_string().c_str())) {
+    if (0 ==
+        ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) {
         shutdownPeerid = peer2;
     } else {
         shutdownPeerid = peer1;
@@ -317,68 +300,61 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) {
                            leaderId.to_string().c_str()));
     ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid));

-    // wait snapshot, 保证能够触发打快照
-    // 本次打快照,raft会从FilePool取一个文件作为快照文件
-    // 然后会把上一次的快照文件删除,删除过的文件会被回收到FilePool
-    // 所以总体上本次写入只会导致datastore从FilePool取文件
-    // 但是快照取了一个又放回去了一个
-    ::sleep(1.5*snapshotTimeoutS);
-    // 再次发起 read/write
+    // Wait for the snapshot interval so that snapshot creation is triggered.
+    // For this snapshot, raft takes a file from the FilePool as the snapshot
+    // file and deletes the previous snapshot file; the deleted file is
+    // reclaimed into the FilePool. So overall this write only causes the
+    // datastore to take one file from the FilePool: the snapshot takes one
+    // file and returns another.
+    ::sleep(1.5 * snapshotTimeoutS);
+    // Initiate read/write again
     LOG(INFO) << "write 2 start";
-    WriteThenReadVerify(leaderId,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch + 1,
-                        loop);
+    WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length,
+                        ch + 1, loop);
     LOG(INFO) << "write 2 end";

-    ::sleep(1*snapshotTimeoutS);
+    ::sleep(1 * snapshotTimeoutS);
     Peer1ChunkPoolSize.clear();
     Peer2ChunkPoolSize.clear();
     Peer3ChunkPoolSize.clear();
-    fs->List(copysetdir1+"/chunkfilepool", &Peer1ChunkPoolSize);
-    fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize);
-    fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize);
+    fs->List(copysetdir1 + "/chunkfilepool", &Peer1ChunkPoolSize);
+    fs->List(copysetdir2 + "/chunkfilepool", &Peer2ChunkPoolSize);
+    fs->List(copysetdir3 + "/chunkfilepool", &Peer3ChunkPoolSize);

-    // 写完数据后,FilePool容量少一个
+    // After writing the data, the FilePool holds one fewer chunk
     ASSERT_EQ(19, Peer1ChunkPoolSize.size());
     ASSERT_EQ(19, Peer2ChunkPoolSize.size());
     ASSERT_EQ(19, Peer3ChunkPoolSize.size());

-    // wait snapshot, 保证能够触发打快照
-    // 本次打快照,raft会从FilePool取一个文件作为快照文件
-    // 然后会把上一次的快照文件删除,删除过的文件会被回收到FilePool
-    // 所以总体上本次写入只会导致datastore从FilePool取文件
-    // 但是快照取了一个又放回去了一个
-    ::sleep(1.5*snapshotTimeoutS);
-    // 再次发起 read/write
+    // Wait for the snapshot interval so that snapshot creation is triggered.
+    // For this snapshot, raft takes a file from the FilePool as the snapshot
+    // file and deletes the previous snapshot file; the deleted file is
+    // reclaimed into the FilePool. So overall this write only causes the
+    // datastore to take one file from the FilePool: the snapshot takes one
+    // file and returns another.
+    ::sleep(1.5 * snapshotTimeoutS);
+    // Initiate read/write again
     LOG(INFO) << "write 3 start";
-    // 增加chunkid,使chunkserver端的chunk又被取走一个
-    WriteThenReadVerify(leaderId,
-                        logicPoolId,
-                        copysetId,
-                        chunkId + 1,
-                        length,
-                        ch + 2,
-                        loop);
+    // Bump the chunkid so that another chunk is taken on the chunkserver
+    // side
+    WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId + 1, length,
+                        ch + 2, loop);
     LOG(INFO) << "write 3 end";

     ::sleep(snapshotTimeoutS);
     Peer1ChunkPoolSize.clear();
     Peer2ChunkPoolSize.clear();
     Peer3ChunkPoolSize.clear();
-    fs->List(copysetdir1+"/chunkfilepool", &Peer1ChunkPoolSize);
-    fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize);
-    fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize);
+    fs->List(copysetdir1 + "/chunkfilepool", &Peer1ChunkPoolSize);
+    fs->List(copysetdir2 + "/chunkfilepool", &Peer2ChunkPoolSize);
+    fs->List(copysetdir3 + "/chunkfilepool", &Peer3ChunkPoolSize);

     LOG(INFO) << "chunk pool1 size = " << Peer1ChunkPoolSize.size();
     LOG(INFO) << "chunk pool2 size = " << Peer2ChunkPoolSize.size();
     LOG(INFO) << "chunk pool3 size = " << Peer3ChunkPoolSize.size();

-    // 写完数据后,FilePool容量少一个
+    // After writing the data, the FilePool holds one fewer chunk
     if (shutdownPeerid == peer1) {
         ASSERT_EQ(19, Peer1ChunkPoolSize.size());
         ASSERT_EQ(18, Peer2ChunkPoolSize.size());
@@ -388,22 +364,17 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) {
     }
     ASSERT_EQ(18, Peer3ChunkPoolSize.size());

-    // restart, 需要从 install snapshot 恢复
+    // Restart; the peer has to recover via install snapshot
     ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid, false, true, false));
     ASSERT_EQ(0, cluster.WaitLeader(&leaderId));
-    // 读出来验证一遍
-    ReadVerify(leaderId, logicPoolId, copysetId, chunkId + 1,
-               length, ch + 2, loop);
+    // Read the data back and verify it
+    ReadVerify(leaderId, logicPoolId, copysetId, chunkId + 1, length, ch + 2,
+               loop);

     LOG(INFO) << "write 4 start";
-    // 再次发起 read/write
-    WriteThenReadVerify(leaderId,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch + 3,
-                        loop);
+    // Initiate read/write again
+    WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length,
+                        ch + 3, loop);

     LOG(INFO) << "write 4 end";
@@ -416,10 +387,7 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) {
     const int kMaxLoop = 10;
     butil::Status status;
     for (int i = 0; i < kMaxLoop; ++i) {
-        status = TransferLeader(logicPoolId,
-                                copysetId,
-                                conf,
-                                shutdownPeerid,
+        status = TransferLeader(logicPoolId, copysetId, conf, shutdownPeerid,
                                 options);
         if (0 == status.error_code()) {
             cluster.WaitLeader(&leaderId);
@@ -433,20 +401,21 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) {
     ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(),
                           shutdownPeerid.to_string().c_str()));

-    ::sleep(5*snapshotTimeoutS);
+    ::sleep(5 * snapshotTimeoutS);

     Peer1ChunkPoolSize.clear();
     Peer2ChunkPoolSize.clear();
     Peer3ChunkPoolSize.clear();
-    fs->List(copysetdir1+"/chunkfilepool", &Peer1ChunkPoolSize);
-    fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize);
-    fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize);
+    fs->List(copysetdir1 + "/chunkfilepool", &Peer1ChunkPoolSize);
+    fs->List(copysetdir2 + "/chunkfilepool", &Peer2ChunkPoolSize);
+    fs->List(copysetdir3 + "/chunkfilepool", &Peer3ChunkPoolSize);

     LOG(INFO) << "chunk pool1 size = " << Peer1ChunkPoolSize.size();
     LOG(INFO) << "chunk pool2 size = " << Peer2ChunkPoolSize.size();
     LOG(INFO) << "chunk pool3 size = " << Peer3ChunkPoolSize.size();

-    // 当前的raftsnapshot filesystem只存取chunk文件
-    // meta文件遵守原有逻辑,直接通过文件系统创建,所以这里只有两个chunk被取出
+    // The current raftsnapshot filesystem only takes chunk files from the
+    // pool; meta files follow the original logic and are created directly
+    // through the filesystem, so only two chunks were taken here
     ASSERT_EQ(18, Peer1ChunkPoolSize.size());
     ASSERT_EQ(18, Peer2ChunkPoolSize.size());
     ASSERT_EQ(18, Peer3ChunkPoolSize.size());
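The transfer-leader loop at the end of the test above is an ordinary bounded-retry pattern: attempt the operation up to kMaxLoop times, sleeping between attempts, and re-check the outcome each round. Distilled into a generic helper (a sketch, not project code):

#include <chrono>
#include <functional>
#include <thread>

// Returns true as soon as op() succeeds, false once attempts are exhausted.
bool RetryBounded(int maxAttempts, std::chrono::seconds backoff,
                  const std::function<bool()>& op) {
    for (int i = 0; i < maxAttempts; ++i) {
        if (op()) {
            return true;
        }
        std::this_thread::sleep_for(backoff);
    }
    return false;
}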
diff --git a/test/chunkserver/server.cpp b/test/chunkserver/server.cpp
index d6f5d9aa97..50f6f46c1d 100644
--- a/test/chunkserver/server.cpp
+++ b/test/chunkserver/server.cpp
@@ -20,20 +20,20 @@
 * Author: wudemiao
*/

-#include
-#include
-#include
 #include
+#include
+#include
+#include

-#include "src/chunkserver/copyset_node_manager.h"
-#include "src/chunkserver/copyset_node.h"
 #include "src/chunkserver/chunk_service.h"
-#include "src/fs/fs_common.h"
-#include "src/fs/local_filesystem.h"
 #include "src/chunkserver/concurrent_apply/concurrent_apply.h"
+#include "src/chunkserver/copyset_node.h"
+#include "src/chunkserver/copyset_node_manager.h"
 #include "src/chunkserver/datastore/file_pool.h"
-#include "src/common/uri_parser.h"
 #include "src/chunkserver/raftsnapshot/curve_snapshot_storage.h"
+#include "src/common/uri_parser.h"
+#include "src/fs/fs_common.h"
+#include "src/fs/local_filesystem.h"

 using curve::chunkserver::ConcurrentApplyModule;
 using curve::chunkserver::Configuration;
@@ -42,6 +42,7 @@ using curve::chunkserver::CopysetNodeManager;
 using curve::chunkserver::CopysetNodeOptions;
 using curve::chunkserver::FilePool;
 using curve::chunkserver::FilePoolHelper;
+using curve::chunkserver::FilePoolMeta;
 using curve::chunkserver::FilePoolOptions;
 using curve::chunkserver::LogicPoolID;
 using curve::chunkserver::PeerId;
@@ -52,9 +53,6 @@ using curve::common::UriParser;
 using curve::fs::FileSystemType;
 using curve::fs::LocalFileSystem;
 using curve::fs::LocalFsFactory;
-using curve::fs::FileSystemType;
-using curve::chunkserver::FilePoolHelper;
-using curve::chunkserver::FilePoolMeta;

 DEFINE_string(ip, "127.0.0.1",
               "Initial configuration of the replication group");
@@ -73,7 +71,7 @@ DEFINE_bool(create_chunkfilepool, true, "create chunkfile pool");

 butil::AtExitManager atExitManager;

-void CreateChunkFilePool(const std::string &dirname, uint64_t chunksize,
+void CreateChunkFilePool(const std::string& dirname, uint64_t chunksize,
                          std::shared_ptr<LocalFileSystem> fsptr) {
     std::string datadir = dirname + "/chunkfilepool";
     std::string metapath = dirname + "/chunkfilepool.meta";
@@ -110,7 +108,7 @@ void CreateChunkFilePool(const std::string &dirname, uint64_t chunksize,
     memcpy(cpopt.metaPath, metapath.c_str(), metapath.size());

     FilePoolMeta meta;
-    meta.chunkSize    = cpopt.fileSize;
+    meta.chunkSize = cpopt.fileSize;
     meta.metaPageSize = cpopt.metaFileSize;
     meta.hasBlockSize = true;
     meta.blockSize = cpopt.blockSize;
@@ -120,7 +118,7 @@ void CreateChunkFilePool(const std::string &dirname, uint64_t chunksize,
     (void)FilePoolHelper::PersistEnCodeMetaInfo(fsptr, meta, metapath);
 }

-int main(int argc, char *argv[]) {
+int main(int argc, char* argv[]) {
     gflags::ParseCommandLineFlags(&argc, &argv, true);

     /* Generally you only need one Server. */
@@ -142,7 +140,8 @@ int main(int argc, char *argv[]) {
     std::shared_ptr<LocalFileSystem> fs(
         LocalFsFactory::CreateFs(FileSystemType::EXT4, ""));
     const uint32_t kMaxChunkSize = 16 * 1024 * 1024;
-    // TODO(yyk) 这部分实现不太优雅,后续进行重构
+    // TODO(yyk): this part of the implementation is not elegant and will be
+    // refactored later
     std::string copysetUri = FLAGS_copyset_dir + "/copysets";
     CopysetNodeOptions copysetNodeOptions;
     copysetNodeOptions.ip = FLAGS_ip;
diff --git a/test/chunkserver/trash_test.cpp b/test/chunkserver/trash_test.cpp
index 3ddf32f27e..2c28a6015c 100644
--- a/test/chunkserver/trash_test.cpp
+++ b/test/chunkserver/trash_test.cpp
@@ -512,7 +512,7 @@ TEST_F(TrashTest, recycle_wal_failed) {
                            "curve_log_inprogress_10088"))
         .WillOnce(Return(-1));

-    // 失败的情况下不应删除
+    // The file must not be deleted when recycling fails
     EXPECT_CALL(*lfs, Delete("./runlog/trash_test0/trash/4294967493.55555"))
         .Times(0);

@@ -556,8 +556,7 @@ TEST_F(TrashTest, recycle_copyset_dir_list_err) {
         .WillOnce(Return(false));
     EXPECT_CALL(*lfs, Mkdir(trashPath)).WillOnce(Return(0));
     EXPECT_CALL(*lfs, Rename(dirPath, _, 0)).WillOnce(Return(0));
-    EXPECT_CALL(*lfs, List(_, _))
-        .WillOnce(Return(-1));
+    EXPECT_CALL(*lfs, List(_, _)).WillOnce(Return(-1));

     ASSERT_EQ(0, trash->RecycleCopySet(dirPath));
 }
@@ -569,8 +568,7 @@ TEST_F(TrashTest, recycle_copyset_dir_ok) {
         .WillOnce(Return(false));
     EXPECT_CALL(*lfs, Mkdir(trashPath)).WillOnce(Return(0));
     EXPECT_CALL(*lfs, Rename(dirPath, _, 0)).WillOnce(Return(0));
-    EXPECT_CALL(*lfs, List(_, _))
-        .WillOnce(Return(0));
+    EXPECT_CALL(*lfs, List(_, _)).WillOnce(Return(0));

     ASSERT_EQ(0, trash->RecycleCopySet(dirPath));
 }
@@ -607,18 +605,18 @@ TEST_F(TrashTest, test_chunk_num_statistic) {
     // chunk_200_snap_1, abc +1
     // log/

-    using item4list = struct{
+    using item4list = struct {
         std::string subdir;
         std::vector<std::string>& names;
     };
     std::vector<item4list> action4List{
-        { "", copysets },
-        { "/4294967493.55555", dirs},
-        { "/4294967493.55555/data", chunks1 },
-        { "/4294967493.55555/log", logfiles1 },
-        { "/4294967494.55555", dirs},
-        { "/4294967494.55555/data", chunks2 },
-        { "/4294967494.55555/log", logfiles2 },
+        {"", copysets},
+        {"/4294967493.55555", dirs},
+        {"/4294967493.55555/data", 
chunks1}, + {"/4294967493.55555/log", logfiles1}, + {"/4294967494.55555", dirs}, + {"/4294967494.55555/data", chunks2}, + {"/4294967494.55555/log", logfiles2}, }; for (auto& it : action4List) { @@ -627,18 +625,18 @@ TEST_F(TrashTest, test_chunk_num_statistic) { } EXPECT_CALL(*lfs, DirExists(_)) - .WillOnce(Return(true)) // data - .WillOnce(Return(false)) // chunk_100 - .WillOnce(Return(false)) // chunk_101 - .WillOnce(Return(true)) // log - .WillOnce(Return(false)) // curve_log_10086_10087 - .WillOnce(Return(false)) // curve_log_inprogress_10088_10088 - .WillOnce(Return(false)) // log_10083_10084 - .WillOnce(Return(false)) // log_inprogress_10085 - .WillOnce(Return(true)) // data - .WillOnce(Return(false)) // chunk_200_snap_1 - .WillOnce(Return(false)) // abc - .WillOnce(Return(true)); // log + .WillOnce(Return(true)) // data + .WillOnce(Return(false)) // chunk_100 + .WillOnce(Return(false)) // chunk_101 + .WillOnce(Return(true)) // log + .WillOnce(Return(false)) // curve_log_10086_10087 + .WillOnce(Return(false)) // curve_log_inprogress_10088_10088 + .WillOnce(Return(false)) // log_10083_10084 + .WillOnce(Return(false)) // log_inprogress_10085 + .WillOnce(Return(true)) // data + .WillOnce(Return(false)) // chunk_200_snap_1 + .WillOnce(Return(false)) // abc + .WillOnce(Return(true)); // log trash->Init(ops); ASSERT_EQ(5, trash->GetChunkNum()); @@ -657,14 +655,14 @@ TEST_F(TrashTest, test_chunk_num_statistic) { EXPECT_CALL(*lfs, DirExists(_)) .WillOnce(Return(true)) .WillOnce(Return(false)) - .WillOnce(Return(true)) // data + .WillOnce(Return(true)) // data .WillOnce(Return(false)) .WillOnce(Return(false)) - .WillOnce(Return(true)) // log + .WillOnce(Return(true)) // log .WillOnce(Return(false)) - .WillOnce(Return(true)) // raft_snapshot - .WillOnce(Return(true)) // temp - .WillOnce(Return(true)) // data + .WillOnce(Return(true)) // raft_snapshot + .WillOnce(Return(true)) // temp + .WillOnce(Return(true)) // data .WillOnce(Return(false)); std::string trashedCopysetDir = "/trash_test0/copysets/4294967495"; @@ -695,21 +693,21 @@ TEST_F(TrashTest, test_chunk_num_statistic) { std::vector raftfiles{RAFT_DATA_DIR, RAFT_LOG_DIR}; // DirExists - using item4dirExists = struct{ + using item4dirExists = struct { std::string subdir; bool exist; }; std::vector action4DirExists{ - { "", true }, - { "/4294967493.55555", true }, - { "/4294967493.55555/data", true }, - { "/4294967493.55555/log", true }, - { "/4294967493.55555/data/chunk_100", false }, - { "/4294967493.55555/data/chunk_101", false }, - { "/4294967493.55555/log/curve_log_10086_10087", false }, - { "/4294967493.55555/log/curve_log_inprogress_10088", false }, - { "/4294967493.55555/log/log_10083_10084", false }, - { "/4294967493.55555/log/log_inprogress_10085", false }, + {"", true}, + {"/4294967493.55555", true}, + {"/4294967493.55555/data", true}, + {"/4294967493.55555/log", true}, + {"/4294967493.55555/data/chunk_100", false}, + {"/4294967493.55555/data/chunk_101", false}, + {"/4294967493.55555/log/curve_log_10086_10087", false}, + {"/4294967493.55555/log/curve_log_inprogress_10088", false}, + {"/4294967493.55555/log/log_10083_10084", false}, + {"/4294967493.55555/log/log_inprogress_10085", false}, }; for (auto& it : action4DirExists) { @@ -719,10 +717,10 @@ TEST_F(TrashTest, test_chunk_num_statistic) { // List std::vector action4List2{ - { "", copysets }, - { "/4294967493.55555", raftfiles }, - { "/4294967493.55555/data", chunks1 }, - { "/4294967493.55555/log", logfiles1 }, + {"", copysets}, + {"/4294967493.55555", raftfiles}, + 
{"/4294967493.55555/data", chunks1},
+        {"/4294967493.55555/log", logfiles1},
     };

     for (auto& it : action4List2) {
@@ -735,16 +733,16 @@ TEST_F(TrashTest, test_chunk_num_statistic) {
     SetCopysetNeedDelete(trashPath + "/" + copysets[2], notNeedDelete);

     // RecycleFile
-    using item4CycleFile = struct{
+    using item4CycleFile = struct {
         std::shared_ptr<FilePool> pool;
         std::string subdir;
         int ret;
     };
     std::vector<item4CycleFile> action4CycleFile{
-        { pool, "/4294967493.55555/data/chunk_100", 0 },
-        { pool, "/4294967493.55555/data/chunk_101", -1 },
-        { walPool, "/4294967493.55555/log/curve_log_10086_10087", 0 },
-        { walPool, "/4294967493.55555/log/curve_log_inprogress_10088", -1 },
+        {pool, "/4294967493.55555/data/chunk_100", 0},
+        {pool, "/4294967493.55555/data/chunk_101", -1},
+        {walPool, "/4294967493.55555/log/curve_log_10086_10087", 0},
+        {walPool, "/4294967493.55555/log/curve_log_inprogress_10088", -1},
     };

     for (auto& it : action4CycleFile) {
diff --git a/test/client/client_common_unittest.cpp b/test/client/client_common_unittest.cpp
index d7601e19f1..6052bf93f1 100644
--- a/test/client/client_common_unittest.cpp
+++ b/test/client/client_common_unittest.cpp
@@ -20,28 +20,29 @@
 * Author: tongguangxun
*/

-#include
-
 #include "src/client/client_common.h"

+#include
+
 namespace curve {
 namespace client {

 TEST(ClientCommon, PeerAddrTest) {
-    // 默认构造函数创建的成员变量内容为空
+    // The member variables created by the default constructor are empty
     PeerAddr chunkaddr;
     ASSERT_TRUE(chunkaddr.IsEmpty());

     EndPoint ep;
     str2endpoint("127.0.0.1:8000", &ep);

-    // 从已有的endpoint创建PeerAddr,变量内容非空
+    // A PeerAddr created from an existing endpoint has non-empty content
     PeerAddr caddr(ep);
     ASSERT_FALSE(caddr.IsEmpty());
     ASSERT_EQ(caddr.addr_.port, 8000);
     ASSERT_STREQ("127.0.0.1:8000:0", caddr.ToString().c_str());

-    // reset置位后成员变量内容为空
+    // After Reset the member variables are empty again
     caddr.Reset();
     ASSERT_TRUE(caddr.IsEmpty());

@@ -49,7 +50,8 @@ TEST(ClientCommon, PeerAddrTest) {
     PeerAddr caddr2;
     ASSERT_TRUE(caddr2.IsEmpty());

-    // 从字符串中解析出地址信息,字符串不符合解析格式返回-1,"ip:port:index"
+    // Parse address information from a string in "ip:port:index" format; a
+    // string that does not match the format returns -1
     std::string ipaddr1("127.0.0.1");
     ASSERT_EQ(-1, caddr2.Parse(ipaddr1));
     std::string ipaddr2("127.0.0.q:9000:0");
@@ -61,11 +63,12 @@ TEST(ClientCommon, PeerAddrTest) {
     std::string ipaddr5("127.0.0.1001:9000:0");
     ASSERT_EQ(-1, caddr2.Parse(ipaddr5));

-    // 从字符串解析地址成功后,成员变量即为非空
+    // After the address is parsed from the string successfully, the member
+    // variables are non-empty
     ASSERT_EQ(0, caddr2.Parse(ipaddr));
     ASSERT_FALSE(caddr2.IsEmpty());

-    // 验证非空成员变量是否为预期值
+    // Verify that the non-empty member variables hold the expected values
     EndPoint ep1;
     str2endpoint("127.0.0.1:9000", &ep1);
     ASSERT_EQ(caddr2.addr_, ep1);
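PeerAddrTest above treats the address format as a three-field "ip:port:index" string and expects Parse to reject anything else with -1. A self-contained approximation of that validation (hypothetical helper; the real PeerAddr::Parse may differ in details such as IPv6 handling):

#include <arpa/inet.h>

#include <cstdlib>
#include <sstream>
#include <string>
#include <vector>

int ParsePeerAddrSketch(const std::string& s) {
    std::vector<std::string> parts;
    std::stringstream ss(s);
    std::string item;
    while (std::getline(ss, item, ':')) {
        parts.push_back(item);
    }
    if (parts.size() != 3) {
        return -1;  // "127.0.0.1" -> rejected
    }
    in_addr addr;
    if (inet_pton(AF_INET, parts[0].c_str(), &addr) != 1) {
        return -1;  // "127.0.0.q" or "127.0.0.1001" -> rejected
    }
    char* end = nullptr;
    (void)std::strtol(parts[1].c_str(), &end, 10);
    if (end == nullptr || *end != '\0') {
        return -1;  // non-numeric port -> rejected
    }
    return 0;
}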
"src/client/file_instance.h" -#include "test/client/fake/mockMDS.h" -#include "src/client/metacache.h" -#include "test/client/fake/mock_schedule.h" -#include "include/client/libcurve.h" #include "src/client/libcurve_file.h" -#include "src/client/client_config.h" -#include "src/client/service_helper.h" #include "src/client/mds_client.h" -#include "src/client/config_info.h" -#include "test/client/fake/fakeMDS.h" +#include "src/client/metacache.h" #include "src/client/metacache_struct.h" +#include "src/client/service_helper.h" #include "src/common/net_common.h" +#include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mockMDS.h" +#include "test/client/fake/mock_schedule.h" +#include "test/client/mock/mock_namespace_service.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" -#include "test/client/mock/mock_namespace_service.h" - -#include "absl/memory/memory.h" uint32_t chunk_size = 4 * 1024 * 1024; uint32_t segment_size = 1 * 1024 * 1024 * 1024; -std::string mdsMetaServerAddr = "127.0.0.1:29104"; // NOLINT -std::string configpath = // NOLINT - "./test/client/configs/client_mdsclient_metacache.conf"; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:29104"; // NOLINT +std::string configpath = // NOLINT + "./test/client/configs/client_mdsclient_metacache.conf"; // NOLINT -extern curve::client::FileClient *globalclient; +extern curve::client::FileClient* globalclient; namespace curve { namespace client { @@ -96,10 +95,10 @@ class MDSClientTest : public ::testing::Test { ASSERT_TRUE(false) << "Fail to add service"; } - curve::mds::topology::GetChunkServerInfoResponse *response = + curve::mds::topology::GetChunkServerInfoResponse* response = new curve::mds::topology::GetChunkServerInfoResponse(); response->set_statuscode(0); - curve::mds::topology::ChunkServerInfo *serverinfo = + curve::mds::topology::ChunkServerInfo* serverinfo = new curve::mds::topology::ChunkServerInfo(); serverinfo->set_chunkserverid(888); serverinfo->set_disktype("nvme"); @@ -113,8 +112,8 @@ class MDSClientTest : public ::testing::Test { serverinfo->set_diskcapacity(11111); serverinfo->set_diskused(1111); response->set_allocated_chunkserverinfo(serverinfo); - FakeReturn *getcsret = - new FakeReturn(nullptr, static_cast(response)); // NOLINT + FakeReturn* getcsret = + new FakeReturn(nullptr, static_cast(response)); // NOLINT topologyservice.SetGetChunkserverFakeReturn(getcsret); brpc::ServerOptions options; @@ -150,8 +149,8 @@ TEST_F(MDSClientTest, Createfile) { ::curve::mds::CreateFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateFileFakeReturn(fakeret); @@ -163,19 +162,18 @@ TEST_F(MDSClientTest, Createfile) { ::curve::mds::CreateFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetCreateFileFakeReturn(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Create(filename.c_str(), userinfo, len)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); 
@@ -197,8 +195,8 @@ TEST_F(MDSClientTest, MkDir) {
     ::curve::mds::CreateFileResponse response;
     response.set_statuscode(::curve::mds::StatusCode::kFileExists);
 
-    FakeReturn *fakeret =
-        new FakeReturn(nullptr, static_cast(&response));
+    FakeReturn* fakeret =
+        new FakeReturn(nullptr, static_cast(&response));
 
     curvefsservice.SetCreateFileFakeReturn(fakeret);
 
@@ -215,20 +213,18 @@ TEST_F(MDSClientTest, MkDir) {
     ::curve::mds::CreateFileResponse response1;
     response1.set_statuscode(::curve::mds::StatusCode::kOK);
 
-    FakeReturn *fakeret1 =
-        new FakeReturn(nullptr, static_cast(&response1));
+    FakeReturn* fakeret1 =
+        new FakeReturn(nullptr, static_cast(&response1));
     curvefsservice.SetCreateFileFakeReturn(fakeret1);
 
     ASSERT_EQ(LIBCURVE_ERROR::OK,
               globalclient->Mkdir(dirpath.c_str(), userinfo));
-
-    // 设置rpc失败,触发重试
+    // Set the RPC to fail, triggering a retry
     brpc::Controller cntl;
     cntl.SetFailed(-1, "failed");
 
-    FakeReturn *fakeret2 =
-        new FakeReturn(&cntl, static_cast(&response));
+    FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response));
     curvefsservice.SetCreateFileFakeReturn(fakeret2);
     curvefsservice.CleanRetryTimes();
 
@@ -250,8 +246,8 @@ TEST_F(MDSClientTest, Closefile) {
     ::curve::mds::CloseFileResponse response;
     response.set_statuscode(::curve::mds::StatusCode::kFileNotExists);
 
-    FakeReturn *fakeret =
-        new FakeReturn(nullptr, static_cast(&response));
+    FakeReturn* fakeret =
+        new FakeReturn(nullptr, static_cast(&response));
     curvefsservice.SetCloseFile(fakeret);
 
     LOG(INFO) << "now create file!";
@@ -259,25 +255,23 @@
         mdsclient_.CloseFile(filename.c_str(), userinfo, "sessid");
     ASSERT_EQ(ret, LIBCURVE_ERROR::NOTEXIST);
 
-
     // file close ok
     ::curve::mds::CloseFileResponse response1;
     response1.set_statuscode(::curve::mds::StatusCode::kOK);
 
-    FakeReturn *fakeret1 =
-        new FakeReturn(nullptr, static_cast(&response1));
+    FakeReturn* fakeret1 =
+        new FakeReturn(nullptr, static_cast(&response1));
     curvefsservice.SetCloseFile(fakeret1);
 
     LOG(INFO) << "now create file!";
     ret = mdsclient_.CloseFile(filename.c_str(), userinfo, "sessid");
     ASSERT_EQ(ret, LIBCURVE_ERROR::OK);
 
-    // 设置rpc失败,触发重试
+    // Set the RPC to fail, triggering a retry
     brpc::Controller cntl;
     cntl.SetFailed(-1, "failed");
 
-    FakeReturn *fakeret2 =
-        new FakeReturn(&cntl, static_cast(&response));
+    FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response));
     curvefsservice.SetCloseFile(fakeret2);
     curvefsservice.CleanRetryTimes();
 
@@ -296,8 +290,8 @@ TEST_F(MDSClientTest, Openfile) {
     ::curve::mds::OpenFileResponse openresponse;
     openresponse.set_statuscode(::curve::mds::StatusCode::kOK);
 
-    FakeReturn *fakeret =
-        new FakeReturn(nullptr, static_cast(&openresponse));
+    FakeReturn* fakeret =
+        new FakeReturn(nullptr, static_cast(&openresponse));
     curvefsservice.SetOpenFile(fakeret);
 
     FInfo finfo;
@@ -308,7 +302,7 @@ TEST_F(MDSClientTest, Openfile) {
 
     // has protosession no fileinfo
     ::curve::mds::OpenFileResponse openresponse1;
 
-    ::curve::mds::ProtoSession *se = new ::curve::mds::ProtoSession;
+    ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession;
     se->set_sessionid("1");
     se->set_createtime(12345);
     se->set_leasetime(10000000);
@@ -317,8 +311,8 @@ TEST_F(MDSClientTest, Openfile) {
     openresponse1.set_statuscode(::curve::mds::StatusCode::kOK);
     openresponse1.set_allocated_protosession(se);
 
-    FakeReturn *fakeret1 =
-        new FakeReturn(nullptr, static_cast(&openresponse1));
+    FakeReturn* fakeret1 =
+        new FakeReturn(nullptr, static_cast(&openresponse1));
static_cast(&openresponse1)); curvefsservice.SetOpenFile(fakeret1); ASSERT_EQ(globalclient->Open(filename, userinfo), -LIBCURVE_ERROR::FAILED); @@ -326,13 +320,13 @@ TEST_F(MDSClientTest, Openfile) { // has protosession and finfo ::curve::mds::OpenFileResponse openresponse2; - ::curve::mds::ProtoSession *se2 = new ::curve::mds::ProtoSession; + ::curve::mds::ProtoSession* se2 = new ::curve::mds::ProtoSession; se2->set_sessionid("1"); se2->set_createtime(12345); se2->set_leasetime(10000000); se2->set_sessionstatus(::curve::mds::SessionStatus::kSessionOK); - ::curve::mds::FileInfo *fin = new ::curve::mds::FileInfo; + ::curve::mds::FileInfo* fin = new ::curve::mds::FileInfo; fin->set_filename("_filename_"); fin->set_id(1); fin->set_parentid(0); @@ -347,21 +341,21 @@ TEST_F(MDSClientTest, Openfile) { openresponse2.set_allocated_protosession(se2); openresponse2.set_allocated_fileinfo(fin); - FakeReturn *fakeret2 = - new FakeReturn(nullptr, static_cast(&openresponse2)); + FakeReturn* fakeret2 = + new FakeReturn(nullptr, static_cast(&openresponse2)); curvefsservice.SetOpenFile(fakeret2); ASSERT_EQ(globalclient->Open(filename, userinfo), LIBCURVE_ERROR::OK); ASSERT_EQ(LIBCURVE_ERROR::OK, Write(0, nullptr, 0, 0)); ASSERT_EQ(LIBCURVE_ERROR::OK, Read(0, nullptr, 0, 0)); - ::curve::mds::ProtoSession *socupied = new ::curve::mds::ProtoSession; + ::curve::mds::ProtoSession* socupied = new ::curve::mds::ProtoSession; socupied->set_sessionid("1"); socupied->set_createtime(12345); socupied->set_leasetime(10000000); socupied->set_sessionstatus(::curve::mds::SessionStatus::kSessionOK); - ::curve::mds::FileInfo *focupied = new ::curve::mds::FileInfo; + ::curve::mds::FileInfo* focupied = new ::curve::mds::FileInfo; focupied->set_filename("_filename_"); focupied->set_id(1); focupied->set_parentid(0); @@ -381,14 +375,14 @@ TEST_F(MDSClientTest, Openfile) { refreshresponse.set_statuscode(::curve::mds::StatusCode::kOK); refreshresponse.set_sessionid("2"); - FakeReturn *r = - new FakeReturn(nullptr, static_cast(&responseOccupied)); + FakeReturn* r = + new FakeReturn(nullptr, static_cast(&responseOccupied)); curvefsservice.SetOpenFile(r); - FakeReturn *refreshret = - new FakeReturn(nullptr, static_cast(&refreshresponse)); + FakeReturn* refreshret = + new FakeReturn(nullptr, static_cast(&refreshresponse)); curvefsservice.SetRefreshSession(refreshret, []() {}); - curve::mds::FileInfo *info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::GetFileInfoResponse getinforesponse; info->set_filename("_filename_"); info->set_id(1); @@ -402,8 +396,8 @@ TEST_F(MDSClientTest, Openfile) { getinforesponse.set_allocated_fileinfo(info); getinforesponse.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakegetinfo = - new FakeReturn(nullptr, static_cast(&getinforesponse)); + FakeReturn* fakegetinfo = + new FakeReturn(nullptr, static_cast(&getinforesponse)); curvefsservice.SetGetFileInfoFakeReturn(fakegetinfo); int fd = globalclient->Open(filename, userinfo); @@ -411,12 +405,12 @@ TEST_F(MDSClientTest, Openfile) { ASSERT_EQ(LIBCURVE_ERROR::OK, Write(fd, nullptr, 0, 0)); ASSERT_EQ(LIBCURVE_ERROR::OK, Read(fd, nullptr, 0, 0)); - // 测试关闭文件 + // Test closing file ::curve::mds::CloseFileResponse closeresp; closeresp.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakecloseret = - new FakeReturn(nullptr, static_cast(&closeresp)); + FakeReturn* fakecloseret = + new FakeReturn(nullptr, static_cast(&closeresp)); curvefsservice.SetCloseFile(fakecloseret); 
     globalclient->Close(fd);
 
@@ -426,12 +420,12 @@ TEST_F(MDSClientTest, Openfile) {
     ASSERT_EQ(LIBCURVE_ERROR::OK, AioWrite(fd, &aioctx));
     ASSERT_EQ(LIBCURVE_ERROR::OK, AioRead(fd, &aioctx));
 
-    // 设置rpc失败,触发重试
+    // Set the RPC to fail, triggering a retry
     brpc::Controller cntl;
     cntl.SetFailed(-1, "failed");
 
-    FakeReturn *fakeret3 =
-        new FakeReturn(&cntl, static_cast(&openresponse2));
+    FakeReturn* fakeret3 =
+        new FakeReturn(&cntl, static_cast(&openresponse2));
     curvefsservice.SetOpenFile(fakeret3);
     curvefsservice.CleanRetryTimes();
 
@@ -441,8 +435,8 @@ TEST_F(MDSClientTest, Openfile) {
     ::curve::mds::CloseFileResponse response2;
     response2.set_statuscode(::curve::mds::StatusCode::kSessionNotExist);
 
-    FakeReturn *fakeret4 =
-        new FakeReturn(nullptr, static_cast(&response2));
+    FakeReturn* fakeret4 =
+        new FakeReturn(nullptr, static_cast(&response2));
     curvefsservice.SetCloseFile(fakeret4);
 
     globalclient->Close(0);
@@ -458,8 +452,8 @@ TEST_F(MDSClientTest, Renamefile) {
     ::curve::mds::RenameFileResponse response;
     response.set_statuscode(::curve::mds::StatusCode::kFileExists);
 
-    FakeReturn *fakeret =
-        new FakeReturn(nullptr, static_cast(&response));
+    FakeReturn* fakeret =
+        new FakeReturn(nullptr, static_cast(&response));
 
     curvefsservice.SetRenameFile(fakeret);
 
@@ -475,8 +469,8 @@ TEST_F(MDSClientTest, Renamefile) {
     ::curve::mds::RenameFileResponse response1;
     response1.set_statuscode(::curve::mds::StatusCode::kOK);
 
-    FakeReturn *fakeret1 =
-        new FakeReturn(nullptr, static_cast(&response1));
+    FakeReturn* fakeret1 =
+        new FakeReturn(nullptr, static_cast(&response1));
     curvefsservice.SetRenameFile(fakeret1);
 
     ASSERT_EQ(LIBCURVE_ERROR::OK,
@@ -486,8 +480,8 @@ TEST_F(MDSClientTest, Renamefile) {
     ::curve::mds::RenameFileResponse response2;
     response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist);
 
-    FakeReturn *fakeret3 =
-        new FakeReturn(nullptr, static_cast(&response2));
+    FakeReturn* fakeret3 =
+        new FakeReturn(nullptr, static_cast(&response2));
     curvefsservice.SetRenameFile(fakeret3);
 
     ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST,
@@ -497,8 +491,8 @@ TEST_F(MDSClientTest, Renamefile) {
     ::curve::mds::RenameFileResponse response3;
     response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail);
 
-    FakeReturn *fakeret4 =
-        new FakeReturn(nullptr, static_cast(&response3));
+    FakeReturn* fakeret4 =
+        new FakeReturn(nullptr, static_cast(&response3));
     curvefsservice.SetRenameFile(fakeret4);
 
     ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL,
@@ -508,19 +502,18 @@ TEST_F(MDSClientTest, Renamefile) {
     ::curve::mds::RenameFileResponse response4;
     response4.set_statuscode(::curve::mds::StatusCode::kStorageError);
 
-    FakeReturn *fakeret5 =
-        new FakeReturn(nullptr, static_cast(&response4));
+    FakeReturn* fakeret5 =
+        new FakeReturn(nullptr, static_cast(&response4));
     curvefsservice.SetRenameFile(fakeret5);
 
     ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR,
               globalclient->Rename(userinfo, filename1, filename2));
 
-    // 设置rpc失败,触发重试
+    // Set the RPC to fail, triggering a retry
     brpc::Controller cntl;
     cntl.SetFailed(-1, "failed");
 
-    FakeReturn *fakeret2 =
-        new FakeReturn(&cntl, static_cast(&response));
+    FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response));
     curvefsservice.SetRenameFile(fakeret2);
     curvefsservice.CleanRetryTimes();
 
@@ -543,8 +536,8 @@ TEST_F(MDSClientTest, Extendfile) {
     ::curve::mds::ExtendFileResponse response;
     response.set_statuscode(::curve::mds::StatusCode::kFileExists);
 
-    FakeReturn *fakeret =
-        new FakeReturn(nullptr, static_cast(&response));
+    FakeReturn* fakeret =
+        new FakeReturn(nullptr, static_cast(&response));
static_cast(&response)); curvefsservice.SetExtendFile(fakeret); @@ -560,8 +553,8 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetExtendFile(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, @@ -571,8 +564,8 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret3 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret3 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetExtendFile(fakeret3); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST, @@ -582,8 +575,8 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response3; response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response3)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response3)); curvefsservice.SetExtendFile(fakeret4); ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL, @@ -593,8 +586,8 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response4; response4.set_statuscode(::curve::mds::StatusCode::kStorageError); - FakeReturn *fakeret5 = - new FakeReturn(nullptr, static_cast(&response4)); + FakeReturn* fakeret5 = + new FakeReturn(nullptr, static_cast(&response4)); curvefsservice.SetExtendFile(fakeret5); ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, @@ -604,19 +597,18 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response5; response5.set_statuscode(::curve::mds::StatusCode::kShrinkBiggerFile); - FakeReturn *fakeret6 = - new FakeReturn(nullptr, static_cast(&response5)); + FakeReturn* fakeret6 = + new FakeReturn(nullptr, static_cast(&response5)); curvefsservice.SetExtendFile(fakeret6); ASSERT_EQ(-1 * LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE, globalclient->Extend(filename1, userinfo, newsize)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetExtendFile(fakeret2); curvefsservice.CleanRetryTimes(); @@ -640,8 +632,8 @@ TEST_F(MDSClientTest, Deletefile) { ::curve::mds::DeleteFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetDeleteFile(fakeret); @@ -652,8 +644,8 @@ TEST_F(MDSClientTest, Deletefile) { ::curve::mds::DeleteFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetDeleteFile(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Unlink(filename1, userinfo)); @@ -662,8 +654,8 @@ TEST_F(MDSClientTest, Deletefile) { ::curve::mds::DeleteFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret3 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret3 = + new FakeReturn(nullptr, 
static_cast(&response2)); curvefsservice.SetDeleteFile(fakeret3); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST, @@ -673,8 +665,8 @@ TEST_F(MDSClientTest, Deletefile) { ::curve::mds::DeleteFileResponse response3; response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response3)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response3)); curvefsservice.SetDeleteFile(fakeret4); ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL, @@ -684,26 +676,25 @@ TEST_F(MDSClientTest, Deletefile) { ::curve::mds::DeleteFileResponse response4; response4.set_statuscode(::curve::mds::StatusCode::kStorageError); - FakeReturn *fakeret5 = - new FakeReturn(nullptr, static_cast(&response4)); + FakeReturn* fakeret5 = + new FakeReturn(nullptr, static_cast(&response4)); curvefsservice.SetDeleteFile(fakeret5); ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, globalclient->Unlink(filename1, userinfo)); - // 设置delete force + // Set delete force fiu_init(0); fiu_enable("test/client/fake/fakeMDS/forceDeleteFile", 1, nullptr, 0); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOT_SUPPORT, globalclient->Unlink(filename1, userinfo, true)); fiu_disable("test/client/fake/fakeMDS/forceDeleteFile"); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetDeleteFile(fakeret2); curvefsservice.CleanRetryTimes(); @@ -727,8 +718,8 @@ TEST_F(MDSClientTest, Rmdir) { ::curve::mds::DeleteFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetDeleteFile(fakeret); @@ -744,8 +735,8 @@ TEST_F(MDSClientTest, Rmdir) { ::curve::mds::DeleteFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetDeleteFile(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Rmdir(filename1, userinfo)); @@ -754,8 +745,8 @@ TEST_F(MDSClientTest, Rmdir) { ::curve::mds::DeleteFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret3 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret3 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetDeleteFile(fakeret3); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST, @@ -765,8 +756,8 @@ TEST_F(MDSClientTest, Rmdir) { ::curve::mds::DeleteFileResponse response3; response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response3)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response3)); curvefsservice.SetDeleteFile(fakeret4); ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL, @@ -776,19 +767,18 @@ TEST_F(MDSClientTest, Rmdir) { ::curve::mds::DeleteFileResponse response4; response4.set_statuscode(::curve::mds::StatusCode::kStorageError); - FakeReturn *fakeret5 = - new FakeReturn(nullptr, static_cast(&response4)); + FakeReturn* fakeret5 = + new FakeReturn(nullptr, static_cast(&response4)); curvefsservice.SetDeleteFile(fakeret5); ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, 
-    // 设置rpc失败,触发重试
+    // Set the RPC to fail, triggering a retry
     brpc::Controller cntl;
     cntl.SetFailed(-1, "failed");
 
-    FakeReturn *fakeret2 =
-        new FakeReturn(&cntl, static_cast(&response));
+    FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response));
     curvefsservice.SetDeleteFile(fakeret2);
     curvefsservice.CleanRetryTimes();
 
@@ -802,7 +792,7 @@ TEST_F(MDSClientTest, Rmdir) {
 TEST_F(MDSClientTest, StatFile) {
     std::string filename = "/1_userinfo_";
 
-    curve::mds::FileInfo *info = new curve::mds::FileInfo;
+    curve::mds::FileInfo* info = new curve::mds::FileInfo;
     ::curve::mds::GetFileInfoResponse response;
     info->set_filename("_filename_");
     info->set_id(1);
@@ -816,11 +806,11 @@ TEST_F(MDSClientTest, StatFile) {
     response.set_allocated_fileinfo(info);
     response.set_statuscode(::curve::mds::StatusCode::kOK);
 
-    FakeReturn *fakeret =
-        new FakeReturn(nullptr, static_cast(&response));
+    FakeReturn* fakeret =
+        new FakeReturn(nullptr, static_cast(&response));
     curvefsservice.SetGetFileInfoFakeReturn(fakeret);
 
-    curve::client::FInfo_t *finfo = new curve::client::FInfo_t;
+    curve::client::FInfo_t* finfo = new curve::client::FInfo_t;
     FileStatInfo fstat;
     globalclient->StatFile(filename, userinfo, &fstat);
 
@@ -831,12 +821,11 @@ TEST_F(MDSClientTest, StatFile) {
     ASSERT_EQ(fstat.ctime, 12345678);
     ASSERT_EQ(fstat.length, 4 * 1024 * 1024 * 1024ul);
 
-    // 设置rpc失败,触发重试
+    // Set the RPC to fail, triggering a retry
     brpc::Controller cntl;
     cntl.SetFailed(-1, "failed");
 
-    FakeReturn *fakeret2 =
-        new FakeReturn(&cntl, static_cast(&response));
+    FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response));
     curvefsservice.SetGetFileInfoFakeReturn(fakeret2);
     curvefsservice.CleanRetryTimes();
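A pattern worth naming: the client surfaces MDS status codes as negated LIBCURVE_ERROR values, which is why the asserts above multiply by -1. A hypothetical helper summarizing the mapping these Rename/Extend/Delete/Rmdir tests rely on (illustrative only, not code from this patch):

    // Assumed summary of the status-code mapping exercised above.
    int ToLibcurveError(::curve::mds::StatusCode code) {
        switch (code) {
            case ::curve::mds::StatusCode::kOK:
                return LIBCURVE_ERROR::OK;
            case ::curve::mds::StatusCode::kFileNotExists:
            case ::curve::mds::StatusCode::kDirNotExist:
                return -1 * LIBCURVE_ERROR::NOTEXIST;
            case ::curve::mds::StatusCode::kOwnerAuthFail:
                return -1 * LIBCURVE_ERROR::AUTHFAIL;
            case ::curve::mds::StatusCode::kStorageError:
                return -1 * LIBCURVE_ERROR::INTERNAL_ERROR;
            default:
                return -1 * LIBCURVE_ERROR::FAILED;  // assumed fallback
        }
    }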
@@ -872,7 +861,7 @@ TEST_F(MDSClientTest, GetFileInfo) {
         response.set_statuscode(::curve::mds::StatusCode::kOK);
 
         auto fakeret = absl::make_unique(
-            nullptr, static_cast(&response));
+            nullptr, static_cast(&response));
         curvefsservice.SetGetFileInfoFakeReturn(fakeret.get());
 
         curve::client::FileEpoch_t fEpoch;
@@ -890,19 +879,19 @@ TEST_F(MDSClientTest, GetFileInfo) {
         ASSERT_EQ(finfo->segmentsize, 1 * 1024 * 1024 * 1024ul);
         ASSERT_EQ(finfo->blocksize, hasBlockSize ? blocksize : 4096);
 
-        // 设置rpc失败,触发重试
+        // Set the RPC to fail, triggering a retry
        brpc::Controller cntl;
         cntl.SetFailed(-1, "failed");
 
-        auto fakeret2 = absl::make_unique(
-            &cntl, static_cast(&response));
+        auto fakeret2 =
+            absl::make_unique(&cntl, static_cast(&response));
         curvefsservice.SetGetFileInfoFakeReturn(fakeret2.get());
         curvefsservice.CleanRetryTimes();
 
-        ASSERT_EQ(LIBCURVE_ERROR::FAILED,
-                  mdsclient_.GetFileInfo(filename, userinfo, finfo.get(),
-                  &fEpoch));
+        ASSERT_EQ(
+            LIBCURVE_ERROR::FAILED,
+            mdsclient_.GetFileInfo(filename, userinfo, finfo.get(), &fEpoch));
     }
 }
 
@@ -940,7 +929,7 @@ TEST_F(MDSClientTest, GetOrAllocateSegment) {
     // checkTimer(10000, 11000);
 
     curve::mds::GetOrAllocateSegmentResponse response;
-    curve::mds::PageFileSegment *pfs = new curve::mds::PageFileSegment;
+    curve::mds::PageFileSegment* pfs = new curve::mds::PageFileSegment;
     response.set_statuscode(::curve::mds::StatusCode::kOK);
     response.set_allocated_pagefilesegment(pfs);
     response.mutable_pagefilesegment()->set_logicalpoolid(1234);
@@ -953,8 +942,8 @@ TEST_F(MDSClientTest, GetOrAllocateSegment) {
         chunk->set_copysetid(i);
         chunk->set_chunkid(i);
     }
-    FakeReturn *fakeret =
-        new FakeReturn(nullptr, static_cast(&response));
+    FakeReturn* fakeret =
+        new FakeReturn(nullptr, static_cast(&response));
     curvefsservice.SetGetOrAllocateSegmentFakeReturn(fakeret);
 
     ::curve::mds::topology::GetChunkServerListInCopySetsResponse response_1;
@@ -971,8 +960,8 @@ TEST_F(MDSClientTest, GetOrAllocateSegment) {
             cslocs->set_port(5000 + j);
         }
     }
-    FakeReturn *faktopologyeret =
-        new FakeReturn(nullptr, static_cast(&response_1));
+    FakeReturn* faktopologyeret =
+        new FakeReturn(nullptr, static_cast(&response_1));
     topologyservice.SetFakeReturn(faktopologyeret);
 
     curve::client::MetaCache mc;
@@ -1035,8 +1024,8 @@ TEST_F(MDSClientTest, GetOrAllocateSegment) {
     GetChunkServerListInCopySetsResponse response_2;
     response_2.set_statuscode(-1);
 
-    FakeReturn *faktopologyeret_2 =
-        new FakeReturn(nullptr, static_cast(&response_2));
+    FakeReturn* faktopologyeret_2 =
+        new FakeReturn(nullptr, static_cast(&response_2));
     topologyservice.SetFakeReturn(faktopologyeret_2);
 
     uint32_t csid;
@@ -1097,8 +1086,8 @@ TEST_F(MDSClientTest, GetServerList) {
     response_1.set_statuscode(0);
     uint32_t chunkserveridc = 1;
 
-    ::curve::common::ChunkServerLocation *cslocs;
-    ::curve::mds::topology::CopySetServerInfo *csinfo;
+    ::curve::common::ChunkServerLocation* cslocs;
+    ::curve::mds::topology::CopySetServerInfo* csinfo;
     for (int j = 0; j < 256; j++) {
         csinfo = response_1.add_csinfo();
         csinfo->set_copysetid(j);
@@ -1111,8 +1100,8 @@ TEST_F(MDSClientTest, GetServerList) {
         }
     }
 
-    FakeReturn *faktopologyeret =
-        new FakeReturn(nullptr, static_cast(&response_1));
+    FakeReturn* faktopologyeret =
+        new FakeReturn(nullptr, static_cast(&response_1));
     topologyservice.SetFakeReturn(faktopologyeret);
 
     std::vector cpidvec;
@@ -1222,12 +1211,12 @@ TEST_F(MDSClientTest, GetLeaderTest) {
 
     mc.UpdateCopysetInfo(1234, 1234, cslist);
 
-    // 测试复制组里第三个addr为leader
+    // Test that the third addr in the replication group is the leader
     curve::chunkserver::GetLeaderResponse2 response1;
-    curve::common::Peer *peer1 = new curve::common::Peer();
+    curve::common::Peer* peer1 = new curve::common::Peer();
     peer1->set_address(peerinfo_3.internalAddr.ToString());
     response1.set_allocated_leader(peer1);
-    FakeReturn fakeret1(nullptr, static_cast(&response1));
+    FakeReturn fakeret1(nullptr, static_cast(&response1));
     cliservice1.SetFakeReturn(&fakeret1);
     cliservice2.SetFakeReturn(&fakeret1);
cliservice3.SetFakeReturn(&fakeret1); @@ -1245,12 +1234,14 @@ TEST_F(MDSClientTest, GetLeaderTest) { butil::str2endpoint("127.0.0.1", 29122, &expected); EXPECT_EQ(expected, leaderep); - // 测试拉取新leader失败,需要到mds重新fetch新的serverlist - // 当前新leader是3,尝试再刷新leader,这个时候会从1, 2获取leader - // 但是这时候leader找不到了,于是就会触发向mds重新拉取最新的server list + // The test failed to retrieve the new leader, and a new serverlist needs to + // be retrieved from the mds The current new leader is 3. Try refreshing the + // leader again, and at this time, the leader will be obtained from 1 and 2 + // But at this point, the leader cannot be found, so it will trigger a new + // pull of the latest server list from the mds brpc::Controller controller11; controller11.SetFailed(-1, "error"); - FakeReturn fakeret111(&controller11, static_cast(&response1)); + FakeReturn fakeret111(&controller11, static_cast(&response1)); cliservice1.SetFakeReturn(&fakeret111); cliservice2.SetFakeReturn(&fakeret111); cliservice3.SetFakeReturn(&fakeret111); @@ -1259,8 +1250,8 @@ TEST_F(MDSClientTest, GetLeaderTest) { response_1.set_statuscode(0); uint32_t chunkserveridc = 1; - ::curve::common::ChunkServerLocation *cslocs; - ::curve::mds::topology::CopySetServerInfo *csinfo; + ::curve::common::ChunkServerLocation* cslocs; + ::curve::mds::topology::CopySetServerInfo* csinfo; csinfo = response_1.add_csinfo(); csinfo->set_copysetid(1234); for (int i = 0; i < 4; i++) { @@ -1271,28 +1262,31 @@ TEST_F(MDSClientTest, GetLeaderTest) { cslocs->set_port(29120 + i); } - FakeReturn *faktopologyeret = - new FakeReturn(nullptr, static_cast(&response_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(&response_1)); topologyservice.SetFakeReturn(faktopologyeret); cliservice1.CleanInvokeTimes(); cliservice2.CleanInvokeTimes(); cliservice3.CleanInvokeTimes(); - // 向当前集群中拉取leader,然后会从mds一侧获取新server list + // Pull the leader from the current cluster, and then obtain a new server + // list from the mds side EXPECT_EQ(0, mc.GetLeader(1234, 1234, &ckid, &leaderep, true)); - // getleader请求会跳过当前leader + // The getleader request will skip the current leader EXPECT_EQ(0, cliservice3.GetInvokeTimes()); - // 因为从mds获取新的copyset信息了,所以其leader信息被重置了,需要重新获取新leader - // 获取新新的leader,这时候会从1,2,3,4这四个server拉取新leader,并成功获取新leader + // Because the new copyset information was obtained from the mds, its leader + // information has been reset and a new leader needs to be obtained Obtain a + // new leader, which will be pulled from servers 1, 2, 3, and 4 and + // successfully obtain the new leader std::string leader = "10.182.26.2:29123:0"; peer1 = new curve::common::Peer(); peer1->set_address(leader); peer1->set_id(4321); response1.set_allocated_leader(peer1); - fakeret1 = FakeReturn(nullptr, static_cast(&response1)); + fakeret1 = FakeReturn(nullptr, static_cast(&response1)); cliservice1.SetFakeReturn(&fakeret1); cliservice2.SetFakeReturn(&fakeret1); @@ -1309,7 +1303,8 @@ TEST_F(MDSClientTest, GetLeaderTest) { cliservice3.CleanInvokeTimes(); cliservice4.CleanInvokeTimes(); - // refresh为false,所以只会从metacache中获取,不会发起rpc请求 + // Refresh is false, so it will only be obtained from the metacache and will + // not initiate rpc requests EXPECT_EQ(0, mc.GetLeader(1234, 1234, &ckid, &leaderep, false)); EXPECT_EQ(expected, leaderep); EXPECT_EQ(0, cliservice1.GetInvokeTimes()); @@ -1317,13 +1312,14 @@ TEST_F(MDSClientTest, GetLeaderTest) { EXPECT_EQ(0, cliservice3.GetInvokeTimes()); EXPECT_EQ(0, cliservice4.GetInvokeTimes()); - // 测试新增一个leader,该节点不在配置组内, 然后通过向mds - // 查询其chunkserverInfo之后, 
@@ -1355,19 +1351,18 @@ TEST_F(MDSClientTest, GetLeaderTest) {
 
     LOG(INFO) << "GetLeaderTest stopped";
 }
-
 TEST_F(MDSClientTest, GetFileInfoException) {
     std::string filename = "/1_userinfo_";
-    FakeReturn *fakeret = nullptr;
-    curve::client::FInfo_t *finfo = nullptr;
+    FakeReturn* fakeret = nullptr;
+    curve::client::FInfo_t* finfo = nullptr;
     FileEpoch_t fEpoch;
     {
-        curve::mds::FileInfo *info = new curve::mds::FileInfo;
+        curve::mds::FileInfo* info = new curve::mds::FileInfo;
         ::curve::mds::GetFileInfoResponse response;
         response.set_statuscode(::curve::mds::StatusCode::kOK);
         response.set_allocated_fileinfo(info);
 
-        fakeret = new FakeReturn(nullptr, static_cast(&response));
+        fakeret = new FakeReturn(nullptr, static_cast(&response));
         curvefsservice.SetGetFileInfoFakeReturn(fakeret);
 
         finfo = new curve::client::FInfo_t;
@@ -1376,7 +1371,7 @@ TEST_F(MDSClientTest, GetFileInfoException) {
     }
 
     {
-        curve::mds::FileInfo *info = new curve::mds::FileInfo;
+        curve::mds::FileInfo* info = new curve::mds::FileInfo;
         ::curve::mds::GetFileInfoResponse response;
         response.set_statuscode(::curve::mds::StatusCode::kOK);
         info->clear_parentid();
@@ -1389,7 +1384,7 @@ TEST_F(MDSClientTest, GetFileInfoException) {
         info->clear_segmentsize();
         response.set_allocated_fileinfo(info);
 
-        fakeret = new FakeReturn(nullptr, static_cast(&response));
+        fakeret = new FakeReturn(nullptr, static_cast(&response));
         curvefsservice.SetGetFileInfoFakeReturn(fakeret);
 
         finfo = new curve::client::FInfo_t;
@@ -1405,17 +1400,17 @@ TEST_F(MDSClientTest, CreateCloneFile) {
     std::string filename = "/1_userinfo_";
 
     FInfo finfo;
-    curve::mds::FileInfo *info = new curve::mds::FileInfo;
+    curve::mds::FileInfo* info = new curve::mds::FileInfo;
 
-    // 设置rpc失败,触发重试
+    // Set the RPC to fail, triggering a retry
     brpc::Controller cntl;
     cntl.SetFailed(-1, "failed");
 
     curve::mds::CreateCloneFileResponse response;
     response.set_statuscode(::curve::mds::StatusCode::kFileNotExists);
 
-    FakeReturn *fakecreateclone =
-        new FakeReturn(&cntl, static_cast(&response));
+    FakeReturn* fakecreateclone =
+        new FakeReturn(&cntl, static_cast(&response));
 
     curvefsservice.SetCreateCloneFile(fakecreateclone);
     curvefsservice.CleanRetryTimes();
 
@@ -1424,12 +1419,12 @@
               mdsclient_.CreateCloneFile("source", "destination", userinfo,
                                          10 * 1024 * 1024, 0, 4 * 1024 * 1024,
                                          0, 0, "default", &finfo));
-    // 认证失败
+    // Authentication failed
     curve::mds::CreateCloneFileResponse response1;
     response1.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail);
 
-    FakeReturn *fakecreateclone1 =
-        new FakeReturn(nullptr, static_cast(&response1));
+    FakeReturn* fakecreateclone1 =
+        new FakeReturn(nullptr, static_cast(&response1));
 
     curvefsservice.SetCreateCloneFile(fakecreateclone1);
 
     ASSERT_EQ(LIBCURVE_ERROR::AUTHFAIL,
               mdsclient_.CreateCloneFile("source", "destination", userinfo,
                                          10 * 1024 * 1024, 0, 4 * 1024 * 1024,
                                          0, 0, "default", &finfo));
-    // 请求成功
+    // Request successful
     info->set_id(5);
     curve::mds::CreateCloneFileResponse response2;
     response2.set_statuscode(::curve::mds::StatusCode::kOK);
     response2.set_allocated_fileinfo(info);
 
-    FakeReturn *fakecreateclone2 =
-        new FakeReturn(nullptr, static_cast(&response2));
+    FakeReturn* fakecreateclone2 =
+        new FakeReturn(nullptr, static_cast(&response2));
 
     curvefsservice.SetCreateCloneFile(fakecreateclone2);
 
@@ -1463,15 +1458,15 @@ TEST_F(MDSClientTest, CompleteCloneMeta) {
     std::string filename = "/1_userinfo_";
 
-    // 设置rpc失败,触发重试
+    // Set the RPC to fail, triggering a retry
     brpc::Controller cntl;
     cntl.SetFailed(-1, "failed");
 
     curve::mds::SetCloneFileStatusResponse response;
     response.set_statuscode(::curve::mds::StatusCode::kFileNotExists);
 
-    FakeReturn *fakecreateclone =
-        new FakeReturn(&cntl, static_cast(&response));
+    FakeReturn* fakecreateclone =
+        new FakeReturn(&cntl, static_cast(&response));
 
     curvefsservice.SetCloneFileStatus(fakecreateclone);
     curvefsservice.CleanRetryTimes();
 
@@ -1479,23 +1474,23 @@
     ASSERT_EQ(LIBCURVE_ERROR::FAILED,
               mdsclient_.CompleteCloneMeta("destination", userinfo));
 
-    // 认证失败
+    // Authentication failed
     curve::mds::SetCloneFileStatusResponse response1;
     response1.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail);
 
-    FakeReturn *fakecreateclone1 =
-        new FakeReturn(nullptr, static_cast(&response1));
+    FakeReturn* fakecreateclone1 =
+        new FakeReturn(nullptr, static_cast(&response1));
 
     curvefsservice.SetCloneFileStatus(fakecreateclone1);
 
     ASSERT_EQ(LIBCURVE_ERROR::AUTHFAIL,
               mdsclient_.CompleteCloneMeta("destination", userinfo));
 
-    // 请求成功
+    // Request successful
     curve::mds::SetCloneFileStatusResponse response2;
     response2.set_statuscode(::curve::mds::StatusCode::kOK);
 
-    FakeReturn *fakecreateclone2 =
-        new FakeReturn(nullptr, static_cast(&response2));
+    FakeReturn* fakecreateclone2 =
+        new FakeReturn(nullptr, static_cast(&response2));
 
     curvefsservice.SetCloneFileStatus(fakecreateclone2);
 
@@ -1506,15 +1501,15 @@ TEST_F(MDSClientTest, CompleteCloneFile) {
     std::string filename = "/1_userinfo_";
 
-    // 设置rpc失败,触发重试
+    // Set the RPC to fail, triggering a retry
     brpc::Controller cntl;
     cntl.SetFailed(-1, "failed");
 
     curve::mds::SetCloneFileStatusResponse response;
     response.set_statuscode(::curve::mds::StatusCode::kFileNotExists);
 
-    FakeReturn *fakecreateclone =
-        new FakeReturn(&cntl, static_cast(&response));
+    FakeReturn* fakecreateclone =
+        new FakeReturn(&cntl, static_cast(&response));
 
     curvefsservice.SetCloneFileStatus(fakecreateclone);
     curvefsservice.CleanRetryTimes();
 
@@ -1522,23 +1517,23 @@
     ASSERT_EQ(LIBCURVE_ERROR::FAILED,
               mdsclient_.CompleteCloneFile("destination", userinfo));
 
-    // 认证失败
+    // Authentication failed
     curve::mds::SetCloneFileStatusResponse response1;
     response1.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail);
 
-    FakeReturn *fakecreateclone1 =
-        new FakeReturn(nullptr, static_cast(&response1));
+    FakeReturn* fakecreateclone1 =
+        new FakeReturn(nullptr, static_cast(&response1));
 
     curvefsservice.SetCloneFileStatus(fakecreateclone1);
 
     ASSERT_EQ(LIBCURVE_ERROR::AUTHFAIL,
               mdsclient_.CompleteCloneFile("destination", userinfo));
 
-    // 请求成功
+    // Request successful
     curve::mds::SetCloneFileStatusResponse response2;
     response2.set_statuscode(::curve::mds::StatusCode::kOK);
 
-    FakeReturn *fakecreateclone2 =
-        new FakeReturn(nullptr, static_cast(&response2));
+    FakeReturn* fakecreateclone2 =
+        new FakeReturn(nullptr, static_cast(&response2))
;
     curvefsservice.SetCloneFileStatus(fakecreateclone2);
 
@@ -1556,8 +1551,8 @@ TEST_F(MDSClientTest, ChangeOwner) {
     ::curve::mds::ChangeOwnerResponse response;
     response.set_statuscode(::curve::mds::StatusCode::kFileNotExists);
 
-    FakeReturn *fakeret =
-        new FakeReturn(nullptr, static_cast(&response));
+    FakeReturn* fakeret =
+        new FakeReturn(nullptr, static_cast(&response));
 
     curvefsservice.SetChangeOwner(fakeret);
 
@@ -1568,8 +1563,8 @@ TEST_F(MDSClientTest, ChangeOwner) {
     ::curve::mds::ChangeOwnerResponse response1;
     response1.set_statuscode(::curve::mds::StatusCode::kOK);
 
-    FakeReturn *fakeret1 =
-        new FakeReturn(nullptr, static_cast(&response1));
+    FakeReturn* fakeret1 =
+        new FakeReturn(nullptr, static_cast(&response1));
     curvefsservice.SetChangeOwner(fakeret1);
 
     ASSERT_EQ(LIBCURVE_ERROR::OK,
@@ -1579,8 +1574,8 @@ TEST_F(MDSClientTest, ChangeOwner) {
     ::curve::mds::ChangeOwnerResponse response2;
     response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist);
 
-    FakeReturn *fakeret3 =
-        new FakeReturn(nullptr, static_cast(&response2));
+    FakeReturn* fakeret3 =
+        new FakeReturn(nullptr, static_cast(&response2));
     curvefsservice.SetChangeOwner(fakeret3);
 
     ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST,
@@ -1590,8 +1585,8 @@ TEST_F(MDSClientTest, ChangeOwner) {
     ::curve::mds::ChangeOwnerResponse response3;
     response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail);
 
-    FakeReturn *fakeret4 =
-        new FakeReturn(nullptr, static_cast(&response3));
+    FakeReturn* fakeret4 =
+        new FakeReturn(nullptr, static_cast(&response3));
     curvefsservice.SetChangeOwner(fakeret4);
 
     ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL,
@@ -1601,19 +1596,18 @@ TEST_F(MDSClientTest, ChangeOwner) {
     ::curve::mds::ChangeOwnerResponse response4;
     response4.set_statuscode(::curve::mds::StatusCode::kStorageError);
 
-    FakeReturn *fakeret5 =
-        new FakeReturn(nullptr, static_cast(&response4));
+    FakeReturn* fakeret5 =
+        new FakeReturn(nullptr, static_cast(&response4));
     curvefsservice.SetChangeOwner(fakeret5);
 
     ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR,
               globalclient->ChangeOwner(filename1, "newowner", userinfo));
 
-    // 设置rpc失败,触发重试
+    // Set the RPC to fail, triggering a retry
     brpc::Controller cntl;
     cntl.SetFailed(-1, "failed");
 
-    FakeReturn *fakeret2 =
-        new FakeReturn(&cntl, static_cast(&response));
+    FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response));
     curvefsservice.SetChangeOwner(fakeret2);
     curvefsservice.CleanRetryTimes();
 
@@ -1634,7 +1628,7 @@ TEST_F(MDSClientTest, ListChunkServerTest_CntlFailed) {
     cntl.SetFailed(-1, "Failed");
 
     std::unique_ptr fakeret(
-        new FakeReturn(&cntl, static_cast(&response)));
+        new FakeReturn(&cntl, static_cast(&response)));
     topologyservice.SetFakeReturn(fakeret.get());
 
     auto startTime = curve::common::TimeUtility::GetTimeofDayMs();
@@ -1652,7 +1646,7 @@ TEST_F(MDSClientTest, ListChunkServerTest_ResponseError) {
     std::string ip = "127.0.0.1:6666";
 
     std::unique_ptr fakeret(
-        new FakeReturn(nullptr, static_cast(&response)));
+        new FakeReturn(nullptr, static_cast(&response)));
     topologyservice.SetFakeReturn(fakeret.get());
 
     ASSERT_EQ(LIBCURVE_ERROR::FAILED,
@@ -1680,7 +1674,7 @@ TEST_F(MDSClientTest, ListChunkServerTest_ResponseOK) {
     std::string ip = "127.0.0.1:6666";
 
     std::unique_ptr fakeret(
-        new FakeReturn(nullptr, static_cast(&response)));
+        new FakeReturn(nullptr, static_cast(&response)));
     topologyservice.SetFakeReturn(fakeret.get());
 
     std::vector returnIds;
@@ -1697,8 +1691,8 @@ TEST_F(MDSClientTest, ListDir) {
     ::curve::mds::ListDirResponse response;
     response.set_statuscode(::curve::mds::StatusCode::kFileNotExists);
 
-    FakeReturn *fakeret =
-        new FakeReturn(nullptr, static_cast(&response));
+    FakeReturn* fakeret =
+        new FakeReturn(nullptr, static_cast(&response));
 
     curvefsservice.SetListDir(fakeret);
 
@@ -1724,16 +1718,16 @@ TEST_F(MDSClientTest, ListDir) {
         fin->set_owner("test");
     }
 
-    FakeReturn *fakeret1 =
-        new FakeReturn(nullptr, static_cast(&response1));
+    FakeReturn* fakeret1 =
+        new FakeReturn(nullptr, static_cast(&response1));
     curvefsservice.SetListDir(fakeret1);
 
     ASSERT_EQ(LIBCURVE_ERROR::OK,
               globalclient->Listdir(filename1, userinfo, &filestatVec));
 
     C_UserInfo_t cuserinfo;
     memcpy(cuserinfo.owner, "test", 5);
-    FileStatInfo *filestat = new FileStatInfo[5];
-    DirInfo_t *dir = OpenDir(filename1.c_str(), &cuserinfo);
+    FileStatInfo* filestat = new FileStatInfo[5];
+    DirInfo_t* dir = OpenDir(filename1.c_str(), &cuserinfo);
     ASSERT_NE(dir, nullptr);
     ASSERT_EQ(-LIBCURVE_ERROR::FAILED, Listdir(nullptr));
     ASSERT_EQ(LIBCURVE_ERROR::OK, Listdir(dir));
@@ -1767,8 +1761,8 @@ TEST_F(MDSClientTest, ListDir) {
     ::curve::mds::ListDirResponse response2;
     response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist);
 
-    FakeReturn *fakeret3 =
-        new FakeReturn(nullptr, static_cast(&response2));
+    FakeReturn* fakeret3 =
+        new FakeReturn(nullptr, static_cast(&response2));
     curvefsservice.SetListDir(fakeret3);
 
     ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST,
@@ -1778,8 +1772,8 @@ TEST_F(MDSClientTest, ListDir) {
     ::curve::mds::ListDirResponse response3;
     response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail);
 
-    FakeReturn *fakeret4 =
-        new FakeReturn(nullptr, static_cast(&response3));
+    FakeReturn* fakeret4 =
+        new FakeReturn(nullptr, static_cast(&response3));
     curvefsservice.SetListDir(fakeret4);
 
     ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL,
@@ -1789,19 +1783,18 @@ TEST_F(MDSClientTest, ListDir) {
     ::curve::mds::ListDirResponse response4;
     response4.set_statuscode(::curve::mds::StatusCode::kStorageError);
 
-    FakeReturn *fakeret5 =
-        new FakeReturn(nullptr, static_cast(&response4));
+    FakeReturn* fakeret5 =
+        new FakeReturn(nullptr, static_cast(&response4));
     curvefsservice.SetListDir(fakeret5);
 
     ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR,
               globalclient->Listdir(filename1, userinfo, &filestatVec));
 
-    // 设置rpc失败,触发重试
+    // Set the RPC to fail, triggering a retry
     brpc::Controller cntl;
     cntl.SetFailed(-1, "failed");
 
-    FakeReturn *fakeret2 =
-        new FakeReturn(&cntl, static_cast(&response));
+    FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response));
     curvefsservice.SetListDir(fakeret2);
     curvefsservice.CleanRetryTimes();
 
@@ -1816,7 +1809,7 @@ TEST_F(MDSClientTest, ListDir) {
 TEST(LibcurveInterface, InvokeWithOutInit) {
     CurveAioContext aioctx;
     UserInfo_t userinfo;
-    C_UserInfo_t *ui = nullptr;
+    C_UserInfo_t* ui = nullptr;
     FileClient fc;
     ASSERT_EQ(-LIBCURVE_ERROR::FAILED, fc.Create("", userinfo, 0));
@@ -1859,10 +1852,10 @@ class ServiceHelperGetLeaderTest : public MDSClientTest {
     using GetLeaderResponse2 = curve::chunkserver::GetLeaderResponse2;
 
     void SetUp() override {
-        // 添加service,并启动server
+        // Add the services and start the servers
        for (int i = 0; i < kChunkServerNum; ++i) {
-            auto &chunkserver = chunkServers[i];
-            auto &fakeCliService = fakeCliServices[i];
+            auto& chunkserver = chunkServers[i];
+            auto& fakeCliService = fakeCliServices[i];
             ASSERT_EQ(0, chunkserver.AddService(
                              &fakeCliService, brpc::SERVER_DOESNT_OWN_SERVICE))
                 << "Fail to add service";
@@ -1870,7 +1863,7 @@ class ServiceHelperGetLeaderTest : public MDSClientTest {
             brpc::ServerOptions options;
             options.idle_timeout_sec = -1;
-            const auto &ipPort =
+            const auto& ipPort =
                 "127.0.0.1:" + std::to_string(chunkserverPorts[i]);
             ASSERT_EQ(0, chunkserver.Start(ipPort.c_str(), &options))
                 << "Fail to start server";
@@ -1886,7 +1879,7 @@ class ServiceHelperGetLeaderTest : public MDSClientTest {
             externalAddrs[i] = PeerAddr(endpoint);
         }
 
-        // 设置copyset peer信息
+        // Set copyset peer information
         for (int i = 0; i < kChunkServerNum; ++i) {
             curve::client::CopysetPeerInfo peerinfo;
             peerinfo.peerID = i + 1;
@@ -1900,7 +1893,7 @@ class ServiceHelperGetLeaderTest : public MDSClientTest {
     }
 
     void ResetAllFakeCliService() {
-        for (auto &cliService : fakeCliServices) {
+        for (auto& cliService : fakeCliServices) {
             cliService.CleanInvokeTimes();
             cliService.ClearDelay();
             cliService.ClearErrorCode();
@@ -1909,7 +1902,7 @@ class ServiceHelperGetLeaderTest : public MDSClientTest {
     int GetAllInvokeTimes() {
         int total = 0;
-        for (auto &cliService : fakeCliServices) {
+        for (auto& cliService : fakeCliServices) {
             total += cliService.GetInvokeTimes();
         }
 
@@ -1917,29 +1910,29 @@ class ServiceHelperGetLeaderTest : public MDSClientTest {
     }
 
     void TearDown() override {
-        for (auto &server : chunkServers) {
+        for (auto& server : chunkServers) {
             server.Stop(0);
             server.Join();
         }
     }
 
-    GetLeaderResponse2 MakeResponse(const curve::client::PeerAddr &addr) {
+    GetLeaderResponse2 MakeResponse(const curve::client::PeerAddr& addr) {
         GetLeaderResponse2 response;
-        curve::common::Peer *peer = new curve::common::Peer();
+        curve::common::Peer* peer = new curve::common::Peer();
         peer->set_address(addr.ToString());
         response.set_allocated_leader(peer);
 
         return response;
     }
 
-    void SetGetLeaderResponse(const curve::client::PeerAddr &addr) {
+    void SetGetLeaderResponse(const curve::client::PeerAddr& addr) {
         static GetLeaderResponse2 response;
         response = MakeResponse(addr);
 
         static FakeReturn fakeret(nullptr, nullptr);
-        fakeret = FakeReturn(nullptr, static_cast(&response));
+        fakeret = FakeReturn(nullptr, static_cast(&response));
 
-        for (auto &cliService : fakeCliServices) {
+        for (auto& cliService : fakeCliServices) {
             cliService.SetFakeReturn(&fakeret);
         }
 
@@ -1971,16 +1964,16 @@ class ServiceHelperGetLeaderTest : public MDSClientTest {
 };
 
 TEST_F(ServiceHelperGetLeaderTest, NormalTest) {
-    // 测试复制组里第一个chunkserver为leader
+    // Test that the first chunkserver in the replication group is the leader
     GetLeaderResponse2 response = MakeResponse(internalAddrs[0]);
 
-    FakeReturn fakeret0(nullptr, static_cast(&response));
+    FakeReturn fakeret0(nullptr, static_cast(&response));
     fakeCliServices[0].SetFakeReturn(&fakeret0);
 
-    FakeReturn fakeret1(nullptr, static_cast(&response));
+    FakeReturn fakeret1(nullptr, static_cast(&response));
     fakeCliServices[1].SetFakeReturn(&fakeret1);
 
-    FakeReturn fakeret2(nullptr, static_cast(&response));
+    FakeReturn fakeret2(nullptr, static_cast(&response));
     fakeCliServices[2].SetFakeReturn(&fakeret2);
 
     GetLeaderRpcOption rpcOption;
@@ -1993,14 +1986,15 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) {
 
     ResetAllFakeCliService();
 
-    // 测试第二次拉取新的leader,直接跳过第一个chunkserver,查找第2,3两个
+    // Test the second leader fetch: the first chunkserver is skipped and
+    // only the second and third are queried
    int32_t currentLeaderIndex = 0;
     curve::client::PeerAddr currentLeader = internalAddrs[currentLeaderIndex];
 
     response = MakeResponse(currentLeader);
-    fakeret1 = FakeReturn(nullptr, static_cast(&response));
+    fakeret1 = FakeReturn(nullptr, static_cast(&response));
     fakeCliServices[1].SetFakeReturn(&fakeret1);
-    fakeret2 = FakeReturn(nullptr, static_cast(&response));
+    fakeret2 = FakeReturn(nullptr, static_cast(&response));
     fakeCliServices[2].SetFakeReturn(&fakeret2);
 
     getLeaderInfo = GetLeaderInfo(kLogicPoolId, kCopysetId, copysetPeerInfos,
@@ -2012,15 +2006,16 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) {
 
     ResetAllFakeCliService();
 
-    // 测试第三次获取leader,会跳过第二个chunkserver,重试1/3
+    // Test the third leader fetch: the second chunkserver is skipped and
+    // chunkservers 1/3 are retried
     currentLeaderIndex = 1;
     currentLeader = internalAddrs[currentLeaderIndex];
 
     response = MakeResponse(currentLeader);
-    fakeret1 = FakeReturn(nullptr, static_cast(&response));
+    fakeret1 = FakeReturn(nullptr, static_cast(&response));
     fakeCliServices[1].SetFakeReturn(&fakeret1);
-    fakeret2 = FakeReturn(nullptr, static_cast(&response));
+    fakeret2 = FakeReturn(nullptr, static_cast(&response));
     fakeCliServices[2].SetFakeReturn(&fakeret2);
 
     getLeaderInfo = GetLeaderInfo(kLogicPoolId, kCopysetId, copysetPeerInfos,
@@ -2034,13 +2029,14 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) {
 }
 
 TEST_F(ServiceHelperGetLeaderTest, RpcDelayTest) {
-    // 设置第三个chunkserver为leader
+    // Set the third chunkserver as the leader
     const auto currentLeaderIndex = 2;
-    const auto &currentLeader = internalAddrs[2];
+    const auto& currentLeader = internalAddrs[2];
     SetGetLeaderResponse(currentLeader);
 
-    // 再次GetLeader会向chunkserver 1/2 发送请求
-    // 在chunksever GetLeader service 中加入sleep,触发backup request
+    // Another GetLeader call sends requests to chunkservers 1/2 again.
+    // Adding a sleep to the chunkserver GetLeader service triggers a backup
+    // request
     fakeCliServices[0].SetDelayMs(200);
     fakeCliServices[1].SetDelayMs(200);
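A short aside on the mechanism this delay exploits: brpc can fire a "backup request" at another server when the first one is slow to answer. A minimal, self-contained illustration of the knob involved, using plain brpc rather than this test's GetLeaderRpcOption wrapper (addresses and the 100ms threshold are assumptions for illustration):

    #include <brpc/channel.h>

    // Sketch: if no response arrives within backup_request_ms, brpc sends a
    // backup request to another server in the list; the first reply wins.
    brpc::ChannelOptions options;
    options.backup_request_ms = 100;  // assumed threshold
    options.timeout_ms = 1000;        // overall RPC deadline

    brpc::Channel channel;
    // "rr" selects round-robin load balancing across the listed servers.
    if (channel.Init("list://127.0.0.1:29120,127.0.0.1:29121", "rr",
                     &options) != 0) {
        LOG(ERROR) << "Fail to init channel";
    }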
fakeCliServices[1].SetFakeReturn(&fakeret1); - fakeret2 = FakeReturn(nullptr, static_cast(&response)); + fakeret2 = FakeReturn(nullptr, static_cast(&response)); fakeCliServices[2].SetFakeReturn(&fakeret2); getLeaderInfo = GetLeaderInfo(kLogicPoolId, kCopysetId, copysetPeerInfos, @@ -2012,15 +2006,16 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) { ResetAllFakeCliService(); - // 测试第三次获取leader,会跳过第二个chunkserver,重试1/3 + // Testing for the third time obtaining the leader will skip the second + // chunkserver and retry 1/3 currentLeaderIndex = 1; currentLeader = internalAddrs[currentLeaderIndex]; response = MakeResponse(currentLeader); - fakeret1 = FakeReturn(nullptr, static_cast(&response)); + fakeret1 = FakeReturn(nullptr, static_cast(&response)); fakeCliServices[1].SetFakeReturn(&fakeret1); - fakeret2 = FakeReturn(nullptr, static_cast(&response)); + fakeret2 = FakeReturn(nullptr, static_cast(&response)); fakeCliServices[2].SetFakeReturn(&fakeret2); getLeaderInfo = GetLeaderInfo(kLogicPoolId, kCopysetId, copysetPeerInfos, @@ -2034,13 +2029,14 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) { } TEST_F(ServiceHelperGetLeaderTest, RpcDelayTest) { - // 设置第三个chunkserver为leader + // Set the third chunkserver as the leader const auto currentLeaderIndex = 2; - const auto ¤tLeader = internalAddrs[2]; + const auto& currentLeader = internalAddrs[2]; SetGetLeaderResponse(currentLeader); - // 再次GetLeader会向chunkserver 1/2 发送请求 - // 在chunksever GetLeader service 中加入sleep,触发backup request + // GetLeader will send a request to chunkserver 1/2 again + // Add a sleep in the chunksever GetLeader service to trigger a backup + // request fakeCliServices[0].SetDelayMs(200); fakeCliServices[1].SetDelayMs(200); @@ -2063,25 +2059,26 @@ TEST_F(ServiceHelperGetLeaderTest, RpcDelayAndExceptionTest) { std::vector exceptionErrCodes{ENOENT, EAGAIN, EHOSTDOWN, ECONNREFUSED, ECONNRESET, brpc::ELOGOFF}; - // 设置第三个chunkserver为leader,GetLeader会向chunkserver 1/2发送请求 + // Set the third chunkserver as the leader, and GetLeader will send a + // request to chunkserver 1/2 const auto currentLeaderIndex = 2; - const auto ¤tLeader = internalAddrs[currentLeaderIndex]; + const auto& currentLeader = internalAddrs[currentLeaderIndex]; SetGetLeaderResponse(currentLeader); - // 设置第一个chunkserver GetLeader service 延迟 + // Set the delay for the first chunkserver GetLeader service fakeCliServices[0].SetDelayMs(200); - // 设置第二个chunkserver 返回对应的错误码 + // Set the second chunkserver to return the corresponding error code for (auto errCode : exceptionErrCodes) { fakeCliServices[1].SetErrorCode(errCode); brpc::Controller controller; controller.SetFailed(errCode, "Failed"); - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address(currentLeader.ToString()); GetLeaderResponse2 response; response.set_allocated_leader(peer); - FakeReturn fakeret(&controller, static_cast(&response)); + FakeReturn fakeret(&controller, static_cast(&response)); fakeCliServices[1].SetFakeReturn(&fakeret); GetLeaderRpcOption rpcOption; @@ -2095,7 +2092,7 @@ TEST_F(ServiceHelperGetLeaderTest, RpcDelayAndExceptionTest) { std::this_thread::sleep_for(std::chrono::seconds(1)); ASSERT_EQ(currentLeader, leaderAddr); - for (auto &cliservice : fakeCliServices) { + for (auto& cliservice : fakeCliServices) { cliservice.CleanInvokeTimes(); } } @@ -2105,25 +2102,25 @@ TEST_F(ServiceHelperGetLeaderTest, AllChunkServerExceptionTest) { std::vector exceptionErrCodes{ENOENT, EAGAIN, EHOSTDOWN, ECONNREFUSED, ECONNRESET, 
brpc::ELOGOFF}; - // 设置第三个chunkserver为leader + // Set the third chunkserver as the leader const auto currentLeaderIndex = 2; - const auto ¤tLeader = internalAddrs[currentLeaderIndex]; + const auto& currentLeader = internalAddrs[currentLeaderIndex]; SetGetLeaderResponse(currentLeader); - // 另外两个chunkserver都返回对应的错误码 + // The other two chunkservers both return corresponding error codes for (auto errCode : exceptionErrCodes) { fakeCliServices[0].SetErrorCode(errCode); fakeCliServices[1].SetErrorCode(errCode); brpc::Controller controller; controller.SetFailed(errCode, "Failed"); - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address(currentLeader.ToString()); GetLeaderResponse2 response; response.set_allocated_leader(peer); - FakeReturn fakeret(&controller, static_cast(&response)); + FakeReturn fakeret(&controller, static_cast(&response)); fakeCliServices[0].SetFakeReturn(&fakeret); fakeCliServices[1].SetFakeReturn(&fakeret); @@ -2178,8 +2175,8 @@ TEST_F(MDSClientTest, StatFileStatusTest) { response.set_allocated_fileinfo(info.release()); response.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetGetFileInfoFakeReturn(fakeret); std::unique_ptr finfo( @@ -2208,7 +2205,7 @@ TEST_F(MDSClientTest, DeAllocateSegmentTest) { brpc::Controller cntl; cntl.SetFailed(-1, "rpc failed"); - FakeReturn *fakeRet = new FakeReturn(&cntl, nullptr); + FakeReturn* fakeRet = new FakeReturn(&cntl, nullptr); curvefsservice.SetDeAllocateSegmentFakeReturn(fakeRet); uint64_t startMs = curve::common::TimeUtility::GetTimeofDayMs(); @@ -2222,7 +2219,7 @@ TEST_F(MDSClientTest, DeAllocateSegmentTest) { { curve::mds::DeAllocateSegmentResponse response; response.set_statuscode(curve::mds::StatusCode::kOK); - FakeReturn *fakeRet = new FakeReturn(nullptr, &response); + FakeReturn* fakeRet = new FakeReturn(nullptr, &response); curvefsservice.SetDeAllocateSegmentFakeReturn(fakeRet); ASSERT_EQ(LIBCURVE_ERROR::OK, @@ -2233,7 +2230,7 @@ TEST_F(MDSClientTest, DeAllocateSegmentTest) { { curve::mds::DeAllocateSegmentResponse response; response.set_statuscode(curve::mds::StatusCode::kSegmentNotAllocated); - FakeReturn *fakeRet = new FakeReturn(nullptr, &response); + FakeReturn* fakeRet = new FakeReturn(nullptr, &response); curvefsservice.SetDeAllocateSegmentFakeReturn(fakeRet); ASSERT_EQ(LIBCURVE_ERROR::OK, @@ -2251,7 +2248,7 @@ TEST_F(MDSClientTest, DeAllocateSegmentTest) { for (auto err : errorCodes) { curve::mds::DeAllocateSegmentResponse response; response.set_statuscode(err); - FakeReturn *fakeRet = new FakeReturn(nullptr, &response); + FakeReturn* fakeRet = new FakeReturn(nullptr, &response); curvefsservice.SetDeAllocateSegmentFakeReturn(fakeRet); ASSERT_NE(LIBCURVE_ERROR::OK, @@ -2272,10 +2269,10 @@ using ::testing::SaveArgPointee; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; -static void MockRefreshSession(::google::protobuf::RpcController *controller, - const curve::mds::ReFreshSessionRequest *request, - curve::mds::ReFreshSessionResponse *response, - ::google::protobuf::Closure *done) { +static void MockRefreshSession(::google::protobuf::RpcController* controller, + const curve::mds::ReFreshSessionRequest* request, + curve::mds::ReFreshSessionResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard guard(done); 
response->set_statuscode(curve::mds::StatusCode::kOK); @@ -2317,7 +2314,7 @@ TEST_F(MDSClientRefreshSessionTest, StartDummyServerTest) { curve::mds::ReFreshSessionRequest request; curve::mds::ReFreshSessionResponse response; - curve::mds::FileInfo *fileInfo = new curve::mds::FileInfo(); + curve::mds::FileInfo* fileInfo = new curve::mds::FileInfo(); response.set_allocated_fileinfo(fileInfo); EXPECT_CALL(curveFsService_, RefreshSession(_, _, _, _)) .WillOnce(DoAll(SaveArgPointee<1>(&request), SetArgPointee<2>(response), @@ -2344,7 +2341,7 @@ TEST_F(MDSClientRefreshSessionTest, NoStartDummyServerTest) { curve::mds::ReFreshSessionRequest request; curve::mds::ReFreshSessionResponse response; - curve::mds::FileInfo *fileInfo = new curve::mds::FileInfo(); + curve::mds::FileInfo* fileInfo = new curve::mds::FileInfo(); response.set_allocated_fileinfo(fileInfo); EXPECT_CALL(curveFsService_, RefreshSession(_, _, _, _)) .WillOnce(DoAll(SaveArgPointee<1>(&request), SetArgPointee<2>(response), @@ -2376,7 +2373,7 @@ const std::vector clientConf{ std::string("throttle.enable=true"), }; -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); diff --git a/test/client/client_metric_test.cpp b/test/client/client_metric_test.cpp index 2f092fc79f..4072bd60f4 100644 --- a/test/client/client_metric_test.cpp +++ b/test/client/client_metric_test.cpp @@ -20,37 +20,38 @@ * Author: tongguangxun */ -#include +#include "src/client/client_metric.h" + #include #include +#include -#include // NOLINT -#include // NOLINT -#include // NOLINT -#include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT -#include "proto/nameserver2.pb.h" #include "include/client/libcurve.h" -#include "src/client/client_metric.h" -#include "src/client/file_instance.h" -#include "test/client/fake/mock_schedule.h" -#include "test/client/fake/fakeMDS.h" -#include "src/client/libcurve_file.h" +#include "proto/nameserver2.pb.h" #include "src/client/client_common.h" #include "src/client/client_config.h" +#include "src/client/file_instance.h" +#include "src/client/libcurve_file.h" +#include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mock_schedule.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" DECLARE_string(chunkserver_list); -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string mdsMetaServerAddr = "127.0.0.1:9150"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9150"; // NOLINT namespace curve { namespace client { -const std::vector clientConf { +const std::vector clientConf{ std::string("mds.listen.addr=127.0.0.1:9150"), std::string("global.logPath=./runlog/"), std::string("chunkserver.rpcTimeoutMS=1000"), @@ -64,7 +65,7 @@ const std::vector clientConf { }; TEST(MetricTest, ChunkServer_MetricTest) { - MetaServerOption metaopt; + MetaServerOption metaopt; metaopt.rpcRetryOpt.addrs.push_back(mdsMetaServerAddr); metaopt.rpcRetryOpt.rpcTimeoutMs = 500; metaopt.rpcRetryOpt.rpcRetryIntervalUS = 200; @@ -72,25 +73,26 @@ TEST(MetricTest, ChunkServer_MetricTest) { std::shared_ptr mdsclient = std::make_shared(); ASSERT_EQ(0, mdsclient->Initialize(metaopt)); - FLAGS_chunkserver_list = "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT + FLAGS_chunkserver_list = + 
"127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT std::string configpath("./test/client/configs/client_metric.conf"); curve::CurveCluster* cluster = new curve::CurveCluster(); - cluster->PrepareConfig( - configpath, clientConf); + cluster->PrepareConfig(configpath, + clientConf); ClientConfig cc; ASSERT_EQ(0, cc.Init(configpath.c_str())); - // filename必须是全路径 + // The filename must be a full path std::string filename = "/1_userinfo_"; // init mds service FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9130, &ep); PeerId pd(ep); @@ -147,13 +149,13 @@ TEST(MetricTest, ChunkServer_MetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(4096, ret); - // 先睡眠,确保采样 + // Sleep first to ensure sampling std::this_thread::sleep_for(std::chrono::seconds(2)); ASSERT_GT(fm->writeRPC.latency.max_latency(), 0); ASSERT_GT(fm->readRPC.latency.max_latency(), 0); - // read write超时重试 + // Read write timeout retry mds.EnableNetUnstable(8000); ret = fi.Write(buffer, 0, 4096); ASSERT_EQ(-2, ret); @@ -165,8 +167,8 @@ TEST(MetricTest, ChunkServer_MetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(-2, ret); - - // 4次正确读写,4次超时读写,超时会引起重试,重试次数为3,数据量最大是8192 + // 4 correct reads and writes, 4 timeout reads and writes, timeout will + // cause retries, retry count is 3, and the maximum data volume is 8192 ASSERT_EQ(fm->inflightRPCNum.get_value(), 0); ASSERT_EQ(fm->userRead.qps.count.get_value(), 2); ASSERT_EQ(fm->userWrite.qps.count.get_value(), 2); @@ -204,7 +206,7 @@ void cb(CurveAioContext* ctx) { } // namespace TEST(MetricTest, SlowRequestMetricTest) { - MetaServerOption metaopt; + MetaServerOption metaopt; metaopt.rpcRetryOpt.addrs.push_back(mdsMetaServerAddr); metaopt.rpcRetryOpt.rpcTimeoutMs = 500; metaopt.rpcRetryOpt.rpcRetryIntervalUS = 200; @@ -212,16 +214,17 @@ TEST(MetricTest, SlowRequestMetricTest) { std::shared_ptr mdsclient = std::make_shared(); ASSERT_EQ(0, mdsclient->Initialize(metaopt)); - FLAGS_chunkserver_list = "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT + FLAGS_chunkserver_list = + "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT - // filename必须是全路径 + // The filename must be a full path std::string filename = "/1_userinfo_"; // init mds service FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9130, &ep); PeerId pd(ep); @@ -267,13 +270,13 @@ TEST(MetricTest, SlowRequestMetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(4096, ret); - // 先睡眠,确保采样 + // Sleep first to ensure sampling std::this_thread::sleep_for(std::chrono::seconds(2)); ASSERT_GT(fm->writeRPC.latency.max_latency(), 0); ASSERT_GT(fm->readRPC.latency.max_latency(), 0); - // read write超时重试 + // Read write timeout retry mds.EnableNetUnstable(100); ret = fi.Write(buffer, 0, 4096); ASSERT_EQ(-2, ret); @@ -383,5 +386,5 @@ TEST(MetricTest, MetricHelperTest) { ASSERT_NO_THROW(MetricHelper::IncremSlowRequestNum(nullptr)); } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/test/client/client_session_unittest.cpp b/test/client/client_session_unittest.cpp index 5606204b83..680d80ce93 100644 --- a/test/client/client_session_unittest.cpp +++ b/test/client/client_session_unittest.cpp @@ -20,30 +20,29 @@ * Author: tongguangxun */ -#include -#include +#include +#include #include #include -#include +#include +#include +#include #include #include -#include -#include - 
-#include // NOLINT -#include // NOLINT #include +#include // NOLINT +#include // NOLINT #include -#include // NOLINT +#include // NOLINT #include #include "src/client/client_config.h" -#include "test/client/fake/fakeMDS.h" #include "src/client/file_instance.h" #include "src/client/iomanager4file.h" #include "src/client/libcurve_file.h" #include "test/client/fake/fakeChunkserver.h" +#include "test/client/fake/fakeMDS.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" @@ -72,7 +71,7 @@ void sessioncallback(CurveAioContext* aioctx) { TEST(ClientSession, LeaseTaskTest) { FLAGS_chunkserver_list = - "127.0.0.1:9176:0,127.0.0.1:9177:0,127.0.0.1:9178:0"; + "127.0.0.1:9176:0,127.0.0.1:9177:0,127.0.0.1:9178:0"; std::string filename = "/1"; @@ -80,7 +79,7 @@ TEST(ClientSession, LeaseTaskTest) { FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid curve::client::EndPoint ep; butil::str2endpoint("127.0.0.1", 9176, &ep); PeerId pd(ep); @@ -104,7 +103,7 @@ TEST(ClientSession, LeaseTaskTest) { // set openfile response ::curve::mds::OpenFileResponse openresponse; - curve::mds::FileInfo * finfo = new curve::mds::FileInfo; + curve::mds::FileInfo* finfo = new curve::mds::FileInfo; ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession; se->set_sessionid("1"); se->set_createtime(12345); @@ -117,8 +116,8 @@ TEST(ClientSession, LeaseTaskTest) { openresponse.set_allocated_protosession(se); openresponse.set_allocated_fileinfo(finfo); - FakeReturn* openfakeret - = new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* openfakeret = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice->SetOpenFile(openfakeret); // 2. set refresh response @@ -129,7 +128,7 @@ TEST(ClientSession, LeaseTaskTest) { std::unique_lock lk(mtx); refreshcv.notify_one(); }; - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; info->set_filename(filename); info->set_seqnum(2); info->set_id(1); @@ -143,8 +142,8 @@ TEST(ClientSession, LeaseTaskTest) { refreshresp.set_statuscode(::curve::mds::StatusCode::kOK); refreshresp.set_sessionid("1234"); refreshresp.set_allocated_fileinfo(info); - FakeReturn* refreshfakeret - = new FakeReturn(nullptr, static_cast(&refreshresp)); + FakeReturn* refreshfakeret = + new FakeReturn(nullptr, static_cast(&refreshresp)); curvefsservice->SetRefreshSession(refreshfakeret, refresht); // 3. open the file @@ -253,10 +252,9 @@ TEST(ClientSession, LeaseTaskTest) { refreshresp.set_allocated_fileinfo(newFileInfo); refreshresp.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* refreshFakeRetWithNewInodeId = new FakeReturn( - nullptr, static_cast(&refreshresp)); - curvefsservice->SetRefreshSession( - refreshFakeRetWithNewInodeId, refresht); + FakeReturn* refreshFakeRetWithNewInodeId = + new FakeReturn(nullptr, static_cast(&refreshresp)); + curvefsservice->SetRefreshSession(refreshFakeRetWithNewInodeId, refresht); { std::unique_lock lk(mtx); @@ -302,8 +300,8 @@ TEST(ClientSession, LeaseTaskTest) { // 11. 
set fake close return ::curve::mds::CloseFileResponse closeresp; closeresp.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* closefileret - = new FakeReturn(nullptr, static_cast(&closeresp)); + FakeReturn* closefileret = + new FakeReturn(nullptr, static_cast(&closeresp)); curvefsservice->SetCloseFile(closefileret); LOG(INFO) << "uninit fileinstance"; @@ -321,12 +319,12 @@ TEST(ClientSession, LeaseTaskTest) { } // namespace client } // namespace curve -std::string mdsMetaServerAddr = "127.0.0.1:9101"; // NOLINT -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string configpath = "./test/client/configs/client_session.conf"; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9101"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string configpath = "./test/client/configs/client_session.conf"; // NOLINT -const std::vector clientConf { +const std::vector clientConf{ std::string("mds.listen.addr=127.0.0.1:9101,127.0.0.1:9102"), std::string("global.logPath=./runlog/"), std::string("chunkserver.rpcTimeoutMS=1000"), @@ -337,18 +335,17 @@ const std::vector clientConf { std::string("metacache.rpcRetryIntervalUS=500"), std::string("mds.rpcRetryIntervalUS=500"), std::string("schedule.threadpoolSize=2"), - std::string("mds.maxRetryMS=5000") -}; + std::string("mds.maxRetryMS=5000")}; -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); google::ParseCommandLineFlags(&argc, &argv, false); curve::CurveCluster* cluster = new curve::CurveCluster(); - cluster->PrepareConfig( - configpath, clientConf); + cluster->PrepareConfig(configpath, + clientConf); int ret = RUN_ALL_TESTS(); return ret; diff --git a/test/client/client_unstable_helper_test.cpp b/test/client/client_unstable_helper_test.cpp index cbb62891a5..4ef1c6487c 100644 --- a/test/client/client_unstable_helper_test.cpp +++ b/test/client/client_unstable_helper_test.cpp @@ -20,10 +20,11 @@ * Author: wuhanqing */ -#include -#include -#include #include +#include +#include +#include + #include #include "src/client/unstable_helper.h" @@ -48,50 +49,51 @@ TEST(UnstableHelperTest, normal_test) { chunkservers.emplace_back(std::make_pair(i, ep)); } - // 先对每个chunkserver进行10次连续超时 + // First, perform 10 consecutive timeouts on each chunkserver for (const auto& cs : chunkservers) { for (int i = 1; i <= opt.maxStableChunkServerTimeoutTimes; ++i) { helper.IncreTimeout(cs.first); ASSERT_EQ(UnstableState::NoUnstable, - helper.GetCurrentUnstableState( - cs.first, cs.second)); + helper.GetCurrentUnstableState(cs.first, cs.second)); } } - // 再对每个chunkserver增加一次超时 - // 前两个是chunkserver unstable状态,第三个是server unstable + // Add another timeout to each chunkserver + // The first two are in the chunkserver unstable state, and the third is in + // the server unstable state helper.IncreTimeout(chunkservers[0].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[0].first, chunkservers[0].second)); + helper.GetCurrentUnstableState(chunkservers[0].first, + chunkservers[0].second)); helper.IncreTimeout(chunkservers[1].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[1].first, chunkservers[1].second)); + helper.GetCurrentUnstableState(chunkservers[1].first, + chunkservers[1].second)); helper.IncreTimeout(chunkservers[2].first); 
ASSERT_EQ(UnstableState::ServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[2].first, chunkservers[2].second)); + helper.GetCurrentUnstableState(chunkservers[2].first, + chunkservers[2].second)); - // 继续增加超时次数 - // 这种情况下,每次都是chunkserver unstable + // Continue to increase the number of timeouts + // In this case, it is always chunkserver unstable helper.IncreTimeout(chunkservers[0].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[0].first, chunkservers[0].second)); + helper.GetCurrentUnstableState(chunkservers[0].first, + chunkservers[0].second)); helper.IncreTimeout(chunkservers[1].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[1].first, chunkservers[1].second)); + helper.GetCurrentUnstableState(chunkservers[1].first, + chunkservers[1].second)); helper.IncreTimeout(chunkservers[2].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[2].first, chunkservers[2].second)); + helper.GetCurrentUnstableState(chunkservers[2].first, + chunkservers[2].second)); - // 新chunkserver第一次超时,根据ip判断,可以直接设置为chunkserver unstable + // The first timeout of a new chunkserver can be directly set to chunkserver + // unstable based on the IP address butil::EndPoint ep; butil::str2endpoint("127.100.0.1:60999", &ep); auto chunkserver4 = std::make_pair(4, ep); @@ -99,22 +101,22 @@ TEST(UnstableHelperTest, normal_test) { helper.IncreTimeout(chunkserver4.first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkserver4.first, chunkserver4.second)); + helper.GetCurrentUnstableState(chunkserver4.first, + chunkserver4.second)); - // 其他ip的chunkserver + // Chunkservers for other IPs butil::str2endpoint("127.200.0.1:60999", &ep); auto chunkserver5 = std::make_pair(5, ep); for (int i = 1; i <= opt.maxStableChunkServerTimeoutTimes; ++i) { helper.IncreTimeout(chunkserver5.first); ASSERT_EQ(UnstableState::NoUnstable, - helper.GetCurrentUnstableState( - chunkserver5.first, chunkserver5.second)); + helper.GetCurrentUnstableState(chunkserver5.first, + chunkserver5.second)); } helper.IncreTimeout(chunkserver5.first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkserver5.first, chunkserver5.second)); + helper.GetCurrentUnstableState(chunkserver5.first, + chunkserver5.second)); } } // namespace client diff --git a/test/client/client_userinfo_unittest.cpp b/test/client/client_userinfo_unittest.cpp index 6153f23e5e..442af59c6f 100644 --- a/test/client/client_userinfo_unittest.cpp +++ b/test/client/client_userinfo_unittest.cpp @@ -20,23 +20,23 @@ * Author: tongguangxun */ -#include +#include #include #include -#include +#include -#include // NOLINT #include +#include // NOLINT #include -#include // NOLINT +#include // NOLINT #include #include "include/client/libcurve.h" #include "src/client/client_common.h" -#include "test/client/fake/fakeMDS.h" -#include "src/client/libcurve_file.h" #include "src/client/iomanager4chunk.h" +#include "src/client/libcurve_file.h" #include "src/client/libcurve_snapshot.h" +#include "test/client/fake/fakeMDS.h" extern std::string mdsMetaServerAddr; extern std::string configpath; @@ -70,8 +70,8 @@ class CurveClientUserAuthFail : public ::testing::Test { ASSERT_EQ(0, server.Join()); } - brpc::Server server; - MetaServerOption metaopt; + brpc::Server server; + MetaServerOption metaopt; FakeMDSCurveFSService curvefsservice; FakeMDSTopologyService topologyservice; }; 
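The UnstableHelperTest cases above boil down to two thresholds: a chunkserver is only reported unstable after more than maxStableChunkServerTimeoutTimes consecutive timeouts, and once enough chunkservers on one IP cross that threshold the whole server is marked unstable, after which any chunkserver on that IP is treated as unstable on its first timeout. A minimal sketch of that decision logic follows; the class name, the two threshold parameters, and the bookkeeping containers are assumptions made for illustration, not Curve's real UnstableHelper API.

#include <map>
#include <set>
#include <string>

enum class SketchUnstableState { NoUnstable, ChunkServerUnstable, ServerUnstable };

class UnstableHelperSketch {
 public:
    UnstableHelperSketch(int csTimeoutThreshold, int serverUnstableThreshold)
        : csTimeoutThreshold_(csTimeoutThreshold),
          serverUnstableThreshold_(serverUnstableThreshold) {}

    // Called on every RPC timeout against the given chunkserver.
    void IncreTimeout(int csId) { ++timeoutCount_[csId]; }

    SketchUnstableState GetCurrentUnstableState(int csId, const std::string& ip) {
        if (serverUnstableIps_.count(ip) > 0) {
            // The server is already unstable: report every chunkserver on
            // this IP as unstable, even on its first timeout.
            return SketchUnstableState::ChunkServerUnstable;
        }
        if (timeoutCount_[csId] <= csTimeoutThreshold_) {
            return SketchUnstableState::NoUnstable;
        }
        // The first time this chunkserver crosses its own threshold, count
        // it against its server; the N-th such chunkserver flips the server.
        if (countedCs_.insert(csId).second &&
            ++unstableCsPerIp_[ip] >= serverUnstableThreshold_) {
            serverUnstableIps_.insert(ip);
            return SketchUnstableState::ServerUnstable;
        }
        return SketchUnstableState::ChunkServerUnstable;
    }

 private:
    int csTimeoutThreshold_;       // cf. maxStableChunkServerTimeoutTimes
    int serverUnstableThreshold_;  // chunkservers per IP before ServerUnstable
    std::map<int, int> timeoutCount_;
    std::map<std::string, int> unstableCsPerIp_;
    std::set<int> countedCs_;
    std::set<std::string> serverUnstableIps_;
};

With csTimeoutThreshold = 10 and serverUnstableThreshold = 3, this reproduces the sequence asserted above: ten timeouts leave a chunkserver NoUnstable, the eleventh flips it to ChunkServerUnstable, the third such chunkserver on one IP yields ServerUnstable, a fresh chunkserver on that IP (chunkserver4) is unstable after a single timeout, while one on a different IP (chunkserver5) must still cross its own threshold.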
@@ -102,7 +102,7 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { // set openfile response ::curve::mds::OpenFileResponse openresponse; - curve::mds::FileInfo * finfo = new curve::mds::FileInfo; + curve::mds::FileInfo* finfo = new curve::mds::FileInfo; ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession; se->set_sessionid("1"); se->set_createtime(12345); @@ -115,16 +115,16 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { openresponse.mutable_fileinfo()->set_seqnum(2); openresponse.mutable_fileinfo()->set_filename(filename); - FakeReturn* openfakeret - = new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* openfakeret = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice.SetOpenFile(openfakeret); // 1. create a File authfailed ::curve::mds::CreateFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* fakeret - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateFileFakeReturn(fakeret); size_t len = 4 * 1024 * 1024ul; @@ -138,7 +138,7 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { LOG(INFO) << "get refresh session request!"; refreshcv.notify_one(); }; - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::ReFreshSessionResponse refreshresp; refreshresp.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); refreshresp.set_sessionid("1234"); @@ -147,12 +147,13 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { refreshresp.mutable_fileinfo()->set_filename(filename); refreshresp.mutable_fileinfo()->set_id(1); refreshresp.mutable_fileinfo()->set_parentid(0); - refreshresp.mutable_fileinfo()->set_filetype(curve::mds::FileType::INODE_PAGEFILE); // NOLINT + refreshresp.mutable_fileinfo()->set_filetype( + curve::mds::FileType::INODE_PAGEFILE); // NOLINT refreshresp.mutable_fileinfo()->set_chunksize(4 * 1024 * 1024); refreshresp.mutable_fileinfo()->set_length(4 * 1024 * 1024 * 1024ul); refreshresp.mutable_fileinfo()->set_ctime(12345678); - FakeReturn* refreshfakeret - = new FakeReturn(nullptr, static_cast(&refreshresp)); + FakeReturn* refreshfakeret = + new FakeReturn(nullptr, static_cast(&refreshresp)); curvefsservice.SetRefreshSession(refreshfakeret, refresht); // 3. open the file auth failed @@ -161,47 +162,47 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { // 4. open file success openresponse.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* openfakeret2 - = new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* openfakeret2 = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice.SetOpenFile(openfakeret2); openret = fileinstance.Open(); ASSERT_EQ(openret, LIBCURVE_ERROR::OK); -/* - // 5. wait for refresh - for (int i = 0; i < 4; i++) { - { - std::unique_lock lk(mtx); - refreshcv.wait(lk); + /* + // 5. 
wait for refresh + for (int i = 0; i < 4; i++) { + { + std::unique_lock lk(mtx); + refreshcv.wait(lk); + } } - } - CurveAioContext aioctx; - aioctx.offset = 4 * 1024 * 1024 - 4 * 1024; - aioctx.length = 4 * 1024 * 1024 + 8 * 1024; - aioctx.ret = LIBCURVE_ERROR::OK; - aioctx.cb = sessioncallback; - aioctx.buf = nullptr; - - fileinstance.AioRead(&aioctx); - fileinstance.AioWrite(&aioctx); - - for (int i = 0; i < 1; i++) { - { - std::unique_lock lk(mtx); - refreshcv.wait(lk); + CurveAioContext aioctx; + aioctx.offset = 4 * 1024 * 1024 - 4 * 1024; + aioctx.length = 4 * 1024 * 1024 + 8 * 1024; + aioctx.ret = LIBCURVE_ERROR::OK; + aioctx.cb = sessioncallback; + aioctx.buf = nullptr; + + fileinstance.AioRead(&aioctx); + fileinstance.AioWrite(&aioctx); + + for (int i = 0; i < 1; i++) { + { + std::unique_lock lk(mtx); + refreshcv.wait(lk); + } } - } - char buffer[10]; - ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Write(buffer, 0, 0)); - ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Read(buffer, 0, 0)); -*/ + char buffer[10]; + ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Write(buffer, 0, 0)); + ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Read(buffer, 0, 0)); + */ // 6. set fake close return ::curve::mds::CloseFileResponse closeresp; closeresp.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* closefileret - = new FakeReturn(nullptr, static_cast(&closeresp)); + FakeReturn* closefileret = + new FakeReturn(nullptr, static_cast(&closeresp)); curvefsservice.SetCloseFile(closefileret); ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, fileinstance.Close()); @@ -235,12 +236,11 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { ::curve::mds::CreateSnapShotResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); response.clear_snapshotfileinfo(); - FakeReturn* fakeret - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret); - ASSERT_EQ(-LIBCURVE_ERROR::FAILED, cl.CreateSnapShot(filename, - emptyuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::FAILED, + cl.CreateSnapShot(filename, emptyuserinfo, &seq)); // set response response.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); @@ -255,54 +255,51 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { finf->set_seqnum(2); finf->set_segmentsize(1 * 1024 * 1024 * 1024); response.set_allocated_snapshotfileinfo(finf); - FakeReturn* fakeret1 - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret1); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.CreateSnapShot(filename, - emptyuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.CreateSnapShot(filename, emptyuserinfo, &seq)); // test delete // normal delete test ::curve::mds::DeleteSnapShotResponse delresponse; delresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* delfakeret - = new FakeReturn(nullptr, static_cast(&delresponse)); + FakeReturn* delfakeret = + new FakeReturn(nullptr, static_cast(&delresponse)); curvefsservice.SetDeleteSnapShot(delfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.DeleteSnapShot(filename, - emptyuserinfo, - seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.DeleteSnapShot(filename, emptyuserinfo, seq)); // test get SegmentInfo // normal getinfo curve::mds::GetOrAllocateSegmentResponse* getresponse = - new 
curve::mds::GetOrAllocateSegmentResponse(); + new curve::mds::GetOrAllocateSegmentResponse(); curve::mds::PageFileSegment* pfs = new curve::mds::PageFileSegment; pfs->set_logicalpoolid(0); - pfs->set_segmentsize(1ull*1024*1024*1024); - pfs->set_chunksize(16*1024*1024); + pfs->set_segmentsize(1ull * 1024 * 1024 * 1024); + pfs->set_chunksize(16 * 1024 * 1024); pfs->set_startoffset(0); getresponse->set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); getresponse->set_allocated_pagefilesegment(pfs); - FakeReturn* getfakeret = new FakeReturn(nullptr, - static_cast(getresponse)); + FakeReturn* getfakeret = + new FakeReturn(nullptr, static_cast(getresponse)); curvefsservice.SetGetSnapshotSegmentInfo(getfakeret); ::curve::mds::topology::GetChunkServerListInCopySetsResponse* geresponse_1 = - new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); + new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); geresponse_1->set_statuscode(0); - FakeReturn* faktopologyeret = new FakeReturn(nullptr, - static_cast(geresponse_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(geresponse_1)); topologyservice.SetFakeReturn(faktopologyeret); SegmentInfo seginfo; LogicalPoolCopysetIDInfo lpcsIDInfo; - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.GetSnapshotSegmentInfo(filename, - emptyuserinfo, - 0, 0, &seginfo)); + ASSERT_EQ( + -LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapshotSegmentInfo(filename, emptyuserinfo, 0, 0, &seginfo)); // test list snapshot // normal delete test @@ -311,7 +308,8 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { listresponse.mutable_fileinfo(0)->set_filename(filename); listresponse.mutable_fileinfo(0)->set_id(1); listresponse.mutable_fileinfo(0)->set_parentid(0); - listresponse.mutable_fileinfo(0)->set_filetype(curve::mds::FileType::INODE_PAGEFILE); // NOLINT + listresponse.mutable_fileinfo(0)->set_filetype( + curve::mds::FileType::INODE_PAGEFILE); // NOLINT listresponse.mutable_fileinfo(0)->set_chunksize(4 * 1024 * 1024); listresponse.mutable_fileinfo(0)->set_length(4 * 1024 * 1024 * 1024ul); listresponse.mutable_fileinfo(0)->set_ctime(12345678); @@ -319,20 +317,19 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { listresponse.mutable_fileinfo(0)->set_segmentsize(1 * 1024 * 1024 * 1024ul); listresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* listfakeret - = new FakeReturn(nullptr, static_cast(&listresponse)); + FakeReturn* listfakeret = + new FakeReturn(nullptr, static_cast(&listresponse)); curve::client::FInfo_t sinfo; curvefsservice.SetListSnapShot(listfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.GetSnapShot(filename, - emptyuserinfo, - seq, &sinfo)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapShot(filename, emptyuserinfo, seq, &sinfo)); std::vector seqvec; std::map fivec; seqvec.push_back(seq); curve::client::FInfo_t ffinfo; ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.ListSnapShot(filename, emptyuserinfo, &seqvec, &fivec)); + cl.ListSnapShot(filename, emptyuserinfo, &seqvec, &fivec)); cl.UnInit(); delete fakeret; @@ -341,7 +338,7 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { delete delfakeret; } -// root user测试 +// Root user testing TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ClientConfigOption opt; opt.metaServerOpt.rpcRetryOpt.rpcTimeoutMs = 500; @@ -359,7 +356,7 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ASSERT_TRUE(!cl.Init(opt)); UserInfo_t rootuserinfo; - rootuserinfo.owner 
="root"; + rootuserinfo.owner = "root"; rootuserinfo.password = "123"; std::string filename = "./1_usertest_.img"; @@ -370,12 +367,11 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ::curve::mds::CreateSnapShotResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); response.clear_snapshotfileinfo(); - FakeReturn* fakeret - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret); - ASSERT_EQ(-LIBCURVE_ERROR::FAILED, cl.CreateSnapShot(filename, - rootuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::FAILED, + cl.CreateSnapShot(filename, rootuserinfo, &seq)); // set response response.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); @@ -390,54 +386,51 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { finf->set_seqnum(2); finf->set_segmentsize(1 * 1024 * 1024 * 1024); response.set_allocated_snapshotfileinfo(finf); - FakeReturn* fakeret1 - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret1); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.CreateSnapShot(filename, - rootuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.CreateSnapShot(filename, rootuserinfo, &seq)); // test delete // normal delete test ::curve::mds::DeleteSnapShotResponse delresponse; delresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* delfakeret - = new FakeReturn(nullptr, static_cast(&delresponse)); + FakeReturn* delfakeret = + new FakeReturn(nullptr, static_cast(&delresponse)); curvefsservice.SetDeleteSnapShot(delfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.DeleteSnapShot(filename, - rootuserinfo, - seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.DeleteSnapShot(filename, rootuserinfo, seq)); // test get SegmentInfo // normal getinfo curve::mds::GetOrAllocateSegmentResponse* getresponse = - new curve::mds::GetOrAllocateSegmentResponse(); + new curve::mds::GetOrAllocateSegmentResponse(); curve::mds::PageFileSegment* pfs = new curve::mds::PageFileSegment; pfs->set_logicalpoolid(0); - pfs->set_segmentsize(1ull*1024*1024*1024); - pfs->set_chunksize(16ull*1024*1024); + pfs->set_segmentsize(1ull * 1024 * 1024 * 1024); + pfs->set_chunksize(16ull * 1024 * 1024); pfs->set_startoffset(0); getresponse->set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); getresponse->set_allocated_pagefilesegment(pfs); - FakeReturn* getfakeret = new FakeReturn(nullptr, - static_cast(getresponse)); + FakeReturn* getfakeret = + new FakeReturn(nullptr, static_cast(getresponse)); curvefsservice.SetGetSnapshotSegmentInfo(getfakeret); ::curve::mds::topology::GetChunkServerListInCopySetsResponse* geresponse_1 = - new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); + new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); geresponse_1->set_statuscode(0); - FakeReturn* faktopologyeret = new FakeReturn(nullptr, - static_cast(geresponse_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(geresponse_1)); topologyservice.SetFakeReturn(faktopologyeret); SegmentInfo seginfo; LogicalPoolCopysetIDInfo lpcsIDInfo; - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.GetSnapshotSegmentInfo(filename, - rootuserinfo, - 0, 0, &seginfo)); + ASSERT_EQ( + -LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapshotSegmentInfo(filename, rootuserinfo, 0, 0, &seginfo)); // test list snapshot // normal delete 
test @@ -446,7 +439,8 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { listresponse.mutable_fileinfo(0)->set_filename(filename); listresponse.mutable_fileinfo(0)->set_id(1); listresponse.mutable_fileinfo(0)->set_parentid(0); - listresponse.mutable_fileinfo(0)->set_filetype(curve::mds::FileType::INODE_PAGEFILE); // NOLINT + listresponse.mutable_fileinfo(0)->set_filetype( + curve::mds::FileType::INODE_PAGEFILE); // NOLINT listresponse.mutable_fileinfo(0)->set_chunksize(4 * 1024 * 1024); listresponse.mutable_fileinfo(0)->set_length(4 * 1024 * 1024 * 1024ul); listresponse.mutable_fileinfo(0)->set_ctime(12345678); @@ -454,21 +448,19 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { listresponse.mutable_fileinfo(0)->set_segmentsize(1 * 1024 * 1024 * 1024ul); listresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* listfakeret - = new FakeReturn(nullptr, static_cast(&listresponse)); + FakeReturn* listfakeret = + new FakeReturn(nullptr, static_cast(&listresponse)); curve::client::FInfo_t sinfo; curvefsservice.SetListSnapShot(listfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.GetSnapShot(filename, - rootuserinfo, - seq, &sinfo)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapShot(filename, rootuserinfo, seq, &sinfo)); std::vector seqvec; std::map fivec; seqvec.push_back(seq); curve::client::FInfo_t ffinfo; ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.ListSnapShot(filename, rootuserinfo, - &seqvec, &fivec)); + cl.ListSnapShot(filename, rootuserinfo, &seqvec, &fivec)); cl.UnInit(); delete fakeret; diff --git a/test/client/copyset_client_test.cpp b/test/client/copyset_client_test.cpp index b71383ec9d..548db4f6d0 100644 --- a/test/client/copyset_client_test.cpp +++ b/test/client/copyset_client_test.cpp @@ -20,26 +20,27 @@ * Author: wudemiao */ -#include +#include "src/client/copyset_client.h" + #include +#include #include -#include #include +#include -#include //NOLINT -#include // NOLINT +#include // NOLINT +#include //NOLINT -#include "src/client/copyset_client.h" -#include "test/client/mock/mock_meta_cache.h" -#include "src/common/concurrent/count_down_event.h" -#include "test/client/mock/mock_chunkservice.h" -#include "test/client/mock/mock_request_context.h" #include "src/client/chunk_closure.h" +#include "src/client/metacache.h" +#include "src/client/request_closure.h" +#include "src/common/concurrent/count_down_event.h" #include "src/common/timeutility.h" #include "test/client/fake/fakeChunkserver.h" +#include "test/client/mock/mock_chunkservice.h" +#include "test/client/mock/mock_meta_cache.h" +#include "test/client/mock/mock_request_context.h" #include "test/client/mock/mock_request_scheduler.h" -#include "src/client/request_closure.h" -#include "src/client/metacache.h" namespace curve { namespace client { @@ -47,18 +48,18 @@ namespace client { using curve::chunkserver::CHUNK_OP_STATUS; using curve::chunkserver::ChunkRequest; +using curve::client::MetaCache; +using curve::common::TimeUtility; using ::testing::_; -using ::testing::Invoke; -using ::testing::Return; using ::testing::AnyNumber; +using ::testing::AtLeast; using ::testing::DoAll; -using ::testing::SetArgPointee; -using ::testing::SetArgReferee; using ::testing::InSequence; -using ::testing::AtLeast; +using ::testing::Invoke; +using ::testing::Return; using ::testing::SaveArgPointee; -using curve::client::MetaCache; -using curve::common::TimeUtility; +using ::testing::SetArgPointee; +using ::testing::SetArgReferee; class CopysetClientTest : public 
testing::Test { protected: @@ -76,60 +77,62 @@ class CopysetClientTest : public testing::Test { public: std::string listenAddr_; - brpc::Server *server_; + brpc::Server* server_; }; -/* TODO(wudemiao) 当前 controller 错误不能通过 mock 返回 */ +/* TODO(wudemiao) current controller error cannot be returned through mock */ int gWriteCntlFailedCode = 0; int gReadCntlFailedCode = 0; -static void WriteChunkFunc(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { +static void WriteChunkFunc(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { /* return response */ brpc::ClosureGuard doneGuard(done); if (0 != gWriteCntlFailedCode) { if (gWriteCntlFailedCode == brpc::ERPCTIMEDOUT) { std::this_thread::sleep_for(std::chrono::milliseconds(3500)); } - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); cntl->SetFailed(gWriteCntlFailedCode, "write controller error"); } } -static void ReadChunkFunc(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { +static void ReadChunkFunc(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); if (0 != gReadCntlFailedCode) { if (gReadCntlFailedCode == brpc::ERPCTIMEDOUT) { std::this_thread::sleep_for(std::chrono::milliseconds(4000)); } - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); cntl->SetFailed(gReadCntlFailedCode, "read controller error"); } } -static void ReadChunkSnapshotFunc(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, //NOLINT - ::curve::chunkserver::ChunkResponse *response, //NOLINT - google::protobuf::Closure *done) { +static void ReadChunkSnapshotFunc( + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, // NOLINT + ::curve::chunkserver::ChunkResponse* response, // NOLINT + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); if (0 != gReadCntlFailedCode) { - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); cntl->SetFailed(-1, "read snapshot controller error"); } } -static void DeleteChunkSnapshotFunc(::google::protobuf::RpcController *controller, //NOLINT - const ::curve::chunkserver::ChunkRequest *request, //NOLINT - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { +static void DeleteChunkSnapshotFunc( + ::google::protobuf::RpcController* controller, // NOLINT + const ::curve::chunkserver::ChunkRequest* request, // NOLINT + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); if (0 != gReadCntlFailedCode) { - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); cntl->SetFailed(-1, "delete snapshot controller error"); } } @@ -146,32 +149,35 @@ static void CreateCloneChunkFunc( } } -static void RecoverChunkFunc(::google::protobuf::RpcController *controller, //NOLINT - const 
::curve::chunkserver::ChunkRequest *request, //NOLINT - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { +static void RecoverChunkFunc( + ::google::protobuf::RpcController* controller, // NOLINT + const ::curve::chunkserver::ChunkRequest* request, // NOLINT + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); if (0 != gReadCntlFailedCode) { - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); cntl->SetFailed(-1, "recover chunk controller error"); } } -static void GetChunkInfoFunc(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::GetChunkInfoRequest *request, //NOLINT - ::curve::chunkserver::GetChunkInfoResponse *response, //NOLINT - google::protobuf::Closure *done) { +static void GetChunkInfoFunc( + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::GetChunkInfoRequest* request, // NOLINT + ::curve::chunkserver::GetChunkInfoResponse* response, // NOLINT + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); if (0 != gReadCntlFailedCode) { - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); cntl->SetFailed(-1, "get chunk info controller error"); } } TEST_F(CopysetClientTest, normal_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -215,7 +221,7 @@ TEST_F(CopysetClientTest, normal_test) { // write success for (int i = 0; i < 10; ++i) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -225,29 +231,29 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -256,30 +262,30 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). - Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -288,65 +294,62 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(Return(-1)) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } // read success for (int i = 0; i < 10; ++i) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = i * 8; reqCtx->rawlength_ = len; reqCtx->subIoIndex_ = 0; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). - Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -355,30 +358,28 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -387,25 +388,23 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). - Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(Return(-1)) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -417,8 +416,9 @@ TEST_F(CopysetClientTest, normal_test) { */ TEST_F(CopysetClientTest, write_error_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -465,9 +465,9 @@ TEST_F(CopysetClientTest, write_error_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -476,7 +476,7 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -485,21 +485,22 @@ TEST_F(CopysetClientTest, write_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); 
EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, reqDone->GetErrorCode()); } /* controller error */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -507,24 +508,27 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->offset_ = 0; reqCtx->rawlength_ = len; - // 配置文件设置的重试睡眠时间为5000,因为没有触发底层指数退避,所以重试之间不会睡眠 + // The retry sleep interval set in the configuration file is 5000; since + // the underlying exponential backoff is not triggered here, there is + // no sleep between retries uint64_t start = TimeUtility::GetTimeofDayUs(); curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; gWriteCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(3) .WillRepeatedly(Invoke(WriteChunkFunc)); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); @@ -534,7 +538,7 @@ TEST_F(CopysetClientTest, write_error_test) { } /* controller set timeout */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -543,14 +547,17 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); - // 配置文件设置的重试超时时间为5000,因为chunkserver设置返回timeout - // 导致触发底层超时时间指数退避,每次重试间隔增大。重试三次正常只需要睡眠3*1000 - // 但是增加指数退避之后,超时时间将增加到1000 + 2000 + 2000 = 5000 - // 加上随机因子,三次重试时间应该大于7000, 且小于8000 + // The retry timeout set in the configuration file is 5000. Because the + // chunkserver is made to return a timeout, the underlying exponential + // backoff of the timeout is triggered and the interval grows with each + // retry. 
Three retries would normally take only 3 * + // 1000, but with the exponential backoff the timeouts add up to + // 1000 + 2000 + 2000 = 5000. Adding the random factor, the three + // retries should take more than 7000 and less than 8000 uint64_t start = TimeUtility::GetTimeofDayMs(); reqCtx->done_ = reqDone; @@ -558,12 +565,12 @@ EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) .Times(AtLeast(3)) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(3) .WillRepeatedly(Invoke(WriteChunkFunc)); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); @@ -577,7 +584,7 @@ /* controller set timeout */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -586,31 +593,35 @@ reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); - // 配置文件设置的重试睡眠时间为5000,因为chunkserver设置返回timeout - // 导致触发底层指数退避,每次重试间隔增大。重试三次正常只需要睡眠3*5000 - // 但是增加指数退避之后,睡眠间隔将增加到10000 + 20000 = 30000 - // 加上随机因子,三次重试时间应该大于29000, 且小于50000 + // The retry sleep interval set in the configuration file is 5000. + // Because the chunkserver is made to return a timeout, the underlying + // exponential backoff is triggered and the sleep interval grows with + // each retry. 
Three retries would normally sleep only 3 * 5000, but + // with the exponential backoff the sleeps add up to + // 10000 + 20000 = 30000. Adding the random factor, the three retry + // times should be greater than 29000 and less than 50000 uint64_t start = TimeUtility::GetTimeofDayUs(); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, - reqDone->GetErrorCode()); + reqDone->GetErrorCode()); uint64_t end = TimeUtility::GetTimeofDayUs(); ASSERT_GT(end - start, 28000); @@ -618,9 +629,9 @@ TEST_F(CopysetClientTest, write_error_test) { gWriteCntlFailedCode = 0; } - /* 其他错误 */ + /* Other errors */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -629,7 +640,7 @@ reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -637,21 +648,22 @@ ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, - reqDone->GetErrorCode()); + reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not the leader; the correct leader is returned */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -660,7 +672,7 @@ reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new 
FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -670,28 +682,31 @@ TEST_F(CopysetClientTest, write_error_test) { response1.set_redirect(leaderStr); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(1) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(WriteChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); ASSERT_EQ(1, fm.writeRPC.redirectQps.count.get_value()); } - /* 不是 leader,没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, did not return a leader, refreshing the meta cache + * succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -700,35 +715,37 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response1; response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); -// response1.set_redirect(leaderStr2); + // response1.set_redirect(leaderStr2); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(WriteChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(WriteChunkFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,没有返回 leader,刷新 meta cache 失败 
*/ + /* Not a leader, did not return a leader, refreshing the meta cache failed + */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -737,38 +754,38 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response1; response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); -// response1.set_redirect(leaderStr2); + // response1.set_redirect(leaderStr2); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(WriteChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -777,7 +794,7 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); FileMetric fm("test"); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -786,33 +803,36 @@ TEST_F(CopysetClientTest, write_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); + SetArgPointee<3>(leaderAddr), Return(0))); EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) .Times(3) .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); auto 
startTimeUs = curve::common::TimeUtility::GetTimeofDayUs();
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
+        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                 offset, len, {}, reqDone);
         cond.Wait();
-        auto elpased = curve::common::TimeUtility::GetTimeofDayUs() -
-                       startTimeUs;
+        auto elpased =
+            curve::common::TimeUtility::GetTimeofDayUs() - startTimeUs;
         // chunkserverOPRetryIntervalUS = 5000
-        // 每次redirect睡眠500us,共重试2次(chunkserverOPMaxRetry=3,判断时大于等于就返回,所以共只重试了两次)
-        // 所以总共耗费时间大于1000us
+        // Each redirect sleeps 500us, and there are two retries in total
+        // (chunkserverOPMaxRetry=3; the check returns once the count is
+        // greater than or equal to the max, so only two retries happen),
+        // so the total elapsed time exceeds 1000us.
         ASSERT_GE(elpased, 1000);
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED,
                   reqDone->GetErrorCode());
         ASSERT_EQ(3, fm.writeRPC.redirectQps.count.get_value());
     }
-    /* copyset 不存在,更新 leader 依然失败 */
+    /* copyset does not exist, updating leader still failed */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -821,7 +841,7 @@ TEST_F(CopysetClientTest, write_error_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);
@@ -829,22 +849,23 @@ TEST_F(CopysetClientTest, write_error_test) {
         ChunkResponse response;
         response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST);
         response.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6)
+        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+            .Times(6)
             .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(WriteChunkFunc)));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+            .Times(3)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc)));
+        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                 offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST,
                   reqDone->GetErrorCode());
     }
-    /* copyset 不存在,更新 leader 成功 */
+    /* copyset does not exist, updating leader succeeded */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -853,7 +874,7 @@ TEST_F(CopysetClientTest, write_error_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);
@@ -864,24 +885,25 @@ TEST_F(CopysetClientTest, write_error_test) {
         ChunkResponse response2;
         response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
         response2.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
+
EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(WriteChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } // epoch too old { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -890,7 +912,7 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -898,17 +920,18 @@ TEST_F(CopysetClientTest, write_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(1) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD, - reqDone->GetErrorCode()); + reqDone->GetErrorCode()); } scheduler.Fini(); @@ -919,8 +942,9 @@ TEST_F(CopysetClientTest, write_error_test) { */ TEST_F(CopysetClientTest, write_failed_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -968,7 +992,7 @@ TEST_F(CopysetClientTest, write_failed_test) { /* controller set timeout */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -977,13 +1001,16 @@ TEST_F(CopysetClientTest, write_failed_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); 
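Every case in these tests synchronizes on the cond/reqDone pair built above: the closure signals the CountDownEvent when the final retry completes, and cond.Wait() blocks the test until then. A minimal standalone sketch of that pattern, with simplified stand-ins for curve::common::CountDownEvent and the request closure (hypothetical reductions for illustration; the real classes also carry metric and tracker state):

    #include <condition_variable>
    #include <mutex>

    // Simplified stand-in for curve::common::CountDownEvent.
    class CountDownEvent {
     public:
        explicit CountDownEvent(int count) : count_(count) {}
        void Signal() {  // called from the closure when a request finishes
            std::lock_guard<std::mutex> lk(mtx_);
            if (--count_ <= 0) {
                cv_.notify_all();
            }
        }
        void Wait() {  // called by the test body
            std::unique_lock<std::mutex> lk(mtx_);
            cv_.wait(lk, [this] { return count_ <= 0; });
        }

     private:
        std::mutex mtx_;
        std::condition_variable cv_;
        int count_;
    };

    // Hypothetical counterpart of FakeRequestClosure, reduced to the part
    // that matters for synchronization.
    struct FakeClosure {
        explicit FakeClosure(CountDownEvent* event) : event_(event) {}
        void Run() { event_->Signal(); }
        CountDownEvent* event_;
    };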
reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);

-        // 配置文件设置的重试超时时间为500,因为chunkserver设置返回timeout
-        // 导致触发底层超时时间指数退避,每次重试间隔增大。重试50次正常只需要超时49*500
-        // 但是增加指数退避之后,超时时间将增加到49*1000 = 49000
+        // The config sets the retry timeout to 500. Because the mock
+        // chunkserver keeps returning timeout, the underlying timeout enters
+        // exponential backoff and grows with every retry. Retrying 50 times
+        // would normally need only 49 * 500 of timeout, but with the backoff
+        // the total grows to 49 * 1000 = 49000.
         uint64_t start = TimeUtility::GetTimeofDayMs();

         reqCtx->done_ = reqDone;
@@ -991,12 +1018,12 @@ TEST_F(CopysetClientTest, write_failed_test) {
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
             .Times(AtLeast(50))
             .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(50)
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+            .Times(50)
             .WillRepeatedly(Invoke(WriteChunkFunc));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
+        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                 offset, len, {}, reqDone);
         cond.Wait();

         ASSERT_NE(0, reqDone->GetErrorCode());
@@ -1009,7 +1036,7 @@ TEST_F(CopysetClientTest, write_failed_test) {
     }

     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -1018,31 +1045,34 @@ TEST_F(CopysetClientTest, write_failed_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);

-        // 配置文件设置的重试睡眠时间为5000us,因为chunkserver设置返回timeout
-        // 导致触发底层指数退避,每次重试间隔增大。重试50次正常只需要睡眠49*5000us
-        // 但是增加指数退避之后,睡眠间隔将增加到
-        // 10000 + 20000 + 40000... ~= 4650000
+        // The config sets the retry sleep interval to 5000us. Because the
+        // mock chunkserver keeps returning timeout, the underlying
+        // exponential backoff kicks in and the interval grows with every
+        // retry. Retrying 50 times would normally sleep only 49 * 5000us,
+        // but with the backoff the intervals add up to
+        // 10000 + 20000 + 40000... ~= 4650000
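The ~= 4650000 figure above follows from doubling with a ceiling. A small self-contained sketch of the arithmetic; the 100000us cap is an assumption chosen because it is the value consistent with the quoted total, not a constant taken from this patch:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    int main() {
        const int64_t baseUs = 5000;   // configured retry sleep interval
        const int64_t capUs = 100000;  // assumed backoff ceiling
        int64_t sleepUs = baseUs;
        int64_t totalUs = 0;
        for (int retry = 1; retry <= 49; ++retry) {  // 50 attempts -> 49 sleeps
            sleepUs = std::min(sleepUs * 2, capUs);  // double, then saturate
            totalUs += sleepUs;
        }
        // 10000 + 20000 + 40000 + 80000 + 45 * 100000 = 4650000
        std::cout << totalUs << std::endl;
        return 0;
    }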
         uint64_t start = TimeUtility::GetTimeofDayUs();

         reqCtx->done_ = reqDone;
         ChunkResponse response;
         response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD);
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(50).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(50)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(WriteChunkFunc)));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
+            .Times(50)
+            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+            .Times(50)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc)));
+        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                 offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD,
-                    reqDone->GetErrorCode());
+                  reqDone->GetErrorCode());

         uint64_t end = TimeUtility::GetTimeofDayUs();
         ASSERT_GT(end - start, 250000);
@@ -1052,14 +1082,14 @@ TEST_F(CopysetClientTest, write_failed_test) {

     scheduler.Fini();
 }
-
 /**
  * read failed testing
  */
 TEST_F(CopysetClientTest, read_failed_test) {
     MockChunkServiceImpl mockChunkService;
-    ASSERT_EQ(server_->AddService(&mockChunkService,
-                                  brpc::SERVER_DOESNT_OWN_SERVICE), 0);
+    ASSERT_EQ(
+        server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE),
+        0);
     ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0);

     IOSenderOption ioSenderOpt;
@@ -1105,7 +1135,7 @@ TEST_F(CopysetClientTest, read_failed_test) {

     /* controller set timeout */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -1113,13 +1143,16 @@ TEST_F(CopysetClientTest, read_failed_test) {
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

-        // 配置文件设置的重试超时时间为500,因为chunkserver设置返回timeout
-        // 导致触发底层超时时间指数退避,每次重试间隔增大。重试50次正常只需要50*500
-        // 但是增加指数退避之后,超时时间将增加到500 + 1000 + 2000... ~= 60000
+        // The config sets the retry timeout to 500. Because the mock
+        // chunkserver keeps returning timeout, the underlying timeout enters
+        // exponential backoff and grows with every retry. Retrying 50 times
+        // would normally need only 50 * 500, but with the backoff the
+        // timeouts add up to 500 + 1000 + 2000... ~= 60000
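On this path the client does not sleep between retries; instead the RPC timeout itself is doubled after each timed-out attempt, up to a configured ceiling. A sketch of the progression the comment describes; the cap of 8000 below is a placeholder assumption, since the real ceiling comes from the client configuration rather than from this patch:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    int main() {
        const int64_t baseMs = 500;  // configured RPC timeout
        const int64_t capMs = 8000;  // assumed ceiling, configuration-dependent
        int64_t timeoutMs = baseMs;
        for (int attempt = 1; attempt <= 8; ++attempt) {
            std::cout << "attempt " << attempt << ": " << timeoutMs << "ms\n";
            timeoutMs = std::min(timeoutMs * 2, capMs);  // 500, 1000, 2000, ...
        }
        return 0;
    }

The elapsed-time assertions that follow only check that the run costs noticeably more than attempts * base once this growth kicks in.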
         uint64_t start = TimeUtility::GetTimeofDayMs();

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);
@@ -1128,12 +1161,11 @@ TEST_F(CopysetClientTest, read_failed_test) {
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
             .Times(AtLeast(50))
             .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(50)
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _))
+            .Times(50)
             .WillRepeatedly(Invoke(ReadChunkFunc));
-        copysetClient.ReadChunk(reqCtx->idinfo_, sn,
-                                offset, len, {}, reqDone);
+        copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone);
         cond.Wait();

         ASSERT_NE(0, reqDone->GetErrorCode());
@@ -1146,9 +1178,9 @@ TEST_F(CopysetClientTest, read_failed_test) {
         gReadCntlFailedCode = 0;
     }

-    /* 设置 overload */
+    /* Set overload */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -1157,29 +1189,32 @@ TEST_F(CopysetClientTest, read_failed_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);

-        // 配置文件设置的重试睡眠时间为5000us,因为chunkserver设置返回timeout
-        // 导致触发底层指数退避,每次重试间隔增大。重试50次正常只需要睡眠49*5000
-        // 但是增加指数退避之后,睡眠间隔将增加到
-        // 10000 + 20000 + 40000 ... = 4650000
-        // 加上随机因子,三次重试时间应该大于2900, 且小于5000
+        // The config sets the retry sleep interval to 5000us. Because the
+        // mock chunkserver keeps returning timeout, the underlying
+        // exponential backoff kicks in and the interval grows with every
+        // retry. Retrying 50 times would normally sleep only 49 * 5000, but
+        // with the backoff the intervals add up to
+        // 10000 + 20000 + 40000 ... = 4650000. Adding the random factor,
+        // the three retries should take more than 2900 and less than 5000.
         uint64_t start = TimeUtility::GetTimeofDayUs();

         reqCtx->done_ = reqDone;
         ChunkResponse response;
         response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD);
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(50).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(50)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(ReadChunkFunc)));
-        copysetClient.ReadChunk(reqCtx->idinfo_, sn,
-                                offset, len, {}, reqDone);
+            .Times(50)
+            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _))
+            .Times(50)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc)));
+        copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD,
                   reqDone->GetErrorCode());
@@ -1196,8 +1231,9 @@ TEST_F(CopysetClientTest, read_failed_test) {
 */
 TEST_F(CopysetClientTest, read_error_test) {
     MockChunkServiceImpl mockChunkService;
-    ASSERT_EQ(server_->AddService(&mockChunkService,
-                                  brpc::SERVER_DOESNT_OWN_SERVICE), 0);
+    ASSERT_EQ(
+        server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE),
+        0);
     ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0);

     IOSenderOption ioSenderOpt;
@@ -1242,9 +1278,9 @@ TEST_F(CopysetClientTest, read_error_test) {
     IOTracker iot(nullptr, nullptr, nullptr, &fm);
     iot.PrepareReadIOBuffers(1);

-    /* 非法参数 */
+    /* Illegal parameter */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -1253,7 +1289,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);
@@ -1261,21 +1297,20 @@ TEST_F(CopysetClientTest, read_error_test) {
         ChunkResponse response;
         response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST);
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(response),
-                            Invoke(ReadChunkFunc)));
-        copysetClient.ReadChunk(reqCtx->idinfo_, sn,
-                                offset, len, {}, reqDone);
+            .Times(AtLeast(1))
+            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
+                            SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc)));
+        copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST,
                   reqDone->GetErrorCode());
     }
     /* chunk not exist */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -1284,7 +1319,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);
@@ -1292,20 +1327,19 @@ TEST_F(CopysetClientTest, read_error_test) {
         ChunkResponse response;
         response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST);
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(response),
-                            Invoke(ReadChunkFunc)));
-        copysetClient.ReadChunk(reqCtx->idinfo_, sn,
-                                offset, len, {}, reqDone);
+            .Times(AtLeast(1))
+            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
+                            SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc)));
+        copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_EQ(0, reqDone->GetErrorCode());
     }
     /* controller error */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -1313,11 +1347,13 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

-        // 配置文件设置的重试睡眠时间为5000,因为没有触发底层指数退避,所以重试之间不会睡眠
+        // The retry sleep interval in the config is 5000, but the underlying
+        // exponential backoff is not triggered here, so there is no sleep
+        // between retries.
         uint64_t start = TimeUtility::GetTimeofDayUs();

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);
@@ -1326,12 +1362,11 @@ TEST_F(CopysetClientTest, read_error_test) {
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
             .Times(AtLeast(3))
             .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3)
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _))
+            .Times(3)
             .WillRepeatedly(Invoke(ReadChunkFunc));
-        copysetClient.ReadChunk(reqCtx->idinfo_, sn,
-                                offset, len, {}, reqDone);
+        copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone);
         cond.Wait();

         ASSERT_NE(0, reqDone->GetErrorCode());
@@ -1342,7 +1377,7 @@ TEST_F(CopysetClientTest, read_error_test) {

     /* controller set timeout */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -1350,14 +1385,17 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

-        // 配置文件设置的超时时间为1000,因为chunkserver设置返回timeout
-        // 导致触发底层超时时间指数退避,每次重试间隔增大。重试三次正常只需要睡眠3*1000
-        // 但是增加指数退避之后,超时时间将增加到1000 + 2000 + 2000 = 5000
-        // 加上随机因子,三次重试时间应该大于7000, 且小于8000
+        // The timeout configured in the settings file is 1000, but due to chunk
+        // server timeout, it triggers exponential backoff, increasing the
+        // interval between retries. In normal conditions, three retries would
+        // only require a sleep time of 3 * 1000.
However, with the added + // exponential backoff, the timeout intervals will increase to 1000 + + // 2000 + 2000 = 5000. Considering the random factor, the total time for + // three retries should be greater than 7000 and less than 8000. uint64_t start = TimeUtility::GetTimeofDayMs(); curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1366,12 +1404,11 @@ TEST_F(CopysetClientTest, read_error_test) { EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) .Times(AtLeast(3)) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) .WillRepeatedly(Invoke(ReadChunkFunc)); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); @@ -1384,9 +1421,9 @@ TEST_F(CopysetClientTest, read_error_test) { gReadCntlFailedCode = 0; } - /* 设置 overload */ + /* Set overload */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1395,28 +1432,31 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); - // 配置文件设置的重试睡眠时间为500,因为chunkserver设置返回timeout - // 导致触发底层指数退避,每次重试间隔增大。重试三次正常只需要睡眠3*500 - // 但是增加指数退避之后,睡眠间隔将增加到1000 + 2000 = 3000 - // 加上随机因子,三次重试时间应该大于2900, 且小于5000 + // The retry sleep time set in the configuration file is 500, but due to + // chunkserver timeouts, it triggers exponential backoff, increasing the + // interval between retries. In normal conditions, three retries would + // only require a sleep time of 3 * 500. However, with the added + // exponential backoff, the sleep intervals will increase to 1000 + 2000 + // = 3000. Considering the random factor, the total time for three + // retries should be greater than 2900 and less than 5000. 
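The nominal part of that window is easy to verify; the random factor only widens it. A tiny self-contained sketch (the jitter distribution is internal to the client, so only the nominal sum is computed here):

    #include <cstdint>
    #include <iostream>

    // Nominal backoff sleep across n attempts: the first retry sleeps twice
    // the base, and each later retry doubles again (no cap at this scale).
    int64_t NominalSleepUs(int64_t baseUs, int attempts) {
        int64_t total = 0;
        int64_t interval = baseUs;
        for (int i = 1; i < attempts; ++i) {  // no sleep before the first attempt
            interval *= 2;
            total += interval;
        }
        return total;
    }

    int main() {
        // Three attempts with a 500us base: 1000 + 2000 = 3000us nominally,
        // matching the 2900..5000 window described above.
        std::cout << NominalSleepUs(500, 3) << std::endl;  // prints 3000
        return 0;
    }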
uint64_t start = TimeUtility::GetTimeofDayUs(); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, reqDone->GetErrorCode()); @@ -1426,9 +1466,9 @@ TEST_F(CopysetClientTest, read_error_test) { ASSERT_LT(end - start, 3 * 5000); } - /* 其他错误 */ + /* Other errors */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1437,7 +1477,7 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1445,21 +1485,21 @@ TEST_F(CopysetClientTest, read_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1468,7 +1508,7 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1478,26 +1518,27 @@ TEST_F(CopysetClientTest, read_error_test) { response1.set_redirect(leaderStr); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) 
.WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(1) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1506,40 +1547,38 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response1; response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); -// response1.set_redirect(leaderStr2); + // response1.set_redirect(leaderStr2); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1548,40 +1587,37 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = 
len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response1; response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); -// response1.set_redirect(leaderStr2); + // response1.set_redirect(leaderStr2); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(-1))) + SetArgPointee<3>(leaderAddr), Return(-1))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1590,7 +1626,7 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1598,24 +1634,25 @@ TEST_F(CopysetClientTest, read_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* 
copyset does not exist, updating leader still failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1624,7 +1661,7 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1632,22 +1669,22 @@ TEST_F(CopysetClientTest, read_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1656,7 +1693,7 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1667,23 +1704,20 @@ TEST_F(CopysetClientTest, read_error_test) { ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); 
ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -1696,8 +1730,9 @@ TEST_F(CopysetClientTest, read_error_test) { */ TEST_F(CopysetClientTest, read_snapshot_error_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -1732,19 +1767,18 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1752,31 +1786,31 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(1) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, reqDone->GetErrorCode()); } /* chunk snapshot not exist */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1784,61 +1818,61 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(1) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); 
ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, reqDone->GetErrorCode()); } /* controller error */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) .WillRepeatedly(Invoke(ReadChunkSnapshotFunc)); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); gReadCntlFailedCode = 0; } - /* 其他错误 */ + /* Other errors */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1846,31 +1880,31 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(3) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1880,36 +1914,38 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { 
response1.set_redirect(leaderStr); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(1) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(2) + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1918,34 +1954,35 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(2) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1954,38 +1991,37 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { 
response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(2) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1993,34 +2029,35 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(3) + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2028,32 +2065,32 @@ TEST_F(CopysetClientTest, 
read_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2064,51 +2101,51 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(2) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(Return(-1)) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(1) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -2120,8 +2157,9 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { */ TEST_F(CopysetClientTest, delete_snapshot_error_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -2148,17 +2186,16 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2166,59 +2203,59 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(1) .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, reqDone->GetErrorCode()); } /* controller error */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) 
.WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(3) .WillRepeatedly(Invoke(DeleteChunkSnapshotFunc)); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); gReadCntlFailedCode = 0; } - /* 其他错误 */ + /* Other errors */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2226,30 +2263,30 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(response), Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2259,38 +2296,39 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { response1.set_redirect(leaderStr); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(1) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + EXPECT_CALL(mockChunkService, + 
DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2299,33 +2337,34 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(DeleteChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2334,73 +2373,73 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(-1))) + SetArgPointee<3>(leaderAddr), Return(-1))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - 
EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(DeleteChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response.set_redirect(leaderStr);; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + response.set_redirect(leaderStr); + ; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2408,31 +2447,31 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + 
EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2443,55 +2482,53 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(DeleteChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(-1))) + SetArgPointee<3>(leaderAddr), Return(-1))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(-1))) + SetArgPointee<3>(leaderAddr), Return(-1))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(1) .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -2503,8 +2540,9 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { */ TEST_F(CopysetClientTest, create_s3_clone_error_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -2531,17 +2569,16 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::CREATE_CLONE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2549,57 +2586,57 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(1) // NOLINT + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(1) // NOLINT .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, reqDone->GetErrorCode()); } /* controller error */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); 
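// The cond/reqDone pair just built is the synchronization backbone of every
// case in this file: the call under test is asynchronous, FakeRequestClosure
// is handed &cond precisely so that its completion can count the event down,
// and cond.Wait() parks the test until that happens. A minimal,
// self-contained sketch of that contract follows; the names below are
// simplified stand-ins, not the real curve::common::CountDownEvent or
// RequestClosure API.

#include <condition_variable>
#include <mutex>

class CountDownLatch {
 public:
    explicit CountDownLatch(int count) : count_(count) {}

    // Invoked from the completion path (the closure's Run()).
    void Signal() {
        std::lock_guard<std::mutex> lock(mutex_);
        if (--count_ <= 0) {
            cv_.notify_all();
        }
    }

    // Invoked by the test body; returns after `count` Signal() calls.
    void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ <= 0; });
    }

 private:
    std::mutex mutex_;
    std::condition_variable cv_;
    int count_;
};

// What a FakeRequestClosure-style closure reduces to in this scheme.
struct LatchClosure {
    explicit LatchClosure(CountDownLatch* latch) : latch_(latch) {}
    void Run() { latch_->Signal(); }  // last step of request completion
    CountDownLatch* latch_;
};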
reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(3) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT .WillRepeatedly(Invoke(CreateCloneChunkFunc)); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); gReadCntlFailedCode = 0; } - // /* 其他错误 */ + // /* Other errors */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2607,29 +2644,29 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(3) // NOLINT + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT .WillRepeatedly(DoAll(SetArgPointee<2>(response), Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } /* op success */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2637,33 +2674,33 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(1) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(1) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + 
.Times(1) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2672,32 +2709,33 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(2) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(2) // NOLINT .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(CreateCloneChunkFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2706,36 +2744,35 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(2) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(2) // NOLINT 
.WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(CreateCloneChunkFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2743,32 +2780,33 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(3) // NOLINT + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2776,30 +2814,30 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(3) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", 
sn, 1, 1024, reqDone); + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2810,69 +2848,67 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(2) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(2) // NOLINT .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(CreateCloneChunkFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(Return(-1)) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(1) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(1) // NOLINT .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } } - /** * recover chunk error testing */ TEST_F(CopysetClientTest, recover_chunk_error_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -2899,17 +2935,16 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2917,12 +2952,13 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(RecoverChunkFunc))); + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, @@ -2930,42 +2966,41 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { } /* controller error */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - 
SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(3) .WillRepeatedly(Invoke(RecoverChunkFunc)); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); gReadCntlFailedCode = 0; } - /* 其他错误 */ + /* Other errors */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2973,28 +3008,28 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(RecoverChunkFunc))); + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -3004,29 +3039,30 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { response1.set_redirect(leaderStr); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(1) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(RecoverChunkFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); 
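// A counting invariant ties the expectations in these cases together: the
// client performs one GetLeader lookup before every RPC attempt, plus one
// refresh lookup after every failed attempt. That is why the all-failure
// cases above pair GetLeader Times(6) with three RPC attempts, while the
// redirect-then-success cases pair Times(3) with two. A toy gmock sketch of
// that arithmetic; Lookup and SendWithOneRetry are hypothetical stand-ins
// for the MetaCache and copyset client internals, not real Curve APIs.
// (Build with -lgmock -lgtest -lgtest_main.)

#include <gmock/gmock.h>
#include <gtest/gtest.h>

using ::testing::Return;

class LeaderSource {
 public:
    virtual ~LeaderSource() = default;
    virtual int Lookup() = 0;  // stand-in for MetaCache::GetLeader
};

class MockLeaderSource : public LeaderSource {
 public:
    MOCK_METHOD(int, Lookup, (), (override));
};

// One failed attempt plus one successful retry: lookup, send (fails with a
// redirect that carries no leader hint), refresh, lookup, send (succeeds).
void SendWithOneRetry(LeaderSource* leaders) {
    leaders->Lookup();  // lookup before attempt #1
    // ... attempt #1 fails ...
    leaders->Lookup();  // refresh forced by the failure
    leaders->Lookup();  // lookup before attempt #2, which succeeds
}

TEST(RetryCounting, ThreeLookupsForTwoAttempts) {
    MockLeaderSource leaders;
    EXPECT_CALL(leaders, Lookup()).Times(3).WillRepeatedly(Return(0));
    SendWithOneRetry(&leaders);
}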
reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -3035,37 +3071,36 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(RecoverChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(RecoverChunkFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(RecoverChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -3074,35 +3109,34 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(RecoverChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(RecoverChunkFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(RecoverChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); 
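// Each chunk-service expectation in these hunks pairs two gmock actions:
// SetArgPointee<2> copies a canned response into the RPC's output argument,
// and Invoke(...) hands control to a helper (RecoverChunkFunc and friends,
// defined elsewhere in this test). Since cond.Wait() returns in every case,
// that helper presumably ends by running the request's completion closure.
// A reduced sketch of the action pair with toy types; only the shape of the
// four-argument service signature is assumed from the calls above.

#include <gmock/gmock.h>

using ::testing::_;
using ::testing::DoAll;
using ::testing::Invoke;
using ::testing::SetArgPointee;

struct Controller {};
struct Request {};
struct Response {
    int status = 0;
};
struct Closure {
    virtual ~Closure() = default;
    virtual void Run() = 0;
};

class ToyChunkService {
 public:
    virtual ~ToyChunkService() = default;
    virtual void RecoverOp(Controller* cntl, const Request* req,
                           Response* resp, Closure* done) = 0;
};

class MockToyChunkService : public ToyChunkService {
 public:
    MOCK_METHOD(void, RecoverOp,
                (Controller*, const Request*, Response*, Closure*),
                (override));
};

// Analogue of RecoverChunkFunc: finish the call by running its closure,
// which is what unblocks the waiting test body.
void RecoverOpFunc(Controller* /*cntl*/, const Request* /*req*/,
                   Response* /*resp*/, Closure* done) {
    done->Run();
}

// Usage inside a test body:
//   Response canned;
//   canned.status = 1;  // stands in for a CHUNK_OP_STATUS value
//   EXPECT_CALL(mock, RecoverOp(_, _, _, _))
//       .WillOnce(DoAll(SetArgPointee<2>(canned), Invoke(RecoverOpFunc)));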
ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -3110,31 +3144,32 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(RecoverChunkFunc))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -3142,29 +3177,29 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(RecoverChunkFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, 
copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -3175,47 +3210,46 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(RecoverChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(RecoverChunkFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(RecoverChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(Return(-1)) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(RecoverChunkFunc))); - copysetClient.RecoverChunk(reqCtx->idinfo_, - 0, 4096, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -3227,8 +3261,9 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { */ TEST_F(CopysetClientTest, get_chunk_info_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -3254,28 +3289,27 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; GetChunkInfoResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(GetChunkInfoFunc))); + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, @@ -3283,66 +3317,62 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { } /* controller error */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, 
GetChunkInfo(_, _, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) .WillRepeatedly(Invoke(GetChunkInfoFunc)); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); gReadCntlFailedCode = 0; } - /* 其他错误 */ + /* Other errors */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; GetChunkInfoResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(GetChunkInfoFunc))); + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -3351,32 +3381,33 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { response1.set_redirect(leaderStr); GetChunkInfoResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(1) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(GetChunkInfoFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(GetChunkInfoFunc))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing 
the meta cache + * succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -3384,30 +3415,30 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); GetChunkInfoResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(GetChunkInfoFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(GetChunkInfoFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -3415,92 +3446,89 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); GetChunkInfoResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(GetChunkInfoFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(GetChunkInfoFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an 
incorrect leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; GetChunkInfoResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(GetChunkInfoFunc))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; GetChunkInfoResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(GetChunkInfoFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -3510,54 +3538,49 @@ 
TEST_F(CopysetClientTest, get_chunk_info_test) { GetChunkInfoResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(GetChunkInfoFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(GetChunkInfoFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; GetChunkInfoResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(-1))) + SetArgPointee<3>(leaderAddr), Return(-1))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(-1))) + SetArgPointee<3>(leaderAddr), Return(-1))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(GetChunkInfoFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -3574,23 +3597,22 @@ void WriteCallBack(CurveAioContext* aioctx) { delete aioctx; } -void PrepareOpenFile(FakeCurveFSService *service, - OpenFileResponse *openresp, - FakeReturn *fakeReturn) { +void PrepareOpenFile(FakeCurveFSService* service, OpenFileResponse* openresp, + FakeReturn* fakeReturn) { openresp->set_statuscode(curve::mds::StatusCode::kOK); - auto *session = openresp->mutable_protosession(); + auto* session = openresp->mutable_protosession(); session->set_sessionid("xxx"); session->set_leasetime(10000); session->set_createtime(10000); session->set_sessionstatus(curve::mds::SessionStatus::kSessionOK); - auto *fileinfo = openresp->mutable_fileinfo(); + auto* fileinfo = openresp->mutable_fileinfo(); fileinfo->set_id(1); fileinfo->set_filename("filename"); fileinfo->set_parentid(0); fileinfo->set_length(10ULL * 1024 * 1024 * 1024); fileinfo->set_blocksize(4096); - *fakeReturn = FakeReturn(nullptr, static_cast(openresp)); + *fakeReturn = FakeReturn(nullptr, static_cast(openresp)); service->SetOpenFile(fakeReturn); } @@ -3620,7 +3642,7 @@ TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) { // create fake chunkserver service FakeChunkServerService fakechunkservice; - // 设置cli服务 + // Set up cli service CliServiceFake fakeCliservice; FakeCurveFSService curvefsService; @@ -3631,9 +3653,11 @@ TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) { brpc::Server server; ASSERT_EQ(0, server.AddService(&fakechunkservice, - brpc::SERVER_DOESNT_OWN_SERVICE)) << "Fail to add fakechunkservice"; - ASSERT_EQ(0, server.AddService(&fakeCliservice, - brpc::SERVER_DOESNT_OWN_SERVICE)) << "Fail to add fakecliservice"; + brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Fail to add fakechunkservice"; + ASSERT_EQ( + 0, server.AddService(&fakeCliservice, brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Fail to add fakecliservice"; ASSERT_EQ( 0, server.AddService(&curvefsService, brpc::SERVER_DOESNT_OWN_SERVICE)) << "Fail to add curvefsService"; @@ -3670,11 +3694,12 @@ TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) { ASSERT_EQ(LIBCURVE_ERROR::OK, fileinstance.Open()); - // 设置文件版本号 + // Set file version number fileinstance.GetIOManager4File()->SetLatestFileSn(kNewFileSn); - // 发送写请求,并等待sec秒后检查io是否返回 - auto startWriteAndCheckResult = [&fileinstance](int sec)-> bool { // NOLINT + // Send a write request and wait for seconds to check if IO returns + auto startWriteAndCheckResult = + [&fileinstance](int sec) -> bool { // NOLINT CurveAioContext* aioctx = new CurveAioContext(); char buffer[4096]; @@ -3684,29 +3709,30 @@ TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) { aioctx->op = 
LIBCURVE_OP::LIBCURVE_OP_WRITE; aioctx->cb = WriteCallBack; - // 下发写请求 + // Send write request fileinstance.AioWrite(aioctx, UserDataType::RawBuffer); std::this_thread::sleep_for(std::chrono::seconds(sec)); return gWriteSuccessFlag; }; - // 第一次写成功,并更新chunkserver端的文件版本号 + // The first write succeeds and updates the file version + // number on the chunkserver side ASSERT_TRUE(startWriteAndCheckResult(3)); - // 设置一个旧的版本号去写 + // Set an old version number to write with fileinstance.GetIOManager4File()->SetLatestFileSn(kOldFileSn); gWriteSuccessFlag = false; - // chunkserver返回backward,重新获取版本号后还是旧的版本 - // IO hang + // The chunkserver returns backward; the version number fetched again + // is still the old one, so the IO hangs ASSERT_FALSE(startWriteAndCheckResult(3)); - // 更新版本号为正常状态 + // Update version number to normal state fileinstance.GetIOManager4File()->SetLatestFileSn(kNewFileSn); std::this_thread::sleep_for(std::chrono::seconds(1)); - // 上次写请求成功 + // The previous write request succeeds ASSERT_EQ(true, gWriteSuccessFlag); server.Stop(0); @@ -3763,8 +3789,8 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { IOTracker iot(nullptr, nullptr, nullptr, &fm); { - // redirect情况下, chunkserver返回新的leader - // 重试之前不会睡眠 + // In the redirect case, the chunkserver returns a new leader + // No sleep before retrying RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -3791,7 +3817,7 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { .WillOnce(DoAll(SetArgPointee<2>(leaderId), SetArgPointee<3>(leaderAddr), Return(0))) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId2), - SetArgPointee<3>(leaderAddr), Return(0))); + SetArgPointee<3>(leaderAddr), Return(0))); EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) .Times(1) .WillOnce(Return(0)); @@ -3803,21 +3829,20 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, - reqDone); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); - // 返回新的leader id,所以重试之前不会进行睡眠 + // Returns a new leader ID, so there will be no sleep before retrying ASSERT_LE(endUs - startUs, sleepUsBeforeRetry / 10); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - // redirect情况下,chunkserver返回旧leader - // 重试之前会睡眠 + // In the redirect case, the chunkserver returns the old leader + // Sleep before retrying RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -3842,7 +3867,7 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); + SetArgPointee<3>(leaderAddr), Return(0))); EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) .Times(1) .WillOnce(Return(0)); @@ -3853,21 +3878,20 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { .WillOnce( DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - 
copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); - // 返回同样的leader id,重试之前会进行睡眠 + // Return the same leader ID and sleep before retrying ASSERT_GE(endUs - startUs, sleepUsBeforeRetry / 10); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - // redirect情况下,chunkserver未返回leader - // 主动refresh获取到新leader + // In the redirect case, chunkserver did not return a leader + // Actively refresh to obtain a new leader RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -3893,7 +3917,7 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { .WillOnce(DoAll(SetArgPointee<2>(leaderId), SetArgPointee<3>(leaderAddr), Return(0))) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId2), - SetArgPointee<3>(leaderAddr), Return(0))); + SetArgPointee<3>(leaderAddr), Return(0))); EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(0); EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) .Times(2) @@ -3902,21 +3926,20 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { .WillOnce( DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, - reqDone); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); - // 返回新的leader id,所以重试之前不会进行睡眠 + // Returns a new leader id, so there will be no sleep before retrying ASSERT_LE(endUs - startUs, sleepUsBeforeRetry / 10); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - // redirect情况下,chunkserver未返回leader - // 主动refresh获取到旧leader + // In the redirect case, chunkserver did not return a leader + // Actively refresh to obtain old leader RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -3940,7 +3963,7 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); + SetArgPointee<3>(leaderAddr), Return(0))); EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(0); EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) .Times(2) @@ -3949,9 +3972,8 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { .WillOnce( DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, - reqDone); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); @@ -3966,19 +3988,16 @@ class TestRunnedRequestClosure : public RequestClosure { public: TestRunnedRequestClosure() : RequestClosure(nullptr) {} - void Run() override { - runned_ = true; - } + void Run() override { runned_ = true; } - bool IsRunned() const { - return runned_; - } + bool IsRunned() const { return runned_; } private: bool runned_ = false; }; -// 测试session失效后,重试请求会被重新放入请求队列 +// After the test session fails, the retry request will be placed back in the +// request queue 
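// (That is: once the session lease becomes invalid, a retried request is
// handed back to the RequestScheduler instead of being sent over RPC.)
//
// A minimal self-contained sketch of that contract, shown before the test
// that verifies it. This is not the actual CopysetClient implementation;
// the Sketch* types and members below are illustrative assumptions only.
#include <queue>

struct SketchRequest {
    int id;
};

class SketchScheduler {
 public:
    // Plays the role of RequestScheduler::ReSchedule(): the retried request
    // goes back into the pending queue to be issued again later.
    void ReSchedule(SketchRequest* req) { pending_.push(req); }

 private:
    std::queue<SketchRequest*> pending_;
};

class SketchCopysetClient {
 public:
    explicit SketchCopysetClient(SketchScheduler* scheduler)
        : scheduler_(scheduler) {}

    // Plays the role of StartRecycleRetryRPC(): from now on, requests are
    // recycled instead of sent.
    void StartRecycleRetryRPC() { sessionNotValid_ = true; }

    void ReadChunk(SketchRequest* req) {
        if (sessionNotValid_) {
            scheduler_->ReSchedule(req);  // requeue; retried after refresh
            return;
        }
        // Normal path: the read RPC would be issued to the leader here.
    }

 private:
    SketchScheduler* scheduler_;
    bool sessionNotValid_ = false;
};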
TEST(CopysetClientBasicTest, TestReScheduleWhenSessionNotValid) { MockRequestScheduler requestScheduler; CopysetClient copysetClient; @@ -3988,12 +4007,11 @@ TEST(CopysetClientBasicTest, TestReScheduleWhenSessionNotValid) { ASSERT_EQ(0, copysetClient.Init(&metaCache, ioSenderOption, &requestScheduler, nullptr)); - // 设置session not valid + // Set session not valid copysetClient.StartRecycleRetryRPC(); { - EXPECT_CALL(requestScheduler, ReSchedule(_)) - .Times(1); + EXPECT_CALL(requestScheduler, ReSchedule(_)).Times(1); TestRunnedRequestClosure closure; copysetClient.ReadChunk({}, 0, 0, 0, {}, &closure); @@ -4001,8 +4019,7 @@ TEST(CopysetClientBasicTest, TestReScheduleWhenSessionNotValid) { } { - EXPECT_CALL(requestScheduler, ReSchedule(_)) - .Times(1); + EXPECT_CALL(requestScheduler, ReSchedule(_)).Times(1); TestRunnedRequestClosure closure; copysetClient.WriteChunk({}, 1, 1, 0, {}, 0, 0, {}, &closure); @@ -4010,5 +4027,5 @@ TEST(CopysetClientBasicTest, TestReScheduleWhenSessionNotValid) { } } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/test/client/fake/client_workflow_test.cpp b/test/client/fake/client_workflow_test.cpp index c42a9371ba..fdab88f1ed 100644 --- a/test/client/fake/client_workflow_test.cpp +++ b/test/client/fake/client_workflow_test.cpp @@ -19,28 +19,28 @@ * File Created: Saturday, 13th October 2018 1:59:08 pm * Author: tongguangxun */ +#include // NOLINT #include #include -#include // NOLINT -#include -#include #include -#include // NOLINT -#include // NOLINT +#include // NOLINT +#include +#include +#include // NOLINT #include "include/client/libcurve.h" +#include "src/client/client_common.h" #include "src/client/file_instance.h" -#include "test/client/fake/mock_schedule.h" #include "test/client/fake/fakeMDS.h" -#include "src/client/client_common.h" +#include "test/client/fake/mock_schedule.h" -using curve::client::PeerAddr; using curve::client::EndPoint; +using curve::client::PeerAddr; -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT -std::string mdsMetaServerAddr = "127.0.0.1:9104"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9104"; // NOLINT DECLARE_uint64(test_disk_size); DEFINE_uint32(io_time, 5, "Duration for I/O test"); @@ -67,7 +67,7 @@ void readcallbacktest(CurveAioContext* context) { delete context; } -int main(int argc, char ** argv) { +int main(int argc, char** argv) { // google::InitGoogleLogging(argv[0]); google::ParseCommandLineFlags(&argc, &argv, false); std::string configpath = "./test/client/configs/client.conf"; @@ -76,7 +76,7 @@ int main(int argc, char ** argv) { LOG(FATAL) << "Fail to init config"; } - // filename必须是全路径 + // The filename must be a full path std::string filename = "/1_userinfo_"; // uint64_t size = FLAGS_test_disk_size; @@ -86,7 +86,7 @@ int main(int argc, char ** argv) { mds.Initialize(); mds.StartService(); if (FLAGS_create_copysets) { - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9106, &ep); PeerId pd(ep); @@ -127,12 +127,11 @@ int main(int argc, char ** argv) { memset(buffer + 7 * 1024, 'h', 1024); uint64_t offset_base; - for (int i = 0; i < 16; i ++) { + for (int i = 0; i < 16; i++) { uint64_t offset = i * chunk_size; Write(fd, buffer, offset, 4096); } - char* buf2 = new char[128 * 1024]; char* buf1 = new char[128 * 1024]; @@ -155,7 +154,7 @@ int main(int argc, 
char ** argv) { aioctx2->op = LIBCURVE_OP_READ; aioctx2->cb = readcallbacktest; AioRead(fd, aioctx2); - if (j%10 == 0) { + if (j % 10 == 0) { mds.EnableNetUnstable(600); } else { mds.EnableNetUnstable(100); @@ -185,18 +184,18 @@ int main(int argc, char ** argv) { CurveAioContext readaioctx; { std::unique_lock lk(writeinterfacemtx); - writeinterfacecv.wait(lk, []()->bool{return writeflag;}); + writeinterfacecv.wait(lk, []() -> bool { return writeflag; }); } writeflag = false; AioWrite(fd, &writeaioctx); { std::unique_lock lk(writeinterfacemtx); - writeinterfacecv.wait(lk, []()->bool{return writeflag;}); + writeinterfacecv.wait(lk, []() -> bool { return writeflag; }); } { std::unique_lock lk(interfacemtx); - interfacecv.wait(lk, []()->bool{return readflag;}); + interfacecv.wait(lk, []() -> bool { return readflag; }); } for (int i = 0; i < 1024; i++) { @@ -204,31 +203,31 @@ int main(int argc, char ** argv) { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 1024] != 'b') { + if (readbuffer[i + 1024] != 'b') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 2 * 1024] != 'c') { + if (readbuffer[i + 2 * 1024] != 'c') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 3 * 1024] != 'd') { + if (readbuffer[i + 3 * 1024] != 'd') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 4 * 1024] != 'e') { + if (readbuffer[i + 4 * 1024] != 'e') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 5 * 1024] != 'f') { + if (readbuffer[i + 5 * 1024] != 'f') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 6 * 1024] != 'g') { + if (readbuffer[i + 6 * 1024] != 'g') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 7 * 1024] != 'h') { + if (readbuffer[i + 7 * 1024] != 'h') { LOG(FATAL) << "read wrong data!"; break; } @@ -236,7 +235,7 @@ int main(int argc, char ** argv) { LOG(INFO) << "LibCurve I/O verified for stage 1, going to read repeatedly"; -// skip_write_io: + // skip_write_io: std::atomic stop(false); auto testfunc = [&]() { while (!stop.load()) { @@ -247,44 +246,44 @@ int main(int argc, char ** argv) { AioRead(fd, &readaioctx); { std::unique_lock lk(interfacemtx); - interfacecv.wait(lk, []()->bool{return readflag;}); + interfacecv.wait(lk, []() -> bool { return readflag; }); } for (int i = 0; i < 1024; i++) { if (readbuffer[i] != 'a') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 1024] != 'b') { + if (readbuffer[i + 1024] != 'b') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 2 * 1024] != 'c') { + if (readbuffer[i + 2 * 1024] != 'c') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 3 * 1024] != 'd') { + if (readbuffer[i + 3 * 1024] != 'd') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 4 * 1024] != 'e') { + if (readbuffer[i + 4 * 1024] != 'e') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 5 * 1024] != 'f') { + if (readbuffer[i + 5 * 1024] != 'f') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 6 * 1024] != 'g') { + if (readbuffer[i + 6 * 1024] != 'g') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 7 * 1024] != 'h') { + if (readbuffer[i + 7 * 1024] != 'h') { LOG(FATAL) << "read wrong data!"; break; } } -skip_read_io: + skip_read_io: std::this_thread::sleep_for(std::chrono::milliseconds(50)); } }; diff --git a/test/client/fake/client_workflow_test4snap.cpp b/test/client/fake/client_workflow_test4snap.cpp index 9aa9a75e23..4dcb77aec9 100644 --- 
a/test/client/fake/client_workflow_test4snap.cpp +++ b/test/client/fake/client_workflow_test4snap.cpp @@ -19,26 +19,26 @@ * File Created: Monday, 7th January 2019 10:04:50 pm * Author: tongguangxun */ +#include // NOLINT #include #include -#include // NOLINT -#include -#include #include -#include //NOLINT -#include //NOLINT +#include //NOLINT +#include +#include +#include //NOLINT -#include "src/client/client_common.h" #include "include/client/libcurve.h" -#include "src/client/libcurve_snapshot.h" +#include "src/client/client_common.h" #include "src/client/file_instance.h" -#include "test/client/fake/mock_schedule.h" +#include "src/client/libcurve_snapshot.h" #include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mock_schedule.h" -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT -std::string mdsMetaServerAddr = "127.0.0.1:6666"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:6666"; // NOLINT DECLARE_uint64(test_disk_size); DEFINE_uint32(io_time, 5, "Duration for I/O test"); @@ -55,21 +55,21 @@ std::condition_variable interfacecv; DECLARE_uint64(test_disk_size); -using curve::client::UserInfo_t; -using curve::client::PeerAddr; -using curve::client::EndPoint; -using curve::client::SegmentInfo; -using curve::client::ChunkInfoDetail; -using curve::client::SnapshotClient; using curve::client::ChunkID; -using curve::client::LogicPoolID; -using curve::client::CopysetID; using curve::client::ChunkIDInfo; +using curve::client::ChunkInfoDetail; +using curve::client::CopysetID; using curve::client::CopysetPeerInfo; -using curve::client::MetaCache; +using curve::client::EndPoint; using curve::client::LogicalPoolCopysetIDInfo; +using curve::client::LogicPoolID; +using curve::client::MetaCache; +using curve::client::PeerAddr; +using curve::client::SegmentInfo; +using curve::client::SnapshotClient; +using curve::client::UserInfo_t; -int main(int argc, char ** argv) { +int main(int argc, char** argv) { google::ParseCommandLineFlags(&argc, &argv, false); std::string filename = "/1_userinfo_test.txt"; @@ -79,7 +79,7 @@ int main(int argc, char ** argv) { mds.Initialize(); mds.StartService(); if (FLAGS_create_copysets) { - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 8200, &ep); PeerId pd(ep); @@ -116,10 +116,8 @@ int main(int argc, char ** argv) { SegmentInfo seginfo; LogicalPoolCopysetIDInfo lpcsIDInfo; - if (LIBCURVE_ERROR::FAILED == cl.GetSnapshotSegmentInfo(filename, - userinfo, - 0, 0, - &seginfo)) { + if (LIBCURVE_ERROR::FAILED == + cl.GetSnapshotSegmentInfo(filename, userinfo, 0, 0, &seginfo)) { LOG(ERROR) << "GetSnapshotSegmentInfo failed!"; return -1; } @@ -140,7 +138,7 @@ int main(int argc, char ** argv) { cl.DeleteChunkSnapshotOrCorrectSn(ChunkIDInfo(1, 10000, 1), 2); - ChunkInfoDetail *chunkInfo = new ChunkInfoDetail; + ChunkInfoDetail* chunkInfo = new ChunkInfoDetail; cl.GetChunkInfo(ChunkIDInfo(1, 10000, 1), chunkInfo); for (auto iter : chunkInfo->chunkSn) { if (iter != 1111) { diff --git a/test/client/fake/fakeChunkserver.h b/test/client/fake/fakeChunkserver.h index 6ebbbeffcf..0841e18d7d 100644 --- a/test/client/fake/fakeChunkserver.h +++ b/test/client/fake/fakeChunkserver.h @@ -23,15 +23,15 @@ #ifndef TEST_CLIENT_FAKE_FAKECHUNKSERVER_H_ #define TEST_CLIENT_FAKE_FAKECHUNKSERVER_H_ +#include #include #include #include -#include #include -#include +#include -#include // 
NOLINT #include +#include // NOLINT #include "proto/chunk.pb.h" #include "proto/cli2.pb.h" @@ -40,8 +40,8 @@ #include "test/client/fake/mockMDS.h" using braft::PeerId; -using curve::chunkserver::ChunkService; using curve::chunkserver::CHUNK_OP_STATUS; +using curve::chunkserver::ChunkService; class FakeChunkService : public ChunkService { public: @@ -53,20 +53,19 @@ class FakeChunkService : public ChunkService { } virtual ~FakeChunkService() {} - void WriteChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void WriteChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); retryTimes.fetch_add(1); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); if (rpcFailed) { cntl->SetFailed(-1, "set rpc failed!"); } - ::memcpy(chunk_, - cntl->request_attachment().to_string().c_str(), + ::memcpy(chunk_, cntl->request_attachment().to_string().c_str(), request->size()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response->set_appliedindex(2); @@ -75,13 +74,13 @@ class FakeChunkService : public ChunkService { } } - void ReadChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void ReadChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); retryTimes.fetch_add(1); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); if (rpcFailed) { cntl->SetFailed(EHOSTDOWN, "set rpc failed!"); } @@ -97,67 +96,69 @@ class FakeChunkService : public ChunkService { } void DeleteChunkSnapshotOrCorrectSn( - ::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakedeletesnapchunkret_->controller_ != nullptr && - fakedeletesnapchunkret_->controller_->Failed()) { + fakedeletesnapchunkret_->controller_->Failed()) { controller->SetFailed("failed"); } auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( - fakedeletesnapchunkret_->response_); + fakedeletesnapchunkret_->response_); response->CopyFrom(*resp); } void ReadChunkSnapshot(::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakereadchunksnapret_->controller_ != nullptr && - fakereadchunksnapret_->controller_->Failed()) { + fakereadchunksnapret_->controller_->Failed()) { controller->SetFailed("failed"); } - brpc::Controller *cntl = 
dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); char buff[8192] = {1}; ::memset(buff, 1, 8192); cntl->response_attachment().append(buff, request->size()); auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( - fakereadchunksnapret_->response_); + fakereadchunksnapret_->response_); response->CopyFrom(*resp); } - void GetChunkInfo(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::GetChunkInfoRequest *request, // NOLINT - ::curve::chunkserver::GetChunkInfoResponse *response, - google::protobuf::Closure *done) { + void GetChunkInfo( + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::GetChunkInfoRequest* request, // NOLINT + ::curve::chunkserver::GetChunkInfoResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeGetChunkInforet_->controller_ != nullptr && - fakeGetChunkInforet_->controller_->Failed()) { + fakeGetChunkInforet_->controller_->Failed()) { controller->SetFailed("failed"); } auto resp = static_cast<::curve::chunkserver::GetChunkInfoResponse*>( - fakeGetChunkInforet_->response_); + fakeGetChunkInforet_->response_); response->CopyFrom(*resp); } - void GetChunkHash(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::GetChunkHashRequest *request, // NOLINT - ::curve::chunkserver::GetChunkHashResponse *response, - google::protobuf::Closure *done) { + void GetChunkHash( + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::GetChunkHashRequest* request, // NOLINT + ::curve::chunkserver::GetChunkHashResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeGetChunkHashRet_->controller_ != nullptr && - fakeGetChunkHashRet_->controller_->Failed()) { + fakeGetChunkHashRet_->controller_->Failed()) { controller->SetFailed("failed"); } auto resp = static_cast<::curve::chunkserver::GetChunkHashResponse*>( - fakeGetChunkHashRet_->response_); + fakeGetChunkHashRet_->response_); response->CopyFrom(*resp); } @@ -177,13 +178,9 @@ class FakeChunkService : public ChunkService { fakeGetChunkHashRet_ = fakeret; } - void SetRPCFailed() { - rpcFailed = true; - } + void SetRPCFailed() { rpcFailed = true; } - void ReSetRPCFailed() { - rpcFailed = false; - } + void ReSetRPCFailed() { rpcFailed = false; } FakeReturn* fakedeletesnapchunkret_; FakeReturn* fakereadchunksnapret_; @@ -200,16 +197,13 @@ class FakeChunkService : public ChunkService { waittimeMS = 0; } - void CleanRetryTimes() { - retryTimes.store(0); - } + void CleanRetryTimes() { retryTimes.store(0); } - uint64_t GetRetryTimes() { - return retryTimes.load(); - } + uint64_t GetRetryTimes() { return retryTimes.load(); } private: - // wait4netunstable用来模拟网络延时,当打开之后,每个读写rpc会停留一段时间再返回 + // wait4netunstable is used to simulate network latency. 
When turned on, + // each read/write rpc will pause for a period of time before returning bool wait4netunstable; uint64_t waittimeMS; bool rpcFailed; @@ -219,32 +213,24 @@ class FakeChunkService : public ChunkService { class CliServiceFake : public curve::chunkserver::CliService2 { public: - CliServiceFake() { - invokeTimes = 0; - } + CliServiceFake() { invokeTimes = 0; } void GetLeader(::google::protobuf::RpcController* controller, - const curve::chunkserver::GetLeaderRequest2* request, - curve::chunkserver::GetLeaderResponse2* response, - ::google::protobuf::Closure* done) { + const curve::chunkserver::GetLeaderRequest2* request, + curve::chunkserver::GetLeaderResponse2* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address(leaderid_.to_string()); response->set_allocated_leader(peer); invokeTimes++; } - void SetPeerID(PeerId peerid) { - leaderid_ = peerid; - } + void SetPeerID(PeerId peerid) { leaderid_ = peerid; } - uint64_t GetInvokeTimes() { - return invokeTimes; - } + uint64_t GetInvokeTimes() { return invokeTimes; } - void ReSetInvokeTimes() { - invokeTimes = 0; - } + void ReSetInvokeTimes() { invokeTimes = 0; } private: PeerId leaderid_; @@ -253,17 +239,19 @@ class CliServiceFake : public curve::chunkserver::CliService2 { class FakeChunkServerService : public ChunkService { public: - void WriteChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void WriteChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - if (fakewriteret_->controller_ != nullptr && fakewriteret_->controller_->Failed()) { // NOLINT + if (fakewriteret_->controller_ != nullptr && + fakewriteret_->controller_->Failed()) { // NOLINT controller->SetFailed("failed"); } - auto resp = static_cast<::curve::chunkserver::ChunkResponse*>(fakewriteret_->response_); // NOLINT + auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( + fakewriteret_->response_); // NOLINT response->CopyFrom(*resp); static uint64_t latestSn = 0; @@ -274,13 +262,13 @@ class FakeChunkServerService : public ChunkService { latestSn = std::max(latestSn, request->sn()); } - void ReadChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void ReadChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); char buff[8192] = {0}; if (request->has_appliedindex()) { memset(buff, 'a', 4096); @@ -290,17 +278,14 @@ class FakeChunkServerService : public ChunkService { memset(buff + 4096, 'd', 4096); } cntl->response_attachment().append(buff, request->size()); - auto resp = static_cast<::curve::chunkserver::ChunkResponse*>(fakereadret_->response_); // NOLINT + auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( + fakereadret_->response_); // NOLINT 
response->CopyFrom(*resp); } - void SetFakeWriteReturn(FakeReturn* ret) { - fakewriteret_ = ret; - } + void SetFakeWriteReturn(FakeReturn* ret) { fakewriteret_ = ret; } - void SetFakeReadReturn(FakeReturn* ret) { - fakereadret_ = ret; - } + void SetFakeReadReturn(FakeReturn* ret) { fakereadret_ = ret; } private: FakeReturn* fakewriteret_; @@ -310,23 +295,20 @@ class FakeChunkServerService : public ChunkService { class FakeRaftStateService : public braft::raft_stat { public: void default_method(::google::protobuf::RpcController* controller, - const ::braft::IndexRequest*, - ::braft::IndexResponse*, + const ::braft::IndexRequest*, ::braft::IndexResponse*, ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); // NOLINT + brpc::Controller* cntl = + dynamic_cast(controller); // NOLINT if (failed_) { cntl->SetFailed("failed for test"); return; } cntl->response_attachment().append(buf_); } - void SetBuf(const butil::IOBuf& iobuf) { - buf_ = iobuf; - } - void SetFailed(bool failed) { - failed_ = failed; - } + void SetBuf(const butil::IOBuf& iobuf) { buf_ = iobuf; } + void SetFailed(bool failed) { failed_ = failed; } + private: butil::IOBuf buf_; bool failed_ = false; diff --git a/test/client/fake/fakeMDS.h b/test/client/fake/fakeMDS.h index e29f251c26..6daed2e5ed 100644 --- a/test/client/fake/fakeMDS.h +++ b/test/client/fake/fakeMDS.h @@ -22,73 +22,68 @@ #ifndef TEST_CLIENT_FAKE_FAKEMDS_H_ #define TEST_CLIENT_FAKE_FAKEMDS_H_ -#include -#include -#include #include +#include +#include +#include -#include -#include #include -#include #include -#include "src/client/client_common.h" -#include "test/client/fake/mockMDS.h" -#include "test/client/fake/fakeChunkserver.h" +#include +#include +#include -#include "proto/nameserver2.pb.h" -#include "proto/topology.pb.h" #include "proto/copyset.pb.h" -#include "proto/schedule.pb.h" -#include "src/common/timeutility.h" -#include "src/common/authenticator.h" #include "proto/heartbeat.pb.h" +#include "proto/nameserver2.pb.h" +#include "proto/schedule.pb.h" +#include "proto/topology.pb.h" +#include "src/client/client_common.h" #include "src/client/mds_client_base.h" +#include "src/common/authenticator.h" +#include "src/common/timeutility.h" #include "src/common/uuid.h" +#include "test/client/fake/fakeChunkserver.h" +#include "test/client/fake/mockMDS.h" using curve::common::Authenticator; using braft::PeerId; -using curve::common::Authenticator; using curve::chunkserver::COPYSET_OP_STATUS; -using ::curve::mds::topology::GetChunkServerListInCopySetsResponse; -using ::curve::mds::topology::GetChunkServerListInCopySetsRequest; +using curve::common::Authenticator; +using ::curve::mds::schedule::QueryChunkServerRecoverStatusRequest; +using ::curve::mds::schedule::QueryChunkServerRecoverStatusResponse; +using ::curve::mds::schedule::RapidLeaderScheduleRequst; +using ::curve::mds::schedule::RapidLeaderScheduleResponse; using ::curve::mds::topology::ChunkServerRegistRequest; using ::curve::mds::topology::ChunkServerRegistResponse; -using ::curve::mds::topology::GetClusterInfoRequest; -using ::curve::mds::topology::GetClusterInfoResponse; using ::curve::mds::topology::GetChunkServerInfoRequest; using ::curve::mds::topology::GetChunkServerInfoResponse; +using ::curve::mds::topology::GetChunkServerListInCopySetsRequest; +using ::curve::mds::topology::GetChunkServerListInCopySetsResponse; +using ::curve::mds::topology::GetClusterInfoRequest; +using ::curve::mds::topology::GetClusterInfoResponse; +using 
::curve::mds::topology::GetCopySetsInChunkServerRequest; +using ::curve::mds::topology::GetCopySetsInChunkServerResponse; using ::curve::mds::topology::ListChunkServerRequest; using ::curve::mds::topology::ListChunkServerResponse; +using ::curve::mds::topology::ListLogicalPoolRequest; +using ::curve::mds::topology::ListLogicalPoolResponse; using ::curve::mds::topology::ListPhysicalPoolRequest; using ::curve::mds::topology::ListPhysicalPoolResponse; using ::curve::mds::topology::ListPoolZoneRequest; using ::curve::mds::topology::ListPoolZoneResponse; using ::curve::mds::topology::ListZoneServerRequest; using ::curve::mds::topology::ListZoneServerResponse; -using ::curve::mds::topology::GetCopySetsInChunkServerRequest; -using ::curve::mds::topology::GetCopySetsInChunkServerResponse; -using ::curve::mds::topology::ListLogicalPoolRequest; -using ::curve::mds::topology::ListLogicalPoolResponse; -using ::curve::mds::topology::GetClusterInfoRequest; -using ::curve::mds::topology::GetClusterInfoResponse; -using ::curve::mds::schedule::RapidLeaderScheduleRequst; -using ::curve::mds::schedule::RapidLeaderScheduleResponse; -using ::curve::mds::schedule::QueryChunkServerRecoverStatusRequest; -using ::curve::mds::schedule::QueryChunkServerRecoverStatusResponse; -using HeartbeatRequest = curve::mds::heartbeat::ChunkServerHeartbeatRequest; +using HeartbeatRequest = curve::mds::heartbeat::ChunkServerHeartbeatRequest; using HeartbeatResponse = curve::mds::heartbeat::ChunkServerHeartbeatResponse; - DECLARE_bool(start_builtin_service); class FakeMDSCurveFSService : public curve::mds::CurveFSService { public: - FakeMDSCurveFSService() { - retrytimes_ = 0; - } + FakeMDSCurveFSService() { retrytimes_ = 0; } void ListClient(::google::protobuf::RpcController* controller, const ::curve::mds::ListClientRequest* request, @@ -96,39 +91,39 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeListClient_->controller_ != nullptr && - fakeListClient_->controller_->Failed()) { + fakeListClient_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::ListClientResponse*>( - fakeListClient_->response_); + fakeListClient_->response_); response->CopyFrom(*resp); } void CreateFile(::google::protobuf::RpcController* controller, - const ::curve::mds::CreateFileRequest* request, - ::curve::mds::CreateFileResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::CreateFileRequest* request, + ::curve::mds::CreateFileResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeCreateFileret_->controller_ != nullptr - && fakeCreateFileret_->controller_->Failed()) { + if (fakeCreateFileret_->controller_ != nullptr && + fakeCreateFileret_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::CreateFileResponse*>( - fakeCreateFileret_->response_); + fakeCreateFileret_->response_); response->CopyFrom(*resp); } void GetFileInfo(::google::protobuf::RpcController* controller, - const ::curve::mds::GetFileInfoRequest* request, - ::curve::mds::GetFileInfoResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::GetFileInfoRequest* request, + ::curve::mds::GetFileInfoResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeGetFileInforet_->controller_ != nullptr && 
fakeGetFileInforet_->controller_->Failed()) { @@ -138,14 +133,15 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { retrytimes_++; auto resp = static_cast<::curve::mds::GetFileInfoResponse*>( - fakeGetFileInforet_->response_); + fakeGetFileInforet_->response_); response->CopyFrom(*resp); } - void IncreaseFileEpoch(::google::protobuf::RpcController* controller, - const ::curve::mds::IncreaseFileEpochRequest* request, - ::curve::mds::IncreaseFileEpochResponse* response, - ::google::protobuf::Closure* done) { + void IncreaseFileEpoch( + ::google::protobuf::RpcController* controller, + const ::curve::mds::IncreaseFileEpochRequest* request, + ::curve::mds::IncreaseFileEpochResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeIncreaseFileEpochret_->controller_ != nullptr && fakeIncreaseFileEpochret_->controller_->Failed()) { @@ -155,7 +151,7 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { retrytimes_++; auto resp = static_cast<::curve::mds::IncreaseFileEpochResponse*>( - fakeIncreaseFileEpochret_->response_); + fakeIncreaseFileEpochret_->response_); response->CopyFrom(*resp); } @@ -165,41 +161,42 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeGetAllocatedSizeRet_->controller_ != nullptr && - fakeGetAllocatedSizeRet_->controller_->Failed()) { + fakeGetAllocatedSizeRet_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::GetAllocatedSizeResponse*>( - fakeGetAllocatedSizeRet_->response_); + fakeGetAllocatedSizeRet_->response_); response->CopyFrom(*resp); } - void GetOrAllocateSegment(::google::protobuf::RpcController* controller, - const ::curve::mds::GetOrAllocateSegmentRequest* request, - ::curve::mds::GetOrAllocateSegmentResponse* response, - ::google::protobuf::Closure* done) { + void GetOrAllocateSegment( + ::google::protobuf::RpcController* controller, + const ::curve::mds::GetOrAllocateSegmentRequest* request, + ::curve::mds::GetOrAllocateSegmentResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeGetOrAllocateSegmentret_->controller_ != nullptr && - fakeGetOrAllocateSegmentret_->controller_->Failed()) { + fakeGetOrAllocateSegmentret_->controller_->Failed()) { controller->SetFailed("failed"); } if (!strcmp(request->owner().c_str(), "root")) { - // 当user为root用户的时候需要检查其signature信息 + // When the user is root, it is necessary to check their signature + // information std::string str2sig = Authenticator::GetString2Signature( - request->date(), - request->owner()); - std::string sig = Authenticator::CalcString2Signature(str2sig, - "root_password"); + request->date(), request->owner()); + std::string sig = + Authenticator::CalcString2Signature(str2sig, "root_password"); ASSERT_STREQ(request->signature().c_str(), sig.c_str()); LOG(INFO) << "GetOrAllocateSegment with password!"; } retrytimes_++; - // 检查请求内容是全路径 + // Check that the request content is full path auto checkFullpath = [&]() { LOG(INFO) << "request filename = " << request->filename(); ASSERT_EQ(request->filename()[0], '/'); @@ -207,14 +204,14 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { (void)checkFullpath; fiu_do_on("test/client/fake/fakeMDS.GetOrAllocateSegment", - checkFullpath()); + checkFullpath()); curve::mds::GetOrAllocateSegmentResponse* resp; - if (request->filename() == "/clonesource") { + if 
(request->filename() == "/clonesource") { resp = static_cast<::curve::mds::GetOrAllocateSegmentResponse*>( - fakeGetOrAllocateSegmentretForClone_->response_); + fakeGetOrAllocateSegmentretForClone_->response_); } else { resp = static_cast<::curve::mds::GetOrAllocateSegmentResponse*>( - fakeGetOrAllocateSegmentret_->response_); + fakeGetOrAllocateSegmentret_->response_); } response->CopyFrom(*resp); } @@ -236,26 +233,26 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { } void OpenFile(::google::protobuf::RpcController* controller, - const ::curve::mds::OpenFileRequest* request, - ::curve::mds::OpenFileResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::OpenFileRequest* request, + ::curve::mds::OpenFileResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeopenfile_->controller_ != nullptr && - fakeopenfile_->controller_->Failed()) { + fakeopenfile_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::OpenFileResponse*>( - fakeopenfile_->response_); + fakeopenfile_->response_); response->CopyFrom(*resp); } void RefreshSession(::google::protobuf::RpcController* controller, const curve::mds::ReFreshSessionRequest* request, curve::mds::ReFreshSessionResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::Closure* done) { { brpc::ClosureGuard done_guard(done); if (fakeRefreshSession_->controller_ != nullptr && @@ -266,10 +263,10 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { static int seq = 1; auto resp = static_cast<::curve::mds::ReFreshSessionResponse*>( - fakeRefreshSession_->response_); + fakeRefreshSession_->response_); if (resp->statuscode() == ::curve::mds::StatusCode::kOK) { - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; info->set_seqnum(seq++); info->set_filename("_filename_"); info->set_id(resp->fileinfo().id()); @@ -279,13 +276,13 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { info->set_length(4 * 1024 * 1024 * 1024ul); info->set_ctime(12345678); - curve::mds::ProtoSession *protoSession = - new curve::mds::ProtoSession(); + curve::mds::ProtoSession* protoSession = + new curve::mds::ProtoSession(); protoSession->set_sessionid("1234"); protoSession->set_createtime(12345); protoSession->set_leasetime(10000000); protoSession->set_sessionstatus( - ::curve::mds::SessionStatus::kSessionOK); + ::curve::mds::SessionStatus::kSessionOK); response->set_statuscode(::curve::mds::StatusCode::kOK); response->set_sessionid("1234"); @@ -299,175 +296,166 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { retrytimes_++; - if (refreshtask_) - refreshtask_(); + if (refreshtask_) refreshtask_(); } void CreateSnapShot(::google::protobuf::RpcController* controller, - const ::curve::mds::CreateSnapShotRequest* request, - ::curve::mds::CreateSnapShotResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::CreateSnapShotRequest* request, + ::curve::mds::CreateSnapShotResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakecreatesnapshotret_->controller_ != nullptr && - fakecreatesnapshotret_->controller_->Failed()) { + fakecreatesnapshotret_->controller_->Failed()) { controller->SetFailed("failed"); } if (request->has_signature()) { - CheckAuth(request->signature(), - request->filename(), - request->owner(), - request->date()); + 
CheckAuth(request->signature(), request->filename(), + request->owner(), request->date()); } retrytimes_++; auto resp = static_cast<::curve::mds::CreateSnapShotResponse*>( - fakecreatesnapshotret_->response_); + fakecreatesnapshotret_->response_); response->CopyFrom(*resp); } void ListSnapShot(::google::protobuf::RpcController* controller, - const ::curve::mds::ListSnapShotFileInfoRequest* request, - ::curve::mds::ListSnapShotFileInfoResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::ListSnapShotFileInfoRequest* request, + ::curve::mds::ListSnapShotFileInfoResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakelistsnapshotret_->controller_ != nullptr && - fakelistsnapshotret_->controller_->Failed()) { + fakelistsnapshotret_->controller_->Failed()) { controller->SetFailed("failed"); } if (request->has_signature()) { - CheckAuth(request->signature(), - request->filename(), - request->owner(), - request->date()); + CheckAuth(request->signature(), request->filename(), + request->owner(), request->date()); } retrytimes_++; auto resp = static_cast<::curve::mds::ListSnapShotFileInfoResponse*>( - fakelistsnapshotret_->response_); + fakelistsnapshotret_->response_); response->CopyFrom(*resp); } void DeleteSnapShot(::google::protobuf::RpcController* controller, - const ::curve::mds::DeleteSnapShotRequest* request, - ::curve::mds::DeleteSnapShotResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::DeleteSnapShotRequest* request, + ::curve::mds::DeleteSnapShotResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakedeletesnapshotret_->controller_ != nullptr && - fakedeletesnapshotret_->controller_->Failed()) { + fakedeletesnapshotret_->controller_->Failed()) { controller->SetFailed("failed"); } if (request->has_signature()) { - CheckAuth(request->signature(), - request->filename(), - request->owner(), - request->date()); + CheckAuth(request->signature(), request->filename(), + request->owner(), request->date()); } retrytimes_++; auto resp = static_cast<::curve::mds::DeleteSnapShotResponse*>( - fakedeletesnapshotret_->response_); + fakedeletesnapshotret_->response_); response->CopyFrom(*resp); } - void CheckSnapShotStatus(::google::protobuf::RpcController* controller, - const ::curve::mds::CheckSnapShotStatusRequest* request, - ::curve::mds::CheckSnapShotStatusResponse* response, - ::google::protobuf::Closure* done) { + void CheckSnapShotStatus( + ::google::protobuf::RpcController* controller, + const ::curve::mds::CheckSnapShotStatusRequest* request, + ::curve::mds::CheckSnapShotStatusResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakechecksnapshotret_->controller_ != nullptr && - fakechecksnapshotret_->controller_->Failed()) { + fakechecksnapshotret_->controller_->Failed()) { controller->SetFailed("failed"); } if (request->has_signature()) { - CheckAuth(request->signature(), - request->filename(), - request->owner(), - request->date()); + CheckAuth(request->signature(), request->filename(), + request->owner(), request->date()); } auto resp = static_cast<::curve::mds::DeleteSnapShotResponse*>( - fakechecksnapshotret_->response_); + fakechecksnapshotret_->response_); response->CopyFrom(*resp); } - void GetSnapShotFileSegment(::google::protobuf::RpcController* controller, - const ::curve::mds::GetOrAllocateSegmentRequest* request, - ::curve::mds::GetOrAllocateSegmentResponse* response, - 
::google::protobuf::Closure* done) { + void GetSnapShotFileSegment( + ::google::protobuf::RpcController* controller, + const ::curve::mds::GetOrAllocateSegmentRequest* request, + ::curve::mds::GetOrAllocateSegmentResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakegetsnapsegmentinforet_->controller_ != nullptr && - fakegetsnapsegmentinforet_->controller_->Failed()) { + fakegetsnapsegmentinforet_->controller_->Failed()) { controller->SetFailed("failed"); } if (request->has_signature()) { - CheckAuth(request->signature(), - request->filename(), - request->owner(), - request->date()); + CheckAuth(request->signature(), request->filename(), + request->owner(), request->date()); } retrytimes_++; auto resp = static_cast<::curve::mds::GetOrAllocateSegmentResponse*>( - fakegetsnapsegmentinforet_->response_); + fakegetsnapsegmentinforet_->response_); response->CopyFrom(*resp); } void DeleteChunkSnapshotOrCorrectSn( - ::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakedeletesnapchunkret_->controller_ != nullptr && - fakedeletesnapchunkret_->controller_->Failed()) { + fakedeletesnapchunkret_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( - fakedeletesnapchunkret_->response_); + fakedeletesnapchunkret_->response_); response->CopyFrom(*resp); } void ReadChunkSnapshot(::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakereadchunksnapret_->controller_ != nullptr && - fakereadchunksnapret_->controller_->Failed()) { + fakereadchunksnapret_->controller_->Failed()) { controller->SetFailed("failed"); } auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( - fakereadchunksnapret_->response_); + fakereadchunksnapret_->response_); response->CopyFrom(*resp); } void CloseFile(::google::protobuf::RpcController* controller, - const ::curve::mds::CloseFileRequest* request, - ::curve::mds::CloseFileResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::CloseFileRequest* request, + ::curve::mds::CloseFileResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeclosefile_->controller_ != nullptr && - fakeclosefile_->controller_->Failed()) { + fakeclosefile_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::CloseFileResponse*>( - fakeclosefile_->response_); + fakeclosefile_->response_); response->CopyFrom(*resp); if (closeFileTask_) { @@ -481,14 +469,14 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakerenamefile_->controller_ != nullptr && - fakerenamefile_->controller_->Failed()) { + fakerenamefile_->controller_->Failed()) { controller->SetFailed("failed"); 
} retrytimes_++; auto resp = static_cast<::curve::mds::CloseFileResponse*>( - fakerenamefile_->response_); + fakerenamefile_->response_); response->CopyFrom(*resp); } @@ -498,7 +486,7 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakedeletefile_->controller_ != nullptr && - fakedeletefile_->controller_->Failed()) { + fakedeletefile_->controller_->Failed()) { controller->SetFailed("failed"); } @@ -509,12 +497,13 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { retrytimes_++; auto resp = static_cast<::curve::mds::CloseFileResponse*>( - fakedeletefile_->response_); + fakedeletefile_->response_); if (request->forcedelete()) { LOG(INFO) << "force delete file!"; - fiu_do_on("test/client/fake/fakeMDS/forceDeleteFile", - resp->set_statuscode(curve::mds::StatusCode::kNotSupported)); + fiu_do_on( + "test/client/fake/fakeMDS/forceDeleteFile", + resp->set_statuscode(curve::mds::StatusCode::kNotSupported)); } response->CopyFrom(*resp); @@ -526,103 +515,97 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeextendfile_->controller_ != nullptr && - fakeextendfile_->controller_->Failed()) { + fakeextendfile_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::ExtendFileResponse*>( - fakeextendfile_->response_); + fakeextendfile_->response_); response->CopyFrom(*resp); } void CreateCloneFile(::google::protobuf::RpcController* controller, - const ::curve::mds::CreateCloneFileRequest* request, - ::curve::mds::CreateCloneFileResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::CreateCloneFileRequest* request, + ::curve::mds::CreateCloneFileResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeCreateCloneFile_->controller_ != nullptr - && fakeCreateCloneFile_->controller_->Failed()) { + if (fakeCreateCloneFile_->controller_ != nullptr && + fakeCreateCloneFile_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::CreateCloneFileResponse*>( - fakeCreateCloneFile_->response_); + fakeCreateCloneFile_->response_); response->CopyFrom(*resp); } - void SetCloneFileStatus(::google::protobuf::RpcController* controller, - const ::curve::mds::SetCloneFileStatusRequest* request, - ::curve::mds::SetCloneFileStatusResponse* response, - ::google::protobuf::Closure* done) { + void SetCloneFileStatus( + ::google::protobuf::RpcController* controller, + const ::curve::mds::SetCloneFileStatusRequest* request, + ::curve::mds::SetCloneFileStatusResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeSetCloneFileStatus_->controller_ != nullptr - && fakeSetCloneFileStatus_->controller_->Failed()) { + if (fakeSetCloneFileStatus_->controller_ != nullptr && + fakeSetCloneFileStatus_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::SetCloneFileStatusResponse*>( - fakeSetCloneFileStatus_->response_); + fakeSetCloneFileStatus_->response_); response->CopyFrom(*resp); } void ChangeOwner(::google::protobuf::RpcController* controller, - const ::curve::mds::ChangeOwnerRequest* request, - ::curve::mds::ChangeOwnerResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::ChangeOwnerRequest* 
request, + ::curve::mds::ChangeOwnerResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeChangeOwner_->controller_ != nullptr && - fakeChangeOwner_->controller_->Failed()) { + fakeChangeOwner_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::ChangeOwnerResponse*>( - fakeChangeOwner_->response_); + fakeChangeOwner_->response_); response->CopyFrom(*resp); } void ListDir(::google::protobuf::RpcController* controller, - const ::curve::mds::ListDirRequest* request, - ::curve::mds::ListDirResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::ListDirRequest* request, + ::curve::mds::ListDirResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeListDir_->controller_ != nullptr && - fakeListDir_->controller_->Failed()) { + fakeListDir_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::ListDirResponse*>( - fakeListDir_->response_); + fakeListDir_->response_); response->CopyFrom(*resp); } - void SetListDir(FakeReturn* fakeret) { - fakeListDir_ = fakeret; - } + void SetListDir(FakeReturn* fakeret) { fakeListDir_ = fakeret; } - void SetListClient(FakeReturn* fakeret) { - fakeListClient_ = fakeret; - } + void SetListClient(FakeReturn* fakeret) { fakeListClient_ = fakeret; } void SetCreateCloneFile(FakeReturn* fakeret) { fakeCreateCloneFile_ = fakeret; } - void SetExtendFile(FakeReturn* fakeret) { - fakeextendfile_ = fakeret; - } - + void SetExtendFile(FakeReturn* fakeret) { fakeextendfile_ = fakeret; } void SetCreateFileFakeReturn(FakeReturn* fakeret) { fakeCreateFileret_ = fakeret; @@ -652,9 +635,7 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { fakeDeAllocateSegment_ = fakeret; } - void SetOpenFile(FakeReturn* fakeret) { - fakeopenfile_ = fakeret; - } + void SetOpenFile(FakeReturn* fakeret) { fakeopenfile_ = fakeret; } void SetRefreshSession(FakeReturn* fakeret, std::function t) { fakeRefreshSession_ = fakeret; @@ -685,61 +666,41 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { fakedeletesnapchunkret_ = fakeret; } - void SetCloseFile(FakeReturn* fakeret) { - fakeclosefile_ = fakeret; - } + void SetCloseFile(FakeReturn* fakeret) { fakeclosefile_ = fakeret; } - void SetCheckSnap(FakeReturn* fakeret) { - fakechecksnapshotret_ = fakeret; - } + void SetCheckSnap(FakeReturn* fakeret) { fakechecksnapshotret_ = fakeret; } - void SetRenameFile(FakeReturn* fakeret) { - fakerenamefile_ = fakeret; - } + void SetRenameFile(FakeReturn* fakeret) { fakerenamefile_ = fakeret; } - void SetDeleteFile(FakeReturn* fakeret) { - fakedeletefile_ = fakeret; - } + void SetDeleteFile(FakeReturn* fakeret) { fakedeletefile_ = fakeret; } - void SetRegistRet(FakeReturn* fakeret) { - fakeRegisterret_ = fakeret; - } + void SetRegistRet(FakeReturn* fakeret) { fakeRegisterret_ = fakeret; } void SetCloneFileStatus(FakeReturn* fakeret) { fakeSetCloneFileStatus_ = fakeret; } - void SetChangeOwner(FakeReturn* fakeret) { - fakeChangeOwner_ = fakeret; - } + void SetChangeOwner(FakeReturn* fakeret) { fakeChangeOwner_ = fakeret; } void SetCloseFileTask(std::function task) { closeFileTask_ = task; } - void CleanRetryTimes() { - retrytimes_ = 0; - } + void CleanRetryTimes() { retrytimes_ = 0; } - uint64_t GetRetryTimes() { - return retrytimes_; - } + uint64_t GetRetryTimes() { return retrytimes_; } - std::string GetIP() { - return ip_; - } + std::string 
GetIP() { return ip_; } - uint16_t GetPort() { - return port_; - } + uint16_t GetPort() { return port_; } - void CheckAuth(const std::string& signature, - const std::string& filename, - const std::string& owner, - uint64_t date) { + void CheckAuth(const std::string& signature, const std::string& filename, + const std::string& owner, uint64_t date) { if (owner == curve::client::kRootUserName) { - std::string str2sig = Authenticator::GetString2Signature(date, owner); // NOLINT - std::string sigtest = Authenticator::CalcString2Signature(str2sig, "123"); // NOLINT + std::string str2sig = + Authenticator::GetString2Signature(date, owner); // NOLINT + std::string sigtest = + Authenticator::CalcString2Signature(str2sig, "123"); // NOLINT ASSERT_STREQ(sigtest.c_str(), signature.c_str()); } else { ASSERT_STREQ("", signature.c_str()); @@ -785,18 +746,17 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { class FakeMDSTopologyService : public curve::mds::topology::TopologyService { public: void GetChunkServerListInCopySets( - ::google::protobuf::RpcController* controller, - const GetChunkServerListInCopySetsRequest* request, - GetChunkServerListInCopySetsResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController* controller, + const GetChunkServerListInCopySetsRequest* request, + GetChunkServerListInCopySetsResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); int statcode = 0; if (response->has_statuscode()) { statcode = response->statuscode(); } - if (statcode == -1 || - (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed())) { + if (statcode == -1 || (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed())) { controller->SetFailed("failed"); } @@ -805,11 +765,10 @@ class FakeMDSTopologyService : public curve::mds::topology::TopologyService { response->CopyFrom(*resp); } - void RegistChunkServer( - ::google::protobuf::RpcController* controller, - const ChunkServerRegistRequest* request, - ChunkServerRegistResponse* response, - ::google::protobuf::Closure* done) { + void RegistChunkServer(::google::protobuf::RpcController* controller, + const ChunkServerRegistRequest* request, + ChunkServerRegistResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); response->set_statuscode(0); @@ -818,87 +777,87 @@ class FakeMDSTopologyService : public curve::mds::topology::TopologyService { } void GetChunkServer(::google::protobuf::RpcController* controller, - const GetChunkServerInfoRequest* request, - GetChunkServerInfoResponse* response, - ::google::protobuf::Closure* done) { + const GetChunkServerInfoRequest* request, + GetChunkServerInfoResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { controller->SetFailed("failed"); return; } - auto resp = static_cast( - fakeret_->response_); + auto resp = + static_cast(fakeret_->response_); response->CopyFrom(*resp); } void ListChunkServer(::google::protobuf::RpcController* controller, - const ListChunkServerRequest* request, - ListChunkServerResponse* response, - ::google::protobuf::Closure* done) { + const ListChunkServerRequest* request, + ListChunkServerResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeret_->controller_ != nullptr - && 
fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { controller->SetFailed("failed"); return; } - auto resp = static_cast( - fakeret_->response_); + auto resp = static_cast(fakeret_->response_); response->CopyFrom(*resp); } void ListPhysicalPool(::google::protobuf::RpcController* controller, - const ListPhysicalPoolRequest* request, - ListPhysicalPoolResponse* response, - ::google::protobuf::Closure* done) { + const ListPhysicalPoolRequest* request, + ListPhysicalPoolResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakelistpoolret_->controller_ != nullptr - && fakelistpoolret_->controller_->Failed()) { + if (fakelistpoolret_->controller_ != nullptr && + fakelistpoolret_->controller_->Failed()) { controller->SetFailed("failed"); return; } - auto resp = static_cast( - fakelistpoolret_->response_); + auto resp = + static_cast(fakelistpoolret_->response_); response->CopyFrom(*resp); } void ListPoolZone(::google::protobuf::RpcController* controller, - const ListPoolZoneRequest* request, - ListPoolZoneResponse* response, - ::google::protobuf::Closure* done) { + const ListPoolZoneRequest* request, + ListPoolZoneResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakelistzoneret_->controller_ != nullptr - && fakelistzoneret_->controller_->Failed()) { + if (fakelistzoneret_->controller_ != nullptr && + fakelistzoneret_->controller_->Failed()) { controller->SetFailed("failed"); return; } - auto resp = static_cast( - fakelistzoneret_->response_); + auto resp = + static_cast(fakelistzoneret_->response_); response->CopyFrom(*resp); } void ListZoneServer(::google::protobuf::RpcController* controller, - const ListZoneServerRequest* request, - ListZoneServerResponse* response, - ::google::protobuf::Closure* done) { + const ListZoneServerRequest* request, + ListZoneServerResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakelistserverret_->controller_ != nullptr - && fakelistserverret_->controller_->Failed()) { + if (fakelistserverret_->controller_ != nullptr && + fakelistserverret_->controller_->Failed()) { controller->SetFailed("failed"); return; } - auto resp = static_cast( - fakelistserverret_->response_); + auto resp = + static_cast(fakelistserverret_->response_); response->CopyFrom(*resp); } - void GetCopySetsInChunkServer(::google::protobuf::RpcController* controller, - const GetCopySetsInChunkServerRequest* request, - GetCopySetsInChunkServerResponse* response, - ::google::protobuf::Closure* done) { + void GetCopySetsInChunkServer( + ::google::protobuf::RpcController* controller, + const GetCopySetsInChunkServerRequest* request, + GetCopySetsInChunkServerResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakegetcopysetincsret_->controller_ != nullptr - && fakegetcopysetincsret_->controller_->Failed()) { + if (fakegetcopysetincsret_->controller_ != nullptr && + fakegetcopysetincsret_->controller_->Failed()) { controller->SetFailed("failed"); return; } @@ -908,12 +867,12 @@ class FakeMDSTopologyService : public curve::mds::topology::TopologyService { } void ListLogicalPool(::google::protobuf::RpcController* controller, - const ListLogicalPoolRequest* request, - ListLogicalPoolResponse* response, - ::google::protobuf::Closure* done) { + const ListLogicalPoolRequest* request, + ListLogicalPoolResponse* response, + 
::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakelistlogicalpoolret_->controller_ != nullptr - && fakelistlogicalpoolret_->controller_->Failed()) { + if (fakelistlogicalpoolret_->controller_ != nullptr && + fakelistlogicalpoolret_->controller_->Failed()) { controller->SetFailed("failed"); return; } @@ -933,9 +892,7 @@ class FakeMDSTopologyService : public curve::mds::topology::TopologyService { response->set_clusterid(uuid); } - void SetFakeReturn(FakeReturn* fakeret) { - fakeret_ = fakeret; - } + void SetFakeReturn(FakeReturn* fakeret) { fakeret_ = fakeret; } FakeReturn* fakeret_; FakeReturn* fakelistpoolret_; @@ -945,11 +902,10 @@ class FakeMDSTopologyService : public curve::mds::topology::TopologyService { FakeReturn* fakelistlogicalpoolret_; }; -typedef void (*HeartbeatCallback) ( - ::google::protobuf::RpcController* controller, - const HeartbeatRequest* request, - HeartbeatResponse* response, - ::google::protobuf::Closure* done); +typedef void (*HeartbeatCallback)(::google::protobuf::RpcController* controller, + const HeartbeatRequest* request, + HeartbeatResponse* response, + ::google::protobuf::Closure* done); class FakeMDSHeartbeatService : public curve::mds::heartbeat::HeartbeatService { public: @@ -975,19 +931,18 @@ class FakeMDSHeartbeatService : public curve::mds::heartbeat::HeartbeatService { private: HeartbeatCallback cb_; - mutable std::mutex cbMtx_; + mutable std::mutex cbMtx_; }; class FakeCreateCopysetService : public curve::chunkserver::CopysetService { public: - void CreateCopysetNode( - ::google::protobuf::RpcController* controller, - const ::curve::chunkserver::CopysetRequest* request, - ::curve::chunkserver::CopysetResponse* response, - ::google::protobuf::Closure* done) { + void CreateCopysetNode(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::CopysetRequest* request, + ::curve::chunkserver::CopysetResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { controller->SetFailed("failed"); } @@ -996,22 +951,23 @@ class FakeCreateCopysetService : public curve::chunkserver::CopysetService { response->CopyFrom(*resp); } - void GetCopysetStatus(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::CopysetStatusRequest *request, - ::curve::chunkserver::CopysetStatusResponse *response, - google::protobuf::Closure *done) { + void GetCopysetStatus( + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::CopysetStatusRequest* request, + ::curve::chunkserver::CopysetStatusResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - if (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { controller->SetFailed("failed"); return; } response->set_state(::braft::State::STATE_LEADER); - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); response->set_allocated_peer(peer); peer->set_address("127.0.0.1:1111"); - curve::common::Peer *leader = new curve::common::Peer(); + curve::common::Peer* leader = new curve::common::Peer(); response->set_allocated_leader(leader); leader->set_address("127.0.0.1:1111"); response->set_readonly(1); @@ -1029,21 +985,13 @@ class FakeCreateCopysetService : public 
curve::chunkserver::CopysetService { response->set_status(status_); } - void SetHash(uint64_t hash) { - hash_ = hash; - } + void SetHash(uint64_t hash) { hash_ = hash; } - void SetApplyindex(uint64_t index) { - applyindex_ = index; - } + void SetApplyindex(uint64_t index) { applyindex_ = index; } - void SetStatus(const COPYSET_OP_STATUS& status) { - status_ = status; - } + void SetStatus(const COPYSET_OP_STATUS& status) { status_ = status; } - void SetFakeReturn(FakeReturn* fakeret) { - fakeret_ = fakeret; - } + void SetFakeReturn(FakeReturn* fakeret) { fakeret_ = fakeret; } public: uint64_t applyindex_; @@ -1054,30 +1002,29 @@ class FakeCreateCopysetService : public curve::chunkserver::CopysetService { class FakeScheduleService : public ::curve::mds::schedule::ScheduleService { public: - void RapidLeaderSchedule( - google::protobuf::RpcController* cntl_base, - const RapidLeaderScheduleRequst* request, - RapidLeaderScheduleResponse* response, - google::protobuf::Closure* done) { + void RapidLeaderSchedule(google::protobuf::RpcController* cntl_base, + const RapidLeaderScheduleRequst* request, + RapidLeaderScheduleResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { cntl_base->SetFailed("failed"); return; } - auto resp = static_cast( - fakeret_->response_); + auto resp = + static_cast(fakeret_->response_); response->CopyFrom(*resp); } void QueryChunkServerRecoverStatus( google::protobuf::RpcController* cntl_base, - const QueryChunkServerRecoverStatusRequest *request, - QueryChunkServerRecoverStatusResponse *response, + const QueryChunkServerRecoverStatusRequest* request, + QueryChunkServerRecoverStatusResponse* response, google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { cntl_base->SetFailed("failed"); return; } @@ -1086,9 +1033,7 @@ class FakeScheduleService : public ::curve::mds::schedule::ScheduleService { response->CopyFrom(*resp); } - void SetFakeReturn(FakeReturn* fakeret) { - fakeret_ = fakeret; - } + void SetFakeReturn(FakeReturn* fakeret) { fakeret_ = fakeret; } FakeReturn* fakeret_; }; @@ -1118,15 +1063,11 @@ class FakeMDS { std::vector conf; }; - FakeScheduleService* GetScheduleService() { - return &fakeScheduleService_; - } + FakeScheduleService* GetScheduleService() { return &fakeScheduleService_; } - FakeMDSCurveFSService* GetMDSService() { - return &fakecurvefsservice_; - } + FakeMDSCurveFSService* GetMDSService() { return &fakecurvefsservice_; } - std::vector GetCreateCopysetService() { + std::vector GetCreateCopysetService() { return copysetServices_; } @@ -1134,15 +1075,11 @@ class FakeMDS { return chunkServices_; } - CliServiceFake* GetCliService() { - return &fakeCliService_; - } + CliServiceFake* GetCliService() { return &fakeCliService_; } - std::vector GetChunkservice() { - return chunkServices_; - } + std::vector GetChunkservice() { return chunkServices_; } - std::vector GetRaftStateService() { + std::vector GetRaftStateService() { return raftStateServices_; } @@ -1159,23 +1096,23 @@ class FakeMDS { private: std::vector copysetnodeVec_; brpc::Server* server_; - std::vector chunkservers_; + std::vector chunkservers_; std::vector server_addrs_; std::vector peers_; - std::vector chunkServices_; - 
std::vector copysetServices_; - std::vector raftStateServices_; - std::vector fakeChunkServerServices_; + std::vector chunkServices_; + std::vector copysetServices_; + std::vector raftStateServices_; + std::vector fakeChunkServerServices_; std::string filename_; uint64_t size_; - CliServiceFake fakeCliService_; + CliServiceFake fakeCliService_; FakeMDSCurveFSService fakecurvefsservice_; FakeMDSTopologyService faketopologyservice_; FakeMDSHeartbeatService fakeHeartbeatService_; FakeScheduleService fakeScheduleService_; - std::map metrics_; + std::map metrics_; }; -#endif // TEST_CLIENT_FAKE_FAKEMDS_H_ +#endif // TEST_CLIENT_FAKE_FAKEMDS_H_ diff --git a/test/client/inflight_rpc_control_test.cpp b/test/client/inflight_rpc_control_test.cpp index 8d6d4de1ee..717211348f 100644 --- a/test/client/inflight_rpc_control_test.cpp +++ b/test/client/inflight_rpc_control_test.cpp @@ -72,7 +72,7 @@ TEST(InflightRPCTest, TestInflightRPC) { int maxInflightNum = 8; { - // 测试inflight数量 + // Test the inflight count InflightControl control; control.SetMaxInflightNum(maxInflightNum); ASSERT_EQ(0, control.GetCurrentInflightNum()); @@ -89,7 +89,7 @@ TEST(InflightRPCTest, TestInflightRPC) { } { - // 测试GetInflightTokan与ReleaseInflightToken的并发 + // Test the concurrency of GetInflightToken and ReleaseInflightToken InflightControl control; control.SetMaxInflightNum(maxInflightNum); @@ -123,7 +123,7 @@ TEST(InflightRPCTest, TestInflightRPC) { } { - // 测试WaitInflightAllComeBack + // Test WaitInflightAllComeBack InflightControl control; control.SetMaxInflightNum(maxInflightNum); for (int i = 1; i <= maxInflightNum; ++i) { @@ -148,13 +148,15 @@ } TEST(InflightRPCTest, FileCloseTest) { - // 测试在文件关闭的时候,lese续约失败不会调用iomanager已析构的资源 - // lease时长10s,在lease期间仅续约一次,一次失败就会调用iomanager - // block IO,这时候其实调用的是scheduler的LeaseTimeoutBlockIO + // Test that when the lease renewal fails at the time of file closure, it + // will not invoke the already destructed resources of the IO manager. The + // lease duration is 10 seconds, and only one renewal is allowed during the + // lease period. If the renewal fails, it will trigger the IO manager's + // block IO, which actually calls the LeaseTimeoutBlockIO of the scheduler. IOOption ioOption; ioOption.reqSchdulerOpt.ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 10000; - // 设置inflight RPC最大数量为1 + // Set the maximum number of inflight RPCs to 1 ioOption.ioSenderOpt.inflightOpt.fileMaxInFlightRPCNum = 1; std::condition_variable cv; @@ -200,7 +202,8 @@ TEST(InflightRPCTest, FileCloseTest) { LeaseExecutor lease(lopt, userinfo, nullptr, iomanager); for (int j = 0; j < 5; j++) { - // 测试iomanager退出之后,lease再去调用其scheduler资源不会crash + // After the iomanager has exited, the lease calling its scheduler + // resources again must not crash lease.InvalidLease(); } @@ -214,11 +217,12 @@ } }; - // 并发两个线程,一个线程启动iomanager初始化,然后反初始化 - // 另一个线程启动lease续约,然后调用iomanager使其block IO - // 预期:并发两个线程,lease线程续约失败即使在iomanager线程 - // 退出的同时去调用其block IO接口也不会出现并发竞争共享资源的 - // 场景。 + // Concurrently run two threads: one thread initializes the IO manager and + // then deinitializes it, while the other thread initiates lease renewal and + // then calls the IO manager to make it block IO. Expectation: Concurrent + // execution of the two threads should not result in concurrent competition + // for shared resources, even if the lease thread fails to renew while the + // IO manager thread exits.
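A minimal sketch of the inflight-throttling pattern the tests above exercise: take a token before sending an RPC, return it on completion, and wait for all tokens to come back before tearing down. This is an illustrative reading of the InflightControl semantics, assuming only the C++ standard library; it is not the production curve::client implementation.

#include <condition_variable>
#include <cstdint>
#include <mutex>

// Illustrative limiter: GetInflightToken() blocks once `max` RPCs are
// outstanding; ReleaseInflightToken() frees a slot; WaitInflightAllComeBack()
// blocks until no RPC is outstanding.
class InflightLimiterSketch {
 public:
    explicit InflightLimiterSketch(uint64_t max) : max_(max) {}

    void GetInflightToken() {
        std::unique_lock<std::mutex> lk(mtx_);
        cv_.wait(lk, [this]() { return inflight_ < max_; });
        ++inflight_;
    }

    void ReleaseInflightToken() {
        std::lock_guard<std::mutex> lk(mtx_);
        --inflight_;
        cv_.notify_all();  // wake both token waiters and the drain waiter
    }

    void WaitInflightAllComeBack() {
        std::unique_lock<std::mutex> lk(mtx_);
        cv_.wait(lk, [this]() { return inflight_ == 0; });
    }

 private:
    const uint64_t max_;
    uint64_t inflight_ = 0;
    std::mutex mtx_;
    std::condition_variable cv_;
};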
std::thread t1(f1); std::thread t2(f2); diff --git a/test/client/iotracker_splitor_unittest.cpp b/test/client/iotracker_splitor_unittest.cpp index 1f423250fa..10dae34e55 100644 --- a/test/client/iotracker_splitor_unittest.cpp +++ b/test/client/iotracker_splitor_unittest.cpp @@ -31,22 +31,22 @@ #include //NOLINT #include // NOLINT #include -#include //NOLINT +#include //NOLINT +#include "include/client/libcurve.h" #include "src/client/client_common.h" #include "src/client/client_config.h" #include "src/client/config_info.h" #include "src/client/file_instance.h" #include "src/client/io_tracker.h" #include "src/client/iomanager4file.h" -#include "include/client/libcurve.h" #include "src/client/libcurve_file.h" #include "src/client/mds_client.h" #include "src/client/metacache.h" #include "src/client/metacache_struct.h" #include "src/client/request_context.h" -#include "src/client/splitor.h" #include "src/client/source_reader.h" +#include "src/client/splitor.h" #include "test/client/fake/fakeMDS.h" #include "test/client/fake/mockMDS.h" #include "test/client/fake/mock_schedule.h" @@ -90,7 +90,8 @@ class IOTrackerSplitorTest : public ::testing::Test { fopt.ioOpt.ioSplitOpt.fileIOSplitMaxSizeKB = 64; fopt.ioOpt.ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 1000; fopt.ioOpt.ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - fopt.ioOpt.ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; // NOLINT + fopt.ioOpt.ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = + 500; // NOLINT fopt.ioOpt.metaCacheOpt.metacacheGetLeaderRetry = 3; fopt.ioOpt.metaCacheOpt.metacacheRPCRetryIntervalUS = 500; fopt.ioOpt.reqSchdulerOpt.scheduleQueueCapacity = 4096; @@ -131,11 +132,11 @@ class IOTrackerSplitorTest : public ::testing::Test { void InsertMetaCache() { if (server.AddService(&curvefsservice, - brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { LOG(FATAL) << "Fail to add service"; } if (server.AddService(&topologyservice, - brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { LOG(FATAL) << "Fail to add service"; } brpc::ServerOptions options; @@ -148,7 +149,7 @@ class IOTrackerSplitorTest : public ::testing::Test { * 1. set openfile response */ ::curve::mds::OpenFileResponse* openresponse = - new ::curve::mds::OpenFileResponse(); + new ::curve::mds::OpenFileResponse(); ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession; se->set_sessionid("1"); se->set_createtime(12345); @@ -170,21 +171,23 @@ class IOTrackerSplitorTest : public ::testing::Test { openresponse->set_statuscode(::curve::mds::StatusCode::kOK); openresponse->set_allocated_protosession(se); openresponse->set_allocated_fileinfo(fin); - FakeReturn* openfakeret = new FakeReturn(nullptr, static_cast(openresponse)); // NOLINT + FakeReturn* openfakeret = new FakeReturn( + nullptr, static_cast(openresponse)); // NOLINT curvefsservice.SetOpenFile(openfakeret); fileinstance_->Open(); /** * 2. set closefile response */ - ::curve::mds::CloseFileResponse* closeresp = new ::curve::mds::CloseFileResponse; // NOLINT + ::curve::mds::CloseFileResponse* closeresp = + new ::curve::mds::CloseFileResponse; // NOLINT closeresp->set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* closefileret - = new FakeReturn(nullptr, static_cast(closeresp)); + FakeReturn* closefileret = + new FakeReturn(nullptr, static_cast(closeresp)); curvefsservice.SetCloseFile(closefileret); /** - * 3. 设置GetOrAllocateSegmentresponse + * 3. 
Set GetOrAllocateSegmentresponse */ curve::mds::GetOrAllocateSegmentResponse* response = new curve::mds::GetOrAllocateSegmentResponse(); @@ -192,30 +195,27 @@ class IOTrackerSplitorTest : public ::testing::Test { response->set_statuscode(::curve::mds::StatusCode::kOK); response->set_allocated_pagefilesegment(pfs); - response->mutable_pagefilesegment()-> - set_logicalpoolid(1234); - response->mutable_pagefilesegment()-> - set_segmentsize(1 * 1024 * 1024 * 1024); - response->mutable_pagefilesegment()-> - set_chunksize(4 * 1024 * 1024); - response->mutable_pagefilesegment()-> - set_startoffset(0); - - for (int i = 0; i < 256; i ++) { + response->mutable_pagefilesegment()->set_logicalpoolid(1234); + response->mutable_pagefilesegment()->set_segmentsize(1 * 1024 * 1024 * + 1024); + response->mutable_pagefilesegment()->set_chunksize(4 * 1024 * 1024); + response->mutable_pagefilesegment()->set_startoffset(0); + + for (int i = 0; i < 256; i++) { auto chunk = response->mutable_pagefilesegment()->add_chunks(); chunk->set_copysetid(i); chunk->set_chunkid(i); } - getsegmentfakeret = new FakeReturn(nullptr, - static_cast(response)); + getsegmentfakeret = + new FakeReturn(nullptr, static_cast(response)); curvefsservice.SetGetOrAllocateSegmentFakeReturn(getsegmentfakeret); curve::mds::GetOrAllocateSegmentResponse* notallocateresponse = - new curve::mds::GetOrAllocateSegmentResponse(); - notallocateresponse->set_statuscode(::curve::mds::StatusCode - ::kSegmentNotAllocated); - notallocatefakeret = new FakeReturn(nullptr, - static_cast(notallocateresponse)); + new curve::mds::GetOrAllocateSegmentResponse(); + notallocateresponse->set_statuscode( + ::curve::mds::StatusCode ::kSegmentNotAllocated); + notallocatefakeret = + new FakeReturn(nullptr, static_cast(notallocateresponse)); // set GetOrAllocateSegmentResponse for read from clone source curve::mds::GetOrAllocateSegmentResponse* cloneSourceResponse = @@ -224,28 +224,27 @@ class IOTrackerSplitorTest : public ::testing::Test { cloneSourceResponse->set_statuscode(::curve::mds::StatusCode::kOK); cloneSourceResponse->set_allocated_pagefilesegment(clonepfs); - cloneSourceResponse->mutable_pagefilesegment()-> - set_logicalpoolid(1); - cloneSourceResponse->mutable_pagefilesegment()-> - set_segmentsize(1 * 1024 * 1024 * 1024); - cloneSourceResponse->mutable_pagefilesegment()-> - set_chunksize(4 * 1024 * 1024); - cloneSourceResponse->mutable_pagefilesegment()-> - set_startoffset(1 * 1024 * 1024 * 1024); + cloneSourceResponse->mutable_pagefilesegment()->set_logicalpoolid(1); + cloneSourceResponse->mutable_pagefilesegment()->set_segmentsize( + 1 * 1024 * 1024 * 1024); + cloneSourceResponse->mutable_pagefilesegment()->set_chunksize(4 * 1024 * + 1024); + cloneSourceResponse->mutable_pagefilesegment()->set_startoffset( + 1 * 1024 * 1024 * 1024); for (int i = 256; i < 512; i++) { - auto chunk = cloneSourceResponse->mutable_pagefilesegment() - ->add_chunks(); + auto chunk = + cloneSourceResponse->mutable_pagefilesegment()->add_chunks(); chunk->set_copysetid(i); chunk->set_chunkid(i); } - getsegmentfakeretclone = new FakeReturn(nullptr, - static_cast(cloneSourceResponse)); + getsegmentfakeretclone = + new FakeReturn(nullptr, static_cast(cloneSourceResponse)); /** * 4. 
set refresh response */ - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; info->set_filename("1_userinfo_.txt"); info->set_seqnum(2); info->set_id(1); @@ -260,18 +259,19 @@ class IOTrackerSplitorTest : public ::testing::Test { refreshresp->set_statuscode(::curve::mds::StatusCode::kOK); refreshresp->set_sessionid("1234"); refreshresp->set_allocated_fileinfo(info); - FakeReturn* refreshfakeret - = new FakeReturn(nullptr, static_cast(refreshresp)); + FakeReturn* refreshfakeret = + new FakeReturn(nullptr, static_cast(refreshresp)); curvefsservice.SetRefreshSession(refreshfakeret, nullptr); /** - * 5. 设置topology返回值 + * 5. Set topology return value */ - ::curve::mds::topology::GetChunkServerListInCopySetsResponse* response_1 - = new ::curve::mds::topology::GetChunkServerListInCopySetsResponse; + ::curve::mds::topology::GetChunkServerListInCopySetsResponse* + response_1 = new ::curve::mds::topology:: + GetChunkServerListInCopySetsResponse; response_1->set_statuscode(0); uint64_t chunkserveridc = 1; - for (int i = 0; i < 256; i ++) { + for (int i = 0; i < 256; i++) { auto csinfo = response_1->add_csinfo(); csinfo->set_copysetid(i); @@ -282,23 +282,23 @@ class IOTrackerSplitorTest : public ::testing::Test { cslocs->set_port(9104); } } - FakeReturn* faktopologyeret = new FakeReturn(nullptr, - static_cast(response_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(response_1)); topologyservice.SetFakeReturn(faktopologyeret); - curve::client::MetaCache* mc = fileinstance_->GetIOManager4File()-> - GetMetaCache(); + curve::client::MetaCache* mc = + fileinstance_->GetIOManager4File()->GetMetaCache(); curve::client::FInfo_t fi; fi.userinfo = userinfo; - fi.chunksize = 4 * 1024 * 1024; + fi.chunksize = 4 * 1024 * 1024; fi.segmentsize = 1 * 1024 * 1024 * 1024ul; SegmentInfo sinfo; LogicalPoolCopysetIDInfo_t lpcsIDInfo; mdsclient_->GetOrAllocateSegment(true, 0, &fi, nullptr, &sinfo); int count = 0; for (auto iter : sinfo.chunkvec) { - uint64_t index = (sinfo.startoffset + count*fi.chunksize ) - / fi.chunksize; + uint64_t index = + (sinfo.startoffset + count * fi.chunksize) / fi.chunksize; mc->UpdateChunkInfoByIndex(index, iter); ++count; } @@ -339,17 +339,17 @@ class IOTrackerSplitorTest : public ::testing::Test { curvefsservice.SetOpenFile(fakeOpen_.get()); } - FileClient *fileClient_; + FileClient* fileClient_; UserInfo_t userinfo; std::shared_ptr mdsclient_; FileServiceOption fopt; - FileInstance *fileinstance_; + FileInstance* fileinstance_; brpc::Server server; FakeMDSCurveFSService curvefsservice; FakeTopologyService topologyservice; - FakeReturn *getsegmentfakeret; - FakeReturn *notallocatefakeret; - FakeReturn *getsegmentfakeretclone; + FakeReturn* getsegmentfakeret; + FakeReturn* notallocatefakeret; + FakeReturn* getsegmentfakeretclone; OpenFileResponse openResp_; std::unique_ptr fakeOpen_; @@ -376,7 +376,7 @@ TEST_F(IOTrackerSplitorTest, AsyncStartRead) { { std::unique_lock lk(readmtx); - readcv.wait(lk, []()->bool{return ioreadflag;}); + readcv.wait(lk, []() -> bool { return ioreadflag; }); } LOG(ERROR) << "address = " << &data; ASSERT_EQ('a', data[0]); @@ -521,7 +521,7 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartRead) { { std::unique_lock lk(readmtx); - readcv.wait(lk, []()->bool{return ioreadflag;}); + readcv.wait(lk, []() -> bool { return ioreadflag; }); } ASSERT_EQ('a', data[0]); ASSERT_EQ('a', data[4 * 1024 - 1]); @@ -557,7 +557,7 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWrite) { { 
std::unique_lock lk(writemtx); - writecv.wait(lk, []()->bool{return iowriteflag;}); + writecv.wait(lk, []() -> bool { return iowriteflag; }); } std::unique_ptr writebuffer(new char[aioctx->length]); @@ -603,13 +603,11 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWriteReadGetSegmentFail) { memset(data + 4 * 1024, 'b', chunk_size); memset(data + 4 * 1024 + chunk_size, 'c', 4 * 1024); - // 设置mds一侧get segment接口返回失败,底层task thread层会一直重试, - // 但是不会阻塞上层继续向下发送IO请求 - int reqcount = 32; - auto threadFunc1 = [&]() { - while (reqcount > 0) { - fileinstance_->AioWrite(aioctx); - reqcount--; + // When the 'get segment' interface on the MDS (Metadata Server) side + // fails, the underlying task thread layer will keep retrying. However, + // this will not block the upper layer from continuing to send IO + // requests downward. + int reqcount = 32; + auto threadFunc1 = [&]() { + while (reqcount > 0) { + fileinstance_->AioWrite(aioctx); + reqcount--; } }; @@ -636,15 +634,12 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWriteReadGetServerlistFail) { ioctxmana->SetRequestScheduler(mockschuler); ioctxmana->SetIOOpt(fopt.ioOpt); - // offset 10*1024*1024*1024ul 不在metacache里 - // client回去mds拿segment和serverlist - CurveAioContext* aioctx = new CurveAioContext; - aioctx->offset = 10*1024*1024*1024ul; - aioctx->length = chunk_size + 8 * 1024; - aioctx->ret = LIBCURVE_ERROR::OK; - aioctx->cb = writecallback; - aioctx->buf = new char[aioctx->length]; - aioctx->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; + // The offset 10*1024*1024*1024ul is not in the metacache. + // The client will request the segment and serverlist from the MDS + // (Metadata Server). + CurveAioContext* aioctx = new CurveAioContext; + aioctx->offset = 10*1024*1024*1024ul; + aioctx->length = chunk_size + 8 * 1024; + aioctx->ret = LIBCURVE_ERROR::OK; + aioctx->cb = writecallback; + aioctx->buf = new char[aioctx->length]; + aioctx->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; char* data = static_cast(aioctx->buf); @@ -652,10 +647,10 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWriteReadGetServerlistFail) { memset(data + 4 * 1024, 'b', chunk_size); memset(data + 4 * 1024 + chunk_size, 'c', 4 * 1024); - // 设置mds一侧get server list接口返回失败,底层task thread层会一直重试 - // 但是不会阻塞,上层继续向下发送IO请求 - int reqcount = 32; - auto threadFunc1 = [&]() { + // If the "get server list" interface on the MDS side fails, the + // underlying task thread layer will keep retrying. However, this won't + // block the process, and the upper layer will continue sending IO + // requests downstream. + int reqcount = 32; + auto threadFunc1 = [&]() { while (reqcount > 0) { fileinstance_->AioWrite(aioctx); reqcount--; @@ -722,7 +717,7 @@ TEST_F(IOTrackerSplitorTest, ManagerStartWrite) { process.join(); } - std::unique_ptr writebuffer(new char[length]); + std::unique_ptr writebuffer(new char[length]); memcpy(writebuffer.get(), writeData.to_string().c_str(), length); ASSERT_EQ('a', writebuffer[0]); @@ -768,8 +763,8 @@ TEST_F(IOTrackerSplitorTest, ExceptionTest_TEST) { auto threadfunc = [&]() { iotracker->SetUserDataType(UserDataType::RawBuffer); - iotracker->StartWrite(nullptr, offset, length, mdsclient_.get(), - &fi, nullptr); + iotracker->StartWrite(nullptr, offset, length, mdsclient_.get(), &fi, + nullptr); }; std::thread process(threadfunc); @@ -800,8 +795,7 @@ TEST_F(IOTrackerSplitorTest, BoundaryTEST) { // this offset and length will make splitor split fail. // we set disk size = 1G.
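A quick arithmetic check of why BoundaryTEST expects the split to fail, under the test's stated assumption of a 1 GiB volume: the request starts 4 MiB + 4 KiB before the end of the device and is 4 MiB + 8 KiB long, so it runs 4 KiB past the end. The constant names below are illustrative.

#include <cassert>
#include <cstdint>

int main() {
    const uint64_t kVolumeSize = 1ull * 1024 * 1024 * 1024;            // 1 GiB, per the test comment
    const uint64_t offset = kVolumeSize - 4 * 1024 * 1024 - 4 * 1024;  // 1 GiB - 4 MiB - 4 KiB
    const uint64_t length = 4 * 1024 * 1024 + 8 * 1024;                // 4 MiB + 8 KiB
    // The request ends 4 KiB beyond the volume, so the splitor must reject it.
    assert(offset + length == kVolumeSize + 4 * 1024);
    return 0;
}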
- uint64_t offset = 1 * 1024 * 1024 * 1024 - - 4 * 1024 * 1024 - 4 *1024; + uint64_t offset = 1 * 1024 * 1024 * 1024 - 4 * 1024 * 1024 - 4 * 1024; uint64_t length = 4 * 1024 * 1024 + 8 * 1024; char* buf = new char[length]; @@ -828,11 +822,10 @@ TEST_F(IOTrackerSplitorTest, largeIOTest) { /** * this offset and length will make splitor split into two 8k IO. */ - uint64_t length = 2 * 64 * 1024; // 128KB + uint64_t length = 2 * 64 * 1024; // 128KB uint64_t offset = 4 * 1024 * 1024 - length; // 4MB - 128KB char* buf = new char[length]; - memset(buf, 'a', 64 * 1024); // 64KB memset(buf + 64 * 1024, 'b', 64 * 1024); // 64KB butil::IOBuf writeData; @@ -902,37 +895,33 @@ TEST_F(IOTrackerSplitorTest, InvalidParam) { mdsclient_.get(), &fi, nullptr)); ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests( - nullptr, mc, - &reqlist, cid, &iobuf, offset, length, 0)); + nullptr, mc, &reqlist, cid, &iobuf, offset, length, 0)); ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( iotracker, nullptr, &reqlist, &iobuf, offset, length, mdsclient_.get(), nullptr, nullptr)); - ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests( - iotracker, nullptr, - &reqlist, cid, &iobuf, offset, length, 0)); + ASSERT_EQ( + -1, curve::client::Splitor::SingleChunkIO2ChunkRequests( + iotracker, nullptr, &reqlist, cid, &iobuf, offset, length, 0)); ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( iotracker, mc, &reqlist, &iobuf, offset, length, mdsclient_.get(), nullptr, nullptr)); - ASSERT_EQ( - -1, curve::client::Splitor::IO2ChunkRequests( - iotracker, mc, &reqlist, &iobuf, offset, length, nullptr, - &fi, nullptr)); + ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( + iotracker, mc, &reqlist, &iobuf, offset, length, nullptr, + &fi, nullptr)); ASSERT_EQ(0, curve::client::Splitor::SingleChunkIO2ChunkRequests( - iotracker, mc, - &reqlist, cid, &iobuf, offset, length, 0)); + iotracker, mc, &reqlist, cid, &iobuf, offset, length, 0)); ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( iotracker, mc, nullptr, &iobuf, offset, length, mdsclient_.get(), nullptr, nullptr)); ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests( - iotracker, mc, - nullptr, cid, &iobuf, offset, length, 0)); + iotracker, mc, nullptr, cid, &iobuf, offset, length, 0)); ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( iotracker, mc, &reqlist, nullptr, offset, length, @@ -961,7 +950,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { MetaCache metaCache; FInfo_t fileInfo; - fileInfo.chunksize = 16 * 1024 * 1024; // 16M + fileInfo.chunksize = 16 * 1024 * 1024; // 16M fileInfo.filestatus = FileStatus::CloneMetaInstalled; CloneSourceInfo cloneSourceInfo; @@ -969,7 +958,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { cloneSourceInfo.length = 10ull * 1024 * 1024 * 1024; // 10GB cloneSourceInfo.segmentSize = 1ull * 1024 * 1024 * 1024; // 1GB - // 源卷只分配了第一个和最后一个segment + // The source volume has only allocated the first and last segments cloneSourceInfo.allocatedSegmentOffsets.insert(0); cloneSourceInfo.allocatedSegmentOffsets.insert(cloneSourceInfo.length - cloneSourceInfo.segmentSize); @@ -980,14 +969,14 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { ChunkIndex chunkIdx = 0; RequestSourceInfo sourceInfo; - // 第一个chunk + // First chunk sourceInfo = Splitor::CalcRequestSourceInfo(&ioTracker, &metaCache, chunkIdx); ASSERT_TRUE(sourceInfo.IsValid()); ASSERT_EQ(sourceInfo.cloneFileSource, fileInfo.sourceInfo.name); ASSERT_EQ(sourceInfo.cloneFileOffset, 0); - // 克隆卷最后一个chunk + 
// The last chunk of the clone volume chunkIdx = fileInfo.sourceInfo.length / fileInfo.chunksize - 1; LOG(INFO) << "clone length = " << fileInfo.sourceInfo.length << ", chunk size = " << fileInfo.chunksize @@ -1000,19 +989,19 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { ASSERT_EQ(sourceInfo.cloneFileSource, fileInfo.sourceInfo.name); ASSERT_EQ(sourceInfo.cloneFileOffset, 10720641024); - // 源卷未分配segment - // 读取每个segment的第一个chunk + // Segments not allocated on the source volume + // Read the first chunk of each segment for (int i = 1; i < 9; ++i) { ChunkIndex chunkIdx = i * cloneSourceInfo.segmentSize / fileInfo.chunksize; - RequestSourceInfo sourceInfo = Splitor::CalcRequestSourceInfo( - &ioTracker, &metaCache, chunkIdx); + RequestSourceInfo sourceInfo = + Splitor::CalcRequestSourceInfo(&ioTracker, &metaCache, chunkIdx); ASSERT_FALSE(sourceInfo.IsValid()); ASSERT_TRUE(sourceInfo.cloneFileSource.empty()); ASSERT_EQ(sourceInfo.cloneFileOffset, 0); } - // 超过长度 + // Beyond the file length chunkIdx = fileInfo.sourceInfo.length / fileInfo.chunksize; sourceInfo = @@ -1021,7 +1010,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { ASSERT_TRUE(sourceInfo.cloneFileSource.empty()); ASSERT_EQ(sourceInfo.cloneFileOffset, 0); - // 源卷长度为0 + // Source volume length is 0 chunkIdx = 0; fileInfo.sourceInfo.length = 0; metaCache.UpdateFileInfo(fileInfo); @@ -1031,7 +1020,7 @@ ASSERT_TRUE(sourceInfo.cloneFileSource.empty()); ASSERT_EQ(sourceInfo.cloneFileOffset, 0); - // 不是read/write请求 + // Not a read/write request chunkIdx = 1; ioTracker.SetOpType(OpType::READ_SNAP); sourceInfo = @@ -1045,7 +1034,7 @@ chunkIdx = 0; - // 不是克隆卷 + // Not a clone volume sourceInfo = Splitor::CalcRequestSourceInfo(&ioTracker, &metaCache, chunkIdx); ASSERT_FALSE(sourceInfo.IsValid()); @@ -1068,7 +1057,7 @@ TEST_F(IOTrackerSplitorTest, stripeTest) { fi.segmentsize = 1 * 1024 * 1024 * 1024ul; fi.stripeUnit = 1 * 1024 * 1024; fi.stripeCount = 4; - memset(buf, 'a', length); // 64KB + memset(buf, 'a', length); // 64KB dataCopy.append(buf, length); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); MetaCache* mc = iomana->GetMetaCache(); @@ -1162,9 +1151,9 @@ TEST_F(IOTrackerSplitorTest, TestDisableStripeForStripeFile) { IOTracker ioTracker(iomanager, cache, &scheduler, nullptr, true); std::vector reqlist; - ASSERT_EQ(0, - Splitor::IO2ChunkRequests(&ioTracker, cache, &reqlist, &dataCopy, - offset, length, mdsclient_.get(), &fi, nullptr)); + ASSERT_EQ(0, Splitor::IO2ChunkRequests(&ioTracker, cache, &reqlist, + &dataCopy, offset, length, + mdsclient_.get(), &fi, nullptr)); ASSERT_EQ(2, reqlist.size()); auto* first = reqlist[0]; @@ -1206,7 +1195,7 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegment) { } for (int i = 0; i < length; i++) { - ASSERT_EQ(0, data[i]); + ASSERT_EQ(0, data[i]); } delete[] data; } @@ -1233,11 +1222,11 @@ { std::unique_lock lk(readmtx); - readcv.wait(lk, []()->bool{return ioreadflag;}); + readcv.wait(lk, []() -> bool { return ioreadflag; }); } for (int i = 0; i < aioctx.length; i++) { - ASSERT_EQ(0, data[i]); + ASSERT_EQ(0, data[i]); } delete[] data; } @@ -1303,7 +1292,7 @@ { std::unique_lock lk(readmtx); - readcv.wait(lk, []()->bool{return ioreadflag;}); + readcv.wait(lk, []() -> bool { return ioreadflag; }); } for (int i = 0; i < 4 * 1024; i++) {
@@ -1342,8 +1331,7 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegmentFromOrigin) { auto& handlers = SourceReader::GetInstance().GetReadHandlers(); handlers.emplace( - std::piecewise_construct, - std::forward_as_tuple("/clonesource"), + std::piecewise_construct, std::forward_as_tuple("/clonesource"), std::forward_as_tuple(fileinstance2, ::time(nullptr), false)); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); @@ -1352,7 +1340,7 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegmentFromOrigin) { mc->UpdateChunkInfoByIndex(257, chunkIdInfo); FInfo_t fileInfo; - fileInfo.chunksize = 4 * 1024 * 1024; // 4M + fileInfo.chunksize = 4 * 1024 * 1024; // 4M fileInfo.fullPathName = "/1_userinfo_.txt"; fileInfo.owner = "userinfo"; fileInfo.filestatus = FileStatus::CloneMetaInstalled; @@ -1389,7 +1377,6 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegmentFromOrigin) { ASSERT_EQ('a', data[4 * 1024 + chunk_size]); ASSERT_EQ('a', data[length - 1]); - fileinstance2->UnInitialize(); delete fileinstance2; @@ -1398,8 +1385,8 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegmentFromOrigin) { TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegmentFromOrigin) { curvefsservice.SetGetOrAllocateSegmentFakeReturn(notallocatefakeret); - curvefsservice.SetGetOrAllocateSegmentFakeReturnForClone - (getsegmentfakeretclone); + curvefsservice.SetGetOrAllocateSegmentFakeReturnForClone( + getsegmentfakeretclone); PrepareOpenFile(); MockRequestScheduler* mockschuler = new MockRequestScheduler; @@ -1420,8 +1407,7 @@ TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegmentFromOrigin) { auto& handlers = SourceReader::GetInstance().GetReadHandlers(); handlers.emplace( - std::piecewise_construct, - std::forward_as_tuple("/clonesource"), + std::piecewise_construct, std::forward_as_tuple("/clonesource"), std::forward_as_tuple(fileinstance2, ::time(nullptr), false)); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); @@ -1460,7 +1446,7 @@ TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegmentFromOrigin) { { std::unique_lock lk(readmtx); - readcv.wait(lk, []()->bool{return ioreadflag;}); + readcv.wait(lk, []() -> bool { return ioreadflag; }); } LOG(ERROR) << "address = " << &data; ASSERT_EQ('a', data[0]); @@ -1478,28 +1464,22 @@ TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegmentFromOrigin) { TEST_F(IOTrackerSplitorTest, TimedCloseFd) { std::unordered_map fakeHandlers; fakeHandlers.emplace( - std::piecewise_construct, - std::forward_as_tuple("/1"), + std::piecewise_construct, std::forward_as_tuple("/1"), std::forward_as_tuple( - nullptr, - ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, + nullptr, ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, true)); fakeHandlers.emplace( - std::piecewise_construct, - std::forward_as_tuple("/2"), + std::piecewise_construct, std::forward_as_tuple("/2"), std::forward_as_tuple( - nullptr, - ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, + nullptr, ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, false)); FileInstance* instance = new FileInstance(); fakeHandlers.emplace( - std::piecewise_construct, - std::forward_as_tuple("/3"), + std::piecewise_construct, std::forward_as_tuple("/3"), std::forward_as_tuple( instance, - ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, - false)); + ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, false)); SourceReader::GetInstance().SetReadHandlers(fakeHandlers); diff --git 
a/test/client/lease_executor_test.cpp b/test/client/lease_executor_test.cpp index 4f5629ad8b..e008abd8f6 100644 --- a/test/client/lease_executor_test.cpp +++ b/test/client/lease_executor_test.cpp @@ -16,17 +16,18 @@ /* * Project: curve - * File Created: 2019年11月20日 + * File Created: November 20, 2019 * Author: wuhanqing */ +#include "src/client/lease_executor.h" + +#include #include #include #include -#include #include "src/client/iomanager4file.h" -#include "src/client/lease_executor.h" #include "src/client/mds_client.h" #include "test/client/mock/mock_namespace_service.h" @@ -81,8 +82,8 @@ class LeaseExecutorTest : public ::testing::Test { response_.set_allocated_fileinfo(fileInfo); EXPECT_CALL(curveFsService_, RefreshSession(_, _, _, _)) - .WillRepeatedly(DoAll(SetArgPointee<2>(response_), - Invoke(MockRefreshSession))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(response_), Invoke(MockRefreshSession))); } protected: diff --git a/test/client/libcbd_libcurve_test.cpp b/test/client/libcbd_libcurve_test.cpp index 3f582b8a3c..82fe048992 100644 --- a/test/client/libcbd_libcurve_test.cpp +++ b/test/client/libcbd_libcurve_test.cpp @@ -21,33 +21,32 @@ * 2018/11/23 Wenyu Zhou Initial version */ -#include +#include #include #include -#include +#include #include // #define CBD_BACKEND_FAKE #include "include/client/libcbd.h" - -#include "src/client/libcurve_file.h" #include "include/client/libcurve.h" +#include "src/client/client_common.h" #include "src/client/file_instance.h" -#include "test/client/fake/mock_schedule.h" +#include "src/client/libcurve_file.h" #include "test/client/fake/fakeMDS.h" -#include "src/client/client_common.h" +#include "test/client/fake/mock_schedule.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" using curve::client::EndPoint; -#define BUFSIZE 4 * 1024 -#define FILESIZE 10uL * 1024 * 1024 * 1024 -#define NEWSIZE 20uL * 1024 * 1024 * 1024 +#define BUFSIZE 4 * 1024 +#define FILESIZE 10uL * 1024 * 1024 * 1024 +#define NEWSIZE 20uL * 1024 * 1024 * 1024 -#define filename "1_userinfo_test.img" +#define filename "1_userinfo_test.img" const uint64_t GiB = 1024ull * 1024 * 1024; @@ -68,11 +67,11 @@ class TestLibcbdLibcurve : public ::testing::Test { public: void SetUp() { FLAGS_chunkserver_list = - "127.0.0.1:9110:0,127.0.0.1:9111:0,127.0.0.1:9112:0"; + "127.0.0.1:9110:0,127.0.0.1:9111:0,127.0.0.1:9112:0"; mds_ = new FakeMDS(filename); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9110, &ep); braft::PeerId pd(ep); @@ -381,7 +380,8 @@ TEST_F(TestLibcbdLibcurve, ReadAndCloseConcurrencyTest) { auto readThread = [buffer](int fd) { auto start = curve::common::TimeUtility::GetTimeofDayMs(); - ASSERT_EQ(BUFSIZE, cbd_lib_pread(fd, (void*)buffer, 0, BUFSIZE)); // NOLINT + ASSERT_EQ(BUFSIZE, + cbd_lib_pread(fd, (void*)buffer, 0, BUFSIZE)); // NOLINT auto end = curve::common::TimeUtility::GetTimeofDayMs(); ASSERT_LE(end - start, 1000); @@ -429,12 +429,12 @@ TEST_F(TestLibcbdLibcurve, IncreaseEpochTest) { ASSERT_EQ(ret, LIBCURVE_ERROR::OK); } -std::string mdsMetaServerAddr = "127.0.0.1:9951"; // NOLINT -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string configpath = "./test/client/configs/client_libcbd.conf"; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9951"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string configpath = 
"./test/client/configs/client_libcbd.conf"; // NOLINT -const std::vector clientConf { +const std::vector clientConf{ std::string("mds.listen.addr=127.0.0.1:9951"), std::string("global.logPath=./runlog/"), std::string("chunkserver.rpcTimeoutMS=1000"), @@ -445,17 +445,16 @@ const std::vector clientConf { std::string("metacache.rpcRetryIntervalUS=500"), std::string("mds.rpcRetryIntervalUS=500"), std::string("schedule.threadpoolSize=2"), - std::string("discard.discardTaskDelayMs=10") -}; + std::string("discard.discardTaskDelayMs=10")}; -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); google::ParseCommandLineFlags(&argc, &argv, false); curve::CurveCluster* cluster = new curve::CurveCluster(); - cluster->PrepareConfig( - configpath, clientConf); + cluster->PrepareConfig(configpath, + clientConf); int ret = RUN_ALL_TESTS(); return ret; diff --git a/test/client/libcurve_interface_unittest.cpp b/test/client/libcurve_interface_unittest.cpp index 99d35696b4..8a0c7a4b90 100644 --- a/test/client/libcurve_interface_unittest.cpp +++ b/test/client/libcurve_interface_unittest.cpp @@ -24,6 +24,7 @@ #include #include #include + #include // NOLINT #include // NOLINT #include @@ -58,14 +59,14 @@ std::condition_variable writeinterfacecv; std::mutex interfacemtx; std::condition_variable interfacecv; -void writecallbacktest(CurveAioContext *context) { +void writecallbacktest(CurveAioContext* context) { std::lock_guard lk(writeinterfacemtx); writeflag = true; writeinterfacecv.notify_one(); LOG(INFO) << "aio call back here, errorcode = " << context->ret; } -void readcallbacktest(CurveAioContext *context) { +void readcallbacktest(CurveAioContext* context) { std::lock_guard lk(writeinterfacemtx); readflag = true; interfacecv.notify_one(); @@ -88,7 +89,7 @@ TEST_F(TestLibcurveInterface, InterfaceTest) { memcpy(userinfo.owner, "userinfo", 9); memcpy(userinfo.password, "", 1); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9115, &ep); PeerId pd(ep); @@ -128,7 +129,7 @@ TEST_F(TestLibcurveInterface, InterfaceTest) { ASSERT_NE(fd, -1); - char *buffer = new char[8 * 1024]; + char* buffer = new char[8 * 1024]; memset(buffer, 'a', 1024); memset(buffer + 1024, 'b', 1024); memset(buffer + 2 * 1024, 'c', 1024); @@ -155,7 +156,7 @@ TEST_F(TestLibcurveInterface, InterfaceTest) { std::unique_lock lk(writeinterfacemtx); writeinterfacecv.wait(lk, []() -> bool { return writeflag; }); } - char *readbuffer = new char[8 * 1024]; + char* readbuffer = new char[8 * 1024]; CurveAioContext readaioctx; readaioctx.buf = readbuffer; readaioctx.offset = 0; @@ -244,7 +245,7 @@ TEST_F(TestLibcurveInterface, FileClientTest) { FileClient fc; - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9115, &ep); PeerId pd(ep); @@ -279,7 +280,7 @@ TEST_F(TestLibcurveInterface, FileClientTest) { fiu_enable("test/client/fake/fakeMDS.GetOrAllocateSegment", 1, nullptr, 0); - char *buffer = new char[8 * 1024]; + char* buffer = new char[8 * 1024]; memset(buffer, 'a', 1024); memset(buffer + 1024, 'b', 1024); memset(buffer + 2 * 1024, 'c', 1024); @@ -303,7 +304,7 @@ TEST_F(TestLibcurveInterface, FileClientTest) { std::unique_lock lk(writeinterfacemtx); writeinterfacecv.wait(lk, []() -> bool { return writeflag; }); } - char *readbuffer = new char[8 * 1024]; + char* readbuffer = new char[8 * 1024]; memset(readbuffer, 0xFF, 8 * 1024); CurveAioContext readaioctx; readaioctx.buf = readbuffer; @@ -375,7 +376,7 @@ TEST(TestLibcurveInterface, 
ChunkserverUnstableTest) { mdsclient_.Initialize(fopt.metaServerOpt); fileinstance_.Initialize("/test", &mdsclient_, userinfo, fopt); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9151, &ep); PeerId pd(ep); @@ -413,12 +414,11 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { ASSERT_EQ(length, fileinstance_.Write(buffer, offset, length)); ASSERT_EQ(length, fileinstance_.Read(buffer, offset, length)); - // 正常情况下只有第一次会去get leader + // Normally, getting the leader will only occur the first time. ASSERT_EQ(1, cliservice->GetInvokeTimes()); - // metacache中被写过的copyset leadermaychange都处于正常状态 - ChunkIDInfo_t chunkinfo1; - MetaCacheErrorType rc = mc->GetChunkInfoByIndex(0, &chunkinfo1); - ASSERT_EQ(rc, MetaCacheErrorType::OK); + // LeaderMayChange stays in the normal state for every copyset that has + // been written through the metacache. + ChunkIDInfo_t chunkinfo1; + MetaCacheErrorType rc = mc->GetChunkInfoByIndex(0, &chunkinfo1); + ASSERT_EQ(rc, MetaCacheErrorType::OK); for (int i = 0; i < FLAGS_copyset_num; i++) { CopysetPeerInfo ci = mc->GetCopysetinfo(FLAGS_logic_pool_id, i); if (i == chunkinfo1.cpid_) { @@ -430,17 +430,21 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { } } - // 设置chunkservice返回失败,那么mds每次重试都会去拉新的leader - // 127.0.0.1:9151:0,127.0.0.1:9152:0,127.0.0.1:9153:0是当前集群信息 - // 127.0.0.1:9151对应第一个chunkservice - // 设置rpc失败,会导致client将该chunkserverid上的leader copyset都标记为 + // If chunkservice returns failure, MDS will retry and fetch new leaders + // each time. + // The current cluster information is: 127.0.0.1:9151:0, 127.0.0.1:9152:0, + // 127.0.0.1:9153:0. + // 127.0.0.1:9151 corresponds to the first chunkservice. + // An RPC failure causes the client to mark all leader copysets on that + // chunkserver id as // leadermaychange chunkservice[0]->SetRPCFailed(); - // 现在写第二个chunk,第二个chunk与第一个chunk不在同一个copyset里,这次读写失败 - ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); - ASSERT_EQ(-2, fileinstance_.Read(buffer, 1 * chunk_size, length)); - // 获取第2个chunk的chunkid信息 + // Now, write to the second chunk; as it does not belong to the same + // copyset as the first chunk, this read and write attempt fails. + ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); + ASSERT_EQ(-2, fileinstance_.Read(buffer, 1 * chunk_size, length)); + // Obtain chunkid information for the second chunk. ChunkIDInfo_t chunkinfo2; rc = mc->GetChunkInfoByIndex(1, &chunkinfo2); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -449,33 +453,33 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { CopysetPeerInfo ci = mc->GetCopysetinfo(FLAGS_logic_pool_id, i); if (i == chunkinfo1.cpid_ || i == chunkinfo2.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // 这两个leader为该chunkserver的copyset的LeaderMayChange置位 - ASSERT_TRUE(ci.LeaderMayChange()); - } else { - // 对于当前copyset没有leader信息的就直接置位LeaderMayChange - ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); + // Set LeaderMayChange for both of these leaders of the + // chunkserver's copysets. + ASSERT_TRUE(ci.LeaderMayChange()); + } else { + // For copysets without current leader information, set + // LeaderMayChange directly. + ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); ASSERT_TRUE(ci.LeaderMayChange()); } } chunkservice[0]->ReSetRPCFailed(); - // 再次写第二个chunk,这时候获取leader成功后,会将LeaderMayChange置位fasle - // 第一个chunk对应的copyset依然LeaderMayChange为true - ASSERT_EQ(8192, fileinstance_.Write(buffer, 1 * chunk_size, length)); + // Write to the second chunk again; after successfully obtaining a leader, + // LeaderMayChange will be set to false.
+ // LeaderMayChange for the copyset corresponding to the first chunk remains +true. ASSERT_EQ(8192, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Read(buffer, 1 * chunk_size, length)); for (int i = 0; i < FLAGS_copyset_num; i++) { CopysetPeerInfo ci = mc->GetCopysetinfo(FLAGS_logic_pool_id, i); if (i == chunkinfo2.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset2的LeaderMayChange置位 + // Set LeaderMayChange for copyset2. ASSERT_FALSE(ci.LeaderMayChange()); } else if (i == chunkinfo1.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset1的LeaderMayChange保持原有状态 + // LeaderMayChange for copyset1 remains unchanged. ASSERT_TRUE(ci.LeaderMayChange()); } else { - // 对于当前copyset没有leader信息的就直接置位LeaderMayChange - ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); + // For copysets without current leader information, set +LeaderMayChange directly. ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); ASSERT_TRUE(ci.LeaderMayChange()); } } @@ -485,33 +489,33 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { butil::str2endpoint("127.0.0.1", 9152, &ep2); PeerId pd2(ep2); cliservice->SetPeerID(pd2); - // 设置rpc失败,迫使copyset切换leader,切换leader后读写成功 - chunkservice[0]->SetRPCFailed(); - // 读写第一个和第二个chunk + // Force an RPC failure to trigger copyset leader switch; successful read +and write after leader switch. chunkservice[0]->SetRPCFailed(); + // Read and write to the first and second chunks. ASSERT_EQ(8192, fileinstance_.Write(buffer, 0 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Read(buffer, 0 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Write(buffer, 0 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Read(buffer, 0 * chunk_size, length)); ASSERT_EQ(1, cliservice->GetInvokeTimes()); - // 这个时候 + // At this point for (int i = 0; i < FLAGS_copyset_num; i++) { CopysetPeerInfo ci = mc->GetCopysetinfo(FLAGS_logic_pool_id, i); if (i == chunkinfo2.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset2的LeaderMayChange置位 + // Set LeaderMayChange for copyset2 ASSERT_FALSE(ci.LeaderMayChange()); } else if (i == chunkinfo1.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset1的LeaderMayChange置位 + // Set LeaderMayChange for copyset1 ASSERT_FALSE(ci.LeaderMayChange()); } else { - // 对于当前copyset没有leader信息的就直接置位LeaderMayChange - ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); + // For the current copyset without leader information, directly set +LeaderMayChange ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); ASSERT_TRUE(ci.LeaderMayChange()); } } - // 验证copyset id信息更新 + // Verify the update of copyset ID information. // copyset id = 888, chunkserver id = 100 101 102 // copyset id = 999, chunkserver id = 102 103 104 CopysetPeerInfo csinfo1; @@ -568,8 +572,8 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { curve::client::CopysetPeerInfo peer9(103, addr); csinfo3.csinfos_.push_back(peer9); - // 更新copyset信息,chunkserver 104的信息被清除 - // 100,和 101上添加了新的copyset信息 + // Update copyset information, clearing the information for chunkserver 104. + // New copyset information has been added on chunk servers 100 and 101. 
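// ---------------------------------------------------------------------------
// The UpdateChunkserverCopysetInfo()/CopysetIDInfoIn() calls below maintain a
// reverse index from chunkserver id to the copysets it hosts. A sketch of that
// bookkeeping with plain containers (simplified names, not the real MetaCache):
#include <cassert>
#include <map>
#include <set>
#include <vector>

using ChunkServerId = int;
using CopysetId = int;

std::map<ChunkServerId, std::set<CopysetId>> gIndex;

// Re-registering a copyset with a new peer list removes it from chunkservers
// that left the membership and adds it to the new members.
void UpdateCopysetInfo(CopysetId cp, const std::vector<ChunkServerId>& peers) {
    for (auto& kv : gIndex) kv.second.erase(cp);
    for (ChunkServerId cs : peers) gIndex[cs].insert(cp);
}

int main() {
    UpdateCopysetInfo(888, {100, 101, 102});
    UpdateCopysetInfo(999, {102, 103, 104});
    UpdateCopysetInfo(999, {100, 101, 102});  // 104 drops out; 100/101 gain it
    assert(gIndex[100].count(999) == 1);
    assert(gIndex[104].count(999) == 0);
    return 0;
}
// ---------------------------------------------------------------------------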
mc->UpdateChunkserverCopysetInfo(FLAGS_logic_pool_id, csinfo3); ASSERT_TRUE(mc->CopysetIDInfoIn(100, FLAGS_logic_pool_id, 888)); ASSERT_TRUE(mc->CopysetIDInfoIn(100, FLAGS_logic_pool_id, 999)); @@ -596,7 +600,7 @@ TEST_F(TestLibcurveInterface, InterfaceExceptionTest) { // open not create file ASSERT_EQ(-1 * LIBCURVE_ERROR::FAILED, Open(filename.c_str(), &userinfo)); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9106, &ep); PeerId pd(ep); @@ -610,7 +614,7 @@ TEST_F(TestLibcurveInterface, InterfaceExceptionTest) { ASSERT_EQ(0, Init(configpath.c_str())); - char *buffer = new char[8 * 1024]; + char* buffer = new char[8 * 1024]; memset(buffer, 'a', 8 * 1024); CurveAioContext writeaioctx; @@ -623,7 +627,7 @@ TEST_F(TestLibcurveInterface, InterfaceExceptionTest) { ASSERT_EQ(-LIBCURVE_ERROR::BAD_FD, AioWrite(1234, &writeaioctx)); // aioread not opened file - char *readbuffer = new char[8 * 1024]; + char* readbuffer = new char[8 * 1024]; CurveAioContext readaioctx; readaioctx.buf = readbuffer; readaioctx.offset = 0; @@ -681,10 +685,10 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { LOG(INFO) << "here"; mdsclient_->Initialize(fopt.metaServerOpt); - fileinstance_.Initialize( - "/UnstableChunkserverTest", mdsclient_, userinfo, OpenFlags{}, fopt); + fileinstance_.Initialize("/UnstableChunkserverTest", mdsclient_, userinfo, + OpenFlags{}, fopt); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9151, &ep); PeerId pd(ep); @@ -699,14 +703,14 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { std::this_thread::sleep_for(std::chrono::milliseconds(1000)); int fd = fileinstance_.Open(); - MetaCache *mc = fileinstance_.GetIOManager4File()->GetMetaCache(); + MetaCache* mc = fileinstance_.GetIOManager4File()->GetMetaCache(); ASSERT_NE(fd, -1); - CliServiceFake *cliservice = mds.GetCliService(); - std::vector chunkservice = mds.GetFakeChunkService(); + CliServiceFake* cliservice = mds.GetCliService(); + std::vector chunkservice = mds.GetFakeChunkService(); - char *buffer = new char[8 * 1024]; + char* buffer = new char[8 * 1024]; uint64_t offset = 0; uint64_t length = 8 * 1024; @@ -722,7 +726,8 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { ASSERT_EQ(length, fileinstance_.Write(buffer, offset, length)); ASSERT_EQ(length, fileinstance_.Read(buffer, offset, length)); - // metacache中被写过的copyset leadermaychange都处于正常状态 + // The copyset leadermaychanges that have been written in Metacache are all + // in a normal state ChunkIDInfo_t chunkinfo1; MetaCacheErrorType rc = mc->GetChunkInfoByIndex(0, &chunkinfo1); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -740,19 +745,20 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { mds.EnableNetUnstable(10000); - // 写2次,读2次,每次请求重试3次 - // 因为在chunkserver端设置了延迟,导致每次请求都会超时 - // unstable阈值为10,所以第11次请求返回时,对应的chunkserver被标记为unstable - // leader在对应chunkserver上的copyset会设置leaderMayChange为true - // 下次发起请求时,会先去刷新leader信息, - // 由于leader没有发生改变,而且延迟仍然存在 - // 所以第12次请求仍然超时,leaderMayChange仍然为true + // Write twice, read twice, and retry three times per request + // Due to the delay set on the chunkserver side, each request will time out + // The unstable threshold is 10, so when the 11th request returns, the + // corresponding chunkserver is marked as unstable The copyset of the leader + // on the corresponding chunkserver will set leaderMayChange to true The + // next time a request is made, the leader information will be refreshed + // first, Since the leader has not changed and the delay still 
exists, the
+ // 12th request also times out and leaderMayChange remains true
 ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length));
 ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length));
 ASSERT_EQ(-2, fileinstance_.Read(buffer, 1 * chunk_size, length));
 ASSERT_EQ(-2, fileinstance_.Read(buffer, 1 * chunk_size, length));
- // 获取第2个chunk的chunkid信息
+ // Obtain the chunkid information for the second chunk
 ChunkIDInfo_t chunkinfo2;
 rc = mc->GetChunkInfoByIndex(1, &chunkinfo2);
 ASSERT_EQ(rc, MetaCacheErrorType::OK);
@@ -769,9 +775,10 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) {
 }
 }
- // 当copyset处于unstable状态时
- // 不进入超时时间指数退避逻辑,rpc超时时间设置为默认值
- // 所以每个请求总时间为3s,4个请求需要12s
+ // When the copyset is in an unstable state, the exponential timeout
+ // backoff logic is skipped and the rpc timeout keeps its default value,
+ // so each request takes 3 seconds in total and 4 requests take 12 seconds
 auto start = TimeUtility::GetTimeofDayMs();
 ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length));
 ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length));
@@ -783,9 +790,10 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) {
 mds.DisableNetUnstable();
- // 取消延迟,再次读写第2个chunk
- // 获取leader信息后,会将leaderMayChange置为false
- // 第一个chunk对应的copyset依赖leaderMayChange为true
+ // Cancel the delay and read/write the second chunk again. After the leader
+ // information is obtained, leaderMayChange is set back to false, while
+ // leaderMayChange of the copyset for the first chunk remains true
 ASSERT_EQ(8192, fileinstance_.Write(buffer, 1 * chunk_size, length));
 ASSERT_EQ(8192, fileinstance_.Read(buffer, 1 * chunk_size, length));
 for (int i = 0; i < FLAGS_copyset_num; ++i) {
@@ -809,7 +817,8 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) {
 PeerId pd2(ep2);
 cliservice->SetPeerID(pd2);
- // 设置rcp返回失败,迫使copyset切换leader, 切换leader后读写成功
+ // Set the rpc to return failure, forcing the copyset to switch leader.
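// ---------------------------------------------------------------------------
// A sketch of the timeout selection behind the 12-second bound asserted above:
// while a copyset is marked unstable, the client keeps the default RPC timeout
// instead of backing off exponentially. Constants are illustrative, not the
// actual configuration values:
#include <algorithm>
#include <cassert>
#include <cstdint>

uint64_t RpcTimeoutMs(int retry, bool copysetUnstable) {
    const uint64_t kDefaultTimeoutMs = 1000;
    const uint64_t kMaxTimeoutMs = 8000;
    if (copysetUnstable) {
        return kDefaultTimeoutMs;  // unstable: no exponential backoff
    }
    // Stable path: double the timeout per retry, capped at the maximum.
    return std::min(kDefaultTimeoutMs << retry, kMaxTimeoutMs);
}

int main() {
    assert(RpcTimeoutMs(5, true) == 1000);   // stays at the default
    assert(RpcTimeoutMs(5, false) == 8000);  // backed off up to the cap
    return 0;
}
// ---------------------------------------------------------------------------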
After + // switching leaders, read and write succeeded chunkservice[0]->SetRPCFailed(); ASSERT_EQ(8192, fileinstance_.Write(buffer, 0 * chunk_size, length)); @@ -872,7 +881,7 @@ TEST_F(TestLibcurveInterface, ResumeTimeoutBackoff) { fileinstance_.Initialize("/ResumeTimeoutBackoff", mdsclient_, userinfo, OpenFlags{}, fopt); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9151, &ep); PeerId pd(ep); @@ -887,13 +896,13 @@ TEST_F(TestLibcurveInterface, ResumeTimeoutBackoff) { std::this_thread::sleep_for(std::chrono::milliseconds(1000)); int fd = fileinstance_.Open(); - MetaCache *mc = fileinstance_.GetIOManager4File()->GetMetaCache(); + MetaCache* mc = fileinstance_.GetIOManager4File()->GetMetaCache(); ASSERT_NE(fd, -1); - std::vector chunkservice = mds.GetFakeChunkService(); + std::vector chunkservice = mds.GetFakeChunkService(); - char *buffer = new char[8 * 1024]; + char* buffer = new char[8 * 1024]; uint64_t offset = 0; uint64_t length = 8 * 1024; @@ -909,7 +918,8 @@ TEST_F(TestLibcurveInterface, ResumeTimeoutBackoff) { ASSERT_EQ(length, fileinstance_.Write(buffer, offset, length)); ASSERT_EQ(length, fileinstance_.Read(buffer, offset, length)); - // metacache中被写过的copyset leadermaychange都处于正常状态 + // The copyset leadermaychanges that have been written in Metacache are all + // in a normal state ChunkIDInfo_t chunkinfo1; MetaCacheErrorType rc = mc->GetChunkInfoByIndex(0, &chunkinfo1); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -927,17 +937,18 @@ TEST_F(TestLibcurveInterface, ResumeTimeoutBackoff) { mds.EnableNetUnstable(10000); - // 写2次, 每次请求重试11次 - // 因为在chunkserver端设置了延迟,导致每次请求都会超时 - // 第一个请求重试11次,会把chunkserver标记为unstable + // Write twice, retry 11 times per request + // Due to the delay set on the chunkserver side, each request will time out + // The first request will be retried 11 times and the chunkserver will be + // marked as unstable ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); - // 第二个写请求,由于其对应的copyset leader may change - // 第1次请求超时时间为1s - // 后面4次重试由于leader may change所以超时时间也是1s - // 第5-11次请求由于重试次数超过minRetryTimesForceTimeoutBackoff - // 所以超时时间都进入指数退避,为8s * 6 = 48s - // 所以第二次写请求,总共耗时53s,并写入失败 + // The second write request, due to its corresponding copyset leader may + // change The first request timeout is 1 second The timeout for the next + // four retries is also 1 second due to the leader may change 5th to 11th + // requests due to more than minRetryTimesForceTimeoutBackoff retries So all + // timeout times enter exponential backoff, which is 8s * 6 = 48s So the + // second write request took a total of 53 seconds and failed to write auto start = TimeUtility::GetTimeofDayMs(); ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); auto elapsedMs = TimeUtility::GetTimeofDayMs() - start; @@ -961,7 +972,7 @@ TEST_F(TestLibcurveInterface, InterfaceStripeTest) { uint64_t size = 100 * 1024 * 1024 * 1024ul; FileClient fc; - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9115, &ep); PeerId pd(ep); @@ -975,12 +986,12 @@ TEST_F(TestLibcurveInterface, InterfaceStripeTest) { ASSERT_EQ(0, fc.Init(configpath)); - FakeMDSCurveFSService *service = NULL; + FakeMDSCurveFSService* service = NULL; service = mds.GetMDSService(); ::curve::mds::CreateFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); 
service->SetCreateFileFakeReturn(fakeret); CreateFileContext context; context.pagefile = true; @@ -991,7 +1002,7 @@ TEST_F(TestLibcurveInterface, InterfaceStripeTest) { ASSERT_EQ(LIBCURVE_ERROR::OK, ret); response.set_statuscode(::curve::mds::StatusCode::kFileExists); - fakeret = new FakeReturn(nullptr, static_cast(&response)); + fakeret = new FakeReturn(nullptr, static_cast(&response)); service->SetCreateFileFakeReturn(fakeret); context.pagefile = true; context.name = filename2; @@ -1003,7 +1014,7 @@ TEST_F(TestLibcurveInterface, InterfaceStripeTest) { ASSERT_EQ(LIBCURVE_ERROR::EXISTS, -ret); FileStatInfo_t fsinfo; - ::curve::mds::FileInfo *info = new curve::mds::FileInfo; + ::curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::GetFileInfoResponse getinforesponse; info->set_filename(filename2); info->set_id(1); @@ -1017,8 +1028,8 @@ TEST_F(TestLibcurveInterface, InterfaceStripeTest) { info->set_stripecount(4); getinforesponse.set_allocated_fileinfo(info); getinforesponse.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakegetinfo = - new FakeReturn(nullptr, static_cast(&getinforesponse)); + FakeReturn* fakegetinfo = + new FakeReturn(nullptr, static_cast(&getinforesponse)); service->SetGetFileInfoFakeReturn(fakegetinfo); ret = fc.StatFile(filename2, userinfo, &fsinfo); ASSERT_EQ(1024 * 1024, fsinfo.stripeUnit); diff --git a/test/client/mds_failover_test.cpp b/test/client/mds_failover_test.cpp index e95912f610..c466457d99 100644 --- a/test/client/mds_failover_test.cpp +++ b/test/client/mds_failover_test.cpp @@ -20,41 +20,41 @@ * Author: tongguangxun */ -#include #include #include +#include +#include +#include //NOLINT #include -#include //NOLINT -#include //NOLINT +#include //NOLINT #include -#include +#include "include/client/libcurve.h" #include "src/client/client_common.h" +#include "src/client/client_config.h" +#include "src/client/config_info.h" #include "src/client/file_instance.h" -#include "test/client/fake/mockMDS.h" -#include "src/client/metacache.h" -#include "test/client/fake/mock_schedule.h" -#include "include/client/libcurve.h" #include "src/client/libcurve_file.h" -#include "src/client/client_config.h" -#include "src/client/service_helper.h" #include "src/client/mds_client.h" -#include "src/client/config_info.h" -#include "test/client/fake/fakeMDS.h" +#include "src/client/metacache.h" #include "src/client/metacache_struct.h" +#include "src/client/service_helper.h" #include "src/common/net_common.h" +#include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mockMDS.h" +#include "test/client/fake/mock_schedule.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" namespace curve { namespace client { -// 测试mds failover切换状态机 +// Testing mds failover switching state machine TEST(MDSChangeTest, MDSFailoverTest) { RPCExcutorRetryPolicy rpcexcutor; - MetaServerOption metaopt; + MetaServerOption metaopt; metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9903"); metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9904"); metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9905"); @@ -70,12 +70,15 @@ TEST(MDSChangeTest, MDSFailoverTest) { int mds1RetryTimes = 0; int mds2RetryTimes = 0; - // 场景1: mds0、1、2, currentworkindex = 0, mds0, mds1, mds2都宕机, - // 发到其rpc都以EHOSTDOWN返回,导致上层client会一直切换mds重试 - // 按照0-->1-->2持续进行 - // 每次rpc返回-EHOSTDOWN,会直接触发RPC切换。最终currentworkindex没有切换 + // Scenario 1: mds0, 1, 2, currentworkindex=0, mds0, mds1, and mds2 are all + // down, + // All RPCs sent to them are returned as EHOSTDOWN, resulting 
in + // upper level clients constantly switching to mds and retrying + // Continue according to 0-->1-->2 + // Every time rpc returns -EHOSTDOWN, it will directly trigger RPC + // switching. The final currentworkindex did not switch auto task1 = [&](int mdsindex, uint64_t rpctimeoutMS, - brpc::Channel* channel, brpc::Controller* cntl)->int { + brpc::Channel* channel, brpc::Controller* cntl) -> int { if (mdsindex == 0) { mds0RetryTimes++; } @@ -91,12 +94,13 @@ TEST(MDSChangeTest, MDSFailoverTest) { }; uint64_t startMS = TimeUtility::GetTimeofDayMs(); - // 控制面接口调用, 1000为本次rpc的重试总时间 + // Control surface interface call, 1000 is the total retry time of this RPC rpcexcutor.DoRPCTask(task1, 1000); uint64_t endMS = TimeUtility::GetTimeofDayMs(); ASSERT_GT(endMS - startMS, 1000 - 1); - // 本次重试为轮询重试,每个mds的重试次数应该接近,不超过总的mds数量 + // This retry is a polling retry, and the number of retries per mds should + // be close to and not exceed the total number of mds ASSERT_LT(abs(mds0RetryTimes - mds1RetryTimes), 3); ASSERT_LT(abs(mds2RetryTimes - mds1RetryTimes), 3); @@ -106,16 +110,18 @@ TEST(MDSChangeTest, MDSFailoverTest) { ASSERT_GT(endMS - startMS, 3000 - 1); ASSERT_EQ(0, rpcexcutor.GetCurrentWorkIndex()); - // 场景2:mds0、1、2, currentworkindex = 0, mds0宕机,并且这时候将正在工作的 - // mds索引切换到index2,预期client在index=0重试之后会直接切换到index 2 - // mds2这这时候直接返回OK,rpc停止重试。 - // 预期client总共发送两次rpc,一次发送到mds0,另一次发送到mds2,跳过中间的 - // mds1。 + // Scenario 2: mds0, 1, 2, currentworkindex = 0, mds0 goes down, and it will + // be working at this time + // Mds index switches to index2, and it is expected that the client + // will directly switch to index2 after retrying with index = 0 At + // this point, mds2 directly returns OK and rpc stops trying again. + // Expected client to send a total of two RPCs, one to mds0 and the + // other to mds2, skipping the middle mds1。 mds0RetryTimes = 0; mds1RetryTimes = 0; mds2RetryTimes = 0; auto task2 = [&](int mdsindex, uint64_t rpctimeoutMS, - brpc::Channel* channel, brpc::Controller* cntl)->int { + brpc::Channel* channel, brpc::Controller* cntl) -> int { if (mdsindex == 0) { mds0RetryTimes++; rpcexcutor.SetCurrentWorkIndex(2); @@ -129,7 +135,8 @@ TEST(MDSChangeTest, MDSFailoverTest) { if (mdsindex == 2) { mds2RetryTimes++; - // 本次返回ok,那么RPC应该成功了,不会再重试 + // If OK is returned this time, then RPC should have succeeded and + // will not try again return LIBCURVE_ERROR::OK; } @@ -144,16 +151,17 @@ TEST(MDSChangeTest, MDSFailoverTest) { ASSERT_EQ(mds1RetryTimes, 0); ASSERT_EQ(mds2RetryTimes, 1); - // 场景3:mds0、1、2,currentworkindex = 1,且mds1宕机了, - // 这时候会切换到mds0和mds2 - // 在切换到2之后,mds1又恢复了,这时候切换到mds1,然后rpc发送成功。 - // 这时候的切换顺序为1->2->0, 1->2->0, 1。 + // Scenario 3: mds0, 1, 2, currentworkindex = 1, and mds1 is down, + // At this point, it will switch to mds0 and mds2 + // After switching to 2, mds1 resumed, and then switched to mds1, and + // the rpc was successfully sent. At this point, the switching order is + // 1->2->0, 1->2->0, 1. 
 mds0RetryTimes = 0;
 mds1RetryTimes = 0;
 mds2RetryTimes = 0;
 rpcexcutor.SetCurrentWorkIndex(1);
 auto task3 = [&](int mdsindex, uint64_t rpctimeoutMS,
- brpc::Channel* channel, brpc::Controller* cntl)->int {
+ brpc::Channel* channel, brpc::Controller* cntl) -> int {
 if (mdsindex == 0) {
 mds0RetryTimes++;
 return -ECONNRESET;
 }
@@ -161,7 +169,8 @@ TEST(MDSChangeTest, MDSFailoverTest) {
 if (mdsindex == 1) {
 mds1RetryTimes++;
- // 当在mds1上重试到第三次的时候向上返回成功,停止重试
+ // On the third retry against mds1, success is returned upwards
+ // and retrying stops
 if (mds1RetryTimes == 3) {
 return LIBCURVE_ERROR::OK;
 }
@@ -186,22 +195,24 @@ TEST(MDSChangeTest, MDSFailoverTest) {
 ASSERT_EQ(1, rpcexcutor.GetCurrentWorkIndex());
- // 场景4:mds0、1、2, currentWorkindex = 0, 但是发往mds1的rpc请求一直超时
- // 最后rpc返回结果是超时.
- // 对于超时的mds节点会连续重试mds.maxFailedTimesBeforeChangeMDS后切换
- // 当前mds.maxFailedTimesBeforeChangeMDS=2。
- // 所以重试逻辑应该是:0->0->1->2, 0->0->1->2, 0->0->1->2, ...
+ // Scenario 4: mds0, 1, 2, currentWorkindex = 0, but the rpc requests sent
+ // to mds1 keep timing out, and the final rpc result is a timeout.
+ // A timed-out mds node is retried mds.maxFailedTimesBeforeChangeMDS times
+ // in a row before switching; currently mds.maxFailedTimesBeforeChangeMDS=2.
+ // So the retry order should be: 0->0->1->2, 0->0->1->2, 0->0->1->2, ...
 LOG(INFO) << "case 4";
 mds0RetryTimes = 0;
 mds1RetryTimes = 0;
 mds2RetryTimes = 0;
 rpcexcutor.SetCurrentWorkIndex(0);
 auto task4 = [&](int mdsindex, uint64_t rpctimeoutMS,
- brpc::Channel* channel, brpc::Controller* cntl)->int {
+ brpc::Channel* channel, brpc::Controller* cntl) -> int {
 if (mdsindex == 0) {
 mds0RetryTimes++;
- return mds0RetryTimes % 2 == 0 ? -brpc::ERPCTIMEDOUT
- : -ETIMEDOUT;
+ return mds0RetryTimes % 2 == 0 ? -brpc::ERPCTIMEDOUT : -ETIMEDOUT;
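// ---------------------------------------------------------------------------
// Scenario 4 above hinges on timeouts being handled differently from
// EHOSTDOWN: a timed-out mds is retried maxFailedTimesBeforeChangeMDS times in
// a row before the index advances, which yields the 0->0->1->2 pattern. A toy
// model of that policy (enum and names are illustrative, not the real code):
#include <cassert>
#include <vector>

enum class Rc { kTimeout, kHostDown };

std::vector<int> RetryOrder(int attempts, int maxFailedBeforeChange) {
    std::vector<int> order;
    int idx = 0, timeoutsInARow = 0;
    for (int i = 0; i < attempts; ++i) {
        order.push_back(idx);
        Rc rc = (idx == 0) ? Rc::kTimeout : Rc::kHostDown;  // only mds0 times out
        if (rc == Rc::kTimeout && ++timeoutsInARow < maxFailedBeforeChange) {
            continue;                   // retry the same mds once more
        }
        timeoutsInARow = 0;
        idx = (idx + 1) % 3;            // otherwise switch to the next mds
    }
    return order;
}

int main() {
    const std::vector<int> expected = {0, 0, 1, 2, 0, 0, 1, 2};
    assert(RetryOrder(8, 2) == expected);
    return 0;
}
// ---------------------------------------------------------------------------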
 }
 if (mdsindex == 1) {
@@ -222,17 +233,17 @@ TEST(MDSChangeTest, MDSFailoverTest) {
 endMS = TimeUtility::GetTimeofDayMs();
 ASSERT_GT(endMS - startMS, 3000 - 1);
 ASSERT_EQ(0, rpcexcutor.GetCurrentWorkIndex());
- // 本次重试为轮询重试,每个mds的重试次数应该接近,不超过总的mds数量
+ // This is a polling retry, so the retry counts of the individual mds
+ // nodes should be close, differing by less than the total number of mds
 ASSERT_GT(mds0RetryTimes, mds1RetryTimes + mds2RetryTimes);
- // 场景5:mds0、1、2,currentWorkIndex = 0
- // 但是rpc请求前10次全部返回EHOSTDOWN
- // mds重试睡眠10ms,所以总共耗时100ms时间
+ // Scenario 5: mds0, 1, 2, currentWorkIndex = 0,
+ // but the first 10 rpc requests all return EHOSTDOWN.
+ // Each mds retry sleeps 10ms, so this takes 100ms in total
 rpcexcutor.SetCurrentWorkIndex(0);
 int hostDownTimes = 10;
 auto task5 = [&](int mdsindex, uint64_t rpctimeoutMs,
- brpc::Channel* channel,
- brpc::Controller* cntl) {
+ brpc::Channel* channel, brpc::Controller* cntl) {
 static int count = 0;
 if (++count <= hostDownTimes) {
 return -EHOSTDOWN;
@@ -241,27 +252,28 @@ TEST(MDSChangeTest, MDSFailoverTest) {
 return 0;
 };
 startMS = TimeUtility::GetTimeofDayMs();
- rpcexcutor.DoRPCTask(task5, 10000); // 总重试时间10s
+ rpcexcutor.DoRPCTask(task5, 10000); // Total retry time 10s
 endMS = TimeUtility::GetTimeofDayMs();
 ASSERT_GE(endMS - startMS, 100);
- // 场景6: mds在重试过程中一直返回EHOSTDOWN,总共重试5s
+ // Scenario 6: mds keeps returning EHOSTDOWN during retries; the total
+ // retry time is 5s
 rpcexcutor.SetCurrentWorkIndex(0);
 int calledTimes = 0;
 auto task6 = [&](int mdsindex, uint64_t rpctimeoutMs,
- brpc::Channel* channel,
- brpc::Controller* cntl) {
+ brpc::Channel* channel, brpc::Controller* cntl) {
 ++calledTimes;
 return -EHOSTDOWN;
 };
 startMS = TimeUtility::GetTimeofDayMs();
- rpcexcutor.DoRPCTask(task6, 5 * 1000); // 总重试时间5s
+ rpcexcutor.DoRPCTask(task6, 5 * 1000); // Total retry time 5s
 endMS = TimeUtility::GetTimeofDayMs();
 ASSERT_GE(endMS - startMS, 5 * 1000 - 1);
- // 每次hostdown情况下,睡眠10ms,总重试时间5s,所以总共重试次数小于等于500次
- // 为了尽量减少误判,所以加入10次冗余
+ // Each hostdown response sleeps 10ms and the total retry time is 5s, so
+ // there are at most about 500 retries; 10 extra attempts are allowed to
+ // minimize false positives
 LOG(INFO) << "called times " << calledTimes;
 ASSERT_LE(calledTimes, 510);
}
@@ -269,7 +281,7 @@
 } // namespace client
 } // namespace curve
-const std::vector registConfOff {
+const std::vector registConfOff{
 std::string("mds.listen.addr=127.0.0.1:9903,127.0.0.1:9904,127.0.0.1:9905"),
 std::string("rpcRetryTimes=3"),
 std::string("global.logPath=./runlog/"),
@@ -281,10 +293,9 @@
 std::string("metacache.rpcRetryIntervalUS=500"),
 std::string("mds.rpcRetryIntervalUS=500"),
 std::string("schedule.threadpoolSize=2"),
- std::string("mds.registerToMDS=false")
-};
+ std::string("mds.registerToMDS=false")};
-const std::vector registConfON {
+const std::vector registConfON{
 std::string("mds.listen.addr=127.0.0.1:9903,127.0.0.1:9904,127.0.0.1:9905"),
 std::string("global.logPath=./runlog/"),
 std::string("synchronizeRPCTimeoutMS=500"),
@@ -297,14 +308,14 @@
 std::string("metacache.rpcRetryIntervalUS=500"),
 std::string("mds.rpcRetryIntervalUS=500"),
 std::string("schedule.threadpoolSize=2"),
- std::string("mds.registerToMDS=true")
-};
-
-std::string mdsMetaServerAddr = "127.0.0.1:9903,127.0.0.1:9904,127.0.0.1:9905"; // NOLINT
-uint32_t segment_size = 1 * 1024 * 1024 *
1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string configpath = "./test/client/configs/mds_failover.conf"; // NOLINT -int main(int argc, char ** argv) { + std::string("mds.registerToMDS=true")}; + +std::string mdsMetaServerAddr = + "127.0.0.1:9903,127.0.0.1:9904,127.0.0.1:9905"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string configpath = "./test/client/configs/mds_failover.conf"; // NOLINT +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); google::ParseCommandLineFlags(&argc, &argv, false); diff --git a/test/client/mock/mock_chunkservice.h b/test/client/mock/mock_chunkservice.h index 3891ce60bf..134f404a85 100644 --- a/test/client/mock/mock_chunkservice.h +++ b/test/client/mock/mock_chunkservice.h @@ -25,8 +25,8 @@ #include #include -#include #include +#include #include @@ -39,48 +39,48 @@ namespace client { using ::testing::_; using ::testing::Invoke; -using curve::chunkserver::ChunkService; using curve::chunkserver::CHUNK_OP_STATUS; +using curve::chunkserver::ChunkService; -/* 当前仅仅模拟单 chunk read/write */ +/*Currently, only single chunk read/write is simulated*/ class FakeChunkServiceImpl : public ChunkService { public: virtual ~FakeChunkServiceImpl() {} - void WriteChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void WriteChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); chunkIds_.insert(request->chunkid()); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); ::memcpy(chunk_ + request->offset(), cntl->request_attachment().to_string().c_str(), request->size()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } - void ReadChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void ReadChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); char buff[4096] = {0}; ::memcpy(buff, chunk_ + request->offset(), request->size()); cntl->response_attachment().append(buff, request->size()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } - void ReadChunkSnapshot(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void ReadChunkSnapshot(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); char buff[4096] = {0}; ::memcpy(buff, chunk_ + request->offset(), request->size()); 
cntl->response_attachment().append(buff, request->size()); @@ -88,113 +88,114 @@ class FakeChunkServiceImpl : public ChunkService { } void DeleteChunkSnapshotOrCorrectSn( - ::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); LOG(INFO) << "delete chunk snapshot: " << request->chunkid(); if (chunkIds_.find(request->chunkid()) == chunkIds_.end()) { - response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); //NOLINT - LOG(INFO) << "delete chunk snapshot: " - << request->chunkid() << " not exist"; + response->set_status( + CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); // NOLINT + LOG(INFO) << "delete chunk snapshot: " << request->chunkid() + << " not exist"; return; } chunkIds_.erase(request->chunkid()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } - void GetChunkInfo(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::GetChunkInfoRequest *request, - ::curve::chunkserver::GetChunkInfoResponse *response, - google::protobuf::Closure *done) { + void GetChunkInfo(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::GetChunkInfoRequest* request, + ::curve::chunkserver::GetChunkInfoResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); response->add_chunksn(1); response->add_chunksn(2); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } - void CreateCloneChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void CreateCloneChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } - void RecoverChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void RecoverChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } private: std::set chunkIds_; - /* 由于 bthread 栈空间的限制,这里不会开很大的空间,如果测试需要更大的空间 - * 请在堆上申请 */ + /* Due to the limitations of the bthread stack space, there will not be a + * large amount of space opened here. 
If testing requires more space,
+ * please allocate it on the heap. */
 char chunk_[4096] = {0};
};

class MockChunkServiceImpl : public ChunkService {
 public:
- MOCK_METHOD4(WriteChunk, void(::google::protobuf::RpcController
- *controller,
- const ::curve::chunkserver::ChunkRequest *request,
- ::curve::chunkserver::ChunkResponse *response,
- google::protobuf::Closure *done));
- MOCK_METHOD4(ReadChunk, void(::google::protobuf::RpcController
- *controller,
- const ::curve::chunkserver::ChunkRequest *request,
- ::curve::chunkserver::ChunkResponse *response,
- google::protobuf::Closure *done));
- MOCK_METHOD4(ReadChunkSnapshot, void(::google::protobuf::RpcController
- *controller,
- const ::curve::chunkserver::ChunkRequest *request,
- ::curve::chunkserver::ChunkResponse *response,
- google::protobuf::Closure *done));
- MOCK_METHOD4(DeleteChunkSnapshotOrCorrectSn, void(
- ::google::protobuf::RpcController
- *controller,
- const ::curve::chunkserver::ChunkRequest *request,
- ::curve::chunkserver::ChunkResponse *response,
- google::protobuf::Closure *done));
- MOCK_METHOD4(GetChunkInfo, void(::google::protobuf::RpcController
- *controller,
- const ::curve::chunkserver::GetChunkInfoRequest *request,
- ::curve::chunkserver::GetChunkInfoResponse *response,
- google::protobuf::Closure *done));
+ MOCK_METHOD4(WriteChunk,
+ void(::google::protobuf::RpcController* controller,
+ const ::curve::chunkserver::ChunkRequest* request,
+ ::curve::chunkserver::ChunkResponse* response,
+ google::protobuf::Closure* done));
+ MOCK_METHOD4(ReadChunk,
+ void(::google::protobuf::RpcController* controller,
+ const ::curve::chunkserver::ChunkRequest* request,
+ ::curve::chunkserver::ChunkResponse* response,
+ google::protobuf::Closure* done));
+ MOCK_METHOD4(ReadChunkSnapshot,
+ void(::google::protobuf::RpcController* controller,
+ const ::curve::chunkserver::ChunkRequest* request,
+ ::curve::chunkserver::ChunkResponse* response,
+ google::protobuf::Closure* done));
+ MOCK_METHOD4(DeleteChunkSnapshotOrCorrectSn,
+ void(::google::protobuf::RpcController* controller,
+ const ::curve::chunkserver::ChunkRequest* request,
+ ::curve::chunkserver::ChunkResponse* response,
+ google::protobuf::Closure* done));
+ MOCK_METHOD4(GetChunkInfo,
+ void(::google::protobuf::RpcController* controller,
+ const ::curve::chunkserver::GetChunkInfoRequest* request,
+ ::curve::chunkserver::GetChunkInfoResponse* response,
+ google::protobuf::Closure* done));
 MOCK_METHOD4(CreateCloneChunk,
 void(::google::protobuf::RpcController* controller,
 const ::curve::chunkserver::ChunkRequest* request,
 ::curve::chunkserver::ChunkResponse* response,
 google::protobuf::Closure* done));
- MOCK_METHOD4(RecoverChunk, void(::google::protobuf::RpcController
- *controller,
- const ::curve::chunkserver::ChunkRequest *request,
- ::curve::chunkserver::ChunkResponse *response,
- google::protobuf::Closure *done));
- MOCK_METHOD4(UpdateEpoch, void(::google::protobuf::RpcController
- *controller,
- const ::curve::chunkserver::UpdateEpochRequest *request,
- ::curve::chunkserver::UpdateEpochResponse *response,
- google::protobuf::Closure *done));
+ MOCK_METHOD4(RecoverChunk,
+ void(::google::protobuf::RpcController* controller,
+ const ::curve::chunkserver::ChunkRequest* request,
+ ::curve::chunkserver::ChunkResponse* response,
+ google::protobuf::Closure* done));
+ MOCK_METHOD4(UpdateEpoch,
+ void(::google::protobuf::RpcController* controller,
+ const ::curve::chunkserver::UpdateEpochRequest* request,
+ const ::curve::chunkserver::UpdateEpochResponse* response,
+ google::protobuf::Closure*
done)); void DelegateToFake() { ON_CALL(*this, WriteChunk(_, _, _, _)) - .WillByDefault(Invoke(&fakeChunkService, - &FakeChunkServiceImpl::WriteChunk)); + .WillByDefault( + Invoke(&fakeChunkService, &FakeChunkServiceImpl::WriteChunk)); ON_CALL(*this, ReadChunk(_, _, _, _)) - .WillByDefault(Invoke(&fakeChunkService, - &FakeChunkServiceImpl::ReadChunk)); + .WillByDefault( + Invoke(&fakeChunkService, &FakeChunkServiceImpl::ReadChunk)); } private: FakeChunkServiceImpl fakeChunkService; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // TEST_CLIENT_MOCK_MOCK_CHUNKSERVICE_H_ diff --git a/test/client/request_scheduler_test.cpp b/test/client/request_scheduler_test.cpp index 9ff0636530..bf75580957 100644 --- a/test/client/request_scheduler_test.cpp +++ b/test/client/request_scheduler_test.cpp @@ -20,18 +20,19 @@ * Author: wudemiao */ -#include -#include -#include +#include "src/client/request_scheduler.h" + #include +#include #include +#include +#include -#include "src/client/request_scheduler.h" #include "src/client/client_common.h" -#include "test/client/mock/mock_meta_cache.h" +#include "src/common/concurrent/count_down_event.h" #include "test/client/mock/mock_chunkservice.h" +#include "test/client/mock/mock_meta_cache.h" #include "test/client/mock/mock_request_context.h" -#include "src/common/concurrent/count_down_event.h" namespace curve { namespace client { @@ -49,8 +50,9 @@ TEST(RequestSchedulerTest, fake_server_test) { brpc::Server server; std::string listenAddr = "127.0.0.1:9109"; FakeChunkServiceImpl fakeChunkService; - ASSERT_EQ(server.AddService(&fakeChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&fakeChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; ASSERT_EQ(server.Start(listenAddr.c_str(), &option), 0); @@ -94,7 +96,7 @@ TEST(RequestSchedulerTest, fake_server_test) { /* error request schedule test when scheduler not run */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); reqCtx->writeData_.append(writebuff, len); @@ -102,17 +104,17 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(0); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(-1, requestScheduler.ScheduleRequest(reqCtxs)); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); reqCtx->writeData_.append(writebuff, len); @@ -120,7 +122,7 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(0); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -147,7 +149,7 @@ TEST(RequestSchedulerTest, fake_server_test) { const uint64_t len1 = 16; /* write should with attachment size */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* 
reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -157,18 +159,18 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -177,12 +179,12 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -190,11 +192,10 @@ TEST(RequestSchedulerTest, fake_server_test) { ASSERT_EQ(0, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; ::memset(writebuff1, 'a', 8); ::memset(writebuff1 + 8, '\0', 8); @@ -203,34 +204,33 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; memset(readbuff1, '0', 16); reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -258,9 +258,9 @@ TEST(RequestSchedulerTest, fake_server_test) { } // read snapshot - // 1. 先 write snapshot + // 1. 
Write the snapshot first
 {
- RequestContext *reqCtx = new FakeRequestContext();
+ RequestContext* reqCtx = new FakeRequestContext();
 reqCtx->optype_ = OpType::WRITE;
 reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -272,35 +272,34 @@
 reqCtx->rawlength_ = len1;
 curve::common::CountDownEvent cond(1);
- RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+ RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
 reqDone->SetFileMetric(&fm);
 reqDone->SetIOTracker(&iot);
 reqCtx->done_ = reqDone;
- std::vector reqCtxs;
+ std::vector reqCtxs;
 reqCtxs.push_back(reqCtx);
 ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
 cond.Wait();
 }
- // 2. 再 read snapshot 验证一遍
+ // 2. Then read the snapshot back to verify
 {
- RequestContext *reqCtx = new FakeRequestContext();
+ RequestContext* reqCtx = new FakeRequestContext();
 reqCtx->optype_ = OpType::READ_SNAP;
 reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
- reqCtx->seq_ = sn;
 memset(readbuff1, '0', 16);
 reqCtx->offset_ = 0;
 reqCtx->rawlength_ = len1;
 curve::common::CountDownEvent cond(1);
- RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+ RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
 reqDone->SetFileMetric(&fm);
 reqDone->SetIOTracker(&iot);
 reqCtx->done_ = reqDone;
- std::vector reqCtxs;
+ std::vector reqCtxs;
 reqCtxs.push_back(reqCtx);
 ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
 cond.Wait();
@@ -309,47 +308,45 @@ TEST(RequestSchedulerTest, fake_server_test) {
 ASSERT_EQ(reqCtx->readData_, expectReadData);
 ASSERT_EQ(0, reqDone->GetErrorCode());
 }
- // 3. 在 delete snapshot
+ // 3. Then delete the snapshot
 {
- RequestContext *reqCtx = new FakeRequestContext();
+ RequestContext* reqCtx = new FakeRequestContext();
 reqCtx->optype_ = OpType::DELETE_SNAP;
 reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
- reqCtx->correctedSeq_ = sn;
 reqCtx->offset_ = 0;
 reqCtx->rawlength_ = len1;
 curve::common::CountDownEvent cond(1);
- RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+ RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
 reqDone->SetFileMetric(&fm);
 reqDone->SetIOTracker(&iot);
 reqCtx->done_ = reqDone;
- std::vector reqCtxs;
+ std::vector reqCtxs;
 reqCtxs.push_back(reqCtx);
 ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
 cond.Wait();
 ASSERT_EQ(0, reqDone->GetErrorCode());
 }
- // 4. 重复 delete snapshot
+ // 4.
Repeat delete snapshot { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -357,22 +354,22 @@ TEST(RequestSchedulerTest, fake_server_test) { reqDone->GetErrorCode()); } - // 测试 get chunk info + // Test get chunk info { ChunkInfoDetail chunkInfo; - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); reqCtx->chunkinfodetail_ = &chunkInfo; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -383,9 +380,9 @@ TEST(RequestSchedulerTest, fake_server_test) { reqDone->GetErrorCode()); } - // 测试createClonechunk + // Test createClonechunk { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::CREATE_CLONE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -395,36 +392,35 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->location_ = "destination"; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); ASSERT_EQ(0, reqDone->GetErrorCode()); } - // 测试recoverChunk + // Testing recoverChunk { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::RECOVER_CHUNK; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -434,7 +430,7 @@ TEST(RequestSchedulerTest, fake_server_test) { /* read/write chunk test */ const int kMaxLoop = 100; for (int i = 0; i < kMaxLoop; ++i) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -444,35 +440,34 @@ 
TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); } for (int i = 0; i < kMaxLoop; ++i) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; memset(readbuff, '0', 8); reqCtx->offset_ = offset + i; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -482,34 +477,33 @@ TEST(RequestSchedulerTest, fake_server_test) { ASSERT_EQ(0, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::UNKNOWN; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - memset(readbuff, '0', 8); // reqCtx->readBuffer_ = readbuff; reqCtx->offset_ = offset; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); ASSERT_EQ(-1, reqDone->GetErrorCode()); } - /* 2. 并发测试 */ + /* 2. 
Concurrent testing */ curve::common::CountDownEvent cond(4 * kMaxLoop); auto func = [&]() { for (int i = 0; i < kMaxLoop; ++i) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -518,7 +512,7 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->offset_ = offset + i; reqCtx->rawlength_ = len; - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -538,18 +532,17 @@ TEST(RequestSchedulerTest, fake_server_test) { cond.Wait(); for (int i = 0; i < kMaxLoop; i += 1) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, 1000, copysetId); - reqCtx->seq_ = sn; memset(readbuff, '0', 8); // reqCtx->readBuffer_ = readbuff; reqCtx->offset_ = offset + i; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -578,11 +571,11 @@ TEST(RequestSchedulerTest, CommonTest) { MetaCache metaCache; FileMetric fm("test"); - // scheduleQueueCapacity 设置为 0 + // scheduleQueueCapacity set to 0 opt.scheduleQueueCapacity = 0; ASSERT_EQ(-1, sche.Init(opt, &metaCache, &fm)); - // threadpoolsize 设置为 0 + // threadpoolsize set to 0 opt.scheduleQueueCapacity = 4096; opt.scheduleThreadpoolSize = 0; ASSERT_EQ(-1, sche.Init(opt, &metaCache, &fm)); @@ -597,5 +590,5 @@ TEST(RequestSchedulerTest, CommonTest) { ASSERT_EQ(0, sche.Fini()); } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/test/client/request_sender_test.cpp b/test/client/request_sender_test.cpp index 92882bac79..c453fd2468 100644 --- a/test/client/request_sender_test.cpp +++ b/test/client/request_sender_test.cpp @@ -20,11 +20,12 @@ * Author: wudemiao */ +#include "src/client/request_sender.h" + #include #include #include "src/client/client_common.h" -#include "src/client/request_sender.h" #include "src/common/concurrent/count_down_event.h" #include "test/client/mock/mock_chunkservice.h" @@ -54,9 +55,7 @@ class FakeChunkClosure : public ClientClosure { SetClosure(&reqeustClosure); } - void Run() override { - event->Signal(); - } + void Run() override { event->Signal(); } void SendRetryRequest() override {} @@ -96,7 +95,7 @@ class RequestSenderTest : public ::testing::Test { }; TEST_F(RequestSenderTest, BasicTest) { - // 非法的 port + // Illegal port std::string leaderStr = "127.0.0.1:65539"; butil::EndPoint leaderAddr; ChunkServerID leaderId = 1; @@ -126,8 +125,8 @@ TEST_F(RequestSenderTest, TestWriteChunkSourceInfo) { FakeChunkClosure closure(&event); sourceInfo.cloneFileSource.clear(); - requestSender.WriteChunk(ChunkIDInfo(), 1, 1, 0, {}, 0, 0, - sourceInfo, &closure); + requestSender.WriteChunk(ChunkIDInfo(), 1, 1, 0, {}, 0, 0, sourceInfo, + &closure); event.Wait(); ASSERT_FALSE(chunkRequest.has_clonefilesource()); @@ -148,8 +147,8 @@ TEST_F(RequestSenderTest, TestWriteChunkSourceInfo) { sourceInfo.cloneFileOffset = 0; sourceInfo.valid = true; - requestSender.WriteChunk(ChunkIDInfo(), 1, 1, 0, {}, 0, 0, - sourceInfo, &closure); + 
requestSender.WriteChunk(ChunkIDInfo(), 1, 1, 0, {}, 0, 0, sourceInfo, + &closure); event.Wait(); ASSERT_TRUE(chunkRequest.has_clonefilesource()); diff --git a/test/common/bitmap_test.cpp b/test/common/bitmap_test.cpp index 8bb85b01ad..2bfbed38ca 100644 --- a/test/common/bitmap_test.cpp +++ b/test/common/bitmap_test.cpp @@ -20,10 +20,10 @@ * Author: yangyaokai */ -#include - #include "src/common/bitmap.h" +#include + namespace curve { namespace common { @@ -62,7 +62,7 @@ TEST(BitmapTEST, constructor_test) { delete[] mem; } - // 测试拷贝构造 + // Test copy construction { Bitmap bitmap1(32); Bitmap bitmap2(bitmap1); @@ -72,7 +72,7 @@ TEST(BitmapTEST, constructor_test) { } } - // 测试赋值操作 + // Test assignment operation { Bitmap bitmap1(32); Bitmap bitmap2(16); @@ -88,7 +88,7 @@ TEST(BitmapTEST, constructor_test) { } } - // 测试比较操作符 + // Test Comparison Operator { Bitmap bitmap1(16); Bitmap bitmap2(16); @@ -229,7 +229,7 @@ TEST(BitmapTEST, divide_test) { vector clearRanges; vector setRanges; - // 所有位为0 + // All bits are 0 { bitmap.Clear(); bitmap.Divide(0, 31, &clearRanges, &setRanges); @@ -241,7 +241,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 所有位为1 + // All bits are 1 { bitmap.Set(); bitmap.Divide(0, 31, &clearRanges, &setRanges); @@ -253,7 +253,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 两个range,起始为clear range,末尾为set range + // Two ranges, starting with a clear range and ending with a set range { bitmap.Clear(0, 16); bitmap.Set(17, 31); @@ -268,7 +268,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 两个range,起始为 set range,末尾为 clear range + // Two ranges, starting with set range and ending with clear range { bitmap.Set(0, 16); bitmap.Clear(17, 31); @@ -283,7 +283,8 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 三个range,头尾为 set range,中间为 clear range + // Three ranges, with set ranges at the beginning and end, and clear ranges + // in the middle { bitmap.Set(0, 8); bitmap.Clear(9, 25); @@ -301,7 +302,8 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 三个range,头尾为 clear range,中间为 set range + // Three ranges, with clear ranges at the beginning and end, and set ranges + // in the middle { bitmap.Clear(0, 8); bitmap.Set(9, 25); @@ -319,7 +321,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 四个range,头为 clear range,末尾为 set range + // Four ranges, starting with a clear range and ending with a set range { bitmap.Clear(0, 7); bitmap.Set(8, 15); @@ -340,7 +342,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 四个range,头为 set range,末尾为 clear range + // Four ranges, starting with set range and ending with clear range { bitmap.Set(0, 7); bitmap.Clear(8, 15); @@ -361,7 +363,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 复杂场景随机偏移测试 + // Random offset testing for complex scenes { bitmap.Set(0, 5); bitmap.Clear(6, 9); diff --git a/test/common/channel_pool_test.cpp b/test/common/channel_pool_test.cpp index e327f6f82f..d573142cf0 100644 --- a/test/common/channel_pool_test.cpp +++ b/test/common/channel_pool_test.cpp @@ -20,30 +20,30 @@ * Author: charisu */ -#include - #include "src/common/channel_pool.h" +#include + namespace curve { namespace common { TEST(Common, ChannelPool) { ChannelPool channelPool; ChannelPtr channelPtr; - // 地址非法,init失败 + // Illegal address, init failed std::string addr = "127.0.0.1:80000"; ASSERT_EQ(-1, channelPool.GetOrInitChannel(addr, &channelPtr)); ASSERT_FALSE(channelPtr); - // 地址合法,init成功 + // The address is legal, init succeeded addr = "127.0.0.1:8000"; ASSERT_EQ(0, 
channelPool.GetOrInitChannel(addr, &channelPtr)); ASSERT_TRUE(channelPtr); - // 同一个地址应该返回同一个channelPtr + // The same address should return the same channelPtr ChannelPtr channelPtr2; ASSERT_EQ(0, channelPool.GetOrInitChannel(addr, &channelPtr2)); ASSERT_TRUE(channelPtr2); ASSERT_EQ(channelPtr, channelPtr2); - // 清空 + // Clear channelPool.Clear(); } diff --git a/test/common/configuration_test.cpp b/test/common/configuration_test.cpp index 9dc770bcc8..d51c2c84f4 100644 --- a/test/common/configuration_test.cpp +++ b/test/common/configuration_test.cpp @@ -21,17 +21,17 @@ * 2018/11/23 Wenyu Zhou Initial version */ -#include +#include "src/common/configuration.h" + #include +#include -#include -#include #include +#include #include +#include #include -#include "src/common/configuration.h" - namespace curve { namespace common { @@ -87,9 +87,7 @@ class ConfigurationTest : public ::testing::Test { cFile << confItem; } - void TearDown() { - ASSERT_EQ(0, unlink(confFile_.c_str())); - } + void TearDown() { ASSERT_EQ(0, unlink(confFile_.c_str())); } std::string confFile_; }; @@ -129,52 +127,54 @@ TEST_F(ConfigurationTest, ListConfig) { std::map configs; configs = conf.ListConfig(); ASSERT_NE(0, configs.size()); - // 抽几个key来校验以下 + // Pick a few keys for validation. ASSERT_EQ(configs["test.int1"], "12345"); ASSERT_EQ(configs["test.bool1"], "0"); - // 如果key不存在,返回为空 + // If the key does not exist, return empty ASSERT_EQ(configs["xxx"], ""); } -// 覆盖原有配置 +// Overwrite the original configuration TEST_F(ConfigurationTest, SaveConfig) { bool ret; Configuration conf; conf.SetConfigPath(confFile_); - // 自定义配置项并保存 + // Customize configuration items and save them conf.SetStringValue("test.str1", "new"); ret = conf.SaveConfig(); ASSERT_EQ(ret, true); - // 重新加载配置项 + // Reload Configuration Items Configuration conf2; conf2.SetConfigPath(confFile_); ret = conf2.LoadConfig(); ASSERT_EQ(ret, true); - // 可以读取自定义配置项,原有配置项被覆盖,读取不到 + // Custom configuration items can be read, but the original configuration + // items are overwritten and cannot be read ASSERT_EQ(conf2.GetValue("test.str1"), "new"); ASSERT_EQ(conf2.GetValue("test.int1"), ""); } -// 读取当前配置写到其他路径 +// Read the current configuration and write to another path TEST_F(ConfigurationTest, SaveConfigToFileNotExist) { bool ret; - // 加载当前配置 + // Load current configuration Configuration conf; conf.SetConfigPath(confFile_); ret = conf.LoadConfig(); ASSERT_EQ(ret, true); - // 写配置到其他位置 + // Write configuration to another location std::string newFile("curve.conf.test2"); conf.SetConfigPath(newFile); ret = conf.SaveConfig(); ASSERT_EQ(ret, true); - // 从新配置文件加载,并读取某项配置来进行校验 + // Load from a new configuration file and read a certain configuration for + // verification Configuration newConf; newConf.SetConfigPath(newFile); ret = newConf.LoadConfig(); @@ -337,11 +337,11 @@ TEST_F(ConfigurationTest, TestMetric) { "{\"conf_name\":\"key1\",\"conf_value\":\"123\"}"); ASSERT_STREQ(bvar::Variable::describe_exposed("conf_metric_key2").c_str(), "{\"conf_name\":\"key2\",\"conf_value\":\"1.230000\"}"); - // 还未设置时,返回空 + // When not yet set, return empty ASSERT_STREQ(bvar::Variable::describe_exposed("conf_metric_key3").c_str(), ""); - // 支持自动更新metric + // Support for automatic updating of metrics conf.SetIntValue("key1", 234); ASSERT_STREQ(bvar::Variable::describe_exposed("conf_metric_key1").c_str(), "{\"conf_name\":\"key1\",\"conf_value\":\"234\"}"); diff --git a/test/common/count_down_event_test.cpp b/test/common/count_down_event_test.cpp index 8bdc5c9681..41633c6425 100644 --- 
a/test/common/count_down_event_test.cpp +++ b/test/common/count_down_event_test.cpp @@ -20,13 +20,13 @@ * Author: wudemiao */ +#include "src/common/concurrent/count_down_event.h" + #include <gtest/gtest.h> -#include <chrono> //NOLINT #include <atomic> -#include <thread> //NOLINT - -#include "src/common/concurrent/count_down_event.h" +#include <chrono> //NOLINT +#include <thread> //NOLINT namespace curve { namespace common { @@ -62,7 +62,7 @@ TEST(CountDownEventTest, basic) { }; std::thread t1(func); - std::this_thread::sleep_for(std::chrono::milliseconds(3*sleepMs)); + std::this_thread::sleep_for(std::chrono::milliseconds(3 * sleepMs)); ASSERT_TRUE(isRun.load()); t1.join(); @@ -89,8 +89,7 @@ TEST(CountDownEventTest, basic) { cond.WaitFor(1000); } - - /* 1. initCnt==Signal次数 */ + /* 1. initCnt == signal count */ { std::atomic<int> signalCount; signalCount.store(0, std::memory_order_release); @@ -111,13 +110,13 @@ TEST(CountDownEventTest, basic) { t1.join(); } - /* 2. initCnt<Signal次数 */ + /* 2. initCnt < signal count */ { std::atomic<int> signalCount; signalCount.store(0, std::memory_order_release); const int kEventNum = 20; - const int kInitCnt = kEventNum - 10; + const int kInitCnt = kEventNum - 10; CountDownEvent cond(kInitCnt); auto func = [&] { for (int i = 0; i < kEventNum; ++i) { @@ -128,7 +127,7 @@ TEST(CountDownEventTest, basic) { std::thread t1(func); - /* 等到Signal次数>initCnt */ + /* Wait until the signal count > initCnt */ while (true) { ::usleep(5); if (signalCount.load(std::memory_order_acquire) > kInitCnt) { @@ -141,13 +140,13 @@ TEST(CountDownEventTest, basic) { t1.join(); } - /* 3. initCnt>Signal次数 */ + /* 3. initCnt > signal count */ { std::atomic<int> signalCount; signalCount.store(0, std::memory_order_release); const int kEventNum = 10; - /* kSignalEvent1 + kSignalEvent2等于kEventNum */ + /* kSignalEvent1 + kSignalEvent2 == kEventNum */ const int kSignalEvent1 = kEventNum - 5; const int kSignalEvent2 = 5; CountDownEvent cond(kEventNum); @@ -167,7 +166,8 @@ TEST(CountDownEventTest, basic) { }; std::thread waitThread(waitFunc); - /* 由于t1 唤醒的次数不够,所以waitThread会阻塞在wait那里 */ + /* t1 does not signal enough times, so waitThread stays blocked in + * Wait() */ ASSERT_EQ(false, passWait.load(std::memory_order_acquire)); auto func2 = [&] { @@ -176,7 +176,7 @@ TEST(CountDownEventTest, basic) { cond.Signal(); } }; - /* 运行t2,补上不够的唤醒次数 */ + /* Run t2 to deliver the missing signals */ std::thread t2(func2); t1.join(); @@ -203,8 +203,9 @@ TEST(CountDownEventTest, basic) { std::chrono::duration<double, std::milli> elpased = end - start; std::cerr << "elapsed: " << elpased.count() << std::endl; - // 事件未到达,超时返回,可以容许在一定的误差 - ASSERT_GT(static_cast<int>(elpased.count()), waitForMs-1000); + // The expected count was not reached, so WaitFor returned on timeout; + // allow some tolerance in the measured time + ASSERT_GT(static_cast<int>(elpased.count()), waitForMs - 1000); t1.join(); } @@ -226,7 +227,7 @@ TEST(CountDownEventTest, basic) { std::chrono::duration<double, std::milli> elpased = end - start; std::cerr << "elapsed: " << elpased.count() << std::endl; - // 事件达到,提前返回 + // The expected count was reached, so WaitFor returned early ASSERT_GT(waitForMs, static_cast<int>(elpased.count())); t1.join(); diff --git a/test/common/lru_cache_test.cpp b/test/common/lru_cache_test.cpp index a5e9d65e19..773d42e153 100644 --- a/test/common/lru_cache_test.cpp +++ b/test/common/lru_cache_test.cpp @@ -20,11 +20,13 @@ * Author: xuchaojie,lixiaocui */ -#include <gtest/gtest.h> +#include "src/common/lru_cache.h" + #include <gmock/gmock.h> +#include <gtest/gtest.h> + #include <string> -#include "src/common/lru_cache.h" #include "src/common/timeutility.h" namespace curve { @@ -33,26 +35,26 @@ namespace common { TEST(TestCacheMetrics, testall) { CacheMetrics cacheMetrics("LRUCache");
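A condensed sketch of the CountDownEvent contract that the three numbered cases in count_down_event_test.cpp above exercise; every call mirrors the test code:

    curve::common::CountDownEvent cond(2);   // initCnt == 2
    std::thread t([&cond] {
        cond.Signal();   // each Signal() decrements the remaining count
        cond.Signal();
        cond.Signal();   // Signals beyond initCnt are harmless (case 2)
    });
    cond.Wait();         // returns once the count reaches zero
    t.join();
    // cond.WaitFor(ms) additionally returns once the timeout expires even
    // if the count was never reached, as the last two blocks verify.

- // 1. 新增数据项 + // 1. 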
Add Data Item cacheMetrics.UpdateAddToCacheCount(); ASSERT_EQ(1, cacheMetrics.cacheCount.get_value()); cacheMetrics.UpdateAddToCacheBytes(1000); ASSERT_EQ(1000, cacheMetrics.cacheBytes.get_value()); - // 2. 移除数据项 + // 2. Remove Data Item cacheMetrics.UpdateRemoveFromCacheCount(); ASSERT_EQ(0, cacheMetrics.cacheCount.get_value()); cacheMetrics.UpdateRemoveFromCacheBytes(200); ASSERT_EQ(800, cacheMetrics.cacheBytes.get_value()); - // 3. cache命中 + // 3. cache hit ASSERT_EQ(0, cacheMetrics.cacheHit.get_value()); cacheMetrics.OnCacheHit(); ASSERT_EQ(1, cacheMetrics.cacheHit.get_value()); - // 4. cache未命中 + // 4. cache Misses ASSERT_EQ(0, cacheMetrics.cacheMiss.get_value()); cacheMetrics.OnCacheMiss(); ASSERT_EQ(1, cacheMetrics.cacheMiss.get_value()); @@ -60,10 +62,10 @@ TEST(TestCacheMetrics, testall) { TEST(CaCheTest, test_cache_with_capacity_limit) { int maxCount = 5; - auto cache = std::make_shared>(maxCount, - std::make_shared("LruCache")); + auto cache = std::make_shared>( + maxCount, std::make_shared("LruCache")); - // 1. 测试 put/get + // 1. Test put/get uint64_t cacheSize = 0; for (int i = 1; i <= maxCount + 1; i++) { std::string eliminated; @@ -74,8 +76,8 @@ TEST(CaCheTest, test_cache_with_capacity_limit) { } else { cacheSize += std::to_string(i).size() * 2 - std::to_string(1).size() * 2; - ASSERT_EQ( - cacheSize, cache->GetCacheMetrics()->cacheBytes.get_value()); + ASSERT_EQ(cacheSize, + cache->GetCacheMetrics()->cacheBytes.get_value()); } std::string res; @@ -83,7 +85,7 @@ TEST(CaCheTest, test_cache_with_capacity_limit) { ASSERT_EQ(std::to_string(i), res); } - // 2. 第一个元素被剔出 + // 2. The first element is removed std::string res; ASSERT_FALSE(cache->Get(std::to_string(1), &res)); for (int i = 2; i <= maxCount + 1; i++) { @@ -91,17 +93,17 @@ TEST(CaCheTest, test_cache_with_capacity_limit) { ASSERT_EQ(std::to_string(i), res); } - // 3. 测试删除元素 - // 删除不存在的元素 + // 3. Test Delete Element + // Delete non-existent elements cache->Remove("1"); - // 删除list中存在的元素 + // Delete elements present in the list cache->Remove("2"); ASSERT_FALSE(cache->Get("2", &res)); cacheSize -= std::to_string(2).size() * 2; ASSERT_EQ(maxCount - 1, cache->GetCacheMetrics()->cacheCount.get_value()); ASSERT_EQ(cacheSize, cache->GetCacheMetrics()->cacheBytes.get_value()); - // 4. 重复put + // 4. Repeat put std::string eliminated; cache->Put("4", "hello", &eliminated); ASSERT_TRUE(cache->Get("4", &res)); @@ -116,7 +118,7 @@ TEST(CaCheTest, test_cache_with_capacity_no_limit) { auto cache = std::make_shared>( std::make_shared("LruCache")); - // 1. 测试 put/get + // 1. Test put/get std::string res; for (int i = 1; i <= 10; i++) { std::string eliminated; @@ -125,7 +127,7 @@ TEST(CaCheTest, test_cache_with_capacity_no_limit) { ASSERT_EQ(std::to_string(i), res); } - // 2. 测试元素删除 + // 2. Test element deletion cache->Remove("1"); ASSERT_FALSE(cache->Get("1", &res)); } @@ -192,9 +194,7 @@ TEST(CaCheTest, TestCacheGetLastKV) { ASSERT_EQ(1, k); ASSERT_EQ(1, v); } -bool TestFunction(const int& a) { - return a > 1; -} +bool TestFunction(const int& a) { return a > 1; } TEST(CaCheTest, TestCacheGetLastKVWithFunction) { auto cache = std::make_shared>( std::make_shared("LruCache")); @@ -228,10 +228,10 @@ TEST(SglCaCheTest, TestGetBefore) { TEST(SglCaCheTest, test_cache_with_capacity_limit) { int maxCount = 5; - auto cache = std::make_shared>(maxCount, - std::make_shared("LruCache")); + auto cache = std::make_shared>( + maxCount, std::make_shared("LruCache")); - // 1. 测试 put/IsCached + // 1. 
Test put/IsCached uint64_t cacheSize = 0; for (int i = 1; i <= maxCount; i++) { cache->Put(std::to_string(i)); @@ -240,19 +240,19 @@ TEST(SglCaCheTest, test_cache_with_capacity_limit) { ASSERT_TRUE(cache->IsCached(std::to_string(i))); } - // 2. 第一个元素被剔出 + // 2. The first element is evicted cache->Put(std::to_string(11)); ASSERT_FALSE(cache->IsCached(std::to_string(1))); - // 3. 测试删除元素 - // 删除不存在的元素 + // 3. Test removing elements + // Remove an element that does not exist cache->Remove("1"); - // 删除list中存在的元素 + // Remove an element that exists in the list cache->Remove("2"); ASSERT_FALSE(cache->IsCached("2")); ASSERT_EQ(maxCount - 1, cache->GetCacheMetrics()->cacheCount.get_value()); - // 4. 重复put + // 4. Put an existing key again cache->Put("4"); ASSERT_TRUE(cache->IsCached("4")); ASSERT_EQ(maxCount - 1, cache->GetCacheMetrics()->cacheCount.get_value()); @@ -262,7 +262,7 @@ TEST(SglCaCheTest, test_cache_with_capacity_no_limit) { auto cache = std::make_shared<SglLRUCache<std::string>>( std::make_shared<CacheMetrics>("LruCache")); - // 1. 测试 put/IsCached + // 1. Test put/IsCached std::string res; for (int i = 1; i <= 10; i++) { std::string eliminated; @@ -271,7 +271,7 @@ TEST(SglCaCheTest, test_cache_with_capacity_no_limit) { ASSERT_FALSE(cache->IsCached(std::to_string(100))); } - // 2. 测试元素删除 + // 2. Test element removal cache->Remove("1"); ASSERT_FALSE(cache->IsCached("1")); } @@ -315,7 +315,7 @@ TEST(TimedCaCheTest, test_base) { ASSERT_EQ(i, cache->GetCacheMetrics()->cacheCount.get_value()); } else { ASSERT_EQ(maxCount, - cache->GetCacheMetrics()->cacheCount.get_value()); + cache->GetCacheMetrics()->cacheCount.get_value()); } std::string res; ASSERT_TRUE(cache->Get(std::to_string(i), &res)); @@ -355,5 +355,3 @@ TEST(TimedCaCheTest, test_timeout) { } // namespace common } // namespace curve - - diff --git a/test/common/task_thread_pool_test.cpp b/test/common/task_thread_pool_test.cpp index cb44a36b09..fcb7791d54 100644 --- a/test/common/task_thread_pool_test.cpp +++ b/test/common/task_thread_pool_test.cpp @@ -20,26 +20,27 @@ * Author: wudemiao */ +#include "src/common/concurrent/task_thread_pool.h" + #include <gtest/gtest.h> -#include <functional> #include <atomic> +#include <functional> #include "src/common/concurrent/count_down_event.h" -#include "src/common/concurrent/task_thread_pool.h" namespace curve { namespace common { using curve::common::CountDownEvent; -void TestAdd1(int a, double b, CountDownEvent *cond) { +void TestAdd1(int a, double b, CountDownEvent* cond) { double c = a + b; (void)c; cond->Signal(); } -int TestAdd2(int a, double b, CountDownEvent *cond) { +int TestAdd2(int a, double b, CountDownEvent* cond) { double c = a + b; (void)c; cond->Signal(); @@ -47,7 +48,7 @@ int TestAdd2(int a, double b, CountDownEvent *cond) { } TEST(TaskThreadPool, basic) { - /* 测试线程池 start 入参 */ + /* Test thread pool Start() input parameters */ { TaskThreadPool<> taskThreadPool; ASSERT_EQ(-1, taskThreadPool.Start(2, 0)); @@ -74,7 +75,7 @@ TEST(TaskThreadPool, basic) { } { - /* 测试不设置,此时为 INT_MAX */ + /* When the capacity is not set, it defaults to INT_MAX */ TaskThreadPool<> taskThreadPool; ASSERT_EQ(0, taskThreadPool.Start(4)); ASSERT_EQ(INT_MAX, taskThreadPool.QueueCapacity()); @@ -92,7 +93,7 @@ TEST(TaskThreadPool, basic) { CountDownEvent cond1(1); taskThreadPool.Enqueue(TestAdd1, 1, 1.234, &cond1); cond1.Wait(); - /* TestAdd2 是有返回值的 function */ + /* TestAdd2 is a function with a return value */ CountDownEvent cond2(1); taskThreadPool.Enqueue(TestAdd2, 1, 1.234, &cond2); cond2.Wait(); @@ -100,7 +101,7 @@ TEST(TaskThreadPool, basic) { taskThreadPool.Stop(); } - /* 基本运行 task 测试 */ + /* Basic task execution test */ {
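A minimal sketch of the TaskThreadPool surface exercised by the cases above, using only the calls that appear in this test (Start with an optional queue capacity, Enqueue, Stop):

    curve::common::TaskThreadPool<> pool;
    int rc = pool.Start(2, 4);   // 2 workers, queue capacity 4; Start(n, 0) fails above
    if (rc == 0) {
        pool.Enqueue([] { /* runs on a worker thread */ });
        // Once the queue holds 4 tasks and every worker is blocked, a further
        // Enqueue blocks until a slot frees up, as the queue-full block below
        // demonstrates.
        pool.Stop();
    }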
std::atomic<int> runTaskCount; runTaskCount.store(0, std::memory_order_release); @@ -133,14 +134,14 @@ TEST(TaskThreadPool, basic) { t2.join(); t3.join(); - /* 等待所有 task 执行完毕 */ + /* Wait for all tasks to finish */ cond.Wait(); ASSERT_EQ(3 * kMaxLoop, runTaskCount.load(std::memory_order_acquire)); taskThreadPool.Stop(); } - /* 测试队列满了,push会阻塞 */ + /* Test that Enqueue blocks once the queue is full */ { std::atomic<int> runTaskCount; runTaskCount.store(0, std::memory_order_release); @@ -157,8 +158,7 @@ TEST(TaskThreadPool, basic) { CountDownEvent cond4(1); CountDownEvent startRunCond4(1); - auto waitTask = [&](CountDownEvent* sigCond, - CountDownEvent* waitCond) { + auto waitTask = [&](CountDownEvent* sigCond, CountDownEvent* waitCond) { sigCond->Signal(); waitCond->Wait(); runTaskCount.fetch_add(1, std::memory_order_acq_rel); @@ -169,12 +169,13 @@ TEST(TaskThreadPool, basic) { ASSERT_EQ(kQueueCapacity, taskThreadPool.QueueCapacity()); ASSERT_EQ(kThreadNums, taskThreadPool.ThreadOfNums()); - /* 把线程池的所有处理线程都卡住了 */ + /* Block every worker thread in the pool */ taskThreadPool.Enqueue(waitTask, &startRunCond1, &cond1); taskThreadPool.Enqueue(waitTask, &startRunCond2, &cond2); taskThreadPool.Enqueue(waitTask, &startRunCond3, &cond3); taskThreadPool.Enqueue(waitTask, &startRunCond4, &cond4); - /* 等待 waitTask1、waitTask2、waitTask3、waitTask4 都开始运行 */ + /* Wait until waitTask1, waitTask2, waitTask3 and waitTask4 have all + * started running */ startRunCond1.Wait(); startRunCond2.Wait(); startRunCond3.Wait(); startRunCond4.Wait(); @@ -186,7 +187,7 @@ TEST(TaskThreadPool, basic) { runTaskCount.fetch_add(1, std::memory_order_acq_rel); }; - /* 记录线程 push 到线程池 queue 的 task 数量 */ + /* Count the tasks each thread pushes into the pool queue */ std::atomic<int> pushTaskCount1; std::atomic<int> pushTaskCount2; std::atomic<int> pushTaskCount3; @@ -208,7 +209,7 @@ TEST(TaskThreadPool, basic) { std::thread t2(std::bind(threadFunc, &pushTaskCount2)); std::thread t3(std::bind(threadFunc, &pushTaskCount3)); - /* 等待线程池 queue 被 push 满 */ + /* Wait until the pool queue has been pushed full */ int pushTaskCount; while (true) { ::usleep(50); @@ -222,32 +223,33 @@ TEST(TaskThreadPool, basic) { } } - /* push 进去的 task 都没有被执行 */ + /* None of the pushed tasks have been executed yet */ ASSERT_EQ(0, runTaskCount.load(std::memory_order_acquire)); /** - * 此时,thread pool 的 queue 肯定 push 满了,且 push - * 满了之后就没法再 push 了 + * At this point the thread pool queue must be full, and once it is + * full nothing more can be pushed */ ASSERT_EQ(pushTaskCount, taskThreadPool.QueueCapacity()); ASSERT_EQ(taskThreadPool.QueueCapacity(), taskThreadPool.QueueSize()); - /* 将线程池中的线程都唤醒 */ + /* Wake up all worker threads in the pool */ cond1.Signal(); cond2.Signal(); cond3.Signal(); cond4.Signal(); - /* 等待所有 task 执行完成 */ + /* Wait for all tasks to finish */ while (true) { ::usleep(10); - if (runTaskCount.load(std::memory_order_acquire) - >= 4 + 3 * kMaxLoop) { + if (runTaskCount.load(std::memory_order_acquire) >= + 4 + 3 * kMaxLoop) { break; } } /** - * 等待所有的 push thread 退出,这样才能保证 pushThreadCount 计数更新了 + * Wait for all push threads to exit, which guarantees that the + * pushThreadCount counter has been updated */ pushThreadCond.Wait(); diff --git a/test/common/test_name_lock.cpp b/test/common/test_name_lock.cpp index e5520e0a1a..074dd885ce 100644 --- a/test/common/test_name_lock.cpp +++ b/test/common/test_name_lock.cpp @@ -21,6 +21,7 @@ */ #include + #include #include "src/common/concurrent/name_lock.h" @@ -31,29 +32,27 @@ namespace common { TEST(TestNameLock, TestNameLockBasic) { NameLock lock1, lock2, lock3; - // lock测试 + // 
Lock test lock1.Lock("str1"); - // 同锁不同str可lock不死锁 + // The same lock with different strs can be locked without deadlock lock1.Lock("str2"); - // 不同锁同str可lock不死锁 + // Different locks with the same str can be locked without deadlock lock2.Lock("str1"); - - - // 同锁同str TryLock失败 + // The same lock with the same str: TryLock fails ASSERT_FALSE(lock1.TryLock("str1")); - // 同锁不同str TryLock成功 + // The same lock with a different str: TryLock succeeds ASSERT_TRUE(lock1.TryLock("str3")); - // 不同锁同str TryLock成功 + // A different lock with the same str: TryLock succeeds ASSERT_TRUE(lock3.TryLock("str1")); - // unlock测试 + // Unlock test lock1.Unlock("str1"); lock1.Unlock("str2"); lock1.Unlock("str3"); lock2.Unlock("str1"); lock3.Unlock("str1"); - // 未锁unlock ok + // Unlocking a name that was never locked is OK lock2.Unlock("str2"); } @@ -63,12 +62,13 @@ TEST(TestNameLock, TestNameLockGuardBasic) { NameLockGuard guard1(lock1, "str1"); NameLockGuard guard2(lock1, "str2"); NameLockGuard guard3(lock2, "str1"); - // 作用域内加锁成功,不可再加锁 + // Successfully locked within the scope; cannot be locked again ASSERT_FALSE(lock1.TryLock("str1")); ASSERT_FALSE(lock1.TryLock("str2")); ASSERT_FALSE(lock2.TryLock("str1")); } - // 作用域外自动解锁,可再加锁 + // Automatically unlocked outside the scope; locking succeeds + // again ASSERT_TRUE(lock1.TryLock("str1")); ASSERT_TRUE(lock1.TryLock("str2")); ASSERT_TRUE(lock2.TryLock("str1")); @@ -79,14 +79,14 @@ TEST(TestNameLock, TestNameLockConcurrent) { NameLock lock1; - auto worker = [&] (const std::string &str) { + auto worker = [&](const std::string& str) { for (int i = 0; i < 10000; i++) { NameLockGuard guard(lock1, str); } }; std::vector<Thread> threadpool; - for (auto &t : threadpool) { + for (auto& t : threadpool) { std::string str1 = "aaaa"; std::string str2 = "bbbb"; std::srand(std::time(nullptr)); std::string rstr = std::rand() % 2 ? str1 : str2; t = Thread(worker, rstr); } - for (auto &t : threadpool) { + for (auto& t : threadpool) { t.join(); } } - - -} // namespace common -} // namespace curve +} // namespace common +} // namespace curve diff --git a/test/failpoint/failpoint_test.cpp b/test/failpoint/failpoint_test.cpp index f0096b0ea4..c77f3b6e52 100644 --- a/test/failpoint/failpoint_test.cpp +++ b/test/failpoint/failpoint_test.cpp @@ -19,56 +19,56 @@ * Created Date: Monday May 13th 2019 * Author: hzsunjianliang */ -#include <fiu.h> -#include <fiu-control.h> #include <gtest/gtest.h> +#include <fiu-control.h> +#include <fiu.h> + #include "test/failpoint/fiu_local.h" /* - * libfiu 使用文档详见:https://blitiri.com.ar/p/libfiu/doc/man-libfiu.html - * 分为2个部分,一部分是core API,包括fiu_do_on/fiu_return_on/fiu_init - * core API 用于作用与注入在业务代码处,并由外部control API控制触发。 - * control API 包括:fiu_enable\fiu_disable\fiu_enable_random等等 - * 用于在测试代码处用户进行错误的注入,具体使用方式和方法如下示例代码所示 + * For detailed libfiu documentation, see: + * https://blitiri.com.ar/p/libfiu/doc/man-libfiu.html + * libfiu has two parts. The core API (fiu_do_on/fiu_return_on/fiu_init) + * declares injection points inside business code; triggering is controlled + * externally through the control API (fiu_enable, fiu_disable, + * fiu_enable_random, and so on), which is called from test code to inject + * the errors. Usage is shown in the example code + * snippets below. 
*/ namespace curve { namespace failpint { -class FailPointTest: public ::testing::Test { +class FailPointTest : public ::testing::Test { protected: - void SetUp() override { - fiu_init(0); - } + void SetUp() override { fiu_init(0); } void TearDown() override { // noop } }; -// 注入方式: 通过返回值的方式进行注入 +// Injection method: inject via the return value size_t free_space() { - fiu_return_on("no_free_space", 0); - return 100; + fiu_return_on("no_free_space", 0); + return 100; } -// 注入方式: 通过side_effet 进行注入 +// Injection method: inject via a side effect void modify_state(int* val) { *val += 1; fiu_do_on("side_effect", *val += 1); return; } -// 注入方式: 通过side_effet 进行注入(lambda方式) -void modify_state_with_lamda(int &val) { //NOLINT - fiu_do_on("side_effect_2", - auto func = [&] () { - val++; - }; - func();); +// Injection method: inject via a side effect (lambda style) +void modify_state_with_lamda(int& val) { // NOLINT + fiu_do_on( + "side_effect_2", auto func = [&]() { val++; }; func();); return; } -// 错误触发方式: 总是触发 +// Error trigger mode: always trigger TEST_F(FailPointTest, alwaysfail) { if (fiu_enable("no_free_space", 1, NULL, 0) == 0) { ASSERT_EQ(free_space(), 0); @@ -80,7 +80,7 @@ TEST_F(FailPointTest, alwaysfail) { ASSERT_EQ(free_space(), 100); } -// 错误触发方式: 随机触发错误 +// Error trigger mode: trigger errors randomly TEST_F(FailPointTest, nondeterministic) { if (fiu_enable_random("no_free_space", 1, NULL, 0, 1) == 0) { ASSERT_EQ(free_space(), 0); @@ -144,6 +144,5 @@ TEST_F(FailPointTest, WildZard) { } } - } // namespace failpint } // namespace curve diff --git a/test/fs/ext4_filesystem_test.cpp b/test/fs/ext4_filesystem_test.cpp index f2c6cfa520..65540555c5 100644 --- a/test/fs/ext4_filesystem_test.cpp +++ b/test/fs/ext4_filesystem_test.cpp @@ -21,34 +21,34 @@ */ #include -#include #include -#include -#include #include +#include +#include + #include -#include "test/fs/mock_posix_wrapper.h" #include "src/fs/ext4_filesystem_impl.h" +#include "test/fs/mock_posix_wrapper.h" using ::testing::_; +using ::testing::DoAll; +using ::testing::ElementsAre; using ::testing::Ge; using ::testing::Gt; using ::testing::Mock; -using ::testing::DoAll; +using ::testing::NotNull; using ::testing::Return; +using ::testing::ReturnArg; using ::testing::ReturnPointee; -using ::testing::NotNull; -using ::testing::StrEq; -using ::testing::ElementsAre; using ::testing::SetArgPointee; -using ::testing::ReturnArg; +using ::testing::StrEq; namespace curve { namespace fs { -ACTION_TEMPLATE(SetVoidArgPointee, - HAS_1_TEMPLATE_PARAMS(int, k), +ACTION_TEMPLATE(SetVoidArgPointee, HAS_1_TEMPLATE_PARAMS(int, k), AND_1_VALUE_PARAMS(first)) { auto output = reinterpret_cast<char*>(::testing::get<k>(args)); *output = first; } class Ext4LocalFileSystemTest : public testing::Test { public: - void SetUp() { - wrapper = std::make_shared<MockPosixWrapper>(); - lfs = Ext4FileSystemImpl::getInstance(); - lfs->SetPosixWrapper(wrapper); - errno = 1234; - } + void SetUp() { + wrapper = std::make_shared<MockPosixWrapper>(); + lfs = Ext4FileSystemImpl::getInstance(); + lfs->SetPosixWrapper(wrapper); + errno = 1234; + } - void TearDown() { - errno = 0; - // allows the destructor of lfs_ to be invoked correctly - Mock::VerifyAndClear(wrapper.get()); - } + void TearDown() { + errno = 0; + // allows the destructor of lfs_ to be invoked correctly + Mock::VerifyAndClear(wrapper.get()); + } protected: std::shared_ptr<MockPosixWrapper> wrapper; @@ -79,99 +79,70 @@ TEST_F(Ext4LocalFileSystemTest, InitTest) { 
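The failpoint tests above follow the standard libfiu pattern: the code under test declares a failure point with fiu_return_on/fiu_do_on, and the test toggles it with the control API. A minimal sketch with a hypothetical point name:

    size_t FreeSpace() {
        fiu_return_on("sketch/no_space", 0);   // hypothetical failpoint
        return 100;
    }

    void Demo() {
        fiu_init(0);
        fiu_enable("sketch/no_space", 1, NULL, 0);   // force the failure
        // FreeSpace() == 0 while enabled
        fiu_disable("sketch/no_space");
        // FreeSpace() == 100 again
    }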
option.enableRenameat2 = true; struct utsname kernel_info; - // 测试版本偏低的情况 - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", - "2.16.0"); + // Testing with a lower version + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "2.16.0"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(lfs->Init(option), -1); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.14.0-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(lfs->Init(option), -1); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.14.19-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(lfs->Init(option), -1); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.15.0-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(0, lfs->Init(option)); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.15.1-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(0, lfs->Init(option)); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.16.0-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(0, lfs->Init(option)); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", - "4.16.0"); + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "4.16.0"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(0, lfs->Init(option)); } // test Statfs TEST_F(Ext4LocalFileSystemTest, StatfsTest) { FileSystemInfo fsinfo; - EXPECT_CALL(*wrapper, statfs(NotNull(), NotNull())) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, statfs(NotNull(), NotNull())).WillOnce(Return(0)); ASSERT_EQ(lfs->Statfs("./", &fsinfo), 0); - EXPECT_CALL(*wrapper, statfs(NotNull(), NotNull())) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, statfs(NotNull(), NotNull())).WillOnce(Return(-1)); ASSERT_EQ(lfs->Statfs("./", &fsinfo), -errno); } // test Open TEST_F(Ext4LocalFileSystemTest, OpenTest) { - EXPECT_CALL(*wrapper, open(NotNull(), _, _)) - .WillOnce(Return(666)); + EXPECT_CALL(*wrapper, open(NotNull(), _, _)).WillOnce(Return(666)); ASSERT_EQ(lfs->Open("/a", 0), 666); - EXPECT_CALL(*wrapper, open(NotNull(), _, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, open(NotNull(), _, _)).WillOnce(Return(-1)); 
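The InitTest expectations above encode the rule that renameat2 support requires kernel 3.15 or newer ("2.16.0" and "3.14.*" are rejected, "3.15.0" and later pass). A sketch of that gate with a hypothetical helper name; the real parsing lives inside Ext4FileSystemImpl::Init:

    #include <cstdio>

    bool KernelSupportsRenameat2(const char* release) {   // e.g. "3.15.0-sss"
        int major = 0, minor = 0;
        if (sscanf(release, "%d.%d", &major, &minor) != 2) {
            return false;
        }
        return major > 3 || (major == 3 && minor >= 15);
    }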
ASSERT_EQ(lfs->Open("/a", 0), -errno); } // test Close TEST_F(Ext4LocalFileSystemTest, CloseTest) { - EXPECT_CALL(*wrapper, close(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, close(_)).WillOnce(Return(0)); ASSERT_EQ(lfs->Close(666), 0); - EXPECT_CALL(*wrapper, close(_)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, close(_)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Close(666), -errno); } @@ -185,32 +156,26 @@ TEST_F(Ext4LocalFileSystemTest, DeleteTest) { fileInfo.st_mode = S_IFREG; // /a is a file EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // /b is a dir EXPECT_CALL(*wrapper, stat(StrEq("/b"), NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<1>(dirInfo), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(dirInfo), Return(0))); // /b/1 is a file EXPECT_CALL(*wrapper, stat(StrEq("/b/1"), NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), Return(0))); DIR* dirp = reinterpret_cast(0x01); struct dirent entryArray[1]; memset(entryArray, 0, sizeof(entryArray)); memcpy(entryArray[0].d_name, "1", 1); - EXPECT_CALL(*wrapper, opendir(StrEq("/b"))) - .WillOnce(Return(dirp)); + EXPECT_CALL(*wrapper, opendir(StrEq("/b"))).WillOnce(Return(dirp)); EXPECT_CALL(*wrapper, readdir(dirp)) .Times(2) .WillOnce(Return(entryArray)) .WillOnce(Return(nullptr)); - EXPECT_CALL(*wrapper, closedir(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, closedir(_)).WillOnce(Return(0)); - EXPECT_CALL(*wrapper, remove(NotNull())) - .WillRepeatedly(Return(0)); + EXPECT_CALL(*wrapper, remove(NotNull())).WillRepeatedly(Return(0)); } // test delete dir @@ -219,8 +184,7 @@ TEST_F(Ext4LocalFileSystemTest, DeleteTest) { ASSERT_EQ(lfs->Delete("/b"), 0); // opendir failed - EXPECT_CALL(*wrapper, opendir(StrEq("/b"))) - .WillOnce(Return(nullptr)); + EXPECT_CALL(*wrapper, opendir(StrEq("/b"))).WillOnce(Return(nullptr)); // List will failed ASSERT_EQ(lfs->Delete("/b"), -errno); } @@ -229,8 +193,7 @@ TEST_F(Ext4LocalFileSystemTest, DeleteTest) { { ASSERT_EQ(lfs->Delete("/a"), 0); // error occured when remove file - EXPECT_CALL(*wrapper, remove(NotNull())) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, remove(NotNull())).WillOnce(Return(-1)); ASSERT_EQ(lfs->Delete("/a"), -errno); } } @@ -242,32 +205,25 @@ TEST_F(Ext4LocalFileSystemTest, MkdirTest) { info.st_mode = S_IFDIR; // success EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); - EXPECT_CALL(*wrapper, mkdir(NotNull(), _)) - .Times(0); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); + EXPECT_CALL(*wrapper, mkdir(NotNull(), _)).Times(0); ASSERT_EQ(lfs->Mkdir("/a"), 0); // stat failed ,mkdir success - EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(Return(-1)); - EXPECT_CALL(*wrapper, mkdir(StrEq("/a"), _)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())).WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, mkdir(StrEq("/a"), _)).WillOnce(Return(0)); ASSERT_EQ(lfs->Mkdir("/a"), 0); // test relative path EXPECT_CALL(*wrapper, stat(_, NotNull())) .Times(2) .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))) .WillOnce(Return(-1)); - EXPECT_CALL(*wrapper, mkdir(StrEq("aaa/bbb"), _)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, mkdir(StrEq("aaa/bbb"), _)).WillOnce(Return(0)); ASSERT_EQ(lfs->Mkdir("aaa/bbb"), 0); // is not a dir, mkdir failed info.st_mode = 
S_IFREG; EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); - EXPECT_CALL(*wrapper, mkdir(NotNull(), _)) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); + EXPECT_CALL(*wrapper, mkdir(NotNull(), _)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Mkdir("/a"), -errno); } @@ -277,19 +233,16 @@ TEST_F(Ext4LocalFileSystemTest, DirExistsTest) { info.st_mode = S_IFDIR; // is dir EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); ASSERT_EQ(lfs->DirExists("/a"), true); // stat failed EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(-1))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(-1))); ASSERT_EQ(lfs->DirExists("/a"), false); // not dir info.st_mode = S_IFREG; EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); ASSERT_EQ(lfs->DirExists("/a"), false); } @@ -299,19 +252,16 @@ TEST_F(Ext4LocalFileSystemTest, FileExistsTest) { info.st_mode = S_IFREG; // is file EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); ASSERT_EQ(lfs->FileExists("/a"), true); // stat failed EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(-1))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(-1))); ASSERT_EQ(lfs->FileExists("/a"), false); // not file info.st_mode = S_IFDIR; EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); ASSERT_EQ(lfs->FileExists("/a"), false); } @@ -320,11 +270,9 @@ TEST_F(Ext4LocalFileSystemTest, RenameTest) { LocalFileSystemOption option; option.enableRenameat2 = false; ASSERT_EQ(0, lfs->Init(option)); - EXPECT_CALL(*wrapper, rename(NotNull(), NotNull())) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, rename(NotNull(), NotNull())).WillOnce(Return(0)); ASSERT_EQ(lfs->Rename("/a", "/b"), 0); - EXPECT_CALL(*wrapper, rename(NotNull(), NotNull())) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, rename(NotNull(), NotNull())).WillOnce(Return(-1)); ASSERT_EQ(lfs->Rename("/a", "/b"), -errno); } @@ -333,13 +281,10 @@ TEST_F(Ext4LocalFileSystemTest, Renameat2Test) { LocalFileSystemOption option; option.enableRenameat2 = true; struct utsname kernel_info; - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.15.1-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(0, lfs->Init(option)); EXPECT_CALL(*wrapper, renameat2(NotNull(), NotNull(), 0)) .WillOnce(Return(0)); @@ -359,20 +304,17 @@ TEST_F(Ext4LocalFileSystemTest, ListTest) { memcpy(entryArray[2].d_name, "1", 1); vector names; // opendir failed - EXPECT_CALL(*wrapper, opendir(StrEq("/a"))) - .WillOnce(Return(nullptr)); + EXPECT_CALL(*wrapper, opendir(StrEq("/a"))).WillOnce(Return(nullptr)); ASSERT_EQ(lfs->List("/a", &names), -errno); // success - EXPECT_CALL(*wrapper, opendir(StrEq("/a"))) - .WillOnce(Return(dirp)); + EXPECT_CALL(*wrapper, opendir(StrEq("/a"))).WillOnce(Return(dirp)); 
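The stat stubbing throughout these cases is one gmock recipe: fill the caller's struct stat through SetArgPointee and pick the return code, so the code under test sees whatever file type the case needs. The generic shape, using the mock already defined here:

    struct stat info;
    info.st_mode = S_IFDIR;   // or S_IFREG for a plain file
    EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull()))
        .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(0)));
    // Every stat("/a", buf) now fills *buf with `info` and returns 0.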
EXPECT_CALL(*wrapper, readdir(dirp)) .Times(4) .WillOnce(Return(entryArray)) .WillOnce(Return(entryArray + 1)) .WillOnce(Return(entryArray + 2)) .WillOnce(Return(nullptr)); - EXPECT_CALL(*wrapper, closedir(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, closedir(_)).WillOnce(Return(0)); ASSERT_EQ(lfs->List("/a", &names), 0); ASSERT_THAT(names, ElementsAre("1")); } @@ -397,13 +339,11 @@ TEST_F(Ext4LocalFileSystemTest, ReadTest) { ASSERT_EQ(lfs->Read(666, buf, 0, 3), 2); ASSERT_STREQ(buf, "12"); // pread failed - EXPECT_CALL(*wrapper, pread(_, NotNull(), _, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, pread(_, NotNull(), _, _)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Read(666, buf, 0, 3), -errno); // set errno = EINTR,and will repeatedly return -1 errno = EINTR; - EXPECT_CALL(*wrapper, pread(_, NotNull(), _, _)) - .WillRepeatedly(Return(-1)); + EXPECT_CALL(*wrapper, pread(_, NotNull(), _, _)).WillRepeatedly(Return(-1)); ASSERT_EQ(lfs->Read(666, buf, 0, 3), -errno); // set errno = EINTR,but only return -1 once errno = EINTR; @@ -418,16 +358,12 @@ TEST_F(Ext4LocalFileSystemTest, ReadTest) { TEST_F(Ext4LocalFileSystemTest, WriteTest) { char buf[4] = {0}; // success - EXPECT_CALL(*wrapper, pwrite(_, buf, _, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*wrapper, pwrite(_, buf + 1, _, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*wrapper, pwrite(_, buf + 2, _, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*wrapper, pwrite(_, buf, _, _)).WillOnce(Return(1)); + EXPECT_CALL(*wrapper, pwrite(_, buf + 1, _, _)).WillOnce(Return(1)); + EXPECT_CALL(*wrapper, pwrite(_, buf + 2, _, _)).WillOnce(Return(1)); ASSERT_EQ(lfs->Write(666, buf, 0, 3), 3); // pwrite failed - EXPECT_CALL(*wrapper, pwrite(_, NotNull(), _, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, pwrite(_, NotNull(), _, _)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Write(666, buf, 0, 3), -errno); // set errno = EINTR,and will repeatedly return -1 errno = EINTR; @@ -509,12 +445,10 @@ TEST_F(Ext4LocalFileSystemTest, WriteIOBufTest) { // test Fallocate TEST_F(Ext4LocalFileSystemTest, FallocateTest) { // success - EXPECT_CALL(*wrapper, fallocate(_, _, _, _)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, fallocate(_, _, _, _)).WillOnce(Return(0)); ASSERT_EQ(lfs->Fallocate(666, 0, 0, 4096), 0); // fallocate failed - EXPECT_CALL(*wrapper, fallocate(_, _, _, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, fallocate(_, _, _, _)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Fallocate(666, 0, 0, 4096), -errno); } @@ -522,31 +456,27 @@ TEST_F(Ext4LocalFileSystemTest, FallocateTest) { TEST_F(Ext4LocalFileSystemTest, FstatTest) { struct stat info; // success - EXPECT_CALL(*wrapper, fstat(_, _)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, fstat(_, _)).WillOnce(Return(0)); ASSERT_EQ(lfs->Fstat(666, &info), 0); // fallocate failed - EXPECT_CALL(*wrapper, fstat(_, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, fstat(_, _)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Fstat(666, &info), -errno); } // test Fsync TEST_F(Ext4LocalFileSystemTest, FsyncTest) { // success - EXPECT_CALL(*wrapper, fsync(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, fsync(_)).WillOnce(Return(0)); ASSERT_EQ(lfs->Fsync(666), 0); // fallocate failed - EXPECT_CALL(*wrapper, fsync(_)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, fsync(_)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Fsync(666), -errno); } TEST_F(Ext4LocalFileSystemTest, ReadRealTest) { std::shared_ptr pw = std::make_shared(); lfs->SetPosixWrapper(pw); - int fd = lfs->Open("a", O_CREAT|O_RDWR); + int fd = lfs->Open("a", 
O_CREAT | O_RDWR); ASSERT_LT(0, fd); // 0 < fd char buf[8192] = {0}; ASSERT_EQ(4096, lfs->Write(fd, buf, 0, 4096)); diff --git a/test/integration/chunkserver/chunkserver_basic_test.cpp b/test/integration/chunkserver/chunkserver_basic_test.cpp index bc922d19e2..52657596ce 100644 --- a/test/integration/chunkserver/chunkserver_basic_test.cpp +++ b/test/integration/chunkserver/chunkserver_basic_test.cpp @@ -24,8 +24,8 @@ #include #include -#include #include +#include #include "test/chunkserver/datastore/filepool_helper.h" #include "test/integration/common/chunkservice_op.h" @@ -49,24 +49,23 @@ static constexpr uint32_t kOpRequestAlignSize = 4096; const char* kFakeMdsAddr = "127.0.0.1:9079"; -static const char *chunkServerParams[1][16] = { - { "chunkserver", "-chunkServerIp=127.0.0.1", - "-chunkServerPort=" BASIC_TEST_CHUNK_SERVER_PORT, - "-chunkServerStoreUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/", - "-chunkServerMetaUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT - "/chunkserver.dat", - "-copySetUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", - "-raftSnapshotUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", - "-raftLogUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", - "-recycleUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/recycler", - "-chunkFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkfilepool/", - "-chunkFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT - "/chunkfilepool.meta", - "-walFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool/", - "-walFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT - "/walfilepool.meta", - "-conf=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkserver.conf", - "-raft_sync_segments=true", NULL }, +static const char* chunkServerParams[1][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", + "-chunkServerPort=" BASIC_TEST_CHUNK_SERVER_PORT, + "-chunkServerStoreUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/", + "-chunkServerMetaUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT + "/chunkserver.dat", + "-copySetUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-raftSnapshotUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-raftLogUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-recycleUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/recycler", + "-chunkFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkfilepool/", + "-chunkFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT + "/chunkfilepool.meta", + "-walFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool/", + "-walFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool.meta", + "-conf=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkserver.conf", + "-raft_sync_segments=true", NULL}, }; butil::AtExitManager atExitManager; @@ -106,7 +105,7 @@ class ChunkServerIoTest : public testing::Test { paramsIndexs_[PeerCluster::PeerToId(peer1_)] = 0; params_.push_back(const_cast(chunkServerParams[0])); - // 初始化chunkfilepool,这里会预先分配一些chunk + // Initialize chunkfilepool, where some chunks will be pre allocated lfs_ = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); poolDir_ = "./" + std::to_string(PeerCluster::PeerToId(peer1_)) + "/chunkfilepool/"; @@ -124,11 +123,11 @@ class ChunkServerIoTest : public testing::Test { ::system(rmdir1.c_str()); - // 等待进程结束 + // Waiting for the process to end ::usleep(100 * 1000); } - int InitCluster(PeerCluster *cluster) { + int InitCluster(PeerCluster* cluster) { PeerId leaderId; Peer leaderPeer; cluster->SetElectionTimeoutMs(electionTimeoutMs_); @@ -138,7 +137,7 @@ class ChunkServerIoTest : public testing::Test { return 
-1; } - // 等待leader产生 + // Waiting for the leader to be generated if (cluster->WaitLeader(&leaderPeer_)) { LOG(ERROR) << "WaiteLeader failed"; return -1; @@ -167,45 +166,46 @@ class ChunkServerIoTest : public testing::Test { std::string leader = ""; PeerCluster cluster("InitShutdown-cluster", logicPoolId_, copysetId_, - peers_, params_, paramsIndexs_); + peers_, params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, InitCluster(&cluster)); - /* 场景一:新建的文件,Chunk文件不存在 */ + /* Scenario 1: Newly created file, Chunk file does not exist*/ ASSERT_EQ(0, verify->VerifyReadChunk(chunkId, sn1, 0, length, nullptr)); - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunkId, NULL_SN, NULL_SN, leader)); + ASSERT_EQ( + 0, verify->VerifyGetChunkInfo(chunkId, NULL_SN, NULL_SN, leader)); ASSERT_EQ(0, verify->VerifyDeleteChunk(chunkId, sn1)); - /* 场景二:通过WriteChunk产生chunk文件后操作 */ + /* Scenario 2: After generating a chunk file through WriteChunk, perform + * the operation*/ data.assign(length, 'a'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 0, 4 * KB, - data.c_str(), &chunkData)); + data.c_str(), &chunkData)); ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunkId, sn1, NULL_SN, leader)); - ASSERT_EQ(0, verify->VerifyReadChunk( - chunkId, sn1, 0, 4 * KB, &chunkData)); + ASSERT_EQ(0, + verify->VerifyReadChunk(chunkId, sn1, 0, 4 * KB, &chunkData)); ASSERT_EQ(0, verify->VerifyReadChunk(chunkId, sn1, kChunkSize - 4 * KB, - 4 * KB, nullptr)); + 4 * KB, nullptr)); data.assign(length, 'b'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 0, 4 * KB, - data.c_str(), &chunkData)); - ASSERT_EQ(0, - verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); + data.c_str(), &chunkData)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); data.assign(length, 'c'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 4 * KB, 4 * KB, - data.c_str(), &chunkData)); - ASSERT_EQ(0, - verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); + data.c_str(), &chunkData)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); data.assign(length * 2, 'd'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 4 * KB, 8 * KB, - data.c_str(), &chunkData)); - ASSERT_EQ(0, - verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); + data.c_str(), &chunkData)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); - /* 场景三:用户删除文件 */ + /* Scenario 3: User deletes files*/ ASSERT_EQ(0, verify->VerifyDeleteChunk(chunkId, sn1)); - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunkId, NULL_SN, NULL_SN, leader)); + ASSERT_EQ( + 0, verify->VerifyGetChunkInfo(chunkId, NULL_SN, NULL_SN, leader)); } void TestSnapshotIO(std::shared_ptr verify) { @@ -216,150 +216,164 @@ class ChunkServerIoTest : public testing::Test { const SequenceNum sn3 = 3; int length = kOpRequestAlignSize; std::string data(length * 4, 0); - std::string chunkData1a(kChunkSize, 0); // chunk1版本1预期数据 - std::string chunkData1b(kChunkSize, 0); // chunk1版本2预期数据 - std::string chunkData1c(kChunkSize, 0); // chunk1版本3预期数据 - std::string chunkData2(kChunkSize, 0); // chunk2预期数据 + std::string chunkData1a(kChunkSize, + 0); // chunk1 version 1 expected data + std::string chunkData1b(kChunkSize, + 0); // chunk1 version 2 expected data + std::string chunkData1c(kChunkSize, + 0); // chunk1 version 3 expected data + std::string chunkData2(kChunkSize, 0); // chunk2 expected data std::string leader = ""; PeerCluster cluster("InitShutdown-cluster", 
logicPoolId_, copysetId_, - peers_, params_, paramsIndexs_); + peers_, params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, InitCluster(&cluster)); - // 构造初始环境 - // 写chunk1产生chunk1,chunk1版本为1,chunk2开始不存在。 + // Construct the initial environment + // Writing to chunk1 creates it with version 1; chunk2 does not exist + // at the start. data.assign(length, 'a'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn1, 0, 12 * KB, - data.c_str(), &chunkData1a)); + data.c_str(), &chunkData1a)); /* - * 场景一:第一次给文件打快照 - */ + * Scenario 1: Taking a snapshot of the file for the first time + */ chunkData1b.assign(chunkData1a); // Simulate COW on chunk1 data data.assign(length, 'b'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 4 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b)); - // 重复写入同一区域,用于验证不会重复cow + // Write repeatedly to the same area to verify that no duplicate COW + // is performed data.assign(length, 'c'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 4 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b)); - // 读取chunk1快照,预期读到版本1数据 + // Read the chunk1 snapshot; expect version 1 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn1, 0, 12 * KB, - &chunkData1a)); + &chunkData1a)); - // chunk1写[0, 4KB] + // Chunk1 write [0, 4KB] data.assign(length, 'd'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 0, 4 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b)); - // chunk1写[4KB, 16KB] + // Chunk1 write [4KB, 16KB] data.assign(length, 'e'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 12 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b)); - // 获取chunk1信息,预期其版本为2,快照版本为1, + // Get chunk1 info; expect version 2 with snapshot + // version 1 ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn2, sn1, leader)); - // chunk1读[0, 12KB], 预期读到版本2数据 - ASSERT_EQ(0, - verify->VerifyReadChunk(chunk1, sn2, 0, 12 * KB, &chunkData1b)); + // Chunk1 read [0, 12KB]; expect version 2 data + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunk1, sn2, 0, 12 * KB, &chunkData1b)); - // 读取chunk1的快照, 预期读到版本1数据 + // Read the chunk1 snapshot; expect version 1 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn1, 0, 12 * KB, - &chunkData1a)); + &chunkData1a)); - // 读取chunk2的快照, 预期chunk不存在 - ASSERT_EQ(0, verify->VerifyReadChunkSnapshot( - chunk2, sn1, 0, 12 * KB, nullptr)); + // Read the chunk2 snapshot; expect the chunk not to exist + ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk2, sn1, 0, 12 * KB, + nullptr)); /* - * 场景二:第一次快照结束,删除快照 - */ - // 删除chunk1快照 + * Scenario 2: The first snapshot ends and the snapshot is deleted + */ + // Delete the chunk1 snapshot ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn2)); + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn2)); - // 获取chunk1信息,预期其版本为2,无快照版本 + // Get chunk1 info; expect version 2 with no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn2, NULL_SN, leader)); - // 删chunk2快照,预期成功 + // Delete the chunk2 snapshot; expect success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn2)); + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn2)); - // chunk2写[0, 8KB] + // Chunk2 write [0, 8KB] data.assign(length, 'f');
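The scenario comments above describe the chunkserver snapshot model: the first write at a higher sequence number copies the old data into the snapshot chunk once (COW), and later writes at the same sn reuse that copy. Scenario 1 as a compact trace of the verify calls already defined in this test:

    // write(chunk1, sn1, 'a')    -> chunk1 holds sn1 data
    // write(chunk1, sn2, 'b')    -> one COW; snapshot keeps the sn1 data
    // write(chunk1, sn2, 'c')    -> same area, no second COW
    // readSnapshot(chunk1, sn1)  -> still returns the sn1 data
    // getChunkInfo(chunk1)       -> current version sn2, snapshot sn1

ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn2, 0, 8 * 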
KB, - data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为2,无快照版本 + data.c_str(), &chunkData2)); + // Obtain chunk2 information, expect its version to be 2, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn2, NULL_SN, leader)); /* - * 场景三:第二次打快照 - */ - // chunk1写[0, 8KB] - chunkData1c.assign(chunkData1b); // 模拟对chunk1数据进行COW + * Scenario 3: Taking a second snapshot + */ + // Chunk1 write [0, 8KB] + chunkData1c.assign(chunkData1b); // Simulate COW on chunk1 data data.assign(length, 'g'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn3, 0, 8 * KB, - data.c_str(), &chunkData1c)); - // 获取chunk1信息,预期其版本为3,快照版本为2 + data.c_str(), &chunkData1c)); + // Obtain chunk1 information, expect its version to be 3 and snapshot + // version to be 2 ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn3, sn2, leader)); - // 读取chunk1的快照, 预期读到版本2数据 + // Reading snapshot of chunk1, expected to read version 2 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn2, 0, 12 * KB, - &chunkData1b)); + &chunkData1b)); - // 读取chunk2的快照, 预期读到版本2数据 + // Reading snapshot of chunk2, expected to read version 2 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk2, sn2, 0, 8 * KB, - &chunkData2)); + &chunkData2)); - // 删除chunk1文件,预期成功,本地快照存在的情况下,会将快照也一起删除 + // Delete chunk1 file, expected success. If the local snapshot exists, + // the snapshot will also be deleted together ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunk(chunk1, sn3)); + verify->VerifyDeleteChunk(chunk1, sn3)); /* - * 场景四:第二次快照结束,删除快照 - */ - // 删除chunk1快照,因为chunk1及其快照上一步已经删除,预期成功 + * Scenario 4: The second snapshot ends and the snapshot is deleted + */ + // Delete chunk1 snapshot because chunk1 and its snapshot have been + // deleted in the previous step and are expected to succeed ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn3)); - // 获取chunk1信息,预期不存在 - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunk1, NULL_SN, NULL_SN, leader)); + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn3)); + // Obtaining chunk1 information, expected not to exist + ASSERT_EQ(0, + verify->VerifyGetChunkInfo(chunk1, NULL_SN, NULL_SN, leader)); - // 删除chunk2快照,预期成功 + // Delete chunk2 snapshot, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn3)); - // 获取chunk2信息,预期其版本为2,无快照版本 + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn3)); + // Obtain chunk2 information, expect its version to be 2, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn2, NULL_SN, leader)); - // chunk2写[0, 4KB] + // Chunk2 write [0, 4KB] data.assign(length, 'h'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn3, 0, 4 * KB, - data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为3,无快照版本 + data.c_str(), &chunkData2)); + // Obtain chunk2 information, expect its version to be 3, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn3, NULL_SN, leader)); - // chunk2写[0, 4KB] + // Chunk2 write [0, 4KB] data.assign(length, 'i'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn3, 0, 4 * KB, data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为3,无快照版本 + // Obtain chunk2 information, expect its version to be 3, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn3, NULL_SN, leader)); /* - * 场景五:用户删除文件 - */ - // 删除chunk1,已不存在,预期成功 + * Scenario 5: User deletes files + */ + // Delete chunk1, it no longer exists, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunk(chunk1, 
sn3)); - // 获取chunk1信息,预期不存在 - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunk1, NULL_SN, NULL_SN, leader)); - // 删除chunk2,预期成功 + verify->VerifyDeleteChunk(chunk1, sn3)); + // Obtaining chunk1 information, expected not to exist + ASSERT_EQ(0, + verify->VerifyGetChunkInfo(chunk1, NULL_SN, NULL_SN, leader)); + // Delete chunk2, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunk(chunk2, sn3)); - // 获取chunk2信息,预期不存在 - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunk2, NULL_SN, NULL_SN, leader)); + verify->VerifyDeleteChunk(chunk2, sn3)); + // Obtaining chunk2 information, expected not to exist + ASSERT_EQ(0, + verify->VerifyGetChunkInfo(chunk2, NULL_SN, NULL_SN, leader)); } public: @@ -369,7 +383,7 @@ class ChunkServerIoTest : public testing::Test { CopysetID copysetId_; std::map paramsIndexs_; - std::vector params_; + std::vector params_; std::string externalIp_; private: @@ -390,8 +404,8 @@ class ChunkServerIoTest : public testing::Test { * */ TEST_F(ChunkServerIoTest, BasicIO) { - struct ChunkServiceOpConf opConf = { &leaderPeer_, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&leaderPeer_, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestBasicIO(verify); } @@ -400,15 +414,15 @@ TEST_F(ChunkServerIoTest, BasicIO_from_external_ip) { Peer exPeer; exPeer.set_address(externalIp_ + ":" + BASIC_TEST_CHUNK_SERVER_PORT + ":0"); - struct ChunkServiceOpConf opConf = { &exPeer, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&exPeer, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestBasicIO(verify); } TEST_F(ChunkServerIoTest, SnapshotIO) { - struct ChunkServiceOpConf opConf = { &leaderPeer_, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&leaderPeer_, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestSnapshotIO(verify); } @@ -416,8 +430,8 @@ TEST_F(ChunkServerIoTest, SnapshotIO) { TEST_F(ChunkServerIoTest, SnapshotIO_from_external_ip) { Peer exPeer; exPeer.set_address(externalIp_ + ":" + BASIC_TEST_CHUNK_SERVER_PORT + ":0"); - struct ChunkServiceOpConf opConf = { &exPeer, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&exPeer, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestSnapshotIO(verify); } diff --git a/test/integration/chunkserver/chunkserver_clone_recover.cpp b/test/integration/chunkserver/chunkserver_clone_recover.cpp index 58ce282336..0aae174746 100644 --- a/test/integration/chunkserver/chunkserver_clone_recover.cpp +++ b/test/integration/chunkserver/chunkserver_clone_recover.cpp @@ -20,9 +20,9 @@ * Author: qinyi */ -#include -#include #include +#include +#include #include #include @@ -30,14 +30,14 @@ #include #include "include/client/libcurve.h" -#include "src/common/s3_adapter.h" -#include "src/common/timeutility.h" -#include "src/client/inflight_controller.h" #include "src/chunkserver/cli2.h" +#include "src/client/inflight_controller.h" #include "src/common/concurrent/count_down_event.h" -#include "test/integration/common/chunkservice_op.h" +#include "src/common/s3_adapter.h" +#include "src/common/timeutility.h" #include "test/integration/client/common/file_operation.h" #include "test/integration/cluster_common/cluster.h" +#include "test/integration/common/chunkservice_op.h" #include "test/util/config_generator.h" using curve::CurveCluster; @@ -91,11 +91,11 @@ const uint32_t kChunkSize = 16 * 1024 * 1024; const uint32_t 
kChunkServerMaxIoSize = 64 * 1024; const std::vector mdsConf0{ - { "--confPath=" + MDS0_CONF_PATH }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--mdsDbName=" + CSCLONE_TEST_MDS_DBNAME }, - { "--sessionInterSec=20" }, - { "--etcdAddr=" + ETCD_CLIENT_IP_PORT }, + {"--confPath=" + MDS0_CONF_PATH}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--mdsDbName=" + CSCLONE_TEST_MDS_DBNAME}, + {"--sessionInterSec=20"}, + {"--etcdAddr=" + ETCD_CLIENT_IP_PORT}, }; const std::vector mdsFileConf0{ @@ -129,73 +129,67 @@ const std::vector csCommonConf{ }; const std::vector chunkserverConf1{ - { "-chunkServerStoreUri=local://" + CHUNKSERVER0_BASE_DIR }, - { "-chunkServerMetaUri=local://" + CHUNKSERVER0_BASE_DIR + - "/chunkserver.dat" }, - { "-copySetUri=local://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, - { "-raftSnapshotUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, - { "-raftLogUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, - { "-recycleUri=local://" + CHUNKSERVER0_BASE_DIR + "/recycler" }, - { "-chunkFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool" }, - { "-chunkFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + - "/chunkfilepool.meta" }, - { "-conf=" + CHUNKSERVER_CONF_PATH }, - { "-raft_sync_segments=true" }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=" + CHUNK_SERVER0_PORT }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/walfilepool" }, - { "-walFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://" + CHUNKSERVER0_BASE_DIR}, + {"-chunkServerMetaUri=local://" + CHUNKSERVER0_BASE_DIR + + "/chunkserver.dat"}, + {"-copySetUri=local://" + CHUNKSERVER0_BASE_DIR + "/copysets"}, + {"-raftSnapshotUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets"}, + {"-raftLogUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets"}, + {"-recycleUri=local://" + CHUNKSERVER0_BASE_DIR + "/recycler"}, + {"-chunkFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool"}, + {"-chunkFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool.meta"}, + {"-conf=" + CHUNKSERVER_CONF_PATH}, + {"-raft_sync_segments=true"}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=" + CHUNK_SERVER0_PORT}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/walfilepool"}, + {"-walFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/walfilepool.meta"}}; const std::vector chunkserverConf2{ - { "-chunkServerStoreUri=local://" + CHUNKSERVER1_BASE_DIR }, - { "-chunkServerMetaUri=local://" + CHUNKSERVER1_BASE_DIR + - "/chunkserver.dat" }, - { "-copySetUri=local://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, - { "-raftSnapshotUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, - { "-raftLogUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, - { "-recycleUri=local://" + CHUNKSERVER1_BASE_DIR + "/recycler" }, - { "-chunkFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/filepool" }, - { "-chunkFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + - "/chunkfilepool.meta" }, - { "-conf=" + CHUNKSERVER_CONF_PATH }, - { "-raft_sync_segments=true" }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=" + CHUNK_SERVER1_PORT }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=" + CHUNKSERVER1_BASE_DIR + 
"/walfilepool" }, - { "-walFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://" + CHUNKSERVER1_BASE_DIR}, + {"-chunkServerMetaUri=local://" + CHUNKSERVER1_BASE_DIR + + "/chunkserver.dat"}, + {"-copySetUri=local://" + CHUNKSERVER1_BASE_DIR + "/copysets"}, + {"-raftSnapshotUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets"}, + {"-raftLogUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets"}, + {"-recycleUri=local://" + CHUNKSERVER1_BASE_DIR + "/recycler"}, + {"-chunkFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/filepool"}, + {"-chunkFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/chunkfilepool.meta"}, + {"-conf=" + CHUNKSERVER_CONF_PATH}, + {"-raft_sync_segments=true"}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=" + CHUNK_SERVER1_PORT}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/walfilepool"}, + {"-walFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/walfilepool.meta"}}; const std::vector chunkserverConf3{ - { "-chunkServerStoreUri=local://" + CHUNKSERVER2_BASE_DIR }, - { "-chunkServerMetaUri=local://" + CHUNKSERVER2_BASE_DIR + - "/chunkserver.dat" }, - { "-copySetUri=local://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, - { "-raftSnapshotUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, - { "-raftLogUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, - { "-recycleUri=local://" + CHUNKSERVER2_BASE_DIR + "/recycler" }, - { "-chunkFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/filepool" }, - { "-chunkFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + - "/chunkfilepool.meta" }, - { "-conf=" + CHUNKSERVER_CONF_PATH }, - { "-raft_sync_segments=true" }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=" + CHUNK_SERVER2_PORT }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/walfilepool" }, - { "-walFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://" + CHUNKSERVER2_BASE_DIR}, + {"-chunkServerMetaUri=local://" + CHUNKSERVER2_BASE_DIR + + "/chunkserver.dat"}, + {"-copySetUri=local://" + CHUNKSERVER2_BASE_DIR + "/copysets"}, + {"-raftSnapshotUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets"}, + {"-raftLogUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets"}, + {"-recycleUri=local://" + CHUNKSERVER2_BASE_DIR + "/recycler"}, + {"-chunkFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/filepool"}, + {"-chunkFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/chunkfilepool.meta"}, + {"-conf=" + CHUNKSERVER_CONF_PATH}, + {"-raft_sync_segments=true"}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=" + CHUNK_SERVER2_PORT}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/walfilepool"}, + {"-walFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/walfilepool.meta"}}; namespace curve { namespace chunkserver { @@ -203,7 +197,9 @@ namespace chunkserver { class CSCloneRecoverTest : public ::testing::Test { public: CSCloneRecoverTest() - : logicPoolId_(1), copysetId_(1), chunkData1_(kChunkSize, 'X'), + : logicPoolId_(1), + copysetId_(1), + chunkData1_(kChunkSize, 'X'), chunkData2_(kChunkSize, 'Y') {} void SetUp() { @@ -217,11 +213,11 @@ class CSCloneRecoverTest : public ::testing::Test { s3Conf); 
cluster_->PrepareConfig(MDS0_CONF_PATH, mdsFileConf0); -        // 生成chunkserver配置文件 +        // Generate the chunkserver configuration file cluster_->PrepareConfig(CHUNKSERVER_CONF_PATH, csCommonConf); -        // 1. 启动etcd +        // 1. Start etcd LOG(INFO) << "begin to start etcd"; pid_t pid = cluster_->StartSingleEtcd( 1, ETCD_CLIENT_IP_PORT, ETCD_PEER_IP_PORT, @@ -231,19 +227,20 @@ class CSCloneRecoverTest : public ::testing::Test { ASSERT_GT(pid, 0); ASSERT_TRUE(cluster_->WaitForEtcdClusterAvalible(5)); -        // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 +        // 2. Start one mds first, make it the leader, and then start the other +        // two mds nodes pid = cluster_->StartSingleMDS(0, MDS0_IP_PORT, MDS0_DUMMY_PORT, -                                       mdsConf0, true); +                                        mdsConf0, true); LOG(INFO) << "mds 0 started on " + MDS0_IP_PORT + ", pid = " << pid; ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); -        // 生成topo.json +        // Generate topo.json Json::Value topo; Json::Value servers; std::string chunkServerIpPort[] = {CHUNK_SERVER0_IP_PORT, -                                           CHUNK_SERVER1_IP_PORT, -                                           CHUNK_SERVER2_IP_PORT}; +                                            CHUNK_SERVER1_IP_PORT, +                                            CHUNK_SERVER2_IP_PORT}; for (int i = 0; i < 3; ++i) { Json::Value server; std::vector ipPort; @@ -278,7 +275,7 @@ class CSCloneRecoverTest : public ::testing::Test { topoConf << topo.toStyledString(); topoConf.close(); -        // 3. 创建物理池 +        // 3. Create the physical pool string createPPCmd = string("./bazel-bin/tools/curvefsTool") + string(" -cluster_map=" + CSCLONE_BASE_DIR + "/topo.json") + @@ -291,13 +288,12 @@ class CSCloneRecoverTest : public ::testing::Test { while (retry < 5) { LOG(INFO) << "exec createPPCmd: " << createPPCmd; ret = system(createPPCmd.c_str()); -            if (ret == 0) -                break; +            if (ret == 0) break; retry++; } ASSERT_EQ(ret, 0); -        // 4. 创建chunkserver +        // 4. Create the chunkservers pid = cluster_->StartSingleChunkServer(1, CHUNK_SERVER0_IP_PORT, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " + CHUNK_SERVER0_IP_PORT + @@ -319,7 +315,8 @@ class CSCloneRecoverTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); -        // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主 +        // 5. Create the logical pool, then sleep for a while so the underlying +        // copysets can elect their leaders first string createLPCmd = string("./bazel-bin/tools/curvefsTool") + string(" -cluster_map=" + CSCLONE_BASE_DIR + "/topo.json") + @@ -331,27 +328,26 @@ class CSCloneRecoverTest : public ::testing::Test { while (retry < 5) { LOG(INFO) << "exec createLPCmd: " << createLPCmd; ret = system(createLPCmd.c_str()); -            if (ret == 0) -                break; +            if (ret == 0) break; retry++; } ASSERT_EQ(ret, 0); std::this_thread::sleep_for(std::chrono::seconds(5)); -        // 获取chunkserver主节点 +        // Obtain the chunkserver leader node logicPoolId_ = 1; copysetId_ = 1; ASSERT_EQ(0, chunkSeverGetLeader()); -        struct ChunkServiceOpConf conf0 = { &leaderPeer_, logicPoolId_, -                                            copysetId_, 5000 }; +        struct ChunkServiceOpConf conf0 = {&leaderPeer_, logicPoolId_, +                                           copysetId_, 5000}; opConf_ = conf0; -        // 6. 初始化client配置 +        // 6. Initialize the client configuration LOG(INFO) << "init globalclient"; ret = Init(clientConfPath.c_str()); ASSERT_EQ(ret, 0); -        // 7. 先睡眠5s,让chunkserver选出leader +        // 7.
Sleep 5s first so the chunkservers can elect a leader std::this_thread::sleep_for(std::chrono::seconds(5)); s3Adapter_.Init(kS3ConfigPath); @@ -417,10 +413,10 @@ class CSCloneRecoverTest : public ::testing::Test { system(("mkdir " + CHUNKSERVER2_BASE_DIR + "/filepool").c_str())); } -    /**下发一个写请求并等待完成 -     * @param: offset是当前需要下发IO的偏移 -     * @param: size是下发IO的大小 -     * @return: IO是否成功完成 +    /** Issue a write request and wait for it to complete +     * @param: offset is the offset at which to issue the IO +     * @param: size is the size of the IO to issue +     * @return: whether the IO completed successfully */ bool HandleAioWriteRequest(uint64_t offset, uint64_t size, const char* data) { @@ -432,7 +428,8 @@ class CSCloneRecoverTest : public ::testing::Test { char* buffer = reinterpret_cast(context->buf); delete[] buffer; delete context; -        // 无论IO是否成功,只要返回,就触发cond +            // Signal cond as soon as the IO returns, regardless of whether +            // it succeeded gCond.Signal(); }; @@ -447,8 +444,7 @@ class CSCloneRecoverTest : public ::testing::Test { int ret; if ((ret = AioWrite(fd_, context))) { -        LOG(ERROR) << "failed to send aio write request, err=" -                   << ret; +        LOG(ERROR) << "failed to send aio write request, err=" << ret; return false; } @@ -460,11 +456,11 @@ class CSCloneRecoverTest : public ::testing::Test { return true; } -    /**下发一个读请求并等待完成 -     * @param: offset是当前需要下发IO的偏移 -     * @param: size是下发IO的大小 -     * @data: 读出的数据 -     * @return: IO是否成功完成 +    /** Issue a read request and wait for it to complete +     * @param: offset is the offset at which to issue the IO +     * @param: size is the size of the IO to issue +     * @data: the buffer that receives the data read +     * @return: whether the IO completed successfully */ bool HandleAioReadRequest(uint64_t offset, uint64_t size, char* data) { gCond.Reset(1); @@ -473,7 +469,8 @@ class CSCloneRecoverTest : public ::testing::Test { auto readCallBack = [](CurveAioContext* context) { gIoRet = context->ret; delete context; -        // 无论IO是否成功,只要返回,就触发cond +            // Signal cond as soon as the IO returns, regardless of whether +            // it succeeded gCond.Signal(); }; @@ -485,8 +482,7 @@ class CSCloneRecoverTest : public ::testing::Test { context->cb = readCallBack; int ret; if ((ret = AioRead(fd_, context))) { -        LOG(ERROR) << "failed to send aio read request, err=" -                   << ret; +        LOG(ERROR) << "failed to send aio read request, err=" << ret; return false; }
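Both helpers above turn libcurve's asynchronous AioWrite/AioRead into blocking calls by signalling a file-scope condition from the completion callback. A minimal sketch of that idiom, assuming the libcurve C API from include/client/libcurve.h and the test's file-scope gCond/gIoRet (the callback must be capture-free because CurveAioContext::cb is a plain function pointer), and assuming ret carries the byte count on success:

// Sketch of the blocking-over-async idiom used by the helpers above.
static void OnAioDone(CurveAioContext* context) {
    gIoRet = context->ret;  // hand the IO result to the waiting thread
    delete context;
    gCond.Signal();         // wake the waiter whether the IO succeeded or not
}

bool BlockingRead(int fd, uint64_t offset, uint64_t size, char* out) {
    gCond.Reset(1);
    auto* context = new CurveAioContext();
    context->op = LIBCURVE_OP::LIBCURVE_OP_READ;
    context->offset = offset;
    context->length = size;
    context->buf = out;
    context->cb = OnAioDone;
    if (AioRead(fd, context) != 0) {  // submission failed; callback never runs
        delete context;
        return false;
    }
    gCond.Wait();  // blocks until OnAioDone signals
    return gIoRet == static_cast<int>(size);
}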
@@ -547,7 +543,7 @@ class CSCloneRecoverTest : public ::testing::Test { return -1; } -        // 先睡眠5s,让chunkserver选出leader +        // Sleep 5s first so the chunkservers can elect a leader std::this_thread::sleep_for(std::chrono::seconds(5)); status = curve::chunkserver::GetLeader(logicPoolId_, copysetId_, csConf, &leaderPeer_); @@ -559,26 +555,26 @@ } void prepareSourceDataInCurve() { -        // 创建一个curveFS文件 +        // Create a curveFS file LOG(INFO) << "create source curveFS file: " << CURVEFS_FILENAME; fd_ = curve::test::FileCommonOperation::Open(CURVEFS_FILENAME, "curve"); ASSERT_NE(fd_, -1); -        // 写数据到curveFS的第1个chunk +        // Write data to the first chunk of curveFS LOG(INFO) << "Write first 16MB of source curveFS file"; ASSERT_TRUE(HandleAioWriteRequest(0, kChunkSize, chunkData1_.c_str())); -        // 读出数据进行验证 +        // Read the data back for verification std::unique_ptr temp(new char[kChunkSize]); ASSERT_TRUE(HandleAioReadRequest(0, kChunkSize, temp.get())); ASSERT_EQ(0, strncmp(chunkData1_.c_str(), temp.get(), kChunkSize)); -        // 写数据到curveFS的第2个chunk +        // Write data to the second chunk of curveFS LOG(INFO) << "Write second 16MB of source curveFS file"; ASSERT_TRUE( HandleAioWriteRequest(kChunkSize, kChunkSize, chunkData2_.c_str())); -        // 读出数据进行验证 +        // Read the data back for verification ASSERT_TRUE(HandleAioReadRequest(kChunkSize, kChunkSize, temp.get())); ASSERT_EQ(0, strncmp(chunkData2_.c_str(), temp.get(), kChunkSize)); @@ -613,14 +609,14 @@ class CSCloneRecoverTest : public ::testing::Test { bool s3ObjExisted_; }; -// 场景一:通过ReadChunk从curve恢复克隆文件 +// Scenario 1: Recover a clone file from curve via ReadChunk TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { LOG(INFO) << "current case: CloneFromCurveByReadChunk"; -    // 0. 在curve中写入源数据 +    // 0. Write source data to curve prepareSourceDataInCurve(); -    // 1. 创建克隆文件 +    // 1. Create the clone chunks ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 331; ChunkID cloneChunk2 = 332; @@ -633,7 +629,7 @@ kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); -    // 重复克隆 +    // Repeat the clone ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -647,7 +643,7 @@ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); -    // 2. 通过readchunk恢复克隆文件 +    // 2. Recover the clone file via ReadChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, cloneData1.get())); @@ -667,17 +663,18 @@ ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); -    // 通过ReadChunk读遍clone chunk1的所有pages +    // Read every page of clone chunk1 via ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { -        ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, -                                            kChunkServerMaxIoSize, -                                            cloneData1.get())); +        ASSERT_EQ( +            0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, +                                      kChunkServerMaxIoSize, cloneData1.get())); } /** -     * clone文件遍读后不会转换为普通chunk1文件 -     * 通过增大版本进行写入, -     * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 +     * After a full read, the clone file is not converted into a regular +     * chunk file. Writing with a larger version distinguishes the two: a +     * clone chunk rejects the write, while a regular chunk accepts it and +     * generates a snapshot file. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -685,19 +682,19 @@ verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); -    // 删除文件 +    // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn1)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); }
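Scenario 1 and the six scenarios that follow all exercise the conversion rule their block comments state piecemeal; gathered in one place (a summary of what these tests assert, not an authoritative specification):

// Conversion rule asserted by scenarios 1-7:
//   - Reading every page of a clone chunk (ReadChunk) does not convert it:
//     it remains a clone chunk, and a write with a larger sequence number
//     is rejected.
//   - Recovering every page (RecoverChunk) or overwriting every page does
//     convert it into a regular chunk: a write with a larger sequence
//     number then succeeds and produces a copy-on-write snapshot file.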
-// 场景二:通过RecoverChunk从curve恢复克隆文件 +// Scenario 2: Recover a clone file from curve via RecoverChunk TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { LOG(INFO) << "current case: CloneFromCurveByRecoverChunk"; -    // 0. 在curve中写入源数据 +    // 0. Write source data to curve prepareSourceDataInCurve(); -    // 1. 创建克隆文件 +    // 1. Create the clone chunks ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 333; ChunkID cloneChunk2 = 334; @@ -710,7 +707,7 @@ kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); -    // 重复克隆 +    // Repeat the clone ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -724,7 +721,7 @@ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); -    // 2. 通过RecoverChunk恢复克隆文件 +    // 2. Recover the clone file via RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, @@ -750,16 +747,17 @@ ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); -    // 通过RecoverChunk恢复clone chunk1的所有pages +    // Recover every page of clone chunk1 via RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** -     * 预期clone文件会转换为普通chunk1文件 -     * 通过增大版本进行写入, -     * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件,写成功。 +     * The clone file is expected to be converted into a regular chunk file. +     * Writing with a larger version distinguishes the two: a clone chunk +     * rejects the write, while a regular chunk accepts it, generating a +     * snapshot file and completing the write successfully. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -767,19 +765,19 @@ verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); -    // 删除文件 +    // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景三:lazy allocate场景下读克隆文件 +// Scenario 3: Read a clone file under lazy allocation TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { LOG(INFO) << "current case: CloneFromCurveByReadChunkWhenLazyAlloc"; -    // 0. 在curve中写入源数据 +    // 0. Write source data to curve prepareSourceDataInCurve(); -    // 1. chunk文件不存在 +    // 1. The chunk file does not exist yet ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 331; SequenceNum sn1 = 1; @@ -802,9 +800,9 @@ verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); -    // 将leader切换到follower +    // Transfer leadership to a follower ASSERT_EQ(0, TransferLeaderToFollower()); -    // 2. 通过readchunk恢复克隆文件 +    // 2.
Recover the clone file via ReadChunk ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get(), CURVEFS_FILENAME, 0)); @@ -817,7 +815,7 @@ ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get(), CURVEFS_FILENAME, 0)); -    // 通过ReadChunk读遍clone chunk1的所有pages +    // Read every page of clone chunk1 via ReadChunk string ioBuf(kChunkServerMaxIoSize, 'c'); for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyWriteChunk( @@ -828,9 +826,10 @@ cloneData1.get(), CURVEFS_FILENAME, 0)); /** -     * clone文件遍写后会转换为普通chunk1文件 -     * 通过增大版本进行写入, -     * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 +     * After every page has been overwritten, the clone file is converted +     * into a regular chunk file. Writing with a larger version distinguishes +     * the two: a clone chunk rejects the write, while a regular chunk accepts +     * it and generates a snapshot file. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -838,18 +837,18 @@ verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); -    // 删除文件 +    // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); } -// 场景四:通过ReadChunk从S3恢复克隆文件 +// Scenario 4: Recover a clone file from S3 via ReadChunk TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { LOG(INFO) << "current case: CloneFromS3ByReadChunk"; -    // 0. 在S3中写入源数据 +    // 0. Write source data to S3 prepareSourceDataInS3(); -    // 1. 创建克隆文件 +    // 1. Create the clone chunks ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 335; ChunkID cloneChunk2 = 336; @@ -862,7 +861,7 @@ kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); -    // 重复克隆 +    // Repeat the clone ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -875,7 +874,7 @@ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); -    // 2. 通过readchunk恢复克隆文件 +    // 2. Recover the clone file via ReadChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, cloneData1.get())); @@ -895,17 +894,18 @@ ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); -    // 通过ReadChunk读遍clone chunk1的所有pages +    // Read every page of clone chunk1 via ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { -        ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, -                                            kChunkServerMaxIoSize, -                                            cloneData1.get())); +        ASSERT_EQ( +            0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, +                                      kChunkServerMaxIoSize, cloneData1.get())); } /** -     * 预期clone文件遍读后不会转换为普通chunk1文件 -     * 通过增大版本进行写入, -     * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 +     * After a full read, the clone file is not expected to be converted into +     * a regular chunk file. Writing with a larger version distinguishes the +     * two: a clone chunk rejects the write, while a regular chunk accepts it +     * and generates a snapshot file.
*/ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -913,19 +913,19 @@ verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); -    // 删除文件 +    // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn1)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景五:通过RecoverChunk从S3恢复克隆文件 +// Scenario 5: Recover a clone file from S3 via RecoverChunk TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { LOG(INFO) << "current case: CloneFromS3ByRecoverChunk"; -    // 0. 在S3中写入源数据 +    // 0. Write source data to S3 prepareSourceDataInS3(); -    // 1. 创建克隆文件 +    // 1. Create the clone chunks ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 337; ChunkID cloneChunk2 = 338; @@ -938,7 +938,7 @@ kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); -    // 重复克隆 +    // Repeat the clone ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -951,7 +951,7 @@ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); -    // 2. 通过RecoverChunk恢复克隆文件 +    // 2. Recover the clone file via RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, @@ -977,16 +977,17 @@ ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); -    // 通过RecoverChunk恢复clone chunk1的所有pages +    // Recover every page of clone chunk1 via RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** -     * 预期clone文件会转换为普通chunk1文件 -     * 通过增大版本进行写入, -     * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 +     * The clone file is expected to be converted into a regular chunk file. +     * Writing with a larger version distinguishes the two: a clone chunk +     * rejects the write, while a regular chunk accepts it and generates a +     * snapshot file. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -994,19 +995,19 @@ verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); -    // 删除文件 +    // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景六:通过ReadChunk从S3恢复 +// Scenario 6: Recover from S3 via ReadChunk TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { LOG(INFO) << "current case: RecoverFromS3ByReadChunk"; -    // 0. 构造数据上传到S3,模拟转储 +    // 0. Construct data and upload it to S3 to simulate a dump prepareSourceDataInS3(); -    // 1. 创建克隆文件 +    // 1. Create the clone chunk ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 339; SequenceNum sn2 = 2; @@ -1018,13 +1019,13 @@ kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); -    // 重复克隆 +    // Repeat the clone ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn3, sn2, kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); -    // 2. 通过readchunk恢复克隆文件 +    // 2.
Recover the clone file via ReadChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 8 * KB, cloneData1.get())); @@ -1044,17 +1045,18 @@ ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 12 * KB, cloneData1.get())); -    // 通过ReadChunk读遍clone chunk1的所有pages +    // Read every page of clone chunk1 via ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { -        ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, offset, -                                            kChunkServerMaxIoSize, -                                            cloneData1.get())); +        ASSERT_EQ( +            0, verify.VerifyReadChunk(cloneChunk1, sn3, offset, +                                      kChunkServerMaxIoSize, cloneData1.get())); } /** -     * 预期clone文件不会转换为普通chunk1文件 -     * 通过增大版本进行写入, -     * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 +     * The clone file is not expected to be converted into a regular chunk +     * file. Writing with a larger version distinguishes the two: a clone +     * chunk rejects the write, while a regular chunk accepts it and +     * generates a snapshot file. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn3, NULL_SN, string(""))); @@ -1062,18 +1064,18 @@ verify.VerifyWriteChunk(cloneChunk1, sn4, 0, 8 * KB, temp.c_str(), nullptr)); -    // 删除文件 +    // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn3)); } -// 场景七:通过RecoverChunk从S3恢复 +// Scenario 7: Recover from S3 via RecoverChunk TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { LOG(INFO) << "current case: RecoverFromS3ByRecoverChunk"; -    // 0. 在S3中写入源数据 +    // 0. Write source data to S3 prepareSourceDataInS3(); -    // 1. 创建克隆文件 +    // 1. Create the clone chunk ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 341; SequenceNum sn2 = 2; @@ -1085,13 +1087,13 @@ kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); -    // 重复克隆 +    // Repeat the clone ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn3, sn2, kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); -    // 2. 通过RecoverChunk恢复克隆文件 +    // 2. Recover the clone file via RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 8 * KB, @@ -1117,16 +1119,17 @@ ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 12 * KB, cloneData1.get())); -    // 通过RecoverChunk恢复clone chunk1的所有pages +    // Recover every page of clone chunk1 via RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** -     * 预期clone文件会转换为普通chunk1文件 -     * 通过增大版本进行写入, -     * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 +     * The clone file is expected to be converted into a regular chunk file. +     * Writing with a larger version distinguishes the two: a clone chunk +     * rejects the write, while a regular chunk accepts it and generates a +     * snapshot file.
*/ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn3, NULL_SN, string(""))); @@ -1134,7 +1137,7 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn4, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn4)); } diff --git a/test/integration/chunkserver/chunkserver_concurrent_test.cpp b/test/integration/chunkserver/chunkserver_concurrent_test.cpp index b38f819da7..0698c9756e 100644 --- a/test/integration/chunkserver/chunkserver_concurrent_test.cpp +++ b/test/integration/chunkserver/chunkserver_concurrent_test.cpp @@ -21,72 +21,60 @@ */ #include -#include #include +#include #include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" +#include "src/common/concurrent/concurrent.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "src/common/concurrent/concurrent.h" -#include "test/integration/common/peer_cluster.h" #include "test/chunkserver/datastore/filepool_helper.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::common::Thread; +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; -using curve::common::Thread; static const char* kFakeMdsAddr = "127.0.0.1:9329"; constexpr uint32_t kOpRequestAlignSize = 4096; -static const char *chunkConcurrencyParams1[1][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9076", - "-chunkServerStoreUri=local://./9076/", - "-chunkServerMetaUri=local://./9076/chunkserver.dat", - "-copySetUri=local://./9076/copysets", - "-raftSnapshotUri=curve://./9076/copysets", - "-raftLogUri=curve://./9076/copysets", - "-recycleUri=local://./9076/recycler", - "-chunkFilePoolDir=./9076/chunkfilepool/", - "-chunkFilePoolMetaPath=./9076/chunkfilepool.meta", - "-walFilePoolDir=./9076/walfilepool/", - "-walFilePoolMetaPath=./9076/walfilepool.meta", - "-conf=./9076/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, +static const char* chunkConcurrencyParams1[1][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9076", + "-chunkServerStoreUri=local://./9076/", + "-chunkServerMetaUri=local://./9076/chunkserver.dat", + "-copySetUri=local://./9076/copysets", + "-raftSnapshotUri=curve://./9076/copysets", + "-raftLogUri=curve://./9076/copysets", + "-recycleUri=local://./9076/recycler", + "-chunkFilePoolDir=./9076/chunkfilepool/", + "-chunkFilePoolMetaPath=./9076/chunkfilepool.meta", + "-walFilePoolDir=./9076/walfilepool/", + "-walFilePoolMetaPath=./9076/walfilepool.meta", + "-conf=./9076/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; -static const char *chunkConcurrencyParams2[1][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9077", - "-chunkServerStoreUri=local://./9077/", - "-chunkServerMetaUri=local://./9077/chunkserver.dat", - "-copySetUri=local://./9077/copysets", - "-raftSnapshotUri=curve://./9077/copysets", - "-raftLogUri=curve://./9077/copysets", - "-recycleUri=local://./9077/recycler", - "-chunkFilePoolDir=./9077/chunkfilepool/", - "-chunkFilePoolMetaPath=./9077/chunkfilepool.meta", - "-walFilePoolDir=./9077/walfilepool/", - "-walFilePoolMetaPath=./9077/walfilepool.meta", - "-conf=./9077/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, +static const 
char* chunkConcurrencyParams2[1][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9077", + "-chunkServerStoreUri=local://./9077/", + "-chunkServerMetaUri=local://./9077/chunkserver.dat", + "-copySetUri=local://./9077/copysets", + "-raftSnapshotUri=curve://./9077/copysets", + "-raftLogUri=curve://./9077/copysets", + "-recycleUri=local://./9077/recycler", + "-chunkFilePoolDir=./9077/chunkfilepool/", + "-chunkFilePoolMetaPath=./9077/chunkfilepool.meta", + "-walFilePoolDir=./9077/walfilepool/", + "-walFilePoolMetaPath=./9077/walfilepool.meta", + "-conf=./9077/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; butil::AtExitManager atExitManager; @@ -94,7 +82,7 @@ const int kChunkNum = 10; const ChunkSizeType kChunkSize = 16 * 1024 * 1024; const PageSizeType kPageSize = kOpRequestAlignSize; -// chunk不从FilePool获取的chunkserver并发测试 +// Chunkserver concurrency test where chunks are not obtained from the FilePool class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { protected: virtual void SetUp() { @@ -136,14 +124,14 @@ class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { ::usleep(100 * 1000); } -    void InitCluster(PeerCluster *cluster) { +    void InitCluster(PeerCluster* cluster) { PeerId leaderId; Peer leaderPeer; cluster->SetElectionTimeoutMs(electionTimeoutMs); cluster->SetsnapshotIntervalS(snapshotIntervalS); ASSERT_EQ(0, cluster->StartPeer(peer1, PeerCluster::PeerToId(peer1))); -        // 等待leader产生 +        // Waiting for the leader to be generated ASSERT_EQ(0, cluster->WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); @@ -162,10 +150,10 @@ class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { CopysetID copysetId; std::map paramsIndexs; -    std::vector params; +    std::vector params; }; -// chunk从FilePool获取的chunkserver并发测试 +// Chunkserver concurrency test where chunks are obtained from the FilePool class ChunkServerConcurrentFromFilePoolTest : public testing::Test { protected: virtual void SetUp() { @@ -182,7 +170,7 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { electionTimeoutMs = 3000; snapshotIntervalS = 60; - ASSERT_TRUE(cg1.Init("9077")); cg1.SetKV("copyset.election_timeout_ms", "3000"); cg1.SetKV("copyset.snapshot_interval_s", "60"); @@ -197,14 +184,12 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { params.push_back(const_cast(chunkConcurrencyParams2[0])); -        // 初始化FilePool,这里会预先分配一些chunk +        // Initialize the FilePool; some chunks are pre-allocated here lfs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); -        poolDir = "./" -            + std::to_string(PeerCluster::PeerToId(peer1)) -            + "/chunkfilepool/"; -        metaDir = "./" -            + std::to_string(PeerCluster::PeerToId(peer1)) -            + "/chunkfilepool.meta"; +        poolDir = "./" + std::to_string(PeerCluster::PeerToId(peer1)) + +                  "/chunkfilepool/"; +        metaDir = "./" + std::to_string(PeerCluster::PeerToId(peer1)) + +                  "/chunkfilepool.meta"; FilePoolMeta meta(kChunkSize, kPageSize, poolDir); FilePoolHelper::PersistEnCodeMetaInfo(lfs, meta, metaDir); @@ -212,7 +197,7 @@ // There maybe one chunk in cleaning, so you should allocate // (kChunkNum + 1) chunks in start if you want to use kChunkNum chunks.
// This situation will not occur in the production environment - allocateChunk(lfs, kChunkNum+1, poolDir, kChunkSize); + allocateChunk(lfs, kChunkNum + 1, poolDir, kChunkSize); } virtual void TearDown() { std::string rmdir1("rm -fr "); @@ -223,14 +208,14 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { // wait for process exit ::usleep(100 * 1000); } - void InitCluster(PeerCluster *cluster) { + void InitCluster(PeerCluster* cluster) { PeerId leaderId; Peer leaderPeer; cluster->SetElectionTimeoutMs(electionTimeoutMs); cluster->SetsnapshotIntervalS(snapshotIntervalS); ASSERT_EQ(0, cluster->StartPeer(peer1, PeerCluster::PeerToId(peer1))); - // 等待leader产生 + // Waiting for the leader to be generated ASSERT_EQ(0, cluster->WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); @@ -242,28 +227,23 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { std::vector peers; PeerId leaderId; Peer leaderPeer; - int electionTimeoutMs; - int snapshotIntervalS; + int electionTimeoutMs; + int snapshotIntervalS; LogicPoolID logicPoolId; - CopysetID copysetId; + CopysetID copysetId; - std::map paramsIndexs; - std::vector params; + std::map paramsIndexs; + std::vector params; std::string poolDir; std::string metaDir; - std::shared_ptr lfs; + std::shared_ptr lfs; }; -// 写chunk -int WriteChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - off_t offset, - size_t len, - const char *data, +// Write chunk +int WriteChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkId, off_t offset, size_t len, const char* data, const int sn = 1) { PeerId leaderId(leader.address()); brpc::Channel channel; @@ -298,13 +278,9 @@ int WriteChunk(Peer leader, return 0; } -// 随机选择一个chunk的随机offset进行read -void RandReadChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkIdRange, - const int loop, - const int sn = 1) { +// Randomly select a chunk's random offset for read +void RandReadChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkIdRange, const int loop, const int sn = 1) { int ret = 0; uint64_t appliedIndex = 1; PeerId leaderId(leader.address()); @@ -313,7 +289,7 @@ void RandReadChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -329,7 +305,7 @@ void RandReadChunk(Peer leader, request.set_size(kOpRequestAlignSize); request.set_appliedindex(appliedIndex); - // 随机选择一个offset + // Randomly select an offset uint64_t pageIndex = butil::fast_rand_less_than(kChunkSize / kPageSize); request.set_offset(pageIndex * kPageSize); @@ -341,7 +317,8 @@ void RandReadChunk(Peer leader, } if (response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS && - response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST) { //NOLINT + response.status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST) { // NOLINT LOG(INFO) << "read failed: " << CHUNK_OP_STATUS_Name(response.status()); ret = -1; @@ -351,13 +328,9 @@ void RandReadChunk(Peer leader, } } -// 随机选择一个chunk的随机offset进行write -void RandWriteChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkIdRange, - const int loop, - const int sn = 1) { +// Randomly select a chunk's random offset for writing +void RandWriteChunk(Peer leader, LogicPoolID logicPoolId, 
CopysetID copysetId, + ChunkID chunkIdRange, const int loop, const int sn = 1) { int ret = 0; char data[kOpRequestAlignSize] = {'a'}; int length = kOpRequestAlignSize; @@ -368,7 +341,7 @@ void RandWriteChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { -        // 随机选择一个chunk +        // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -384,7 +357,7 @@ void RandWriteChunk(Peer leader, request.set_size(kOpRequestAlignSize); cntl.request_attachment().append(data, length); -        // 随机选择一个offset +        // Randomly select an offset uint64_t pageIndex = butil::fast_rand_less_than(kChunkSize / kPageSize); request.set_offset(pageIndex * kPageSize); @@ -405,12 +378,9 @@ void RandWriteChunk(Peer leader, } } -// 随机选择一个chunk删除 -void RandDeleteChunk(Peer leader, -                     LogicPoolID logicPoolId, -                     CopysetID copysetId, -                     ChunkID chunkIdRange, -                     const int loop) { +// Randomly select a chunk to delete +void RandDeleteChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, +                     ChunkID chunkIdRange, const int loop) { int ret = 0; PeerId leaderId(leader.address()); @@ -419,7 +389,7 @@ void RandDeleteChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { -        // 随机选择一个chunk +        // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -449,12 +419,9 @@ void RandDeleteChunk(Peer leader, } } -// 创建clone chunk -void CreateCloneChunk(Peer leader, -                      LogicPoolID logicPoolId, -                      CopysetID copysetId, -                      ChunkID start, -                      ChunkID end) { +// Create clone chunk +void CreateCloneChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, +                      ChunkID start, ChunkID end) { int ret = 0; SequenceNum sn = 2; SequenceNum correctedSn = 1; @@ -496,10 +463,10 @@ void CreateCloneChunk(Peer leader, } /** -  * chunk不是事先在FilePool分配好的 + * Chunks are not pre allocated in FilePool */ -// 多线程并发随机读同一个chunk +// Multiple threads simultaneously randomly read the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { uint64_t chunkId = 1; off_t offset = 0; @@ -510,37 +477,21 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; -    // 1. 启动一个成员的复制组 -    PeerCluster cluster("InitShutdown-cluster", -                        logicPoolId, -                        copysetId, -                        peers, -                        params, -                        paramsIndexs); +    // 1. Start a replication group for a member +    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, +                        params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); +    // 2. Initiate a write to the chunk to ensure that it has been generated +    ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, chunkId, offset, +                            length, data.c_str(), sn)); -    // 2. 对chunk发起一次写,保证chunk已经产生 -    ASSERT_EQ(0, WriteChunk(leaderPeer, -                            logicPoolId, -                            copysetId, -                            chunkId, -                            offset, -                            length, -                            data.c_str(), -                            sn)); - -    // 3. 起多个线程执行随机read chunk +    // 3.
Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -548,33 +499,24 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { } } -// 多线程并发随机写同一个chunk +// Multiple threads concurrently randomly write the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteOneChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -582,8 +524,9 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteOneChunk) { } } -// 多线程并发写同一个chunk同一个offset -TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT +// Multiple threads simultaneously writing the same chunk and offset +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, + WriteOneChunkOnTheSameOffset) { // NOLINT const int kThreadNum = 10; std::vector datas; ChunkID chunkId = 1; @@ -591,29 +534,19 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { int length = 2 * kOpRequestAlignSize; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { std::string data(length, 'a' + i); datas.push_back(data); - threads.push_back(Thread(WriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - datas[i].c_str(), + threads.push_back(Thread(WriteChunk, leaderPeer, logicPoolId, copysetId, + chunkId, offset, length, datas[i].c_str(), sn)); } @@ -621,7 +554,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { threads[j].join(); } - // 3. 将数据read出来验证 + // 3. 
Read out the data for verification brpc::Channel channel; channel.Init(leaderId.addr, NULL); ChunkService_Stub stub(&channel); @@ -645,7 +578,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { std::string result = cntl.response_attachment().to_string(); ASSERT_EQ(length, result.size()); -    // 读出来的数据的字符>='a' 且<= 'a' + kThreadNum - 1 +    // The characters read back are >= 'a' and <= 'a' + kThreadNum - 1 ASSERT_GE(result[0] - 'a', 0); ASSERT_LE(result[0] - 'a', kThreadNum - 1); for (int i = 1; i < length - 1; ++i) { @@ -653,7 +586,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { } } -// 多线程并发随机读写同一个chunk +// Multiple threads concurrently randomly read and write the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -663,50 +596,30 @@ ChunkID chunkIdRange = 1; const int sn = 1; -    // 1. 启动一个成员的复制组 -    PeerCluster cluster("InitShutdown-cluster", -                        logicPoolId, -                        copysetId, -                        peers, -                        params, -                        paramsIndexs); +    // 1. Start a replication group for a member +    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, +                        params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); -    // 2. 对chunk发起一次写,保证chunk已经产生 +    // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { -        ASSERT_EQ(0, WriteChunk(leaderPeer, -                                logicPoolId, -                                copysetId, -                                k, -                                offset, -                                length, -                                data.c_str(), -                                sn)); +        ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, +                                length, data.c_str(), sn)); } -    // 3. 起多个线程执行随机read write chunk +    // 3. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { -            // 起read线程 -            threads.push_back(Thread(RandReadChunk, -                                     leaderPeer, -                                     logicPoolId, -                                     copysetId, -                                     chunkIdRange, -                                     kMaxLoop, -                                     sn)); +            // Start read thread +            threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, +                                     copysetId, chunkIdRange, kMaxLoop, sn)); } else { -            // 起write线程 -            threads.push_back(Thread(RandWriteChunk, -                                     leaderPeer, -                                     logicPoolId, -                                     copysetId, -                                     chunkIdRange, -                                     kMaxLoop, -                                     sn)); +            // Start write thread +            threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, +                                     copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -715,7 +628,7 @@ } } -// 多线程并发读不同的chunk +// Multiple threads concurrently reading different chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -725,38 +638,23 @@ ChunkID chunkIdRange = kChunkNum; const int sn = 1; -    // 1. 启动一个成员的复制组 -    PeerCluster cluster("InitShutdown-cluster", -                        logicPoolId, -                        copysetId, -                        peers, -                        params, -                        paramsIndexs); +    // 1. Start a replication group for a member +    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, +                        params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); -    // 2. 对chunk发起一次写,保证chunk已经产生 +    // 2.
Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { -        ASSERT_EQ(0, WriteChunk(leaderPeer, -                                logicPoolId, -                                copysetId, -                                k, -                                offset, -                                length, -                                data.c_str(), -                                sn)); +        ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, +                                length, data.c_str(), sn)); } -    // 3. 起多个线程执行随机read chunk +    // 3. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { -        threads.push_back(Thread(RandReadChunk, -                                 leaderPeer, -                                 logicPoolId, -                                 copysetId, -                                 chunkIdRange, -                                 kMaxLoop, -                                 sn)); +        threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, +                                 copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -764,33 +662,26 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { } } -// 多线程并发读不同的chunk,注意这些chunk都还没有被写过 -TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiNotExistChunk) { //NOLINT +// Multiple threads simultaneously read different chunks, please note that none +// of these chunks have been written yet +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, +       RandReadMultiNotExistChunk) { // NOLINT const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = kChunkNum; const int sn = 1; -    // 1. 启动一个成员的复制组 -    PeerCluster cluster("InitShutdown-cluster", -                        logicPoolId, -                        copysetId, -                        peers, -                        params, -                        paramsIndexs); +    // 1. Start a replication group for a member +    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, +                        params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); -    // 2. 起多个线程执行随机read chunk +    // 2. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { -        threads.push_back(Thread(RandReadChunk, -                                 leaderPeer, -                                 logicPoolId, -                                 copysetId, -                                 chunkIdRange, -                                 kMaxLoop, -                                 sn)); +        threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, +                                 copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -798,7 +689,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiNotExistChunk) { } } -// 多线程并发随机写同多个chunk +// Multiple threads concurrently issue random writes to multiple chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -808,39 +699,26 @@ ChunkID chunkIdRange = kChunkNum; const int sn = 1; -    // 1. 启动一个成员的复制组 -    PeerCluster cluster("InitShutdown-cluster", -                        logicPoolId, -                        copysetId, -                        peers, -                        params, -                        paramsIndexs); +    // 1. Start a replication group for a member +    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, +                        params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); -    // 2. 对chunk发起一次写,保证chunk已经产生,避免下面同时从 -    // chunkfile pool生成new chunk导致write 超时失败 +    // 2. Write each chunk once to make sure it already exists; otherwise the +    // concurrent writes below would all allocate new chunks from the +    // chunkfile pool at the same time and fail with write timeouts for (int k = 1; k < chunkIdRange + 1; ++k) { -        ASSERT_EQ(0, WriteChunk(leaderPeer, -                                logicPoolId, -                                copysetId, -                                k, -                                offset, -                                length, -                                data.c_str(), -                                sn)); +        ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, +                                length, data.c_str(), sn)); } -    // 4. 起多个线程执行随机write chunk +    // 4.
Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { -        threads.push_back(Thread(RandWriteChunk, -                                 leaderPeer, -                                 logicPoolId, -                                 copysetId, -                                 chunkIdRange, -                                 kMaxLoop, -                                 sn)); +        threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, +                                 copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -848,7 +726,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { } } -// 多线程并发随机读写同多个chunk +// Multiple threads concurrently issue random reads and writes to multiple chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -856,38 +734,24 @@ ChunkID chunkIdRange = kChunkNum; const int sn = 1; -    // 1. 启动一个成员的复制组 -    PeerCluster cluster("InitShutdown-cluster", -                        logicPoolId, -                        copysetId, -                        peers, -                        params, -                        paramsIndexs); +    // 1. Start a replication group for a member +    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, +                        params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); -    // 2. 起多个线程执行随机read write chunk +    // 2. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { -            // 起read线程 -            threads.push_back(Thread(RandReadChunk, -                                     leaderPeer, -                                     logicPoolId, -                                     copysetId, -                                     chunkIdRange, -                                     kMaxLoop, -                                     sn)); +            // Start read thread +            threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, +                                     copysetId, chunkIdRange, kMaxLoop, sn)); } else { -            // 起write线程 -            threads.push_back(Thread(RandWriteChunk, -                                     leaderPeer, -                                     logicPoolId, -                                     copysetId, -                                     chunkIdRange, -                                     kMaxLoop, -                                     sn)); +            // Start write thread +            threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, +                                     copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -896,7 +760,7 @@ } } -// 多线程并发删除不同的chunk +// Multiple threads concurrently delete different chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -906,38 +770,24 @@ ChunkID chunkIdRange = kChunkNum; const int sn = 1; -    // 1. 启动一个成员的复制组 -    PeerCluster cluster("InitShutdown-cluster", -                        logicPoolId, -                        copysetId, -                        peers, -                        params, -                        paramsIndexs); +    // 1. Start a replication group for a member +    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, +                        params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); -    // 2. 对chunk发起一次写,保证chunk已经产生 +    // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { -        ASSERT_EQ(0, WriteChunk(leaderPeer, -                                logicPoolId, -                                copysetId, -                                k, -                                offset, -                                length, -                                data.c_str(), -                                sn)); +        ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, +                                length, data.c_str(), sn)); } -    // 3. 起多个线程执行随机delete chunk +    // 3.
Starting multiple threads to execute random delete chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - // 起delete线程 - threads.push_back(Thread(RandDeleteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop)); + // Start delete thread + threads.push_back(Thread(RandDeleteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop)); } for (int j = 0; j < kThreadNum; ++j) { @@ -945,30 +795,23 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { } } -// 多线程并发create clone不同的chunk +// Multiple threads concurrent create clones with different chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, CreateCloneMultiChunk) { const int kThreadNum = 10; ChunkID chunkIdRange = kChunkNum; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机create clone chunk + // 2. Starting multiple threads to execute random create clone chunks std::vector threads; int chunksPerThread = chunkIdRange / kThreadNum; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(CreateCloneChunk, - leaderPeer, - logicPoolId, - copysetId, - i * chunksPerThread + 1, + threads.push_back(Thread(CreateCloneChunk, leaderPeer, logicPoolId, + copysetId, i * chunksPerThread + 1, (i + 1) * chunksPerThread)); } @@ -978,10 +821,10 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, CreateCloneMultiChunk) { } /** - * chunk是事先在FilePool分配好的 + * Chunks are pre allocated in FilePool */ -// 多线程并发随机读同一个chunk +// Multiple threads simultaneously randomly read the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { uint64_t chunkId = 1; off_t offset = 0; @@ -992,36 +835,21 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - data.c_str(), - sn)); - - // 3. 起多个线程执行随机read chunk + // 2. Initiate a write to the chunk to ensure that it has been generated + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, chunkId, offset, + length, data.c_str(), sn)); + + // 3. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1029,33 +857,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { } } -// 多线程并发随机写同一个chunk +// Multiple threads concurrently randomly write the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteOneChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 
启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunk std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1063,8 +882,9 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteOneChunk) { } } -// 多线程并发写同一个chunk同一个offset -TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT +// Multiple threads simultaneously writing the same chunk and offset +TEST_F(ChunkServerConcurrentFromFilePoolTest, + WriteOneChunkOnTheSameOffset) { // NOLINT const int kThreadNum = 10; std::vector datas; ChunkID chunkId = 1; @@ -1072,29 +892,19 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { int length = 2 * kOpRequestAlignSize; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunk std::vector threads; for (int i = 0; i < kThreadNum; ++i) { std::string data(length, 'a' + i); datas.push_back(data); - threads.push_back(Thread(WriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - datas[i].c_str(), + threads.push_back(Thread(WriteChunk, leaderPeer, logicPoolId, copysetId, + chunkId, offset, length, datas[i].c_str(), sn)); } @@ -1102,7 +912,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { threads[j].join(); } - // 4. 将数据read出来验证 + // 4. Read out the data for verification brpc::Channel channel; channel.Init(leaderId.addr, NULL); ChunkService_Stub stub(&channel); @@ -1126,7 +936,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { std::string result = cntl.response_attachment().to_string(); ASSERT_EQ(length, result.size()); - // 读出来的数据的字符>='a' 且<= 'a' + kThreadNum - 1 + // The characters of the read data >='a' and <= 'a' + kThreadNum - 1 ASSERT_GE(result[0] - 'a', 0); ASSERT_LE(result[0] - 'a', kThreadNum - 1); for (int i = 1; i < length - 1; ++i) { @@ -1134,7 +944,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { } } -// 多线程并发随机读写同一个chunk +// Multiple threads concurrently randomly read and write the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -1142,38 +952,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. 
Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read write chunk + // 2. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -1182,7 +978,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { } } -// 多线程并发读不同的chunk +// Multiple threads concurrently reading different chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -1192,38 +988,23 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 4. 起多个线程执行随机read chunk + // 4. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1231,33 +1012,25 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { } } -// 多线程并发读不同的chunk,注意这些chunk都还没有被写过 +// Multiple threads simultaneously read different chunks, please note that none +// of these chunks have been written yet TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiNotExistChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read chunk + // 2. 
Starting multiple threads to execute random read chunks std::vector<Thread> threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1265,7 +1038,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiNotExistChunk) { } } -// 多线程并发随机写同多个chunk +// Multiple threads concurrently perform random writes to multiple chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -1273,26 +1046,17 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunks std::vector<Thread> threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1300,7 +1064,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) { } } -// 多线程并发随机读写同多个chunk +// Multiple threads concurrently perform random reads and writes on multiple chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -1308,38 +1072,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read write chunk + // 2. 
Starting multiple threads to execute random read write chunks std::vector<Thread> threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -1348,7 +1098,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { } } -// 多线程并发删除不同的chunk +// Multiple threads concurrently delete different chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -1358,38 +1108,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to each chunk to ensure it has been created for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 3. 起多个线程执行随机delete chunk + // 3. Starting multiple threads to execute random delete chunks std::vector<Thread> threads; for (int i = 0; i < kThreadNum; ++i) { - // 起delete线程 - threads.push_back(Thread(RandDeleteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop)); + // Start delete thread + threads.push_back(Thread(RandDeleteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1397,30 +1133,23 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { } } -// 多线程并发create clone不同的chunk +// Multiple threads concurrently create clones of different chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) { const int kThreadNum = 10; ChunkID chunkIdRange = kChunkNum; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机create clone chunk + // 2. 
Starting multiple threads to execute random create clone chunks std::vector<Thread> threads; int chunksPerThread = chunkIdRange / kThreadNum; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(CreateCloneChunk, - leaderPeer, - logicPoolId, - copysetId, - i * chunksPerThread + 1, + threads.push_back(Thread(CreateCloneChunk, leaderPeer, logicPoolId, + copysetId, i * chunksPerThread + 1, (i + 1) * chunksPerThread)); } @@ -1429,7 +1158,8 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) { } } -// 多线程并发随机读写同多个chunk,同事伴随这并发的COW +// Multiple threads concurrently perform random reads and writes on multiple +// chunks, accompanied by concurrent COW TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -1439,52 +1169,32 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) { ChunkID chunkIdRange = kChunkNum / 2; int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 用低版本的sn写一遍chunk + // 2. Write each chunk once using the lower version sn for (int k = 1; k <= chunkIdRange; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // sn加1,保证后面的write会产生COW + // Add 1 to sn to ensure that subsequent writes will generate COW sn += 1; - // 3. 起多个线程执行随机read write chunk + // 3. 
Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(10); if (read <= 1) { - // 起read线程,20%概率 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread with a 20% probability + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } diff --git a/test/integration/chunkserver/datastore/datastore_basic_test.cpp b/test/integration/chunkserver/datastore/datastore_basic_test.cpp index 14fdc3901c..a7367253c5 100644 --- a/test/integration/chunkserver/datastore/datastore_basic_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_basic_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_bas"; // NOLINT -const string poolDir = "./chunkfilepool_int_bas"; // NOLINT +const string baseDir = "./data_int_bas"; // NOLINT +const string poolDir = "./chunkfilepool_int_bas"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_bas.meta"; // NOLINT class BasicTestSuit : public DatastoreIntegrationBase { @@ -36,51 +36,49 @@ class BasicTestSuit : public DatastoreIntegrationBase { }; /** - * 基本功能测试验证 - * 读、写、删、获取文件信息 + * Basic functional testing verification + * Read, write, delete, and obtain file information */ TEST_F(BasicTestSuit, BasicTest) { ChunkID id = 1; SequenceNum sn = 1; off_t offset = 0; size_t length = PAGE_SIZE; - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(id); + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(id); CSErrorCode errorCode; CSChunkInfo info; - /******************场景一:新建的文件,Chunk文件不存在******************/ + /******************Scene One: New file created, Chunk file does not + * exist******************/ - // 文件不存在 + // File does not exist ASSERT_FALSE(lfs_->FileExists(chunkPath)); - // 读chunk时返回ChunkNotExistError + // ChunkNotExistError returned when reading chunk char readbuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 无法获取到chunk的版本号 + // Unable to obtain the version number of the chunk errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 删除chunk返回Success + // Delete chunk and return Success errorCode = dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); - /******************场景二:通过WriteChunk产生chunk文件后操作**************/ + /****************** Scene Two: Operations after generating chunk files + * through WriteChunk **************/ char buf1_1_1[PAGE_SIZE]; memset(buf1_1_1, 'a', length); - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_1, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk的信息,且各项信息符合预期 + // Chunk information can be obtained and all information meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, info.curSn); @@ -93,69 +91,53 @@ 
TEST_F(BasicTestSuit, BasicTest) { ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); - // 读取写入的4KB验证一下,应当与写入数据相等 + // Read back the 4KB just written and verify it matches the written data memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_1, readbuf, length)); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content + // read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - CHUNK_SIZE - PAGE_SIZE, - length); + errorCode = + dataStore_->ReadChunk(id, sn, readbuf, CHUNK_SIZE - PAGE_SIZE, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // chunk 存在时,覆盖写 + // Overwrite when chunk exists char buf1_1_2[PAGE_SIZE]; memset(buf1_1_2, 'b', length); - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_2, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content + // read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - offset, - 3 * PAGE_SIZE); + errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, length)); - // chunk 存在时,写入未写过区域 + // When a chunk exists, write to an unwritten area char buf1_1_3[PAGE_SIZE]; memset(buf1_1_3, 'c', length); offset = PAGE_SIZE; length = PAGE_SIZE; - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_3, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_3, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content + // read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - 0, - 3 * PAGE_SIZE); + errorCode = dataStore_->ReadChunk(id, sn, readbuf, 0, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_1_3, readbuf + PAGE_SIZE, PAGE_SIZE)); - // chunk 存在时,覆盖部分区域 + // When the chunk exists, overwrite part of the written region char buf1_1_4[2 * PAGE_SIZE]; memset(buf1_1_4, 'd', length); offset = PAGE_SIZE; @@ -164,27 +146,19 @@ TEST_F(BasicTestSuit, BasicTest) { butil::IOBuf iobuf1_1_4; iobuf1_1_4.append(buf1_1_4, length); - errorCode = dataStore_->WriteChunk(id, - sn, - iobuf1_1_4, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, iobuf1_1_4, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content + // read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - 0, - 3 * PAGE_SIZE); + errorCode = dataStore_->ReadChunk(id, sn, readbuf, 0, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_1_4, readbuf + PAGE_SIZE, 2 * PAGE_SIZE)); - - /******************场景三:用户删除文件******************/ + /******************Scene Three: User deletes file******************/ errorCode = 
dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); diff --git a/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp b/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp index 3b0d635652..6db8375ff2 100644 --- a/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_clo"; // NOLINT -const string poolDir = "./chunkfilepool_int_clo"; // NOLINT +const string baseDir = "./data_int_clo"; // NOLINT +const string poolDir = "./chunkfilepool_int_clo"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_clo.meta"; // NOLINT class CloneTestSuit : public DatastoreIntegrationBase { @@ -36,7 +36,7 @@ class CloneTestSuit : public DatastoreIntegrationBase { }; /** - * 克隆场景测试 + * Clone scenario testing */ TEST_F(CloneTestSuit, CloneTest) { ChunkID id = 1; @@ -48,16 +48,14 @@ TEST_F(CloneTestSuit, CloneTest) { CSChunkInfo info; std::string location("test@s3"); - /******************场景一:创建克隆文件******************/ + /******************Scenario 1: Creating Cloned Files******************/ - // 创建克隆文件chunk1 - errorCode = dataStore_->CreateCloneChunk(id, // chunk id - sn, - correctedSn, - CHUNK_SIZE, - location); + // Create clone file chunk1 + errorCode = + dataStore_->CreateCloneChunk(id, // chunk id + sn, correctedSn, CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -71,14 +69,13 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 再次调该接口,仍返回成功,chunk的信息不变 - errorCode = dataStore_->CreateCloneChunk(id, // chunk id - sn, - correctedSn, - CHUNK_SIZE, - location); + // Call the interface again, but still return success. 
Chunk information + // remains unchanged + errorCode = + dataStore_->CreateCloneChunk(id, // chunk id + sn, correctedSn, CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -92,14 +89,12 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 创建克隆文件chunk2 - errorCode = dataStore_->CreateCloneChunk(2, // chunk id - sn, - correctedSn, - CHUNK_SIZE, - location); + // Create clone file chunk2 + errorCode = + dataStore_->CreateCloneChunk(2, // chunk id + sn, correctedSn, CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(2, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -113,23 +108,19 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - /******************场景二:恢复克隆文件******************/ - // 构造原始数据 + /******************Scene 2: Restoring Cloned Files******************/ + // Construct raw data char pasteBuf[4 * PAGE_SIZE]; memset(pasteBuf, '1', 4 * PAGE_SIZE); - // WriteChunk写数据到clone chunk的[0, 8KB]区域 + // WriteChunk writes data to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; char writeBuf1[2 * PAGE_SIZE]; memset(writeBuf1, 'a', length); - errorCode = dataStore_->WriteChunk(id, - sn, - writeBuf1, - offset, - length, - nullptr); - ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + errorCode = + dataStore_->WriteChunk(id, sn, writeBuf1, offset, length, nullptr); + ASSERT_EQ(errorCode, CSErrorCode::Success); + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -137,26 +128,23 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘1’ + // Reading Chunk data, [0, 8KB] data should be '1' size_t readSize = 2 * PAGE_SIZE; char readBuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(writeBuf1, readBuf, readSize)); - // PasteChunk再次写数据到clone chunk的[0, 8KB]区域 + // PasteChunk writes data again to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; - errorCode = dataStore_->PasteChunk(id, - pasteBuf, - offset, - length); + errorCode = dataStore_->PasteChunk(id, pasteBuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -164,30 +152,26 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); 
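    // [Editor's aside, illustrative only, not part of this patch] The bitmap
    // assertions around here follow from page arithmetic: a write over
    // [offset, offset + length) marks pages offset / PAGE_SIZE through
    // (offset + length - 1) / PAGE_SIZE in the clone chunk's bitmap. A minimal
    // sketch with a hypothetical helper (not project API):
    #include <cstddef>
    #include <utility>
    // Inclusive [first, last] page range dirtied by a write.
    std::pair<size_t, size_t> WrittenPageRange(size_t offset, size_t length,
                                               size_t pageSize) {
        return {offset / pageSize, (offset + length - 1) / pageSize};
    }
    // For the [0, 8KB] writes above, WrittenPageRange(0, 2 * PAGE_SIZE,
    // PAGE_SIZE) yields pages [0, 1], which is why NextSetBit(0) == 0 and
    // NextClearBit(0) == 2 in the assertions that follow.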
ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘a’ + // Reading Chunk data, [0, 8KB] data should be 'a' readSize = 2 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(writeBuf1, readBuf, readSize)); - // WriteChunk再次写数据到clone chunk的[4KB, 12KB]区域 + // WriteChunk writes data again to the [4KB, 12KB] area of the clone chunk offset = PAGE_SIZE; length = 2 * PAGE_SIZE; char writeBuf3[2 * PAGE_SIZE]; memset(writeBuf3, 'c', length); - errorCode = dataStore_->WriteChunk(id, - sn, - writeBuf3, - offset, - length, - nullptr); - ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + errorCode = + dataStore_->WriteChunk(id, sn, writeBuf3, offset, length, nullptr); + ASSERT_EQ(errorCode, CSErrorCode::Success); + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -195,11 +179,12 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(3, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); - // 读Chunk数据,[0, 4KB]数据应为‘a’,[4KB, 12KB]数据应为‘c’ + // Reading Chunk data, [0, 4KB] data should be 'a', [4KB, 12KB] data should + // be 'c' readSize = 3 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); @@ -207,17 +192,18 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(0, memcmp(writeBuf1, readBuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(writeBuf3, readBuf + PAGE_SIZE, 2 * PAGE_SIZE)); - /******************场景三:clone文件遍写后转换为普通chunk文件*************/ + /******************Scene 3: A clone file is converted to a regular chunk + * file after being fully written*************/ char overBuf[1 * kMB] = {0}; for (int i = 0; i < 16; ++i) { - errorCode = dataStore_->PasteChunk(id, - overBuf, + errorCode = dataStore_->PasteChunk(id, overBuf, i * kMB, // offset 1 * kMB); // length ASSERT_EQ(errorCode, CSErrorCode::Success); } - // 检查chunk的各项信息,都符合预期,chunk转为了普通的chunk + // Check all the information of the chunk and ensure it meets expectations; 
+ // the chunk has now been converted to a regular chunk errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -226,15 +212,15 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); - /******************场景三:删除文件****************/ + /******************Scene 3: Delete File****************/ - // 此时删除Chunk1,返回Success + // Deleting Chunk1 at this point returns Success errorCode = dataStore_->DeleteChunk(1, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 此时删除Chunk2,返回Success + // Deleting Chunk2 at this point returns Success errorCode = dataStore_->DeleteChunk(2, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->GetChunkInfo(2, &info); @@ -242,7 +228,7 @@ TEST_F(CloneTestSuit, CloneTest) { } /** - * 恢复场景测试 + * Recovery scenario testing */ TEST_F(CloneTestSuit, RecoverTest) { ChunkID id = 1; @@ -254,16 +240,15 @@ TEST_F(CloneTestSuit, RecoverTest) { CSChunkInfo info; std::string location("test@s3"); - /******************场景一:创建克隆文件******************/ + /******************Scenario 1: Creating Cloned Files******************/ - // 创建克隆文件chunk1 + // Create clone file chunk1 errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, correctedSn, // corrected sn - CHUNK_SIZE, - location); + CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -277,14 +262,14 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 再次调该接口,仍返回成功,chunk的信息不变 + // Call the interface again, but still return success. 
Chunk information + // remains unchanged errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, 3, // corrected sn - CHUNK_SIZE, - location); + CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -298,20 +283,17 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - /******************场景二:恢复克隆文件******************/ + /******************Scene 2: Restoring Cloned Files******************/ sn = 3; - // 构造原始数据 + // Construct raw data char pasteBuf[4 * PAGE_SIZE]; memset(pasteBuf, '1', 4 * PAGE_SIZE); - // PasteChunk写数据到clone chunk的[0, 8KB]区域 + // PasteChunk writes data to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; - errorCode = dataStore_->PasteChunk(id, - pasteBuf, - offset, - length); + errorCode = dataStore_->PasteChunk(id, pasteBuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); @@ -319,30 +301,26 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘1’ + // Reading Chunk data, [0, 8KB] data should be '1' size_t readSize = 2 * PAGE_SIZE; char readBuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(pasteBuf, readBuf, readSize)); - // WriteChunk再次写数据到clone chunk的[0, 8KB]区域 + // WriteChunk writes data again to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; char writeBuf2[2 * PAGE_SIZE]; memset(writeBuf2, 'b', length); - errorCode = dataStore_->WriteChunk(id, - sn, - writeBuf2, - offset, - length, - nullptr); - ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + errorCode = + dataStore_->WriteChunk(id, sn, writeBuf2, offset, length, nullptr); + ASSERT_EQ(errorCode, CSErrorCode::Success); + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -350,26 +328,23 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘b’ + // Reading Chunk data, [0, 8KB] data should be 'b' readSize = 2 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(writeBuf2, readBuf, readSize)); - // PasteChunk再次写数据到clone chunk的[4KB, 
12KB]区域 + // PasteChunk writes data again to the [4KB, 12KB] area of the clone chunk offset = PAGE_SIZE; length = 2 * PAGE_SIZE; - errorCode = dataStore_->PasteChunk(id, - pasteBuf, - offset, - length); + errorCode = dataStore_->PasteChunk(id, pasteBuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -377,11 +352,12 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(3, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); - // 读Chunk数据,[0, 8KB]数据应为‘b’,[8KB, 12KB]数据应为‘1’ + // Reading Chunk data, [0, 8KB] data should be 'b', [8KB, 12KB] data should + // be '1' readSize = 3 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); @@ -389,19 +365,19 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_EQ(0, memcmp(writeBuf2, readBuf, 2 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(pasteBuf, readBuf + 2 * PAGE_SIZE, PAGE_SIZE)); - /******************场景三:clone文件遍写后转换为普通chunk文件*************/ + /******************Scene 3: A clone file is converted to a regular chunk + * file after being fully written*************/ char overBuf[1 * kMB] = {0}; for (int i = 0; i < 16; ++i) { - errorCode = dataStore_->WriteChunk(id, - sn, - overBuf, - i * kMB, // offset + errorCode = dataStore_->WriteChunk(id, sn, overBuf, + i * kMB, // offset 1 * kMB, nullptr); // length ASSERT_EQ(errorCode, CSErrorCode::Success); } - // 检查chunk的各项信息,都符合预期,chunk转为了普通的chunk + // Check all the information of the chunk and ensure it meets expectations; 
+ // the chunk has now been converted to a regular chunk errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); diff --git a/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp b/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp index e873cdb667..e1ded2ef1a 100644 --- a/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_con"; // NOLINT -const string poolDir = "./chunkfilepool_int_con"; // NOLINT +const string baseDir = "./data_int_con"; // NOLINT +const string poolDir = "./chunkfilepool_int_con"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_con.meta"; // NOLINT class ConcurrencyTestSuit : public DatastoreIntegrationBase { @@ -46,9 +46,8 @@ TEST_F(ConcurrencyTestSuit, ConcurrencyTest) { const int kThreadNum = 10; auto readFunc = [&](ChunkID id) { - // 五分之一概率增加版本号 - if (rand_r(&seed) % 5 == 0) - ++sn; + // One-fifth probability of increasing the version number + if (rand_r(&seed) % 5 == 0) ++sn; uint64_t pageIndex = rand_r(&seed) % (CHUNK_SIZE / PAGE_SIZE); offset = pageIndex * PAGE_SIZE; dataStore_->ReadChunk(id, sn, buf, offset, length); }; @@ -60,9 +59,7 @@ TEST_F(ConcurrencyTestSuit, ConcurrencyTest) { dataStore_->WriteChunk(id, sn, buf, offset, length, nullptr); }; - auto deleteFunc = [&](ChunkID id) { - dataStore_->DeleteChunk(id, sn); - }; + auto deleteFunc = [&](ChunkID id) { dataStore_->DeleteChunk(id, sn); }; auto deleteSnapFunc = [&](ChunkID id) { dataStore_->DeleteSnapshotChunkOrCorrectSn(id, sn); }; @@ -107,7 +104,7 @@ TEST_F(ConcurrencyTestSuit, ConcurrencyTest) { Thread threads[kThreadNum]; printf("===============TEST CHUNK1===================\n"); - // 测试并发对同一chunk进行随机操作 + // Testing concurrent random operations on the same chunk for (int i = 0; i < kThreadNum; ++i) { threads[i] = std::thread(Run, 1, kLoopNum); } @@ -118,7 +115,7 @@ TEST_F(ConcurrencyTestSuit, ConcurrencyTest) { printf("===============TEST RANDOM==================\n"); - // 测试并发对不同chunk进行随机操作 + // Test concurrent random operations on different chunks int idRange = 10; for (int i = 0; i < kThreadNum; ++i) { threads[i] = std::thread(Run, idRange, kLoopNum); diff --git a/test/integration/chunkserver/datastore/datastore_exception_test.cpp b/test/integration/chunkserver/datastore/datastore_exception_test.cpp index 5405b03e8c..cc020c395b 100644 --- a/test/integration/chunkserver/datastore/datastore_exception_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_exception_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_exc"; // NOLINT -const string poolDir = "./chunkfilepool_int_exc"; // NOLINT +const string baseDir = "./data_int_exc"; // NOLINT +const string poolDir = "./chunkfilepool_int_exc"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_exc.meta"; // NOLINT class ExceptionTestSuit : public DatastoreIntegrationBase { @@ -36,9 +36,9 @@ class ExceptionTestSuit : public DatastoreIntegrationBase { }; /** - * 异常测试1 - * 用例:chunk的metapage数据损坏,然后启动DataStore - * 预期:重启失败 + * Exception Test 1 + * Scenario: the chunk's metapage data is corrupted, then the DataStore is + * started + * Expected: restart fails */ TEST_F(ExceptionTestSuit, ExceptionTest1) { SequenceNum fileSn = 1; @@ -47,46 +47,41 @@ TEST_F(ExceptionTestSuit, ExceptionTest1) { CSErrorCode 
errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf[PAGE_SIZE]; memset(buf, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf, - offset, - length, - nullptr); + fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1的metapage - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(1); + // Modify the metapage of chunk1 through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); char metapage[PAGE_SIZE]; - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metapage, 0, PAGE_SIZE); - // 修改metapage + // Modify the metapage metapage[0]++; lfs_->Write(fd, metapage, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and re-initialize; the restart should fail + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_FALSE(dataStore_->Initialize()); } /** - * 异常测试2 - * 用例:chunk的metapage数据损坏,然后更新了metapage,然后重启DataStore - * 预期:重启datastore可以成功 + * Exception Test 2 + * Scenario: Chunk's metapage data is corrupted, then the metapage is updated, + * and then the DataStore is restarted + * Expected: restarting the DataStore succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest2) { SequenceNum fileSn = 1; @@ -95,55 +90,45 @@ TEST_F(ExceptionTestSuit, ExceptionTest2) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf[PAGE_SIZE]; memset(buf, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf, - offset, - length, - nullptr); + fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1的metapage - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(1); + // Modify the metapage of chunk1 through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); char metapage[PAGE_SIZE]; - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metapage, 0, PAGE_SIZE); - // 修改metapage + // Modify the metapage metapage[0]++; lfs_->Write(fd, metapage, 0, PAGE_SIZE); lfs_->Close(fd); - // 触发metapage更新 + // Trigger a metapage update errorCode = dataStore_->WriteChunk(1, // id - ++fileSn, - buf, - offset, - length, - nullptr); + ++fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and re-initialize; the restart should succeed + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); } /** - * 异常测试3 - * 用例:chunk快照的metapage数据损坏,然后重启DataStore - * 预期:重启失败 + * Exception Test 3 + * Scenario: the chunk snapshot's metapage data is corrupted, then the + * DataStore is restarted + * Expected: restart fails */ TEST_F(ExceptionTestSuit, ExceptionTest3) { SequenceNum fileSn = 1; @@ 
-152,55 +137,45 @@ TEST_F(ExceptionTestSuit, ExceptionTest3) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf[PAGE_SIZE]; memset(buf, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf, - offset, - length, - nullptr); + fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 生成快照文件 + // Generate a snapshot file errorCode = dataStore_->WriteChunk(1, // id - ++fileSn, - buf, - offset, - length, - nullptr); + ++fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1快照的metapage - std::string snapPath = baseDir + "/" + - FileNameOperator::GenerateSnapshotName(1, 1); + // Modify the metapage of chunk1's snapshot through lfs + std::string snapPath = + baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); char metapage[PAGE_SIZE]; - int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(snapPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metapage, 0, PAGE_SIZE); - // 修改metapage + // Modify the metapage metapage[0]++; lfs_->Write(fd, metapage, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and re-initialize; the restart should fail + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_FALSE(dataStore_->Initialize()); } /** - * 异常测试4 - * 用例:chunk快照的metapage数据损坏,但是更新了metapage,然后重启DataStore - * 预期:重启成功 + * Exception Test 4 + * Scenario: the chunk snapshot's metapage data is corrupted, but the metapage + * is then updated, and the DataStore is restarted + * Expected: restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest4) { SequenceNum fileSn = 1; @@ -209,64 +184,52 @@ TEST_F(ExceptionTestSuit, ExceptionTest4) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf[PAGE_SIZE]; memset(buf, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf, - offset, - length, - nullptr); + fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 生成快照文件 + // Generate a snapshot file errorCode = dataStore_->WriteChunk(1, // id - ++fileSn, - buf, - offset, - length, - nullptr); + ++fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 触发快照metapage更新 + // Trigger a snapshot metapage update errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf, - offset + PAGE_SIZE, - length, + fileSn, buf, offset + PAGE_SIZE, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1快照的metapage - std::string snapPath = baseDir + "/" + - FileNameOperator::GenerateSnapshotName(1, 1); + // Modify the metapage of chunk1's snapshot through lfs + std::string snapPath = + baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); char metapage[PAGE_SIZE]; - int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(snapPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metapage, 0, PAGE_SIZE); - // 修改metapage + // Modify the metapage metapage[0]++; lfs_->Write(fd, metapage, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; 
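    // [Editor's aside, illustrative only, not part of this patch] The
    // simulate-restart pattern used throughout these tests is: tamper with the
    // on-disk metapage, rebuild the datastore object, and let Initialize()
    // re-read every metapage. A one-byte flip is caught because the metapage
    // is checksummed. A minimal sketch of such a check, assuming a trailing
    // CRC32 over the preceding bytes (the real layout lives in
    // ChunkFileMetaPage::decode; this helper and its signature are
    // hypothetical):
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    bool MetaPageChecksumOk(const char* page, size_t pageSize,
                            uint32_t (*crc32)(const char*, size_t)) {
        uint32_t stored = 0;
        std::memcpy(&stored, page + pageSize - sizeof(stored), sizeof(stored));
        // Any bit flip in the covered bytes makes the recomputed CRC mismatch.
        return crc32(page, pageSize - sizeof(stored)) == stored;
    }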
- // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and re-initialize; the restart should fail + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_FALSE(dataStore_->Initialize()); } /** - * 异常测试5 - * 用例:WriteChunk数据写到一半重启 - * 预期:重启成功,重新执行上一条操作成功 + * Exception Test 5 + * Scenario: restart occurs while WriteChunk data is only half written + * Expected: restart succeeds, and re-executing the previous operation + * succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest5) { SequenceNum fileSn = 1; @@ -275,66 +238,54 @@ TEST_F(ExceptionTestSuit, ExceptionTest5) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 构造要写入的数据和请求偏移 + // Construct the data to be written and the request offset char buf2[2 * PAGE_SIZE]; memset(buf2, '2', length); offset = 0; length = 2 * PAGE_SIZE; - // 通过lfs写一半数据到chunk文件 - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(1); - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + // Write half of the data to the chunk file through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf2, offset + PAGE_SIZE, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and re-initialize; the restart should succeed + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 模拟日志恢复 + // Simulate log recovery errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读数据校验 + // Read back the data to verify char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); } /** - * 异常测试6 - * 用例:WriteChunk更新metapage后重启,sn>chunk.sn,sn==chunk.correctedSn - * 预期:重启成功,重新执行上一条操作成功 + * Exception Test 6 + * Scenario: WriteChunk updates the metapage and then restarts, + * sn>chunk.sn, sn==chunk.correctedSn + * Expected: restart succeeds, and re-executing the previous operation + * succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest6) { SequenceNum fileSn = 1; @@ -343,84 +294,70 @@ TEST_F(ExceptionTestSuit, ExceptionTest6) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 更新 correctedsn 为2 + // Update correctedSn to 2 errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(1, 2); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 构造要写入的请求参数 + // Construct the write request parameters char buf2[2 * PAGE_SIZE]; memset(buf2, 
'2', length); offset = 0; length = 2 * PAGE_SIZE; fileSn = 2; // sn > chunk.sn; sn == chunk.correctedSn - // 通过lfs修改chunk1的metapage - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(1); + // Modify the metapage of chunk1 through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); char metabuf[PAGE_SIZE]; - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metabuf, 0, PAGE_SIZE); - // 模拟更新metapage成功 + // Simulate a successful metapage update ChunkFileMetaPage metaPage; errorCode = metaPage.decode(metabuf); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, metaPage.sn); metaPage.sn = fileSn; metaPage.encode(metabuf); - // 更新metapage + // Update the metapage lfs_->Write(fd, metabuf, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and re-initialize; the restart should succeed + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 模拟日志恢复 + // Simulate log recovery errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读数据校验 + // Read back the data to verify char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); } /** - * 异常测试7 - * 用例:WriteChunk产生快照后重启,恢复历史操作和当前操作 - * sn>chunk.sn, sn>chunk.correctedSn - * 测chunk.sn>chunk.correctedSn - * 预期:重启成功 + * Exception Test 7 + * Scenario: WriteChunk generates a snapshot and then restarts; restore the + * historical operation and the current operation + * sn>chunk.sn, sn>chunk.correctedSn + * Tests chunk.sn>chunk.correctedSn + * Expected: restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest7) { SequenceNum fileSn = 1; @@ -429,18 +366,15 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where + // chunk.sn>chunk.correctedSn char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟创建快照文件 + // Simulate creating a snapshot file ChunkOptions chunkOption; chunkOption.id = 1; chunkOption.sn = 1; @@ -452,19 +386,17 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { errorCode = snapshot.Open(true); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and re-initialize; the restart should succeed + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check if snapshot information is loaded CSChunkInfo info; errorCode = 
dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -472,61 +404,47 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复前一条操作 + // Simulate log recovery replaying the previous operation errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读快照文件来校验是否有cow + // Read the snapshot file to verify whether COW occurred char readbuf[2 * PAGE_SIZE]; snapshot.Read(readbuf, offset, length); - // 预期未发生cow + // Expect that no COW occurred ASSERT_NE(0, memcmp(buf1, readbuf, length)); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation fileSn++; char buf2[PAGE_SIZE]; memset(buf2, '2', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查是否更新了版本号 + // Check if the version number has been updated errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // chunk data is overwritten errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data was COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id - 1, - readbuf, - offset, - length); + 1, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); } /** - * 异常测试8 - * 用例:WriteChunk产生快照后重启, + * Exception Test 8 + * Scenario: WriteChunk generates a snapshot and then restarts, * sn>chunk.sn, sn>chunk.correctedSn - * 测chunk.sn==chunk.correctedSn - * 预期:重启成功 + * Tests chunk.sn==chunk.correctedSn + * Expected: restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest8) { SequenceNum fileSn = 1; @@ -535,27 +453,20 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,构造chunk.sn==chunk.correctedsn的场景 + // Generate chunk1 and construct a scenario where + // chunk.sn==chunk.correctedSn char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(1, 2); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->WriteChunk(1, // id - ++fileSn, - buf1, - offset, - length, - nullptr); + ++fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟创建快照文件 + // Simulate creating a snapshot file ChunkOptions chunkOption; chunkOption.id = 1; chunkOption.sn = 2; @@ -567,19 +478,17 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { errorCode = snapshot.Open(true); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and re-initialize; the restart should succeed + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - 
// 检查是否加载了快照信息 + // Check if snapshot information is loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -587,60 +496,46 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { ASSERT_EQ(2, info.snapSn); ASSERT_EQ(2, info.correctedSn); - // 模拟日志恢复前一条操作 + // Simulate log recovery replaying the previous operation errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读快照文件来校验是否有cow + // Read the snapshot file to verify whether COW occurred char readbuf[2 * PAGE_SIZE]; snapshot.Read(readbuf, offset, length); - // 预期未发生cow + // Expect that no COW occurred ASSERT_NE(0, memcmp(buf1, readbuf, length)); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation fileSn++; char buf2[PAGE_SIZE]; memset(buf2, '2', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查是否更新了版本号 + // Check if the version number has been updated errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(3, info.curSn); ASSERT_EQ(2, info.snapSn); ASSERT_EQ(2, info.correctedSn); - // chunk数据被覆盖 + // chunk data is overwritten errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data was COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id - 2, - readbuf, - offset, - length); + 2, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); } /** - * 异常测试9 - * 用例:WriteChunk产生快照并更新metapage后重启,恢复历史操作和当前操作 - * sn>chunk.sn, sn>chunk.correctedSn - * 预期:重启成功 + * Exception Test 9 + * Scenario: WriteChunk generates a snapshot and updates the metapage before + * restarting; restore the historical operation and the current operation + * sn>chunk.sn, sn>chunk.correctedSn + * Expected: restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest9) { SequenceNum fileSn = 1; @@ -649,18 +544,15 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where + // chunk.sn>chunk.correctedSn char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟创建快照文件 + // Simulate creating a snapshot file ChunkOptions chunkOption; chunkOption.id = 1; chunkOption.sn = 1; @@ -672,38 +564,36 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { errorCode = snapshot.Open(true); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1的metapage - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(1); + // Modify the metapage of chunk1 through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); char metabuf[PAGE_SIZE]; - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metabuf, 0, PAGE_SIZE); - // 模拟更新metapage成功 + // Simulate a successful metapage update ChunkFileMetaPage metaPage; errorCode = 
metaPage.decode(metabuf); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, metaPage.sn); metaPage.sn = 2; metaPage.encode(metabuf); - // 更新metapage + // Update the metapage lfs_->Write(fd, metabuf, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and re-initialize; the restart should succeed + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check if snapshot information is loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -711,56 +601,42 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复前一条操作 + // Simulate log recovery replaying the previous operation errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::BackwardRequestError); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation fileSn++; char buf2[PAGE_SIZE]; memset(buf2, '2', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查是否更新了版本号 + // Check if the version number has been updated errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // chunk data is overwritten char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data was COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id - 1, - readbuf, - offset, - length); + 1, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); } /** - * 异常测试10 - * 用例:WriteChunk更新快照metapage前重启,恢复历史操作和当前操作 - * sn>chunk.sn, sn>chunk.correctedSn - * 预期:重启成功 + * Exception Test 10 + * Scenario: restart occurs before WriteChunk updates the snapshot metapage; + * restore the historical operation and the current operation + * sn>chunk.sn, sn>chunk.correctedSn + * Expected: restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest10) { SequenceNum fileSn = 1; @@ -769,42 +645,35 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where + // chunk.sn>chunk.correctedSn char buf1[2 * PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 产生快照文件 + // Generate a snapshot file fileSn++; length = PAGE_SIZE; char buf2[2 * PAGE_SIZE]; memset(buf2, '2', 2 * PAGE_SIZE); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟cow - std::string snapPath = baseDir + "/" + - 
- // 模拟cow - std::string snapPath = baseDir + "/" - FileNameOperator::GenerateSnapshotName(1, 1); - int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); + // Simulate COW + std::string snapPath = + baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); + int fd = lfs_->Open(snapPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf1, 2 * PAGE_SIZE, PAGE_SIZE); - // 更新metapage + // Update the metapage char metabuf[PAGE_SIZE]; lfs_->Read(fd, metabuf, 0, PAGE_SIZE); - // 修改metapage + // Modify the metapage SnapshotMetaPage metaPage; errorCode = metaPage.decode(metabuf); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -813,19 +682,17 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { lfs_->Write(fd, metabuf, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it to simulate the restart + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check whether the snapshot information was loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -833,67 +700,52 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复 + // Simulate log replay offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 1, // sn - buf1, - offset, - length, - nullptr); + buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::BackwardRequestError); - // 模拟恢复下一个操作 + // Simulate replaying the next operation length = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn - buf2, - offset, - length, - nullptr); + buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟恢复最后一条操作 + // Simulate replaying the last operation offset = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn - buf2, - offset, - length, - nullptr); + buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk 信息是否正确 + // Check whether the chunk information is correct errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // Chunk data is overwritten char readbuf[2 * PAGE_SIZE]; offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data was COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id - 1, - readbuf, - offset, - length); + 1, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); } /** - * 异常测试11 - * 用例:WriteChunk更新快照metapage后重启,恢复历史操作和当前操作 - * sn>chunk.sn, sn>chunk.correctedSn - * 预期:重启成功 + * Exception Test 11 + * Scenario: restart after WriteChunk updates the snapshot metapage, then + * recover the historical operation and the current operation + * sn>chunk.sn, sn>chunk.correctedSn + * Expected: restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest11) { SequenceNum fileSn = 1; @@ -902,53 +754,44 @@ TEST_F(ExceptionTestSuit, ExceptionTest11) { CSErrorCode errorCode; CSChunkInfo 
chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where + // chunk.sn>chunk.correctedSn char buf1[2 * PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 产生快照文件 + // Generate a snapshot file fileSn++; length = PAGE_SIZE; char buf2[2 * PAGE_SIZE]; memset(buf2, '2', 2 * PAGE_SIZE); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟cow - std::string snapPath = baseDir + "/" - FileNameOperator::GenerateSnapshotName(1, 1); - int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); + // Simulate COW + std::string snapPath = + baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); + int fd = lfs_->Open(snapPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf1, 2 * PAGE_SIZE, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it to simulate the restart + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check whether the snapshot information was loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -956,66 +799,51 @@ TEST_F(ExceptionTestSuit, ExceptionTest11) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复 + // Simulate log replay offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 1, // sn - buf1, - offset, - length, - nullptr); + buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::BackwardRequestError); - // 模拟恢复下一个操作 + // Simulate replaying the next operation length = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn - buf2, - offset, - length, - nullptr); + buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟恢复最后一条操作 + // Simulate replaying the last operation offset = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn - buf2, - offset, - length, - nullptr); + buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk 信息是否正确 + // Check whether the chunk information is correct errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // Chunk data is overwritten char readbuf[2 * PAGE_SIZE]; offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data was COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id - 1, - readbuf, - offset, - length); + 1, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); }
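// Editor's note: a standalone sketch (not part of this patch) of why replaying
// an older write returns BackwardRequestError, as asserted in the tests above:
// a request carrying a stale sequence number was already applied before the
// restart, so refusing it keeps log replay idempotent. Names are illustrative.
#include <cstdint>
enum class SketchErr { Success, BackwardRequestError };
inline SketchErr CheckRequestSn(uint64_t chunkSn, uint64_t requestSn) {
    if (requestSn < chunkSn) {
        return SketchErr::BackwardRequestError;  // already applied; skip it
    }
    return SketchErr::Success;
}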
/** - * 异常测试12 - * 用例:PasteChunk,数据写入一半时,还未更新metapage重启/崩溃 - * 预期:重启成功,paste成功 + * Exception Test 12 + * Scenario: PasteChunk restarts/crashes when data has been written halfway + * and the metapage has not yet been updated + * Expected: restart succeeds, paste succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest12) { ChunkID id = 1; @@ -1027,14 +855,13 @@ TEST_F(ExceptionTestSuit, ExceptionTest12) { CSChunkInfo info; std::string location("test@s3"); - // 创建克隆文件chunk1 + // Create clone file chunk1 errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, correctedSn, // corrected sn - CHUNK_SIZE, - location); + CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check that all chunk information meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -1048,58 +875,50 @@ TEST_F(ExceptionTestSuit, ExceptionTest12) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 构造要写入的数据和请求偏移 + // Construct the data to write and the request offset char buf1[PAGE_SIZE]; memset(buf1, '1', length); offset = 0; length = PAGE_SIZE; - // 通过lfs写数据到chunk文件 - std::string chunkPath = baseDir + "/" - FileNameOperator::GenerateChunkFileName(1); - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + // Write data to the chunk file through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf1, offset + PAGE_SIZE, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.blockSize = BLOCK_SIZE; options.metaPageSize = PAGE_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it to simulate the restart + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 模拟日志恢复 + // Simulate log replay errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, correctedSn, // corrected sn - CHUNK_SIZE, - location); + CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->PasteChunk(1, // id - buf1, - offset, - length); + buf1, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查bitmap + // Check the bitmap errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(1, info.bitmap->NextClearBit(0)); - // 读数据校验 + // Read back the data and verify it char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id - sn, - readbuf, - offset, - length); + sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); }
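// Editor's note: a standalone sketch (not part of this patch) of the bitmap
// bookkeeping that the clone-chunk assertions above check: after pasting a
// single page into a fresh clone chunk, page 0 reads as written and page 1 as
// clear, mirroring NextSetBit(0) == 0 and NextClearBit(0) == 1. The bitset is
// only an illustrative stand-in for the real Bitmap class.
#include <bitset>
#include <cassert>
int main() {
    std::bitset<4096> written;  // one bit per page of a clone chunk
    written.set(0);             // PasteChunk covered page 0 only
    assert(written.test(0));    // NextSetBit(0) would return 0
    assert(!written.test(1));   // NextClearBit(0) would return 1
    return 0;
}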
"src/common/concurrent/concurrent.h" #include "src/common/timeutility.h" #include "src/fs/local_filesystem.h" -#include "src/chunkserver/datastore/define.h" -#include "src/chunkserver/datastore/file_pool.h" -#include "src/chunkserver/datastore/chunkserver_datastore.h" #include "test/chunkserver/datastore/filepool_helper.h" -using curve::fs::FileSystemType; -using curve::fs::LocalFileSystem; -using curve::fs::LocalFsFactory; using curve::common::Atomic; using curve::common::Thread; using curve::common::TimeUtility; +using curve::fs::FileSystemType; +using curve::fs::LocalFileSystem; +using curve::fs::LocalFsFactory; using ::testing::UnorderedElementsAre; @@ -55,12 +56,12 @@ const ChunkSizeType CHUNK_SIZE = 16 * kMB; const ChunkSizeType BLOCK_SIZE = 4096; const PageSizeType PAGE_SIZE = 4 * 1024; -extern const string baseDir; // NOLINT -extern const string poolDir; // NOLINT +extern const string baseDir; // NOLINT +extern const string poolDir; // NOLINT extern const string poolMetaPath; // NOLINT /** - * DataStore层集成LocalFileSystem层测试 + * Datastore layer integration LocalFileSystem layer testing */ class DatastoreIntegrationBase : public testing::Test { public: @@ -79,9 +80,7 @@ class DatastoreIntegrationBase : public testing::Test { options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - dataStore_ = std::make_shared(lfs_, - filePool_, - options); + dataStore_ = std::make_shared(lfs_, filePool_, options); if (dataStore_ == nullptr) { LOG(FATAL) << "allocate chunkfile pool failed!"; } @@ -105,8 +104,7 @@ class DatastoreIntegrationBase : public testing::Test { cfop.metaPageSize = PAGE_SIZE; memcpy(cfop.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); - if (lfs_->DirExists(poolDir)) - lfs_->Delete(poolDir); + if (lfs_->DirExists(poolDir)) lfs_->Delete(poolDir); allocateChunk(lfs_, chunkNum, poolDir, CHUNK_SIZE); ASSERT_TRUE(filePool_->Initialize(cfop)); ASSERT_EQ(chunkNum, filePool_->Size()); @@ -121,8 +119,8 @@ class DatastoreIntegrationBase : public testing::Test { } protected: - std::shared_ptr filePool_; - std::shared_ptr lfs_; + std::shared_ptr filePool_; + std::shared_ptr lfs_; std::shared_ptr dataStore_; }; diff --git a/test/integration/chunkserver/datastore/datastore_integration_test.cpp b/test/integration/chunkserver/datastore/datastore_integration_test.cpp index 52693dfa9e..a5f0316ba9 100644 --- a/test/integration/chunkserver/datastore/datastore_integration_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_integration_test.cpp @@ -28,8 +28,8 @@ namespace chunkserver { const uint64_t kMB = 1024 * 1024; const ChunkSizeType CHUNK_SIZE = 16 * kMB; const PageSizeType PAGE_SIZE = 4 * 1024; -const string baseDir = "./data_int"; // NOLINT -const string poolDir = "./chunkfilepool_int"; // NOLINT +const string baseDir = "./data_int"; // NOLINT +const string poolDir = "./chunkfilepool_int"; // NOLINT const string poolMetaPath = "./chunkfilepool_int.meta"; // NOLINT class DatastoreIntegrationTest : public DatastoreIntegrationBase { @@ -39,51 +39,49 @@ class DatastoreIntegrationTest : public DatastoreIntegrationBase { }; /** - * 基本功能测试验证 - * 读、写、删、获取文件信息 + * Basic functional testing verification + * Read, write, delete, and obtain file information */ TEST_F(DatastoreIntegrationTest, BasicTest) { ChunkID id = 1; SequenceNum sn = 1; off_t offset = 0; size_t length = PAGE_SIZE; - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(id); + std::string chunkPath = + baseDir + "/" + 
FileNameOperator::GenerateChunkFileName(id); CSErrorCode errorCode; CSChunkInfo info; - /******************场景一:新建的文件,Chunk文件不存在******************/ + /******************Scenario 1: New File Created, Chunk File Does Not + * Exist******************/ - // 文件不存在 + // File does not exist ASSERT_FALSE(lfs_->FileExists(chunkPath)); - // 读chunk时返回ChunkNotExistError + // chunkNotExistError returned when reading chunk char readbuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 无法获取到chunk的版本号 + // Unable to obtain the version number of the chunk errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 删除chunk返回Success + // Delete chunk and return Success errorCode = dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); - /******************场景二:通过WriteChunk产生chunk文件后操作**************/ + /******************Scene 2: Operations after generating chunk files via + * WriteChunk.**************/ char buf1_1_1[PAGE_SIZE]; memset(buf1_1_1, 'a', length); - // 第一次WriteChunk会产生chunk文件 - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_1, - offset, - length, - nullptr); + // The first WriteChunk will generate a chunk file + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk的信息,且各项信息符合预期 + // Chunk information can be obtained and all information meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, info.curSn); @@ -95,87 +93,63 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); - // 读取写入的4KB验证一下,应当与写入数据相等 + // Verify that the 4KB read and written should be equal to the data written errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_1, readbuf, length)); - // 没被写过的区域也可以读,但是不保证读到的数据内容 - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - CHUNK_SIZE - PAGE_SIZE, - length); + // Areas that have not been written can also be read, but the data content + // read is not guaranteed + errorCode = + dataStore_->ReadChunk(id, sn, readbuf, CHUNK_SIZE - PAGE_SIZE, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // chunk 存在时,覆盖写 + // Overwrite when chunk exists char buf1_1_2[PAGE_SIZE]; memset(buf1_1_2, 'b', length); - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_2, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - offset, - 3 * PAGE_SIZE); + // Areas that have not been written can also be read, but the data content + // read is not guaranteed + errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, length)); - // chunk 存在时,写入未写过区域 + // When a chunk exists, write to an unwritten area char buf1_1_3[PAGE_SIZE]; memset(buf1_1_3, 'c', length); offset = PAGE_SIZE; length = PAGE_SIZE; - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_3, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_3, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 - errorCode = 
dataStore_->ReadChunk(id, - sn, - readbuf, - 0, - 3 * PAGE_SIZE); + // Areas that have not been written can also be read, but the data content + // read is not guaranteed + errorCode = dataStore_->ReadChunk(id, sn, readbuf, 0, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_1_3, readbuf + PAGE_SIZE, PAGE_SIZE)); - // chunk 存在时,覆盖部分区域 + // When the chunk exists, overwrite part of the written region char buf1_1_4[2 * PAGE_SIZE]; memset(buf1_1_4, 'd', length); offset = PAGE_SIZE; length = 2 * PAGE_SIZE; - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_4, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_4, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content + // read is not guaranteed - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - 0, - 3 * PAGE_SIZE); + errorCode = dataStore_->ReadChunk(id, sn, readbuf, 0, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_1_4, readbuf + PAGE_SIZE, 2 * PAGE_SIZE)); - - /******************场景三:用户删除文件******************/ + /******************Scene 3: User deletes the file******************/ errorCode = dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -185,7 +159,7 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { } /** - * 重启恢复测试 + * Restart recovery test */ TEST_F(DatastoreIntegrationTest, RestartTest) { SequenceNum fileSn = 1; @@ -196,7 +170,7 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { CSChunkInfo info3; std::string location("test@s3"); - // 构造要用到的读写缓冲区 + // Construct the read/write buffers to be used char buf1_1[2 * PAGE_SIZE]; memset(buf1_1, 'a', length); char buf2_1[2 * PAGE_SIZE]; @@ -212,7 +186,8 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { size_t readSize = 4 * PAGE_SIZE; char readBuf[4 * PAGE_SIZE]; - // 各个操作对应的错误码返回值,错误码命名格式为 e_optype_chunid_sn + // Error codes returned by each operation; the naming format is + // e_optype_chunkid_sn CSErrorCode e_write_1_1; CSErrorCode e_write_2_1; CSErrorCode e_write_2_2; @@ -224,112 +199,99 @@ CSErrorCode e_delsnap_2_3; CSErrorCode e_clone_3_1; - // 模拟所有用户请求,用lamdba函数可以用于验证日志恢复时重用这部分代码 - // 如果后面要加用例,只需要在函数内加操作即可 + // Simulate all user requests; the lambda lets this code be reused when + // verifying log replay. To add cases later, just add operations inside + // the function auto ApplyRequests = [&]() { fileSn = 1; - // 模拟普通文件操作,WriteChunk产生chunk1、chunk2 + // Simulate ordinary file operations, WriteChunk generates chunk1, + // chunk2 offset = 0; length = 2 * PAGE_SIZE; - // 产生chunk1 - e_write_1_1 = dataStore_->WriteChunk(1, // chunk id - fileSn, - buf1_1, - offset, - length, - nullptr); - // 产生chunk2 - e_write_2_1 = dataStore_->WriteChunk(2, // chunk id - fileSn, - buf1_1, - offset, - length, - nullptr); - // 删除chunk1 + // Generate chunk1 + e_write_1_1 = + dataStore_->WriteChunk(1, // chunk id + fileSn, buf1_1, offset, length, nullptr); + // Generate chunk2 + e_write_2_1 = + dataStore_->WriteChunk(2, // chunk id + fileSn, buf1_1, offset, length, nullptr); + // Delete chunk1 e_del_1_1 = dataStore_->DeleteChunk(1, fileSn); - // 模拟快照操作 + // Simulate snapshot operations ++fileSn; offset = 1 * PAGE_SIZE; length = 2 * 
PAGE_SIZE; - // 写chunk2,产生快照文件 + // Write chunk2, generating a snapshot file - e_write_2_2 = dataStore_->WriteChunk(2, // chunk id - fileSn, - buf2_2, - offset, - length, - nullptr); + e_write_2_2 = + dataStore_->WriteChunk(2, // chunk id + fileSn, buf2_2, offset, length, nullptr); - // 删除chunk2快照 + // Delete the chunk2 snapshot e_delsnap_2_2 = dataStore_->DeleteSnapshotChunkOrCorrectSn(2, fileSn); - // 模拟再次快照,然后删除chunk2快照 + // Simulate taking another snapshot and then delete the chunk2 snapshot ++fileSn; e_delsnap_2_3 = dataStore_->DeleteSnapshotChunkOrCorrectSn(2, fileSn); - // 模拟再次快照,然后写数据到chunk2产生快照 + // Simulate another snapshot, then write data to chunk2 to generate a + // snapshot ++fileSn; offset = 2 * PAGE_SIZE; length = 2 * PAGE_SIZE; - // 写chunk2,产生快照文件 + // Write chunk2, generating a snapshot file - e_write_2_4 = dataStore_->WriteChunk(2, // chunk id - fileSn, - buf2_4, - offset, - length, - nullptr); + e_write_2_4 = + dataStore_->WriteChunk(2, // chunk id + fileSn, buf2_4, offset, length, nullptr); - - // 模拟克隆操作 + + // Simulate clone operations e_clone_3_1 = dataStore_->CreateCloneChunk(3, // chunk id - 1, // sn - 0, // corrected sn - CHUNK_SIZE, - location); + 1, // sn + 0, // corrected sn + CHUNK_SIZE, location); - // 写数据到chunk3 + // Write data to chunk3 offset = 0; length = 2 * PAGE_SIZE; - // 写chunk3 - e_write_3_1 = dataStore_->WriteChunk(3, // chunk id - 1, // sn - writeBuf, - offset, - length, - nullptr); + // Write chunk3 + e_write_3_1 = dataStore_->WriteChunk(3, // chunk id + 1, // sn + writeBuf, offset, length, nullptr); - // paste数据到chunk3 + // Paste data to chunk3 offset = 1 * PAGE_SIZE; length = 2 * PAGE_SIZE; - e_paste_3_1 = dataStore_->PasteChunk(3, // chunk id - pasteBuf, - offset, - length); + e_paste_3_1 = dataStore_->PasteChunk(3, // chunk id + pasteBuf, offset, length); }; - // 检查上面用户操作以后,DataStore层各文件的状态,可重用 + // Check the state of each file in the DataStore layer after the user + // operations above; reusable after restart/replay auto CheckStatus = [&]() { CSErrorCode errorCode; - // chunk1 不存在 + // Chunk1 does not exist errorCode = dataStore_->GetChunkInfo(1, &info1); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // chunk2存在,版本为4,correctedSn为3,存在快照,快照版本为2 + // Chunk2 exists, version 4, correctedSn is 3, snapshot exists, snapshot + // version 2 errorCode = dataStore_->GetChunkInfo(2, &info2); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(4, info2.curSn); ASSERT_EQ(2, info2.snapSn); ASSERT_EQ(3, info2.correctedSn); - // 检查chunk2数据,[0, 1KB]:a , [1KB, 2KB]:b , [2KB, 4KB]:c + // Check chunk2 data, [0, 1KB]:a , [1KB, 2KB]:b , [2KB, 4KB]:c errorCode = dataStore_->ReadChunk(2, fileSn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2_1, readBuf, 1 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf2_2, readBuf + 1 * PAGE_SIZE, 1 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf2_4, readBuf + 2 * PAGE_SIZE, 2 * PAGE_SIZE)); - // 检查chunk2快照数据,[0, 1KB]:a , [1KB, 3KB]:b + // Check chunk2 snapshot data, [0, 1KB]:a , [1KB, 3KB]:b errorCode = dataStore_->ReadSnapshotChunk(2, 2, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2_1, readBuf, 1 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf2_2, readBuf + 1 * PAGE_SIZE, 2 * PAGE_SIZE)); };
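// Editor's note: a standalone sketch (not part of this patch) of the pattern
// ApplyRequests()/CheckStatus() implement: replaying the whole operation log
// after a simulated restart must leave the datastore in the same observable
// state, i.e. recovery is idempotent. The helper and its parameters are
// hypothetical.
#include <functional>
inline bool VerifyIdempotentReplay(const std::function<void()>& applyLog,
                                   const std::function<bool()>& checkState) {
    applyLog();           // original execution before the "restart"
    if (!checkState()) return false;
    applyLog();           // log replay after the simulated restart
    return checkState();  // the observable state must be unchanged
}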
- /******************构造重启前的数据******************/ - // 提交操作 + /******************Construct the data before the restart******************/ + // Apply the operations ApplyRequests(); - // 检查每次操作的返回值是否符合预期 + // Check that the return value of each operation meets expectations ASSERT_EQ(e_write_1_1, CSErrorCode::Success); ASSERT_EQ(e_write_2_1, CSErrorCode::Success); ASSERT_EQ(e_del_1_1, CSErrorCode::Success); @@ -340,27 +302,27 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { ASSERT_EQ(e_clone_3_1, CSErrorCode::Success); ASSERT_EQ(e_write_3_1, CSErrorCode::Success); ASSERT_EQ(e_paste_3_1, CSErrorCode::Success); - // 检查此时各个文件的状态 + // Check the status of each file at this time CheckStatus(); - /******************场景一:重启重新加载文件******************/ - // 模拟重启 + /******************Scene 1: Restart and reload files******************/ + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.pageSize = PAGE_SIZE; - // 构造新的dataStore_,并重新初始化 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查各个chunk的状态,应该与前面的一致 + // Check the status of each chunk, which should be consistent with the + // previous one CheckStatus(); - /******************场景二:恢复日志,重放之前的操作******************/ - // 模拟日志回放 + /******************Scene 2: Restore the log and replay the previous + * operations******************/ + // Simulate log replay ApplyRequests(); - // 检查每次操作的返回值是否符合预期 + // Check that the return value of each operation meets expectations ASSERT_EQ(e_write_1_1, CSErrorCode::Success); ASSERT_EQ(e_write_2_1, CSErrorCode::BackwardRequestError); ASSERT_EQ(e_del_1_1, CSErrorCode::Success); diff --git a/test/integration/chunkserver/datastore/datastore_restart_test.cpp b/test/integration/chunkserver/datastore/datastore_restart_test.cpp index f7a9d9ae5a..8d8a64812b 100644 --- a/test/integration/chunkserver/datastore/datastore_restart_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_restart_test.cpp @@ -27,10 +27,10 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_res"; // NOLINT -const string poolDir = "./chunfilepool_int_res"; // NOLINT +const string baseDir = "./data_int_res"; // NOLINT +const string poolDir = "./chunfilepool_int_res"; // NOLINT const string poolMetaPath = "./chunfilepool_int_res.meta"; // NOLINT -// 以下的测试读写数据都在[0, 32kb]范围内 +// All test reads and writes below fall within the [0, 32kb] range const uint64_t kMaxSize = 8 * PAGE_SIZE; struct RangeData { @@ -39,9 +39,7 @@ struct RangeData { size_t length; RangeData() = default; RangeData(char ch, off_t off, size_t len) - : data(ch) - , offset(off) - , length(len) {} + : data(ch), offset(off), length(len) {} }; struct ExpectStatus { @@ -52,12 +50,12 @@ struct ExpectStatus { ExpectStatus() : exist(false), chunkData(nullptr), snapshotData(nullptr) {} ~ExpectStatus() { if (chunkData != nullptr) { - delete [] chunkData; + delete[] chunkData; chunkData = nullptr; } if (snapshotData != nullptr) { - delete [] snapshotData; + delete[] snapshotData; snapshotData = nullptr; } } @@ -66,26 +64,16 @@ struct ExpectStatus { class ExecStep { public: explicit ExecStep(std::shared_ptr<CSDataStore>* datastore, ChunkID id) - : datastore_(datastore) - , id_(id) - , statusAfterExec_(nullptr) {} + : datastore_(datastore), id_(id), statusAfterExec_(nullptr) {} virtual ~ExecStep() {} - std::shared_ptr<CSDataStore> GetDataStore() { - return (*datastore_); - } + std::shared_ptr<CSDataStore> GetDataStore() { return (*datastore_); } - ChunkID GetChunkID() { - return id_; - } + ChunkID GetChunkID() { return id_; } - std::shared_ptr<ExpectStatus> GetStatus() { - return statusAfterExec_; - } + std::shared_ptr<ExpectStatus> GetStatus() { return statusAfterExec_; } - void ClearStatus() { 
statusAfterExec_ = nullptr; - } + void ClearStatus() { statusAfterExec_ = nullptr; } virtual void SetExpectStatus() { statusAfterExec_ = std::make_shared(); @@ -100,29 +88,25 @@ class ExecStep { uint32_t endIndex = kMaxSize / PAGE_SIZE - 1; std::vector setRanges; info.bitmap->Divide(0, endIndex, nullptr, &setRanges); - for (auto &range : setRanges) { + for (auto& range : setRanges) { off_t offset = range.beginIndex * PAGE_SIZE; size_t length = (range.endIndex - range.beginIndex + 1) * PAGE_SIZE; - (*datastore_)->ReadChunk(id_, - info.curSn, - (chunkData + offset), - offset, - length); + (*datastore_) + ->ReadChunk(id_, info.curSn, (chunkData + offset), + offset, length); } } else { - (*datastore_)->ReadChunk(id_, - info.curSn, - chunkData, - 0, - kMaxSize); + (*datastore_) + ->ReadChunk(id_, info.curSn, chunkData, 0, kMaxSize); } statusAfterExec_->chunkData = chunkData; - // 快照存在,读取快照数据 + // Snapshot exists, reading snapshot data if (info.snapSn > 0) { char* snapData = new char[kMaxSize]; - (*datastore_)->ReadSnapshotChunk( - id_, info.snapSn, snapData, 0, kMaxSize); + (*datastore_) + ->ReadSnapshotChunk(id_, info.snapSn, snapData, 0, + kMaxSize); statusAfterExec_->snapshotData = snapData; } } // if (err == CSErrorCode::Success) @@ -142,23 +126,22 @@ class ExecWrite : public ExecStep { public: ExecWrite(std::shared_ptr* datastore, ChunkID id, SequenceNum sn, RangeData data) - : ExecStep(datastore, id) - , sn_(sn) - , data_(data) {} + : ExecStep(datastore, id), sn_(sn), data_(data) {} ~ExecWrite() {} void Exec() override { char* buf = new char[data_.length]; memset(buf, data_.data, data_.length); - (*datastore_)->WriteChunk(id_, sn_, buf, - data_.offset, data_.length, nullptr); + (*datastore_) + ->WriteChunk(id_, sn_, buf, data_.offset, data_.length, nullptr); } void Dump() override { - printf("WriteChunk, id = %lu, sn = %lu, offset = %lu, " - "size = %lu, data = %c.\n", - id_, sn_, data_.offset, data_.length, data_.data); + printf( + "WriteChunk, id = %lu, sn = %lu, offset = %lu, " + "size = %lu, data = %c.\n", + id_, sn_, data_.offset, data_.length, data_.data); } private: @@ -170,21 +153,21 @@ class ExecPaste : public ExecStep { public: ExecPaste(std::shared_ptr* datastore, ChunkID id, RangeData data) - : ExecStep(datastore, id) - , data_(data) {} + : ExecStep(datastore, id), data_(data) {} ~ExecPaste() {} void Exec() override { char* buf = new char[data_.length]; memset(buf, data_.data, data_.length); (*datastore_)->PasteChunk(id_, buf, data_.offset, data_.length); - delete [] buf; + delete[] buf; } void Dump() override { - printf("PasteChunk, id = %lu, offset = %lu, " - "size = %lu, data = %c.\n", - id_, data_.offset, data_.length, data_.data); + printf( + "PasteChunk, id = %lu, offset = %lu, " + "size = %lu, data = %c.\n", + id_, data_.offset, data_.length, data_.data); } private: @@ -195,13 +178,10 @@ class ExecDelete : public ExecStep { public: ExecDelete(std::shared_ptr* datastore, ChunkID id, SequenceNum sn) - : ExecStep(datastore, id) - , sn_(sn) {} + : ExecStep(datastore, id), sn_(sn) {} ~ExecDelete() {} - void Exec() override { - (*datastore_)->DeleteChunk(id_, sn_); - } + void Exec() override { (*datastore_)->DeleteChunk(id_, sn_); } void Dump() override { printf("DeleteChunk, id = %lu, sn = %lu.\n", id_, sn_); @@ -213,11 +193,9 @@ class ExecDelete : public ExecStep { class ExecDeleteSnapshot : public ExecStep { public: - ExecDeleteSnapshot(std::shared_ptr* datastore, - ChunkID id, - SequenceNum correctedSn) - : ExecStep(datastore, id) - , correctedSn_(correctedSn) {} + 
ExecDeleteSnapshot(std::shared_ptr* datastore, ChunkID id, + SequenceNum correctedSn) + : ExecStep(datastore, id), correctedSn_(correctedSn) {} ~ExecDeleteSnapshot() {} void Exec() override { @@ -225,8 +203,10 @@ class ExecDeleteSnapshot : public ExecStep { } void Dump() override { - printf("DeleteSnapshotChunkOrCorrectSn, " - "id = %lu, correctedSn = %lu.\n", id_, correctedSn_); + printf( + "DeleteSnapshotChunkOrCorrectSn, " + "id = %lu, correctedSn = %lu.\n", + id_, correctedSn_); } private: @@ -238,22 +218,23 @@ class ExecCreateClone : public ExecStep { ExecCreateClone(std::shared_ptr* datastore, ChunkID id, SequenceNum sn, SequenceNum correctedSn, ChunkSizeType size, std::string location) - : ExecStep(datastore, id) - , sn_(sn) - , correctedSn_(correctedSn) - , size_(size) - , location_(location) {} + : ExecStep(datastore, id), + sn_(sn), + correctedSn_(correctedSn), + size_(size), + location_(location) {} ~ExecCreateClone() {} void Exec() override { - (*datastore_)->CreateCloneChunk( - id_, sn_, correctedSn_, size_, location_); + (*datastore_) + ->CreateCloneChunk(id_, sn_, correctedSn_, size_, location_); } void Dump() override { - printf("CreateCloneChunk, id = %lu, sn = %lu, correctedSn = %lu, " - "chunk size = %u, location = %s.\n", - id_, sn_, correctedSn_, size_, location_.c_str()); + printf( + "CreateCloneChunk, id = %lu, sn = %lu, correctedSn = %lu, " + "chunk size = %u, location = %s.\n", + id_, sn_, correctedSn_, size_, location_.c_str()); } private: @@ -269,41 +250,41 @@ class StepList { explicit StepList(ClearFunc clearFunc) : clearFunc_(clearFunc) {} ~StepList() {} - void Add(std::shared_ptr step) { - steps.push_back(step); - } + void Add(std::shared_ptr step) { steps.push_back(step); } - int GetStepCount() { - return steps.size(); - } + int GetStepCount() { return steps.size(); } void ClearEnv() { clearFunc_(); - // 清理每一步的预期状态,因为清理环境后,读取到的数据内容可能会不一样 - // 因为通过FilePool分配的chunk初始内容是不确定的 - for (auto &step : steps) { + // Clean up the expected state of each step, as the data content read + // after cleaning up the environment may differ Because the initial + // content of the chunk allocated through FilePool is uncertain + for (auto& step : steps) { step->ClearStatus(); } } - // 重启前,用户最后执行的操作可能为任意步骤, - // 需要验证每个步骤作为最后执行操作时,日志从该步骤前任意步骤进行恢复的幂等性 - // 对于未执行的步骤可以不必验证,只要保证已执行步骤的恢复是幂等的 - // 未执行的步骤恢复一定是幂等的 + // Before restarting, the last action performed by the user may be any step, + // It is necessary to verify the idempotence of the log recovery from any + // step before each step as the final execution operation For steps that + // have not been executed, there is no need to verify as long as the + // recovery of the executed steps is idempotent Unexecuted step recovery + // must be idempotent bool VerifyLogReplay() { - // 验证每个步骤作为最后执行操作时日志恢复的幂等性 + // Verify the idempotence of log recovery at each step as the final + // operation for (int lastStep = 0; lastStep < steps.size(); ++lastStep) { - // 重新初始化环境 + // Reinitialize the environment ClearEnv(); printf("==============Verify log replay to step%d==============\n", - lastStep + 1); - // 构造重启前环境 + lastStep + 1); + // Construct a pre restart environment if (!ConstructEnv(lastStep)) { LOG(ERROR) << "Construct env failed."; Dump(); return false; } - // 验证日志恢复后的幂等性 + // Verify the idempotence of log recovery if (!ReplayLog(lastStep)) { LOG(ERROR) << "Replay log failed." 
<< "last step: step" << lastStep + 1; @@ -322,15 +303,16 @@ class StepList { } private: - // 构造初始状态 + // Construction initial state bool ConstructEnv(int lastStep) { - // 模拟日志恢复前执行,用于构造初始Chunk状态,并初始化每一步的预期状态 + // Execute before simulating log recovery to construct the initial Chunk + // state and initialize the expected state for each step for (int curStep = 0; curStep <= lastStep; ++curStep) { std::shared_ptr step = steps[curStep]; step->Exec(); step->SetExpectStatus(); } - // 检查构造出来的状态是否符合预期 + // Check if the constructed state meets expectations if (!CheckStatus(lastStep)) { LOG(ERROR) << "Check chunk status failed." << "last step: step" << lastStep + 1; @@ -339,16 +321,18 @@ class StepList { return true; } - // 从最后步骤前任意一个步骤进行恢复都应该保证幂等性 + // Restoring from any step before the final step should ensure idempotence bool ReplayLog(int lastStep) { - // 模拟从不同的起始位置进行日志恢复 + // Simulate log recovery from different starting locations for (int beginStep = 0; beginStep <= lastStep; ++beginStep) { - // 执行恢复前,chunk的状态保证为预期的状态 + // Before performing the recovery, the state of the chunk is + // guaranteed to be the expected state for (int curStep = beginStep; curStep <= lastStep; ++curStep) { std::shared_ptr step = steps[curStep]; step->Exec(); } - // 每次日志恢复完成检查Chunk状态是否符合预期 + // Check if the Chunk status meets expectations after each log + // recovery is completed if (!CheckStatus(lastStep)) { LOG(ERROR) << "Check chunk status failed." << "begin step: step" << beginStep + 1 @@ -361,8 +345,7 @@ class StepList { bool CheckChunkData(std::shared_ptr step) { std::shared_ptr expectStatus = step->GetStatus(); - std::shared_ptr datastore = - step->GetDataStore(); + std::shared_ptr datastore = step->GetDataStore(); ChunkID id = step->GetChunkID(); CSChunkInfo info; datastore->GetChunkInfo(id, &info); @@ -373,50 +356,41 @@ class StepList { uint32_t endIndex = kMaxSize / PAGE_SIZE - 1; std::vector setRanges; info.bitmap->Divide(0, endIndex, nullptr, &setRanges); - for (auto &range : setRanges) { + for (auto& range : setRanges) { off_t offset = range.beginIndex * PAGE_SIZE; size_t length = (range.endIndex - range.beginIndex + 1) * PAGE_SIZE; - datastore->ReadChunk(id, - info.curSn, - (actualData + offset), - offset, - length); + datastore->ReadChunk(id, info.curSn, (actualData + offset), + offset, length); } } else { - datastore->ReadChunk(id, - info.curSn, - actualData, - 0, - kMaxSize); + datastore->ReadChunk(id, info.curSn, actualData, 0, kMaxSize); } int ret = memcmp(expectStatus->chunkData, actualData, kMaxSize); if (ret != 0) { LOG(ERROR) << "Data readed not as expect." 
- << "chunk id: " << id - << ", ret: " << ret; + << "chunk id: " << id << ", ret: " << ret; for (int i = 0; i < kMaxSize; ++i) { if (*(expectStatus->chunkData + i) != *(actualData + i)) { - LOG(ERROR) << "diff pos: " << i - << ", expect data: " - << *(expectStatus->chunkData + i) - << ", actual data: " << *(actualData + i); + LOG(ERROR) + << "diff pos: " << i + << ", expect data: " << *(expectStatus->chunkData + i) + << ", actual data: " << *(actualData + i); break; } } - delete [] actualData; + delete[] actualData; return false; } - delete [] actualData; + delete[] actualData; return true; } bool CheckSnapData(std::shared_ptr step) { std::shared_ptr expectStatus = step->GetStatus(); - std::shared_ptr datastore = - step->GetDataStore(); + std::shared_ptr datastore = step->GetDataStore(); ChunkID id = step->GetChunkID(); CSChunkInfo info; datastore->GetChunkInfo(id, &info); @@ -424,23 +398,22 @@ class StepList { char* actualData = new char[kMaxSize]; CSErrorCode err; - err = datastore->ReadSnapshotChunk( - id, info.snapSn, actualData, 0, kMaxSize); + err = datastore->ReadSnapshotChunk(id, info.snapSn, actualData, 0, + kMaxSize); if (err != CSErrorCode::Success) { LOG(ERROR) << "Read snapshot failed." - << "Error Code: " << err - << ", chunk id: " << id; - delete [] actualData; + << "Error Code: " << err << ", chunk id: " << id; + delete[] actualData; return false; } if (memcmp(expectStatus->snapshotData, actualData, kMaxSize) != 0) { LOG(ERROR) << "Data readed not as expect." - << "chunk id: " << id; - delete [] actualData; + << "chunk id: " << id; + delete[] actualData; return false; } - delete [] actualData; + delete[] actualData; return true; } @@ -448,57 +421,51 @@ class StepList { std::shared_ptr step = steps[lastStep]; std::shared_ptr expectStatus = step->GetStatus(); - // 获取chunk信息 - std::shared_ptr datastore = - step->GetDataStore(); + // Obtain chunk information + std::shared_ptr datastore = step->GetDataStore(); ChunkID id = step->GetChunkID(); CSChunkInfo info; CSErrorCode err = datastore->GetChunkInfo(id, &info); - // 返回Success说明chunk存在 + // Returning Success indicates that the chunk exists if (err == CSErrorCode::Success) { - // 检查chunk的状态 - if (!expectStatus->exist || - expectStatus->chunkInfo != info) { + // Check the status of the chunk + if (!expectStatus->exist || expectStatus->chunkInfo != info) { LOG(ERROR) << "Chunk info is not as expected!"; LOG(ERROR) << "Expect status(" << "chunk exist: " << expectStatus->exist << ", sn: " << expectStatus->chunkInfo.curSn - << ", correctedSn: " << expectStatus->chunkInfo.correctedSn // NOLINT + << ", correctedSn: " + << expectStatus->chunkInfo.correctedSn // NOLINT << ", snap sn: " << expectStatus->chunkInfo.snapSn << ", isClone: " << expectStatus->chunkInfo.isClone << ", location: " << expectStatus->chunkInfo.location << ")."; LOG(ERROR) << "Actual status(" - << "chunk exist: " << true - << ", sn: " << info.curSn - << ", correctedSn: " << info.correctedSn + << "chunk exist: " << true << ", sn: " << info.curSn + << ", correctedSn: " << info.correctedSn << ", isClone: " << info.isClone - << ", location: " << info.location - << ")."; + << ", location: " << info.location << ")."; return false; } - // 检查chunk的数据状态 - if (!CheckChunkData(step)) - return false; + // Check the data status of the chunk + if (!CheckChunkData(step)) return false; - // 检查快照状态 + // Check snapshot status if (info.snapSn > 0) { - // 检查快照的数据状态 - if (!CheckSnapData(step)) - return false; + // Check the data status of the snapshot + if (!CheckSnapData(step)) return 
false; } } else if (err == CSErrorCode::ChunkNotExistError) { - // 预期chunk存在,实际却不存在 + // The expected chunk exists, but it does not actually exist if (expectStatus->exist) { LOG(ERROR) << "Chunk is expected to exist, but actual not."; return false; } } else { LOG(ERROR) << "Get chunk info failed." - << "chunk id: " << id - << ", error code: " << err; + << "chunk id: " << id << ", error code: " << err; return false; } return true; @@ -529,7 +496,7 @@ TEST_F(RestartTestSuit, BasicTest) { ChunkID id = 1; SequenceNum sn = 1; - // 第一步:WriteChunk,写[0, 8kb]区域 + // Step 1: WriteChunk, write the [0, 8kb] area RangeData step1Data; step1Data.offset = 0; step1Data.length = 2 * PAGE_SIZE; @@ -538,7 +505,7 @@ TEST_F(RestartTestSuit, BasicTest) { std::make_shared(&dataStore_, id, sn, step1Data); list.Add(step1); - // 第二步:WriteChunk,写[4kb, 12kb]区域 + // Step 2: WriteChunk, write the [4kb, 12kb] area RangeData step2Data; step2Data.offset = PAGE_SIZE; step2Data.length = 2 * PAGE_SIZE; @@ -547,7 +514,7 @@ TEST_F(RestartTestSuit, BasicTest) { std::make_shared(&dataStore_, id, sn, step2Data); list.Add(step2); - // 第三步:DeleteChunk + // Step 3: DeleteChunk std::shared_ptr step3 = std::make_shared(&dataStore_, id, sn); list.Add(step3); @@ -561,7 +528,7 @@ TEST_F(RestartTestSuit, SnapshotTest) { ChunkID id = 1; SequenceNum sn = 1; - // 第一步:WriteChunk,写[0, 8kb]区域 + // Step 1: WriteChunk, write the [0, 8kb] area RangeData step1Data; step1Data.offset = 0; step1Data.length = 2 * PAGE_SIZE; @@ -570,10 +537,10 @@ TEST_F(RestartTestSuit, SnapshotTest) { std::make_shared(&dataStore_, id, sn, step1Data); list.Add(step1); - // 模拟用户打了快照,此时sn +1 + // Simulated user took a snapshot, at which point sn+1 ++sn; - // 第二步:WriteChunk,写[4kb, 12kb]区域 + // Step 2: WriteChunk, write the [4kb, 12kb] area RangeData step2Data; step2Data.offset = PAGE_SIZE; step2Data.length = 2 * PAGE_SIZE; @@ -582,20 +549,21 @@ TEST_F(RestartTestSuit, SnapshotTest) { std::make_shared(&dataStore_, id, sn, step2Data); list.Add(step2); - // 第三步:用户请求删除快照 + // Step 3: User requests to delete the snapshot std::shared_ptr step3 = std::make_shared(&dataStore_, id, sn); list.Add(step3); - // 模拟再次打快照 sn +1 + // Simulate taking a snapshot again sn+1 ++sn; - // 第四步:此次快照过程中没有数据写入,直接DeleteSnapshotOrCorrectedSn + // Step 4: No data was written during this snapshot process, directly delete + // SnapshotOrCorrectedSn std::shared_ptr step4 = std::make_shared(&dataStore_, id, sn); list.Add(step4); - // 第五步:WriteChunk,写[8kb, 16kb]区域 + // Step 5: WriteChunk, write the [8kb, 16kb] area RangeData step5Data; step5Data.offset = 2 * PAGE_SIZE; step5Data.length = 2 * PAGE_SIZE; @@ -604,10 +572,10 @@ TEST_F(RestartTestSuit, SnapshotTest) { std::make_shared(&dataStore_, id, sn, step5Data); list.Add(step5); - // 模拟再次打快照 sn +1 + // Simulate taking a snapshot again sn+1 ++sn; - // 第六步:WriteChunk,写[4kb, 12kb]区域 + // Step 6: WriteChunk, write the [4kb, 12kb] area RangeData step6Data; step6Data.offset = PAGE_SIZE; step6Data.length = 2 * PAGE_SIZE; @@ -616,20 +584,20 @@ TEST_F(RestartTestSuit, SnapshotTest) { std::make_shared(&dataStore_, id, sn, step6Data); list.Add(step6); - // 第七步:用户请求删除快照 + // Step 7: User requests to delete the snapshot std::shared_ptr step7 = std::make_shared(&dataStore_, id, sn); list.Add(step7); - // 模拟再次打快照 sn +1 + // Simulate taking a snapshot again sn+1 ++sn; - // 第八步:用户请求删除快照 + // Step 8: User requests to delete the snapshot std::shared_ptr step8 = std::make_shared(&dataStore_, id, sn); list.Add(step8); - // 第九步:用户请求删除chunk + // Step 9: User requests to delete 
chunk std::shared_ptr step9 = std::make_shared(&dataStore_, id, sn); list.Add(step9); @@ -637,7 +605,8 @@ TEST_F(RestartTestSuit, SnapshotTest) { ASSERT_TRUE(list.VerifyLogReplay()); } -// 测试克隆场景,以及克隆后打快照的组合场景 +// Test the cloning scenario and the combination scenario of taking a snapshot +// after cloning TEST_F(RestartTestSuit, CloneTest) { StepList list(clearFunc); @@ -646,17 +615,12 @@ TEST_F(RestartTestSuit, CloneTest) { SequenceNum correctedSn = 0; std::string location("test@s3"); - // 第一步:通过CreateCloneChunk创建clone chunk - std::shared_ptr step1 = - std::make_shared(&dataStore_, - id, - sn, - correctedSn, - CHUNK_SIZE, - location); + // Step 1: Create a clone chunk through CreateCloneChunk + std::shared_ptr step1 = std::make_shared( + &dataStore_, id, sn, correctedSn, CHUNK_SIZE, location); list.Add(step1); - // 第二步:WriteChunk,写[0kb, 8kb]区域 + // Step 2: WriteChunk, write the [0kb, 8kb] area RangeData step2Data; step2Data.offset = 0; step2Data.length = 2 * PAGE_SIZE; @@ -665,7 +629,7 @@ TEST_F(RestartTestSuit, CloneTest) { std::make_shared(&dataStore_, id, sn, step2Data); list.Add(step2); - // 第三步:PasteChunk,写[4kb, 12kb]区域 + // Step 3: PasteChunk, write the [4kb, 12kb] area RangeData step3Data; step3Data.offset = PAGE_SIZE; step3Data.length = 2 * PAGE_SIZE; @@ -674,7 +638,7 @@ TEST_F(RestartTestSuit, CloneTest) { std::make_shared(&dataStore_, id, step3Data); list.Add(step3); - // 第四步:通过PasteChunk 遍写chunk + // Step 4: Write the chunk through PasteChunk RangeData step4Data; step4Data.offset = 0; step4Data.length = CHUNK_SIZE; @@ -683,10 +647,10 @@ TEST_F(RestartTestSuit, CloneTest) { std::make_shared(&dataStore_, id, step4Data); list.Add(step4); - // 模拟打快照 + // Simulate taking a snapshot ++sn; - // 第五步:WriteChunk,写[4kb, 12kb]区域 + // Step 5: WriteChunk, write the [4kb, 12kb] area RangeData step5Data; step5Data.offset = PAGE_SIZE; step5Data.length = 2 * PAGE_SIZE; @@ -695,12 +659,12 @@ TEST_F(RestartTestSuit, CloneTest) { std::make_shared(&dataStore_, id, sn, step5Data); list.Add(step5); - // 第六步:用户请求删除快照 + // Step 6: User requests to delete the snapshot std::shared_ptr step6 = std::make_shared(&dataStore_, id, sn); list.Add(step6); - // 第七步:DeleteChunk + // Step 7: DeleteChunk std::shared_ptr step7 = std::make_shared(&dataStore_, id, sn); list.Add(step7); @@ -708,7 +672,7 @@ TEST_F(RestartTestSuit, CloneTest) { ASSERT_TRUE(list.VerifyLogReplay()); } -// 测试恢复场景 +// Testing Recovery Scenarios TEST_F(RestartTestSuit, RecoverTest) { StepList list(clearFunc); @@ -717,20 +681,15 @@ TEST_F(RestartTestSuit, RecoverTest) { SequenceNum correctedSn = 5; std::string location("test@s3"); - // 第一步:通过CreateCloneChunk创建clone chunk - std::shared_ptr step1 = - std::make_shared(&dataStore_, - id, - sn, - correctedSn, - CHUNK_SIZE, - location); + // Step 1: Create a clone chunk through CreateCloneChunk + std::shared_ptr step1 = std::make_shared( + &dataStore_, id, sn, correctedSn, CHUNK_SIZE, location); list.Add(step1); - // 数据写入的版本应为最新的版本 + // The version of data writing should be the latest version sn = correctedSn; - // 第二步:PasteChunk,写[0kb, 8kb]区域 + // Step 2: PasteChunk, write the [0kb, 8kb] area RangeData step2Data; step2Data.offset = 0; step2Data.length = 2 * PAGE_SIZE; @@ -739,7 +698,7 @@ TEST_F(RestartTestSuit, RecoverTest) { std::make_shared(&dataStore_, id, step2Data); list.Add(step2); - // 第三步:PasteChunk,写[4kb, 12kb]区域 + // Step 3: PasteChunk, write the [4kb, 12kb] area RangeData step3Data; step3Data.offset = PAGE_SIZE; step3Data.length = 2 * PAGE_SIZE; @@ -748,7 +707,7 @@ TEST_F(RestartTestSuit, 
RecoverTest) { std::make_shared(&dataStore_, id, sn, step3Data); list.Add(step3); - // 第四步:通过PasteChunk 遍写chunk + // Step 4: Write the chunk through PasteChunk RangeData step4Data; step4Data.offset = 0; step4Data.length = CHUNK_SIZE; @@ -757,7 +716,7 @@ TEST_F(RestartTestSuit, RecoverTest) { std::make_shared(&dataStore_, id, sn, step4Data); list.Add(step4); - // 第五步:DeleteChunk + // Step 5: DeleteChunk std::shared_ptr step5 = std::make_shared(&dataStore_, id, sn); list.Add(step5); @@ -765,7 +724,9 @@ TEST_F(RestartTestSuit, RecoverTest) { ASSERT_TRUE(list.VerifyLogReplay()); } -// 按照实际用户使用从场景随机产生每一步的操作,校验一定操作个数下都能保证幂等性 +// Randomly generate each step of the operation from the scene based on actual +// user usage, and verify that a certain number of operations can ensure +// idempotence TEST_F(RestartTestSuit, RandomCombine) { StepList list(clearFunc); @@ -775,7 +736,7 @@ TEST_F(RestartTestSuit, RandomCombine) { std::string location("test@s3"); std::srand(std::time(nullptr)); - // 写随机地址的数据,在[0, kMaxSize]范围内写 + // Write random address data within the range of [0, kMaxSize] auto randWriteOrPaste = [&](bool isPaste) { int pageCount = kMaxSize / PAGE_SIZE; RangeData stepData; @@ -793,21 +754,17 @@ TEST_F(RestartTestSuit, RandomCombine) { } }; - // 随机的克隆过程 + // Random cloning process auto randClone = [&]() { - // 二分之一概率,模拟恢复过程 - if (std::rand() % 2 == 0) - correctedSn = 2; + // Half probability, simulating the recovery process + if (std::rand() % 2 == 0) correctedSn = 2; std::shared_ptr createStep = - std::make_shared(&dataStore_, - id, - sn, - correctedSn, - CHUNK_SIZE, - location); + std::make_shared(&dataStore_, id, sn, correctedSn, + CHUNK_SIZE, location); list.Add(createStep); - // 克隆过程模拟5个操作,Write或者Paste,三分之一概率Write + // The cloning process simulates 5 operations, Write or Paste, with a + // one-third probability of Write for (int i = 0; i < 5; ++i) { if (std::rand() % 3 == 0) { randWriteOrPaste(false); @@ -816,7 +773,8 @@ TEST_F(RestartTestSuit, RandomCombine) { } } - // 遍写一遍chunk,可以用于模拟后续写入创建快照 + // Write the chunk over and over again, which can be used to simulate + // subsequent writes and create snapshots RangeData pasteData; pasteData.offset = 0; pasteData.length = CHUNK_SIZE; @@ -826,11 +784,12 @@ TEST_F(RestartTestSuit, RandomCombine) { list.Add(pasteStep); }; - // 随机的快照过程 + // Random snapshot process auto randSnapshot = [&](int* stepCount) { - // 快照需要将版本+1 + // Snapshots require version+1 ++sn; - // 三分之一的概率调DeleteSnapshot,一旦调了DeleteSnapshot就退出快照 + // One third of the probability is to call DeleteSnapshot, and once + // DeleteSnapshot is called, it exits the snapshot while (true) { if (std::rand() % 3 == 0) { std::shared_ptr step = @@ -844,14 +803,14 @@ TEST_F(RestartTestSuit, RandomCombine) { } }; - // 创建clone chunk, + // Create a clone chunk randClone(); - // 设置最长执行步数 + // Set the maximum number of execution steps int maxSteps = 30; int stepCount = 0; while (stepCount < maxSteps) { - // 三分之一的概率会模拟快照过程 + // One-third of the probability will simulate the snapshot process if (std::rand() % 3 == 0) { randSnapshot(&stepCount); } else { @@ -860,7 +819,7 @@ TEST_F(RestartTestSuit, RandomCombine) { } } - // 最后删除chunk + // Finally, delete the chunk std::shared_ptr lastStep = std::make_shared(&dataStore_, id, sn); list.Add(lastStep); diff --git a/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp b/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp index 61dc402c21..f1dfa68b26 100644 --- 
a/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_sna"; // NOLINT -const string poolDir = "./chunkfilepool_int_sna"; // NOLINT +const string baseDir = "./data_int_sna"; // NOLINT +const string poolDir = "./chunkfilepool_int_sna"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_sna.meta"; // NOLINT class SnapshotTestSuit : public DatastoreIntegrationBase { @@ -36,14 +36,16 @@ class SnapshotTestSuit : public DatastoreIntegrationBase { }; /** - * 快照场景测试 - * 构造存在两个chunk的文件,分别为chunk1和chunk2,做如下操作 - * 1.写chunk1 - * 2.模拟第一次打快照,转储过程中写chunk1并产生快照,chunk2未发生数据写入 - * 3.删除快照,然后向chunk2中写入数据 - * 4.模拟第二次打快照,转储过程中写chunk1,但是不写chunk2 - * 5.删除快照,再次向chunk2写入数据 - * 6.删除文件 + * Snapshot scenario test + * Construct a file with two chunks, chunk1 and chunk2, and do the following: + * 1. Write chunk1 + * 2. Simulate the first snapshot: chunk1 is written during the dump and a + * snapshot is generated; no data is written to chunk2 + * 3. Delete the snapshot, then write data to chunk2 + * 4. Simulate a second snapshot: chunk1 is written during the dump, but + * chunk2 is not + * 5. Delete the snapshot and write data to chunk2 again + * 6. Delete the file */ TEST_F(SnapshotTestSuit, SnapshotTest) { SequenceNum fileSn = 1; @@ -55,39 +57,34 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { CSChunkInfo chunk1Info; CSChunkInfo chunk2Info; - /******************构造初始环境,创建chunk1******************/ + /******************Construct the initial environment: create + * chunk1******************/ - // 向chunk1的[0, 12KB)区域写入数据 "1" + // Write data '1' to the [0, 12KB) area of chunk1 offset = 0; length = 3 * PAGE_SIZE; // 12KB char buf1_1[3 * PAGE_SIZE]; memset(buf1_1, '1', length); errorCode = dataStore_->WriteChunk(id1, // id - fileSn, - buf1_1, - offset, - length, - nullptr); + fileSn, buf1_1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - /******************场景一:第一次给文件打快照******************/ + /******************Scene 1: Take the first snapshot of the + * file******************/ - // 模拟打快照,此时文件版本递增 - ++fileSn; // fileSn == 2 + // Simulate taking a snapshot; the file version increases + ++fileSn; // fileSn == 2 - // 向chunk1的[4KB, 8KB)区域写入数据 “2” + // Write data '2' to the [4KB, 8KB) area of chunk1 offset = 1 * PAGE_SIZE; length = 1 * PAGE_SIZE; char buf1_2[3 * PAGE_SIZE]; memset(buf1_2, '2', 3 * PAGE_SIZE); errorCode = dataStore_->WriteChunk(id1, // id - fileSn, - buf1_2, - offset, - length, - nullptr); + fileSn, buf1_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk1的信息,且各项信息符合预期 + // Information on chunk1 can be obtained, and all information meets + // expectations errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk1Info.curSn); @@ -96,256 +93,242 @@ size_t readSize = 3 * PAGE_SIZE; char readbuf[3 * PAGE_SIZE]; - // 读chunk1快照文件的[0, 12KB)区域,读出来数据应该都是‘1’ + // Read the [0, 12KB) area of the chunk1 snapshot file, and the data read + // should all be '1' errorCode = dataStore_->ReadSnapshotChunk(id1, // chunk id - 1, // snap sn + 1, // snap sn readbuf, 0, // offset readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1, readbuf, readSize)); - // 重复写入,验证不会重复cow,读快照时[4KB, 8KB)区域的数据应为“1” + // Repeat the write and verify that COW is not repeated; when reading the + // snapshot, the data in the [4KB, 8KB) area should still be '1' errorCode = dataStore_->WriteChunk(id1, // id - fileSn, - buf1_2, - offset, - length, - nullptr); + fileSn, buf1_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success);
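// Editor's note: a standalone sketch (not part of this patch) of why the
// repeated write above does not trigger a second COW: pages already copied
// into the snapshot are remembered, so only the first write of a region pays
// the copy cost. The bitset stands in for the snapshot's page bitmap.
#include <bitset>
#include <cstddef>
inline bool NeedCopyPage(std::bitset<4096>* copied, size_t pageIndex) {
    if (copied->test(pageIndex)) {
        return false;            // already COWed; write through directly
    }
    copied->set(pageIndex);      // first write: copy the old page first
    return true;
}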
- // 写未cow过的区域,写入[0,4kb]区域 + // Write to an area not yet COWed: the [0, 4kb] region offset = 0; length = PAGE_SIZE; errorCode = dataStore_->WriteChunk(id1, // id - fileSn, - buf1_2, - offset, - length, - nullptr); + fileSn, buf1_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 写部分cow过的区域,写入[4kb,12kb]区域 + // Write to a partially COWed area: the [4kb, 12kb] region offset = PAGE_SIZE; length = 2 * PAGE_SIZE; errorCode = dataStore_->WriteChunk(id1, // id - fileSn, - buf1_2, - offset, - length, - nullptr); + fileSn, buf1_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk1的信息,且各项信息符合预期 + // Information on chunk1 can be obtained, and all information meets + // expectations errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, chunk1Info.curSn); ASSERT_EQ(1, chunk1Info.snapSn); ASSERT_EQ(0, chunk1Info.correctedSn); - // 此时读chunk1返回数据内容应该为[0,12KB]:2 - // 读chunk1快照返回的数据内容应该为[0, 12KB):1 - // 其余地址空间的数据可以不用保证 + // At this point, reading chunk1 should return [0, 12KB): 2 + // Reading the chunk1 snapshot should return [0, 12KB): 1 + // Data in the remaining address space need not be guaranteed readSize = 3 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); - errorCode = dataStore_->ReadChunk(id1, // chunk id - fileSn, - readbuf, - 0, // offset + errorCode = dataStore_->ReadChunk(id1, // chunk id + fileSn, readbuf, + 0, // offset readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_2, readbuf, readSize)); - // 读chunk1快照文件的[0, 12KB)区域,读出来数据应该还是‘1’ + // When reading the [0, 12KB) area of the chunk1 snapshot file, the read + // data should still be '1' readSize = 3 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadSnapshotChunk(id1, // chunk id - 1, // snap sn + 1, // snap sn readbuf, 0, // offset readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1, readbuf, readSize)); - // ReadSnapshotChun,请求offset+length > page size + // ReadSnapshotChunk with request offset + length > chunk size offset = CHUNK_SIZE - PAGE_SIZE; readSize = 2 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadSnapshotChunk(id1, // chunk id - 1, // snap sn + 1, // snap sn readbuf, offset, // offset readSize); ASSERT_EQ(errorCode, CSErrorCode::InvalidArgError); - // 读chunk2快照文件,返回ChunkNotExistError + // Reading the chunk2 snapshot file returns ChunkNotExistError readSize = 2 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadSnapshotChunk(id2, // chunk id - 1, // snap sn + 1, // snap sn readbuf, 0, // offset readSize); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - /******************场景二:第一次快照结束,删除快照******************/ + /******************Scene 2: First snapshot completes, delete the + * snapshot******************/ - // 请求删chunk1的快照,返回成功,并删除快照 + // Request to delete the snapshot of chunk1; it returns success and the + // snapshot is deleted errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id1, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success);
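// Editor's note: a standalone sketch (not part of this patch) of the two
// effects of DeleteSnapshotChunkOrCorrectSn observed in this test, using
// illustrative names: an existing snapshot is deleted; otherwise correctedSn
// is raised to the file sn so that a later write at that sn does not COW.
#include <cstdint>
struct SketchChunkInfo {
    uint64_t snapSn = 0;
    uint64_t correctedSn = 0;
};
inline void DeleteSnapshotOrCorrectSn(SketchChunkInfo* c, uint64_t fileSn) {
    if (c->snapSn > 0) {
        c->snapSn = 0;                // a snapshot exists: drop it
    } else if (fileSn > c->correctedSn) {
        c->correctedSn = fileSn;      // no snapshot: record the corrected sn
    }
}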
-    /******************场景二:第一次快照结束,删除快照******************/
+    /****************** Scenario 2: the first snapshot finishes; delete the
+     * snapshot ******************/

-    // 请求删chunk1的快照,返回成功,并删除快照
+    // Request deleting chunk1's snapshot; it returns success and the
+    // snapshot is deleted
    errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id1, fileSn);
    ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 检查chunk1信息,符合预期
+    // Check chunk1's info; it matches expectations
    errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(fileSn, chunk1Info.curSn);
    ASSERT_EQ(0, chunk1Info.snapSn);
    ASSERT_EQ(0, chunk1Info.correctedSn);

-    // 请求删chunk2的快照,返回成功
+    // Request deleting chunk2's snapshot; it returns success
    errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id2, fileSn);
    ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 向chunk2的[0, 8KB)区域写入数据 "a"
+    // Write data 'a' to the [0, 8KB) region of chunk2
    offset = 0;
    length = 2 * PAGE_SIZE;  // 8KB
    char buf2_2[2 * PAGE_SIZE];
    memset(buf2_2, 'a', length);
    errorCode = dataStore_->WriteChunk(id2,  // id
-                                       fileSn,
-                                       buf2_2,
-                                       offset,
-                                       length,
-                                       nullptr);
+                                       fileSn, buf2_2, offset, length, nullptr);
    ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 检查chunk1信息,符合预期
+    // Check chunk2's info; it matches expectations
    errorCode = dataStore_->GetChunkInfo(id2, &chunk2Info);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(fileSn, chunk2Info.curSn);
    ASSERT_EQ(0, chunk2Info.snapSn);
    ASSERT_EQ(0, chunk2Info.correctedSn);

-    /******************场景三:第二次打快照******************/
+    /****************** Scenario 3: take the second snapshot ******************/

-    // 模拟第二次打快照,版本递增
+    // Simulate taking a second snapshot; the version increases
    ++fileSn;  // fileSn == 3

-    // 向chunk1的[0KB, 8KB)区域写入数据 “3”
+    // Write data '3' to the [0KB, 8KB) region of chunk1
    offset = 0;
    length = 2 * PAGE_SIZE;
    char buf1_3[2 * PAGE_SIZE];
    memset(buf1_3, '3', length);
    errorCode = dataStore_->WriteChunk(id1,  // id
-                                       fileSn,
-                                       buf1_3,
-                                       offset,
-                                       length,
-                                       nullptr);
+                                       fileSn, buf1_3, offset, length, nullptr);
    ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 可以获取到chunk1的信息,且各项信息符合预期
+    // chunk1's info can be fetched, and every field matches expectations
    errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(fileSn, chunk1Info.curSn);
    ASSERT_EQ(2, chunk1Info.snapSn);
    ASSERT_EQ(0, chunk1Info.correctedSn);

-    // 此时读chunk1返回数据内容应该为[0,8KB]:3,[8KB, 12KB]:2
-    // 读chunk1快照返回的数据内容应该为[0, 12KB]:2
-    // 其余地址空间的数据可以不用保证
+    // At this point, reading chunk1 should return [0, 8KB):3 and
+    // [8KB, 12KB):2, and reading the chunk1 snapshot should return
+    // [0, 12KB):2; the data in the rest of the address space need not be
+    // guaranteed
    readSize = 3 * PAGE_SIZE;
    memset(readbuf, 0, sizeof(readbuf));
-    errorCode = dataStore_->ReadChunk(id1, // chunk id
-                                      fileSn,
-                                      readbuf,
-                                      0, // offset
+    errorCode = dataStore_->ReadChunk(id1,  // chunk id
+                                      fileSn, readbuf,
+                                      0,  // offset
                                      readSize);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(0, memcmp(buf1_3, readbuf, 2 * PAGE_SIZE));
    ASSERT_EQ(0, memcmp(buf1_2, readbuf + 2 * PAGE_SIZE, 1 * PAGE_SIZE));

-    // 读chunk1快照文件的[0, 12KB)区域,数据内容为‘2’
+    // Read the [0, 12KB) region of chunk1's snapshot file; the data content
+    // is '2'
    readSize = 3 * PAGE_SIZE;
    memset(readbuf, 0, sizeof(readbuf));
    errorCode = dataStore_->ReadSnapshotChunk(id1,  // chunk id
-                                              2, // snap sn
+                                              2,    // snap sn
                                              readbuf,
                                              0,  // offset
                                              readSize);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(0, memcmp(buf1_2, readbuf, readSize));

-    // 读chunk2快照返回的数据内容应该为[0, 8KB):a,其余地址空间的数据可以不用保证
+    // Reading the chunk2 snapshot should return [0, 8KB):'a'; the data in
+    // the rest of the address space need not be guaranteed
    readSize = 2 * PAGE_SIZE;
    memset(readbuf, 0, sizeof(readbuf));
    errorCode = dataStore_->ReadSnapshotChunk(id2,  // chunk id
-                                              2, // snap sn
+                                              2,    // snap sn
                                              readbuf,
                                              0,  // offset
                                              readSize);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(0, memcmp(buf2_2, readbuf, readSize));

-    /******************场景四:第二次快照结束,删除快照******************/
+    /****************** Scenario 4: the second snapshot finishes; delete the
+     * snapshot ******************/

-    // 请求删chunk1的快照,返回成功
+    // Request deleting chunk1's snapshot; it returns success
    errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id1, fileSn);
    ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 检查chunk1信息,符合预期
+    // Check chunk1's info; it matches expectations
    errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(fileSn, chunk1Info.curSn);
    ASSERT_EQ(0, chunk1Info.snapSn);
    ASSERT_EQ(0, chunk1Info.correctedSn);

-    // 请求删chunk2的快照,返回成功
+    // Request deleting chunk2's snapshot; it returns success
    errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id2, fileSn);
    ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 检查chunk2信息,符合预期
+    // Check chunk2's info; it matches expectations: since chunk2 holds no
+    // snapshot chunk, its sn is corrected instead
    errorCode = dataStore_->GetChunkInfo(id2, &chunk2Info);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(2, chunk2Info.curSn);
    ASSERT_EQ(0, chunk2Info.snapSn);
    ASSERT_EQ(fileSn, chunk2Info.correctedSn);
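The chunk1/chunk2 asserts above exercise both halves of the DeleteSnapshotChunkOrCorrectSn contract; a sketch of that contract as this test relies on it (simplified, hypothetical types, not the datastore implementation):

#include <cstdint>

enum class SnapDeleteAction { kDeletedSnapshot, kCorrectedSn };

// Sketch: if the chunk still holds a snapshot chunk, drop it (chunk1 above);
// otherwise raise correctedSn to the file's sn (chunk2 above), so a later
// write carrying that sn does not trigger another COW.
SnapDeleteAction DeleteSnapshotChunkOrCorrectSnSketch(bool hasSnapshot,
                                                      uint64_t fileSn,
                                                      uint64_t* correctedSn) {
    if (hasSnapshot) {
        return SnapDeleteAction::kDeletedSnapshot;
    }
    *correctedSn = fileSn;
    return SnapDeleteAction::kCorrectedSn;
}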
-    // 向chunk2的[0KB, 4KB)区域写入数据 “b”
+    // Write data 'b' to the [0KB, 4KB) region of chunk2
    offset = 0;
    length = 1 * PAGE_SIZE;
    char buf2_3[1 * PAGE_SIZE];
    memset(buf2_3, 'b', length);
    errorCode = dataStore_->WriteChunk(id2,  // id
-                                       fileSn,
-                                       buf2_3,
-                                       offset,
-                                       length,
-                                       nullptr);
-    // 检查chunk2信息,符合预期,curSn变为3,不会产生快照
+                                       fileSn, buf2_3, offset, length, nullptr);
+    // Check chunk2's info; as expected, curSn becomes 3 and no snapshot is
+    // generated
    errorCode = dataStore_->GetChunkInfo(id2, &chunk2Info);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(fileSn, chunk2Info.curSn);
    ASSERT_EQ(0, chunk2Info.snapSn);
    ASSERT_EQ(fileSn, chunk2Info.correctedSn);

-    // 再次向chunk2的[0KB, 8KB)区域写入数据
+    // Write data to the same [0KB, 4KB) region of chunk2 again
    errorCode = dataStore_->WriteChunk(id2,  // id
-                                       fileSn,
-                                       buf2_3,
-                                       offset,
-                                       length,
-                                       nullptr);
-    // 检查chunk2信息,chunk信息不变,不会产生快照
+                                       fileSn, buf2_3, offset, length, nullptr);
+    // Check chunk2's info; the chunk info is unchanged and no snapshot is
+    // generated
    errorCode = dataStore_->GetChunkInfo(id2, &chunk2Info);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(fileSn, chunk2Info.curSn);
    ASSERT_EQ(0, chunk2Info.snapSn);
    ASSERT_EQ(fileSn, chunk2Info.correctedSn);

-    /******************场景五:用户删除文件******************/
+    /****************** Scenario 5: the user deletes the file ******************/

-    // 此时删除Chunk1,返回Success
+    // Delete chunk1 now; it returns Success
    errorCode = dataStore_->DeleteChunk(id1, fileSn);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info);
    ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError);

-    // 此时删除Chunk2,返回Success
+    // Delete chunk2 now; it returns Success
    errorCode = dataStore_->DeleteChunk(id2, fileSn);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    errorCode = dataStore_->GetChunkInfo(id2, &chunk1Info);
diff --git a/test/integration/chunkserver/datastore/datastore_stress_test.cpp b/test/integration/chunkserver/datastore/datastore_stress_test.cpp
index 2364d61dd2..ae59850db5 100644
--- a/test/integration/chunkserver/datastore/datastore_stress_test.cpp
+++ b/test/integration/chunkserver/datastore/datastore_stress_test.cpp
@@ -25,8 +25,8 @@
 namespace curve {
 namespace chunkserver {
-const string baseDir = "./data_int_str";  // NOLINT
-const string poolDir =
"./chunkfilepool_int_str"; // NOLINT +const string baseDir = "./data_int_str"; // NOLINT +const string poolDir = "./chunkfilepool_int_str"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_str.meta"; // NOLINT class StressTestSuit : public DatastoreIntegrationBase { @@ -64,7 +64,7 @@ TEST_F(StressTestSuit, StressTest) { auto RunStress = [&](int threadNum, int rwPercent, int ioNum) { uint64_t beginTime = TimeUtility::GetTimeofDayUs(); - Thread *threads = new Thread[threadNum]; + Thread* threads = new Thread[threadNum]; int readThreadNum = threadNum * rwPercent / 100; int ioNumAvg = ioNum / threadNum; int idRange = 100; @@ -92,27 +92,27 @@ TEST_F(StressTestSuit, StressTest) { printf("===============TEST WRITE==================\n"); - // 测试单线程性能 + // Testing Single Thread Performance RunStress(1, 0, 10000); - // 10个线程 + // 10 threads RunStress(10, 0, 50000); - // 50个线程 + // 50 threads RunStress(50, 0, 100000); printf("===============TEST READ==================\n"); - // 测试单线程性能 + // Testing Single Thread Performance RunStress(1, 100, 10000); - // 10个线程 + // 10 threads RunStress(10, 100, 50000); - // 50个线程 + // 50 threads RunStress(50, 100, 100000); printf("===============TEST READWRITE==================\n"); - // 测试单线程性能 + // Testing Single Thread Performance RunStress(1, 50, 10000); - // 10个线程 + // 10 threads RunStress(10, 50, 50000); - // 50个线程 + // 50 threads RunStress(50, 50, 100000); } diff --git a/test/integration/client/chunkserver_exception_test.cpp b/test/integration/client/chunkserver_exception_test.cpp index dca71bdaf3..ae45e88940 100644 --- a/test/integration/client/chunkserver_exception_test.cpp +++ b/test/integration/client/chunkserver_exception_test.cpp @@ -20,22 +20,22 @@ * Author: tongguangxun */ -#include -#include #include +#include +#include -#include -#include -#include // NOLINT -#include // NOLINT -#include -#include #include +#include #include // NOLINT +#include +#include // NOLINT +#include +#include +#include // NOLINT -#include "src/common/timeutility.h" #include "include/client/libcurve.h" #include "src/client/inflight_controller.h" +#include "src/common/timeutility.h" #include "test/integration/client/common/file_operation.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" @@ -48,76 +48,73 @@ curve::client::InflightControl inflightContl; using curve::CurveCluster; const std::vector mdsConf{ - { "--confPath=./conf/mds.conf" }, - { "--log_dir=./runlog/ChunkserverException" }, - { "--mdsDbName=module_exception_curve_chunkserver" }, - { "--sessionInterSec=20" }, - { "--etcdAddr=127.0.0.1:22233" }, - { "--updateToRepoSec=5" }, + {"--confPath=./conf/mds.conf"}, + {"--log_dir=./runlog/ChunkserverException"}, + {"--mdsDbName=module_exception_curve_chunkserver"}, + {"--sessionInterSec=20"}, + {"--etcdAddr=127.0.0.1:22233"}, + {"--updateToRepoSec=5"}, }; const std::vector chunkserverConf4{ - { "-chunkServerStoreUri=local://./moduleException4/" }, - { "-chunkServerMetaUri=local://./moduleException4/chunkserver.dat" }, - { "-copySetUri=local://./moduleException4/copysets" }, - { "-raftSnapshotUri=curve://./moduleException4/copysets" }, - { "-raftLogUri=curve://./moduleException4/copysets" }, - { "-recycleUri=local://./moduleException4/recycler" }, - { "-chunkFilePoolDir=./moduleException4/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./moduleException4/chunkfilepool.meta" }, - { "-conf=./conf/chunkserver.conf.example" }, - { "-raft_sync_segments=true" }, - { "--log_dir=./runlog/ChunkserverException" }, - { 
"-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=22125" }, - { "-enableChunkfilepool=false" }, - { "-mdsListenAddr=127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=./moduleException4/walfilepool/" }, - { "-walFilePoolMetaPath=./moduleException4/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://./moduleException4/"}, + {"-chunkServerMetaUri=local://./moduleException4/chunkserver.dat"}, + {"-copySetUri=local://./moduleException4/copysets"}, + {"-raftSnapshotUri=curve://./moduleException4/copysets"}, + {"-raftLogUri=curve://./moduleException4/copysets"}, + {"-recycleUri=local://./moduleException4/recycler"}, + {"-chunkFilePoolDir=./moduleException4/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./moduleException4/chunkfilepool.meta"}, + {"-conf=./conf/chunkserver.conf.example"}, + {"-raft_sync_segments=true"}, + {"--log_dir=./runlog/ChunkserverException"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=22125"}, + {"-enableChunkfilepool=false"}, + {"-mdsListenAddr=127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=./moduleException4/walfilepool/"}, + {"-walFilePoolMetaPath=./moduleException4/walfilepool.meta"}}; const std::vector chunkserverConf5{ - { "-chunkServerStoreUri=local://./moduleException5/" }, - { "-chunkServerMetaUri=local://./moduleException5/chunkserver.dat" }, - { "-copySetUri=local://./moduleException5/copysets" }, - { "-raftSnapshotUri=curve://./moduleException5/copysets" }, - { "-raftLogUri=curve://./moduleException5/copysets" }, - { "-recycleUri=local://./moduleException5/recycler" }, - { "-chunkFilePoolDir=./moduleException5/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./moduleException5/chunkfilepool.meta" }, - { "-conf=./conf/chunkserver.conf.example" }, - { "-raft_sync_segments=true" }, - { "--log_dir=./runlog/ChunkserverException" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=22126" }, - { "-enableChunkfilepool=false" }, - { "-mdsListenAddr=127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=./moduleException5/walfilepool/" }, - { "-walFilePoolMetaPath=./moduleException5/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://./moduleException5/"}, + {"-chunkServerMetaUri=local://./moduleException5/chunkserver.dat"}, + {"-copySetUri=local://./moduleException5/copysets"}, + {"-raftSnapshotUri=curve://./moduleException5/copysets"}, + {"-raftLogUri=curve://./moduleException5/copysets"}, + {"-recycleUri=local://./moduleException5/recycler"}, + {"-chunkFilePoolDir=./moduleException5/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./moduleException5/chunkfilepool.meta"}, + {"-conf=./conf/chunkserver.conf.example"}, + {"-raft_sync_segments=true"}, + {"--log_dir=./runlog/ChunkserverException"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=22126"}, + {"-enableChunkfilepool=false"}, + {"-mdsListenAddr=127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=./moduleException5/walfilepool/"}, + {"-walFilePoolMetaPath=./moduleException5/walfilepool.meta"}}; const std::vector chunkserverConf6{ - { "-chunkServerStoreUri=local://./moduleException6/" }, - { "-chunkServerMetaUri=local://./moduleException6/chunkserver.dat" }, - { "-copySetUri=local://./moduleException6/copysets" }, - { "-raftSnapshotUri=curve://./moduleException6/copysets" }, - { "-raftLogUri=curve://./moduleException6/copysets" }, - { 
"-recycleUri=local://./moduleException6/recycler" }, - { "-chunkFilePoolDir=./moduleException6/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./moduleException6/chunkfilepool.meta" }, - { "-conf=./conf/chunkserver.conf.example" }, - { "-raft_sync_segments=true" }, - { "--log_dir=./runlog/ChunkserverException" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=22127" }, - { "-enableChunkfilepool=false" }, - { "-mdsListenAddr=127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=./moduleException6/walfilepool/" }, - { "-walFilePoolMetaPath=./moduleException6/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://./moduleException6/"}, + {"-chunkServerMetaUri=local://./moduleException6/chunkserver.dat"}, + {"-copySetUri=local://./moduleException6/copysets"}, + {"-raftSnapshotUri=curve://./moduleException6/copysets"}, + {"-raftLogUri=curve://./moduleException6/copysets"}, + {"-recycleUri=local://./moduleException6/recycler"}, + {"-chunkFilePoolDir=./moduleException6/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./moduleException6/chunkfilepool.meta"}, + {"-conf=./conf/chunkserver.conf.example"}, + {"-raft_sync_segments=true"}, + {"--log_dir=./runlog/ChunkserverException"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=22127"}, + {"-enableChunkfilepool=false"}, + {"-mdsListenAddr=127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=./moduleException6/walfilepool/"}, + {"-walFilePoolMetaPath=./moduleException6/walfilepool.meta"}}; std::string mdsaddr = // NOLINT "127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124"; @@ -143,15 +140,16 @@ class CSModuleException : public ::testing::Test { cluster->PrepareConfig(confPath, clientConf); - // 1. 启动etcd + // 1. Start etcd pid_t pid = cluster->StartSingleEtcd( 1, "127.0.0.1:22233", "127.0.0.1:22234", std::vector{ - "--name=module_exception_test_chunkserver" }); + "--name=module_exception_test_chunkserver"}); LOG(INFO) << "etcd 1 started on 127.0.0.1:22233:22234, pid = " << pid; ASSERT_GT(pid, 0); - // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 + // 2. Start one mds first, make it a leader, and then start the other + // two mds nodes pid = cluster->StartSingleMDS(1, "127.0.0.1:22122", 22128, mdsConf, true); LOG(INFO) << "mds 1 started on 127.0.0.1:22122, pid = " << pid; @@ -168,7 +166,7 @@ class CSModuleException : public ::testing::Test { ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); - // 3. 创建物理池 + // 3. Creating a physical pool std::string createPPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( " -cluster_map=./test/integration/client/" @@ -190,7 +188,7 @@ class CSModuleException : public ::testing::Test { retry++; } - // 4. 创建chunkserver + // 4. Create chunkserver pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid; @@ -207,7 +205,8 @@ class CSModuleException : public ::testing::Test { ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // 5. Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first std::string createLPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( @@ -228,15 +227,15 @@ class CSModuleException : public ::testing::Test { } ASSERT_EQ(ret, 0); - // 6. 初始化client配置 + // 6. 
Initialize client configuration
        ret = Init(confPath.c_str());
        ASSERT_EQ(ret, 0);

-        // 7. 创建一个文件
+        // 7. Create a file
        fd = curve::test::FileCommonOperation::Open("/test1", "curve");
        ASSERT_NE(fd, -1);

-        // 8. 先睡眠10s,让chunkserver选出leader
+        // 8. Sleep for 10 seconds first to let the chunkservers elect a
+        //    leader
        std::this_thread::sleep_for(std::chrono::seconds(10));
    }
@@ -282,12 +281,15 @@ class CSModuleException : public ::testing::Test {
    }

    /**
-     * 监测client io能否在预期时间内正常下发
-     * @param: off是当前需要下发IO的偏移
-     * @param: size是下发io的大小
-     * @param: predictTimeS是预期在多少秒内IO可以恢复
-     * @param[out]: failCount为当前io下发中错误返回的数量
-     * @return: 如果io在预期时间内能够正常下发,则返true,否则返回false
+     * Monitor whether client IO can be issued normally within the expected
+     * time
+     * @param: off is the offset of the IO to issue
+     * @param: size is the size of the IO to issue
+     * @param: predictTimeS is the number of seconds within which IO is
+     * expected to recover
+     * @param[out]: failCount is the number of errors returned while issuing
+     * IO
+     * @return: true if IO can be issued normally within the expected time,
+     * false otherwise
     */
    bool MonitorResume(uint64_t off, uint64_t size, uint64_t predictTimeS,
                       uint64_t* failCount = nullptr) {
@@ -335,7 +337,7 @@ class CSModuleException : public ::testing::Test {

        failCount == nullptr ? 0 : (*failCount = ioFailedCount);

-        // 唤醒io线程
+        // Wake up the IO thread
        iothread.join();
        inflightContl.WaitInflightAllComeBack();
@@ -345,7 +347,7 @@ class CSModuleException : public ::testing::Test {

    int fd;

-    // 是否出现挂卸载失败
+    // Whether a mount/unmount failure occurred
    bool createOrOpenFailed;
    bool createDone;
    std::mutex createMtx;
@@ -354,173 +356,185 @@ class CSModuleException : public ::testing::Test {

    CurveCluster* cluster;
};
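MonitorResume above drives AIO writes under inflight control and reports whether they succeed within predictTimeS; a condensed sketch of that deadline-polling pattern (standalone and simplified, using only standard <chrono>):

#include <chrono>
#include <functional>
#include <thread>

// Sketch: poll an operation until it succeeds or the recovery window closes,
// mirroring how MonitorResume bounds recovery by predictTimeS (simplified).
bool WaitRecover(const std::function<bool()>& tryIO, uint64_t predictTimeS) {
    auto deadline = std::chrono::steady_clock::now() +
                    std::chrono::seconds(predictTimeS);
    while (std::chrono::steady_clock::now() < deadline) {
        if (tryIO()) return true;  // IO resumed within the expected window
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
    }
    return false;
}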
-// 测试环境拓扑:在单节点上启动一个client、三个chunkserver、三个mds、一个etcd
+// Test environment topology: start one client, three chunkservers, three
+// mds, and one etcd on a single node
TEST_F(CSModuleException, ChunkserverException) {
    LOG(INFO) << "current case: KillOneChunkserverThenRestartTheChunkserver";
    /********* KillOneChunkserverThenRestartTheChunkserver **********/
-    // 1. 测试重启一个chunkserver
-    // 2.预期:
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. kill一台chunkserver:client 读写请求最多卡顿
-    //    election_timeout*2s可以正常读写
-    //    c. 恢复chunkserver:client 读写请求无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Test restarting one chunkserver
+    // 2. Expectations:
+    //    a. While the cluster is healthy, client read/write requests are
+    //       issued normally
+    //    b. Kill one chunkserver: client reads/writes stall for at most
+    //       election_timeout * 2s, then proceed normally
+    //    c. Recover the chunkserver: client reads/writes are unaffected
+    // 1. Initial cluster state: IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. kill掉一个chunkserver
+    // 2. Kill one chunkserver
    ASSERT_EQ(0, cluster->StopChunkServer(1));

-    // 3. kill掉一个chunkserver之后,client的io预期最多会在2*electtime后恢复
+    // 3. After one chunkserver is killed, client IO is expected to recover
+    //    within at most 2 * election_timeout
    ASSERT_TRUE(MonitorResume(0, 4096, 2));

-    // 4. 拉起刚才被kill的chunkserver
+    // 4. Restart the chunkserver that was just killed
    pid_t pid =
        cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4);
    LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 5. 重新拉起对client IO没有影响
+    // 5. Restarting it has no impact on client IO
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: HangOneChunkserverThenResumeTheChunkserver";
    /********* HangOneChunkserverThenResumeTheChunkserver ***********/
-    // 1. hang一台chunkserver,然后恢复hang的chunkserver
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. hang一台chunkserver:client
-    //    读写请求最多卡顿election_timeout*2s可以正常读写
-    //    c. 恢复chunkserver:client 读写请求无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang one chunkserver, then recover the hung chunkserver
+    // 2. Expectations:
+    //    a. While the cluster is healthy, client read/write requests are
+    //       issued normally
+    //    b. Hang one chunkserver: client reads/writes stall for at most
+    //       election_timeout * 2s, then proceed normally
+    //    c. Recover the chunkserver: client reads/writes are unaffected
+    // 1. Initial cluster state: IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. hang一个chunkserver
+    // 2. Hang one chunkserver
    ASSERT_EQ(0, cluster->HangChunkServer(1));

-    // 3. hang一个chunkserver之后,client的io预期最多会在2*electtime后恢复
+    // 3. After one chunkserver hangs, client IO is expected to recover
+    //    within at most 2 * election_timeout
    ASSERT_TRUE(MonitorResume(0, 4096, 2));

-    // 4. 拉起刚才被hang的chunkserver
+    // 4. Recover the chunkserver that was just hung
    ASSERT_EQ(0, cluster->RecoverHangChunkServer(1));

-    // 5. 重新拉起对client IO没有影响
+    // 5. Recovering it has no impact on client IO
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: KillTwoChunkserverThenRestartTheChunkserver";
    /******** KillTwoChunkserverThenRestartTheChunkserver *********/
-    // 1. 测试重启两个chunkserver
-    // 2.预期:
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. kill两台chunkserver:预期client IO持续hang,新写IO和覆盖写都hang
-    //    拉起被kill中的一台chunkserver:client IO预期在最多在
-    //    (chunkserver启动回放数据+2*election_timeout)时间内恢复读写
-    //    c. 拉起另外一台kill的chunkserver:client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Test restarting two chunkservers
+    // 2. Expectations:
+    //    a. While the cluster is healthy, client read/write requests are
+    //       issued normally
+    //    b. Kill two chunkservers: client IO is expected to keep hanging;
+    //       both new writes and overwrites hang. Restart one of the killed
+    //       chunkservers: client IO is expected to resume within at most
+    //       (time for the chunkserver to replay its data on startup
+    //       + 2 * election_timeout)
+    //    c. Restart the other killed chunkserver: client IO is unaffected
+    // 1. Initial cluster state: IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. kill掉两个chunkserver
+    // 2. Kill two chunkservers
    ASSERT_EQ(0, cluster->StopChunkServer(1));
    ASSERT_EQ(0, cluster->StopChunkServer(2));

-    // 3. kill掉两个chunkserver, io无法正常下发
+    // 3. With two chunkservers killed, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 30));

-    // 4. 拉起刚才被kill的chunkserver的第一个
+    // 4. Restart the first of the chunkservers that were just killed
    pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125",
                                          chunkserverConf4);
    LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 5. 拉起刚才被kill的chunkserver的第一个,
-    //    client的io预期最多会在2*electtime后恢复
-    //    如果配置了慢启动,则需要等待
+    // 5. After the first killed chunkserver is restarted, client IO is
+    //    expected to recover within at most 2 * election_timeout; if slow
+    //    start is configured, it needs to wait
    //    (copysetNum / load_concurrency) * election_timeout
    ASSERT_TRUE(MonitorResume(0, 4096, 80));
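The 80-second window in step 5 allows for the slow-start wait quoted in the comment; a worked sketch of that bound (the numbers are illustrative assumptions, not this cluster's configuration):

#include <cstdint>

// Sketch: the slow-start recovery bound from the comment above. With, say,
// copysetNum = 100, loadConcurrency = 5, electionTimeoutS = 1, the wait is
// (100 / 5) * 1 = 20 seconds, comfortably inside the 80s assertion window.
uint64_t SlowStartBoundS(uint64_t copysetNum, uint64_t loadConcurrency,
                         uint64_t electionTimeoutS) {
    return (copysetNum / loadConcurrency) * electionTimeoutS;
}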
-    // 6. 拉起刚才被kill的chunkserver的第二个
+    // 6. Restart the second of the chunkservers that were just killed
    pid = cluster->StartSingleChunkServer(2, "127.0.0.1:22126",
                                          chunkserverConf5);
    LOG(INFO) << "chunkserver 2 started on 127.0.0.1:22126, pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 7. 集群io不影响,正常下发
+    // 7. Cluster IO is unaffected and issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: HangTwoChunkserverThenResumeTheChunkserver";
    /******* HangTwoChunkserverThenResumeTheChunkserver **********/
-    // 1. hang两台chunkserver,然后恢复hang的chunkserver
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. hang两台chunkserver:client IO持续hang,新写IO和覆盖写都hang
-    //    c. 恢复其中的一台chunkserver:client IO 恢复读写,
-    //    从恢复chunkserver到client IO恢复时间在election_timeout*2
-    //    d. 恢复另外一台hang的chunkserver:client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang two chunkservers, then recover the hung chunkservers
+    // 2. Expectations:
+    //    a. While the cluster is healthy, client read/write requests are
+    //       issued normally
+    //    b. Hang two chunkservers: client IO keeps hanging; both new writes
+    //       and overwrites hang
+    //    c. Recover one of them: client IO resumes, and the time from the
+    //       chunkserver recovery to client IO recovery is within
+    //       election_timeout * 2
+    //    d. Recover the other hung chunkserver: client IO is unaffected
+    // 1. Initial cluster state: IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. hang掉两个个chunkserver
+    // 2. Hang two chunkservers
    ASSERT_EQ(0, cluster->HangChunkServer(1));
    ASSERT_EQ(0, cluster->HangChunkServer(2));

-    // 3. hang两个chunkserver, io无法正常下发
+    // 3. With two chunkservers hung, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 2));

-    // 4. 拉起刚才被hang的chunkserver的第一个
+    // 4. Recover the first of the chunkservers that were just hung
    ASSERT_EQ(0, cluster->RecoverHangChunkServer(1));

-    // 5. 拉起刚才被hang的chunkserver的第一个,
-    //    client的io预期最多会在2*electtime后恢复
-    //    如果配置了慢启动,则需要等待
+    // 5. After the first hung chunkserver is recovered, client IO is
+    //    expected to recover within at most 2 * election_timeout; if slow
+    //    start is configured, it needs to wait
    //    (copysetNum / load_concurrency) * election_timeout
    ASSERT_TRUE(MonitorResume(0, 4096, 80));

-    // 6. 拉起刚才被hang的chunkserver的第二个
+    // 6. Recover the second of the chunkservers that were just hung
    ASSERT_EQ(0, cluster->RecoverHangChunkServer(2));

-    // 7. 集群io不影响,正常下发
+    // 7. Cluster IO is unaffected and issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));
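The hang-two behaviour above is plain raft quorum arithmetic: with three replicas per copyset, two failures lose the majority and IO hangs; recovering one restores it. A minimal illustration (generic, not curve-specific code):

// Sketch: writes commit only while a majority of the copyset's replicas are
// alive. CanCommit(1, 3) == false, so IO hangs; CanCommit(2, 3) == true, so
// recovering one of the two failed chunkservers lets IO resume.
bool CanCommit(int aliveReplicas, int totalReplicas) {
    return aliveReplicas > totalReplicas / 2;
}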
    LOG(INFO) << "current case: KillThreeChunkserverThenRestartTheChunkserver";
    /******** KillThreeChunkserverThenRestartTheChunkserver ******/
-    // 1. 测试重启三个chunkserver
-    // 2.预期:
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. 关闭三台chunkserver:client IO hang
-    //    c. 重启一台chunkserver:client IO hang
-    //    d. 重启第二台chunkserver:client IO hang,
-    //    直到chunkserver完全恢复,IO恢复。
-    //    恢复时间约等于(chunkserver启动回放数据+2*election_timeout)
-    //    e. 重启第三台chunkserver:client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Test restarting three chunkservers
+    // 2. Expectations:
+    //    a. While the cluster is healthy, client read/write requests are
+    //       issued normally
+    //    b. Shut down three chunkservers: client IO hangs
+    //    c. Restart one chunkserver: client IO still hangs
+    //    d. Restart the second chunkserver: client IO hangs until the
+    //       chunkserver has fully recovered, then IO recovers; the recovery
+    //       time is roughly (time for the chunkserver to replay its data on
+    //       startup + 2 * election_timeout)
+    //    e. Restart the third chunkserver: client IO is unaffected
+    // 1. Initial cluster state: IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. kill掉三个chunkserver
+    // 2. Kill three chunkservers
    ASSERT_EQ(0, cluster->StopChunkServer(1));
    ASSERT_EQ(0, cluster->StopChunkServer(2));
    ASSERT_EQ(0, cluster->StopChunkServer(3));

-    // 3. kill掉三个chunkserver, io无法正常下发
+    // 3. With three chunkservers killed, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 2));

-    // 4. 拉起刚才被kill的chunkserver的第一个
+    // 4. Restart the first of the chunkservers that were just killed
    pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125",
                                          chunkserverConf4);
    LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 5. 只有一个chunkserver工作, io无法正常下发
+    // 5. With only one chunkserver working, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 80));

-    // 6. 拉起刚才被kill的chunkserver的第二个
+    // 6. Restart the second of the chunkservers that were just killed
    pid = cluster->StartSingleChunkServer(2, "127.0.0.1:22126",
                                          chunkserverConf5);
    LOG(INFO) << "chunkserver 2 started on 127.0.0.1:22126, pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 7. client的io恢复
+    // 7. Client IO recovers
    ASSERT_TRUE(MonitorResume(0, 4096, 80));

-    // 8. 拉起其他被kil的chunkserver
+    // 8. Restart the remaining killed chunkserver
    pid = cluster->StartSingleChunkServer(3, "127.0.0.1:22127",
                                          chunkserverConf6);
    LOG(INFO) << "chunkserver 3 started on 127.0.0.1:22127, pid = " << pid;
@@ -528,37 +542,37 @@ TEST_F(CSModuleException, ChunkserverException) {

    LOG(INFO) << "current case: HangThreeChunkserverThenResumeTheChunkserver";
    /******** HangThreeChunkserverThenResumeTheChunkserver **********/
-    // 1. hang三台chunkserver,然后恢复hang的chunkserver
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. hang三台chunkserver:client IO hang
-    //    c. 恢复一台chunkserver:client IO hang
-    //    d. 再恢复一台chunkserver:预期在
-    //    election_timeout*2左右的时间,client IO恢复
-    //    e. 恢复最后一台chunkserver:预期client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang three chunkservers, then recover the hung chunkservers
+    // 2. Expectations:
+    //    a. While the cluster is healthy, client read/write requests are
+    //       issued normally
+    //    b. Hang three chunkservers: client IO hangs
+    //    c. Recover one chunkserver: client IO still hangs
+    //    d. Recover a second chunkserver: client IO is expected to recover
+    //       in roughly election_timeout * 2
+    //    e. Recover the last chunkserver: client IO is expected to be
+    //       unaffected
+    // 1. Initial cluster state: IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. hang掉三个chunkserver
+    // 2. Hang three chunkservers
    ASSERT_EQ(0, cluster->HangChunkServer(1));
    ASSERT_EQ(0, cluster->HangChunkServer(2));
    ASSERT_EQ(0, cluster->HangChunkServer(3));

-    // 3. hang三个chunkserver, io无法正常下发
+    // 3. With three chunkservers hung, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 30));

-    // 4. 拉起刚才被hang的chunkserver的第一个
+    // 4. Recover the first of the chunkservers that were just hung
    ASSERT_EQ(0, cluster->RecoverHangChunkServer(1));

-    // 5. 只有一个chunkserver工作, io无法正常下发
+    // 5. With only one chunkserver working, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 80));

-    // 6. 拉起刚才被hang的chunkserver的第二个
+    // 6. Recover the second of the chunkservers that were just hung
    ASSERT_EQ(0, cluster->RecoverHangChunkServer(2));
    ASSERT_EQ(0, cluster->RecoverHangChunkServer(3));
-    // 7. client的io预期最多会在2*electtime s内恢复
-    //    如果配置了慢启动,则需要等待
-    //    (copysetNum / load_concurrency) * election_timeout
+    // 7. Client IO is expected to recover within at most
+    //    2 * election_timeout seconds; if slow start is configured, it needs
+    //    to wait (copysetNum / load_concurrency) * election_timeout
    ASSERT_TRUE(MonitorResume(0, 4096, 80));
}
diff --git a/test/integration/client/common/file_operation.cpp b/test/integration/client/common/file_operation.cpp
index 44dfc186a5..c5943a629f 100644
--- a/test/integration/client/common/file_operation.cpp
+++ b/test/integration/client/common/file_operation.cpp
@@ -43,15 +43,15 @@ int FileCommonOperation::Open(const std::string& filename,
     memset(userinfo.owner, 0, 256);
     memcpy(userinfo.owner, owner.c_str(), owner.size());

-    // 先创建文件
-    int ret = Create(filename.c_str(), &userinfo, 100*1024*1024*1024ul);
+    // Create the file first
+    int ret = Create(filename.c_str(), &userinfo, 100 * 1024 * 1024 * 1024ul);
     if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::EXISTS) {
         LOG(ERROR) << "file create failed! " << ret
                    << ", filename = " << filename;
         return -1;
     }
" << ret @@ -92,7 +92,7 @@ int FileCommonOperation::Open(const std::string& filename, return -1; } - // 再打开文件 + // Reopen File int fd = ::Open(filename.c_str(), &userinfo); if (fd < 0 && ret != -LIBCURVE_ERROR::FILE_OCCUPIED) { LOG(ERROR) << "Open file failed!"; @@ -102,5 +102,5 @@ int FileCommonOperation::Open(const std::string& filename, return fd; } -} // namespace test -} // namespace curve +} // namespace test +} // namespace curve diff --git a/test/integration/client/common/file_operation.h b/test/integration/client/common/file_operation.h index 0414146eff..c46b7add46 100644 --- a/test/integration/client/common/file_operation.h +++ b/test/integration/client/common/file_operation.h @@ -30,17 +30,18 @@ namespace curve { namespace test { class FileCommonOperation { public: - /** - * 指定文件名,打开文件,如果没创建则先创建,返回fd - */ + /** + * Specify a file name, open the file, if not created, create it first, + * return fd + */ static int Open(const std::string& filename, const std::string& owner); static void Close(int fd); static int Open(const std::string& filename, const std::string& owner, - uint64_t stripeUnit, uint64_t stripeCount); + uint64_t stripeUnit, uint64_t stripeCount); }; -} // namespace test -} // namespace curve +} // namespace test +} // namespace curve #endif // TEST_INTEGRATION_CLIENT_COMMON_FILE_OPERATION_H_ diff --git a/test/integration/client/mds_exception_test.cpp b/test/integration/client/mds_exception_test.cpp index 4cf9f8ede3..2ede4b2742 100644 --- a/test/integration/client/mds_exception_test.cpp +++ b/test/integration/client/mds_exception_test.cpp @@ -20,22 +20,22 @@ * Author: tongguangxun */ -#include -#include #include +#include +#include -#include -#include -#include // NOLINT -#include // NOLINT -#include -#include #include +#include #include // NOLINT +#include +#include // NOLINT +#include +#include +#include // NOLINT -#include "src/common/timeutility.h" #include "include/client/libcurve.h" #include "src/client/inflight_controller.h" +#include "src/common/timeutility.h" #include "test/integration/client/common/file_operation.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" @@ -51,78 +51,75 @@ bool testIORead = false; using curve::CurveCluster; const std::vector mdsConf{ - { "--confPath=./conf/mds.conf" }, - { "--log_dir=./runlog/MDSExceptionTest" }, - { "--mdsDbName=module_exception_curve_mds" }, - { "--sessionInterSec=20" }, - { "--etcdAddr=127.0.0.1:22230" }, + {"--confPath=./conf/mds.conf"}, + {"--log_dir=./runlog/MDSExceptionTest"}, + {"--mdsDbName=module_exception_curve_mds"}, + {"--sessionInterSec=20"}, + {"--etcdAddr=127.0.0.1:22230"}, }; const std::vector chunkserverConf1{ - { "-chunkServerStoreUri=local://./moduleException1/" }, - { "-chunkServerMetaUri=local://./moduleException1/chunkserver.dat" }, - { "-copySetUri=local://./moduleException1/copysets" }, - { "-raftSnapshotUri=curve://./moduleException1/copysets" }, - { "-raftLogUri=curve://./moduleException1/copysets" }, - { "-recycleUri=local://./moduleException1/recycler" }, - { "-chunkFilePoolDir=./moduleException1/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./moduleException1/chunkfilepool.meta" }, - { "-conf=./conf/chunkserver.conf.example" }, - { "-raft_sync_segments=true" }, - { "--log_dir=./runlog/MDSExceptionTest" }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=22225" }, - { "-enableChunkfilepool=false" }, - { "-mdsListenAddr=127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224" }, - { 
"-enableWalfilepool=false" }, - { "-walFilePoolDir=./moduleException1/walfilepool/" }, - { "-walFilePoolMetaPath=./moduleException1/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://./moduleException1/"}, + {"-chunkServerMetaUri=local://./moduleException1/chunkserver.dat"}, + {"-copySetUri=local://./moduleException1/copysets"}, + {"-raftSnapshotUri=curve://./moduleException1/copysets"}, + {"-raftLogUri=curve://./moduleException1/copysets"}, + {"-recycleUri=local://./moduleException1/recycler"}, + {"-chunkFilePoolDir=./moduleException1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./moduleException1/chunkfilepool.meta"}, + {"-conf=./conf/chunkserver.conf.example"}, + {"-raft_sync_segments=true"}, + {"--log_dir=./runlog/MDSExceptionTest"}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=22225"}, + {"-enableChunkfilepool=false"}, + {"-mdsListenAddr=127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=./moduleException1/walfilepool/"}, + {"-walFilePoolMetaPath=./moduleException1/walfilepool.meta"}}; const std::vector chunkserverConf2{ - { "-chunkServerStoreUri=local://./moduleException2/" }, - { "-chunkServerMetaUri=local://./moduleException2/chunkserver.dat" }, - { "-copySetUri=local://./moduleException2/copysets" }, - { "-raftSnapshotUri=curve://./moduleException2/copysets" }, - { "-raftLogUri=curve://./moduleException2/copysets" }, - { "-recycleUri=local://./moduleException2/recycler" }, - { "-chunkFilePoolDir=./moduleException2/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./moduleException2/chunkfilepool.meta" }, - { "-conf=./conf/chunkserver.conf.example" }, - { "-raft_sync_segments=true" }, - { "--log_dir=./runlog/MDSExceptionTest" }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=22226" }, - { "-enableChunkfilepool=false" }, - { "-mdsListenAddr=127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=./moduleException2/walfilepool/" }, - { "-walFilePoolMetaPath=./moduleException2/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://./moduleException2/"}, + {"-chunkServerMetaUri=local://./moduleException2/chunkserver.dat"}, + {"-copySetUri=local://./moduleException2/copysets"}, + {"-raftSnapshotUri=curve://./moduleException2/copysets"}, + {"-raftLogUri=curve://./moduleException2/copysets"}, + {"-recycleUri=local://./moduleException2/recycler"}, + {"-chunkFilePoolDir=./moduleException2/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./moduleException2/chunkfilepool.meta"}, + {"-conf=./conf/chunkserver.conf.example"}, + {"-raft_sync_segments=true"}, + {"--log_dir=./runlog/MDSExceptionTest"}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=22226"}, + {"-enableChunkfilepool=false"}, + {"-mdsListenAddr=127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=./moduleException2/walfilepool/"}, + {"-walFilePoolMetaPath=./moduleException2/walfilepool.meta"}}; const std::vector chunkserverConf3{ - { "-chunkServerStoreUri=local://./moduleException3/" }, - { "-chunkServerMetaUri=local://./moduleException3/chunkserver.dat" }, - { "-copySetUri=local://./moduleException3/copysets" }, - { "-raftSnapshotUri=curve://./moduleException3/copysets" }, - { "-raftLogUri=curve://./moduleException3/copysets" }, - { "-recycleUri=local://./moduleException3/recycler" }, - { "-chunkFilePoolDir=./moduleException3/chunkfilepool/" }, - { 
"-chunkFilePoolMetaPath=./moduleException3/chunkfilepool.meta" }, - { "-conf=./conf/chunkserver.conf.example" }, - { "-raft_sync_segments=true" }, - { "--log_dir=./runlog/MDSExceptionTest" }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=22227" }, - { "-enableChunkfilepool=false" }, - { "-mdsListenAddr=127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=./moduleException3/walfilepool/" }, - { "-walFilePoolMetaPath=./moduleException3/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://./moduleException3/"}, + {"-chunkServerMetaUri=local://./moduleException3/chunkserver.dat"}, + {"-copySetUri=local://./moduleException3/copysets"}, + {"-raftSnapshotUri=curve://./moduleException3/copysets"}, + {"-raftLogUri=curve://./moduleException3/copysets"}, + {"-recycleUri=local://./moduleException3/recycler"}, + {"-chunkFilePoolDir=./moduleException3/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./moduleException3/chunkfilepool.meta"}, + {"-conf=./conf/chunkserver.conf.example"}, + {"-raft_sync_segments=true"}, + {"--log_dir=./runlog/MDSExceptionTest"}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=22227"}, + {"-enableChunkfilepool=false"}, + {"-mdsListenAddr=127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=./moduleException3/walfilepool/"}, + {"-walFilePoolMetaPath=./moduleException3/walfilepool.meta"}}; std::string mdsaddr = // NOLINT "127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224"; // NOLINT @@ -149,14 +146,15 @@ class MDSModuleException : public ::testing::Test { cluster->PrepareConfig(confPath, clientConf); - // 1. 启动etcd + // 1. Start etcd pid_t pid = cluster->StartSingleEtcd( 1, "127.0.0.1:22230", "127.0.0.1:22231", std::vector{"--name=module_exception_test_mds"}); LOG(INFO) << "etcd 1 started on 127.0.0.1:22230:22231, pid = " << pid; ASSERT_GT(pid, 0); - // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 + // 2. Start one mds first, make it a leader, and then start the other + // two mds nodes pid = cluster->StartSingleMDS(0, "127.0.0.1:22222", 22240, mdsConf, true); LOG(INFO) << "mds 0 started on 127.0.0.1:22222, pid = " << pid; @@ -173,7 +171,7 @@ class MDSModuleException : public ::testing::Test { ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); - // 3. 创建物理池 + // 3. Creating a physical pool std::string createPPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( " -cluster_map=./test/integration/client/" @@ -196,7 +194,7 @@ class MDSModuleException : public ::testing::Test { } ASSERT_EQ(ret, 0); - // 4. 创建chunkserver + // 4. Create chunkserver pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22225", chunkserverConf1); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22225, pid = " << pid; @@ -212,7 +210,8 @@ class MDSModuleException : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // 5. Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first std::string createLPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( @@ -233,15 +232,15 @@ class MDSModuleException : public ::testing::Test { } ASSERT_EQ(ret, 0); - // 6. 初始化client配置 + // 6. Initialize client configuration ret = Init(confPath.c_str()); ASSERT_EQ(ret, 0); - // 7. 创建一个文件 + // 7. 
Create a file
        fd = curve::test::FileCommonOperation::Open("/test1", "curve");
        ASSERT_NE(fd, -1);

-        // 8. 先睡眠10s,让chunkserver选出leader
+        // 8. Sleep for a while first to let the chunkservers elect a leader
        std::this_thread::sleep_for(std::chrono::seconds(5));

        ipmap[0] = "127.0.0.1:22222";
@@ -299,12 +298,15 @@ class MDSModuleException : public ::testing::Test {
    }

    /**
-     * 监测client io能否在预期时间内正常下发
-     * @param: off是当前需要下发IO的偏移
-     * @param: size是下发io的大小
-     * @param: predictTimeS是预期在多少秒内IO可以恢复
-     * @param[out]: failCount为当前io下发中错误返回的数量
-     * @return: 如果io在预期时间内嫩够正常下发,则返true,否则返回false
+     * Monitor whether client IO can be issued normally within the expected
+     * time
+     * @param: off is the offset of the IO to issue
+     * @param: size is the size of the IO to issue
+     * @param: predictTimeS is the number of seconds within which IO is
+     * expected to recover
+     * @param[out]: failCount is the number of errors returned while issuing
+     * IO
+     * @return: true if IO is issued normally within the expected time,
+     * false otherwise
     */
    bool MonitorResume(uint64_t off, uint64_t size, uint64_t predictTimeS) {
        inflightContl.SetMaxInflightNum(16);
@@ -352,7 +354,7 @@ class MDSModuleException : public ::testing::Test {
            ret = resumeFlag;
        }

-        // 唤醒io线程
+        // Wake up the IO thread
        iothread.join();
        inflightContl.WaitInflightAllComeBack();
@@ -360,16 +362,17 @@ class MDSModuleException : public ::testing::Test {
        return ret;
    }

-    /**下发一个写请求
-     * @param: offset是当前需要下发IO的偏移
-     * @param: size是下发IO的大小
-     * @return: IO是否下发成功
+    /** Issue one write request
+     * @param: offset is the offset of the IO to issue
+     * @param: size is the size of the IO to issue
+     * @return: whether the IO was issued successfully
     */
    bool SendAioWriteRequest(uint64_t offset, uint64_t size) {
        writeIOReturnFlag = false;

        auto writeCallBack = [](CurveAioContext* context) {
-            // 无论IO是否成功,只要返回,就置为true
+            // Whether or not the IO succeeded, set the flag to true as soon
+            // as it returns
            writeIOReturnFlag = true;
            char* buffer = reinterpret_cast(context->buf);
            delete[] buffer;
@@ -388,24 +391,24 @@ class MDSModuleException : public ::testing::Test {
        return AioWrite(fd, context) == 0;
    }

-    /** 下发一个写请求并读取进行数据验证
-     * @param: fd 卷fd
-     * @param: 当前需要下发io的偏移
-     * @param:下发io的大小
-     * @return: 数据是否一致
-     */
+    /** Issue one write request, then read it back for data validation
+     * @param: fd is the volume fd
+     * @param: the offset of the IO to issue
+     * @param: the size of the IO to issue
+     * @return: whether the data is consistent
+     */
    void VerifyDataConsistency(int fd, uint64_t offset, uint64_t size) {
        char* writebuf = new char[size];
        char* readbuf = new char[size];
        unsigned int i;
-        LOG(INFO) << "VerifyDataConsistency(): offset " <<
-        offset << ", size " << size;
+        LOG(INFO) << "VerifyDataConsistency(): offset " << offset << ", size "
+                  << size;

        for (i = 0; i < size; i++) {
            writebuf[i] = ('a' + std::rand() % 26);
        }

-        // 开始写
+        // Start writing
        auto wcb = [](CurveAioContext* context) {
            if (context->ret == context->length) {
                testIOWrite = true;
@@ -416,7 +419,8 @@
        };

        auto writefunc = [&]() {
-            CurveAioContext* context = new CurveAioContext;;
+            CurveAioContext* context = new CurveAioContext;
+            ;
            context->op = LIBCURVE_OP::LIBCURVE_OP_WRITE;
            context->offset = offset;
            context->length = size;
@@ -434,7 +438,7 @@ class MDSModuleException : public ::testing::Test {
        writeThread.join();
        ASSERT_TRUE(testIOWrite);
-        // 开始读
+        // Start reading
        auto rcb = [](CurveAioContext* context) {
            if (context->ret == context->length) {
                testIORead = true;
@@ -445,7 +449,8 @@
        };

        auto readfunc = [&]() {
-            CurveAioContext* context = new CurveAioContext;;
+            CurveAioContext* context = new CurveAioContext;
+            ;
            context->op = LIBCURVE_OP::LIBCURVE_OP_READ;
            context->offset = offset;
            context->length = size;
@@ -471,7 +476,7 @@

    int fd;

-    // 是否出现挂卸载失败
+    // Whether a mount/unmount failure occurred
    bool createOrOpenFailed;
    bool createDone;
    std::mutex createMtx;
@@ -484,40 +489,49 @@
};

#define segment_size 1 * 1024 * 1024 * 1024ul
-// 测试环境拓扑:在单节点上启动一个client、三个chunkserver、三个mds、一个etcd
+// Test environment topology: start one client, three chunkservers, three
+// mds, and one etcd on a single node
TEST_F(MDSModuleException, MDSExceptionTest) {
    LOG(INFO) << "current case: KillOneInserviceMDSThenRestartTheMDS";
    /********** KillOneInserviceMDSThenRestartTheMDS *************/
-    // 1. 重启一台正在服务的mds
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. 关闭一台mds,在mds服务切换到另一台mds之前,
-    //    client 新写IO会hang,挂卸载服务会异常
-    //    c. mds服务切换后,预期client IO无影响,挂卸载服务正常
-    //    d. 重新拉起mds,client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Restart an mds that is currently serving
+    // 2. Expectations:
+    //    a. While the cluster is healthy, client read/write requests are
+    //       issued normally
+    //    b. Shut down one mds: until the mds service switches over to
+    //       another mds, new client writes hang and mount/unmount
+    //       misbehaves
+    //    c. After the mds service switches over, client IO is expected to
+    //       be unaffected and mount/unmount works normally
+    //    d. Bring the mds back up: client IO is unaffected
+    // 1. Initial cluster state: IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. kill一台正在服务的mds,在启动的时候第一台mds当选leader
+    // 2. Kill the mds currently in service; at startup the first mds was
+    //    elected leader
    int serviceMDSID = 0;
    cluster->CurrentServiceMDS(&serviceMDSID);
    ASSERT_EQ(0, cluster->StopMDS(serviceMDSID));

-    // 3. 启动后台挂卸载线程,预期挂卸载会出现失败
+    // 3. Start the background mount/unmount thread; mount/unmount is
+    //    expected to fail
    CreateOpenFileBackend();

-    // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑
-    //    follower mds在session过期后重新续约后集群正常服务(20s续约)
+    // 4. Start background IO monitoring, writing from the next segment so it
+    //    triggers the getorallocate logic; the cluster serves normally once
+    //    a follower mds renews the session after it expires (20s renewal)
    ASSERT_TRUE(MonitorResume(segment_size, 4096, 25));

-    // 5. 等待后台挂卸载监测结束
+    // 5. Wait for the background mount/unmount check to finish
    WaitBackendCreateDone();

-    // 6. 判断当前挂卸载情况
+    // 6. Check the current mount/unmount result
    ASSERT_TRUE(createOrOpenFailed);

-    // 7. 拉起被kill的进程
+    // 7. Restart the killed process
    pid_t pid = cluster->StartSingleMDS(serviceMDSID, ipmap[serviceMDSID],
                                        22240 + serviceMDSID,
                                        configmap[serviceMDSID], false);
@@ -525,85 +539,102 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
              << ", pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 8. 再拉起被kill的mds,对集群没有影响
+    // 8. Bringing the killed mds back up has no impact on the cluster
    ASSERT_TRUE(MonitorResume(0, 4096, 1));
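Each scenario's step 4 probes with a write at a fresh segment boundary on purpose: a segment that has never been written cannot be resolved from the client's cached metadata, so the write forces an allocation request to mds (the getorallocate logic the comments mention). A sketch of that idea (the cache type is a hypothetical stand-in; kSegmentSize mirrors the segment_size define above, 1 GiB):

#include <cstdint>
#include <set>

constexpr uint64_t kSegmentSize = 1ull * 1024 * 1024 * 1024;

// Sketch: an offset in a never-allocated segment cannot be served from cached
// metadata, so issuing IO there forces a round trip to mds.
bool NeedsMdsAllocation(const std::set<uint64_t>& allocatedSegments,
                        uint64_t offset) {
    return allocatedSegments.count(offset / kSegmentSize) == 0;
}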
    LOG(INFO) << "current case: KillOneNotInserviceMDSThenRestartTheMDS";
    /*********** KillOneNotInserviceMDSThenRestartTheMDS *******/
-    // 1. 重启一台不在服务的mds
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. 关闭一台不在服务的mds,预期client IO无影响,挂卸载服务正常
-    // 1. 集群最初状态,io正常下发
+    // 1. Restart an mds that is not in service
+    // 2. Expectations:
+    //    a. While the cluster is healthy, client read/write requests are
+    //       issued normally
+    //    b. Shut down an mds that is not in service: client IO is expected
+    //       to be unaffected and mount/unmount works normally
+    // 1. Initial cluster state: IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. kill一台不在服务的mds,在启动的时候第一台mds当选leader, kill第二台
+    // 2. Kill an mds that is not in service; at startup the first mds was
+    //    elected leader, so kill the second one
    serviceMDSID = 0;
    cluster->CurrentServiceMDS(&serviceMDSID);
    int killid = (serviceMDSID + 1) % 3;
    ASSERT_EQ(0, cluster->StopMDS(killid));

-    // 3. 启动后台挂卸载线程,预期挂卸载服务不会受影响
+    // 3. Start the background mount/unmount thread; mount/unmount is not
+    //    expected to be affected
    CreateOpenFileBackend();

-    // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑
-    //    follower mds在session过期后重新续约后集群正常服务(20s续约)
+    // 4. Start background IO monitoring, writing from the next segment so it
+    //    triggers the getorallocate logic; the cluster serves normally once
+    //    a follower mds renews the session after it expires (20s renewal)
    ASSERT_TRUE(MonitorResume(2 * segment_size, 4096, 25));

-    // 5. 等待挂卸载监测结束
+    // 5. Wait for the mount/unmount check to finish
    WaitBackendCreateDone();

-    // 6. 挂卸载服务正常
+    // 6. Mount/unmount works normally
    ASSERT_FALSE(createOrOpenFailed);

-    // 7. 拉起被kill的进程
+    // 7. Restart the killed process
    pid = cluster->StartSingleMDS(killid, ipmap[killid], 22240 + killid,
                                  configmap[killid], false);
    LOG(INFO) << "mds " << killid << " started on " << ipmap[killid]
              << ", pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 8. 再拉起被kill的mds,对集群没有影响
+    // 8. Bringing the killed mds back up has no impact on the cluster
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: hangOneInserviceMDSThenResumeTheMDS";
    /************ hangOneInserviceMDSThenResumeTheMDS ********/
-    // 1. hang一台正在服务的mds
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. mds hang期间且在与etcd续约超时之前,这时候新写IO会失败,
-    //    因为新写触发getorallocate,这个RPC发到mds会出现一直超时,然后重试
-    //    最后重试失败。
-    //    c. client session续约时长总比mds与etcd之间续约时长大,所以在
-    //    session续约失败之前mds预期可以完成切换,所以client的session
-    //    不会过期,覆盖写不会出现异常。
-    //    d. 恢复被hang的mds,预期对client io无影响
-    // 0. 先睡眠一段时间等待mds集群选出leader
+    // 1. Hang an mds that is currently serving
+    // 2. Expectations:
+    //    a. While the cluster is healthy, client read/write requests are
+    //       issued normally
+    //    b. While the mds hangs, and before its lease with etcd times out,
+    //       new writes fail: a new write triggers getorallocate, and that
+    //       RPC to the mds keeps timing out and retrying until the retries
+    //       finally fail
+    //    c. The client's session renewal period is always longer than the
+    //       lease between mds and etcd, so the mds switchover is expected
+    //       to complete before session renewal can fail; the client session
+    //       does not expire and overwrites see no errors (see the timing
+    //       sketch below)
+    //    d. Recover the hung mds: client IO is expected to be unaffected
+    // 0. Sleep a while first so the mds cluster can elect a leader
    std::this_thread::sleep_for(std::chrono::seconds(10));
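Item c above is a pure timing argument; a sketch of the inequality it depends on (sessionInterSec=20 comes from mdsConf above; the lease and failover figures are assumptions for illustration):

#include <cstdint>

// Sketch: the client session outlives an mds failover as long as the
// mds<->etcd lease plus the switchover time stays below the session TTL,
// e.g. SessionSurvivesFailover(10, 5, 20) == true.
bool SessionSurvivesFailover(uint64_t mdsEtcdLeaseS, uint64_t failoverS,
                             uint64_t clientSessionS) {
    return mdsEtcdLeaseS + failoverS < clientSessionS;
}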
-    // 1. 集群最初状态,io正常下发
+    // 1. Initial cluster state: IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. hang一台正在服务的mds,在启动的时候第一台mds当选leader
+    // 2. Hang the mds currently in service; at startup the first mds was
+    //    elected leader
    serviceMDSID = 0;
    cluster->CurrentServiceMDS(&serviceMDSID);
    ASSERT_EQ(0, cluster->HangMDS(serviceMDSID));

-    // 3. 启动后台挂卸载线程,预期挂卸载会出现失败
+    // 3. Start the background mount/unmount thread; mount/unmount is
+    //    expected to fail
    CreateOpenFileBackend();

-    // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑
-    //    follower mds在session过期后重新续约后集群正常服务(20s续约)
+    // 4. Start background IO monitoring, writing from the next segment so it
+    //    triggers the getorallocate logic; the cluster serves normally once
+    //    a follower mds renews the session after it expires (20s renewal)
    auto ret = MonitorResume(3 * segment_size, 4096, 25);
    if (!ret) {
        ASSERT_EQ(0, cluster->RecoverHangMDS(serviceMDSID));
        ASSERT_TRUE(false);
    }

-    // 5. 等待后台挂卸载监测结束
+    // 5. Wait for the background mount/unmount check to finish
    WaitBackendCreateDone();

-    // 6. 判断当前挂卸载情况
+    // 6. Check the current mount/unmount result
    ASSERT_EQ(0, cluster->RecoverHangMDS(serviceMDSID));
    ASSERT_EQ(0, cluster->StopMDS(serviceMDSID));
    pid = cluster->StartSingleMDS(serviceMDSID, ipmap[serviceMDSID],
@@ -614,39 +645,45 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
    ASSERT_GT(pid, 0);

    ASSERT_TRUE(createOrOpenFailed);

-    // 7. 再拉起被kill的mds,对集群没有影响
+    // 7. Bringing the killed mds back up has no impact on the cluster
    ASSERT_TRUE(MonitorResume(0, 4096, 1));
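The not-in-service scenarios that follow need a follower mds to disturb; they derive it from the current leader's id, as sketched here (three mds instances, matching this test's setup):

// Sketch: step from the leader's id to a different replica; with ids 0..2,
// (leader + 1) % 3 is always a follower, which is exactly what the test does.
int PickFollowerMds(int serviceMDSID) { return (serviceMDSID + 1) % 3; }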
-    // 5. 等待挂卸载监测结束
+    // 5. Wait for the mount/unmount monitoring to finish
    WaitBackendCreateDone();

-    // 6. 挂卸载服务正常
+    // 6. The mount/unmount service works normally
    ASSERT_EQ(0, cluster->RecoverHangMDS(hangid));
    ASSERT_EQ(0, cluster->StopMDS(hangid));
    pid = cluster->StartSingleMDS(hangid, ipmap[hangid], 22240 + hangid,
@@ -657,42 +694,50 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
    ASSERT_FALSE(createOrOpenFailed);

-    // 7. 集群没有影响
+    // 7. The cluster is unaffected
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: KillTwoInserviceMDSThenRestartTheMDS";
    /************* KillTwoInserviceMDSThenRestartTheMDS ***********/
-    // 1. 重启两台mds,其中一台正在服务的mds
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. 关闭两台mds,在mds服务切换到另一台mds之前,
-    //       client 新写IO会出现失败,挂卸载服务会异常
-    //    c. mds服务切换后,预期client IO恢复,挂卸载服务正常
-    //    d. 重新拉起mds,client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Restart two MDS nodes, one of which is currently serving.
+    // 2. Expectations:
+    //    a. When the cluster is healthy, client reads and writes are
+    //       issued normally.
+    //    b. After the two MDS nodes are stopped, and until the MDS service
+    //       switches to the remaining MDS, new client writes fail and the
+    //       mount/unmount service misbehaves.
+    //    c. After the switchover, client IO is expected to recover and the
+    //       mount/unmount service to work normally.
+    //    d. Bringing the MDS nodes back up has no impact on client IO.
+    // 1. Initial cluster state: IO is issued normally.
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. kill两台mds,在启动的时候第一台mds当选leader, kill前二台
+    // 2. Kill two MDSs. At startup the first MDS was elected leader, so
+    //    kill the first two
    serviceMDSID = 0;
    cluster->CurrentServiceMDS(&serviceMDSID);
    int secondid = (serviceMDSID + 1) % 3;
    ASSERT_EQ(0, cluster->StopMDS(serviceMDSID));
    ASSERT_EQ(0, cluster->StopMDS(secondid));
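    // Editor's note: MonitorResume(offset, size, timeoutSec) is this
    // fixture's probe for "has IO come back?". Its implementation is not
    // shown in this hunk; a minimal sketch of the contract the call sites
    // rely on (WriteSucceeds is a hypothetical stand-in):
    //
    //   bool MonitorResumeSketch(uint64_t off, uint64_t len, int timeoutSec) {
    //       auto deadline = std::chrono::steady_clock::now() +
    //                       std::chrono::seconds(timeoutSec);
    //       while (std::chrono::steady_clock::now() < deadline) {
    //           if (WriteSucceeds(off, len)) return true;
    //           std::this_thread::sleep_for(std::chrono::seconds(1));
    //       }
    //       return false;
    //   }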
-    // 3. 启动后台挂卸载线程,预期挂卸载服务会受影响
+    // 3. Start the background mount/unmount thread; the mount/unmount
+    //    service is expected to be affected
    CreateOpenFileBackend();

-    // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑
-    //    follower mds在session过期后重新续约后集群正常服务(20s续约)
+    // 4. Start background IO monitoring and write from the next segment so
+    //    that the getorallocate logic is triggered; the cluster resumes
+    //    normal service once a follower mds renews the session after it
+    //    expires (20s lease)
    ASSERT_TRUE(MonitorResume(5 * segment_size, 4096, 25));

-    // 5. 等待后台挂卸载监测结束
+    // 5. Wait for the background mount/unmount monitoring to finish
    WaitBackendCreateDone();

-    // 6. 判断当前挂卸载情况
+    // 6. Check the current mount/unmount result
    ASSERT_TRUE(createOrOpenFailed);

-    // 7. 拉起被kill的进程
+    // 7. Restart the killed process
    pid = cluster->StartSingleMDS(serviceMDSID, ipmap[serviceMDSID],
                                  22240 + serviceMDSID,
                                  configmap[serviceMDSID], false);
@@ -700,10 +745,10 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
              << ", pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 8. 再拉起被kill的mds,对集群没有影响
+    // 8. Restarting the killed mds again has no impact on the cluster
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 9. 拉起被kill的其他mds
+    // 9. Restart the other killed mds
    pid = cluster->StartSingleMDS(secondid, ipmap[secondid], 22240 + secondid,
                                  configmap[secondid], false);
    LOG(INFO) << "mds " << secondid << " started on " << ipmap[secondid]
@@ -712,18 +757,22 @@ TEST_F(MDSModuleException, MDSExceptionTest) {

    LOG(INFO) << "current case: KillTwoNotInserviceMDSThenRestartTheMDS";
    /******** KillTwoNotInserviceMDSThenRestartTheMDS ***********/
-    // 1. 重启两台mds,其中两台都不在服务
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. 关闭两台mds,预期client IO无影响,挂卸载服务正常
-    //    c. 重启这两台mds,client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Restart two MDS nodes, neither of which is currently serving.
+    // 2. Expectations:
+    //    a. When the cluster is healthy, client reads and writes are
+    //       issued normally.
+    //    b. Stopping the two MDS nodes is expected to have no impact on
+    //       client IO, and the mount/unmount service works normally.
+    //    c. Restarting these two MDS nodes has no impact on client IO.
+    // 1. Initial cluster state: IO is issued normally.
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. 启动后台挂卸载线程,预期挂卸载服务会受影响
+    // 2. Start the background mount/unmount thread; the mount/unmount
+    //    service is expected to be affected
    CreateOpenFileBackend();

-    // 3. kill两台mds,在启动的时候第一台mds当选leader, kill后二台
+    // 3. Kill two MDSs. At startup the first MDS was elected leader, so
+    //    kill the last two
    serviceMDSID = 0;
    cluster->CurrentServiceMDS(&serviceMDSID);
    int tempid_1 = (serviceMDSID + 1) % 3;
@@ -731,27 +780,28 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
    ASSERT_EQ(0, cluster->StopMDS(tempid_1));
    ASSERT_EQ(0, cluster->StopMDS(tempid_2));

-    // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑
-    //    不在服务的mds被kill对集群没有影响
+    // 4. Start background IO monitoring and write from the next segment so
+    //    that the getorallocate logic is triggered; killing MDSs that are
+    //    not in service has no impact on the cluster
    ASSERT_TRUE(MonitorResume(6 * segment_size, 4096, 10));

-    // 5. 等待挂卸载监测结束
+    // 5. Wait for the mount/unmount monitoring to finish
    WaitBackendCreateDone();

-    // 6. 挂卸载服务正常
+    // 6. The mount/unmount service works normally
    ASSERT_FALSE(createOrOpenFailed);

-    // 7. 拉起被kill的进程
+    // 7. Restart the killed process
    pid = cluster->StartSingleMDS(tempid_1, ipmap[tempid_1], 22240 + tempid_1,
                                  configmap[tempid_1], false);
    LOG(INFO) << "mds " << tempid_1 << " started on " << ipmap[tempid_1]
              << ", pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 8. 集群没有影响
+    // 8. The cluster is unaffected
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 9. 拉起其他mds,使集群恢复正常
+    // 9. Restart the remaining mds to bring the cluster back to normal
    pid = cluster->StartSingleMDS(tempid_2, ipmap[tempid_2], 22240 + tempid_2,
                                  configmap[tempid_2], false);
    LOG(INFO) << "mds " << tempid_2 << " started on " << ipmap[tempid_2]
@@ -760,17 +810,24 @@ TEST_F(MDSModuleException, MDSExceptionTest) {

    LOG(INFO) << "current case: hangTwoInserviceMDSThenResumeTheMDS";
    /******** hangTwoInserviceMDSThenResumeTheMDS ************/
-    // 1. hang两台mds,其中包含一台正在服务的mds,然后恢复
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. mds hang期间且在与etcd续约超时之前,这时候新写IO会失败,
-    //       因为新写触发getorallocate,这个RPC发到mds会出现一直超时,然后重试
-    //       最后重试失败。
-    //    c. client session续约时长总比mds与etcd之间续约时长大,所以在
-    //       session续约失败之前mds预期可以完成切换,所以client的session
-    //       不会过期,覆盖写不会出现异常。
-    //    d. 恢复被hang的mds,预期对client io无影响
-    // 1. hang两台mds,在启动的时候第一台mds当选leader, hang前二台
+    // 1. Hang two MDS nodes, including the one currently serving, then
+    //    recover them.
+    // 2. Expectations:
+    //    a. When the cluster is healthy, client reads and writes are
+    //       issued normally.
+    //    b. While the MDSs are hung, and before the lease with etcd times
+    //       out, new writes fail: a new write triggers getorallocate, and
+    //       that RPC keeps timing out at the MDS, so it is retried until
+    //       the retries finally fail.
+    //    c. The client session lease is always longer than the lease
+    //       between MDS and etcd, so the switchover is expected to finish
+    //       before client session renewal fails; the client session
+    //       therefore never expires and overwrites see no errors.
+    //    d. Recovering the hung MDSs is expected to have no impact on
+    //       client IO.
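    // Editor's note: expectation c. above is a timing invariant between two
    // configured leases. Spelled out as code (the names are illustrative,
    // not actual config keys; 20s matches the "20s lease" mentioned above,
    // and 30s matches the --sessionInterSec=30 used elsewhere in this
    // patch):
    //
    //   constexpr int kMdsEtcdLeaseSec = 20;        // mds <-> etcd session
    //   constexpr int kClientSessionLeaseSec = 30;  // client <-> mds session
    //   static_assert(kClientSessionLeaseSec > kMdsEtcdLeaseSec,
    //                 "a follower mds must take over before the client "
    //                 "session expires, or overwrites would start failing");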
+    // 1. Hang two MDSs. At startup the first MDS was elected leader, so
+    //    hang the first two.
    serviceMDSID = 0;
    cluster->CurrentServiceMDS(&serviceMDSID);
    tempid_1 = serviceMDSID;
@@ -778,12 +835,15 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
    ASSERT_EQ(0, cluster->HangMDS(tempid_1));
    ASSERT_EQ(0, cluster->HangMDS(tempid_2));

-    // 2. 启动后台挂卸载线程,预期挂卸载服务会受影响
+    // 2. Start the background mount/unmount thread; the mount/unmount
+    //    service is expected to be affected
    CreateOpenFileBackend();
    LOG(INFO) << "monitor resume start!";

-    // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑
-    //    follower mds在session过期后重新续约后集群正常服务(20s续约)
+    // 4. Start background IO monitoring and write from the next segment so
+    //    that the getorallocate logic is triggered; the cluster resumes
+    //    normal service once a follower mds renews the session after it
+    //    expires (20s lease)
    ret = MonitorResume(7 * segment_size, 4096, 25);
    if (!ret) {
        ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1));
@@ -792,11 +852,12 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
    }
    LOG(INFO) << "monitor resume done!";

-    // 5. 等待后台挂卸载监测结束
+    // 5. Wait for the background mount/unmount monitoring to finish
    WaitBackendCreateDone();
    LOG(INFO) << "wait backend create thread done!";

-    // 6. 判断当前挂卸载情况
+    // 6. Check the current mount/unmount result
    ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1));
    ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_2));
    ASSERT_EQ(0, cluster->StopMDS(tempid_1));
@@ -814,20 +875,24 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
    ASSERT_GT(pid, 0);
    ASSERT_TRUE(createOrOpenFailed);

-    // 7. 再拉起被hang的mds,对集群没有影响
+    // 7. Restarting the previously hung mds has no impact on the cluster
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: hangTwoNotInserviceMDSThenResumeTheMDS";
    /********** hangTwoNotInserviceMDSThenResumeTheMDS ********/
-    // 1. hang两台mds,其中不包含正在服务的mds,然后恢复
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. hang一台不在服务的mds,预期client IO无影响,挂卸载服务正常
-    //    c. 恢复这两台mds,client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang two MDS nodes, neither of which is currently serving, then
+    //    recover them.
+    // 2. Expectations:
+    //    a. When the cluster is healthy, client reads and writes are
+    //       issued normally.
+    //    b. Hanging an MDS that is not in service has no impact on client
+    //       IO, and the mount/unmount service works normally.
+    //    c. Recovering these two MDSs has no impact on client IO.
+    // 1. Initial cluster state: IO is issued normally.
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. hang两台mds,在启动的时候第一台mds当选leader, kill后二台
+    // 2. Hang two MDSs. At startup the first MDS was elected leader, so
+    //    hang the last two
    serviceMDSID = 0;
    cluster->CurrentServiceMDS(&serviceMDSID);
    tempid_1 = (serviceMDSID + 1) % 3;
@@ -835,11 +900,13 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
    ASSERT_EQ(0, cluster->HangMDS(tempid_1));
    ASSERT_EQ(0, cluster->HangMDS(tempid_2));

-    // 3. 启动后台挂卸载线程,预期挂卸载服务会受影响
+    // 3. Start the background mount/unmount thread; the mount/unmount
+    //    service is expected to be affected
    CreateOpenFileBackend();

-    // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑
-    //    不在服务的mds被kill对集群没有影响
+    // 4. Start background IO monitoring and write from the next segment so
+    //    that the getorallocate logic is triggered; hanging MDSs that are
+    //    not in service has no impact on the cluster
    ret = MonitorResume(8 * segment_size, 4096, 10);
    if (!ret) {
        ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1));
@@ -847,10 +914,10 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
        ASSERT_TRUE(false);
    }

-    // 5. 等待挂卸载监测结束
+    // 5. Wait for the mount/unmount monitoring to finish
    WaitBackendCreateDone();

-    // 6. 挂卸载服务正常
+    // 6. The mount/unmount service works normally
    ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1));
    ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_2));
    ASSERT_EQ(0, cluster->StopMDS(tempid_1));
@@ -868,41 +935,47 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
    ASSERT_GT(pid, 0);
    ASSERT_FALSE(createOrOpenFailed);

-    // 7. 集群没有影响
+    // 7. The cluster is unaffected
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: KillThreeMDSThenRestartTheMDS";
    /********* KillThreeMDSThenRestartTheMDS *********/
-    // 1. 重启三台mds
-    // 2.预期
-    //    a. 集群状态正常:client读写请求可以正常下发
-    //    b. kill三台mds:client 在session过期之后出现IO 失败
-    //    c. client session过期之前这段时间的新写会失败,覆盖写不影响
-    //    d. 恢复其中hang的一台mds:client session重新续约成功,io恢复正常
-    //    e. 恢复另外两台hang的mds,client io无影响
-
-    // 1. kill三台mds
+    // 1. Restart all three MDS nodes.
+    // 2. Expectations:
+    //    a. When the cluster is healthy, client reads and writes are
+    //       issued normally.
+    //    b. Kill all three MDS nodes: client IO starts failing once the
+    //       session expires.
+    //    c. Before the client session expires, new writes fail while
+    //       overwrites are unaffected.
+    //    d. Recover one of the MDS nodes: the client session is renewed
+    //       successfully and IO returns to normal.
+    //    e. Recover the other two MDS nodes: client IO is unaffected.
+
+    // 1. Kill all three MDSs
    ASSERT_EQ(0, cluster->StopAllMDS());
-    // 确保mds确实退出了
+    // Make sure the mds processes have really exited
    std::this_thread::sleep_for(std::chrono::seconds(10));

-    // 2. 启动后台挂卸载线程,预期挂卸载服务会受影响
+    // 2. Start the background mount/unmount thread; the mount/unmount
+    //    service is expected to be affected
    CreateOpenFileBackend();

-    // 3. 下发一个io,sleep一段时间后判断是否返回
-    //    由于从下一个segment开始写,使其触发getorallocate逻辑
-    //    MDS全部不在服务,写请求一直hang,无法返回
+    // 3. Issue one IO, sleep for a while, then check whether it returned.
+    //    The write starts at the next segment, so it triggers the
+    //    getorallocate logic; with no MDS in service the write request
+    //    hangs and cannot return
    ASSERT_TRUE(SendAioWriteRequest(9 * segment_size, 4096));
    std::this_thread::sleep_for(std::chrono::seconds(30));
    ASSERT_FALSE(writeIOReturnFlag);

-    // 4. 等待后台挂卸载监测结束
+    // 4. Wait for the background mount/unmount monitoring to finish
    WaitBackendCreateDone();

-    // 5. 判断当前挂卸载情况
+    // 5. Check the current mount/unmount result
    ASSERT_TRUE(createOrOpenFailed);

-    // 6. 拉起被kill的进程
+    // 6. Restart the killed process
    pid = -1;
    while (pid < 0) {
        pid =
@@ -911,49 +984,54 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
        std::this_thread::sleep_for(std::chrono::seconds(3));
    }
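    // Editor's note: SendAioWriteRequest/writeIOReturnFlag (defined earlier
    // in this file) implement a fire-and-forget write, used to show that IO
    // *hangs* rather than fails while no MDS is serving. A sketch of the
    // mechanism, assuming libcurve's AioWrite API and a callback that flips
    // the flag (exact field names may differ from the real helper):
    //
    //   std::atomic<bool> writeReturned{false};
    //   void WriteCbSketch(CurveAioContext* ctx) {
    //       writeReturned = true;  // reached only once the IO completes
    //       delete[] reinterpret_cast<char*>(ctx->buf);
    //       delete ctx;
    //   }
    //   bool SendAioWriteSketch(int fd, uint64_t offset, uint64_t len) {
    //       auto* ctx = new CurveAioContext();
    //       ctx->offset = offset;
    //       ctx->length = len;
    //       ctx->buf = new char[len];
    //       ctx->op = LIBCURVE_OP_WRITE;
    //       ctx->cb = WriteCbSketch;
    //       return AioWrite(fd, ctx) == 0;  // returns without waiting
    //   }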
-    // 7. 检测上次IO是否返回
+    // 7. Check whether the previous IO has returned
    std::this_thread::sleep_for(std::chrono::seconds(20));
    ASSERT_TRUE(writeIOReturnFlag);

-    // 8. 新的mds开始提供服务
+    // 8. The new mds starts serving
    ASSERT_TRUE(MonitorResume(segment_size, 4096, 10));

-    // 9. 再拉起被kill的进程
+    // 9. Restart another killed process
    pid = cluster->StartSingleMDS(1, "127.0.0.1:22223", 22229, mdsConf, false);
    LOG(INFO) << "mds 1 started on 127.0.0.1:22223, pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 10. 对集群没有影响
+    // 10. No impact on the cluster
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 11. 拉起其他被kill的mds
+    // 11. Restart the remaining killed mds
    pid = cluster->StartSingleMDS(2, "127.0.0.1:22224", 22232, mdsConf, false);
    LOG(INFO) << "mds 2 started on 127.0.0.1:22224, pid = " << pid;
    ASSERT_GT(pid, 0);

    LOG(INFO) << "current case: hangThreeMDSThenResumeTheMDS";
    /********** hangThreeMDSThenResumeTheMDS **************/
-    // 1. hang三台mds,然后恢复
-    // 2.预期
-    //    a. 集群状态正常:client读写请求可以正常下发
-    //    b. hang三台mds:client 在session过期之后出现IO hang
-    //    c. client session过期之前这段时间的新写会一直hang,覆盖写不影响
-    //    e. 恢复其中hang的一台mds:client session重新续约成功,io恢复正常
-    //    f. 恢复另外两台hang的mds,client io无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang all three MDS nodes, then recover them.
+    // 2. Expectations:
+    //    a. When the cluster is healthy, client reads and writes are
+    //       issued normally.
+    //    b. Hang three MDS nodes: client IO hangs once the session expires.
+    //    c. Before the client session expires, new writes keep hanging
+    //       while overwrites are unaffected.
+    //    e. Recover one of the hung MDS nodes: the client session is
+    //       renewed successfully and IO returns to normal.
+    //    f. Recover the other two hung MDS nodes: client IO is unaffected.
+    // 1. Initial cluster state: IO is issued normally.
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. hang三台mds
+    // 2. Hang all three MDSs
    ASSERT_EQ(0, cluster->HangMDS(0));
    ASSERT_EQ(0, cluster->HangMDS(1));
    ASSERT_EQ(0, cluster->HangMDS(2));

-    // 3. 启动后台挂卸载线程,预期挂卸载服务会受影响
+    // 3. Start the background mount/unmount thread; the mount/unmount
+    //    service is expected to be affected
    CreateOpenFileBackend();

-    // 4. 下发一个io,sleep一段时间后判断是否返回
-    //    由于从下一个segment开始写,使其触发getorallocate逻辑
-    //    MDS全部不在服务,写请求一直hang,无法返回
+    // 4. Issue one IO, sleep for a while, then check whether it returned.
+    //    The write starts at the next segment, so it triggers the
+    //    getorallocate logic; with no MDS in service the write request
+    //    hangs and cannot return
    ASSERT_TRUE(SendAioWriteRequest(10 * segment_size, 4096));
    std::this_thread::sleep_for(std::chrono::seconds(3));
    ret = writeIOReturnFlag;
@@ -964,10 +1042,10 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
        ASSERT_TRUE(false);
    }

-    // 5. 等待监测结束
+    // 5. Wait for the monitoring to finish
    WaitBackendCreateDone();

-    // 6. 判断当前挂卸载情况
+    // 6. Check the current mount/unmount result
    if (!createOrOpenFailed) {
        ASSERT_EQ(0, cluster->RecoverHangMDS(2));
        ASSERT_EQ(0, cluster->RecoverHangMDS(1));
@@ -975,9 +1053,11 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
        ASSERT_TRUE(false);
    }

-    // 7. 拉起被hang的进程, 有可能hang的进程因为长时间未与etcd握手,
-    //    导致其被拉起后就退出了,所以这里在recover之后再启动该mds,
-    //    这样保证集群中至少有一个mds在提供服务
+    // 7. Restart the hung processes. A hung process may have gone so long
+    //    without handshaking with etcd that it exits right after being
+    //    resumed, so the mds is restarted here after the recover; this
+    //    guarantees that at least one mds in the cluster keeps serving
    ASSERT_EQ(0, cluster->RecoverHangMDS(1));
    ASSERT_EQ(0, cluster->StopMDS(1));
@@ -989,11 +1069,11 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
        std::this_thread::sleep_for(std::chrono::seconds(3));
    }

-    // 检测上次IO是否返回
+    // Check whether the previous IO has returned
    std::this_thread::sleep_for(std::chrono::seconds(20));
    ASSERT_TRUE(writeIOReturnFlag);

-    // 8. 新的mds开始提供服务
+    // 8. The new mds starts serving
    ret = MonitorResume(segment_size, 4096, 1);
    if (!ret) {
        ASSERT_EQ(0, cluster->RecoverHangMDS(2));
@@ -1001,45 +1081,45 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
        ASSERT_TRUE(false);
    }

-    // 9. 再拉起被hang的进程
+    // 9. Resume the remaining hung processes
    ASSERT_EQ(0, cluster->RecoverHangMDS(2));
    ASSERT_EQ(0, cluster->RecoverHangMDS(0));

-    // 10. 对集群没有影响
+    // 10. No impact on the cluster
    ASSERT_TRUE(MonitorResume(0, 4096, 1));
}

TEST_F(MDSModuleException, StripeMDSExceptionTest) {
    LOG(INFO) << "current case: StripeMDSExceptionTest";
-    // 1. 创建一个条带的卷
-    int stripefd = curve::test::FileCommonOperation::Open("/test2",
-                        "curve", 1024 * 1024, 8);
+    // 1. Create a striped volume
+    int stripefd = curve::test::FileCommonOperation::Open("/test2", "curve",
+                                                          1024 * 1024, 8);
    ASSERT_NE(stripefd, -1);
    uint64_t offset = std::rand() % 5 * segment_size;

-    // 2. 进行数据的读写校验
-    VerifyDataConsistency(stripefd, offset, 128 *1024 *1024);
+    // 2. Verify data consistency with reads and writes
+    VerifyDataConsistency(stripefd, offset, 128 * 1024 * 1024);
    std::this_thread::sleep_for(std::chrono::seconds(60));

-    // 3. kill 一台当前为leader的mds
+    // 3. Kill the mds that is currently the leader
    LOG(INFO) << "stop mds.";
    int serviceMDSID = 0;
    cluster->CurrentServiceMDS(&serviceMDSID);
    ASSERT_EQ(0, cluster->StopMDS(serviceMDSID));

-    // 4. 启动后台挂卸载线程
+    // 4. Start the background mount/unmount thread
    CreateOpenFileBackend();

-    // 5. 继续随机写数据进行校验
+    // 5. Keep writing random data and verifying it
    offset = std::rand() % 5 * segment_size;
    LOG(INFO) << "when stop mds, write and read data.";
-    VerifyDataConsistency(stripefd, offset, 128 *1024 *1024);
+    VerifyDataConsistency(stripefd, offset, 128 * 1024 * 1024);

-    // 6. 等待挂卸载检测结果
+    // 6. Wait for the mount/unmount detection result
    WaitBackendCreateDone();

-    // 7. 挂卸载服务正常
+    // 7. The mount/unmount service works normally
    ASSERT_TRUE(createOrOpenFailed);

-    LOG(INFO) <<"start mds.";
+    LOG(INFO) << "start mds.";
    pid_t pid = cluster->StartSingleMDS(serviceMDSID, ipmap[serviceMDSID],
                                        22240 + serviceMDSID,
                                        configmap[serviceMDSID], false);
@@ -1047,10 +1127,9 @@ TEST_F(MDSModuleException, StripeMDSExceptionTest) {
              << ", pid = " << pid;
    ASSERT_GT(pid, 0);

-
    LOG(INFO) << "start mds, write and read data.";
    offset = std::rand() % 5 * segment_size;
-    VerifyDataConsistency(stripefd, offset, 128 *1024 *1024);
+    VerifyDataConsistency(stripefd, offset, 128 * 1024 * 1024);

    ::Close(stripefd);
}
diff --git a/test/integration/client/unstable_chunkserver_exception_test.cpp b/test/integration/client/unstable_chunkserver_exception_test.cpp
index cf1753ff2c..c3e577de66 100644
--- a/test/integration/client/unstable_chunkserver_exception_test.cpp
+++ b/test/integration/client/unstable_chunkserver_exception_test.cpp
@@ -20,26 +20,26 @@
 * Author: wuhanqing
 */

-#include
-#include
#include
+#include
+#include

-#include
-#include
-#include  // NOLINT
-#include  // NOLINT
+#include
#include
-#include
+#include
+#include  // NOLINT
+#include
#include
-#include
+#include  // NOLINT
#include
-#include
-#include  // NOLINT
+#include
+#include  // NOLINT
+#include

#include "include/client/libcurve.h"
-#include "src/common/timeutility.h"
#include "src/client/client_metric.h"
#include "src/client/inflight_controller.h"
+#include "src/common/timeutility.h"
#include "test/integration/client/common/file_operation.h"
#include "test/integration/cluster_common/cluster.h"
#include "test/util/config_generator.h"
@@ -58,16 +58,14 @@ const char* kLogPath = "./runlog/";

curve::client::PerSecondMetric iops("test", "iops");

-std::atomic running{ false };
+std::atomic running{false};

const std::vector chunkserverConfigOpts{
    "chunkfilepool.enable_get_chunk_from_pool=false",
-    "walfilepool.enable_get_segment_from_pool=false"
-};
+    "walfilepool.enable_get_segment_from_pool=false"};

-const std::vector mdsConfigOpts{
-    std::string("mds.etcd.endpoint=") + std::string(kEtcdClientIpPort)
-};
+const std::vector mdsConfigOpts{std::string("mds.etcd.endpoint=") +
+                                std::string(kEtcdClientIpPort)};

const std::vector clientConfigOpts{
    std::string("mds.listen.addr=") + kMdsIpPort,
@@ -81,28 +79,26 @@ const std::vector mdsConf{
    std::string("--confPath=") + kMdsConfPath,
    std::string("--mdsAddr=") + kMdsIpPort,
    std::string("--etcdAddr=") + kEtcdClientIpPort,
-    { "--log_dir=./runlog/mds" },
-    { "--stderrthreshold=3" }
-};
+    {"--log_dir=./runlog/mds"},
+    {"--stderrthreshold=3"}};

const std::vector chunkserverConfTemplate{
-    { "-raft_sync_segments=true" },
+    {"-raft_sync_segments=true"},
    std::string("-conf=") + kCSConfPath,
-    { "-chunkServerPort=%d" },
-    { "-chunkServerStoreUri=local://./ttt/%d/" },
-    { "-chunkServerMetaUri=local://./ttt/%d/chunkserver.dat" },
-    { "-copySetUri=local://./ttt/%d/copysets" },
-    { "-raftSnapshotUri=curve://./ttt/%d/copysets" },
-    { "-raftLogUri=curve://./ttt/%d/copysets" },
-    { "-recycleUri=local://./ttt/%d/recycler" },
-    { "-chunkFilePoolDir=./ttt/%d/chunkfilepool/" },
-    { "-chunkFilePoolMetaPath=./ttt/%d/chunkfilepool.meta" },
-    { "-walFilePoolDir=./ttt/%d/walfilepool/" },
-    { "-walFilePoolMetaPath=./ttt/%d/walfilepool.meta" },
-    { "-mdsListenAddr=127.0.0.1:30010,127.0.0.1:30011,127.0.0.1:30012" },
-    { "-log_dir=./runlog/cs_%d" },
-    { "--stderrthreshold=3" }
-};
+    {"-chunkServerPort=%d"},
+    {"-chunkServerStoreUri=local://./ttt/%d/"},
+    {"-chunkServerMetaUri=local://./ttt/%d/chunkserver.dat"},
+
{"-copySetUri=local://./ttt/%d/copysets"}, + {"-raftSnapshotUri=curve://./ttt/%d/copysets"}, + {"-raftLogUri=curve://./ttt/%d/copysets"}, + {"-recycleUri=local://./ttt/%d/recycler"}, + {"-chunkFilePoolDir=./ttt/%d/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./ttt/%d/chunkfilepool.meta"}, + {"-walFilePoolDir=./ttt/%d/walfilepool/"}, + {"-walFilePoolMetaPath=./ttt/%d/walfilepool.meta"}, + {"-mdsListenAddr=127.0.0.1:30010,127.0.0.1:30011,127.0.0.1:30012"}, + {"-log_dir=./runlog/cs_%d"}, + {"--stderrthreshold=3"}}; const std::vector chunkserverPorts{ 31000, 31001, 31010, 31011, 31020, 31021, @@ -138,20 +134,16 @@ std::vector GenChunkserverConf(int port) { return conf; } -off_t RandomWriteOffset() { - return rand() % 32 * (16 * 1024 * 1024); -} +off_t RandomWriteOffset() { return rand() % 32 * (16 * 1024 * 1024); } -size_t RandomWriteLength() { - return rand() % 32 * 4096; -} +size_t RandomWriteLength() { return rand() % 32 * 4096; } static char buffer[1024 * 4096]; struct ChunkserverParam { int id; int port; - std::string addr{ "127.0.0.1:" }; + std::string addr{"127.0.0.1:"}; std::vector conf; ChunkserverParam(int id, int port) { @@ -165,7 +157,7 @@ struct ChunkserverParam { class UnstableCSModuleException : public ::testing::Test { protected: static void SetUpTestCase() { - // 清理文件夹 + // Clean Up Folder system("rm -rf module_exception_curve_unstable_cs.etcd"); system("rm -rf ttt"); system("mkdir -p ttt"); @@ -175,7 +167,7 @@ class UnstableCSModuleException : public ::testing::Test { cluster.reset(new CurveCluster()); ASSERT_NE(nullptr, cluster.get()); - // 生成配置文件 + // Generate Configuration File cluster->PrepareConfig(kMdsConfPath, mdsConfigOpts); cluster->PrepareConfig(kCSConfPath, @@ -183,50 +175,52 @@ class UnstableCSModuleException : public ::testing::Test { cluster->PrepareConfig(kClientConfPath, clientConfigOpts); - // 1. 启动etcd + // 1. Start etcd pid_t pid = cluster->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, std::vector{ - "--name=module_exception_curve_unstable_cs" }); + "--name=module_exception_curve_unstable_cs"}); LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort << ":" << kEtcdPeerIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 2. 启动一个mds + // 2. Start an mds pid = cluster->StartSingleMDS(1, kMdsIpPort, 30013, mdsConf, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(2)); - // 3. 创建物理池 - ASSERT_EQ( - 0, - cluster->PreparePhysicalPool( - 1, - "./test/integration/client/config/unstable/" - "topo_unstable.json")); + // 3. Creating a physical pool + ASSERT_EQ(0, cluster->PreparePhysicalPool( + 1, + "./test/integration/client/config/unstable/" + "topo_unstable.json")); - // 4. 创建chunkserver + // 4. Create chunkserver StartAllChunkserver(); std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池,并睡眠一段时间让底层copyset先选主 - ASSERT_EQ(0, cluster->PrepareLogicalPool( - 1, "test/integration/client/config/unstable/topo_unstable.json")); + // 5. Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first + ASSERT_EQ( + 0, + cluster->PrepareLogicalPool( + 1, + "test/integration/client/config/unstable/topo_unstable.json")); std::this_thread::sleep_for(std::chrono::seconds(10)); - // 6. 初始化client配置 + // 6. Initialize client configuration int ret = Init(kClientConfPath); ASSERT_EQ(ret, 0); - // 7. 先睡眠10s,让chunkserver选出leader + // 7. 
Sleep for 10 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(10)); } static void TearDownTestCase() { UnInit(); ASSERT_EQ(0, cluster->StopCluster()); - // 清理文件夹 + // Clean Up Folder system("rm -rf module_exception_curve_unstable_cs.etcd"); system("rm -rf module_exception_curve_unstable_cs"); system("rm -rf ttt"); @@ -300,7 +294,8 @@ class UnstableCSModuleException : public ::testing::Test { int UnstableCSModuleException::fd = 0; std::unique_ptr UnstableCSModuleException::cluster; -std::unordered_map UnstableCSModuleException::chunkServers; // NOLINT +std::unordered_map + UnstableCSModuleException::chunkServers; // NOLINT TEST_F(UnstableCSModuleException, TestCommonReadAndWrite) { const std::string filename = "/TestCommonReadAndWrite"; @@ -323,15 +318,15 @@ TEST_F(UnstableCSModuleException, TestCommonReadAndWrite) { ::Close(fd); } -// 集群拓扑结构 -// 1个client -// 1个etcd -// 1个mds -// 3个zone,每个里面2个chunkserver +// Cluster topology +// 1 client +// 1 ETCD +// 1 mds +// 3 zones, each with 2 chunkservers inside TEST_F(UnstableCSModuleException, HangOneZone) { srand(time(nullptr)); - // 开启多个线程写文件 + // Enable multiple threads to write files LOG(INFO) << "starting write..."; running = true; std::vector openAndWriteThreads; @@ -341,7 +336,7 @@ TEST_F(UnstableCSModuleException, HangOneZone) { "/test" + std::to_string(i)); } - // 正常写入60s, 并记录后30秒的iops + // Write normally for 60 seconds and record the IOPS for the next 30 seconds std::vector beforeRecords; std::this_thread::sleep_for(std::chrono::seconds(30)); for (int i = 1; i <= 30; ++i) { @@ -353,18 +348,18 @@ TEST_F(UnstableCSModuleException, HangOneZone) { beforeRecords.size(); LOG(INFO) << "iops before hang: " << beforeAvgIOps; - // hang一个zone的chunkserver + // Hang a chunkserver for a zone LOG(INFO) << "hang one zone"; ASSERT_EQ(0, cluster->HangChunkServer(1)); ASSERT_EQ(0, cluster->HangChunkServer(2)); std::vector afterRecords; - // 打印每一秒的iops情况 + // Print IOPS per second for (int i = 1; i <= 10; ++i) { std::this_thread::sleep_for(std::chrono::seconds(1)); auto tmp = iops.value.get_value(1); LOG(INFO) << "after " << i << "s, iops: " << tmp; - // 记录后5s的iops值 + // Record the iops value for 5 seconds after recording if (i >= 5) { afterRecords.push_back(tmp); } diff --git a/test/integration/cluster_common/cluster.cpp b/test/integration/cluster_common/cluster.cpp index 5d32ef8539..72410a5ca7 100644 --- a/test/integration/cluster_common/cluster.cpp +++ b/test/integration/cluster_common/cluster.cpp @@ -20,26 +20,28 @@ * Author: lixiaocui */ -#include +#include "test/integration/cluster_common/cluster.h" + #include -#include -#include -#include #include #include #include -#include -#include //NOLINT +#include +#include +#include +#include + #include //NOLINT +#include #include +#include +#include //NOLINT #include #include -#include -#include "test/integration/cluster_common/cluster.h" +#include "src/client/client_common.h" #include "src/common/string_util.h" #include "src/common/timeutility.h" -#include "src/client/client_common.h" #include "src/kvstorageclient/etcd_client.h" using ::curve::client::UserInfo_t; @@ -50,29 +52,29 @@ namespace curve { using ::curve::client::CreateFileContext; -int CurveCluster::InitMdsClient(const curve::client::MetaServerOption &op) { +int CurveCluster::InitMdsClient(const curve::client::MetaServerOption& op) { mdsClient_ = std::make_shared(); return mdsClient_->Initialize(op); } -std::vector VecStr2VecChar(std::vector args) { - std::vector argv(args.size() + 1); // for the 
NULL terminator +std::vector VecStr2VecChar(std::vector args) { + std::vector argv(args.size() + 1); // for the NULL terminator for (std::size_t i = 0; i < args.size(); ++i) { // not include cmd - argv[i] = new char[args[i].size()+1]; + argv[i] = new char[args[i].size() + 1]; snprintf(argv[i], args[i].size() + 1, "%s", args[i].c_str()); } argv[args.size()] = NULL; return argv; } -void ClearArgv(const std::vector &argv) { - for (auto const &item : argv) { - delete [] item; +void ClearArgv(const std::vector& argv) { + for (auto const& item : argv) { + delete[] item; } } int CurveCluster::InitSnapshotCloneMetaStoreEtcd( - const std::string &etcdEndpoints) { + const std::string& etcdEndpoints) { EtcdConf conf; conf.Endpoints = new char[etcdEndpoints.size()]; std::memcpy(conf.Endpoints, etcdEndpoints.c_str(), etcdEndpoints.size()); @@ -88,8 +90,8 @@ int CurveCluster::InitSnapshotCloneMetaStoreEtcd( } auto codec = std::make_shared(); - metaStore_ = std::make_shared(etcdClient, - codec); + metaStore_ = + std::make_shared(etcdClient, codec); if (metaStore_->Init() < 0) { LOG(ERROR) << "metaStore init fail."; return -1; @@ -106,17 +108,13 @@ int CurveCluster::StopCluster() { LOG(INFO) << "stop cluster begin..."; int ret = 0; - if (StopAllMDS() < 0) - ret = -1; + if (StopAllMDS() < 0) ret = -1; - if (StopAllChunkServer() < 0) - ret = -1; + if (StopAllChunkServer() < 0) ret = -1; - if (StopAllSnapshotCloneServer() < 0) - ret = -1; + if (StopAllSnapshotCloneServer() < 0) ret = -1; - if (StopAllEtcd() < 0) - ret = -1; + if (StopAllEtcd() < 0) ret = -1; if (!ret) LOG(INFO) << "success stop cluster"; @@ -125,9 +123,9 @@ int CurveCluster::StopCluster() { return ret; } -int CurveCluster::StartSingleMDS(int id, const std::string &ipPort, +int CurveCluster::StartSingleMDS(int id, const std::string& ipPort, int dummyPort, - const std::vector &mdsConf, + const std::vector& mdsConf, bool expectLeader) { LOG(INFO) << "start mds " << ipPort << " begin..."; pid_t pid = ::fork(); @@ -135,20 +133,21 @@ int CurveCluster::StartSingleMDS(int id, const std::string &ipPort, LOG(ERROR) << "start mds " << ipPort << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个mds + // Start an mds in a child process // ./bazel-bin/src/mds/main/curvemds std::vector args; args.emplace_back("./bazel-bin/src/mds/main/curvemds"); args.emplace_back("--mdsAddr=" + ipPort); args.emplace_back("--dummyPort=" + std::to_string(dummyPort)); - for (auto &item : mdsConf) { + for (auto& item : mdsConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! 
*/ execv("./bazel-bin/src/mds/main/curvemds", argv.data()); ClearArgv(argv); @@ -221,26 +220,27 @@ int CurveCluster::StopAllMDS() { } int CurveCluster::StartSnapshotCloneServer( - int id, const std::string &ipPort, - const std::vector &snapshotcloneConf) { + int id, const std::string& ipPort, + const std::vector& snapshotcloneConf) { LOG(INFO) << "start snapshotcloneserver " << ipPort << " begin ..."; pid_t pid = ::fork(); if (0 > pid) { LOG(ERROR) << "start snapshotcloneserver " << ipPort << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个snapshotcloneserver + // Starting a snapshotcloneserver in a child process std::vector args; args.emplace_back( "./bazel-bin/src/snapshotcloneserver/snapshotcloneserver"); args.emplace_back("--addr=" + ipPort); - for (auto &item : snapshotcloneConf) { + for (auto& item : snapshotcloneConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! */ execv("./bazel-bin/src/snapshotcloneserver/snapshotcloneserver", argv.data()); @@ -317,19 +317,18 @@ int CurveCluster::StopAllSnapshotCloneServer() { int ret = 0; auto tempMap = snapPidMap_; for (auto pair : tempMap) { - if (StopSnapshotCloneServer(pair.first) < 0) - ret = -1; + if (StopSnapshotCloneServer(pair.first) < 0) ret = -1; } - // 等待进程完全退出 + // Wait for the process to completely exit ::sleep(2); LOG(INFO) << "stop all snapshotcloneservver end."; return ret; } -int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, - const std::string &peerIpPort, - const std::vector &etcdConf) { +int CurveCluster::StartSingleEtcd(int id, const std::string& clientIpPort, + const std::string& peerIpPort, + const std::vector& etcdConf) { LOG(INFO) << "start etcd " << clientIpPort << " begin..."; pid_t pid = ::fork(); @@ -337,7 +336,7 @@ int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, LOG(ERROR) << "start etcd " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个etcd + // Start an ETCD in a child process // ip netns exec integ_etcd1 etcd std::vector args{"etcd"}; args.emplace_back("--listen-peer-urls=http://" + peerIpPort); @@ -348,14 +347,15 @@ int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, args.emplace_back("--initial-cluster-token=etcd-cluster-1"); args.emplace_back("--election-timeout=3000"); args.emplace_back("--heartbeat-interval=300"); - for (auto &item : etcdConf) { + for (auto& item : etcdConf) { args.push_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! 
*/ execvp("etcd", argv.data()); ClearArgv(argv); @@ -380,7 +380,7 @@ bool CurveCluster::WaitForEtcdClusterAvalible(int waitSec) { return false; } else { int i = 0; - for (auto &item : etcdClientIpPort_) { + for (auto& item : etcdClientIpPort_) { i++; if (i == etcdClientIpPort_.size()) { endpoint += "http://" + item.second; @@ -464,9 +464,9 @@ int CurveCluster::StopAllEtcd() { return ret; } -int CurveCluster::FormatFilePool(const std::string &filePooldir, - const std::string &filePoolmetapath, - const std::string &filesystempath, +int CurveCluster::FormatFilePool(const std::string& filePooldir, + const std::string& filePoolmetapath, + const std::string& filesystempath, uint32_t size) { LOG(INFO) << "FormatFilePool begin..."; @@ -475,8 +475,7 @@ int CurveCluster::FormatFilePool(const std::string &filePooldir, " -filePoolMetaPath=" + filePoolmetapath + " -fileSystemPath=" + filesystempath + " -allocateByPercent=false -preAllocateNum=" + - std::to_string(size * 300) + - " -needWriteZero=false"; + std::to_string(size * 300) + " -needWriteZero=false"; RETURN_IF_NOT_ZERO(system(cmd.c_str())); @@ -485,8 +484,8 @@ int CurveCluster::FormatFilePool(const std::string &filePooldir, } int CurveCluster::StartSingleChunkServer( - int id, const std::string &ipPort, - const std::vector &chunkserverConf) { + int id, const std::string& ipPort, + const std::vector& chunkserverConf) { LOG(INFO) << "start chunkserver " << id << ", " << ipPort << " begin..."; std::vector split; ::curve::common::SplitString(ipPort, ":", &split); @@ -500,19 +499,20 @@ int CurveCluster::StartSingleChunkServer( LOG(ERROR) << "start chunkserver " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个chunkserver + // Starting a chunkserver in a child process std::vector args; args.emplace_back("./bazel-bin/src/chunkserver/chunkserver"); args.emplace_back("-chunkServerIp=" + split[0]); args.emplace_back("-chunkServerPort=" + split[1]); - for (auto &item : chunkserverConf) { + for (auto& item : chunkserverConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! */ execv("./bazel-bin/src/chunkserver/chunkserver", argv.data()); ClearArgv(argv); @@ -530,7 +530,7 @@ int CurveCluster::StartSingleChunkServer( } int CurveCluster::StartSingleChunkServerInBackground( - int id, const std::vector &chunkserverConf) { + int id, const std::vector& chunkserverConf) { std::vector ipPort; ::curve::common::SplitString(ChunkServerIpPortInBackground(id), ":", &ipPort); @@ -547,7 +547,7 @@ int CurveCluster::StartSingleChunkServerInBackground( LOG(ERROR) << "start chunkserver " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个chunkserver + // Starting a chunkserver in a child process std::vector args; args.emplace_back("netns"); args.emplace_back("exec"); @@ -555,13 +555,14 @@ int CurveCluster::StartSingleChunkServerInBackground( args.emplace_back("./bazel-bin/src/chunkserver/chunkserver"); args.emplace_back("-chunkServerIp=" + ipPort[0]); args.emplace_back("-chunkServerPort=" + ipPort[1]); - for (auto &item : chunkserverConf) { + for (auto& item : chunkserverConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! 
+ * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! */ execvp("ip", argv.data()); ClearArgv(argv); @@ -723,7 +724,7 @@ std::string CurveCluster::ChunkServerIpPortInBackground(int id) { } int CurveCluster::PreparePhysicalPool(int mdsId, - const std::string &clusterMap) { + const std::string& clusterMap) { LOG(INFO) << "create physicalpool begin..."; std::string createPPCmd = std::string("./bazel-bin/tools/curvefsTool") + @@ -741,15 +742,14 @@ int CurveCluster::PreparePhysicalPool(int mdsId, return 0; } -int CurveCluster::PrepareLogicalPool(int mdsId, const std::string &clusterMap) { +int CurveCluster::PrepareLogicalPool(int mdsId, const std::string& clusterMap) { LOG(INFO) << "create logicalpool begin..."; - std::string createLPCmd = - std::string("./bazel-bin/tools/curvefsTool") + - std::string(" -cluster_map=") + clusterMap + - std::string(" -mds_addr=") + MDSIpPort(mdsId) + - std::string(" -op=create_logicalpool") + - std::string(" -stderrthreshold=0 -minloglevel=0"); + std::string createLPCmd = std::string("./bazel-bin/tools/curvefsTool") + + std::string(" -cluster_map=") + clusterMap + + std::string(" -mds_addr=") + MDSIpPort(mdsId) + + std::string(" -op=create_logicalpool") + + std::string(" -stderrthreshold=0 -minloglevel=0"); LOG(INFO) << "exec cmd: " << createLPCmd; RETURN_IF_NOT_ZERO(system(createLPCmd.c_str())); @@ -758,7 +758,7 @@ int CurveCluster::PrepareLogicalPool(int mdsId, const std::string &clusterMap) { return 0; } -bool CurveCluster::CurrentServiceMDS(int *curId) { +bool CurveCluster::CurrentServiceMDS(int* curId) { for (auto mdsId : mdsPidMap_) { if (0 == ProbePort(mdsIpPort_[mdsId.first], 20000, true)) { *curId = mdsId.first; @@ -772,8 +772,8 @@ bool CurveCluster::CurrentServiceMDS(int *curId) { return false; } -int CurveCluster::CreateFile(const std::string &user, const std::string &pwd, - const std::string &fileName, uint64_t fileSize, +int CurveCluster::CreateFile(const std::string& user, const std::string& pwd, + const std::string& fileName, uint64_t fileSize, bool normalFile, const std::string& poolset) { LOG(INFO) << "create file: " << fileName << ", size: " << fileSize << " begin..."; @@ -785,13 +785,12 @@ int CurveCluster::CreateFile(const std::string &user, const std::string &pwd, context.length = fileSize; context.poolset = poolset; - RETURN_IF_NOT_ZERO( - mdsClient_->CreateFile(context)); + RETURN_IF_NOT_ZERO(mdsClient_->CreateFile(context)); LOG(INFO) << "success create file"; return 0; } -int CurveCluster::ProbePort(const std::string &ipPort, int64_t timeoutMs, +int CurveCluster::ProbePort(const std::string& ipPort, int64_t timeoutMs, bool expectOpen) { int socket_fd = socket(AF_INET, SOCK_STREAM, 0); if (-1 == socket_fd) { @@ -819,7 +818,7 @@ int CurveCluster::ProbePort(const std::string &ipPort, int64_t timeoutMs, uint64_t start = ::curve::common::TimeUtility::GetTimeofDayMs(); while (::curve::common::TimeUtility::GetTimeofDayMs() - start < timeoutMs) { int connectRes = - connect(socket_fd, (struct sockaddr *)&addr, sizeof(addr)); + connect(socket_fd, (struct sockaddr*)&addr, sizeof(addr)); if (expectOpen && connectRes == 0) { LOG(INFO) << "probe " << ipPort << " success."; close(socket_fd); diff --git a/test/integration/cluster_common/cluster.h b/test/integration/cluster_common/cluster.h index e5b04d30c7..71777d5241 100644 --- a/test/integration/cluster_common/cluster.h +++ b/test/integration/cluster_common/cluster.h @@ -23,215 +23,219 @@ #ifndef TEST_INTEGRATION_CLUSTER_COMMON_CLUSTER_H_ #define 
TEST_INTEGRATION_CLUSTER_COMMON_CLUSTER_H_

-#include
#include
-#include
#include
-#include "src/client/mds_client.h"
+#include
+#include
+
#include "src/client/config_info.h"
-#include "test/util/config_generator.h"
+#include "src/client/mds_client.h"
#include "src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h"
+#include "test/util/config_generator.h"

-using ::curve::snapshotcloneserver::SnapshotCloneMetaStoreEtcd;
using ::curve::client::MDSClient;
+using ::curve::snapshotcloneserver::SnapshotCloneMetaStoreEtcd;

namespace curve {

-#define RETURN_IF_NOT_ZERO(x)                              \
-    do {                                                   \
-        int ret = (x);                                     \
-        if (ret != 0) {                                    \
-            LOG(ERROR) << __FILE__ << ":" << __LINE__      \
-                       << "-> get non-ZERO, return -1";    \
-            return ret;                                    \
-        }                                                  \
+#define RETURN_IF_NOT_ZERO(x)                             \
+    do {                                                  \
+        int ret = (x);                                    \
+        if (ret != 0) {                                   \
+            LOG(ERROR) << __FILE__ << ":" << __LINE__     \
+                       << "-> get non-ZERO, return -1";   \
+            return ret;                                   \
+        }                                                 \
    } while (0)

-#define RETURN_IF_FALSE(x)                                 \
-    do {                                                   \
-        bool ret = (x);                                    \
-        if (!ret) {                                        \
-            LOG(ERROR) << __FILE__ << ":" << __LINE__      \
-                       << "-> get FALSE, return -1";       \
-            return -1;                                     \
-        }                                                  \
+#define RETURN_IF_FALSE(x)                                \
+    do {                                                  \
+        bool ret = (x);                                   \
+        if (!ret) {                                       \
+            LOG(ERROR) << __FILE__ << ":" << __LINE__     \
+                       << "-> get FALSE, return -1";      \
+            return -1;                                    \
+        }                                                 \
    } while (0)
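// Editor's note: both macros early-return from the *calling* function, so
// they are only usable inside functions that return int. A hypothetical
// usage sketch (RestartEtcd is not a real method of this class):
//
//   int RestartEtcd(CurveCluster* cluster, int id) {
//       RETURN_IF_NOT_ZERO(cluster->StopEtcd(id));
//       RETURN_IF_FALSE(cluster->WaitForEtcdClusterAvalible());
//       return 0;
//   }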
class CurveCluster {
 public:
    /**
-     * CurveCluster 构造函数
+     * CurveCluster constructor
     *
-     * @param[in] netWorkSegment 网桥的网络地址,默认为"192.168.200."
-     * @param[in] nsPrefix 网络命名空间的前缀,默认为"integ_"
+     * @param[in] netWorkSegment network address of the bridge, defaults to
+     *            "192.168.200."
+     * @param[in] nsPrefix prefix of the network namespace, defaults to
+     *            "integ_"
     */
-    CurveCluster(const std::string &netWorkSegment = "192.168.200.",
-                 const std::string &nsPrefix = "integ_")
+    CurveCluster(const std::string& netWorkSegment = "192.168.200.",
+                 const std::string& nsPrefix = "integ_")
        : networkSegment_(netWorkSegment), nsPrefix_(nsPrefix) {}

    /**
-     * InitMdsClient 初始化mdsclient, 用于和mds交互
+     * InitMdsClient initializes the mdsclient used to talk to the mds
     *
-     * @param op 参数设置
-     * @return 0.成功; 非0.失败
+     * @param op option settings
+     * @return 0 on success, non-zero on failure
     */
-    int InitMdsClient(const curve::client::MetaServerOption &op);
-
+    int InitMdsClient(const curve::client::MetaServerOption& op);

    /**
-     * @brief 初始化metastore
+     * @brief Initialize the metastore
     *
-     * @param[in] etcdEndpoints etcd client的ip port
+     * @param[in] etcdEndpoints ip and port of the etcd client
     *
-     * @return 返回错误码
+     * @return an error code
     */
-    int InitSnapshotCloneMetaStoreEtcd(
-        const std::string &etcdEndpoints);
+    int InitSnapshotCloneMetaStoreEtcd(const std::string& etcdEndpoints);

    /**
-     * BuildNetWork 如果需要是用不同的ip来起chunkserver,
-     * 需要在测试用例的SetUp中先 调用该函数
-     * @return 0.成功; 非0.失败
+     * BuildNetWork: if the chunkservers need to be started with different
+     * IPs, call this function first in the test case's SetUp
+     * @return 0 on success, non-zero on failure
     */
    int BuildNetWork();

    /**
-     * StopCluster 停止该集群中所有的进程
-     * @return 0.成功; -1.失败
+     * StopCluster stops every process in the cluster
+     * @return 0 on success, -1 on failure
     */
    int StopCluster();

    /**
-     * @brief 生成各模块配置文件
+     * @brief Generate the configuration file of a module
     *
-     * @tparam T 任一ConfigGenerator
-     * @param configPath 配置文件路径
-     * @param options 修改的配置项
+     * @tparam T any ConfigGenerator
+     * @param configPath path of the configuration file
+     * @param options configuration items to override
     */
-    template
-    void PrepareConfig(const std::string &configPath,
-                       const std::vector &options) {
+    template
+    void PrepareConfig(const std::string& configPath,
+                       const std::vector& options) {
        T gentor(configPath);
        gentor.SetConfigOptions(options);
        gentor.Generate();
    }

    /**
-     * StartSingleMDS 启动一个mds
-     * 如果需要不同ip的chunkserver,ipPort请设置为192.168.200.1:XXXX
+     * StartSingleMDS starts one mds
+     * If chunkservers with different IPs are needed, set ipPort to
+     * 192.168.200.1:XXXX
     *
     * @param[in] id mdsId
-     * @param[in] ipPort 指定mds的ipPort
-     * @param[in] mdsConf mds启动参数项, 示例:
+     * @param[in] ipPort ipPort of this mds
+     * @param[in] mdsConf mds startup parameters, for example:
     *   const std::vector mdsConf{
            {"--graceful_quit_on_sigterm"},
            {"--confPath=./test/integration/cluster_common/mds.basic.conf"},
        };
-     * @param[in] expectLeader 是否预期是leader
-     * @return 成功则返回pid; 失败则返回-1
+     * @param[in] expectLeader whether this mds is expected to become leader
+     * @return the pid on success; -1 on failure
     */
-    int StartSingleMDS(int id, const std::string &ipPort, int dummyPort,
-                       const std::vector &mdsConf,
+    int StartSingleMDS(int id, const std::string& ipPort, int dummyPort,
+                       const std::vector& mdsConf,
                       bool expectLeader);
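    // Editor's note: typical usage, mirroring the integration tests in this
    // patch (the id and ports are illustrative):
    //
    //   pid_t pid = cluster->StartSingleMDS(1, "127.0.0.1:30010", 30013,
    //                                       mdsConf, true /*expectLeader*/);
    //   ASSERT_GT(pid, 0);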
    /**
-     * StopMDS 停止指定id的mds
-     * @return 0.成功; -1.失败
+     * StopMDS stops the mds with the given id
+     * @return 0 on success, -1 on failure
     */
    int StopMDS(int id);

    /**
-     * StopAllMDS 停止所有mds
-     * @return 0.成功; -1.失败
+     * StopAllMDS stops all mds
+     * @return 0 on success, -1 on failure
     */
    int StopAllMDS();

    /**
-     * @brief 启动一个snapshotcloneserver
+     * @brief Start a snapshotcloneserver
     *
-     * @param id snapshotcloneserver 的Id
-     * @param ipPort ip端口
-     * @param snapshotcloneConf 参数项
-     * @return 成功则返回pid; 失败则返回-1
+     * @param id id of the snapshotcloneserver
+     * @param ipPort its ip and port
+     * @param snapshotcloneConf startup parameters
+     * @return the pid on success; -1 on failure
     */
-    int
-    StartSnapshotCloneServer(int id, const std::string &ipPort,
-                             const std::vector &snapshotcloneConf);
+    int StartSnapshotCloneServer(
+        int id, const std::string& ipPort,
+        const std::vector& snapshotcloneConf);

    /**
-     * @brief 停止指定Id的snapshotcloneserver
+     * @brief Stop the snapshotcloneserver with the given id
     *
-     * @param id snapshotcloneserver的id
-     * @param force 为true时使用kill -9
-     * @return 成功返回0,失败返回-1
+     * @param id id of the snapshotcloneserver
+     * @param force use kill -9 when true
+     * @return 0 on success, -1 on failure
     */
    int StopSnapshotCloneServer(int id, bool force = false);

    /**
-     * @brief 重启指定Id的snapshotcloneserver
+     * @brief Restart the snapshotcloneserver with the given id
     *
-     * @param id snapshotcloneserver的id
-     * @param force 为true时使用kill -9
-     * @return 成功则返回pid; 失败则返回-1
+     * @param id id of the snapshotcloneserver
+     * @param force use kill -9 when true
+     * @return the pid on success; -1 on failure
     */
    int RestartSnapshotCloneServer(int id, bool force = false);

    /**
-     * @brief 停止所有的snapshotcloneserver
-     * @return 成功返回0,失败返回-1
+     * @brief Stop all snapshotcloneserver instances
+     * @return 0 on success, -1 on failure
     */
    int StopAllSnapshotCloneServer();

    /**
-     * StartSingleEtcd 启动一个etcd节点
+     * StartSingleEtcd starts one etcd node
     *
     * @param clientIpPort
     * @param peerIpPort
-     * @param etcdConf etcd启动项参数, 建议按照模块指定name,防止并发运行时冲突
+     * @param etcdConf etcd startup parameters; give the name a per-module
+     *        value to avoid conflicts between concurrently running tests,
     *        std::vector{"--name basic_test_start_stop_module1"}
-     * @return 成功则返回pid; 失败则返回-1
+     * @return the pid on success; -1 on failure
     */
-    int StartSingleEtcd(int id, const std::string &clientIpPort,
-                        const std::string &peerIpPort,
-                        const std::vector &etcdConf);
+    int StartSingleEtcd(int id, const std::string& clientIpPort,
+                        const std::string& peerIpPort,
+                        const std::vector& etcdConf);
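    // Editor's note: a typical call, as used by the fixtures in this patch
    // (the id, ports and --name value are per-test choices):
    //
    //   pid_t pid = cluster->StartSingleEtcd(
    //       1, "127.0.0.1:2221", "127.0.0.1:2222",
    //       std::vector<std::string>{"--name=basic_test_example"});
    //   ASSERT_GT(pid, 0);
    //   ASSERT_TRUE(cluster->WaitForEtcdClusterAvalible(20));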
    /**
     * WaitForEtcdClusterAvalible
-     * 在一定时间内等待etcd集群leader选举成功,处于可用状态
+     * Wait up to waitSec seconds for the etcd cluster to elect a leader
+     * and become available
     */
    bool WaitForEtcdClusterAvalible(int waitSec = 20);

    /**
-     * StopEtcd 停止指定id的etcd节点
-     * @return 0.成功; -1.失败
+     * StopEtcd stops the etcd node with the given id
+     * @return 0 on success, -1 on failure
     */
    int StopEtcd(int id);

    /**
-     * StopAllEtcd 停止所有etcd节点
-     * @return 0.成功; -1.失败
+     * StopAllEtcd stops all etcd nodes
+     * @return 0 on success, -1 on failure
     */
    int StopAllEtcd();

    /**
-     * @brief 格式化FilePool
+     * @brief Format a FilePool
     *
-     * @param filePooldir FilePool目录
-     * @param filePoolmetapath FilePool元数据目录
-     * @param filesystemPath 文件系统目录
+     * @param filePooldir FilePool directory
+     * @param filePoolmetapath FilePool metadata directory
+     * @param filesystemPath file system directory
     * @param size FilePool size (GB)
-     * @return 成功返回0,失败返回-1
+     * @return 0 on success, -1 on failure
     */
-    int FormatFilePool(const std::string &filePooldir,
-                       const std::string &filePoolmetapath,
-                       const std::string &filesystemPath, uint32_t size);
+    int FormatFilePool(const std::string& filePooldir,
+                       const std::string& filePoolmetapath,
+                       const std::string& filesystemPath, uint32_t size);

    /**
-     * StartSingleChunkServer 启动一个chunkserver节点
+     * StartSingleChunkServer starts one chunkserver node
     *
     * @param[in] id
     * @param[in] ipPort
-     * @param[in] chunkserverConf chunkserver启动项,示例:
+     * @param[in] chunkserverConf chunkserver startup parameters, example:
     *   const std::vector chunkserverConf1{
            {"--graceful_quit_on_sigterm"},
            {"-chunkServerStoreUri=local://./basic1/"},
@@ -243,209 +247,218 @@ class CurveCluster {
            {"-conf=./test/integration/cluster_common/chunkserver.basic.conf"},
            {"-raft_sync_segments=true"},
        };
-     *   建议文件名也按模块的缩写来,文件名不能太长,否则注册到数据库会失败
-     * @return 成功则返回pid; 失败则返回-1
+     *   It is recommended to name the files after the module's
+     *   abbreviation as well; a file name that is too long will fail to
+     *   register in the database
+     * @return the pid on success; -1 on failure
     */
-    int StartSingleChunkServer(int id, const std::string &ipPort,
-                               const std::vector &chunkserverConf);
+    int StartSingleChunkServer(int id, const std::string& ipPort,
+                               const std::vector& chunkserverConf);

    /**
-     * StartSingleChunkServer 在网络命名空间内启动一个指定id的chunkserver
-     * 无需指定ipPort
+     * StartSingleChunkServerInBackground starts the chunkserver with the
+     * given id inside the network namespace; no ipPort needs to be given
     *
     * @param id
-     * @param chunkserverConf, 同StartSingleChunkServer的示例
-     * @return 成功则返回pid; 失败则返回-1
+     * @param chunkserverConf, same as the StartSingleChunkServer example
+     * @return the pid on success; -1 on failure
     */
    int StartSingleChunkServerInBackground(
-        int id, const std::vector &chunkserverConf);
+        int id, const std::vector& chunkserverConf);

    /**
-     * StopChunkServer 停掉指定id的chunkserver进程
-     * @return 0.成功; -1.失败
+     * StopChunkServer stops the chunkserver process with the given id
+     * @return 0 on success, -1 on failure
     */
    int StopChunkServer(int id);

    /**
-     * StopAllChunkServer 停止所有chunkserver
-     * @return 0.成功; -1.失败
+     * StopAllChunkServer stops all chunkservers
+     * @return 0 on success, -1 on failure
     */
    int StopAllChunkServer();

    /**
-     * PreparePhysicalPool 创建物理池
+     * PreparePhysicalPool creates the physical pool
     *
-     * @param[in] id 给指定id的mds发送命令
-     * @param[in] clusterMap 拓扑信息,示例:
-     * ./test/integration/cluster_common/cluster_common_topo_1.txt (不同ip)
+     * @param[in] mdsId the mds to send the command to
+     * @param[in] clusterMap topology information, for example:
+     * ./test/integration/cluster_common/cluster_common_topo_1.txt
+     * (different IPs)
     * ./test/integration/cluster_common/cluster_common_topo_2.txt
-     * (相同ip, 一定要加上port加以区分,
-     * chunkserver也必须和clusterMap中server的ipPort相同)
-     * @return 0.成功; -1.失败
+     * (same IP; the port must be added to tell the servers apart, and
+     * each chunkserver must use the same ipPort as its server entry in
+     * the clusterMap)
+     * @return 0 on success, -1 on failure
     */
-    int PreparePhysicalPool(int mdsId, const std::string &clusterMap);
+    int PreparePhysicalPool(int mdsId, const std::string& clusterMap);

    /**
-     * @return 0.成功; -1.失败
+     * PrepareLogicalPool creates the logical pool
+     * @return 0 on success, -1 on failure
     */
-    int PrepareLogicalPool(int mdsId, const std::string &clusterMap);
+    int PrepareLogicalPool(int mdsId, const std::string& clusterMap);

    /**
-     * MDSIpPort 获取指定id的mds地址
+     * MDSIpPort returns the address of the mds with the given id
     */
    std::string MDSIpPort(int id);

    /**
-     * EtcdClientIpPort 获取指定id的etcd client地址
+     * EtcdClientIpPort returns the etcd client address for the given id
     */
    std::string EtcdClientIpPort(int id);

    /**
-     * EtcdPeersIpPort 获取指定id的etcd peers地址
+     * EtcdPeersIpPort returns the etcd peers address for the given id
     */
    std::string EtcdPeersIpPort(int id);

    /**
-     * ChunkServerIpPort 获取指定id的chunkserver地址
+     * ChunkServerIpPort returns the chunkserver address for the given id
     */
    std::string ChunkServerIpPort(int id);

    /**
-     * HangMDS hang住指定mds进程
-     * @return 0.成功; -1.失败
+     * HangMDS hangs (suspends) the specified mds process
+     * @return 0 on success, -1 on failure
     */
    int HangMDS(int id);

    /**
-     * RecoverHangMDS 恢复hang住的mds进程
-     * @return 0.成功; -1.失败
+     * RecoverHangMDS resumes a previously hung mds process
+     * @return 0 on success, -1 on failure
     */
    int RecoverHangMDS(int id);

    /**
-     * HangEtcd hang住指定etcd进程
-     * @return 0.成功; -1.失败
+     * HangEtcd hangs (suspends) the specified etcd process
+     * @return 0 on success, -1 on failure
     */
    int HangEtcd(int id);
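    // Editor's note: the Hang*/RecoverHang* pairs simulate a process that
    // is alive but not scheduled, as opposed to Stop*, which kills it. One
    // common way to implement this, and a reasonable mental model here, is
    // job control signals (the real implementation may differ):
    //
    //   #include <signal.h>
    //   int HangProcessSketch(pid_t pid) { return kill(pid, SIGSTOP); }
    //   int RecoverHangProcessSketch(pid_t pid) { return kill(pid, SIGCONT); }
    //
    // A suspended mds keeps its sockets open, so clients observe RPC
    // timeouts rather than connection-refused errors, which is exactly what
    // the MDS exception tests earlier in this patch rely on.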
    /**
-     * RecoverHangEtcd 恢复hang住的mds进程
-     * @return 0.成功; -1.失败
+     * RecoverHangEtcd resumes a previously hung etcd process
+     * @return 0 on success, -1 on failure
     */
    int RecoverHangEtcd(int id);

    /**
-     * HangChunkServer hang住指定chunkserver进程
-     * @return 0.成功; -1.失败
+     * HangChunkServer hangs (suspends) the specified chunkserver process
+     * @return 0 on success, -1 on failure
     */
    int HangChunkServer(int id);

    /**
-     * RecoverHangChunkServer 恢复hang住的chunkserver进程
-     * @return 0.成功; -1.失败
+     * RecoverHangChunkServer resumes a previously hung chunkserver process
+     * @return 0 on success, -1 on failure
     */
    int RecoverHangChunkServer(int id);

    /**
-     * CurrentServiceMDS 获取当前正在提供服务的mds
+     * CurrentServiceMDS finds the mds that is currently serving
     *
-     * @param[out] curId 当前正在服务的mds编号
+     * @param[out] curId id of the mds currently serving
     *
-     * @return true表示有正在服务的mds, false表示没有正在服务的mds
+     * @return true if some mds is serving, false otherwise
     */
-    bool CurrentServiceMDS(int *curId);
+    bool CurrentServiceMDS(int* curId);

    /**
-     * CreateFile 在curve中创建文件
+     * CreateFile creates a file in curve
     *
-     * @param[in] user 用户
-     * @param[in] pwd 密码
-     * @param[in] fileName 文件名
-     * @param[in] fileSize 文件大小
-     * @param[in] normalFile 是否为normal file
-     * @return 0.成功; -1.失败
-     */
-    int CreateFile(const std::string &user, const std::string &pwd,
-                   const std::string &fileName, uint64_t fileSize = 0,
+     * @param[in] user user name
+     * @param[in] pwd password
+     * @param[in] fileName file name
+     * @param[in] fileSize file size
+     * @param[in] normalFile whether it is a normal file
+     * @return 0 on success, -1 on failure
+     */
+    int CreateFile(const std::string& user, const std::string& pwd,
+                   const std::string& fileName, uint64_t fileSize = 0,
                   bool normalFile = true, const std::string& poolset = "");

 private:
    /**
-     * ProbePort 探测指定ipPort是否处于监听状态
+     * ProbePort checks whether the given ipPort is in the listening state
     *
-     * @param[in] ipPort 指定的ipPort值
-     * @param[in] timeoutMs 探测的超时时间,单位是ms
-     * @param[in] expectOpen 是否希望是监听状态
+     * @param[in] ipPort the ipPort to probe
+     * @param[in] timeoutMs probe timeout in milliseconds
+     * @param[in] expectOpen whether it is expected to be listening
     *
-     * @return 0表示指定时间内的探测符合预期. -1表示指定时间内的探测不符合预期
+     * @return 0 if the probe result matches the expectation within the
+     * given time, -1 otherwise
     */
-    int ProbePort(const std::string &ipPort, int64_t timeoutMs,
+    int ProbePort(const std::string& ipPort, int64_t timeoutMs,
                  bool expectOpen);
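    // Editor's note: ProbePort is how CurrentServiceMDS distinguishes the
    // serving mds from stopped ones (see cluster.cpp earlier in this patch:
    // it retries connect() on a TCP socket until a deadline). The core of
    // one probe iteration, in sketch form:
    //
    //   bool PortOpen(const std::string& ip, uint16_t port) {
    //       int fd = socket(AF_INET, SOCK_STREAM, 0);
    //       sockaddr_in addr{};
    //       addr.sin_family = AF_INET;
    //       addr.sin_port = htons(port);
    //       inet_pton(AF_INET, ip.c_str(), &addr.sin_addr);
    //       bool ok = connect(fd, reinterpret_cast<sockaddr*>(&addr),
    //                         sizeof(addr)) == 0;
    //       close(fd);
    //       return ok;
    //   }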
    /**
     * ChunkServerIpPortInBackground
-     * 在需要不同ip的chunkserver的情况下,用于生成chunkserver ipPort
+     * Generates a chunkserver ipPort when chunkservers with different IPs
+     * are required
     */
    std::string ChunkServerIpPortInBackground(int id);

    /**
-     * HangProcess hang住一个进程
+     * HangProcess hangs a process
     *
-     * @param pid 进程id
-     * @return 0.成功; -1.失败
+     * @param pid process id
+     * @return 0 on success; -1 on failure
     */
    int HangProcess(pid_t pid);

    /**
-     * RecoverHangProcess 恢复hang住的进程
+     * RecoverHangProcess resumes a hung process
     *
-     * @param pid 进程id
-     * @return 0.成功; -1.失败
+     * @param pid process id
+     * @return 0 on success; -1 on failure
     */
    int RecoverHangProcess(pid_t pid);

 private:
-    // 网络号
+    // Network segment
    std::string networkSegment_;

-    // 网络命名空间前缀
+    // Network namespace prefix
    std::string nsPrefix_;

-    // mds的id对应的进程号
+    // pid corresponding to each mds id
    std::map mdsPidMap_;

-    // mds的id对应的ipport
+    // ipPort corresponding to each mds id
    std::map mdsIpPort_;

-    // snapshotcloneserver id对应的pid
+    // pid corresponding to each snapshotcloneserver id
    std::map snapPidMap_;

-    // snapshotcloneserver id对应的ipPort
+    // ipPort corresponding to each snapshotcloneserver id
    std::map snapIpPort_;

-    // snapshotcloneserver id对应的conf
+    // conf corresponding to each snapshotcloneserver id
    std::map> snapConf_;

-    // etcd的id对应的进程号
+    // pid corresponding to each etcd id
    std::map etcdPidMap_;

-    // etcd的id对应的client ipport
+    // client ipPort corresponding to each etcd id
    std::map etcdClientIpPort_;

-    // etcd的id对应的peer ipport
+    // peer ipPort corresponding to each etcd id
    std::map etcdPeersIpPort_;

-    // chunkserver的id对应的进程号
+    // pid corresponding to each chunkserver id
    std::map chunkserverPidMap_;

-    // chunkserver的id对应的ipport
+    // ipPort corresponding to each chunkserver id
    std::map chunkserverIpPort_;

    // mdsClient
    std::shared_ptr mdsClient_;

 public:
-    // SnapshotCloneMetaStore用于测试过程中灌数据
+    // SnapshotCloneMetaStore, used to populate data during tests
    std::shared_ptr metaStore_;
};
} // namespace curve
diff --git a/test/integration/cluster_common/cluster_basic_test.cpp b/test/integration/cluster_common/cluster_basic_test.cpp
index 8f49b1ebe0..071bc58e1f 100644
--- a/test/integration/cluster_common/cluster_basic_test.cpp
+++ b/test/integration/cluster_common/cluster_basic_test.cpp
@@ -14,7 +14,6 @@
 * limitations under the License.
*/ - /* * Project: curve * Created Date: 19-09-02 @@ -22,113 +21,110 @@ */ #include + +#include //NOLINT #include #include -#include #include //NOLINT -#include //NOLINT +#include + #include "test/integration/cluster_common/cluster.h" namespace curve { const std::vector mdsConf{ - { "--graceful_quit_on_sigterm" }, - { "--confPath=./conf/mds.conf" }, - { "--mdsDbName=cluster_common_curve_mds" }, - { "--sessionInterSec=30" }, + {"--graceful_quit_on_sigterm"}, + {"--confPath=./conf/mds.conf"}, + {"--mdsDbName=cluster_common_curve_mds"}, + {"--sessionInterSec=30"}, }; const std::vector chunkserverConf1{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./basic1/" }, - { "-chunkServerMetaUri=local://./basic1/chunkserver.dat" }, - { "-copySetUri=local://./basic1/copysets" }, - { "-raftSnapshotUri=curve://./basic1/copysets" }, - { "-raftLogUri=curve://./basic1/copysets" }, - { "-recycleUri=local://./basic1/recycler" }, - { "-chunkFilePoolDir=./basic1/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./basic1/chunkfilepool.meta" }, - { "-conf=./conf/chunkserver.conf.example" }, - { "-raft_sync_segments=true" }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=./basic1/walfilepool/" }, - { "-walFilePoolMetaPath=./basic1/walfilepool.meta" } -}; + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./basic1/"}, + {"-chunkServerMetaUri=local://./basic1/chunkserver.dat"}, + {"-copySetUri=local://./basic1/copysets"}, + {"-raftSnapshotUri=curve://./basic1/copysets"}, + {"-raftLogUri=curve://./basic1/copysets"}, + {"-recycleUri=local://./basic1/recycler"}, + {"-chunkFilePoolDir=./basic1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./basic1/chunkfilepool.meta"}, + {"-conf=./conf/chunkserver.conf.example"}, + {"-raft_sync_segments=true"}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=./basic1/walfilepool/"}, + {"-walFilePoolMetaPath=./basic1/walfilepool.meta"}}; const std::vector chunkserverConf2{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./basic2/" }, - { "-chunkServerMetaUri=local://./basic2/chunkserver.dat" }, - { "-copySetUri=local://./basic2/copysets" }, - { "-raftSnapshotUri=curve://./basic2/copysets" }, - { "-raftLogUri=curve://./basic2/copysets" }, - { "-recycleUri=local://./basic2/recycler" }, - { "-chunkFilePoolDir=./basic2/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./basic2/chunkfilepool.meta" }, - { "-conf=./conf/chunkserver.conf.example" }, - { "-raft_sync_segments=true" }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=./basic2/walfilepool/" }, - { "-walFilePoolMetaPath=./basic2/walfilepool.meta" } -}; + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./basic2/"}, + {"-chunkServerMetaUri=local://./basic2/chunkserver.dat"}, + {"-copySetUri=local://./basic2/copysets"}, + {"-raftSnapshotUri=curve://./basic2/copysets"}, + {"-raftLogUri=curve://./basic2/copysets"}, + {"-recycleUri=local://./basic2/recycler"}, + {"-chunkFilePoolDir=./basic2/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./basic2/chunkfilepool.meta"}, + {"-conf=./conf/chunkserver.conf.example"}, + {"-raft_sync_segments=true"}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=./basic2/walfilepool/"}, + {"-walFilePoolMetaPath=./basic2/walfilepool.meta"}}; const std::vector chunkserverConf3{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./basic3/" }, - { 
"-chunkServerMetaUri=local://./basic3/chunkserver.dat" }, - { "-copySetUri=local://./basic3/copysets" }, - { "-raftSnapshotUri=curve://./basic3/copysets" }, - { "-raftLogUri=curve://./basic3/copysets" }, - { "-recycleUri=local://./basic3/recycler" }, - { "-chunkFilePoolDir=./basic3/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./basic3/chunkfilepool.meta" }, - { "-conf=./conf/chunkserver.conf.example" }, - { "-raft_sync_segments=true" }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=./basic3/walfilepool/" }, - { "-walFilePoolMetaPath=./basic3/walfilepool.meta" } -}; + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./basic3/"}, + {"-chunkServerMetaUri=local://./basic3/chunkserver.dat"}, + {"-copySetUri=local://./basic3/copysets"}, + {"-raftSnapshotUri=curve://./basic3/copysets"}, + {"-raftLogUri=curve://./basic3/copysets"}, + {"-recycleUri=local://./basic3/recycler"}, + {"-chunkFilePoolDir=./basic3/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./basic3/chunkfilepool.meta"}, + {"-conf=./conf/chunkserver.conf.example"}, + {"-raft_sync_segments=true"}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=./basic3/walfilepool/"}, + {"-walFilePoolMetaPath=./basic3/walfilepool.meta"}}; class ClusterBasicTest : public ::testing::Test { protected: void SetUp() { curveCluster_ = std::make_shared(); - // TODO(lixiaocui): 需要用sudo去运行,后续打开 + // TODO(lixiaocui): It needs to be run with sudo and opened later // curveCluster_->BuildNetWork(); } - void TearDown() { - ASSERT_EQ(0, curveCluster_->StopCluster()); - } + void TearDown() { ASSERT_EQ(0, curveCluster_->StopCluster()); } protected: std::shared_ptr curveCluster_; }; -// TODO(lixiaocui): 需要sudo运行,ci变更后打开 +// TODO(lixiaocui): Requires sudo to run and open after ci changes TEST_F(ClusterBasicTest, DISABLED_test_start_stop_module1) { - // 起etcd + // Starting etcd pid_t pid = curveCluster_->StartSingleEtcd( 1, "127.0.0.1:2221", "127.0.0.1:2222", - std::vector{ "--name=basic_test_start_stop_module1" }); + std::vector{"--name=basic_test_start_stop_module1"}); LOG(INFO) << "etcd 1 started on 127.0.0.1:2221:2222, pid = " << pid; ASSERT_GT(pid, 0); - // 起mds + // Starting mds pid = curveCluster_->StartSingleMDS(1, "192.168.200.1:3333", 3334, mdsConf, true); LOG(INFO) << "mds 1 started on 192.168.200.1:3333, pid = " << pid; ASSERT_GT(pid, 0); - // 创建物理池 + // Creating a physical pool ASSERT_EQ( 0, curveCluster_->PreparePhysicalPool( 1, "./test/integration/cluster_common/cluster_common_topo_1.json")); - // 创建chunkserver + // Create chunkserver pid = curveCluster_->StartSingleChunkServerInBackground(1, chunkserverConf1); LOG(INFO) << "chunkserver 1 started in background, pid = " << pid; @@ -142,17 +138,19 @@ TEST_F(ClusterBasicTest, DISABLED_test_start_stop_module1) { LOG(INFO) << "chunkserver 3 started in background, pid = " << pid; ASSERT_GT(pid, 0); - // 创建逻辑池和copyset - ASSERT_EQ(0, curveCluster_->PrepareLogicalPool( - 1, "./test/integration/cluster_common/cluster_common_topo_1.json")); + // Creating logical pools and copysets + ASSERT_EQ( + 0, + curveCluster_->PrepareLogicalPool( + 1, "./test/integration/cluster_common/cluster_common_topo_1.json")); - // 停掉chunkserver + // Stop chunkserver ASSERT_EQ(0, curveCluster_->StopChunkServer(1)); ASSERT_EQ(0, curveCluster_->StopChunkServer(2)); ASSERT_EQ(0, curveCluster_->StopChunkServer(3)); - // 停掉mds + // Stop mds ASSERT_EQ(0, curveCluster_->StopMDS(1)); - // 停掉etcd + // Stop etcd ASSERT_EQ(0, curveCluster_->StopEtcd(1)); 
system("rm -r test_start_stop_module1.etcd"); @@ -165,16 +163,16 @@ TEST_F(ClusterBasicTest, test_start_stop_module2) { ASSERT_EQ(0, system("rm -fr basic*")); ASSERT_EQ(0, system((std::string("mkdir -p ") + commonDir).c_str())); - // 起etcd + // Starting etcd std::string etcdDir = commonDir + "/etcd.log"; pid_t pid = curveCluster_->StartSingleEtcd( 1, "127.0.0.1:2221", "127.0.0.1:2222", - std::vector{ "--name=test_start_stop_module2" }); + std::vector{"--name=test_start_stop_module2"}); LOG(INFO) << "etcd 1 started on 127.0.0.1:2221:2222, pid = " << pid; ASSERT_GT(pid, 0); ASSERT_TRUE(curveCluster_->WaitForEtcdClusterAvalible()); - // 起mds + // Starting mds auto mdsConfbak = mdsConf; auto mdsDir = commonDir + "/mds"; ASSERT_EQ(0, system((std::string("mkdir ") + mdsDir).c_str())); @@ -184,19 +182,19 @@ TEST_F(ClusterBasicTest, test_start_stop_module2) { true); LOG(INFO) << "mds 1 started on 127.0.0.1:3333, pid = " << pid; ASSERT_GT(pid, 0); - // 初始化mdsclient + // Initialize mdsclient curve::client::MetaServerOption op; op.rpcRetryOpt.rpcTimeoutMs = 4000; - op.rpcRetryOpt.addrs = std::vector{ "127.0.0.1:3333" }; + op.rpcRetryOpt.addrs = std::vector{"127.0.0.1:3333"}; ASSERT_EQ(0, curveCluster_->InitMdsClient(op)); - // 创建物理池 + // Creating a physical pool ASSERT_EQ( 0, curveCluster_->PreparePhysicalPool( 1, "./test/integration/cluster_common/cluster_common_topo_2.json")); - // 创建chunkserver + // Create chunkserver auto copy1 = chunkserverConf1; std::string chunkserver1Dir = commonDir + "/chunkserver1"; ASSERT_EQ(0, system((std::string("mkdir ") + chunkserver1Dir).c_str())); @@ -224,40 +222,42 @@ TEST_F(ClusterBasicTest, test_start_stop_module2) { LOG(INFO) << "chunkserver 3 started on 127.0.0.1:2004, pid = " << pid; ASSERT_GT(pid, 0); - // 创建逻辑池和copyset - ASSERT_EQ(0, curveCluster_->PrepareLogicalPool( - 1, "./test/integration/cluster_common/cluster_common_topo_2.json")); + // Creating logical pools and copysets + ASSERT_EQ( + 0, + curveCluster_->PrepareLogicalPool( + 1, "./test/integration/cluster_common/cluster_common_topo_2.json")); - // 创建文件 + // Create File ASSERT_EQ(0, curveCluster_->CreateFile("test", "test", "/basic_test", 10 * 1024 * 1024 * 1024UL, /*normalFile=*/true, "SSD_2")); - // 获取当前正在服务的mds + // Obtain the currently serving mds int curMds; ASSERT_TRUE(curveCluster_->CurrentServiceMDS(&curMds)); ASSERT_EQ(1, curMds); - // hang mds进程 + // hang mds process ASSERT_EQ(0, curveCluster_->HangMDS(1)); - // 创建文件失败 + // Failed to create file ASSERT_NE(0, curveCluster_->CreateFile("test1", "test1", "/basic_test1", 10 * 1024 * 1024 * 1024UL, /*normalFile=*/true, "SSD_2")); - // 恢复mds进程 + // Resume mds process ASSERT_EQ(0, curveCluster_->RecoverHangMDS(1)); - // 创建文件成功 + // Successfully created file ASSERT_EQ(0, curveCluster_->CreateFile("test2", "test2", "/basic_test2", 10 * 1024 * 1024 * 1024UL, /*normalFile=*/true, "SSD_2")); - // 停掉chunkserver + // Stop chunkserver ASSERT_EQ(0, curveCluster_->StopChunkServer(1)); ASSERT_EQ(0, curveCluster_->StopChunkServer(2)); ASSERT_EQ(0, curveCluster_->StopChunkServer(3)); - // 停掉mds + // Stop mds ASSERT_EQ(0, curveCluster_->StopMDS(1)); - // 停掉etcd + // Stop etcd ASSERT_EQ(0, curveCluster_->StopEtcd(1)); system((std::string("rm -fr ") + commonDir).c_str()); @@ -271,7 +271,7 @@ TEST_F(ClusterBasicTest, test_multi_mds_and_etcd) { ASSERT_EQ(0, system("rm -fr test_multi_etcd_node*.etcd")); ASSERT_EQ(0, system((std::string("mkdir ") + commonDir).c_str())); - // 起三个etcd + // Start three ETCDs std::string etcdDir = commonDir + "/etcd"; ASSERT_EQ(0, 
system((std::string("mkdir ") + etcdDir).c_str())); std::vector etcdCluster{ @@ -307,7 +307,7 @@ TEST_F(ClusterBasicTest, test_multi_mds_and_etcd) { ASSERT_GT(pid, 0); ASSERT_TRUE(curveCluster_->WaitForEtcdClusterAvalible()); - // 起三mds + // Starting three mds std::string mds1Dir = commonDir + "/mds1"; std::string mds2Dir = commonDir + "/mds2"; std::string mds3Dir = commonDir + "/mds3"; @@ -340,16 +340,16 @@ TEST_F(ClusterBasicTest, test_multi_mds_and_etcd) { LOG(INFO) << "mds 3 started on 127.0.0.1:2312, pid = " << pid; ASSERT_GT(pid, 0); - // 获取当前正在服务的mds + // Obtain the currently serving mds int curMds; ASSERT_TRUE(curveCluster_->CurrentServiceMDS(&curMds)); ASSERT_EQ(1, curMds); - // 停掉mds + // Stop mds ASSERT_EQ(0, curveCluster_->StopMDS(1)); ASSERT_EQ(0, curveCluster_->StopMDS(2)); ASSERT_EQ(0, curveCluster_->StopMDS(3)); - // 停掉etcd + // Stop etcd ASSERT_EQ(0, curveCluster_->StopEtcd(1)); ASSERT_EQ(0, curveCluster_->StopEtcd(2)); ASSERT_EQ(0, curveCluster_->StopEtcd(3)); diff --git a/test/integration/cluster_common/mds.basic.conf b/test/integration/cluster_common/mds.basic.conf index 9486982bf5..b0cb16d055 100644 --- a/test/integration/cluster_common/mds.basic.conf +++ b/test/integration/cluster_common/mds.basic.conf @@ -15,196 +15,196 @@ # # -# mds服务端口 +# Mds service port # mds.listen.addr=127.0.0.1:6666 # -# etcd相关配置 +# ETCD related configurations # -# etcd地址 +# ETCD address mds.etcd.endpoint=localhost:2221 -# client建立连接的超时时间 +# The timeout period for establishing a connection with a client mds.etcd.dailtimeoutMs=5000 -# client进行put/get/txn等操作的超时时间 +# The timeout period for client to perform put/get/txn and other operations mds.etcd.operation.timeoutMs=5000 -# client操作失败可以重试的次数 +# The number of times a client operation failed and can be retried mds.etcd.retry.times=3 # -# segment分配量统计相关配置 +# Configuration related to segment allocation statistics # -# 将内存中的数据持久化到etcd的间隔, 单位ms +# The interval between persisting data in memory to ETCD, in milliseconds mds.segment.alloc.periodic.persistInterMs=1000 -# 出错情况下的重试间隔,单位ms +# The retry interval in ms in case of an error mds.segment.alloc.retryInterMs=1000 -# leader竞选时会创建session, 单位是秒, 因为go端代码的接口这个值得单位就是s +# During the leader campaign, a session will be created in seconds, as the value unit for the interface of the go side code is seconds mds.leader.sessionInterSec=5 -# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间 -# 内未当选leader会返回错误。这里设置10分钟超时,超时后mds会继续竞选 +# The timeout for leader election. If set to 0, the election will block indefinitely if unsuccessful. If set to a value greater than 0, an error will be returned if not elected as leader within the electionTimeoutMs duration. +# Here, a timeout of 10 minutes is set, and if it times out, the MDS will continue the election process. 
 #
-# segment分配量统计相关配置
+# Configuration related to segment allocation statistics
 #
-# 将内存中的数据持久化到etcd的间隔, 单位ms
+# Interval for persisting in-memory data to etcd, in ms
 mds.segment.alloc.periodic.persistInterMs=1000
-# 出错情况下的重试间隔,单位ms
+# Retry interval on error, in ms
 mds.segment.alloc.retryInterMs=1000
-# leader竞选时会创建session, 单位是秒, 因为go端代码的接口这个值得单位就是s
+# A session is created during leader election; the unit is seconds because
+# that is the unit used by the Go-side interface
 mds.leader.sessionInterSec=5
-# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间
-# 内未当选leader会返回错误。这里设置10分钟超时,超时后mds会继续竞选
+# Timeout for leader election. If 0, an unsuccessful election blocks
+# indefinitely; if greater than 0, an error is returned when not elected
+# within electionTimeoutMs, after which the mds keeps campaigning.
 mds.leader.electionTimeoutMs=0
 #
-# scheduler相关配置
+# Scheduler related configuration
 #
-# copysetScheduler开关
+# copysetScheduler switch
 mds.enable.copyset.scheduler=true
-# leaderScheduler开关
+# leaderScheduler switch
 mds.enable.leader.scheduler=true
-# recoverScheduler开关
+# recoverScheduler switch
 mds.enable.recover.scheduler=true
-# replicaScheduler开关
+# replicaScheduler switch
 mds.enable.replica.scheduler=true
-# copysetScheduler 轮次间隔,单位是s
+# copysetScheduler round interval, in seconds
 mds.copyset.scheduler.intervalSec=5
-# replicaScheduler 轮次间隔,单位是s
+# replicaScheduler round interval, in seconds
 mds.replica.scheduler.intervalSec=5
-# leaderScheduler 轮次间隔,单位是s
+# leaderScheduler round interval, in seconds
 mds.leader.scheduler.intervalSec=30
-# recoverScheduler 轮次间隔,单位是s
+# recoverScheduler round interval, in seconds
 mds.recover.scheduler.intervalSec=5
-# 每块磁盘上operator的并发度
+# Concurrency of operators on each disk
 mds.schduler.operator.concurrent=4
-# leader变更超时时间, 超时后mds从内存移除该operator
+# Timeout for a leader transfer; after it expires the mds removes the
+# operator from memory
 mds.schduler.transfer.limitSec=1800
-# 减一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for removing a replica; after it expires the mds removes the
+# operator from memory
 mds.scheduler.remove.limitSec=1800
-# 增加一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for adding a replica; after it expires the mds removes the
+# operator from memory
 mds.scheduler.add.limitSec=1800
-# copyset数量极差不能超过均值的百分比
+# The range (max - min) of copyset counts must not exceed this percentage
+# of the mean
 mds.scheduler.copysetNumRangePercent=0.05
-# chunkserver上copyset的scatte-rwidth不能超过最小值的百分比
+# The scatter-width of copysets on a chunkserver must not exceed this
+# percentage of the minimum value
 mds.schduler.scatterWidthRangePerent=0.2
-# 一个server上超过一定数量的chunkserver offline, 不做恢复
+# If more than this number of chunkservers are offline on one server,
+# no recovery is performed
 mds.chunkserver.failure.tolerance=3
-# chunkserver启动coolingTimeSec_后才可以作为target leader, 单位是s
-# TODO(lixiaocui): 续得一定程度上与快照的时间间隔方面做到相关
+# A chunkserver can become a target leader only coolingTimeSec_ after it
+# starts, in seconds
+# TODO(lixiaocui): this should be related, to some extent, to the snapshot
+# interval
 mds.scheduler.chunkserver.cooling.timeSec=1800
 #
-# 心跳相关配置,单位为ms
+# Heartbeat related configuration, in ms
 #
-# chunkserver和mds的心跳间隔
+# Heartbeat interval between chunkserver and mds
 mds.heartbeat.intervalMs=1000
-# chunkserver和mds间心跳miss的时间
+# Time after which a heartbeat between chunkserver and mds counts as missed
 mds.heartbeat.misstimeoutMs=3000
-# mds在心跳miss后offlinetimeout被标记为offline
+# After heartbeats go missing, the mds marks the chunkserver offline once
+# offlinetimeout expires
 mds.heartbeat.offlinetimeoutMs=1800000
-# mds启动后延迟一定时间开始指导chunkserver删除物理数据
-# 需要延迟删除的原因在代码中备注
+# After the mds starts, it waits for a while before instructing
+# chunkservers to delete physical data; the reason for the delayed
+# deletion is noted in the code
 mds.heartbeat.clean_follower_afterMs=1200000
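# Putting the heartbeat numbers above together: a chunkserver heartbeats
# every 1s; after 3s of silence the heartbeat counts as missed; after 30min
# (1800000ms) the chunkserver is marked offline; and the mds waits 20min
# (1200000ms) after startup before guiding any physical data deletion.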
 #
-# namespace cache相关
+# namespace cache related
 #
-# namestorage的缓存大小,为0表示不缓存
-# 按照每个文件最小10GB的空间预算。算上超售(2倍)
-# 文件数量 = 5PB/10GB ~= 524288 个文件
-# sizeof(namespace对象) * 524288 ~= 89Byte *524288 ~= 44MB 空间
-# 16MB chunk大小, 1个segment 1GB
-# sizeof(segment 对象) * 2621440 ~=(32 + (1024/16)*12)* 2621440 ~= 1.95 GB
-# 数据量:3GB左右
-# 记录数量:524288+2621440 ~= 300w左右
+# Cache size of namestorage; 0 means no caching
+# Based on a minimum space budget of 10GB per file, counting overselling (2x):
+# Number of files = 5PB / 10GB ~= 524288 files
+# sizeof(namespace object) * 524288 ~= 89Byte * 524288 ~= 44MB of space
+# With a 16MB chunk size and 1GB per segment:
+# sizeof(segment object) * 2621440 ~= (32 + (1024/16) * 12) * 2621440 ~= 1.95 GB
+# Data volume: about 3GB
+# Record count: 524288 + 2621440 ~= about 3 million
 mds.cache.count=100000
 #
 # mysql Database config
 #
-# 数据库使用的database名称
+# Database name used by the mds
 mds.DbName=cluster_common_curve_mds
-# 数据库用户名
+# Database user name
 mds.DbUser=root
-# 数据库地址
+# Database address
 mds.DbUrl=localhost
-# 数据库登录密码
+# Database login password
 mds.DbPassword=qwer
 mds.DbPoolSize=128
 #
 # mds.session settings
 #
-# mds.session过期时间,单位us
+# mds.session expiration time, in us
 mds.session.leaseTimeUs=5000000
-# 能够容忍的client和mds之间的时钟不同步的时间,单位us
+# Tolerable clock skew between client and mds, in us
 mds.session.toleranceTimeUs=500000
-# mds.session后台扫描线程扫描间隔时间,单位us
+# Scan interval of the mds.session background scan thread, in us
 mds.session.intevalTimeUs=500000
 #
 # auth settings
 #
-# root用户密码
+# root user password
 mds.auth.rootPassword=root_password
 #
 # file lock setting
 #
-# mds的文件锁桶大小
+# File lock bucket size for the mds
 mds.filelock.bucketNum=8
 #
 # topology config
 #
-# Toplogy 定期刷新入数据库的时间间隔
+# Interval at which the topology is periodically flushed to the database
 mds.topology.TopologyUpdateToRepoSec=60
-# 请求chunkserver上创建全部copyset的超时时间
+# Timeout for requesting creation of all copysets on a chunkserver
 mds.topology.CreateCopysetRpcTimeoutMs=10000
-# 请求chunkserver上创建copyset重试次数
+# Retry count for copyset creation requests on a chunkserver
 mds.topology.CreateCopysetRpcRetryTimes=20
-# 请求chunkserver上创建copyset重试间隔
+# Retry interval for copyset creation requests on a chunkserver
 mds.topology.CreateCopysetRpcRetrySleepTimeMs=1000
-# Topology模块刷新metric时间间隔
+# Interval at which the topology module refreshes metrics
 mds.topology.UpdateMetricIntervalSec=1
-# 物理池使用百分比,即使用量超过这个值即不再往这个池分配
+# Physical pool usage limit in percent; once usage exceeds this value,
+# nothing more is allocated from this pool
 mds.topology.PoolUsagePercentLimit=90
-# 多pool选pool策略 0:Random, 1:Weight
+# Pool selection strategy when there are multiple pools. 0:Random, 1:Weight
 mds.topology.choosePoolPolicy=0
 #
 # copyset config
-# 默认值,为0时不启用
+# Default values; 0 means disabled
 #
-# 生成copyset重试次数
+# Retry count for copyset generation
 mds.copyset.copysetRetryTimes=10
-# 所有chunkserver的scatterWidth需满足的最大方差
+# Maximum variance that the scatterWidth of all chunkservers must satisfy
 mds.copyset.scatterWidthVariance=0
-# 所有chunkserver的scatterWidth需满足的最大标准差
+# Maximum standard deviation that the scatterWidth of all chunkservers
+# must satisfy
 mds.copyset.scatterWidthStandardDevation=0
-# 所有chunkserver的scatterWidth需满足的最大极差
+# Maximum range that the scatterWidth of all chunkservers must satisfy
 mds.copyset.scatterWidthRange=0
-# 所有chunkserver的scatterWidth偏离均值的百分比
-# scatterwidth偏移百分比设置过大,导致部分机器scatterwidth过小,影响机器恢复时间,恢复
-# 时间会导致集群的可靠性降低;其次,会导致部分机器scatterwith过大,某些chunkserver上的
-# copyset散落在各机器上,其他机器一旦写入,这些scatter-with较大的机器成为热点
-# scatterwidth偏移百分比设置过小,导致scatterwidth平均程度要求更大,copyset算法要求越高,
-# 导致算法可能算不出理想结果,建议设置值为20
+# Percentage by which the scatterWidth of a chunkserver may deviate from
+# the mean. If set too high, some machines end up with a scatterWidth that
+# is too small, which lengthens recovery time and reduces cluster
+# reliability; other machines end up with a scatterWidth that is too
+# large, so their copysets are scattered across many machines and they
+# become hotspots once those machines take writes. If set too low,
+# scatterWidth must be more uniform, which demands more of the copyset
+# algorithm and may keep it from finding a good result. The recommended
+# value is 20.
 mds.copyset.scatterWidthFloatingPercentage=20
 #
 # curvefs config
 #
-# curvefs的默认chunk size大小,16MB = 16*1024*1024 = 16777216
+# Default chunk size of curvefs, 16MB = 16*1024*1024 = 16777216
 mds.curvefs.defaultChunkSize=16777216
 #
 # chunkseverclient config
 #
-# rpc 超时时间
+# RPC timeout
 mds.chunkserverclient.rpcTimeoutMs=500
-# rpc 重试次数
+# RPC retry count
 mds.chunkserverclient.rpcRetryTimes=5
-# rpc 重试时间间隔
+# RPC retry interval
 mds.chunkserverclient.rpcRetryIntervalMs=500
-# 从copyset的每个chunkserver getleader的重试的最大轮次
+# Maximum number of getleader retry rounds over the chunkservers of a
+# copyset
 mds.chunkserverclient.updateLeaderRetryTimes=5
-# 从copyset的每个chunkserver getleader的每一轮的间隔,需大于raft选主的时间
+# Interval between getleader rounds over the chunkservers of a copyset;
+# must be longer than the raft leader election time
 mds.chunkserverclient.updateLeaderRetryIntervalMs=5000
 #
 # common options
 #
-# 日志存放文件夹
+# Log directory
 mds.common.logDir=./runlog/
diff --git a/test/integration/common/chunkservice_op.cpp b/test/integration/common/chunkservice_op.cpp
index d359d5e294..13e9f05954 100644
--- a/test/integration/common/chunkservice_op.cpp
+++ b/test/integration/common/chunkservice_op.cpp
@@ -31,9 +31,9 @@ namespace chunkserver {
 static constexpr uint32_t kOpRequestAlignSize = 4096;
 const PageSizeType kPageSize = kOpRequestAlignSize;

-int ChunkServiceOp::WriteChunk(struct ChunkServiceOpConf *opConf,
+int ChunkServiceOp::WriteChunk(struct ChunkServiceOpConf* opConf,
                               ChunkID chunkId, SequenceNum sn, off_t offset,
-                              size_t len, const char *data,
+                              size_t len, const char* data,
                               const std::string& cloneFileSource,
                               off_t cloneFileOffset) {
    PeerId leaderId(opConf->leaderPeer->address());
@@ -69,9 +69,9 @@ int ChunkServiceOp::WriteChunk(struct ChunkServiceOpConf *opConf,
    return status;
}

-int ChunkServiceOp::ReadChunk(struct ChunkServiceOpConf *opConf,
+int ChunkServiceOp::ReadChunk(struct ChunkServiceOpConf* opConf,
                              ChunkID chunkId, SequenceNum sn, off_t offset,
-                             size_t len, std::string *data,
+                             size_t len, std::string* data,
                              const std::string& cloneFileSource,
                              off_t cloneFileOffset) {
    PeerId leaderId(opConf->leaderPeer->address());
@@ -105,7 +105,7 @@ int ChunkServiceOp::ReadChunk(struct ChunkServiceOpConf *opConf,
    CHUNK_OP_STATUS status = response.status();
    LOG_IF(ERROR, status) << "read failed: " << CHUNK_OP_STATUS_Name(status);

-    // 读成功,复制内容到data
+    // Read succeeded; copy the content to data
    if (status == CHUNK_OP_STATUS_SUCCESS && data != nullptr) {
        cntl.response_attachment().copy_to(data,
                                           cntl.response_attachment().size());
@@ -114,10 +114,10 @@ int ChunkServiceOp::ReadChunk(struct ChunkServiceOpConf *opConf,
    return status;
}

-int ChunkServiceOp::ReadChunkSnapshot(struct ChunkServiceOpConf *opConf,
+int ChunkServiceOp::ReadChunkSnapshot(struct ChunkServiceOpConf* opConf,
                                      ChunkID chunkId, SequenceNum sn,
                                      off_t offset, size_t len,
-                                      std::string *data) {
+                                      std::string* data) {
PeerId leaderId(opConf->leaderPeer->address()); brpc::Channel channel; channel.Init(leaderId.addr, NULL); @@ -145,7 +145,7 @@ int ChunkServiceOp::ReadChunkSnapshot(struct ChunkServiceOpConf *opConf, LOG_IF(ERROR, status) << "readchunksnapshot failed: " << CHUNK_OP_STATUS_Name(status); - // 读成功,复制内容到data + // Successfully read, copy content to data if (status == CHUNK_OP_STATUS_SUCCESS && data != nullptr) { cntl.response_attachment().copy_to(data, cntl.response_attachment().size()); @@ -154,7 +154,7 @@ int ChunkServiceOp::ReadChunkSnapshot(struct ChunkServiceOpConf *opConf, return status; } -int ChunkServiceOp::DeleteChunk(struct ChunkServiceOpConf *opConf, +int ChunkServiceOp::DeleteChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn) { PeerId leaderId(opConf->leaderPeer->address()); brpc::Channel channel; @@ -184,7 +184,7 @@ int ChunkServiceOp::DeleteChunk(struct ChunkServiceOpConf *opConf, } int ChunkServiceOp::DeleteChunkSnapshotOrCorrectSn( - struct ChunkServiceOpConf *opConf, ChunkID chunkId, uint64_t correctedSn) { + struct ChunkServiceOpConf* opConf, ChunkID chunkId, uint64_t correctedSn) { PeerId leaderId(opConf->leaderPeer->address()); brpc::Channel channel; channel.Init(leaderId.addr, NULL); @@ -213,9 +213,9 @@ int ChunkServiceOp::DeleteChunkSnapshotOrCorrectSn( return status; } -int ChunkServiceOp::CreateCloneChunk(struct ChunkServiceOpConf *opConf, +int ChunkServiceOp::CreateCloneChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, - const std::string &location, + const std::string& location, uint64_t correctedSn, uint64_t sn, uint64_t chunkSize) { PeerId leaderId(opConf->leaderPeer->address()); @@ -249,7 +249,7 @@ int ChunkServiceOp::CreateCloneChunk(struct ChunkServiceOpConf *opConf, return status; } -int ChunkServiceOp::RecoverChunk(struct ChunkServiceOpConf *opConf, +int ChunkServiceOp::RecoverChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, off_t offset, size_t len) { PeerId leaderId(opConf->leaderPeer->address()); brpc::Channel channel; @@ -280,10 +280,10 @@ int ChunkServiceOp::RecoverChunk(struct ChunkServiceOpConf *opConf, return status; } -int ChunkServiceOp::GetChunkInfo(struct ChunkServiceOpConf *opConf, - ChunkID chunkId, SequenceNum *curSn, - SequenceNum *snapSn, - std::string *redirectedLeader) { +int ChunkServiceOp::GetChunkInfo(struct ChunkServiceOpConf* opConf, + ChunkID chunkId, SequenceNum* curSn, + SequenceNum* snapSn, + std::string* redirectedLeader) { PeerId leaderId(opConf->leaderPeer->address()); brpc::Channel channel; channel.Init(leaderId.addr, NULL); @@ -305,18 +305,18 @@ int ChunkServiceOp::GetChunkInfo(struct ChunkServiceOpConf *opConf, CHUNK_OP_STATUS status = response.status(); if (status == CHUNK_OP_STATUS_SUCCESS) { switch (response.chunksn().size()) { - case 2: - *snapSn = response.chunksn(1); - FALLTHROUGH_INTENDED; - case 1: - *curSn = response.chunksn(0); - break; - case 0: - return CHUNK_OP_STATUS_CHUNK_NOTEXIST; - default: - LOG(ERROR) << "GetChunkInfo failed, invalid chunkSn size: " - << response.chunksn().size(); - return -1; + case 2: + *snapSn = response.chunksn(1); + FALLTHROUGH_INTENDED; + case 1: + *curSn = response.chunksn(0); + break; + case 0: + return CHUNK_OP_STATUS_CHUNK_NOTEXIST; + default: + LOG(ERROR) << "GetChunkInfo failed, invalid chunkSn size: " + << response.chunksn().size(); + return -1; } } @@ -331,7 +331,7 @@ int ChunkServiceOp::GetChunkInfo(struct ChunkServiceOpConf *opConf, int ChunkServiceVerify::VerifyWriteChunk(ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, - const 
char *data, string *chunkData, + const char* data, string* chunkData, const std::string& cloneFileSource, off_t cloneFileOffset) { int ret = @@ -342,7 +342,8 @@ int ChunkServiceVerify::VerifyWriteChunk(ChunkID chunkId, SequenceNum sn, << ", offset=" << offset << ", len=" << len << ", cloneFileSource=" << cloneFileSource << ", cloneFileOffset=" << cloneFileOffset << ", ret=" << ret; - // chunk写成功,同步更新chunkData内容和existChunks_ + // Chunk successfully written, synchronously updating chunkData content and + // existChunks_ if (ret == CHUNK_OP_STATUS_SUCCESS && chunkData != nullptr) chunkData->replace(offset, len, data); existChunks_.insert(chunkId); @@ -352,7 +353,7 @@ int ChunkServiceVerify::VerifyWriteChunk(ChunkID chunkId, SequenceNum sn, int ChunkServiceVerify::VerifyReadChunk(ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, - string *chunkData, + string* chunkData, const std::string& cloneFileSource, off_t cloneFileOffset) { std::string data(len, 0); @@ -369,8 +370,7 @@ int ChunkServiceVerify::VerifyReadChunk(ChunkID chunkId, SequenceNum sn, if (ret != CHUNK_OP_STATUS_SUCCESS && ret != CHUNK_OP_STATUS_CHUNK_NOTEXIST) { return -1; - } else if (ret == CHUNK_OP_STATUS_SUCCESS && - !chunk_existed && + } else if (ret == CHUNK_OP_STATUS_SUCCESS && !chunk_existed && cloneFileSource.empty()) { LOG(ERROR) << "Unexpected read success, chunk " << chunkId << " should not existed"; @@ -381,20 +381,19 @@ int ChunkServiceVerify::VerifyReadChunk(ChunkID chunkId, SequenceNum sn, return -1; } - // 读成功,则判断内容是否与chunkData吻合 + // If read successfully, determine if the content matches chunkData if (ret == CHUNK_OP_STATUS_SUCCESS && chunkData != nullptr) { - // 查找数据有差异的位置 + // Find locations with data differences uint32_t i = 0; while (i < len && data[i] == (*chunkData)[offset + i]) ++i; - // 读取数据与预期相符,返回0 - if (i == len) - return 0; + // Read data that matches expectations, return 0 + if (i == len) return 0; LOG(ERROR) << "read data missmatch for chunk " << chunkId << ", from offset " << offset + i << ", read " << static_cast(data[i]) << ", expected " << static_cast((*chunkData)[offset + i]); - // 打印每个page的第一个字节 + // Print the first byte of each page uint32_t j = i / kPageSize * kPageSize; for (; j < len; j += kPageSize) { LOG(ERROR) << "chunk offset " << offset + j << ": read " @@ -409,7 +408,7 @@ int ChunkServiceVerify::VerifyReadChunk(ChunkID chunkId, SequenceNum sn, int ChunkServiceVerify::VerifyReadChunkSnapshot(ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, - string *chunkData) { + string* chunkData) { std::string data(len, 0); bool chunk_existed = existChunks_.find(chunkId) != std::end(existChunks_); @@ -431,20 +430,19 @@ int ChunkServiceVerify::VerifyReadChunkSnapshot(ChunkID chunkId, SequenceNum sn, return -1; } - // 读成功,则判断内容是否与chunkData吻合 + // If read successfully, determine if the content matches chunkData if (ret == CHUNK_OP_STATUS_SUCCESS && chunkData != nullptr) { - // 查找数据有差异的位置 + // Find locations with data differences int i = 0; while (i < len && data[i] == (*chunkData)[offset + i]) ++i; - // 读取数据与预期相符,返回0 - if (i == len) - return 0; + // Read data that matches expectations, return 0 + if (i == len) return 0; LOG(ERROR) << "read data missmatch for chunk " << chunkId << ", from offset " << offset + i << ", read " << static_cast(data[i]) << ", expected " << static_cast((*chunkData)[offset + i]); - // 打印每个4KB的第一个字节 + // Print the first byte of each 4KB int j = i / kPageSize * kPageSize; for (; j < len; j += kPageSize) { LOG(ERROR) << "chunk offset " << offset + j << ": read " 
@@ -461,8 +459,7 @@ int ChunkServiceVerify::VerifyDeleteChunk(ChunkID chunkId, SequenceNum sn) { int ret = ChunkServiceOp::DeleteChunk(opConf_, chunkId, sn); LOG(INFO) << "Delete Chunk " << chunkId << ", sn " << sn << ", ret=" << ret; - if (ret == CHUNK_OP_STATUS_SUCCESS) - existChunks_.erase(chunkId); + if (ret == CHUNK_OP_STATUS_SUCCESS) existChunks_.erase(chunkId); return ret; } @@ -477,7 +474,7 @@ int ChunkServiceVerify::VerifyDeleteChunkSnapshotOrCorrectSn( } int ChunkServiceVerify::VerifyCreateCloneChunk(ChunkID chunkId, - const std::string &location, + const std::string& location, uint64_t correctedSn, uint64_t sn, uint64_t chunkSize) { @@ -487,8 +484,7 @@ int ChunkServiceVerify::VerifyCreateCloneChunk(ChunkID chunkId, << location << ", correctedSn=" << correctedSn << ", sn=" << sn << ", chunkSize=" << chunkSize << ", ret=" << ret; - if (ret == CHUNK_OP_STATUS_SUCCESS) - existChunks_.insert(chunkId); + if (ret == CHUNK_OP_STATUS_SUCCESS) existChunks_.insert(chunkId); return ret; } @@ -517,31 +513,33 @@ int ChunkServiceVerify::VerifyGetChunkInfo(ChunkID chunkId, bool chunk_existed = existChunks_.find(chunkId) != std::end(existChunks_); switch (ret) { - case CHUNK_OP_STATUS_SUCCESS: - // 如果curSn或snapSn与预期不符,则返回-1 - LOG_IF(ERROR, (curSn != expCurSn || snapSn != expSnapSn)) - << "GetChunkInfo for " << chunkId << " failed, curSn=" << curSn - << ", expected " << expCurSn << "; snapSn=" << snapSn - << ", expected " << expSnapSn; - return (curSn != expCurSn || snapSn != expSnapSn) ? -1 : 0; - - case CHUNK_OP_STATUS_CHUNK_NOTEXIST: - // 如果chunk预期存在,则返回-1 - LOG_IF(ERROR, chunk_existed) - << "Unexpected GetChunkInfo NOTEXIST, chunk " << chunkId - << " must be existed"; - return chunk_existed ? -1 : 0; - - case CHUNK_OP_STATUS_REDIRECTED: - // 如果返回的redirectedLeader与给定的不符,则返回-1 - LOG_IF(ERROR, expLeader != redirectedLeader) - << "GetChunkInfo failed, redirected to " << redirectedLeader - << ", expected " << expLeader; - return (expLeader != redirectedLeader) ? -1 : 0; - - default: - LOG(ERROR) << "GetChunkInfo for " << chunkId << "failed, ret=" << ret; - return -1; + case CHUNK_OP_STATUS_SUCCESS: + // If curSn or snapSn does not match expectations, return -1 + LOG_IF(ERROR, (curSn != expCurSn || snapSn != expSnapSn)) + << "GetChunkInfo for " << chunkId << " failed, curSn=" << curSn + << ", expected " << expCurSn << "; snapSn=" << snapSn + << ", expected " << expSnapSn; + return (curSn != expCurSn || snapSn != expSnapSn) ? -1 : 0; + + case CHUNK_OP_STATUS_CHUNK_NOTEXIST: + // If chunk is expected to exist, return -1 + LOG_IF(ERROR, chunk_existed) + << "Unexpected GetChunkInfo NOTEXIST, chunk " << chunkId + << " must be existed"; + return chunk_existed ? -1 : 0; + + case CHUNK_OP_STATUS_REDIRECTED: + // If the redirectedLeader returned does not match the given, then + // -1 is returned + LOG_IF(ERROR, expLeader != redirectedLeader) + << "GetChunkInfo failed, redirected to " << redirectedLeader + << ", expected " << expLeader; + return (expLeader != redirectedLeader) ? 
-1 : 0;
+
+        default:
+            LOG(ERROR) << "GetChunkInfo for " << chunkId
+                       << " failed, ret=" << ret;
+            return -1;
    }

    LOG(ERROR) << "GetChunkInfo for " << chunkId << "failed, Illgal branch";
diff --git a/test/integration/common/chunkservice_op.h b/test/integration/common/chunkservice_op.h
index 28f32c6891..b6338ba888 100644
--- a/test/integration/common/chunkservice_op.h
+++ b/test/integration/common/chunkservice_op.h
@@ -24,9 +24,11 @@
 #define TEST_INTEGRATION_COMMON_CHUNKSERVICE_OP_H_

 #include
-#include
-#include
+
 #include
+#include
+#include
+
 #include "include/chunkserver/chunkserver_common.h"
 #include "proto/common.pb.h"
@@ -40,7 +42,7 @@ using std::string;
 #define NULL_SN -1

 struct ChunkServiceOpConf {
-    Peer *leaderPeer;
+    Peer* leaderPeer;
    LogicPoolID logicPoolId;
    CopysetID copysetId;
    uint32_t rpcTimeout;
};

 class ChunkServiceOp {
 public:
    /**
-     * @brief 通过chunkService写chunk
-     * @param opConf,leaderPeer/copysetid等公共配置参数
+     * @brief writes a chunk through chunkService
+     * @param opConf common configuration such as leaderPeer/copysetid
     * @param chunkId
-     * @param sn chunk版本
+     * @param sn chunk version
     * @param offset
     * @param len
-     * @param data 待写数据
-     * @param cloneFileSource clone源的文件路径
-     * @param cloneFileOffset clone chunk在clone源中的相对偏移
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @param data the data to write
+     * @param cloneFileSource the file path of the clone source
+     * @param cloneFileOffset relative offset of the clone chunk in the
+     * clone source
+     * @return -1 if the request fails to execute; otherwise the error code
     */
-    static int WriteChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId,
+    static int WriteChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId,
                          SequenceNum sn, off_t offset, size_t len,
-                          const char *data,
+                          const char* data,
                          const std::string& cloneFileSource = "",
                          off_t cloneFileOffset = 0);

    /**
-     * @brief 通过chunkService读chunk
-     * @param opConf,leaderPeer/copysetid等公共配置参数
+     * @brief reads a chunk through chunkService
+     * @param opConf common configuration such as leaderPeer/copysetid
     * @param chunkId
-     * @param sn chunk版本
+     * @param sn chunk version
     * @param offset
     * @param len
-     * @param data 读取内容
-     * @param cloneFileSource clone源的文件路径
-     * @param cloneFileOffset clone chunk在clone源中的相对偏移
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @param data buffer that receives the content read
+     * @param cloneFileSource the file path of the clone source
+     * @param cloneFileOffset relative offset of the clone chunk in the
+     * clone source
+     * @return -1 if the request fails to execute; otherwise the error code
     */
-    static int ReadChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId,
-                         SequenceNum sn, off_t offset, size_t len,
-                         string *data,
+    static int ReadChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId,
+                         SequenceNum sn, off_t offset, size_t len, string* data,
                         const std::string& cloneFileSource = "",
                         off_t cloneFileOffset = 0);

    /**
-     * @brief 通过chunkService读chunk快照
-     * @param opConf,leaderPeer/copysetid等公共配置参数
+     * @brief reads a chunk snapshot through chunkService
+     * @param opConf common configuration such as leaderPeer/copysetid
     * @param chunkId
-     * @param sn chunk版本
+     * @param sn chunk version
     * @param offset
     * @param len
-     * @param data 读取内容
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @param data buffer that receives the content read
+     * @return -1 if the request fails to execute; otherwise the error code
     */
-    static int ReadChunkSnapshot(struct ChunkServiceOpConf *opConf,
+    static int ReadChunkSnapshot(struct ChunkServiceOpConf* opConf,
                                 ChunkID chunkId, SequenceNum sn, off_t offset,
-                                 size_t len, std::string *data);
+                                 size_t len, std::string* data);

    /**
-     * @brief 通过chunkService删除chunk
-     * @param opConf,leaderPeer/copysetid等公共配置参数
+     * @brief deletes a chunk through chunkService
+     * @param opConf common configuration such as leaderPeer/copysetid
     * @param chunkId
-     * @param sn chunk版本
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @param sn chunk version
+     * @return -1 if the request fails to execute; otherwise the error code
     */
-    static int DeleteChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId,
+    static int DeleteChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId,
                           SequenceNum sn);

    /**
-     * @brief 通过chunkService删除此次转储时产生的或者历史遗留的快照
-     * 如果转储过程中没有产生快照,则修改chunk的correctedSn
-     * @param opConf,leaderPeer/copysetid等公共配置参数
+     * @brief deletes, through chunkService, the snapshot produced by this
+     * dump or left over from history; if the dump produced no snapshot,
+     * the chunk's correctedSn is modified instead
+     * @param opConf common configuration such as leaderPeer/copysetid
     * @param chunkId
     * @param correctedSn
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @return -1 if the request fails to execute; otherwise the error code
     */
-    static int DeleteChunkSnapshotOrCorrectSn(struct ChunkServiceOpConf *opConf,
+    static int DeleteChunkSnapshotOrCorrectSn(struct ChunkServiceOpConf* opConf,
                                              ChunkID chunkId,
                                              SequenceNum correctedSn);

    /**
-     * @brief 通过chunkService创建clone chunk
-     * @param opConf,leaderPeer/copysetid等公共配置参数
+     * @brief creates a clone chunk through chunkService
+     * @param opConf common configuration such as leaderPeer/copysetid
     * @param chunkId
-     * @param location 源chunk在源端的位置,可能在curve或S3上
+     * @param location location of the source chunk on the source side,
+     * which may be on curve or S3
     * @param correctedSn
     * @param sn
     * @param chunkSize
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @return -1 if the request fails to execute; otherwise the error code
     */
-    static int CreateCloneChunk(struct ChunkServiceOpConf *opConf,
-                                ChunkID chunkId, const std::string &location,
+    static int CreateCloneChunk(struct ChunkServiceOpConf* opConf,
+                                ChunkID chunkId, const std::string& location,
                                uint64_t correctedSn, uint64_t sn,
                                uint64_t chunkSize);

    /**
-     * @brief 通过chunkService恢复chunk
-     * @param opConf,leaderPeer/copysetid等公共配置参数
+     * @brief recovers a chunk through chunkService
+     * @param opConf common configuration such as leaderPeer/copysetid
     * @param chunkId
     * @param offset
     * @param len
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @return -1 if the request fails to execute; otherwise the error code
     */
-    static int RecoverChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId,
+    static int RecoverChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId,
                            off_t offset, size_t len);

    /**
-     * @brief 通过chunkService获取chunk元数据
-     * @param opConf,leaderPeer/copysetid等公共配置参数
+     * @brief obtains chunk metadata through chunkService
+     * @param opConf common configuration such as leaderPeer/copysetid
     * @param chunkId
-     * @param curSn 返回当前chunk版本
-     * @param snapSn 返回快照chunk版本
-     * @param redirectedLeader 返回重定向主节点
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @param curSn returns the current chunk version
+     * @param snapSn returns the snapshot chunk version
+     * @param redirectedLeader returns the leader redirected to
+     * @return -1 if the request fails to execute; otherwise the error code
     */
-    static int GetChunkInfo(struct ChunkServiceOpConf *opConf, ChunkID chunkId,
-                            SequenceNum *curSn, SequenceNum *snapSn,
-                            string *redirectedLeader);
+    static int GetChunkInfo(struct ChunkServiceOpConf* opConf, ChunkID chunkId,
+                            SequenceNum* curSn, SequenceNum* snapSn,
+                            string* redirectedLeader);
};
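// An illustrative usage sketch of the helpers above (not from the original
// sources; the values, and a pre-existing Peer object named leaderPeer, are
// examples; the conf fields are those of ChunkServiceOpConf in this header):
//
//   ChunkServiceOpConf conf{&leaderPeer, logicPoolId, copysetId, 5000};
//   std::string data(4096, 'x');
//   int rc = ChunkServiceOp::WriteChunk(&conf, /*chunkId=*/1, /*sn=*/1,
//                                       /*offset=*/0, data.size(),
//                                       data.c_str());
//   std::string readBack;
//   if (rc == CHUNK_OP_STATUS_SUCCESS)
//       rc = ChunkServiceOp::ReadChunk(&conf, 1, 1, 0, data.size(), &readBack);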
 class ChunkServiceVerify {
 public:
-    explicit ChunkServiceVerify(struct ChunkServiceOpConf *opConf)
+    explicit ChunkServiceVerify(struct ChunkServiceOpConf* opConf)
        : opConf_(opConf) {}

    /**
-     * @brief 执行写chunk, 并将数据写入到chunkdata对应区域,以便于后续验证数据。
+     * @brief performs a chunk write and also writes the data into the
+     * corresponding region of chunkData, so the data can be verified later.
     * @param chunkId
-     * @param sn chunk版本
+     * @param sn chunk version
     * @param offset
     * @param len
-     * @param data 待写数据
-     * @param chunkData 整个chunk的预期数据
-     * @param cloneFileSource clone源的文件路径
-     * @param cloneFileOffset clone chunk在clone源中的相对偏移
-     * @return 返回写操作的错误码
+     * @param data the data to write
+     * @param chunkData expected data for the entire chunk
+     * @param cloneFileSource the file path of the clone source
+     * @param cloneFileOffset relative offset of the clone chunk in the
+     * clone source
+     * @return the error code of the write operation
     */
    int VerifyWriteChunk(ChunkID chunkId, SequenceNum sn, off_t offset,
-                         size_t len, const char *data, string *chunkData,
+                         size_t len, const char* data, string* chunkData,
                         const std::string& cloneFileSource = "",
                         off_t cloneFileOffset = 0);

    /**
-     * @brief 执行读chunk, 并验证读取内容是否与chunkdata对应区域的预期数据吻合。
+     * @brief performs a chunk read and verifies that the content read
+     * matches the expected data in the corresponding region of chunkData.
     * @param chunkId
-     * @param sn chunk版本
+     * @param sn chunk version
     * @param offset
     * @param len
-     * @param chunkData 整个chunk的预期数据
-     * @param cloneFileSource clone源的文件路径
-     * @param cloneFileOffset clone chunk在clone源中的相对偏移
-     * @return 读请求结果符合预期返回0,否则返回-1
+     * @param chunkData expected data for the entire chunk
+     * @param cloneFileSource the file path of the clone source
+     * @param cloneFileOffset relative offset of the clone chunk in the
+     * clone source
+     * @return 0 if the read result meets expectations; -1 otherwise
     */
    int VerifyReadChunk(ChunkID chunkId, SequenceNum sn, off_t offset,
-                        size_t len, string *chunkData,
+                        size_t len, string* chunkData,
                        const std::string& cloneFileSource = "",
                        off_t cloneFileOffset = 0);
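    // An illustrative flow (names and sizes are examples, not from the
    // original sources): keep an in-memory image of the expected chunk
    // contents and hand it to every call, e.g.
    //
    //   ChunkServiceVerify verify(&conf);
    //   std::string expected(16 * 1024 * 1024, 0);  // expected chunk image
    //   verify.VerifyWriteChunk(chunkId, sn, 0, 4096, buf, &expected);
    //   verify.VerifyReadChunk(chunkId, sn, 0, 4096, &expected);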
    /**
-     * @brief 执行读chunk快照,
-     * 并验证读取内容是否与chunkdata对应区域的预期数据吻合。
+     * @brief performs a chunk snapshot read and verifies that the content
+     * read matches the expected data in the corresponding region of
+     * chunkData.
     * @param chunkId
-     * @param sn chunk版本
+     * @param sn chunk version
     * @param offset
     * @param len
-     * @param chunkData 整个chunk的预期数据
-     * @return 读请求结果符合预期返回0,否则返回-1
+     * @param chunkData expected data for the entire chunk
+     * @return 0 if the read result meets expectations; -1 otherwise
     */
    int VerifyReadChunkSnapshot(ChunkID chunkId, SequenceNum sn, off_t offset,
-                                size_t len, string *chunkData);
+                                size_t len, string* chunkData);

    /**
-     * @brief 删除chunk
+     * @brief deletes a chunk
     * @param chunkId
-     * @param sn chunk版本
-     * @return 返回删除操作的错误码
+     * @param sn chunk version
+     * @return the error code of the delete operation
     */
    int VerifyDeleteChunk(ChunkID chunkId, SequenceNum sn);

    /**
-     * @brief 删除chunk的快照
+     * @brief deletes the snapshot of a chunk
     * @param chunkId
     * @param correctedSn
-     * @return 返回删除操作的错误码
+     * @return the error code of the delete operation
     */
    int VerifyDeleteChunkSnapshotOrCorrectSn(ChunkID chunkId,
                                             SequenceNum correctedSn);

    /**
-     * @brief 创建clone chunk
+     * @brief creates a clone chunk
     * @param chunkId
-     * @param location 源地址
+     * @param location source address
     * @param correctedSn
     * @param sn
     * @param chunkSize
-     * @return 返回创建操作的错误码
+     * @return the error code of the create operation
     */
-    int VerifyCreateCloneChunk(ChunkID chunkId, const std::string &location,
+    int VerifyCreateCloneChunk(ChunkID chunkId, const std::string& location,
                               uint64_t correctedSn, uint64_t sn,
                               uint64_t chunkSize);

    /**
-     * @brief 恢复chunk
+     * @brief recovers a chunk
     * @param chunkId
     * @param offset
     * @param len
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @return -1 if the request fails to execute; otherwise the error code
     */
    int VerifyRecoverChunk(ChunkID chunkId, off_t offset, size_t len);

    /**
-     * @brief 获取chunk元数据,并检验结果是否符合预期
+     * @brief obtains chunk metadata and verifies that the result meets
+     * expectations
     * @param chunkId
-     * @param expCurSn 预期chunk版本,-1表示不存在
-     * @param expSanpSn 预期快照版本,-1表示不存在
-     * @param expLeader 预期redirectedLeader
-     * @return 验证成功返回0,否则返回-1
+     * @param expCurSn expected chunk version; -1 means it does not exist
+     * @param expSnapSn expected snapshot version; -1 means it does not exist
+     * @param expLeader expected redirectedLeader
+     * @return 0 if the verification succeeds; -1 otherwise
     */
    int VerifyGetChunkInfo(ChunkID chunkId, SequenceNum expCurSn,
                           SequenceNum expSnapSn, string expLeader);

 private:
-    struct ChunkServiceOpConf *opConf_;
-    // 记录写过的chunkId(预期存在),用于判断请求的返回值是否符合预期
+    struct ChunkServiceOpConf* opConf_;
+    // Records the chunkIds that have been written (and are expected to
+    // exist), used to judge whether a request's return value meets
+    // expectations
    std::set existChunks_;
};
diff --git a/test/integration/common/config_generator.h b/test/integration/common/config_generator.h
index e838aed61f..84e32f47d1 100644
--- a/test/integration/common/config_generator.h
+++ b/test/integration/common/config_generator.h
@@ -40,7 +40,7 @@ class CSTConfigGenerator : public ConfigGenerator {
    CSTConfigGenerator() {}
    ~CSTConfigGenerator() {}
    bool Init(const std::string& port) {
-        // 加载配置文件模板
+        // Load the configuration file template
        config_.SetConfigPath(DEFAULT_CHUNKSERVER_CONF);
        if (!config_.LoadConfig()) {
            return false;
diff --git a/test/integration/common/peer_cluster.cpp b/test/integration/common/peer_cluster.cpp
index f09db13283..ab335a4328 100644
--- a/test/integration/common/peer_cluster.cpp
+++ b/test/integration/common/peer_cluster.cpp
@@ -22,39 +22,38 @@
 #include
"test/integration/common/peer_cluster.h" -#include -#include #include +#include +#include +#include #include #include -#include -#include "src/chunkserver/cli2.h" -#include "src/chunkserver/register.h" +#include "proto/cli2.pb.h" #include "proto/copyset.pb.h" #include "src/chunkserver/chunkserver_helper.h" +#include "src/chunkserver/cli2.h" +#include "src/chunkserver/register.h" #include "src/fs/fs_common.h" -#include "proto/cli2.pb.h" namespace curve { namespace chunkserver { using curve::fs::FileSystemType; - -PeerCluster::PeerCluster(const std::string &clusterName, +PeerCluster::PeerCluster(const std::string& clusterName, const LogicPoolID logicPoolID, const CopysetID copysetID, - const std::vector &peers, - std::vector params, - std::map paramsIndexs) : - clusterName_(clusterName), - snapshotIntervalS_(1), - electionTimeoutMs_(1000), - paramsIndexs_(paramsIndexs), - params_(params), - isFakeMdsStart_(false) { + const std::vector& peers, + std::vector params, + std::map paramsIndexs) + : clusterName_(clusterName), + snapshotIntervalS_(1), + electionTimeoutMs_(1000), + paramsIndexs_(paramsIndexs), + params_(params), + isFakeMdsStart_(false) { logicPoolID_ = logicPoolID; copysetID_ = copysetID; for (auto it = peers.begin(); it != peers.end(); ++it) { @@ -63,7 +62,7 @@ PeerCluster::PeerCluster(const std::string &clusterName, } } -int PeerCluster::StartFakeTopoloyService(const std::string &listenAddr) { +int PeerCluster::StartFakeTopoloyService(const std::string& listenAddr) { if (isFakeMdsStart_) { return 0; } @@ -81,9 +80,7 @@ int PeerCluster::StartFakeTopoloyService(const std::string &listenAddr) { return ret; } -int PeerCluster::StartPeer(const Peer &peer, - int id, - const bool empty) { +int PeerCluster::StartPeer(const Peer& peer, int id, const bool empty) { LOG(INFO) << "going start peer: " << peer.address() << " " << id; auto it = peersMap_.find(peer.address()); if (it != peersMap_.end()) { @@ -109,18 +106,17 @@ int PeerCluster::StartPeer(const Peer &peer, LOG(ERROR) << "start peer fork failed"; return -1; } else if (0 == pid) { - /* 在子进程起一个 ChunkServer */ + /* Starting a ChunkServer in a child process */ StartPeerNode(id, params_[paramsIndexs_[id]]); exit(0); } LOG(INFO) << "start peer success, peer id = " << pid; peerNode->pid = pid; peerNode->state = PeerNodeState::RUNNING; - peersMap_.insert(std::pair>(peerId.to_string(), - std::move(peerNode))); + peersMap_.insert(std::pair>( + peerId.to_string(), std::move(peerNode))); - // 在创建copyset之前,先等chunkserver启动 + // Before creating a copyset, wait for chunkserver to start ::usleep(1500 * 1000); int ret = CreateCopyset(logicPoolID_, copysetID_, peer, peers_); @@ -133,7 +129,7 @@ int PeerCluster::StartPeer(const Peer &peer, return 0; } -int PeerCluster::ShutdownPeer(const Peer &peer) { +int PeerCluster::ShutdownPeer(const Peer& peer) { PeerId peerId(peer.address()); LOG(INFO) << "going to shutdown peer: " << peerId.to_string(); auto it = peersMap_.find(peerId.to_string()); @@ -141,8 +137,8 @@ int PeerCluster::ShutdownPeer(const Peer &peer) { int waitState; if (0 != kill(it->second->pid, SIGKILL)) { LOG(ERROR) << "Stop peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } waitpid(it->second->pid, &waitState, 0); @@ -156,7 +152,7 @@ int PeerCluster::ShutdownPeer(const Peer &peer) { } } -int PeerCluster::HangPeer(const Peer &peer) { +int PeerCluster::HangPeer(const Peer& peer) { LOG(INFO) << "peer cluster: hang 
" << peer.address(); PeerId peerId(peer.address()); auto it = peersMap_.find(peerId.to_string()); @@ -168,8 +164,8 @@ int PeerCluster::HangPeer(const Peer &peer) { } if (0 != kill(it->second->pid, SIGSTOP)) { LOG(ERROR) << "Hang peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } int waitState; @@ -184,7 +180,7 @@ int PeerCluster::HangPeer(const Peer &peer) { } } -int PeerCluster::SignalPeer(const Peer &peer) { +int PeerCluster::SignalPeer(const Peer& peer) { LOG(INFO) << "peer cluster: signal " << peer.address(); PeerId peerId(peer.address()); auto it = peersMap_.find(peerId.to_string()); @@ -196,8 +192,8 @@ int PeerCluster::SignalPeer(const Peer &peer) { } if (0 != kill(it->second->pid, SIGCONT)) { LOG(ERROR) << "Cont peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } int waitState; @@ -212,18 +208,17 @@ int PeerCluster::SignalPeer(const Peer &peer) { } } -int PeerCluster:: ConfirmLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const std::string& leaderAddr, - Peer *leader) { +int PeerCluster::ConfirmLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const std::string& leaderAddr, Peer* leader) { brpc::Channel channel; auto pos = leaderAddr.rfind(":"); std::string addr = leaderAddr.substr(0, pos); if (channel.Init(addr.c_str(), NULL) != 0) { - LOG(ERROR) <<"Fail to init channel to " << leaderAddr.c_str(); + LOG(ERROR) << "Fail to init channel to " << leaderAddr.c_str(); return -1; } - Peer *peer = new Peer(); + Peer* peer = new Peer(); CliService2_Stub stub(&channel); GetLeaderRequest2 request; GetLeaderResponse2 response; @@ -235,7 +230,7 @@ int PeerCluster:: ConfirmLeader(const LogicPoolID &logicPoolId, stub.GetLeader(&cntl, &request, &response, NULL); if (cntl.Failed()) { - LOG(ERROR) <<"confirm leader fail"; + LOG(ERROR) << "confirm leader fail"; return -1; } Peer leader2 = response.leader(); @@ -244,21 +239,21 @@ int PeerCluster:: ConfirmLeader(const LogicPoolID &logicPoolId, PeerId leaderId1; leaderId1.parse(leader->address()); if (leaderId2.is_empty()) { - LOG(ERROR) <<"Confirmed leaderId is null"; + LOG(ERROR) << "Confirmed leaderId is null"; return -1; } if (leaderId2 != leaderId1) { - LOG(INFO) << "twice leaderId is inconsistent, first is " - << leaderId1 << " second is " << leaderId2; + LOG(INFO) << "twice leaderId is inconsistent, first is " << leaderId1 + << " second is " << leaderId2; return -1; } return 0; } -int PeerCluster::WaitLeader(Peer *leaderPeer) { +int PeerCluster::WaitLeader(Peer* leaderPeer) { butil::Status status; /** - * 等待选举结束 + * Waiting for the election to end */ ::usleep(3 * electionTimeoutMs_ * 1000); const int kMaxLoop = (3 * electionTimeoutMs_) / 100; @@ -267,15 +262,17 @@ int PeerCluster::WaitLeader(Peer *leaderPeer) { status = GetLeader(logicPoolID_, copysetID_, conf_, leaderPeer); if (status.ok()) { /** - * 由于选举之后还需要提交应用 noop entry 之后才能提供服务, - * 所以这里需要等待 noop apply,这里等太短,可能容易失败,后期改进 + * Due to the need to submit the application noop entry after the + * election to provide services, So we need to wait for the noop + * application here. 
If the wait time is too short, it may be easy + * to fail, so we need to improve it later */ usleep(electionTimeoutMs_ * 1000); LOG(INFO) << "Wait leader success, leader is: " << leaderPeer->address(); std::string leaderAddr = leaderPeer->address(); - int ret = ConfirmLeader(logicPoolID_, copysetID_, - leaderAddr, leaderPeer); + int ret = + ConfirmLeader(logicPoolID_, copysetID_, leaderAddr, leaderPeer); if (ret == 0) { return ret; } @@ -299,9 +296,7 @@ int PeerCluster::StopAllPeers() { return 0; } -Configuration PeerCluster::CopysetConf() const { - return conf_; -} +Configuration PeerCluster::CopysetConf() const { return conf_; } int PeerCluster::SetsnapshotIntervalS(int snapshotIntervalS) { snapshotIntervalS_ = snapshotIntervalS; @@ -313,10 +308,10 @@ int PeerCluster::SetElectionTimeoutMs(int electionTimeoutMs) { return 0; } -int PeerCluster::StartPeerNode(int id, char *arg[]) { +int PeerCluster::StartPeerNode(int id, char* arg[]) { struct RegisterOptions opt; - opt.chunkserverMetaUri = "local://./" + std::to_string(id) + - "/chunkserver.dat"; + opt.chunkserverMetaUri = + "local://./" + std::to_string(id) + "/chunkserver.dat"; opt.fs = fs_; Register regist(opt); @@ -334,52 +329,43 @@ int PeerCluster::StartPeerNode(int id, char *arg[]) { return 0; } -const std::string PeerCluster::CopysetDirWithProtocol(const Peer &peer) { +const std::string PeerCluster::CopysetDirWithProtocol(const Peer& peer) { PeerId peerId(peer.address()); std::string copysetdir; - butil::string_printf(©setdir, - "local://./%s-%d-%d", + butil::string_printf(©setdir, "local://./%s-%d-%d", butil::ip2str(peerId.addr.ip).c_str(), - peerId.addr.port, - 0); + peerId.addr.port, 0); return copysetdir; } -const std::string PeerCluster::CopysetDirWithoutProtocol(const Peer &peer) { +const std::string PeerCluster::CopysetDirWithoutProtocol(const Peer& peer) { PeerId peerId(peer.address()); std::string copysetdir; - butil::string_printf(©setdir, - "./%s-%d-%d", + butil::string_printf(©setdir, "./%s-%d-%d", butil::ip2str(peerId.addr.ip).c_str(), - peerId.addr.port, - 0); + peerId.addr.port, 0); return copysetdir; } -const std::string PeerCluster::RemoveCopysetDirCmd(const Peer &peer) { +const std::string PeerCluster::RemoveCopysetDirCmd(const Peer& peer) { PeerId peerId(peer.address()); std::string cmd; - butil::string_printf(&cmd, - "rm -fr %d/copysets", peerId.addr.port); + butil::string_printf(&cmd, "rm -fr %d/copysets", peerId.addr.port); return cmd; } -const std::string PeerCluster::RemoveCopysetLogDirCmd(const Peer &peer, +const std::string PeerCluster::RemoveCopysetLogDirCmd(const Peer& peer, LogicPoolID logicPoolID, CopysetID copysetID) { PeerId peerId(peer.address()); std::string cmd; - butil::string_printf(&cmd, - "rm -fr %d/copysets/%s", - peerId.addr.port, + butil::string_printf(&cmd, "rm -fr %d/copysets/%s", peerId.addr.port, ToGroupIdString(logicPoolID, copysetID).c_str()); return cmd; } -int PeerCluster::CreateCopyset(LogicPoolID logicPoolID, - CopysetID copysetID, - Peer peer, - const std::vector& peers) { +int PeerCluster::CreateCopyset(LogicPoolID logicPoolID, CopysetID copysetID, + Peer peer, const std::vector& peers) { LOG(INFO) << "PeerCluster begin create copyset: " << ToGroupIdString(logicPoolID, copysetID); @@ -403,17 +389,17 @@ int PeerCluster::CreateCopyset(LogicPoolID logicPoolID, CopysetService_Stub stub(&channel); stub.CreateCopysetNode(&cntl, &request, &response, nullptr); if (cntl.Failed()) { - LOG(ERROR) << "failed create copsyet, " - << cntl.ErrorText() << std::endl; + LOG(ERROR) << "failed create 
copsyet, " << cntl.ErrorText() + << std::endl; ::usleep(1000 * 1000); continue; } - if (response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS - || response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { //NOLINT - LOG(INFO) << "create copyset " << ToGroupIdString(logicPoolID, - copysetID) - << " success."; + if (response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS || + response.status() == + COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { // NOLINT + LOG(INFO) << "create copyset " + << ToGroupIdString(logicPoolID, copysetID) << " success."; return 0; } @@ -423,14 +409,13 @@ int PeerCluster::CreateCopyset(LogicPoolID logicPoolID, return -1; } -int PeerCluster::PeerToId(const Peer &peer) { +int PeerCluster::PeerToId(const Peer& peer) { PeerId peerId(peer.address()); return peerId.addr.port; } -int PeerCluster::GetFollwerPeers(const std::vector& peers, - Peer leader, - std::vector *followers) { +int PeerCluster::GetFollwerPeers(const std::vector& peers, Peer leader, + std::vector* followers) { for (auto& peer : peers) { if (leader.address() != peer.address()) { followers->push_back(peer); @@ -442,28 +427,23 @@ int PeerCluster::GetFollwerPeers(const std::vector& peers, ChunkServerID PeerCluster::chunkServerId_ = 0; -std::shared_ptr PeerCluster::fs_ - = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); +std::shared_ptr PeerCluster::fs_ = + LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); /** - * 正常 I/O 验证,先写进去,再读出来验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, write it in first, then read it out for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 - * @param sn 本次写入的版本号 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO + * @param sn The version number written this time */ -void WriteThenReadVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop, - uint64_t sn) { +void WriteThenReadVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop, uint64_t sn) { LOG(INFO) << "Write then read verify: " << fillCh; PeerId leaderId(leaderPeer.address()); @@ -486,9 +466,8 @@ void WriteThenReadVerify(Peer leaderPeer, request.set_sn(sn); cntl.request_attachment().resize(length, fillCh); stub.WriteChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode() + << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); @@ -507,9 +486,8 @@ void WriteThenReadVerify(Peer leaderPeer, request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode() + << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); @@ -521,22 +499,17 @@ void WriteThenReadVerify(Peer leaderPeer, } /** - * 正常 I/O 验证,read 数据验证 - * 
@param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, read data verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +void ReadVerify(Peer leaderPeer, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkId, int length, char fillCh, int loop) { LOG(INFO) << "Read verify: " << fillCh; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -556,12 +529,10 @@ void ReadVerify(Peer leaderPeer, request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); std::string expectRead(length, fillCh); ASSERT_STREQ(expectRead.c_str(), cntl.response_attachment().to_string().c_str()); @@ -569,22 +540,18 @@ void ReadVerify(Peer leaderPeer, } /** - * 读chunk的snapshot进行验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify by reading the snapshot of the chunk + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadSnapshotVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +void ReadSnapshotVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { LOG(INFO) << "Read snapshot verify: " << fillCh; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -592,7 +559,7 @@ void ReadSnapshotVerify(Peer leaderPeer, ChunkService_Stub stub(&channel); - // 获取chunk的快照版本 + // Obtain the snapshot version of the chunk uint64_t snapSn; { brpc::Controller cntl; @@ -603,12 +570,10 @@ void ReadSnapshotVerify(Peer leaderPeer, request.set_copysetid(copysetId); request.set_chunkid(chunkId); stub.GetChunkInfo(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); ASSERT_EQ(2, response.chunksn_size()); snapSn = std::min(response.chunksn(0), response.chunksn(1)); } @@ -622,16 +587,14 @@ void ReadSnapshotVerify(Peer leaderPeer, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); 
request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(snapSn); stub.ReadChunkSnapshot(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); std::string expectRead(length, fillCh); ASSERT_STREQ(expectRead.c_str(), cntl.response_attachment().to_string().c_str()); @@ -639,18 +602,15 @@ void ReadSnapshotVerify(Peer leaderPeer, } /** - * 删除chunk的snapshot进行验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Delete snapshot of chunk for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id * @param csn corrected sn */ -void DeleteSnapshotVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - uint64_t csn) { +void DeleteSnapshotVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, uint64_t csn) { LOG(INFO) << "Delete snapshot verify, csn: " << csn; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -668,31 +628,25 @@ void DeleteSnapshotVerify(Peer leaderPeer, request.set_chunkid(chunkId); request.set_correctedsn(csn); stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } /** - * 异常I/O验证,read数据不符合预期 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Abnormal I/O verification, read data does not meet expectations + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadNotVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +void ReadNotVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { LOG(INFO) << "Read not verify: " << fillCh; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -712,12 +666,10 @@ void ReadNotVerify(Peer leaderPeer, request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); std::string 
expectRead(length, fillCh); ASSERT_STRNE(expectRead.c_str(), cntl.response_attachment().to_string().c_str()); @@ -725,22 +677,18 @@ void ReadNotVerify(Peer leaderPeer, } /** - * 通过read验证可用性 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify availability through read + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadVerifyNotAvailable(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +void ReadVerifyNotAvailable(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { LOG(INFO) << "Read verify not available: " << fillCh; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -760,32 +708,28 @@ void ReadVerifyNotAvailable(Peer leaderPeer, request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); LOG(INFO) << "read: " << CHUNK_OP_STATUS_Name(response.status()); ASSERT_TRUE(cntl.Failed() || - response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response.status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } } /** - * 通过write验证可用性 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify availability through write + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void WriteVerifyNotAvailable(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +void WriteVerifyNotAvailable(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { LOG(INFO) << "Write verify not available: " << fillCh; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -807,23 +751,22 @@ void WriteVerifyNotAvailable(Peer leaderPeer, request.set_sn(sn); cntl.request_attachment().resize(length, fillCh); stub.WriteChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_TRUE(cntl.Failed() || - response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response.status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } } /** - * 验证几个副本的copyset status是否一致 - * @param peerIds: 待验证的peers - * @param logicPoolID: 逻辑池id - * @param copysetId: 复制组id + * Verify if the copyset status of several replicas is consistent + * @param peerIds: peers to be verified + * @param logicPoolID: Logical Pool ID + * @param copysetId: Copy group ID */ -void CopysetStatusVerify(const 
std::vector &peers, - LogicPoolID logicPoolID, - CopysetID copysetId, +void CopysetStatusVerify(const std::vector& peers, + LogicPoolID logicPoolID, CopysetID copysetId, uint64_t expectEpoch) { std::vector resps; for (Peer peer : peers) { @@ -838,7 +781,7 @@ void CopysetStatusVerify(const std::vector &peers, cntl.set_timeout_ms(2000); request.set_logicpoolid(logicPoolID); request.set_copysetid(copysetId); - Peer *peerP = new Peer(); + Peer* peerP = new Peer(); request.set_allocated_peer(peerP); peerP->set_address(peerId.to_string()); request.set_queryhash(true); @@ -847,7 +790,8 @@ void CopysetStatusVerify(const std::vector &peers, ASSERT_FALSE(cntl.Failed()); LOG(INFO) << peerId.to_string() << "'s status is: \n" << response.DebugString(); - // 多个副本的state是不一样的,因为有leader,也有follower + // The states of multiple replicas are different because there are + // leaders and followers response.clear_state(); response.clear_peer(); response.clear_firstindex(); @@ -869,20 +813,15 @@ void CopysetStatusVerify(const std::vector &peers, } } - - -void TransferLeaderAssertSuccess(PeerCluster *cluster, - const Peer &targetLeader, +void TransferLeaderAssertSuccess(PeerCluster* cluster, const Peer& targetLeader, braft::cli::CliOptions opt) { Peer leaderPeer; const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(cluster->GetLogicPoolId(), - cluster->GetCopysetId(), - cluster->CopysetConf(), - targetLeader, - opt); + status = + TransferLeader(cluster->GetLogicPoolId(), cluster->GetCopysetId(), + cluster->CopysetConf(), targetLeader, opt); if (0 == status.error_code()) { cluster->WaitLeader(&leaderPeer); if (leaderPeer.address() == targetLeader.address()) { @@ -891,8 +830,7 @@ void TransferLeaderAssertSuccess(PeerCluster *cluster, } ::sleep(1); } - ASSERT_STREQ(targetLeader.address().c_str(), - leaderPeer.address().c_str()); + ASSERT_STREQ(targetLeader.address().c_str(), leaderPeer.address().c_str()); } } // namespace chunkserver diff --git a/test/integration/common/peer_cluster.h b/test/integration/common/peer_cluster.h index 4a5fcacb58..24b2c2d63e 100644 --- a/test/integration/common/peer_cluster.h +++ b/test/integration/common/peer_cluster.h @@ -23,29 +23,29 @@ #ifndef TEST_INTEGRATION_COMMON_PEER_CLUSTER_H_ #define TEST_INTEGRATION_COMMON_PEER_CLUSTER_H_ +#include +#include #include #include #include -#include -#include -#include -#include -#include #include -#include #include +#include +#include +#include +#include -#include "src/chunkserver/datastore/file_pool.h" #include "include/chunkserver/chunkserver_common.h" -#include "src/fs/local_filesystem.h" -#include "src/chunkserver/copyset_node.h" #include "proto/common.pb.h" #include "proto/topology.pb.h" +#include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/datastore/file_pool.h" +#include "src/fs/local_filesystem.h" -using ::curve::mds::topology::TopologyService; using ::curve::mds::topology::ChunkServerRegistRequest; using ::curve::mds::topology::ChunkServerRegistResponse; +using ::curve::mds::topology::TopologyService; namespace curve { namespace chunkserver { @@ -53,37 +53,37 @@ namespace chunkserver { using curve::common::Peer; /** - * PeerNode 状态 - * 1. exit:未启动,或者被关闭 - * 2. running:正在运行 - * 3. stop:hang 住了 + * PeerNode status + * 1. exit: Not started or closed + * 2. running: Running + * 3. 
stop: hung (stopped by SIGSTOP) */ enum class PeerNodeState { - EXIT = 0, // 退出 - RUNNING = 1, // 正在运行 - STOP = 2, // hang住 + EXIT = 0, // Exited + RUNNING = 1, // Running + STOP = 2, // Hung }; /** - * 一个 ChunkServer 进程,包含某个 Copyset 的某个副本 + * A ChunkServer process holding one replica of a copyset */ struct PeerNode { PeerNode() : pid(0), state(PeerNodeState::EXIT) {} - // Peer对应的进程id + // Process ID corresponding to the Peer pid_t pid; // Peer Peer peer; - // copyset的集群配置 + // Cluster configuration of the copyset Configuration conf; - // PeerNode的状态 + // Status of the PeerNode PeerNodeState state; }; class FakeTopologyService : public TopologyService { void RegistChunkServer(google::protobuf::RpcController* cntl_base, - const ChunkServerRegistRequest* request, - ChunkServerRegistResponse* response, - google::protobuf::Closure* done) { + const ChunkServerRegistRequest* request, + ChunkServerRegistResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); response->set_statuscode(0); response->set_chunkserverid(request->chunkserverid()); @@ -92,16 +92,13 @@ class FakeTopologyService : public TopologyService { }; /** - * 封装模拟cluster测试相关的接口 + * Wraps the interfaces used for simulated-cluster testing */ class PeerCluster { public: - PeerCluster(const std::string &clusterName, - const LogicPoolID logicPoolID, - const CopysetID copysetID, - const std::vector &peers, - std::vector params, - std::map paramsIndexs); + PeerCluster(const std::string& clusterName, const LogicPoolID logicPoolID, + const CopysetID copysetID, const std::vector& peers, + std::vector params, std::map paramsIndexs); virtual ~PeerCluster() { StopAllPeers(); if (isFakeMdsStart_) { @@ -116,139 +113,134 @@ class PeerCluster { * * @return 0 for success, -1 for failed */ - int StartFakeTopoloyService(const std::string &listenAddr); + int StartFakeTopoloyService(const std::string& listenAddr); /** - * 启动一个 Peer + * Start a peer * @param peer - * @param empty初始化配置是否为空 - * @return 0,成功;-1,失败 + * @param empty whether the initial configuration is empty + * @return 0 on success, -1 on failure */ - int StartPeer(const Peer &peer, - int id, - const bool empty = false); + int StartPeer(const Peer& peer, int id, const bool empty = false); /** - * 关闭一个peer,使用SIGINT + * Shut down a peer with SIGKILL * @param peer - * @return 0 成功;-1 失败 + * @return 0 on success, -1 on failure */ - int ShutdownPeer(const Peer &peer); - + int ShutdownPeer(const Peer& peer); /** - * hang住一个peer,使用SIGSTOP + * Hang a peer with SIGSTOP * @param peer - * @return 0成功;-1失败 + * @return 0 on success, -1 on failure */ - int HangPeer(const Peer &peer); + int HangPeer(const Peer& peer); /** - * 恢复hang住的peer,使用SIGCONT - * @param peer - * @return 0:成功,-1 失败 - */ - int SignalPeer(const Peer &peer); + * Resume a hung peer with SIGCONT + * @param peer + * @return 0 on success, -1 on failure + */ + int SignalPeer(const Peer& peer); /** - * 反复重试直到等到新的leader产生 - * @param leaderPeer出参,返回leader info - * @return 0,成功;-1 失败 + * Retry until a new leader is elected + * @param leaderPeer out parameter, returns the leader info + * @return 0 on success, -1 on failure */ - int WaitLeader(Peer *leaderPeer); + int WaitLeader(Peer* leaderPeer); /** - * confirm leader + * Confirm the leader * @param: LogicPoolID logicalPool id * @param: copysetId copyset id * @param: leaderAddr leader address - * @param: leader leader info - * @return 0,成功;-1 失败 + * @param: leader the returned leader info + * @return 0 on success, -1 on failure */ - int ConfirmLeader(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const std::string& leaderAddr, - Peer *leader); - + int ConfirmLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const std::string& leaderAddr, + Peer* leader); /** - * Stop所有的peer - * @return 0,成功;-1 失败 + * Stop all peers + * @return 0 on success, -1 on failure */ int StopAllPeers(); public: - /* 返回集群当前的配置 */ + /* Returns the current configuration of the cluster */ Configuration CopysetConf() const; - LogicPoolID GetLogicPoolId() const {return logicPoolID_;} + LogicPoolID GetLogicPoolId() const { return logicPoolID_; } - CopysetID GetCopysetId() const {return copysetID_;} + CopysetID GetCopysetId() const { return copysetID_; } - void SetWorkingCopyset(CopysetID copysetID) {copysetID_ = copysetID;} + void SetWorkingCopyset(CopysetID copysetID) { copysetID_ = copysetID; } - /* 修改 PeerNode 配置相关的接口,单位: s */ + /* Interfaces for modifying the PeerNode configuration, unit: seconds */ int SetsnapshotIntervalS(int snapshotIntervalS); int SetElectionTimeoutMs(int electionTimeoutMs); - static int StartPeerNode(int id, char *arg[]); + static int StartPeerNode(int id, char* arg[]); - static int PeerToId(const Peer &peer); + static int PeerToId(const Peer& peer); - static int GetFollwerPeers(const std::vector& peers, - Peer leader, - std::vector *followers); + static int GetFollwerPeers(const std::vector& peers, Peer leader, + std::vector* followers); public: /** - * 返回执行peer的copyset路径with protocol, ex: local://./127.0.0.1:9101:0 + * Returns the given peer's copyset path with protocol, ex: + * local://./127.0.0.1:9101:0 */ - static const std::string CopysetDirWithProtocol(const Peer &peer); + static const std::string CopysetDirWithProtocol(const Peer& peer); /** - * 返回执行peer的copyset路径without protocol, ex: ./127.0.0.1:9101:0 + * Returns the given peer's copyset path without protocol, ex: + * ./127.0.0.1:9101:0 */ - static const std::string CopysetDirWithoutProtocol(const Peer &peer); + static const std::string CopysetDirWithoutProtocol(const Peer& peer);
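For reference, the hang/resume/shutdown semantics documented above are plain POSIX job control: HangPeer() sends SIGSTOP, SignalPeer() sends SIGCONT, and ShutdownPeer() sends SIGKILL to the forked chunkserver process. A minimal, self-contained sketch of that mechanism (independent of PeerCluster; error handling elided):

    #include <signal.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main() {
        pid_t pid = fork();
        if (pid == 0) {
            for (;;) ::usleep(1000);         // child: stands in for a chunkserver
        }
        int state;
        ::kill(pid, SIGSTOP);                // "hang" the peer
        ::waitpid(pid, &state, WUNTRACED);   // returns once the child has stopped
        ::kill(pid, SIGCONT);                // "resume" the peer
        ::waitpid(pid, &state, WCONTINUED);  // returns once it is running again
        ::kill(pid, SIGKILL);                // "shutdown"
        ::waitpid(pid, &state, 0);           // reap the child
        return 0;
    }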
/** * Returns the command that removes the peer's copyset dir */ - static const std::string RemoveCopysetDirCmd(const Peer &peer); + static const std::string RemoveCopysetDirCmd(const Peer& peer); - static const std::string RemoveCopysetLogDirCmd(const Peer &peer, + static const std::string RemoveCopysetLogDirCmd(const Peer& peer, LogicPoolID logicPoolID, CopysetID copysetID); - static int CreateCopyset(LogicPoolID logicPoolID, - CopysetID copysetID, - Peer peer, - const std::vector& peers); + static int CreateCopyset(LogicPoolID logicPoolID, CopysetID copysetID, + Peer peer, const std::vector& peers); private: - // 集群名字 - std::string clusterName_; - // 集群的peer集合 - std::vector peers_; - // peer集合的映射map + // Cluster name + std::string clusterName_; + // The peer set of the cluster + std::vector peers_; + // Map from a peer's address to its PeerNode std::unordered_map> peersMap_; - // 快照间隔 - int snapshotIntervalS_; - // 选举超时时间 - int electionTimeoutMs_; - // 集群成员配置 - Configuration conf_; - - // 逻辑池id - LogicPoolID logicPoolID_; - // 复制组id - CopysetID copysetID_; + // Snapshot interval + int snapshotIntervalS_; + // Election timeout + int electionTimeoutMs_; + // Cluster member configuration + Configuration conf_; + + // Logical pool ID + LogicPoolID logicPoolID_; + // Copyset ID + CopysetID copysetID_; // chunkserver id - static ChunkServerID chunkServerId_; - // 文件系统适配层 + static ChunkServerID chunkServerId_; + // File system adaptation layer static std::shared_ptr fs_; - // chunkserver启动传入参数的映射关系(chunkserver id: params_'s index) + // Maps a chunkserver id to the index of its startup arguments + // (chunkserver id: params_'s index) std::map paramsIndexs_; - // chunkserver启动需要传递的参数列表 - std::vector params_; + // List of arguments passed when starting a chunkserver + std::vector params_; // fake mds server brpc::Server fakeMdsServer_; @@ -259,148 +251,117 @@ class PeerCluster { }; /** - * 正常 I/O 验证,先写进去,再读出来验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification: write the data first, then read it back to verify + * @param leaderId Leader ID + * @param logicPoolId Logical pool ID + * @param copysetId Copyset ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 - * @param sn 本次写入的版本号 + * @param length The length of each IO + * @param fillCh The character used to fill each IO + * @param loop The number of times the IO is repeated + * @param sn The version number (sequence number) of this write */ -void WriteThenReadVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop, - uint64_t sn = 1); +void WriteThenReadVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop, uint64_t sn = 1);
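Taken together, the cluster and verification helpers above compose into the usual test pattern. A condensed sketch, assuming a fixture that has already prepared peers, params, and paramsIndexs (all values here are placeholders, not taken from a real test):

    // Bring up a three-peer copyset, wait for a leader, then do verified I/O.
    PeerCluster cluster("sketch_cluster", logicPoolId, copysetId, peers,
                        params, paramsIndexs);
    for (const Peer& peer : peers) {
        ASSERT_EQ(0, cluster.StartPeer(peer, PeerCluster::PeerToId(peer)));
    }
    Peer leader;
    ASSERT_EQ(0, cluster.WaitLeader(&leader));
    // Write 4KB of 'a' ten times, read each write back, and compare.
    WriteThenReadVerify(leader, logicPoolId, copysetId, /*chunkId=*/1,
                        /*length=*/4096, /*fillCh=*/'a', /*loop=*/10);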
/** - * 正常 I/O 验证,read 数据验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification: read the data and verify it + * @param leaderId Leader ID + * @param logicPoolId Logical pool ID + * @param copysetId Copyset ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh The character used to fill each IO + * @param loop The number of times the IO is repeated */ -void ReadVerify(Peer leaderPeer, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkId, int length, char fillCh, int loop); /** - * 读chunk的snapshot进行验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify by reading the chunk's snapshot + * @param leaderId Leader ID + * @param logicPoolId Logical pool ID + * @param copysetId Copyset ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh The character used to fill each IO + * @param loop The number of times the IO is repeated */ -void ReadSnapshotVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop); +void ReadSnapshotVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop); /** - * 删除chunk的snapshot进行验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param csn corrected sn + * Delete the chunk's snapshot for verification + * @param leaderId Leader ID + * @param logicPoolId Logical pool ID + * @param copysetId Copyset ID + * @param chunkId chunk id + * @param csn corrected sn */ -void DeleteSnapshotVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - uint64_t csn); +void DeleteSnapshotVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, uint64_t csn); /** - * 异常I/O验证,read数据不符合预期 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Abnormal I/O verification: the read data does not match expectations + * @param leaderId Leader ID + * @param logicPoolId Logical pool ID + * @param copysetId Copyset ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh The character used to fill each IO + * @param loop The number of times the IO is repeated */ -void ReadNotVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop); +void ReadNotVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop); /** - * 通过read验证可用性 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify availability through read + * @param leaderId Leader ID + * @param logicPoolId Logical pool ID + * @param copysetId Copyset ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh The character used to fill each IO + * @param loop The number of times the IO is repeated */ -void ReadVerifyNotAvailable(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop); +void ReadVerifyNotAvailable(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop); /** - * 通过write验证可用性 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify availability through write + * @param leaderId Leader ID + * @param logicPoolId Logical pool ID + * @param copysetId Copyset ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh The character used to fill each IO + * @param loop The number of times the IO is repeated */ -void WriteVerifyNotAvailable(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop); +void WriteVerifyNotAvailable(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop); /** - * 验证几个副本的copyset status是否一致 - * @param peerIds: 待验证的peers - * @param logicPoolID: 逻辑池id - * @param copysetId: 复制组id + * Verify that the copyset status of the replicas is consistent + * @param peerIds: Peers to be verified + * @param logicPoolID: Logical pool ID + * @param copysetId: Copyset ID */ -void CopysetStatusVerify(const std::vector &peers, - LogicPoolID logicPoolID, - CopysetID copysetId, +void CopysetStatusVerify(const std::vector& peers, + LogicPoolID logicPoolID, CopysetID copysetId, uint64_t expectEpoch = 0); /** - * transfer leader,并且预期能够成功 - * @param cluster: 集群的指针 - * @param targetLeader: 期望tranfer的目标节点 - * @param opt: tranfer 请求使用的 clioption + * Transfer leader, expecting it to succeed + * @param cluster: Pointer to the cluster + * @param targetLeader: The target node of the transfer + * @param opt: The cli options used in the transfer request */ -void TransferLeaderAssertSuccess(PeerCluster *cluster, - const Peer &targetLeader, +void TransferLeaderAssertSuccess(PeerCluster* cluster, const Peer& targetLeader, braft::cli::CliOptions opt); } // namespace chunkserver diff --git a/test/integration/heartbeat/common.cpp b/test/integration/heartbeat/common.cpp index 5d09293287..ae597506bc 100644 --- a/test/integration/heartbeat/common.cpp +++ b/test/integration/heartbeat/common.cpp @@ -21,44 +21,44 @@ */ #include "test/integration/heartbeat/common.h" + #include "test/mds/mock/mock_alloc_statistic.h" namespace curve { namespace mds { -void HeartbeatIntegrationCommon::PrepareAddPoolset( - const Poolset &poolset) { +void HeartbeatIntegrationCommon::PrepareAddPoolset(const Poolset& poolset) { int ret = topology_->AddPoolset(poolset); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret); } void HeartbeatIntegrationCommon::PrepareAddLogicalPool( - const LogicalPool &lpool) { + const LogicalPool& lpool) { int ret = topology_->AddLogicalPool(lpool); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; } void HeartbeatIntegrationCommon::PrepareAddPhysicalPool( - const PhysicalPool &ppool) { + const PhysicalPool& ppool) { int ret = topology_->AddPhysicalPool(ppool); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret); } -void HeartbeatIntegrationCommon::PrepareAddZone(const Zone &zone) { +void HeartbeatIntegrationCommon::PrepareAddZone(const Zone& zone) { int ret = topology_->AddZone(zone); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } -void HeartbeatIntegrationCommon::PrepareAddServer(const Server &server) { +void HeartbeatIntegrationCommon::PrepareAddServer(const Server& server) { int ret = topology_->AddServer(server); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } void HeartbeatIntegrationCommon::PrepareAddChunkServer( - const ChunkServer &chunkserver) { + const ChunkServer& chunkserver) { ChunkServer cs(chunkserver); cs.SetOnlineState(OnlineState::ONLINE); int ret = topology_->AddChunkServer(cs); @@ -68,7 +68,7 @@ void HeartbeatIntegrationCommon::PrepareAddChunkServer( void 
HeartbeatIntegrationCommon::PrepareAddCopySet( CopySetIdType copysetId, PoolIdType logicalPoolId, - const std::set &members) { + const std::set& members) { CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); int ret = topology_->AddCopySet(cs); @@ -78,10 +78,10 @@ void HeartbeatIntegrationCommon::PrepareAddCopySet( void HeartbeatIntegrationCommon::UpdateCopysetTopo( CopySetIdType copysetId, PoolIdType logicalPoolId, uint64_t epoch, - ChunkServerIdType leader, const std::set &members, + ChunkServerIdType leader, const std::set& members, ChunkServerIdType candidate) { ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE(topology_->GetCopySet(CopySetKey{ logicalPoolId, copysetId }, + ASSERT_TRUE(topology_->GetCopySet(CopySetKey{logicalPoolId, copysetId}, ©setInfo)); copysetInfo.SetEpoch(epoch); copysetInfo.SetLeader(leader); @@ -93,8 +93,8 @@ void HeartbeatIntegrationCommon::UpdateCopysetTopo( } void HeartbeatIntegrationCommon::SendHeartbeat( - const ChunkServerHeartbeatRequest &request, bool expectFailed, - ChunkServerHeartbeatResponse *response) { + const ChunkServerHeartbeatRequest& request, bool expectFailed, + ChunkServerHeartbeatResponse* response) { // init brpc client brpc::Channel channel; ASSERT_EQ(0, channel.Init(listenAddr_.c_str(), NULL)); @@ -109,7 +109,7 @@ void HeartbeatIntegrationCommon::SendHeartbeat( } void HeartbeatIntegrationCommon::BuildBasicChunkServerRequest( - ChunkServerIdType id, ChunkServerHeartbeatRequest *req) { + ChunkServerIdType id, ChunkServerHeartbeatRequest* req) { ChunkServer out; EXPECT_TRUE(topology_->GetChunkServer(id, &out)) << "get chunkserver: " << id << " fail"; @@ -139,7 +139,7 @@ void HeartbeatIntegrationCommon::BuildBasicChunkServerRequest( } void HeartbeatIntegrationCommon::AddCopySetToRequest( - ChunkServerHeartbeatRequest *req, const CopySetInfo &csInfo, + ChunkServerHeartbeatRequest* req, const CopySetInfo& csInfo, ConfigChangeType type) { auto info = req->add_copysetinfos(); info->set_logicalpoolid(csInfo.GetLogicalPoolId()); @@ -170,7 +170,7 @@ void HeartbeatIntegrationCommon::AddCopySetToRequest( << "get chunkserver: " << csInfo.GetCandidate() << " error"; std::string ipport = out.GetHostIp() + ":" + std::to_string(out.GetPort()) + ":0"; - ConfigChangeInfo *confChxInfo = new ConfigChangeInfo(); + ConfigChangeInfo* confChxInfo = new ConfigChangeInfo(); auto replica = new ::curve::common::Peer(); replica->set_address(ipport.c_str()); confChxInfo->set_allocated_peer(replica); @@ -180,13 +180,13 @@ void HeartbeatIntegrationCommon::AddCopySetToRequest( } } -void HeartbeatIntegrationCommon::AddOperatorToOpController(const Operator &op) { +void HeartbeatIntegrationCommon::AddOperatorToOpController(const Operator& op) { auto opController = coordinator_->GetOpController(); ASSERT_TRUE(opController->AddOperator(op)); } void HeartbeatIntegrationCommon::RemoveOperatorFromOpController( - const CopySetKey &id) { + const CopySetKey& id) { auto opController = coordinator_->GetOpController(); opController->RemoveOperator(id); } @@ -243,11 +243,11 @@ void HeartbeatIntegrationCommon::PrepareBasicCluseter() { PrepareAddChunkServer(cs3); // add copyset - PrepareAddCopySet(1, 1, std::set{ 1, 2, 3 }); + PrepareAddCopySet(1, 1, std::set{1, 2, 3}); } void HeartbeatIntegrationCommon::InitHeartbeatOption( - Configuration *conf, HeartbeatOption *heartbeatOption) { + Configuration* conf, HeartbeatOption* heartbeatOption) { heartbeatOption->heartbeatIntervalMs = conf->GetIntValue("mds.heartbeat.intervalMs"); 
heartbeatOption->heartbeatMissTimeOutMs = @@ -259,7 +259,7 @@ void HeartbeatIntegrationCommon::InitHeartbeatOption( } void HeartbeatIntegrationCommon::InitSchedulerOption( - Configuration *conf, ScheduleOption *scheduleOption) { + Configuration* conf, ScheduleOption* scheduleOption) { scheduleOption->enableCopysetScheduler = conf->GetBoolValue("mds.enable.copyset.scheduler"); scheduleOption->enableLeaderScheduler = @@ -305,22 +305,20 @@ void HeartbeatIntegrationCommon::BuildBasicCluster() { auto idGen = std::make_shared(); auto tokenGen = std::make_shared(); - auto topologyStorage = - std::make_shared(); + auto topologyStorage = std::make_shared(); topology_ = std::make_shared(idGen, tokenGen, topologyStorage); ASSERT_EQ(kTopoErrCodeSuccess, topology_->Init(topologyOption)); // init topology manager - topologyStat_ = - std::make_shared(topology_); + topologyStat_ = std::make_shared(topology_); topologyStat_->Init(); auto copysetManager = std::make_shared(CopysetOption()); auto allocStat = std::make_shared(); auto topologyServiceManager = std::make_shared( topology_, topologyStat_, nullptr, copysetManager, nullptr); - // 初始化basic集群 + // Initialize basic cluster PrepareBasicCluseter(); // init coordinator @@ -341,7 +339,7 @@ void HeartbeatIntegrationCommon::BuildBasicCluster() { heartbeatManager_->Init(); heartbeatManager_->Run(); - // 启动心跳rpc + // Start heartbeat rpc listenAddr_ = conf_.GetStringValue("mds.listen.addr"); heartbeatService_ = std::make_shared(heartbeatManager_); diff --git a/test/integration/heartbeat/common.h b/test/integration/heartbeat/common.h index b281d5a9ab..7787a22910 100644 --- a/test/integration/heartbeat/common.h +++ b/test/integration/heartbeat/common.h @@ -23,41 +23,41 @@ #ifndef TEST_INTEGRATION_HEARTBEAT_COMMON_H_ #define TEST_INTEGRATION_HEARTBEAT_COMMON_H_ -#include -#include #include #include +#include +#include -#include -#include //NOLINT -#include //NOLINT -#include +#include //NOLINT #include +#include +#include #include +#include //NOLINT +#include #include -#include +#include "proto/common.pb.h" +#include "proto/heartbeat.pb.h" +#include "proto/topology.pb.h" #include "src/common/configuration.h" -#include "src/mds/topology/topology_config.h" +#include "src/common/timeutility.h" #include "src/mds/common/mds_define.h" -#include "src/mds/topology/topology_item.h" -#include "src/mds/topology/topology.h" -#include "src/mds/topology/topology_token_generator.h" -#include "src/mds/topology/topology_service_manager.h" -#include "src/mds/topology/topology_storge.h" -#include "src/mds/nameserver2/idgenerator/chunk_id_generator.h" +#include "src/mds/copyset/copyset_config.h" +#include "src/mds/copyset/copyset_manager.h" +#include "src/mds/heartbeat/chunkserver_healthy_checker.h" #include "src/mds/heartbeat/heartbeat_manager.h" #include "src/mds/heartbeat/heartbeat_service.h" -#include "src/mds/heartbeat/chunkserver_healthy_checker.h" -#include "src/mds/schedule/topoAdapter.h" +#include "src/mds/nameserver2/idgenerator/chunk_id_generator.h" #include "src/mds/schedule/operator.h" -#include "src/mds/copyset/copyset_manager.h" -#include "src/mds/copyset/copyset_config.h" #include "src/mds/schedule/scheduleMetrics.h" -#include "proto/topology.pb.h" -#include "proto/heartbeat.pb.h" -#include "proto/common.pb.h" -#include "src/common/timeutility.h" +#include "src/mds/schedule/topoAdapter.h" +#include "src/mds/topology/topology.h" +#include "src/mds/topology/topology_config.h" +#include "src/mds/topology/topology_item.h" +#include 
"src/mds/topology/topology_service_manager.h" +#include "src/mds/topology/topology_storge.h" +#include "src/mds/topology/topology_token_generator.h" using ::curve::common::Configuration; using std::string; @@ -65,15 +65,17 @@ using std::string; using ::curve::mds::topology::ChunkServerIdType; using ::curve::mds::topology::ChunkServerState; using ::curve::mds::topology::CopySetIdType; +using ::curve::mds::topology::CopySetKey; using ::curve::mds::topology::DefaultIdGenerator; using ::curve::mds::topology::DefaultTokenGenerator; using ::curve::mds::topology::kTopoErrCodeSuccess; using ::curve::mds::topology::LogicalPool; using ::curve::mds::topology::LogicalPoolType; -using ::curve::mds::topology::Poolset; using ::curve::mds::topology::PhysicalPool; -using ::curve::mds::topology::PoolsetIdType; using ::curve::mds::topology::PoolIdType; +using ::curve::mds::topology::Poolset; +using ::curve::mds::topology::PoolsetIdType; +using ::curve::mds::topology::Server; using ::curve::mds::topology::ServerIdType; using ::curve::mds::topology::TopologyImpl; using ::curve::mds::topology::TopologyOption; @@ -82,8 +84,6 @@ using ::curve::mds::topology::TopologyStatImpl; using ::curve::mds::topology::UNINTIALIZE_ID; using ::curve::mds::topology::Zone; using ::curve::mds::topology::ZoneIdType; -using ::curve::mds::topology::Server; -using ::curve::mds::topology::CopySetKey; using ::curve::mds::heartbeat::ChunkServerHeartbeatRequest; using ::curve::mds::heartbeat::ChunkServerHeartbeatResponse; @@ -120,247 +120,206 @@ class FakeTopologyStorage : public TopologyStorage { public: FakeTopologyStorage() {} - bool - LoadPoolset(std::unordered_map *PoolsetMap, - PoolsetIdType *maxPoolsetId) { + bool LoadPoolset(std::unordered_map* PoolsetMap, + PoolsetIdType* maxPoolsetId) { return true; } - bool - LoadLogicalPool(std::unordered_map *logicalPoolMap, - PoolIdType *maxLogicalPoolId) { + bool LoadLogicalPool( + std::unordered_map* logicalPoolMap, + PoolIdType* maxLogicalPoolId) { return true; } bool LoadPhysicalPool( - std::unordered_map *physicalPoolMap, - PoolIdType *maxPhysicalPoolId) { + std::unordered_map* physicalPoolMap, + PoolIdType* maxPhysicalPoolId) { return true; } - bool LoadZone(std::unordered_map *zoneMap, - ZoneIdType *maxZoneId) { + bool LoadZone(std::unordered_map* zoneMap, + ZoneIdType* maxZoneId) { return true; } - bool LoadServer(std::unordered_map *serverMap, - ServerIdType *maxServerId) { + bool LoadServer(std::unordered_map* serverMap, + ServerIdType* maxServerId) { return true; } bool LoadChunkServer( - std::unordered_map *chunkServerMap, - ChunkServerIdType *maxChunkServerId) { + std::unordered_map* chunkServerMap, + ChunkServerIdType* maxChunkServerId) { return true; } - bool LoadCopySet(std::map *copySetMap, - std::map *copySetIdMaxMap) { + bool LoadCopySet(std::map* copySetMap, + std::map* copySetIdMaxMap) { return true; } - bool StoragePoolset(const Poolset &data) { - return true; - } - bool StorageLogicalPool(const LogicalPool &data) { - return true; - } - bool StoragePhysicalPool(const PhysicalPool &data) { - return true; - } - bool StorageZone(const Zone &data) { - return true; - } - bool StorageServer(const Server &data) { - return true; - } - bool StorageChunkServer(const ChunkServer &data) { - return true; - } - bool StorageCopySet(const CopySetInfo &data) { - return true; - } - - bool DeletePoolset(PoolsetIdType id) { - return true; - } - bool DeleteLogicalPool(PoolIdType id) { - return true; - } - bool DeletePhysicalPool(PoolIdType id) { - return true; - } - bool 
DeleteZone(ZoneIdType id) { - return true; - } - bool DeleteServer(ServerIdType id) { - return true; - } - bool DeleteChunkServer(ChunkServerIdType id) { - return true; - } - bool DeleteCopySet(CopySetKey key) { - return true; - } - - bool UpdateLogicalPool(const LogicalPool &data) { - return true; - } - bool UpdatePhysicalPool(const PhysicalPool &data) { - return true; - } - bool UpdateZone(const Zone &data) { - return true; - } - bool UpdateServer(const Server &data) { - return true; - } - bool UpdateChunkServer(const ChunkServer &data) { - return true; - } - bool UpdateCopySet(const CopySetInfo &data) { - return true; - } - - bool LoadClusterInfo(std::vector *info) { - return true; - } - bool StorageClusterInfo(const ClusterInformation &info) { - return true; - } + bool StoragePoolset(const Poolset& data) { return true; } + bool StorageLogicalPool(const LogicalPool& data) { return true; } + bool StoragePhysicalPool(const PhysicalPool& data) { return true; } + bool StorageZone(const Zone& data) { return true; } + bool StorageServer(const Server& data) { return true; } + bool StorageChunkServer(const ChunkServer& data) { return true; } + bool StorageCopySet(const CopySetInfo& data) { return true; } + + bool DeletePoolset(PoolsetIdType id) { return true; } + bool DeleteLogicalPool(PoolIdType id) { return true; } + bool DeletePhysicalPool(PoolIdType id) { return true; } + bool DeleteZone(ZoneIdType id) { return true; } + bool DeleteServer(ServerIdType id) { return true; } + bool DeleteChunkServer(ChunkServerIdType id) { return true; } + bool DeleteCopySet(CopySetKey key) { return true; } + + bool UpdateLogicalPool(const LogicalPool& data) { return true; } + bool UpdatePhysicalPool(const PhysicalPool& data) { return true; } + bool UpdateZone(const Zone& data) { return true; } + bool UpdateServer(const Server& data) { return true; } + bool UpdateChunkServer(const ChunkServer& data) { return true; } + bool UpdateCopySet(const CopySetInfo& data) { return true; } + + bool LoadClusterInfo(std::vector* info) { return true; } + bool StorageClusterInfo(const ClusterInformation& info) { return true; } }; } // namespace topology class HeartbeatIntegrationCommon { public: - /* HeartbeatIntegrationCommon 构造函数 + /* HeartbeatIntegrationCommon constructor * - * @param[in] conf 配置信息 + * @param[in] conf configuration information */ - explicit HeartbeatIntegrationCommon(const Configuration &conf) { + explicit HeartbeatIntegrationCommon(const Configuration& conf) { conf_ = conf; } - /* PrepareAddPoolset 在集群中添加物理池集合 + /* PrepareAddPoolset adds a physical pool collection to the cluster * - * @param[in] poolset 物理池集合(池组) + * @param[in] poolset Physical pool set (pool group) */ - void PrepareAddPoolset(const Poolset &poolset); + void PrepareAddPoolset(const Poolset& poolset); - /* PrepareAddLogicalPool 在集群中添加逻辑池 + /* PrepareAddLogicalPool Adding a Logical Pool to a Cluster * - * @param[in] lpool 逻辑池 + * @param[in] lpool logical pool */ - void PrepareAddLogicalPool(const LogicalPool &lpool); + void PrepareAddLogicalPool(const LogicalPool& lpool); - /* PrepareAddPhysicalPool 在集群中添加物理池 + /* PrepareAddPhysicalPool Adding a Physical Pool to a Cluster * - * @param[in] ppool 物理池 + * @param[in] ppool physical pool */ - void PrepareAddPhysicalPool(const PhysicalPool &ppool); + void PrepareAddPhysicalPool(const PhysicalPool& ppool); - /* PrepareAddZone 在集群中添加zone + /* PrepareAddZone adds a zone to the cluster * * @param[in] zone */ - void PrepareAddZone(const Zone &zone); + void PrepareAddZone(const Zone& zone); - /* 
PrepareAddServer 在集群中添加server + /* PrepareAddServer adds a server to the cluster * * @param[in] server */ - void PrepareAddServer(const Server &server); + void PrepareAddServer(const Server& server); - /* PrepareAddChunkServer 在集群中添加chunkserver节点 + /* PrepareAddChunkServer adds a chunkserver node to the cluster * * @param[in] chunkserver */ - void PrepareAddChunkServer(const ChunkServer &chunkserver); + void PrepareAddChunkServer(const ChunkServer& chunkserver); - /* PrepareAddCopySet 在集群中添加copyset + /* PrepareAddCopySet adds a copyset to the cluster * - * @param[in] copysetId copyset id - * @param[in] logicalPoolId 逻辑池id - * @param[in] members copyset成员 + * @param[in] copysetId copyset ID + * @param[in] logicalPoolId Logical pool ID + * @param[in] members copyset members */ void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, - const std::set &members); + const std::set& members); - /* UpdateCopysetTopo 更新topology中copyset的状态 + /* UpdateCopysetTopo updates the status of the copyset in the topology * - * @param[in] copysetId copyset的id - * @param[in] logicalPoolId 逻辑池id - * @param[in] epoch copyset的epoch - * @param[in] leader copyset的leader - * @param[in] members copyset的成员 - * @param[in] candidate copyset的candidate信息 + * @param[in] copysetId The ID of the copyset + * @param[in] logicalPoolId Logical pool ID + * @param[in] epoch The epoch of the copyset + * @param[in] leader The leader of the copyset + * @param[in] members The members of the copyset + * @param[in] candidate The candidate info of the copyset */ void UpdateCopysetTopo(CopySetIdType copysetId, PoolIdType logicalPoolId, uint64_t epoch, ChunkServerIdType leader, - const std::set &members, + const std::set& members, ChunkServerIdType candidate = UNINTIALIZE_ID); - /* SendHeartbeat 发送心跳 + /* SendHeartbeat sends a heartbeat * * @param[in] req - * @param[in] expectedFailed 为true表示希望发送成功,为false表示希望发送失败 + * @param[in] expectFailed true if the heartbeat RPC is expected to fail, + * false if it is expected to succeed * @param[out] response */ - void SendHeartbeat(const ChunkServerHeartbeatRequest &request, + void SendHeartbeat(const ChunkServerHeartbeatRequest& request, bool expectFailed, - ChunkServerHeartbeatResponse *response); + ChunkServerHeartbeatResponse* response); - /* BuildBasicChunkServerRequest 构建最基本的request + /* BuildBasicChunkServerRequest builds a minimal heartbeat request * - * @param[in] id chunkserver的id - * @param[out] req 构造好的指定id的request + * @param[in] id chunkserver ID + * @param[out] req The constructed request for the given id */ void BuildBasicChunkServerRequest(ChunkServerIdType id, - ChunkServerHeartbeatRequest *req); + ChunkServerHeartbeatRequest* req); - /* AddCopySetToRequest 向request中添加copyset + /* AddCopySetToRequest adds a copyset to the request * * @param[in] req - * @param[in] csInfo copyset信息 - * @param[in] type copyset当前变更类型 + * @param[in] csInfo copyset information + * @param[in] type The current config change type of the copyset */ - void AddCopySetToRequest(ChunkServerHeartbeatRequest *req, - const CopySetInfo &csInfo, + void AddCopySetToRequest(ChunkServerHeartbeatRequest* req, + const CopySetInfo& csInfo, ConfigChangeType type = ConfigChangeType::NONE); - /* AddOperatorToOpController 向调度模块添加op + /* AddOperatorToOpController adds an op to the scheduling module * * @param[in] op */ - void AddOperatorToOpController(const Operator &op); + void AddOperatorToOpController(const Operator& op);
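As the fixtures later in this section show, an Operator is constructed from a start epoch, a copyset key, a priority, a timestamp, and a step, and then handed to this hook. A sketch mirroring PrepareMdsWithTransferOp below, assuming the scheduler's TransferLeader step type (the numeric values are illustrative only):

    // Queue a TransferLeader{1->2} operator for copyset (1,1) at epoch 5.
    Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority,
                std::chrono::steady_clock::now(),
                std::make_shared<TransferLeader>(1, 2));
    op.timeLimit = std::chrono::seconds(3);
    hbtest_->AddOperatorToOpController(op);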
- /* RemoveOperatorFromOpController 从调度模块移除指定copyset上的op + /* RemoveOperatorFromOpController removes the op on the specified copyset + * from the scheduling module * - * @param[in] id 需要移除op的copysetId + * @param[in] id The copyset key whose op should be removed */ - void RemoveOperatorFromOpController(const CopySetKey &id); + void RemoveOperatorFromOpController(const CopySetKey& id); /* - * PrepareBasicCluseter 在topology中构建最基本的拓扑结构 - * 一个物理池,一个逻辑池,三个zone,每个zone一个chunkserver, - * 集群中有一个copyset + * PrepareBasicCluseter builds the most basic topology: one physical pool, + * one logical pool, and three zones with one chunkserver each, plus a + * single copyset in the cluster */ void PrepareBasicCluseter(); /** - * InitHeartbeatOption 初始化heartbeatOption + * InitHeartbeatOption initializes the heartbeatOption * - * @param[in] conf 配置模块 - * @param[out] heartbeatOption 赋值完成的心跳option + * @param[in] conf configuration module + * @param[out] heartbeatOption The populated heartbeat option */ - void InitHeartbeatOption(Configuration *conf, - HeartbeatOption *heartbeatOption); + void InitHeartbeatOption(Configuration* conf, + HeartbeatOption* heartbeatOption); /** - * InitSchedulerOption 初始化scheduleOption + * InitSchedulerOption initializes the scheduleOption * - * @param[in] conf 配置模块 - * @param[out] heartbeatOption 赋值完成的调度option + * @param[in] conf configuration module + * @param[out] scheduleOption The populated schedule option */ - void InitSchedulerOption(Configuration *conf, - ScheduleOption *scheduleOption); + void InitSchedulerOption(Configuration* conf, + ScheduleOption* scheduleOption); /** - * BuildBasicCluster 运行heartbeat/topology/scheduler模块 + * BuildBasicCluster runs the heartbeat/topology/scheduler modules */ void BuildBasicCluster(); diff --git a/test/integration/heartbeat/heartbeat_basic_test.cpp b/test/integration/heartbeat/heartbeat_basic_test.cpp index c9a2ae416d..4144a9d53b 100644 --- a/test/integration/heartbeat/heartbeat_basic_test.cpp +++ b/test/integration/heartbeat/heartbeat_basic_test.cpp @@ -20,8 +20,8 @@ * Author: lixiaocui */ -#include #include +#include #include "test/integration/heartbeat/common.h" @@ -32,19 +32,19 @@ namespace mds { class HeartbeatBasicTest : public ::testing::Test { protected: - void InitConfiguration(Configuration *conf) { + void InitConfiguration(Configuration* conf) { conf->SetIntValue("mds.topology.ChunkServerStateUpdateSec", 0); - // heartbeat相关配置设置 + // Heartbeat-related settings conf->SetIntValue("mds.heartbeat.intervalMs", 100); conf->SetIntValue("mds.heartbeat.misstimeoutMs", 300); conf->SetIntValue("mds.heartbeat.offlinetimeoutMs", 500); conf->SetIntValue("mds.heartbeat.clean_follower_afterMs", 0); - // mds监听端口号 + // MDS listening address conf->SetStringValue("mds.listen.addr", "127.0.0.1:6879"); - // scheduler相关的内容 + // Scheduler-related settings conf->SetBoolValue("mds.enable.copyset.scheduler", false); conf->SetBoolValue("mds.enable.leader.scheduler", false); conf->SetBoolValue("mds.enable.recover.scheduler", false); @@ -65,14 +65,14 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithCandidateOpOnGoing() { - // 构造mds中copyset当前状 + // Construct the current state of the copyset in mds ChunkServer cs(10, "testtoekn", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }, 10); + std::set{1, 2, 3}, 10); - // 构造scheduler当前的状态 - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + // Construct the current state of the scheduler + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, 
std::chrono::steady_clock::now(), std::make_shared<AddPeer>(10)); op.timeLimit = std::chrono::seconds(3); @@ -80,15 +80,15 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsNoCnandidateOpOnGoing() { - // 构造mds中copyset当前状态 + // Construct the current state of the copyset in mds // copyset-1(epoch=5, peers={1,2,3}, leader=1); ChunkServer cs(10, "testtoekn", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // 构造scheduler当前的状态 - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + // Construct the current state of the scheduler + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared<AddPeer>(10)); op.timeLimit = std::chrono::seconds(3); @@ -96,14 +96,15 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithRemoveOp() { - // mds存在copyset-1(epoch=5, peers={1,2,3,4}, leader=1); - // scheduler中copyset-1有operator: startEpoch=5, step=RemovePeer<4> + // mds has copyset-1(epoch=5, peers={1,2,3,4}, leader=1); + // the scheduler has an operator for copyset-1: startEpoch=5, + // step=RemovePeer<4> ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3, 4 }); + std::set{1, 2, 3, 4}); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared<RemovePeer>(4)); op.timeLimit = std::chrono::seconds(3); @@ -111,14 +112,15 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithRemoveOpOnGoing() { - // mds存在copyset-1(epoch=5, peers={1,2,3,4}, leader=1, , candidate=4); - // scheduler中copyset-1有operator: startEpoch=5, step=RemovePeer<4> + // mds has copyset-1(epoch=5, peers={1,2,3,4}, leader=1, candidate=4); + // the scheduler has an operator for copyset-1: startEpoch=5, + // step=RemovePeer<4> ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - hbtest_->UpdateCopysetTopo( - 1, 1, 5, 1, std::set{ 1, 2, 3, 4 }, 4); + hbtest_->UpdateCopysetTopo(1, 1, 5, 1, + std::set{1, 2, 3, 4}, 4); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared<RemovePeer>(4)); op.timeLimit = std::chrono::seconds(3); @@ -126,12 +128,13 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithTransferOp() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1); - // scheduler中copyset-1有operator:startEpoch=5,step=TransferLeader{1>2} + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1); + // the scheduler has an operator for copyset-1: + // startEpoch=5, step=TransferLeader{1>2} hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared<TransferLeader>(1, 2)); op.timeLimit = std::chrono::seconds(3); @@ -139,12 +142,13 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithTransferOpOnGoing() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=2); - // scheduler中copyset-1有operator:startEpoch=5,step=TransferLeader{1>2} + // mds has
copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=2); + // the scheduler has an operator for copyset-1: + // startEpoch=5, step=TransferLeader{1>2} hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }, 2); + std::set{1, 2, 3}, 2); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared<TransferLeader>(1, 2)); op.timeLimit = std::chrono::seconds(3); @@ -152,22 +156,23 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrePareMdsWithCandidateNoOp() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }, 4); + std::set{1, 2, 3}, 4); } void PrepareMdsWithChangeOp() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1); - // scheduler中copyset-1有operator:startEpoch=5,step=ChangePeer{3>4} + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1); + // the scheduler has an operator for copyset-1: + // startEpoch=5, step=ChangePeer{3>4} ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared<ChangePeer>(3, 4)); op.timeLimit = std::chrono::seconds(3); @@ -175,24 +180,25 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithChangeOpOnGoing() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); - // scheduler中copyset-1有operator:startEpoch=5,step=step=ChangePeer{3>4} + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); + // the scheduler has an operator for copyset-1: + // startEpoch=5, step=ChangePeer{3>4} ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }, 4); + std::set{1, 2, 3}, 4); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared<ChangePeer>(3, 4)); op.timeLimit = std::chrono::seconds(3); hbtest_->AddOperatorToOpController(op); } - bool ValidateCopySet(const ::curve::mds::topology::CopySetInfo &expected) { + bool ValidateCopySet(const ::curve::mds::topology::CopySetInfo& expected) { ::curve::mds::topology::CopySetInfo copysetInfo; if (!hbtest_->topology_->GetCopySet( - CopySetKey{ expected.GetLogicalPoolId(), expected.GetId() }, + CopySetKey{expected.GetLogicalPoolId(), expected.GetId()}, ©setInfo)) { return false; } @@ -226,9 +232,9 @@ class HeartbeatBasicTest : public ::testing::Test { return true; } - void BuildCopySetInfo(CopySetInfo *info, uint64_t epoch, + void BuildCopySetInfo(CopySetInfo* info, uint64_t epoch, ChunkServerIdType leader, - const std::set<ChunkServerIdType> &members, + const std::set<ChunkServerIdType>& members, ChunkServerIdType candidateId = UNINTIALIZE_ID) { info->SetEpoch(epoch); info->SetLeader(leader); @@ -255,14 +261,14 @@ class HeartbeatBasicTest : public ::testing::Test { }; TEST_F(HeartbeatBasicTest, test_request_no_chunkserverID) { - // 空的HeartbeatRequest + // Empty HeartbeatRequest ChunkServerHeartbeatRequest req;
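// (The request above leaves every field unset, including chunkserverid; SENDHBFAIL presumably maps to expectFailed=true in SendHeartbeat, i.e. the mds is expected to reject this heartbeat.)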
ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBFAIL, &rep); } TEST_F(HeartbeatBasicTest, test_mds_donnot_has_this_chunkserver) { - // mds不存在该chunkserver + // The chunkserver does not exist in the mds ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(3, &req); req.set_chunkserverid(4); @@ -273,8 +279,8 @@ TEST_F(HeartbeatBasicTest, test_mds_donnot_has_this_chunkserver) { } TEST_F(HeartbeatBasicTest, test_chunkserver_ip_port_not_match) { - // chunkserver上报的id相同,ip和port不匹配 - // ip不匹配 + // The id reported by the chunkserver matches, but the ip and port do not + // ip mismatch ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(3, &req); req.set_ip("127.0.0.1"); @@ -283,14 +289,14 @@ TEST_F(HeartbeatBasicTest, test_chunkserver_ip_port_not_match) { ASSERT_EQ(::curve::mds::heartbeat::hbChunkserverIpPortNotMatch, rep.statuscode()); - // port不匹配 + // Port mismatch req.set_ip("10.198.100.3"); req.set_port(1111); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(::curve::mds::heartbeat::hbChunkserverIpPortNotMatch, rep.statuscode()); - // token不匹配 + // Token mismatch req.set_ip("10.198.100.3"); req.set_port(9000); req.set_token("youdao"); @@ -300,20 +306,20 @@ TEST_F(HeartbeatBasicTest, test_chunkserver_ip_port_not_match) { } TEST_F(HeartbeatBasicTest, test_chunkserver_offline_then_online) { - // chunkserver上报心跳时间间隔大于offline - // sleep 800ms, 该chunkserver onffline状态 + // The chunkserver's heartbeat interval exceeds the offline timeout; + // after sleeping 800ms the chunkserver is in OFFLINE state std::this_thread::sleep_for(std::chrono::milliseconds(800)); ChunkServer out; hbtest_->topology_->GetChunkServer(1, &out); ASSERT_EQ(OnlineState::OFFLINE, out.GetOnlineState()); - // chunkserver上报心跳,chunkserver online + // The chunkserver reports a heartbeat and comes back online ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(out.GetId(), &req); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 后台健康检查程序把chunksrver更新为onlinne状态 + // The background health checker updates the chunkserver to ONLINE status uint64_t now = ::curve::common::TimeUtility::GetTimeofDaySec(); bool updateSuccess = false; while (::curve::common::TimeUtility::GetTimeofDaySec() - now <= 2) { @@ -330,8 +336,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition1) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); // copyset-1(epoch=1, peers={1,2,3}, leader=1) CopySetInfo csInfo(1, 1); @@ -339,8 +344,7 @@ hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); } @@ -349,8 +353,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition2) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); +
ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); @@ -361,8 +364,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition2) { hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::ADD_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_TRUE(copysetInfo.HasCandidate()); @@ -373,7 +375,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition3) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - CopySetKey key{ 1, 1 }; + CopySetKey key{1, 1}; ASSERT_TRUE(hbtest_->topology_->GetCopySet(key, ©setInfo)); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); @@ -387,11 +389,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition3) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3, 5 }; + std::set res{1, 2, 3, 5}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -400,8 +401,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition4) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ChunkServer cs4(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs4); ChunkServer cs5(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); @@ -417,11 +417,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition4) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3, 5 }; + std::set res{1, 2, 3, 5}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(4, copysetInfo.GetCandidate()); ASSERT_EQ(0, rep.needupdatecopysets_size()); @@ -431,8 +430,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition5) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); // copyset-1(epoch=0, peers={1,2,3}, leader=0) CopySetInfo csInfo(1, 1); @@ -441,11 +439,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition5) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - 
hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -454,8 +451,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition6) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); // copyset-1(epoch=0, peers={1,2,3}, leader=1) CopySetInfo csInfo(1, 1); @@ -464,11 +460,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition6) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -477,8 +472,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition7) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); // copyset-1(epoch=0, peers={1,2,3}, leader=1) CopySetInfo csInfo(1, 1); @@ -487,11 +481,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition7) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -500,8 +493,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition8) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); // copyset-1(epoch=1, peers={1,2,3}, leader=0) CopySetInfo csInfo(1, 1); @@ -510,11 +502,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition8) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -523,12 +514,11 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition9) { ChunkServerHeartbeatRequest req; 
hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // 上报copyset-1(epoch=2, peers={1,2,3,4}, leader=1) + // Report copyset-1(epoch=2, peers={1,2,3,4}, leader=1) auto copysetMembers = copysetInfo.GetCopySetMembers(); copysetMembers.emplace(4); CopySetInfo csInfo(1, 1); @@ -537,11 +527,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition9) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -550,8 +539,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition10) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); @@ -564,415 +552,398 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition10) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } -// 上报的是leader +// The reporter is the leader TEST_F(HeartbeatBasicTest, test_leader_report_consistent_with_mds) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver1上报的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // chunkserver1 report copyset-1(epoch=2, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 2, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 2, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_leader_report_epoch_bigger) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update
copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver1上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 report copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空,mds更新epoch为5 + // response is empty, mds updates epoch to 5 ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_leader_report_epoch_bigger_leader_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=5, peers={1,2,3}, leader=2) + // chunkserver2 report copyset-1(epoch=5, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空,mds更新epoch为5,leader为2 + // response is empty, mds updates epoch to 5, and leader to 2 ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(2, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } -// 上报的是follower +// The reporter is a follower TEST_F(HeartbeatBasicTest, test_follower_report_consistent_with_mds) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=2, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 2, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 2, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空 + // response is empty ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch());
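// (A follower report that matches the mds record changes nothing: epoch stays 2, leader stays 1, and the member set stays {1,2,3}.)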
ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } -// 上报的是follower +// The reporter is a follower TEST_F(HeartbeatBasicTest, test_follower_report_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=2, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=2, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 2, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 2, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空 + // response is empty ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_bigger) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=3, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=3, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 3, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 3, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_bigger_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=3, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=3, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 3, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 3, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( -
ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_bigger_peers_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=3, peers={1,2,3,4}, leader=1) + // chunkserver2 report copyset-1(epoch=3, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 3, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 3, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=1, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 1, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=1, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 1, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); 
::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller_peers_not_same1) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3,4}, leader=1) + // chunkserver2 report copyset-1(epoch=1, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 1, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller_peers_not_same2) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3,4}, leader=0) + // chunkserver2 report copyset-1(epoch=1, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 1, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 
1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + std::set{1, 2, 3}); + // chunkserver2 report copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0_peers_not_same1) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver2 report copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0_peers_not_same2) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0, 
peers={1,2,3,4}, leader=0) + // chunkserver2 report copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } -// 上报的不是复制组成员 +// The reporter is not a member of the replication group TEST_F(HeartbeatBasicTest, test_other_report_consistent_with_mds) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // chunkserver4 report copyset-1(epoch=2, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 2, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 2, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -980,29 +951,28 @@ TEST_F(HeartbeatBasicTest, test_other_report_consistent_with_mds) { ASSERT_EQ(3, conf.peers_size()); ASSERT_EQ(2, conf.epoch()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=1, peers={1,2,3}, leader=1) + // chunkserver4 report copyset-1(epoch=1, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 1, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1010,29 +980,28 @@
TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller) { ASSERT_EQ(3, conf.peers_size()); ASSERT_EQ(2, conf.epoch()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller_peers_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=1, peers={1,2}, leader=1) + // chunkserver4 report copyset-1(epoch=1, peers={1,2}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 1, std::set{ 1, 2 }); + BuildCopySetInfo(&csInfo, 1, 1, std::set{1, 2}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1040,30 +1009,29 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller_peers_not_same) { ASSERT_EQ(3, conf.peers_size()); ASSERT_EQ(2, conf.epoch()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver4 report copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1071,32 +1039,31 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0) { ASSERT_EQ(3, conf.peers_size()); ASSERT_EQ(2, conf.epoch()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; 
+ std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0_peers_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs4(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs4); ChunkServer cs5(5, "testtoken", "nvme", 3, "10.198.100.3", 9090, "/"); hbtest_->PrepareAddChunkServer(cs5); - // chunkserver4上报的copyset-1(epoch=0, peers={1,2,3,5}, leader=0) + // chunkserver4 report copyset-1(epoch=0, peers={1,2,3,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1104,29 +1071,28 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0_peers_not_same) { ASSERT_EQ(3, conf.peers_size()); ASSERT_EQ(2, conf.epoch()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition1) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 report copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1140,24 +1106,23 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition1) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition2) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 report copyset-1(epoch=5, peers={1,2,3}, leader=1) // conf.gChangeInfo={peer: 10, type: AddPeer} ) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }, 10); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}, 10); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::ADD_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology
::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(10, copysetInfo.GetCandidate()); } @@ -1165,246 +1130,238 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition2) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition3) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver1 report copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(6, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition4) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver2 report copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(6, copysetInfo.GetEpoch()); ASSERT_EQ(2, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition5) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver1 report copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep;
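// (Here the leader reports that the AddPeer of chunkserver 10 has completed: epoch is now 6 and 10 is in the member set, so the pending operator should be dropped and the topology updated below.)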
hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(6, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3, 10 }; + std::set res{1, 2, 3, 10}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition6) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition7) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition8) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); }
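// (The remaining conditions below, like 6-8 above, share one expectation: the reporter is not the current leader, or its epoch is stale, so the mds leaves its record (epoch=5, peers={1,2,3}, leader=1) unchanged and needupdatecopysets stays empty.)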
TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition9) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition10) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver2 report copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition11) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver2 report copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition12) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; 
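// (The reported epoch of 4 is behind the mds epoch of 5; such a stale follower report is expected to be ignored, leaving the response empty.)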
hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition13) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1413,16 +1370,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition14) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1431,48 +1388,48 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition15) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition16) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition17) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req;
hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1481,16 +1438,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition18) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1499,128 +1456,126 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition19) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition20) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition21) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition22) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver10 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req;
hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set<ChunkServerIdType> res{ 1, 2, 3 }; + std::set<ChunkServerIdType> res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition23) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver10 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set<ChunkServerIdType> res{ 1, 2, 3 }; + std::set<ChunkServerIdType> res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition24) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition25) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1629,16 +1584,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition26) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver10 reports
copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1647,48 +1602,48 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition27) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition28) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition29) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1697,16 +1652,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition30) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1715,16 +1670,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition31) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); -
// chunkserver10上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1733,16 +1688,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition32) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1756,16 +1711,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition33) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1781,16 +1736,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition34) { ChunkServer cs5(5, "testtoekn", "nvme", 3, "10.198.100.3", 9003, "/"); hbtest_->PrepareAddChunkServer(cs5); - // chunkserver4上报的copyset-1(epoch=4, peers={1,2,3,5}, leader=0) + // chunkserver4 reports copyset-1(epoch=4, peers={1,2,3,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1804,16 +1759,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition35) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); -
BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1825,16 +1780,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition35) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition1) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(ConfigChangeType::ADD_PEER, rep.needupdatecopysets(0).type()); ASSERT_EQ("10.198.100.3:9001:0", @@ -1844,42 +1799,41 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition1) { TEST_F(HeartbeatBasicTest, test_test_mdsWithCandidate_OpOnGoing_condition2) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver1上报 + // chunkserver1 reports // copyset-1(epoch=5, peers={1,2,3}, leader=1, // configChangeInfo={peer: 10, type: AddPeer} ) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }, 10); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}, 10); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::ADD_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition3) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology中copyset的状态 + // Check the status of the copyset in topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetLeader()); ASSERT_EQ(6, copysetInfo.GetEpoch()); ASSERT_EQ(UNINTIALIZE_ID, copysetInfo.GetCandidate()); @@ -1888,206 +1842,200 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition3) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition4) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver1上报copyset-1(epoch=6,
peers={1,2,3,10}, leader=2) + // chunkserver1 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology中copyset的状态 + // Check the status of the copyset in topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(6, copysetInfo.GetEpoch()); ASSERT_EQ(UNINTIALIZE_ID, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3, 10 }; + std::set<ChunkServerIdType> peers{1, 2, 3, 10}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition5) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=7, peers={1,2,3, 10}, leader=2) + // chunkserver2 reports copyset-1(epoch=7, peers={1,2,3,10}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology中copyset的状态 + // Check the status of the copyset in topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetLeader()); ASSERT_EQ(7, copysetInfo.GetEpoch()); ASSERT_EQ(UNINTIALIZE_ID, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3, 10 }; + std::set<ChunkServerIdType> peers{1, 2, 3, 10}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition6) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition7) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=5,
peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition8) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition9) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition10) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req;
hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition11) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2096,28 +2044,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition12) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10,
copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2126,84 +2073,81 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition13) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition14) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition15) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1,
ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2212,28 +2156,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition16) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2242,196 +2185,189 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition17) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition18) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver10 reports
copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition19) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition20) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver10 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10,
copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition21) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver10 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition22) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition23) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0,
rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2440,28 +2376,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition24) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2470,86 +2405,83 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition25) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition26) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition27) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2558,19 +2490,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition28) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -2580,14 +2512,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition28) {
rep.needupdatecopysets(0).peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", rep.needupdatecopysets(0).peers(2).address()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2596,19 +2527,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition29) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -2618,14 +2549,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition29) { rep.needupdatecopysets(0).peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", rep.needupdatecopysets(0).peers(2).address()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2634,19 +2564,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition30) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -2656,30 +2586,29 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition30) { rep.needupdatecopysets(0).peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", rep.needupdatecopysets(0).peers(2).address()); -
检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_1) { PrepareMdsWithRemoveOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -2694,48 +2623,47 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_1) { ASSERT_EQ(ConfigChangeType::REMOVE_PEER, rep.needupdatecopysets(0).type()); ASSERT_EQ("10.198.100.3:9001:0", rep.needupdatecopysets(0).configchangeitem().address()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_2) { PrepareMdsWithRemoveOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1, + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1, // configChangeInfo={peer: 4, type:REMOVE_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }, - 4); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}, 4); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::REMOVE_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_3) { PrepareMdsWithRemoveOp(); - // chunkserver-1上报上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(6); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check the op in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -2743,22 +2671,22 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_3) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_4) { PrepareMdsWithRemoveOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req;
hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(6); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -2766,23 +2694,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_4) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_5) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 7, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(7); csInfo.SetLeader(2); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -2790,23 +2718,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_5) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_6) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -2814,24 +2742,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_6) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_7) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check 
response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -2839,24 +2767,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_7) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_8) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -2864,25 +2792,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_8) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_9) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -2890,25 +2818,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_9) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_10) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + 
csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -2916,26 +2844,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_10) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_11) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -2943,43 +2871,43 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_11) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_12) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); 
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -2987,44 +2915,44 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_12) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_13) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports (epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-2上报(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports (epoch=4, peers={1,2,3}, leader=0) hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -3032,43 +2960,43 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_13) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_14) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -3076,45 +3004,45 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_14) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_15) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -3124,17 +3052,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_16) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // Non-replication-group member chunkserver-5 reports // copyset-1(epoch=5, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf =
rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -3145,13 +3073,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_16) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -3161,17 +3089,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_17) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // Non-replication-group member chunkserver-5 reports // copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -3182,14 +3110,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_17) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -3199,17 +3127,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_18) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // Non-replication-group member chunkserver-5 reports // copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -3220,14 +3148,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_18) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step =
dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -3237,17 +3165,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_19) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // Non-replication-group member chunkserver-5 reports // copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -3258,847 +3186,846 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_19) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_1) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1, + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1, // configChangeInfo={peer: 4, type: REMOVE_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3, 4 }, - 4); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3, 4}, 4); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::REMOVE_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_2) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(6); ASSERT_TRUE(ValidateCopySet(csInfo)); - //
检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_3) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_4) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_5) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 7, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_6) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler 
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_7) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_8) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_9) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, 
ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_10) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_11) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_12) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, 
ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_13) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_14) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_15) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = 
dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_16) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_17) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_18) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_19) { PrepareMdsWithRemoveOpOnGoing(); 
- // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_20) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_21) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_22) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, 
&req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_23) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_24) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_25) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); 
ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_26) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_27) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_28) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, 
&rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_29) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_30) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_31) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 
检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_32) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_33) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -4108,17 +4035,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_34) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // Non-replication-group member chunkserver-5 reports // copyset-1(epoch=5, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1,
rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -4129,14 +4056,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_34) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -4146,17 +4073,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_35) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // chunkserver-5 (not a member of the replica group) reports // copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -4167,27 +4094,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_35) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // 非复制组成员chunkserver-5上报 + // chunkserver-5 (not a member of the replica group) reports // copyset-1(epoch=4, peers={1,2,3}, leader=0) rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -4198,16 +4125,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_35) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step);
ASSERT_EQ(4, step->GetTargetPeer()); } @@ -4217,75 +4144,75 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_36) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // chunkserver-5 (not a member of the replica group) reports // copyset-1(epoch=0, peers={1,2,3,4}, leader=0 ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); ASSERT_EQ(1, conf.copysetid()); ASSERT_EQ(4, conf.peers_size()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // 非复制组成员chunkserver-5上报 + // chunkserver-5 (not a member of the replica group) reports // copyset-1(epoch=0, peers={1,2,3}, leader=0) rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); ASSERT_EQ(1, conf.copysetid()); ASSERT_EQ(4, conf.peers_size()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_1) { PrepareMdsWithTransferOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -4299,350 +4226,350 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_1) { rep.needupdatecopysets(0).type()); ASSERT_EQ("10.198.100.2:9000:0", rep.needupdatecopysets(0).configchangeitem().address()); - // 检查copyset + //
Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_2) { PrepareMdsWithTransferOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 2, type: TRANSFER_LEADER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }, 2); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}, 2); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::TRANSFER_LEADER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_3) { PrepareMdsWithTransferOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_4) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_5) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 
2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_6) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_7) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_8) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = 
hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_9) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_10) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, 
leader=0) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_11) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_12) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); 
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } @@ -4651,15 +4578,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_13) { PrepareMdsWithTransferOp(); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -4669,13 +4596,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_13) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } @@ -4684,15 +4611,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_14) { PrepareMdsWithTransferOp(); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) 
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -4702,14 +4629,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_14) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } @@ -4720,15 +4647,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_15) { hbtest_->PrepareAddChunkServer(cs1); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -4738,15 +4665,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_15) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } @@ -4758,15 +4685,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_16) { ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, 
conf.logicalpoolid()); @@ -4776,25 +4703,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_16) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -4804,346 +4731,346 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_16) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_1) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1, + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1, // configChangeInfo={peer: 2, type: TRANSFER_LEADER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }, 2); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}, 2); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::TRANSFER_LEADER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_2) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); 
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_3) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_4) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_5) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_6) { PrepareMdsWithTransferOpOnGoing(); - // 
chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_7) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(2); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_8) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + 
BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_9) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset 
csInfo.SetEpoch(5); csInfo.SetCandidate(2); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_10) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } @@ -5152,15 +5079,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_11) { PrepareMdsWithTransferOpOnGoing(); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ 
-5170,14 +5097,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_11) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } @@ -5186,15 +5113,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_12) { PrepareMdsWithTransferOpOnGoing(); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5204,15 +5131,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_12) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } @@ -5224,15 +5151,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_13) { ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5242,26 +5169,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_13) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = 
dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5271,661 +5198,661 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_13) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_1) { PrePareMdsWithCandidateNoOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_2) { PrePareMdsWithCandidateNoOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 4, type: ADD_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }, 4); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}, 4); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::ADD_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_3) { PrePareMdsWithCandidateNoOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; 
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_4) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_5) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_6) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-2上报copyset-1(epoch=7, peers={1,2,3,4}, leader=2)
+    // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,3,4}, leader=2)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_7) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_8) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_9) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_10) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_11) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetCandidate(4);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_12) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_13) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
     req.Clear();
     rep.Clear();
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetCandidate(4);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_14) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
     req.Clear();
     rep.Clear();
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_15) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
     req.Clear();
     rep.Clear();
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetCandidate(4);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_16) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
     req.Clear();
     rep.Clear();
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_17) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_18) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_19) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetCandidate(4);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_20) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_21) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetCandidate(4);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_22) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_23) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,5}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=1)
     ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs);
     req.Clear();
     rep.Clear();
     hbtest_->BuildBasicChunkServerRequest(4, &req);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 5 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 5});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetCandidate(4);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_24) {
     PrePareMdsWithCandidateNoOp();
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,5}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=0)
     ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs);
     req.Clear();
     rep.Clear();
     hbtest_->BuildBasicChunkServerRequest(4, &req);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 5 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 5});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
@@ -5934,16 +5861,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_25) {
     PrePareMdsWithCandidateNoOp();
     ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs);
-    // chunkserver-5上报copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // chunkserver-5 reports copyset-1(epoch=5, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(5, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     auto conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -5953,11 +5880,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_25) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
@@ -5966,17 +5893,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_26) {
     PrePareMdsWithCandidateNoOp();
     ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs);
-    // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=0)
+    // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(5, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     auto conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -5986,12 +5913,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_26) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(1);
     csInfo.SetEpoch(5);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
@@ -6000,17 +5927,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_27) {
     PrePareMdsWithCandidateNoOp();
     ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs);
-    // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
+    // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(5, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     auto conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -6020,13 +5947,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_27) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(1);
     csInfo.SetEpoch(5);
     csInfo.SetCandidate(4);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
@@ -6035,15 +5962,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_28) {
     PrePareMdsWithCandidateNoOp();
     ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs);
-    // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
+    // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(5, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     auto conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -6053,23 +5980,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_28) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
+    // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
     req.Clear();
     rep.Clear();
     hbtest_->BuildBasicChunkServerRequest(5, &req);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -6079,468 +6006,467 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_28) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_1) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_2) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
     // configChangeInfo={peer: 2, type: TRANSFER_LEADER})
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }, 2);
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}, 2);
     hbtest_->AddCopySetToRequest(&req, csInfo,
                                  ConfigChangeType::TRANSFER_LEADER);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_3) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_4) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
     // configChangeInfo={peer: 2, type: TRANSFER_LEADER})
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }, 2);
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3}, 2);
     hbtest_->AddCopySetToRequest(&req, csInfo,
                                  ConfigChangeType::TRANSFER_LEADER);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_5) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
+                               std::set<ChunkServerIdType>{1, 2, 3});
     ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs);
-    // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_6) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
+                               std::set<ChunkServerIdType>{1, 2, 3});
     ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs);
-    // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
     // configChangeInfo={peer: 2, type: TRANSFER_LEADER})
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 },
-                     2);
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}, 2);
     hbtest_->AddCopySetToRequest(&req, csInfo,
                                  ConfigChangeType::TRANSFER_LEADER);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_7) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(0);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_8) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0)
    ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_9) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(0);
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0)
     req.Clear();
     rep.Clear();
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_10) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
     ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs);
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(0);
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0)
    req.Clear();
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0)
+    req.Clear();
     rep.Clear();
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_11) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(0);
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0)
     req.Clear();
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0)
+    req.Clear();
     rep.Clear();
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_12) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
     ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs);
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(0);
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
     req.Clear();
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
+    req.Clear();
     rep.Clear();
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_13) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(0);
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
     req.Clear();
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
+    req.Clear();
     rep.Clear();
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_14) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
     ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs);
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(0);
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
     req.Clear();
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
+    req.Clear();
     rep.Clear();
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_15) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
     ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs);
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     auto conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -6550,29 +6476,29 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_15) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(0);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_16) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0)
     ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs);
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     auto conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -6582,118 +6508,118 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_16) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_17) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
     ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs1);
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(0);
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,5}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,5}, leader=1)
     req.Clear();
     rep.Clear();
     ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs2);
     hbtest_->BuildBasicChunkServerRequest(4, &req);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 5 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 5});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(0);
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_18) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=0)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=0)
     ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs1);
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,5}, leader=0)
    req.Clear();
+    // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,5}, leader=0)
+    req.Clear();
     rep.Clear();
     ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs2);
     hbtest_->BuildBasicChunkServerRequest(4, &req);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 5 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 5});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_19) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
+                               std::set<ChunkServerIdType>{1, 2, 3});
 
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=1)
     ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs1);
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     auto conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -6703,24 +6629,24 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_19) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(0);
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,5}, leader=1)
     req.Clear();
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,5}, leader=1)
+    req.Clear();
     rep.Clear();
     ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs2);
     hbtest_->BuildBasicChunkServerRequest(4, &req);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 5 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 5});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -6730,31 +6656,31 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_19) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(0);
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_20) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0)
     ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs1);
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     auto conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -6764,23 +6690,23 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_20) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,5}, leader=0)
     req.Clear();
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,5}, leader=0)
+    req.Clear();
     rep.Clear();
     ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs2);
     hbtest_->BuildBasicChunkServerRequest(4, &req);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 5 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 5});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -6790,31 +6716,31 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_20) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_21) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
+                               std::set<ChunkServerIdType>{1, 2, 3});
 
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1)
     ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs1);
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     auto conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -6824,24 +6750,24 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_21) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(0);
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,5}, leader=1)
     req.Clear();
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=1)
+    req.Clear();
     rep.Clear();
     ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs2);
     hbtest_->BuildBasicChunkServerRequest(4, &req);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 5 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 5});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -6851,31 +6777,31 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_21) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(0);
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) {
-    // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology
     hbtest_->UpdateCopysetTopo(1, 1, 5, 0,
-                               std::set<ChunkServerIdType>{ 1, 2, 3 });
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
+                               std::set<ChunkServerIdType>{1, 2, 3});
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
     ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs1);
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     auto conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -6885,23 +6811,23 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,5}, leader=0)
     req.Clear();
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=0)
+    req.Clear();
     rep.Clear();
     ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs2);
     hbtest_->BuildBasicChunkServerRequest(4, &req);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 5 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 5});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -6911,11 +6837,11 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
@@ -6923,15 +6849,15 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) {
 
 TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_1) {
     PrepareMdsWithChangeOp();
-    // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     auto conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -6944,14 +6870,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_1) {
     ASSERT_EQ(ConfigChangeType::CHANGE_PEER, conf.type());
     ASSERT_EQ("10.198.100.3:9001:0", conf.configchangeitem().address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.oldpeer().address());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(3, step->GetOldPeer());
     ASSERT_EQ(4, step->GetTargetPeer());
@@ -6960,24 +6886,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_1) {
 
 TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_2) {
     PrepareMdsWithChangeOp();
-    // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
     // configChangeInfo={peer: 4, type: CHANGE_PEER})
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }, 4);
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}, 4);
     hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::CHANGE_PEER);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
@@ -6985,20 +6911,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_2) {
 
 TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_3) {
     PrepareMdsWithChangeOp();
-    // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7006,20 +6932,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_3) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_4) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7029,20 +6955,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_5) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,5}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7050,20 +6976,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_5) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_6) { PrepareMdsWithChangeOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7071,20 +6997,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_6) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_7) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,4}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,4}, leader=2) 
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 2, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 7, 2, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7092,20 +7018,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_7) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_8) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=7, peers={1,2,4}, leader=4) + // chunkserver-4 reports copyset-1(epoch=7, peers={1,2,4}, leader=4) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 4, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 7, 4, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7113,23 +7039,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_8) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_9) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7137,24 +7063,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_9) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_10) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = 
hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7162,24 +7088,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_10) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_11) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7187,25 +7113,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_11) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_12) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7213,25 +7139,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_12) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_13) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, 
ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7239,26 +7165,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_13) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_14) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7268,25 +7194,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_15) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7296,26 +7222,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_16) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + 
csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7323,24 +7249,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_16) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_17) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7348,25 +7274,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_17) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_18) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7376,25 +7302,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_19) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); 
ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7404,26 +7330,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_20) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7431,24 +7357,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_20) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_21) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7456,25 +7382,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_21) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_22) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); 
ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7484,25 +7410,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_23) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7512,26 +7438,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_24) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7539,23 +7465,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_24) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_25) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, 
rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7563,24 +7489,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_25) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_26) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7588,25 +7514,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_26) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_27) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7614,26 +7540,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_27) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_28) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - 
csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7641,24 +7567,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_28) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_29) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7666,25 +7592,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_29) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_30) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7694,25 +7620,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_31) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 
1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7722,26 +7648,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_32) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7749,24 +7675,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_32) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_33) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7774,25 +7700,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_33) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_34) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset 
csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7802,25 +7728,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_35) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7830,26 +7756,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_36) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7859,16 +7785,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_37) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 
检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7878,14 +7804,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_37) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7895,16 +7821,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7914,13 +7840,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); @@ -7928,13 +7854,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { hbtest_->PrepareAddChunkServer(cs2); req.Clear(); rep.Clear(); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=1) hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 6 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 6}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7944,14 +7870,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, 
ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7961,16 +7887,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_39) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7980,28 +7906,28 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); ChunkServer cs2(6, "testtoken", "nvme", 3, "10.198.100.3", 9003, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 6 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 6}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8011,15 +7937,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8029,16 +7955,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, 
std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8048,26 +7974,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8077,15 +8003,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8093,15 +8019,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_1) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8114,14 +8040,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_1) { ASSERT_EQ(ConfigChangeType::CHANGE_PEER, conf.type()); ASSERT_EQ("10.198.100.3:9001:0", conf.configchangeitem().address()); ASSERT_EQ("10.198.100.3:9000:0", conf.oldpeer().address()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); 
ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(3, step->GetOldPeer()); ASSERT_EQ(4, step->GetTargetPeer()); @@ -8130,24 +8056,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_1) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_2) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 4, type: CHANGE_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }, 4); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}, 4); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::CHANGE_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8155,20 +8081,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_2) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_3) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8176,20 +8102,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_3) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_4) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8199,20 +8125,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_5) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 
@@ -8199,20 +8125,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_4) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_5) { PrepareMdsWithChangeOpOnGoing(); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs);
- // chunkserver-1上报copyset-1(epoch=6, peers={1,2,5}, leader=1)
+ // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,5}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 5 });
+ BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 5});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); }
@@ -8220,20 +8146,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_5) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_6) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-1上报copyset-1(epoch=6, peers={1,2,4}, leader=1)
+ // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,4}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 4 });
+ BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 4});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); }
@@ -8241,20 +8167,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_6) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_7) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-2上报copyset-1(epoch=7, peers={1,2,4}, leader=2)
+ // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,4}, leader=2)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{ 1, 2, 4 });
+ BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{1, 2, 4});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); }
@@ -8262,20 +8188,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_7) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_8) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-4上报copyset-1(epoch=7, peers={1,2,4}, leader=4)
+ // chunkserver-4 reports copyset-1(epoch=7, peers={1,2,4}, leader=4)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 7, 4, std::set<ChunkServerIdType>{ 1, 2, 4 });
+ BuildCopySetInfo(&csInfo, 7, 4, std::set<ChunkServerIdType>{1, 2, 4});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); }
@@ -8283,24 +8209,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_8) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_9) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+ // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+ BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8308,25 +8234,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_9) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_10) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0)
+ // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+ BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8334,25 +8260,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_10) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_11) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+ // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+ BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8360,26 +8286,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_11) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_12) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0)
+ // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+ BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8387,26 +8313,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_12) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_13) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=1)
+ // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 4 });
+ BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 4});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetCandidate(4);
- csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+ csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8414,27 +8340,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_13) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_14) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=0)
+ // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 4 });
+ BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 4});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4);
- csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+ csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8444,26 +8370,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_15) {
ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs);
- // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=1)
+ // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 5 });
+ BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 5});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetCandidate(4);
- csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+ csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8473,27 +8399,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_16) {
ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs);
- // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=0)
+ // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 5 });
+ BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 5});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4);
- csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+ csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8501,25 +8427,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_16) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_17) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1)
+ // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+ BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8527,26 +8453,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_17) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_18) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0)
+ // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+ BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8556,26 +8482,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_19) {
ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs);
- // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=1)
+ // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 5 });
+ BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 5});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetCandidate(4);
- csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+ csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8585,27 +8511,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_20) {
ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs);
- // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=0)
+ // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 5 });
+ BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 5});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4);
- csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+ csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8613,25 +8539,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_20) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_21) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1)
+ // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+ BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8639,26 +8565,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_21) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_22) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
+ // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+ BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8668,26 +8594,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_23) {
ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs);
- // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=1)
+ // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 5 });
+ BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 5});
hbtest_->AddCopySetToRequest(&req, csInfo);
ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetCandidate(4);
- csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+ csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8697,27 +8623,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_24) {
ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs);
- // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=0)
+ // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 5 });
+ BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 5});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4);
- csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+ csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8725,24 +8651,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_24) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_25) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+ // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+ BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8750,25 +8676,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_25) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_26) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0)
+ // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+ BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8776,26 +8702,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_26) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_27) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=1)
+ // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 4 });
+ BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 4});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetCandidate(4);
- csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+ csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8803,27 +8729,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_27) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_28) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=0)
+ // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 4 });
+ BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 4});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetCandidate(4);
- csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+ csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8831,25 +8757,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_28) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_29) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1)
+ // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+ BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8857,26 +8783,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_29) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_30) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0)
+ // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+ BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8886,26 +8812,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_31) {
ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs);
- // chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=1)
+ // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 5 });
+ BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 5});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetCandidate(4);
- csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+ csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8915,27 +8841,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_32) {
ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs);
- // chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=0)
+ // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 5 });
+ BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 5});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4);
- csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+ csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8943,25 +8869,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_32) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_33) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1)
+ // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+ BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8969,26 +8895,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_33) {
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_34) { PrepareMdsWithChangeOpOnGoing();
- // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
+ // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+ BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -8998,26 +8924,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_35) {
"testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -9027,27 +8953,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_36) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -9057,16 +8983,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_37) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9076,15 +9002,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_37) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset 
@@ -9057,16 +8983,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_37) {
ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs);
- // chunkserver-5上报copyset-1(epoch=0, peers={1,2,5}, leader=0)
+ // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,5}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 5 });
+ BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 5});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid());
@@ -9076,15 +9002,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_37) {
ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch());
- // 检查copyset
+ // Check copyset
csInfo.SetLeader(1); csInfo.SetCandidate(4);
- csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+ csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -9094,16 +9020,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) {
ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs);
- // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=0)
+ // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1);
- BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+ BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid());
@@ -9113,15 +9039,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) {
ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer());
@@ -9129,13 +9055,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) {
hbtest_->PrepareAddChunkServer(cs2); req.Clear(); rep.Clear();
- // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=0)
+ // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=0)
hbtest_->BuildBasicChunkServerRequest(5, &req);
- BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 6 });
+ BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 6});
hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid());
@@ -9145,16 +9071,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) {
ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4);
- csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+ csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
@@ -9164,16 +9090,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_39) {
"testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9183,28 +9109,28 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); ChunkServer cs2(6, "testtoken", "nvme", 3, "10.198.100.3", 9003, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=1) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 6 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 6}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9214,15 +9140,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -9232,16 +9158,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_40) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf 
CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid());
@@ -9251,27 +9177,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_40) {
ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- auto step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer());
- // chunkserver-5上报copyset-1(epoch=0, peers={1,2,4}, leader=0)
+ // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,4}, leader=0)
req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req);
- BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 4 });
+ BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 4});
hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response
+ // Check response
ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid());
@@ -9281,16 +9207,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_40) {
ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch());
- // 检查copyset
+ // Check copyset
csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4);
- csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+ csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo));
- // 检查scheduler中的op
+ // Check op in scheduler
ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size());
- step = dynamic_cast<ChangePeer *>(ops[0].step.get());
+ step = dynamic_cast<ChangePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); }
diff --git a/test/integration/heartbeat/heartbeat_exception_test.cpp b/test/integration/heartbeat/heartbeat_exception_test.cpp
index 67ac0bcf01..3b04c79390 100644
--- a/test/integration/heartbeat/heartbeat_exception_test.cpp
+++ b/test/integration/heartbeat/heartbeat_exception_test.cpp
@@ -20,8 +20,8 @@ * Author: lixiaocui */
-#include #include +#include #include "test/integration/heartbeat/common.h"
@@ -31,19 +31,19 @@ namespace curve { namespace mds { class HeartbeatExceptionTest : public ::testing::Test { protected:
- void InitConfiguration(Configuration *conf) {
+ void InitConfiguration(Configuration* conf) {
conf->SetIntValue("mds.topology.ChunkServerStateUpdateSec", 0);
- // heartbeat相关配置设置
+ // heartbeat-related configuration
conf->SetIntValue("mds.heartbeat.intervalMs", 100); conf->SetIntValue("mds.heartbeat.misstimeoutMs", 3000); conf->SetIntValue("mds.heartbeat.offlinetimeoutMs", 5000); conf->SetIntValue("mds.heartbeat.clean_follower_afterMs", sleepTimeMs_);
- // mds监听端口号
+ // mds listening port
conf->SetStringValue("mds.listen.addr", "127.0.0.1:6880");
- // scheduler相关的内容
+ // scheduler-related settings
conf->SetBoolValue("mds.enable.copyset.scheduler", false); conf->SetBoolValue("mds.enable.leader.scheduler", false); conf->SetBoolValue("mds.enable.recover.scheduler", false); @@ -64,9 +64,9 @@ class HeartbeatExceptionTest : public ::testing::Test { conf->SetIntValue("mds.scheduler.minScatterWidth", 50); }
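// The values above form the timing ladder the exception test below relies on:
// heartbeat interval (100 ms) is far below the miss timeout (3000 ms), which
// is below the offline timeout (5000 ms), and clean_follower_afterMs is tied
// to sleepTimeMs_ so a single sleep can step the test across the cleanup
// boundary. A reading aid, not part of this patch:
//
//     // before the boundary: copyset-deletion commands are withheld
//     usleep((sleepTimeMs_ + 10) * 1000);
//     // after the boundary: the MDS may order stale replicas deleted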
- void BuildCopySetInfo(CopySetInfo *info, uint64_t epoch,
+ void BuildCopySetInfo(CopySetInfo* info, uint64_t epoch,
ChunkServerIdType leader,
- const std::set<ChunkServerIdType> &members,
+ const std::set<ChunkServerIdType>& members,
ChunkServerIdType candidateId = UNINTIALIZE_ID) { info->SetEpoch(epoch); info->SetLeader(leader); @@ -95,35 +95,51 @@ class HeartbeatExceptionTest : public ::testing::Test { };
/*
- * bug说明:稳定性测试环境,宕机一台机器之后设置pending,副本恢复过程中mds有切换
- * 最终发现有5个pending状态的chunkserver没有完成迁移
- * 分析:
- * 1. mds1提供服务时产生operator并下发给copyset-1{A,B,C} +
- * D的变更,C是offline状态
- * 2. copyset-1完成配置变更,此时leader上的配置更新为epoch=2/{A,B,C,D},
- * candidate上的配置为epoch=1/{A,B,C}, mds1中记录的配置为epoch=1/{A,B,C}
- * 3. mds1挂掉,mds2提供服务, 并从数据库加载copyset,mds2中copyset-1的配置
- * epoch=1/{A,B,C}
- * 4. candidate-D上报心跳,copyset-1的配置为epoch=1/{A,B,C}。mds2发现D上报的
- * copyset中epoch和mds2记录的相同,但D并不在mds2记录的复制组中且调度模块也没有
- * 对应的operator,下发命令把D上的copyset-1删除导致D被误删
+ * Bug description: in a stability-test environment, one machine went down
+ * and was marked pending; while the replicas were being recovered, an MDS
+ * failover occurred. In the end, five chunkservers stuck in pending state
+ * never finished migrating.
+ *
+ * Analysis:
+ * 1. While MDS1 is serving, it generates an operator for the change
+ *    copyset-1 {A,B,C} + D, where C is offline.
+ * 2. Copyset-1 completes the configuration change. The leader's
+ *    configuration is now epoch=2/{A,B,C,D}, the candidate's is
+ *    epoch=1/{A,B,C}, and the one recorded in MDS1 is epoch=1/{A,B,C}.
+ * 3. MDS1 crashes and MDS2 takes over, loading copysets from the database;
+ *    MDS2's record for copyset-1 is epoch=1/{A,B,C}.
+ * 4. Candidate D reports a heartbeat with copyset-1 at epoch=1/{A,B,C}.
+ *    MDS2 sees that the epoch D reports equals its own record, but D is not
+ *    in the replication group MDS2 recorded and the scheduling module has no
+ *    matching operator, so MDS2 orders copyset-1 on D deleted; D is removed
+ *    by mistake.
*
- * 解决方法:
- * 正常情况下,heartbeat模块会在mds启动一定时间(目前配置20min)后才可以下发删除copyset
- * 的命令,极大概率保证这段时间内copyset-leader上的配置更新到mds,
- * 防止刚加入复制组 副本上的数据被误删
+ * Solution:
+ * Normally the heartbeat module may issue copyset-deletion commands only
+ * after the MDS has been up for a certain period (currently configured as
+ * 20 minutes). With very high probability the copyset leaders' configurations
+ * reach the MDS within that window, preventing data on replicas that have
+ * just joined a replication group from being deleted by mistake.
*
- * 这个时间的起始点应该是mds正式对外提供服务的时间,而不是mds的启动时间。如果设置为mds的启动
- * 时间,备mds启动很久后如果能够提供服务,就立马可以删除,导致bug
+ * The starting point of that window should be the time the MDS actually
+ * starts serving requests, not the MDS process start time. If it is the
+ * process start time, a standby MDS that begins serving long after it was
+ * started may delete immediately, which causes the bug.
*/
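// A minimal sketch of the fix described above (illustrative only; the name
// and signature are hypothetical, not the actual MDS API). The point is that
// the grace period is anchored to the moment this MDS instance starts
// serving, not to process start time:
bool MayCleanFollower(uint64_t nowMs, uint64_t startServingMs,
                      uint64_t cleanFollowerAfterMs) {
    // Hold back copyset-deletion commands until leaders have had time to
    // report their up-to-date configurations to this MDS instance.
    return nowMs - startServingMs >= cleanFollowerAfterMs;
}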
TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) {
// 1. copyset-1(epoch=2, peers={1,2,3}, leader=1)
- // scheduler中有+4的operator
+ // The scheduler holds an operator that adds peer 4 (+4)
- CopySetKey key{ 1, 1 };
+ CopySetKey key{1, 1};
int startEpoch = 2; ChunkServerIdType leader = 1; ChunkServerIdType candidate = 4;
- std::set<ChunkServerIdType> peers{ 1, 2, 3 };
+ std::set<ChunkServerIdType> peers{1, 2, 3};
ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); Operator op(2, key, OperatorPriority::NormalPriority, @@ -131,8 +147,8 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { op.timeLimit = std::chrono::seconds(3); hbtest_->AddOperatorToOpController(op);
- // 2. leader上报copyset-1(epoch=2, peers={1,2,3}, leader=1)
- // mds下发配置变更
+ // 2. leader reports copyset-1(epoch=2, peers={1,2,3}, leader=1);
+ // mds issues a configuration change
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(leader, &req); CopySetInfo csInfo(key.first, key.second); @@ -140,7 +156,7 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response, 下发+D的配置变更
+ // Check the response: mds issues the +D configuration change
ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(key.first, conf.logicalpoolid()); @@ -150,25 +166,28 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { ASSERT_EQ(ConfigChangeType::ADD_PEER, conf.type()); ASSERT_EQ("10.198.100.3:9001:0", conf.configchangeitem().address());
- // 3. 清除mds中的operrator(模拟mds重启)
+ // 3. Clear the operator in mds (simulate an mds restart)
hbtest_->RemoveOperatorFromOpController(key);
- // 4. canndidate上报落后的与mds的配置(candidate回放日志时会一一apply旧配置):
+ // 4. The candidate reports a configuration that lags behind the one in
+ // mds (while replaying its log, the candidate applies the old
+ // configurations one by one):
// copyset-1(epoch=1, peers={1,2,3}, leader=1)
- // 由于mds.heartbeat.clean_follower_afterMs时间还没有到,mds还不能下发
- // 删除命令。mds下发为空,candidate上的数据不会被误删
+ // Because mds.heartbeat.clean_follower_afterMs has not yet elapsed, mds
+ // may not issue deletion commands. mds sends nothing back, so the data
+ // on the candidate will not be deleted by mistake.
rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(candidate, &req); BuildCopySetInfo(&csInfo, startEpoch - 1, leader, peers); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response, 为空
+ // Check the response; it is empty
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 5. 睡眠mds.heartbeat.clean_follower_afterMs + 10ms后
- // canndidate上报staled copyset-1(epoch=1, peers={1,2,3}, leader=1)
- // mds下发删除配置,candidate上的数据会被误删
+ // 5. After sleeping mds.heartbeat.clean_follower_afterMs + 10ms, the
+ // candidate reports staled copyset-1(epoch=1, peers={1,2,3}, leader=1);
+ // mds issues the delete command, and the data on the candidate is
+ // deleted by mistake
usleep((sleepTimeMs_ + 10) * 1000); rep.Clear(); req.Clear(); @@ -183,7 +202,8 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { ASSERT_EQ(peers.size(), conf.peers_size()); ASSERT_EQ(startEpoch, conf.epoch());
- // 6. leader上报最新配置copyset-1(epoch=3, peers={1,2,3,4}, leader=1)
+ // 6. leader reports the latest configuration copyset-1(epoch=3,
+ // peers={1,2,3,4}, leader=1)
auto newPeers = peers; newPeers.emplace(candidate); auto newEpoch = startEpoch + 1; @@ -193,24 +213,25 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { BuildCopySetInfo(&csInfo, startEpoch + 1, leader, newPeers); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response, 为空
+ // Check the response; it is empty
ASSERT_EQ(0, rep.needupdatecopysets_size());
- // 检查mdstopology的数据
+ // Check the data in mds topology
::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE(hbtest_->topology_->GetCopySet(key, &copysetInfo)); ASSERT_EQ(newEpoch, copysetInfo.GetEpoch()); ASSERT_EQ(leader, copysetInfo.GetLeader()); ASSERT_EQ(newPeers, copysetInfo.GetCopySetMembers());
- // 7. canndidate上报staled copyset-1(epoch=1, peers={1,2,3}, leader=1)
- // mds不下发配置
+ // 7. candidate reports staled copyset-1(epoch=1, peers={1,2,3}, leader=1);
+ // mds does not issue any configuration
rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(candidate, &req); BuildCopySetInfo(&csInfo, startEpoch - 1, leader, peers); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
- // 检查response, 下发copyset当前配置指导candidate删除数据
+ // Check the response; it is empty
ASSERT_EQ(0, rep.needupdatecopysets_size()); }
diff --git a/test/integration/raft/raft_config_change_test.cpp b/test/integration/raft/raft_config_change_test.cpp
index 5660617558..ca34604820 100644
--- a/test/integration/raft/raft_config_change_test.cpp
+++ b/test/integration/raft/raft_config_change_test.cpp
@@ -21,24 +21,24 @@ */
#include -#include #include +#include #include
-#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli2.h"
+#include "src/chunkserver/copyset_node_manager.h"
#include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h"
-#include "test/integration/common/peer_cluster.h" #include "test/integration/common/config_generator.h"
+#include "test/integration/common/peer_cluster.h"
namespace curve { namespace chunkserver {
+using curve::fs::FileSystemType;
using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory;
-using curve::fs::FileSystemType;
const char kRaftConfigChangeTestLogDir[] = "./runlog/RaftConfigChange"; const char* kFakeMdsAddr = "127.0.0.1:9080"; @@ -46,96 +46,66 @@ const char* kFakeMdsAddr = "127.0.0.1:9080"; static constexpr uint32_t kOpRequestAlignSize = 4096;
static const char* raftConfigParam[5][16] = {
- { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9081", - "-chunkServerStoreUri=local://./9081/", - "-chunkServerMetaUri=local://./9081/chunkserver.dat", - "-copySetUri=local://./9081/copysets", - "-raftSnapshotUri=curve://./9081/copysets", - "-raftLogUri=curve://./9081/copysets", - "-recycleUri=local://./9081/recycler", - "-chunkFilePoolDir=./9081/chunkfilepool/", - "-chunkFilePoolMetaPath=./9081/chunkfilepool.meta", - "-walFilePoolDir=./9081/walfilepool/", - "-walFilePoolMetaPath=./9081/walfilepool.meta", - "-conf=./9081/chunkserver.conf", - "-raft_sync_segments=true", - NULL - },
- { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9082", - "-chunkServerStoreUri=local://./9082/", - "-chunkServerMetaUri=local://./9082/chunkserver.dat", - "-copySetUri=local://./9082/copysets", - "-raftSnapshotUri=curve://./9082/copysets", - "-raftLogUri=curve://./9082/copysets", - "-recycleUri=local://./9082/recycler",
- "-chunkFilePoolDir=./9082/chunkfilepool/", - "-chunkFilePoolMetaPath=./9082/chunkfilepool.meta", - "-walFilePoolDir=./9082/walfilepool/", - "-walFilePoolMetaPath=./9082/walfilepool.meta", - "-conf=./9082/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9083", - "-chunkServerStoreUri=local://./9083/", - "-chunkServerMetaUri=local://./9083/chunkserver.dat", - "-copySetUri=local://./9083/copysets", - "-raftSnapshotUri=curve://./9083/copysets", - "-raftLogUri=curve://./9083/copysets", - "-recycleUri=local://./9083/recycler", - "-chunkFilePoolDir=./9083/chunkfilepool/", - "-chunkFilePoolMetaPath=./9083/chunkfilepool.meta", - "-walFilePoolDir=./9083/walfilepool/", - "-walFilePoolMetaPath=./9083/walfilepool.meta", - "-conf=./9083/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9084", - "-chunkServerStoreUri=local://./9084/", - "-chunkServerMetaUri=local://./9084/chunkserver.dat", - "-copySetUri=local://./9084/copysets", - "-raftSnapshotUri=curve://./9084/copysets", - "-raftLogUri=curve://./9084/copysets", - "-recycleUri=local://./9084/recycler", - "-chunkFilePoolDir=./9084/chunkfilepool/", - "-chunkFilePoolMetaPath=./9084/chunkfilepool.meta", - "-walFilePoolDir=./9084/walfilepool/", - "-walFilePoolMetaPath=./9084/walfilepool.meta", - "-conf=./9084/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9085", - "-chunkServerStoreUri=local://./9085/", - "-chunkServerMetaUri=local://./9085/chunkserver.dat", - "-copySetUri=local://./9085/copysets", - "-raftSnapshotUri=curve://./9085/copysets", - "-raftLogUri=curve://./9085/copysets", - "-recycleUri=local://./9085/recycler", - "-chunkFilePoolDir=./9085/chunkfilepool/", - "-chunkFilePoolMetaPath=./9085/chunkfilepool.meta", - "-walFilePoolDir=./9085/walfilepool/", - "-walFilePoolMetaPath=./9085/walfilepool.meta", - "-conf=./9085/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9081", + "-chunkServerStoreUri=local://./9081/", + "-chunkServerMetaUri=local://./9081/chunkserver.dat", + "-copySetUri=local://./9081/copysets", + "-raftSnapshotUri=curve://./9081/copysets", + "-raftLogUri=curve://./9081/copysets", + "-recycleUri=local://./9081/recycler", + "-chunkFilePoolDir=./9081/chunkfilepool/", + "-chunkFilePoolMetaPath=./9081/chunkfilepool.meta", + "-walFilePoolDir=./9081/walfilepool/", + "-walFilePoolMetaPath=./9081/walfilepool.meta", + "-conf=./9081/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9082", + "-chunkServerStoreUri=local://./9082/", + "-chunkServerMetaUri=local://./9082/chunkserver.dat", + "-copySetUri=local://./9082/copysets", + "-raftSnapshotUri=curve://./9082/copysets", + "-raftLogUri=curve://./9082/copysets", + "-recycleUri=local://./9082/recycler", + "-chunkFilePoolDir=./9082/chunkfilepool/", + "-chunkFilePoolMetaPath=./9082/chunkfilepool.meta", + "-walFilePoolDir=./9082/walfilepool/", + "-walFilePoolMetaPath=./9082/walfilepool.meta", + "-conf=./9082/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9083", + "-chunkServerStoreUri=local://./9083/", + "-chunkServerMetaUri=local://./9083/chunkserver.dat", + "-copySetUri=local://./9083/copysets", + "-raftSnapshotUri=curve://./9083/copysets", 
+ "-raftLogUri=curve://./9083/copysets", + "-recycleUri=local://./9083/recycler", + "-chunkFilePoolDir=./9083/chunkfilepool/", + "-chunkFilePoolMetaPath=./9083/chunkfilepool.meta", + "-walFilePoolDir=./9083/walfilepool/", + "-walFilePoolMetaPath=./9083/walfilepool.meta", + "-conf=./9083/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9084", + "-chunkServerStoreUri=local://./9084/", + "-chunkServerMetaUri=local://./9084/chunkserver.dat", + "-copySetUri=local://./9084/copysets", + "-raftSnapshotUri=curve://./9084/copysets", + "-raftLogUri=curve://./9084/copysets", + "-recycleUri=local://./9084/recycler", + "-chunkFilePoolDir=./9084/chunkfilepool/", + "-chunkFilePoolMetaPath=./9084/chunkfilepool.meta", + "-walFilePoolDir=./9084/walfilepool/", + "-walFilePoolMetaPath=./9084/walfilepool.meta", + "-conf=./9084/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9085", + "-chunkServerStoreUri=local://./9085/", + "-chunkServerMetaUri=local://./9085/chunkserver.dat", + "-copySetUri=local://./9085/copysets", + "-raftSnapshotUri=curve://./9085/copysets", + "-raftLogUri=curve://./9085/copysets", + "-recycleUri=local://./9085/recycler", + "-chunkFilePoolDir=./9085/chunkfilepool/", + "-chunkFilePoolMetaPath=./9085/chunkfilepool.meta", + "-walFilePoolDir=./9085/walfilepool/", + "-walFilePoolMetaPath=./9085/walfilepool.meta", + "-conf=./9085/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; class RaftConfigChangeTest : public testing::Test { @@ -179,39 +149,34 @@ class RaftConfigChangeTest : public testing::Test { ASSERT_TRUE(cg4.Init("9084")); ASSERT_TRUE(cg5.Init("9085")); cg1.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg1.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg1.SetKV("chunkserver.common.logDir", - kRaftConfigChangeTestLogDir); + std::to_string(snapshotIntervalS)); + cg1.SetKV("chunkserver.common.logDir", kRaftConfigChangeTestLogDir); cg1.SetKV("mds.listen.addr", kFakeMdsAddr); cg2.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg2.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg2.SetKV("chunkserver.common.logDir", - kRaftConfigChangeTestLogDir); + std::to_string(snapshotIntervalS)); + cg2.SetKV("chunkserver.common.logDir", kRaftConfigChangeTestLogDir); cg2.SetKV("mds.listen.addr", kFakeMdsAddr); cg3.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg3.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg3.SetKV("chunkserver.common.logDir", - kRaftConfigChangeTestLogDir); + std::to_string(snapshotIntervalS)); + cg3.SetKV("chunkserver.common.logDir", kRaftConfigChangeTestLogDir); cg3.SetKV("mds.listen.addr", kFakeMdsAddr); cg4.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg4.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg4.SetKV("chunkserver.common.logDir", - kRaftConfigChangeTestLogDir); + std::to_string(snapshotIntervalS)); + cg4.SetKV("chunkserver.common.logDir", kRaftConfigChangeTestLogDir); cg4.SetKV("mds.listen.addr", kFakeMdsAddr); cg5.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); 
cg5.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg5.SetKV("chunkserver.common.logDir", - kRaftConfigChangeTestLogDir); + std::to_string(snapshotIntervalS)); + cg5.SetKV("chunkserver.common.logDir", kRaftConfigChangeTestLogDir); cg5.SetKV("mds.listen.addr", kFakeMdsAddr); ASSERT_TRUE(cg1.Generate()); ASSERT_TRUE(cg2.Generate()); @@ -268,22 +233,20 @@ class RaftConfigChangeTest : public testing::Test { int confChangeTimeoutMs; int snapshotIntervalS; std::map paramsIndexs; - std::vector params; + std::vector params; int maxWaitInstallSnapshotMs; - // 等待多个副本数据一致的时间 + // Waiting for multiple replica data to be consistent int waitMultiReplicasBecomeConsistent; }; - - butil::AtExitManager atExitManager; /** - * 1. 3个节点正常启动 - * 2. 移除一个follower - * 3. 重复移除上一个follower - * 4. 再添加回来 - * 5. 重复添加回来 + * 1. 3 nodes start normally + * 2. Remove a follower + * 3. Repeatedly remove the previous follower + * 4. Add it back again + * 5. Repeatedly add back */ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { LogicPoolID logicPoolId = 2; @@ -293,7 +256,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 member LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -301,12 +264,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -317,15 +276,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 移除1个follower + // 2. Remove 1 follower LOG(INFO) << "remove 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -335,61 +289,40 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, removePeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, removePeer, options); ASSERT_TRUE(st1.ok()); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 3. 重复移除,验证重复移除的逻辑是否正常 - butil::Status - st2 = RemovePeer(logicPoolId, copysetId, conf, removePeer, options); + // 3. Duplicate removal, verify if the logic of duplicate removal is normal + butil::Status st2 = + RemovePeer(logicPoolId, copysetId, conf, removePeer, options); ASSERT_TRUE(st2.ok()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. add回来 + // 4. 
Add it back conf.remove_peer(removePeer.address()); - butil::Status - st3 = AddPeer(logicPoolId, copysetId, conf, removePeer, options); + butil::Status st3 = + AddPeer(logicPoolId, copysetId, conf, removePeer, options); ASSERT_TRUE(st3.ok()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 5. 重复add回来,验证重复添加的逻辑是否正常 + // 5. Repeat the add and verify if the logic added repeatedly is normal conf.add_peer(removePeer.address()); - butil::Status - st4 = AddPeer(logicPoolId, copysetId, conf, removePeer, options); + butil::Status st4 = + AddPeer(logicPoolId, copysetId, conf, removePeer, options); ASSERT_TRUE(st4.ok()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 3); } @@ -402,7 +335,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveShutdownPeer) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -410,12 +343,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveShutdownPeer) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -426,75 +355,50 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveShutdownPeer) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 移除此follower + // 3. Remove this follower braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st1.ok()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 拉起follower + // 4. 
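// Editor's note on the fill-byte protocol used by these tests: every
// WriteThenReadVerify() call writes the byte `ch` across the chunk and the
// caller post-increments it, so ReadVerify(..., ch - 1, ...) after a restart
// checks the last completed round. A minimal model of that verification
// (sketch; assumes the verifier compares every byte against the fill value):
static bool ChunkFilledWith(const char* data, size_t len, char expected) {
    for (size_t i = 0; i < len; ++i) {
        if (data[i] != expected) {
            return false;  // stale or torn data
        }
    }
    return true;
}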
Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 5. add回来 + // 5. Add it back conf.remove_peer(shutdownPeer.address()); - butil::Status - st2 = AddPeer(logicPoolId, copysetId, conf, shutdownPeer, options); + butil::Status st2 = + AddPeer(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st2.ok()) << st2.error_str(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } @@ -507,7 +411,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveHangPeer) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -515,12 +419,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveHangPeer) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -531,81 +431,56 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveHangPeer) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. hang 1个follower + // 2. Hang 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.HangPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 移除此follower + // 3. Remove this follower braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st1.ok()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 恢复follower + // 4. Restore follower LOG(INFO) << "recover hang follower"; ASSERT_EQ(0, cluster.SignalPeer(shutdownPeer)); - // 5. add回来 + // 5. 
Add it back conf.remove_peer(shutdownPeer.address()); - butil::Status - st2 = AddPeer(logicPoolId, copysetId, conf, shutdownPeer, options); + butil::Status st2 = + AddPeer(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st2.ok()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 1. 3个节点正常启动 - * 2. 移除leader - * 3. 再将old leader添加回来 + * 1. 3 nodes start normally + * 2. Remove leader + * 3. Add the old leader back again */ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { LogicPoolID logicPoolId = 2; @@ -615,7 +490,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -623,12 +498,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -638,22 +509,17 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 移除leader + // 2. Remove leader LOG(INFO) << "remove leader"; braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); ASSERT_TRUE(st1.ok()); Peer oldLeader = leaderPeer; @@ -661,50 +527,35 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STRNE(oldLeader.address().c_str(), leaderPeer.address().c_str()); - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. add回来 + // 3. 
Add it back conf.remove_peer(oldLeader.address()); - butil::Status - st3 = AddPeer(logicPoolId, copysetId, conf, oldLeader, options); + butil::Status st3 = + AddPeer(logicPoolId, copysetId, conf, oldLeader, options); ASSERT_TRUE(st3.ok()); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STRNE(oldLeader.address().c_str(), leaderPeer.address().c_str()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 3); } /** - * 1. 3个节点正常启动 - * 2. 挂一个follower - * 3. 再将leader移除掉 - * 4. follower拉起来 + * 1. 3 nodes start normally + * 2. Hang a follower + * 3. Remove the leader again + * 4. pull it up follower */ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { LogicPoolID logicPoolId = 2; @@ -714,7 +565,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -722,12 +573,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -738,79 +585,57 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 移除leader + // 3. Remove leader LOG(INFO) << "remove leader: " << leaderPeer.address(); braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); Peer oldLeader = leaderPeer; /** - * 一般能够移除成功,但是因为一个follower已经down了,那么 - * leader会自动进行check term,会发现已经有大多数的follower - * 已经失联,此时leader会主动step down,所以的request会提前 - * 返回失败,所以下面的断言会失败,但是移除本身会成功 + * Removal is generally successful, but if one follower has already gone + * down, the leader will automatically perform a term check and discover + * that the majority of followers are no longer connected. 
At this point, + * the leader will proactively step down, causing all requests to return as + * failures prematurely. Therefore, the assertions below may fail, but the + * removal itself will be successful. */ -// ASSERT_TRUE(st1.ok()); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + // ASSERT_TRUE(st1.ok()); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ::usleep(1000 * electionTimeoutMs * 2); - // 4. 拉起follower + // 4. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // leader已经移除,所以只用验证2个副本数据一致性 + // The leader has been removed, so only the consistency of the data for two + // replicas is verified ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; for (Peer peer : peers) { @@ -822,10 +647,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { } /** - * 1. 3个节点正常启动 - * 2. hang一个follower - * 3. 再将leader移除掉 - * 4. follower拉起来 + * 1. 3 nodes start normally + * 2. Hang a follower + * 3. Remove the leader again + * 4. pull up follower */ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { LogicPoolID logicPoolId = 2; @@ -835,7 +660,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -843,12 +668,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -859,78 +680,56 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. hang1个follower + // 2. Hang 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer hangPeer = followerPeers[0]; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 移除leader + // 3. 
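// Editor's note on the block comment that follows step 3 below: with N = 3
// voters the raft quorum is N / 2 + 1 = 2, so one hung follower plus the
// leader removing itself leaves at most one reachable voter. That is below
// quorum, hence the leader steps down and the RemovePeer status cannot be
// asserted. The arithmetic as a one-liner (sketch):
static inline int RaftQuorum(int voters) { return voters / 2 + 1; }  // 3 -> 2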
Remove leader LOG(INFO) << "remove leader: " << leaderPeer.address(); braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); Peer oldLeader = leaderPeer; /** - * 一般能够移除成功,但是因为一个follower已经down了,那么 - * leader会自动进行check term,会发现已经有大多数的follower - * 已经失联,此时leader会主动step down,所以的request会提前 - * 返回失败,所以下面的断言会失败,但是移除本身会成功 + * Removal is generally successful, but if one follower has already gone + * down, the leader will automatically perform a term check and discover + * that the majority of followers are no longer connected. At this point, + * the leader will proactively step down, causing all requests to return as + * failures prematurely. Therefore, the assertions below may fail, but the + * removal itself will be successful. */ -// ASSERT_TRUE(st1.ok()); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + // ASSERT_TRUE(st1.ok()); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ::usleep(1000 * electionTimeoutMs * 2); - // 4. 拉起follower + // 4. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.SignalPeer(hangPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // leader已经移除,所以验证2个副本数据一致性 + // The leader has been removed, so verify the data consistency of the two + // replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; for (Peer peer : peers) { @@ -942,9 +741,9 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { } /** - * 1. {A、B、C} 3个节点正常启动,假设A是leader - * 2. 挂掉B,transfer leader给B - * 3. 拉起B,transfer leader给B + * 1. {A, B, C} three nodes start normally, assuming A is the leader + * 2. Hang up B, transfer leader to B + * 3. Pull up B, transfer leader to B */ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { LogicPoolID logicPoolId = 2; @@ -954,7 +753,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -962,12 +761,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -978,28 +773,18 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 3. transfer leader to shutdown peer braft::cli::CliOptions options; @@ -1009,19 +794,14 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { butil::Status st1 = TransferLeader(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st1.ok()); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch -1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STRNE(shutdownPeer.address().c_str(), leaderId.to_string().c_str()); - // 4. 拉起follower,然后再把leader transfer过去 + // 4. 
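// Editor's note: the kMaxLoop retry loop used for the transfer in step 4
// below is the tests' standard way to move leadership to a freshly restarted
// peer, since early attempts can race with its recovery. A hypothetical
// wrapper expressing the same pattern (sketch; mirrors the loop below,
// including the 1s backoff):
static bool TransferLeaderWithRetry(LogicPoolID logicPoolId,
                                    CopysetID copysetId,
                                    const Configuration& conf,
                                    const Peer& target,
                                    const braft::cli::CliOptions& options,
                                    PeerCluster* cluster, int maxLoop) {
    Peer leader;
    for (int i = 0; i < maxLoop; ++i) {
        butil::Status s =
            TransferLeader(logicPoolId, copysetId, conf, target, options);
        if (0 == s.error_code()) {
            cluster->WaitLeader(&leader);
            if (leader.address() == target.address()) {
                return true;  // leadership landed on the target
            }
        }
        ::sleep(1);  // back off before the next attempt
    }
    return false;
}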
Pull up the follower and then transfer the leader over LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); @@ -1032,11 +812,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { butil::Status status; LOG(INFO) << "start transfer leader to " << shutdownPeer.address(); for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - shutdownPeer, - options); + status = + TransferLeader(logicPoolId, copysetId, conf, shutdownPeer, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderPeer); if (leaderPeer.address() == shutdownPeer.address()) { @@ -1050,32 +827,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { ASSERT_STREQ(shutdownPeer.address().c_str(), leaderPeer.address().c_str()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 1. {A、B、C} 3个节点正常启动,假设A是leader - * 2. hang B,transfer leader给B - * 3. 恢复 B,transfer leader给B + * 1. {A, B, C} three nodes start normally, assuming A is the leader + * 2. Hang B, transfer leader to B + * 3. Restore B, transfer leader to B */ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { LogicPoolID logicPoolId = 2; @@ -1085,7 +852,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1093,12 +860,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1109,28 +872,18 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. hang1个follower + // 2. Hang 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer hangPeer = followerPeers[0]; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 3. 
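// Editor's note for step 3 below: the transfer request to a hung peer may be
// accepted, but the target cannot take over, so once a leader re-emerges it
// must be a different peer; that is what the ASSERT_STRNE on
// hangPeer.address() afterwards verifies.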
transfer leader to hang peer braft::cli::CliOptions options; @@ -1145,7 +898,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STRNE(hangPeer.address().c_str(), leaderId.to_string().c_str()); - // 4. 恢复follower,然后再把leader transfer过去 + // 4. Restore the follower and then transfer the leader LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.SignalPeer(hangPeer)); @@ -1155,54 +908,43 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { butil::Status status; LOG(INFO) << "start transfer leader to " << hangPeer.address(); for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - hangPeer, - options); + status = + TransferLeader(logicPoolId, copysetId, conf, hangPeer, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderPeer); if (leaderPeer.address() == hangPeer.address()) { break; } } - LOG(INFO) << i + 1 << " th transfer leader to " - << hangPeer.address() << " failed"; + LOG(INFO) << i + 1 << " th transfer leader to " << hangPeer.address() + << " failed"; ::sleep(1); } ASSERT_STREQ(hangPeer.address().c_str(), leaderPeer.address().c_str()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** * - * 1. {A、B、C} 3个节点正常启 - * 2. 挂掉一个follower - * 3. 起一个节点D,Add D(需要额外确保通过snapshot恢复) - * 4. remove挂掉的follower + * 1. {A, B, C} three nodes start normally + * 2. Hang up a follower + * 3. Start a node D, Add D (additional ensure recovery through snapshot) + * 4. Remove the failed follower */ -TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInstallSnapshot) { // NOLINT +TEST_F( + RaftConfigChangeTest, + ThreeNodeShutdownPeerAndThenAddNewFollowerFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1210,7 +952,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1218,12 +960,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1234,54 +972,34 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 通过打两次快照确保后面的恢复必须走安装快照 + // Wait snapshot, ensuring that subsequent restores must follow the + // installation snapshot by taking two snapshots LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起peer4 - ASSERT_EQ(0, cluster.StartPeer(peer4, - PeerCluster::PeerToId(peer4))); + // 3. 
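// Editor's note on the two snapshot rounds above: each round sleeps past the
// snapshot interval and then writes fresh entries, so the log prefix the
// stopped follower still needs gets compacted away and its later catch-up is
// forced through install-snapshot. The recipe as a helper (sketch; needs
// <functional>; issueTraffic stands for the WriteThenReadVerify calls):
static void ForceInstallSnapshotPath(int snapshotIntervalS, int rounds,
                                     const std::function<void()>& issueTraffic) {
    for (int i = 0; i < rounds; ++i) {
        ::sleep(2 * snapshotIntervalS);  // let a snapshot be taken
        issueTraffic();                  // append entries newer than it
    }
}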
Pull up peer4 + ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ::sleep(1); Configuration conf = cluster.CopysetConf(); @@ -1291,25 +1009,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta butil::Status st = AddPeer(logicPoolId, copysetId, conf, peer4, options); ASSERT_TRUE(st.ok()) << st.error_str(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); ::usleep(waitMultiReplicasBecomeConsistent * 1000); peers.push_back(peer4); @@ -1324,12 +1032,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta /** * - * 1. {A、B、C} 3个节点正常启 - * 2. hang一个follower - * 3. 起一个节点D,Add D(需要额外确保通过snapshot恢复) - * 4. remove挂掉的follower + * 1. {A, B, C} three nodes start normally + * 2. Hang a follower + * 3. Start a node D, Add D (additional ensure recovery through snapshot) + * 4. Remove the failed follower */ -TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSnapshot) { // NOLINT +TEST_F(RaftConfigChangeTest, + ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1337,7 +1046,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1345,12 +1054,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1361,54 +1066,33 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer hangPeer = followerPeers[0]; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起peer4 - ASSERT_EQ(0, cluster.StartPeer(peer4, - PeerCluster::PeerToId(peer4))); + // 3. Pull up peer4 + ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ::sleep(1); @@ -1419,25 +1103,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn butil::Status st = AddPeer(logicPoolId, copysetId, conf, peer4, options); ASSERT_TRUE(st.ok()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, hangPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, hangPeer, options); ::usleep(waitMultiReplicasBecomeConsistent * 1000); peers.push_back(peer4); @@ -1451,11 +1125,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn } /** - * 1. {A、B、C} 3个节点正常启 - * 2. 挂了follower,并删除其所有raft log和数据 - * 3. 重启follower,follower能够通过数据恢复最终追上leader + * 1. {A, B, C} three nodes start normally + * 2. Hang up the follower and delete all its raft logs and data + * 3. Restart the follower, and the follower can eventually catch up with the + * leader through data recovery */ -TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapshot) { // NOLINT +TEST_F(RaftConfigChangeTest, + ThreeNodeRemoveDataAndThenRecoverFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1463,7 +1139,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1471,12 +1147,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1487,92 +1159,63 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 删除此peer的数据,然后重启 + // Delete the data for this peer and restart it ASSERT_EQ(0, ::system(PeerCluster::RemoveCopysetDirCmd(shutdownPeer).c_str())); - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); - ASSERT_FALSE(fs->DirExists(PeerCluster::CopysetDirWithoutProtocol( - shutdownPeer))); + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + ASSERT_FALSE( + fs->DirExists(PeerCluster::CopysetDirWithoutProtocol(shutdownPeer))); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 1. {A、B、C} 3个节点正常启 - * 2. 
挂了follower,并删除其所有raft log - * 3. 重启follower + * 1. {A, B, C} three nodes start normally + * 2. Hang up the follower and delete all its raft logs + * 3. Restart follower */ -TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnapshot) { // NOLINT +TEST_F(RaftConfigChangeTest, + ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1580,7 +1223,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1588,12 +1231,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1604,96 +1243,69 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 删除此peer的log,然后重启 - ::system(PeerCluster::RemoveCopysetLogDirCmd(shutdownPeer, - logicPoolId, - copysetId).c_str()); - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); - ASSERT_FALSE(fs->DirExists(PeerCluster::RemoveCopysetLogDirCmd(shutdownPeer, - logicPoolId, - copysetId))); - - // wait snapshot, 保证能够触发安装快照 + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // Delete the log of this peer and restart it + ::system(PeerCluster::RemoveCopysetLogDirCmd(shutdownPeer, logicPoolId, + copysetId) + .c_str()); + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + ASSERT_FALSE(fs->DirExists(PeerCluster::RemoveCopysetLogDirCmd( + shutdownPeer, logicPoolId, copysetId))); + + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. 
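// Editor's note: the DirExists() assertion above is handed the return value
// of RemoveCopysetLogDirCmd(), i.e. the removal *command* string (the same
// value passed to ::system() a few lines earlier), not the copyset log
// directory path, so the ASSERT_FALSE passes vacuously. This looks carried
// over from the pre-format code.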
Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中挂掉leader - * 本次install snapshot失败,但是new leader会被选出来,new leader继续给 - * follower恢复数据,最终follower数据追上leader并一致 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot), and hang the leader during the recovery process + * The install snapshot failed this time, but the new leader will be selected + * and will continue to provide The follower recovers data, and ultimately the + * follower data catches up with the leader and is consistent */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderShutdown) { // NOLINT +TEST_F( + RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderShutdown) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1701,7 +1313,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1709,12 +1321,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1725,91 +1333,57 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader挂掉 + // 4. After a period of random sleep, hang up the leader and simulate the + // installation snapshot when the leader hangs up int sleepMs = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs); ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer oldLeader = leaderPeer; - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); LOG(INFO) << "new leader is: " << leaderPeer.address(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; @@ -1822,11 +1396,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中leader重启 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot), and restart the leader during the recovery process */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderRestart) { // NOLINT +TEST_F(RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderRestart) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1834,7 +1410,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1842,12 +1418,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1858,93 +1430,59 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader挂掉 + // 4. 
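+    // The fault below lands at a uniformly random point inside the
+    // install-snapshot window, so repeated runs exercise different phases of
+    // the snapshot transfer: butil::fast_rand_less_than(n) is uniform over
+    // [0, n), hence sleepMs is uniform over [1, maxWaitInstallSnapshotMs].
+    // 4. 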
After a period of random sleep, hang up the leader and simulate the + // installation snapshot when the leader hangs up int sleepMs = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs); ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer oldLeader = leaderPeer; - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); LOG(INFO) << "new leader is: " << leaderPeer.address(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; @@ -1957,11 +1495,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中hang leader + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot), and hang the leader during the recovery process */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderHang) { // NOLINT +TEST_F(RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderHang) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1969,7 +1509,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1977,12 +1517,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1993,91 +1529,57 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. 
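+    // Two fault flavors are used in this file: ShutdownPeer() kills the
+    // peer's process, while HangPeer() leaves it running but unresponsive
+    // (presumably paused, e.g. via SIGSTOP) until SignalPeer() wakes it.
+    // Hangs exercise silent-peer behavior; shutdowns exercise crash
+    // recovery.
+    // 2. 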
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader hang + // 4. After a period of random sleep, hang up the leader and simulate the + // leader hang during installation snapshot int sleepMs = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs); ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); Peer oldLeader = leaderPeer; - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); LOG(INFO) << "new leader is: " << leaderPeer.address(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; @@ -2090,11 +1592,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中leader hang一会 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot), and during the recovery process, the leader will + * hang for a while */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderHangMoment) { // NOLINT +TEST_F( + RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderHangMoment) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -2102,7 +1608,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2110,12 +1616,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2126,58 +1628,39 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader挂掉 + // 4. 
After a period of random sleep, hang up the leader and simulate the + // installation snapshot when the leader hangs up int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs1); @@ -2191,22 +1674,12 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); LOG(INFO) << "new leader is: " << leaderPeer.address(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; @@ -2219,12 +1692,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower, - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中follower挂了 - * 4. 一段时间后拉起来 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower, + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot), but the follower hung during the recovery process + * 4. After a period of time, pull it up */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerShutdown) { // NOLINT +TEST_F( + RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerShutdown) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -2232,7 +1708,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2240,12 +1716,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2256,63 +1728,43 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); - // 4. 随机睡眠一段时间后,挂掉follower,模拟install snapshot的时候 - // follower出现问题 + // 4. After a random period of sleep, hang up the follower and simulate the + // installation snapshot Problem with follower int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs1); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // 5. 把follower拉来 + // 5. Bring the follower here int sleepMs2 = butil::fast_rand_less_than(1000) + 1; ::usleep(1000 * sleepMs2); ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, @@ -2321,33 +1773,27 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower, - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中follower重启了 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower, + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot). During the recovery process, the follower + * restarted */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerRestart) { // NOLINT +TEST_F( + RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerRestart) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -2355,7 +1801,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
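+    // This variant restarts the follower in the middle of the snapshot
+    // install; the expectation that this is safe rests on braft staging a
+    // downloaded snapshot in a temporary directory and switching to it
+    // atomically only once complete, so an interrupted copy is discarded
+    // and the fetch simply starts over after the restart.
+    // 1. 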
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2363,12 +1809,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2379,97 +1821,70 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); - // 4. 随机睡眠一段时间后,挂掉follower,模拟install snapshot的时候 - // follower出现问题 + // 4. After a random period of sleep, hang up the follower and simulate the + // installation snapshot Problem with follower int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs1); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // 5. 把follower拉来 + // 5. 
Bring the follower back up
    ASSERT_EQ(0, cluster.StartPeer(shutdownPeer,
                                   PeerCluster::PeerToId(shutdownPeer)));

    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));

-    // read之前写入的数据验证
-    ReadVerify(leaderPeer,
-               logicPoolId,
-               copysetId,
-               chunkId,
-               length,
-               ch - 1,
+    // Verification of data written before read
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1,
               loop);

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch ++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000);
    CopysetStatusVerify(peers, logicPoolId, copysetId, 1);
}

/**
- * 1. {A、B、C} 3个节点正常启动
- * 2. 挂了follower,
- * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中follower hang了
- * 4. 一段时间后恢复
+ * 1. {A, B, C} 3 nodes start normally
+ * 2. Hang up the follower,
+ * 3. Restart to recover the follower (additional assurance is required to
+ * recover through snapshot), but the follower hangs during the recovery
+ * process
+ * 4. Restore after a period of time
 */
-TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerHang) {  // NOLINT
+TEST_F(RaftConfigChangeTest,
+       ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerHang) {  // NOLINT
    LogicPoolID logicPoolId = 2;
    CopysetID copysetId = 100001;
    uint64_t chunkId = 1;
@@ -2477,7 +1892,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo
    char ch = 'a';
    int loop = 25;

-    // 1. 启动3个成员的复制组
+    // 1. Start a replication group of 3 members
    LOG(INFO) << "start 3 chunkservers";
    PeerId leaderId;
    Peer leaderPeer;
@@ -2485,12 +1900,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo
    peers.push_back(peer1);
    peers.push_back(peer2);
    peers.push_back(peer3);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
    ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
    cluster.SetElectionTimeoutMs(electionTimeoutMs);
    cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -2501,63 +1912,43 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

-    // 2. 挂掉1个follower
+    // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); - // 4. 随机睡眠一段时间后,hang follower,模拟install snapshot的时候 - // follower出现问题 + // 4. After a period of random sleep, hang the follower and simulate the + // installation snapshot Problem with follower int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs1); ASSERT_EQ(0, cluster.HangPeer(shutdownPeer)); - // 5. 把follower恢复 + // 5. Restore the follower int sleepMs2 = butil::fast_rand_less_than(1000) + 1; ::usleep(1000 * sleepMs2); ASSERT_EQ(0, cluster.SignalPeer(shutdownPeer)); @@ -2565,32 +1956,23 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的复制组,并挂掉follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 恢复follower + * Verify the replication groups of three nodes and hang the follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the follower + * 3. Restore follower */ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -2600,7 +1982,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2608,12 +1990,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2624,52 +2002,32 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. 
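+    // After recovery this test also transfers leadership to the recovered
+    // peer: raft's election restriction only lets a peer lead if its
+    // log/snapshot state is at least as complete as a majority's, so a
+    // successful transfer proves the snapshot-based catch-up really
+    // finished.
+    // 3. 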
Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); @@ -2684,11 +2042,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { butil::Status status; LOG(INFO) << "start transfer leader to " << shutdownPeer.address(); for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - shutdownPeer, - options); + status = + TransferLeader(logicPoolId, copysetId, conf, shutdownPeer, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderPeer); if (leaderPeer.address() == shutdownPeer.address()) { @@ -2703,31 +2058,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { ASSERT_EQ(0, ::strcmp(leaderPeer.address().c_str(), shutdownPeer.address().c_str())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 1. 创建5个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉两个follower - * 3. 让两个follower从installsnapshot恢复 + * 1. Create a replication group of 5 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up two followers + * 3. Restore two followers from installsnapshot */ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -2737,7 +2083,7 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { char ch = 'a'; int loop = 25; - // 1. 启动5个成员的复制组 + // 1. Start a replication group of 5 member LOG(INFO) << "start 5 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2747,12 +2093,8 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { peers.push_back(peer3); peers.push_back(peer4); peers.push_back(peer5); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2765,15 +2107,10 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉2个follower + // 2. 
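+    // Quorum arithmetic behind this test: a majority of 5 is 3, so the
+    // group stays writable with two followers down -- unlike the 3-node
+    // tests, where losing two peers stalls writes until one comes back.
+    // 2. 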
Hang up 2 followers LOG(INFO) << "shutdown 2 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -2782,37 +2119,22 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { Peer shutdownPeer2 = followerPeers[1]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer1)); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer2)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown 2 follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer1, PeerCluster::PeerToId(shutdownPeer1))); @@ -2820,33 +2142,24 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { PeerCluster::PeerToId(shutdownPeer2))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的复制组{A、B、C},并挂掉follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the follower + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -2856,19 +2169,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
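+    // The {A, B, C} -> {A, B, D} step later in this test is a single
+    // ChangePeers call; the new configuration is derived from the old one
+    // (a sketch of the calls used below, with the names from this test):
+    //
+    //   Configuration newConf = conf;                         // {A, B, C}
+    //   newConf.remove_peer(PeerId(shutdownPeer.address()));  // drop C
+    //   newConf.add_peer(PeerId(peer4.address()));            // add D
+    //   ChangePeers(logicPoolId, copysetId, conf, newConf, options);
+    //
+    // The surviving majority {A, B} is enough to commit the change even
+    // though the removed peer never returns.
+    // 1. 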
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2878,30 +2187,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // a + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // a loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // b + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -2912,33 +2213,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { Configuration newConf = conf; newConf.remove_peer(PeerId(shutdownPeer.address())); newConf.add_peer(PeerId(peer4.address())); - butil::Status st = ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - options); + butil::Status st = + ChangePeers(logicPoolId, copysetId, conf, newConf, options); ASSERT_TRUE(st.ok()) << st.error_str(); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); @@ -2954,11 +2244,12 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { } /** - * 验证3个节点的复制组{A、B、C},并Hang follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang follower - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang follower + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -2968,19 +2259,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2990,30 +2277,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // a + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // a loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "hang 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer hangPeer = followerPeers[0]; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // b + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -3024,33 +2303,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { Configuration newConf = conf; newConf.remove_peer(PeerId(hangPeer.address())); newConf.add_peer(PeerId(peer4.address())); - butil::Status st = ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - options); + butil::Status st = + ChangePeers(logicPoolId, copysetId, conf, newConf, options); ASSERT_TRUE(st.ok()) << st.error_str(); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); @@ -3066,11 +2334,12 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { } /** - * 验证3个节点的复制组{A、B、C},并挂掉leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the leader + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -3080,19 +2349,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -3102,29 +2367,21 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // a + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // a loop); - // 2. 挂掉leader + // 2. Hang up the leader LOG(INFO) << "shutdown 1 leader"; Peer shutdownPeer = leaderPeer; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // 等待新的leader产生 + // Waiting for a new leader to be generated ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // b + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -3135,33 +2392,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { Configuration newConf = conf; newConf.remove_peer(PeerId(shutdownPeer.address())); newConf.add_peer(PeerId(peer4.address())); - butil::Status st = ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - options); + butil::Status st = + ChangePeers(logicPoolId, copysetId, conf, newConf, options); ASSERT_TRUE(st.ok()) << st.error_str(); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); @@ -3177,11 +2423,12 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { } /** - * 验证3个节点的复制组{A、B、C},并Hang leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang leader - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang leader + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -3191,19 +2438,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -3213,29 +2456,21 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // a + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // a loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "hang 1 leader"; Peer hangPeer = leaderPeer; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - // 等待新的leader产生 + // Waiting for a new leader to be generated ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // b + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -3246,33 +2481,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { Configuration newConf = conf; newConf.remove_peer(PeerId(hangPeer.address())); newConf.add_peer(PeerId(peer4.address())); - butil::Status st = ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - options); + butil::Status st = + ChangePeers(logicPoolId, copysetId, conf, newConf, options); ASSERT_TRUE(st.ok()) << st.error_str(); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); diff --git a/test/integration/raft/raft_log_replication_test.cpp b/test/integration/raft/raft_log_replication_test.cpp index f6a39c3436..15b731e329 100644 --- a/test/integration/raft/raft_log_replication_test.cpp +++ b/test/integration/raft/raft_log_replication_test.cpp @@ -21,121 +21,91 @@ */ #include -#include #include +#include -#include #include +#include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "test/integration/common/peer_cluster.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; const char kRaftLogRepTestLogDir[] = "./runlog/RaftLogRep"; const char* kFakeMdsAddr = 
"127.0.0.1:9070"; static constexpr uint32_t kOpRequestAlignSize = 4096; static const char* raftLogParam[5][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9071", - "-chunkServerStoreUri=local://./9071/", - "-chunkServerMetaUri=local://./9071/chunkserver.dat", - "-copySetUri=local://./9071/copysets", - "-raftSnapshotUri=curve://./9071/copysets", - "-raftLogUri=curve://./9071/copysets", - "-recycleUri=local://./9071/recycler", - "-chunkFilePoolDir=./9071/chunkfilepool/", - "-chunkFilePoolMetaPath=./9071/chunkfilepool.meta", - "-walFilePoolDir=./9071/walfilepool/", - "-walFilePoolMetaPath=./9071/walfilepool.meta", - "-conf=./9071/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9072", - "-chunkServerStoreUri=local://./9072/", - "-chunkServerMetaUri=local://./9072/chunkserver.dat", - "-copySetUri=local://./9072/copysets", - "-raftSnapshotUri=curve://./9072/copysets", - "-raftLogUri=curve://./9072/copysets", - "-recycleUri=local://./9072/recycler", - "-chunkFilePoolDir=./9072/chunkfilepool/", - "-chunkFilePoolMetaPath=./9072/chunkfilepool.meta", - "-walFilePoolDir=./9072/walfilepool/", - "-walFilePoolMetaPath=./9072/walfilepool.meta", - "-conf=./9072/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9073", - "-chunkServerStoreUri=local://./9073/", - "-chunkServerMetaUri=local://./9073/chunkserver.dat", - "-copySetUri=local://./9073/copysets", - "-raftSnapshotUri=curve://./9073/copysets", - "-raftLogUri=curve://./9073/copysets", - "-recycleUri=local://./9073/recycler", - "-chunkFilePoolDir=./9073/chunkfilepool/", - "-chunkFilePoolMetaPath=./9073/chunkfilepool.meta", - "-walFilePoolDir=./9073/walfilepool/", - "-walFilePoolMetaPath=./9073/walfilepool.meta", - "-conf=./9073/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9074", - "-chunkServerStoreUri=local://./9074/", - "-chunkServerMetaUri=local://./9074/chunkserver.dat", - "-copySetUri=local://./9074/copysets", - "-raftSnapshotUri=curve://./9074/copysets", - "-raftLogUri=curve://./9074/copysets", - "-recycleUri=local://./9074/recycler", - "-chunkFilePoolDir=./9074/chunkfilepool/", - "-chunkFilePoolMetaPath=./9074/chunkfilepool.meta", - "-walFilePoolDir=./9074/walfilepool/", - "-walFilePoolMetaPath=./9074/walfilepool.meta", - "-conf=./9074/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9075", - "-chunkServerStoreUri=local://./9075/", - "-chunkServerMetaUri=local://./9075/chunkserver.dat", - "-copySetUri=local://./9075/copysets", - "-raftSnapshotUri=curve://./9075/copysets", - "-raftLogUri=curve://./9075/copysets", - "-recycleUri=local://./9075/recycler", - "-chunkFilePoolDir=./9075/chunkfilepool/", - "-chunkFilePoolMetaPath=./9075/chunkfilepool.meta", - "-walFilePoolDir=./9075/walfilepool/", - "-walFilePoolMetaPath=./9075/walfilepool.meta", - "-conf=./9075/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9071", + "-chunkServerStoreUri=local://./9071/", + "-chunkServerMetaUri=local://./9071/chunkserver.dat", + "-copySetUri=local://./9071/copysets", + "-raftSnapshotUri=curve://./9071/copysets", + "-raftLogUri=curve://./9071/copysets", + "-recycleUri=local://./9071/recycler", + 
"-chunkFilePoolDir=./9071/chunkfilepool/", + "-chunkFilePoolMetaPath=./9071/chunkfilepool.meta", + "-walFilePoolDir=./9071/walfilepool/", + "-walFilePoolMetaPath=./9071/walfilepool.meta", + "-conf=./9071/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9072", + "-chunkServerStoreUri=local://./9072/", + "-chunkServerMetaUri=local://./9072/chunkserver.dat", + "-copySetUri=local://./9072/copysets", + "-raftSnapshotUri=curve://./9072/copysets", + "-raftLogUri=curve://./9072/copysets", + "-recycleUri=local://./9072/recycler", + "-chunkFilePoolDir=./9072/chunkfilepool/", + "-chunkFilePoolMetaPath=./9072/chunkfilepool.meta", + "-walFilePoolDir=./9072/walfilepool/", + "-walFilePoolMetaPath=./9072/walfilepool.meta", + "-conf=./9072/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9073", + "-chunkServerStoreUri=local://./9073/", + "-chunkServerMetaUri=local://./9073/chunkserver.dat", + "-copySetUri=local://./9073/copysets", + "-raftSnapshotUri=curve://./9073/copysets", + "-raftLogUri=curve://./9073/copysets", + "-recycleUri=local://./9073/recycler", + "-chunkFilePoolDir=./9073/chunkfilepool/", + "-chunkFilePoolMetaPath=./9073/chunkfilepool.meta", + "-walFilePoolDir=./9073/walfilepool/", + "-walFilePoolMetaPath=./9073/walfilepool.meta", + "-conf=./9073/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9074", + "-chunkServerStoreUri=local://./9074/", + "-chunkServerMetaUri=local://./9074/chunkserver.dat", + "-copySetUri=local://./9074/copysets", + "-raftSnapshotUri=curve://./9074/copysets", + "-raftLogUri=curve://./9074/copysets", + "-recycleUri=local://./9074/recycler", + "-chunkFilePoolDir=./9074/chunkfilepool/", + "-chunkFilePoolMetaPath=./9074/chunkfilepool.meta", + "-walFilePoolDir=./9074/walfilepool/", + "-walFilePoolMetaPath=./9074/walfilepool.meta", + "-conf=./9074/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9075", + "-chunkServerStoreUri=local://./9075/", + "-chunkServerMetaUri=local://./9075/chunkserver.dat", + "-copySetUri=local://./9075/copysets", + "-raftSnapshotUri=curve://./9075/copysets", + "-raftLogUri=curve://./9075/copysets", + "-recycleUri=local://./9075/recycler", + "-chunkFilePoolDir=./9075/chunkfilepool/", + "-chunkFilePoolMetaPath=./9075/chunkfilepool.meta", + "-walFilePoolDir=./9075/walfilepool/", + "-walFilePoolMetaPath=./9075/walfilepool.meta", + "-conf=./9075/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; class RaftLogReplicationTest : public testing::Test { @@ -177,39 +147,34 @@ class RaftLogReplicationTest : public testing::Test { ASSERT_TRUE(cg4.Init("9074")); ASSERT_TRUE(cg5.Init("9075")); cg1.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg1.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg1.SetKV("chunkserver.common.logDir", - kRaftLogRepTestLogDir); + std::to_string(snapshotIntervalS)); + cg1.SetKV("chunkserver.common.logDir", kRaftLogRepTestLogDir); cg1.SetKV("mds.listen.addr", kFakeMdsAddr); cg2.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg2.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg2.SetKV("chunkserver.common.logDir", - kRaftLogRepTestLogDir); + 
std::to_string(snapshotIntervalS)); + cg2.SetKV("chunkserver.common.logDir", kRaftLogRepTestLogDir); cg2.SetKV("mds.listen.addr", kFakeMdsAddr); cg3.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg3.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg3.SetKV("chunkserver.common.logDir", - kRaftLogRepTestLogDir); + std::to_string(snapshotIntervalS)); + cg3.SetKV("chunkserver.common.logDir", kRaftLogRepTestLogDir); cg3.SetKV("mds.listen.addr", kFakeMdsAddr); cg4.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg4.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg4.SetKV("chunkserver.common.logDir", - kRaftLogRepTestLogDir); + std::to_string(snapshotIntervalS)); + cg4.SetKV("chunkserver.common.logDir", kRaftLogRepTestLogDir); cg4.SetKV("mds.listen.addr", kFakeMdsAddr); cg5.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg5.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg5.SetKV("chunkserver.common.logDir", - kRaftLogRepTestLogDir); + std::to_string(snapshotIntervalS)); + cg5.SetKV("chunkserver.common.logDir", kRaftLogRepTestLogDir); cg5.SetKV("mds.listen.addr", kFakeMdsAddr); ASSERT_TRUE(cg1.Generate()); ASSERT_TRUE(cg2.Generate()); @@ -265,19 +230,20 @@ class RaftLogReplicationTest : public testing::Test { int electionTimeoutMs; int snapshotIntervalS; std::map<int, int> paramsIndexs; - std::vector<char **> params; - // 等待多个副本数据一致的时间 + std::vector<char**> params; + // Time to wait for the data on all replicas to become consistent int waitMultiReplicasBecomeConsistent; }; butil::AtExitManager atExitManager; /** - * 验证3个节点的复制组,测试隐式提交 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉2个follower - * 3. 等带step down - * 3. 拉起1个follower + * Verify a replication group of 3 nodes and test implicit commit + * 1. Create a replication group of 3 members, wait for a leader to be + * elected, write data, then read it back for verification + * 2. Hang up 2 followers + * 3. Wait for step down + * 4. Pull up 1 follower */ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) { LogicPoolID logicPoolId = 2; @@ -287,19 +253,15 @@ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) { char ch = 'a'; int loop = 10; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector<Peer> peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -310,62 +272,38 @@ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2.
Hang up 2 followers std::vector<Peer> followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 2); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[1])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - 1); + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, 1); - // 3. 等待step down,等待2个选举超时,保证一定step down + // 3. Wait for step down; wait two election timeouts to guarantee that + // the step down happens ::usleep(1000 * electionTimeoutMs * 2); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 4. 拉起1个follower + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 4. Pull up 1 follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); Peer newLeader; ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // new leader就是old leader + // The new leader is the old leader ASSERT_STREQ(leaderPeer.address().c_str(), newLeader.address().c_str()); - // read step down之前append进去的log entry,测试隐式提交 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + // Read the log entries appended before the step down, to test + // implicit commit. + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, 1); + + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector<Peer> newPeers; @@ -378,11 +316,12 @@ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) { } /** - * 验证3个节点的复制组,测试日志截断 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉2个follower - * 3. 挂掉leader - * 3. 拉起2个follower + * Verify a replication group of three nodes and test log truncation + * 1. Create a replication group of 3 members, wait for a leader to be + * elected, write data, then read it back for verification + * 2. Hang up 2 followers + * 3. Hang up the leader + * 4. Pull up 2 followers */ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { LogicPoolID logicPoolId = 2; @@ -392,19 +331,15 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { char ch = 'a'; int loop = 10; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector<Peer> peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -415,33 +350,23 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2.
Hang up 2 followers std::vector<Peer> followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 2); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[1])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - 2); - - // 3. 挂掉leader + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, 2); + + // 3. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer oldLeader = leaderPeer; - // 4. 拉起2个follower + // 4. Pull up 2 followers ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.StartPeer(followerPeers[1], @@ -449,22 +374,12 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // 日志截断 - ReadNotVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Log truncation + ReadNotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, 2); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector<Peer> newPeers; @@ -477,12 +392,14 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { } /** - * 验证3个节点的复制组,测试向落后多个term的follower复制日志 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉一个follower - * 3. 挂掉leader,等待2个ET重启 - * 4. 挂掉leader,等待2个ET重启 - * 3. 拉起挂掉的follower + * Verify a replication group of three nodes, testing log replication to + * followers that have fallen behind by multiple terms + * 1. Create a replication group of 3 members, wait for a leader to be + * elected, write data, then read it back for verification + * 2. Hang up a follower + * 3. Hang up the leader and wait for 2 ETs to restart + * 4. Hang up the leader and wait for 2 ETs to restart + * 5. Pull up the hanging follower */ TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) { LogicPoolID logicPoolId = 2; @@ -492,19 +409,15 @@ TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) { char ch = 'a'; int loop = 10; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector<Peer> peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -515,89 +428,64 @@ TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2.
Hang up 1 Follower std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 挂掉leader,等待2个ET重启 + // 3. Hang up the leader and wait for 2 ETs to restart ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); ::usleep(1000 * electionTimeoutMs * 2); - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 挂掉leader,等待2个ET重启 + // 4. Hang up the leader and wait for 2 ETs to restart ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); ::usleep(1000 * electionTimeoutMs * 2); - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 5. 拉起挂掉的follower + // 5. Pull up the hanging follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 多等一会,保证安装快照成功 + // Wait a little longer to ensure successful installation of the snapshot ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证4个成员的复制组日志复制 - * 1. 4个成员正常启动 - * 2. 挂掉leader - * 3. leader拉起来 - * 4. 挂1一个follower - * 5. follower拉起来 - * 6. 挂2个follower - * 7. 拉起1个follower - * 8. 挂掉leader - * 9. 拉起上一步挂的leader - * 10. 挂掉leader和两个follower - * 11. 逐个拉起来 - * 12. 挂掉3个follower - * 13. 逐个拉起来 + * Verify replication group log replication for 4 members + * 1. 4 members started normally + * 2. Hang up the leader + * 3. Pull up the leader + * 4. Hang 1 follower + * 5. Follower, pull it up + * 6. Hang 2 followers + * 7. Pull up 1 follower + * 8. Hang up the leader + * 9. Pull up the leader from the previous step + * 10. Hang up the leader and two followers + * 11. Pull up one by one + * 12. Hang up three followers + * 13. Pull up one by one */ TEST_F(RaftLogReplicationTest, FourNodeKill) { LogicPoolID logicPoolId = 2; @@ -607,7 +495,7 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { char ch = 'a'; int loop = 10; - // 1. 启动4个成员的复制组 + // 1. 
Start a replication group of 4 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -615,12 +503,8 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { peers.push_back(peer2); peers.push_back(peer3); peers.push_back(peer4); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -632,124 +516,81 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // a loop); - // 2. 挂掉leader + // 2. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); Peer newLeader; ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); ASSERT_EQ(0, leaderId.parse(newLeader.address())); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // b loop); - - // 3. old leader拉起来 - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + // 3. Pull up the old leader + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // c loop); - // 4. 挂1一个follower + // 4. Hang 1 follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 3); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // d loop); - // 5. follower拉起来 + // 5. Pull up the follower ASSERT_EQ(0, cluster.StartPeer(followerPeers1[0], PeerCluster::PeerToId(followerPeers1[0]))); ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // e loop); - // 6. 挂2个follower + // 6. Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 3); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers2[1])); - WriteVerifyNotAvailable(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // f 1); - // 7. 拉起1个follower + // 7. 
Pull up 1 follower ASSERT_EQ(0, cluster.StartPeer(followerPeers2[0], PeerCluster::PeerToId(followerPeers2[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // g loop); - // 8. 挂掉leader + // 8. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 9. 拉起上一步挂的leader - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 9. Pull up the leader from the previous step + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // h loop); - // 10. 挂掉leader和两个follower + // 10. Hang up the leader and two followers ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -758,117 +599,81 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { shutdownFollower = followerPeers2[2]; } ASSERT_EQ(0, cluster.ShutdownPeer(shutdownFollower)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ::usleep(1000 * electionTimeoutMs * 2); - // 11. 逐个拉起来 - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + // 11. Pull up one by one + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.StartPeer(shutdownFollower, PeerCluster::PeerToId(shutdownFollower))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // i loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers2[1], PeerCluster::PeerToId(followerPeers2[1]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // i loop); - // 12. 挂掉3个follower + // 12. Hang up three followers std::vector followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[1])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[2])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // j 1); ::usleep(1000 * electionTimeoutMs * 2); - // 13. 逐个拉起来 + // 13. 
Pull up one by one ASSERT_EQ(0, cluster.StartPeer(followerPeers3[0], PeerCluster::PeerToId(followerPeers3[0]))); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.StartPeer(followerPeers3[1], PeerCluster::PeerToId(followerPeers3[1]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // k loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers3[2], PeerCluster::PeerToId(followerPeers3[2]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证4个成员的复制组日志复制 - * 1. 4个成员正常启动 - * 2. hang leader - * 3. 恢复leader - * 4. hang1一个follower - * 5. 恢复follower - * 6. hang2个follower - * 7. 恢复1个follower - * 8. hangleader - * 9. hang上一步hang的leader - * 10. hang leader和两个follower - * 11. 逐个恢复 - * 12. hang3个follower - * 13. 逐个恢复 + * Verify log replication for a replication group of 4 members + * 1. 4 members started normally + * 2. Hang the leader + * 3. Restore the leader + * 4. Hang 1 follower + * 5. Restore the follower + * 6. Hang 2 followers + * 7. Restore 1 follower + * 8. Hang the leader + * 9. Restore the leader hung in the previous step + * 10. Hang the leader and two followers + * 11. Restore one by one + * 12. Hang 3 followers + * 13. Restore one by one */ TEST_F(RaftLogReplicationTest, FourNodeHang) { LogicPoolID logicPoolId = 2; @@ -878,7 +683,7 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { char ch = 'a'; int loop = 10; - // 1. 启动4个成员的复制组 + // 1. Start a replication group of 4 members PeerId leaderId; Peer leaderPeer; std::vector<Peer> peers; @@ -886,12 +691,8 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { peers.push_back(peer2); peers.push_back(peer3); peers.push_back(peer4); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -903,119 +704,76 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // a loop); // 2.
hang leader ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); Peer oldLeader = leaderPeer; - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); Peer newLeader; ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); ASSERT_EQ(0, leaderId.parse(newLeader.address())); ASSERT_STRNE(oldLeader.address().c_str(), newLeader.address().c_str()); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // b loop); - - // 3. 恢复old leader + // 3. Restore old leader ASSERT_EQ(0, cluster.SignalPeer(oldLeader)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // c loop); - // 4. hang 1一个follower + // 4. Hang 1, one follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 1); ASSERT_EQ(0, cluster.HangPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // d loop); - // 5. 恢复follower + // 5. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // e loop); - // 6. hang 2个follower + // 6. Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 3); ASSERT_EQ(0, cluster.HangPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.HangPeer(followerPeers2[1])); - WriteVerifyNotAvailable(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // f 1); -// 7. 恢复1个follower + // 7. Restore 1 follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // g loop); // 8. hang leader ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 9. 恢复上一步挂的leader + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 9. Restore the previous suspended leader ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // h loop); - // 10. hang leader和两个follower + // 10. 
Hang the leader and two followers ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -1024,107 +782,70 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { shutdownFollower = followerPeers2[2]; } ASSERT_EQ(0, cluster.HangPeer(shutdownFollower)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ::usleep(1000 * electionTimeoutMs * 2); - // 11. 逐个恢复 + // 11. Restore one by one ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.SignalPeer(shutdownFollower)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // i loop); ASSERT_EQ(0, cluster.SignalPeer(followerPeers2[1])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // j loop); - // 12. hang 3个follower + // 12. Hang 3 followers std::vector<Peer> followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[1])); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[2])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // k 1); - - // 13. 逐个恢复 + // 13. Restore one by one ::usleep(1000 * electionTimeoutMs * 2); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[0])); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[1])); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // l loop); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[2])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); } /** - * 验证5个成员的复制组日志复制 - * 1. 5个成员正常启动 - * 2. 挂 leader - * 3. 恢复leader - * 4. 挂1一个follower - * 5. 恢复follower - * 6. 挂2个follower - * 7. 恢复1个follower - * 8. 挂leader - * 9. 恢复一步挂的leader - * 10. 挂leader和两个follower - * 11. 逐个恢复 - * 12. 挂3个follower - * 13. 逐个恢复 + * Verify log replication for a replication group of 5 members + * 1. 5 members started normally + * 2. Hang the leader + * 3. Restore the leader + * 4. Hang 1 follower + * 5. Restore the follower + * 6. Hang 2 followers + * 7. Restore 1 follower + * 8. Hang the leader + * 9. Restore the leader hung in the previous step + * 10. Hang the leader and two followers + * 11. Restore one by one + * 12. Hang 3 followers + * 13.
Restore one by one */ TEST_F(RaftLogReplicationTest, FiveNodeKill) { LogicPoolID logicPoolId = 2; @@ -1134,7 +855,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { char ch = 'a'; int loop = 10; - // 1. 启动5个成员的复制组 + // 1. Start a replication group of 5 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1143,12 +864,8 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { peers.push_back(peer3); peers.push_back(peer4); peers.push_back(peer5); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1161,122 +878,79 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // a loop); - // 2. 挂掉leader + // 2. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); Peer newLeader; ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); ASSERT_EQ(0, leaderId.parse(newLeader.address())); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // b loop); - - // 3. old leader拉起来 - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + // 3. Pull up the old leader + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // c loop); - // 4. 挂1一个follower + // 4. Hang 1 follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 1); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // d loop); - // 5. follower拉起来 + // 5. Follower, pull it up ASSERT_EQ(0, cluster.StartPeer(followerPeers1[0], PeerCluster::PeerToId(followerPeers1[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // e loop); - // 6. 挂2个follower + // 6. Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 4); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers2[1])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // f loop); - // 7. 拉起1个follower + // 7. 
Pull up 1 follower ASSERT_EQ(0, cluster.StartPeer(followerPeers2[0], PeerCluster::PeerToId(followerPeers2[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // g loop); - // 8. 挂掉leader + // 8. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(newLeader)); - ReadVerifyNotAvailable(newLeader, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 9. 拉起上一步挂的leader - ASSERT_EQ(0, cluster.StartPeer(newLeader, - PeerCluster::PeerToId(newLeader))); + ReadVerifyNotAvailable(newLeader, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 9. Pull up the leader from the previous step + ASSERT_EQ(0, + cluster.StartPeer(newLeader, PeerCluster::PeerToId(newLeader))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // h loop); - // 10. 挂掉leader和两个follower + // 10. Hang up the leader and two followers ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -1285,113 +959,78 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { shutdownFollower = followerPeers2[2]; } ASSERT_EQ(0, cluster.ShutdownPeer(shutdownFollower)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 11. 逐个拉起来 - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 11. Pull up one by one + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // i loop); ASSERT_EQ(0, cluster.StartPeer(shutdownFollower, PeerCluster::PeerToId(shutdownFollower))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // j loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers2[1], PeerCluster::PeerToId(followerPeers2[1]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // k loop); - // 12. 挂掉3个follower + // 12. Hang up three followers std::vector followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[1])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[2])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // l 1); - - // 13. 逐个拉起来 + // 13. 
Pull up one by one ASSERT_EQ(0, cluster.StartPeer(followerPeers3[0], PeerCluster::PeerToId(followerPeers3[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // m loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers3[1], PeerCluster::PeerToId(followerPeers3[1]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // n loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers3[2], PeerCluster::PeerToId(followerPeers3[2]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // o loop); } - /** - * 验证5个成员的复制组日志复制 - * 1. 5个成员正常启动 - * 2. hang leader - * 3. 恢复leader - * 4. hang 1一个follower - * 5. 恢复follower - * 6. hang 2个follower - * 7. 恢复1个follower - * 8. hang leader - * 9. hang上一步hang的leader - * 10. hang leader和两个follower - * 11. 逐个恢复 - * 12. hang3个follower - * 13. 逐个恢复 + * Verify log replication for a replication group of 5 members + * 1. 5 members started normally + * 2. Hang the leader + * 3. Restore the leader + * 4. Hang 1 follower + * 5. Restore the follower + * 6. Hang 2 followers + * 7. Restore 1 follower + * 8. Hang the leader + * 9. Restore the leader hung in the previous step + * 10. Hang the leader and two followers + * 11. Restore one by one + * 12. Hang 3 followers + * 13. Restore one by one */ TEST_F(RaftLogReplicationTest, FiveNodeHang) { LogicPoolID logicPoolId = 2; @@ -1401,7 +1040,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { char ch = 'a'; int loop = 10; - // 1. 启动5个成员的复制组 + // 1. Start a replication group of 5 members PeerId leaderId; Peer leaderPeer; std::vector<Peer> peers; @@ -1410,12 +1049,8 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { peers.push_back(peer3); peers.push_back(peer4); peers.push_back(peer5); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1428,115 +1063,72 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // a loop); // 2. hang leader ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); Peer newLeader; ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); ASSERT_EQ(0, leaderId.parse(newLeader.address())); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // b loop); - - // 3.
Restore old leader ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // c loop); - // 4. hang 1一个follower + // 4. Hang 1, one follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 1); ASSERT_EQ(0, cluster.HangPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // d loop); - // 5. 恢复follower + // 5. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // e loop); - // 6. hang 2个follower + // 6. Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 4); ASSERT_EQ(0, cluster.HangPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.HangPeer(followerPeers2[1])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // f loop); - // 7. 恢复1个follower + // 7. Restore 1 follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers2[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // g loop); // 8. hang leader ASSERT_EQ(0, cluster.HangPeer(newLeader)); - ReadVerifyNotAvailable(newLeader, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 9. 恢复上一步挂的leader + ReadVerifyNotAvailable(newLeader, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 9. Restore the previous suspended leader ASSERT_EQ(0, cluster.SignalPeer(newLeader)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // h loop); - // 10. hang leader和两个follower + // 10. Hang leader and two followers ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -1545,83 +1137,49 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { shutdownFollower = followerPeers2[2]; } ASSERT_EQ(0, cluster.HangPeer(shutdownFollower)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 11. 逐个恢复 + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 11. 
Restore one by one ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // i loop); ASSERT_EQ(0, cluster.SignalPeer(shutdownFollower)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // j loop); ASSERT_EQ(0, cluster.SignalPeer(followerPeers2[1])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // k loop); - // 12. hang 3个follower + // 12. Hang 3 followers std::vector followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[1])); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[2])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // l 1); - - // 13. 逐个恢复 + // 13. Restore one by one ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // m loop); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[1])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // n loop); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[2])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // o loop); } diff --git a/test/integration/raft/raft_snapshot_test.cpp b/test/integration/raft/raft_snapshot_test.cpp index a8e57aaa3f..d6cd2981dc 100644 --- a/test/integration/raft/raft_snapshot_test.cpp +++ b/test/integration/raft/raft_snapshot_test.cpp @@ -21,102 +21,78 @@ */ #include -#include #include +#include #include -#include "test/integration/common/peer_cluster.h" -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli2.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; const char kRaftSnapshotTestLogDir[] = "./runlog/RaftSnapshot"; const char* kFakeMdsAddr = "127.0.0.1:9320"; static constexpr uint32_t kOpRequestAlignSize = 4096; -static const char *raftVoteParam[4][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9321", - "-chunkServerStoreUri=local://./9321/", - "-chunkServerMetaUri=local://./9321/chunkserver.dat", - "-copySetUri=local://./9321/copysets", - "-raftSnapshotUri=curve://./9321/copysets", - "-recycleUri=local://./9321/recycler", - "-chunkFilePoolDir=./9321/chunkfilepool/", - "-chunkFilePoolMetaPath=./9321/chunkfilepool.meta", - "-conf=./9321/chunkserver.conf", - 
"-raft_sync_segments=true", - "-raftLogUri=curve://./9321/copysets", - "-walFilePoolDir=./9321/walfilepool/", - "-walFilePoolMetaPath=./9321/walfilepool.meta", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9322", - "-chunkServerStoreUri=local://./9322/", - "-chunkServerMetaUri=local://./9322/chunkserver.dat", - "-copySetUri=local://./9322/copysets", - "-raftSnapshotUri=curve://./9322/copysets", - "-recycleUri=local://./9322/recycler", - "-chunkFilePoolDir=./9322/chunkfilepool/", - "-chunkFilePoolMetaPath=./9322/chunkfilepool.meta", - "-conf=./9322/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9322/copysets", - "-walFilePoolDir=./9322/walfilepool/", - "-walFilePoolMetaPath=./9322/walfilepool.meta", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9323", - "-chunkServerStoreUri=local://./9323/", - "-chunkServerMetaUri=local://./9323/chunkserver.dat", - "-copySetUri=local://./9323/copysets", - "-raftSnapshotUri=curve://./9323/copysets", - "-recycleUri=local://./9323/recycler", - "-chunkFilePoolDir=./9323/chunkfilepool/", - "-chunkFilePoolMetaPath=./9323/chunkfilepool.meta", - "-conf=./9323/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9323/copysets", - "-walFilePoolDir=./9323/walfilepool/", - "-walFilePoolMetaPath=./9323/walfilepool.meta", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9324", - "-chunkServerStoreUri=local://./9324/", - "-chunkServerMetaUri=local://./9324/chunkserver.dat", - "-copySetUri=local://./9324/copysets", - "-raftSnapshotUri=curve://./9324/copysets", - "-recycleUri=local://./9324/recycler", - "-chunkFilePoolDir=./9324/chunkfilepool/", - "-chunkFilePoolMetaPath=./9324/chunkfilepool.meta", - "-conf=./9324/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9324/copysets", - "-walFilePoolDir=./9324/walfilepool/", - "-walFilePoolMetaPath=./9324/walfilepool.meta", - NULL - }, +static const char* raftVoteParam[4][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9321", + "-chunkServerStoreUri=local://./9321/", + "-chunkServerMetaUri=local://./9321/chunkserver.dat", + "-copySetUri=local://./9321/copysets", + "-raftSnapshotUri=curve://./9321/copysets", + "-recycleUri=local://./9321/recycler", + "-chunkFilePoolDir=./9321/chunkfilepool/", + "-chunkFilePoolMetaPath=./9321/chunkfilepool.meta", + "-conf=./9321/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9321/copysets", + "-walFilePoolDir=./9321/walfilepool/", + "-walFilePoolMetaPath=./9321/walfilepool.meta", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9322", + "-chunkServerStoreUri=local://./9322/", + "-chunkServerMetaUri=local://./9322/chunkserver.dat", + "-copySetUri=local://./9322/copysets", + "-raftSnapshotUri=curve://./9322/copysets", + "-recycleUri=local://./9322/recycler", + "-chunkFilePoolDir=./9322/chunkfilepool/", + "-chunkFilePoolMetaPath=./9322/chunkfilepool.meta", + "-conf=./9322/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9322/copysets", + "-walFilePoolDir=./9322/walfilepool/", + "-walFilePoolMetaPath=./9322/walfilepool.meta", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9323", + "-chunkServerStoreUri=local://./9323/", + "-chunkServerMetaUri=local://./9323/chunkserver.dat", + "-copySetUri=local://./9323/copysets", + "-raftSnapshotUri=curve://./9323/copysets", + "-recycleUri=local://./9323/recycler", + 
"-chunkFilePoolDir=./9323/chunkfilepool/", + "-chunkFilePoolMetaPath=./9323/chunkfilepool.meta", + "-conf=./9323/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9323/copysets", + "-walFilePoolDir=./9323/walfilepool/", + "-walFilePoolMetaPath=./9323/walfilepool.meta", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9324", + "-chunkServerStoreUri=local://./9324/", + "-chunkServerMetaUri=local://./9324/chunkserver.dat", + "-copySetUri=local://./9324/copysets", + "-raftSnapshotUri=curve://./9324/copysets", + "-recycleUri=local://./9324/recycler", + "-chunkFilePoolDir=./9324/chunkfilepool/", + "-chunkFilePoolMetaPath=./9324/chunkfilepool.meta", + "-conf=./9324/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9324/copysets", + "-walFilePoolDir=./9324/walfilepool/", + "-walFilePoolMetaPath=./9324/walfilepool.meta", NULL}, }; class RaftSnapshotTest : public testing::Test { @@ -152,32 +128,28 @@ class RaftSnapshotTest : public testing::Test { ASSERT_TRUE(cg3_.Init("9323")); ASSERT_TRUE(cg4_.Init("9324")); cg1_.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs_)); + std::to_string(electionTimeoutMs_)); cg1_.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS_)); - cg1_.SetKV("chunkserver.common.logDir", - kRaftSnapshotTestLogDir); + std::to_string(snapshotIntervalS_)); + cg1_.SetKV("chunkserver.common.logDir", kRaftSnapshotTestLogDir); cg1_.SetKV("mds.listen.addr", kFakeMdsAddr); cg2_.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs_)); + std::to_string(electionTimeoutMs_)); cg2_.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS_)); - cg2_.SetKV("chunkserver.common.logDir", - kRaftSnapshotTestLogDir); + std::to_string(snapshotIntervalS_)); + cg2_.SetKV("chunkserver.common.logDir", kRaftSnapshotTestLogDir); cg2_.SetKV("mds.listen.addr", kFakeMdsAddr); cg3_.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs_)); + std::to_string(electionTimeoutMs_)); cg3_.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS_)); - cg3_.SetKV("chunkserver.common.logDir", - kRaftSnapshotTestLogDir); + std::to_string(snapshotIntervalS_)); + cg3_.SetKV("chunkserver.common.logDir", kRaftSnapshotTestLogDir); cg3_.SetKV("mds.listen.addr", kFakeMdsAddr); cg4_.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs_)); + std::to_string(electionTimeoutMs_)); cg4_.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS_)); - cg4_.SetKV("chunkserver.common.logDir", - kRaftSnapshotTestLogDir); + std::to_string(snapshotIntervalS_)); + cg4_.SetKV("chunkserver.common.logDir", kRaftSnapshotTestLogDir); cg4_.SetKV("mds.listen.addr", kFakeMdsAddr); ASSERT_TRUE(cg1_.Generate()); ASSERT_TRUE(cg2_.Generate()); @@ -194,7 +166,7 @@ class RaftSnapshotTest : public testing::Test { params_.push_back(const_cast(raftVoteParam[2])); params_.push_back(const_cast(raftVoteParam[3])); - // 配置默认raft client option + // Configure default raft client option defaultCliOpt_.max_retry = 3; defaultCliOpt_.timeout_ms = 10000; } @@ -232,20 +204,20 @@ class RaftSnapshotTest : public testing::Test { braft::cli::CliOptions defaultCliOpt_; std::map paramsIndexs_; - std::vector params_; + std::vector params_; }; - /** - * 验证连续通过快照恢复copyset - * 1.创建3个副本的复制组 - * 2.挂掉一个follower - * 3.写入数据,并等待raft snapshot 产生 - * 4.启动挂掉的follower,使其通过snapshot恢复 - * 5.transfer leader到刚启动的follower,读数据验证 - * 6.remove old leader,主要为了删除其copyset目录 - * 
7.添加新的peer,使其通过快照加载数据 - 8.transfer leader到新加入的peer,读数据验证 + * Verify continuous recovery of a copyset through snapshots + * 1. Create a replication group of 3 replicas + * 2. Hang up a follower + * 3. Write data and wait for a raft snapshot to be generated + * 4. Start the downed follower and let it recover via the snapshot + * 5. Transfer the leader to the just-started follower and read the data + * back for verification + * 6. Remove the old leader, mainly to delete its copyset directory + * 7. Add a new peer and let it load data via the snapshot + * 8. Transfer the leader to the newly added peer and read the data back + * for verification */ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { LogicPoolID logicPoolId = 2; @@ -261,12 +233,8 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { peers.push_back(peer2_); peers.push_back(peer3_); - PeerCluster cluster("ThreeNode-cluster", - logicPoolId, - copysetId, - peers, - params_, - paramsIndexs_); + PeerCluster cluster("ThreeNode-cluster", logicPoolId, copysetId, peers, + params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, cluster.StartPeer(peer1_, PeerCluster::PeerToId(peer1_))); ASSERT_EQ(0, cluster.StartPeer(peer2_, PeerCluster::PeerToId(peer2_))); @@ -276,7 +244,7 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); Peer oldLeader = leaderPeer; - // 挂掉一个follower + // Hang up a follower Peer shutdownPeer; if (leaderPeer.address() == peer1_.address()) { shutdownPeer = peer2_; @@ -288,21 +256,15 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); LOG(INFO) << "write 1 start"; - // 发起 read/write,产生chunk文件 - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, - loop, - initsn); + // Initiate read/write to generate a chunk file + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, + loop, initsn); LOG(INFO) << "write 1 end"; - // wait snapshot,保证能够触发打快照 - ::sleep(1.5*snapshotIntervalS_); + // Wait longer than the snapshot interval to ensure a snapshot is triggered + ::sleep(1.5 * snapshotIntervalS_); - // restart, 需要从 install snapshot 恢复 + // Restart; the peer must recover from an install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); @@ -310,43 +272,44 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { ::sleep(3); TransferLeaderAssertSuccess(&cluster, shutdownPeer, defaultCliOpt_); leaderPeer = shutdownPeer; - // 读数据验证 - ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch, loop); + // Read the data back to verify it + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); Configuration conf = cluster.CopysetConf(); - // 删除旧leader及其目录 + // Delete the old leader and its directory butil::Status status = RemovePeer(logicPoolId, copysetId, conf, oldLeader, defaultCliOpt_); ASSERT_TRUE(status.ok()); std::string rmdir("rm -fr "); - rmdir += std::to_string(PeerCluster::PeerToId(oldLeader)); + rmdir += std::to_string(PeerCluster::PeerToId(oldLeader)); ::system(rmdir.c_str()); - // 添加新的peer - ASSERT_EQ(0, cluster.StartPeer(peer4_, - PeerCluster::PeerToId(peer4_))); + // Add a new peer + ASSERT_EQ(0, cluster.StartPeer(peer4_, PeerCluster::PeerToId(peer4_))); status = AddPeer(logicPoolId, copysetId, conf, peer4_, defaultCliOpt_); ASSERT_TRUE(status.ok()) << status; - // transfer leader 到peer4_,并读出来验证 + // Transfer the leader to peer4_ and read the data back to verify it TransferLeaderAssertSuccess(&cluster, peer4_, defaultCliOpt_);
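// Editor's note (hedged): a minimal sketch of the quorum arithmetic these
// recovery tests lean on; the lambda below is illustrative only and is not
// part of the original test. Raft commits a write once a majority of the N
// replicas has appended it, so a 3-replica copyset stays writable with one
// peer down and a 5-replica copyset with two peers down.
auto quorum = [](int replicas) { return replicas / 2 + 1; };
ASSERT_EQ(2, quorum(3));  // 3 replicas: a write needs 2 live peers
ASSERT_EQ(3, quorum(5));  // 5 replicas: a write needs 3 live peers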
leaderPeer = peer4_; - // 读数据验证 - ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch, loop); + // Read the data back to verify it + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. 然后 sleep 超过一个 snapshot interval,write read 数据, - * 5. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot - * 6. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 7. transfer leader 到shut down 的peer 上 - * 8. 在 read 之前写入的数据验证 - * 9. 再 write 数据,再 read 出来验证一遍 + * Verify that a non-leader node of a 3-node group, once shut down and + * restarted, is forced to recover from an install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for a leader to be elected, write data, then read it back for + * verification + * 3. Shut down a non-leader node + * 4. Sleep for more than one snapshot interval, then write and read data + * 5. Sleep for more than one snapshot interval again, then write and read + * data; steps 4 and 5 guarantee that at least two snapshots are taken, + * so the restarted node must go through an install snapshot + * 6. Wait for a leader to be elected, then read back and verify the + * previously written data + * 7. Transfer the leader to the shut-down peer + * 8. Read back and verify the previously written data + * 9. Write data again, then read it back for verification */ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -362,12 +325,8 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { peers.push_back(peer2_); peers.push_back(peer3_); - PeerCluster cluster("ThreeNode-cluster", - logicPoolId, - copysetId, - peers, - params_, - paramsIndexs_); + PeerCluster cluster("ThreeNode-cluster", logicPoolId, copysetId, peers, + params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, cluster.StartPeer(peer1_, PeerCluster::PeerToId(peer1_))); ASSERT_EQ(0, cluster.StartPeer(peer2_, PeerCluster::PeerToId(peer2_))); @@ -377,22 +336,17 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); LOG(INFO) << "write 1 start"; - // 发起 read/write,产生chunk文件 - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, - loop, - initsn); + // Initiate read/write to generate a chunk file + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, + loop, initsn); LOG(INFO) << "write 1 end"; - // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作 - // 所以先睡一会,防止并发统计文件信息 + // Operations between raft replicas are not fully synchronized and a + // replica may lag behind, so sleep briefly first to avoid racing with the + // concurrent collection of file statistics ::sleep(2); - // shutdown 某个follower + // Shut down one follower Peer shutdownPeer; if (leaderPeer.address() == peer1_.address()) { shutdownPeer = peer2_; @@ -403,47 +357,31 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { LOG(INFO) << "leader peer: " << leaderPeer.address(); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // wait snapshot, 保证能够触发打快照 - // 此外通过增加chunk版本号,触发chunk文件产生快照文件 - ::sleep(1.5*snapshotIntervalS_); - // 再次发起 read/write + // Wait longer than the snapshot interval to ensure a snapshot is triggered + // In addition, bumping the chunk version number makes the chunk file + // produce a chunk snapshot file
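// Editor's note (hedged): the 1.5x multiplier used below is the test's way
// of guaranteeing that at least one full snapshot interval elapses while the
// follower is down; repeating the wait after "write 2" yields two snapshots,
// so the restarted peer can no longer catch up from the raft log alone and
// must take the install-snapshot path. The check below is illustrative only
// and is not part of the original test.
auto waitS = [](double intervalS) { return 1.5 * intervalS; };
ASSERT_GT(waitS(snapshotIntervalS_), snapshotIntervalS_);  // spans one interval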
+    ::sleep(1.5 * snapshotIntervalS_);
+    // Initiate read/write again
    LOG(INFO) << "write 2 start";
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch + 1,
-                        loop,
-                        initsn + 1);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch + 1, loop, initsn + 1);
    LOG(INFO) << "write 2 end";

-    // 验证chunk快照数据正确性
-    ReadSnapshotVerify(leaderPeer,
-                       logicPoolId,
-                       copysetId,
-                       chunkId,
-                       length,
-                       ch,
+    // Verify that the chunk snapshot data is correct
+    ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch,
                       loop);

-    // wait snapshot, 保证能够触发打快照
-    ::sleep(1.5*snapshotIntervalS_);
+    // Wait long enough to make sure a raft snapshot is triggered
+    ::sleep(1.5 * snapshotIntervalS_);

-    // restart, 需要从 install snapshot 恢复
+    // Restart; the peer must recover from an install snapshot
    ASSERT_EQ(0, cluster.StartPeer(shutdownPeer,
                                   PeerCluster::PeerToId(shutdownPeer)));

    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    LOG(INFO) << "write 3 start";
-    // 再次发起 read/write
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch + 2,
-                        loop,
-                        initsn + 1);
+    // Initiate read/write again
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch + 2, loop, initsn + 1);

    LOG(INFO) << "write 3 end";

@@ -451,24 +389,29 @@
    ::sleep(3);
    TransferLeaderAssertSuccess(&cluster, shutdownPeer, defaultCliOpt_);
    leaderPeer = shutdownPeer;
-    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId,
-               length, ch + 2, loop);
-    ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId,
-                       length, ch, loop);
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch + 2,
+               loop);
+    ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch,
+                       loop);
}
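Before reading the next test, it helps to see the chunk-version mechanics it exercises, reduced to the three helpers this file already uses. A minimal sketch; `leader` and the literal characters are illustrative, and `initsn` is the initial sequence number:

    // Write 'a' at the initial sequence number: only the chunk file exists.
    WriteThenReadVerify(leader, logicPoolId, copysetId, chunkId, length, 'a',
                        loop, initsn);
    // Rewrite with 'b' at sn = initsn + 1: before applying the new version,
    // the chunkserver snapshots the previous version of the chunk.
    WriteThenReadVerify(leader, logicPoolId, copysetId, chunkId, length, 'b',
                        loop, initsn + 1);
    // The chunk snapshot therefore still reads back 'a' while the chunk
    // itself reads back 'b'.
    ReadSnapshotVerify(leader, logicPoolId, copysetId, chunkId, length, 'a',
                       loop);

Deleting the snapshot (DeleteSnapshotVerify with csn equal to the current sn) and then writing once more with sn + 1 produces a fresh snapshot of the 'b' version, which is exactly the sequence the test below walks through.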
/**
- * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复
- * 1. 创建3个副本的复制组
- * 2. 等待 leader 产生,write 数据,并更新写版本,产生chunk快照
- * 3. shutdown 非 leader
- * 4. 然后 sleep 超过一个 snapshot interval,
- * 5. 删除chunk快照,再次用新版本write 数据,产生新的chunk快照
- * 6. 然后再 sleep 超过一个 snapshot interval;4,5两步
- *    是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot
- * 7. 等待 leader 产生,然后 read 之前写入的数据验证一遍
- * 8. transfer leader 到shut down 的peer 上
- * 9. 在 read 之前写入的数据验证
+ * Verify shutting down a non-leader node among three nodes, restarting it,
+ * and forcing it to recover from an install snapshot
+ * 1. Create a replication group of 3 replicas
+ * 2. Wait for a leader to emerge, write data, and bump the write version to
+ *    produce a chunk snapshot
+ * 3. Shut down a non-leader
+ * 4. Sleep for more than one snapshot interval
+ * 5. Delete the chunk snapshot, then write data again with a new version to
+ *    produce a new chunk snapshot
+ * 6. Sleep for more than one snapshot interval again; steps 4 and 5
+ *    guarantee at least two snapshots are taken, so the restarted node must
+ *    catch up via install snapshot
+ * 7. Wait for a leader to emerge, then read back and verify the data
+ *    written earlier
+ * 8. Transfer leader to the shut-down peer
+ * 9. Read back and verify the data written earlier
 */
TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) {
    LogicPoolID logicPoolId = 2;
@@ -484,12 +427,8 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) {
    peers.push_back(peer2_);
    peers.push_back(peer3_);

-    PeerCluster cluster("ThreeNode-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params_,
-                        paramsIndexs_);
+    PeerCluster cluster("ThreeNode-cluster", logicPoolId, copysetId, peers,
+                        params_, paramsIndexs_);
    ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
    ASSERT_EQ(0, cluster.StartPeer(peer1_, PeerCluster::PeerToId(peer1_)));
    ASSERT_EQ(0, cluster.StartPeer(peer2_, PeerCluster::PeerToId(peer2_)));
@@ -499,43 +438,31 @@
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));

    LOG(INFO) << "write 1 start";
-    // 发起 read/write,产生chunk文件
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch,  // a
-                        loop,
-                        initsn);
+    // Initiate read/write to generate a chunk file
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch,  // a
+                        loop, initsn);
    LOG(INFO) << "write 1 end";

    LOG(INFO) << "write 2 start";
-    // 发起 read/write,产生chunk文件,并产生快照文件
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ++ch,  // b
-                        loop,
-                        initsn+1);  // sn = 2
+    // Initiate read/write, generating the chunk file and its snapshot file
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ++ch,  // b
+                        loop,
+                        initsn + 1);  // sn = 2
-    // 验证chunk快照数据正确性
-    ReadSnapshotVerify(leaderPeer,
-                       logicPoolId,
-                       copysetId,
-                       chunkId,
-                       length,
-                       ch-1,  // a
-                       loop);
+    // Verify that the chunk snapshot still holds the previous data
+    ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                       ch - 1,  // a
+                       loop);
    LOG(INFO) << "write 2 end";

-    // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作
-    // 所以先睡一会,防止并发统计文件信息
+    // Operations are not fully synchronized across raft replicas, and some
+    // replicas may lag behind. Sleep briefly first so that file statistics
+    // are not collected while operations are still in flight
    ::sleep(2);

-    // shutdown 某个follower
+    // Shut down one follower
    Peer shutdownPeer;
    if (leaderPeer.address() == peer1_.address()) {
        shutdownPeer = peer2_;
@@ -546,41 +473,31 @@
    LOG(INFO) << "leader peer: " << leaderPeer.address();
    ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer));

-    // wait snapshot, 保证能够触发打快照
-    // 此外通过增加chunk版本号,触发chunk文件产生快照文件
-    ::sleep(1.5*snapshotIntervalS_);
+    // Wait long enough to make sure a raft snapshot is triggered.
+    // In addition, bumping the chunk version number makes the chunk file
+    // produce a chunk snapshot file
+    ::sleep(1.5 * snapshotIntervalS_);

-    // 删除旧的快照
-    DeleteSnapshotVerify(leaderPeer,
-                         logicPoolId,
-                         copysetId,
-                         chunkId,
-                         initsn + 1);  // csn = 2
+    // Delete the old snapshot
+    DeleteSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId,
+                         initsn + 1);  // csn = 2

-    // 再次发起 read/write
+    // Initiate read/write again
    LOG(INFO) << "write 3 start";
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ++ch,  // c
-                        loop,
-                        initsn + 2);  // sn = 3
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ++ch,  // c
+                        loop,
+                        initsn + 2);  // sn = 3
    LOG(INFO) << "write 3 end";

-    // 验证chunk快照数据正确性
-    ReadSnapshotVerify(leaderPeer,
-                       logicPoolId,
-                       copysetId,
-                       chunkId,
-                       length,
-                       ch-1,  // b
-                       loop);
+    // Verify that the chunk snapshot still holds the previous data
+    ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                       ch - 1,  // b
+                       loop);
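+    // Version bookkeeping so far, following the inline sn/csn annotations:
+    // 'a' was written at sn = 1; 'b' at sn = 2, which snapshotted 'a' (that
+    // snapshot was deleted above with csn = 2); 'c' at sn = 3, which
+    // snapshotted 'b'. Hence the ReadSnapshotVerify above expects ch - 1,
+    // i.e. 'b'.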
-    // wait snapshot, 保证能够触发打快照
-    ::sleep(1.5*snapshotIntervalS_);
+    // Wait long enough to make sure a raft snapshot is triggered
+    ::sleep(1.5 * snapshotIntervalS_);

-    // restart, 需要从 install snapshot 恢复
+    // Restart; the peer must recover from an install snapshot
    ASSERT_EQ(0, cluster.StartPeer(shutdownPeer,
                                   PeerCluster::PeerToId(shutdownPeer)));
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
@@ -589,24 +506,29 @@
    ::sleep(3);
    TransferLeaderAssertSuccess(&cluster, shutdownPeer, defaultCliOpt_);
    leaderPeer = shutdownPeer;
-    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId,
-               length, ch, loop);
-    ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId,
-                       length, ch-1, loop);
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop);
+    ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                       ch - 1, loop);
}
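The only structural difference in the next test is the second copyset. The part of its body that creates it and routes all harness I/O to it looks like this (taken from the test that follows; see its step 2):

    ++copysetId;  // a second copyset id on the same chunkservers
    cluster.CreateCopyset(logicPoolId, copysetId, peer1_, peers);
    cluster.CreateCopyset(logicPoolId, copysetId, peer2_, peers);
    cluster.CreateCopyset(logicPoolId, copysetId, peer3_, peers);
    cluster.SetWorkingCopyset(copysetId);  // subsequent reads/writes use it

With two copysets per chunkserver, the on-disk raft-snapshot layout the curve snapshot dump must handle is no longer trivial, which is what this scenario verifies.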
/**
- * 验证curve快照转储过程当中,chunkserver存在多个copyset情况下,
- * 1. 创建3个副本的复制组
- * 2. 为每个复制组的chunkserver生成新的copyset,并作为后续操作对象
- * 3. 等待 leader 产生,write 数据
- * 4. sleep 超过一个 snapshot interval,确保产生raft快照
- * 5. 更新写版本,产生chunk快照
- * 6. 然后 sleep 超过一个 snapshot interval,确保产生raft快照
- * 7. shutdown 非 leader
- * 8. AddPeer添加一个新节点使其通过加载快照恢复,然后remove掉shutdown的peer
- * 9. 切换leader到新添加的peer
- * 10. 等待 leader 产生,然后 read 之前产生的数据和chunk快照进行验证
+ * Verify curve snapshot dumping while the chunkserver hosts multiple
+ * copysets:
+ * 1. Create a replication group of 3 replicas
+ * 2. Create an additional copyset on each chunkserver of the group and use
+ *    it for all subsequent operations
+ * 3. Wait for a leader to emerge and write data
+ * 4. Sleep for more than one snapshot interval to make sure a raft snapshot
+ *    is generated
+ * 5. Bump the write version to produce a chunk snapshot
+ * 6. Sleep for more than one snapshot interval again to make sure another
+ *    raft snapshot is generated
+ * 7. Shut down a non-leader
+ * 8. AddPeer a new node so that it recovers by loading a snapshot, then
+ *    remove the shut-down peer
+ * 9. Switch the leader to the newly added peer
+ * 10. Wait for a leader to emerge, then read back and verify the data and
+ *     the chunk snapshot produced earlier
 */
TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) {
    LogicPoolID logicPoolId = 2;
@@ -622,18 +544,14 @@ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) {
    peers.push_back(peer2_);
    peers.push_back(peer3_);

-    PeerCluster cluster("ThreeNode-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params_,
-                        paramsIndexs_);
+    PeerCluster cluster("ThreeNode-cluster", logicPoolId, copysetId, peers,
+                        params_, paramsIndexs_);
    ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
    ASSERT_EQ(0, cluster.StartPeer(peer1_, PeerCluster::PeerToId(peer1_)));
    ASSERT_EQ(0, cluster.StartPeer(peer2_, PeerCluster::PeerToId(peer2_)));
    ASSERT_EQ(0, cluster.StartPeer(peer3_, PeerCluster::PeerToId(peer3_)));

-    // 创建新的copyset
+    // Create a new copyset
    LOG(INFO) << "create new copyset.";
    ++copysetId;
    int ret = cluster.CreateCopyset(logicPoolId, copysetId, peer1_, peers);
@@ -643,57 +561,46 @@
    ret = cluster.CreateCopyset(logicPoolId, copysetId, peer3_, peers);
    ASSERT_EQ(0, ret);

-    // 使用新的copyset作为操作对象
+    // Use the new copyset for all subsequent operations
    cluster.SetWorkingCopyset(copysetId);

    Peer leaderPeer;
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));

    LOG(INFO) << "write 1 start";
-    // 发起 read/write,产生chunk文件
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch,  // a
-                        loop,
-                        initsn);
+    // Initiate read/write to generate a chunk file
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch,  // a
+                        loop, initsn);
    LOG(INFO) << "write 1 end";

-    // wait snapshot, 保证能够触发打快照
-    ::sleep(1.5*snapshotIntervalS_);
+    // Wait long enough to make sure a raft snapshot is triggered
+    ::sleep(1.5 * snapshotIntervalS_);

    LOG(INFO) << "write 2 start";
-    // 发起 read/write,产生chunk文件,并产生快照文件
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ++ch,  // b
-                        loop,
-                        initsn+1);  // sn = 2
+    // Initiate read/write, generating the chunk file and its snapshot file
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ++ch,  // b
+                        loop,
+                        initsn + 1);  // sn = 2
-    // 验证chunk快照数据正确性
-    ReadSnapshotVerify(leaderPeer,
-                       logicPoolId,
-                       copysetId,
-                       chunkId,
-                       length,
-                       ch-1,  // a
-                       loop);
+    // Verify that the chunk snapshot still holds the previous data
+    ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                       ch - 1,  // a
+                       loop);
    LOG(INFO) << "write 2 end";

-    // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作
-    // 所以先睡一会,防止并发统计文件信息
+    // Operations are not fully synchronized across raft replicas, and some
+    // replicas may lag behind. Sleep briefly first so that file statistics
+    // are not collected while operations are still in flight
    ::sleep(2);

-    // wait snapshot, 保证能够触发打快照
-    // 通过至少两次快照,保证新加的peer通过下载快照安装
-    ::sleep(1.5*snapshotIntervalS_);
+    // Wait long enough to make sure a raft snapshot is triggered.
+    // Taking at least two snapshots guarantees that the newly added peer is
+    // installed by downloading a snapshot
+    ::sleep(1.5 * snapshotIntervalS_);

-    // shutdown 某个follower
+    // Shut down one follower
    Peer shutdownPeer;
    if (leaderPeer.address() == peer1_.address()) {
        shutdownPeer = peer2_;
@@ -704,30 +611,28 @@
    LOG(INFO) << "leader peer: " << leaderPeer.address();
    ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer));

-    // 添加新的peer,并移除shutdown的peer
+    // Add a new peer and remove the shut-down peer
    Configuration conf
= cluster.CopysetConf(); - ASSERT_EQ(0, cluster.StartPeer(peer4_, - PeerCluster::PeerToId(peer4_))); + ASSERT_EQ(0, cluster.StartPeer(peer4_, PeerCluster::PeerToId(peer4_))); butil::Status status = AddPeer(logicPoolId, copysetId, conf, peer4_, defaultCliOpt_); ASSERT_TRUE(status.ok()); - // 删除旧leader及其目录 + // Delete old leader and its directory status = RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, defaultCliOpt_); ASSERT_TRUE(status.ok()); std::string rmdir("rm -fr "); - rmdir += std::to_string(PeerCluster::PeerToId(shutdownPeer)); + rmdir += std::to_string(PeerCluster::PeerToId(shutdownPeer)); ::system(rmdir.c_str()); - // transfer leader 到peer4_,并读出来验证 + // Transfer leader to peer4_, And read it out for verification TransferLeaderAssertSuccess(&cluster, peer4_, defaultCliOpt_); leaderPeer = peer4_; - // 读数据验证 - ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch, loop); - ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch-1, loop); + // Read Data Validation + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); + ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, loop); } } // namespace chunkserver diff --git a/test/integration/raft/raft_vote_test.cpp b/test/integration/raft/raft_vote_test.cpp index 5f87a1495f..9b5d97b98f 100644 --- a/test/integration/raft/raft_vote_test.cpp +++ b/test/integration/raft/raft_vote_test.cpp @@ -21,84 +21,66 @@ */ #include -#include #include +#include #include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "test/integration/common/peer_cluster.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; const char kRaftVoteTestLogDir[] = "./runlog/RaftVote"; const char* kFakeMdsAddr = "127.0.0.1:9089"; static constexpr uint32_t kOpRequestAlignSize = 4096; static const char* raftVoteParam[3][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9091", - "-chunkServerStoreUri=local://./9091/", - "-chunkServerMetaUri=local://./9091/chunkserver.dat", - "-copySetUri=local://./9091/copysets", - "-raftSnapshotUri=curve://./9091/copysets", - "-recycleUri=local://./9091/recycler", - "-chunkFilePoolDir=./9091/chunkfilepool/", - "-chunkFilePoolMetaPath=./9091/chunkfilepool.meta", - "-conf=./9091/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9091/copysets", - "-walFilePoolDir=./9091/walfilepool/", - "-walFilePoolMetaPath=./9091/walfilepool.meta", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9092", - "-chunkServerStoreUri=local://./9092/", - "-chunkServerMetaUri=local://./9092/chunkserver.dat", - "-copySetUri=local://./9092/copysets", - "-raftSnapshotUri=curve://./9092/copysets", - "-recycleUri=local://./9092/recycler", - "-chunkFilePoolDir=./9092/chunkfilepool/", - "-chunkFilePoolMetaPath=./9092/chunkfilepool.meta", - "-conf=./9092/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9092/copysets", - "-walFilePoolDir=./9092/walfilepool/", - "-walFilePoolMetaPath=./9092/walfilepool.meta", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - 
"-chunkServerPort=9093", - "-chunkServerStoreUri=local://./9093/", - "-chunkServerMetaUri=local://./9093/chunkserver.dat", - "-copySetUri=local://./9093/copysets", - "-raftSnapshotUri=curve://./9093/copysets", - "-recycleUri=local://./9093/recycler", - "-chunkFilePoolDir=./9093/chunkfilepool/", - "-chunkFilePoolMetaPath=./9093/chunkfilepool.meta", - "-conf=./9093/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9093/copysets", - "-walFilePoolDir=./9093/walfilepool/", - "-walFilePoolMetaPath=./9093/walfilepool.meta", - NULL - }, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9091", + "-chunkServerStoreUri=local://./9091/", + "-chunkServerMetaUri=local://./9091/chunkserver.dat", + "-copySetUri=local://./9091/copysets", + "-raftSnapshotUri=curve://./9091/copysets", + "-recycleUri=local://./9091/recycler", + "-chunkFilePoolDir=./9091/chunkfilepool/", + "-chunkFilePoolMetaPath=./9091/chunkfilepool.meta", + "-conf=./9091/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9091/copysets", + "-walFilePoolDir=./9091/walfilepool/", + "-walFilePoolMetaPath=./9091/walfilepool.meta", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9092", + "-chunkServerStoreUri=local://./9092/", + "-chunkServerMetaUri=local://./9092/chunkserver.dat", + "-copySetUri=local://./9092/copysets", + "-raftSnapshotUri=curve://./9092/copysets", + "-recycleUri=local://./9092/recycler", + "-chunkFilePoolDir=./9092/chunkfilepool/", + "-chunkFilePoolMetaPath=./9092/chunkfilepool.meta", + "-conf=./9092/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9092/copysets", + "-walFilePoolDir=./9092/walfilepool/", + "-walFilePoolMetaPath=./9092/walfilepool.meta", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9093", + "-chunkServerStoreUri=local://./9093/", + "-chunkServerMetaUri=local://./9093/chunkserver.dat", + "-copySetUri=local://./9093/copysets", + "-raftSnapshotUri=curve://./9093/copysets", + "-recycleUri=local://./9093/recycler", + "-chunkFilePoolDir=./9093/chunkfilepool/", + "-chunkFilePoolMetaPath=./9093/chunkfilepool.meta", + "-conf=./9093/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9093/copysets", + "-walFilePoolDir=./9093/walfilepool/", + "-walFilePoolMetaPath=./9093/walfilepool.meta", NULL}, }; class RaftVoteTest : public testing::Test { @@ -130,25 +112,22 @@ class RaftVoteTest : public testing::Test { ASSERT_TRUE(cg2.Init("9092")); ASSERT_TRUE(cg3.Init("9093")); cg1.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg1.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg1.SetKV("chunkserver.common.logDir", - kRaftVoteTestLogDir); + std::to_string(snapshotIntervalS)); + cg1.SetKV("chunkserver.common.logDir", kRaftVoteTestLogDir); cg1.SetKV("mds.listen.addr", kFakeMdsAddr); cg2.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg2.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg2.SetKV("chunkserver.common.logDir", - kRaftVoteTestLogDir); + std::to_string(snapshotIntervalS)); + cg2.SetKV("chunkserver.common.logDir", kRaftVoteTestLogDir); cg2.SetKV("mds.listen.addr", kFakeMdsAddr); cg3.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg3.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - 
cg3.SetKV("chunkserver.common.logDir", - kRaftVoteTestLogDir); + std::to_string(snapshotIntervalS)); + cg3.SetKV("chunkserver.common.logDir", kRaftVoteTestLogDir); cg3.SetKV("mds.listen.addr", kFakeMdsAddr); ASSERT_TRUE(cg1.Generate()); ASSERT_TRUE(cg2.Generate()); @@ -189,22 +168,21 @@ class RaftVoteTest : public testing::Test { int snapshotIntervalS; std::map paramsIndexs; - std::vector params; - // 等待多个副本数据一致的时间 + std::vector params; + // Waiting for multiple replica data to be consistent int waitMultiReplicasBecomeConsistent; }; - - butil::AtExitManager atExitManager; /** - * 验证1个节点的复制组 - * 1. 创建1个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader,验证可用性 - * 3. 拉起leader - * 4. hang住leader - * 5. 恢复leader + * Verify replication group for 1 node + * 1. Create a replication group of 1 member, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the leader and verify availability + * 3. Pull up the leader + * 4. Hang in the leader + * 5. Restore leader */ TEST_F(RaftVoteTest, OneNode) { LogicPoolID logicPoolId = 2; @@ -214,17 +192,13 @@ TEST_F(RaftVoteTest, OneNode) { char ch = 'a'; int loop = 25; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -234,85 +208,51 @@ TEST_F(RaftVoteTest, OneNode) { ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉这个节点 + // 2. Hang up this node ASSERT_EQ(0, cluster.ShutdownPeer(peer1)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 将节点拉起来 + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Pull up the node ASSERT_EQ(0, cluster.StartPeer(peer1, PeerCluster::PeerToId(peer1))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. hang住此节点 + // 4. Hang on to this node ASSERT_EQ(0, cluster.HangPeer(peer1)); ::usleep(200 * 1000); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 5. 恢复节点 + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 5. 
Restore nodes ASSERT_EQ(0, cluster.SignalPeer(peer1)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); } /** - * 验证2个节点的复制组,并挂掉leader - * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader - * 3. 恢复leader + * Verify the replication groups of two nodes and hang the leader + * 1. Create a replication group of 2 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the leader + * 3. Restore leader */ TEST_F(RaftVoteTest, TwoNodeKillLeader) { LogicPoolID logicPoolId = 2; @@ -322,18 +262,14 @@ TEST_F(RaftVoteTest, TwoNodeKillLeader) { char ch = 'a'; int loop = 25; - // 1. 启动2个成员的复制组 + // 1. Start a replication group of 2 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -343,55 +279,36 @@ TEST_F(RaftVoteTest, TwoNodeKillLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉leader + // 2. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 拉起leader + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Pull up the leader ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证2个节点的复制组,并挂掉follower - * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 恢复follower + * Verify the replication groups of two nodes and hang the follower + * 1. Create a replication group of 2 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the follower + * 3. 
Restore the follower
 */
TEST_F(RaftVoteTest, TwoNodeKillFollower) {
    LogicPoolID logicPoolId = 2;
@@ -401,19 +318,15 @@ TEST_F(RaftVoteTest, TwoNodeKillFollower) {
    char ch = 'a';
    int loop = 25;

-    // 1. 启动2个成员的复制组
+    // 1. Start a replication group of 2 members
    LOG(INFO) << "init 2 members copyset";
    PeerId leaderId;
    Peer leaderPeer;
    std::vector<Peer> peers;
    peers.push_back(peer1);
    peers.push_back(peer2);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
    ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
    cluster.SetElectionTimeoutMs(electionTimeoutMs);
    cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -423,15 +336,10 @@
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

-    // 2. 挂掉follower
+    // 2. Kill the follower
    Peer followerPeer;
    if (leaderPeer.address() == peer1.address()) {
        followerPeer = peer2;
@@ -441,57 +349,37 @@
    LOG(INFO) << "kill follower " << followerPeer.address();
    ASSERT_EQ(0, cluster.ShutdownPeer(followerPeer));
    LOG(INFO) << "fill ch: " << std::to_string(ch - 1);
-    // step down之前的request,最终会被提交
-    WriteVerifyNotAvailable(leaderPeer,
-                            logicPoolId,
-                            copysetId,
-                            chunkId,
-                            length,
-                            ch,
-                            1);
+    // Requests issued before the step-down will eventually be committed
+    WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                            ch, 1);
-    // 等待leader step down,之后,也不支持read了
+    // After the leader steps down, reads are not served either
    ::usleep(1000 * electionTimeoutMs * 2);
-    ReadVerifyNotAvailable(leaderPeer,
-                           logicPoolId,
-                           copysetId,
-                           chunkId,
-                           length,
-                           ch,
-                           1);
+    ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                           ch, 1);

-    // 3. 拉起follower
+    // 3. Pull up the follower
    LOG(INFO) << "restart follower " << followerPeer.address();
-    ASSERT_EQ(0,
-              cluster.StartPeer(followerPeer,
-                                PeerCluster::PeerToId(followerPeer)));
+    ASSERT_EQ(0, cluster.StartPeer(followerPeer,
+                                   PeerCluster::PeerToId(followerPeer)));
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));

-    // read之前写入的数据验证,step down之前的write
-    ReadVerify(leaderPeer,
-               logicPoolId,
-               copysetId,
-               chunkId,
-               length,
-               ch,
-               1);
+    // Read back the write issued before the step-down to verify it
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, 1);

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    ::usleep(waitMultiReplicasBecomeConsistent * 1000);
    CopysetStatusVerify(peers, logicPoolId, copysetId, 2);
}

/**
- * 验证2个节点的复制组,并hang leader
- * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍
- * 2. hang leader
- * 3. 恢复leader
+ * Verify a replication group of 2 nodes and hang the leader
+ * 1. Create a replication group of 2 members, wait for a leader to emerge,
+ *    write data, then read it back to verify
+ * 2. Hang the leader
+ * 3. 
Restore leader */ TEST_F(RaftVoteTest, TwoNodeHangLeader) { LogicPoolID logicPoolId = 2; @@ -501,18 +389,14 @@ TEST_F(RaftVoteTest, TwoNodeHangLeader) { char ch = 'a'; int loop = 25; - // 1. 启动2个成员的复制组 + // 1. Start a replication group of 2 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -522,56 +406,37 @@ TEST_F(RaftVoteTest, TwoNodeHangLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 2. Hang leader LOG(INFO) << "hang leader peer: " << leaderPeer.address(); ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 恢复leader + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Restore leader LOG(INFO) << "recover leader peer: " << leaderPeer.address(); ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证2个节点的复制组,并发Hang一个follower - * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang follower - * 3. 恢复follower + * Verify the replication group of two nodes and concurrently hang a follower + * 1. Create a replication group of 2 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang follower + * 3. Restore follower */ TEST_F(RaftVoteTest, TwoNodeHangFollower) { LogicPoolID logicPoolId = 2; @@ -581,19 +446,15 @@ TEST_F(RaftVoteTest, TwoNodeHangFollower) { char ch = 'a'; int loop = 25; - // 1. 启动2个成员的复制组 + // 1. 
Start a replication group of 2 members LOG(INFO) << "init 2 members copyset"; PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -603,13 +464,8 @@ TEST_F(RaftVoteTest, TwoNodeHangFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 2. hang follower Peer followerPeer; @@ -621,53 +477,33 @@ TEST_F(RaftVoteTest, TwoNodeHangFollower) { LOG(INFO) << "hang follower " << followerPeer.address(); ASSERT_EQ(0, cluster.HangPeer(followerPeer)); LOG(INFO) << "fill ch: " << std::to_string(ch - 1); - // step down之前的request,最终会被提交 - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); - // 等待leader step down之后,也不支持read了 + // The request before the step down will eventually be submitted + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch, 1); + // After waiting for the leader step to down, read is no longer supported ::usleep(1000 * electionTimeoutMs * 2); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); - - // 3. 恢复follower + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch, 1); + + // 3. Restore follower LOG(INFO) << "recover follower " << followerPeer.address(); ASSERT_EQ(0, cluster.SignalPeer(followerPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证,step down之前的write - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); - - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + // Verify the data written before read, and write before step down + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, 1); + + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点是否能够正常提供服务 - * 1. 创建3个副本的复制组,等待leader产生,write数据,然后read出来验证一遍 + * Verify whether the three nodes can provide services normally + * 1. 
Create a replication group of three replicas, wait for a leader to emerge,
+ * write data, then read it back to verify
 */
TEST_F(RaftVoteTest, ThreeNodesNormal) {
    LogicPoolID logicPoolId = 2;
@@ -682,12 +518,8 @@
    peers.push_back(peer2);
    peers.push_back(peer3);

-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
    ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
    cluster.SetElectionTimeoutMs(electionTimeoutMs);
    cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -700,24 +532,20 @@
    PeerId leaderId;
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));

-    // 再次发起 read/write
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    // Initiate read/write again
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    ::usleep(waitMultiReplicasBecomeConsistent * 1000);
    CopysetStatusVerify(peers, logicPoolId, copysetId, 1);
}

/**
- * 验证3个节点的复制组,并挂掉leader
- * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍
- * 2. 挂掉leader
- * 3. 恢复leader
+ * Verify a replication group of three nodes and kill the leader
+ * 1. Create a replication group of 3 members, wait for a leader to emerge,
+ *    write data, then read it back to verify
+ * 2. Kill the leader
+ * 3. Restart the leader
 */
TEST_F(RaftVoteTest, ThreeNodeKillLeader) {
    LogicPoolID logicPoolId = 2;
@@ -727,19 +555,15 @@
    char ch = 'a';
    int loop = 25;

-    // 1. 启动3个成员的复制组
+    // 1. Start a replication group of 3 members
    PeerId leaderId;
    Peer leaderPeer;
    std::vector<Peer> peers;
    peers.push_back(peer1);
    peers.push_back(peer2);
    peers.push_back(peer3);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
    ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
    cluster.SetElectionTimeoutMs(electionTimeoutMs);
    cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -750,55 +574,36 @@
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

-    // 2. 挂掉leader
+    // 2. Kill the leader
    ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer));
-    ReadVerifyNotAvailable(leaderPeer,
-                           logicPoolId,
-                           copysetId,
-                           chunkId,
-                           length,
-                           ch - 1,
-                           1);
+    ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                           ch - 1, 1);

+    // 3. 
Pull up the leader ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点的复制组,并挂掉follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 恢复follower + * Verify the replication groups of three nodes and hang the follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the follower + * 3. Restore follower */ TEST_F(RaftVoteTest, ThreeNodeKillOneFollower) { LogicPoolID logicPoolId = 2; @@ -808,19 +613,15 @@ TEST_F(RaftVoteTest, ThreeNodeKillOneFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -831,57 +632,37 @@ TEST_F(RaftVoteTest, ThreeNodeKillOneFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 3. 拉起follower - ASSERT_EQ(0, - cluster.StartPeer(followerPeers[0], - PeerCluster::PeerToId(followerPeers[0]))); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // 3. Pull up the follower + ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], + PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的复制组,反复restart leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 
反复restart leader + * Verify the replication group of three nodes and repeatedly restart the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Repeated restart leader */ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) { LogicPoolID logicPoolId = 2; @@ -891,19 +672,15 @@ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -914,13 +691,8 @@ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 2. restart leader for (int i = 0; i < 5; ++i) { @@ -928,32 +700,17 @@ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) { ::sleep(3); ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, + length, ch - 1, 1); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); } ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); @@ -961,9 +718,11 @@ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) { } /** - * 验证3个节点的复制组,反复重启一个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 反复重启follower + * Verify the replication groups of three nodes and restart a follower + * repeatedly + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Repeatedly restarting the follower */ TEST_F(RaftVoteTest, ThreeNodeRestartFollower) { LogicPoolID logicPoolId = 2; @@ -973,19 +732,15 @@ TEST_F(RaftVoteTest, ThreeNodeRestartFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -996,27 +751,17 @@ TEST_F(RaftVoteTest, ThreeNodeRestartFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 反复 restart follower + // 2. Repeatedly restart follower for (int i = 0; i < 5; ++i) { std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); @@ -1028,11 +773,13 @@ TEST_F(RaftVoteTest, ThreeNodeRestartFollower) { } /** - * 验证3个节点的复制组,并挂掉leader和1个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader和1个follwoer - * 3. 拉起leader - * 4. 拉起follower + * Verify the replication groups of three nodes and hang the leader and one + * follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the leader and 1 follower + * 3. Pull up the leader + * 4. Pull up the follower */ TEST_F(RaftVoteTest, ThreeNodeKillLeaderAndOneFollower) { LogicPoolID logicPoolId = 2; @@ -1042,19 +789,15 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeaderAndOneFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1065,72 +808,48 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeaderAndOneFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉leader和Follower + // 2. Hang up the leader and follower ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 
拉起leader + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Pull up the leader ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 拉起follower + // 4. Pull up the follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(2 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点的复制组,并挂掉2个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉2个follower - * 3. 拉起1个follower - * 4. 拉起1个follower + * Verify the replication groups of three nodes and hang two followers + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up 2 followers + * 3. Pull up 1 follower + * 4. Pull up 1 follower */ TEST_F(RaftVoteTest, ThreeNodeKillTwoFollower) { LogicPoolID logicPoolId = 2; @@ -1140,19 +859,15 @@ TEST_F(RaftVoteTest, ThreeNodeKillTwoFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1163,73 +878,49 @@ TEST_F(RaftVoteTest, ThreeNodeKillTwoFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉2个Follower + // 2. Hang 2 Followers std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 2); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[1])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 拉起1个follower + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. 
Pull up 1 follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 拉起follower + // 4. Pull up the follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[1], PeerCluster::PeerToId(followerPeers[1]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的复制组,并挂掉3个成员 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉3个成员 - * 3. 拉起1个成员 - * 4. 拉起1个成员 - * 5. 拉起1个成员 + * Verify the replication group of 3 nodes and suspend 3 members + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang 3 members + * 3. Pull up 1 member + * 4. Pull up 1 member + * 5. Pull up 1 member */ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { LogicPoolID logicPoolId = 2; @@ -1239,19 +930,15 @@ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1262,80 +949,50 @@ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉3个成员 + // 2. Hang 3 members std::vector followerPeers; ASSERT_EQ(0, cluster.ShutdownPeer(peer1)); ASSERT_EQ(0, cluster.ShutdownPeer(peer2)); ASSERT_EQ(0, cluster.ShutdownPeer(peer3)); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 拉起1个成员 - ASSERT_EQ(0, cluster.StartPeer(peer1, - PeerCluster::PeerToId(peer1))); + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Pull up 1 member + ASSERT_EQ(0, cluster.StartPeer(peer1, PeerCluster::PeerToId(peer1))); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); - ReadVerifyNotAvailable(peer1, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - - // 4. 
拉起1个成员 - ASSERT_EQ(0, cluster.StartPeer(peer2, - PeerCluster::PeerToId(peer2))); + ReadVerifyNotAvailable(peer1, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 4. Pull up 1 member + ASSERT_EQ(0, cluster.StartPeer(peer2, PeerCluster::PeerToId(peer2))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 5. 再拉起1个成员 - ASSERT_EQ(0, cluster.StartPeer(peer3, - PeerCluster::PeerToId(peer3))); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // 5. Pull up one more member + ASSERT_EQ(0, cluster.StartPeer(peer3, PeerCluster::PeerToId(peer3))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } - - /** - * 验证3个节点的复制组,并hang leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang leader - * 3. 恢复leader + * Verify the replication groups of three nodes and hang the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang leader + * 3. Restore leader */ TEST_F(RaftVoteTest, ThreeNodeHangLeader) { LogicPoolID logicPoolId = 2; @@ -1345,19 +1002,15 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1368,65 +1021,40 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 2. hang leader Peer oldPeer = leaderPeer; ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 等待new leader产生 + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // Waiting for new leader generation ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 3. 恢复 old leader + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // 3. 
Restore old leader ASSERT_EQ(0, cluster.SignalPeer(oldPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } - /** - * 验证3个节点的复制组,并hang1个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 恢复follower + * Verify the replication groups of 3 nodes and hang 1 follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the follower + * 3. Restore follower */ TEST_F(RaftVoteTest, ThreeNodeHangOneFollower) { LogicPoolID logicPoolId = 2; @@ -1436,19 +1064,15 @@ TEST_F(RaftVoteTest, ThreeNodeHangOneFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1459,56 +1083,38 @@ TEST_F(RaftVoteTest, ThreeNodeHangOneFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. hang 1个follower + // 2. Hang 1 follower std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.HangPeer(followerPeers[0])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 3. 恢复follower + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // 3. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers[0])); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的复制组,并hang leader和1个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang leader和1个follower - * 3. 恢复old leader - * 4. 恢复follower + * Verify the replication groups of three nodes and hang the leader and one + * follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. 
Hang leader and 1 follower + * 3. Restore old leader + * 4. Restore follower */ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { LogicPoolID logicPoolId = 2; @@ -1518,19 +1124,15 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1541,13 +1143,8 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 2. hang leader ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); @@ -1555,63 +1152,39 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.HangPeer(followerPeers[0])); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 恢复 old leader + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Restore old leader ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 恢复follower + // 4. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers[0])); - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 0); } /** - * 验证3个节点的复制组,并hang 2个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang两个follower - * 3. 恢复old leader - * 4. 恢复follower + * Verify the replication groups of 3 nodes and hang 2 followers + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang two followers + * 3. Restore old leader + * 4. Restore follower */ TEST_F(RaftVoteTest, ThreeNodeHangTwoFollower) { LogicPoolID logicPoolId = 2; @@ -1621,19 +1194,15 @@ TEST_F(RaftVoteTest, ThreeNodeHangTwoFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1644,89 +1213,54 @@ TEST_F(RaftVoteTest, ThreeNodeHangTwoFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. hang 2个follower + // 2. Hang 2 followers std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 2); ASSERT_EQ(0, cluster.HangPeer(followerPeers[0])); ASSERT_EQ(0, cluster.HangPeer(followerPeers[1])); - // step down之前提交request会超时 - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - 1); - - // 等待step down之后,读也不可提供服务 + // Submitting a request before the step down will timeout + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, 1); + + // After waiting for the step down, reading is not available for service ::usleep(1000 * electionTimeoutMs * 2); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 恢复1个follower + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Restore 1 follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers[0])); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 4. 恢复1个follower + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, 1); + + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // 4. Restore 1 follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers[1])); - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(2 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点的复制组,并hang 3个成员 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang 3个成员 - * 3. 恢复1个成员 - * 4. 恢复1个成员 - * 5. 恢复1个成员 + * Verify the replication group of 3 nodes and hang 3 members + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang 3 members + * 3. Restore 1 member + * 4. Restore 1 member + * 5. 
Restore 1 member */ TEST_F(RaftVoteTest, ThreeNodeHangThreeMember) { LogicPoolID logicPoolId = 2; @@ -1736,19 +1270,15 @@ TEST_F(RaftVoteTest, ThreeNodeHangThreeMember) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1759,77 +1289,41 @@ TEST_F(RaftVoteTest, ThreeNodeHangThreeMember) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉3个成员 + // 2. Hang 3 members std::vector followerPeers; ASSERT_EQ(0, cluster.HangPeer(peer1)); ASSERT_EQ(0, cluster.HangPeer(peer2)); ASSERT_EQ(0, cluster.HangPeer(peer3)); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 恢复1个成员 + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Restore 1 member ASSERT_EQ(0, cluster.SignalPeer(peer1)); ::usleep(1000 * electionTimeoutMs * 2); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); - // 4. 恢复1个成员 + // 4. Restore 1 member ASSERT_EQ(0, cluster.SignalPeer(peer2)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 5. 再恢复1个成员 + // 5. 
Restore 1 more member ASSERT_EQ(0, cluster.SignalPeer(peer3)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); diff --git a/test/integration/snapshotcloneserver/fake_curvefs_client.cpp b/test/integration/snapshotcloneserver/fake_curvefs_client.cpp index 49191fdd40..af6be699fd 100644 --- a/test/integration/snapshotcloneserver/fake_curvefs_client.cpp +++ b/test/integration/snapshotcloneserver/fake_curvefs_client.cpp @@ -20,11 +20,11 @@ * Author: xuchaojie */ +#include "test/integration/snapshotcloneserver/fake_curvefs_client.h" + #include #include -#include "test/integration/snapshotcloneserver/fake_curvefs_client.h" - namespace curve { namespace snapshotcloneserver { @@ -36,9 +36,8 @@ const uint64_t chunkSize = 16ULL * 1024 * 1024; const uint64_t segmentSize = 32ULL * 1024 * 1024; const uint64_t fileLength = 64ULL * 1024 * 1024; - -int FakeCurveFsClient::Init(const CurveClientOptions &options) { - // 初始化一个文件用打快照和克隆 +int FakeCurveFsClient::Init(const CurveClientOptions& options) { + // Initialize a file for snapshot and cloning FInfo fileInfo; fileInfo.id = 100; fileInfo.parentid = 3; @@ -59,15 +58,13 @@ int FakeCurveFsClient::Init(const CurveClientOptions &options) { return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::UnInit() { - return LIBCURVE_ERROR::OK; -} +int FakeCurveFsClient::UnInit() { return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::CreateSnapshot(const std::string &filename, - const std::string &user, - uint64_t *seq) { +int FakeCurveFsClient::CreateSnapshot(const std::string& filename, + const std::string& user, uint64_t* seq) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateSnapshot", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateSnapshot", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(filename); if (it != fileMap_.end()) { @@ -77,8 +74,8 @@ int FakeCurveFsClient::CreateSnapshot(const std::string &filename, snapInfo.filetype = FileType::INODE_SNAPSHOT_PAGEFILE; snapInfo.id = fileId_++; snapInfo.parentid = it->second.id; - snapInfo.filename = (it->second.filename + "-" - + std::to_string(it->second.seqnum)); + snapInfo.filename = + (it->second.filename + "-" + std::to_string(it->second.seqnum)); snapInfo.filestatus = FileStatus::Created; it->second.seqnum++; @@ -89,11 +86,11 @@ int FakeCurveFsClient::CreateSnapshot(const std::string &filename, } } -int FakeCurveFsClient::DeleteSnapshot(const std::string &filename, - const std::string &user, - uint64_t seq) { +int FakeCurveFsClient::DeleteSnapshot(const std::string& filename, + const std::string& user, uint64_t seq) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.DeleteSnapshot", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.DeleteSnapshot", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileSnapInfoMap_.find(filename); if (it != fileSnapInfoMap_.end()) { fileSnapInfoMap_.erase(it); @@ -102,12 +99,12 @@ int FakeCurveFsClient::DeleteSnapshot(const std::string &filename, return -LIBCURVE_ERROR::NOTEXIST; } -int FakeCurveFsClient::GetSnapshot(const std::string &filename, - const std::string &user, - uint64_t seq, +int FakeCurveFsClient::GetSnapshot(const std::string& filename, + const std::string& user, 
uint64_t seq, FInfo* snapInfo) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot", + -LIBCURVE_ERROR::FAILED); // NOLINT if (fileSnapInfoMap_.find(filename) != fileSnapInfoMap_.end()) { *snapInfo = fileSnapInfoMap_[filename]; return LIBCURVE_ERROR::OK; @@ -115,17 +112,18 @@ int FakeCurveFsClient::GetSnapshot(const std::string &filename, return -LIBCURVE_ERROR::NOTEXIST; } -int FakeCurveFsClient::GetSnapshotSegmentInfo(const std::string &filename, - const std::string &user, - uint64_t seq, - uint64_t offset, - SegmentInfo *segInfo) { +int FakeCurveFsClient::GetSnapshotSegmentInfo(const std::string& filename, + const std::string& user, + uint64_t seq, uint64_t offset, + SegmentInfo* segInfo) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshotSegmentInfo", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetSnapshotSegmentInfo", + -LIBCURVE_ERROR::FAILED); // NOLINT segInfo->segmentsize = segmentSize; segInfo->chunksize = chunkSize; segInfo->startoffset = offset; - // 一共2个segment + // 2 segments in total if (offset == 0) { segInfo->chunkvec = {{1, 1, 1}, {2, 2, 1}}; } else { @@ -134,50 +132,47 @@ int FakeCurveFsClient::GetSnapshotSegmentInfo(const std::string &filename, return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::ReadChunkSnapshot(ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure *scc) { +int FakeCurveFsClient::ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, + uint64_t offset, uint64_t len, + char* buf, SnapCloneClosure* scc) { scc->SetRetCode(LIBCURVE_ERROR::OK); scc->Run(); fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.ReadChunkSnapshot", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.ReadChunkSnapshot", + -LIBCURVE_ERROR::FAILED); // NOLINT memset(buf, 'x', len); return LIBCURVE_ERROR::OK; } int FakeCurveFsClient::CheckSnapShotStatus(std::string filename, - std::string user, - uint64_t seq, - FileStatus* filestatus) { + std::string user, uint64_t seq, + FileStatus* filestatus) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CheckSnapShotStatus", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CheckSnapShotStatus", + -LIBCURVE_ERROR::FAILED); // NOLINT return -LIBCURVE_ERROR::NOTEXIST; } -int FakeCurveFsClient::GetChunkInfo(const ChunkIDInfo &cidinfo, - ChunkInfoDetail *chunkInfo) { +int FakeCurveFsClient::GetChunkInfo(const ChunkIDInfo& cidinfo, + ChunkInfoDetail* chunkInfo) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo", + -LIBCURVE_ERROR::FAILED); // NOLINT chunkInfo->chunkSn.push_back(1); return LIBCURVE_ERROR::OK; } int FakeCurveFsClient::CreateCloneFile( - const std::string &source, - const std::string &filename, - const std::string &user, - uint64_t size, - uint64_t sn, - uint32_t chunkSize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, + const std::string& source, const std::string& filename, + const std::string& user, uint64_t size, uint64_t sn, uint32_t chunkSize, + uint64_t stripeUnit, uint64_t stripeCount, const std::string& poolset, FInfo* fileInfo) { 
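    // Each method in this fake declares a libfiu failpoint named after the
    // method (see the fiu_return_on() call just below), so integration tests
    // can force an error return without touching the fake's state. A minimal
    // sketch of driving such a failpoint with the stock libfiu control API
    // (fiu_init/fiu_enable/fiu_disable); exactly how the integration tests
    // arm it is an assumption here:
    //
    //   fiu_init(0);  // once per process, before any failpoint is queried
    //   fiu_enable("test/integration/snapshotcloneserver/"
    //              "FakeCurveFsClient.CreateCloneFile", 1, nullptr, 0);
    //   // ...the next CreateCloneFile() call returns -LIBCURVE_ERROR::FAILED
    //   fiu_disable("test/integration/snapshotcloneserver/"
    //               "FakeCurveFsClient.CreateCloneFile");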
fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile", + -LIBCURVE_ERROR::FAILED); // NOLINT fileInfo->id = fileId_++; fileInfo->parentid = 2; @@ -202,37 +197,37 @@ int FakeCurveFsClient::CreateCloneFile( return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::CreateCloneChunk( - const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure* scc) { +int FakeCurveFsClient::CreateCloneChunk(const std::string& location, + const ChunkIDInfo& chunkidinfo, + uint64_t sn, uint64_t csn, + uint64_t chunkSize, + SnapCloneClosure* scc) { scc->SetRetCode(LIBCURVE_ERROR::OK); scc->Run(); fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk", + -LIBCURVE_ERROR::FAILED); // NOLINT return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::RecoverChunk( - const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure *scc) { +int FakeCurveFsClient::RecoverChunk(const ChunkIDInfo& chunkidinfo, + uint64_t offset, uint64_t len, + SnapCloneClosure* scc) { scc->SetRetCode(LIBCURVE_ERROR::OK); scc->Run(); fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", + -LIBCURVE_ERROR::FAILED); // NOLINT return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::CompleteCloneMeta( - const std::string &filename, - const std::string &user) { +int FakeCurveFsClient::CompleteCloneMeta(const std::string& filename, + const std::string& user) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(filename); if (it != fileMap_.end()) { it->second.filestatus = FileStatus::CloneMetaInstalled; @@ -242,11 +237,12 @@ int FakeCurveFsClient::CompleteCloneMeta( } } -int FakeCurveFsClient::CompleteCloneFile( - const std::string &filename, - const std::string &user) { +int FakeCurveFsClient::CompleteCloneFile(const std::string& filename, + const std::string& user) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(filename); if (it != fileMap_.end()) { it->second.filestatus = FileStatus::Cloned; @@ -256,12 +252,13 @@ int FakeCurveFsClient::CompleteCloneFile( } } -int FakeCurveFsClient::SetCloneFileStatus( - const std::string &filename, - const FileStatus& filestatus, - const std::string &user) { +int FakeCurveFsClient::SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const std::string& user) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.SetCloneFileStatus", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.SetCloneFileStatus", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(filename); if (it != fileMap_.end()) { it->second.filestatus = filestatus; @@ 
-271,12 +268,11 @@ int FakeCurveFsClient::SetCloneFileStatus( } } -int FakeCurveFsClient::GetFileInfo( - const std::string &filename, - const std::string &user, - FInfo* fileInfo) { +int FakeCurveFsClient::GetFileInfo(const std::string& filename, + const std::string& user, FInfo* fileInfo) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", + -LIBCURVE_ERROR::FAILED); // NOLINT if (fileMap_.find(filename) != fileMap_.end()) { *fileInfo = fileMap_[filename]; return LIBCURVE_ERROR::OK; @@ -284,18 +280,18 @@ int FakeCurveFsClient::GetFileInfo( return -LIBCURVE_ERROR::NOTEXIST; } -int FakeCurveFsClient::GetOrAllocateSegmentInfo( - bool allocate, - uint64_t offset, - FInfo* fileInfo, - const std::string &user, - SegmentInfo *segInfo) { +int FakeCurveFsClient::GetOrAllocateSegmentInfo(bool allocate, uint64_t offset, + FInfo* fileInfo, + const std::string& user, + SegmentInfo* segInfo) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo", + -LIBCURVE_ERROR::FAILED); // NOLINT segInfo->segmentsize = segmentSize; segInfo->chunksize = chunkSize; segInfo->startoffset = offset; - // 一共2个segment + // 2 segments in total if (offset == 0) { segInfo->chunkvec = {{1, 1, 1}, {2, 2, 1}}; } else { @@ -304,16 +300,16 @@ int FakeCurveFsClient::GetOrAllocateSegmentInfo( return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::RenameCloneFile( - const std::string &user, - uint64_t originId, - uint64_t destinationId, - const std::string &origin, - const std::string &destination) { - LOG(INFO) << "RenameCloneFile from " << origin - << " to " << destination; +int FakeCurveFsClient::RenameCloneFile(const std::string& user, + uint64_t originId, + uint64_t destinationId, + const std::string& origin, + const std::string& destination) { + LOG(INFO) << "RenameCloneFile from " << origin << " to " << destination; fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(origin); if (it != fileMap_.end()) { it->second.parentid = 3; @@ -326,10 +322,8 @@ int FakeCurveFsClient::RenameCloneFile( } } -int FakeCurveFsClient::DeleteFile( - const std::string &fileName, - const std::string &user, - uint64_t fileId) { +int FakeCurveFsClient::DeleteFile(const std::string& fileName, + const std::string& user, uint64_t fileId) { auto it = fileMap_.find(fileName); if (it != fileMap_.end()) { fileMap_.erase(it); @@ -340,14 +334,15 @@ int FakeCurveFsClient::DeleteFile( } int FakeCurveFsClient::Mkdir(const std::string& dirpath, - const std::string &user) { + const std::string& user) { return -LIBCURVE_ERROR::EXISTS; } int FakeCurveFsClient::ChangeOwner(const std::string& filename, const std::string& newOwner) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(filename); if (it != fileMap_.end()) { it->second.owner = newOwner; @@ -358,7 +353,7 @@ int FakeCurveFsClient::ChangeOwner(const std::string& filename, } bool 
FakeCurveFsClient::JudgeCloneDirHasFile() { - for (auto &f : fileMap_) { + for (auto& f : fileMap_) { if (2 == f.second.parentid) { LOG(INFO) << "Clone dir has file, fileinfo is :" << " id = " << f.second.id diff --git a/test/integration/snapshotcloneserver/fake_curvefs_client.h b/test/integration/snapshotcloneserver/fake_curvefs_client.h index 0f3a0a6107..c93d76daa4 100644 --- a/test/integration/snapshotcloneserver/fake_curvefs_client.h +++ b/test/integration/snapshotcloneserver/fake_curvefs_client.h @@ -23,15 +23,13 @@ #ifndef TEST_INTEGRATION_SNAPSHOTCLONESERVER_FAKE_CURVEFS_CLIENT_H_ #define TEST_INTEGRATION_SNAPSHOTCLONESERVER_FAKE_CURVEFS_CLIENT_H_ -#include #include +#include #include "src/snapshotcloneserver/common/curvefs_client.h" - using ::curve::client::UserInfo_t; - namespace curve { namespace snapshotcloneserver { @@ -43,122 +41,84 @@ extern const char* testFile1; class FakeCurveFsClient : public CurveFsClient { public: - FakeCurveFsClient() : - fileId_(101) {} + FakeCurveFsClient() : fileId_(101) {} virtual ~FakeCurveFsClient() {} - int Init(const CurveClientOptions &options) override; + int Init(const CurveClientOptions& options) override; int UnInit() override; - int CreateSnapshot(const std::string &filename, - const std::string &user, - uint64_t *seq) override; - - int DeleteSnapshot(const std::string &filename, - const std::string &user, - uint64_t seq) override; - - int GetSnapshot(const std::string &filename, - const std::string &user, - uint64_t seq, - FInfo* snapInfo) override; - - int GetSnapshotSegmentInfo(const std::string &filename, - const std::string &user, - uint64_t seq, - uint64_t offset, - SegmentInfo *segInfo) override; - - int ReadChunkSnapshot(ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure *scc) override; - - int CheckSnapShotStatus(std::string filename, - std::string user, - uint64_t seq, - FileStatus* filestatus) override; - - int GetChunkInfo(const ChunkIDInfo &cidinfo, - ChunkInfoDetail *chunkInfo) override; - - int CreateCloneFile( - const std::string &source, - const std::string &filename, - const std::string &user, - uint64_t size, - uint64_t sn, - uint32_t chunkSize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - FInfo* fileInfo) override; - - int CreateCloneChunk( - const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure *scc) override; - - int RecoverChunk( - const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure *scc) override; - - int CompleteCloneMeta( - const std::string &filename, - const std::string &user) override; - - int CompleteCloneFile( - const std::string &filename, - const std::string &user) override; - - int SetCloneFileStatus( - const std::string &filename, - const FileStatus& filestatus, - const std::string &user) override; - - int GetFileInfo( - const std::string &filename, - const std::string &user, - FInfo* fileInfo) override; - - int GetOrAllocateSegmentInfo( - bool allocate, - uint64_t offset, - FInfo* fileInfo, - const std::string &user, - SegmentInfo *segInfo) override; - - int RenameCloneFile( - const std::string &user, - uint64_t originId, - uint64_t destinationId, - const std::string &origin, - const std::string &destination) override; - - int DeleteFile( - const std::string &fileName, - const std::string &user, - uint64_t fileId) override; - - int Mkdir(const std::string& dirpath, - const std::string &user) override; + 
int CreateSnapshot(const std::string& filename, const std::string& user, + uint64_t* seq) override; + + int DeleteSnapshot(const std::string& filename, const std::string& user, + uint64_t seq) override; + + int GetSnapshot(const std::string& filename, const std::string& user, + uint64_t seq, FInfo* snapInfo) override; + + int GetSnapshotSegmentInfo(const std::string& filename, + const std::string& user, uint64_t seq, + uint64_t offset, SegmentInfo* segInfo) override; + + int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, + SnapCloneClosure* scc) override; + + int CheckSnapShotStatus(std::string filename, std::string user, + uint64_t seq, FileStatus* filestatus) override; + + int GetChunkInfo(const ChunkIDInfo& cidinfo, + ChunkInfoDetail* chunkInfo) override; + + int CreateCloneFile(const std::string& source, const std::string& filename, + const std::string& user, uint64_t size, uint64_t sn, + uint32_t chunkSize, uint64_t stripeUnit, + uint64_t stripeCount, const std::string& poolset, + FInfo* fileInfo) override; + + int CreateCloneChunk(const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, + uint64_t csn, uint64_t chunkSize, + SnapCloneClosure* scc) override; + + int RecoverChunk(const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc) override; + + int CompleteCloneMeta(const std::string& filename, + const std::string& user) override; + + int CompleteCloneFile(const std::string& filename, + const std::string& user) override; + + int SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const std::string& user) override; + + int GetFileInfo(const std::string& filename, const std::string& user, + FInfo* fileInfo) override; + + int GetOrAllocateSegmentInfo(bool allocate, uint64_t offset, + FInfo* fileInfo, const std::string& user, + SegmentInfo* segInfo) override; + + int RenameCloneFile(const std::string& user, uint64_t originId, + uint64_t destinationId, const std::string& origin, + const std::string& destination) override; + + int DeleteFile(const std::string& fileName, const std::string& user, + uint64_t fileId) override; + + int Mkdir(const std::string& dirpath, const std::string& user) override; int ChangeOwner(const std::string& filename, const std::string& newOwner) override; /** - * @brief 判断/clone目录下是否存在临时文件 + * @brief Check if there are temporary files under the /clone directory. * - * @retval true 存在 - * @retval false 不存在 + * @retval true If they exist. + * @retval false If they do not exist. */ bool JudgeCloneDirHasFile(); @@ -169,11 +129,11 @@ class FakeCurveFsClient : public CurveFsClient { // fileName -> snapshot fileInfo std::map fileSnapInfoMap_; - // inodeid 从101开始,100以内预留 - // 快照所属文件Id一律为100, parentid = 99 - // "/" 目录的Id为1 - // "/clone" 目录的Id为2 - // "/user1" 目录的Id为3 + // Inode IDs start from 101, with numbers under 100 reserved. + // Snapshot file IDs are always 100, with a parentid = 99. + // The ID for the "/" directory is 1. + // The ID for the "/clone" directory is 2. + // The ID for the "/user1" directory is 3. 
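    // A small sketch restating the id layout above in terms of the fields
    // this fake actually sets (see Init()) and checks (see
    // JudgeCloneDirHasFile()); the FInfo values are illustrative only:
    //
    //   FInfo f;
    //   f.id = 100;      // reserved id: the file pre-created by Init()
    //   f.parentid = 3;  // i.e. it lives under "/user1"
    //   // A temporary clone file would instead carry parentid = 2
    //   // ("/clone"), which is exactly the condition
    //   // JudgeCloneDirHasFile() scans fileMap_ for.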
std::atomic fileId_; }; diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp index 8eff45065c..0160f5010b 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp @@ -20,24 +20,24 @@ * Author: xuchaojie */ -#include -#include #include +#include +#include #include -#include "test/integration/cluster_common/cluster.h" #include "src/client/libcurve_file.h" -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" -#include "src/snapshotcloneserver/clone/clone_service_manager.h" -#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" +#include "src/client/source_reader.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/clone/clone_service_manager.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" -#include "src/client/source_reader.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" +#include "test/integration/cluster_common/cluster.h" +#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" using curve::CurveCluster; using curve::client::FileClient; -using curve::client::UserInfo_t; using curve::client::SourceReader; +using curve::client::UserInfo_t; const std::string kTestPrefix = "SCSTest"; // NOLINT @@ -65,27 +65,26 @@ const std::string kMdsConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_mds.conf"; -const std::string kCSConfigPath = // NOLINT +const std::string kCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_chunkserver.conf"; -const std::string kCsClientConfigPath = // NOLINT +const std::string kCsClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_cs_client.conf"; -const std::string kSnapClientConfigPath = // NOLINT +const std::string kSnapClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_snap_client.conf"; -const std::string kS3ConfigPath = // NOLINT - "./test/integration/snapshotcloneserver/config/" + kTestPrefix + - "_s3.conf"; +const std::string kS3ConfigPath = // NOLINT + "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_s3.conf"; -const std::string kSCSConfigPath = // NOLINT +const std::string kSCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_scs.conf"; -const std::string kClientConfigPath = // NOLINT +const std::string kClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_client.conf"; @@ -99,11 +98,11 @@ const std::vector mdsConfigOptions{ }; const std::vector mdsConf1{ - { "--graceful_quit_on_sigterm" }, + {"--graceful_quit_on_sigterm"}, std::string("--confPath=") + kMdsConfigPath, std::string("--log_dir=") + kLogPath, std::string("--segmentSize=") + std::to_string(segmentSize), - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector chunkserverConfigOptions{ @@ -127,66 +126,63 @@ const std::vector snapClientConfigOptions{ const std::vector s3ConfigOptions{}; const std::vector chunkserverConf1{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "1/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "1/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + 
kTestPrefix + "1/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "1/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "1/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "1/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "1/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "1/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "1/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "1/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "1/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "1/walfilepool.meta"}, }; const std::vector chunkserverConf2{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "2/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "2/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "2/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "2/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "2/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "2/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "2/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "2/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "2/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "2/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "2/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "2/walfilepool.meta"}, }; const std::vector chunkserverConf3{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "3/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "3/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "3/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "3/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/" }, - { 
"-chunkFilePoolMetaPath=./" + kTestPrefix + - "3/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "3/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "3/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "3/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "3/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "3/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "3/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "3/walfilepool.meta"}, }; const std::vector snapshotcloneserverConfigOptions{ @@ -211,7 +207,7 @@ const std::vector snapshotcloneserverConfigOptions{ const std::vector snapshotcloneConf{ std::string("--conf=") + kSCSConfigPath, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector clientConfigOptions{ @@ -240,16 +236,16 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); - // 初始化db + // Initialize db system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); system(std::string("rm -rf " + kTestPrefix + "1").c_str()); system(std::string("rm -rf " + kTestPrefix + "2").c_str()); system(std::string("rm -rf " + kTestPrefix + "3").c_str()); - // 启动etcd + // Start etcd pid_t pid = cluster_->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, - std::vector{ "--name=" + kTestPrefix }); + std::vector{"--name=" + kTestPrefix}); LOG(INFO) << "etcd 1 started on " << kEtcdPeerIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); @@ -259,13 +255,13 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions); - // 启动一个mds + // Start an mds pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 创建物理池 + // Creating a physical pool ASSERT_EQ(0, cluster_->PreparePhysicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -274,21 +270,18 @@ class SnapshotCloneServerTest : public ::testing::Test { // format chunkfilepool and walfilepool std::vector threadpool(3); - threadpool[0] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "1/chunkfilepool/", - "./" + kTestPrefix + "1/chunkfilepool.meta", - "./" + kTestPrefix + "1/chunkfilepool/", 1); - threadpool[1] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "2/chunkfilepool/", - "./" + kTestPrefix + "2/chunkfilepool.meta", - "./" + kTestPrefix + "2/chunkfilepool/", 1); - threadpool[2] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "3/chunkfilepool/", - "./" + kTestPrefix + "3/chunkfilepool.meta", - "./" + kTestPrefix + "3/chunkfilepool/", 1); + threadpool[0] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + 
"1/chunkfilepool/", + "./" + kTestPrefix + "1/chunkfilepool.meta", + "./" + kTestPrefix + "1/chunkfilepool/", 1); + threadpool[1] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "2/chunkfilepool/", + "./" + kTestPrefix + "2/chunkfilepool.meta", + "./" + kTestPrefix + "2/chunkfilepool/", 1); + threadpool[2] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "3/chunkfilepool/", + "./" + kTestPrefix + "3/chunkfilepool.meta", + "./" + kTestPrefix + "3/chunkfilepool/", 1); for (int i = 0; i < 3; i++) { threadpool[i].join(); } @@ -302,7 +295,7 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kCSConfigPath, chunkserverConfigOptions); - // 创建chunkserver + // Create chunkserver pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1 @@ -321,7 +314,8 @@ class SnapshotCloneServerTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first ASSERT_EQ(0, cluster_->PrepareLogicalPool( 1, "./test/integration/snapshotcloneserver/config/" @@ -395,7 +389,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(ERROR) << "Open fail, ret = " << testfd1_; return false; } - // 每个chunk写前面4k数据, 写两个segment + // Write the first 4k data and two segments for each chunk uint64_t totalChunk = 2ULL * segmentSize / chunkSize; for (uint64_t i = 0; i < totalChunk / chunkGap; i++) { ret = @@ -495,23 +489,23 @@ class SnapshotCloneServerTest : public ::testing::Test { CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr; FileClient* SnapshotCloneServerTest::fileClient_ = nullptr; -// 常规测试用例 -// 场景一:快照增加删除查找 +// Regular test cases +// Scenario 1: Adding, deleting, and searching snapshots TEST_F(SnapshotCloneServerTest, TestSnapshotAddDeleteGet) { std::string uuid1; int ret = 0; - // 操作1:用户testUser1_对不存在的文件打快照 - // 预期1:返回文件不存在 + // Step1: User testUser1_ Take a snapshot of non-existent files + // Expected 1: Return file does not exist ret = MakeSnapshot(testUser1_, "/ItUser1/notExistFile", "snap1", &uuid1); ASSERT_EQ(kErrCodeFileNotExist, ret); - // 操作2:用户testUser2_对testFile1_打快照 - // 预期2:返回用户认证失败 + // Step2: User testUser2_ For testFile1_ Take a snapshot + // Expected 2: Failed to return user authentication ret = MakeSnapshot(testUser2_, testFile1_, "snap1", &uuid1); ASSERT_EQ(kErrCodeInvalidUser, ret); - // 操作3:用户testUser1_对testFile1_打快照snap1。 - // 预期3:打快照成功 + // Step3: User testUser1_ For testFile1_ Take a snapshot snap1. 
+ // Expected 3: Successful snapshot taking ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1); ASSERT_EQ(0, ret); @@ -519,56 +513,56 @@ TEST_F(SnapshotCloneServerTest, TestSnapshotAddDeleteGet) { ASSERT_TRUE(WriteFile(testFile1_, testUser1_, fakeData)); ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); - // 操作4: 获取快照信息,user=testUser1_,filename=testFile1_ - // 预期4:返回快照snap1的信息 + // Step4: Obtain snapshot information, user=testUser1_, filename=testFile1_ + // Expected 4: Return information for snapshot snap1 bool success1 = CheckSnapshotSuccess(testUser1_, testFile1_, uuid1); ASSERT_TRUE(success1); - // 操作5:获取快照信息,user=testUser2_,filename=testFile1_ - // 预期5:返回用户认证失败 + // Step5: Obtain snapshot information, user=testUser2_, filename=testFile1_ + // Expected 5: User authentication failure returned FileSnapshotInfo info1; ret = GetSnapshotInfo(testUser2_, testFile1_, uuid1, &info1); ASSERT_EQ(kErrCodeInvalidUser, ret); - // 操作6:获取快照信息,user=testUser2_,filename=testFile2_ - // 预期6:返回空 + // Step6: Obtain snapshot information, user=testUser2_, filename=testFile2_ + // Expected 6: Return null std::vector infoVec; ret = ListFileSnapshotInfo(testUser2_, testFile2_, 10, 0, &infoVec); ASSERT_EQ(0, ret); ASSERT_EQ(0, infoVec.size()); - // 操作7:testUser2_删除快照snap1 - // 预期7:返回用户认证失败 + // Step7: testUser2_ Delete snapshot snap1 + // Expected 7: User authentication failure returned ret = DeleteSnapshot(testUser2_, testFile1_, uuid1); ASSERT_EQ(kErrCodeInvalidUser, ret); - // 操作8:testUser1_删除testFile2_的快照,ID为snap1 - // 预期8:返回文件名不匹配 + // Step8: testUser1_ Delete testFile2_ Snapshot with ID snap1 for + // Expected 8: Return file name mismatch ret = DeleteSnapshot(testUser1_, testFile2_, uuid1); ASSERT_EQ(kErrCodeFileNameNotMatch, ret); - // 操作9:testUser1_删除快照snap1 - // 预期9:返回删除成功 + // Step9: testUser1_ Delete snapshot snap1 + // Expected 9: Successful deletion returned ret = DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, uuid1); ASSERT_EQ(0, ret); - // 操作10:获取快照信息,user=testUser1_,filename=testFile1_ - // 预期10:返回空 + // Step10: Obtain snapshot information, user=testUser1_, filename=testFile1_ + // Expected 10: Return empty ret = ListFileSnapshotInfo(testUser1_, testFile1_, 10, 0, &infoVec); ASSERT_EQ(0, ret); ASSERT_EQ(0, infoVec.size()); - // 操作11:testUser1_删除快照snap1(重复删除) - // 预期11:返回删除成功 + // Step11: testUser1_ Delete snapshot snap1 (duplicate deletion) + // Expected 11: Successful deletion returned ret = DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, uuid1); ASSERT_EQ(0, ret); - // 复原testFile1_ + // Restore testFile1_ std::string fakeData2(4096, 'x'); ASSERT_TRUE(WriteFile(testFile1_, testUser1_, fakeData2)); } -// 场景二:取消快照 +// Scenario 2: Cancel Snapshot TEST_F(SnapshotCloneServerTest, TestCancelSnapshot) { std::string uuid1; int ret = MakeSnapshot(testUser1_, testFile1_, "snapToCancle", &uuid1); @@ -583,29 +577,35 @@ TEST_F(SnapshotCloneServerTest, TestCancelSnapshot) { if (info1.GetSnapshotInfo().GetStatus() == Status::pending || info1.GetSnapshotInfo().GetStatus() == Status::canceling) { if (!isCancel) { - // 操作1:用户testUser1_对testFile1_打快照snap1, - // 在快照未完成前testUser2_取消testFile1_的快照snap1 - // 预期1:取消用户认证失败 + // Step1: User testUser1_ For testFile1_ Take a snapshot + // snap1, + // testUser2_ before the snapshot is completed_ + // Cancel testFile1_ Snap1 of snapshot + // Expected 1: Failed to cancel user authentication int retCode = CancelSnapshot(testUser2_, testFile1_, uuid1); ASSERT_EQ(kErrCodeInvalidUser, retCode); - // 操作2:用户testUser1_对testFile1_打快照snap1, - // 
在快照未完成前testUser1_取消testFile1_ - // 的不存在的快照 - // 预期2:返回kErrCodeCannotCancelFinished + // Step2: User testUser1_ For testFile1_ Take a snapshot + // snap1, + // testUser1_ before the snapshot is completed_ + // Cancel testFile1_ A non-existent snapshot of + // Expected 2: Return kErrCodeCannotCancelFinished retCode = CancelSnapshot(testUser1_, testFile1_, "notExistUUId"); ASSERT_EQ(kErrCodeCannotCancelFinished, retCode); - // 操作3:用户testUser1_对testFile1_打快照snap1, - // 在快照未完成前testUser1_取消testFile2_的快照snap1 - // 预期3: 返回文件名不匹配 + // Step3: User testUser1_ For testFile1_ Take a snapshot + // snap1, + // testUser1_ before the snapshot is completed_ + // Cancel testFile2_ Snap1 of snapshot + // Expected 3: Return file name mismatch retCode = CancelSnapshot(testUser1_, testFile2_, uuid1); ASSERT_EQ(kErrCodeFileNameNotMatch, retCode); - // 操作4:用户testUser1_对testFile1_打快照, - // 在快照未完成前testUser1_取消快照snap1 - // 预期4:取消快照成功 + // Step4: User testUser1_ For testFile1_ Take a snapshot, + // testUser1_ before the snapshot is completed_ + // Cancel snapshot snap1 + // Expected 4: Successfully cancelled snapshot retCode = CancelSnapshot(testUser1_, testFile1_, uuid1); ASSERT_EQ(0, retCode); isCancel = true; @@ -620,47 +620,48 @@ TEST_F(SnapshotCloneServerTest, TestCancelSnapshot) { << static_cast(info1.GetSnapshotInfo().GetStatus()); } } else if (retCode == -8) { - // 操作5:获取快照信息,user=testUser1_,filename=testFile1_ - // 预期5:返回空 + // Step5: Obtain snapshot information, user=testUser1_, + // filename=testFile1_ Expected 5: Return empty success1 = true; break; } } ASSERT_TRUE(success1); - // 操作6: 在快照已完成后,testUser1_取消testFile1_的快照snap1 - // 预期6: 返回待取消的快照不存在或已完成 + // Step6: After the snapshot is completed, testUser1_ Cancel testFile1_ + // Snap1 of snapshot Expected 6: Returning a pending snapshot that does not + // exist or has been completed ret = CancelSnapshot(testUser1_, testFile1_, uuid1); ASSERT_EQ(kErrCodeCannotCancelFinished, ret); } -// 场景三:lazy快照克隆场景 +// Scenario 3: Lazy snapshot clone scene TEST_F(SnapshotCloneServerTest, TestSnapLazyClone) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); - // 操作1: testUser1_ clone不存在的快照,fileName=SnapLazyClone1 - // 预期1:返回快照不存在 + // Step1: testUser1_ A snapshot with a clone that does not exist, + // fileName=SnapLazyClone1 Expected 1: Return snapshot does not exist std::string uuid1, uuid2, uuid3, uuid4, uuid5; int ret; ret = CloneOrRecover("Clone", testUser1_, "UnExistSnapId1", "/ItUser1/SnapLazyClone1", true, &uuid1); ASSERT_EQ(kErrCodeFileNotExist, ret); - // 操作2:testUser2_ clone快照snap1,fileName=SnapLazyClone1 - // 预期2: 返回用户认证失败 + // Step2: testUser2_ Clone snapshot snap1, fileName=SnapLazyClone1 + // Expected 2: User authentication failure returned ret = CloneOrRecover("Clone", testUser2_, snapId, "/ItUser2/SnapLazyClone1", true, &uuid2); ASSERT_EQ(kErrCodeInvalidUser, ret); - // 操作3:testUser1_ clone 快照snap1,fileName=SnapLazyClone1 - // 预期3 返回克隆成功 + // Step3: testUser1_ Clone snapshot snap1, fileName=SnapLazyClone1 + // Expected 3 to return successful cloning std::string dstFile = "/ItUser1/SnapLazyClone1"; ret = CloneOrRecover("Clone", testUser1_, snapId, dstFile, true, &uuid3); ASSERT_EQ(0, ret); - // 操作4: testUser1_ clone 块照snap1,fileName=SnapLazyClone1 (重复克隆) - // 预期4:返回克隆成功(幂等) + // Step4: testUser1_ Clone block photo snap1, fileName=SnapLazyClone1 + // (duplicate clone) Expected 4: Returns successful cloning (idempotent) ret = CloneOrRecover("Clone", testUser1_, snapId, "/ItUser1/SnapLazyClone1", true, &uuid4); ASSERT_EQ(0, ret); @@ -669,68 +670,68 @@ 
TEST_F(SnapshotCloneServerTest, TestSnapLazyClone) { ret = Flatten(testUser1_, uuid3); ASSERT_EQ(0, ret); - // 操作5: testUser1_ GetCloneTask - // 预期5:返回SnapLazyClone1的clone 任务 + // Step5: testUser1_ GetCloneTask + // Expected 5: Return clone task for SnapLazyClone1 bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid3, true); ASSERT_TRUE(success1); - // 操作6: testUser2_ GetCloneTask - // 预期6: 返回空 + // Step6: testUser2_ GetCloneTask + // Expected 6: Return null std::vector infoVec; ret = ListCloneTaskInfo(testUser2_, 10, 0, &infoVec); ASSERT_EQ(0, ret); ASSERT_EQ(0, infoVec.size()); - // 操作7: testUser2_ CleanCloneTask UUID为SnapLazyClone1的UUID - // 预期7:返回用户认证失败 + // Step7: testUser2_ CleanCloneTask UUID is the UUID of SnapLazyClone1 + // Expected 7: User authentication failure returned ret = CleanCloneTask(testUser2_, uuid3); ASSERT_EQ(kErrCodeInvalidUser, ret); - // 操作8: testUser1_ CleanCloneTask UUID为SnapLazyClone1的UUID - // 预期8:返回执行成功 + // Step8: testUser1_ CleanCloneTask UUID is the UUID of SnapLazyClone1 + // Expected 8: Return execution successful ret = CleanCloneTask(testUser1_, uuid3); ASSERT_EQ(0, ret); - // 等待清理完成 + // Waiting for cleaning to complete std::this_thread::sleep_for(std::chrono::seconds(3)); - // 操作9: testUser1_ CleanCloneTask UUID为SnapLazyClone1的UUID(重复执行) - // 预期9:返回执行成功 + // Step9: testUser1_ CleanCloneTask UUID is the UUID of SnapLazyClone1 + // (repeated execution) Expected 9: Return execution successful ret = CleanCloneTask(testUser1_, uuid3); ASSERT_EQ(0, ret); - // 操作10:testUser1_ GetCloneTask - // 预期10:返回空 + // Step10: testUser1_ GetCloneTask + // Expected 10: Return empty TaskCloneInfo info; ret = GetCloneTaskInfo(testUser1_, uuid3, &info); ASSERT_EQ(kErrCodeFileNotExist, ret); - // 验证数据正确性 + // Verify data correctness std::string fakeData(4096, 'x'); ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// 场景四:非lazy快照克隆场景 +// Scenario 4: Non lazy snapshot clone scenario TEST_F(SnapshotCloneServerTest, TestSnapNotLazyClone) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); - // 操作1: testUser1_ clone不存在的快照,fileName=SnapNotLazyClone1 - // 预期1:返回快照不存在 + // Step1: testUser1_ A snapshot with a clone that does not exist, + // fileName=SnapNotLazyClone1 Expected 1: Return snapshot does not exist std::string uuid1; int ret; ret = CloneOrRecover("Clone", testUser1_, "UnExistSnapId2", "/ItUser1/SnapNotLazyClone1", false, &uuid1); ASSERT_EQ(kErrCodeFileNotExist, ret); - // 操作2:testUser2_ clone快照snap1,fileName=SnapNotLazyClone1 - // 预期2: 返回用户认证失败 + // Step2: testUser2_ Clone snapshot snap1, fileName=SnapNotLazyClone1 + // Expected 2: User authentication failure returned ret = CloneOrRecover("Clone", testUser2_, snapId, "/ItUser2/SnapNotLazyClone1", false, &uuid1); ASSERT_EQ(kErrCodeInvalidUser, ret); - // 操作3:testUser1_ clone 快照snap1,fileName=SnapNotLazyClone1 - // 预期3 返回克隆成功 + // Step3: testUser1_ Clone snapshot snap1, fileName=SnapNotLazyClone1 + // Expected 3 to return successful cloning std::string dstFile = "/ItUser1/SnapNotLazyClone1"; ret = CloneOrRecover("Clone", testUser1_, snapId, dstFile, false, &uuid1); ASSERT_EQ(0, ret); @@ -738,39 +739,39 @@ TEST_F(SnapshotCloneServerTest, TestSnapNotLazyClone) { bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true); ASSERT_TRUE(success1); - // 操作4: testUser1_ clone 块照snap1, - // fileName=SnapNotLazyClone1 (重复克隆) - // 预期4:返回克隆成功(幂等) + // Step4: testUser1_ Clone block photo snap1, + // fileName=SnapNotLazyClone1 (duplicate clone) + // Expected 4: Returns successful cloning (idempotent) 
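    // For reference: the only difference from the lazy variant above is the
    // lazy flag passed to CloneOrRecover(). With lazy = false the data copy
    // starts immediately, so no Flatten() call is needed before
    // CheckCloneOrRecoverSuccess(). Condensed from the calls in this test,
    // nothing new is added:
    //
    //   ret = CloneOrRecover("Clone", testUser1_, snapId,
    //                        "/ItUser1/SnapNotLazyClone1", false, &uuid1);
    //   ASSERT_EQ(0, ret);
    //   ASSERT_TRUE(CheckCloneOrRecoverSuccess(testUser1_, uuid1, true));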
    ret = CloneOrRecover("Clone", testUser1_, snapId,
                         "/ItUser1/SnapNotLazyClone1", false, &uuid1);
    ASSERT_EQ(0, ret);

-    // 验证数据正确性
+    // Verify data correctness
    std::string fakeData(4096, 'x');
    ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));
}

-// 场景五:lazy快照恢复场景
+// Scenario 5: lazy snapshot recovery scenario
TEST_F(SnapshotCloneServerTest, TestSnapLazyRecover) {
    std::string snapId;
    PrepareSnapshotForTestFile1(&snapId);

-    // 操作1: testUser1_ Recover不存在的快照,fileName=testFile1_
-    // 预期1:返回快照不存在
+    // Step 1: testUser1_ recovers from a non-existent snapshot,
+    // fileName=testFile1_
+    // Expected 1: return snapshot does not exist
    std::string uuid1;
    int ret;
    ret = CloneOrRecover("Recover", testUser1_, "UnExistSnapId3", testFile1_,
                         true, &uuid1);
    ASSERT_EQ(kErrCodeFileNotExist, ret);

-    // 操作2:testUser2_ Recover快照snap1,fileName=testFile1_
-    // 预期2: 返回用户认证失败
+    // Step 2: testUser2_ recovers from snapshot snap1, fileName=testFile1_
+    // Expected 2: return user authentication failure
    ret = CloneOrRecover("Recover", testUser2_, snapId, testFile1_, true,
                         &uuid1);
    ASSERT_EQ(kErrCodeInvalidUser, ret);

-    // 操作3:testUser1_ Recover快照snap1,fileName=testFile1_
-    // 预期3 返回恢复成功
+    // Step 3: testUser1_ recovers from snapshot snap1, fileName=testFile1_
+    // Expected 3: return recovery success
    ret = CloneOrRecover("Recover", testUser1_, snapId, testFile1_, true,
                         &uuid1);
    ASSERT_EQ(0, ret);
@@ -782,38 +783,38 @@ TEST_F(SnapshotCloneServerTest, TestSnapLazyRecover) {
    bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false);
    ASSERT_TRUE(success1);

-    // 验证数据正确性
+    // Verify data correctness
    std::string fakeData(4096, 'x');
    ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData));

-    // 操作4:testUser1_ recover 快照snap1,目标文件为不存在的文件
-    // 预期4: 返回目标文件不存在
+    // Step 4: testUser1_ recovers from snapshot snap1 to a non-existent
+    // target file
+    // Expected 4: return target file does not exist
    ret = CloneOrRecover("Recover", testUser1_, snapId,
                         "/ItUser1/notExistFile", true, &uuid1);
    ASSERT_EQ(kErrCodeFileNotExist, ret);
}

-// 场景六:非lazy快照恢复场景
+// Scenario 6: non-lazy snapshot recovery scenario
TEST_F(SnapshotCloneServerTest, TestSnapNotLazyRecover) {
    std::string snapId;
    PrepareSnapshotForTestFile1(&snapId);

-    // 操作1: testUser1_ Recover不存在的快照,fileName=testFile1_
-    // 预期1:返回快照不存在
+    // Step 1: testUser1_ recovers from a non-existent snapshot,
+    // fileName=testFile1_
+    // Expected 1: return snapshot does not exist
    std::string uuid1;
    int ret;
    ret = CloneOrRecover("Recover", testUser1_, "UnExistSnapId4", testFile1_,
                         false, &uuid1);
    ASSERT_EQ(kErrCodeFileNotExist, ret);

-    // 操作2:testUser2_ Recover快照snap1,fileName=testFile1_
-    // 预期2: 返回用户认证失败
+    // Step 2: testUser2_ recovers from snapshot snap1, fileName=testFile1_
+    // Expected 2: return user authentication failure
    ret = CloneOrRecover("Recover", testUser2_, snapId, testFile1_, false,
                         &uuid1);
    ASSERT_EQ(kErrCodeInvalidUser, ret);

-    // 操作3:testUser1_ Recover快照snap1,fileName=testFile1_
-    // 预期3 返回恢复成功
+    // Step 3: testUser1_ recovers from snapshot snap1, fileName=testFile1_
+    // Expected 3: return recovery success
    ret = CloneOrRecover("Recover", testUser1_, snapId, testFile1_, false,
                         &uuid1);
    ASSERT_EQ(0, ret);
@@ -821,43 +822,43 @@ TEST_F(SnapshotCloneServerTest, TestSnapNotLazyRecover) {
    bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false);
    ASSERT_TRUE(success1);

-    // 验证数据正确性
+    // Verify data correctness
    std::string fakeData(4096, 'x');
    ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData));
-    // 操作4:testUser1_ recover 快照snap1,目标文件为不存在的文件
-    // 预期4: 返回目标文件不存在
+    // Step 4: testUser1_ recovers from snapshot snap1 to a non-existent
+    // target file
+    // Expected 4: return target file does not exist
    ret = CloneOrRecover("Recover", testUser1_, snapId,
                         "/ItUser1/notExistFile", false, &uuid1);
    ASSERT_EQ(kErrCodeFileNotExist, ret);
}

-// 场景七: lazy镜像克隆场景
+// Scenario 7: lazy image clone scenario
TEST_F(SnapshotCloneServerTest, TestImageLazyClone) {
-    // 操作1: testUser1_ clone不存在的镜像,fileName=ImageLazyClone1
-    // 预期1:返回文件不存在
+    // Step 1: testUser1_ clones a non-existent image,
+    // fileName=ImageLazyClone1
+    // Expected 1: return file does not exist
    std::string uuid1, uuid2, uuid3, uuid4;
    int ret;
    ret = CloneOrRecover("Clone", testUser1_, "/UnExistFile",
                         "/ItUser1/ImageLazyClone1", true, &uuid1);
    ASSERT_EQ(kErrCodeFileNotExist, ret);

-    // 操作2:testUser1_ clone 镜像testFile1_,fileName=ImageLazyClone1
-    // 预期2 返回克隆成功
+    // Step 2: testUser1_ clones image testFile1_,
+    // fileName=ImageLazyClone1
+    // Expected 2: return clone success
    std::string dstFile = "/ItUser1/ImageLazyClone1";
    ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true,
                         &uuid2);
    ASSERT_EQ(0, ret);

-    // 操作3: testUser1_ clone 镜像testFile1_,
-    // fileName=ImageLazyClone1 (重复克隆)
-    // 预期3:返回克隆成功(幂等)
+    // Step 3: testUser1_ clones image testFile1_,
+    // fileName=ImageLazyClone1 (duplicate clone)
+    // Expected 3: return clone success (idempotent)
    ret = CloneOrRecover("Clone", testUser1_, testFile1_,
                         "/ItUser1/ImageLazyClone1", true, &uuid3);
    ASSERT_EQ(0, ret);

-    // 操作4:对未完成lazy克隆的文件ImageLazyClone1打快照snap1
-    // 预期4:返回文件状态异常
+    // Step 4: take snapshot snap1 of file ImageLazyClone1, whose lazy
+    // clone has not finished yet
+    // Expected 4: return invalid file status
    ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid4);
    ASSERT_EQ(kErrCodeFileStatusInvalid, ret);
    FileSnapshotInfo info2;
@@ -866,7 +867,7 @@ TEST_F(SnapshotCloneServerTest, TestImageLazyClone) {

    ASSERT_TRUE(WaitMetaInstalledSuccess(testUser1_, uuid2, true));

-    // Flatten之前验证数据正确性
+    // Verify data correctness before Flatten
    std::string fakeData1(4096, 'x');
    ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData1));
@@ -877,23 +878,23 @@ TEST_F(SnapshotCloneServerTest, TestImageLazyClone) {
    bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid2, true);
    ASSERT_TRUE(success1);

-    // Flatten之后验证数据正确性
+    // Verify data correctness after Flatten
    std::string fakeData2(4096, 'x');
    ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData2));
}
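All lazy-clone scenarios in this file exercise the same two-stage contract: stage 1 installs the clone's metadata, after which the volume already serves I/O, and only after Flatten copies the remaining data does the task reach done. A condensed sketch of that flow using the helpers defined in this file (the destination path "/ItUser1/LazyFlowDemo" is hypothetical):

    // Two-stage lazy clone flow (sketch).
    std::string uuid;
    ASSERT_EQ(0, CloneOrRecover("Clone", testUser1_, testFile1_,
                                "/ItUser1/LazyFlowDemo", true, &uuid));
    // Stage 1 finished: metadata installed, data is already readable.
    ASSERT_TRUE(WaitMetaInstalledSuccess(testUser1_, uuid, true));
    ASSERT_TRUE(CheckFileData("/ItUser1/LazyFlowDemo", testUser1_,
                              std::string(4096, 'x')));
    // Stage 2: Flatten copies the remaining data in the background.
    ASSERT_EQ(0, Flatten(testUser1_, uuid));
    ASSERT_TRUE(CheckCloneOrRecoverSuccess(testUser1_, uuid, true));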
-// 场景八:非lazy镜像克隆场景
+// Scenario 8: non-lazy image clone scenario
TEST_F(SnapshotCloneServerTest, TestImageNotLazyClone) {
-    // 操作1: testUser1_ clone不存在的镜像,fileName=ImageNotLazyClone1
-    // 预期1:返回快照不存在
+    // Step 1: testUser1_ clones a non-existent image,
+    // fileName=ImageNotLazyClone1
+    // Expected 1: return snapshot does not exist
    std::string uuid1;
    int ret;
    ret = CloneOrRecover("Clone", testUser1_, "/UnExistFile",
                         "/ItUser1/ImageNotLazyClone1", false, &uuid1);
    ASSERT_EQ(kErrCodeFileNotExist, ret);

-    // 操作2:testUser1_ clone 镜像testFile1_,fileName=ImageNotLazyClone1
-    // 预期2 返回克隆成功
+    // Step 2: testUser1_ clones image testFile1_,
+    // fileName=ImageNotLazyClone1
+    // Expected 2: return clone success
    std::string dstFile = "/ItUser1/ImageNotLazyClone1";
    ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, false,
                         &uuid1);
@@ -902,23 +903,23 @@ TEST_F(SnapshotCloneServerTest, TestImageNotLazyClone) {
    bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true);
    ASSERT_TRUE(success1);

-    // 操作3: testUser1_ clone 镜像testFile1_,
-    // fileName=ImageNotLazyClone1 (重复克隆)
-    // 预期3:返回克隆成功(幂等)
+    // Step 3: testUser1_ clones image testFile1_,
+    // fileName=ImageNotLazyClone1 (duplicate clone)
+    // Expected 3: return clone success (idempotent)
    ret = CloneOrRecover("Clone", testUser1_, testFile1_,
                         "/ItUser1/ImageNotLazyClone1", false, &uuid1);
    ASSERT_EQ(0, ret);

-    // 验证数据正确性
+    // Verify data correctness
    std::string fakeData(4096, 'x');
    ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));
}

-// 场景九:快照存在失败场景
+// Scenario 9: a failed snapshot already exists
TEST_F(SnapshotCloneServerTest, TestSnapAndCloneWhenSnapHasError) {
    std::string snapId = "errorSnapUuid";
    SnapshotInfo snapInfo(snapId, testUser1_, testFile4_, "snapxxx", 0, 0, 0, 0,
-                          0, 0, kDefaultPoolset, 0, Status::error);
+                          0, 0, kDefaultPoolset, 0, Status::error);

    cluster_->metaStore_->AddSnapshot(snapInfo);
@@ -927,114 +928,113 @@ TEST_F(SnapshotCloneServerTest, TestSnapAndCloneWhenSnapHasError) {
    ASSERT_GT(pid, 0);
    std::string uuid1, uuid2;

-    // 操作1: lazy clone 快照snap1
-    // 预期1:返回快照存在异常
+    // Step 1: lazy clone from snapshot snap1
+    // Expected 1: return invalid snapshot
    int ret = CloneOrRecover("Clone", testUser1_, snapId,
                             "/ItUser2/SnapLazyClone1", true, &uuid2);
    ASSERT_EQ(kErrCodeInvalidSnapshot, ret);

-    // 操作2:非lazy clone 快照snap1
-    // 预期2:返回快照存在异常
+    // Step 2: non-lazy clone from snapshot snap1
+    // Expected 2: return invalid snapshot
    ret = CloneOrRecover("Clone", testUser1_, snapId,
                         "/ItUser2/SnapNotLazyClone1", false, &uuid2);
    ASSERT_EQ(kErrCodeInvalidSnapshot, ret);

-    // 操作3:lazy 从 快照snap1 recover
-    // 预期3:返回快照存在异常
+    // Step 3: lazy recover from snapshot snap1
+    // Expected 3: return invalid snapshot
    ret = CloneOrRecover("Recover", testUser1_, snapId, testFile4_, true,
                         &uuid2);
    ASSERT_EQ(kErrCodeInvalidSnapshot, ret);

-    // 操作4:非lazy 从 快照snap1 recover
-    // 预期4:返回快照存在异常
+    // Step 4: non-lazy recover from snapshot snap1
+    // Expected 4: return invalid snapshot
    ret = CloneOrRecover("Recover", testUser1_, snapId, testFile4_, false,
                         &uuid2);
    ASSERT_EQ(kErrCodeInvalidSnapshot, ret);

-    // 操作5:用户testUser1_对testFile4_打快照snap1
-    // 预期5:清理失败快照,并打快照成功
+    // Step 5: user testUser1_ takes snapshot snap1 of testFile4_
+    // Expected 5: the failed snapshot is cleaned up and the new
+    // snapshot succeeds
    ret = MakeSnapshot(testUser1_, testFile4_, "snap1", &uuid1);
    ASSERT_EQ(0, ret);
-    // 校验快照成功
+    // Verify that the snapshot succeeded
    bool success1 = CheckSnapshotSuccess(testUser1_, testFile4_, uuid1);
    ASSERT_TRUE(success1);

-    // 校验清理失败快照成功
+    // Verify that the failed snapshot was cleaned up
    FileSnapshotInfo info1;
    int retCode = GetSnapshotInfo(testUser1_, testFile4_, snapId, &info1);
    ASSERT_EQ(kErrCodeFileNotExist, retCode);
}

-// [线上问题修复]克隆失败,回滚删除克隆卷,再次创建同样的uuid的卷的场景
+// [Online issue fix] Scenario: a clone fails, rollback deletes the clone
+// volume, and a volume with the same uuid is created again
TEST_F(SnapshotCloneServerTest, TestCloneHasSameDest) {
    std::string uuid1, uuid2, uuid3, uuid4, uuid5, uuid6, uuid7;
-    // 操作1:testUser1_ clone 镜像testFile1_,fileName=CloneHasSameDestUUID
-    // 预期1 返回克隆成功
+    // Step 1: testUser1_ clones image testFile1_,
+    // fileName=CloneHasSameDestUUID
+    // Expected 1: return clone success
    std::string dstFile = "/ItUser1/CloneHasSameDest";
    int ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true,
                             &uuid1);
    ASSERT_EQ(0, ret);

-    // 删除克隆卷
+    // Delete the clone volume
    UserInfo_t userinfo;
    userinfo.owner = testUser1_;
    int ret2 = fileClient_->Unlink(dstFile, userinfo, false);
    ASSERT_EQ(0, ret2);
-
-    // 操作2:testUser1_ 再次clone 镜像testFile1_,
+    // Step 2: testUser1_ clones image testFile1_ again,
    // fileName=CloneHasSameDestUUID
-    // 预期2 返回克隆成功
+    // Expected 2: return clone success
    ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true,
                         &uuid2);
    ASSERT_EQ(0, ret);

-    // 验证数据正确性
+    // Verify data correctness
    std::string fakeData(4096, 'x');
    ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));

-    // 操作3:testUser1_ clone 镜像testFile1_,fileName=CloneHasSameDest2
-    // 预期3 返回克隆成功
+    // Step 3: testUser1_ clones image testFile1_,
+    // fileName=CloneHasSameDest2
+    // Expected 3: return clone success
    dstFile = "/ItUser1/CloneHasSameDest2";
    ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true,
                         &uuid3);
    ASSERT_EQ(0, ret);

-    // 删除克隆卷
+    // Delete the clone volume
    UserInfo_t userinfo2;
    userinfo2.owner = testUser1_;
    ret2 = fileClient_->Unlink(dstFile, userinfo2, false);
    ASSERT_EQ(0, ret2);
-
-    // 操作4:testUser1_ 再次clone 镜像testFile2_,
+    // Step 4: testUser1_ clones image testFile2_ again,
    // fileName=CloneHasSameDest2
-    // 预期4 返回克隆成功
+    // Expected 4: return clone success
    ret = CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, true,
                         &uuid4);
    ASSERT_EQ(0, ret);

-    // 验证数据正确性
+    // Verify data correctness
    ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));

-    // 验证再次克隆lazyflag不同的情况
-    // 操作5:testUser1_ clone 镜像testFile1_,fileName=CloneHasSameDest3
-    // 预期5 返回克隆成功
+    // Verify cloning again with a different lazy flag
+    // Step 5: testUser1_ clones image testFile1_,
+    // fileName=CloneHasSameDest3
+    // Expected 5: return clone success
    dstFile = "/ItUser1/CloneHasSameDest3";
    ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true,
                         &uuid5);
    ASSERT_EQ(0, ret);

-    // 删除克隆卷
+    // Delete the clone volume
    UserInfo_t userinfo3;
    userinfo2.owner = testUser1_;
    ret2 = fileClient_->Unlink(dstFile, userinfo2, false);
    ASSERT_EQ(0, ret2);

-    // 操作6:testUser1_ 再次非lazy clone 镜像testFile2_,
+    // Step 6: testUser1_ non-lazily clones image testFile2_ again,
    // fileName=CloneHasSameDest3
-    // 预期6 返回克隆成功
+    // Expected 6: return clone success
    ret = CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, false,
                         &uuid6);
    ASSERT_EQ(0, ret);
@@ -1042,30 +1042,31 @@ TEST_F(SnapshotCloneServerTest, TestCloneHasSameDest) {
    bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid6, true);
    ASSERT_TRUE(success1);

-    // 验证数据正确性
+    // Verify data correctness
    ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));

-    // 删除克隆卷
+    // Delete the clone volume
    UserInfo_t userinfo4;
    userinfo2.owner = testUser1_;
    ret2 = fileClient_->Unlink(dstFile, userinfo2, false);
    ASSERT_EQ(0, ret2);

-    // 操作7:testUser1_ 再次非lazy clone 镜像testFile2_,
+    // Step 7: testUser1_ lazily clones image testFile2_ again,
    // fileName=CloneHasSameDest3
-    // 预期7 返回克隆成功
+    // Expected 7: return clone success
    ret = CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, true,
                         &uuid7);
    ASSERT_EQ(0, ret);

-    // 验证数据正确性
+    // Verify data correctness
    ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));
}
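The test that follows pins down the dependency between lazy clones and their source: Unlink on the source keeps failing with -27 (the volume-occupied error asserted below) until every lazy clone of it has been deleted. In sketch form, with srcFile and cloneFile as placeholder paths:

    // A source volume with live lazy clones cannot be unlinked (sketch).
    UserInfo_t user;
    user.owner = testUser1_;
    ASSERT_EQ(-27, fileClient_->Unlink(srcFile, user, false));  // occupied
    ASSERT_EQ(0, fileClient_->Unlink(cloneFile, user, false));  // last clone
    ASSERT_EQ(0, fileClient_->Unlink(srcFile, user, false));    // now deletable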
-// lazy克隆卷,删除克隆卷,再删除源卷,源卷需要可以删除
+// Lazy-clone a volume, delete the clone volume, then delete the source
+// volume; the source volume must be deletable at that point
TEST_F(SnapshotCloneServerTest, TestDeleteLazyCloneDestThenDeleteSrc) {
-    // 操作1:testUser1_ clone 镜像testFile5_,lazy克隆两个卷dstFile1,dstFile2
-    // 预期1 返回克隆成功
+    // Step 1: testUser1_ clones image testFile5_, lazily cloning two
+    // volumes dstFile1 and dstFile2
+    // Expected 1: return clone success
    std::string uuid1;
    std::string uuid2;
    std::string dstFile1 = "/dest1";
@@ -1080,29 +1081,29 @@ TEST_F(SnapshotCloneServerTest, TestDeleteLazyCloneDestThenDeleteSrc) {
        CloneOrRecover("Clone", testUser1_, testFile5_, dstFile2, true, &uuid2);
    ASSERT_EQ(0, ret);

-    // 删除源卷,删除失败,卷被占用
+    // Deleting the source volume fails: the volume is occupied
    ret = fileClient_->Unlink(testFile5_, userinfo, false);
    ASSERT_EQ(-27, ret);

-    // 操作2:删除目的卷dstFile1成功,再次删除源卷
-    // 预期2 删除失败,卷被占用
+    // Step 2: delete destination volume dstFile1 successfully, then
+    // delete the source volume again
+    // Expected 2: deletion fails, the volume is occupied
    ret = fileClient_->Unlink(dstFile1, userinfo, false);
    ASSERT_EQ(0, ret);

    ret = fileClient_->Unlink(testFile5_, userinfo, false);
    ASSERT_EQ(-27, ret);
-
-    // 操作3:删除目的卷dstFile2成功,再次删除源卷
-    // 预期3 删除成功
+    // Step 3: delete destination volume dstFile2 successfully, then
+    // delete the source volume again
+    // Expected 3: deletion succeeds
    ret = fileClient_->Unlink(dstFile2, userinfo, false);
    ASSERT_EQ(0, ret);

    ret = fileClient_->Unlink(testFile5_, userinfo, false);
    ASSERT_EQ(0, ret);

-    // 操作4: 等待一段时间,看垃圾记录后台能否删除
+    // Step 4: wait for a while and check whether the background can
+    // delete the garbage record
    bool noRecord = false;
    for (int i = 0; i < 100; i++) {
        TaskCloneInfo info;
diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp
index 55484d7ec3..1b231170a8 100644
--- a/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp
+++ b/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp
@@ -20,24 +20,24 @@
 * Author: xuchaojie
 */

-#include
-#include
#include
+#include
+#include
#include

-#include "test/integration/cluster_common/cluster.h"
#include "src/client/libcurve_file.h"
-#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h"
-#include "src/snapshotcloneserver/clone/clone_service_manager.h"
-#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h"
+#include "src/client/source_reader.h"
#include "src/common/snapshotclone/snapshotclone_define.h"
+#include "src/snapshotcloneserver/clone/clone_service_manager.h"
#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h"
-#include "src/client/source_reader.h"
+#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h"
+#include "test/integration/cluster_common/cluster.h"
+#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h"

using curve::CurveCluster;
using curve::client::FileClient;
-using curve::client::UserInfo_t;
using curve::client::SourceReader;
+using curve::client::UserInfo_t;

const std::string kTestPrefix = "ConSCSTest";  // NOLINT
@@ -96,11 +96,11 @@ const std::vector mdsConfigOptions{
};

const std::vector mdsConf1{
-    { "--graceful_quit_on_sigterm" },
+    {"--graceful_quit_on_sigterm"},
    std::string("--confPath=") + kMdsConfigPath,
    std::string("--log_dir=") + kLogPath,
    std::string("--segmentSize=") + std::to_string(segmentSize),
-    { "--stderrthreshold=3" },
+    {"--stderrthreshold=3"},
};

const std::vector chunkserverConfigOptions{
@@ -124,66 +124,63 @@
const std::vector snapClientConfigOptions{ const std::vector s3ConfigOptions{}; const std::vector chunkserverConf1{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "1/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "1/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "1/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "1/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "1/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "1/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "1/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "1/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "1/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "1/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "1/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "1/walfilepool.meta"}, }; const std::vector chunkserverConf2{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "2/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "2/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "2/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "2/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "2/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "2/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "2/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "2/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "2/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "2/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "2/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "2/walfilepool.meta"}, }; const std::vector chunkserverConf3{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "3/" }, - { "-chunkServerMetaUri=local://./" 
+ kTestPrefix +
-          "3/chunkserver.dat" },  // NOLINT
-    { "-copySetUri=local://./" + kTestPrefix + "3/copysets" },
-    { "-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets" },
-    { "-recycleUri=local://./" + kTestPrefix + "3/recycler" },
-    { "-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/" },
-    { "-chunkFilePoolMetaPath=./" + kTestPrefix +
-          "3/chunkfilepool.meta" },  // NOLINT
+    {"--graceful_quit_on_sigterm"},
+    {"-chunkServerStoreUri=local://./" + kTestPrefix + "3/"},
+    {"-chunkServerMetaUri=local://./" + kTestPrefix +
+     "3/chunkserver.dat"},  // NOLINT
+    {"-copySetUri=local://./" + kTestPrefix + "3/copysets"},
+    {"-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets"},
+    {"-recycleUri=local://./" + kTestPrefix + "3/recycler"},
+    {"-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/"},
+    {"-chunkFilePoolMetaPath=./" + kTestPrefix +
+     "3/chunkfilepool.meta"},  // NOLINT
    std::string("-conf=") + kCSConfigPath,
-    { "-raft_sync_segments=true" },
+    {"-raft_sync_segments=true"},
    std::string("--log_dir=") + kLogPath,
-    { "--stderrthreshold=3" },
-    { "-raftLogUri=curve://./" + kTestPrefix + "3/copysets" },
-    { "-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/" },
-    { "-walFilePoolMetaPath=./" + kTestPrefix +
-          "3/walfilepool.meta" },
+    {"--stderrthreshold=3"},
+    {"-raftLogUri=curve://./" + kTestPrefix + "3/copysets"},
+    {"-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/"},
+    {"-walFilePoolMetaPath=./" + kTestPrefix + "3/walfilepool.meta"},
};

const std::vector snapshotcloneserverConfigOptions{
@@ -195,7 +192,8 @@ const std::vector snapshotcloneserverConfigOptions{
    std::string("server.clonePoolThreadNum=8"),
    std::string("server.createCloneChunkConcurrency=2"),
    std::string("server.recoverChunkConcurrency=2"),
-    // 最大快照数修改为3,以测试快照达到上限的用例
+    // Set the maximum number of snapshots to 3 to test the case where
+    // the snapshot count reaches the limit
    std::string("server.maxSnapshotLimit=3"),
    std::string("client.methodRetryTimeSec=1"),
    std::string("server.clientAsyncMethodRetryTimeSec=1"),
@@ -210,7 +208,7 @@ const std::vector snapshotcloneserverConfigOptions{
const std::vector snapshotcloneConf{
    std::string("--conf=") + kSCSConfigPath,
    std::string("--log_dir=") + kLogPath,
-    { "--stderrthreshold=3" },
+    {"--stderrthreshold=3"},
};

const std::vector clientConfigOptions{
@@ -221,7 +219,8 @@ const std::vector clientConfigOptions{
const char* testFile1_ = "/concurrentItUser1/file1";
const char* testFile2_ =
-    "/concurrentItUser1/file2";  // 将在TestImage2Clone2Success中删除 //NOLINT
+    "/concurrentItUser1/file2";  // Will be deleted in
+                                 // TestImage2Clone2Success  // NOLINT
const char* testFile3_ = "/concurrentItUser2/file3";
const char* testFile4_ = "/concurrentItUser1/file3";
const char* testUser1_ = "concurrentItUser1";
@@ -239,16 +238,16 @@ class SnapshotCloneServerTest : public ::testing::Test {
        cluster_ = new CurveCluster();
        ASSERT_NE(nullptr, cluster_);

-        // 初始化db
+        // Initialize the db
        system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str());
        system(std::string("rm -rf " + kTestPrefix + "1").c_str());
        system(std::string("rm -rf " + kTestPrefix + "2").c_str());
        system(std::string("rm -rf " + kTestPrefix + "3").c_str());

-        // 启动etcd
+        // Start etcd
        pid_t pid = cluster_->StartSingleEtcd(
            1, kEtcdClientIpPort, kEtcdPeerIpPort,
-            std::vector{ "--name=" + kTestPrefix });
+            std::vector{"--name=" + kTestPrefix});
        LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort
                  << "::" << kEtcdPeerIpPort << ", pid = " << pid;
        ASSERT_GT(pid, 0);
@@ -256,13 +255,13 @@ class SnapshotCloneServerTest : public ::testing::Test {
        cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions);

-        // 启动一个mds
+        // Start an mds
        pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1,
                                       true);
        LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid;
        ASSERT_GT(pid, 0);

-        // 创建物理池
+        // Create a physical pool
        ASSERT_EQ(0, cluster_->PreparePhysicalPool(
                         1,
                         "./test/integration/snapshotcloneserver/"
@@ -271,21 +270,18 @@ class SnapshotCloneServerTest : public ::testing::Test {
        // format chunkfilepool and walfilepool
        std::vector threadpool(3);

-        threadpool[0] =
-            std::thread(&CurveCluster::FormatFilePool, cluster_,
-                        "./" + kTestPrefix + "1/chunkfilepool/",
-                        "./" + kTestPrefix + "1/chunkfilepool.meta",
-                        "./" + kTestPrefix + "1/chunkfilepool/", 1);
-        threadpool[1] =
-            std::thread(&CurveCluster::FormatFilePool, cluster_,
-                        "./" + kTestPrefix + "2/chunkfilepool/",
-                        "./" + kTestPrefix + "2/chunkfilepool.meta",
-                        "./" + kTestPrefix + "2/chunkfilepool/", 1);
-        threadpool[2] =
-            std::thread(&CurveCluster::FormatFilePool, cluster_,
-                        "./" + kTestPrefix + "3/chunkfilepool/",
-                        "./" + kTestPrefix + "3/chunkfilepool.meta",
-                        "./" + kTestPrefix + "3/chunkfilepool/", 1);
+        threadpool[0] = std::thread(&CurveCluster::FormatFilePool, cluster_,
+                                    "./" + kTestPrefix + "1/chunkfilepool/",
+                                    "./" + kTestPrefix + "1/chunkfilepool.meta",
+                                    "./" + kTestPrefix + "1/chunkfilepool/", 1);
+        threadpool[1] = std::thread(&CurveCluster::FormatFilePool, cluster_,
+                                    "./" + kTestPrefix + "2/chunkfilepool/",
+                                    "./" + kTestPrefix + "2/chunkfilepool.meta",
+                                    "./" + kTestPrefix + "2/chunkfilepool/", 1);
+        threadpool[2] = std::thread(&CurveCluster::FormatFilePool, cluster_,
+                                    "./" + kTestPrefix + "3/chunkfilepool/",
+                                    "./" + kTestPrefix + "3/chunkfilepool.meta",
+                                    "./" + kTestPrefix + "3/chunkfilepool/", 1);
        for (int i = 0; i < 3; i++) {
            threadpool[i].join();
        }
@@ -299,7 +295,7 @@ class SnapshotCloneServerTest : public ::testing::Test {

        cluster_->PrepareConfig(kCSConfigPath, chunkserverConfigOptions);

-        // 创建chunkserver
+        // Create chunkservers
        pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1,
                                               chunkserverConf1);
        LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1
@@ -318,7 +314,8 @@ class SnapshotCloneServerTest : public ::testing::Test {

        std::this_thread::sleep_for(std::chrono::seconds(5));

-        // 创建逻辑池, 并睡眠一段时间让底层copyset先选主
+        // Create a logical pool, then sleep for a while so the
+        // underlying copysets can elect their leaders first
        ASSERT_EQ(0, cluster_->PrepareLogicalPool(
                         1,
                         "./test/integration/snapshotcloneserver/"
@@ -386,7 +383,7 @@ class SnapshotCloneServerTest : public ::testing::Test {
            LOG(ERROR) << "Open fail, ret = " << testfd1_;
            return false;
        }
-        // 每个chunk写前面4k数据, 写两个segment
+        // Write the first 4 KB of each chunk, across two segments
        uint64_t totalChunk = 2ULL * segmentSize / chunkSize;
        for (uint64_t i = 0; i < totalChunk / chunkGap; i++) {
            ret =
@@ -486,9 +483,9 @@ class SnapshotCloneServerTest : public ::testing::Test {

CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr;
FileClient* SnapshotCloneServerTest::fileClient_ = nullptr;
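The data-correctness assertions in these tests depend on the write pattern prepared above: only the first 4 KB of every chunkGap-th chunk is written, spanning two segments, so verification touches every relevant chunk with very little I/O. A sketch of that loop under the constants defined earlier in this file (segmentSize, chunkSize, chunkGap); the wrapper name WriteSparsePattern is made up:

    // Write the first 4 KB of every chunkGap-th chunk across two
    // segments (sketch of the pattern used by the write helpers).
    bool WriteSparsePattern(int fd) {
        std::string fakeData(4096, 'x');
        uint64_t totalChunk = 2ULL * segmentSize / chunkSize;
        for (uint64_t i = 0; i < totalChunk / chunkGap; i++) {
            uint64_t offset = i * chunkGap * chunkSize;
            int ret = fileClient_->Write(fd, fakeData.c_str(), offset,
                                         fakeData.size());
            if (ret < 0) {
                LOG(ERROR) << "Write fail, ret = " << ret;
                return false;
            }
        }
        return true;
    }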
-// 并发测试用例
+// Concurrent test cases

-// 这个用例测试快照层数,放在最前面
+// This case tests the snapshot depth limit, so it is placed first
TEST_F(SnapshotCloneServerTest, TestSameFile3Snapshot) {
    std::string uuid1, uuid2, uuid3;
    int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1);
@@ -506,7 +503,8 @@ TEST_F(SnapshotCloneServerTest, TestSameFile3Snapshot) {
    bool success3 = CheckSnapshotSuccess(testUser1_, testFile1_, uuid3);
    ASSERT_TRUE(success3);

-    // 快照层数设置为3,尝试再打一次快照,超过层数失败
+    // The snapshot depth limit is set to 3; taking one more snapshot
+    // exceeds the limit and fails
    ret = MakeSnapshot(testUser1_, testFile1_, "snap3", &uuid3);
    ASSERT_EQ(kErrCodeSnapshotCountReachLimit, ret);
@@ -585,7 +583,7 @@ TEST_F(SnapshotCloneServerTest, TestSnapSameClone1Success) {
    ret1 = CloneOrRecover("Clone", testUser1_, snapId, dstFile, true, &uuid1);
    ASSERT_EQ(0, ret1);

-    // 幂等
+    // Idempotent
    ret2 = CloneOrRecover("Clone", testUser1_, snapId, dstFile, true, &uuid2);
    ASSERT_EQ(0, ret2);
@@ -732,7 +730,7 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyCloneSnap) {
    ASSERT_TRUE(WriteFile(dstFile, testUser1_, fakeData));
    ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));

-    // 判断是否clone成功
+    // Check whether the clone succeeded
    bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true);
    ASSERT_TRUE(success1);
}
@@ -747,7 +745,8 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyCloneImage) {

    ASSERT_TRUE(WaitMetaInstalledSuccess(testUser1_, uuid1, true));

-    // clone完成stage1之后即可对外提供服务,测试克隆卷是否能正常读取数据
+    // Once stage 1 of the clone completes, the volume can serve I/O;
+    // test that the cloned volume reads data correctly
    std::string fakeData1(4096, 'x');
    ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData1));
@@ -759,7 +758,7 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyCloneImage) {
    ASSERT_TRUE(WriteFile(dstFile, testUser1_, fakeData2));
    ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData2));

-    // 判断是否clone成功
+    // Check whether the clone succeeded
    bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true);
    ASSERT_TRUE(success1);
}
@@ -782,7 +781,7 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyRecoverSnap) {
    ASSERT_TRUE(WriteFile(dstFile, testUser1_, fakeData));
    ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));

-    // 判断是否clone成功
+    // Check whether the recover succeeded
    bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false);
    ASSERT_TRUE(success1);
}
diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp
index 326ebe66c0..6da5478c86 100644
--- a/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp
+++ b/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp
@@ -20,15 +20,15 @@
 * Author: xuchaojie
*/

-#include
-#include
-#include
#include
#include
+#include
+#include
+#include
+#include "test/integration/cluster_common/cluster.h"
#include "test/integration/snapshotcloneserver/snapshotcloneserver_module.h"
#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h"
-#include "test/integration/cluster_common/cluster.h"

using curve::CurveCluster;
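Every test in this exception suite has the same libfiu shape: arm a named failpoint, drive the operation, disarm the failpoint, then assert that the task failed and the environment was cleaned. A minimal sketch of that pattern (the failpoint name is passed in by the caller; fiu_init, fiu_enable, and fiu_disable are the actual libfiu API, and passing FIU_ONETIME instead of 0 makes a failpoint fire only once, as the RecoverChunkFailOneTime test below does):

    #include <fiu-control.h>
    #include <fiu.h>

    #include <functional>

    // Generic shape of the fault-injection tests in this file (sketch).
    void RunWithFailpoint(const char* failpoint,
                          const std::function<void()>& op) {
        fiu_init(0);                        // once per process (see SetUp)
        fiu_enable(failpoint, 1, NULL, 0);  // matching fiu points now fail
        op();                               // drive the operation under test
        fiu_disable(failpoint);             // disarm before verifying cleanup
    }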
@@ -73,9 +73,9 @@ class SnapshotCloneServerTest : public ::testing::Test {
        cluster_ = new CurveCluster();
        ASSERT_NE(nullptr, cluster_);
        system(std::string("rm -rf ExcSCSTest.etcd").c_str());
-        pid_t pid = cluster_->StartSingleEtcd(1, kEtcdClientIpPort,
-                                              kEtcdPeerIpPort,
-                                              std::vector{ "--name=ExcSCSTest"});
+        pid_t pid = cluster_->StartSingleEtcd(
+            1, kEtcdClientIpPort, kEtcdPeerIpPort,
+            std::vector{"--name=ExcSCSTest"});
        LOG(INFO) << "etcd 1 started on " << kEtcdPeerIpPort
                  << ", pid = " << pid;
        ASSERT_GT(pid, 0);
@@ -92,22 +92,18 @@ class SnapshotCloneServerTest : public ::testing::Test {
        system(std::string("rm -rf ExcSCSTest.etcd").c_str());
    }

-    void SetUp() override {
-        fiu_init(0);
-    }
+    void SetUp() override { fiu_init(0); }

    void TearDown() override {
        // noop
    }

-    bool JudgeSnapTaskFailCleanTaskAndCheck(
-        const std::string &user,
-        const std::string &file,
-        const std::string &uuid,
-        SnapshotInfo *snapInfo) {
-        // 验证任务失败
+    bool JudgeSnapTaskFailCleanTaskAndCheck(const std::string& user,
+                                            const std::string& file,
+                                            const std::string& uuid,
+                                            SnapshotInfo* snapInfo) {
+        // Verify that the task failed
        FileSnapshotInfo info1;
-        int ret = GetSnapshotInfo(
-            user, file, uuid, &info1);
+        int ret = GetSnapshotInfo(user, file, uuid, &info1);
        if (ret < 0) {
            LOG(INFO) << "GetSnapshotInfo Fail"
                      << ", ret = " << ret;
@@ -126,7 +122,7 @@ class SnapshotCloneServerTest : public ::testing::Test {
            return false;
        }

-        // 验证任务不存在
+        // Verify that the task does not exist
        SnapshotInfo sinfo;
        ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo);
        if (ret != -1) {
@@ -137,28 +133,27 @@ class SnapshotCloneServerTest : public ::testing::Test {
        return true;
    }

-    bool JudgeSnapTaskFailCleanEnvAndCheck(
-        const std::string &user,
-        const std::string &file,
-        const std::string &uuid) {
+    bool JudgeSnapTaskFailCleanEnvAndCheck(const std::string& user,
+                                           const std::string& file,
+                                           const std::string& uuid) {
        SnapshotInfo snapInfo;
-        bool success = JudgeSnapTaskFailCleanTaskAndCheck(
-            user, file, uuid, &snapInfo);
+        bool success =
+            JudgeSnapTaskFailCleanTaskAndCheck(user, file, uuid, &snapInfo);
        if (!success) {
            return false;
        }
        int seqNum = snapInfo.GetSeqNum();

-        // 验证curve上无快照
+        // Verify that no snapshot is left on curve
        FInfo fInfo;
-        int ret = server_->GetCurveFsClient()->GetSnapshot(
-            file, user, seqNum, &fInfo);
+        int ret = server_->GetCurveFsClient()->GetSnapshot(file, user, seqNum,
+                                                           &fInfo);
        if (ret != -LIBCURVE_ERROR::NOTEXIST) {
            LOG(INFO) << "AssertEnvClean Fail, snapshot exist on curve"
                      << ", ret = " << ret;
            return false;
        }

-        // 验证nos上无快照
+        // Verify that no snapshot is left on nos
        ChunkIndexDataName indexData(file, seqNum);
        if (server_->GetDataStore()->ChunkIndexDataExist(indexData)) {
            LOG(INFO) << "AssertEnvClean Fail, snapshot exist on nos.";
@@ -167,13 +162,11 @@ class SnapshotCloneServerTest : public ::testing::Test {
        return true;
    }

-    bool JudgeCloneTaskFailCleanEnvAndCheck(
-        const std::string &user,
-        const std::string &uuid) {
-        // 验证任务状态为error
+    bool JudgeCloneTaskFailCleanEnvAndCheck(const std::string& user,
+                                            const std::string& uuid) {
+        // Verify that the task status is error
        TaskCloneInfo info1;
-        int ret = GetCloneTaskInfo(
-            user, uuid, &info1);
+        int ret = GetCloneTaskInfo(user, uuid, &info1);
        if (ret < 0) {
            LOG(INFO) << "GetCloneTask fail"
                      << ", ret = " << ret;
@@ -188,31 +181,28 @@ class SnapshotCloneServerTest : public ::testing::Test {
        return CleanCloneTaskAndCheckEnvClean(user, uuid);
    }

-    bool JudgeCloneTaskNotExistCleanEnvAndCheck(
-        const std::string &user,
-        const std::string &uuid) {
-        // 验证任务不存在
+    bool JudgeCloneTaskNotExistCleanEnvAndCheck(const std::string& user,
+                                                const std::string& uuid) {
+        // Verify that the task does not exist
        TaskCloneInfo info1;
-        int ret = GetCloneTaskInfo(
-            user, uuid, &info1);
+        int ret = GetCloneTaskInfo(user, uuid, &info1);
        if (ret != kErrCodeFileNotExist) {
            LOG(INFO) << "AsserTaskNotExist fail"
                      << ", ret = " << ret;
            return false;
        }

-        // 验证curvefs上无临时文件
+        // Verify that there are no temporary files on curvefs
        if (server_->GetCurveFsClient()->JudgeCloneDirHasFile()) {
            LOG(INFO) << "AssertEnvClean fail"
-                << ", ret = " << ret;
+                      << ", ret = " << ret;
            return false;
        }
        return true;
    }

-    bool CleanCloneTaskAndCheckEnvClean(
-        const std::string &user,
-        const std::string &uuid) {
+    bool CleanCloneTaskAndCheckEnvClean(const std::string& user,
+                                        const std::string& uuid) {
        int ret = CleanCloneTask(user, uuid);
        if (ret < 0) {
            LOG(INFO) << "CleanCloneTask fail"
@@ -222,7 +212,7 @@ class SnapshotCloneServerTest : public ::testing::Test {

        std::this_thread::sleep_for(std::chrono::milliseconds(3000));

-        // 验证任务不存在
+        // Verify that the task does not exist
        TaskCloneInfo info;
        ret = GetCloneTaskInfo(user, uuid, &info);
        if (kErrCodeFileNotExist != ret) {
@@ -231,34 +221,29 @@ class SnapshotCloneServerTest : public ::testing::Test {
            return false;
        }

-        // 验证curvefs上无临时文件
+        // Verify that there are no temporary files on curvefs
        if (server_->GetCurveFsClient()->JudgeCloneDirHasFile()) {
            LOG(INFO) << "AssertEnvClean fail"
-                << ", ret = " << ret;
+                      << ", ret = " << ret;
            return false;
        }
        return true;
    }

-    bool PrepreTestSnapshot(
-        const std::string &user,
-        const std::string &file,
-        const std::string &snapName,
-        std::string *uuid) {
-        int ret = MakeSnapshot(user,
-            file , snapName, uuid);
+    bool PrepreTestSnapshot(const std::string& user, const std::string& file,
+                            const std::string& snapName, std::string* uuid) {
+        int ret = MakeSnapshot(user, file, snapName, uuid);
        if (ret < 0) {
            return false;
        }
-        bool success1 = CheckSnapshotSuccess(user, file,
-            *uuid);
+        bool success1 = CheckSnapshotSuccess(user, file, *uuid);
        return success1;
    }

    bool PrepreTestSnapshotIfNotExist() {
        if (testSnapId_.empty()) {
-            bool ret = PrepreTestSnapshot(testUser1,
-                testFile1, "testSnap", &testSnapId_);
+            bool ret = PrepreTestSnapshot(testUser1, testFile1, "testSnap",
+                                          &testSnapId_);
            return ret;
        }
        return true;
@@ -266,53 +251,56 @@ class SnapshotCloneServerTest : public ::testing::Test {

    std::string testSnapId_;

-    static SnapshotCloneServerModule *server_;
-    static SnapshotCloneServerOptions *options_;
+    static SnapshotCloneServerModule* server_;
+    static SnapshotCloneServerOptions* options_;
    static CurveCluster* cluster_;
};

-SnapshotCloneServerModule * SnapshotCloneServerTest::server_ = nullptr;
-SnapshotCloneServerOptions * SnapshotCloneServerTest::options_ = nullptr;
-CurveCluster * SnapshotCloneServerTest::cluster_ = nullptr;
+SnapshotCloneServerModule* SnapshotCloneServerTest::server_ = nullptr;
+SnapshotCloneServerOptions* SnapshotCloneServerTest::options_ = nullptr;
+CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr;

TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnCurvefs) {
    std::string uuid;
    std::string user = testUser1;
    std::string file = testFile1;

-    fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateSnapshot",  // NOLINT
+    fiu_enable(
+        "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateSnapshot",  // NOLINT
        1, NULL, 0);

-    int ret = MakeSnapshot(user, file , "snap1", &uuid);
+    int ret = MakeSnapshot(user, file, "snap1", &uuid);
    ASSERT_EQ(0, ret);

    std::this_thread::sleep_for(std::chrono::milliseconds(3000));

-    fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateSnapshot");  // NOLINT
+    fiu_disable(
+        "test/integration/snapshotcloneserver/"
+        "FakeCurveFsClient.CreateSnapshot");  // NOLINT

    SnapshotInfo snapInfo;
-    ASSERT_TRUE(JudgeSnapTaskFailCleanTaskAndCheck(
-        user, file, uuid, &snapInfo));
+    ASSERT_TRUE(
+        JudgeSnapTaskFailCleanTaskAndCheck(user, file, uuid, &snapInfo));
}
-
TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnGetSnapshot) {
    std::string uuid;
    std::string user = testUser1;
    std::string file = testFile1;

-
fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap2", &uuid); + int ret = MakeSnapshot(user, file, "snap2", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnDeleteSnapshot) { @@ -320,18 +308,20 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnDeleteSnapshot) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.DeleteSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.DeleteSnapshot", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap3", &uuid); + int ret = MakeSnapshot(user, file, "snap3", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.DeleteSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.DeleteSnapshot"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnCheckSnapShotStatus) { @@ -339,38 +329,44 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnCheckSnapShotStatus) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CheckSnapShotStatus", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CheckSnapShotStatus", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap4", &uuid); + int ret = MakeSnapshot(user, file, "snap4", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CheckSnapShotStatus"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CheckSnapShotStatus"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, - TestCreateSnapshotFailOnGetSnapshotSegmentInfo) { + TestCreateSnapshotFailOnGetSnapshotSegmentInfo) { std::string uuid; std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshotSegmentInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetSnapshotSegmentInfo", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap5", &uuid); + int ret = MakeSnapshot(user, file, "snap5", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshotSegmentInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetSnapshotSegmentInfo"); // 
NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnReadChunkSnapshot) { @@ -378,18 +374,21 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnReadChunkSnapshot) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ReadChunkSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.ReadChunkSnapshot", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap6", &uuid); + int ret = MakeSnapshot(user, file, "snap6", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ReadChunkSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.ReadChunkSnapshot"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnGetChunkInfo) { @@ -397,18 +396,19 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnGetChunkInfo) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap7", &uuid); + int ret = MakeSnapshot(user, file, "snap7", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnAddSnapshot) { @@ -416,16 +416,20 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnAddSnapshot) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddSnapshot", // NOLINT 1, NULL, 0); - // 验证任务失败 - int ret = MakeSnapshot(user, file , "snap8", &uuid); + // Verification task failed + int ret = MakeSnapshot(user, file, "snap8", &uuid); ASSERT_EQ(-1, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddSnapshot"); // NOLINT - // 验证任务不存在 + // Verification task does not exist SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(-1, ret); @@ -436,20 +440,23 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnUpdateSnapshot) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.UpdateSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.UpdateSnapshot", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap9", &uuid); + int ret = 
MakeSnapshot(user, file, "snap9", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.UpdateSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.UpdateSnapshot"); // NOLINT - // 验证任务失败 + // Verification task failed FileSnapshotInfo info1; - ret = GetSnapshotInfo( - user, file, uuid, &info1); + ret = GetSnapshotInfo(user, file, uuid, &info1); ASSERT_EQ(kErrCodeInternalError, ret); @@ -462,38 +469,44 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnPutChunkIndexData) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.PutChunkIndexData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.PutChunkIndexData", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap10", &uuid); + int ret = MakeSnapshot(user, file, "snap10", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.PutChunkIndexData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.PutChunkIndexData"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, - TestCreateSnapshotFailOnDataChunkTranferComplete) { + TestCreateSnapshotFailOnDataChunkTranferComplete) { std::string uuid; std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DataChunkTranferComplete", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DataChunkTranferComplete", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap11", &uuid); + int ret = MakeSnapshot(user, file, "snap11", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DataChunkTranferComplete"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DataChunkTranferComplete"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnGetChunkIndexData) { @@ -503,16 +516,20 @@ TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnGetChunkIndexData) { ASSERT_TRUE(PrepreTestSnapshot(user, file, "snap12", &uuid)); - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verification deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT - // 验证任务失败 + // Verification task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -526,16 +543,20 @@ TEST_F(SnapshotCloneServerTest, 
TestDeleteSnapshotFailOnDeleteChunkData) { ASSERT_TRUE(PrepreTestSnapshot(user, file, "snap13", &uuid)); - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DeleteChunkData", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verification deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DeleteChunkData"); // NOLINT - // 验证任务失败 + // Verification task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -549,16 +570,20 @@ TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnDeleteChunkIndexData) { ASSERT_TRUE(PrepreTestSnapshot(user, file, "snap14", &uuid)); - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkIndexData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DeleteChunkIndexData", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verification deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkIndexData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DeleteChunkIndexData"); // NOLINT - // 验证任务失败 + // Verification task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -572,16 +597,20 @@ TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnDeleteSnapshot) { ASSERT_TRUE(PrepreTestSnapshot(user, file, "snap15", &uuid)); - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.DeleteSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.DeleteSnapshot", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verification deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.DeleteSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.DeleteSnapshot"); // NOLINT - // 验证任务失败 + // Verification task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -592,220 +621,234 @@ TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnCreateCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + 
ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnCompleteCloneMeta) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnGetFileInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyCloneSnapFailOnGetOrAllocateSegmentInfo) { + TestLazyCloneSnapFailOnGetOrAllocateSegmentInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnRenameCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - 
"/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnCreateCloneChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnChangeOwner) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnGetChunkIndexData) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, 
uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnAddCloneInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyCloneSnapFailOnFileNotExistWhenRecoverChunk) { + TestLazyCloneSnapFailOnFileNotExistWhenRecoverChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(0, ret); // Flatten ret = Flatten(testUser1, uuid1); ASSERT_EQ(0, ret); - // 克隆未完成前删除目标文件 + // Delete target file before cloning is completed ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); + server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT } TEST_F(SnapshotCloneServerTest, - TestLazyCloneSnapSuccessWhenRecoverChunkFailOneTime) { + TestLazyCloneSnapSuccessWhenRecoverChunkFailOneTime) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, FIU_ONETIME); int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/cloneSuccess1", true, - &uuid1); + "/user1/cloneSuccess1", true, &uuid1); ASSERT_EQ(0, ret); // Flatten @@ -815,238 +858,251 @@ TEST_F(SnapshotCloneServerTest, bool success1 = CheckCloneOrRecoverSuccess(testUser1, uuid1, true); ASSERT_TRUE(success1); - ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile("/user1/cloneSuccess1", "", 0)); + ASSERT_EQ(LIBCURVE_ERROR::OK, server_->GetCurveFsClient()->DeleteFile( + "/user1/cloneSuccess1", "", 0)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT - ASSERT_TRUE(CleanCloneTaskAndCheckEnvClean( - testUser1, uuid1)); + 
ASSERT_TRUE(CleanCloneTaskAndCheckEnvClean(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyCloneSnapFailOnRecoverChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyCloneSnapFailOnRenameCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyCloneSnapFailOnChangeOwner) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyCloneSnapFailOnCompleteCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); 
std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnCreateCloneFile) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnCompleteCloneMeta) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnGetFileInfo) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyCloneImageFailOnGetOrAllocateSegmentInfo) { + TestLazyCloneImageFailOnGetOrAllocateSegmentInfo) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT + fiu_enable( + 
"test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } -TEST_F(SnapshotCloneServerTest, - TestLazyCloneImageFailOnRenameCloneFile) { +TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnRenameCloneFile) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnAddCloneInfo) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapImageOnChangeOwner) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + 
ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } -TEST_F(SnapshotCloneServerTest, - TestLazyCloneImageFailOnCreateCloneChunk) { +TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnCreateCloneChunk) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(0, ret); // Flatten @@ -1056,52 +1112,53 @@ TEST_F(SnapshotCloneServerTest, std::this_thread::sleep_for(std::chrono::milliseconds(3000)); ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); + server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyCloneImageFailOnFileNotExistWhenRecoverChunk) { + TestLazyCloneImageFailOnFileNotExistWhenRecoverChunk) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(0, ret); // Flatten ret = Flatten(testUser1, uuid1); ASSERT_EQ(0, ret); - // 克隆未完成前删除目标文件 + // Delete target file before cloning is completed ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); + server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT } TEST_F(SnapshotCloneServerTest, - TestLazyCloneImageSuccessWhenRecoverChunkFailOneTime) { + TestLazyCloneImageSuccessWhenRecoverChunkFailOneTime) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, FIU_ONETIME); int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/cloneSuccess2", true, - &uuid1); + "/user1/cloneSuccess2", true, &uuid1); ASSERT_EQ(0, ret); // Flatten @@ -1111,276 +1168,299 @@ TEST_F(SnapshotCloneServerTest, bool success1 = CheckCloneOrRecoverSuccess(testUser1, uuid1, true); ASSERT_TRUE(success1); - ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile("/user1/cloneSuccess2", "", 0)); + ASSERT_EQ(LIBCURVE_ERROR::OK, server_->GetCurveFsClient()->DeleteFile( + "/user1/cloneSuccess2", "", 0)); - 
fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT - ASSERT_TRUE(CleanCloneTaskAndCheckEnvClean( - testUser1, uuid1)); + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + ASSERT_TRUE(CleanCloneTaskAndCheckEnvClean(testUser1, uuid1)); } -TEST_F(SnapshotCloneServerTest, - TestNotLazyCloneImageFailOnRecoverChunk) { +TEST_F(SnapshotCloneServerTest, TestNotLazyCloneImageFailOnRecoverChunk) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } -TEST_F(SnapshotCloneServerTest, - TestNotLazyCloneImageFailOnRenameCloneFile) { +TEST_F(SnapshotCloneServerTest, TestNotLazyCloneImageFailOnRenameCloneFile) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyCloneSnapImageOnChangeOwner) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } -TEST_F(SnapshotCloneServerTest, - TestNotLazyCloneImageFailOnCompleteCloneFile) { +TEST_F(SnapshotCloneServerTest, TestNotLazyCloneImageFailOnCompleteCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile", // NOLINT + 
fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnCreateCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnCompleteCloneMeta) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnGetFileInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyRecoverSnapFailOnGetOrAllocateSegmentInfo) { + 
TestLazyRecoverSnapFailOnGetOrAllocateSegmentInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnRenameCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnCreateCloneChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnGetChunkIndexData) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT - 
ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnAddCloneInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnChangeOwner) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } - TEST_F(SnapshotCloneServerTest, - TestLazyRecoverSnapSuccessWhenRecoverChunkFailOneTime) { + TestLazyRecoverSnapSuccessWhenRecoverChunkFailOneTime) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, FIU_ONETIME); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, true, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, + &uuid1); ASSERT_EQ(0, ret); // Flatten @@ -1390,116 +1470,121 @@ TEST_F(SnapshotCloneServerTest, bool success1 = CheckCloneOrRecoverSuccess(testUser1, uuid1, false); ASSERT_TRUE(success1); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT } TEST_F(SnapshotCloneServerTest, TestNotLazyRecoverSnapFailOnRecoverChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, false, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - 
fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyRecoverSnapFailOnRenameCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, false, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyRecoverSnapFailOnChangeOwner) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, false, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyRecoverSnapFailOnCompleteCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, false, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyRecoverSnapFailOnFileNotExistWhenRecoverChunk) { + TestLazyRecoverSnapFailOnFileNotExistWhenRecoverChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( 
+ "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, true, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, + &uuid1); ASSERT_EQ(0, ret); // Flatten ret = Flatten(testUser1, uuid1); ASSERT_EQ(0, ret); - // 恢复未完成前删除目标文件 + // Delete target files before recovery is complete ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile(testFile1, "", 0)); + server_->GetCurveFsClient()->DeleteFile(testFile1, "", 0)); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT } } // namespace snapshotcloneserver diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp index 2e549688b8..b1b99953ae 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp @@ -20,16 +20,14 @@ * Author: xuchaojie */ -#include - #include "test/integration/snapshotcloneserver/snapshotcloneserver_module.h" +#include namespace curve { namespace snapshotcloneserver { -int SnapshotCloneServerModule::Start( - const SnapshotCloneServerOptions &option) { +int SnapshotCloneServerModule::Start(const SnapshotCloneServerOptions& option) { serverOption_ = option; client_ = std::make_shared(); @@ -45,13 +43,8 @@ int SnapshotCloneServerModule::Start( auto cloneRef_ = std::make_shared(); - auto core = - std::make_shared( - client_, - metaStore_, - dataStore_, - snapshotRef_, - serverOption_); + auto core = std::make_shared( + client_, metaStore_, dataStore_, snapshotRef_, serverOption_); if (core->Init() < 0) { LOG(ERROR) << "SnapshotCore init fail."; @@ -61,8 +54,7 @@ int SnapshotCloneServerModule::Start( auto taskMgr = std::make_shared(core, snapshotMetric); snapshotServiceManager_ = - std::make_shared(taskMgr, - core); + std::make_shared(taskMgr, core); if (snapshotServiceManager_->Init(serverOption_) < 0) { LOG(ERROR) << "SnapshotServiceManager init fail."; @@ -71,13 +63,9 @@ int SnapshotCloneServerModule::Start( auto cloneMetric = std::make_shared(); - auto cloneCore = std::make_shared( - client_, - metaStore_, - dataStore_, - snapshotRef_, - cloneRef_, - serverOption_); + auto cloneCore = + std::make_shared(client_, metaStore_, dataStore_, + snapshotRef_, cloneRef_, serverOption_); if (cloneCore->Init() < 0) { LOG(ERROR) << "CloneCore init fail."; return kErrCodeServerInitFail; @@ -87,28 +75,26 @@ int SnapshotCloneServerModule::Start( std::make_shared(cloneCore, cloneMetric); auto cloneServiceManagerBackend = - std::make_shared(cloneCore); + std::make_shared(cloneCore); - cloneServiceManager_ = - std::make_shared(cloneTaskMgr, - cloneCore, cloneServiceManagerBackend); + cloneServiceManager_ = std::make_shared( + cloneTaskMgr, cloneCore, cloneServiceManagerBackend); if (cloneServiceManager_->Init(serverOption_) < 0) { LOG(ERROR) << "CloneServiceManager init fail."; return kErrCodeServerInitFail; } server_ = std::make_shared(); - service_ = - std::make_shared( - snapshotServiceManager_, - cloneServiceManager_); + service_ = std::make_shared( + 
snapshotServiceManager_, cloneServiceManager_); - if (server_->AddService(service_.get(), - brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + if (server_->AddService(service_.get(), brpc::SERVER_DOESNT_OWN_SERVICE) != + 0) { LOG(ERROR) << "Failed to add snapshot_service!\n"; return kErrCodeServerInitFail; } - // 先启动clone服务再启动snapshot服务,因为删除快照依赖是否有clone引用 + // Start the clone service before the snapshot service, because deleting + // a snapshot depends on whether there are outstanding clone references int ret = cloneServiceManager_->Start(); if (ret < 0) { LOG(ERROR) << "cloneServiceManager start fail" diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp index 18a113ef0f..78c0f3211a 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp @@ -20,20 +20,20 @@ * Author: xuchaojie */ -#include -#include #include +#include +#include #include -#include "src/common/uuid.h" -#include "src/common/location_operator.h" -#include "test/integration/cluster_common/cluster.h" #include "src/client/libcurve_file.h" -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" -#include "src/snapshotcloneserver/clone/clone_service_manager.h" -#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" +#include "src/common/location_operator.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/common/uuid.h" +#include "src/snapshotcloneserver/clone/clone_service_manager.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" +#include "test/integration/cluster_common/cluster.h" +#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" using curve::CurveCluster; using curve::client::FileClient; @@ -49,27 +49,27 @@ const uint64_t chunkSize = 16ULL * 1024 * 1024; const uint64_t segmentSize = 32ULL * 1024 * 1024; const uint64_t chunkSplitSize = 8388608; -// 测试文件只写2个segment +// Write only 2 segments in the test file const uint64_t testFile1AllocSegmentNum = 2; -// 一些常数定义 -const char *cloneTempDir_ = "/clone"; -const char *mdsRootUser_ = "root"; -const char *mdsRootPassword_ = "root_password"; +// Some constant definitions +const char* cloneTempDir_ = "/clone"; +const char* mdsRootUser_ = "root"; +const char* mdsRootPassword_ = "root_password"; constexpr uint32_t kProgressTransferSnapshotDataStart = 10; -const char *kEtcdClientIpPort = "127.0.0.1:10021"; -const char *kEtcdPeerIpPort = "127.0.0.1:10022"; -const char *kMdsIpPort = "127.0.0.1:10023"; -const char *kChunkServerIpPort1 = "127.0.0.1:10024"; -const char *kChunkServerIpPort2 = "127.0.0.1:10025"; -const char *kChunkServerIpPort3 = "127.0.0.1:10026"; -const char *kSnapshotCloneServerIpPort = "127.0.0.1:10027"; -const char *kSnapshotCloneServerDummyServerPort = "12002"; -const char *kLeaderCampaginPrefix = "snapshotcloneserverleaderlock1"; +const char* kEtcdClientIpPort = "127.0.0.1:10021"; +const char* kEtcdPeerIpPort = "127.0.0.1:10022"; +const char* kMdsIpPort = "127.0.0.1:10023"; +const char* kChunkServerIpPort1 = "127.0.0.1:10024"; +const char* kChunkServerIpPort2 = "127.0.0.1:10025"; +const char* kChunkServerIpPort3 = "127.0.0.1:10026"; +const char* kSnapshotCloneServerIpPort = "127.0.0.1:10027"; +const char* kSnapshotCloneServerDummyServerPort = "12002"; +const char*
kLeaderCampaginPrefix = "snapshotcloneserverleaderlock1"; -static const char *kDefaultPoolset = "default"; +static const char* kDefaultPoolset = "default"; const int kMdsDummyPort = 10028; @@ -79,27 +79,26 @@ const std::string kMdsConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_mds.conf"; -const std::string kCSConfigPath = // NOLINT +const std::string kCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_chunkserver.conf"; -const std::string kCsClientConfigPath = // NOLINT +const std::string kCsClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_cs_client.conf"; -const std::string kSnapClientConfigPath = // NOLINT +const std::string kSnapClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_snap_client.conf"; -const std::string kS3ConfigPath = // NOLINT - "./test/integration/snapshotcloneserver/config/" + kTestPrefix + - "_s3.conf"; +const std::string kS3ConfigPath = // NOLINT + "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_s3.conf"; -const std::string kSCSConfigPath = // NOLINT +const std::string kSCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_scs.conf"; -const std::string kClientConfigPath = // NOLINT +const std::string kClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_client.conf"; @@ -120,11 +119,11 @@ const std::vector mdsConfigOptions{ }; const std::vector mdsConf1{ - { "--graceful_quit_on_sigterm" }, + {"--graceful_quit_on_sigterm"}, std::string("--confPath=") + kMdsConfigPath, std::string("--log_dir=") + kLogPath, std::string("--segmentSize=") + std::to_string(segmentSize), - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector chunkserverConfigOptions{ @@ -150,66 +149,63 @@ const std::vector snapClientConfigOptions{ const std::vector s3ConfigOptions{}; const std::vector chunkserverConf1{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "1/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "1/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "1/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "1/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "1/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "1/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "1/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "1/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "1/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "1/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "1/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + 
"1/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "1/walfilepool.meta"}, }; const std::vector chunkserverConf2{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "2/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "2/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "2/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "2/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "2/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "2/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "2/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "2/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "2/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "2/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "2/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "2/walfilepool.meta"}, }; const std::vector chunkserverConf3{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "3/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "3/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "3/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "3/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "3/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "3/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "3/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "3/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "3/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "3/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "3/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "3/walfilepool.meta"}, }; const std::vector snapshotcloneserverConfigOptions{ @@ -236,7 +232,7 @@ const std::vector snapshotcloneserverConfigOptions{ const std::vector snapshotcloneConf{ 
std::string("--conf=") + kSCSConfigPath, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector clientConfigOptions{ @@ -245,8 +241,8 @@ const std::vector clientConfigOptions{ std::string("mds.rpcTimeoutMS=4000"), }; -const char *testFile1_ = "/RcvItUser1/file1"; -const char *testUser1_ = "RcvItUser1"; +const char* testFile1_ = "/RcvItUser1/file1"; +const char* testUser1_ = "RcvItUser1"; int testFd1_ = 0; namespace curve { @@ -261,16 +257,16 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); - // 初始化db + // Initialize db system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); system(std::string("rm -rf " + kTestPrefix + "1").c_str()); system(std::string("rm -rf " + kTestPrefix + "2").c_str()); system(std::string("rm -rf " + kTestPrefix + "3").c_str()); - // 启动etcd + // Start etcd pid_t pid = cluster_->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, - std::vector{ "--name=" + kTestPrefix }); + std::vector{"--name=" + kTestPrefix}); LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort << "::" << kEtcdPeerIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); @@ -280,13 +276,13 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions); - // 启动一个mds + // Start an mds pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 创建物理池 + // Creating a physical pool ASSERT_EQ(0, cluster_->PreparePhysicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -295,21 +291,18 @@ class SnapshotCloneServerTest : public ::testing::Test { // format chunkfilepool and walfilepool std::vector threadpool(3); - threadpool[0] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "1/chunkfilepool/", - "./" + kTestPrefix + "1/chunkfilepool.meta", - "./" + kTestPrefix + "1/chunkfilepool/", 2); - threadpool[1] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "2/chunkfilepool/", - "./" + kTestPrefix + "2/chunkfilepool.meta", - "./" + kTestPrefix + "2/chunkfilepool/", 2); - threadpool[2] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "3/chunkfilepool/", - "./" + kTestPrefix + "3/chunkfilepool.meta", - "./" + kTestPrefix + "3/chunkfilepool/", 2); + threadpool[0] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "1/chunkfilepool/", + "./" + kTestPrefix + "1/chunkfilepool.meta", + "./" + kTestPrefix + "1/chunkfilepool/", 2); + threadpool[1] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "2/chunkfilepool/", + "./" + kTestPrefix + "2/chunkfilepool.meta", + "./" + kTestPrefix + "2/chunkfilepool/", 2); + threadpool[2] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "3/chunkfilepool/", + "./" + kTestPrefix + "3/chunkfilepool.meta", + "./" + kTestPrefix + "3/chunkfilepool/", 2); for (int i = 0; i < 3; i++) { threadpool[i].join(); } @@ -323,7 +316,7 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kCSConfigPath, chunkserverConfigOptions); - // 创建chunkserver + // Create chunkserver pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1 @@ -342,7 +335,8 @@ class SnapshotCloneServerTest : public ::testing::Test { 
std::this_thread::sleep_for(std::chrono::seconds(5)); - // 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // Create a logical pool, then sleep for a while so the underlying + // copysets can elect their leaders first ASSERT_EQ(0, cluster_->PrepareLogicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -380,9 +374,9 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(INFO) << "Write testFile1_ success."; } - static bool CreateAndWriteFile(const std::string &fileName, - const std::string &user, - const std::string &dataSample, int *fdOut) { + static bool CreateAndWriteFile(const std::string& fileName, + const std::string& user, + const std::string& dataSample, int* fdOut) { UserInfo_t userinfo; userinfo.owner = user; int ret = fileClient_->Create(fileName, userinfo, testFile1Length); @@ -393,8 +387,8 @@ class SnapshotCloneServerTest : public ::testing::Test { return WriteFile(fileName, user, dataSample, fdOut); } - static bool WriteFile(const std::string &fileName, const std::string &user, - const std::string &dataSample, int *fdOut) { + static bool WriteFile(const std::string& fileName, const std::string& user, + const std::string& dataSample, int* fdOut) { int ret = 0; UserInfo_t userinfo; userinfo.owner = user; @@ -403,7 +397,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(ERROR) << "Open fail, ret = " << *fdOut; return false; } - // 2个segment,每个写第一个chunk + // 2 segments, each with the first chunk written for (uint64_t i = 0; i < testFile1AllocSegmentNum; i++) { ret = fileClient_->Write(*fdOut, dataSample.c_str(), i * segmentSize, dataSample.size()); @@ -420,14 +414,14 @@ class SnapshotCloneServerTest : public ::testing::Test { return true; } - static bool CheckFileData(const std::string &fileName, - const std::string &user, - const std::string &dataSample) { + static bool CheckFileData(const std::string& fileName, + const std::string& user, + const std::string& dataSample) { UserInfo_t userinfo; userinfo.owner = user; int ret = 0; - // 检查文件状态 + // Check file status FInfo fileInfo; ret = snapClient_->GetFileInfo(fileName, userinfo, &fileInfo); if (ret < 0) { @@ -489,7 +483,7 @@ class SnapshotCloneServerTest : public ::testing::Test { void TearDown() {} - void PrepareSnapshotForTestFile1(std::string *uuid1) { + void PrepareSnapshotForTestFile1(std::string* uuid1) { if (!hasSnapshotForTestFile1_) { int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", uuid1); ASSERT_EQ(0, ret); @@ -508,23 +502,23 @@ class SnapshotCloneServerTest : public ::testing::Test { } } - int PrepareCreateCloneFile(const std::string &fileName, FInfo *fInfoOut, + int PrepareCreateCloneFile(const std::string& fileName, FInfo* fInfoOut, bool IsRecover = false) { uint64_t seqNum = 1; if (IsRecover) { - seqNum = 2; // 恢复新文件使用版本号+1 + seqNum = 2; // Recovering a new file uses version number + 1 } else { - seqNum = 1; // 克隆新文件使用初始版本号1 + seqNum = 1; // Cloning a new file uses initial version number 1 } int ret = snapClient_->CreateCloneFile( - testFile1_, fileName, - UserInfo_t(mdsRootUser_, mdsRootPassword_), testFile1Length, - seqNum, chunkSize, 0, 0, kDefaultPoolset, fInfoOut); + testFile1_, fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_), + testFile1Length, seqNum, chunkSize, 0, 0, kDefaultPoolset, + fInfoOut); return ret; } - int PrepareCreateCloneMeta(FInfo *fInfoOut, const std::string &newFileName, - std::vector *segInfoOutVec) { + int PrepareCreateCloneMeta(FInfo* fInfoOut, const std::string& newFileName, + std::vector* segInfoOutVec) { fInfoOut->fullPathName = newFileName; fInfoOut->userinfo =
UserInfo_t(mdsRootUser_, mdsRootPassword_); for (int i = 0; i < testFile1AllocSegmentNum; i++) { @@ -539,7 +533,7 @@ class SnapshotCloneServerTest : public ::testing::Test { return LIBCURVE_ERROR::OK; } - int PrepareCreateCloneChunk(const std::vector &segInfoVec, + int PrepareCreateCloneChunk(const std::vector& segInfoVec, bool IsRecover = false) { if (segInfoVec.size() != testFile1AllocSegmentNum) { LOG(ERROR) << "internal error!"; @@ -554,13 +548,14 @@ class SnapshotCloneServerTest : public ::testing::Test { name.chunkIndex_ = i * segmentSize / chunkSize; std::string location = LocationOperator::GenerateS3Location(name.ToDataChunkKey()); - // 由于测试文件每个segment只写了第一个chunk, - // 快照可以做到只转储当前写过的chunk, - // 所以从快照克隆每个segment只Create第一个chunk。 - // 而从文件克隆,由于mds不知道chunk写没写过, - // 所以需要Create全部的chunk。 + // Since only the first chunk of each segment in the test file + // is written, a snapshot dumps only the chunks that were + // actually written, so cloning from the snapshot creates only + // the first chunk of each segment. When cloning from a file, + // however, mds does not know which chunks have been written, + // so all chunks must be created. ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[0]; - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); LOG(INFO) << "CreateCloneChunk, location = " << location @@ -570,8 +565,10 @@ class SnapshotCloneServerTest : public ::testing::Test { << ", seqNum = " << 1 << ", csn = " << 2; int ret = snapClient_->CreateCloneChunk( location, cidInfo, - 1, // 恢复使用快照中chunk的版本号 - 2, // 恢复使用新文件的版本号, 即原文件版本号+1 + 1, // Recover uses the version number of the chunk in the + // snapshot + 2, // Recover uses the version number of the new file, i.e. + // the original file's version number + 1 chunkSize, cb); if (ret != LIBCURVE_ERROR::OK) { return ret; } @@ -584,7 +581,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LocationOperator::GenerateCurveLocation( testFile1_, i * segmentSize + j * chunkSize); ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[j]; - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); LOG(INFO) << "CreateCloneChunk, location = " << location << ", copysetId = " << cidInfo.cpid_ << ", chunkId = " << cidInfo.cid_ << ", seqNum = " << 1 << ", csn = " << 0; - int ret = - snapClient_->CreateCloneChunk(location, cidInfo, - 1, // 克隆使用初始版本号1 - 0, // 克隆使用0 - chunkSize, cb); + int ret = snapClient_->CreateCloneChunk( + location, cidInfo, + 1, // Clone uses initial version number 1 + 0, // Clone uses 0 + chunkSize, cb); if (ret != LIBCURVE_ERROR::OK) { return ret; } @@ -613,14 +610,14 @@ return LIBCURVE_ERROR::OK; } - int PrepareCompleteCloneMeta(const std::string &uuid) { + int PrepareCompleteCloneMeta(const std::string& uuid) { std::string fileName = std::string(cloneTempDir_) + "/" + uuid; int ret = snapClient_->CompleteCloneMeta( fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_)); return ret; } - int PrepareRecoverChunk(const std::vector &segInfoVec, + int PrepareRecoverChunk(const std::vector& segInfoVec, bool IsSnapshot = false) { if (segInfoVec.size() != testFile1AllocSegmentNum) { LOG(ERROR) << "internal error!"; @@ -629,14 +626,15 @@ class SnapshotCloneServerTest : public ::testing::Test { auto tracker = std::make_shared(); if (IsSnapshot) { for (int i = 0; i < testFile1AllocSegmentNum;
- int PrepareRecoverChunk(const std::vector &segInfoVec, + int PrepareRecoverChunk(const std::vector& segInfoVec, bool IsSnapshot = false) { if (segInfoVec.size() != testFile1AllocSegmentNum) { LOG(ERROR) << "internal error!";
@@ -629,14 +626,15 @@ class SnapshotCloneServerTest : public ::testing::Test { auto tracker = std::make_shared(); if (IsSnapshot) { for (int i = 0; i < testFile1AllocSegmentNum; i++) { - // 由于测试文件每个segment只写了第一个chunk, - // 快照可以做到只转储当前写过的chunk, - // 所以从快照克隆每个segment只Recover第一个chunk。 - // 而从文件克隆,由于mds不知道chunk写没写过, - // 所以需要Recover全部的chunk。 + // Each segment of the test file has only its first chunk + // written, and a snapshot dumps only the chunks that were + // actually written, so a recover from a snapshot recovers + // only the first chunk of each segment. A recover from a + // file must recover every chunk, because mds does not know + // which chunks have been written. ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[0]; for (uint64_t k = 0; k < chunkSize / chunkSplitSize; k++) { - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); uint64_t offset = k * chunkSplitSize;
@@ -657,7 +655,7 @@ class SnapshotCloneServerTest : public ::testing::Test { for (uint64_t j = 0; j < segmentSize / chunkSize; j++) { ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[j]; for (uint64_t k = 0; k < chunkSize / chunkSplitSize; k++) { - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); uint64_t offset = k * chunkSplitSize;
@@ -685,44 +683,42 @@ class SnapshotCloneServerTest : public ::testing::Test { return LIBCURVE_ERROR::OK; } - int PrepareCompleteCloneFile(const std::string &fileName) { + int PrepareCompleteCloneFile(const std::string& fileName) { return snapClient_->CompleteCloneFile( fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_)); } - int PrepareChangeOwner(const std::string &fileName) { + int PrepareChangeOwner(const std::string& fileName) { return fileClient_->ChangeOwner( fileName, testUser1_, UserInfo_t(mdsRootUser_, mdsRootPassword_)); } int PrepareRenameCloneFile(uint64_t originId, uint64_t destinationId, - const std::string &fileName, - const std::string &newFileName) { + const std::string& fileName, + const std::string& newFileName) { return snapClient_->RenameCloneFile( UserInfo_t(mdsRootUser_, mdsRootPassword_), originId, destinationId, fileName, newFileName); } - static CurveCluster *cluster_; - static FileClient *fileClient_; - static SnapshotClient *snapClient_; + static CurveCluster* cluster_; + static FileClient* fileClient_; + static SnapshotClient* snapClient_; bool hasSnapshotForTestFile1_ = false; std::string snapIdForTestFile1_; };
-CurveCluster *SnapshotCloneServerTest::cluster_ = nullptr; -FileClient *SnapshotCloneServerTest::fileClient_ = nullptr; -SnapshotClient *SnapshotCloneServerTest::snapClient_ = nullptr; +CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr; +FileClient* SnapshotCloneServerTest::fileClient_ = nullptr; +SnapshotClient* SnapshotCloneServerTest::snapClient_ = nullptr;
-// 未在curve中创建快照阶段,重启恢复 +// Restart and recover in the phase where the snapshot has not yet been +// created in curve
TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenNotCreateSnapOnCurvefs) { std::string uuid1 = UUIDGenerator().GenerateUUID(); - SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, - "snapxxx", 0, chunkSize, - segmentSize, testFile1Length, - 0, 0, kDefaultPoolset, 0, - Status::pending); + SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", 0, + chunkSize, segmentSize, testFile1Length, 0, 0, + kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); pid_t pid = cluster_->RestartSnapshotCloneServer(1);
@@ -739,19 +735,18 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenNotCreateSnapOnCurvefs) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); }
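Every recovery case in this file follows the same arc: write a half-finished task record into the metastore, restart the snapshot/clone server, and assert that the task is driven to completion and the data still checks out. A self-contained sketch of that arc, with the cluster calls reduced to std::function stubs (all names here are illustrative, not the test helpers themselves):

#include <chrono>
#include <functional>
#include <thread>

enum class TaskStatus { pending, done, failed };

// Illustrative restart-recovery harness: inject a pending record, restart
// the server, then poll until the recovered task completes or we give up.
bool TaskRecoversAfterRestart(const std::function<void()>& injectPendingRecord,
                              const std::function<bool()>& restartServer,
                              const std::function<TaskStatus()>& queryStatus) {
    injectPendingRecord();               // e.g. metaStore->AddCloneInfo(...)
    if (!restartServer()) return false;  // e.g. RestartSnapshotCloneServer(1)
    for (int i = 0; i < 600; ++i) {      // poll for up to one minute
        if (queryStatus() == TaskStatus::done) return true;
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
    }
    return false;
}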
-// 已在curve中创建快照,但成功结果未返回,重启恢复 +// The snapshot has been created in curve, but the success result was not +// returned; restart and recover
TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasCreateSnapOnCurvefsNotReturn) { - // 调用client接口创建快照 + // Create a snapshot through the client interface uint64_t seq = 0; snapClient_->CreateSnapShot(testFile1_, UserInfo_t(testUser1_, ""), &seq); std::string uuid1 = UUIDGenerator().GenerateUUID(); - SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, - "snapxxx", 0, chunkSize, - segmentSize, testFile1Length, - 0, 0, kDefaultPoolset, 0, - Status::pending); + SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", 0, + chunkSize, segmentSize, testFile1Length, 0, 0, + kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); pid_t pid = cluster_->RestartSnapshotCloneServer(1);
@@ -768,18 +763,18 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); }
-// 已在curve中创建快照,结果已返回,重启恢复 +// The snapshot has been created in curve and the result was returned; +// restart and recover
TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasCreateSnapOnCurvefsReturn) { - // 调用client接口创建快照 + // Create a snapshot through the client interface uint64_t seq = 0; snapClient_->CreateSnapShot(testFile1_, UserInfo_t(testUser1_, ""), &seq); std::string uuid1 = UUIDGenerator().GenerateUUID(); SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", seq, - chunkSize, segmentSize, testFile1Length, - 0, 0, kDefaultPoolset, 0, - Status::pending); + chunkSize, segmentSize, testFile1Length, 0, 0, + kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); pid_t pid = cluster_->RestartSnapshotCloneServer(1);
@@ -796,7 +791,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); }
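The "success not returned" variants work because the recovery path is idempotent: after the restart the task replays its current step, and a step that already took effect on mds must map an "already done/exists"-style reply to success. A tiny sketch of that convention, with hypothetical error codes standing in for the real ones (the real client distinguishes more cases):

// Hypothetical error codes; the real client has a richer set.
enum class StepErr { kOK, kAlreadyDone, kInternal };

// Replaying a step after a restart is safe if "already applied" counts
// as success; anything else is retried or surfaces as a task failure.
inline bool StepSucceeded(StepErr err) {
    return err == StepErr::kOK || err == StepErr::kAlreadyDone;
}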
-// 已在curve中创建快照阶段,nos上传部分快照,重启恢复 +// The snapshot has been created in curve and part of the snapshot data has +// been uploaded to nos; restart and recover
TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { std::string uuid1; int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1);
@@ -811,7 +807,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { } if (info1.GetSnapshotInfo().GetStatus() == Status::pending) { if (info1.GetSnapProgress() > kProgressTransferSnapshotDataStart) { - // 当进度到达转储的百分比时重启 + // Restart once the progress has reached the data-transfer + // percentage pid_t pid = cluster_->RestartSnapshotCloneServer(1, true); LOG(INFO) << "SnapshotCloneServer 1 restarted, pid = " << pid; ASSERT_GT(pid, 0);
@@ -835,16 +831,14 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); }
-// CreateCloneFile阶段重启,mds上未创建文件 +// Restart during the CreateCloneFile phase, before the file was created on mds
TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneFile"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, 0, 0, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, 0, 0, 0, CloneFileType::kFile, + false, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo);
@@ -859,7 +853,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); }
-// CreateCloneFile阶段重启,mds上创建文件成功未返回 +// Restart during the CreateCloneFile phase; the file was created successfully +// on mds but the result was not returned
TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasCreateCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID();
@@ -869,12 +864,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneHasCreateCloneFileSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, 0, 0, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, 0, 0, 0, CloneFileType::kFile, + false, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo);
@@ -889,7 +882,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); }
-// CreateCloneMeta阶段重启, 在mds上未创建segment +// Restart during the CreateCloneMeta phase, before any segment was created +// on mds
TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1;
@@ -897,12 +890,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneFile(fileName, &fInfoOut)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneMeta"; - CloneInfo cloneInfo(uuid1, testUser1_,
CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -917,7 +908,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上创建segment成功未返回 +// Reboot during the CreateCloneMeta phase, successfully creating segment on mds +// but not returning TEST_F(SnapshotCloneServerTest, TestRecoverCloneCreateCloneMetaSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -931,12 +923,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCreateCloneMetaSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -951,7 +941,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,未在chunkserver上创建clonechunk +// Reboot during the CreateCloneChunk phase, cloneChunk not created on +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -963,12 +954,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { PrepareCreateCloneMeta(&fInfoOut, fileName, &segInfoOutVec)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneChunk"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneChunk, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -983,7 +972,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,在chunkserver上创建部分clonechunk +// Restart the CreateCloneChunk phase and create a partial clone chunk on the +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneCreateCloneChunkSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -999,12 +989,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCreateCloneChunkSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneChunk, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1019,7 +1007,7 @@ 
TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启 +// CompleteCloneMeta phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1033,12 +1021,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneChunk(segInfoOutVec)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCompleteCloneMeta"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1053,7 +1039,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启,同时在mds上调用CompleteCloneMeta成功但未返回 +// The CompleteCloneMeta phase was restarted, and the call to CompleteCloneMeta +// on mds was successful but did not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneCompleteCloneMetaSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1071,12 +1058,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCompleteCloneMetaSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1091,7 +1076,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上未调用RecoverChunk +// RecoverChunk phase restarted, RecoverChunk was not called on chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1107,12 +1092,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneMeta(uuid1)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotRecoverChunk"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRecoverChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kRecoverChunk, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1127,7 +1110,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上部分调用RecoverChunk +// Restart 
the RecoverChunk phase and partially call RecoverChunk on the +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1146,12 +1130,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { std::string dstFile = "/RcvItUser1/TestRecoverCloneRecoverChunkSuccssNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRecoverChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kRecoverChunk, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1166,7 +1148,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启 +// CompleteCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1184,12 +1166,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRecoverChunk(segInfoOutVec)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCompleteCloneFile"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1204,7 +1184,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启,但mds上CompleteCloneFile已成功未返回 +// CompleteCloneFile stage restarted, but CompleteCloneFile successfully did not +// return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneCompleteCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1226,12 +1207,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCompleteCloneFileSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1246,7 +1225,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// ChangeOwner阶段重启 +// ChangeOwner phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1266,12 +1245,10 @@ 
TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneFile(fileName)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotChangeOwner"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kChangeOwner, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kChangeOwner, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1286,7 +1263,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// ChangeOwner阶段重启,但mds上ChangeOwner成功未返回 +// The ChangeOwner phase restarts, but the ChangeOwner on mds successfully did +// not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1309,12 +1287,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { std::string dstFile = "/RcvItUser1/TestRecoverCloneChangeOwnerSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kChangeOwner, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kChangeOwner, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1329,7 +1305,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RenameCloneFile阶段重启 +// RenameCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1351,12 +1327,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareChangeOwner(fileName)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotRenameCloneFile"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRenameCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kRenameCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1371,7 +1345,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RenameCloneFile阶段重启,但mds上已RenameCloneFile成功未返回 +// RenameCloneFile stage restarted, but RenameCloneFile successfully did not +// return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneRenameCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1399,12 +1374,10 @@ TEST_F(SnapshotCloneServerTest, LIBCURVE_ERROR::OK, PrepareRenameCloneFile(fInfoOut.id, fInfoOut.id, fileName, dstFile)); - 
CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRenameCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kRenameCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1419,18 +1392,16 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// 以下为Lazy模式用例 -// CreateCloneFile阶段重启,mds上未创建文件 +// The following are the Lazy pattern use cases +// Reboot during the CreateCloneFile phase, no files were created on the mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); std::string uuid1 = UUIDGenerator().GenerateUUID(); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, 0, 0, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, 0, 0, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1450,7 +1421,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上创建文件成功未返回 +// Reboot during the CreateCloneFile phase, successful file creation on mds but +// not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasCreateCloneFileSuccessNotReturn) { std::string snapId; @@ -1461,12 +1433,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneFile(fileName, &fInfoOut, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, 0, 0, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, 0, 0, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1486,7 +1456,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上未创建segment +// Reboot during the CreateCloneMeta phase, no segment was created on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneMeta) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1496,12 +1466,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneFile(fileName, &fInfoOut, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1521,7 +1489,8 @@ TEST_F(SnapshotCloneServerTest, 
TestRecoverCloneLazyHasNotCreateCloneMeta) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上创建segment成功未返回 +// Reboot during the CreateCloneMeta phase, successfully creating segment on mds +// but not returning TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCreateCloneMetaSuccessNotReturn) { std::string snapId; @@ -1536,12 +1505,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneMeta(&fInfoOut, fileName, &segInfoOutVec)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1561,7 +1528,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,未在chunkserver上创建clonechunk +// Reboot during the CreateCloneChunk phase, cloneChunk not created on +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1575,12 +1543,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneMeta(&fInfoOut, fileName, &segInfoOutVec)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1600,7 +1566,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,在chunkserver上创建部分clonechunk +// Restart the CreateCloneChunk phase and create a partial clone chunk on the +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCreateCloneChunkSuccessNotReturn) { std::string snapId; @@ -1617,12 +1584,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1642,7 +1607,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启 +// CompleteCloneMeta phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneMeta) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1658,12 +1623,10 @@ TEST_F(SnapshotCloneServerTest, 
TestRecoverCloneLazyHasNotCompleteCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1683,7 +1646,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneMeta) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启,同时在mds上调用CompleteCloneMeta成功但未返回 +// The CompleteCloneMeta phase was restarted, and the call to CompleteCloneMeta +// on mds was successful but did not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCompleteCloneMetaSuccessNotReturn) { std::string snapId; @@ -1702,12 +1666,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneMeta(uuid1)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1727,7 +1689,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// ChangeOwner阶段重启 +// ChangeOwner phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1745,12 +1707,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneMeta(uuid1)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kChangeOwner, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, CloneStep::kChangeOwner, + CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1770,7 +1730,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// ChangeOwner阶段重启,但mds上ChangeOwner成功未返回 +// The ChangeOwner phase restarts, but the ChangeOwner on mds successfully did +// not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyChangeOwnerSuccessNotReturn) { std::string snapId; @@ -1791,12 +1752,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareChangeOwner(fileName)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kChangeOwner, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, 
CloneStep::kChangeOwner, + CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1816,7 +1775,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RenameCloneFile阶段重启 +// RenameCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1836,12 +1795,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareChangeOwner(fileName)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRenameCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRenameCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1861,7 +1818,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RenameCloneFile阶段重启,但mds上已RenameCloneFile成功未返回 +// RenameCloneFile stage restarted, but RenameCloneFile successfully did not +// return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyRenameCloneFileSuccessNotReturn) { std::string snapId; @@ -1885,12 +1843,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRenameCloneFile(fInfoOut.id, testFd1_, fileName, testFile1_)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRenameCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRenameCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1910,7 +1866,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上未调用RecoverChunk +// RecoverChunk phase restarted, RecoverChunk was not called on chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1933,12 +1889,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRenameCloneFile(fInfoOut.id, testFd1_, fileName, testFile1_)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRecoverChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRecoverChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1953,7 +1907,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上部分调用RecoverChunk +// Restart the RecoverChunk phase and partially call RecoverChunk on the +// chunkserver TEST_F(SnapshotCloneServerTest, 
TestRecoverCloneLazyRecoverChunkSuccssNotReturn) { std::string snapId; @@ -1979,12 +1934,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRecoverChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRecoverChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRecoverChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1999,7 +1952,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启 +// CompleteCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -2024,12 +1977,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRecoverChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -2044,7 +1995,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启,但mds上CompleteCloneFile已成功未返回 +// CompleteCloneFile stage restarted, but CompleteCloneFile successfully did not +// return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCompleteCloneFileSuccessNotReturn) { std::string snapId; @@ -2072,12 +2024,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneFile(testFile1_)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp index f56bae71e7..94d648ab86 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp @@ -20,22 +20,23 @@ * Author: hzsunjianliang */ -#include -#include -#include #include +#include +#include +#include + #include // NOLINT #include // NOLINT -#include "test/integration/cluster_common/cluster.h" #include "src/client/libcurve_file.h" #include "src/snapshotcloneserver/snapshotclone_server.h" +#include "test/integration/cluster_common/cluster.h" #include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" #include "test/util/config_generator.h" const 
std::string kTestPrefix = "MainSCSTest"; // NOLINT -// 一些常数定义 +// Some constant definitions const char* cloneTempDir_ = "/clone"; const char* mdsRootUser_ = "root"; const char* mdsRootPassword_ = "root_password"; @@ -56,13 +57,12 @@ const std::string kEtcdName = kTestPrefix; // NOLINT const std::string kMdsConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_mds.conf"; -const std::string kSnapClientConfigPath = // NOLINT +const std::string kSnapClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_snap_client.conf"; -const std::string kS3ConfigPath = // NOLINT - "./test/integration/snapshotcloneserver/config/" + kTestPrefix + - "_s3.conf"; -const std::string kSCSConfigPath = // NOLINT +const std::string kS3ConfigPath = // NOLINT + "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_s3.conf"; +const std::string kSCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_scs.conf"; @@ -81,11 +81,11 @@ const std::vector mdsConfigOptions{ }; const std::vector mdsConf1{ - { "--graceful_quit_on_sigterm" }, + {"--graceful_quit_on_sigterm"}, std::string("--confPath=") + kMdsConfigPath, std::string("--log_dir=") + kLogPath, std::string("--segmentSize=") + std::to_string(segmentSize), - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector snapClientConfigOptions{ @@ -119,7 +119,7 @@ const std::vector snapshotcloneserverConfigOptions{ const std::vector snapshotcloneConf{ std::string("--conf=") + kSCSConfigPath, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; namespace curve { @@ -135,11 +135,11 @@ class SnapshotCloneServerMainTest : public ::testing::Test { cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); - // 初始化db + // Initialize db std::string rmcmd = "rm -rf " + std::string(kEtcdName) + ".etcd"; system(rmcmd.c_str()); - // 启动etcd + // Start etcd pid_t pid = cluster_->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, std::vector{"--name=" + std::string(kEtcdName)}); @@ -150,7 +150,7 @@ class SnapshotCloneServerMainTest : public ::testing::Test { cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions); - // 启动一个mds + // Start an mds pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; @@ -201,7 +201,7 @@ TEST_F(SnapshotCloneServerMainTest, testmain) { std::this_thread::sleep_for(std::chrono::seconds(2)); - // 测试验证是否状态为active + // Test and verify if the status is active // "curl "127.0.0.1:port/vars/snapshotcloneserver_status""; std::string cmd = "curl \"127.0.0.1:" + std::string(kSnapshotCloneServerDummyServerPort) + diff --git a/test/kvstorageclient/etcdclient_test.cpp b/test/kvstorageclient/etcdclient_test.cpp index ff92a579f3..8bb7f66138 100644 --- a/test/kvstorageclient/etcdclient_test.cpp +++ b/test/kvstorageclient/etcdclient_test.cpp @@ -20,18 +20,20 @@ * Author: lixiaocui1 */ -#include #include -#include //NOLINT +#include + #include //NOLINT #include #include -#include "src/kvstorageclient/etcd_client.h" -#include "src/mds/nameserver2/helper/namespace_helper.h" -#include "src/common/timeutility.h" +#include //NOLINT + +#include "proto/nameserver2.pb.h" #include "src/common/concurrent/concurrent.h" +#include "src/common/timeutility.h" +#include "src/kvstorageclient/etcd_client.h" #include "src/mds/common/mds_define.h" -#include "proto/nameserver2.pb.h" +#include 
"src/mds/nameserver2/helper/namespace_helper.h" namespace curve { namespace kvstorage { @@ -43,7 +45,7 @@ using ::curve::mds::NameSpaceStorageCodec; using ::curve::mds::PageFileChunkInfo; using ::curve::mds::PageFileSegment; -// 接口测试 +// Interface testing class TestEtcdClinetImp : public ::testing::Test { protected: TestEtcdClinetImp() {} @@ -63,8 +65,9 @@ class TestEtcdClinetImp : public ::testing::Test { ASSERT_TRUE(false); } else if (0 == etcdPid) { /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, + * as it may cause deadlock!!! */ ASSERT_EQ(0, execlp("etcd", "etcd", "--listen-client-urls", @@ -75,7 +78,7 @@ class TestEtcdClinetImp : public ::testing::Test { exit(0); } - // 一定时间内尝试init直到etcd完全起来 + // Try init for a certain period of time until etcd is fully recovered uint64_t now = ::curve::common::TimeUtility::GetTimeofDaySec(); bool initSuccess = false; while (::curve::common::TimeUtility::GetTimeofDaySec() - now <= 50) { @@ -108,8 +111,8 @@ class TestEtcdClinetImp : public ::testing::Test { TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { // 1. put file - // - file0~file9 put到etcd中 - // - file6有快照 + // - file0~file9 put into etcd + // - file6 has a snapshot std::map keyMap; std::map fileName; FileInfo fileInfo7, fileInfo8; @@ -170,7 +173,7 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { } } - // 2. get file, 可以正确获取并解码file0~file9 + // 2. get file, which can correctly obtain and decode file0~file9 for (int i = 0; i < keyMap.size(); i++) { std::string out; int errCode = client_->Get(keyMap[i], &out); @@ -180,7 +183,7 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(fileName[i], fileinfo.filename()); } - // 3. list file, 可以list到file0~file9 + // 3. list file, which can be listed to file0~file9 std::vector listRes; std::vector> listRes2; int errCode = client_->List("01", "02", &listRes2); @@ -193,7 +196,8 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(fileName[i], finfo.filename()); } - // 4. delete file, 删除file0~file4,这部分文件不能再获取到 + // 4. Delete file, delete file0~file4, these files cannot be retrieved + // anymore for (int i = 0; i < keyMap.size() / 2; i++) { ASSERT_EQ(EtcdErrCode::EtcdOK, client_->Delete(keyMap[i])); // can not get delete file @@ -201,13 +205,13 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(EtcdErrCode::EtcdKeyNotExist, client_->Get(keyMap[i], &out)); } - // 5. rename file: rename file9 ~ file10, file10本来不存在 - Operation op1{OpType::OpDelete, const_cast(keyMap[9].c_str()), - const_cast(fileInfo9.c_str()), + // 5. Rename file: rename file9~file10, file10 does not originally exist + Operation op1{OpType::OpDelete, const_cast(keyMap[9].c_str()), + const_cast(fileInfo9.c_str()), static_cast(keyMap[9].size()), static_cast(fileInfo9.size())}; - Operation op2{OpType::OpPut, const_cast(fileKey10.c_str()), - const_cast(fileInfo10.c_str()), + Operation op2{OpType::OpPut, const_cast(fileKey10.c_str()), + const_cast(fileInfo10.c_str()), static_cast(fileKey10.size()), static_cast(fileInfo10.size())}; std::vector ops{op1, op2}; @@ -222,12 +226,12 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(fileName10, fileinfo.filename()); // 6. 
// 6. snapshot of keyMap[6] - Operation op3{OpType::OpPut, const_cast(keyMap[6].c_str()), - const_cast(fileInfo6.c_str()), + Operation op3{OpType::OpPut, const_cast(keyMap[6].c_str()), + const_cast(fileInfo6.c_str()), static_cast(keyMap[6].size()), static_cast(fileInfo6.size())}; - Operation op4{OpType::OpPut, const_cast(snapshotKey6.c_str()), - const_cast(snapshotInfo6.c_str()), + Operation op4{OpType::OpPut, const_cast(snapshotKey6.c_str()), + const_cast(snapshotInfo6.c_str()), static_cast(snapshotKey6.size()), static_cast(snapshotInfo6.size())}; ops.clear();
@@ -258,9 +262,8 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ("200", out); // 8. rename file: rename file7 ~ file8 - Operation op8{OpType::OpDelete, const_cast(keyMap[7].c_str()), - const_cast(""), static_cast(keyMap[7].size()), - 0}; + Operation op8{OpType::OpDelete, const_cast(keyMap[7].c_str()), + const_cast(""), static_cast(keyMap[7].size()), 0}; FileInfo newFileInfo7; newFileInfo7.CopyFrom(fileInfo7); newFileInfo7.set_parentid(fileInfo8.parentid());
@@ std::string encodeNewFileInfo7; ASSERT_TRUE(newFileInfo7.SerializeToString(&encodeNewFileInfo7)); Operation op9{OpType::OpPut, - const_cast(encodeNewFileInfo7Key.c_str()), - const_cast(encodeNewFileInfo7.c_str()), + const_cast(encodeNewFileInfo7Key.c_str()), + const_cast(encodeNewFileInfo7.c_str()), static_cast(encodeNewFileInfo7Key.size()), static_cast(encodeNewFileInfo7.size())}; ops.clear(); ops.emplace_back(op8); ops.emplace_back(op9); ASSERT_EQ(EtcdErrCode::EtcdOK, client_->TxnN(ops)); - // 不能获取 file7 + // file7 can no longer be fetched ASSERT_EQ(EtcdErrCode::EtcdKeyNotExist, client_->Get(keyMap[7], &out)); - // 成功获取rename以后的file7 + // the renamed file7 is fetched successfully ASSERT_EQ(EtcdErrCode::EtcdOK, client_->Get(keyMap[8], &out)); ASSERT_TRUE(NameSpaceStorageCodec::DecodeFileInfo(out, &fileinfo)); ASSERT_EQ(newFileInfo7.filename(), fileinfo.filename());
@@ -304,8 +307,8 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(EtcdErrCode::EtcdDeadlineExceeded, client_->TxnN(ops)); client_->SetTimeout(5000); - Operation op5{OpType(5), const_cast(snapshotKey6.c_str()), - const_cast(snapshotInfo6.c_str()), + Operation op5{OpType(5), const_cast(snapshotKey6.c_str()), + const_cast(snapshotInfo6.c_str()), static_cast(snapshotKey6.size()), static_cast(snapshotInfo6.size())}; ops.clear(); }
TEST_F(TestEtcdClinetImp, test_ListWithLimitAndRevision) { - // 准备一批数据 + // Prepare a batch of data // "011" "013" "015" "017" "019" for (int i = 1; i <= 9; i += 2) { std::string key = std::string("01") + std::to_string(i); ASSERT_EQ(EtcdErrCode::EtcdOK, client_->Put(key, value)); } ASSERT_EQ(EtcdErrCode::EtcdOK, client_->Put(key, value)); } - // 获取当前revision - // 通过GetCurrentRevision获取 + // Get the current revision + // through GetCurrentRevision int64_t curRevision; ASSERT_EQ(EtcdErrCode::EtcdOK, client_->GetCurrentRevision(&curRevision)); LOG(INFO) << "get current revision: " << curRevision; - // 根据当前revision获取前5个key-value + // Fetch the first 5 key-value pairs at the current revision std::vector out; std::string lastKey; int res = client_->ListWithLimitAndRevision("01", "", 5, curRevision, &out, &lastKey); ASSERT_EQ(value, out[i - 1]); }
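Because ListWithLimitAndRevision reads at one pinned revision, paging with a limit stays consistent even while writers keep putting keys; each call hands back the last key it saw, which seeds the next page. A sketch of draining a prefix this way, assuming success is reported as EtcdErrCode::EtcdOK and that a page starting at lastKey repeats that key as its first element (both assumptions, inferred from the test):

#include <string>
#include <vector>

// Illustrative pager over ListWithLimitAndRevision. Later pages start at
// the previous lastKey, so their first element is a duplicate and dropped.
std::vector<std::string> ListAllAtRevision(EtcdClientImp* client,
                                           const std::string& prefix,
                                           int64_t revision, int limit) {
    std::vector<std::string> all;
    std::string startKey = prefix, lastKey;
    bool first = true;
    while (true) {
        std::vector<std::string> page;
        int res = client->ListWithLimitAndRevision(startKey, "", limit,
                                                   revision, &page, &lastKey);
        if (res != EtcdErrCode::EtcdOK) break;  // simplified error handling
        if (!first && !page.empty()) page.erase(page.begin());  // drop overlap
        all.insert(all.end(), page.begin(), page.end());
        // A short page means the range is exhausted.
        if (page.size() + (first ? 0 : 1) < static_cast<size_t>(limit)) break;
        startKey = lastKey;
        first = false;
    }
    return all;
}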
- // 根据当前revision获取后5个key-value + // Fetch the last 5 key-value pairs at the current revision out.clear(); res = client_->ListWithLimitAndRevision(lastKey, "", 5, curRevision, &out, &lastKey);
@@ -395,37 +398,41 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { uint64_t leaderOid; { - // 1. leader1竞选成功,client退出后leader2竞选成功 + // 1. leader1 wins the election; after its client exits, leader2 + // wins the election LOG(INFO) << "test case1 start..."; - // 启动一个线程竞选leader + // Start a thread to campaign for leader int electionTimeoutMs = 0; uint64_t targetOid; common::Thread thread1(&EtcdClientImp::CampaignLeader, client_, pfx, leaderName1, sessionnInterSec, electionTimeoutMs, &targetOid); - // 等待线程1执行完成, 线程1执行完成就说明竞选成功, - // 否则electionTimeoutMs为0的情况下会一直hung在里面 + // thread1 finishing means the campaign succeeded; otherwise, with + // electionTimeoutMs == 0, it would stay blocked in the campaign + // forever thread1.join(); LOG(INFO) << "thread 1 exit."; client_->CloseClient(); - // 启动第二个线程竞选leader + // Start a second thread to campaign for leader auto client2 = std::make_shared(); ASSERT_EQ(0, client2->Init(conf, dialtTimeout, retryTimes)); common::Thread thread2(&EtcdClientImp::CampaignLeader, client2, pfx, leaderName2, sessionnInterSec, electionTimeoutMs, &leaderOid); - // 线程1退出后,leader2会当选 + // After thread1 exits, leader2 is elected thread2.join(); LOG(INFO) << "thread 2 exit."; - // leader2为leader的情况下此时观察leader1的key应该发现session过期 + // With leader2 as leader, observing leader1's key at this point + // should find that its session has expired ASSERT_EQ(EtcdErrCode::EtcdObserverLeaderInternal, client2->LeaderObserve(targetOid, leaderName1)); client2->CloseClient(); } { - // 2. leader1竞选成功后,不退出; leader2竞选超时 + // 2. leader1 wins the election and does not step down; leader2's + // campaign times out LOG(INFO) << "test case2 start..."; int electionTimeoutMs = 1000; auto client1 = std::make_shared(); thread1.join(); LOG(INFO) << "thread 1 exit."; - // leader2再次竞选 + // leader2 campaigns again common::Thread thread2(&EtcdClientImp::CampaignLeader, client1, pfx, leaderName2, sessionnInterSec, electionTimeoutMs, &leaderOid); thread2.join(); LOG(INFO) << "thread 2 exit."; client1->CloseClient(); }
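Both cases above lean on one property of CampaignLeader: with electionTimeoutMs == 0 the call blocks until the campaign is won, so joining the campaign thread is itself the success assertion, while a nonzero timeout lets a loser return defeated. A stand-alone sketch of the blocking variant, with the campaign body stubbed (nothing here is the real EtcdClientImp):

#include <cstdint>
#include <cstdio>
#include <thread>

// Stub for a blocking campaign: with no election timeout, the call only
// returns once this candidate has become leader.
void CampaignBlocking(const char* /*electionPrefix*/,
                      const char* /*leaderName*/, uint64_t* leaderOid) {
    // ... blocks inside the etcd election until elected ...
    *leaderOid = 1;  // hypothetical leadership handle set on success
}

int main() {
    uint64_t oid = 0;
    std::thread campaign(CampaignBlocking, "/election", "leader1", &oid);
    campaign.join();  // returning from join() == the campaign succeeded
    std::printf("elected, oid = %llu\n",
                static_cast<unsigned long long>(oid));
}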
{ - // 3. + // 3. leader1 wins the election, then its key is deleted; leader2 + // wins the election; observing leader1 finds the leader changed; + // while observing leader2, etcd crashes LOG(INFO) << "test case3 start..."; uint64_t targetOid; int electionTimeoutMs = 0;
@@ -458,17 +466,17 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { &targetOid); thread1.join(); LOG(INFO) << "thread 1 exit."; - // leader1卸任leader + // leader1 resigns as leader ASSERT_EQ(EtcdErrCode::EtcdLeaderResiginSuccess, client1->LeaderResign(targetOid, 1000)); - // leader2当选 + // leader2 is elected common::Thread thread2(&EtcdClientImp::CampaignLeader, client1, pfx, leaderName2, sessionnInterSec, electionTimeoutMs, &leaderOid); thread2.join(); - // leader2启动线程observe + // leader2 starts an observe thread common::Thread thread3(&EtcdClientImp::LeaderObserve, client1, targetOid, leaderName2); std::this_thread::sleep_for(std::chrono::seconds(1));
@@ -477,7 +485,7 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { client1->CloseClient(); LOG(INFO) << "thread 2 exit."; - // 使得etcd完全停掉 + // Let etcd stop completely std::this_thread::sleep_for(std::chrono::seconds(2)); } }
@@ TEST_F(TestEtcdClinetImp, test_ListSegment) { segment.set_logicalpoolid(11); int size = segment.segmentsize() / segment.chunksize(); for (uint32_t i = 0; i < size; i++) { - PageFileChunkInfo *chunkinfo = segment.add_chunks(); + PageFileChunkInfo* chunkinfo = segment.add_chunks(); chunkinfo->set_chunkid(i + 1); chunkinfo->set_copysetid(i + 1); } - // 放入segment,前三个属于文件1,后四个属于文件2 + // Put in segments: the first three belong to file1 and the last + // four to file2 uint64_t id1 = 101; uint64_t id2 = 100001; for (uint32_t i = 0; i < 7; ++i) {
@@ TEST_F(TestEtcdClinetImp, test_ListSegment) { LOG(INFO) << segment.startoffset(); } - // 获取文件1的segment + // Fetch the segments of file1 std::string startKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id1, 0); std::string endKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id1 + 1, 0);
@@ TEST_F(TestEtcdClinetImp, test_ListSegment) { ASSERT_EQ(i * 1024, segment2.startoffset()); } - // 获取文件2的segment + // Fetch the segments of file2 startKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id2, 0); endKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id2 + 1, 0); out.clear();
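test_ListSegment works because EncodeSegmentStoreKey(id, offset) orders keys by file id first, so every segment of file id lives in the half-open key range [encode(id, 0), encode(id + 1, 0)). A sketch of that range trick with a stand-in fixed-width encoder; the real codec is different, only the range construction is the point:

#include <cstdint>
#include <cstdio>
#include <string>

// Stand-in encoder: zero-padded "<id>/<offset>" makes lexicographic
// order match numeric (id, offset) order. Curve's real codec differs.
std::string EncodeSegKey(uint64_t id, uint64_t offset) {
    char buf[48];
    std::snprintf(buf, sizeof(buf), "%016llx/%016llx",
                  static_cast<unsigned long long>(id),
                  static_cast<unsigned long long>(offset));
    return buf;
}

int main() {
    uint64_t fileId = 101;
    std::string startKey = EncodeSegKey(fileId, 0);    // first key of file 101
    std::string endKey = EncodeSegKey(fileId + 1, 0);  // first key of file 102
    // List(startKey, endKey, &out) then returns exactly file 101's segments.
    std::printf("range: [%s, %s)\n", startKey.c_str(), endKey.c_str());
}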
@@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { HeartbeatInfo info; { - // chunkserver首次更新heartbeatInfo + // Chunkservers update heartbeatInfo for the first time checker->UpdateLastReceivedHeartbeatTime(1, steady_clock::now()); checker->UpdateLastReceivedHeartbeatTime( 2, steady_clock::now() - std::chrono::milliseconds(4000)); @@ -65,8 +67,7 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { 6, steady_clock::now() - std::chrono::milliseconds(10000)); checker->UpdateLastReceivedHeartbeatTime( 7, steady_clock::now() - std::chrono::milliseconds(10000)); - checker->UpdateLastReceivedHeartbeatTime( - 8, steady_clock::now()); + checker->UpdateLastReceivedHeartbeatTime(8, steady_clock::now()); checker->UpdateLastReceivedHeartbeatTime( 9, steady_clock::now() - std::chrono::milliseconds(4000)); checker->UpdateLastReceivedHeartbeatTime( @@ -94,30 +95,31 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { } { - // chunkserver-1 更新为online - // chunkserver-2 心跳miss,保持unstable - // chunkserver-3,chunkserver-5,chunkserver-6心跳offline, - // chunkserver-3的retired状态会被更新, 从心跳map中移除 - // chunkserver-5已经是retired状态,无需更新 - // chunkserver-6 get info失败, 未成功更新状态 - // chunnkserver-7 update失败, 未成功更新状态 - // chunkserver-8, pendding && online, 更新为onLine - // chunkserver-9, pendding && unstable, 更新为retired - // chunkserver-10, pendding && offline, 更新为retired + // chunkserver-1 is updated to online + // chunkserver-2 misses its heartbeat and stays unstable + // chunkserver-3, chunkserver-5, chunkserver-6 heartbeats are offline: + // chunkserver-3's retired status is updated and it is removed from + // the heartbeat map; chunkserver-5 is already retired and needs + // no update; chunkserver-6 get info failed, so its status is not + // updated; chunkserver-7 update failed, so its status is not + // updated; chunkserver-8, pendding && online, is updated to + // online; chunkserver-9, pendding && unstable, is updated to retired; + // chunkserver-10, pendding && offline, is updated to retired EXPECT_CALL(*topology, UpdateChunkServerOnlineState(_, _)) - .Times(7).WillRepeatedly(Return(kTopoErrCodeSuccess)); - ChunkServer cs2(2, "", "", 1, "", 0, "", - ChunkServerStatus::READWRITE, OnlineState::UNSTABLE); - ChunkServer cs3(3, "", "", 1, "", 0, "", - ChunkServerStatus::READWRITE, OnlineState::UNSTABLE); - ChunkServer cs5(5, "", "", 1, "", 0, "", - ChunkServerStatus::RETIRED, OnlineState::UNSTABLE); - ChunkServer cs7(7, "", "", 1, "", 0, "", - ChunkServerStatus::READWRITE, OnlineState::UNSTABLE); - ChunkServer cs9(9, "", "", 1, "", 0, "", - ChunkServerStatus::PENDDING, OnlineState::UNSTABLE); - ChunkServer cs10(10, "", "", 1, "", 0, "", - ChunkServerStatus::PENDDING, OnlineState::UNSTABLE); + .Times(7) + .WillRepeatedly(Return(kTopoErrCodeSuccess)); + ChunkServer cs2(2, "", "", 1, "", 0, "", ChunkServerStatus::READWRITE, + OnlineState::UNSTABLE); + ChunkServer cs3(3, "", "", 1, "", 0, "", ChunkServerStatus::READWRITE, + OnlineState::UNSTABLE); + ChunkServer cs5(5, "", "", 1, "", 0, "", ChunkServerStatus::RETIRED, + OnlineState::UNSTABLE); + ChunkServer cs7(7, "", "", 1, "", 0, "", ChunkServerStatus::READWRITE, + OnlineState::UNSTABLE); + ChunkServer cs9(9, "", "", 1, "", 0, "", ChunkServerStatus::PENDDING, + OnlineState::UNSTABLE); + ChunkServer cs10(10, "", "", 1, "", 0, "", ChunkServerStatus::PENDDING, + OnlineState::UNSTABLE); EXPECT_CALL(*topology, GetChunkServer(2, _)) .WillOnce(DoAll(SetArgPointee<1>(cs2), Return(true))); EXPECT_CALL(*topology, GetChunkServer(3, _)) @@ -128,8
+130,7 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { .WillOnce(Return(std::vector{})); EXPECT_CALL(*topology, GetChunkServer(5, _)) .WillOnce(DoAll(SetArgPointee<1>(cs5), Return(true))); - EXPECT_CALL(*topology, GetChunkServer(6, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*topology, GetChunkServer(6, _)).WillOnce(Return(false)); EXPECT_CALL(*topology, GetChunkServer(7, _)) .WillOnce(DoAll(SetArgPointee<1>(cs7), Return(true))); EXPECT_CALL(*topology, GetChunkServer(9, _)) @@ -164,15 +165,13 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { } { - // chunkserver 2, 6 ,7 收到心跳 - checker->UpdateLastReceivedHeartbeatTime( - 2, steady_clock::now()); - checker->UpdateLastReceivedHeartbeatTime( - 6, steady_clock::now()); - checker->UpdateLastReceivedHeartbeatTime( - 7, steady_clock::now()); + // chunkservers 2, 6, and 7 received heartbeats + checker->UpdateLastReceivedHeartbeatTime(2, steady_clock::now()); + checker->UpdateLastReceivedHeartbeatTime(6, steady_clock::now()); + checker->UpdateLastReceivedHeartbeatTime(7, steady_clock::now()); EXPECT_CALL(*topology, UpdateChunkServerOnlineState(_, _)) - .Times(3).WillRepeatedly(Return(kTopoErrCodeSuccess)); + .Times(3) + .WillRepeatedly(Return(kTopoErrCodeSuccess)); checker->CheckHeartBeatInterval(); ASSERT_TRUE(checker->GetHeartBeatInfo(2, &info)); ASSERT_EQ(OnlineState::ONLINE, info.state); diff --git a/test/mds/heartbeat/heartbeat_manager_test.cpp b/test/mds/heartbeat/heartbeat_manager_test.cpp index 54c4397287..6f1b539405 100644 --- a/test/mds/heartbeat/heartbeat_manager_test.cpp +++ b/test/mds/heartbeat/heartbeat_manager_test.cpp @@ -20,52 +20,54 @@ * Author: lixiaocui */ -#include +#include "src/mds/heartbeat/heartbeat_manager.h" + #include +#include #include -#include "src/mds/heartbeat/heartbeat_manager.h" -#include "src/mds/heartbeat/chunkserver_healthy_checker.h" + #include "src/common/timeutility.h" +#include "src/mds/heartbeat/chunkserver_healthy_checker.h" +#include "test/mds/heartbeat/common.h" #include "test/mds/mock/mock_coordinator.h" -#include "test/mds/mock/mock_topology.h" #include "test/mds/mock/mock_topoAdapter.h" -#include "test/mds/heartbeat/common.h" +#include "test/mds/mock/mock_topology.h" -using ::testing::Return; -using ::testing::SetArgPointee; -using ::testing::DoAll; -using ::testing::_; using ::curve::mds::topology::MockTopology; using ::curve::mds::topology::MockTopologyStat; +using ::testing::_; +using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace mds { namespace heartbeat { class TestHeartbeatManager : public ::testing::Test { protected: - TestHeartbeatManager() {} - ~TestHeartbeatManager() {} - - void SetUp() override { - HeartbeatOption option; - option.cleanFollowerAfterMs = 0; - option.heartbeatMissTimeOutMs = 10000; - option.offLineTimeOutMs = 30000; - option.mdsStartTime = steady_clock::now(); - topology_ = std::make_shared(); - coordinator_ = std::make_shared(); - topologyStat_ = std::make_shared(); - heartbeatManager_ = std::make_shared( - option, topology_, topologyStat_, coordinator_); - } - - void TearDown() override {} + TestHeartbeatManager() {} + ~TestHeartbeatManager() {} + + void SetUp() override { + HeartbeatOption option; + option.cleanFollowerAfterMs = 0; + option.heartbeatMissTimeOutMs = 10000; + option.offLineTimeOutMs = 30000; + option.mdsStartTime = steady_clock::now(); + topology_ = std::make_shared(); + coordinator_ = std::make_shared(); + topologyStat_ = std::make_shared(); + heartbeatManager_ =
std::make_shared( + option, topology_, topologyStat_, coordinator_); + } + + void TearDown() override {} protected: - std::shared_ptr topology_; - std::shared_ptr topologyStat_; - std::shared_ptr coordinator_; - std::shared_ptr heartbeatManager_; + std::shared_ptr topology_; + std::shared_ptr topologyStat_; + std::shared_ptr coordinator_; + std::shared_ptr heartbeatManager_; }; TEST_F(TestHeartbeatManager, test_stop_and_run) { @@ -124,9 +126,10 @@ TEST_F(TestHeartbeatManager, test_checkReuqest_abnormal) { ASSERT_EQ(0, response.needupdatecopysets_size()); // 7. startTime not initialized - // TODO(lixiaocui): 后续考虑心跳加上错误码 - ::curve::mds::topology::ChunkServer normalCs( - 1, "hello", "", 1, "192.168.10.1", 9000, ""); + // TODO(lixiaocui): Consider adding an error code to the heartbeat in the + // future + ::curve::mds::topology::ChunkServer normalCs(1, "hello", "", 1, + "192.168.10.1", 9000, ""); EXPECT_CALL(*topology_, GetChunkServer(_, _)) .WillOnce(DoAll(SetArgPointee<1>(normalCs), Return(true))); heartbeatManager_->ChunkServerHeartbeat(req, &response); @@ -138,7 +141,7 @@ TEST_F(TestHeartbeatManager, test_checkReuqest_abnormal) { EXPECT_CALL(*topology_, GetChunkServer(_, _)) .WillOnce(DoAll(SetArgPointee<1>(normalCs), Return(true))); EXPECT_CALL(*topology_, - UpdateChunkServerStartUpTime(t, req.chunkserverid())) + UpdateChunkServerStartUpTime(t, req.chunkserverid())) .WillOnce(Return(::curve::mds::topology::kTopoErrCodeSuccess)); heartbeatManager_->ChunkServerHeartbeat(req, &response); ASSERT_EQ(0, response.needupdatecopysets_size()); @@ -148,7 +151,7 @@ TEST_F(TestHeartbeatManager, test_checkReuqest_abnormal) { EXPECT_CALL(*topology_, GetChunkServer(_, _)) .WillOnce(DoAll(SetArgPointee<1>(normalCs), Return(true))); EXPECT_CALL(*topology_, - UpdateChunkServerStartUpTime(0, req.chunkserverid())) + UpdateChunkServerStartUpTime(0, req.chunkserverid())) .WillOnce(Return(::curve::mds::topology::kTopoErrCodeSuccess)); heartbeatManager_->ChunkServerHeartbeat(req, &response); ASSERT_EQ(0, response.needupdatecopysets_size()); @@ -394,8 +397,7 @@ TEST_F(TestHeartbeatManager, 2, "hello", "", 1, "192.168.10.2", 9000, "", ::curve::mds::topology::ChunkServerStatus::READWRITE); EXPECT_CALL(*topology_, GetChunkServerNotRetired("192.168.10.2", _, _)) - .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), - Return(true))); + .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), Return(true))); EXPECT_CALL(*topology_, GetChunkServerNotRetired("192.168.10.1", _, _)) .WillOnce(DoAll(SetArgPointee<2>(chunkServer1), Return(true))); ::curve::mds::topology::ChunkServer chunkServer3( @@ -450,8 +452,7 @@ TEST_F(TestHeartbeatManager, 2, "hello", "", 1, "192.168.10.2", 9000, "", ::curve::mds::topology::ChunkServerStatus::READWRITE); EXPECT_CALL(*topology_, GetChunkServerNotRetired("192.168.10.2", _, _)) - .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), - Return(true))); + .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), Return(true))); EXPECT_CALL(*topology_, GetChunkServerNotRetired("192.168.10.1", _, _)) .WillOnce(DoAll(SetArgPointee<2>(chunkServer1), Return(true))); ::curve::mds::topology::ChunkServer chunkServer3( @@ -509,8 +510,7 @@ TEST_F(TestHeartbeatManager, 2, "hello", "", 1, "192.168.10.2", 9000, "", ::curve::mds::topology::ChunkServerStatus::READWRITE); EXPECT_CALL(*topology_, GetChunkServerNotRetired("192.168.10.2", _, _)) - .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), - Return(true))); + .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), Return(true))); EXPECT_CALL(*topology_, 
GetChunkServerNotRetired("192.168.10.1", _, _)) .WillOnce(DoAll(SetArgPointee<2>(chunkServer1), Return(true))); ::curve::mds::topology::ChunkServer chunkServer3( @@ -626,7 +626,8 @@ TEST_F(TestHeartbeatManager, test_chunkServer_heartbeat_get_copySetInfo_err) { .WillOnce(DoAll(SetArgPointee<2>(chunkServer2), Return(true))) .WillOnce(DoAll(SetArgPointee<2>(chunkServer3), Return(true))); EXPECT_CALL(*topology_, GetCopySet(_, _)) - .Times(2).WillRepeatedly(Return(false)); + .Times(2) + .WillRepeatedly(Return(false)); heartbeatManager_->ChunkServerHeartbeat(request, &response); ASSERT_EQ(1, response.needupdatecopysets_size()); ASSERT_EQ(1, response.needupdatecopysets(0).logicalpoolid()); @@ -634,8 +635,7 @@ TEST_F(TestHeartbeatManager, test_chunkServer_heartbeat_get_copySetInfo_err) { ASSERT_EQ(0, response.needupdatecopysets(0).epoch()); } -TEST_F(TestHeartbeatManager, - test_handle_copySetInfo_stale_epoch_update_err) { +TEST_F(TestHeartbeatManager, test_handle_copySetInfo_stale_epoch_update_err) { auto request = GetChunkServerHeartbeatRequestForTest(); ChunkServerHeartbeatResponse response; ::curve::mds::topology::ChunkServer chunkServer1( @@ -937,5 +937,3 @@ TEST_F(TestHeartbeatManager, test_patrol_copySetInfo_return_order) { } // namespace heartbeat } // namespace mds } // namespace curve - - diff --git a/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp b/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp index 11c70f8572..2a388c8944 100644 --- a/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp +++ b/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp @@ -20,23 +20,26 @@ * Author: lixiaocui */ +#include "src/mds/nameserver2/allocstatistic/alloc_statistic_helper.h" + #include + #include -#include "src/mds/nameserver2/helper/namespace_helper.h" -#include "src/mds/nameserver2/allocstatistic/alloc_statistic_helper.h" + #include "src/common/namespace_define.h" +#include "src/mds/nameserver2/helper/namespace_helper.h" #include "test/mds/mock/mock_etcdclient.h" using ::testing::_; -using ::testing::Return; -using ::testing::SetArgPointee; using ::testing::DoAll; using ::testing::Matcher; +using ::testing::Return; +using ::testing::SetArgPointee; -using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTALLOCSIZEKEY; -using ::curve::common::SEGMENTINFOKEYPREFIX; +using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTINFOKEYEND; +using ::curve::common::SEGMENTINFOKEYPREFIX; namespace curve { namespace mds { @@ -44,18 +47,18 @@ TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) { auto mockEtcdClient = std::make_shared(); { - // 1. list失败 + // 1. list failed EXPECT_CALL(*mockEtcdClient, List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, Matcher*>(_))) .WillOnce(Return(EtcdErrCode::EtcdCanceled)); std::map out; ASSERT_EQ(-1, AllocStatisticHelper::GetExistSegmentAllocValues( - &out, mockEtcdClient)); + &out, mockEtcdClient)); } { - // 2. list成功,解析失败 + // 2. list successful, parsing failed std::vector values{"hello"}; EXPECT_CALL(*mockEtcdClient, List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, @@ -64,10 +67,10 @@ TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) { DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); std::map out; ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues( - &out, mockEtcdClient)); + &out, mockEtcdClient)); } { - // 3. 获取已有的segment alloc value成功 + // 3. 
Successfully obtained the existing segment alloc value std::vector values{ NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; EXPECT_CALL(*mockEtcdClient, @@ -77,7 +80,7 @@ TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) { DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); std::map out; ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues( - &out, mockEtcdClient)); + &out, mockEtcdClient)); ASSERT_EQ(1, out.size()); ASSERT_EQ(1024, out[1]); } @@ -89,32 +92,35 @@ TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { // 1. CalculateSegmentAlloc ok LOG(INFO) << "start test1......"; EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) .WillOnce(Return(EtcdErrCode::EtcdUnknown)); std::map out; ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); + 2, mockEtcdClient, &out)); } { - // 2. ListWithLimitAndRevision成功,但是解析失败 + // 2. ListWithLimitAndRevision succeeded, but parsing failed LOG(INFO) << "start test2......"; std::vector values{"hello"}; std::string lastKey = "021"; EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) .WillOnce( DoAll(SetArgPointee<4>(values), Return(EtcdErrCode::EtcdOK))); std::map out; ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); + 2, mockEtcdClient, &out)); } { - // 3. ListWithLimitAndRevision成功, 解析成功, bundle=1000, 获取个数为1 + // 3. ListWithLimitAndRevision succeeded, parsing succeeded, + // bundle=1000, one entry is fetched LOG(INFO) << "start test3......"; PageFileSegment segment; segment.set_segmentsize(1 << 30); segment.set_logicalpoolid(1); - segment.set_chunksize(16*1024*1024); + segment.set_chunksize(16 * 1024 * 1024); segment.set_startoffset(0); std::string encodeSegment; ASSERT_TRUE( @@ -123,23 +129,24 @@ TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { std::string lastKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 0); EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) .WillOnce(DoAll(SetArgPointee<4>(values), SetArgPointee<5>(lastKey), - Return(EtcdErrCode::EtcdOK))); + Return(EtcdErrCode::EtcdOK))); std::map out; ASSERT_EQ(0, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); + 2, mockEtcdClient, &out)); ASSERT_EQ(1, out.size()); ASSERT_EQ(1 << 30, out[1]); } { - // 4. ListWithLimitAndRevision成功, 解析成功 - // bundle=1000, 获取个数为1001 + // 4. 
ListWithLimitAndRevision succeeded, parsing succeeded, + // bundle=1000, 1001 entries are fetched LOG(INFO) << "start test4......"; PageFileSegment segment; segment.set_segmentsize(1 << 30); segment.set_logicalpoolid(1); - segment.set_chunksize(16*1024*1024); + segment.set_chunksize(16 * 1024 * 1024); segment.set_startoffset(0); std::string encodeSegment; std::vector values; @@ -160,20 +167,22 @@ TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { std::string lastKey2 = NameSpaceStorageCodec::EncodeSegmentStoreKey(501, 1000); EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) .WillOnce(DoAll(SetArgPointee<4>(values), SetArgPointee<5>(lastKey1), Return(EtcdErrCode::EtcdOK))); - EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - lastKey1, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) - .WillOnce(DoAll(SetArgPointee<4>( - std::vector{encodeSegment, encodeSegment}), + EXPECT_CALL(*mockEtcdClient, + ListWithLimitAndRevision(lastKey1, SEGMENTINFOKEYEND, + GETBUNDLE, 2, _, _)) + .WillOnce(DoAll(SetArgPointee<4>(std::vector{ + encodeSegment, encodeSegment}), SetArgPointee<5>(lastKey2), Return(EtcdErrCode::EtcdOK))); std::map out; ASSERT_EQ(0, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); + 2, mockEtcdClient, &out)); ASSERT_EQ(2, out.size()); ASSERT_EQ(500L * (1 << 30), out[1]); ASSERT_EQ(501L * (1 << 30), out[2]); @@ -181,5 +190,3 @@ TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { } } // namespace mds } // namespace curve - - diff --git a/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp b/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp index c51e91587c..f250e7e401 100644 --- a/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp +++ b/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp @@ -60,14 +60,14 @@ class AllocStatisticTest : public ::testing::Test { TEST_F(AllocStatisticTest, test_Init) { { - // 1. 从etcd中获取当前revision失败 + // 1. Failed to obtain the current revision from etcd LOG(INFO) << "test1......"; EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)). WillOnce(Return(EtcdErrCode::EtcdCanceled)); ASSERT_EQ(-1, allocStatistic_->Init()); } { - // 2. 获取已经存在的logicalPool对应的alloc大小失败 + // 2. Failed to obtain the alloc size of the existing logicalPool LOG(INFO) << "test2......"; EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)). WillOnce(Return(EtcdErrCode::EtcdOK)); @@ -80,7 +80,7 @@ TEST_F(AllocStatisticTest, test_Init) { ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); } { - // 3. init成功 + // 3. init succeeded LOG(INFO) << "test3......"; std::vector values{ NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; @@ -99,8 +99,8 @@ TEST_F(AllocStatisticTest, test_Init) { } TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { - // 初始化 allocStatistic - // 旧值: logicalPooId(1):1024 + // Initialize allocStatistic + // Old value: logicalPoolId(1):1024 std::vector values{ NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) @@ -124,19 +124,19 @@ TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { values.emplace_back(encodeSegment); } - // 1. 
在定期持久化线程和统计线程启动前,只能获取旧值 + // 1. Before the periodic persistence thread and the statistics thread start, only the old values can be obtained int64_t alloc; ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); ASSERT_EQ(1024, alloc); ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); - // 2. 更新segment的值 + // 2. Update the segment values allocStatistic_->DeAllocSpace(1, 64, 1); allocStatistic_->AllocSpace(1, 32, 1); ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); ASSERT_EQ(1024 - 32, alloc); - // 设置mock的etcd中segment的值 + // Set the segment values in the mock etcd // logicalPoolId(1):500 * (1<<30) // logicalPoolId(2):501 * (1<<30) segment.set_logicalpoolid(2); @@ -167,7 +167,7 @@ TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { .WillOnce(Return(EtcdErrCode::EtcdCanceled)) .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); - // 设置mock的Put结果 + // Set the mock's Put results EXPECT_CALL(*mockEtcdClient_, Put( NameSpaceStorageCodec::EncodeSegmentAllocKey(1), NameSpaceStorageCodec::EncodeSegmentAllocValue( @@ -198,7 +198,7 @@ TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { NameSpaceStorageCodec::EncodeSegmentAllocValue(3, 1L << 30))) .WillOnce(Return(EtcdErrCode::EtcdOK)); - // 2. 启动定期持久化线程和统计线程 + // 2. Start the periodic persistence thread and the statistics thread for (int i = 1; i <= 2; i++) { allocStatistic_->AllocSpace(i, 1L << 30, i + 3); } @@ -211,7 +211,7 @@ TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { ASSERT_EQ(502L *(1 << 30), alloc); std::this_thread::sleep_for(std::chrono::milliseconds(30)); - // 再通过alloc进行更新 + // Then update again via alloc for (int i = 1; i <= 2; i++) { allocStatistic_->DeAllocSpace(i, 1L << 30, i + 4); } diff --git a/test/mds/nameserver2/clean_core_test.cpp b/test/mds/nameserver2/clean_core_test.cpp index 5288fd83d6..ca568b7209 100644 --- a/test/mds/nameserver2/clean_core_test.cpp +++ b/test/mds/nameserver2/clean_core_test.cpp @@ -20,23 +20,25 @@ * Author: hzsunjianliang */ -#include -#include -#include #include "src/mds/nameserver2/clean_core.h" -#include "test/mds/nameserver2/mock/mock_namespace_storage.h" -#include "test/mds/mock/mock_topology.h" + +#include +#include +#include + #include "src/mds/chunkserverclient/copyset_client.h" #include "test/mds/mock/mock_alloc_statistic.h" #include "test/mds/mock/mock_chunkserverclient.h" +#include "test/mds/mock/mock_topology.h" +#include "test/mds/nameserver2/mock/mock_namespace_storage.h" +using ::curve::mds::chunkserverclient::ChunkServerClientOption; +using ::curve::mds::chunkserverclient::MockChunkServerClient; +using curve::mds::topology::MockTopology; using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; -using curve::mds::topology::MockTopology; -using ::curve::mds::chunkserverclient::ChunkServerClientOption; -using ::curve::mds::chunkserverclient::MockChunkServerClient; namespace curve { namespace mds { @@ -56,8 +58,8 @@ class CleanCoreTest : public testing::Test { cleanCore_ = std::make_shared(storage_, client_, allocStatistic_); - csClient_ = std::make_shared( - topology_, option_, channelPool_); + csClient_ = std::make_shared(topology_, option_, + channelPool_); } void TearDown() override {} @@ -81,7 +83,7 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { cleanFile.set_segmentsize(0); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress),
StatusCode::KInternalError); } { @@ -89,19 +91,19 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(_, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .WillOnce(Return(StoreStatus::KeyNotExist)); } EXPECT_CALL(*storage_, DeleteSnapshotFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::kOK); + StatusCode::kOK); ASSERT_EQ(progress.GetStatus(), TaskStatus::SUCCESS); ASSERT_EQ(progress.GetProgress(), 100); @@ -111,47 +113,48 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(_, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .WillOnce(Return(StoreStatus::KeyNotExist)); } EXPECT_CALL(*storage_, DeleteSnapshotFile(_, _)) - .WillOnce(Return(StoreStatus::InternalError)); + .WillOnce(Return(StoreStatus::InternalError)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::kSnapshotFileDeleteError); + StatusCode::kSnapshotFileDeleteError); } { // get segment error EXPECT_CALL(*storage_, GetSegment(_, 0, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::kSnapshotFileDeleteError); + StatusCode::kSnapshotFileDeleteError); } { - // 联调Bug修复:快照文件共享源文件的segment,所以在查询segment的时候需要使用 - // ParentID 进行查找 + // Bug fix from integration testing: the snapshot file shares the + // source file's segments, so ParentID must be used when querying + // segments uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; uint64_t expectParentID = 101; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(expectParentID, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .WillOnce(Return(StoreStatus::KeyNotExist)); } EXPECT_CALL(*storage_, DeleteSnapshotFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); @@ -159,7 +162,7 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { cleanFile.set_parentid(expectParentID); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::kOK); + StatusCode::kOK); ASSERT_EQ(progress.GetStatus(), TaskStatus::SUCCESS); ASSERT_EQ(progress.GetProgress(), 100); @@ -173,19 +176,19 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(_, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::OK)); + .WillOnce(Return(StoreStatus::OK)); } EXPECT_CALL(*storage_, DeleteSnapshotFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK));
+ .Times(1) + .WillOnce(Return(StoreStatus::OK)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::kOK); + StatusCode::kOK); ASSERT_EQ(progress.GetStatus(), TaskStatus::SUCCESS); ASSERT_EQ(progress.GetProgress(), 100); @@ -200,7 +203,7 @@ TEST_F(CleanCoreTest, testcleanfile) { cleanFile.set_segmentsize(0); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), - StatusCode::KInternalError); + StatusCode::KInternalError); } { @@ -208,19 +211,18 @@ TEST_F(CleanCoreTest, testcleanfile) { uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(_, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .WillOnce(Return(StoreStatus::KeyNotExist)); } EXPECT_CALL(*storage_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; - ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), - StatusCode::kOK); + ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), StatusCode::kOK); ASSERT_EQ(progress.GetStatus(), TaskStatus::SUCCESS); ASSERT_EQ(progress.GetProgress(), 100); @@ -231,52 +233,51 @@ TEST_F(CleanCoreTest, testcleanfile) { uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(_, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .WillOnce(Return(StoreStatus::KeyNotExist)); } EXPECT_CALL(*storage_, DeleteFile(_, _)) - .WillOnce(Return(StoreStatus::InternalError)); + .WillOnce(Return(StoreStatus::InternalError)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), - StatusCode::kCommonFileDeleteError); + StatusCode::kCommonFileDeleteError); ASSERT_EQ(progress.GetStatus(), TaskStatus::FAILED); } { // get segment error EXPECT_CALL(*storage_, GetSegment(_, 0, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), - StatusCode::kCommonFileDeleteError); + StatusCode::kCommonFileDeleteError); ASSERT_EQ(progress.GetStatus(), TaskStatus::FAILED); } { // get segment ok, DeleteSnapShotChunk Error - } - { + } { // get segment ok, DeleteSnapShotChunk ok, DeleteSegment error EXPECT_CALL(*storage_, GetSegment(_, 0, _)) - .WillOnce(Return(StoreStatus::OK)); + .WillOnce(Return(StoreStatus::OK)); EXPECT_CALL(*storage_, DeleteSegment(_, _, _)) - .WillOnce(Return(StoreStatus::InternalError)); + .WillOnce(Return(StoreStatus::InternalError)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), - StatusCode::kCommonFileDeleteError); + StatusCode::kCommonFileDeleteError); ASSERT_EQ(progress.GetStatus(), TaskStatus::FAILED); } } @@ -310,12 +311,9 @@ TEST_F(CleanCoreTest, TestCleanDiscardSegment) { // CopysetClient DeleteChunk failed { 
- EXPECT_CALL(*topology_, GetCopySet(_, _)) - .WillOnce(Return(false)); - EXPECT_CALL(*storage_, CleanDiscardSegment(_, _, _)) - .Times(0); - EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)) - .Times(0); + EXPECT_CALL(*topology_, GetCopySet(_, _)).WillOnce(Return(false)); + EXPECT_CALL(*storage_, CleanDiscardSegment(_, _, _)).Times(0); + EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)).Times(0); TaskProgress progress; ASSERT_EQ(StatusCode::KInternalError, cleanCore_->CleanDiscardSegment(fakeKey, discardSegmentInfo, @@ -333,16 +331,14 @@ TEST_F(CleanCoreTest, TestCleanDiscardSegment) { EXPECT_CALL(*topology_, GetCopySet(_, _)) .Times(segment.chunks_size()) - .WillRepeatedly( - DoAll(SetArgPointee<1>(copyset), Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(copyset), Return(true))); EXPECT_CALL(*csClient_, DeleteChunk(_, _, _, _, _)) .Times(segment.chunks_size()) .WillRepeatedly(Return(kMdsSuccess)); EXPECT_CALL(*storage_, CleanDiscardSegment(_, _, _)) .WillOnce(Return(StoreStatus::InternalError)); - EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)) - .Times(0); + EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)).Times(0); TaskProgress progress; ASSERT_EQ(StatusCode::KInternalError, @@ -361,16 +357,14 @@ TEST_F(CleanCoreTest, TestCleanDiscardSegment) { EXPECT_CALL(*topology_, GetCopySet(_, _)) .Times(segment.chunks_size()) - .WillRepeatedly( - DoAll(SetArgPointee<1>(copyset), Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(copyset), Return(true))); EXPECT_CALL(*csClient_, DeleteChunk(_, _, _, _, _)) .Times(segment.chunks_size()) .WillRepeatedly(Return(kMdsSuccess)); EXPECT_CALL(*storage_, CleanDiscardSegment(_, _, _)) .WillOnce(Return(StoreStatus::OK)); - EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)) - .Times(1); + EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)).Times(1); TaskProgress progress; ASSERT_EQ(StatusCode::kOK, cleanCore_->CleanDiscardSegment( diff --git a/test/mds/nameserver2/curvefs_test.cpp b/test/mds/nameserver2/curvefs_test.cpp index 899b942ee8..7ce79cb724 100644 --- a/test/mds/nameserver2/curvefs_test.cpp +++ b/test/mds/nameserver2/curvefs_test.cpp @@ -19,46 +19,47 @@ * Created Date: Wednesday September 12th 2018 * Author: hzsunjianliang */ -#include -#include #include "src/mds/nameserver2/curvefs.h" -#include "src/mds/nameserver2/idgenerator/inode_id_generator.h" -#include "src/mds/nameserver2/namespace_storage.h" + +#include +#include + +#include "src/common/namespace_define.h" #include "src/common/timeutility.h" #include "src/mds/common/mds_define.h" +#include "src/mds/nameserver2/idgenerator/inode_id_generator.h" +#include "src/mds/nameserver2/namespace_storage.h" #include "src/mds/topology/topology_item.h" -#include "src/common/namespace_define.h" - -#include "test/mds/nameserver2/mock/mock_namespace_storage.h" -#include "test/mds/nameserver2/mock/mock_inode_id_generator.h" +#include "test/mds/mock/mock_alloc_statistic.h" +#include "test/mds/mock/mock_topology.h" #include "test/mds/nameserver2/mock/mock_chunk_allocate.h" #include "test/mds/nameserver2/mock/mock_clean_manager.h" -#include "test/mds/nameserver2/mock/mock_snapshotclone_client.h" #include "test/mds/nameserver2/mock/mock_file_record_manager.h" -#include "test/mds/mock/mock_alloc_statistic.h" -#include "test/mds/mock/mock_topology.h" +#include "test/mds/nameserver2/mock/mock_inode_id_generator.h" +#include "test/mds/nameserver2/mock/mock_namespace_storage.h" +#include "test/mds/nameserver2/mock/mock_snapshotclone_client.h" -using ::testing::AtLeast; -using 
::testing::StrEq; +using curve::common::Authenticator; using ::testing::_; +using ::testing::AtLeast; +using ::testing::DoAll; +using ::testing::Invoke; using ::testing::Return; using ::testing::ReturnArg; -using ::testing::DoAll; -using ::testing::SetArgPointee; using ::testing::SaveArg; -using ::testing::Invoke; -using curve::common::Authenticator; +using ::testing::SetArgPointee; +using ::testing::StrEq; +using curve::common::kDefaultPoolsetName; using curve::common::TimeUtility; -using curve::mds::topology::MockTopology; -using curve::mds::snapshotcloneclient::MockSnapshotCloneClient; using curve::mds::snapshotcloneclient::DestFileInfo; -using curve::common::kDefaultPoolsetName; +using curve::mds::snapshotcloneclient::MockSnapshotCloneClient; +using curve::mds::topology::MockTopology; namespace curve { namespace mds { -class CurveFSTest: public ::testing::Test { +class CurveFSTest : public ::testing::Test { protected: void SetUp() override { storage_ = std::make_shared(); @@ -68,7 +69,8 @@ class CurveFSTest: public ::testing::Test { mockcleanManager_ = std::make_shared(); topology_ = std::make_shared(); snapshotClient_ = std::make_shared(); - // session repo已经mock,数据库相关参数不需要 + // The session repo has been mocked, and database related parameters are + // not required fileRecordManager_ = std::make_shared(); fileRecordOptions_.fileRecordExpiredTimeUs = 5 * 1000; fileRecordOptions_.scanIntervalTimeUs = 1 * 1000; @@ -83,7 +85,7 @@ class CurveFSTest: public ::testing::Test { curveFSOptions_.authOptions = authOptions_; curveFSOptions_.fileRecordOptions = fileRecordOptions_; - curvefs_ = &kCurveFS; + curvefs_ = &kCurveFS; allocStatistic_ = std::make_shared(); FileInfo fileInfo; @@ -95,16 +97,12 @@ class CurveFSTest: public ::testing::Test { fileInfo.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(AtLeast(1)) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); curvefs_->Init(storage_, inodeIdGenerator_, mockChunkAllocator_, - mockcleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions_, - topology_, - snapshotClient_); + mockcleanManager_, fileRecordManager_, allocStatistic_, + curveFSOptions_, topology_, snapshotClient_); DefaultSegmentSize = curvefs_->GetDefaultSegmentSize(); kMiniFileLength = curvefs_->GetMinFileLength(); kMaxFileLength = curvefs_->GetMaxFileLength(); @@ -115,11 +113,9 @@ class CurveFSTest: public ::testing::Test { Return(std::vector{kDefaultPoolsetName})); } - void TearDown() override { - curvefs_->Uninit(); - } + void TearDown() override { curvefs_->Uninit(); } - CurveFS *curvefs_; + CurveFS* curvefs_; std::shared_ptr storage_; std::shared_ptr inodeIdGenerator_; std::shared_ptr mockChunkAllocator_; @@ -140,108 +136,112 @@ class CurveFSTest: public ::testing::Test { TEST_F(CurveFSTest, testCreateFile1) { // test parm error std::map spacePools; - spacePools.insert(std::pair(1, - kMaxFileLength - kMiniFileLength)); - EXPECT_CALL(*mockChunkAllocator_, - GetRemainingSpaceInLogicalPool(_, _, _)) + spacePools.insert( + std::pair(1, kMaxFileLength - kMiniFileLength)); + EXPECT_CALL(*mockChunkAllocator_, GetRemainingSpaceInLogicalPool(_, _, _)) .Times(AtLeast(1)) .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); - ASSERT_EQ(curvefs_->CreateFile("/file1", "", - "owner1", FileType::INODE_PAGEFILE, - kMiniFileLength - 1, 0, 0), - StatusCode::kFileLengthNotSupported); - - ASSERT_EQ(curvefs_->CreateFile("/file1", "", - 
"owner1", FileType::INODE_PAGEFILE, - kMaxFileLength + 1, 0, 0), - StatusCode::kFileLengthNotSupported); - - ASSERT_EQ(curvefs_->CreateFile("/flie1", "", - "owner1", FileType::INODE_PAGEFILE, - kMiniFileLength + 1, 0, 0), - StatusCode::kFileLengthNotSupported); + ASSERT_EQ( + curvefs_->CreateFile("/file1", "", "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength - 1, 0, 0), + StatusCode::kFileLengthNotSupported); + + ASSERT_EQ( + curvefs_->CreateFile("/file1", "", "owner1", FileType::INODE_PAGEFILE, + kMaxFileLength + 1, 0, 0), + StatusCode::kFileLengthNotSupported); + + ASSERT_EQ( + curvefs_->CreateFile("/flie1", "", "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength + 1, 0, 0), + StatusCode::kFileLengthNotSupported); - ASSERT_EQ(curvefs_->CreateFile("/flie1", "", "owner1", - FileType::INODE_PAGEFILE, - kMaxFileLength - kMiniFileLength + DefaultSegmentSize, - 0, 0), StatusCode::kFileLengthNotSupported); + ASSERT_EQ(curvefs_->CreateFile( + "/flie1", "", "owner1", FileType::INODE_PAGEFILE, + kMaxFileLength - kMiniFileLength + DefaultSegmentSize, 0, 0), + StatusCode::kFileLengthNotSupported); - ASSERT_EQ(curvefs_->CreateFile("/", "", "", FileType::INODE_DIRECTORY, - 0, 0, 0), StatusCode::kFileExists); + ASSERT_EQ( + curvefs_->CreateFile("/", "", "", FileType::INODE_DIRECTORY, 0, 0, 0), + StatusCode::kFileExists); { // test file exist EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kFileExists); } { // test get storage error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::InternalError)); auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { // test put storage error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::InternalError)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(true)); + .Times(1) + .WillOnce(Return(true)); auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { // test put storage ok EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(true)); - + .Times(1) + .WillOnce(Return(true)); auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); 
ASSERT_EQ(statusCode, StatusCode::kOK); } { // test inode allocate error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(false)); + .Times(1) + .WillOnce(Return(false)); auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kStorageError); } @@ -253,17 +253,15 @@ TEST_F(CurveFSTest, testCreateFile1) { FileInfo fileInfo; EXPECT_CALL(*storage_, PutFile(_)) - .WillOnce(DoAll( - SaveArg<0>(&fileInfo), - Return(StoreStatus::OK))); + .WillOnce(DoAll(SaveArg<0>(&fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) .Times(1) .WillOnce(Return(true)); - auto statusCode = curvefs_->CreateFile( - "/dir1", kDefaultPoolsetName, "owner1", - FileType::INODE_DIRECTORY, 0, 0, 0); + auto statusCode = + curvefs_->CreateFile("/dir1", kDefaultPoolsetName, "owner1", + FileType::INODE_DIRECTORY, 0, 0, 0); ASSERT_EQ(statusCode, StatusCode::kOK); ASSERT_FALSE(fileInfo.has_throttleparams()); } @@ -276,18 +274,15 @@ TEST_F(CurveFSTest, testCreateFile1) { FileInfo fileInfo; EXPECT_CALL(*storage_, PutFile(_)) - .WillOnce(DoAll( - SaveArg<0>(&fileInfo), - Return(StoreStatus::OK))); + .WillOnce(DoAll(SaveArg<0>(&fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) .Times(1) .WillOnce(Return(true)); - auto statusCode = - curvefs_->CreateFile("/file1", kDefaultPoolsetName, - "owner1", FileType::INODE_PAGEFILE, - kMiniFileLength, 0, 0); + auto statusCode = curvefs_->CreateFile( + "/file1", kDefaultPoolsetName, "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kOK); ASSERT_TRUE(fileInfo.has_throttleparams()); } @@ -300,71 +295,76 @@ TEST_F(CurveFSTest, testCreateStripeFile) { spacePools.insert(std::pair(1, kMaxFileLength)); spacePools.insert(std::pair(2, kMaxFileLength)); EXPECT_CALL(*mockChunkAllocator_, - GetRemainingSpaceInLogicalPool(_, _, _)) - .Times(AtLeast(1)) - .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); + GetRemainingSpaceInLogicalPool(_, _, _)) + .Times(AtLeast(1)) + .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(true)); + .Times(1) + .WillOnce(Return(true)); ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, - 1 * 1024 * 1024, 4), StatusCode::kOK); + FileType::INODE_PAGEFILE, + kMiniFileLength, 1 * 1024 * 1024, 4), + StatusCode::kOK); } { // test stripeStripe and stripeCount is not all zero ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 1), - StatusCode::kParaError); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 1), + StatusCode::kParaError); ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 1024*1024ul, - 0), StatusCode::kParaError); + FileType::INODE_PAGEFILE, + 
kMiniFileLength, 1024 * 1024ul, 0), + StatusCode::kParaError); } { // test stripeUnit more then chunksize - ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 16*1024*1024ul + 1, - 0), StatusCode::kParaError); + ASSERT_EQ(curvefs_->CreateFile( + "/file1", "", "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength, 16 * 1024 * 1024ul + 1, 0), + StatusCode::kParaError); } { // test stripeUnit is not divisible by chunksize - ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, - 4*1024*1024ul + 1, 0), StatusCode::kParaError); + ASSERT_EQ(curvefs_->CreateFile( + "/file1", "", "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength, 4 * 1024 * 1024ul + 1, 0), + StatusCode::kParaError); } } TEST_F(CurveFSTest, testCreateFileWithPoolset) { const std::map spacePools{ - {1, kMaxFileLength}, - {2, kMaxFileLength}, + {1, kMaxFileLength}, + {2, kMaxFileLength}, }; EXPECT_CALL(*mockChunkAllocator_, GetRemainingSpaceInLogicalPool(_, _, _)) - .Times(AtLeast(1)) - .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); + .Times(AtLeast(1)) + .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillRepeatedly(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillRepeatedly(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .WillRepeatedly(Invoke([](uint64_t* id) { - static std::atomic counter{0}; - *id = counter++; - return true; - })); + .WillRepeatedly(Invoke([](uint64_t* id) { + static std::atomic counter{0}; + *id = counter++; + return true; + })); // create file without poolset, assign to default poolset { @@ -382,8 +382,8 @@ TEST_F(CurveFSTest, testCreateFileWithPoolset) { // create file with poolset but not same with anyone { EXPECT_CALL(*topology_, GetPoolsetNameInCluster(_)) - .WillOnce(Return( - std::vector{kDefaultPoolsetName, "SSD"})); + .WillOnce( + Return(std::vector{kDefaultPoolsetName, "SSD"})); ASSERT_EQ(StatusCode::kPoolsetNotExist, curvefs_->CreateFile("/file1", "HDD", "owner", @@ -393,8 +393,7 @@ TEST_F(CurveFSTest, testCreateFileWithPoolset) { // create file with poolset and poolset exists { - EXPECT_CALL(*storage_, PutFile(_)) - .WillOnce(Return(StoreStatus::OK)); + EXPECT_CALL(*storage_, PutFile(_)).WillOnce(Return(StoreStatus::OK)); EXPECT_CALL(*topology_, GetPoolsetNameInCluster(_)); ASSERT_EQ(StatusCode::kOK, @@ -406,8 +405,7 @@ TEST_F(CurveFSTest, testCreateFileWithPoolset) { // cluster doesn't have poolset { EXPECT_CALL(*topology_, GetPoolsetNameInCluster(_)) - .WillOnce(Return( - std::vector{})); + .WillOnce(Return(std::vector{})); ASSERT_EQ(StatusCode::kPoolsetNotExist, curvefs_->CreateFile("/file1", "SSD", "owner", FileType::INODE_PAGEFILE, @@ -419,23 +417,19 @@ TEST(TestSelectPoolsetByRules, Test) { ASSERT_EQ(kDefaultPoolsetName, SelectPoolsetByRules("/filename", {})); { - std::map rules{ - {"/system/", "system"} - }; + std::map rules{{"/system/", "system"}}; ASSERT_EQ("system", SelectPoolsetByRules("/system/file", rules)); } { - std::map rules{ - {"/system/", "system"} - }; + std::map rules{{"/system/", "system"}}; ASSERT_EQ(kDefaultPoolsetName, SelectPoolsetByRules("/systems", rules)); } { std::map rules{ - {"/system/", "system"}, - {"/systems/", "system1"}, + {"/system/", "system"}, + {"/systems/", "system1"}, }; ASSERT_EQ("system1", SelectPoolsetByRules("/systems/file", rules)); } @@ -443,9 +437,7 @@ TEST(TestSelectPoolsetByRules, Test) { // subdir rules { 
std::map rules{ - {"/system/", "system"}, - {"/system/sub/", "system-sub"} - }; + {"/system/", "system"}, {"/system/sub/", "system-sub"}}; ASSERT_EQ("system-sub", SelectPoolsetByRules("/system/sub/file", rules)); @@ -462,15 +454,15 @@ TEST_F(CurveFSTest, testGetFileInfo) { FileInfo rootFileInfo = curvefs_->GetRootFileInfo(); ASSERT_EQ(fileInfo.id(), rootFileInfo.id()); - ASSERT_EQ(fileInfo.filename(), rootFileInfo.filename()); + ASSERT_EQ(fileInfo.filename(), rootFileInfo.filename()); ASSERT_EQ(fileInfo.filetype(), rootFileInfo.filetype()); { // test path not exist - FileInfo fileInfo; + FileInfo fileInfo; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->GetFileInfo("/file1/file2", &fileInfo), StatusCode::kFileNotExists); } @@ -478,8 +470,8 @@ TEST_F(CurveFSTest, testGetFileInfo) { // test stoarge error FileInfo fileInfo; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->GetFileInfo("/file1/file2", &fileInfo), StatusCode::kStorageError); } @@ -487,134 +479,134 @@ TEST_F(CurveFSTest, testGetFileInfo) { // test ok FileInfo fileInfo; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(Return(StoreStatus::OK)); + .Times(2) + .WillRepeatedly(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->GetFileInfo("/file1/file2", &fileInfo), StatusCode::kOK); } { // test WalkPath NOT DIRECTORY - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); FileInfo retFileInfo; std::string lastEntry; ASSERT_EQ(curvefs_->GetFileInfo("/testdir/file1", &retFileInfo), - StatusCode::kFileNotExists); + StatusCode::kFileNotExists); } { // test LookUpFile internal Error - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::InternalError)); FileInfo fileInfo1; ASSERT_EQ(curvefs_->GetFileInfo("testdir/file1", &fileInfo1), - StatusCode::kStorageError); + StatusCode::kStorageError); } } TEST_F(CurveFSTest, testDeleteFile) { // test remove root ASSERT_EQ(curvefs_->DeleteFile("/", kUnitializedFileID, false), - StatusCode::kParaError); + StatusCode::kParaError); // test delete directory ok { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); 
ASSERT_EQ(curvefs_->DeleteFile("/dir1", kUnitializedFileID, false), - StatusCode::kOK); + StatusCode::kOK); } // test delete directory, directory is not empty { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; fileInfoList.push_back(fileInfo); EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->DeleteFile("/dir1", kUnitializedFileID, false), - StatusCode::kDirNotEmpty); + StatusCode::kDirNotEmpty); } // test delete directory, delete file fail { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->DeleteFile("/dir1", kUnitializedFileID, false), - StatusCode::kStorageError); + StatusCode::kStorageError); } // test delete pagefile ok { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kOK); + StatusCode::kOK); } // test delete recyclebin pagefile,cleanManager fail @@ -623,44 +615,42 @@ TEST_F(CurveFSTest, testDeleteFile) { recycleBindir.set_parentid(ROOTINODEID); recycleBindir.set_filetype(FileType::INODE_DIRECTORY); - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_parentid(RECYCLEBININODEID); fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(4) - .WillOnce(DoAll(SetArgPointee<2>(recycleBindir), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(recycleBindir), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(4) + .WillOnce( + DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + 
DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); std::vector fileInfoList; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); - EXPECT_CALL(*mockcleanManager_, - GetTask(_)) + EXPECT_CALL(*mockcleanManager_, GetTask(_)) .Times(1) .WillOnce(Return(nullptr)); - EXPECT_CALL(*mockcleanManager_, - SubmitDeleteCommonFileJob(_)) - .Times(1) - .WillOnce(Return(false)); + EXPECT_CALL(*mockcleanManager_, SubmitDeleteCommonFileJob(_)) + .Times(1) + .WillOnce(Return(false)); ASSERT_EQ(curvefs_->DeleteFile(RECYCLEBINDIR + "/file1", - kUnitializedFileID, true), - StatusCode::KInternalError); + kUnitializedFileID, true), + StatusCode::KInternalError); } // test force delete recyclebin file ok @@ -669,44 +659,42 @@ TEST_F(CurveFSTest, testDeleteFile) { recycleBindir.set_parentid(ROOTINODEID); recycleBindir.set_filetype(FileType::INODE_DIRECTORY); - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_parentid(RECYCLEBININODEID); fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(4) - .WillOnce(DoAll(SetArgPointee<2>(recycleBindir), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(recycleBindir), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(4) + .WillOnce( + DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - EXPECT_CALL(*mockcleanManager_, - GetTask(_)) + EXPECT_CALL(*mockcleanManager_, GetTask(_)) .Times(1) .WillOnce(Return(nullptr)); - EXPECT_CALL(*mockcleanManager_, - SubmitDeleteCommonFileJob(_)) - .Times(1) - .WillOnce(Return(true)); + EXPECT_CALL(*mockcleanManager_, SubmitDeleteCommonFileJob(_)) + .Times(1) + .WillOnce(Return(true)); ASSERT_EQ(curvefs_->DeleteFile(RECYCLEBINDIR + "/file1", - kUnitializedFileID, true), - StatusCode::kOK); + kUnitializedFileID, true), + StatusCode::kOK); } // test force delete already deleting @@ -715,250 +703,245 @@ TEST_F(CurveFSTest, testDeleteFile) { recycleBindir.set_parentid(ROOTINODEID); recycleBindir.set_filetype(FileType::INODE_DIRECTORY); - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_parentid(RECYCLEBININODEID); fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(4) - .WillOnce(DoAll(SetArgPointee<2>(recycleBindir), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(recycleBindir), - 
Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(4) + .WillOnce( + DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); // mockcleanManager_ = std::make_shared(); auto notNullTask = std::make_shared(1, nullptr, fileInfo); - EXPECT_CALL(*mockcleanManager_, - GetTask(_)) + EXPECT_CALL(*mockcleanManager_, GetTask(_)) .Times(1) .WillOnce(Return(notNullTask)); ASSERT_EQ(curvefs_->DeleteFile(RECYCLEBINDIR + "/file1", - kUnitializedFileID, true), - StatusCode::kOK); + kUnitializedFileID, true), + StatusCode::kOK); } - // test force delete file not in recyclebin + // test force delete file not in recyclebin { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_parentid(USERSTARTINODEID); fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, true), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } // test delete pagefile, file under snapshot { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; fileInfoList.push_back(fileInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kFileUnderSnapShot); + StatusCode::kFileUnderSnapShot); } // test delete pagefile, storage error { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->DeleteFile("/file1", 
kUnitializedFileID, false), - StatusCode::kStorageError); + StatusCode::kStorageError); } // test file not exist { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kFileNotExists); + StatusCode::kFileNotExists); } // delete not support file type { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_APPENDFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } // test delete pagefile, file under clone { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileBeingCloned); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); CloneRefStatus status = CloneRefStatus::kHasRef; EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(status), - Return(StatusCode::kOK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(status), Return(StatusCode::kOK))); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kDeleteFileBeingCloned); + StatusCode::kDeleteFileBeingCloned); } // test delete pagefile, file under clone but has no ref but delete fail { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileBeingCloned); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); CloneRefStatus status = CloneRefStatus::kNoRef; EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(status), - Return(StatusCode::kOK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(status), Return(StatusCode::kOK))); EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kStorageError); + StatusCode::kStorageError); } // test delete pagefile, file under clone but has no ref success { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileBeingCloned); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, 
_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); CloneRefStatus status = CloneRefStatus::kNoRef; EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(status), - Return(StatusCode::kOK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(status), Return(StatusCode::kOK))); EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kOK); + StatusCode::kOK); } // test delete pagefile, file under clone but need check list empty { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileBeingCloned); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); CloneRefStatus status = CloneRefStatus::kNeedCheck; EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(status), - Return(StatusCode::kOK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(status), Return(StatusCode::kOK))); EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kOK); + StatusCode::kOK); } // test delete pagefile, file under clone but need check, file has ref { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileBeingCloned); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::KeyNotExist))); + .Times(3) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce(DoAll(SetArgPointee<2>(fileInfo), + Return(StoreStatus::KeyNotExist))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); CloneRefStatus status = CloneRefStatus::kNeedCheck; std::vector fileCheckList; @@ -967,37 +950,37 @@ TEST_F(CurveFSTest, testDeleteFile) { info.inodeid = 100; fileCheckList.push_back(info); EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(status), - SetArgPointee<3>(fileCheckList), - Return(StatusCode::kOK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(status), + SetArgPointee<3>(fileCheckList), + Return(StatusCode::kOK))); EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kOK); + StatusCode::kOK); } // test delete pagefile, file under clone but need check, inode mismatch { - FileInfo fileInfo; + FileInfo fileInfo; 
fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileBeingCloned); fileInfo.set_id(10); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); CloneRefStatus status = CloneRefStatus::kNeedCheck; std::vector fileCheckList; @@ -1006,37 +989,37 @@ TEST_F(CurveFSTest, testDeleteFile) { info.inodeid = 100; fileCheckList.push_back(info); EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(status), - SetArgPointee<3>(fileCheckList), - Return(StatusCode::kOK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(status), + SetArgPointee<3>(fileCheckList), + Return(StatusCode::kOK))); EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kOK); + StatusCode::kOK); } // test delete pagefile, file under clone but need check, has ref { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileBeingCloned); fileInfo.set_id(100); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); CloneRefStatus status = CloneRefStatus::kNeedCheck; std::vector fileCheckList; @@ -1045,17 +1028,17 @@ TEST_F(CurveFSTest, testDeleteFile) { info.inodeid = 100; fileCheckList.push_back(info); EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(status), - SetArgPointee<3>(fileCheckList), - Return(StatusCode::kOK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(status), + SetArgPointee<3>(fileCheckList), + Return(StatusCode::kOK))); // EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) // .Times(1) // .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kDeleteFileBeingCloned); + StatusCode::kDeleteFileBeingCloned); } // test delete failed when mds didn't start for enough time @@ -1113,7 +1096,7 @@ TEST_F(CurveFSTest, testDeleteFile) { TEST_F(CurveFSTest, testGetAllocatedSize) { AllocatedSize allocSize; - FileInfo fileInfo; + FileInfo fileInfo; uint64_t segmentSize = 1 * 1024 * 1024 * 1024ul; fileInfo.set_id(0); fileInfo.set_filetype(FileType::INODE_PAGEFILE); @@ -1130,22 +1113,21 
@@ TEST_F(CurveFSTest, testGetAllocatedSize) { segment.set_logicalpoolid(2); segments.emplace_back(segment); - // test page file normal { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSegment(_, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(segments), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<1>(segments), Return(StoreStatus::OK))); ASSERT_EQ(StatusCode::kOK, - curvefs_->GetAllocatedSize("/tests", &allocSize)); + curvefs_->GetAllocatedSize("/tests", &allocSize)); ASSERT_EQ(3 * segmentSize, allocSize.total); - std::unordered_map expected = - {{1, 2 * segmentSize}, {2, segmentSize}}; + std::unordered_map expected = { + {1, 2 * segmentSize}, {2, segmentSize}}; ASSERT_EQ(expected, allocSize.allocSizeMap); } // test directory normal @@ -1157,73 +1139,72 @@ TEST_F(CurveFSTest, testGetAllocatedSize) { files.emplace_back(fileInfo); } EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(dirInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(dirInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(files), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(files), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSegment(_, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(segments), - Return(StoreStatus::OK))); + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<1>(segments), Return(StoreStatus::OK))); ASSERT_EQ(StatusCode::kOK, - curvefs_->GetAllocatedSize("/tests", &allocSize)); + curvefs_->GetAllocatedSize("/tests", &allocSize)); ASSERT_EQ(9 * segmentSize, allocSize.total); - std::unordered_map expected = - {{1, 6 * segmentSize}, {2, 3 * segmentSize}}; + std::unordered_map expected = { + {1, 6 * segmentSize}, {2, 3 * segmentSize}}; } // test GetFile fail { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(StatusCode::kFileNotExists, - curvefs_->GetAllocatedSize("/tests", &allocSize)); + curvefs_->GetAllocatedSize("/tests", &allocSize)); } // test file type not supported { FileInfo appendFileInfo; appendFileInfo.set_filetype(INODE_APPENDFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(appendFileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(appendFileInfo), + Return(StoreStatus::OK))); ASSERT_EQ(StatusCode::kNotSupported, - curvefs_->GetAllocatedSize("/tests", &allocSize)); + curvefs_->GetAllocatedSize("/tests", &allocSize)); } // test list segment fail { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSegment(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(StatusCode::kStorageError, - curvefs_->GetAllocatedSize("/tests", &allocSize)); + curvefs_->GetAllocatedSize("/tests", &allocSize)); } // test list directory fail { FileInfo dirInfo; dirInfo.set_filetype(FileType::INODE_DIRECTORY); 
EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(dirInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(dirInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(StatusCode::kStorageError, - curvefs_->GetAllocatedSize("/tests", &allocSize)); + curvefs_->GetAllocatedSize("/tests", &allocSize)); } } TEST_F(CurveFSTest, testGetFileSize) { uint64_t fileSize; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_id(0); fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_length(10 * kGB); @@ -1231,11 +1212,10 @@ TEST_F(CurveFSTest, testGetFileSize) { // test page file normal { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); - ASSERT_EQ(StatusCode::kOK, - curvefs_->GetFileSize("/tests", &fileSize)); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); + ASSERT_EQ(StatusCode::kOK, curvefs_->GetFileSize("/tests", &fileSize)); ASSERT_EQ(10 * kGB, fileSize); } // test directory normal @@ -1247,49 +1227,47 @@ TEST_F(CurveFSTest, testGetFileSize) { files.emplace_back(fileInfo); } EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(dirInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(dirInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(files), - Return(StoreStatus::OK))); - ASSERT_EQ(StatusCode::kOK, - curvefs_->GetFileSize("/tests", &fileSize)); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(files), Return(StoreStatus::OK))); + ASSERT_EQ(StatusCode::kOK, curvefs_->GetFileSize("/tests", &fileSize)); ASSERT_EQ(30 * kGB, fileSize); } // test GetFile fail { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(StatusCode::kFileNotExists, - curvefs_->GetFileSize("/tests", &fileSize)); + curvefs_->GetFileSize("/tests", &fileSize)); } // test file type not supported { FileInfo appendFileInfo; appendFileInfo.set_filetype(INODE_APPENDFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(appendFileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(appendFileInfo), + Return(StoreStatus::OK))); ASSERT_EQ(StatusCode::kNotSupported, - curvefs_->GetFileSize("/tests", &fileSize)); + curvefs_->GetFileSize("/tests", &fileSize)); } // test list directory fail { FileInfo dirInfo; dirInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(dirInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(dirInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(StatusCode::kStorageError, - curvefs_->GetFileSize("/tests", &fileSize)); + curvefs_->GetFileSize("/tests", &fileSize)); } } @@ -1301,9 +1279,9 @@ TEST_F(CurveFSTest, testReadDir) { { fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - 
.WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->ReadDir("/file1", &items), StatusCode::kNotDirectory); @@ -1313,8 +1291,8 @@ TEST_F(CurveFSTest, testReadDir) { // test getFile Not exist { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->ReadDir("/file1", &items), StatusCode::kDirNotExist); @@ -1324,9 +1302,9 @@ TEST_F(CurveFSTest, testReadDir) { { fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector<FileInfo> sideEffectArgs; sideEffectArgs.clear(); @@ -1335,9 +1313,9 @@ TEST_F(CurveFSTest, testReadDir) { sideEffectArgs.push_back(fileInfo); EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(sideEffectArgs), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(sideEffectArgs), + Return(StoreStatus::OK))); auto ret = curvefs_->ReadDir("/file1", &items); ASSERT_EQ(ret, StatusCode::kOK); @@ -1355,16 +1333,16 @@ TEST_F(CurveFSTest, testRecoverFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, RenameFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0), StatusCode::kOK); @@ -1373,19 +1351,19 @@ TEST_F(CurveFSTest, testRecoverFile) { // the upper dir not exist, can not recover { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); - ASSERT_EQ(curvefs_->RecoverFile("/k8s/file1", - "/RecycleBin/k8s/file1-10", 2), - StatusCode::kFileNotExists); + ASSERT_EQ( + curvefs_->RecoverFile("/k8s/file1", "/RecycleBin/k8s/file1-10", 2), + StatusCode::kFileNotExists); } // the same file exist, can not recover { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0), StatusCode::kFileExists); @@ -1400,12 +1378,12 @@ TEST_F(CurveFSTest, testRecoverFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1),
Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 2), StatusCode::kFileIdNotMatch); @@ -1420,12 +1398,12 @@ TEST_F(CurveFSTest, testRecoverFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0), StatusCode::kFileUnderDeleting); @@ -1440,12 +1418,12 @@ TEST_F(CurveFSTest, testRecoverFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0), StatusCode::kRecoverFileCloneMetaInstalled); @@ -1460,12 +1438,12 @@ TEST_F(CurveFSTest, testRecoverFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0), StatusCode::kRecoverFileError); @@ -1478,16 +1456,16 @@ TEST_F(CurveFSTest, testRecoverFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, RenameFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0), StatusCode::kStorageError); @@ -1502,22 +1480,22 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(4)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::KeyNotExist)); 
+ .Times(AtLeast(4)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, RenameFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kOK); @@ -1528,18 +1506,18 @@ TEST_F(CurveFSTest, testRenameFile) { FileInfo fileInfo1; fileInfo1.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); std::vector<FileInfo> snapshotFileInfos; snapshotFileInfos.push_back(fileInfo1); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos), + Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kFileUnderSnapShot); @@ -1548,8 +1526,8 @@ TEST_F(CurveFSTest, testRenameFile) { // old file not exist { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kFileNotExists); @@ -1560,16 +1538,16 @@ TEST_F(CurveFSTest, testRenameFile) { FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(3) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kFileNotExists); @@ -1582,22 +1560,22 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(4)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(4)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) +
.WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, RenameFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kStorageError); @@ -1625,9 +1603,9 @@ TEST_F(CurveFSTest, testRenameFile) { FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(AtLeast(1)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kNotSupported); @@ -1644,33 +1622,33 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_id(10); fileInfo3.set_id(11); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(6) - // 查找/file1 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /file1是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // 查找/trash/file2 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo3), - Return(StoreStatus::OK))) - // check /trash/file2是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo3), - Return(StoreStatus::OK))); + .Times(6) + // Find /file1 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /file1 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Find /trash/file2 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo3), Return(StoreStatus::OK))) + // Check if /trash/file2 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo3), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(3) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(3) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, ReplaceFileAndRecycleOldFile(_, _, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 10, 11), StatusCode::kOK); @@ -1683,18 +1661,18 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(4) - // 查找/file1 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /file1是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // 查找/trash/file2 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(4) + // Find /file1 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /file1 has a snapshot + .WillOnce( + 
DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Find /trash/file2 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kFileExists); @@ -1707,31 +1685,31 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(6) - // 查找/file1 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /file1是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // 查找/trash/file2 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /trash/file2是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(6) + // Find /file1 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /file1 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Find /trash/file2 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /trash/file2 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); std::vector snapshotFileInfos; snapshotFileInfos.push_back(fileInfo1); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(2) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos), + Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kFileUnderSnapShot); @@ -1744,32 +1722,32 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(6) - // 查找/file1 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /file1是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // 查找/trash/file2 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /trash/file2是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(6) + // Find /file1 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /file1 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Find /trash/file2 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /trash/file2 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, 
ReplaceFileAndRecycleOldFile(_, _, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(2) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(2) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kStorageError); @@ -1782,32 +1760,32 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(6) - // 查找/file1 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /file1是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // 查找/trash/file2 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /trash/file2是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(6) + // Find /file1 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /file1 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Find /trash/file2 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /trash/file2 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(2) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(2) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, ReplaceFileAndRecycleOldFile(_, _, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kStorageError); @@ -1820,32 +1798,32 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(6) - // 查找/file1 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /file1是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // 查找/trash/file2 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /trash/file2是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(6) + // Find /file1 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /file1 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Find /trash/file2 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /trash/file2 has a snapshot
+ .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(2) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(2) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, ReplaceFileAndRecycleOldFile(_, _, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kOK); @@ -1859,18 +1837,18 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(6) - // 查找/file1 + // Find /file1 .WillOnce( DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /file1是否有快照 + // Check if /file1 has a snapshot .WillOnce( DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // 查找/trash/file2 + // Find /trash/file2 .WillOnce( DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce( DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /trash/file2是否有快照 + // Check if /trash/file2 has a snapshot .WillOnce( DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce( @@ -1908,26 +1886,25 @@ TEST_F(CurveFSTest, testExtendFile) { fileInfo2.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_length(kMiniFileLength); - EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 0), StatusCode::kShrinkBiggerFile); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - kMiniFileLength), StatusCode::kOK); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", kMiniFileLength), + StatusCode::kOK); } // test enlarge size unit is not segment @@ -1941,14 +1918,14 @@ TEST_F(CurveFSTest, testExtendFile) { fileInfo2.set_segmentsize(DefaultSegmentSize); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 1 + kMiniFileLength), StatusCode::kExtentUnitError); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 1 + kMiniFileLength), + StatusCode::kExtentUnitError); } // test enlarge size ok @@ -1962,11 +1939,11 @@ TEST_F(CurveFSTest, testExtendFile) { fileInfo2.set_segmentsize(DefaultSegmentSize); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
- Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); FileInfo modifiedInfo; EXPECT_CALL(*storage_, PutFile(_)) @@ -1974,8 +1951,8 @@ TEST_F(CurveFSTest, testExtendFile) { .WillOnce( DoAll(SaveArg<0>(&modifiedInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMiniFileLength), StatusCode::kOK); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMiniFileLength), + StatusCode::kOK); // previous fileinfo doesn't have throttle params ASSERT_FALSE(modifiedInfo.has_throttleparams()); @@ -1999,11 +1976,11 @@ TEST_F(CurveFSTest, testExtendFile) { p2->set_limit(1); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); FileInfo modifiedInfo; EXPECT_CALL(*storage_, PutFile(_)) @@ -2011,16 +1988,14 @@ TEST_F(CurveFSTest, testExtendFile) { .WillOnce( DoAll(SaveArg<0>(&modifiedInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMiniFileLength), StatusCode::kOK); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMiniFileLength), + StatusCode::kOK); // previous fileinfo has throttle params and has been modified by user ASSERT_TRUE(modifiedInfo.has_throttleparams()); ASSERT_EQ(2, modifiedInfo.throttleparams().throttleparams_size()); - ASSERT_EQ( - 1, modifiedInfo.throttleparams().throttleparams()[0].limit()); - ASSERT_EQ( - 1, modifiedInfo.throttleparams().throttleparams()[1].limit()); + ASSERT_EQ(1, modifiedInfo.throttleparams().throttleparams()[0].limit()); + ASSERT_EQ(1, modifiedInfo.throttleparams().throttleparams()[1].limit()); } // test enlarge size ok, and update throttle params @@ -2041,11 +2016,11 @@ TEST_F(CurveFSTest, testExtendFile) { p2->set_limit(120 * kMB); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); FileInfo modifiedInfo; EXPECT_CALL(*storage_, PutFile(_)) @@ -2053,16 +2028,14 @@ TEST_F(CurveFSTest, testExtendFile) { .WillOnce( DoAll(SaveArg<0>(&modifiedInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMiniFileLength), StatusCode::kOK); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMiniFileLength), + StatusCode::kOK); // previous fileinfo has throttle params and has been modified by user ASSERT_TRUE(modifiedInfo.has_throttleparams()); ASSERT_EQ(2, modifiedInfo.throttleparams().throttleparams_size()); - ASSERT_NE( - 1, modifiedInfo.throttleparams().throttleparams()[0].limit()); - ASSERT_NE( - 1, modifiedInfo.throttleparams().throttleparams()[1].limit()); + ASSERT_NE(1, modifiedInfo.throttleparams().throttleparams()[0].limit()); + ASSERT_NE(1, modifiedInfo.throttleparams().throttleparams()[1].limit()); } // test size over maxsize @@ -2076,14 +2049,14 @@ TEST_F(CurveFSTest, testExtendFile) {
fileInfo2.set_segmentsize(DefaultSegmentSize); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMaxFileLength), StatusCode::kFileLengthNotSupported); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMaxFileLength), + StatusCode::kFileLengthNotSupported); } // file not exist @@ -2097,14 +2070,13 @@ TEST_F(CurveFSTest, testExtendFile) { fileInfo2.set_segmentsize(DefaultSegmentSize); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::KeyNotExist)); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMiniFileLength), - StatusCode::kFileNotExists); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMiniFileLength), + StatusCode::kFileNotExists); } // extend directory @@ -2116,15 +2088,14 @@ TEST_F(CurveFSTest, testExtendFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMiniFileLength), - StatusCode::kNotSupported); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMiniFileLength), + StatusCode::kNotSupported); } } @@ -2135,20 +2106,19 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), - StatusCode::kOK); + ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kOK); } // file owner same with newowner @@ -2157,12 +2127,11 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(1) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner1"), - StatusCode::kOK); + ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner1"), StatusCode::kOK); } // file is under snapshot, can not changeOwner @@ -2171,16 +2140,16 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); 
fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); std::vector snapshotFileInfos; snapshotFileInfos.push_back(fileInfo1); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos), + Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kFileUnderSnapShot); @@ -2189,8 +2158,8 @@ TEST_F(CurveFSTest, testChangeOwner) { // file not exist { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kFileNotExists); @@ -2202,17 +2171,17 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kStorageError); @@ -2224,20 +2193,19 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_DIRECTORY); fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), - StatusCode::kOK); + ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kOK); } // changeOwner dir not empty @@ -2246,16 +2214,16 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_DIRECTORY); fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); std::vector fileInfoList; fileInfoList.push_back(fileInfo1); EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kDirNotEmpty); @@ -2267,9 +2235,9 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_APPENDECFILE); fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - 
.WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kNotSupported); @@ -2315,18 +2283,19 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 0, false, &segment), StatusCode::kOK); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 0, false, &segment), + StatusCode::kOK); } // test normal get & allocate not exist segment @@ -2343,29 +2312,28 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); - + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*mockChunkAllocator_, - AllocateChunkSegment(_, _, _, _, _, _)) - .Times(1) - .WillOnce(Return(true)); - + AllocateChunkSegment(_, _, _, _, _, _)) + .Times(1) + .WillOnce(Return(true)); EXPECT_CALL(*storage_, PutSegment(_, _, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 0, true, &segment), StatusCode::kOK); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 0, true, &segment), + StatusCode::kOK); } // file is a directory @@ -2379,14 +2347,15 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 0, false, &segment), StatusCode::kParaError); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 0, false, &segment), + StatusCode::kParaError); } // segment offset not align file segment size @@ -2403,14 +2372,15 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), 
Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 1, false, &segment), StatusCode::kParaError); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 1, false, &segment), + StatusCode::kParaError); } // file length < segment offset + segmentsize @@ -2427,14 +2397,15 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - kMiniFileLength, false, &segment), StatusCode::kParaError); + ASSERT_EQ(curvefs_->GetOrAllocateSegment( + "/user1/file2", kMiniFileLength, false, &segment), + StatusCode::kParaError); } // alloc chunk segment fail @@ -2451,24 +2422,24 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); - + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*mockChunkAllocator_, - AllocateChunkSegment(_, _, _, _, _, _)) - .Times(1) - .WillOnce(Return(false)); + AllocateChunkSegment(_, _, _, _, _, _)) + .Times(1) + .WillOnce(Return(false)); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 0, true, &segment), StatusCode::kSegmentAllocateError); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 0, true, &segment), + StatusCode::kSegmentAllocateError); } // put segment fail @@ -2485,29 +2456,28 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); - + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*mockChunkAllocator_, - AllocateChunkSegment(_, _, _, _, _, _)) - .Times(1) - .WillOnce(Return(true)); - + AllocateChunkSegment(_, _, _, _, _, _)) + .Times(1) + .WillOnce(Return(true)); EXPECT_CALL(*storage_, PutSegment(_, _, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 0, true, &segment), StatusCode::kStorageError); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 0, true, &segment), + StatusCode::kStorageError); } } @@ -2732,8 +2702,8 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { std::string fileName = "/snapshotFile1WithInvalidClientVersion"; EXPECT_CALL(*storage_, GetFile(_, _, _)) - 
.WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2753,10 +2723,10 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(Return(StoreStatus::OK)) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce(Return(StoreStatus::OK)) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2769,8 +2739,9 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { FileInfo snapShotFileInfoRet; ASSERT_EQ(curvefs_->CreateSnapShotFile( - "/snapshotFile1WithInvalidClientVersion2", - &snapShotFileInfoRet), StatusCode::kClientVersionNotMatch); + "/snapshotFile1WithInvalidClientVersion2", + &snapShotFileInfoRet), + StatusCode::kClientVersionNotMatch); } { // test client version empty invalid @@ -2780,10 +2751,10 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(Return(StoreStatus::OK)) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce(Return(StoreStatus::OK)) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2791,13 +2762,14 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { FileInfo info; ASSERT_EQ(StatusCode::kOK, - curvefs_->RefreshSession( - fileName, "", 0 , "", "127.0.0.1", 1234, "", &info)); + curvefs_->RefreshSession(fileName, "", 0, "", "127.0.0.1", + 1234, "", &info)); FileInfo snapShotFileInfoRet; ASSERT_EQ(curvefs_->CreateSnapShotFile( - "/snapshotFile1WithInvalidClientVersion2", - &snapShotFileInfoRet), StatusCode::kClientVersionNotMatch); + "/snapshotFile1WithInvalidClientVersion2", + &snapShotFileInfoRet), + StatusCode::kClientVersionNotMatch); } { // test under snapshot @@ -2806,9 +2778,9 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2819,24 +2791,22 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { snapShotFiles.push_back(fileInfo1); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); FileInfo snapShotFileInfoRet; ASSERT_EQ(curvefs_->CreateSnapShotFile("/snapshotFile1", - &snapShotFileInfoRet), StatusCode::kFileUnderSnapShot); + &snapShotFileInfoRet), + StatusCode::kFileUnderSnapShot); } { // test File is not PageFile - } - { + } { // test storage ListFile error - } - { + } { // test GenId error - } - { + } { // test create snapshot ok FileInfo originalFile; originalFile.set_id(1); @@ -2845,25 +2815,24 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - 
.WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(2), - Return(true))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<0>(2), Return(true))); FileInfo snapShotFileInfo; EXPECT_CALL(*storage_, SnapShotFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2871,15 +2840,16 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { // test client version valid FileInfo snapShotFileInfoRet; - ASSERT_EQ(curvefs_->CreateSnapShotFile("/originalFile", - &snapShotFileInfoRet), StatusCode::kOK); + ASSERT_EQ( + curvefs_->CreateSnapShotFile("/originalFile", &snapShotFileInfoRet), + StatusCode::kOK); ASSERT_EQ(snapShotFileInfoRet.parentid(), originalFile.id()); ASSERT_EQ(snapShotFileInfoRet.filename(), - originalFile.filename() + "-" + - std::to_string(originalFile.seqnum()) ); + originalFile.filename() + "-" + + std::to_string(originalFile.seqnum())); ASSERT_EQ(snapShotFileInfoRet.filestatus(), FileStatus::kFileCreated); - ASSERT_EQ( - snapShotFileInfoRet.filetype(), FileType::INODE_SNAPSHOT_PAGEFILE); + ASSERT_EQ(snapShotFileInfoRet.filetype(), + FileType::INODE_SNAPSHOT_PAGEFILE); } { // test create snapshot ok @@ -2890,25 +2860,24 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(2), - Return(true))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<0>(2), Return(true))); FileInfo snapShotFileInfo; EXPECT_CALL(*storage_, SnapShotFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2917,14 +2886,15 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { // test client version valid FileInfo snapShotFileInfoRet; ASSERT_EQ(curvefs_->CreateSnapShotFile("/originalFile2", - &snapShotFileInfoRet), StatusCode::kOK); + &snapShotFileInfoRet), + StatusCode::kOK); ASSERT_EQ(snapShotFileInfoRet.parentid(), originalFile.id()); ASSERT_EQ(snapShotFileInfoRet.filename(), - originalFile.filename() + "-" + - std::to_string(originalFile.seqnum()) ); + originalFile.filename() + "-" + + std::to_string(originalFile.seqnum())); ASSERT_EQ(snapShotFileInfoRet.filestatus(), FileStatus::kFileCreated); - ASSERT_EQ( - snapShotFileInfoRet.filetype(), FileType::INODE_SNAPSHOT_PAGEFILE); + ASSERT_EQ(snapShotFileInfoRet.filetype(), + FileType::INODE_SNAPSHOT_PAGEFILE); } { // test storage snapshotFile Error 
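[Editor's sketch] Taken together, the two passing CreateSnapShotFile cases pin down the snapshot file's full contract. Condensed, with the literal fixture values (id 1, name "originalFile", seqnum 1):

    FileInfo snap;
    ASSERT_EQ(curvefs_->CreateSnapShotFile("/originalFile", &snap),
              StatusCode::kOK);
    ASSERT_EQ(snap.parentid(), 1u);                  // child of the source file
    ASSERT_EQ(snap.filename(), "originalFile-1");    // "<name>-<seqnum>"
    ASSERT_EQ(snap.filestatus(), FileStatus::kFileCreated);
    ASSERT_EQ(snap.filetype(), FileType::INODE_SNAPSHOT_PAGEFILE);

The second passing case repeats the same assertions via "/originalFile2", confirming the name is derived from the file's own filename and seqnum rather than hard-coded.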
@@ -2934,22 +2904,21 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { TEST_F(CurveFSTest, testListSnapShotFile) { { // workPath error - } - { + } { // dir not support std::vector snapFileInfos; ASSERT_EQ(curvefs_->ListSnapShotFile("/", &snapFileInfos), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } { // lookupFile error std::vector snapFileInfos; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->ListSnapShotFile("/originalFile", &snapFileInfos), - StatusCode::kFileNotExists); + StatusCode::kFileNotExists); } { // check type not support @@ -2960,13 +2929,13 @@ TEST_F(CurveFSTest, testListSnapShotFile) { originalFile.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapFileInfos; ASSERT_EQ(curvefs_->ListSnapShotFile("originalFile", &snapFileInfos), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } { // ListFile error @@ -2977,17 +2946,17 @@ TEST_F(CurveFSTest, testListSnapShotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); std::vector snapFileInfos; ASSERT_EQ(curvefs_->ListSnapShotFile("originalFile", &snapFileInfos), - StatusCode::kStorageError); + StatusCode::kStorageError); } { // ListFile ok @@ -2998,37 +2967,36 @@ TEST_F(CurveFSTest, testListSnapShotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapFileInfos; - FileInfo snapShotFile; + FileInfo snapShotFile; snapShotFile.set_parentid(1); snapFileInfos.push_back(snapShotFile); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapFileInfos), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapFileInfos), + Return(StoreStatus::OK))); std::vector snapFileInfosRet; ASSERT_EQ(curvefs_->ListSnapShotFile("originalFile", &snapFileInfosRet), - StatusCode::kOK); + StatusCode::kOK); ASSERT_EQ(snapFileInfosRet.size(), 1); ASSERT_EQ(snapFileInfosRet[0].SerializeAsString(), - snapShotFile.SerializeAsString()); - } + snapShotFile.SerializeAsString()); + } } - TEST_F(CurveFSTest, testGetSnapShotFileInfo) { { // ListSnapShotFile error FileInfo snapshotFileInfo; ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/", 1, &snapshotFileInfo), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } { // snapfile not exist(not under snapshot) @@ -3039,19 +3007,20 @@ TEST_F(CurveFSTest, testGetSnapShotFileInfo) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), 
Return(StoreStatus::OK))); std::vector snapShotFiles; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); FileInfo snapshotFileInfo; - ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile", - 1, &snapshotFileInfo), StatusCode::kSnapshotFileNotExists); + ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile", 1, + &snapshotFileInfo), + StatusCode::kSnapshotFileNotExists); } { // under snapshot, butsnapfile not exist @@ -3062,22 +3031,23 @@ TEST_F(CurveFSTest, testGetSnapShotFileInfo) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; snapInfo.set_seqnum(2); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); FileInfo snapshotFileInfo; - ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile", - 1, &snapshotFileInfo), StatusCode::kSnapshotFileNotExists); + ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile", 1, + &snapshotFileInfo), + StatusCode::kSnapshotFileNotExists); } { // test ok @@ -3088,24 +3058,25 @@ TEST_F(CurveFSTest, testGetSnapShotFileInfo) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; snapInfo.set_seqnum(1); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); FileInfo snapshotFileInfo; - ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile", - 1, &snapshotFileInfo), StatusCode::kOK); + ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile", 1, + &snapshotFileInfo), + StatusCode::kOK); ASSERT_EQ(snapshotFileInfo.SerializeAsString(), - snapInfo.SerializeAsString()); + snapInfo.SerializeAsString()); } } @@ -3114,7 +3085,7 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { // GetSnapShotFileInfo error PageFileSegment segment; ASSERT_EQ(curvefs_->GetSnapShotFileSegment("/", 1, 0, &segment), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } { // offset not align @@ -3125,9 +3096,9 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3135,13 +3106,14 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { snapInfo.set_segmentsize(DefaultSegmentSize); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - 
Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); PageFileSegment segment; - ASSERT_EQ(curvefs_->GetSnapShotFileSegment("/originalFile", - 1, 1, &segment), StatusCode::kParaError); + ASSERT_EQ( + curvefs_->GetSnapShotFileSegment("/originalFile", 1, 1, &segment), + StatusCode::kParaError); } { // storage->GetSegment return error @@ -3154,11 +3126,11 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3167,17 +3139,18 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { snapInfo.set_length(DefaultSegmentSize); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); PageFileSegment segment; - ASSERT_EQ(curvefs_->GetSnapShotFileSegment("/originalFile", - 1, 0, &segment), StatusCode::kSegmentNotAllocated); + ASSERT_EQ( + curvefs_->GetSnapShotFileSegment("/originalFile", 1, 0, &segment), + StatusCode::kSegmentNotAllocated); } { // ok @@ -3190,12 +3163,11 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); - + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3204,9 +3176,9 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { snapInfo.set_length(DefaultSegmentSize); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); PageFileSegment expectSegment; expectSegment.set_logicalpoolid(1); @@ -3214,20 +3186,21 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { expectSegment.set_chunksize(curvefs_->GetDefaultChunkSize()); expectSegment.set_startoffset(0); - PageFileChunkInfo *chunkInfo = expectSegment.add_chunks(); + PageFileChunkInfo* chunkInfo = expectSegment.add_chunks(); chunkInfo->set_chunkid(1); chunkInfo->set_copysetid(1); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(expectSegment), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(expectSegment), + Return(StoreStatus::OK))); PageFileSegment segment; - ASSERT_EQ(curvefs_->GetSnapShotFileSegment("/originalFile", - 1, 0, &segment), StatusCode::kOK); + ASSERT_EQ( + curvefs_->GetSnapShotFileSegment("/originalFile", 1, 0, &segment), + 
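[Editor's sketch] Three outcomes are interleaved through these GetSnapShotFileSegment cases. A distilled decision function — the name and shape are invented for illustration; the types are the test's own:

    StatusCode ExpectedSnapshotSegmentLookup(uint64_t offset,
                                             const FileInfo& snap,
                                             StoreStatus segmentLookup) {
        if (offset % snap.segmentsize() != 0)
            return StatusCode::kParaError;            // offset 1 is not aligned
        if (segmentLookup == StoreStatus::KeyNotExist)
            return StatusCode::kSegmentNotAllocated;  // hole in a sparse volume
        return StatusCode::kOK;                       // stored segment returned
    }

The kOK case additionally compares SerializeAsString() of the stored and returned PageFileSegment, so the MDS is expected to hand the segment back byte-for-byte unmodified.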
StatusCode::kOK); ASSERT_EQ(expectSegment.SerializeAsString(), - segment.SerializeAsString()); + segment.SerializeAsString()); } } @@ -3236,7 +3209,7 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { // GetSnapShotFileInfo error FileInfo snapshotFileInfo; ASSERT_EQ(curvefs_->DeleteFileSnapShotFile("/", 1, nullptr), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } { // under deleteing @@ -3247,9 +3220,9 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3258,12 +3231,12 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_EQ(curvefs_->DeleteFileSnapShotFile("/originalFile", 1, nullptr), - StatusCode::kSnapshotDeleting); + StatusCode::kSnapshotDeleting); } { // delete snapshot file filetype error (internal case) @@ -3274,9 +3247,9 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3286,12 +3259,12 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_EQ(curvefs_->DeleteFileSnapShotFile("/originalFile", 1, nullptr), - StatusCode::KInternalError); + StatusCode::KInternalError); } { // delete storage error @@ -3302,9 +3275,9 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3315,16 +3288,16 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); EXPECT_EQ(curvefs_->DeleteFileSnapShotFile("/originalFile", 1, nullptr), - StatusCode::KInternalError); + StatusCode::KInternalError); } { // delete snapshot ok @@ -3335,9 +3308,9 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + 
DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3348,21 +3321,20 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - EXPECT_CALL(*mockcleanManager_, - SubmitDeleteSnapShotFileJob(_, _)) - .Times(1) - .WillOnce(Return(true)); + EXPECT_CALL(*mockcleanManager_, SubmitDeleteSnapShotFileJob(_, _)) + .Times(1) + .WillOnce(Return(true)); EXPECT_EQ(curvefs_->DeleteFileSnapShotFile("/originalFile", 1, nullptr), - StatusCode::kOK); + StatusCode::kOK); } { // message the snapshot delete manager error, return error @@ -3373,9 +3345,9 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3386,21 +3358,20 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - EXPECT_CALL(*mockcleanManager_, - SubmitDeleteSnapShotFileJob(_, _)) - .Times(1) - .WillOnce(Return(false)); + EXPECT_CALL(*mockcleanManager_, SubmitDeleteSnapShotFileJob(_, _)) + .Times(1) + .WillOnce(Return(false)); EXPECT_EQ(curvefs_->DeleteFileSnapShotFile("/originalFile", 1, nullptr), - StatusCode::KInternalError); + StatusCode::KInternalError); } } @@ -3409,7 +3380,7 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { { PageFileSegment segment; ASSERT_EQ(curvefs_->GetSnapShotFileSegment("/", 1, 0, &segment), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } // snapshot file is not deleting @@ -3421,9 +3392,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3431,14 +3402,15 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { snapInfo.set_filestatus(FileStatus::kFileCreated); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); FileStatus fileStatus; uint32_t progress; - ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", - 1, &fileStatus, &progress), StatusCode::kOK); + ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1, + &fileStatus, &progress), + StatusCode::kOK); ASSERT_EQ(fileStatus, 
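[Editor's sketch] The DeleteFileSnapShotFile cases around this point trace one state machine. A compact restatement — the function and its check order are invented for illustration; the enums are the ones the tests use:

    StatusCode ExpectedDeleteOutcome(const FileInfo& snap, StoreStatus put,
                                     bool jobAccepted) {
        if (snap.filetype() != FileType::INODE_SNAPSHOT_PAGEFILE)
            return StatusCode::KInternalError;     // corrupt record, internal case
        if (snap.filestatus() == FileStatus::kFileDeleting)
            return StatusCode::kSnapshotDeleting;  // delete already in flight
        if (put != StoreStatus::OK)
            return StatusCode::KInternalError;     // mark-deleting not persisted
        if (!jobAccepted)
            return StatusCode::KInternalError;     // clean manager refused the job
        return StatusCode::kOK;                    // async reclamation queued
    }

The design these cases imply is a two-phase delete: PutFile persists the kFileDeleting mark first, then SubmitDeleteSnapShotFileJob hands chunk reclamation to the clean manager, so a refused job surfaces as an error rather than a silently unserviced status flip.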
FileStatus::kFileCreated); ASSERT_EQ(progress, 0); } @@ -3452,9 +3424,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3464,21 +3436,21 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { std::vector snapShotFiles2; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles2), - Return(StoreStatus::OK))); - - EXPECT_CALL(*mockcleanManager_, - GetTask(_)) - .Times(1) - .WillOnce(Return(nullptr)); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(snapShotFiles), Return(StoreStatus::OK))) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles2), + Return(StoreStatus::OK))); + + EXPECT_CALL(*mockcleanManager_, GetTask(_)) + .Times(1) + .WillOnce(Return(nullptr)); FileStatus fileStatus; uint32_t progress; - ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", - 1, &fileStatus, &progress), StatusCode::kSnapshotFileNotExists); + ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1, + &fileStatus, &progress), + StatusCode::kSnapshotFileNotExists); ASSERT_EQ(progress, 100); } @@ -3491,9 +3463,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3501,19 +3473,19 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { snapInfo.set_filestatus(FileStatus::kFileDeleting); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); - EXPECT_CALL(*mockcleanManager_, - GetTask(_)) - .Times(1) - .WillOnce(Return(nullptr)); + EXPECT_CALL(*mockcleanManager_, GetTask(_)) + .Times(1) + .WillOnce(Return(nullptr)); FileStatus fileStatus; uint32_t progress; - ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", - 1, &fileStatus, &progress), StatusCode::kSnapshotFileDeleteError); + ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1, + &fileStatus, &progress), + StatusCode::kSnapshotFileDeleteError); ASSERT_EQ(fileStatus, FileStatus::kFileDeleting); ASSERT_EQ(progress, 0); } @@ -3527,9 +3499,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3537,24 +3509,25 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { snapInfo.set_filestatus(FileStatus::kFileDeleting); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + 
.WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); auto task = - std::make_shared(1, nullptr, originalFile); + std::make_shared(1, nullptr, originalFile); TaskProgress taskProgress; taskProgress.SetProgress(50); taskProgress.SetStatus(TaskStatus::PROGRESSING); task->SetTaskProgress(taskProgress); EXPECT_CALL(*mockcleanManager_, GetTask(_)) - .Times(1) - .WillOnce(Return(task)); + .Times(1) + .WillOnce(Return(task)); FileStatus fileStatus; uint32_t progress; - ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", - 1, &fileStatus, &progress), StatusCode::kOK); + ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1, + &fileStatus, &progress), + StatusCode::kOK); ASSERT_EQ(fileStatus, FileStatus::kFileDeleting); ASSERT_EQ(progress, 50); } @@ -3568,9 +3541,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3578,24 +3551,25 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { snapInfo.set_filestatus(FileStatus::kFileDeleting); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); auto task = - std::make_shared(1, nullptr, originalFile); + std::make_shared(1, nullptr, originalFile); TaskProgress taskProgress; taskProgress.SetProgress(50); taskProgress.SetStatus(TaskStatus::FAILED); task->SetTaskProgress(taskProgress); EXPECT_CALL(*mockcleanManager_, GetTask(_)) - .Times(1) - .WillOnce(Return(task)); + .Times(1) + .WillOnce(Return(task)); FileStatus fileStatus; uint32_t progress; - ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", - 1, &fileStatus, &progress), StatusCode::kOK); + ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1, + &fileStatus, &progress), + StatusCode::kOK); ASSERT_EQ(fileStatus, FileStatus::kFileDeleting); ASSERT_EQ(progress, 50); } @@ -3609,9 +3583,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3619,66 +3593,67 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { snapInfo.set_filestatus(FileStatus::kFileDeleting); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); auto task = - std::make_shared(1, nullptr, originalFile); + std::make_shared(1, nullptr, originalFile); TaskProgress taskProgress; taskProgress.SetProgress(100); taskProgress.SetStatus(TaskStatus::SUCCESS); task->SetTaskProgress(taskProgress); EXPECT_CALL(*mockcleanManager_, GetTask(_)) - .Times(1) - .WillOnce(Return(task)); + .Times(1) + .WillOnce(Return(task)); FileStatus fileStatus; uint32_t progress; - ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", - 1, &fileStatus, 
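[Editor's sketch] Condensing the CheckSnapShotFileStatus cases in this stretch into a table of observed behavior (nothing beyond what they assert; note that a FAILED clean task still reports kOK with its last recorded progress):

    struct Observed {
        bool taskFound;       // mockcleanManager_->GetTask(_) returned a task
        bool snapshotListed;  // ListSnapshotFile still contains the snapshot
        StatusCode ret;
        uint32_t progress;
    };
    const Observed observed[] = {
        {false, false, StatusCode::kSnapshotFileNotExists, 100},  // already gone
        {false, true,  StatusCode::kSnapshotFileDeleteError, 0},  // no task found
        {true,  true,  StatusCode::kOK, 50},   // PROGRESSING at 50; FAILED at 50
        {true,  true,  StatusCode::kOK, 100},  // SUCCESS at 100
    };

A snapshot that is not deleting at all (kFileCreated) simply reports its own status with progress 0, per the first case of this test.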
&progress), StatusCode::kOK); + ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1, + &fileStatus, &progress), + StatusCode::kOK); ASSERT_EQ(fileStatus, FileStatus::kFileDeleting); ASSERT_EQ(progress, 100); } } TEST_F(CurveFSTest, testOpenFile) { - // 文件不存在 + // File does not exist { ProtoSession protoSession; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); - ASSERT_EQ(curvefs_->OpenFile("/file1", "127.0.0.1", - &protoSession, &fileInfo), - StatusCode::kFileNotExists); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); + ASSERT_EQ( + curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), + StatusCode::kFileNotExists); ASSERT_EQ(curvefs_->GetOpenFileNum(), 0); } - // open目录 + // Open directory { ProtoSession protoSession; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->OpenFile("/file1", "127.0.0.1", - &protoSession, &fileInfo), - StatusCode::kNotSupported); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); + ASSERT_EQ( + curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), + StatusCode::kNotSupported); ASSERT_EQ(curvefs_->GetOpenFileNum(), 0); } - // 执行成功 + // Execution successful { ProtoSession protoSession; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ( curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), @@ -3854,19 +3829,19 @@ TEST_F(CurveFSTest, testOpenFile) { TEST_F(CurveFSTest, testCloseFile) { ProtoSession protoSession; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); - // 先插入session + // Insert session first EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ( curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), StatusCode::kOK); - // 执行成功 + // Execution successful { EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(1) @@ -3880,39 +3855,41 @@ TEST_F(CurveFSTest, testCloseFile) { TEST_F(CurveFSTest, testRefreshSession) { ProtoSession protoSession; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); - // 先插入session + // Insert session first EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->OpenFile("/file1", "127.0.0.1", - &protoSession, &fileInfo), - StatusCode::kOK); + ASSERT_EQ( + curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), + StatusCode::kOK); - // 文件不存在 + // File does not exist { - FileInfo fileInfo1; + FileInfo fileInfo1; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->RefreshSession("/file1", "sessionidxxxxx", 12345, - "signaturexxxx", "127.0.0.1", 1234, "", &fileInfo1), + "signaturexxxx", "127.0.0.1", 1234, + "", &fileInfo1), StatusCode::kFileNotExists); } - // 执行成功 + // Execution successful { - FileInfo fileInfo1; + FileInfo fileInfo1; EXPECT_CALL(*storage_, GetFile(_, 
_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); uint64_t date = ::curve::common::TimeUtility::GetTimeofDayUs(); ASSERT_EQ(curvefs_->RefreshSession("/file1", protoSession.sessionid(), - date, "signaturexxxx", "127.0.0.1", 1234, "", &fileInfo1), + date, "signaturexxxx", "127.0.0.1", + 1234, "", &fileInfo1), StatusCode::kOK); ASSERT_EQ(1, curvefs_->GetOpenFileNum()); } @@ -3921,39 +3898,41 @@ TEST_F(CurveFSTest, testRefreshSession) { TEST_F(CurveFSTest, testCheckRenameNewfilePathOwner) { uint64_t date = TimeUtility::GetTimeofDayUs(); - // root用户,签名匹配,date超时 + // Root user, signature matching, date timeout { std::string filename = "/file1"; - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions_.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions_.rootPassword); - ASSERT_EQ(curvefs_->CheckDestinationOwner(filename, - authOptions_.rootOwner, sig, date), + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions_.rootOwner); + std::string sig = Authenticator::CalcString2Signature( + str2sig, authOptions_.rootPassword); + ASSERT_EQ(curvefs_->CheckDestinationOwner( + filename, authOptions_.rootOwner, sig, date), StatusCode::kOK); - ASSERT_EQ(curvefs_->CheckDestinationOwner(filename, - authOptions_.rootOwner, sig, date + 15 * 2000 * 2000), - StatusCode::kOwnerAuthFail); + ASSERT_EQ( + curvefs_->CheckDestinationOwner(filename, authOptions_.rootOwner, + sig, date + 15 * 2000 * 2000), + StatusCode::kOwnerAuthFail); } - // root用户,签名不匹配 + // Root user, signature mismatch { - ASSERT_EQ(curvefs_->CheckDestinationOwner("/file1", - authOptions_.rootOwner, "wrongpass", date), + ASSERT_EQ(curvefs_->CheckDestinationOwner( + "/file1", authOptions_.rootOwner, "wrongpass", date), StatusCode::kOwnerAuthFail); } - // 普通用户,根目录下的文件非root用户认证失败 + // Ordinary user, non root user authentication failed for files in the root + // directory { FileInfo fileInfo; fileInfo.set_owner(authOptions_.rootOwner); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->CheckDestinationOwner("/file1", - "normaluser", "wrongpass", date), + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); + ASSERT_EQ(curvefs_->CheckDestinationOwner("/file1", "normaluser", + "wrongpass", date), StatusCode::kOwnerAuthFail); } } @@ -3961,16 +3940,16 @@ TEST_F(CurveFSTest, testCheckRenameNewfilePathOwner) { TEST_F(CurveFSTest, testCheckPathOwner) { uint64_t date = TimeUtility::GetTimeofDayUs(); - // root用户,签名匹配, 并检测date过期 + // Root user, signature matching, and detecting date expiration { std::string filename = "/file1"; - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions_.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions_.rootPassword); + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions_.rootOwner); + std::string sig = Authenticator::CalcString2Signature( + str2sig, authOptions_.rootPassword); - ASSERT_EQ(curvefs_->CheckPathOwner(filename, - authOptions_.rootOwner, sig, date), + ASSERT_EQ(curvefs_->CheckPathOwner(filename, authOptions_.rootOwner, + sig, date), StatusCode::kOK); ASSERT_EQ(curvefs_->CheckPathOwner(filename, authOptions_.rootOwner, @@ -3978,168 +3957,176 @@ TEST_F(CurveFSTest, testCheckPathOwner) { StatusCode::kOwnerAuthFail); } - // root用户,签名不匹配 + // Root user, signature mismatch { 
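[Editor's note] For reference, the passing root-auth setup used by all three Check*Owner tests, verbatim apart from variable names; the replay at date + 15 * 2000 * 2000 us (i.e. 60 seconds later) is the stale-date rejection:

    uint64_t date = TimeUtility::GetTimeofDayUs();
    std::string str2sig =
        Authenticator::GetString2Signature(date, authOptions_.rootOwner);
    std::string sig = Authenticator::CalcString2Signature(
        str2sig, authOptions_.rootPassword);
    // Fresh date -> kOK; same sig presented 60 s later -> kOwnerAuthFail.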
ASSERT_EQ(curvefs_->CheckPathOwner("/file1", authOptions_.rootOwner, - "wrongpass", date), + "wrongpass", date), StatusCode::kOwnerAuthFail); } - // 普通用户,根目录下的文件非root用户认证成功, 并检测date超时 + // Normal user, non root user authentication successful for files in the + // root directory, and detection of date timeout { - ASSERT_EQ(curvefs_->CheckPathOwner("/file1", "normaluser", - "wrongpass", date), - StatusCode::kOK); + ASSERT_EQ( + curvefs_->CheckPathOwner("/file1", "normaluser", "wrongpass", date), + StatusCode::kOK); ASSERT_EQ(curvefs_->CheckPathOwner("/file1", "normaluser", "wrongpass", - date + 15 * 2000 * 2000), + date + 15 * 2000 * 2000), StatusCode::kOwnerAuthFail); } } TEST_F(CurveFSTest, testCheckFileOwner) { uint64_t date = TimeUtility::GetTimeofDayUs(); - // root用户,签名匹配 + // Root user, signature matching { std::string filename = "/file1"; - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions_.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions_.rootPassword); + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions_.rootOwner); + std::string sig = Authenticator::CalcString2Signature( + str2sig, authOptions_.rootPassword); - ASSERT_EQ(curvefs_->CheckFileOwner(filename, - authOptions_.rootOwner, sig, date), + ASSERT_EQ(curvefs_->CheckFileOwner(filename, authOptions_.rootOwner, + sig, date), StatusCode::kOK); - ASSERT_EQ(curvefs_->CheckFileOwner(filename, - authOptions_.rootOwner, sig, date + 15 * 2000 * 2000), + ASSERT_EQ(curvefs_->CheckFileOwner(filename, authOptions_.rootOwner, + sig, date + 15 * 2000 * 2000), StatusCode::kOwnerAuthFail); } - // root用户,签名不匹配 + // Root user, signature mismatch { - ASSERT_EQ(curvefs_->CheckFileOwner("/file1", - authOptions_.rootOwner, "wrongpass", date), + ASSERT_EQ(curvefs_->CheckFileOwner("/file1", authOptions_.rootOwner, + "wrongpass", date), StatusCode::kOwnerAuthFail); } - // 普通用户,根目录下的文件非root用户认证成功 + // Normal user, non root user authentication succeeded for files in the root + // directory { FileInfo fileInfo; fileInfo.set_owner("normaluser"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->CheckFileOwner("/file1", - "normaluser", "", date), StatusCode::kOK); + ASSERT_EQ(curvefs_->CheckFileOwner("/file1", "normaluser", "", date), + StatusCode::kOK); } - // 普通用户,根目录下的文件非root用户认证失败 + // Ordinary user, non root user authentication failed for files in the root + // directory { FileInfo fileInfo; fileInfo.set_owner("normaluser"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->CheckFileOwner("/file1", - "normaluser1", "", date), StatusCode::kOwnerAuthFail); + ASSERT_EQ(curvefs_->CheckFileOwner("/file1", "normaluser1", "", date), + StatusCode::kOwnerAuthFail); } } - TEST_F(CurveFSTest, testCreateCloneFile) { // test parm error - ASSERT_EQ(curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_DIRECTORY, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr), - StatusCode::kParaError); + ASSERT_EQ(curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_DIRECTORY, + kMiniFileLength, kStartSeqNum, + curvefs_->GetDefaultChunkSize(), 0, 0, 
"default", nullptr), + StatusCode::kParaError); - ASSERT_EQ(curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength - 1, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr), - StatusCode::kParaError); + ASSERT_EQ(curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength - 1, kStartSeqNum, + curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr), + StatusCode::kParaError); { // test file exist EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); - auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); + auto statusCode = curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, + kStartSeqNum, curvefs_->GetDefaultChunkSize(), 0, 0, "default", + nullptr); ASSERT_EQ(statusCode, StatusCode::kFileExists); } { // test get storage error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::InternalError)); - auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); + auto statusCode = curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, + kStartSeqNum, curvefs_->GetDefaultChunkSize(), 0, 0, "default", + nullptr); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { // test inode allocate error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(false)); + .Times(1) + .WillOnce(Return(false)); - auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); + auto statusCode = curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, + kStartSeqNum, curvefs_->GetDefaultChunkSize(), 0, 0, "default", + nullptr); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { // test put storage error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::InternalError)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(true)); + .Times(1) + .WillOnce(Return(true)); - auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); + auto statusCode = curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, + kStartSeqNum, curvefs_->GetDefaultChunkSize(), 0, 0, "default", + nullptr); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { // test ok EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + 
.Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(true)); + .Times(1) + .WillOnce(Return(true)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); FileInfo fileInfo; - auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, - "default", &fileInfo); + auto statusCode = curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, + kStartSeqNum, curvefs_->GetDefaultChunkSize(), 0, 0, "default", + &fileInfo); ASSERT_EQ(statusCode, StatusCode::kOK); ASSERT_EQ(fileInfo.filename(), "file1"); ASSERT_EQ(fileInfo.owner(), "owner1"); @@ -4156,54 +4143,58 @@ TEST_F(CurveFSTest, testCreateCloneFile) { TEST_F(CurveFSTest, testSetCloneFileStatus) { { // test path not exist - FileInfo fileInfo; + FileInfo fileInfo; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1/file2", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kFileNotExists); + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1/file2", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kFileNotExists); } { // test stoarge error FileInfo fileInfo; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1/file2", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kStorageError); + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1/file2", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kStorageError); } { // test WalkPath NOT DIRECTORY - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1/file2", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kFileNotExists); + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1/file2", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kFileNotExists); } { // test LookUpFile internal Error - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1/file2", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kStorageError); + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1/file2", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kStorageError); } { // test inodeid not match @@ -4211,13 +4202,13 @@ TEST_F(CurveFSTest, testSetCloneFileStatus) { fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_id(100); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), 
- Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1", - 10, FileStatus::kFileCloned), - StatusCode::kFileIdNotMatch); + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1", 10, FileStatus::kFileCloned), + StatusCode::kFileIdNotMatch); } { // test filestatus not ok @@ -4226,43 +4217,41 @@ TEST_F(CurveFSTest, testSetCloneFileStatus) { FileStatus setStatus; StatusCode expectReturn; int putFileTime; - } testCases[] { + } testCases[]{ {FileStatus::kFileCloning, FileStatus::kFileCloneMetaInstalled, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCloning, FileStatus::kFileCloning, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCloneMetaInstalled, FileStatus::kFileCloned, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCloneMetaInstalled, - FileStatus::kFileCloneMetaInstalled, - StatusCode::kOK, 1}, - {FileStatus::kFileCloned, FileStatus::kFileCloned, - StatusCode::kOK, 1}, + FileStatus::kFileCloneMetaInstalled, StatusCode::kOK, 1}, + {FileStatus::kFileCloned, FileStatus::kFileCloned, StatusCode::kOK, + 1}, {FileStatus::kFileCreated, FileStatus::kFileBeingCloned, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileBeingCloned, FileStatus::kFileCreated, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileBeingCloned, FileStatus::kFileBeingCloned, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCloned, FileStatus::kFileBeingCloned, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileBeingCloned, FileStatus::kFileCloned, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCreated, FileStatus::kFileCreated, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCloning, FileStatus::kFileCloned, - StatusCode::kCloneStatusNotMatch, 0}, + StatusCode::kCloneStatusNotMatch, 0}, {FileStatus::kFileCloneMetaInstalled, FileStatus::kFileCloning, - StatusCode::kCloneStatusNotMatch, 0}, + StatusCode::kCloneStatusNotMatch, 0}, {FileStatus::kFileCreated, FileStatus::kFileCloned, - StatusCode::kCloneStatusNotMatch, 0}, + StatusCode::kCloneStatusNotMatch, 0}, {FileStatus::kFileDeleting, FileStatus::kFileBeingCloned, - StatusCode::kCloneStatusNotMatch, 0}, + StatusCode::kCloneStatusNotMatch, 0}, {FileStatus::kFileCloning, FileStatus::kFileBeingCloned, - StatusCode::kCloneStatusNotMatch, 0}, + StatusCode::kCloneStatusNotMatch, 0}, {FileStatus::kFileCloneMetaInstalled, FileStatus::kFileBeingCloned, - StatusCode::kCloneStatusNotMatch, 0} - }; + StatusCode::kCloneStatusNotMatch, 0}}; for (int i = 0; i < sizeof(testCases) / sizeof(testCases[0]); i++) { { @@ -4270,17 +4259,17 @@ TEST_F(CurveFSTest, testSetCloneFileStatus) { fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(testCases[i].originStatus); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(fileInfo), + Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(testCases[i].putFileTime)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(testCases[i].putFileTime)) + .WillOnce(Return(StoreStatus::OK)); - - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1", - kUnitializedFileID, testCases[i].setStatus), + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1", kUnitializedFileID, + testCases[i].setStatus), testCases[i].expectReturn); } } @@ -4291,17 +4280,17 @@ 
TEST_F(CurveFSTest, testSetCloneFileStatus) { fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileCloneMetaInstalled); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/file1", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kStorageError); + ASSERT_EQ(curvefs_->SetCloneFileStatus("/file1", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kStorageError); } { // test put file ok @@ -4309,17 +4298,17 @@ TEST_F(CurveFSTest, testSetCloneFileStatus) { fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileCloneMetaInstalled); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/file1", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kOK); + ASSERT_EQ(curvefs_->SetCloneFileStatus("/file1", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kOK); } } @@ -4327,10 +4316,10 @@ TEST_F(CurveFSTest, Init) { // test getFile ok { FileInfo fileInfo1, fileInfo2, fileInfo3, fileInfo4, fileInfo5; - fileInfo1.set_parentid(ROOTINODEID+1); + fileInfo1.set_parentid(ROOTINODEID + 1); fileInfo2.set_parentid(ROOTINODEID); - fileInfo2.set_id(RECYCLEBININODEID+1); + fileInfo2.set_id(RECYCLEBININODEID + 1); fileInfo3.set_parentid(ROOTINODEID); fileInfo3.set_id(RECYCLEBININODEID); @@ -4355,30 +4344,23 @@ TEST_F(CurveFSTest, Init) { const struct { FileInfo info; - bool ret; + bool ret; } testCases[] = { - {fileInfo1, false}, - {fileInfo2, false}, - {fileInfo3, false}, - {fileInfo4, false}, - {fileInfo5, true}, + {fileInfo1, false}, {fileInfo2, false}, {fileInfo3, false}, + {fileInfo4, false}, {fileInfo5, true}, }; - for (int i = 0; i < sizeof(testCases)/ sizeof(testCases[0]); i++) { + for (int i = 0; i < sizeof(testCases) / sizeof(testCases[0]); i++) { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(testCases[i].info), - Return(StoreStatus::OK))); - - ASSERT_EQ(testCases[i].ret, kCurveFS.Init(storage_, - inodeIdGenerator_, - mockChunkAllocator_, - mockcleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions_, - topology_, - nullptr)); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(testCases[i].info), + Return(StoreStatus::OK))); + + ASSERT_EQ(testCases[i].ret, + kCurveFS.Init(storage_, inodeIdGenerator_, + mockChunkAllocator_, mockcleanManager_, + fileRecordManager_, allocStatistic_, + curveFSOptions_, topology_, nullptr)); } } @@ -4388,15 +4370,10 @@ TEST_F(CurveFSTest, Init) { .Times(1) .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(false, kCurveFS.Init(storage_, - inodeIdGenerator_, - mockChunkAllocator_, - mockcleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions_, - topology_, - nullptr)); + ASSERT_EQ(false, kCurveFS.Init(storage_, inodeIdGenerator_, + mockChunkAllocator_, 
mockcleanManager_, + fileRecordManager_, allocStatistic_, + curveFSOptions_, topology_, nullptr)); } // test getfile not exist @@ -4410,15 +4387,10 @@ TEST_F(CurveFSTest, Init) { .Times(1) .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(false, kCurveFS.Init(storage_, - inodeIdGenerator_, - mockChunkAllocator_, - mockcleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions_, - topology_, - nullptr)); + ASSERT_EQ(false, kCurveFS.Init(storage_, inodeIdGenerator_, + mockChunkAllocator_, mockcleanManager_, + fileRecordManager_, allocStatistic_, + curveFSOptions_, topology_, nullptr)); // putfile ok FileInfo fileInfo5; @@ -4436,15 +4408,10 @@ TEST_F(CurveFSTest, Init) { .Times(1) .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(true, kCurveFS.Init(storage_, - inodeIdGenerator_, - mockChunkAllocator_, - mockcleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions_, - topology_, - nullptr)); + ASSERT_EQ(true, kCurveFS.Init(storage_, inodeIdGenerator_, + mockChunkAllocator_, mockcleanManager_, + fileRecordManager_, allocStatistic_, + curveFSOptions_, topology_, nullptr)); } } @@ -4497,11 +4464,11 @@ TEST_F(CurveFSTest, ListAllVolumesOnCopyset) { { // normal test EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileVec1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileVec2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileVec1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileVec2), Return(StoreStatus::OK))); PageFileSegment segment2 = segment; PageFileSegment segment3 = segment; auto chunk = segment.add_chunks(); @@ -4515,41 +4482,39 @@ TEST_F(CurveFSTest, ListAllVolumesOnCopyset) { std::vector segVec2 = {segment2}; std::vector segVec3 = {segment3}; EXPECT_CALL(*storage_, ListSegment(_, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<1>(segVec1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<1>(segVec2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<1>(segVec3), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce(DoAll(SetArgPointee<1>(segVec1), Return(StoreStatus::OK))) + .WillOnce(DoAll(SetArgPointee<1>(segVec2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<1>(segVec3), Return(StoreStatus::OK))); ASSERT_EQ(StatusCode::kOK, - curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); + curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); ASSERT_EQ(1, fileNames.size()); ASSERT_EQ("file1", fileNames[0]); } // list file fail { EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileVec1), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileVec1), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(StatusCode::kStorageError, - curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); + curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); } // list segment fail { EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileVec1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileVec2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileVec1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileVec2), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSegment(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + 
.WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(StatusCode::kStorageError, - curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); + curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); } } @@ -4571,8 +4536,8 @@ TEST_F(CurveFSTest, TestUpdateFileThrottleParams) { FileInfo updatedFileInfo; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) .WillOnce( DoAll(SaveArg<0>(&updatedFileInfo), Return(StoreStatus::OK))); @@ -4635,8 +4600,8 @@ TEST(StripeParamTest, Test) { rc = CheckStripeParam(segmentSize, chunkSize, 4096, 128); EXPECT_EQ(StatusCode::kParaError, rc); - rc = CheckStripeParam(segmentSize, chunkSize, 4096, - segmentSize / chunkSize); + rc = + CheckStripeParam(segmentSize, chunkSize, 4096, segmentSize / chunkSize); EXPECT_EQ(StatusCode::kOK, rc); } diff --git a/test/mds/nameserver2/file_lock_test.cpp b/test/mds/nameserver2/file_lock_test.cpp index 25b524d195..6c5f14a943 100644 --- a/test/mds/nameserver2/file_lock_test.cpp +++ b/test/mds/nameserver2/file_lock_test.cpp @@ -19,26 +19,28 @@ * Created Date: 2019-04-03 * Author: hzchenwei7 */ +#include "src/mds/nameserver2/file_lock.h" + #include -#include #include +#include + #include // NOLINT -#include "src/mds/nameserver2/file_lock.h" -using ::testing::AtLeast; -using ::testing::StrEq; using ::testing::_; +using ::testing::AtLeast; +using ::testing::DoAll; using ::testing::Return; using ::testing::ReturnArg; -using ::testing::DoAll; using ::testing::SetArgPointee; +using ::testing::StrEq; namespace curve { namespace mds { FileLockManager flm(4); -class FileLockManagerTest: public ::testing::Test { +class FileLockManagerTest : public ::testing::Test { public: FileLockManagerTest() {} }; @@ -59,9 +61,7 @@ void ReadLock(const std::string& filePath, bool unlock = false) { } } -void Unlock(const std::string& filePath) { - flm.Unlock(filePath); -} +void Unlock(const std::string& filePath) { flm.Unlock(filePath); } TEST_F(FileLockManagerTest, Basic) { std::string filePath1 = "/home/dir1/file1"; @@ -115,62 +115,46 @@ TEST_F(FileLockManagerTest, UnlockInAnotherThread) { Unlock(filePath); } -class FileReadLockGuardTest: public ::testing::Test { +class FileReadLockGuardTest : public ::testing::Test { public: FileReadLockGuardTest() {} }; TEST_F(FileReadLockGuardTest, LockUnlockTest) { - { - FileReadLockGuard guard(&flm, "/"); - } + { FileReadLockGuard guard(&flm, "/"); } - { - FileReadLockGuard guard(&flm, "/a"); - } + { FileReadLockGuard guard(&flm, "/a"); } - { - FileReadLockGuard guard(&flm, "/a/b"); - } + { FileReadLockGuard guard(&flm, "/a/b"); } ASSERT_EQ(flm.GetLockEntryNum(), 0); } -class FileWriteLockGuardTest: public ::testing::Test { +class FileWriteLockGuardTest : public ::testing::Test { public: FileWriteLockGuardTest() {} }; TEST_F(FileWriteLockGuardTest, LockUnlockTest) { - { - FileWriteLockGuard guard(&flm, "/"); - } + { FileWriteLockGuard guard(&flm, "/"); } - { - FileWriteLockGuard guard(&flm, "/a"); - } + { FileWriteLockGuard guard(&flm, "/a"); } - { - FileWriteLockGuard guard(&flm, "/a/b"); - } + { FileWriteLockGuard guard(&flm, "/a/b"); } - { - FileWriteLockGuard guard(&flm, "/a", "/a"); - } + { FileWriteLockGuard guard(&flm, "/a", "/a"); } - { - FileWriteLockGuard guard(&flm, "/a", "/b"); - } + { FileWriteLockGuard guard(&flm, "/a", "/b"); } - { - FileWriteLockGuard guard(&flm, "/b", "/a"); - } + { FileWriteLockGuard guard(&flm, "/b", "/a"); 
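// Each brace-scoped guard above must release its path lock in the destructor,
// which is why GetLockEntryNum() is asserted to be 0 once all scopes close.
// A minimal RAII sketch of that contract, assuming a simplified LockManager;
// this is illustrative and not the FileLockManager implementation:
#include <cstddef>
#include <map>
#include <string>

class LockManager {
 public:
    void WriteLock(const std::string& path) { ++entries_[path]; }
    void Unlock(const std::string& path) {
        if (--entries_[path] == 0) entries_.erase(path);  // drop empty entries
    }
    std::size_t GetLockEntryNum() const { return entries_.size(); }

 private:
    std::map<std::string, int> entries_;
};

class WriteLockGuard {
 public:
    WriteLockGuard(LockManager* lm, const std::string& path)
        : lm_(lm), path_(path) { lm_->WriteLock(path_); }
    ~WriteLockGuard() { lm_->Unlock(path_); }  // released on scope exit

 private:
    LockManager* lm_;
    std::string path_;
};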
} ASSERT_EQ(flm.GetLockEntryNum(), 0); } -// 以下这种情况,跑测试的时候会出现Segmentation fault,是锁的实现机制的问题 -// 要避免这样使用锁,已在代码里进行规避,以下注释的测试保留,提醒使用者注意 +// The following scenario triggers a segmentation fault when the tests run; it +// stems from how the lock is implemented. Locks must not be used this way, and +// the code already guards against it. The commented-out test below is kept as +// a reminder. /* TEST_F(FileWriteLockGuardTest, LockUnlockTest1) { { diff --git a/test/mds/nameserver2/file_record_test.cpp b/test/mds/nameserver2/file_record_test.cpp index 37a728b012..3369db4554 100644 --- a/test/mds/nameserver2/file_record_test.cpp +++ b/test/mds/nameserver2/file_record_test.cpp @@ -20,15 +20,16 @@ * Author : wuhanqing */ +#include "src/mds/nameserver2/file_record.h" + #include #include -#include //NOLINT -#include // NOLINT +#include //NOLINT +#include // NOLINT #include "src/common/timeutility.h" #include "src/mds/common/mds_define.h" -#include "src/mds/nameserver2/file_record.h" namespace curve { namespace mds { @@ -37,15 +38,15 @@ TEST(FileRecordTest, timeout_test) { butil::EndPoint ep; butil::str2endpoint("127.0.0.1:1111", &ep); - // 设置有效时间为1ms + // Set the validity period to 1ms FileRecord record(1 * 1000, "0.0.6", ep); - // 判断超时 + // Check that the record has not timed out yet ASSERT_EQ(false, record.IsTimeout()); - // 判断版本号 + // Check the version number ASSERT_EQ("0.0.6", record.GetClientVersion()); - // 睡眠一段时间判断超时是否生效 + // Sleep long enough for the timeout to take effect std::this_thread::sleep_for(std::chrono::milliseconds(15)); ASSERT_EQ(true, record.IsTimeout()); @@ -89,9 +90,9 @@ TEST(FileRecordManagerTest, normal_test) { kInvalidPort); fileRecordManager.UpdateFileRecord("file4", "0.0.6", "127.0.0.1", 1235); - // 总共记录了4个文件 - // 其中一个port为Invalid - // 其中两个文件打开的client ip port相同 + // A total of 4 files were recorded + // One of them has an invalid port + // Two of them were opened from the same client IP and port ASSERT_EQ(2, fileRecordManager.ListAllClient().size()); // ClientIpPortType clientIpPort; @@ -110,8 +111,7 @@ TEST(FileRecordManagerTest, normal_test) { butil::endpoint2str(clients[0]).c_str()); clients.clear(); - ASSERT_FALSE( - fileRecordManager.FindFileMountPoint("file100", &clients)); + ASSERT_FALSE(fileRecordManager.FindFileMountPoint("file100", &clients)); fileRecordManager.Stop(); } @@ -127,7 +127,7 @@ TEST(FileRecordManagerTest, open_file_num_test) { ASSERT_EQ(0, fileRecordManager.GetOpenFileNum()); - // 插入两个记录 + // Insert two records fileRecordManager.UpdateFileRecord("file1", "", "127.0.0.1", 0); fileRecordManager.UpdateFileRecord("file2", "", "127.0.0.1", 0); @@ -138,18 +138,18 @@ } }; - // 只对 file1 定期续约 + // Periodically renew only file1 std::thread th(task, "file1"); - // sleep 50ms后,file2 会超时 + // After sleeping 50ms, file2 will have timed out std::this_thread::sleep_for(std::chrono::milliseconds(50)); ASSERT_EQ(1, fileRecordManager.GetOpenFileNum()); - // 停止 file1 的定期续约 + // Stop the periodic renewal of file1 running = false; th.join(); - // sleep 50ms后,file1 也会超时 + // After another 50ms, file1 will also have timed out std::this_thread::sleep_for(std::chrono::milliseconds(50)); ASSERT_EQ(0, fileRecordManager.GetOpenFileNum()); diff --git a/test/mds/nameserver2/namespace_service_test.cpp b/test/mds/nameserver2/namespace_service_test.cpp index c5247030f2..09fff706e2 100644 --- a/test/mds/nameserver2/namespace_service_test.cpp +++ b/test/mds/nameserver2/namespace_service_test.cpp @@ -19,40 +19,42 @@ * Created Date: Wednesday 
September 26th 2018 * Author: hzsunjianliang */ -#include -#include -#include +#include "src/mds/nameserver2/namespace_service.h" + #include #include -#include "src/mds/nameserver2/namespace_service.h" -#include "src/mds/nameserver2/curvefs.h" -#include "src/mds/nameserver2/chunk_allocator.h" -#include "src/common/timeutility.h" +#include +#include +#include + +#include "src/common/authenticator.h" #include "src/common/configuration.h" #include "src/common/string_util.h" -#include "test/mds/nameserver2/fakes.h" -#include "test/mds/nameserver2/mock/mock_clean_manager.h" -#include "test/mds/nameserver2/mock/mock_chunk_allocate.h" -#include "src/mds/nameserver2/clean_manager.h" +#include "src/common/timeutility.h" +#include "src/mds/chunkserverclient/copyset_client.h" +#include "src/mds/nameserver2/chunk_allocator.h" #include "src/mds/nameserver2/clean_core.h" +#include "src/mds/nameserver2/clean_manager.h" #include "src/mds/nameserver2/clean_task_manager.h" -#include "src/common/authenticator.h" -#include "test/mds/mock/mock_topology.h" -#include "test/mds/mock/mock_chunkserver.h" -#include "src/mds/chunkserverclient/copyset_client.h" +#include "src/mds/nameserver2/curvefs.h" #include "test/mds/mock/mock_alloc_statistic.h" +#include "test/mds/mock/mock_chunkserver.h" +#include "test/mds/mock/mock_topology.h" +#include "test/mds/nameserver2/fakes.h" +#include "test/mds/nameserver2/mock/mock_chunk_allocate.h" +#include "test/mds/nameserver2/mock/mock_clean_manager.h" -using curve::common::TimeUtility; using curve::common::Authenticator; -using curve::mds::topology::MockTopology; +using curve::common::TimeUtility; using ::curve::mds::chunkserverclient::ChunkServerClientOption; +using curve::mds::topology::MockTopology; using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; using ::testing::Invoke; using ::testing::Matcher; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace mds { @@ -74,34 +76,33 @@ class NameSpaceServiceTest : public ::testing::Test { protected: void SetUp() override { // init the kcurvefs, use the fake element - storage_ = std::make_shared(); + storage_ = std::make_shared(); inodeGenerator_ = std::make_shared(0); topology_ = std::make_shared(); ChunkServerClientOption option; auto channelPool = std::make_shared(); - auto client = std::make_shared(topology_, - option, channelPool); + auto client = + std::make_shared(topology_, option, channelPool); allocStatistic_ = std::make_shared(); - cleanCore_ = std::make_shared( - storage_, client, allocStatistic_); + cleanCore_ = + std::make_shared(storage_, client, allocStatistic_); // new taskmanger for 2 worker thread, and check thread period 2 second - cleanTaskManager_ = std::make_shared(channelPool, - 2, 2000); + cleanTaskManager_ = + std::make_shared(channelPool, 2, 2000); - cleanManager_ = std::make_shared(cleanCore_, - cleanTaskManager_, storage_); + cleanManager_ = std::make_shared( + cleanCore_, cleanTaskManager_, storage_); ASSERT_EQ(cleanManager_->Start(), true); std::shared_ptr topologyChunkAllocator = - std::make_shared(); + std::make_shared(); std::shared_ptr chunkIdGenerator = - std::make_shared(); - chunkSegmentAllocate_ = - std::make_shared( - topologyChunkAllocator, chunkIdGenerator); + std::make_shared(); + chunkSegmentAllocate_ = std::make_shared( + topologyChunkAllocator, chunkIdGenerator); fileRecordManager_ = std::make_shared(); fileRecordOptions.fileRecordExpiredTimeUs = 5 * 1000; @@ -118,16 +119,13 @@ 
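// The SetUp() above builds every collaborator (storage, clean manager,
// allocator, record manager) as a std::shared_ptr and injects them into
// kCurveFS.Init(), so each test exercises the service against fakes and mocks
// rather than real storage. The fixture shape, reduced to a sketch with
// illustrative types that are not from this repository:
#include <gtest/gtest.h>
#include <memory>

struct Storage { virtual ~Storage() = default; };
struct FakeStorage : Storage {};

class Service {
 public:
    bool Init(std::shared_ptr<Storage> storage) {
        storage_ = std::move(storage);
        return storage_ != nullptr;
    }

 private:
    std::shared_ptr<Storage> storage_;
};

class ServiceTest : public ::testing::Test {
 protected:
    void SetUp() override {
        storage_ = std::make_shared<FakeStorage>();  // fake, swapped for the real store
        ASSERT_TRUE(service_.Init(storage_));
    }
    std::shared_ptr<FakeStorage> storage_;
    Service service_;
};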
class NameSpaceServiceTest : public ::testing::Test { curveFSOptions.authOptions = authOptions; kCurveFS.Init(storage_, inodeGenerator_, chunkSegmentAllocate_, - cleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions, topology_, - nullptr); + cleanManager_, fileRecordManager_, allocStatistic_, + curveFSOptions, topology_, nullptr); ASSERT_EQ(curveFSOptions.defaultChunkSize, - kCurveFS.GetDefaultChunkSize()); + kCurveFS.GetDefaultChunkSize()); ASSERT_EQ(curveFSOptions.defaultSegmentSize, - kCurveFS.GetDefaultSegmentSize()); + kCurveFS.GetDefaultSegmentSize()); ASSERT_EQ(curveFSOptions.minFileLength, kCurveFS.GetMinFileLength()); ASSERT_EQ(curveFSOptions.maxFileLength, kCurveFS.GetMaxFileLength()); DefaultSegmentSize = kCurveFS.GetDefaultSegmentSize(); @@ -150,7 +148,7 @@ class NameSpaceServiceTest : public ::testing::Test { } } - template + template void SetRequestAuth(T* request, RequestOption option) { uint64_t date = TimeUtility::GetTimeofDayUs(); request->set_date(date); @@ -173,18 +171,16 @@ class NameSpaceServiceTest : public ::testing::Test { uint64_t time; auto n = items.size(); - if (n <= 2 || !::curve::common::StringToUll(items[n - 1], &time) - || time < dtime || time - dtime > 1) { + if (n <= 2 || !::curve::common::StringToUll(items[n - 1], &time) || + time < dtime || time - dtime > 1) { LOG(INFO) << "unexpected filename: " << filename - << ", dtime: " << dtime - << ", time in file: " << time; + << ", dtime: " << dtime << ", time in file: " << time; return false; } return true; } - bool DeleteFile(const std::string& filename, - RequestOption option, + bool DeleteFile(const std::string& filename, RequestOption option, DeleteFileResponse* response) { brpc::Controller cntl; DeleteFileRequest request; @@ -201,8 +197,7 @@ class NameSpaceServiceTest : public ::testing::Test { return true; } - bool GetFileInfo(const std::string& filename, - RequestOption option, + bool GetFileInfo(const std::string& filename, RequestOption option, GetFileInfoResponse* response) { brpc::Controller cntl; GetFileInfoRequest request; @@ -218,8 +213,7 @@ class NameSpaceServiceTest : public ::testing::Test { return true; } - bool ListDir(const std::string& dirname, - RequestOption option, + bool ListDir(const std::string& dirname, RequestOption option, ListDirResponse* response) { brpc::Controller cntl; ListDirRequest request; @@ -260,8 +254,9 @@ TEST_F(NameSpaceServiceTest, test1) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -273,7 +268,6 @@ TEST_F(NameSpaceServiceTest, test1) { CurveFSService_Stub stub(&channel); - // test CreateFile // create /file1(owner1) , /file2(owner2), /dir/file3(owner3) std::vector logicalPools{1, 2, 3}; @@ -285,7 +279,7 @@ TEST_F(NameSpaceServiceTest, test1) { brpc::Controller cntl; uint64_t fileLength = kMiniFileLength; - // 创建file1,owner1 + // Create file1, owner1 request.set_filename("/file1"); request.set_owner("owner1"); request.set_date(TimeUtility::GetTimeofDayUs()); @@ -347,7 +341,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 在一个不存在的目录下创建文件,会失败 kFileNotExists + // Creating a file in a non-existent directory will fail kFileNotExists cntl.Reset(); request.set_filename("/dir4/file4"); request.set_owner("owner4"); @@ -363,7 +357,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); 
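// SetRequestAuth() above and the root-owner requests later in this file all
// stamp requests the same way: take the time in microseconds, derive the
// string-to-sign, sign it with the root password, and attach owner, signature,
// and date. A sketch of that recurring sequence as one hypothetical helper;
// the Authenticator and TimeUtility calls are the ones this file already uses,
// but the helper itself is not part of the source:
template <typename Request>
void SignAsRoot(Request* request, const std::string& rootOwner,
                const std::string& rootPassword) {
    uint64_t date = TimeUtility::GetTimeofDayUs();
    std::string str2sig = Authenticator::GetString2Signature(date, rootOwner);
    std::string sig = Authenticator::CalcString2Signature(str2sig, rootPassword);
    request->set_owner(rootOwner);  // some requests use set_rootowner() instead
    request->set_signature(sig);
    request->set_date(date);
}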
} - // 在一个文件下创建文件,会失败 kNotDirectory + // Creating a file under another file fails with kNotDirectory cntl.Reset(); request.set_filename("/file2/file4"); request.set_owner("owner2"); @@ -379,7 +373,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 如果创建一个已经存在的文件,会创建失败kFileExists + // Creating a file that already exists fails with kFileExists cntl.Reset(); request.set_filename("/file2"); request.set_poolset(""); @@ -396,7 +390,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 如果创建一个已经存在的目录,会创建失败kFileExists + // Creating a directory that already exists fails with kFileExists cntl.Reset(); request.set_filename("/dir"); request.set_owner("owner3"); @@ -412,7 +406,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 创建其他类型文件,返回kNotSupported + // Creating files of other types returns kNotSupported cntl.Reset(); request.set_filename("/file4"); request.set_owner("owner4"); @@ -457,7 +451,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 创建文件名不规范的文件会失败 + // Creating a file whose name violates the naming rules fails cntl.Reset(); request.set_filename("/file4/"); request.set_owner("owner4"); @@ -515,10 +509,10 @@ TEST_F(NameSpaceServiceTest, test1) { cntl.Reset(); uint64_t date = TimeUtility::GetTimeofDayUs(); - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); + std::string sig = Authenticator::CalcString2Signature( + str2sig, authOptions.rootPassword); listRequest.set_signature(sig); listRequest.set_filename("/"); listRequest.set_owner(authOptions.rootOwner); @@ -527,7 +521,7 @@ TEST_F(NameSpaceServiceTest, test1) { if (!cntl.Failed()) { ASSERT_EQ(listResponse.statuscode(), StatusCode::kOK); ASSERT_EQ(listResponse.fileinfo_size(), 4); - } else { + } else { ASSERT_TRUE(false); } } @@ -559,7 +553,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_EQ(response1.fileinfo().parentid(), 0); ASSERT_EQ(response1.fileinfo().filetype(), INODE_PAGEFILE); ASSERT_EQ(response1.fileinfo().chunksize(), - curveFSOptions.defaultChunkSize); + curveFSOptions.defaultChunkSize); ASSERT_EQ(response1.fileinfo().segmentsize(), DefaultSegmentSize); ASSERT_EQ(response1.fileinfo().length(), fileLength); } else { FAIL(); } // test GetOrAllocateSegment - // 为file1分配空间 + // Allocate space for file1 cntl.Reset(); GetOrAllocateSegmentRequest request2; GetOrAllocateSegmentResponse response2; @@ -606,13 +600,13 @@ TEST_F(NameSpaceServiceTest, test1) { if (!cntl.Failed()) { ASSERT_EQ(response2.statuscode(), StatusCode::kOK); ASSERT_EQ(response2.pagefilesegment().segmentsize(), - response1.fileinfo().segmentsize()); + response1.fileinfo().segmentsize()); ASSERT_EQ(response2.pagefilesegment().chunksize(), - response1.fileinfo().chunksize()); + response1.fileinfo().chunksize()); ASSERT_EQ(response2.pagefilesegment().startoffset(), request2.offset()); - int chunkNumber = response2.pagefilesegment().segmentsize()/ - response2.pagefilesegment().chunksize(); + int chunkNumber = response2.pagefilesegment().segmentsize() / + response2.pagefilesegment().chunksize(); ASSERT_EQ(response2.pagefilesegment().chunks().size(), chunkNumber); } else { @@ -631,7 +625,7 @@ TEST_F(NameSpaceServiceTest, test1) { if (!cntl.Failed()) { ASSERT_EQ(response3.statuscode(), StatusCode::kOK); ASSERT_EQ(response3.pagefilesegment().SerializeAsString(), - 
response2.pagefilesegment().SerializeAsString()); + response2.pagefilesegment().SerializeAsString()); } else { ASSERT_TRUE(false); } @@ -682,8 +676,8 @@ TEST_F(NameSpaceServiceTest, test1) { // test change owner { - // 当前有文件 /file1(owner1) , /file2(owner2), /dir/file3(owner3) - // changeowner success + // There are currently /file1(owner1) , /file2(owner2), + // /dir/file3(owner3) changeowner success cntl.Reset(); ChangeOwnerRequest request; ChangeOwnerResponse response; @@ -694,10 +688,10 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_filename("/file1"); request.set_newowner("newowner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner(authOptions.rootOwner); request.set_signature(sig); request.set_date(date); @@ -713,10 +707,10 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_filename("/file1"); request.set_newowner("newowner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner(authOptions.rootOwner); request.set_signature(sig); request.set_date(date); @@ -732,10 +726,10 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_filename("/file1"); request.set_newowner("owner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner("newowner1"); request.set_signature(sig); request.set_date(date); @@ -766,10 +760,10 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_filename("/file1"); request.set_newowner("owner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner(authOptions.rootOwner); request.set_signature(sig); request.set_date(date + kStaledRequestTimeIntervalUs * 2); @@ -785,10 +779,10 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_filename("/file1"); request.set_newowner("owner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner(authOptions.rootOwner); request.set_signature(sig); request.set_date(date); @@ -799,15 +793,15 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // changeowner 文件名不规范,失败 + // changeowner file name is not standardized, failed cntl.Reset(); request.set_filename("/file1/"); request.set_newowner("owner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + 
Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner(authOptions.rootOwner); request.set_signature(sig); request.set_date(date); @@ -820,12 +814,12 @@ TEST_F(NameSpaceServiceTest, test1) { } // test RenameFile - // 重命名到根目录下,非root owner,失败 - // fileinfoid不匹配,失败 - // 重命名成功 /dir/file3 -> /dir/file4 - // 原文件不存在,重命名失败 - // 重命名到根目录下,root owner,成功 /dir/file4 -> /file4 - // 文件名不规范,失败 + // Renaming into the root directory as a non-root owner fails + // A mismatched fileinfoid fails + // Rename succeeds: /dir/file3 -> /dir/file4 + // Renaming a nonexistent source file fails + // Renaming into the root directory as root owner succeeds: /dir/file4 -> + // /file4. A non-conforming file name fails cntl.Reset(); RenameFileRequest request4; RenameFileResponse response4; @@ -858,10 +852,10 @@ TEST_F(NameSpaceServiceTest, test1) { std::string oldname = "/dir/file4"; uint64_t date = TimeUtility::GetTimeofDayUs(); - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); + std::string sig = + Authenticator::CalcString2Signature(str2sig, authOptions.rootPassword); request4.set_oldfilename(oldname); request4.set_newfilename("/file4"); @@ -951,8 +945,8 @@ TEST_F(NameSpaceServiceTest, test1) { } // test ExtendFile - // 扩容file2,第一次扩大,成功;第二次缩小,失败 - // 扩容的文件名不符合规范,失败 + // Extend file2: the first request grows the file and succeeds; the second + // shrinks it and fails. Extending with a non-conforming file name also fails uint64_t newsize = kMiniFileLength * 2; cntl.Reset(); ExtendFileRequest request5; ExtendFileResponse response5; @@ -992,8 +986,9 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // begin session test,开始测试时,有/file1,/file2和/file4 - // OpenFile case1. 文件不存在,返回kFileNotExists + // begin session test; at this point /file1, /file2 and /file4 exist + // OpenFile case1. The file does not exist, so kFileNotExists is + // returned cntl.Reset(); OpenFileRequest request8; OpenFileResponse response8; @@ -1008,7 +1003,8 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // OpenFile case2. 文件存在,没有open过,返回成功、session、fileInfo + // OpenFile case2. The file exists and has not been opened. Success, + // session, and fileInfo are returned cntl.Reset(); OpenFileRequest request9; OpenFileResponse response9; @@ -1020,7 +1016,7 @@ TEST_F(NameSpaceServiceTest, test1) { if (!cntl.Failed()) { ASSERT_EQ(response9.statuscode(), StatusCode::kOK); ASSERT_EQ(response9.protosession().sessionstatus(), - SessionStatus::kSessionOK); + SessionStatus::kSessionOK); ASSERT_EQ(response9.fileinfo().filename(), "file2"); } else { ASSERT_TRUE(false); } @@ -1037,13 +1033,13 @@ TEST_F(NameSpaceServiceTest, test1) { if (!cntl.Failed()) { ASSERT_EQ(response10.statuscode(), StatusCode::kOK); ASSERT_EQ(response10.protosession().sessionstatus(), - SessionStatus::kSessionOK); + SessionStatus::kSessionOK); ASSERT_EQ(response10.fileinfo().filename(), "file1"); } else { ASSERT_TRUE(false); } - // openFile case3, 文件名不符合规范 + // OpenFile case3. The file name does not meet the specification OpenFileRequest request11; OpenFileResponse response11; cntl.Reset(); @@ -1058,7 +1054,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // CloseFile case1. 
文件不存在,返回kFileNotExists + // CloseFile case1. The file does not exist; kFileNotExists is returned cntl.Reset(); CloseFileRequest request12; CloseFileResponse response12; @@ -1074,7 +1070,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // CloseFile case2. 文件存在,session存在,返回成功 + // CloseFile case2. The file and session both exist; success is returned CloseFileRequest request13; CloseFileResponse response13; cntl.Reset(); @@ -1092,7 +1088,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // CloseFile case3. 文件名不符合规范 + // CloseFile case3. The file name does not meet the specification cntl.Reset(); request14.set_filename("/file2/"); request14.set_owner("owner2"); @@ -1106,7 +1102,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // RefreshSession case1. 文件不存在,返回kFileNotExists + // RefreshSession case1. The file does not exist; kFileNotExists is returned cntl.Reset(); ReFreshSessionRequest request15; ReFreshSessionResponse response15; @@ -1124,7 +1120,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // RefreshSession case2. 文件名不符合规范 + // RefreshSession case2. The file name does not meet the specification ReFreshSessionRequest request18; ReFreshSessionResponse response18; cntl.Reset(); @@ -1155,8 +1151,9 @@ TEST_F(NameSpaceServiceTest, snapshottests) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -1168,7 +1165,6 @@ TEST_F(NameSpaceServiceTest, snapshottests) { CurveFSService_Stub stub(&channel); - // test create file std::vector logicalPools{1, 2, 3}; EXPECT_CALL(*topology_, GetLogicalPoolInCluster(_)) .Times(AtLeast(1)) .WillRepeatedly(Return(logicalPools)); @@ -1188,7 +1184,7 @@ TEST_F(NameSpaceServiceTest, snapshottests) { request.set_filelength(fileLength); cntl.set_log_id(2); - stub.CreateFile(&cntl, &request, &response, NULL); + stub.CreateFile(&cntl, &request, &response, NULL); if (!cntl.Failed()) { ASSERT_EQ(response.statuscode(), StatusCode::kOK); } else { @@ -1204,7 +1200,7 @@ TEST_F(NameSpaceServiceTest, snapshottests) { request1.set_date(TimeUtility::GetTimeofDayUs()); stub.GetFileInfo(&cntl, &request1, &response1, NULL); if (!cntl.Failed()) { - FileInfo file = response1.fileinfo(); + FileInfo file = response1.fileinfo(); ASSERT_EQ(response1.statuscode(), StatusCode::kOK); ASSERT_EQ(file.id(), 1); ASSERT_EQ(file.filename(), "file1"); @@ -1246,7 +1242,7 @@ TEST_F(NameSpaceServiceTest, snapshottests) { snapshotRequest.set_date(TimeUtility::GetTimeofDayUs()); stub.CreateSnapShot(&cntl, &snapshotRequest, &snapshotResponses, NULL); if (!cntl.Failed()) { - ASSERT_EQ(snapshotResponses.statuscode(), StatusCode::kParaError); + ASSERT_EQ(snapshotResponses.statuscode(), StatusCode::kParaError); } else { ASSERT_TRUE(false); } @@ -1310,11 +1306,11 @@ TEST_F(NameSpaceServiceTest, snapshottests) { getSegmentRequest.set_offset(DefaultSegmentSize); getSegmentRequest.set_allocateifnotexist(false); getSegmentRequest.set_seqnum(1); - stub.GetSnapShotFileSegment(&cntl, &getSegmentRequest, - &getSegmentResponse, NULL); + stub.GetSnapShotFileSegment(&cntl, &getSegmentRequest, &getSegmentResponse, + NULL); if (!cntl.Failed()) { ASSERT_EQ(getSegmentResponse.statuscode(), - StatusCode::kSegmentNotAllocated); + StatusCode::kSegmentNotAllocated); } else { ASSERT_TRUE(false); } @@ -1326,8 +1322,8 @@ TEST_F(NameSpaceServiceTest, snapshottests) { 
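// Every RPC in these tests follows one brpc idiom: reset the reused
// controller, make the synchronous stub call (a NULL done closure means
// blocking), then branch on cntl.Failed() to separate transport failures from
// application status codes. The idiom in isolation, using only calls that
// already appear above:
brpc::Controller cntl;
GetFileInfoRequest request;
GetFileInfoResponse response;
request.set_filename("/file1");
request.set_owner("owner1");
request.set_date(TimeUtility::GetTimeofDayUs());
cntl.Reset();                                        // reuse across calls
stub.GetFileInfo(&cntl, &request, &response, NULL);  // NULL => synchronous
if (!cntl.Failed()) {
    // Transport succeeded; now check the application-level status code.
    ASSERT_EQ(response.statuscode(), StatusCode::kOK);
} else {
    // The RPC itself failed (connect error, timeout, and so on).
    ASSERT_TRUE(false) << cntl.ErrorText();
}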
getSegmentRequest.set_offset(DefaultSegmentSize); getSegmentRequest.set_allocateifnotexist(false); getSegmentRequest.set_seqnum(1); - stub.GetSnapShotFileSegment(&cntl, &getSegmentRequest, - &getSegmentResponse, NULL); + stub.GetSnapShotFileSegment(&cntl, &getSegmentRequest, &getSegmentResponse, + NULL); if (!cntl.Failed()) { ASSERT_EQ(getSegmentResponse.statuscode(), StatusCode::kParaError); } else { ASSERT_TRUE(false); } @@ -1407,13 +1403,14 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); using ::curve::chunkserver::MockChunkService; - MockChunkService *chunkService = new MockChunkService(); - ASSERT_EQ(server.AddService(chunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + MockChunkService* chunkService = new MockChunkService(); + ASSERT_EQ(server.AddService(chunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -1426,7 +1423,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { CurveFSService_Stub stub(&channel); - // 先创建文件/file1,目录/dir1,文件/dir1/file2 + // First create file '/file1', directory '/dir1', file '/dir1/file2' std::vector logicalPools{1, 2, 3}; EXPECT_CALL(*topology_, GetLogicalPoolInCluster(_)) .Times(AtLeast(1)) .WillRepeatedly(Return(logicalPools)); @@ -1444,7 +1441,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { request.set_filelength(fileLength); cntl.set_log_id(2); - stub.CreateFile(&cntl, &request, &response, NULL); + stub.CreateFile(&cntl, &request, &response, NULL); if (!cntl.Failed()) { ASSERT_EQ(response.statuscode(), StatusCode::kOK); } else { @@ -1480,7 +1477,8 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { FAIL(); } - // 查看文件/file1,目录/dir1,文件/dir1/file2的状态 + // View the status of file '/file1', directory '/dir1', and file + // '/dir1/file2' cntl.Reset(); GetFileInfoRequest request1; GetFileInfoResponse response1; @@ -1489,7 +1487,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { request1.set_date(TimeUtility::GetTimeofDayUs()); stub.GetFileInfo(&cntl, &request1, &response1, NULL); if (!cntl.Failed()) { - FileInfo file = response1.fileinfo(); + FileInfo file = response1.fileinfo(); ASSERT_EQ(response1.statuscode(), StatusCode::kOK); ASSERT_EQ(file.id(), 1); ASSERT_EQ(file.filename(), "file1"); @@ -1509,7 +1507,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { request1.set_date(TimeUtility::GetTimeofDayUs()); stub.GetFileInfo(&cntl, &request1, &response1, NULL); if (!cntl.Failed()) { - FileInfo file = response1.fileinfo(); + FileInfo file = response1.fileinfo(); ASSERT_EQ(response1.statuscode(), StatusCode::kOK); ASSERT_EQ(file.id(), 2); ASSERT_EQ(file.filename(), "dir1"); @@ -1539,7 +1537,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { ASSERT_TRUE(false); } - // 文件/dir1/file2申请segment + // Allocate segments for file '/dir1/file2' GetOrAllocateSegmentRequest allocRequest; GetOrAllocateSegmentResponse allocResponse; for (int i = 0; i < 10; i++) { @@ -1551,15 +1549,15 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { allocRequest.set_allocateifnotexist(true); stub.GetOrAllocateSegment(&cntl, &allocRequest, &allocResponse, NULL); if (!cntl.Failed()) { - ASSERT_EQ(allocResponse.statuscode(), - StatusCode::kOK); + ASSERT_EQ(allocResponse.statuscode(), StatusCode::kOK); } else { ASSERT_TRUE(false); } } - // 开始测试删除文件逻辑 - // 1 如果文件有快照,那么删除文件返回kFileUnderSnapShot + // Start testing delete file logic 
+ // 1. If the file has a snapshot, deleting the file returns + // kFileUnderSnapShot cntl.Reset(); CreateSnapShotRequest snapshotRequest; CreateSnapShotResponse snapshotResponses; @@ -1623,7 +1621,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { stub.CheckSnapShotStatus(&cntl, &checkRequest, &checkResponse, NULL); if (!cntl.Failed()) { if (checkResponse.statuscode() == - StatusCode::kSnapshotFileNotExists) { + StatusCode::kSnapshotFileNotExists) { break; } else { ASSERT_EQ(checkResponse.statuscode(), StatusCode::kOK); @@ -1636,10 +1634,10 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { } } ASSERT_LE(attempts, 100) - << "max attempts for check snapshot status exhausted"; - + << "max attempts for check snapshot status exhausted"; - // 2 如果目录下有文件,那么删除目录返回kDirNotEmpty + // 2. If there are files in the directory, deleting the directory returns + // kDirNotEmpty cntl.Reset(); request3.set_filename("/dir1"); request3.set_owner("owner"); @@ -1653,7 +1651,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { ASSERT_TRUE(false); } - // 3 如果传入的fileid不匹配,删除文件失败 + // 3. If the passed-in file id does not match, deleting the file fails cntl.Reset(); DeleteFileRequest request5; DeleteFileResponse response5; @@ -1670,7 +1668,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { ASSERT_TRUE(false); } - // 4 删除文件/file1成功,查询文件已经删除 + // 4. Deleting file '/file1' succeeds; a query shows it has been deleted cntl.Reset(); request3.set_filename("/file1"); request3.set_owner("owner"); @@ -1696,15 +1694,15 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { ASSERT_TRUE(false); } - // 查询垃圾箱 + // Query the recycle bin ListDirRequest listRequest; ListDirResponse listResponse; cntl.Reset(); uint64_t date = TimeUtility::GetTimeofDayUs(); - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); + std::string sig = + Authenticator::CalcString2Signature(str2sig, authOptions.rootPassword); listRequest.set_signature(sig); listRequest.set_filename(RECYCLEBINDIR); listRequest.set_owner(authOptions.rootOwner); @@ -1716,37 +1714,36 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { FileInfo file = listResponse.fileinfo(0); ASSERT_TRUE(CheckFilename(file.filename(), dtime)); // file1-1-${dtime} ASSERT_EQ(file.filestatus(), FileStatus::kFileCreated); - } else { + } else { ASSERT_TRUE(false); } - // 删除文件/dir1/file2成功,删除目录/dir1成功,查询目录和文件均已经删除 - using ::curve::mds::topology::ChunkServerStatus; - using ::curve::mds::topology::OnlineState; + // Deleting file '/dir1/file2' and then directory '/dir1' succeeds; + // queries show both are gone + using ::curve::chunkserver::CHUNK_OP_STATUS; using ::curve::chunkserver::ChunkRequest; using ::curve::chunkserver::ChunkResponse; - using ::curve::chunkserver::CHUNK_OP_STATUS; + using ::curve::mds::topology::ChunkServerStatus; + using ::curve::mds::topology::OnlineState; CopySetInfo copyset(1, 1); copyset.SetLeader(1); EXPECT_CALL(*topology_, GetCopySet(_, _)) - .WillRepeatedly( - DoAll(SetArgPointee<1>(copyset), Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(copyset), Return(true))); ChunkServer chunkserver(1, "", "", 1, "127.0.0.1", listenAddr.port, "", - ChunkServerStatus::READWRITE, OnlineState::ONLINE); + ChunkServerStatus::READWRITE, OnlineState::ONLINE); EXPECT_CALL(*topology_, GetChunkServer(_, _)) - 
.WillRepeatedly(DoAll(SetArgPointee<1>(chunkserver), Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(chunkserver), Return(true))); ChunkResponse chunkResponse; chunkResponse.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); EXPECT_CALL(*chunkService, DeleteChunk(_, _, _, _)) - .WillRepeatedly(DoAll(SetArgPointee<2>(chunkResponse), - Invoke([](RpcController *controller, - const ChunkRequest *chunkRequest, - ChunkResponse *chunkResponse, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(chunkResponse), + Invoke([](RpcController* controller, + const ChunkRequest* chunkRequest, + ChunkResponse* chunkResponse, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); stub_ = std::make_shared(&channel); @@ -1858,8 +1855,9 @@ TEST_F(NameSpaceServiceTest, clonetest) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -1944,8 +1942,9 @@ TEST_F(NameSpaceServiceTest, listClientTest) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -1979,8 +1978,9 @@ TEST_F(NameSpaceServiceTest, listAllClientTest) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -2085,8 +2085,9 @@ TEST_F(NameSpaceServiceTest, ListVolumesOnCopysets) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -2116,8 +2117,9 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -2143,7 +2145,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { createRequest.set_date(TimeUtility::GetTimeofDayUs()); createRequest.set_filetype(INODE_PAGEFILE); createRequest.set_filelength(fileLength); - stub.CreateFile(&cntl, &createRequest, &createResponse, NULL); + stub.CreateFile(&cntl, &createRequest, &createResponse, NULL); if (!cntl.Failed()) { ASSERT_EQ(createResponse.statuscode(), StatusCode::kOK); } else { @@ -2187,7 +2189,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { getRequest.set_date(TimeUtility::GetTimeofDayUs()); stub.GetFileInfo(&cntl, &getRequest, &getResponse, NULL); if (!cntl.Failed()) { - FileInfo file = getResponse.fileinfo(); + FileInfo file = getResponse.fileinfo(); ASSERT_EQ(getResponse.statuscode(), StatusCode::kOK); ASSERT_EQ(file.id(), 1); ASSERT_EQ(file.filename(), "file1"); @@ -2207,7 +2209,7 @@ 
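// The DeleteChunk expectation just above combines SetArgPointee (fill in the
// canned response) with Invoke (finish the RPC): a mocked asynchronous brpc
// service method receives a Closure* and the call only completes once
// done->Run() executes, which brpc::ClosureGuard performs on scope exit. The
// action restated in isolation, with explanatory comments added:
EXPECT_CALL(*chunkService, DeleteChunk(_, _, _, _))
    .WillRepeatedly(DoAll(
        SetArgPointee<2>(chunkResponse),  // copy the prepared response out
        Invoke([](RpcController* controller, const ChunkRequest* chunkRequest,
                  ChunkResponse* chunkResponse, Closure* done) {
            // Without running 'done', the client side would block forever.
            brpc::ClosureGuard doneGuard(done);
        })));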
TEST_F(NameSpaceServiceTest, testRecoverFile) { getRequest.set_date(TimeUtility::GetTimeofDayUs()); stub.GetFileInfo(&cntl, &getRequest, &getResponse, NULL); if (!cntl.Failed()) { - FileInfo file = getResponse.fileinfo(); + FileInfo file = getResponse.fileinfo(); ASSERT_EQ(getResponse.statuscode(), StatusCode::kOK); ASSERT_EQ(file.id(), 2); ASSERT_EQ(file.filename(), "dir1"); @@ -2249,8 +2251,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { allocRequest.set_allocateifnotexist(true); stub.GetOrAllocateSegment(&cntl, &allocRequest, &allocResponse, NULL); if (!cntl.Failed()) { - ASSERT_EQ(allocResponse.statuscode(), - StatusCode::kOK); + ASSERT_EQ(allocResponse.statuscode(), StatusCode::kOK); } else { ASSERT_TRUE(false); } @@ -2278,10 +2279,10 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { ListDirRequest listRequest; ListDirResponse listResponse; uint64_t date = TimeUtility::GetTimeofDayUs(); - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); + std::string sig = + Authenticator::CalcString2Signature(str2sig, authOptions.rootPassword); listRequest.set_signature(sig); listRequest.set_filename(RECYCLEBINDIR); listRequest.set_owner(authOptions.rootOwner); @@ -2295,7 +2296,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { ASSERT_EQ(file.originalfullpathname(), "/dir1/file2"); ASSERT_EQ(file.filestatus(), FileStatus::kFileCreated); ASSERT_EQ(listResponse.fileinfo_size(), 1); - } else { + } else { ASSERT_TRUE(false); } @@ -2327,7 +2328,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { ASSERT_EQ(file.filename(), "file2"); ASSERT_EQ(file.filestatus(), FileStatus::kFileCreated); ASSERT_EQ(listResponse.fileinfo_size(), 1); - } else { + } else { ASSERT_TRUE(false); } @@ -2400,14 +2401,14 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { ASSERT_TRUE(false); } - // 3. check the ctime of recovered file is greater than the other in recyclebin //NOLINT + // 3. check the ctime of recovered file is greater than the other in + // recyclebin //NOLINT FileInfo recycleFile; cntl.Reset(); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + str2sig = Authenticator::GetString2Signature(date, authOptions.rootOwner); + sig = + Authenticator::CalcString2Signature(str2sig, authOptions.rootPassword); listRequest.set_signature(sig); listRequest.set_filename(RECYCLEBINDIR); listRequest.set_owner(authOptions.rootOwner); @@ -2471,10 +2472,9 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { // 3. 
check the fileId of recovered file 3 and not recovered is 4 cntl.Reset(); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + str2sig = Authenticator::GetString2Signature(date, authOptions.rootOwner); + sig = + Authenticator::CalcString2Signature(str2sig, authOptions.rootPassword); listRequest.set_signature(sig); listRequest.set_filename(RECYCLEBINDIR); listRequest.set_owner(authOptions.rootOwner); @@ -2588,8 +2588,8 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { createCloneRequest.set_poolset(kDefaultPoolset); createCloneRequest.set_clonesource("/sourcefile1"); cntl.Reset(); - stub.CreateCloneFile(&cntl, &createCloneRequest, - &createCloneResponse, NULL); + stub.CreateCloneFile(&cntl, &createCloneRequest, &createCloneResponse, + NULL); if (!cntl.Failed()) { ASSERT_EQ(createCloneResponse.statuscode(), StatusCode::kOK); } else { @@ -2632,7 +2632,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { stub.RecoverFile(&cntl, &recoverRequest, &recoverRresponse, NULL); if (!cntl.Failed()) { ASSERT_EQ(recoverRresponse.statuscode(), - StatusCode::kRecoverFileCloneMetaInstalled); + StatusCode::kRecoverFileCloneMetaInstalled); } else { std::cout << cntl.ErrorText(); ASSERT_TRUE(false); @@ -2649,8 +2649,8 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { createCloneRequest.set_owner("owner"); createCloneRequest.set_clonesource("/sourcefile1"); cntl.Reset(); - stub.CreateCloneFile(&cntl, &createCloneRequest, - &createCloneResponse, NULL); + stub.CreateCloneFile(&cntl, &createCloneRequest, &createCloneResponse, + NULL); if (!cntl.Failed()) { ASSERT_EQ(createCloneResponse.statuscode(), StatusCode::kOK); } else { @@ -2690,8 +2690,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { recoverRequest.set_date(TimeUtility::GetTimeofDayUs()); stub.RecoverFile(&cntl, &recoverRequest, &recoverRresponse, NULL); if (!cntl.Failed()) { - ASSERT_EQ(recoverRresponse.statuscode(), - StatusCode::kRecoverFileError); + ASSERT_EQ(recoverRresponse.statuscode(), StatusCode::kRecoverFileError); } else { std::cout << cntl.ErrorText(); ASSERT_TRUE(false); @@ -2754,9 +2753,9 @@ TEST_F(NameSpaceServiceTest, TestDeAllocateSegment) { // create file and allocate segment { std::vector logicalPools{1, 2, 3}; - EXPECT_CALL(*topology_, GetLogicalPoolInCluster(_)) - .Times(AtLeast(1)) - .WillRepeatedly(Return(logicalPools)); + EXPECT_CALL(*topology_, GetLogicalPoolInCluster(_)) + .Times(AtLeast(1)) + .WillRepeatedly(Return(logicalPools)); CreateFileRequest createRequest; CreateFileResponse createResponse; createRequest.set_filename(filename); diff --git a/test/mds/schedule/coordinator_test.cpp b/test/mds/schedule/coordinator_test.cpp index b18aa07b31..90284dfeff 100644 --- a/test/mds/schedule/coordinator_test.cpp +++ b/test/mds/schedule/coordinator_test.cpp @@ -20,19 +20,21 @@ * Author: lixiaocui */ -#include #include "src/mds/schedule/coordinator.h" + +#include + #include "src/mds/common/mds_define.h" -#include "test/mds/schedule/mock_topoAdapter.h" #include "test/mds/mock/mock_topology.h" #include "test/mds/schedule/common.h" +#include "test/mds/schedule/mock_topoAdapter.h" -using ::curve::mds::topology::MockTopology; using ::curve::mds::schedule::ScheduleOption; +using ::curve::mds::topology::MockTopology; +using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; -using ::testing::_; using 
::curve::mds::topology::UNINTIALIZE_ID; @@ -85,29 +87,31 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { copySetKey.first = 1; copySetKey.second = 1; Operator testOperator(startEpoch, copySetKey, - OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared(4)); + OperatorPriority::NormalPriority, steady_clock::now(), + std::make_shared(4)); testOperator.timeLimit = std::chrono::seconds(100); auto info = GetCopySetInfoForTest(); PeerInfo peer(4, 1, 1, "127.0.0.1", 9000); ChunkServerInfo csInfo(peer, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, - 1, 10, 1, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 1, 10, 1, + ChunkServerStatisticInfo{}); ::curve::mds::heartbeat::CopySetConf res; { // 1. test copySet do not have operator EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { // 2. test copySet has operator and not execute EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) - .Times(2).WillOnce(DoAll(SetArgPointee<1>(info), Return(true))) - .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); + .Times(2) + .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))) + .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); EXPECT_CALL(*topoAdapter, GetChunkServerInfo(_, _)) .Times(3) .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(true))) @@ -115,21 +119,22 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(Return(false)); coordinator->GetOpController()->AddOperator(testOperator); Operator opRes; - ASSERT_TRUE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); - // 第一次下发配置 - ASSERT_EQ(4, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_TRUE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); + // First configuration distribution + ASSERT_EQ(4, coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ(ConfigChangeType::ADD_PEER, res.type()); - // 第二次获取chunkserver失败 - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + // Failed to obtain chunkserver for the second time + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + // 3. 
Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -139,21 +144,23 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { EXPECT_CALL(*topoAdapter, GetChunkServerInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); Operator opRes; - ASSERT_FALSE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); + ASSERT_FALSE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到chunkserver的信息 + // Unable to obtain chunkserver information ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter, GetChunkServerInfo(_, _)) .WillOnce(Return(false)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); - ASSERT_FALSE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); + ASSERT_FALSE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); } { @@ -167,8 +174,9 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { info.configChangeInfo.set_allocated_peer(replica); EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, info.configChangeInfo, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + info.configChangeInfo, &res)); } { @@ -179,8 +187,9 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); coordinator->GetOpController()->RemoveOperator(info.id); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { @@ -189,16 +198,18 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { info.peers.emplace_back(PeerInfo(4, 4, 4, "192.10.123.1", 9000)); EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { // 7. 
test transfer copysetInfo err EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(Return(false)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } } @@ -228,34 +239,36 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { CopySetKey copySetKey; copySetKey.first = 1; copySetKey.second = 1; - Operator testOperator( - startEpoch, copySetKey, OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared(1, 4)); + Operator testOperator(startEpoch, copySetKey, + OperatorPriority::NormalPriority, steady_clock::now(), + std::make_shared(1, 4)); testOperator.timeLimit = std::chrono::seconds(100); auto info = GetCopySetInfoForTest(); PeerInfo peer(4, 1, 1, "127.0.0.1", 9000); ChunkServerInfo csInfo(peer, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, - 1, 10, 1, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 1, 10, 1, + ChunkServerStatisticInfo{}); PeerInfo peer1(1, 1, 1, "127.0.0.1", 9001); ChunkServerInfo csInfo1(peer1, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, - 1, 10, 1, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 1, 10, 1, + ChunkServerStatisticInfo{}); ::curve::mds::heartbeat::CopySetConf res; { // 1. test copySet do not have operator EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { // 2. test copySet has operator and not execute EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) - .Times(2).WillOnce(DoAll(SetArgPointee<1>(info), Return(true))) - .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); + .Times(2) + .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))) + .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); EXPECT_CALL(*topoAdapter, GetChunkServerInfo(4, _)) .Times(3) .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(true))) @@ -265,22 +278,23 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); coordinator->GetOpController()->AddOperator(testOperator); Operator opRes; - ASSERT_TRUE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); - // 第一次下发配置 - ASSERT_EQ(4, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_TRUE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); + // First configuration distribution + ASSERT_EQ(4, coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ("127.0.0.1:9001:0", res.oldpeer().address()); ASSERT_EQ(ConfigChangeType::CHANGE_PEER, res.type()); - // 第二次获取chunkserver失败 - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + // Failed to obtain chunkserver for the second time + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + // 3. 
Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -290,21 +304,23 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { EXPECT_CALL(*topoAdapter, GetChunkServerInfo(4, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); Operator opRes; - ASSERT_FALSE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); + ASSERT_FALSE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到chunkserver的信息 + // Unable to obtain chunkserver information ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter, GetChunkServerInfo(_, _)) .WillOnce(Return(false)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); - ASSERT_FALSE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); + ASSERT_FALSE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); } { @@ -318,8 +334,9 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { info.configChangeInfo.set_allocated_peer(replica); EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, info.configChangeInfo, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + info.configChangeInfo, &res)); } { @@ -330,8 +347,9 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); coordinator->GetOpController()->RemoveOperator(info.id); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { @@ -340,16 +358,18 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { info.peers.emplace_back(PeerInfo(4, 4, 4, "192.10.123.1", 9000)); EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { // 7. test transfer copysetInfo err EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(Return(false)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } } @@ -359,70 +379,68 @@ TEST(CoordinatorTest, test_ChunkserverGoingToAdd) { auto coordinator = std::make_shared(topoAdapter); ScheduleOption scheduleOption; scheduleOption.operatorConcurrent = 4; - coordinator->InitScheduler( - scheduleOption, std::make_shared(topo)); + coordinator->InitScheduler(scheduleOption, + std::make_shared(topo)); { - // 1. 
copyset上没有要变更的operator + // 1. There are no operators to change on the copyset ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 1})); } { - // 2. copyset上有leader变更,并且目的leader为chunkserver-1 - Operator testOperator(1, CopySetKey{1, 1}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(2, 1)); + // 2. There is a leader change on the copyset and the target leader is + // chunkserver-1 + Operator testOperator( + 1, CopySetKey{1, 1}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(2, 1)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 1})); } { - // 3. copyset上有remove peer操作 - Operator testOperator(1, CopySetKey{1, 2}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(1)); + // 3. There is a remove peer operation on the copyset + Operator testOperator( + 1, CopySetKey{1, 2}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(1)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 2})); } { - // 4. copyset上有add peer操作, target不是1 - Operator testOperator(1, CopySetKey{1, 3}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(2)); + // 4. There is an add peer operation on the copyset, but the target is + // not 1 + Operator testOperator( + 1, CopySetKey{1, 3}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(2)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 3})); } { - // 5. copyset上有add peer操作, target是1 - Operator testOperator(1, CopySetKey{1, 4}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(1)); + // 5. There is an add peer operation on the copyset, with a target of 1 + Operator testOperator( + 1, CopySetKey{1, 4}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(1)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_TRUE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 4})); } { - // 6. copyset上有change peer操作,target不是1 - Operator testOperator(1, CopySetKey{1, 5}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(4, 2)); + // 6. There is a change peer operation on the copyset, but the target is + // not 1 + Operator testOperator( + 1, CopySetKey{1, 5}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(4, 2)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 5})); } { - // 7. copyset上有change peer操作,target是1 - Operator testOperator(1, CopySetKey{1, 6}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(4, 1)); + // 7. 
There is a change peer operation on the copyset, with a target of + // 1 + Operator testOperator( + 1, CopySetKey{1, 6}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(4, 1)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_TRUE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 6})); } @@ -479,15 +497,15 @@ TEST(CoordinatorTest, test_RapidLeaderSchedule) { EXPECT_CALL(*topoAdapter, GetLogicalpools()) .WillOnce(Return(std::vector{})); ASSERT_EQ(kScheduleErrCodeInvalidLogicalPool, - coordinator->RapidLeaderSchedule(2)); + coordinator->RapidLeaderSchedule(2)); } TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { /* - 场景: - chunkserver1: offline 有恢复op - chunkserver2: offline 没有恢复op,没有candidate,有其他op - chunkserver3: offline 有candidate + Scenario: + chunkserver1: offline has recovery op + chunkserver2: offline has no recovery op, no candidate, and other ops + chunkserver3: offline has candidate chunkserver4: online chunkserver4: online */ @@ -496,21 +514,18 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { auto topoAdapter = std::make_shared(); auto coordinator = std::make_shared(topoAdapter); - // 获取option + // Get option ScheduleOption scheduleOption = GetScheduleOption(); coordinator->InitScheduler(scheduleOption, metric); - // 构造chunkserver + // Construct chunkserver std::vector chunkserverInfos; std::vector peerInfos; for (int i = 1; i <= 6; i++) { PeerInfo peer(i, i % 3 + 1, i, "192.168.0." + std::to_string(i), 9000); - ChunkServerInfo csInfo( - peer, - OnlineState::ONLINE, - DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, - 1, 10, 1, ChunkServerStatisticInfo{}); + ChunkServerInfo csInfo(peer, OnlineState::ONLINE, DiskState::DISKNORMAL, + ChunkServerStatus::READWRITE, 1, 10, 1, + ChunkServerStatisticInfo{}); if (i <= 3) { csInfo.state = OnlineState::OFFLINE; } @@ -519,28 +534,21 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { peerInfos.emplace_back(peer); } - // 构造op - Operator opForCopySet1( - 1, CopySetKey{1, 1}, - OperatorPriority::HighPriority, - steady_clock::now(), - std::make_shared(1, 4)); + // Construct op + Operator opForCopySet1(1, CopySetKey{1, 1}, OperatorPriority::HighPriority, + steady_clock::now(), + std::make_shared(1, 4)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(opForCopySet1)); Operator opForCopySet2( - 2, CopySetKey{1, 2}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(2, 4)); + 2, CopySetKey{1, 2}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(2, 4)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(opForCopySet2)); - // 构造copyset + // Construct a copyset std::vector peersFor2({peerInfos[1], peerInfos[3], peerInfos[4]}); - CopySetInfo copyset2( - CopySetKey{1, 2}, 1, 4, - peersFor2, - ConfigChangeInfo{}, - CopysetStatistics{}); + CopySetInfo copyset2(CopySetKey{1, 2}, 1, 4, peersFor2, ConfigChangeInfo{}, + CopysetStatistics{}); std::vector peersFor3({peerInfos[2], peerInfos[3], peerInfos[4]}); ConfigChangeInfo configChangeInfoForCS3; @@ -550,13 +558,10 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { configChangeInfoForCS3.set_allocated_peer(replica); configChangeInfoForCS3.set_type(ConfigChangeType::CHANGE_PEER); configChangeInfoForCS3.set_finished(true); - CopySetInfo copyset3( - CopySetKey{1, 3}, 1, 4, - peersFor3, - configChangeInfoForCS3, - CopysetStatistics{}); + CopySetInfo copyset3(CopySetKey{1, 3}, 1, 4, peersFor3, + configChangeInfoForCS3, CopysetStatistics{}); - 
// 1. 查询所有chunkserver + // 1. Query all chunkservers { EXPECT_CALL(*topoAdapter, GetChunkServerInfos()) .WillOnce(Return(chunkserverInfos)); @@ -567,8 +572,8 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { std::map statusMap; ASSERT_EQ(kScheduleErrCodeSuccess, - coordinator->QueryChunkServerRecoverStatus( - std::vector{}, &statusMap)); + coordinator->QueryChunkServerRecoverStatus( + std::vector{}, &statusMap)); ASSERT_EQ(6, statusMap.size()); ASSERT_TRUE(statusMap[1]); ASSERT_FALSE(statusMap[2]); @@ -578,26 +583,26 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { ASSERT_FALSE(statusMap[6]); } - // 2. 查询指定chunkserver, 但chunkserver不存在 + // 2. Query for specified chunkserver, but chunkserver does not exist { EXPECT_CALL(*topoAdapter, GetChunkServerInfo(7, _)) .WillOnce(Return(false)); std::map statusMap; ASSERT_EQ(kScheduleErrInvalidQueryChunkserverID, - coordinator->QueryChunkServerRecoverStatus( - std::vector{7}, &statusMap)); + coordinator->QueryChunkServerRecoverStatus( + std::vector{7}, &statusMap)); } - // 3. 查询指定chunkserver, 不在恢复中 + // 3. Query the specified chunkserver, not in recovery { EXPECT_CALL(*topoAdapter, GetChunkServerInfo(6, _)) - .WillOnce(DoAll(SetArgPointee<1>(chunkserverInfos[5]), - Return(true))); + .WillOnce( + DoAll(SetArgPointee<1>(chunkserverInfos[5]), Return(true))); std::map statusMap; ASSERT_EQ(kScheduleErrCodeSuccess, - coordinator->QueryChunkServerRecoverStatus( - std::vector{6}, &statusMap)); + coordinator->QueryChunkServerRecoverStatus( + std::vector{6}, &statusMap)); ASSERT_EQ(1, statusMap.size()); ASSERT_FALSE(statusMap[6]); } @@ -606,4 +611,3 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { } // namespace schedule } // namespace mds } // namespace curve - diff --git a/test/mds/schedule/leaderScheduler_test.cpp b/test/mds/schedule/leaderScheduler_test.cpp index 3be00637b0..f1705f950a 100644 --- a/test/mds/schedule/leaderScheduler_test.cpp +++ b/test/mds/schedule/leaderScheduler_test.cpp @@ -21,20 +21,21 @@ */ #include -#include "src/mds/schedule/scheduler.h" + +#include "src/common/timeutility.h" #include "src/mds/schedule/scheduleMetrics.h" -#include "test/mds/schedule/mock_topoAdapter.h" +#include "src/mds/schedule/scheduler.h" #include "test/mds/mock/mock_topology.h" #include "test/mds/schedule/common.h" -#include "src/common/timeutility.h" +#include "test/mds/schedule/mock_topoAdapter.h" using ::curve::mds::topology::MockTopology; using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace mds { @@ -58,8 +59,8 @@ class TestLeaderSchedule : public ::testing::Test { opt.scatterWithRangePerent = 0.2; opt.leaderSchedulerIntervalSec = 1; opt.chunkserverCoolingTimeSec = 0; - leaderScheduler_ = std::make_shared( - opt, topoAdapter_, opController_); + leaderScheduler_ = + std::make_shared(opt, topoAdapter_, opController_); } void TearDown() override { @@ -91,15 +92,12 @@ TEST_F(TestLeaderSchedule, test_has_chunkServer_offline) { auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; - ChunkServerInfo csInfo1( - peer1, offlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - 
ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, offlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); std::vector csInfos({csInfo1, csInfo2, csInfo3}); PoolIdType poolId = 1; @@ -110,8 +108,8 @@ TEST_F(TestLeaderSchedule, test_has_chunkServer_offline) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); std::vector copySetInfos({copySet1}); EXPECT_CALL(*topoAdapter_, GetLogicalpools()) @@ -134,15 +132,12 @@ TEST_F(TestLeaderSchedule, test_copySet_has_candidate) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); std::vector csInfos({csInfo1, csInfo2, csInfo3}); PoolIdType poolId = 1; CopySetIdType copysetId = 1; @@ -152,8 +147,8 @@ TEST_F(TestLeaderSchedule, test_copySet_has_candidate) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); copySet1.candidatePeerInfo = PeerInfo(1, 1, 1, "192.168.10.1", 9000); std::vector copySetInfos({copySet1}); @@ -165,7 +160,8 @@ TEST_F(TestLeaderSchedule, test_copySet_has_candidate) { .WillRepeatedly(Return(copySetInfos)); leaderScheduler_->Schedule(); - ASSERT_EQ(0, opController_->GetOperators().size());} + ASSERT_EQ(0, opController_->GetOperators().size()); +} TEST_F(TestLeaderSchedule, test_cannot_get_chunkServerInfo) { PeerInfo peer1(1, 1, 1, "192.168.10.1", 9000); @@ -174,15 +170,12 @@ TEST_F(TestLeaderSchedule, test_cannot_get_chunkServerInfo) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, 
statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); std::vector csInfos({csInfo1, csInfo2, csInfo3}); PoolIdType poolId = 1; @@ -193,8 +186,8 @@ TEST_F(TestLeaderSchedule, test_cannot_get_chunkServerInfo) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); std::vector copySetInfos({copySet1}); EXPECT_CALL(*topoAdapter_, GetLogicalpools()) @@ -206,7 +199,6 @@ TEST_F(TestLeaderSchedule, test_cannot_get_chunkServerInfo) { EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(1, _)) .WillRepeatedly(Return(false)); - leaderScheduler_->Schedule(); ASSERT_EQ(0, opController_->GetOperators().size()); } @@ -218,15 +210,12 @@ TEST_F(TestLeaderSchedule, test_no_need_tranferLeaderOut) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 1, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 1, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); csInfo3.startUpTime = 3; std::vector csInfos({csInfo1, csInfo2, csInfo3}); @@ -238,8 +227,8 @@ TEST_F(TestLeaderSchedule, test_no_need_tranferLeaderOut) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); std::vector copySetInfos({copySet1}); EXPECT_CALL(*topoAdapter_, GetLogicalpools()) @@ -264,25 +253,19 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_normal) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 1, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - - ChunkServerInfo csInfo4( - peer4, onlineState, diskState, ChunkServerStatus::READWRITE, - 1, 100, 10, statInfo); - ChunkServerInfo csInfo5( - peer5, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo6( - peer6, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 1, 100, 10, statInfo); + ChunkServerInfo 
csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + + ChunkServerInfo csInfo4(peer4, onlineState, diskState, + ChunkServerStatus::READWRITE, 1, 100, 10, statInfo); + ChunkServerInfo csInfo5(peer5, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo6(peer6, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); struct timeval tm; gettimeofday(&tm, NULL); csInfo3.startUpTime = tm.tv_sec - 2; @@ -298,11 +281,11 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_normal) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); CopySetInfo copySet2(CopySetKey{2, 1}, epoch, 5, - std::vector({peer4, peer5, peer6}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer4, peer5, peer6}), + ConfigChangeInfo{}, CopysetStatistics{}); std::vector copySetInfos1({copySet1}); std::vector copySetInfos2({copySet2}); @@ -334,14 +317,14 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_normal) { ASSERT_TRUE(opController_->GetOperatorById(copySet1.id, &op)); ASSERT_EQ(OperatorPriority::NormalPriority, op.priority); ASSERT_EQ(std::chrono::seconds(10), op.timeLimit); - TransferLeader *res = dynamic_cast(op.step.get()); + TransferLeader* res = dynamic_cast(op.step.get()); ASSERT_TRUE(res != nullptr); ASSERT_EQ(csInfo3.info.id, res->GetTargetPeer()); ASSERT_TRUE(opController_->GetOperatorById(copySet2.id, &op)); ASSERT_EQ(OperatorPriority::NormalPriority, op.priority); ASSERT_EQ(std::chrono::seconds(10), op.timeLimit); - res = dynamic_cast(op.step.get()); + res = dynamic_cast(op.step.get()); ASSERT_TRUE(res != nullptr); ASSERT_EQ(csInfo6.info.id, res->GetTargetPeer()); } @@ -359,25 +342,19 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_pendding) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::PENDDING, - 0, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 5, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - - ChunkServerInfo csInfo4( - peer4, onlineState, diskState, ChunkServerStatus::READWRITE, - 4, 100, 10, statInfo); - ChunkServerInfo csInfo5( - peer5, onlineState, diskState, ChunkServerStatus::READWRITE, - 5, 100, 10, statInfo); - ChunkServerInfo csInfo6( - peer6, onlineState, diskState, ChunkServerStatus::PENDDING, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::PENDDING, 0, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 5, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + + ChunkServerInfo csInfo4(peer4, onlineState, diskState, + ChunkServerStatus::READWRITE, 4, 100, 10, statInfo); + ChunkServerInfo csInfo5(peer5, onlineState, diskState, + ChunkServerStatus::READWRITE, 5, 100, 10, 
statInfo); + ChunkServerInfo csInfo6(peer6, onlineState, diskState, + ChunkServerStatus::PENDDING, 0, 100, 10, statInfo); struct timeval tm; gettimeofday(&tm, NULL); csInfo3.startUpTime = tm.tv_sec - 2; @@ -393,11 +370,11 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_pendding) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); CopySetInfo copySet2(CopySetKey{2, 1}, epoch, 5, - std::vector({peer4, peer5, peer6}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer4, peer5, peer6}), + ConfigChangeInfo{}, CopysetStatistics{}); std::vector copySetInfos1({copySet1}); std::vector copySetInfos2({copySet2}); @@ -429,7 +406,7 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_pendding) { ASSERT_TRUE(opController_->GetOperatorById(copySet1.id, &op)); ASSERT_EQ(OperatorPriority::NormalPriority, op.priority); ASSERT_EQ(std::chrono::seconds(10), op.timeLimit); - TransferLeader *res = dynamic_cast(op.step.get()); + TransferLeader* res = dynamic_cast(op.step.get()); ASSERT_TRUE(res != nullptr); ASSERT_EQ(csInfo3.info.id, res->GetTargetPeer()); @@ -439,7 +416,7 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_pendding) { TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { // chunkserver1 chunkserver2 chunkserver3 chunkserver4 // leaderCount 0 3 2 1 - // copyset 1 1 1(有operator) + // copyset 1 1 1(with operator) // 2 2 2 // 3 3 3 PeerInfo peer1(1, 1, 1, "192.168.10.1", 9000); @@ -449,19 +426,15 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); csInfo1.startUpTime = ::curve::common::TimeUtility::GetTimeofDaySec() - 4; - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 3, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo4( - peer4, onlineState, diskState, ChunkServerStatus::READWRITE, - 1, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 3, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo4(peer4, onlineState, diskState, + ChunkServerStatus::READWRITE, 1, 100, 10, statInfo); std::vector csInfos({csInfo1, csInfo2, csInfo3, csInfo4}); PoolIdType poolId = 1; @@ -472,18 +445,18 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); copySetKey.second = 2; leader = 3; CopySetInfo copySet2(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, 
CopysetStatistics{}); copySetKey.second = 3; leader = 4; CopySetInfo copySet3(copySetKey, epoch, leader, - std::vector({peer2, peer3, peer4}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer2, peer3, peer4}), + ConfigChangeInfo{}, CopysetStatistics{}); copySetKey.second = 1; Operator testOperator(1, copySetKey, OperatorPriority::NormalPriority, @@ -498,7 +471,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { .Times(2) .WillOnce(Return(std::vector({copySet1}))) .WillOnce(Return(std::vector({copySet3, copySet2}))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(1, _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(1, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(3, _)) @@ -513,7 +486,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { ASSERT_TRUE(opController_->GetOperatorById(copySet2.id, &op)); ASSERT_EQ(OperatorPriority::NormalPriority, op.priority); ASSERT_EQ(std::chrono::seconds(10), op.timeLimit); - TransferLeader *res = dynamic_cast(op.step.get()); + TransferLeader* res = dynamic_cast(op.step.get()); ASSERT_TRUE(res != nullptr); ASSERT_EQ(1, res->GetTargetPeer()); } @@ -521,7 +494,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { // chunkserver1 chunkserver2 chunkserver3 chunkserver4 // leaderCount 0 3 2 1 - // copyset 1 1 1(有operator) + // copyset 1 1 1(with operator) // 2 2 2 // 3 3 3 PeerInfo peer1(1, 1, 1, "192.168.10.1", 9000); @@ -531,19 +504,15 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); csInfo1.startUpTime = ::curve::common::TimeUtility::GetTimeofDaySec() - 4; - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 3, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::PENDDING, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo4( - peer4, onlineState, diskState, ChunkServerStatus::READWRITE, - 1, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 3, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::PENDDING, 2, 100, 10, statInfo); + ChunkServerInfo csInfo4(peer4, onlineState, diskState, + ChunkServerStatus::READWRITE, 1, 100, 10, statInfo); std::vector csInfos({csInfo1, csInfo2, csInfo3, csInfo4}); PoolIdType poolId = 1; @@ -554,18 +523,18 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); copySetKey.second = 2; leader = 3; CopySetInfo copySet2(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); copySetKey.second = 3; leader = 4; CopySetInfo 
copySet3(copySetKey, epoch, leader, - std::vector({peer2, peer3, peer4}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer2, peer3, peer4}), + ConfigChangeInfo{}, CopysetStatistics{}); copySetKey.second = 1; Operator testOperator(1, copySetKey, OperatorPriority::NormalPriority, @@ -580,7 +549,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { .Times(2) .WillOnce(Return(std::vector({copySet1}))) .WillOnce(Return(std::vector({copySet3, copySet2}))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(1, _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(1, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(3, _)) @@ -595,7 +564,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { ASSERT_TRUE(opController_->GetOperatorById(copySet2.id, &op)); ASSERT_EQ(OperatorPriority::NormalPriority, op.priority); ASSERT_EQ(std::chrono::seconds(10), op.timeLimit); - TransferLeader *res = dynamic_cast(op.step.get()); + TransferLeader* res = dynamic_cast(op.step.get()); ASSERT_TRUE(res != nullptr); ASSERT_EQ(1, res->GetTargetPeer()); } @@ -603,7 +572,3 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { } // namespace schedule } // namespace mds } // namespace curve - - - - diff --git a/test/mds/schedule/operatorStep_test.cpp b/test/mds/schedule/operatorStep_test.cpp index 3cab9d2911..0147579ce8 100644 --- a/test/mds/schedule/operatorStep_test.cpp +++ b/test/mds/schedule/operatorStep_test.cpp @@ -22,6 +22,7 @@ #include #include + #include "src/common/timeutility.h" #include "test/mds/schedule/common.h" @@ -30,8 +31,8 @@ namespace mds { namespace schedule { TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { auto originCopySetInfo = GetCopySetInfoForTest(); - std::shared_ptr transferLeader - = std::make_shared(1, 2); + std::shared_ptr transferLeader = + std::make_shared(1, 2); auto testCopySetInfo = originCopySetInfo; ApplyStatus applyStatus; @@ -49,21 +50,21 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { // 2. transfer leader complete testCopySetInfo.leader = 2; ASSERT_EQ(ApplyStatus::Finished, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { // 3. report leader is not record old/target leader in operator testCopySetInfo.leader = 3; ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { // 4.
transfer leader fail testCopySetInfo.leader = 1; - CandidateError *candidateError = new CandidateError(); - std::string *errMsg = new std::string("transfer leader err"); + CandidateError* candidateError = new CandidateError(); + std::string* errMsg = new std::string("transfer leader err"); candidateError->set_allocated_errmsg(errMsg); candidateError->set_errtype(1); testCopySetInfo.candidatePeerInfo = PeerInfo(2, 1, 1, "", 9000); @@ -76,7 +77,7 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { @@ -90,14 +91,14 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.release_err(); ASSERT_EQ(ApplyStatus::OnGoing, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { // 6. tarnfer leader type not complete testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { @@ -110,7 +111,7 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { ConfigChangeType::TRANSFER_LEADER); testCopySetInfo.configChangeInfo.set_allocated_peer(replica); ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } } @@ -127,8 +128,8 @@ TEST(OperatorSepTest, OperatorSepTest_AddPeer_Test) { // 2. add peer complete auto testCopySetInfo = originCopySetInfo; - testCopySetInfo.peers.emplace_back( - PeerInfo(4, 3, 4, "192.168.10.4", 9000)); + testCopySetInfo.peers.emplace_back(PeerInfo(4, 3, 4, "192.168.10.4", 9000)); ASSERT_EQ(ApplyStatus::Finished, addPeer->Apply(testCopySetInfo, &copySetConf)); @@ -141,8 +141,8 @@ TEST(OperatorSepTest, OperatorSepTest_AddPeer_Test) { testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); testCopySetInfo.configChangeInfo.set_finished(false); - std::string *errMsg = new std::string("add peer failed"); - CandidateError *candidateError = new CandidateError(); + std::string* errMsg = new std::string("add peer failed"); + CandidateError* candidateError = new CandidateError(); candidateError->set_errtype(2); candidateError->set_allocated_errmsg(errMsg); testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); @@ -158,7 +158,7 @@ // 5. add peer type not complete testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::REMOVE_PEER); ASSERT_EQ(ApplyStatus::Failed, - addPeer->Apply(testCopySetInfo, &copySetConf)); + addPeer->Apply(testCopySetInfo, &copySetConf)); // 6. config change item do not match testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); @@ -174,8 +174,7 @@ TEST(OperatorStepTest, OperatorStepTest_RemovePeer_Test) { auto originCopySetInfo = GetCopySetInfoForTest(); - std::shared_ptr - removePeer = std::make_shared(3); + std::shared_ptr removePeer = std::make_shared(3); // 1.
remove peer haven't started CopySetConf copySetConf; @@ -199,13 +198,12 @@ TEST(OperatorStepTest, OperatorStepTest_RemovePeer_Test) { replica->set_address("192.10.12.4:9000:0"); testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::REMOVE_PEER); - std::string *errMsg = new std::string("remove peer err"); - CandidateError *candidateError = new CandidateError(); + std::string* errMsg = new std::string("remove peer err"); + CandidateError* candidateError = new CandidateError(); candidateError->set_errtype(2); candidateError->set_allocated_errmsg(errMsg); testCopySetInfo.configChangeInfo.set_finished(false); - testCopySetInfo.configChangeInfo.set_allocated_err( - candidateError); + testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); ASSERT_EQ(ApplyStatus::Failed, removePeer->Apply(testCopySetInfo, &copySetConf)); @@ -218,7 +216,7 @@ // 5. remove peer type not complete testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); ASSERT_EQ(ApplyStatus::Failed, - removePeer->Apply(testCopySetInfo, &copySetConf)); + removePeer->Apply(testCopySetInfo, &copySetConf)); // 5. config change item do not match testCopySetInfo.candidatePeerInfo = PeerInfo(10, 1, 1, "", 9000); @@ -234,31 +232,31 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { auto originCopySetInfo = GetCopySetInfoForTest(); - std::shared_ptr - changePeer = std::make_shared(3, 4); + std::shared_ptr changePeer = + std::make_shared(3, 4); CopySetConf copySetConf; - // 1. change peer还未开始 + // 1. change peer has not yet started { ASSERT_EQ(ApplyStatus::Ordered, - changePeer->Apply(originCopySetInfo, &copySetConf)); + changePeer->Apply(originCopySetInfo, &copySetConf)); ASSERT_EQ(4, copySetConf.configChangeItem); ASSERT_EQ(3, copySetConf.oldOne); ASSERT_EQ(ConfigChangeType::CHANGE_PEER, copySetConf.type); } auto testCopySetInfo = originCopySetInfo; - // 2. change peer完成 + // 2. change peer completed { auto testCopySetInfo = originCopySetInfo; testCopySetInfo.peers.erase(testCopySetInfo.peers.begin() + 2); testCopySetInfo.peers.emplace_back( - PeerInfo(4, 3, 4, "192.168.10.4", 9000)); + PeerInfo(4, 3, 4, "192.168.10.4", 9000)); ASSERT_EQ(ApplyStatus::Finished, - changePeer->Apply(testCopySetInfo, &copySetConf)); + changePeer->Apply(testCopySetInfo, &copySetConf)); } - // 3. change peer失败 + // 3. change peer failed { testCopySetInfo = originCopySetInfo; testCopySetInfo.candidatePeerInfo = PeerInfo(4, 1, 1, "", 9000); @@ -269,24 +267,24 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { testCopySetInfo.configChangeInfo.set_type( ConfigChangeType::CHANGE_PEER); testCopySetInfo.configChangeInfo.set_finished(false); - std::string *errMsg = new std::string("add peer failed"); - CandidateError *candidateError = new CandidateError(); + std::string* errMsg = new std::string("add peer failed"); + CandidateError* candidateError = new CandidateError(); candidateError->set_errtype(2); candidateError->set_allocated_errmsg(errMsg); testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); ASSERT_EQ(ApplyStatus::Failed, - changePeer->Apply(testCopySetInfo, &copySetConf)); + changePeer->Apply(testCopySetInfo, &copySetConf)); } - // 4. 上报未完成 + // 4.
Reporting incomplete { testCopySetInfo.configChangeInfo.set_finished(false); testCopySetInfo.configChangeInfo.release_err(); ASSERT_EQ(ApplyStatus::OnGoing, - changePeer->Apply(testCopySetInfo, &copySetConf)); + changePeer->Apply(testCopySetInfo, &copySetConf)); } - // 5. 上报的变更类型和mds中的oprator不相符合 + // 5. The reported change type does not match the operator in mds { testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); testCopySetInfo.configChangeInfo.set_finished(true); @@ -296,7 +294,7 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { replica->set_address("192.10.12.5:9000:0"); testCopySetInfo.configChangeInfo.set_allocated_peer(replica); ASSERT_EQ(ApplyStatus::Failed, - changePeer->Apply(testCopySetInfo, &copySetConf)); + changePeer->Apply(testCopySetInfo, &copySetConf)); } } @@ -362,9 +360,9 @@ TEST(OperatorStepTest, TestStartScanPeer) { auto ret = step->Apply(copysetInfo, &copysetConf); ASSERT_EQ(ret, ApplyStatus::Ordered); - ASSERT_EQ(copysetConf.id.first, 1); // logical pool id - ASSERT_EQ(copysetConf.id.second, 1); // copyset id - ASSERT_EQ(copysetConf.epoch, 1); // epoch + ASSERT_EQ(copysetConf.id.first, 1); // logical pool id + ASSERT_EQ(copysetConf.id.second, 1); // copyset id + ASSERT_EQ(copysetConf.epoch, 1); // epoch ASSERT_EQ(copysetConf.peers, copysetInfo.peers); // peers ASSERT_EQ(copysetConf.type, ConfigChangeType::START_SCAN_PEER); // type ASSERT_EQ(copysetConf.configChangeItem, 1); // chunkserver id @@ -377,9 +375,9 @@ { auto ret = step->Apply(copysetInfo, &copysetConf); ASSERT_EQ(ret, ApplyStatus::Ordered); - ASSERT_EQ(copysetConf.id.first, 1); // logical pool id - ASSERT_EQ(copysetConf.id.second, 1); // copyset id - ASSERT_EQ(copysetConf.epoch, 1); // epoch + ASSERT_EQ(copysetConf.id.first, 1); // logical pool id + ASSERT_EQ(copysetConf.id.second, 1); // copyset id + ASSERT_EQ(copysetConf.epoch, 1); // epoch ASSERT_EQ(copysetConf.peers, copysetInfo.peers); // peers ASSERT_EQ(copysetConf.type, ConfigChangeType::START_SCAN_PEER); // type ASSERT_EQ(copysetConf.configChangeItem, 1); // chunkserver id @@ -509,12 +507,13 @@ TEST(OperatorStepTest, TestCancelScanPeer) { auto ret = step->Apply(copysetInfo, &copysetConf); ASSERT_EQ(ret, ApplyStatus::Ordered); - ASSERT_EQ(copysetConf.id.first, 1); // logical pool id - ASSERT_EQ(copysetConf.id.second, 1); // copyset id - ASSERT_EQ(copysetConf.epoch, 1); // epoch + ASSERT_EQ(copysetConf.id.first, 1); // logical pool id + ASSERT_EQ(copysetConf.id.second, 1); // copyset id + ASSERT_EQ(copysetConf.epoch, 1); // epoch ASSERT_EQ(copysetConf.peers, copysetInfo.peers); // peers - ASSERT_EQ(copysetConf.type, ConfigChangeType::CANCEL_SCAN_PEER); // type // NOLINT - ASSERT_EQ(copysetConf.configChangeItem, 1); // chunkserver id + ASSERT_EQ(copysetConf.type, + ConfigChangeType::CANCEL_SCAN_PEER); // type // NOLINT + ASSERT_EQ(copysetConf.configChangeItem, 1); // chunkserver id } // CASE 3: copyset has no config change -> Ordered { copysetInfo.configChangeInfo.Clear(); auto ret = step->Apply(copysetInfo, &copysetConf); ASSERT_EQ(ret, ApplyStatus::Ordered); - ASSERT_EQ(copysetConf.id.first, 1); // logical pool id - ASSERT_EQ(copysetConf.id.second, 1); // copyset id - ASSERT_EQ(copysetConf.epoch, 1); // epoch + ASSERT_EQ(copysetConf.id.first, 1); // logical pool id + ASSERT_EQ(copysetConf.id.second, 1); // copyset id + ASSERT_EQ(copysetConf.epoch, 1); // epoch ASSERT_EQ(copysetConf.peers, copysetInfo.peers); // peers - ASSERT_EQ(copysetConf.type,
ConfigChangeType::CANCEL_SCAN_PEER); // type // NOLINT - ASSERT_EQ(copysetConf.configChangeItem, 1); // chunkserver id + ASSERT_EQ(copysetConf.type, + ConfigChangeType::CANCEL_SCAN_PEER); // type // NOLINT + ASSERT_EQ(copysetConf.configChangeItem, 1); // chunkserver id } // CASE 4: copyset has config change but the change type diff --git a/test/mds/schedule/rapidLeaderSheduler_test.cpp b/test/mds/schedule/rapidLeaderSheduler_test.cpp index 3caecf7111..5d9389c6d9 100644 --- a/test/mds/schedule/rapidLeaderSheduler_test.cpp +++ b/test/mds/schedule/rapidLeaderSheduler_test.cpp @@ -20,20 +20,20 @@ * Author: lixiaocui */ -#include "test/mds/schedule/mock_topoAdapter.h" -#include "test/mds/mock/mock_topology.h" -#include "test/mds/schedule/common.h" +#include "src/mds/schedule/operatorFactory.h" #include "src/mds/schedule/scheduleMetrics.h" #include "src/mds/schedule/scheduler.h" -#include "src/mds/schedule/operatorFactory.h" +#include "test/mds/mock/mock_topology.h" +#include "test/mds/schedule/common.h" +#include "test/mds/schedule/mock_topoAdapter.h" using ::curve::mds::topology::MockTopology; using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace mds { namespace schedule { class TestRapidLeaderSchedule : public ::testing::Test { protected: void SetUp() override { auto testCopySetInfo = GetCopySetInfoForTest(); ChunkServerInfo csInfo1(testCopySetInfo.peers[0], OnlineState::ONLINE, - DiskState::DISKNORMAL, ChunkServerStatus::READWRITE, - 1, 100, 100, ChunkServerStatisticInfo{}); + DiskState::DISKNORMAL, + ChunkServerStatus::READWRITE, 1, 100, 100, + ChunkServerStatisticInfo{}); ChunkServerInfo csInfo2(testCopySetInfo.peers[1], OnlineState::ONLINE, - DiskState::DISKNORMAL, ChunkServerStatus::READWRITE, - 0, 100, 100, ChunkServerStatisticInfo{}); + DiskState::DISKNORMAL, + ChunkServerStatus::READWRITE, 0, 100, 100, + ChunkServerStatisticInfo{}); ChunkServerInfo csInfo3(testCopySetInfo.peers[2], OnlineState::ONLINE, - DiskState::DISKNORMAL, ChunkServerStatus::READWRITE, - 0, 100, 100, ChunkServerStatisticInfo{}); + DiskState::DISKNORMAL, + ChunkServerStatus::READWRITE, 0, 100, 100, + ChunkServerStatisticInfo{}); chunkServerInfos_.emplace_back(csInfo1); chunkServerInfos_.emplace_back(csInfo2); chunkServerInfos_.emplace_back(csInfo3); @@ -77,14 +80,14 @@ class TestRapidLeaderSchedule : public ::testing::Test { TEST_F(TestRapidLeaderSchedule, test_logicalPool_not_exist) { std::shared_ptr rapidLeaderScheduler; - // 1. mds没有任何logicalpool + // 1. MDS does not have any logicalpool { rapidLeaderScheduler = std::make_shared( opt_, topoAdapter_, opController_, 2); EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{})); ASSERT_EQ(kScheduleErrCodeInvalidLogicalPool, - rapidLeaderScheduler->Schedule()); + rapidLeaderScheduler->Schedule()); rapidLeaderScheduler = std::make_shared( opt_, topoAdapter_, opController_, 0); EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{})); ASSERT_EQ(kScheduleErrCodeSuccess, rapidLeaderScheduler->Schedule()); } - // 2. mds逻辑池列表中没有指定logicalpool + // 2.
The specified logicalpool is not in the MDS logical pool list { rapidLeaderScheduler = std::make_shared( opt_, topoAdapter_, opController_, 2); EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); ASSERT_EQ(kScheduleErrCodeInvalidLogicalPool, - rapidLeaderScheduler->Schedule()); + rapidLeaderScheduler->Schedule()); } } TEST_F(TestRapidLeaderSchedule, test_initResource_no_need_schedule) { std::shared_ptr rapidLeaderScheduler; { - // 1. 指定logicalpool中没有chunkserver + // 1. There is no chunkserver in the specified logicalpool EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) @@ -121,7 +124,7 @@ } { - // 2. 指定logicalpool中没有copyset + // 2. There is no copyset in the specified logicalpool EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) @@ -141,7 +144,8 @@ TEST_F(TestRapidLeaderSchedule, test_select_target_fail) { opt_, topoAdapter_, opController_, 1); { - // 1. copyset的副本数目为1, 不会产生迁移 + // 1. The number of copies for copyset is 1, and migration will not + // occur EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) @@ -158,16 +162,17 @@ } { - // 2. chunkserver上拥有的leader数目最多相差1, 不会产生迁移 + // 2. The maximum difference in the number of leaders owned on + // chunkserver is 1, and migration will not occur // chunkserver-1 chunkserver-2 chunkserver-3 // copyset-1(leader) copyset-1 copyset-1 EXPECT_CALL(*topoAdapter_, GetLogicalpools()) - .WillOnce(Return(std::vector{1})); + .WillOnce(Return(std::vector{1})); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) .WillOnce(Return(chunkServerInfos_)); EXPECT_CALL(*topoAdapter_, GetCopySetInfosInLogicalPool(1)) - .WillOnce(Return( - std::vector{GetCopySetInfoForTest()})); + .WillOnce( + Return(std::vector{GetCopySetInfoForTest()})); ASSERT_EQ(kScheduleErrCodeSuccess, rapidLeaderScheduler->Schedule()); ASSERT_EQ(0, opController_->GetOperators().size()); } } TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_success) { - // 快速均衡成功 + // Fast balancing successful // chunkserver-1 chunkserver-2 chunkserver-3 // copyset-1(leader) copyset-1 copyset-1 // copyset-2(leader) copyset-2 copyset-2 @@ -189,7 +194,7 @@ EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); auto chunkserverInfosBak = chunkServerInfos_; chunkserverInfosBak[0].leaderCount = 3; EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) - .WillOnce(Return(chunkserverInfosBak)); + .WillOnce(Return(chunkserverInfosBak)); auto copyset1 = GetCopySetInfoForTest(); auto copyset2 = GetCopySetInfoForTest(); copyset2.id = CopySetKey{1, 2}; auto copyset3 = GetCopySetInfoForTest(); copyset3.id = CopySetKey{1, 3}; EXPECT_CALL(*topoAdapter_, GetCopySetInfosInLogicalPool(1)) - .WillOnce(Return( - std::vector{copyset1, copyset2, copyset3})); + .WillOnce( + Return(std::vector{copyset1, copyset2, copyset3})); OperatorFactory factory; opController_->AddOperator(factory.CreateRemovePeerOperator( copyset2, 2, OperatorPriority::NormalPriority));
ASSERT_EQ(kScheduleErrCodeSuccess, rapidLeaderScheduler->Schedule()); auto operators = opController_->GetOperators(); ASSERT_EQ(3, operators.size()); - auto op1 = dynamic_cast(operators[0].step.get()); + auto op1 = dynamic_cast(operators[0].step.get()); ASSERT_TRUE(nullptr != op1); ASSERT_EQ(2, op1->GetTargetPeer()); ASSERT_EQ(1, operators[0].copysetID.second); - auto op2 = dynamic_cast(operators[2].step.get()); + auto op2 = dynamic_cast(operators[2].step.get()); ASSERT_TRUE(nullptr != op2); ASSERT_EQ(3, op2->GetTargetPeer()); ASSERT_EQ(3, operators[2].copysetID.second); } TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_pendding) { - // 快速均衡成功 + // Fast balancing successful // chunkserver-1 chunkserver-2 chunkserver-3 // copyset-1(leader) copyset-1 copyset-1 // copyset-2(leader) copyset-2 copyset-2 @@ -232,7 +237,7 @@ TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_pendding) { chunkserverInfosBak[0].leaderCount = 3; chunkserverInfosBak[0].status = ChunkServerStatus::PENDDING; EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) - .WillOnce(Return(chunkserverInfosBak)); + .WillOnce(Return(chunkserverInfosBak)); auto copyset1 = GetCopySetInfoForTest(); auto copyset2 = GetCopySetInfoForTest(); @@ -240,8 +245,8 @@ TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_pendding) { auto copyset3 = GetCopySetInfoForTest(); copyset3.id = CopySetKey{1, 3}; EXPECT_CALL(*topoAdapter_, GetCopySetInfosInLogicalPool(1)) - .WillOnce(Return( - std::vector{copyset1, copyset2, copyset3})); + .WillOnce( + Return(std::vector{copyset1, copyset2, copyset3})); ASSERT_EQ(kScheduleErrCodeSuccess, rapidLeaderScheduler->Schedule()); auto operators = opController_->GetOperators(); diff --git a/test/mds/schedule/recoverScheduler_test.cpp b/test/mds/schedule/recoverScheduler_test.cpp index c7c11b299e..8e26a2ff57 100644 --- a/test/mds/schedule/recoverScheduler_test.cpp +++ b/test/mds/schedule/recoverScheduler_test.cpp @@ -21,23 +21,24 @@ */ #include -#include "src/mds/schedule/scheduler.h" + +#include "src/mds/common/mds_define.h" #include "src/mds/schedule/operatorController.h" -#include "src/mds/topology/topology_id_generator.h" #include "src/mds/schedule/scheduleMetrics.h" -#include "src/mds/common/mds_define.h" -#include "test/mds/schedule/mock_topoAdapter.h" +#include "src/mds/schedule/scheduler.h" +#include "src/mds/topology/topology_id_generator.h" #include "test/mds/mock/mock_topology.h" #include "test/mds/schedule/common.h" +#include "test/mds/schedule/mock_topoAdapter.h" using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; -using ::curve::mds::topology::TopologyIdGenerator; using ::curve::mds::topology::MockTopology; +using ::curve::mds::topology::TopologyIdGenerator; namespace curve { namespace mds { @@ -62,7 +63,7 @@ class TestRecoverSheduler : public ::testing::Test { opt.scatterWithRangePerent = 0.2; opt.chunkserverFailureTolerance = 3; recoverScheduler_ = std::make_shared( - opt, topoAdapter_, opController_); + opt, topoAdapter_, opController_); } void TearDown() override { opController_ = nullptr; @@ -71,9 +72,9 @@ class TestRecoverSheduler : public ::testing::Test { } protected: - std::shared_ptr topoAdapter_; - std::shared_ptr opController_; - std::shared_ptr recoverScheduler_; + std::shared_ptr topoAdapter_; + std::shared_ptr opController_; + std::shared_ptr recoverScheduler_; }; TEST_F(TestRecoverSheduler, test_copySet_already_has_operator) { @@ -82,10 
+83,8 @@ TEST_F(TestRecoverSheduler, test_copySet_already_has_operator) { EXPECT_CALL(*topoAdapter_, GetChunkServerInfos()) .WillOnce(Return(std::vector{})); CopySetKey copySetKey; - copySetKey. - first = 1; - copySetKey. - second = 1; + copySetKey.first = 1; + copySetKey.second = 1; Operator testOperator(1, copySetKey, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(1)); ASSERT_TRUE(opController_->AddOperator(testOperator)); @@ -107,8 +106,8 @@ TEST_F(TestRecoverSheduler, test_copySet_has_configChangeInfo) { TEST_F(TestRecoverSheduler, test_chunkServer_cannot_get) { EXPECT_CALL(*topoAdapter_, GetChunkServerInfos()) .WillOnce(Return(std::vector{})); - EXPECT_CALL(*topoAdapter_, GetCopySetInfos()). - WillOnce(Return(std::vector({GetCopySetInfoForTest()}))); + EXPECT_CALL(*topoAdapter_, GetCopySetInfos()) + .WillOnce(Return(std::vector({GetCopySetInfoForTest()}))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(_, _)) .Times(3) .WillRepeatedly(Return(false)); @@ -132,27 +131,27 @@ TEST_F(TestRecoverSheduler, test_server_has_more_offline_chunkserver) { PeerInfo peer4(4, 1, 1, "192.168.10.1", 9001); PeerInfo peer5(5, 1, 1, "192.168.10.1", 9002); ChunkServerInfo csInfo4(peer4, OnlineState::OFFLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 2, 100, 100, - ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 2, 100, 100, + ChunkServerStatisticInfo{}); ChunkServerInfo csInfo5(peer5, OnlineState::UNSTABLE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 2, 100, 100, - ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 2, 100, 100, + ChunkServerStatisticInfo{}); EXPECT_CALL(*topoAdapter_, GetChunkServerInfos()) - .WillOnce(Return(std::vector{ - csInfo1, csInfo2, csInfo3, csInfo4, csInfo5})); + .WillOnce(Return(std::vector{csInfo1, csInfo2, csInfo3, + csInfo4, csInfo5})); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo1.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo1.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo2.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo2.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo2), Return(true))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo3.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo3.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo3), Return(true))); recoverScheduler_->Schedule(); ASSERT_EQ(0, opController_->GetOperators().size()); } TEST_F(TestRecoverSheduler, - test_server_has_more_offline_and_retired_chunkserver) { + test_server_has_more_offline_and_retired_chunkserver) { auto testCopySetInfo = GetCopySetInfoForTest(); EXPECT_CALL(*topoAdapter_, GetCopySetInfos()) .WillRepeatedly(Return(std::vector({testCopySetInfo}))); @@ -168,27 +167,27 @@ TEST_F(TestRecoverSheduler, PeerInfo peer4(4, 1, 1, "192.168.10.1", 9001); PeerInfo peer5(5, 1, 1, "192.168.10.1", 9002); ChunkServerInfo csInfo4(peer4, OnlineState::OFFLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 2, 100, 100, - ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 2, 100, 100, + ChunkServerStatisticInfo{}); ChunkServerInfo csInfo5(peer5, OnlineState::OFFLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 2, 100, 100, - ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 2, 100, 100, + ChunkServerStatisticInfo{}); EXPECT_CALL(*topoAdapter_, GetChunkServerInfos()) - .WillOnce(Return(std::vector{ - csInfo1, csInfo2, csInfo3, csInfo4, 
csInfo5})); + .WillOnce(Return(std::vector<ChunkServerInfo>{csInfo1, csInfo2, csInfo3, + csInfo4, csInfo5})); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo1.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo1.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo2.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo2.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo2), Return(true))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo3.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo3.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo3), Return(true))); - EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) + EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) .WillOnce(Return(2)); recoverScheduler_->Schedule(); Operator op; ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast<RemovePeer *>(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast<RemovePeer*>(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } @@ -208,64 +207,61 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { DiskState::DISKNORMAL, ChunkServerStatus::READWRITE, 2, 100, 100, ChunkServerStatisticInfo{}); PeerInfo peer4(4, 4, 4, "192.168.10.4", 9000); - ChunkServerInfo csInfo4(peer4, OnlineState::ONLINE, - DiskState::DISKNORMAL, ChunkServerStatus::READWRITE, - 2, 100, 100, ChunkServerStatisticInfo{}); + ChunkServerInfo csInfo4(peer4, OnlineState::ONLINE, DiskState::DISKNORMAL, + ChunkServerStatus::READWRITE, 2, 100, 100, + ChunkServerStatisticInfo{}); ChunkServerIdType id1 = 1; ChunkServerIdType id2 = 2; ChunkServerIdType id3 = 3; Operator op; EXPECT_CALL(*topoAdapter_, GetAvgScatterWidthInLogicalPool(_)) - .WillRepeatedly(Return(90)); + .WillRepeatedly(Return(90)); { - // 1. 所有chunkserveronline + // 1. All chunkservers are online EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id1, _)) - .WillOnce(DoAll(SetArgPointee<1>(csInfo1), - Return(true))); + .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id2, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo2), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo2), Return(true))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id3, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo3), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo3), Return(true))); recoverScheduler_->Schedule(); ASSERT_EQ(0, opController_->GetOperators().size()); } { - // 2. 副本数量大于标准,leader挂掉 + // 2. The number of replicas exceeds the standard, and the leader goes + // offline csInfo1.state = OnlineState::OFFLINE; EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id1, _)) - .WillOnce(DoAll(SetArgPointee<1>(csInfo1), - Return(true))); + .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) - .Times(2).WillRepeatedly(Return(2)); + .Times(2) + .WillRepeatedly(Return(2)); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE( - dynamic_cast<RemovePeer *>(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast<RemovePeer*>(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); }
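An aside on the gMock idiom these expectations lean on throughout: DoAll(SetArgPointee<N>(value), Return(flag)) writes value through the mocked call's N-th argument (zero-based) and then returns flag, which is how the tests fake TopoAdapter lookups such as GetChunkServerInfo. A minimal, self-contained sketch of the idiom, using hypothetical Adapter/MockAdapter types rather than the real TopoAdapter interface:

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    using ::testing::_;
    using ::testing::DoAll;
    using ::testing::Return;
    using ::testing::SetArgPointee;

    // Hypothetical stand-in for a TopoAdapter-style lookup interface.
    class Adapter {
     public:
        virtual ~Adapter() = default;
        virtual bool GetInfo(int id, int* out) const = 0;
    };

    class MockAdapter : public Adapter {
     public:
        MOCK_CONST_METHOD2(GetInfo, bool(int, int*));
    };

    TEST(SetArgPointeeSketch, FillsOutParamThenReturns) {
        MockAdapter adapter;
        // SetArgPointee<1> targets the second argument (the int*); DoAll then
        // chains Return(true) as the call's return value.
        EXPECT_CALL(adapter, GetInfo(7, _))
            .WillOnce(DoAll(SetArgPointee<1>(42), Return(true)));

        int value = 0;
        ASSERT_TRUE(adapter.GetInfo(7, &value));
        ASSERT_EQ(42, value);
    }

The same shape appears in every EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id, _)) above, with ChunkServerInfo in place of the int out-parameter.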
{ - // 3. 副本数量大于标准,follower挂掉 + // 3. The number of replicas exceeds the standard, and a follower goes + // offline opController_->RemoveOperator(op.copysetID); csInfo1.state = OnlineState::ONLINE; csInfo2.state = OnlineState::OFFLINE; EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id1, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo1), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id2, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo2), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo2), Return(true))); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast<RemovePeer *>(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast<RemovePeer*>(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } { - // 4. 副本数目等于标准, follower挂掉 + // 4. The number of replicas equals the standard, and a follower goes + // offline opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) .WillRepeatedly(Return(3)); @@ -290,14 +286,13 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { .WillOnce(Return(true)); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE( - dynamic_cast<ChangePeer *>(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast<ChangePeer*>(op.step.get()) != nullptr); ASSERT_EQ(4, op.step.get()->GetTargetPeer()); ASSERT_EQ(std::chrono::seconds(1000), op.timeLimit); } { - // 5. 选不出替换chunkserver + // 5. Unable to select a replacement chunkserver opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(_)) .WillOnce(Return(std::vector<ChunkServerInfo>{})); @@ -306,7 +301,7 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { recoverScheduler_->Schedule(); ASSERT_EQ(0, opController_->GetOperators().size()); } { - // 6. 在chunkserver上创建copyset失败 + // 6. 
Failed to create copyset on chunkserver EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) .WillRepeatedly(Return(3)); std::vector chunkserverList( @@ -335,5 +330,3 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { } // namespace schedule } // namespace mds } // namespace curve - - diff --git a/test/mds/schedule/scheduleMetrics_test.cpp b/test/mds/schedule/scheduleMetrics_test.cpp index 66969a6845..3714260772 100644 --- a/test/mds/schedule/scheduleMetrics_test.cpp +++ b/test/mds/schedule/scheduleMetrics_test.cpp @@ -20,22 +20,24 @@ * Author: lixiaocui */ +#include "src/mds/schedule/scheduleMetrics.h" + #include #include #include -#include "src/mds/schedule/scheduleMetrics.h" -#include "src/mds/schedule/operatorController.h" + #include "src/mds/schedule/operator.h" +#include "src/mds/schedule/operatorController.h" #include "test/mds/mock/mock_topology.h" -using ::curve::mds::topology::MockTopology; using ::curve::mds::topology::CopySetKey; +using ::curve::mds::topology::MockTopology; using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace mds { @@ -47,24 +49,22 @@ class ScheduleMetricsTest : public testing::Test { scheduleMetrics = std::make_shared(topo); } - void TearDown() { - } + void TearDown() {} ::curve::mds::topology::ChunkServer GetChunkServer(int id) { - return ::curve::mds::topology::ChunkServer( - id, "", "", id, "", 9000, ""); + return ::curve::mds::topology::ChunkServer(id, "", "", id, "", 9000, + ""); } ::curve::mds::topology::Server GetServer(int id) { - std::string hostName = - "pubbeta2-curve" + std::to_string(id) + ".org"; - return ::curve::mds::topology::Server( - id, hostName, "", 0, "", 0, id, 1, ""); + std::string hostName = "pubbeta2-curve" + std::to_string(id) + ".org"; + return ::curve::mds::topology::Server(id, hostName, "", 0, "", 0, id, 1, + ""); } std::string GetChunkServerHostPort(int id) { return GetServer(id).GetHostName() + ":" + - std::to_string(GetChunkServer(id).GetPort()); + std::to_string(GetChunkServer(id).GetPort()); } public: @@ -74,24 +74,24 @@ class ScheduleMetricsTest : public testing::Test { TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { Operator addOp(1, CopySetKey{1, 1}, OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared(3)); + steady_clock::now(), std::make_shared(3)); ::curve::mds::topology::CopySetInfo addCsInfo(1, 1); addCsInfo.SetCopySetMembers(std::set{1, 2}); { - // 1. 增加normal级别/add类型的operator + // 1. 
Add a normal-priority/AddPeer-type operator EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 1}, _)) .WillOnce(DoAll(SetArgPointee<1>(addCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) .WillOnce(Return(GetServer(1).GetHostName() + ":" + - std::to_string(GetChunkServer(1).GetPort()))); + std::to_string(GetChunkServer(1).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(2)) .WillOnce(Return(GetServer(2).GetHostName() + ":" + - std::to_string(GetChunkServer(2).GetPort()))); + std::to_string(GetChunkServer(2).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(3)) .WillOnce(Return(GetServer(3).GetHostName() + ":" + - std::to_string(GetChunkServer(3).GetPort()))); + std::to_string(GetChunkServer(3).GetPort()))); scheduleMetrics->UpdateAddMetric(addOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -100,34 +100,34 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { ASSERT_EQ(1, scheduleMetrics->operators.size()); ASSERT_EQ(std::to_string(addCsInfo.GetLogicalPoolId()), - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "logicalPoolId")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "logicalPoolId")); ASSERT_EQ(std::to_string(addCsInfo.GetId()), - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "copySetId")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "copySetId")); std::string copysetpeers = GetChunkServerHostPort(1) + "," + GetChunkServerHostPort(2); ASSERT_EQ(copysetpeers, - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "copySetPeers")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "copySetPeers")); ASSERT_EQ(std::to_string(addCsInfo.GetEpoch()), - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "copySetEpoch")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "copySetEpoch")); ASSERT_EQ("UNINTIALIZE_ID", - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "copySetLeader")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "copySetLeader")); ASSERT_EQ(std::to_string(addOp.startEpoch), - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "startEpoch")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "startEpoch")); ASSERT_EQ(NORMAL, - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "opPriority")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "opPriority")); ASSERT_EQ(ADDPEER, - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "opType")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "opType")); ASSERT_EQ(GetChunkServerHostPort(3), - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "opItem")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "opItem")); std::string res = std::string("{\"copySetEpoch\":\"0\",\"copySetId\":\"1\",") + std::string("\"copySetLeader\":\"UNINTIALIZE_ID\",\"") + @@ -143,7 +143,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { } { - // 2. 移除 1中的operator + // 2. 
Remove the operator added in 1 scheduleMetrics->UpdateRemoveMetric(addOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->addOpNum.get_value()); @@ -154,25 +154,26 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { Operator rmOp(1, CopySetKey{1, 2}, OperatorPriority::HighPriority, - steady_clock::now(), std::make_shared<RemovePeer>(3)); + steady_clock::now(), std::make_shared<RemovePeer>(3)); ::curve::mds::topology::CopySetInfo rmCsInfo(1, 2); rmCsInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); rmCsInfo.SetLeader(1); { - // 1. 增加high级别/remove类型的operator + // 1. Add a high-priority/RemovePeer-type operator EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 2}, _)) .WillOnce(DoAll(SetArgPointee<1>(rmCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) .WillOnce(Return(GetServer(1).GetHostName() + ":" + - std::to_string(GetChunkServer(1).GetPort()))); + std::to_string(GetChunkServer(1).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(2)) .WillOnce(Return(GetServer(2).GetHostName() + ":" + - std::to_string(GetChunkServer(2).GetPort()))); + std::to_string(GetChunkServer(2).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(3)) .Times(2) - .WillRepeatedly(Return(GetServer(3).GetHostName() + ":" + - std::to_string(GetChunkServer(3).GetPort()))); + .WillRepeatedly( + Return(GetServer(3).GetHostName() + ":" + + std::to_string(GetChunkServer(3).GetPort()))); scheduleMetrics->UpdateAddMetric(rmOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -181,34 +182,35 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { ASSERT_EQ(1, scheduleMetrics->operators.size()); ASSERT_EQ(std::to_string(rmCsInfo.GetLogicalPoolId()), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "logicalPoolId")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "logicalPoolId")); ASSERT_EQ(std::to_string(rmCsInfo.GetId()), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "copySetId")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "copySetId")); std::string copysetpeers = GetChunkServerHostPort(1) + "," + - GetChunkServerHostPort(2) + "," + GetChunkServerHostPort(3); + GetChunkServerHostPort(2) + "," + + GetChunkServerHostPort(3); ASSERT_EQ(copysetpeers, - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "copySetPeers")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "copySetPeers")); ASSERT_EQ(std::to_string(rmCsInfo.GetEpoch()), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "copySetEpoch")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "copySetEpoch")); ASSERT_EQ(GetChunkServerHostPort(1), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "copySetLeader")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "copySetLeader")); ASSERT_EQ(std::to_string(rmOp.startEpoch), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "startEpoch")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "startEpoch")); ASSERT_EQ(HIGH, - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "opPriority")); - ASSERT_EQ(REMOVEPEER, - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "opType")); - ASSERT_EQ(GetChunkServerHostPort(3), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "opItem")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "opPriority")); + ASSERT_EQ( + REMOVEPEER, + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey("opType")); + ASSERT_EQ( + GetChunkServerHostPort(3), + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey("opItem"));
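A short note on the counters asserted throughout these tests (operatorNum, addOpNum, removeOpNum, ...): they come from brpc's bvar library, and get_value() reads the current aggregated value. A small sketch of that read-after-write behavior, assuming bvar::Adder as the counter type (the actual ScheduleMetrics member types may differ):

    #include <bvar/bvar.h>

    #include <cstdio>

    int main() {
        // bvar::Adder accumulates via operator<< and is read with get_value(),
        // the same accessor the assertions above use.
        bvar::Adder<int64_t> operatorNum;
        operatorNum << 1;   // an operator was added
        operatorNum << -1;  // and later removed
        std::printf("operatorNum=%ld\n",
                    static_cast<long>(operatorNum.get_value()));  // prints 0
        return 0;
    }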
std::string res = std::string("{\"copySetEpoch\":\"0\",\"copySetId\":\"2\",") + std::string("\"copySetLeader\":") + @@ -226,7 +228,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { } { - // 2. 移除 1中的operator + // 2. Remove the operator added in 1 scheduleMetrics->UpdateRemoveMetric(rmOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->removeOpNum.get_value()); @@ -237,25 +239,27 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { Operator transferOp(1, CopySetKey{1, 3}, OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared<TransferLeader>(1, 3)); + steady_clock::now(), + std::make_shared<TransferLeader>(1, 3)); ::curve::mds::topology::CopySetInfo transCsInfo(1, 3); transCsInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); transCsInfo.SetLeader(1); { - // 1. 增加normal级别/transferleader类型的operator + // 1. Add a normal-priority/TransferLeader-type operator EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) .WillOnce(Return(GetServer(1).GetHostName() + ":" + - std::to_string(GetChunkServer(1).GetPort()))); + std::to_string(GetChunkServer(1).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(2)) .WillOnce(Return(GetServer(2).GetHostName() + ":" + - std::to_string(GetChunkServer(2).GetPort()))); + std::to_string(GetChunkServer(2).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(3)) .Times(2) - .WillRepeatedly(Return(GetServer(3).GetHostName() + ":" + - std::to_string(GetChunkServer(3).GetPort()))); + .WillRepeatedly( + Return(GetServer(3).GetHostName() + ":" + + std::to_string(GetChunkServer(3).GetPort()))); scheduleMetrics->UpdateAddMetric(transferOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -263,30 +267,35 @@ TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { ASSERT_EQ(1, scheduleMetrics->transferOpNum.get_value()); ASSERT_EQ(1, scheduleMetrics->operators.size()); - ASSERT_EQ(std::to_string(transCsInfo.GetLogicalPoolId()), + ASSERT_EQ( + std::to_string(transCsInfo.GetLogicalPoolId()), scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "logicalPoolId")); - ASSERT_EQ(std::to_string(transCsInfo.GetId()), + ASSERT_EQ( + std::to_string(transCsInfo.GetId()), scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "copySetId")); std::string copysetpeers = GetChunkServerHostPort(1) + "," + - GetChunkServerHostPort(2) + "," + GetChunkServerHostPort(3); - ASSERT_EQ(copysetpeers, - scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( - "copySetPeers")); - ASSERT_EQ(std::to_string(transCsInfo.GetEpoch()), + GetChunkServerHostPort(2) + "," + + GetChunkServerHostPort(3); + ASSERT_EQ(copysetpeers, scheduleMetrics->operators[transferOp.copysetID] + .GetValueByKey("copySetPeers")); + ASSERT_EQ( + std::to_string(transCsInfo.GetEpoch()), scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "copySetEpoch")); - ASSERT_EQ(std::to_string(transferOp.startEpoch), + ASSERT_EQ( + std::to_string(transferOp.startEpoch), scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "startEpoch")); - ASSERT_EQ(NORMAL, - scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( - "opPriority")); - ASSERT_EQ(TRANSFERLEADER, + ASSERT_EQ(NORMAL, scheduleMetrics->operators[transferOp.copysetID] + .GetValueByKey("opPriority")); + ASSERT_EQ( + TRANSFERLEADER,
scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "opType")); - ASSERT_EQ(GetChunkServerHostPort(3), + ASSERT_EQ( + GetChunkServerHostPort(3), scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "opItem")); std::string res = @@ -301,47 +310,49 @@ TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { std::string("\"Normal\",\"opType\":\"TransferLeader\",") + std::string("\"startEpoch\":\"1\"}"); ASSERT_EQ(res, - scheduleMetrics->operators[transferOp.copysetID].JsonBody()); - LOG(INFO) << "format: " + scheduleMetrics->operators[transferOp.copysetID].JsonBody()); + LOG(INFO) + << "format: " << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); } { - // 2. 移除 1中的operator + // 2. Remove the operator added in 1 scheduleMetrics->UpdateRemoveMetric(transferOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->transferOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // Removing a metric that does not exist in the map should be fine scheduleMetrics->UpdateRemoveMetric(transferOp); } } TEST_F(ScheduleMetricsTest, test_add_rm_changeOp) { Operator changeOp(1, CopySetKey{1, 4}, OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared<ChangePeer>(1, 4)); + steady_clock::now(), std::make_shared<ChangePeer>(1, 4)); ::curve::mds::topology::CopySetInfo changeCsInfo(1, 4); changeCsInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); changeCsInfo.SetLeader(1); { - // 1. 增加normal级别/changePeer类型的operator + // 1. Add a normal-priority/ChangePeer-type operator EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 4}, _)) .WillOnce(DoAll(SetArgPointee<1>(changeCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) .WillOnce(Return(GetServer(1).GetHostName() + ":" + - std::to_string(GetChunkServer(1).GetPort()))); + std::to_string(GetChunkServer(1).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(2)) .WillOnce(Return(GetServer(2).GetHostName() + ":" + - std::to_string(GetChunkServer(2).GetPort()))); + std::to_string(GetChunkServer(2).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(3)) .WillOnce(Return(GetServer(3).GetHostName() + ":" + - std::to_string(GetChunkServer(3).GetPort()))); + std::to_string(GetChunkServer(3).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(4)) .WillOnce(Return(GetServer(4).GetHostName() + ":" + - std::to_string(GetChunkServer(4).GetPort()))); + std::to_string(GetChunkServer(4).GetPort()))); scheduleMetrics->UpdateAddMetric(changeOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -350,31 +361,32 @@ TEST_F(ScheduleMetricsTest, test_add_rm_changeOp) { ASSERT_EQ(1, scheduleMetrics->operators.size()); ASSERT_EQ(std::to_string(changeCsInfo.GetLogicalPoolId()), - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "logicalPoolId")); - ASSERT_EQ(std::to_string(changeCsInfo.GetId()), - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "copySetId")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "logicalPoolId")); + ASSERT_EQ(std::to_string(changeCsInfo.GetId()), + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "copySetId")); std::string copysetpeers = GetChunkServerHostPort(1) + "," + - GetChunkServerHostPort(2) + "," + GetChunkServerHostPort(3); + GetChunkServerHostPort(2) + "," + + GetChunkServerHostPort(3); ASSERT_EQ(copysetpeers, - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "copySetPeers")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "copySetPeers"));
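A side note on the JsonBody strings these tests expect: the keys (copySetEpoch, copySetId, copySetLeader, ...) always appear in alphabetical order, which falls out naturally if the key/value pairs live in an ordered std::map. A rough sketch of how such a body can be assembled, under the assumption that the metric holder is map-backed (the real implementation may differ):

    #include <cstdio>
    #include <map>
    #include <string>

    // Build a JSON object string from an ordered map; std::map iterates keys
    // in sorted order, matching the alphabetical key order asserted above.
    std::string JsonBody(const std::map<std::string, std::string>& kvs) {
        std::string body = "{";
        for (const auto& kv : kvs) {
            if (body.size() > 1) body += ",";
            body += "\"" + kv.first + "\":\"" + kv.second + "\"";
        }
        return body + "}";
    }

    int main() {
        std::map<std::string, std::string> kvs{
            {"copySetId", "4"}, {"opType", "ChangePeer"}, {"copySetEpoch", "0"}};
        // Prints {"copySetEpoch":"0","copySetId":"4","opType":"ChangePeer"}
        std::printf("%s\n", JsonBody(kvs).c_str());
        return 0;
    }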
ASSERT_EQ(std::to_string(changeCsInfo.GetEpoch()), - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "copySetEpoch")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "copySetEpoch")); ASSERT_EQ(std::to_string(changeOp.startEpoch), - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "startEpoch")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "startEpoch")); ASSERT_EQ(NORMAL, - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "opPriority")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "opPriority")); ASSERT_EQ(CHANGEPEER, - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "opType")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "opType")); ASSERT_EQ(GetChunkServerHostPort(4), - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "opItem")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "opItem")); std::string res = std::string("{\"copySetEpoch\":\"0\",\"copySetId\":\"4\",") + std::string("\"copySetLeader\":") + @@ -387,32 +399,34 @@ TEST_F(ScheduleMetricsTest, test_add_rm_changeOp) { std::string("\"Normal\",\"opType\":\"ChangePeer\",") + std::string("\"startEpoch\":\"1\"}"); ASSERT_EQ(res, - scheduleMetrics->operators[changeOp.copysetID].JsonBody()); + scheduleMetrics->operators[changeOp.copysetID].JsonBody()); LOG(INFO) << "format: " - << scheduleMetrics->operators[changeOp.copysetID].JsonBody(); + << scheduleMetrics->operators[changeOp.copysetID].JsonBody(); } { - // 2. 移除 1中的operator + // 2. Remove the operator added in 1 scheduleMetrics->UpdateRemoveMetric(changeOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->changeOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // Removing a metric that does not exist in the map should be fine scheduleMetrics->UpdateRemoveMetric(changeOp); } } TEST_F(ScheduleMetricsTest, test_abnormal) { Operator transferOp(1, CopySetKey{1, 3}, OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared<TransferLeader>(1, 3)); + steady_clock::now(), + std::make_shared<TransferLeader>(1, 3)); ::curve::mds::topology::CopySetInfo transCsInfo(1, 3); transCsInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); transCsInfo.SetLeader(1); - // 获取copyset失败 + // Failed to obtain copyset EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)).WillOnce(Return(false)); scheduleMetrics->UpdateAddMetric(transferOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -422,36 +436,32 @@ TEST_F(ScheduleMetricsTest, test_abnormal) { ASSERT_TRUE( scheduleMetrics->operators[transferOp.copysetID].JsonBody().empty()); LOG(INFO) << "format: " - << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); + << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); scheduleMetrics->UpdateRemoveMetric(transferOp); - - // 获取chunkserver 或者 server失败 + // Failed to obtain chunkserver or server EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) - .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); - EXPECT_CALL(*topo, GetHostNameAndPortById(1)) - .WillOnce(Return("")); - EXPECT_CALL(*topo, GetHostNameAndPortById(2)) - .WillOnce(Return("")); + .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); + EXPECT_CALL(*topo, GetHostNameAndPortById(1)).WillOnce(Return("")); + EXPECT_CALL(*topo, 
GetHostNameAndPortById(2)).WillOnce(Return("")); EXPECT_CALL(*topo, GetHostNameAndPortById(3)) .Times(2) .WillRepeatedly(Return(GetServer(3).GetHostName() + ":" + - std::to_string(GetChunkServer(3).GetPort()))); + std::to_string(GetChunkServer(3).GetPort()))); scheduleMetrics->UpdateAddMetric(transferOp); std::string res = - std::string("{\"copySetEpoch\":\"0\",\"copySetId\":\"3\",") + - std::string("\"copySetLeader\":\"UNINTIALIZE_ID\",\"") + - std::string("copySetPeers\":\",,pubbeta2-curve3.org:9000") + - std::string("\",\"logicalPoolId\":\"1\",\"opItem\":") + - std::string("\"pubbeta2-curve3.org:9000\",\"opPriority\":") + - std::string("\"Normal\",\"opType\":\"TransferLeader\",") + - std::string("\"startEpoch\":\"1\"}"); + std::string("{\"copySetEpoch\":\"0\",\"copySetId\":\"3\",") + + std::string("\"copySetLeader\":\"UNINTIALIZE_ID\",\"") + + std::string("copySetPeers\":\",,pubbeta2-curve3.org:9000") + + std::string("\",\"logicalPoolId\":\"1\",\"opItem\":") + + std::string("\"pubbeta2-curve3.org:9000\",\"opPriority\":") + + std::string("\"Normal\",\"opType\":\"TransferLeader\",") + + std::string("\"startEpoch\":\"1\"}"); ASSERT_EQ(res, scheduleMetrics->operators[transferOp.copysetID].JsonBody()); LOG(INFO) << "format: " - << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); + << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); } } // namespace schedule } // namespace mds } // namespace curve - diff --git a/test/mds/schedule/scheduleService/scheduleService_test.cpp b/test/mds/schedule/scheduleService/scheduleService_test.cpp index 9814f8ce0b..17ab08e546 100644 --- a/test/mds/schedule/scheduleService/scheduleService_test.cpp +++ b/test/mds/schedule/scheduleService/scheduleService_test.cpp @@ -20,24 +20,25 @@ * Author: lixiaocui */ -#include -#include -#include +#include "src/mds/schedule/scheduleService/scheduleService.h" + #include +#include #include +#include +#include -#include "src/mds/schedule/scheduleService/scheduleService.h" -#include "test/mds/mock/mock_coordinator.h" #include "proto/schedule.pb.h" +#include "test/mds/mock/mock_coordinator.h" namespace curve { namespace mds { namespace schedule { -using ::testing::Return; using ::testing::_; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; class TestScheduleService : public ::testing::Test { protected: @@ -45,10 +46,10 @@ class TestScheduleService : public ::testing::Test { server_ = new brpc::Server(); coordinator_ = std::make_shared(); - ScheduleServiceImpl *scheduleService = + ScheduleServiceImpl* scheduleService = new ScheduleServiceImpl(coordinator_); - ASSERT_EQ(0, - server_->AddService(scheduleService, brpc::SERVER_OWNS_SERVICE)); + ASSERT_EQ( + 0, server_->AddService(scheduleService, brpc::SERVER_OWNS_SERVICE)); ASSERT_EQ(0, server_->Start("127.0.0.1", {5900, 5999}, nullptr)); listenAddr_ = server_->listen_address(); } @@ -63,7 +64,7 @@ class TestScheduleService : public ::testing::Test { protected: std::shared_ptr coordinator_; butil::EndPoint listenAddr_; - brpc::Server *server_; + brpc::Server* server_; }; TEST_F(TestScheduleService, test_RapidLeaderSchedule) { @@ -75,7 +76,7 @@ TEST_F(TestScheduleService, test_RapidLeaderSchedule) { request.set_logicalpoolid(1); RapidLeaderScheduleResponse response; - // 1. 快速leader均衡返回成功 + // 1. 
Fast leader balance returned successfully { EXPECT_CALL(*coordinator_, RapidLeaderSchedule(1)) .WillOnce(Return(kScheduleErrCodeSuccess)); @@ -85,7 +86,7 @@ TEST_F(TestScheduleService, test_RapidLeaderSchedule) { ASSERT_EQ(kScheduleErrCodeSuccess, response.statuscode()); } - // 2. 传入的logicalpoolid不存在 + // 2. The logicalpoolid passed in does not exist { EXPECT_CALL(*coordinator_, RapidLeaderSchedule(1)) .WillOnce(Return(kScheduleErrCodeInvalidLogicalPool)); @@ -105,13 +106,13 @@ TEST_F(TestScheduleService, test_QueryChunkServerRecoverStatus) { request.add_chunkserverid(1); QueryChunkServerRecoverStatusResponse response; - // 1. 查询chunkserver恢复状态返回成功 + // 1. Querying the chunkserver recovery status returns success { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryChunkServerRecoverStatus( - std::vector<ChunkServerIdType>{1}, _)) + std::vector<ChunkServerIdType>{1}, _)) .WillOnce(DoAll(SetArgPointee<1>(expectRes), - Return(kScheduleErrCodeSuccess))); + Return(kScheduleErrCodeSuccess))); brpc::Controller cntl; stub.QueryChunkServerRecoverStatus(&cntl, &request, &response, nullptr); @@ -121,11 +122,11 @@ TEST_F(TestScheduleService, test_QueryChunkServerRecoverStatus) { ASSERT_TRUE(response.recoverstatusmap().begin()->second); } - // 2. 传入的chunkserverid不合法 + // 2. The chunkserverid passed in is invalid { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryChunkServerRecoverStatus( - std::vector<ChunkServerIdType>{1}, _)) + std::vector<ChunkServerIdType>{1}, _)) .WillOnce(Return(kScheduleErrInvalidQueryChunkserverID)); brpc::Controller cntl; stub.QueryChunkServerRecoverStatus(&cntl, &request, &response, nullptr); diff --git a/test/mds/schedule/schedulerPOC/scheduler_poc.cpp b/test/mds/schedule/schedulerPOC/scheduler_poc.cpp index b8b3ddb148..b6919dee9b 100644 --- a/test/mds/schedule/schedulerPOC/scheduler_poc.cpp +++ b/test/mds/schedule/schedulerPOC/scheduler_poc.cpp @@ -22,25 +22,27 @@ #include #include -#include -#include -#include -#include + #include +#include +#include +#include #include -#include "src/mds/topology/topology_item.h" -#include "src/mds/topology/topology.h" -#include "src/mds/topology/topology_config.h" -#include "src/mds/topology/topology_service_manager.h" -#include "src/mds/schedule/topoAdapter.h" -#include "src/mds/schedule/scheduler.h" -#include "src/mds/schedule/operatorController.h" +#include + #include "src/mds/common/mds_define.h" -#include "src/mds/copyset/copyset_policy.h" #include "src/mds/copyset/copyset_manager.h" +#include "src/mds/copyset/copyset_policy.h" +#include "src/mds/schedule/operatorController.h" #include "src/mds/schedule/scheduleMetrics.h" -#include "test/mds/schedule/schedulerPOC/mock_topology.h" +#include "src/mds/schedule/scheduler.h" +#include "src/mds/schedule/topoAdapter.h" +#include "src/mds/topology/topology.h" +#include "src/mds/topology/topology_config.h" +#include "src/mds/topology/topology_item.h" +#include "src/mds/topology/topology_service_manager.h" #include "test/mds/mock/mock_topology.h" +#include "test/mds/schedule/schedulerPOC/mock_topology.h" using ::curve::mds::topology::MockTopology; @@ -141,10 +143,10 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { logicalPoolSet_.insert(0); } - std::vector<PoolIdType> - GetLogicalPoolInCluster(LogicalPoolFilter filter = [](const LogicalPool &) { - return true; - }) const override { + std::vector<PoolIdType> GetLogicalPoolInCluster(LogicalPoolFilter filter = + [](const LogicalPool&) { + return true; + }) const override { std::vector<PoolIdType> ret; for (auto lid : logicalPoolSet_) { ret.emplace_back(lid); @@ -152,10 +154,10 @@ class FakeTopo : public
::curve::mds::topology::TopologyImpl { return ret; } - std::vector - GetChunkServerInCluster(ChunkServerFilter filter = [](const ChunkServer &) { - return true; - }) const override { + std::vector GetChunkServerInCluster( + ChunkServerFilter filter = [](const ChunkServer&) { + return true; + }) const override { std::vector ret; for (auto it = chunkServerMap_.begin(); it != chunkServerMap_.end(); it++) { @@ -165,7 +167,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } std::list GetChunkServerInLogicalPool( - PoolIdType id, ChunkServerFilter filter = [](const ChunkServer &) { + PoolIdType id, ChunkServerFilter filter = [](const ChunkServer&) { return true; }) const override { std::list ret; @@ -177,7 +179,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } std::list GetChunkServerInServer( - ServerIdType id, ChunkServerFilter filter = [](const ChunkServer &) { + ServerIdType id, ChunkServerFilter filter = [](const ChunkServer&) { return true; }) const override { std::list res; @@ -190,7 +192,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } std::vector GetCopySetsInCluster( - CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo &) { + CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo&) { return true; }) const override { std::vector ret; @@ -202,7 +204,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { std::vector GetCopySetsInChunkServer( ChunkServerIdType csId, - CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo &) { + CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo&) { return true; }) const override { std::vector ret; @@ -217,7 +219,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { std::vector<::curve::mds::topology::CopySetInfo> GetCopySetInfosInLogicalPool( PoolIdType logicalPoolId, - CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo &) { + CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo&) { return true; }) const override { std::vector<::curve::mds::topology::CopySetInfo> ret; @@ -230,7 +232,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { return ret; } - bool GetServer(ServerIdType serverId, Server *out) const override { + bool GetServer(ServerIdType serverId, Server* out) const override { auto it = serverMap_.find(serverId); if (it != serverMap_.end()) { *out = it->second; @@ -240,7 +242,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } bool GetCopySet(::curve::mds::topology::CopySetKey key, - ::curve::mds::topology::CopySetInfo *out) const override { + ::curve::mds::topology::CopySetInfo* out) const override { auto it = copySetMap_.find(key); if (it != copySetMap_.end()) { *out = it->second; @@ -251,7 +253,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } bool GetChunkServer(ChunkServerIdType chunkserverId, - ChunkServer *out) const override { + ChunkServer* out) const override { auto it = chunkServerMap_.find(chunkserverId); if (it != chunkServerMap_.end()) { *out = it->second; @@ -260,7 +262,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { return false; } - bool GetLogicalPool(PoolIdType poolId, LogicalPool *out) const override { + bool GetLogicalPool(PoolIdType poolId, LogicalPool* out) const override { LogicalPool::RedundanceAndPlaceMentPolicy rap; rap.pageFileRAP.copysetNum = copySetMap_.size(); rap.pageFileRAP.replicaNum = 3; @@ -273,7 +275,7 @@ class FakeTopo : public 
::curve::mds::topology::TopologyImpl { return true; } - int UpdateChunkServerOnlineState(const OnlineState &onlineState, + int UpdateChunkServerOnlineState(const OnlineState& onlineState, ChunkServerIdType id) override { auto it = chunkServerMap_.find(id); if (it == chunkServerMap_.end()) { @@ -284,7 +286,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } } - int UpdateChunkServerRwState(const ChunkServerStatus &rwStatus, + int UpdateChunkServerRwState(const ChunkServerStatus& rwStatus, ChunkServerIdType id) { auto it = chunkServerMap_.find(id); if (it == chunkServerMap_.end()) { @@ -296,7 +298,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } int UpdateCopySetTopo( - const ::curve::mds::topology::CopySetInfo &data) override { + const ::curve::mds::topology::CopySetInfo& data) override { CopySetKey key(data.GetLogicalPoolId(), data.GetId()); auto it = copySetMap_.find(key); if (it != copySetMap_.end()) { @@ -330,19 +332,19 @@ class FakeTopologyServiceManager : public TopologyServiceManager { bool CreateCopysetNodeOnChunkServer( ChunkServerIdType csId, - const std::vector<::curve::mds::topology::CopySetInfo> &cs) override { + const std::vector<::curve::mds::topology::CopySetInfo>& cs) override { return true; } }; class FakeTopologyStat : public TopologyStat { public: - explicit FakeTopologyStat(const std::shared_ptr &topo) + explicit FakeTopologyStat(const std::shared_ptr& topo) : topo_(topo) {} void UpdateChunkServerStat(ChunkServerIdType csId, - const ChunkServerStat &stat) {} + const ChunkServerStat& stat) {} - bool GetChunkServerStat(ChunkServerIdType csId, ChunkServerStat *stat) { + bool GetChunkServerStat(ChunkServerIdType csId, ChunkServerStat* stat) { if (!leaderCountOn) { stat->leaderCount = 10; return true; @@ -360,7 +362,7 @@ class FakeTopologyStat : public TopologyStat { stat->leaderCount = leaderCount; return true; } - bool GetChunkPoolSize(PoolIdType pId, uint64_t *chunkPoolSize) { + bool GetChunkPoolSize(PoolIdType pId, uint64_t* chunkPoolSize) { return true; } @@ -401,7 +403,7 @@ class CopysetSchedulerPOC : public testing::Test { void TearDown() override {} void PrintScatterWithInOnlineChunkServer(PoolIdType lid = 0) { - // 打印初始每个chunkserver的scatter-with + // Print the initial scatter with for each chunkserver int sumFactor = 0; std::map factorMap; int max = -1; @@ -437,7 +439,7 @@ class CopysetSchedulerPOC : public testing::Test { << ", scatter-with:" << factor; } - // 打印scatter-with的方差 + // Print variance of scatter-with LOG(INFO) << "scatter-with (online chunkserver): " << factorMap.size(); float avg = static_cast(sumFactor) / factorMap.size(); float variance = 0; @@ -446,14 +448,14 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= factorMap.size(); LOG(INFO) << "###print scatter-with in online chunkserver###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId - << ")" - << ", 最小值:(" << min << "," << minId << ")"; + << "Mean: " << avg << ", Variance: " << variance + << ", Standard Deviation: " << std::sqrt(variance) + << ", Maximum Value: (" << max << "," << maxId << ")" + << ", Minimum Value: (" << min << "," << minId << ")"; } void PrintScatterWithInLogicalPool(PoolIdType lid = 0) { - // 打印初始每个chunkserver的scatter-with + // Print the initial scatter with for each chunkserver int sumFactor = 0; int max = -1; int maxId = -1; @@ -477,7 +479,7 @@ class CopysetSchedulerPOC : public testing::Test { << ", scatter-with:" << factor; } - // 打印scatter-with的方差 + 
// Print variance of scatter-with LOG(INFO) << "scatter-with (all chunkserver): " << factorMap.size(); float avg = static_cast(sumFactor) / factorMap.size(); float variance = 0; @@ -486,14 +488,14 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= factorMap.size(); LOG(INFO) << "###print scatter-with in cluster###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId - << ")" - << ", 最小值:(" << min << "," << minId << ")"; + << "Mean: " << avg << ", Variance: " << variance + << ", Standard Deviation: " << std::sqrt(variance) + << ", Maximum Value: (" << max << "," << maxId << ")" + << ", Minimum Value: (" << min << "," << minId << ")"; } void PrintCopySetNumInOnlineChunkServer(PoolIdType lid = 0) { - // 打印每个chunksever上copyset的数量 + // Print the number of copysets on each chunksever std::map numberMap; int sumNumber = 0; int max = -1; @@ -526,7 +528,7 @@ class CopysetSchedulerPOC : public testing::Test { << ", copyset num:" << number; } - // 打印方差 + // Print Variance float avg = static_cast(sumNumber) / static_cast(numberMap.size()); float variance = 0; @@ -535,14 +537,14 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= numberMap.size(); LOG(INFO) << "###print copyset-num in online chunkserver###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId - << ")" - << "), 最小值:(" << min << "," << minId << ")"; + << "Mean: " << avg << ", Variance: " << variance + << ", Standard Deviation: " << std::sqrt(variance) + << ", Maximum Value: (" << max << "," << maxId << ")" + << "), Minimum Value: (" << min << "," << minId << ")"; } void PrintCopySetNumInLogicalPool(PoolIdType lid = 0) { - // 打印每个chunksever上copyset的数量 + // Print the number of copysets on each chunksever std::map numberMap; int sumNumber = 0; int max = -1; @@ -561,7 +563,7 @@ class CopysetSchedulerPOC : public testing::Test { } } - // 打印方差 + // Print Variance float avg = static_cast(sumNumber) / static_cast(numberMap.size()); float variance = 0; @@ -570,13 +572,13 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= numberMap.size(); LOG(INFO) << "###print copyset-num in cluster###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值: " << max << ", 最小值:" - << min; + << "Mean: " << avg << ", Variance: " << variance + << ", Standard Deviation: " << std::sqrt(variance) + << ", Maximum Value: " << max << ", Minimum Value: " << min; } void PrintLeaderCountInChunkServer(PoolIdType lid = 0) { - // 打印每个chunkserver上leader的数量 + // Print the number of leaders on each chunkserver std::map leaderDistribute; int sumNumber = 0; int max = -1; @@ -612,10 +614,10 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= leaderDistribute.size(); LOG(INFO) << "###print leader-num in cluster###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId - << ")" - << "), 最小值:(" << min << "," << minId << ")"; + << "Mean: " << avg << ", Variance: " << variance + << ", Standard Deviation: " << std::sqrt(variance) + << ", Maximum Value: (" << max << "," << maxId << ")" + << ", Minimum Value: (" << min << "," << minId << ")"; } int GetLeaderCountRange(PoolIdType lid = 0) { @@ -637,16 +639,16 @@ class CopysetSchedulerPOC : public testing::Test { return max - min; } - // 计算每个chunkserver的scatter-with + // Calculate the scatter with for each chunkserver int 
GetChunkServerScatterwith(ChunkServerIdType csId) { - // 计算chunkserver上的scatter-with + // Calculate the scatter-with on the chunkserver std::map<ChunkServerIdType, int> chunkServerCount; for (auto it : topo_->GetCopySetsInChunkServer(csId)) { // get copyset info ::curve::mds::topology::CopySetInfo info; topo_->GetCopySet(it, &info); - // 统计所分布的chunkserver + // Count the chunkservers that the replicas are distributed on for (auto it : info.GetCopySetMembers()) { if (it == csId) { continue; } @@ -673,11 +675,11 @@ class CopysetSchedulerPOC : public testing::Test { ChunkServerIdType RandomOfflineOneChunkServer(PoolIdType lid = 0) { auto chunkServers = topo_->GetChunkServerInLogicalPool(lid); - // 选择[0, chunkServers.size())中的index + // Pick an index in [0, chunkServers.size()) std::srand(std::time(nullptr)); int index = std::rand() % chunkServers.size(); - // 设置目标chunkserver的状态为offline + // Set the status of the target chunkserver to offline auto it = chunkServers.begin(); std::advance(it, index); topo_->UpdateChunkServerOnlineState(OnlineState::OFFLINE, *it); @@ -697,7 +699,7 @@ class CopysetSchedulerPOC : public testing::Test { topo_->UpdateChunkServerOnlineState(OnlineState::ONLINE, id); } - void SetChunkServerOnline(const std::set<ChunkServerIdType> &list) { + void SetChunkServerOnline(const std::set<ChunkServerIdType>& list) { for (auto id : list) { SetChunkServerOnline(id); } @@ -741,10 +743,10 @@ class CopysetSchedulerPOC : public testing::Test { opt, topoAdapter_, opController_); } - void ApplyOperatorsInOpController(const std::set<ChunkServerIdType> &list) { + void ApplyOperatorsInOpController(const std::set<ChunkServerIdType>& list) { std::vector<CopySetKey> keys; for (auto op : opController_->GetOperators()) { - auto type = dynamic_cast<ChangePeer *>(op.step.get()); + auto type = dynamic_cast<ChangePeer*>(op.step.get()); ASSERT_TRUE(type != nullptr); ASSERT_TRUE(list.end() != list.find(type->GetOldPeer())); @@ -771,7 +773,7 @@ class CopysetSchedulerPOC : public testing::Test { void ApplyTranferLeaderOperator() { for (auto op : opController_->GetOperators()) { - auto type = dynamic_cast<TransferLeader *>(op.step.get()); + auto type = dynamic_cast<TransferLeader*>(op.step.get()); ASSERT_TRUE(type != nullptr); ::curve::mds::topology::CopySetInfo info; @@ -781,9 +783,9 @@ class CopysetSchedulerPOC : public testing::Test { } } - // 有两个chunkserver offline的停止条件: - // 所有copyset均有两个及以上的副本offline - bool SatisfyStopCondition(const std::set<ChunkServerIdType> &idList) { + // Stop condition when two chunkservers are offline: + // every copyset has two or more replicas offline + bool SatisfyStopCondition(const std::set<ChunkServerIdType>& idList) { std::vector<::curve::mds::topology::CopySetKey> copysetList; for (auto id : idList) { auto list = topo_->GetCopySetsInChunkServer(id); @@ -831,58 +833,65 @@ class CopysetSchedulerPOC : public testing::Test { }; TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_1) { - // 测试一个chunkserver offline恢复后的情况 - // 1. 创建recoverScheduler + // Test the situation after one offline chunkserver is recovered + // 1. Create recoverScheduler BuilRecoverScheduler(1); - // 2. 任意选择一个chunkserver处于offline状态 + // 2. Pick an arbitrary chunkserver and take it offline ChunkServerIdType choose = RandomOfflineOneChunkServer(); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operators until no copyset is left on choose do { recoverScheduler_->Schedule(); // update copyset to topology ApplyOperatorsInOpController(std::set<ChunkServerIdType>{choose}); } while (topo_->GetCopySetsInChunkServer(choose).size() > 0); - // 4. 打印最终的scatter-with + // 4. 
Print the final scatter with PrintScatterWithInOnlineChunkServer(); PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // =============================结果====================================== - // ===========================集群初始状态================================= + // =============================Result====================================== + // =============================Initial state of the + // cluster============================= // ###print scatter-with in cluster### - // 均值:97.9556, 方差:11.5314, 标准差: 3.39579, 最大值:106, 最小值:88 + // Mean: 97.9556, Variance: 11.5314, Standard Deviation: 3.39579, Max: 106, + // Min: 88 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 - // ==========================恢复之后的状态================================= + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 + // =============================Status after + // Recovery================================= // //NOLINT // ###print scatter-with in online chunkserver### - // 均值:均值:98.8156, 方差:10.3403, 标准差: 3.21564, 最大值:106, - // 最小值:95 //NOLINT + // Mean: 98.8156, variance: 10.3403, standard deviation: 3.21564, maximum + // value: 106, Minimum value: 95//NOLINT // ###print scatter-with in cluster### - // 均值:98.2667, 方差:64.2289, 标准差: 8.0143, 最大值:106, 最小值:0 + // Mean: 98.2667, Variance: 64.2289, Standard Deviation: 8.0143, Max: 106, + // Min: 0 // ###print copyset-num in online chunkserver### - // 均值:100.559, 方差:1.77729, 标准差: 1.33315, 最大值:109, 最小值:100 + // Mean value: 100.559, variance: 1.77729, standard deviation: 1.33315, + // maximum value: 109, minimum value: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:57.6333, 标准差: 7.59166, 最大值: 109, 最小值:0 + // Mean value: 100, variance: 57.6333, standard deviation: 7.59166, maximum + // value: 109, minimum value: 0 } TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_2) { - // 测试一个chunkserver offline恢复过程中,另一个chunkserver offline的情况 - // 1. 创建recoverScheduler + // Testing the situation of another chunkserver offline during the recovery + // process of one chunkserver offline + // 1. Create recoverScheduler BuilRecoverScheduler(1); - // 2. 任意选择两个chunkserver处于offline状态 + // 2. Choose any two chunkservers to be offline std::set idlist; ChunkServerIdType choose1 = 0; ChunkServerIdType choose2 = 0; choose1 = RandomOfflineOneChunkServer(); idlist.emplace(choose1); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operator until there is no copyset on choose do { recoverScheduler_->Schedule(); @@ -896,35 +905,43 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_2) { ApplyOperatorsInOpController(std::set{choose2}); } while (!SatisfyStopCondition(idlist)); - // 4. 打印最终的scatter-with + // 4. 
Print the final scatter-with PrintScatterWithInOnlineChunkServer(); PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // ============================结果=================================== - // =========================集群初始状态=============================== + // ===================================Result=================================== + // ===================================Initial state of the + // cluster=============================== // ###print scatter-with in cluster### - // 均值:97.3, 方差:9.89889, 标准差:3.14625, 最大值:106, 最小值:89 + // Mean value: 97.3, variance: 9.89889, standard deviation: 3.14625, maximum + // value: 106, minimum value: 89 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 - // =========================恢复之后的状态============================== + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 + // ===================================Status after + // Recovery============================== // ###print scatter-with in online chunkserver### - // 均值:100.348, 方差:7.47418, 标准差: 2.73389, 最大值:108, 最小值:101 + // Mean value: 100.348, variance: 7.47418, standard deviation: 2.73389, + // maximum value: 108, minimum value: 101 // ###print scatter-with in cluster### - // 均值:99.2333, 方差:118.034, 标准差: 10.8644, 最大值:108, 最小值:0 + // Mean value: 99.2333, variance: 118.034, standard deviation: 10.8644, + // maximum value: 108, minimum value: 0 // ###print copyset-num in online chunkserver### - // 均值:101.124, 方差:2.9735, 标准差: 1.72438, 最大值:112, 最小值:100 + // Mean value: 101.124, variance: 2.9735, standard deviation: 1.72438, + // maximum value: 112, minimum value: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:115.3, 标准差: 10.7378, 最大值: 112, 最小值:0 + // Mean value: 100, variance: 115.3, standard deviation: 10.7378, maximum + // value: 112, minimum value: 0 } TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { - // 测试一个chunkserver offline恢复过程中,接连有5个chunkserver offline - // 1. 创建recoverScheduler + // During the recovery process of testing a chunkserver offline, there were + // 5 consecutive chunkserver offline + // 1. Create recoverScheduler BuilRecoverScheduler(1); - // 2. 任意选择两个chunkserver处于offline状态 + // 2. Choose any two chunkservers to be offline std::set idlist; std::vector origin; for (int i = 0; i < 6; i++) { @@ -934,7 +951,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { origin[0] = RandomOfflineOneChunkServer(); idlist.emplace(origin[0]); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operator until there is no copyset on choose do { recoverScheduler_->Schedule(); @@ -950,35 +967,42 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { ApplyOperatorsInOpController(idlist); } while (!SatisfyStopCondition(idlist)); - // 4. 打印最终的scatter-with + // 4. 
Print the final scatter-with PrintScatterWithInOnlineChunkServer(); PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // ============================结果==================================== - // ========================集群初始状态================================= + // ====================================Result==================================== + // ====================================Initial state of the + // cluster================================= // ###print scatter-with in cluster### - // 均值:97.6, 方差:11.8067, 标准差: 3.43608, 最大值:105, 最小值:87 + // Mean value: 97.6, variance: 11.8067, standard deviation: 3.43608, maximum + // value: 105, minimum value: 87 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 - // ========================恢复之后的状态================================ + // ====================================Status after + // Recovery================================ // ###print scatter-with in online chunkserver### - // 均值:105.425, 方差:9.95706, 标准差: 3.15548, 最大值:116, 最小值:103 + // Mean value: 105.425, variance: 9.95706, standard deviation: 3.15548, + // maximum value: 116, minimum value: 103 // ###print scatter-with in cluster### - // 均值:101.933, 方差:363.262, 标准差: 19.0594, 最大值:116, 最小值:0 + // Mean value: 101.933, variance: 363.262, standard deviation: 19.0594, + // maximum value: 116, minimum value: 0 // ###print copyset-num in online chunkserver### - // 均值:103.425, 方差:13.164, 标准差: 3.62822, 最大值:121, 最小值:100 + // Mean value: 103.425, variance: 13.164, standard deviation: 3.62822, + // maximum value: 121, minimum value: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:352.989, 标准差: 18.788, 最大值: 121, 最小值:0 + // Mean value: 100, variance: 352.989, standard deviation: 18.788, maximum + // value: 121, minimum value: 0 } TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) { - // 测试20个chunkserver 接连 offline - // 1. 创建recoverScheduler + // Test 20 chunkservers going offline one after another + // 1. Create recoverScheduler BuilRecoverScheduler(1); - // 2. 任意选择两个chunkserver处于offline状态 + // 2. Choose any two chunkservers to be offline std::set<ChunkServerIdType> idlist; std::vector<ChunkServerIdType> origin; for (int i = 0; i < 20; i++) { @@ -988,7 +1012,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { origin[0] = RandomOfflineOneChunkServer(); idlist.emplace(origin[0]); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operators until no copyset is left on choose do { recoverScheduler_->Schedule(); @@ -1004,7 +1028,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) { - // 4. 打印最终的scatter-with + // 4. Print the final scatter-with PrintScatterWithInOnlineChunkServer(); PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); @@ -1012,24 +1036,24 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) { }
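A note on the metric these POC tests keep printing: the scatter-with of a chunkserver is the number of distinct other chunkservers that share at least one copyset with it, which is what GetChunkServerScatterwith above computes. A self-contained sketch of that count, with copyset membership reduced to plain std::set<int>:

    #include <cstdio>
    #include <set>
    #include <vector>

    // Scatter-width of csId: how many distinct peers appear together with
    // csId in at least one copyset's member set.
    int ScatterWidth(int csId, const std::vector<std::set<int>>& copysets) {
        std::set<int> peers;
        for (const auto& members : copysets) {
            if (members.count(csId) == 0) continue;  // csId not a replica here
            for (int peer : members) {
                if (peer != csId) peers.insert(peer);
            }
        }
        return static_cast<int>(peers.size());
    }

    int main() {
        std::vector<std::set<int>> copysets{{1, 2, 3}, {1, 3, 4}, {2, 4, 5}};
        // Chunkserver 1 shares copysets with 2, 3 and 4 -> scatter-width 3.
        std::printf("scatter-width(1)=%d\n", ScatterWidth(1, copysets));
        return 0;
    }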
TEST_F(CopysetSchedulerPOC, test_chunkserver_offline_over_concurrency) { - // 测试一个server有多个chunkserver offline, 有一个被设置为pending, - // 可以recover的情况 + // Test a server with multiple chunkservers offline, one of which is set + // to PENDDING and can be recovered offlineTolerent_ = 20; BuilRecoverScheduler(4); - // offline一个server上的chunkserver + // Take the chunkservers on one server offline auto chunkserverSet = OfflineChunkServerInServer1(); - // 选择其中一个设置为pendding状态 + // Pick one of them and set it to PENDDING status ChunkServerIdType target = *chunkserverSet.begin(); topo_->UpdateChunkServerRwState(ChunkServerStatus::PENDDING, target); int opNum = 0; int targetOpNum = topo_->GetCopySetsInChunkServer(target).size(); - // 开始恢复 + // Start recovery do { recoverScheduler_->Schedule(); opNum += opController_->GetOperators().size(); - // apply operator, 把copyset更新到topology + // Apply operator, update copyset to topology ApplyOperatorsInOpController(std::set<ChunkServerIdType>{target}); } while (topo_->GetCopySetsInChunkServer(target).size() > 0); @@ -1038,14 +1062,14 @@ TEST_F(CopysetSchedulerPOC, test_chunkserver_offline_over_concurrency) { TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_1) { // NOLINT - // 测试一个chunkserver offline, 集群回迁的情况 + // Test one chunkserver going offline and the copysets migrating back - // 1. 一个chunkserver offline后恢复 + // 1. Recover after one chunkserver goes offline BuilRecoverScheduler(1); ChunkServerIdType choose = RandomOfflineOneChunkServer(); do { recoverScheduler_->Schedule(); - // apply operator, 把copyset更新到topology + // Apply operator, update copyset to topology ApplyOperatorsInOpController(std::set<ChunkServerIdType>{choose}); } while (topo_->GetCopySetsInChunkServer(choose).size() > 0); @@ -1053,23 +1077,30 @@ TEST_F(CopysetSchedulerPOC, PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // ============================结果==================================== - // ========================集群初始状态================================= + // ====================================Result==================================== + // ====================================Initial state of the + // cluster================================= // ###print scatter-with in cluster### - // 均值:97.6667, 方差:10.9444, 标准差: 3.30824, 最大值:107, 最小值:90 + // Mean value: 97.6667, variance: 10.9444, standard deviation: 3.30824, + // maximum value: 107, minimum value: 90 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 - // ========================恢复之后的状态================================ + // ====================================Status after + // Recovery================================ // ###print scatter-with in online chunkserver### - // 均值:99.1061, 方差:10.1172, 标准差: 3.18076, 最大值:108, 最小值:91 + // Mean value: 99.1061, variance: 10.1172, standard deviation: 3.18076, + // maximum value: 108, minimum value: 91 // ###print scatter-with in cluster### - // 均值:98.5556, 方差:64.3247, 标准差: 8.02027, 最大值:108, 最小值:0 + // Mean value: 98.5556, variance: 64.3247, standard deviation: 8.02027, + // maximum value: 108, minimum value: 0 // ###print copyset-num in online chunkserver### - // 均值:100.559, 方差:1.56499, 标准差: 1.251, 最大值:107, 最小值:100 + // Mean value: 100.559, variance: 1.56499, standard deviation: 1.251, + // maximum value: 107, minimum value: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:57.4222, 标准差: 7.57774, 最大值: 107, 最小值:0 + // Mean value: 100, variance: 57.4222, standard deviation: 7.57774, maximum + // value: 107, minimum value: 0
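The mean/variance/standard-deviation figures quoted in these result comments are produced by the Print* helpers, which all reduce to the same computation over a per-chunkserver factor map. A minimal sketch of that computation, with a plain std::map<int, int> standing in for the topology types:

    #include <cmath>
    #include <cstdio>
    #include <map>

    int main() {
        // factorMap mirrors the per-chunkserver scatter-with (or copyset count).
        std::map<int, int> factorMap{{1, 97}, {2, 100}, {3, 103}, {4, 98}};

        int sumFactor = 0;
        for (const auto& kv : factorMap) sumFactor += kv.second;

        // Same formulas the helpers log: mean, then population variance.
        float avg = static_cast<float>(sumFactor) / factorMap.size();
        float variance = 0;
        for (const auto& kv : factorMap) {
            variance += (kv.second - avg) * (kv.second - avg);
        }
        variance /= factorMap.size();

        std::printf("mean=%.2f variance=%.2f stddev=%.2f\n", avg, variance,
                    std::sqrt(variance));
        return 0;
    }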
1.56499, standard deviation: 1.251, + // maximum value: 107, minimum value: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:57.4222, 标准差: 7.57774, 最大值: 107, 最小值:0 + // Mean value: 100, variance: 57.4222, standard deviation: 7.57774, maximum + // value: 107, minimum value: 0 - // 2. chunkserver-choose恢复成online状态 + // 2. chunkserver-choose is restored to the online state SetChunkServerOnline(choose); BuildCopySetScheduler(1); std::vector csList; @@ -1087,20 +1118,23 @@ TEST_F(CopysetSchedulerPOC, minScatterwidth_ * (1 + scatterwidthPercent_)); ASSERT_TRUE(GetChunkServerScatterwith(choose) >= minScatterwidth_); - // ============================结果==================================== - // ========================迁移后的状态================================= + // ====================================Result==================================== + // ====================================Status after + // Migration================================= // ###print scatter-with in cluster### - // 均值:99.2667, 方差:9.65111, 标准差: 3.10662, 最大值:109, 最小值:91 + // Mean value: 99.2667, variance: 9.65111, standard deviation: 3.10662, + // maximum value: 109, minimum value: 91 // ###print copyset-num in cluster### - // 均值:100, 方差:0.5, 标准差: 0.707107, 最大值: 101, 最小值:91 + // Mean value: 100, variance: 0.5, standard deviation: 0.707107, maximum + // value: 101, minimum value: 91 } TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_copysetRebalance_2) { // NOLINT - // 测试一个chunkserver offline恢复过程中,另一个chunkserver offline - // 集群回迁的情况 + // Test another chunkserver going offline while the first one is still + // recovering, and the cluster then migrating copysets back - // 1. chunkserver offline后恢复 + // 1. Recover after the chunkserver goes offline BuilRecoverScheduler(1); std::set idlist; ChunkServerIdType choose1 = 0; @@ -1124,23 +1158,30 @@ TEST_F(CopysetSchedulerPOC, PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // ============================结果=================================== - // =========================集群初始状态=============================== + // ===================================Result=================================== + // ===================================Initial state of the + // cluster=============================== // ###print scatter-with in cluster### - // 均值:97.4889, 方差:9.96099, 标准差: 3.1561, 最大值:105, 最小值:89 + // Mean value: 97.4889, variance: 9.96099, standard deviation: 3.1561, + // maximum value: 105, minimum value: 89 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 - // =========================恢复之后的状态============================== + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 + // ===================================Status after + // Recovery============================== // ###print scatter-with in online chunkserver### - // 均值:100.472, 方差:7.37281, 标准差: 2.71529, 最大值:106, 最小值:91 + // Mean value: 100.472, variance: 7.37281, standard deviation: 2.71529, + // maximum value: 106, minimum value: 91 // ###print scatter-with in cluster### - // 均值:99.3556, 方差:118.207, 标准差: 10.8723, 最大值:106, 最小值:0 + // Mean value: 99.3556, variance: 118.207, standard deviation: 10.8723, + // maximum value: 106, minimum value: 0 // ###print copyset-num in online chunkserver### - // 均值:101.124, 方差:2.77125, 标准差: 1.66471, 最大值:111, 最小值:100 + // Mean value: 101.124, variance: 2.77125, standard deviation: 1.66471, + // maximum value: 111, minimum value: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:115.1, 标准差: 10.7285, 最大值: 111, 最小值:0 + // Mean value: 100,
variance: 115.1, standard deviation: 10.7285, maximum + // value: 111, minimum value: 0 - // 2. cchunkserver恢复成online状态 + // 2. Restore the chunkservers to the online state SetChunkServerOnline(choose1); SetChunkServerOnline(choose2); BuildCopySetScheduler(1); @@ -1152,20 +1193,22 @@ TEST_F(CopysetSchedulerPOC, } while (removeOne > 0); PrintScatterWithInLogicalPool(); PrintCopySetNumInLogicalPool(); - // ============================结果==================================== - // ========================迁移后的状态================================= + // ===================================Result==================================== + // ===================================Status after + // Migration================================= // ###print scatter-with in cluster### - // 均值:100.556, 方差:8.18025, 标准差: 2.86011, 最大值:107, 最小值:91 + // Mean value: 100.556, variance: 8.18025, standard deviation: 2.86011, + // maximum value: 107, minimum value: 91 // ###print copyset-num in cluster### - // 均值:100, 方差:1, 标准差: 1, 最大值: 101, 最小值:91 + // Mean: 100, Variance: 1, Standard Deviation: 1, Maximum: 101, Minimum: 91 } TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_3) { // NOLINT - // 测试一个chunkserver offline恢复过程中,接连有5个chunkserver offline - // 回迁的情况 + // Test 5 more chunkservers going offline in succession while one + // chunkserver is still recovering, followed by migration back - // 1. chunkserver offline后恢复 + // 1. Recover after the chunkserver goes offline BuilRecoverScheduler(1); std::set idlist; std::vector origin; @@ -1176,7 +1219,7 @@ TEST_F(CopysetSchedulerPOC, origin[0] = RandomOfflineOneChunkServer(); idlist.emplace(origin[0]); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operators until no copysets remain on the chosen chunkserver do { recoverScheduler_->Schedule(); @@ -1197,23 +1240,30 @@ TEST_F(CopysetSchedulerPOC, PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // ============================结果==================================== - // ========================集群初始状态================================= + // ===================================Result==================================== + // ===================================Initial state of the + // cluster================================= // ###print scatter-with in cluster### - // 均值:97.6, 方差:11.8067, 标准差: 3.43608, 最大值:105, 最小值:87 + // Mean value: 97.6, variance: 11.8067, standard deviation: 3.43608, maximum + // value: 105, minimum value: 87 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 - // ========================恢复之后的状态================================ + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 + // ===================================Status after + // Recovery================================ // ###print scatter-with in online chunkserver### - // 均值:105.425, 方差:9.95706, 标准差: 3.15548, 最大值:116, 最小值:103 + // Mean value: 105.425, variance: 9.95706, standard deviation: 3.15548, + // maximum value: 116, minimum value: 103 // ###print scatter-with in cluster### - // 均值:101.933, 方差:363.262, 标准差: 19.0594, 最大值:116, 最小值:0 + // Mean value: 101.933, variance: 363.262, standard deviation: 19.0594, + // maximum value: 116, minimum value: 0 // ###print copyset-num in online chunkserver### - // 均值:103.425, 方差:13.164, 标准差: 3.62822, 最大值:121, 最小值:100 + // Mean value: 103.425, variance: 13.164, standard deviation: 3.62822, + // maximum value: 121, minimum value: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:352.989, 标准差: 18.788, 最大值: 121, 最小值:0 + // Mean value: 100, variance:
352.989, standard deviation: 18.788, maximum + // value: 121, minimum value: 0 - // 2. chunkserver恢复成online状态 + // 2. Chunkserver restored to online state SetChunkServerOnline(idlist); BuildCopySetScheduler(1); std::vector csList; @@ -1235,12 +1285,14 @@ TEST_F(CopysetSchedulerPOC, ASSERT_TRUE(GetChunkServerScatterwith(choose) >= minScatterwidth_); } - // ============================结果==================================== - // ========================迁移后的状态================================= + // ===================================Result==================================== + // ===================================Status after + // Migration================================= // ###print scatter-with in cluster### - // 均值:100.556, 方差:8.18025, 标准差: 2.86011, 最大值:107, 最小值:91 + // Mean value: 100.556, variance: 8.18025, standard deviation: 2.86011, + // maximum value: 107, minimum value: 91 // ###print copyset-num in cluster### - // 均值:100, 方差:1, 标准差: 1, 最大值: 101, 最小值:91 + // Mean: 100, Variance: 1, Standard Deviation: 1, Maximum: 101, Minimum: 91 } TEST_F(CopysetSchedulerPOC, diff --git a/test/mds/schedule/scheduler_helper_test.cpp b/test/mds/schedule/scheduler_helper_test.cpp index ff54d4c5bf..76668c415d 100644 --- a/test/mds/schedule/scheduler_helper_test.cpp +++ b/test/mds/schedule/scheduler_helper_test.cpp @@ -20,15 +20,17 @@ * Author: lixiaocui */ +#include "src/mds/schedule/scheduler_helper.h" + #include + #include "test/mds/schedule/common.h" #include "test/mds/schedule/mock_topoAdapter.h" -#include "src/mds/schedule/scheduler_helper.h" using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; namespace curve { namespace mds { @@ -42,9 +44,7 @@ class TestSchedulerHelper : public ::testing::Test { topoAdapter_ = std::make_shared(); } - void TearDown() override { - topoAdapter_ = nullptr; - } + void TearDown() override { topoAdapter_ = nullptr; } protected: std::shared_ptr topoAdapter_; @@ -56,67 +56,83 @@ TEST_F(TestSchedulerHelper, test_SatisfyScatterWidth_target) { int maxScatterWidth = minScatterWidth * (1 + scatterWidthRangePerent); bool target = true; { - // 1. 变更之后未达到最小值,但使得scatter-width增大 + // 1. After the change, the minimum value was not reached, but it + // increased the scatter-width int oldValue = 10; int newValue = 13; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 2. 变更之后未达到最小值,scattter-width不变 + // 2. After the change, the minimum value is not reached, and the + // scatter-width remains unchanged int oldValue = 10; int newValue = 10; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 3. 变更之后未达到最小值,scatter-width减小 + // 3. After the change, the minimum value was not reached and the + // scatter-width decreased int oldValue = 10; int newValue = 8; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 4. 变更之后等于最小值 + // 4. 
Equal to minimum value after change int oldValue = minScatterWidth + 2; int newValue = minScatterWidth; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 5. 变更之后大于最小值,小于最大值 + // 5. After the change, it is greater than the minimum value and less + // than the maximum value int oldValue = minScatterWidth; int newValue = minScatterWidth + 2; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 6. 变更之后等于最大值 + // 6. Equal to maximum value after change int oldValue = maxScatterWidth - 2; int newValue = maxScatterWidth; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 7. 变更之后大于最大值,scatter-width增大 + // 7. After the change, it is greater than the maximum value and the + // scatter-width increases int oldValue = maxScatterWidth + 1; int newValue = maxScatterWidth + 2; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 8. 变更之后大于最大值,scatter-width不变 + // 8. After the change, it is greater than the maximum value, and the + // scatter-width remains unchanged int oldValue = maxScatterWidth + 2; int newValue = maxScatterWidth + 2; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 9. 变更之后大于最大值,scatter-width减小 + // 9. After the change is greater than the maximum value, the + // scatter-width decreases int oldValue = maxScatterWidth + 3; int newValue = maxScatterWidth + 2; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } } @@ -126,67 +142,83 @@ TEST_F(TestSchedulerHelper, test_SatisfyScatterWidth_not_target) { int maxScatterWidth = minScatterWidth * (1 + scatterWidthRangePerent); bool target = false; { - // 1. 变更之后未达到最小值,但使得scatter-width增大 + // 1. After the change, the minimum value was not reached, but it + // increased the scatter-width int oldValue = 10; int newValue = 13; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 2. 变更之后未达到最小值,scattter-width不变 + // 2. 
After the change, the minimum value is not reached, and the + // scatter-width remains unchanged int oldValue = 10; int newValue = 10; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 3. 变更之后未达到最小值,scatter-width减小 + // 3. After the change, the minimum value was not reached and the + // scatter-width decreased int oldValue = 10; int newValue = 8; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 4. 变更之后等于最小值 + // 4. Equal to minimum value after change int oldValue = minScatterWidth + 2; int newValue = minScatterWidth; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 5. 变更之后大于最小值,小于最大值 + // 5. After the change, it is greater than the minimum value and less + // than the maximum value int oldValue = minScatterWidth; int newValue = minScatterWidth + 2; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 6. 变更之后等于最大值 + // 6. Equal to maximum value after change int oldValue = maxScatterWidth - 2; int newValue = maxScatterWidth; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 7. 变更之后大于最大值,scatter-width增大 + // 7. After the change, it is greater than the maximum value and the + // scatter-width increases int oldValue = maxScatterWidth + 1; int newValue = maxScatterWidth + 2; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 8. 变更之后大于最大值,scatter-width不变 + // 8. After the change, it is greater than the maximum value, and the + // scatter-width remains unchanged int oldValue = maxScatterWidth + 2; int newValue = maxScatterWidth + 2; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 9. 变更之后大于最大值,scatter-width减小 + // 9. 
After the change is greater than the maximum value, the + // scatter-width decreases int oldValue = maxScatterWidth + 3; int newValue = maxScatterWidth + 2; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } } @@ -195,7 +227,7 @@ TEST_F(TestSchedulerHelper, test_SatisfyZoneAndScatterWidthLimit) { ChunkServerIdType source = 1; ChunkServerIdType target = 4; { - // 1. 获取target的信息失败 + // 1. Failed to obtain information for target EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(4, _)) .WillOnce(Return(false)); ASSERT_FALSE(SchedulerHelper::SatisfyZoneAndScatterWidthLimit( @@ -204,9 +236,10 @@ TEST_F(TestSchedulerHelper, test_SatisfyZoneAndScatterWidthLimit) { PeerInfo peer4(4, 1, 1, "192.168.10.1", 9001); ChunkServerInfo info4(peer4, OnlineState::ONLINE, DiskState::DISKERROR, - ChunkServerStatus::READWRITE, 1, 1, 1, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 1, 1, 1, + ChunkServerStatisticInfo{}); { - // 2. 获取到的标准zoneNum = 0 + // 2. Obtained standard zoneNum=0 EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(4, _)) .WillOnce(DoAll(SetArgPointee<1>(info4), Return(true))); EXPECT_CALL(*topoAdapter_, GetStandardZoneNumInLogicalPool(1)) @@ -216,12 +249,12 @@ TEST_F(TestSchedulerHelper, test_SatisfyZoneAndScatterWidthLimit) { } { - // 3. 迁移之后不符合zone条件 + // 3. Does not meet zone conditions after migration EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(4, _)) .WillOnce(DoAll(SetArgPointee<1>(info4), Return(true))); EXPECT_CALL(*topoAdapter_, GetStandardZoneNumInLogicalPool(1)) .WillOnce(Return(4)); - ASSERT_FALSE(SchedulerHelper::SatisfyZoneAndScatterWidthLimit( + ASSERT_FALSE(SchedulerHelper::SatisfyZoneAndScatterWidthLimit( topoAdapter_, target, source, copyset, 1, 0.01)); } } @@ -283,18 +316,18 @@ TEST_F(TestSchedulerHelper, test_CalculateAffectOfMigration) { .WillOnce(SetArgPointee<1>(replica2Map)); EXPECT_CALL(*topoAdapter_, GetChunkServerScatterMap(3, _)) .WillOnce(SetArgPointee<1>(replica3Map)); - SchedulerHelper::CalculateAffectOfMigration( - copyset, source, target, topoAdapter_, &scatterWidth); - // 对于source, old=2, new=1 + SchedulerHelper::CalculateAffectOfMigration(copyset, source, target, + topoAdapter_, &scatterWidth); + // For source, old=2, new=1 ASSERT_EQ(2, scatterWidth[source].first); ASSERT_EQ(1, scatterWidth[source].second); - // 对于target, old=1, new=2 + // For target, old=1, new=2 ASSERT_EQ(1, scatterWidth[target].first); ASSERT_EQ(2, scatterWidth[target].second); - // 对于replica2, old=3, new=2 + // For replica2, old=3, new=2 ASSERT_EQ(3, scatterWidth[2].first); ASSERT_EQ(2, scatterWidth[2].second); - // 对于replica3, old=2, new=3 + // For replica3, old=2, new=3 ASSERT_EQ(2, scatterWidth[3].first); ASSERT_EQ(3, scatterWidth[3].second); } @@ -324,19 +357,19 @@ TEST_F(TestSchedulerHelper, test_CalculateAffectOfMigration_no_source) { .WillOnce(SetArgPointee<1>(replica2Map)); EXPECT_CALL(*topoAdapter_, GetChunkServerScatterMap(3, _)) .WillOnce(SetArgPointee<1>(replica3Map)); - SchedulerHelper::CalculateAffectOfMigration( - copyset, source, target, topoAdapter_, &scatterWidth); + SchedulerHelper::CalculateAffectOfMigration(copyset, source, target, + topoAdapter_, &scatterWidth); - // 对于target, old=1, new=3 + // For target, old=1, new=3 ASSERT_EQ(1, scatterWidth[target].first); ASSERT_EQ(3, scatterWidth[target].second); - // 对于replica1, old=2, new=3 + // For replica1, old=2, 
new=3 ASSERT_EQ(2, scatterWidth[1].first); ASSERT_EQ(3, scatterWidth[1].second); - // 对于replica2, old=3, new=3 + // For replica2, old=3, new=3 ASSERT_EQ(3, scatterWidth[2].first); ASSERT_EQ(3, scatterWidth[2].second); - // 对于replica3, old=2, new=3 + // For replica3, old=2, new=3 ASSERT_EQ(2, scatterWidth[3].first); ASSERT_EQ(3, scatterWidth[3].second); } @@ -362,22 +395,22 @@ TEST_F(TestSchedulerHelper, test_CalculateAffectOfMigration_no_target) { .WillOnce(SetArgPointee<1>(replica2Map)); EXPECT_CALL(*topoAdapter_, GetChunkServerScatterMap(3, _)) .WillOnce(SetArgPointee<1>(replica3Map)); - SchedulerHelper::CalculateAffectOfMigration( - copyset, source, target, topoAdapter_, &scatterWidth); + SchedulerHelper::CalculateAffectOfMigration(copyset, source, target, + topoAdapter_, &scatterWidth); - // 对于source, old=2, new=1 + // For source, old=2, new=1 ASSERT_EQ(2, scatterWidth[source].first); ASSERT_EQ(1, scatterWidth[source].second); - // 对于replica2, old=3, new=2 + // For replica2, old=3, new=2 ASSERT_EQ(3, scatterWidth[2].first); ASSERT_EQ(2, scatterWidth[2].second); - // 对于replica3, old=2, new=2 + // For replica3, old=2, new=2 ASSERT_EQ(2, scatterWidth[3].first); ASSERT_EQ(2, scatterWidth[3].second); } TEST_F(TestSchedulerHelper, - test_InvovledReplicasSatisfyScatterWidthAfterMigration_not_satisfy) { + test_InvovledReplicasSatisfyScatterWidthAfterMigration_not_satisfy) { CopySetInfo copyset = GetCopySetInfoForTest(); ChunkServerIdType source = 1; ChunkServerIdType target = 4; @@ -405,14 +438,14 @@ TEST_F(TestSchedulerHelper, int affected = 0; bool res = SchedulerHelper::InvovledReplicasSatisfyScatterWidthAfterMigration( - copyset, source, target, UNINTIALIZE_ID, topoAdapter_, - 10, 0.1, &affected); + copyset, source, target, UNINTIALIZE_ID, topoAdapter_, 10, 0.1, + &affected); ASSERT_FALSE(res); ASSERT_EQ(0, affected); } TEST_F(TestSchedulerHelper, - test_InvovledReplicasSatisfyScatterWidthAfterMigration_satisfy) { + test_InvovledReplicasSatisfyScatterWidthAfterMigration_satisfy) { CopySetInfo copyset = GetCopySetInfoForTest(); ChunkServerIdType source = 1; ChunkServerIdType target = 4; @@ -440,53 +473,55 @@ TEST_F(TestSchedulerHelper, int affected = 0; bool res = SchedulerHelper::InvovledReplicasSatisfyScatterWidthAfterMigration( - copyset, source, target, UNINTIALIZE_ID, topoAdapter_, 1, 2, &affected); + copyset, source, target, UNINTIALIZE_ID, topoAdapter_, 1, 2, + &affected); ASSERT_TRUE(res); ASSERT_EQ(0, affected); } - TEST_F(TestSchedulerHelper, test_SortChunkServerByCopySetNumAsc) { PeerInfo peer1(1, 1, 1, "192.168.10.1", 9000); PeerInfo peer2(2, 2, 2, "192.168.10.2", 9000); PeerInfo peer3(3, 3, 3, "192.168.10.3", 9000); PeerInfo peer4(4, 4, 4, "192.168.10.4", 9000); ChunkServerInfo info1(peer1, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 10, 10, 10, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 10, 10, 10, + ChunkServerStatisticInfo{}); ChunkServerInfo info2(peer2, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 10, 10, 10, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 10, 10, 10, + ChunkServerStatisticInfo{}); ChunkServerInfo info3(peer3, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 10, 10, 10, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 10, 10, 10, + ChunkServerStatisticInfo{}); std::vector chunkserverList{info1, info2, info3}; // {1,2,3} CopySetInfo copyset1(CopySetKey{1, 1}, 1, 1, - std::vector{peer1, peer2, peer3}, - ConfigChangeInfo{}, 
CopysetStatistics{}); + std::vector{peer1, peer2, peer3}, + ConfigChangeInfo{}, CopysetStatistics{}); // {1,3,4} CopySetInfo copyset2(CopySetKey{1, 2}, 1, 1, - std::vector{peer1, peer3, peer4}, - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector{peer1, peer3, peer4}, + ConfigChangeInfo{}, CopysetStatistics{}); // {1,2,3} CopySetInfo copyset3(CopySetKey{1, 3}, 1, 1, - std::vector{peer1, peer2, peer3}, - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector{peer1, peer2, peer3}, + ConfigChangeInfo{}, CopysetStatistics{}); // {1,2,4} CopySetInfo copyset4(CopySetKey{1, 4}, 1, 1, - std::vector{peer1, peer2, peer4}, - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector{peer1, peer2, peer4}, + ConfigChangeInfo{}, CopysetStatistics{}); // {1,3,4} CopySetInfo copyset5(CopySetKey{1, 5}, 1, 1, - std::vector{peer1, peer3, peer4}, - ConfigChangeInfo{}, CopysetStatistics{}); - std::vector copysetList{ - copyset1, copyset2, copyset3, copyset4, copyset5}; + std::vector{peer1, peer3, peer4}, + ConfigChangeInfo{}, CopysetStatistics{}); + std::vector copysetList{copyset1, copyset2, copyset3, copyset4, + copyset5}; // chunkserver-1: 5, chunkserver-2: 3 chunkserver-3: 4 - EXPECT_CALL(*topoAdapter_, GetCopySetInfos()) - .WillOnce(Return(copysetList)); - SchedulerHelper::SortChunkServerByCopySetNumAsc( - &chunkserverList, topoAdapter_); + EXPECT_CALL(*topoAdapter_, GetCopySetInfos()).WillOnce(Return(copysetList)); + SchedulerHelper::SortChunkServerByCopySetNumAsc(&chunkserverList, + topoAdapter_); ASSERT_EQ(info2.info.id, chunkserverList[0].info.id); ASSERT_EQ(info3.info.id, chunkserverList[1].info.id); @@ -496,4 +531,3 @@ TEST_F(TestSchedulerHelper, test_SortChunkServerByCopySetNumAsc) { } // namespace schedule } // namespace mds } // namespace curve - diff --git a/test/mds/server/mds_test.cpp b/test/mds/server/mds_test.cpp index 236e526371..1881504452 100644 --- a/test/mds/server/mds_test.cpp +++ b/test/mds/server/mds_test.cpp @@ -20,19 +20,20 @@ * Author: charisu */ +#include "src/mds/server/mds.h" + +#include #include -#include #include -#include +#include #include + #include #include -#include "src/mds/server/mds.h" #include "src/common/concurrent/concurrent.h" -#include "src/common/timeutility.h" #include "src/common/string_util.h" - +#include "src/common/timeutility.h" #include "test/mds/mock/mock_etcdclient.h" using ::curve::common::Thread; @@ -55,18 +56,19 @@ class MDSTest : public ::testing::Test { ASSERT_TRUE(false); } else if (0 == etcdPid) { /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, + * as it may cause deadlock!!! 
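+ * (Reason: the forked child inherits the logging library's internal + * mutexes in whatever state other threads held them at the moment of + * fork(), so the first LOG() call in the child can block forever on a + * lock that no thread in the child will ever release.)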
*/ - ASSERT_EQ(0, execlp("etcd", "etcd", "--listen-client-urls", - "http://localhost:10032", - "--advertise-client-urls", - "http://localhost:10032", "--listen-peer-urls", - "http://localhost:10033", "--name", "testMds", - nullptr)); + ASSERT_EQ( + 0, + execlp("etcd", "etcd", "--listen-client-urls", + "http://localhost:10032", "--advertise-client-urls", + "http://localhost:10032", "--listen-peer-urls", + "http://localhost:10033", "--name", "testMds", nullptr)); exit(0); } - // 一定时间内尝试init直到etcd完全起来 + // Keep trying to init for a while until etcd is fully up auto client = std::make_shared(); EtcdConf conf = {kEtcdAddr, static_cast(strlen(kEtcdAddr)), 1000}; uint64_t now = ::curve::common::TimeUtility::GetTimeofDaySec(); @@ -102,7 +104,7 @@ class MDSTest : public ::testing::Test { }; TEST_F(MDSTest, common) { - // 加载配置 + // Load configuration std::string confPath = "./conf/mds.conf"; auto conf = std::make_shared(); conf->SetConfigPath(confPath); @@ -116,7 +118,7 @@ TEST_F(MDSTest, common) { mds.InitMdsOptions(conf); mds.StartDummy(); - // 从dummy server获取version和mds监听端口 + // Obtain version and mds listening ports from dummy server brpc::Channel httpChannel; brpc::Controller cntl; brpc::ChannelOptions options; @@ -124,12 +126,12 @@ TEST_F(MDSTest, common) { std::string dummyAddr = "127.0.0.1:" + std::to_string(kDummyPort); ASSERT_EQ(0, httpChannel.Init(dummyAddr.c_str(), &options)); - // 测试获取version + // Test obtaining the version cntl.http_request().uri() = dummyAddr + "/vars/curve_version"; httpChannel.CallMethod(NULL, &cntl, NULL, NULL, NULL); ASSERT_FALSE(cntl.Failed()); - // 测试获取mds监听端口 + // Test obtaining the mds listening port cntl.Reset(); cntl.http_request().uri() = dummyAddr + "/vars/mds_config_mds_listen_addr"; httpChannel.CallMethod(NULL, &cntl, NULL, NULL, NULL); @@ -140,13 +142,13 @@ TEST_F(MDSTest, common) { auto pos = attachment.find(":"); ASSERT_NE(std::string::npos, pos); std::string jsonString = attachment.substr(pos + 2); - // 去除两端引号 + // Remove the quotes at both ends jsonString = jsonString.substr(1, jsonString.size() - 2); reader.parse(jsonString, value); std::string mdsAddr = value["conf_value"].asString(); ASSERT_EQ(kMdsAddr, mdsAddr); - // 获取leader状态,此时mds_status应为follower + // Obtain the leader status, at which point mds_status should be follower cntl.Reset(); cntl.http_request().uri() = dummyAddr + "/vars/mds_status"; httpChannel.CallMethod(NULL, &cntl, NULL, NULL, NULL); @@ -156,7 +158,7 @@ TEST_F(MDSTest, common) { mds.StartCompaginLeader(); - // 此时isLeader应为true + // At this point, isLeader should be true cntl.Reset(); cntl.http_request().uri() = dummyAddr + "/vars/is_leader"; ASSERT_FALSE(cntl.Failed()); @@ -164,7 +166,7 @@ TEST_F(MDSTest, common) { cntl.response_attachment().to_string().find("leader")); mds.Init(); - // 启动mds + // Start mds Thread mdsThread(&MDS::Run, &mds); // sleep 5s sleep(5); @@ -172,7 +174,7 @@ TEST_F(MDSTest, common) { // 1、init channel ASSERT_EQ(0, channel_.Init(kMdsAddr.c_str(), nullptr)); - // 2、测试hearbeat接口 + // 2.
Test the heartbeat interface cntl.Reset(); heartbeat::ChunkServerHeartbeatRequest request1; heartbeat::ChunkServerHeartbeatResponse response1; @@ -180,7 +182,7 @@ TEST_F(MDSTest, common) { request1.set_token("123"); request1.set_ip("127.0.0.1"); request1.set_port(8888); - heartbeat::DiskState *diskState = new heartbeat::DiskState(); + heartbeat::DiskState* diskState = new heartbeat::DiskState(); diskState->set_errtype(0); diskState->set_errmsg(""); request1.set_allocated_diskstate(diskState); @@ -193,7 +195,7 @@ TEST_F(MDSTest, common) { stub1.ChunkServerHeartbeat(&cntl, &request1, &response1, nullptr); ASSERT_FALSE(cntl.Failed()); - // 3、测试namespaceService接口 + // 3. Test the namespaceService interface cntl.Reset(); GetFileInfoRequest request2; GetFileInfoResponse response2; @@ -205,7 +207,7 @@ TEST_F(MDSTest, common) { stub2.GetFileInfo(&cntl, &request2, &response2, nullptr); ASSERT_FALSE(cntl.Failed()); - // 4、测试topology接口 + // 4. Testing the topology interface cntl.Reset(); topology::ListPhysicalPoolRequest request3; topology::ListPhysicalPoolResponse response3; @@ -213,7 +215,7 @@ TEST_F(MDSTest, common) { stub3.ListPhysicalPool(&cntl, &request3, &response3, nullptr); ASSERT_FALSE(cntl.Failed()); - // 5、停掉mds + // 5. Stop the MDS uint64_t startTime = curve::common::TimeUtility::GetTimeofDayMs(); mds.Stop(); mdsThread.join(); @@ -250,7 +252,7 @@ TEST(TestParsePoolsetRules, Test) { { // subdir rules ASSERT_TRUE(ParsePoolsetRules( - "/system/:system;/data/:data;/system/sub/:system-sub", &rules)); + "/system/:system;/data/:data;/system/sub/:system-sub", &rules)); ASSERT_EQ(3, rules.size()); ASSERT_EQ("system", rules["/system/"]); ASSERT_EQ("data", rules["/data/"]); @@ -260,8 +262,8 @@ TEST(TestParsePoolsetRules, Test) { TEST_F(MDSTest, TestBlockSize) { using ::testing::_; - using ::testing::Return; using ::testing::Invoke; + using ::testing::Return; auto client = std::make_shared(); @@ -269,8 +271,7 @@ TEST_F(MDSTest, TestBlockSize) { { EXPECT_CALL(*client, Get(_, _)) .WillOnce(Return(EtcdErrCode::EtcdKeyNotExist)); - EXPECT_CALL(*client, Put(_, _)) - .WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*client, Put(_, _)).WillOnce(Return(EtcdErrCode::EtcdOK)); ASSERT_TRUE(CheckOrInsertBlockSize(client.get())); } diff --git a/test/mds/topology/test_topology.cpp b/test/mds/topology/test_topology.cpp index c1e878deb4..59c394cda9 100644 --- a/test/mds/topology/test_topology.cpp +++ b/test/mds/topology/test_topology.cpp @@ -22,25 +22,25 @@ #include -#include "test/mds/topology/mock_topology.h" -#include "src/mds/topology/topology.h" -#include "src/mds/topology/topology_item.h" #include "src/common/configuration.h" #include "src/common/namespace_define.h" +#include "src/mds/topology/topology.h" +#include "src/mds/topology/topology_item.h" +#include "test/mds/topology/mock_topology.h" namespace curve { namespace mds { namespace topology { -using ::testing::Return; -using ::testing::_; -using ::testing::Contains; -using ::testing::SetArgPointee; -using ::testing::SaveArg; -using ::testing::DoAll; using ::curve::common::Configuration; using ::curve::common::kDefaultPoolsetId; using ::curve::common::kDefaultPoolsetName; +using ::testing::_; +using ::testing::Contains; +using ::testing::DoAll; +using ::testing::Return; +using ::testing::SaveArg; +using ::testing::SetArgPointee; class TestTopology : public ::testing::Test { protected: @@ -52,13 +52,11 @@ class TestTopology : public ::testing::Test { tokenGenerator_ = std::make_shared(); storage_ = std::make_shared(); topology_ = 
std::make_shared(idGenerator_, - tokenGenerator_, - storage_); + tokenGenerator_, storage_); const std::unordered_map poolsetMap{ {kDefaultPoolsetId, - {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}} - }; + {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}}}; ON_CALL(*storage_, LoadPoolset(_, _)) .WillByDefault(DoAll( @@ -80,128 +78,90 @@ class TestTopology : public ::testing::Test { const std::string& type = "SSD", const std::string& desc = "descPoolset") { Poolset poolset(id, name, type, desc); - EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePoolset(_)).WillOnce(Return(true)); int ret = topology_->AddPoolset(poolset); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - void PrepareAddLogicalPool(PoolIdType id = 0x01, - const std::string &name = "testLogicalPool", - PoolIdType phyPoolId = 0x11, - LogicalPoolType type = PAGEFILE, - const LogicalPool::RedundanceAndPlaceMentPolicy &rap = - LogicalPool::RedundanceAndPlaceMentPolicy(), - const LogicalPool::UserPolicy &policy = LogicalPool::UserPolicy(), - uint64_t createTime = 0x888 - ) { - LogicalPool pool(id, - name, - phyPoolId, - type, - rap, - policy, - createTime, - true, - true); - - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + void PrepareAddLogicalPool( + PoolIdType id = 0x01, const std::string& name = "testLogicalPool", + PoolIdType phyPoolId = 0x11, LogicalPoolType type = PAGEFILE, + const LogicalPool::RedundanceAndPlaceMentPolicy& rap = + LogicalPool::RedundanceAndPlaceMentPolicy(), + const LogicalPool::UserPolicy& policy = LogicalPool::UserPolicy(), + uint64_t createTime = 0x888) { + LogicalPool pool(id, name, phyPoolId, type, rap, policy, createTime, + true, true); + + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } - void PrepareAddPhysicalPool(PoolIdType id = 0x11, - const std::string &name = "testPhysicalPool", - PoolsetIdType pid = 0x61, - const std::string &desc = "descPhysicalPool", - uint64_t diskCapacity = 0) { - PhysicalPool pool(id, - name, - pid, - desc); + const std::string& name = "testPhysicalPool", + PoolsetIdType pid = 0x61, + const std::string& desc = "descPhysicalPool", + uint64_t diskCapacity = 0) { + PhysicalPool pool(id, name, pid, desc); pool.SetDiskCapacity(diskCapacity); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret); } void PrepareAddZone(ZoneIdType id = 0x21, - const std::string &name = "testZone", - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descZone") { + const std::string& name = "testZone", + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descZone") { Zone zone(id, name, physicalPoolId, desc); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } void PrepareAddServer(ServerIdType id = 0x31, - const std::string &hostName = "testServer", - const std::string &internalHostIp = "testInternalIp", - uint32_t internalPort = 0, - const std::string &externalHostIp = "testExternalIp", - uint32_t externalPort = 0, - ZoneIdType zoneId = 0x21, - PoolIdType physicalPoolId = 0x11, - const 
std::string &desc = "descServer") { - Server server(id, - hostName, - internalHostIp, - internalPort, - externalHostIp, - externalPort, - zoneId, - physicalPoolId, - desc); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + const std::string& hostName = "testServer", + const std::string& internalHostIp = "testInternalIp", + uint32_t internalPort = 0, + const std::string& externalHostIp = "testExternalIp", + uint32_t externalPort = 0, ZoneIdType zoneId = 0x21, + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descServer") { + Server server(id, hostName, internalHostIp, internalPort, + externalHostIp, externalPort, zoneId, physicalPoolId, + desc); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } - void PrepareAddChunkServer(ChunkServerIdType id = 0x41, - const std::string &token = "testToken", - const std::string &diskType = "nvme", - ServerIdType serverId = 0x31, - const std::string &hostIp = "testInternalIp", - uint32_t port = 0, - const std::string &diskPath = "/", - uint64_t diskUsed = 512, - uint64_t diskCapacity = 1024) { - ChunkServer cs(id, - token, - diskType, - serverId, - hostIp, - port, - diskPath); - ChunkServerState state; - state.SetDiskCapacity(diskCapacity); - state.SetDiskUsed(diskUsed); - cs.SetChunkServerState(state); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + void PrepareAddChunkServer( + ChunkServerIdType id = 0x41, const std::string& token = "testToken", + const std::string& diskType = "nvme", ServerIdType serverId = 0x31, + const std::string& hostIp = "testInternalIp", uint32_t port = 0, + const std::string& diskPath = "/", uint64_t diskUsed = 512, + uint64_t diskCapacity = 1024) { + ChunkServer cs(id, token, diskType, serverId, hostIp, port, diskPath); + ChunkServerState state; + state.SetDiskCapacity(diskCapacity); + state.SetDiskUsed(diskUsed); + cs.SetChunkServerState(state); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddServer()"; } - void PrepareAddCopySet(CopySetIdType copysetId, - PoolIdType logicalPoolId, - const std::set &members) { - CopySetInfo cs(logicalPoolId, - copysetId); + void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, + const std::set& members) { + CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; @@ -218,16 +178,12 @@ class TestTopology : public ::testing::Test { TEST_F(TestTopology, test_init_success) { std::vector infos; EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, StorageClusterInfo(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)).WillOnce(Return(true)); const std::unordered_map poolsetMap{ - {kDefaultPoolsetId, - {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}} - }; + {kDefaultPoolsetId, {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}}}; std::unordered_map logicalPoolMap_; std::unordered_map physicalPoolMap_; std::unordered_map zoneMap_; @@ -235,40 
+191,33 @@ TEST_F(TestTopology, test_init_success) { std::unordered_map chunkServerMap_; std::map copySetMap_; - logicalPoolMap_[0x01] = LogicalPool(0x01, "lpool1", 0x11, PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, false, true); + logicalPoolMap_[0x01] = + LogicalPool(0x01, "lpool1", 0x11, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, false, true); physicalPoolMap_[0x11] = PhysicalPool(0x11, "pPool1", 0X61, "des1"); zoneMap_[0x21] = Zone(0x21, "zone1", 0x11, "desc1"); - serverMap_[0x31] = Server(0x31, "server1", "127.0.0.1", 8200, - "127.0.0.1", 8200, 0x21, 0x11, "desc1"); - chunkServerMap_[0x41] = ChunkServer(0x41, "token", "ssd", - 0x31, "127.0.0.1", 8200, "/"); + serverMap_[0x31] = Server(0x31, "server1", "127.0.0.1", 8200, "127.0.0.1", + 8200, 0x21, 0x11, "desc1"); + chunkServerMap_[0x41] = + ChunkServer(0x41, "token", "ssd", 0x31, "127.0.0.1", 8200, "/"); copySetMap_[std::pair(0x01, 0x51)] = CopySetInfo(0x01, 0x51); EXPECT_CALL(*storage_, LoadPoolset(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(poolsetMap), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(poolsetMap), Return(true))); EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(logicalPoolMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(logicalPoolMap_), Return(true))); EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap_), Return(true))); EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(zoneMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(zoneMap_), Return(true))); EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(serverMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(serverMap_), Return(true))); EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(chunkServerMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(chunkServerMap_), Return(true))); EXPECT_CALL(*storage_, LoadCopySet(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(copySetMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(copySetMap_), Return(true))); EXPECT_CALL(*idGenerator_, initPoolsetIdGenerator(_)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); @@ -278,10 +227,8 @@ TEST_F(TestTopology, test_init_success) { EXPECT_CALL(*idGenerator_, initChunkServerIdGenerator(_)); EXPECT_CALL(*idGenerator_, initCopySetIdGenerator(_)); - EXPECT_CALL(*storage_, DeleteLogicalPool(_)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, DeleteCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteLogicalPool(_)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteCopySet(_)).WillOnce(Return(true)); TopologyOption option; int ret = topology_->Init(option); @@ -291,8 +238,7 @@ TEST_F(TestTopology, test_init_success) { TEST_F(TestTopology, test_init_loadClusterFail) { std::vector infos; EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(false))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(false))); TopologyOption option; int ret = topology_->Init(option); @@ -302,11 +248,9 @@ TEST_F(TestTopology, test_init_loadClusterFail) { TEST_F(TestTopology, test_init_StorageClusterInfoFail) { std::vector infos; EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), 
Return(true))); - EXPECT_CALL(*storage_, StorageClusterInfo(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)).WillOnce(Return(false)); TopologyOption option; int ret = topology_->Init(option); @@ -318,11 +262,9 @@ TEST_F(TestTopology, test_init_loadLogicalPoolFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(false)); TopologyOption option; int ret = topology_->Init(option); @@ -334,13 +276,10 @@ TEST_F(TestTopology, test_init_LoadPhysicalPoolFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); @@ -354,15 +293,11 @@ TEST_F(TestTopology, test_init_LoadZoneFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -377,16 +312,11 @@ TEST_F(TestTopology, test_init_LoadServerFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(false)); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -402,19 +332,13 @@ TEST_F(TestTopology, test_init_LoadChunkServerFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - 
.WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -431,21 +355,14 @@ TEST_F(TestTopology, test_init_LoadCopysetFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadCopySet(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadCopySet(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -462,18 +379,11 @@ TEST_F(TestTopology, test_AddLogicalPool_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - LogicalPool pool(0x01, - "test1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(0x01, "test1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); @@ -487,15 +397,9 @@ TEST_F(TestTopology, test_AddLogicalPool_IdDuplicated) { PoolIdType id = 0x01; PrepareAddLogicalPool(id, "test1", physicalPoolId); - LogicalPool pool(id, - "test2", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(id, "test2", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); int ret = topology_->AddLogicalPool(pool); @@ -506,18 +410,11 @@ TEST_F(TestTopology, test_AddLogicalPool_StorageFail) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - LogicalPool pool(0x01, - "test1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - 
true, - true); + LogicalPool pool(0x01, "test1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(false)); int ret = topology_->AddLogicalPool(pool); @@ -528,16 +425,9 @@ TEST_F(TestTopology, test_AddLogicalPool_PhysicalPoolNotFound) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - LogicalPool pool(0x01, - "test1", - ++physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); - + LogicalPool pool(0x01, "test1", ++physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); int ret = topology_->AddLogicalPool(pool); @@ -546,26 +436,18 @@ TEST_F(TestTopology, test_AddLogicalPool_PhysicalPoolNotFound) { TEST_F(TestTopology, test_AddPhysicalPool_success) { PrepareAddPoolset(); - PhysicalPool pool(0x11, - "test1", - 0X61, - "desc"); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + PhysicalPool pool(0x11, "test1", 0X61, "desc"); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - TEST_F(TestTopology, test_AddPhysicalPool_IdDuplicated) { PrepareAddPoolset(); PoolIdType id = 0x11; PoolsetIdType pid = 0x61; - PhysicalPool pool(id, - "test1", - pid, - "desc"); + PhysicalPool pool(id, "test1", pid, "desc"); PrepareAddPhysicalPool(id); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeIdDuplicated, ret); @@ -573,12 +455,8 @@ TEST_F(TestTopology, test_AddPhysicalPool_IdDuplicated) { TEST_F(TestTopology, test_AddPhysicalPool_StorageFail) { PrepareAddPoolset(); - PhysicalPool pool(0x11, - "test1", - 0X61, - "desc"); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(false)); + PhysicalPool pool(0x11, "test1", 0X61, "desc"); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(false)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -590,13 +468,9 @@ TEST_F(TestTopology, test_AddZone_success) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); - Zone zone(zoneId, - "testZone", - physicalPoolId, - "desc"); + Zone zone(zoneId, "testZone", physicalPoolId, "desc"); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); @@ -616,10 +490,7 @@ TEST_F(TestTopology, test_AddZone_IdDuplicated) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "test", physicalPoolId); - Zone zone(zoneId, - "testZone", - physicalPoolId, - "desc"); + Zone zone(zoneId, "testZone", physicalPoolId, "desc"); int ret = topology_->AddZone(zone); @@ -631,13 +502,9 @@ TEST_F(TestTopology, test_AddZone_StorageFail) { PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - Zone zone(0x21, - "testZone", - physicalPoolId, - "desc"); + Zone zone(0x21, "testZone", physicalPoolId, "desc"); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(false)); int ret = topology_->AddZone(zone); @@ -649,11 +516,7 @@ TEST_F(TestTopology, test_AddZone_PhysicalPoolNotFound) { PoolIdType physicalPoolId = 0x11; 
ZoneIdType zoneId = 0x21; - Zone zone(zoneId, - "testZone", - physicalPoolId, - "desc"); - + Zone zone(zoneId, "testZone", physicalPoolId, "desc"); int ret = topology_->AddZone(zone); @@ -668,18 +531,10 @@ TEST_F(TestTopology, test_AddServer_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "test", physicalPoolId); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -701,15 +556,8 @@ TEST_F(TestTopology, test_AddServer_IdDuplicated) { PrepareAddZone(zoneId, "test", physicalPoolId); PrepareAddServer(id); - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); @@ -724,46 +572,29 @@ TEST_F(TestTopology, test_AddServer_StorageFail) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "test", physicalPoolId); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(false)); - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } - TEST_F(TestTopology, test_AddServer_ZoneNotFound) { PrepareAddPoolset(); ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeZoneNotFound, ret); } - TEST_F(TestTopology, test_AddChunkServers_success) { PrepareAddPoolset(); ChunkServerIdType csId = 0x41; @@ -773,20 +604,13 @@ TEST_F(TestTopology, test_AddChunkServers_success) { PrepareAddZone(); PrepareAddServer(serverId); - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); ChunkServerState state; state.SetDiskCapacity(1024); state.SetDiskUsed(512); cs.SetChunkServerState(state); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); @@ -812,18 +636,9 @@ TEST_F(TestTopology, test_AddChunkServer_IdDuplicated) { PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token2", - "ssd", - serverId); - - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + PrepareAddChunkServer(csId, "token2", "ssd", serverId); + + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); int ret = topology_->AddChunkServer(cs); @@ -839,16 +654,9 @@ TEST_F(TestTopology, test_AddChunkServer_StorageFail) { PrepareAddZone(); PrepareAddServer(serverId); - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); - EXPECT_CALL(*storage_, 
StorageChunkServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(false)); int ret = topology_->AddChunkServer(cs); @@ -860,13 +668,7 @@ TEST_F(TestTopology, test_AddChunkServer_ServerNotFound) { ChunkServerIdType csId = 0x41; ServerIdType serverId = 0x31; - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); int ret = topology_->AddChunkServer(cs); @@ -880,8 +682,7 @@ TEST_F(TestTopology, test_RemoveLogicalPool_success) { PoolIdType id = 0x01; PrepareAddLogicalPool(id, "name", physicalPoolId); - EXPECT_CALL(*storage_, DeleteLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->RemoveLogicalPool(id); @@ -904,8 +705,7 @@ TEST_F(TestTopology, test_RemoveLogicalPool_StorageFail) { PoolIdType id = 0x01; PrepareAddLogicalPool(id, "name", physicalPoolId); - EXPECT_CALL(*storage_, DeleteLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteLogicalPool(_)).WillOnce(Return(false)); int ret = topology_->RemoveLogicalPool(id); @@ -917,8 +717,7 @@ TEST_F(TestTopology, test_RemovePhysicalPool_success) { PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); - EXPECT_CALL(*storage_, DeletePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeletePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->RemovePhysicalPool(poolId); @@ -939,8 +738,7 @@ TEST_F(TestTopology, test_RemovePhysicalPool_StorageFail) { PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); - EXPECT_CALL(*storage_, DeletePhysicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeletePhysicalPool(_)).WillOnce(Return(false)); int ret = topology_->RemovePhysicalPool(poolId); @@ -952,12 +750,9 @@ TEST_F(TestTopology, test_RemoveZone_success) { ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); - PrepareAddZone(zoneId, - "testZone", - poolId); + PrepareAddZone(zoneId, "testZone", poolId); - EXPECT_CALL(*storage_, DeleteZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteZone(_)).WillOnce(Return(true)); int ret = topology_->RemoveZone(zoneId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -982,8 +777,7 @@ TEST_F(TestTopology, test_RemoveZone_StorageFail) { PrepareAddPhysicalPool(); PrepareAddZone(zoneId); - EXPECT_CALL(*storage_, DeleteZone(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteZone(_)).WillOnce(Return(false)); int ret = topology_->RemoveZone(zoneId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -995,16 +789,9 @@ TEST_F(TestTopology, test_RemoveServer_success) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "testSever", - "ip1", - 0, - "ip2", - 0, - zoneId); + PrepareAddServer(serverId, "testSever", "ip1", 0, "ip2", 0, zoneId); - EXPECT_CALL(*storage_, DeleteServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteServer(_)).WillOnce(Return(true)); int ret = topology_->RemoveServer(serverId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1030,16 +817,9 @@ TEST_F(TestTopology, test_RemoveServer_StorageFail) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "testSever", - "ip1", - 0, - "ip2", - 0, - zoneId); + PrepareAddServer(serverId, "testSever", "ip1", 0, "ip2", 0, zoneId); - EXPECT_CALL(*storage_, DeleteServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, 
DeleteServer(_)).WillOnce(Return(false)); int ret = topology_->RemoveServer(serverId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -1052,18 +832,14 @@ TEST_F(TestTopology, test_RemoveChunkServer_success) { PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId); + PrepareAddChunkServer(csId, "token", "ssd", serverId); - int ret = topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId); + int ret = + topology_->UpdateChunkServerRwState(ChunkServerStatus::RETIRED, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - EXPECT_CALL(*storage_, DeleteChunkServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteChunkServer(_)).WillOnce(Return(true)); ret = topology_->RemoveChunkServer(csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1075,7 +851,6 @@ TEST_F(TestTopology, test_RemoveChunkServer_success) { ASSERT_TRUE(it == csList.end()); } - TEST_F(TestTopology, test_RemoveChunkServer_ChunkSeverNotFound) { ChunkServerIdType csId = 0x41; @@ -1090,19 +865,14 @@ TEST_F(TestTopology, test_RemoveChunkServer_StorageFail) { PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId); + PrepareAddChunkServer(csId, "token", "ssd", serverId); - int ret = topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId); + int ret = + topology_->UpdateChunkServerRwState(ChunkServerStatus::RETIRED, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - - EXPECT_CALL(*storage_, DeleteChunkServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteChunkServer(_)).WillOnce(Return(false)); ret = topology_->RemoveChunkServer(csId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -1113,26 +883,15 @@ TEST_F(TestTopology, UpdateLogicalPool_success) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->UpdateLogicalPool(pool); @@ -1146,15 +905,9 @@ TEST_F(TestTopology, UpdateLogicalPool_success) { TEST_F(TestTopology, UpdateLogicalPool_LogicalPoolNotFound) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); int ret = topology_->UpdateLogicalPool(pool); @@ -1166,26 +919,15 @@ TEST_F(TestTopology, UpdateLogicalPool_StorageFail) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - 
PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(false)); int ret = topology_->UpdateLogicalPool(pool); @@ -1197,24 +939,19 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); LogicalPool pool2; topology_->GetLogicalPool(logicalPoolId, &pool2); ASSERT_EQ(AllocateStatus::ALLOW, pool2.GetStatus()); // update to deny - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); - int ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::DENY, logicalPoolId); + int ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::DENY, + logicalPoolId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1223,11 +960,10 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { ASSERT_EQ(AllocateStatus::DENY, pool3.GetStatus()); // update to allow - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); - ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::ALLOW, logicalPoolId); + ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::ALLOW, + logicalPoolId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1239,18 +975,12 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_LogicalPoolNotFound) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - int ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::ALLOW, logicalPoolId); + int ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::ALLOW, + logicalPoolId); ASSERT_EQ(kTopoErrCodeLogicalPoolNotFound, ret); } @@ -1260,19 +990,14 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_StorageFail) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - 
LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(false)); - int ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::ALLOW, logicalPoolId); + int ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::ALLOW, + logicalPoolId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -1285,8 +1010,7 @@ TEST_F(TestTopology, TestUpdateLogicalPoolScanState) { PrepareAddLogicalPool(lpid, "name", ppid); auto set_state = [&](PoolIdType lpid, bool scanEnable) { - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); auto retCode = topology_->UpdateLogicalPoolScanState(lpid, scanEnable); ASSERT_EQ(retCode, kTopoErrCodeSuccess); }; @@ -1309,14 +1033,12 @@ TEST_F(TestTopology, TestUpdateLogicalPoolScanState) { check_state(lpid, true); // CASE 4: logical pool not found -> set scan state fail - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .Times(0); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).Times(0); auto retCode = topology_->UpdateLogicalPoolScanState(lpid + 1, true); ASSERT_EQ(retCode, kTopoErrCodeLogicalPoolNotFound); // CASE 5: update storage fail -> set scan state fail - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(false)); retCode = topology_->UpdateLogicalPoolScanState(lpid, true); ASSERT_EQ(retCode, kTopoErrCodeStorgeFail); } @@ -1325,18 +1047,11 @@ TEST_F(TestTopology, UpdatePhysicalPool_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PoolsetIdType poolsetId = 0x61; - PrepareAddPhysicalPool(physicalPoolId, - "name1", - poolsetId, - "desc1"); + PrepareAddPhysicalPool(physicalPoolId, "name1", poolsetId, "desc1"); - PhysicalPool newPool(physicalPoolId, - "name1", - poolsetId, - "desc2"); + PhysicalPool newPool(physicalPoolId, "name1", poolsetId, "desc2"); - EXPECT_CALL(*storage_, UpdatePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdatePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->UpdatePhysicalPool(newPool); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1349,69 +1064,45 @@ TEST_F(TestTopology, UpdatePhysicalPool_success) { TEST_F(TestTopology, UpdatePhysicalPool_PhysicalPoolNotFound) { PoolIdType physicalPoolId = 0x11; PoolIdType pid = 0x61; - PhysicalPool newPool(physicalPoolId, - "name1", - pid, - "desc2"); + PhysicalPool newPool(physicalPoolId, "name1", pid, "desc2"); int ret = topology_->UpdatePhysicalPool(newPool); ASSERT_EQ(kTopoErrCodePhysicalPoolNotFound, ret); } - TEST_F(TestTopology, UpdatePhysicalPool_StorageFail) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PoolsetIdType poolsetId = 0x61; - PrepareAddPhysicalPool(physicalPoolId, - "name1", - poolsetId, - "desc1"); + PrepareAddPhysicalPool(physicalPoolId, "name1", poolsetId, "desc1"); - PhysicalPool newPool(physicalPoolId, - "name1", - poolsetId, - "desc2"); + PhysicalPool newPool(physicalPoolId, "name1", poolsetId, "desc2"); - EXPECT_CALL(*storage_, UpdatePhysicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdatePhysicalPool(_)).WillOnce(Return(false)); int ret = topology_->UpdatePhysicalPool(newPool); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } - - TEST_F(TestTopology, 
UpdateZone_success) { PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddZone(zoneId, - "name1", - physicalPoolId, - "desc1"); - - Zone newZone(zoneId, - "name1", - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateZone(_)) - .WillOnce(Return(true)); + PrepareAddZone(zoneId, "name1", physicalPoolId, "desc1"); + + Zone newZone(zoneId, "name1", physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateZone(_)).WillOnce(Return(true)); int ret = topology_->UpdateZone(newZone); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - TEST_F(TestTopology, UpdateZone_ZoneNotFound) { ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; - Zone newZone(zoneId, - "name1", - physicalPoolId, - "desc2"); + Zone newZone(zoneId, "name1", physicalPoolId, "desc2"); int ret = topology_->UpdateZone(newZone); ASSERT_EQ(kTopoErrCodeZoneNotFound, ret); @@ -1422,18 +1113,11 @@ TEST_F(TestTopology, UpdateZone_StorageFail) { ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddZone(zoneId, - "name1", - physicalPoolId, - "desc1"); - - Zone newZone(zoneId, - "name1", - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateZone(_)) - .WillOnce(Return(false)); + PrepareAddZone(zoneId, "name1", physicalPoolId, "desc1"); + + Zone newZone(zoneId, "name1", physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateZone(_)).WillOnce(Return(false)); int ret = topology_->UpdateZone(newZone); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -1445,28 +1129,13 @@ TEST_F(TestTopology, UpdateServer_success) { ServerIdType serverId = 0x31; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc1"); - - Server newServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateServer(_)) - .WillOnce(Return(true)); + PrepareAddServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc1"); + + Server newServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateServer(_)).WillOnce(Return(true)); int ret = topology_->UpdateServer(newServer); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1477,15 +1146,8 @@ TEST_F(TestTopology, UpdateServer_ServerNotFound) { ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; - Server newServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc2"); + Server newServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc2"); int ret = topology_->UpdateServer(newServer); ASSERT_EQ(kTopoErrCodeServerNotFound, ret); @@ -1498,34 +1160,18 @@ TEST_F(TestTopology, UpdateServer_StorageFail) { ServerIdType serverId = 0x31; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc1"); - - Server newServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateServer(_)) - .WillOnce(Return(false)); + PrepareAddServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc1"); + + Server newServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateServer(_)).WillOnce(Return(false)); int ret = topology_->UpdateServer(newServer); 
ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } - TEST_F(TestTopology, UpdateChunkServerTopo_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; @@ -1535,24 +1181,11 @@ TEST_F(TestTopology, UpdateChunkServerTopo_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); - - ChunkServer newCs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/abc"); - - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "ip1", 100, "/"); + + ChunkServer newCs(csId, "token", "ssd", serverId, "ip1", 100, "/abc"); + + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeSuccess, ret); } @@ -1566,28 +1199,15 @@ TEST_F(TestTopology, UpdateChunkServerTopo_UpdateServerSuccess) { ChunkServerIdType csId = 0x41; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); - PrepareAddServer(serverId, "server1", - "ip1", 0, "ip2", 0, zoneId, physicalPoolId); - PrepareAddServer(serverId2, "server2", - "ip3", 0, "ip4", 0, zoneId, physicalPoolId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); - - ChunkServer newCs(csId, - "token", - "ssd", - serverId2, - "ip3", - 100, - "/abc"); - - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + PrepareAddServer(serverId, "server1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId); + PrepareAddServer(serverId2, "server2", "ip3", 0, "ip4", 0, zoneId, + physicalPoolId); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "ip1", 100, "/"); + + ChunkServer newCs(csId, "token", "ssd", serverId2, "ip3", 100, "/abc"); + + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeSuccess, ret); } @@ -1596,13 +1216,7 @@ TEST_F(TestTopology, UpdateChunkServerTopo_ChunkServerNotFound) { ServerIdType serverId = 0x31; ChunkServerIdType csId = 0x41; - ChunkServer newCs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/abc"); + ChunkServer newCs(csId, "token", "ssd", serverId, "ip1", 100, "/abc"); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); @@ -1617,24 +1231,11 @@ TEST_F(TestTopology, UpdateChunkServerTopo_StorageFail) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); - - ChunkServer newCs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/abc"); - - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(false)); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "ip1", 100, "/"); + + ChunkServer newCs(csId, "token", "ssd", serverId, "ip1", 100, "/abc"); + + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(false)); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -1648,11 +1249,7 @@ TEST_F(TestTopology, UpdateChunkServerDiskStatus_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); PhysicalPool pool; ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); @@ -1662,17 +1259,16 @@ TEST_F(TestTopology, 
UpdateChunkServerDiskStatus_success) { csState.SetDiskState(DISKERROR); csState.SetDiskCapacity(100); - int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); + int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(100, pool.GetDiskCapacity()); - // 只刷一次 - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + // Only flush once + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep and wait for the database flush sleep(5); topology_->Stop(); } @@ -1684,7 +1280,7 @@ TEST_F(TestTopology, UpdateChunkServerDiskStatus_ChunkServerNotFound) { csState.SetDiskState(DISKERROR); csState.SetDiskCapacity(100); - int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); + int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); } @@ -1697,22 +1293,17 @@ TEST_F(TestTopology, UpdateChunkServerRwStateToStorage_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); ChunkServerStatus rwState; rwState = ChunkServerStatus::PENDDING; - int ret = topology_->UpdateChunkServerRwState(rwState, csId); + int ret = topology_->UpdateChunkServerRwState(rwState, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - // 只刷一次 - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + // Only flush once + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep and wait for the database flush sleep(5); topology_->Stop(); } @@ -1726,60 +1317,50 @@ TEST_F(TestTopology, UpdateChunkServerRwStateTestPhysicalPoolCapacity_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); PhysicalPool pool; ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // READWRITE -> RETIRED - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::RETIRED, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(0, pool.GetDiskCapacity()); // RETIRED -> PENDDING - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::PENDDING, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::PENDDING, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // PENDDING -> RETIRED - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::RETIRED, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(0, pool.GetDiskCapacity()); // RETIRED -> READWRITE - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::READWRITE, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::READWRITE, csId));
ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // READWRITE -> PENDDING - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::PENDDING, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::PENDDING, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // PENDDING -> READWRITE - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::READWRITE, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::READWRITE, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); @@ -1790,7 +1371,7 @@ TEST_F(TestTopology, UpdateChunkServerRwState_ChunkServerNotFound) { ChunkServerStatus rwState; rwState = ChunkServerStatus::PENDDING; - int ret = topology_->UpdateChunkServerRwState(rwState, csId); + int ret = topology_->UpdateChunkServerRwState(rwState, csId); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); } @@ -1803,13 +1384,9 @@ TEST_F(TestTopology, UpdateChunkServerStartUpTime_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); uint64_t time = 0x1234567812345678; - int ret = topology_->UpdateChunkServerStartUpTime(time, csId); + int ret = topology_->UpdateChunkServerStartUpTime(time, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); ChunkServer cs; @@ -1819,7 +1396,7 @@ TEST_F(TestTopology, UpdateChunkServerStartUpTime_success) { TEST_F(TestTopology, UpdateChunkServerStartUpTime_ChunkServerNotFound) { ChunkServerIdType csId = 0x41; - int ret = topology_->UpdateChunkServerStartUpTime(1000, csId); + int ret = topology_->UpdateChunkServerStartUpTime(1000, csId); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); } @@ -1831,19 +1408,18 @@ TEST_F(TestTopology, FindLogicalPool_success) { std::string physicalPoolName = "PhysiclPool1"; PrepareAddPhysicalPool(physicalPoolId, physicalPoolName); PrepareAddLogicalPool(logicalPoolId, logicalPoolName, physicalPoolId); - PoolIdType ret = topology_->FindLogicalPool(logicalPoolName, - physicalPoolName); + PoolIdType ret = + topology_->FindLogicalPool(logicalPoolName, physicalPoolName); ASSERT_EQ(logicalPoolId, ret); } TEST_F(TestTopology, FindLogicalPool_LogicalPoolNotFound) { std::string logicalPoolName = "logicalPool1"; std::string physicalPoolName = "PhysiclPool1"; - PoolIdType ret = topology_->FindLogicalPool(logicalPoolName, - physicalPoolName); + PoolIdType ret = + topology_->FindLogicalPool(logicalPoolName, physicalPoolName); - ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindPhysicalPool_success) { @@ -1858,11 +1434,9 @@ TEST_F(TestTopology, FindPhysicalPool_success) { TEST_F(TestTopology, FindPhysicalPool_PhysicalPoolNotFound) { std::string physicalPoolName = "physicalPoolName"; PoolIdType ret = topology_->FindPhysicalPool(physicalPoolName); - ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), ret); } - TEST_F(TestTopology, FindZone_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; @@ -1879,8 +1453,7 @@ TEST_F(TestTopology, FindZone_ZoneNotFound) { std::string physicalPoolName = "physicalPoolName"; std::string zoneName = "zoneName"; ZoneIdType ret =
topology_->FindZone(zoneName, physicalPoolName); - ASSERT_EQ(static_cast<ZoneIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<ZoneIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindZone_success2) { @@ -1900,8 +1473,7 @@ TEST_F(TestTopology, FindZone_ZoneNotFound2) { std::string physicalPoolName = "physicalPoolName"; std::string zoneName = "zoneName"; ZoneIdType ret = topology_->FindZone(zoneName, physicalPoolId); - ASSERT_EQ(static_cast<ZoneIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<ZoneIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindServerByHostName_success) { @@ -1910,8 +1482,7 @@ TEST_F(TestTopology, FindServerByHostName_success) { std::string hostName = "host1"; PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName); + PrepareAddServer(serverId, hostName); ServerIdType ret = topology_->FindServerByHostName(hostName); ASSERT_EQ(serverId, ret); @@ -1920,8 +1491,7 @@ TEST_F(TestTopology, FindServerByHostName_success) { TEST_F(TestTopology, FindServerByHostName_ServerNotFound) { std::string hostName = "host1"; ServerIdType ret = topology_->FindServerByHostName(hostName); - ASSERT_EQ(static_cast<ServerIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<ServerIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindServerByHostIpPort_success) { @@ -1932,12 +1502,7 @@ TEST_F(TestTopology, FindServerByHostIpPort_success) { std::string externalHostIp = "ip2"; PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); ServerIdType ret = topology_->FindServerByHostIpPort(internalHostIp, 0); ASSERT_EQ(serverId, ret); @@ -1954,16 +1519,10 @@ TEST_F(TestTopology, FindSeverByHostIp_ServerNotFound) { std::string externalHostIp = "ip2"; PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); ServerIdType ret = topology_->FindServerByHostIpPort("ip3", 0); - ASSERT_EQ(static_cast<ServerIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<ServerIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindChunkServerNotRetired_success) { @@ -1977,21 +1536,11 @@ TEST_F(TestTopology, FindChunkServerNotRetired_success) { PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/", - port); - - ChunkServerIdType ret = topology_->FindChunkServerNotRetired( - internalHostIp, port); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/", port); + + ChunkServerIdType ret = + topology_->FindChunkServerNotRetired(internalHostIp, port); ASSERT_EQ(csId, ret); } @@ -2006,22 +1555,11 @@ TEST_F(TestTopology, FindChunkServerNotRetired_ChunkServerNotFound) { PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/", - port); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/", port); ChunkServerIdType ret = topology_->FindChunkServerNotRetired("ip3", port); - ASSERT_EQ(static_cast<ChunkServerIdType>( - UNINTIALIZE_ID), ret); + ASSERT_EQ(static_cast<ChunkServerIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, GetLogicalPool_success) { @@
-2089,7 +1627,6 @@ TEST_F(TestTopology, GetServer_success) { ASSERT_EQ(true, ret); } - TEST_F(TestTopology, GetServer_GetServerNotFound) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; @@ -2133,7 +1670,6 @@ TEST_F(TestTopology, GetChunkServer_ChunkServerNotFound) { ASSERT_EQ(false, ret); } - TEST_F(TestTopology, GetChunkServerInCluster_success) { PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; @@ -2371,8 +1907,8 @@ TEST_F(TestTopology, GetChunkServerInLogicalPool_success) { PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "name", physicalPoolId); - PrepareAddServer( - serverId, "name2", "ip1", 0, "ip2", 0, zoneId, physicalPoolId); + PrepareAddServer(serverId, "name2", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId); PrepareAddChunkServer(csId, "token", "ssd", serverId); PrepareAddChunkServer(csId2, "token", "ssd", serverId); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId); @@ -2452,12 +1988,12 @@ TEST_F(TestTopology, AddCopySet_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2470,8 +2006,7 @@ TEST_F(TestTopology, AddCopySet_success) { CopySetInfo csInfo(logicalPoolId, copysetId); csInfo.SetCopySetMembers(replicas); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(csInfo); ASSERT_EQ(kTopoErrCodeSuccess, ret); } @@ -2486,12 +2021,12 @@ TEST_F(TestTopology, AddCopySet_IdDuplicated) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2519,12 +2054,12 @@ TEST_F(TestTopology, AddCopySet_LogicalPoolNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 
0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2551,12 +2086,12 @@ TEST_F(TestTopology, AddCopySet_StorageFail) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2569,8 +2104,7 @@ TEST_F(TestTopology, AddCopySet_StorageFail) { CopySetInfo csInfo(logicalPoolId, copysetId); csInfo.SetCopySetMembers(replicas); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(false)); int ret = topology_->AddCopySet(csInfo); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -2585,12 +2119,12 @@ TEST_F(TestTopology, RemoveCopySet_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2601,8 +2135,7 @@ TEST_F(TestTopology, RemoveCopySet_success) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - EXPECT_CALL(*storage_, DeleteCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteCopySet(_)).WillOnce(Return(true)); int ret = topology_->RemoveCopySet( std::pair<PoolIdType, CopySetIdType>(logicalPoolId, copysetId)); @@ -2620,12 +2153,12 @@ TEST_F(TestTopology, RemoveCopySet_storageFail) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); -
PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2636,8 +2169,7 @@ TEST_F(TestTopology, RemoveCopySet_storageFail) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - EXPECT_CALL(*storage_, DeleteCopySet(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteCopySet(_)).WillOnce(Return(false)); int ret = topology_->RemoveCopySet( std::pair<PoolIdType, CopySetIdType>(logicalPoolId, copysetId)); @@ -2655,12 +2187,12 @@ TEST_F(TestTopology, RemoveCopySet_CopySetNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2687,12 +2219,12 @@ TEST_F(TestTopology, UpdateCopySetTopo_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2716,11 +2248,10 @@ TEST_F(TestTopology, UpdateCopySetTopo_success) { ASSERT_EQ(kTopoErrCodeSuccess, ret); - // 只刷一次 - EXPECT_CALL(*storage_, UpdateCopySet(_)) - .WillOnce(Return(true)); + // Only flush once + EXPECT_CALL(*storage_, UpdateCopySet(_)).WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep and wait for the database flush sleep(5); topology_->Stop(); } @@ -2735,12 +2266,12 @@ TEST_F(TestTopology, UpdateCopySetTopo_CopySetNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0,
"127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2775,12 +2306,12 @@ TEST_F(TestTopology, GetCopySet_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2809,12 +2340,12 @@ TEST_F(TestTopology, GetCopySet_CopysetNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2843,12 +2374,12 @@ TEST_F(TestTopology, GetCopySetsInLogicalPool_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2860,7 +2391,7 @@ TEST_F(TestTopology, GetCopySetsInLogicalPool_success) { PrepareAddCopySet(copysetId, logicalPoolId, replicas); std::vector csList = - 
topology_->GetCopySetsInLogicalPool(logicalPoolId); + topology_->GetCopySetsInLogicalPool(logicalPoolId); ASSERT_EQ(1, csList.size()); } @@ -2874,12 +2405,12 @@ TEST_F(TestTopology, GetCopySetsInCluster_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2890,8 +2421,7 @@ TEST_F(TestTopology, GetCopySetsInCluster_success) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - std::vector<CopySetKey> csList = - topology_->GetCopySetsInCluster(); + std::vector<CopySetKey> csList = topology_->GetCopySetsInCluster(); ASSERT_EQ(1, csList.size()); } @@ -2905,12 +2435,12 @@ TEST_F(TestTopology, GetCopySetsInChunkServer_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2921,44 +2451,33 @@ TEST_F(TestTopology, GetCopySetsInChunkServer_success) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - std::vector<CopySetKey> csList = - topology_->GetCopySetsInChunkServer(0x41); + std::vector<CopySetKey> csList = topology_->GetCopySetsInChunkServer(0x41); ASSERT_EQ(1, csList.size()); } TEST_F(TestTopology, test_create_default_poolset) { - EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, StorageClusterInfo(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadClusterInfo(_)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)).WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPoolset(_, _)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPoolset(_, _)).WillOnce(Return(true)); Poolset poolset; EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce( - DoAll(SaveArg<0>(&poolset), Return(true))); + .WillOnce(DoAll(SaveArg<0>(&poolset), Return(true))); std::unordered_map<PoolIdType, PhysicalPool> physicalPoolMap{ {1, {1, "pool1", UNINTIALIZE_ID, ""}}, {2, {2, "pool2", UNINTIALIZE_ID, ""}}, }; EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap), - SetArgPointee<1>(2), +
.WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap), SetArgPointee<1>(2), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadCopySet(_, _)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadCopySet(_, _)).WillOnce(Return(true)); int rc = topology_->Init({}); ASSERT_EQ(kTopoErrCodeSuccess, rc); diff --git a/test/mds/topology/test_topology_chunk_allocator.cpp b/test/mds/topology/test_topology_chunk_allocator.cpp index a1ea8aa942..2f3c59e089 100644 --- a/test/mds/topology/test_topology_chunk_allocator.cpp +++ b/test/mds/topology/test_topology_chunk_allocator.cpp @@ -20,30 +20,28 @@ * Author: xuchaojie */ -#include <gtest/gtest.h> #include <gmock/gmock.h> +#include <gtest/gtest.h> #include <memory> - -#include "src/mds/topology/topology_chunk_allocator.h" -#include "src/mds/common/mds_define.h" -#include "test/mds/topology/mock_topology.h" -#include "test/mds/mock/mock_topology.h" #include "proto/nameserver2.pb.h" #include "src/common/timeutility.h" +#include "src/mds/common/mds_define.h" +#include "src/mds/topology/topology_chunk_allocator.h" #include "test/mds/mock/mock_alloc_statistic.h" +#include "test/mds/mock/mock_topology.h" +#include "test/mds/topology/mock_topology.h" namespace curve { namespace mds { namespace topology { -using ::testing::Return; using ::testing::_; using ::testing::AnyOf; -using ::testing::SetArgPointee; using ::testing::Invoke; - +using ::testing::Return; +using ::testing::SetArgPointee; class TestTopologyChunkAllocator : public ::testing::Test { protected: @@ -54,21 +52,17 @@ class TestTopologyChunkAllocator : public ::testing::Test { tokenGenerator_ = std::make_shared<MockTokenGenerator>(); storage_ = std::make_shared<MockStorage>(); topology_ = std::make_shared<TopologyImpl>(idGenerator_, - tokenGenerator_, - storage_); + tokenGenerator_, storage_); TopologyOption option; topoStat_ = std::make_shared<TopologyStatImpl>(topology_); - chunkFilePoolAllocHelp_ = - std::make_shared<ChunkFilePoolAllocHelp>(); + chunkFilePoolAllocHelp_ = std::make_shared<ChunkFilePoolAllocHelp>(); chunkFilePoolAllocHelp_->UpdateChunkFilePoolAllocConfig(true, true, 15); option.PoolUsagePercentLimit = 85; option.enableLogicalPoolStatus = true; allocStatistic_ = std::make_shared<MockAllocStatistic>(); - testObj_ = std::make_shared<TopologyChunkAllocatorImpl>(topology_, - allocStatistic_, - topoStat_, - chunkFilePoolAllocHelp_, - option); + testObj_ = std::make_shared<TopologyChunkAllocatorImpl>( + topology_, allocStatistic_, topoStat_, chunkFilePoolAllocHelp_, + option); } virtual void TearDown() { @@ -85,53 +79,37 @@ class TestTopologyChunkAllocator : public ::testing::Test { const std::string& type = "SSD", const std::string& desc = "descPoolset") { Poolset poolset(pid, name, type, desc); - EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePoolset(_)).WillOnce(Return(true)); int ret = topology_->AddPoolset(poolset); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - void PrepareAddLogicalPool(PoolIdType id = 0x01, - const std::string &name = "testLogicalPool", - PoolIdType phyPoolId = 0x11, - LogicalPoolType type = PAGEFILE, - const LogicalPool::RedundanceAndPlaceMentPolicy &rap = - LogicalPool::RedundanceAndPlaceMentPolicy(), - const
LogicalPool::UserPolicy &policy = LogicalPool::UserPolicy(), - uint64_t createTime = 0x888 - ) { - LogicalPool pool(id, - name, - phyPoolId, - type, - rap, - policy, - createTime, - true, - true); - - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + void PrepareAddLogicalPool( + PoolIdType id = 0x01, const std::string& name = "testLogicalPool", + PoolIdType phyPoolId = 0x11, LogicalPoolType type = PAGEFILE, + const LogicalPool::RedundanceAndPlaceMentPolicy& rap = + LogicalPool::RedundanceAndPlaceMentPolicy(), + const LogicalPool::UserPolicy& policy = LogicalPool::UserPolicy(), + uint64_t createTime = 0x888) { + LogicalPool pool(id, name, phyPoolId, type, rap, policy, createTime, + true, true); + + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } - void PrepareAddPhysicalPool(PoolIdType id = 0x11, - const std::string &name = "testPhysicalPool", - PoolsetIdType pid = 0x61, - const std::string &desc = "descPhysicalPool", - uint64_t diskCapacity = 10240) { - PhysicalPool pool(id, - name, - pid, - desc); + const std::string& name = "testPhysicalPool", + PoolsetIdType pid = 0x61, + const std::string& desc = "descPhysicalPool", + uint64_t diskCapacity = 10240) { + PhysicalPool pool(id, name, pid, desc); pool.SetDiskCapacity(diskCapacity); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) @@ -139,78 +117,56 @@ class TestTopologyChunkAllocator : public ::testing::Test { } void PrepareAddZone(ZoneIdType id = 0x21, - const std::string &name = "testZone", - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descZone") { + const std::string& name = "testZone", + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descZone") { Zone zone(id, name, physicalPoolId, desc); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); - ASSERT_EQ(kTopoErrCodeSuccess, ret) << - "should have PrepareAddPhysicalPool()"; + ASSERT_EQ(kTopoErrCodeSuccess, ret) + << "should have PrepareAddPhysicalPool()"; } void PrepareAddServer(ServerIdType id = 0x31, - const std::string &hostName = "testServer", - const std::string &internalHostIp = "testInternalIp", - const std::string &externalHostIp = "testExternalIp", - ZoneIdType zoneId = 0x21, - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descServer") { - Server server(id, - hostName, - internalHostIp, - 0, - externalHostIp, - 0, - zoneId, - physicalPoolId, - desc); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + const std::string& hostName = "testServer", + const std::string& internalHostIp = "testInternalIp", + const std::string& externalHostIp = "testExternalIp", + ZoneIdType zoneId = 0x21, + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descServer") { + Server server(id, hostName, internalHostIp, 0, externalHostIp, 0, + zoneId, physicalPoolId, desc); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } - void PrepareAddChunkServer(ChunkServerIdType id = 0x41, - const std::string &token = "testToken", - const 
std::string &diskType = "nvme", - ServerIdType serverId = 0x31, - const std::string &hostIp = "testInternalIp", - uint32_t port = 0, - const std::string &diskPath = "/", - uint64_t diskUsed = 512, - uint64_t diskCapacity = 1024) { - ChunkServer cs(id, - token, - diskType, - serverId, - hostIp, - port, - diskPath); - ChunkServerState state; - state.SetDiskCapacity(diskCapacity); - state.SetDiskUsed(diskUsed); - cs.SetChunkServerState(state); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + void PrepareAddChunkServer( + ChunkServerIdType id = 0x41, const std::string& token = "testToken", + const std::string& diskType = "nvme", ServerIdType serverId = 0x31, + const std::string& hostIp = "testInternalIp", uint32_t port = 0, + const std::string& diskPath = "/", uint64_t diskUsed = 512, + uint64_t diskCapacity = 1024) { + ChunkServer cs(id, token, diskType, serverId, hostIp, port, diskPath); + ChunkServerState state; + state.SetDiskCapacity(diskCapacity); + state.SetDiskUsed(diskUsed); + cs.SetChunkServerState(state); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); ChunkServerStat stat; - stat.chunkFilepoolSize = diskCapacity-diskUsed; + stat.chunkFilepoolSize = diskCapacity - diskUsed; topoStat_->UpdateChunkServerStat(id, stat); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddServer()"; } - void PrepareAddCopySet(CopySetIdType copysetId, - PoolIdType logicalPoolId, - const std::set &members, - bool availFlag = true) { - CopySetInfo cs(logicalPoolId, - copysetId); + void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, + const std::set& members, + bool availFlag = true) { + CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); cs.SetAvailableFlag(availFlag); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; @@ -228,7 +184,7 @@ class TestTopologyChunkAllocator : public ::testing::Test { }; TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRandomInSingleLogicalPool_success) { + Test_AllocateChunkRandomInSingleLogicalPool_success) { std::vector infos; PrepareAddPoolset(); @@ -247,7 +203,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -258,12 +214,8 @@ TEST_F(TestTopologyChunkAllocator, EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_TRUE(ret); @@ -275,20 +227,16 @@ TEST_F(TestTopologyChunkAllocator, } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRandomInSingleLogicalPool_logicalPoolNotFound) { + Test_AllocateChunkRandomInSingleLogicalPool_logicalPoolNotFound) { std::vector infos; - bool ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 1, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + 
INODE_PAGEFILE, "testPoolset", 1, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRandomInSingleLogicalPool_shouldfail) { + Test_AllocateChunkRandomInSingleLogicalPool_shouldfail) { std::vector infos; PoolIdType logicalPoolId = 0x01; @@ -304,7 +252,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddServer(0x32, "server2", "127.0.0.1", "127.0.0.1", 0x22, 0x11); PrepareAddServer(0x33, "server3", "127.0.0.1", "127.0.0.1", 0x23, 0x11); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -315,12 +263,8 @@ TEST_F(TestTopologyChunkAllocator, EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_FALSE(ret); @@ -328,12 +272,8 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); - ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_TRUE(ret); @@ -343,18 +283,14 @@ TEST_F(TestTopologyChunkAllocator, topoStat_->UpdateChunkServerStat(0x42, stat); topoStat_->UpdateChunkServerStat(0x43, stat); - ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_GetRemainingSpaceInLogicalPool_UseChunkFilePool) { + Test_GetRemainingSpaceInLogicalPool_UseChunkFilePool) { std::vector infos; PoolIdType logicalPoolId = 0x01; @@ -370,7 +306,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddServer(0x32, "server2", "127.0.0.1", "127.0.0.1", 0x22, 0x11); PrepareAddServer(0x33, "server3", "127.0.0.1", "127.0.0.1", 0x23, 0x11); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -385,16 +321,16 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); std::map enoughsize; - std::vector pools ={0x01}; + std::vector pools = {0x01}; for (int i = 0; i < 10; i++) { - testObj_->GetRemainingSpaceInLogicalPool(pools, - &enoughsize, "testPoolset"); + testObj_->GetRemainingSpaceInLogicalPool(pools, &enoughsize, + "testPoolset"); ASSERT_EQ(enoughsize[logicalPoolId], 1109); } } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_success) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_success) { std::vector infos; PrepareAddPoolset(); @@ -412,7 +348,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -423,16 +359,11 @@ 
TEST_F(TestTopologyChunkAllocator, PrepareAddCopySet(0x54, logicalPoolId, replicas); PrepareAddCopySet(0x55, logicalPoolId, replicas); - EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 3, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 3, 1024, &infos); ASSERT_TRUE(ret); @@ -443,12 +374,8 @@ TEST_F(TestTopologyChunkAllocator, // second time std::vector infos2; - ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 3, - 1024, - &infos2); + ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 3, 1024, &infos2); ASSERT_TRUE(ret); @@ -493,20 +420,16 @@ TEST_F(TestTopologyChunkAllocator, } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolNotFound) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolNotFound) { std::vector infos; - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 1, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 1, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_copysetEmpty) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_copysetEmpty) { std::vector infos; PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; @@ -514,18 +437,14 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddPhysicalPool(physicalPoolId); PrepareAddLogicalPool(logicalPoolId); - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 1, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 1, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolIsDENY) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolIsDENY) { std::vector infos; PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; @@ -542,7 +461,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -553,27 +472,23 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddCopySet(0x54, logicalPoolId, replicas); PrepareAddCopySet(0x55, logicalPoolId, replicas); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); - topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::DENY, logicalPoolId); + topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::DENY, + logicalPoolId); EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 3, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 3, 1024, &infos); ASSERT_FALSE(ret); } TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { - // 2000个copyset分配100000次,每次分配64个chunk + // 
2000个copyset分配100000次,每次分配64个chunk + // Allocate 100000 times from 2000 copysets, with 64 chunks allocated + // each time std::vector copySetIds; std::map copySetMap; for (int i = 0; i < 2000; i++) { @@ -584,12 +499,8 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { for (int i = 0; i < 100000; i++) { int chunkNumber = 64; std::vector infos; - bool ret = - AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( - copySetIds, - 1, - chunkNumber, - &infos); + bool ret = AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( + copySetIds, 1, chunkNumber, &infos); ASSERT_TRUE(ret); ASSERT_EQ(chunkNumber, infos.size()); for (int j = 0; j < chunkNumber; j++) { @@ -598,7 +509,7 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { } int minCount = copySetMap[0]; int maxCount = copySetMap[0]; - for (auto &pair : copySetMap) { + for (auto& pair : copySetMap) { if (pair.second > maxCount) { maxCount = pair.second; } @@ -610,10 +521,8 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { double minPercent = static_cast(avg - minCount) / avg; double maxPercent = static_cast(maxCount - avg) / avg; LOG(INFO) << "AllocateChunkRandomInSingleLogicalPool poc" - <<", minCount = " << minCount - <<", maxCount = " << maxCount - << ", avg = " << avg - << ", minPercent = " << minPercent + << ", minCount = " << minCount << ", maxCount = " << maxCount + << ", avg = " << avg << ", minPercent = " << minPercent << ", maxPercent = " << maxPercent; ASSERT_TRUE(minPercent < 0.1); @@ -621,7 +530,8 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { } TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolTps) { - // 2000个copyset分配100000次,每次分配64个chunk + // Allocate 100000 times from 2000 copysets, with 64 chunks allocated + // each time std::vector copySetIds; for (int i = 0; i < 2000; i++) { copySetIds.push_back(i); } @@ -632,23 +542,19 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolTps) { int chunkNumber = 64; std::vector infos; AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( - copySetIds, - 1, - chunkNumber, - &infos); + copySetIds, 1, chunkNumber, &infos); } uint64_t stoptime = curve::common::TimeUtility::GetTimeofDayUs(); double usetime = stoptime - startime; - double tps = 1000000.0 * 100000.0/usetime; + double tps = 1000000.0 * 100000.0 / usetime; - std::cout << "TestAllocateChunkRandomInSingleLogicalPool, TPS = " - << tps + std::cout << "TestAllocateChunkRandomInSingleLogicalPool, TPS = " << tps << " * 64 chunk per second."; } TEST(TestAllocateChunkPolicy, - TestAllocateChunkRoundRobinInSingleLogicalPoolSuccess) { + TestAllocateChunkRoundRobinInSingleLogicalPoolSuccess) { std::vector copySetIds; std::map copySetMap; for (int i = 0; i < 20; i++) { @@ -657,13 +563,8 @@ TEST(TestAllocateChunkPolicy, uint32_t nextIndex = 15; int chunkNumber = 10; std::vector infos; - bool ret = - AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( - copySetIds, - 1, - &nextIndex, - chunkNumber, - &infos); + bool ret = AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( + copySetIds, 1, &nextIndex, chunkNumber, &infos); ASSERT_TRUE(ret); ASSERT_EQ(5, nextIndex); ASSERT_EQ(chunkNumber, infos.size()); @@ -680,26 +581,20 @@ TEST(TestAllocateChunkPolicy, } TEST(TestAllocateChunkPolicy, - TestAllocateChunkRoundRobinInSingleLogicalPoolEmpty) { + TestAllocateChunkRoundRobinInSingleLogicalPoolEmpty) { std::vector copySetIds; std::map copySetMap; uint32_t nextIndex = 15; int 
chunkNumber = 10; std::vector infos; - bool ret = - AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( - copySetIds, - 1, - &nextIndex, - chunkNumber, - &infos); + bool ret = AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( + copySetIds, 1, &nextIndex, chunkNumber, &infos); ASSERT_FALSE(ret); ASSERT_EQ(15, nextIndex); ASSERT_EQ(0, infos.size()); } -TEST(TestAllocateChunkPolicy, - TestChooseSingleLogicalPoolByWeightPoc) { +TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolByWeightPoc) { std::map poolWeightMap; std::map poolMap; for (int i = 0; i < 5; i++) { @@ -709,8 +604,8 @@ TEST(TestAllocateChunkPolicy, for (int i = 0; i < 100000; i++) { PoolIdType pid; - AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( - poolWeightMap, &pid); + AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight(poolWeightMap, + &pid); poolMap[pid]++; } @@ -719,7 +614,8 @@ TEST(TestAllocateChunkPolicy, ASSERT_TRUE(poolMap[1] < poolMap[2]); ASSERT_TRUE(poolMap[2] < poolMap[3]); ASSERT_TRUE(poolMap[3] < poolMap[4]); - // 5个池大概分布因该是0, 10000,20000,30000,40000 + // The distribution across the 5 pools should be roughly 0, 10000, 20000, + // 30000, 40000 LOG(INFO) << "pool0 : " << poolMap[0] << std::endl << "pool1 : " << poolMap[1] << std::endl << "pool2 : " << poolMap[2] << std::endl @@ -727,8 +623,7 @@ TEST(TestAllocateChunkPolicy, << "pool4 : " << poolMap[4] << std::endl; } -TEST(TestAllocateChunkPolicy, - TestChooseSingleLogicalPoolByWeightPoc2) { +TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolByWeightPoc2) { std::map poolMap; poolMap[0] = 100000; poolMap[1] = 90000; @@ -738,12 +633,11 @@ TEST(TestAllocateChunkPolicy, for (int i = 0; i < 100000; i++) { PoolIdType pid; - AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( - poolMap, &pid); + AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight(poolMap, &pid); poolMap[pid] -= 1; } - // 测试是否能逐渐拉平pool之间差距 + // Verify that the gaps between the pools are gradually evened out LOG(INFO) << "pool0 : " << poolMap[0] << std::endl << "pool1 : " << poolMap[1] << std::endl << "pool2 : " << poolMap[2] << std::endl @@ -751,9 +645,8 @@ TEST(TestAllocateChunkPolicy, << "pool4 : " << poolMap[4] << std::endl; } -// 测试能否随机到每个pool -TEST(TestAllocateChunkPolicy, - TestChooseSingleLogicalPoolRandom) { +// Verify that random selection can reach every pool +TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolRandom) { std::vector pools = {1, 2, 3, 4, 5}; std::map allocMap; allocMap[1] = 0; diff --git a/test/mds/topology/test_topology_metric.cpp b/test/mds/topology/test_topology_metric.cpp index 2a38263784..fd1112a4ec 100644 --- a/test/mds/topology/test_topology_metric.cpp +++ b/test/mds/topology/test_topology_metric.cpp @@ -21,24 +21,24 @@ */ #include -#include #include +#include #include "src/mds/topology/topology_metric.h" -#include "test/mds/topology/mock_topology.h" #include "test/mds/mock/mock_alloc_statistic.h" +#include "test/mds/mock/mock_topology.h" +#include "test/mds/topology/mock_topology.h" namespace curve { namespace mds { namespace topology { -using ::testing::Return; using ::testing::_; using ::testing::AnyOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; +using ::testing::SetArgPointee; class TestTopologyMetric : public ::testing::Test { public: @@ -48,10 +48,9 @@ class TestTopologyMetric : public ::testing::Test { idGenerator_ = std::make_shared(); tokenGenerator_ = std::make_shared(); storage_ = std::make_shared(); - // 
使用真实的topology + // Using real topology topology_ = std::make_shared(idGenerator_, - tokenGenerator_, - storage_); + tokenGenerator_, storage_); topologyStat_ = std::make_shared(); allocStatistic_ = std::make_shared(); @@ -76,122 +75,87 @@ class TestTopologyMetric : public ::testing::Test { const std::string& type = "SSD", const std::string& desc = "descPoolset") { Poolset poolset(pid, name, type, desc); - EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePoolset(_)).WillOnce(Return(true)); int ret = topology_->AddPoolset(poolset); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - void PrepareAddLogicalPool(PoolIdType id = 0x01, - const std::string &name = "testLogicalPool", - PoolIdType phyPoolId = 0x11, - LogicalPoolType type = PAGEFILE, - const LogicalPool::RedundanceAndPlaceMentPolicy &rap = - LogicalPool::RedundanceAndPlaceMentPolicy(), - const LogicalPool::UserPolicy &policy = LogicalPool::UserPolicy(), - uint64_t createTime = 0x888 - ) { - LogicalPool pool(id, - name, - phyPoolId, - type, - rap, - policy, - createTime, - true, - true); - - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + void PrepareAddLogicalPool( + PoolIdType id = 0x01, const std::string& name = "testLogicalPool", + PoolIdType phyPoolId = 0x11, LogicalPoolType type = PAGEFILE, + const LogicalPool::RedundanceAndPlaceMentPolicy& rap = + LogicalPool::RedundanceAndPlaceMentPolicy(), + const LogicalPool::UserPolicy& policy = LogicalPool::UserPolicy(), + uint64_t createTime = 0x888) { + LogicalPool pool(id, name, phyPoolId, type, rap, policy, createTime, + true, true); + + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } - void PrepareAddPhysicalPool(PoolIdType id = 0x11, - const std::string &name = "testPhysicalPool", - PoolsetIdType pid = 0x61, - const std::string &desc = "descPhysicalPool") { - PhysicalPool pool(id, - name, - pid, - desc); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + const std::string& name = "testPhysicalPool", + PoolsetIdType pid = 0x61, + const std::string& desc = "descPhysicalPool") { + PhysicalPool pool(id, name, pid, desc); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret); } void PrepareAddZone(ZoneIdType id = 0x21, - const std::string &name = "testZone", - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descZone") { + const std::string& name = "testZone", + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descZone") { Zone zone(id, name, physicalPoolId, desc); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); - ASSERT_EQ(kTopoErrCodeSuccess, ret) << - "should have PrepareAddPhysicalPool()"; + ASSERT_EQ(kTopoErrCodeSuccess, ret) + << "should have PrepareAddPhysicalPool()"; } void PrepareAddServer(ServerIdType id = 0x31, - const std::string &hostName = "testServer", - const std::string &internalHostIp = "testInternalIp", - const std::string &externalHostIp = "testExternalIp", - ZoneIdType zoneId = 0x21, - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descServer") { - Server server(id, - hostName, - internalHostIp, - 0, - externalHostIp, - 0, - zoneId, - physicalPoolId, - desc); - 
EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + const std::string& hostName = "testServer", + const std::string& internalHostIp = "testInternalIp", + const std::string& externalHostIp = "testExternalIp", + ZoneIdType zoneId = 0x21, + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descServer") { + Server server(id, hostName, internalHostIp, 0, externalHostIp, 0, + zoneId, physicalPoolId, desc); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } void PrepareAddChunkServer(ChunkServerIdType id = 0x41, - const std::string &token = "testToken", - const std::string &diskType = "nvme", - ServerIdType serverId = 0x31, - const std::string &hostIp = "testInternalIp", - uint32_t port = 0, - const std::string &diskPath = "/") { - ChunkServer cs(id, - token, - diskType, - serverId, - hostIp, - port, - diskPath); - ChunkServerState st; - st.SetDiskCapacity(100 * 1024); - st.SetDiskUsed(10 * 1024); - cs.SetChunkServerState(st); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + const std::string& token = "testToken", + const std::string& diskType = "nvme", + ServerIdType serverId = 0x31, + const std::string& hostIp = "testInternalIp", + uint32_t port = 0, + const std::string& diskPath = "/") { + ChunkServer cs(id, token, diskType, serverId, hostIp, port, diskPath); + ChunkServerState st; + st.SetDiskCapacity(100 * 1024); + st.SetDiskUsed(10 * 1024); + cs.SetChunkServerState(st); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddServer()"; } - void PrepareAddCopySet(CopySetIdType copysetId, - PoolIdType logicalPoolId, - const std::set &members) { - CopySetInfo cs(logicalPoolId, - copysetId); + void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, + const std::set& members) { + CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; @@ -207,7 +171,7 @@ class TestTopologyMetric : public ::testing::Test { std::shared_ptr testObj_; }; -TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { +TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { PoolsetIdType poolsetId = 0x61; PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; @@ -229,14 +193,13 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { rap.pageFileRAP.replicaNum = 3; PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE, rap); + PAGEFILE, rap); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - ChunkServerStat stat1; CopysetStat cstat1; stat1.leaderCount = 1; @@ -258,12 +221,10 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { stat1.copysetStats.push_back(cstat1); EXPECT_CALL(*topologyStat_, GetChunkServerStat(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), Return(true))); EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(20 * 
1024), - Return(true))); + .WillOnce(DoAll(SetArgPointee<1>(20 * 1024), Return(true))); testObj_->UpdateTopologyMetrics(); @@ -283,9 +244,9 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1024, gChunkServerMetrics[0x41]->chunkSizeUsedBytes.get_value()); ASSERT_EQ(1024, gChunkServerMetrics[0x41]->chunkSizeLeftBytes.get_value()); ASSERT_EQ(1024, - gChunkServerMetrics[0x41]->chunkSizeTrashedBytes.get_value()); + gChunkServerMetrics[0x41]->chunkSizeTrashedBytes.get_value()); ASSERT_EQ(1024 * 3, - gChunkServerMetrics[0x41]->chunkSizeTotalBytes.get_value()); + gChunkServerMetrics[0x41]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(2, gChunkServerMetrics[0x42]->scatterWidth.get_value()); ASSERT_EQ(1, gChunkServerMetrics[0x42]->copysetNum.get_value()); @@ -301,9 +262,9 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1024, gChunkServerMetrics[0x42]->chunkSizeUsedBytes.get_value()); ASSERT_EQ(1024, gChunkServerMetrics[0x42]->chunkSizeLeftBytes.get_value()); ASSERT_EQ(1024, - gChunkServerMetrics[0x42]->chunkSizeTrashedBytes.get_value()); + gChunkServerMetrics[0x42]->chunkSizeTrashedBytes.get_value()); ASSERT_EQ(1024 * 3, - gChunkServerMetrics[0x42]->chunkSizeTotalBytes.get_value()); + gChunkServerMetrics[0x42]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(2, gChunkServerMetrics[0x43]->scatterWidth.get_value()); ASSERT_EQ(1, gChunkServerMetrics[0x43]->copysetNum.get_value()); @@ -319,43 +280,75 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1024, gChunkServerMetrics[0x43]->chunkSizeUsedBytes.get_value()); ASSERT_EQ(1024, gChunkServerMetrics[0x43]->chunkSizeLeftBytes.get_value()); ASSERT_EQ(1024, - gChunkServerMetrics[0x43]->chunkSizeTrashedBytes.get_value()); + gChunkServerMetrics[0x43]->chunkSizeTrashedBytes.get_value()); ASSERT_EQ(1024 * 3, - gChunkServerMetrics[0x43]->chunkSizeTotalBytes.get_value()); + gChunkServerMetrics[0x43]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(1, gLogicalPoolMetrics.size()); - ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId]->serverNum.get_value()); //NOLINT - ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId]->chunkServerNum.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNum.get_value()); //NOLINT - ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId]->scatterWidthAvg.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->scatterWidthVariance.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->scatterWidthStandardDeviation.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->scatterWidthRange.get_value()); //NOLINT - ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId]->scatterWidthMin.get_value()); //NOLINT - ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId]->scatterWidthMax.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNumAvg.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->copysetNumVariance.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->copysetNumStandardDeviation.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->copysetNumRange.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNumMin.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNumMax.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumAvg.get_value()); //NOLINT - ASSERT_EQ(0, 
gLogicalPoolMetrics[logicalPoolId]->leaderNumVariance.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumStandardDeviation.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumRange.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumMin.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumMax.get_value()); //NOLINT - ASSERT_EQ(100 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->diskCapacity.get_value()); //NOLINT - ASSERT_EQ(20 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->diskAlloc.get_value()); //NOLINT - ASSERT_EQ(10 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->diskUsed.get_value()); //NOLINT - - ASSERT_EQ(1024 * 3, + ASSERT_EQ( + 3, + gLogicalPoolMetrics[logicalPoolId]->serverNum.get_value()); // NOLINT + ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId] + ->chunkServerNum.get_value()); // NOLINT + ASSERT_EQ( + 1, + gLogicalPoolMetrics[logicalPoolId]->copysetNum.get_value()); // NOLINT + ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthAvg.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthVariance.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthStandardDeviation.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthRange.get_value()); // NOLINT + ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthMin.get_value()); // NOLINT + ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthMax.get_value()); // NOLINT + ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumAvg.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumVariance.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumStandardDeviation.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumRange.get_value()); // NOLINT + ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumMin.get_value()); // NOLINT + ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumMax.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumAvg.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumVariance.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumStandardDeviation.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumRange.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumMin.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumMax.get_value()); // NOLINT + ASSERT_EQ(100 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId] + ->diskCapacity.get_value()); // NOLINT + ASSERT_EQ( + 20 * 1024 * 3, + gLogicalPoolMetrics[logicalPoolId]->diskAlloc.get_value()); // NOLINT + ASSERT_EQ( + 10 * 1024 * 3, + gLogicalPoolMetrics[logicalPoolId]->diskUsed.get_value()); // NOLINT + + ASSERT_EQ( + 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->chunkSizeUsedBytes.get_value()); - ASSERT_EQ(1024 * 3, + ASSERT_EQ( + 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->chunkSizeLeftBytes.get_value()); - ASSERT_EQ(1024 * 3, + ASSERT_EQ( + 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->chunkSizeTrashedBytes.get_value()); - ASSERT_EQ(1024 * 9, + ASSERT_EQ( + 1024 * 9, gLogicalPoolMetrics[logicalPoolId]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(3, 
gLogicalPoolMetrics[logicalPoolId]->readIOPS.get_value()); @@ -372,7 +365,7 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1, gClusterMetrics->copysetNum.get_value()); } -TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { +TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; @@ -396,7 +389,6 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - ChunkServerStat stat1; CopysetStat cstat1; stat1.leaderCount = 1; @@ -414,8 +406,7 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { stat1.copysetStats.push_back(cstat1); EXPECT_CALL(*topologyStat_, GetChunkServerStat(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), Return(true))); testObj_->UpdateTopologyMetrics(); diff --git a/test/resources.list b/test/resources.list index 9be11dbb07..20b047da17 100644 --- a/test/resources.list +++ b/test/resources.list @@ -18,30 +18,30 @@ Used port list: # client - 9101: session service 测试 - 9102: applyindex service 测试 - 9103: snapshot service 测试 - 9104: client端其他测试 - 9105: client workflow测试mds占用 - 9106: client workflow测试chunkserver占用 - 9107: client workflow测试chunkserver占用 - 9108: client workflow测试chunkserver占用 - 9109: request scheduler测试占用 - 9110/9111/9112: TestLibcbdLibcurve测试占用 - 9115/9116/9117: TestLibcurveInterface测试占用 - - 9120: mds 接口测试 - 9121: mds 接口测试 - 9122: mds 接口测试 - 9123: mds 接口测试 - 9130: metric测试 - 9131: metric测试 - 9132: metric测试 - 9140: metric测试 - 9141: metric测试 - 9142: metric测试 - 9150/9151 ChunkserverUnstableTest - 19151/19110/19111/19112 curveClient测试 + 9101: session service test + 9102: applyindex service test + 9103: snapshot service test + 9104: other client-side tests + 9105: client workflow test, used by MDS + 9106: client workflow test, used by chunkserver + 9107: client workflow test, used by chunkserver + 9108: client workflow test, used by chunkserver + 9109: used by the request scheduler test + 9110/9111/9112: used by the TestLibcbdLibcurve test + 9115/9116/9117: used by the TestLibcurveInterface test + + 9120: MDS interface test + 9121: MDS interface test + 9122: MDS interface test + 9123: MDS interface test + 9130: metric test + 9131: metric test + 9132: metric test + 9140: metric test + 9141: metric test + 9142: metric test + 9150/9151: ChunkserverUnstableTest + 19151/19110/19111/19112: curveClient test client_test_unittest: 21000 diff --git a/test/snapshotcloneserver/test_clone_core.cpp b/test/snapshotcloneserver/test_clone_core.cpp index f57c2d15c0..882905855d 100644 --- a/test/snapshotcloneserver/test_clone_core.cpp +++ b/test/snapshotcloneserver/test_clone_core.cpp @@ -20,25 +20,24 @@ * Author: xuchaojie */ -#include #include +#include +#include "src/common/location_operator.h" +#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/clone/clone_core.h" #include "src/snapshotcloneserver/clone/clone_task.h" -#include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/common/location_operator.h" - #include "test/snapshotcloneserver/mock_snapshot_server.h" using ::curve::common::LocationOperator; -using ::testing::Return; using ::testing::_; -using ::testing::AnyOf; using ::testing::AllOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; +using 
::testing::AnyOf; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace snapshotcloneserver { @@ -51,10 +50,8 @@ class TestCloneCoreImpl : public ::testing::Test { virtual ~TestCloneCoreImpl() {} virtual void SetUp() { - snapshotRef_ = - std::make_shared(); - cloneRef_ = - std::make_shared(); + snapshotRef_ = std::make_shared(); + cloneRef_ = std::make_shared(); client_ = std::make_shared(); metaStore_ = std::make_shared(); dataStore_ = std::make_shared(); @@ -65,14 +62,9 @@ class TestCloneCoreImpl : public ::testing::Test { option.recoverChunkConcurrency = 2; option.clientAsyncMethodRetryTimeSec = 1; option.clientAsyncMethodRetryIntervalMs = 500; - core_ = std::make_shared(client_, - metaStore_, - dataStore_, - snapshotRef_, - cloneRef_, - option); - EXPECT_CALL(*client_, Mkdir(_, _)) - .WillOnce(Return(LIBCURVE_ERROR::OK)); + core_ = std::make_shared( + client_, metaStore_, dataStore_, snapshotRef_, cloneRef_, option); + EXPECT_CALL(*client_, Mkdir(_, _)).WillOnce(Return(LIBCURVE_ERROR::OK)); ASSERT_EQ(core_->Init(), 0); } @@ -86,66 +78,47 @@ class TestCloneCoreImpl : public ::testing::Test { } protected: - // 辅助mock函数 + // Auxiliary mock function void MockBuildFileInfoFromSnapshotSuccess( std::shared_ptr task); - void MockBuildFileInfoFromFileSuccess( - std::shared_ptr task); + void MockBuildFileInfoFromFileSuccess(std::shared_ptr task); - void MockCreateCloneFileSuccess( - std::shared_ptr task); + void MockCreateCloneFileSuccess(std::shared_ptr task); - void MockCloneMetaSuccess( - std::shared_ptr task); + void MockCloneMetaSuccess(std::shared_ptr task); - void MockCreateCloneChunkSuccess( - std::shared_ptr task); + void MockCreateCloneChunkSuccess(std::shared_ptr task); - void MockCompleteCloneMetaSuccess( - std::shared_ptr task); + void MockCompleteCloneMetaSuccess(std::shared_ptr task); - void MockRecoverChunkSuccess( - std::shared_ptr task); + void MockRecoverChunkSuccess(std::shared_ptr task); - void MockChangeOwnerSuccess( - std::shared_ptr task); + void MockChangeOwnerSuccess(std::shared_ptr task); - void MockRenameCloneFileSuccess( - std::shared_ptr task); + void MockRenameCloneFileSuccess(std::shared_ptr task); - void MockCompleteCloneFileSuccess( - std::shared_ptr task); + void MockCompleteCloneFileSuccess(std::shared_ptr task); - void MockBuildFileInfoFromSnapshotFail( - std::shared_ptr task); + void MockBuildFileInfoFromSnapshotFail(std::shared_ptr task); - void MockBuildFileInfoFromFileFail( - std::shared_ptr task); + void MockBuildFileInfoFromFileFail(std::shared_ptr task); - void MockCreateCloneFileFail( - std::shared_ptr task); + void MockCreateCloneFileFail(std::shared_ptr task); - void MockCloneMetaFail( - std::shared_ptr task); + void MockCloneMetaFail(std::shared_ptr task); - void MockCreateCloneChunkFail( - std::shared_ptr task); + void MockCreateCloneChunkFail(std::shared_ptr task); - void MockCompleteCloneMetaFail( - std::shared_ptr task); + void MockCompleteCloneMetaFail(std::shared_ptr task); - void MockRecoverChunkFail( - std::shared_ptr task); + void MockRecoverChunkFail(std::shared_ptr task); - void MockChangeOwnerFail( - std::shared_ptr task); + void MockChangeOwnerFail(std::shared_ptr task); - void MockRenameCloneFileFail( - std::shared_ptr task); + void MockRenameCloneFileFail(std::shared_ptr task); - void MockCompleteCloneFileFail( - std::shared_ptr task); + void MockCompleteCloneFileFail(std::shared_ptr task); protected: std::shared_ptr core_; @@ -157,9 +130,8 @@ class 
TestCloneCoreImpl : public ::testing::Test { SnapshotCloneServerOptions option; }; - TEST_F(TestCloneCoreImpl, TestClonePreForSnapSuccess) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -171,16 +143,13 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapSuccess) { SnapshotInfo snap("id1", "user1", "file1", "snap1"); snap.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); - EXPECT_CALL(*metaStore_, AddCloneInfo(_)) - .WillOnce(Return(kErrCodeSuccess)); + EXPECT_CALL(*metaStore_, AddCloneInfo(_)).WillOnce(Return(kErrCodeSuccess)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(1, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -188,35 +157,25 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapSuccess) { } TEST_F(TestCloneCoreImpl, TestClonePreForSnapTaskExist) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; CloneInfo cloneInfoOut; std::vector cloneInfoList; - CloneInfo info1("taskid1", - user, - CloneTaskType::kClone, - source, - destination, - kDefaultPoolset, - 100, - 101, - 0, - CloneFileType::kSnapshot, - lazyFlag, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo info1("taskid1", user, CloneTaskType::kClone, source, destination, + kDefaultPoolset, 100, 101, 0, CloneFileType::kSnapshot, + lazyFlag, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cloneInfoList.push_back(info1); EXPECT_CALL(*metaStore_, GetCloneInfoByFileName(destination, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cloneInfoList), - Return(kErrCodeSuccess))); + .WillOnce( + DoAll(SetArgPointee<1>(cloneInfoList), Return(kErrCodeSuccess))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeTaskExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -224,35 +183,25 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapTaskExist) { } TEST_F(TestCloneCoreImpl, TestClonePreForSnapFailOnFileExist) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; CloneInfo cloneInfoOut; std::vector cloneInfoList; - CloneInfo info1("taskid1", - user, - CloneTaskType::kRecover, - source, - destination, - "", - 100, - 101, - 0, - CloneFileType::kSnapshot, - lazyFlag, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo info1("taskid1", user, CloneTaskType::kRecover, source, + destination, "", 100, 101, 0, CloneFileType::kSnapshot, + lazyFlag, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cloneInfoList.push_back(info1); EXPECT_CALL(*metaStore_, GetCloneInfoByFileName(destination, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cloneInfoList), - Return(kErrCodeSuccess))); + .WillOnce( + 
DoAll(SetArgPointee<1>(cloneInfoList), Return(kErrCodeSuccess))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeFileExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -260,7 +209,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapFailOnFileExist) { } TEST_F(TestCloneCoreImpl, TestClonePreForFileSuccess) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -283,15 +232,13 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSuccess) { fInfo.filestatus = FileStatus::Created; fInfo.poolset = kDefaultPoolset; EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) - .WillOnce(DoAll(SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); - EXPECT_CALL(*metaStore_, AddCloneInfo(_)) - .WillOnce(Return(kErrCodeSuccess)); + EXPECT_CALL(*metaStore_, AddCloneInfo(_)).WillOnce(Return(kErrCodeSuccess)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -299,7 +246,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSuccess) { } TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidSnapshot) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -311,13 +258,11 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidSnapshot) { SnapshotInfo snap("id1", "user2", "file1", "snap1"); snap.SetStatus(Status::pending); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeInvalidSnapshot, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -325,7 +270,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidSnapshot) { } TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidUser) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -336,20 +281,18 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidUser) { SnapshotInfo snap("id1", "user2", "file1", "snap1"); snap.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, 
destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeInvalidUser, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreAddCloneInfoFail) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -360,23 +303,21 @@ TEST_F(TestCloneCoreImpl, TestClonePreAddCloneInfoFail) { SnapshotInfo snap("id1", "user1", "file1", "snap1"); snap.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); EXPECT_CALL(*metaStore_, AddCloneInfo(_)) .WillOnce(Return(kErrCodeInternalError)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeInternalError, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreForFileNotExist) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -390,16 +331,16 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileNotExist) { EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) .WillOnce(Return(-LIBCURVE_ERROR::NOTEXIST)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeFileNotExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreForFileFail) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -413,16 +354,16 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileFail) { EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) .WillOnce(Return(-LIBCURVE_ERROR::FAILED)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeInternalError, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreDestinationExist) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -431,16 +372,16 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationExist) { EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _)) .WillOnce(Return(LIBCURVE_ERROR::OK)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int 
ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeFileExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreDestinationAndTaskExist) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -448,42 +389,30 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationAndTaskExist) { uint64_t destId = 10086; std::vector cloneInfoList; - CloneInfo info1("taskid1", - user, - CloneTaskType::kClone, - source, - destination, - kDefaultPoolset, - 100, - destId, - 0, - CloneFileType::kFile, - lazyFlag, - CloneStep::kRecoverChunk, - CloneStatus::metaInstalled); + CloneInfo info1("taskid1", user, CloneTaskType::kClone, source, destination, + kDefaultPoolset, 100, destId, 0, CloneFileType::kFile, + lazyFlag, CloneStep::kRecoverChunk, + CloneStatus::metaInstalled); cloneInfoList.push_back(info1); EXPECT_CALL(*metaStore_, GetCloneInfoByFileName(destination, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cloneInfoList), - Return(kErrCodeSuccess))); + .WillOnce( + DoAll(SetArgPointee<1>(cloneInfoList), Return(kErrCodeSuccess))); FInfo fInfo; fInfo.id = destId; EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeTaskExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreDestinationExistButInodeidNotEqual) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -491,42 +420,30 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationExistButInodeidNotEqual) { uint64_t destId = 10086; std::vector cloneInfoList; - CloneInfo info1("taskid1", - user, - CloneTaskType::kClone, - source, - destination, - kDefaultPoolset, - 100, - destId, - 0, - CloneFileType::kFile, - lazyFlag, - CloneStep::kRecoverChunk, - CloneStatus::metaInstalled); + CloneInfo info1("taskid1", user, CloneTaskType::kClone, source, destination, + kDefaultPoolset, 100, destId, 0, CloneFileType::kFile, + lazyFlag, CloneStep::kRecoverChunk, + CloneStatus::metaInstalled); cloneInfoList.push_back(info1); EXPECT_CALL(*metaStore_, GetCloneInfoByFileName(destination, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cloneInfoList), - Return(kErrCodeSuccess))); + .WillOnce( + DoAll(SetArgPointee<1>(cloneInfoList), Return(kErrCodeSuccess))); FInfo fInfo; fInfo.id = destId + 1; EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, 
destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeFileExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestRecoverPreDestinationNotExist) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -535,16 +452,16 @@ TEST_F(TestCloneCoreImpl, TestRecoverPreDestinationNotExist) { EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _)) .WillOnce(Return(-LIBCURVE_ERROR::NOTEXIST)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kRecover, "", &cloneInfoOut); + int ret = + core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kRecover, "", &cloneInfoOut); ASSERT_EQ(kErrCodeFileNotExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestRecoverPreForSnapSuccess) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -556,23 +473,20 @@ TEST_F(TestCloneCoreImpl, TestRecoverPreForSnapSuccess) { SnapshotInfo snap("id1", "user1", destination, "snap1"); snap.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); - EXPECT_CALL(*metaStore_, AddCloneInfo(_)) - .WillOnce(Return(kErrCodeSuccess)); + EXPECT_CALL(*metaStore_, AddCloneInfo(_)).WillOnce(Return(kErrCodeSuccess)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kRecover, "", &cloneInfoOut); + int ret = + core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kRecover, "", &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(1, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestRecoverPreForSnapDestNotMatch) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -584,20 +498,18 @@ TEST_F(TestCloneCoreImpl, TestRecoverPreForSnapDestNotMatch) { SnapshotInfo snap("id1", "user1", "file1", "snap1"); snap.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kRecover, "", &cloneInfoOut); + int ret = + core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kRecover, "", &cloneInfoOut); ASSERT_EQ(kErrCodeInvalidSnapshot, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreDestinationFileInternalError) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -606,16 +518,16 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationFileInternalError) { 
EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _)) .WillOnce(Return(-LIBCURVE_ERROR::UNKNOWN)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeInternalError, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreForFileSourceFileStatusInvalid) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -631,16 +543,16 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSourceFileStatusInvalid) { EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeFileStatusInvalid, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreForFileSetCloneFileStatusReturnNotExist) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -662,29 +574,26 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSetCloneFileStatusReturnNotExist) { fInfo.filename = "file1"; fInfo.filestatus = FileStatus::Created; EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) - .WillOnce(DoAll(SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); - EXPECT_CALL(*metaStore_, AddCloneInfo(_)) - .WillOnce(Return(kErrCodeSuccess)); + EXPECT_CALL(*metaStore_, AddCloneInfo(_)).WillOnce(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, SetCloneFileStatus(source, - FileStatus::BeingCloned, - option.mdsRootUser)) + EXPECT_CALL(*client_, SetCloneFileStatus(source, FileStatus::BeingCloned, + option.mdsRootUser)) .WillOnce(Return(-LIBCURVE_ERROR::NOTEXIST)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(1, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStage1SuccessForCloneBySnapshot) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + HandleCloneOrRecoverTaskStage1SuccessForCloneBySnapshot) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -705,7 +614,7 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - 
HandleCloneOrRecoverTaskStage2SuccessForCloneBySnapshot) { + HandleCloneOrRecoverTaskStage2SuccessForCloneBySnapshot) { CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", kDefaultPoolset, 1, 2, 100, CloneFileType::kSnapshot, true, CloneStep::kRecoverChunk, CloneStatus::cloning); @@ -726,7 +635,7 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskSuccessForCloneBySnapshotNotLazy) { + HandleCloneOrRecoverTaskSuccessForCloneBySnapshotNotLazy) { CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::cloning); @@ -752,9 +661,9 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskFailOnBuildFileInfoFromSnapshot) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + HandleCloneOrRecoverTaskFailOnBuildFileInfoFromSnapshot) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -769,10 +678,9 @@ TEST_F(TestCloneCoreImpl, core_->HandleCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskFailOnGetSnapshotInfo) { - CloneInfo cinfo("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); +TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnGetSnapshotInfo) { + CloneInfo cinfo("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); cinfo.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -792,22 +700,20 @@ TEST_F(TestCloneCoreImpl, uint64_t filelength = 2 * segmentsize; uint64_t time = 100; Status status = Status::done; - SnapshotInfo info(uuid, user, fileName, desc, - seqnum, chunksize, segmentsize, filelength, 0, 0, "default", - time, status); + SnapshotInfo info(uuid, user, fileName, desc, seqnum, chunksize, + segmentsize, filelength, 0, 0, "default", time, status); EXPECT_CALL(*metaStore_, GetSnapshotInfo(_, _)) - .WillRepeatedly(DoAll( - SetArgPointee<1>(info), - Return(kErrCodeInternalError))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(info), Return(kErrCodeInternalError))); core_->HandleCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStage1SuccessForRecoverBySnapshot) { - CloneInfo info("id1", "user1", CloneTaskType::kRecover, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + HandleCloneOrRecoverTaskStage1SuccessForRecoverBySnapshot) { + CloneInfo info("id1", "user1", CloneTaskType::kRecover, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::recovering); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -827,7 +733,7 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStage2SuccessForRecoverBySnapshot) { + HandleCloneOrRecoverTaskStage2SuccessForRecoverBySnapshot) { CloneInfo info("id1", "user1", CloneTaskType::kRecover, "snapid1", "file1", kDefaultPoolset, 1, 2, 100, CloneFileType::kSnapshot, true, CloneStep::kRecoverChunk, CloneStatus::recovering); @@ -847,10 +753,9 @@ TEST_F(TestCloneCoreImpl, core_->HandleCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - 
HandleCloneOrRecoverTaskFailOnCreateCloneFile) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); +TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCreateCloneFile) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -867,8 +772,8 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCloneMeta) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -886,8 +791,8 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCloneMeta) { } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCreateCloneChunk) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -906,8 +811,8 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCreateCloneChunk) { } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCompleteCloneMeta) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -927,8 +832,8 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCompleteCloneMeta) { } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnChangeOwner) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -949,8 +854,8 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnChangeOwner) { } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFileOnRenameCloneFile) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1011,8 +916,7 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCompleteCloneFail) { core_->HandleCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStage1SuccessForCloneByFile) { +TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStage1SuccessForCloneByFile) { CloneInfo info("id1", "user1", CloneTaskType::kClone, 
"snapid1", "file1", kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); @@ -1034,11 +938,10 @@ TEST_F(TestCloneCoreImpl, core_->HandleCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStage2SuccessForCloneByFile) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, 1, 2, 100, CloneFileType::kFile, true, - CloneStep::kRecoverChunk, CloneStatus::cloning); +TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStage2SuccessForCloneByFile) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, 1, 2, 100, CloneFileType::kFile, true, + CloneStep::kRecoverChunk, CloneStatus::cloning); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1056,9 +959,9 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskForCloneByFileFailOnBuildFileInfoFromFile) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kFile, true); + HandleCloneOrRecoverTaskForCloneByFileFailOnBuildFileInfoFromFile) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1074,9 +977,9 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskForCloneByFileFailOnInvalidSegmentSize) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kFile, true); + HandleCloneOrRecoverTaskForCloneByFileFailOnInvalidSegmentSize) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1095,17 +998,16 @@ TEST_F(TestCloneCoreImpl, fInfo.owner = "user1"; fInfo.filename = "file1"; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); core_->HandleCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskForCloneByFileFailOnInvalidFileLen) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kFile, true); + HandleCloneOrRecoverTaskForCloneByFileFailOnInvalidFileLen) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1124,17 +1026,15 @@ TEST_F(TestCloneCoreImpl, fInfo.owner = "user1"; fInfo.filename = "file1"; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); core_->HandleCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStepUnknown) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); +TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStepUnknown) { + CloneInfo info("id1", "user1", 
CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); info.SetNextStep(static_cast(8)); auto cloneMetric = std::make_shared("id1"); @@ -1163,14 +1063,12 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotSuccess( uint64_t filelength = 1 * segmentsize; uint64_t time = 100; Status status = Status::done; - SnapshotInfo info(uuid, user, fileName, desc, - seqnum, chunksize, segmentsize, filelength, 0, 0, kDefaultPoolset, - time, status); + SnapshotInfo info(uuid, user, fileName, desc, seqnum, chunksize, + segmentsize, filelength, 0, 0, kDefaultPoolset, time, + status); EXPECT_CALL(*metaStore_, GetSnapshotInfo(_, _)) - .WillRepeatedly(DoAll( - SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); if (CloneTaskType::kRecover == task->GetCloneInfo().GetTaskType()) { FInfo fInfo; @@ -1178,9 +1076,8 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotSuccess( fInfo.seqnum = 100; fInfo.poolset = kDefaultPoolset; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); } ChunkIndexData snapMeta; @@ -1191,18 +1088,15 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotSuccess( snapMeta.PutChunkDataName(chunk2); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snapMeta), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapMeta), Return(kErrCodeSuccess))); FInfo fInfo; fInfo.id = 100; fInfo.seqnum = 100; fInfo.poolset = kDefaultPoolset; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); } void TestCloneCoreImpl::MockBuildFileInfoFromFileSuccess( @@ -1216,9 +1110,8 @@ void TestCloneCoreImpl::MockBuildFileInfoFromFileSuccess( fInfo.owner = "user1"; fInfo.filename = "file1"; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); } void TestCloneCoreImpl::MockCreateCloneFileSuccess( @@ -1226,8 +1119,8 @@ void TestCloneCoreImpl::MockCreateCloneFileSuccess( FInfo fInfoOut; fInfoOut.id = 100; EXPECT_CALL(*client_, CreateCloneFile(_, _, _, _, _, _, _, _, _, _)) - .WillOnce(DoAll(SetArgPointee<9>(fInfoOut), - Return(LIBCURVE_ERROR::OK))); + .WillOnce( + DoAll(SetArgPointee<9>(fInfoOut), Return(LIBCURVE_ERROR::OK))); } void TestCloneCoreImpl::MockCloneMetaSuccess( @@ -1238,33 +1131,25 @@ void TestCloneCoreImpl::MockCloneMetaSuccess( segInfoOut.segmentsize = segmentsize; segInfoOut.chunksize = chunksize; segInfoOut.startoffset = 0; - segInfoOut.chunkvec = {{1, 1, 1}, - {2, 2, 1}}; + segInfoOut.chunkvec = {{1, 1, 1}, {2, 2, 1}}; segInfoOut.lpcpIDInfo.lpid = 1; segInfoOut.lpcpIDInfo.cpidVec = {1, 2}; EXPECT_CALL(*client_, GetOrAllocateSegmentInfo(_, 0, _, _, _)) .WillRepeatedly( - DoAll(SetArgPointee<4>(segInfoOut), - Return(LIBCURVE_ERROR::OK))); + DoAll(SetArgPointee<4>(segInfoOut), Return(LIBCURVE_ERROR::OK))); } void TestCloneCoreImpl::MockCreateCloneChunkSuccess( std::shared_ptr task) { std::string location1, location2; if (CloneFileType::kSnapshot == task->GetCloneInfo().GetFileType()) { - location1 = 
LocationOperator::GenerateS3Location( - "file1-0-1"); - location2 = LocationOperator::GenerateS3Location( - "file1-1-1"); + location1 = LocationOperator::GenerateS3Location("file1-0-1"); + location2 = LocationOperator::GenerateS3Location("file1-1-1"); } else { - location1 = - LocationOperator::GenerateCurveLocation( - task->GetCloneInfo().GetSrc(), - std::stoull("0")); - location2 = - LocationOperator::GenerateCurveLocation( - task->GetCloneInfo().GetSrc(), - std::stoull("1048576")); + location1 = LocationOperator::GenerateCurveLocation( + task->GetCloneInfo().GetSrc(), std::stoull("0")); + location2 = LocationOperator::GenerateCurveLocation( + task->GetCloneInfo().GetSrc(), std::stoull("1048576")); } uint32_t correctSn = 0; @@ -1273,18 +1158,15 @@ void TestCloneCoreImpl::MockCreateCloneChunkSuccess( } else { correctSn = 100; } - EXPECT_CALL(*client_, CreateCloneChunk( - AnyOf(location1, location2), _, _, correctSn, _, _)) + EXPECT_CALL(*client_, CreateCloneChunk(AnyOf(location1, location2), _, _, + correctSn, _, _)) .WillRepeatedly(DoAll( - Invoke([](const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), + Invoke([](const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, uint64_t csn, + uint64_t chunkSize, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), Return(LIBCURVE_ERROR::OK))); } @@ -1297,15 +1179,12 @@ void TestCloneCoreImpl::MockCompleteCloneMetaSuccess( void TestCloneCoreImpl::MockRecoverChunkSuccess( std::shared_ptr task) { EXPECT_CALL(*client_, RecoverChunk(_, _, _, _)) - .WillRepeatedly(DoAll( - Invoke([](const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK), - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK), scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); } void TestCloneCoreImpl::MockChangeOwnerSuccess( @@ -1338,22 +1217,18 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotFail( uint64_t filelength = 2 * segmentsize; uint64_t time = 100; Status status = Status::done; - SnapshotInfo info(uuid, user, fileName, desc, - seqnum, chunksize, segmentsize, filelength, 0, 0, "default", - time, status); + SnapshotInfo info(uuid, user, fileName, desc, seqnum, chunksize, + segmentsize, filelength, 0, 0, "default", time, status); EXPECT_CALL(*metaStore_, GetSnapshotInfo(_, _)) - .WillRepeatedly(DoAll( - SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); if (CloneTaskType::kRecover == task->GetCloneInfo().GetTaskType()) { FInfo fInfo; fInfo.id = 100; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); } EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) @@ -1362,9 +1237,8 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotFail( FInfo fInfo; fInfo.id = 100; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(-LIBCURVE_ERROR::FAILED))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(-LIBCURVE_ERROR::FAILED))); } void 
TestCloneCoreImpl::MockBuildFileInfoFromFileFail( @@ -1378,9 +1252,8 @@ void TestCloneCoreImpl::MockBuildFileInfoFromFileFail( fInfo.owner = "user1"; fInfo.filename = "file1"; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(-LIBCURVE_ERROR::FAILED))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(-LIBCURVE_ERROR::FAILED))); } void TestCloneCoreImpl::MockCreateCloneFileFail( @@ -1388,12 +1261,11 @@ void TestCloneCoreImpl::MockCreateCloneFileFail( FInfo fInfoOut; fInfoOut.id = 100; EXPECT_CALL(*client_, CreateCloneFile(_, _, _, _, _, _, _, _, _, _)) - .WillOnce(DoAll(SetArgPointee<9>(fInfoOut), - Return(-LIBCURVE_ERROR::FAILED))); + .WillOnce( + DoAll(SetArgPointee<9>(fInfoOut), Return(-LIBCURVE_ERROR::FAILED))); } -void TestCloneCoreImpl::MockCloneMetaFail( - std::shared_ptr task) { +void TestCloneCoreImpl::MockCloneMetaFail(std::shared_ptr task) { EXPECT_CALL(*client_, GetOrAllocateSegmentInfo(_, _, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::FAILED)); } @@ -1402,15 +1274,12 @@ void TestCloneCoreImpl::MockCreateCloneChunkFail( std::shared_ptr task) { EXPECT_CALL(*client_, CreateCloneChunk(_, _, _, _, _, _)) .WillRepeatedly(DoAll( - Invoke([](const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), + Invoke([](const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, uint64_t csn, + uint64_t chunkSize, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), Return(-LIBCURVE_ERROR::FAILED))); } @@ -1424,13 +1293,9 @@ void TestCloneCoreImpl::MockRecoverChunkFail( std::shared_ptr task) { EXPECT_CALL(*client_, RecoverChunk(_, _, _, _)) .WillRepeatedly(DoAll( - Invoke([](const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure* scc){ - scc->Run(); - }), - Return(-LIBCURVE_ERROR::FAILED))); + Invoke([](const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc) { scc->Run(); }), + Return(-LIBCURVE_ERROR::FAILED))); } void TestCloneCoreImpl::MockChangeOwnerFail( @@ -1452,7 +1317,7 @@ void TestCloneCoreImpl::MockCompleteCloneFileFail( } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreSuccess) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1462,20 +1327,17 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreSuccess) { cinfo.SetStatus(CloneStatus::error); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(0))); EXPECT_CALL(*metaStore_, UpdateCloneInfo(_)) .WillOnce(Return(kErrCodeSuccess)); - int ret = core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreGetCloneInfoFail) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1485,17 +1347,14 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreGetCloneInfoFail) { cinfo.SetStatus(CloneStatus::error); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(-1))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(-1))); - int ret = 
core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(0, ret); } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreInvalidUser) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1505,17 +1364,14 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreInvalidUser) { cinfo.SetStatus(CloneStatus::error); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(0))); - int ret = core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(kErrCodeInvalidUser, ret); } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreCannotCleanUnFinished) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1525,17 +1381,14 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreCannotCleanUnFinished) { cinfo.SetStatus(CloneStatus::cloning); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(0))); - int ret = core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(kErrCodeCannotCleanCloneUnfinished, ret); } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreTaskExist) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1545,17 +1398,14 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreTaskExist) { cinfo.SetStatus(CloneStatus::errorCleaning); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(0))); - int ret = core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(kErrCodeTaskExist, ret); } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreUpdateCloneInfoFail) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1565,21 +1415,18 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreUpdateCloneInfoFail) { cinfo.SetStatus(CloneStatus::error); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(0))); EXPECT_CALL(*metaStore_, UpdateCloneInfo(_)) .WillOnce(Return(kErrCodeInternalError)); - int ret = core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(kErrCodeInternalError, ret); } TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskSuccess) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1589,14 +1436,13 @@ TEST_F(TestCloneCoreImpl, 
TestHandleCleanCloneOrRecoverTaskSuccess) { EXPECT_CALL(*client_, DeleteFile(_, _, _)) .WillOnce(Return(LIBCURVE_ERROR::OK)); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); core_->HandleCleanCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskSuccess2) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1606,14 +1452,13 @@ TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskSuccess2) { EXPECT_CALL(*client_, DeleteFile(_, _, _)) .WillOnce(Return(-LIBCURVE_ERROR::NOTEXIST)); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); core_->HandleCleanCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskLazySuccess) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1624,14 +1469,13 @@ TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskLazySuccess) { .Times(1) .WillOnce(Return(LIBCURVE_ERROR::OK)); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); core_->HandleCleanCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskFail1) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1642,43 +1486,36 @@ TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskFail1) { .Times(1) .WillOnce(Return(-LIBCURVE_ERROR::FAILED)); - EXPECT_CALL(*metaStore_, UpdateCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, UpdateCloneInfo(_)).WillOnce(Return(0)); core_->HandleCleanCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, - TestHandleCleanCloneOrRecoverTaskCleanNotErrorSuccess) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); + TestHandleCleanCloneOrRecoverTaskCleanNotErrorSuccess) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::cleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); std::shared_ptr task = std::make_shared(info, cloneMetric, cloneClosure); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); core_->HandleCleanCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - TestCheckFileExists) { 
+TEST_F(TestCloneCoreImpl, TestCheckFileExists) { FInfo fInfo; fInfo.id = 100; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(core_->CheckFileExists("filename", 100), kErrCodeFileExist); EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(core_->CheckFileExists("filename", 10), kErrCodeFileNotExist); @@ -1693,36 +1530,31 @@ TEST_F(TestCloneCoreImpl, ASSERT_EQ(core_->CheckFileExists("filename", 100), kErrCodeInternalError); } -TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoSnapDeleteCloneInfoFail) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(-1)); +TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoSnapDeleteCloneInfoFail) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(-1)); snapshotRef_->IncrementSnapshotRef("snapid1"); ASSERT_EQ(core_->HandleDeleteCloneInfo(info), kErrCodeInternalError); ASSERT_EQ(snapshotRef_->GetSnapshotRef("snapid1"), 1); } -TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoSnapSuccess) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); +TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoSnapSuccess) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::metaInstalled); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); snapshotRef_->IncrementSnapshotRef("snapid1"); ASSERT_EQ(core_->HandleDeleteCloneInfo(info), kErrCodeSuccess); ASSERT_EQ(snapshotRef_->GetSnapshotRef("snapid1"), 0); } TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoFileRefReturnMetainstalledNotTo0) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); + TestHandleDeleteCloneInfoFileRefReturnMetainstalledNotTo0) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "source1", "file1", + kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); cloneRef_->IncrementRef("source1"); cloneRef_->IncrementRef("source1"); ASSERT_EQ(cloneRef_->GetRef("source1"), 2); @@ -1730,26 +1562,22 @@ TEST_F(TestCloneCoreImpl, ASSERT_EQ(cloneRef_->GetRef("source1"), 1); } -TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoFileSetStatusFail) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); +TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoFileSetStatusFail) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "source1", "file1", + kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); cloneRef_->IncrementRef("source1"); ASSERT_EQ(cloneRef_->GetRef("source1"), 1); - 
EXPECT_CALL(*client_, SetCloneFileStatus(_, _, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*client_, SetCloneFileStatus(_, _, _)).WillOnce(Return(-1)); ASSERT_EQ(core_->HandleDeleteCloneInfo(info), kErrCodeInternalError); ASSERT_EQ(cloneRef_->GetRef("source1"), 1); } -TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoFileDeleteCloneInfoFail) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); +TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoFileDeleteCloneInfoFail) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "source1", "file1", + kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(-1)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(-1)); cloneRef_->IncrementRef("source1"); ASSERT_EQ(cloneRef_->GetRef("source1"), 1); EXPECT_CALL(*client_, SetCloneFileStatus(_, _, _)) @@ -1758,13 +1586,11 @@ TEST_F(TestCloneCoreImpl, ASSERT_EQ(cloneRef_->GetRef("source1"), 1); } -TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoFileSuccess) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); +TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoFileSuccess) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "source1", "file1", + kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); cloneRef_->IncrementRef("source1"); ASSERT_EQ(cloneRef_->GetRef("source1"), 1); EXPECT_CALL(*client_, SetCloneFileStatus(_, _, _)) diff --git a/test/snapshotcloneserver/test_curvefs_client.cpp b/test/snapshotcloneserver/test_curvefs_client.cpp index b4e79b17b0..ec27aa8fe7 100644 --- a/test/snapshotcloneserver/test_curvefs_client.cpp +++ b/test/snapshotcloneserver/test_curvefs_client.cpp @@ -20,9 +20,8 @@ * Author: xuchaojie */ - -#include <gtest/gtest.h> #include <gmock/gmock.h> +#include <gtest/gtest.h> #include "src/snapshotcloneserver/common/curvefs_client.h" #include "test/util/config_generator.h" @@ -40,19 +39,14 @@ class TestCurveFsClientImpl : public ::testing::Test { static void SetUpTestCase() { ClientConfigGenerator gentor(kClientConfigPath); - // 把超时时间和重试次数改小,已使得测试尽快完成 + // Reduce the timeout and retry count so that the test completes as + // quickly as possible std::vector<std::string> options = { - {"mds.listen.addr=127.0.0.1:8888", - "mds.registerToMDS=false", - "mds.rpcTimeoutMS=1", - "mds.maxRPCTimeoutMS=1", - "mds.maxRetryMS=1", - "mds.rpcRetryIntervalUS=1", - "metacache.getLeaderTimeOutMS=1", - "metacache.getLeaderRetry=1", - "metacache.rpcRetryIntervalUS=1", - "chunkserver.opRetryIntervalUS=1", - "chunkserver.opMaxRetry=1", + {"mds.listen.addr=127.0.0.1:8888", "mds.registerToMDS=false", + "mds.rpcTimeoutMS=1", "mds.maxRPCTimeoutMS=1", "mds.maxRetryMS=1", + "mds.rpcRetryIntervalUS=1", "metacache.getLeaderTimeOutMS=1", + "metacache.getLeaderRetry=1", "metacache.rpcRetryIntervalUS=1", + "chunkserver.opRetryIntervalUS=1", "chunkserver.opMaxRetry=1", "chunkserver.rpcTimeoutMS=1", "chunkserver.maxRetrySleepIntervalUS=1", "chunkserver.maxRPCTimeoutMS=1"}, @@ -64,8 +58,7 @@ class TestCurveFsClientImpl : public ::testing::Test { virtual void SetUp() { std::shared_ptr snapClient = std::make_shared(); - std::shared_ptr fileClient = - std::make_shared(); + std::shared_ptr fileClient = std::make_shared();
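// Note: snapClient and fileClient appear to be real client objects rather // than mocks, so the calls exercised in the tests below issue actual RPCs // that fail fast against the MDS address configured above (127.0.0.1:8888), // where nothing is listening.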
client_ = std::make_shared(snapClient, fileClient); clientOption_.configPath = kClientConfigPath; clientOption_.mdsRootUser = "root"; @@ -75,9 +68,7 @@ class TestCurveFsClientImpl : public ::testing::Test { client_->Init(clientOption_); } - virtual void TearDown() { - client_->UnInit(); - } + virtual void TearDown() { client_->UnInit(); } protected: std::shared_ptr client_; @@ -85,9 +76,7 @@ class TestCurveFsClientImpl : public ::testing::Test { }; struct TestClosure : public SnapCloneClosure { - void Run() { - std::unique_ptr selfGuard(this); - } + void Run() { std::unique_ptr selfGuard(this); } }; TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { @@ -111,35 +100,35 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { SegmentInfo segInfo; ret = client_->GetSnapshotSegmentInfo("file1", "user1", 1, 0, &segInfo); ASSERT_LT(ret, 0); - ret = client_->GetSnapshotSegmentInfo( - "file1", clientOption_.mdsRootUser, 1, 0, &segInfo); + ret = client_->GetSnapshotSegmentInfo("file1", clientOption_.mdsRootUser, 1, + 0, &segInfo); ASSERT_LT(ret, 0); ChunkIDInfo cidinfo; FileStatus fstatus; ret = client_->CheckSnapShotStatus("file1", "user1", 1, &fstatus); ASSERT_LT(ret, 0); - ret = client_->CheckSnapShotStatus( - "file1", clientOption_.mdsRootUser, 1, &fstatus); + ret = client_->CheckSnapShotStatus("file1", clientOption_.mdsRootUser, 1, + &fstatus); ASSERT_LT(ret, 0); ChunkInfoDetail chunkInfo; ret = client_->GetChunkInfo(cidinfo, &chunkInfo); ASSERT_LT(ret, 0); - ret = client_->CreateCloneFile( - "source1", "file1", "user1", 1024, 1, 1024, 0, 0, "default", &fInfo); + ret = client_->CreateCloneFile("source1", "file1", "user1", 1024, 1, 1024, + 0, 0, "default", &fInfo); ASSERT_LT(ret, 0); - ret = client_->CreateCloneFile( - "source1", "file1", clientOption_.mdsRootUser, 1024, 1, 1024, - 0, 0, "default", &fInfo); + ret = + client_->CreateCloneFile("source1", "file1", clientOption_.mdsRootUser, + 1024, 1, 1024, 0, 0, "default", &fInfo); ASSERT_LT(ret, 0); - TestClosure *cb = new TestClosure(); + TestClosure* cb = new TestClosure(); ret = client_->CreateCloneChunk("", cidinfo, 1, 2, 1024, cb); ASSERT_EQ(ret, 0); - TestClosure *cb2 = new TestClosure(); + TestClosure* cb2 = new TestClosure(); ret = client_->RecoverChunk(cidinfo, 0, 1024, cb2); ASSERT_EQ(ret, 0); @@ -159,8 +148,9 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { ret = client_->GetFileInfo("file1", clientOption_.mdsRootUser, &fInfo); ASSERT_LT(ret, 0); - // client 对mds接口无限重试,这两个接口死循环,先注释掉 - // ret = client_->GetOrAllocateSegmentInfo( + // The client retries the mds interface infinitely, and these two interfaces + // loop endlessly. 
They are commented out for now. + // ret = client_->GetOrAllocateSegmentInfo( // true, 0, &fInfo, "user1", &segInfo); // ASSERT_LT(ret, 0); // ret = client_->GetOrAllocateSegmentInfo( @@ -169,8 +159,8 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { ret = client_->RenameCloneFile("user1", 1, 2, "file1", "file2"); ASSERT_LT(ret, 0); - ret = client_->RenameCloneFile( - clientOption_.mdsRootUser, 1, 2, "file1", "file2"); + ret = client_->RenameCloneFile(clientOption_.mdsRootUser, 1, 2, "file1", + "file2"); ASSERT_LT(ret, 0); ret = client_->DeleteFile("file1", "user1", 1); @@ -187,7 +177,5 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { ASSERT_LT(ret, 0); } - - } // namespace snapshotcloneserver } // namespace curve diff --git a/test/snapshotcloneserver/test_snapshot_core.cpp b/test/snapshotcloneserver/test_snapshot_core.cpp index d4c40963f1..02e363ee1a 100644 --- a/test/snapshotcloneserver/test_snapshot_core.cpp +++ b/test/snapshotcloneserver/test_snapshot_core.cpp @@ -20,26 +20,24 @@ * Author: xuchaojie */ -#include <gtest/gtest.h> #include <gmock/gmock.h> +#include <gtest/gtest.h> -#include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include "src/snapshotcloneserver/snapshot/snapshot_task.h" - #include "test/snapshotcloneserver/mock_snapshot_server.h" - namespace curve { namespace snapshotcloneserver { -using ::testing::Return; using ::testing::_; -using ::testing::AnyOf; using ::testing::AllOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; +using ::testing::AnyOf; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; +using ::testing::SetArgPointee; class TestSnapshotCoreImpl : public ::testing::Test { public: @@ -47,8 +45,7 @@ class TestSnapshotCoreImpl : public ::testing::Test { virtual ~TestSnapshotCoreImpl() {} virtual void SetUp() { - snapshotRef_ = - std::make_shared(); + snapshotRef_ = std::make_shared(); client_ = std::make_shared(); metaStore_ = std::make_shared(); dataStore_ = std::make_shared(); @@ -60,11 +57,8 @@ class TestSnapshotCoreImpl : public ::testing::Test { option.snapshotCoreThreadNum = 1; option.clientAsyncMethodRetryTimeSec = 1; option.clientAsyncMethodRetryIntervalMs = 500; - core_ = std::make_shared(client_, - metaStore_, - dataStore_, - snapshotRef_, - option); + core_ = std::make_shared( + client_, metaStore_, dataStore_, snapshotRef_, option); ASSERT_EQ(core_->Init(), 0); } @@ -84,7 +78,6 @@ class TestSnapshotCoreImpl : public ::testing::Test { SnapshotCloneServerOptions option; }; - TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreSuccess) { const std::string file = "file"; const std::string user = "user"; @@ -96,18 +89,13 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreSuccess) { sinfo.SetStatus(Status::done); list.push_back(sinfo); EXPECT_CALL(*metaStore_, GetSnapshotList(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(list), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(list), Return(kErrCodeSuccess))); FInfo fInfo; fInfo.filestatus = FileStatus::Created; fInfo.owner = user; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); - EXPECT_CALL(*metaStore_, AddSnapshot(_)) - .WillOnce(Return(kErrCodeSuccess)); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); + EXPECT_CALL(*metaStore_, AddSnapshot(_)).WillOnce(Return(kErrCodeSuccess)); int ret = core_->CreateSnapshotPre(file, user, desc, &info); 
ASSERT_EQ(kErrCodeSuccess, ret); } @@ -119,16 +107,11 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreTaskExist) { SnapshotInfo info; std::vector list; - SnapshotInfo sinfo("snapid1", - user, - file, - desc); + SnapshotInfo sinfo("snapid1", user, file, desc); sinfo.SetStatus(Status::pending); list.push_back(sinfo); EXPECT_CALL(*metaStore_, GetSnapshotList(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(list), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(list), Return(kErrCodeSuccess))); int ret = core_->CreateSnapshotPre(file, user, desc, &info); ASSERT_EQ(kErrCodeTaskExist, ret); } @@ -144,9 +127,7 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreAddSnapshotFail) { fInfo.filestatus = FileStatus::Created; fInfo.owner = user; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, AddSnapshot(_)) .WillOnce(Return(kErrCodeInternalError)); int ret = core_->CreateSnapshotPre(file, user, desc, &info); @@ -163,9 +144,8 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreFileNotExist) { FInfo fInfo; fInfo.filestatus = FileStatus::Created; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(-LIBCURVE_ERROR::NOTEXIST))); + .WillOnce( + DoAll(SetArgPointee<2>(fInfo), Return(-LIBCURVE_ERROR::NOTEXIST))); int ret = core_->CreateSnapshotPre(file, user, desc, &info); ASSERT_EQ(kErrCodeFileNotExist, ret); } @@ -181,9 +161,8 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreInvalidUser) { fInfo.filestatus = FileStatus::Created; fInfo.owner = "user2"; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(-LIBCURVE_ERROR::AUTHFAIL))); + .WillOnce( + DoAll(SetArgPointee<2>(fInfo), Return(-LIBCURVE_ERROR::AUTHFAIL))); int ret = core_->CreateSnapshotPre(file, user, desc, &info); ASSERT_EQ(kErrCodeInvalidUser, ret); } @@ -198,9 +177,8 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreInternalError) { FInfo fInfo; fInfo.filestatus = FileStatus::Created; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(-LIBCURVE_ERROR::FAILED))); + .WillOnce( + DoAll(SetArgPointee<2>(fInfo), Return(-LIBCURVE_ERROR::FAILED))); int ret = core_->CreateSnapshotPre(file, user, desc, &info); ASSERT_EQ(kErrCodeInternalError, ret); } @@ -216,9 +194,7 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreFailStatusInvalid) { fInfo.filestatus = FileStatus::Cloning; fInfo.owner = user; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); int ret = core_->CreateSnapshotPre(file, user, desc, &info); ASSERT_EQ(kErrCodeFileStatusInvalid, ret); } @@ -232,8 +208,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPreSuccess) { SnapshotInfo info(uuid, user, fileName, desc); info.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillOnce(Return(kErrCodeSuccess)); @@ -252,8 +227,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_GetSnapshotInfoNotExist) { SnapshotInfo info(uuid, user, fileName, desc); info.SetStatus(Status::done); EXPECT_CALL(*metaStore_, 
GetSnapshotInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeInternalError))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeInternalError))); SnapshotInfo infoOut; int ret = core_->DeleteSnapshotPre(uuid, user, fileName, &infoOut); @@ -269,8 +243,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_UpdateSnapshotFail) { SnapshotInfo info(uuid, user, fileName, desc); info.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillOnce(Return(kErrCodeInternalError)); @@ -290,8 +263,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_InvalidUser) { SnapshotInfo info(uuid, user2, fileName, desc); info.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); SnapshotInfo infoOut; int ret = core_->DeleteSnapshotPre(uuid, user, fileName, &infoOut); @@ -307,8 +279,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_DeleteSnapshotUnfinished) { SnapshotInfo info(uuid, user, fileName, desc); info.SetStatus(Status::pending); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); SnapshotInfo infoOut; int ret = core_->DeleteSnapshotPre(uuid, user, fileName, &infoOut); @@ -325,8 +296,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_FileNameNotMatch) { SnapshotInfo info(uuid, user, fileName2, desc); info.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); SnapshotInfo infoOut; int ret = core_->DeleteSnapshotPre(uuid, user, fileName, &infoOut); @@ -342,16 +312,14 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_TaskExit) { SnapshotInfo info(uuid, user, fileName, desc); info.SetStatus(Status::deleting); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); SnapshotInfo infoOut; int ret = core_->DeleteSnapshotPre(uuid, user, fileName, &infoOut); ASSERT_EQ(kErrCodeTaskExist, ret); } -TEST_F(TestSnapshotCoreImpl, - TestGetFileSnapshotInfoSuccess) { +TEST_F(TestSnapshotCoreImpl, TestGetFileSnapshotInfoSuccess) { std::string file = "file1"; std::vector info; @@ -362,8 +330,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(kErrCodeSuccess, ret); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskSuccess) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskSuccess) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -378,9 +345,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -389,10 +354,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - 
.WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -407,10 +370,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -420,29 +381,21 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce(DoAll(SetArgPointee<4>(segInfo2), Return(kErrCodeSuccess))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -464,16 +417,13 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) @@ -481,28 +431,22 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .WillOnce(Return(LIBCURVE_ERROR::OK)); @@ -518,8 +462,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::done, 
task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_CreateSnapshotFail) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTask_CreateSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -533,10 +476,8 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(-LIBCURVE_ERROR::FAILED))); - + .WillOnce( + DoAll(SetArgPointee<2>(seqNum), Return(-LIBCURVE_ERROR::FAILED))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillOnce(Return(kErrCodeSuccess)); @@ -547,8 +488,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_GetSnapshotFail) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTask_GetSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -562,10 +502,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -574,9 +511,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(-LIBCURVE_ERROR::FAILED))); + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(-LIBCURVE_ERROR::FAILED))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillOnce(Return(kErrCodeSuccess)); @@ -587,8 +523,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_UpdateSnapshotFail) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTask_UpdateSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -601,10 +536,7 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -613,10 +545,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeInternalError)); @@ -630,7 +560,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_SecondTimeUpdateSnapshotFail) { + TestHandleCreateSnapshotTask_SecondTimeUpdateSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -643,10 +573,7 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce(DoAll(SetArgPointee<2>(seqNum), 
Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -655,10 +582,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeInternalError)); @@ -672,7 +597,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_GetSnapshotSegmentInfoFail) { + TestHandleCreateSnapshotTask_GetSnapshotSegmentInfoFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -685,12 +610,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -699,21 +620,15 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillOnce(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::FAILED)); core_->HandleCreateSnapshotTask(task); @@ -722,8 +637,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_GetChunkInfoFail) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTask_GetChunkInfoFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -737,10 +651,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -749,10 +660,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -767,10 +676,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -780,25 +687,19 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - 
segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(chunkInfo), - Return(-LIBCURVE_ERROR::FAILED))); + Return(-LIBCURVE_ERROR::FAILED))); core_->HandleCreateSnapshotTask(task); @@ -807,7 +708,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_PutChunkIndexDataFail) { + TestHandleCreateSnapshotTask_PutChunkIndexDataFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -820,12 +721,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -834,10 +731,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -852,10 +747,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -865,29 +758,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); 
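// Everything above this point is set up to succeed; the next expectation // injects this case's failure: PutChunkIndexData returns // kErrCodeInternalError, so the snapshot task should end in Status::error.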
EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeInternalError)); @@ -899,7 +785,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_DataChunkTranferInitFail) { + TestHandleCreateSnapshotTask_DataChunkTranferInitFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -912,11 +798,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -925,10 +808,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -943,10 +824,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -956,29 +835,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -989,23 +861,20 @@ TEST_F(TestSnapshotCoreImpl, std::vector snapInfos; SnapshotInfo info2(uuid2, user, fileName, desc2); info.SetSeqNum(seqNum); - info2.SetSeqNum(seqNum - 1); // 上一个快照 + info2.SetSeqNum(seqNum - 1); // Previous snapshot info2.SetStatus(Status::done); snapInfos.push_back(info); snapInfos.push_back(info2); EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - 
SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .WillOnce(Return(kErrCodeInternalError)); @@ -1017,7 +886,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_ReadChunkSnapshotFail) { + TestHandleCreateSnapshotTask_ReadChunkSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -1031,9 +900,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -1042,10 +909,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1060,10 +925,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -1073,29 +936,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1111,35 +967,28 @@ TEST_F(TestSnapshotCoreImpl, snapInfos.push_back(info); snapInfos.push_back(info2); - EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) + EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + 
.WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(1) .WillRepeatedly(Return(kErrCodeSuccess)); EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) - .WillOnce(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(-LIBCURVE_ERROR::FAILED))); + .WillOnce( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(-LIBCURVE_ERROR::FAILED))); core_->HandleCreateSnapshotTask(task); @@ -1148,7 +997,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_DataChunkTranferAddPartFail) { + TestHandleCreateSnapshotTask_DataChunkTranferAddPartFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -1161,11 +1010,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -1174,10 +1020,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1192,10 +1036,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -1205,29 +1047,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1245,34 +1080,26 @@ TEST_F(TestSnapshotCoreImpl, 
EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(1) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .WillRepeatedly(Return(kErrCodeInternalError)); @@ -1287,7 +1114,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_DataChunkTranferCompleteFail) { + TestHandleCreateSnapshotTask_DataChunkTranferCompleteFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -1300,11 +1127,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -1313,10 +1137,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1331,10 +1153,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -1344,29 +1164,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + 
.WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1384,35 +1197,27 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(1) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(2) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(2) @@ -1430,8 +1235,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_DeleteSnapshotFail) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTask_DeleteSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -1444,11 +1248,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -1457,10 +1258,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1475,10 +1274,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -1488,29 +1285,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - 
ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1528,41 +1318,32 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -1577,7 +1358,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnReturnFail) { + TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnReturnFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -1590,11 +1371,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -1603,10 +1381,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, 
CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1621,10 +1397,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -1634,29 +1408,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1674,41 +1441,32 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -1726,7 +1484,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnDeleteError) { + TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnDeleteError) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -1739,11 +1497,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr 
task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -1752,10 +1507,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1770,10 +1523,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -1783,29 +1534,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1823,41 +1567,32 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + 
scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -1875,7 +1610,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnFileStatusError) { + TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnFileStatusError) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -1888,11 +1623,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -1901,10 +1633,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1919,10 +1649,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -1932,29 +1660,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1972,41 +1693,32 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - 
SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -2025,7 +1737,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskExistIndexDataSuccess) { + TestHandleCreateSnapshotTaskExistIndexDataSuccess) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2043,8 +1755,7 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)).WillOnce(Return(true)); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, seqNum, 0)); @@ -2057,12 +1768,8 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<1>(indexData2), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess))); LogicPoolID lpid1 = 1; CopysetID cpid1 = 1; @@ -2072,10 +1779,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -2085,21 +1790,14 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); UUID uuid2 = "uuid2"; std::string desc2 = "desc2"; @@ -2114,29 +1812,22 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - 
.WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); - + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) @@ -2163,7 +1854,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskChunkSizeNotAlignTokChunkSplitSize) { + TestHandleCreateSnapshotTaskChunkSizeNotAlignTokChunkSplitSize) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2180,8 +1871,7 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)).WillOnce(Return(true)); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, seqNum, 0)); @@ -2194,13 +1884,8 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<1>(indexData2), - Return(kErrCodeSuccess))); - + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess))); LogicPoolID lpid1 = 1; CopysetID cpid1 = 1; @@ -2210,10 +1895,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -2223,21 +1906,14 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); UUID uuid2 = "uuid2"; std::string desc2 = "desc2"; @@ -2252,9 +1928,8 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + 
.WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .Times(1) @@ -2266,8 +1941,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskChunkVecInfoMiss) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskChunkVecInfoMiss) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2284,8 +1958,7 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)).WillOnce(Return(true)); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, seqNum, 0)); @@ -2298,28 +1971,18 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<1>(indexData2), - Return(kErrCodeSuccess))); - + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess))); SegmentInfo segInfo1; SegmentInfo segInfo2; - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); UUID uuid2 = "uuid2"; std::string desc2 = "desc2"; @@ -2334,9 +1997,8 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .Times(1) @@ -2348,8 +2010,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleDeleteSnapshotTaskSuccess) { +TEST_F(TestSnapshotCoreImpl, TestHandleDeleteSnapshotTaskSuccess) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2374,9 +2035,7 @@ TEST_F(TestSnapshotCoreImpl, snapInfos.push_back(info2); EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData1; indexData1.PutChunkDataName(ChunkDataName(fileName, seqNum, 0)); @@ -2384,15 +2043,10 @@ TEST_F(TestSnapshotCoreImpl, indexData2.PutChunkDataName(ChunkDataName(fileName, 1, 1)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<1>(indexData1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<1>(indexData2), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData1), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess))); - EXPECT_CALL(*dataStore_, ChunkDataExist(_)) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*dataStore_, ChunkDataExist(_)).WillRepeatedly(Return(true)); 
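The delete-path tests in these hunks chain two `.WillOnce` clauses so that successive `GetChunkIndexData` calls see different index data, and the `GetChunkIndexDataSecondTimeFail` test below stages its failure on the second call. A minimal self-contained sketch of that sequencing, with a hypothetical `IndexStore` interface standing in for the real data store:

#include <gmock/gmock.h>
#include <gtest/gtest.h>

using ::testing::_;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;

// Hypothetical stand-in for the mocked data store. Build note: link
// against gmock_main, or add a main() as in the earlier sketch.
class IndexStore {
 public:
    virtual ~IndexStore() = default;
    virtual int GetChunkIndexData(int* indexOut) = 0;
};

class MockIndexStore : public IndexStore {
 public:
    MOCK_METHOD1(GetChunkIndexData, int(int* indexOut));
};

TEST(SequencedActions, WillOnceClausesAreConsumedInOrder) {
    MockIndexStore store;
    // Two WillOnce clauses under .Times(2): the first call takes the
    // first action (success), the second call the second (an error
    // code); a third call would fail the expectation.
    EXPECT_CALL(store, GetChunkIndexData(_))
        .Times(2)
        .WillOnce(DoAll(SetArgPointee<0>(1), Return(0)))
        .WillOnce(DoAll(SetArgPointee<0>(2), Return(-1)));

    int index = 0;
    EXPECT_EQ(0, store.GetChunkIndexData(&index));
    EXPECT_EQ(1, index);
    EXPECT_EQ(-1, store.GetChunkIndexData(&index));
    EXPECT_EQ(2, index);
}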
EXPECT_CALL(*dataStore_, DeleteChunkData(_)) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -2412,7 +2066,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleDeleteSnapshotTask_GetChunkIndexDataSecondTimeFail) { + TestHandleDeleteSnapshotTask_GetChunkIndexDataSecondTimeFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2437,9 +2091,7 @@ TEST_F(TestSnapshotCoreImpl, snapInfos.push_back(info2); EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData1; indexData1.PutChunkDataName(ChunkDataName(fileName, seqNum, 0)); @@ -2447,12 +2099,9 @@ TEST_F(TestSnapshotCoreImpl, indexData2.PutChunkDataName(ChunkDataName(fileName, 1, 1)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<1>(indexData1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<1>(indexData2), - Return(kErrCodeInternalError))); + .WillOnce(DoAll(SetArgPointee<1>(indexData1), Return(kErrCodeSuccess))) + .WillOnce( + DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeInternalError))); EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)) .WillRepeatedly(Return(true)); @@ -2466,7 +2115,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleDeleteSnapshotTask_DeleteChunkIndexDataFail) { + TestHandleDeleteSnapshotTask_DeleteChunkIndexDataFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2491,9 +2140,7 @@ TEST_F(TestSnapshotCoreImpl, snapInfos.push_back(info2); EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData1; indexData1.PutChunkDataName(ChunkDataName(fileName, seqNum, 0)); @@ -2501,15 +2148,10 @@ TEST_F(TestSnapshotCoreImpl, indexData2.PutChunkDataName(ChunkDataName(fileName, 1, 1)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<1>(indexData1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<1>(indexData2), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData1), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess))); - EXPECT_CALL(*dataStore_, ChunkDataExist(_)) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*dataStore_, ChunkDataExist(_)).WillRepeatedly(Return(true)); EXPECT_CALL(*dataStore_, DeleteChunkData(_)) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -2528,8 +2170,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleDeleteSnapshotTaskDeleteSnapshotFail) { +TEST_F(TestSnapshotCoreImpl, TestHandleDeleteSnapshotTaskDeleteSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2554,9 +2195,7 @@ TEST_F(TestSnapshotCoreImpl, snapInfos.push_back(info2); EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData1; indexData1.PutChunkDataName(ChunkDataName(fileName, seqNum, 0)); @@ -2564,15 +2203,10 @@ TEST_F(TestSnapshotCoreImpl, 
indexData2.PutChunkDataName(ChunkDataName(fileName, 1, 1)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<1>(indexData1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<1>(indexData2), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData1), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess))); - EXPECT_CALL(*dataStore_, ChunkDataExist(_)) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*dataStore_, ChunkDataExist(_)).WillRepeatedly(Return(true)); EXPECT_CALL(*dataStore_, DeleteChunkData(_)) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -2580,7 +2214,6 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)) .WillRepeatedly(Return(true)); - EXPECT_CALL(*dataStore_, DeleteChunkIndexData(_)) .WillOnce(Return(kErrCodeSuccess)); @@ -2609,9 +2242,7 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) { std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -2620,10 +2251,8 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) { snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -2636,10 +2265,8 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) { ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -2649,29 +2276,22 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) { ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -2689,60 +2309,50 @@ 
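The cancel-path tests below lean on two `Invoke` tricks: a lambda that completes the caller's `SnapCloneClosure` to simulate an asynchronous `ReadChunkSnapshot`, and a lambda capturing `task` so the mocked `DeleteSnapshot` can call `task->Cancel()` mid-run. A minimal sketch of the first trick, with hypothetical `Closure` and `AsyncReader` types standing in for the real ones:

#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>

using ::testing::_;
using ::testing::DoAll;
using ::testing::Invoke;
using ::testing::Return;

// Hypothetical stand-ins for SnapCloneClosure and the async client.
// Build note: link against gmock_main, or add a main() as earlier.
struct Closure {
    int retCode = -1;
    bool ran = false;
    void SetRetCode(int code) { retCode = code; }
    void Run() { ran = true; }
};

class AsyncReader {
 public:
    virtual ~AsyncReader() = default;
    virtual int ReadChunkSnapshot(uint64_t offset, uint64_t len,
                                  char* buf, Closure* done) = 0;
};

class MockAsyncReader : public AsyncReader {
 public:
    MOCK_METHOD4(ReadChunkSnapshot,
                 int(uint64_t offset, uint64_t len, char* buf,
                     Closure* done));
};

TEST(AsyncMockIdiom, CompletesClosureInsideTheAction) {
    MockAsyncReader reader;
    // The Invoke action runs the caller's closure synchronously, so
    // the code under test sees an async read that completed at once;
    // Return(0) is the submit call's own result. The cancel tests use
    // the same Invoke technique with a captured task to call Cancel().
    EXPECT_CALL(reader, ReadChunkSnapshot(_, _, _, _))
        .WillRepeatedly(DoAll(
            Invoke([](uint64_t, uint64_t, char*, Closure* done) {
                done->SetRetCode(0);
                done->Run();
            }),
            Return(0)));

    char buf[16] = {};
    Closure done;
    ASSERT_EQ(0, reader.ReadChunkSnapshot(0, sizeof(buf), buf, &done));
    ASSERT_TRUE(done.ran);
    ASSERT_EQ(0, done.retCode);
}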
TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) { EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .Times(2) - .WillOnce(Invoke([task](const std::string &filename, - const std::string &user, - uint64_t seq) -> int { - task->Cancel(); - return kErrCodeSuccess; - })) + .WillOnce(Invoke([task](const std::string& filename, + const std::string& user, uint64_t seq) -> int { + task->Cancel(); + return kErrCodeSuccess; + })) .WillOnce(Return(LIBCURVE_ERROR::OK)); EXPECT_CALL(*client_, CheckSnapShotStatus(_, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST)); - // 进入cancel + // Enter cancel EXPECT_CALL(*dataStore_, ChunkDataExist(_)) .Times(4) .WillRepeatedly(Return(true)); @@ -2764,7 +2374,7 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) { } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskCancelAfterCreateSnapshotOnCurvefs) { + TestHandleCreateSnapshotTaskCancelAfterCreateSnapshotOnCurvefs) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2778,9 +2388,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -2789,19 +2397,17 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Invoke([task](const UUID& uuid, CASFunc cas) { task->Cancel(); return kErrCodeSuccess; })); - // 进入cancel + // Enter cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .WillOnce(Return(LIBCURVE_ERROR::OK)); @@ 
-2818,7 +2424,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskCancelAfterCreateChunkIndexData) { + TestHandleCreateSnapshotTaskCancelAfterCreateChunkIndexData) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2832,9 +2438,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -2843,10 +2447,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -2859,10 +2461,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -2872,40 +2472,32 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) - .WillOnce(Invoke([task](const ChunkIndexDataName &name, - const ChunkIndexData &meta) { - task->Cancel(); - return kErrCodeSuccess; - })); + .WillOnce(Invoke( + [task](const ChunkIndexDataName& name, const ChunkIndexData& meta) { + task->Cancel(); + return kErrCodeSuccess; + })); - - // 进入cancel + // Enter cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .WillOnce(Return(LIBCURVE_ERROR::OK)); @@ -2925,7 +2517,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskCancelFailOnDeleteChunkData) { + TestHandleCreateSnapshotTaskCancelFailOnDeleteChunkData) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2939,9 +2531,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, 
snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -2950,10 +2540,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -2968,10 +2556,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -2981,29 +2567,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -3021,60 +2600,49 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + 
}), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) - .WillOnce(Invoke([task](const std::string &filename, - const std::string &user, - uint64_t seq) -> int { - task->Cancel(); - return kErrCodeSuccess; - })); + .WillOnce(Invoke([task](const std::string& filename, + const std::string& user, uint64_t seq) -> int { + task->Cancel(); + return kErrCodeSuccess; + })); EXPECT_CALL(*client_, CheckSnapShotStatus(_, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST)); - // 进入cancel - EXPECT_CALL(*dataStore_, ChunkDataExist(_)) - .WillRepeatedly(Return(true)); + // Enter cancel + EXPECT_CALL(*dataStore_, ChunkDataExist(_)).WillRepeatedly(Return(true)); EXPECT_CALL(*dataStore_, DeleteChunkData(_)) .WillRepeatedly(Return(kErrCodeInternalError)); @@ -3086,7 +2654,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskCancelFailOnDeleteChunkIndexData) { + TestHandleCreateSnapshotTaskCancelFailOnDeleteChunkIndexData) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -3100,9 +2668,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -3111,10 +2677,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -3127,10 +2691,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -3140,29 +2702,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, 
GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -3180,58 +2735,48 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) - .WillOnce(Invoke([task](const std::string &filename, - const std::string &user, - uint64_t seq) -> int { - task->Cancel(); - return kErrCodeSuccess; - })); + .WillOnce(Invoke([task](const std::string& filename, + const std::string& user, uint64_t seq) -> int { + task->Cancel(); + return kErrCodeSuccess; + })); EXPECT_CALL(*client_, CheckSnapShotStatus(_, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST)); - // 进入cancel + // Enter cancel EXPECT_CALL(*dataStore_, ChunkDataExist(_)) .Times(4) .WillRepeatedly(Return(true)); @@ -3250,7 +2795,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskCancelFailOnDeleteSnapshot) { + TestHandleCreateSnapshotTaskCancelFailOnDeleteSnapshot) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -3264,9 +2809,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -3275,10 +2818,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -3291,10 +2832,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - 
segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -3304,29 +2843,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -3344,60 +2876,50 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .Times(2) - .WillOnce(Invoke([task](const std::string &filename, - const std::string &user, - uint64_t seq) -> int { - task->Cancel(); - return kErrCodeSuccess; - })) + .WillOnce(Invoke([task](const std::string& filename, + const std::string& user, uint64_t seq) -> int { + task->Cancel(); + return kErrCodeSuccess; + })) .WillOnce(Return(LIBCURVE_ERROR::OK)); EXPECT_CALL(*client_, 
CheckSnapShotStatus(_, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST)); - // 进入cancel + // Enter cancel EXPECT_CALL(*dataStore_, ChunkDataExist(_)) .Times(4) .WillRepeatedly(Return(true)); @@ -3420,4 +2942,3 @@ TEST_F(TestSnapshotCoreImpl, } // namespace snapshotcloneserver } // namespace curve - diff --git a/test/snapshotcloneserver/test_snapshot_service_manager.cpp b/test/snapshotcloneserver/test_snapshot_service_manager.cpp index ba51d90f98..0af03c9315 100644 --- a/test/snapshotcloneserver/test_snapshot_service_manager.cpp +++ b/test/snapshotcloneserver/test_snapshot_service_manager.cpp @@ -20,25 +20,24 @@ * Author: xuchaojie */ -#include #include +#include -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" - -#include "test/snapshotcloneserver/mock_snapshot_server.h" #include "src/common/concurrent/count_down_event.h" +#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/snapshotclone_metric.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" +#include "test/snapshotcloneserver/mock_snapshot_server.h" using curve::common::CountDownEvent; -using ::testing::Return; using ::testing::_; -using ::testing::AnyOf; using ::testing::AllOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; +using ::testing::AnyOf; using ::testing::DoAll; +using ::testing::Invoke; using ::testing::Property; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace snapshotcloneserver { @@ -51,21 +50,16 @@ class TestSnapshotServiceManager : public ::testing::Test { virtual void SetUp() { serverOption_.snapshotPoolThreadNum = 8; serverOption_.snapshotTaskManagerScanIntervalMs = 100; - core_ = - std::make_shared(); - auto metaStore_ = - std::shared_ptr(); + core_ = std::make_shared(); + auto metaStore_ = std::shared_ptr(); snapshotMetric_ = std::make_shared(metaStore_); - std::shared_ptr - taskMgr_ = + std::shared_ptr taskMgr_ = std::make_shared(core_, snapshotMetric_); manager_ = std::make_shared(taskMgr_, core_); - ASSERT_EQ(0, manager_->Init(serverOption_)) - << "manager init fail."; - ASSERT_EQ(0, manager_->Start()) - << "manager start fail."; + ASSERT_EQ(0, manager_->Init(serverOption_)) << "manager init fail."; + ASSERT_EQ(0, manager_->Start()) << "manager start fail."; } virtual void TearDown() { @@ -75,31 +69,22 @@ class TestSnapshotServiceManager : public ::testing::Test { snapshotMetric_ = nullptr; } - void PrepareCreateSnapshot( - const std::string &file, - const std::string &user, - const std::string &desc, - UUID uuid) { + void PrepareCreateSnapshot(const std::string& file, const std::string& user, + const std::string& desc, UUID uuid) { SnapshotInfo info(uuid, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { - task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce(Invoke([&cond1](std::shared_ptr task) { + task->GetSnapshotInfo().SetStatus(Status::done); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, 
ret); cond1.Wait(); @@ -112,8 +97,7 @@ class TestSnapshotServiceManager : public ::testing::Test { SnapshotCloneServerOptions serverOption_; }; -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotSuccess) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotSuccess) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -122,32 +106,25 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -155,8 +132,7 @@ TEST_F(TestSnapshotServiceManager, ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotPreFail) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotPreFail) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -165,21 +141,13 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeInternalError))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeInternalError))); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeInternalError, ret); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotSuccessByTaskExist) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotSuccessByTaskExist) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -188,20 +156,13 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeTaskExist))); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeTaskExist))); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotPushTaskFail) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotPushTaskFail) { const std::string file1 = "file1"; const std::string user1 = "user1"; const std::string desc1 = "snap1"; @@ -209,33 +170,21 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuid1, user1, file1, desc1); EXPECT_CALL(*core_, CreateSnapshotPre(file1, user1, desc1, 
_)) - .WillRepeatedly(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillRepeatedly(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([] (std::shared_ptr task) { - })); + .WillOnce(Invoke([](std::shared_ptr task) {})); UUID uuid; - int ret = manager_->CreateSnapshot( - file1, - user1, - desc1, - &uuid); + int ret = manager_->CreateSnapshot(file1, user1, desc1, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); UUID uuid2; - ret = manager_->CreateSnapshot( - file1, - user1, - desc1, - &uuid2); + ret = manager_->CreateSnapshot(file1, user1, desc1, &uuid2); ASSERT_EQ(kErrCodeInternalError, ret); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(1, snapshotMetric_->snapshotDoing.get_value()); @@ -243,8 +192,7 @@ TEST_F(TestSnapshotServiceManager, ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotMultiThreadSuccess) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotMultiThreadSuccess) { const std::string file1 = "file1"; const std::string file2 = "file2"; const std::string file3 = "file3"; @@ -264,15 +212,9 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, CreateSnapshotPre(_, _, _, _)) .Times(3) - .WillOnce(DoAll( - SetArgPointee<3>(info1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info2), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info3), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info1), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info2), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info3), Return(kErrCodeSuccess))); std::condition_variable cv; std::mutex m; @@ -281,43 +223,28 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) .Times(3) - .WillRepeatedly(Invoke([&cv, &m, &count] ( - std::shared_ptr task) { - task->GetSnapshotInfo().SetStatus(Status::done); - std::unique_lock lk(m); - count++; - task->Finish(); - cv.notify_all(); - })); - - - int ret = manager_->CreateSnapshot( - file1, - user, - desc1, - &uuid); + .WillRepeatedly( + Invoke([&cv, &m, &count](std::shared_ptr task) { + task->GetSnapshotInfo().SetStatus(Status::done); + std::unique_lock lk(m); + count++; + task->Finish(); + cv.notify_all(); + })); + + int ret = manager_->CreateSnapshot(file1, user, desc1, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file2, - user, - desc2, - &uuid); + ret = manager_->CreateSnapshot(file2, user, desc2, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file3, - user, - desc3, - &uuid); + ret = manager_->CreateSnapshot(file3, user, desc3, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - cv.wait(lk, [&count](){return count == 3;}); + cv.wait(lk, [&count]() { return count == 3; }); - - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); ASSERT_EQ(3, snapshotMetric_->snapshotSucceed.get_value()); @@ -325,7 +252,7 @@ 
TEST_F(TestSnapshotServiceManager, } TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotMultiThreadSameFileSuccess) { + TestCreateSnapshotMultiThreadSameFileSuccess) { const std::string file1 = "file1"; const std::string user = "user1"; const std::string desc1 = "snap1"; @@ -343,52 +270,32 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, CreateSnapshotPre(_, _, _, _)) .Times(3) - .WillOnce(DoAll( - SetArgPointee<3>(info1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info2), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info3), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info1), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info2), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info3), Return(kErrCodeSuccess))); CountDownEvent cond1(3); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) .Times(3) - .WillRepeatedly(Invoke([&cond1] ( - std::shared_ptr task) { - task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); - - - int ret = manager_->CreateSnapshot( - file1, - user, - desc1, - &uuid); + .WillRepeatedly( + Invoke([&cond1](std::shared_ptr task) { + task->GetSnapshotInfo().SetStatus(Status::done); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file1, user, desc1, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file1, - user, - desc2, - &uuid); + ret = manager_->CreateSnapshot(file1, user, desc2, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file1, - user, - desc3, - &uuid); + ret = manager_->CreateSnapshot(file1, user, desc3, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -408,19 +315,18 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotSuccess) { .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -428,7 +334,7 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotSuccess) { ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } -// 删除转cancel用例 +// Delete to cancel use case TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelSuccess) { const std::string file = "file1"; const std::string user = "user1"; @@ -438,30 +344,23 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelSuccess) { SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); 
+ .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] ( - std::shared_ptr task) { - LOG(INFO) << "in HandleCreateSnapshotTask"; - while (1) { - if (task->IsCanceled()) { - break; - } - } - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce(Invoke([&cond1](std::shared_ptr task) { + LOG(INFO) << "in HandleCreateSnapshotTask"; + while (1) { + if (task->IsCanceled()) { + break; + } + } + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); @@ -496,19 +395,18 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelByDeleteSuccess) { .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -516,8 +414,6 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelByDeleteSuccess) { ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } - - TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotPreFail) { const std::string file = "file1"; const std::string user = "user1"; @@ -543,10 +439,10 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotPushTaskFail) { .WillRepeatedly(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - cond1.Signal(); - })); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); @@ -555,9 +451,8 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotPushTaskFail) { ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeInternalError, ret); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(1, snapshotMetric_->snapshotDoing.get_value()); @@ -579,19 +474,18 @@ TEST_F(TestSnapshotServiceManager, TestCreateAndDeleteSnapshotSuccess) { .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 
2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -599,7 +493,6 @@ TEST_F(TestSnapshotServiceManager, TestCreateAndDeleteSnapshotSuccess) { ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } - TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { const std::string file = "file1"; const std::string user = "user1"; @@ -610,29 +503,22 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke( - [&cond1, progress] (std::shared_ptr task) { - task->SetProgress(progress); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, progress](std::shared_ptr task) { + task->SetProgress(progress); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - const std::string file2 = "file2"; const std::string desc2 = "snap2"; UUID uuid2 = "uuid2"; @@ -659,8 +545,7 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { snapInfo.push_back(snap4); EXPECT_CALL(*core_, GetFileSnapshotInfo(file, _)) - .WillOnce(DoAll(SetArgPointee<1>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapInfo), Return(kErrCodeSuccess))); std::vector fileSnapInfo; ret = manager_->GetFileSnapshotInfo(file, user, &fileSnapInfo); @@ -688,8 +573,7 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { ASSERT_EQ(Status::error, s.GetStatus()); ASSERT_EQ(0, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } } @@ -702,8 +586,8 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoFail) { std::vector snapInfo; EXPECT_CALL(*core_, GetFileSnapshotInfo(file, _)) - .WillOnce(DoAll(SetArgPointee<1>(snapInfo), - Return(kErrCodeInternalError))); + .WillOnce( + DoAll(SetArgPointee<1>(snapInfo), Return(kErrCodeInternalError))); std::vector fileSnapInfo; int ret = manager_->GetFileSnapshotInfo(file, user, &fileSnapInfo); @@ -722,8 +606,8 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoFail2) { snapInfo.push_back(snap1); EXPECT_CALL(*core_, GetFileSnapshotInfo(file, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(snapInfo), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfo), Return(kErrCodeSuccess))); std::vector fileSnapInfo; int ret = manager_->GetFileSnapshotInfo(file, user, &fileSnapInfo); @@ -740,29 +624,22 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke( - [&cond1, progress] (std::shared_ptr task) { - task->SetProgress(progress); - cond1.Signal(); - })); - - 
int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, progress](std::shared_ptr task) { + task->SetProgress(progress); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - const std::string file2 = "file2"; const std::string desc2 = "snap2"; UUID uuid2 = "uuid2"; @@ -789,8 +666,7 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { snapInfo.push_back(snap4); EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // empty filter SnapshotFilterCondition filter; @@ -826,14 +702,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::error, s.GetStatus()); ASSERT_EQ(0, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter uuid SnapshotFilterCondition filter2; @@ -852,14 +726,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::pending, s.GetStatus()); ASSERT_EQ(progress, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter by filename SnapshotFilterCondition filter3; @@ -890,14 +762,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::error, s.GetStatus()); ASSERT_EQ(0, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter by status SnapshotFilterCondition filter4; @@ -923,14 +793,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::done, s.GetStatus()); ASSERT_EQ(100, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter by user SnapshotFilterCondition filter5; @@ -949,8 +817,7 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::done, s.GetStatus()); ASSERT_EQ(100, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } } @@ -964,8 +831,8 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterFail) { std::vector snapInfo; EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeInternalError))); + .WillOnce( + DoAll(SetArgPointee<0>(snapInfo), 
Return(kErrCodeInternalError)));
 
     SnapshotFilterCondition filter;
     std::vector fileSnapInfo;
     int ret = manager_->GetSnapshotListByFilter(filter, &fileSnapInfo);
     ASSERT_EQ(kErrCodeInternalError, ret);
 }
@@ -993,32 +860,30 @@ TEST_F(TestSnapshotServiceManager, TestRecoverSnapshotTaskSuccess) {
     list.push_back(snap2);
     list.push_back(snap3);
     EXPECT_CALL(*core_, GetSnapshotList(_))
-        .WillOnce(DoAll(SetArgPointee<0>(list),
-                Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<0>(list), Return(kErrCodeSuccess)));
 
     CountDownEvent cond1(2);
 
     EXPECT_CALL(*core_, HandleCreateSnapshotTask(_))
-        .WillOnce(Invoke([&cond1] (std::shared_ptr task) {
+        .WillOnce(Invoke([&cond1](std::shared_ptr task) {
             task->GetSnapshotInfo().SetStatus(Status::done);
-                        task->Finish();
-                        cond1.Signal();
-                    }));
+            task->Finish();
+            cond1.Signal();
+        }));
 
     EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_))
-        .WillOnce(Invoke([&cond1] (std::shared_ptr task) {
+        .WillOnce(Invoke([&cond1](std::shared_ptr task) {
             task->GetSnapshotInfo().SetStatus(Status::done);
-                        task->Finish();
-                        cond1.Signal();
-                    }));
+            task->Finish();
+            cond1.Signal();
+        }));
 
     int ret = manager_->RecoverSnapshotTask();
     ASSERT_EQ(kErrCodeSuccess, ret);
 
     cond1.Wait();
 
-    std::this_thread::sleep_for(
-        std::chrono::milliseconds(
-            serverOption_.snapshotTaskManagerScanIntervalMs * 2));
+    std::this_thread::sleep_for(std::chrono::milliseconds(
+        serverOption_.snapshotTaskManagerScanIntervalMs * 2));
 
     ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value());
     ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value());
@@ -1041,15 +906,13 @@ TEST_F(TestSnapshotServiceManager, TestRecoverSnapshotTaskFail) {
     list.push_back(snap1);
     list.push_back(snap2);
     EXPECT_CALL(*core_, GetSnapshotList(_))
-        .WillOnce(DoAll(SetArgPointee<0>(list),
-                Return(kErrCodeInternalError)));
+        .WillOnce(DoAll(SetArgPointee<0>(list), Return(kErrCodeInternalError)));
 
     int ret = manager_->RecoverSnapshotTask();
     ASSERT_EQ(kErrCodeInternalError, ret);
 }
 
-TEST_F(TestSnapshotServiceManager,
-    TestCancelSnapshotSuccess) {
+TEST_F(TestSnapshotServiceManager, TestCancelSnapshotSuccess) {
     const std::string file = "file1";
     const std::string user = "user1";
     const std::string desc = "snap1";
@@ -1062,31 +925,27 @@
     SnapshotInfo info2(uuidOut2, user, file, desc);
     EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _))
         .Times(2)
-        .WillOnce(DoAll(
-            SetArgPointee<3>(info),
-            Return(kErrCodeSuccess)))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(info2),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess)))
+        .WillOnce(DoAll(SetArgPointee<3>(info2), Return(kErrCodeSuccess)));
 
     CountDownEvent cond1(1);
     CountDownEvent cond2(1);
 
     EXPECT_CALL(*core_, HandleCreateSnapshotTask(_))
-        .WillOnce(Invoke([&cond1, &cond2] (
-            std::shared_ptr task) {
-            LOG(INFO) << "in mock HandleCreateSnapshotTask";
-            while (1) {
-                if (task->IsCanceled()) {
-                    cond1.Signal();
-                    break;
-                }
-            }
-            task->Finish();
-            cond2.Signal();
-        }));
-
-    // 取消排队的快照会调一次
+        .WillOnce(
+            Invoke([&cond1, &cond2](std::shared_ptr task) {
+                LOG(INFO) << "in mock HandleCreateSnapshotTask";
+                while (1) {
+                    if (task->IsCanceled()) {
+                        cond1.Signal();
+                        break;
+                    }
+                }
+                task->Finish();
+                cond2.Signal();
+            }));
+
+    // Canceling a snapshot that is still queued invokes this once
     EXPECT_CALL(*core_, HandleCancelUnSchduledSnapshotTask(_))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1099,32 +958,20 @@
     EXPECT_CALL(*core_, HandleCancelScheduledSnapshotTask(_))
         .WillOnce(Invoke(callback));
 
-    int ret = manager_->CreateSnapshot(
-        file,
-        user,
-        desc,
-        &uuid);
+    int ret = manager_->CreateSnapshot(file, user,
desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); - // 再打一个快照,覆盖排队的情况 - ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid2); + // Take another snapshot to cover the queuing situation + ret = manager_->CreateSnapshot(file, user, desc, &uuid2); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid2, uuidOut2); - // 先取消在排队的快照 - ret = manager_->CancelSnapshot(uuidOut2, - user, - file); + // Cancel queued snapshots first + ret = manager_->CancelSnapshot(uuidOut2, user, file); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CancelSnapshot(uuidOut, - user, - file); + ret = manager_->CancelSnapshot(uuidOut, user, file); ASSERT_EQ(kErrCodeSuccess, ret); @@ -1132,8 +979,7 @@ TEST_F(TestSnapshotServiceManager, cond2.Wait(); } -TEST_F(TestSnapshotServiceManager, - TestCancelSnapshotFailDiffUser) { +TEST_F(TestSnapshotServiceManager, TestCancelSnapshotFailDiffUser) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -1142,41 +988,32 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); CountDownEvent cond2(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1, &cond2] ( - std::shared_ptr task) { - cond2.Wait(); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, &cond2](std::shared_ptr task) { + cond2.Wait(); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); std::string user2 = "user2"; - ret = manager_->CancelSnapshot(uuidOut, - user2, - file); + ret = manager_->CancelSnapshot(uuidOut, user2, file); cond2.Signal(); ASSERT_EQ(kErrCodeInvalidUser, ret); cond1.Wait(); } -TEST_F(TestSnapshotServiceManager, - TestCancelSnapshotFailDiffFile) { +TEST_F(TestSnapshotServiceManager, TestCancelSnapshotFailDiffFile) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -1185,40 +1022,30 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); CountDownEvent cond2(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1, &cond2] ( - std::shared_ptr task) { - cond2.Wait(); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, &cond2](std::shared_ptr task) { + cond2.Wait(); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); std::string file2 = "file2"; - ret = manager_->CancelSnapshot(uuidOut, - user, - file2); + ret = manager_->CancelSnapshot(uuidOut, user, file2); cond2.Signal(); ASSERT_EQ(kErrCodeFileNameNotMatch, ret); cond1.Wait(); } - } // namespace snapshotcloneserver } // namespace curve - diff --git a/test/tools/chunkserver_client_test.cpp b/test/tools/chunkserver_client_test.cpp index 
9af94d01d3..b88d1fab08 100644 --- a/test/tools/chunkserver_client_test.cpp +++ b/test/tools/chunkserver_client_test.cpp @@ -20,14 +20,15 @@ * Author: charisu */ -#include #include "src/tools/chunkserver_client.h" -#include "test/client/fake/mockMDS.h" + +#include + #include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mockMDS.h" -using curve::chunkserver::GetChunkInfoResponse; using curve::chunkserver::CHUNK_OP_STATUS; - +using curve::chunkserver::GetChunkInfoResponse; DECLARE_string(chunkserver_list); namespace brpc { @@ -46,9 +47,7 @@ class ChunkServerClientTest : public ::testing::Test { fakemds.Initialize(); fakemds.CreateFakeChunkservers(false); } - void TearDown() { - fakemds.UnInitialize(); - } + void TearDown() { fakemds.UnInitialize(); } ChunkServerClient client; FakeMDS fakemds; }; @@ -59,37 +58,36 @@ TEST_F(ChunkServerClientTest, Init) { } TEST_F(ChunkServerClientTest, GetRaftStatus) { - std::vector statServices = - fakemds.GetRaftStateService(); - // 正常情况 + std::vector statServices = + fakemds.GetRaftStateService(); + // Normal situation butil::IOBuf iobuf; iobuf.append("test"); statServices[0]->SetBuf(iobuf); ASSERT_EQ(0, client.Init("127.0.0.1:9191")); ASSERT_EQ(0, client.GetRaftStatus(&iobuf)); - // 传入空指针 + // Incoming null pointer ASSERT_EQ(-1, client.GetRaftStatus(nullptr)); - // RPC失败的情况 + // The situation of RPC failure statServices[0]->SetFailed(true); ASSERT_EQ(-1, client.GetRaftStatus(&iobuf)); } TEST_F(ChunkServerClientTest, CheckChunkServerOnline) { - std::vector chunkServices = fakemds.GetChunkservice(); + std::vector chunkServices = fakemds.GetChunkservice(); brpc::Controller cntl; - std::unique_ptr response( - new GetChunkInfoResponse()); + std::unique_ptr response(new GetChunkInfoResponse()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); std::unique_ptr fakeret( new FakeReturn(&cntl, static_cast(response.get()))); chunkServices[0]->SetGetChunkInfo(fakeret.get()); - // 正常情况 + // Normal situation ASSERT_EQ(0, client.Init("127.0.0.1:9191")); ASSERT_EQ(true, client.CheckChunkServerOnline()); - // RPC失败的情况 + // The situation of RPC failure cntl.SetFailed("fail for test"); ASSERT_EQ(false, client.CheckChunkServerOnline()); } @@ -98,23 +96,23 @@ TEST_F(ChunkServerClientTest, GetCopysetStatus2) { auto copysetServices = fakemds.GetCreateCopysetService(); CopysetStatusRequest request; CopysetStatusResponse response; - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address("127.0.0.1:9191"); request.set_logicpoolid(1); request.set_copysetid(1001); request.set_allocated_peer(peer); request.set_queryhash(true); - // 正常情况 + // Normal situation ASSERT_EQ(0, client.Init("127.0.0.1:9191")); ASSERT_EQ(0, client.GetCopysetStatus(request, &response)); - // 返回码不ok的情况 + // The situation where the return code is not OK copysetServices[0]->SetStatus( COPYSET_OP_STATUS::COPYSET_OP_STATUS_COPYSET_NOTEXIST); ASSERT_EQ(-1, client.GetCopysetStatus(request, &response)); - // RPC失败的情况 + // The situation of RPC failure brpc::Controller cntl; std::unique_ptr fakeret(new FakeReturn(&cntl, nullptr)); copysetServices[0]->SetFakeReturn(fakeret.get()); @@ -122,27 +120,26 @@ TEST_F(ChunkServerClientTest, GetCopysetStatus2) { } TEST_F(ChunkServerClientTest, GetChunkHash) { - std::vector chunkServices = fakemds.GetChunkservice(); + std::vector chunkServices = fakemds.GetChunkservice(); brpc::Controller cntl; - std::unique_ptr response( - new GetChunkHashResponse()); + std::unique_ptr response(new 
GetChunkHashResponse());
     response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
     response->set_hash("1234");
     std::unique_ptr fakeret(
         new FakeReturn(&cntl, static_cast(response.get())));
     chunkServices[0]->SetGetChunkHash(fakeret.get());
     Chunk chunk(1, 100, 1001);
 
-    // 正常情况
+    // Normal situation
     ASSERT_EQ(0, client.Init("127.0.0.1:9191"));
     std::string hash;
     ASSERT_EQ(0, client.GetChunkHash(chunk, &hash));
     ASSERT_EQ("1234", hash);
 
-    // RPC失败的情况
+    // The situation of RPC failure
     cntl.SetFailed("fail for test");
     ASSERT_EQ(-1, client.GetChunkHash(chunk, &hash));
 
-    // 返回码不为ok
+    // The return code is not OK
     response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
     ASSERT_EQ(-1, client.GetChunkHash(chunk, &hash));
 }
diff --git a/test/tools/config/data_check.conf b/test/tools/config/data_check.conf
index 7380f75bd5..0f93452c72 100644
--- a/test/tools/config/data_check.conf
+++ b/test/tools/config/data_check.conf
@@ -15,131 +15,131 @@
 #
 #
-# mds一侧配置信息
+# MDS side configuration information
 #
-# mds的地址信息
+# Address information of mds
 mds.listen.addr=127.0.0.1:9160
-# 初始化阶段向mds注册开关,默认为开
+# Whether to register with MDS during the initialization phase; on by default
 mds.registerToMDS=true
-# 与mds通信的超时时间
+# Timeout for communication with MDS
 mds.rpcTimeoutMS=1000
-# 与mds通信最大的超时时间, 指数退避的超时间不能超过这个值
+# Maximum timeout for communication with MDS; the exponential-backoff timeout cannot exceed this value
 mds.maxRPCTimeoutMS=2000
-# 在当前mds上连续重试次数超过该限制就切换
+# Switch to another MDS if the number of consecutive retries on the current one exceeds this limit
 mds.maxFailedTimesBeforeChangeMDS=5
-# 与MDS一侧保持一个lease时间内多少次续约
+# Number of lease renewals within one lease period with the MDS
 mds.refreshTimesPerLease=4
-# mds RPC接口每次重试之前需要先睡眠一段时间
+# The MDS RPC interface sleeps for a while before each retry
 mds.rpcRetryIntervalUS=100000
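Read together, the MDS knobs above describe a retry loop with exponentially backed-off timeouts; the following worked illustration uses the default values in this file (the doubling factor is an assumption for illustration, not a claim about the implementation's exact backoff schedule):

# Illustration (assumed doubling backoff, values from the defaults above):
#   attempt 1: RPC timeout 1000 ms   (mds.rpcTimeoutMS)
#   attempt 2: RPC timeout 2000 ms   (doubled, already at the cap)
#   attempt 3+: RPC timeout 2000 ms  (capped by mds.maxRPCTimeoutMS)
# Between attempts the client sleeps mds.rpcRetryIntervalUS (100 ms);
# after 5 consecutive failures on the same MDS
# (mds.maxFailedTimesBeforeChangeMDS), the client switches to another
# MDS address.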
 #
-################# metacache配置信息 ################
+################# Metacache Configuration Information ################
 #
-# 获取leader的rpc超时时间
+# RPC timeout for getting the leader
 metacache.getLeaderTimeOutMS=1000
-# 获取leader的backup request超时时间
+# Backup request timeout for getting the leader
 metacache.getLeaderBackupRequestMS=100
-# 获取leader的重试次数
+# Number of retries for getting the leader
 metacache.getLeaderRetry=3
-# getleader接口每次重试之前需要先睡眠一段时间
+# The getleader interface needs to sleep for a period of time before each retry
 metacache.rpcRetryIntervalUS=100000
 #
-############### 调度层的配置信息 #############
+############### Configuration information of the scheduling layer #############
 #
-# 调度层队列大小,每个文件对应一个队列
-# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。
+# Scheduling layer queue size, with one queue for each file
+# The depth of the scheduling queue can affect the overall throughput of the client, as it stores asynchronous IO tasks.
 schedule.queueCapacity=4096
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of execution threads for the queue
+# The task of execution threads is to retrieve IO and then send it over the network before moving on to the next network task.
+# The time it takes for a task to be retrieved from the queue and the RPC request to be sent typically ranges from 20 microseconds (20us) to 100 microseconds (100us). The lower end of this range, 20us, is under normal conditions when leader acquisition is not required during transmission. If leader acquisition is necessary during transmission, the time may extend to around 100us.
+# The throughput of a single thread ranges from 100,000 (10w) to 500,000 (50w) tasks per second. This performance level meets the requirements.
 schedule.threadpoolSize=2
-# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程
-# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回,
-# 这样libcurve不占用qemu的线程,不阻塞其异步调用
+# Task queue introduced to isolate the QEMU-side thread, since there is only one IO thread on the QEMU side
+# When the QEMU side calls the AIO interface, it directly pushes the call to the task queue and returns,
+# This way, libcurve does not occupy QEMU's threads and does not block its asynchronous calls
 isolation.taskQueueCapacity=500000
-# 任务队列线程池大小, 默认值为1个线程
+# Task queue thread pool size, default value is 1 thread
 isolation.taskThreadPoolSize=1
 #
-################ 与chunkserver通信相关配置 #############
+################ Configuration related to communication with chunkserver #############
 #
-# 读写接口失败的OP之间重试睡眠
+# Retry sleep interval between failed read/write OPs
 chunkserver.opRetryIntervalUS=50000
-# 失败的OP重试次数
+# Number of failed OP retries
 chunkserver.opMaxRetry=3
-# 与chunkserver通信的rpc超时时间
+# RPC timeout for communication with chunkserver
 chunkserver.rpcTimeoutMS=1000
-# 开启基于appliedindex的读,用于性能优化
+# Enable reading based on appliedindex for performance optimization
 chunkserver.enableAppliedIndexRead=1
-# 下发IO最大的分片KB
+# Maximum split size (KB) for issued IO
 global.fileIOSplitMaxSizeKB=4
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; each file's inflight RPCs are counted independently
 global.fileMaxInFlightRPCNum=2048
-# 重试请求之间睡眠最长时间
-# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间
-# 这个时间最大为maxRetrySleepIntervalUs
+# Maximum sleep time between retry requests
+# Because when the network is congested or the chunkserver is overloaded, it is necessary to increase sleep time
+# The maximum time for this is maxRetrySleepIntervalUs
 chunkserver.maxRetrySleepIntervalUS=8000000
-# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略
-# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间
-# 这个时间最大为maxTimeoutMS
+# Maximum RPC timeout for retry requests; the timeout follows an exponential backoff strategy
+# Because timeouts occur when the network is congested, it is necessary to increase the RPC timeout
+# The maximum time for this is maxTimeoutMS
 chunkserver.maxRPCTimeoutMS=8000
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# If this value is exceeded, a health check will be conducted, and if the health check fails, it will be marked as unstable
 chunkserver.maxStableTimeoutTimes=64
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# The timeout of health check requests after consecutive RPC timeouts on chunkserver
 chunkserver.checkHealthTimeoutMs=100
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on the same server exceeds this value,
+# all chunkservers on that server will be marked as unstable
 chunkserver.serverStableThreshold=3
-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# When an RPC has been retried more than maxRetryTimesBeforeConsiderSuspend times,
+# the IO is marked as suspended and the metric raises an alarm
 chunkserver.maxRetryTimesBeforeConsiderSuspend=20
 chunkserver.opRetryIntervalUS=100000
 metacache.getLeaderBackupRequestMS=100
 #
-################# log相关配置 ###############
+################# Log related configuration ###############
 #
-
log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.logLevel=0 -# 设置log的路径 +# Set the path of the log global.logPath=./runlog/ -# 单元测试情况下 +# In the case of unit testing # logpath=./runlog/ # -############### metric 配置信息 ############# +############### metric configuration information ############# # global.metricDummyServerStartPort=9000 # -# session map文件,存储打开文件的filename到path的映射 +# session map file, storing the mapping from filename to path of the opened file # global.sessionMapPath=./session_map.json diff --git a/test/tools/copyset_check_core_test.cpp b/test/tools/copyset_check_core_test.cpp index 9ef6de55ce..ef085e2548 100644 --- a/test/tools/copyset_check_core_test.cpp +++ b/test/tools/copyset_check_core_test.cpp @@ -20,20 +20,22 @@ * Author: charisu */ -#include #include "src/tools/copyset_check_core.h" -#include "test/tools/mock/mock_mds_client.h" + +#include + #include "test/tools/mock/mock_chunkserver_client.h" +#include "test/tools/mock/mock_mds_client.h" -using ::testing::_; -using ::testing::Return; -using ::testing::DoAll; -using ::testing::SetArgPointee; -using ::testing::An; using curve::mds::topology::ChunkServerStatus; +using curve::mds::topology::CopySetServerInfo; using curve::mds::topology::DiskState; using curve::mds::topology::OnlineState; -using curve::mds::topology::CopySetServerInfo; +using ::testing::_; +using ::testing::An; +using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; DECLARE_uint64(operatorMaxPeriod); DECLARE_bool(checkOperator); @@ -69,9 +71,9 @@ class CopysetCheckCoreTest : public ::testing::Test { } } - void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo *csInfo, - uint64_t csId, bool offline = false, - bool retired = false) { + void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo* csInfo, + uint64_t csId, bool offline = false, + bool retired = false) { csInfo->set_chunkserverid(csId); csInfo->set_disktype("ssd"); csInfo->set_hostip("127.0.0.1"); @@ -93,7 +95,7 @@ class CopysetCheckCoreTest : public ::testing::Test { csInfo->set_diskused(512); } - void GetServerInfoForTest(curve::mds::topology::ServerInfo *serverInfo) { + void GetServerInfoForTest(curve::mds::topology::ServerInfo* serverInfo) { serverInfo->set_serverid(1); serverInfo->set_hostname("localhost"); serverInfo->set_internalip("127.0.0.1"); @@ -108,24 +110,24 @@ class CopysetCheckCoreTest : public ::testing::Test { } void GetIoBufForTest(butil::IOBuf* buf, const std::string& gId, - const std::string& state = "FOLLOWER", - bool noLeader = false, - bool installingSnapshot = false, - bool peersLess = false, - bool gapBig = false, - bool parseErr = false, - bool minOffline = false, - bool majOffline = false) { + const std::string& state = "FOLLOWER", + bool noLeader = false, bool installingSnapshot = false, + bool peersLess = false, bool gapBig = false, + bool parseErr = false, bool minOffline = false, + bool majOffline = false) { butil::IOBufBuilder os; - os << "[" << gId << "]\r\n"; + os << "[" << gId << "]\r\n"; if (peersLess) { os << "peers: \r\n"; } else if (minOffline) { - os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 127.0.0.1:9194:0\r\n"; // NOLINT + os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 " + "127.0.0.1:9194:0\r\n"; // NOLINT } else if (majOffline) { - os << "peers: 127.0.0.1:9191:0 127.0.0.1:9194:0 127.0.0.1:9195:0\r\n"; // NOLINT + os << "peers: 127.0.0.1:9191:0 127.0.0.1:9194:0 " + "127.0.0.1:9195:0\r\n"; // NOLINT } else { - os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 
127.0.0.1:9193:0\r\n"; // NOLINT + os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 " + "127.0.0.1:9193:0\r\n"; // NOLINT } os << "storage: [2581, 2580]\n"; if (parseErr) { @@ -135,7 +137,9 @@ class CopysetCheckCoreTest : public ::testing::Test { } os << "state_machine: Idle\r\n"; if (state == "LEADER") { - os << "state: " << "LEADER" << "\r\n"; + os << "state: " + << "LEADER" + << "\r\n"; os << "replicator_123: next_index="; if (gapBig) { os << "1000"; @@ -150,11 +154,15 @@ class CopysetCheckCoreTest : public ::testing::Test { } os << "hc=4211759 ac=1089 ic=0\r\n"; } else if (state == "FOLLOWER") { - os << "state: " << "FOLLOWER" << "\r\n"; + os << "state: " + << "FOLLOWER" + << "\r\n"; if (noLeader) { - os << "leader: " << "0.0.0.0:0:0\r\n"; + os << "leader: " + << "0.0.0.0:0:0\r\n"; } else { - os << "leader: " << "127.0.0.1:9192:0\r\n"; + os << "leader: " + << "127.0.0.1:9192:0\r\n"; } } else { os << "state: " << state << "\r\n"; @@ -176,7 +184,7 @@ TEST_F(CopysetCheckCoreTest, Init) { ASSERT_EQ(-1, copysetCheck.Init("127.0.0.1:6666")); } -// CheckOneCopyset正常情况 +// CheckOneCopyset normal situation TEST_F(CopysetCheckCoreTest, CheckOneCopysetNormal) { std::vector csLocs; butil::IOBuf followerBuf; @@ -191,17 +199,12 @@ TEST_F(CopysetCheckCoreTest, CheckOneCopysetNormal) { EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySet(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(csLocs), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(6) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<2>(csLocs), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(6).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(6) - .WillOnce(DoAll(SetArgPointee<0>(leaderBuf), - Return(0))) - .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(leaderBuf), Return(0))) + .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf), Return(0))); CopysetCheckCore copysetCheck(mdsClient_, csClient_); ASSERT_EQ(CheckResult::kHealthy, copysetCheck.CheckOneCopyset(1, 100)); butil::IOBuf iobuf; @@ -215,7 +218,7 @@ TEST_F(CopysetCheckCoreTest, CheckOneCopysetNormal) { ASSERT_EQ(iobuf.to_string(), copysetCheck.GetCopysetDetail()); } -// CheckOneCopyset异常情况 +// CheckOneCopyset Exception TEST_F(CopysetCheckCoreTest, CheckOneCopysetError) { std::vector csLocs; butil::IOBuf followerBuf; @@ -231,52 +234,45 @@ TEST_F(CopysetCheckCoreTest, CheckOneCopysetError) { copyset.set_logicalpoolid(1); copyset.set_copysetid(100); - // 1、GetChunkServerListInCopySet失败 + // 1. GetChunkServerListInCopySet failed EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySet(_, _, _)) .Times(1) .WillOnce(Return(-1)); CopysetCheckCore copysetCheck1(mdsClient_, csClient_); ASSERT_EQ(CheckResult::kOtherErr, copysetCheck1.CheckOneCopyset(1, 100)); - // 2、copyset不健康 + // 2. 
Copyset is unhealthy GetIoBufForTest(&followerBuf, "4294967396", "FOLLOWER", true); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySet(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(csLocs), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(csLocs), Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(3) - .WillRepeatedly(Return(0)); + EXPECT_CALL(*csClient_, Init(_)).Times(3).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf), Return(0))); CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(CheckResult::kOtherErr, copysetCheck2.CheckOneCopyset(1, 100)); - // 3、有peer不在线,一个是chunkserver不在线,一个是copyset不在线 + // 3. Some peers are offline: one because the chunkserver is offline, + // the other because the copyset is offline GetIoBufForTest(&followerBuf, "4294967397"); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySet(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(csLocs), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(csLocs), Return(0))); EXPECT_CALL(*csClient_, Init(_)) .Times(4) .WillOnce(Return(-1)) .WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(3) - .WillOnce(DoAll(SetArgPointee<0>(leaderBuf), - Return(0))) - .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(leaderBuf), Return(0))) + .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf), Return(0))); CopysetCheckCore copysetCheck3(mdsClient_, csClient_); ASSERT_EQ(CheckResult::kMajorityPeerNotOnline, - copysetCheck3.CheckOneCopyset(1, 100)); + copysetCheck3.CheckOneCopyset(1, 100)); } - -// CheckCopysetsOnChunkserver正常情况 +// CheckCopysetsOnChunkserver normal condition TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerHealthy) { ChunkServerIdType csId = 1; std::string csAddr = "127.0.0.1:9191"; @@ -297,63 +293,52 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerHealthy) { csServerInfos.emplace_back(csServerInfo); } - // mds返回Chunkserver retired的情况,直接返回0 + // If MDS reports the chunkserver as retired, return 0 directly GetCsInfoForTest(&csInfo, csId, false, "LEADER"); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); CopysetCheckCore copysetCheck1(mdsClient_, csClient_); ASSERT_EQ(0, copysetCheck1.CheckCopysetsOnChunkServer(csId)); ASSERT_DOUBLE_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); expectedRes[kTotal].insert(gId); - // 通过id查询,有一个copyset配置组中没有当前chunkserver,应忽略 + // When querying by ID, one copyset's configuration group does not + // contain the current chunkserver; it should be ignored GetCsInfoForTest(&csInfo, csId); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(4) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(4).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(4) - .WillOnce(DoAll(SetArgPointee<0>(followerBuf1), - Return(0))) - .WillOnce(DoAll(SetArgPointee<0>(leaderBuf), - Return(0))) - .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf1), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(followerBuf1), Return(0))) +
.WillOnce(DoAll(SetArgPointee<0>(leaderBuf), Return(0))) + .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf1), Return(0))); CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(0, copysetCheck2.CheckCopysetsOnChunkServer(csId)); ASSERT_DOUBLE_EQ(0, copysetCheck2.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes()); - // 通过地址查询 + // Query by address EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csAddr, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(1) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(1).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(followerBuf2), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(followerBuf2), Return(0))); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), Return(0))); CopysetCheckCore copysetCheck3(mdsClient_, csClient_); ASSERT_EQ(0, copysetCheck3.CheckCopysetsOnChunkServer(csAddr)); ASSERT_DOUBLE_EQ(0, copysetCheck3.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck3.GetCopysetsRes()); } -// CheckCopysetsOnChunkserver异常情况 +// CheckCopysetsOnChunkserver exception cases TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { ChunkServerIdType csId = 1; std::string csAddr = "127.0.0.1:9191"; @@ -376,7 +361,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { GetIoBufForTest(&followerBuf2, gId, "FOLLOWER", true); std::map<std::string, std::set<std::string>> expectedRes; - // 1、GetChunkServerInfo失败的情况 + // 1. The case where GetChunkServerInfo fails CopysetCheckCore copysetCheck1(mdsClient_, csClient_); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) .WillOnce(Return(-1)); @@ -385,7 +370,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { ASSERT_DOUBLE_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); - // 2、chunkserver发送RPC失败的情况 + // 2.
The situation where chunkserver fails to send RPC std::vector csServerInfos; for (int i = 1; i <= 3; ++i) { CopySetServerInfo csServerInfo; @@ -398,53 +383,43 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { GetCsInfoForTest(&csInfo, csId); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); EXPECT_CALL(*csClient_, Init(_)) .Times(10) .WillOnce(Return(-1)) .WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(9) - .WillOnce(DoAll(SetArgPointee<0>(followerBuf), - Return(0))) - .WillOnce(DoAll(SetArgPointee<0>(followerBuf), - Return(0))) + .WillOnce(DoAll(SetArgPointee<0>(followerBuf), Return(0))) + .WillOnce(DoAll(SetArgPointee<0>(followerBuf), Return(0))) .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<0>(followerBuf), - Return(0))) + .WillOnce(DoAll(SetArgPointee<0>(followerBuf), Return(0))) .WillRepeatedly(Return(-1)); EXPECT_CALL(*mdsClient_, GetCopySetsInChunkServer(csAddr, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(copysets), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(copysets), Return(0))); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), Return(0))); CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck2.CheckCopysetsOnChunkServer(csId)); ASSERT_DOUBLE_EQ(1, copysetCheck2.GetCopysetStatistics().unhealthyRatio); - std::set expectedExcepCs = {csAddr, "127.0.0.1:9493", - "127.0.0.1:9394", "127.0.0.1:9496", - "127.0.0.1:9293", "127.0.0.1:9396", - "127.0.0.1:9499"}; + std::set expectedExcepCs = { + csAddr, "127.0.0.1:9493", "127.0.0.1:9394", "127.0.0.1:9496", + "127.0.0.1:9293", "127.0.0.1:9396", "127.0.0.1:9499"}; ASSERT_EQ(expectedExcepCs, copysetCheck2.GetServiceExceptionChunkServer()); std::set expectedCopysetExcepCs = {"127.0.0.1:9292"}; ASSERT_EQ(expectedCopysetExcepCs, - copysetCheck2.GetCopysetLoadExceptionChunkServer()); + copysetCheck2.GetCopysetLoadExceptionChunkServer()); ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes()); expectedRes.clear(); - // 3、获取chunkserver上的copyset失败的情况 + // 3. Failure in obtaining copyset on chunkserver GetCsInfoForTest(&csInfo, csId); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(1).WillOnce(Return(-1)); EXPECT_CALL(*mdsClient_, GetCopySetsInChunkServer(csAddr, _)) .Times(1) .WillOnce(Return(-1)); @@ -455,22 +430,16 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { ASSERT_EQ(expectedExcepCs, copysetCheck3.GetServiceExceptionChunkServer()); ASSERT_EQ(expectedRes, copysetCheck3.GetCopysetsRes()); - // 4、获取copyset对应的chunkserver列表失败的情况 + // 4. 
Failure in obtaining the chunkserver list corresponding to the copyset GetCsInfoForTest(&csInfo, csId); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*csClient_, GetRaftStatus(_)) - .Times(1) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*csClient_, GetRaftStatus(_)).Times(1).WillOnce(Return(-1)); EXPECT_CALL(*mdsClient_, GetCopySetsInChunkServer(csAddr, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(copysets), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(copysets), Return(0))); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(1) .WillOnce(Return(-1)); @@ -480,18 +449,14 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { ASSERT_EQ(expectedExcepCs, copysetCheck4.GetServiceExceptionChunkServer()); ASSERT_EQ(expectedRes, copysetCheck4.GetCopysetsRes()); - // 检查copyset是否在配置组中时出错 + // Error while checking whether the copyset is in the configuration group EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csAddr, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(1) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(1).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(followerBuf2), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(followerBuf2), Return(0))); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(1) .WillOnce(Return(-1)); @@ -499,10 +464,12 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { ASSERT_EQ(-1, copysetCheck5.CheckCopysetsOnChunkServer(csAddr)); } -// chunkserver上copyset不健康的情况 -// 检查单个server和集群都是复用的CheckCopysetsOnChunkserver -// 所以CheckCopysetsOnChunkserver要测每个不健康的情况,其他的只要测健康和不健康还有不在线的情况就好 -// 具体什么原因不健康不用关心 +// Unhealthy copysets on a chunkserver +// Checking a single server and checking the whole cluster both reuse +// CheckCopysetsOnChunkserver, so CheckCopysetsOnChunkserver has to cover every +// unhealthy case; the others only need to cover the healthy, unhealthy and +// offline cases. The concrete reason a copyset is unhealthy does not matter +// there TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) { ChunkServerIdType csId = 1; std::string csAddr1 = "127.0.0.1:9194"; @@ -516,110 +483,107 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) { uint64_t gId = 4294967396; std::string groupId; - // 1、首先加入9个健康的copyset + // 1. First, add 9 healthy copysets for (int i = 0; i < 9; ++i) { groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, - false, false, false); + GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, false, + false, false); expectedRes[kTotal].emplace(groupId); os << temp << "\r\n"; } - // 2、加入没有leader的copyset + // 2. Add a copyset without a leader groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "FOLLOWER", true, false, false, - false, false, false); + GetIoBufForTest(&temp, groupId, "FOLLOWER", true, false, false, false, + false, false); expectedRes[kTotal].emplace(groupId); expectedRes[kNoLeader].emplace(groupId); os << temp << "\r\n"; - // 3、加入正在安装快照的copyset + // 3.
Add a copyset that is currently installing a snapshot groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "LEADER", false, true, false, - false, false, false); + GetIoBufForTest(&temp, groupId, "LEADER", false, true, false, false, false, + false); expectedRes[kTotal].emplace(groupId); expectedRes[kInstallingSnapshot].emplace(groupId); os << temp << "\r\n"; - // 4、加入peer不足的copyset + // 4. Add a copyset with insufficient peers groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "LEADER", false, false, true, - false, false, false); + GetIoBufForTest(&temp, groupId, "LEADER", false, false, true, false, false, + false); expectedRes[kTotal].emplace(groupId); expectedRes[kPeersNoSufficient].emplace(groupId); os << temp << "\r\n"; - // 5、加入日志差距大的copset + // 5. Add a copyset with a large log gap groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, - true, false, false); + GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, true, false, + false); expectedRes[kTotal].emplace(groupId); expectedRes[kLogIndexGapTooBig].emplace(groupId); os << temp << "\r\n"; - // 6、加入无法解析的copyset,这种情况不会发生,发生了表明程序有bug - // 打印错误信息,但不会加入到unhealthy + // 6. Add a copyset that cannot be parsed. This should never happen; if it + // does, it indicates a bug in the program. + // An error message is printed, but the copyset is not counted as unhealthy groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, - false, true, false); + GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, false, true, + false); expectedRes[kTotal].emplace(groupId); os << temp << "\r\n"; - // 7.1、加入少数peer不在线的copyset + // 7.1. Add a copyset where a minority of peers are offline groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, - false, false, true); + GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, false, false, + true); expectedRes[kTotal].emplace(groupId); expectedRes[kMinorityPeerNotOnline].emplace(groupId); os << temp << "\r\n"; - // 7.2、加入大多数peer不在线的copyset + // 7.2. Add a copyset where a majority of peers are offline groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "FOLLOWER", true, false, false, - false, false, false, true); + GetIoBufForTest(&temp, groupId, "FOLLOWER", true, false, false, false, + false, false, true); expectedRes[kTotal].emplace(groupId); expectedRes[kMajorityPeerNotOnline].emplace(groupId); os << temp << "\r\n"; - // 8、加入CANDIDATE状态的copyset + // 8. Add a copyset in the CANDIDATE state groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "CANDIDATE"); expectedRes[kTotal].emplace(groupId); expectedRes[kNoLeader].emplace(groupId); os << temp << "\r\n"; - // 9、加入TRANSFERRING状态的copyset + // 9. Add a copyset in the TRANSFERRING state groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "TRANSFERRING"); expectedRes[kTotal].emplace(groupId); expectedRes[kNoLeader].emplace(groupId); os << temp << "\r\n"; - // 10、加入ERROR状态的copyset + // 10. Add a copyset in the ERROR state groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "ERROR"); expectedRes[kTotal].emplace(groupId); expectedRes["state ERROR"].emplace(groupId); os << temp << "\r\n"; - // 11、加入SHUTDOWN状态的copyset + // 11.
Add a copyset in SHUTDOWN state groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "SHUTDOWN"); expectedRes[kTotal].emplace(groupId); expectedRes["state SHUTDOWN"].emplace(groupId); os << temp; - // 设置mock对象的返回,8个正常iobuf里面,设置一个的peer不在线,因此unhealthy++ + // Set the return of mock objects. Among the 8 normal iobufs, one peer is + // set to be offline, resulting in unhealthy++ os.move_to(iobuf); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*csClient_, Init(csAddr1)) - .WillOnce(Return(-1)); - EXPECT_CALL(*csClient_, Init(csAddr2)) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).WillRepeatedly(Return(0)); + EXPECT_CALL(*csClient_, Init(csAddr1)).WillOnce(Return(-1)); + EXPECT_CALL(*csClient_, Init(csAddr2)).WillOnce(Return(-1)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); std::vector csServerInfos; CopySetServerInfo csServerInfo; GetCsServerInfoForTest(&csServerInfo, 1); @@ -629,10 +593,9 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) { csServerInfos.emplace_back(csServerInfo); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<2>(csServerInfos), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<2>(csServerInfos), Return(0))); - // 检查结果 + // Inspection results std::set expectedExcepCs = {csAddr1, csAddr2}; CopysetCheckCore copysetCheck(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck.CheckCopysetsOnChunkServer(csId)); @@ -641,7 +604,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) { ASSERT_EQ(expectedExcepCs, copysetCheck.GetServiceExceptionChunkServer()); } -// CheckCopysetsOnServer正常情况 +// CheckCopysetsOnServer normal condition TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { ServerIdType serverId = 1; std::string serverIp = "127.0.0.1"; @@ -656,21 +619,17 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { std::string groupId; groupId = std::to_string(gId++); expectedRes[kTotal].emplace(groupId); - GetIoBufForTest(&iobuf, groupId, "LEADER", false, false, false, - false, false, false); + GetIoBufForTest(&iobuf, groupId, "LEADER", false, false, false, false, + false, false); - // 通过id查询 + // Query by ID EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(serverId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(3) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(3).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); CopysetCheckCore copysetCheck1(mdsClient_, csClient_); ASSERT_EQ(0, copysetCheck1.CheckCopysetsOnServer(serverId, &unhealthyCs)); @@ -678,19 +637,15 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { ASSERT_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); - // 通过ip查询 + // Query through IP EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(serverIp, _)) .Times(1) - 
.WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(3) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(3).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); - // 通过ip查询 + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); + // Query by IP CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(0, copysetCheck2.CheckCopysetsOnServer(serverIp, &unhealthyCs)); ASSERT_EQ(0, unhealthyCs.size()); @@ -698,7 +653,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes()); } -// CheckCopysetsOnServer异常情况 +// CheckCopysetsOnServer exception cases TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) { ServerIdType serverId = 1; butil::IOBuf iobuf; @@ -721,7 +676,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) { gIds.emplace(std::to_string(gId)); } - // 1、ListChunkServersOnServer失败的情况 + // 1. The case where ListChunkServersOnServer fails EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(serverId, _)) .Times(1) .WillOnce(Return(-1)); @@ -730,7 +685,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) { ASSERT_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); - // 3、一个chunkserver访问失败,一个chunkserver不健康的情况 + // 3. One chunkserver cannot be accessed and another chunkserver is unhealthy GetIoBufForTest(&iobuf, groupId, "LEADER", false, true); expectedRes[kTotal] = gIds; expectedRes[kTotal].emplace(groupId); expectedRes[kMinorityPeerNotOnline] = gIds; EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(serverId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); EXPECT_CALL(*csClient_, Init(_)) .Times(3) .WillOnce(Return(-1)) .WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); - EXPECT_CALL(*mdsClient_, - GetCopySetsInChunkServer("127.0.0.1:9191", _)) + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); + EXPECT_CALL(*mdsClient_, GetCopySetsInChunkServer("127.0.0.1:9191", _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(copysets), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(copysets), Return(0))); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(1) .WillOnce(Return(-1)); @@ -760,15 +711,14 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) { CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck2.CheckCopysetsOnServer(serverId, &unhealthyCs)); ASSERT_EQ(1, copysetCheck2.GetCopysetStatistics().unhealthyRatio); - std::vector<std::string> unhealthyCsExpected = - {"127.0.0.1:9191", "127.0.0.1:9192"}; + std::vector<std::string> unhealthyCsExpected = {"127.0.0.1:9191", + "127.0.0.1:9192"}; ASSERT_EQ(unhealthyCsExpected, unhealthyCs); - std::set<std::string> expectedExcepCs = - {"127.0.0.1:9191"}; + std::set<std::string> expectedExcepCs = {"127.0.0.1:9191"}; ASSERT_EQ(expectedExcepCs, copysetCheck2.GetServiceExceptionChunkServer()); } -// CheckCopysetsInCluster正常情况 +// CheckCopysetsInCluster normal situation TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterNormal) { butil::IOBuf iobuf; GetIoBufForTest(&iobuf, "4294967396", "LEADER"); @@ -783,23
+733,17 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterNormal) { EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(servers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(servers), Return(0))); EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(1, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(3) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(3).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(0), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(0), Return(0))); std::vector<CopysetInfo> copysetsInMds; CopysetInfo copyset; copyset.set_logicalpoolid(1); @@ -807,8 +751,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterNormal) { copysetsInMds.emplace_back(copyset); EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0))); CopysetCheckCore copysetCheck1(mdsClient_, csClient_); ASSERT_EQ(0, copysetCheck1.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); @@ -826,7 +769,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterError) { GetCsInfoForTest(&chunkserver, 1); std::vector<ChunkServerInfo> chunkservers = {chunkserver}; - // 1、ListServersInCluster失败 + // 1. ListServersInCluster failed EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) .WillOnce(Return(-1)); @@ -835,89 +778,75 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterError) { ASSERT_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); - // 2、CheckCopysetsOnServer返回不为0 + // 2. CheckCopysetsOnServer returned a non-zero value EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(servers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(servers), Return(0))); EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(1, _)) .Times(1) .WillOnce(Return(-1)); std::vector<CopysetInfo> copysetsInMds; EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0))); CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck2.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck2.GetCopysetStatistics().unhealthyRatio); expectedRes[kTotal] = {}; ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes()); - // 3、GetMetric失败 + // 3.
GetMetric failed expectedRes[kTotal] = {"4294967396"}; EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>(servers), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(servers), Return(0))); EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(1, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(6) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(chunkservers), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(6).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(2) .WillOnce(Return(-1)) - .WillRepeatedly(DoAll(SetArgPointee<1>(10), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(10), Return(0))); CopysetInfo copyset; copyset.set_logicalpoolid(1); copyset.set_copysetid(100); copysetsInMds.emplace_back(copyset); EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), - Return(0))); - // 获取operator失败 + .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0))); + // Failed to obtain operator CopysetCheckCore copysetCheck3(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck3.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck3.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck3.GetCopysetsRes()); - // operator数量大于0 + // The number of operators is greater than 0 CopysetCheckCore copysetCheck4(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck4.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck4.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck4.GetCopysetsRes()); - // 4、比较chunkserver跟mds的copyset失败 + // 4. 
Failed to compare the copyset between chunkserver and mds EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<0>(servers), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(servers), Return(0))); EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(1, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(9) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(chunkservers), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(9).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(9) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); - // 从获取copyset失败 + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); + // Failed to obtain copysets from MDS EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(1) .WillRepeatedly(Return(-1)); ASSERT_EQ(-1, copysetCheck4.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck4.GetCopysetStatistics().unhealthyRatio); - // copyset数量不一致 + // Inconsistent number of copysets copysetsInMds.clear(); copyset.set_logicalpoolid(1); copyset.set_copysetid(101); @@ -926,16 +855,14 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterError) { copysetsInMds.emplace_back(copyset); EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0))); ASSERT_EQ(-1, copysetCheck4.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck4.GetCopysetStatistics().unhealthyRatio); - // copyset数量一致,但是内容不一致 + // The number of copysets is consistent, but the content is inconsistent copysetsInMds.pop_back(); EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0))); ASSERT_EQ(-1, copysetCheck4.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck4.GetCopysetStatistics().unhealthyRatio); } @@ -944,21 +871,17 @@ TEST_F(CopysetCheckCoreTest, CheckOperator) { CopysetCheckCore copysetCheck(mdsClient_, csClient_); std::string opName = "change_peer"; uint64_t checkTime = 3; - // 1、获取metric失败 - EXPECT_CALL(*mdsClient_, GetMetric(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 1. Failed to obtain metric + EXPECT_CALL(*mdsClient_, GetMetric(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetCheck.CheckOperator(opName, checkTime)); - // 2、operator数量不为0 + // 2. The number of operators is not 0 EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(10), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(10), Return(0))); ASSERT_EQ(10, copysetCheck.CheckOperator(opName, checkTime)); - // 3、operator数量为0 + // 3.
The number of operators is 0 EXPECT_CALL(*mdsClient_, GetMetric(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(0), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(0), Return(0))); ASSERT_EQ(0, copysetCheck.CheckOperator(opName, checkTime)); } @@ -969,11 +892,10 @@ TEST_F(CopysetCheckCoreTest, ListMayBrokenVolumes) { GetCsInfoForTest(&chunkserver, 1); chunkservers.emplace_back(chunkserver); } - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An<std::vector<ChunkServerInfo>*>())) + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An<std::vector<ChunkServerInfo>*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*csClient_, Init(_)) .Times(12) .WillOnce(Return(0)) @@ -988,11 +910,10 @@ TEST_F(CopysetCheckCoreTest, ListMayBrokenVolumes) { copyset.set_copysetid(100 + i); copysets.emplace_back(copyset); } - EXPECT_CALL(*mdsClient_, GetCopySetsInChunkServer( - An<const std::string&>(), _)) + EXPECT_CALL(*mdsClient_, + GetCopySetsInChunkServer(An<const std::string&>(), _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(copysets), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(copysets), Return(0))); std::vector<CopySetServerInfo> csServerInfos; for (int i = 1; i <= 3; ++i) { CopySetServerInfo csServerInfo; @@ -1001,16 +922,14 @@ } EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), Return(0))); std::vector<std::string> fileNames = {"file1", "file2"}; std::vector<std::string> fileNames2; CopysetCheckCore copysetCheck1(mdsClient_, csClient_); EXPECT_CALL(*mdsClient_, ListVolumesOnCopyset(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(fileNames), Return(0))); ASSERT_EQ(0, copysetCheck1.ListMayBrokenVolumes(&fileNames2)); ASSERT_EQ(fileNames, fileNames2); }
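Most of the expectations reformatted in these test files lean on one gMock idiom: DoAll(SetArgPointee<N>(value), Return(code)) fills the N-th argument of the mocked call (an out-parameter pointer) with a canned value and then returns a status code, which is how the tests feed fake topology data through the mock MDS and chunkserver clients. A minimal self-contained sketch of the idiom follows; the Lister interface and MockLister are illustrative stand-ins, not types from the Curve tree.

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <string>
#include <vector>

using ::testing::_;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;

// Illustrative interface with an out-parameter, not a Curve API.
class Lister {
 public:
    virtual ~Lister() = default;
    virtual int List(std::vector<std::string>* out) = 0;
};

class MockLister : public Lister {
 public:
    MOCK_METHOD1(List, int(std::vector<std::string>*));
};

TEST(DoAllIdiom, FillOutParamThenReturn) {
    MockLister mock;
    std::vector<std::string> canned = {"copyset-1", "copyset-2"};
    // Argument index 0 (the pointer) is filled with `canned`,
    // then 0 is returned as the call's status code.
    EXPECT_CALL(mock, List(_))
        .WillOnce(DoAll(SetArgPointee<0>(canned), Return(0)));

    std::vector<std::string> got;
    ASSERT_EQ(0, mock.List(&got));
    ASSERT_EQ(canned, got);
}

Any copyable type works the same way: SetArgPointee copy-assigns through the pointer before Return produces the call's result.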
diff --git a/test/tools/copyset_check_test.cpp b/test/tools/copyset_check_test.cpp index 01c7e3f4c2..2e034b6d27 100644 --- a/test/tools/copyset_check_test.cpp +++ b/test/tools/copyset_check_test.cpp @@ -20,15 +20,17 @@ * Author: charisu */ -#include #include "src/tools/copyset_check.h" + +#include + #include "src/tools/copyset_check_core.h" #include "test/tools/mock/mock_copyset_check_core.h" using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::ReturnRef; -using ::testing::DoAll; using ::testing::SetArgPointee; DECLARE_bool(detail); @@ -55,26 +57,23 @@ class CopysetCheckTest : public ::testing::Test { core_ = std::make_shared(); FLAGS_detail = true; } - void TearDown() { - core_ = nullptr; - } + void TearDown() { core_ = nullptr; } void GetIoBufForTest(butil::IOBuf* buf, const std::string& gId, - bool isLeader = false, - bool noLeader = false, - bool installingSnapshot = false, - bool peersLess = false, - bool gapBig = false, - bool parseErr = false, - bool peerOffline = false) { + bool isLeader = false, bool noLeader = false, + bool installingSnapshot = false, + bool peersLess = false, bool gapBig = false, + bool parseErr = false, bool peerOffline = false) { butil::IOBufBuilder os; - os << "[" << gId << "]\r\n"; + os << "[" << gId << "]\r\n"; if (peersLess) { os << "peers: \r\n"; } else if (peerOffline) { - os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 127.0.0.1:9194:0\r\n"; // NOLINT + os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 " "127.0.0.1:9194:0\r\n"; // NOLINT } else { - os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 127.0.0.1:9193:0\r\n"; // NOLINT + os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 " + "127.0.0.1:9193:0\r\n"; // NOLINT } os << "storage: [2581, 2580]\n"; if (parseErr) { @@ -84,7 +83,9 @@ class CopysetCheckTest : public ::testing::Test { } os << "state_machine: Idle\r\n"; if (isLeader) { - os << "state: " << "LEADER" << "\r\n"; + os << "state: " + << "LEADER" + << "\r\n"; os << "replicator_123: next_index="; if (gapBig) { os << "1000"; @@ -99,26 +100,31 @@ class CopysetCheckTest : public ::testing::Test { } os << "hc=4211759 ac=1089 ic=0\r\n"; } else { - os << "state: " << "FOLLOWER" << "\r\n"; + os << "state: " + << "FOLLOWER" + << "\r\n"; if (noLeader) { - os << "leader: " << "0.0.0.0:0:0\r\n"; + os << "leader: " + << "0.0.0.0:0:0\r\n"; } else { - os << "leader: " << "127.0.0.1:9192:0\r\n"; + os << "leader: " + << "127.0.0.1:9192:0\r\n"; } } os.move_to(*buf); } - std::map<std::string, std::set<std::string>> res1 = - {{"total", {"4294967396", "4294967397"}}}; - std::map<std::string, std::set<std::string>> res2 = - {{"total", {"4294967396", "4294967397", "4294967398", - "4294967399", "4294967400", "4294967401"}}, - {"installing snapshot", {"4294967397"}}, - {"no leader", {"4294967398"}}, - {"index gap too big", {"4294967399"}}, - {"peers not sufficient", {"4294967400"}}, - {"peer not online", {"4294967401"}}}; + std::map<std::string, std::set<std::string>> res1 = { + {"total", {"4294967396", "4294967397"}}}; + std::map<std::string, std::set<std::string>> res2 = { + {"total", + {"4294967396", "4294967397", "4294967398", "4294967399", "4294967400", "4294967401"}}, + {"installing snapshot", {"4294967397"}}, + {"no leader", {"4294967398"}}, + {"index gap too big", {"4294967399"}}, + {"peers not sufficient", {"4294967400"}}, + {"peer not online", {"4294967401"}}}; std::set<std::string> serviceExcepCs = {"127.0.0.1:9092"}; std::set<std::string> copysetExcepCs = {"127.0.0.1:9093"}; std::set<std::string> emptySet; @@ -143,29 +149,25 @@ TEST_F(CopysetCheckTest, CheckOneCopyset) { copysetCheck.PrintHelp("check-copyset"); butil::IOBuf iobuf; GetIoBufForTest(&iobuf, "4294967396", true); - std::vector<std::string> peersInCopyset = - {"127.0.0.1:9091", "127.0.0.1:9092", "127.0.0.1:9093"}; + std::vector<std::string> peersInCopyset = { "127.0.0.1:9091", "127.0.0.1:9092", "127.0.0.1:9093"}; std::string copysetDetail = iobuf.to_string(); - // Init失败的情况 - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + // The case where Init fails + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetCheck.RunCommand("check-copyset")); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 不支持的命令 + // Unsupported command ASSERT_EQ(-1, copysetCheck.RunCommand("check-nothings")); copysetCheck.PrintHelp("check-nothins"); - // 没有指定逻辑池和copyset的话返回失败 + // If no logical pool and copyset are specified, a failure is returned ASSERT_EQ(-1, copysetCheck.RunCommand("check-copyset")); FLAGS_logicalPoolId = 1; FLAGS_copysetId = 100; copysetCheck.PrintHelp("check-copyset"); - // 健康的情况 + // Healthy situation EXPECT_CALL(*core_, CheckOneCopyset(_, _)) .Times(1) .WillOnce(Return(CheckResult::kHealthy)); @@ -180,7 +182,7 @@ TEST_F(CopysetCheckTest, CheckOneCopyset) { .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-copyset")); - // copyset不健康的情况 + // The case where the copyset is unhealthy EXPECT_CALL(*core_, CheckOneCopyset(_, _)) .Times(1) .WillOnce(Return(CheckResult::kLogIndexGapTooBig)); @@ -199,15 +201,13 @@ TEST_F(CopysetCheckTest, testCheckChunkServer) { CopysetCheck copysetCheck(core_); copysetCheck.PrintHelp("check-chunkserver"); - EXPECT_CALL(*core_,
Init(_)) - .Times(1) - .WillOnce(Return(0)); - // 没有指定chunkserver的话报错 + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); + // Error reported if chunkserver is not specified ASSERT_EQ(-1, copysetCheck.RunCommand("check-chunkserver")); copysetCheck.PrintHelp("check-chunkserver"); - // 健康的情况 - // 通过id查询 + // Healthy situation + // Query by ID FLAGS_chunkserverId = 1; EXPECT_CALL(*core_, CheckCopysetsOnChunkServer(FLAGS_chunkserverId)) .Times(1) @@ -225,11 +225,11 @@ TEST_F(CopysetCheckTest, testCheckChunkServer) { .Times(1) .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-chunkserver")); - // id和地址同时指定,报错 + // Error reported when both ID and address are specified simultaneously FLAGS_chunkserverAddr = "127.0.0.1:8200"; ASSERT_EQ(-1, copysetCheck.RunCommand("check-chunkserver")); FLAGS_chunkserverId = 0; - // 通过地址查询 + // Search through address EXPECT_CALL(*core_, CheckCopysetsOnChunkServer(FLAGS_chunkserverAddr)) .Times(1) .WillOnce(Return(0)); @@ -247,7 +247,7 @@ TEST_F(CopysetCheckTest, testCheckChunkServer) { .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-chunkserver")); - // 不健康的情况 + // Unhealthy situation EXPECT_CALL(*core_, CheckCopysetsOnChunkServer(FLAGS_chunkserverAddr)) .Times(1) .WillOnce(Return(-1)); @@ -269,23 +269,20 @@ TEST_F(CopysetCheckTest, testCheckChunkServer) { TEST_F(CopysetCheckTest, testCheckServer) { CopysetCheck copysetCheck(core_); copysetCheck.PrintHelp("check-server"); - std::vector chunkservers = - {"127.0.0.1:9091", "127.0.0.1:9092", "127.0.0.1:9093"}; - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + std::vector chunkservers = {"127.0.0.1:9091", "127.0.0.1:9092", + "127.0.0.1:9093"}; + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 没有指定server的话报错 + // If no server is specified, an error will be reported ASSERT_EQ(-1, copysetCheck.RunCommand("check-server")); copysetCheck.PrintHelp("check-server"); - // 健康的情况 - // 通过id查询 + // Healthy situation + // Query by ID FLAGS_serverId = 1; EXPECT_CALL(*core_, CheckCopysetsOnServer(FLAGS_serverId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); EXPECT_CALL(*core_, GetCopysetsRes()) .Times(1) .WillRepeatedly(ReturnRef(res1)); @@ -299,15 +296,14 @@ TEST_F(CopysetCheckTest, testCheckServer) { .Times(1) .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-server")); - // id和ip同时指定,报错 + // Error reported when both ID and IP are specified simultaneously FLAGS_serverIp = "127.0.0.1"; ASSERT_EQ(-1, copysetCheck.RunCommand("check-server")); FLAGS_serverId = 0; - // 通过ip查询 + // Query through IP EXPECT_CALL(*core_, CheckCopysetsOnServer(FLAGS_serverIp, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); EXPECT_CALL(*core_, GetCopysetsRes()) .Times(1) .WillRepeatedly(ReturnRef(res1)); @@ -322,7 +318,7 @@ TEST_F(CopysetCheckTest, testCheckServer) { .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-server")); - // 不健康的情况 + // Unhealthy situation EXPECT_CALL(*core_, CheckCopysetsOnServer(FLAGS_serverIp, _)) .Times(1) .WillOnce(Return(-1)); @@ -344,14 +340,10 @@ TEST_F(CopysetCheckTest, testCheckServer) { TEST_F(CopysetCheckTest, testCheckCluster) { CopysetCheck copysetCheck(core_); copysetCheck.PrintHelp("copysets-status"); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + 
EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 健康的情况 - EXPECT_CALL(*core_, CheckCopysetsInCluster()) - .Times(1) - .WillOnce(Return(0)); + // Healthy situation + EXPECT_CALL(*core_, CheckCopysetsInCluster()).Times(1).WillOnce(Return(0)); EXPECT_CALL(*core_, GetCopysetsRes()) .Times(1) .WillRepeatedly(ReturnRef(res1)); @@ -366,10 +358,8 @@ TEST_F(CopysetCheckTest, testCheckCluster) { .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand(kCopysetsStatusCmd)); - // 不健康的情况 - EXPECT_CALL(*core_, CheckCopysetsInCluster()) - .Times(1) - .WillOnce(Return(-1)); + // Unhealthy situation + EXPECT_CALL(*core_, CheckCopysetsInCluster()).Times(1).WillOnce(Return(-1)); EXPECT_CALL(*core_, GetCopysetsRes()) .Times(1) .WillRepeatedly(ReturnRef(res2)); @@ -388,14 +378,12 @@ TEST_F(CopysetCheckTest, testCheckCluster) { TEST_F(CopysetCheckTest, testCheckOperator) { CopysetCheck copysetCheck(core_); copysetCheck.PrintHelp("check-operator"); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、不支持的operator + // 1. Unsupported operator FLAGS_opName = "no_operator"; ASSERT_EQ(-1, copysetCheck.RunCommand(kCheckOperatorCmd)); - // 2、transfer leader的operator和total的 + // 2. The operator and total of the transfer leader EXPECT_CALL(*core_, CheckOperator(_, FLAGS_leaderOpInterval)) .Times(2) .WillOnce(Return(0)) @@ -404,7 +392,7 @@ TEST_F(CopysetCheckTest, testCheckOperator) { ASSERT_EQ(0, copysetCheck.RunCommand(kCheckOperatorCmd)); FLAGS_opName = kTotalOpName; ASSERT_EQ(-1, copysetCheck.RunCommand(kCheckOperatorCmd)); - // 2、其他operator + // 2. Other operators EXPECT_CALL(*core_, CheckOperator(_, FLAGS_opIntervalExceptLeader)) .Times(3) .WillOnce(Return(10)) @@ -420,15 +408,11 @@ TEST_F(CopysetCheckTest, testCheckOperator) { TEST_F(CopysetCheckTest, PrintMayBrokenVolumes) { CopysetCheck copysetCheck(core_); copysetCheck.PrintHelp(kListMayBrokenVolumes); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); // fail - EXPECT_CALL(*core_, ListMayBrokenVolumes(_)) - .WillOnce(Return(-1)); + EXPECT_CALL(*core_, ListMayBrokenVolumes(_)).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetCheck.RunCommand(kListMayBrokenVolumes)); - EXPECT_CALL(*core_, ListMayBrokenVolumes(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, ListMayBrokenVolumes(_)).WillOnce(Return(0)); ASSERT_EQ(0, copysetCheck.RunCommand(kListMayBrokenVolumes)); } diff --git a/test/tools/curve_cli_test.cpp b/test/tools/curve_cli_test.cpp index 133d9de42d..0ad6d9cae8 100644 --- a/test/tools/curve_cli_test.cpp +++ b/test/tools/curve_cli_test.cpp @@ -20,22 +20,25 @@ * Author: charisu */ -#include -#include +#include "src/tools/curve_cli.h" + #include +#include #include +#include + #include -#include "src/tools/curve_cli.h" + #include "test/tools/mock/mock_cli_service.h" #include "test/tools/mock/mock_copyset_service.h" #include "test/tools/mock/mock_mds_client.h" using ::testing::_; -using ::testing::Return; -using ::testing::Invoke; +using ::testing::An; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::An; DECLARE_int32(timeout_ms); DECLARE_int32(max_retry); @@ -50,10 +53,8 @@ DECLARE_bool(affirm); namespace curve { namespace tool { -template -void callback(RpcController* controller, - const Req* request, - Resp* response, +template +void callback(RpcController* controller, const Req* request, Resp* response, 
Closure* done) { brpc::ClosureGuard doneGuard(done); } @@ -67,7 +68,7 @@ class CurveCliTest : public ::testing::Test { mockCliService = new MockCliService(); mockCopysetService_ = std::make_shared(); ASSERT_EQ(0, server->AddService(mockCliService, - brpc::SERVER_DOESNT_OWN_SERVICE)); + brpc::SERVER_DOESNT_OWN_SERVICE)); ASSERT_EQ(0, server->AddService(mockCopysetService_.get(), brpc::SERVER_DOESNT_OWN_SERVICE)); ASSERT_EQ(0, server->Start("127.0.0.1:9192", nullptr)); @@ -82,8 +83,8 @@ class CurveCliTest : public ::testing::Test { mockCliService = nullptr; } - brpc::Server *server; - MockCliService *mockCliService; + brpc::Server* server; + MockCliService* mockCliService; std::shared_ptr mockCopysetService_; const std::string conf = "127.0.0.1:9192:0"; const std::string peer = "127.0.0.1:9192:0"; @@ -113,20 +114,20 @@ TEST_F(CurveCliTest, RemovePeer) { curveCli.PrintHelp("remove-peer"); curveCli.PrintHelp("test"); curveCli.RunCommand("test"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("remove-peer")); - // conf为空 + // conf is empty FLAGS_peer = peer; FLAGS_conf = ""; ASSERT_EQ(-1, curveCli.RunCommand("remove-peer")); - // 解析conf失败 + // Failed to parse conf FLAGS_conf = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("remove-peer")); - // 解析peer失败 + // Parsing peer failed FLAGS_conf = conf; FLAGS_peer = "1234"; - // 执行变更成功 + // Successfully executed changes FLAGS_peer = peer; curve::common::Peer* targetPeer = new curve::common::Peer; targetPeer->set_address(peer); @@ -134,32 +135,27 @@ TEST_F(CurveCliTest, RemovePeer) { response.set_allocated_leader(targetPeer); EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetLeaderRequest2 *request, - GetLeaderResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + Invoke([](RpcController* controller, + const GetLeaderRequest2* request, + GetLeaderResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + }))); EXPECT_CALL(*mockCliService, RemovePeer(_, _, _, _)) - .WillOnce(Invoke([](RpcController *controller, - const RemovePeerRequest2 *request, - RemovePeerResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - })); + .WillOnce(Invoke( + [](RpcController* controller, const RemovePeerRequest2* request, + RemovePeerResponse2* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); })); ASSERT_EQ(0, curveCli.RunCommand("remove-peer")); - // 执行变更失败 + // Failed to execute changes EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) - .WillOnce( - Invoke([](RpcController *controller, - const GetLeaderRequest2 *request, - GetLeaderResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillOnce(Invoke([](RpcController* controller, + const GetLeaderRequest2* request, + GetLeaderResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, curveCli.RunCommand("remove-peer")); // TEST CASES: remove broken copyset after remove peer @@ -181,8 +177,8 @@ TEST_F(CurveCliTest, RemovePeer) { EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(getLeaderResp), - Invoke(getLeaderFunc))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(getLeaderResp), Invoke(getLeaderFunc))); 
EXPECT_CALL(*mockCliService, RemovePeer(_, _, _, _)) .Times(3) .WillRepeatedly(Invoke(removePeerFunc));
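A note on the stub shape used throughout these CLI tests: a mocked brpc service method must run its done closure exactly once, otherwise the synchronous RunCommand call would block forever, so every Invoke lambda wraps done in a brpc::ClosureGuard, and the failure variants additionally mark the RPC as failed through the controller. A condensed sketch of the two variants under that assumption; FakeRpcOk and FakeRpcFail are hypothetical helpers, not functions from the Curve tree.

#include <brpc/closure_guard.h>
#include <brpc/controller.h>
#include <google/protobuf/service.h>

// Success variant (hypothetical helper): complete the RPC and leave the
// controller untouched.
template <typename Req, typename Resp>
void FakeRpcOk(::google::protobuf::RpcController* /*controller*/,
               const Req* /*request*/, Resp* /*response*/,
               ::google::protobuf::Closure* done) {
    brpc::ClosureGuard doneGuard(done);  // runs done->Run() on scope exit
}

// Failure variant (hypothetical helper): complete the RPC but mark it failed;
// a failed controller is what makes RunCommand return -1 in these tests.
template <typename Req, typename Resp>
void FakeRpcFail(::google::protobuf::RpcController* controller,
                 const Req* /*request*/, Resp* /*response*/,
                 ::google::protobuf::Closure* done) {
    brpc::ClosureGuard doneGuard(done);
    dynamic_cast<brpc::Controller*>(controller)->SetFailed("injected error");
}

Bound with Invoke(FakeRpcFail<GetLeaderRequest2, GetLeaderResponse2>), such a helper would behave like the inline failure lambdas around this hunk.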
@@ -210,21 +206,21 @@ TEST_F(CurveCliTest, RemovePeer) { TEST_F(CurveCliTest, TransferLeader) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("transfer-leader"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // conf为空 + // conf is empty FLAGS_peer = peer; FLAGS_conf = ""; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // 解析conf失败 + // Failed to parse conf FLAGS_conf = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // 解析peer失败 + // Parsing peer failed FLAGS_conf = conf; FLAGS_peer = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // 执行变更成功 + // Successfully executed changes FLAGS_peer = peer; curve::common::Peer* targetPeer = new curve::common::Peer; targetPeer->set_address(peer); @@ -232,147 +228,132 @@ TEST_F(CurveCliTest, TransferLeader) { response.set_allocated_leader(targetPeer); EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetLeaderRequest2 *request, - GetLeaderResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + Invoke([](RpcController* controller, + const GetLeaderRequest2* request, + GetLeaderResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + }))); ASSERT_EQ(0, curveCli.RunCommand("transfer-leader")); - // 执行变更失败 + // Failed to execute changes EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) - .WillOnce( - Invoke([](RpcController *controller, - const GetLeaderRequest2 *request, - GetLeaderResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller*>(controller); - cntl->SetFailed("test"); - })); + .WillOnce(Invoke([](RpcController* controller, + const GetLeaderRequest2* request, + GetLeaderResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); } TEST_F(CurveCliTest, ResetPeer) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("reset-peer"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // newConf为空 + // newConf is empty FLAGS_peer = peer; FLAGS_new_conf = ""; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // 解析newConf失败 + // Failed to parse newConf FLAGS_new_conf = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // 解析peer失败 + // Parsing peer failed FLAGS_new_conf = conf; FLAGS_peer = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // newConf有三个副本 + // newConf has three replicas FLAGS_peer = peer; FLAGS_new_conf = "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // newConf不包含peer + // newConf does not contain peer FLAGS_new_conf = "127.0.0.1:8201:0"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // 执行变更成功 + // Successfully executed changes FLAGS_new_conf = conf; EXPECT_CALL(*mockCliService, ResetPeer(_, _, _, _)) - .WillOnce(Invoke([](RpcController *controller, - const ResetPeerRequest2 *request, - ResetPeerResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - })); + .WillOnce(Invoke( [](RpcController* controller, const ResetPeerRequest2* request, ResetPeerResponse2* response, Closure* done) { brpc::ClosureGuard doneGuard(done); })); ASSERT_EQ(0, curveCli.RunCommand("reset-peer")); - // 执行变更失败 - EXPECT_CALL(*mockCliService, ResetPeer(_, _, _, _)) - .WillOnce(Invoke([](RpcController *controller, - const ResetPeerRequest2 *request, - ResetPeerResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller*>(controller); - cntl->SetFailed("test"); - })); + // Failed to execute changes + EXPECT_CALL(*mockCliService, ResetPeer(_, _, _, _)) + .WillOnce(Invoke([](RpcController* controller, + const ResetPeerRequest2* request, + ResetPeerResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); } TEST_F(CurveCliTest, DoSnapshot) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("do-snapshot"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("do-snapshot")); - // 解析peer失败 + // Parsing peer failed FLAGS_peer = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("do-snapshot")); - // 执行变更成功 + // Successfully executed changes FLAGS_peer = peer; EXPECT_CALL(*mockCliService, Snapshot(_, _, _, _)) - .WillOnce(Invoke([](RpcController *controller, - const SnapshotRequest2 *request, - SnapshotResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - })); + .WillOnce(Invoke( [](RpcController* controller, const SnapshotRequest2* request, SnapshotResponse2* response, Closure* done) { brpc::ClosureGuard doneGuard(done); })); ASSERT_EQ(0, curveCli.RunCommand("do-snapshot")); - // 执行变更失败 - EXPECT_CALL(*mockCliService, Snapshot(_, _, _, _)) - .WillOnce(Invoke([](RpcController *controller, - const SnapshotRequest2 *request, - SnapshotResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller*>(controller); - cntl->SetFailed("test"); - })); + // Failed to execute changes + EXPECT_CALL(*mockCliService, Snapshot(_, _, _, _)) + .WillOnce(Invoke([](RpcController* controller, + const SnapshotRequest2* request, + SnapshotResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, curveCli.RunCommand("do-snapshot")); } TEST_F(CurveCliTest, DoSnapshotAll) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("do-snapshot-all"); - // 执行变更成功 + // Successfully executed changes std::vector<ChunkServerInfo> chunkservers; ChunkServerInfo csInfo; csInfo.set_hostip("127.0.0.1"); csInfo.set_port(9192); chunkservers.emplace_back(csInfo); - EXPECT_CALL(*mdsClient_, Init(_)) + EXPECT_CALL(*mdsClient_, Init(_)).Times(2).WillRepeatedly(Return(0)); + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An<std::vector<ChunkServerInfo>*>())) .Times(2) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An<std::vector<ChunkServerInfo>*>())) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*mockCliService, SnapshotAll(_, _, _, _)) .Times(1) - .WillOnce(Invoke([](RpcController *controller, - const SnapshotAllRequest *request, - SnapshotAllResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - })); + .WillOnce(Invoke( [](RpcController* controller, const SnapshotAllRequest* request, SnapshotAllResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); }));
ASSERT_EQ(0, curveCli.RunCommand("do-snapshot-all")); - // 执行变更失败 - EXPECT_CALL(*mockCliService, SnapshotAll(_, _, _, _)) + // Failed to execute changes + EXPECT_CALL(*mockCliService, SnapshotAll(_, _, _, _)) .Times(1) - .WillOnce(Invoke([](RpcController *controller, - const SnapshotAllRequest *request, - SnapshotAllResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillOnce(Invoke([](RpcController* controller, + const SnapshotAllRequest* request, + SnapshotAllResponse* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, curveCli.RunCommand("do-snapshot-all")); } diff --git a/test/tools/curve_meta_tool_test.cpp b/test/tools/curve_meta_tool_test.cpp index 1d493c56f8..a94d54dbb3 100644 --- a/test/tools/curve_meta_tool_test.cpp +++ b/test/tools/curve_meta_tool_test.cpp @@ -20,10 +20,13 @@ * Author: charisu */ +#include "src/tools/curve_meta_tool.h" + #include + #include #include -#include "src/tools/curve_meta_tool.h" + #include "test/fs/mock_local_filesystem.h" namespace curve { @@ -32,8 +35,8 @@ namespace tool { using curve::common::Bitmap; using curve::fs::MockLocalFileSystem; using ::testing::_; -using ::testing::Return; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; @@ -43,12 +46,8 @@ const char chunkFileName[] = "chunk_001"; class CurveMetaToolTest : public ::testing::Test { protected: - void SetUp() { - localFs_ = std::make_shared(); - } - void TearDown() { - localFs_ = nullptr; - } + void SetUp() { localFs_ = std::make_shared(); } + void TearDown() { localFs_ = nullptr; } std::shared_ptr localFs_; }; @@ -65,30 +64,28 @@ TEST_F(CurveMetaToolTest, SupportCommand) { TEST_F(CurveMetaToolTest, PrintChunkMeta) { CurveMetaTool curveMetaTool(localFs_); - // 1、文件不存在 + // 1. The file does not exist EXPECT_CALL(*localFs_, Open(_, _)) .Times(6) .WillOnce(Return(-1)) .WillRepeatedly(Return(1)); - EXPECT_CALL(*localFs_, Close(_)) - .Times(5) - .WillRepeatedly(Return(-1)); + EXPECT_CALL(*localFs_, Close(_)).Times(5).WillRepeatedly(Return(-1)); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); - // 2、读取meta page失败 + // 2. Failed to read meta page EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(2) .WillOnce(Return(-1)) .WillOnce(Return(10)); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); - // 3、解析失败 + // 3. Parsing failed char buf[PAGE_SIZE] = {0}; EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), - Return(PAGE_SIZE))); + Return(PAGE_SIZE))); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); - // 4、普通chunk + // 4. Ordinary chunk ChunkFileMetaPage metaPage; metaPage.version = 1; metaPage.sn = 1; @@ -97,9 +94,9 @@ TEST_F(CurveMetaToolTest, PrintChunkMeta) { EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), - Return(PAGE_SIZE))); + Return(PAGE_SIZE))); ASSERT_EQ(0, curveMetaTool.RunCommand("chunk-meta")); - // 5、克隆chunk + // 5. 
Clone chunk metaPage.location = "test@s3"; uint32_t size = CHUNK_SIZE / PAGE_SIZE; auto bitmap = std::make_shared<Bitmap>(size); @@ -110,36 +107,34 @@ TEST_F(CurveMetaToolTest, PrintChunkMeta) { EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), - Return(PAGE_SIZE))); + Return(PAGE_SIZE))); ASSERT_EQ(0, curveMetaTool.RunCommand("chunk-meta")); } TEST_F(CurveMetaToolTest, PrintSnapshotMeta) { CurveMetaTool curveMetaTool(localFs_); - // 1、文件不存在 + // 1. The file does not exist EXPECT_CALL(*localFs_, Open(_, _)) .Times(5) .WillOnce(Return(-1)) .WillRepeatedly(Return(1)); - EXPECT_CALL(*localFs_, Close(_)) - .Times(4) - .WillRepeatedly(Return(-1)); + EXPECT_CALL(*localFs_, Close(_)).Times(4).WillRepeatedly(Return(-1)); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); - // 2、读取meta page失败 + // 2. Failed to read meta page EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(2) .WillOnce(Return(-1)) .WillOnce(Return(10)); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); - // 3、解析失败 + // 3. Parsing failed char buf[PAGE_SIZE] = {0}; EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), - Return(PAGE_SIZE))); + Return(PAGE_SIZE))); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); - // 4、成功chunk + // 4. Successful chunk SnapshotMetaPage metaPage; metaPage.version = 1; metaPage.sn = 1; @@ -153,9 +148,8 @@ TEST_F(CurveMetaToolTest, PrintSnapshotMeta) { EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), - Return(PAGE_SIZE))); + Return(PAGE_SIZE))); ASSERT_EQ(0, curveMetaTool.RunCommand("snapshot-meta")); } } // namespace tool } // namespace curve - diff --git a/test/tools/data_consistency_check_test.cpp b/test/tools/data_consistency_check_test.cpp index 15cd238004..c9641ee9b5 100644 --- a/test/tools/data_consistency_check_test.cpp +++ b/test/tools/data_consistency_check_test.cpp @@ -19,20 +19,20 @@ * File Created: Friday, 28th June 2019 2:29:14 pm * Author: tongguangxun */ +#include +#include #include #include -#include -#include #include "src/tools/consistency_check.h" -#include "test/tools/mock/mock_namespace_tool_core.h" #include "test/tools/mock/mock_chunkserver_client.h" +#include "test/tools/mock/mock_namespace_tool_core.h" DECLARE_bool(check_hash); using ::testing::_; -using ::testing::Return; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; extern uint32_t segment_size; @@ -41,8 +41,7 @@ extern uint32_t chunk_size; class ConsistencyCheckTest : public ::testing::Test { public: void SetUp() { - nameSpaceTool_ = - std::make_shared<MockNameSpaceToolCore>(); + nameSpaceTool_ = std::make_shared<MockNameSpaceToolCore>(); csClient_ = std::make_shared<MockChunkServerClient>(); } @@ -70,8 +69,7 @@ class ConsistencyCheckTest : public ::testing::Test { } void GetCopysetStatusForTest(CopysetStatusResponse* response, - int64_t applyingIndex = 1111, - bool ok = true) { + int64_t applyingIndex = 1111, bool ok = true) { if (ok) { response->set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } else { @@ -110,51 +108,41 @@ TEST_F(ConsistencyCheckTest, Consistency) { CopysetStatusResponse response; GetCopysetStatusForTest(&response); - // 设置期望 - EXPECT_CALL(*nameSpaceTool_, Init(_)) - .Times(2) - .WillRepeatedly(Return(0)); + // Set expectations + EXPECT_CALL(*nameSpaceTool_, Init(_)).Times(2).WillRepeatedly(Return(0)); EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _))
.Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(segments), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(segments), Return(0))); EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(20) - .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(90) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(90).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(60) - .WillRepeatedly(DoAll(SetArgPointee<1>(response), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(response), Return(0))); EXPECT_CALL(*csClient_, GetChunkHash(_, _)) .Times(30) - .WillRepeatedly(DoAll(SetArgPointee<1>("1111"), - Return(0))); - // 1、检查hash + .WillRepeatedly(DoAll(SetArgPointee<1>("1111"), Return(0))); + // 1. Check hash FLAGS_check_hash = true; curve::tool::ConsistencyCheck cfc1(nameSpaceTool_, csClient_); cfc1.PrintHelp("check-consistency"); cfc1.PrintHelp("check-nothing"); ASSERT_EQ(0, cfc1.RunCommand("check-consistency")); - // 2、检查applyIndex + // 2. Check the applyIndex FLAGS_check_hash = false; curve::tool::ConsistencyCheck cfc2(nameSpaceTool_, csClient_); ASSERT_EQ(0, cfc2.RunCommand("check-consistency")); ASSERT_EQ(-1, cfc2.RunCommand("check-nothing")); - // mds返回副本为空的情况 + // The case where MDS returns an empty replica list EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(segments), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(segments), Return(0))); EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(20) - .WillRepeatedly(DoAll(SetArgPointee<2>( - std::vector<ChunkServerLocation>()), - Return(0))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(std::vector<ChunkServerLocation>()), Return(0))); ASSERT_EQ(0, cfc2.RunCommand("check-consistency")); FLAGS_check_hash = true; ASSERT_EQ(0, cfc2.RunCommand("check-consistency")); @@ -180,61 +168,45 @@ TEST_F(ConsistencyCheckTest, NotConsistency) { CopysetStatusResponse response3; GetCopysetStatusForTest(&response3, 2222); - // 设置期望 - EXPECT_CALL(*nameSpaceTool_, Init(_)) - .Times(3) - .WillRepeatedly(Return(0)); + // Set expectations + EXPECT_CALL(*nameSpaceTool_, Init(_)).Times(3).WillRepeatedly(Return(0)); EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(segments), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(segments), Return(0))); EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), Return(0))); - // 1、检查hash,apply index一致,hash不一致 + // 1.
Check hash: the apply indexes are consistent but the hashes are + // inconsistent FLAGS_check_hash = true; - EXPECT_CALL(*csClient_, Init(_)) - .Times(5) - .WillRepeatedly(Return(0)); + EXPECT_CALL(*csClient_, Init(_)).Times(5).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(response1), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(response1), Return(0))); EXPECT_CALL(*csClient_, GetChunkHash(_, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<1>("2222"), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>("1111"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>("2222"), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>("1111"), Return(0))); curve::tool::ConsistencyCheck cfc1(nameSpaceTool_, csClient_); ASSERT_EQ(-1, cfc1.RunCommand("check-consistency")); - // 2、检查hash的时候apply index不一致 + // 2. When checking the hash, the apply index is inconsistent - EXPECT_CALL(*csClient_, Init(_)) - .Times(2) - .WillRepeatedly(Return(0)); + EXPECT_CALL(*csClient_, Init(_)).Times(2).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<1>(response1), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(response3), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(response1), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(response3), Return(0))); curve::tool::ConsistencyCheck cfc2(nameSpaceTool_, csClient_); ASSERT_EQ(-1, cfc2.RunCommand("check-consistency")); - // 3、检查applyIndex + // 3. Check the applyIndex FLAGS_check_hash = false; - EXPECT_CALL(*csClient_, Init(_)) - .Times(2) - .WillRepeatedly(Return(0)); + EXPECT_CALL(*csClient_, Init(_)).Times(2).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<1>(response1), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(response3), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(response1), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(response3), Return(0))); curve::tool::ConsistencyCheck cfc3(nameSpaceTool_, csClient_); ASSERT_EQ(-1, cfc3.RunCommand("check-consistency")); } @@ -254,62 +226,47 @@ TEST_F(ConsistencyCheckTest, CheckError) { } FLAGS_check_hash = false; curve::tool::ConsistencyCheck cfc(nameSpaceTool_, csClient_); - // 0、Init失败 - EXPECT_CALL(*nameSpaceTool_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + // 0. Init failed + EXPECT_CALL(*nameSpaceTool_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 1、获取segment失败 - EXPECT_CALL(*nameSpaceTool_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + // 1. Failed to obtain segment + EXPECT_CALL(*nameSpaceTool_, Init(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 2、获取chunkserver list失败 + // 2. Failed to obtain chunkserver list EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(segments), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(segments), Return(0))); EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 3、init 向chunkserverclient init失败 + // 3.
Init failed when initializing the chunkserver client EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 4、从chunkserver获取copyset status失败 - EXPECT_CALL(*csClient_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + // 4. Failed to obtain copyset status from chunkserver + EXPECT_CALL(*csClient_, Init(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 5、从chunkserver获取chunk hash失败 + // 5. Failed to obtain chunk hash from chunkserver FLAGS_check_hash = true; CopysetStatusResponse response1; GetCopysetStatusForTest(&response1); - EXPECT_CALL(*csClient_, Init(_)) - .Times(4) - .WillRepeatedly(Return(0)); + EXPECT_CALL(*csClient_, Init(_)).Times(4).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(response1), - Return(0))); - EXPECT_CALL(*csClient_, GetChunkHash(_, _)) - .Times(1) - .WillOnce(Return(-1)); + .WillRepeatedly(DoAll(SetArgPointee<1>(response1), Return(0))); + EXPECT_CALL(*csClient_, GetChunkHash(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); } diff --git a/test/tools/etcd_client_test.cpp b/test/tools/etcd_client_test.cpp index b6774425bd..0e7d8a9765 100644 --- a/test/tools/etcd_client_test.cpp +++ b/test/tools/etcd_client_test.cpp @@ -20,11 +20,14 @@ * Author: charisu */ +#include "src/tools/etcd_client.h" + #include -#include //NOLINT + #include //NOLINT #include -#include "src/tools/etcd_client.h" +#include //NOLINT + #include "src/common/timeutility.h" class EtcdClientTest : public ::testing::Test { @@ -36,21 +39,23 @@ class EtcdClientTest : public ::testing::Test { ASSERT_TRUE(false); } else if (0 == etcdPid) { /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, the child process should avoid LOG() printing, + * as it may cause a deadlock!!!
*/ - ASSERT_EQ(0, - execlp("etcd", "etcd", "--listen-client-urls", - "http://127.0.0.1:2366", "--advertise-client-urls", - "http://127.0.0.1:2366", "--listen-peer-urls", - "http://127.0.0.1:2367", - "--initial-advertise-peer-urls", - "http://127.0.0.1:2367", "--initial-cluster", - "toolEtcdClientTest=http://127.0.0.1:2367", - "--name", "toolEtcdClientTest", nullptr)); + ASSERT_EQ( + 0, + execlp("etcd", "etcd", "--listen-client-urls", + "http://127.0.0.1:2366", "--advertise-client-urls", + "http://127.0.0.1:2366", "--listen-peer-urls", + "http://127.0.0.1:2367", "--initial-advertise-peer-urls", + "http://127.0.0.1:2367", "--initial-cluster", + "toolEtcdClientTest=http://127.0.0.1:2367", "--name", + "toolEtcdClientTest", nullptr)); exit(0); } - // 一定时间内尝试check直到etcd完全起来 + // Keep checking for a certain period of time until etcd is + // fully up curve::tool::EtcdClient client; ASSERT_EQ(0, client.Init("127.0.0.1:2366")); bool running; @@ -59,8 +64,8 @@ class EtcdClientTest : public ::testing::Test { 5) { std::vector<std::string> leaderAddrVec; std::map<std::string, bool> onlineState; - ASSERT_EQ(0, - client.GetEtcdClusterStatus(&leaderAddrVec, &onlineState)); + ASSERT_EQ( + 0, client.GetEtcdClusterStatus(&leaderAddrVec, &onlineState)); if (onlineState["127.0.0.1:2366"]) { running = true; break; @@ -81,22 +86,22 @@ class EtcdClientTest : public ::testing::Test { TEST_F(EtcdClientTest, GetEtcdClusterStatus) { curve::tool::EtcdClient client; - // Init失败的情况 + // The case where Init fails ASSERT_EQ(-1, client.Init("")); - // Init成功 + // Init succeeded ASSERT_EQ(0, client.Init(etcdAddr)); std::vector<std::string> leaderAddrVec; std::map<std::string, bool> onlineState; - // 正常情况 + // Normal situation ASSERT_EQ(0, client.GetEtcdClusterStatus(&leaderAddrVec, &onlineState)); - std::map<std::string, bool> expected = { { "127.0.0.1:2366", true }, - { "127.0.0.1:2368", false } }; + std::map<std::string, bool> expected = {{"127.0.0.1:2366", true}, +  {"127.0.0.1:2368", false}}; ASSERT_EQ(expected, onlineState); ASSERT_EQ(1, leaderAddrVec.size()); ASSERT_EQ("127.0.0.1:2366", leaderAddrVec[0]); - // 空指针错误 + // Null pointer error ASSERT_EQ(-1, client.GetEtcdClusterStatus(nullptr, &onlineState)); ASSERT_EQ(-1, client.GetEtcdClusterStatus(&leaderAddrVec, nullptr)); } @@ -105,13 +110,13 @@ TEST_F(EtcdClientTest, GetAndCheckEtcdVersion) { curve::tool::EtcdClient client; ASSERT_EQ(0, client.Init("127.0.0.1:2366")); - // 正常情况 + // Normal situation std::string version; std::vector<std::string> failedList; ASSERT_EQ(0, client.GetAndCheckEtcdVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 个别etcd获取version失败 + // Some etcd instances failed to obtain the version ASSERT_EQ(0, client.Init(etcdAddr)); ASSERT_EQ(0, client.GetAndCheckEtcdVersion(&version, &failedList)); ASSERT_EQ(1, failedList.size()); diff --git a/test/tools/mds_client_test.cpp b/test/tools/mds_client_test.cpp index e261d43895..c89d8d7066 100644 --- a/test/tools/mds_client_test.cpp +++ b/test/tools/mds_client_test.cpp @@ -20,13 +20,16 @@ * Author: charisu */ -#include +#include "src/tools/mds_client.h" + #include +#include + #include -#include "src/tools/mds_client.h" + #include "test/tools/mock/mock_namespace_service.h" -#include "test/tools/mock/mock_topology_service.h" #include "test/tools/mock/mock_schedule_service.h" +#include "test/tools/mock/mock_topology_service.h" using curve::mds::schedule::QueryChunkServerRecoverStatusRequest; using curve::mds::schedule::QueryChunkServerRecoverStatusResponse; @@ -68,8 +71,8 @@ namespace tool { const char mdsAddr[] = "127.0.0.1:9191,127.0.0.1:9192"; template -void callback(RpcController *controller,
const Req *request, Resp *response, - Closure *done) { +void callback(RpcController* controller, const Req* request, Resp* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); } @@ -90,15 +93,15 @@ class ToolMDSClientTest : public ::testing::Test { ASSERT_EQ(0, server->Start("127.0.0.1:9192", nullptr)); brpc::StartDummyServerAt(9193); - // 初始化mds client + // Initialize mds client curve::mds::topology::ListPhysicalPoolResponse response; response.set_statuscode(kTopoErrCodeSuccess); EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const ListPhysicalPoolRequest* request, + ListPhysicalPoolResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.Init(mdsAddr, "9194,9193")); @@ -116,7 +119,7 @@ class ToolMDSClientTest : public ::testing::Test { scheduleService = nullptr; } - void GetFileInfoForTest(uint64_t id, FileInfo *fileInfo) { + void GetFileInfoForTest(uint64_t id, FileInfo* fileInfo) { fileInfo->set_id(id); fileInfo->set_filename("test"); fileInfo->set_parentid(0); @@ -127,11 +130,11 @@ class ToolMDSClientTest : public ::testing::Test { fileInfo->set_ctime(1573546993000000); } - void GetCopysetInfoForTest(CopySetServerInfo *info, int num, + void GetCopysetInfoForTest(CopySetServerInfo* info, int num, uint32_t copysetId = 1) { info->Clear(); for (int i = 0; i < num; ++i) { - curve::common::ChunkServerLocation *csLoc = info->add_cslocs(); + curve::common::ChunkServerLocation* csLoc = info->add_cslocs(); csLoc->set_chunkserverid(i); csLoc->set_hostip("127.0.0.1"); csLoc->set_port(9191 + i); @@ -139,14 +142,14 @@ class ToolMDSClientTest : public ::testing::Test { info->set_copysetid(copysetId); } - void GetSegmentForTest(PageFileSegment *segment) { + void GetSegmentForTest(PageFileSegment* segment) { segment->set_logicalpoolid(1); segment->set_segmentsize(DefaultSegmentSize); segment->set_chunksize(kChunkSize); segment->set_startoffset(0); } - void GetPhysicalPoolInfoForTest(PoolIdType id, PhysicalPoolInfo *pool) { + void GetPhysicalPoolInfoForTest(PoolIdType id, PhysicalPoolInfo* pool) { pool->set_physicalpoolid(id); pool->set_physicalpoolname("testPool"); pool->set_desc("physical pool for test"); @@ -155,7 +158,7 @@ class ToolMDSClientTest : public ::testing::Test { } void GetLogicalPoolForTest(PoolIdType id, - curve::mds::topology::LogicalPoolInfo *lpInfo) { + curve::mds::topology::LogicalPoolInfo* lpInfo) { lpInfo->set_logicalpoolid(id); lpInfo->set_logicalpoolname("defaultLogicalPool"); lpInfo->set_physicalpoolid(1); @@ -167,14 +170,14 @@ class ToolMDSClientTest : public ::testing::Test { lpInfo->set_allocatestatus(AllocateStatus::ALLOW); } - void GetZoneInfoForTest(ZoneIdType id, ZoneInfo *zoneInfo) { + void GetZoneInfoForTest(ZoneIdType id, ZoneInfo* zoneInfo) { zoneInfo->set_zoneid(1); zoneInfo->set_zonename("testZone"); zoneInfo->set_physicalpoolid(1); zoneInfo->set_physicalpoolname("testPool"); } - void GetServerInfoForTest(ServerIdType id, ServerInfo *serverInfo) { + void GetServerInfoForTest(ServerIdType id, ServerInfo* serverInfo) { serverInfo->set_serverid(id); serverInfo->set_hostname("localhost"); serverInfo->set_internalip("127.0.0.1"); @@ -189,7 +192,7 @@ class ToolMDSClientTest : public ::testing::Test { } void GetChunkServerInfoForTest(ChunkServerIdType id, - ChunkServerInfo *csInfo, + 
ChunkServerInfo* csInfo, bool retired = false) { csInfo->set_chunkserverid(id); csInfo->set_disktype("ssd"); @@ -206,10 +209,10 @@ class ToolMDSClientTest : public ::testing::Test { csInfo->set_diskcapacity(1024); csInfo->set_diskused(512); } - brpc::Server *server; - curve::mds::MockNameService *nameService; - curve::mds::topology::MockTopologyService *topoService; - curve::mds::schedule::MockScheduleService *scheduleService; + brpc::Server* server; + curve::mds::MockNameService* nameService; + curve::mds::topology::MockTopologyService* topoService; + curve::mds::schedule::MockScheduleService* scheduleService; MDSClient mdsClient; const uint64_t kChunkSize = 16777216; const uint64_t DefaultSegmentSize = 1024 * 1024 * 1024; @@ -220,9 +223,9 @@ TEST(MDSClientInitTest, Init) { ASSERT_EQ(-1, mdsClient.Init("")); ASSERT_EQ(-1, mdsClient.Init("127.0.0.1")); ASSERT_EQ(-1, mdsClient.Init("127.0.0.1:65536")); - // dummy server非法 + // dummy server is invalid ASSERT_EQ(-1, mdsClient.Init(mdsAddr, "")); - // dummy server与mds不匹配 + // dummy server and mds do not match ASSERT_EQ(-1, mdsClient.Init(mdsAddr, "9091,9092,9093")); } @@ -232,44 +235,44 @@ TEST_F(ToolMDSClientTest, GetFileInfo) { std::string filename = "/test"; curve::mds::FileInfo outFileInfo; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::GetFileInfoRequest *request, - curve::mds::GetFileInfoResponse *response, - Closure *done) { + .WillRepeatedly(Invoke([](RpcController* controller, + const curve::mds::GetFileInfoRequest* request, + curve::mds::GetFileInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetFileInfo(filename, &outFileInfo)); - // 返回码不为OK + // The return code is not OK curve::mds::GetFileInfoResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetFileInfoRequest *request, - curve::mds::GetFileInfoResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetFileInfoRequest* request, + curve::mds::GetFileInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetFileInfo(filename, &outFileInfo)); - // 正常情况 - curve::mds::FileInfo *info = new curve::mds::FileInfo; + // Normal situation + curve::mds::FileInfo* info = new curve::mds::FileInfo; GetFileInfoForTest(1, info); response.set_allocated_fileinfo(info); response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetFileInfoRequest *request, - curve::mds::GetFileInfoResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetFileInfoRequest* request, + curve::mds::GetFileInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetFileInfo(filename, &outFileInfo)); ASSERT_EQ(info->DebugString(), outFileInfo.DebugString()); } @@ -277,33 +280,33 @@ TEST_F(ToolMDSClientTest,
GetFileInfo) { TEST_F(ToolMDSClientTest, GetAllocatedSize) { uint64_t allocSize; std::string filename = "/test"; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( - [](RpcController *controller, - const curve::mds::GetAllocatedSizeRequest *request, - curve::mds::GetAllocatedSizeResponse *response, Closure *done) { + [](RpcController* controller, + const curve::mds::GetAllocatedSizeRequest* request, + curve::mds::GetAllocatedSizeResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetAllocatedSize(filename, &allocSize)); - // 返回码不为OK + // The return code is not OK curve::mds::GetAllocatedSizeResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetAllocatedSizeRequest *request, - curve::mds::GetAllocatedSizeResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetAllocatedSizeRequest* request, + curve::mds::GetAllocatedSizeResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetAllocatedSize(filename, &allocSize)); - // 正常情况 + // Normal situation response.set_allocatedsize(DefaultSegmentSize * 3); for (int i = 1; i <= 3; ++i) { response.mutable_allocsizemap()->insert( @@ -313,10 +316,10 @@ TEST_F(ToolMDSClientTest, GetAllocatedSize) { EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetAllocatedSizeRequest *request, - curve::mds::GetAllocatedSizeResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetAllocatedSizeRequest* request, + curve::mds::GetAllocatedSizeResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); AllocMap allocMap; ASSERT_EQ(0, mdsClient.GetAllocatedSize(filename, &allocSize, &allocMap)); ASSERT_EQ(DefaultSegmentSize * 3, allocSize); @@ -330,32 +333,32 @@ TEST_F(ToolMDSClientTest, ListDir) { std::string fileName = "/test"; std::vector fileInfoVec; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, ListDir(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::ListDirRequest *request, - curve::mds::ListDirResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::ListDirRequest* request, + curve::mds::ListDirResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListDir(fileName, &fileInfoVec)); - // 返回码不为OK + // The return code is not OK curve::mds::ListDirResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ListDir(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListDirRequest *request, - curve::mds::ListDirResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + 
Invoke([](RpcController* controller, + const curve::mds::ListDirRequest* request, + curve::mds::ListDirResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListDir(fileName, &fileInfoVec)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); for (int i = 0; i < 5; i++) { auto fileInfo = response.add_fileinfo(); @@ -364,10 +367,10 @@ TEST_F(ToolMDSClientTest, ListDir) { EXPECT_CALL(*nameService, ListDir(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListDirRequest *request, - curve::mds::ListDirResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ListDirRequest* request, + curve::mds::ListDirResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListDir(fileName, &fileInfoVec)); for (int i = 0; i < 5; i++) { FileInfo expected; @@ -381,70 +384,70 @@ TEST_F(ToolMDSClientTest, GetSegmentInfo) { curve::mds::PageFileSegment outSegment; uint64_t offset = 0; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::GetOrAllocateSegmentRequest* request, + curve::mds::GetOrAllocateSegmentResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(GetSegmentRes::kOtherError, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // segment不存在 + // segment does not exist curve::mds::GetOrAllocateSegmentResponse response; response.set_statuscode(curve::mds::StatusCode::kSegmentNotAllocated); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetOrAllocateSegmentRequest* request, + curve::mds::GetOrAllocateSegmentResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kSegmentNotAllocated, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // 文件不存在 + // File does not exist response.set_statuscode(curve::mds::StatusCode::kFileNotExists); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetOrAllocateSegmentRequest* request, + curve::mds::GetOrAllocateSegmentResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kFileNotExists, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // 其他错误 + // Other errors response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, 
GetOrAllocateSegment(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetOrAllocateSegmentRequest* request, + curve::mds::GetOrAllocateSegmentResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kOtherError, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // 正常情况 - PageFileSegment *segment = new PageFileSegment(); + // Normal situation + PageFileSegment* segment = new PageFileSegment(); GetSegmentForTest(segment); response.set_statuscode(curve::mds::StatusCode::kOK); response.set_allocated_pagefilesegment(segment); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetOrAllocateSegmentRequest* request, + curve::mds::GetOrAllocateSegmentResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kOK, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); ASSERT_EQ(segment->DebugString(), outSegment.DebugString()); @@ -453,41 +456,41 @@ TEST_F(ToolMDSClientTest, GetSegmentInfo) { TEST_F(ToolMDSClientTest, DeleteFile) { std::string fileName = "/test"; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::DeleteFileRequest *request, - curve::mds::DeleteFileResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::DeleteFileRequest* request, + curve::mds::DeleteFileResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.DeleteFile(fileName)); - // 返回码不为OK + // The return code is not OK curve::mds::DeleteFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::DeleteFileRequest *request, - curve::mds::DeleteFileResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::DeleteFileRequest* request, + curve::mds::DeleteFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.DeleteFile(fileName)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::DeleteFileRequest *request, - curve::mds::DeleteFileResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::DeleteFileRequest* request, + curve::mds::DeleteFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); 
ASSERT_EQ(0, mdsClient.DeleteFile(fileName)); } @@ -505,43 +508,41 @@ TEST_F(ToolMDSClientTest, CreateFile) { context.stripeCount = stripeCount; context.poolset = ""; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::CreateFileRequest *request, - curve::mds::CreateFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController* controller, + const curve::mds::CreateFileRequest* request, + curve::mds::CreateFileResponse* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.CreateFile(context)); - // 返回码不为OK + // The return code is not OK curve::mds::CreateFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::CreateFileRequest *request, - curve::mds::CreateFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const curve::mds::CreateFileRequest* request, + curve::mds::CreateFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.CreateFile(context)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::CreateFileRequest *request, - curve::mds::CreateFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const curve::mds::CreateFileRequest* request, + curve::mds::CreateFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.CreateFile(context)); } @@ -553,10 +554,10 @@ TEST_F(ToolMDSClientTest, ExtendVolume_success) { EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ExtendFileRequest *request, - curve::mds::ExtendFileResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ExtendFileRequest* request, + curve::mds::ExtendFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ExtendVolume(fileName, length)); } @@ -564,32 +565,32 @@ TEST_F(ToolMDSClientTest, ExtendVolume_Fail) { std::string fileName = "/test"; uint64_t length = 10 * DefaultSegmentSize; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::ExtendFileRequest *request, - curve::mds::ExtendFileResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::ExtendFileRequest* request, + curve::mds::ExtendFileResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + 
brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ExtendVolume(fileName, length)); return; - // 返回码不为OK + // The return code is not OK curve::mds::ExtendFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ExtendFileRequest *request, - curve::mds::ExtendFileResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ExtendFileRequest* request, + curve::mds::ExtendFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ExtendVolume(fileName, length)); } @@ -598,35 +599,35 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { CopySetIdType copysetId = 100; std::vector csLocs; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( - [](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, Closure *done) { + [](RpcController* controller, + const GetChunkServerListInCopySetsRequest* request, + GetChunkServerListInCopySetsResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet(logicalPoolId, copysetId, &csLocs)); - // 返回码不为OK + // The return code is not OK GetChunkServerListInCopySetsResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetChunkServerListInCopySetsRequest* request, + GetChunkServerListInCopySetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet(logicalPoolId, copysetId, &csLocs)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); CopySetServerInfo csInfo; GetCopysetInfoForTest(&csInfo, 3, copysetId); @@ -635,10 +636,10 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetChunkServerListInCopySetsRequest* request, + GetChunkServerListInCopySetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetChunkServerListInCopySet(logicalPoolId, copysetId, &csLocs)); ASSERT_EQ(csInfo.cslocs_size(), csLocs.size()); @@ -646,7 +647,7 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { ASSERT_EQ(csInfo.cslocs(i).DebugString(), csLocs[i].DebugString()); } - // 测试获取多个copyset + // Test obtaining multiple copysets std::vector 
expected; response.Clear(); response.set_statuscode(kTopoErrCodeSuccess); @@ -662,10 +663,10 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetChunkServerListInCopySetsRequest* request, + GetChunkServerListInCopySetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetChunkServerListInCopySets(logicalPoolId, copysets, &csServerInfos)); ASSERT_EQ(expected.size(), csServerInfos.size()); @@ -677,47 +678,45 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { TEST_F(ToolMDSClientTest, ListPhysicalPoolsInCluster) { std::vector pools; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const ListPhysicalPoolRequest* request, + ListPhysicalPoolResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListPhysicalPoolsInCluster(&pools)); - // 返回码不为OK + // The return code is not OK ListPhysicalPoolResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const ListPhysicalPoolRequest* request, + ListPhysicalPoolResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListPhysicalPoolsInCluster(&pools)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto poolInfo = response.add_physicalpoolinfos(); GetPhysicalPoolInfoForTest(i, poolInfo); } EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const ListPhysicalPoolRequest* request, + ListPhysicalPoolResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListPhysicalPoolsInCluster(&pools)); ASSERT_EQ(3, pools.size()); for (int i = 0; i < 3; ++i) { @@ -731,46 +730,44 @@ TEST_F(ToolMDSClientTest, ListLogicalPoolsInPhysicalPool) { PoolIdType poolId = 1; std::vector pools; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( - [](RpcController *controller, const ListLogicalPoolRequest *request, - ListLogicalPoolResponse *response, Closure *done) { + 
[](RpcController* controller, const ListLogicalPoolRequest* request, + ListLogicalPoolResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); - // 返回码不为OK + // The return code is not OK ListLogicalPoolResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListLogicalPoolRequest *request, - ListLogicalPoolResponse *response, Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const ListLogicalPoolRequest* request, + ListLogicalPoolResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto poolInfo = response.add_logicalpoolinfos(); GetLogicalPoolForTest(i, poolInfo); } EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListLogicalPoolRequest *request, - ListLogicalPoolResponse *response, Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const ListLogicalPoolRequest* request, + ListLogicalPoolResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); ASSERT_EQ(3, pools.size()); for (int i = 0; i < 3; ++i) { @@ -783,33 +780,33 @@ TEST_F(ToolMDSClientTest, ListLogicalPoolsInPhysicalPool) { TEST_F(ToolMDSClientTest, ListZoneInPhysicalPool) { PoolIdType poolId = 1; std::vector zones; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::topology::ListPoolZoneRequest *request, - curve::mds::topology::ListPoolZoneResponse *response, - Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::topology::ListPoolZoneRequest* request, + curve::mds::topology::ListPoolZoneResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListZoneInPhysicalPool(poolId, &zones)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::ListPoolZoneResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListPoolZoneRequest *request, - curve::mds::topology::ListPoolZoneResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::topology::ListPoolZoneRequest* request, + curve::mds::topology::ListPoolZoneResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, 
mdsClient.ListZoneInPhysicalPool(poolId, &zones)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto zoneInfo = response.add_zones(); @@ -818,10 +815,10 @@ TEST_F(ToolMDSClientTest, ListZoneInPhysicalPool) { EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListPoolZoneRequest *request, - curve::mds::topology::ListPoolZoneResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::topology::ListPoolZoneRequest* request, + curve::mds::topology::ListPoolZoneResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListZoneInPhysicalPool(poolId, &zones)); ASSERT_EQ(3, zones.size()); for (int i = 0; i < 3; ++i) { @@ -835,35 +832,35 @@ TEST_F(ToolMDSClientTest, ListServersInZone) { ZoneIdType zoneId; std::vector servers; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListZoneServer(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( - [](RpcController *controller, - const curve::mds::topology::ListZoneServerRequest *request, - curve::mds::topology::ListZoneServerResponse *response, - Closure *done) { + [](RpcController* controller, + const curve::mds::topology::ListZoneServerRequest* request, + curve::mds::topology::ListZoneServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListServersInZone(zoneId, &servers)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::ListZoneServerResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListZoneServer(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::ListZoneServerRequest *request, - curve::mds::topology::ListZoneServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::ListZoneServerRequest* request, + curve::mds::topology::ListZoneServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListServersInZone(zoneId, &servers)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto serverInfo = response.add_serverinfo(); @@ -873,10 +870,10 @@ TEST_F(ToolMDSClientTest, ListServersInZone) { .WillOnce(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::ListZoneServerRequest *request, - curve::mds::topology::ListZoneServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::ListZoneServerRequest* request, + curve::mds::topology::ListZoneServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListServersInZone(zoneId, &servers)); ASSERT_EQ(3, servers.size()); for (int i = 0; i < 3; ++i) { @@ -890,35 +887,36 @@ TEST_F(ToolMDSClientTest, ListChunkServersOnServer) { ServerIdType serverId = 1; std::vector chunkservers; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListChunkServer(_, _, _, _)) .Times(6) 
.WillRepeatedly(Invoke( - [](RpcController *controller, - const curve::mds::topology::ListChunkServerRequest *request, - curve::mds::topology::ListChunkServerResponse *response, - Closure *done) { + [](RpcController* controller, + const curve::mds::topology::ListChunkServerRequest* request, + curve::mds::topology::ListChunkServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::ListChunkServerResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListChunkServer(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::ListChunkServerRequest *request, - curve::mds::topology::ListChunkServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::ListChunkServerRequest* request, + curve::mds::topology::ListChunkServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); - // 正常情况,两个chunkserver正常,一个chunkserver retired + // Normal case: two chunkservers are normal and one + // chunkserver is retired response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto csInfo = response.add_chunkserverinfos(); @@ -928,10 +926,10 @@ TEST_F(ToolMDSClientTest, ListChunkServersOnServer) { .WillOnce(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::ListChunkServerRequest *request, - curve::mds::topology::ListChunkServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::ListChunkServerRequest* request, + curve::mds::topology::ListChunkServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); ASSERT_EQ(2, chunkservers.size()); for (int i = 0; i < 2; ++i) { @@ -946,23 +944,23 @@ TEST_F(ToolMDSClientTest, GetChunkServerInfo) { std::string csAddr = "127.0.0.1:8200"; ChunkServerInfo chunkserver; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) .Times(12) .WillRepeatedly(Invoke( - [](RpcController *controller, - const curve::mds::topology::GetChunkServerInfoRequest *request, - curve::mds::topology::GetChunkServerInfoResponse *response, - Closure *done) { + [](RpcController* controller, + const curve::mds::topology::GetChunkServerInfoRequest* request, + curve::mds::topology::GetChunkServerInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csId, &chunkserver)); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::GetChunkServerInfoResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) @@ -970,17 +968,17 @@
TEST_F(ToolMDSClientTest, GetChunkServerInfo) { .WillRepeatedly(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::GetChunkServerInfoRequest - *request, - curve::mds::topology::GetChunkServerInfoResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::GetChunkServerInfoRequest* + request, + curve::mds::topology::GetChunkServerInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csId, &chunkserver)); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); - ChunkServerInfo *csInfo = new ChunkServerInfo(); + ChunkServerInfo* csInfo = new ChunkServerInfo(); GetChunkServerInfoForTest(1, csInfo); response.set_allocated_chunkserverinfo(csInfo); EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) @@ -988,18 +986,18 @@ TEST_F(ToolMDSClientTest, GetChunkServerInfo) { .WillRepeatedly(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::GetChunkServerInfoRequest - *request, - curve::mds::topology::GetChunkServerInfoResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::GetChunkServerInfoRequest* + request, + curve::mds::topology::GetChunkServerInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetChunkServerInfo(csId, &chunkserver)); ASSERT_EQ(0, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); ChunkServerInfo expected; GetChunkServerInfoForTest(1, &expected); ASSERT_EQ(expected.DebugString(), chunkserver.DebugString()); - // chunkserver地址不合法的情况 + // Illegal chunkserver address csAddr = ""; ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); csAddr = "127.0.0.1"; @@ -1013,36 +1011,36 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { std::string csAddr = "127.0.0.1:8200"; std::vector copysets; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, GetCopySetsInChunkServer(_, _, _, _)) .Times(12) .WillRepeatedly(Invoke( - [](RpcController *controller, - const GetCopySetsInChunkServerRequest *request, - GetCopySetsInChunkServerResponse *response, Closure *done) { + [](RpcController* controller, + const GetCopySetsInChunkServerRequest* request, + GetCopySetsInChunkServerResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csId, &copysets)); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, &copysets)); - // 返回码不为OK + // The return code is not OK GetCopySetsInChunkServerResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetCopySetsInChunkServer(_, _, _, _)) .Times(2) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInChunkServerRequest *request, - GetCopySetsInChunkServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetCopySetsInChunkServerRequest* request, + GetCopySetsInChunkServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); })));
ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csId, &copysets)); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, &copysets)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 5; ++i) { auto copysetInfo = response.add_copysetinfos(); @@ -1053,10 +1051,10 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { .Times(2) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInChunkServerRequest *request, - GetCopySetsInChunkServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetCopySetsInChunkServerRequest* request, + GetCopySetsInChunkServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetCopySetsInChunkServer(csId, &copysets)); ASSERT_EQ(5, copysets.size()); copysets.clear(); @@ -1066,7 +1064,7 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { ASSERT_EQ(1, copysets[i].logicalpoolid()); ASSERT_EQ(1000 + i, copysets[i].copysetid()); } - // chunkserver地址不合法的情况 + // Illegal chunkserver address csAddr = ""; ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, &copysets)); csAddr = "127.0.0.1"; @@ -1078,34 +1076,34 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { std::vector copysets; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, GetCopySetsInCluster(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const GetCopySetsInClusterRequest *request, - GetCopySetsInClusterResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const GetCopySetsInClusterRequest* request, + GetCopySetsInClusterResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetCopySetsInCluster(&copysets)); - // 返回码不为OK + // The return code is not OK GetCopySetsInClusterResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetCopySetsInCluster(_, _, _, _)) .Times(1) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInClusterRequest *request, - GetCopySetsInClusterResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetCopySetsInClusterRequest* request, + GetCopySetsInClusterResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetCopySetsInCluster(&copysets)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 5; ++i) { auto copysetInfo = response.add_copysetinfos(); @@ -1116,10 +1114,10 @@ TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { .Times(1) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInClusterRequest *request, - GetCopySetsInClusterResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetCopySetsInClusterRequest* request, + GetCopySetsInClusterResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetCopySetsInCluster(&copysets)); ASSERT_EQ(5, copysets.size()); copysets.clear(); @@ -1131,11 +1129,11 @@
TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { TEST_F(ToolMDSClientTest, GetCopyset) { auto succCallback = callback; - auto failCallback = [](RpcController *controller, - const GetCopysetRequest *request, - GetCopysetResponse *response, Closure *done) { + auto failCallback = [](RpcController* controller, + const GetCopysetRequest* request, + GetCopysetResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); cntl->SetFailed("fail"); }; @@ -1184,42 +1182,42 @@ TEST_F(ToolMDSClientTest, GetCopyset) { } TEST_F(ToolMDSClientTest, RapidLeaderSchedule) { - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const RapidLeaderScheduleRequst *request, - RapidLeaderScheduleResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const RapidLeaderScheduleRequst* request, + RapidLeaderScheduleResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.RapidLeaderSchedule(1)); - // 返回码不为OK + // The return code is not OK RapidLeaderScheduleResponse response; response.set_statuscode( curve::mds::schedule::kScheduleErrCodeInvalidLogicalPool); EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const RapidLeaderScheduleRequst *request, - RapidLeaderScheduleResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const RapidLeaderScheduleRequst* request, + RapidLeaderScheduleResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.RapidLeaderSchedule(1)); - // 成功 + // Success response.set_statuscode(curve::mds::schedule::kScheduleErrCodeSuccess); EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const RapidLeaderScheduleRequst *request, - RapidLeaderScheduleResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const RapidLeaderScheduleRequst* request, + RapidLeaderScheduleResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.RapidLeaderSchedule(1)); } @@ -1234,13 +1232,13 @@ TEST_F(ToolMDSClientTest, SetLogicalPoolScanState) { // CASE 1: Send rpc failed { - auto failCallback = [](RpcController *controller, - const SetLogicalPoolScanStateRequest *request, - SetLogicalPoolScanStateResponse *response, - Closure *done) { + auto failCallback = [](RpcController* controller, + const SetLogicalPoolScanStateRequest* request, + SetLogicalPoolScanStateResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("fail"); }; EXPECT_CALL(*topoService, SetLogicalPoolScanState(_, _, _, _)) @@ -1267,43 +1265,43 @@ TEST_F(ToolMDSClientTest, SetLogicalPoolScanState) { TEST_F(ToolMDSClientTest, QueryChunkServerRecoverStatus) { std::map statusMap; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*scheduleService, 
QueryChunkServerRecoverStatus(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( - [](RpcController *controller, - const QueryChunkServerRecoverStatusRequest *request, - QueryChunkServerRecoverStatusResponse *response, Closure *done) { + [](RpcController* controller, + const QueryChunkServerRecoverStatusRequest* request, + QueryChunkServerRecoverStatusResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.QueryChunkServerRecoverStatus( std::vector{}, &statusMap)); - // 1. QueryChunkServerRecoverStatus失败的情况 + // 1. The case where QueryChunkServerRecoverStatus fails QueryChunkServerRecoverStatusResponse response; response.set_statuscode( curve::mds::schedule::kScheduleErrInvalidQueryChunkserverID); EXPECT_CALL(*scheduleService, QueryChunkServerRecoverStatus(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const QueryChunkServerRecoverStatusRequest *request, - QueryChunkServerRecoverStatusResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const QueryChunkServerRecoverStatusRequest* request, + QueryChunkServerRecoverStatusResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.QueryChunkServerRecoverStatus( std::vector{}, &statusMap)); - // 2. QueryChunkServerRecoverStatus成功的情况 + // 2. The case where QueryChunkServerRecoverStatus succeeds response.set_statuscode(curve::mds::schedule::kScheduleErrCodeSuccess); EXPECT_CALL(*scheduleService, QueryChunkServerRecoverStatus(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const QueryChunkServerRecoverStatusRequest *request, - QueryChunkServerRecoverStatusResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const QueryChunkServerRecoverStatusRequest* request, + QueryChunkServerRecoverStatusResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.QueryChunkServerRecoverStatus( std::vector{}, &statusMap)); } @@ -1329,12 +1327,12 @@ TEST_F(ToolMDSClientTest, GetMetric) { TEST_F(ToolMDSClientTest, GetCurrentMds) { bvar::Status value; value.expose("mds_status"); - // 有leader + // With a leader value.set_value("leader"); std::vector curMds = mdsClient.GetCurrentMds(); ASSERT_EQ(1, curMds.size()); ASSERT_EQ("127.0.0.1:9192", curMds[0]); - // 没有leader + // No leader value.set_value("follower"); ASSERT_TRUE(mdsClient.GetCurrentMds().empty()); } @@ -1343,20 +1341,22 @@ TEST_F(ToolMDSClientTest, GetMdsOnlineStatus) { bvar::Status value; value.expose("mds_config_mds_listen_addr"); std::map onlineStatus; - // 9180在线,9999不在线 - value.set_value("{\"conf_name\":\"mds.listen.addr\"," "\"conf_value\":\"127.0.0.1:9192\"}"); + // 9180 online, 9999 offline + value.set_value( "{\"conf_name\":\"mds.listen.addr\"," "\"conf_value\":\"127.0.0.1:9192\"}"); mdsClient.GetMdsOnlineStatus(&onlineStatus); std::map expected = {{"127.0.0.1:9191", false}, {"127.0.0.1:9192", true}}; ASSERT_EQ(expected, onlineStatus); - // 9180的服务端口不一致 - value.set_value("{\"conf_name\":\"mds.listen.addr\"," "\"conf_value\":\"127.0.0.1:9188\"}"); + // The service port of 9180 is inconsistent + value.set_value( "{\"conf_name\":\"mds.listen.addr\"," "\"conf_value\":\"127.0.0.1:9188\"}");
mdsClient.GetMdsOnlineStatus(&onlineStatus); expected = {{"127.0.0.1:9191", false}, {"127.0.0.1:9192", false}}; ASSERT_EQ(expected, onlineStatus); - // 非json格式 + // Non-JSON format value.set_value("127.0.0.1::9191"); mdsClient.GetMdsOnlineStatus(&onlineStatus); expected = {{"127.0.0.1:9191", false}, {"127.0.0.1:9192", false}}; @@ -1366,33 +1366,33 @@ TEST_F(ToolMDSClientTest, GetMdsOnlineStatus) { TEST_F(ToolMDSClientTest, ListClient) { std::vector clientAddrs; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*nameService, ListClient(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::ListClientRequest *request, - curve::mds::ListClientResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::ListClientRequest* request, + curve::mds::ListClientResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListClient(&clientAddrs)); - // 返回码不为OK + // The return code is not OK curve::mds::ListClientResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ListClient(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListClientRequest *request, - curve::mds::ListClientResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ListClientRequest* request, + curve::mds::ListClientResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListClient(&clientAddrs)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); for (int i = 0; i < 5; i++) { auto clientInfo = response.add_clientinfos(); @@ -1402,14 +1402,14 @@ TEST_F(ToolMDSClientTest, ListClient) { EXPECT_CALL(*nameService, ListClient(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListClientRequest *request, - curve::mds::ListClientResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ListClientRequest* request, + curve::mds::ListClientResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListClient(&clientAddrs)); ASSERT_EQ(response.clientinfos_size(), clientAddrs.size()); for (int i = 0; i < 5; i++) { - const auto &clientInfo = response.clientinfos(i); + const auto& clientInfo = response.clientinfos(i); std::string expected = clientInfo.ip() + ":" + std::to_string(clientInfo.port()); ASSERT_EQ(expected, clientAddrs[i]); @@ -1424,13 +1424,13 @@ TEST_F(ToolMDSClientTest, ListVolumesOnCopyset) { EXPECT_CALL(*nameService, ListVolumesOnCopysets(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::ListVolumesOnCopysetsRequest *request, - curve::mds::ListVolumesOnCopysetsResponse *response, - Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::ListVolumesOnCopysetsRequest* request, + curve::mds::ListVolumesOnCopysetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1,
mdsClient.ListVolumesOnCopyset(copysets, &fileNames)); @@ -1441,10 +1441,10 @@ TEST_F(ToolMDSClientTest, ListVolumesOnCopyset) { EXPECT_CALL(*nameService, ListVolumesOnCopysets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListVolumesOnCopysetsRequest *request, - curve::mds::ListVolumesOnCopysetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ListVolumesOnCopysetsRequest* request, + curve::mds::ListVolumesOnCopysetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListVolumesOnCopyset(copysets, &fileNames)); // normal @@ -1456,10 +1456,10 @@ TEST_F(ToolMDSClientTest, ListVolumesOnCopyset) { EXPECT_CALL(*nameService, ListVolumesOnCopysets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListVolumesOnCopysetsRequest *request, - curve::mds::ListVolumesOnCopysetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ListVolumesOnCopysetsRequest* request, + curve::mds::ListVolumesOnCopysetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListVolumesOnCopyset(copysets, &fileNames)); ASSERT_EQ(response.filenames_size(), fileNames.size()); for (int i = 0; i < 5; i++) { @@ -1478,12 +1478,12 @@ TEST_F(ToolMDSClientTest, SetCopysetsAvailFlag) { EXPECT_CALL(*topoService, SetCopysetsAvailFlag(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const SetCopysetsAvailFlagRequest *request, - SetCopysetsAvailFlagResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const SetCopysetsAvailFlagRequest* request, + SetCopysetsAvailFlagResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.SetCopysetsAvailFlag(copysets, false)); @@ -1494,10 +1494,10 @@ TEST_F(ToolMDSClientTest, SetCopysetsAvailFlag) { EXPECT_CALL(*topoService, SetCopysetsAvailFlag(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const SetCopysetsAvailFlagRequest *request, - SetCopysetsAvailFlagResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const SetCopysetsAvailFlagRequest* request, + SetCopysetsAvailFlagResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.SetCopysetsAvailFlag(copysets, false)); // normal @@ -1505,10 +1505,10 @@ TEST_F(ToolMDSClientTest, SetCopysetsAvailFlag) { EXPECT_CALL(*topoService, SetCopysetsAvailFlag(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const SetCopysetsAvailFlagRequest *request, - SetCopysetsAvailFlagResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const SetCopysetsAvailFlagRequest* request, + SetCopysetsAvailFlagResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.SetCopysetsAvailFlag(copysets, false)); } @@ -1518,12 +1518,12 @@ TEST_F(ToolMDSClientTest, ListUnAvailCopySets) { EXPECT_CALL(*topoService, 
ListUnAvailCopySets(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const ListUnAvailCopySetsRequest *request, - ListUnAvailCopySetsResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const ListUnAvailCopySetsRequest* request, + ListUnAvailCopySetsResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListUnAvailCopySets(©sets)); @@ -1534,10 +1534,10 @@ TEST_F(ToolMDSClientTest, ListUnAvailCopySets) { EXPECT_CALL(*topoService, ListUnAvailCopySets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListUnAvailCopySetsRequest *request, - ListUnAvailCopySetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const ListUnAvailCopySetsRequest* request, + ListUnAvailCopySetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListUnAvailCopySets(©sets)); // normal @@ -1550,10 +1550,10 @@ TEST_F(ToolMDSClientTest, ListUnAvailCopySets) { EXPECT_CALL(*topoService, ListUnAvailCopySets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListUnAvailCopySetsRequest *request, - ListUnAvailCopySetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const ListUnAvailCopySetsRequest* request, + ListUnAvailCopySetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListUnAvailCopySets(©sets)); } diff --git a/test/tools/metric_client_test.cpp b/test/tools/metric_client_test.cpp index 30f6c78802..7e41b910f5 100644 --- a/test/tools/metric_client_test.cpp +++ b/test/tools/metric_client_test.cpp @@ -20,10 +20,12 @@ * Author: charisu */ -#include +#include "src/tools/metric_client.h" + #include +#include + #include -#include "src/tools/metric_client.h" namespace curve { namespace tool { @@ -43,82 +45,71 @@ class MetricClientTest : public ::testing::Test { delete server; server = nullptr; } - brpc::Server *server; + brpc::Server* server; }; TEST_F(MetricClientTest, GetMetric) { MetricClient client; - // 正常情况 + // Normal situation std::string metricName = "string_metric"; bvar::Status metric(metricName, "value"); std::string value; - ASSERT_EQ(MetricRet::kOK, client.GetMetric(serverAddr, - metricName, - &value)); + ASSERT_EQ(MetricRet::kOK, client.GetMetric(serverAddr, metricName, &value)); ASSERT_EQ("value", value); - // bvar不存在 - ASSERT_EQ(MetricRet::kNotFound, client.GetMetric(serverAddr, - "not-exist-metric", - &value)); - // 其他错误 - ASSERT_EQ(MetricRet::kOtherErr, client.GetMetric("127.0.0.1:9191", - "not-exist-metric", - &value)); + // Bvar does not exist + ASSERT_EQ(MetricRet::kNotFound, + client.GetMetric(serverAddr, "not-exist-metric", &value)); + // Other errors + ASSERT_EQ(MetricRet::kOtherErr, + client.GetMetric("127.0.0.1:9191", "not-exist-metric", &value)); } TEST_F(MetricClientTest, GetMetricUint) { MetricClient client; - // 正常情况 + // Normal situation std::string metricName = "uint_metric"; bvar::Status metric(metricName, 10); uint64_t value; - ASSERT_EQ(MetricRet::kOK, client.GetMetricUint(serverAddr, - metricName, - &value)); + ASSERT_EQ(MetricRet::kOK, + client.GetMetricUint(serverAddr, metricName, &value)); 
ASSERT_EQ(10, value); - // bvar不存在 - ASSERT_EQ(MetricRet::kNotFound, client.GetMetricUint(serverAddr, - "not-exist-metric", - &value)); - // 其他错误 - ASSERT_EQ(MetricRet::kOtherErr, client.GetMetricUint("127.0.0.1:9191", - "not-exist-metric", - &value)); - // 解析失败 + // Bvar does not exist + ASSERT_EQ(MetricRet::kNotFound, + client.GetMetricUint(serverAddr, "not-exist-metric", &value)); + // Other errors + ASSERT_EQ( + MetricRet::kOtherErr, + client.GetMetricUint("127.0.0.1:9191", "not-exist-metric", &value)); + // Parsing failed bvar::Status metric2("string_metric", "value"); - ASSERT_EQ(MetricRet::kOtherErr, client.GetMetricUint(serverAddr, - "string_metric", - &value)); + ASSERT_EQ(MetricRet::kOtherErr, + client.GetMetricUint(serverAddr, "string_metric", &value)); } TEST_F(MetricClientTest, GetConfValue) { MetricClient client; - // 正常情况 + // Normal situation std::string metricName = "conf_metric"; bvar::Status conf_metric(metricName, ""); - conf_metric.set_value("{\"conf_name\":\"key\"," - "\"conf_value\":\"value\"}"); + conf_metric.set_value( + "{\"conf_name\":\"key\"," + "\"conf_value\":\"value\"}"); std::string value; - ASSERT_EQ(MetricRet::kOK, client.GetConfValueFromMetric(serverAddr, - metricName, - &value)); + ASSERT_EQ(MetricRet::kOK, + client.GetConfValueFromMetric(serverAddr, metricName, &value)); ASSERT_EQ("value", value); - // bvar不存在 - ASSERT_EQ(MetricRet::kNotFound, client.GetConfValueFromMetric( - serverAddr, - "not-exist-metric", - &value)); - // 其他错误 - ASSERT_EQ(MetricRet::kOtherErr, client.GetConfValueFromMetric( - "127.0.0.1:9191", - "not-exist-metric", - &value)); - // 解析失败 + // Bvar does not exist + ASSERT_EQ( + MetricRet::kNotFound, + client.GetConfValueFromMetric(serverAddr, "not-exist-metric", &value)); + // Other errors + ASSERT_EQ(MetricRet::kOtherErr, + client.GetConfValueFromMetric("127.0.0.1:9191", + "not-exist-metric", &value)); + // Parsing failed conf_metric.set_value("string"); - ASSERT_EQ(MetricRet::kOtherErr, client.GetConfValueFromMetric( - serverAddr, - metricName, - &value)); + ASSERT_EQ(MetricRet::kOtherErr, + client.GetConfValueFromMetric(serverAddr, metricName, &value)); } } // namespace tool diff --git a/test/tools/namespace_tool_core_test.cpp b/test/tools/namespace_tool_core_test.cpp index e1b365b28f..7affe3b1a6 100644 --- a/test/tools/namespace_tool_core_test.cpp +++ b/test/tools/namespace_tool_core_test.cpp @@ -20,18 +20,20 @@ * Author: charisu */ +#include "src/tools/namespace_tool_core.h" + #include + #include "src/common/timeutility.h" -#include "src/tools/namespace_tool_core.h" #include "test/tools/mock/mock_mds_client.h" +using curve::tool::CreateFileContext; +using curve::tool::GetSegmentRes; using ::testing::_; -using ::testing::Return; using ::testing::DoAll; -using ::testing::SetArgPointee; +using ::testing::Return; using ::testing::SaveArg; -using curve::tool::GetSegmentRes; -using curve::tool::CreateFileContext; +using ::testing::SetArgPointee; DECLARE_bool(isTest); DECLARE_string(fileName); @@ -39,12 +41,8 @@ DECLARE_uint64(offset); class NameSpaceToolCoreTest : public ::testing::Test { protected: - void SetUp() { - client_ = std::make_shared(); - } - void TearDown() { - client_ = nullptr; - } + void SetUp() { client_ = std::make_shared(); } + void TearDown() { client_ = nullptr; } void GetFileInfoForTest(FileInfo* fileInfo) { fileInfo->set_id(1); @@ -98,14 +96,11 @@ TEST_F(NameSpaceToolCoreTest, GetFileInfo) { EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(expected), - Return(0))); + 
.WillOnce(DoAll(SetArgPointee<1>(expected), Return(0))); ASSERT_EQ(0, namespaceTool.GetFileInfo(fileName, &fileInfo)); ASSERT_EQ(expected.DebugString(), fileInfo.DebugString()); - EXPECT_CALL(*client_, GetFileInfo(_, _)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*client_, GetFileInfo(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.GetFileInfo(fileName, &fileInfo)); } @@ -122,17 +117,14 @@ TEST_F(NameSpaceToolCoreTest, ListDir) { EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(expected), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(expected), Return(0))); ASSERT_EQ(0, namespaceTool.ListDir(fileName, &files)); ASSERT_EQ(expected.size(), files.size()); for (uint64_t i = 0; i < expected.size(); ++i) { ASSERT_EQ(expected[i].DebugString(), files[i].DebugString()); } - EXPECT_CALL(*client_, ListDir(_, _)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*client_, ListDir(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.ListDir(fileName, &files)); } @@ -140,14 +132,12 @@ TEST_F(NameSpaceToolCoreTest, CreateFile) { curve::tool::NameSpaceToolCore namespaceTool(client_); std::string fileName = "/test"; uint64_t length = 5 * segmentSize; - uint64_t stripeUnit = 32 * 1024 *1024; + uint64_t stripeUnit = 32 * 1024 * 1024; uint64_t stripeCount = 32; std::string pstName = ""; - // 1、正常情况 - EXPECT_CALL(*client_, CreateFile(_)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*client_, CreateFile(_)).Times(1).WillOnce(Return(0)); CreateFileContext context; context.type = curve::mds::FileType::INODE_PAGEFILE; @@ -159,10 +149,8 @@ TEST_F(NameSpaceToolCoreTest, CreateFile) { ASSERT_EQ(0, namespaceTool.CreateFile(context)); - // 2、创建失败 - EXPECT_CALL(*client_, CreateFile(_)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Creation failed + EXPECT_CALL(*client_, CreateFile(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.CreateFile(context)); } @@ -170,16 +158,12 @@ TEST_F(NameSpaceToolCoreTest, ExtendVolume) { curve::tool::NameSpaceToolCore namespaceTool(client_); std::string fileName = "/test"; uint64_t length = 10 * segmentSize; - // 1、正常情况 - EXPECT_CALL(*client_, ExtendVolume(_, _)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*client_, ExtendVolume(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.ExtendVolume(fileName, length)); - // 2、创建失败 - EXPECT_CALL(*client_, ExtendVolume(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Creation failed + EXPECT_CALL(*client_, ExtendVolume(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.ExtendVolume(fileName, length)); } @@ -188,16 +172,12 @@ TEST_F(NameSpaceToolCoreTest, DeleteFile) { std::string fileName = "/test"; bool forceDelete = false; - // 1、正常情况 - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.DeleteFile(fileName, forceDelete)); - // 2、创建失败 - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Creation failed + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.DeleteFile(fileName, forceDelete)); } @@ -213,23 +193,22 @@ TEST_F(NameSpaceToolCoreTest, GetChunkServerListInCopySet) { expected.emplace_back(csLoc); } - // 1、正常情况 + // 1. 
Normal situation EXPECT_CALL(*client_, GetChunkServerListInCopySet(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(expected), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(expected), Return(0))); ASSERT_EQ(0, namespaceTool.GetChunkServerListInCopySet(logicalPoolId, - copysetId, &csLocs)); + copysetId, &csLocs)); ASSERT_EQ(expected.size(), csLocs.size()); for (uint64_t i = 0; i < expected.size(); ++i) { ASSERT_EQ(expected[i].DebugString(), csLocs[i].DebugString()); } - // 2、失败 + // 2. Failure EXPECT_CALL(*client_, GetChunkServerListInCopySet(_, _, _)) .Times(1) .WillOnce(Return(-1)); - ASSERT_EQ(-1, namespaceTool.GetChunkServerListInCopySet(logicalPoolId, - copysetId, &csLocs)); + ASSERT_EQ(-1, namespaceTool.GetChunkServerListInCopySet( + logicalPoolId, copysetId, &csLocs)); } TEST_F(NameSpaceToolCoreTest, CleanRecycleBin) { @@ -274,18 +253,14 @@ TEST_F(NameSpaceToolCoreTest, CleanRecycleBin) { // CASE 1: clean recycle bin success EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(7) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(7).WillRepeatedly(Return(0)); ASSERT_EQ(0, namespaceTool.CleanRecycleBin("/", parseArg("0s"))); // CASE 2: clean recycle bin fail EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); EXPECT_CALL(*client_, DeleteFile(_, _)) .Times(7) .WillOnce(Return(-1)) @@ -293,47 +268,35 @@ TEST_F(NameSpaceToolCoreTest, CleanRecycleBin) { ASSERT_EQ(-1, namespaceTool.CleanRecycleBin("/", parseArg("0s"))); // CASE 3: list dir fail - EXPECT_CALL(*client_, ListDir(_, _)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*client_, ListDir(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.CleanRecycleBin("/", parseArg("0s"))); // CASE 4: clean recycle bin with expireTime is "3s" EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(6) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(6).WillRepeatedly(Return(0)); ASSERT_EQ(0, namespaceTool.CleanRecycleBin("/", parseArg("3s"))); // CASE 5: clean recycle bin with expireTime is "3m" EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(5) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(5).WillRepeatedly(Return(0)); ASSERT_EQ(0, namespaceTool.CleanRecycleBin("/", parseArg("3m"))); // CASE 6: clean recycle bin with expireTime is "3d" EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(3) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(3).WillRepeatedly(Return(0)); ASSERT_EQ(0, namespaceTool.CleanRecycleBin("/", parseArg("3d"))); // CASE 7: clean recycle bin with different dirname auto cleanByDir = [&](const std::string& dirname, int deleteTimes) { EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - 
.WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); EXPECT_CALL(*client_, DeleteFile(_, _)) .Times(deleteTimes) @@ -352,10 +315,9 @@ TEST_F(NameSpaceToolCoreTest, CleanRecycleBin) { cleanByDir("/", 7); } - TEST_F(NameSpaceToolCoreTest, GetAllocatedSize) { curve::tool::NameSpaceToolCore namespaceTool(client_); - // 1、正常情况 + // 1. Normal situation uint64_t allocSize; EXPECT_CALL(*client_, GetAllocatedSize(_, _, _)) .Times(1) @@ -374,38 +336,33 @@ TEST_F(NameSpaceToolCoreTest, QueryChunkCopyset) { uint64_t chunkId; std::pair copyset; - // 正常情况 + // Normal situation EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_CALL(*client_, GetSegmentInfo(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(segment), - Return(GetSegmentRes::kOK))); - ASSERT_EQ(0, namespaceTool.QueryChunkCopyset(fileName, offset, - &chunkId, ©set)); + .WillOnce(DoAll(SetArgPointee<2>(segment), Return(GetSegmentRes::kOK))); + ASSERT_EQ(0, namespaceTool.QueryChunkCopyset(fileName, offset, &chunkId, + ©set)); ASSERT_EQ(2001, chunkId); ASSERT_EQ(1, copyset.first); ASSERT_EQ(1001, copyset.second); - // GetFileInfo失败 - EXPECT_CALL(*client_, GetFileInfo(_, _)) - .Times(1) - .WillOnce(Return(-1)); - ASSERT_EQ(-1, namespaceTool.QueryChunkCopyset(fileName, offset, - &chunkId, ©set)); + // GetFileInfo failed + EXPECT_CALL(*client_, GetFileInfo(_, _)).Times(1).WillOnce(Return(-1)); + ASSERT_EQ(-1, namespaceTool.QueryChunkCopyset(fileName, offset, &chunkId, + ©set)); - // GetSegmentInfo失败 + // GetSegmentInfo failed EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_CALL(*client_, GetSegmentInfo(_, _, _)) .Times(1) .WillOnce(Return(GetSegmentRes::kOtherError)); - ASSERT_EQ(-1, namespaceTool.QueryChunkCopyset(fileName, offset, - &chunkId, ©set)); + ASSERT_EQ(-1, namespaceTool.QueryChunkCopyset(fileName, offset, &chunkId, + ©set)); } TEST_F(NameSpaceToolCoreTest, GetFileSegments) { @@ -417,33 +374,29 @@ TEST_F(NameSpaceToolCoreTest, GetFileSegments) { PageFileSegment expected; GetSegmentForTest(&expected); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_CALL(*client_, GetSegmentInfo(_, _, _)) .Times(5) .WillOnce(Return(GetSegmentRes::kSegmentNotAllocated)) - .WillRepeatedly(DoAll(SetArgPointee<2>(expected), - Return(GetSegmentRes::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(expected), Return(GetSegmentRes::kOK))); ASSERT_EQ(0, namespaceTool.GetFileSegments(fileName, &segments)); ASSERT_EQ(4, segments.size()); for (uint64_t i = 0; i < segments.size(); ++i) { ASSERT_EQ(expected.DebugString(), segments[i].DebugString()); } - // 2、GetFileInfo失败的情况 - EXPECT_CALL(*client_, GetFileInfo(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. The situation of GetFileInfo failure + EXPECT_CALL(*client_, GetFileInfo(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.GetFileSegments(fileName, &segments)); - // 3、获取segment失败 + // 3. 
Failed to obtain segment EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_CALL(*client_, GetSegmentInfo(_, _, _)) .Times(1) .WillOnce(Return(GetSegmentRes::kOtherError)); @@ -452,11 +405,9 @@ TEST_F(NameSpaceToolCoreTest, GetFileSegments) { TEST_F(NameSpaceToolCoreTest, GetFileSize) { curve::tool::NameSpaceToolCore namespaceTool(client_); - // 1、正常情况 + // 1. Normal situation uint64_t size; - EXPECT_CALL(*client_, GetFileSize(_, _)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*client_, GetFileSize(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.GetFileSize("/test", &size)); } @@ -465,8 +416,7 @@ TEST_F(NameSpaceToolCoreTest, TestUpdateThrottle) { // 1. throttle type is invalid { - EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _)) - .Times(0); + EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _)).Times(0); ASSERT_EQ(-1, namespaceTool.UpdateFileThrottle("/test", "hello", 10000, 0, 0)); @@ -476,11 +426,10 @@ TEST_F(NameSpaceToolCoreTest, TestUpdateThrottle) { { curve::mds::ThrottleParams params; EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _)) - .WillOnce( - DoAll(SaveArg<1>(¶ms), Return(0))); + .WillOnce(DoAll(SaveArg<1>(¶ms), Return(0))); ASSERT_EQ(0, namespaceTool.UpdateFileThrottle("/test", "BPS_TOTAL", - 10000, -1, -1)); + 10000, -1, -1)); ASSERT_EQ(10000, params.limit()); ASSERT_FALSE(params.has_burst()); ASSERT_FALSE(params.has_burstlength()); @@ -489,8 +438,7 @@ TEST_F(NameSpaceToolCoreTest, TestUpdateThrottle) { // 3. burst lower than limit { curve::mds::ThrottleParams params; - EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _)) - .Times(0); + EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _)).Times(0); ASSERT_EQ(-1, namespaceTool.UpdateFileThrottle("/test", "BPS_TOTAL", 10000, 5000, -1)); @@ -504,7 +452,7 @@ TEST_F(NameSpaceToolCoreTest, TestUpdateThrottle) { .WillOnce(DoAll(SaveArg<1>(¶ms), Return(0))); ASSERT_EQ(0, namespaceTool.UpdateFileThrottle("/test", "BPS_TOTAL", - 10000, 50000, -1)); + 10000, 50000, -1)); ASSERT_EQ(10000, params.limit()); ASSERT_EQ(50000, params.burst()); ASSERT_EQ(1, params.burstlength()); @@ -518,7 +466,7 @@ TEST_F(NameSpaceToolCoreTest, TestUpdateThrottle) { .WillOnce(DoAll(SaveArg<1>(¶ms), Return(0))); ASSERT_EQ(0, namespaceTool.UpdateFileThrottle("/test", "BPS_TOTAL", - 10000, 50000, 10)); + 10000, 50000, 10)); ASSERT_EQ(10000, params.limit()); ASSERT_EQ(50000, params.burst()); ASSERT_EQ(10, params.burstlength()); diff --git a/test/tools/namespace_tool_test.cpp b/test/tools/namespace_tool_test.cpp index a8202bda39..526263446f 100644 --- a/test/tools/namespace_tool_test.cpp +++ b/test/tools/namespace_tool_test.cpp @@ -21,13 +21,15 @@ * Copyright (c) 2018 netease */ -#include #include "src/tools/namespace_tool.h" + +#include + #include "test/tools/mock/mock_namespace_tool_core.h" using ::testing::_; -using ::testing::Return; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; DECLARE_bool(isTest); @@ -39,9 +41,7 @@ DECLARE_bool(showAllocMap); class NameSpaceToolTest : public ::testing::Test { protected: - NameSpaceToolTest() { - FLAGS_isTest = true; - } + NameSpaceToolTest() { FLAGS_isTest = true; } void SetUp() { core_ = std::make_shared(); } @@ -106,80 +106,68 @@ TEST_F(NameSpaceToolTest, GetFile) { PageFileSegment segment; GetSegmentForTest(&segment); FLAGS_fileName = "/test/"; - // 0、Init失败 - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); 
+ // 0. Init failed + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("get")); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); ASSERT_EQ(-1, namespaceTool.RunCommand("abc")); - // 1、正常情况 + // 1. Normal situation FLAGS_showAllocMap = true; EXPECT_CALL(*core_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); curve::tool::AllocMap allocMap = {{1, segmentSize}, {2, 9 * segmentSize}}; EXPECT_CALL(*core_, GetAllocatedSize(_, _, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), - SetArgPointee<2>(allocMap), - Return(0))); + SetArgPointee<2>(allocMap), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("get")); - // 2、获取fileInfo失败 - EXPECT_CALL(*core_, GetFileInfo(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Failed to obtain fileInfo + EXPECT_CALL(*core_, GetFileInfo(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("get")); - // 3、计算大小失败 - EXPECT_CALL(*core_, GetFileInfo(_, _)) + // 3. Size calculation failed + EXPECT_CALL(*core_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_CALL(*core_, GetAllocatedSize(_, _, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("get")); - // 4、get的是目录的话还要计算file size + // 4. If the target is a directory, the file size should also be calculated FileInfo fileInfo2; GetFileInfoForTest(&fileInfo2); fileInfo2.set_filetype(curve::mds::FileType::INODE_DIRECTORY); EXPECT_CALL(*core_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo2), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo2), Return(0))); EXPECT_CALL(*core_, GetAllocatedSize(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), Return(0))); EXPECT_CALL(*core_, GetFileSize(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("get")); - // 5、指定了-showAllocSize=false的话不计算分配大小 + // 5. If -showAllocSize=false is specified, the allocation size will not be + // calculated FLAGS_showAllocSize = false; EXPECT_CALL(*core_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("get")); - // 6、对目录指定了-showFileSize=false的话不计算文件大小 + // 6. If -showFileSize=false is specified for a directory, the file size + // will not be calculated FLAGS_showFileSize = false; FLAGS_showAllocSize = false; EXPECT_CALL(*core_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo2), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo2), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("get")); } @@ -190,75 +178,66 @@ TEST_F(NameSpaceToolTest, ListDir) { curve::tool::NameSpaceTool namespaceTool(core_); namespaceTool.PrintHelp("list"); FileInfo fileInfo; GetFileInfoForTest(&fileInfo); PageFileSegment segment; GetSegmentForTest(&segment); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 + // 1.
Normal situation std::vector files; for (uint64_t i = 0; i < 3; ++i) { files.emplace_back(fileInfo); } EXPECT_CALL(*core_, ListDir(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); EXPECT_CALL(*core_, GetAllocatedSize(_, _, _)) .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<1>(10 * segmentSize), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(10 * segmentSize), Return(0))); FLAGS_fileName = "/"; ASSERT_EQ(0, namespaceTool.RunCommand("list")); FLAGS_fileName = "/test/"; ASSERT_EQ(0, namespaceTool.RunCommand("list")); - // 2、listDir失败 - EXPECT_CALL(*core_, ListDir(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. ListDir failed + EXPECT_CALL(*core_, ListDir(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("list")); - // 3、计算大小失败,个别的文件计算大小失败会继续计算,但是返回-1 + // 3. Size calculation failed: if it fails for an individual file, the + // remaining files are still processed, but -1 is returned EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); EXPECT_CALL(*core_, GetAllocatedSize(_, _, _)) .Times(3) .WillOnce(Return(-1)) - .WillRepeatedly(DoAll(SetArgPointee<1>(10 * segmentSize), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(10 * segmentSize), Return(0))); ASSERT_EQ(-1, namespaceTool.RunCommand("list")); - // 4、指定了-showAllocSize=false的话不计算分配大小 + // 4. If -showAllocSize=false is specified, the allocation size will not be + // calculated FLAGS_showAllocSize = false; EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("list")); - // 4、list的时候有目录的话计算fileSize + // 4. If there is a directory in the list, calculate fileSize FileInfo fileInfo2; GetFileInfoForTest(&fileInfo2); fileInfo2.set_filetype(curve::mds::FileType::INODE_DIRECTORY); files.emplace_back(fileInfo2); EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); EXPECT_CALL(*core_, GetFileSize(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("list")); - // 5、指定了-showFileSize=false的话不计算文件大小 + // 5. If -showFileSize=false is specified, the file size will not be + // calculated FLAGS_showFileSize = false; EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("list")); }
GetFileSegment failed + EXPECT_CALL(*core_, GetFileSegments(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("seginfo")); } TEST_F(NameSpaceToolTest, CreateFile) { curve::tool::NameSpaceTool namespaceTool(core_); namespaceTool.PrintHelp("create"); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 - EXPECT_CALL(*core_, CreateFile(_)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*core_, CreateFile(_)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.RunCommand("create")); - // 2、创建失败 - EXPECT_CALL(*core_, CreateFile(_)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Creation failed + EXPECT_CALL(*core_, CreateFile(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("create")); } TEST_F(NameSpaceToolTest, DeleteFile) { curve::tool::NameSpaceTool namespaceTool(core_); namespaceTool.PrintHelp("delete"); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 - EXPECT_CALL(*core_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*core_, DeleteFile(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.RunCommand("delete")); - // 2、创建失败 - EXPECT_CALL(*core_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Creation failed + EXPECT_CALL(*core_, DeleteFile(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("delete")); } TEST_F(NameSpaceToolTest, CleanRecycle) { curve::tool::NameSpaceTool namespaceTool(core_); namespaceTool.PrintHelp("clean-recycle"); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 - EXPECT_CALL(*core_, CleanRecycleBin(_, _)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*core_, CleanRecycleBin(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.RunCommand("clean-recycle")); - // 2、失败 - EXPECT_CALL(*core_, CleanRecycleBin(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Failure + EXPECT_CALL(*core_, CleanRecycleBin(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("clean-recycle")); } @@ -361,33 +317,28 @@ TEST_F(NameSpaceToolTest, PrintChunkLocation) { } uint64_t chunkId = 2001; std::pair copyset = {1, 101}; - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*core_, QueryChunkCopyset(_, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(chunkId), - SetArgPointee<3>(copyset), + .WillOnce(DoAll(SetArgPointee<2>(chunkId), SetArgPointee<3>(copyset), Return(0))); EXPECT_CALL(*core_, GetChunkServerListInCopySet(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(csLocs), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(csLocs), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("chunk-location")); - // 2、QueryChunkCopyset失败 + // 2. QueryChunkCopyset failed EXPECT_CALL(*core_, QueryChunkCopyset(_, _, _, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("chunk-location")); - // 3、GetChunkServerListInCopySet失败 + // 3. 
GetChunkServerListInCopySet failed EXPECT_CALL(*core_, QueryChunkCopyset(_, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(chunkId), - SetArgPointee<3>(copyset), + .WillOnce(DoAll(SetArgPointee<2>(chunkId), SetArgPointee<3>(copyset), Return(0))); EXPECT_CALL(*core_, GetChunkServerListInCopySet(_, _, _)) .Times(1) diff --git a/test/tools/raft_log_tool_test.cpp b/test/tools/raft_log_tool_test.cpp index ff70a5ef8b..f026ac064c 100644 --- a/test/tools/raft_log_tool_test.cpp +++ b/test/tools/raft_log_tool_test.cpp @@ -20,16 +20,19 @@ * Author: charisu */ +#include "src/tools/raft_log_tool.h" + #include + #include #include -#include "src/tools/raft_log_tool.h" + #include "test/tools/mock/mock_segment_parser.h" DECLARE_string(fileName); using ::testing::_; -using ::testing::Return; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; namespace curve { @@ -37,12 +40,8 @@ namespace tool { class RaftLogToolTest : public ::testing::Test { protected: - void SetUp() { - parser_ = std::make_shared<MockSegmentParser>(); - } - void TearDown() { - parser_ = nullptr; - } + void SetUp() { parser_ = std::make_shared<MockSegmentParser>(); } + void TearDown() { parser_ = nullptr; } std::shared_ptr<MockSegmentParser> parser_; }; @@ -58,23 +57,19 @@ TEST_F(RaftLogToolTest, PrintHeaders) { raftLogTool.PrintHelp("chunk-meta"); ASSERT_EQ(-1, raftLogTool.RunCommand("chunk-meta")); - // 文件名格式不对 + // The file name format is incorrect FLAGS_fileName = "illegalfilename"; ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); FLAGS_fileName = "/tmp/illegalfilename"; ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); - // parser初始化失败 + // parser initialization failed FLAGS_fileName = "/tmp/log_inprogress_002"; - EXPECT_CALL(*parser_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*parser_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); - // 解析失败 - EXPECT_CALL(*parser_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + // Parsing failed + EXPECT_CALL(*parser_, Init(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*parser_, GetNextEntryHeader(_)) .Times(1) .WillOnce(Return(false)); @@ -83,10 +78,8 @@ .WillOnce(Return(false)); ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); - // 正常情况 - EXPECT_CALL(*parser_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + // Normal situation + EXPECT_CALL(*parser_, Init(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*parser_, GetNextEntryHeader(_)) .Times(3) .WillOnce(Return(true)) @@ -100,4 +93,3 @@ } // namespace tool } // namespace curve - diff --git a/test/tools/segment_parser_test.cpp b/test/tools/segment_parser_test.cpp index 3f9e1f465f..12e6614a9f 100644 --- a/test/tools/segment_parser_test.cpp +++ b/test/tools/segment_parser_test.cpp @@ -21,8 +21,10 @@ */ #include + #include #include + #include "src/tools/raft_log_tool.h" #include "test/fs/mock_local_filesystem.h" @@ -31,8 +33,8 @@ namespace tool { using curve::fs::MockLocalFileSystem; using ::testing::_; -using ::testing::Return; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; @@ -41,23 +43,19 @@ const uint32_t DATA_LEN = 20; class SetmentParserTest : public ::testing::Test { protected: - void SetUp() { - localFs_ = std::make_shared<MockLocalFileSystem>(); - } - void TearDown() { - localFs_ = nullptr; - } + void SetUp() { localFs_ = std::make_shared<MockLocalFileSystem>(); } + void TearDown() { localFs_ = nullptr; } void PackHeader(const EntryHeader& header, char* buf, bool checkFail
= false) { memset(buf, 0, ENTRY_HEADER_SIZE); - const uint32_t meta_field = (header.type << 24) | - (header.checksum_type << 16); + const uint32_t meta_field = + (header.type << 24) | (header.checksum_type << 16); butil::RawPacker packer(buf); packer.pack64(header.term) - .pack32(meta_field) - .pack32((uint32_t)header.data_len) - .pack32(header.data_checksum); + .pack32(meta_field) + .pack32((uint32_t)header.data_len) + .pack32(header.data_checksum); uint32_t checkSum = braft::murmurhash32(buf, ENTRY_HEADER_SIZE - 4); if (checkFail) { packer.pack32(checkSum + 1); @@ -71,29 +69,23 @@ class SetmentParserTest : public ::testing::Test { TEST_F(SetmentParserTest, Init) { SegmentParser parser(localFs_); - // 1、打开文件失败 + // 1. Failed to open file EXPECT_CALL(*localFs_, Open(_, _)) .Times(3) .WillOnce(Return(-1)) .WillRepeatedly(Return(1)); ASSERT_EQ(-1, parser.Init(fileName)); - // 2、获取文件大小失败 - EXPECT_CALL(*localFs_, Fstat(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Failed to obtain file size + EXPECT_CALL(*localFs_, Fstat(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, parser.Init(fileName)); - // 3、成功 - EXPECT_CALL(*localFs_, Fstat(_, _)) - .Times(1) - .WillOnce(Return(0)); + // 3. Success + EXPECT_CALL(*localFs_, Fstat(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, parser.Init(fileName)); - // 4、反初始化 - EXPECT_CALL(*localFs_, Close(_)) - .Times(1) - .WillOnce(Return(0)); + // 4. De-initialization + EXPECT_CALL(*localFs_, Close(_)).Times(1).WillOnce(Return(0)); parser.UnInit(); } @@ -102,13 +94,10 @@ TEST_F(SetmentParserTest, GetNextEntryHeader) { struct stat stBuf; stBuf.st_size = 88; - EXPECT_CALL(*localFs_, Open(_, _)) - .Times(1) - .WillOnce(Return(1)); + EXPECT_CALL(*localFs_, Open(_, _)).Times(1).WillOnce(Return(1)); EXPECT_CALL(*localFs_, Fstat(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(stBuf), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(stBuf), Return(0))); ASSERT_EQ(0, parser.Init(fileName)); EntryHeader header; @@ -120,30 +109,30 @@ TEST_F(SetmentParserTest, GetNextEntryHeader) { header.data_checksum = 73235795; char header_buf[ENTRY_HEADER_SIZE] = {0}; - // 读出来的数据大小不对 + // The size of the data read out is incorrect EXPECT_CALL(*localFs_, Read(_, _, _, ENTRY_HEADER_SIZE)) .Times(1) .WillOnce(Return(22)); ASSERT_FALSE(parser.GetNextEntryHeader(&header2)); ASSERT_FALSE(parser.SuccessfullyFinished()); - // 校验失败 + // Verification failed PackHeader(header, header_buf, true); EXPECT_CALL(*localFs_, Read(_, _, _, ENTRY_HEADER_SIZE)) .Times(1) - .WillOnce(DoAll(SetArrayArgument<1>(header_buf, - header_buf + ENTRY_HEADER_SIZE), - Return(24))); + .WillOnce(DoAll( + SetArrayArgument<1>(header_buf, header_buf + ENTRY_HEADER_SIZE), + Return(24))); ASSERT_FALSE(parser.GetNextEntryHeader(&header2)); ASSERT_FALSE(parser.SuccessfullyFinished()); - // 正常情况 + // Normal situation PackHeader(header, header_buf); EXPECT_CALL(*localFs_, Read(_, _, _, ENTRY_HEADER_SIZE)) .Times(2) - .WillRepeatedly(DoAll(SetArrayArgument<1>(header_buf, - header_buf + ENTRY_HEADER_SIZE), - Return(24))); + .WillRepeatedly(DoAll( + SetArrayArgument<1>(header_buf, header_buf + ENTRY_HEADER_SIZE), + Return(24))); ASSERT_TRUE(parser.GetNextEntryHeader(&header2)); ASSERT_EQ(header, header2); ASSERT_TRUE(parser.GetNextEntryHeader(&header2)); @@ -155,4 +144,3 @@ TEST_F(SetmentParserTest, GetNextEntryHeader) { } // namespace tool } // namespace curve - diff --git a/test/tools/snapshot_clone_client_test.cpp b/test/tools/snapshot_clone_client_test.cpp index 024a270a69..9a87583dd8 100644 --- 
a/test/tools/snapshot_clone_client_test.cpp +++ b/test/tools/snapshot_clone_client_test.cpp @@ -20,28 +20,27 @@ * Author: charisu */ +#include "src/tools/snapshot_clone_client.h" + #include + #include -#include "src/tools/snapshot_clone_client.h" + #include "test/tools/mock/mock_metric_client.h" using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; namespace curve { namespace tool { class SnapshotCloneClientTest : public ::testing::Test { protected: - void SetUp() { - metricClient_ = std::make_shared(); - } + void SetUp() { metricClient_ = std::make_shared(); } - void TearDown() { - metricClient_ = nullptr; - } + void TearDown() { metricClient_ = nullptr; } std::shared_ptr metricClient_; }; @@ -50,60 +49,57 @@ TEST_F(SnapshotCloneClientTest, Init) { // no snapshot clone server ASSERT_EQ(1, client.Init("", "")); ASSERT_EQ(-1, client.Init("127.0.0.1:5555", "")); - // dummy server与mds不匹配 + // Dummy server and mds do not match ASSERT_EQ(-1, client.Init("127.0.0.1:5555", "8081,8082,8083")); ASSERT_EQ(0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", - "9091,9092,9093")); - std::map expected = - {{"127.0.0.1:5555", "127.0.0.1:9091"}, - {"127.0.0.1:5556", "127.0.0.1:9092"}, - {"127.0.0.1:5557", "127.0.0.1:9093"}}; + "9091,9092,9093")); + std::map expected = { + {"127.0.0.1:5555", "127.0.0.1:9091"}, + {"127.0.0.1:5556", "127.0.0.1:9092"}, + {"127.0.0.1:5557", "127.0.0.1:9093"}}; ASSERT_EQ(expected, client.GetDummyServerMap()); } TEST_F(SnapshotCloneClientTest, GetActiveAddr) { - // 正常情况 + // Normal situation SnapshotCloneClient client(metricClient_); - ASSERT_EQ(0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", - "9091")); + ASSERT_EQ( + 0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", "9091")); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(4) - .WillOnce(DoAll(SetArgPointee<2>("active"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("active"), - Return(MetricRet::kOK))) - .WillRepeatedly(DoAll(SetArgPointee<2>("standby"), - Return(MetricRet::kOK))); + .WillOnce(DoAll(SetArgPointee<2>("active"), Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("active"), Return(MetricRet::kOK))) + .WillRepeatedly( + DoAll(SetArgPointee<2>("standby"), Return(MetricRet::kOK))); std::vector activeAddr = client.GetActiveAddrs(); ASSERT_EQ(1, activeAddr.size()); ASSERT_EQ("127.0.0.1:5555", activeAddr[0]); - // 有一个dummyserver显示active,服务端口访问失败 + // There is a dummyserver displaying active, and the service port access + // failed EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(4) - .WillOnce(DoAll(SetArgPointee<2>("active"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("active"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kOtherErr)) - .WillRepeatedly(DoAll(SetArgPointee<2>("standby"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("standby"), Return(MetricRet::kOK))); activeAddr = client.GetActiveAddrs(); ASSERT_TRUE(activeAddr.empty()); - // 有一个获取metric失败,其他返回standby + // One failed to obtain metric, while the others returned standby EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) .WillOnce(Return(MetricRet::kNotFound)) - .WillRepeatedly(DoAll(SetArgPointee<2>("standby"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("standby"), Return(MetricRet::kOK))); ASSERT_TRUE(client.GetActiveAddrs().empty()); - // 有两个active状态的 + // Having two active states EXPECT_CALL(*metricClient_, 
GetMetric(_, _, _)) .Times(5) - .WillOnce(DoAll(SetArgPointee<2>("standby"), - Return(MetricRet::kOK))) - .WillRepeatedly(DoAll(SetArgPointee<2>("active"), - Return(MetricRet::kOK))); + .WillOnce(DoAll(SetArgPointee<2>("standby"), Return(MetricRet::kOK))) + .WillRepeatedly( + DoAll(SetArgPointee<2>("active"), Return(MetricRet::kOK))); activeAddr = client.GetActiveAddrs(); ASSERT_EQ(2, activeAddr.size()); ASSERT_EQ("127.0.0.1:5556", activeAddr[0]); @@ -112,15 +108,16 @@ TEST_F(SnapshotCloneClientTest, GetActiveAddr) { TEST_F(SnapshotCloneClientTest, GetOnlineStatus) { SnapshotCloneClient client(metricClient_); - ASSERT_EQ(0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", - "9091")); - // 有一个在线,有一个获取metric失败,有一个listen addr不匹配 + ASSERT_EQ( + 0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", "9091")); + // One online, one failed to obtain metric, and one did not match the listen + // addr EXPECT_CALL(*metricClient_, GetConfValueFromMetric(_, _, _)) .Times(3) - .WillOnce(DoAll(SetArgPointee<2>("127.0.0.1:5555"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("127.0.0.1:5557"), - Return(MetricRet::kOK))) + .WillOnce( + DoAll(SetArgPointee<2>("127.0.0.1:5555"), Return(MetricRet::kOK))) + .WillOnce( + DoAll(SetArgPointee<2>("127.0.0.1:5557"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)); std::map onlineStatus; client.GetOnlineStatus(&onlineStatus); diff --git a/test/tools/status_tool_test.cpp b/test/tools/status_tool_test.cpp index 8b33183220..8dba1a8f94 100644 --- a/test/tools/status_tool_test.cpp +++ b/test/tools/status_tool_test.cpp @@ -19,25 +19,28 @@ * File Created: 2019-11-26 * Author: charisu */ +#include "src/tools/status_tool.h" + #include + #include -#include "src/tools/status_tool.h" -#include "test/tools/mock/mock_namespace_tool_core.h" -#include "test/tools/mock/mock_copyset_check_core.h" + #include "test/tools//mock/mock_mds_client.h" +#include "test/tools/mock/mock_copyset_check_core.h" #include "test/tools/mock/mock_etcd_client.h" -#include "test/tools/mock/mock_version_tool.h" #include "test/tools/mock/mock_metric_client.h" +#include "test/tools/mock/mock_namespace_tool_core.h" #include "test/tools/mock/mock_snapshot_clone_client.h" +#include "test/tools/mock/mock_version_tool.h" +using curve::mds::topology::AllocateStatus; +using curve::mds::topology::LogicalPoolType; using ::testing::_; +using ::testing::An; +using ::testing::DoAll; using ::testing::Return; using ::testing::ReturnRef; -using ::testing::DoAll; using ::testing::SetArgPointee; -using ::testing::An; -using curve::mds::topology::LogicalPoolType; -using curve::mds::topology::AllocateStatus; DECLARE_bool(offline); DECLARE_bool(unhealthy); @@ -76,7 +79,7 @@ class StatusToolTest : public ::testing::Test { pool->set_desc("physical pool for test"); } - void GetLogicalPoolForTest(PoolIdType id, LogicalPoolInfo *lpInfo, + void GetLogicalPoolForTest(PoolIdType id, LogicalPoolInfo* lpInfo, bool getSpace = true) { lpInfo->set_logicalpoolid(id); lpInfo->set_logicalpoolname("defaultLogicalPool"); @@ -89,9 +92,9 @@ class StatusToolTest : public ::testing::Test { lpInfo->set_allocatestatus(AllocateStatus::ALLOW); } - void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo *csInfo, - uint64_t csId, bool offline = false, - bool retired = false) { + void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo* csInfo, + uint64_t csId, bool offline = false, + bool retired = false) { csInfo->set_chunkserverid(csId); csInfo->set_disktype("ssd"); 
csInfo->set_hostip("127.0.0.1"); @@ -113,7 +116,7 @@ class StatusToolTest : public ::testing::Test { csInfo->set_diskused(512); } - void GetServerInfoForTest(curve::mds::topology::ServerInfo *server, + void GetServerInfoForTest(curve::mds::topology::ServerInfo* server, uint64_t id) { server->set_serverid(id); server->set_hostname("localhost"); @@ -137,8 +140,7 @@ class StatusToolTest : public ::testing::Test { }; TEST_F(StatusToolTest, InitAndSupportCommand) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); ASSERT_TRUE(statusTool.SupportCommand("status")); ASSERT_TRUE(statusTool.SupportCommand("space")); @@ -153,10 +155,9 @@ TEST_F(StatusToolTest, InitAndSupportCommand) { } TEST_F(StatusToolTest, InitFail) { - StatusTool statusTool1(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, - metricClient_, snapshotClient_); - // 1、status命令需要所有的init + StatusTool statusTool1(mdsClient_, etcdClient_, copysetCheck_, versionTool_, + metricClient_, snapshotClient_); + // 1. The status command requires all inits EXPECT_CALL(*mdsClient_, Init(_, _)) .Times(3) .WillOnce(Return(-1)) @@ -169,50 +170,38 @@ .Times(2) .WillOnce(Return(-1)) .WillOnce(Return(0)); - EXPECT_CALL(*snapshotClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*snapshotClient_, Init(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool1.RunCommand("status")); ASSERT_EQ(-1, statusTool1.RunCommand("status")); ASSERT_EQ(-1, statusTool1.RunCommand("status")); ASSERT_EQ(-1, statusTool1.RunCommand("status")); - // 2、etcd-status命令只需要初始化etcdClinet - StatusTool statusTool2(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, - metricClient_, snapshotClient_); - EXPECT_CALL(*etcdClient_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + // 2. The etcd-status command only needs to initialize etcdClient + StatusTool statusTool2(mdsClient_, etcdClient_, copysetCheck_, versionTool_, + metricClient_, snapshotClient_); + EXPECT_CALL(*etcdClient_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool2.RunCommand("etcd-status")); - // 3、space和其他命令不需要初始化etcdClient - StatusTool statusTool3(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, - metricClient_, snapshotClient_); + // 3. Space and other commands do not require initialization of etcdClient + StatusTool statusTool3(mdsClient_, etcdClient_, copysetCheck_, versionTool_, + metricClient_, snapshotClient_); EXPECT_CALL(*mdsClient_, Init(_, _)) .Times(2) .WillOnce(Return(-1)) .WillRepeatedly(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool3.RunCommand("space")); ASSERT_EQ(-1, statusTool3.RunCommand("chunkserver-list")); - // 4、snapshot-clone-status只需要snapshot clone - StatusTool statusTool4(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, - metricClient_, snapshotClient_); - EXPECT_CALL(*snapshotClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 4. 
snapshot-clone-status only requires the snapshot clone client + StatusTool statusTool4(mdsClient_, etcdClient_, copysetCheck_, versionTool_, + metricClient_, snapshotClient_); + EXPECT_CALL(*snapshotClient_, Init(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool4.RunCommand("snapshot-clone-status")); } TEST_F(StatusToolTest, SpaceCmd) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); statusTool.PrintHelp("space"); statusTool.PrintHelp("123"); @@ -221,92 +210,70 @@ std::vector lgPools; lgPools.emplace_back(lgPool); - // 设置Init的期望 - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + // Set expectations for Init + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(lgPools), Return(0))); EXPECT_CALL(*mdsClient_, GetFileSize(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(4) - .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("space")); ASSERT_EQ(-1, statusTool.RunCommand("123")); - // 2、ListLogicalPoolsInPhysicalPool失败的情况 + // 2. The case where ListLogicalPoolsInCluster fails EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("space")); - // 3、获取filesize失败 + // 3. Failed to obtain filesize EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(lgPools), Return(0))); - EXPECT_CALL(*mdsClient_, GetFileSize(_, _)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*mdsClient_, GetFileSize(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("space")); - // 4、获取metric失败的情况 + // 4. 
Failed to obtain metric EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(lgPools), Return(0))); EXPECT_CALL(*mdsClient_, GetFileSize(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), - Return(0))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), - Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), Return(0))) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("space")); ASSERT_EQ(-1, statusTool.RunCommand("space")); - // 5、获取RecyleBin大小失败的情况 + // 5. Failed to obtain the size of RecycleBin EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(lgPools), Return(0))); EXPECT_CALL(*mdsClient_, GetFileSize(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(4) - .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)) .Times(1) .WillOnce(Return(-1)); @@ -314,33 +281,28 @@ } TEST_F(StatusToolTest, ChunkServerCmd) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); statusTool.PrintHelp("chunkserver-list"); std::vector chunkservers; - // 加入5个chunkserver,2个offline + // Add 5 chunkservers, 2 of them offline ChunkServerInfo csInfo; for (uint64_t i = 1; i <= 5; ++i) { GetCsInfoForTest(&csInfo, i, i <= 2); chunkservers.emplace_back(csInfo); } - // 设置Init的期望 - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - - // 正常情况,有一个chunkserver的UnhealthyRatio大于0 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) - .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); - EXPECT_CALL(*copysetCheck_, CheckCopysetsOnChunkServer( - An())) + // Set expectations for Init + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); + + // Under normal circumstances, there is a chunkserver with an UnhealthyRatio + // greater than 0 + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) + .Times(1) + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); + EXPECT_CALL(*copysetCheck_, + CheckCopysetsOnChunkServer(An())) .Times(3) .WillRepeatedly(Return(0)); EXPECT_CALL(*copysetCheck_, GetCopysetStatistics()) @@ -349,23 +311,21 @@ 
TEST_F(StatusToolTest, ChunkServerCmd) { .WillRepeatedly(Return(statistics1)); ASSERT_EQ(0, statusTool.RunCommand("chunkserver-list")); - // 只显示offline的 + // Only display offline ones FLAGS_offline = true; - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("chunkserver-list")); - // 只显示unhealthy ratio大于0的 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // Show only those with unhealthy ratio greater than 0 + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); - EXPECT_CALL(*copysetCheck_, CheckCopysetsOnChunkServer( - An())) + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); + EXPECT_CALL(*copysetCheck_, + CheckCopysetsOnChunkServer(An())) .Times(3) .WillRepeatedly(Return(0)); EXPECT_CALL(*copysetCheck_, GetCopysetStatistics()) @@ -376,21 +336,21 @@ TEST_F(StatusToolTest, ChunkServerCmd) { FLAGS_unhealthy = true; ASSERT_EQ(0, statusTool.RunCommand("chunkserver-list")); - // list chunkserver失败 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // List chunkserver failed + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("chunkserver-list")); - // FLAGS_checkCSAlive为true的时候,会发送rpc检查chunkserver在线状态 + // When FLAGS_checkCSAlive is true, an rpc will be sent to check the online + // status of the chunkserver FLAGS_checkHealth = false; FLAGS_checkCSAlive = true; - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*copysetCheck_, CheckChunkServerOnline(_)) .Times(5) .WillOnce(Return(false)) @@ -399,8 +359,7 @@ TEST_F(StatusToolTest, ChunkServerCmd) { } TEST_F(StatusToolTest, StatusCmdCommon) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); statusTool.PrintHelp("status"); statusTool.PrintHelp("chunkserver-status"); @@ -422,10 +381,9 @@ {"0.0.2", {"127.0.0.1:8002"}}, {"0.0.3", {"127.0.0.1:8003"}}}; ClientVersionMapType clientVersionMap = {{"nebd-server", versionMap}, - {"python", versionMap}, - {"qemu", versionMap}}; + {"python", versionMap}, + {"qemu", versionMap}}; - std::vector offlineList = {"127.0.0.1:8004", - "127.0.0.1:8005"}; + std::vector offlineList = {"127.0.0.1:8004", "127.0.0.1:8005"}; std::vector leaderAddr = {"127.0.0.1:2379"}; std::map onlineState = {{"127.0.0.1:2379", true}, {"127.0.0.1:2381", true}, @@ -440,22 +398,14 @@ } chunkservers.emplace(1, chunkserverList); - // 设置Init的期望 - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*etcdClient_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*snapshotClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); + // Set expectations for Init + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + 
EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*etcdClient_, Init(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*snapshotClient_, Init(_, _)).Times(1).WillOnce(Return(0)); - // 正常情况 - // 1、设置cluster的输出 + // Normal situation + // 1. Set the output of the cluster EXPECT_CALL(*copysetCheck_, CheckCopysetsInCluster()) .Times(1) .WillOnce(Return(0)); @@ -464,41 +414,31 @@ TEST_F(StatusToolTest, StatusCmdCommon) { .WillOnce(Return(statistics1)); EXPECT_CALL(*mdsClient_, ListPhysicalPoolsInCluster(_)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(phyPools), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(phyPools), Return(0))); EXPECT_CALL(*mdsClient_, ListLogicalPoolsInPhysicalPool(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(lgPools), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(lgPools), Return(0))); EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(lgPools), Return(0))); EXPECT_CALL(*mdsClient_, GetFileSize(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(4) - .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); - // 设置client status的输出 + // Set the output of client status EXPECT_CALL(*versionTool_, GetClientVersion(_)) - .WillOnce(DoAll(SetArgPointee<0>(clientVersionMap), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(clientVersionMap), Return(0))); - // 2、设置MDS status的输出 + // 2. Set the output of MDS status EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(2) .WillRepeatedly(Return(mdsAddr)); @@ -506,25 +446,21 @@ TEST_F(StatusToolTest, StatusCmdCommon) { .Times(2) .WillRepeatedly(SetArgPointee<0>(mdsOnlineStatus)); EXPECT_CALL(*versionTool_, GetAndCheckMdsVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); - // 3、设置etcd status的输出 + // 3. 
Set the output of etcd status EXPECT_CALL(*etcdClient_, GetAndCheckEtcdVersion(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>("3.4.1"), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>("3.4.1"), Return(0))); EXPECT_CALL(*etcdClient_, GetEtcdClusterStatus(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<0>(leaderAddr), - SetArgPointee<1>(onlineState), - Return(0))); + SetArgPointee<1>(onlineState), Return(0))); - // 设置snapshot clone的输出 + // Set the output of snapshot clone std::vector activeAddr = {"127.0.0.1:5555"}; EXPECT_CALL(*versionTool_, GetAndCheckSnapshotCloneVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); EXPECT_CALL(*snapshotClient_, GetActiveAddrs()) .Times(2) .WillRepeatedly(Return(activeAddr)); @@ -532,39 +468,36 @@ .Times(2) .WillRepeatedly(SetArgPointee<0>(onlineState)); - // 4、设置chunkserver status的输出 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An>*>())) + // 4. Set the output of chunkserver status + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster( + An>*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*versionTool_, GetAndCheckChunkServerVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); EXPECT_CALL(*metricClient_, GetMetricUint(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(1000), - Return(MetricRet::kOK))); + .WillRepeatedly(DoAll(SetArgPointee<2>(1000), Return(MetricRet::kOK))); EXPECT_CALL(*copysetCheck_, CheckChunkServerOnline(_)) .Times(3) .WillRepeatedly(Return(true)); ASSERT_EQ(0, statusTool.RunCommand("status")); - // 5、设置chunkserver status的输出 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An>*>())) + // 5. Set the output of chunkserver status + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster( + An>*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*versionTool_, GetAndCheckChunkServerVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); EXPECT_CALL(*copysetCheck_, CheckChunkServerOnline(_)) .Times(3) .WillRepeatedly(Return(true)); ASSERT_EQ(0, statusTool.RunCommand("chunkserver-status")); - // 6、设置mds status的输出 + // 6. Set the output of mds status EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(mdsAddr)); @@ -572,37 +505,26 @@ .Times(1) .WillOnce(SetArgPointee<0>(mdsOnlineStatus)); EXPECT_CALL(*versionTool_, GetAndCheckMdsVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("mds-status")); - // 7、设置etcd status的输出 + // 7. 
Set the output of etcd status EXPECT_CALL(*etcdClient_, GetEtcdClusterStatus(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(leaderAddr), - SetArgPointee<1>(onlineState), - Return(0))); + SetArgPointee<1>(onlineState), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("etcd-status")); } TEST_F(StatusToolTest, StatusCmdError) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); - // 设置Init的期望 - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*etcdClient_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*snapshotClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); + // Set expectations for Init + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*etcdClient_, Init(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*snapshotClient_, Init(_, _)).Times(1).WillOnce(Return(0)); // 1、cluster unhealthy EXPECT_CALL(*copysetCheck_, CheckCopysetsInCluster()) @@ -611,24 +533,22 @@ TEST_F(StatusToolTest, StatusCmdError) { EXPECT_CALL(*copysetCheck_, GetCopysetStatistics()) .Times(1) .WillOnce(Return(statistics2)); - // 列出物理池失败 + // Failed to list physical pools EXPECT_CALL(*mdsClient_, ListPhysicalPoolsInCluster(_)) .Times(1) .WillRepeatedly(Return(-1)); - // 列出逻辑池失败 + // Failed to list logical pools EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillRepeatedly(Return(-1)); - // 获取client version失败 - EXPECT_CALL(*versionTool_, GetClientVersion(_)) - .WillOnce(Return(-1)); + // Failed to obtain client version + EXPECT_CALL(*versionTool_, GetClientVersion(_)).WillOnce(Return(-1)); - // 2、当前无mds可用 + // 2. Currently, no mds are available std::vector failedList = {"127.0.0.1:6666", "127.0.0.1:6667"}; EXPECT_CALL(*versionTool_, GetAndCheckMdsVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(failedList), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(failedList), Return(0))); std::map mdsOnlineStatus = {{"127.0.0.1:6666", false}, {"127.0.0.1:6667", false}}; EXPECT_CALL(*mdsClient_, GetCurrentMds()) @@ -638,7 +558,7 @@ TEST_F(StatusToolTest, StatusCmdError) { .Times(2) .WillRepeatedly(SetArgPointee<0>(mdsOnlineStatus)); - // 3、GetEtcdClusterStatus失败 + // 3. GetEtcdClusterStatus failed EXPECT_CALL(*etcdClient_, GetAndCheckEtcdVersion(_, _)) .Times(1) .WillOnce(Return(-1)); @@ -646,10 +566,9 @@ TEST_F(StatusToolTest, StatusCmdError) { .Times(2) .WillRepeatedly(Return(-1)); - // 当前无snapshot clone server可用 + // Currently, no snapshot clone server is available EXPECT_CALL(*versionTool_, GetAndCheckSnapshotCloneVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(failedList), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(failedList), Return(0))); std::map onlineStatus = {{"127.0.0.1:5555", false}, {"127.0.0.1:5556", false}}; EXPECT_CALL(*snapshotClient_, GetActiveAddrs()) @@ -659,42 +578,42 @@ TEST_F(StatusToolTest, StatusCmdError) { .Times(2) .WillRepeatedly(SetArgPointee<0>(onlineStatus)); - // 4、获取chunkserver version失败并ListChunkServersInCluster失败 + // 4. 
Failed to obtain chunkserver version, and ListChunkServersInCluster failed EXPECT_CALL(*versionTool_, GetAndCheckChunkServerVersion(_, _)) .WillOnce(Return(-1)); - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An>*>())) + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster( + An>*>())) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("status")); - // 获取mds在线状态失败 + // Failed to obtain mds online status EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(std::vector())); EXPECT_CALL(*mdsClient_, GetMdsOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(mdsOnlineStatus)); - // 获取mdsversion失败 + // Failed to obtain mds version EXPECT_CALL(*versionTool_, GetAndCheckMdsVersion(_, _)) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("mds-status")); - // 个别chunkserver获取version失败 + // Individual chunkservers failed to obtain version EXPECT_CALL(*versionTool_, GetAndCheckChunkServerVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - SetArgPointee<1>(failedList), - Return(0))); - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An>*>())) + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), SetArgPointee<1>(failedList), + Return(0))); + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster( + An>*>())) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("chunkserver-status")); } TEST_F(StatusToolTest, IsClusterHeatlhy) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); std::map onlineStatus = {{"127.0.0.1:8001", true}, {"127.0.0.1:8002", true}, @@ -702,55 +621,54 @@ std::map onlineStatus2 = {{"127.0.0.1:8001", true}, {"127.0.0.1:8002", false}, {"127.0.0.1:8003", true}}; - // 1、copysets不健康 + // 1. Copysets are unhealthy EXPECT_CALL(*copysetCheck_, CheckCopysetsInCluster()) .Times(1) .WillOnce(Return(-1)); - // 2、没有mds可用 + // 2. No mds available EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(std::vector())); - // 3、有mds不在线 + // 3. Some MDSs are not online EXPECT_CALL(*mdsClient_, GetMdsOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus2)); - // 4、获取etcd集群状态失败 + // 4. Failed to obtain the ETCD cluster status EXPECT_CALL(*etcdClient_, GetEtcdClusterStatus(_, _)) .Times(1) .WillOnce(Return(-1)); - // 5、没有snapshot-clone-server可用 + // 5. No snapshot-clone-server available EXPECT_CALL(*snapshotClient_, GetActiveAddrs()) .Times(1) .WillOnce(Return(std::vector())); - // 6、有snapshot-clone-server不在线 + // 6. Some snapshot-clone-servers are not online EXPECT_CALL(*snapshotClient_, GetOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus2)); ASSERT_FALSE(statusTool.IsClusterHeatlhy()); - // 1、copyset健康 + // 1. Copysets are healthy EXPECT_CALL(*copysetCheck_, CheckCopysetsInCluster()) .Times(1) .WillOnce(Return(0)); - // 2、超过一个mds在服务 + // 2. More than one mds is in service EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(std::vector(2))); - // 3、mds都在线 + // 3. All MDSs are online EXPECT_CALL(*mdsClient_, GetMdsOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus)); - // 4、etcd没有leader且有etcd不在线 + // 4. 
ETCD has no leader and some ETCD nodes are not online EXPECT_CALL(*etcdClient_, GetEtcdClusterStatus(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(std::vector()), - SetArgPointee<1>(onlineStatus2), - Return(0))); + SetArgPointee<1>(onlineStatus2), Return(0))); - // 5、有多个snapshot-clone-server可用 + // 5. Multiple snapshot-clone-servers are available EXPECT_CALL(*snapshotClient_, GetActiveAddrs()) .Times(1) .WillOnce(Return(std::vector(2))); - // 9、snapshot-clone-server都在线 + // 6. All snapshot-clone-servers are online EXPECT_CALL(*snapshotClient_, GetOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus)); @@ -758,43 +676,30 @@ } TEST_F(StatusToolTest, ListClientCmd) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); std::vector clientAddrs; for (int i = 0; i < 10; ++i) { clientAddrs.emplace_back("127.0.0.1:900" + std::to_string(i)); } - // 成功 + // Success EXPECT_CALL(*mdsClient_, ListClient(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("client-list")); - // 失败 - EXPECT_CALL(*mdsClient_, ListClient(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // Failed + EXPECT_CALL(*mdsClient_, ListClient(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("client-list")); } TEST_F(StatusToolTest, ServerList) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); std::vector servers; for (int i = 0; i < 3; ++i) { @@ -802,13 +707,12 @@ GetServerInfoForTest(&server, i); servers.emplace_back(server); } - // 成功 + // Success EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(servers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(servers), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("server-list")); - // 失败 + // Failed EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) .WillOnce(Return(-1)); @@ -816,15 +720,10 @@ } TEST_F(StatusToolTest, LogicalPoolList) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); std::vector lgPools; for (int i = 1; i <= 3; ++i) { @@ -832,30 +731,25 @@ TEST_F(StatusToolTest, 
LogicalPoolList) { GetLogicalPoolForTest(i, &lgPool); lgPools.emplace_back(lgPool); } - // 成功 + // Success EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(lgPools), Return(0))); AllocMap allocMap = {{1, DefaultSegmentSize}, {2, DefaultSegmentSize * 20}}; EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(allocMap), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(allocMap), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("logical-pool-list")); - // 失败 + // Failed EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("logical-pool-list")); EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(lgPools), - Return(0))); - EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<0>(lgPools), Return(0))); + EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("logical-pool-list")); } } // namespace tool } // namespace curve - diff --git a/test/tools/version_tool_test.cpp b/test/tools/version_tool_test.cpp index 64581f73ac..db40892f40 100644 --- a/test/tools/version_tool_test.cpp +++ b/test/tools/version_tool_test.cpp @@ -21,21 +21,23 @@ * Copyright (c) 2018 netease */ -#include #include "src/tools/version_tool.h" + +#include + #include "test/tools/mock/mock_mds_client.h" #include "test/tools/mock/mock_metric_client.h" #include "test/tools/mock/mock_snapshot_clone_client.h" +using curve::mds::topology::ChunkServerStatus; +using curve::mds::topology::DiskState; +using curve::mds::topology::OnlineState; using ::testing::_; +using ::testing::An; +using ::testing::DoAll; using ::testing::Return; using ::testing::ReturnRef; -using ::testing::DoAll; using ::testing::SetArgPointee; -using ::testing::An; -using curve::mds::topology::ChunkServerStatus; -using curve::mds::topology::DiskState; -using curve::mds::topology::OnlineState; namespace curve { namespace tool { @@ -53,8 +55,8 @@ class VersionToolTest : public ::testing::Test { metricClient_ = nullptr; } - void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo *csInfo, - uint64_t csId) { + void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo* csInfo, + uint64_t csId) { csInfo->set_chunkserverid(csId); csInfo->set_disktype("ssd"); csInfo->set_hostip("127.0.0.1"); @@ -73,64 +75,61 @@ class VersionToolTest : public ::testing::Test { TEST_F(VersionToolTest, GetAndCheckMdsVersion) { VersionTool versionTool(mdsClient_, metricClient_, snapshotClient_); - std::map dummyServerMap = - {{"127.0.0.1:6666", "127.0.0.1:6667"}, - {"127.0.0.1:6668", "127.0.0.1:6669"}, - {"127.0.0.1:6670", "127.0.0.1:6671"}}; + std::map dummyServerMap = { + {"127.0.0.1:6666", "127.0.0.1:6667"}, + {"127.0.0.1:6668", "127.0.0.1:6669"}, + {"127.0.0.1:6670", "127.0.0.1:6671"}}; - // 1、正常情况 + // 1. 
Normal situation EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); std::string version; std::vector failedList; ASSERT_EQ(0, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); ASSERT_TRUE(failedList.empty()); - // 2、获取部分mds curve_version失败 + // 2. Failed to obtain curve_version from some mds EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) .WillOnce(Return(MetricRet::kOtherErr)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); ASSERT_EQ(0, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); std::vector expectedList = {"127.0.0.1:6667"}; ASSERT_EQ(expectedList, failedList); - // 3、dummyServerMap为空 + // 3. dummyServerMap is empty std::map dummyServerMap2; EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap2)); - EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) - .Times(0); + EXPECT_CALL(*metricClient_, GetMetric(_, _, _)).Times(0); ASSERT_EQ(-1, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 4、version不一致 + // 4. version inconsistency EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)); ASSERT_EQ(-1, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 5、老版本mds + // 5. Old version of mds EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); @@ -151,123 +150,112 @@ TEST_F(VersionToolTest, GetChunkServerVersion) { chunkservers.emplace_back(csInfo); } - // 1、正常情况 + // 1. Normal situation - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); std::string version; std::vector failedList; - ASSERT_EQ(0, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + ASSERT_EQ(0, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); ASSERT_TRUE(failedList.empty()); - // 2、ListChunkServersInCluster失败 + // 2. 
ListChunkServersInCluster failed + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) .WillOnce(Return(-1)); - ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + ASSERT_EQ(-1, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); - // 3、获取metric失败 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 3. Failed to obtain metric + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) .WillOnce(Return(MetricRet::kOtherErr)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); - ASSERT_EQ(0, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); + ASSERT_EQ(0, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); std::vector expectList = {"127.0.0.1:9191"}; ASSERT_EQ(expectList, failedList); - // 4、chunkserverList为空 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 4. chunkserverList is empty + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(std::vector()), - Return(0))); - EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) - .Times(0); - ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + .WillOnce( + DoAll(SetArgPointee<0>(std::vector()), Return(0))); + EXPECT_CALL(*metricClient_, GetMetric(_, _, _)).Times(0); + ASSERT_EQ(-1, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 5、version不一致 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 5. version inconsistency + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) - .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); - ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); + ASSERT_EQ(-1, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 6、老版本 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 6. 
Old version + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) .WillRepeatedly(Return(MetricRet::kNotFound)); - ASSERT_EQ(0, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + ASSERT_EQ(0, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_EQ("before-0.0.5.2", version); ASSERT_TRUE(failedList.empty()); } TEST_F(VersionToolTest, GetClientVersion) { VersionTool versionTool(mdsClient_, metricClient_, snapshotClient_); - std::vector clientAddrs = - {"127.0.0.1:8000", "127.0.0.1:8001", "127.0.0.1:8002", - "127.0.0.1:8003", "127.0.0.1:8004", "127.0.0.1:8005"}; + std::vector clientAddrs = {"127.0.0.1:8000", "127.0.0.1:8001", + "127.0.0.1:8002", "127.0.0.1:8003", + "127.0.0.1:8004", "127.0.0.1:8005"}; - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*mdsClient_, ListClient(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, kProcessCmdLineMetricName, _)) .Times(6) .WillOnce(Return(MetricRet::kOtherErr)) - .WillOnce(DoAll(SetArgPointee<2>(kProcessQemu), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>(kProcessPython), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>(kProcessOther), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>(kProcessQemu), Return(MetricRet::kOK))) + .WillOnce( + DoAll(SetArgPointee<2>(kProcessPython), Return(MetricRet::kOK))) + .WillOnce( + DoAll(SetArgPointee<2>(kProcessOther), Return(MetricRet::kOK))) .WillRepeatedly(DoAll(SetArgPointee<2>(kProcessNebdServer), - Return(MetricRet::kOK))); + Return(MetricRet::kOK))); EXPECT_CALL(*metricClient_, GetMetric(_, kCurveVersionMetricName, _)) .Times(5) - .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("0.0.5.3"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.5.3"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)) .WillOnce(Return(MetricRet::kNotFound)) - .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), - Return(MetricRet::kOK))); + .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), Return(MetricRet::kOK))); ClientVersionMapType clientVersionMap; ClientVersionMapType expected; VersionMapType versionMap = {{"0.0.5.2", {"127.0.0.1:8004"}}, @@ -282,85 +270,80 @@ TEST_F(VersionToolTest, GetClientVersion) { ASSERT_EQ(0, versionTool.GetClientVersion(&clientVersionMap)); ASSERT_EQ(expected, clientVersionMap); - // 2、ListClient失败 - EXPECT_CALL(*mdsClient_, ListClient(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. ListClient failed + EXPECT_CALL(*mdsClient_, ListClient(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, versionTool.GetClientVersion(&clientVersionMap)); } TEST_F(VersionToolTest, GetAndCheckSnapshotCloneVersion) { VersionTool versionTool(mdsClient_, metricClient_, snapshotClient_); - std::map dummyServerMap = - {{"127.0.0.1:6666", "127.0.0.1:6667"}, - {"127.0.0.1:6668", "127.0.0.1:6669"}, - {"127.0.0.1:6670", "127.0.0.1:6671"}}; + std::map dummyServerMap = { + {"127.0.0.1:6666", "127.0.0.1:6667"}, + {"127.0.0.1:6668", "127.0.0.1:6669"}, + {"127.0.0.1:6670", "127.0.0.1:6671"}}; - // 1、正常情况 + // 1. 
Normal situation EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); std::string version; std::vector failedList; - ASSERT_EQ(0, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + ASSERT_EQ( + 0, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); ASSERT_TRUE(failedList.empty()); - // 2、获取部分curve_version失败 + // 2. Failed to obtain curve_version from some servers EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) .WillOnce(Return(MetricRet::kOtherErr)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); - ASSERT_EQ(0, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + ASSERT_EQ( + 0, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); std::vector expectedList = {"127.0.0.1:6667"}; ASSERT_EQ(expectedList, failedList); - // 3、dummyServerMap为空 + // 3. dummyServerMap is empty std::map dummyServerMap2; EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap2)); - EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) - .Times(0); + EXPECT_CALL(*metricClient_, GetMetric(_, _, _)).Times(0); - ASSERT_EQ(-1, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + ASSERT_EQ( + -1, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 4、version不一致 + // 4. version inconsistency EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)); - ASSERT_EQ(-1, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + ASSERT_EQ( + -1, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 5、老版本mds + // 5. 
Old version of mds EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) .WillRepeatedly(Return(MetricRet::kNotFound)); - ASSERT_EQ(0, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + ASSERT_EQ( + 0, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_EQ("before-0.0.5.2", version); ASSERT_TRUE(failedList.empty()); } diff --git a/test/util/config_generator.h b/test/util/config_generator.h index f0508e58ca..7ee14f23d0 100644 --- a/test/util/config_generator.h +++ b/test/util/config_generator.h @@ -32,14 +32,15 @@ namespace curve { using curve::common::Configuration; -// 各模块继承该接口,实现自己的初始化配置函数 +// Each module inherits this interface and implements its own initialization +// configuration function class ConfigGenerator { public: ConfigGenerator() = default; virtual ~ConfigGenerator() = default; - virtual bool LoadTemplete(const std::string &defaultConfigPath) { + virtual bool LoadTemplete(const std::string& defaultConfigPath) { config_.SetConfigPath(defaultConfigPath); if (!config_.LoadConfig()) { return false; @@ -47,23 +48,22 @@ class ConfigGenerator { return true; } - virtual void SetConfigPath(const std::string &configPath) { + virtual void SetConfigPath(const std::string& configPath) { configPath_ = configPath; } - // 设置配置项 + // Set a configuration item virtual void SetKV(const std::string& key, const std::string& value) { config_.SetValue(key, value); } /** - * @brief 批量设置配置项 + * @brief Set configuration items in batch * - * @param options 配置项表,形如 "Ip=127.0.0.1" + * @param options configuration item table, in the form of "Ip=127.0.0.1" */ - virtual void SetConfigOptions( - const std::vector &options) { - for (const std::string &op : options) { + virtual void SetConfigOptions(const std::vector& options) { + for (const std::string& op : options) { int delimiterPos = op.find("="); std::string key = op.substr(0, delimiterPos); std::string value = op.substr(delimiterPos + 1); @@ -71,7 +71,7 @@ } } - // 用于生成配置文件 + // Used to generate configuration files virtual bool Generate() { if (configPath_ != "") { config_.SetConfigPath(configPath_); @@ -80,27 +80,25 @@ return false; } - virtual bool Generate(const std::string &newConfigPath) { + virtual bool Generate(const std::string& newConfigPath) { configPath_ = newConfigPath; return Generate(); } - // 删除配置文件 - virtual int Remove() { - return ::remove(configPath_.c_str()); - } + // Delete the configuration file + virtual int Remove() { return ::remove(configPath_.c_str()); } protected: - // 配置文件路径 + // Configuration file path std::string configPath_; - // 配置器 + // Configurator Configuration config_; }; #define DEFAULT_MDS_CONF "conf/mds.conf" struct MDSConfigGenerator : public ConfigGenerator { - explicit MDSConfigGenerator(const std::string &configPath) { + explicit MDSConfigGenerator(const std::string& configPath) { LoadTemplete(DEFAULT_MDS_CONF); SetConfigPath(configPath); } @@ -109,7 +107,7 @@ struct MDSConfigGenerator : public ConfigGenerator { #define DEFAULT_CHUNKSERVER_CONF "conf/chunkserver.conf.example" struct CSConfigGenerator : public ConfigGenerator { - explicit CSConfigGenerator(const std::string &configPath) { + explicit CSConfigGenerator(const std::string& configPath) { LoadTemplete(DEFAULT_CHUNKSERVER_CONF); SetConfigPath(configPath); } @@ -118,7 +116,7 @@ #define DEFAULT_CLIENT_CONF "conf/client.conf" 
struct ClientConfigGenerator : public ConfigGenerator { - explicit ClientConfigGenerator(const std::string &configPath) { + explicit ClientConfigGenerator(const std::string& configPath) { LoadTemplete(DEFAULT_CLIENT_CONF); SetConfigPath(configPath); } @@ -127,7 +125,7 @@ struct ClientConfigGenerator : public ConfigGenerator { #define DEFAULT_CS_CLIENT_CONF "conf/cs_client.conf" struct CSClientConfigGenerator : public ConfigGenerator { - explicit CSClientConfigGenerator(const std::string &configPath) { + explicit CSClientConfigGenerator(const std::string& configPath) { LoadTemplete(DEFAULT_CS_CLIENT_CONF); SetConfigPath(configPath); } @@ -136,7 +134,7 @@ struct CSClientConfigGenerator : public ConfigGenerator { #define DEFAULT_SNAP_CLIENT_CONF "conf/snap_client.conf" struct SnapClientConfigGenerator : public ConfigGenerator { - explicit SnapClientConfigGenerator(const std::string &configPath) { + explicit SnapClientConfigGenerator(const std::string& configPath) { LoadTemplete(DEFAULT_SNAP_CLIENT_CONF); SetConfigPath(configPath); } @@ -145,7 +143,7 @@ struct SnapClientConfigGenerator : public ConfigGenerator { #define DEFAULT_S3_CONF "conf/s3.conf" struct S3ConfigGenerator : public ConfigGenerator { - explicit S3ConfigGenerator(const std::string &configPath) { + explicit S3ConfigGenerator(const std::string& configPath) { LoadTemplete(DEFAULT_S3_CONF); SetConfigPath(configPath); SetKV("s3.endpoint", "127.0.0.1:9999"); @@ -155,7 +153,7 @@ struct S3ConfigGenerator : public ConfigGenerator { #define DEFAULT_SCS_CONF "conf/snapshot_clone_server.conf" struct SCSConfigGenerator : public ConfigGenerator { - explicit SCSConfigGenerator(const std::string &configPath) { + explicit SCSConfigGenerator(const std::string& configPath) { LoadTemplete(DEFAULT_SCS_CONF); SetConfigPath(configPath); } diff --git a/thirdparties/etcdclient/etcdclient.go b/thirdparties/etcdclient/etcdclient.go index dc7df6d691..493d2a807f 100644 --- a/thirdparties/etcdclient/etcdclient.go +++ b/thirdparties/etcdclient/etcdclient.go @@ -21,7 +21,7 @@ package main enum EtcdErrCode { - // grpc errCode, 具体的含义见: + // grpc errCode, for specific meanings, refer to: // https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes#ErrGRPCNoSpace // https://godoc.org/google.golang.org/grpc/codes#Code EtcdOK = 0, @@ -42,7 +42,7 @@ enum EtcdErrCode EtcdDataLoss = 15, EtcdUnauthenticated = 16, - // 自定义错误码 + // Custom Error Codes EtcdTxnUnkownOp = 17, EtcdObjectNotExist = 18, EtcdErrObjectType = 19, @@ -198,7 +198,7 @@ func GetErrCode(op string, err error) C.enum_EtcdErrCode { return C.EtcdUnknown } -// TODO(lixiaocui): 日志打印看是否需要glog +// TODO(lixiaocui): Check whether glog is needed for log printing //export NewEtcdClientV3 func NewEtcdClientV3(conf C.struct_EtcdConf) C.enum_EtcdErrCode { var err error @@ -271,7 +271,7 @@ func EtcdClientGet(timeout C.int, key *C.char, resp.Header.Revision } -// TODO(lixiaocui): list可能需要有长度限制 +// TODO(lixiaocui): list may require a length limit //export EtcdClientList func EtcdClientList(timeout C.int, startKey, endKey *C.char, startLen, endLen C.int) (C.enum_EtcdErrCode, uint64, int64) { @@ -425,7 +425,7 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int, goPfx := C.GoStringN(pfx, pfxLen) goLeaderName := C.GoStringN(leaderName, nameLen) - // 创建带ttl的session + // Create a session with ttl var sessionOpts concurrency.SessionOption = concurrency.WithTTL(int(sessionInterSec)) session, err := concurrency.NewSession(globalClient, sessionOpts) if err != nil { @@ -433,7 +433,7 @@ func EtcdElectionCampaign(pfx *C.char, 
pfxLen C.int, return C.EtcdCampaignInternalErr, 0 } - // 创建election和超时context + // Create an election and timeout context var election *concurrency.Election = concurrency.NewElection(session, goPfx) var ctx context.Context var cancel context.CancelFunc @@ -448,7 +448,7 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int, wg.Add(2) defer wg.Wait() - // 监测当前的leader + // Monitor the current leader obCtx, obCancel := context.WithCancel(context.Background()) observer := election.Observe(obCtx) defer obCancel() @@ -472,7 +472,7 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int, } }() - // 监测自己key的存活状态 + // Monitor the liveness of its own key exitSignal := make(chan struct{}, 1) go func() { defer wg.Done() @@ -490,8 +490,8 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int, } }() - // 1. Campaign返回nil说明当前mds持有的key版本号最小 - // 2. Campaign返回时不检测自己持有key的状态,所以返回nil后需要监测session.Done() + // 1. Campaign returning nil indicates that the current MDS holds the smallest key version number + // 2. Campaign does not check the status of the key it holds when it returns, so after it returns nil, session.Done() needs to be monitored if err := election.Campaign(ctx, goLeaderName); err == nil { log.Printf("[%s/%x] campaign for leader success", goLeaderName, session.Lease()) diff --git a/tools/curvefsTool.cpp b/tools/curvefsTool.cpp index 2227257bf3..a104e4e21f 100644 --- a/tools/curvefsTool.cpp +++ b/tools/curvefsTool.cpp @@ -29,27 +29,25 @@ using ::curve::common::kDefaultPoolsetName; DEFINE_string(mds_addr, "127.0.0.1:6666", - "mds ip and port list, separated by \",\""); + "mds ip and port list, separated by \",\""); -DEFINE_string(op, - "", - "operation: create_logicalpool, " - "create_physicalpool, " - "set_chunkserver, " - "set_logicalpool"); +DEFINE_string(op, "", + "operation: create_logicalpool, " + "create_physicalpool, " + "set_chunkserver, " + "set_logicalpool"); DEFINE_string(cluster_map, "/etc/curve/topo.json", "cluster topology map."); DEFINE_int32(chunkserver_id, -1, "chunkserver id for set chunkserver status."); DEFINE_string(chunkserver_status, "readwrite", - "chunkserver status: readwrite, pendding."); + "chunkserver status: readwrite, pendding."); DEFINE_uint32(rpcTimeOutMs, 5000u, "rpc time out"); DEFINE_string(confPath, "/etc/curve/tools.conf", "config file path of tools"); DEFINE_uint32(logicalpool_id, -1, "logicalpool id for set logicalpool status."); -DEFINE_string(logicalpool_status, "allow", - "logicalpool status: allow, deny."); +DEFINE_string(logicalpool_status, "allow", "logicalpool status: allow, deny."); const int kRetCodeCommonErr = -1; const int kRetCodeRedirectMds = -2; @@ -73,7 +71,6 @@ const char kAllocStatusDeny[] = "deny"; const char kPoolsets[] = "poolsets"; const char kPoolsetName[] = "poolset"; - using ::curve::common::SplitString; namespace curve { @@ -83,8 +80,10 @@ namespace topology { const std::string CurvefsTools::clusterMapSeprator = " "; // NOLINT void UpdateFlagsFromConf(curve::common::Configuration* conf) { - // 如果配置文件不存在的话不报错,以命令行为准,这是为了不强依赖配置 - // 如果配置文件存在并且没有指定命令行的话,就以配置文件为准 + // If the configuration file does not exist, no error will be reported, and + // the command line will prevail. 
This avoids a hard dependency on the + // configuration file. If the configuration file exists and no command-line + // flag is specified, the configuration file takes precedence. if (conf->LoadConfig()) { google::CommandLineFlagInfo info; if (GetCommandLineFlagInfo("mds_addr", &info) && info.is_default) { @@ -122,20 +121,18 @@ int CurvefsTools::TryAnotherMdsAddress() { } mdsAddressIndex_ = (mdsAddressIndex_ + 1) % mdsAddressStr_.size(); std::string mdsAddress = mdsAddressStr_[mdsAddressIndex_]; - LOG(INFO) << "try mds address(" << mdsAddressIndex_ - << "): " << mdsAddress; + LOG(INFO) << "try mds address(" << mdsAddressIndex_ << "): " << mdsAddress; int ret = channel_.Init(mdsAddress.c_str(), NULL); if (ret != 0) { - LOG(ERROR) << "Fail to init channel to mdsAddress: " - << mdsAddress; + LOG(ERROR) << "Fail to init channel to mdsAddress: " << mdsAddress; } return ret; } int CurvefsTools::DealFailedRet(int ret, std::string operation) { if (kRetCodeRedirectMds == ret) { - LOG(WARNING) << operation << " fail on mds: " - << mdsAddressStr_[mdsAddressIndex_]; + LOG(WARNING) << operation + << " fail on mds: " << mdsAddressStr_[mdsAddressIndex_]; } else { LOG(ERROR) << operation << " fail."; } @@ -166,10 +163,9 @@ int CurvefsTools::HandleCreateLogicalPool() { std::string copysetNumStr = std::to_string(lgPool.copysetNum); std::string zoneNumStr = std::to_string(lgPool.zoneNum); - std::string rapString = "{\"replicaNum\":" + replicaNumStr - + ", \"copysetNum\":" + copysetNumStr - + ", \"zoneNum\":" + zoneNumStr - + "}"; + std::string rapString = "{\"replicaNum\":" + replicaNumStr + + ", \"copysetNum\":" + copysetNumStr + + ", \"zoneNum\":" + zoneNumStr + "}"; request.set_redundanceandplacementpolicy(rapString); request.set_userpolicy("{\"aaa\":1}"); @@ -189,7 +185,7 @@ stub.CreateLogicalPool(&cntl, &request, &response, nullptr); if (cntl.Failed()) { LOG(WARNING) << "send rpc get cntl Failed, error context:" - << cntl.ErrorText(); + << cntl.ErrorText(); return kRetCodeRedirectMds; } if (response.statuscode() == kTopoErrCodeSuccess) { @@ -199,8 +195,7 @@ LOG(INFO) << "Logical pool already exist"; } else { LOG(ERROR) << "CreateLogicalPool Rpc response fail. " - << "Message is :" - << response.DebugString(); + << "Message is :" << response.DebugString(); return response.statuscode(); } }
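The precedence rule described in the comment above is the stock gflags pattern: a flag whose CommandLineFlagInfo::is_default is still true was never set on the command line, so it is safe to overwrite it from the configuration file. A minimal self-contained sketch of that pattern (an illustration only; MaybeOverrideFromConf and confValue are hypothetical names, not the tool's actual code):

#include <gflags/gflags.h>
#include <string>

DEFINE_string(mds_addr, "127.0.0.1:6666", "mds ip and port list");

// Overwrite mds_addr from the config file only when the user did not set it
// on the command line: is_default stays true until the flag is explicitly set.
void MaybeOverrideFromConf(const std::string& confValue) {
    google::CommandLineFlagInfo info;
    if (google::GetCommandLineFlagInfo("mds_addr", &info) && info.is_default) {
        google::SetCommandLineOption("mds_addr", confValue.c_str());
    }
}

The net effect is the ordering the comment promises: compiled-in default, then configuration file, then explicit command-line flag.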
" - << "Message is :" - << response.DebugString(); + << "Message is :" << response.DebugString(); return response.statuscode(); } } @@ -221,7 +216,7 @@ int CurvefsTools::ScanLogicalPool() { return ret; } for (auto it = logicalPoolInfos.begin(); - it != logicalPoolInfos.end();) { + it != logicalPoolInfos.end();) { auto ix = std::find_if(lgPoolDatas.begin(), lgPoolDatas.end(), [it](const CurveLogicalPoolData& data) { @@ -236,8 +231,9 @@ int CurvefsTools::ScanLogicalPool() { return 0; } -int CurvefsTools::ListLogicalPool(const std::string& phyPoolName, - std::list *logicalPoolInfos) { +int CurvefsTools::ListLogicalPool( + const std::string& phyPoolName, + std::list* logicalPoolInfos) { TopologyService_Stub stub(&channel_); ListLogicalPoolRequest request; ListLogicalPoolResponse response; @@ -246,15 +242,13 @@ int CurvefsTools::ListLogicalPool(const std::string& phyPoolName, cntl.set_log_id(1); request.set_physicalpoolname(phyPoolName); - LOG(INFO) << "ListLogicalPool send request: " - << request.DebugString(); + LOG(INFO) << "ListLogicalPool send request: " << request.DebugString(); stub.ListLogicalPool(&cntl, &request, &response, nullptr); if (cntl.Failed()) { return kRetCodeRedirectMds; } for (int i = 0; i < response.logicalpoolinfos_size(); i++) { - logicalPoolInfos->push_back( - response.logicalpoolinfos(i)); + logicalPoolInfos->push_back(response.logicalpoolinfos(i)); } return 0; } @@ -311,7 +305,6 @@ int CurvefsTools::HandleBuildCluster() { return ret; } - int CurvefsTools::ReadClusterMap() { std::ifstream fin(FLAGS_cluster_map); if (fin.is_open()) { @@ -325,8 +318,8 @@ int CurvefsTools::ReadClusterMap() { return -1; } } else { - LOG(ERROR) << "open cluster map file : " - << FLAGS_cluster_map << " fail."; + LOG(ERROR) << "open cluster map file : " << FLAGS_cluster_map + << " fail."; return -1; } return 0; @@ -339,7 +332,7 @@ int CurvefsTools::InitPoolsetData() { for (const auto& poolset : clusterMap_[kPoolsets]) { CurvePoolsetData poolsetData; if (!poolset[kName].isString()) { - LOG(ERROR) <<"poolset name must be string" << poolset[kName]; + LOG(ERROR) << "poolset name must be string" << poolset[kName]; return -1; } poolsetData.name = poolset[kName].asString(); @@ -364,7 +357,7 @@ int CurvefsTools::InitServerData() { LOG(ERROR) << "No servers in cluster map"; return -1; } - for (const auto &server : clusterMap_[kServers]) { + for (const auto& server : clusterMap_[kServers]) { CurveServerData serverData; if (!server[kName].isString()) { LOG(ERROR) << "server name must be string"; @@ -423,7 +416,7 @@ int CurvefsTools::InitLogicalPoolData() { LOG(ERROR) << "No servers in cluster map"; return -1; } - for (const auto &lgPool : clusterMap_[kLogicalPools]) { + for (const auto& lgPool : clusterMap_[kLogicalPools]) { CurveLogicalPoolData lgPoolData; if (!lgPool[kName].isString()) { LOG(ERROR) << "logicalpool name must be string"; @@ -496,8 +489,7 @@ int CurvefsTools::ListPoolset(std::list* poolsetInfos) { } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "ListPoolset Rpc response fail. 
" - << "Message is :" - << response.DebugString(); + << "Message is :" << response.DebugString(); return response.statuscode(); } else { LOG(INFO) << "Received ListPoolset Rpc response success, " @@ -511,7 +503,7 @@ int CurvefsTools::ListPoolset(std::list* poolsetInfos) { } int CurvefsTools::ListPhysicalPool( - std::list *physicalPoolInfos) { + std::list* physicalPoolInfos) { TopologyService_Stub stub(&channel_); ListPhysicalPoolRequest request; ListPhysicalPoolResponse response; @@ -519,38 +511,30 @@ int CurvefsTools::ListPhysicalPool( cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "ListPhysicalPool send request: " - << request.DebugString(); + LOG(INFO) << "ListPhysicalPool send request: " << request.DebugString(); - stub.ListPhysicalPool(&cntl, - &request, - &response, - nullptr); + stub.ListPhysicalPool(&cntl, &request, &response, nullptr); if (cntl.Failed()) { return kRetCodeRedirectMds; } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "ListPhysicalPool Rpc response fail. " - << "Message is :" - << response.DebugString(); + << "Message is :" << response.DebugString(); return response.statuscode(); } else { LOG(INFO) << "Received ListPhysicalPool Rpc response success, " << response.DebugString(); } - for (int i = 0; - i < response.physicalpoolinfos_size(); - i++) { - physicalPoolInfos->push_back( - response.physicalpoolinfos(i)); + for (int i = 0; i < response.physicalpoolinfos_size(); i++) { + physicalPoolInfos->push_back(response.physicalpoolinfos(i)); } return 0; } -int CurvefsTools::ListPhysicalPoolsInPoolset(PoolsetIdType poolsetid, - std::list *physicalPoolInfos) { +int CurvefsTools::ListPhysicalPoolsInPoolset( + PoolsetIdType poolsetid, std::list* physicalPoolInfos) { TopologyService_Stub stub(&channel_); ListPhysicalPoolsInPoolsetRequest request; ListPhysicalPoolResponse response; @@ -570,10 +554,8 @@ int CurvefsTools::ListPhysicalPoolsInPoolset(PoolsetIdType poolsetid, } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "ListPhysicalPoolsInPoolset Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , poolsetid = " - << poolsetid; + << "Message is :" << response.DebugString() + << " , poolsetid = " << poolsetid; return response.statuscode(); } else { LOG(INFO) << "Received ListPhyPoolsInPoolset Rpc resp success," @@ -587,7 +569,7 @@ int CurvefsTools::ListPhysicalPoolsInPoolset(PoolsetIdType poolsetid, } int CurvefsTools::AddListPoolZone(PoolIdType poolid, - std::list *zoneInfos) { + std::list* zoneInfos) { TopologyService_Stub stub(&channel_); ListPoolZoneRequest request; ListPoolZoneResponse response; @@ -597,8 +579,7 @@ int CurvefsTools::AddListPoolZone(PoolIdType poolid, cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "ListPoolZone, send request: " - << request.DebugString(); + LOG(INFO) << "ListPoolZone, send request: " << request.DebugString(); stub.ListPoolZone(&cntl, &request, &response, nullptr); @@ -607,10 +588,8 @@ int CurvefsTools::AddListPoolZone(PoolIdType poolid, } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "ListPoolZone Rpc response fail. 
" - << "Message is :" - << response.DebugString() - << " , physicalpoolid = " - << poolid; + << "Message is :" << response.DebugString() + << " , physicalpoolid = " << poolid; return response.statuscode(); } else { LOG(INFO) << "Received ListPoolZone Rpc response success, " @@ -624,7 +603,7 @@ int CurvefsTools::AddListPoolZone(PoolIdType poolid, } int CurvefsTools::AddListZoneServer(ZoneIdType zoneid, - std::list *serverInfos) { + std::list* serverInfos) { TopologyService_Stub stub(&channel_); ListZoneServerRequest request; ListZoneServerResponse response; @@ -633,8 +612,7 @@ int CurvefsTools::AddListZoneServer(ZoneIdType zoneid, cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "ListZoneServer, send request: " - << request.DebugString(); + LOG(INFO) << "ListZoneServer, send request: " << request.DebugString(); stub.ListZoneServer(&cntl, &request, &response, nullptr); @@ -643,14 +621,12 @@ int CurvefsTools::AddListZoneServer(ZoneIdType zoneid, } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "ListZoneServer Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , zoneid = " - << zoneid; + << "Message is :" << response.DebugString() + << " , zoneid = " << zoneid; return response.statuscode(); } else { LOG(INFO) << "ListZoneServer Rpc response success, " - << response.DebugString(); + << response.DebugString(); } for (int i = 0; i < response.serverinfo_size(); i++) { @@ -700,11 +676,11 @@ int CurvefsTools::ScanCluster() { // get all phsicalpool and compare // De-duplication for (auto server : serverDatas) { - if (std::find_if(physicalPoolToAdd.begin(), - physicalPoolToAdd.end(), - [server](CurvePhysicalPoolData& data) { - return data.physicalPoolName == server.physicalPoolName; - }) != physicalPoolToAdd.end()) { + if (std::find_if(physicalPoolToAdd.begin(), physicalPoolToAdd.end(), + [server](CurvePhysicalPoolData& data) { + return data.physicalPoolName == + server.physicalPoolName; + }) != physicalPoolToAdd.end()) { continue; } CurvePhysicalPoolData poolData; @@ -738,11 +714,11 @@ int CurvefsTools::ScanCluster() { for (auto it = physicalPoolInfos.begin(); it != physicalPoolInfos.end();) { auto ix = std::find_if( - physicalPoolToAdd.begin(), physicalPoolToAdd.end(), - [it](const CurvePhysicalPoolData& data) { - return (data.poolsetName == it->poolsetname()) && - (data.physicalPoolName == it->physicalpoolname()); - }); + physicalPoolToAdd.begin(), physicalPoolToAdd.end(), + [it](const CurvePhysicalPoolData& data) { + return (data.poolsetName == it->poolsetname()) && + (data.physicalPoolName == it->physicalpoolname()); + }); if (ix != physicalPoolToAdd.end()) { physicalPoolToAdd.erase(ix); it++; @@ -755,14 +731,12 @@ int CurvefsTools::ScanCluster() { // get zone and compare // De-duplication for (auto server : serverDatas) { - if (std::find_if(zoneToAdd.begin(), - zoneToAdd.end(), - [server](CurveZoneData& data) { - return (data.physicalPoolName == - server.physicalPoolName) && - (data.zoneName == - server.zoneName); - }) != zoneToAdd.end()) { + if (std::find_if(zoneToAdd.begin(), zoneToAdd.end(), + [server](CurveZoneData& data) { + return (data.physicalPoolName == + server.physicalPoolName) && + (data.zoneName == server.zoneName); + }) != zoneToAdd.end()) { continue; } CurveZoneData CurveZoneData; @@ -784,9 +758,8 @@ int CurvefsTools::ScanCluster() { } zoneInfos.clear(); - for (auto it = physicalPoolInfos.begin(); - it != physicalPoolInfos.end(); - it++) { + for (auto it = physicalPoolInfos.begin(); it != 
physicalPoolInfos.end(); + it++) { PoolIdType poolid = it->physicalpoolid(); ret = AddListPoolZone(poolid, &zoneInfos); if (ret < 0) { @@ -794,15 +767,12 @@ int CurvefsTools::ScanCluster() { } } - for (auto it = zoneInfos.begin(); - it != zoneInfos.end();) { + for (auto it = zoneInfos.begin(); it != zoneInfos.end();) { auto ix = std::find_if( zoneToAdd.begin(), zoneToAdd.end(), [it](const CurveZoneData& data) { - return (data.physicalPoolName == - it->physicalpoolname()) && - (data.zoneName == - it->zonename()); + return (data.physicalPoolName == it->physicalpoolname()) && + (data.zoneName == it->zonename()); }); if (ix != zoneToAdd.end()) { zoneToAdd.erase(ix); @@ -816,15 +786,12 @@ int CurvefsTools::ScanCluster() { // get server and compare // De-duplication for (auto server : serverDatas) { - if (std::find_if(serverToAdd.begin(), - serverToAdd.end(), - [server](CurveServerData& data) { - return data.serverName == - server.serverName; - }) != serverToAdd.end()) { + if (std::find_if(serverToAdd.begin(), serverToAdd.end(), + [server](CurveServerData& data) { + return data.serverName == server.serverName; + }) != serverToAdd.end()) { LOG(WARNING) << "WARING! Duplicated Server Name: " - << server.serverName - << " , ignored."; + << server.serverName << " , ignored."; continue; } serverToAdd.push_back(server); @@ -843,9 +810,7 @@ int CurvefsTools::ScanCluster() { } serverInfos.clear(); - for (auto it = zoneInfos.begin(); - it != zoneInfos.end(); - it++) { + for (auto it = zoneInfos.begin(); it != zoneInfos.end(); it++) { ZoneIdType zoneid = it->zoneid(); ret = AddListZoneServer(zoneid, &serverInfos); if (ret < 0) { @@ -853,17 +818,14 @@ int CurvefsTools::ScanCluster() { } } - for (auto it = serverInfos.begin(); - it != serverInfos.end(); - it++) { - auto ix = - std::find_if( - serverToAdd.begin(), serverToAdd.end(), - [it](const CurveServerData& data) { - return (data.serverName == it->hostname()) && - (data.zoneName == it->zonename()) && - (data.physicalPoolName == it->physicalpoolname()); - }); + for (auto it = serverInfos.begin(); it != serverInfos.end(); it++) { + auto ix = std::find_if( + serverToAdd.begin(), serverToAdd.end(), + [it](const CurveServerData& data) { + return (data.serverName == it->hostname()) && + (data.zoneName == it->zonename()) && + (data.physicalPoolName == it->physicalpoolname()); + }); if (ix != serverToAdd.end()) { serverToAdd.erase(ix); } else { @@ -893,22 +855,19 @@ int CurvefsTools::CreatePoolset() { cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "CreatePoolset, send request: " - << request.DebugString(); + LOG(INFO) << "CreatePoolset, send request: " << request.DebugString(); stub.CreatePoolset(&cntl, &request, &response, nullptr); if (cntl.Failed()) { LOG(WARNING) << "send rpc get cntl Failed, error context:" - << cntl.ErrorText(); + << cntl.ErrorText(); return kRetCodeRedirectMds; } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "CreatePoolset Rpc response fail. 
" - << "Message is :" - << response.DebugString() - << " , poolsetName =" - << it.name; + << "Message is :" << response.DebugString() + << " , poolsetName =" << it.name; return response.statuscode(); } else { LOG(INFO) << "Received CreatePoolset response success, " @@ -939,15 +898,13 @@ int CurvefsTools::CreatePhysicalPool() { if (cntl.Failed()) { LOG(WARNING) << "send rpc get cntl Failed, error context:" - << cntl.ErrorText(); + << cntl.ErrorText(); return kRetCodeRedirectMds; } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "CreatePhysicalPool Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , physicalPoolName =" - << it.physicalPoolName; + << "Message is :" << response.DebugString() + << " , physicalPoolName =" << it.physicalPoolName; return response.statuscode(); } else { LOG(INFO) << "Received CreatePhysicalPool response success, " @@ -971,8 +928,7 @@ int CurvefsTools::CreateZone() { cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "CreateZone, send request: " - << request.DebugString(); + LOG(INFO) << "CreateZone, send request: " << request.DebugString(); stub.CreateZone(&cntl, &request, &response, nullptr); @@ -980,20 +936,15 @@ int CurvefsTools::CreateZone() { cntl.ErrorCode() == brpc::ELOGOFF) { return kRetCodeRedirectMds; } else if (cntl.Failed()) { - LOG(ERROR) << "CreateZone, errcorde = " - << response.statuscode() - << ", error content:" - << cntl.ErrorText() - << " , zoneName = " - << it.zoneName; + LOG(ERROR) << "CreateZone, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , zoneName = " << it.zoneName; return kRetCodeCommonErr; } if (response.statuscode() != 0) { LOG(ERROR) << "CreateZone Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , zoneName = " - << it.zoneName; + << "Message is :" << response.DebugString() + << " , zoneName = " << it.zoneName; return response.statuscode(); } else { LOG(INFO) << "Received CreateZone Rpc success, " @@ -1023,8 +974,7 @@ int CurvefsTools::CreateServer() { cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "CreateServer, send request: " - << request.DebugString(); + LOG(INFO) << "CreateServer, send request: " << request.DebugString(); stub.RegistServer(&cntl, &request, &response, nullptr); @@ -1032,12 +982,9 @@ int CurvefsTools::CreateServer() { cntl.ErrorCode() == brpc::ELOGOFF) { return kRetCodeRedirectMds; } else if (cntl.Failed()) { - LOG(ERROR) << "RegistServer, errcorde = " - << response.statuscode() - << ", error content : " - << cntl.ErrorText() - << " , serverName = " - << it.serverName; + LOG(ERROR) << "RegistServer, errcorde = " << response.statuscode() + << ", error content : " << cntl.ErrorText() + << " , serverName = " << it.serverName; return kRetCodeCommonErr; } if (response.statuscode() == kTopoErrCodeSuccess) { @@ -1047,10 +994,8 @@ int CurvefsTools::CreateServer() { LOG(INFO) << "Server already exist"; } else { LOG(ERROR) << "RegistServer Rpc response fail. 
" - << "Message is :" - << response.DebugString() - << " , serverName = " - << it.serverName; + << "Message is :" << response.DebugString() + << " , serverName = " << it.serverName; return response.statuscode(); } } @@ -1080,18 +1025,14 @@ int CurvefsTools::ClearPhysicalPool() { } else if (cntl.Failed()) { LOG(ERROR) << "DeletePhysicalPool, errcorde = " << response.statuscode() - << ", error content:" - << cntl.ErrorText() - << " , physicalPoolId = " - << it; + << ", error content:" << cntl.ErrorText() + << " , physicalPoolId = " << it; return kRetCodeCommonErr; } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "DeletePhysicalPool Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , physicalPoolId = " - << it; + << "Message is :" << response.DebugString() + << " , physicalPoolId = " << it; return response.statuscode(); } else { LOG(INFO) << "Received DeletePhysicalPool Rpc response success, " @@ -1128,7 +1069,7 @@ int CurvefsTools::ClearPoolset() { return kRetCodeCommonErr; } else if (response.statuscode() != kTopoErrCodeSuccess && response.statuscode() != - kTopoErrCodeCannotDeleteDefaultPoolset) { + kTopoErrCodeCannotDeleteDefaultPoolset) { LOG(ERROR) << "DeletePoolset Rpc response fail. " << "Message is :" << response.DebugString() << " , PoolsetId = " << it; @@ -1153,8 +1094,7 @@ int CurvefsTools::ClearZone() { cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "DeleteZone, send request: " - << request.DebugString(); + LOG(INFO) << "DeleteZone, send request: " << request.DebugString(); stub.DeleteZone(&cntl, &request, &response, nullptr); @@ -1162,19 +1102,14 @@ int CurvefsTools::ClearZone() { cntl.ErrorCode() == brpc::ELOGOFF) { return kRetCodeRedirectMds; } else if (cntl.Failed()) { - LOG(ERROR) << "DeleteZone, errcorde = " - << response.statuscode() - << ", error content:" - << cntl.ErrorText() - << " , zoneId = " - << it; + LOG(ERROR) << "DeleteZone, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , zoneId = " << it; return kRetCodeCommonErr; } else if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "DeleteZone Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , zoneId = " - << it; + << "Message is :" << response.DebugString() + << " , zoneId = " << it; return response.statuscode(); } else { LOG(INFO) << "Received DeleteZone Rpc success, " @@ -1196,8 +1131,7 @@ int CurvefsTools::ClearServer() { cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "DeleteServer, send request: " - << request.DebugString(); + LOG(INFO) << "DeleteServer, send request: " << request.DebugString(); stub.DeleteServer(&cntl, &request, &response, nullptr); @@ -1205,20 +1139,15 @@ int CurvefsTools::ClearServer() { cntl.ErrorCode() == brpc::ELOGOFF) { return kRetCodeRedirectMds; } else if (cntl.Failed()) { - LOG(ERROR) << "DeleteServer, errcorde = " - << response.statuscode() - << ", error content:" - << cntl.ErrorText() - << " , serverId = " - << it; + LOG(ERROR) << "DeleteServer, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , serverId = " << it; return kRetCodeCommonErr; } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "DeleteServer Rpc response fail. 
" - << "Message is :" - << response.DebugString() - << " , serverId = " - << it; + << "Message is :" << response.DebugString() + << " , serverId = " << it; return response.statuscode(); } else { LOG(INFO) << "Received DeleteServer Rpc response success, " @@ -1254,25 +1183,21 @@ int CurvefsTools::SetChunkServer() { stub.SetChunkServer(&cntl, &request, &response, nullptr); - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { + if (cntl.ErrorCode() == EHOSTDOWN || cntl.ErrorCode() == brpc::ELOGOFF) { return kRetCodeRedirectMds; } else if (cntl.Failed()) { LOG(ERROR) << "SetChunkServerStatusRequest, errcorde = " << response.statuscode() - << ", error content:" - << cntl.ErrorText(); + << ", error content:" << cntl.ErrorText(); return kRetCodeCommonErr; } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "SetChunkServerStatusRequest Rpc response fail. " - << "Message is :" - << response.DebugString(); + << "Message is :" << response.DebugString(); return response.statuscode(); } else { LOG(INFO) << "Received SetChunkServerStatusRequest Rpc " - << "response success, " - << response.DebugString(); + << "response success, " << response.DebugString(); } return 0; } @@ -1327,30 +1252,24 @@ int CurvefsTools::SetLogicalPool() { cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "SetLogicalPool, send request: " - << request.DebugString(); + LOG(INFO) << "SetLogicalPool, send request: " << request.DebugString(); stub.SetLogicalPool(&cntl, &request, &response, nullptr); - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { + if (cntl.ErrorCode() == EHOSTDOWN || cntl.ErrorCode() == brpc::ELOGOFF) { return kRetCodeRedirectMds; } else if (cntl.Failed()) { - LOG(ERROR) << "SetLogicalPool, errcorde = " - << response.statuscode() - << ", error content:" - << cntl.ErrorText(); + LOG(ERROR) << "SetLogicalPool, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText(); return kRetCodeCommonErr; } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "SetLogicalPool Rpc response fail. 
" - << "Message is :" - << response.DebugString(); + << "Message is :" << response.DebugString(); return response.statuscode(); } else { LOG(INFO) << "Received SetLogicalPool Rpc " - << "response success, " - << response.DebugString(); + << "response success, " << response.DebugString(); } return 0; } @@ -1359,9 +1278,7 @@ int CurvefsTools::SetLogicalPool() { } // namespace mds } // namespace curve - - -int main(int argc, char **argv) { +int main(int argc, char** argv) { google::InitGoogleLogging(argv[0]); google::ParseCommandLineFlags(&argc, &argv, false); diff --git a/tools/snaptool/queryclone.py b/tools/snaptool/queryclone.py index a80d746f7a..cde76bc130 100644 --- a/tools/snaptool/queryclone.py +++ b/tools/snaptool/queryclone.py @@ -5,18 +5,21 @@ import common import time -status = ['done', 'cloning', 'recovering', 'cleaning', 'errorCleaning', 'error', 'retrying', 'metaInstalled'] +status = ['done', 'cloning', 'recovering', 'cleaning', + 'errorCleaning', 'error', 'retrying', 'metaInstalled'] filetype = ['file', 'snapshot'] clonestep = ['createCloneFile', 'createCloneMeta', 'createCloneChunk', 'completeCloneMeta', - 'recoverChunk', 'changeOwner', 'renameCloneFile', 'completeCloneFile', 'end'] + 'recoverChunk', 'changeOwner', 'renameCloneFile', 'completeCloneFile', 'end'] tasktype = ["clone", "recover"] islazy = ["notlazy", "lazy"] + def __get_status(args): if args.status: return status.index(args.status) return None + def __get_type(args): if args.clone: return tasktype.index("clone") @@ -24,12 +27,14 @@ def __get_type(args): return tasktype.index("recover") return None + def query_clone_recover(args): - totalCount, records = curltool.get_clone_list_all(args.user, args.src, args.dest, args.taskid, __get_type(args), __get_status(args)) + totalCount, records = curltool.get_clone_list_all( + args.user, args.src, args.dest, args.taskid, __get_type(args), __get_status(args)) if totalCount == 0: print "no record found" return - # 提高打印可读性 + # Improving Print Readability for record in records: code = record['TaskStatus'] record['TaskStatus'] = status[code] @@ -42,15 +47,18 @@ def query_clone_recover(args): code = record['IsLazy'] record['IsLazy'] = islazy[code] time_temp = record['Time'] - record['Time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_temp/1000000)) + record['Time'] = time.strftime( + "%Y-%m-%d %H:%M:%S", time.localtime(time_temp/1000000)) notes = {} heads = ['UUID', 'User', 'TaskType', 'Src', 'File', - 'Time', 'FileType', 'IsLazy', 'NextStep', 'TaskStatus', 'Progress'] + 'Time', 'FileType', 'IsLazy', 'NextStep', 'TaskStatus', 'Progress'] common.printTable(heads, records, notes) + def clone_recover_status(args): - totalCount, records = curltool.get_clone_list_all(args.user, args.src, args.dest, None, __get_type(args)) + totalCount, records = curltool.get_clone_list_all( + args.user, args.src, args.dest, None, __get_type(args)) if totalCount == 0: print "no record found" return @@ -64,17 +72,17 @@ def clone_recover_status(args): clone_statistics[status_name].append(record['UUID']) else: clone_statistics[status_name] = [record['UUID']] - else : + else: if recover_statistics.has_key(status_name): recover_statistics[status_name].append(record['UUID']) else: recover_statistics[status_name] = [record['UUID']] if clone_statistics: print "clone status:" - for k,v in clone_statistics.items(): + for k, v in clone_statistics.items(): print("%s : %d" % (k, len(v))) if recover_statistics: print "recover status:" - for k,v in recover_statistics.items(): - print("%s : %d" % (k, 
len(v))) \ No newline at end of file + for k, v in recover_statistics.items(): + print("%s : %d" % (k, len(v))) From 12fbff384ddce9db8a73b01a39adef1e45ac4d27 Mon Sep 17 00:00:00 2001 From: koko2pp Date: Fri, 13 Oct 2023 22:56:07 +0800 Subject: [PATCH 2/8] style: Apply Clang format to modified files, restore patch files, and rebase all conflicts. --- src/chunkserver/chunkserver.cpp | 12 ++++-------- src/chunkserver/clone_core.cpp | 10 +++++----- src/chunkserver/conf_epoch_file.h | 12 ++++++------ src/common/bitmap.h | 4 ++-- src/tools/chunkserver_client.h | 24 +++++++++++------------ test/integration/cluster_common/cluster.h | 2 +- 6 files changed, 30 insertions(+), 34 deletions(-) diff --git a/src/chunkserver/chunkserver.cpp b/src/chunkserver/chunkserver.cpp index 18205a05d1..101de1ae0a 100644 --- a/src/chunkserver/chunkserver.cpp +++ b/src/chunkserver/chunkserver.cpp @@ -86,8 +86,7 @@ int ChunkServer::Run(int argc, char** argv) { RegisterCurveSegmentLogStorageOrDie(); - // ==========================Load Configuration - // Items===============================// + // ============Load Configuration Items============ // LOG(INFO) << "Loading Configuration."; common::Configuration conf; conf.SetConfigPath(FLAGS_conf.c_str()); @@ -107,8 +106,7 @@ int ChunkServer::Run(int argc, char** argv) { conf.ExposeMetric("chunkserver_config"); curve::common::ExposeCurveVersion(); - // ============================nitialize each - // module==========================// + // ============Initialize each module============ // LOG(INFO) << "Initializing ChunkServer modules"; // Prioritize initializing the metric collection module @@ -418,8 +416,7 @@ int ChunkServer::Run(int argc, char** argv) { } } - // =======================Start each - // module==================================// + // ==========Start each module========== // LOG(INFO) << "ChunkServer starts."; /** * Placing module startup after RPC service startup is mainly to address @@ -436,8 +433,7 @@ int ChunkServer::Run(int argc, char** argv) { LOG_IF(FATAL, !chunkfilePool->StartCleaning()) << "Failed to start file pool clean worker."; - // =======================Wait for the process to - // exit==================================// + // ==========Wait for the process to exit========== // while (!brpc::IsAskedToQuit()) { bthread_usleep(1000000L); } diff --git a/src/chunkserver/clone_core.cpp b/src/chunkserver/clone_core.cpp index af05a01646..99eb260a95 100644 --- a/src/chunkserver/clone_core.cpp +++ b/src/chunkserver/clone_core.cpp @@ -223,11 +223,11 @@ int CloneCore::HandleReadRequest(std::shared_ptr readRequest, CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo); /* - *Chunk exists: Check and analyze Bitmap to determine if it can be read - *locally Chunk does not exist: if it contains clone information, it will be - *read from clonesource, otherwise an error will be returned Because the - *upper level ReadChunkRequest::OnApply has already processed NoExist And - *the situation where cloneinfo does not exist + * Chunk exists: check the bitmap to decide whether the read can be served + * locally. Chunk does not exist: if the chunk carries clone information, + * read from the clone source; otherwise return an error, because the + * upper level ReadChunkRequest::OnApply has already handled NoExist and + * the case where clone info is absent. */ switch (errorCode) { case CSErrorCode::Success:
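The block comment above is really a small decision table for the clone read path. A rough sketch of the control flow it describes, using simplified, hypothetical types (illustrative only; the real logic lives in CloneCore::HandleReadRequest and works on bitmap ranges rather than a single flag):

// Decide how a read on a possibly-cloned chunk is served.
enum class ReadPath { kLocal, kFromCloneSource, kError };

ReadPath DecideReadPath(bool chunkExists, bool writtenRangeCoversRead,
                        bool hasCloneInfo) {
    if (chunkExists) {
        // Regions already written (per the bitmap) are readable locally;
        // unwritten regions of a clone chunk come from the clone source.
        return writtenRangeCoversRead ? ReadPath::kLocal
                                      : ReadPath::kFromCloneSource;
    }
    // Missing chunk: legal only when clone info is present, because the
    // upper-level ReadChunkRequest::OnApply already handled plain NoExist.
    return hasCloneInfo ? ReadPath::kFromCloneSource : ReadPath::kError;
}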
diff --git a/src/chunkserver/conf_epoch_file.h b/src/chunkserver/conf_epoch_file.h index 4d2513fc2b..979dd90032 100644 --- a/src/chunkserver/conf_epoch_file.h +++ b/src/chunkserver/conf_epoch_file.h @@ -59,12 +59,12 @@ class ConfEpochFile { /** * Serialize configuration version information and save it to a snapshot - *file. The format is as follows: The 'head' indicates the length and is in - *binary format. The rest is in text format for easy viewing when necessary. - *'sync' ensures data persistence. | head | - *Configuration version information | | 8 bytes size_t | uint32_t | - *Variable length text | | length | crc32 | logic pool id - *| copyset id | epoch| The persistence above is separated by ':' + * file. The format is as follows: the 'head' is binary and indicates the + * length; the rest is text for easy viewing when necessary, and 'sync' + * ensures data persistence. Layout: + * | head: 8-byte size_t length | uint32_t crc32 | + * | configuration version info as variable-length text: | + * | logic pool id:copyset id:epoch, fields separated by ':' | * @param path: File path * @param logicPoolID: Logical Pool ID * @param copysetID: Copy group ID diff --git a/src/common/bitmap.h b/src/common/bitmap.h index f4b6f76ce7..34cf72edbd 100644 --- a/src/common/bitmap.h +++ b/src/common/bitmap.h @@ -57,8 +57,8 @@ class Bitmap { explicit Bitmap(uint32_t bits); /** * Constructor when initializing from an existing snapshot file - *The constructor will create a new bitmap internally, and then use the - *bitmap memcpy in the parameters + * The constructor creates a new bitmap internally and then memcpy's the + * bitmap supplied in the parameters * @param bits: Bitmap bits * @param bitmap: An externally provided bitmap for initialization */ diff --git a/src/tools/chunkserver_client.h b/src/tools/chunkserver_client.h index 400755cb30..6c6e006e31 100644 --- a/src/tools/chunkserver_client.h +++ b/src/tools/chunkserver_client.h @@ -80,22 +80,22 @@ class ChunkServerClient { virtual bool CheckChunkServerOnline(); /** - * @brief calls the GetCopysetStatus interface of chunkserver - * @param request Query the request for the copyset - * @param response The response returned contains detailed information about - * the replication group, which is valid when the return value is 0 - * @return returns 0 for success, -1 for failure - */ + * @brief Calls the GetCopysetStatus interface of the chunkserver + * @param request The query request for the copyset + * @param response The returned response with detailed information + * about the replication group; valid when the return value is 0 + * @return returns 0 for success, -1 for failure + */ virtual int GetCopysetStatus(const CopysetStatusRequest& request, CopysetStatusResponse* response); /** - * @brief Get the hash value of chunks from chunkserver - * @param chunk The chunk to query - * @param[out] The hash value chunkHash chunk, valid when the return value - * is 0 - * @return returns 0 for success, -1 for failure - */ + * @brief Get the hash value of a chunk from the chunkserver + * @param chunk The chunk to query + * @param[out] chunkHash The hash value of the chunk; valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure + */ virtual int GetChunkHash(const Chunk& chunk, std::string* chunkHash); private: diff --git a/test/integration/cluster_common/cluster.h b/test/integration/cluster_common/cluster.h index 71777d5241..f663594f4a 100644 --- a/test/integration/cluster_common/cluster.h +++ b/test/integration/cluster_common/cluster.h @@ -120,7 +120,7 @@ class CurveCluster { /** * StartSingleMDS starts an mds * If need
chunkservers with different IPs, please set the ipPort to - 192.168.200.1:XXXX + * 192.168.200.1:XXXX * * @param[in] id mdsId * @param[in] ipPort specifies the ipPort of the mds From 4fe0ace2d5f115ab4fa819748d11ef55b9fb3576 Mon Sep 17 00:00:00 2001 From: koko2pp Date: Fri, 13 Oct 2023 19:29:03 +0800 Subject: [PATCH 3/8] chore: prepare for rebase --- WORKSPACE | 4 +- build.sh | 10 +- conf/chunkserver.conf | 144 +- conf/chunkserver.conf.example | 145 +- conf/client.conf | 122 +- conf/cs_client.conf | 116 +- conf/mds.conf | 170 +- conf/py_client.conf | 112 +- conf/snap_client.conf | 116 +- conf/snapshot_clone_server.conf | 80 +- conf/tools.conf | 10 +- curve-ansible/client.ini | 2 +- .../wait_copysets_status_healthy.yml | 2 +- curve-ansible/group_vars/mds.yml | 2 +- .../roles/generate_config/defaults/main.yml | 20 +- .../templates/chunkserver.conf.j2 | 138 +- .../generate_config/templates/client.conf.j2 | 116 +- .../generate_config/templates/mds.conf.j2 | 166 +- .../templates/nebd-client.conf.j2 | 22 +- .../templates/nebd-server.conf.j2 | 10 +- .../templates/snapshot_clone_server.conf.j2 | 80 +- .../generate_config/templates/tools.conf.j2 | 10 +- .../install_package/files/disk_uuid_repair.py | 103 +- .../templates/chunkserver_ctl.sh.j2 | 24 +- .../templates/chunkserver_deploy.sh.j2 | 32 +- .../templates/etcd-daemon.sh.j2 | 44 +- .../templates/mds-daemon.sh.j2 | 52 +- .../install_package/templates/nebd-daemon.j2 | 8 +- .../templates/snapshot-daemon.sh.j2 | 52 +- .../roles/install_package/vars/main.yml | 2 +- .../roles/restart_service/defaults/main.yml | 2 +- .../tasks/include/restart_mds.yml | 2 +- .../tasks/include/restart_snapshotclone.yml | 2 +- .../roles/restart_service/tasks/main.yml | 2 +- .../roles/restart_service/vars/main.yml | 2 +- .../vars/main.yml | 2 +- .../tasks/include/start_chunkserver.yml | 2 +- .../roles/start_service/tasks/main.yml | 2 +- .../roles/stop_service/tasks/main.yml | 2 +- curve-ansible/rolling_update_curve.yml | 14 +- curve-ansible/server.ini | 14 +- curvefs/conf/curvebs_client.conf | 120 +- curvefs/monitor/grafana-report.py | 48 +- .../grafana/provisioning/dashboards/mds.json | 8 +- .../metaserverclient/metaserver_client.cpp | 81 +- .../src/metaserver/copyset/conf_epoch_file.h | 36 +- curvefs/src/metaserver/inflight_throttle.h | 14 +- .../test/mds/schedule/coordinator_test.cpp | 112 +- .../test/mds/schedule/operatorStep_test.cpp | 72 +- .../mds/schedule/recoverScheduler_test.cpp | 36 +- .../mds/schedule/scheduleMetrics_test.cpp | 40 +- .../scheduleService/scheduleService_test.cpp | 15 +- curvefs/test/volume/bitmap_allocator_test.cpp | 7 +- curvefs_python/cbd_client.h | 12 +- curvefs_python/curve_type.h | 109 +- curvefs_python/curvefs_tool.py | 85 +- curvefs_python/libcurvefs.h | 21 +- curvefs_python/test.py | 7 +- curvesnapshot_python/libcurveSnapshot.cpp | 190 +- curvesnapshot_python/libcurveSnapshot.h | 246 +- .../local/chunkserver/conf/chunkserver.conf.0 | 4 +- .../local/chunkserver/conf/chunkserver.conf.1 | 4 +- .../local/chunkserver/conf/chunkserver.conf.2 | 4 +- include/chunkserver/chunkserver_common.h | 74 +- include/client/libcurve.h | 408 +- include/etcdclient/etcdclient.h | 133 +- .../nebd-package/etc/nebd/nebd-client.conf | 22 +- .../nebd-package/etc/nebd/nebd-server.conf | 10 +- monitor/grafana-report.py | 48 +- monitor/grafana/dashboards/chunkserver.json | 104 +- monitor/grafana/dashboards/client.json | 34 +- monitor/grafana/dashboards/etcd.json | 2 +- monitor/grafana/dashboards/mds.json | 80 +- monitor/grafana/dashboards/report.json | 50 +- 
.../dashboards/snapshotcloneserver.json | 26 +- .../provisioning/dashboards/chunkserver.json | 104 +- .../provisioning/dashboards/client.json | 34 +- .../grafana/provisioning/dashboards/etcd.json | 2 +- .../grafana/provisioning/dashboards/mds.json | 80 +- .../provisioning/dashboards/report.json | 50 +- .../dashboards/snapshotcloneserver.json | 26 +- nebd/etc/nebd/nebd-client.conf | 22 +- nebd/etc/nebd/nebd-server.conf | 10 +- nebd/nebd-package/usr/bin/nebd-daemon | 8 +- nebd/src/common/configuration.cpp | 68 +- nebd/src/common/configuration.h | 127 +- nebd/src/common/crc32.h | 32 +- nebd/src/common/file_lock.h | 15 +- nebd/src/common/name_lock.h | 50 +- nebd/src/common/stringstatus.h | 34 +- nebd/src/common/timeutility.h | 12 +- nebd/src/part1/async_request_closure.cpp | 22 +- nebd/src/part1/async_request_closure.h | 81 +- nebd/src/part1/heartbeat_manager.h | 30 +- nebd/src/part1/libnebd.cpp | 36 +- nebd/src/part1/libnebd.h | 150 +- nebd/src/part1/libnebd_file.h | 88 +- nebd/src/part1/nebd_client.cpp | 141 +- nebd/src/part1/nebd_client.h | 122 +- nebd/src/part1/nebd_common.h | 34 +- nebd/src/part1/nebd_metacache.h | 33 +- nebd/src/part2/define.h | 48 +- nebd/src/part2/file_entity.cpp | 87 +- nebd/src/part2/file_entity.h | 162 +- nebd/src/part2/file_manager.cpp | 12 +- nebd/src/part2/file_manager.h | 134 +- nebd/src/part2/heartbeat_manager.cpp | 23 +- nebd/src/part2/heartbeat_manager.h | 77 +- nebd/src/part2/main.cpp | 11 +- nebd/src/part2/metafile_manager.cpp | 70 +- nebd/src/part2/metafile_manager.h | 50 +- nebd/src/part2/nebd_server.cpp | 47 +- nebd/src/part2/nebd_server.h | 60 +- nebd/src/part2/request_executor.h | 16 +- nebd/src/part2/request_executor_curve.h | 70 +- nebd/src/part2/util.h | 6 +- nebd/test/common/configuration_test.cpp | 27 +- nebd/test/common/test_name_lock.cpp | 36 +- .../test/part1/heartbeat_manager_unittest.cpp | 32 +- nebd/test/part1/nebd_client_unittest.cpp | 144 +- nebd/test/part2/file_manager_unittest.cpp | 239 +- .../test/part2/heartbeat_manager_unittest.cpp | 57 +- nebd/test/part2/heartbeat_service_test.cpp | 20 +- nebd/test/part2/metafile_manager_test.cpp | 101 +- nebd/test/part2/test_nebd_server.cpp | 34 +- .../part2/test_request_executor_curve.cpp | 141 +- proto/chunk.proto | 70 +- proto/cli.proto | 6 +- proto/cli2.proto | 16 +- proto/common.proto | 12 +- proto/copyset.proto | 42 +- proto/heartbeat.proto | 72 +- proto/nameserver2.proto | 114 +- proto/schedule.proto | 2 +- proto/topology.proto | 2 +- robot/Resources/keywords/deploy.py | 501 +- robot/Resources/keywords/fault_inject.py | 1518 +++--- robot/Resources/keywords/snapshot_operate.py | 76 +- robot/curve_choas.txt | 10 +- robot/curve_robot.txt | 38 +- src/chunkserver/chunk_closure.cpp | 28 +- src/chunkserver/chunk_closure.h | 55 +- src/chunkserver/chunk_service.cpp | 303 +- src/chunkserver/chunk_service.h | 103 +- src/chunkserver/chunk_service_closure.cpp | 103 +- src/chunkserver/chunk_service_closure.h | 65 +- src/chunkserver/chunkserver.cpp | 2033 ++++---- src/chunkserver/chunkserver.h | 97 +- src/chunkserver/chunkserver_helper.cpp | 20 +- src/chunkserver/chunkserver_main.cpp | 2 +- src/chunkserver/chunkserver_metrics.cpp | 121 +- src/chunkserver/chunkserver_metrics.h | 355 +- src/chunkserver/cli.h | 54 +- src/chunkserver/cli2.cpp | 132 +- src/chunkserver/cli2.h | 83 +- src/chunkserver/clone_copyer.h | 85 +- src/chunkserver/clone_core.cpp | 319 +- src/chunkserver/clone_core.h | 155 +- src/chunkserver/clone_manager.cpp | 24 +- src/chunkserver/clone_manager.h | 59 +- src/chunkserver/clone_task.h | 28 
+- src/chunkserver/conf_epoch_file.cpp | 38 +- src/chunkserver/conf_epoch_file.h | 63 +- src/chunkserver/config_info.h | 96 +- src/chunkserver/copyset_node.cpp | 503 +- src/chunkserver/copyset_node.h | 337 +- src/chunkserver/copyset_node_manager.cpp | 195 +- src/chunkserver/copyset_node_manager.h | 200 +- src/chunkserver/copyset_service.cpp | 92 +- src/chunkserver/copyset_service.h | 39 +- src/chunkserver/heartbeat.cpp | 1194 ++--- src/chunkserver/heartbeat.h | 110 +- src/chunkserver/heartbeat_helper.cpp | 68 +- src/chunkserver/heartbeat_helper.h | 71 +- src/chunkserver/inflight_throttle.h | 17 +- src/chunkserver/op_request.cpp | 494 +- src/chunkserver/op_request.h | 341 +- src/chunkserver/passive_getfn.h | 112 +- .../raftsnapshot/curve_file_adaptor.h | 7 +- .../raftsnapshot/curve_file_service.cpp | 75 +- .../raftsnapshot/curve_filesystem_adaptor.cpp | 86 +- .../raftsnapshot/curve_filesystem_adaptor.h | 145 +- .../curve_snapshot_attachment.cpp | 21 +- .../raftsnapshot/curve_snapshot_attachment.h | 57 +- .../raftsnapshot/curve_snapshot_copier.cpp | 126 +- .../raftsnapshot/curve_snapshot_copier.h | 8 +- .../raftsnapshot/curve_snapshot_file_reader.h | 41 +- src/chunkserver/raftsnapshot/define.h | 9 +- src/chunkserver/register.cpp | 28 +- src/chunkserver/register.h | 22 +- src/chunkserver/trash.cpp | 119 +- src/chunkserver/trash.h | 328 +- src/client/chunk_closure.cpp | 488 +- src/client/chunk_closure.h | 150 +- src/client/client_common.h | 126 +- src/client/client_metric.h | 181 +- src/client/config_info.h | 204 +- src/client/copyset_client.cpp | 198 +- src/client/copyset_client.h | 246 +- src/client/file_instance.cpp | 82 +- src/client/file_instance.h | 122 +- src/client/inflight_controller.h | 57 +- src/client/io_condition_varaiable.h | 35 +- src/client/io_tracker.cpp | 119 +- src/client/io_tracker.h | 253 +- src/client/iomanager.h | 37 +- src/client/iomanager4chunk.h | 168 +- src/client/iomanager4file.cpp | 44 +- src/client/iomanager4file.h | 187 +- src/client/lease_executor.cpp | 15 +- src/client/lease_executor.h | 138 +- src/client/libcurve_file.cpp | 310 +- src/client/libcurve_file.h | 257 +- src/client/libcurve_snapshot.h | 547 ++- src/client/mds_client.cpp | 553 ++- src/client/mds_client.h | 584 +-- src/client/mds_client_base.h | 591 ++- src/client/metacache.cpp | 105 +- src/client/metacache.h | 238 +- src/client/metacache_struct.h | 119 +- src/client/request_closure.h | 81 +- src/client/request_context.h | 53 +- src/client/request_scheduler.cpp | 58 +- src/client/request_scheduler.h | 122 +- src/client/request_sender.h | 210 +- src/client/request_sender_manager.cpp | 9 +- src/client/request_sender_manager.h | 25 +- src/client/service_helper.cpp | 80 +- src/client/service_helper.h | 81 +- src/client/splitor.h | 140 +- src/client/unstable_helper.cpp | 8 +- src/client/unstable_helper.h | 39 +- src/common/authenticator.h | 29 +- src/common/bitmap.cpp | 139 +- src/common/bitmap.h | 171 +- src/common/channel_pool.h | 22 +- .../concurrent/bounded_blocking_queue.h | 38 +- src/common/concurrent/concurrent.h | 51 +- src/common/concurrent/count_down_event.h | 50 +- src/common/concurrent/task_thread_pool.h | 67 +- src/common/configuration.cpp | 125 +- src/common/configuration.h | 175 +- src/common/crc32.h | 32 +- src/common/curve_define.h | 39 +- src/common/define.h | 69 +- src/common/fs_util.h | 10 +- src/common/interruptible_sleeper.h | 22 +- src/common/location_operator.cpp | 32 +- src/common/location_operator.h | 46 +- src/common/net_common.h | 20 +- src/common/s3_adapter.cpp | 1384 +++--- 
src/common/s3_adapter.h | 201 +- .../snapshotclone/snapshotclone_define.cpp | 12 +- .../snapshotclone/snapshotclone_define.h | 74 +- src/common/stringstatus.h | 35 +- src/common/timeutility.h | 22 +- src/common/uuid.h | 39 +- src/common/wait_interval.h | 19 +- src/fs/ext4_filesystem_impl.cpp | 117 +- src/fs/local_filesystem.h | 209 +- src/kvstorageclient/etcd_client.h | 100 +- src/leader_election/leader_election.cpp | 48 +- src/leader_election/leader_election.h | 42 +- src/mds/nameserver2/clean_core.cpp | 98 +- src/mds/nameserver2/clean_core.h | 39 +- src/mds/nameserver2/clean_manager.h | 33 +- src/mds/nameserver2/clean_task.h | 90 +- src/mds/nameserver2/clean_task_manager.cpp | 38 +- src/mds/nameserver2/clean_task_manager.h | 54 +- src/snapshotcloneserver/clone/clone_core.cpp | 674 ++- src/snapshotcloneserver/clone/clone_core.h | 560 +-- .../clone/clone_service_manager.cpp | 341 +- .../clone/clone_service_manager.h | 411 +- src/snapshotcloneserver/clone/clone_task.h | 127 +- .../clone/clone_task_manager.cpp | 97 +- .../clone/clone_task_manager.h | 115 +- src/snapshotcloneserver/common/config.h | 49 +- .../common/curvefs_client.h | 595 +-- .../common/snapshotclone_info.h | 413 +- .../common/snapshotclone_meta_store.h | 122 +- .../common/snapshotclone_meta_store_etcd.h | 59 +- .../common/snapshotclone_metric.h | 95 +- src/snapshotcloneserver/common/task.h | 32 +- src/snapshotcloneserver/common/task_info.h | 88 +- src/snapshotcloneserver/common/thread_pool.h | 36 +- src/snapshotcloneserver/main.cpp | 19 +- .../snapshot/snapshot_core.cpp | 450 +- .../snapshot/snapshot_core.h | 391 +- .../snapshot/snapshot_data_store.cpp | 42 +- .../snapshot/snapshot_data_store.h | 282 +- .../snapshot/snapshot_data_store_s3.h | 81 +- .../snapshot/snapshot_service_manager.cpp | 192 +- .../snapshot/snapshot_service_manager.h | 264 +- .../snapshot/snapshot_task.cpp | 95 +- .../snapshot/snapshot_task.h | 208 +- .../snapshot/snapshot_task_manager.cpp | 37 +- .../snapshot/snapshot_task_manager.h | 96 +- .../snapshotclone_server.cpp | 726 +-- .../snapshotclone_server.h | 258 +- .../snapshotclone_service.cpp | 486 +- .../snapshotclone_service.h | 73 +- src/tools/chunkserver_client.cpp | 30 +- src/tools/chunkserver_client.h | 56 +- src/tools/chunkserver_tool_factory.h | 15 +- src/tools/common.cpp | 6 +- src/tools/common.h | 9 +- src/tools/consistency_check.cpp | 78 +- src/tools/consistency_check.h | 131 +- src/tools/copyset_check.cpp | 129 +- src/tools/copyset_check.h | 99 +- src/tools/copyset_check_core.cpp | 325 +- src/tools/copyset_check_core.h | 486 +- src/tools/curve_cli.cpp | 154 +- src/tools/curve_cli.h | 88 +- src/tools/curve_format_main.cpp | 112 +- src/tools/curve_meta_tool.cpp | 37 +- src/tools/curve_meta_tool.h | 44 +- src/tools/curve_tool_define.h | 21 +- src/tools/curve_tool_factory.h | 32 +- src/tools/curve_tool_main.cpp | 53 +- src/tools/etcd_client.h | 29 +- src/tools/mds_client.cpp | 312 +- src/tools/mds_client.h | 1057 ++-- src/tools/metric_client.cpp | 25 +- src/tools/metric_client.h | 51 +- src/tools/metric_name.h | 250 +- src/tools/namespace_tool.cpp | 143 +- src/tools/namespace_tool.h | 71 +- src/tools/namespace_tool_core.cpp | 59 +- src/tools/namespace_tool_core.h | 158 +- src/tools/raft_log_tool.cpp | 90 +- src/tools/raft_log_tool.h | 86 +- src/tools/schedule_tool.cpp | 54 +- src/tools/schedule_tool.h | 30 +- src/tools/snapshot_check.h | 42 +- src/tools/snapshot_clone_client.cpp | 41 +- src/tools/snapshot_clone_client.h | 61 +- src/tools/status_tool.cpp | 2388 +++++---- src/tools/status_tool.h 
| 145 +- src/tools/version_tool.cpp | 20 +- src/tools/version_tool.h | 107 +- test/chunkserver/braft_cli_service2_test.cpp | 195 +- test/chunkserver/braft_cli_service_test.cpp | 80 +- test/chunkserver/chunk_service_test.cpp | 78 +- test/chunkserver/chunk_service_test2.cpp | 145 +- test/chunkserver/chunkserver_helper_test.cpp | 10 +- test/chunkserver/chunkserver_service_test.cpp | 30 +- .../chunkserver/chunkserver_snapshot_test.cpp | 936 ++-- test/chunkserver/chunkserver_test_util.cpp | 208 +- test/chunkserver/chunkserver_test_util.h | 192 +- test/chunkserver/cli2_test.cpp | 352 +- test/chunkserver/cli_test.cpp | 238 +- test/chunkserver/client.cpp | 59 +- test/chunkserver/clone/clone_copyer_test.cpp | 142 +- test/chunkserver/clone/clone_core_test.cpp | 389 +- test/chunkserver/clone/clone_manager_test.cpp | 81 +- test/chunkserver/clone/op_request_test.cpp | 743 ++- test/chunkserver/copyset_epoch_test.cpp | 101 +- .../chunkserver/copyset_node_manager_test.cpp | 122 +- test/chunkserver/copyset_node_test.cpp | 521 +- test/chunkserver/copyset_service_test.cpp | 114 +- .../datastore/datastore_mock_unittest.cpp | 2865 ++++------- .../datastore/file_helper_unittest.cpp | 35 +- .../datastore/filepool_mock_unittest.cpp | 1719 +++---- .../datastore/filepool_unittest.cpp | 25 +- test/chunkserver/fake_datastore.h | 73 +- test/chunkserver/heartbeat_helper_test.cpp | 75 +- test/chunkserver/heartbeat_test.cpp | 104 +- test/chunkserver/heartbeat_test_common.cpp | 127 +- test/chunkserver/heartbeat_test_common.h | 149 +- test/chunkserver/heartbeat_test_main.cpp | 27 +- test/chunkserver/inflight_throttle_test.cpp | 9 +- test/chunkserver/metrics_test.cpp | 175 +- ...curve_filesystem_adaptor_mock_unittest.cpp | 86 +- .../curve_filesystem_adaptor_unittest.cpp | 62 +- .../curve_snapshot_attachment_test.cpp | 40 +- ...raftsnapshot_chunkfilepool_integration.cpp | 295 +- test/chunkserver/server.cpp | 29 +- test/chunkserver/trash_test.cpp | 98 +- test/client/client_common_unittest.cpp | 19 +- .../client_mdsclient_metacache_unittest.cpp | 587 ++- test/client/client_metric_test.cpp | 69 +- test/client/client_session_unittest.cpp | 65 +- test/client/client_unstable_helper_test.cpp | 62 +- test/client/client_userinfo_unittest.cpp | 228 +- test/client/copyset_client_test.cpp | 2287 ++++----- test/client/fake/client_workflow_test.cpp | 73 +- .../client/fake/client_workflow_test4snap.cpp | 52 +- test/client/fake/fakeChunkserver.h | 174 +- test/client/fake/fakeMDS.h | 649 ++- test/client/inflight_rpc_control_test.cpp | 30 +- test/client/iotracker_splitor_unittest.cpp | 268 +- test/client/lease_executor_test.cpp | 11 +- test/client/libcbd_libcurve_test.cpp | 45 +- test/client/libcurve_interface_unittest.cpp | 197 +- test/client/mds_failover_test.cpp | 145 +- test/client/mock/mock_chunkservice.h | 163 +- test/client/request_scheduler_test.cpp | 153 +- test/client/request_sender_test.cpp | 17 +- test/common/bitmap_test.cpp | 30 +- test/common/channel_pool_test.cpp | 12 +- test/common/configuration_test.cpp | 40 +- test/common/count_down_event_test.cpp | 35 +- test/common/lru_cache_test.cpp | 66 +- test/common/task_thread_pool_test.cpp | 50 +- test/common/test_name_lock.cpp | 36 +- test/failpoint/failpoint_test.cpp | 51 +- test/fs/ext4_filesystem_test.cpp | 238 +- .../chunkserver/chunkserver_basic_test.cpp | 276 +- .../chunkserver/chunkserver_clone_recover.cpp | 391 +- .../chunkserver_concurrent_test.cpp | 868 ++-- .../datastore/datastore_basic_test.cpp | 108 +- .../datastore/datastore_clone_case_test.cpp | 194 +- 
.../datastore/datastore_concurrency_test.cpp | 17 +- .../datastore/datastore_exception_test.cpp | 697 +-- .../datastore/datastore_integration_base.h | 32 +- .../datastore/datastore_integration_test.cpp | 252 +- .../datastore/datastore_restart_test.cpp | 397 +- .../datastore_snapshot_case_test.cpp | 211 +- .../datastore/datastore_stress_test.cpp | 24 +- .../client/chunkserver_exception_test.cpp | 333 +- .../client/common/file_operation.cpp | 18 +- .../client/common/file_operation.h | 13 +- .../integration/client/mds_exception_test.cpp | 684 +-- .../unstable_chunkserver_exception_test.cpp | 203 +- test/integration/cluster_common/cluster.cpp | 171 +- test/integration/cluster_common/cluster.h | 369 +- .../cluster_common/cluster_basic_test.cpp | 190 +- .../integration/cluster_common/mds.basic.conf | 158 +- test/integration/common/chunkservice_op.cpp | 152 +- test/integration/common/chunkservice_op.h | 223 +- test/integration/common/config_generator.h | 2 +- test/integration/common/peer_cluster.cpp | 432 +- test/integration/common/peer_cluster.h | 391 +- test/integration/heartbeat/common.cpp | 50 +- test/integration/heartbeat/common.h | 307 +- .../heartbeat/heartbeat_basic_test.cpp | 4360 ++++++++--------- .../heartbeat/heartbeat_exception_test.cpp | 111 +- .../raft/raft_config_change_test.cpp | 2308 +++------ .../raft/raft_log_replication_test.cpp | 1202 ++--- test/integration/raft/raft_snapshot_test.cpp | 565 +-- test/integration/raft/raft_vote_test.cpp | 1394 ++---- .../fake_curvefs_client.cpp | 213 +- .../snapshotcloneserver/fake_curvefs_client.h | 186 +- .../snapshotcloneserver_common_test.cpp | 517 +- .../snapshotcloneserver_concurrent_test.cpp | 183 +- .../snapshotcloneserver_exception_test.cpp | 999 ++-- .../snapshotcloneserver_module.cpp | 48 +- .../snapshotcloneserver_recover_test.cpp | 758 ++- .../snapshotcloneserver_test.cpp | 34 +- test/kvstorageclient/etcdclient_test.cpp | 123 +- .../chunkserver_healthy_checker_test.cpp | 85 +- test/mds/heartbeat/heartbeat_manager_test.cpp | 90 +- .../alloc_statistic_helper_test.cpp | 73 +- .../allocstatistic/alloc_statistic_test.cpp | 22 +- test/mds/nameserver2/clean_core_test.cpp | 120 +- test/mds/nameserver2/curvefs_test.cpp | 2879 ++++++----- test/mds/nameserver2/file_lock_test.cpp | 64 +- test/mds/nameserver2/file_record_test.cpp | 34 +- .../nameserver2/namespace_service_test.cpp | 419 +- test/mds/schedule/coordinator_test.cpp | 314 +- test/mds/schedule/leaderScheduler_test.cpp | 261 +- test/mds/schedule/operatorStep_test.cpp | 106 +- .../mds/schedule/rapidLeaderSheduler_test.cpp | 71 +- test/mds/schedule/recoverScheduler_test.cpp | 129 +- test/mds/schedule/scheduleMetrics_test.cpp | 284 +- .../scheduleService/scheduleService_test.cpp | 37 +- .../schedule/schedulerPOC/scheduler_poc.cpp | 420 +- test/mds/schedule/scheduler_helper_test.cpp | 248 +- test/mds/server/mds_test.cpp | 63 +- test/mds/topology/test_topology.cpp | 1323 ++--- .../test_topology_chunk_allocator.cpp | 367 +- test/mds/topology/test_topology_metric.cpp | 265 +- test/resources.list | 48 +- test/snapshotcloneserver/test_clone_core.cpp | 802 ++- .../test_curvefs_client.cpp | 66 +- .../test_snapshot_core.cpp | 1519 ++---- .../test_snapshot_service_manager.cpp | 593 +-- test/tools/chunkserver_client_test.cpp | 51 +- test/tools/config/data_check.conf | 102 +- test/tools/copyset_check_core_test.cpp | 489 +- test/tools/copyset_check_test.cpp | 164 +- test/tools/curve_cli_test.cpp | 249 +- test/tools/curve_meta_tool_test.cpp | 52 +- test/tools/data_consistency_check_test.cpp | 147 +- 
test/tools/etcd_client_test.cpp | 53 +- test/tools/mds_client_test.cpp | 872 ++-- test/tools/metric_client_test.cpp | 95 +- test/tools/namespace_tool_core_test.cpp | 198 +- test/tools/namespace_tool_test.cpp | 193 +- test/tools/raft_log_tool_test.cpp | 36 +- test/tools/segment_parser_test.cpp | 68 +- test/tools/snapshot_clone_client_test.cpp | 83 +- test/tools/status_tool_test.cpp | 510 +- test/tools/version_tool_test.cpp | 241 +- test/util/config_generator.h | 46 +- thirdparties/etcdclient/etcdclient.go | 32 +- tools/curvefsTool.cpp | 351 +- tools/snaptool/queryclone.py | 30 +- 499 files changed, 45103 insertions(+), 50977 deletions(-) diff --git a/WORKSPACE b/WORKSPACE index a423f1c46a..ff394ed660 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -96,8 +96,8 @@ bind( actual = "@com_google_googletest//:gtest", ) -#Import the glog files. -# brpc内BUILD文件在依赖glog时, 直接指定的依赖是"@com_github_google_glog//:glog" +# Import the glog files. +# When the BUILD file in brpc relies on glog, the direct specified dependency is "@com_github_google_glog//:glog" git_repository( name = "com_github_google_glog", remote = "https://github.com/google/glog", diff --git a/build.sh b/build.sh index 9d714c28d6..f9e880d131 100644 --- a/build.sh +++ b/build.sh @@ -17,7 +17,7 @@ # dir=`pwd` -#step1 清除生成的目录和文件 +# step1 Clear generated directories and files bazel clean rm -rf curvefs_python/BUILD rm -rf curvefs_python/tmplib/ @@ -29,8 +29,8 @@ then exit fi -#step2 获取tag版本和git提交版本信息 -#获取tag版本 +# step2 Obtaining Tag Version and Git Submission Version Information +# Get Tag Version tag_version=`git status | grep -w "HEAD detached at" | awk '{print $NF}' | awk -F"v" '{print $2}'` if [ -z ${tag_version} ] then @@ -38,7 +38,7 @@ then tag_version=9.9.9 fi -#获取git提交版本信息 +# Obtain git submission version information commit_id=`git show --abbrev-commit HEAD|head -n 1|awk '{print $2}'` if [ "$1" = "debug" ] then @@ -50,7 +50,7 @@ fi curve_version=${tag_version}+${commit_id}${debug} -#step3 执行编译 +# step3 Execute Compilation # check bazel verion, bazel vesion must = 4.2.2 bazel_version=`bazel version | grep "Build label" | awk '{print $3}'` if [ -z ${bazel_version} ] diff --git a/conf/chunkserver.conf b/conf/chunkserver.conf index 19457b3c18..ebfcddb584 100644 --- a/conf/chunkserver.conf +++ b/conf/chunkserver.conf @@ -1,17 +1,17 @@ # # Global settings # -# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.ip=127.0.0.1 # __CURVEADM_TEMPLATE__ ${service_addr} __CURVEADM_TEMPLATE__ global.port=8200 # __CURVEADM_TEMPLATE__ ${service_port} __CURVEADM_TEMPLATE__ global.subnet=127.0.0.0/24 global.enable_external_server=true global.external_ip=127.0.0.1 # __CURVEADM_TEMPLATE__ ${service_external_addr} __CURVEADM_TEMPLATE__ global.external_subnet=127.0.0.0/24 -# chunk大小,一般16MB +# Chunk size, usually 16MB # it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true global.chunk_size=16777216 -# chunk 元数据页大小,一般4KB +# Chunk metadata page size, usually 4KB # it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true global.meta_page_size=4096 # chunk's block size, IO requests must align with it, supported value is |512| and |4096| @@ -21,40 +21,40 @@ global.meta_page_size=4096 # it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true global.block_size=4096 -# clone chunk允许的最长location长度 +# The maximum allowed location length for clone chunks global.location_limit=3000 # # MDS settings # 
-#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777
+# Multiple MDS addresses are supported, separated by commas: 127.0.0.1:6666,127.0.0.1:7777
 mds.listen.addr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__
-# 向mds注册的最大重试次数
+# Maximum number of retries when registering with MDS
 mds.register_retries=100
-# 向mds注册的rpc超时时间,一般1000ms
+# RPC timeout for registering with MDS, typically 1000ms
 mds.register_timeout=1000
-# 向mds发送心跳的间隔,一般10s
+# Interval for sending heartbeats to MDS, usually 10s
 mds.heartbeat_interval=10
-# 向mds发送心跳的rpc超时间,一般1000ms
+# RPC timeout for sending heartbeats to MDS, usually 1000ms
 mds.heartbeat_timeout=5000
 #
 # Chunkserver settings
 #
-# chunkserver主目录
+# Chunkserver home directory
 chunkserver.stor_uri=local://./0/ # __CURVEADM_TEMPLATE__ local://${prefix}/data __CURVEADM_TEMPLATE__
-# chunkserver元数据文件
+# Chunkserver metadata file
 chunkserver.meta_uri=local://./0/chunkserver.dat # __CURVEADM_TEMPLATE__ local://${prefix}/data/chunkserver.dat __CURVEADM_TEMPLATE__
-# disk类型
+# Disk type
 chunkserver.disk_type=nvme
-# raft内部install snapshot带宽上限,一般20MB
+# Bandwidth limit for raft-internal install snapshot, usually 20MB
 chunkserver.snapshot_throttle_throughput_bytes=20971520
-# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB,
-# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个
-# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而
-# 不是20MB的带宽
+# Check cycles allow finer-grained bandwidth control. Taking
+# snapshotThroughputBytes=100MB and check cycles=10 as an example, the throttle
+# guarantees 10MB of bandwidth per 1/10 second, without accumulation: the first
+# 1/10 second gets 10MB, and any unused budget expires, so the second 1/10
+# second still gets only 10MB of bandwidth, not 20MB
 chunkserver.snapshot_throttle_check_cycles=4
-# 限制inflight io数量,一般是5000
+# Limit on the number of inflight IO requests, usually 5000
 chunkserver.max_inflight_requests=5000
 #
@@ -70,43 +70,43 @@ test.testcopyset_conf=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0
 #
 # lease read switch, default is true(open lease read)
 # if false, all requests will propose to raft(log read)
-# 启用lease read,一般开启,否则将退化为log read形式
+# Enable lease read, usually enabled; otherwise reads degrade to log reads
 copyset.enable_lease_read=true
-# 是否检查任期,一般检查
+# Whether to check the raft term, usually enabled
 copyset.check_term=true
-# 是否关闭raft配置变更的服务,一般不关闭
+# Whether to disable the raft configuration-change service, generally not disabled
 copyset.disable_cli=false
 copyset.log_applied_task=false
-# raft选举超时时间,一般是5000ms
+# Raft election timeout, usually 5000ms
 copyset.election_timeout_ms=1000
-# raft打快照间隔,一般是1800s,也就是30分钟
+# Raft snapshot interval, usually 1800s, i.e. 30 minutes
 copyset.snapshot_interval_s=1800
-# add一个节点,add的节点首先以类似learner的角色拷贝数据
-# 在跟leader差距catchup_margin个entry的时候,leader
-# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定
-# 会commit&apply,catchup_margin较小可以大概率确保learner
-# 后续很快可以加入复制组
+# When adding a node, the added node first copies data in a role similar to a
+# learner. Once the gap to the leader is within catchup_margin entries, the
+# leader attempts to commit the configuration-change entry (generally a
+# committed entry will certainly be committed and applied). A small
+# catchup_margin makes it very likely that the learner can join the
+# replication group quickly
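+# Illustrative note (an editorial example, not from the original config): with
+# copyset.catchup_margin=1000 below, a newly added peer replicates like a
+# learner until its log is within 1000 entries of the leader's, at which point
+# the leader tries to commit the configuration-change entry that formally adds
+# the peer to the replication group.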
 copyset.catchup_margin=1000
-# copyset chunk数据目录
+# Copyset chunk data directory
 copyset.chunk_data_uri=local://./0/copysets # __CURVEADM_TEMPLATE__ local://${prefix}/data/copysets __CURVEADM_TEMPLATE__
-# raft wal log目录
+# Raft WAL log directory
 copyset.raft_log_uri=curve://./0/copysets # __CURVEADM_TEMPLATE__ curve://${prefix}/data/copysets __CURVEADM_TEMPLATE__
-# raft元数据目录
+# Raft metadata directory
 copyset.raft_meta_uri=local://./0/copysets # __CURVEADM_TEMPLATE__ local://${prefix}/data/copysets __CURVEADM_TEMPLATE__
-# raft snapshot目录
+# Raft snapshot directory
 copyset.raft_snapshot_uri=curve://./0/copysets # __CURVEADM_TEMPLATE__ curve://${prefix}/data/copysets __CURVEADM_TEMPLATE__
-# copyset回收目录
+# Copyset recycle directory
 copyset.recycler_uri=local://./0/recycler # __CURVEADM_TEMPLATE__ local://${prefix}/data/recycler __CURVEADM_TEMPLATE__
-# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制
+# Threshold for concurrent copyset loading at chunkserver startup; 0 means no limit
 copyset.load_concurrency=10
 # chunkserver use how many threads to use copyset complete sync.
 copyset.sync_concurrency=20
-# 检查copyset是否加载完成出现异常时的最大重试次数
+# Maximum number of retries when an exception occurs while checking whether a copyset has finished loading
 copyset.check_retrytimes=3
-# 当前peer的applied_index与leader上的committed_index差距小于该值
-# 则判定copyset已经加载完成
+# If the gap between the current peer's applied_index and the leader's
+# committed_index is smaller than this value, the copyset is considered loaded
 copyset.finishload_margin=2000
-# 循环判定copyset是否加载完成的内部睡眠时间
+# Sleep interval inside the loop that checks whether copysets have finished loading
 copyset.check_loadmargin_interval_ms=1000
 # scan copyset interval
 copyset.scan_interval_sec=5
@@ -132,26 +132,26 @@ copyset.check_syncing_interval_ms=500
 #
 # Clone settings
 #
-# 禁止使用curveclient
+# Disable curveclient
 clone.disable_curve_client=false
-# 禁止使用s3adapter
+# Disable s3adapter
 clone.disable_s3_adapter=false
-# 克隆的分片大小,一般1MB
+# Slice size used when cloning, usually 1MB
 clone.slice_size=1048576
-# 读clone chunk时是否需要paste到本地
-# 该配置对recover chunk请求类型无效
+# Whether data read from a clone chunk should be pasted to the local chunk
+# This option has no effect on recover-chunk requests
 clone.enable_paste=false
-# 克隆的线程数量
+# Number of clone worker threads
 clone.thread_num=10
-# 克隆的队列深度
+# Queue depth for clone tasks
 clone.queue_depth=6000
-# curve用户名
+# Curve username
 curve.root_username=root
-# curve密码
+# Curve password
 curve.root_password=root_password
-# client配置文件
+# Client configuration file
 curve.config_path=conf/cs_client.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/cs_client.conf __CURVEADM_TEMPLATE__
-# s3配置文件
+# S3 configuration file
 s3.config_path=conf/s3.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/s3.conf __CURVEADM_TEMPLATE__
 # Curve File time to live
 curve.curve_file_timeout_s=30
@@ -159,7 +159,7 @@ curve.curve_file_timeout_s=30
 #
 # Local FileSystem settings
 #
-# 是否开启使用renameat2,ext4内核3.15以后开始支持
+# Whether to use renameat2 (supported by ext4 since kernel 3.15)
 fs.enable_renameat2=true
 #
@@ -179,27 +179,27 @@ storeng.sync_write=false
 #
 # Concurrent apply module
-# 并发模块写线程的并发度,一般是10
+# Concurrency of the write threads in the concurrent-apply module, usually 10
 wconcurrentapply.size=10
-# 并发模块写线程的队列深度
+# Queue depth of the write threads in the concurrent-apply module
 wconcurrentapply.queuedepth=1
-# 并发模块读线程的并发度,一般是5
+# Concurrency of the read threads in the concurrent-apply module, usually 5
 rconcurrentapply.size=5
-# 并发模块读线程的队列深度
+# Queue depth of the read threads in the concurrent-apply module
 rconcurrentapply.queuedepth=1
 #
 # Chunkfile pool
 #
-# 是否开启从chunkfilepool获取chunk,一般是true
+# Whether to obtain chunks from the chunkfilepool, usually true
 chunkfilepool.enable_get_chunk_from_pool=true
-# chunkfilepool目录
+# chunkfilepool directory
 chunkfilepool.chunk_file_pool_dir=./0/chunks # __CURVEADM_TEMPLATE__ ${prefix}/data __CURVEADM_TEMPLATE__
-# chunkfilepool meta文件路径
+# chunkfilepool meta file path
 chunkfilepool.meta_path=./chunkfilepool.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/chunkfilepool.meta __CURVEADM_TEMPLATE__
-# chunkfilepool meta文件大小
+# chunkfilepool meta file size
 chunkfilepool.cpmeta_file_size=4096
-# chunkfilepool get chunk最大重试次数
+# Maximum retry count for getting a chunk from the chunkfilepool
 chunkfilepool.retry_times=5
 # Enable clean chunk
 chunkfilepool.clean.enable=true
@@ -219,23 +219,23 @@ chunkfilepool.thread_num=1
 #
 # WAL file pool
 #
-# walpool是否共用chunkfilepool,如果为true,从第三条开始配置无效
+# Whether the walpool shares the chunkfilepool; if true, the options from the third one onward are ignored
 walfilepool.use_chunk_file_pool=true
-# WALpool和ChunkFilePool共用时启用,在容量分配时会预留walpool的空间
+# Takes effect when the WALpool and ChunkFilePool are shared; space for the walpool is reserved during capacity allocation
 walfilepool.use_chunk_file_pool_reserve=15
-# 是否开启从walfilepool获取chunk,一般是true
+# Whether to obtain segments from the walfilepool, usually true
 walfilepool.enable_get_segment_from_pool=true
-# walpool目录
+# Walpool directory
 walfilepool.file_pool_dir=./0/ # __CURVEADM_TEMPLATE__ ${prefix}/data/walfilepool.meta __CURVEADM_TEMPLATE__
-# walpool meta文件路径
+# Walpool meta file path
 walfilepool.meta_path=./walfilepool.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/walfilepool.meta __CURVEADM_TEMPLATE__
-# walpool meta文件大小
+# Walpool meta file size
 walfilepool.segment_size=8388608
-# WAL metapage大小
+# WAL metapage size
 walfilepool.metapage_size=4096
-# WAL filepool 元数据文件大小
+# WAL filepool metadata file size
 walfilepool.meta_file_size=4096
-# WAL filepool get chunk最大重试次数
+# Maximum retry count for getting a chunk from the WAL filepool
 walfilepool.retry_times=5
 # Whether allocate filePool by percent of disk size.
 walfilepool.allocated_by_percent=true
@@ -249,14 +249,14 @@ walfilepool.thread_num=1
 #
 # trash settings
 #
-# chunkserver回收数据彻底删除的过期时间
+# Expiration time after which chunkserver permanently deletes recycled data
 trash.expire_afterSec=300
-# chunkserver检查回收数据过期时间的周期
+# Interval at which chunkserver checks recycled data for expiration
 trash.scan_periodSec=120
 # common option
 #
-# chunkserver 日志存放文件夹
+# Chunkserver log directory
 chunkserver.common.logDir=./ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
-# 单元测试情况下
+# For unit tests
 # chunkserver.common.logDir=./runlog/
diff --git a/conf/chunkserver.conf.example b/conf/chunkserver.conf.example
index 443412215b..f7ab284dd9 100644
--- a/conf/chunkserver.conf.example
+++ b/conf/chunkserver.conf.example
@@ -1,18 +1,18 @@
 #
 # Global settings
 #
-# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log levels: INFO=0/WARNING=1/ERROR=2/FATAL=3
 global.ip=127.0.0.1
 global.port=8200
 global.subnet=127.0.0.0/24
 global.enable_external_server=false
 global.external_ip=127.0.0.1
 global.external_subnet=127.0.0.0/24
-# chunk大小,一般16MB
+# Chunk size, typically 16MB
 global.chunk_size=16777216
-# chunk 元数据页大小,一般4KB
+# Chunk metadata page size, typically 4KB
 global.meta_page_size=4096
-# clone chunk允许的最长location长度
+# Maximum length allowed for the location of a clone chunk
 # chunk's block size, IO requests must align with it, supported value is |512| and |4096|
 # it should consist with `block_size` in chunkfilepool.meta_path and `mds.volume.blockSize` in MDS's configurations
 # for clone chunk and snapshot chunk, it's also the minimum granularity that each bit represents
 global.location_limit=3000
 #
 # MDS settings
 #
-#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777
+# Multiple MDS addresses are supported, separated by commas: 127.0.0.1:6666,127.0.0.1:7777
 mds.listen.addr=127.0.0.1:6666
-# 向mds注册的最大重试次数
+# Maximum retry count for registering with MDS
 mds.register_retries=100
-# 向mds注册的rpc超时时间,一般1000ms
+# RPC timeout for MDS registration, typically 1000ms
 mds.register_timeout=1000
-# 向mds发送心跳的间隔,一般10s
+# Interval for sending heartbeats to MDS, usually 10s
 mds.heartbeat_interval=10
-# 向mds发送心跳的rpc超时间,一般1000ms
+# RPC timeout for sending heartbeats to MDS, typically 1000ms
 mds.heartbeat_timeout=5000
 #
 # Chunkserver settings
 #
-# chunkserver主目录
+# Main directory for chunkserver
 chunkserver.stor_uri=local://./0/
-# chunkserver元数据文件
+# Metadata file for chunkserver
 chunkserver.meta_uri=local://./0/chunkserver.dat
-# disk类型
+# Disk type
 chunkserver.disk_type=nvme
-# raft内部install snapshot带宽上限,一般20MB
+# Bandwidth limit for raft-internal install snapshot, usually 20MB
 chunkserver.snapshot_throttle_throughput_bytes=20971520
-# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB,
-# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个
-# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而
-# 不是20MB的带宽
+# Throttle check cycles are for finer-grained bandwidth control. For example,
+# with snapshotThroughputBytes=100MB and check cycles=10, the throttle ensures
+# 10MB of bandwidth every 1/10 second, without accumulation. For instance,
+# the bandwidth is 10MB for the first 1/10 second, but it expires after that.
+# In the second 1/10 second, the bandwidth remains 10MB, not 20MB.
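+# Illustrative calculation (an editorial example, not from the original
+# config): with snapshot_throttle_throughput_bytes=20971520 (20MB) and the
+# check cycles value of 4 below, each 1/4-second window allows
+# 20MB / 4 = 5MB of install-snapshot traffic, and unused budget does not
+# carry over to the next window.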
 chunkserver.snapshot_throttle_check_cycles=4
-# 限制inflight io数量,一般是5000
+# Limit on the number of inflight IO requests, usually 5000
 chunkserver.max_inflight_requests=5000
 #
@@ -64,41 +65,41 @@ test.testcopyset_conf=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0
 #
 # Copyset settings
 #
-# 是否检查任期,一般检查
+# Whether to check the raft term, usually checked
 copyset.check_term=true
-# 是否关闭raft配置变更的服务,一般不关闭
+# Whether to disable the service for raft configuration changes, generally not disabled
 copyset.disable_cli=false
 copyset.log_applied_task=false
-# raft选举超时时间,一般是5000ms
+# Raft election timeout, usually 5000ms
 copyset.election_timeout_ms=1000
-# raft打快照间隔,一般是1800s,也就是30分钟
+# Raft snapshot interval, usually 1800s, i.e., 30 minutes
 copyset.snapshot_interval_s=1800
-# add一个节点,add的节点首先以类似learner的角色拷贝数据
-# 在跟leader差距catchup_margin个entry的时候,leader
-# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定
-# 会commit&apply,catchup_margin较小可以大概率确保learner
-# 后续很快可以加入复制组
+# When adding a node, the added node first copies data in a role similar to a learner.
+# When there is a difference of catchup_margin entries from the leader, the leader
+# will attempt to commit the configuration change entry (usually the committed
+# entry will certainly be committed and applied). A smaller catchup_margin can ensure
+# that the learner can quickly join the replication group.
 copyset.catchup_margin=1000
-# copyset chunk数据目录
+# Copyset chunk data directory
 copyset.chunk_data_uri=local://./0/copysets
-# raft wal log目录
+# Raft WAL log directory
 copyset.raft_log_uri=curve://./0/copysets
-# raft元数据目录
+# Raft metadata directory
 copyset.raft_meta_uri=local://./0/copysets
-# raft snapshot目录
+# Raft snapshot directory
 copyset.raft_snapshot_uri=curve://./0/copysets
-# copyset回收目录
+# Copyset recycle directory
 copyset.recycler_uri=local://./0/recycler
-# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制
+# Threshold for concurrent copyset loading at chunkserver startup; 0 means no limit.
 copyset.load_concurrency=10
-# chunkserver use how many threads to use copyset complete sync.
+# Number of threads used by the chunkserver for full copyset synchronization.
 copyset.sync_concurrency=20
-# 检查copyset是否加载完成出现异常时的最大重试次数
+# Maximum number of retries when an exception occurs while checking whether a copyset has finished loading.
 copyset.check_retrytimes=3
-# 当前peer的applied_index与leader上的committed_index差距小于该值
-# 则判定copyset已经加载完成
+# If the difference between the applied_index of the current peer and the committed_index
+# on the leader is less than this value, the copyset is considered loaded.
 copyset.finishload_margin=2000
-# 循环判定copyset是否加载完成的内部睡眠时间
+# Internal sleep time of the loop that checks whether copysets have finished loading.
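+# Illustrative pseudo-logic (an editorial sketch based on the comments above,
+# not verbatim code): every check_loadmargin_interval_ms the chunkserver
+# evaluates
+#   leader_committed_index - peer_applied_index < finishload_margin
+# and declares the copyset loaded once the inequality holds, retrying at most
+# check_retrytimes times when the check itself fails.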
 copyset.check_loadmargin_interval_ms=1000
 # scan copyset interval
 copyset.scan_interval_sec=5
@@ -124,26 +125,26 @@ copyset.check_syncing_interval_ms=500
 #
 # Clone settings
 #
-# 禁止使用curveclient
+# Disable curveclient
 clone.disable_curve_client=false
-# 禁止使用s3adapter
+# Disable s3adapter
 clone.disable_s3_adapter=false
-# 克隆的分片大小,一般1MB
+# Slice size used when cloning, usually 1MB
 clone.slice_size=1048576
-# 读clone chunk时是否需要paste到本地
-# 该配置对recover chunk请求类型无效
+# Whether data read from a clone chunk should be pasted to the local chunk
+# This option has no effect on recover-chunk requests
 clone.enable_paste=false
-# 克隆的线程数量
+# Number of clone worker threads
 clone.thread_num=10
-# 克隆的队列深度
+# Queue depth for clone tasks
 clone.queue_depth=6000
-# curve用户名
+# Curve username
 curve.root_username=root
-# curve密码
+# Curve password
 curve.root_password=root_password
-# client配置文件
+# Client configuration file
 curve.config_path=conf/cs_client.conf
-# s3配置文件
+# S3 configuration file
 s3.config_path=conf/s3.conf
 # Curve File time to live
 curve.curve_file_timeout_s=30
@@ -151,7 +152,7 @@ curve.curve_file_timeout_s=30
 #
 # Local FileSystem settings
 #
-# 是否开启使用renameat2,ext4内核3.15以后开始支持
+# Whether to use renameat2 (supported by ext4 since kernel 3.15)
 fs.enable_renameat2=true
 #
@@ -171,27 +172,27 @@ storeng.sync_write=false
 #
 # Concurrent apply module
-# 并发模块写线程的并发度,一般是10
+# Concurrency of the write threads in the concurrent-apply module, usually 10
 wconcurrentapply.size=10
-# 并发模块写线程的队列深度
+# Queue depth of the write threads in the concurrent-apply module
 wconcurrentapply.queuedepth=1
-# 并发模块读线程的并发度,一般是5
+# Concurrency of the read threads in the concurrent-apply module, usually 5
 rconcurrentapply.size=5
-# 并发模块读线程的队列深度
+# Queue depth of the read threads in the concurrent-apply module
 rconcurrentapply.queuedepth=1
 #
 # Chunkfile pool
 #
-# 是否开启从chunkfilepool获取chunk,一般是true
+# Whether to obtain chunks from the chunkfilepool, usually true
 chunkfilepool.enable_get_chunk_from_pool=true
-# chunkfilepool目录
+# chunkfilepool directory
 chunkfilepool.chunk_file_pool_dir=./0/chunks
-# chunkfilepool meta文件路径
+# chunkfilepool meta file path
 #chunkfilepool.meta_path=./chunkfilepool.meta
-# chunkfilepool meta文件大小
+# chunkfilepool meta file size
 chunkfilepool.cpmeta_file_size=4096
-# chunkfilepool get chunk最大重试次数
+# Maximum retry count for getting a chunk from the chunkfilepool
 chunkfilepool.retry_times=5
 # Enable clean chunk
 chunkfilepool.clean.enable=true
@@ -211,23 +212,23 @@ chunkfilepool.thread_num=1
 #
 # WAL file pool
 #
-# walpool是否共用chunkfilepool,如果为true,从第三条开始配置无效
+# Whether the walpool shares the chunkfilepool; if true, the options from the third one onward are ignored
 walfilepool.use_chunk_file_pool=true
-# WALpool和ChunkFilePool共用时启用,在容量分配时会预留walpool的空间
+# Takes effect when the WALpool and ChunkFilePool are shared; space for the walpool is reserved during capacity allocation
 walfilepool.use_chunk_file_pool_reserve=15
-# 是否开启从walfilepool获取chunk,一般是true
+# Whether to obtain segments from the walfilepool, usually true
 walfilepool.enable_get_segment_from_pool=true
-# walpool目录
+# Walpool directory
 walfilepool.file_pool_dir=./0/
-# walpool meta文件路径
+# Walpool meta file path
 walfilepool.meta_path=./walfilepool.meta
-# walpool meta文件大小
+# Walpool meta file size
 walfilepool.segment_size=8388608
-# WAL metapage大小
+# WAL metapage size
 walfilepool.metapage_size=4096
-# WAL filepool 元数据文件大小
+# WAL filepool metadata file size
 walfilepool.meta_file_size=4096
-# WAL filepool get chunk最大重试次数
+# Maximum retry count for getting a chunk from the WAL filepool
 walfilepool.retry_times=5
 # Whether allocate filePool by percent of disk size.
 walfilepool.allocated_by_percent=true
@@ -241,14 +242,14 @@ walfilepool.thread_num=1
 #
 # trash settings
 #
-# chunkserver回收数据彻底删除的过期时间
+# Expiration time after which chunkserver permanently deletes recycled data
 trash.expire_afterSec=300
-# chunkserver检查回收数据过期时间的周期
+# Interval at which chunkserver checks recycled data for expiration
 trash.scan_periodSec=120
 # common option
 #
-# chunkserver 日志存放文件夹
+# Chunkserver log directory
 chunkserver.common.logDir=./
-# 单元测试情况下
+# For unit tests
 # chunkserver.common.logDir=./runlog/
diff --git a/conf/client.conf b/conf/client.conf
index bac0dc1108..22345400d5 100644
--- a/conf/client.conf
+++ b/conf/client.conf
@@ -1,29 +1,29 @@
 #
-################### mds一侧配置信息 ##################
+################### MDS-side configuration ##################
 #
-# mds的地址信息,对于mds集群,地址以逗号隔开
+# Address of the MDS; for an MDS cluster, addresses are separated by commas
 mds.listen.addr=127.0.0.1:6666
-# 初始化阶段向mds注册开关,默认为开
+# Whether to register with MDS during initialization, enabled by default
 mds.registerToMDS=true
-# 与mds通信的rpc超时时间
+# RPC timeout for communicating with MDS
 mds.rpcTimeoutMS=500
-# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值
+# Maximum RPC timeout for communicating with MDS; the exponential-backoff timeout cannot exceed this value
 mds.maxRPCTimeoutMS=2000
-# 与mds通信重试总时间
+# Total retry time for communicating with MDS
 mds.maxRetryMS=8000
-# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数
+# Switch MDS once consecutive retries on the current MDS exceed this limit (the count includes timeout retries)
 mds.maxFailedTimesBeforeChangeMDS=2
-# 与MDS一侧保持一个lease时间内多少次续约
+# Number of lease renewals within one lease period on the MDS side
 mds.refreshTimesPerLease=4
-# mds RPC接口每次重试之前需要先睡眠一段时间
+# Sleep interval before each retry of an MDS RPC
 mds.rpcRetryIntervalUS=100000
 # The normal retry times for trigger wait strategy
@@ -36,84 +36,84 @@ mds.maxRetryMsInIOPath=86400000
 mds.waitSleepMs=10000
 #
-################# metacache配置信息 ################
+################# Metacache configuration ################
 #
-# 获取leader的rpc超时时间
+# RPC timeout for getting the leader
 metacache.getLeaderTimeOutMS=500
-# 获取leader的重试次数
+# Number of retries for getting the leader
 metacache.getLeaderRetry=5
-# 获取leader接口每次重试之前需要先睡眠一段时间
+# Sleep interval before each retry of the get-leader interface
 metacache.rpcRetryIntervalUS=100000
 #
-############### 调度层的配置信息 #############
+############### Scheduling-layer configuration #############
 #
-# 调度层队列大小,每个文件对应一个队列
-# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。
+# Scheduling-layer queue size; each file has its own queue
+# The depth of the scheduling queue affects the client's overall throughput; the queue holds asynchronous IO tasks.
 schedule.queueCapacity=1000000
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of execution threads for the queue
+# An execution thread pops an IO task, sends it to the network, and returns for
+# the next task. From popping a task to finishing the RPC send takes roughly
+# 20us-100us: about 20us in the normal case when no leader lookup is needed,
+# and around 100us when the leader must be fetched during the send. One thread
+# sustains roughly 100,000-500,000 tasks per second, which already meets the
+# performance requirements
 schedule.threadpoolSize=2
-# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程
-# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回,
-# 这样libcurve不占用qemu的线程,不阻塞其异步调用
+# Task queue introduced to isolate the QEMU-side thread, since QEMU has only one IO thread
+# When QEMU calls the AIO interface, the call is simply pushed onto the task queue and returns,
+# so libcurve does not occupy QEMU's thread and does not block its asynchronous calls
 isolation.taskQueueCapacity=1000000
-# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程
+# Thread-pool size of the task queue that isolates QEMU threads, default 1 thread
 isolation.taskThreadPoolSize=1
 #
-################ 与chunkserver通信相关配置 #############
+################ Configuration for communication with chunkserver #############
 #
-# 读写接口失败的OP之间重试睡眠
+# Sleep interval between retries of failed read/write OPs
 chunkserver.opRetryIntervalUS=100000
-# 失败的OP重试次数
+# Number of retries for failed OPs
 chunkserver.opMaxRetry=2500000
-# 与chunkserver通信的rpc超时时间
+# RPC timeout for communicating with chunkserver
 chunkserver.rpcTimeoutMS=1000
-# 重试请求之间睡眠最长时间
-# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间
-# 这个时间最大为maxRetrySleepIntervalUs
+# Maximum sleep time between retry requests
+# When the network is congested or the chunkserver is overloaded, the sleep time needs to increase,
+# up to at most maxRetrySleepIntervalUs
 chunkserver.maxRetrySleepIntervalUS=8000000
-# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略
-# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间
-# 这个时间最大为maxTimeoutMS
+# Maximum RPC timeout for retry requests; the timeout follows an exponential backoff policy
+# When timeouts occur because the network is congested, the RPC timeout needs to increase,
+# up to at most maxTimeoutMS
 chunkserver.maxRPCTimeoutMS=8000
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# Beyond this value a health check is performed, and if the health check fails the chunkserver is marked unstable
 chunkserver.maxStableTimeoutTimes=10
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# Timeout of the health-check request after consecutive RPC timeouts on a chunkserver
 chunkserver.checkHealthTimeoutMs=100
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on one server exceeds this value,
+# all chunkservers on that server are marked unstable
 chunkserver.serverStableThreshold=3
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang
-# 真正宕机的情况下,请求重试一定次数后会处理完成
-# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避
+# Heavy pressure on the underlying chunkservers may also trigger unstable
+# Because the copyset leader may change, the request timeout is reset to the default value, which can cause IO hangs
+# In a real outage the request completes after a certain number of retries
+# If retries keep failing it is not an outage, and the timeout should still follow the exponential-backoff logic
+# Once a request has been retried more than this many times, its timeout always enters exponential backoff
 chunkserver.minRetryTimesForceTimeoutBackoff=5
-# ** 已废弃,不再使用,请使用 `chunkserver.slowRequestThresholdMS` **
-# ** dreprecated, use `chunkserver.slowRequestThresholdMS` instead **
-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# ** Deprecated, no longer in use; use `chunkserver.slowRequestThresholdMS` instead **
+# When an RPC has been retried more than maxRetryTimesBeforeConsiderSuspend times,
+# it is recorded as a suspended IO and the metric raises an alert.
 chunkserver.maxRetryTimesBeforeConsiderSuspend=20
 # 请求重试时间超过该阈值后,会标记为slow request
@@ -122,41 +122,41 @@ chunkserver.maxRetryTimesBeforeConsiderSuspend=20
 chunkserver.slowRequestThresholdMS=45000
 #
-################# 文件级别配置项 #############
+################# File-level configuration #############
 #
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; each file's inflight RPCs are counted independently
 global.fileMaxInFlightRPCNum=128
-# 文件IO下发到底层chunkserver最大的分片KB
+# Maximum split size (KB) for file IO issued to the underlying chunkserver
 global.fileIOSplitMaxSizeKB=64
 #
-################# log相关配置 ###############
+################# Log configuration ###############
 #
 # enable logging or not
 global.logging.enable=True
 #
-# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
 global.logLevel=0
-# 设置log的路径
+# Log path
 global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ /curvebs/client/logs __CURVEADM_TEMPLATE__
-# 单元测试情况下
+# For unit tests
 # logpath=./runlog/
 #
-################# 读源卷相关配置 ###############
+################# Configuration for reading the source volume ###############
 #
-# 读取源卷时打开的fd超时关闭时间300s
+# An fd opened for reading the source volume is closed after a 300s timeout
 closefd.timeout=300
-# 读取源卷时打开的fd后台线程每600s扫描一遍fdMap,关闭超时fd
+# A background thread scans the fdMap every 600s and closes timed-out fds opened for reading the source volume
 closefd.timeInterval=600
 #
-############### metric 配置信息 #############
 #
 global.metricDummyServerStartPort=9000
-# 是否关闭健康检查: true/关闭 false/不关闭
+# Whether to disable the health check: true = disabled, false = enabled
 global.turnOffHealthCheck=true
 # minimal open file limit
diff --git a/conf/cs_client.conf b/conf/cs_client.conf
index 09d567d8f7..5bd674e417 100644
--- a/conf/cs_client.conf
+++ b/conf/cs_client.conf
@@ -1,29 +1,29 @@
 #
-################### mds一侧配置信息 ##################
+################### MDS-side configuration ##################
 #
-# mds的地址信息,对于mds集群,地址以逗号隔开
+# Address of the MDS; for an MDS cluster, addresses are separated by commas
 mds.listen.addr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__
-# 初始化阶段向mds注册开关,默认为开
+# Whether to register with MDS during initialization, enabled by default
 mds.registerToMDS=false
-# 与mds通信的rpc超时时间
+# RPC timeout for communicating with MDS
 mds.rpcTimeoutMS=500
-# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值
+# Maximum RPC timeout for communicating with MDS; the exponential-backoff timeout cannot exceed this value
 mds.maxRPCTimeoutMS=2000
-# 与mds通信重试总时间
+# Total retry time for communicating with MDS
 mds.maxRetryMS=8000
-# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数
+# Switch MDS once consecutive retries on the current MDS exceed this limit (the count includes timeout retries)
 mds.maxFailedTimesBeforeChangeMDS=2
-# 与MDS一侧保持一个lease时间内多少次续约
+# Number of lease renewals within one lease period on the MDS side
 mds.refreshTimesPerLease=4
-# mds RPC接口每次重试之前需要先睡眠一段时间
+# Sleep interval before each retry of an MDS RPC
 mds.rpcRetryIntervalUS=100000
 # The normal retry times for trigger wait strategy
@@ -36,111 +36,111 @@ mds.maxRetryMsInIOPath=86400000
 mds.waitSleepMs=10000
 #
-################# metacache配置信息 ################
+################# Metacache configuration ################
 #
-# 获取leader的rpc超时时间
+# RPC timeout for getting the leader
 metacache.getLeaderTimeOutMS=500
-# 获取leader的backup request超时时间
+# Backup-request timeout for getting the leader
 metacache.getLeaderBackupRequestMS=100
-# getleaer backup request使用的load balancer方法
+# Load-balancer method used by the get-leader backup request
 metacache.getLeaderBackupRequestLbName=rr
-# 获取leader的重试次数
+# Number of retries for getting the leader
 metacache.getLeaderRetry=5
-# 获取leader接口每次重试之前需要先睡眠一段时间
+# Sleep interval before each retry of the get-leader interface
 metacache.rpcRetryIntervalUS=100000
 #
-############### 调度层的配置信息 #############
+############### Scheduling-layer configuration #############
 #
-# 调度层队列大小,每个文件对应一个队列
-# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。
+# Scheduling-layer queue size; each file has its own queue
+# The depth of the scheduling queue affects the client's overall throughput; the queue holds asynchronous IO tasks.
 schedule.queueCapacity=1000000
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of execution threads for the queue
+# An execution thread fetches an IO task, sends it over the network, and moves on to the next task.
+# From retrieval from the queue to sending the RPC request, a task typically takes 20us-100us: about 20us in the normal case when no leader lookup is needed during the send,
+# and around 100us when the leader must be acquired during the send. One thread sustains roughly 100,000 to 500,000 operations per second.
+# The performance meets the requirements.
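+# Illustrative pseudo-logic for the dispatch loop described above (an editorial
+# sketch, not code from the client):
+#   while (running) { task = queue.pop(); splitIntoRpcsAndSend(task); }
+# Each iteration costs roughly 20us without a leader lookup and ~100us with one.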
 schedule.threadpoolSize=1
-# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程
-# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回,
-# 这样libcurve不占用qemu的线程,不阻塞其异步调用
+# Task queue introduced to isolate the QEMU-side thread, since QEMU has only one IO thread
+# When QEMU calls the AIO interface, the call is simply pushed onto the task queue and returns,
+# so libcurve does not occupy QEMU's thread and does not block its asynchronous calls
 isolation.taskQueueCapacity=1000000
-# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程
+# Thread-pool size of the task queue that isolates QEMU threads, default 1 thread
 isolation.taskThreadPoolSize=1
 #
-################ 与chunkserver通信相关配置 #############
+################ Configuration for communication with chunkserver #############
 #
-# 读写接口失败的OP之间重试睡眠
+# Sleep interval between retries of failed read/write OPs
 chunkserver.opRetryIntervalUS=100000
-# 失败的OP重试次数
+# Number of retries for failed OPs
 chunkserver.opMaxRetry=3
-# 与chunkserver通信的rpc超时时间
+# RPC timeout for communicating with chunkserver
 chunkserver.rpcTimeoutMS=1000
-# 重试请求之间睡眠最长时间
-# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间
-# 这个时间最大为maxRetrySleepIntervalUs
+# Maximum sleep time between retry requests
+# When the network is congested or the chunkserver is overloaded, the sleep time needs to increase,
+# up to at most maxRetrySleepIntervalUs
 chunkserver.maxRetrySleepIntervalUS=8000000
-# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略
-# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间
-# 这个时间最大为maxTimeoutMS
+# Maximum RPC timeout for retry requests; the timeout follows an exponential backoff policy
+# When timeouts occur because the network is congested, the RPC timeout needs to increase,
+# up to at most maxTimeoutMS
 chunkserver.maxRPCTimeoutMS=8000
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# Beyond this value a health check is performed, and if the health check fails the chunkserver is marked unstable
 chunkserver.maxStableTimeoutTimes=64
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# Timeout of the health-check request after consecutive RPC timeouts on a chunkserver
 chunkserver.checkHealthTimeoutMs=100
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on one server exceeds this value,
+# all chunkservers on that server are marked unstable
 chunkserver.serverStableThreshold=3
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang
-# 真正宕机的情况下,请求重试一定次数后会处理完成
-# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避
+# Heavy pressure on the underlying chunkservers may also trigger unstable
+# Because the copyset leader may change, the request timeout is reset to the default value, which can cause IO hangs
+# In a real outage the request completes after a certain number of retries
+# If retries keep failing it is not an outage, and the timeout should still follow the exponential-backoff logic
+# Once a request has been retried more than this many times, its timeout always enters exponential backoff
 chunkserver.minRetryTimesForceTimeoutBackoff=5
-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# When an RPC has been retried more than maxRetryTimesBeforeConsiderSuspend times,
+# it is recorded as a suspended IO and the metric raises an alert
 chunkserver.maxRetryTimesBeforeConsiderSuspend=20
 #
-################# 文件级别配置项 #############
+################# File-level configuration #############
 #
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; each file's inflight RPCs are counted independently
 global.fileMaxInFlightRPCNum=64
-# 文件IO下发到底层chunkserver最大的分片KB
+# Maximum split size (KB) for file IO issued to the underlying chunkserver
 global.fileIOSplitMaxSizeKB=64
 #
-################# log相关配置 ###############
+################# Log configuration ###############
 #
-# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
 global.logLevel=0
-# 设置log的路径
+# Log path
 global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
-# 单元测试情况下
+# For unit tests
 # logpath=./runlog/
 #
-############### metric 配置信息 #############
+############### Metric configuration #############
 #
 global.metricDummyServerStartPort=9000
@@ -149,7 +149,7 @@ global.metricDummyServerStartPort=9000
 global.minOpenFileLimit=0
 #
-# session map文件,存储打开文件的filename到path的映射
+# Session map file, which stores the filename-to-path mapping of opened files
 #
 global.sessionMapPath=./session_map.json
diff --git a/conf/mds.conf b/conf/mds.conf
index cc8c661e0d..ef61689b97 100644
--- a/conf/mds.conf
+++ b/conf/mds.conf
@@ -1,5 +1,5 @@
 #
-# mds服务端口
+# MDS service port
 #
 mds.listen.addr=127.0.0.1:6666 #__CURVEADM_TEMPLATE__ ${service_addr}:${service_port} __CURVEADM_TEMPLATE__
 mds.dummy.listen.port=6667 # __CURVEADM_TEMPLATE__ ${service_dummy_port} __CURVEADM_TEMPLATE__
@@ -7,15 +7,15 @@ global.subnet=127.0.0.0/24
 global.port=6666 # __CURVEADM_TEMPLATE__ ${service_port} __CURVEADM_TEMPLATE__
 #
-# etcd相关配置
+# etcd-related configuration
 #
-# etcd地址
+# etcd address
 mds.etcd.endpoint=127.0.0.1:2379 # __CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEADM_TEMPLATE__
-# client建立连接的超时时间
+# Timeout for the etcd client to establish a connection
 mds.etcd.dailtimeoutMs=5000
-# client进行put/get/txn等操作的超时时间
+# Timeout for client operations such as put/get/txn
 mds.etcd.operation.timeoutMs=5000
-# client操作失败可以重试的次数
+# Number of times a failed client operation can be retried
 mds.etcd.retry.times=3
 # wait dlock timeout
 mds.etcd.dlock.timeoutMs=10000
@@ -27,68 +27,68 @@ etcd.auth.username=
 etcd.auth.password=
 #
-# segment分配量统计相关配置
+# Configuration related to segment-allocation statistics
 #
-# 将内存中的数据持久化到etcd的间隔, 单位ms
+# Interval for persisting in-memory data to etcd, in ms
 mds.segment.alloc.periodic.persistInterMs=10000
-# 出错情况下的重试间隔,单位ms
+# Retry interval on error, in ms
 mds.segment.alloc.retryInterMs=1000
 mds.segment.discard.scanIntevalMs=5000
-# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s)
-# 该值和etcd集群election timeout相关.
-# etcd的server端限制了该值最小为1.5 * election timeout
-# 建议设置etcd集群election timeout为1s
+# A session is created during leader election, in seconds (the Go-side interface also takes this value in seconds)
+# This value is related to the etcd cluster's election timeout.
+# The etcd server requires it to be at least 1.5 * election timeout
+# Setting the etcd cluster's election timeout to 1s is recommended
 mds.leader.sessionInterSec=5
-# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间
-# 内未当选leader会返回错误
+# Timeout for the leader election. If 0, an unsuccessful election blocks forever;
+# if greater than 0, an error is returned when no leader is elected within electionTimeoutMs
 mds.leader.electionTimeoutMs=0
 #
-# scheduler相关配置
+# Scheduler-related configuration
 #
-# copysetScheduler开关
+# copysetScheduler switch
 mds.enable.copyset.scheduler=true
-# leaderScheduler开关
+# leaderScheduler switch
 mds.enable.leader.scheduler=true
-# recoverScheduler开关
+# recoverScheduler switch
 mds.enable.recover.scheduler=true
-# replicaScheduler开关
+# replicaScheduler switch
 mds.enable.replica.scheduler=true
 # Scan scheduler switch
 mds.enable.scan.scheduler=true
-# copysetScheduler 轮次间隔,单位是s
+# copysetScheduler round interval, in seconds
 mds.copyset.scheduler.intervalSec=5
-# replicaScheduler 轮次间隔,单位是s
+# replicaScheduler round interval, in seconds
 mds.replica.scheduler.intervalSec=5
-# leaderScheduler 轮次间隔,单位是s
+# leaderScheduler round interval, in seconds
 mds.leader.scheduler.intervalSec=30
-# recoverScheduler 轮次间隔,单位是s
+# recoverScheduler round interval, in seconds
 mds.recover.scheduler.intervalSec=5
 # Scan scheduler run interval (seconds)
 mds.scan.scheduler.intervalSec=60
-# 每块磁盘上operator的并发度
+# Concurrency of operators per disk
 mds.schduler.operator.concurrent=1
-# leader变更超时时间, 超时后mds从内存移除该operator
+# Timeout for a leader change; after it expires the MDS removes the operator from memory
 mds.schduler.transfer.limitSec=60
-# 减一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for removing a replica; after it expires the MDS removes the operator from memory
 mds.scheduler.remove.limitSec=300
-# 增加一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for adding a replica; after it expires the MDS removes the operator from memory
 mds.scheduler.add.limitSec=1800
-# change一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for changing a replica; after it expires the MDS removes the operator from memory
 mds.scheduler.change.limitSec=1800
 # Scan operator timeout (seconds)
 mds.scheduler.scan.limitSec=180
-# copyset数量极差不能超过均值的百分比
+# The range (max - min) of copyset counts must not exceed this percentage of the mean
 mds.scheduler.copysetNumRangePercent=0.05
-# chunkserver上copyset的scatte-rwidth不能超过最小值的百分比
+# The scatter-width of copysets on a chunkserver must not exceed the minimum value by more than this percentage
 mds.schduler.scatterWidthRangePerent=0.2
-# 一个server上超过一定数量的chunkserver offline, 不做恢复
+# If more than this many chunkservers are offline on one server, no recovery is performed
 mds.chunkserver.failure.tolerance=3
-# chunkserver启动coolingTimeSec_后才可以作为target leader, 单位是s
-# TODO(lixiaocui): 续得一定程度上与快照的时间间隔方面做到相关
+# A chunkserver can serve as a target leader only coolingTimeSec_ after it starts, in seconds
+# TODO(lixiaocui): make this somewhat related to the snapshot interval
 mds.scheduler.chunkserver.cooling.timeSec=1800
 # ScanScheduler: scan start hour in one day ([0-23])
 mds.scheduler.scan.startHour=0
@@ -102,104 +102,104 @@ mds.scheduler.scan.concurrent.per.pool=10
 mds.scheduler.scan.concurrent.per.chunkserver=1
 #
-# 心跳相关配置,单位为ms
+# Heartbeat-related configuration, in ms
 #
-# chunkserver和mds的心跳间隔
+# Heartbeat interval between chunkserver and MDS
 mds.heartbeat.intervalMs=10000
-# chunkserver和mds间心跳miss的时间
+# Time after which a heartbeat between chunkserver and MDS is considered missed
 mds.heartbeat.misstimeoutMs=30000
-# mds在心跳miss后offlinetimeout被标记为offline
+# After a heartbeat miss, the MDS marks the chunkserver offline once offlinetimeout expires
 mds.heartbeat.offlinetimeoutMs=1800000
-# mds启动后延迟一定时间开始指导chunkserver删除物理数据
-# 需要延迟删除的原因在代码中备注
+# After the MDS starts, it waits for a period of time before instructing chunkservers to delete physical data
+# The reason for the delayed deletion is noted in the code
 mds.heartbeat.clean_follower_afterMs=1200000
 #
-# namespace cache相关
+# Namespace cache related
 #
-# namestorage的缓存大小,为0表示不缓存
-# 按照每个文件最小10GB的空间预算。算上超售(2倍)
-# 文件数量 = 5PB/10GB ~= 524288 个文件
-# sizeof(namespace对象) * 524288 ~= 89Byte *524288 ~= 44MB 空间
-# 16MB chunk大小, 1个segment 1GB
-# sizeof(segment 对象) * 2621440 ~=(32 + (1024/16)*12)* 2621440 ~= 1.95 GB
-# 数据量:3GB左右
-# 记录数量:524288+2621440 ~= 300w左右
+# Cache size of namestorage; 0 means no caching
+# Based on a minimum space budget of 10GB per file, including 2x overselling:
+# number of files = 5PB/10GB ~= 524288 files
+# sizeof(namespace object) * 524288 ~= 89 bytes * 524288 ~= 44MB of space
+# with a 16MB chunk size and 1GB segments:
+# sizeof(segment object) * 2621440 ~= (32 + (1024/16)*12) * 2621440 ~= 1.95 GB
+# data volume: about 3GB
+# record count: 524288 + 2621440 ~= about 3 million
 mds.cache.count=100000
 #
 # mds file record settings
 #
-# mds file记录过期时间,单位us
+# Expiration time of MDS file records, in us
 mds.file.expiredTimeUs=5000000
-# mds后台扫描线程扫描file记录间隔时间,单位us
+# Interval at which the MDS background thread scans file records, in us
 mds.file.scanIntevalTimeUs=500000
 #
 # auth settings
 #
-# root用户密码
+# Root user password
 mds.auth.rootUserName=root
 mds.auth.rootPassword=root_password
 #
 # file lock setting
 #
-# mds的文件锁桶大小
+# Bucket count of the MDS file lock
 mds.filelock.bucketNum=8
 #
 # topology config
 #
-# Toplogy 定期刷新入数据库的时间间隔
+# Interval at which the topology is periodically flushed to the database
 mds.topology.TopologyUpdateToRepoSec=60
-# 请求chunkserver上创建全部copyset的超时时间
+# Timeout for requesting chunkservers to create all copysets
 mds.topology.CreateCopysetRpcTimeoutMs=10000
-# 请求chunkserver上创建copyset重试次数
+# Retry count for requesting a chunkserver to create a copyset
 mds.topology.CreateCopysetRpcRetryTimes=20
-# 请求chunkserver上创建copyset重试间隔
+# Retry interval for requesting a chunkserver to create a copyset
 mds.topology.CreateCopysetRpcRetrySleepTimeMs=1000
-# Topology模块刷新metric时间间隔
+# Interval at which the topology module refreshes metrics
 mds.topology.UpdateMetricIntervalSec=10
-#和mds.chunkserver.failure.tolerance设置有关,一个zone 标准配置20台节点,如果允许3台节点failover,
-#那么剩余17台机器需要承载原先20台机器的空间,17/20=0.85,即使用量超过这个值即不再往这个池分配,
-#具体分为来两种情况, 当不使用chunkfilepool,物理池限制使用百分比,当使用 chunkfilepool 进行chunkfilepool分配时需预留failover空间,
+# Related to the mds.chunkserver.failure.tolerance setting. A standard zone has 20 nodes; if 3 nodes are allowed to fail over,
+# the remaining 17 machines must carry the space of the original 20, and 17/20 = 0.85. Once usage exceeds this value, no more space is allocated from this pool.
+# There are two cases: when chunkfilepool is not used, the physical pool limits the usage percentage; when chunkfilepool is used for allocation, failover space must be reserved.
 mds.topology.PoolUsagePercentLimit=85
-# 多pool选pool策略 0:Random, 1:Weight
+# Pool-selection policy among multiple pools: 0:Random, 1:Weight
 mds.topology.choosePoolPolicy=0
 # enable LogicalPool ALLOW/DENY status
 mds.topology.enableLogicalPoolStatus=false
 #
 # copyset config
-# 默认值,为0时不启用
+# Default values; 0 disables the option
 #
-# 生成copyset重试次数
+# Retry count for generating copysets
 mds.copyset.copysetRetryTimes=10
-# 所有chunkserver的scatterWidth需满足的最大方差
+# Maximum variance that the scatterWidth of all chunkservers must satisfy
 mds.copyset.scatterWidthVariance=0
-# 所有chunkserver的scatterWidth需满足的最大标准差
+# Maximum standard deviation that the scatterWidth of all chunkservers must satisfy
 mds.copyset.scatterWidthStandardDevation=0
-# 所有chunkserver的scatterWidth需满足的最大极差
+# Maximum range that the scatterWidth of all chunkservers must satisfy
 mds.copyset.scatterWidthRange=0
-# 所有chunkserver的scatterWidth偏离均值的百分比
-# scatterwidth偏移百分比设置过大,导致部分机器scatterwidth过小,影响机器恢复时间,恢复
-# 时间会导致集群的可靠性降低;其次,会导致部分机器scatterwith过大,某些chunkserver上的
-# copyset散落在各机器上,其他机器一旦写入,这些scatter-with较大的机器成为热点
-# scatterwidth偏移百分比设置过小,导致scatterwidth平均程度要求更大,copyset算法要求越高,
-# 导致算法可能算不出理想结果,建议设置值为20
+# Percentage by which the scatter-width of chunkservers may deviate from the mean.
+# If this percentage is set too large, some machines end up with a scatter-width that is
+# too small, which lengthens machine recovery time and reduces cluster reliability; it also
+# leaves some machines with an excessively large scatter-width, so their copysets are
+# scattered across many machines and those machines become hotspots once writes arrive.
+# If the percentage is set too small, scatter-width must be much more uniform and the
+# copyset algorithm is held to a higher standard, so it may fail to find an ideal result.
+# A value of 20 is recommended.
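+# Illustrative calculation (an editorial example, not from the original
+# config): with an average scatter-width of 100 and
+# scatterWidthFloatingPercentage=20, each chunkserver's scatter-width is
+# expected to stay within [80, 120].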
 mds.copyset.scatterWidthFloatingPercentage=20
 #
 # curvefs config
 #
-# curvefs的默认chunk size大小,16MB = 16*1024*1024 = 16777216
+# Default chunk size of curvefs: 16MB = 16*1024*1024 = 16777216
 mds.curvefs.defaultChunkSize=16777216
-# curvefs的默认segment size大小,1GB = 1*1024*1024*1024 = 1073741824
+# Default segment size of curvefs: 1GB = 1*1024*1024*1024 = 1073741824
 mds.curvefs.defaultSegmentSize=1073741824
-# curvefs的默认最小文件大小,10GB = 10*1024*1024*1024 = 10737418240
+# Default minimum file size of curvefs: 10GB = 10*1024*1024*1024 = 10737418240
 mds.curvefs.minFileLength=10737418240
-# curvefs的默认最大文件大小,20TB = 20*1024*1024*1024*1024 = 21990232555520
+# Default maximum file size of curvefs: 20TB = 20*1024*1024*1024*1024 = 21990232555520
 mds.curvefs.maxFileLength=21990232555520
 # smallest read/write unit for volume, support |512| and |4096|
 mds.curvefs.blockSize=4096
@@ -207,29 +207,29 @@ mds.curvefs.blockSize=4096
 #
 # chunkseverclient config
 #
-# rpc 超时时间
+# RPC timeout
 mds.chunkserverclient.rpcTimeoutMs=500
-# rpc 重试次数
+# RPC retry count
 mds.chunkserverclient.rpcRetryTimes=5
-# rpc 重试时间间隔
+# RPC retry interval
 mds.chunkserverclient.rpcRetryIntervalMs=500
-# 从copyset的每个chunkserver getleader的重试的最大轮次
+# Maximum number of get-leader retry rounds across the chunkservers of a copyset
 mds.chunkserverclient.updateLeaderRetryTimes=5
-# 从copyset的每个chunkserver getleader的每一轮的间隔,需大于raft选主的时间
+# Interval between get-leader rounds across the chunkservers of a copyset; must be longer than a raft leader election
 mds.chunkserverclient.updateLeaderRetryIntervalMs=5000
 #
 # snapshotclone config
 #
-# snapshot clone server 地址
+# Snapshot clone server address
 mds.snapshotcloneclient.addr=127.0.0.1:5555 # __CURVEADM_TEMPLATE__ ${cluster_snapshotclone_proxy_addr} __CURVEADM_TEMPLATE__
 #
 # common options
 #
-# 日志存放文件夹
+# Log directory
 mds.common.logDir=./ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
-# 单元测试情况下
+# For unit tests
 # mds.common.logDir=./runlog/
 #
diff --git a/conf/py_client.conf b/conf/py_client.conf
index cb7999c5e4..5460949092 100644
--- a/conf/py_client.conf
+++ b/conf/py_client.conf
@@ -1,29 +1,29 @@
 #
-################### mds一侧配置信息 ##################
+################### MDS-side configuration ##################
 #
-# mds的地址信息,对于mds集群,地址以逗号隔开
+# Address of the MDS; for an MDS cluster, addresses are separated by commas
 mds.listen.addr=127.0.0.1:6666
-# 初始化阶段向mds注册开关,默认为开
+# Whether to register with MDS during initialization, enabled by default
 mds.registerToMDS=false
-# 与mds通信的rpc超时时间
+# RPC timeout for communicating with MDS
 mds.rpcTimeoutMS=500
-# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值
+# Maximum RPC timeout for communicating with MDS; the exponential-backoff timeout cannot exceed this value
 mds.maxRPCTimeoutMS=2000
-# 与mds通信重试总时间
+# Total retry time for communicating with MDS
 mds.maxRetryMS=8000
-# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数
+# Switch MDS once consecutive retries on the current MDS exceed this limit (the count includes timeout retries)
 mds.maxFailedTimesBeforeChangeMDS=2
-# 与MDS一侧保持一个lease时间内多少次续约
+# Number of lease renewals within one lease period on the MDS side
 mds.refreshTimesPerLease=4
-# mds RPC接口每次重试之前需要先睡眠一段时间
+# Sleep interval before each retry of an MDS RPC
 mds.rpcRetryIntervalUS=100000
 # The normal retry times for trigger wait strategy
@@ -36,91 +36,91 @@ mds.maxRetryMsInIOPath=86400000
 mds.waitSleepMs=10000
 #
-################# metacache配置信息 ################
+################# Metacache configuration ################
 #
-# 获取leader的rpc超时时间
+# RPC timeout for getting the leader
 metacache.getLeaderTimeOutMS=500
-# 获取leader的重试次数
+# Number of retries for getting the leader
 metacache.getLeaderRetry=5
-# 获取leader接口每次重试之前需要先睡眠一段时间
+# Sleep interval before each retry of the get-leader interface
 metacache.rpcRetryIntervalUS=100000
 #
-############### 调度层的配置信息 #############
+############### Scheduling-layer configuration #############
 #
-# 调度层队列大小,每个文件对应一个队列
-# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。
+# Scheduling-layer queue size; each file has its own queue
+# The depth of the scheduling queue affects the client's overall throughput; the queue holds asynchronous IO tasks.
 schedule.queueCapacity=1000000
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of execution threads for the queue
+# An execution thread pops an IO task, sends it to the network, and returns for
+# the next task. From popping a task to finishing the RPC send takes roughly
+# 20us-100us: about 20us in the normal case when no leader lookup is needed,
+# and around 100us when the leader must be fetched during the send. One thread
+# sustains roughly 100,000-500,000 tasks per second, which already meets the
+# performance requirements
 schedule.threadpoolSize=1
-# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程
-# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回,
-# 这样libcurve不占用qemu的线程,不阻塞其异步调用
+# Task queue introduced to isolate the QEMU-side thread, since QEMU has only one IO thread
+# When QEMU calls the AIO interface, the call is simply pushed onto the task queue and returns,
+# so libcurve does not occupy QEMU's thread and does not block its asynchronous calls
 isolation.taskQueueCapacity=1000000
-# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程
+# Thread-pool size of the task queue that isolates QEMU threads, default 1 thread
 isolation.taskThreadPoolSize=1
 #
-################ 与chunkserver通信相关配置 #############
+################ Configuration for communication with chunkserver #############
 #
-# 读写接口失败的OP之间重试睡眠
+# Sleep interval between retries of failed read/write OPs
 chunkserver.opRetryIntervalUS=100000
-# 失败的OP重试次数
+# Number of retries for failed OPs
 chunkserver.opMaxRetry=2500000
-# 与chunkserver通信的rpc超时时间
+# RPC timeout for communicating with chunkserver
 chunkserver.rpcTimeoutMS=1000
-# 重试请求之间睡眠最长时间
-# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间
-# 这个时间最大为maxRetrySleepIntervalUs
+# Maximum sleep time between retry requests
+# When the network is congested or the chunkserver is overloaded, the sleep time needs to increase,
+# up to at most maxRetrySleepIntervalUs
 chunkserver.maxRetrySleepIntervalUS=8000000
-# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略
-# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间
-# 这个时间最大为maxTimeoutMS
+# Maximum RPC timeout for retry requests; the timeout follows an exponential backoff policy
+# When timeouts occur because the network is congested, the RPC timeout needs to increase,
+# up to at most maxTimeoutMS
 chunkserver.maxRPCTimeoutMS=8000
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# Beyond this value a health check is performed, and if the health check fails the chunkserver is marked unstable
 chunkserver.maxStableTimeoutTimes=10
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# Timeout of the health-check request after consecutive RPC timeouts on a chunkserver
 chunkserver.checkHealthTimeoutMs=100
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on one server exceeds this value,
+# all chunkservers on that server are marked unstable
 chunkserver.serverStableThreshold=3
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang
-# 真正宕机的情况下,请求重试一定次数后会处理完成
-# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避
+# Heavy pressure on the underlying chunkservers may also trigger unstable
+# Because the copyset leader may change, the request timeout is reset to the default value, which can cause IO hangs
+# In a real outage the request completes after a certain number of retries
+# If retries keep failing it is not an outage, and the timeout should still follow the exponential-backoff logic
+# Once a request has been retried more than this many times, its timeout always enters exponential backoff
 chunkserver.minRetryTimesForceTimeoutBackoff=5
-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# When an RPC has been retried more than maxRetryTimesBeforeConsiderSuspend times,
+# it is recorded as a suspended IO and the metric raises an alert
 chunkserver.maxRetryTimesBeforeConsiderSuspend=20
 #
-################# 文件级别配置项 #############
+################# File-level configuration #############
 #
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; each file's inflight RPCs are counted independently
 global.fileMaxInFlightRPCNum=64
-# 文件IO下发到底层chunkserver最大的分片KB
+# Maximum split size (KB) for file IO issued to the underlying chunkserver
 global.fileIOSplitMaxSizeKB=64
 # minimal open file limit
@@ -128,22 +128,22 @@ global.fileIOSplitMaxSizeKB=64
 global.minOpenFileLimit=0
 #
-################# log相关配置 ###############
+################# Log configuration ###############
 #
-# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
 global.logLevel=0
-# 设置log的路径
+# Log path
 global.logPath=/data/log/curve/
-# 单元测试情况下
+# For unit tests
 # logpath=./runlog/
 #
-############### metric 配置信息 #############
+############### Metric configuration #############
 #
 global.metricDummyServerStartPort=10000
 #
-# session map文件,存储打开文件的filename到path的映射
+# Session map file, which stores the filename-to-path mapping of opened files
 #
 global.sessionMapPath=./session_map.json
diff --git a/conf/snap_client.conf b/conf/snap_client.conf
index a643e44461..427f521663 100644
--- a/conf/snap_client.conf
+++ b/conf/snap_client.conf
@@ -1,29 +1,29 @@
 #
-################### mds一侧配置信息 ##################
+################### MDS-side configuration ##################
 #
-# mds的地址信息,对于mds集群,地址以逗号隔开
+# Address of the MDS; for an MDS cluster, addresses are separated by commas
 mds.listen.addr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__
-# 初始化阶段向mds注册开关,默认为开
+# Whether to register with MDS during initialization, enabled by default
 mds.registerToMDS=false
-# 与mds通信的rpc超时时间
+# RPC timeout for communicating with MDS
 mds.rpcTimeoutMS=500
-# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值
+# Maximum RPC timeout for communicating with MDS; the exponential-backoff timeout cannot exceed this value
# The normal retry times for trigger wait strategy
@@ -36,111 +36,111 @@ mds.maxRetryMsInIOPath=86400000
mds.waitSleepMs=10000
#
-################# metacache配置信息 ################
+################# Metacache configuration ################
#
-# 获取leader的rpc超时时间
+# RPC timeout for getting the leader
metacache.getLeaderTimeOutMS=500
-# 获取leader的backup request超时时间
+# Backup-request timeout for getting the leader
metacache.getLeaderBackupRequestMS=100
-# getleaer backup request使用的load balancer方法
+# Load balancer method used by the get-leader backup request
metacache.getLeaderBackupRequestLbName=rr
-# 获取leader的重试次数
+# Number of retries for getting the leader
metacache.getLeaderRetry=5
-# 获取leader接口每次重试之前需要先睡眠一段时间
+# The get-leader interface sleeps for a while before each retry
metacache.rpcRetryIntervalUS=100000
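The backup request used for getting the leader works roughly as sketched below (a simplification of what brpc's backup request does; names here are made up): ask one replica first, and if it has not answered within getLeaderBackupRequestMS, ask another and take whichever replies first.

from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED

def get_leader_with_backup(peers, get_leader, backup_request_ms=100):
    # Sketch only: fire a backup request if the first peer is slow.
    with ThreadPoolExecutor(max_workers=2) as pool:
        futures = [pool.submit(get_leader, peers[0])]
        done, _ = wait(futures, timeout=backup_request_ms / 1000.0)
        if not done:
            futures.append(pool.submit(get_leader, peers[1]))
            done, _ = wait(futures, return_when=FIRST_COMPLETED)
        return next(iter(done)).result()  # first reply wins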
#
-############### 调度层的配置信息 #############
+############### Scheduling layer configuration #############
#
-# 调度层队列大小,每个文件对应一个队列
-# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。
+# Scheduling layer queue size; each file has its own queue
+# The queue holds asynchronous IO tasks, so its depth affects the overall client throughput
schedule.queueCapacity=1000000
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of worker threads serving the queue
+# A worker thread dequeues an IO task, sends it to the network, and immediately returns for the next task.
+# One task takes roughly 20us-100us from dequeue to the RPC being sent; 20us is the normal case where the leader does not need to be fetched
+# If the leader has to be fetched during sending, it takes around 100us, and a single thread sustains a throughput of 100k-500k tasks per second
+# This performance already meets the requirements
schedule.threadpoolSize=1
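The queue/worker structure described above amounts to the following loop (an illustrative sketch, not the real dispatcher): the worker never waits for the RPC result, which is why a single thread can sustain roughly 100k-500k tasks per second at 20us-100us per dispatch.

import queue

task_queue = queue.Queue(maxsize=1000000)  # schedule.queueCapacity

def worker_loop(send_rpc_async):
    # Sketch only: dequeue an IO task, fire the RPC asynchronously,
    # and immediately go back for the next task; completion is handled
    # elsewhere by the RPC callback.
    while True:
        io_task = task_queue.get()
        send_rpc_async(io_task)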
-# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程
-# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回,
-# 这样libcurve不占用qemu的线程,不阻塞其异步调用
+# Task queue introduced to isolate the QEMU-side thread, since QEMU has only one IO thread
+# When QEMU calls the AIO interface, the call is simply pushed onto the task queue and returns immediately,
+# so libcurve neither occupies QEMU's thread nor blocks its asynchronous calls
isolation.taskQueueCapacity=1000000
-# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程
+# Thread pool size of the isolation task queue; the default is 1 thread
isolation.taskThreadPoolSize=1
#
-################ 与chunkserver通信相关配置 #############
+################ Configuration for communication with chunkservers #############
#
-# 读写接口失败的OP之间重试睡眠
+# Sleep interval between retries of a failed read/write OP
chunkserver.opRetryIntervalUS=100000
-# 失败的OP重试次数
+# Maximum number of retries for a failed OP
chunkserver.opMaxRetry=50
-# 与chunkserver通信的rpc超时时间
+# RPC timeout for communication with chunkservers
chunkserver.rpcTimeoutMS=1000
-# 重试请求之间睡眠最长时间
-# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间
-# 这个时间最大为maxRetrySleepIntervalUs
+# Maximum sleep time between retried requests
+# The sleep time needs to increase when the network is congested or a chunkserver is overloaded
+# It is capped at maxRetrySleepIntervalUs
chunkserver.maxRetrySleepIntervalUS=8000000
-# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略
-# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间
-# 这个时间最大为maxTimeoutMS
+# Maximum RPC timeout for retried requests; the timeout follows an exponential backoff policy
+# The RPC timeout needs to increase when timeouts occur due to network congestion
+# It is capped at maxTimeoutMS
chunkserver.maxRPCTimeoutMS=16000
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# Beyond this value a health check is performed; if the check fails, the chunkserver is marked as unstable
chunkserver.maxStableTimeoutTimes=64
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# Timeout of the health-check request issued after consecutive RPC timeouts on a chunkserver
chunkserver.checkHealthTimeoutMs=100
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on the same server exceeds this value,
+# all chunkservers on that server are marked as unstable
chunkserver.serverStableThreshold=3
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang
-# 真正宕机的情况下,请求重试一定次数后会处理完成
-# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避
+# Heavy pressure on the underlying chunkserver may also trigger the unstable state
+# Because the copyset leader may change, the request timeout is then reset to the default value, which can cause IO to hang
+# If the chunkserver is really down, the request completes after a bounded number of retries
+# If retries keep going on, it is not a downtime case, and the timeout should still follow the exponential backoff logic
+# Once the retry count of a request exceeds this value, its timeout always enters exponential backoff
chunkserver.minRetryTimesForceTimeoutBackoff=5
-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# When an RPC has been retried more than maxRetryTimesBeforeConsiderSuspend times,
+# it is recorded as suspended IO and the metric raises an alarm
chunkserver.maxRetryTimesBeforeConsiderSuspend=20
#
-################# 文件级别配置项 #############
+################# File-level configuration items #############
#
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; each file's inflight RPCs are counted independently
global.fileMaxInFlightRPCNum=64
-# 文件IO下发到底层chunkserver最大的分片KB
+# Maximum split size, in KB, of file IO issued to the underlying chunkservers
global.fileIOSplitMaxSizeKB=64
#
-################# log相关配置 ###############
+################# Log configuration ###############
#
-# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level: INFO=0/WARNING=1/ERROR=2/FATAL=3
global.logLevel=0
-# 设置log的路径
+# Log path
global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
-# 单元测试情况下
+# For unit tests
# logpath=./runlog/
#
-############### metric 配置信息 #############
+############### Metric configuration #############
#
global.metricDummyServerStartPort=9000
@@ -149,7 +149,7 @@ global.metricDummyServerStartPort=9000
global.minOpenFileLimit=0
#
-# session map文件,存储打开文件的filename到path的映射
+# Session map file, storing the mapping from filename to path for opened files
#
global.sessionMapPath=./session_map.json
diff --git a/conf/snapshot_clone_server.conf b/conf/snapshot_clone_server.conf
index 1c043686cd..70a3deb864 100644
--- a/conf/snapshot_clone_server.conf
+++ b/conf/snapshot_clone_server.conf
@@ -1,18 +1,18 @@
#
# curvefs client options
#
-# client配置文件位置
+# Client configuration file location
client.config_path=conf/cs_client.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/snap_client.conf __CURVEADM_TEMPLATE__
-# mds root 用户名
+# mds root username
mds.rootUser=root
-# mds root 密码
+# mds root password
mds.rootPassword=root_password
-# 调用client方法的重试总时间
+# Total retry time for calling client methods
client.methodRetryTimeSec=300
-# 调用client方法重试间隔时间
+# Retry interval for calling client methods
client.methodRetryIntervalMs=5000
-# 日志文件位置
+# Log file location
log.dir=./ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
#
@@ -26,61 +26,61 @@ s3.config_path=./conf/s3.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/s3.conf __
server.address=127.0.0.1:5556 # __CURVEADM_TEMPLATE__ ${service_addr}:${service_port} __CURVEADM_TEMPLATE__
server.subnet=127.0.0.0/24
server.port=5556 # __CURVEADM_TEMPLATE__ ${service_port} __CURVEADM_TEMPLATE__
-# 调用client异步方法重试总时间
+# Total retry time for calling client asynchronous methods
server.clientAsyncMethodRetryTimeSec=300
-# 调用client异步方法重试时间间隔
+# Retry interval for calling client asynchronous methods
server.clientAsyncMethodRetryIntervalMs=5000
-# 快照工作线程数
+# Number of snapshot worker threads
server.snapshotPoolThreadNum=256
-# 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms)
+# Scan period of the snapshot background thread over the waiting and working queues (unit: ms)
server.snapshotTaskManagerScanIntervalMs=1000
-# 转储chunk分片大小
+# Dump chunk split size
# for nos, pls set to 1048576
server.chunkSplitSize=8388608
-# CheckSnapShotStatus调用间隔
+# CheckSnapShotStatus call interval
server.checkSnapshotStatusIntervalMs=1000
-# 最大快照数
+# Maximum number of snapshots
server.maxSnapshotLimit=1024
-# 同时执行转储的线程数
+# Number of threads performing dumps concurrently
server.snapshotCoreThreadNum=64
-# mds session 时间
+# mds session duration
server.mdsSessionTimeUs=5000000
-# 每个线程同时进行ReadChunkSnapshot和转储的快照分片数量
+# Number of snapshot slices each thread processes concurrently for ReadChunkSnapshot and dumping
server.readChunkSnapshotConcurrency=16
# for clone
-# 用于Lazy克隆元数据部分的线程池线程数
+# Number of thread-pool threads used for the metadata part of lazy clones
server.stage1PoolThreadNum=256
-# 用于Lazy克隆数据部分的线程池线程数
+# Number of thread-pool threads used for the data part of lazy clones
server.stage2PoolThreadNum=256
-# 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池线程数
+# Number of thread-pool threads used for non-lazy clones, clone deletion and other control-plane requests
server.commonPoolThreadNum=256
-# CloneTaskManager 后台线程扫描间隔
+# CloneTaskManager background thread scan interval
server.cloneTaskManagerScanIntervalMs=1000
-# clone chunk分片大小
+# Clone chunk split size
# for nos, pls set to 65536
server.cloneChunkSplitSize=1048576
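For a feel of the two split sizes above, assuming the default 16MB chunk (see the curvefs defaults later in this patch), the per-chunk slice counts work out as:

CHUNK_SIZE = 16 * 1024 * 1024      # default curvefs chunk size
DUMP_SPLIT = 8388608               # server.chunkSplitSize (8MB)
CLONE_SPLIT = 1048576              # server.cloneChunkSplitSize (1MB)
print(CHUNK_SIZE // DUMP_SPLIT)    # 2 slices per chunk when dumping a snapshot
print(CHUNK_SIZE // CLONE_SPLIT)   # 16 slices per chunk when cloning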
-# 克隆临时目录
+# Clone temporary directory
server.cloneTempDir=/clone
-# CreateCloneChunk同时进行的异步请求数量
+# Number of concurrent asynchronous CreateCloneChunk requests
server.createCloneChunkConcurrency=64
-# RecoverChunk同时进行的异步请求数量
+# Number of concurrent asynchronous RecoverChunk requests
server.recoverChunkConcurrency=64
-# CloneServiceManager引用计数后台扫描每条记录间隔
+# Interval between records in the CloneServiceManager reference-count background scan
server.backEndReferenceRecordScanIntervalMs=500
-# CloneServiceManager引用计数后台扫描每轮记录间隔
+# Interval between rounds of the CloneServiceManager reference-count background scan
server.backEndReferenceFuncScanIntervalMs=3600000
#
-# etcd相关配置
+# etcd configuration
#
-# etcd地址
+# etcd address
etcd.endpoint=127.0.0.1:2379 # __CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEADM_TEMPLATE__
-# client建立连接的超时时间
+# Timeout for the client to establish a connection
etcd.dailtimeoutMs=5000
-# client进行put/get/txn等操作的超时时间
+# Timeout for client put/get/txn and other operations
etcd.operation.timeoutMs=5000
-# client操作失败可以重试的次数
+# Number of times a failed client operation can be retried
etcd.retry.times=3
# wait dlock timeout
etcd.dlock.timeoutMs=10000
@@ -93,20 +93,20 @@ etcd.auth.password=
#
-# leader选举相关参数
+# Leader election parameters
#
-# leader lock名称
+# Leader lock name
leader.campagin.prefix=snapshotcloneserverleaderlock
-# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s)
-# 该值和etcd集群election timeout相关.
-# etcd的server端限制了该值最小为1.5 * election timeout
-# 建议设置etcd集群election timeout为1s
+# A session is created during the leader election; the unit is seconds (the go-side interface takes this value in seconds)
+# The value is related to the etcd cluster's election timeout:
+# the etcd server requires it to be at least 1.5 * election timeout
+# Setting the etcd cluster election timeout to 1s is recommended
leader.session.intersec=5
-# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间
-# 内未当选leader会返回错误
+# Timeout of the leader election: if 0, an unsuccessful campaign blocks forever; if greater than 0,
+# an error is returned when no leader is elected within electionTimeoutMs
leader.election.timeoutms=0
#
-# dummyserver相关配置
+# Dummy server configuration
#
server.dummy.listen.port=8081 # __CURVEADM_TEMPLATE__ ${service_dummy_port} __CURVEADM_TEMPLATE__
diff --git a/conf/tools.conf b/conf/tools.conf
index 545297d92c..42be38e27c 100644
--- a/conf/tools.conf
+++ b/conf/tools.conf
@@ -1,16 +1,16 @@
-# mds地址
+# mds address
mdsAddr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__
# mds dummy port
mdsDummyPort=6700 # __CURVEADM_TEMPLATE__ ${cluster_mds_dummy_port} __CURVEADM_TEMPLATE__
-# 发送rpc的超时时间
+# Timeout for sending RPCs
rpcTimeout=500
-# rpc重试次数
+# Number of RPC retries
rpcRetryTimes=5
# the rpc concurrency to chunkserver
rpcConcurrentNum=10
-# etcd地址
+# etcd address
etcdAddr=127.0.0.1:2379 # __CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEADM_TEMPLATE__
-# snapshot clone server 地址
+# Snapshot clone server address
snapshotCloneAddr= # __CURVEADM_TEMPLATE__ ${cluster_snapshotclone_addr} __CURVEADM_TEMPLATE__
# snapshot clone server dummy port
snapshotCloneDummyPort= # __CURVEADM_TEMPLATE__ ${cluster_snapshotclone_dummy_port} __CURVEADM_TEMPLATE__
diff --git a/curve-ansible/client.ini b/curve-ansible/client.ini
index 8eacc6270c..ecf308581d 100644
--- a/curve-ansible/client.ini
+++ b/curve-ansible/client.ini
@@ -1,7 +1,7 @@
[client]
localhost ansible_ssh_host=127.0.0.1
-# 仅用于生成配置中的mds地址
+# Only used to generate the mds addresses in the configuration
[mds]
localhost ansible_ssh_host=127.0.0.1
diff --git a/curve-ansible/common_tasks/wait_copysets_status_healthy.yml b/curve-ansible/common_tasks/wait_copysets_status_healthy.yml
index 7121b28042..8200229894 100644
--- a/curve-ansible/common_tasks/wait_copysets_status_healthy.yml
+++ b/curve-ansible/common_tasks/wait_copysets_status_healthy.yml
@@ -15,7 +15,7 @@
# limitations under the License.
#
-# 等待copyset健康,每个一段时间检查一次,一共检查若干次,成功则break,如果一直不健康则报错
+# Wait for the copysets to become healthy: check once per interval, up to a fixed number of times; break on success, and report an error if they remain unhealthy
- name: check copysets status until healthy
shell: curve_ops_tool copysets-status --confPath={{ curve_ops_tool_config }} | grep "{{ defined_copysets_status }}"
register: result
diff --git a/curve-ansible/group_vars/mds.yml b/curve-ansible/group_vars/mds.yml
index f575cb79d5..689b1414eb 100644
--- a/curve-ansible/group_vars/mds.yml
+++ b/curve-ansible/group_vars/mds.yml
@@ -15,7 +15,7 @@
# limitations under the License.
#
-# 集群拓扑信息
+# Cluster topology information
cluster_map:
servers:
- name: server1
diff --git a/curve-ansible/roles/generate_config/defaults/main.yml b/curve-ansible/roles/generate_config/defaults/main.yml
index 4d7dfe5514..36d14e676b 100644
--- a/curve-ansible/roles/generate_config/defaults/main.yml
+++ b/curve-ansible/roles/generate_config/defaults/main.yml
@@ -15,7 +15,7 @@
# limitations under the License.
# -# 通用配置 +# General configuration curve_root_username: root curve_root_password: root_password curve_file_timeout_s: 30 @@ -25,7 +25,7 @@ min_file_length: 10737418240 max_file_length: 21990232555520 file_expired_time_us: 5000000 -# mds配置默认值 +# Mds configuration default values mds_etcd_dailtimeout_ms: 5000 mds_etcd_operation_timeout_ms: 5000 mds_etcd_retry_times: 3 @@ -94,7 +94,7 @@ throttle_bps_min_in_MB: 120 throttle_bps_max_in_MB: 260 throttle_bps_per_GB_in_MB: 0.3 -# chunkserver配置默认值 +# Chunkserver Configuration Default Values chunkserver_enable_external_server: true chunkserver_meta_page_size: 4096 chunkserver_location_limit: 3000 @@ -165,7 +165,7 @@ chunkserver_trash_expire_after_sec: 300 chunkserver_trash_scan_period_sec: 120 chunkserver_common_log_dir: ./runlog/ -# 快照克隆配置默认值 +# Default values for snapshot clone configuration snap_client_config_path: /etc/curve/snap_client.conf snap_client_method_retry_time_sec: 120 snap_client_method_retry_interval_ms: 5000 @@ -201,7 +201,7 @@ snap_leader_session_inter_sec: 5 snap_leader_election_timeout_ms: 0 snap_nginx_addr: 127.0.0.1:5555 -# client配置默认值 +# Default values for client configuration client_register_to_mds: true client_mds_rpc_timeout_ms: 500 client_mds_max_rpc_timeout_ms: 2000 @@ -244,7 +244,7 @@ client_discard_enable: true client_discard_granularity: 4096 client_discard_task_delay_ms: 60000 -# nebd默认配置 +# Nebd default configuration client_config_path: /etc/curve/client.conf nebd_client_sync_rpc_retry_times: 50 nebd_client_rpc_retry_inverval_us: 100000 @@ -259,7 +259,7 @@ nebd_server_heartbeat_timeout_s: 30 nebd_server_heartbeat_check_interval_ms: 3000 nebd_server_response_return_rpc_when_io_error: false -# s3配置默认值 +# Default values for s3 configuration s3_http_scheme: 0 s3_verify_ssl: false s3_user_agent_conf: S3 Browser @@ -276,15 +276,15 @@ s3_throttle_bpsTotalLimit: 1280 s3_throttle_bpsReadLimit: 1280 s3_throttle_bpsWriteLimit: 1280 -# 运维工具默认值 +# Default values for operation and maintenance tools tool_rpc_timeout: 500 tool_rpc_retry_times: 5 tool_rpc_concurrent_num: 10 -# snapshotclone_nginx配置 +# snapshotclone_nginx configuration nginx_docker_internal_port: 80 -# etcd默认配置 +# ETCD default configuration etcd_snapshot_count: 10000 etcd_heartbeat_interval: 100 etcd_election_timeout: 1000 diff --git a/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2 b/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2 index 0e7e65e9cc..ae43478df7 100644 --- a/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2 +++ b/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2 @@ -1,24 +1,24 @@ # # Global settings # -# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.ip={{ ansible_ssh_host }} global.port={{ chunkserver_base_port }} global.subnet={{ chunkserver_subnet }} global.enable_external_server={{ chunkserver_enable_external_server }} global.external_ip={{ ansible_ssh_host }} global.external_subnet={{ chunkserver_external_subnet }} -# chunk大小,一般16MB +# Chunk size, usually 16MB global.chunk_size={{ chunk_size }} -# chunk 元数据页大小,一般4KB +# Chunk metadata page size, usually 4KB global.meta_page_size={{ chunkserver_meta_page_size }} -# clone chunk允许的最长location长度 +# The maximum allowed location length for clone chunks global.location_limit={{ chunkserver_location_limit }} # # MDS settings # -#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777 +# Supports MDS multiple addresses, separated by commas 127.0.0.1:6666,127.0.0.1:7777 {% set mds_address=[] -%} {% for host in 
groups.mds -%}
{% set mds_ip = hostvars[host].ansible_ssh_host -%}
@@ -26,30 +26,30 @@ global.location_limit={{ chunkserver_location_limit }}
{% set _ = mds_address.append("%s:%s" % (mds_ip, mds_port)) -%}
{% endfor -%}
mds.listen.addr={{ mds_address | join(',') }}
-# 向mds注册的最大重试次数
+# Maximum number of retries when registering with the mds
mds.register_retries={{ chunkserver_register_retries }}
-# 向mds注册的rpc超时时间,一般1000ms
+# RPC timeout for registering with the mds, usually 1000ms
mds.register_timeout={{ chunkserver_register_timeout }}
-# 向mds发送心跳的间隔,一般10s
+# Interval between heartbeats sent to the mds, usually 10s
mds.heartbeat_interval={{ chunkserver_heartbeat_interval }}
-# 向mds发送心跳的rpc超时间,一般1000ms
+# RPC timeout of heartbeats sent to the mds, usually 1000ms
mds.heartbeat_timeout={{ chunkserver_heartbeat_timeout }}
#
# Chunkserver settings
#
-# chunkserver主目录
+# Chunkserver home directory
chunkserver.stor_uri={{ chunkserver_stor_uri }}
-# chunkserver元数据文件
+# Chunkserver metadata file
chunkserver.meta_uri={{ chunkserver_meta_uri }}
-# disk类型
+# Disk type
chunkserver.disk_type={{ chunkserver_disk_type }}
-# raft内部install snapshot带宽上限,一般20MB
+# Bandwidth limit for raft-internal install snapshot, usually 20MB
chunkserver.snapshot_throttle_throughput_bytes={{ chunkserver_snapshot_throttle_throughput_bytes }}
-# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB,
-# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个
-# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而
-# 不是20MB的带宽
+# Check cycles allow finer-grained bandwidth control: with snapshotThroughputBytes=100MB and
+# check cycles=10, the budget is 10MB per 1/10 second and unused budget does not accumulate;
+# the 10MB of the first 1/10 second expires with that window, so the second 1/10 second still
+# gets only 10MB of bandwidth, not 20MB
chunkserver.snapshot_throttle_check_cycles={{ chunkserver_snapshot_throttle_check_cycles }}
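A rough sketch of the per-cycle budget just described (illustrative only, not the chunkserver implementation): each 1/cycles-second window gets an equal share of the throughput budget, and whatever is unused expires with the window.

import time

def throttled_copy(read_slice, throughput_bytes=100 * 2**20, cycles=10):
    # Sketch only: read_slice(n) is an assumed callable returning up to
    # n bytes of snapshot data, or b"" when done.
    budget = throughput_bytes // cycles   # bytes allowed per window
    window = 1.0 / cycles                 # window length in seconds
    while True:
        start, used = time.monotonic(), 0
        while used < budget:
            data = read_slice(min(budget - used, 64 * 1024))
            if not data:
                return
            used += len(data)
        # Budget exhausted: wait out the rest of the window; unused
        # time is not carried over into the next window.
        time.sleep(max(0.0, window - (time.monotonic() - start)))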
chunkserver.max_inflight_requests={{ chunkserver_max_inflight_requests }}
@@ -64,39 +64,39 @@ test.testcopyset_conf={{ chunkserver_test_testcopyset_conf }}
#
# Copyset settings
#
-# 是否检查任期,一般检查
+# Whether to check the raft term; usually enabled
copyset.check_term={{ chunkserver_copyset_check_term }}
-# 是否关闭raft配置变更的服务,一般不关闭
+# Whether to disable the raft configuration-change service; usually not disabled
copyset.disable_cli={{ chunkserver_copyset_disable_cli }}
copyset.log_applied_task={{ chunkserver_copyset_log_applied_task }}
-# raft选举超时时间,一般是5000ms
+# Raft election timeout, usually 5000ms
copyset.election_timeout_ms={{ chunkserver_copyset_election_timeout_ms }}
-# raft打快照间隔,一般是1800s,也就是30分钟
+# Raft snapshot interval, usually 1800s, i.e. 30 minutes
copyset.snapshot_interval_s={{ chunkserver_copyset_snapshot_interval_s }}
-# add一个节点,add的节点首先以类似learner的角色拷贝数据
-# 在跟leader差距catchup_margin个entry的时候,leader
-# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定
-# 会commit&apply,catchup_margin较小可以大概率确保learner
-# 后续很快可以加入复制组
+# When a node is added, it first copies data in a learner-like role.
+# Once its gap behind the leader is within catchup_margin entries, the leader tries to commit
+# the configuration-change entry (generally an entry submitted this way will certainly be
+# committed and applied). A small catchup_margin makes it very likely that the learner can
+# join the replication group quickly afterwards.
copyset.catchup_margin={{ chunkserver_copyset_catchup_margin }}
-# copyset chunk数据目录
+# Copyset chunk data directory
copyset.chunk_data_uri={{ chunkserver_copyset_chunk_data_uri }}
-# raft wal log目录
+# Raft WAL log directory
copyset.raft_log_uri={{ chunkserver_copyset_raft_log_uri }}
-# raft元数据目录
+# Raft metadata directory
copyset.raft_meta_uri={{ chunkserver_copyset_raft_meta_uri }}
-# raft snapshot目录
+# Raft snapshot directory
copyset.raft_snapshot_uri={{ chunkserver_copyset_raft_snapshot_uri }}
-# copyset回收目录
+# Copyset recycle directory
copyset.recycler_uri={{ chunkserver_copyset_recycler_uri }}
-# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制
+# Concurrency threshold for loading copysets at chunkserver startup; 0 means no limit
copyset.load_concurrency={{ chunkserver_copyset_load_concurrency }}
-# 检查copyset是否加载完成出现异常时的最大重试次数
+# Maximum number of retries when an exception occurs while checking whether copysets have finished loading
copyset.check_retrytimes={{ chunkserver_copyset_check_retrytimes }}
-# 当前peer的applied_index与leader上的committed_index差距小于该值
-# 则判定copyset已经加载完成
+# A copyset is considered loaded once the gap between the applied_index of the current peer
+# and the committed_index on the leader is smaller than this value
copyset.finishload_margin={{ chunkserver_copyset_finishload_margin }}
-# 循环判定copyset是否加载完成的内部睡眠时间
+# Internal sleep time of the loop that checks whether copysets have finished loading
copyset.check_loadmargin_interval_ms={{ chunkserver_copyset_check_loadmargin_interval_ms }}
# scan copyset interval
copyset.scan_interval_sec={{ chunkserver_copyset_scan_interval_sec }}
@@ -115,26 +115,26 @@ copyset.check_syncing_interval_ms={{ chunkserver_copyset_check_syncing_interval_
#
# Clone settings
#
-# 禁止使用curveclient
+# Disable use of the curve client
clone.disable_curve_client={{ disable_snapshot_clone }}
-# 禁止使用s3adapter
+# Disable use of the s3 adapter
clone.disable_s3_adapter={{ disable_snapshot_clone }}
-# 克隆的分片大小,一般1MB
+# Clone slice size, usually 1MB
clone.slice_size={{ chunkserver_clone_slice_size }}
-# 读clone chunk时是否需要paste到本地
-# 该配置对recover chunk请求类型无效
+# Whether reading a clone chunk should also paste the data locally
+# This option has no effect on recover-chunk requests
clone.enable_paste={{ chunkserver_clone_enable_paste }}
-# 克隆的线程数量
+# Number of clone threads
clone.thread_num={{ chunkserver_clone_thread_num }}
-# 克隆的队列深度
+# Clone queue depth
clone.queue_depth={{ chunkserver_clone_queue_depth }}
-# curve用户名
+# curve root username
curve.root_username={{ curve_root_username }}
-# curve密码
+# curve root password
curve.root_password={{ curve_root_password }}
-# client配置文件
+# Client configuration file
curve.config_path={{ chunkserver_client_config_path }}
-# s3配置文件
+# S3 configuration file
s3.config_path={{ chunkserver_s3_config_path }}
# Curve File time to live
curve.curve_file_timeout_s={{ curve_file_timeout_s }}
@@ -142,7 +142,7 @@ curve.curve_file_timeout_s={{ curve_file_timeout_s }}
#
# Local FileSystem settings
#
-# 是否开启使用renameat2,ext4内核3.15以后开始支持
+# Whether to enable renameat2; supported by the ext4 kernel since 3.15
fs.enable_renameat2={{ chunkserver_fs_enable_renameat2 }}
#
@@ -163,27 +163,27 @@ storeng.sync_write={{ chunkserver_storeng_sync_write }}
#
# Concurrent apply module
#
-# 并发模块的并发度,一般是10
+# Concurrency of the concurrent apply module, usually 10
wconcurrentapply.size={{ chunkserver_wconcurrentapply_size }}
-# 并发模块线程的队列深度
+# Queue depth of concurrent module threads wconcurrentapply.queuedepth={{ chunkserver_wconcurrentapply_queuedepth }} -# 并发模块读线程的并发度,一般是5 +# The concurrency of concurrent module read threads is generally 5 rconcurrentapply.size={{ chunkserver_rconcurrentapply_size }} -# 并发模块读线程的队列深度 +# Queue depth of concurrent module read threads rconcurrentapply.queuedepth={{ chunkserver_rconcurrentapply_queuedepth }} # # Chunkfile pool # -# 是否开启从chunkfilepool获取chunk,一般是true +# Whether to enable obtaining chunks from chunkfilepool, usually true chunkfilepool.enable_get_chunk_from_pool={{ chunkserver_format_disk }} -# chunkfilepool目录 +# chunkfilepool directory chunkfilepool.chunk_file_pool_dir={{ chunkserver_chunkfilepool_chunk_file_pool_dir }} -# chunkfilepool meta文件路径 +# chunkfilepool meta file path #chunkfilepool.meta_path=./chunkfilepool.meta -# chunkfilepool meta文件大小 +# chunkfilepool meta file size chunkfilepool.cpmeta_file_size={{ chunkserver_chunkfilepool_cpmeta_file_size }} -# chunkfilepool get chunk最大重试次数 +# chunkfilepool get chunk maximum retry count chunkfilepool.retry_times=5 # Enable clean chunk chunkfilepool.clean.enable={{ chunkserver_chunkfilepool_clean_enable }} @@ -195,34 +195,34 @@ chunkfilepool.clean.throttle_iops={{ chunkserver_chunkfilepool_clean_throttle_io # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# Does walpool share chunkfilepool? If true, the following configuration is invalid walfilepool.use_chunk_file_pool={{ walfilepool_use_chunk_file_pool }} -# 是否开启从walfilepool获取chunk,一般是true +# Whether to enable obtaining chunks from walfilepool, usually true walfilepool.enable_get_segment_from_pool={{ chunkserver_format_disk }} -# walpool目录 +# Walpool directory walfilepool.file_pool_dir={{ chunkserver_walfilepool_file_pool_dir }} -# walpool meta文件路径 +# Walpool Meta File Path walfilepool.meta_path={{ chunkserver_walfilepool_meta_path }} -# walpool meta文件大小 +# Walpool Meta File Size walfilepool.segment_size={{ chunkserver_walfilepool_segment_size }} -# WAL metapage大小 +# WAL metapage size walfilepool.metapage_size={{ chunkserver_walfilepool_metapage_size }} -# WAL filepool 元数据文件大小 +# WAL filepool metadata file size walfilepool.meta_file_size={{ chunkserver_walfilepool_meta_file_size }} -# WAL filepool get chunk最大重试次数 +# WAL filepool get chunk maximum retry count walfilepool.retry_times={{ chunkserver_walfilepool_retry_times }} # # trash settings # -# chunkserver回收数据彻底删除的过期时间 +# The expiration time for chunkserver to completely delete data for recycling trash.expire_afterSec={{ chunkserver_trash_expire_after_sec }} -# chunkserver检查回收数据过期时间的周期 +# Chunkserver checks the cycle of recycling data expiration time trash.scan_periodSec={{ chunkserver_trash_scan_period_sec }} # common option # -# chunkserver 日志存放文件夹 +# Chunkserver log storage folder chunkserver.common.logDir={{ chunkserver_common_log_dir }} -# 单元测试情况下 +# In the case of unit testing # chunkserver.common.logDir=./runlog/ diff --git a/curve-ansible/roles/generate_config/templates/client.conf.j2 b/curve-ansible/roles/generate_config/templates/client.conf.j2 index 08d4413780..492ac270bf 100644 --- a/curve-ansible/roles/generate_config/templates/client.conf.j2 +++ b/curve-ansible/roles/generate_config/templates/client.conf.j2 @@ -1,8 +1,8 @@ # -################### mds一侧配置信息 ################## +################### MDS side configuration information################## # -# mds的地址信息,对于mds集群,地址以逗号隔开 +# Address information for mds, separated by commas for mds clusters {% set mds_address=[] -%} {% for host in groups.mds -%} 
{% set mds_ip = hostvars[host].ansible_ssh_host -%} @@ -11,25 +11,25 @@ {% endfor -%} mds.listen.addr={{ mds_address | join(',') }} -# 初始化阶段向mds注册开关,默认为开 +# Register switch with mds during initialization phase, default to on mds.registerToMDS={{ client_register_to_mds }} -# 与mds通信的rpc超时时间 +# RPC timeout for communication with mds mds.rpcTimeoutMS={{ client_mds_rpc_timeout_ms }} -# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值 +# The maximum timeout for rpc communication with mds, and the timeout for exponential backoff cannot exceed this value mds.maxRPCTimeoutMS={{ client_mds_max_rpc_timeout_ms }} -# 与mds通信重试总时间 +# Total retry time for communication with mds mds.maxRetryMS={{ client_mds_max_retry_ms }} -# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数 +# Switch if the number of consecutive retries on the current mds exceeds this limit, which includes the number of timeout retries mds.maxFailedTimesBeforeChangeMDS={{ client_mds_max_failed_times_before_change_mds }} -# 与MDS一侧保持一个lease时间内多少次续约 +# How many renewals are there within a lease period with MDS mds.refreshTimesPerLease={{ client_mds_refresh_times_per_lease }} -# mds RPC接口每次重试之前需要先睡眠一段时间 +# The mds RPC interface requires a period of sleep before each retry mds.rpcRetryIntervalUS={{ client_mds_rpc_retry_interval_us }} # The normal retry times for trigger wait strategy @@ -42,104 +42,104 @@ mds.maxRetryMsInIOPath={{ client_mds_max_retry_ms_in_io_path }} mds.waitSleepMs={{ client_mds_wait_sleep_ms }} # -################# metacache配置信息 ################ +################# Metacache Configuration Information################ # -# 获取leader的rpc超时时间 +# Obtain the rpc timeout of the leader metacache.getLeaderTimeOutMS={{ client_metacache_get_leader_timeout_ms }} -# 获取leader的重试次数 +# Retrieve the number of retries for the leader metacache.getLeaderRetry={{ client_metacache_get_leader_retry }} -# 获取leader接口每次重试之前需要先睡眠一段时间 +# Obtaining the leader interface requires a period of sleep before each retry metacache.rpcRetryIntervalUS={{ client_metacache_rpc_retry_interval_us }} # -############### 调度层的配置信息 ############# +###############Configuration information of the scheduling layer############# # -# 调度层队列大小,每个文件对应一个队列 -# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。 +# Scheduling layer queue size, with one queue for each file +# The depth of the scheduling queue can affect the overall throughput of the client, as it stores asynchronous IO tasks.. schedule.queueCapacity={{ client_schedule_queue_capacity }} -# 队列的执行线程数量 -# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从 -# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候 -# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w -# 性能已经满足需求 +# Number of execution threads in the queue +# What the executing thread needs to do is to retrieve the IO, then send it to the network and return to retrieve the next network task. 
A task starts from +# The RPC request is approximately (20us-100us) from the time the queue is retrieved to the time it is sent, and 20us is the normal time when it is not necessary to obtain a leader +# If a leader needs to be obtained during sending, the time will be around 100us, and the throughput of one thread will be between 10w-50w +# The performance has met the requirements schedule.threadpoolSize={{ client_schedule_threadpool_size }} -# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程 -# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回, -# 这样libcurve不占用qemu的线程,不阻塞其异步调用 +# To isolate the task queue introduced by the QEMU side thread, as there is only one IO thread on the QEMU side +# When the QEMU side calls the AIO interface, it directly pushes the call to the task queue and returns, +# This way, libcurve does not occupy QEMU's threads and does not block its asynchronous calls isolation.taskQueueCapacity={{ client_isolation_task_queue_capacity }} -# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程 +# The size of the task queue thread pool for isolating QEMU threads, with a default value of 1 thread isolation.taskThreadPoolSize={{ client_isolation_task_thread_pool_size }} # -################ 与chunkserver通信相关配置 ############# +################Configuration related to communication with chunkserver############# # -# 读写接口失败的OP之间重试睡眠 +# Retrying sleep between OPs with failed read/write interfaces chunkserver.opRetryIntervalUS={{ client_chunkserver_op_retry_interval_us }} -# 失败的OP重试次数 +# Number of failed OP retries chunkserver.opMaxRetry={{ client_chunkserver_op_max_retry }} -# 与chunkserver通信的rpc超时时间 +# RPC timeout for communication with chunkserver chunkserver.rpcTimeoutMS={{ client_chunkserver_rpc_timeout_ms }} -# 开启基于appliedindex的读,用于性能优化 +# Enable reading based on appliedindex for performance optimization chunkserver.enableAppliedIndexRead={{ client_chunkserver_enable_applied_index_read }} -# 重试请求之间睡眠最长时间 -# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间 -# 这个时间最大为maxRetrySleepIntervalUs +# Maximum sleep time between retry requests +# Because when the network is congested or the chunkserver is overloaded, it is necessary to increase sleep time +# The maximum time for this is maxRetrySleepIntervalUs chunkserver.maxRetrySleepIntervalUS={{ client_chunkserver_max_retry_sleep_interval_us }} -# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略 -# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间 -# 这个时间最大为maxTimeoutMS +# The maximum timeout rpc time for retry requests, which follows an exponential backoff strategy +# Because timeout occurs when the network is congested, it is necessary to increase the RPC timeout time +# The maximum time for this is maxTimeoutMS chunkserver.maxRPCTimeoutMS={{ client_chunkserver_max_rpc_timeout_ms }} -# 同一个chunkserver连续超时上限次数 -# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable +# Maximum number of consecutive timeouts for the same chunkserver +# If this value is exceeded, a health check will be conducted, and if the health check fails, it will be marked as unstable chunkserver.maxStableTimeoutTimes={{ client_chunkserver_max_stable_timeout_times }} -# chunkserver上rpc连续超时后,健康检查请求的超时间 +# The timeout of health check requests after consecutive RPC timeouts on chunkserver chunkserver.checkHealthTimeoutMs={{ client_chunkserver_check_health_timeout_ms }} -# 同一个server上unstable的chunkserver数量超过这个值之后 -# 所有的chunkserver都会标记为unstable +# After the number of unstable chunkservers on the same server exceeds this value +# All chunkservers will be marked as unstable chunkserver.serverStableThreshold={{ client_chunkserver_server_stable_threshold }} -# 当底层chunkserver压力大时,可能也会触发unstable 
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang -# 真正宕机的情况下,请求重试一定次数后会处理完成 -# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑 -# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避 +# When the underlying chunkserver is under high pressure, unstable may also be triggered +# Due to copyset leader may change, the request timeout time will be set to the default value, resulting in IO hang +# In the case of real downtime, the request will be processed after a certain number of retries +# If you keep trying again, it's not a downtime situation, and at this point, the timeout still needs to enter the exponential backoff logic +# When the number of retries for a request exceeds this value, its timeout must enter exponential backoff chunkserver.minRetryTimesForceTimeoutBackoff={{ client_chunkserver_min_retry_times_force_timeout_backoff }} -# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候 -# 记为悬挂IO,metric会报警 +# When an RPC retry exceeds maxRetryTimesBeforeConsiderSuspend +# Record as suspended IO, metric will alarm chunkserver.maxRetryTimesBeforeConsiderSuspend={{ client_chunkserver_max_retry_times_before_consider_suspend }} # -################# 文件级别配置项 ############# +#################File level configuration items############# # -# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立 +# Libcurve allows for the maximum number of unreturned rpcs in the underlying rpc scheduling, with each file's inflight RPC being independent global.fileMaxInFlightRPCNum={{ client_file_max_inflight_rpc_num }} -# 文件IO下发到底层chunkserver最大的分片KB +# The maximum sharding KB for file IO distribution to the underlying chunkserver global.fileIOSplitMaxSizeKB={{ client_file_io_split_max_size_kb }} # -################# log相关配置 ############### +################# Log related configuration############### # -# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.logLevel={{ client_log_level }} -# 设置log的路径 +# Set the path of the log global.logPath={{ client_log_path }} -# 单元测试情况下 +# In the case of unit testing # logpath=./runlog/ # @@ -151,15 +151,15 @@ closefd.timeout={{ client_closefd_timeout_sec }} closefd.timeInterval={{ client_closefd_time_interval_sec }} # -############### metric 配置信息 ############# +############### Metric configuration information############# # global.metricDummyServerStartPort={{ client_metric_dummy_server_start_port }} -# 是否关闭健康检查: true/关闭 false/不关闭 +# Whether to turn off health check: true/turn off false/do not turn off global.turnOffHealthCheck={{ client_turn_off_health_check }} # -# session map文件,存储打开文件的filename到path的映射 +# Session map file, storing the mapping from filename to path of the opened file # global.sessionMapPath={{ client_session_map_path }} diff --git a/curve-ansible/roles/generate_config/templates/mds.conf.j2 b/curve-ansible/roles/generate_config/templates/mds.conf.j2 index 13040fa9ea..7e9b8f39b1 100644 --- a/curve-ansible/roles/generate_config/templates/mds.conf.j2 +++ b/curve-ansible/roles/generate_config/templates/mds.conf.j2 @@ -1,5 +1,5 @@ # -# mds服务端口 +# Mds service port # mds.listen.addr={{ ansible_ssh_host }}:{{ mds_port }} @@ -8,9 +8,9 @@ global.subnet={{ mds_subnet }} global.port={{ mds_port }} # -# etcd相关配置 +# ETCD related configurations # -# etcd地址 +# ETCD address {% set etcd_address=[] -%} {% for host in groups.etcd -%} {% set etcd_ip = hostvars[host].ansible_ssh_host -%} @@ -19,11 +19,11 @@ global.port={{ mds_port }} {% endfor -%} mds.etcd.endpoint={{ etcd_address | join(',') }} -# client建立连接的超时时间 +# The timeout period for establishing a connection with a client 
mds.etcd.dailtimeoutMs={{ mds_etcd_dailtimeout_ms }} -# client进行put/get/txn等操作的超时时间 +# The timeout period for client to perform put/get/txn and other operations mds.etcd.operation.timeoutMs={{ mds_etcd_operation_timeout_ms }} -# client操作失败可以重试的次数 +# The number of times a client operation failed and can be retried mds.etcd.retry.times={{ mds_etcd_retry_times }} # wait dlock timeout mds.etcd.dlock.timeoutMs={{ mds_etcd_dlock_timeout_ms }} @@ -31,68 +31,68 @@ mds.etcd.dlock.timeoutMs={{ mds_etcd_dlock_timeout_ms }} mds.etcd.dlock.ttlSec={{ mds_etcd_dlock_ttl_sec }} # -# segment分配量统计相关配置 +# Configuration related to segment allocation statistics # -# 将内存中的数据持久化到etcd的间隔, 单位ms +# The interval between persisting data in memory to ETCD, in milliseconds mds.segment.alloc.periodic.persistInterMs={{ mds_segment_alloc_periodic_persist_inter_ms }} -# 出错情况下的重试间隔,单位ms +# The retry interval in ms in case of an error mds.segment.alloc.retryInterMs={{ mds_segment_alloc_retry_inter_ms }} mds.segment.discard.scanIntevalMs={{ mds_segment_discard_scan_interval_ms }} -# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s) -# 该值和etcd集群election timeout相关. -# etcd的server端限制了该值最小为1.5 * election timeout -# 建议设置etcd集群election timeout为1s +# During the leader election, a session is created in seconds (the unit of the value for the interface of the go code is s) +# This value is related to the ETCD cluster selection timeout +# The server side of ETCD limits this value to a minimum of 1.5 * election timeout +# Suggest setting the ETCD cluster selection timeout to 1 second mds.leader.sessionInterSec={{ mds_leader_session_inter_sec }} -# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间 -# 内未当选leader会返回错误 +# The timeout period for the leader election. If it is 0 and the election is unsuccessful, it will continue to block. 
If it is greater than 0, an error is returned when no leader is elected within electionTimeoutMs
mds.leader.electionTimeoutMs={{ mds_leader_election_timeout_ms }}
#
-# scheduler相关配置
+# Scheduler configuration
#
-# copysetScheduler开关
+# copysetScheduler switch
mds.enable.copyset.scheduler={{ mds_enable_copyset_scheduler }}
-# leaderScheduler开关
+# leaderScheduler switch
mds.enable.leader.scheduler={{ mds_enable_leader_scheduler }}
-# recoverScheduler开关
+# recoverScheduler switch
mds.enable.recover.scheduler={{ mds_enable_recover_scheduler }}
-# replicaScheduler开关
+# replicaScheduler switch
mds.enable.replica.scheduler={{ mds_enable_replica_scheduler }}
# Scan scheduler switch
mds.enable.scan.scheduler={{ mds_enable_scan_scheduler }}
-# copysetScheduler 轮次间隔,单位是s
+# copysetScheduler round interval, in seconds
mds.copyset.scheduler.intervalSec={{ mds_copyset_scheduler_interval_sec }}
-# replicaScheduler 轮次间隔,单位是s
+# replicaScheduler round interval, in seconds
mds.replica.scheduler.intervalSec={{ mds_replica_scheduler_interval_sec }}
-# leaderScheduler 轮次间隔,单位是s
+# leaderScheduler round interval, in seconds
mds.leader.scheduler.intervalSec={{ mds_leader_scheduler_interval_sec }}
-# recoverScheduler 轮次间隔,单位是s
+# recoverScheduler round interval, in seconds
mds.recover.scheduler.intervalSec={{ mds_recover_scheduler_interval_sec }}
# Scan scheduler run interval (seconds)
mds.scan.scheduler.intervalSec={{ mds_scan_scheduler_interval_sec }}
-# 每块磁盘上operator的并发度
+# Operator concurrency on each disk
mds.schduler.operator.concurrent={{ mds_schduler_operator_concurrent }}
-# leader变更超时时间, 超时后mds从内存移除该operator
+# Timeout of a leader transfer; once it expires, the mds removes the operator from memory
mds.schduler.transfer.limitSec={{ mds_schduler_transfer_limit_sec }}
-# 减一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for removing a replica; once it expires, the mds removes the operator from memory
mds.scheduler.remove.limitSec={{ mds_scheduler_remove_limit_sec }}
-# 增加一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for adding a replica; once it expires, the mds removes the operator from memory
mds.scheduler.add.limitSec={{ mds_scheduler_add_limit_sec }}
-# change一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for changing a replica; once it expires, the mds removes the operator from memory
mds.scheduler.change.limitSec={{ mds_scheduler_change_limit_sec }}
# Scan operator timeout (seconds)
mds.scheduler.scan.limitSec={{ mds_scheduler_scan_limit_sec }}
-# copyset数量极差不能超过均值的百分比
+# The range of copyset counts must not exceed this percentage of the mean
mds.scheduler.copysetNumRangePercent={{ mds_scheduler_copyset_mum_range_percent }}
-# chunkserver上copyset的scatte-rwidth不能超过最小值的百分比
+# The scatter-width of copysets on a chunkserver must not exceed the minimum by more than this percentage
mds.schduler.scatterWidthRangePerent={{ mds_schduler_scatterwidth_range_percent }}
-# 一个server上超过一定数量的chunkserver offline, 不做恢复
+# If more than this number of chunkservers on one server are offline, no recovery is performed
mds.chunkserver.failure.tolerance={{ mds_chunkserver_failure_tolerance }}
-# chunkserver启动coolingTimeSec_后才可以作为target leader, 单位是s
-# TODO(lixiaocui): 续得一定程度上与快照的时间间隔方面做到相关
+# A chunkserver can be chosen as a target leader only coolingTimeSec_ after startup, in seconds
+# TODO(lixiaocui): this should be made somewhat related to the snapshot interval
mds.scheduler.chunkserver.cooling.timeSec={{ mds_scheduler_chunkserver_cooling_time_sec }}
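The per-type operator timeouts above are used for bookkeeping along these lines (a sketch with made-up field names, not the mds source): an operator that outlives the limit for its type is dropped from memory.

import time

def evict_expired(operators, limits_sec):
    # Sketch only: operators are dicts like
    # {"type": "transfer", "create_time": <monotonic seconds>};
    # limits_sec maps each type to its mds.schduler.*.limitSec value.
    now = time.monotonic()
    return [op for op in operators
            if now - op["create_time"] <= limits_sec[op["type"]]]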
# ScanScheduler: scan start hour in one day ([0-23])
mds.scheduler.scan.startHour={{ mds_scheduler_scan_start_hour }}
@@ -106,129 +106,129 @@ mds.scheduler.scan.concurrent.per.pool={{ mds_scheduler_scan_concurrent_per_pool
mds.scheduler.scan.concurrent.per.chunkserver={{ mds_scheduler_scan_concurrent_per_chunkserver }}
#
-# 心跳相关配置,单位为ms
+# Heartbeat configuration, in ms
#
-# chunkserver和mds的心跳间隔
+# Heartbeat interval between chunkserver and mds
mds.heartbeat.intervalMs={{ mds_heartbeat_interval_ms }}
-# chunkserver和mds间心跳miss的时间
+# Time after which a heartbeat between chunkserver and mds counts as missed
mds.heartbeat.misstimeoutMs={{ mds_heartbeat_misstimeout_ms }}
-# mds在心跳miss后offlinetimeout被标记为offline
+# After a heartbeat miss lasting offlinetimeout, the mds marks the chunkserver as offline
mds.heartbeat.offlinetimeoutMs={{ mds_heartbeat_offlinet_imeout_ms }}
-# mds启动后延迟一定时间开始指导chunkserver删除物理数据
-# 需要延迟删除的原因在代码中备注
+# After startup the mds waits for a while before instructing chunkservers to delete physical data
+# The reason for the delayed deletion is noted in the code
mds.heartbeat.clean_follower_afterMs={{ mds_heartbeat_clean_follower_after_ms }}
#
-# namespace cache相关
+# Namespace cache settings
#
-# namestorage的缓存大小,为0表示不缓存
-# 按照每个文件最小10GB的空间预算。算上超售(2倍)
-# 文件数量 = 5PB/10GB ~= 524288 个文件
-# sizeof(namespace对象) * 524288 ~= 89Byte *524288 ~= 44MB 空间
-# 16MB chunk大小, 1个segment 1GB
-# sizeof(segment 对象) * 2621440 ~=(32 + (1024/16)*12)* 2621440 ~= 1.95 GB
-# 数据量:3GB左右
-# 记录数量:524288+2621440 ~= 300w左右
+# Cache size of namestorage; 0 means no caching
+# Budgeted at a minimum of 10GB per file, including 2x oversubscription:
+# number of files = 5PB/10GB ~= 524288 files
+# sizeof(namespace object) * 524288 ~= 89 bytes * 524288 ~= 44MB
+# with a 16MB chunk size and 1GB segments:
+# sizeof(segment object) * 2621440 ~= (32 + (1024/16)*12) * 2621440 ~= 1.95 GB
+# data volume: about 3GB
+# record count: 524288 + 2621440 ~= about 3 million
mds.cache.count={{ mds_cache_count }}
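The sizing in the comment above checks out; as a worked example:

files = 5 * 2**50 // (10 * 2**30)   # 5PB / 10GB = 524288 files
print(89 * files / 2**20)           # ~44.5 MB of namespace objects
seg_obj = 32 + (1024 // 16) * 12    # 800 bytes per segment object
print(seg_obj * 2621440 / 2**30)    # ~1.95 GB of segment objects
print(files + 2621440)              # 3145728 records, i.e. about 3 million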
#
# mds file record settings
#
-# mds file记录过期时间,单位us
+# Expiration time of mds file records, in us
mds.file.expiredTimeUs={{ file_expired_time_us }}
-# mds后台扫描线程扫描file记录间隔时间,单位us
+# Interval at which the mds background thread scans file records, in us
mds.file.scanIntevalTimeUs={{ mds_file_scan_inteval_time_us }}
#
# auth settings
#
-# root用户密码
+# Root user password
mds.auth.rootUserName={{ curve_root_username }}
mds.auth.rootPassword={{ curve_root_password }}
#
# file lock setting
#
-# mds的文件锁桶大小
+# Bucket size of the mds file lock
mds.filelock.bucketNum={{ mds_filelock_bucket_num }}
#
# topology config
#
-# Toplogy 定期刷新入数据库的时间间隔
+# Interval at which Topology is periodically flushed to the database
mds.topology.TopologyUpdateToRepoSec={{ mds_topology_topology_update_to_repo_sec }}
-# 请求chunkserver上创建全部copyset的超时时间
+# Timeout of the request that creates all copysets on a chunkserver
mds.topology.CreateCopysetRpcTimeoutMs={{ mds_topology_create_copyset_rpc_timeout_ms }}
-# 请求chunkserver上创建copyset重试次数
+# Number of retries of the create-copyset request to a chunkserver
mds.topology.CreateCopysetRpcRetryTimes={{ mds_topology_create_copyset_rpc_retry_times }}
-# 请求chunkserver上创建copyset重试间隔
+# Retry interval of the create-copyset request to a chunkserver
mds.topology.CreateCopysetRpcRetrySleepTimeMs={{ mds_topology_create_copyset_rpc_retry_sleep_time_ms }}
-# Topology模块刷新metric时间间隔
+# Interval at which the Topology module refreshes its metrics
mds.topology.UpdateMetricIntervalSec={{ mds_topology_update_metric_interval_sec }}
-# 物理池使用百分比,即使用量超过这个值即不再往这个池分配
+# Usage limit of a physical pool, in percent; once usage exceeds this value, no more space is allocated from the pool
mds.topology.PoolUsagePercentLimit={{ mds_topology_pool_usage_percent_limit }}
-# 多pool选pool策略 0:Random, 1:Weight
+# Pool selection policy when there are multiple pools: 0: Random, 1: Weight
mds.topology.choosePoolPolicy={{ mds_topology_choose_pool_policy }}
# enable LogicalPool ALLOW/DENY status
mds.topology.enableLogicalPoolStatus={{ mds_topology_enable_logicalpool_status}}
#
# copyset config
-# 默认值,为0时不启用
+# Default values; a value of 0 disables the option
#
-# 生成copyset重试次数
+# Number of retries when generating copysets
mds.copyset.copysetRetryTimes={{ mds_copyset_copyset_retry_times }}
-# 所有chunkserver的scatterWidth需满足的最大方差
+# Maximum variance that the scatterWidth of all chunkservers must satisfy
mds.copyset.scatterWidthVariance={{ mds_copyset_scatterwidth_variance }}
-# 所有chunkserver的scatterWidth需满足的最大标准差
+# Maximum standard deviation that the scatterWidth of all chunkservers must satisfy
mds.copyset.scatterWidthStandardDevation={{ mds_copyset_scatterwidth_standard_devation }}
-# 所有chunkserver的scatterWidth需满足的最大极差
+# Maximum range that the scatterWidth of all chunkservers must satisfy
mds.copyset.scatterWidthRange={{ mds_copyset_scatterwidth_range }}
-# 所有chunkserver的scatterWidth偏离均值的百分比
-# scatterwidth偏移百分比设置过大,导致部分机器scatterwidth过小,影响机器恢复时间,恢复
-# 时间会导致集群的可靠性降低;其次,会导致部分机器scatterwith过大,某些chunkserver上的
-# copyset散落在各机器上,其他机器一旦写入,这些scatter-with较大的机器成为热点
-# scatterwidth偏移百分比设置过小,导致scatterwidth平均程度要求更大,copyset算法要求越高,
-# 导致算法可能算不出理想结果,建议设置值为20
+# Percentage by which the scatterWidth of chunkservers may deviate from the mean.
+# If set too large, some machines get a scatterWidth that is too small, which lengthens machine
+# recovery time and thus lowers cluster reliability; others get a scatterWidth that is too large,
+# with their copysets scattered across many machines, so once those machines take writes the
+# high-scatterWidth chunkservers become hotspots. If set too small, scatterWidths must stay very
+# close to the average, which demands more of the copyset algorithm and may leave it unable to find a good solution; the recommended value is 20
mds.copyset.scatterWidthFloatingPercentage={{ mds_copyset_scatterwidth_floating_percentage }}
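Taken together, the four constraints above amount to a check like the following (an illustrative sketch, not mds code), where widths holds the scatter-width of every chunkserver:

from statistics import mean, pstdev, pvariance

def scatter_width_ok(widths, max_var, max_std, max_range, float_pct=20):
    # Sketch only: validate a candidate copyset layout against the
    # variance, standard deviation, range and floating-percentage limits.
    avg = mean(widths)
    return (pvariance(widths) <= max_var
            and pstdev(widths) <= max_std
            and max(widths) - min(widths) <= max_range
            and all(abs(w - avg) <= avg * float_pct / 100 for w in widths))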
#
# curvefs config
#
-# curvefs的默认chunk size大小,16MB = 16*1024*1024 = 16777216
+# Default chunk size of curvefs, 16MB = 16*1024*1024 = 16777216
mds.curvefs.defaultChunkSize={{ chunk_size }}
-# curvefs的默认segment size大小,1GB = 1*1024*1024*1024 = 1073741824
+# Default segment size of curvefs, 1GB = 1*1024*1024*1024 = 1073741824
mds.curvefs.defaultSegmentSize={{ segment_size }}
-# curvefs的默认最小文件大小,10GB = 10*1024*1024*1024 = 10737418240
+# Default minimum file size of curvefs, 10GB = 10*1024*1024*1024 = 10737418240
mds.curvefs.minFileLength={{ min_file_length }}
-# curvefs的默认最大文件大小,20TB = 20*1024*1024*1024*1024 = 21990232555520
+# Default maximum file size of curvefs, 20TB = 20*1024*1024*1024*1024 = 21990232555520
mds.curvefs.maxFileLength={{ max_file_length }}
#
# chunkseverclient config
#
-# rpc 超时时间
+# RPC timeout
mds.chunkserverclient.rpcTimeoutMs={{ mds_chunkserverclient_rpc_timeout_ms }}
-# rpc 重试次数
+# Number of RPC retries
mds.chunkserverclient.rpcRetryTimes={{ mds_chunkserverclient_rpc_retry_times }}
-# rpc 重试时间间隔
+# RPC retry interval
mds.chunkserverclient.rpcRetryIntervalMs={{ mds_chunkserverclient_rpc_retry_interval_ms }}
-# 从copyset的每个chunkserver getleader的重试的最大轮次
+# Maximum number of get-leader retry rounds over the chunkservers of a copyset
mds.chunkserverclient.updateLeaderRetryTimes={{ mds_chunkserverclient_update_leader_retry_times }}
-# 从copyset的每个chunkserver getleader的每一轮的间隔,需大于raft选主的时间
+# Interval between get-leader rounds over the chunkservers of a copyset; must be longer than a raft leader election
mds.chunkserverclient.updateLeaderRetryIntervalMs={{ mds_chunkserverclient_update_leader_retry_interval_ms }}
# snapshotclone config
#
-# snapshot clone server 地址
+# snapshot clone server address
mds.snapshotcloneclient.addr={{ snapshot_nginx_vip }}:{{ nginx_docker_external_port }}
#
# common options
#
-# 日志存放文件夹
+# Log storage folder
mds.common.logDir={{ mds_common_log_dir }}
-# 单元测试情况下
+# For unit tests
# mds.common.logDir=./runlog/
#
diff --git a/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2 b/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2
index d7121c6dad..eadcb92bd7 100644
--- a/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2
@@ -1,28 +1,28 @@
# part2 socket file address
nebdserver.serverAddress={{ nebd_data_dir }}/nebd.sock
-# 文件锁路径
+# File lock path
metacache.fileLockPath={{ nebd_data_dir }}/lock
-# 同步rpc的最大重试次数
+# Maximum number of retries for synchronous rpc
request.syncRpcMaxRetryTimes={{ nebd_client_sync_rpc_retry_times }}
-# rpc请求的重试间隔
+# Retry interval of rpc requests
request.rpcRetryIntervalUs={{ nebd_client_rpc_retry_inverval_us }}
-# rpc请求的最大重试间隔
+# Maximum retry interval of rpc requests
request.rpcRetryMaxIntervalUs={{ nebd_client_rpc_retry_max_inverval_us }}
-# rpc hostdown情况下的重试时间
+# Retry interval when the rpc host is down
request.rpcHostDownRetryIntervalUs={{ nebd_client_rpc_hostdown_retry_inverval_us }}
-# brpc的健康检查周期时间,单位s
+# Health check period of brpc, in seconds
request.rpcHealthCheckIntervalS={{ nebd_client_health_check_internal_s }}
-# brpc从rpc失败到进行健康检查的最大时间间隔,单位ms
+# Maximum interval from an rpc failure to the brpc health check, in ms
request.rpcMaxDelayHealthCheckIntervalMs={{ nebd_client_delay_health_check_internal_ms }}
-# rpc发送执行队列个数
+# Number 
of RPC send execution queues request.rpcSendExecQueueNum={{ nebd_client_rpc_send_exec_queue_num }} -# heartbeat间隔 +# heartbeat interval heartbeat.intervalS={{ nebd_client_heartbeat_inverval_s }} -# heartbeat rpc超时时间 +# heartbeat RPC timeout heartbeat.rpcTimeoutMs={{ nebd_client_heartbeat_rpc_timeout_ms }} -# 日志路径 +# Log Path log.path={{ nebd_log_dir }}/client diff --git a/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2 b/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2 index 5262d0af37..7cd700b2db 100644 --- a/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2 +++ b/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2 @@ -1,16 +1,16 @@ -# curve-client配置文件地址 +# curve-client configuration file address curveclient.confPath={{ client_config_path }} -#brpc server监听端口 +# brpc server listening port listen.address={{ nebd_data_dir }}/nebd.sock -#元数据文件地址,包含文件名 +# Metadata file address, including file name meta.file.path={{ nebd_data_dir }}/nebdserver.meta -#心跳超时时间 +# Heartbeat timeout heartbeat.timeout.sec={{ nebd_server_heartbeat_timeout_s }} -#文件超时检测时间间隔 +# File timeout detection interval heartbeat.check.interval.ms={{ nebd_server_heartbeat_check_interval_ms }} # return rpc when io error diff --git a/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2 b/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2 index ca52b19925..00c20160a0 100644 --- a/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2 +++ b/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2 @@ -1,18 +1,18 @@ # # curvefs client options # -# client配置文件位置 +# Client configuration file location client.config_path={{ snap_client_config_path }} -# mds root 用户名 +# Mds root username mds.rootUser={{ curve_root_username }} -# mds root 密码 +# Mds root password mds.rootPassword={{ curve_root_password }} -# 调用client方法的重试总时间 +# The total retry time for calling the client method client.methodRetryTimeSec={{ snap_client_method_retry_time_sec }} -# 调用client方法重试间隔时间 +# Call client method retry interval client.methodRetryIntervalMs={{ snap_client_method_retry_interval_ms }} -# 日志文件位置 +# Log file location log.dir={{ snap_log_dir }} # @@ -26,53 +26,53 @@ s3.config_path={{ snap_s3_config_path }} server.address={{ ansible_ssh_host }}:{{ snapshot_port }} server.subnet={{ snapshot_subnet }} server.port={{ snapshot_port }} -# 调用client异步方法重试总时间 +# Total retry time for calling client asynchronous methods server.clientAsyncMethodRetryTimeSec={{ snap_client_async_method_retry_time_sec }} -# 调用client异步方法重试时间间隔 +# Call client asynchronous method retry interval server.clientAsyncMethodRetryIntervalMs={{ snap_client_async_method_retry_interval_ms }} -# 快照工作线程数 +# Number of snapshot worker threads server.snapshotPoolThreadNum={{ snap_snapshot_pool_thread_num }} -# 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms) +# Scanning cycle of snapshot background thread scanning waiting queue and work queue (unit: ms) server.snapshotTaskManagerScanIntervalMs={{ snap_snapshot_task_manager_scan_interval_ms }} -# 转储chunk分片大小 +# Dump chunk shard size server.chunkSplitSize={{ snap_chunk_split_size }} -# CheckSnapShotStatus调用间隔 +# CheckSnapShotStatus call interval server.checkSnapshotStatusIntervalMs={{ snap_check_snapshot_status_interval_ms }} -# 最大快照数 +# Maximum Snapshots server.maxSnapshotLimit={{ snap_max_snapshot_limit }} -# 同时执行转储的线程数 +# Number of threads simultaneously executing dump server.snapshotCoreThreadNum={{ 
snap_snapshot_core_thread_num }}
-# mds session 时间
+# mds session time
server.mdsSessionTimeUs={{ file_expired_time_us }}
-# 每个线程同时进行ReadChunkSnapshot和转储的快照分片数量
+# Number of snapshot shards per thread on which ReadChunkSnapshot and dump run concurrently
server.readChunkSnapshotConcurrency={{ snap_read_chunk_snapshot_concurrency }}
# for clone
-# 用于Lazy克隆元数据部分的线程池线程数
+# Number of thread pool threads used for the metadata part of Lazy clone
server.stage1PoolThreadNum={{ snap_stage1_pool_thread_num }}
-# 用于Lazy克隆数据部分的线程池线程数
+# Number of thread pool threads used for the data part of Lazy clone
server.stage2PoolThreadNum={{ snap_stage2_pool_thread_num }}
-# 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池线程数
+# Number of thread pool threads used for other control-plane requests such as non-Lazy clone and clone deletion
server.commonPoolThreadNum={{ snap_common_pool_thread_num }}
-# CloneTaskManager 后台线程扫描间隔
+# CloneTaskManager background thread scan interval
server.cloneTaskManagerScanIntervalMs={{ snap_clone_task_manager_scan_interval_ms }}
-# clone chunk分片大小
+# Clone chunk shard size
server.cloneChunkSplitSize={{ snap_clone_chunk_split_size }}
-# 克隆临时目录
+# Clone temporary directory
server.cloneTempDir={{ snap_clone_temp_dir }}
-# CreateCloneChunk同时进行的异步请求数量
+# Number of asynchronous requests made simultaneously by CreateCloneChunk
server.createCloneChunkConcurrency={{ snap_create_clone_chunk_concurrency }}
-# RecoverChunk同时进行的异步请求数量
+# Number of asynchronous requests made simultaneously by RecoverChunk
server.recoverChunkConcurrency={{ snap_recover_chunk_concurrency }}
-# CloneServiceManager引用计数后台扫描每条记录间隔
+# Interval between records in the CloneServiceManager reference-count background scan
server.backEndReferenceRecordScanIntervalMs={{ snap_clone_backend_ref_record_scan_interval_ms }}
-# CloneServiceManager引用计数后台扫描每轮记录间隔
+# Interval between rounds of the CloneServiceManager reference-count background scan
server.backEndReferenceFuncScanIntervalMs={{ snap_clone_backend_ref_func_scan_interval_ms }}
#
-# etcd相关配置
+# ETCD related configurations
#
-# etcd地址
+# ETCD address
{% set etcd_address=[] -%}
{% for host in groups.etcd -%}
{% set etcd_ip = hostvars[host].ansible_ssh_host -%}
@@ -81,11 +81,11 @@ server.backEndReferenceFuncScanIntervalMs={{ snap_clone_backend_ref_func_scan_in
{% endfor -%}
etcd.endpoint={{ etcd_address | join(',') }}
-# client建立连接的超时时间
+# Timeout for the client to establish a connection
etcd.dailtimeoutMs={{ snap_etcd_dailtimeout_ms }}
-# client进行put/get/txn等操作的超时时间
+# Timeout for the client to perform put/get/txn and other operations
etcd.operation.timeoutMs={{ snap_etcd_operation_timeout_ms }}
-# client操作失败可以重试的次数
+# Number of times a failed client operation can be retried
etcd.retry.times={{ snap_etcd_retry_times }}
# wait dlock timeout
etcd.dlock.timeoutMs={{ snap_etcd_dlock_timeout_ms }}
@@ -93,20 +93,20 @@ etcd.dlock.ttlSec={{ snap_etcd_dlock_ttl_sec }}
#
-# leader选举相关参数
+# Leader election related parameters
#
-# leader lock名称
+# Leader lock name
leader.campagin.prefix={{ snap_leader_campagin_prefix }}
-# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s)
-# 该值和etcd集群election timeout相关.
-# etcd的server端限制了该值最小为1.5 * election timeout
-# 建议设置etcd集群election timeout为1s
+# A session is created during the leader election; the value is in seconds (the go-side interface also takes it in seconds)
+# This value is related to the ETCD cluster election timeout
+# The server side of ETCD limits this value to a minimum of 1.5 * election timeout
+# It is recommended to set the ETCD cluster election timeout to 1s
leader.session.intersec={{ snap_leader_session_inter_sec }}
-# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间
-# 内未当选leader会返回错误
+# Timeout for the leader election: if it is 0, an unsuccessful election blocks forever; if it is greater than 0,
+# an error is returned when no leader is elected within electionTimeoutMs
leader.election.timeoutms={{ snap_leader_election_timeout_ms }}
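The election-timeout semantics described in the comments above (0 means block until elected, a positive value gives up after the deadline) can be made concrete with a small Python sketch. This is illustrative only and not part of this patch; try_once stands in for a single etcd campaign attempt, and all names here are made up.

import time

def campaign_leader(try_once, election_timeout_ms, retry_interval_ms=100):
    # Toy model of leader.election.timeoutms: 0 blocks until elected,
    # a positive value returns an error once the deadline passes.
    deadline = (None if election_timeout_ms == 0
                else time.monotonic() + election_timeout_ms / 1000.0)
    while True:
        if try_once():          # one campaign attempt against etcd
            return True         # elected leader
        if deadline is not None and time.monotonic() >= deadline:
            return False        # not elected within electionTimeoutMs
        time.sleep(retry_interval_ms / 1000.0)

# campaign_leader(lambda: False, 500) returns False after roughly 0.5s.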
#
-# dummyserver相关配置
+# Dummyserver related configurations
#
server.dummy.listen.port={{ snapshot_dummy_port }}
diff --git a/curve-ansible/roles/generate_config/templates/tools.conf.j2 b/curve-ansible/roles/generate_config/templates/tools.conf.j2
index 6207e8a4ef..b630b3dfe3 100644
--- a/curve-ansible/roles/generate_config/templates/tools.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/tools.conf.j2
@@ -1,4 +1,4 @@
-# mds地址
+# Mds address
{% set mds_address=[] -%}
{% for host in groups.mds -%}
{% set mds_ip = hostvars[host].ansible_ssh_host -%}
@@ -8,13 +8,13 @@ mdsAddr={{ mds_address | join(',') }}
# mds dummy port
mdsDummyPort={{ hostvars[groups.mds[0]].mds_dummy_port }}
-# 发送rpc的超时时间
+# Timeout for sending rpc
rpcTimeout={{ tool_rpc_timeout }}
-# rpc重试次数
+# RPC retry count
rpcRetryTimes={{ tool_rpc_retry_times }}
# the rpc concurrency to chunkserver
rpcConcurrentNum={{ tool_rpc_concurrent_num }}
-# etcd地址
+# ETCD address
{% set etcd_address=[] -%}
{% for host in groups.etcd -%}
{% set etcd_ip = hostvars[host].ansible_ssh_host -%}
@@ -23,7 +23,7 @@ rpcConcurrentNum={{ tool_rpc_concurrent_num }}
{% endfor -%}
etcdAddr={{ etcd_address | join(',') }}
{% if groups.snapshotclone is defined and groups.snapshotclone[0] is defined %}
-# snapshot clone server 地址
+# Snapshot clone server address
{% set snap_address=[] -%}
{% for host in groups.snapshotclone -%}
{% set snap_ip = hostvars[host].ansible_ssh_host -%}
diff --git a/curve-ansible/roles/install_package/files/disk_uuid_repair.py b/curve-ansible/roles/install_package/files/disk_uuid_repair.py
index eb48728e2e..cfa5a32ac3 100644
--- a/curve-ansible/roles/install_package/files/disk_uuid_repair.py
+++ b/curve-ansible/roles/install_package/files/disk_uuid_repair.py
@@ -17,30 +17,34 @@
# limitations under the License.
# -# 检测磁盘上disk.meta中记录的uuid与当前磁盘的实际uuid是否相符合 -# 如果不符合, 更新为当前的uuid +# Check if the uuid recorded in disk.meta on the disk matches the actual uuid of the current disk +# If not, update to the current uuid import os import hashlib import sys import subprocess + def __get_umount_disk_list(): - # 获取需要挂载的设备 + # Obtain devices that need to be mounted cmd = "lsblk -O|grep ATA|awk '{print $1}'" - out_msg = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) + out_msg = subprocess.check_output( + cmd, shell=True, stderr=subprocess.STDOUT) devlist = out_msg.splitlines() - # 查看当前设备的挂载状况 + # View the mounting status of the current device umount = [] for dev in devlist: cmd = "lsblk|grep " + dev + "|awk '{print $7}'" - out_msg = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) + out_msg = subprocess.check_output( + cmd, shell=True, stderr=subprocess.STDOUT) if len(out_msg.replace('\n', '')) == 0: umount.append(dev) return umount + def __uninit(): try: cmd = "grep curvefs /etc/fstab" @@ -49,6 +53,7 @@ def __uninit(): except subprocess.CalledProcessError: return True + def __analyse_uuid(kv): uuid = "" uuidkv = kv[0].split("=") @@ -64,25 +69,27 @@ def __analyse_uuid(kv): return "" else: uuidmd5 = uuidmd5kv[1].replace("\n", "") - # 校验 + # Verification if (hashlib.md5(uuid).hexdigest() != uuidmd5): print("uuid[%s] not match uuidmd5[%s]" % (uuid, uuidmd5)) return "" return uuid + def __get_recorduuid(disk): uuid = "" - # 将磁盘挂载到临时目录 + # Mount the disk to a temporary directory cmd = "mkdir -p /data/tmp; mount " + disk + " /data/tmp" retCode = subprocess.call(cmd, shell=True) if retCode != 0: print("Get record uuid in %s fail." % disk) return False, uuid - # 挂载成功,获取记录的uuid + # Successfully mounted, obtaining the recorded uuid try: cmd = "cat /data/tmp/disk.meta" - out_msg = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) + out_msg = subprocess.check_output( + cmd, shell=True, stderr=subprocess.STDOUT) kv = out_msg.splitlines() if len(kv) != 2: @@ -94,7 +101,7 @@ def __get_recorduuid(disk): except subprocess.CalledProcessError as e: print("Get file disk.meta from %s fail, reason: %s." % (disk, e)) - # 卸载磁盘 + # Unmount Disk cmd = "umount " + disk + "; rm -fr /data/tmp" retCode = subprocess.call(cmd, shell=True) if retCode != 0: @@ -103,75 +110,81 @@ def __get_recorduuid(disk): return True, uuid + def __get_actualuuid(disk): uuid = "" try: cmd = "ls -l /dev/disk/by-uuid/|grep " + disk + "|awk '{print $9}'" - uuid = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) + uuid = subprocess.check_output( + cmd, shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print("Get actual uuid of %s fail, reason: %s." 
% (disk, e)) return uuid + def __cmp_recorduuid_with_actual(umountDisk): recordList = {} actualList = {} for disk in umountDisk: - # 获取当前disk上记录的uuid - diskFullName = "/dev/" + disk - opRes, recorduuid = __get_recorduuid(diskFullName) - if opRes != True or len(recorduuid) == 0: - return False, recordList, actualList - - # 获取disk的实际uuid - actualuuid = __get_actualuuid(disk).replace("\n", "") - - # 比较记录的和实际的是否相同 - if actualuuid != recorduuid: - recordList[disk] = recorduuid - actualList[disk] = actualuuid - else: + # Obtain the uuid recorded on the current disk + diskFullName = "/dev/" + disk + opRes, recorduuid = __get_recorduuid(diskFullName) + if opRes != True or len(recorduuid) == 0: + return False, recordList, actualList + + # Obtain the actual uuid of the disk + actualuuid = __get_actualuuid(disk).replace("\n", "") + + # Compare whether the recorded and actual values are the same + if actualuuid != recorduuid: + recordList[disk] = recorduuid + actualList[disk] = actualuuid + else: return False, recordList, actualList return True, recordList, actualList + def __mount_with_atual_uuid(diskPath, record, actual): print("%s uuid change from [%s] to [%s]." % (diskPath, record, actual)) - # 从/etc/fstab中获取对应的挂载目录 + # Obtain the corresponding mount directory from/etc/fstab mntdir = "" try: cmd = "grep " + record + " /etc/fstab | awk -F \" \" '{print $2}'" - mntdir = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).replace("\n", "") + mntdir = subprocess.check_output( + cmd, shell=True, stderr=subprocess.STDOUT).replace("\n", "") except subprocess.CalledProcessError as e: print("Get mount dir for %s fail. error: %s." % (diskPath, e)) return False - # 将actual挂载到相应的目录下 + # Mount the actual to the corresponding directory cmd = "mount " + diskPath + " " + mntdir retCode = subprocess.call(cmd, shell=True) - if retCode !=0: + if retCode != 0: print("exec [%s] fail." % cmd) return False print("mount %s to %s success." % (diskPath, mntdir)) replaceCmd = "sed -i \"s/" + record + "/" + actual + "/g\"" - # 将新的uuid写入到fstab + # Write the new uuid to fstab cmd = "cp /etc/fstab /etc/fstab.bak;" + replaceCmd + " /etc/fstab > /dev/null" retCode = subprocess.call(cmd, shell=True) - if retCode !=0: + if retCode != 0: print("exec [%s] fail." % cmd) return False print("modify actual uuid to /etc/fstab for disk %s success." % diskPath) - # 将新的uuid写入到diskmeta + # Write the new uuid to diskmeta fileFullName = mntdir + "/disk.meta" filebakName = fileFullName + ".bak" cpcmd = "cp " + fileFullName + " " + filebakName uuidcmd = "echo uuid=" + actual + " > " + fileFullName - uuidmd5cmd = "echo uuidmd5=" + hashlib.md5(actual).hexdigest() + " >> " + fileFullName + uuidmd5cmd = "echo uuidmd5=" + \ + hashlib.md5(actual).hexdigest() + " >> " + fileFullName cmd = cpcmd + ";" + uuidcmd + ";" + uuidmd5cmd retCode = subprocess.call(cmd, shell=True) - if retCode !=0: + if retCode != 0: print("exec [%s] fail." % cmd) return False print("modify actual uuid to %s success." 
% fileFullName)
@@ -182,29 +195,32 @@ def __mount_with_atual_uuid(diskPath, record, actual):
def __handle_inconsistent(umountDisk, record, actual):
    for disk in umountDisk:
        if disk not in record:
-            print("record uuid and actual uuid of %s is same, please check other reason" % disk)
+            print(
+                "record uuid and actual uuid of %s is same, please check other reason" % disk)
            continue
-        # 按照actual uuid做挂载
-        res = __mount_with_atual_uuid("/dev/" + disk, record[disk], actual[disk])
+        # Mount according to the actual uuid
+        res = __mount_with_atual_uuid(
+            "/dev/" + disk, record[disk], actual[disk])
        if res:
            continue
        else:
            return False
    return True
+
if __name__ == "__main__":
-    # 查看未挂载成功的磁盘设备列表
+    # View the list of disk devices that were not successfully mounted
    umountDisk = __get_umount_disk_list()
    if len(umountDisk) == 0:
        print("All disk mount success.")
        exit(0)
-    # 查看是否之前已经挂载过
+    # Check if it has been previously mounted
    if __uninit():
        print("Please init env with chunkserver_ctl.sh first.")
        exit(0)
-    # 查看当前未挂载成功的磁盘设备记录的uuid和实际uuid
+    # Compare the recorded uuid with the actual uuid for the disks that failed to mount
    cmpRes, record, actual = __cmp_recorduuid_with_actual(umountDisk)
    if cmpRes == False:
        print("Compare record uuid with actual uuid fail.")
@@ -213,13 +229,10 @@ def __handle_inconsistent(umountDisk, record, actual):
        print("Record uuid with actual uuid all consistent.")
        exit(0)
-    # 将不一致的磁盘按照当前的uuid重新挂载
+    # Remount inconsistent disks according to the current uuid
    if __handle_inconsistent(umountDisk, record, actual):
        print("fix uuid-changed disk[%s] success." % umountDisk)
        exit(0)
    else:
        print("fxi uuid-changed disk[%s] fail." % umountDisk)
        exit(-1)
-
-
-
diff --git a/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2 b/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2
index cba41adfcd..d44a03c682 100644
--- a/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2
+++ b/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2
@@ -6,7 +6,7 @@ dataDir={{ chunkserver_data_dir }}
raftLogProtocol={{ chunkserver_raft_log_procotol }}
source ./chunkserver_deploy.sh
-# 使用方式
+# Usage
function help() {
    echo "COMMANDS:"
    echo " start : start chunkserver"
@@ -50,18 +50,18 @@ function ip_value() {
    }'
}
-# 从subnet获取ip
+# Obtain IP from subnet
function get_ip_from_subnet() {
    subnet=$1
    prefix=$(ip_value $subnet)
    mod=`echo $subnet|awk -F/ '{print $2}'`
    mask=$((2**32-2**(32-$mod)))
-    # 对prefix再取一次模,为了支持10.182.26.50/22这种格式
+    # Apply the mask to the prefix once more, to support formats like 10.182.26.50/22
    prefix=$(($prefix&$mask))
    ip=
    for i in `/sbin/ifconfig -a|grep inet|grep -v inet6|awk '{print $2}'|tr -d "addr:"`
    do
-        # 把ip转换成整数
+        # Convert the IP to an integer
        ip_int=$(ip_value $i)
        if [ $(($ip_int&$mask)) -eq $prefix ]
        then
@@ -76,7 +76,7 @@ function get_ip_from_subnet() {
    fi
}
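The subnet matching above packs an IPv4 address into a 32-bit integer and compares it against the masked prefix. For reference, here is a minimal Python sketch of the same logic; it is illustrative only, not part of this patch, and the example addresses are made up.

def ip_value(ip):
    # Fold the four dotted-quad octets into one 32-bit integer,
    # mirroring the awk-based ip_value helper above.
    a, b, c, d = (int(x) for x in ip.split('.'))
    return (a << 24) | (b << 16) | (c << 8) | d

def get_ip_from_subnet(subnet, local_ips):
    # Masking the prefix once more is what allows host bits in the
    # prefix, e.g. 10.182.26.50/22 instead of 10.182.24.0/22.
    prefix_str, mod = subnet.split('/')
    mask = 2 ** 32 - 2 ** (32 - int(mod))
    prefix = ip_value(prefix_str) & mask
    for ip in local_ips:
        if ip_value(ip) & mask == prefix:
            return ip
    return None

# get_ip_from_subnet("10.182.26.50/22", ["127.0.0.1", "10.182.25.3"])
# returns "10.182.25.3".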
-# 启动chunkserver
+# Start chunkserver
function start() {
    if [ $# -lt 1 ]
    then
@@ -87,7 +87,7 @@ function start() {
    then
        confPath=$3
    fi
-    # 检查配置文件
+    # Check the configuration file
    if [ ! -f ${confPath} ]
    then
        echo "confPath $confPath not exist!"
@@ -104,7 +104,7 @@ function start() {
    get_ip_from_subnet $external_subnet
    external_ip=$ip
    enableExternalServer=true
-    # external ip和internal ip一致或external ip为127.0.0.1时不启动external server
+    # Do not start the external server when the external IP and the internal IP are the same or the external IP is 127.0.0.1
    if [ $internal_ip = $external_ip -o $external_ip = "127.0.0.1" ]
    then
        enableExternalServer=false
@@ -148,7 +148,7 @@ function start_one() {
    fi
    jemallocpath={{ jemalloc_path }}
-    # 检查jemalloc库文件
+    # Check the jemalloc library file
    if [ ! -f ${jemallocpath} ]
    then
        echo "Not found jemalloc library, Path is ${jemallocpath}"
@@ -230,7 +230,7 @@ function restart() {
}
function wait_stop() {
-    # wait 3秒钟让它退出
+    # Wait 3 seconds for it to exit
    retry_times=0
    while [ $retry_times -le 3 ]
    do
@@ -244,7 +244,7 @@ function wait_stop() {
            break
        fi
    done
-    # 如果进程还在,就kill -9
+    # If the process is still alive, kill -9 it
    ps -efl|grep -E "curve-chunkserver .*${dataDir}/chunkserver$1 "|grep -v grep > /dev/null 2>&1
    if [ $? -eq 0 ]
    then
@@ -325,12 +325,12 @@ function deploy() {
}
function format() {
-    # 格式化chunkfile pool
+    # Format the chunkfile pool
    curve-format $*
}
function recordmeta() {
-    # 将当前的磁盘的uuid及其md5备份到磁盘的disk.meta文件中
+    # Back up the current disk's uuid and its md5 to the disk's disk.meta file
    meta_record;
}
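For reference, the disk.meta record that recordmeta/meta_record maintains is a two-line key=value file whose uuidmd5 field is the md5 of the uuid, which is exactly what disk_uuid_repair.py verifies above. A minimal Python 3 sketch of that format (illustrative only; the repository scripts themselves are Python 2, and these helper names are made up):

import hashlib

def write_disk_meta(path, uuid):
    # disk.meta stores the uuid plus its md5 so that a uuid change
    # after reboot can be detected and repaired later.
    with open(path, "w") as f:
        f.write("uuid=%s\n" % uuid)
        f.write("uuidmd5=%s\n" % hashlib.md5(uuid.encode()).hexdigest())

def disk_meta_consistent(path):
    kv = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition("=")
            kv[key] = value
    return hashlib.md5(kv["uuid"].encode()).hexdigest() == kv.get("uuidmd5")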
diff --git a/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2 b/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2
index db8566728a..7f84ccd28f 100644
--- a/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2
+++ b/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2
@@ -1,5 +1,5 @@
#!/bin/bash
-#confirm提示,防止误操作
+# confirm prompt to prevent misoperation
dataDir={{ chunkserver_data_dir }}
function do_confirm {
    echo "This deployment script will format the disk and delete all the data."
@@ -24,14 +24,14 @@ diskList="{{ dlist | join('\n') }}"
{% endif %}
function deploy_prep {
-#清理/etc/fstab残留信息
+# Clean up leftover /etc/fstab entries
    grep curvefs /etc/fstab
    if [ $? -eq 0 ]
    then
        sed -i '/curvefs/d' /etc/fstab
        sed -i '/chunkserver/d' /etc/fstab
    fi
-#将数据盘挂载的目录都卸载掉,为下一步格式化磁盘做准备
+# Unmount all directories mounted on the data disks, in preparation for formatting the disks in the next step
    for i in `{{ get_disk_list_cmd }}`
    do
        mntdir=`lsblk|grep $i|awk '{print $7}'`
@@ -49,7 +49,7 @@ function deploy_prep {
        fi
    done
}
-#记录磁盘的盘符信息和磁盘的wwn信息,将信息持久化到diskinfo文件
+# Record each disk's device name and wwn, and persist the information to the diskinfo file
declare -A disk_map
diskinfo=./diskinfo
function record_diskinfo {
@@ -69,7 +69,7 @@ function record_diskinfo {
    done
}
-#根据磁盘数量创建数据目录和日志目录,目前的数据目录格式统一是$dataDir/chunkserver+num,日志目录在$dataDir/log/chunkserver+num
+# Create data directories and log directories based on the number of disks. The data directory format is $dataDir/chunkserver+num, and the log directories are under $dataDir/log/chunkserver+num
function chunk_dir_prep {
    if [ -d ${dataDir} ]
    then
@@ -90,7 +90,7 @@ function chunk_dir_prep {
        mkdir -p ${dataDir}/log/chunkserver$i
    done
}
-#格式化磁盘文件系统
+# Format the disk file systems
function disk_format {
    for disk in ${!disk_map[@]}
    do
@@ -99,7 +99,7 @@ function disk_format {
    done
}
-#将创建好的数据目录按照顺序挂载到格式化好的磁盘上,并记录挂载信息到mount.info
+# Mount the formatted disks to the created data directories in order, and record the mounting information to mount.info
function mount_dir {
    while [ 1 ]
    do
@@ -128,7 +128,7 @@ function mount_dir {
    lsblk > ./mount.info
}
-#持久化挂载信息到fstab文件,防止系统重启后丢失
+# Persist the mounting information to the fstab file to prevent loss after a system restart
function fstab_record {
    grep curvefs /etc/fstab
    if [ $? -ne 0 ]
@@ -141,7 +141,7 @@ function fstab_record {
    fi
}
-#将当前的uuid持久化到磁盘上做备份,防止系统重启后uuid发生变化
+# Persist the current uuid to the disk as a backup, in case the uuid changes after a system restart
function meta_record {
    grep curvefs /etc/fstab
    if [ $? -eq 0 ]
@@ -158,7 +158,7 @@ function meta_record {
    fi
}
-#初始化chunkfile pool
+# Initialize the chunkfile pool
function chunkfile_pool_prep {
    ret=`lsblk|grep chunkserver|wc -l`
    for i in `seq 0 $((${ret}-1))`
    do
@@ -224,20 +224,20 @@ function deploy_all {
function deploy_one {
    local diskname=$1
    local dirname=$2
-    #目录不存在
+    # The directory does not exist
    if [ ! -d $dirname ]
    then
        echo "$dirname is not exist!"
        exit 1
    fi
-    #磁盘正在挂载使用
+    # The disk is already mounted and in use
    mount | grep -w $diskname
    if [ $? -eq 0 ]
    then
        echo "$diskname is being used"
        exit 1
    fi
-    #目录正在挂载使用
+    # The directory is already mounted and in use
    mount | grep -w $dirname
    if [ $? -eq 0 ]
    then
@@ -265,7 +265,7 @@ function deploy_one {
    done
    mount $diskname $dirname
    lsblk > ./mount.info
-    #更新fstab
+    # Update fstab
    short_diskname=`echo $diskname|awk -F"/" '{print $3}'`
    ls -l /dev/disk/by-uuid|grep -w $short_diskname
    if [ $? -ne 0 ]
@@ -275,12 +275,12 @@ function deploy_one {
    fi
    uuid=`ls -l /dev/disk/by-uuid/|grep -w ${short_diskname}|awk '{print $9}'`
    echo "UUID=$uuid $dirname ext4 rw,errors=remount-ro 0 0" >> /etc/fstab
-    # 将uuid及其md5写到diskmeta中
+    # Write the uuid and its md5 to diskmeta
    uuidmd5=`echo -n $uuid | md5sum | cut -d ' ' -f1`
    touch $dirname/disk.meta
    echo "uuid=$uuid" > $dirname/disk.meta
    echo "uuidmd5=$uuidmd5" >> $dirname/disk.meta
-    #格式化chunkfile pool
+    # Format the chunkfile pool
    curve-format -allocatePercent={{ chunk_alloc_percent }} \
    -filePoolDir=$dirname/chunkfilepool \
diff --git a/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2 b/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2
index 6c0b36c932..9aadcb311f 100644
--- a/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2
+++ b/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2
@@ -1,12 +1,12 @@
#!/bin/bash
-# 默认配置文件
+# Default configuration file
confPath={{ etcd_config_path }}
-# 日志文件目录
+# Log file directory
logDir={{ etcd_log_dir }}
-# 日志文件路径
+# Log file path
logPath=${logDir}/etcd.log
# pidfile
@@ -15,9 +15,9 @@ pidFile=${HOME}/etcd.pid
# daemon log
daemonLog=${logDir}/daemon-etcd.log
-# 启动etcd
+# Start etcd
function start_etcd() {
-    # 创建logDir
+    # Create logDir
    mkdir -p ${logDir} > /dev/null 2>&1
    if [ $? -ne 0 ]
    then
@@ -25,14 +25,14 @@ function start_etcd() {
        exit 1
    fi
-    # 检查logPath是否有写权限
+    # Check if logPath has write permission
    if [ !
-w ${logDir} ] then echo "Write permission denied: ${logDir}" exit 1 fi - # 检查logPath是否可写或者是否能够创建 + # Check if logPath is writable or can be created touch ${logPath} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -40,7 +40,7 @@ function start_etcd() { exit fi - # 检查daemonLog是否可写或者是否能够创建 + # Check if the daemonLog is writable or can be created touch ${daemonLog} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -48,28 +48,28 @@ function start_etcd() { exit fi - # 检查daemon + # Check the daemon if ! type daemon &> /dev/null then echo "No daemon installed" exit fi - # 检查是否安装etcd + # Check if etcd is installed if [ -z `command -v etcd` ] then echo "No etcd installed" exit fi - # 检查配置文件 + # Check configuration file if [ ! -f ${confPath} ] then echo "Not found confFile, Path is ${confPath}" exit fi - # 判断是否已经通过daemon启动了etcd + # Determine if ETCD has been started through daemon daemon --name etcd --pidfile ${pidFile} --running if [ $? -eq 0 ] then @@ -77,8 +77,8 @@ function start_etcd() { exit fi - # pidfile不存在 或 daemon进程不存在 - # 启动daemon,切换路径,并启动etcd + # The pidfile does not exist or the daemon process does not exist + # Start the daemon, switch paths, and start ETCD daemon --name etcd --core \ @@ -90,9 +90,9 @@ function start_etcd() { -- {{ install_etcd_dir }}/etcd --config-file ${confPath} } -# 停止daemon进程和etcd +# Stop the daemon process and ETCD function stop_etcd() { - # 判断是否已经通过daemon启动了etcd + # Determine if ETCD has been started through daemon daemon --name etcd --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -112,7 +112,7 @@ function stop_etcd() { # restart function restart_etcd() { - # 判断是否已经通过daemon启动了etcd + # Determine if ETCD has been started through daemon daemon --name etcd --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -127,7 +127,7 @@ function restart_etcd() { fi } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " etcd-daemon start -- start deamon process and watch on etcd process" @@ -139,7 +139,7 @@ function usage() { echo " etcd-daemon start -c /etcd/etcd.conf.yml -l ${HOME}/etcd.log" } -# 检查参数启动参数,最少1个 +# Check parameter startup parameters, at least 1 if [ $# -lt 1 ] then usage @@ -150,7 +150,7 @@ case $1 in "start") shift # pass first argument - # 解析参数 + # Parsing parameters while [[ $# -gt 1 ]] do key=$1 @@ -176,11 +176,11 @@ case $1 in start_etcd ;; "stop") - # 停止daemon和etcd进程 + # Stop the daemon and etcd processes stop_etcd ;; "restart") - # 重启etcd + # Restart etcd restart_etcd ;; *) diff --git a/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2 b/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2 index 6d69e6d47d..81f55b7ed7 100644 --- a/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2 +++ b/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2 @@ -1,12 +1,12 @@ #!/bin/bash -# curve-mds路径 +# curve-mds path curveBin={{ curve_bin_dir }}/curve-mds -# 默认配置文件 +# Default configuration file confPath={{ mds_config_path }} -# 日志文件路径 +# Log file path logPath={{ mds_log_dir }} # mdsAddr @@ -28,30 +28,30 @@ function ip_value() { }' } -# 启动mds +# Start mds function start_mds() { - # 检查daemon + # Check the daemon if ! type daemon &> /dev/null then echo "No daemon installed" exit 1 fi - # 检查curve-mds + # Check curve-mds if [ ! -f ${curveBin} ] then echo "No curve-mds installed" exit 1 fi - # 检查配置文件 + # Check configuration file if [ ! 
-f ${confPath} ] then echo "Not found mds.conf, Path is ${confPath}" exit 1 fi - # 判断是否已经通过daemon启动了curve-mds + # Determine if curve-mds has been started through daemon daemon --name curve-mds --pidfile ${pidFile} --running if [ $? -eq 0 ] then @@ -59,7 +59,7 @@ function start_mds() { exit fi - # 创建logPath + # Create logPath mkdir -p ${logPath} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -67,14 +67,14 @@ function start_mds() { exit 1 fi - # 检查logPath是否有写权限 + # Check if logPath has write permission if [ ! -w ${logPath} ] then echo "Write permission denied: ${logPath}" exit 1 fi - # 检查consoleLog是否可写或者是否能够创建 + # Check if consoleLog is writable or can be created touch ${consoleLog} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -82,7 +82,7 @@ function start_mds() { exit 1 fi - # 检查daemonLog是否可写或者是否能够创建 + # Check if the daemonLog is writable or can be created touch ${daemonLog} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -90,7 +90,7 @@ function start_mds() { exit 1 fi - # 未指定mdsAddr, 从配置文件中解析出网段 + # No mdsAddr specified, resolving network segment from configuration file if [ -z ${mdsAddr} ] then subnet=`cat $confPath|grep global.subnet|awk -F"=" '{print $2}'` @@ -101,11 +101,11 @@ function start_mds() { ip= echo "subnet: $subnet" echo "port: $port" - # 对prefix再取一次模,为了支持10.182.26.50/22这种格式 + # Take the module again for the prefix to support the format 10.182.26.50/22 prefix=$(($prefix&$mask)) for i in `/sbin/ifconfig -a|grep inet|grep -v inet6|awk '{print $2}'|tr -d "addr:"` do - # 把ip转换成整数 + # Convert IP to an integer ip_int=$(ip_value $i) if [ $(($ip_int&$mask)) -eq $prefix ] then @@ -132,9 +132,9 @@ function start_mds() { show_status } -# 停止daemon进程,且停止curve-mds +# Stop the daemon process and stop the curve-mds function stop_mds() { - # 判断是否已经通过daemon启动了curve-mds + # Determine if curve-mds has been started through daemon daemon --name curve-mds --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -154,7 +154,7 @@ function stop_mds() { # restart function restart_mds() { - # 判断是否已经通过daemon启动了curve-mds + # Determine if curve-mds has been started through daemon daemon --name curve-mds --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -171,7 +171,7 @@ function restart_mds() { # show status function show_status() { - # 判断是否已经通过daemon启动了curve-mds + # Determine if curve-mds has been started through daemon daemon --name curve-mds --pidfile ${pidFile} --running if [ $? 
-ne 0 ] then @@ -179,11 +179,11 @@ function show_status() { exit 1 fi - # 查询leader的IP + # Query the IP of the leader leaderAddr=`tac ${consoleLog}|grep -a -m 1 -B 1000000 "Logging before InitGoogleLogging()"|grep "leader"|grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}"|head -n1` - # 如果load mds configuration之后的日志,没有leader相关日志 - # 那么leaderAddr为空, mds应该没有起来 + # If there are no leader related logs in the logs after load mds configuration + # So leaderAddr is empty, and mds should not be up if [ -z ${leaderAddr} ] then echo "MDS may not start successfully, check log" @@ -194,7 +194,7 @@ function show_status() { then echo "Current MDS is LEADER" else - # 查询是否和自身ip相等 + # Check if it is equal to its own IP address for ip in `(hostname -I)` do if [ ${leaderAddr} = ${ip} ] @@ -208,7 +208,7 @@ function show_status() { fi } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " ./mds-daemon.sh start -- start deamon process and watch on curve-mds process" @@ -222,7 +222,7 @@ function usage() { echo " ./mds-daemon.sh start -c /etc/curve/mds.conf -l ${HOME}/ -a 127.0.0.1:6666" } -# 检查参数启动参数,最少1个 +# Check parameter startup parameters, at least 1 if [ $# -lt 1 ] then usage @@ -233,7 +233,7 @@ case $1 in "start") shift # pass first argument - # 解析参数 + # Parsing parameters while [[ $# -gt 1 ]] do key=$1 diff --git a/curve-ansible/roles/install_package/templates/nebd-daemon.j2 b/curve-ansible/roles/install_package/templates/nebd-daemon.j2 index 50bdc2a07e..d170963075 100644 --- a/curve-ansible/roles/install_package/templates/nebd-daemon.j2 +++ b/curve-ansible/roles/install_package/templates/nebd-daemon.j2 @@ -133,7 +133,7 @@ function stop_one() { return fi - # 判断是否已经通过daemon启动了nebd-server + # Determine if nebd-server has been started through daemon daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running if [ $? -ne 0 ]; then echo "$1: didn't start nebd-server by daemon" @@ -174,7 +174,7 @@ function restart_one() { return fi - # 判断是否已经通过daemon启动了nebd-server + # Determine if nebd-server has been started through daemon daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running if [ $? -ne 0 ]; then echo "$1: didn't start nebd-server by daemon" @@ -262,7 +262,7 @@ function status() { done } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " nebd-daemon start -- start deamon process and watch on nebd-server process for all instance" @@ -278,7 +278,7 @@ function usage() { echo " nebd-daemon status-one -- show if the nebd-server is running by daemon for current user's instance" } -# 检查参数启动参数,最少1个 +# Check parameter startup parameters, at least 1 if [ $# -lt 1 ]; then usage exit diff --git a/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2 b/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2 index 4d7edae130..169ff2b84d 100644 --- a/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2 +++ b/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2 @@ -1,12 +1,12 @@ #!/bin/bash -# curve-snapshotcloneserver路径 +# curve-snapshotcloneserver path curveBin={{ curve_bin_dir }}/curve-snapshotcloneserver -# 默认配置文件 +# Default configuration file confPath={{ snapshot_config_path }} -# 日志文件路径 +# Log file path logPath={{ snapshot_clone_server_log_dir }} # serverAddr @@ -28,30 +28,30 @@ function ip_value() { }' } -# 启动snapshotcloneserver +# Starting snapshotcloneserver function start_server() { - # 检查daemon + # Check the daemon if ! 
type daemon &> /dev/null then echo "No daemon installed" exit fi - # 检查curve-snapshotcloneserver + # Check the curve-snapshotcloneserver if [ ! -f ${curveBin} ] then echo "No curve-snapshotcloneserver installed, Path is ${curveBin}" exit fi - # 检查配置文件 + # Check configuration file if [ ! -f ${confPath} ] then echo "Not found snapshot_clone_server.conf, Path is ${confPath}" exit fi - # 判断是否已经通过daemon启动了curve-snapshotcloneserver + # Determine if the curve-snapshotcloneserver has been started through daemon daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running if [ $? -eq 0 ] then @@ -59,7 +59,7 @@ function start_server() { exit fi - # 创建logPath + # Create logPath mkdir -p ${logPath} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -67,14 +67,14 @@ function start_server() { exit fi - # 检查logPath是否有写权限 + # Check if logPath has write permission if [ ! -w ${logPath} ] then echo "Write permission denied: ${logPath}" exit 1 fi - # 检查consoleLog是否可写或者能否创建,初始化glog之前的日志存放在这里 + # Check if the consoleLog can be written or created, and store the logs before initializing the glog here touch ${consoleLog} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -82,7 +82,7 @@ function start_server() { exit fi - # 检查daemonLog是否可写或者是否能够创建 + # Check if the daemonLog is writable or can be created touch ${daemonLog} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -90,7 +90,7 @@ function start_server() { exit fi - # 未指定serverAddr, 从配置文件中解析出网段 + # No serverAddr specified, resolving network segment from configuration file if [ -z ${serverAddr} ] then subnet=`cat $confPath|grep server.subnet|awk -F"=" '{print $2}'` @@ -101,11 +101,11 @@ function start_server() { ip= echo "subnet: $subnet" echo "port: $port" - # 对prefix再取一次模,为了支持10.182.26.50/22这种格式 + # Take the module again for the prefix to support the format 10.182.26.50/22 prefix=$(($prefix&$mask)) for i in `/sbin/ifconfig -a|grep inet|grep -v inet6|awk '{print $2}'|tr -d "addr:"` do - # 把ip转换成整数 + # Convert IP to an integer ip_int=$(ip_value $i) if [ $(($ip_int&$mask)) -eq $prefix ] then @@ -132,9 +132,9 @@ function start_server() { show_status } -# 停止daemon进程和curve-snapshotcloneserver +# Stop the daemon process and curve-snapshotcloneserver function stop_server() { - # 判断是否已经通过daemon启动了curve-snapshotcloneserver + # Determine if the curve-snapshotcloneserver has been started through daemon daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -154,7 +154,7 @@ function stop_server() { # restart function restart_server() { - # 判断是否已经通过daemon启动了curve-snapshotcloneserver + # Determine if the curve-snapshotcloneserver has been started through daemon daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -171,7 +171,7 @@ function restart_server() { # show status function show_status() { - # 判断是否已经通过daemon启动了curve-snapshotcloneserver + # Determine if the curve-snapshotcloneserver has been started through daemon daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running if [ $? 
-ne 0 ] then @@ -179,11 +179,11 @@ function show_status() { exit 1 fi - # 查询leader的IP + # Query the IP of the leader leaderAddr=`tac ${consoleLog}|grep -a -m 1 -B 1000000 "Logging before InitGoogleLogging()"|grep "leader"|grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}"|head -n1` - # 如果load configuration之后的日志,没有leader相关日志 - # 那么leaderAddr为空, snapshotcloneserver应该没有起来 + # If there are no leader related logs in the logs after load configuration + # So the leaderAddr is empty, and the snapshotcloneserver should not be up if [ -z ${leaderAddr} ] then echo "SnapshotClone may not start successfully, check log" @@ -194,7 +194,7 @@ function show_status() { then echo "Current SnapshotClone is LEADER" else - # 查询是否和自身ip相等 + # Check if it is equal to its own IP address for ip in `(hostname -I)` do if [ ${leaderAddr} = ${ip} ] @@ -208,7 +208,7 @@ function show_status() { fi } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " snapshot-daemon start -- start deamon process and watch on curve-snapshotcloneserver process" @@ -222,7 +222,7 @@ function usage() { echo " snapshot-daemon start -c /etc/curve/snapshot_clone_server.conf -l ${HOME}/ -a 127.0.0.1:5555" } -# 检查参数启动参数,最少1个 +# Check parameter startup parameters, at least 1 if [ $# -lt 1 ] then usage @@ -233,7 +233,7 @@ case $1 in "start") shift # pass first argument - # 解析参数 + # Parsing parameters while [[ $# -gt 1 ]] do key=$1 diff --git a/curve-ansible/roles/install_package/vars/main.yml b/curve-ansible/roles/install_package/vars/main.yml index ee545c1d7b..8967883b7c 100644 --- a/curve-ansible/roles/install_package/vars/main.yml +++ b/curve-ansible/roles/install_package/vars/main.yml @@ -15,7 +15,7 @@ # limitations under the License. # -# 包的名称 +# The name of the package package_name: package_version: lib_installed: false diff --git a/curve-ansible/roles/restart_service/defaults/main.yml b/curve-ansible/roles/restart_service/defaults/main.yml index 061c32a4ec..0051d42ecc 100644 --- a/curve-ansible/roles/restart_service/defaults/main.yml +++ b/curve-ansible/roles/restart_service/defaults/main.yml @@ -16,7 +16,7 @@ # check_health: False -# 启动一个chunkserver需要的最大时间 +# The maximum time required to start a chunkserver restart_chunkserver_async: 100 restart_chunkserver_check_delay: 5 restart_chunkserver_check_times: 20 diff --git a/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml b/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml index d74b05abc7..6b3050bb01 100644 --- a/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml +++ b/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml @@ -17,7 +17,7 @@ - name: Determine need restart or not block: - # 获取mds的版本 + # Obtain the version of mds - name: get curve version vars: metric_port: "{{ mds_dummy_port }}" diff --git a/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml b/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml index 73f6bcf636..966d9b95d6 100644 --- a/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml +++ b/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml @@ -17,7 +17,7 @@ - name: Determine need restart or not block: - # 获取snapshotclone的版本 + # Obtain the version of snapshotclone - name: get snapshotclone version vars: metric_port: "{{ snapshot_dummy_port }}" diff --git a/curve-ansible/roles/restart_service/tasks/main.yml b/curve-ansible/roles/restart_service/tasks/main.yml index befb68b5b3..a8b077a3a4 100644 --- 
a/curve-ansible/roles/restart_service/tasks/main.yml +++ b/curve-ansible/roles/restart_service/tasks/main.yml @@ -15,6 +15,6 @@ # limitations under the License. # -# 重启对应的服务 +# Restart the corresponding service - name: restart_service include_tasks: "include/restart_{{ service_name }}.yml" diff --git a/curve-ansible/roles/restart_service/vars/main.yml b/curve-ansible/roles/restart_service/vars/main.yml index 94f0bad0c6..44f7d6797e 100644 --- a/curve-ansible/roles/restart_service/vars/main.yml +++ b/curve-ansible/roles/restart_service/vars/main.yml @@ -15,7 +15,7 @@ # limitations under the License. # -# 服务的名称 +# Name of service service_name: need_restart: true sudo: "" diff --git a/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml b/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml index 82478df03e..f2a67fdba1 100644 --- a/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml +++ b/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml @@ -1,4 +1,4 @@ -# 服务的名称 +# Name of service service_name: leader_ip: all_ip: diff --git a/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml b/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml index 25fecb2337..32602a56cd 100644 --- a/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml +++ b/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml @@ -27,7 +27,7 @@ poll: "{{ service_poll }}" failed_when: start_chunkserver_res.rc != 0 or "down" in start_chunkserver_res.stdout -# 打印控制台输出 +# Print Console Output - name: print console output debug: var: start_chunkserver_res.stdout_lines diff --git a/curve-ansible/roles/start_service/tasks/main.yml b/curve-ansible/roles/start_service/tasks/main.yml index 483dfd5d9a..be93405394 100644 --- a/curve-ansible/roles/start_service/tasks/main.yml +++ b/curve-ansible/roles/start_service/tasks/main.yml @@ -15,6 +15,6 @@ # limitations under the License. # -# 启动对应的服务 +# Start the corresponding service - name: start_service include_tasks: "include/start_{{ service_name }}.yml" diff --git a/curve-ansible/roles/stop_service/tasks/main.yml b/curve-ansible/roles/stop_service/tasks/main.yml index 0b2bbb486e..d3b8cbd018 100644 --- a/curve-ansible/roles/stop_service/tasks/main.yml +++ b/curve-ansible/roles/stop_service/tasks/main.yml @@ -15,6 +15,6 @@ # limitations under the License. # -# 启动对应的服务 +# Start the corresponding service - name: stop_service include_tasks: "include/stop_{{ service_name }}.yml" diff --git a/curve-ansible/rolling_update_curve.yml b/curve-ansible/rolling_update_curve.yml index fddd6832bf..61949f9f8f 100644 --- a/curve-ansible/rolling_update_curve.yml +++ b/curve-ansible/rolling_update_curve.yml @@ -83,7 +83,7 @@ - { role: generate_config, template_name: topo.json, conf_path: "{{ topo_file_path }}", tags: ["generate_config", "generage_topo_json"] } -# 获取leader节点和follower节点 +# Obtain the leader and follower nodes - name: set mds leader and follower list hosts: mds gather_facts: no @@ -95,7 +95,7 @@ roles: - { role: set_leader_and_follower_list, service_name: mds } -# 按顺序先升级follower节点,再升级leader节点 +# Upgrade the follower node first in order, and then upgrade the leader node - name: update follower and leader server in sequence hosts: mds_servers_followers, mds_servers_leader any_errors_fatal: true @@ -110,14 +110,14 @@ - pause: prompt: "Confirm restart mds in {{ inventory_hostname }}. 
ENTER to continue or CTRL-C A to quit" when: need_confirm | bool - # 重启mds + # Restart mds roles: - { role: restart_service, service_name: mds, expected_curve_version: "{{ mds_package_version }}", command_need_sudo: "{{ mds_need_sudo | bool }}"} ############################## rolling update chunkserver ############################## -# 1. 更新各节点上的配置 +# 1. Update the configuration on each node - name: prepare chunkserver hosts: chunkservers any_errors_fatal: true @@ -136,8 +136,8 @@ - { role: generate_config, template_name: s3.conf, conf_path: "{{ chunkserver_s3_config_path }}", tags: ["generate_config", "generage_cs_s3_conf"] } -# 逐个重启chunkserver,每重启完一个需要等待copyset健康 -# 继续操作下一个的的时候还需要一个命令行交互确认 +# Restart the chunkservers one by one, and wait for the copyset to be healthy after each restart +# When continuing with the next operation, a command line interaction confirmation is also required - name: restart chunkserver and wait healthy hosts: chunkservers any_errors_fatal: true @@ -203,7 +203,7 @@ - pause: prompt: "Confirm restart snapshotclone in {{ inventory_hostname }}. ENTER to continue or CTRL-C A to quit" when: need_confirm | bool - # 重启snapshot clone + # Restart snapshot clone roles: - { role: restart_service, service_name: snapshotclone, expected_curve_version: "{{ snapshot_package_version }}", command_need_sudo: "{{ snapshot_need_sudo | bool }}" } diff --git a/curve-ansible/server.ini b/curve-ansible/server.ini index eaca5a4515..7e06fbe105 100644 --- a/curve-ansible/server.ini +++ b/curve-ansible/server.ini @@ -14,8 +14,8 @@ localhost ansible_ssh_host=127.0.0.1 [zone1] localhost ansible_ssh_host=127.0.0.1 -# 请确保zone内机器数量一致,如果有多个zone,则在上面根据zone1格式增加zone2,zone3...即可。 -# 如果zone下面有多个机器,则换行一起列出来即可。比如: +# Please ensure that the number of machines in the zone is consistent. If there are multiple zones, add zone2, zone3... based on the zone1 format above. +# If there are multiple machines under the zone, they can be listed together in a new line. 
For example: # [zone1] # localhost ansible_ssh_host=127.0.0.1 # localhost2 ansible_ssh_host=127.0.0.2 @@ -32,7 +32,7 @@ mds_subnet=127.0.0.1/22 defined_healthy_status="cluster is healthy" mds_package_version="0.0.6.1+160be351" tool_package_version="0.0.6.1+160be351" -# 启动命令是否用sudo +# Whether to use sudo for startup command mds_need_sudo=True mds_config_path=/etc/curve/mds.conf mds_log_dir=/data/log/curve/mds @@ -90,7 +90,7 @@ chunkserver_subnet=127.0.0.1/22 global_enable_external_server=True chunkserver_external_subnet=127.0.0.1/22 chunkserver_s3_config_path=/etc/curve/cs_s3.conf -# chunkserver使用的client相关的配置 +# Client related configurations used by chunkserver chunkserver_client_config_path=/etc/curve/cs_client.conf client_register_to_mds=False client_chunkserver_op_max_retry=3 @@ -149,10 +149,10 @@ sudo_or_not=True ansible_become_user=curve ansible_become_flags=-iu curve update_config_with_puppet=False -# 启动服务要用到ansible的异步操作,否则ansible退出后chunkserver也会退出 -# 异步等待结果的总时间 +# Starting the service requires the asynchronous operation of ansible, otherwise the chunkserver will also exit after ansible exits +# Total time waiting for results asynchronously service_async=5 -# 异步查询结果的间隔 +# Interval between asynchronous query results service_poll=1 install_with_deb=False restart_directly=False diff --git a/curvefs/conf/curvebs_client.conf b/curvefs/conf/curvebs_client.conf index e0eb4d70f2..23fc37b087 100644 --- a/curvefs/conf/curvebs_client.conf +++ b/curvefs/conf/curvebs_client.conf @@ -1,29 +1,29 @@ # -################### mds一侧配置信息 ################## +################### MDS side configuration information################## # -# mds的地址信息,对于mds集群,地址以逗号隔开 +# Address information for mds, separated by commas for mds clusters mds.listen.addr=127.0.0.1:6666 -# 初始化阶段向mds注册开关,默认为开 +# Register switch with mds during initialization phase, default to on mds.registerToMDS=true -# 与mds通信的rpc超时时间 +# RPC timeout for communication with mds mds.rpcTimeoutMS=500 -# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值 +# The maximum timeout for rpc communication with mds, and the timeout for exponential backoff cannot exceed this value mds.maxRPCTimeoutMS=2000 -# 与mds通信重试总时间 +# Total retry time for communication with mds mds.maxRetryMS=8000 -# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数 +# Switch if the number of consecutive retries on the current mds exceeds this limit, which includes the number of timeout retries mds.maxFailedTimesBeforeChangeMDS=2 -# 与MDS一侧保持一个lease时间内多少次续约 +# How many renewals are there within a lease period with MDS mds.refreshTimesPerLease=4 -# mds RPC接口每次重试之前需要先睡眠一段时间 +# The mds RPC interface requires a period of sleep before each retry mds.rpcRetryIntervalUS=100000 # The normal retry times for trigger wait strategy @@ -36,123 +36,123 @@ mds.maxRetryMsInIOPath=86400000 mds.waitSleepMs=10000 # -################# metacache配置信息 ################ +################# metacache Configuration Information ################ # -# 获取leader的rpc超时时间 +# Obtain the rpc timeout of the leader metacache.getLeaderTimeOutMS=500 -# 获取leader的重试次数 +# Retrieve the number of retries for the leader metacache.getLeaderRetry=5 -# 获取leader接口每次重试之前需要先睡眠一段时间 +# Obtaining the leader interface requires a period of sleep before each retry metacache.rpcRetryIntervalUS=100000 # -############### 调度层的配置信息 ############# +############### Configuration information of the scheduling layer ############# # -# 调度层队列大小,每个文件对应一个队列 -# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。 +# Scheduling layer queue size, with one queue for each file +# The depth of the scheduling queue 
can affect the overall throughput of the client; this queue stores asynchronous IO tasks.
schedule.queueCapacity=1000000
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of threads executing the queue
+# An executing thread dequeues an IO task, sends it to the network, and immediately returns for the next task.
+# From dequeue to finishing the rpc send takes roughly 20us-100us: 20us in the normal case where no leader
+# lookup is needed, and around 100us when the leader has to be fetched during sending. One thread can therefore
+# sustain roughly 100k-500k requests per second, which already meets the performance requirements
schedule.threadpoolSize=2
-# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程
-# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回,
-# 这样libcurve不占用qemu的线程,不阻塞其异步调用
+# A task queue introduced to isolate the QEMU-side thread, since there is only one IO thread on the QEMU side
+# When the QEMU side calls the AIO interface, the call is simply pushed onto the task queue and returns,
+# so libcurve does not occupy QEMU's thread and does not block its asynchronous calls
isolation.taskQueueCapacity=1000000
-# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程
+# The size of the task queue thread pool for isolating QEMU threads, with a default value of 1 thread
isolation.taskThreadPoolSize=1
#
-################ 与chunkserver通信相关配置 #############
+################ Configuration related to communication with chunkserver #############
#
-# 读写接口失败的OP之间重试睡眠
+# Sleep interval between retries of failed read/write OPs
chunkserver.opRetryIntervalUS=100000
-# 失败的OP重试次数
+# Number of retries for a failed OP
chunkserver.opMaxRetry=2500000
-# 与chunkserver通信的rpc超时时间
+# RPC timeout for communication with chunkserver
chunkserver.rpcTimeoutMS=1000
-# 开启基于appliedindex的读,用于性能优化
+# Enable reading based on appliedindex for performance optimization
chunkserver.enableAppliedIndexRead=1
-# 重试请求之间睡眠最长时间
-# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间
-# 这个时间最大为maxRetrySleepIntervalUs
+# Maximum sleep time between retry requests
+# When the network is congested or the chunkserver is overloaded, the sleep time needs to be increased;
+# it is capped at maxRetrySleepIntervalUs
chunkserver.maxRetrySleepIntervalUS=8000000
-# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略
-# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间
-# 这个时间最大为maxTimeoutMS
+# Maximum rpc timeout for retry requests; the timeout follows an exponential backoff strategy
+# When timeouts are caused by network congestion, the rpc timeout needs to be increased;
+# it is capped at maxTimeoutMS
chunkserver.maxRPCTimeoutMS=8000
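A minimal Python sketch of the capped exponential backoff described in the comments above. This is illustrative only, not part of this patch; the base values are taken from this file's defaults (opRetryIntervalUS=100000, rpcTimeoutMS=1000), and the function name is made up.

def next_retry_params(attempt, base_sleep_us=100000, base_timeout_ms=1000,
                      max_sleep_us=8000000, max_timeout_ms=8000):
    # Doubles both values per retry and caps them, in the spirit of
    # chunkserver.maxRetrySleepIntervalUS and chunkserver.maxRPCTimeoutMS.
    sleep_us = min(base_sleep_us * (2 ** attempt), max_sleep_us)
    timeout_ms = min(base_timeout_ms * (2 ** attempt), max_timeout_ms)
    return sleep_us, timeout_ms

# attempt 0 -> (100000, 1000); the timeout reaches its cap at attempt 3,
# the sleep interval at attempt 7.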
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# If this value is exceeded, a health check is performed; if the health check fails, the chunkserver is marked as unstable
chunkserver.maxStableTimeoutTimes=10
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# Timeout of the health check request issued after consecutive rpc timeouts on a chunkserver
chunkserver.checkHealthTimeoutMs=100
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on the same server exceeds this value,
+# all chunkservers on that server are marked as unstable
chunkserver.serverStableThreshold=3
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang
-# 真正宕机的情况下,请求重试一定次数后会处理完成
-# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避
+# When the underlying chunkservers are under heavy load, unstable may also be triggered
+# Because the copyset leader may change, the request timeout would be reset to the default value, leading to IO hangs
+# In the case of a real outage, a request completes after a certain number of retries
+# If retries keep going on, it is not an outage, and the timeout should still follow the exponential backoff logic
+# Once the number of retries of a request exceeds this value, its timeout always enters exponential backoff
chunkserver.minRetryTimesForceTimeoutBackoff=5
-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# When the retries of an rpc exceed maxRetryTimesBeforeConsiderSuspend,
+# it is recorded as suspended IO and the metric raises an alarm
chunkserver.maxRetryTimesBeforeConsiderSuspend=20
#
-################# 文件级别配置项 #############
+################# File level configuration items #############
#
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding rpcs allowed by libcurve's underlying rpc scheduling; the inflight RPCs of each file are counted independently
global.fileMaxInFlightRPCNum=128
-# 文件IO下发到底层chunkserver最大的分片KB
+# Maximum split size, in KB, when file IO is dispatched to the underlying chunkserver
global.fileIOSplitMaxSizeKB=64
#
-################# log相关配置 ###############
+################# Log related configuration ###############
#
# enable logging or not
global.logging.enable=True
#
-# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
global.logLevel=0
-# 设置log的路径
+# Set the log path
global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ /curvebs/client/logs __CURVEADM_TEMPLATE__
-# 单元测试情况下
+# For unit tests
# logpath=./runlog/
#
-################# 读源卷相关配置 ###############
+################# Read source volume related configurations ###############
#
-# 读取源卷时打开的fd超时关闭时间300s
+# An fd opened for reading the source volume is closed after a 300s timeout
closefd.timeout=300
-# 读取源卷时打开的fd后台线程每600s扫描一遍fdMap,关闭超时fd
+# A background thread scans the fdMap every 600s and closes the timed-out fds opened for reading the source volume
closefd.timeInterval=600
#
-############### metric 配置信息 #############
+############### metric configuration information #############
#
global.metricDummyServerStartPort=9000
-# 是否关闭健康检查: true/关闭 false/不关闭
+# Whether to disable the health check: true = disable, false = keep enabled
global.turnOffHealthCheck=true
#
diff --git a/curvefs/monitor/grafana-report.py b/curvefs/monitor/grafana-report.py
index 016473a509..16f8ce65cd 100644
--- a/curvefs/monitor/grafana-report.py
+++ b/curvefs/monitor/grafana-report.py
@@ -13,17 +13,18 @@
sender = 'Grafana'
to_address = ['xxxxxxxxx@163.com']
username = 'xxxxxxxxx@163.com'
-password = 'xxxxxxxxx' # SMTP授权码
+password = 'xxxxxxxxx'  # SMTP authorization code
smtpserver = 'xxxx.163.com:1234'
-sourcefile= '/etc/curvefs/monitor/grafana/report/report.tex'
-imagedir= '/etc/curvefs/monitor/grafana/report/images/'
-pdfpath= '/etc/curvefs/monitor/grafana/report/report.pdf'
+sourcefile = '/etc/curvefs/monitor/grafana/report/report.tex'
+imagedir = '/etc/curvefs/monitor/grafana/report/images/'
+pdfpath = '/etc/curvefs/monitor/grafana/report/report.pdf'
clustername = '【CURVE】xxxxxxxxx'
grafanauri = '127.0.0.1:3000'
reporteruri = '127.0.0.1:8686'
dashboardid = 'xxxxxxxxx'
apitoken =
'xxxxxxxxx' + def get_images(): image_name_list = [] file = open(sourcefile, 'r') @@ -32,16 +33,17 @@ def get_images(): # print (line) prefix_image_name = re.findall(r'image\d+', line) if prefix_image_name: - print (prefix_image_name) + print(prefix_image_name) image_name_list.append(prefix_image_name[0]) line = file.readline() file.close() return image_name_list + def getMsgImage(image_name): file_name = imagedir+image_name+'.png' - print (file_name) + print(file_name) fp = open(file_name, 'rb') msgImage = MIMEImage(fp.read()) fp.close() @@ -49,6 +51,7 @@ def getMsgImage(image_name): msgImage.add_header("Content-Disposition", "inline", filename=file_name) return msgImage + def attach_body(msgRoot): image_list = get_images() @@ -57,36 +60,41 @@ def attach_body(msgRoot): image_body += ('%s' % (image, image)) msgRoot.attach(getMsgImage(image)) - html_str = '%s' % (image_body) + html_str = '%s' % ( + image_body) mailMsg = """ -

可点击如下链接在grafana面板中查看(若显示混乱,请在附件pdf中查看)
-grafana链接
+You can click the following link to view the dashboard in Grafana (if displayed incorrectly, please check the attached PDF).
+grafana link
""" % (grafanauri) mailMsg += html_str print(mailMsg) - content = MIMEText(mailMsg,'html','utf-8') + content = MIMEText(mailMsg, 'html', 'utf-8') msgRoot.attach(content) -# 发送dashboard日报邮件 +# Sending dashboard daily report email + + def send_mail(): time_now = int(Time.time()) time_local = Time.localtime(time_now) - dt = Time.strftime("%Y%m%d",time_local) + dt = Time.strftime("%Y%m%d", time_local) msgRoot = MIMEMultipart('related') - msgRoot['Subject'] = '%s集群监控日报-%s' % (clustername, dt) + msgRoot['Subject'] = '%sCluster Monitoring Daily Report-%s' % ( + clustername, dt) msgRoot['From'] = sender - msgRoot['To'] = ",".join( to_address ) # 发给多人 + msgRoot['To'] = ",".join(to_address) # Send to multiple recipients - # 添加pdf附件 + # Add PDF attachment pdf_attach = MIMEText(open(pdfpath, 'rb').read(), 'base64', 'utf-8') pdf_attach["Content-Type"] = 'application/octet-stream' - # 这里的filename可以任意写,写什么名字,邮件中显示什么名字 - pdf_attach["Content-Disposition"] = 'attachment; filename="reporter-{}.pdf"'.format(dt) + # The filename here can be anything, whatever name is written will be displayed in the email + pdf_attach["Content-Disposition"] = 'attachment; filename="reporter-{}.pdf"'.format( + dt) msgRoot.attach(pdf_attach) - # 添加正文 + # Add the body attach_body(msgRoot) smtp = smtplib.SMTP_SSL(smtpserver) @@ -94,11 +102,13 @@ def send_mail(): smtp.sendmail(sender, to_address, msgRoot.as_string()) smtp.quit() + def clear(): shutil.rmtree(imagedir) os.mkdir(imagedir) os.chmod(imagedir, 0777) + def generate_report(): downloadcmd = ( "wget -O %s " @@ -108,10 +118,12 @@ def generate_report(): print(downloadcmd) os.system(downloadcmd) + def main(): generate_report() send_mail() clear() + if __name__ == '__main__': main() diff --git a/curvefs/monitor/grafana/provisioning/dashboards/mds.json b/curvefs/monitor/grafana/provisioning/dashboards/mds.json index 09de6b31f7..a90a8f13c0 100644 --- a/curvefs/monitor/grafana/provisioning/dashboards/mds.json +++ b/curvefs/monitor/grafana/provisioning/dashboards/mds.json @@ -290,7 +290,7 @@ { "columns": [], "datasource": null, - "description": "mds的配置", + "description": "Configuration of MDS", "fieldConfig": { "defaults": { "custom": { @@ -336,7 +336,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -352,7 +352,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -368,7 +368,7 @@ "unit": "short" }, { - "alias": "配置值", + "alias": "Configuration value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", diff --git a/curvefs/src/mds/metaserverclient/metaserver_client.cpp b/curvefs/src/mds/metaserverclient/metaserver_client.cpp index 739704f62a..f9b1278562 100644 --- a/curvefs/src/mds/metaserverclient/metaserver_client.cpp +++ b/curvefs/src/mds/metaserverclient/metaserver_client.cpp @@ -21,6 +21,7 @@ */ #include "curvefs/src/mds/metaserverclient/metaserver_client.h" + #include #include @@ -28,30 +29,30 @@ namespace curvefs { namespace mds { -using curvefs::metaserver::Time; -using curvefs::metaserver::CreateRootInodeRequest; -using curvefs::metaserver::CreateRootInodeResponse; -using curvefs::metaserver::CreateManageInodeRequest; -using curvefs::metaserver::CreateManageInodeResponse; +using curvefs::mds::topology::BuildPeerIdWithAddr; +using curvefs::mds::topology::SplitPeerId; using curvefs::metaserver::CreateDentryRequest; using curvefs::metaserver::CreateDentryResponse; +using curvefs::metaserver::CreateManageInodeRequest; 
+using curvefs::metaserver::CreateManageInodeResponse; +using curvefs::metaserver::CreateRootInodeRequest; +using curvefs::metaserver::CreateRootInodeResponse; using curvefs::metaserver::DeleteDentryRequest; using curvefs::metaserver::DeleteDentryResponse; using curvefs::metaserver::DeleteInodeRequest; using curvefs::metaserver::DeleteInodeResponse; +using curvefs::metaserver::Dentry; using curvefs::metaserver::MetaServerService_Stub; using curvefs::metaserver::MetaStatusCode; -using curvefs::metaserver::Dentry; +using curvefs::metaserver::Time; using curvefs::metaserver::copyset::COPYSET_OP_STATUS; using curvefs::metaserver::copyset::CopysetService_Stub; -using curvefs::mds::topology::SplitPeerId; -using curvefs::mds::topology::BuildPeerIdWithAddr; template FSStatusCode MetaserverClient::SendRpc2MetaServer( - Request *request, Response *response, const LeaderCtx &ctx, - void (T::*func)(google::protobuf::RpcController *, const Request *, - Response *, google::protobuf::Closure *)) { + Request* request, Response* response, const LeaderCtx& ctx, + void (T::*func)(google::protobuf::RpcController*, const Request*, Response*, + google::protobuf::Closure*)) { bool refreshLeader = true; uint32_t maxRetry = options_.rpcRetryTimes; @@ -110,14 +111,14 @@ FSStatusCode MetaserverClient::SendRpc2MetaServer( } } -FSStatusCode MetaserverClient::GetLeader(const LeaderCtx &ctx, - std::string *leader) { +FSStatusCode MetaserverClient::GetLeader(const LeaderCtx& ctx, + std::string* leader) { GetLeaderRequest2 request; GetLeaderResponse2 response; request.set_poolid(ctx.poolId); request.set_copysetid(ctx.copysetId); - for (const std::string &item : ctx.addrs) { + for (const std::string& item : ctx.addrs) { LOG(INFO) << "GetLeader from " << item; if (channel_.Init(item.c_str(), nullptr) != 0) { LOG(ERROR) << "Init channel to metaserver: " << item << " failed!"; @@ -162,7 +163,7 @@ FSStatusCode MetaserverClient::GetLeader(const LeaderCtx &ctx, FSStatusCode MetaserverClient::CreateRootInode( uint32_t fsId, uint32_t poolId, uint32_t copysetId, uint32_t partitionId, uint32_t uid, uint32_t gid, uint32_t mode, - const std::set &addrs) { + const std::set& addrs) { CreateRootInodeRequest request; CreateRootInodeResponse response; request.set_poolid(poolId); @@ -213,7 +214,7 @@ FSStatusCode MetaserverClient::CreateRootInode( FSStatusCode MetaserverClient::CreateManageInode( uint32_t fsId, uint32_t poolId, uint32_t copysetId, uint32_t partitionId, uint32_t uid, uint32_t gid, uint32_t mode, ManageInodeType manageType, - const std::set &addrs) { + const std::set& addrs) { CreateManageInodeRequest request; CreateManageInodeResponse response; request.set_poolid(poolId); @@ -259,14 +260,14 @@ FSStatusCode MetaserverClient::CreateManageInode( FSStatusCode MetaserverClient::CreateDentry( uint32_t fsId, uint32_t poolId, uint32_t copysetId, uint32_t partitionId, - uint64_t parentInodeId, const std::string &name, uint64_t inodeId, - const std::set &addrs) { + uint64_t parentInodeId, const std::string& name, uint64_t inodeId, + const std::set& addrs) { CreateDentryRequest request; CreateDentryResponse response; request.set_poolid(poolId); request.set_copysetid(copysetId); request.set_partitionid(partitionId); - Dentry *d = new Dentry; + Dentry* d = new Dentry; d->set_fsid(fsId); d->set_inodeid(inodeId); d->set_parentinodeid(parentInodeId); @@ -276,7 +277,7 @@ FSStatusCode MetaserverClient::CreateDentry( request.set_allocated_dentry(d); struct timespec now; clock_gettime(CLOCK_REALTIME, &now); - Time *tm = new Time(); + Time* tm = 
new Time(); tm->set_sec(now.tv_sec); tm->set_nsec(now.tv_nsec); request.set_allocated_create(tm); @@ -309,11 +310,10 @@ FSStatusCode MetaserverClient::CreateDentry( } } -FSStatusCode -MetaserverClient::DeleteDentry(uint32_t poolId, uint32_t copysetId, - uint32_t partitionId, uint32_t fsId, - uint64_t parentInodeId, const std::string &name, - const std::set &addrs) { +FSStatusCode MetaserverClient::DeleteDentry( + uint32_t poolId, uint32_t copysetId, uint32_t partitionId, uint32_t fsId, + uint64_t parentInodeId, const std::string& name, + const std::set& addrs) { DeleteDentryRequest request; DeleteDentryResponse response; request.set_poolid(poolId); @@ -342,13 +342,14 @@ MetaserverClient::DeleteDentry(uint32_t poolId, uint32_t copysetId, return ret; } else { switch (response.statuscode()) { - case MetaStatusCode::OK: - return FSStatusCode::OK; - default: - LOG(ERROR) << "DeleteDentry failed, request = " - << request.ShortDebugString() - << ", response statuscode = " << response.statuscode(); - return FSStatusCode::DELETE_DENTRY_FAIL; + case MetaStatusCode::OK: + return FSStatusCode::OK; + default: + LOG(ERROR) << "DeleteDentry failed, request = " + << request.ShortDebugString() + << ", response statuscode = " + << response.statuscode(); + return FSStatusCode::DELETE_DENTRY_FAIL; } } } @@ -372,7 +373,7 @@ FSStatusCode MetaserverClient::DeleteInode(uint32_t fsId, uint64_t inodeId) { request.set_partitionid(0); request.set_fsid(fsId); request.set_inodeid(inodeId); - // TODO(@威姐): 适配新的proto + // TODO(@ Wei Jie): Adapt to the new proto request.set_copysetid(1); request.set_poolid(1); request.set_partitionid(1); @@ -398,10 +399,10 @@ FSStatusCode MetaserverClient::DeleteInode(uint32_t fsId, uint64_t inodeId) { FSStatusCode MetaserverClient::CreatePartition( uint32_t fsId, uint32_t poolId, uint32_t copysetId, uint32_t partitionId, - uint64_t idStart, uint64_t idEnd, const std::set &addrs) { + uint64_t idStart, uint64_t idEnd, const std::set& addrs) { curvefs::metaserver::CreatePartitionRequest request; curvefs::metaserver::CreatePartitionResponse response; - PartitionInfo *partition = request.mutable_partition(); + PartitionInfo* partition = request.mutable_partition(); partition->set_fsid(fsId); partition->set_poolid(poolId); partition->set_copysetid(copysetId); @@ -448,7 +449,7 @@ FSStatusCode MetaserverClient::CreatePartition( FSStatusCode MetaserverClient::DeletePartition( uint32_t poolId, uint32_t copysetId, uint32_t partitionId, - const std::set &addrs) { + const std::set& addrs) { curvefs::metaserver::DeletePartitionRequest request; curvefs::metaserver::DeletePartitionResponse response; request.set_poolid(poolId); @@ -489,8 +490,8 @@ FSStatusCode MetaserverClient::DeletePartition( } } -FSStatusCode MetaserverClient::CreateCopySet(uint32_t poolId, - uint32_t copysetId, const std::set &addrs) { +FSStatusCode MetaserverClient::CreateCopySet( + uint32_t poolId, uint32_t copysetId, const std::set& addrs) { CreateCopysetRequest request; CreateCopysetResponse response; auto copyset = request.add_copysets(); @@ -500,7 +501,7 @@ FSStatusCode MetaserverClient::CreateCopySet(uint32_t poolId, copyset->add_peers()->set_address(BuildPeerIdWithAddr(item)); } - for (const std::string &item : addrs) { + for (const std::string& item : addrs) { if (channel_.Init(item.c_str(), nullptr) != 0) { LOG(ERROR) << "Init channel to metaserver: " << item << " failed!"; return FSStatusCode::RPC_ERROR; @@ -544,7 +545,7 @@ FSStatusCode MetaserverClient::CreateCopySet(uint32_t poolId, } FSStatusCode 
MetaserverClient::CreateCopySetOnOneMetaserver( - uint32_t poolId, uint32_t copysetId, const std::string &addr) { + uint32_t poolId, uint32_t copysetId, const std::string& addr) { CreateCopysetRequest request; CreateCopysetResponse response; diff --git a/curvefs/src/metaserver/copyset/conf_epoch_file.h b/curvefs/src/metaserver/copyset/conf_epoch_file.h index abe14f2f8b..ff3953b080 100644 --- a/curvefs/src/metaserver/copyset/conf_epoch_file.h +++ b/curvefs/src/metaserver/copyset/conf_epoch_file.h @@ -41,28 +41,30 @@ class ConfEpochFile { explicit ConfEpochFile(curve::fs::LocalFileSystem* fs) : fs_(fs) {} /** - * 加载快照文件中的配置版本 - * @param path:文件路径 - * @param logicPoolID:逻辑池id - * @param copysetID:复制组id - * @param epoch:配置版本,出参,返回读取的epoch值 - * @return 0,成功; -1失败 + * Load the configuration version from the snapshot file + * @param path: File path + * @param poolId: Pool ID + * @param copysetId: Copyset ID + * @param epoch: Configuration version; output parameter that returns the + * epoch value read + * @return 0 on success; -1 on failure */ int Load(const std::string& path, PoolId* poolId, CopysetId* copysetId, uint64_t* epoch); /** - * 保存配置版本信息到快照文件中序列化的格式如下,处理head表示长度,使用二 - * 进制,其它都是文本格式,便于必要的时候能够直接用查看,sync保证数据落盘 - * | head | 配置版本信息 | - * | 8 bytes size_t | uint32_t | 变 长文本 | - * | length | crc32 | logic pool id | copyset id | epoch | - * 上面的持久化使用 ‘:’ 分隔 - * @param path:文件路径 - * @param logicPoolID:逻辑池id - * @param copysetID:复制组id - * @param epoch:配置版本 - * @return 0成功; -1失败 + * Serialize the configuration version information and save it to the + * snapshot file. The format is as follows: the 'head' part expresses + * lengths in binary, while everything else is plain text so that the + * file can be inspected directly when necessary; 'sync' ensures the + * data is flushed to disk. + * | head | Configuration version information | + * | 8 bytes size_t | uint32_t | Variable-length text | + * | length | crc32 | logic pool id | copyset id | epoch | + * The persisted fields above are separated by ':' + * @param path: File path + * @param poolId: Pool ID + * @param copysetId: Copyset ID + * @param epoch: Configuration version + * @return 0 on success; -1 on failure */ int Save(const std::string& path, const PoolId poolId, const CopysetId copysetId, const uint64_t epoch); diff --git a/curvefs/src/metaserver/inflight_throttle.h b/curvefs/src/metaserver/inflight_throttle.h index fb670b6161..dfbe50bebf 100644 --- a/curvefs/src/metaserver/inflight_throttle.h +++ b/curvefs/src/metaserver/inflight_throttle.h @@ -30,7 +30,7 @@ namespace curvefs { namespace metaserver { /** - * 负责控制最大inflight request数量 + * Responsible for controlling the maximum number of inflight requests */ class InflightThrottle { public: @@ -40,8 +40,8 @@ class InflightThrottle { ~InflightThrottle() = default; /** - * @brief: 判断是否过载 - * @return true,过载,false没有过载 + * @brief: Determine whether the service is overloaded + * @return true if overloaded, false otherwise */ bool IsOverLoad() { if (maxInflightRequest_ >= @@ -53,23 +53,23 @@ class InflightThrottle { } /** - * @brief: inflight request计数加1 + * @brief: Increase the inflight request count by 1 */ void Increment() { inflightRequestCount_.fetch_add(1, std::memory_order_relaxed); } /** - * @brief: inflight request计数减1 + * @brief: Decrease the inflight request count by 1 */ void Decrement() { inflightRequestCount_.fetch_sub(1, std::memory_order_relaxed); } private: - // 当前inflight request数量 + // Current number of inflight requests std::atomic inflightRequestCount_; - // 最大的inflight request数量 + // Maximum number of inflight requests const uint64_t maxInflightRequest_; };
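The class above is a simple counting admission gate, so a minimal usage sketch may help when reading it. This is illustrative only and not part of the patch: HandleRequest and the construction with a configured limit are assumptions, not code from the repository.

// Illustrative sketch: gating a request path with InflightThrottle.
// Assumed to be constructed with the configured maximum, e.g.
//   InflightThrottle throttle(maxInflightRequestsFromConfig);
void HandleRequest(InflightThrottle* throttle) {
    if (throttle->IsOverLoad()) {
        return;  // reject early, e.g. reply with an overload status code
    }
    throttle->Increment();
    // ... hand the request over to the real service logic ...
    throttle->Decrement();  // must run exactly once on every completion path
}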
diff --git a/curvefs/test/mds/schedule/coordinator_test.cpp b/curvefs/test/mds/schedule/coordinator_test.cpp index e759da89ed..a5dd3736de 100644 --- a/curvefs/test/mds/schedule/coordinator_test.cpp +++ b/curvefs/test/mds/schedule/coordinator_test.cpp @@ -21,22 +21,24 @@ */ #include "curvefs/src/mds/schedule/coordinator.h" + #include + #include "curvefs/src/mds/common/mds_define.h" +#include "curvefs/test/mds/mock/mock_topoAdapter.h" #include "curvefs/test/mds/mock/mock_topology.h" #include "curvefs/test/mds/schedule/common.h" -#include "curvefs/test/mds/mock/mock_topoAdapter.h" -using ::curvefs::mds::topology::MockTopology; using ::curvefs::mds::schedule::ScheduleOption; +using ::curvefs::mds::topology::MockTopology; using ::curvefs::mds::topology::TopologyIdGenerator; -using ::curvefs::mds::topology::TopologyTokenGenerator; using ::curvefs::mds::topology::TopologyStorage; +using ::curvefs::mds::topology::TopologyTokenGenerator; using ::std::chrono::steady_clock; +using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; -using ::testing::_; using ::curvefs::mds::topology::UNINITIALIZE_ID; @@ -51,7 +53,7 @@ class CoordinatorTest : public ::testing::Test { void SetUp() override { topo_ = std::make_shared(idGenerator_, tokenGenerator_, - storage_); + storage_); metric_ = std::make_shared(topo_); topoAdapter_ = std::make_shared(); coordinator_ = std::make_shared(topoAdapter_); @@ -132,7 +134,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { // 2. test copySet has operator and not execute @@ -149,20 +151,20 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { Operator opRes; ASSERT_TRUE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); - // 第一次下发配置 + // First configuration distribution ASSERT_EQ(4, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ(ConfigChangeType::ADD_PEER, res.type()); - // 第二次获取metaserver失败 + // Failed to obtain metaserver for the second time ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + // 3.
Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter_, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -174,19 +176,19 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); Operator opRes; ASSERT_FALSE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到metaserver的信息 + // Unable to obtain information on metaserver ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(_, _)) .WillOnce(Return(false)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); ASSERT_FALSE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); } @@ -204,7 +206,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - info.configChangeInfo, &res)); + info.configChangeInfo, &res)); } { @@ -217,7 +219,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(testOperator)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { @@ -228,7 +230,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { @@ -237,7 +239,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(Return(false)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } } @@ -270,7 +272,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { // 2. test copySet has operator and not execute @@ -289,21 +291,21 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { Operator opRes; ASSERT_TRUE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); - // 第一次下发配置 + // First configuration distribution ASSERT_EQ(4, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ("127.0.0.1:9001:0", res.oldpeer().address()); ASSERT_EQ(ConfigChangeType::CHANGE_PEER, res.type()); - // 第二次获取metaserver失败 + // Failed to obtain metaserver for the second time ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + // 3. 
Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter_, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -315,19 +317,19 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); Operator opRes; ASSERT_FALSE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到metaserver的信息 + // Unable to obtain information on metaserver ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(_, _)) .WillOnce(Return(false)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); ASSERT_FALSE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); } @@ -345,7 +347,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - info.configChangeInfo, &res)); + info.configChangeInfo, &res)); } { @@ -358,7 +360,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(testOperator)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { @@ -369,7 +371,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { @@ -378,7 +380,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(Return(false)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } } @@ -386,15 +388,16 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { ScheduleOption scheduleOption; scheduleOption.operatorConcurrent = 4; coordinator_->InitScheduler(scheduleOption, - std::make_shared(topo_)); + std::make_shared(topo_)); { - // 1. copyset上没有要变更的operator + // 1. There are no operators to change on the copyset ASSERT_FALSE(coordinator_->MetaserverGoingToAdd(1, CopySetKey{1, 1})); } { - // 2. copyset上有leader变更,并且目的leader为metaserver-1 + // 2. There is a leader change on the copyset and the target leader is + // metaserver-1 Operator testOperator( 1, CopySetKey{1, 1}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(2, 1)); @@ -403,7 +406,7 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 3. copyset上有remove peer操作 + // 3. There is a remove peer operation on the copyset Operator testOperator( 1, CopySetKey{1, 2}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(1)); @@ -412,7 +415,8 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 4. copyset上有add peer操作, target不是1 + // 4. There is an add peer operation on the copyset, but the target is + // not 1 Operator testOperator( 1, CopySetKey{1, 3}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(2)); @@ -421,7 +425,7 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 5. copyset上有add peer操作, target是1 + // 5. 
There is an add peer operation on the copyset, with a target of 1 Operator testOperator( 1, CopySetKey{1, 4}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(1)); @@ -430,7 +434,8 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 6. copyset上有change peer操作,target不是1 + // 6. There is a change peer operation on the copyset, but the target is + // not 1 Operator testOperator( 1, CopySetKey{1, 5}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(4, 2)); @@ -439,7 +444,8 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 7. copyset上有change peer操作,target是1 + // 7. There is a change peer operation on the copyset, with a target of + // 1 Operator testOperator( 1, CopySetKey{1, 6}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(4, 1)); @@ -449,7 +455,7 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } TEST_F(CoordinatorTest, test_SchedulerSwitch) { - ScheduleOption scheduleOption = GetTrueScheduleOption(); + ScheduleOption scheduleOption = GetTrueScheduleOption(); scheduleOption.copysetSchedulerIntervalSec = 0; scheduleOption.leaderSchedulerIntervalSec = 0; scheduleOption.recoverSchedulerIntervalSec = 0; @@ -459,7 +465,7 @@ TEST_F(CoordinatorTest, test_SchedulerSwitch) { EXPECT_CALL(*topoAdapter_, Getpools()).Times(0); EXPECT_CALL(*topoAdapter_, GetMetaServerInfos()).Times(0); - // 设置flag都为false + // Set flags to false gflags::SetCommandLineOption("enableCopySetScheduler", "false"); gflags::SetCommandLineOption("enableRecoverScheduler", "false"); gflags::SetCommandLineOption("enableLeaderScheduler", "false"); @@ -471,18 +477,18 @@ TEST_F(CoordinatorTest, test_SchedulerSwitch) { TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { /* - 场景: - metaserver1: offline 有恢复op - metaserver2: offline 没有恢复op,没有candidate,有其他op - metaserver3: offline 有candidate + Scenario: + metaserver1: offline, with a recovery op + metaserver2: offline, no recovery op, no candidate, but with another op + metaserver3: offline, with a candidate metaserver4: online metaserver4: online */ - // 获取option + // Get option ScheduleOption scheduleOption = GetFalseScheduleOption(); coordinator_->InitScheduler(scheduleOption, metric_); - // 构造metaserver + // Construct metaservers std::vector metaserverInfos; std::vector peerInfos; for (int i = 1; i <= 6; i++) { @@ -497,7 +503,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { peerInfos.emplace_back(peer); } - // 构造op + // Construct op Operator opForCopySet1(1, CopySetKey{1, 1}, OperatorPriority::HighPriority, steady_clock::now(), std::make_shared(1, 4)); ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(opForCopySet1)); Operator opForCopySet2(1, CopySetKey{1, 2}, OperatorPriority::HighPriority, steady_clock::now(), std::make_shared(2, 4)); ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(opForCopySet2)); - // 构造copyset + // Construct copysets std::vector peersFor2({peerInfos[1], peerInfos[3], peerInfos[4]}); CopySetInfo copyset2(CopySetKey{1, 2}, 1, 4, peersFor2, ConfigChangeInfo{}); @@ -523,7 +529,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { CopySetInfo copyset3(CopySetKey{1, 3}, 1, 4, peersFor3, configChangeInfoForCS3); - // 1. 查询所有metaserver + // 1. Query all metaservers { EXPECT_CALL(*topoAdapter_, GetMetaServerInfos()) .WillOnce(Return(metaserverInfos)); @@ -545,7 +551,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { ASSERT_FALSE(statusMap[6]); } - // 2. 查询指定metaserver, 但metaserver不存在 + // 2. Query specified metaserver, but metaserver does not exist { EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(7, _)) .WillOnce(Return(false)); @@ -556,7 +562,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { std::vector{7}, &statusMap)); } - // 3. 查询指定metaserver, 不在恢复中 + // 3. Query specified metaserver, not in recovery { EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(6, _)) .WillOnce(
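The operatorStep tests that follow all exercise one contract: OperatorStep::Apply() compares the reported copyset state against the operator's goal and returns Ordered, OnGoing, Finished, or Failed. As a reading aid, here is a minimal illustrative sketch of the caller side; it is not part of the patch, and the surrounding dispatch shown here is a hypothetical stand-in for the real operator controller logic.

// Illustrative sketch: how a heartbeat handler typically consumes the
// ApplyStatus returned by an OperatorStep (hypothetical caller code).
CopySetConf conf;
switch (step->Apply(reportedCopySetInfo, &conf)) {
    case ApplyStatus::Ordered:
        // Not started yet: send 'conf' back in the heartbeat response.
        break;
    case ApplyStatus::OnGoing:
        // Config change still running: send nothing this round.
        break;
    case ApplyStatus::Finished:
        // Target state reached: the operator can be removed.
        break;
    case ApplyStatus::Failed:
        // The candidate reported an error: drop the operator and log it.
        break;
}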
diff --git a/curvefs/test/mds/schedule/operatorStep_test.cpp b/curvefs/test/mds/schedule/operatorStep_test.cpp index d6378bb927..821d97fac7 100644 --- a/curvefs/test/mds/schedule/operatorStep_test.cpp +++ b/curvefs/test/mds/schedule/operatorStep_test.cpp @@ -22,6 +22,7 @@ #include #include + #include "curvefs/test/mds/schedule/common.h" namespace curvefs { @@ -29,8 +30,8 @@ namespace mds { namespace schedule { TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { auto originCopySetInfo = GetCopySetInfoForTest(); - std::shared_ptr<TransferLeader> transferLeader - = std::make_shared<TransferLeader>(1, 2); + std::shared_ptr<TransferLeader> transferLeader = + std::make_shared<TransferLeader>(1, 2); auto testCopySetInfo = originCopySetInfo; ApplyStatus applyStatus; @@ -48,21 +49,21 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { // 2. transfer leader complete testCopySetInfo.leader = 2; ASSERT_EQ(ApplyStatus::Finished, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { // 3. report leader is not record old/target leader in operator testCopySetInfo.leader = 3; ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { // 4. transfer leader fail testCopySetInfo.leader = 1; - CandidateError *candidateError = new CandidateError(); - std::string *errMsg = new std::string("transfer leader err"); + CandidateError* candidateError = new CandidateError(); + std::string* errMsg = new std::string("transfer leader err"); candidateError->set_allocated_errmsg(errMsg); candidateError->set_errtype(1); testCopySetInfo.candidatePeerInfo = PeerInfo(2, 1, 1, "", 9000); @@ -75,7 +76,7 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { @@ -89,14 +90,14 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.release_err(); ASSERT_EQ(ApplyStatus::OnGoing, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { // 6. tarnfer leader type not complete testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { @@ -109,7 +110,7 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { ConfigChangeType::TRANSFER_LEADER); testCopySetInfo.configChangeInfo.set_allocated_peer(replica); ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } } @@ -126,8 +127,7 @@ TEST(OperatorSepTest, OperatorSepTest_AddPeer_Test) { // 2.
add peer complete auto testCopySetInfo = originCopySetInfo; - testCopySetInfo.peers.emplace_back( - PeerInfo(4, 3, 4, "192.168.10.4", 9000)); + testCopySetInfo.peers.emplace_back(PeerInfo(4, 3, 4, "192.168.10.4", 9000)); ASSERT_EQ(ApplyStatus::Finished, addPeer->Apply(testCopySetInfo, &copySetConf)); @@ -140,8 +140,8 @@ testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); testCopySetInfo.configChangeInfo.set_finished(false); - std::string *errMsg = new std::string("add peer failed"); - CandidateError *candidateError = new CandidateError(); + std::string* errMsg = new std::string("add peer failed"); + CandidateError* candidateError = new CandidateError(); candidateError->set_errtype(2); candidateError->set_allocated_errmsg(errMsg); testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); @@ -157,7 +157,7 @@ // 5. add peer type not complete testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::REMOVE_PEER); ASSERT_EQ(ApplyStatus::Failed, - addPeer->Apply(testCopySetInfo, &copySetConf)); + addPeer->Apply(testCopySetInfo, &copySetConf)); // 6. config change item do not match testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); @@ -173,8 +173,7 @@ TEST(OperatorStepTest, OperatorStepTest_RemovePeer_Test) { auto originCopySetInfo = GetCopySetInfoForTest(); - std::shared_ptr<RemovePeer> - removePeer = std::make_shared<RemovePeer>(3); + std::shared_ptr<RemovePeer> removePeer = std::make_shared<RemovePeer>(3); // 1. remove peer haven't started CopySetConf copySetConf; @@ -198,13 +197,12 @@ replica->set_address("192.10.12.4:9000:0"); testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::REMOVE_PEER); - std::string *errMsg = new std::string("remove peer err"); - CandidateError *candidateError = new CandidateError(); + std::string* errMsg = new std::string("remove peer err"); + CandidateError* candidateError = new CandidateError(); candidateError->set_errtype(2); candidateError->set_allocated_errmsg(errMsg); testCopySetInfo.configChangeInfo.set_finished(false); - testCopySetInfo.configChangeInfo.set_allocated_err( - candidateError); + testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); ASSERT_EQ(ApplyStatus::Failed, removePeer->Apply(testCopySetInfo, &copySetConf)); @@ -217,7 +215,7 @@ // 5. remove peer type not complete testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); ASSERT_EQ(ApplyStatus::Failed, - removePeer->Apply(testCopySetInfo, &copySetConf)); + removePeer->Apply(testCopySetInfo, &copySetConf)); // 5. config change item do not match testCopySetInfo.candidatePeerInfo = PeerInfo(10, 1, 1, "", 9000); @@ -233,31 +231,31 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { auto originCopySetInfo = GetCopySetInfoForTest(); - std::shared_ptr<ChangePeer> - changePeer = std::make_shared<ChangePeer>(3, 4); + std::shared_ptr<ChangePeer> changePeer = + std::make_shared<ChangePeer>(3, 4); CopySetConf copySetConf; - // 1. change peer还未开始 + // 1.
The change peer has not yet started { ASSERT_EQ(ApplyStatus::Ordered, - changePeer->Apply(originCopySetInfo, &copySetConf)); + changePeer->Apply(originCopySetInfo, &copySetConf)); ASSERT_EQ(4, copySetConf.configChangeItem); ASSERT_EQ(3, copySetConf.oldOne); ASSERT_EQ(ConfigChangeType::CHANGE_PEER, copySetConf.type); } auto testCopySetInfo = originCopySetInfo; - // 2. change peer完成 + // 2. change peer completed { auto testCopySetInfo = originCopySetInfo; testCopySetInfo.peers.erase(testCopySetInfo.peers.begin() + 2); testCopySetInfo.peers.emplace_back( - PeerInfo(4, 3, 4, "192.168.10.4", 9000)); + PeerInfo(4, 3, 4, "192.168.10.4", 9000)); ASSERT_EQ(ApplyStatus::Finished, - changePeer->Apply(testCopySetInfo, &copySetConf)); + changePeer->Apply(testCopySetInfo, &copySetConf)); } - // 3. change peer失败 + // 3. change peer failed { testCopySetInfo = originCopySetInfo; testCopySetInfo.candidatePeerInfo = PeerInfo(4, 1, 1, "", 9000); @@ -268,24 +266,24 @@ testCopySetInfo.configChangeInfo.set_type( ConfigChangeType::CHANGE_PEER); testCopySetInfo.configChangeInfo.set_finished(false); - std::string *errMsg = new std::string("add peer failed"); - CandidateError *candidateError = new CandidateError(); + std::string* errMsg = new std::string("add peer failed"); + CandidateError* candidateError = new CandidateError(); candidateError->set_errtype(2); candidateError->set_allocated_errmsg(errMsg); testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); ASSERT_EQ(ApplyStatus::Failed, - changePeer->Apply(testCopySetInfo, &copySetConf)); + changePeer->Apply(testCopySetInfo, &copySetConf)); } - // 4. 上报未完成 + // 4. The reported change is not yet finished { testCopySetInfo.configChangeInfo.set_finished(false); testCopySetInfo.configChangeInfo.release_err(); ASSERT_EQ(ApplyStatus::OnGoing, - changePeer->Apply(testCopySetInfo, &copySetConf)); + changePeer->Apply(testCopySetInfo, &copySetConf)); } - // 5. 上报的变更类型和mds中的oprator不相符合 + // 5.
The reported change type does not match the operator in mds { testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); testCopySetInfo.configChangeInfo.set_finished(true); @@ -295,7 +293,7 @@ replica->set_address("192.10.12.5:9000:0"); testCopySetInfo.configChangeInfo.set_allocated_peer(replica); ASSERT_EQ(ApplyStatus::Failed, - changePeer->Apply(testCopySetInfo, &copySetConf)); + changePeer->Apply(testCopySetInfo, &copySetConf)); } } } // namespace schedule diff --git a/curvefs/test/mds/schedule/recoverScheduler_test.cpp b/curvefs/test/mds/schedule/recoverScheduler_test.cpp index d48c6a9ee1..32c6e88d18 100644 --- a/curvefs/test/mds/schedule/recoverScheduler_test.cpp +++ b/curvefs/test/mds/schedule/recoverScheduler_test.cpp @@ -21,26 +21,27 @@ */ #include + #include "curvefs/src/mds/common/mds_define.h" #include "curvefs/src/mds/schedule/operatorController.h" #include "curvefs/src/mds/schedule/scheduleMetrics.h" #include "curvefs/src/mds/schedule/scheduler.h" #include "curvefs/src/mds/topology/topology_id_generator.h" +#include "curvefs/test/mds/mock/mock_topoAdapter.h" #include "curvefs/test/mds/mock/mock_topology.h" #include "curvefs/test/mds/schedule/common.h" -#include "curvefs/test/mds/mock/mock_topoAdapter.h" using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; -using ::curvefs::mds::topology::TopologyIdGenerator; -using ::curvefs::mds::topology::MockTopology; using ::curvefs::mds::topology::MockIdGenerator; -using ::curvefs::mds::topology::MockTokenGenerator; using ::curvefs::mds::topology::MockStorage; +using ::curvefs::mds::topology::MockTokenGenerator; +using ::curvefs::mds::topology::MockTopology; +using ::curvefs::mds::topology::TopologyIdGenerator; using ::std::chrono::steady_clock; namespace curvefs { namespace mds { @@ -172,7 +173,7 @@ TEST_F(TestRecoverSheduler, recoverScheduler_->Schedule(); Operator op; ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } @@ -196,7 +197,7 @@ TEST_F(TestRecoverSheduler, test_all_metaServer_online_offline) { EXPECT_CALL(*topoAdapter_, GetAvgScatterWidthInPool(_)) .WillRepeatedly(Return(90)); { - // 1. 所有metaserveronline + // 1. All metaservers online EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id1, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id2, _)) @@ -208,7 +209,8 @@ TEST_F(TestRecoverSheduler, test_all_metaServer_online_offline) { } { - // 2. 副本数量大于标准,leader挂掉 + // 2. The number of replicas exceeds the standard, and the leader is + // down csInfo1.state = OnlineState::OFFLINE; EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id1, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInPool(_)) .WillRepeatedly(Return(2)); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } { - // 3. 副本数量大于标准,follower挂掉 + // 3.
The number of replicas exceeds the standard, and a follower is + // down opController_->RemoveOperator(op.copysetID); csInfo1.state = OnlineState::ONLINE; csInfo2.state = OnlineState::OFFLINE; EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id1, _)) .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id2, _)) .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo2), Return(true))); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } { - // 4. 副本数目等于标准, follower挂掉 + // 4. The number of replicas equals the standard, and a follower is + // down opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInPool(_)) .WillRepeatedly(Return(3)); @@ -254,12 +258,12 @@ TEST_F(TestRecoverSheduler, test_all_metaServer_online_offline) { .WillOnce(Return(true)); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(1000), op.timeLimit); } { - // 5. 选不出替换metaserver + // 5. Unable to select a replacement metaserver opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetMetaServersInPool(_)) .WillOnce(Return(std::vector{})); recoverScheduler_->Schedule(); ASSERT_EQ(0, opController_->GetOperators().size()); } { - // 6. 在metaserver上创建copyset失败 + // 6. Failed to create copyset on metaserver EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInPool(_)) .WillRepeatedly(Return(3)); std::vector metaserverList( diff --git a/curvefs/test/mds/schedule/scheduleMetrics_test.cpp b/curvefs/test/mds/schedule/scheduleMetrics_test.cpp index 1041519eb6..0a7036ce15 100644 --- a/curvefs/test/mds/schedule/scheduleMetrics_test.cpp +++ b/curvefs/test/mds/schedule/scheduleMetrics_test.cpp @@ -21,23 +21,25 @@ */ #include "curvefs/src/mds/schedule/scheduleMetrics.h" + #include #include #include + #include "curvefs/src/mds/schedule/operatorController.h" #include "curvefs/test/mds/mock/mock_topology.h" -using ::curvefs::mds::topology::MockTopology; using ::curvefs::mds::topology::CopySetKey; +using ::curvefs::mds::topology::MockTopology; using ::curvefs::mds::topology::TopologyIdGenerator; -using ::curvefs::mds::topology::TopologyTokenGenerator; using ::curvefs::mds::topology::TopologyStorage; +using ::curvefs::mds::topology::TopologyTokenGenerator; using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curvefs { namespace mds { @@ -82,7 +84,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { ::curvefs::mds::topology::CopySetInfo addCsInfo(1, 1); addCsInfo.SetCopySetMembers(std::set{1, 2}); { - // 1. 增加normal级别/add类型的operator + // 1. Add a normal-priority/add-type operator EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 1}, _)) .WillOnce(DoAll(SetArgPointee<1>(addCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)) @@ -150,7 +152,7 @@ } { - // 2. 移除 1中的operator + // 2.
Remove the operator added in 1 scheduleMetrics->UpdateRemoveMetric(addOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->addOpNum.get_value()); @@ -167,11 +169,10 @@ rmCsInfo.SetLeader(1); { - // 1. 增加high级别/remove类型的operator + // 1. Add a high-priority/remove-type operator EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 2}, _)) .WillOnce(DoAll(SetArgPointee<1>(rmCsInfo), Return(true))); - EXPECT_CALL(*topo, GetHostNameAndPortById(_)) - .WillOnce(Return("haha")); + EXPECT_CALL(*topo, GetHostNameAndPortById(_)).WillOnce(Return("haha")); EXPECT_CALL(*topo, GetMetaServer(1, _)) .WillOnce(DoAll(SetArgPointee<1>(GetMetaServer(1)), Return(true))); @@ -245,7 +246,7 @@ } { - // 2. 移除 1中的operator + // 2. Remove the operator added in 1 scheduleMetrics->UpdateRemoveMetric(rmOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->removeOpNum.get_value()); @@ -263,7 +264,7 @@ transCsInfo.SetLeader(1); { - // 1. 增加normal级别/transferleader类型的operator + // 1. Add a normal-priority/transferleader-type operator EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)) @@ -329,7 +330,6 @@ std::string("Normal\",\"opType\":\"TransferLeader\",\"poolId") + std::string("\":\"1\",\"startEpoch\":\"1\"}"); - ASSERT_EQ(res, scheduleMetrics->operators[transferOp.copysetID].JsonBody()); LOG(INFO) @@ -338,14 +338,15 @@ } { - // 2. 移除 1中的operator + // 2. Remove the operator added in 1 scheduleMetrics->UpdateRemoveMetric(transferOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->transferOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // There should be no problem removing metrics that do not exist in the + // map scheduleMetrics->UpdateRemoveMetric(transferOp); } } @@ -358,7 +359,7 @@ changeCsInfo.SetLeader(1); { - // 1. 增加normal级别/changePeer类型的operator + // 1. Add a normal-priority/changePeer-type operator EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 4}, _)) .WillOnce(DoAll(SetArgPointee<1>(changeCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)) @@ -426,14 +427,15 @@ } { - // 2. 移除 1中的operator + // 2.
Remove the operator added in 1 scheduleMetrics->UpdateRemoveMetric(changeOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->changeOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // There should be no problem removing metrics that do not exist in the + // map scheduleMetrics->UpdateRemoveMetric(changeOp); } } @@ -446,7 +448,7 @@ TEST_F(ScheduleMetricsTest, test_abnormal) { transCsInfo.SetCopySetMembers(std::set{1, 2, 3}); transCsInfo.SetLeader(1); - // 获取copyset失败 + // Failed to obtain copyset EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)).WillOnce(Return(false)); scheduleMetrics->UpdateAddMetric(transferOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -459,7 +461,7 @@ << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); scheduleMetrics->UpdateRemoveMetric(transferOp); - // 获取metaserver 或者 server失败 + // Failed to obtain metaserver or server EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)).WillOnce(Return(false)); diff --git a/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp b/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp index 04241d0209..a8c91d7617 100644 --- a/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp +++ b/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp @@ -20,6 +20,8 @@ * @Author: chenwei */ +#include "curvefs/src/mds/schedule/scheduleService/scheduleService.h" + #include #include #include @@ -27,17 +29,16 @@ #include #include "curvefs/proto/schedule.pb.h" -#include "curvefs/src/mds/schedule/scheduleService/scheduleService.h" #include "curvefs/test/mds/mock/mock_coordinator.h" namespace curvefs { namespace mds { namespace schedule { -using ::testing::Return; using ::testing::_; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; class TestScheduleService : public ::testing::Test { protected: void SetUp() override { server_ = new brpc::Server(); coordinator_ = std::make_shared(); - ScheduleServiceImpl *scheduleService = + ScheduleServiceImpl* scheduleService = new ScheduleServiceImpl(coordinator_); ASSERT_EQ( 0, server_->AddService(scheduleService, brpc::SERVER_OWNS_SERVICE)); @@ -63,7 +64,7 @@ protected: std::shared_ptr coordinator_; butil::EndPoint listenAddr_; - brpc::Server *server_; + brpc::Server* server_; }; TEST_F(TestScheduleService, test_QueryMetaServerRecoverStatus) { @@ -75,7 +76,7 @@ request.add_metaserverid(1); QueryMetaServerRecoverStatusResponse response; - // 1. 查询metaserver恢复状态返回成功 + // 1. Querying the metaserver recovery status succeeds { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryMetaServerRecoverStatus( @@ -91,7 +92,7 @@ ASSERT_TRUE(response.recoverstatusmap().begin()->second); } - // 2. 传入的metaserverid不合法 + // 2.
The metaserverid passed in is invalid { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryMetaServerRecoverStatus( diff --git a/curvefs/test/volume/bitmap_allocator_test.cpp b/curvefs/test/volume/bitmap_allocator_test.cpp index 3eca470fec..88c324e9e4 100644 --- a/curvefs/test/volume/bitmap_allocator_test.cpp +++ b/curvefs/test/volume/bitmap_allocator_test.cpp @@ -18,9 +18,8 @@ #include -#include "curvefs/test/volume/common.h" - #include "absl/memory/memory.h" +#include "curvefs/test/volume/common.h" namespace curvefs { namespace volume { @@ -100,7 +99,7 @@ TEST_F(BitmapAllocatorTest, AllocFromBitmap) { Extents expected = { Extent(opt_.startOffset + opt_.length * opt_.smallAllocProportion, - allocSize)}; + allocSize)}; ASSERT_EQ(expected, exts); @@ -225,7 +224,7 @@ TEST_F(BitmapAllocatorTest, TestMarkUsedRandom) { uint64_t off = opt_.startOffset; uint64_t usedSize = 0; - // 对于每一个 size per bit,随机其中一部分设置 + // For each size per bit, randomly set a portion of it auto select = [this, &usedSize](uint64_t startOffset) { auto off = rand_r(&seed) * 4096 % opt_.sizePerBit; auto len = rand_r(&seed) * 4096 % opt_.sizePerBit; diff --git a/curvefs_python/cbd_client.h b/curvefs_python/cbd_client.h index 64109ef8e5..a5415b26e3 100644 --- a/curvefs_python/cbd_client.h +++ b/curvefs_python/cbd_client.h @@ -56,15 +56,17 @@ class CBDClient { int Rename(UserInfo_t* info, const char* oldpath, const char* newpath); int Extend(const char* filename, UserInfo_t* info, uint64_t size); - // 同步读写 - int Read(int fd, char* buf, unsigned long offset, unsigned long length); // NOLINT - int Write(int fd, const char* buf, unsigned long offset, unsigned long length); // NOLINT + // Synchronous read and write + int Read(int fd, char* buf, unsigned long offset, + unsigned long length); // NOLINT + int Write(int fd, const char* buf, unsigned long offset, + unsigned long length); // NOLINT - // 异步读写 + // Asynchronous read and write int AioRead(int fd, AioContext* aioctx); int AioWrite(int fd, AioContext* aioctx); - // 获取文件的基本信息 + // Obtain basic information about the file int StatFile(const char* filename, UserInfo_t* info, FileInfo_t* finfo); int ChangeOwner(const char* filename, const char* owner, UserInfo_t* info); diff --git a/curvefs_python/curve_type.h b/curvefs_python/curve_type.h index d6603e238d..5382401d72 100644 --- a/curvefs_python/curve_type.h +++ b/curvefs_python/curve_type.h @@ -34,65 +34,65 @@ #define CURVEINODE_APPENDFILE 2 #define CURVE_INODE_APPENDECFILE 3 -#define CURVE_ERROR_OK 0 -// 文件或者目录已存在 +#define CURVE_ERROR_OK 0 +// The file or directory already exists #define CURVE_ERROR_EXISTS 1 -// 操作失败 +// Operation failed #define CURVE_ERROR_FAILED 2 -// 禁止IO +// IO is disabled #define CURVE_ERROR_DISABLEIO 3 -// 认证失败 +// Authentication failed #define CURVE_ERROR_AUTHFAIL 4 -// 正在删除 +// Being deleted #define CURVE_ERROR_DELETING 5 -// 文件不存在 +// File does not exist #define CURVE_ERROR_NOTEXIST 6 -// 快照中 +// Under snapshot #define CURVE_ERROR_UNDER_SNAPSHOT 7 -// 非快照期间 +// Not under snapshot #define CURVE_ERROR_NOT_UNDERSNAPSHOT 8 -// 删除错误 +// Delete error #define CURVE_ERROR_DELETE_ERROR 9 -// segment未分配 +// Segment not allocated #define CURVE_ERROR_NOT_ALLOCATE 10 -// 操作不支持 +// Operation not supported #define CURVE_ERROR_NOT_SUPPORT 11 -// 目录非空 +// Directory is not empty #define CURVE_ERROR_NOT_EMPTY 12 -// 禁止缩容 +// Shrinking is prohibited #define CURVE_ERROR_NO_SHRINK_BIGGER_FILE 13 -// session不存在 +// Session does not exist #define CURVE_ERROR_SESSION_NOTEXISTS 14 -// 文件被占用 +// File occupied #define
CURVE_ERROR_FILE_OCCUPIED 15 -// 参数错误 +// Parameter error #define CURVE_ERROR_PARAM_ERROR 16 -// MDS一侧存储错误 +// MDS side storage error #define CURVE_ERROR_INTERNAL_ERROR 17 -// crc检查错误 +// CRC check error #define CURVE_ERROR_CRC_ERROR 18 -// request参数存在问题 +// There is an issue with the request parameter #define CURVE_ERROR_INVALID_REQUEST 19 -// 磁盘存在问题 +// There is a problem with the disk #define CURVE_ERROR_DISK_FAIL 20 -// 空间不足 +// Insufficient space #define CURVE_ERROR_NO_SPACE 21 -// IO未对齐 +// IO misalignment #define CURVE_ERROR_NOT_ALIGNED 22 -// 文件被关闭,fd不可用 +// File closed, fd not available #define CURVE_ERROR_BAD_FD 23 -// 文件长度不支持 +// File length not supported #define CURVE_ERROR_LENGTH_NOT_SUPPORT 24 -// 文件状态 -#define CURVE_FILE_CREATED 0 -#define CURVE_FILE_DELETING 1 -#define CURVE_FILE_CLONING 2 +// File Status +#define CURVE_FILE_CREATED 0 +#define CURVE_FILE_DELETING 1 +#define CURVE_FILE_CLONING 2 #define CURVE_FILE_CLONEMETAINSTALLED 3 -#define CURVE_FILE_CLONED 4 -#define CURVE_FILE_BEINGCLONED 5 +#define CURVE_FILE_CLONED 4 +#define CURVE_FILE_BEINGCLONED 5 -// 未知错误 +// Unknown error #define CURVE_ERROR_UNKNOWN 100 #define CURVE_OP_READ 0 @@ -100,11 +100,10 @@ #define CLUSTERIDMAX 256 - typedef void (*AioCallBack)(struct AioContext* context); typedef struct AioContext { - unsigned long offset; //NOLINT - unsigned long length; //NOLINT + unsigned long offset; // NOLINT + unsigned long length; // NOLINT int ret; int op; AioCallBack cb; @@ -117,32 +116,32 @@ typedef struct UserInfo { } UserInfo_t; typedef struct FileInfo { - uint64_t id; - uint64_t parentid; - int filetype; - uint64_t length; - uint64_t ctime; - char filename[256]; - char owner[256]; - int fileStatus; - uint64_t stripeUnit; - uint64_t stripeCount; + uint64_t id; + uint64_t parentid; + int filetype; + uint64_t length; + uint64_t ctime; + char filename[256]; + char owner[256]; + int fileStatus; + uint64_t stripeUnit; + uint64_t stripeCount; } FileInfo_t; typedef struct DirInfos { - char* dirpath; - UserInfo_t* userinfo; - uint64_t dirsize; - FileInfo_t* fileinfo; + char* dirpath; + UserInfo_t* userinfo; + uint64_t dirsize; + FileInfo_t* fileinfo; } DirInfos_t; struct CreateContext { - std::string name; - size_t length; - UserInfo user; - std::string poolset; - uint64_t stripeUnit; - uint64_t stripeCount; + std::string name; + size_t length; + UserInfo user; + std::string poolset; + uint64_t stripeUnit; + uint64_t stripeCount; }; #endif // CURVEFS_PYTHON_CURVE_TYPE_H_ diff --git a/curvefs_python/curvefs_tool.py b/curvefs_python/curvefs_tool.py index f2fb582214..7a0cf73e92 100644 --- a/curvefs_python/curvefs_tool.py +++ b/curvefs_python/curvefs_tool.py @@ -21,61 +21,65 @@ import parser import time -fileType = ["INODE_DIRECTORY", "INODE_PAGEFILE", "INODE_APPENDFILE", "INODE_APPENDECFILE", "INODE_SNAPSHOT_PAGEFILE"] -fileStatus = ["Created", "Deleting", "Cloning", "CloneMetaInstalled", "Cloned", "BeingCloned"] +fileType = ["INODE_DIRECTORY", "INODE_PAGEFILE", "INODE_APPENDFILE", + "INODE_APPENDECFILE", "INODE_SNAPSHOT_PAGEFILE"] +fileStatus = ["Created", "Deleting", "Cloning", + "CloneMetaInstalled", "Cloned", "BeingCloned"] kGB = 1024 * 1024 * 1024 kUnitializedFileID = 0 -# 参照curve/include/client/libcurve.h -retCode = { 0 : "OK", - 1 : "EXISTS", - 2 : "FAILED", - 3 : "DISABLEIO", - 4 : "AUTHFAIL", - 5 : "DELETING", - 6 : "NOTEXIST", - 7 : "UNDER_SNAPSHOT", - 8 : "NOT_UNDERSNAPSHOT", - 9 : "DELETE_ERROR", - 10 : "NOT_ALLOCATE", - 11 : "NOT_SUPPORT", - 12 : "NOT_EMPTY", - 13 : "NO_SHRINK_BIGGER_FILE", - 14 : 
"SESSION_NOTEXISTS", - 15 : "FILE_OCCUPIED", - 16 : "PARAM_ERROR", - 17 : "INTERNAL_ERROR", - 18 : "CRC_ERROR", - 19 : "INVALID_REQUEST", - 20 : "DISK_FAIL", - 21 : "NO_SPACE", - 22 : "NOT_ALIGNED", - 23 : "BAD_FD", - 24 : "LENGTH_NOT_SUPPORT", - 25 : "SESSION_NOT_EXIST", - 26 : "STATUS_NOT_MATCH", - 27 : "DELETE_BEING_CLONED", - 28 : "CLIENT_NOT_SUPPORT_SNAPSHOT", - 29 : "SNAPSTHO_FROZEN", - 100 : "UNKNOWN"} +# Refer to curve/include/client/libcurve.h +retCode = {0: "OK", + 1: "EXISTS", + 2: "FAILED", + 3: "DISABLEIO", + 4: "AUTHFAIL", + 5: "DELETING", + 6: "NOTEXIST", + 7: "UNDER_SNAPSHOT", + 8: "NOT_UNDERSNAPSHOT", + 9: "DELETE_ERROR", + 10: "NOT_ALLOCATE", + 11: "NOT_SUPPORT", + 12: "NOT_EMPTY", + 13: "NO_SHRINK_BIGGER_FILE", + 14: "SESSION_NOTEXISTS", + 15: "FILE_OCCUPIED", + 16: "PARAM_ERROR", + 17: "INTERNAL_ERROR", + 18: "CRC_ERROR", + 19: "INVALID_REQUEST", + 20: "DISK_FAIL", + 21: "NO_SPACE", + 22: "NOT_ALIGNED", + 23: "BAD_FD", + 24: "LENGTH_NOT_SUPPORT", + 25: "SESSION_NOT_EXIST", + 26: "STATUS_NOT_MATCH", + 27: "DELETE_BEING_CLONED", + 28: "CLIENT_NOT_SUPPORT_SNAPSHOT", + 29: "SNAPSTHO_FROZEN", + 100: "UNKNOWN"} + def getRetCodeMsg(ret): - if retCode.has_key(-ret) : + if retCode.has_key(-ret): return retCode[-ret] return "Unknown Error Code" + if __name__ == '__main__': - # 参数解析 + # Parameter parsing args = parser.get_parser().parse_args() - # 初始化client + # Initialize client cbd = curvefs.CBDClient() ret = cbd.Init(args.confpath) if ret != 0: print "init fail" exit(1) - # 获取文件user信息 + # Obtain file user information user = curvefs.UserInfo_t() user.owner = args.user if args.password: @@ -85,7 +89,8 @@ def getRetCodeMsg(ret): if args.optype == "create": if args.stripeUnit or args.stripeCount: - ret = cbd.Create2(args.filename, user, args.length * kGB, args.stripeUnit, args.stripeCount) + ret = cbd.Create2(args.filename, user, args.length * + kGB, args.stripeUnit, args.stripeCount) else: ret = cbd.Create(args.filename, user, args.length * kGB) elif args.optype == "delete": @@ -116,7 +121,7 @@ def getRetCodeMsg(ret): ret = cbd.Mkdir(args.dirname, user) elif args.optype == "rmdir": ret = cbd.Rmdir(args.dirname, user) - elif args.optype == "list" : + elif args.optype == "list": dir = cbd.Listdir(args.dirname, user) for i in dir: print i diff --git a/curvefs_python/libcurvefs.h b/curvefs_python/libcurvefs.h index 55c6bf55fe..069c4542f4 100644 --- a/curvefs_python/libcurvefs.h +++ b/curvefs_python/libcurvefs.h @@ -19,13 +19,14 @@ * File Created: Tuesday, 25th September 2018 2:07:05 pm * Author: */ -#ifndef CURVE_LIBCURVE_INTERFACE_H //NOLINT +#ifndef CURVE_LIBCURVE_INTERFACE_H // NOLINT #define CURVE_LIBCURVE_INTERFACE_H -#include #include -#include +#include + #include +#include #include "curvefs_python/curve_type.h" @@ -38,15 +39,17 @@ int Open4Qemu(const char* filename); int Open(const char* filename, UserInfo_t* info); int Create(const char* filename, UserInfo_t* info, size_t size); -// 同步读写 -int Read(int fd, char* buf, unsigned long offset, unsigned long length); //NOLINT -int Write(int fd, const char* buf, unsigned long offset, unsigned long length); //NOLINT +// Synchronous read and write +int Read(int fd, char* buf, unsigned long offset, + unsigned long length); // NOLINT +int Write(int fd, const char* buf, unsigned long offset, + unsigned long length); // NOLINT -// 异步读写 +// Asynchronous read and write int AioRead(int fd, AioContext* aioctx); int AioWrite(int fd, AioContext* aioctx); -// 获取文件的基本信息 +// Obtain basic information about the file int StatFile4Qemu(const char* 
filename, FileInfo_t* finfo); int StatFile(const char* filename, UserInfo_t* info, FileInfo_t* finfo); int ChangeOwner(const char* filename, const char* owner, UserInfo_t* info); @@ -59,7 +62,7 @@ int Recover(const char* filename, UserInfo_t* info, uint64_t fileId); int DeleteForce(const char* filename, UserInfo_t* info); DirInfos_t* OpenDir(const char* dirpath, UserInfo_t* userinfo); void CloseDir(DirInfos_t* dirinfo); -int Listdir(DirInfos_t *dirinfo); +int Listdir(DirInfos_t* dirinfo); int Mkdir(const char* dirpath, UserInfo_t* info); int Rmdir(const char* dirpath, UserInfo_t* info); diff --git a/curvefs_python/test.py b/curvefs_python/test.py index 0f0045fa62..eb77fd7f9e 100644 --- a/curvefs_python/test.py +++ b/curvefs_python/test.py @@ -19,11 +19,12 @@ import os + def exec_cmd(cmd): ret = os.system(cmd) if ret == 0: print cmd + " exec success" - else : + else: print cmd + " exec fail, ret = " + str(ret) @@ -37,10 +38,10 @@ def exec_cmd(cmd): exec_cmd(cmd) cmd = "curve list --user k8s --dirname /k8s" exec_cmd(cmd) -# 不是root,失败 +# Not root, failed cmd = "curve list --user k8s --dirname /" exec_cmd(cmd) -# root没有传入密码,失败 +# Root did not pass in a password, failed cmd = "curve list --user root --dirname /" exec_cmd(cmd) cmd = "curve list --user root --dirname / --password root_password" diff --git a/curvesnapshot_python/libcurveSnapshot.cpp b/curvesnapshot_python/libcurveSnapshot.cpp index 5cdce45219..97588ba58c 100644 --- a/curvesnapshot_python/libcurveSnapshot.cpp +++ b/curvesnapshot_python/libcurveSnapshot.cpp @@ -20,60 +20,57 @@ * Author: tongguangxun */ +#include "curvesnapshot_python/libcurveSnapshot.h" + #include -#include #include +#include -#include "curvesnapshot_python/libcurveSnapshot.h" -#include "src/client/libcurve_snapshot.h" -#include "src/client/client_config.h" #include "include/client/libcurve.h" #include "src/client/client_common.h" +#include "src/client/client_config.h" +#include "src/client/libcurve_snapshot.h" #include "src/common/concurrent/concurrent.h" -using curve::client::UserInfo; using curve::client::ClientConfig; -using curve::client::SnapshotClient; -using curve::client::SnapCloneClosure; -using curve::client::FileServiceOption; using curve::client::ClientConfigOption; -using curve::common::Mutex; +using curve::client::FileServiceOption; +using curve::client::SnapCloneClosure; +using curve::client::SnapshotClient; +using curve::client::UserInfo; using curve::common::ConditionVariable; +using curve::common::Mutex; class TaskTracker { public: - TaskTracker() - : concurrent_(0), - lastErr_(0) {} + TaskTracker() : concurrent_(0), lastErr_(0) {} /** - * @brief 增加一个追踪任务 + * @brief Add a tracking task */ - void AddOneTrace() { - concurrent_.fetch_add(1, std::memory_order_acq_rel); - } + void AddOneTrace() { concurrent_.fetch_add(1, std::memory_order_acq_rel); } /** - * @brief 获取任务数量 + * @brief Get the number of tasks * - * @return 任务数量 + * @return Number of tasks */ - uint32_t GetTaskNum() const { - return concurrent_; - } + uint32_t GetTaskNum() const { return concurrent_; } /** - * @brief 处理任务返回值 + * @brief Process the task's return value * - * @param retCode 返回值 + * @param retCode Return value */ void HandleResponse(int retCode) { if (retCode < 0) { lastErr_ = retCode; } if (1 == concurrent_.fetch_sub(1, std::memory_order_acq_rel)) { - // 最后一次需拿锁再发信号,防止先发信号后等待导致死锁 + // For the last task, the lock must be taken before signaling, to + // prevent a deadlock where the signal is sent first and the wait + // starts afterwards std::unique_lock lk(cv_m); cv_.notify_all(); } else {
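TaskTracker implements a simple fan-out/fan-in pattern, which is exactly how ReadChunkSnapshot further down drives it. A minimal illustrative sketch follows; it is not part of the patch, and SubmitAsyncRead is a hypothetical stand-in for any asynchronous call whose completion closure ends up invoking HandleResponse.

// Illustrative sketch: track N async operations and wait for all of them.
auto tracker = std::make_shared<TaskTracker>();
for (int i = 0; i < n; ++i) {
    tracker->AddOneTrace();                 // register before issuing the op
    SubmitAsyncRead(i, [tracker](int rc) {  // completion callback
        tracker->HandleResponse(rc);        // records errors, signals on last
    });
}
tracker->Wait();                       // blocks until the count drops to 0
int lastError = tracker->GetResult();  // non-zero if any operation failed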
@@ -162,32 +158,26 @@ void LocalInfo2ChunkIDInfo(const CChunkIDInfo& localinfo,
     idinfo->lpid_ = localinfo.lpid_.value;
 }

-int CreateSnapShot(const char* filename,
-                   const CUserInfo_t userinfo,
+int CreateSnapShot(const char* filename, const CUserInfo_t userinfo,
                    type_uInt64_t* seq) {
     if (globalSnapshotclient == nullptr) {
         LOG(ERROR) << "not init!";
         return -LIBCURVE_ERROR::FAILED;
     }

     int ret = globalSnapshotclient->CreateSnapShot(
-        filename,
-        UserInfo(userinfo.owner, userinfo.password),
-        &seq->value);
-    LOG(INFO) << "create snapshot ret = " << ret
-              << ", seq = " << seq->value;
+        filename, UserInfo(userinfo.owner, userinfo.password), &seq->value);
+    LOG(INFO) << "create snapshot ret = " << ret << ", seq = " << seq->value;

     return ret;
 }

-int DeleteSnapShot(const char* filename,
-                   const CUserInfo_t userinfo,
+int DeleteSnapShot(const char* filename, const CUserInfo_t userinfo,
                    type_uInt64_t seq) {
     if (globalSnapshotclient == nullptr) {
         LOG(ERROR) << "not init!";
         return -LIBCURVE_ERROR::FAILED;
     }
-    return globalSnapshotclient->DeleteSnapShot(filename,
-        UserInfo(userinfo.owner, userinfo.password),
-        seq.value);
+    return globalSnapshotclient->DeleteSnapShot(
+        filename, UserInfo(userinfo.owner, userinfo.password), seq.value);
 }

 int GetSnapShot(const char* filename, const CUserInfo_t userinfo,
@@ -198,10 +188,9 @@ int GetSnapShot(const char* filename, const CUserInfo_t userinfo,
     }

     curve::client::FInfo_t fileinfo;
-    int ret = globalSnapshotclient->GetSnapShot(filename,
-        UserInfo(userinfo.owner, userinfo.password),
-        seq.value,
-        &fileinfo);
+    int ret = globalSnapshotclient->GetSnapShot(
+        filename, UserInfo(userinfo.owner, userinfo.password), seq.value,
+        &fileinfo);
     if (ret == LIBCURVE_ERROR::OK) {
         snapinfo->id.value = fileinfo.id;
         snapinfo->parentid.value = fileinfo.parentid;
@@ -224,22 +213,18 @@ int GetSnapShot(const char* filename, const CUserInfo_t userinfo,
     return ret;
 }

-int GetSnapshotSegmentInfo(const char* filename,
-                           const CUserInfo_t userinfo,
-                           type_uInt64_t seq,
-                           type_uInt64_t offset,
-                           CSegmentInfo *segInfo) {
+int GetSnapshotSegmentInfo(const char* filename, const CUserInfo_t userinfo,
+                           type_uInt64_t seq, type_uInt64_t offset,
+                           CSegmentInfo* segInfo) {
     if (globalSnapshotclient == nullptr) {
         LOG(ERROR) << "not init!";
         return -LIBCURVE_ERROR::FAILED;
     }

     curve::client::SegmentInfo seg;
-    int ret = globalSnapshotclient->GetSnapshotSegmentInfo(filename,
-        UserInfo(userinfo.owner, userinfo.password),
-        seq.value,
-        offset.value,
-        &seg);
+    int ret = globalSnapshotclient->GetSnapshotSegmentInfo(
+        filename, UserInfo(userinfo.owner, userinfo.password), seq.value,
+        offset.value, &seg);
     if (ret == LIBCURVE_ERROR::OK) {
         segInfo->segmentsize.value = seg.segmentsize;
         segInfo->chunksize.value = seg.chunksize;
@@ -259,12 +244,10 @@ int GetSnapshotSegmentInfo(const char* filename,
return ret; } -int GetOrAllocateSegmentInfo(const char* filename, - type_uInt64_t offset, - type_uInt64_t segmentsize, - type_uInt64_t chunksize, - const CUserInfo_t userinfo, - CSegmentInfo *segInfo) { +int GetOrAllocateSegmentInfo(const char* filename, type_uInt64_t offset, + type_uInt64_t segmentsize, type_uInt64_t chunksize, + const CUserInfo_t userinfo, + CSegmentInfo* segInfo) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; @@ -274,14 +257,12 @@ int GetOrAllocateSegmentInfo(const char* filename, fileinfo.segmentsize = segmentsize.value; fileinfo.chunksize = chunksize.value; fileinfo.fullPathName = std::string(filename); - fileinfo.filename = std::string(filename); + fileinfo.filename = std::string(filename); fileinfo.userinfo = UserInfo(userinfo.owner, userinfo.password); curve::client::SegmentInfo seg; - int ret = globalSnapshotclient->GetOrAllocateSegmentInfo(false, - offset.value, - &fileinfo, - &seg); + int ret = globalSnapshotclient->GetOrAllocateSegmentInfo( + false, offset.value, &fileinfo, &seg); segInfo->segmentsize.value = seg.segmentsize; segInfo->chunksize.value = seg.chunksize; segInfo->startoffset.value = seg.startoffset; @@ -300,11 +281,8 @@ int GetOrAllocateSegmentInfo(const char* filename, return ret; } -int ReadChunkSnapshot(CChunkIDInfo cidinfo, - type_uInt64_t seq, - type_uInt64_t offset, - type_uInt64_t len, - char *buf) { +int ReadChunkSnapshot(CChunkIDInfo cidinfo, type_uInt64_t seq, + type_uInt64_t offset, type_uInt64_t len, char* buf) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; @@ -313,12 +291,11 @@ int ReadChunkSnapshot(CChunkIDInfo cidinfo, curve::client::ChunkIDInfo idinfo; LocalInfo2ChunkIDInfo(cidinfo, &idinfo); auto tracker = std::make_shared(); - SnapCloneTestClosure *cb = new SnapCloneTestClosure(tracker); + SnapCloneTestClosure* cb = new SnapCloneTestClosure(tracker); tracker->AddOneTrace(); - int ret = globalSnapshotclient->ReadChunkSnapshot(idinfo, seq.value, - offset.value, len.value, - buf, cb); + int ret = globalSnapshotclient->ReadChunkSnapshot( + idinfo, seq.value, offset.value, len.value, buf, cb); tracker->Wait(); if (ret < 0) { return ret; @@ -340,13 +317,12 @@ int DeleteChunkSnapshotOrCorrectSn(CChunkIDInfo cidinfo, curve::client::ChunkIDInfo idinfo; LocalInfo2ChunkIDInfo(cidinfo, &idinfo); - int ret = globalSnapshotclient->DeleteChunkSnapshotOrCorrectSn(idinfo, - correctedSeq.value); + int ret = globalSnapshotclient->DeleteChunkSnapshotOrCorrectSn( + idinfo, correctedSeq.value); return ret; } - -int GetChunkInfo(CChunkIDInfo cidinfo, CChunkInfoDetail *chunkInfo) { +int GetChunkInfo(CChunkIDInfo cidinfo, CChunkInfoDetail* chunkInfo) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; @@ -364,31 +340,23 @@ int GetChunkInfo(CChunkIDInfo cidinfo, CChunkInfoDetail *chunkInfo) { return ret; } - -int CheckSnapShotStatus(const char* filename, - const CUserInfo_t userinfo, - type_uInt64_t seq, - type_uInt32_t* filestatus) { +int CheckSnapShotStatus(const char* filename, const CUserInfo_t userinfo, + type_uInt64_t seq, type_uInt32_t* filestatus) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; } curve::client::FileStatus fs; - int ret = globalSnapshotclient->CheckSnapShotStatus(filename, - UserInfo(userinfo.owner, userinfo.password), - seq.value, - &fs); + int ret = globalSnapshotclient->CheckSnapShotStatus( + filename, UserInfo(userinfo.owner, 
userinfo.password), seq.value, &fs); filestatus->value = static_cast(fs); return ret; } - -int CreateCloneChunk(const char* location, - const CChunkIDInfo chunkidinfo, - type_uInt64_t sn, - type_uInt64_t correntSn, - type_uInt64_t chunkSize) { +int CreateCloneChunk(const char* location, const CChunkIDInfo chunkidinfo, + type_uInt64_t sn, type_uInt64_t correntSn, + type_uInt64_t chunkSize) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; @@ -397,13 +365,11 @@ int CreateCloneChunk(const char* location, curve::client::ChunkIDInfo idinfo; LocalInfo2ChunkIDInfo(chunkidinfo, &idinfo); auto tracker = std::make_shared(); - SnapCloneTestClosure *cb = new SnapCloneTestClosure(tracker); + SnapCloneTestClosure* cb = new SnapCloneTestClosure(tracker); tracker->AddOneTrace(); - int ret = globalSnapshotclient->CreateCloneChunk(location, idinfo, - sn.value, correntSn.value, - chunkSize.value, - cb); + int ret = globalSnapshotclient->CreateCloneChunk( + location, idinfo, sn.value, correntSn.value, chunkSize.value, cb); tracker->Wait(); if (ret < 0) { return ret; @@ -412,10 +378,8 @@ int CreateCloneChunk(const char* location, } } - -int RecoverChunk(const CChunkIDInfo chunkidinfo, - type_uInt64_t offset, - type_uInt64_t len) { +int RecoverChunk(const CChunkIDInfo chunkidinfo, type_uInt64_t offset, + type_uInt64_t len) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; @@ -423,13 +387,11 @@ int RecoverChunk(const CChunkIDInfo chunkidinfo, curve::client::ChunkIDInfo idinfo; LocalInfo2ChunkIDInfo(chunkidinfo, &idinfo); auto tracker = std::make_shared(); - SnapCloneTestClosure *cb = new SnapCloneTestClosure(tracker); + SnapCloneTestClosure* cb = new SnapCloneTestClosure(tracker); tracker->AddOneTrace(); - int ret = globalSnapshotclient->RecoverChunk(idinfo, - offset.value, - len.value, - cb); + int ret = + globalSnapshotclient->RecoverChunk(idinfo, offset.value, len.value, cb); tracker->Wait(); if (ret < 0) { return ret; diff --git a/curvesnapshot_python/libcurveSnapshot.h b/curvesnapshot_python/libcurveSnapshot.h index bb45a02f57..7db41cf7c3 100644 --- a/curvesnapshot_python/libcurveSnapshot.h +++ b/curvesnapshot_python/libcurveSnapshot.h @@ -24,6 +24,7 @@ #define CURVESNAPSHOT_PYTHON_LIBCURVESNAPSHOT_H_ #include + #include #ifdef __cplusplus @@ -52,42 +53,36 @@ enum CFileType { }; typedef struct FileInfo { - type_uInt64_t id; - type_uInt64_t parentid; - int filetype; - type_uInt64_t length; - type_uInt64_t ctime; + type_uInt64_t id; + type_uInt64_t parentid; + int filetype; + type_uInt64_t length; + type_uInt64_t ctime; } FileInfo_t; -enum CFileStatus { - Created = 0, - Deleting, - Cloning, - CloneMetaInstalled, - Cloned -}; +enum CFileStatus { Created = 0, Deleting, Cloning, CloneMetaInstalled, Cloned }; typedef struct CChunkIDInfo { - type_uInt64_t cid_; - type_uInt32_t cpid_; - type_uInt32_t lpid_; + type_uInt64_t cid_; + type_uInt32_t cpid_; + type_uInt32_t lpid_; } CChunkIDInfo_t; -// 保存每个chunk对应的版本信息 +// Save the version information corresponding to each chunk typedef struct CChunkInfoDetail { type_uInt64_t snSize; std::vector chunkSn; } CChunkInfoDetail_t; - -// 保存logicalpool中segment对应的copysetid信息 +// Save the copysetid information corresponding to the segment in the +// logicalpool typedef struct CLogicalPoolCopysetIDInfo { type_uInt32_t lpid; type_uInt32_t cpidVecSize; std::vector cpidVec; } LogicalPoolCopysetIDInfo_t; -// 保存每个segment的基本信息 +// Save basic information for each segment typedef struct 
CSegmentInfo {
     type_uInt32_t segmentsize;
     type_uInt32_t chunksize;
@@ -98,154 +93,153 @@ typedef struct CSegmentInfo {
 } CSegmentInfo_t;

 typedef struct CFInfo {
-    type_uInt64_t id;
-    type_uInt64_t parentid;
-    CFileType filetype;
-    type_uInt32_t chunksize;
-    type_uInt32_t segmentsize;
-    type_uInt64_t length;
-    type_uInt64_t ctime;
-    type_uInt64_t seqnum;
-    char owner[256];
-    char filename[256];
-    CFileStatus filestatus;
+    type_uInt64_t id;
+    type_uInt64_t parentid;
+    CFileType filetype;
+    type_uInt32_t chunksize;
+    type_uInt32_t segmentsize;
+    type_uInt64_t length;
+    type_uInt64_t ctime;
+    type_uInt64_t seqnum;
+    char owner[256];
+    char filename[256];
+    CFileStatus filestatus;
 } CFInfo_t;

 int Init(const char* path);

 /**
- * 创建快照
- * @param: userinfo是用户信息
- * @param: filename为要创建快照的文件名
- * @param: seq是出参,获取该文件的版本信息
- * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED
+ * Create a snapshot
+ * @param: userinfo is the user information
+ * @param: filename is the name of the file to snapshot
+ * @param: seq is an output parameter that receives the file's version
+ * information
+ * @return: LIBCURVE_ERROR::OK on success, otherwise LIBCURVE_ERROR::FAILED
 */
-int CreateSnapShot(const char* filename,
-                   const CUserInfo_t userinfo,
+int CreateSnapShot(const char* filename, const CUserInfo_t userinfo,
                    type_uInt64_t* seq);

 /**
- * 删除快照
- * @param: userinfo是用户信息
- * @param: filename为要删除的文件名
- * @param: seq该文件的版本信息
- * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED
+ * Delete a snapshot
+ * @param: userinfo is the user information
+ * @param: filename is the file name to be deleted
+ * @param: seq is the version information of this file
+ * @return: LIBCURVE_ERROR::OK on success, otherwise LIBCURVE_ERROR::FAILED
 */
-int DeleteSnapShot(const char* filename,
-                   const CUserInfo_t userinfo,
+int DeleteSnapShot(const char* filename, const CUserInfo_t userinfo,
                    type_uInt64_t seq);

 /**
- * 获取快照对应的文件信息
- * @param: userinfo是用户信息
- * @param: filename为对应的文件名
- * @param: seq为该文件打快照时对应的版本信息
- * @param: snapinfo是出参,保存当前文件的基础信息
- * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED
+ * Obtain the file information corresponding to a snapshot
+ * @param: userinfo is the user information
+ * @param: filename is the corresponding file name
+ * @param: seq is the version the file had when the snapshot was taken
+ * @param: snapinfo is an output parameter that holds the file's basic
+ * information
+ * @return: LIBCURVE_ERROR::OK on success, otherwise LIBCURVE_ERROR::FAILED
 */
 int GetSnapShot(const char* fname, const CUserInfo_t userinfo,
                 type_uInt64_t seq, CFInfo_t* snapinfo);

 /**
- * 获取快照数据segment信息
- * @param: userinfo是用户信息
- * @param: filenam文件名
- * @param: seq是文件版本号信息
- * @param: offset是文件的偏移
- * @param:segInfo是出参,保存当前文件的快照segment信息
- * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED
+ * Obtain segment information of the snapshot data
+ * @param: userinfo is the user information
+ * @param: filename is the file name
+ * @param: seq is the file version number
+ * @param: offset is the offset within the file
+ * @param: segInfo is an output parameter that holds the file's snapshot
+ * segment information
+ * @return: LIBCURVE_ERROR::OK on success, otherwise LIBCURVE_ERROR::FAILED
 */
-int GetSnapshotSegmentInfo(const char* filename,
-                           const CUserInfo_t userinfo,
-                           type_uInt64_t seq,
-                           type_uInt64_t offset,
-                           CSegmentInfo *segInfo);
+int GetSnapshotSegmentInfo(const char* filename, const CUserInfo_t userinfo,
+                           type_uInt64_t seq, type_uInt64_t offset,
+                           CSegmentInfo* segInfo);
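The declarations above chain together into the usual inspection path: create a snapshot, fetch its file info, then locate a segment. A hypothetical call sequence (error handling elided; the .value member access follows the wrapper types used throughout this header, and LIBCURVE_ERROR::OK is assumed to be 0):

void SnapshotInspectExample(const char* file, const CUserInfo_t user) {
    type_uInt64_t seq;
    if (CreateSnapShot(file, user, &seq) != 0) {  // assumes OK == 0
        return;
    }

    CFInfo_t info;
    GetSnapShot(file, user, seq, &info);  // basic file info of the snapshot

    type_uInt64_t offset;
    offset.value = 0;  // look up the segment covering offset 0
    CSegmentInfo seg;
    GetSnapshotSegmentInfo(file, user, seq, offset, &seg);
}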
 /**
- * 读取seq版本号的快照数据
- * @param: cidinfo是当前chunk对应的id信息
- * @param: seq是快照版本号
- * @param: offset是快照内的offset
- * @param: len是要读取的长度
- * @param: buf是读取缓冲区
- * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED
+ * Read the snapshot data of version seq
+ * @param: cidinfo is the id information of the current chunk
+ * @param: seq is the snapshot version number
+ * @param: offset is the offset within the snapshot
+ * @param: len is the length to be read
+ * @param: buf is the read buffer
+ * @return: LIBCURVE_ERROR::OK on success, otherwise LIBCURVE_ERROR::FAILED
 */
-int ReadChunkSnapshot(CChunkIDInfo cidinfo,
-                      type_uInt64_t seq,
-                      type_uInt64_t offset,
-                      type_uInt64_t len,
-                      char *buf);
+int ReadChunkSnapshot(CChunkIDInfo cidinfo, type_uInt64_t seq,
+                      type_uInt64_t offset, type_uInt64_t len, char* buf);

 /**
- * 删除此次转储时产生的或者历史遗留的快照
- * 如果转储过程中没有产生快照,则修改chunk的correctedSn
- * @param: cidinfo是当前chunk对应的id信息
- * @param: correctedSeq是chunk需要修正的版本
+ * Delete the snapshot generated during this dump, or one left over from
+ * history; if no snapshot was generated during the dump, modify the chunk's
+ * correctedSn
+ * @param: cidinfo is the id information of the current chunk
+ * @param: correctedSeq is the version of the chunk that needs to be corrected
 */
 int DeleteChunkSnapshotOrCorrectSn(CChunkIDInfo cidinfo,
                                    type_uInt64_t correctedSeq);

 /**
- * 获取chunk的版本信息,chunkInfo是出参
- * @param: cidinfo是当前chunk对应的id信息
- * @param: chunkInfo是快照的详细信息
+ * Obtain the version information of a chunk; chunkInfo is the output
+ * parameter
+ * @param: cidinfo is the id information of the current chunk
+ * @param: chunkInfo is the detailed information of the snapshot
 */
-int GetChunkInfo(CChunkIDInfo cidinfo, CChunkInfoDetail *chunkInfo);
+int GetChunkInfo(CChunkIDInfo cidinfo, CChunkInfoDetail* chunkInfo);

 /**
- * 获取快照状态
- * @param: userinfo是用户信息
- * @param: filenam文件名
- * @param: seq是文件版本号信息
+ * Get the snapshot status
+ * @param: userinfo is the user information
+ * @param: filename is the file name
+ * @param: seq is the file version number
 */
-int CheckSnapShotStatus(const char* filename,
-                        const CUserInfo_t userinfo,
-                        type_uInt64_t seq,
-                        type_uInt32_t* filestatus);
+int CheckSnapShotStatus(const char* filename, const CUserInfo_t userinfo,
+                        type_uInt64_t seq, type_uInt32_t* filestatus);

 /**
- * 获取快照分配信息
- * @param: filename是当前文件名
- * @param: offset是当前的文件偏移
- * @param: segmentsize为segment大小
+ * Obtain snapshot allocation information
+ * @param: filename is the current file name
+ * @param: offset is the current file offset
+ * @param: segmentsize is the segment size
 * @param: chunksize
- * @param: userinfo是用户信息
- * @param[out]: segInfo是出参
+ * @param: userinfo is the user information
+ * @param[out]: segInfo is the output parameter
 */
-int GetOrAllocateSegmentInfo(const char* filename,
-                             type_uInt64_t offset,
-                             type_uInt64_t segmentsize,
-                             type_uInt64_t chunksize,
-                             const CUserInfo_t userinfo,
-                             CSegmentInfo *segInfo);
+int GetOrAllocateSegmentInfo(const char* filename, type_uInt64_t offset,
+                             type_uInt64_t segmentsize, type_uInt64_t chunksize,
+                             const CUserInfo_t userinfo, CSegmentInfo* segInfo);

 /**
- * @brief lazy 创建clone chunk
+ * @brief Lazily create a clone chunk
 * @detail
- * - location的格式定义为 A@B的形式。
- * - 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址;
- * - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs
+ * - The format of 'location' is defined as A@B.
+ * - If the source data is on S3, the 'location' format is uri@s3, where 'uri' + * is the actual address of the chunk object. + * - If the source data is on CurveFS, the 'location' format is + * /filename/chunkindex@cs. * - * @param:location 数据源的url - * @param:chunkidinfo 目标chunk - * @param:sn chunk的序列号 - * @param:chunkSize chunk的大小 - * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn + * @param: location The URL of the data source + * @param: chunkidinfo The target chunk + * @param: sn The sequence number of the chunk + * @param: chunkSize The size of the chunk + * @param: correntSn Used for modifying the 'correctedSn' when creating the + * clone chunk * - * @return 错误码 + * @return error code */ -int CreateCloneChunk(const char* location, - const CChunkIDInfo chunkidinfo, - type_uInt64_t sn, - type_uInt64_t correntSn, - type_uInt64_t chunkSize); +int CreateCloneChunk(const char* location, const CChunkIDInfo chunkidinfo, + type_uInt64_t sn, type_uInt64_t correntSn, + type_uInt64_t chunkSize); /** - * @brief 实际恢复chunk数据 + * @brief Actual recovery chunk data * - * @param:chunkidinfo chunkidinfo - * @param:offset 偏移 - * @param:len 长度 + * @param: chunkidinfo chunkidinfo + * @param: offset offset + * @param: len length * - * @return 错误码 + * @return error code */ -int RecoverChunk(const CChunkIDInfo chunkidinfo, - type_uInt64_t offset, - type_uInt64_t len); +int RecoverChunk(const CChunkIDInfo chunkidinfo, type_uInt64_t offset, + type_uInt64_t len); /** - * 析构,回收资源 + * Deconstruct and recycle resources */ void UnInit(); diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.0 b/deploy/local/chunkserver/conf/chunkserver.conf.0 index 1525855ebe..b6b0010c83 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.0 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.0 @@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./0/chunkserver.dat chunkserver.disk_type=nvme chunkserver.snapshot_throttle_throughput_bytes=41943040 chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit the number of inflight io, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -145,7 +145,7 @@ chunkfilepool.retry_times=5 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# Does walpool share chunkfilepool? If true, the following configuration is invalid walfilepool.use_chunk_file_pool=true walfilepool.enable_get_segment_from_pool=false walfilepool.file_pool_dir=./0/walfilepool/ diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.1 b/deploy/local/chunkserver/conf/chunkserver.conf.1 index d14fa15bb6..62719e0c30 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.1 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.1 @@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./1/chunkserver.dat chunkserver.disk_type=nvme chunkserver.snapshot_throttle_throughput_bytes=41943040 chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit the number of inflight io, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -143,7 +143,7 @@ chunkfilepool.retry_times=5 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# Does walpool share chunkfilepool? 
If true, the following configuration is invalid
 walfilepool.use_chunk_file_pool=true
 walfilepool.enable_get_segment_from_pool=false
 walfilepool.file_pool_dir=./1/walfilepool/
diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.2 b/deploy/local/chunkserver/conf/chunkserver.conf.2
index 2604423d6f..edc5750db7 100644
--- a/deploy/local/chunkserver/conf/chunkserver.conf.2
+++ b/deploy/local/chunkserver/conf/chunkserver.conf.2
@@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./2/chunkserver.dat
 chunkserver.disk_type=nvme
 chunkserver.snapshot_throttle_throughput_bytes=41943040
 chunkserver.snapshot_throttle_check_cycles=4
-# 限制inflight io数量,一般是5000
+# Limit the number of inflight io, usually 5000
 chunkserver.max_inflight_requests=5000

 #
@@ -142,7 +142,7 @@ chunkfilepool.retry_times=5
 #
 # WAL file pool
 #
-# walpool是否共用chunkfilepool,如果为true,则以下配置无效
+# Does walpool share chunkfilepool? If true, the following configuration is invalid
 walfilepool.use_chunk_file_pool=true
 walfilepool.enable_get_segment_from_pool=false
 walfilepool.file_pool_dir=./2/walfilepool/
diff --git a/include/chunkserver/chunkserver_common.h b/include/chunkserver/chunkserver_common.h
index c483dbea82..62aaf9fce7 100644
--- a/include/chunkserver/chunkserver_common.h
+++ b/include/chunkserver/chunkserver_common.h
@@ -24,9 +24,9 @@
 #define INCLUDE_CHUNKSERVER_CHUNKSERVER_COMMON_H_

 #include
+#include
 #include
 #include
-#include
 #include
 #include

@@ -35,16 +35,16 @@ namespace curve {
 namespace chunkserver {

 /* for IDs */
-using LogicPoolID = uint32_t;
-using CopysetID = uint32_t;
-using ChunkID = uint64_t;
-using SnapshotID = uint64_t;
-using SequenceNum = uint64_t;
+using LogicPoolID = uint32_t;
+using CopysetID = uint32_t;
+using ChunkID = uint64_t;
+using SnapshotID = uint64_t;
+using SequenceNum = uint64_t;

 using ChunkSizeType = uint32_t;
-using PageSizeType = uint32_t;
+using PageSizeType = uint32_t;

-using GroupNid = uint64_t;
+using GroupNid = uint64_t;
 using ChunkServerID = uint32_t;

 // braft
@@ -60,57 +60,55 @@ using PosixFileSystemAdaptor = braft::PosixFileSystemAdaptor;
 using SnapshotThrottle = braft::SnapshotThrottle;
 using ThroughputSnapshotThrottle = braft::ThroughputSnapshotThrottle;

-
-// TODO(lixiaocui): 考虑一下后续的单元测试或者校验要怎么做
+// TODO(lixiaocui): Consider how to handle subsequent unit testing or
+// validation
 /*
- * IO性能统计复合metric类型
+ * Composite metric type for IO performance statistics
 */
 struct IoPerfMetric {
-    uint64_t readCount;
-    uint64_t writeCount;
-    uint64_t readBytes;
-    uint64_t writeBytes;
-    uint64_t readIops;
-    uint64_t writeIops;
-    uint64_t readBps;
-    uint64_t writeBps;
+    uint64_t readCount;
+    uint64_t writeCount;
+    uint64_t readBytes;
+    uint64_t writeBytes;
+    uint64_t readIops;
+    uint64_t writeIops;
+    uint64_t readBps;
+    uint64_t writeBps;
 };

 /**
- * 将(LogicPoolID, CopysetID)二元组转换成数字格式的复制组ID,格式如下:
- * | group id |
- * | 32 | 32 |
+ * Convert the (LogicPoolID, CopysetID) pair into a replication group ID in
+ * numeric format, laid out as follows:
+ * |          group id          |
+ * |       32      |     32     |
 * | logic pool id | copyset id |
 */
-inline GroupNid ToGroupNid(const LogicPoolID &logicPoolId,
-                           const CopysetID &copysetId) {
+inline GroupNid ToGroupNid(const LogicPoolID& logicPoolId,
+                           const CopysetID& copysetId) {
     return (static_cast(logicPoolId) << 32) | copysetId;
 }
 /**
- * 将(LogicPoolID, CopysetID)二元组转换成字符串格式的复制组ID
+ * Convert the (LogicPoolID, CopysetID) pair to a replication group ID in
+ * string format
 */
-inline GroupId ToGroupId(const LogicPoolID &logicPoolId,
-                         const CopysetID &copysetId) {
+inline GroupId ToGroupId(const LogicPoolID& logicPoolId,
+                         const CopysetID& copysetId) {
     return std::to_string(ToGroupNid(logicPoolId, copysetId));
 }
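The bit layout documented above is easy to sanity-check: the logic pool id occupies the high 32 bits and the copyset id the low 32 bits. A small worked example (illustrative, not part of the patch):

#include <cassert>
#include <cstdint>

void GroupNidLayoutExample() {
    // ToGroupNid(1, 2): pool id in the high half, copyset id in the low half.
    uint64_t gid = (static_cast<uint64_t>(1) << 32) | 2;
    assert(gid == 4294967298ULL);             // 1 * 2^32 + 2
    assert((gid >> 32) == 1);                 // what GetPoolID returns
    assert((gid & ((1ULL << 32) - 1)) == 2);  // what GetCopysetID returns
}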
-#define ToBraftGroupId ToGroupId
+#define ToBraftGroupId ToGroupId

 /**
- * 从数字格式的复制组ID中解析LogicPoolID
+ * Parse the LogicPoolID from a replication group ID in numeric format
 */
-inline LogicPoolID GetPoolID(const GroupNid &groupId) {
-    return groupId >> 32;
-}
+inline LogicPoolID GetPoolID(const GroupNid& groupId) { return groupId >> 32; }
 /**
- * 从数字格式的复制组ID中解析CopysetID
+ * Parse the CopysetID from a replication group ID in numeric format
 */
-inline CopysetID GetCopysetID(const GroupNid &groupId) {
+inline CopysetID GetCopysetID(const GroupNid& groupId) {
     return groupId & (((uint64_t)1 << 32) - 1);
 }

-/* 格式输出 group id 的 字符串 (logicPoolId, copysetId) */
+/* Format the group id as the string "(logicPoolId, copysetId)" */
 inline std::string ToGroupIdString(const LogicPoolID& logicPoolId,
                                    const CopysetID& copysetId) {
     std::string groupIdString;
     groupIdString.append("(");
     groupIdString.append(std::to_string(logicPoolId));
@@ -121,7 +119,7 @@ inline std::string ToGroupIdString(const LogicPoolID &logicPoolId,
     groupIdString.append(")");
     return groupIdString;
 }
-#define ToGroupIdStr ToGroupIdString
+#define ToGroupIdStr ToGroupIdString

 // Meta page is header of chunkfile, and is used to store meta data of
 // chunkfile.
diff --git a/include/client/libcurve.h b/include/client/libcurve.h
index 58459c8bb2..92fa097295 100644
--- a/include/client/libcurve.h
+++ b/include/client/libcurve.h
@@ -23,17 +23,18 @@
 #ifndef INCLUDE_CLIENT_LIBCURVE_H_
 #define INCLUDE_CLIENT_LIBCURVE_H_

-#include
 #include
-#include
+#include
+
 #include
 #include
+#include

 #include "libcurve_define.h"  // NOLINT

 #define IO_ALIGNED_BLOCK_SIZE 4096
-#define PATH_MAX_SIZE 4096
-#define NAME_MAX_SIZE 256
+#define PATH_MAX_SIZE 4096
+#define NAME_MAX_SIZE 256

 enum FileType {
     INODE_DIRECTORY = 0,
@@ -44,38 +45,38 @@
 };

 typedef struct FileStatInfo {
-    uint64_t id;
-    uint64_t parentid;
-    FileType filetype;
-    uint64_t length;
-    uint64_t ctime;
-    char filename[NAME_MAX_SIZE];
-    char owner[NAME_MAX_SIZE];
-    int fileStatus;
-    uint64_t stripeUnit;
-    uint64_t stripeCount;
-    uint32_t blocksize;
+    uint64_t id;
+    uint64_t parentid;
+    FileType filetype;
+    uint64_t length;
+    uint64_t ctime;
+    char filename[NAME_MAX_SIZE];
+    char owner[NAME_MAX_SIZE];
+    int fileStatus;
+    uint64_t stripeUnit;
+    uint64_t stripeCount;
+    uint32_t blocksize;
 } FileStatInfo_t;

-// 存储用户信息
+// Stores user information
 typedef struct C_UserInfo {
-    // 当前执行的owner信息, owner信息需要以'\0'结尾
+    // Owner information for the current operation; it must end with '\0'
     char owner[NAME_MAX_SIZE];
-    // 当owner="root"的时候,需要提供password作为计算signature的key
-    // password信息需要以'\0'结尾
+    // When owner = "root", a password must be provided as the key for
+    // calculating the signature; the password must also end with '\0'
     char password[NAME_MAX_SIZE];
 } C_UserInfo_t;

 typedef struct DirInfo {
-    // 当前listdir的目录路径
-    char* dirpath;
-    // 当前listdir操作的用户信息
-    C_UserInfo_t* userinfo;
-    // 当前dir大小,也就是文件数量
-    uint64_t dirSize;
-    // 当前dir的内的文件信息内容,是一个数组
-    // fileStat是这个数组的头,数组大小为dirSize
-    FileStatInfo_t* fileStat;
+    // Directory path of the current listdir
+    char* dirpath;
+    // User information of the current listdir operation
+    C_UserInfo_t* userinfo;
+    // Size of the current dir, i.e. the number of files
+    uint64_t dirSize;
+    // File information entries under the current dir, stored as an array;
+    // fileStat is the head of that array, whose size is dirSize
+    FileStatInfo_t* fileStat;
 } DirInfo_t;
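Since fileStat points at an array of dirSize entries, a listing walks it by index. A sketch using the OpenDir/Listdir/CloseDir functions declared further below in this header (illustrative only; the 0-on-success convention is the one documented there):

#include <cstdint>
#include <cstdio>

void ListDirExample(const char* path, const C_UserInfo_t* user) {
    DirInfo_t* dir = OpenDir(path, user);
    if (dir == nullptr) {
        return;
    }
    if (Listdir(dir) == 0) {  // fills dirSize and the fileStat array
        for (uint64_t i = 0; i < dir->dirSize; ++i) {
            std::printf("%s\n", dir->fileStat[i].filename);
        }
    }
    CloseDir(dir);
}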
 #ifdef __cplusplus
 extern "C" {
@@ -85,21 +86,20 @@ extern "C" {
 const char* LibCurveErrorName(LIBCURVE_ERROR err);

 /**
- * 初始化系统
- * @param: path为配置文件路径
- * @return: 成功返回0,否则返回-1.
+ * Initialize the system
+ * @param: path is the configuration file path
+ * @return: Returns 0 on success, otherwise -1
 */
 int Init(const char* path);

 /**
- * 打开文件,qemu打开文件的方式
- * @param: filename文件名, filename中包含用户信息
- *         例如:/1.img_userinfo_
- * @return: 返回文件fd
+ * Open a file, the way qemu opens a file
+ * @param: filename is the file name, which contains user information
+ *         For example: /1.img_userinfo_
+ * @return: Returns the file fd
 */
 int Open4Qemu(const char* filename);

-
 /**
 * increase epoch
 * @param: filename, filename include userinfo
@@ -109,41 +109,43 @@ int Open4Qemu(const char* filename);
 int IncreaseEpoch(const char* filename);

 /**
- * 打开文件,非qemu场景
- * @param: filename文件名
- * @param: userinfo为要打开的文件的用户信息
- * @return: 返回文件fd
+ * Open a file, non-qemu scenario
+ * @param: filename is the file name
+ * @param: userinfo is the user information of the file to be opened
+ * @return: Returns the file fd
 */
 int Open(const char* filename, const C_UserInfo_t* userinfo);

 /**
- * 创建文件
- * @param: filename文件名
- * @param: userinfo是当前打开或创建时携带的user信息
- * @param: size文件长度,当create为true的时候以size长度创建文件
- * @return: 成功返回 0, 失败返回小于0,可能有多种可能,比如内部错误,或者文件已存在
+ * Create a file
+ * @param: filename is the file name
+ * @param: userinfo is the user information carried when opening or creating
+ * @param: size is the file length; when create is true, a file of length size
+ * is created
+ * @return: Returns 0 on success and a negative value on failure; there are
+ * several possible causes, e.g. an internal error, or the file already exists
 */
-int Create(const char* filename,
-           const C_UserInfo_t* userinfo,
-           size_t size);
+int Create(const char* filename, const C_UserInfo_t* userinfo, size_t size);

 /**
- * 同步模式读
- * @param: fd为当前open返回的文件描述符
- * @param: buf为当前待读取的缓冲区
- * @param:offset文件内的偏移
- * @parma:length为待读取的长度
- * @return: 成功返回读取长度, 否则-LIBCURVE_ERROR::FAILED等
+ * Read in synchronous mode
+ * @param: fd is the file descriptor returned by the current open
+ * @param: buf is the buffer to read into
+ * @param: offset is the offset within the file
+ * @param: length is the length to be read
+ * @return: Returns the number of bytes read on success, otherwise
+ * -LIBCURVE_ERROR::FAILED, etc.
 */
 int Read(int fd, char* buf, off_t offset, size_t length);

 /**
- * 同步模式写
- * @param: fd为当前open返回的文件描述符
- * @param: buf为当前待写入的缓冲区
- * @param:offset文件内的偏移
- * @parma:length为待读取的长度
- * @return: 成功返回 写入长度,否则-LIBCURVE_ERROR::FAILED等
+ * Write in synchronous mode
+ * @param: fd is the file descriptor returned by the current open
+ * @param: buf is the buffer to write from
+ * @param: offset is the offset within the file
+ * @param: length is the length to be written
+ * @return: Returns the number of bytes written on success, otherwise
+ * -LIBCURVE_ERROR::FAILED, etc.
 */
 int Write(int fd, const char* buf, off_t offset, size_t length);

@@ -158,18 +160,20 @@ int Write(int fd, const char* buf, off_t offset, size_t length);
 int Discard(int fd, off_t offset, size_t length);
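The synchronous calls above compose into a simple round trip. A sketch (illustrative, not part of the patch) that writes and reads back one block sized by IO_ALIGNED_BLOCK_SIZE; Open and Close are declared elsewhere in this header:

#include <cstring>

void SyncIoExample(const char* filename, const C_UserInfo_t* user) {
    int fd = Open(filename, user);
    if (fd < 0) {
        return;
    }
    char buf[IO_ALIGNED_BLOCK_SIZE];
    std::memset(buf, 'A', sizeof(buf));
    // Offsets and lengths are kept aligned to IO_ALIGNED_BLOCK_SIZE here.
    Write(fd, buf, /*offset=*/0, sizeof(buf));
    Read(fd, buf, /*offset=*/0, sizeof(buf));
    Close(fd);
}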
 /**
- * 异步模式读
- * @param: fd为当前open返回的文件描述符
- * @param: aioctx为异步读写的io上下文,保存基本的io信息
- * @return: 成功返回 0,否则-LIBCURVE_ERROR::FAILED
+ * Read in asynchronous mode
+ * @param: fd is the file descriptor returned by the current open
+ * @param: aioctx is the async IO context, which stores basic IO information
+ * @return: Returns 0 on success, otherwise -LIBCURVE_ERROR::FAILED
 */
 int AioRead(int fd, CurveAioContext* aioctx);

 /**
- * 异步模式写
- * @param: fd为当前open返回的文件描述符
- * @param: aioctx为异步读写的io上下文,保存基本的io信息
- * @return: 成功返回 0,否则-LIBCURVE_ERROR::FAILED
+ * Write in asynchronous mode
+ * @param: fd is the file descriptor returned by the current open
+ * @param: aioctx is the async IO context, which stores basic IO information
+ * @return: Returns 0 on success, otherwise -LIBCURVE_ERROR::FAILED
 */
 int AioWrite(int fd, CurveAioContext* aioctx);

@@ -182,51 +186,58 @@ int AioWrite(int fd, CurveAioContext* aioctx);
 int AioDiscard(int fd, CurveAioContext* aioctx);

 /**
- * 重命名文件
- * @param: userinfo是用户信息
- * @param: oldpath源路径
- * @param: newpath目标路径
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Rename a file
+ * @param: userinfo is the user information
+ * @param: oldpath is the source path
+ * @param: newpath is the target path
+ * @return: Returns 0 on success;
+ *          otherwise it may return -LIBCURVE_ERROR::FAILED,
+ *          -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
-int Rename(const C_UserInfo_t* userinfo, const char* oldpath, const char* newpath);  // NOLINT
+int Rename(const C_UserInfo_t* userinfo, const char* oldpath,
+           const char* newpath);  // NOLINT

 /**
- * 扩展文件
- * @param: userinfo是用户信息
- * @param: filename文件名
- * @param: newsize新的size
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Extend a file
+ * @param: userinfo is the user information
+ * @param: filename is the file name
+ * @param: newsize is the new size
+ * @return: Returns 0 on success;
+ *          otherwise it may return -LIBCURVE_ERROR::FAILED,
+ *          -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
-int Extend(const char* filename, const C_UserInfo_t* userinfo, uint64_t newsize);  // NOLINT
+int Extend(const char* filename, const C_UserInfo_t* userinfo,
+           uint64_t newsize);  // NOLINT

 /**
- * 扩展文件,Qemu场景在线扩容
- * @param: filename文件名
- * @param: newsize新的size
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Extend a file, online expansion for the qemu scenario
+ * @param: filename is the file name
+ * @param: newsize is the new size
+ * @return: Returns 0 on success;
+ *          otherwise it may return -LIBCURVE_ERROR::FAILED,
+ *          -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
-int Extend4Qemu(const char* filename, int64_t newsize);  // NOLINT
-
+int Extend4Qemu(const char* filename, int64_t newsize);  // NOLINT

 /**
- * 删除文件
- * @param: userinfo是用户信息
- * @param: filename待删除的文件名
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Delete a file
+ * @param: userinfo is the user information
+ * @param: filename is the file name to be deleted
+ * @return: Returns 0 on success;
+ *          otherwise it may return -LIBCURVE_ERROR::FAILED,
+ *          -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
 int Unlink(const char* filename, const C_UserInfo_t* userinfo);

 /**
- * 强制删除文件, unlink删除文件在mds一侧并不是真正的删除,
- * 而是放到了垃圾回收站,当使用DeleteForce接口删除的时候是直接删除
- * @param: userinfo是用户信息
- * @param: filename待删除的文件名
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Forcibly delete a file. Unlink does not truly delete the file on the mds
+ * side; it moves the file to the recycle bin, whereas deleting through the
+ * DeleteForce interface removes it directly
+ * @param: userinfo is the user information
+ * @param: filename is the file name to be deleted
+ * @return: Returns 0 on success;
+ *          otherwise it may return -LIBCURVE_ERROR::FAILED,
+ *          -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
 int DeleteForce(const char* filename, const C_UserInfo_t* userinfo);
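For the asynchronous variants the caller owns a CurveAioContext for the lifetime of the request and is notified through its callback. A sketch of the pattern; note that the field names (op, offset, length, buf, cb) and the LIBCURVE_OP_READ constant are assumptions about libcurve_define.h, not guaranteed by this diff:

// The context must outlive the request in real code; a static object is used
// here only to keep the sketch short.
static CurveAioContext g_ctx;

void OnReadDone(CurveAioContext* ctx) {
    // Completion callback: ctx->ret is assumed to carry the result.
}

void AsyncReadExample(int fd, char* buf) {
    g_ctx.op = LIBCURVE_OP_READ;  // assumed op constant
    g_ctx.offset = 0;
    g_ctx.length = IO_ALIGNED_BLOCK_SIZE;
    g_ctx.buf = buf;
    g_ctx.cb = OnReadDone;        // invoked when the request completes
    AioRead(fd, &g_ctx);          // returns 0 if the request was queued
}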
@@ -239,96 +250,107 @@ int DeleteForce(const char* filename, const C_UserInfo_t* userinfo);
 *          -LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED and so on
 */
 int Recover(const char* filename, const C_UserInfo_t* userinfo,
-           uint64_t fileId);
+            uint64_t fileId);

 /**
- * 在获取目录内容之前先打开文件夹
- * @param: userinfo是用户信息
- * @param: dirpath是目录路径
- * @return: 成功返回一个非空的DirInfo_t指针,否则返回一个空指针
+ * Open the directory before fetching its contents
+ * @param: userinfo is the user information
+ * @param: dirpath is the directory path
+ * @return: Returns a non-null DirInfo_t pointer on success, otherwise a null
+ * pointer
 */
 DirInfo_t* OpenDir(const char* dirpath, const C_UserInfo_t* userinfo);

 /**
- * 枚举目录内容, 用户OpenDir成功之后才能list
- * @param[in][out]: dirinfo为OpenDir返回的指针, 内部会将mds返回的信息放入次结构中
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Enumerate directory contents; listing is only possible after OpenDir
+ * succeeds
+ * @param[in][out]: dirinfo is the pointer returned by OpenDir; internally,
+ * the information returned by mds is filled into this structure
+ * @return: Returns 0 on success;
+ *          otherwise it may return -LIBCURVE_ERROR::FAILED,
+ *          -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
 int Listdir(DirInfo_t* dirinfo);

 /**
- * 关闭打开的文件夹
- * @param: dirinfo为opendir返回的dir信息
+ * Close an opened directory
+ * @param: dirinfo is the dir information returned by OpenDir
 */
 void CloseDir(DirInfo_t* dirinfo);

 /**
- * 创建目录
- * @param: userinfo是用户信息
- * @param: dirpath是目录路径
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Create a directory
+ * @param: userinfo is the user information
+ * @param: dirpath is the directory path
+ * @return: Returns 0 on success;
+ *          otherwise it may return -LIBCURVE_ERROR::FAILED,
+ *          -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
 int Mkdir(const char* dirpath, const C_UserInfo_t* userinfo);

 /**
- * 删除目录
- * @param: userinfo是用户信息
- * @param: dirpath是目录路径
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Delete a directory
+ * @param: userinfo is the user information
+ * @param: dirpath is the directory path
+ * @return: Returns 0 on success;
+ *          otherwise it may return -LIBCURVE_ERROR::FAILED,
+ *          -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
 int Rmdir(const char* dirpath, const C_UserInfo_t* userinfo);

 /**
- * 获取文件信息
- * @param: filename文件名
- * @param: userinfo是用户信息
- * @param: finfo是出参,携带当前文件的基础信息
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Obtain file information
+ * @param: filename is the file name
+ * @param: userinfo is the user information
+ * @param: finfo is an output parameter that carries the basic information of
+ * the current file
+ * @return: Returns 0 on success;
+ *          otherwise it may return -LIBCURVE_ERROR::FAILED,
+ *          -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
-int StatFile(const char* filename,
-             const C_UserInfo_t* userinfo,
+int StatFile(const char* filename, const C_UserInfo_t* userinfo,
             FileStatInfo* finfo);

 /**
- * 获取文件信息
- * @param: filename文件名
- * @param: finfo是出参,携带当前文件的基础信息
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Obtain file information
+ * @param: filename is the file name
+ * @param: finfo is an output parameter that carries the basic information of
+ * the current file
+ * @return: Returns 0 on success;
+ *          otherwise it may return -LIBCURVE_ERROR::FAILED,
+ *          -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
 int StatFile4Qemu(const char* filename, FileStatInfo* finfo);
 /**
- * 变更owner
- * @param: filename待变更的文件名
- * @param: newOwner新的owner信息
- * @param: userinfo执行此操作的user信息,只有root用户才能执行变更
- * @return: 成功返回0,
- *          否则返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Change the owner
+ * @param: filename is the file name to be changed
+ * @param: newOwner is the new owner information
+ * @param: userinfo is the user information for performing this operation;
+ * only the root user can perform the change
+ * @return: Returns 0 on success;
+ *          otherwise it may return -LIBCURVE_ERROR::FAILED,
+ *          -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
-int ChangeOwner(const char* filename,
-                const char* newOwner,
+int ChangeOwner(const char* filename, const char* newOwner,
                const C_UserInfo_t* userinfo);

 /**
- * close通过fd找到对应的instance进行删除
- * @param: fd为当前open返回的文件描述符
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Close: find the corresponding instance through fd and delete it
+ * @param: fd is the file descriptor returned by the current open
+ * @return: Returns 0 on success;
+ *          otherwise it may return -LIBCURVE_ERROR::FAILED,
+ *          -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
 int Close(int fd);

 void UnInit();

 /**
- * @brief: 获取集群id, id用UUID标识
- * @param: buf存放集群id
- * @param: buf的长度
- * @return: 成功返回0, 否则返回-LIBCURVE_ERROR::FAILED
+ * @brief: Obtain the cluster id, which is identified by a UUID
+ * @param: buf stores the cluster id
+ * @param: len is the length of buf
+ * @return: Returns 0 on success, otherwise -LIBCURVE_ERROR::FAILED
 */
 int GetClusterId(char* buf, int len);

@@ -343,24 +365,23 @@ class FileClient;

 enum class UserDataType {
     RawBuffer,  // char*
-    IOBuffer    // butil::IOBuf*
+    IOBuffer    // butil::IOBuf*
 };

-// 存储用户信息
+// Stores user information
 typedef struct UserInfo {
-    // 当前执行的owner信息
+    // Owner information for the current operation
    std::string owner;
-    // 当owner=root的时候,需要提供password作为计算signature的key
+    // When owner = root, a password must be provided as the key for
+    // calculating the signature
    std::string password;

    UserInfo() = default;

    UserInfo(const std::string& own, const std::string& pwd = "")
-      : owner(own), password(pwd) {}
+        : owner(own), password(pwd) {}

-    bool Valid() const {
-        return !owner.empty();
-    }
+    bool Valid() const { return !owner.empty(); }
 } UserInfo_t;

 inline bool operator==(const UserInfo& lhs, const UserInfo& rhs) {
@@ -380,14 +401,14 @@ class CurveClient {
    virtual ~CurveClient();

    /**
-     * 初始化
-     * @param configPath 配置文件路径
-     * @return 返回错误码
+     * Initialize
+     * @param configPath is the configuration file path
+     * @return an error code
     */
    virtual int Init(const std::string& configPath);

    /**
-     * 反初始化
+     * Deinitialize
     */
    virtual void UnInit();
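A typical lifecycle for this class strings together the methods documented in this diff: Init with a config path, Open, IO, Close, UnInit. A hypothetical sketch (the namespace and a default-constructed OpenFlags are assumptions; 0 is assumed to mean success; error handling elided):

void CurveClientLifecycleExample() {
    curve::client::CurveClient client;            // namespace assumed
    if (client.Init("/etc/curve/client.conf") != 0) {
        return;
    }
    OpenFlags flags;                              // assumed default-constructible
    int fd = client.Open("/vol1_user1_", flags);  // filename_username_ format
    if (fd >= 0) {
        client.Close(fd);
    }
    client.UnInit();
}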
@@ -400,62 +421,59 @@ class CurveClient {
    virtual int IncreaseEpoch(const std::string& filename);

    /**
-     * 打开文件
-     * @param filename 文件名,格式为:文件名_用户名_
+     * Open a file
+     * @param filename is the file name, in the format: filename_username_
     * @param[out] sessionId session Id
-     * @return 成功返回fd,失败返回-1
+     * @return fd on success, -1 on failure
     */
-    virtual int Open(const std::string& filename,
-                     const OpenFlags& openflags);
+    virtual int Open(const std::string& filename, const OpenFlags& openflags);

    /**
-     * 重新打开文件
-     * @param filename 文件名,格式为:文件名_用户名_
+     * Reopen a file
+     * @param filename is the file name, in the format: filename_username_
     * @param sessionId session Id
-     * @param[out] newSessionId reOpen之后的新sessionId
-     * @return 成功返回fd,失败返回-1
+     * @param[out] newSessionId is the new sessionId after the reopen
+     * @return fd on success, -1 on failure
     */
-    virtual int ReOpen(const std::string& filename,
-                       const OpenFlags& openflags);
+    virtual int ReOpen(const std::string& filename, const OpenFlags& openflags);

    /**
-     * 关闭文件
-     * @param fd 文件fd
-     * @return 返回错误码
+     * Close a file
+     * @param fd is the file fd
+     * @return an error code
     */
    virtual int Close(int fd);

    /**
-     * 扩展文件
-     * @param filename 文件名,格式为:文件名_用户名_
-     * @param newsize 扩展后的大小
-     * @return 返回错误码
+     * Extend a file
+     * @param filename is the file name, in the format: filename_username_
+     * @param newsize is the size after expansion
+     * @return an error code
     */
-    virtual int Extend(const std::string& filename,
-                       int64_t newsize);
+    virtual int Extend(const std::string& filename, int64_t newsize);

    /**
-     * 获取文件大小
-     * @param fd 文件fd
-     * @return 返回错误码
+     * Get the file size
+     * @param fd is the file fd
+     * @return an error code
     */
    virtual int64_t StatFile(int fd, FileStatInfo* fileStat);

    /**
-     * 异步读
-     * @param fd 文件fd
-     * @param aioctx 异步读写的io上下文
+     * Asynchronous read
+     * @param fd is the file fd
+     * @param aioctx is the async IO context
     * @param dataType type of user buffer
-     * @return 返回错误码
+     * @return an error code
     */
    virtual int AioRead(int fd, CurveAioContext* aioctx, UserDataType dataType);

    /**
-     * 异步写
-     * @param fd 文件fd
-     * @param aioctx 异步读写的io上下文
+     * Asynchronous write
+     * @param fd is the file fd
+     * @param aioctx is the async IO context
     * @param dataType type of user buffer
-     * @return 返回错误码
+     * @return an error code
     */
    virtual int AioWrite(int fd, CurveAioContext* aioctx, UserDataType dataType);

@@ -469,8 +487,8 @@ class CurveClient {
    virtual int AioDiscard(int fd, CurveAioContext* aioctx);

    /**
-     * 测试使用,设置fileclient
-     * @param client 需要设置的fileclient
+     * For testing: set the fileclient
+     * @param client is the fileclient to set
     */
    void SetFileClient(FileClient* client);
diff --git a/include/etcdclient/etcdclient.h b/include/etcdclient/etcdclient.h
index 42f63a7436..b3ce392aba 100644
--- a/include/etcdclient/etcdclient.h
+++ b/include/etcdclient/etcdclient.h
@@ -18,7 +18,6 @@

 /* package command-line-arguments */

-
 #line 1 "cgo-builtin-export-prolog"

 #include  /* for ptrdiff_t below */

@@ -27,21 +26,22 @@
 #define GO_CGO_EXPORT_PROLOGUE_H

 #ifndef GO_CGO_GOSTRING_TYPEDEF
-typedef struct { const char *p; ptrdiff_t n; } _GoString_;
+typedef struct {
+    const char* p;
+    ptrdiff_t n;
+} _GoString_;
 #endif

 #endif

 /* Start of preamble from import "C" comments. */

-
 #line 19 "etcdclient.go"

 #include

-enum EtcdErrCode
-{
-    // grpc errCode, 具体的含义见:
+enum EtcdErrCode {
+    // For the specific meaning of the grpc errCode, see:
     // https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes#ErrGRPCNoSpace
     // https://godoc.org/google.golang.org/grpc/codes#Code
     EtcdOK = 0,
@@ -62,7 +62,7 @@ enum EtcdErrCode
     EtcdDataLoss = 15,
     EtcdUnauthenticated = 16,

-    // 自定义错误码
+    // Custom error codes
     EtcdTxnUnkownOp = 17,
     EtcdObjectNotExist = 18,
     EtcdErrObjectType = 19,
@@ -79,31 +79,26 @@ enum EtcdErrCode
     EtcdObjectLenNotEnough = 30,
 };

-enum OpType {
-    OpPut = 1,
-    OpDelete = 2
-};
+enum OpType { OpPut = 1, OpDelete = 2 };

 struct EtcdConf {
-    char *Endpoints;
+    char* Endpoints;
     int len;
     int DialTimeout;
 };

 struct Operation {
     enum OpType opType;
-    char *key;
-    char *value;
+    char* key;
+    char* value;
     int keyLen;
     int valueLen;
 };

 #line 1 "cgo-generated-wrapper"

-
 /* End of preamble from import "C" comments. */

-
 /* Start of boilerplate cgo prologue.
*/ #line 1 "cgo-gcc-export-header-prolog" @@ -130,15 +125,23 @@ typedef double _Complex GoComplex128; static assertion to make sure the file is being used on architecture at least with matching size of GoInt. */ -typedef char _check_for_64_bit_pointer_matching_GoInt[sizeof(void*)==64/8 ? 1:-1]; +typedef char + _check_for_64_bit_pointer_matching_GoInt[sizeof(void*) == 64 / 8 ? 1 : -1]; #ifndef GO_CGO_GOSTRING_TYPEDEF typedef _GoString_ GoString; #endif -typedef void *GoMap; -typedef void *GoChan; -typedef struct { void *t; void *v; } GoInterface; -typedef struct { void *data; GoInt len; GoInt cap; } GoSlice; +typedef void* GoMap; +typedef void* GoChan; +typedef struct { + void* t; + void* v; +} GoInterface; +typedef struct { + void* data; + GoInt len; + GoInt cap; +} GoSlice; #endif @@ -148,8 +151,7 @@ typedef struct { void *data; GoInt len; GoInt cap; } GoSlice; extern "C" { #endif - -// TODO(lixiaocui): 日志打印看是否需要glog +// TODO(lixiaocui): Log printing to see if glog is required extern GoUint32 NewEtcdClientV3(struct EtcdConf p0); @@ -159,66 +161,77 @@ extern GoUint32 EtcdClientPut(int p0, char* p1, char* p2, int p3, int p4); /* Return type for EtcdClientPutRewtihRevision */ struct EtcdClientPutRewtihRevision_return { - GoUint32 r0; - GoInt64 r1; + GoUint32 r0; + GoInt64 r1; }; -extern struct EtcdClientPutRewtihRevision_return EtcdClientPutRewtihRevision(int p0, char* p1, char* p2, int p3, int p4); +extern struct EtcdClientPutRewtihRevision_return EtcdClientPutRewtihRevision( + int p0, char* p1, char* p2, int p3, int p4); /* Return type for EtcdClientGet */ struct EtcdClientGet_return { - GoUint32 r0; - char* r1; - GoInt r2; - GoInt64 r3; + GoUint32 r0; + char* r1; + GoInt r2; + GoInt64 r3; }; extern struct EtcdClientGet_return EtcdClientGet(int p0, char* p1, int p2); /* Return type for EtcdClientList */ struct EtcdClientList_return { - GoUint32 r0; - GoUint64 r1; - GoInt64 r2; + GoUint32 r0; + GoUint64 r1; + GoInt64 r2; }; -// TODO(lixiaocui): list可能需要有长度限制 +// TODO(lixiaocui): list may require a length limit -extern struct EtcdClientList_return EtcdClientList(int p0, char* p1, char* p2, int p3, int p4); +extern struct EtcdClientList_return EtcdClientList(int p0, char* p1, char* p2, + int p3, int p4); /* Return type for EtcdClientListWithLimitAndRevision */ struct EtcdClientListWithLimitAndRevision_return { - GoUint32 r0; - GoUint64 r1; - GoInt r2; - GoInt64 r3; + GoUint32 r0; + GoUint64 r1; + GoInt r2; + GoInt64 r3; }; -extern struct EtcdClientListWithLimitAndRevision_return EtcdClientListWithLimitAndRevision(unsigned int p0, char* p1, char* p2, int p3, int p4, GoInt64 p5, GoInt64 p6); +extern struct EtcdClientListWithLimitAndRevision_return +EtcdClientListWithLimitAndRevision(unsigned int p0, char* p1, char* p2, int p3, + int p4, GoInt64 p5, GoInt64 p6); extern GoUint32 EtcdClientDelete(int p0, char* p1, int p2); /* Return type for EtcdClientDeleteRewithRevision */ struct EtcdClientDeleteRewithRevision_return { - GoUint32 r0; - GoInt64 r1; + GoUint32 r0; + GoInt64 r1; }; -extern struct EtcdClientDeleteRewithRevision_return EtcdClientDeleteRewithRevision(int p0, char* p1, int p2); +extern struct EtcdClientDeleteRewithRevision_return +EtcdClientDeleteRewithRevision(int p0, char* p1, int p2); -extern GoUint32 EtcdClientTxn2(int p0, struct Operation p1, struct Operation p2); +extern GoUint32 EtcdClientTxn2(int p0, struct Operation p1, + struct Operation p2); -extern GoUint32 EtcdClientTxn3(int p0, struct Operation p1, struct Operation p2, struct Operation p3); +extern GoUint32 
EtcdClientTxn3(int p0, struct Operation p1, struct Operation p2, + struct Operation p3); -extern GoUint32 EtcdClientCompareAndSwap(int p0, char* p1, char* p2, char* p3, int p4, int p5, int p6); +extern GoUint32 EtcdClientCompareAndSwap(int p0, char* p1, char* p2, char* p3, + int p4, int p5, int p6); /* Return type for EtcdElectionCampaign */ struct EtcdElectionCampaign_return { - GoUint32 r0; - GoUint64 r1; + GoUint32 r0; + GoUint64 r1; }; -extern struct EtcdElectionCampaign_return EtcdElectionCampaign(char* p0, int p1, char* p2, int p3, GoUint32 p4, GoUint32 p5); +extern struct EtcdElectionCampaign_return EtcdElectionCampaign(char* p0, int p1, + char* p2, int p3, + GoUint32 p4, + GoUint32 p5); extern GoUint32 EtcdLeaderObserve(GoUint64 p0, char* p1, int p2); @@ -226,23 +239,25 @@ extern GoUint32 EtcdLeaderResign(GoUint64 p0, GoUint64 p1); /* Return type for EtcdClientGetSingleObject */ struct EtcdClientGetSingleObject_return { - GoUint32 r0; - char* r1; - GoInt r2; + GoUint32 r0; + char* r1; + GoInt r2; }; -extern struct EtcdClientGetSingleObject_return EtcdClientGetSingleObject(GoUint64 p0); +extern struct EtcdClientGetSingleObject_return EtcdClientGetSingleObject( + GoUint64 p0); /* Return type for EtcdClientGetMultiObject */ struct EtcdClientGetMultiObject_return { - GoUint32 r0; - char* r1; - GoInt r2; - char* r3; - GoInt r4; + GoUint32 r0; + char* r1; + GoInt r2; + char* r3; + GoInt r4; }; -extern struct EtcdClientGetMultiObject_return EtcdClientGetMultiObject(GoUint64 p0, GoInt p1); +extern struct EtcdClientGetMultiObject_return EtcdClientGetMultiObject( + GoUint64 p0, GoInt p1); extern void EtcdClientRemoveObject(GoUint64 p0); diff --git a/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf b/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf index 71ca380f13..8bc37cb542 100644 --- a/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf +++ b/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf @@ -1,28 +1,28 @@ # part2 socket file address nebdserver.serverAddress=/var/lib/nebd/nebd.sock -# 文件锁路径 +# File lock path metacache.fileLockPath=/var/lib/nebd/lock -# 同步rpc的最大重试次数 +# Maximum number of retries for synchronous rpc request.syncRpcMaxRetryTimes=50 -# rpc请求的重试间隔 +# The retry interval for rpc requests request.rpcRetryIntervalUs=100000 -# rpc请求的最大重试间隔 +# Maximum retry interval for rpc requests request.rpcRetryMaxIntervalUs=64000000 -# rpc hostdown情况下的重试时间 +# The retry time in the case of rpc hostdown request.rpcHostDownRetryIntervalUs=10000 -# brpc的健康检查周期时间,单位s +# The health check cycle time of brpc, in seconds request.rpcHealthCheckIntervalS=1 -# brpc从rpc失败到进行健康检查的最大时间间隔,单位ms +# The maximum time interval from rpc failure to health check in ms for brpc request.rpcMaxDelayHealthCheckIntervalMs=100 -# rpc发送执行队列个数 +# Number of RPC send execution queues request.rpcSendExecQueueNum=2 -# heartbeat间隔 +# heartbeat interval heartbeat.intervalS=5 -# heartbeat rpc超时时间 +# heartbeat RPC timeout heartbeat.rpcTimeoutMs=500 -# 日志路径 +# Log Path log.path=/var/log/nebd/client diff --git a/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf b/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf index b03e7a25c6..4dcb28c7e6 100644 --- a/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf +++ b/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf @@ -1,14 +1,14 @@ -# curve-client配置文件地址 +# curve-client configuration file address curveclient.confPath=/etc/curve/client.conf -#brpc server监听端口 +# brpc server listening port listen.address=/var/lib/nebd/nebd.sock -#元数据文件地址,包含文件名 +# Metadata file address, including file name 
meta.file.path=/var/lib/nebd/nebdserver.meta

-#心跳超时时间
+# Heartbeat timeout
 heartbeat.timeout.sec=30

-#文件超时检测时间间隔
+# File timeout detection interval
 heartbeat.check.interval.ms=3000
\ No newline at end of file
diff --git a/monitor/grafana-report.py b/monitor/grafana-report.py
index a400263e8c..0170470996 100644
--- a/monitor/grafana-report.py
+++ b/monitor/grafana-report.py
@@ -13,17 +13,18 @@
 sender = 'Grafana'
 to_address = ['xxxxxxxxx@163.com']
 username = 'xxxxxxxxx@163.com'
-password = 'xxxxxxxxx'  # SMTP授权码
+password = 'xxxxxxxxx'  # SMTP authorization code
 smtpserver = 'xxxx.163.com:1234'
-sourcefile= '/etc/curve/monitor/grafana/report/report.tex'
-imagedir= '/etc/curve/monitor/grafana/report/images/'
-pdfpath= '/etc/curve/monitor/grafana/report/report.pdf'
+sourcefile = '/etc/curve/monitor/grafana/report/report.tex'
+imagedir = '/etc/curve/monitor/grafana/report/images/'
+pdfpath = '/etc/curve/monitor/grafana/report/report.pdf'
 clustername = '【CURVE】xxxxxxxxx'
 grafanauri = '127.0.0.1:3000'
 reporteruri = '127.0.0.1:8686'
 dashboardid = 'xxxxxxxxx'
 apitoken = 'xxxxxxxxx'

+
 def get_images():
     image_name_list = []
     file = open(sourcefile, 'r')
@@ -32,16 +33,17 @@ def get_images():
         # print (line)
         prefix_image_name = re.findall(r'image\d+', line)
         if prefix_image_name:
-            print (prefix_image_name)
+            print(prefix_image_name)
             image_name_list.append(prefix_image_name[0])
         line = file.readline()
     file.close()
     return image_name_list

+
 def getMsgImage(image_name):
     file_name = imagedir+image_name+'.png'
-    print (file_name)
+    print(file_name)
     fp = open(file_name, 'rb')
     msgImage = MIMEImage(fp.read())
     fp.close()
@@ -49,6 +51,7 @@ def getMsgImage(image_name):
     msgImage.add_header("Content-Disposition", "inline", filename=file_name)
     return msgImage

+
 def attach_body(msgRoot):
     image_list = get_images()

@@ -57,36 +60,41 @@ def attach_body(msgRoot):
         image_body += ('%s' % (image, image))
         msgRoot.attach(getMsgImage(image))

-    html_str = '%s' % (image_body)
+    html_str = '%s' % (
+        image_body)

     mailMsg = """

-可点击如下链接在grafana面板中查看(若显示混乱,请在附件pdf中查看)
-grafana链接
+You can click the link below to view the report in the Grafana dashboard (if it renders incorrectly, see the attached PDF)
+grafana link
""" % (grafanauri) mailMsg += html_str print(mailMsg) - content = MIMEText(mailMsg,'html','utf-8') + content = MIMEText(mailMsg, 'html', 'utf-8') msgRoot.attach(content) -# 发送dashboard日报邮件 +# Send dashboard daily email + + def send_mail(): time_now = int(Time.time()) time_local = Time.localtime(time_now) - dt = Time.strftime("%Y%m%d",time_local) + dt = Time.strftime("%Y%m%d", time_local) msgRoot = MIMEMultipart('related') - msgRoot['Subject'] = '%s集群监控日报-%s' % (clustername, dt) + msgRoot['Subject'] = '%sCluster Monitoring Daily Report-%s' % ( + clustername, dt) msgRoot['From'] = sender - msgRoot['To'] = ",".join( to_address ) # 发给多人 + msgRoot['To'] = ",".join(to_address) # Send to multiple people - # 添加pdf附件 + # Add PDF attachment pdf_attach = MIMEText(open(pdfpath, 'rb').read(), 'base64', 'utf-8') pdf_attach["Content-Type"] = 'application/octet-stream' - # 这里的filename可以任意写,写什么名字,邮件中显示什么名字 - pdf_attach["Content-Disposition"] = 'attachment; filename="reporter-{}.pdf"'.format(dt) + # The file name here can be written arbitrarily, including the name you want to write and the name displayed in the email + pdf_attach["Content-Disposition"] = 'attachment; filename="reporter-{}.pdf"'.format( + dt) msgRoot.attach(pdf_attach) - # 添加正文 + # Add Body attach_body(msgRoot) smtp = smtplib.SMTP_SSL(smtpserver) @@ -94,11 +102,13 @@ def send_mail(): smtp.sendmail(sender, to_address, msgRoot.as_string()) smtp.quit() + def clear(): shutil.rmtree(imagedir) os.mkdir(imagedir) os.chmod(imagedir, 0777) + def generate_report(): downloadcmd = ( "wget -O %s " @@ -108,10 +118,12 @@ def generate_report(): print(downloadcmd) os.system(downloadcmd) + def main(): generate_report() send_mail() clear() + if __name__ == '__main__': main() diff --git a/monitor/grafana/dashboards/chunkserver.json b/monitor/grafana/dashboards/chunkserver.json index 2770cd2802..e48e7a0721 100644 --- a/monitor/grafana/dashboards/chunkserver.json +++ b/monitor/grafana/dashboards/chunkserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process running time", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -218,7 +218,7 @@ } } ], - "title": "进程资源占用", + "title": "Process resource usage", "type": "row" }, { @@ -237,7 +237,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver上所有rpc的每秒处理成功的请求个数", + "description": "The number of successful requests processed per second for all RPCs on the chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -410,7 +410,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在rpc层面的错误个数", + "description": "The number of errors per second at the RPC level for the read_chunk operation", "fill": 1, "gridPos": { "h": 6, @@ -675,7 +675,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "write_chunk每秒在rpc层面的错误个数", + "description": "The number of errors per second at the RPC level for the write_chunk operation", "fill": 1, "gridPos": { "h": 6, @@ -1027,7 +1027,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "rpc层面read chunk延时的分位值", + "description": "Percentile values of RPC-level read chunk latency", "fill": 1, "gridPos": { "h": 7, @@ -1281,7 +1281,7 @@ } } ], - "title": "rpc层读写指标", + "title": "RPC layer read-write metrics", "type": "row" }, { @@ -1300,7 +1300,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在chunk service层面的错误个数", + "description": "Number 
of errors per second for read_chunk at the chunk service layer", "fill": 1, "gridPos": { "h": 7, @@ -1392,7 +1392,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "Number of read_chunk operations successfully processed per second at the chunk service layer", "fill": 1, "gridPos": { "h": 7, @@ -1484,7 +1484,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的read_chunk请求个数", + "description": "Number of read_chunk requests received per second at the chunk service layer", "fill": 1, "gridPos": { "h": 7, @@ -1576,7 +1576,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面write_chunk每秒返回错误的请求个数", + "description": "The number of write_chunk requests per second that return errors at the chunk service layer", "fill": 1, "gridPos": { "h": 7, @@ -1668,7 +1668,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -1762,7 +1762,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的write_chunk请求个数", + "description": "The number of write_chunk requests received by the chunk service layer per second", "fill": 1, "gridPos": { "h": 7, @@ -1854,7 +1854,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk延时的分位值", + "description": "The percentile value of read chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -1965,7 +1965,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面write chunk延时的分位值", + "description": "The percentile value of write chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2076,7 +2076,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面的read chunk的平均延时", + "description": "Average latency of read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2166,7 +2166,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面的write chunk的平均延时", + "description": "Average latency of write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2256,7 +2256,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes read successfully per second at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2346,7 +2346,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes written successfully per second at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2436,7 +2436,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk的io大小的分位值", + "description": "The quantile value of the IO size of the read chunk at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2541,7 +2541,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面write
chunk的io大小的分位值", + "description": "The quantile value of IO size for write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2642,7 +2642,7 @@ } ], "repeat": null, - "title": "chunkserver层读写指标", + "title": "Chunkserver layer read and write metrics", "type": "row" }, { @@ -2664,7 +2664,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的数量", + "description": "Number of selected copysets", "format": "none", "gauge": { "maxValue": 100, @@ -2747,7 +2747,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的chunk数量的总和", + "description": "The total number of chunks in the selected copyset", "format": "none", "gauge": { "maxValue": 100, @@ -2828,7 +2828,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "各copyset上已分配的chunk的数量", + "description": "The number of allocated chunks on each copyset", "fill": 1, "gridPos": { "h": 6, @@ -2920,7 +2920,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的read chunk请求个数", + "description": "The number of read chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3012,7 +3012,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7, @@ -3104,7 +3104,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的read chunk请求个数", + "description": "The number of read chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7, @@ -3196,7 +3196,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的write chunk请求个数", + "description": "The number of write chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3288,7 +3288,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7, @@ -3380,7 +3380,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的write chunk请求个数", + "description": "The number of write chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7, @@ -3472,7 +3472,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3564,7 +3564,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3656,7 +3656,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的read chunk的平均延时", + "description": "Average latency of read chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3746,7 +3746,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的write chunk的平均延时", + "description": "Average latency of write chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3831,7 +3831,7 @@ } } ], - 
"title": "copyset指标", + "title": "Copyset metric", "type": "row" }, { @@ -3850,7 +3850,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -3942,7 +3942,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -4033,7 +4033,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上copyset的数量", + "description": "Number of copysets on chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -4119,7 +4119,7 @@ } } ], - "title": "chunkserver关键指标", + "title": "Chunkserver Key Metrics", "type": "row" }, { @@ -4773,7 +4773,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -4860,7 +4860,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, @@ -5121,7 +5121,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "follower正在下载快照的任务数量(一个copyset最多一个任务)", + "description": "Number of tasks being downloaded by the follower (one copyset can only have one task)", "fill": 1, "gridPos": { "h": 8, @@ -5204,7 +5204,7 @@ } } ], - "title": "Raft关键指标", + "title": "Raft Key Metrics", "type": "row" }, { @@ -5228,7 +5228,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "chunkserver上bthread worker的数量", + "description": "Number of bthread workers on chunkserver", "format": "none", "gauge": { "maxValue": 100, @@ -5311,7 +5311,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上正在被使用的工作线程个数", + "description": "The number of worker threads currently in use on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5399,7 +5399,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上bthread的数量", + "description": "Number of bthreads on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5487,7 +5487,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上execution queue的数量", + "description": "Number of execution queues on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5585,7 +5585,7 @@ } } ], - "title": "线程指标", + "title": "Thread metrics", "type": "row" } ], @@ -5605,7 +5605,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"([[ip:pipe]]):[[port:regex]]\"}", "hide": 2, "includeAll": false, - "label": "实例", + "label": "Instance", "multi": true, "name": "instance", "options": [], @@ -5630,7 +5630,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": true, "name": "ip", "options": [], @@ -5655,7 +5655,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"$ip.*\"}", "hide": 0, "includeAll": false, - "label": "端口号", + "label": "Port", "multi": true, "name": "port", "options": [], diff --git a/monitor/grafana/dashboards/client.json b/monitor/grafana/dashboards/client.json index a7274595c3..6efc67c597 100644 --- a/monitor/grafana/dashboards/client.json +++ b/monitor/grafana/dashboards/client.json @@ -101,7 +101,7 @@ "thresholds": "1,2", 
"timeFrom": null, "timeShift": null, - "title": "客户端运行时间", + "title": "Client runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -200,7 +200,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -215,7 +215,7 @@ "panels": [ { "columns": [], - "description": "客户端的配置情况", + "description": "Configuration of the client", "fontSize": "100%", "gridPos": { "h": 8, @@ -235,7 +235,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -251,7 +251,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -267,7 +267,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -309,12 +309,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "客户端配置", + "title": "Client Configuration", "transform": "table", "type": "table" } ], - "title": "客户端配置", + "title": "Client Configuration", "type": "row" }, { @@ -681,7 +681,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -769,7 +769,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒写入字节数", + "description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1129,7 +1129,7 @@ } } ], - "title": "用户接口层指标", + "title": "User Interface Layer Metrics", "type": "row" }, { @@ -1236,7 +1236,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1323,7 +1323,7 @@ } } ], - "title": "中间业务层指标", + "title": "Intermediate Business Layer Indicators", "type": "row" }, { @@ -1982,7 +1982,7 @@ } } ], - "title": "rpc层指标", + "title": "Rpc layer metrics", "type": "row" }, { @@ -2085,7 +2085,7 @@ } } ], - "title": "与MDS通信指标", + "title": "Communication metrics with MDS", "type": "row" } ], @@ -2108,7 +2108,7 @@ "definition": "label_values({__name__=~\"curve_client.*\", instance=~\".*:90.*\"}, instance)", "hide": 0, "includeAll": true, - "label": "客户端", + "label": "Client", "multi": true, "name": "client", "options": [], @@ -2136,7 +2136,7 @@ "definition": "{__name__=~\"curve_client.*write_qps\", instance=~\"$client\"}", "hide": 0, "includeAll": true, - "label": "文件", + "label": "File", "multi": true, "name": "file", "options": [], diff --git a/monitor/grafana/dashboards/etcd.json b/monitor/grafana/dashboards/etcd.json index 82869aa08a..d1a87934bc 100644 --- a/monitor/grafana/dashboards/etcd.json +++ b/monitor/grafana/dashboards/etcd.json @@ -2464,7 +2464,7 @@ "definition": "etcd_server_has_leader", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/dashboards/mds.json b/monitor/grafana/dashboards/mds.json index c226cf398d..9704ae6e32 100644 --- a/monitor/grafana/dashboards/mds.json +++ b/monitor/grafana/dashboards/mds.json @@ -115,7 +115,7 @@ "panels": [ { "columns": [], - "description": "mds的配置", + "description": "Configuration of mds", "fontSize": "100%", "gridPos": { "h": 11, @@ -135,7 +135,7 @@ }, "styles": [ { - "alias": "实例", + "alias": 
"Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -151,7 +151,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -167,7 +167,7 @@ "unit": "short" }, { - "alias": "配置值", + "alias": "Configuration Values", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -208,12 +208,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "mds配置", + "title": "Mds configuration", "transform": "table", "type": "table" } ], - "title": "mds配置", + "title": "Mds configuration", "type": "row" }, { @@ -228,7 +228,7 @@ "panels": [ { "cacheTimeout": null, - "description": "磁盘剩余容量", + "description": "Disk remaining capacity", "gridPos": { "h": 7, "w": 4, @@ -283,7 +283,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配的磁盘容量,物理空间", + "description": "Cluster allocated disk capacity, physical space", "gridPos": { "h": 7, "w": 4, @@ -338,7 +338,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配容量,逻辑空间", + "description": "Cluster allocated capacity, logical space", "gridPos": { "h": 7, "w": 4, @@ -393,7 +393,7 @@ }, { "cacheTimeout": null, - "description": "集群总容量", + "description": "Total Cluster Capacity", "gridPos": { "h": 7, "w": 4, @@ -510,7 +510,7 @@ "type": "gauge" } ], - "title": "集群信息", + "title": "Cluster Information", "type": "row" }, { @@ -523,7 +523,7 @@ }, "id": 22, "panels": [], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -598,7 +598,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -679,7 +679,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "进程cpu使用情况", + "title": "Process CPU Usage", "tooltip": { "shared": true, "sort": 0, @@ -763,7 +763,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "内存占用", + "title": "Memory usage", "tooltip": { "shared": true, "sort": 0, @@ -847,7 +847,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "mds元数据缓存大小", + "title": "Mds metadata cache size", "tooltip": { "shared": true, "sort": 0, @@ -896,7 +896,7 @@ "panels": [ { "columns": [], - "description": "逻辑池监控指标", + "description": "Logical Pool Monitoring Metrics", "fontSize": "100%", "gridPos": { "h": 8, @@ -916,7 +916,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -932,7 +932,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -978,7 +978,7 @@ "type": "table" } ], - "title": "逻辑池状态", + "title": "Logical Pool Status", "type": "row" }, { @@ -1082,7 +1082,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的leader count", + "description": "The current leader count of all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1173,7 +1173,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的copyset数量", + "description": "The current number of copysets for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1264,7 +1264,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的scatterwidth", + "description": "The current scatterwidth of all chunkservers", "fill": 1, "gridPos": { "h": 11, @@ -1355,7 +1355,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": 
"所有chunkserver当前的rpc层写请求速率", + "description": "Current RPC layer write request rate for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1446,7 +1446,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层写请求iops", + "description": "Current rpc layer write requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1537,7 +1537,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求速率", + "description": "Current RPC layer read request rate for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1628,7 +1628,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求iops", + "description": "Current rpc layer read requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1714,7 +1714,7 @@ } } ], - "title": "chunkserver状态", + "title": "Chunkserver Status", "type": "row" }, { @@ -2233,7 +2233,7 @@ } } ], - "title": "调度监控", + "title": "Scheduling Monitoring", "type": "row" }, { @@ -2251,7 +2251,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "所有rpc请求的qps", + "description": "QPS for all rpc requests", "fill": 1, "gridPos": { "h": 8, @@ -2338,7 +2338,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "当前MDS上所有inflight的rpc请求个数", + "description": "The number of rpc requests for all inflight on the current MDS", "fill": 1, "gridPos": { "h": 8, @@ -2431,7 +2431,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -2519,7 +2519,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -2604,7 +2604,7 @@ } } ], - "title": "RPC层指标", + "title": "RPC Layer Metrics", "type": "row" }, { @@ -2622,7 +2622,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "每秒成功处理的heartbeat个数", + "description": "The number of heartbeat successfully processed per second", "fill": 1, "gridPos": { "h": 8, @@ -2709,7 +2709,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat当前inflight的请求个数", + "description": "The current number of inflight requests for heartbeat", "fill": 1, "gridPos": { "h": 8, @@ -2803,7 +2803,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求平均延时", + "description": "Average latency of heartbeat requests", "fill": 1, "gridPos": { "h": 7, @@ -2891,7 +2891,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求延时分位图", + "description": "Heartbeat Request Delay Bitmap", "fill": 1, "gridPos": { "h": 7, @@ -2974,7 +2974,7 @@ } } ], - "title": "HeartBeat指标", + "title": "HeartBeat metric", "type": "row" } ], @@ -2994,7 +2994,7 @@ "definition": "rpc_server_6666_curve_mds_curve_fsservice_create_file", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/dashboards/report.json b/monitor/grafana/dashboards/report.json index 4e26169ddb..f6539e34c4 100644 --- a/monitor/grafana/dashboards/report.json +++ b/monitor/grafana/dashboards/report.json @@ -224,7 +224,7 @@ }, { "columns": [], - "description": "copyset数量监控指标", + "description": "Copyset quantity monitoring indicator", "fontSize": 
"100%", "gridPos": { "h": 8, @@ -244,7 +244,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "pattern": "__name__", @@ -252,7 +252,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -299,7 +299,7 @@ }, { "columns": [], - "description": "leader数量监控指标", + "description": "Leader quantity monitoring indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -319,7 +319,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -335,7 +335,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -382,7 +382,7 @@ }, { "columns": [], - "description": "scatterwidth指标", + "description": "Scatterwidth indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -402,7 +402,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, @@ -411,7 +411,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -470,7 +470,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -559,7 +559,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -834,7 +834,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1095,7 +1095,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒写入字节数", + "description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1268,7 +1268,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "gridPos": { "h": 8, @@ -1444,7 +1444,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "client单位大小的平均latency", + "description": "Average latency of client unit size", "fill": 1, "gridPos": { "h": 8, @@ -1747,7 +1747,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -1839,7 +1839,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -1938,7 +1938,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2032,7 +2032,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2209,7 +2209,7 @@ 
"dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2301,7 +2301,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2478,7 +2478,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver read chunk 单位大小内的平均延时", + "description": "Average latency per chunkserver read chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -2572,7 +2572,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver write chunk 单位大小内的平均延时", + "description": "Average latency in chunkserver write chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -3023,7 +3023,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -3203,7 +3203,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, diff --git a/monitor/grafana/dashboards/snapshotcloneserver.json b/monitor/grafana/dashboards/snapshotcloneserver.json index 3382ca3c84..7eaab10890 100644 --- a/monitor/grafana/dashboards/snapshotcloneserver.json +++ b/monitor/grafana/dashboards/snapshotcloneserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -161,7 +161,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "cpu使用率", + "title": "CPU usage rate", "tooltip": { "shared": true, "sort": 0, @@ -199,7 +199,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -212,12 +212,12 @@ }, "id": 12, "panels": [], - "title": "任务信息", + "title": "Task Information", "type": "row" }, { "columns": [], - "description": "当前快照任务的信息", + "description": "Information about the current snapshot task", "fontSize": "100%", "gridPos": { "h": 9, @@ -470,13 +470,13 @@ ], "timeFrom": null, "timeShift": null, - "title": "快照任务表", + "title": "Snapshot Task Table", "transform": "table", "type": "table" }, { "columns": [], - "description": "当前克隆任务的信息", + "description": "Information about the current cloning task", "fontSize": "100%", "gridPos": { "h": 9, @@ -800,7 +800,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "克隆任务表", + "title": "Clone Task Table", "transform": "table", "type": "table" }, @@ -810,7 +810,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -856,7 +856,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "快照数量统计", + "title": "Number of Snapshots Statistics", "tooltip": { "shared": true, "sort": 0, @@ -901,7 +901,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -947,7 +947,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "克隆数量统计", + "title": "Clone Count Statistics", "tooltip": 
{ "shared": true, "sort": 0, @@ -1002,7 +1002,7 @@ "definition": "{__name__=~\"snapshotcloneserver_.*\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": false, "name": "instance", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/chunkserver.json b/monitor/grafana/provisioning/dashboards/chunkserver.json index 2770cd2802..89ce686aa7 100644 --- a/monitor/grafana/provisioning/dashboards/chunkserver.json +++ b/monitor/grafana/provisioning/dashboards/chunkserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -218,7 +218,7 @@ } } ], - "title": "进程资源占用", + "title": "Process resource usage", "type": "row" }, { @@ -237,7 +237,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver上所有rpc的每秒处理成功的请求个数", + "description": "The number of successfully processed requests per second for all RPCs on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -410,7 +410,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在rpc层面的错误个数", + "description": "The number of errors per second at the rpc level in read_chunk", "fill": 1, "gridPos": { "h": 6, @@ -675,7 +675,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "write_chunk每秒在rpc层面的错误个数", + "description": "Write_chunk The number of errors per second at the rpc level", "fill": 1, "gridPos": { "h": 6, @@ -1027,7 +1027,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "rpc层面read chunk延时的分位值", + "description": "The quantile value of read chunk delay at the rpc level", "fill": 1, "gridPos": { "h": 7, @@ -1281,7 +1281,7 @@ } } ], - "title": "rpc层读写指标", + "title": "RPC layer read and write metrics", "type": "row" }, { @@ -1300,7 +1300,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在chunk service层面的错误个数", + "description": "The number of read_chunk errors per second at the chunk service level", "fill": 1, "gridPos": { "h": 7, @@ -1392,7 +1392,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -1484,7 +1484,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的read_chunk请求个数", + "description": "The number of read_chunk requests received by the chunk service layer per second", "fill": 1, "gridPos": { "h": 7, @@ -1576,7 +1576,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面write_chunk每秒返回错误的请求个数", + "description": "The number of requests per second that the chunk service level write_chunk returns errors", "fill": 1, "gridPos": { "h": 7, @@ -1668,7 +1668,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -1762,7 +1762,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的write_chunk请求个数", + "description": "The number of Write_chunk requests received by the chunk service layer per second", "fill": 1, "gridPos": { 
"h": 7, @@ -1854,7 +1854,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk延时的分位值", + "description": "The percentile value of read chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -1965,7 +1965,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面write chunk延时的分位值", + "description": "The percentile value of write chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2076,7 +2076,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面的read chunk的平均延时", + "description": "Average latency of read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2166,7 +2166,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面的write chunk的平均延时", + "description": "Average latency of write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2256,7 +2256,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2346,7 +2346,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2436,7 +2436,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk的io大小的分位值", + "description": "The quantile value of the IO size of the read chunk at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2541,7 +2541,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面write chunk的io大小的分位值", + "description": "The quantile value of IO size for write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2642,7 +2642,7 @@ } ], "repeat": null, - "title": "chunkserver层读写指标", + "title": "Chunkserver layer read and write metrics", "type": "row" }, { @@ -2664,7 +2664,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的数量", + "description": "Number of selected copysets", "format": "none", "gauge": { "maxValue": 100, @@ -2747,7 +2747,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的chunk数量的总和", + "description": "The total number of chunks in the selected copyset", "format": "none", "gauge": { "maxValue": 100, @@ -2828,7 +2828,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "各copyset上已分配的chunk的数量", + "description": "The number of allocated chunks on each copyset", "fill": 1, "gridPos": { "h": 6, @@ -2920,7 +2920,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的read chunk请求个数", + "description": "The number of read chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3012,7 +3012,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7, @@ -3104,7 +3104,7 @@ "dashLength": 
10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的read chunk请求个数", + "description": "The number of read chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7, @@ -3196,7 +3196,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的write chunk请求个数", + "description": "The number of write chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3288,7 +3288,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7, @@ -3380,7 +3380,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的write chunk请求个数", + "description": "The number of write chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7, @@ -3472,7 +3472,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3564,7 +3564,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3656,7 +3656,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的read chunk的平均延时", + "description": "Average latency of read chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3746,7 +3746,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的write chunk的平均延时", + "description": "Average latency of write chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3831,7 +3831,7 @@ } } ], - "title": "copyset指标", + "title": "Copyset metric", "type": "row" }, { @@ -3850,7 +3850,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -3942,7 +3942,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -4033,7 +4033,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上copyset的数量", + "description": "Number of copysets on chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -4119,7 +4119,7 @@ } } ], - "title": "chunkserver关键指标", + "title": "Chunkserver Key Metrics", "type": "row" }, { @@ -4773,7 +4773,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -4860,7 +4860,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, @@ -5121,7 +5121,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "follower正在下载快照的任务数量(一个copyset最多一个任务)", + "description": "Number of tasks being downloaded by the follower (one copyset can only have one task)", "fill": 1, "gridPos": { "h": 8, @@ -5204,7 +5204,7 @@ 
} } ], - "title": "Raft关键指标", + "title": "Raft Key Metrics", "type": "row" }, { @@ -5228,7 +5228,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "chunkserver上bthread worker的数量", + "description": "Number of bthread workers on chunkserver", "format": "none", "gauge": { "maxValue": 100, @@ -5311,7 +5311,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上正在被使用的工作线程个数", + "description": "The number of worker threads currently in use on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5399,7 +5399,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上bthread的数量", + "description": "Number of bthreads on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5487,7 +5487,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上execution queue的数量", + "description": "Number of execution queues on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5585,7 +5585,7 @@ } } ], - "title": "线程指标", + "title": "Thread metrics", "type": "row" } ], @@ -5605,7 +5605,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"([[ip:pipe]]):[[port:regex]]\"}", "hide": 2, "includeAll": false, - "label": "实例", + "label": "Instance", "multi": true, "name": "instance", "options": [], @@ -5630,7 +5630,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": true, "name": "ip", "options": [], @@ -5655,7 +5655,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"$ip.*\"}", "hide": 0, "includeAll": false, - "label": "端口号", + "label": "Port", "multi": true, "name": "port", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/client.json b/monitor/grafana/provisioning/dashboards/client.json index a7274595c3..6efc67c597 100644 --- a/monitor/grafana/provisioning/dashboards/client.json +++ b/monitor/grafana/provisioning/dashboards/client.json @@ -101,7 +101,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "客户端运行时间", + "title": "Client runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -200,7 +200,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -215,7 +215,7 @@ "panels": [ { "columns": [], - "description": "客户端的配置情况", + "description": "Configuration of the client", "fontSize": "100%", "gridPos": { "h": 8, @@ -235,7 +235,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -251,7 +251,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -267,7 +267,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -309,12 +309,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "客户端配置", + "title": "Client Configuration", "transform": "table", "type": "table" } ], - "title": "客户端配置", + "title": "Client Configuration", "type": "row" }, { @@ -681,7 +681,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -769,7 +769,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒写入字节数", + 
"description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1129,7 +1129,7 @@ } } ], - "title": "用户接口层指标", + "title": "User Interface Layer Metrics", "type": "row" }, { @@ -1236,7 +1236,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1323,7 +1323,7 @@ } } ], - "title": "中间业务层指标", + "title": "Intermediate Business Layer Indicators", "type": "row" }, { @@ -1982,7 +1982,7 @@ } } ], - "title": "rpc层指标", + "title": "Rpc layer metrics", "type": "row" }, { @@ -2085,7 +2085,7 @@ } } ], - "title": "与MDS通信指标", + "title": "Communication metrics with MDS", "type": "row" } ], @@ -2108,7 +2108,7 @@ "definition": "label_values({__name__=~\"curve_client.*\", instance=~\".*:90.*\"}, instance)", "hide": 0, "includeAll": true, - "label": "客户端", + "label": "Client", "multi": true, "name": "client", "options": [], @@ -2136,7 +2136,7 @@ "definition": "{__name__=~\"curve_client.*write_qps\", instance=~\"$client\"}", "hide": 0, "includeAll": true, - "label": "文件", + "label": "File", "multi": true, "name": "file", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/etcd.json b/monitor/grafana/provisioning/dashboards/etcd.json index 82869aa08a..d1a87934bc 100644 --- a/monitor/grafana/provisioning/dashboards/etcd.json +++ b/monitor/grafana/provisioning/dashboards/etcd.json @@ -2464,7 +2464,7 @@ "definition": "etcd_server_has_leader", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/mds.json b/monitor/grafana/provisioning/dashboards/mds.json index c226cf398d..9704ae6e32 100644 --- a/monitor/grafana/provisioning/dashboards/mds.json +++ b/monitor/grafana/provisioning/dashboards/mds.json @@ -115,7 +115,7 @@ "panels": [ { "columns": [], - "description": "mds的配置", + "description": "Configuration of mds", "fontSize": "100%", "gridPos": { "h": 11, @@ -135,7 +135,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -151,7 +151,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -167,7 +167,7 @@ "unit": "short" }, { - "alias": "配置值", + "alias": "Configuration Values", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -208,12 +208,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "mds配置", + "title": "Mds configuration", "transform": "table", "type": "table" } ], - "title": "mds配置", + "title": "Mds configuration", "type": "row" }, { @@ -228,7 +228,7 @@ "panels": [ { "cacheTimeout": null, - "description": "磁盘剩余容量", + "description": "Disk remaining capacity", "gridPos": { "h": 7, "w": 4, @@ -283,7 +283,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配的磁盘容量,物理空间", + "description": "Cluster allocated disk capacity, physical space", "gridPos": { "h": 7, "w": 4, @@ -338,7 +338,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配容量,逻辑空间", + "description": "Cluster allocated capacity, logical space", "gridPos": { "h": 7, "w": 4, @@ -393,7 +393,7 @@ }, { "cacheTimeout": null, - "description": "集群总容量", + "description": "Total Cluster Capacity", "gridPos": { "h": 7, "w": 4, @@ -510,7 +510,7 @@ "type": "gauge" } ], - "title": "集群信息", + "title": "Cluster 
Information", "type": "row" }, { @@ -523,7 +523,7 @@ }, "id": 22, "panels": [], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -598,7 +598,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -679,7 +679,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "进程cpu使用情况", + "title": "Process CPU Usage", "tooltip": { "shared": true, "sort": 0, @@ -763,7 +763,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "内存占用", + "title": "Memory usage", "tooltip": { "shared": true, "sort": 0, @@ -847,7 +847,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "mds元数据缓存大小", + "title": "Mds metadata cache size", "tooltip": { "shared": true, "sort": 0, @@ -896,7 +896,7 @@ "panels": [ { "columns": [], - "description": "逻辑池监控指标", + "description": "Logical Pool Monitoring Metrics", "fontSize": "100%", "gridPos": { "h": 8, @@ -916,7 +916,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -932,7 +932,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -978,7 +978,7 @@ "type": "table" } ], - "title": "逻辑池状态", + "title": "Logical Pool Status", "type": "row" }, { @@ -1082,7 +1082,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的leader count", + "description": "The current leader count of all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1173,7 +1173,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的copyset数量", + "description": "The current number of copysets for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1264,7 +1264,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的scatterwidth", + "description": "The current scatterwidth of all chunkservers", "fill": 1, "gridPos": { "h": 11, @@ -1355,7 +1355,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层写请求速率", + "description": "Current RPC layer write request rate for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1446,7 +1446,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层写请求iops", + "description": "Current rpc layer write requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1537,7 +1537,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求速率", + "description": "Current RPC layer read request rate for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1628,7 +1628,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求iops", + "description": "Current rpc layer read requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1714,7 +1714,7 @@ } } ], - "title": "chunkserver状态", + "title": "Chunkserver Status", "type": "row" }, { @@ -2233,7 +2233,7 @@ } } ], - "title": "调度监控", + "title": "Scheduling Monitoring", "type": "row" }, { @@ -2251,7 +2251,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "所有rpc请求的qps", + "description": "QPS for all rpc requests", "fill": 1, "gridPos": { "h": 8, @@ -2338,7 +2338,7 @@ "dashLength": 10, "dashes": false, "datasource": 
"Prometheus", - "description": "当前MDS上所有inflight的rpc请求个数", + "description": "The number of rpc requests for all inflight on the current MDS", "fill": 1, "gridPos": { "h": 8, @@ -2431,7 +2431,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -2519,7 +2519,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -2604,7 +2604,7 @@ } } ], - "title": "RPC层指标", + "title": "RPC Layer Metrics", "type": "row" }, { @@ -2622,7 +2622,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "每秒成功处理的heartbeat个数", + "description": "The number of heartbeat successfully processed per second", "fill": 1, "gridPos": { "h": 8, @@ -2709,7 +2709,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat当前inflight的请求个数", + "description": "The current number of inflight requests for heartbeat", "fill": 1, "gridPos": { "h": 8, @@ -2803,7 +2803,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求平均延时", + "description": "Average latency of heartbeat requests", "fill": 1, "gridPos": { "h": 7, @@ -2891,7 +2891,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求延时分位图", + "description": "Heartbeat Request Delay Bitmap", "fill": 1, "gridPos": { "h": 7, @@ -2974,7 +2974,7 @@ } } ], - "title": "HeartBeat指标", + "title": "HeartBeat metric", "type": "row" } ], @@ -2994,7 +2994,7 @@ "definition": "rpc_server_6666_curve_mds_curve_fsservice_create_file", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/report.json b/monitor/grafana/provisioning/dashboards/report.json index 4e26169ddb..f6539e34c4 100644 --- a/monitor/grafana/provisioning/dashboards/report.json +++ b/monitor/grafana/provisioning/dashboards/report.json @@ -224,7 +224,7 @@ }, { "columns": [], - "description": "copyset数量监控指标", + "description": "Copyset quantity monitoring indicator", "fontSize": "100%", "gridPos": { "h": 8, @@ -244,7 +244,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "pattern": "__name__", @@ -252,7 +252,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -299,7 +299,7 @@ }, { "columns": [], - "description": "leader数量监控指标", + "description": "Leader quantity monitoring indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -319,7 +319,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -335,7 +335,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -382,7 +382,7 @@ }, { "columns": [], - "description": "scatterwidth指标", + "description": "Scatterwidth indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -402,7 +402,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, @@ -411,7 +411,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -470,7 +470,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - 
"description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -559,7 +559,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -834,7 +834,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1095,7 +1095,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒写入字节数", + "description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1268,7 +1268,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "gridPos": { "h": 8, @@ -1444,7 +1444,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "client单位大小的平均latency", + "description": "Average latency of client unit size", "fill": 1, "gridPos": { "h": 8, @@ -1747,7 +1747,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -1839,7 +1839,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -1938,7 +1938,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2032,7 +2032,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2209,7 +2209,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2301,7 +2301,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2478,7 +2478,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver read chunk 单位大小内的平均延时", + "description": "Average latency per chunkserver read chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -2572,7 +2572,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver write chunk 单位大小内的平均延时", + "description": "Average latency in chunkserver write chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -3023,7 +3023,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -3203,7 +3203,7 @@ "bars": false, "dashLength": 10, 
"dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, diff --git a/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json b/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json index 3382ca3c84..7eaab10890 100644 --- a/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json +++ b/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -161,7 +161,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "cpu使用率", + "title": "CPU usage rate", "tooltip": { "shared": true, "sort": 0, @@ -199,7 +199,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -212,12 +212,12 @@ }, "id": 12, "panels": [], - "title": "任务信息", + "title": "Task Information", "type": "row" }, { "columns": [], - "description": "当前快照任务的信息", + "description": "Information about the current snapshot task", "fontSize": "100%", "gridPos": { "h": 9, @@ -470,13 +470,13 @@ ], "timeFrom": null, "timeShift": null, - "title": "快照任务表", + "title": "Snapshot Task Table", "transform": "table", "type": "table" }, { "columns": [], - "description": "当前克隆任务的信息", + "description": "Information about the current cloning task", "fontSize": "100%", "gridPos": { "h": 9, @@ -800,7 +800,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "克隆任务表", + "title": "Clone Task Table", "transform": "table", "type": "table" }, @@ -810,7 +810,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -856,7 +856,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "快照数量统计", + "title": "Number of Snapshots Statistics", "tooltip": { "shared": true, "sort": 0, @@ -901,7 +901,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -947,7 +947,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "克隆数量统计", + "title": "Clone Count Statistics", "tooltip": { "shared": true, "sort": 0, @@ -1002,7 +1002,7 @@ "definition": "{__name__=~\"snapshotcloneserver_.*\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": false, "name": "instance", "options": [], diff --git a/nebd/etc/nebd/nebd-client.conf b/nebd/etc/nebd/nebd-client.conf index 1207e5bbd0..6baa9c2a51 100644 --- a/nebd/etc/nebd/nebd-client.conf +++ b/nebd/etc/nebd/nebd-client.conf @@ -1,28 +1,28 @@ # part2 socket file address nebdserver.serverAddress=/data/nebd/nebd.sock # __CURVEADM_TEMPLATE__ ${prefix}/data/nebd.sock __CURVEADM_TEMPLATE__ -# 文件锁路径 +# File lock path metacache.fileLockPath=/data/nebd/lock # __CURVEADM_TEMPLATE__ ${prefix}/data/lock __CURVEADM_TEMPLATE__ -# 同步rpc的最大重试次数 +# Maximum number of retries for synchronous rpc request.syncRpcMaxRetryTimes=50 -# rpc请求的重试间隔 +# The retry interval for rpc requests request.rpcRetryIntervalUs=100000 -# rpc请求的最大重试间隔 +# Maximum retry interval for rpc requests request.rpcRetryMaxIntervalUs=64000000 -# rpc hostdown情况下的重试时间 +# The retry time in the case of rpc hostdown request.rpcHostDownRetryIntervalUs=10000 -# brpc的健康检查周期时间,单位s +# The health check cycle time of brpc, in seconds request.rpcHealthCheckIntervalS=1 -# 
brpc从rpc失败到进行健康检查的最大时间间隔,单位ms +# The maximum time interval from rpc failure to health check in ms for brpc request.rpcMaxDelayHealthCheckIntervalMs=100 -# rpc发送执行队列个数 +# Number of RPC send execution queues request.rpcSendExecQueueNum=2 -# heartbeat间隔 +# heartbeat interval heartbeat.intervalS=5 -# heartbeat rpc超时时间 +# heartbeat RPC timeout heartbeat.rpcTimeoutMs=500 -# 日志路径 +# Log Path log.path=/data/log/nebd/client # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__ diff --git a/nebd/etc/nebd/nebd-server.conf b/nebd/etc/nebd/nebd-server.conf index a6d2fbe534..1ef0966cc6 100644 --- a/nebd/etc/nebd/nebd-server.conf +++ b/nebd/etc/nebd/nebd-server.conf @@ -1,16 +1,16 @@ -# curve-client配置文件地址 +# curve-client configuration file address curveclient.confPath=/etc/curve/client.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/client.conf __CURVEADM_TEMPLATE__ -#brpc server监听端口 +# brpc server listening port listen.address=/data/nebd/nebd.sock # __CURVEADM_TEMPLATE__ ${prefix}/data/nebd.sock __CURVEADM_TEMPLATE__ -#元数据文件地址,包含文件名 +# Metadata file address, including file name meta.file.path=/data/nebd/nebdserver.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/nebdserver.meta __CURVEADM_TEMPLATE__ -#心跳超时时间 +# Heartbeat timeout heartbeat.timeout.sec=30 -#文件超时检测时间间隔 +# File timeout detection interval heartbeat.check.interval.ms=3000 # return rpc when io error diff --git a/nebd/nebd-package/usr/bin/nebd-daemon b/nebd/nebd-package/usr/bin/nebd-daemon index fb8242d1dc..3204bc8732 100755 --- a/nebd/nebd-package/usr/bin/nebd-daemon +++ b/nebd/nebd-package/usr/bin/nebd-daemon @@ -138,7 +138,7 @@ function stop_one() { return fi - # 判断是否已经通过daemon启动了nebd-server + # Determine if nebd-server has been started through daemon daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running if [ $? -ne 0 ]; then echo "$1: didn't start nebd-server by daemon" @@ -179,7 +179,7 @@ function restart_one() { return fi - # 判断是否已经通过daemon启动了nebd-server + # Determine if nebd-server has been started through daemon daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running if [ $? 
-ne 0 ]; then echo "$1: didn't start nebd-server by daemon" @@ -267,7 +267,7 @@ function status() { done } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " nebd-daemon start -- start deamon process and watch on nebd-server process for all instance" @@ -283,7 +283,7 @@ function usage() { echo " nebd-daemon status-one -- show if the nebd-server is running by daemon for current user's instance" } -# 检查参数启动参数,最少1个 +# Check startup parameters, at least 1 is required if [ $# -lt 1 ]; then usage exit diff --git a/nebd/src/common/configuration.cpp b/nebd/src/common/configuration.cpp index 69a23ebe43..3c331c7cee 100644 --- a/nebd/src/common/configuration.cpp +++ b/nebd/src/common/configuration.cpp @@ -22,10 +22,10 @@ #include "nebd/src/common/configuration.h" -#include +#include #include +#include #include -#include namespace nebd { namespace common { @@ -54,8 +54,10 @@ bool Configuration::LoadConfig() { } bool Configuration::SaveConfig() { - // 当前先只保存配置,原文件的注释等内容先忽略 - // TODO(yyk): 后续考虑改成原文件格式不变,只修改配置值 + // Currently, only the configuration is saved, and the comments and other + // contents of the original file are ignored + // TODO(yyk): In the future, consider keeping the original file format + // unchanged and modifying only the configuration values std::ofstream wStream(confFile_); if (wStream.is_open()) { for (auto& pair : config_) { @@ -73,38 +75,33 @@ std::string Configuration::DumpConfig() { return ""; } - std::map Configuration::ListConfig() const { return config_; } -void Configuration::SetConfigPath(const std::string &path) { - confFile_ = path; -} +void Configuration::SetConfigPath(const std::string& path) { confFile_ = path; } -std::string Configuration::GetConfigPath() { - return confFile_; -} +std::string Configuration::GetConfigPath() { return confFile_; } -std::string Configuration::GetStringValue(const std::string &key) { +std::string Configuration::GetStringValue(const std::string& key) { return GetValue(key); } -bool Configuration::GetStringValue(const std::string &key, std::string *out) { +bool Configuration::GetStringValue(const std::string& key, std::string* out) { return GetValue(key, out); } -void Configuration::SetStringValue(const std::string &key, - const std::string &value) { +void Configuration::SetStringValue(const std::string& key, + const std::string& value) { SetValue(key, value); } -int Configuration::GetIntValue(const std::string &key, uint64_t defaultvalue) { +int Configuration::GetIntValue(const std::string& key, uint64_t defaultvalue) { std::string value = GetValue(key); return (value == "") ?
defaultvalue : std::stoi(value); } -bool Configuration::GetIntValue(const std::string &key, int *out) { +bool Configuration::GetIntValue(const std::string& key, int* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoi(res); @@ -113,7 +110,7 @@ bool Configuration::GetIntValue(const std::string &key, int *out) { return false; } -bool Configuration::GetUInt32Value(const std::string &key, uint32_t *out) { +bool Configuration::GetUInt32Value(const std::string& key, uint32_t* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoul(res); @@ -122,7 +119,7 @@ bool Configuration::GetUInt32Value(const std::string &key, uint32_t *out) { return false; } -bool Configuration::GetUInt64Value(const std::string &key, uint64_t *out) { +bool Configuration::GetUInt64Value(const std::string& key, uint64_t* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoull(res); @@ -141,18 +138,17 @@ bool Configuration::GetInt64Value(const std::string& key, int64_t* out) { return false; } -void Configuration::SetIntValue(const std::string &key, const int value) { +void Configuration::SetIntValue(const std::string& key, const int value) { SetValue(key, std::to_string(value)); } -double Configuration::GetDoubleValue( - const std::string &key, - double defaultvalue) { +double Configuration::GetDoubleValue(const std::string& key, + double defaultvalue) { std::string value = GetValue(key); return (value == "") ? defaultvalue : std::stod(value); } -bool Configuration::GetDoubleValue(const std::string &key, double *out) { +bool Configuration::GetDoubleValue(const std::string& key, double* out) { std::string res; if (GetValue(key, &res)) { *out = std::stod(res); @@ -161,18 +157,17 @@ bool Configuration::GetDoubleValue(const std::string &key, double *out) { return false; } -void Configuration::SetDoubleValue(const std::string &key, const double value) { +void Configuration::SetDoubleValue(const std::string& key, const double value) { SetValue(key, std::to_string(value)); } - -double Configuration::GetFloatValue( - const std::string &key, float defaultvalue) { +double Configuration::GetFloatValue(const std::string& key, + float defaultvalue) { std::string value = GetValue(key); return (value == "") ? 
defaultvalue : std::stof(value); } -bool Configuration::GetFloatValue(const std::string &key, float *out) { +bool Configuration::GetFloatValue(const std::string& key, float* out) { std::string res; if (GetValue(key, &res)) { *out = std::stof(res); @@ -181,11 +176,11 @@ bool Configuration::GetFloatValue(const std::string &key, float *out) { return false; } -void Configuration::SetFloatValue(const std::string &key, const float value) { +void Configuration::SetFloatValue(const std::string& key, const float value) { SetValue(key, std::to_string(value)); } -bool Configuration::GetBoolValue(const std::string &key, bool defaultvalue) { +bool Configuration::GetBoolValue(const std::string& key, bool defaultvalue) { std::string svalue = config_[key]; transform(svalue.begin(), svalue.end(), svalue.begin(), ::tolower); @@ -195,7 +190,7 @@ bool Configuration::GetBoolValue(const std::string &key, bool defaultvalue) { return ret; } -bool Configuration::GetBoolValue(const std::string &key, bool *out) { +bool Configuration::GetBoolValue(const std::string& key, bool* out) { std::string res; if (GetValue(key, &res)) { transform(res.begin(), res.end(), res.begin(), ::tolower); @@ -215,16 +210,15 @@ bool Configuration::GetBoolValue(const std::string &key, bool *out) { return false; } - -void Configuration::SetBoolValue(const std::string &key, const bool value) { +void Configuration::SetBoolValue(const std::string& key, const bool value) { SetValue(key, std::to_string(value)); } -std::string Configuration::GetValue(const std::string &key) { +std::string Configuration::GetValue(const std::string& key) { return config_[key]; } -bool Configuration::GetValue(const std::string &key, std::string *out) { +bool Configuration::GetValue(const std::string& key, std::string* out) { if (config_.find(key) != config_.end()) { *out = config_[key]; return true; @@ -233,7 +227,7 @@ bool Configuration::GetValue(const std::string &key, std::string *out) { return false; } -void Configuration::SetValue(const std::string &key, const std::string &value) { +void Configuration::SetValue(const std::string& key, const std::string& value) { config_[key] = value; } diff --git a/nebd/src/common/configuration.h b/nebd/src/common/configuration.h index 95df251e80..642d3be2ad 100644 --- a/nebd/src/common/configuration.h +++ b/nebd/src/common/configuration.h @@ -20,8 +20,8 @@ * Author: hzchenwei7 */ -#include #include +#include #ifndef NEBD_SRC_COMMON_CONFIGURATION_H_ #define NEBD_SRC_COMMON_CONFIGURATION_H_ @@ -39,79 +39,80 @@ class Configuration { std::string DumpConfig(); std::map ListConfig() const; - void SetConfigPath(const std::string &path); + void SetConfigPath(const std::string& path); std::string GetConfigPath(); - std::string GetStringValue(const std::string &key); + std::string GetStringValue(const std::string& key); /* - * @brief GetStringValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetStringValue(const std::string &key, std::string *out); - void SetStringValue(const std::string &key, const std::string &value); - - int GetIntValue(const std::string &key, uint64_t defaultvalue = 0); + * @brief GetStringValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetStringValue(const std::string& key, std::string* out); + void SetStringValue(const std::string& key, const std::string& value); + + int 
GetIntValue(const std::string& key, uint64_t defaultvalue = 0); /* - * @brief GetIntValue/GetUInt32Value/GetUInt64Value 获取指定配置项的值 //NOLINT - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetIntValue(const std::string &key, int *out); - bool GetUInt32Value(const std::string &key, uint32_t *out); - bool GetUInt64Value(const std::string &key, uint64_t *out); + * @brief GetIntValue/GetUInt32Value/GetUInt64Value Get the value of the + * specified configuration item //NOLINT + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetIntValue(const std::string& key, int* out); + bool GetUInt32Value(const std::string& key, uint32_t* out); + bool GetUInt64Value(const std::string& key, uint64_t* out); bool GetInt64Value(const std::string& key, int64_t* out); - void SetIntValue(const std::string &key, const int value); + void SetIntValue(const std::string& key, const int value); - double GetDoubleValue(const std::string &key, double defaultvalue = 0.0); + double GetDoubleValue(const std::string& key, double defaultvalue = 0.0); /* - * @brief GetDoubleValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetDoubleValue(const std::string &key, double *out); - void SetDoubleValue(const std::string &key, const double value); - - double GetFloatValue(const std::string &key, float defaultvalue = 0.0); + * @brief GetDoubleValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetDoubleValue(const std::string& key, double* out); + void SetDoubleValue(const std::string& key, const double value); + + double GetFloatValue(const std::string& key, float defaultvalue = 0.0); /* - * @brief GetFloatValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetFloatValue(const std::string &key, float *out); - void SetFloatValue(const std::string &key, const float value); - - bool GetBoolValue(const std::string &key, bool defaultvalue = false); + * @brief GetFloatValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetFloatValue(const std::string& key, float* out); + void SetFloatValue(const std::string& key, const float value); + + bool GetBoolValue(const std::string& key, bool defaultvalue = false); /* - * @brief GetBoolValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetBoolValue(const std::string &key, bool *out); - void SetBoolValue(const std::string &key, const bool value); - - std::string GetValue(const std::string &key); - bool GetValue(const std::string &key, std::string *out); - void SetValue(const std::string &key, const std::string &value); + * @brief GetBoolValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetBoolValue(const std::string& key, bool* out); + void SetBoolValue(const std::string& key, const bool value); + + std::string GetValue(const std::string& key); bool GetValue(const std::string& key, std::string* out); + void SetValue(const std::string& key, const std::string& value); private: - std::string confFile_; - std::map config_; + std::string confFile_; + std::map config_; }; } // namespace common
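As a quick illustration of the Configuration API above, here is a minimal sketch of a typical loading sequence. The key names ("example.addr" and so on) are hypothetical, and the sketch relies only on the accessors declared in this header: the bool-returning overloads report whether a key exists, while the default-value overloads always produce a result.

    #include <string>

    #include "nebd/src/common/configuration.h"

    // Sketch only: the keys are made up for the example.
    int LoadExampleOptions(nebd::common::Configuration* conf) {
        std::string addr;
        if (!conf->GetStringValue("example.addr", &addr)) {
            return -1;  // missing key: the bool overload returns false
        }
        // Default-value overloads never fail; absent keys yield the default.
        int timeoutMs = conf->GetIntValue("example.timeoutMs", 1000);
        bool verbose = conf->GetBoolValue("example.verbose", false);
        (void)timeoutMs;
        (void)verbose;
        return 0;
    }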
diff --git a/nebd/src/common/crc32.h b/nebd/src/common/crc32.h index 627218fcbd..238b1ce4fc 100644 --- a/nebd/src/common/crc32.h +++ b/nebd/src/common/crc32.h @@ -23,34 +23,36 @@ #ifndef NEBD_SRC_COMMON_CRC32_H_ #define NEBD_SRC_COMMON_CRC32_H_ +#include #include #include -#include - namespace nebd { namespace common { /** - * 计算数据的CRC32校验码(CRC32C),基于brpc的crc32库进行封装 - * @param pData 待计算的数据 - * @param iLen 待计算的数据长度 - * @return 32位的数据CRC32校验码 + * Calculate the CRC32 checksum (CRC32C) of the data, a wrapper around + * brpc's crc32 library + * @param pData The data to be calculated + * @param iLen The length of data to be calculated + * @return 32-bit data CRC32 checksum */ -inline uint32_t CRC32(const char *pData, size_t iLen) { +inline uint32_t CRC32(const char* pData, size_t iLen) { return butil::crc32c::Value(pData, iLen); } /** - * 计算数据的CRC32校验码(CRC32C),基于brpc的crc32库进行封装. 此函数支持继承式 - * 计算,以支持对SGL类型的数据计算单个CRC校验码。满足如下约束: - * CRC32("hello world", 11) == CRC32(CRC32("hello ", 6), "world", 5) - * @param crc 起始的crc校验码 - * @param pData 待计算的数据 - * @param iLen 待计算的数据长度 - * @return 32位的数据CRC32校验码 + * Calculate the CRC32 checksum (CRC32C) of the data, a wrapper around + * brpc's crc32 library. This function supports incremental calculation, + * so a single CRC checksum can be computed over SGL-type data piece by + * piece. It satisfies the following constraint: + * CRC32("hello world", 11) == CRC32(CRC32("hello ", 6), "world", 5) + * @param crc starting crc checksum + * @param pData The data to be calculated + * @param iLen The length of data to be calculated + * @return 32-bit data CRC32 checksum */ -inline uint32_t CRC32(uint32_t crc, const char *pData, size_t iLen) { +inline uint32_t CRC32(uint32_t crc, const char* pData, size_t iLen) { return butil::crc32c::Extend(crc, pData, iLen); }
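The constraint documented above can be checked directly. The following minimal sketch assumes nothing beyond the two CRC32 overloads declared in this header:

    #include <cassert>

    #include "nebd/src/common/crc32.h"

    // One-shot checksum versus incremental extension over the same bytes.
    inline void Crc32IncrementalCheck() {
        const char data[] = "hello world";
        uint32_t whole = nebd::common::CRC32(data, 11);
        uint32_t prefix = nebd::common::CRC32(data, 6);              // "hello "
        uint32_t joined = nebd::common::CRC32(prefix, data + 6, 5);  // "world"
        assert(whole == joined);  // the documented constraint holds
    }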
diff --git a/nebd/src/common/file_lock.h b/nebd/src/common/file_lock.h index 277cfebcf7..dfd644b98b 100644 --- a/nebd/src/common/file_lock.h +++ b/nebd/src/common/file_lock.h @@ -28,31 +28,30 @@ namespace nebd { namespace common { -// 文件锁 +// File lock class FileLock { public: explicit FileLock(const std::string& fileName) - : fileName_(fileName), fd_(-1) {} + : fileName_(fileName), fd_(-1) {} FileLock() : fileName_(""), fd_(-1) {} ~FileLock() = default; /** - * @brief 获取文件锁 - * @return 成功返回0,失败返回-1 + * @brief Get file lock + * @return returns 0 for success, -1 for failure */ int AcquireFileLock(); - /** - * @brief 释放文件锁 + * @brief Release file lock */ void ReleaseFileLock(); private: - // 锁文件的文件名 + // Name of the lock file std::string fileName_; - // 锁文件的fd + // fd of the lock file int fd_; }; diff --git a/nebd/src/common/name_lock.h b/nebd/src/common/name_lock.h index ae34c182a9..e179c4272d 100644 --- a/nebd/src/common/name_lock.h +++ b/nebd/src/common/name_lock.h @@ -23,12 +23,12 @@ #ifndef NEBD_SRC_COMMON_NAME_LOCK_H_ #define NEBD_SRC_COMMON_NAME_LOCK_H_ +#include +#include +#include // NOLINT #include #include #include -#include -#include -#include // NOLINT #include "nebd/src/common/uncopyable.h" @@ -40,29 +40,28 @@ class NameLock : public Uncopyable { explicit NameLock(int bucketNum = 256); /** - * @brief 对指定string加锁 + * @brief Lock the specified string * - * @param lockStr 被加锁的string + * @param lockStr The string to be locked */ - void Lock(const std::string &lockStr); + void Lock(const std::string& lockStr); /** - * @brief 尝试指定sting加锁 + * @brief Try to lock the specified string * - * @param lockStr 被加锁的string + * @param lockStr The string to be locked * - * @retval 成功 - * @retval 失败 + * @retval succeeded + * @retval failed */ - bool TryLock(const std::string &lockStr); + bool TryLock(const std::string& lockStr); /** - * @brief 对指定string解锁 + * @brief Unlock the specified string * - * @param lockStr 被加锁的string + * @param lockStr The string to be locked */ - void Unlock(const std::string &lockStr); - + void Unlock(const std::string& lockStr); private: struct LockEntry { @@ -77,7 +76,7 @@ }; using LockBucketPtr = std::shared_ptr; - int GetBucketOffset(const std::string &lockStr); + int GetBucketOffset(const std::string& lockStr); private: std::vector locks_; @@ -85,24 +84,21 @@ class NameLockGuard : public Uncopyable { public: - NameLockGuard(NameLock &lock, const std::string &lockStr) : //NOLINT - lock_(lock), - lockStr_(lockStr) { + NameLockGuard(NameLock& lock, const std::string& lockStr) + : // NOLINT + lock_(lock), + lockStr_(lockStr) { lock_.Lock(lockStr_); } - ~NameLockGuard() { - lock_.Unlock(lockStr_); - } + ~NameLockGuard() { lock_.Unlock(lockStr_); } private: - NameLock &lock_; + NameLock& lock_; std::string lockStr_; }; - -} // namespace common -} // namespace nebd - +} // namespace common +} // namespace nebd #endif // NEBD_SRC_COMMON_NAME_LOCK_H_ diff --git a/nebd/src/common/stringstatus.h b/nebd/src/common/stringstatus.h index fc4c9a6364..db47e08933 100644 --- a/nebd/src/common/stringstatus.h +++ b/nebd/src/common/stringstatus.h @@ -20,28 +20,28 @@ * Author: lixiaocui */ - -#ifndef NEBD_SRC_COMMON_STRINGSTATUS_H_ -#define NEBD_SRC_COMMON_STRINGSTATUS_H_ +#ifndef NEBD_SRC_COMMON_STRINGSTATUS_H_ +#define NEBD_SRC_COMMON_STRINGSTATUS_H_ #include -#include + #include +#include namespace nebd { namespace common { class StringStatus { public: /** - * @brief ExposeAs 用于初始化bvar + * @brief ExposeAs is used to initialize bvar * - * @param[in] prefix, 前缀 - * @param[in] name, 名字 + * @param[in] prefix, prefix + * @param[in] name, name */ - void ExposeAs(const std::string &prefix, const std::string &name); + void ExposeAs(const std::string& prefix, const std::string& name); /** - * @brief Set 设置每项key-value信息 + * @brief Set sets the key-value information for each item * * @param[in] key * @param[in] value */ void Set(const std::string& key, const std::string& value); /** - * @brief Update 把当前key-value map中的键值对以json string的形式设置到status中 //NOLINT + * @brief Update writes the key-value pairs of the current key-value map + * into status as a JSON string //NOLINT */ void Update(); /** - * @brief GetValueByKey 获取指定key对应的value + * @brief GetValueByKey Get the value corresponding to the specified key * - * @param[in] key 指定key + * @param[in] key The specified key */ - std::string GetValueByKey(const std::string &key); + std::string GetValueByKey(const std::string& key); /** - * @brief JsonBody 获取当前key-value map对应的json形式字符串 + * @brief JsonBody obtains the JSON format string corresponding to the + * current key-value map */ std::string JsonBody(); private: - // 需要导出的结构体的key-value map + // The key-value map of the structure to be exported std::map kvs_; - // 该导出项对应的status + // The status corresponding to the exported item bvar::Status status_; }; } // namespace common
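For illustration, a small sketch of the RAII pattern NameLockGuard provides for NameLock. The function name and the idea of keying the lock by a file name are assumptions made for this example:

    #include <string>

    #include "nebd/src/common/name_lock.h"

    // Serializes work on a single name: the guard calls Lock(lockStr) in its
    // constructor and Unlock(lockStr) in its destructor.
    void DoExclusivelyByName(nebd::common::NameLock& lock,
                             const std::string& name) {
        nebd::common::NameLockGuard guard(lock, name);
        // ... critical section; concurrent callers using the same name block
        // here until the guard goes out of scope ...
    }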
diff --git a/nebd/src/common/timeutility.h b/nebd/src/common/timeutility.h index a80afb61b5..9e454f15a7 100644 --- a/nebd/src/common/timeutility.h +++ b/nebd/src/common/timeutility.h @@ -26,9 +26,10 @@ #include #include #include + +#include #include #include -#include namespace nebd { namespace common { @@ -53,7 +54,8 @@ class TimeUtility { return tm.tv_sec; } - // 时间戳转成标准时间输出在standard里面,时间戳单位为秒 + // Convert the timestamp (in seconds) to standard time and output it in + // standard static inline void TimeStampToStandard(time_t timeStamp, std::string* standard) { char now[64]; @@ -64,7 +66,7 @@ } }; -} // namespace common -} // namespace nebd +} // namespace common +} // namespace nebd -#endif // NEBD_SRC_COMMON_TIMEUTILITY_H_ +#endif // NEBD_SRC_COMMON_TIMEUTILITY_H_ diff --git a/nebd/src/part1/async_request_closure.cpp b/nebd/src/part1/async_request_closure.cpp index 94d1a9f50f..c9ab8e873e 100644 --- a/nebd/src/part1/async_request_closure.cpp +++ b/nebd/src/part1/async_request_closure.cpp @@ -22,8 +22,8 @@ #include "nebd/src/part1/async_request_closure.h" -#include #include +#include #include #include @@ -40,11 +40,10 @@ void AsyncRequestClosure::Run() { int64_t sleepUs = GetRpcRetryIntervalUs(aioCtx->retryCount); LOG_EVERY_SECOND(WARNING) << OpTypeToString(aioCtx->op) << " rpc failed" - << ", error = " << cntl.ErrorText() - << ", fd = " << fd + << ", error = " << cntl.ErrorText() << ", fd = " << fd << ", log id = " << cntl.log_id() - << ", retryCount = " << aioCtx->retryCount - << ", sleep " << (sleepUs / 1000) << " ms"; + << ", retryCount = " << aioCtx->retryCount << ", sleep " + << (sleepUs / 1000) << " ms"; bthread_usleep(sleepUs); Retry(); } else { @@ -52,7 +51,7 @@ if (nebd::client::RetCode::kOK == retCode) { DVLOG(6) << OpTypeToString(aioCtx->op) << " success, fd = " << fd; - // 读请求复制数据 + // Copy data for read requests if (aioCtx->op == LIBAIO_OP::LIBAIO_OP_READ) { cntl.response_attachment().copy_to( aioCtx->buf, cntl.response_attachment().size()); @@ -73,8 +72,8 @@ } int64_t AsyncRequestClosure::GetRpcRetryIntervalUs(int64_t retryCount) const { - // EHOSTDOWN: 找不到可用的server。 - // server可能停止服务了,也可能正在退出中(返回了ELOGOFF) + // EHOSTDOWN: Unable to find an available server.
+ // The server may have stopped serving or may be exiting (returning ELOGOFF) if (cntl.ErrorCode() == EHOSTDOWN) { return requestOption_.rpcHostDownRetryIntervalUs; } @@ -83,10 +82,9 @@ int64_t AsyncRequestClosure::GetRpcRetryIntervalUs(int64_t retryCount) const { return requestOption_.rpcRetryIntervalUs; } - return std::max( - requestOption_.rpcRetryIntervalUs, - std::min(requestOption_.rpcRetryIntervalUs * retryCount, - requestOption_.rpcRetryMaxIntervalUs)); + return std::max(requestOption_.rpcRetryIntervalUs, + std::min(requestOption_.rpcRetryIntervalUs * retryCount, + requestOption_.rpcRetryMaxIntervalUs)); } void AsyncRequestClosure::Retry() const { diff --git a/nebd/src/part1/async_request_closure.h b/nebd/src/part1/async_request_closure.h index 27ab7f613d..0df2f03172 100644 --- a/nebd/src/part1/async_request_closure.h +++ b/nebd/src/part1/async_request_closure.h @@ -32,12 +32,9 @@ namespace nebd { namespace client { struct AsyncRequestClosure : public google::protobuf::Closure { - AsyncRequestClosure(int fd, - NebdClientAioContext* ctx, + AsyncRequestClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : fd(fd), - aioCtx(ctx), - requestOption_(option) {} + : fd(fd), aioCtx(ctx), requestOption_(option) {} void Run() override; @@ -47,94 +44,70 @@ struct AsyncRequestClosure : public google::protobuf::Closure { void Retry() const; - // 请求fd + // Request fd int fd; - // 请求上下文信息 + // Request Context Information NebdClientAioContext* aioCtx; - // brpc请求的controller + // Controller requested by brpc brpc::Controller cntl; RequestOption requestOption_; }; struct AioWriteClosure : public AsyncRequestClosure { - AioWriteClosure(int fd, - NebdClientAioContext* ctx, + AioWriteClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} WriteResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; struct AioReadClosure : public AsyncRequestClosure { - AioReadClosure(int fd, - NebdClientAioContext* ctx, + AioReadClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} ReadResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; struct AioDiscardClosure : public AsyncRequestClosure { - AioDiscardClosure(int fd, - NebdClientAioContext* ctx, + AioDiscardClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} DiscardResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; struct AioFlushClosure : public AsyncRequestClosure { - AioFlushClosure(int fd, - NebdClientAioContext* ctx, + AioFlushClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} FlushResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; inline const char* OpTypeToString(LIBAIO_OP opType) { switch 
(opType) { - case LIBAIO_OP::LIBAIO_OP_READ: - return "Read"; - case LIBAIO_OP::LIBAIO_OP_WRITE: - return "Write"; - case LIBAIO_OP::LIBAIO_OP_DISCARD: - return "Discard"; - case LIBAIO_OP::LIBAIO_OP_FLUSH: - return "Flush"; - default: - return "Unknown"; + case LIBAIO_OP::LIBAIO_OP_READ: + return "Read"; + case LIBAIO_OP::LIBAIO_OP_WRITE: + return "Write"; + case LIBAIO_OP::LIBAIO_OP_DISCARD: + return "Discard"; + case LIBAIO_OP::LIBAIO_OP_FLUSH: + return "Flush"; + default: + return "Unknown"; } } diff --git a/nebd/src/part1/heartbeat_manager.h b/nebd/src/part1/heartbeat_manager.h index 13289cb2d0..c9020e84cc 100644 --- a/nebd/src/part1/heartbeat_manager.h +++ b/nebd/src/part1/heartbeat_manager.h @@ -25,52 +25,52 @@ #include -#include // NOLINT #include #include +#include // NOLINT +#include "nebd/src/common/interrupt_sleep.h" #include "nebd/src/part1/nebd_common.h" #include "nebd/src/part1/nebd_metacache.h" -#include "nebd/src/common/interrupt_sleep.h" namespace nebd { namespace client { -// Heartbeat 管理类 -// 定期向nebd-server发送已打开文件的心跳信息 +// Heartbeat Management Class +// Regularly send heartbeat information of opened files to nebd-server class HeartbeatManager { public: explicit HeartbeatManager(std::shared_ptr metaCache); - ~HeartbeatManager() { - Stop(); - } + ~HeartbeatManager() { Stop(); } /** - * @brief: 启动心跳线程 + * @brief: Start heartbeat thread */ void Run(); /** - * @brief: 停止心跳线程 + * @brief: Stop heartbeat thread */ void Stop(); /** - * @brief 初始化 - * @param heartbeatOption heartbeat 配置项 - * @return 0 初始化成功 / -1 初始化失败 + * @brief initialization + * @param heartbeatOption heartbeat configuration item + * @return 0 initialization successful/-1 initialization failed */ int Init(const HeartbeatOption& option); private: /** - * @brief: 心跳线程执行函数,定期发送心跳消息 + * @brief: Heartbeat thread execution function, sending heartbeat messages + * regularly */ void HeartBetaThreadFunc(); /** - * @brief: 向part2发送心跳消息,包括当前已打开的卷信息 + * @brief: Send a heartbeat message to part2, including information about + * the currently opened volume */ void SendHeartBeat(); @@ -79,7 +79,7 @@ class HeartbeatManager { HeartbeatOption heartbeatOption_; - std::shared_ptr metaCache_; + std::shared_ptr metaCache_; std::thread heartbeatThread_; nebd::common::InterruptibleSleeper sleeper_; diff --git a/nebd/src/part1/libnebd.cpp b/nebd/src/part1/libnebd.cpp index ab6093e415..dc254c9286 100644 --- a/nebd/src/part1/libnebd.cpp +++ b/nebd/src/part1/libnebd.cpp @@ -21,12 +21,14 @@ */ #include "nebd/src/part1/libnebd.h" + #include "nebd/src/part1/libnebd_file.h" extern "C" { bool g_inited = false; -// Note: 配置文件路径是否有上层传下来比较合适,评估是否要修改 +// Note: It is more appropriate to pass down the configuration file path from +// the upper level, and evaluate whether it needs to be modified const char* confpath = "/etc/nebd/nebd-client.conf"; int nebd_lib_init() { if (g_inited) { @@ -67,17 +69,13 @@ int nebd_lib_uninit() { return 0; } -int nebd_lib_open(const char* filename) { - return Open4Nebd(filename, nullptr); -} +int nebd_lib_open(const char* filename) { return Open4Nebd(filename, nullptr); } int nebd_lib_open_with_flags(const char* filename, const NebdOpenFlags* flags) { return Open4Nebd(filename, flags); } -int nebd_lib_close(int fd) { - return Close4Nebd(fd); -} +int nebd_lib_close(int fd) { return Close4Nebd(fd); } int nebd_lib_pread(int fd, void* buf, off_t offset, size_t length) { (void)fd; @@ -114,32 +112,20 @@ int nebd_lib_sync(int fd) { return 0; } -int64_t nebd_lib_filesize(int fd) { - return GetFileSize4Nebd(fd); -} +int64_t 
nebd_lib_filesize(int fd) { return GetFileSize4Nebd(fd); } -int64_t nebd_lib_blocksize(int fd) { - return GetBlockSize4Nebd(fd); -} +int64_t nebd_lib_blocksize(int fd) { return GetBlockSize4Nebd(fd); } -int nebd_lib_resize(int fd, int64_t size) { - return Extend4Nebd(fd, size); -} +int nebd_lib_resize(int fd, int64_t size) { return Extend4Nebd(fd, size); } int nebd_lib_flush(int fd, NebdClientAioContext* context) { return Flush4Nebd(fd, context); } -int64_t nebd_lib_getinfo(int fd) { - return GetInfo4Nebd(fd); -} +int64_t nebd_lib_getinfo(int fd) { return GetInfo4Nebd(fd); } -int nebd_lib_invalidcache(int fd) { - return InvalidCache4Nebd(fd); -} +int nebd_lib_invalidcache(int fd) { return InvalidCache4Nebd(fd); } -void nebd_lib_init_open_flags(NebdOpenFlags* flags) { - flags->exclusive = 1; -} +void nebd_lib_init_open_flags(NebdOpenFlags* flags) { flags->exclusive = 1; } } // extern "C" diff --git a/nebd/src/part1/libnebd.h b/nebd/src/part1/libnebd.h index 380776d71b..8a39ee3977 100644 --- a/nebd/src/part1/libnebd.h +++ b/nebd/src/part1/libnebd.h @@ -27,19 +27,19 @@ extern "C" { #endif +#include +#include +#include #include #include -#include -#include #include -#include +#include #include -#include -// 文件路径最大的长度,单位字节 -#define NEBD_MAX_FILE_PATH_LEN 1024 +// The maximum length of the file path, in bytes +#define NEBD_MAX_FILE_PATH_LEN 1024 -// nebd异步请求的类型 +// Types of nebd asynchronous requests typedef enum LIBAIO_OP { LIBAIO_OP_READ, LIBAIO_OP_WRITE, @@ -55,139 +55,147 @@ void nebd_lib_init_open_flags(NebdOpenFlags* flags); struct NebdClientAioContext; -// nebd回调函数的类型 +// The type of nebd callback function typedef void (*LibAioCallBack)(struct NebdClientAioContext* context); struct NebdClientAioContext { - off_t offset; // 请求的offset - size_t length; // 请求的length - int ret; // 记录异步返回的返回值 - LIBAIO_OP op; // 异步请求的类型,详见定义 - LibAioCallBack cb; // 异步请求的回调函数 - void* buf; // 请求的buf - unsigned int retryCount; // 记录异步请求的重试次数 + off_t offset; // Requested offset + size_t length; // Requested length + int ret; // Record the return value returned asynchronously + LIBAIO_OP + op; // The type of asynchronous request, as defined in the definition + LibAioCallBack cb; // Callback function for asynchronous requests + void* buf; // Buf requested + unsigned int + retryCount; // Record the number of retries for asynchronous requests }; // int nebd_lib_fini(void); /** - * @brief 初始化nebd,仅在第一次调用的时候真正执行初始化逻辑 - * @param none - * @return 成功返回0,失败返回-1 + * @brief initializes nebd and only executes the initialization logic on the + * first call + * @param none + * @return returns 0 for success, -1 for failure */ int nebd_lib_init(void); int nebd_lib_init_with_conf(const char* confPath); /** - * @brief 反初始化nebd - * @param none - * @return 成功返回0,失败返回-1 + * @brief uninitialize nebd + * @param none + * @return returns 0 for success, -1 for failure */ int nebd_lib_uninit(void); /** - * @brief open文件 - * @param filename:文件名 - * @return 成功返回文件fd,失败返回错误码 + * @brief open file + * @param filename: File name + * @return successfully returned the file fd, but failed with an error code */ int nebd_lib_open(const char* filename); int nebd_lib_open_with_flags(const char* filename, const NebdOpenFlags* openflags); /** - * @brief close文件 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief close file + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int nebd_lib_close(int fd); /** - * @brief 同步读文件 - * @param fd:文件的fd - * buf:存放读取data的buf - * offset:读取的位置offset - * length:读取的长度 - * @return 
成功返回0,失败返回错误码 + * @brief Synchronously read the file + * @param fd: fd of the file + * buf: Buffer to store the data read + * offset: The offset to read at + * length: The length to read + * @return success returns 0, failure returns error code */ int nebd_lib_pread(int fd, void* buf, off_t offset, size_t length); /** - * @brief 同步写文件 - * @param fd:文件的fd - * buf:存放写入data的buf - * offset:写入的位置offset - * length:写入的长度 - * @return 成功返回0,失败返回错误码 + * @brief Synchronously write the file + * @param fd: fd of the file + * buf: Buffer holding the data to write + * offset: The offset to write at + * length: The length to write + * @return success returns 0, failure returns error code */ int nebd_lib_pwrite(int fd, const void* buf, off_t offset, size_t length); /** - * @brief discard文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief discard file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int nebd_lib_discard(int fd, struct NebdClientAioContext* context); /** - * @brief 读文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief Read file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int nebd_lib_aio_pread(int fd, struct NebdClientAioContext* context); /** - * @brief 写文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief write file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int nebd_lib_aio_pwrite(int fd, struct NebdClientAioContext* context); /** - * @brief sync文件 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief sync file + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int nebd_lib_sync(int fd); /** - * @brief 获取文件size - * @param fd:文件的fd - * @return 成功返回文件size,失败返回错误码 + * @brief Get file size + * @param fd: fd of the file + * @return successfully returned the file size, but failed with an error code */ int64_t nebd_lib_filesize(int fd); int64_t nebd_lib_blocksize(int fd); /** - * @brief resize文件 - * @param fd:文件的fd - * size:调整后的文件size - * @return 成功返回0,失败返回错误码 + * @brief resize file + * @param fd: fd of the file + * size: adjusted file size + * @return success returns 0, failure returns error code */ int nebd_lib_resize(int fd, int64_t size); /** - * @brief flush文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief flush file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int nebd_lib_flush(int fd, struct NebdClientAioContext* context); /** - * @brief 获取文件info - * @param fd:文件的fd - * @return 成功返回文件对象size,失败返回错误码 + * @brief Get file information + * @param fd: fd of the file + * @return successfully returned the file object size, but failed with an error + * code */ int64_t nebd_lib_getinfo(int fd); /** - * @brief 刷新cache,等所有异步请求返回 - *
@param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief refresh cache, wait for all asynchronous requests to return + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int nebd_lib_invalidcache(int fd); diff --git a/nebd/src/part1/libnebd_file.h b/nebd/src/part1/libnebd_file.h index 6361094ab2..33e39a58c2 100644 --- a/nebd/src/part1/libnebd_file.h +++ b/nebd/src/part1/libnebd_file.h @@ -26,83 +26,89 @@ #include "nebd/src/part1/libnebd.h" /** - * @brief 初始化nebd,仅在第一次调用的时候真正执行初始化逻辑 - * @param none - * @return 成功返回0,失败返回-1 + * @brief initializes nebd and only executes the initialization logic on the + * first call + * @param none + * @return returns 0 for success, -1 for failure */ int Init4Nebd(const char* confpath); /** - * @brief 反初始化nebd - * @param none - * @return 成功返回0,失败返回-1 + * @brief uninitialize nebd + * @param none + * @return returns 0 for success, -1 for failure */ void Uninit4Nebd(); /** - * @brief open文件 - * @param filename:文件名 - * @return 成功返回文件fd,失败返回错误码 + * @brief open file + * @param filename: File name + * @return successfully returned the file fd, but failed with an error code */ int Open4Nebd(const char* filename, const NebdOpenFlags* flags); /** - * @brief close文件 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief close file + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int Close4Nebd(int fd); /** - * @brief resize文件 - * @param fd:文件的fd - * size:调整后的文件size - * @return 成功返回0,失败返回错误码 + * @brief resize file + * @param fd: fd of the file + * size: adjusted file size + * @return success returns 0, failure returns error code */ int Extend4Nebd(int fd, int64_t newsize); /** - * @brief 获取文件size - * @param fd:文件的fd - * @return 成功返回文件size,失败返回错误码 + * @brief Get file size + * @param fd: fd of the file + * @return successfully returned the file size, but failed with an error code */ int64_t GetFileSize4Nebd(int fd); int64_t GetBlockSize4Nebd(int fd); /** - * @brief discard文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief discard file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int Discard4Nebd(int fd, NebdClientAioContext* aioctx); /** - * @brief 读文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief Read file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int AioRead4Nebd(int fd, NebdClientAioContext* aioctx); /** - * @brief 写文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief write file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int AioWrite4Nebd(int fd, NebdClientAioContext* aioctx); /** - * @brief flush文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief flush file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the 
callback + * @return success returns 0, failure returns error code */ int Flush4Nebd(int fd, NebdClientAioContext* aioctx); /** - * @brief 获取文件info - * @param fd:文件的fd - * @return 成功返回文件对象size,失败返回错误码 + * @brief Get info of the file + * @param fd: fd of the file + * @return successfully returned the file object size, but failed with an error + * code */ int64_t GetInfo4Nebd(int fd); /** - * @brief 刷新cache,等所有异步请求返回 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief refresh cache, wait for all asynchronous requests to return + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int InvalidCache4Nebd(int fd); diff --git a/nebd/src/part1/nebd_client.cpp b/nebd/src/part1/nebd_client.cpp index bd1a2202ea..7f9ec811fd 100644 --- a/nebd/src/part1/nebd_client.cpp +++ b/nebd/src/part1/nebd_client.cpp @@ -22,30 +22,42 @@ #include "nebd/src/part1/nebd_client.h" -#include -#include -#include #include -#include -#include +#include #include +#include +#include +#include +#include + #include -#include "nebd/src/part1/async_request_closure.h" #include "nebd/src/common/configuration.h" +#include "nebd/src/part1/async_request_closure.h" -#define RETURN_IF_FALSE(val) if (val == false) { return -1; } +#define RETURN_IF_FALSE(val) \ if (val == false) { \ return -1; \ } -// 修改brpc的health_check_interval参数,这个参数用来控制健康检查的周期 -// ## 健康检查 -// 连接断开的server会被暂时隔离而不会被负载均衡算法选中,brpc会定期连接被隔离的server,以检查他们是否恢复正常,间隔由参数-health_check_interval控制: // NOLINT -// | Name | Value | Description | Defined At | // NOLINT -// | ------------------------- | ----- | ---------------------------------------- | ----------------------- | // NOLINT -// | health_check_interval (R) | 3 | seconds between consecutive health-checkings | src/brpc/socket_map.cpp | // NOLINT -// 一旦server被连接上,它会恢复为可用状态。如果在隔离过程中,server从命名服务中删除了,brpc也会停止连接尝试。 // NOLINT +// Modify brpc's health_check_interval parameter, which controls the health +// check period. +// ## Health check +// A disconnected server is temporarily isolated and will not be selected by +// the load balancing algorithm. brpc periodically reconnects to isolated +// servers to check whether they have recovered; the interval is controlled by +// the -health_check_interval parameter: // NOLINT +// | Name | Value | Description | Defined At | // NOLINT +// | ------------------------- | ----- | --------------------------------------------- | ----------------------- | // NOLINT +// | health_check_interval (R) | 3 | seconds between consecutive health-checkings | src/brpc/socket_map.cpp | // NOLINT +// Once a server is reconnected, it becomes available again. If the server is +// removed from the naming service while isolated, brpc also stops its +// connection attempts. // NOLINT
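As a sketch of what the declarations that follow enable, these two brpc gflags can be overridden programmatically before any channel is created, which is what InitChannel later does with the configured values; the literal values here are arbitrary:

    #include <gflags/gflags_declare.h>

    namespace brpc {
    DECLARE_int32(health_check_interval);
    DECLARE_int32(circuit_breaker_max_isolation_duration_ms);
    }  // namespace brpc

    // Arbitrary example values; nebd-client derives them from its conf file.
    void TuneBrpcHealthCheck() {
        brpc::FLAGS_health_check_interval = 1;                        // seconds
        brpc::FLAGS_circuit_breaker_max_isolation_duration_ms = 100;  // ms
    }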
If +// the server is removed from the naming service during the isolation process, +// brpc will also stop connection attempts// NOLINT namespace brpc { - DECLARE_int32(health_check_interval); - DECLARE_int32(circuit_breaker_max_isolation_duration_ms); +DECLARE_int32(health_check_interval); +DECLARE_int32(circuit_breaker_max_isolation_duration_ms); } // namespace brpc namespace nebd { @@ -53,7 +65,7 @@ namespace client { using nebd::common::FileLock; -NebdClient &nebdClient = NebdClient::GetInstance(); +NebdClient& nebdClient = NebdClient::GetInstance(); constexpr int32_t kBufSize = 128; @@ -98,8 +110,7 @@ int NebdClient::Init(const char* confpath) { } metaCache_ = std::make_shared(); - heartbeatMgr_ = std::make_shared( - metaCache_); + heartbeatMgr_ = std::make_shared(metaCache_); ret = heartbeatMgr_->Init(heartbeatOption); if (ret != 0) { @@ -139,7 +150,7 @@ void NebdClient::Uninit() { } int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) { - // 加文件锁 + // Add file lock std::string fileLockName = option_.fileLockPath + "/" + ReplaceSlash(filename); FileLock fileLock(fileLockName); @@ -150,8 +161,7 @@ int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) { return -1; } - auto task = [&](brpc::Controller* cntl, - brpc::Channel* channel, + auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { NebdFileService_Stub stub(channel); OpenFileRequest request; @@ -168,8 +178,7 @@ int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) { *rpcFailed = cntl->Failed(); if (*rpcFailed) { - LOG(WARNING) << "OpenFile rpc failed, error = " - << cntl->ErrorText() + LOG(WARNING) << "OpenFile rpc failed, error = " << cntl->ErrorText() << ", filename = " << filename << ", log id = " << cntl->log_id(); return -1; @@ -177,7 +186,7 @@ int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) { if (response.retcode() != RetCode::kOK) { LOG(ERROR) << "OpenFile failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() + << ", retmsg = " << response.retmsg() << ", filename = " << filename << ", log id = " << cntl->log_id(); return -1; @@ -199,8 +208,7 @@ int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) { } int NebdClient::Close(int fd) { - auto task = [&](brpc::Controller* cntl, - brpc::Channel* channel, + auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { NebdFileService_Stub stub(channel); CloseFileRequest request; @@ -219,7 +227,7 @@ int NebdClient::Close(int fd) { if (response.retcode() != RetCode::kOK) { LOG(ERROR) << "CloseFile failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() + << ", retmsg = " << response.retmsg() << ", fd = " << fd << ", log id = " << cntl->log_id(); } @@ -240,8 +248,7 @@ int NebdClient::Close(int fd) { } int NebdClient::Extend(int fd, int64_t newsize) { - auto task = [&](brpc::Controller* cntl, - brpc::Channel* channel, + auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { (void)channel; nebd::client::NebdFileService_Stub stub(&channel_); @@ -255,17 +262,15 @@ int NebdClient::Extend(int fd, int64_t newsize) { *rpcFailed = cntl->Failed(); if (*rpcFailed) { - LOG(WARNING) << "Resize RPC failed, error = " - << cntl->ErrorText() + LOG(WARNING) << "Resize RPC failed, error = " << cntl->ErrorText() << ", log id = " << cntl->log_id(); return -1; } else { if (response.retcode() != nebd::client::RetCode::kOK) { LOG(ERROR) << 
"ExtendFile failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() - << ", fd = " << fd - << ", newsize = " << newsize + << ", retmsg = " << response.retmsg() + << ", fd = " << fd << ", newsize = " << newsize << ", log id = " << cntl->log_id(); return -1; } else { @@ -276,15 +281,13 @@ int NebdClient::Extend(int fd, int64_t newsize) { int64_t ret = ExecuteSyncRpc(task); if (ret < 0) { - LOG(ERROR) << "Extend failed, fd = " << fd - << ", newsize = " << newsize; + LOG(ERROR) << "Extend failed, fd = " << fd << ", newsize = " << newsize; } return ret; } int64_t NebdClient::GetFileSize(int fd) { - auto task = [&](brpc::Controller* cntl, - brpc::Channel* channel, + auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { nebd::client::NebdFileService_Stub stub(channel); nebd::client::GetInfoRequest request; @@ -295,15 +298,14 @@ int64_t NebdClient::GetFileSize(int fd) { *rpcFailed = cntl->Failed(); if (*rpcFailed) { - LOG(WARNING) << "GetFileSize failed, error = " - << cntl->ErrorText() + LOG(WARNING) << "GetFileSize failed, error = " << cntl->ErrorText() << ", log id = " << cntl->log_id(); return -1; } else { if (response.retcode() != nebd::client::RetCode::kOK) { LOG(ERROR) << "GetFileSize failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() + << ", retmsg = " << response.retmsg() << ", fd = " << fd << ", log id = " << cntl->log_id(); return -1; @@ -366,8 +368,8 @@ int NebdClient::Discard(int fd, NebdClientAioContext* aioctx) { request.set_offset(aioctx->offset); request.set_size(aioctx->length); - AioDiscardClosure* done = new(std::nothrow) AioDiscardClosure( - fd, aioctx, option_.requestOption); + AioDiscardClosure* done = new (std::nothrow) + AioDiscardClosure(fd, aioctx, option_.requestOption); done->cntl.set_timeout_ms(-1); done->cntl.set_log_id(logId_.fetch_add(1, std::memory_order_relaxed)); stub.Discard(&done->cntl, &request, &done->response, done); @@ -386,8 +388,8 @@ int NebdClient::AioRead(int fd, NebdClientAioContext* aioctx) { request.set_offset(aioctx->offset); request.set_size(aioctx->length); - AioReadClosure* done = new(std::nothrow) AioReadClosure( - fd, aioctx, option_.requestOption); + AioReadClosure* done = new (std::nothrow) + AioReadClosure(fd, aioctx, option_.requestOption); done->cntl.set_timeout_ms(-1); done->cntl.set_log_id(logId_.fetch_add(1, std::memory_order_relaxed)); stub.Read(&done->cntl, &request, &done->response, done); @@ -398,9 +400,7 @@ int NebdClient::AioRead(int fd, NebdClientAioContext* aioctx) { return 0; } -static void EmptyDeleter(void* m) { - (void)m; -} +static void EmptyDeleter(void* m) { (void)m; } int NebdClient::AioWrite(int fd, NebdClientAioContext* aioctx) { auto task = [this, fd, aioctx]() { @@ -410,8 +410,8 @@ int NebdClient::AioWrite(int fd, NebdClientAioContext* aioctx) { request.set_offset(aioctx->offset); request.set_size(aioctx->length); - AioWriteClosure* done = new(std::nothrow) AioWriteClosure( - fd, aioctx, option_.requestOption); + AioWriteClosure* done = new (std::nothrow) + AioWriteClosure(fd, aioctx, option_.requestOption); done->cntl.set_timeout_ms(-1); done->cntl.set_log_id(logId_.fetch_add(1, std::memory_order_relaxed)); @@ -431,8 +431,8 @@ int NebdClient::Flush(int fd, NebdClientAioContext* aioctx) { nebd::client::FlushRequest request; request.set_fd(fd); - AioFlushClosure* done = new(std::nothrow) AioFlushClosure( - fd, aioctx, option_.requestOption); + AioFlushClosure* done = new (std::nothrow) + AioFlushClosure(fd, 
aioctx, option_.requestOption); done->cntl.set_timeout_ms(-1); done->cntl.set_log_id(logId_.fetch_add(1, std::memory_order_relaxed)); stub.Flush(&done->cntl, &request, &done->response, done); @@ -444,8 +444,7 @@ int NebdClient::Flush(int fd, NebdClientAioContext* aioctx) { } int64_t NebdClient::GetInfo(int fd) { - auto task = [&](brpc::Controller* cntl, - brpc::Channel* channel, + auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { nebd::client::NebdFileService_Stub stub(channel); nebd::client::GetInfoRequest request; @@ -456,15 +455,14 @@ int64_t NebdClient::GetInfo(int fd) { *rpcFailed = cntl->Failed(); if (*rpcFailed) { - LOG(WARNING) << "GetInfo rpc failed, error = " - << cntl->ErrorText() + LOG(WARNING) << "GetInfo rpc failed, error = " << cntl->ErrorText() << ", log id = " << cntl->log_id(); return -1; } else { if (response.retcode() != nebd::client::RetCode::kOK) { LOG(ERROR) << "GetInfo failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() + << ", retmsg = " << response.retmsg() << ", fd = " << fd << ", log id = " << cntl->log_id(); return -1; @@ -482,8 +480,7 @@ int64_t NebdClient::GetInfo(int fd) { } int NebdClient::InvalidCache(int fd) { - auto task = [&](brpc::Controller* cntl, - brpc::Channel* channel, + auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { nebd::client::NebdFileService_Stub stub(channel); nebd::client::InvalidateCacheRequest request; @@ -502,7 +499,7 @@ int NebdClient::InvalidCache(int fd) { if (response.retcode() != nebd::client::RetCode::kOK) { LOG(ERROR) << "InvalidCache failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() + << ", retmsg = " << response.retmsg() << ", fd = " << fd << ", log id = " << cntl->log_id(); return -1; @@ -526,8 +523,7 @@ int NebdClient::InitNebdClientOption(Configuration* conf) { LOG_IF(ERROR, ret != true) << "Load nebdserver.serverAddress failed"; RETURN_IF_FALSE(ret); - ret = conf->GetStringValue("metacache.fileLockPath", - &option_.fileLockPath); + ret = conf->GetStringValue("metacache.fileLockPath", &option_.fileLockPath); LOG_IF(ERROR, ret != true) << "Load metacache.fileLockPath failed"; RETURN_IF_FALSE(ret); @@ -550,7 +546,8 @@ int NebdClient::InitNebdClientOption(Configuration* conf) { ret = conf->GetInt64Value("request.rpcHostDownRetryIntervalUs", &requestOption.rpcHostDownRetryIntervalUs); - LOG_IF(ERROR, ret != true) << "Load request.rpcHostDownRetryIntervalUs failed"; // NOLINT + LOG_IF(ERROR, ret != true) + << "Load request.rpcHostDownRetryIntervalUs failed"; // NOLINT RETURN_IF_FALSE(ret); ret = conf->GetInt64Value("request.rpcHealthCheckIntervalS", @@ -560,7 +557,8 @@ int NebdClient::InitNebdClientOption(Configuration* conf) { ret = conf->GetInt64Value("request.rpcMaxDelayHealthCheckIntervalMs", &requestOption.rpcMaxDelayHealthCheckIntervalMs); - LOG_IF(ERROR, ret != true) << "Load request.rpcMaxDelayHealthCheckIntervalMs failed"; // NOLINT + LOG_IF(ERROR, ret != true) + << "Load request.rpcMaxDelayHealthCheckIntervalMs failed"; // NOLINT RETURN_IF_FALSE(ret); ret = conf->GetUInt32Value("request.rpcSendExecQueueNum", @@ -581,8 +579,8 @@ int NebdClient::InitNebdClientOption(Configuration* conf) { int NebdClient::InitHeartBeatOption(Configuration* conf, HeartbeatOption* heartbeatOption) { - bool ret = conf->GetInt64Value("heartbeat.intervalS", - &heartbeatOption->intervalS); + bool ret = + conf->GetInt64Value("heartbeat.intervalS", &heartbeatOption->intervalS); 
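    // Every load in this function follows the same pattern: LOG_IF records
    // the missing key and RETURN_IF_FALSE aborts initialization with -1.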
LOG_IF(ERROR, ret != true) << "Load heartbeat.intervalS failed"; RETURN_IF_FALSE(ret); @@ -604,8 +602,7 @@ int NebdClient::InitChannel() { option_.requestOption.rpcHealthCheckIntervalS; brpc::FLAGS_circuit_breaker_max_isolation_duration_ms = option_.requestOption.rpcMaxDelayHealthCheckIntervalMs; - int ret = channel_.InitWithSockFile( - option_.serverAddress.c_str(), nullptr); + int ret = channel_.InitWithSockFile(option_.serverAddress.c_str(), nullptr); if (ret != 0) { LOG(ERROR) << "Init Channel failed, socket addr = " << option_.serverAddress; @@ -652,7 +649,6 @@ std::string NebdClient::ReplaceSlash(const std::string& str) { return ret; } - void NebdClient::InitLogger(const LogOption& logOption) { static const char* kProcessName = "nebd-client"; @@ -661,8 +657,9 @@ google::InitGoogleLogging(kProcessName); } -int NebdClient::ExecAsyncRpcTask(void* meta, - bthread::TaskIterator& iter) { // NOLINT +int NebdClient::ExecAsyncRpcTask( + void* meta, + bthread::TaskIterator& iter) { // NOLINT (void)meta; if (iter.is_queue_stopped()) { return 0;
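Before the header diff, a condensed sketch of the task shape that ExecuteSyncRpc consumes, distilled from the synchronous calls above. It assumes it runs inside a NebdClient method where fd is in scope; the GetInfo method and response type are assumptions modeled on the request types shown, and no concrete response field is accessed:

    // Sketch only: transport failures are reported through rpcFailed so the
    // wrapper can sleep and retry (up to request.syncRpcMaxRetryTimes);
    // application-level errors return -1 without retry.
    auto task = [&](brpc::Controller* cntl, brpc::Channel* channel,
                    bool* rpcFailed) -> int64_t {
        nebd::client::NebdFileService_Stub stub(channel);
        nebd::client::GetInfoRequest request;
        nebd::client::GetInfoResponse response;  // assumed response type
        request.set_fd(fd);
        stub.GetInfo(cntl, &request, &response, nullptr);  // synchronous call
        *rpcFailed = cntl->Failed();
        if (*rpcFailed || response.retcode() != nebd::client::RetCode::kOK) {
            return -1;
        }
        return 0;
    };
    int64_t ret = ExecuteSyncRpc(task);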
failed with an error + * code */ int64_t GetFileSize(int fd); int64_t GetBlockSize(int fd); /** - * @brief discard文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief discard file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int Discard(int fd, NebdClientAioContext* aioctx); /** - * @brief 读文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief Read file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int AioRead(int fd, NebdClientAioContext* aioctx); /** - * @brief 写文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief write file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int AioWrite(int fd, NebdClientAioContext* aioctx); /** - * @brief flush文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief flush file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the + * information required for the request and the callback + * @return success returns 0, failure returns error code */ int Flush(int fd, NebdClientAioContext* aioctx); /** - * @brief 获取文件info - * @param fd:文件的fd - * @return 成功返回文件对象size,失败返回错误码 + * @brief Get file information + * @param fd: fd of the file + * @return successfully returned the file object size, but failed with an + * error code */ int64_t GetInfo(int fd); /** - * @brief 刷新cache,等所有异步请求返回 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief refresh cache, wait for all asynchronous requests to return + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int InvalidCache(int fd); @@ -159,17 +164,17 @@ class NebdClient { void InitLogger(const LogOption& logOption); /** - * @brief 替换字符串中的 '/' 为 '+' + * @brief replaces'/'with'+'in the string * - * @param str 需要替换的字符串 - * @return 替换后的字符串 + * @param str The string that needs to be replaced + * @return The replaced string */ std::string ReplaceSlash(const std::string& str); int64_t ExecuteSyncRpc(RpcTask task); - // 心跳管理模块 + // Heartbeat management module std::shared_ptr heartbeatMgr_; - // 缓存模块 + // Cache module std::shared_ptr metaCache_; NebdClientOption option_; @@ -183,7 +188,8 @@ class NebdClient { std::vector> rpcTaskQueues_; - static int ExecAsyncRpcTask(void* meta, bthread::TaskIterator& iter); // NOLINT + static int ExecAsyncRpcTask( + void* meta, bthread::TaskIterator& iter); // NOLINT void PushAsyncTask(const AsyncRpcTask& task) { static thread_local unsigned int seed = time(nullptr); @@ -197,7 +203,7 @@ class NebdClient { } }; -extern NebdClient &nebdClient; +extern NebdClient& nebdClient; } // namespace client } // namespace nebd diff --git a/nebd/src/part1/nebd_common.h b/nebd/src/part1/nebd_common.h index 432f24534f..7c03839178 100644 --- a/nebd/src/part1/nebd_common.h +++ b/nebd/src/part1/nebd_common.h @@ -25,49 +25,49 @@ #include -// rpc request配置项 +// 
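PushAsyncTask in nebd_client.h above appears to pick one of request.rpcSendExecQueueNum bthread execution queues at random (the thread-local seed suggests rand_r), so RPCs pushed to the same queue run in order while separate queues drain in parallel. A minimal sketch of that dispatch pattern, assuming brpc's execution-queue API and a std::function-based AsyncRpcTask (both assumptions made for illustration):

#include <bthread/execution_queue.h>
#include <glog/logging.h>

#include <cstdlib>
#include <ctime>
#include <functional>
#include <vector>

using AsyncRpcTask = std::function<void()>;

// Push a task onto a randomly chosen execution queue; tasks within one
// queue are consumed sequentially by that queue's consumer bthread.
void PushTask(std::vector<bthread::ExecutionQueueId<AsyncRpcTask>>* queues,
              const AsyncRpcTask& task) {
    static thread_local unsigned int seed = time(nullptr);
    int idx = rand_r(&seed) % queues->size();
    int rc = bthread::execution_queue_execute((*queues)[idx], task);
    LOG_IF(ERROR, rc != 0) << "push async rpc task failed";
}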
-// rpc request配置项 +// rpc request configuration item struct RequestOption { - // 同步rpc的最大重试次数 + // Maximum number of retries for synchronous rpc int64_t syncRpcMaxRetryTimes; - // rpc请求的重试间隔 + // The retry interval for rpc requests int64_t rpcRetryIntervalUs; - // rpc请求的最大重试间隔 + // Maximum retry interval for rpc requests int64_t rpcRetryMaxIntervalUs; - // rpc hostdown情况下的重试时间 + // Retry interval when the rpc host is down int64_t rpcHostDownRetryIntervalUs; - // brpc的健康检查周期时间 + // Health check cycle time for brpc int64_t rpcHealthCheckIntervalS; - // brpc从rpc失败到进行健康检查的最大时间间隔 + // The maximum time interval between RPC failure and health check in BRPC int64_t rpcMaxDelayHealthCheckIntervalMs; - // rpc发送执行队列个数 + // Number of RPC send execution queues uint32_t rpcSendExecQueueNum = 2; }; -// 日志配置项 +// Log configuration item struct LogOption { - // 日志存放目录 + // Log storage directory std::string logPath; }; -// nebd client配置项 +// nebd client configuration item struct NebdClientOption { // part2 socket file address std::string serverAddress; - // 文件锁路径 + // File lock path std::string fileLockPath; - // rpc request配置项 + // rpc request configuration item RequestOption requestOption; - // 日志配置项 + // Log configuration item LogOption logOption; }; -// heartbeat配置项 +// heartbeat configuration item struct HeartbeatOption { // part2 socket file address std::string serverAddress; - // heartbeat间隔 + // heartbeat interval int64_t intervalS; - // heartbeat rpc超时时间 + // heartbeat RPC timeout int64_t rpcTimeoutMs; }; diff --git a/nebd/src/part1/nebd_metacache.h b/nebd/src/part1/nebd_metacache.h index 3b596bdf62..5435e3af5f 100644 --- a/nebd/src/part1/nebd_metacache.h +++ b/nebd/src/part1/nebd_metacache.h @@ -23,9 +23,9 @@ #ifndef NEBD_SRC_PART1_NEBD_METACACHE_H_ #define NEBD_SRC_PART1_NEBD_METACACHE_H_ +#include #include #include -#include #include "nebd/src/common/file_lock.h" #include "nebd/src/common/rw_lock.h" @@ -42,16 +42,13 @@ struct NebdClientFileInfo { NebdClientFileInfo() = default; - NebdClientFileInfo( - int fd, const std::string& fileName, - const FileLock& fileLock) - : fd(fd), - fileName(fileName), - fileLock(fileLock) {} + NebdClientFileInfo(int fd, const std::string& fileName, + const FileLock& fileLock) + : fd(fd), fileName(fileName), fileLock(fileLock) {} }; /** - * @brief: 保存当前已打开文件信息 + * @brief: Keeps the information of currently opened files */ class NebdClientMetaCache { public: @@ -59,33 +56,33 @@ class NebdClientMetaCache { ~NebdClientMetaCache() = default; /** - * @brief: 添加文件信息 - * @param: fileInfo 文件信息 + * @brief: Add file information + * @param: fileInfo: file information */ void AddFileInfo(const NebdClientFileInfo& fileInfo); /** - * @brief: 删除文件信息 - * @param: fd 文件描述符 + * @brief: Delete file information + * @param: fd: file descriptor */ void RemoveFileInfo(int fd); /** - * @brief: 获取对应fd的文件信息 - * @param: fd 文件fd + * @brief: Obtain the file information of the corresponding fd + * @param: fd: file fd * @param[out]: fileInfo - * @return: 0 成功 / -1 返回 + * @return: returns 0 on success, -1 on failure */ int GetFileInfo(int fd, NebdClientFileInfo* fileInfo) const; /** - * @brief: 获取当前已打开文件信息 - * @return: 当前已打开文件信息 + * @brief: Get information about currently opened files + * @return: Currently opened file information */ std::vector GetAllFileInfo() const; private: - // 当前已打开文件信息 + // Currently opened file information std::unordered_map fileinfos_; mutable nebd::common::RWLock rwLock_; }; diff --git a/nebd/src/part2/define.h b/nebd/src/part2/define.h index 4c2fc54022..8a66854c59 100644 --- a/nebd/src/part2/define.h
+++ b/nebd/src/part2/define.h @@ -25,24 +25,25 @@ #include #include -#include -#include + #include +#include +#include #include "nebd/src/common/rw_lock.h" namespace nebd { namespace server { -using nebd::common::RWLock; -using ::google::protobuf::Message; using ::google::protobuf::Closure; +using ::google::protobuf::Message; using ::google::protobuf::RpcController; +using nebd::common::RWLock; const char CURVE_PREFIX[] = "cbd"; const char TEST_PREFIX[] = "test"; -// nebd异步请求的类型 +// Types of nebd asynchronous requests enum class LIBAIO_OP { LIBAIO_OP_READ, LIBAIO_OP_WRITE, @@ -70,54 +71,55 @@ using RWLockPtr = std::shared_ptr; struct NebdServerAioContext; -// nebd回调函数的类型 +// The type of nebd callback function typedef void (*NebdAioCallBack)(struct NebdServerAioContext* context); -// nebd server端异步请求的上下文 -// 记录请求的类型、参数、返回信息、rpc信息 +// Context of a nebd server-side asynchronous request +// Records the type, parameters, return information, and rpc information of +// the request struct NebdServerAioContext { - // 请求的offset + // Requested offset off_t offset = 0; - // 请求的size + // Requested size size_t size = 0; - // 记录异步返回的返回值 + // Records the return value of the asynchronous operation int ret = -1; - // 异步请求的类型,详见定义 + // Type of the asynchronous request; see the LIBAIO_OP definition LIBAIO_OP op = LIBAIO_OP::LIBAIO_OP_UNKNOWN; - // 异步请求结束时调用的回调函数 + // Callback invoked when the asynchronous request completes NebdAioCallBack cb; - // 请求的buf + // Request buffer void* buf = nullptr; - // rpc请求的相应内容 + // Response message of the rpc request Message* response = nullptr; - // rpc请求的回调函数 - Closure *done = nullptr; - // rpc请求的controller + // Callback function for the rpc request Closure* done = nullptr; + // Controller for rpc requests RpcController* cntl = nullptr; // return rpc when io error bool returnRpcWhenIoError = false; }; struct NebdFileInfo { - // 文件大小 + // File size uint64_t size; - // object/chunk大小 + // object/chunk size uint64_t obj_size; - // object数量 + // Number of objects uint64_t num_objs; // block size uint32_t block_size; }; using ExtendAttribute = std::map; -// nebd server 端文件持久化的元数据信息 +// Metadata persisted for each file on the nebd server side struct NebdFileMeta { int fd; std::string fileName; ExtendAttribute xattr; }; -// part2配置项 +// part2 configuration items const char LISTENADDRESS[] = "listen.address"; const char METAFILEPATH[] = "meta.file.path"; const char HEARTBEATTIMEOUTSEC[] = "heartbeat.timeout.sec"; diff --git a/nebd/src/part2/file_entity.cpp b/nebd/src/part2/file_entity.cpp index 0899472c72..272e761ace 100644 --- a/nebd/src/part2/file_entity.cpp +++ b/nebd/src/part2/file_entity.cpp @@ -57,13 +57,13 @@ std::ostream& operator<<(std::ostream& os, const OpenFlags* flags) { } NebdFileEntity::NebdFileEntity() - : fd_(0) - , fileName_("") - , status_(NebdFileStatus::CLOSED) - , timeStamp_(0) - , fileInstance_(nullptr) - , executor_(nullptr) - , metaFileManager_(nullptr) {} + : fd_(0), + fileName_(""), + status_(NebdFileStatus::CLOSED), + timeStamp_(0), + fileInstance_(nullptr), + executor_(nullptr), + metaFileManager_(nullptr) {} NebdFileEntity::~NebdFileEntity() {} @@ -117,8 +117,7 @@ int NebdFileEntity::Open(const OpenFlags* openflags) { return -1; } LOG(INFO) << "Open file success. " - << "fd: " << fd_ - << ", filename: " << fileName_; + << "fd: " << fd_ << ", filename: " << fileName_; if (openflags) { openFlags_.reset(new OpenFlags{*openflags}); @@ -157,26 +156,28 @@ int NebdFileEntity::Reopen(const ExtendAttribute& xattr) { } LOG(INFO) << "Reopen file success. 
" - << "fd: " << fd_ - << ", filename: " << fileName_; + << "fd: " << fd_ << ", filename: " << fileName_; return fd_; } int NebdFileEntity::Close(bool removeMeta) { CHECK(executor_ != nullptr) << "file entity is not inited. " << "filename: " << fileName_; - // 用于和其他用户请求互斥,避免文件被close后,请求发到后端导致返回失败 + // This is used to prevent conflicts with other user requests to ensure that + // a file is not closed, and requests sent to the backend after the file has + // been closed result in failures. WriteLockGuard writeLock(rwLock_); - // 这里的互斥锁是为了跟open请求互斥,以下情况可能导致close和open并发 - // part2重启,导致文件被reopen,然后由于超时,文件准备被close - // 此时用户发送了挂载卷请求对文件进行open + // The mutex lock here is to prevent conflicts with open requests. The + // following scenarios may lead to concurrent close and open operations: + // part2 restarts, causing the file to be reopened. Due to a timeout, the + // file is about to be closed. At this point, a user sends a request to + // mount a volume, which involves opening the file. std::unique_lock lock(fileStatusMtx_); if (status_ == NebdFileStatus::OPENED) { int ret = executor_->Close(fileInstance_.get()); if (ret < 0) { LOG(ERROR) << "Close file failed. " - << "fd: " << fd_ - << ", filename: " << fileName_; + << "fd: " << fd_ << ", filename: " << fileName_; return -1; } status_ = NebdFileStatus::CLOSED; @@ -186,15 +187,13 @@ int NebdFileEntity::Close(bool removeMeta) { int ret = metaFileManager_->RemoveFileMeta(fileName_); if (ret != 0) { LOG(ERROR) << "Remove file record failed. " - << "fd: " << fd_ - << ", filename: " << fileName_; + << "fd: " << fd_ << ", filename: " << fileName_; return -1; } status_ = NebdFileStatus::DESTROYED; } LOG(INFO) << "Close file success. " - << "fd: " << fd_ - << ", filename: " << fileName_ + << "fd: " << fd_ << ", filename: " << fileName_ << ", meta removed? " << (removeMeta ? "yes" : "no"); return 0; } @@ -204,8 +203,7 @@ int NebdFileEntity::Discard(NebdServerAioContext* aioctx) { int ret = executor_->Discard(fileInstance_.get(), aioctx); if (ret < 0) { LOG(ERROR) << "Discard file failed. " - << "fd: " << fd_ - << ", fileName: " << fileName_ + << "fd: " << fd_ << ", fileName: " << fileName_ << ", context: " << *aioctx; return -1; } @@ -219,8 +217,7 @@ int NebdFileEntity::AioRead(NebdServerAioContext* aioctx) { int ret = executor_->AioRead(fileInstance_.get(), aioctx); if (ret < 0) { LOG(ERROR) << "AioRead file failed. " - << "fd: " << fd_ - << ", fileName: " << fileName_ + << "fd: " << fd_ << ", fileName: " << fileName_ << ", context: " << *aioctx; return -1; } @@ -234,8 +231,7 @@ int NebdFileEntity::AioWrite(NebdServerAioContext* aioctx) { int ret = executor_->AioWrite(fileInstance_.get(), aioctx); if (ret < 0) { LOG(ERROR) << "AioWrite file failed. " - << "fd: " << fd_ - << ", fileName: " << fileName_ + << "fd: " << fd_ << ", fileName: " << fileName_ << ", context: " << *aioctx; return -1; } @@ -249,8 +245,7 @@ int NebdFileEntity::Flush(NebdServerAioContext* aioctx) { int ret = executor_->Flush(fileInstance_.get(), aioctx); if (ret < 0) { LOG(ERROR) << "Flush file failed. " - << "fd: " << fd_ - << ", fileName: " << fileName_ + << "fd: " << fd_ << ", fileName: " << fileName_ << ", context: " << *aioctx; return -1; } @@ -264,8 +259,7 @@ int NebdFileEntity::Extend(int64_t newsize) { int ret = executor_->Extend(fileInstance_.get(), newsize); if (ret < 0) { LOG(ERROR) << "Extend file failed. 
" - << "fd: " << fd_ - << ", newsize: " << newsize + << "fd: " << fd_ << ", newsize: " << newsize << ", fileName" << fileName_; return -1; } @@ -279,8 +273,7 @@ int NebdFileEntity::GetInfo(NebdFileInfo* fileInfo) { int ret = executor_->GetInfo(fileInstance_.get(), fileInfo); if (ret < 0) { LOG(ERROR) << "Get file info failed. " - << "fd: " << fd_ - << ", fileName" << fileName_; + << "fd: " << fd_ << ", fileName" << fileName_; return -1; } return 0; @@ -293,8 +286,7 @@ int NebdFileEntity::InvalidCache() { int ret = executor_->InvalidCache(fileInstance_.get()); if (ret < 0) { LOG(ERROR) << "Invalid cache failed. " - << "fd: " << fd_ - << ", fileName" << fileName_; + << "fd: " << fd_ << ", fileName" << fileName_; return -1; } return 0; @@ -318,8 +310,7 @@ int NebdFileEntity::ProcessSyncRequest(ProcessTask task) { int ret = task(); if (ret < 0) { LOG(ERROR) << "Process sync request failed. " - << "fd: " << fd_ - << ", fileName" << fileName_; + << "fd: " << fd_ << ", fileName" << fileName_; return -1; } return 0; @@ -340,18 +331,19 @@ int NebdFileEntity::ProcessAsyncRequest(ProcessTask task, return -1; } - // 对于异步请求,将此closure传给aiocontext,从而在请求返回时释放读锁 + // For asynchronous requests, pass this closure to aiocontext to release the + // read lock when the request returns done->SetClosure(aioctx->done); aioctx->done = doneGuard.release(); int ret = task(); if (ret < 0) { - // 如果请求失败,这里要主动释放锁,并将aiocontext还原回去 + // If the request fails, the lock should be actively released here and + // the aiocontext should be restored back brpc::ClosureGuard doneGuard(done); aioctx->done = done->GetClosure(); done->SetClosure(nullptr); LOG(ERROR) << "Process async request failed. " - << "fd: " << fd_ - << ", fileName" << fileName_; + << "fd: " << fd_ << ", fileName" << fileName_; return -1; } return 0; @@ -381,11 +373,11 @@ int NebdFileEntity::UpdateFileStatus(NebdFileInstancePtr fileInstance) { } bool NebdFileEntity::GuaranteeFileOpened() { - // 文件如果已经被用户close了,就不允许后面请求再自动打开进行操作了 + // If the file has already been closed by the user, subsequent requests for + // automatic opening for operation are not allowed if (status_ == NebdFileStatus::DESTROYED) { LOG(ERROR) << "File has been destroyed. " - << "filename: " << fileName_ - << ", fd: " << fd_; + << "filename: " << fileName_ << ", fd: " << fd_; return false; } @@ -393,8 +385,7 @@ bool NebdFileEntity::GuaranteeFileOpened() { int ret = Open(openFlags_.get()); if (ret != fd_) { LOG(ERROR) << "Get opened file failed. 
" - << "filename: " << fileName_ - << ", fd: " << fd_ + << "filename: " << fileName_ << ", fd: " << fd_ << ", ret: " << ret; return false; } @@ -404,8 +395,8 @@ bool NebdFileEntity::GuaranteeFileOpened() { std::ostream& operator<<(std::ostream& os, const NebdFileEntity& entity) { std::string standardTime; - TimeUtility::TimeStampToStandard( - entity.GetFileTimeStamp() / 1000, &standardTime); + TimeUtility::TimeStampToStandard(entity.GetFileTimeStamp() / 1000, + &standardTime); os << "[filename: " << entity.GetFileName() << ", fd: " << entity.GetFd() << ", status: " << NebdFileStatus2Str(entity.GetFileStatus()) << ", timestamp: " << standardTime << "]"; diff --git a/nebd/src/part2/file_entity.h b/nebd/src/part2/file_entity.h index fb1e1448d8..c57d90e2ad 100644 --- a/nebd/src/part2/file_entity.h +++ b/nebd/src/part2/file_entity.h @@ -25,42 +25,44 @@ #include #include -#include -#include + #include +#include +#include #include // NOLINT +#include #include -#include +#include "nebd/proto/client.pb.h" #include "nebd/src/common/rw_lock.h" #include "nebd/src/common/timeutility.h" #include "nebd/src/part2/define.h" -#include "nebd/src/part2/util.h" -#include "nebd/src/part2/request_executor.h" #include "nebd/src/part2/metafile_manager.h" -#include "nebd/proto/client.pb.h" +#include "nebd/src/part2/request_executor.h" +#include "nebd/src/part2/util.h" namespace nebd { namespace server { using nebd::common::BthreadRWLock; -using nebd::common::WriteLockGuard; using nebd::common::ReadLockGuard; using nebd::common::TimeUtility; +using nebd::common::WriteLockGuard; using OpenFlags = nebd::client::ProtoOpenFlags; class NebdFileInstance; class NebdRequestExecutor; using NebdFileInstancePtr = std::shared_ptr; -// 处理用户请求时需要加读写锁,避免close时仍有用户IO未处理完成 -// 对于异步IO来说,只有返回时才能释放读锁,所以封装成Closure -// 在发送异步请求前,将closure赋值给NebdServerAioContext +// When processing user requests, it is necessary to add a read write lock to +// avoid user IO still not being processed when closing For asynchronous IO, the +// read lock can only be released on return, so it is encapsulated as a Closure +// Assign the closure value to NebdServerAioContext before sending an +// asynchronous request class NebdRequestReadLockClosure : public Closure { public: explicit NebdRequestReadLockClosure(BthreadRWLock& rwLock) // NOLINT - : rwLock_(rwLock) - , done_(nullptr) { + : rwLock_(rwLock), done_(nullptr) { rwLock_.RDLock(); } ~NebdRequestReadLockClosure() {} @@ -71,13 +73,9 @@ class NebdRequestReadLockClosure : public Closure { rwLock_.Unlock(); } - void SetClosure(Closure* done) { - done_ = done; - } + void SetClosure(Closure* done) { done_ = done; } - Closure* GetClosure() { - return done_; - } + Closure* GetClosure() { return done_; } private: BthreadRWLock& rwLock_; @@ -96,134 +94,132 @@ class NebdFileEntity : public std::enable_shared_from_this { virtual ~NebdFileEntity(); /** - * 初始化文件实体 - * @param option: 初始化参数 - * @return 成功返回0, 失败返回-1 + * Initialize File Entity + * @param option: Initialize parameters + * @return returns 0 for success, -1 for failure */ virtual int Init(const NebdFileEntityOption& option); /** - * 打开文件 - * @return 成功返回fd,失败返回-1 + * Open File + * @return successfully returns fd, failure returns -1 */ virtual int Open(const OpenFlags* openflags); /** - * 重新open文件,如果之前的后端存储的连接还存在则复用之前的连接 - * 否则与后端存储建立新的连接 - * @param xattr: 文件reopen需要的信息 - * @return 成功返回fd,失败返回-1 + * Reopen the file and reuse the previous backend storage connection if it + * still exists Otherwise, establish a new connection with the backend + * storage + * 
@param xattr: Information required for file reopening + * @return successfully returns fd, failure returns -1 */ virtual int Reopen(const ExtendAttribute& xattr); /** - * 关闭文件 - * @param removeMeta: 是否要移除文件元数据记录,true表示移除,false表示不移除 - * 如果是part1传过来的close请求,此参数为true - * 如果是heartbeat manager发起的close请求,此参数为false - * @return 成功返回0,失败返回-1 + *Close File + * @param removeMeta: Do you want to remove the file metadata record? True + *means remove, false means not remove If it is a close request passed from + *part1, this parameter is true If it is a close request initiated by the + *heartbeat manager, this parameter is false + * @return returns 0 for success, -1 for failure */ virtual int Close(bool removeMeta); /** - * 给文件扩容 - * @param newsize: 新的文件大小 - * @return 成功返回0,失败返回-1 + * Expand file capacity + * @param newsize: New file size + * @return returns 0 for success, -1 for failure */ virtual int Extend(int64_t newsize); /** - * 获取文件信息 - * @param fileInfo[out]: 文件信息 - * @return 成功返回0,失败返回-1 + * Obtain file information + * @param fileInfo[out]: File information + * @return returns 0 for success, -1 for failure */ virtual int GetInfo(NebdFileInfo* fileInfo); /** - * 异步请求,回收指定区域空间 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to reclaim the specified area space + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int Discard(NebdServerAioContext* aioctx); /** - * 异步请求,读取指定区域内容 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to read the content of the specified area + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int AioRead(NebdServerAioContext* aioctx); /** - * 异步请求,写数据到指定区域 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request, writing data to a specified area + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int AioWrite(NebdServerAioContext* aioctx); /** - * 异步请求,flush文件缓存 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous requests, flush file caching + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int Flush(NebdServerAioContext* aioctx); /** - * 使指定文件缓存失效 - * @return 成功返回0,失败返回-1 + * Invalidate the specified file cache + * @return returns 0 for success, -1 for failure */ virtual int InvalidCache(); - virtual std::string GetFileName() const { - return fileName_; - } + virtual std::string GetFileName() const { return fileName_; } - virtual int GetFd() const { - return fd_; - } + virtual int GetFd() const { return fd_; } virtual void UpdateFileTimeStamp(uint64_t timestamp) { timeStamp_.store(timestamp); } - virtual uint64_t GetFileTimeStamp() const { - return timeStamp_.load(); - } + virtual uint64_t GetFileTimeStamp() const { return timeStamp_.load(); } - virtual NebdFileStatus GetFileStatus() const { - return status_.load(); - } + virtual NebdFileStatus GetFileStatus() const { return status_.load(); } private: /** - * 更新文件状态,包括元信息文件和内存状态 - * @param fileInstancea: open或reopen返回的文件上下文信息 - * @return: 成功返回0,失败返回-1 + * Update file status, including meta information files and memory status + * @param fileInstancea: The file context information returned by open or + * reopen + * @return: Success returns 0, failure returns -1 */ int UpdateFileStatus(NebdFileInstancePtr fileInstance); /** - * 请求统一处理函数 - * @param task: 实际请求执行的函数体 - * @return: 成功返回0,失败返回-1 + * Request Unified 
Processing Function + * @param task: The actual request to execute the function body + * @return: Success returns 0, failure returns -1 */ using ProcessTask = std::function; int ProcessSyncRequest(ProcessTask task); int ProcessAsyncRequest(ProcessTask task, NebdServerAioContext* aioctx); - // 确保文件处于opened状态,如果不是则尝试进行open - // 无法open或者open失败,则返回false, - // 如果文件处于open状态,则返回true + // Ensure that the file is in an open state, and if not, attempt to open it + // Unable to open or failed to open, returns false, + // If the file is in the open state, return true bool GuaranteeFileOpened(); private: - // 文件读写锁,处理请求前加读锁,close文件的时候加写锁 - // 避免close时还有请求未处理完 + // File read/write lock, apply read lock before processing requests, and + // apply write lock when closing files Avoiding pending requests during + // close BthreadRWLock rwLock_; - // 互斥锁,用于open、close之间的互斥 + // Mutex lock, used for mutual exclusion between open and close bthread::Mutex fileStatusMtx_; - // nebd server为该文件分配的唯一标识符 + // The unique identifier assigned by the nebd server to this file int fd_; - // 文件名称 + // File Name std::string fileName_; std::unique_ptr openFlags_; - // 文件当前状态,opened表示文件已打开,closed表示文件已关闭 + // The current state of the file, where 'opened' indicates that the file is + // open and 'closed' indicates that the file is closed std::atomic status_; - // 该文件上一次收到心跳时的时间戳 + // The timestamp of the last time the file received a heartbeat std::atomic timeStamp_; - // 文件在executor open时返回上下文信息,用于后续文件的请求处理 + // When the file is opened by the executor, contextual information is + // returned for subsequent file request processing NebdFileInstancePtr fileInstance_; - // 文件对应的executor的指针 + // Pointer to the executor corresponding to the file NebdRequestExecutor* executor_; - // 元数据持久化管理 + // Metadata Persistence Management MetaFileManagerPtr metaFileManager_; }; using NebdFileEntityPtr = std::shared_ptr; diff --git a/nebd/src/part2/file_manager.cpp b/nebd/src/part2/file_manager.cpp index 5c1dc2a15c..d139829f4f 100644 --- a/nebd/src/part2/file_manager.cpp +++ b/nebd/src/part2/file_manager.cpp @@ -34,8 +34,7 @@ namespace nebd { namespace server { NebdFileManager::NebdFileManager(MetaFileManagerPtr metaFileManager) - : isRunning_(false) - , metaFileManager_(metaFileManager) {} + : isRunning_(false), metaFileManager_(metaFileManager) {} NebdFileManager::~NebdFileManager() {} @@ -62,14 +61,14 @@ int NebdFileManager::Fini() { } int NebdFileManager::Load() { - // 从元数据文件中读取持久化的文件信息 + // Reading persistent file information from metadata files std::vector fileMetas; int ret = metaFileManager_->ListFileMeta(&fileMetas); if (ret < 0) { LOG(ERROR) << "Load file metas failed."; return ret; } - // 根据持久化的信息重新open文件 + // Reopen files based on persistent information int maxFd = 0; for (auto& fileMeta : fileMetas) { NebdFileEntityPtr entity = @@ -174,8 +173,7 @@ int NebdFileManager::InvalidCache(int fd) { return entity->InvalidCache(); } -NebdFileEntityPtr -NebdFileManager::GetFileEntity(int fd) { +NebdFileEntityPtr NebdFileManager::GetFileEntity(int fd) { ReadLockGuard readLock(rwLock_); auto iter = fileMap_.find(fd); if (iter == fileMap_.end()) { @@ -221,7 +219,7 @@ NebdFileEntityPtr NebdFileManager::GenerateFileEntity( } } - // 检测是否存在冲突的文件记录 + // Detect for conflicting file records auto iter = fileMap_.find(fd); if (iter != fileMap_.end()) { LOG(ERROR) << "File entity conflict. 
" diff --git a/nebd/src/part2/file_manager.h b/nebd/src/part2/file_manager.h index bac54fd1fa..f81a3d72d0 100644 --- a/nebd/src/part2/file_manager.h +++ b/nebd/src/part2/file_manager.h @@ -25,27 +25,28 @@ #include #include + #include +#include // NOLINT #include #include -#include // NOLINT #include -#include "nebd/src/common/rw_lock.h" +#include "nebd/proto/client.pb.h" #include "nebd/src/common/name_lock.h" +#include "nebd/src/common/rw_lock.h" #include "nebd/src/part2/define.h" -#include "nebd/src/part2/util.h" #include "nebd/src/part2/file_entity.h" #include "nebd/src/part2/metafile_manager.h" -#include "nebd/proto/client.pb.h" +#include "nebd/src/part2/util.h" namespace nebd { namespace server { using nebd::common::NameLock; using nebd::common::NameLockGuard; -using nebd::common::WriteLockGuard; using nebd::common::ReadLockGuard; +using nebd::common::WriteLockGuard; using OpenFlags = nebd::client::ProtoOpenFlags; using FileEntityMap = std::unordered_map; @@ -54,119 +55,124 @@ class NebdFileManager { explicit NebdFileManager(MetaFileManagerPtr metaFileManager); virtual ~NebdFileManager(); /** - * 停止FileManager并释放FileManager资源 - * @return 成功返回0,失败返回-1 + * Stop FileManager and release FileManager resources + * @return returns 0 for success, -1 for failure */ virtual int Fini(); /** - * 启动FileManager - * @return 成功返回0,失败返回-1 + * Start FileManager + * @return returns 0 for success, -1 for failure */ virtual int Run(); /** - * 打开文件 - * @param filename: 文件的filename - * @return 成功返回fd,失败返回-1 + * Open File + * @param filename: The filename of the file + * @return successfully returns fd, failure returns -1 */ virtual int Open(const std::string& filename, const OpenFlags* flags); /** - * 关闭文件 - * @param fd: 文件的fd - * @param removeRecord: 是否要移除文件记录,true表示移除,false表示不移除 - * 如果是part1传过来的close请求,此参数为true - * 如果是heartbeat manager发起的close请求,此参数为false - * @return 成功返回0,失败返回-1 + * Close File + * @param fd: fd of the file + * @param removeRecord: Do you want to remove the file record? 
True means + * remove, false means not remove If it is a close request passed from + * part1, this parameter is true If it is a close request initiated by the + * heartbeat manager, this parameter is false + * @return returns 0 for success, -1 for failure */ virtual int Close(int fd, bool removeRecord); /** - * 给文件扩容 - * @param fd: 文件的fd - * @param newsize: 新的文件大小 - * @return 成功返回0,失败返回-1 + * Expand file capacity + * @param fd: fd of the file + * @param newsize: New file size + * @return returns 0 for success, -1 for failure */ virtual int Extend(int fd, int64_t newsize); /** - * 获取文件信息 - * @param fd: 文件的fd - * @param fileInfo[out]: 文件信息 - * @return 成功返回0,失败返回-1 + * Obtain file information + * @param fd: fd of the file + * @param fileInfo[out]: File information + * @return returns 0 for success, -1 for failure */ virtual int GetInfo(int fd, NebdFileInfo* fileInfo); /** - * 异步请求,回收指定区域空间 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to reclaim the specified area space + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int Discard(int fd, NebdServerAioContext* aioctx); /** - * 异步请求,读取指定区域内容 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to read the content of the specified area + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int AioRead(int fd, NebdServerAioContext* aioctx); /** - * 异步请求,写数据到指定区域 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request, writing data to a specified area + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int AioWrite(int fd, NebdServerAioContext* aioctx); /** - * 异步请求,flush文件缓存 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous requests, flush file caching + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int Flush(int fd, NebdServerAioContext* aioctx); /** - * 使指定文件缓存失效 - * @param fd: 文件的fd - * @return 成功返回0,失败返回-1 + * Invalidate the specified file cache + * @param fd: fd of the file + * @return returns 0 for success, -1 for failure */ virtual int InvalidCache(int fd); - // 根据fd从map中获取指定的entity - // 如果entity已存在,返回entity指针,否则返回nullptr + // Obtain the specified entity from the map based on fd + // If entity already exists, return entity pointer; otherwise, return + // nullptr virtual NebdFileEntityPtr GetFileEntity(int fd); virtual FileEntityMap GetFileEntityMap(); - // 将所有文件状态输出到字符串 + // Output all file states to a string std::string DumpAllFileStatus(); // set public for test - // 启动时从metafile加载文件记录,并reopen文件 + // Load file records from metafile at startup and reopen the file int Load(); private: - // 分配新的可用的fd,fd不允许和已经存在的重复 - // 成功返回的可用fd,失败返回-1 + // Assign new available fds, fds are not allowed to duplicate existing ones + // Successfully returned available fd, failed returned -1 int GenerateValidFd(); - // 根据文件名获取file entity - // 如果entity存在,直接返回entity指针 - // 如果entity不存在,则创建新的entity,并插入map,然后返回 + // Obtain file entity based on file name + // If entity exists, directly return the entity pointer + // If the entity does not exist, create a new entity, insert a map, and then + // return NebdFileEntityPtr GetOrCreateFileEntity(const 
std::string& fileName); - // 根据fd和文件名生成file entity, - // 如果fd对于的entity已存在,直接返回entity指针 - // 如果entity不存在,则生成新的entity,并插入map,然后返回 + // Generate file entity based on fd and file name, + // If fd already exists for entity, directly return the entity pointer + // If the entity does not exist, generate a new entity, insert a map, and + // then return NebdFileEntityPtr GenerateFileEntity(int fd, const std::string& fileName); - // 删除指定fd对应的entity + // Delete the entity corresponding to the specified fd void RemoveEntity(int fd); private: - // 当前filemanager的运行状态,true表示正在运行,false标为未运行 + // The current running status of the filemanager, where true indicates + // running and false indicates not running std::atomic isRunning_; - // 文件名锁,对同名文件加锁 + // File name lock, lock files with the same name NameLock nameLock_; - // fd分配器 + // Fd distributor FdAllocator fdAlloc_; - // nebd server 文件记录管理 + // nebd server file record management MetaFileManagerPtr metaFileManager_; - // file map 读写保护锁 + // file map read write protection lock RWLock rwLock_; - // 文件fd和文件实体的映射 + // Mapping of file fd and file entities FileEntityMap fileMap_; }; using NebdFileManagerPtr = std::shared_ptr; diff --git a/nebd/src/part2/heartbeat_manager.cpp b/nebd/src/part2/heartbeat_manager.cpp index 4516874807..739bf586a7 100644 --- a/nebd/src/part2/heartbeat_manager.cpp +++ b/nebd/src/part2/heartbeat_manager.cpp @@ -20,11 +20,12 @@ * Author: yangyaokai */ -#include +#include "nebd/src/part2/heartbeat_manager.h" + #include +#include #include "nebd/src/common/timeutility.h" -#include "nebd/src/part2/heartbeat_manager.h" namespace nebd { namespace server { @@ -69,7 +70,7 @@ void HeartbeatManager::UpdateNebdClientInfo(int pid, const std::string& version, const auto& iter = nebdClients_.find(pid); if (iter == nebdClients_.end()) { nebdClients_[pid] = - std::make_shared(pid, version, timestamp); + std::make_shared(pid, version, timestamp); nebdClientNum_ << 1; } else { nebdClients_[pid]->timeStamp = timestamp; @@ -79,8 +80,8 @@ void HeartbeatManager::UpdateNebdClientInfo(int pid, const std::string& version, } void HeartbeatManager::CheckTimeoutFunc() { - while (sleeper_.wait_for( - std::chrono::milliseconds(checkTimeoutIntervalMs_))) { + while ( + sleeper_.wait_for(std::chrono::milliseconds(checkTimeoutIntervalMs_))) { LOG_EVERY_N(INFO, 60 * 1000 / checkTimeoutIntervalMs_) << "Checking timeout, file status: " << fileManager_->DumpAllFileStatus(); @@ -107,24 +108,24 @@ void HeartbeatManager::CheckTimeoutFunc() { bool HeartbeatManager::CheckNeedClosed(NebdFileEntityPtr entity) { uint64_t curTime = TimeUtility::GetTimeofDayMs(); uint64_t interval = curTime - entity->GetFileTimeStamp(); - // 文件如果是opened状态,并且已经超时,则需要调用close - bool needClose = entity->GetFileStatus() == NebdFileStatus::OPENED - && interval > (uint64_t)1000 * heartbeatTimeoutS_; + // If the file is in an open state and has timed out, you need to call close + bool needClose = entity->GetFileStatus() == NebdFileStatus::OPENED && + interval > (uint64_t)1000 * heartbeatTimeoutS_; return needClose; } std::ostream& operator<<(std::ostream& os, NebdClientInfo* info) { std::string standardTime; TimeUtility::TimeStampToStandard(info->timeStamp / 1000, &standardTime); - os << "pid: " << info->pid << ", version: " - << info->version.GetValueByKey(kVersion) + os << "pid: " << info->pid + << ", version: " << info->version.GetValueByKey(kVersion) << ", last time received heartbeat: " << standardTime; return os; } void HeartbeatManager::RemoveTimeoutNebdClient() { WriteLockGuard writeLock(rwLock_); 
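CheckNeedClosed above converts heartbeatTimeoutS to milliseconds before comparing timestamps, and the loop below applies the same idea to the client map, erasing every record whose last heartbeat is too old. In isolation, the erase-while-iterating idiom it relies on looks like this (a sketch assuming records that carry a millisecond timeStamp, as NebdClientInfo does):

#include <cstdint>
#include <map>
#include <memory>

// Evict map entries whose timestamp is older than timeoutMs. Since C++11,
// map::erase returns the iterator that follows the erased element, which
// keeps the loop iterator valid.
template <typename Info>
void EvictTimedOut(std::map<int, std::shared_ptr<Info>>* clients,
                   uint64_t nowMs, uint64_t timeoutMs) {
    for (auto iter = clients->begin(); iter != clients->end();) {
        if (nowMs - iter->second->timeStamp > timeoutMs) {
            iter = clients->erase(iter);
        } else {
            ++iter;
        }
    }
}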
- auto iter = nebdClients_.begin(); + auto iter = nebdClients_.begin(); while (iter != nebdClients_.end()) { uint64_t curTime = TimeUtility::GetTimeofDayMs(); uint64_t interval = curTime - iter->second->timeStamp; diff --git a/nebd/src/part2/heartbeat_manager.h b/nebd/src/part2/heartbeat_manager.h index 73943bc4bc..69b4c3eed2 100644 --- a/nebd/src/part2/heartbeat_manager.h +++ b/nebd/src/part2/heartbeat_manager.h @@ -24,32 +24,34 @@ #define NEBD_SRC_PART2_HEARTBEAT_MANAGER_H_ #include -#include // NOLINT + #include -#include #include +#include #include +#include // NOLINT #include "nebd/src/common/interrupt_sleep.h" #include "nebd/src/common/rw_lock.h" #include "nebd/src/common/stringstatus.h" -#include "nebd/src/part2/file_manager.h" #include "nebd/src/part2/define.h" +#include "nebd/src/part2/file_manager.h" namespace nebd { namespace server { using nebd::common::InterruptibleSleeper; +using nebd::common::ReadLockGuard; using nebd::common::RWLock; using nebd::common::WriteLockGuard; -using nebd::common::ReadLockGuard; struct HeartbeatManagerOption { - // 文件心跳超时时间(单位:秒) + // File heartbeat timeout (in seconds) uint32_t heartbeatTimeoutS; - // 心跳超时检测线程的检测间隔(时长:毫秒) + // Heartbeat timeout detection thread detection interval (duration: + // milliseconds) uint32_t checkTimeoutIntervalMs; - // filemanager 对象指针 + // filemanager object pointer NebdFileManagerPtr fileManager; }; @@ -57,42 +59,44 @@ const char kNebdClientMetricPrefix[] = "nebd_client_pid_"; const char kVersion[] = "version"; struct NebdClientInfo { - NebdClientInfo(int pid2, const std::string& version2, - uint64_t timeStamp2) : - pid(pid2), timeStamp(timeStamp2) { + NebdClientInfo(int pid2, const std::string& version2, uint64_t timeStamp2) + : pid(pid2), timeStamp(timeStamp2) { version.ExposeAs(kNebdClientMetricPrefix, - std::to_string(pid2) + "_version"); + std::to_string(pid2) + "_version"); version.Set(kVersion, version2); version.Update(); } - // nebd client的进程号 + // Process number of nebd client int pid; - // nebd version的metric + // The metric of nebd version nebd::common::StringStatus version; - // 上次心跳的时间戳 + // Time stamp of last heartbeat uint64_t timeStamp; }; -// 负责文件心跳超时管理 +// Responsible for managing file heartbeat timeout class HeartbeatManager { public: explicit HeartbeatManager(HeartbeatManagerOption option) - : isRunning_(false) - , heartbeatTimeoutS_(option.heartbeatTimeoutS) - , checkTimeoutIntervalMs_(option.checkTimeoutIntervalMs) - , fileManager_(option.fileManager) { + : isRunning_(false), + heartbeatTimeoutS_(option.heartbeatTimeoutS), + checkTimeoutIntervalMs_(option.checkTimeoutIntervalMs), + fileManager_(option.fileManager) { nebdClientNum_.expose("nebd_client_num"); } virtual ~HeartbeatManager() {} - // 启动心跳检测线程 + // Start Heartbeat Detection Thread virtual int Run(); - // 停止心跳检测线程 + // Stop Heartbeat Detection Thread virtual int Fini(); - // part2收到心跳后,会通过该接口更新心跳中包含的文件在内存中记录的时间戳 - // 心跳检测线程会根据该时间戳判断是否需要关闭文件 + // After receiving the heartbeat, part2 will update the timestamp of the + // files included in the heartbeat recorded in memory through this interface + // The heartbeat detection thread will determine whether the file needs to + // be closed based on this timestamp virtual bool UpdateFileTimestamp(int fd, uint64_t timestamp); - // part2收到心跳后,会通过该接口更新part1的时间戳 + // After receiving the heartbeat, part2 will update the timestamp of part1 + // through this interface virtual void UpdateNebdClientInfo(int pid, const std::string& version, uint64_t timestamp); std::map> GetNebdClients() { @@ -101,31 
+105,32 @@ class HeartbeatManager { } private: - // 心跳检测线程的函数执行体 + // Function execution body of heartbeat detection thread void CheckTimeoutFunc(); - // 判断文件是否需要close + // Determine if the file needs to be closed bool CheckNeedClosed(NebdFileEntityPtr entity); - // 从内存中删除已经超时的nebdClientInfo + // Delete nebdClientInfo that has timed out from memory void RemoveTimeoutNebdClient(); private: - // 当前heartbeatmanager的运行状态,true表示正在运行,false标为未运行 + // The current running status of heartbeatmanager, where true indicates + // running and false indicates not running std::atomic isRunning_; - // 文件心跳超时时长 + // File heartbeat timeout duration uint32_t heartbeatTimeoutS_; - // 心跳超时检测线程的检测时间间隔 + // Heartbeat timeout detection thread detection time interval uint32_t checkTimeoutIntervalMs_; - // 心跳检测线程 + // Heartbeat detection thread std::thread checkTimeoutThread_; - // 心跳检测线程的sleeper + // sleeper for Heartbeat Detection Thread InterruptibleSleeper sleeper_; - // filemanager 对象指针 + // filemanager object pointer NebdFileManagerPtr fileManager_; - // nebd client的信息 + // Information on nebd client std::map> nebdClients_; - // nebdClient的计数器 + // Counters for nebdClient bvar::Adder nebdClientNum_; - // file map 读写保护锁 + // file map read write protection lock RWLock rwLock_; }; diff --git a/nebd/src/part2/main.cpp b/nebd/src/part2/main.cpp index e72bb27cbf..0780796ae6 100644 --- a/nebd/src/part2/main.cpp +++ b/nebd/src/part2/main.cpp @@ -20,31 +20,32 @@ * Author: hzwuhongsong */ +#include #include #include -#include + #include "nebd/src/part2/nebd_server.h" #include "src/common/log_util.h" DEFINE_string(confPath, "/etc/nebd/nebd-server.conf", "nebd server conf path"); int main(int argc, char* argv[]) { - // 解析参数 + // Parsing parameters google::ParseCommandLineFlags(&argc, &argv, false); curve::common::DisableLoggingToStdErr(); google::InitGoogleLogging(argv[0]); std::string confPath = FLAGS_confPath.c_str(); - // 启动nebd server + // Start nebd server auto server = std::make_shared<::nebd::server::NebdServer>(); int initRes = server->Init(confPath); if (initRes < 0) { - LOG(ERROR) << "init nebd server fail"; + LOG(ERROR) << "init nebd server fail"; return -1; } server->RunUntilAskedToQuit(); - // 停止nebd server + // Stop nebd server server->Fini(); google::ShutdownGoogleLogging(); diff --git a/nebd/src/part2/metafile_manager.cpp b/nebd/src/part2/metafile_manager.cpp index 6fcdc5c94b..03c5f1d366 100644 --- a/nebd/src/part2/metafile_manager.cpp +++ b/nebd/src/part2/metafile_manager.cpp @@ -20,19 +20,18 @@ * Author: charisu */ +#include "nebd/src/part2/metafile_manager.h" + #include #include -#include "nebd/src/part2/metafile_manager.h" #include "nebd/src/part2/request_executor.h" namespace nebd { namespace server { NebdMetaFileManager::NebdMetaFileManager() - : metaFilePath_("") - , wrapper_(nullptr) - , parser_(nullptr) {} + : metaFilePath_(""), wrapper_(nullptr), parser_(nullptr) {} NebdMetaFileManager::~NebdMetaFileManager() {} @@ -52,9 +51,10 @@ int NebdMetaFileManager::Init(const NebdMetaFileManagerOption& option) { int NebdMetaFileManager::UpdateFileMeta(const std::string& fileName, const NebdFileMeta& fileMeta) { WriteLockGuard writeLock(rwLock_); - bool needUpdate = metaCache_.find(fileName) == metaCache_.end() - || fileMeta != metaCache_[fileName]; - // 如果元数据信息没发生变更,则不需要写文件 + bool needUpdate = metaCache_.find(fileName) == metaCache_.end() || + fileMeta != metaCache_[fileName]; + // If the metadata information has not changed, there is no need to write a + // file if (!needUpdate) { return 0; } @@ -105,29 
+105,29 @@ int NebdMetaFileManager::UpdateMetaFile(const FileMetaMap& fileMetas) { } int NebdMetaFileManager::AtomicWriteFile(const Json::Value& root) { - // 写入tmp文件 + // Write tmp file std::string tmpFilePath = metaFilePath_ + ".tmp"; - int fd = wrapper_->open(tmpFilePath.c_str(), O_CREAT|O_RDWR, 0644); - // open文件失败 + int fd = wrapper_->open(tmpFilePath.c_str(), O_CREAT | O_RDWR, 0644); + // Open file failed if (fd <= 0) { LOG(ERROR) << "Open tmp file " << tmpFilePath << " fail"; return -1; } - // 写入 + // Write std::string jsonString = root.toStyledString(); - int writeSize = wrapper_->pwrite(fd, jsonString.c_str(), - jsonString.size(), 0); + int writeSize = + wrapper_->pwrite(fd, jsonString.c_str(), jsonString.size(), 0); wrapper_->close(fd); if (writeSize != static_cast(jsonString.size())) { LOG(ERROR) << "Write tmp file " << tmpFilePath << " fail"; return -1; } - // 重命名 + // Rename int res = wrapper_->rename(tmpFilePath.c_str(), metaFilePath_.c_str()); if (res != 0) { - LOG(ERROR) << "rename file " << tmpFilePath << " to " - << metaFilePath_ << " fail"; + LOG(ERROR) << "rename file " << tmpFilePath << " to " << metaFilePath_ + << " fail"; return -1; } return 0; @@ -138,7 +138,8 @@ int NebdMetaFileManager::LoadFileMeta() { FileMetaMap tempMetas; std::ifstream in(metaFilePath_, std::ios::binary); if (!in) { - // 这里不应该返回错误,第一次初始化的时候文件可能还未创建 + // There should be no error returned here, the file may not have been + // created during the first initialization LOG(WARNING) << "File not exist: " << metaFilePath_; return 0; } @@ -149,8 +150,7 @@ int NebdMetaFileManager::LoadFileMeta() { bool ok = Json::parseFromStream(reader, in, &root, &errs); in.close(); if (!ok) { - LOG(ERROR) << "Parse meta file " << metaFilePath_ - << " fail: " << errs; + LOG(ERROR) << "Parse meta file " << metaFilePath_ << " fail: " << errs; return -1; } @@ -173,31 +173,28 @@ int NebdMetaFileManager::ListFileMeta(std::vector* fileMetas) { return 0; } -int NebdMetaFileParser::Parse(Json::Value root, - FileMetaMap* fileMetas) { +int NebdMetaFileParser::Parse(Json::Value root, FileMetaMap* fileMetas) { if (!fileMetas) { LOG(ERROR) << "the argument fileMetas is null pointer"; return -1; } fileMetas->clear(); - // 检验crc + // Check crc if (root[kCRC].isNull()) { - LOG(ERROR) << "Parse json: " << root - << " fail, no crc"; + LOG(ERROR) << "Parse json: " << root << " fail, no crc"; return -1; } uint32_t crcValue = root[kCRC].asUInt(); root.removeMember(kCRC); std::string jsonString = root.toStyledString(); - uint32_t crcCalc = nebd::common::CRC32(jsonString.c_str(), - jsonString.size()); + uint32_t crcCalc = + nebd::common::CRC32(jsonString.c_str(), jsonString.size()); if (crcValue != crcCalc) { - LOG(ERROR) << "Parse json: " << root - << " fail, crc not match"; + LOG(ERROR) << "Parse json: " << root << " fail, crc not match"; return -1; } - // 没有volume字段 + // No volume field const auto& volumes = root[kVolumes]; if (volumes.isNull()) { LOG(WARNING) << "No volumes in json: " << root; @@ -208,22 +205,21 @@ int NebdMetaFileParser::Parse(Json::Value root, NebdFileMeta meta; if (volume[kFileName].isNull()) { - LOG(ERROR) << "Parse json: " << root - << " fail, no filename"; + LOG(ERROR) << "Parse json: " << root << " fail, no filename"; return -1; } else { meta.fileName = volume[kFileName].asString(); } if (volume[kFd].isNull()) { - LOG(ERROR) << "Parse json: " << root - << " fail, no fd"; + LOG(ERROR) << "Parse json: " << root << " fail, no fd"; return -1; } else { meta.fd = volume[kFd].asInt(); } - // 除了filename和fd的部分统一放到xattr里面 + 
// Except for the parts of filename and fd, they are uniformly placed in + // xattr Json::Value::Members mem = volume.getMemberNames(); ExtendAttribute xattr; for (auto iter = mem.begin(); iter != mem.end(); iter++) { @@ -238,13 +234,13 @@ int NebdMetaFileParser::Parse(Json::Value root, } Json::Value NebdMetaFileParser::ConvertFileMetasToJson( - const FileMetaMap& fileMetas) { + const FileMetaMap& fileMetas) { Json::Value volumes; for (const auto& meta : fileMetas) { Json::Value volume; volume[kFileName] = meta.second.fileName; volume[kFd] = meta.second.fd; - for (const auto &item : meta.second.xattr) { + for (const auto& item : meta.second.xattr) { volume[item.first] = item.second; } volumes.append(volume); @@ -252,7 +248,7 @@ Json::Value NebdMetaFileParser::ConvertFileMetasToJson( Json::Value root; root[kVolumes] = volumes; - // 计算crc + // Calculate crc std::string jsonString = root.toStyledString(); uint32_t crc = nebd::common::CRC32(jsonString.c_str(), jsonString.size()); root[kCRC] = crc; diff --git a/nebd/src/part2/metafile_manager.h b/nebd/src/part2/metafile_manager.h index a46255a467..35200fa9bc 100644 --- a/nebd/src/part2/metafile_manager.h +++ b/nebd/src/part2/metafile_manager.h @@ -24,16 +24,17 @@ #define NEBD_SRC_PART2_METAFILE_MANAGER_H_ #include -#include -#include -#include + #include +#include // NOLINT +#include #include // NOLINT -#include // NOLINT +#include +#include -#include "nebd/src/common/rw_lock.h" -#include "nebd/src/common/posix_wrapper.h" #include "nebd/src/common/crc32.h" +#include "nebd/src/common/posix_wrapper.h" +#include "nebd/src/common/rw_lock.h" #include "nebd/src/part2/define.h" #include "nebd/src/part2/util.h" @@ -41,9 +42,9 @@ namespace nebd { namespace server { using nebd::common::PosixWrapper; +using nebd::common::ReadLockGuard; using nebd::common::RWLock; using nebd::common::WriteLockGuard; -using nebd::common::ReadLockGuard; using FileMetaMap = std::unordered_map; const char kVolumes[] = "volumes"; @@ -53,17 +54,15 @@ const char kCRC[] = "crc"; class NebdMetaFileParser { public: - int Parse(Json::Value root, - FileMetaMap* fileMetas); + int Parse(Json::Value root, FileMetaMap* fileMetas); Json::Value ConvertFileMetasToJson(const FileMetaMap& fileMetas); }; struct NebdMetaFileManagerOption { std::string metaFilePath = ""; - std::shared_ptr wrapper - = std::make_shared(); - std::shared_ptr parser - = std::make_shared(); + std::shared_ptr wrapper = std::make_shared(); + std::shared_ptr parser = + std::make_shared(); }; class NebdMetaFileManager { @@ -71,37 +70,38 @@ class NebdMetaFileManager { NebdMetaFileManager(); virtual ~NebdMetaFileManager(); - // 初始化,主要从文件读取元数据信息并加载到内存 + // Initialization, mainly reading metadata information from files and + // loading it into memory virtual int Init(const NebdMetaFileManagerOption& option); - // 列出文件记录 + // List file records virtual int ListFileMeta(std::vector* fileMetas); - // 更新文件元数据 + // Update file metadata virtual int UpdateFileMeta(const std::string& fileName, const NebdFileMeta& fileMeta); - // 删除文件元数据 + // Delete file metadata virtual int RemoveFileMeta(const std::string& fileName); private: - // 原子写文件 + // Atomic writing file int AtomicWriteFile(const Json::Value& root); - // 更新元数据文件并更新内存缓存 + // Update metadata files and update memory cache int UpdateMetaFile(const FileMetaMap& fileMetas); - // 初始化从持久化文件读取到内存 + // Initialize reading from persistent files to memory int LoadFileMeta(); private: - // 元数据文件路径 + // Meta Data File Path std::string metaFilePath_; - // 文件系统操作封装 + // File system operation 
encapsulation std::shared_ptr wrapper_; - // 用于解析Json格式的元数据 + // Metadata for parsing Json format std::shared_ptr parser_; - // MetaFileManager 线程安全读写锁 + // MetaFileManager thread safe read write lock RWLock rwLock_; - // meta文件内存缓存 + // Meta file memory cache FileMetaMap metaCache_; }; using MetaFileManagerPtr = std::shared_ptr; diff --git a/nebd/src/part2/nebd_server.cpp b/nebd/src/part2/nebd_server.cpp index 74e5e2329d..89baaad537 100644 --- a/nebd/src/part2/nebd_server.cpp +++ b/nebd/src/part2/nebd_server.cpp @@ -20,19 +20,22 @@ * Author: lixiaocui */ +#include "nebd/src/part2/nebd_server.h" + #include + #include + #include "nebd/src/common/file_lock.h" -#include "nebd/src/part2/nebd_server.h" +#include "nebd/src/common/nebd_version.h" #include "nebd/src/part2/file_service.h" #include "nebd/src/part2/heartbeat_service.h" -#include "nebd/src/common/nebd_version.h" namespace nebd { namespace server { -int NebdServer::Init(const std::string &confPath, - std::shared_ptr curveClient) { +int NebdServer::Init(const std::string& confPath, + std::shared_ptr curveClient) { if (isRunning_) { LOG(WARNING) << "NebdServer is inited"; return -1; @@ -75,7 +78,7 @@ int NebdServer::Init(const std::string &confPath, LOG(INFO) << "NebdServer init heartbeatManager ok"; LOG(INFO) << "NebdServer init ok"; - // 暴露版本信息 + // Expose version information LOG(INFO) << "nebd version: " << nebd::common::NebdVersion(); nebd::common::ExposeNebdVersion(); return 0; @@ -100,7 +103,7 @@ int NebdServer::Fini() { } if (curveClient_ != nullptr) { - curveClient_ ->UnInit(); + curveClient_->UnInit(); } if (heartbeatManager_ != nullptr) { @@ -110,7 +113,7 @@ int NebdServer::Fini() { return 0; } -bool NebdServer::LoadConfFromFile(const std::string &confPath) { +bool NebdServer::LoadConfFromFile(const std::string& confPath) { conf_.SetConfigPath(confPath); return conf_.LoadConfig(); } @@ -172,16 +175,16 @@ MetaFileManagerPtr NebdServer::InitMetaFileManager() { return metaFileManager; } -bool NebdServer::InitHeartbeatManagerOption(HeartbeatManagerOption *opt) { - bool getOk = conf_.GetUInt32Value( - HEARTBEATTIMEOUTSEC, &opt->heartbeatTimeoutS); +bool NebdServer::InitHeartbeatManagerOption(HeartbeatManagerOption* opt) { + bool getOk = + conf_.GetUInt32Value(HEARTBEATTIMEOUTSEC, &opt->heartbeatTimeoutS); if (false == getOk) { LOG(ERROR) << "NebdServer get heartbeat.timeout.sec fail"; return false; } - getOk = conf_.GetUInt32Value( - HEARTBEATCHECKINTERVALMS, &opt->checkTimeoutIntervalMs); + getOk = conf_.GetUInt32Value(HEARTBEATCHECKINTERVALMS, + &opt->checkTimeoutIntervalMs); if (false == getOk) { LOG(ERROR) << "NebdServer get heartbeat.check.interval.ms fail"; return false; @@ -212,24 +215,24 @@ bool NebdServer::InitHeartbeatManager() { bool NebdServer::StartServer() { // add service bool returnRpcWhenIoError; - bool ret = conf_.GetBoolValue(RESPONSERETURNRPCWHENIOERROR, - &returnRpcWhenIoError); + bool ret = + conf_.GetBoolValue(RESPONSERETURNRPCWHENIOERROR, &returnRpcWhenIoError); if (false == ret) { LOG(ERROR) << "get " << RESPONSERETURNRPCWHENIOERROR << " fail"; return false; } NebdFileServiceImpl fileService(fileManager_, returnRpcWhenIoError); - int addFileServiceRes = server_.AddService( - &fileService, brpc::SERVER_DOESNT_OWN_SERVICE); + int addFileServiceRes = + server_.AddService(&fileService, brpc::SERVER_DOESNT_OWN_SERVICE); if (0 != addFileServiceRes) { LOG(ERROR) << "NebdServer add file service fail"; return false; } NebdHeartbeatServiceImpl heartbeatService(heartbeatManager_); - addFileServiceRes = 
server_.AddService( - &heartbeatService, brpc::SERVER_DOESNT_OWN_SERVICE); + addFileServiceRes = + server_.AddService(&heartbeatService, brpc::SERVER_DOESNT_OWN_SERVICE); if (0 != addFileServiceRes) { LOG(ERROR) << "NebdServer add heartbeat service fail"; return false; @@ -238,17 +241,17 @@ bool NebdServer::StartServer() { // start brcp server brpc::ServerOptions option; option.idle_timeout_sec = -1; - // 获取文件锁 + // Obtain file lock common::FileLock fileLock(listenAddress_ + ".lock"); if (fileLock.AcquireFileLock() != 0) { LOG(ERROR) << "Address already in use"; return -1; } - int startBrpcServerRes = server_.StartAtSockFile( - listenAddress_.c_str(), &option); + int startBrpcServerRes = + server_.StartAtSockFile(listenAddress_.c_str(), &option); if (0 != startBrpcServerRes) { LOG(ERROR) << "NebdServer start brpc server fail, res=" - << startBrpcServerRes; + << startBrpcServerRes; return false; } diff --git a/nebd/src/part2/nebd_server.h b/nebd/src/part2/nebd_server.h index c4ee40f23e..8a1275d23e 100644 --- a/nebd/src/part2/nebd_server.h +++ b/nebd/src/part2/nebd_server.h @@ -24,8 +24,10 @@ #define NEBD_SRC_PART2_NEBD_SERVER_H_ #include -#include + #include +#include + #include "nebd/src/common/configuration.h" #include "nebd/src/part2/file_manager.h" #include "nebd/src/part2/heartbeat_manager.h" @@ -34,17 +36,17 @@ namespace nebd { namespace server { -using ::nebd::common::Configuration; using ::curve::client::CurveClient; +using ::nebd::common::Configuration; class NebdServer { public: NebdServer() {} virtual ~NebdServer() {} - int Init(const std::string &confPath, - std::shared_ptr curveClient = - std::make_shared()); + int Init(const std::string& confPath, + std::shared_ptr curveClient = + std::make_shared()); int RunUntilAskedToQuit(); @@ -52,62 +54,64 @@ class NebdServer { private: /** - * @brief 从配置文件加载配置项 - * @param[in] confPath 配置文件路径 - * @return false-加载配置文件失败 true-加载配置文件成功 + * @brief Load configuration items from the configuration file + * @param[in] confPath Configuration file path + * @return false-Failed to load configuration file, true-Successfully loaded + * configuration file */ - bool LoadConfFromFile(const std::string &confPath); + bool LoadConfFromFile(const std::string& confPath); /** - * @brief 初始化NebdFileManager - * @return false-初始化失败 true-初始化成功 + * @brief Initialize NebdFileManager + * @return false-initialization failed, true-initialization successful */ bool InitFileManager(); /** - * @brief 初始化request_executor_curve - * @return false-初始化失败 true-初始化成功 + * @brief initialization request_executor_curve + * @return false-initialization failed, true-initialization successful */ bool InitCurveRequestExecutor(); /** - * @brief 初始化NebdMetaFileManager - * @return nullptr-初始化不成功 否则表示初始化成功 + * @brief Initialize NebdMetaFileManager + * @return nullptr - initialization failed; otherwise, it indicates + * successful initialization */ MetaFileManagerPtr InitMetaFileManager(); /** - * @brief 初始化HeartbeatManagerOption + * @brief Initialize HeartbeatManagerOption * @param[out] opt - * @return false-初始化失败 true-初始化成功 + * @return false-initialization failed, true-initialization successful */ - bool InitHeartbeatManagerOption(HeartbeatManagerOption *opt); + bool InitHeartbeatManagerOption(HeartbeatManagerOption* opt); /** - * @brief 初始化HeartbeatManager - * @return false-初始化失败 true-初始化成功 + * @brief Initialize HeartbeatManager + * @return false-initialization failed, true-initialization successful */ bool InitHeartbeatManager(); /** - * @brief 启动brpc service - * @return 
false-启动service失败 true-启动service成功 + * @brief Start brpc service + * @return false-Failed to start service, true-Successfully started service */ bool StartServer(); private: - // 配置项 + // Configuration items Configuration conf_; - // NebdServer监听地址 + // NebdServer listening address std::string listenAddress_; - // NebdServer是否处于running状态 - bool isRunning_ = false; + // Whether NebdServer is running + bool isRunning_ = false; // brpc server brpc::Server server_; - // 用于接受和处理client端的各种请求 + // Used to accept and process various requests from the client side std::shared_ptr fileManager_; - // 负责文件心跳超时处理 + // Responsible for handling file heartbeat timeout std::shared_ptr heartbeatManager_; // curveclient std::shared_ptr curveClient_; diff --git a/nebd/src/part2/request_executor.h b/nebd/src/part2/request_executor.h index 0d69e3c9c8..2098ca87a4 100644 --- a/nebd/src/part2/request_executor.h +++ b/nebd/src/part2/request_executor.h @@ -24,8 +24,9 @@ #define NEBD_SRC_PART2_REQUEST_EXECUTOR_H_ #include -#include #include +#include + #include "nebd/src/part2/define.h" namespace nebd { @@ -41,14 +42,16 @@ class CurveRequestExecutor; using OpenFlags = nebd::client::ProtoOpenFlags; -// 具体RequestExecutor中会用到的文件实例上下文信息 -// RequestExecutor需要用到的文件上下文信息都记录到FileInstance内 +// Context information of the file instance used by a concrete +// RequestExecutor; all file context the RequestExecutor needs is recorded +// in NebdFileInstance class NebdFileInstance { public: NebdFileInstance() {} virtual ~NebdFileInstance() {} - // 需要持久化到文件的内容,以kv形式返回,例如curve open时返回的sessionid - // 文件reopen的时候也会用到该内容 + // Content that needs to be persisted to the file, returned in kv form, + // e.g. the sessionid returned by curve open; it is also used when the + // file is reopened ExtendAttribute xattr; }; @@ -65,7 +68,8 @@ class NebdRequestExecutor { virtual int GetInfo(NebdFileInstance* fd, NebdFileInfo* fileInfo) = 0; virtual int Discard(NebdFileInstance* fd, NebdServerAioContext* aioctx) = 0; virtual int AioRead(NebdFileInstance* fd, NebdServerAioContext* aioctx) = 0; - virtual int AioWrite(NebdFileInstance* fd, NebdServerAioContext* aioctx) = 0; // NOLINT + virtual int AioWrite(NebdFileInstance* fd, + NebdServerAioContext* aioctx) = 0; // NOLINT virtual int Flush(NebdFileInstance* fd, NebdServerAioContext* aioctx) = 0; virtual int InvalidCache(NebdFileInstance* fd) = 0; }; diff --git a/nebd/src/part2/request_executor_curve.h b/nebd/src/part2/request_executor_curve.h index 11606d1bb1..a96409e5c4 100644 --- a/nebd/src/part2/request_executor_curve.h +++ b/nebd/src/part2/request_executor_curve.h @@ -23,12 +23,13 @@ #ifndef NEBD_SRC_PART2_REQUEST_EXECUTOR_CURVE_H_ #define NEBD_SRC_PART2_REQUEST_EXECUTOR_CURVE_H_ -#include #include +#include #include -#include "nebd/src/part2/request_executor.h" -#include "nebd/src/part2/define.h" + #include "include/client/libcurve.h" +#include "nebd/src/part2/define.h" +#include "nebd/src/part2/request_executor.h" namespace nebd { namespace server { @@ -54,17 +55,22 @@ void CurveAioCallback(struct CurveAioContext* curveCtx); class FileNameParser { public: /** - * @brief 解析fileName - * 一般格式: - * qemu "cbd:pool1//cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_:/etc/curve/client.conf" //NOLINT - * nbd "cbd:pool1//cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_" // NOLINT + * @brief Parse fileName + * General format: + * qemu + * "cbd:pool1//cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_:/etc/curve/client.conf" + * //NOLINT nbd + * 
"cbd:pool1//cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_" + * // NOLINT * @param[in] fileName - * @return 解析结果 - * qemu "/cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_", "/etc/curve/client.conf" //NOLINT - * nbd "/cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_", "" //NOLINT + * @return Parsing Result + * qemu "/cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_", + * "/etc/curve/client.conf" //NOLINT nbd + * "/cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_", "" + * //NOLINT */ - static std::pair - Parse(const std::string& fileName); + static std::pair Parse( + const std::string& fileName); }; class CurveRequestExecutor : public NebdRequestExecutor { @@ -74,7 +80,7 @@ class CurveRequestExecutor : public NebdRequestExecutor { return executor; } ~CurveRequestExecutor() {} - void Init(const std::shared_ptr &client); + void Init(const std::shared_ptr& client); std::shared_ptr Open(const std::string& filename, const OpenFlags* openflags) override; std::shared_ptr Reopen( @@ -90,40 +96,42 @@ class CurveRequestExecutor : public NebdRequestExecutor { private: /** - * @brief 构造函数 + * @brief constructor */ CurveRequestExecutor() {} /** - * @brief 从NebdFileInstance中解析出curve_client需要的fd - * @param[in] fd NebdFileInstance类型 - * @return 返回curve_client中文件的fd, 如果小于0,表示解析结果错误 + * @brief Parse the fd needed by curve_client from NebdFileInstance. + * @param[in] fd NebdFileInstance type. + * @return Returns the fd of the file in curve_client. If less than 0, it + * indicates an error in the parsing result. */ int GetCurveFdFromNebdFileInstance(NebdFileInstance* fd); /** - * @brief 从NebdFileInstance中解析出curbe_client需要的filename - * @param[in] fd NebdFileInstance类型 - * @return 返回curve_client中的filename, 如果为空,表示解析出错 + * @brief Parse the filename needed by curve_client from NebdFileInstance. + * @param[in] fd NebdFileInstance type. + * @return Returns the filename in curve_client. If empty, it indicates an + * error in the parsing. 
*/ std::string GetFileNameFromNebdFileInstance(NebdFileInstance* fd); /** - * @brief 将NebdServerAioContext类型转换为CurveAioContext类型 - * @param[in] nebdCtx NebdServerAioContext类型 - * @param[out] curveCtx CurveAioContext类型 - * @return -1转换失败,0转换成功 + * @brief Convert NebdServerAioContext type to CurveAioContext type + * @param[in] nebdCtx NebdServerAioContext type + * @param[out] curveCtx CurveAioContext type + * @return -1 conversion failed, 0 conversion succeeded */ - int FromNebdCtxToCurveCtx( - NebdServerAioContext *nebdCtx, CurveAioContext *curveCtx); + int FromNebdCtxToCurveCtx(NebdServerAioContext* nebdCtx, + CurveAioContext* curveCtx); /** - * @brief 将LIBAIO_OP类型转换为curve_client中LIBCURVE_OP类型 - * @param[in] op LIBAIO_OP类型 - * @param[out] out LIBCURVE_OP类型 - * @return -1转换失败,0转换成功 + * @brief Convert LIBAIO_OP types to LIBCURVE_OP types in the curve_client + * @param[in] op LIBAIO_OP type + * @param[out] out LIBCURVE_OP type + * @return -1 conversion failed, 0 conversion succeeded */ - int FromNebdOpToCurveOp(LIBAIO_OP op, LIBCURVE_OP *out); + int FromNebdOpToCurveOp(LIBAIO_OP op, LIBCURVE_OP* out); private: std::shared_ptr<::curve::client::CurveClient> client_; diff --git a/nebd/src/part2/util.h b/nebd/src/part2/util.h index f733a04577..0894d69ebe 100644 --- a/nebd/src/part2/util.h +++ b/nebd/src/part2/util.h @@ -23,9 +23,9 @@ #ifndef NEBD_SRC_PART2_UTIL_H_ #define NEBD_SRC_PART2_UTIL_H_ -#include #include // NOLINT #include +#include #include "nebd/src/part2/define.h" @@ -51,9 +51,9 @@ class FdAllocator { FdAllocator() : fd_(0) {} ~FdAllocator() {} - // fd的有效值范围为[1, INT_MAX] + // The valid range of values for fd is [1, INT_MAX] int GetNext(); - // 初始化fd的值 + // Initialize the value of fd void InitFd(int fd); private: diff --git a/nebd/test/common/configuration_test.cpp b/nebd/test/common/configuration_test.cpp index 4c9e7b7c21..ef24eeb42a 100644 --- a/nebd/test/common/configuration_test.cpp +++ b/nebd/test/common/configuration_test.cpp @@ -21,15 +21,15 @@ * 2018/11/23 Wenyu Zhou Initial version */ -#include +#include "nebd/src/common/configuration.h" + #include +#include -#include -#include #include +#include #include - -#include "nebd/src/common/configuration.h" +#include namespace nebd { namespace common { @@ -86,9 +86,7 @@ class ConfigurationTest : public ::testing::Test { cFile << confItem; } - void TearDown() { - ASSERT_EQ(0, unlink(confFile_.c_str())); - } + void TearDown() { ASSERT_EQ(0, unlink(confFile_.c_str())); } std::string confFile_; }; @@ -136,10 +134,10 @@ TEST_F(ConfigurationTest, ListConfig) { std::map configs; configs = conf.ListConfig(); ASSERT_NE(0, configs.size()); - // 抽几个key来校验以下 + // Pick a few keys for validation. 
ASSERT_EQ(configs["test.int1"], "12345"); ASSERT_EQ(configs["test.bool1"], "0"); - // 如果key不存在,返回为空 + // If the key does not exist, return empty ASSERT_EQ(configs["xxx"], ""); } @@ -148,18 +146,19 @@ TEST_F(ConfigurationTest, SaveConfig) { Configuration conf; conf.SetConfigPath(confFile_); - // 自定义配置项并保存 + // Customize configuration items and save them conf.SetStringValue("test.str1", "new"); ret = conf.SaveConfig(); ASSERT_EQ(ret, true); - // 重新加载配置项 + // Reload the configuration items Configuration conf2; conf2.SetConfigPath(confFile_); ret = conf2.LoadConfig(); ASSERT_EQ(ret, true); - // 可以读取自定义配置项,原有配置项被覆盖,读取不到 + // Custom configuration items can be read, but the original configuration + // items are overwritten and cannot be read ASSERT_EQ(conf2.GetValue("test.str1"), "new"); ASSERT_EQ(conf2.GetValue("test.int1"), ""); } @@ -301,7 +300,7 @@ TEST_F(ConfigurationTest, GetSetDoubleAndFloatValue) { } // namespace common } // namespace nebd -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); int ret = RUN_ALL_TESTS(); diff --git a/nebd/test/common/test_name_lock.cpp b/nebd/test/common/test_name_lock.cpp index 1f79ec5800..574667ad8b 100644 --- a/nebd/test/common/test_name_lock.cpp +++ b/nebd/test/common/test_name_lock.cpp @@ -21,6 +21,7 @@ */ #include + #include #include // NOLINT @@ -32,29 +33,27 @@ namespace common { TEST(TestNameLock, TestNameLockBasic) { NameLock lock1, lock2, lock3; - // lock测试 + // Lock test lock1.Lock("str1"); - // 同锁不同str可lock不死锁 + // Same lock but different strs can lock without deadlock lock1.Lock("str2"); - // 不同锁同str可lock不死锁 + // Different locks with the same str can lock without deadlock lock2.Lock("str1"); - - - // 同锁同str TryLock失败 + // Same lock, same str: TryLock fails ASSERT_FALSE(lock1.TryLock("str1")); - // 同锁不同str TryLock成功 + // Same lock, different str: TryLock succeeds ASSERT_TRUE(lock1.TryLock("str3")); - // 不同锁同str TryLock成功 + // Different locks, same str: TryLock succeeds ASSERT_TRUE(lock3.TryLock("str1")); - // unlock测试 + // Unlock test lock1.Unlock("str1"); lock1.Unlock("str2"); lock1.Unlock("str3"); lock2.Unlock("str1"); lock3.Unlock("str1"); - // 未锁unlock ok + // Unlocking a name that was never locked is OK lock2.Unlock("str2"); } @@ -64,12 +63,13 @@ TEST(TestNameLock, TestNameLockGuardBasic) { NameLockGuard guard1(lock1, "str1"); NameLockGuard guard2(lock1, "str2"); NameLockGuard guard3(lock2, "str1"); - // 作用域内加锁成功,不可再加锁 + // Locked within the guard scope, so they cannot be locked again ASSERT_FALSE(lock1.TryLock("str1")); ASSERT_FALSE(lock1.TryLock("str2")); ASSERT_FALSE(lock2.TryLock("str1")); } - // 作用域外自动解锁,可再加锁 + // Automatically unlocked once the guards go out of scope; the names can + // be locked again ASSERT_TRUE(lock1.TryLock("str1")); ASSERT_TRUE(lock1.TryLock("str2")); ASSERT_TRUE(lock2.TryLock("str1")); @@ -80,14 +80,14 @@ TEST(TestNameLock, TestNameLockConcurrent) { NameLock lock1; - auto worker = [&] (const std::string &str) { + auto worker = [&](const std::string& str) { for (int i = 0; i < 10000; i++) { NameLockGuard guard(lock1, str); } }; std::vector threadpool; - for (auto &t : threadpool) { + for (auto& t : threadpool) { std::string str1 = "aaaa"; std::string str2 = "bbbb"; std::srand(std::time(nullptr)); t = std::thread(worker, rstr); } - for (auto &t : threadpool) { + for (auto& t : threadpool) { t.join(); } } - - -} // namespace common -} // namespace nebd +} // namespace common +} // namespace nebd
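The semantics exercised above amount to per-name mutual exclusion: a NameLock serializes callers that pass the same string, while different strings, or different NameLock instances, never contend. A usage sketch, assuming the nebd::common::NameLock and NameLockGuard types from this test (UpdateVolume is illustrative):

#include <string>

#include "nebd/src/common/name_lock.h"

void UpdateVolume(nebd::common::NameLock& lock, const std::string& volume) {
    // Serializes work on this volume name; other names proceed in parallel.
    nebd::common::NameLockGuard guard(lock, volume);
    // ... critical section for this volume ...
}  // the guard releases the name when it goes out of scope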
diff --git a/nebd/test/part1/heartbeat_manager_unittest.cpp b/nebd/test/part1/heartbeat_manager_unittest.cpp index 72de6802d4..3d95f9adf4 100644 --- a/nebd/test/part1/heartbeat_manager_unittest.cpp +++ b/nebd/test/part1/heartbeat_manager_unittest.cpp @@ -20,14 +20,15 @@ * Author: hzchenwei7 */ -#include -#include +#include "nebd/src/part1/heartbeat_manager.h" + #include +#include +#include #include #include // NOLINT -#include "nebd/src/part1/heartbeat_manager.h" #include "nebd/src/part1/nebd_metacache.h" #include "nebd/test/part1/fake_heartbeat_service.h" @@ -66,24 +67,20 @@ class HeartbeatManagerTest : public testing::Test { HeartbeatOption option; }; -TEST_F(HeartbeatManagerTest, InitTest) { - ASSERT_EQ(0, manager->Init( - option)); -} +TEST_F(HeartbeatManagerTest, InitTest) { ASSERT_EQ(0, manager->Init(option)); } TEST_F(HeartbeatManagerTest, InvokeTimesTest) { - ASSERT_EQ(0, manager->Init( - option)); + ASSERT_EQ(0, manager->Init(option)); manager->Run(); - // metaCache中数据为空,不发送心跳消息 + // The data in metaCache is empty and no heartbeat message will be sent for (int i = 0; i < 10; ++i) { ASSERT_EQ(0, fakeHeartBeatService.GetInvokeTimes()); std::this_thread::sleep_for(std::chrono::seconds(1)); } - // 添加数据 + // Add data NebdClientFileInfo fileInfo(1, "/test1", FileLock("/test1.lock")); metaCache->AddFileInfo(fileInfo); @@ -91,7 +88,7 @@ int times = fakeHeartBeatService.GetInvokeTimes(); ASSERT_TRUE(times >= 9 && times <= 11); - // 清空metaCache数据 + // Clear the metaCache data metaCache->RemoveFileInfo(1); std::this_thread::sleep_for(std::chrono::seconds(2)); @@ -103,13 +100,12 @@ } TEST_F(HeartbeatManagerTest, RequestValidTest) { - ASSERT_EQ(0, manager->Init( - option)); + ASSERT_EQ(0, manager->Init(option)); manager->Run(); std::vector currentFileInfos; - // 添加一个文件 + // Add a file NebdClientFileInfo fileInfo(1, "/test1", FileLock("/test1.lock")); metaCache->AddFileInfo(fileInfo); HeartbeatFileInfo info; @@ -126,7 +122,7 @@ ASSERT_EQ(currentFileInfos[i].name(), latestFileInfos[i].name()); } - // 添加第二个文件 + // Add a second file fileInfo = NebdClientFileInfo(2, "/test2", FileLock("/test2.lock")); metaCache->AddFileInfo(fileInfo); info.set_fd(2); @@ -147,7 +143,7 @@ ASSERT_EQ(currentFileInfos[i].name(), latestFileInfos[i].name()); } - // 删除第一个文件 + // Delete the first file metaCache->RemoveFileInfo(1); currentFileInfos.erase(currentFileInfos.begin()); @@ -166,7 +162,7 @@ } // namespace client } // namespace nebd -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/nebd/test/part1/nebd_client_unittest.cpp b/nebd/test/part1/nebd_client_unittest.cpp index 6822947653..2f3e18910f 100644 --- a/nebd/test/part1/nebd_client_unittest.cpp +++ b/nebd/test/part1/nebd_client_unittest.cpp @@ -20,18 +20,18 @@ * Author: wuhanqing */ -#include -#include +#include "nebd/src/part1/nebd_client.h" + #include +#include +#include -#include // NOLINT -#include // NOLINT #include +#include // NOLINT +#include // NOLINT -#include "nebd/src/part1/nebd_client.h" #include "nebd/src/part1/libnebd.h" #include "nebd/src/part1/libnebd_file.h" - #include "nebd/test/part1/fake_file_service.h" #include "nebd/test/part1/mock_file_service.h" #include 
"nebd/test/utils/config_generator.h" @@ -79,16 +79,14 @@ void AioRpcFailCallBack(NebdClientAioContext* ctx) { template void MockClientFunc(google::protobuf::RpcController* cntl_base, - const Request* request, - Response* response, + const Request* request, Response* response, google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); } template void MockClientRpcFailedFunc(google::protobuf::RpcController* cntl_base, - const Request* request, - Response* response, + const Request* request, Response* response, google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); static int invokeTimes = 0; @@ -110,20 +108,20 @@ class NebdFileClientTest : public ::testing::Test { void TearDown() override {} void AddFakeService() { - ASSERT_EQ(0, server.AddService( - &fakeService, - brpc::SERVER_DOESNT_OWN_SERVICE)) << "Add service failed"; + ASSERT_EQ( + 0, server.AddService(&fakeService, brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Add service failed"; } void AddMockService() { - ASSERT_EQ(0, server.AddService( - &mockService, - brpc::SERVER_DOESNT_OWN_SERVICE)) << "Add service failed"; + ASSERT_EQ( + 0, server.AddService(&mockService, brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Add service failed"; } void StartServer(const std::string& address = kNebdServerTestAddress) { - ASSERT_EQ(0, server.StartAtSockFile( - address.c_str(), nullptr)) << "Start server failed"; + ASSERT_EQ(0, server.StartAtSockFile(address.c_str(), nullptr)) + << "Start server failed"; } void StopServer() { @@ -137,15 +135,15 @@ class NebdFileClientTest : public ::testing::Test { }; using ::testing::_; -using ::testing::Invoke; -using ::testing::Return; using ::testing::AnyNumber; +using ::testing::AtLeast; using ::testing::DoAll; -using ::testing::SetArgPointee; -using ::testing::SetArgReferee; using ::testing::InSequence; -using ::testing::AtLeast; +using ::testing::Invoke; +using ::testing::Return; using ::testing::SaveArgPointee; +using ::testing::SetArgPointee; +using ::testing::SetArgReferee; TEST_F(NebdFileClientTest, AioRpcFailTest) { AddMockService(); @@ -167,7 +165,8 @@ TEST_F(NebdFileClientTest, AioRpcFailTest) { EXPECT_CALL(mockService, Write(_, _, _, _)) .Times(10) .WillRepeatedly( - Invoke(MockClientRpcFailedFunc)); // NOLINT + Invoke(MockClientRpcFailedFunc)); // NOLINT aioOpReturn = false; auto start = std::chrono::system_clock::now(); @@ -177,9 +176,11 @@ TEST_F(NebdFileClientTest, AioRpcFailTest) { cond.wait(ulk, []() { return aioOpReturn.load(); }); ASSERT_TRUE(aioOpReturn.load()); auto end = std::chrono::system_clock::now(); - auto elpased = std::chrono::duration_cast(end - start).count(); // NOLINT + auto elpased = + std::chrono::duration_cast(end - start) + .count(); // NOLINT - // 重试睡眠时间: 100ms + 200ms + ... + 900ms = 4500ms + // Retrying sleep time: 100ms + 200ms + ... 
+ 900ms = 4500ms ASSERT_TRUE(elpased >= 4000 && elpased <= 5000); } @@ -196,7 +197,8 @@ TEST_F(NebdFileClientTest, AioRpcFailTest) { EXPECT_CALL(mockService, Read(_, _, _, _)) .Times(10) .WillRepeatedly( - Invoke(MockClientRpcFailedFunc)); // NOLINT + Invoke(MockClientRpcFailedFunc)); // NOLINT aioOpReturn = false; ASSERT_EQ(0, AioRead4Nebd(1, ctx)); @@ -218,7 +220,8 @@ EXPECT_CALL(mockService, Discard(_, _, _, _)) .Times(10) .WillRepeatedly( - Invoke(MockClientRpcFailedFunc)); // NOLINT + Invoke(MockClientRpcFailedFunc)); // NOLINT aioOpReturn = false; ASSERT_EQ(0, Discard4Nebd(1, ctx)); @@ -240,7 +243,8 @@ EXPECT_CALL(mockService, Flush(_, _, _, _)) .Times(10) .WillRepeatedly( - Invoke(MockClientRpcFailedFunc)); // NOLINT + Invoke(MockClientRpcFailedFunc)); // NOLINT aioOpReturn = false; ASSERT_EQ(0, Flush4Nebd(1, ctx)); @@ -261,10 +265,12 @@ TEST_F(NebdFileClientTest, NoNebdServerTest) { auto start = std::chrono::system_clock::now(); ASSERT_EQ(-1, Open4Nebd(kFileName, nullptr)); auto end = std::chrono::system_clock::now(); - auto elapsed = std::chrono::duration_cast( - end - start).count(); + auto elapsed = + std::chrono::duration_cast(end - start) + .count(); - // rpc failed的清空下,睡眠100ms后继续重试,共重试10次 + // When the RPC fails, sleep 100ms and then retry; 10 retries in + // total ASSERT_TRUE(elapsed >= 900 && elapsed <= 1100); } ASSERT_EQ(-1, Extend4Nebd(1, kFileSize)); @@ -380,8 +386,8 @@ TEST_F(NebdFileClientTest, ReOpenTest) { int fd = Open4Nebd(kFileName, nullptr); ASSERT_GT(fd, 0); - // 文件已经被打开,并占用文件锁 - // 再次打开时,获取文件锁失败,直接返回 + // The file is already open and holds the file lock + // Opening it again fails to acquire the file lock and returns directly ASSERT_EQ(-1, Open4Nebd(kFileName, nullptr)); ASSERT_EQ(0, Close4Nebd(fd)); @@ -406,9 +412,10 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, OpenFile(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); // NOLINT ASSERT_EQ(-1, Open4Nebd(kFileName, nullptr)); } @@ -417,9 +424,10 @@ response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, CloseFile(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); // NOLINT ASSERT_EQ(0, Close4Nebd(0)); } @@ -428,9 +436,9 @@ response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, ResizeFile(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); ASSERT_EQ(-1, Extend4Nebd(1, kFileSize)); } @@ -439,9 +447,10 @@ response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, GetInfo(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); // NOLINT ASSERT_EQ(-1, GetFileSize4Nebd(1)); } @@ -450,9 +459,9 @@ response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, GetInfo(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - 
SetArgPointee<2>(response), - Invoke(MockClientFunc))); + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); ASSERT_EQ(-1, GetBlockSize4Nebd(1)); } @@ -461,9 +470,10 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, GetInfo(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); // NOLINT ASSERT_EQ(-1, GetInfo4Nebd(1)); } @@ -474,7 +484,8 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { .Times(1) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + Invoke(MockClientFunc))); // NOLINT ASSERT_EQ(-1, InvalidCache4Nebd(1)); } @@ -496,7 +507,8 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { .Times(1) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + Invoke( + MockClientFunc))); // NOLINT aioOpReturn = false; ASSERT_EQ(0, AioWrite4Nebd(1, ctx)); std::unique_lock ulk(mtx); @@ -518,9 +530,8 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, Read(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); aioOpReturn = false; ASSERT_EQ(0, AioRead4Nebd(1, ctx)); std::unique_lock ulk(mtx); @@ -542,9 +553,10 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, Discard(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); // NOLINT aioOpReturn = false; ASSERT_EQ(0, Discard4Nebd(1, ctx)); std::unique_lock ulk(mtx); @@ -566,9 +578,9 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, Flush(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); aioOpReturn = false; ASSERT_EQ(0, Flush4Nebd(1, ctx)); std::unique_lock ulk(mtx); @@ -596,14 +608,12 @@ TEST_F(NebdFileClientTest, InitAndUninitTest) { } // namespace client } // namespace nebd - int main(int argc, char* argv[]) { - std::vector nebdConfig { + std::vector nebdConfig{ std::string("nebdserver.serverAddress=") + kNebdServerTestAddress, std::string("metacache.fileLockPath=/tmp"), std::string("request.syncRpcMaxRetryTimes=10"), - std::string("log.path=.") - }; + std::string("log.path=.")}; nebd::common::NebdClientConfigGenerator generator; generator.SetConfigPath(kNebdClientConf); diff --git a/nebd/test/part2/file_manager_unittest.cpp b/nebd/test/part2/file_manager_unittest.cpp index 0d13a7b18c..0b59f918aa 100644 --- a/nebd/test/part2/file_manager_unittest.cpp +++ b/nebd/test/part2/file_manager_unittest.cpp @@ -20,15 +20,17 @@ * Author: yangyaokai */ -#include +#include "nebd/src/part2/file_manager.h" + #include -#include +#include + #include +#include -#include "nebd/src/part2/file_manager.h" #include "nebd/src/part2/file_entity.h" -#include "nebd/test/part2/mock_request_executor.h" #include "nebd/test/part2/mock_metafile_manager.h" +#include "nebd/test/part2/mock_request_executor.h" namespace nebd { namespace server { @@ -38,11 +40,11 @@ const char testFile2[] = "test:/cinder/222"; const char unknownFile[] = "un:/cinder/666"; 
using ::testing::_; -using ::testing::Return; -using ::testing::NotNull; using ::testing::DoAll; -using ::testing::ReturnArg; using ::testing::ElementsAre; +using ::testing::NotNull; +using ::testing::Return; +using ::testing::ReturnArg; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; @@ -66,12 +68,10 @@ class FileManagerTest : public ::testing::Test { metaFileManager_ = std::make_shared(); fileManager_ = std::make_shared(metaFileManager_); } - void TearDown() { - delete aioContext_; - } + void TearDown() { delete aioContext_; } using TestTask = std::function; - // 构造初始环境 + // Construct initial environment void InitEnv() { NebdFileMeta meta; meta.fd = 1; @@ -80,18 +80,14 @@ class FileManagerTest : public ::testing::Test { fileMetas.emplace_back(meta); EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) - .WillOnce(DoAll(SetArgPointee<0>(fileMetas), - Return(0))); - EXPECT_CALL(*executor_, Reopen(_, _)) - .WillOnce(Return(mockInstance_)); + .WillOnce(DoAll(SetArgPointee<0>(fileMetas), Return(0))); + EXPECT_CALL(*executor_, Reopen(_, _)).WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(_, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ASSERT_EQ(fileManager_->Run(), 0); } - void UnInitEnv() { - ASSERT_EQ(fileManager_->Fini(), 0); - } + void UnInitEnv() { ASSERT_EQ(fileManager_->Fini(), 0); } void ExpectCallRequest(RequestType type, int ret) { switch (type) { @@ -125,20 +121,19 @@ class FileManagerTest : public ::testing::Test { ASSERT_NE(nullptr, entity1); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // 文件状态为OPENED + // The file status is OPENED ExpectCallRequest(type, 0); ASSERT_EQ(0, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); ASSERT_EQ(entity1->Close(false), 0); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 文件状态为CLOSED + // The file status is CLOSED EXPECT_CALL(*executor_, Open(testFile1, _)) - .WillOnce(Return(mockInstance_)); + .WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ExpectCallRequest(type, 0); ASSERT_EQ(0, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); @@ -147,53 +142,47 @@ class FileManagerTest : public ::testing::Test { void RequestFailTest(RequestType type, TestTask task) { InitEnv(); - // 将文件close + // Close the file NebdFileEntityPtr entity1 = fileManager_->GetFileEntity(1); ASSERT_NE(nullptr, entity1); - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); ASSERT_EQ(entity1->Close(false), 0); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // open文件失败 - EXPECT_CALL(*executor_, Open(testFile1, _)) - .WillOnce(Return(nullptr)); - EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) - .Times(0); + // Open file failed + EXPECT_CALL(*executor_, Open(testFile1, _)).WillOnce(Return(nullptr)); + EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)).Times(0); ASSERT_EQ(-1, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 更新元数据文件失败 + // Failed to update metadata file EXPECT_CALL(*executor_, Open(testFile1, _)) - .WillOnce(Return(mockInstance_)); + .WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*executor_, 
Close(NotNull())) - .WillOnce(Return(0)); + .WillOnce(Return(-1)); + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); ASSERT_EQ(-1, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 执行处理函数失败 + // Failed to execute processing function EXPECT_CALL(*executor_, Open(testFile1, _)) - .WillOnce(Return(mockInstance_)); + .WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ExpectCallRequest(type, -1); ASSERT_EQ(-1, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // 将文件状态置为DESTROYED - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); + // Set the file status to DESTROYED + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ASSERT_EQ(entity1->Close(true), 0); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::DESTROYED); - EXPECT_CALL(*executor_, Open(testFile1, _)) - .Times(0); + EXPECT_CALL(*executor_, Open(testFile1, _)).Times(0); ASSERT_EQ(-1, task(1)); - // 直接将文件删除 + // Delete files directly ASSERT_EQ(0, fileManager_->Close(1, true)); ASSERT_EQ(nullptr, fileManager_->GetFileEntity(1)); ASSERT_EQ(-1, task(1)); @@ -216,17 +205,14 @@ TEST_F(FileManagerTest, RunTest) { fileMetas.emplace_back(meta); EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) - .WillOnce(DoAll(SetArgPointee<0>(fileMetas), - Return(0))); - EXPECT_CALL(*executor_, Reopen(_, _)) - .WillOnce(Return(mockInstance_)); - EXPECT_CALL(*metaFileManager_, UpdateFileMeta(_, _)) - .WillOnce(Return(0)); + .WillOnce(DoAll(SetArgPointee<0>(fileMetas), Return(0))); + EXPECT_CALL(*executor_, Reopen(_, _)).WillOnce(Return(mockInstance_)); + EXPECT_CALL(*metaFileManager_, UpdateFileMeta(_, _)).WillOnce(Return(0)); ASSERT_EQ(fileManager_->Run(), 0); - // 重复run返回失败 + // Repeated run returns failed ASSERT_EQ(fileManager_->Run(), -1); - // 校验结果 + // Verification results FileEntityMap entityMap = fileManager_->GetFileEntityMap(); ASSERT_EQ(1, entityMap.size()); ASSERT_NE(nullptr, entityMap[meta.fd]); @@ -239,44 +225,36 @@ TEST_F(FileManagerTest, RunFailTest) { std::vector fileMetas; fileMetas.emplace_back(meta); - // list file meta失败 - EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) - .WillOnce(Return(-1)); + // List file meta failed + EXPECT_CALL(*metaFileManager_, ListFileMeta(_)).WillOnce(Return(-1)); ASSERT_EQ(fileManager_->Run(), -1); - // reopen失败不影响Run成功 + // Reopen failure does not affect Run success EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) - .WillOnce(DoAll(SetArgPointee<0>(fileMetas), - Return(0))); - EXPECT_CALL(*executor_, Reopen(_, _)) - .WillOnce(Return(nullptr)); + .WillOnce(DoAll(SetArgPointee<0>(fileMetas), Return(0))); + EXPECT_CALL(*executor_, Reopen(_, _)).WillOnce(Return(nullptr)); ASSERT_EQ(fileManager_->Run(), 0); ASSERT_EQ(fileManager_->Fini(), 0); - // 更新metafile失败不影响Run成功 + // Failure to update metafile does not affect the success of Run EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) - .WillOnce(DoAll(SetArgPointee<0>(fileMetas), - Return(0))); - EXPECT_CALL(*executor_, Reopen(_, _)) - .WillOnce(Return(mockInstance_)); - EXPECT_CALL(*metaFileManager_, UpdateFileMeta(_, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*executor_, Close(NotNull())) - .Times(1); + .WillOnce(DoAll(SetArgPointee<0>(fileMetas), Return(0))); + EXPECT_CALL(*executor_, Reopen(_, _)).WillOnce(Return(mockInstance_)); + EXPECT_CALL(*metaFileManager_, UpdateFileMeta(_, 
_)).WillOnce(Return(-1)); + EXPECT_CALL(*executor_, Close(NotNull())).Times(1); ASSERT_EQ(fileManager_->Run(), 0); } TEST_F(FileManagerTest, OpenTest) { InitEnv(); - // open一个不存在的文件 - EXPECT_CALL(*executor_, Open(testFile2, _)) - .WillOnce(Return(mockInstance_)); + // Open a non-existent file + EXPECT_CALL(*executor_, Open(testFile2, _)).WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); int fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, 2); - // 重复open + // Repeat open fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, 2); @@ -288,15 +266,13 @@ TEST_F(FileManagerTest, OpenTest) { ASSERT_NE(entity2, nullptr); ASSERT_EQ(entity2->GetFileStatus(), NebdFileStatus::OPENED); - EXPECT_CALL(*executor_, Close(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*executor_, Close(_)).WillOnce(Return(0)); ASSERT_EQ(entity2->Close(false), 0); ASSERT_EQ(entity2->GetFileStatus(), NebdFileStatus::CLOSED); - // open 已经close的文件, fd不变 - EXPECT_CALL(*executor_, Open(testFile2, _)) - .WillOnce(Return(mockInstance_)); + // Open closed files, keep fd unchanged + EXPECT_CALL(*executor_, Open(testFile2, _)).WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, 2); ASSERT_EQ(entity2->GetFileStatus(), NebdFileStatus::OPENED); @@ -304,79 +280,67 @@ TEST_F(FileManagerTest, OpenTest) { TEST_F(FileManagerTest, OpenFailTest) { InitEnv(); - // 调用后端open接口时出错 - EXPECT_CALL(*executor_, Open(testFile2, _)) - .WillOnce(Return(nullptr)); - EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) - .Times(0); + // Error calling backend open interface + EXPECT_CALL(*executor_, Open(testFile2, _)).WillOnce(Return(nullptr)); + EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)).Times(0); int fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, -1); - // 持久化元数据信息失败 - EXPECT_CALL(*executor_, Open(testFile2, _)) - .WillOnce(Return(mockInstance_)); + // Persisting metadata information failed + EXPECT_CALL(*executor_, Open(testFile2, _)).WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*executor_, Close(_)) - .Times(1); + .WillOnce(Return(-1)); + EXPECT_CALL(*executor_, Close(_)).Times(1); fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, -1); - // Open一个非法的filename - EXPECT_CALL(*executor_, Open(_, _)) - .Times(0); + // Open an illegal filename + EXPECT_CALL(*executor_, Open(_, _)).Times(0); fd = fileManager_->Open(unknownFile, nullptr); ASSERT_EQ(fd, -1); } TEST_F(FileManagerTest, CloseTest) { InitEnv(); - // 指定的fd不存在,直接返回成功 + // The specified fd does not exist, return success directly ASSERT_EQ(nullptr, fileManager_->GetFileEntity(2)); ASSERT_EQ(0, fileManager_->Close(2, true)); NebdFileEntityPtr entity1 = fileManager_->GetFileEntity(1); ASSERT_NE(nullptr, entity1); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // 文件存在,且文件状态为OPENED,removeRecord为false - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .Times(0); + // The file exists and its status is OPENED, while removeRecord is false + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); + EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)).Times(0); ASSERT_EQ(0, fileManager_->Close(1, false)); 
ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 文件存在,文件状态为CLOSED,removeRecord为false - EXPECT_CALL(*executor_, Close(NotNull())) - .Times(0); - EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .Times(0); + // File exists, file status is CLOSED, removeRecord is false + EXPECT_CALL(*executor_, Close(NotNull())).Times(0); + EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)).Times(0); ASSERT_EQ(0, fileManager_->Close(1, false)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 文件存在,文件状态为CLOSED,removeRecord为true - EXPECT_CALL(*executor_, Close(NotNull())) - .Times(0); + // The file exists, the file status is CLOSED, and removeRecord is true + EXPECT_CALL(*executor_, Close(NotNull())).Times(0); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ASSERT_EQ(0, fileManager_->Close(1, true)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::DESTROYED); ASSERT_EQ(nullptr, fileManager_->GetFileEntity(1)); - EXPECT_CALL(*executor_, Open(testFile2, _)) - .WillOnce(Return(mockInstance_)); + EXPECT_CALL(*executor_, Open(testFile2, _)).WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); int fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, 2); NebdFileEntityPtr entity2 = fileManager_->GetFileEntity(2); ASSERT_NE(entity2, nullptr); ASSERT_EQ(entity2->GetFileStatus(), NebdFileStatus::OPENED); - // 文件存在,文件状态为OPENED,removeRecord为true - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); + // File exists, file status is OPENED, removeRecord is true + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile2)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ASSERT_EQ(0, fileManager_->Close(fd, true)); ASSERT_EQ(nullptr, fileManager_->GetFileEntity(1)); } @@ -387,36 +351,31 @@ TEST_F(FileManagerTest, CloseFailTest) { ASSERT_NE(nullptr, entity1); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // executor close 失败 - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(-1)); - EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .Times(0); + // Executor close failed + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(-1)); + EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)).Times(0); ASSERT_EQ(-1, fileManager_->Close(1, true)); ASSERT_NE(nullptr, fileManager_->GetFileEntity(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // remove file meta 失败 - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); + // Remove file meta failed + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .WillOnce(Return(-1)); + .WillOnce(Return(-1)); ASSERT_EQ(-1, fileManager_->Close(1, true)); ASSERT_NE(nullptr, fileManager_->GetFileEntity(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); } TEST_F(FileManagerTest, ExtendTest) { - auto task = [&](int fd)->int { - return fileManager_->Extend(fd, 4096); - }; + auto task = [&](int fd) -> int { return fileManager_->Extend(fd, 4096); }; RequestSuccssTest(RequestType::EXTEND, task); RequestFailTest(RequestType::EXTEND, task); } TEST_F(FileManagerTest, GetInfoTest) { NebdFileInfo fileInfo; - auto task = [&](int fd)->int { + auto task = [&](int fd) -> int { return fileManager_->GetInfo(fd, &fileInfo); }; RequestSuccssTest(RequestType::GETINFO, task); @@ 
-424,16 +383,14 @@ TEST_F(FileManagerTest, GetInfoTest) { } TEST_F(FileManagerTest, InvalidCacheTest) { - auto task = [&](int fd)->int { - return fileManager_->InvalidCache(fd); - }; + auto task = [&](int fd) -> int { return fileManager_->InvalidCache(fd); }; RequestSuccssTest(RequestType::INVALIDCACHE, task); RequestFailTest(RequestType::INVALIDCACHE, task); } TEST_F(FileManagerTest, AioReadTest) { NebdServerAioContext aioContext; - auto task = [&](int fd)->int { + auto task = [&](int fd) -> int { int ret = fileManager_->AioRead(fd, &aioContext); if (ret < 0) { if (aioContext.done != nullptr) { @@ -457,7 +414,7 @@ TEST_F(FileManagerTest, AioReadTest) { TEST_F(FileManagerTest, AioWriteTest) { NebdServerAioContext aioContext; - auto task = [&](int fd)->int { + auto task = [&](int fd) -> int { int ret = fileManager_->AioWrite(fd, &aioContext); if (ret < 0) { if (aioContext.done != nullptr) { @@ -481,7 +438,7 @@ TEST_F(FileManagerTest, AioWriteTest) { TEST_F(FileManagerTest, DiscardTest) { NebdServerAioContext aioContext; - auto task = [&](int fd)->int { + auto task = [&](int fd) -> int { int ret = fileManager_->Discard(fd, &aioContext); if (ret < 0) { if (aioContext.done != nullptr) { @@ -505,7 +462,7 @@ TEST_F(FileManagerTest, DiscardTest) { TEST_F(FileManagerTest, FlushTest) { NebdServerAioContext aioContext; - auto task = [&](int fd)->int { + auto task = [&](int fd) -> int { int ret = fileManager_->Flush(fd, &aioContext); if (ret < 0) { if (aioContext.done != nullptr) { @@ -544,7 +501,7 @@ TEST_F(FileManagerTest, UpdateTimestampTest) { } // namespace server } // namespace nebd -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); return RUN_ALL_TESTS(); diff --git a/nebd/test/part2/heartbeat_manager_unittest.cpp b/nebd/test/part2/heartbeat_manager_unittest.cpp index 2ae0e8d221..9d1e0eaabb 100644 --- a/nebd/test/part2/heartbeat_manager_unittest.cpp +++ b/nebd/test/part2/heartbeat_manager_unittest.cpp @@ -20,10 +20,12 @@ * Author: yangyaokai */ +#include "nebd/src/part2/heartbeat_manager.h" + #include + #include -#include "nebd/src/part2/heartbeat_manager.h" #include "nebd/test/part2/mock_file_entity.h" #include "nebd/test/part2/mock_file_manager.h" @@ -35,11 +37,11 @@ namespace server { using ::testing::_; using ::testing::AtLeast; -using ::testing::Return; -using ::testing::NotNull; using ::testing::DoAll; -using ::testing::ReturnArg; using ::testing::ElementsAre; +using ::testing::NotNull; +using ::testing::Return; +using ::testing::ReturnArg; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; @@ -53,16 +55,16 @@ class HeartbeatManagerTest : public ::testing::Test { option.fileManager = fileManager_; heartbeatManager_ = std::make_shared(option); } - std::shared_ptr fileManager_; + std::shared_ptr fileManager_; std::shared_ptr heartbeatManager_; }; TEST_F(HeartbeatManagerTest, CheckTimeoutTest) { ASSERT_EQ(heartbeatManager_->Run(), 0); - // 已经在run了不允许重复Run或者Init + // It is already running, and duplicate Run or Init is not allowed ASSERT_EQ(heartbeatManager_->Run(), -1); - // 构造file entity + // Construct file entity uint64_t curTime = TimeUtility::GetTimeofDayMs(); std::shared_ptr entity1 = std::make_shared(); @@ -71,51 +73,44 @@ TEST_F(HeartbeatManagerTest, CheckTimeoutTest) { std::shared_ptr entity3 = std::make_shared(); EXPECT_CALL(*entity1, GetFileTimeStamp()) - .WillRepeatedly(Return(curTime - 2 * 10 * 1000)); + .WillRepeatedly(Return(curTime - 2 * 10 * 1000)); 
EXPECT_CALL(*entity1, GetFileStatus()) - .WillRepeatedly(Return(NebdFileStatus::OPENED)); + .WillRepeatedly(Return(NebdFileStatus::OPENED)); EXPECT_CALL(*entity2, GetFileTimeStamp()) - .WillRepeatedly(Return(curTime - 2 * 10 * 1000)); + .WillRepeatedly(Return(curTime - 2 * 10 * 1000)); EXPECT_CALL(*entity2, GetFileStatus()) - .WillRepeatedly(Return(NebdFileStatus::CLOSED)); - EXPECT_CALL(*entity3, GetFileTimeStamp()) - .WillRepeatedly(Return(curTime)); + .WillRepeatedly(Return(NebdFileStatus::CLOSED)); + EXPECT_CALL(*entity3, GetFileTimeStamp()).WillRepeatedly(Return(curTime)); EXPECT_CALL(*entity3, GetFileStatus()) - .WillRepeatedly(Return(NebdFileStatus::OPENED)); + .WillRepeatedly(Return(NebdFileStatus::OPENED)); - // 构造file map + // Construct a file map FileEntityMap entityMap; entityMap.emplace(1, entity1); entityMap.emplace(2, entity2); entityMap.emplace(3, entity3); EXPECT_CALL(*fileManager_, GetFileEntityMap()) - .WillRepeatedly(Return(entityMap)); + .WillRepeatedly(Return(entityMap)); - // 预期结果 - EXPECT_CALL(*entity1, Close(false)) - .Times(AtLeast(1)); - EXPECT_CALL(*entity2, Close(false)) - .Times(0); - EXPECT_CALL(*entity3, Close(false)) - .Times(0); + // Expected results + EXPECT_CALL(*entity1, Close(false)).Times(AtLeast(1)); + EXPECT_CALL(*entity2, Close(false)).Times(0); + EXPECT_CALL(*entity3, Close(false)).Times(0); ::sleep(2); ASSERT_EQ(heartbeatManager_->Fini(), 0); - // 重复Fini,也返回成功 + // Repeat Fini and return success ASSERT_EQ(heartbeatManager_->Fini(), 0); } TEST_F(HeartbeatManagerTest, UpdateTimeStampTest) { std::shared_ptr entity = std::make_shared(); - EXPECT_CALL(*fileManager_, GetFileEntity(1)) - .WillOnce(Return(entity)); - EXPECT_CALL(*entity, UpdateFileTimeStamp(100)) - .Times(1); + EXPECT_CALL(*fileManager_, GetFileEntity(1)).WillOnce(Return(entity)); + EXPECT_CALL(*entity, UpdateFileTimeStamp(100)).Times(1); ASSERT_TRUE(heartbeatManager_->UpdateFileTimestamp(1, 100)); - EXPECT_CALL(*fileManager_, GetFileEntity(1)) - .WillOnce(Return(nullptr)); + EXPECT_CALL(*fileManager_, GetFileEntity(1)).WillOnce(Return(nullptr)); ASSERT_FALSE(heartbeatManager_->UpdateFileTimestamp(1, 100)); } @@ -136,7 +131,7 @@ TEST_F(HeartbeatManagerTest, UpdateNebdClientInfo) { } // namespace server } // namespace nebd -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/nebd/test/part2/heartbeat_service_test.cpp b/nebd/test/part2/heartbeat_service_test.cpp index 7d60ce6981..7e29edd10c 100644 --- a/nebd/test/part2/heartbeat_service_test.cpp +++ b/nebd/test/part2/heartbeat_service_test.cpp @@ -20,13 +20,15 @@ * Author: charisu */ -#include +#include "nebd/src/part2/heartbeat_service.h" + #include #include +#include + #include #include "nebd/proto/heartbeat.pb.h" -#include "nebd/src/part2/heartbeat_service.h" #include "nebd/test/part2/mock_heartbeat_manager.h" using ::testing::_; using ::testing::Return; @@ -41,15 +43,15 @@ class HeartbeatServiceTest : public ::testing::Test { void SetUp() override { heartbeatManager_ = std::make_shared(); } - std::shared_ptr heartbeatManager_; + std::shared_ptr heartbeatManager_; }; TEST_F(HeartbeatServiceTest, KeepAlive) { - // 启动server + // Start server brpc::Server server; NebdHeartbeatServiceImpl heartbeatService(heartbeatManager_); ASSERT_EQ(0, server.AddService(&heartbeatService, - brpc::SERVER_DOESNT_OWN_SERVICE)); + brpc::SERVER_DOESNT_OWN_SERVICE)); brpc::ServerOptions option; option.idle_timeout_sec = -1; ASSERT_EQ(0, 
server.StartAtSockFile(kSockFile_.c_str(), &option)); @@ -68,7 +70,7 @@ TEST_F(HeartbeatServiceTest, KeepAlive) { nebd::client::NebdHeartbeatService_Stub stub(&channel); brpc::Controller cntl; - // 正常情况 + // Normal situation EXPECT_CALL(*heartbeatManager_, UpdateFileTimestamp(_, _)) .Times(3) .WillRepeatedly(Return(true)); @@ -76,7 +78,7 @@ ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(nebd::client::RetCode::kOK, response.retcode()); - // 有文件更新时间戳失败 + // A file failed to update its timestamp EXPECT_CALL(*heartbeatManager_, UpdateFileTimestamp(_, _)) .Times(3) .WillOnce(Return(false)) @@ -86,14 +88,14 @@ ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(nebd::client::RetCode::kNoOK, response.retcode()); - // 停止server + // Stop server server.Stop(0); server.Join(); } } // namespace server } // namespace nebd -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/nebd/test/part2/metafile_manager_test.cpp b/nebd/test/part2/metafile_manager_test.cpp index 7027cb9da6..dbde2d4ee3 100644 --- a/nebd/test/part2/metafile_manager_test.cpp +++ b/nebd/test/part2/metafile_manager_test.cpp @@ -20,11 +20,13 @@ * Author: charisu */ +#include "nebd/src/part2/metafile_manager.h" + #include #include + #include -#include "nebd/src/part2/metafile_manager.h" #include "nebd/test/part2/mock_posix_wrapper.h" using ::testing::_; @@ -37,8 +39,7 @@ const char metaPath[] = "/tmp/nebd-test-metafilemanager.meta"; void FillCrc(Json::Value* root) { std::string jsonString = root->toStyledString(); - uint32_t crc = nebd::common::CRC32(jsonString.c_str(), - jsonString.size()); + uint32_t crc = nebd::common::CRC32(jsonString.c_str(), jsonString.size()); (*root)[kCRC] = crc; } @@ -61,19 +62,19 @@ TEST_F(MetaFileManagerTest, nomaltest) { NebdMetaFileManager metaFileManager; ASSERT_EQ(metaFileManager.Init(option), 0); std::vector fileMetas; - // 文件不存在 + // File does not exist ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_TRUE(fileMetas.empty()); - // 添加两条记录,curve和test各一 + // Add two records, one for curve and one for test NebdFileMeta fileMeta1; fileMeta1.fileName = "test:volume1"; fileMeta1.fd = 1; ASSERT_EQ(0, metaFileManager.UpdateFileMeta(fileMeta1.fileName, fileMeta1)); - // 使用相同的内容Update + // Update using the same content ASSERT_EQ(0, metaFileManager.UpdateFileMeta(fileMeta1.fileName, fileMeta1)); - // 插入不同的meta + // Insert different meta NebdFileMeta fileMeta2; fileMeta2.fileName = "cbd:volume2"; fileMeta2.fd = 2; @@ -89,9 +90,9 @@ // remove meta ASSERT_EQ(0, metaFileManager.RemoveFileMeta(fileMeta2.fileName)); - // remove 不存在的meta + // remove non-existent meta ASSERT_EQ(0, metaFileManager.RemoveFileMeta("unknown")); - // 校验结果 + // Verification results fileMetas.clear(); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); @@ -111,35 +112,28 @@ TEST_F(MetaFileManagerTest, UpdateMetaFailTest) { fileMetaMap.emplace(fileMeta.fileName, fileMeta); std::vector fileMetas; - // open临时文件失败 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(-1)); + // Open temporary file failed + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, metaFileManager.UpdateFileMeta(fileMeta.fileName, fileMeta)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(0, fileMetas.size()); - // 写入临时文件失败 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(1)); - 
EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) - .WillOnce(Return(0)); - EXPECT_CALL(*wrapper_, close(_)) - .Times(1); + // Failed to write temporary file + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(1)); + EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)).WillOnce(Return(0)); + EXPECT_CALL(*wrapper_, close(_)).Times(1); ASSERT_EQ(-1, metaFileManager.UpdateFileMeta(fileMeta.fileName, fileMeta)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(0, fileMetas.size()); - // rename失败 + // Rename failed NebdMetaFileParser parser; Json::Value root = parser.ConvertFileMetasToJson(fileMetaMap); - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(1)); EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) .WillOnce(Return(root.toStyledString().size())); - EXPECT_CALL(*wrapper_, close(_)) - .Times(1); - EXPECT_CALL(*wrapper_, rename(_, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper_, close(_)).Times(1); + EXPECT_CALL(*wrapper_, rename(_, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, metaFileManager.UpdateFileMeta(fileMeta.fileName, fileMeta)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(0, fileMetas.size()); @@ -160,15 +154,12 @@ TEST_F(MetaFileManagerTest, RemoveMetaFailTest) { NebdMetaFileParser parser; Json::Value root = parser.ConvertFileMetasToJson(fileMetaMap); - // 先插入一条数据 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(1)); + // Insert a piece of data first + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(1)); EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) .WillOnce(Return(root.toStyledString().size())); - EXPECT_CALL(*wrapper_, close(_)) - .Times(1); - EXPECT_CALL(*wrapper_, rename(_, _)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper_, close(_)).Times(1); + EXPECT_CALL(*wrapper_, rename(_, _)).WillOnce(Return(0)); ASSERT_EQ(0, metaFileManager.UpdateFileMeta(fileMeta.fileName, fileMeta)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); @@ -176,33 +167,26 @@ TEST_F(MetaFileManagerTest, RemoveMetaFailTest) { fileMetaMap.erase(fileMeta.fileName); root = parser.ConvertFileMetasToJson(fileMetaMap); - // open临时文件失败 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(-1)); + // Open temporary file failed + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, metaFileManager.RemoveFileMeta(fileMeta.fileName)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); - // 写入临时文件失败 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) - .WillOnce(Return(0)); - EXPECT_CALL(*wrapper_, close(_)) - .Times(1); + // Failed to write temporary file + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(1)); + EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)).WillOnce(Return(0)); + EXPECT_CALL(*wrapper_, close(_)).Times(1); ASSERT_EQ(-1, metaFileManager.RemoveFileMeta(fileMeta.fileName)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); - // rename失败 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(1)); + // Rename failed + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(1)); EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) .WillOnce(Return(root.toStyledString().size())); - EXPECT_CALL(*wrapper_, close(_)) - .Times(1); - EXPECT_CALL(*wrapper_, rename(_, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper_, close(_)).Times(1); + EXPECT_CALL(*wrapper_, rename(_, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, 
metaFileManager.RemoveFileMeta(fileMeta.fileName)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); @@ -215,7 +199,7 @@ TEST(MetaFileParserTest, Parse) { Json::Value volumes; FileMetaMap fileMetas; - // 正常情况 + // Normal situation volume[kFileName] = "cbd:volume1"; volume[kFd] = 1; volumes.append(volume); @@ -225,18 +209,19 @@ TEST(MetaFileParserTest, Parse) { FillCrc(&root); ASSERT_EQ(0, parser.Parse(root, &fileMetas)); - // 空指针 + // Null pointer ASSERT_EQ(-1, parser.Parse(root, nullptr)); - // crc校验不正确 + // Incorrect crc verification root[kCRC] = root[kCRC].asUInt() + 1; ASSERT_EQ(-1, parser.Parse(root, &fileMetas)); - // 没有crc字段 + // No crc field root.removeMember(kCRC); ASSERT_EQ(-1, parser.Parse(root, &fileMetas)); - // 没有volumes字段或volumes字段是null,不应该报错 + // There is no volumes field or the volumes field is null, and an error + // should not be reported root.clear(); root["key"] = "value"; FillCrc(&root); @@ -249,7 +234,7 @@ TEST(MetaFileParserTest, Parse) { ASSERT_EQ(0, parser.Parse(root, &fileMetas)); ASSERT_TRUE(fileMetas.empty()); - // 记录中没有filename + // There is no filename in the record volume.clear(); volumes.clear(); root.clear(); @@ -259,7 +244,7 @@ TEST(MetaFileParserTest, Parse) { FillCrc(&root); ASSERT_EQ(-1, parser.Parse(root, &fileMetas)); - // 记录中没有fd + // The record does not contain an 'fd'. volume.clear(); volumes.clear(); root.clear(); @@ -273,7 +258,7 @@ TEST(MetaFileParserTest, Parse) { } // namespace server } // namespace nebd -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/nebd/test/part2/test_nebd_server.cpp b/nebd/test/part2/test_nebd_server.cpp index 1f6f8ef112..effcdc05b3 100644 --- a/nebd/test/part2/test_nebd_server.cpp +++ b/nebd/test/part2/test_nebd_server.cpp @@ -21,27 +21,28 @@ */ #include + #include "nebd/src/part2/nebd_server.h" #include "nebd/test/part2/mock_curve_client.h" namespace nebd { namespace server { -using ::testing::Return; using ::testing::_; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; TEST(TestNebdServer, test_Init_Run_Fini) { NebdServer server; auto curveClient = std::make_shared(); std::string confPath; - // 1. 配置文件不存在, init失败 + // 1. Configuration file does not exist, init failed confPath = "./nebd.conf"; ASSERT_EQ(-1, server.Init(confPath)); - // 2. 配置文件存在, 监听端口未设置 + // 2. Configuration file exists, listening port not set confPath = "./nebd/test/part2/nebd-server-err.conf"; Configuration conf; conf.SetBoolValue("response.returnRpcWhenIoError", false); @@ -49,55 +50,54 @@ TEST(TestNebdServer, test_Init_Run_Fini) { conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath)); - // 3、配置文件中没有client配置 + // 3. There is no client configuration in the configuration file conf.SetStringValue("listen.address", "/tmp/nebd-server.sock"); conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath)); - // 4. curveclient init失败 + // 4. Curveclient init failed conf.SetStringValue("curveclient.confPath", "/etc/curve/client.conf"); conf.SaveConfig(); EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(-1)); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 5、初始化fileManager失败 + // 5. Failed to initialize fileManager EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 6、没有heartbeat.timeout字段 + // 6. 
diff --git a/nebd/test/part2/test_nebd_server.cpp b/nebd/test/part2/test_nebd_server.cpp index 1f6f8ef112..effcdc05b3 100644 --- a/nebd/test/part2/test_nebd_server.cpp +++ b/nebd/test/part2/test_nebd_server.cpp @@ -21,27 +21,28 @@ */ #include + #include "nebd/src/part2/nebd_server.h" #include "nebd/test/part2/mock_curve_client.h" namespace nebd { namespace server { -using ::testing::Return; using ::testing::_; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; TEST(TestNebdServer, test_Init_Run_Fini) { NebdServer server; auto curveClient = std::make_shared(); std::string confPath; - // 1. 配置文件不存在, init失败 + // 1. Configuration file does not exist, init failed confPath = "./nebd.conf"; ASSERT_EQ(-1, server.Init(confPath)); - // 2. 配置文件存在, 监听端口未设置 + // 2. Configuration file exists, listening port not set confPath = "./nebd/test/part2/nebd-server-err.conf"; Configuration conf; conf.SetBoolValue("response.returnRpcWhenIoError", false); @@ -49,55 +50,54 @@ TEST(TestNebdServer, test_Init_Run_Fini) { conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath)); - // 3、配置文件中没有client配置 + // 3. There is no client configuration in the configuration file conf.SetStringValue("listen.address", "/tmp/nebd-server.sock"); conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath)); - // 4. curveclient init失败 + // 4. Curveclient init failed conf.SetStringValue("curveclient.confPath", "/etc/curve/client.conf"); conf.SaveConfig(); EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(-1)); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 5、初始化fileManager失败 + // 5. Failed to initialize fileManager EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 6、没有heartbeat.timeout字段 + // 6. There is no heartbeat.timeout field EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); conf.SetStringValue("meta.file.path", "./nebd-server-test.meta"); conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 7、没有heartbeat.check.interval.ms字段 + // 7. No heartbeat.check.interval.ms field EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); conf.SetIntValue("heartbeat.timeout.sec", 30); conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - - // 8. 初始化成功 + // 8. Initialized successfully EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); conf.SetIntValue("heartbeat.check.interval.ms", 3000); conf.SaveConfig(); ASSERT_EQ(0, server.Init(confPath, curveClient)); - // 9. run成功 + // 9. Run succeeds EXPECT_CALL(*curveClient, UnInit()).Times(2); std::thread nebdServerThread(&NebdServer::RunUntilAskedToQuit, &server); sleep(1); - // 10、再次Run会失败 + // 10. Running again will fail ASSERT_EQ(-1, server.RunUntilAskedToQuit()); - // 11、Run之后Init会失败 + // 11. Init will fail after Run ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 7. stop成功 + // 12. Stop succeeds ASSERT_EQ(0, server.Fini()); - // 8. 再次stop不会重复释放资源 + // 13. Stopping again does not release resources twice ASSERT_EQ(0, server.Fini()); nebdServerThread.join(); } @@ -105,7 +105,7 @@ TEST(TestNebdServer, test_Init_Run_Fini) { } // namespace server } // namespace nebd -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/nebd/test/part2/test_request_executor_curve.cpp b/nebd/test/part2/test_request_executor_curve.cpp index 2b749d0615..8d8c3811f2 100644 --- a/nebd/test/part2/test_request_executor_curve.cpp +++ b/nebd/test/part2/test_request_executor_curve.cpp @@ -21,36 +21,30 @@ */ #include -#include "nebd/src/part2/request_executor_curve.h" -#include "nebd/test/part2/mock_curve_client.h" #include "nebd/proto/client.pb.h" #include "nebd/proto/heartbeat.pb.h" #include "nebd/src/part2/file_service.h" +#include "nebd/src/part2/request_executor_curve.h" +#include "nebd/test/part2/mock_curve_client.h" namespace nebd { namespace server { -using ::testing::Return; using ::testing::_; -using ::testing::SetArgPointee; using ::testing::DoAll; -using ::testing::SaveArg; using ::testing::Invoke; +using ::testing::Return; +using ::testing::SaveArg; +using ::testing::SetArgPointee; class TestReuqestExecutorCurveClosure : public google::protobuf::Closure { public: TestReuqestExecutorCurveClosure() : runned_(false) {} ~TestReuqestExecutorCurveClosure() {} - void Run() { - runned_ = true; - } - bool IsRunned() { - return runned_; - } - void Reset() { - runned_ = false; - } + void Run() { runned_ = true; } + bool IsRunned() { return runned_; } + void Reset() { runned_ = false; } private: bool runned_; @@ -60,7 +54,7 @@ void NebdUnitTestCallback(NebdServerAioContext* context) { std::cout << "callback" << std::endl; } -class TestReuqestExecutorCurve : public ::testing::Test { +class TestReuqestExecutorCurve : public ::testing::Test { protected: void SetUp() { curveClient_ = std::make_shared(); @@ -77,7 +71,7 @@ TEST_F(TestReuqestExecutorCurve, test_Open) { std::string fileName("cbd:pool1//cinder/volume-1234_cinder_:/client.conf"); std::string curveFileName("/cinder/volume-1234_cinder_"); - // 1. 传入的fileName解析失败 + // 1.
Failed to parse the passed-in fileName { std::string errFileName("cbd:pool1/:"); EXPECT_CALL(*curveClient_, Open(fileName, _)).Times(0); std::shared_ptr ret = executor.Open(errFileName, nullptr); ASSERT_TRUE(nullptr == ret); } - // 2. curveclient open失败 + // 2. Curveclient open failed { - EXPECT_CALL(*curveClient_, Open(curveFileName, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*curveClient_, Open(curveFileName, _)).WillOnce(Return(-1)); std::shared_ptr ret = executor.Open(fileName, nullptr); ASSERT_TRUE(nullptr == ret); } - // 3. open成功 + // 3. Open successful { - EXPECT_CALL(*curveClient_, Open(curveFileName, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*curveClient_, Open(curveFileName, _)).WillOnce(Return(1)); std::shared_ptr ret = executor.Open(fileName, nullptr); ASSERT_TRUE(nullptr != ret); - auto *curveIns = dynamic_cast(ret.get()); + auto* curveIns = dynamic_cast(ret.get()); ASSERT_TRUE(nullptr != curveIns); ASSERT_EQ(curveFileName, curveIns->fileName); ASSERT_EQ(1, curveIns->fd); @@ -117,16 +109,16 @@ TEST_F(TestReuqestExecutorCurve, test_ReOpen) { std::string fileName("cbd:pool1//cinder/volume-1234_cinder_:/client.conf"); std::string curveFileName("/cinder/volume-1234_cinder_"); - // 1. 传入的fileName解析失败 + // 1. Failed to parse the passed-in fileName { std::string errFileName("cbd:pool1/:"); EXPECT_CALL(*curveClient_, Open(_, _)).Times(0); - std::shared_ptr ret = executor.Reopen( - errFileName, xattr); + std::shared_ptr ret = + executor.Reopen(errFileName, xattr); ASSERT_TRUE(nullptr == ret); } - // 2. repoen失败 + // 2. Reopen failed { EXPECT_CALL(*curveClient_, ReOpen(curveFileName, _)) .WillOnce(Return(-1)); @@ -135,14 +127,14 @@ TEST_F(TestReuqestExecutorCurve, test_ReOpen) { ASSERT_TRUE(nullptr == ret); } - // 3. reopen成功 + // 3. Reopen successful { EXPECT_CALL(*curveClient_, ReOpen(curveFileName, _)) .WillOnce(Return(1)); - std::shared_ptr ret = + std::shared_ptr ret = executor.Reopen(fileName, xattr); ASSERT_TRUE(nullptr != ret); - auto *curveIns = dynamic_cast(ret.get()); + auto* curveIns = dynamic_cast(ret.get()); ASSERT_TRUE(nullptr != curveIns); ASSERT_EQ(curveFileName, curveIns->fileName); ASSERT_EQ(1, curveIns->fd); @@ -153,14 +145,14 @@ TEST_F(TestReuqestExecutorCurve, test_Close) { auto executor = CurveRequestExecutor::GetInstance(); - // 1. nebdFileIns不是CurveFileInstance类型, close失败 + // 1. nebdFileIns is not of type CurveFileInstance, close failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, Close(_)).Times(0); ASSERT_EQ(-1, executor.Close(nebdFileIns)); } - // 2. nebdFileIns中的fd<0, close失败 + // 2. fd<0 in nebdFileIns, close failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = -1; @@ -168,7 +160,7 @@ TEST_F(TestReuqestExecutorCurve, test_Close) { ASSERT_EQ(-1, executor.Close(curveFileIns)); } - // 3. 调用curveclient的close接口失败, close失败 + // 3. Calling the close interface of curveclient failed, close failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; @@ -177,7 +169,7 @@ TEST_F(TestReuqestExecutorCurve, test_Close) { ASSERT_EQ(-1, executor.Close(curveFileIns)); } - // 4. close成功 + // 4. close successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; @@ -191,21 +183,21 @@ TEST_F(TestReuqestExecutorCurve, test_Extend) { auto executor = CurveRequestExecutor::GetInstance(); std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1. nebdFileIns不是CurveFileInstance类型, extend失败 + // 1.
nebdFileIns is not of type CurveFileInstance, extend failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, Extend(_, _)).Times(0); ASSERT_EQ(-1, executor.Extend(nebdFileIns, 1)); } - // 2. nebdFileIns中的fileName为空, extend失败 + // 2. FileName in nebdFileIns is empty, extend failed { auto curveFileIns = new CurveFileInstance(); EXPECT_CALL(*curveClient_, Extend(_, _)).Times(0); ASSERT_EQ(-1, executor.Extend(curveFileIns, 1)); } - // 3. 调用curveclient的extend接口失败, extend失败 + // 3. Calling the extend interface of curveclient failed, extend failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fileName = curveFilename; @@ -214,7 +206,7 @@ TEST_F(TestReuqestExecutorCurve, test_Extend) { ASSERT_EQ(-1, executor.Extend(curveFileIns, 1)); } - // 4. extend成功 + // 4. extend successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fileName = curveFilename; @@ -229,43 +221,40 @@ TEST_F(TestReuqestExecutorCurve, test_GetInfo) { NebdFileInfo fileInfo; int curveFd = 123; - // 1. nebdFileIns不是CurveFileInstance类型, stat失败 + // 1. nebdFileIns is not of type CurveFileInstance, stat failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, StatFile(curveFd, _)).Times(0); ASSERT_EQ(-1, executor.GetInfo(nebdFileIns, &fileInfo)); } - // 2. nebdFileIns中的fd为空, stat失败 + // 2. Fd in nebdFileIns is empty, stat failed { auto curveFileIns = new CurveFileInstance(); EXPECT_CALL(*curveClient_, StatFile(curveFd, _)).Times(0); ASSERT_EQ(-1, executor.GetInfo(curveFileIns, &fileInfo)); } - - // 3. 调用curveclient的stat接口失败, stat失败 + // 3. Calling the stat interface of curveclient failed, stat failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = curveFd; - EXPECT_CALL(*curveClient_, StatFile(curveFd, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*curveClient_, StatFile(curveFd, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, executor.GetInfo(curveFileIns, &fileInfo)); } - // 4. stat成功 + // 4. stat successful { const uint64_t size = 10ull * 1024 * 1024 * 1024; const uint32_t blocksize = 4096; auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = curveFd; EXPECT_CALL(*curveClient_, StatFile(curveFd, _)) - .WillOnce(Invoke( - [size, blocksize](int /*fd*/, FileStatInfo* info) { - info->length = size; - info->blocksize = blocksize; - return 0; - })); + .WillOnce(Invoke([size, blocksize](int /*fd*/, FileStatInfo* info) { + info->length = size; + info->blocksize = blocksize; + return 0; + })); ASSERT_EQ(0, executor.GetInfo(curveFileIns, &fileInfo)); ASSERT_EQ(size, fileInfo.size); ASSERT_EQ(blocksize, fileInfo.block_size); @@ -278,14 +267,14 @@ TEST_F(TestReuqestExecutorCurve, test_AioRead) { aiotcx.cb = NebdUnitTestCallback; std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1. nebdFileIns不是CurveFileInstance类型, 异步读失败 + // 1. nebdFileIns is not of type CurveFileInstance, asynchronous read failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, AioRead(_, _, _)).Times(0); ASSERT_EQ(-1, executor.AioRead(nebdFileIns, &aiotcx)); } - // 2. nebdFileIns中的fd<0, 异步读失败 + // 2. fd<0 in nebdFileIns, asynchronous read failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = -1; @@ -293,7 +282,8 @@ TEST_F(TestReuqestExecutorCurve, test_AioRead) { ASSERT_EQ(-1, executor.AioRead(curveFileIns, &aiotcx)); } - // 3. 调用curveclient的AioRead接口失败, 异步读失败 + // 3. 
Calling the AioRead interface of curveclient failed, asynchronous read + // failed { auto curveFileIns = new CurveFileInstance(); aiotcx.size = 1; @@ -307,15 +297,14 @@ TEST_F(TestReuqestExecutorCurve, test_AioRead) { ASSERT_EQ(-1, executor.AioRead(curveFileIns, &aiotcx)); } - // 4. 异步读取成功 + // 4. Asynchronous read successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; curveFileIns->fileName = curveFilename; CurveAioContext* curveCtx; EXPECT_CALL(*curveClient_, AioRead(1, _, _)) - .WillOnce(DoAll(SaveArg<1>(&curveCtx), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SaveArg<1>(&curveCtx), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(0, executor.AioRead(curveFileIns, &aiotcx)); curveCtx->cb(curveCtx); } @@ -327,14 +316,15 @@ TEST_F(TestReuqestExecutorCurve, test_AioWrite) { aiotcx.cb = NebdUnitTestCallback; std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1. nebdFileIns不是CurveFileInstance类型, 异步写失败 + // 1. nebdFileIns is not of type CurveFileInstance, asynchronous write + // failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, AioWrite(_, _, _)).Times(0); ASSERT_EQ(-1, executor.AioWrite(nebdFileIns, &aiotcx)); } - // 2. nebdFileIns中的fd<0, 异步写失败 + // 2. fd<0 in nebdFileIns, asynchronous write failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = -1; @@ -342,7 +332,8 @@ TEST_F(TestReuqestExecutorCurve, test_AioWrite) { ASSERT_EQ(-1, executor.AioWrite(curveFileIns, &aiotcx)); } - // 3. 调用curveclient的AioWrite接口失败, 异步写失败 + // 3. Calling the AioWrite interface of curveclient failed, asynchronous + // write failed { auto curveFileIns = new CurveFileInstance(); aiotcx.size = 1; @@ -356,15 +347,14 @@ TEST_F(TestReuqestExecutorCurve, test_AioWrite) { ASSERT_EQ(-1, executor.AioWrite(curveFileIns, &aiotcx)); } - // 4. 异步写入成功 + // 4. Asynchronous write successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; curveFileIns->fileName = curveFilename; CurveAioContext* curveCtx; EXPECT_CALL(*curveClient_, AioWrite(1, _, _)) - .WillOnce(DoAll(SaveArg<1>(&curveCtx), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SaveArg<1>(&curveCtx), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(0, executor.AioWrite(curveFileIns, &aiotcx)); curveCtx->cb(curveCtx); } @@ -379,8 +369,7 @@ TEST_F(TestReuqestExecutorCurve, test_Discard) { // 1. not an curve volume { std::unique_ptr nebdFileIns(new NebdFileInstance()); - EXPECT_CALL(*curveClient_, AioDiscard(_, _)) - .Times(0); + EXPECT_CALL(*curveClient_, AioDiscard(_, _)).Times(0); ASSERT_EQ(-1, executor.Discard(nebdFileIns.get(), &aioctx)); } @@ -389,8 +378,7 @@ TEST_F(TestReuqestExecutorCurve, test_Discard) { std::unique_ptr curveFileIns( new CurveFileInstance()); curveFileIns->fd = -1; - EXPECT_CALL(*curveClient_, AioDiscard(_, _)) - .Times(0); + EXPECT_CALL(*curveClient_, AioDiscard(_, _)).Times(0); ASSERT_EQ(-1, executor.Discard(curveFileIns.get(), &aioctx)); } @@ -419,8 +407,7 @@ TEST_F(TestReuqestExecutorCurve, test_Discard) { curveFileIns->fileName = curveFilename; CurveAioContext* curveCtx; EXPECT_CALL(*curveClient_, AioDiscard(_, _)) - .WillOnce(DoAll(SaveArg<1>(&curveCtx), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SaveArg<1>(&curveCtx), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(0, executor.Discard(curveFileIns.get(), &aioctx)); curveCtx->cb(curveCtx); } @@ -448,13 +435,13 @@ TEST_F(TestReuqestExecutorCurve, test_InvalidCache) { auto executor = CurveRequestExecutor::GetInstance(); std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1. 
nebdFileIns不是CurveFileInstance类型, 不合法 + // 1. nebdFileIns is not of type CurveFileInstance, illegal { auto nebdFileIns = new NebdFileInstance(); ASSERT_EQ(-1, executor.InvalidCache(nebdFileIns)); } - // 2. fd<0, 不合法 + // 2. fd<0, illegal { auto curveFileIns = new CurveFileInstance(); curveFileIns->fileName = curveFilename; curveFileIns->fd = -1; ASSERT_EQ(-1, executor.InvalidCache(curveFileIns)); } - // 3. filename为空,不合法 + // 3. The filename is empty, illegal { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; ASSERT_EQ(-1, executor.InvalidCache(curveFileIns)); } - // 4. 合法 + // 4. Legal { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; curveFileIns->fileName = curveFilename; ASSERT_EQ(0, executor.InvalidCache(curveFileIns)); } } - TEST(TestFileNameParser, test_Parse) { std::string fileName("cbd:pool1//cinder/volume-1234_cinder_:/client.conf"); - std::pair res( - "/cinder/volume-1234_cinder_", "/client.conf"); + std::pair res("/cinder/volume-1234_cinder_", + "/client.conf"); ASSERT_EQ(res, FileNameParser::Parse(fileName)); fileName = "cbd:pool1//cinder/volume-1234_cinder_"; @@ -500,11 +486,10 @@ TEST(TestFileNameParser, test_Parse) { ASSERT_EQ(res, FileNameParser::Parse(fileName)); } - } // namespace server } // namespace nebd -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); return RUN_ALL_TESTS();
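The TestFileNameParser cases above pin down the nebd volume-name convention: the curve file name sits between the "cbd:pool1/" prefix and an optional ":<conf path>" suffix. A hedged sketch of the rule those assertions imply (ParseVolumeName is illustrative, not the real FileNameParser):

```cpp
#include <string>
#include <utility>

// Splits "cbd:pool1//cinder/volume-1234_cinder_:/client.conf" into
// ("/cinder/volume-1234_cinder_", "/client.conf"), matching the test
// expectations above; the conf part is empty when no suffix is present.
std::pair<std::string, std::string> ParseVolumeName(const std::string& in) {
    size_t slash = in.find('/');  // end of the "cbd:pool1" style prefix
    std::string rest = (slash == std::string::npos) ? in : in.substr(slash + 1);
    size_t colon = rest.rfind(':');  // optional ":<client conf path>" suffix
    if (colon == std::string::npos) return {rest, ""};
    return {rest.substr(0, colon), rest.substr(colon + 1)};
}
```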
diff --git a/proto/chunk.proto b/proto/chunk.proto index af5cd3fb5a..c19303c854 100755 --- a/proto/chunk.proto +++ b/proto/chunk.proto @@ -20,7 +20,7 @@ package curve.chunkserver; option cc_generic_services = true; option go_package = "proto/chunk"; -// Qos 参数 +// Qos parameters message QosRequestParas { optional uint32 clientId = 1; optional int32 dmclockDelta = 2; @@ -28,38 +28,38 @@ message QosResponseParas { - optional int32 phase = 1; // 0: 代表 reservation 阶段; 1: 代表 priority 阶段 + optional int32 phase = 1; // 0: the reservation phase; 1: the priority phase optional int32 cost = 2; // } // For chunk enum CHUNK_OP_TYPE { - CHUNK_OP_DELETE = 0; // 删除 chunk - CHUNK_OP_READ = 1; // 读 chunk - CHUNK_OP_WRITE = 2; // 写 chunk + CHUNK_OP_DELETE = 0; // Delete chunk + CHUNK_OP_READ = 1; // Read chunk + CHUNK_OP_WRITE = 2; // Write chunk CHUNK_OP_READ_SNAP = 3; // read chunk snapshot - // TODO(wudemiao): 后期替换成CHUNK_OP_DELETE_SNAP_OR_CORRECT_SN, - 保证和chunkserver的接口一致 + // TODO(wudemiao): replace with CHUNK_OP_DELETE_SNAP_OR_CORRECT_SN later + // to stay consistent with the chunkserver interface CHUNK_OP_DELETE_SNAP = 4; // delete chunk snapshot - CHUNK_OP_CREATE_CLONE = 5; // 创建clone chunk - CHUNK_OP_RECOVER = 6; // 恢复clone chunk - CHUNK_OP_PASTE = 7; // paste chunk 内部请求 + CHUNK_OP_CREATE_CLONE = 5; // Create clone chunk + CHUNK_OP_RECOVER = 6; // Recover clone chunk + CHUNK_OP_PASTE = 7; // Internal paste chunk request CHUNK_OP_UNKNOWN = 8; // unknown Op CHUNK_OP_SCAN = 9; // scan oprequest }; -// read/write 的实际数据在 rpc 的 attachment 中 +// The actual read/write data is carried in the rpc attachment message ChunkRequest { required CHUNK_OP_TYPE opType = 1; // for all - required uint32 logicPoolId = 2; // for all // logicPoolId 实际上 uint16,但是 proto 没有 uint16 + required uint32 logicPoolId = 2; // for all // logicPoolId is actually uint16, but proto does not have uint16 required uint32 copysetId = 3; // for all required uint64 chunkId = 4; // for all optional uint64 appliedIndex = 5; // for read optional uint32 offset = 6; // for read/write - optional uint32 size = 7; // for read/write/clone 读取数据大小/写入数据大小/创建快照请求中表示请求创建的chunk大小 + optional uint32 size = 7; // for read/write/clone: read/write data size; in a create request, the size of the chunk to create optional QosRequestParas deltaRho = 8; // for read/write - optional uint64 sn = 9; // for write/read snapshot 写请求中表示文件当前版本号,读快照请求中表示请求的chunk的版本号 + optional uint64 sn = 9; // for write/read snapshot: the current file version in a write request, or the requested chunk version in a read-snapshot request - optional uint64 correctedSn = 10; // for CreateCloneChunk/DeleteChunkSnapshotOrCorrectedSn 用于修改chunk的correctedSn + optional uint64 correctedSn = 10; // for CreateCloneChunk/DeleteChunkSnapshotOrCorrectedSn, used to modify the correctedSn of the chunk optional string location = 11; // for CreateCloneChunk optional string cloneFileSource = 12; // for write/read optional uint64 cloneFileOffset = 13; // for write/read @@ -72,28 +72,28 @@ message ChunkRequest { }; enum CHUNK_OP_STATUS { - CHUNK_OP_STATUS_SUCCESS = 0; // 成功 - CHUNK_OP_STATUS_REDIRECTED = 1; // 不是 leader,重定向 - CHUNK_OP_STATUS_DISK_FAIL = 2; // 磁盘返回错误 - CHUNK_OP_STATUS_CRC_FAIL = 3; // CRC 校验失败 - CHUNK_OP_STATUS_INVALID_REQUEST = 4; // 请求参数不对 - CHUNK_OP_STATUS_NOSPACE = 5; // 空间不够 - CHUNK_OP_STATUS_COPYSET_NOTEXIST = 6; // copyset 不存在 - CHUNK_OP_STATUS_CHUNK_NOTEXIST = 7; // chunk或其快照文件不存在 - CHUNK_OP_STATUS_FAILURE_UNKNOWN = 8; // 其他错误 - CHUNK_OP_STATUS_OVERLOAD = 9; // 过载,表示服务端有过多请求未处理返回 - CHUNK_OP_STATUS_BACKWARD = 10; // 请求的版本落后当前chunk的版本 - CHUNK_OP_STATUS_CHUNK_EXIST = 11; // chunk已存在 + CHUNK_OP_STATUS_SUCCESS = 0; // Success + CHUNK_OP_STATUS_REDIRECTED = 1; // Not the leader, redirect + CHUNK_OP_STATUS_DISK_FAIL = 2; // Disk returned an error + CHUNK_OP_STATUS_CRC_FAIL = 3; // CRC check failed + CHUNK_OP_STATUS_INVALID_REQUEST = 4; // Invalid request parameters + CHUNK_OP_STATUS_NOSPACE = 5; // Insufficient space + CHUNK_OP_STATUS_COPYSET_NOTEXIST = 6; // Copyset does not exist + CHUNK_OP_STATUS_CHUNK_NOTEXIST = 7; // Chunk or its snapshot file does not exist + CHUNK_OP_STATUS_FAILURE_UNKNOWN = 8; // Other errors + CHUNK_OP_STATUS_OVERLOAD = 9; // Overloaded: the server has too many outstanding requests + CHUNK_OP_STATUS_BACKWARD = 10; // The requested version falls behind the current chunk version + CHUNK_OP_STATUS_CHUNK_EXIST = 11; // Chunk already exists CHUNK_OP_STATUS_EPOCH_TOO_OLD = 12; // request epoch too old }; message ChunkResponse { required CHUNK_OP_STATUS status = 1; - optional string redirect = 2; // 自己不是 leader,重定向给 leader - optional uint64 appliedIndex = 3; // 返回当前最新的 committedIndex, 注意 read 和 write 都要返回 + optional string redirect = 2; // Not the leader, redirect to the leader + optional uint64 appliedIndex = 3; // Returns the latest committedIndex; note it is returned for both reads and writes optional QosResponseParas phaseCost = 4; // for read/write - optional uint64 chunkSn = 5; // for GetChunkInfo 表示chunk文件版本号,0表示不存在 - optional uint64 snapSn = 6; // for GetChunkInfo 表示chunk文件快照的版本号,0表示不存在 + optional uint64 chunkSn = 5; // for GetChunkInfo: the chunk file version, 0 if it does not exist + optional uint64 snapSn = 6; // for GetChunkInfo: the chunk snapshot version, 0 if it does not exist };
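Per the ChunkResponse comments above, a client is expected to retry against the leader named in redirect and to track appliedIndex from every read and write. A hedged sketch of that handling (UpdateAppliedIndex and ResendToPeer are hypothetical helpers, not the real curve client):

```cpp
#include <string>

#include "proto/chunk.pb.h"  // generated from chunk.proto

void UpdateAppliedIndex(uint64_t index);        // hypothetical helper
void ResendToPeer(const std::string& address);  // hypothetical helper

void OnChunkResponse(const curve::chunkserver::ChunkResponse& resp) {
    switch (resp.status()) {
        case curve::chunkserver::CHUNK_OP_STATUS_SUCCESS:
            // Both reads and writes return the latest committedIndex.
            if (resp.has_appliedindex()) UpdateAppliedIndex(resp.appliedindex());
            break;
        case curve::chunkserver::CHUNK_OP_STATUS_REDIRECTED:
            // Not the leader; retry against the peer named in redirect.
            if (resp.has_redirect()) ResendToPeer(resp.redirect());
            break;
        default:
            break;  // CRC_FAIL, NOSPACE, ... surface as errors
    }
}
```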
message GetChunkInfoRequest { @@ -104,8 +104,8 @@ message GetChunkInfoRequest { message GetChunkInfoResponse { required CHUNK_OP_STATUS status = 1; - optional string redirect = 2; // 自己不是 leader,重定向给 leader - repeated uint64 chunkSn = 3; // chunk 版本号 和 snapshot 版本号 + optional string redirect = 2; // Not the leader, redirect to the leader + repeated uint64 chunkSn = 3; // Chunk version number and snapshot version number }; message GetChunkHashRequest { @@ -118,7 +118,7 @@ message GetChunkHashRequest { message GetChunkHashResponse { required CHUNK_OP_STATUS status = 1; - optional string hash = 2; // 能标志chunk数据状态的hash值,一般是crc32c + optional string hash = 2; // A hash that identifies the state of the chunk data, usually crc32c }; message CreateS3CloneChunkRequest { @@ -131,7 +131,7 @@ message CreateS3CloneChunkRequest { message CreateS3CloneChunkResponse { required CHUNK_OP_STATUS status = 1; - optional string redirect = 2; // 自己不是 leader,重定向给 leader + optional string redirect = 2; // Not the leader, redirect to the leader }; message UpdateEpochRequest { diff --git a/proto/cli.proto b/proto/cli.proto index 46981c967d..5a0bdd89ff 100755 --- a/proto/cli.proto +++ b/proto/cli.proto @@ -20,12 +20,12 @@ package curve.chunkserver; option cc_generic_services = true; option go_package = "proto/cli"; -// 这里都用 logicPoolId, copysetId,进入 rpc service 之后,会转换成 string -// 类型的 groupId,在传给 raft +// logicPoolId and copysetId are used here; inside the rpc service they are +// converted to a groupId of string type and passed to raft // | groupId | // | logicPoolId | copysetId | message AddPeerRequest { - required uint32 logicPoolId = 1; // logicPoolId 实际上 uint16,但是 proto 没有 uint16 + required uint32 logicPoolId = 1; // logicPoolId is actually uint16, but proto does not have uint16 required uint32 copysetId = 2; required string leader_id = 3; required string peer_id = 4; diff --git a/proto/cli2.proto b/proto/cli2.proto index 76416f7a9f..b41d00c322 100755 --- a/proto/cli2.proto +++ b/proto/cli2.proto @@ -23,17 +23,17 @@ package curve.chunkserver; option cc_generic_services = true; option go_package = "proto/cli2"; -// cli.proto 供老的使用保证 +// cli.proto is kept for compatibility with legacy users message AddPeerRequest2 { - required uint32 logicPoolId = 1; // 逻辑池id - required uint32 copysetId = 2; // 复制组id + required uint32 logicPoolId = 1; // Logical pool ID + required uint32 copysetId = 2; // Replication group ID required common.Peer leader = 3; // leader - required common.Peer addPeer = 4; // 新增peer + required common.Peer addPeer = 4; // The peer to add } message AddPeerResponse2 { - repeated common.Peer oldPeers = 1; // 老配置 - repeated common.Peer newPeers = 2; // 新配置 + repeated common.Peer oldPeers = 1; // Old configuration + repeated common.Peer newPeers = 2; // New configuration } message RemovePeerRequest2 { @@ -87,11 +87,11 @@ message SnapshotAllResponse { message GetLeaderRequest2 { required uint32 logicPoolId = 1; required uint32 copysetId = 2; - optional common.Peer peer = 3; // 可以不指定peer查leader + optional common.Peer peer = 3; // The leader can be queried without specifying a peer } message GetLeaderResponse2 { - required common.Peer leader = 1; // 通过peer判空来判断是否返回leader + required common.Peer leader = 1; // Whether a leader was returned is determined by checking whether the peer is empty } message ResetPeerRequest2 { diff --git a/proto/common.proto b/proto/common.proto index 3cae9f9e65..0dc409b609 100644 --- a/proto/common.proto +++ b/proto/common.proto @@ -21,13 +21,13 @@ package curve.common; option cc_generic_services = true;
option go_package = "proto/common"; -// 1. braft场景: id不使用,address为braft里面的PeerId,格式为{ip}:{port}:{index} -// 2. curve-raft场景:id是peer id,address为{ip}:{port} -// 当前chunkserver id就是peer id +// 1. In the braft scenario: 'id' is not used, and 'address' is the PeerId within braft, in the format {ip}:{port}:{index}. +// 2. In the curve-raft scenario: 'id' represents the peer id, and 'address' is in the format {ip}:{port}. +// The current chunkserver id is the peer id. message Peer { - optional uint64 id = 1; // peer id,全局唯一 -// optional bool isLearner = 2; // 是否是learner (暂时不支持) - optional string address = 3; // peer的地址信息 + optional uint64 id = 1; // Peer ID, globally unique +// optional bool isLearner = 2; // Whether it is a learner (not supported for now) + optional string address = 3; // Address information of the peer } message CopysetInfo { diff --git a/proto/copyset.proto b/proto/copyset.proto index fe3d271d53..10aab0485c 100755 --- a/proto/copyset.proto +++ b/proto/copyset.proto @@ -23,7 +23,7 @@ package curve.chunkserver; option cc_generic_services = true; option go_package = "proto/copyset"; -// copyset epoch message,用于epoch序列化和反序列化 +// copyset epoch message for epoch serialization and deserialization message ConfEpoch { required uint32 logicPoolId = 1; required uint32 copysetId = 2; @@ -32,15 +32,15 @@ message ConfEpoch { } message CopysetRequest { - // logicPoolId 实际上 uint16,但是 proto 没有 uint16 + // logicPoolId is actually uint16, but proto does not have uint16 required uint32 logicPoolId = 1; required uint32 copysetId = 2; - repeated string peerid = 3; // 当前复制组配置,可以为空 + repeated string peerid = 3; // The current replication group configuration can be empty }; enum COPYSET_OP_STATUS { COPYSET_OP_STATUS_SUCCESS = 0; - COPYSET_OP_STATUS_EXIST = 1; // copyset node 已经存在 + COPYSET_OP_STATUS_EXIST = 1; // copyset node already exists COPYSET_OP_STATUS_COPYSET_NOTEXIST = 2; COPYSET_OP_STATUS_FAILURE_UNKNOWN = 3; COPYSET_OP_STATUS_COPYSET_IS_HEALTHY = 4; @@ -48,7 +48,7 @@ enum COPYSET_OP_STATUS { message CopysetResponse { optional COPYSET_OP_STATUS status = 1; - optional string redirect = 2; // 自己不是 leader,重定向给 leader + optional string redirect = 2; // If not the leader, redirect to the leader. 
}; message Copyset { @@ -69,27 +69,27 @@ message CopysetStatusRequest { required uint32 logicPoolId = 1; required uint32 copysetId = 2; required common.Peer peer = 3; - required bool queryHash = 4; // 考虑到计算copyset hash值是一个非常耗时的操作,所以设置一个bool变量可以选择不查 + required bool queryHash = 4; // Computing the copyset hash is very time-consuming, so this flag lets the caller skip it } -// 大部分字段只能是optional,因为copyset node可能不存在 +// Most fields can only be optional, as the copyset node may not exist message CopysetStatusResponse { - required COPYSET_OP_STATUS status = 1; // op状态 - optional uint32 state = 2; // copyset状态 + required COPYSET_OP_STATUS status = 1; // Op status + optional uint32 state = 2; // Copyset state optional common.Peer peer = 3; // peer optional common.Peer leader = 4; // leader - optional bool readOnly = 5; // 是否只读 - optional int64 term = 6; // 当前任期 - optional int64 committedIndex = 7; // 当前的committed index - optional int64 knownAppliedIndex = 8; // 当前copyset已知的applied index,当前peer可能未apply - optional int64 pendingIndex = 9; // 当前副本未决的op log index起始index - optional int64 pendingQueueSize = 10; // 当前副本未决的op log queue的长度 - optional int64 applyingIndex = 11; // 当前副本正在apply的op log index - optional int64 firstIndex = 12; // 当前副本第一条op log index(包括盘和memory) - optional int64 lastIndex = 13; // 当前副本最后一条op log index(包括盘和memory) - optional int64 diskIndex = 14; // 当前副本已经持久化的最大op log index(不包含memory) - optional uint64 epoch = 15; // 当前copyset配置版本 - optional string hash = 16; // 当前copyset的数据hash值 + optional bool readOnly = 5; // Whether read-only + optional int64 term = 6; // Current term + optional int64 committedIndex = 7; // Current committed index + optional int64 knownAppliedIndex = 8; // The applied index known to this copyset; the current peer may not have applied up to it + optional int64 pendingIndex = 9; // Start index of the pending op log entries on this replica + optional int64 pendingQueueSize = 10; // Length of the pending op log queue on this replica + optional int64 applyingIndex = 11; // The op log index this replica is currently applying + optional int64 firstIndex = 12; // The first op log index on this replica (disk and memory) + optional int64 lastIndex = 13; // The last op log index on this replica (disk and memory) + optional int64 diskIndex = 14; // The largest op log index this replica has persisted (excluding memory) + optional uint64 epoch = 15; // Current copyset configuration version + optional string hash = 16; // The data hash of the current copyset } service CopysetService {
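As the queryHash comment above notes, computing the copyset hash is expensive, so routine status queries should leave it off. A hedged sketch using the generated protobuf API from copyset.proto and common.proto (the peer address is a placeholder):

```cpp
#include "proto/copyset.pb.h"  // generated from copyset.proto

curve::chunkserver::CopysetStatusRequest MakeStatusRequest(
    uint32_t logicPoolId, uint32_t copysetId) {
    curve::chunkserver::CopysetStatusRequest request;
    request.set_logicpoolid(logicPoolId);
    request.set_copysetid(copysetId);
    // Placeholder braft-style address: {ip}:{port}:{index}.
    request.mutable_peer()->set_address("127.0.0.1:8200:0");
    // Hash computation walks the chunk data; only enable it when the
    // caller really needs hash verification.
    request.set_queryhash(false);
    return request;
}
```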
diff --git a/proto/heartbeat.proto b/proto/heartbeat.proto index 6b51d40277..dd600f7112 100644 --- a/proto/heartbeat.proto +++ b/proto/heartbeat.proto @@ -33,13 +33,13 @@ message CopySetInfo { required uint32 copysetId = 2; // copyset replicas, IP:PORT:ID, e.g. 127.0.0.1:8200:0 repeated common.Peer peers = 3; - // epoch, 用来标记配置变更,每变更一次,epoch会增加 + // epoch marks configuration changes; it increases on every change required uint64 epoch = 4; - // 该复制组的leader + // The leader of this replication group required common.Peer leaderPeer = 5; - // 配置变更相关信息 + // Configuration change related information optional ConfigChangeInfo configChangeInfo = 6; - // copyset的性能信息 + // Performance information of the copyset optional CopysetStatistics stats = 7; // whether the current copyset is on scaning optional bool scaning = 8; @@ -51,11 +51,11 @@ message ConfigChangeInfo { required common.Peer peer = 1; - // 配置变更的类型 + // Type of the configuration change required ConfigChangeType type = 2; - // 配置变更是否成功 + // Whether the configuration change succeeded required bool finished = 3; - // 变更的error信息 + // Error information for the change optional CandidateError err = 4; }; @@ -81,13 +81,13 @@ message ChunkServerStatisticInfo { required uint32 writeRate = 2; required uint32 readIOPS = 3; required uint32 writeIOPS = 4; - // 已使用的chunk占用的磁盘空间 + // Disk space occupied by used chunks required uint64 chunkSizeUsedBytes = 5; - // chunkfilepool中未使用的chunk占用的磁盘空间 + // Disk space occupied by unused chunks in chunkfilepool required uint64 chunkSizeLeftBytes = 6; - // 回收站中chunk占用的磁盘空间 + // Disk space occupied by chunks in the recycle bin required uint64 chunkSizeTrashedBytes = 7; - // chunkfilepool的大小 + // The size of chunkfilepool optional uint64 chunkFilepoolSize = 8; // percentage of chunkfilepool formatting optional uint32 chunkFilepoolFormatPercent = 9; @@ -102,27 +102,27 @@ message ChunkServerHeartbeatRequest { required DiskState diskState = 6; required uint64 diskCapacity = 7; required uint64 diskUsed = 8; - // 返回该chunk上所有copyset的信息 + // Information about all copysets on this chunkserver repeated CopySetInfo copysetInfos = 9; - // 时间窗口内该chunkserver上leader的个数 + // The number of leaders on this chunkserver within the time window required uint32 leaderCount = 10; - // 时间窗口内该chunkserver上copyset的个数 + // The number of copysets on this chunkserver within the time window required uint32 copysetCount = 11; - // chunkServer相关的统计信息 + // Statistics related to the chunkserver optional ChunkServerStatisticInfo stats = 12; optional string version = 13; }; enum ConfigChangeType { - // 配置变更命令: leader转换 + // Configuration change command: transfer leader TRANSFER_LEADER = 1; - // 配置变更命令: 复制组增加一个成员 + // Configuration change command: Add a member to the replication group ADD_PEER = 2; - // 配置变更命令: 复制组删除一个成员 + // Configuration change command: Remove a member from the replication group REMOVE_PEER = 3; - // 配置变更命令: 没有配置变更 + // Configuration change command: No configuration change NONE = 4; - // 配置变更命令:change复制组一个成员 + // Configuration change command: Change a member of the replication group CHANGE_PEER = 5; // start scan on the peer START_SCAN_PEER = 6; @@ -136,40 +136,40 @@ message CopySetConf { repeated common.Peer peers = 3; required uint64 epoch = 4; optional ConfigChangeType type = 5; - // configchangeItem 是目标节点 - // 对于TRANSFER_LEADER: 表示目标节点; 对于ADD_PEER: 表示待加入节点 - // 对于REMOVE_PEER: 表示待删除节点; 对于CHANGE_PEER: 表示待加入节点 + // configchangeItem is the target node + // For TRANSFER_LEADER: the target node; For ADD_PEER: the node to be added + // For REMOVE_PEER: the node to be removed; For CHANGE_PEER: the node to be added // SCAN_PEER: to scan the node optional common.Peer configchangeItem = 6; - // oldPeer, 这个只在ConfigChangeType=对于CHANGE_PEER的情况下会赋值, - // 表示待删除节点。 - // chunkserver收到CHANGE_PEER,根据peers,configchangeItem,oldPeer拼出新的conf + // oldPeer is only assigned when ConfigChangeType is CHANGE_PEER, + // and indicates the node to be removed. + // On receiving CHANGE_PEER, the chunkserver builds the new conf from peers, configchangeItem and oldPeer optional common.Peer oldPeer = 7; };
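The oldPeer comment above fully determines the CHANGE_PEER rule: the new configuration is the reported peer list with oldPeer swapped out for configchangeItem. A minimal sketch of that reconstruction (BuildNewConf is illustrative, not chunkserver code):

```cpp
#include <string>
#include <vector>

// New conf = (peers - oldPeer) + configchangeItem, as described above.
std::vector<std::string> BuildNewConf(const std::vector<std::string>& peers,
                                      const std::string& configChangeItem,
                                      const std::string& oldPeer) {
    std::vector<std::string> newConf;
    for (const auto& peer : peers) {
        if (peer != oldPeer) newConf.push_back(peer);  // drop the node to remove
    }
    newConf.push_back(configChangeItem);  // add the node to be added
    return newConf;
}
```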
enum HeartbeatStatusCode { - // 正常返回 + // Normal return hbOK = 0; - // 必要的参数为初始化 + // Required parameters are not initialized hbParamUnInitialized = 1; - // chunkserver不在topology中 + // The chunkserver is not in the topology hbChunkserverUnknown = 2; - // chunkserver状态为retired + // The chunkserver status is retired hbChunkserverRetired = 3; - // chunkserver的ip和port与topology中的不匹配 + // The IP and port of the chunkserver do not match those in the topology hbChunkserverIpPortNotMatch = 4; - // chunkserver的token不匹配 + // The chunkserver token does not match hbChunkserverTokenNotMatch = 5; - // 无copyset上报 + // No copyset reported hbRequestNoCopyset = 6; - // copyset转换为topology格式失败 + // Failed to convert the copyset to topology format hbAnalyseCopysetError = 7; } message ChunkServerHeartbeatResponse { - // 返回需要进行变更的copyset的信息 + // Returns information about the copysets that need to be changed repeated CopySetConf needUpdateCopysets = 1; - // 错误码 + // Error code optional HeartbeatStatusCode statusCode = 2; }; diff --git a/proto/nameserver2.proto b/proto/nameserver2.proto index 85947d96ad..57b8a80c3d 100644 --- a/proto/nameserver2.proto +++ b/proto/nameserver2.proto @@ -31,18 +31,18 @@ enum FileType { }; enum FileStatus { - // 文件创建完成 + // File creation completed kFileCreated = 0; - // 文件删除中 + // File deletion in progress kFileDeleting = 1; - // 文件正在克隆 + // File cloning in progress kFileCloning = 2; - // 文件元数据安装完毕 + // File metadata installation completed kFileCloneMetaInstalled = 3; - // 文件克隆完成 + // File cloning completed kFileCloned = 4; - // 文件正在被克隆 + // The file is being cloned (used as a clone source) kFileBeingCloned = 5; } @@ -78,15 +78,15 @@ message FileInfo { optional uint64 ctime = 9; optional uint64 seqNum = 10; optional FileStatus fileStatus = 11; - //用于文件转移到回收站的情况下恢复场景下的使用, - //RecycleBin(回收站)目录下使用/其他场景下不使用 + // Used for recovery when a file has been moved to the recycle bin; + // only set under the RecycleBin directory, unused in other scenarios optional string originalFullPathName = 12; - // cloneSource 当前用于存放克隆源(当前主要用于curvefs) - // 后期可以考虑存放 s3相关信息 + // cloneSource currently stores the clone source (mainly used by curvefs); + // s3-related information may be stored here later optional string cloneSource = 13; - // cloneLength 克隆源文件的长度,用于clone过程中进行extent + // cloneLength is the length of the clone source file, used to extend the file during cloning optional uint64 cloneLength = 14; optional uint64 stripeUnit = 15; optional uint64 stripeCount = 16; @@ -99,68 +99,68 @@ message FileInfo { // status code enum StatusCode { - // 执行成功 + // Execution succeeded kOK = 0; - // 文件已存在 + // File already exists kFileExists = 101; - // 文件不存在 + // File does not exist kFileNotExists = 102; - // 非目录类型 + // Not a directory kNotDirectory = 103; - // 传入参数错误 + // Invalid parameters kParaError = 104; - // 缩小文件,目前不支持缩小文件 + // Shrinking a file is not currently supported kShrinkBiggerFile = 105; - // 扩容单位错误,非segment size整数倍 + // Invalid extend unit: not an integer multiple of the segment size kExtentUnitError = 106; - // segment未分配 + // Segment not allocated kSegmentNotAllocated = 107; - // segment分配失败 + // Segment allocation failed kSegmentAllocateError = 108; - // 目录不存在 + // Directory does not exist kDirNotExist = 109; - // 功能不支持 + // Function not supported
kNotSupported = 110; - // owner认证失败 + // Owner authentication failed kOwnerAuthFail = 111; - // 目录非空 + // Directory is not empty kDirNotEmpty = 112; - // 文件已处于快照中 + // The file is already in a snapshot kFileUnderSnapShot = 120; - // 文件不在快照中 + // The file is not in a snapshot kFileNotUnderSnapShot = 121; - // 快照删除中 + // Snapshot deletion in progress kSnapshotDeleting = 122; - // 快照文件不存在 + // The snapshot file does not exist kSnapshotFileNotExists = 123; - // 快照文件删除失败 + // Snapshot file deletion failed kSnapshotFileDeleteError = 124; - // session不存在 + // Session does not exist kSessionNotExist = 125; - // 文件已被占用 + // The file is already in use kFileOccupied = 126; kCloneFileNameIllegal = 127; kCloneStatusNotMatch = 128; - // 文件删除失败 + // File deletion failed kCommonFileDeleteError = 129; - // 文件id不匹配 + // File ID mismatch kFileIdNotMatch = 130; - // 文件在删除中 + // The file is being deleted kFileUnderDeleting = 131; - // 文件长度不符合要求 + // The file length does not meet the requirements kFileLengthNotSupported = 132; - // 文件正在被克隆 + // The file is being cloned kDeleteFileBeingCloned = 133; - // client版本不匹配 + // Client version mismatch kClientVersionNotMatch = 134; - // snapshot功能禁用中 + // The snapshot function is disabled kSnapshotFrozen = 135; - // 快照克隆服务连不上 + // Cannot connect to the snapshot clone service kSnapshotCloneConnectFail = 136; - // 快照克隆服务未初始化 + // The snapshot clone service is not initialized kSnapshotCloneServerNotInit = 137; // recover file status is CloneMetaInstalled kRecoverFileCloneMetaInstalled = 138; @@ -170,9 +170,9 @@ enum StatusCode { kEpochTooOld = 140; // poolset doesn't exist kPoolsetNotExist = 141; - // 元数据存储错误 + // Metadata storage error kStorageError = 501; - // 内部错误 + // Internal error KInternalError = 502; }; @@ -311,20 +311,20 @@ message ExtendFileResponse { } message ChangeOwnerRequest { - // 需要变更owner的文件的fileName + // The fileName of the file whose owner is to be changed required string fileName = 1; - // 希望文件owner变更后的新的owner + // The new owner the file should have after the change required string newOwner = 2; - // ChangerOwner接口只能通过root权限进行调用,需要传入root权限的owner + // The ChangeOwner interface can only be called with root permission; an owner with root permission must be passed in required string rootOwner = 3; - // 对root身份进行校验的的signature + // The signature for verifying the root identity required string signature = 4; - // 用来在mds端重新计算signature + // Used to recalculate the signature on the mds side required uint64 date = 5; } -// 返回ChangeOwner的执行结果,成功返回statusCode::kOK -// 失败可能返回kFileNotExists、kOwnerAuthFail、kFileOccupied、kStorageError等,可能返回的错误码将来继续补充 +// Returns the execution result of ChangeOwner; on success returns statusCode::kOK +// Failure may return kFileNotExists, kOwnerAuthFail, kFileOccupied, kStorageError, etc.
The error codes that may be returned will continue to be supplemented in the future message ChangeOwnerResponse { required StatusCode statusCode = 1; } @@ -395,8 +395,8 @@ message CheckSnapShotStatusRequest { required uint64 date = 5; } -// statusCode为kOK时,fileStatus和progress才会赋值 -// 只有fileStatus是kFileDeleting时,progress表示快照文件删除进度,否则progress返回0 +// FileStatus and progress are only assigned values when statusCode is kOK +// Only when fileStatus is kFileDeleting, progress represents the progress of snapshot file deletion, otherwise progress returns 0 message CheckSnapShotStatusResponse { required StatusCode statusCode = 1; optional FileStatus fileStatus = 2; @@ -431,7 +431,7 @@ message OpenFileRequest { optional string clientVersion = 5; }; -// statusCode返回值,详见StatusCode定义: +// The return value of statusCode is detailed in the definition of StatusCode: // StatusCode::kOK // StatusCode::kFileNotExists // StatusCode::kStorageError @@ -456,7 +456,7 @@ message CloseFileRequest { optional uint32 clientPort = 7; }; -// statusCode返回值,详见StatusCode定义: +// The return value of statusCode is detailed in the definition of StatusCode: // StatusCode::kOK // StatusCode::kFileNotExists // StatusCode::kStorageError @@ -478,7 +478,7 @@ message ReFreshSessionRequest { optional uint32 clientPort = 8; } -// statusCode返回值,详见StatusCode定义: +// The return value of statusCode is detailed in the definition of StatusCode: // StatusCode::kOK // StatusCode::kFileNotExists // StatusCode::kStorageError @@ -531,9 +531,9 @@ message GetAllocatedSizeRequest { message GetAllocatedSizeResponse { required StatusCode statusCode = 1; - // 文件或目录的分配大小 + // Allocation size of files or directories optional uint64 allocatedSize = 2; - // key是逻辑池id,value是分配大小 + // Key is the logical pool id, and value is the allocation size map allocSizeMap = 3; } @@ -543,7 +543,7 @@ message GetFileSizeRequest { message GetFileSizeResponse { required StatusCode statusCode = 1; - // 文件或目录的file length + // The file length of a file or directory optional uint64 fileSize = 2; } diff --git a/proto/schedule.proto b/proto/schedule.proto index 2dde693556..9c92bb4ef5 100644 --- a/proto/schedule.proto +++ b/proto/schedule.proto @@ -34,7 +34,7 @@ message RapidLeaderScheduleResponse { required sint32 statusCode = 1; } -// 如果chunkServerID为空,则返回所有chunkserver的恢复状态 +// If chunkServerID is empty, return the recovery status of all chunkservers message QueryChunkServerRecoverStatusRequest { repeated uint32 chunkServerID = 1; } diff --git a/proto/topology.proto b/proto/topology.proto index 6e88d4e102..f9864de5e9 100644 --- a/proto/topology.proto +++ b/proto/topology.proto @@ -415,7 +415,7 @@ message CreateLogicalPoolRequest { required LogicalPoolType type = 4; required bytes redundanceAndPlaceMentPolicy = 5; //json body required bytes userPolicy = 6; //json body - optional uint32 scatterWidth = 7; //生成copyset依据的scatterWidth平均值 + optional uint32 scatterWidth = 7; // Generate copyset based on the average scatterWidth value optional AllocateStatus status = 8; } diff --git a/robot/Resources/keywords/deploy.py b/robot/Resources/keywords/deploy.py index 93d7926a45..0a556c7021 100644 --- a/robot/Resources/keywords/deploy.py +++ b/robot/Resources/keywords/deploy.py @@ -9,6 +9,7 @@ import random import time + def add_config(): etcd = [] for host in config.etcd_list: @@ -16,168 +17,183 @@ def add_config(): etcd_addrs = ",".join(etcd) # add mds config for host in config.mds_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = 
shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo rm *.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/mds.conf %s:~/"%\ - (config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/mds.conf %s:~/" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) - ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s:6666/g' mds.conf"%host + ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s:6666/g' mds.conf" % host rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host - #change offline time - ori_cmd = R"sed -i 's/mds.heartbeat.offlinetimeoutMs=.*/mds.heartbeat.offlinetimeoutMs=%d/g' mds.conf"%(config.offline_timeout*1000) + assert rs[3] == 0, "change host %s mds config fail" % host + # change offline time + ori_cmd = R"sed -i 's/mds.heartbeat.offlinetimeoutMs=.*/mds.heartbeat.offlinetimeoutMs=%d/g' mds.conf" % ( + config.offline_timeout*1000) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host - #change clean_follower_afterMs time - ori_cmd = R"sed -i 's/mds.heartbeat.clean_follower_afterMs=.*/mds.heartbeat.clean_follower_afterMs=%d/g' mds.conf"%(300000) + assert rs[3] == 0, "change host %s mds config fail" % host + # change clean_follower_afterMs time + ori_cmd = R"sed -i 's/mds.heartbeat.clean_follower_afterMs=.*/mds.heartbeat.clean_follower_afterMs=%d/g' mds.conf" % ( + 300000) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host - #change scheduler time + assert rs[3] == 0, "change host %s mds config fail" % host + # change scheduler time ori_cmd = R"sed -i 's/mds.copyset.scheduler.intervalSec=.*/mds.copyset.scheduler.intervalSec=0/g' mds.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host + assert rs[3] == 0, "change host %s mds config fail" % host ori_cmd = R"sed -i 's/mds.replica.scheduler.intervalSec=.*/mds.replica.scheduler.intervalSec=0/g' mds.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host + assert rs[3] == 0, "change host %s mds config fail" % host # ori_cmd = R"sed -i 's/mds.recover.scheduler.intervalSec=.*/mds.recover.scheduler.intervalSec=0/g' mds.conf" # rs = shell_operator.ssh_exec(ssh, ori_cmd) # assert rs[3] == 0,"change host %s mds config fail"%host ori_cmd = R"sed -i 's/mds.leader.scheduler.intervalSec=.*/mds.leader.scheduler.intervalSec=5/g' mds.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host + assert rs[3] == 0, "change host %s mds config fail" % host # change topology update time ori_cmd = R"sed -i 's/mds.topology.TopologyUpdateToRepoSec=.*/mds.topology.TopologyUpdateToRepoSec=1/g' mds.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host - #add mysql conf - ori_cmd = R"sed -i 's/mds.DbUrl=localhost/mds.DbUrl=%s/g' mds.conf"%(config.abnormal_db_host) + assert rs[3] == 0, "change host %s mds config fail" % host + # add mysql conf + ori_cmd = R"sed -i 's/mds.DbUrl=localhost/mds.DbUrl=%s/g' mds.conf" % ( + config.abnormal_db_host) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host - #add etcd conf - ori_cmd = R"sed -i 's/mds.etcd.endpoint=127.0.0.1:2379/mds.etcd.endpoint=%s/g' 
mds.conf"%(etcd_addrs) + assert rs[3] == 0, "change host %s mds config fail" % host + # add etcd conf + ori_cmd = R"sed -i 's/mds.etcd.endpoint=127.0.0.1:2379/mds.etcd.endpoint=%s/g' mds.conf" % ( + etcd_addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host + assert rs[3] == 0, "change host %s mds config fail" % host ori_cmd = "sudo mv mds.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s mds conf fail"%host + assert rs[3] == 0, "mv %s mds conf fail" % host # add client config mds_addrs = [] for host in config.mds_list: mds_addrs.append(host + ":6666") addrs = ",".join(mds_addrs) for host in config.client_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo rm *.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/client.conf %s:~/"%\ - (config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/client.conf %s:~/" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) - ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' client.conf"%(addrs) + ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' client.conf" % ( + addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s client config fail"%host -#将client.conf配置成py_client.conf(主机用),方便client复现死锁问题 + assert rs[3] == 0, "change host %s client config fail" % host + # Configure client.conf to py_client.conf(for the host) to facilitate client replication of deadlock issues ori_cmd = "sudo mv client.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "sudo cp /etc/curve/client.conf /etc/curve/py_client.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s client conf fail"%host + assert rs[3] == 0, "mv %s client conf fail" % host # add chunkserver config addrs = ",".join(mds_addrs) for host in config.chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo rm *.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/chunkserver.conf.example %s:~/chunkserver.conf"%\ - (config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/chunkserver.conf.example %s:~/chunkserver.conf" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) - #change global ip - ori_cmd = R"sed -i 's/global.ip=127.0.0.1/global.ip=%s/g' chunkserver.conf"%host + # change global ip + ori_cmd = R"sed -i 's/global.ip=127.0.0.1/global.ip=%s/g' chunkserver.conf" % host rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - #change global subnet - subnet=host+"/24" - ori_cmd = R"sed -i 's#global.subnet=127.0.0.0/24#global.subnet=%s#g' chunkserver.conf"%subnet + assert rs[3] == 0, "change host %s chunkserver config fail" % host + # change global subnet + subnet = host+"/24" + ori_cmd = R"sed -i 's#global.subnet=127.0.0.0/24#global.subnet=%s#g' chunkserver.conf" % subnet rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - #change mds ip - ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' 
chunkserver.conf"%(addrs) + assert rs[3] == 0, "change host %s chunkserver config fail" % host + # change mds ip + ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' chunkserver.conf" % ( + addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - + assert rs[3] == 0, "change host %s chunkserver config fail" % host + ori_cmd = R"sed -i 's/chunkserver.snapshot_throttle_throughput_bytes=.*/chunkserver.snapshot_throttle_throughput_bytes=104857600/g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - + assert rs[3] == 0, "change host %s chunkserver config fail" % host + ori_cmd = R"sed -i 's/trash.expire_afterSec=.*/trash.expire_afterSec=0/g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - + assert rs[3] == 0, "change host %s chunkserver config fail" % host + ori_cmd = R"sed -i 's/trash.scan_periodSec=.*/trash.scan_periodSec=10/g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - #open use snapshot + assert rs[3] == 0, "change host %s chunkserver config fail" % host + # open use snapshot ori_cmd = R"sed -i 's/clone.disable_curve_client=true/clone.disable_curve_client=false/g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host + assert rs[3] == 0, "change host %s chunkserver config fail" % host ori_cmd = R"sed -i 's/clone.disable_s3_adapter=true/clone.disable_s3_adapter=false/g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host + assert rs[3] == 0, "change host %s chunkserver config fail" % host ori_cmd = R"sed -i 's#curve.config_path=conf/cs_client.conf#curve.config_path=/etc/curve/conf/cs_client.conf#g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host + assert rs[3] == 0, "change host %s chunkserver config fail" % host ori_cmd = R"sed -i 's#s3.config_path=conf/s3.conf#s3.config_path=/etc/curve/conf/s3.conf#g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host + assert rs[3] == 0, "change host %s chunkserver config fail" % host ori_cmd = "sudo mv chunkserver.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s chunkserver conf fail"%host + assert rs[3] == 0, "mv %s chunkserver conf fail" % host # add s3 and client conf\cs_client conf client_host = random.choice(config.client_list) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s:/etc/curve/client.conf ."%\ - (config.pravie_key_path,client_host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s:/etc/curve/client.conf ." 
%\ + (config.pravie_key_path, client_host) shell_operator.run_exec2(cmd) for host in config.chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 client.conf conf/cs_client.conf %s:~/"%\ - (config.pravie_key_path,host) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 client.conf conf/cs_client.conf %s:~/" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) - ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' cs_client.conf"%(addrs) + ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' cs_client.conf" % ( + addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s cs_client config fail"%host + assert rs[3] == 0, "change host %s cs_client config fail" % host ori_cmd = "sudo mv client.conf /etc/curve/conf && sudo mv cs_client.conf /etc/curve/conf/" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s client conf fail"%host + assert rs[3] == 0, "mv %s client conf fail" % host for host in config.snap_server_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 client.conf conf/snapshot_clone_server.conf conf/snap_client.conf %s:~/"%\ - (config.pravie_key_path,host) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 client.conf conf/snapshot_clone_server.conf conf/snap_client.conf %s:~/" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) ori_cmd = "sed -i \"s/client.config_path=\S*/client.config_path=\/etc\/curve\/snap_client.conf/\" snapshot_clone_server.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s snapshot config fail"%host - #修改snapshot_clone_server.conf etcd配置 - ori_cmd = "sed -i \"s/etcd.endpoint=\S*/etcd.endpoint=%s/g\" snapshot_clone_server.conf"%(etcd_addrs) + assert rs[3] == 0, "change host %s snapshot config fail" % host + # Modify snapshot_clone_server.conf etcd configuration + ori_cmd = "sed -i \"s/etcd.endpoint=\S*/etcd.endpoint=%s/g\" snapshot_clone_server.conf" % ( + etcd_addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s snapshot config fail"%host - #修改数据库配置项 - ori_cmd = R"sed -i 's/metastore.db_address=\S*/metastore.db_address=%s/g' snapshot_clone_server.conf"%(config.abnormal_db_host) + assert rs[3] == 0, "change host %s snapshot config fail" % host + # Modifying Database Configuration Items + ori_cmd = R"sed -i 's/metastore.db_address=\S*/metastore.db_address=%s/g' snapshot_clone_server.conf" % ( + config.abnormal_db_host) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s snapshot clone server config fail"%host + assert rs[3] == 0, "change host %s snapshot clone server config fail" % host ori_cmd = "sed -i \"s/s3.config_path=\S*/s3.config_path=\/etc\/curve\/s3.conf/\" snapshot_clone_server.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s snapshot config fail"%host - ori_cmd = "sed -i \"s/server.address=\S*/server.address=%s:5556/g\" snapshot_clone_server.conf"%host + assert rs[3] == 0, "change host %s snapshot config fail" % host + ori_cmd = "sed -i \"s/server.address=\S*/server.address=%s:5556/g\" snapshot_clone_server.conf" % host rs = shell_operator.ssh_exec(ssh, ori_cmd) - 
assert rs[3] == 0,"change host %s snapshot config fail"%host -#change snap_client.conf - ori_cmd = "sed -i \"s/mds.listen.addr=\S*/mds.listen.addr=%s/g\" snap_client.conf"%(addrs) + assert rs[3] == 0, "change host %s snapshot config fail" % host +# change snap_client.conf + ori_cmd = "sed -i \"s/mds.listen.addr=\S*/mds.listen.addr=%s/g\" snap_client.conf" % ( + addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s snapshot config fail"%host + assert rs[3] == 0, "change host %s snapshot config fail" % host ori_cmd = "sudo mv snapshot_clone_server.conf /etc/curve/ && sudo mv snap_client.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s snapshot_clone_server conf fail"%host + assert rs[3] == 0, "mv %s snapshot_clone_server conf fail" % host ori_cmd = "sudo mv client.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) @@ -187,29 +203,32 @@ def add_config(): snap_addrs_list.append(host + ":5556") snap_addrs = ",".join(snap_addrs_list) for host in config.mds_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, ori_cmd) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/tools.conf %s:~/"%\ - (config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/tools.conf %s:~/" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) - ori_cmd = R"sed -i 's/mdsAddr=127.0.0.1:6666/mdsAddr=%s/g' tools.conf"%addrs + ori_cmd = R"sed -i 's/mdsAddr=127.0.0.1:6666/mdsAddr=%s/g' tools.conf" % addrs rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s tools config fail"%host - ori_cmd = R"sed -i 's/etcdAddr=127.0.0.1:2379/etcdAddr=%s/g' tools.conf"%etcd_addrs + assert rs[3] == 0, "change host %s tools config fail" % host + ori_cmd = R"sed -i 's/etcdAddr=127.0.0.1:2379/etcdAddr=%s/g' tools.conf" % etcd_addrs rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s tools config fail"%host - ori_cmd = R"sed -i 's/snapshotCloneAddr=127.0.0.1:5555/snapshotCloneAddr=%s/g' tools.conf"%snap_addrs + assert rs[3] == 0, "change host %s tools config fail" % host + ori_cmd = R"sed -i 's/snapshotCloneAddr=127.0.0.1:5555/snapshotCloneAddr=%s/g' tools.conf" % snap_addrs rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s tools config fail"%host + assert rs[3] == 0, "change host %s tools config fail" % host ori_cmd = "sudo mv tools.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s tools conf fail"%host + assert rs[3] == 0, "mv %s tools conf fail" % host + def clean_env(): - host_list = config.client_list + config.mds_list + config.chunkserver_list + host_list = config.client_list + config.mds_list + config.chunkserver_list host_list = list(set(host_list)) for host in host_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd1 = "sudo tc qdisc del dev bond0.106 root" shell_operator.ssh_exec(ssh, ori_cmd1) ori_cmd2 = "ps -ef|grep -v grep | grep memtester | awk '{print $2}'| sudo xargs kill -9" @@ -217,33 +236,42 @@ def clean_env(): ori_cmd3 = "ps -ef|grep -v grep | grep cpu_stress.py | awk '{print $2}'| sudo xargs kill -9" shell_operator.ssh_exec(ssh, ori_cmd3) + def destroy_mds(): for host in config.mds_list: - ssh = 
shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep -v sudo | grep curve-mds | awk '{print $2}' | sudo xargs kill -9" shell_operator.ssh_exec(ssh, ori_cmd) + def destroy_etcd(): for host in config.etcd_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep etcd | awk '{print $2}' | sudo xargs kill -9" shell_operator.ssh_exec(ssh, ori_cmd) + def destroy_snapshotclone_server(): for host in config.snap_server_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep |grep -v sudo | grep snapshotcloneserver | awk '{print $2}' | sudo xargs kill -9" shell_operator.ssh_exec(ssh, ori_cmd) + def stop_nebd(): for host in config.client_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep nebd | awk '{print $2}' | sudo xargs kill -9" rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[3] != 0: logger.debug("snapshotcloneserver not up") continue - + + def initial_chunkserver(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) try: @@ -257,22 +285,24 @@ def initial_chunkserver(host): assert rs[1] == [], "kill chunkserver fail" ori_cmd = "sudo find /data/ -name chunkserver.dat -exec rm -rf {} \;" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.debug("delete dat ,return is %s"%rs[1]) - assert rs[3] == 0,"rm %s dat fail"%host + logger.debug("delete dat ,return is %s" % rs[1]) + assert rs[3] == 0, "rm %s dat fail" % host ori_cmd = "sh recycle_chunks.sh -d /data -chunks chunkfilepool -wals chunkfilepool" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.debug("recycle chunk ,return is %s"%rs[1]) - assert rs[3] == 0,"recycle %s chunk fail"%host + logger.debug("recycle chunk ,return is %s" % rs[1]) + assert rs[3] == 0, "recycle %s chunk fail" % host ssh.close() except Exception as e: logger.error("%s" % e) raise return 0 + def recycle_chunk(): cmd = "ansible-playbook -i curve/curve-ansible/server.ini curve/curve-ansible/clean_curve.yml --tags chunkserver" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"ansible clean chunk fail" + assert ret == 0, "ansible clean chunk fail" + def drop_all_chunkserver_dat(): thread = [] @@ -286,34 +316,39 @@ def drop_all_chunkserver_dat(): logger.debug("drop cs dat get result is %d" % t.get_result()) assert t.get_result() == 0 + def destroy_test_env(): try: cmd = "cp robot/init_env.sh . 
&& bash init_env.sh" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"init env fail" + assert ret == 0, "init env fail" host = config.client_list[0] except Exception: logger.error("init env fail.") raise + def change_cfg(): try: - cmd = "bash %s/change_cfg.sh"%config.fs_cfg_path + cmd = "bash %s/change_cfg.sh" % config.fs_cfg_path ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"change fs cfg fail" + assert ret == 0, "change fs cfg fail" except Exception: logger.error("change fs cfg fail.") raise + def destroy_curvefs(): try: test_client = config.fs_test_client[0] - ssh = shell_operator.create_ssh_connect(test_client, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + test_client, 1046, config.abnormal_user) cmd = "/home/nbs/.curveadm/bin/curveadm cluster checkout citest" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"checkout fail" + assert ret == 0, "checkout fail" for mountpoint in config.fs_mount_dir: - cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s"%(config.fs_mount_path,mountpoint) + cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s" % ( + config.fs_mount_path, mountpoint) shell_operator.ssh_exec(ssh, cmd) cmd = "echo 'yes' | /home/nbs/.curveadm/bin/curveadm stop" ret = shell_operator.run_exec(cmd) @@ -323,186 +358,218 @@ def destroy_curvefs(): logger.error("destroy curvefs fail.") raise + def use_ansible_deploy(): try: cmd = "cp robot/ansible_deploy.sh . && bash ansible_deploy.sh" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"ansible deploy fail" + assert ret == 0, "ansible deploy fail" host = config.client_list[0] - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s:/etc/curve/client.conf ."%\ - (config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s:/etc/curve/client.conf ." 
%\ + (config.pravie_key_path, host) ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"cp client.conf fail" + assert ret == 0, "cp client.conf fail" except Exception: logger.error("deploy curve fail.") raise + def deploy_all_servers(): try: cmd = "/home/nbs/.curveadm/bin/curveadm cluster checkout citest" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"checkout fail" + assert ret == 0, "checkout fail" cmd = "/home/nbs/.curveadm/bin/curveadm deploy" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"deploy mds\etcd\metaserver fail" + assert ret == 0, "deploy mds\etcd\metaserver fail" except Exception: logger.error("deploy curvefs fail.") raise -def remk_test_dir(): + +def remk_test_dir(): try: test_client = config.fs_test_client[0] - ssh = shell_operator.create_ssh_connect(test_client, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + test_client, 1046, config.abnormal_user) for test_dir in config.fs_mount_dir: - ori_cmd = "rm -rf %s/%s"%(config.fs_mount_path,test_dir) + ori_cmd = "rm -rf %s/%s" % (config.fs_mount_path, test_dir) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"rm test dir %s fail,error is %s"%(test_dir,rs[1]) - ori_cmd = "mkdir %s/%s"%(config.fs_mount_path,test_dir) + assert rs[3] == 0, "rm test dir %s fail,error is %s" % ( + test_dir, rs[1]) + ori_cmd = "mkdir %s/%s" % (config.fs_mount_path, test_dir) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mkdir %s fail,error is %s"%(test_dir,rs[1]) + assert rs[3] == 0, "mkdir %s fail,error is %s" % (test_dir, rs[1]) except Exception: logger.error(" remk test dir fail.") raise -def mount_test_dir(mountpoint="",mountfile=""): + +def mount_test_dir(mountpoint="", mountfile=""): try: test_client = config.fs_test_client[0] - ssh = shell_operator.create_ssh_connect(test_client, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + test_client, 1046, config.abnormal_user) if mountpoint == "": for mountpoint in config.fs_mount_dir: if config.fs_use_curvebs: cmd = "/home/nbs/.curveadm/bin/curveadm mount %s %s%s -c client-bs-%s.yaml \ - --fstype volume"%(mountpoint,config.fs_mount_path,mountpoint,mountpoint) - else: + --fstype volume" % (mountpoint, config.fs_mount_path, mountpoint, mountpoint) + else: cmd = "/home/nbs/.curveadm/bin/curveadm mount %s %s%s -c client-%s.yaml\ - "%(mountpoint,config.fs_mount_path,mountpoint,mountpoint) + " % (mountpoint, config.fs_mount_path, mountpoint, mountpoint) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"mount %s dir fail,error is %s"%(mountpoint,rs[2]) + assert rs[3] == 0, "mount %s dir fail,error is %s" % ( + mountpoint, rs[2]) else: if mountfile == "": mountfile = mountpoint if config.fs_use_curvebs: cmd = "/home/nbs/.curveadm/bin/curveadm mount %s %s%s -c client-bs-%s.yaml \ - --fstype volume"%(mountpoint,config.fs_mount_path,mountfile,mountfile) + --fstype volume" % (mountpoint, config.fs_mount_path, mountfile, mountfile) else: cmd = "/home/nbs/.curveadm/bin/curveadm mount %s %s%s -c client-%s.yaml\ - "%(mountpoint,config.fs_mount_path,mountfile,mountfile) + " % (mountpoint, config.fs_mount_path, mountfile, mountfile) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"mount %s dir fail,error is %s"%(mountpoint,rs[2]) + assert rs[3] == 0, "mount %s dir fail,error is %s" % ( + mountpoint, rs[2]) except Exception: logger.error("mount dir fail.") raise + def umount_test_dir(mountpoint=""): try: test_client = config.fs_test_client[0] - ssh = shell_operator.create_ssh_connect(test_client, 1046, 
config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + test_client, 1046, config.abnormal_user) if mountpoint == "": for mountpoint in config.fs_mount_dir: - cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s"%(config.fs_mount_path,mountpoint) + cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s" % ( + config.fs_mount_path, mountpoint) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"umount %s dir fail,error is %s"%(mountpoint,rs[2]) + assert rs[3] == 0, "umount %s dir fail,error is %s" % ( + mountpoint, rs[2]) else: - cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s"%(config.fs_mount_path,mountpoint) + cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s" % ( + config.fs_mount_path, mountpoint) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"umount %s dir fail,error is %s"%(mountpoint,rs[2]) + assert rs[3] == 0, "umount %s dir fail,error is %s" % ( + mountpoint, rs[2]) except Exception: logger.error("umount dir fail.") raise + def install_deb(): try: -# mkdeb_url = config.curve_workspace + "mk-deb.sh" -# exec_mkdeb = "bash %s"%mkdeb_url -# shell_operator.run_exec2(exec_mkdeb) - cmd = "ls %scurve-mds*.deb"%config.curve_workspace + # mkdeb_url = config.curve_workspace + "mk-deb.sh" + # exec_mkdeb = "bash %s"%mkdeb_url + # shell_operator.run_exec2(exec_mkdeb) + cmd = "ls %scurve-mds*.deb" % config.curve_workspace mds_deb = shell_operator.run_exec2(cmd) version = mds_deb.split('+')[1] for host in config.mds_list: - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s*.deb %s:~/"%\ - (config.pravie_key_path,config.curve_workspace,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s*.deb %s:~/" %\ + (config.pravie_key_path, config.curve_workspace, host) shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - ori_cmd = "sudo dpkg -i --force-overwrite *%s* aws-sdk_1.0_amd64.deb"%version + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + ori_cmd = "sudo dpkg -i --force-overwrite *%s* aws-sdk_1.0_amd64.deb" % version rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mds install deb fail,error is %s %s"%(rs[1],rs[2]) - rm_deb = "rm *%s*"%version + assert rs[3] == 0, "mds install deb fail,error is %s %s" % ( + rs[1], rs[2]) + rm_deb = "rm *%s*" % version shell_operator.ssh_exec(ssh, rm_deb) - + for host in config.client_list: - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s*.deb %s:~/"%\ - (config.pravie_key_path,config.curve_workspace,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s*.deb %s:~/" %\ + (config.pravie_key_path, config.curve_workspace, host) shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - ori_cmd = "sudo dpkg -i --force-overwrite curve-sdk*%s*"%version + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + ori_cmd = "sudo dpkg -i --force-overwrite curve-sdk*%s*" % version rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"sdk install deb fail,error is %s %s"%(rs[1],rs[2]) - rm_deb = "rm *%s*"%version + assert rs[3] == 0, "sdk install deb fail,error is %s %s" % ( + rs[1], rs[2]) + rm_deb = "rm *%s*" % version shell_operator.ssh_exec(ssh, rm_deb) for host in config.chunkserver_list: cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s*.deb %s:~/" %\ - (config.pravie_key_path,config.curve_workspace,host) + (config.pravie_key_path, config.curve_workspace, host) shell_operator.run_exec2(cmd) - ssh = 
shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - ori_cmd = "sudo dpkg -i --force-overwrite curve-chunkserver*%s* curve-tools*%s* aws-sdk_1.0_amd64.deb"%(version,version) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + ori_cmd = "sudo dpkg -i --force-overwrite curve-chunkserver*%s* curve-tools*%s* aws-sdk_1.0_amd64.deb" % ( + version, version) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0, "chunkserver install deb fail,error is %s %s"%(rs[1],rs[2]) - rm_deb = "rm *%s*"%version + assert rs[3] == 0, "chunkserver install deb fail,error is %s %s" % ( + rs[1], rs[2]) + rm_deb = "rm *%s*" % version shell_operator.ssh_exec(ssh, rm_deb) except Exception: logger.error("install deb fail.") raise + def start_nebd(): - cmd = "ls nebd/nebd*.deb" - nebd_deb = shell_operator.run_exec2(cmd) - version = nebd_deb.split('+')[1] - assert nebd_deb != "","can not get nebd deb" - for host in config.client_list: - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %snebd/*.deb %s:~/"%\ - (config.pravie_key_path,config.curve_workspace,host) - shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - ori_cmd = "sudo dpkg -i --force-overwrite nebd_*%s"%version - rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"install nebd deb fail,error is %s"%rs - rm_deb = "rm nebd_*%s"%version - shell_operator.ssh_exec(ssh, rm_deb) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 nebd/etc/nebd/*.conf %s:~/"%\ - (config.pravie_key_path,host) - shell_operator.run_exec2(cmd) - ori_cmd = "sudo cp nebd-client.conf nebd-server.conf /etc/nebd/" - rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"cp %s nebd conf fail"%host - ori_cmd = "sudo nebd-daemon start" - rs = shell_operator.ssh_exec(ssh, ori_cmd) - if rs[3] != 0: - logger.debug("nebd start fail,error is %s"%rs[1]) - ori_cmd == "sudo nebd-daemon restart" - rs2 = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs2[3] == 0,"restart nebd fail, return is %s"%rs2[1] - time.sleep(5) - ori_cmd = "ps -ef|grep nebd-server | grep -v daemon |grep -v grep |awk '{print $2}'" - rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[1] != "","start nebd fail!" 
+    cmd = "ls nebd/nebd*.deb"
+    nebd_deb = shell_operator.run_exec2(cmd)
+    assert nebd_deb != "", "can not get nebd deb"
+    version = nebd_deb.split('+')[1]
+    for host in config.client_list:
+        cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %snebd/*.deb %s:~/" %\
+            (config.pravie_key_path, config.curve_workspace, host)
+        shell_operator.run_exec2(cmd)
+        ssh = shell_operator.create_ssh_connect(
+            host, 1046, config.abnormal_user)
+        ori_cmd = "sudo dpkg -i --force-overwrite nebd_*%s" % version
+        rs = shell_operator.ssh_exec(ssh, ori_cmd)
+        assert rs[3] == 0, "install nebd deb fail,error is %s" % rs
+        rm_deb = "rm nebd_*%s" % version
+        shell_operator.ssh_exec(ssh, rm_deb)
+        cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 nebd/etc/nebd/*.conf %s:~/" %\
+            (config.pravie_key_path, host)
+        shell_operator.run_exec2(cmd)
+        ori_cmd = "sudo cp nebd-client.conf nebd-server.conf /etc/nebd/"
+        rs = shell_operator.ssh_exec(ssh, ori_cmd)
+        assert rs[3] == 0, "cp %s nebd conf fail" % host
+        ori_cmd = "sudo nebd-daemon start"
+        rs = shell_operator.ssh_exec(ssh, ori_cmd)
+        if rs[3] != 0:
+            logger.debug("nebd start fail,error is %s" % rs[1])
+            ori_cmd = "sudo nebd-daemon restart"
+            rs2 = shell_operator.ssh_exec(ssh, ori_cmd)
+            assert rs2[3] == 0, "restart nebd fail, return is %s" % rs2[1]
+        time.sleep(5)
+        ori_cmd = "ps -ef|grep nebd-server | grep -v daemon |grep -v grep |awk '{print $2}'"
+        rs = shell_operator.ssh_exec(ssh, ori_cmd)
+        assert rs[1] != "", "start nebd fail!"
+

 def add_config_file():
     for host in config.mds_list:
-        ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
+        ssh = shell_operator.create_ssh_connect(
+            host, 1046, config.abnormal_user)
         ori_cmd = "sudo cp -r /etc/curve-bak /etc/curve"
         rs = shell_operator.ssh_exec(ssh, ori_cmd)
-        assert rs[3] == 0,"add host %s config fail,error is %s"%(host,rs[2])
+        assert rs[3] == 0, "add host %s config fail,error is %s" % (
+            host, rs[2])
     for host in config.chunkserver_list:
-        ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
+        ssh = shell_operator.create_ssh_connect(
+            host, 1046, config.abnormal_user)
         ori_cmd = "sudo cp -r /etc/curve-bak /etc/curve"
         rs = shell_operator.ssh_exec(ssh, ori_cmd)
-        assert rs[3] == 0,"add host %s config fail,error is %s"%(host,rs[2])
+        assert rs[3] == 0, "add host %s config fail,error is %s" % (
+            host, rs[2])
+

 def start_abnormal_test_services():
     try:
         for host in config.etcd_list:
-            ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
+            ssh = shell_operator.create_ssh_connect(
+                host, 1046, config.abnormal_user)
             ori_cmd = "sudo rm -rf /etcd/default.etcd"
             shell_operator.ssh_exec(ssh, ori_cmd)
             etcd_cmd = "cd etcdrun && sudo nohup ./run.sh new &"
@@ -510,52 +577,59 @@ def start_abnormal_test_services():
             ori_cmd = "ps -ef|grep -v grep | grep -w etcd | awk '{print $2}'"
             time.sleep(2)
             rs = shell_operator.ssh_exec(ssh, ori_cmd)
-            logger.debug("etcd pid is %s"%rs[1])
+            logger.debug("etcd pid is %s" % rs[1])
             assert rs[1] != [], "up etcd fail"
         for host in config.mds_list:
-            ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
+            ssh = shell_operator.create_ssh_connect(
+                host, 1046, config.abnormal_user)
             mds_cmd = "sudo nohup /usr/bin/curve-mds --confPath=/etc/curve/mds.conf &"
             shell_operator.ssh_background_exec2(ssh, mds_cmd)
             time.sleep(1)
             ori_cmd = "ps -ef|grep -v grep | grep -v curve-mds.log | grep -v sudo | grep -w curve-mds | awk '{print $2}'"
             rs = shell_operator.ssh_exec(ssh, ori_cmd)
             assert rs[1] != [], "up mds fail"
-            
logger.debug("mds pid is %s"%rs[1]) + logger.debug("mds pid is %s" % rs[1]) for host in config.snap_server_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "cd snapshot/temp && sudo nohup curve-snapshotcloneserver -conf=/etc/curve/snapshot_clone_server.conf &" shell_operator.ssh_background_exec2(ssh, ori_cmd) except Exception: logger.error("up servers fail.") raise + def create_pool(): - ssh = shell_operator.create_ssh_connect(config.mds_list[0], 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + config.mds_list[0], 1046, config.abnormal_user) mds = [] mds_addrs = "" for mds_host in config.mds_list: mds.append(mds_host + ":6666") mds_addrs = ",".join(mds) physical_pool = "curve-tool -cluster_map=topo.json -mds_addr=%s\ - -op=create_physicalpool"%(mds_addrs) + -op=create_physicalpool" % (mds_addrs) rs = shell_operator.ssh_exec(ssh, physical_pool) if rs[3] == 0: logger.info("create physical pool sucess") else: - assert False,"create physical fail ,msg is %s"%rs[2] + assert False, "create physical fail ,msg is %s" % rs[2] for host in config.chunkserver_list: - ssh2 = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh2 = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo nohup ./chunkserver_ctl.sh start all &" shell_operator.ssh_background_exec2(ssh2, ori_cmd) time.sleep(60) logical_pool = "curve-tool -cluster_map=topo.json -mds_addr=%s\ - -op=create_logicalpool"%(mds_addrs) + -op=create_logicalpool" % (mds_addrs) rs = shell_operator.ssh_exec(ssh, logical_pool) time.sleep(180) + def restart_cinder_server(): for client_host in config.client_list: - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) ori_cmd = "sudo cp /usr/curvefs/curvefs.py /srv/stack/cinder/lib/python2.7/site-packages/" rs = shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "sudo cp /usr/curvefs/_curvefs.so /srv/stack/cinder/lib/python2.7/site-packages/" @@ -563,21 +637,22 @@ def restart_cinder_server(): time.sleep(2) ori_cmd = "sudo service cinder-volume restart" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[1] == [],"rs is %s"%rs + assert rs[1] == [], "rs is %s" % rs + def wait_cinder_server_up(): cinder_host = config.nova_host - ssh = shell_operator.create_ssh_connect(cinder_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + cinder_host, 1046, config.abnormal_user) ori_cmd = R"source OPENRC && cinder get-host-list --all-services | grep pool1 | grep curve2 | awk '{print $16}'" i = 0 while i < 360: - rs = shell_operator.ssh_exec(ssh, ori_cmd) - status = "".join(rs[1]).strip() - if status == "up": - break - i = i + 5 - time.sleep(5) - assert status == "up","up curve2 cinder service fail,please check" + rs = shell_operator.ssh_exec(ssh, ori_cmd) + status = "".join(rs[1]).strip() + if status == "up": + break + i = i + 5 + time.sleep(5) + assert status == "up", "up curve2 cinder service fail,please check" if status == "up": - time.sleep(10) - + time.sleep(10) diff --git a/robot/Resources/keywords/fault_inject.py b/robot/Resources/keywords/fault_inject.py index 48e95382c4..507b5af8cf 100644 --- a/robot/Resources/keywords/fault_inject.py +++ b/robot/Resources/keywords/fault_inject.py @@ -15,6 +15,7 @@ import string import types + def block_ip(chain): ori_cmd = "iptables -I %s 2>&1" % 
chain cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, @@ -30,6 +31,7 @@ def cancel_block_ip(chain): print cmd # rc = shell_operator.run_exec(cmd) + def net_work_delay(dev, time): ori_cmd = "tc qdisc add dev %s root netem delay %dms 2>&1" % (dev, time) cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, @@ -37,37 +39,45 @@ def net_work_delay(dev, time): print cmd # rc = shell_operator.run_exec(cmd) -def package_loss_all(ssh,dev, percent): - ori_cmd = "sudo tc qdisc add dev %s root netem loss %d%% 2>&1" % (dev, percent) + +def package_loss_all(ssh, dev, percent): + ori_cmd = "sudo tc qdisc add dev %s root netem loss %d%% 2>&1" % ( + dev, percent) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"error is %s"%rs[2] + assert rs[3] == 0, "error is %s" % rs[2] # rc = shell_operator.run_exec(cmd) -def package_delay_all(ssh, dev,ms): + +def package_delay_all(ssh, dev, ms): ori_cmd = "sudo tc qdisc add dev %s root netem delay %dms" % (dev, ms) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"error is %s"%rs[2] + assert rs[3] == 0, "error is %s" % rs[2] # rc = shell_operator.run_exec(cmd) -def cancel_tc_inject(ssh,dev): + +def cancel_tc_inject(ssh, dev): ori_cmd = "sudo tc qdisc del dev %s root" % dev rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"error is %s"%rs[2] + assert rs[3] == 0, "error is %s" % rs[2] # rc = shell_operator.run_exec(cmd) -def show_tc_inject(ssh,dev): + +def show_tc_inject(ssh, dev): ori_cmd = "sudo tc qdisc show dev %s " % dev rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"error is %s"%rs[2] + assert rs[3] == 0, "error is %s" % rs[2] # rc = shell_operator.run_exec(cmd) + def package_reorder_all(dev, ms, percent1, percent2): - ori_cmd = "tc qdisc change dev %s root netem delay %s reorder %d%% %d%%" % (dev, ms, percent1, percent2) + ori_cmd = "tc qdisc change dev %s root netem delay %s reorder %d%% %d%%" % ( + dev, ms, percent1, percent2) cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, sudo_flag=True, sudo_way="") print cmd # rc = shell_operator.run_exec(cmd) + def package_duplicate_all(dev, percent): ori_cmd = "tc qdisc add dev %s root netem duplicate %d%%" % (dev, percent) cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, @@ -77,7 +87,8 @@ def package_duplicate_all(dev, percent): def eth_down_for_a_monent(dev, time): - ori_cmd = "ip link set %s down 2>&1 && sleep %d 2>&1 && ip link set %s up 2>&1" % (dev, time) + ori_cmd = "ip link set %s down 2>&1 && sleep %d 2>&1 && ip link set %s up 2>&1" % ( + dev, time) cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, sudo_flag=True, sudo_way="") print cmd @@ -86,110 +97,125 @@ def eth_down_for_a_monent(dev, time): def add_rate_limit(dev, downlink, uplink): ori_cmd = "wget -N -P /tmp nos.netease.com/nfit-software/taaslimit.sh 2>&1 && chmod a+rx /tmp/taaslimit.sh 2>&1 " \ - "&& mv /tmp/taaslimit.sh /sbin/taaslimit 2>&1 && chown root:root /sbin/taaslimit && taaslimit %s %d %d 2>&1" % (dev, downlink, uplink) + "&& mv /tmp/taaslimit.sh /sbin/taaslimit 2>&1 && chown root:root /sbin/taaslimit && taaslimit %s %d %d 2>&1" % ( + dev, downlink, uplink) cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, sudo_flag=True, sudo_way="") print cmd # rc = 
shell_operator.run_exec(cmd)
+

 def del_rate_limit(dev):
-    ori_cmd = "taaslimit clear %s 2>&1" %(dev)
+    ori_cmd = "taaslimit clear %s 2>&1" % (dev)
     cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd,
                                         sudo_flag=True, sudo_way="")
     print cmd
     # rc = shell_operator.run_exec(cmd)

-def inject_cpu_stress(ssh,stress=50):
-    cmd = "sudo nohup python cpu_stress.py %d &"%stress
-    shell_operator.ssh_background_exec2(ssh,cmd)
+
+def inject_cpu_stress(ssh, stress=50):
+    cmd = "sudo nohup python cpu_stress.py %d &" % stress
+    shell_operator.ssh_background_exec2(ssh, cmd)
     time.sleep(5)
     cmd = "ps -ef|grep -v grep | grep cpu_stress.py | awk '{print $2}'"
-    rs = shell_operator.ssh_exec(ssh,cmd)
-    assert rs[1] != [],"up cpu stress fail"
+    rs = shell_operator.ssh_exec(ssh, cmd)
+    assert rs[1] != [], "up cpu stress fail"
+

 def del_cpu_stress(ssh):
     cmd = "ps -ef|grep -v grep | grep cpu_stress.py | awk '{print $2}'"
-    rs = shell_operator.ssh_exec(ssh,cmd)
+    rs = shell_operator.ssh_exec(ssh, cmd)
     if rs[1] == []:
         logger.info("no cpu stress running")
         return
     cmd = "ps -ef|grep -v grep | grep cpu_stress.py | awk '{print $2}'| sudo xargs kill -9"
-    rs = shell_operator.ssh_exec(ssh,cmd)
-    assert rs[3] == 0,"stop cpu stess fail"
+    rs = shell_operator.ssh_exec(ssh, cmd)
+    assert rs[3] == 0, "stop cpu stress fail"
+

-def inject_mem_stress(ssh,stress):
-    cmd = "sudo nohup /usr/local/stress/memtester/bin/memtester %dG > memtest.log &"%stress
-    shell_operator.ssh_background_exec2(ssh,cmd)
+def inject_mem_stress(ssh, stress):
+    cmd = "sudo nohup /usr/local/stress/memtester/bin/memtester %dG > memtest.log &" % stress
+    shell_operator.ssh_background_exec2(ssh, cmd)
     time.sleep(5)
     cmd = "ps -ef|grep -v grep | grep memtester | awk '{print $2}'"
-    rs = shell_operator.ssh_exec(ssh,cmd)
-    assert rs[1] != [],"up memster stress fail"
+    rs = shell_operator.ssh_exec(ssh, cmd)
+    assert rs[1] != [], "up memtester stress fail"
+

 def del_mem_stress(ssh):
     cmd = "ps -ef|grep -v grep | grep memtester | awk '{print $2}'"
-    rs = shell_operator.ssh_exec(ssh,cmd)
+    rs = shell_operator.ssh_exec(ssh, cmd)
     if rs[1] == []:
         logger.info("no memtester stress running")
         return
     cmd = "ps -ef|grep -v grep | grep memtester | awk '{print $2}'| sudo xargs kill -9"
-    rs = shell_operator.ssh_exec(ssh,cmd)
-    assert rs[3] == 0,"stop memtester stess fail"
+    rs = shell_operator.ssh_exec(ssh, cmd)
+    assert rs[3] == 0, "stop memtester stress fail"

-def inject_clock_offset(ssh,time):
+
+def inject_clock_offset(ssh, time):
     cmd = "sudo date -s `date -d \"+%d min\" | awk \'{print $4}\'`" % time
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"inject clock offet fail,return is %s"%rs[2]
+    assert rs[3] == 0, "inject clock offset fail,return is %s" % rs[2]
+

-def del_clock_offset(ssh,time):
+def del_clock_offset(ssh, time):
     cmd = "sudo date -s `date -d \"-%d min\" | awk \'{print $4}\'`" % time
     rs = shell_operator.ssh_exec(ssh, cmd)
     assert rs[3] == 0, "del clock offet fail,return is %s" % rs[2]
+

 def listen_network_stress(ip):
     ori_cmd = "iperf -s"
     ssh = shell_operator.create_ssh_connect(ip, 1046, config.abnormal_user)
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
     logger.info("exec cmd %s" % ori_cmd)
     # assert rs[3] == 0,"up iperf fail: %s"%rs[1]
+

 def inject_network_stress(ip):
-    ori_cmd = "iperf -c %s -b 20000M -t 10 -p 5001"%ip
+    ori_cmd = "iperf -c %s -b 20000M -t 10 -p 5001" % ip
     ssh = shell_operator.create_ssh_connect(ip, 1046, config.abnormal_user)
-    rs = 
shell_operator.ssh_exec(ssh,ori_cmd)
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
     logger.info("exec cmd %s" % ori_cmd)
-    assert rs[3] == 0,"inject iperf fail: %s"%rs[2]
+    assert rs[3] == 0, "inject iperf fail: %s" % rs[2]
+

 def stop_network_stress(ip):
     ori_cmd = "ps -ef|grep iperf |grep -v grep| awk '{print $2}' | sudo xargs kill -9"
     ssh = shell_operator.create_ssh_connect(ip, 1046, config.abnormal_user)
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[3] == 0,"stop iperf fail: %s"%rs[2]
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "stop iperf fail: %s" % rs[2]
     ori_cmd = "ps -ef|grep iperf |grep -v grep"
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[1] == [],"stop iperf fail,pid %s"%rs[1]
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[1] == [], "stop iperf fail,pid %s" % rs[1]
+

 def ipmitool_cycle_restart_host(ssh):
     ori_cmd = "sudo ipmitool chassis power cycle"
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[3] == 0,"cycle restart host fail,return is %s"%rs
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "cycle restart host fail,return is %s" % rs
+

 def ipmitool_reset_restart_host(ssh):
     ori_cmd = "sudo ipmitool chassis power reset"
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[3] == 0,"reset restart host fail,return is %s"%rs
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "reset restart host fail,return is %s" % rs

-def get_hostip_dev(ssh,hostip):
-    ori_cmd = "ip a|grep %s | awk '{print $7}'"%hostip
+
+def get_hostip_dev(ssh, hostip):
+    ori_cmd = "ip a|grep %s | awk '{print $7}'" % hostip
     rs = shell_operator.ssh_exec(ssh, ori_cmd)
-    assert rs[3] == 0,"error is %s"%rs[2]
+    assert rs[3] == 0, "error is %s" % rs[2]
     return "".join(rs[1]).strip()
+

 def clear_RecycleBin():
     host = random.choice(config.mds_list)
     ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
     ori_cmd = "curve_ops_tool clean-recycle --isTest"
     rs = shell_operator.ssh_exec(ssh, ori_cmd)
-    assert rs[3] == 0,"clean RecyclenBin fail,msg is %s"%rs[1]
+    assert rs[3] == 0, "clean RecycleBin fail,msg is %s" % rs[1]
     starttime = time.time()
     ori_cmd = "curve_ops_tool list -fileName=/RecycleBin |grep Total"
     while time.time() - starttime < 180:
@@ -199,9 +225,10 @@ def clear_RecycleBin():
         else:
             logger.debug("deleting")
         if rs[3] != 0:
-            logger.debug("list /RecycleBin fail,error is %s"%rs[1])
-        time.sleep(3) 
-    assert rs[3] == 0,"delete /RecycleBin fail,error is %s"%rs[1]
+            logger.debug("list /RecycleBin fail,error is %s" % rs[1])
+        time.sleep(3)
+    assert rs[3] == 0, "delete /RecycleBin fail,error is %s" % rs[1]
+

 def loop_map_unmap_file():
     thread = []
@@ -209,7 +236,7 @@ def loop_map_unmap_file():
         filename = "nbdthrash" + str(i)
         t = mythread.runThread(test_curve_stability_nbd.nbd_all, filename)
         thread.append(t)
-        logger.debug("thrash map unmap %s" %filename)
+        logger.debug("thrash map unmap %s" % filename)
     config.thrash_thread = thread

     for t in thread:
@@ -217,29 +244,32 @@ def loop_map_unmap_file():
     #     logger.debug("get result is %d" % t.get_result())
     #     assert t.get_result() == 0
+

 def stop_map_unmap():
     try:
         if config.thrash_thread == []:
-            assert False,"map umap not up"
+            assert False, "map umap not up"
         thread = config.thrash_thread
         config.thrash_map = False
         logger3.info("set thrash_map to false")
         time = 0
         for t in thread:
-            assert t.exitcode == 0,"map/umap thread error"
+            assert t.exitcode == 0, "map/umap thread error"
             result = t.get_result()
-            logger.debug("thrash map/umap time is %d"%result)
-            assert result > 
0,"map/umap thread error" + logger.debug("thrash map/umap time is %d" % result) + assert result > 0, "map/umap thread error" time = time + result - logger.info("map/umap all time is %d"%time) + logger.info("map/umap all time is %d" % time) except: - raise + raise + def stop_rwio(): - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) ori_cmd = "sudo supervisorctl stop all" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"stop rwio fail,rs is %s"%rs[1] + assert rs[3] == 0, "stop rwio fail,rs is %s" % rs[1] ori_cmd = "ps -ef|grep -v grep | grep randrw | awk '{print $2}'| sudo xargs kill -9" rs = shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "ps -ef|grep -v grep | grep -w /home/nbs/vdbench50406/profile | awk '{print $2}'| sudo xargs kill -9" @@ -247,114 +277,133 @@ def stop_rwio(): time.sleep(3) ssh.close() + def run_rwio(): - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) - ori_cmd = "lsblk |grep nbd0 | awk '{print $1}'" + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) + ori_cmd = "lsblk |grep nbd0 | awk '{print $1}'" rs = shell_operator.ssh_exec(ssh, ori_cmd) output = "".join(rs[1]).strip() if output != "nbd0": logger.error("map is error") - assert False,"output is %s"%output - ori_cmd = "lsblk |grep nbd1 | awk '{print $1}'" + assert False, "output is %s" % output + ori_cmd = "lsblk |grep nbd1 | awk '{print $1}'" rs = shell_operator.ssh_exec(ssh, ori_cmd) output = "".join(rs[1]).strip() if output != "nbd1": logger.error("map is error") - assert False,"output is %s"%output + assert False, "output is %s" % output ori_cmd = "sudo supervisorctl stop all && sudo supervisorctl reload" rs = shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "sudo nohup /home/nbs/vdbench50406/vdbench -jn -f /home/nbs/vdbench50406/profile &" rs = shell_operator.ssh_background_exec2(ssh, ori_cmd) - #write 60s io + # write 60s io time.sleep(60) # assert rs[3] == 0,"start rwio fail" ssh.close() + def init_recover_disk(fio_size): - ori_cmd = "sudo fio -name=/dev/nbd2 -direct=1 -iodepth=32 -rw=write -ioengine=libaio -bs=1024k -size=%dG -numjobs=1 -time_based"%int(fio_size) - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) + ori_cmd = "sudo fio -name=/dev/nbd2 -direct=1 -iodepth=32 -rw=write -ioengine=libaio -bs=1024k -size=%dG -numjobs=1 -time_based" % int( + fio_size) + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"write fio fail" + assert rs[3] == 0, "write fio fail" cmd = "sudo curve-nbd unmap cbd:pool1//recover_test_ >/dev/null 2>&1" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"unmap recover fail:%s"%rs[2] + assert rs[3] == 0, "unmap recover fail:%s" % rs[2] md5 = test_curve_stability_nbd.get_vol_md5("recover") config.recover_vol_md5 = md5 cmd = "curve delete --filename /recover --user test" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"delete /recover fail:%s"%rs[2] + assert rs[3] == 0, "delete /recover fail:%s" % rs[2] + def recover_disk(): cmd = "sudo curve recover --user test --filename /recover" - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) rs = 
shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"recover file fail:%s"%rs[2] + assert rs[3] == 0, "recover file fail:%s" % rs[2] md5 = test_curve_stability_nbd.get_vol_md5("recover") - assert md5 == config.recover_vol_md5,"Data is inconsistent after translation,md5 is %s,recover md5 is %s"%(config.recover_vol_md5,md5) - + assert md5 == config.recover_vol_md5, "Data is inconsistent after translation,md5 is %s,recover md5 is %s" % ( + config.recover_vol_md5, md5) + + def get_chunkserver_list(): client_host = config.client_list[0] logger.info("|------begin get chunkserver list------|") cmd = "curve_ops_tool chunkserver-list > cs_list" - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) -def get_chunkserver_id(host,cs_id): + +def get_chunkserver_id(host, cs_id): client_host = config.client_list[0] - logger.info("|------begin get chunkserver %s id %d------|"%(host,cs_id)) - cmd = "cat cs_list | grep %s |grep -w chunkserver%d"%(host,cs_id) + logger.info("|------begin get chunkserver %s id %d------|" % (host, cs_id)) + cmd = "cat cs_list | grep %s |grep -w chunkserver%d" % (host, cs_id) rs = shell_operator.ssh_exec(ssh, cmd) chunkserver_info = "".join(rs[1]).strip().split(',') - chunkserver_id = re.findall(r"\d+",chunkserver_info[0]) + chunkserver_id = re.findall(r"\d+", chunkserver_info[0]) if chunkserver_id != []: return int(chunkserver_id[0]) else: return -1 -def get_cs_copyset_num(host,cs_id): + +def get_cs_copyset_num(host, cs_id): client_host = config.client_list[0] cs_number = int(cs_id) + 8200 - cmd = "curve_ops_tool check-chunkserver -chunkserverAddr=%s:%d |grep 'total copysets'"%(host,cs_number) - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + cmd = "curve_ops_tool check-chunkserver -chunkserverAddr=%s:%d |grep 'total copysets'" % ( + host, cs_number) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) chunkserver_info = "".join(rs[1]).strip().split(',') - chunkserver_id = re.findall(r"\d+",chunkserver_info[0]) + chunkserver_id = re.findall(r"\d+", chunkserver_info[0]) if chunkserver_id != []: return int(chunkserver_id[0]) else: - return -1 + return -1 -def stop_vm(ssh,uuid): - stop_cmd = "source OPENRC && nova stop %s"%uuid + +def stop_vm(ssh, uuid): + stop_cmd = "source OPENRC && nova stop %s" % uuid rs = shell_operator.ssh_exec(ssh, stop_cmd) - assert rs[3] == 0,"stop vm fail,error is %s"%rs[2] + assert rs[3] == 0, "stop vm fail,error is %s" % rs[2] time.sleep(5) -def start_vm(ssh,uuid): - start_cmd = "source OPENRC && nova start %s"%uuid + +def start_vm(ssh, uuid): + start_cmd = "source OPENRC && nova start %s" % uuid rs = shell_operator.ssh_exec(ssh, start_cmd) - assert rs[3] == 0,"start vm fail,error is %s"%rs[2] + assert rs[3] == 0, "start vm fail,error is %s" % rs[2] -def restart_vm(ssh,uuid): - restart_cmd = "source OPENRC && nova reboot %s"%uuid + +def restart_vm(ssh, uuid): + restart_cmd = "source OPENRC && nova reboot %s" % uuid rs = shell_operator.ssh_exec(ssh, restart_cmd) - assert rs[3] == 0,"reboot vm fail,error is %s"%rs[2] + assert rs[3] == 0, "reboot vm fail,error is %s" % rs[2] + -def check_vm_status(ssh,uuid): - ori_cmd = "source OPENRC && nova list|grep %s|awk '{print $6}'"%uuid +def check_vm_status(ssh, uuid): + ori_cmd = "source OPENRC && nova list|grep %s|awk '{print $6}'" % uuid i = 0 while i < 
180: - rs = shell_operator.ssh_exec(ssh, ori_cmd) - if "".join(rs[1]).strip() == "ACTIVE": - return True - elif "".join(rs[1]).strip() == "ERROR": - return False - else: - time.sleep(5) - i = i + 5 - assert False,"start vm fail" - -def check_vm_vd(ip,nova_ssh,uuid): + rs = shell_operator.ssh_exec(ssh, ori_cmd) + if "".join(rs[1]).strip() == "ACTIVE": + return True + elif "".join(rs[1]).strip() == "ERROR": + return False + else: + time.sleep(5) + i = i + 5 + assert False, "start vm fail" + + +def check_vm_vd(ip, nova_ssh, uuid): i = 0 while i < 300: try: @@ -363,19 +412,21 @@ def check_vm_vd(ip,nova_ssh,uuid): rs = shell_operator.ssh_exec(ssh, ori_cmd) output = "".join(rs[1]).strip() if output == "vdc": - ori_cmd = "source OPENRC && nova reboot %s --hard"%uuid - shell_operator.ssh_exec(nova_ssh,ori_cmd) + ori_cmd = "source OPENRC && nova reboot %s --hard" % uuid + shell_operator.ssh_exec(nova_ssh, ori_cmd) elif output == "": break except: i = i + 5 time.sleep(5) - assert rs[3] == 0,"start vm fail,ori_cmd is %s" % rs[1] + assert rs[3] == 0, "start vm fail,ori_cmd is %s" % rs[1] + def init_vm(): - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - ori_cmd = "source OPENRC && nova list|grep %s | awk '{print $2}'"%config.vm_host - ori_cmd2 = "source OPENRC && nova list|grep %s | awk '{print $2}'"%config.vm_stability_host + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + ori_cmd = "source OPENRC && nova list|grep %s | awk '{print $2}'" % config.vm_host + ori_cmd2 = "source OPENRC && nova list|grep %s | awk '{print $2}'" % config.vm_stability_host try: rs = shell_operator.ssh_exec(ssh, ori_cmd) rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) @@ -384,23 +435,23 @@ def init_vm(): uuid = "".join(rs[1]).strip() uuid2 = "".join(rs2[1]).strip() - for i in range(1,10): + for i in range(1, 10): ori_cmd = "bash curve_test.sh delete" shell_operator.ssh_exec(ssh, ori_cmd) - ori_cmd = "source OPENRC && nova reboot %s --hard"%uuid - ori_cmd2 = "source OPENRC && nova reboot %s --hard"%uuid2 - rs = shell_operator.ssh_exec(ssh,ori_cmd) - rs2 = shell_operator.ssh_exec(ssh,ori_cmd2) + ori_cmd = "source OPENRC && nova reboot %s --hard" % uuid + ori_cmd2 = "source OPENRC && nova reboot %s --hard" % uuid2 + rs = shell_operator.ssh_exec(ssh, ori_cmd) + rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) time.sleep(60) - rs1 = check_vm_status(ssh,uuid) - rs2 = check_vm_status(ssh,uuid2) + rs1 = check_vm_status(ssh, uuid) + rs2 = check_vm_status(ssh, uuid2) if rs1 == True and rs2 == True: break - assert rs1 == True,"hard reboot vm fail" - assert rs2 == True,"hard reboot vm fail" + assert rs1 == True, "hard reboot vm fail" + assert rs2 == True, "hard reboot vm fail" - check_vm_vd(config.vm_host,ssh,uuid) - check_vm_vd(config.vm_stability_host,ssh,uuid2) + check_vm_vd(config.vm_host, ssh, uuid) + check_vm_vd(config.vm_stability_host, ssh, uuid2) except: logger.error("init vm error") raise @@ -408,42 +459,49 @@ def init_vm(): def remove_vm_key(): - cmd = "ssh-keygen -f ~/.ssh/known_hosts -R %s"%config.vm_host + cmd = "ssh-keygen -f ~/.ssh/known_hosts -R %s" % config.vm_host shell_operator.run_exec(cmd) print cmd -def attach_new_vol(fio_size,vdbench_size): - ori_cmd = "bash curve_test.sh create %d %d"%(int(fio_size),int(vdbench_size)) - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + +def attach_new_vol(fio_size, vdbench_size): + ori_cmd = "bash curve_test.sh create %d %d" % ( + 
int(fio_size), int(vdbench_size))
+    ssh = shell_operator.create_ssh_connect(
+        config.nova_host, 1046, config.nova_user)
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "attach vol fail,return is %s" % rs[2]
     logger.info("exec cmd %s" % ori_cmd)
-    assert rs[3] == 0,"attach vol fail,return is %s"%rs[2]
-    logger.info("exec cmd %s"%ori_cmd)
     get_vol_uuid()
     ssh.close()
+

 def detach_vol():
     stop_rwio()
     ori_cmd = "bash curve_test.sh delete"
-    ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user)
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
+    ssh = shell_operator.create_ssh_connect(
+        config.nova_host, 1046, config.nova_user)
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "retcode is %d,error is %s" % (rs[3], rs[2])
     logger.info("exec cmd %s" % ori_cmd)
-    assert rs[3] == 0,"retcode is %d,error is %s"%(rs[3],rs[2])
-    logger.info("exec cmd %s"%ori_cmd)
     ssh.close()
+

 def clean_nbd():
     for client_ip in config.client_list:
-        logger.info("|------begin test clean client %s------|"%(client_ip))
+        logger.info("|------begin test clean client %s------|" % (client_ip))
         cmd = "sudo curve-nbd list-mapped |grep nbd"
-        ssh = shell_operator.create_ssh_connect(client_ip, 1046, config.abnormal_user)
+        ssh = shell_operator.create_ssh_connect(
+            client_ip, 1046, config.abnormal_user)
         rs = shell_operator.ssh_exec(ssh, cmd)
         if rs[1] != []:
             for nbd_info in rs[1]:
-                nbd = re.findall("/dev/nbd\d+",nbd_info)
+                nbd = re.findall("/dev/nbd\d+", nbd_info)
                 cmd = "sudo curve-nbd unmap " + nbd[0]
                 rs = shell_operator.ssh_exec(ssh, cmd)
-                assert rs[3] == 0,"unmap %s fail,error is %s"%(nbd,rs[2])
+                assert rs[3] == 0, "unmap %s fail,error is %s" % (nbd, rs[2])
         cmd = "ps -ef|grep curve-nbd|grep -v grep | awk '{print $2}' | sudo xargs kill -9"
         rs = shell_operator.ssh_exec(ssh, cmd)
     return
@@ -451,159 +509,174 @@ def map_nbd():
     client_host = config.client_list[0]
-    ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user)
-    stripeUnit = [524288,1048576,2097152,4194304]
-    stripeCount = [1,2,4,8,16]
-    cmd = "curve create --filename /fiofile --length 10 --user test --stripeUnit %d --stripeCount %d"%(random.choice(stripeUnit),random.choice(stripeCount))
+    ssh = shell_operator.create_ssh_connect(
+        client_host, 1046, config.abnormal_user)
+    stripeUnit = [524288, 1048576, 2097152, 4194304]
+    stripeCount = [1, 2, 4, 8, 16]
+    cmd = "curve create --filename /fiofile --length 10 --user test --stripeUnit %d --stripeCount %d" % (
+        random.choice(stripeUnit), random.choice(stripeCount))
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"create /fiofile fail:%s"%rs[2]
-    cmd = "curve create --filename /vdbenchfile --length 10 --user test --stripeUnit %d --stripeCount %d"%(random.choice(stripeUnit),random.choice(stripeCount))
+    assert rs[3] == 0, "create /fiofile fail:%s" % rs[2]
+    cmd = "curve create --filename /vdbenchfile --length 10 --user test --stripeUnit %d --stripeCount %d" % (
+        random.choice(stripeUnit), random.choice(stripeCount))
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"create /vdbenchfile fail:%s"%rs[2]
-    #test recover recyclebin file
-    cmd = "curve create --filename /recover --length 10 --user test --stripeUnit %d --stripeCount %d"%(random.choice(stripeUnit),random.choice(stripeCount))
+    assert rs[3] == 0, "create /vdbenchfile fail:%s" % rs[2]
+    # test recover recyclebin file
+    cmd = "curve create --filename /recover --length 10 --user 
test --stripeUnit %d --stripeCount %d" % ( + random.choice(stripeUnit), random.choice(stripeCount)) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"create /recover fail:%s"%rs[2] + assert rs[3] == 0, "create /recover fail:%s" % rs[2] time.sleep(3) cmd = "sudo curve-nbd map cbd:pool1//fiofile_test_ >/dev/null 2>&1" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"map fiofile fail:%s"%rs[2] + assert rs[3] == 0, "map fiofile fail:%s" % rs[2] cmd = "sudo curve-nbd map cbd:pool1//vdbenchfile_test_ >/dev/null 2>&1" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"map vdbenchfile fail:%s"%rs[2] + assert rs[3] == 0, "map vdbenchfile fail:%s" % rs[2] cmd = "sudo curve-nbd map cbd:pool1//recover_test_ >/dev/null 2>&1" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"map recover fail:%s"%rs[2] + assert rs[3] == 0, "map recover fail:%s" % rs[2] + def delete_nbd(): client_host = config.client_list[0] - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) cmd = "curve delete --filename /fiofile --user test" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"delete /fiofile fail:%s"%rs[2] + assert rs[3] == 0, "delete /fiofile fail:%s" % rs[2] cmd = "curve delete --filename /vdbenchfile --user test" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"delete /vdbenchfile fail:%s"%rs[2] + assert rs[3] == 0, "delete /vdbenchfile fail:%s" % rs[2] cmd = "curve delete --filename /recover --user test" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"delete /recover fail:%s"%rs[2] + assert rs[3] == 0, "delete /recover fail:%s" % rs[2] + def check_host_connect(ip): - cmd = "ping %s -w3"%ip + cmd = "ping %s -w3" % ip status = shell_operator.run_exec(cmd) if status == 0: return True else: return False + def get_chunkserver_status(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) grep_cmd = "bash /home/nbs/chunkserver_ctl.sh status all" - rs = shell_operator.ssh_exec(ssh,grep_cmd) + rs = shell_operator.ssh_exec(ssh, grep_cmd) chunkserver_lines = rs[1] - logger.debug("get lines is %s"%chunkserver_lines) - up_cs = [int(i.split()[0][11:]) for i in filter(lambda x: "active" in x, chunkserver_lines)] - down_cs = [int(i.split()[0][11:]) for i in filter(lambda x: "down" in x, chunkserver_lines)] - return {'up':up_cs, 'down':down_cs} + logger.debug("get lines is %s" % chunkserver_lines) + up_cs = [int(i.split()[0][11:]) + for i in filter(lambda x: "active" in x, chunkserver_lines)] + down_cs = [int(i.split()[0][11:]) + for i in filter(lambda x: "down" in x, chunkserver_lines)] + return {'up': up_cs, 'down': down_cs} ssh.close() -def kill_mult_cs_process(host,num): + +def kill_mult_cs_process(host, num): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) operate_cs = [] - for i in range(0,num): + for i in range(0, num): try: - cs_status = get_chunkserver_status(host) - up_cs = cs_status["up"] - if up_cs == []: - raise Exception("no chunkserver up") + cs_status = get_chunkserver_status(host) + up_cs = cs_status["up"] + if up_cs == []: + raise Exception("no chunkserver up") except Exception as e: - logger.debug("cs_status is %s"%cs_status) - logger.error("%s"%e) - raise AssertionError() - logger.debug("cs_status is %s"%cs_status) + logger.debug("cs_status is %s" % cs_status) + logger.error("%s" % e) + raise AssertionError() + logger.debug("cs_status is %s" % cs_status) cs = 
random.choice(up_cs) ori_cmd = "ps -ef|grep -v grep | grep -w chunkserver%d | awk '{print $2}' && \ - ps -ef|grep -v grep | grep -w /etc/curve/chunkserver.conf.%d |grep -v sudo | awk '{print $2}'"%(cs,cs) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - logger.debug("exec %s"%ori_cmd) + ps -ef|grep -v grep | grep -w /etc/curve/chunkserver.conf.%d |grep -v sudo | awk '{print $2}'" % (cs, cs) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.debug("exec %s" % ori_cmd) pid_chunkserver = "".join(rs[1]).strip() - logger.info("test kill host %s chunkserver %s"%(host,cs)) - kill_cmd = "sudo kill -9 %s"%pid_chunkserver - rs = shell_operator.ssh_exec(ssh,kill_cmd) - logger.debug("exec %s,stdout is %s"%(kill_cmd,"".join(rs[2]))) - assert rs[3] == 0,"kill chunkserver fail" + logger.info("test kill host %s chunkserver %s" % (host, cs)) + kill_cmd = "sudo kill -9 %s" % pid_chunkserver + rs = shell_operator.ssh_exec(ssh, kill_cmd) + logger.debug("exec %s,stdout is %s" % (kill_cmd, "".join(rs[2]))) + assert rs[3] == 0, "kill chunkserver fail" up_cs.remove(cs) operate_cs.append(cs) ssh.close() return operate_cs -def start_mult_cs_process(host,num): + +def start_mult_cs_process(host, num): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) operate_cs = [] - for i in range(0,num): + for i in range(0, num): try: - cs_status = get_chunkserver_status(host) - down_cs = cs_status["down"] - if down_cs == []: - raise Exception("no chunkserver down") + cs_status = get_chunkserver_status(host) + down_cs = cs_status["down"] + if down_cs == []: + raise Exception("no chunkserver down") except Exception as e: - logger.error("%s"%e) - assert False - #raise AssertionError() - logger.debug("cs_status is %s"%cs_status) + logger.error("%s" % e) + assert False + #raise AssertionError() + logger.debug("cs_status is %s" % cs_status) cs = random.choice(down_cs) - if get_cs_copyset_num(host,cs) == 0: - ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat"%(cs) + if get_cs_copyset_num(host, cs) == 0: + ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat" % (cs) rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0 - ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d"%cs - logger.debug("exec %s"%ori_cmd) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"start chunkserver fail,error is %s"%rs[1] + ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d" % cs + logger.debug("exec %s" % ori_cmd) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "start chunkserver fail,error is %s" % rs[1] time.sleep(2) ori_cmd = "ps -ef|grep -v grep | grep -w chunkserver%d | awk '{print $2}' && \ ps -ef|grep -v grep | grep -w /etc/curve/chunkserver.conf.%d |grep -v sudo | awk '{print $2}'" % (cs, cs) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] == []: - assert False,"up chunkserver fail" + assert False, "up chunkserver fail" down_cs.remove(cs) operate_cs.append(cs) ssh.close() return operate_cs + def up_all_cs(): operate_cs = [] for host in config.chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) try: - cs_status = get_chunkserver_status(host) - down_cs = cs_status["down"] - if down_cs == []: - continue + cs_status = get_chunkserver_status(host) + down_cs = cs_status["down"] + if down_cs == []: + continue except Exception as e: - logger.error("%s"%e) - assert False - #raise AssertionError() - 
logger.debug("cs_status is %s"%cs_status) + logger.error("%s" % e) + assert False + #raise AssertionError() + logger.debug("cs_status is %s" % cs_status) cs = random.choice(down_cs) for cs in down_cs: - if get_cs_copyset_num(host,cs) == 0: + if get_cs_copyset_num(host, cs) == 0: ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat;sudo rm -rf /data/chunkserver%d/copysets;\ - sudo rm -rf /data/chunkserver%d/recycler"%(cs,cs,cs) + sudo rm -rf /data/chunkserver%d/recycler" % (cs, cs, cs) rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0 - ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d"%cs - logger.debug("exec %s"%ori_cmd) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"start chunkserver fail" + ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d" % cs + logger.debug("exec %s" % ori_cmd) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "start chunkserver fail" time.sleep(2) ori_cmd = "ps -ef|grep -v grep | grep -w chunkserver%d | awk '{print $2}' && \ ps -ef|grep -v grep | grep -w /etc/curve/chunkserver.conf.%d |grep -v sudo | awk '{print $2}'" % (cs, cs) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] == []: - assert False,"up chunkserver fail" + assert False, "up chunkserver fail" ssh.close() + def stop_host_cs_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) try: @@ -612,17 +685,18 @@ def stop_host_cs_process(host): if up_cs == []: raise Exception("no chunkserver up") except Exception as e: - logger.error("%s"%e) + logger.error("%s" % e) raise AssertionError() - logger.debug("cs_status is %s"%cs_status) + logger.debug("cs_status is %s" % cs_status) ori_cmd = "ps -ef|grep -v grep | grep -w curve-chunkserver |grep -v sudo | awk '{print $2}' | sudo xargs kill -9" - rs = shell_operator.ssh_exec(ssh,ori_cmd) - logger.debug("exec %s"%ori_cmd) - print "test kill host %s chunkserver %s"%(host,up_cs) - assert rs[3] == 0,"kill chunkserver fail" + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.debug("exec %s" % ori_cmd) + print "test kill host %s chunkserver %s" % (host, up_cs) + assert rs[3] == 0, "kill chunkserver fail" ssh.close() -def start_host_cs_process(host,csid=-1): + +def start_host_cs_process(host, csid=-1): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) cs_status = get_chunkserver_status(host) down_cs = cs_status["down"] @@ -636,17 +710,19 @@ def start_host_cs_process(host,csid=-1): if csid == -1: ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start all" else: - if get_cs_copyset_num(host,csid) == 0: - ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat"%(csid) + if get_cs_copyset_num(host, csid) == 0: + ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat" % ( + csid) rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0 - ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d" %csid - print "test up host %s chunkserver %s"%(host, down_cs) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"start chunkserver fail,error is %s"%rs[1] + ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d" % csid + print "test up host %s chunkserver %s" % (host, down_cs) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "start chunkserver fail,error is %s" % rs[1] ssh.close() -def restart_mult_cs_process(host,num): + +def restart_mult_cs_process(host, num): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) for i in range(0, num): try: @@ -680,6 +756,7 @@ def 
restart_mult_cs_process(host,num): assert False, "up chunkserver fail" up_cs.remove(cs) + def kill_mds_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep -v sudo | grep curve-mds | awk '{print $2}'" @@ -689,10 +766,11 @@ def kill_mds_process(host): return for pid in pids[1]: pid = pid.strip() - kill_cmd = "sudo kill -9 %s"%pid - rs = shell_operator.ssh_exec(ssh,kill_cmd) - logger.debug("exec %s,stdout is %s"%(kill_cmd,"".join(rs[1]))) - assert rs[3] == 0,"kill mds fail,process is %s"%pid + kill_cmd = "sudo kill -9 %s" % pid + rs = shell_operator.ssh_exec(ssh, kill_cmd) + logger.debug("exec %s,stdout is %s" % (kill_cmd, "".join(rs[1]))) + assert rs[3] == 0, "kill mds fail,process is %s" % pid + def start_mds_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) @@ -703,12 +781,13 @@ def start_mds_process(host): return up_cmd = "sudo nohup /usr/bin/curve-mds --confPath=/etc/curve/mds.conf &" shell_operator.ssh_background_exec2(ssh, up_cmd) - logger.debug("exec %s"%(up_cmd)) + logger.debug("exec %s" % (up_cmd)) time.sleep(2) rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] == []: assert False, "mds up fail" + def kill_etcd_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep etcd | awk '{print $2}'" @@ -718,35 +797,37 @@ def kill_etcd_process(host): return for pid in pids[1]: pid = pid.strip() - kill_cmd = "sudo kill -9 %s"%pid - rs = shell_operator.ssh_exec(ssh,kill_cmd) - logger.debug("exec %s,stdout is %s"%(kill_cmd,"".join(rs[1]))) - assert rs[3] == 0,"kill etcd fail" + kill_cmd = "sudo kill -9 %s" % pid + rs = shell_operator.ssh_exec(ssh, kill_cmd) + logger.debug("exec %s,stdout is %s" % (kill_cmd, "".join(rs[1]))) + assert rs[3] == 0, "kill etcd fail" + def start_etcd_process(host): -# ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) -# ori_cmd = "ps -ef|grep -v grep | grep etcd | awk '{print $2}'" -# rs = shell_operator.ssh_exec(ssh, ori_cmd) -# if rs[1] != []: -# logger.debug("etcd already up") -# return -# mkdir_cmd = "sudo rm -rf /etcd/default.etcd" -# rs = shell_operator.ssh_exec(ssh, mkdir_cmd) -# up_cmd = " cd etcdrun && sudo nohup ./run.sh existing &" + # ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + # ori_cmd = "ps -ef|grep -v grep | grep etcd | awk '{print $2}'" + # rs = shell_operator.ssh_exec(ssh, ori_cmd) + # if rs[1] != []: + # logger.debug("etcd already up") + # return + # mkdir_cmd = "sudo rm -rf /etcd/default.etcd" + # rs = shell_operator.ssh_exec(ssh, mkdir_cmd) + # up_cmd = " cd etcdrun && sudo nohup ./run.sh existing &" # shell_operator.ssh_background_exec2(ssh, up_cmd) -# logger.debug("exec %s"%(up_cmd)) -# time.sleep(2) -# rs = shell_operator.ssh_exec(ssh, ori_cmd) -# if rs[1] == []: -# assert False, "etcd up fail" + # logger.debug("exec %s"%(up_cmd)) + # time.sleep(2) + # rs = shell_operator.ssh_exec(ssh, ori_cmd) + # if rs[1] == []: + # assert False, "etcd up fail" try: - cmd = "ansible-playbook -i curve/curve-ansible/server.ini curve/curve-ansible/start_curve.yml --tags etcd" - ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"ansible start etcd fail" + cmd = "ansible-playbook -i curve/curve-ansible/server.ini curve/curve-ansible/start_curve.yml --tags etcd" + ret = shell_operator.run_exec(cmd) + assert ret == 0, "ansible start etcd fail" except Exception: - logger.error("ansible start etcd fail.") - raise - + 
logger.error("ansible start etcd fail.") + raise + + def stop_mysql_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep mysql" @@ -756,8 +837,9 @@ def stop_mysql_process(host): return ori_cmd = "sudo killall mysqld" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.debug("exec %s,stdout is %s"%(ori_cmd,"".join(rs[1]))) - assert rs[3] == 0,"stop mysql fail" + logger.debug("exec %s,stdout is %s" % (ori_cmd, "".join(rs[1]))) + assert rs[3] == 0, "stop mysql fail" + def start_mysql_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) @@ -774,73 +856,82 @@ def start_mysql_process(host): if rs[1] == []: assert False, "mysql up fail" + def get_cluster_iops(): return 100 + def exec_deleteforce(): client_list = config.client_list host = random.choice(client_list) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 robot/Resources/keywords/deleteforce-test.py %s:~/"%(config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 robot/Resources/keywords/deleteforce-test.py %s:~/" % ( + config.pravie_key_path, host) shell_operator.run_exec2(cmd) ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) ori_cmd = "sudo cp ~/deleteforce-test.py /usr/curvefs/" shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "sudo python /usr/curvefs/deleteforce-test.py" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.info("exec deleteforce return is %s"%rs[1]) - assert rs[3] == 0,"rc is %d"%rs[3] - + logger.info("exec deleteforce return is %s" % rs[1]) + assert rs[3] == 0, "rc is %d" % rs[3] + + def get_all_chunk_num(): chunkserver_list = config.chunkserver_list num = 0 for host in chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) cs_status = get_chunkserver_status(host) cs_list = cs_status["up"] + cs_status["down"] for cs in cs_list: - ori_cmd = "ls /data/chunkserver%d/chunkfilepool/ |wc -l"%cs + ori_cmd = "ls /data/chunkserver%d/chunkfilepool/ |wc -l" % cs rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0 num = num + int("".join(rs[1]).strip()) - logger.info("now num is %d"%(num)) + logger.info("now num is %d" % (num)) return num def check_nbd_iops(limit_iops=3000): - ssh = shell_operator.create_ssh_connect(config.client_list[0],1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) ori_cmd = "iostat -d nb0 3 2 |grep nb0 | awk 'END {print $6}'" rs = shell_operator.ssh_exec(ssh, ori_cmd) kb_wrtn = "".join(rs[1]).strip() iops = int(kb_wrtn) / int(config.fio_iosize) - logger.info("now nbd0 iops is %d with 4k randrw"%iops) - assert iops >= limit_iops,"vm iops not ok,is %d"%iops + logger.info("now nbd0 iops is %d with 4k randrw" % iops) + assert iops >= limit_iops, "vm iops not ok,is %d" % iops + def check_chunkserver_online(num=120): host = random.choice(config.mds_list) ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) ori_cmd = "curve_ops_tool chunkserver-status | grep chunkserver" - + starttime = time.time() i = 0 while time.time() - starttime < 300: rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[3] != 0: - logger.debug("get chunkserver status fail,rs is %s"%rs[1]) + logger.debug("get chunkserver status fail,rs is %s" % rs[1]) time.sleep(10) continue status = "".join(rs[1]).strip() - online_num = re.findall(r'(?<=online = )\d+',status) - 
logger.info("chunkserver online num is %s"%online_num) + online_num = re.findall(r'(?<=online = )\d+', status) + logger.info("chunkserver online num is %s" % online_num) if int(online_num[0]) != num: - logger.debug("chunkserver online num is %s"%online_num) + logger.debug("chunkserver online num is %s" % online_num) time.sleep(10) else: break if int(online_num[0]) != num: ori_cmd = "curve_ops_tool chunkserver-list -checkHealth=false -checkCSAlive | grep OFFLINE" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.error("chunkserver offline list is %s"%rs[1]) - assert int(online_num[0]) == num,"chunkserver online num is %s"%online_num + logger.error("chunkserver offline list is %s" % rs[1]) + assert int( + online_num[0]) == num, "chunkserver online num is %s" % online_num + def wait_health_ok(): host = random.choice(config.mds_list) @@ -858,9 +949,10 @@ def wait_health_ok(): ori_cmd2 = "curve_ops_tool copysets-status -detail | grep \"unhealthy copysets statistic\"" rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) health = rs2[1] - logger.debug("copysets status is %s"%health) + logger.debug("copysets status is %s" % health) time.sleep(10) - assert check == 1,"cluster is not healthy in %d s"%config.recover_time + assert check == 1, "cluster is not healthy in %d s" % config.recover_time + def rapid_leader_schedule(): host = random.choice(config.mds_list) @@ -877,12 +969,12 @@ def rapid_leader_schedule(): else: ori_cmd2 = "curve_ops_tool check-operator -opName=change_peer" rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) - logger.debug("operator status is %s"%rs2[1]) + logger.debug("operator status is %s" % rs2[1]) time.sleep(10) - assert check == 1,"change operator num is not 0 in %d s"%config.recover_time + assert check == 1, "change operator num is not 0 in %d s" % config.recover_time ori_cmd = "curve_ops_tool rapid-leader-schedule" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"rapid leader schedule not ok" + assert rs[3] == 0, "rapid leader schedule not ok" ori_cmd = "curve_ops_tool check-operator -opName=transfer_leader -leaderOpInterval=1| grep \"Operator num is\"" starttime = time.time() while time.time() - starttime < 60: @@ -893,6 +985,7 @@ def rapid_leader_schedule(): else: time.sleep(1) + def wait_cluster_healthy(limit_iops=8000): check_chunkserver_online() host = random.choice(config.mds_list) @@ -912,46 +1005,53 @@ def wait_cluster_healthy(limit_iops=8000): ori_cmd2 = "curve_ops_tool status" rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) cluster_status = "".join(rs2[1]).strip() - logger.debug("cluster status is %s"%cluster_status) + logger.debug("cluster status is %s" % cluster_status) ori_cmd2 = "curve_ops_tool copysets-status -detail" rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) copysets_status = "".join(rs2[1]).strip() - logger.debug("copysets status is %s"%copysets_status) - assert check == 1,"cluster is not healthy in %d s,cluster status is:\n %s,copysets status is:\n %s"%(config.recover_time,cluster_status,copysets_status) - rapid_leader_schedule() - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) + logger.debug("copysets status is %s" % copysets_status) + assert check == 1, "cluster is not healthy in %d s,cluster status is:\n %s,copysets status is:\n %s" % ( + config.recover_time, cluster_status, copysets_status) + rapid_leader_schedule() + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) i = 0 while i < 300: ori_cmd = "iostat -d nb0 1 2 |grep nb0 | awk 'END {print $6}'" rs = 
shell_operator.ssh_exec(ssh, ori_cmd) kb_wrtn = "".join(rs[1]).strip() iops = int(kb_wrtn) / int(config.fio_iosize) - logger.info("vm iops is %d"%iops) + logger.info("vm iops is %d" % iops) if iops >= limit_iops: break i = i + 2 time.sleep(2) - assert iops >= limit_iops,"vm iops not ok in 300s" + assert iops >= limit_iops, "vm iops not ok in 300s" + def clean_kernel_log(): for host in config.client_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo logrotate -vf /etc/logrotate.d/rsyslog" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0," rollback log fail, %s"%rs[1] + assert rs[3] == 0, " rollback log fail, %s" % rs[1] ssh.close() + def check_io_error(): for host in config.client_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo grep \'I/O error\' /var/log/kern.log -R | grep -v nbd2" rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] != []: ori_cmd = "sudo logrotate -vf /etc/logrotate.d/rsyslog" shell_operator.ssh_exec(ssh, ori_cmd) - assert False," rwio error,log is %s"%rs[1] + assert False, " rwio error,log is %s" % rs[1] ssh.close() + def check_copies_consistency(): host = random.choice(config.mds_list) ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) @@ -966,15 +1066,16 @@ def check_copies_consistency(): rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[3] == 0: break - logger.info("check_hash false return is %s,return code is %d"%(rs[1],rs[3])) + logger.info( + "check_hash false return is %s,return code is %d" % (rs[1], rs[3])) time.sleep(3) i = i + 3 if rs[3] != 0: - assert False,"exec check_hash false fail,return is %s"%rs[1] + assert False, "exec check_hash false fail,return is %s" % rs[1] check_hash = "true" ori_cmd = ori_cmdpri + check_hash - rs = shell_operator.ssh_exec(ssh,ori_cmd) - logger.debug("exec %s,stdout is %s"%(ori_cmd,"".join(rs[1]))) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.debug("exec %s,stdout is %s" % (ori_cmd, "".join(rs[1]))) if rs[3] == 0: print "check consistency ok!" 
else: @@ -983,15 +1084,18 @@ def check_copies_consistency(): chunkID = message["chunkID"] hosts = message["hosts"] chunkservers = message["chunkservers"] - for i in range(0,3): + for i in range(0, 3): host = hosts[i] chunkserver = chunkservers[i] - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - ori_cmd = "sudo cp /data/%s/copysets/%s/data/chunk_%s /data/log/%s"%(chunkserver,groupId,chunkID,chunkserver) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + ori_cmd = "sudo cp /data/%s/copysets/%s/data/chunk_%s /data/log/%s" % ( + chunkserver, groupId, chunkID, chunkserver) rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[3] != 0: - logger.error("cp chunk fail,is %s"%rs[1]) - assert False,"checkconsistecny fail,error is %s"%("".join(rs[1]).strip()) + logger.error("cp chunk fail,is %s" % rs[1]) + assert False, "check consistency fail,error is %s" % ( + "".join(rs[1]).strip()) # check_data_consistency() except: logger.error("check consistency error") @@ -999,132 +1103,151 @@ def check_data_consistency(): try: - #wait run 60s io - #time.sleep(60) - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) + # wait run 60s io + # time.sleep(60) + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) ori_cmd = "grep \"Data Validation error\" /home/nbs/output/ -R && \ grep \"Data Validation error\" /home/nbs/nohup.out" rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] != []: t = time.time() - ori_cmd = "mv /home/nbs/output /home/nbs/vdbench-output/output-%d && mv /home/nbs/nohup.out /home/nbs/nohup-%d"%(int(t),int(t)) + ori_cmd = "mv /home/nbs/output /home/nbs/vdbench-output/output-%d && mv /home/nbs/nohup.out /home/nbs/nohup-%d" % ( + int(t), int(t)) rs = shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "mkdir output && touch nohup.out" rs = shell_operator.ssh_exec(ssh, ori_cmd) # logger.error("find error in %s"%rs[1]) - assert False,"find data consistency error,save log to vm /root/vdbench-output/output-%d"%int(t) + assert False, "find data consistency error,save log to vm /root/vdbench-output/output-%d" % int( + t) except Exception as e: ssh.close() raise ssh.close() + def test_kill_chunkserver_num(num): start_iops = get_cluster_iops() chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test kill chunkserver num %d,host %s------|"%(num,chunkserver_host)) + logger.info("|------begin test kill chunkserver num %d,host %s------|" % + (num, chunkserver_host)) try: -# check_chunkserver_status(chunkserver_host) - kill_mult_cs_process(chunkserver_host,num) + # check_chunkserver_status(chunkserver_host) + kill_mult_cs_process(chunkserver_host, num) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - logger.error("error:%s"%e) - start_mult_cs_process(chunkserver_host,num) - raise + logger.error("error:%s" % e) + start_mult_cs_process(chunkserver_host, num) + raise return chunkserver_host -def test_start_chunkserver_num(num,host=None): + +def test_start_chunkserver_num(num, host=None): start_iops = get_cluster_iops() if host == None: - chunkserver_host = random.choice(config.chunkserver_list) + chunkserver_host = random.choice(config.chunkserver_list) else: chunkserver_host = host - 
logger.info("|------begin test start chunkserver num %d,host %s------|"%(num,chunkserver_host)) + logger.info("|------begin test start chunkserver num %d,host %s------|" % + (num, chunkserver_host)) try: - start_mult_cs_process(chunkserver_host,num) + start_mult_cs_process(chunkserver_host, num) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - raise + raise + def test_outcs_recover_copyset(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test out one chunkserver,host %s------|"%(chunkserver_host)) + logger.info("|------begin test out one chunkserver,host %s------|" % + (chunkserver_host)) try: - cs_list = kill_mult_cs_process(chunkserver_host,1) - begin_num = get_cs_copyset_num(chunkserver_host,cs_list[0]) - #time.sleep(config.recover_time) + cs_list = kill_mult_cs_process(chunkserver_host, 1) + begin_num = get_cs_copyset_num(chunkserver_host, cs_list[0]) + # time.sleep(config.recover_time) i = 0 time.sleep(5) while i < config.recover_time: check_nbd_iops() i = i + 60 - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) + num = get_cs_copyset_num(chunkserver_host, cs_list[0]) time.sleep(60) if num == 0: break - logger.info("cs copyset num is %d"%num) + logger.info("cs copyset num is %d" % num) if num != 0: - # assert num != 0 - raise Exception("host %s chunkserver %d not recover to 0 in %d,now is %d"%(chunkserver_host,cs_list[0],config.recover_time,num)) + # assert num != 0 + raise Exception("host %s chunkserver %d not recover to 0 in %d,now is %d" % ( + chunkserver_host, cs_list[0], config.recover_time, num)) except Exception as e: -# raise AssertionError() - logger.error("error is %s"%e) - cs_list = start_host_cs_process(chunkserver_host,cs_list[0]) + # raise AssertionError() + logger.error("error is %s" % e) + cs_list = start_host_cs_process(chunkserver_host, cs_list[0]) raise - return chunkserver_host,begin_num + return chunkserver_host, begin_num + -def test_upcs_recover_copyset(host,copyset_num): +def test_upcs_recover_copyset(host, copyset_num): if host == None: chunkserver_host = random.choice(config.chunkserver_list) else: chunkserver_host = host - logger.info("|------begin test up one chunkserver,host %s------|"%(chunkserver_host)) + logger.info("|------begin test up one chunkserver,host %s------|" % + (chunkserver_host)) try: - cs_list = start_mult_cs_process(chunkserver_host,1) + cs_list = start_mult_cs_process(chunkserver_host, 1) time.sleep(10) - #time.sleep(config.recover_time) + # time.sleep(config.recover_time) i = 0 while i < config.recover_time: check_nbd_iops() i = i + 60 time.sleep(60) - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) - logger.info("cs copyset num is %d"%num) + num = get_cs_copyset_num(chunkserver_host, cs_list[0]) + logger.info("cs copyset num is %d" % num) if abs(num - copyset_num) <= 10: break if abs(num - copyset_num) > 10: - logger.error("get host %s chunkserver %d copyset num is %d"%(chunkserver_host,cs_list[0],num)) + logger.error("get host %s chunkserver %d copyset num is %d" % + (chunkserver_host, cs_list[0], num)) raise Exception( - "host %s chunkserver %d not recover to %d in %d,now is %d" % \ - (chunkserver_host, cs_list[0],copyset_num,config.recover_time,num)) + "host %s chunkserver %d not recover to %d in %d,now is %d" % + (chunkserver_host, cs_list[0], copyset_num, config.recover_time, num)) except 
Exception as e: - logger.error("error is :%s"%e) - raise + logger.error("error is :%s" % e) + raise return chunkserver_host + def stop_all_cs_not_recover(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test stop all chunkserver,host %s------|"%(chunkserver_host)) + logger.info("|------begin test stop all chunkserver,host %s------|" % + (chunkserver_host)) try: stop_host_cs_process(chunkserver_host) list = get_chunkserver_status(chunkserver_host) down_list = list["down"] dict = {} for cs in down_list: - num = get_cs_copyset_num(chunkserver_host,cs) + num = get_cs_copyset_num(chunkserver_host, cs) dict[cs] = num time.sleep(config.offline_timeout + 10) check_nbd_iops() for cs in dict: - num = get_cs_copyset_num(chunkserver_host,cs) + num = get_cs_copyset_num(chunkserver_host, cs) if num != dict[cs]: - # assert num != 0 - raise Exception("stop all chunkserver not recover fail,cs id %d,copysets num from %d to %d" % (cs,dict[cs],num)) + # assert num != 0 + raise Exception("stop all chunkserver not recover fail,cs id %d,copysets num from %d to %d" % ( + cs, dict[cs], num)) except Exception as e: # raise AssertionError() logger.error("error is %s" % e) @@ -1132,11 +1255,15 @@ def stop_all_cs_not_recover(): raise start_host_cs_process(chunkserver_host) + def pendding_all_cs_recover(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test pendding all chunkserver,host %s------|"%(chunkserver_host)) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - ssh_mds = shell_operator.create_ssh_connect(config.mds_list[0], 1046, config.abnormal_user) + logger.info( + "|------begin test pendding all chunkserver,host %s------|" % (chunkserver_host)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + ssh_mds = shell_operator.create_ssh_connect( + config.mds_list[0], 1046, config.abnormal_user) try: stop_host_cs_process(chunkserver_host) list = get_chunkserver_status(chunkserver_host) @@ -1149,13 +1276,14 @@ def pendding_all_cs_recover(): mds_addrs = ",".join(mds) get_chunkserver_list() for cs in down_list: - chunkserver_id = get_chunkserver_id(chunkserver_host,cs) + chunkserver_id = get_chunkserver_id(chunkserver_host, cs) assert chunkserver_id != -1 csid_list.append(chunkserver_id) pendding_cmd = "sudo curve-tool -mds_addr=%s -op=set_chunkserver \ - -chunkserver_id=%d -chunkserver_status=pendding"%(mds_addrs,chunkserver_id) - rs = shell_operator.ssh_exec(ssh_mds,pendding_cmd) - assert rs[3] == 0,"pendding chunkserver %d fail,rs is %s"%(cs,rs) + -chunkserver_id=%d -chunkserver_status=pendding" % (mds_addrs, chunkserver_id) + rs = shell_operator.ssh_exec(ssh_mds, pendding_cmd) + assert rs[3] == 0, "pendding chunkserver %d fail,rs is %s" % ( + cs, rs) time.sleep(180) test_kill_mds(2) i = 0 @@ -1164,13 +1292,14 @@ def pendding_all_cs_recover(): i = i + 60 time.sleep(60) for cs in down_list: - num = get_cs_copyset_num(chunkserver_host,cs) + num = get_cs_copyset_num(chunkserver_host, cs) if num != 0: break if num == 0: break if num != 0: - logger.error("exist chunkserver %d copyset %d"%(chunkserver_id,num)) + logger.error("exist chunkserver %d copyset %d" % + (chunkserver_id, num)) raise Exception("pendding chunkserver fail") except Exception as e: # raise AssertionError() @@ -1180,7 +1309,7 @@ def pendding_all_cs_recover(): raise test_start_mds() for cs in down_list: - start_host_cs_process(chunkserver_host,cs) + start_host_cs_process(chunkserver_host, cs) 
time.sleep(60) list = get_chunkserver_status(chunkserver_host) up_list = list["up"] @@ -1189,23 +1318,28 @@ def pendding_all_cs_recover(): while i < config.recover_time: i = i + 10 time.sleep(10) - num = get_cs_copyset_num(chunkserver_host,cs) - logger.info("cs copyset num is %d"%num) + num = get_cs_copyset_num(chunkserver_host, cs) + logger.info("cs copyset num is %d" % num) if num > 0: break if num == 0: - logger.error("get host %s chunkserver %d copyset num is %d"%(chunkserver_host,cs,num)) + logger.error("get host %s chunkserver %d copyset num is %d" % + (chunkserver_host, cs, num)) raise Exception( - "host %s chunkserver %d not recover to %d in %d,now is %d" % \ - (chunkserver_host, cs,1,config.recover_time,num)) + "host %s chunkserver %d not recover to %d in %d,now is %d" % + (chunkserver_host, cs, 1, config.recover_time, num)) + def pendding_all_cs_recover_online(): cs_host = list(config.chunkserver_list) chunkserver_host = random.choice(config.cs_list) cs_host.remove(chunkserver_host) - logger.info("|------begin test pendding all chunkserver online,host %s------|"%(chunkserver_host)) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - ssh_mds = shell_operator.create_ssh_connect(config.mds_list[0], 1046, config.abnormal_user) + logger.info( + "|------begin test pendding all chunkserver online,host %s------|" % (chunkserver_host)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + ssh_mds = shell_operator.create_ssh_connect( + config.mds_list[0], 1046, config.abnormal_user) try: list = get_chunkserver_status(chunkserver_host) up_list = list["up"] @@ -1216,13 +1350,14 @@ def pendding_all_cs_recover_online(): mds_addrs = ",".join(mds) get_chunkserver_list() for cs in up_list: - chunkserver_id = get_chunkserver_id(chunkserver_host,cs) + chunkserver_id = get_chunkserver_id(chunkserver_host, cs) assert chunkserver_id != -1 csid_list.append(chunkserver_id) pendding_cmd = "sudo curve-tool -mds_addr=%s -op=set_chunkserver \ - -chunkserver_id=%d -chunkserver_status=pendding"%(mds_addrs,chunkserver_id) - rs = shell_operator.ssh_exec(ssh_mds,pendding_cmd) - assert rs[3] == 0,"pendding chunkserver %d fail,rs is %s"%(cs,rs) + -chunkserver_id=%d -chunkserver_status=pendding" % (mds_addrs, chunkserver_id) + rs = shell_operator.ssh_exec(ssh_mds, pendding_cmd) + assert rs[3] == 0, "pendding chunkserver %d fail,rs is %s" % ( + cs, rs) time.sleep(180) test_kill_mds(2) chunkserver_host2 = random.choice(config.cs_list) @@ -1236,7 +1371,7 @@ def pendding_all_cs_recover_online(): i = i + 60 time.sleep(60) for cs in up_list: - num = get_cs_copyset_num(chunkserver_host,cs) + num = get_cs_copyset_num(chunkserver_host, cs) if num != 0: break if num == 0: @@ -1244,7 +1379,8 @@ def pendding_all_cs_recover_online(): stop_host_cs_process(chunkserver_host) wait_health_ok() if num != 0: - logger.error("exist chunkserver %d copyset %d"%(chunkserver_id,num)) + logger.error("exist chunkserver %d copyset %d" % + (chunkserver_id, num)) raise Exception("online pendding chunkserver fail") except Exception as e: # raise AssertionError() @@ -1254,7 +1390,7 @@ def pendding_all_cs_recover_online(): raise test_start_mds() for cs in up_list: - start_host_cs_process(chunkserver_host,cs) + start_host_cs_process(chunkserver_host, cs) time.sleep(60) list = get_chunkserver_status(chunkserver_host) up_list = list["up"] @@ -1263,146 +1399,162 @@ def pendding_all_cs_recover_online(): while i < config.recover_time: i = i + 10 time.sleep(10) - num = 
get_cs_copyset_num(chunkserver_host,cs) - logger.info("cs copyset num is %d"%num) + num = get_cs_copyset_num(chunkserver_host, cs) + logger.info("cs copyset num is %d" % num) if num > 0: break if num == 0: - logger.error("get host %s chunkserver %d copyset num is %d"%(chunkserver_host,cs,num)) + logger.error("get host %s chunkserver %d copyset num is %d" % + (chunkserver_host, cs, num)) raise Exception( - "host %s chunkserver %d not recover to %d in %d,now is %d" % \ - (chunkserver_host, cs,1,config.recover_time,num)) + "host %s chunkserver %d not recover to %d in %d,now is %d" % + (chunkserver_host, cs, 1, config.recover_time, num)) + def test_suspend_recover_copyset(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test suspend recover,host %s------|"%(chunkserver_host)) + logger.info("|------begin test suspend recover,host %s------|" % + (chunkserver_host)) try: - cs_list = kill_mult_cs_process(chunkserver_host,1) - begin_num = get_cs_copyset_num(chunkserver_host,cs_list[0]) - #time.sleep(config.recover_time) + cs_list = kill_mult_cs_process(chunkserver_host, 1) + begin_num = get_cs_copyset_num(chunkserver_host, cs_list[0]) + # time.sleep(config.recover_time) i = 0 time.sleep(config.offline_timeout - 5) while i < config.recover_time: check_nbd_iops() i = i + 1 - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) + num = get_cs_copyset_num(chunkserver_host, cs_list[0]) time.sleep(1) - logger.info("now cs copyset num is %d,begin_num is %d"%(num,begin_num)) - if num > 0 and abs(begin_num - num) > 10 : + logger.info("now cs copyset num is %d,begin_num is %d" % + (num, begin_num)) + if num > 0 and abs(begin_num - num) > 10: break elif num == 0: - cs_list = start_host_cs_process(chunkserver_host,cs_list[0]) - assert False,"copyset is 0" + cs_list = start_host_cs_process(chunkserver_host, cs_list[0]) + assert False, "copyset is 0" start_host_cs_process(chunkserver_host) i = 0 while i < config.recover_time: check_nbd_iops() i = i + 60 - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) + num = get_cs_copyset_num(chunkserver_host, cs_list[0]) time.sleep(60) - logger.info("cs copyset num is %d"%num) + logger.info("cs copyset num is %d" % num) if abs(num - begin_num) < 10: break if abs(num - begin_num) > 10: raise Exception( - "host %s chunkserver %d not recover to %d in %d,now is %d" % \ - (chunkserver_host, cs_list[0],begin_num,config.recover_time,num)) + "host %s chunkserver %d not recover to %d in %d,now is %d" % + (chunkserver_host, cs_list[0], begin_num, config.recover_time, num)) except Exception as e: -# raise AssertionError() - logger.error("error is %s"%e) + # raise AssertionError() + logger.error("error is %s" % e) cs_list = start_host_cs_process(chunkserver_host) raise + def test_suspend_delete_recover_copyset(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test suspend delete recover,host %s------|"%(chunkserver_host)) + logger.info( + "|------begin test suspend delete recover,host %s------|" % (chunkserver_host)) try: - cs_list = kill_mult_cs_process(chunkserver_host,1) - begin_num = get_cs_copyset_num(chunkserver_host,cs_list[0]) - #time.sleep(config.recover_time) + cs_list = kill_mult_cs_process(chunkserver_host, 1) + begin_num = get_cs_copyset_num(chunkserver_host, cs_list[0]) + # time.sleep(config.recover_time) i = 0 time.sleep(10) while i < config.recover_time: check_nbd_iops() i = i + 1 - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) + num = get_cs_copyset_num(chunkserver_host, 
cs_list[0]) time.sleep(1) - logger.info("now cs copyset num is %d,begin_num is %d"%(num,begin_num)) - if num > 0 and abs(begin_num - num) > 10 : + logger.info("now cs copyset num is %d,begin_num is %d" % + (num, begin_num)) + if num > 0 and abs(begin_num - num) > 10: break elif num == 0: - cs_list = start_host_cs_process(chunkserver_host,cs_list[0]) - assert False,"copyset is 0" - start_host_cs_process(chunkserver_host,cs_list[0]) + cs_list = start_host_cs_process(chunkserver_host, cs_list[0]) + assert False, "copyset is 0" + start_host_cs_process(chunkserver_host, cs_list[0]) time.sleep(300) i = 0 while i < config.recover_time: check_nbd_iops() i = i + 60 - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) + num = get_cs_copyset_num(chunkserver_host, cs_list[0]) time.sleep(60) - logger.info("cs copyset num is %d"%num) + logger.info("cs copyset num is %d" % num) if abs(num - begin_num) < 10: break if abs(num - begin_num) > 10: raise Exception( - "host %s chunkserver %d not recover to %d in %d,now is %d" % \ - (chunkserver_host, cs_list[0],begin_num,config.recover_time,num)) + "host %s chunkserver %d not recover to %d in %d,now is %d" % + (chunkserver_host, cs_list[0], begin_num, config.recover_time, num)) except Exception as e: -# raise AssertionError() - logger.error("error is %s"%e) + # raise AssertionError() + logger.error("error is %s" % e) cs_list = start_host_cs_process(chunkserver_host) raise + def test_kill_mds(num=1): start_iops = get_cluster_iops() - logger.info("|------begin test kill mds num %d------|"%(num)) + logger.info("|------begin test kill mds num %d------|" % (num)) mds_ips = list(config.mds_list) try: - for i in range(0,num): + for i in range(0, num): mds_host = random.choice(mds_ips) - logger.info("mds ip is %s"%mds_host) + logger.info("mds ip is %s" % mds_host) kill_mds_process(mds_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) mds_ips.remove(mds_host) except Exception as e: - logger.error("kill mds %s fail"%mds_host) - raise + logger.error("kill mds %s fail" % mds_host) + raise return mds_host + def test_start_mds(): start_iops = get_cluster_iops() try: - logger.info("mds list is %s"%config.mds_list) + logger.info("mds list is %s" % config.mds_list) for mds_host in config.mds_list: start_mds_process(mds_host) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - raise + raise + def test_start_snap(): start_iops = get_cluster_iops() try: - logger.info("snap list is %s"%config.snap_server_list) + logger.info("snap list is %s" % config.snap_server_list) for snap_host in config.snap_server_list: start_snap_process(snap_host) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - raise + raise + def test_start_nginx(): client_host = config.client_list[0] - logger.info("|------begin start nginx,host %s------|"%(client_host)) + logger.info("|------begin start nginx,host %s------|" % (client_host)) cmd = "sudo docker start 5ac540f1608d" - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + 
ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"start nginx docker fail %s"%rs[1] + assert rs[3] == 0, "start nginx docker fail %s" % rs[1] + def start_snap_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) @@ -1413,12 +1565,13 @@ def start_snap_process(host): return up_cmd = "cd snapshot/temp && sudo nohup curve-snapshotcloneserver -conf=/etc/curve/snapshot_clone_server.conf &" shell_operator.ssh_background_exec2(ssh, up_cmd) - logger.debug("exec %s"%(up_cmd)) + logger.debug("exec %s" % (up_cmd)) time.sleep(2) rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] == []: assert False, "snap up fail" + def test_round_restart_mds(): logger.info("|------begin test round restart mds------|") start_iops = get_cluster_iops() @@ -1430,29 +1583,33 @@ def test_round_restart_mds(): start_mds_process(mds_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - logger.error("round restart mds %s fail"%mds_host) + logger.error("round restart mds %s fail" % mds_host) raise + def test_kill_etcd(num=1): - logger.info("|------begin test kill etcd num %d------|"%(num)) + logger.info("|------begin test kill etcd num %d------|" % (num)) start_iops = get_cluster_iops() etcd_ips = list(config.etcd_list) try: - for i in range(0,num): + for i in range(0, num): etcd_host = random.choice(etcd_ips) - logger.info("etcd ip is %s"%etcd_host) + logger.info("etcd ip is %s" % etcd_host) kill_etcd_process(etcd_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) etcd_ips.remove(etcd_host) except Exception as e: - logger.error("kill etcd %s fail"%etcd_host) + logger.error("kill etcd %s fail" % etcd_host) raise return etcd_host + def test_start_etcd(): start_iops = get_cluster_iops() try: @@ -1460,9 +1617,11 @@ def test_start_etcd(): start_etcd_process(etcd_host) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - raise + raise + def test_round_restart_etcd(): logger.info("|------begin test round restart etcd------|") @@ -1475,11 +1634,13 @@ def test_round_restart_etcd(): start_etcd_process(etcd_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - logger.error("round restart etcd %s fail"%etcd_host) + logger.error("round restart etcd %s fail" % etcd_host) raise + def test_kill_mysql(): logger.info("|------begin test kill mysql------|") start_iops = get_cluster_iops() @@ -1488,12 +1649,14 @@ def test_kill_mysql(): stop_mysql_process(mysql_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: start_mysql_process(mysql_host) raise return mysql_host + def 
test_start_mysql(host): start_iops = get_cluster_iops() mysql_host = host @@ -1501,69 +1664,84 @@ def test_start_mysql(host): start_mysql_process(mysql_host) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: raise + def test_stop_chunkserver_host(): start_iops = get_cluster_iops() chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test stop chunkserver host %s------|"%(chunkserver_host)) + logger.info("|------begin test stop chunkserver host %s------|" % + (chunkserver_host)) try: stop_host_cs_process(chunkserver_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: start_host_cs_process(chunkserver_host) raise e return chunkserver_host + def test_start_chunkserver_host(host=None): start_iops = get_cluster_iops() if host == None: - chunkserver_host = random.choice(config.chunkserver_list) + chunkserver_host = random.choice(config.chunkserver_list) else: chunkserver_host = host try: start_host_cs_process(chunkserver_host) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: raise e + def test_restart_chunkserver_num(num): start_iops = get_cluster_iops() chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test restart chunkserver num %d,host %s------|"%(num,chunkserver_host)) + logger.info("|------begin test restart chunkserver num %d,host %s------|" % + (num, chunkserver_host)) try: - restart_mult_cs_process(chunkserver_host,num) + restart_mult_cs_process(chunkserver_host, num) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: raise e + def stop_scheduler(): - ssh = shell_operator.create_ssh_connect(config.mds_list[0], 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + config.mds_list[0], 1046, config.abnormal_user) for mds_host in config.mds_list: - logger.info("|------begin stop copyset scheduler %s------|"%(mds_host)) - cmd = "curl -L %s:6666/flags/enableCopySetScheduler?setvalue=false"%mds_host - rs = shell_operator.ssh_exec(ssh,cmd) + logger.info("|------begin stop copyset scheduler %s------|" % + (mds_host)) + cmd = "curl -L %s:6666/flags/enableCopySetScheduler?setvalue=false" % mds_host + rs = shell_operator.ssh_exec(ssh, cmd) time.sleep(180) + def test_start_all_chunkserver(): start_iops = get_cluster_iops() try: for chunkserver_host in config.chunkserver_list: - start_host_cs_process(chunkserver_host) - end_iops = get_cluster_iops() - if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + start_host_cs_process(chunkserver_host) + end_iops = get_cluster_iops() + if float(end_iops) / float(start_iops) < 0.9: + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: raise e + def test_stop_all_chunkserver(): start_iops = 
get_cluster_iops() logger.info("|------begin test stop all chunkserver------|") @@ -1572,18 +1750,21 @@ def test_stop_all_chunkserver(): stop_host_cs_process(chunkserver_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: test_start_all_chunkserver() raise e + def test_kill_diff_host_chunkserver(): start_iops = get_cluster_iops() chunkserver_list = list(config.chunkserver_list) chunkserver_host1 = random.choice(chunkserver_list) chunkserver_list.remove(chunkserver_host1) chunkserver_host2 = random.choice(chunkserver_list) - logger.info("|------begin test kill diff host chunkserver,host1 %s,host2 %s------|"%(chunkserver_host1,chunkserver_host2)) + logger.info("|------begin test kill diff host chunkserver,host1 %s,host2 %s------|" % + (chunkserver_host1, chunkserver_host2)) try: kill_mult_cs_process(chunkserver_host1, 1) kill_mult_cs_process(chunkserver_host2, 1) @@ -1602,44 +1783,52 @@ def test_kill_diff_host_chunkserver(): start_mult_cs_process(chunkserver_host1, 1) start_mult_cs_process(chunkserver_host2, 1) + def test_reboot_nebd(): client_host = random.choice(config.client_list) - logger.info("|------begin test reboot nebd %s------|"%(client_host)) + logger.info("|------begin test reboot nebd %s------|" % (client_host)) cmd = "sudo nebd-daemon restart" - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"reboot nebd daemon fail,return is %s"%rs[1] + assert rs[3] == 0, "reboot nebd daemon fail,return is %s" % rs[1] + def test_cs_loss_package(percent): start_iops = get_cluster_iops() chunkserver_list = config.chunkserver_list chunkserver_host = random.choice(chunkserver_list) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - dev = get_hostip_dev(ssh,chunkserver_host) - logger.info("|------begin test host %s dev %s loss package------|"%(chunkserver_host,dev)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + dev = get_hostip_dev(ssh, chunkserver_host) + logger.info("|------begin test host %s dev %s loss package------|" % + (chunkserver_host, dev)) try: package_loss_all(ssh, dev, percent) - show_tc_inject(ssh,dev) + show_tc_inject(ssh, dev) check_nbd_iops(1) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.1: raise Exception("client io slow op more than 5s") except Exception as e: - raise + raise finally: time.sleep(60) - cancel_tc_inject(ssh,dev) + cancel_tc_inject(ssh, dev) + def test_mds_loss_package(percent): start_iops = get_cluster_iops() mds_list = config.mds_list mds_host = random.choice(mds_list) - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) - dev = get_hostip_dev(ssh,mds_host) - logger.info("|------begin test host %s dev %s loss package------|"%(mds_host,dev)) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) + dev = get_hostip_dev(ssh, mds_host) + logger.info("|------begin test host %s dev %s loss package------|" % + (mds_host, dev)) try: package_loss_all(ssh, dev, percent) - show_tc_inject(ssh,dev) + show_tc_inject(ssh, dev) check_nbd_iops(1) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.1: @@ -1648,18 +1837,21 @@ 
def test_mds_loss_package(percent): raise finally: time.sleep(60) - cancel_tc_inject(ssh,dev) + cancel_tc_inject(ssh, dev) + def test_cs_delay_package(ms): start_iops = get_cluster_iops() chunkserver_list = config.chunkserver_list chunkserver_host = random.choice(chunkserver_list) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - dev = get_hostip_dev(ssh,chunkserver_host) - logger.info("|------begin test host %s dev %s delay package------|"%(chunkserver_host,dev)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + dev = get_hostip_dev(ssh, chunkserver_host) + logger.info("|------begin test host %s dev %s delay package------|" % + (chunkserver_host, dev)) try: package_delay_all(ssh, dev, ms) - show_tc_inject(ssh,dev) + show_tc_inject(ssh, dev) check_nbd_iops(1) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.1: @@ -1668,18 +1860,21 @@ def test_cs_delay_package(ms): raise finally: time.sleep(60) - cancel_tc_inject(ssh,dev) + cancel_tc_inject(ssh, dev) + def test_mds_delay_package(ms): start_iops = get_cluster_iops() mds_list = config.mds_list mds_host = random.choice(mds_list) - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) - dev = get_hostip_dev(ssh,mds_host) - logger.info("|------begin test host %s dev %s delay package------|"%(mds_host,dev)) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) + dev = get_hostip_dev(ssh, mds_host) + logger.info("|------begin test host %s dev %s delay package------|" % + (mds_host, dev)) try: package_delay_all(ssh, dev, ms) - show_tc_inject(ssh,dev) + show_tc_inject(ssh, dev) # check_nbd_iops(1) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.1: @@ -1688,75 +1883,93 @@ def test_mds_delay_package(ms): raise finally: time.sleep(60) - cancel_tc_inject(ssh,dev) + cancel_tc_inject(ssh, dev) + def test_chunkserver_cpu_stress(stress=50): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test chunkserver cpu stress,host %s------|"%(chunkserver_host)) + logger.info( + "|------begin test chunkserver cpu stress,host %s------|" % (chunkserver_host)) cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 robot/Resources/keywords/cpu_stress.py \ - %s:~/"%(config.pravie_key_path,chunkserver_host) + %s:~/" % (config.pravie_key_path, chunkserver_host) shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - inject_cpu_stress(ssh,stress) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + inject_cpu_stress(ssh, stress) return ssh - + + def test_mds_cpu_stress(stress=50): mds_host = random.choice(config.mds_list) - logger.info("|------begin test mds cpu stress,host %s------|"%(mds_host)) + logger.info("|------begin test mds cpu stress,host %s------|" % (mds_host)) cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 robot/Resources/keywords/cpu_stress.py \ - %s:~/"%(config.pravie_key_path,mds_host) + %s:~/" % (config.pravie_key_path, mds_host) shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) - inject_cpu_stress(ssh,stress) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) + inject_cpu_stress(ssh, stress) return ssh + def test_client_cpu_stress(stress=50): -# client_host = random.choice(config.client_list) + # client_host = random.choice(config.client_list) 
client_host = config.client_list[0] - logger.info("|------begin test client cpu stress,host %s------|"%(client_host)) + logger.info("|------begin test client cpu stress,host %s------|" % + (client_host)) cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 robot/Resources/keywords/cpu_stress.py \ - %s:~/"%(config.pravie_key_path,client_host) + %s:~/" % (config.pravie_key_path, client_host) shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) - inject_cpu_stress(ssh,stress) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) + inject_cpu_stress(ssh, stress) return ssh + def test_chunkserver_mem_stress(stress=50): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test chunkserver mem stress,host %s------|"%(chunkserver_host)) + logger.info( + "|------begin test chunkserver mem stress,host %s------|" % (chunkserver_host)) cmd = "free -g |grep Mem|awk \'{print $2}\'" - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) all_mem = int("".join(rs[1]).strip()) stress = all_mem * stress / 100 - inject_mem_stress(ssh,stress) + inject_mem_stress(ssh, stress) return ssh + def test_mds_mem_stress(stress=50): mds_host = random.choice(config.mds_list) - logger.info("|------begin test mds mem stress,host %s------|"%(mds_host)) + logger.info("|------begin test mds mem stress,host %s------|" % (mds_host)) cmd = "free -g |grep Mem|awk \'{print $2}\'" - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) all_mem = int("".join(rs[1]).strip()) stress = all_mem * stress / 100 - inject_mem_stress(ssh,stress) + inject_mem_stress(ssh, stress) return ssh + def test_client_mem_stress(stress=50): client_host = config.client_list[0] - logger.info("|------begin test client mem stress,host %s------|"%(client_host)) + logger.info("|------begin test client mem stress,host %s------|" % + (client_host)) cmd = "free -g |grep Mem|awk \'{print $2}\'" - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) all_mem = int("".join(rs[1]).strip()) stress = all_mem * stress / 100 - inject_mem_stress(ssh,stress) + inject_mem_stress(ssh, stress) return ssh + def test_chunkserver_network_stress(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test chunkserver network stress,host %s------|"%(chunkserver_host)) + logger.info( + "|------begin test chunkserver network stress,host %s------|" % (chunkserver_host)) t1 = mythread.runThread(listen_network_stress, chunkserver_host) t2 = mythread.runThread(inject_network_stress, chunkserver_host) t1.start() @@ -1764,9 +1977,11 @@ def test_chunkserver_network_stress(): t2.start() return chunkserver_host + def test_mds_network_stress(): mds_host = random.choice(config.mds_list) - logger.info("|------begin test mds network stress,host %s------|"%(mds_host)) + logger.info( + "|------begin test mds network stress,host %s------|" % (mds_host)) t1 = mythread.runThread(listen_network_stress, mds_host) t2 = mythread.runThread(inject_network_stress, mds_host) t1.start() @@ -1774,9 
+1989,11 @@ def test_mds_network_stress(): t2.start() return mds_host + def test_client_network_stress(): client_host = config.client_list[0] - logger.info("|------begin test client network stress,host %s------|"%(client_host)) + logger.info( + "|------begin test client network stress,host %s------|" % (client_host)) t1 = mythread.runThread(listen_network_stress, client_host) t2 = mythread.runThread(inject_network_stress, client_host) t1.start() @@ -1784,23 +2001,31 @@ def test_client_network_stress(): t2.start() return client_host + def test_chunkserver_clock_offset(offset): chunkserver_host = random.choice(config.chunkserver_list) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - inject_clock_offset(ssh,offset) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + inject_clock_offset(ssh, offset) return ssh + def test_mds_clock_offset(offset): mds_host = random.choice(config.mds_list) - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) - inject_clock_offset(ssh,offset) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) + inject_clock_offset(ssh, offset) return ssh -#使用cycle会从掉电到上电有1秒钟的间隔 +# There is a 1-second interval from power down to power up when using cycle + + def test_ipmitool_restart_chunkserver(): chunkserver_host = random.choice(config.chunkserver_reset_list) - logger.info("|------begin test chunkserver ipmitool cycle,host %s------|"%(chunkserver_host)) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) + logger.info( + "|------begin test chunkserver ipmitool cycle,host %s------|" % (chunkserver_host)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) ipmitool_cycle_restart_host(ssh) time.sleep(60) starttime = time.time() @@ -1812,13 +2037,16 @@ def test_ipmitool_restart_chunkserver(): else: logger.debug("wait host up") time.sleep(5) - assert status,"restart host %s fail"%chunkserver_host + assert status, "restart host %s fail" % chunkserver_host start_host_cs_process(chunkserver_host) + def test_ipmitool_restart_client(): client_host = config.client_list[0] - logger.info("|------begin test client ipmitool cycle,host %s------|"%(client_host)) - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + logger.info( + "|------begin test client ipmitool cycle,host %s------|" % (client_host)) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) ipmitool_cycle_restart_host(ssh) time.sleep(60) starttime = time.time() @@ -1830,13 +2058,17 @@ def test_ipmitool_restart_client(): else: logger.debug("wait host up") time.sleep(5) - assert status,"restart host %s fail"%client_host + assert status, "restart host %s fail" % client_host + +# There is no interval between power-off and power-on when using reset + -#使用reset从掉电到上电没有间隔 def test_ipmitool_reset_chunkserver(): chunkserver_host = random.choice(config.chunkserver_reset_list) - logger.info("|------begin test chunkserver ipmitool reset,host %s------|"%(chunkserver_host)) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) + logger.info( + "|------begin test chunkserver ipmitool reset,host %s------|" % (chunkserver_host)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) ipmitool_reset_restart_host(ssh) time.sleep(60) starttime = time.time() @@ -1848,13 +2080,16 @@ def 
test_ipmitool_reset_chunkserver(): else: logger.debug("wait host up") time.sleep(5) - assert status,"restart host %s fail"%chunkserver_host + assert status, "restart host %s fail" % chunkserver_host start_host_cs_process(chunkserver_host) + def test_ipmitool_restart_mds(): mds_host = random.choice(config.mds_reset_list) - logger.info("|------begin test mds ipmitool cycle,host %s------|"%(mds_host)) - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) + logger.info( + "|------begin test mds ipmitool cycle,host %s------|" % (mds_host)) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) ipmitool_cycle_restart_host(ssh) time.sleep(60) starttime = time.time() @@ -1866,11 +2101,12 @@ def test_ipmitool_restart_mds(): else: logger.debug("wait host up") time.sleep(5) - assert status,"restart host %s fail"%mds_host + assert status, "restart host %s fail" % mds_host start_mds_process(mds_host) start_etcd_process(mds_host) start_host_cs_process(mds_host) + def clean_last_data(): ssh = shell_operator.create_ssh_connect(config.vm_host, 22, config.vm_user) ori_cmd = "rm /root/perf/test-ssd/fiodata/* && rm /root/perf/test-ssd/cfg/*" @@ -1879,19 +2115,20 @@ def clean_last_data(): ori_cmd = "rm /root/perf/fiodata -rf" rs = shell_operator.ssh_exec(ssh, ori_cmd) + def analysis_data(ssh): ori_cmd = "cd /root/perf/ && python gen_randrw_data.py" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"gen randrw data fail,error is %s"%rs[1] + assert rs[3] == 0, "gen randrw data fail,error is %s" % rs[1] ori_cmd = "cat /root/perf/test.csv" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"get data fail,error is %s"%rs[1] + assert rs[3] == 0, "get data fail,error is %s" % rs[1] for line in rs[1]: if 'randread,4k' in line: randr_4k_iops = line.split(',')[4] elif 'randwrite,4k' in line: randw_4k_iops = line.split(',')[8] - elif 'write,512k' in line: + elif 'write,512k' in line: write_512k_iops = line.split(',')[8] elif 'read,512k' in line: read_512k_iops = line.split(',')[4] @@ -1900,24 +2137,29 @@ def analysis_data(ssh): read_512k_BW = float(read_512k_iops)*1000/2 write_512k_BW = float(write_512k_iops)*1000/2 logger.info("get one volume Basic data:-------------------------------") - logger.info("4k rand read iops is %d/s"%int(randr_4k_iops)) - logger.info("4k rand write iops is %d/s"%int(randw_4k_iops)) - logger.info("512k read BW is %d MB/s"%int(read_512k_BW)) - logger.info("512k write BW is %d MB/s"%int(write_512k_BW)) + logger.info("4k rand read iops is %d/s" % int(randr_4k_iops)) + logger.info("4k rand write iops is %d/s" % int(randw_4k_iops)) + logger.info("512k read BW is %d MB/s" % int(read_512k_BW)) + logger.info("512k write BW is %d MB/s" % int(write_512k_BW)) filename = "onevolume_perf.txt" - with open(filename,'w') as f: - f.write("4k randwrite %d/s 56000\n"%int(randw_4k_iops)) - f.write("4k randread %d/s 75000\n"%int(randr_4k_iops)) - f.write("512k write %dMB/s 135\n"%int(write_512k_BW)) - f.write("512k read %dMB/s 450\n"%int(read_512k_BW)) + with open(filename, 'w') as f: + f.write("4k randwrite %d/s 56000\n" % int(randw_4k_iops)) + f.write("4k randread %d/s 75000\n" % int(randr_4k_iops)) + f.write("512k write %dMB/s 135\n" % int(write_512k_BW)) + f.write("512k read %dMB/s 450\n" % int(read_512k_BW)) if randr_4k_iops < 75000: - assert float(75000 - randr_4k_iops)/75000 < 0.02,"4k_randr_iops did not meet expectations,expect more than 75000" + assert float(75000 - randr_4k_iops) / \ + 75000 < 0.02, "4k_randr_iops did not meet 
expectations,expect more than 75000" if randw_4k_iops < 56000: - assert float(56000 - randw_4k_iops)/56000 < 0.02,"4k_randw_iops did not meet expectations,expect more than 56000" + assert float(56000 - randw_4k_iops) / \ + 56000 < 0.02, "4k_randw_iops did not meet expectations,expect more than 56000" if read_512k_BW < 450: - assert float(450 - read_512k_BW)/450 < 0.02,"512k_read_bw did not meet expectations,expect more than 450" + assert float(450 - read_512k_BW) / \ + 450 < 0.02, "512k_read_bw did not meet expectations,expect more than 450" if write_512k_BW < 135: - assert float(135 - write_512k_BW)/135 < 0.02,"512k_write_bw did not meet expectations,expect more than 135" + assert float(135 - write_512k_BW) / \ + 135 < 0.02, "512k_write_bw did not meet expectations,expect more than 135" + def perf_test(): ssh = shell_operator.create_ssh_connect(config.vm_host, 22, config.vm_user) @@ -1929,7 +2171,7 @@ def perf_test(): -bs=4k -size=200G -runtime=300 -numjobs=1 -time_based" shell_operator.ssh_exec(ssh, init_io) start_test = "cd /root/perf && nohup python /root/perf/io_test.py &" - shell_operator.ssh_background_exec2(ssh,start_test) + shell_operator.ssh_background_exec2(ssh, start_test) time.sleep(60) final = 0 starttime = time.time() @@ -1942,123 +2184,134 @@ def perf_test(): else: logger.debug("wait io test finally") time.sleep(60) - assert final == 1,"io test have not finall" + assert final == 1, "io test has not finished" ori_cmd = "cp -r /root/perf/test-ssd/fiodata /root/perf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"cp fiodata fail,error is %s"%rs[1] + assert rs[3] == 0, "cp fiodata fail,error is %s" % rs[1] analysis_data(ssh) + def add_data_disk(): ori_cmd = "bash attach_thrash.sh" - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"attach thrash vol fail,rs is %s"%rs[1] + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "attach thrash vol fail,rs is %s" % rs[1] ori_cmd = "cat thrash_vm" - rs = shell_operator.ssh_exec(ssh,ori_cmd) - logger.info("rs is %s"%rs[1]) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.info("rs is %s" % rs[1]) vm_list = [] for i in rs[1]: - logger.info("uuid is %s"%i) - vm_list.append(i.strip()) + logger.info("uuid is %s" % i) + vm_list.append(i.strip()) vm_ip_list = [] for vm in vm_list: - ori_cmd = "source OPENRC && nova list|grep %s"%vm - rs = shell_operator.ssh_exec(ssh,ori_cmd) + ori_cmd = "source OPENRC && nova list|grep %s" % vm + rs = shell_operator.ssh_exec(ssh, ori_cmd) ret = "".join(rs[1]).strip() - ip = re.findall(r'\d+\.\d+\.\d+\.\d+',ret) + ip = re.findall(r'\d+\.\d+\.\d+\.\d+', ret) logger.info("get vm %s ip %s"%(vm,ip)) + logger.info("get vm %s ip %s" % (vm, ip)) vm_ip_list.append(ip[0]) ssh.close() ssh = shell_operator.create_ssh_connect(config.vm_host, 22, config.vm_user) for ip in vm_ip_list: - ori_cmd = "ssh %s -o StrictHostKeyChecking=no "%ip + "\"" + " supervisorctl reload && supervisorctl start all " + "\"" + ori_cmd = "ssh %s -o StrictHostKeyChecking=no " % ip + "\"" + \ + " supervisorctl reload && supervisorctl start all " + "\"" logger.info("exec cmd %s" % ori_cmd) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"start supervisor fail,rs is %s"%rs[1] + assert rs[3] == 0, "start supervisor fail,rs is %s" % rs[1] ssh.close() def create_vm_image(vm_name): - ssh = 
 
+
 def create_vm_image(vm_name):
-    ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user)
-    ori_cmd = "source OPENRC && nova list |grep %s | awk '{print $2}'"%(vm_name)
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
+    ssh = shell_operator.create_ssh_connect(
+        config.nova_host, 1046, config.nova_user)
+    ori_cmd = "source OPENRC && nova list |grep %s | awk '{print $2}'" % (
+        vm_name)
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
     logger.info("vm uuid is %s" % rs[1])
     thrash_vm_uuid = "".join(rs[1]).strip()
-    ori_cmd = "source OPENRC && nova image-create %s image-%s"%(thrash_vm_uuid,vm_name)
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[3] == 0,"create vm %s image fail"%(thrash_vm_uuid)
+    ori_cmd = "source OPENRC && nova image-create %s image-%s" % (
+        thrash_vm_uuid, vm_name)
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "create vm %s image fail" % (thrash_vm_uuid)
     starttime = time.time()
-    ori_cmd = "source OPENRC && nova image-list|grep image-%s|awk '{print $6}'"%vm_name
+    ori_cmd = "source OPENRC && nova image-list|grep image-%s|awk '{print $6}'" % vm_name
     while time.time() - starttime < 600:
         rs = shell_operator.ssh_exec(ssh, ori_cmd)
         if "".join(rs[1]).strip() == "ACTIVE":
             break
         elif "".join(rs[1]).strip() == "ERROR":
-            assert False,"create vm image image-%s fail"%(vm_name)
+            assert False, "create vm image image-%s fail" % (vm_name)
         else:
             time.sleep(10)
     if "".join(rs[1]).strip() != "ACTIVE":
-        assert False,"wait image create image-%s fail"%(vm_name)
-    ori_cmd = "source OPENRC && nova image-list|grep image-%s|awk '{print $2}'"%vm_name
+        assert False, "wait image create image-%s fail" % (vm_name)
+    ori_cmd = "source OPENRC && nova image-list|grep image-%s|awk '{print $2}'" % vm_name
     rs = shell_operator.ssh_exec(ssh, ori_cmd)
     return "".join(rs[1]).strip()
 
+
 def get_all_curvevm_active_num(num):
-    ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user)
+    ssh = shell_operator.create_ssh_connect(
+        config.nova_host, 1046, config.nova_user)
     starttime = time.time()
     while time.time() - starttime < 600:
-        ori_cmd = "source OPENRC && nova list |grep %s | grep ACTIVE | wc -l"%config.vm_prefix
-        rs = shell_operator.ssh_exec(ssh,ori_cmd)
-        assert rs[3] == 0,"get vm status fail"
+        ori_cmd = "source OPENRC && nova list |grep %s | grep ACTIVE | wc -l" % config.vm_prefix
+        rs = shell_operator.ssh_exec(ssh, ori_cmd)
+        assert rs[3] == 0, "get vm status fail"
         if int("".join(rs[1]).strip()) == num:
             break
         else:
             time.sleep(10)
     active_num = "".join(rs[1]).strip()
-    ori_cmd = "source OPENRC && nova list |grep %s | awk '{print $2}'"%config.vm_prefix
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[3] == 0,"get vm uuid fail"
+    ori_cmd = "source OPENRC && nova list |grep %s | awk '{print $2}'" % config.vm_prefix
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "get vm uuid fail"
     for uuid in rs[1]:
         uuid = uuid.strip()
         status = "up"
         cmd = "source OPENRC && nova show %s |grep os-server-status |awk \'{print $4}\'" % uuid
         st = shell_operator.ssh_exec(ssh, cmd)
         status = "".join(st[1]).strip()
-        assert status == "up","get vm status fail,not up.is %s,current vm id is %s"%(status,uuid)
+        assert status == "up", "get vm status fail,not up.is %s,current vm id is %s" % (
+            status, uuid)
     return active_num
 
+
 def init_create_curve_vm(num):
     image_id = config.image_id
     salt = ''.join(random.sample(string.ascii_letters + string.digits, 8))
-    logger.info("vm name is thrash-%s"%salt)
+    
logger.info("vm name is thrash-%s" % salt) + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) ori_cmd = "source OPENRC && nova boot --flavor 400 --image %s --vnc-password 000000 --availability-zone %s \ --key-name cyh --nic vpc-net=ff89c80a-585d-4b19-992a-462f4d2ddd27:77a410be-1cf4-4992-8894-0c0bc67f5e48 \ - --meta use-vpc=True --meta instance_image_type=curve thrash-%s"%(config.image_id,config.avail_zone,salt) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + --meta use-vpc=True --meta instance_image_type=curve thrash-%s" % (config.image_id, config.avail_zone, salt) + rs = shell_operator.ssh_exec(ssh, ori_cmd) logger.info("exec cmd %s" % ori_cmd) - assert rs[3] == 0,"create vm fail,return is %s"%rs[1] - vm_name = "thrash-%s"%salt + assert rs[3] == 0, "create vm fail,return is %s" % rs[1] + vm_name = "thrash-%s" % salt starttime = time.time() - ori_cmd = "source OPENRC && nova list|grep %s|awk '{print $6}'"%vm_name + ori_cmd = "source OPENRC && nova list|grep %s|awk '{print $6}'" % vm_name while time.time() - starttime < 600: rs = shell_operator.ssh_exec(ssh, ori_cmd) if "".join(rs[1]).strip() == "ACTIVE": break elif "".join(rs[1]).strip() == "ERROR": - assert False,"create vm %s fail"%(vm_name) + assert False, "create vm %s fail" % (vm_name) else: time.sleep(10) if "".join(rs[1]).strip() != "ACTIVE": - assert False,"wait vm ok %s fail"%(vm_name) + assert False, "wait vm ok %s fail" % (vm_name) new_image_id = create_vm_image(vm_name) config.vm_prefix = vm_name - for i in range(1,num): + for i in range(1, num): ori_cmd = "source OPENRC && nova boot --flavor 400 --image %s --vnc-password 000000 --availability-zone %s \ --key-name cyh --nic vpc-net=ff89c80a-585d-4b19-992a-462f4d2ddd27:77a410be-1cf4-4992-8894-0c0bc67f5e48 \ - --meta use-vpc=True --meta instance_image_type=curve thrash-%s-%d"%(new_image_id,config.avail_zone,salt,i) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"create vm fail,return is %s"%rs[1] + --meta use-vpc=True --meta instance_image_type=curve thrash-%s-%d" % (new_image_id, config.avail_zone, salt, i) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "create vm fail,return is %s" % rs[1] starttime = time.time() while time.time() - starttime < 300: active_num = int(get_all_curvevm_active_num(num)) @@ -2067,28 +2320,32 @@ def init_create_curve_vm(num): break else: time.sleep(10) - assert active_num == num,"some vm are abnormal,%d is acitve"%active_num + assert active_num == num, "some vm are abnormal,%d is acitve" % active_num + def reboot_curve_vm(): - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - ori_cmd = "vm=`source OPENRC && nova list |grep %s |awk '{print $2}'`;source OPENRC;for i in $vm;do nova reboot $i;done "%config.vm_prefix - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"reboot curve vm fail" + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + ori_cmd = "vm=`source OPENRC && nova list |grep %s |awk '{print $2}'`;source OPENRC;for i in $vm;do nova reboot $i;done " % config.vm_prefix + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "reboot curve vm fail" + def clean_curve_data(): ori_cmd = "bash detach_thrash.sh" - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"detach thrash vol fail,rs is %s"%rs[1] - ori_cmd = "vm=`source OPENRC && nova list|grep %s | awk '{print $2}'`;source OPENRC;for i in 
$vm;do nova delete $i;done"%config.vm_prefix
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[3] == 0,"delete vm fail,rs is %s"%rs[1]
-    ori_cmd = "source OPENRC && nova image-list |grep image-%s | awk '{print $2}'"%config.vm_prefix
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
+    ssh = shell_operator.create_ssh_connect(
+        config.nova_host, 1046, config.nova_user)
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "detach thrash vol fail,rs is %s" % rs[1]
+    ori_cmd = "vm=`source OPENRC && nova list|grep %s | awk '{print $2}'`;source OPENRC;for i in $vm;do nova delete $i;done" % config.vm_prefix
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "delete vm fail,rs is %s" % rs[1]
+    ori_cmd = "source OPENRC && nova image-list |grep image-%s | awk '{print $2}'" % config.vm_prefix
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
     image_id = "".join(rs[1]).strip()
-    ori_cmd = "source OPENRC && nova image-delete %s"%(image_id)
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[3] == 0,"delete image fail,rs is %s"%rs
+    ori_cmd = "source OPENRC && nova image-delete %s" % (image_id)
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "delete image fail,rs is %s" % rs
     time.sleep(30)
     ori_cmd = "curve_ops_tool list -fileName=/nova |grep Total"
     rs = shell_operator.ssh_exec(ssh, ori_cmd)
@@ -2097,46 +2354,53 @@ def clean_curve_data():
     else:
         ori_cmd = "curve_ops_tool list -fileName=/nova"
         rs = shell_operator.ssh_exec(ssh, ori_cmd)
-        logger.error("No deleted files: %s"%rs[1])
-        assert False,"vm or image not be deleted"
+        logger.error("No deleted files: %s" % rs[1])
+        assert False, "vm or image not be deleted"
 
+
 def do_thrasher(action):
-    #start level1
+    # start level1
     if type(action) is types.StringType:
-        logger.debug("开始启动故障XXXXXXXXXXXXXXXXXXX %s XXXXXXXXXXXXXXXXXXXXXXXXX"%action)
+        logger.debug(
+            "Starting fault injection XXXXXXXXXXXXXXXXXXX %s XXXXXXXXXXXXXXXXXXXXXXXXX" % action)
        globals()[action]()
     else:
-        logger.debug("开始启动故障XXXXXXXXXXXXXXXXXXX %s,%s XXXXXXXXXXXXXXXXXXXXXX"%(action[0],str(action[1])))
+        logger.debug("Starting fault injection XXXXXXXXXXXXXXXXXXX %s,%s XXXXXXXXXXXXXXXXXXXXXX" % (
+            action[0], str(action[1])))
         globals()[action[0]](action[1])
 
+
 def start_retired_and_down_chunkservers():
     for host in config.chunkserver_list:
-        ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
+        ssh = shell_operator.create_ssh_connect(
+            host, 1046, config.abnormal_user)
         try:
-            cs_status = get_chunkserver_status(host)
-            down_cs = cs_status["down"]
-            if down_cs == []:
-                continue
-            logger.debug("down_cs is %s"%down_cs)
-            for cs in down_cs:
-                if get_cs_copyset_num(host,cs) == 0:
-                    ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat"%(cs)
-                    rs = shell_operator.ssh_exec(ssh, ori_cmd)
-                    assert rs[3] == 0,"rm chunkserver%d chunkserver.dat fail"%cs
-                    ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d"%cs
-                    logger.debug("exec %s"%ori_cmd)
-                    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-                    assert rs[3] == 0,"start chunkserver fail,error is %s"%rs[1]
-                    time.sleep(2)
-                    ori_cmd = "ps -ef|grep -v grep | grep -w chunkserver%d | awk '{print $2}' && \
+            cs_status = get_chunkserver_status(host)
+            down_cs = cs_status["down"]
+            if down_cs == []:
+                continue
+            logger.debug("down_cs is %s" % down_cs)
+            for cs in down_cs:
+                if get_cs_copyset_num(host, cs) == 0:
+                    ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat" % (
+                        cs)
+                    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+                    assert rs[3] == 0, "rm chunkserver%d chunkserver.dat fail" % cs
+                    ori_cmd = "sudo 
/home/nbs/chunkserver_ctl.sh start %d" % cs + logger.debug("exec %s" % ori_cmd) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "start chunkserver fail,error is %s" % rs[1] + time.sleep(2) + ori_cmd = "ps -ef|grep -v grep | grep -w chunkserver%d | awk '{print $2}' && \ ps -ef|grep -v grep | grep -w /etc/curve/chunkserver.conf.%d |grep -v sudo | awk '{print $2}'" % (cs, cs) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - if rs[1] == []: - assert False,"up chunkserver fail" + rs = shell_operator.ssh_exec(ssh, ori_cmd) + if rs[1] == []: + assert False, "up chunkserver fail" except: raise ssh.close() + def get_level_list(level): if level == "level1": return config.level1 diff --git a/robot/Resources/keywords/snapshot_operate.py b/robot/Resources/keywords/snapshot_operate.py index f21c2be296..d902cd0737 100644 --- a/robot/Resources/keywords/snapshot_operate.py +++ b/robot/Resources/keywords/snapshot_operate.py @@ -18,8 +18,9 @@ def create_curve_file_for_snapshot(file_name=config.snapshot_file_name, user_nam else: return rc + def create_curve_file_for_snapshot_delete(file_name="/lc-delete", user_name=config.user_name, size=config.size, - pass_word=config.pass_word): + pass_word=config.pass_word): curvefs = swig_operate.LibCurve() rc = curvefs.libcurve_create(file_name, user_name, size, pass_word) if rc != 0: @@ -28,9 +29,11 @@ def create_curve_file_for_snapshot_delete(file_name="/lc-delete", user_name=conf else: return rc + def delete_curve_file_for_shanpshot(): curvefs = swig_operate.LibCurve() - rc = curvefs.libcurve_delete(config.snapshot_file_name, config.user_name, config.pass_word) + rc = curvefs.libcurve_delete( + config.snapshot_file_name, config.user_name, config.pass_word) if rc != 0: logger.info("delete_curve_file_for_shanpshot file fail. rc = %s" % rc) return rc @@ -44,21 +47,25 @@ def write_curve_file_for_snapshot(file_name=config.snapshot_file_name, user_name buf=config.buf, offset=config.offset, length=config.length): curvefs = swig_operate.LibCurve() fd = curvefs.libcurve_open(file_name, user_name, pass_word) - logger.info("fd=%s, buf=%s, offset=%s, length=%s" % (fd, buf, offset, length)) + logger.info("fd=%s, buf=%s, offset=%s, length=%s" % + (fd, buf, offset, length)) rs = curvefs.libcurve_write(fd, buf, offset, length) if rs < 0: - logger.error("write_curve_file_for_snapshot libcurve_write file fail. rc = %s" % rs) + logger.error( + "write_curve_file_for_snapshot libcurve_write file fail. rc = %s" % rs) return rs raise AssertionError rc = curvefs.libcurve_close(fd) if rc != 0: - logger.info("write_curve_file_for_snapshot close libcurve file fail. rc = %s" % rc) + logger.info( + "write_curve_file_for_snapshot close libcurve file fail. 
rc = %s" % rc) return rc def read_4k_length_curve_file(): curvefs = swig_operate.LibCurve() - fd = curvefs.libcurve_open(config.snapshot_file_name, config.user_name, config.pass_word) + fd = curvefs.libcurve_open( + config.snapshot_file_name, config.user_name, config.pass_word) content = curvefs.libcurve_read(fd, "", 0, 4096) return content @@ -68,22 +75,26 @@ def modify_curve_file_for_snapshot(file_name=config.snapshot_file_name, user_nam curvefs = swig_operate.LibCurve() fd = curvefs.libcurve_open(file_name, user_name, pass_word) buf = "tttttttt" * 512 - logger.info("fd=%s, buf=%s, offset=%s, length=%s" % (fd, buf, offset, length)) + logger.info("fd=%s, buf=%s, offset=%s, length=%s" % + (fd, buf, offset, length)) rs = curvefs.libcurve_write(fd, buf, offset, length) if rs < 0: - logger.error("write_curve_file_for_snapshot libcurve_write file fail. rc = %s" % rs) + logger.error( + "write_curve_file_for_snapshot libcurve_write file fail. rc = %s" % rs) return rs raise AssertionError rc = curvefs.libcurve_close(fd) if rc != 0: - logger.info("write_curve_file_for_snapshot close libcurve file fail. rc = %s" % rc) + logger.info( + "write_curve_file_for_snapshot close libcurve file fail. rc = %s" % rc) return rc def snapshot_normal_create(file_name=config.snapshot_file_name, user_name=config.user_name, password=config.pass_word): client = snapshot_client.CurveSnapshot() seq = client.create_snapshot(file_name, user_name, password) - logger.info("create_curve_file_for_snapshot file and return seq.value = %s" % seq) + logger.info( + "create_curve_file_for_snapshot file and return seq.value = %s" % seq) return seq @@ -93,7 +104,8 @@ def snapshot_create_with_not_exist_file(file_name="/notexistfile", user_name=con rc = client.create_snapshot(file_name, user_name, password) logger.info("snapshot_create_with_not_exist_file , return rc = %s" % rc) if rc != 0: - logger.error("snapshot_create_with_not_exist_file file fail. rc = %s" % rc) + logger.error( + "snapshot_create_with_not_exist_file file fail. rc = %s" % rc) return rc @@ -103,25 +115,28 @@ def snapshot_create_with_empty_str_file(file_name=" ", user_name=config.user_nam rc = client.create_snapshot(file_name, user_name, password) logger.info("snapshot_create_with_empty_str_file , return rc = %s" % rc) if rc != 0: - logger.error("snapshot_create_with_empty_str_file file fail. rc = %s" % rc) + logger.error( + "snapshot_create_with_empty_str_file file fail. rc = %s" % rc) return rc -# "特殊字符`-=[];',./ ~!@#$%^&*()_+{}|:\"<>?" -# "特殊 字符`-=[]\\;',./ ~!@#$%^&*()_+{}|:\"<>?" +# "Special Characters`-=[];',./ ~!@#$%^&*()_+{}|:\"<>?" +# "Special Characters`-=[]\\;',./ ~!@#$%^&*()_+{}|:\"<>?" def snapshot_create_with_special_file_name(file_name="/特殊 字符`-=[]\\;',./ ~!@#$%^&*()_+{}|:\"<>?", user_name=config.user_name, password=config.pass_word): client = snapshot_client.CurveSnapshot() rc = client.create_snapshot(file_name, user_name, password) logger.info("snapshot_create_with_special_file_name , return rc = %s" % rc) if rc != 0: - logger.error("snapshot_create_with_special_file_name file fail. rc = %s" % rc) + logger.error( + "snapshot_create_with_special_file_name file fail. 
rc = %s" % rc) return rc def get_sanpshot_info(seq): client = snapshot_client.CurveSnapshot() - finfo = client.get_snapshot(config.snapshot_file_name, config.user_name, config.pass_word, seq) + finfo = client.get_snapshot( + config.snapshot_file_name, config.user_name, config.pass_word, seq) # logger.info("get_sanpshot_info , file snapshot info.status = %s, owner = %s, filename = %s, " # "length = %s, chunksize = %s, seqnum = %s, segmentsize = %s , parentid = %s, " # "filetype = %s, ctime = %s" % ( @@ -131,25 +146,28 @@ def get_sanpshot_info(seq): return finfo -# 创建并获取快照文件信息 +# Create and obtain snapshot file information def create_snapshot_and_get_snapshot_info(file_name=config.snapshot_file_name, user_name=config.user_name, password=config.pass_word): client = snapshot_client.CurveSnapshot() seq = client.create_snapshot(file_name, user_name, password) - logger.info("create_snapshot_and_get_snapshot_info create snapshot success. seq = %s" % seq.value) + logger.info( + "create_snapshot_and_get_snapshot_info create snapshot success. seq = %s" % seq.value) finfo = client.get_snapshot(file_name, user_name, password, seq) return finfo -# 正常获取快照文件分配信息 +# Obtain snapshot file allocation information normally def get_normal_snapshot_segment_info(file_name=config.snapshot_file_name, user_name=config.user_name, password=config.pass_word): seq = snapshot_normal_create(file_name, user_name, password) client = snapshot_client.CurveSnapshot() offset = curvesnapshot.type_uInt64_t() offset.value = 0 - seginfo = client.get_snapshot_SegmentInfo(file_name, user_name, password, seq, offset) - logger.info("get_normal_snapshot_segment_info seq = %s, seginfo = %s" % seq % seginfo) + seginfo = client.get_snapshot_SegmentInfo( + file_name, user_name, password, seq, offset) + logger.info( + "get_normal_snapshot_segment_info seq = %s, seginfo = %s" % seq % seginfo) return seginfo @@ -159,7 +177,7 @@ def get_normal_chunk_info(file_name=config.snapshot_file_name, user_name=config. 
client = snapshot_client.CurveSnapshot() chunkinfo = client.get_chunk_Info(seginfo.chunkvec[0]) logger.info("get_normal_chunkInfo chunkInfo info = %s" % chunkinfo) - return chunkinfo # 可以对chunInfo.chunkSn进行断言验证 + return chunkinfo # Can perform assertion validation on chunInfo.chunkSn def get_chunk_info_with_chunk_id_info(idinfo): @@ -175,7 +193,7 @@ def get_snapshot_first_segment_info(seq): offset = curvesnapshot.type_uInt64_t() offset.value = 0 seginfo = client.get_snapshot_SegmentInfo(config.snapshot_file_name, config.user_name, config.pass_word, seq, - offset) + offset) # logger.info( # "get_snapshot_first_segment_info seq = %s, segmsize = %s, chunksize = %s, startoffset = %s, chunkvecsize = %s, " # % ( @@ -220,7 +238,8 @@ def read_chunk_snapshot(idinfo, seq): buf = "tttttttt" * 512 rc = client.read_chunk_snapshot(idinfo, seq, offset, len, buf) if rc != len.value: - logger.info("read_chunk_snapshot fail , expect len = %s, real len = %s" % (len.value, rc)) + logger.info( + "read_chunk_snapshot fail , expect len = %s, real len = %s" % (len.value, rc)) return rc logger.info("read_chunk_snapshot ,return buf = %s" % buf) return buf @@ -228,14 +247,16 @@ def read_chunk_snapshot(idinfo, seq): def check_snapshot_status(seq): client = snapshot_client.CurveSnapshot() - status = client.check_snapshot_status(config.snapshot_file_name, config.user_name, config.pass_word, seq) + status = client.check_snapshot_status( + config.snapshot_file_name, config.user_name, config.pass_word, seq) logger.info("check_snapshot_status rc = %s " % status) return status def delete_file_snapshot(seq): client = snapshot_client.CurveSnapshot() - rc = client.delete_snapshot(config.snapshot_file_name, config.user_name, config.pass_word, seq) + rc = client.delete_snapshot( + config.snapshot_file_name, config.user_name, config.pass_word, seq) return rc @@ -253,7 +274,8 @@ def create_clone_chunk_with_s3_object(chunkidinfo): seq.value = 1 correctseq = curvesnapshot.type_uInt64_t() correctseq.value = 0 - rc = client.create_clone_chunk(config.snapshot_s3_object_location, idinfo, seq, correctseq, chunksize) + rc = client.create_clone_chunk( + config.snapshot_s3_object_location, idinfo, seq, correctseq, chunksize) return rc diff --git a/robot/curve_choas.txt b/robot/curve_choas.txt index ff39c335e5..0f9b389152 100644 --- a/robot/curve_choas.txt +++ b/robot/curve_choas.txt @@ -37,7 +37,7 @@ test one volume perf stop rwio perf test -#启动大压力情况下的混沌测试:分等级进行随机故障注入。每次注入完成后恢复集群所有业务,目前设置100次的全流程注入 +# Conduct chaos testing under high stress: Inject faults of various levels randomly. Restore all cluster operations after each injection. 
Currently set for 100 rounds of full injection inject cluster chaos test [Tags] P2 chaos longtime @@ -47,17 +47,17 @@ inject cluster chaos test ${num} evaluate int(10) init create curve vm ${num} :FOR ${i} IN RANGE 10 - log "启动第"${i}"轮故障" + log "Starting Round "${i}" of Fault Injection" ${choas1} evaluate random.choice($choas_level1) random - log "开始启动一级故障" + log "Starting Level 1 Fault" do thrasher ${choas1} sleep 30 ${choas2} evaluate random.choice($choas_level2) random - log "开始启动二级故障" + log "Starting Level 2 Fault" do thrasher ${choas2} sleep 30 ${choas3} evaluate random.choice($choas_level3) random - log "开始启动三级故障" + log "Starting Level 3 Fault" do thrasher ${choas3} sleep 30 clean env diff --git a/robot/curve_robot.txt b/robot/curve_robot.txt index 8709a96b6e..9f49ca2caa 100644 --- a/robot/curve_robot.txt +++ b/robot/curve_robot.txt @@ -1628,7 +1628,7 @@ test kill chunkserver one check loop read ${new_fd} [Teardown] file clean ${new_fd} -# create snapshot 相关用例 +# Create snapshot related use cases create snapshot with notexist file [Tags] P0 base first release test-snapshot @@ -1698,7 +1698,7 @@ create snapshot with nomal file and check first chunk snapshot [Teardown] delete curve file for shanpshot -# 创建文件->写文件->创建快照->修改文件->读快照验证(修改前数据)->删除重新快照->验证快照数据(修改后数据) +# Create file ->Write file ->Create snapshot ->Modify file ->Read snapshot verification (data before modification) ->Delete re snapshot ->Verify snapshot data (data after modification) create snapshot and check chunk snapshot after cow [Tags] P0 base first release test-snapshot ${rc} create curve file for snapshot @@ -1744,7 +1744,7 @@ create snapshot repeat should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# getsnapshot info 用例 +# Getsnapshot info use case get empty file snapshot info [Tags] P0 base first release test-snapshot @@ -1871,7 +1871,7 @@ delete snapshoting curve file should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# getsegmentinfo 相关用例 +# Use cases related to getsegmentinfo check snapshot segmentinfo after modify file [Tags] P0 base first release test-snapshot @@ -1981,7 +1981,7 @@ get empty file snapshot segmentinfo should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# snapshot chunkinfo 用例验证 +# Snapshot chunkinfo use case validation check empty file snapshot chunkinfo after modify file [Tags] P0 base first release test-snapshot @@ -2038,10 +2038,10 @@ get snapshot chunkinfo with notexist chunidinfo write curve file for snapshot ${seq} snapshot normal create ${seginfo} get snapshot first segment info ${seq} - # 修改chunkidinfo + # Modify chunkidinfo ${seginfo.chunkvec[0].cpid_.value} evaluate int(66) ${chunkinfo} get chunk info with chunk id info ${seginfo.chunkvec[0]} - #TODO: 此处需要判断错误,当前是死循环,不停轮询查询id信息 + # TODO: An error needs to be determined here. 
Currently, it is a dead loop and constantly polls for ID information ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} @@ -2063,7 +2063,7 @@ check snapshot chunkinfo after delete snapshot ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} - # 此处应该再重新获取下segmentinfo, chunkvec[0]应该不存在 + # We should retrieve segmentinfo again here, chunkvec [0] should not exist ${chunkinfo} get chunk info with chunk id info ${seginfo.chunkvec[0]} should be equal ${chunkinfo.snSize.value} ${expect_size} should be equal ${chunkinfo.chunkSn[0]} ${expect_first_sn} @@ -2071,7 +2071,7 @@ check snapshot chunkinfo after delete snapshot [Teardown] delete curve file for shanpshot -# read snapshot chunk 用例 CLDCFS-1249 +# Read snapshot chunk use case CLDCFS-1249 read snapshot chunk with notexist idinfo [Tags] P0 base first release no-need @@ -2081,10 +2081,10 @@ read snapshot chunk with notexist idinfo write curve file for snapshot ${seq} snapshot normal create ${seginfo} get snapshot first segment info ${seq} - # 修改chunkidinfo + # Modify chunkidinfo ${seginfo.chunkvec[0].cpid_.value} evaluate int(66) ${content} read chunk snapshot ${seginfo.chunkvec[0]} ${seq} - # TODO:当前客户端死循环打印错误,此处校验结果应该返回错误 + # TODO: The current client has a loop printing error, and the verification result should return an error here ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} @@ -2101,7 +2101,7 @@ read snapshot chunk with error seq ${seginfo} get snapshot first segment info ${seq} ${seq.value} evaluate int(8) ${content} read chunk snapshot ${seginfo.chunkvec[0]} ${seq} - # 此处校验结果应该返回错误 + # The verification result should return an error here ${expect_rst} evaluate int(-6) should be equal ${content} ${expect_rst} ${seq.value} evaluate int(1) @@ -2110,7 +2110,7 @@ read snapshot chunk with error seq should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# 查询快照状态用例 +# Query snapshot status use case check empty file snapshot status [Tags] P0 base first release test-snapshot ${rc} create curve file for snapshot @@ -2197,7 +2197,7 @@ check snapshot status use error seq [Teardown] delete curve file for shanpshot -# 删除快照相关用例 +# Delete snapshot related use cases repeat delete snapshot [Tags] P0 base first release test-snapshot ${rc} create curve file for snapshot @@ -2230,7 +2230,7 @@ delete snapshot use error seq should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# 删除chunk快照(当前无限重试,需要调用方设置重试次数) CLDCFS-1254 +# Delete chunk snapshot (currently infinite retries, caller needs to set retry count) CLDCFS-1254 delete chunk snapshot with snapshot seq [Tags] P0 base first release no-need ${rc} create curve file for snapshot @@ -2243,7 +2243,7 @@ delete chunk snapshot with snapshot seq ${rc} delete chunk snapshot with correct sn ${seginfo.chunkvec[0]} ${seq} should be equal ${rc} ${expect_rc} ${content} read chunk snapshot ${seginfo.chunkvec[0]} ${seq} - # 此处判断返回结果是否为错误 + # Determine whether the returned result is an error here ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} @@ -2291,8 +2291,8 @@ repeat delete chunk snapshot [Teardown] delete curve file for shanpshot -# 创建clone&recover -# 步骤:创建文件、写文件、创建快照记录seq,触发cow,获取快照信息(版本号),createclonechunk(指定s3上对象,correctedseq=快照seq),恢复快照,验证chunk数据是否为s3数据 +# Create clone&recover +# Steps: Create a file, write a file, create a snapshot record seq, trigger Cow, obtain snapshot information (version number), create clonechunk (specify an object on s3, 
correctedseq=snapshot seq), restore the snapshot, and verify that the chunk data is the s3 data
 create clone and recover chunk
     [Tags]  P0  base  first  release  test-snapshot
     ${rc}    create curve file for snapshot
@@ -2311,7 +2311,7 @@ create clone and recover chunk
     should be equal  ${rc}  ${expect_rc}
     ${rc}    recover chunk data    ${seginfo.chunkvec[0]}
     should be equal  ${rc}  ${expect_rc}
-    # check数据
+    # Check data
     ${content}   read chunk snapshot  ${seginfo.chunkvec[0]}   ${seq}
     ${expect_content}  evaluate  str("aaaaaaaa")*512
     should be equal  ${content}  ${expect_content}
diff --git a/src/chunkserver/chunk_closure.cpp b/src/chunkserver/chunk_closure.cpp
index 09b259ae7e..ae00f97a66 100644
--- a/src/chunkserver/chunk_closure.cpp
+++ b/src/chunkserver/chunk_closure.cpp
@@ -21,6 +21,7 @@
  */
 
 #include "src/chunkserver/chunk_closure.h"
+
 #include 
 
 namespace curve {
@@ -28,21 +29,22 @@ namespace chunkserver {
 
 void ChunkClosure::Run() {
     /**
-     * 在Run结束之后,自动析构自己,这样可以避免
-     * 析构函数漏调
+     * Destroy ourselves automatically once Run() finishes, so the
+     * destructor can never be missed
      */
     std::unique_ptr selfGuard(this);
     /**
-     * 确保done能够被调用,目的是保证rpc一定会返回
+     * Make sure done is invoked, so the rpc is guaranteed to return
      */
     brpc::ClosureGuard doneGuard(request_->Closure());
     /**
-     * 尽管在request propose给copyset的之前已经
-     * 对leader身份进行了确认,但是在copyset处理
-     * request的时候,当前copyset的身份还是有可能
-     * 变成非leader,所以需要判断ChunkClosure被调
-     * 用的时候,request的status,如果 ok,说明是
-     * 正常的apply处理,否则将请求转发
+     * Although the leader identity was confirmed before the request was
+     * proposed to the copyset, the copyset may still step down to a
+     * non-leader while the request is being processed. So the request
+     * status must be checked when ChunkClosure is invoked: if it is ok,
+     * this was a normal apply; otherwise the request is forwarded
      */
     if (status().ok()) {
         return;
     }
@@ -61,13 +63,13 @@ void ScanChunkClosure::Run() {
         case CHUNK_OP_STATUS_CHUNK_NOTEXIST:
             LOG(WARNING) << "scan chunk failed, read chunk not exist. "
                          << request_->ShortDebugString();
-        break;
+            break;
         case CHUNK_OP_STATUS_FAILURE_UNKNOWN:
             LOG(ERROR) << "scan chunk failed, read chunk unknown failure. "
                        << request_->ShortDebugString();
-        break;
-        default:
-        break;
+            break;
+        default:
+            break;
     }
 }
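The two guards at the top of ChunkClosure::Run encode a lifetime contract: the closure frees itself when Run returns, and the brpc done callback fires on every exit path. A rough Python analogue of that contract, using try/finally where the C++ uses unique_ptr and brpc::ClosureGuard; run_closure and forward_request are illustrative names only, not Curve API:

    def run_closure(request, done):
        try:
            if request.status_ok:
                return                 # normal apply path; finally still runs
            forward_request(request)   # hypothetical redirect after leadership loss
        finally:
            done()                     # plays the role of brpc::ClosureGuard
            # The unique_ptr self-destruction has no direct analogue here;
            # the closure object is simply left to garbage collection.
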
diff --git a/src/chunkserver/chunk_closure.h b/src/chunkserver/chunk_closure.h
index e2d76b7174..6700527c26 100755
--- a/src/chunkserver/chunk_closure.h
+++ b/src/chunkserver/chunk_closure.h
@@ -24,20 +24,23 @@
 #define SRC_CHUNKSERVER_CHUNK_CLOSURE_H_
 
 #include 
+
 #include 
 
-#include "src/chunkserver/op_request.h"
 #include "proto/chunk.pb.h"
+#include "src/chunkserver/op_request.h"
 
 namespace curve {
 namespace chunkserver {
 
 /**
- * 携带op request的所有上下文的closure,通过braft::Task传递给raft处理,
- * 调用会有两个地方:
- * 1.op request正常的被raft处理,最后on apply的时候会调用返回
- * 2.op request被打包给raft处理之后,但是还没有来得及处理就出错了,例如leader
- * step down变为了非leader,那么会明确的提前向client返回错误
+ * A closure that carries the full context of an op request and is handed to
+ * raft through braft::Task. It is invoked in two situations:
+ * 1. The op request is processed by raft normally, and the closure is
+ *    called back on apply
+ * 2. The op request was packaged for raft but an error occurred before it
+ *    could be processed, e.g. the leader stepped down to a non-leader; in
+ *    that case an error is returned to the client explicitly and early
 */
 class ChunkClosure : public braft::Closure {
 public:
@@ -49,37 +52,37 @@ class ChunkClosure : public braft::Closure {
     void Run() override;
 
 public:
-    // 包含了op request 的上下文信息
+    // Contains the context of the op request
     std::shared_ptr request_;
 };
 
 class ScanChunkClosure : public google::protobuf::Closure {
 public:
-    ScanChunkClosure(ChunkRequest *request, ChunkResponse *response) :
-        request_(request), response_(response) {}
+    ScanChunkClosure(ChunkRequest* request, ChunkResponse* response)
+        : request_(request), response_(response) {}
 
     ~ScanChunkClosure() = default;
 
     void Run() override;
 
 public:
-    ChunkRequest *request_;
-    ChunkResponse *response_;
+    ChunkRequest* request_;
+    ChunkResponse* response_;
 };
 
 class SendScanMapClosure : public google::protobuf::Closure {
 public:
-    SendScanMapClosure(FollowScanMapRequest * request,
-                       FollowScanMapResponse *response,
-                       uint64_t timeout,
-                       uint32_t retry,
-                       uint64_t retryIntervalUs,
-                       brpc::Controller* cntl,
-                       brpc::Channel *channel) :
-        request_(request), response_(response),
-        rpcTimeoutMs_(timeout), retry_(retry),
-        retryIntervalUs_(retryIntervalUs),
-        cntl_(cntl), channel_(channel) {}
+    SendScanMapClosure(FollowScanMapRequest* request,
+                       FollowScanMapResponse* response, uint64_t timeout,
+                       uint32_t retry, uint64_t retryIntervalUs,
+                       brpc::Controller* cntl, brpc::Channel* channel)
+        : request_(request),
+          response_(response),
+          rpcTimeoutMs_(timeout),
+          retry_(retry),
+          retryIntervalUs_(retryIntervalUs),
+          cntl_(cntl),
+          channel_(channel) {}
 
     ~SendScanMapClosure() = default;
 
@@ -89,13 +92,13 @@ class SendScanMapClosure : public google::protobuf::Closure {
     void Guard();
 
 public:
-    FollowScanMapRequest *request_;
-    FollowScanMapResponse *response_;
+    FollowScanMapRequest* request_;
+    FollowScanMapResponse* response_;
     uint64_t rpcTimeoutMs_;
     uint32_t retry_;
     uint64_t retryIntervalUs_;
-    brpc::Controller *cntl_;
-    brpc::Channel *channel_;
+    brpc::Controller* cntl_;
+    brpc::Channel* channel_;
 };
 
 }  // namespace chunkserver
diff --git a/src/chunkserver/chunk_service.cpp b/src/chunkserver/chunk_service.cpp
index d6e9034641..85d3d241a5 100755
--- a/src/chunkserver/chunk_service.cpp
+++ b/src/chunkserver/chunk_service.cpp
@@ -22,31 +22,30 @@
 
 #include "src/chunkserver/chunk_service.h"
 
-#include 
 #include 
 #include 
+#include 
 
-#include 
 #include 
+#include 
 #include 
 
+#include "include/curve_compiler_specific.h"
+#include "src/chunkserver/chunk_service_closure.h"
+#include "src/chunkserver/chunkserver_metrics.h"
 #include "src/chunkserver/copyset_node.h"
 #include "src/chunkserver/copyset_node_manager.h"
-#include "src/chunkserver/chunkserver_metrics.h"
 #include "src/chunkserver/op_request.h"
-#include "src/chunkserver/chunk_service_closure.h"
 #include "src/common/fast_align.h"
 
-#include "include/curve_compiler_specific.h"
-
 namespace curve {
 namespace chunkserver {
 
 using ::curve::common::is_aligned;
 
 ChunkServiceImpl::ChunkServiceImpl(
-        const ChunkServiceOptions& chunkServiceOptions,
-        const std::shared_ptr& epochMap)
+    const ChunkServiceOptions& chunkServiceOptions,
+    const std::shared_ptr& epochMap)
     : chunkServiceOptions_(chunkServiceOptions),
       copysetNodeManager_(chunkServiceOptions.copysetNodeManager),
       inflightThrottle_(chunkServiceOptions.inflightThrottle),
@@ -55,15 +54,11 @@ ChunkServiceImpl::ChunkServiceImpl(
     maxChunkSize_ = 
copysetNodeManager_->GetCopysetNodeOptions().maxChunkSize; } -void ChunkServiceImpl::DeleteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::DeleteChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -76,7 +71,7 @@ void ChunkServiceImpl::DeleteChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -86,24 +81,17 @@ void ChunkServiceImpl::DeleteChunk(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::WriteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::WriteChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -116,11 +104,11 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); DVLOG(9) << "Get write I/O request, op: " << request->optype() - << " offset: " << request->offset() - << " size: " << request->size() << " buf header: " - << *(unsigned int *) cntl->request_attachment().to_string().c_str() + << " offset: " << request->offset() << " size: " << request->size() + << " buf header: " + << *(unsigned int*)cntl->request_attachment().to_string().c_str() << " attachement size " << cntl->request_attachment().size(); if (request->has_epoch()) { @@ -134,7 +122,7 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, } } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); DVLOG(9) << "I/O request, op: " << request->optype() @@ -144,7 +132,7 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -154,24 +142,18 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + response, doneGuard.release()); req->Process(); } -void 
ChunkServiceImpl::CreateCloneChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::CreateCloneChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, + Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -184,7 +166,8 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - // 请求创建的chunk大小和copyset配置的大小不一致 + // The chunk size requested for creation does not match the size configured + // for copyset if (request->size() != maxChunkSize_) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); DVLOG(9) << "Invalid chunk size: " << request->optype() @@ -193,7 +176,7 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -203,19 +186,15 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared( + nodePtr, controller, request, response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::CreateS3CloneChunk(RpcController* controller, - const CreateS3CloneChunkRequest* request, - CreateS3CloneChunkResponse* response, - Closure* done) { +void ChunkServiceImpl::CreateS3CloneChunk( + RpcController* controller, const CreateS3CloneChunkRequest* request, + CreateS3CloneChunkResponse* response, Closure* done) { (void)controller; (void)request; brpc::ClosureGuard doneGuard(done); @@ -223,15 +202,11 @@ void ChunkServiceImpl::CreateS3CloneChunk(RpcController* controller, LOG(INFO) << "Invalid request, serverSide Not implement yet"; } -void ChunkServiceImpl::ReadChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::ReadChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -244,7 +219,7 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "I/O request, op: " << request->optype() @@ -254,7 +229,7 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -264,25 +239,17 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, 
return; } - std::shared_ptr req = - std::make_shared(nodePtr, - chunkServiceOptions_.cloneManager, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = std::make_shared( + nodePtr, chunkServiceOptions_.cloneManager, controller, request, + response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::RecoverChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::RecoverChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -295,7 +262,7 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "I/O request, op: " << request->optype() @@ -305,7 +272,7 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -315,26 +282,19 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // RecoverChunk请求和ReadChunk请求共用ReadChunkRequest - std::shared_ptr req = - std::make_shared(nodePtr, - chunkServiceOptions_.cloneManager, - controller, - request, - response, - doneGuard.release()); + // RecoverChunk request and ReadChunk request share ReadChunkRequest + std::shared_ptr req = std::make_shared( + nodePtr, chunkServiceOptions_.cloneManager, controller, request, + response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::ReadChunkSnapshot(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, + Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -347,13 +307,13 @@ void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -363,25 +323,17 @@ void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + 
response, doneGuard.release());
     req->Process();
 }
 
 void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn(
-    RpcController *controller,
-    const ChunkRequest *request,
-    ChunkResponse *response,
-    Closure *done) {
-    ChunkServiceClosure* closure =
-        new (std::nothrow) ChunkServiceClosure(inflightThrottle_,
-                                               request,
-                                               response,
-                                               done);
+    RpcController* controller, const ChunkRequest* request,
+    ChunkResponse* response, Closure* done) {
+    ChunkServiceClosure* closure = new (std::nothrow)
+        ChunkServiceClosure(inflightThrottle_, request, response, done);
     CHECK(nullptr != closure) << "new chunk service closure failed";
     brpc::ClosureGuard doneGuard(closure);
@@ -401,7 +353,7 @@ void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn(
         return;
     }
 
-    // 判断copyset是否存在
+    // Determine if the copyset exists
     auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(),
                                                        request->copysetid());
     if (nullptr == nodePtr) {
@@ -412,31 +364,26 @@ void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn(
         return;
     }
 
-    std::shared_ptr
-        req = std::make_shared(nodePtr,
-                               controller,
-                               request,
-                               response,
-                               doneGuard.release());
+    std::shared_ptr req =
+        std::make_shared(nodePtr, controller, request,
+                         response, doneGuard.release());
     req->Process();
 }
 
 /**
- * 当前GetChunkInfo在rpc service层定义和Chunk Service分离的,
- * 且其并不经过QoS或者raft一致性协议,所以这里没有让其继承
- * OpRequest或者QoSRequest来重新封装,而是直接原地处理掉了
+ * GetChunkInfo is currently defined at the rpc service layer, separate from
+ * Chunk Service, and it goes through neither QoS nor the raft consensus
+ * protocol. For that reason it is not re-wrapped by inheriting from
+ * OpRequest or QoSRequest; it is handled directly in place
 */
-void ChunkServiceImpl::GetChunkInfo(RpcController *controller,
-                                    const GetChunkInfoRequest *request,
-                                    GetChunkInfoResponse *response,
-                                    Closure *done) {
+void ChunkServiceImpl::GetChunkInfo(RpcController* controller,
+                                    const GetChunkInfoRequest* request,
+                                    GetChunkInfoResponse* response,
+                                    Closure* done) {
     (void)controller;
-    ChunkServiceClosure* closure =
-        new (std::nothrow) ChunkServiceClosure(inflightThrottle_,
-                                               nullptr,
-                                               nullptr,
-                                               done);
+    ChunkServiceClosure* closure = new (std::nothrow)
+        ChunkServiceClosure(inflightThrottle_, nullptr, nullptr, done);
     CHECK(nullptr != closure) << "new chunk service closure failed";
     brpc::ClosureGuard doneGuard(closure);
@@ -449,10 +396,9 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller,
         return;
     }
 
-    // 判断copyset是否存在
-    auto nodePtr =
-        copysetNodeManager_->GetCopysetNode(request->logicpoolid(),
-                                            request->copysetid());
+    // Determine if the copyset exists
+    auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(),
+                                                       request->copysetid());
     if (nullptr == nodePtr) {
         response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST);
         LOG(WARNING) << "GetChunkInfo failed, copyset node is not found: "
@@ -460,7 +406,7 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller,
         return;
     }
 
-    // 检查任期和自己是不是Leader
+    // Check the term and whether this node is the leader
     if (!nodePtr->IsLeaderTerm()) {
         PeerId leader = nodePtr->GetLeaderId();
         if (!leader.is_empty()) {
@@ -476,16 +422,15 @@
     ret = nodePtr->GetDataStore()->GetChunkInfo(request->chunkid(), &chunkInfo);
     if (CSErrorCode::Success == ret) {
-        // 1.成功,此时chunk文件肯定存在
+        // 1. 
Success, the chunk file must exist at this time response->add_chunksn(chunkInfo.curSn); - if (chunkInfo.snapSn > 0) - response->add_chunksn(chunkInfo.snapSn); + if (chunkInfo.snapSn > 0) response->add_chunksn(chunkInfo.snapSn); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - // 2.chunk文件不存在,返回的版本集合为空 + // 2. Chunk file does not exist, returned version set is empty response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else { - // 3.其他错误 + // 3. Other errors LOG(ERROR) << "get chunk info failed, " << " logic pool id: " << request->logicpoolid() << " copyset id: " << request->copysetid() @@ -497,14 +442,14 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, } } -void ChunkServiceImpl::GetChunkHash(RpcController *controller, - const GetChunkHashRequest *request, - GetChunkHashResponse *response, - Closure *done) { +void ChunkServiceImpl::GetChunkHash(RpcController* controller, + const GetChunkHashRequest* request, + GetChunkHashResponse* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->length())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "GetChunkHash illegal parameter:" @@ -517,10 +462,9 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, return; } - // 判断copyset是否存在 - auto nodePtr = - copysetNodeManager_->GetCopysetNode(request->logicpoolid(), - request->copysetid()); + // Determine if the copyset exists + auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), + request->copysetid()); if (nullptr == nodePtr) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); LOG(WARNING) << "GetChunkHash failed, copyset node is not found: " @@ -531,21 +475,19 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, CSErrorCode ret; std::string hash; - ret = nodePtr->GetDataStore()->GetChunkHash(request->chunkid(), - request->offset(), - request->length(), - &hash); + ret = nodePtr->GetDataStore()->GetChunkHash( + request->chunkid(), request->offset(), request->length(), &hash); if (CSErrorCode::Success == ret) { - // 1.成功 + // 1. Success response->set_hash(hash); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - // 2.chunk文件不存在,返回0的hash值 + // 2. Chunk file does not exist, return a hash value of 0 response->set_hash("0"); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else { - // 3.其他错误 + // 3. 
Other errors LOG(ERROR) << "get chunk hash failed, " << " logic pool id: " << request->logicpoolid() << " copyset id: " << request->copysetid() @@ -557,18 +499,17 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, } } -void ChunkServiceImpl::UpdateEpoch(RpcController *controller, - const UpdateEpochRequest *request, - UpdateEpochResponse *response, - Closure *done) { +void ChunkServiceImpl::UpdateEpoch(RpcController* controller, + const UpdateEpochRequest* request, + UpdateEpochResponse* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); bool success = epochMap_->UpdateEpoch(request->fileid(), request->epoch()); if (success) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); LOG(INFO) << "Update fileId: " << request->fileid() - << " to epoch: " << request->epoch() - << " success."; + << " to epoch: " << request->epoch() << " success."; } else { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD); LOG(WARNING) << "Update fileId: " << request->fileid() @@ -579,7 +520,7 @@ void ChunkServiceImpl::UpdateEpoch(RpcController *controller, bool ChunkServiceImpl::CheckRequestOffsetAndLength(uint32_t offset, uint32_t len) const { - // 检查offset+len是否越界 + // Check if offset+len is out of range if (CURVE_UNLIKELY(offset + len > maxChunkSize_)) { return false; } diff --git a/src/chunkserver/chunk_service.h b/src/chunkserver/chunk_service.h index e15aea389b..04e37feac9 100755 --- a/src/chunkserver/chunk_service.h +++ b/src/chunkserver/chunk_service.h @@ -23,9 +23,9 @@ #ifndef SRC_CHUNKSERVER_CHUNK_SERVICE_H_ #define SRC_CHUNKSERVER_CHUNK_SERVICE_H_ -#include #include #include +#include #include "proto/chunk.pb.h" #include "src/chunkserver/config_info.h" @@ -34,84 +34,71 @@ namespace curve { namespace chunkserver { -using ::google::protobuf::RpcController; using ::google::protobuf::Closure; +using ::google::protobuf::RpcController; class CopysetNodeManager; class ChunkServiceImpl : public ChunkService { public: explicit ChunkServiceImpl(const ChunkServiceOptions& chunkServiceOptions, - const std::shared_ptr &epochMap); + const std::shared_ptr& epochMap); ~ChunkServiceImpl() {} - void DeleteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void ReadChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void WriteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void ReadChunkSnapshot(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void DeleteChunkSnapshotOrCorrectSn(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void CreateCloneChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); + void DeleteChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void ReadChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void WriteChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void ReadChunkSnapshot(RpcController* controller, + const ChunkRequest* request, ChunkResponse* response, + Closure* done); + + void DeleteChunkSnapshotOrCorrectSn(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, 
Closure* done);
+
+    void CreateCloneChunk(RpcController* controller,
+                          const ChunkRequest* request, ChunkResponse* response,
+                          Closure* done);
 
     void CreateS3CloneChunk(RpcController* controller,
-                        const CreateS3CloneChunkRequest* request,
-                        CreateS3CloneChunkResponse* response,
-                        Closure* done);
-    void RecoverChunk(RpcController *controller,
-                      const ChunkRequest *request,
-                      ChunkResponse *response,
-                      Closure *done);
-
-    void GetChunkInfo(RpcController *controller,
-                      const GetChunkInfoRequest *request,
-                      GetChunkInfoResponse *response,
-                      Closure *done);
-
-    void GetChunkHash(RpcController *controller,
-                      const GetChunkHashRequest *request,
-                      GetChunkHashResponse *response,
-                      Closure *done);
-
-    void UpdateEpoch(RpcController *controller,
-                     const UpdateEpochRequest *request,
-                     UpdateEpochResponse *response,
-                     Closure *done);
+                            const CreateS3CloneChunkRequest* request,
+                            CreateS3CloneChunkResponse* response,
+                            Closure* done);
+    void RecoverChunk(RpcController* controller, const ChunkRequest* request,
+                      ChunkResponse* response, Closure* done);
+
+    void GetChunkInfo(RpcController* controller,
+                      const GetChunkInfoRequest* request,
+                      GetChunkInfoResponse* response, Closure* done);
+
+    void GetChunkHash(RpcController* controller,
+                      const GetChunkHashRequest* request,
+                      GetChunkHashResponse* response, Closure* done);
+
+    void UpdateEpoch(RpcController* controller,
+                     const UpdateEpochRequest* request,
+                     UpdateEpochResponse* response, Closure* done);
 
 private:
     /**
-     * 验证op request的offset和length是否越界和对齐
+     * Verify that the offset and length of the op request are in range and
+     * aligned
      * @param offset[in]: op request' offset
     * @param len[in]: op request' length
-     * @return true,说明合法,否则返回false
+     * @return true if the request is valid, otherwise false
      */
     bool CheckRequestOffsetAndLength(uint32_t offset, uint32_t len) const;
 
 private:
     ChunkServiceOptions chunkServiceOptions_;
-    CopysetNodeManager *copysetNodeManager_;
+    CopysetNodeManager* copysetNodeManager_;
     std::shared_ptr inflightThrottle_;
-    uint32_t maxChunkSize_;
+    uint32_t maxChunkSize_;
     std::shared_ptr epochMap_;
 
     uint32_t blockSize_;
diff --git a/src/chunkserver/chunk_service_closure.cpp b/src/chunkserver/chunk_service_closure.cpp
index d680b37d93..fca11199f5 100644
--- a/src/chunkserver/chunk_service_closure.cpp
+++ b/src/chunkserver/chunk_service_closure.cpp
@@ -21,6 +21,7 @@
  */
 
 #include "src/chunkserver/chunk_service_closure.h"
+
 #include 
 
 #include "src/chunkserver/chunkserver_metrics.h"
@@ -30,55 +31,52 @@ namespace chunkserver {
 
 void ChunkServiceClosure::Run() {
     /**
-     * 在Run结束之后,自动析构自己,这样可以避免
-     * 析构函数漏调
+     * Destroy ourselves automatically once Run() finishes, so the
+     * destructor can never be missed
      */
     std::unique_ptr selfGuard(this);
 
     {
-        // 所有brpcDone_调用之前要做的操作都放到这个生命周期内
+        // Everything that must happen before brpcDone_ is invoked is
+        // scoped inside this block
         brpc::ClosureGuard doneGuard(brpcDone_);
-        // 记录请求处理结果,收集到metric中
+        // Record the result of this request and feed it into the metrics
        OnResonse();
     }
 
-    // closure调用的时候减1,closure创建的什么加1
-    // 这一行必须放在brpcDone_调用之后,ut里需要测试inflightio超过限制时的表现
-    // 会在传进来的closure里面加一个sleep来控制inflightio个数
+    // Decrement here what was incremented when the closure was created.
+    // This line must come after brpcDone_ has run: the unit test checks the
+    // behaviour when inflight I/O exceeds the limit by adding a sleep in the
+    // closure passed in, to control the number of inflight I/Os
     if (nullptr != inflightThrottle_) {
         inflightThrottle_->Decrement();
     }
 }
 
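The ordering contract spelled out in Run above is: increment the inflight counter when the closure is created, and decrement it only after brpcDone_ has run. A small Python sketch of that accounting; the class and method names are ours for illustration, not the Curve API:

    import threading

    class InflightCounter(object):
        def __init__(self, limit):
            self.limit = limit
            self.count = 0
            self.lock = threading.Lock()

        def increment(self):    # called when the closure is created
            with self.lock:
                self.count += 1

        def decrement(self):    # called only after the done callback has run
            with self.lock:
                self.count -= 1

        def overloaded(self):   # used to reject new requests over the limit
            with self.lock:
                return self.count > self.limit
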
ChunkServiceClosure::OnRequest() { - // 如果request或者response为空就不统计metric - if (request_ == nullptr || response_ == nullptr) - return; + // If request or response is null, no metric is collected + if (request_ == nullptr || response_ == nullptr) return; - // 根据request类型统计请求数量 + // Count the number of requests based on their type ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); switch (request_->optype()) { case CHUNK_OP_TYPE::CHUNK_OP_READ: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::READ_CHUNK); break; } case CHUNK_OP_TYPE::CHUNK_OP_WRITE: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::WRITE_CHUNK); break; } case CHUNK_OP_TYPE::CHUNK_OP_RECOVER: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::RECOVER_CHUNK); break; } case CHUNK_OP_TYPE::CHUNK_OP_PASTE: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::PASTE_CHUNK); break; } @@ -88,62 +86,51 @@ void ChunkServiceClosure::OnRequest() { } void ChunkServiceClosure::OnResonse() { - // 如果request或者response为空就不统计metric - if (request_ == nullptr || response_ == nullptr) - return; + // If request or response is null, no metric is collected + if (request_ == nullptr || response_ == nullptr) return; - // 可以根据response中的返回值来统计此次请求的处理结果 + // The processing result of this request can be determined from the + // status code in the response ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); bool hasError = false; uint64_t latencyUs = common::TimeUtility::GetTimeofDayUs() - receivedTimeUs_; switch (request_->optype()) { case CHUNK_OP_TYPE::CHUNK_OP_READ: { - // 如果是read请求,返回CHUNK_OP_STATUS_CHUNK_NOTEXIST也认为是正确的 - hasError = (response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) && - (response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); + // For a read request, returning CHUNK_OP_STATUS_CHUNK_NOTEXIST is + // also considered correct + hasError = (response_->status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) && + (response_->status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); - metric->OnResponse(request_->logicpoolid(), - request_->copysetid(), - CSIOMetricType::READ_CHUNK, - request_->size(), - latencyUs, - hasError); + metric->OnResponse(request_->logicpoolid(), request_->copysetid(), + CSIOMetricType::READ_CHUNK, request_->size(), + latencyUs, hasError); break; } case CHUNK_OP_TYPE::CHUNK_OP_WRITE: { - hasError = response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; - metric->OnResponse(request_->logicpoolid(), - request_->copysetid(), - CSIOMetricType::WRITE_CHUNK, - request_->size(), - latencyUs, - hasError); + hasError = + response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; + metric->OnResponse(request_->logicpoolid(), request_->copysetid(), + CSIOMetricType::WRITE_CHUNK, request_->size(), + latencyUs, hasError); break; } case CHUNK_OP_TYPE::CHUNK_OP_RECOVER: { - hasError = response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; - metric->OnResponse(request_->logicpoolid(), - request_->copysetid(), - CSIOMetricType::RECOVER_CHUNK, - request_->size(), - latencyUs, - hasError); + hasError = + 
response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; + metric->OnResponse(request_->logicpoolid(), request_->copysetid(), + CSIOMetricType::RECOVER_CHUNK, request_->size(), + latencyUs, hasError); break; } case CHUNK_OP_TYPE::CHUNK_OP_PASTE: { - hasError = response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; - metric->OnResponse(request_->logicpoolid(), - request_->copysetid(), - CSIOMetricType::PASTE_CHUNK, - request_->size(), - latencyUs, - hasError); + hasError = + response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; + metric->OnResponse(request_->logicpoolid(), request_->copysetid(), + CSIOMetricType::PASTE_CHUNK, request_->size(), + latencyUs, hasError); break; } default: diff --git a/src/chunkserver/chunk_service_closure.h b/src/chunkserver/chunk_service_closure.h index b6dc7d4d65..48c418033c 100755 --- a/src/chunkserver/chunk_service_closure.h +++ b/src/chunkserver/chunk_service_closure.h @@ -24,66 +24,71 @@ #define SRC_CHUNKSERVER_CHUNK_SERVICE_CLOSURE_H_ #include + #include #include "proto/chunk.pb.h" -#include "src/chunkserver/op_request.h" #include "src/chunkserver/inflight_throttle.h" +#include "src/chunkserver/op_request.h" #include "src/common/timeutility.h" namespace curve { namespace chunkserver { -// chunk service层的闭包,对rpc的闭包再做一层封装,用于请求返回时统计metric信息 +// The closure of the chunk service layer wraps the rpc closure in one more +// layer; it is used to collect metric information when the request returns class ChunkServiceClosure : public braft::Closure { public: explicit ChunkServiceClosure( - std::shared_ptr inflightThrottle, - const ChunkRequest *request, - ChunkResponse *response, - google::protobuf::Closure *done) - : inflightThrottle_(inflightThrottle) - , request_(request) - , response_(response) - , brpcDone_(done) - , receivedTimeUs_(common::TimeUtility::GetTimeofDayUs()) { - // closure创建的什么加1,closure调用的时候减1 - if (nullptr != inflightThrottle_) { - inflightThrottle_->Increment(); - } - // 统计请求数量 - OnRequest(); + std::shared_ptr inflightThrottle, + const ChunkRequest* request, ChunkResponse* response, + google::protobuf::Closure* done) + : inflightThrottle_(inflightThrottle), + request_(request), + response_(response), + brpcDone_(done), + receivedTimeUs_(common::TimeUtility::GetTimeofDayUs()) { + // Increment by 1 when the closure is created; decrement by 1 when + // the closure is invoked + if (nullptr != inflightThrottle_) { + inflightThrottle_->Increment(); } + // Count the number of requests + OnRequest(); + } ~ChunkServiceClosure() = default; /** - * 该闭包的guard生命周期结束时会调用该函数 - * 该函数内目前主要是对读写请求返回结果的一些metric统计 - * 后面如果有类似的场景(在service请求结束时做一些处理)可以在内部添加逻辑 + * This function is called when the guard lifecycle of the closure ends. + * Currently it mainly collects metric statistics on the results returned + * by read and write requests. If similar scenarios arise later (doing + * some processing at the end of a service request), the logic can be + * added here */ void Run() override; private: /** - * 统计请求数量和速率 + * Count the number and rate of requests */ void OnRequest(); /** - * 记录请求处理的结果,例如请求是否出错、请求的延时等 + * Record the results of request processing, such as whether the request + * failed, the latency of the request, etc */ void OnResonse(); private: - // inflight流控 + // inflight flow control std::shared_ptr inflightThrottle_; - // rpc请求的request - const ChunkRequest *request_; - // rpc请求的response - ChunkResponse *response_; - // rpc请求回调 - google::protobuf::Closure *brpcDone_; - // 接受到请求的时间 + // 
The rpc request + const ChunkRequest* request_; + // The rpc response + ChunkResponse* response_; + // The rpc request callback + google::protobuf::Closure* brpcDone_; + // The time when the request was received uint64_t receivedTimeUs_; }; diff --git a/src/chunkserver/chunkserver.cpp b/src/chunkserver/chunkserver.cpp index 22f302c9da..ddc7875277 100644 --- a/src/chunkserver/chunkserver.cpp +++ b/src/chunkserver/chunkserver.cpp @@ -47,12 +47,12 @@ #include "src/common/uri_parser.h" #include "src/common/log_util.h" +using ::curve::chunkserver::concurrent::ConcurrentApplyModule; +using ::curve::common::UriParser; +using ::curve::fs::FileSystemType; using ::curve::fs::LocalFileSystem; using ::curve::fs::LocalFileSystemOption; using ::curve::fs::LocalFsFactory; -using ::curve::fs::FileSystemType; -using ::curve::chunkserver::concurrent::ConcurrentApplyModule; -using ::curve::common::UriParser; DEFINE_string(conf, "ChunkServer.conf", "Path of configuration file"); DEFINE_string(chunkServerIp, "127.0.0.1", "chunkserver ip"); @@ -60,19 +60,19 @@ DEFINE_bool(enableExternalServer, false, "start external server or not"); DEFINE_string(chunkServerExternalIp, "127.0.0.1", "chunkserver external ip"); DEFINE_int32(chunkServerPort, 8200, "chunkserver port"); DEFINE_string(chunkServerStoreUri, "local://./0/", "chunkserver store uri"); -DEFINE_string(chunkServerMetaUri, - "local://./0/chunkserver.dat", "chunkserver meta uri"); +DEFINE_string(chunkServerMetaUri, "local://./0/chunkserver.dat", + "chunkserver meta uri"); DEFINE_string(copySetUri, "local://./0/copysets", "copyset data uri"); DEFINE_string(raftSnapshotUri, "curve://./0/copysets", "raft snapshot uri"); DEFINE_string(raftLogUri, "curve://./0/copysets", "raft log uri"); -DEFINE_string(recycleUri, "local://./0/recycler" , "recycle uri"); +DEFINE_string(recycleUri, "local://./0/recycler", "recycle uri"); DEFINE_string(chunkFilePoolDir, "./0/", "chunk file pool location"); DEFINE_int32(chunkFilePoolAllocatedPercent, 80, "format percent for chunkfillpool."); DEFINE_uint32(chunkFormatThreadNum, 1, "number of threads while file pool formatting"); DEFINE_string(chunkFilePoolMetaPath, - "./chunkfilepool.meta", "chunk file pool meta path"); + "./chunkfilepool.meta", "chunk file pool meta path"); DEFINE_string(logPath, "./0/chunkserver.log-", "log file path"); DEFINE_string(mdsListenAddr, "127.0.0.1:6666", "mds listen addr"); DEFINE_bool(enableChunkfilepool, true, "enable chunkfilepool"); @@ -80,972 +80,1069 @@ DEFINE_uint32(copysetLoadConcurrency, 5, "copyset load concurrency"); DEFINE_bool(enableWalfilepool, true, "enable WAL filepool"); DEFINE_string(walFilePoolDir, "./0/", "WAL filepool location"); DEFINE_string(walFilePoolMetaPath, "./walfilepool.meta", - "WAL filepool meta path"); - - -const char* kProtocalCurve = "curve"; - -namespace curve { -namespace chunkserver { - -int ChunkServer::Run(int argc, char** argv) { - gflags::ParseCommandLineFlags(&argc, &argv, true); - - RegisterCurveSegmentLogStorageOrDie(); - - // ==========================加载配置项===============================// - LOG(INFO) << "Loading Configuration."; - common::Configuration conf; - conf.SetConfigPath(FLAGS_conf.c_str()); - - // 在从配置文件获取 - LOG_IF(FATAL, !conf.LoadConfig()) - << "load chunkserver configuration fail, conf path = " - << conf.GetConfigPath(); - // 命令行可以覆盖配置文件中的参数 - LoadConfigFromCmdline(&conf); - - // 初始化日志模块 - curve::common::DisableLoggingToStdErr(); - google::InitGoogleLogging(argv[0]); - - // 打印参数 - conf.PrintConfig(); - curve::common::ExposeCurveVersion(); - - 
// ============================初始化各模块==========================// - LOG(INFO) << "Initializing ChunkServer modules"; - - // 优先初始化 metric 收集模块 - ChunkServerMetricOptions metricOptions; - InitMetricOptions(&conf, &metricOptions); - ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); - LOG_IF(FATAL, metric->Init(metricOptions) != 0) - << "Failed to init chunkserver metric."; - - // 初始化并发持久模块 - ConcurrentApplyModule concurrentapply; - ConcurrentApplyOption concurrentApplyOptions; - InitConcurrentApplyOptions(&conf, &concurrentApplyOptions); - LOG_IF(FATAL, false == concurrentapply.Init(concurrentApplyOptions)) - << "Failed to initialize concurrentapply module!"; - - // 初始化本地文件系统 - std::shared_ptr fs( - LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); - LocalFileSystemOption lfsOption; - LOG_IF(FATAL, !conf.GetBoolValue( - "fs.enable_renameat2", &lfsOption.enableRenameat2)); - LOG_IF(FATAL, 0 != fs->Init(lfsOption)) - << "Failed to initialize local filesystem module!"; - - // 初始化chunk文件池 - FilePoolOptions chunkFilePoolOptions; - InitChunkFilePoolOptions(&conf, &chunkFilePoolOptions); - std::shared_ptr chunkfilePool = - std::make_shared(fs); - - LOG_IF(FATAL, false == chunkfilePool->Initialize(chunkFilePoolOptions)) - << "Failed to init chunk file pool"; - - // Init Wal file pool - std::string raftLogUri; - LOG_IF(FATAL, !conf.GetStringValue("copyset.raft_log_uri", &raftLogUri)); - std::string raftLogProtocol = UriParser::GetProtocolFromUri(raftLogUri); - std::shared_ptr walFilePool = nullptr; - bool useChunkFilePoolAsWalPool = true; - uint32_t useChunkFilePoolAsWalPoolReserve = 15; - if (raftLogProtocol == kProtocalCurve) { - LOG_IF(FATAL, !conf.GetBoolValue( - "walfilepool.use_chunk_file_pool", - &useChunkFilePoolAsWalPool)); - - if (!useChunkFilePoolAsWalPool) { - FilePoolOptions walFilePoolOptions; - InitWalFilePoolOptions(&conf, &walFilePoolOptions); - walFilePool = std::make_shared(fs); - LOG_IF(FATAL, false == walFilePool->Initialize(walFilePoolOptions)) - << "Failed to init wal file pool"; - LOG(INFO) << "initialize walpool success."; - } else { - walFilePool = chunkfilePool; - LOG_IF(FATAL, !conf.GetUInt32Value( - "walfilepool.use_chunk_file_pool_reserve", - &useChunkFilePoolAsWalPoolReserve)); - LOG(INFO) << "initialize to use chunkfilePool as walpool success."; + "WAL filepool meta path"); + +const char *kProtocalCurve = "curve"; + +namespace curve +{ + namespace chunkserver + { + + int ChunkServer::Run(int argc, char **argv) + { + gflags::ParseCommandLineFlags(&argc, &argv, true); + + RegisterCurveSegmentLogStorageOrDie(); + + // ==========================Load Configuration + // Items===============================// + LOG(INFO) << "Loading Configuration."; + common::Configuration conf; + conf.SetConfigPath(FLAGS_conf.c_str()); + + // Obtain parameters from the configuration file + LOG_IF(FATAL, !conf.LoadConfig()) + << "load chunkserver configuration fail, conf path = " + << conf.GetConfigPath(); + // The command line can override parameters in the configuration file + LoadConfigFromCmdline(&conf); + + // Initialize the logging module + curve::common::DisableLoggingToStdErr(); + google::InitGoogleLogging(argv[0]); + + // Print parameters + conf.PrintConfig(); + curve::common::ExposeCurveVersion(); + + // ============================Initialize each + // module==========================// + LOG(INFO) << "Initializing ChunkServer modules"; + + // Prioritize initializing the metric collection module + ChunkServerMetricOptions metricOptions; + InitMetricOptions(&conf, &metricOptions); + ChunkServerMetric 
*metric = ChunkServerMetric::GetInstance(); + LOG_IF(FATAL, metric->Init(metricOptions) != 0) + << "Failed to init chunkserver metric."; + + // Initialize concurrent persistence module + ConcurrentApplyModule concurrentapply; + ConcurrentApplyOption concurrentApplyOptions; + InitConcurrentApplyOptions(&conf, &concurrentApplyOptions); + LOG_IF(FATAL, false == concurrentapply.Init(concurrentApplyOptions)) + << "Failed to initialize concurrentapply module!"; + + // Initialize local file system + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + LocalFileSystemOption lfsOption; + LOG_IF(FATAL, !conf.GetBoolValue("fs.enable_renameat2", + &lfsOption.enableRenameat2)); + LOG_IF(FATAL, 0 != fs->Init(lfsOption)) + << "Failed to initialize local filesystem module!"; + + // Initialize chunk file pool + FilePoolOptions chunkFilePoolOptions; + InitChunkFilePoolOptions(&conf, &chunkFilePoolOptions); + std::shared_ptr chunkfilePool = std::make_shared(fs); + + LOG_IF(FATAL, false == chunkfilePool->Initialize(chunkFilePoolOptions)) + << "Failed to init chunk file pool"; + + // Init Wal file pool + std::string raftLogUri; + LOG_IF(FATAL, !conf.GetStringValue("copyset.raft_log_uri", &raftLogUri)); + std::string raftLogProtocol = UriParser::GetProtocolFromUri(raftLogUri); + std::shared_ptr walFilePool = nullptr; + bool useChunkFilePoolAsWalPool = true; + uint32_t useChunkFilePoolAsWalPoolReserve = 15; + if (raftLogProtocol == kProtocalCurve) + { + LOG_IF(FATAL, !conf.GetBoolValue("walfilepool.use_chunk_file_pool", + &useChunkFilePoolAsWalPool)); + + if (!useChunkFilePoolAsWalPool) + { + FilePoolOptions walFilePoolOptions; + InitWalFilePoolOptions(&conf, &walFilePoolOptions); + walFilePool = std::make_shared(fs); + LOG_IF(FATAL, false == walFilePool->Initialize(walFilePoolOptions)) + << "Failed to init wal file pool"; + LOG(INFO) << "initialize walpool success."; + } + else + { + walFilePool = chunkfilePool; + LOG_IF(FATAL, !conf.GetUInt32Value( + "walfilepool.use_chunk_file_pool_reserve", + &useChunkFilePoolAsWalPoolReserve)); + LOG(INFO) << "initialize to use chunkfilePool as walpool success."; + } + } + + // Remote Copy Management Module Options + CopyerOptions copyerOptions; + InitCopyerOptions(&conf, &copyerOptions); + auto copyer = std::make_shared(); + LOG_IF(FATAL, copyer->Init(copyerOptions) != 0) + << "Failed to initialize clone copyer."; + + // Clone Management Module Initialization + CloneOptions cloneOptions; + InitCloneOptions(&conf, &cloneOptions); + uint32_t sliceSize; + LOG_IF(FATAL, !conf.GetUInt32Value("clone.slice_size", &sliceSize)); + bool enablePaste = false; + LOG_IF(FATAL, !conf.GetBoolValue("clone.enable_paste", &enablePaste)); + cloneOptions.core = + std::make_shared(sliceSize, enablePaste, copyer); + LOG_IF(FATAL, cloneManager_.Init(cloneOptions) != 0) + << "Failed to initialize clone manager."; + + // Initialize registration module + RegisterOptions registerOptions; + InitRegisterOptions(&conf, &registerOptions); + registerOptions.useChunkFilePoolAsWalPoolReserve = + useChunkFilePoolAsWalPoolReserve; + registerOptions.useChunkFilePoolAsWalPool = useChunkFilePoolAsWalPool; + registerOptions.fs = fs; + registerOptions.chunkFilepool = chunkfilePool; + registerOptions.blockSize = chunkfilePool->GetFilePoolOpt().blockSize; + registerOptions.chunkSize = chunkfilePool->GetFilePoolOpt().fileSize; + Register registerMDS(registerOptions); + ChunkServerMetadata metadata; + ChunkServerMetadata localMetadata; + // Get Meta from Local + std::string metaPath = 
UriParser::GetPathFromUri(registerOptions.chunkserverMetaUri); + + auto epochMap = std::make_shared(); + if (fs->FileExists(metaPath)) + { + LOG_IF(FATAL, GetChunkServerMetaFromLocal( + registerOptions.chunserverStoreUri, + registerOptions.chunkserverMetaUri, + registerOptions.fs, &localMetadata) != 0) + << "Failed to GetChunkServerMetaFromLocal."; + LOG_IF(FATAL, registerMDS.RegisterToMDS(&localMetadata, &metadata, + epochMap) != 0) + << "Failed to register to MDS."; + } + else + { + // If it cannot be obtained locally, register with MDS + LOG(INFO) << "meta file " << metaPath + << " do not exist, register to mds"; + LOG_IF(FATAL, + registerMDS.RegisterToMDS(nullptr, &metadata, epochMap) != 0) + << "Failed to register to MDS."; + } + + // Trash module initialization + TrashOptions trashOptions; + InitTrashOptions(&conf, &trashOptions); + trashOptions.localFileSystem = fs; + trashOptions.chunkFilePool = chunkfilePool; + trashOptions.walPool = walFilePool; + trash_ = std::make_shared(); + LOG_IF(FATAL, trash_->Init(trashOptions) != 0) << "Failed to init Trash"; + + // Initialize replication group management module + CopysetNodeOptions copysetNodeOptions; + InitCopysetNodeOptions(&conf, &copysetNodeOptions); + copysetNodeOptions.concurrentapply = &concurrentapply; + copysetNodeOptions.chunkFilePool = chunkfilePool; + copysetNodeOptions.walFilePool = walFilePool; + copysetNodeOptions.localFileSystem = fs; + copysetNodeOptions.trash = trash_; + if (nullptr != walFilePool) + { + FilePoolOptions poolOpt = walFilePool->GetFilePoolOpt(); + uint32_t maxWalSegmentSize = poolOpt.fileSize + poolOpt.metaPageSize; + copysetNodeOptions.maxWalSegmentSize = maxWalSegmentSize; + + if (poolOpt.getFileFromPool) + { + // overwrite from file pool + copysetNodeOptions.maxChunkSize = poolOpt.fileSize; + copysetNodeOptions.metaPageSize = poolOpt.metaPageSize; + copysetNodeOptions.blockSize = poolOpt.blockSize; + } + } + + // Bandwidth limitation of install snapshot + int snapshotThroughputBytes; + LOG_IF(FATAL, + !conf.GetIntValue("chunkserver.snapshot_throttle_throughput_bytes", + &snapshotThroughputBytes)); + /** + * checkCycles enables finer-grained bandwidth control. With + * snapshotThroughputBytes=100MB and checkCycles=10, for example, it + * can guarantee a bandwidth of 10MB every 1/10 second, without + * accumulation: the bandwidth of the first 1/10 second is 10MB, but it + * expires. 
In the second 1/10 second, only 10MB of bandwidth can be used, + * and not a bandwidth of 20MB + */ + int checkCycles; + LOG_IF(FATAL, + !conf.GetIntValue("chunkserver.snapshot_throttle_check_cycles", + &checkCycles)); + scoped_refptr snapshotThrottle = + new ThroughputSnapshotThrottle(snapshotThroughputBytes, checkCycles); + snapshotThrottle_ = snapshotThrottle; + copysetNodeOptions.snapshotThrottle = &snapshotThrottle_; + + butil::ip_t ip; + if (butil::str2ip(copysetNodeOptions.ip.c_str(), &ip) < 0) + { + LOG(FATAL) << "Invalid server IP provided: " << copysetNodeOptions.ip; + return -1; + } + butil::EndPoint endPoint = butil::EndPoint(ip, copysetNodeOptions.port); + // Register curve snapshot storage + RegisterCurveSnapshotStorageOrDie(); + CurveSnapshotStorage::set_server_addr(endPoint); + copysetNodeManager_ = &CopysetNodeManager::GetInstance(); + LOG_IF(FATAL, copysetNodeManager_->Init(copysetNodeOptions) != 0) + << "Failed to initialize CopysetNodeManager."; + + // init scan model + ScanManagerOptions scanOpts; + InitScanOptions(&conf, &scanOpts); + scanOpts.copysetNodeManager = copysetNodeManager_; + LOG_IF(FATAL, scanManager_.Init(scanOpts) != 0) + << "Failed to init scan manager."; + + // Heartbeat module initialization + HeartbeatOptions heartbeatOptions; + InitHeartbeatOptions(&conf, &heartbeatOptions); + heartbeatOptions.copysetNodeManager = copysetNodeManager_; + heartbeatOptions.fs = fs; + heartbeatOptions.chunkFilePool = chunkfilePool; + heartbeatOptions.chunkserverId = metadata.id(); + heartbeatOptions.chunkserverToken = metadata.token(); + heartbeatOptions.scanManager = &scanManager_; + LOG_IF(FATAL, heartbeat_.Init(heartbeatOptions) != 0) + << "Failed to init Heartbeat manager."; + + // Monitor the metric indicators of some modules + metric->MonitorTrash(trash_.get()); + metric->MonitorChunkFilePool(chunkfilePool.get()); + if (raftLogProtocol == kProtocalCurve && !useChunkFilePoolAsWalPool) + { + metric->MonitorWalFilePool(walFilePool.get()); + } + metric->ExposeConfigMetric(&conf); + + // ========================Add RPC Service===============================// + // TODO(lixiaocui): Add delay metric to each interface in rpc + brpc::Server server; + brpc::Server externalServer; + // We need call braft::add_service to add endPoint to braft::NodeManager + braft::add_service(&server, endPoint); + + // copyset service + CopysetServiceImpl copysetService(copysetNodeManager_); + int ret = + server.AddService(&copysetService, brpc::SERVER_DOESNT_OWN_SERVICE); + CHECK(0 == ret) << "Fail to add CopysetService"; + + // inflight throttle + int maxInflight; + LOG_IF(FATAL, !conf.GetIntValue("chunkserver.max_inflight_requests", + &maxInflight)); + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); + CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; + + // chunk service + ChunkServiceOptions chunkServiceOptions; + chunkServiceOptions.copysetNodeManager = copysetNodeManager_; + chunkServiceOptions.cloneManager = &cloneManager_; + chunkServiceOptions.inflightThrottle = inflightThrottle; + + ChunkServiceImpl chunkService(chunkServiceOptions, epochMap); + ret = server.AddService(&chunkService, brpc::SERVER_DOESNT_OWN_SERVICE); + CHECK(0 == ret) << "Fail to add ChunkService"; + + // We need to replace braft::CliService with our own implementation + auto service = server.FindServiceByName("CliService"); + ret = server.RemoveService(service); + CHECK(0 == ret) << "Fail to remove braft::CliService"; + BRaftCliServiceImpl braftCliService; + ret = 
server.AddService(&braftCliService, brpc::SERVER_DOESNT_OWN_SERVICE); + CHECK(0 == ret) << "Fail to add BRaftCliService"; + + // braftclient service + BRaftCliServiceImpl2 braftCliService2; + ret = server.AddService(&braftCliService2, brpc::SERVER_DOESNT_OWN_SERVICE); + CHECK(0 == ret) << "Fail to add BRaftCliService2"; + + // We need to replace braft::FileServiceImpl with our own implementation + service = server.FindServiceByName("FileService"); + ret = server.RemoveService(service); + CHECK(0 == ret) << "Fail to remove braft::FileService"; + kCurveFileService.set_snapshot_attachment(new CurveSnapshotAttachment(fs)); + ret = + server.AddService(&kCurveFileService, brpc::SERVER_DOESNT_OWN_SERVICE); + CHECK(0 == ret) << "Fail to add CurveFileService"; + + // chunkserver service + ChunkServerServiceImpl chunkserverService(copysetNodeManager_); + ret = + server.AddService(&chunkserverService, brpc::SERVER_DOESNT_OWN_SERVICE); + CHECK(0 == ret) << "Fail to add ChunkServerService"; + + // scan copyset service + ScanServiceImpl scanCopysetService(&scanManager_); + ret = + server.AddService(&scanCopysetService, brpc::SERVER_DOESNT_OWN_SERVICE); + CHECK(0 == ret) << "Fail to add ScanCopysetService"; + + // Start rpc service + LOG(INFO) << "Internal server is going to serve on: " + << copysetNodeOptions.ip << ":" << copysetNodeOptions.port; + if (server.Start(endPoint, NULL) != 0) + { + LOG(ERROR) << "Fail to start Internal Server"; + return -1; + } + /* Start the external server. + The external server provides services to external clients and tools, + as distinct from the communication between MDS and chunkserver */ + if (registerOptions.enableExternalServer) + { + ret = externalServer.AddService(&copysetService, + brpc::SERVER_DOESNT_OWN_SERVICE); + CHECK(0 == ret) << "Fail to add CopysetService at external server"; + ret = externalServer.AddService(&chunkService, + brpc::SERVER_DOESNT_OWN_SERVICE); + CHECK(0 == ret) << "Fail to add ChunkService at external server"; + ret = externalServer.AddService(&braftCliService, + brpc::SERVER_DOESNT_OWN_SERVICE); + CHECK(0 == ret) << "Fail to add BRaftCliService at external server"; + ret = externalServer.AddService(&braftCliService2, + brpc::SERVER_DOESNT_OWN_SERVICE); + CHECK(0 == ret) << "Fail to add BRaftCliService2 at external server"; + braft::RaftStatImpl raftStatService; + ret = externalServer.AddService(&raftStatService, + brpc::SERVER_DOESNT_OWN_SERVICE); + CHECK(0 == ret) << "Fail to add RaftStatService at external server"; + std::string externalAddr = + registerOptions.chunkserverExternalIp + ":" + + std::to_string(registerOptions.chunkserverPort); + LOG(INFO) << "External server is going to serve on: " << externalAddr; + if (externalServer.Start(externalAddr.c_str(), NULL) != 0) + { + LOG(ERROR) << "Fail to start External Server"; + return -1; + } + } + + // =======================Start each + // module==================================// + LOG(INFO) << "ChunkServer starts."; + /** + * Placing module startup after the RPC service starts is mainly meant to + * address memory growth by controlling the number of copysets recovered + * concurrently. 
Copyset recovery requires the RPC service to be started first + */ + LOG_IF(FATAL, trash_->Run() != 0) << "Failed to start trash."; + LOG_IF(FATAL, cloneManager_.Run() != 0) << "Failed to start clone manager."; + LOG_IF(FATAL, heartbeat_.Run() != 0) + << "Failed to start heartbeat manager."; + LOG_IF(FATAL, copysetNodeManager_->Run() != 0) + << "Failed to start CopysetNodeManager."; + LOG_IF(FATAL, scanManager_.Run() != 0) << "Failed to start scan manager."; + LOG_IF(FATAL, !chunkfilePool->StartCleaning()) + << "Failed to start file pool clean worker."; + + // =======================Wait for the process to + // exit==================================// + while (!brpc::IsAskedToQuit()) + { + bthread_usleep(1000000L); + } + // scanmanager stop may need a little while, so stop it first before + // stopping the service NOLINT + LOG(INFO) << "ChunkServer is going to quit."; + LOG_IF(ERROR, scanManager_.Fini() != 0) + << "Failed to shutdown scan manager."; + + if (registerOptions.enableExternalServer) + { + externalServer.Stop(0); + externalServer.Join(); + } + + server.Stop(0); + server.Join(); + + LOG_IF(ERROR, heartbeat_.Fini() != 0) + << "Failed to shutdown heartbeat manager."; + LOG_IF(ERROR, copysetNodeManager_->Fini() != 0) + << "Failed to shutdown CopysetNodeManager."; + LOG_IF(ERROR, cloneManager_.Fini() != 0) + << "Failed to shutdown clone manager."; + LOG_IF(ERROR, copyer->Fini() != 0) << "Failed to shutdown clone copyer."; + LOG_IF(ERROR, trash_->Fini() != 0) << "Failed to shutdown trash."; + LOG_IF(ERROR, !chunkfilePool->StopCleaning()) + << "Failed to shutdown file pool clean worker."; + concurrentapply.Stop(); + + google::ShutdownGoogleLogging(); + return 0; } - } - - // 远端拷贝管理模块选项 - CopyerOptions copyerOptions; - InitCopyerOptions(&conf, &copyerOptions); - auto copyer = std::make_shared(); - LOG_IF(FATAL, copyer->Init(copyerOptions) != 0) - << "Failed to initialize clone copyer."; - - // 克隆管理模块初始化 - CloneOptions cloneOptions; - InitCloneOptions(&conf, &cloneOptions); - uint32_t sliceSize; - LOG_IF(FATAL, !conf.GetUInt32Value("clone.slice_size", &sliceSize)); - bool enablePaste = false; - LOG_IF(FATAL, !conf.GetBoolValue("clone.enable_paste", &enablePaste)); - cloneOptions.core = - std::make_shared(sliceSize, enablePaste, copyer); - LOG_IF(FATAL, cloneManager_.Init(cloneOptions) != 0) - << "Failed to initialize clone manager."; - - // 初始化注册模块 - RegisterOptions registerOptions; - InitRegisterOptions(&conf, &registerOptions); - registerOptions.useChunkFilePoolAsWalPoolReserve = - useChunkFilePoolAsWalPoolReserve; - registerOptions.useChunkFilePoolAsWalPool = useChunkFilePoolAsWalPool; - registerOptions.fs = fs; - registerOptions.chunkFilepool = chunkfilePool; - registerOptions.blockSize = chunkfilePool->GetFilePoolOpt().blockSize; - registerOptions.chunkSize = chunkfilePool->GetFilePoolOpt().fileSize; - Register registerMDS(registerOptions); - ChunkServerMetadata metadata; - ChunkServerMetadata localMetadata; - // 从本地获取meta - std::string metaPath = UriParser::GetPathFromUri( - registerOptions.chunkserverMetaUri); - - auto epochMap = std::make_shared(); - if (fs->FileExists(metaPath)) { - LOG_IF(FATAL, GetChunkServerMetaFromLocal( - registerOptions.chunserverStoreUri, - registerOptions.chunkserverMetaUri, - registerOptions.fs, &localMetadata) != 0) - << "Failed to GetChunkServerMetaFromLocal."; - LOG_IF(FATAL, registerMDS.RegisterToMDS( - &localMetadata, &metadata, epochMap) != 0) - << "Failed to register to MDS."; - } else { - // 如果本地获取不到,向mds注册 - LOG(INFO) << "meta file " - << metaPath << " do 
not exist, register to mds"; - LOG_IF(FATAL, registerMDS.RegisterToMDS( - nullptr, &metadata, epochMap) != 0) - << "Failed to register to MDS."; - } - - // trash模块初始化 - TrashOptions trashOptions; - InitTrashOptions(&conf, &trashOptions); - trashOptions.localFileSystem = fs; - trashOptions.chunkFilePool = chunkfilePool; - trashOptions.walPool = walFilePool; - trash_ = std::make_shared(); - LOG_IF(FATAL, trash_->Init(trashOptions) != 0) - << "Failed to init Trash"; - - // 初始化复制组管理模块 - CopysetNodeOptions copysetNodeOptions; - InitCopysetNodeOptions(&conf, ©setNodeOptions); - copysetNodeOptions.concurrentapply = &concurrentapply; - copysetNodeOptions.chunkFilePool = chunkfilePool; - copysetNodeOptions.walFilePool = walFilePool; - copysetNodeOptions.localFileSystem = fs; - copysetNodeOptions.trash = trash_; - if (nullptr != walFilePool) { - FilePoolOptions poolOpt = walFilePool->GetFilePoolOpt(); - uint32_t maxWalSegmentSize = poolOpt.fileSize + poolOpt.metaPageSize; - copysetNodeOptions.maxWalSegmentSize = maxWalSegmentSize; - - if (poolOpt.getFileFromPool) { - // overwrite from file pool - copysetNodeOptions.maxChunkSize = poolOpt.fileSize; - copysetNodeOptions.metaPageSize = poolOpt.metaPageSize; - copysetNodeOptions.blockSize = poolOpt.blockSize; + + void ChunkServer::Stop() + { + brpc::AskToQuit(); } - } - - // install snapshot的带宽限制 - int snapshotThroughputBytes; - LOG_IF(FATAL, - !conf.GetIntValue("chunkserver.snapshot_throttle_throughput_bytes", - &snapshotThroughputBytes)); - /** - * checkCycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB, - * checkCycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个 - * 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而 - * 不是20MB的带宽 - */ - int checkCycles; - LOG_IF(FATAL, - !conf.GetIntValue("chunkserver.snapshot_throttle_check_cycles", - &checkCycles)); - scoped_refptr snapshotThrottle - = new ThroughputSnapshotThrottle(snapshotThroughputBytes, checkCycles); - snapshotThrottle_ = snapshotThrottle; - copysetNodeOptions.snapshotThrottle = &snapshotThrottle_; - - butil::ip_t ip; - if (butil::str2ip(copysetNodeOptions.ip.c_str(), &ip) < 0) { - LOG(FATAL) << "Invalid server IP provided: " << copysetNodeOptions.ip; - return -1; - } - butil::EndPoint endPoint = butil::EndPoint(ip, copysetNodeOptions.port); - // 注册curve snapshot storage - RegisterCurveSnapshotStorageOrDie(); - CurveSnapshotStorage::set_server_addr(endPoint); - copysetNodeManager_ = &CopysetNodeManager::GetInstance(); - LOG_IF(FATAL, copysetNodeManager_->Init(copysetNodeOptions) != 0) - << "Failed to initialize CopysetNodeManager."; - - // init scan model - ScanManagerOptions scanOpts; - InitScanOptions(&conf, &scanOpts); - scanOpts.copysetNodeManager = copysetNodeManager_; - LOG_IF(FATAL, scanManager_.Init(scanOpts) != 0) - << "Failed to init scan manager."; - - // 心跳模块初始化 - HeartbeatOptions heartbeatOptions; - InitHeartbeatOptions(&conf, &heartbeatOptions); - heartbeatOptions.copysetNodeManager = copysetNodeManager_; - heartbeatOptions.fs = fs; - heartbeatOptions.chunkFilePool = chunkfilePool; - heartbeatOptions.chunkserverId = metadata.id(); - heartbeatOptions.chunkserverToken = metadata.token(); - heartbeatOptions.scanManager = &scanManager_; - LOG_IF(FATAL, heartbeat_.Init(heartbeatOptions) != 0) - << "Failed to init Heartbeat manager."; - - // 监控部分模块的metric指标 - metric->MonitorTrash(trash_.get()); - metric->MonitorChunkFilePool(chunkfilePool.get()); - if (raftLogProtocol == kProtocalCurve && !useChunkFilePoolAsWalPool) { - metric->MonitorWalFilePool(walFilePool.get()); - } - 
metric->ExposeConfigMetric(&conf); - - // ========================添加rpc服务===============================// - // TODO(lixiaocui): rpc中各接口添加上延迟metric - brpc::Server server; - brpc::Server externalServer; - // We need call braft::add_service to add endPoint to braft::NodeManager - braft::add_service(&server, endPoint); - - // copyset service - CopysetServiceImpl copysetService(copysetNodeManager_); - int ret = server.AddService(&copysetService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add CopysetService"; - - // inflight throttle - int maxInflight; - LOG_IF(FATAL, - !conf.GetIntValue("chunkserver.max_inflight_requests", - &maxInflight)); - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); - CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; - - // chunk service - ChunkServiceOptions chunkServiceOptions; - chunkServiceOptions.copysetNodeManager = copysetNodeManager_; - chunkServiceOptions.cloneManager = &cloneManager_; - chunkServiceOptions.inflightThrottle = inflightThrottle; - - ChunkServiceImpl chunkService(chunkServiceOptions, epochMap); - ret = server.AddService(&chunkService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add ChunkService"; - - // We need to replace braft::CliService with our own implementation - auto service = server.FindServiceByName("CliService"); - ret = server.RemoveService(service); - CHECK(0 == ret) << "Fail to remove braft::CliService"; - BRaftCliServiceImpl braftCliService; - ret = server.AddService(&braftCliService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add BRaftCliService"; - - // braftclient service - BRaftCliServiceImpl2 braftCliService2; - ret = server.AddService(&braftCliService2, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add BRaftCliService2"; - - // We need to replace braft::FileServiceImpl with our own implementation - service = server.FindServiceByName("FileService"); - ret = server.RemoveService(service); - CHECK(0 == ret) << "Fail to remove braft::FileService"; - kCurveFileService.set_snapshot_attachment(new CurveSnapshotAttachment(fs)); - ret = server.AddService(&kCurveFileService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add CurveFileService"; - - // chunkserver service - ChunkServerServiceImpl chunkserverService(copysetNodeManager_); - ret = server.AddService(&chunkserverService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add ChunkServerService"; - - // scan copyset service - ScanServiceImpl scanCopysetService(&scanManager_); - ret = server.AddService(&scanCopysetService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add ScanCopysetService"; - - // 启动rpc service - LOG(INFO) << "Internal server is going to serve on: " - << copysetNodeOptions.ip << ":" << copysetNodeOptions.port; - if (server.Start(endPoint, NULL) != 0) { - LOG(ERROR) << "Fail to start Internal Server"; - return -1; - } - /* 启动external server - external server用于向client和工具等外部提供服务 - 区别于mds和chunkserver之间的通信*/ - if (registerOptions.enableExternalServer) { - ret = externalServer.AddService(&copysetService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add CopysetService at external server"; - ret = externalServer.AddService(&chunkService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add ChunkService at external server"; - ret = externalServer.AddService(&braftCliService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add BRaftCliService at 
external server"; - ret = externalServer.AddService(&braftCliService2, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add BRaftCliService2 at external server"; - braft::RaftStatImpl raftStatService; - ret = externalServer.AddService(&raftStatService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add RaftStatService at external server"; - std::string externalAddr = registerOptions.chunkserverExternalIp + ":" + - std::to_string(registerOptions.chunkserverPort); - LOG(INFO) << "External server is going to serve on: " << externalAddr; - if (externalServer.Start(externalAddr.c_str(), NULL) != 0) { - LOG(ERROR) << "Fail to start External Server"; - return -1; + + void ChunkServer::InitChunkFilePoolOptions( + common::Configuration *conf, FilePoolOptions *chunkFilePoolOptions) + { + LOG_IF(FATAL, !conf->GetUInt32Value("global.chunk_size", + &chunkFilePoolOptions->fileSize)); + + LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", + &chunkFilePoolOptions->metaPageSize)) + << "Not found `global.meta_page_size` in config file"; + + LOG_IF(FATAL, !conf->GetUInt32Value("global.block_size", + &chunkFilePoolOptions->blockSize)) + << "Not found `global.block_size` in config file"; + + LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.cpmeta_file_size", + &chunkFilePoolOptions->metaFileSize)); + LOG_IF(FATAL, + !conf->GetBoolValue("chunkfilepool.enable_get_chunk_from_pool", + &chunkFilePoolOptions->getFileFromPool)); + + if (chunkFilePoolOptions->getFileFromPool == false) + { + std::string chunkFilePoolUri; + LOG_IF(FATAL, !conf->GetStringValue("chunkfilepool.chunk_file_pool_dir", + &chunkFilePoolUri)); + ::memcpy(chunkFilePoolOptions->filePoolDir, chunkFilePoolUri.c_str(), + chunkFilePoolUri.size()); + } + else + { + std::string metaUri; + LOG_IF(FATAL, !conf->GetStringValue( + "chunkfilepool.meta_path", &metaUri)); + ::memcpy( + chunkFilePoolOptions->metaPath, metaUri.c_str(), metaUri.size()); + + std::string chunkFilePoolUri; + LOG_IF(FATAL, !conf->GetStringValue("chunkfilepool.chunk_file_pool_dir", + &chunkFilePoolUri)); + + ::memcpy(chunkFilePoolOptions->filePoolDir, chunkFilePoolUri.c_str(), + chunkFilePoolUri.size()); + std::string pool_size; + LOG_IF(FATAL, !conf->GetStringValue( + "chunkfilepool.chunk_file_pool_size", &pool_size)); + LOG_IF(FATAL, !curve::common::ToNumbericByte( + pool_size, &chunkFilePoolOptions->filePoolSize)); + LOG_IF(FATAL, + !conf->GetBoolValue("chunkfilepool.allocated_by_percent", + &chunkFilePoolOptions->allocatedByPercent)); + LOG_IF(FATAL, + !conf->GetUInt32Value("chunkfilepool.allocate_percent", + &chunkFilePoolOptions->allocatedPercent)); + LOG_IF(FATAL, !conf->GetUInt32Value( + "chunkfilepool.chunk_file_pool_format_thread_num", + &chunkFilePoolOptions->formatThreadNum)); + LOG_IF(FATAL, !conf->GetBoolValue("chunkfilepool.clean.enable", + &chunkFilePoolOptions->needClean)); + LOG_IF(FATAL, + !conf->GetUInt32Value("chunkfilepool.clean.bytes_per_write", + &chunkFilePoolOptions->bytesPerWrite)); + LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.clean.throttle_iops", + &chunkFilePoolOptions->iops4clean)); + + std::string copysetUri; + LOG_IF(FATAL, + !conf->GetStringValue("copyset.raft_snapshot_uri", ©setUri)); + curve::common::UriParser::ParseUri(copysetUri, + &chunkFilePoolOptions->copysetDir); + + std::string recycleUri; + LOG_IF(FATAL, + !conf->GetStringValue("copyset.recycler_uri", &recycleUri)); + curve::common::UriParser::ParseUri(recycleUri, + &chunkFilePoolOptions->recycleDir); + + bool useChunkFilePoolAsWalPool; 
+ LOG_IF(FATAL, !conf->GetBoolValue("walfilepool.use_chunk_file_pool", + &useChunkFilePoolAsWalPool)); + + chunkFilePoolOptions->isAllocated = [=](const std::string &filename) + { + return Trash::IsChunkOrSnapShotFile(filename) || + (useChunkFilePoolAsWalPool && Trash::IsWALFile(filename)); + }; + + if (0 == chunkFilePoolOptions->bytesPerWrite || chunkFilePoolOptions->bytesPerWrite > 1 * 1024 * 1024 || 0 != chunkFilePoolOptions->bytesPerWrite % 4096) + { + LOG(FATAL) << "The bytesPerWrite must be in [1, 1048576] " + << "and should be aligned to 4K, " + << "but now is: " << chunkFilePoolOptions->bytesPerWrite; + } + } + } + + void ChunkServer::InitConcurrentApplyOptions( + common::Configuration *conf, + ConcurrentApplyOption *concurrentApplyOptions) + { + LOG_IF(FATAL, !conf->GetIntValue("rconcurrentapply.size", + &concurrentApplyOptions->rconcurrentsize)); + LOG_IF(FATAL, !conf->GetIntValue("wconcurrentapply.size", + &concurrentApplyOptions->wconcurrentsize)); + LOG_IF(FATAL, !conf->GetIntValue("rconcurrentapply.queuedepth", + &concurrentApplyOptions->rqueuedepth)); + LOG_IF(FATAL, !conf->GetIntValue("wconcurrentapply.queuedepth", + &concurrentApplyOptions->wqueuedepth)); + } + + void ChunkServer::InitWalFilePoolOptions(common::Configuration *conf, + FilePoolOptions *walPoolOptions) + { + LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.segment_size", + &walPoolOptions->fileSize)); + LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.metapage_size", + &walPoolOptions->metaPageSize)); + LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.meta_file_size", + &walPoolOptions->metaFileSize)); + LOG_IF(FATAL, + !conf->GetBoolValue("walfilepool.enable_get_segment_from_pool", + &walPoolOptions->getFileFromPool)); + + if (walPoolOptions->getFileFromPool == false) + { + std::string filePoolUri; + LOG_IF(FATAL, !conf->GetStringValue("walfilepool.file_pool_dir", + &filePoolUri)); + ::memcpy(walPoolOptions->filePoolDir, filePoolUri.c_str(), + filePoolUri.size()); + } + else + { + std::string metaUri; + LOG_IF(FATAL, !conf->GetStringValue( + "walfilepool.meta_path", &metaUri)); + + std::string pool_size; + LOG_IF(FATAL, !conf->GetStringValue("walfilepool.chunk_file_pool_size", + &pool_size)); + LOG_IF(FATAL, !curve::common::ToNumbericByte( + pool_size, &walPoolOptions->filePoolSize)); + LOG_IF(FATAL, !conf->GetUInt64Value("walfilepool.wal_file_pool_size", + &walPoolOptions->filePoolSize)); + LOG_IF(FATAL, !conf->GetBoolValue("walfilepool.allocated_by_percent", + &walPoolOptions->allocatedByPercent)); + LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.allocated_percent", + &walPoolOptions->allocatedPercent)); + LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.thread_num", + &walPoolOptions->formatThreadNum)); + + std::string copysetUri; + LOG_IF(FATAL, + !conf->GetStringValue("copyset.raft_log_uri", ©setUri)); + curve::common::UriParser::ParseUri(copysetUri, + &walPoolOptions->copysetDir); + + std::string recycleUri; + LOG_IF(FATAL, + !conf->GetStringValue("copyset.recycler_uri", &recycleUri)); + curve::common::UriParser::ParseUri(recycleUri, + &walPoolOptions->recycleDir); + + walPoolOptions->isAllocated = [](const string &filename) + { + return Trash::IsWALFile(filename); + }; + ::memcpy( + walPoolOptions->metaPath, metaUri.c_str(), metaUri.size()); + } + } + + void ChunkServer::InitCopysetNodeOptions( + common::Configuration *conf, CopysetNodeOptions *copysetNodeOptions) + { + LOG_IF(FATAL, !conf->GetStringValue("global.ip", ©setNodeOptions->ip)); + LOG_IF(FATAL, + !conf->GetUInt32Value("global.port", 
&copysetNodeOptions->port)); + if (copysetNodeOptions->port <= 0 || copysetNodeOptions->port >= 65535) + { + LOG(FATAL) << "Invalid server port provided: " + << copysetNodeOptions->port; + } + + LOG_IF(FATAL, !conf->GetIntValue("copyset.election_timeout_ms", + &copysetNodeOptions->electionTimeoutMs)); + LOG_IF(FATAL, !conf->GetIntValue("copyset.snapshot_interval_s", + &copysetNodeOptions->snapshotIntervalS)); + bool ret = conf->GetBoolValue("copyset.enable_lease_read", + &copysetNodeOptions->enbaleLeaseRead); + LOG_IF(WARNING, ret == false) + << "config no copyset.enable_lease_read info, using default value " + << copysetNodeOptions->enbaleLeaseRead; + LOG_IF(FATAL, !conf->GetIntValue("copyset.catchup_margin", + &copysetNodeOptions->catchupMargin)); + LOG_IF(FATAL, !conf->GetStringValue("copyset.chunk_data_uri", + &copysetNodeOptions->chunkDataUri)); + LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_log_uri", + &copysetNodeOptions->logUri)); + LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_meta_uri", + &copysetNodeOptions->raftMetaUri)); + LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_snapshot_uri", + &copysetNodeOptions->raftSnapshotUri)); + LOG_IF(FATAL, !conf->GetStringValue("copyset.recycler_uri", + &copysetNodeOptions->recyclerUri)); + LOG_IF(FATAL, !conf->GetUInt32Value("global.chunk_size", + &copysetNodeOptions->maxChunkSize)); + LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", + &copysetNodeOptions->metaPageSize)); + LOG_IF(FATAL, !conf->GetUInt32Value("global.block_size", + &copysetNodeOptions->blockSize)); + LOG_IF(FATAL, !conf->GetUInt32Value("global.location_limit", + &copysetNodeOptions->locationLimit)); + LOG_IF(FATAL, !conf->GetUInt32Value("copyset.load_concurrency", + &copysetNodeOptions->loadConcurrency)); + LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_retrytimes", + &copysetNodeOptions->checkRetryTimes)); + LOG_IF(FATAL, !conf->GetUInt32Value("copyset.finishload_margin", + &copysetNodeOptions->finishLoadMargin)); + LOG_IF(FATAL, !conf->GetUInt32Value( + "copyset.check_loadmargin_interval_ms", + &copysetNodeOptions->checkLoadMarginIntervalMs)); + LOG_IF(FATAL, !conf->GetUInt32Value("copyset.sync_concurrency", + &copysetNodeOptions->syncConcurrency)); + + LOG_IF(FATAL, !conf->GetBoolValue( + "copyset.enable_odsync_when_open_chunkfile", + &copysetNodeOptions->enableOdsyncWhenOpenChunkFile)); + if (!copysetNodeOptions->enableOdsyncWhenOpenChunkFile) + { + LOG_IF(FATAL, + !conf->GetUInt64Value("copyset.sync_chunk_limits", + &copysetNodeOptions->syncChunkLimit)); + LOG_IF(FATAL, + !conf->GetUInt64Value("copyset.sync_threshold", + &copysetNodeOptions->syncThreshold)); + LOG_IF(FATAL, !conf->GetUInt32Value( + "copyset.check_syncing_interval_ms", + &copysetNodeOptions->checkSyncingIntervalMs)); + LOG_IF(FATAL, + !conf->GetUInt32Value("copyset.sync_trigger_seconds", + &copysetNodeOptions->syncTriggerSeconds)); + } + } + + void ChunkServer::InitCopyerOptions(common::Configuration *conf, + CopyerOptions *copyerOptions) + { + LOG_IF(FATAL, !conf->GetStringValue("curve.root_username", + &copyerOptions->curveUser.owner)); + LOG_IF(FATAL, !conf->GetStringValue("curve.root_password", + &copyerOptions->curveUser.password)); + LOG_IF(FATAL, !conf->GetStringValue("curve.config_path", + &copyerOptions->curveConf)); + LOG_IF(FATAL, + !conf->GetStringValue("s3.config_path", &copyerOptions->s3Conf)); + bool disableCurveClient = false; + bool disableS3Adapter = false; + LOG_IF(FATAL, !conf->GetBoolValue("clone.disable_curve_client", + &disableCurveClient)); + LOG_IF(FATAL, + !conf->GetBoolValue("clone.disable_s3_adapter", &disableS3Adapter)); + LOG_IF(FATAL, 
!conf->GetUInt64Value("curve.curve_file_timeout_s", + ©erOptions->curveFileTimeoutSec)); + + if (disableCurveClient) + { + copyerOptions->curveClient = nullptr; + } + else + { + copyerOptions->curveClient = std::make_shared(); + } + + if (disableS3Adapter) + { + copyerOptions->s3Client = nullptr; + } + else + { + copyerOptions->s3Client = std::make_shared(); + } + } + + void ChunkServer::InitCloneOptions(common::Configuration *conf, + CloneOptions *cloneOptions) + { + LOG_IF(FATAL, + !conf->GetUInt32Value("clone.thread_num", &cloneOptions->threadNum)); + LOG_IF(FATAL, !conf->GetUInt32Value("clone.queue_depth", + &cloneOptions->queueCapacity)); + } + + void ChunkServer::InitScanOptions(common::Configuration *conf, + ScanManagerOptions *scanOptions) + { + LOG_IF(FATAL, !conf->GetUInt32Value("copyset.scan_interval_sec", + &scanOptions->intervalSec)); + LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_size_byte", + &scanOptions->scanSize)); + LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", + &scanOptions->chunkMetaPageSize)); + LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_rpc_timeout_ms", + &scanOptions->timeoutMs)); + LOG_IF(FATAL, !conf->GetUInt32Value("copyset.scan_rpc_retry_times", + &scanOptions->retry)); + LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_rpc_retry_interval_us", + &scanOptions->retryIntervalUs)); + } + + void ChunkServer::InitHeartbeatOptions(common::Configuration *conf, + HeartbeatOptions *heartbeatOptions) + { + LOG_IF(FATAL, !conf->GetStringValue("chunkserver.stor_uri", + &heartbeatOptions->storeUri)); + LOG_IF(FATAL, !conf->GetStringValue("global.ip", &heartbeatOptions->ip)); + LOG_IF(FATAL, + !conf->GetUInt32Value("global.port", &heartbeatOptions->port)); + LOG_IF(FATAL, !conf->GetStringValue("mds.listen.addr", + &heartbeatOptions->mdsListenAddr)); + LOG_IF(FATAL, !conf->GetUInt32Value("mds.heartbeat_interval", + &heartbeatOptions->intervalSec)); + LOG_IF(FATAL, !conf->GetUInt32Value("mds.heartbeat_timeout", + &heartbeatOptions->timeout)); + } + + void ChunkServer::InitRegisterOptions(common::Configuration *conf, + RegisterOptions *registerOptions) + { + LOG_IF(FATAL, !conf->GetStringValue("mds.listen.addr", + ®isterOptions->mdsListenAddr)); + LOG_IF(FATAL, !conf->GetStringValue( + "global.ip", ®isterOptions->chunkserverInternalIp)); + LOG_IF(FATAL, !conf->GetBoolValue("global.enable_external_server", + ®isterOptions->enableExternalServer)); + LOG_IF(FATAL, + !conf->GetStringValue("global.external_ip", + ®isterOptions->chunkserverExternalIp)); + LOG_IF(FATAL, !conf->GetIntValue("global.port", + ®isterOptions->chunkserverPort)); + LOG_IF(FATAL, !conf->GetStringValue("chunkserver.stor_uri", + ®isterOptions->chunserverStoreUri)); + LOG_IF(FATAL, !conf->GetStringValue("chunkserver.meta_uri", + ®isterOptions->chunkserverMetaUri)); + LOG_IF(FATAL, !conf->GetStringValue("chunkserver.disk_type", + ®isterOptions->chunkserverDiskType)); + LOG_IF(FATAL, !conf->GetIntValue("mds.register_retries", + ®isterOptions->registerRetries)); + LOG_IF(FATAL, !conf->GetIntValue("mds.register_timeout", + ®isterOptions->registerTimeout)); + } + + void ChunkServer::InitTrashOptions(common::Configuration *conf, + TrashOptions *trashOptions) + { + LOG_IF(FATAL, !conf->GetStringValue("copyset.recycler_uri", + &trashOptions->trashPath)); + LOG_IF(FATAL, !conf->GetIntValue("trash.expire_afterSec", + &trashOptions->expiredAfterSec)); + LOG_IF(FATAL, !conf->GetIntValue("trash.scan_periodSec", + &trashOptions->scanPeriodSec)); } - } - - // 
=======================启动各模块==================================// - LOG(INFO) << "ChunkServer starts."; - /** - * 将模块启动放到rpc 服务启动后面,主要是为了解决内存增长的问题 - * 控制并发恢复的copyset数量,copyset恢复需要依赖rpc服务先启动 - */ - LOG_IF(FATAL, trash_->Run() != 0) - << "Failed to start trash."; - LOG_IF(FATAL, cloneManager_.Run() != 0) - << "Failed to start clone manager."; - LOG_IF(FATAL, heartbeat_.Run() != 0) - << "Failed to start heartbeat manager."; - LOG_IF(FATAL, copysetNodeManager_->Run() != 0) - << "Failed to start CopysetNodeManager."; - LOG_IF(FATAL, scanManager_.Run() != 0) - << "Failed to start scan manager."; - LOG_IF(FATAL, !chunkfilePool->StartCleaning()) - << "Failed to start file pool clean worker."; - - // =======================等待进程退出==================================// - while (!brpc::IsAskedToQuit()) { - bthread_usleep(1000000L); - } - // scanmanager stop maybe need a little while, so stop it first before stop service NOLINT - LOG(INFO) << "ChunkServer is going to quit."; - LOG_IF(ERROR, scanManager_.Fini() != 0) - << "Failed to shutdown scan manager."; - - if (registerOptions.enableExternalServer) { - externalServer.Stop(0); - externalServer.Join(); - } - - server.Stop(0); - server.Join(); - - LOG_IF(ERROR, heartbeat_.Fini() != 0) - << "Failed to shutdown heartbeat manager."; - LOG_IF(ERROR, copysetNodeManager_->Fini() != 0) - << "Failed to shutdown CopysetNodeManager."; - LOG_IF(ERROR, cloneManager_.Fini() != 0) - << "Failed to shutdown clone manager."; - LOG_IF(ERROR, copyer->Fini() != 0) - << "Failed to shutdown clone copyer."; - LOG_IF(ERROR, trash_->Fini() != 0) - << "Failed to shutdown trash."; - LOG_IF(ERROR, !chunkfilePool->StopCleaning()) - << "Failed to shutdown file pool clean worker."; - concurrentapply.Stop(); - - google::ShutdownGoogleLogging(); - return 0; -} - -void ChunkServer::Stop() { - brpc::AskToQuit(); -} - -void ChunkServer::InitChunkFilePoolOptions( - common::Configuration *conf, FilePoolOptions *chunkFilePoolOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value("global.chunk_size", - &chunkFilePoolOptions->fileSize)); - - LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", - &chunkFilePoolOptions->metaPageSize)) - << "Not found `global.meta_page_size` in config file"; - - LOG_IF(FATAL, !conf->GetUInt32Value("global.block_size", - &chunkFilePoolOptions->blockSize)) - << "Not found `global.block_size` in config file"; - - LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.cpmeta_file_size", - &chunkFilePoolOptions->metaFileSize)); - LOG_IF(FATAL, !conf->GetBoolValue( - "chunkfilepool.enable_get_chunk_from_pool", - &chunkFilePoolOptions->getFileFromPool)); - - if (chunkFilePoolOptions->getFileFromPool == false) { - std::string chunkFilePoolUri; - LOG_IF(FATAL, !conf->GetStringValue( - "chunkfilepool.chunk_file_pool_dir", &chunkFilePoolUri)); - ::memcpy(chunkFilePoolOptions->filePoolDir, - chunkFilePoolUri.c_str(), - chunkFilePoolUri.size()); - } else { - std::string metaUri; - LOG_IF(FATAL, !conf->GetStringValue( - "chunkfilepool.meta_path", &metaUri)); - ::memcpy( - chunkFilePoolOptions->metaPath, metaUri.c_str(), metaUri.size()); - - std::string chunkFilePoolUri; - LOG_IF(FATAL, !conf->GetStringValue("chunkfilepool.chunk_file_pool_dir", - &chunkFilePoolUri)); - - ::memcpy(chunkFilePoolOptions->filePoolDir, chunkFilePoolUri.c_str(), - chunkFilePoolUri.size()); - std::string pool_size; - LOG_IF(FATAL, !conf->GetStringValue( - "chunkfilepool.chunk_file_pool_size", &pool_size)); - LOG_IF(FATAL, !curve::common::ToNumbericByte( - pool_size, &chunkFilePoolOptions->filePoolSize)); 
- LOG_IF(FATAL, - !conf->GetBoolValue("chunkfilepool.allocated_by_percent", - &chunkFilePoolOptions->allocatedByPercent)); - LOG_IF(FATAL, - !conf->GetUInt32Value("chunkfilepool.allocate_percent", - &chunkFilePoolOptions->allocatedPercent)); - LOG_IF(FATAL, !conf->GetUInt32Value( - "chunkfilepool.chunk_file_pool_format_thread_num", - &chunkFilePoolOptions->formatThreadNum)); - LOG_IF(FATAL, !conf->GetBoolValue("chunkfilepool.clean.enable", - &chunkFilePoolOptions->needClean)); - LOG_IF(FATAL, - !conf->GetUInt32Value("chunkfilepool.clean.bytes_per_write", - &chunkFilePoolOptions->bytesPerWrite)); - LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.clean.throttle_iops", - &chunkFilePoolOptions->iops4clean)); - - std::string copysetUri; - LOG_IF(FATAL, - !conf->GetStringValue("copyset.raft_snapshot_uri", ©setUri)); - curve::common::UriParser::ParseUri(copysetUri, - &chunkFilePoolOptions->copysetDir); - - std::string recycleUri; - LOG_IF(FATAL, - !conf->GetStringValue("copyset.recycler_uri", &recycleUri)); - curve::common::UriParser::ParseUri(recycleUri, - &chunkFilePoolOptions->recycleDir); - - bool useChunkFilePoolAsWalPool; - LOG_IF(FATAL, !conf->GetBoolValue("walfilepool.use_chunk_file_pool", - &useChunkFilePoolAsWalPool)); - - chunkFilePoolOptions->isAllocated = [=](const std::string& filename) { - return Trash::IsChunkOrSnapShotFile(filename) || - (useChunkFilePoolAsWalPool && Trash::IsWALFile(filename)); - }; - - if (0 == chunkFilePoolOptions->bytesPerWrite - || chunkFilePoolOptions->bytesPerWrite > 1 * 1024 * 1024 - || 0 != chunkFilePoolOptions->bytesPerWrite % 4096) { - LOG(FATAL) << "The bytesPerWrite must be in [1, 1048576] " - << "and should be aligned to 4K, " - << "but now is: " << chunkFilePoolOptions->bytesPerWrite; + + void ChunkServer::InitMetricOptions(common::Configuration *conf, + ChunkServerMetricOptions *metricOptions) + { + LOG_IF(FATAL, !conf->GetUInt32Value("global.port", &metricOptions->port)); + LOG_IF(FATAL, !conf->GetStringValue("global.ip", &metricOptions->ip)); + LOG_IF(FATAL, + !conf->GetBoolValue("metric.onoff", &metricOptions->collectMetric)); } - } -} - -void ChunkServer::InitConcurrentApplyOptions(common::Configuration *conf, - ConcurrentApplyOption *concurrentApplyOptions) { - LOG_IF(FATAL, !conf->GetIntValue( - "rconcurrentapply.size", &concurrentApplyOptions->rconcurrentsize)); - LOG_IF(FATAL, !conf->GetIntValue( - "wconcurrentapply.size", &concurrentApplyOptions->wconcurrentsize)); - LOG_IF(FATAL, !conf->GetIntValue( - "rconcurrentapply.queuedepth", &concurrentApplyOptions->rqueuedepth)); - LOG_IF(FATAL, !conf->GetIntValue( - "wconcurrentapply.queuedepth", &concurrentApplyOptions->wqueuedepth)); -} - -void ChunkServer::InitWalFilePoolOptions( - common::Configuration *conf, FilePoolOptions *walPoolOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.segment_size", - &walPoolOptions->fileSize)); - LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.metapage_size", - &walPoolOptions->metaPageSize)); - LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.meta_file_size", - &walPoolOptions->metaFileSize)); - LOG_IF(FATAL, !conf->GetBoolValue( - "walfilepool.enable_get_segment_from_pool", - &walPoolOptions->getFileFromPool)); - - if (walPoolOptions->getFileFromPool == false) { - std::string filePoolUri; - LOG_IF(FATAL, !conf->GetStringValue( - "walfilepool.file_pool_dir", &filePoolUri)); - ::memcpy(walPoolOptions->filePoolDir, - filePoolUri.c_str(), - filePoolUri.size()); - } else { - std::string metaUri; - LOG_IF(FATAL, !conf->GetStringValue( - 
"walfilepool.meta_path", &metaUri)); - - std::string pool_size; - LOG_IF(FATAL, !conf->GetStringValue("walfilepool.chunk_file_pool_size", - &pool_size)); - LOG_IF(FATAL, !curve::common::ToNumbericByte( - pool_size, &walPoolOptions->filePoolSize)); - LOG_IF(FATAL, !conf->GetUInt64Value("walfilepool.wal_file_pool_size", - &walPoolOptions->filePoolSize)); - LOG_IF(FATAL, !conf->GetBoolValue("walfilepool.allocated_by_percent", - &walPoolOptions->allocatedByPercent)); - LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.allocated_percent", - &walPoolOptions->allocatedPercent)); - LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.thread_num", - &walPoolOptions->formatThreadNum)); - - std::string copysetUri; - LOG_IF(FATAL, - !conf->GetStringValue("copyset.raft_log_uri", ©setUri)); - curve::common::UriParser::ParseUri(copysetUri, - &walPoolOptions->copysetDir); - - std::string recycleUri; - LOG_IF(FATAL, - !conf->GetStringValue("copyset.recycler_uri", &recycleUri)); - curve::common::UriParser::ParseUri(recycleUri, - &walPoolOptions->recycleDir); - - walPoolOptions->isAllocated = [](const string& filename) { - return Trash::IsWALFile(filename); - }; - ::memcpy( - walPoolOptions->metaPath, metaUri.c_str(), metaUri.size()); - } -} - -void ChunkServer::InitCopysetNodeOptions( - common::Configuration *conf, CopysetNodeOptions *copysetNodeOptions) { - LOG_IF(FATAL, !conf->GetStringValue("global.ip", ©setNodeOptions->ip)); - LOG_IF(FATAL, !conf->GetUInt32Value( - "global.port", ©setNodeOptions->port)); - if (copysetNodeOptions->port <= 0 || copysetNodeOptions->port >= 65535) { - LOG(FATAL) << "Invalid server port provided: " - << copysetNodeOptions->port; - } - - LOG_IF(FATAL, !conf->GetIntValue("copyset.election_timeout_ms", - ©setNodeOptions->electionTimeoutMs)); - LOG_IF(FATAL, !conf->GetIntValue("copyset.snapshot_interval_s", - ©setNodeOptions->snapshotIntervalS)); - bool ret = conf->GetBoolValue("copyset.enable_lease_read", - ©setNodeOptions->enbaleLeaseRead); - LOG_IF(WARNING, ret == false) - << "config no copyset.enable_lease_read info, using default value " - << copysetNodeOptions->enbaleLeaseRead; - LOG_IF(FATAL, !conf->GetIntValue("copyset.catchup_margin", - ©setNodeOptions->catchupMargin)); - LOG_IF(FATAL, !conf->GetStringValue("copyset.chunk_data_uri", - ©setNodeOptions->chunkDataUri)); - LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_log_uri", - ©setNodeOptions->logUri)); - LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_meta_uri", - ©setNodeOptions->raftMetaUri)); - LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_snapshot_uri", - ©setNodeOptions->raftSnapshotUri)); - LOG_IF(FATAL, !conf->GetStringValue("copyset.recycler_uri", - ©setNodeOptions->recyclerUri)); - LOG_IF(FATAL, !conf->GetUInt32Value("global.chunk_size", - ©setNodeOptions->maxChunkSize)); - LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", - ©setNodeOptions->metaPageSize)); - LOG_IF(FATAL, !conf->GetUInt32Value("global.block_size", - ©setNodeOptions->blockSize)); - LOG_IF(FATAL, !conf->GetUInt32Value("global.location_limit", - ©setNodeOptions->locationLimit)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.load_concurrency", - ©setNodeOptions->loadConcurrency)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_retrytimes", - ©setNodeOptions->checkRetryTimes)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.finishload_margin", - ©setNodeOptions->finishLoadMargin)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_loadmargin_interval_ms", - ©setNodeOptions->checkLoadMarginIntervalMs)); - LOG_IF(FATAL, 
!conf->GetUInt32Value("copyset.sync_concurrency", - ©setNodeOptions->syncConcurrency)); - - LOG_IF(FATAL, !conf->GetBoolValue( - "copyset.enable_odsync_when_open_chunkfile", - ©setNodeOptions->enableOdsyncWhenOpenChunkFile)); - if (!copysetNodeOptions->enableOdsyncWhenOpenChunkFile) { - LOG_IF(FATAL, !conf->GetUInt64Value("copyset.sync_chunk_limits", - ©setNodeOptions->syncChunkLimit)); - LOG_IF(FATAL, !conf->GetUInt64Value("copyset.sync_threshold", - ©setNodeOptions->syncThreshold)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_syncing_interval_ms", - ©setNodeOptions->checkSyncingIntervalMs)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.sync_trigger_seconds", - ©setNodeOptions->syncTriggerSeconds)); - } -} - -void ChunkServer::InitCopyerOptions( - common::Configuration *conf, CopyerOptions *copyerOptions) { - LOG_IF(FATAL, !conf->GetStringValue("curve.root_username", - ©erOptions->curveUser.owner)); - LOG_IF(FATAL, !conf->GetStringValue("curve.root_password", - ©erOptions->curveUser.password)); - LOG_IF(FATAL, !conf->GetStringValue("curve.config_path", - ©erOptions->curveConf)); - LOG_IF(FATAL, - !conf->GetStringValue("s3.config_path", ©erOptions->s3Conf)); - bool disableCurveClient = false; - bool disableS3Adapter = false; - LOG_IF(FATAL, !conf->GetBoolValue("clone.disable_curve_client", - &disableCurveClient)); - LOG_IF(FATAL, !conf->GetBoolValue("clone.disable_s3_adapter", - &disableS3Adapter)); - LOG_IF(FATAL, !conf->GetUInt64Value("curve.curve_file_timeout_s", - ©erOptions->curveFileTimeoutSec)); - - if (disableCurveClient) { - copyerOptions->curveClient = nullptr; - } else { - copyerOptions->curveClient = std::make_shared(); - } - - if (disableS3Adapter) { - copyerOptions->s3Client = nullptr; - } else { - copyerOptions->s3Client = std::make_shared(); - } -} - -void ChunkServer::InitCloneOptions( - common::Configuration *conf, CloneOptions *cloneOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value("clone.thread_num", - &cloneOptions->threadNum)); - LOG_IF(FATAL, !conf->GetUInt32Value("clone.queue_depth", - &cloneOptions->queueCapacity)); -} - -void ChunkServer::InitScanOptions( - common::Configuration *conf, ScanManagerOptions *scanOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.scan_interval_sec", - &scanOptions->intervalSec)); - LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_size_byte", - &scanOptions->scanSize)); - LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", - &scanOptions->chunkMetaPageSize)); - LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_rpc_timeout_ms", - &scanOptions->timeoutMs)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.scan_rpc_retry_times", - &scanOptions->retry)); - LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_rpc_retry_interval_us", - &scanOptions->retryIntervalUs)); -} - -void ChunkServer::InitHeartbeatOptions( - common::Configuration *conf, HeartbeatOptions *heartbeatOptions) { - LOG_IF(FATAL, !conf->GetStringValue("chunkserver.stor_uri", - &heartbeatOptions->storeUri)); - LOG_IF(FATAL, !conf->GetStringValue("global.ip", &heartbeatOptions->ip)); - LOG_IF(FATAL, !conf->GetUInt32Value("global.port", - &heartbeatOptions->port)); - LOG_IF(FATAL, !conf->GetStringValue("mds.listen.addr", - &heartbeatOptions->mdsListenAddr)); - LOG_IF(FATAL, !conf->GetUInt32Value("mds.heartbeat_interval", - &heartbeatOptions->intervalSec)); - LOG_IF(FATAL, !conf->GetUInt32Value("mds.heartbeat_timeout", - &heartbeatOptions->timeout)); -} - -void ChunkServer::InitRegisterOptions( - common::Configuration *conf, RegisterOptions 
*registerOptions) { - LOG_IF(FATAL, !conf->GetStringValue("mds.listen.addr", - ®isterOptions->mdsListenAddr)); - LOG_IF(FATAL, !conf->GetStringValue("global.ip", - ®isterOptions->chunkserverInternalIp)); - LOG_IF(FATAL, !conf->GetBoolValue("global.enable_external_server", - ®isterOptions->enableExternalServer)); - LOG_IF(FATAL, !conf->GetStringValue("global.external_ip", - ®isterOptions->chunkserverExternalIp)); - LOG_IF(FATAL, !conf->GetIntValue("global.port", - ®isterOptions->chunkserverPort)); - LOG_IF(FATAL, !conf->GetStringValue("chunkserver.stor_uri", - ®isterOptions->chunserverStoreUri)); - LOG_IF(FATAL, !conf->GetStringValue("chunkserver.meta_uri", - ®isterOptions->chunkserverMetaUri)); - LOG_IF(FATAL, !conf->GetStringValue("chunkserver.disk_type", - ®isterOptions->chunkserverDiskType)); - LOG_IF(FATAL, !conf->GetIntValue("mds.register_retries", - ®isterOptions->registerRetries)); - LOG_IF(FATAL, !conf->GetIntValue("mds.register_timeout", - ®isterOptions->registerTimeout)); -} - -void ChunkServer::InitTrashOptions( - common::Configuration *conf, TrashOptions *trashOptions) { - LOG_IF(FATAL, !conf->GetStringValue( - "copyset.recycler_uri", &trashOptions->trashPath)); - LOG_IF(FATAL, !conf->GetIntValue( - "trash.expire_afterSec", &trashOptions->expiredAfterSec)); - LOG_IF(FATAL, !conf->GetIntValue( - "trash.scan_periodSec", &trashOptions->scanPeriodSec)); -} - -void ChunkServer::InitMetricOptions( - common::Configuration *conf, ChunkServerMetricOptions *metricOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value( - "global.port", &metricOptions->port)); - LOG_IF(FATAL, !conf->GetStringValue( - "global.ip", &metricOptions->ip)); - LOG_IF(FATAL, !conf->GetBoolValue( - "metric.onoff", &metricOptions->collectMetric)); -} - -void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { - // 如果命令行有设置, 命令行覆盖配置文件中的字段 - google::CommandLineFlagInfo info; - if (GetCommandLineFlagInfo("chunkServerIp", &info) && !info.is_default) { - conf->SetStringValue("global.ip", FLAGS_chunkServerIp); - } else { - LOG(FATAL) - << "chunkServerIp must be set when run chunkserver in command."; - } - if (GetCommandLineFlagInfo("enableExternalServer", &info) && - !info.is_default) { - conf->SetBoolValue( - "global.enable_external_server", FLAGS_enableExternalServer); - } - if (GetCommandLineFlagInfo("chunkServerExternalIp", &info) && - !info.is_default) { - conf->SetStringValue("global.external_ip", FLAGS_chunkServerExternalIp); - } - - if (GetCommandLineFlagInfo("chunkServerPort", &info) && !info.is_default) { - conf->SetIntValue("global.port", FLAGS_chunkServerPort); - } else { - LOG(FATAL) - << "chunkServerPort must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("chunkServerStoreUri", &info) && - !info.is_default) { - conf->SetStringValue("chunkserver.stor_uri", FLAGS_chunkServerStoreUri); - } else { - LOG(FATAL) - << "chunkServerStoreUri must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("chunkServerMetaUri", &info) && - !info.is_default) { - conf->SetStringValue("chunkserver.meta_uri", FLAGS_chunkServerMetaUri); - } else { - LOG(FATAL) - << "chunkServerMetaUri must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("copySetUri", &info) && !info.is_default) { - conf->SetStringValue("copyset.chunk_data_uri", FLAGS_copySetUri); - conf->SetStringValue("copyset.raft_log_uri", FLAGS_copySetUri); - conf->SetStringValue("copyset.raft_snapshot_uri", FLAGS_copySetUri); - conf->SetStringValue("copyset.raft_meta_uri", 
FLAGS_copySetUri); - } else { - LOG(FATAL) - << "copySetUri must be set when run chunkserver in command."; - } - if (GetCommandLineFlagInfo("raftSnapshotUri", &info) && !info.is_default) { - conf->SetStringValue( - "copyset.raft_snapshot_uri", FLAGS_raftSnapshotUri); - } else { - LOG(FATAL) - << "raftSnapshotUri must be set when run chunkserver in command."; - } - if (GetCommandLineFlagInfo("raftLogUri", &info) && !info.is_default) { - conf->SetStringValue( - "copyset.raft_log_uri", FLAGS_raftLogUri); - } else { - LOG(FATAL) - << "raftLogUri must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("recycleUri", &info) && - !info.is_default) { - conf->SetStringValue("copyset.recycler_uri", FLAGS_recycleUri); - } else { - LOG(FATAL) - << "recycleUri must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("chunkFilePoolDir", &info) && - !info.is_default) { - conf->SetStringValue( - "chunkfilepool.chunk_file_pool_dir", FLAGS_chunkFilePoolDir); - } else { - LOG(FATAL) - << "chunkFilePoolDir must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("chunkFilePoolAllocatedPercent", &info)) { - conf->SetUInt32Value("chunkfilepool.allocate_percent", - FLAGS_chunkFilePoolAllocatedPercent); - } - - if (GetCommandLineFlagInfo("chunkFormatThreadNum", &info)) { - conf->SetUInt64Value("chunkfilepool.chunk_file_pool_format_thread_num", - FLAGS_chunkFormatThreadNum); - } - - if (GetCommandLineFlagInfo("chunkFilePoolMetaPath", &info) && - !info.is_default) { - conf->SetStringValue( - "chunkfilepool.meta_path", FLAGS_chunkFilePoolMetaPath); - } else { - LOG(FATAL) - << "chunkFilePoolMetaPath must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("walFilePoolDir", &info) && - !info.is_default) { - conf->SetStringValue( - "walfilepool.file_pool_dir", FLAGS_walFilePoolDir); - } else { - LOG(FATAL) - << "walFilePoolDir must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("walFilePoolMetaPath", &info) && - !info.is_default) { - conf->SetStringValue( - "walfilepool.meta_path", FLAGS_walFilePoolMetaPath); - } else { - LOG(FATAL) - << "walFilePoolMetaPath must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("mdsListenAddr", &info) && !info.is_default) { - conf->SetStringValue("mds.listen.addr", FLAGS_mdsListenAddr); - } - - // 设置日志存放文件夹 - if (FLAGS_log_dir.empty()) { - if (!conf->GetStringValue("chunkserver.common.logDir", &FLAGS_log_dir)) { // NOLINT - LOG(WARNING) << "no chunkserver.common.logDir in " << FLAGS_conf - << ", will log to /tmp"; + + void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) + { + // If there are settings on the command line, the command line overwrites + // the fields in the configuration file + google::CommandLineFlagInfo info; + if (GetCommandLineFlagInfo("chunkServerIp", &info) && !info.is_default) + { + conf->SetStringValue("global.ip", FLAGS_chunkServerIp); + } + else + { + LOG(FATAL) + << "chunkServerIp must be set when run chunkserver in command."; + } + if (GetCommandLineFlagInfo("enableExternalServer", &info) && + !info.is_default) + { + conf->SetBoolValue("global.enable_external_server", + FLAGS_enableExternalServer); + } + if (GetCommandLineFlagInfo("chunkServerExternalIp", &info) && + !info.is_default) + { + conf->SetStringValue("global.external_ip", FLAGS_chunkServerExternalIp); + } + + if (GetCommandLineFlagInfo("chunkServerPort", &info) && !info.is_default) + { + conf->SetIntValue("global.port", 
FLAGS_chunkServerPort); + } + else + { + LOG(FATAL) + << "chunkServerPort must be set when run chunkserver in command."; + } + + if (GetCommandLineFlagInfo("chunkServerStoreUri", &info) && + !info.is_default) + { + conf->SetStringValue("chunkserver.stor_uri", FLAGS_chunkServerStoreUri); + } + else + { + LOG(FATAL) << "chunkServerStoreUri must be set when run chunkserver in " + "command."; + } + + if (GetCommandLineFlagInfo("chunkServerMetaUri", &info) && + !info.is_default) + { + conf->SetStringValue("chunkserver.meta_uri", FLAGS_chunkServerMetaUri); + } + else + { + LOG(FATAL) << "chunkServerMetaUri must be set when run chunkserver in " + "command."; + } + + if (GetCommandLineFlagInfo("copySetUri", &info) && !info.is_default) + { + conf->SetStringValue("copyset.chunk_data_uri", FLAGS_copySetUri); + conf->SetStringValue("copyset.raft_log_uri", FLAGS_copySetUri); + conf->SetStringValue("copyset.raft_snapshot_uri", FLAGS_copySetUri); + conf->SetStringValue("copyset.raft_meta_uri", FLAGS_copySetUri); + } + else + { + LOG(FATAL) << "copySetUri must be set when run chunkserver in command."; + } + if (GetCommandLineFlagInfo("raftSnapshotUri", &info) && !info.is_default) + { + conf->SetStringValue("copyset.raft_snapshot_uri", + FLAGS_raftSnapshotUri); + } + else + { + LOG(FATAL) + << "raftSnapshotUri must be set when run chunkserver in command."; + } + if (GetCommandLineFlagInfo("raftLogUri", &info) && !info.is_default) + { + conf->SetStringValue("copyset.raft_log_uri", FLAGS_raftLogUri); + } + else + { + LOG(FATAL) << "raftLogUri must be set when run chunkserver in command."; + } + + if (GetCommandLineFlagInfo("recycleUri", &info) && !info.is_default) + { + conf->SetStringValue("copyset.recycler_uri", FLAGS_recycleUri); + } + else + { + LOG(FATAL) << "recycleUri must be set when run chunkserver in command."; + } + + if (GetCommandLineFlagInfo("chunkFilePoolDir", &info) && !info.is_default) + { + conf->SetStringValue("chunkfilepool.chunk_file_pool_dir", + FLAGS_chunkFilePoolDir); + } + else + { + LOG(FATAL) + << "chunkFilePoolDir must be set when run chunkserver in command."; + } + + if (GetCommandLineFlagInfo("chunkFilePoolAllocatedPercent", &info)) + { + conf->SetUInt32Value("chunkfilepool.allocate_percent", + FLAGS_chunkFilePoolAllocatedPercent); + } + + if (GetCommandLineFlagInfo("chunkFormatThreadNum", &info)) + { + conf->SetUInt64Value("chunkfilepool.chunk_file_pool_format_thread_num", + FLAGS_chunkFormatThreadNum); + } + + if (GetCommandLineFlagInfo("chunkFilePoolMetaPath", &info) && + !info.is_default) + { + conf->SetStringValue("chunkfilepool.meta_path", + FLAGS_chunkFilePoolMetaPath); + } + else + { + LOG(FATAL) << "chunkFilePoolMetaPath must be set when run chunkserver " + "in command."; + } + + if (GetCommandLineFlagInfo("walFilePoolDir", &info) && !info.is_default) + { + conf->SetStringValue("walfilepool.file_pool_dir", FLAGS_walFilePoolDir); + } + else + { + LOG(FATAL) + << "walFilePoolDir must be set when run chunkserver in command."; + } + + if (GetCommandLineFlagInfo("walFilePoolMetaPath", &info) && + !info.is_default) + { + conf->SetStringValue("walfilepool.meta_path", + FLAGS_walFilePoolMetaPath); + } + else + { + LOG(FATAL) << "walFilePoolMetaPath must be set when run chunkserver in " + "command."; + } + + if (GetCommandLineFlagInfo("mdsListenAddr", &info) && !info.is_default) + { + conf->SetStringValue("mds.listen.addr", FLAGS_mdsListenAddr); + } + + // Set log storage folder + if (FLAGS_log_dir.empty()) + { + if (!conf->GetStringValue("chunkserver.common.logDir", + 
&FLAGS_log_dir)) + { // NOLINT + LOG(WARNING) << "no chunkserver.common.logDir in " << FLAGS_conf + << ", will log to /tmp"; + } + } + + if (GetCommandLineFlagInfo("enableChunkfilepool", &info) && + !info.is_default) + { + conf->SetBoolValue("chunkfilepool.enable_get_chunk_from_pool", + FLAGS_enableChunkfilepool); + } + + if (GetCommandLineFlagInfo("enableWalfilepool", &info) && + !info.is_default) + { + conf->SetBoolValue("walfilepool.enable_get_segment_from_pool", + FLAGS_enableWalfilepool); + } + + if (GetCommandLineFlagInfo("copysetLoadConcurrency", &info) && + !info.is_default) + { + conf->SetIntValue("copyset.load_concurrency", + FLAGS_copysetLoadConcurrency); + } } - } - - if (GetCommandLineFlagInfo("enableChunkfilepool", &info) && - !info.is_default) { - conf->SetBoolValue("chunkfilepool.enable_get_chunk_from_pool", - FLAGS_enableChunkfilepool); - } - - if (GetCommandLineFlagInfo("enableWalfilepool", &info) && - !info.is_default) { - conf->SetBoolValue("walfilepool.enable_get_segment_from_pool", - FLAGS_enableWalfilepool); - } - - if (GetCommandLineFlagInfo("copysetLoadConcurrency", &info) && - !info.is_default) { - conf->SetIntValue("copyset.load_concurrency", - FLAGS_copysetLoadConcurrency); - } -} - -int ChunkServer::GetChunkServerMetaFromLocal( - const std::string &storeUri, - const std::string &metaUri, - const std::shared_ptr &fs, - ChunkServerMetadata *metadata) { - std::string proto = UriParser::GetProtocolFromUri(storeUri); - if (proto != "local") { - LOG(ERROR) << "Datastore protocal " << proto << " is not supported yet"; - return -1; - } - // 从配置文件中获取chunkserver元数据的文件路径 - proto = UriParser::GetProtocolFromUri(metaUri); - if (proto != "local") { - LOG(ERROR) << "Chunkserver meta protocal " - << proto << " is not supported yet"; - return -1; - } - // 元数据文件已经存在 - if (fs->FileExists(UriParser::GetPathFromUri(metaUri).c_str())) { - // 获取文件内容 - if (ReadChunkServerMeta(fs, metaUri, metadata) != 0) { - LOG(ERROR) << "Fail to read persisted chunkserver meta data"; + + int ChunkServer::GetChunkServerMetaFromLocal( + const std::string &storeUri, const std::string &metaUri, + const std::shared_ptr &fs, ChunkServerMetadata *metadata) + { + std::string proto = UriParser::GetProtocolFromUri(storeUri); + if (proto != "local") + { + LOG(ERROR) << "Datastore protocal " << proto << " is not supported yet"; + return -1; + } + // Obtain the file path for chunkserver metadata from the configuration file + proto = UriParser::GetProtocolFromUri(metaUri); + if (proto != "local") + { + LOG(ERROR) << "Chunkserver meta protocal " << proto + << " is not supported yet"; + return -1; + } + // The metadata file already exists + if (fs->FileExists(UriParser::GetPathFromUri(metaUri).c_str())) + { + // Get File Content + if (ReadChunkServerMeta(fs, metaUri, metadata) != 0) + { + LOG(ERROR) << "Fail to read persisted chunkserver meta data"; + return -1; + } + + LOG(INFO) << "Found persisted chunkserver data, skipping registration," + << " chunkserver id: " << metadata->id() + << ", token: " << metadata->token(); + return 0; + } return -1; } - LOG(INFO) << "Found persisted chunkserver data, skipping registration," - << " chunkserver id: " << metadata->id() - << ", token: " << metadata->token(); - return 0; - } - return -1; -} - -int ChunkServer::ReadChunkServerMeta(const std::shared_ptr &fs, - const std::string &metaUri, ChunkServerMetadata *metadata) { - int fd; - std::string metaFile = UriParser::GetPathFromUri(metaUri); - - fd = fs->Open(metaFile.c_str(), O_RDONLY); - if (fd < 0) { - LOG(ERROR) << 
"Failed to open Chunkserver metadata file " << metaFile; - return -1; - } - - #define METAFILE_MAX_SIZE 4096 - int size; - char json[METAFILE_MAX_SIZE] = {0}; - - size = fs->Read(fd, json, 0, METAFILE_MAX_SIZE); - if (size < 0) { - LOG(ERROR) << "Failed to read Chunkserver metadata file"; - return -1; - } else if (size >= METAFILE_MAX_SIZE) { - LOG(ERROR) << "Chunkserver metadata file is too large: " << size; - return -1; - } - if (fs->Close(fd)) { - LOG(ERROR) << "Failed to close chunkserver metadata file"; - return -1; - } - - if (!ChunkServerMetaHelper::DecodeChunkServerMeta(json, metadata)) { - LOG(ERROR) << "Failed to decode chunkserver meta: " << json; - return -1; - } - - return 0; -} - -} // namespace chunkserver -} // namespace curve + int ChunkServer::ReadChunkServerMeta(const std::shared_ptr &fs, + const std::string &metaUri, + ChunkServerMetadata *metadata) + { + int fd; + std::string metaFile = UriParser::GetPathFromUri(metaUri); + + fd = fs->Open(metaFile.c_str(), O_RDONLY); + if (fd < 0) + { + LOG(ERROR) << "Failed to open Chunkserver metadata file " << metaFile; + return -1; + } + +#define METAFILE_MAX_SIZE 4096 + int size; + char json[METAFILE_MAX_SIZE] = {0}; + + size = fs->Read(fd, json, 0, METAFILE_MAX_SIZE); + if (size < 0) + { + LOG(ERROR) << "Failed to read Chunkserver metadata file"; + return -1; + } + else if (size >= METAFILE_MAX_SIZE) + { + LOG(ERROR) << "Chunkserver metadata file is too large: " << size; + return -1; + } + if (fs->Close(fd)) + { + LOG(ERROR) << "Failed to close chunkserver metadata file"; + return -1; + } + + if (!ChunkServerMetaHelper::DecodeChunkServerMeta(json, metadata)) + { + LOG(ERROR) << "Failed to decode chunkserver meta: " << json; + return -1; + } + + return 0; + } + + } // namespace chunkserver +} // namespace curve diff --git a/src/chunkserver/chunkserver.h b/src/chunkserver/chunkserver.h index b9e9005545..6698281fec 100644 --- a/src/chunkserver/chunkserver.h +++ b/src/chunkserver/chunkserver.h @@ -23,18 +23,19 @@ #ifndef SRC_CHUNKSERVER_CHUNKSERVER_H_ #define SRC_CHUNKSERVER_CHUNKSERVER_H_ -#include #include -#include "src/common/configuration.h" +#include + +#include "src/chunkserver/chunkserver_metrics.h" +#include "src/chunkserver/clone_manager.h" +#include "src/chunkserver/concurrent_apply/concurrent_apply.h" #include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/heartbeat.h" -#include "src/chunkserver/scan_manager.h" -#include "src/chunkserver/clone_manager.h" #include "src/chunkserver/register.h" -#include "src/chunkserver/trash.h" -#include "src/chunkserver/chunkserver_metrics.h" -#include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "src/chunkserver/scan_manager.h" #include "src/chunkserver/scan_service.h" +#include "src/chunkserver/trash.h" +#include "src/common/configuration.h" using ::curve::chunkserver::concurrent::ConcurrentApplyOption; @@ -43,81 +44,84 @@ namespace chunkserver { class ChunkServer { public: /** - * @brief 初始化Chunkserve各子模块 + * @brief Initialize Chunkserve sub modules * - * @param[in] argc 命令行参数总数 - * @param[in] argv 命令行参数列表 + * @param[in] argc Total number of command line arguments + * @param[in] argv command line argument list * - * @return 0表示成功,非0失败 + * @return 0 indicates success, non 0 indicates failure */ int Run(int argc, char** argv); /** - * @brief 停止chunkserver,结束各子模块 + * @brief: Stop chunkserver and end each sub module */ void Stop(); private: - void InitChunkFilePoolOptions(common::Configuration *conf, - FilePoolOptions *chunkFilePoolOptions); + void 
InitChunkFilePoolOptions(common::Configuration* conf, + FilePoolOptions* chunkFilePoolOptions); - void InitWalFilePoolOptions(common::Configuration *conf, - FilePoolOptions *walPoolOption); + void InitWalFilePoolOptions(common::Configuration* conf, + FilePoolOptions* walPoolOption); - void InitConcurrentApplyOptions(common::Configuration *conf, - ConcurrentApplyOption *concurrentApplyOption); + void InitConcurrentApplyOptions( + common::Configuration* conf, + ConcurrentApplyOption* concurrentApplyOption); - void InitCopysetNodeOptions(common::Configuration *conf, - CopysetNodeOptions *copysetNodeOptions); + void InitCopysetNodeOptions(common::Configuration* conf, + CopysetNodeOptions* copysetNodeOptions); - void InitCopyerOptions(common::Configuration *conf, - CopyerOptions *copyerOptions); + void InitCopyerOptions(common::Configuration* conf, + CopyerOptions* copyerOptions); - void InitCloneOptions(common::Configuration *conf, - CloneOptions *cloneOptions); + void InitCloneOptions(common::Configuration* conf, + CloneOptions* cloneOptions); - void InitScanOptions(common::Configuration *conf, - ScanManagerOptions *scanOptions); + void InitScanOptions(common::Configuration* conf, + ScanManagerOptions* scanOptions); - void InitHeartbeatOptions(common::Configuration *conf, - HeartbeatOptions *heartbeatOptions); + void InitHeartbeatOptions(common::Configuration* conf, + HeartbeatOptions* heartbeatOptions); - void InitRegisterOptions(common::Configuration *conf, - RegisterOptions *registerOptions); + void InitRegisterOptions(common::Configuration* conf, + RegisterOptions* registerOptions); - void InitTrashOptions(common::Configuration *conf, - TrashOptions *trashOptions); + void InitTrashOptions(common::Configuration* conf, + TrashOptions* trashOptions); - void InitMetricOptions(common::Configuration *conf, - ChunkServerMetricOptions *metricOptions); + void InitMetricOptions(common::Configuration* conf, + ChunkServerMetricOptions* metricOptions); - void LoadConfigFromCmdline(common::Configuration *conf); + void LoadConfigFromCmdline(common::Configuration* conf); - int GetChunkServerMetaFromLocal(const std::string &storeUri, - const std::string &metaUri, - const std::shared_ptr &fs, - ChunkServerMetadata *metadata); + int GetChunkServerMetaFromLocal(const std::string& storeUri, + const std::string& metaUri, + const std::shared_ptr& fs, + ChunkServerMetadata* metadata); - int ReadChunkServerMeta(const std::shared_ptr &fs, - const std::string &metaUri, ChunkServerMetadata *metadata); + int ReadChunkServerMeta(const std::shared_ptr& fs, + const std::string& metaUri, + ChunkServerMetadata* metadata); private: - // copysetNodeManager_ 管理chunkserver上所有copysetNode + // copysetNodeManager_ Manage all copysetNodes on the chunkserver CopysetNodeManager* copysetNodeManager_; - // cloneManager_ 管理克隆任务 + // cloneManager_ Manage Clone Tasks CloneManager cloneManager_; // scan copyset manager ScanManager scanManager_; - // heartbeat_ 负责向mds定期发送心跳,并下发心跳中任务 + // heartbeat_ Responsible for regularly sending heartbeat to MDS and issuing + // tasks in the heartbeat Heartbeat heartbeat_; - // trash_ 定期回收垃圾站中的物理空间 + // trash_ Regularly recycle physical space in the garbage bin std::shared_ptr trash_; - // install snapshot流控 + // install snapshot flow control scoped_refptr snapshotThrottle_; }; @@ -125,4 +129,3 @@ class ChunkServer { } // namespace curve #endif // SRC_CHUNKSERVER_CHUNKSERVER_H_ - diff --git a/src/chunkserver/chunkserver_helper.cpp b/src/chunkserver/chunkserver_helper.cpp index cf12df7f67..96afcf39e8 
100644 --- a/src/chunkserver/chunkserver_helper.cpp +++ b/src/chunkserver/chunkserver_helper.cpp @@ -20,19 +20,20 @@ * Author: lixiaocui */ -#include -#include +#include "src/chunkserver/chunkserver_helper.h" + #include +#include +#include #include "src/common/crc32.h" -#include "src/chunkserver/chunkserver_helper.h" namespace curve { namespace chunkserver { const uint64_t DefaultMagic = 0x6225929368674118; bool ChunkServerMetaHelper::EncodeChunkServerMeta( - const ChunkServerMetadata &meta, std::string *out) { + const ChunkServerMetadata& meta, std::string* out) { if (!out->empty()) { LOG(ERROR) << "out string must empty!"; return false; @@ -50,8 +51,8 @@ bool ChunkServerMetaHelper::EncodeChunkServerMeta( return true; } -bool ChunkServerMetaHelper::DecodeChunkServerMeta( - const std::string &meta, ChunkServerMetadata *out) { +bool ChunkServerMetaHelper::DecodeChunkServerMeta(const std::string& meta, + ChunkServerMetadata* out) { std::string jsonStr(meta); std::string err; json2pb::Json2PbOptions opt; @@ -63,7 +64,7 @@ bool ChunkServerMetaHelper::DecodeChunkServerMeta( return false; } - // 验证meta是否正确 + // Verify if the meta is correct uint32_t crc = MetadataCrc(*out); if (crc != out->checksum()) { LOG(ERROR) << "ChunkServer persisted metadata CRC dismatch." @@ -75,8 +76,7 @@ bool ChunkServerMetaHelper::DecodeChunkServerMeta( return true; } -uint32_t ChunkServerMetaHelper::MetadataCrc( - const ChunkServerMetadata &meta) { +uint32_t ChunkServerMetaHelper::MetadataCrc(const ChunkServerMetadata& meta) { uint32_t crc = 0; uint32_t ver = meta.version(); uint32_t id = meta.id(); @@ -87,7 +87,7 @@ uint32_t ChunkServerMetaHelper::MetadataCrc( crc = curve::common::CRC32(crc, reinterpret_cast(&id), sizeof(id)); crc = curve::common::CRC32(crc, token, meta.token().size()); crc = curve::common::CRC32(crc, reinterpret_cast(&magic), - sizeof(magic)); + sizeof(magic)); return crc; } diff --git a/src/chunkserver/chunkserver_main.cpp b/src/chunkserver/chunkserver_main.cpp index 5bc4cb8736..4c13bf64d8 100644 --- a/src/chunkserver/chunkserver_main.cpp +++ b/src/chunkserver/chunkserver_main.cpp @@ -26,7 +26,7 @@ int main(int argc, char* argv[]) { butil::AtExitManager atExitManager; ::curve::chunkserver::ChunkServer chunkserver; LOG(INFO) << "ChunkServer starting."; - // 这里不能用fork创建守护进程,bvar会存在一些问题 + // You cannot use fork to create daemons here, as bvar may have some issues // https://github.com/apache/incubator-brpc/issues/697 // https://github.com/apache/incubator-brpc/issues/208 chunkserver.Run(argc, argv); diff --git a/src/chunkserver/chunkserver_metrics.cpp b/src/chunkserver/chunkserver_metrics.cpp index 339ecbbe66..f8a361d94e 100644 --- a/src/chunkserver/chunkserver_metrics.cpp +++ b/src/chunkserver/chunkserver_metrics.cpp @@ -21,8 +21,9 @@ */ #include "src/chunkserver/chunkserver_metrics.h" -#include + #include +#include #include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/passive_getfn.h" @@ -31,13 +32,15 @@ namespace curve { namespace chunkserver { IOMetric::IOMetric() - : rps_(&reqNum_, 1), iops_(&ioNum_, 1), eps_(&errorNum_, 1), + : rps_(&reqNum_, 1), + iops_(&ioNum_, 1), + eps_(&errorNum_, 1), bps_(&ioBytes_, 1) {} IOMetric::~IOMetric() {} -int IOMetric::Init(const std::string &prefix) { - // 暴露所有的metric +int IOMetric::Init(const std::string& prefix) { + // Expose all metrics if (reqNum_.expose_as(prefix, "request_num") != 0) { LOG(ERROR) << "expose request num failed."; return -1; @@ -94,9 +97,8 @@ void IOMetric::OnResponse(size_t size, int64_t latUs, bool hasError) { } } - 
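The IOMetric counters in this file follow the standard brpc bvar pattern: each raw event count is a `bvar::Adder`, a `bvar::PerSecond` wrapper derives a windowed per-second rate from it (the `rps_(&reqNum_, 1)` member initializers above), and `bvar::LatencyRecorder` tracks quantiles, max and average. A minimal self-contained sketch of the same pattern; the names `req_num`, `rps` and `OnOneRequestDone` are illustrative only, not part of this patch:

    #include <bvar/bvar.h>

    #include <cstdint>

    bvar::Adder<uint64_t> req_num;  // monotonically increasing request counter
    // Derive a per-second rate over a 1-second window, mirroring the
    // rps_(&reqNum_, 1) initializer in IOMetric::IOMetric().
    bvar::PerSecond<bvar::Adder<uint64_t>> rps(&req_num, 1);
    bvar::LatencyRecorder latency;  // tracks quantiles, max and average

    void OnOneRequestDone(int64_t lat_us) {
        req_num << 1;       // bump the raw counter; rps updates from it
        latency << lat_us;  // record one latency sample in microseconds
    }

    // Exposing the variables under a prefix works as in IOMetric::Init:
    //   req_num.expose_as("chunkserver_read", "request_num");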
-int CSIOMetric::Init(const std::string &prefix) { - // 初始化io统计项metric +int CSIOMetric::Init(const std::string& prefix) { + // Initialize IO statistics item metric std::string readPrefix = prefix + "_read"; std::string writePrefix = prefix + "_write"; std::string recoverPrefix = prefix + "_recover"; @@ -161,30 +163,30 @@ void CSIOMetric::OnResponse(CSIOMetricType type, size_t size, int64_t latUs, IOMetricPtr CSIOMetric::GetIOMetric(CSIOMetricType type) { IOMetricPtr result = nullptr; switch (type) { - case CSIOMetricType::READ_CHUNK: - result = readMetric_; - break; - case CSIOMetricType::WRITE_CHUNK: - result = writeMetric_; - break; - case CSIOMetricType::RECOVER_CHUNK: - result = recoverMetric_; - break; - case CSIOMetricType::PASTE_CHUNK: - result = pasteMetric_; - break; - case CSIOMetricType::DOWNLOAD: - result = downloadMetric_; - break; - default: - result = nullptr; - break; + case CSIOMetricType::READ_CHUNK: + result = readMetric_; + break; + case CSIOMetricType::WRITE_CHUNK: + result = writeMetric_; + break; + case CSIOMetricType::RECOVER_CHUNK: + result = recoverMetric_; + break; + case CSIOMetricType::PASTE_CHUNK: + result = pasteMetric_; + break; + case CSIOMetricType::DOWNLOAD: + result = downloadMetric_; + break; + default: + result = nullptr; + break; } return result; } -int CSCopysetMetric::Init(const LogicPoolID &logicPoolId, - const CopysetID ©setId) { +int CSCopysetMetric::Init(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { logicPoolId_ = logicPoolId; copysetId_ = copysetId; int ret = ioMetrics_.Init(Prefix()); @@ -196,7 +198,7 @@ int CSCopysetMetric::Init(const LogicPoolID &logicPoolId, return 0; } -void CSCopysetMetric::MonitorDataStore(CSDataStore *datastore) { +void CSCopysetMetric::MonitorDataStore(CSDataStore* datastore) { std::string chunkCountPrefix = Prefix() + "_chunk_count"; std::string snapshotCountPrefix = Prefix() + "snapshot_count"; std::string cloneChunkCountPrefix = Prefix() + "_clonechunk_count"; @@ -209,30 +211,36 @@ void CSCopysetMetric::MonitorDataStore(CSDataStore *datastore) { } void CSCopysetMetric::MonitorCurveSegmentLogStorage( - CurveSegmentLogStorage *logStorage) { + CurveSegmentLogStorage* logStorage) { std::string walSegmentCountPrefix = Prefix() + "_walsegment_count"; walSegmentCount_ = std::make_shared>( walSegmentCountPrefix, GetLogStorageWalSegmentCountFunc, logStorage); } ChunkServerMetric::ChunkServerMetric() - : hasInited_(false), leaderCount_(nullptr), chunkLeft_(nullptr), - walSegmentLeft_(nullptr), chunkTrashed_(nullptr), chunkCount_(nullptr), - walSegmentCount_(nullptr), snapshotCount_(nullptr), + : hasInited_(false), + leaderCount_(nullptr), + chunkLeft_(nullptr), + walSegmentLeft_(nullptr), + chunkTrashed_(nullptr), + chunkCount_(nullptr), + walSegmentCount_(nullptr), + snapshotCount_(nullptr), cloneChunkCount_(nullptr) {} -ChunkServerMetric *ChunkServerMetric::self_ = nullptr; +ChunkServerMetric* ChunkServerMetric::self_ = nullptr; -ChunkServerMetric *ChunkServerMetric::GetInstance() { - // chunkserver metric 在chunkserver启动时初始化创建 - // 因此创建的时候不会存在竞争,不需要锁保护 +ChunkServerMetric* ChunkServerMetric::GetInstance() { + // Chunkserver metric initializes creation when chunkserver starts + // Therefore, there will be no competition during creation and lock + // protection is not required if (self_ == nullptr) { self_ = new ChunkServerMetric; } return self_; } -int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { +int ChunkServerMetric::Init(const ChunkServerMetricOptions& option) { if (hasInited_) { 
LOG(WARNING) << "chunkserver metric has inited."; return 0; @@ -245,14 +253,14 @@ int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { return 0; } - // 初始化io统计项metric + // Initialize IO statistics item metric int ret = ioMetrics_.Init(Prefix()); if (ret < 0) { LOG(ERROR) << "Init chunkserver metric failed."; return -1; } - // 初始化资源统计 + // Initialize resource statistics std::string leaderCountPrefix = Prefix() + "_leader_count"; leaderCount_ = std::make_shared>(leaderCountPrefix); @@ -278,7 +286,7 @@ int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { } int ChunkServerMetric::Fini() { - // 释放资源,从而将暴露的metric从全局的map中移除 + // Release resources to remove exposed metrics from the global map ioMetrics_.Fini(); leaderCount_ = nullptr; chunkLeft_ = nullptr; @@ -293,8 +301,8 @@ int ChunkServerMetric::Fini() { return 0; } -int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID ©setId) { +int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { if (!option_.collectMetric) { return 0; } @@ -321,9 +329,8 @@ int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID &logicPoolId, return 0; } -CopysetMetricPtr -ChunkServerMetric::GetCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID ©setId) { +CopysetMetricPtr ChunkServerMetric::GetCopysetMetric( + const LogicPoolID& logicPoolId, const CopysetID& copysetId) { if (!option_.collectMetric) { return nullptr; } @@ -332,18 +339,18 @@ ChunkServerMetric::GetCopysetMetric(const LogicPoolID &logicPoolId, return copysetMetricMap_.Get(groupId); } -int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID ©setId) { +int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { GroupId groupId = ToGroupId(logicPoolId, copysetId); - // 这里先保存copyset metric,等remove后再去释放 - // 防止在读写锁里面去操作metric,导致死锁 + // Save the copyset metric here first, and then release it after removing it + // Prevent operating metrics within read write locks, resulting in deadlocks auto metric = copysetMetricMap_.Get(groupId); copysetMetricMap_.Remove(groupId); return 0; } -void ChunkServerMetric::OnRequest(const LogicPoolID &logicPoolId, - const CopysetID ©setId, +void ChunkServerMetric::OnRequest(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, CSIOMetricType type) { if (!option_.collectMetric) { return; @@ -356,8 +363,8 @@ void ChunkServerMetric::OnRequest(const LogicPoolID &logicPoolId, ioMetrics_.OnRequest(type); } -void ChunkServerMetric::OnResponse(const LogicPoolID &logicPoolId, - const CopysetID ©setId, +void ChunkServerMetric::OnResponse(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, CSIOMetricType type, size_t size, int64_t latUs, bool hasError) { if (!option_.collectMetric) { @@ -371,7 +378,7 @@ void ChunkServerMetric::OnResponse(const LogicPoolID &logicPoolId, ioMetrics_.OnResponse(type, size, latUs, hasError); } -void ChunkServerMetric::MonitorChunkFilePool(FilePool *chunkFilePool) { +void ChunkServerMetric::MonitorChunkFilePool(FilePool* chunkFilePool) { if (!option_.collectMetric) { return; } @@ -381,7 +388,7 @@ void ChunkServerMetric::MonitorChunkFilePool(FilePool *chunkFilePool) { chunkLeftPrefix, GetChunkLeftFunc, chunkFilePool); } -void ChunkServerMetric::MonitorWalFilePool(FilePool *walFilePool) { +void ChunkServerMetric::MonitorWalFilePool(FilePool* walFilePool) { if (!option_.collectMetric) { return; } @@ -391,7 +398,7 @@ void 
ChunkServerMetric::MonitorWalFilePool(FilePool *walFilePool) { walSegmentLeftPrefix, GetWalSegmentLeftFunc, walFilePool); } -void ChunkServerMetric::MonitorTrash(Trash *trash) { +void ChunkServerMetric::MonitorTrash(Trash* trash) { if (!option_.collectMetric) { return; } @@ -417,7 +424,7 @@ void ChunkServerMetric::DecreaseLeaderCount() { *leaderCount_ << -1; } -void ChunkServerMetric::ExposeConfigMetric(common::Configuration *conf) { +void ChunkServerMetric::ExposeConfigMetric(common::Configuration* conf) { if (!option_.collectMetric) { return; } diff --git a/src/chunkserver/chunkserver_metrics.h b/src/chunkserver/chunkserver_metrics.h index d4354d196f..b91fbf0f6e 100644 --- a/src/chunkserver/chunkserver_metrics.h +++ b/src/chunkserver/chunkserver_metrics.h @@ -23,18 +23,19 @@ #ifndef SRC_CHUNKSERVER_CHUNKSERVER_METRICS_H_ #define SRC_CHUNKSERVER_CHUNKSERVER_METRICS_H_ -#include #include +#include + +#include #include #include -#include #include #include "include/chunkserver/chunkserver_common.h" -#include "src/common/uncopyable.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/common/concurrent/rw_lock.h" #include "src/common/configuration.h" -#include "src/chunkserver/datastore/file_pool.h" +#include "src/common/uncopyable.h" using curve::common::Configuration; using curve::common::ReadLockGuard; @@ -54,57 +55,59 @@ class Trash; template using PassiveStatusPtr = std::shared_ptr>; -template using AdderPtr = std::shared_ptr>; +template +using AdderPtr = std::shared_ptr>; -// 使用LatencyRecorder的实现来统计读写请求的size情况 -// 可以统计分位值、最大值、中位数、平均值等情况 +// Using the implementation of LatencyRecorder to count the size of read and +// write requests Statistics can be conducted on quantile values, maximum +// values, median values, mean values, and other factors using IOSizeRecorder = bvar::LatencyRecorder; -// io 相关的统计项 +// IO related statistical items class IOMetric { public: IOMetric(); virtual ~IOMetric(); /** - * 初始化 io metric - * 主要用于曝光各metric指标 - * @param prefix: 用于bvar曝光时使用的前缀 - * @return 成功返回0,失败返回-1 + * Initialize io metric + * Mainly used for exposing various metric indicators + * @param prefix: The prefix used for bvar exposure + * @return returns 0 for success, -1 for failure */ - int Init(const std::string &prefix); + int Init(const std::string& prefix); /** - * IO请求到来时统计requestNum + * Count requestNum when IO requests arrive */ void OnRequest(); /** - * IO 完成以后,记录该次IO的指标 - * 错误的io不会计入iops和bps统计 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * After IO is completed, record the indicators for this IO + * Incorrect IO will not be included in iops and bps statistics + * @param size: The size of the IO data for this time + * @param latUS: The delay of this IO + * @param hasError: Did any errors occur during this IO */ void OnResponse(size_t size, int64_t latUs, bool hasError); public: - // io请求的数量 + // Number of IO requests bvar::Adder reqNum_; - // 成功io的数量 + // Number of successful IO bvar::Adder ioNum_; - // 失败的io个数 + // Number of failed IO bvar::Adder errorNum_; - // 所有io的数据量 + // The data volume of all IO bvar::Adder ioBytes_; - // io的延时情况(分位值、最大值、中位数、平均值) + // Delay situation of IO (quantile, maximum, median, average) bvar::LatencyRecorder latencyRecorder_; - // io大小的情况(分位值、最大值、中位数、平均值) + // The size of IO (quantile, maximum, median, average) IOSizeRecorder sizeRecorder_; - // 最近1秒请求的IO数量 + // Number of IO requests in the last 1 second bvar::PerSecond> rps_; - // 最近1秒的iops + // iops in the last 1 second bvar::PerSecond> iops_; - // 
最近1秒的出错IO数量 + // Number of IO errors in the last 1 second bvar::PerSecond> eps_; - // 最近1秒的数据量 + // Data volume in the last 1 second bvar::PerSecond> bps_; }; using IOMetricPtr = std::shared_ptr; @@ -120,100 +123,109 @@ enum class CSIOMetricType { class CSIOMetric { public: CSIOMetric() - : readMetric_(nullptr), writeMetric_(nullptr), recoverMetric_(nullptr), - pasteMetric_(nullptr), downloadMetric_(nullptr) {} + : readMetric_(nullptr), + writeMetric_(nullptr), + recoverMetric_(nullptr), + pasteMetric_(nullptr), + downloadMetric_(nullptr) {} ~CSIOMetric() {} /** - * 执行请求前记录metric - * @param type: 请求对应的metric类型 + * Record metric before executing the request + * @param type: The corresponding metric type of the request */ void OnRequest(CSIOMetricType type); /** - * 执行请求后记录metric - * 错误的io不会计入iops和bps统计 - * @param type: 请求对应的metric类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record metric after executing the request + * Incorrect IO will not be included in iops and bps statistics + * @param type: The corresponding metric type of the request + * @param size: The size of the IO data for this time + * @param latUS: The delay of this IO + * @param hasError: Did any errors occur during this IO */ void OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError); /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return returns the IOMetric pointer corresponding to the specified type, + * or nullptr if the type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type); /** - * 初始化各项op的metric统计项 - * @return 成功返回0,失败返回-1 + * Initialize metric statistics for each op + * @return returns 0 for success, -1 for failure */ - int Init(const std::string &prefix); + int Init(const std::string& prefix); /** - * 释放各项op的metric资源 + * Release metric resources for various OPs */ void Fini(); protected: - // ReadChunk统计 + // ReadChunk statistics IOMetricPtr readMetric_; - // WriteChunk统计 + // WriteChunk statistics IOMetricPtr writeMetric_; - // RecoverChunk统计 + // RecoverChunk statistics IOMetricPtr recoverMetric_; - // PasteChunk信息 + // PasteChunk Information IOMetricPtr pasteMetric_; - // Download统计 + // Download statistics IOMetricPtr downloadMetric_; }; class CSCopysetMetric { public: CSCopysetMetric() - : logicPoolId_(0), copysetId_(0), chunkCount_(nullptr), - walSegmentCount_(nullptr), snapshotCount_(nullptr), + : logicPoolId_(0), + copysetId_(0), + chunkCount_(nullptr), + walSegmentCount_(nullptr), + snapshotCount_(nullptr), cloneChunkCount_(nullptr) {} ~CSCopysetMetric() {} /** - * 初始化copyset级别的metric统计项 - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1 + * Initialize metric statistics at the copyset level + * @param logicPoolId: The ID of the logical pool to which the copyset + * belongs + * @param copysetId: The ID of the copyset + * @return returns 0 for success, -1 for failure */ - int Init(const LogicPoolID &logicPoolId, const CopysetID ©setId); + int Init(const LogicPoolID& logicPoolId, const CopysetID& copysetId); /** - * 监控DataStore指标,主要包括chunk的数量、快照的数量等 - * @param datastore: 该copyset下的datastore指针 + * Monitor DataStore indicators, mainly including the number of chunks, + * number of snapshots, etc + * @param datastore: The datastore pointer under this copyset */ - void MonitorDataStore(CSDataStore *datastore); + void 
MonitorDataStore(CSDataStore* datastore); /** * @brief: Monitor log storage's metric, like the number of WAL segment file * @param logStorage: The pointer to CurveSegmentLogStorage */ - void MonitorCurveSegmentLogStorage(CurveSegmentLogStorage *logStorage); + void MonitorCurveSegmentLogStorage(CurveSegmentLogStorage* logStorage); /** - * 执行请求前记录metric - * @param type: 请求对应的metric类型 + * Record metric before executing the request + * @param type: The corresponding metric type of the request */ void OnRequest(CSIOMetricType type) { ioMetrics_.OnRequest(type); } /** - * 执行请求后记录metric - * 错误的io不会计入iops和bps统计 - * @param type: 请求对应的metric类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record metric after executing the request + * Incorrect IO will not be included in iops and bps statistics + * @param type: The corresponding metric type of the request + * @param size: The size of the IO data for this time + * @param latUS: The delay of this IO + * @param hasError: Did any errors occur during this IO */ void OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError) { @@ -221,9 +233,10 @@ class CSCopysetMetric { } /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return returns the IOMetric pointer corresponding to the specified type, + * or nullptr if the type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type) { return ioMetrics_.GetIOMetric(type); @@ -264,27 +277,27 @@ class CSCopysetMetric { } private: - // 逻辑池id + // Logical Pool ID LogicPoolID logicPoolId_; // copyset id CopysetID copysetId_; - // copyset上的 chunk 的数量 + // Number of chunks on copyset PassiveStatusPtr chunkCount_; // The total number of WAL segment in copyset PassiveStatusPtr walSegmentCount_; - // copyset上的 快照文件 的数量 + // Number of snapshot files on copyset PassiveStatusPtr snapshotCount_; - // copyset上的 clone chunk 的数量 + // The number of clone chunks on the copyset PassiveStatusPtr cloneChunkCount_; - // copyset上的IO类型的metric统计 + // Metric statistics of IO types on copyset CSIOMetric ioMetrics_; }; struct ChunkServerMetricOptions { bool collectMetric; - // chunkserver的ip + // Chunkserver IP std::string ip; - // chunkserver的端口号 + // The port number of chunkserver uint32_t port; ChunkServerMetricOptions() : collectMetric(false), ip("127.0.0.1"), port(8888) {} @@ -344,173 +357,175 @@ class CopysetMetricMap { } private: - // 保护复制组metric map的读写锁 + // Protect the read write lock of the replication group metric map RWLock rwLock_; - // 各复制组metric的映射表,用GroupId作为key + // Mapping table for each replication group metric, using GroupId as the key std::unordered_map map_; }; class ChunkServerMetric : public Uncopyable { public: - // 实现单例 - static ChunkServerMetric *GetInstance(); + // Implementation singleton + static ChunkServerMetric* GetInstance(); /** - * 初始化chunkserver统计项 - * @pa)ram option: 初始化配置项 - * @return 成功返回0,失败返回-1 + * Initialize chunkserver statistics + * @param option: Initialize configuration item + * @return returns 0 for success, -1 for failure */ - int Init(const ChunkServerMetricOptions &option); + int Init(const ChunkServerMetricOptions& option); /** - * 释放metric资源 - * @return 成功返回0,失败返回-1 + * Release metric resources + * @return returns 0 for success, -1 for failure */ int Fini(); /** - * 请求前记录metric - * @param logicPoolId: 此次io操作所在的逻辑池id - * @param copysetId: 此次io操作所在的copysetid - * 
@param type: 请求类型 + * Record metric before request + * @param logicPoolId: The logical pool ID where this io operation is + * located + * @param copysetId: The copysetID where this io operation is located + * @param type: Request type */ - void OnRequest(const LogicPoolID &logicPoolId, const CopysetID &copysetId, + void OnRequest(const LogicPoolID& logicPoolId, const CopysetID& copysetId, CSIOMetricType type); /** - * 请求结束时记录该次IO指标 - * 错误的io不会计入iops和bps统计 - * @param logicPoolId: 此次io操作所在的逻辑池id - * @param copysetId: 此次io操作所在的copysetid - * @param type: 请求类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record the IO metric at the end of the request + * Incorrect IO will not be included in iops and bps statistics + * @param logicPoolId: The logical pool ID where this io operation is + * located + * @param copysetId: The copysetID where this io operation is located + * @param type: Request type + * @param size: The size of the IO data for this time + * @param latUS: The delay of this IO + * @param hasError: Did any errors occur during this IO */ - void OnResponse(const LogicPoolID &logicPoolId, const CopysetID &copysetId, + void OnResponse(const LogicPoolID& logicPoolId, const CopysetID& copysetId, CSIOMetricType type, size_t size, int64_t latUs, bool hasError); /** - * 创建指定copyset的metric - * 如果collectMetric为false,返回0,但实际并不会创建 - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1,如果指定metric已存在返回失败 + * Create a metric for the specified copyset + * If collectMetric is false, it returns 0, but it is not actually created + * @param logicPoolId: The ID of the logical pool to which the copyset + * belongs + * @param copysetId: The ID of the copyset + * @return returns 0 for success, -1 for failure, or failure if the + * specified metric already exists */ - int CreateCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + int CreateCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 获取指定copyset的metric - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回指定的copyset metric,失败返回nullptr + * Obtain the metric of the specified copyset + * @param logicPoolId: The ID of the logical pool to which the copyset + * belongs + * @param copysetId: The ID of the copyset + * @return successfully returns the specified copyset metric, while failure + * returns nullptr */ - CopysetMetricPtr GetCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + CopysetMetricPtr GetCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 删除指定copyset的metric - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1 + * Delete the metric for the specified copyset + * @param logicPoolId: The ID of the logical pool to which the copyset + * belongs + * @param copysetId: The ID of the copyset + * @return returns 0 for success, -1 for failure */ - int RemoveCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + int RemoveCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 监视chunk分配池,主要监视池中chunk的数量 - * @param chunkFilePool: chunkfilePool的对象指针 + * Monitor the chunk allocation pool, mainly monitoring the number of chunks + * in the pool + * @param chunkFilePool: Object pointer to chunkfilePool */ - void MonitorChunkFilePool(FilePool *chunkFilePool); + void MonitorChunkFilePool(FilePool* chunkFilePool); /** - * 监视wal segment分配池,主要监视池中segment的数量 - 
@param walFilePool: walfilePool的对象指针 + * Monitor the wal segment allocation pool, mainly monitoring the number + * of segments in the pool + * @param walFilePool: Object pointer to walfilePool */ - void MonitorWalFilePool(FilePool *walFilePool); + void MonitorWalFilePool(FilePool* walFilePool); /** - * 监视回收站 - * @param trash: trash的对象指针 + * Monitor the recycle bin + * @param trash: Object pointer to trash */ - void MonitorTrash(Trash *trash); + void MonitorTrash(Trash* trash); /** - * 增加 leader count 计数 + * Increase the leader count */ void IncreaseLeaderCount(); /** - * 减少 leader count 计数 + * Decrease the leader count */ void DecreaseLeaderCount(); /** - * 更新配置项数据 - * @param conf: 配置内容 + * Update configuration item data + * @param conf: Configuration content */ - void ExposeConfigMetric(common::Configuration *conf); + void ExposeConfigMetric(common::Configuration* conf); /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return returns the IOMetric pointer corresponding to the specified type, + * or nullptr if the type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type) { return ioMetrics_.GetIOMetric(type); } - CopysetMetricMap *GetCopysetMetricMap() { return &copysetMetricMap_; } + CopysetMetricMap* GetCopysetMetricMap() { return &copysetMetricMap_; } uint32_t GetCopysetCount() { return copysetMetricMap_.Size(); } uint32_t GetLeaderCount() const { - if (leaderCount_ == nullptr) - return 0; + if (leaderCount_ == nullptr) return 0; return leaderCount_->get_value(); } uint32_t GetTotalChunkCount() { - if (chunkCount_ == nullptr) - return 0; + if (chunkCount_ == nullptr) return 0; return chunkCount_->get_value(); } uint32_t GetTotalSnapshotCount() { - if (snapshotCount_ == nullptr) - return 0; + if (snapshotCount_ == nullptr) return 0; return snapshotCount_->get_value(); } uint32_t GetTotalCloneChunkCount() { - if (cloneChunkCount_ == nullptr) - return 0; + if (cloneChunkCount_ == nullptr) return 0; return cloneChunkCount_->get_value(); } uint32_t GetTotalWalSegmentCount() { - if (nullptr == walSegmentCount_) - return 0; + if (nullptr == walSegmentCount_) return 0; return walSegmentCount_->get_value(); } uint32_t GetChunkLeftCount() const { - if (chunkLeft_ == nullptr) - return 0; + if (chunkLeft_ == nullptr) return 0; return chunkLeft_->get_value(); } uint32_t GetWalSegmentLeftCount() const { - if (nullptr == walSegmentLeft_) - return 0; + if (nullptr == walSegmentLeft_) return 0; return walSegmentLeft_->get_value(); } uint32_t GetChunkTrashedCount() const { - if (chunkTrashed_ == nullptr) - return 0; + if (chunkTrashed_ == nullptr) return 0; return chunkTrashed_->get_value(); } @@ -522,32 +537,32 @@ class ChunkServerMetric : public Uncopyable { } private: - // 初始化标志 + // Initialization flag bool hasInited_; - // 配置项 + // Configuration item ChunkServerMetricOptions option_; - // leader 的数量 + // Number of leaders AdderPtr leaderCount_; - // chunkfilepool 中剩余的 chunk 的数量 + // The number of remaining chunks in the chunkfilepool PassiveStatusPtr chunkLeft_; - // walfilepool 中剩余的 wal segment 的数量 + // The number of remaining wal segments in the walfilepool PassiveStatusPtr walSegmentLeft_; - // trash 中的 chunk 的数量 + // Number of chunks in trash PassiveStatusPtr chunkTrashed_; - // chunkserver上的 chunk 的数量 + // Number of chunks on chunkserver PassiveStatusPtr chunkCount_; // The total number of WAL segment in chunkserver 
PassiveStatusPtr walSegmentCount_; - // chunkserver上的 快照文件 的数量 + // Number of snapshot files on chunkserver PassiveStatusPtr snapshotCount_; - // chunkserver上的 clone chunk 的数量 + // Number of clone chunks on chunkserver PassiveStatusPtr cloneChunkCount_; - // 各复制组metric的映射表,用GroupId作为key + // Mapping table for each replication group metric, using GroupId as the key CopysetMetricMap copysetMetricMap_; - // chunkserver上的IO类型的metric统计 + // Metric statistics of IO types on chunkserver CSIOMetric ioMetrics_; - // 用于单例模式的自指指针 - static ChunkServerMetric *self_; + // Self pointing pointer for singleton mode + static ChunkServerMetric* self_; }; } // namespace chunkserver diff --git a/src/chunkserver/cli.h b/src/chunkserver/cli.h index 3c8ecc6997..ed048dc460 100644 --- a/src/chunkserver/cli.h +++ b/src/chunkserver/cli.h @@ -33,41 +33,37 @@ namespace curve { namespace chunkserver { /** - * Cli就是配置变更相关接口的封装,方便使用,避免直接操作RPC + * Cli is the encapsulation of configuration change related interfaces, which is + * convenient to use and avoids direct RPC operations */ -// 获取leader -butil::Status GetLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - PeerId *leaderId); +// Get the leader +butil::Status GetLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + PeerId* leaderId); -// 增加一个peer -butil::Status AddPeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const PeerId &peer_id, - const braft::cli::CliOptions &options); +// Add a peer +butil::Status AddPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const PeerId& peer_id, + const braft::cli::CliOptions& options); -// 移除一个peer -butil::Status RemovePeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const PeerId &peer_id, - const braft::cli::CliOptions &options); +// Remove a peer +butil::Status RemovePeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const PeerId& peer_id, + const braft::cli::CliOptions& options); -// 转移leader -butil::Status TransferLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const PeerId &peer, - const braft::cli::CliOptions &options); +// Transfer leader +butil::Status TransferLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf, const PeerId& peer, + const braft::cli::CliOptions& options); -// 触发快照 -butil::Status Snapshot(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const PeerId &peer, - const braft::cli::CliOptions &options); +// Trigger snapshot +butil::Status Snapshot(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const PeerId& peer, + const braft::cli::CliOptions& options); } // namespace chunkserver } // namespace curve diff --git a/src/chunkserver/cli2.cpp b/src/chunkserver/cli2.cpp index 5328724316..ba779bb8d7 100644 --- a/src/chunkserver/cli2.cpp +++ b/src/chunkserver/cli2.cpp @@ -22,10 +22,10 @@ #include "src/chunkserver/cli2.h" -#include -#include #include #include +#include +#include #include @@ -34,16 +34,14 @@ namespace curve { namespace chunkserver { -butil::Status GetLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - Peer *leader) { +butil::Status GetLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + Peer* leader) { if (conf.empty()) { return 
butil::Status(EINVAL, "Empty group configuration"); } - butil::Status st(-1, - "Fail to get leader of copyset node %s", + butil::Status st(-1, "Fail to get leader of copyset node %s", ToGroupIdString(logicPoolId, copysetId).c_str()); PeerId leaderId; Configuration::const_iterator iter = conf.begin(); @@ -53,7 +51,7 @@ butil::Status GetLeader(const LogicPoolID &logicPoolId, return butil::Status(-1, "Fail to init channel to %s", iter->to_string().c_str()); } - Peer *peer = new Peer(); + Peer* peer = new Peer(); CliService2_Stub stub(&channel); GetLeaderRequest2 request; GetLeaderResponse2 response; @@ -84,11 +82,9 @@ butil::Status GetLeader(const LogicPoolID &logicPoolId, return butil::Status::OK(); } -butil::Status AddPeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options) { +butil::Status AddPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st); @@ -101,10 +97,10 @@ butil::Status AddPeer(const LogicPoolID &logicPoolId, AddPeerRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); request.set_allocated_leader(leaderPeer); *leaderPeer = leader; - Peer *addPeer = new Peer(); + Peer* addPeer = new Peer(); request.set_allocated_addpeer(addPeer); *addPeer = peer; AddPeerResponse2 response; @@ -128,17 +124,15 @@ butil::Status AddPeer(const LogicPoolID &logicPoolId, new_conf.add_peer(peer); } LOG(INFO) << "Configuration of replication group ` " - << ToGroupIdString(logicPoolId, copysetId) - << " ' changed from " << old_conf - << " to " << new_conf; + << ToGroupIdString(logicPoolId, copysetId) << " ' changed from " + << old_conf << " to " << new_conf; return butil::Status::OK(); } -butil::Status RemovePeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options) { +butil::Status RemovePeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, + const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st); @@ -151,10 +145,10 @@ butil::Status RemovePeer(const LogicPoolID &logicPoolId, RemovePeerRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); request.set_allocated_leader(leaderPeer); *leaderPeer = leader; - Peer *removePeer = new Peer(); + Peer* removePeer = new Peer(); request.set_allocated_removepeer(removePeer); *removePeer = peer; RemovePeerResponse2 response; @@ -179,17 +173,15 @@ butil::Status RemovePeer(const LogicPoolID &logicPoolId, new_conf.add_peer(peer); } LOG(INFO) << "Configuration of replication group ` " - << ToGroupIdString(logicPoolId, copysetId) - << " ' changed from " << old_conf - << " to " << new_conf; + << ToGroupIdString(logicPoolId, copysetId) << " ' changed from " + << old_conf << " to " << new_conf; return butil::Status::OK(); } -butil::Status ChangePeers(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Configuration &newPeers, - const braft::cli::CliOptions &options) { +butil::Status 
ChangePeers(const LogicPoolID& logicPoolId,
+                          const CopysetID& copysetId, const Configuration& conf,
+                          const Configuration& newPeers,
+                          const braft::cli::CliOptions& options) {
    Peer leader;
    butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader);
    BRAFT_RETURN_IF(!st.ok(), st);
@@ -203,11 +195,11 @@
    ChangePeersRequest2 request;
    request.set_logicpoolid(logicPoolId);
    request.set_copysetid(copysetId);
-    Peer *leaderPeer = new Peer();
+    Peer* leaderPeer = new Peer();
    *leaderPeer = leader;
    request.set_allocated_leader(leaderPeer);
-    for (Configuration::const_iterator
-             iter = newPeers.begin(); iter != newPeers.end(); ++iter) {
+    for (Configuration::const_iterator iter = newPeers.begin();
+         iter != newPeers.end(); ++iter) {
        request.add_newpeers()->set_address(iter->to_string());
    }
    ChangePeersResponse2 response;
@@ -229,17 +221,15 @@
        new_conf.add_peer(response.newpeers(i).address());
    }
    LOG(INFO) << "Configuration of replication group `"
-              << ToGroupIdString(logicPoolId, copysetId)
-              << "' changed from " << old_conf
-              << " to " << new_conf;
+              << ToGroupIdString(logicPoolId, copysetId) << "' changed from "
+              << old_conf << " to " << new_conf;
    return butil::Status::OK();
 }

-butil::Status TransferLeader(const LogicPoolID &logicPoolId,
-                             const CopysetID &copysetId,
-                             const Configuration &conf,
-                             const Peer &peer,
-                             const braft::cli::CliOptions &options) {
+butil::Status TransferLeader(const LogicPoolID& logicPoolId,
+                             const CopysetID& copysetId,
+                             const Configuration& conf, const Peer& peer,
+                             const braft::cli::CliOptions& options) {
    Peer leader;
    butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader);
    BRAFT_RETURN_IF(!st.ok(), st);
@@ -256,10 +246,10 @@
    TransferLeaderRequest2 request;
    request.set_logicpoolid(logicPoolId);
    request.set_copysetid(copysetId);
-    Peer *leaderPeer = new Peer();
+    Peer* leaderPeer = new Peer();
    request.set_allocated_leader(leaderPeer);
    *leaderPeer = leader;
-    Peer *transfereePeer = new Peer();
+    Peer* transfereePeer = new Peer();
    request.set_allocated_transferee(transfereePeer);
    *transfereePeer = peer;
    TransferLeaderResponse2 response;
@@ -274,18 +264,23 @@
    return butil::Status::OK();
 }

-// reset peer不走一致性协议,直接将peers重置,因此存在一定的风险
-// 应用场景:大多数节点挂掉的极端情况。在这种情况下,该copyset将无法写入,直
-// 到半小时后mds将挂掉的副本上的copyset迁移,因此有一段时间不可用,为了应对这种场景,引入了
-// reset peer工具,直接将复制组成员reset成只包含存活的副本。
-// 注意事项:
-// 1、reset peer之前,需要通过check-copyset工具确认复制组中的大多数副本确实挂掉
-// 2、reset peer的时候,要确保剩下的副本有最新的数据,不然存在丢数据的风险
-// 3、reset peer适用于其他两个副本不能恢复的情况,不然可能会扰乱集群
-butil::Status ResetPeer(const LogicPoolID &logicPoolId,
-                        const CopysetID &copysetId,
-                        const Configuration& newPeers,
-                        const Peer& requestPeer,
+// Reset peer does not go through the consistency protocol; it resets the
+// peers directly, so it carries certain risks.
+// Application scenario: the extreme case where a majority of the nodes are
+// down. The copyset then cannot be written until, half an hour later, the
+// MDS migrates the copysets on the dead replicas, so the copyset is
+// unavailable for a while. To cope with this scenario, the reset peer tool
+// was introduced; it resets the replication group membership directly to
+// contain only the surviving replicas.
+// Precautions:
+// 1.
Before resetting a peer, use the check-copyset tool to confirm that a
+//    majority of the replicas in the replication group are indeed down.
+// 2. When resetting a peer, make sure the remaining replica holds the
+//    latest data, otherwise there is a risk of losing data.
+// 3. Resetting a peer only suits the case where the other two replicas
+//    cannot be recovered; otherwise it may disrupt the cluster.
+butil::Status ResetPeer(const LogicPoolID& logicPoolId,
+                        const CopysetID& copysetId,
+                        const Configuration& newPeers, const Peer& requestPeer,
                         const braft::cli::CliOptions& options) {
    if (newPeers.empty()) {
        return butil::Status(EINVAL, "new_conf is empty");
    }
@@ -294,7 +289,7 @@ butil::Status ResetPeer(const LogicPoolID &logicPoolId,
    brpc::Channel channel;
    if (channel.Init(requestPeerId.addr, NULL) != 0) {
        return butil::Status(-1, "Fail to init channel to %s",
-                            requestPeerId.to_string().c_str());
+                             requestPeerId.to_string().c_str());
    }
    brpc::Controller cntl;
    cntl.set_timeout_ms(options.timeout_ms);
@@ -302,11 +297,11 @@
    ResetPeerRequest2 request;
    request.set_logicpoolid(logicPoolId);
    request.set_copysetid(copysetId);
-    Peer *requestPeerPtr = new Peer();
+    Peer* requestPeerPtr = new Peer();
    *requestPeerPtr = requestPeer;
    request.set_allocated_requestpeer(requestPeerPtr);
-    for (Configuration::const_iterator
-             iter = newPeers.begin(); iter != newPeers.end(); ++iter) {
+    for (Configuration::const_iterator iter = newPeers.begin();
+         iter != newPeers.end(); ++iter) {
        request.add_newpeers()->set_address(iter->to_string());
    }
    ResetPeerResponse2 response;
@@ -318,15 +313,14 @@
    return butil::Status::OK();
 }

-butil::Status Snapshot(const LogicPoolID &logicPoolId,
-                       const CopysetID &copysetId,
-                       const Peer& peer,
+butil::Status Snapshot(const LogicPoolID& logicPoolId,
+                       const CopysetID& copysetId, const Peer& peer,
                        const braft::cli::CliOptions& options) {
    brpc::Channel channel;
    PeerId peerId(peer.address());
    if (channel.Init(peerId.addr, NULL) != 0) {
        return butil::Status(-1, "Fail to init channel to %s",
-                            peerId.to_string().c_str());
+                             peerId.to_string().c_str());
    }
    brpc::Controller cntl;
    cntl.set_timeout_ms(options.timeout_ms);
@@ -334,7 +328,7 @@
    SnapshotRequest2 request;
    request.set_logicpoolid(logicPoolId);
    request.set_copysetid(copysetId);
-    Peer *peerPtr = new Peer(peer);
+    Peer* peerPtr = new Peer(peer);
    request.set_allocated_peer(peerPtr);
    SnapshotResponse2 response;
    CliService2_Stub stub(&channel);
@@ -351,7 +345,7 @@ butil::Status SnapshotAll(const Peer& peer,
    PeerId peerId(peer.address());
    if (channel.Init(peerId.addr, NULL) != 0) {
        return butil::Status(-1, "Fail to init channel to %s",
-                            peerId.to_string().c_str());
+                             peerId.to_string().c_str());
    }
    brpc::Controller cntl;
    cntl.set_timeout_ms(options.timeout_ms);
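A minimal calling sketch for the reset flow documented above (illustrative only; the ids, address and timeout are assumed values, and the precautions above must be verified first):

    // Sketch: shrink copyset (1, 100001) to its single surviving replica.
    namespace curve {
    namespace chunkserver {
    butil::Status ResetToSurvivor() {
        Configuration newPeers;
        newPeers.add_peer(PeerId("127.0.0.1:8200:0"));  // survivor (assumed)
        Peer survivor;
        survivor.set_address("127.0.0.1:8200:0");
        braft::cli::CliOptions opts;
        opts.timeout_ms = 1000;  // assumed timeout
        return ResetPeer(1, 100001, newPeers, survivor, opts);
    }
    }  // namespace chunkserver
    }  // namespace curve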
diff --git a/src/chunkserver/cli2.h b/src/chunkserver/cli2.h
index ba60e057e7..512850b747 100644
--- a/src/chunkserver/cli2.h
+++ b/src/chunkserver/cli2.h
@@ -33,57 +33,50 @@ namespace curve {
 namespace chunkserver {

 /**
- * Cli就是配置变更相关接口的封装,方便使用,避免直接操作RPC
+ * Cli encapsulates the configuration-change interfaces for convenient use,
+ * avoiding direct RPC operations
  */

-// 获取leader
-butil::Status GetLeader(const LogicPoolID &logicPoolId,
-                        const CopysetID &copysetId,
-                        const Configuration &conf,
-                        Peer *leader);
-
-// 增加一个peer
-butil::Status AddPeer(const LogicPoolID &logicPoolId,
-                      const CopysetID &copysetId,
-                      const Configuration &conf,
-                      const Peer &peer,
-                      const braft::cli::CliOptions &options);
-
-// 移除一个peer
-butil::Status RemovePeer(const LogicPoolID &logicPoolId,
-                         const CopysetID &copysetId,
-                         const Configuration &conf,
-                         const Peer &peer,
-                         const braft::cli::CliOptions &options);
-
-// 变更配置
-butil::Status ChangePeers(const LogicPoolID &logicPoolId,
-                          const CopysetID &copysetId,
-                          const Configuration &conf,
-                          const Configuration &newPeers,
-                          const braft::cli::CliOptions &options);
-
-// 转移leader
-butil::Status TransferLeader(const LogicPoolID &logicPoolId,
-                             const CopysetID &copysetId,
-                             const Configuration &conf,
-                             const Peer &peer,
-                             const braft::cli::CliOptions &options);
-
-// 重置复制组
-butil::Status ResetPeer(const LogicPoolID &logicPoolId,
-                        const CopysetID &copysetId,
-                        const Configuration& newPeers,
-                        const Peer& requestPeer,
+// Get the leader
+butil::Status GetLeader(const LogicPoolID& logicPoolId,
+                        const CopysetID& copysetId, const Configuration& conf,
+                        Peer* leader);
+
+// Add a peer
+butil::Status AddPeer(const LogicPoolID& logicPoolId,
+                      const CopysetID& copysetId, const Configuration& conf,
+                      const Peer& peer, const braft::cli::CliOptions& options);
+
+// Remove a peer
+butil::Status RemovePeer(const LogicPoolID& logicPoolId,
+                         const CopysetID& copysetId, const Configuration& conf,
+                         const Peer& peer,
+                         const braft::cli::CliOptions& options);
+
+// Change configuration
+butil::Status ChangePeers(const LogicPoolID& logicPoolId,
+                          const CopysetID& copysetId, const Configuration& conf,
+                          const Configuration& newPeers,
+                          const braft::cli::CliOptions& options);
+
+// Transfer leader
+butil::Status TransferLeader(const LogicPoolID& logicPoolId,
+                             const CopysetID& copysetId,
+                             const Configuration& conf, const Peer& peer,
+                             const braft::cli::CliOptions& options);
+
+// Reset replication group
+butil::Status ResetPeer(const LogicPoolID& logicPoolId,
+                        const CopysetID& copysetId,
+                        const Configuration& newPeers, const Peer& requestPeer,
                         const braft::cli::CliOptions& options);

-// 触发快照
-butil::Status Snapshot(const LogicPoolID &logicPoolId,
-                       const CopysetID &copysetId,
-                       const Peer& peer,
+// Trigger snapshot
+butil::Status Snapshot(const LogicPoolID& logicPoolId,
+                       const CopysetID& copysetId, const Peer& peer,
                        const braft::cli::CliOptions& options);

-// 给chunkserver上全部copyset副本触发快照
+// Trigger a snapshot for all copyset replicas on the chunkserver
 butil::Status SnapshotAll(const Peer& peer,
                           const braft::cli::CliOptions& options);
diff --git a/src/chunkserver/clone_copyer.h b/src/chunkserver/clone_copyer.h
index 6ccb7d7dc1..3c640f4693 100644
--- a/src/chunkserver/clone_copyer.h
+++ b/src/chunkserver/clone_copyer.h
@@ -24,56 +24,57 @@
 #define SRC_CHUNKSERVER_CLONE_COPYER_H_

 #include
+
+#include
 #include
-#include
 #include
-#include

 #include "include/chunkserver/chunkserver_common.h"
-#include "src/common/location_operator.h"
+#include "include/client/libcurve.h"
+#include "src/client/client_common.h"
 #include "src/client/config_info.h"
 #include "src/client/libcurve_file.h"
-#include "src/client/client_common.h"
-#include "include/client/libcurve.h"
+#include "src/common/location_operator.h"
 #include "src/common/s3_adapter.h"

 namespace curve {
 namespace chunkserver {
-using curve::common::S3Adapter;
 using curve::client::FileClient;
 using curve::client::UserInfo;
-using curve::common::LocationOperator;
-using curve::common::OriginType;
 using curve::common::GetObjectAsyncCallBack;
 using curve::common::GetObjectAsyncContext;
+using curve::common::LocationOperator;
+using curve::common::OriginType;
+using curve::common::S3Adapter;
 using std::string;

 class DownloadClosure;

 struct CopyerOptions {
-    // curvefs上的root用户信息
+    // Root user information on curvefs
    UserInfo curveUser;
-    // curvefs 的配置文件路径
+    // Configuration file path for curvefs
    std::string curveConf;
-    // s3adapter 的配置文件路径
+    // Configuration file path for s3adapter
    std::string s3Conf;
-    // curve client的对象指针
+    // Object pointer to the curve client
    std::shared_ptr<FileClient> curveClient;
-    // s3 adapter的对象指针
+    // Object pointer to the s3 adapter
    std::shared_ptr<S3Adapter> s3Client;
    // curve file's time to live
    uint64_t curveFileTimeoutSec;
 };

 struct AsyncDownloadContext {
-    // 源chunk的位置信息
+    // Location information of the source chunk
    string location;
-    // 请求下载数据在对象中的相对偏移
+    // Relative offset of the requested data within the object
    off_t offset;
-    // 请求下载数据的的长度
+    // Length of the requested download data
    size_t size;
-    // 存放下载数据的缓冲区
+    // Buffer for storing the downloaded data
    char* buf;
 };

@@ -85,9 +86,9 @@
    // lastest use time, using seconds
    int64_t lastUsedSec;
    // Init functions
-    CurveOpenTimestamp(): fd(-1), fileName(""), lastUsedSec(0) {}
-    CurveOpenTimestamp(int _fd, string _file, uint64_t _lastUsedSec):
-        fd(_fd), fileName(_file), lastUsedSec(_lastUsedSec) {}
+    CurveOpenTimestamp() : fd(-1), fileName(""), lastUsedSec(0) {}
+    CurveOpenTimestamp(int _fd, string _file, uint64_t _lastUsedSec)
+        : fd(_fd), fileName(_file), lastUsedSec(_lastUsedSec) {}
 };

 std::ostream& operator<<(std::ostream& out, const AsyncDownloadContext& rhs);

@@ -98,40 +99,34 @@ class OriginCopyer {
 public:
    virtual ~OriginCopyer() = default;

    /**
-     * 初始化资源
-     * @param options: 配置信息
-     * @return: 成功返回0,失败返回-1
+     * Initialize resources
+     * @param options: configuration information
+     * @return: 0 on success, -1 on failure
     */
    virtual int Init(const CopyerOptions& options);

    /**
-     * 释放资源
-     * @return: 成功返回0,失败返回-1
+     * Release resources
+     * @return: 0 on success, -1 on failure
     */
    virtual int Fini();

    /**
-     * 异步地从源端拷贝数据
-     * @param done:包含下载请求的上下文信息,
-     * 数据下载完成后执行该closure进行回调
+     * Asynchronously copy data from the source
+     * @param done: carries the context of the download request; the closure
+     * is invoked as a callback once the download completes
     */
    virtual void DownloadAsync(DownloadClosure* done);

 private:
-    void DownloadFromS3(const string& objectName,
-                        off_t off,
-                        size_t size,
-                        char* buf,
-                        DownloadClosure* done);
-    void DownloadFromCurve(const string& fileName,
-                           off_t off,
-                           size_t size,
-                           char* buf,
-                           DownloadClosure* done);
+    void DownloadFromS3(const string& objectName, off_t off, size_t size,
+                        char* buf, DownloadClosure* done);
+    void DownloadFromCurve(const string& fileName, off_t off, size_t size,
+                           char* buf, DownloadClosure* done);
    static void DeleteExpiredCurveCache(void* arg);

 private:
-    // curvefs上的root用户信息
+    // Root user information on curvefs
    UserInfo curveUser_;
    // mutex for protect curveOpenTime_
    std::mutex timeMtx_;
    std::list<CurveOpenTimestamp> curveOpenTime_;
    // curve file's time to live
    uint64_t curveFileTimeoutSec_;
-    // 负责跟curve交互
+    // Responsible for interacting with curve
    std::shared_ptr<FileClient> curveClient_;
-    // 负责跟s3交互
-    std::shared_ptr<S3Adapter> s3Client_;
-    // 保护fdMap_的互斥锁
-    std::mutex mtx_;
-    // 文件名->文件fd 的映射
+    // Responsible for interacting with s3
+    std::shared_ptr<S3Adapter> s3Client_;
+    // Mutex protecting fdMap_
+    std::mutex mtx_;
+    // Mapping from file name to file fd
    std::unordered_map<std::string, int> fdMap_;
    // Timer for clean expired curve file
    bthread::TimerThread timer_;
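For orientation, a rough caller-side sketch of how these pieces fit together (illustrative only; the location string, sizes and the surrounding objects readRequest, cloneCore and copyer are assumed, and CloneCore builds these contexts itself later in this patch):

    // Sketch: populate a download context and hand it to the copyer.
    AsyncDownloadContext* ctx = new (std::nothrow) AsyncDownloadContext;
    ctx->location = "volume-0001_0";  // source location (value assumed)
    ctx->offset = 0;                  // offset of the data in the source object
    ctx->size = 1048576;              // 1 MiB to copy (assumed)
    ctx->buf = new (std::nothrow) char[ctx->size];
    DownloadClosure* done = new (std::nothrow)
        DownloadClosure(readRequest, cloneCore, ctx, nullptr);
    copyer->DownloadAsync(done);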
diff --git a/src/chunkserver/clone_core.cpp b/src/chunkserver/clone_core.cpp
index b3efe70f36..af05a01646 100644
--- a/src/chunkserver/clone_core.cpp
+++ b/src/chunkserver/clone_core.cpp
@@ -20,15 +20,16 @@
  * Author: yangyaokai
 */

-#include
+#include "src/chunkserver/clone_core.h"
+
 #include
+#include

-#include "src/common/bitmap.h"
-#include "src/chunkserver/clone_core.h"
-#include "src/chunkserver/op_request.h"
-#include "src/chunkserver/copyset_node.h"
 #include "src/chunkserver/chunk_service_closure.h"
+#include "src/chunkserver/copyset_node.h"
 #include "src/chunkserver/datastore/chunkserver_datastore.h"
+#include "src/chunkserver/op_request.h"
+#include "src/common/bitmap.h"
 #include "src/common/timeutility.h"

 namespace curve {
@@ -37,26 +38,23 @@ namespace chunkserver {

 using curve::common::Bitmap;
 using curve::common::TimeUtility;

-static void ReadBufferDeleter(void* ptr) {
-    delete[] static_cast<char*>(ptr);
-}
+static void ReadBufferDeleter(void* ptr) { delete[] static_cast<char*>(ptr); }

 DownloadClosure::DownloadClosure(std::shared_ptr<ReadChunkRequest> readRequest,
                                  std::shared_ptr<CloneCore> cloneCore,
                                  AsyncDownloadContext* downloadCtx,
                                  Closure* done)
-    : isFailed_(false)
-    , beginTime_(TimeUtility::GetTimeofDayUs())
-    , downloadCtx_(downloadCtx)
-    , cloneCore_(cloneCore)
-    , readRequest_(readRequest)
-    , done_(done) {
-    // 记录初始metric
+    : isFailed_(false),
+      beginTime_(TimeUtility::GetTimeofDayUs()),
+      downloadCtx_(downloadCtx),
+      cloneCore_(cloneCore),
+      readRequest_(readRequest),
+      done_(done) {
+    // Record the initial metric
    if (readRequest_ != nullptr) {
        const ChunkRequest* request = readRequest_->GetChunkRequest();
        ChunkServerMetric* csMetric = ChunkServerMetric::GetInstance();
-        csMetric->OnRequest(request->logicpoolid(),
-                            request->copysetid(),
+        csMetric->OnRequest(request->logicpoolid(), request->copysetid(),
                             CSIOMetricType::DOWNLOAD);
    }
 }
@@ -66,60 +64,56 @@ void DownloadClosure::Run() {
    std::unique_ptr<AsyncDownloadContext> contextGuard(downloadCtx_);
    brpc::ClosureGuard doneGuard(done_);
    butil::IOBuf copyData;
-    copyData.append_user_data(
-        downloadCtx_->buf, downloadCtx_->size, ReadBufferDeleter);
+    copyData.append_user_data(downloadCtx_->buf, downloadCtx_->size,
+                              ReadBufferDeleter);

    CHECK(readRequest_ != nullptr) << "read request is nullptr.";
-    // 记录结束metric
+    // Record the end metric
    const ChunkRequest* request = readRequest_->GetChunkRequest();
    ChunkServerMetric* csMetric = ChunkServerMetric::GetInstance();
    uint64_t latencyUs = TimeUtility::GetTimeofDayUs() - beginTime_;
-    csMetric->OnResponse(request->logicpoolid(),
-                         request->copysetid(),
-                         CSIOMetricType::DOWNLOAD,
-                         downloadCtx_->size,
-                         latencyUs,
-                         isFailed_);
-
-    // 从源端拷贝数据失败
+    csMetric->OnResponse(request->logicpoolid(), request->copysetid(),
+                         CSIOMetricType::DOWNLOAD, downloadCtx_->size,
+                         latencyUs, isFailed_);
+
+    // Copying data from the source failed
    if (isFailed_) {
        LOG(ERROR) << "download origin data failed: "
-                  << " logic pool id: " << request->logicpoolid()
-                  << " copyset id: " << request->copysetid()
-                  << " chunkid: " << request->chunkid()
-                  << " AsyncDownloadContext: " << *downloadCtx_;
+                   << " logic pool id: " << request->logicpoolid()
+                   << " copyset id: " << request->copysetid()
+                   << " chunkid: " << request->chunkid()
+                   << " AsyncDownloadContext: " << *downloadCtx_;
        cloneCore_->SetResponse(
            readRequest_, CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
        return;
    }

    if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) {
-        // release doneGuard,将closure交给paste请求处理
-        cloneCore_->PasteCloneData(readRequest_,
-                                   &copyData,
-                                   downloadCtx_->offset,
-                                   downloadCtx_->size,
+        // Release doneGuard and hand the closure over to the paste request
+        // for processing
+        cloneCore_->PasteCloneData(readRequest_, &copyData,
+                                   downloadCtx_->offset, downloadCtx_->size,
                                    doneGuard.release());
    } else if (CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype()) {
-        // 出错或处理结束调用closure返回给用户
+        // On error or completion, the closure is invoked to return to the
+        // user
        cloneCore_->SetReadChunkResponse(readRequest_, &copyData);

-        // paste clone data是异步操作,很快就能处理完
-        cloneCore_->PasteCloneData(readRequest_,
-                                   &copyData,
-                                   downloadCtx_->offset,
-                                   downloadCtx_->size,
+        // Pasting clone data is an asynchronous operation and finishes
+        // quickly
+        cloneCore_->PasteCloneData(readRequest_, &copyData,
+                                   downloadCtx_->offset, downloadCtx_->size,
                                    nullptr);
    }
 }

 void CloneClosure::Run() {
-    // 释放资源
+    // Release resources
    std::unique_ptr<CloneClosure> selfGuard(this);
    std::unique_ptr<ChunkRequest> requestGuard(request_);
    std::unique_ptr<ChunkResponse> responseGuard(response_);
    brpc::ClosureGuard doneGuard(done_);
-    // 如果userResponse不为空,需要将response_中的相关内容赋值给userResponse
+    // If userResponse is not null, the relevant contents of response_ need
+    // to be assigned to userResponse
    if (userResponse_ != nullptr) {
        if (response_->has_status()) {
            userResponse_->set_status(response_->status());
        }
@@ -134,8 +128,8 @@
 }

 int CloneCore::CloneReadByLocalInfo(
-    std::shared_ptr<ReadChunkRequest> readRequest,
-    const CSChunkInfo &chunkInfo, Closure* done) {
+    std::shared_ptr<ReadChunkRequest> readRequest, const CSChunkInfo& chunkInfo,
+    Closure* done) {
    brpc::ClosureGuard doneGuard(done);
    const ChunkRequest* request = readRequest->request_;
    off_t offset = request->offset();
    size_t length = request->size();
@@ -148,8 +142,7 @@
                   << " logic pool id: " << request->logicpoolid()
                   << " copyset id: " << request->copysetid()
                   << " chunkid: " << request->chunkid()
-                   << " offset: " << offset
-                   << " length: " << length
+                   << " offset: " << offset << " length: " << length
                   << " block size: " << blockSize;
        SetResponse(readRequest,
                    CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST);
@@ -159,104 +152,103 @@
    uint32_t beginIndex = offset / blockSize;
    uint32_t endIndex = (offset + length - 1) / blockSize;
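    // Worked example (illustrative values, not from this patch): with
    // blockSize = 4096, offset = 6144 and length = 8192, beginIndex =
    // 6144 / 4096 = 1 and endIndex = (6144 + 8192 - 1) / 4096 = 3, so the
    // request touches bitmap blocks 1 through 3.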
-    // 请求提交到CloneManager的时候,chunk一定是clone chunk
-    // 但是由于有其他请求操作相同的chunk,此时chunk有可能已经被遍写过了
-    // 所以此处要先判断chunk是否是clone chunk,如果是再判断是否要拷贝数据
+    // When a request is submitted to the CloneManager, the chunk must be a
+    // clone chunk. However, since other requests may operate on the same
+    // chunk, it may already have been fully written by now. So first check
+    // whether the chunk is still a clone chunk, and only then decide whether
+    // the data needs to be copied.
    bool needClone = chunkInfo.isClone &&
-                     (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex)
-                      != Bitmap::NO_POS);
+                     (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex) !=
+                      Bitmap::NO_POS);
    if (needClone) {
-        // TODO(yyk) 这一块可以优化,但是优化方法判断条件可能比较复杂
-        // 目前只根据是否存在未写过的page来决定是否要触发拷贝
-        // chunk中请求读取范围内的数据存在page未被写过,则需要从源端拷贝数据
+        // TODO(yyk): this part could be optimized, but the conditions an
+        // optimized method would have to evaluate may be complex. Currently,
+        // whether to trigger a copy is decided only by whether there are
+        // unwritten pages: if any page within the requested read range of
+        // the chunk has not been written, the data must be copied from the
+        // source side.
        AsyncDownloadContext* downloadCtx =
            new (std::nothrow) AsyncDownloadContext;
        downloadCtx->location = chunkInfo.location;
        downloadCtx->offset = offset;
        downloadCtx->size = length;
        downloadCtx->buf = new (std::nothrow) char[length];
-        DownloadClosure* downloadClosure =
-            new (std::nothrow) DownloadClosure(readRequest,
-                                               shared_from_this(),
-                                               downloadCtx,
-                                               doneGuard.release());
+        DownloadClosure* downloadClosure = new (std::nothrow) DownloadClosure(
+            readRequest, shared_from_this(), downloadCtx, doneGuard.release());
        copyer_->DownloadAsync(downloadClosure);
        return 0;
    }

-    // 执行到这一步说明不需要拷贝数据,如果是recover请求可以直接返回成功
-    // 如果是ReadChunk请求,则直接读chunk并返回
+    // Reaching this point means no data needs to be copied: a recover
+    // request can return success directly, while a ReadChunk request reads
+    // the chunk directly and returns.
    if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) {
        SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
    } else if (CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype()) {
-        // 出错或处理结束调用closure返回给用户
+        // On error or completion, the closure is invoked to return to the
+        // user
        return ReadChunk(readRequest);
    }
    return 0;
 }

-void CloneCore::CloneReadByRequestInfo(std::shared_ptr<ReadChunkRequest>
-    readRequest, Closure* done) {
+void CloneCore::CloneReadByRequestInfo(
+    std::shared_ptr<ReadChunkRequest> readRequest, Closure* done) {
    brpc::ClosureGuard doneGuard(done);
-    const ChunkRequest* chunkRequest = readRequest->request_;
+    const ChunkRequest* chunkRequest = readRequest->request_;

    auto func = ::curve::common::LocationOperator::GenerateCurveLocation;
-    std::string location = func(chunkRequest->clonefilesource(),
-                                chunkRequest->clonefileoffset());
+    std::string location =
+        func(chunkRequest->clonefilesource(), chunkRequest->clonefileoffset());

-    AsyncDownloadContext* downloadCtx =
-        new (std::nothrow) AsyncDownloadContext;
+    AsyncDownloadContext* downloadCtx = new (std::nothrow) AsyncDownloadContext;
    downloadCtx->location = location;
    downloadCtx->offset = chunkRequest->offset();
    downloadCtx->size = chunkRequest->size();
    downloadCtx->buf = new (std::nothrow) char[chunkRequest->size()];
-    DownloadClosure* downloadClosure =
-        new (std::nothrow) DownloadClosure(readRequest,
-                                           shared_from_this(),
-                                           downloadCtx,
-                                           doneGuard.release());
+    DownloadClosure* downloadClosure = new (std::nothrow) DownloadClosure(
+        readRequest, shared_from_this(), downloadCtx, doneGuard.release());
    copyer_->DownloadAsync(downloadClosure);
    return;
 }

-int CloneCore::HandleReadRequest(
-    std::shared_ptr<ReadChunkRequest> readRequest,
-    Closure* done) {
+int CloneCore::HandleReadRequest(std::shared_ptr<ReadChunkRequest> readRequest,
+                                 Closure* done) {
    brpc::ClosureGuard doneGuard(done);
    const ChunkRequest* request = readRequest->request_;

-    // 获取chunk信息
+    // Obtain chunk information
    CSChunkInfo chunkInfo;
    ChunkID id = readRequest->ChunkId();
    std::shared_ptr<CSDataStore> dataStore = readRequest->datastore_;
    CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo);

    /*
-     * chunk存在:按照查看分析bitmap判断是否可以本地读
-     * chunk不存在:如包含clone信息则从clonesource读,否则返回错误
-     * 因为上层ReadChunkRequest::OnApply已经处理了NoExist
-     * 并且cloneinfo不存在的情况
-     */
+     * Chunk exists: check the bitmap to decide whether the read can be
+     * served locally.
+     * Chunk does not exist: if the request carries clone information, read
+     * from the clone source; otherwise return an error, because the upper
+     * layer ReadChunkRequest::OnApply has already handled the case where
+     * the chunk does not exist and there is no clone info.
+     */
    switch (errorCode) {
-    case CSErrorCode::Success:
-        return CloneReadByLocalInfo(readRequest, chunkInfo,
-                                    doneGuard.release());
-    case CSErrorCode::ChunkNotExistError:
-        if (existCloneInfo(request)) {
-            CloneReadByRequestInfo(readRequest, doneGuard.release());
-            return 0;
-        }
-        // 否则fallthrough直接返回错误
-        FALLTHROUGH_INTENDED;
-    default:
-        LOG(ERROR) << "get chunkinfo failed: "
-                   << " logic pool id: " << request->logicpoolid()
-                   << " copyset id: " << request->copysetid()
-                   << " chunkid: " << request->chunkid()
-                   << " error code: " << errorCode;
-        SetResponse(readRequest,
-                    CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
-        return -1;
+        case CSErrorCode::Success:
+            return CloneReadByLocalInfo(readRequest, chunkInfo,
+                                        doneGuard.release());
+        case CSErrorCode::ChunkNotExistError:
+            if (existCloneInfo(request)) {
+                CloneReadByRequestInfo(readRequest, doneGuard.release());
+                return 0;
+            }
+            // Otherwise, fall through and return an error directly
+            FALLTHROUGH_INTENDED;
+        default:
+            LOG(ERROR) << "get chunkinfo failed: "
+                       << " logic pool id: " << request->logicpoolid()
+                       << " copyset id: " << request->copysetid()
+                       << " chunkid: " << request->chunkid()
+                       << " error code: " << errorCode;
+            SetResponse(readRequest,
+                        CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
+            return -1;
    }
 }

@@ -267,29 +259,25 @@ int CloneCore::ReadChunk(std::shared_ptr<ReadChunkRequest> readRequest) {
    std::unique_ptr<char[]> chunkData(new char[length]);
    std::shared_ptr<CSDataStore> dataStore = readRequest->datastore_;
    CSErrorCode errorCode;
-    errorCode = dataStore->ReadChunk(request->chunkid(),
-                                     request->sn(),
-                                     chunkData.get(),
-                                     offset,
-                                     length);
+    errorCode = dataStore->ReadChunk(request->chunkid(), request->sn(),
+                                     chunkData.get(), offset, length);
    if (CSErrorCode::Success != errorCode) {
        SetResponse(readRequest,
                    CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
        LOG(ERROR) << "read chunk failed: "
-                  << " logic pool id: " << request->logicpoolid()
-                  << " copyset id: " << request->copysetid()
-                  << " chunkid: " << request->chunkid()
-                  << " read offset: " << offset
-                  << " read length: " << length
-                  << " error code: " << errorCode;
+                   << " logic pool id: " << request->logicpoolid()
+                   << " copyset id: " << request->copysetid()
+                   << " chunkid: " << request->chunkid()
+                   << " read offset: " << offset << " read length: " << length
+                   << " error code: " << errorCode;
        return -1;
    }

-    // 读成功后需要更新 apply index
+    // After a successful read, update the applied index
    readRequest->node_->UpdateAppliedIndex(readRequest->applyIndex);
-    // Return 完成数据读取后可以将结果返回给用户
-    readRequest->cntl_->response_attachment().append(
-        chunkData.get(), length);
+    // Once the data has been read, the result can be returned to the user
+    readRequest->cntl_->response_attachment().append(chunkData.get(), length);
    SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
    return 0;
 }
@@ -303,14 +291,19 @@
    std::shared_ptr<CSDataStore> dataStore = readRequest->datastore_;
    CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo);

-    // 如果chunk不存在,需要判断请求是否带源chunk的信息
-    // 如果带了源chunk信息,说明用了lazy分配chunk机制,可以直接返回clone data
-    // 有一种情况,当请求的chunk是lazy allocate的,请求时chunk在本地是存在的,
-    // 并且请求读取的部分区域已经被写过,在从源端拷贝数据的时候,chunk又被删除了
-    // 这种情况下会被当成正常请求返回,但是返回的数据不符合预期
-    // 由于当前我们的curve file都是延迟删除的,文件真正删除时能够确保没有用户IO
-    // 如果后续添加了一些改动触发到这个问题,则需要进行修复
-    // TODO(yyk) fix it
+    // If the chunk does not exist, check whether the request carries the
+    // source chunk's information. If it does, the lazy chunk allocation
+    // mechanism is in use and the clone data can be returned directly.
+    // There is one case where the requested chunk is lazily allocated, the
+    // chunk exists locally at request time, part of the requested range has
+    // already been written, and the chunk is deleted while the data is being
+    // copied from the source. That case is treated as a normal request and
+    // returned, but the returned data is not what is expected.
+    // Since our curve files are currently deleted lazily, it is guaranteed
+    // that there is no user IO when a file is actually deleted. If future
+    // changes trigger this issue, it will need to be fixed.
+    // TODO(yyk) fix it
    bool expect = errorCode == CSErrorCode::Success ||
                  (errorCode == CSErrorCode::ChunkNotExistError &&
                   existCloneInfo(request));
@@ -327,11 +320,11 @@
    size_t length = request->size();
    butil::IOBuf responseData;
-    // 如果chunk存在,则要从chunk中读取已经写过的区域合并后返回
+    // If the chunk exists, the regions that have already been written are
+    // read from the chunk and merged into the returned result
    if (errorCode == CSErrorCode::Success) {
        char* chunkData = new (std::nothrow) char[length];
-        int ret = ReadThenMerge(
-            readRequest, chunkInfo, cloneData, chunkData);
+        int ret = ReadThenMerge(readRequest, chunkInfo, cloneData, chunkData);
        responseData.append_user_data(chunkData, length, ReadBufferDeleter);
        if (ret < 0) {
            SetResponse(readRequest,
@@ -343,7 +336,7 @@
    }
    readRequest->cntl_->response_attachment().append(responseData);
-    // 读成功后需要更新 apply index
+    // After a successful read, update the applied index
    readRequest->node_->UpdateAppliedIndex(readRequest->applyIndex);
    SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
    return 0;
@@ -351,8 +344,7 @@

 int CloneCore::ReadThenMerge(std::shared_ptr<ReadChunkRequest> readRequest,
                              const CSChunkInfo& chunkInfo,
-                             const butil::IOBuf* cloneData,
-                             char* chunkData) {
+                             const butil::IOBuf* cloneData, char* chunkData) {
    const ChunkRequest* request = readRequest->request_;
    std::shared_ptr<CSDataStore> dataStore = readRequest->datastore_;
@@ -361,13 +353,11 @@
    uint32_t blockSize = chunkInfo.blockSize;
    uint32_t beginIndex = offset / blockSize;
    uint32_t endIndex = (offset + length - 1) / blockSize;
-    // 获取chunk文件已经写过和未被写过的区域
+    // Obtain the written and unwritten regions of the chunk file
    std::vector<BitRange> copiedRanges;
    std::vector<BitRange> uncopiedRanges;
    if (chunkInfo.isClone) {
-        chunkInfo.bitmap->Divide(beginIndex,
-                                 endIndex,
-                                 &uncopiedRanges,
+        chunkInfo.bitmap->Divide(beginIndex, endIndex, &uncopiedRanges,
                                 &copiedRanges);
    } else {
        BitRange range;
@@ -376,23 +366,22 @@
        copiedRanges.push_back(range);
    }

-    // 需要读取的起始位置在chunk中的偏移
+    // Offset, within the chunk, of the starting position to read
    off_t readOff;
-    // 读取到的数据要拷贝到缓冲区中的相对偏移
+    // Relative offset in the buffer where the read data is to be copied
    off_t relativeOff;
-    // 每次从chunk读取的数据长度
+    // Length of the data read from the chunk each time
    size_t readSize;

-    // 1.Read 对于已写过的区域,从chunk文件中读取
+    // 1. Read: regions that have already been written are read from the
+    // chunk file
    CSErrorCode errorCode;
    for (auto& range : copiedRanges) {
        readOff = range.beginIndex * blockSize;
        readSize = (range.endIndex - range.beginIndex + 1) * blockSize;
        relativeOff = readOff - offset;
-        errorCode = dataStore->ReadChunk(request->chunkid(),
-                                         request->sn(),
-                                         chunkData + relativeOff,
-                                         readOff,
-                                         readSize);
+        errorCode =
+            dataStore->ReadChunk(request->chunkid(), request->sn(),
+                                 chunkData + relativeOff, readOff, readSize);
        if (CSErrorCode::Success != errorCode) {
            LOG(ERROR) << "read chunk failed: "
                       << " logic pool id: " << request->logicpoolid()
@@ -405,7 +394,8 @@
        }
    }

-    // 2.Merge 对于未写过的区域,从源端下载的区域中拷贝出来进行merge
+    // 2. Merge: regions that have not been written are copied out of the
+    // area downloaded from the source and merged in
    for (auto& range : uncopiedRanges) {
        readOff = range.beginIndex * blockSize;
        readSize = (range.endIndex - range.beginIndex + 1) * blockSize;
@@ -416,16 +406,15 @@
 }

 void CloneCore::PasteCloneData(std::shared_ptr<ReadChunkRequest> readRequest,
-                               const butil::IOBuf* cloneData,
-                               off_t offset,
-                               size_t cloneDataSize,
-                               Closure* done) {
+                               const butil::IOBuf* cloneData, off_t offset,
+                               size_t cloneDataSize, Closure* done) {
    const ChunkRequest* request = readRequest->request_;
-    bool dontPaste = CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype()
-                     && !enablePaste_;
+    bool dontPaste =
+        CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype() && !enablePaste_;
    if (dontPaste) return;

-    // 数据拷贝完成以后,需要将产生PaseChunkRequest将数据Paste到chunk文件
+    // After the data copy completes, a PasteChunkRequest is generated to
+    // paste the data into the chunk file
    ChunkRequest* pasteRequest = new ChunkRequest();
    pasteRequest->set_optype(curve::chunkserver::CHUNK_OP_TYPE::CHUNK_OP_PASTE);
    pasteRequest->set_logicpoolid(request->logicpoolid());
@@ -440,22 +429,18 @@
    closure->SetRequest(pasteRequest);
    closure->SetResponse(pasteResponse);
    closure->SetClosure(done);
-    // 如果是recover chunk的请求,需要将paste的结果通过rpc返回
+    // For a recover-chunk request, the result of the paste needs to be
+    // returned through rpc
    if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) {
        closure->SetUserResponse(readRequest->response_);
    }
-    ChunkServiceClosure* pasteClosure =
-        new (std::nothrow) ChunkServiceClosure(nullptr,
-                                               pasteRequest,
-                                               pasteResponse,
-                                               closure);
-
-    req = std::make_shared<PasteChunkInternalRequest>(readRequest->node_,
-                                                      pasteRequest,
-                                                      pasteResponse,
-                                                      cloneData,
-                                                      pasteClosure);
+    ChunkServiceClosure* pasteClosure = new (std::nothrow)
+        ChunkServiceClosure(nullptr, pasteRequest, pasteResponse, closure);
+
+    req = std::make_shared<PasteChunkInternalRequest>(
+        readRequest->node_, pasteRequest, pasteResponse, cloneData,
+        pasteClosure);
    req->Process();
 }
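The divide-and-merge flow above is easier to follow with concrete numbers; a worked sketch (values are illustrative, not from this patch):

    // Sketch: blockSize = 4096, request offset = 0, length = 16384, so
    // beginIndex = 0 and endIndex = 3. If only blocks 1 and 2 were written
    // locally, Divide() yields:
    //   copiedRanges   = { {1, 2} }          -> one local read of 8192 bytes
    //                                           at readOff 4096
    //   uncopiedRanges = { {0, 0}, {3, 3} }  -> 4096 bytes each taken from
    //                                           cloneData at relative offsets
    //                                           0 and 12288
    // ReadThenMerge() then returns the merged 16384-byte buffer to the
    // response path.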
diff --git a/src/chunkserver/clone_core.h b/src/chunkserver/clone_core.h
index c91183feb3..3f3eb2ef69 100644
--- a/src/chunkserver/clone_core.h
+++ b/src/chunkserver/clone_core.h
@@ -23,25 +23,26 @@
 #ifndef SRC_CHUNKSERVER_CLONE_CORE_H_
 #define SRC_CHUNKSERVER_CLONE_CORE_H_

+#include
 #include
 #include
 #include
-#include

 #include

-#include "proto/chunk.pb.h"
 #include "include/chunkserver/chunkserver_common.h"
-#include "src/common/timeutility.h"
+#include "proto/chunk.pb.h"
 #include "src/chunkserver/clone_copyer.h"
 #include "src/chunkserver/datastore/define.h"
+#include "src/common/timeutility.h"

 namespace curve {
 namespace chunkserver {

+using common::TimeUtility;
+using curve::chunkserver::CSChunkInfo;
 using ::google::protobuf::Closure;
 using ::google::protobuf::Message;
-using curve::chunkserver::CSChunkInfo;
-using common::TimeUtility;

 class ReadChunkRequest;
 class PasteChunkInternalRequest;
@@ -51,151 +52,147 @@ class DownloadClosure : public Closure {
 public:
    DownloadClosure(std::shared_ptr<ReadChunkRequest> readRequest,
                    std::shared_ptr<CloneCore> cloneCore,
-                    AsyncDownloadContext* downloadCtx,
-                    Closure *done);
+                    AsyncDownloadContext* downloadCtx, Closure* done);

    void Run();

-    void SetFailed() {
-        isFailed_ = true;
-    }
+    void SetFailed() { isFailed_ = true; }

-    AsyncDownloadContext* GetDownloadContext() {
-        return downloadCtx_;
-    }
+    AsyncDownloadContext* GetDownloadContext() { return downloadCtx_; }

 protected:
-    // 下载是否出错出错
+    // Whether the download failed
    bool isFailed_;
-    // 请求开始的时间
+    // Time the request started
    uint64_t beginTime_;
-    // 下载请求上下文信息
+    // Context information of the download request
    AsyncDownloadContext* downloadCtx_;
-    // clone core对象
+    // Clone core object
    std::shared_ptr<CloneCore> cloneCore_;
-    // read chunk请求对象
+    // Read chunk request object
    std::shared_ptr<ReadChunkRequest> readRequest_;
-    // DownloadClosure生命周期结束后需要执行的回调
+    // Callback to execute when the DownloadClosure's lifetime ends
    Closure* done_;
 };

 class CloneClosure : public Closure {
 public:
-    CloneClosure() : request_(nullptr)
-                   , response_(nullptr)
-                   , userResponse_(nullptr)
-                   , done_(nullptr) {}
+    CloneClosure()
+        : request_(nullptr),
+          response_(nullptr),
+          userResponse_(nullptr),
+          done_(nullptr) {}

    void Run();
-    void SetClosure(Closure *done) {
-        done_ = done;
-    }
+    void SetClosure(Closure* done) { done_ = done; }
    void SetRequest(Message* request) {
-        request_ = dynamic_cast<ChunkRequest*>(request);
+        request_ = dynamic_cast<ChunkRequest*>(request);
    }
    void SetResponse(Message* response) {
-        response_ = dynamic_cast<ChunkResponse*>(response);
+        response_ = dynamic_cast<ChunkResponse*>(response);
    }
    void SetUserResponse(Message* response) {
-        userResponse_ = dynamic_cast<ChunkResponse*>(response);
+        userResponse_ = dynamic_cast<ChunkResponse*>(response);
    }

 private:
-    // paste chunk的请求结构体
-    ChunkRequest *request_;
-    // paste chunk的响应结构体
-    ChunkResponse *response_;
-    // 真正要返回给用户的响应结构体
-    ChunkResponse *userResponse_;
-    // CloneClosure生命周期结束后需要执行的回调
-    Closure *done_;
+    // Request structure of the paste chunk
+    ChunkRequest* request_;
+    // Response structure of the paste chunk
+    ChunkResponse* response_;
+    // The response structure actually returned to the user
+    ChunkResponse* userResponse_;
+    // Callback to execute when the CloneClosure's lifetime ends
+    Closure* done_;
 };

 class CloneCore : public std::enable_shared_from_this<CloneCore> {
    friend class DownloadClosure;
+
 public:
    CloneCore(uint32_t sliceSize, bool enablePaste,
              std::shared_ptr<OriginCopyer> copyer)
-        : sliceSize_(sliceSize)
-        , enablePaste_(enablePaste)
-        , copyer_(copyer) {}
+        : sliceSize_(sliceSize), enablePaste_(enablePaste), copyer_(copyer) {}
    virtual ~CloneCore() {}

    /**
-     * 处理读请求的逻辑
-     * @param readRequest[in]:读请求信息
-     * @param done[in]:任务完成后要执行的closure
-     * @return: 成功返回0,失败返回-1
+     * Logic for handling a read request
+     * @param readRequest[in]: read request information
+     * @param done[in]: the closure to execute after the task completes
+     * @return: 0 on success, -1 on failure
     */
    int HandleReadRequest(std::shared_ptr<ReadChunkRequest> readRequest,
                          Closure* done);

 protected:
    /**
-     * 本地chunk文件存在情况下,按照本地记录的clone和bitmap信息进行数据读取
-     * 会涉及读取远程文件结合本地文件进行merge返回结果
-     * @param[in/out] readRequest: 用户请求&响应上下文
-     * @param[in] chunkInfo: 对应本地的chunkinfo
-     * @return 成功返回0,失败返回负数
+     * When the local chunk file exists, read data
+     * according to the locally recorded clone and bitmap information. This
+     * may involve reading the remote file and merging it with the local
+     * file to produce the result.
+     * @param[in/out] readRequest: user request & response context
+     * @param[in] chunkInfo: the local chunkinfo
+     * @return 0 on success, a negative number on failure
     */
    int CloneReadByLocalInfo(std::shared_ptr<ReadChunkRequest> readRequest,
-                             const CSChunkInfo &chunkInfo, Closure* done);
+                             const CSChunkInfo& chunkInfo, Closure* done);

    /**
-     * 本地chunk文件不存在情况下,按照用户请求上下文中带的clonesource信息进行数据读取
-     * 不涉及merge本地结果
-     * @param[in/out] readRequest: 用户请求&响应上下文
+     * When the local chunk file does not exist, read the data according to
+     * the clone source information carried in the user request context. No
+     * merging with local results is involved.
+     * @param[in/out] readRequest: user request & response context
     */
    void CloneReadByRequestInfo(std::shared_ptr<ReadChunkRequest> readRequest,
-        Closure* done);
+                                Closure* done);

    /**
-     * 从本地chunk中读取请求的区域,然后设置response
-     * @param readRequest: 用户的ReadRequest
-     * @return: 成功返回0,失败返回-1
+     * Read the requested region from the local chunk, then set the response
+     * @param readRequest: the user's ReadRequest
+     * @return: 0 on success, -1 on failure
     */
    int ReadChunk(std::shared_ptr<ReadChunkRequest> readRequest);

    /**
-     * 设置read chunk类型的response,包括返回的数据和其他返回参数
-     * 从本地chunk中读取已被写过的区域,未写过的区域从克隆下来的数据中获取
-     * 然后将数据在内存中merge
-     * @param readRequest: 用户的ReadRequest
-     * @param cloneData: 从源端拷贝下来的数据,数据起始偏移同请求中的偏移
-     * @return: 成功返回0,失败返回-1
+     * Set the response for a read-chunk request, including the returned
+     * data and the other response parameters. Regions that have been
+     * written are read from the local chunk; unwritten regions are taken
+     * from the cloned data; the two are then merged in memory.
+     * @param readRequest: the user's ReadRequest
+     * @param cloneData: data copied from the source; its starting offset is
+     * the same as the offset in the request
+     * @return: 0 on success, -1 on failure
     */
    int SetReadChunkResponse(std::shared_ptr<ReadChunkRequest> readRequest,
                             const butil::IOBuf* cloneData);

-    // 从本地chunk中读取已经写过的区域合并到clone data中
+    // Read the already-written regions from the local chunk and merge them
+    // into the clone data
    int ReadThenMerge(std::shared_ptr<ReadChunkRequest> readRequest,
                      const CSChunkInfo& chunkInfo,
-                      const butil::IOBuf* cloneData,
-                      char* chunkData);
+                      const butil::IOBuf* cloneData, char* chunkData);

    /**
-     * 将从源端下载下来的数据paste到本地chunk文件中
-     * @param readRequest: 用户的ReadRequest
-     * @param cloneData: 从源端下载的数据
-     * @param offset: 下载的数据在chunk文件中的偏移
-     * @param cloneDataSize: 下载的数据长度
-     * @param done:任务完成后要执行的closure
+     * Paste the data downloaded from the source into the local chunk file
+     * @param readRequest: the user's ReadRequest
+     * @param cloneData: the data downloaded from the source
+     * @param offset: offset of the downloaded data within the chunk file
+     * @param cloneDataSize: length of the downloaded data
+     * @param done: the closure to execute after the task completes
     */
    void PasteCloneData(std::shared_ptr<ReadChunkRequest> readRequest,
-                        const butil::IOBuf* cloneData,
-                        off_t offset,
-                        size_t cloneDataSize,
-                        Closure* done);
+                        const butil::IOBuf* cloneData, off_t offset,
+                        size_t cloneDataSize, Closure* done);

    inline void SetResponse(std::shared_ptr<ReadChunkRequest> readRequest,
                            CHUNK_OP_STATUS status);

 private:
-    // 每次拷贝的slice的大小
+    // Size of each copied slice
    uint32_t sliceSize_;
-    // 判断read chunk类型的请求是否需要paste, true需要paste,false表示不需要
+    // Determine whether a read chunk type request requires paste.
True requires + // paste, while false indicates no need bool enablePaste_; - // 负责从源端下载数据 + // Responsible for downloading data from the source std::shared_ptr copyer_; }; diff --git a/src/chunkserver/clone_manager.cpp b/src/chunkserver/clone_manager.cpp index 6fc428bdba..c41d844500 100644 --- a/src/chunkserver/clone_manager.cpp +++ b/src/chunkserver/clone_manager.cpp @@ -28,8 +28,7 @@ namespace chunkserver { CloneManager::CloneManager() : isRunning_(false) {} CloneManager::~CloneManager() { - if (isRunning_.load(std::memory_order_acquire)) - Fini(); + if (isRunning_.load(std::memory_order_acquire)) Fini(); } int CloneManager::Init(const CloneOptions& options) { @@ -38,9 +37,8 @@ int CloneManager::Init(const CloneOptions& options) { } int CloneManager::Run() { - if (isRunning_.load(std::memory_order_acquire)) - return 0; - // 启动线程池 + if (isRunning_.load(std::memory_order_acquire)) return 0; + // Start Thread Pool LOG(INFO) << "Begin to run clone manager."; tp_ = std::make_shared>(); int ret = tp_->Start(options_.threadNum, options_.queueCapacity); @@ -56,8 +54,7 @@ int CloneManager::Run() { } int CloneManager::Fini() { - if (!isRunning_.load(std::memory_order_acquire)) - return 0; + if (!isRunning_.load(std::memory_order_acquire)) return 0; LOG(INFO) << "Begin to stop clone manager."; isRunning_.store(false, std::memory_order_release); @@ -69,10 +66,9 @@ int CloneManager::Fini() { std::shared_ptr CloneManager::GenerateCloneTask( std::shared_ptr request, - ::google::protobuf::Closure *done) { - // 如果core是空的,任务无法被处理,所以返回空 - if (options_.core == nullptr) - return nullptr; + ::google::protobuf::Closure* done) { + // If the core is empty, the task cannot be processed, so it returns empty + if (options_.core == nullptr) return nullptr; std::shared_ptr cloneTask = std::make_shared(request, options_.core, done); @@ -80,11 +76,9 @@ std::shared_ptr CloneManager::GenerateCloneTask( } bool CloneManager::IssueCloneTask(std::shared_ptr cloneTask) { - if (!isRunning_.load(std::memory_order_acquire)) - return false; + if (!isRunning_.load(std::memory_order_acquire)) return false; - if (cloneTask == nullptr) - return false; + if (cloneTask == nullptr) return false; tp_->Enqueue(cloneTask->Closure()); diff --git a/src/chunkserver/clone_manager.h b/src/chunkserver/clone_manager.h index 01f7088218..96e489d5c1 100644 --- a/src/chunkserver/clone_manager.h +++ b/src/chunkserver/clone_manager.h @@ -25,16 +25,17 @@ #include #include -#include // NOLINT -#include // NOLINT + #include -#include +#include // NOLINT #include +#include // NOLINT +#include #include "include/chunkserver/chunkserver_common.h" -#include "src/common/concurrent/task_thread_pool.h" -#include "src/chunkserver/clone_task.h" #include "src/chunkserver/clone_core.h" +#include "src/chunkserver/clone_task.h" +#include "src/common/concurrent/task_thread_pool.h" namespace curve { namespace chunkserver { @@ -44,18 +45,16 @@ using curve::common::TaskThreadPool; class ReadChunkRequest; struct CloneOptions { - // 核心逻辑处理类 + // Core logic processing class std::shared_ptr core; - // 最大线程数 + // Maximum number of threads uint32_t threadNum; - // 最大队列深度 + // Maximum queue depth uint32_t queueCapacity; - // 任务状态检查的周期,单位ms + // The cycle of task status check, in ms uint32_t checkPeriod; - CloneOptions() : core(nullptr) - , threadNum(10) - , queueCapacity(100) - , checkPeriod(5000) {} + CloneOptions() + : core(nullptr), threadNum(10), queueCapacity(100), checkPeriod(5000) {} }; class CloneManager { @@ -64,49 +63,51 @@ class CloneManager { virtual ~CloneManager(); 
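    // Illustrative lifecycle sketch (not part of this patch; `core` and
    // `request` are assumed to exist in the caller's context):
    //   CloneOptions opts;
    //   opts.core = core;        // std::shared_ptr<CloneCore>
    //   opts.threadNum = 10;
    //   opts.queueCapacity = 100;
    //   CloneManager manager;
    //   manager.Init(opts);
    //   manager.Run();
    //   auto task = manager.GenerateCloneTask(request, nullptr);
    //   if (task != nullptr) manager.IssueCloneTask(task);
    //   manager.Fini();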
    /**
-     * 初始化
+     * Initialize
     *
-     * @param options[in]:初始化参数
-     * @return 错误码
+     * @param options[in]: initialization parameters
+     * @return an error code
     */
    virtual int Init(const CloneOptions& options);

    /**
-     * 启动所有线程
+     * Start all threads
     *
-     * @return 成功返回0,失败返回-1
+     * @return 0 on success, -1 on failure
     */
    virtual int Run();

    /**
-     * 停止所有线程
+     * Stop all threads
     *
-     * @return 成功返回0,失败返回-1
+     * @return 0 on success, -1 on failure
     */
    virtual int Fini();

    /**
-     * 生成克隆任务
-     * @param request[in]:请求信息
-     * @return:返回生成的克隆任务,如果生成失败,返回nullptr
+     * Generate a clone task
+     * @param request[in]: request information
+     * @return: the generated clone task, or nullptr if generation fails
     */
    virtual std::shared_ptr<CloneTask> GenerateCloneTask(
        std::shared_ptr<ReadChunkRequest> request,
        ::google::protobuf::Closure* done);

    /**
-     * 发布克隆任务,产生克隆任务放到线程池中处理
-     * @param task[in]:克隆任务
-     * @return 成功返回true,失败返回false
+     * Publish a clone task; the generated task is placed in the thread pool
+     * for processing
+     * @param task[in]: the clone task
+     * @return true on success, false on failure
     */
    virtual bool IssueCloneTask(std::shared_ptr<CloneTask> cloneTask);

 private:
-    // 克隆任务管理相关的选项,调Init的时候初始化
+    // Options for clone task management, initialized when Init is called
    CloneOptions options_;
-    // 处理克隆任务的异步线程池
+    // Asynchronous thread pool for processing clone tasks
    std::shared_ptr<TaskThreadPool<>> tp_;
-    // 当前线程池是否处于工作状态
+    // Whether the thread pool is currently running
    std::atomic<bool> isRunning_;
 };
diff --git a/src/chunkserver/clone_task.h b/src/chunkserver/clone_task.h
index 48766bce9a..cd55f0b439 100644
--- a/src/chunkserver/clone_task.h
+++ b/src/chunkserver/clone_task.h
@@ -25,37 +25,33 @@

 #include
 #include
+
 #include
 #include

 #include "include/chunkserver/chunkserver_common.h"
-#include "src/common/uncopyable.h"
 #include "src/chunkserver/clone_copyer.h"
 #include "src/chunkserver/clone_core.h"
+#include "src/common/uncopyable.h"

 namespace curve {
 namespace chunkserver {

 using curve::common::Uncopyable;

-class CloneTask : public Uncopyable
-                , public std::enable_shared_from_this<CloneTask>{
+class CloneTask : public Uncopyable,
+                  public std::enable_shared_from_this<CloneTask> {
 public:
    CloneTask(std::shared_ptr<ReadChunkRequest> request,
              std::shared_ptr<CloneCore> core,
              ::google::protobuf::Closure* done)
-        : core_(core)
-        , readRequest_(request)
-        , done_(done)
-        , isComplete_(false) {}
+        : core_(core), readRequest_(request), done_(done), isComplete_(false) {}

    virtual ~CloneTask() {}

    virtual std::function<void()> Closure() {
        auto sharedThis = shared_from_this();
-        return [sharedThis] () {
-            sharedThis->Run();
-        };
+        return [sharedThis]() { sharedThis->Run(); };
    }

    virtual void Run() {
@@ -65,18 +61,16 @@
        isComplete_ = true;
    }

-    virtual bool IsComplete() {
-        return isComplete_;
-    }
+    virtual bool IsComplete() { return isComplete_; }

 protected:
-    // 克隆核心逻辑
+    // Clone core logic
    std::shared_ptr<CloneCore> core_;
-    // 此次任务相关信息
+    // Information related to this task
    std::shared_ptr<ReadChunkRequest> readRequest_;
-    // 任务结束后要执行的Closure
+    // Closure to execute after the task finishes
    ::google::protobuf::Closure* done_;
-    // 任务是否结束
+    // Whether the task has finished
    bool isComplete_;
 };
diff --git a/src/chunkserver/conf_epoch_file.cpp b/src/chunkserver/conf_epoch_file.cpp
index 6a39c6ce3e..aa8fa0077c 100644
--- a/src/chunkserver/conf_epoch_file.cpp
+++ b/src/chunkserver/conf_epoch_file.cpp
@@ -22,20 +22,20 @@

 #include "src/chunkserver/conf_epoch_file.h"

-#include
 #include
+#include

 #include "src/common/crc32.h"

 namespace curve {
 namespace chunkserver {

-// conf.epoch文件最大长度
+// Maximum
length of conf.epoch file const uint32_t kConfEpochFileMaxSize = 4096; const uint64_t kConfEpochFileMagic = 0x6225929368674119; -int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, - CopysetID *copysetID, uint64_t *epoch) { +int ConfEpochFile::Load(const std::string& path, LogicPoolID* logicPoolID, + CopysetID* copysetID, uint64_t* epoch) { int fd = fs_->Open(path.c_str(), O_RDWR); if (0 > fd) { LOG(ERROR) << "LoadConfEpoch failed open file " << path @@ -47,7 +47,7 @@ int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, char json[kConfEpochFileMaxSize] = {0}; int size = 0; - // 1. read数据 + // 1. Read data size = fs_->Read(fd, json, 0, kConfEpochFileMaxSize); if (size <= 0) { LOG(ERROR) << "LoadConfEpoch read failed: " << path @@ -58,7 +58,7 @@ int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, } fs_->Close(fd); - // 2.反序列化 + // 2. Deserialization ConfEpoch confEpoch; std::string jsonStr(json); std::string err; @@ -71,7 +71,7 @@ int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, return -1; } - // 3. 验证crc + // 3. Verify CRC uint32_t crc32c = ConfEpochCrc(confEpoch); if (crc32c != confEpoch.checksum()) { LOG(ERROR) << "conf epoch crc error: " << jsonStr; @@ -89,15 +89,15 @@ int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, return 0; } -int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, +int ConfEpochFile::Save(const std::string& path, const LogicPoolID logicPoolID, const CopysetID copysetID, const uint64_t epoch) { - // 1. 转换成conf message + // 1. Convert to conf message ConfEpoch confEpoch; confEpoch.set_logicpoolid(logicPoolID); confEpoch.set_copysetid(copysetID); confEpoch.set_epoch(epoch); - // 计算crc + // Calculate crc uint32_t crc32c = ConfEpochCrc(confEpoch); confEpoch.set_checksum(crc32c); @@ -113,7 +113,7 @@ int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, return -1; } - // 2. open文件 + // 2. Open file int fd = fs_->Open(path.c_str(), O_RDWR | O_CREAT); if (0 > fd) { LOG(ERROR) << "LoadConfEpoch failed open file " << path @@ -122,7 +122,7 @@ int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, return -1; } - // 3. write文件 + // 3. Write file if (static_cast(out.size()) != fs_->Write(fd, out.c_str(), 0, out.size())) { LOG(ERROR) << "SaveConfEpoch write failed, path: " << path @@ -132,7 +132,7 @@ int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, return -1; } - // 4. 落盘 + // 4. 
Flush data to disk
    if (0 != fs_->Fsync(fd)) {
        LOG(ERROR) << "SaveConfEpoch sync failed, path: " << path
                   << ", errno: " << errno
@@ -145,20 +145,20 @@
    return 0;
 }

-uint32_t ConfEpochFile::ConfEpochCrc(const ConfEpoch &confEpoch) {
+uint32_t ConfEpochFile::ConfEpochCrc(const ConfEpoch& confEpoch) {
    uint32_t crc32c = 0;
    uint32_t logicPoolId = confEpoch.logicpoolid();
    uint32_t copysetId = confEpoch.copysetid();
    uint64_t epoch = confEpoch.epoch();
    uint64_t magic = kConfEpochFileMagic;

-    crc32c = curve::common::CRC32(
-        crc32c, reinterpret_cast<char*>(&logicPoolId), sizeof(logicPoolId));
-    crc32c = curve::common::CRC32(crc32c, reinterpret_cast<char*>(&copysetId),
+    crc32c = curve::common::CRC32(crc32c, reinterpret_cast<char*>(&logicPoolId),
+                                  sizeof(logicPoolId));
+    crc32c = curve::common::CRC32(crc32c, reinterpret_cast<char*>(&copysetId),
                                  sizeof(copysetId));
-    crc32c = curve::common::CRC32(crc32c, reinterpret_cast<char*>(&epoch),
+    crc32c = curve::common::CRC32(crc32c, reinterpret_cast<char*>(&epoch),
                                  sizeof(epoch));
-    crc32c = curve::common::CRC32(crc32c, reinterpret_cast<char*>(&magic),
+    crc32c = curve::common::CRC32(crc32c, reinterpret_cast<char*>(&magic),
                                  sizeof(magic));

    return crc32c;
diff --git a/src/chunkserver/conf_epoch_file.h b/src/chunkserver/conf_epoch_file.h
index 91ee27ec6b..4d2513fc2b 100644
--- a/src/chunkserver/conf_epoch_file.h
+++ b/src/chunkserver/conf_epoch_file.h
@@ -23,13 +23,13 @@
 #ifndef SRC_CHUNKSERVER_CONF_EPOCH_FILE_H_
 #define SRC_CHUNKSERVER_CONF_EPOCH_FILE_H_

-#include
 #include
+#include

-#include "src/fs/local_filesystem.h"
-#include "src/fs/fs_common.h"
 #include "include/chunkserver/chunkserver_common.h"
 #include "proto/copyset.pb.h"
+#include "src/fs/fs_common.h"
+#include "src/fs/local_filesystem.h"

 namespace curve {
 namespace chunkserver {
@@ -38,47 +38,44 @@
 using curve::fs::LocalFileSystem;
 using curve::fs::LocalFsFactory;

 /**
- * 配置版本序列化和反序列化的工具类
- * TODO(wudemiao): 后期替换采用json编码
+ * Utility class for serializing and deserializing the configuration epoch
+ * TODO(wudemiao): replace with JSON encoding later
 */
 class ConfEpochFile {
 public:
-    explicit ConfEpochFile(std::shared_ptr<LocalFileSystem> fs)
-        : fs_(fs) {}
+    explicit ConfEpochFile(std::shared_ptr<LocalFileSystem> fs) : fs_(fs) {}

    /**
-     * 加载快照文件中的配置版本
-     * @param path:文件路径
-     * @param logicPoolID:逻辑池id
-     * @param copysetID:复制组id
-     * @param epoch:配置版本,出参,返回读取的epoch值
-     * @return 0,成功; -1失败
+     * Load the configuration epoch from the snapshot file
+     * @param path: file path
+     * @param logicPoolID: logical pool id
+     * @param copysetID: copyset id
+     * @param epoch: the configuration epoch; output parameter that returns
+     * the epoch value read
+     * @return 0 on success, -1 on failure
     */
-    int Load(const std::string &path,
-             LogicPoolID *logicPoolID,
-             CopysetID *copysetID,
-             uint64_t *epoch);
+    int Load(const std::string& path, LogicPoolID* logicPoolID,
+             CopysetID* copysetID, uint64_t* epoch);

    /**
-     * 保存配置版本信息到快照文件中序列化的格式如下,处理head表示长度,使用二
-     * 进制,其它都是文本格式,便于必要的时候能够直接用查看,sync保证数据落盘
-     * | head | 配置版本信息 |
-     * | 8 bytes size_t | uint32_t | 变 长文本 |
-     * | length | crc32 | logic pool id | copyset id | epoch |
-     * 上面的持久化使用 ‘:’ 分隔
-     * @param path:文件路径
-     * @param logicPoolID:逻辑池id
-     * @param copysetID:复制组id
-     * @param epoch:配置版本
-     * @return 0成功; -1失败
+     * Serialize the configuration epoch and save it to the snapshot file.
+     * The format is as follows: 'head' denotes the length and is binary;
+     * everything else is text, so it can be inspected directly when needed.
+     * sync guarantees that the data reaches the disk.
diff --git a/src/chunkserver/conf_epoch_file.h b/src/chunkserver/conf_epoch_file.h index 91ee27ec6b..4d2513fc2b 100644 --- a/src/chunkserver/conf_epoch_file.h +++ b/src/chunkserver/conf_epoch_file.h
@@ -23,13 +23,13 @@ #ifndef SRC_CHUNKSERVER_CONF_EPOCH_FILE_H_ #define SRC_CHUNKSERVER_CONF_EPOCH_FILE_H_
-#include #include +
-#include "src/fs/local_filesystem.h" -#include "src/fs/fs_common.h" #include "include/chunkserver/chunkserver_common.h" #include "proto/copyset.pb.h" +#include "src/fs/fs_common.h" +#include "src/fs/local_filesystem.h"
 namespace curve { namespace chunkserver {
@@ -38,47 +38,44 @@ using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory;
 /**
- * 配置版本序列化和反序列化的工具类
- * TODO(wudemiao): 后期替换采用json编码
+ * Utility class for serializing and deserializing the configuration epoch
+ * TODO(wudemiao): switch to JSON encoding later
 */
 class ConfEpochFile { public:
- explicit ConfEpochFile(std::shared_ptr fs) - : fs_(fs) {}
+ explicit ConfEpochFile(std::shared_ptr<LocalFileSystem> fs) : fs_(fs) {}
 /**
- * 加载快照文件中的配置版本
- * @param path:文件路径
- * @param logicPoolID:逻辑池id
- * @param copysetID:复制组id
- * @param epoch:配置版本,出参,返回读取的epoch值
- * @return 0,成功; -1失败
+ * Load the configuration epoch from the snapshot file
+ * @param path: file path
+ * @param logicPoolID: logical pool ID
+ * @param copysetID: copyset ID
+ * @param epoch: configuration epoch (output parameter); receives the
+ * epoch value that was read
+ * @return 0 on success, -1 on failure
 */
- int Load(const std::string &path, - LogicPoolID *logicPoolID, - CopysetID *copysetID, - uint64_t *epoch);
+ int Load(const std::string& path, LogicPoolID* logicPoolID, + CopysetID* copysetID, uint64_t* epoch);
 /**
- * 保存配置版本信息到快照文件中序列化的格式如下,处理head表示长度,使用二
- * 进制,其它都是文本格式,便于必要的时候能够直接用查看,sync保证数据落盘
- * | head | 配置版本信息 |
- * | 8 bytes size_t | uint32_t | 变 长文本 |
- * | length | crc32 | logic pool id | copyset id | epoch |
- * 上面的持久化使用 ‘:’ 分隔
- * @param path:文件路径
- * @param logicPoolID:逻辑池id
- * @param copysetID:复制组id
- * @param epoch:配置版本
- * @return 0成功; -1失败
+ * Serialize the configuration epoch information and save it to the
+ * snapshot file. The format is as follows: the 'head' part expresses the
+ * length and is binary; everything else is in text format, so the file
+ * can be inspected directly when necessary. 'sync' guarantees that the
+ * data reaches the disk.
+ * |           head            |  configuration epoch information  |
+ * | 8 bytes size_t | uint32_t |       variable-length text        |
+ * |     length     |  crc32   | logic pool id | copyset id | epoch |
+ * The persisted fields above are separated by ':'
+ * @param path: file path
+ * @param logicPoolID: logical pool ID
+ * @param copysetID: copyset ID
+ * @param epoch: configuration epoch
+ * @return 0 on success, -1 on failure
 */
- int Save(const std::string &path, - const LogicPoolID logicPoolID, - const CopysetID copysetID, - const uint64_t epoch);
+ int Save(const std::string& path, const LogicPoolID logicPoolID, + const CopysetID copysetID, const uint64_t epoch);
 private:
- static uint32_t ConfEpochCrc(const ConfEpoch &confEpoch);
+ static uint32_t ConfEpochCrc(const ConfEpoch& confEpoch);
 std::shared_ptr<LocalFileSystem> fs_; };
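A quick usage sketch of the class above (hedged: the LocalFsFactory::CreateFs call and all paths and IDs are illustrative assumptions, not taken from this patch):

    // Hypothetical wiring; error handling reduced to return-code checks.
    std::shared_ptr<LocalFileSystem> lfs =
        LocalFsFactory::CreateFs(curve::fs::FileSystemType::EXT4, "");
    ConfEpochFile epochFile(lfs);

    const std::string path = "/data/copysets/4295067296/data/conf.epoch";
    if (0 != epochFile.Save(path, /*logicPoolID=*/1, /*copysetID=*/100001,
                            /*epoch=*/5)) {
        // handle save error
    }

    LogicPoolID lp = 0;
    CopysetID cs = 0;
    uint64_t epoch = 0;
    if (0 != epochFile.Load(path, &lp, &cs, &epoch)) {
        // handle load error (open/read/parse/CRC mismatch)
    }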
diff --git a/src/chunkserver/config_info.h b/src/chunkserver/config_info.h index 67c3f57524..c00809413f 100644 --- a/src/chunkserver/config_info.h +++ b/src/chunkserver/config_info.h
@@ -23,33 +23,34 @@ #ifndef SRC_CHUNKSERVER_CONFIG_INFO_H_ #define SRC_CHUNKSERVER_CONFIG_INFO_H_
-#include #include +
-#include "src/fs/local_filesystem.h" -#include "src/chunkserver/trash.h" -#include "src/chunkserver/inflight_throttle.h" -#include "src/chunkserver/concurrent_apply/concurrent_apply.h" #include "include/chunkserver/chunkserver_common.h" +#include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "src/chunkserver/inflight_throttle.h" +#include "src/chunkserver/trash.h" +#include "src/fs/local_filesystem.h"
 namespace curve { namespace chunkserver {
-using curve::fs::LocalFileSystem; using curve::chunkserver::concurrent::ConcurrentApplyModule; +using curve::fs::LocalFileSystem;
 class FilePool; class CopysetNodeManager; class CloneManager;
 /**
- * copyset node的配置选项
+ * Configuration options of a copyset node
 */
 struct CopysetNodeOptions {
- // follower to candidate 超时时间,单位ms,默认是1000ms
+ // follower-to-candidate timeout, in ms; default: 1000ms
 int electionTimeoutMs;
- // 定期打快照的时间间隔,默认3600s,也就是1小时
+ // Interval between periodic snapshots; default: 3600s, i.e. one hour
 int snapshotIntervalS;
 // If true, read requests will be invoked in current lease leader node.
@@ -57,79 +58,86 @@ struct CopysetNodeOptions { // Default: true bool enbaleLeaseRead;
- // 如果follower和leader日志相差超过catchupMargin,
- // 就会执行install snapshot进行恢复,默认: 1000
+ // If the follower's log lags behind the leader's by more than
+ // catchupMargin entries, an install snapshot is performed for recovery;
+ // default: 1000
 int catchupMargin;
- // 是否开启pthread执行用户代码,默认false
+ // Whether to run user code in a pthread; default: false
 bool usercodeInPthread;
- // 所有uri个格式: ${protocol}://${绝对或者相对路径}
- // eg:
- // posix: local
- // bluestore: bluestore
+ // Format of all URIs: ${protocol}://${absolute or relative path}
+ // e.g.:
+ // posix: local
+ // bluestore: bluestore
- // raft log uri, 默认raft_log
+ // Raft log uri; default: raft_log
 std::string logUri;
- // raft meta uri, 默认raft_meta
+ // Raft meta uri; default: raft_meta
 std::string raftMetaUri;
- // raft snapshot uri,默认raft_snpashot
+ // Raft snapshot uri; default: raft_snapshot
 std::string raftSnapshotUri;
- // chunk data uri,默认data
+ // Chunk data uri; default: data
 std::string chunkDataUri;
- // chunk snapshot uri,默认snapshot
+ // Chunk snapshot uri; default: snapshot
 std::string chunkSnapshotUri;
- // copyset data recycling uri,默认recycler
+ // Copyset data recycling uri; default: recycler
 std::string recyclerUri;
 std::string ip; uint32_t port;
- // chunk文件的大小
+ // Size of a chunk file
 uint32_t maxChunkSize;
 // WAL segment file size
 uint32_t maxWalSegmentSize;
- // chunk文件的page大小
+ // Page size of a chunk file
 uint32_t metaPageSize;
 // alignment for I/O request
 uint32_t blockSize;
- // clone chunk的location长度限制
+ // Length limit of a clone chunk's location
 uint32_t locationLimit;
- // 并发模块
- ConcurrentApplyModule *concurrentapply;
- // Chunk file池子
+ // Concurrency module
+ ConcurrentApplyModule* concurrentapply;
+ // Chunk file pool
 std::shared_ptr<FilePool> chunkFilePool;
 // WAL file pool
 std::shared_ptr<FilePool> walFilePool;
- // 文件系统适配层
+ // File system adaptation layer
 std::shared_ptr<LocalFileSystem> localFileSystem;
- // 回收站, 心跳模块判断该chunkserver不在copyset配置组时,
- // 通知copysetManager将copyset目录移动至回收站
- // 一段时间后实际回收物理空间
+ // Recycle bin: when the heartbeat module decides that this chunkserver is
+ // no longer in a copyset's configuration group, it notifies the
+ // copysetManager to move the copyset directory into the recycle bin; the
+ // physical space is actually reclaimed after a period of time
 std::shared_ptr<Trash> trash;
- // snapshot流控
- scoped_refptr<SnapshotThrottle> *snapshotThrottle;
+ // Snapshot flow control
+ scoped_refptr<SnapshotThrottle>* snapshotThrottle;
- // 限制chunkserver启动时copyset并发恢复加载的数量,为0表示不限制
+ // Limits how many copysets are recovered/loaded concurrently at
+ // chunkserver startup; 0 means no limit
 uint32_t loadConcurrency = 0;
 // chunkserver sync_thread_pool number of threads.
 uint32_t syncConcurrency = 20;
 // copyset trigger sync timeout
 uint32_t syncTriggerSeconds = 25;
- // 检查copyset是否加载完成出现异常时的最大重试次数
- // 可能的异常:1.当前大多数副本还没起来;2.网络问题等导致无法获取leader
- // 3.其他的原因导致无法获取到leader的committed index
+ // Maximum number of retries when the check for whether a copyset has
+ // finished loading hits an exception. Possible exceptions: 1. most
+ // replicas are not up yet; 2. network problems etc. make the leader
+ // unreachable; 3. the leader's committed index cannot be obtained for
+ // other reasons
 uint32_t checkRetryTimes = 3;
- // 当前peer的applied_index与leader上的committed_index差距小于该值
- // 则判定copyset已经加载完成
+ // If the gap between the current peer's applied_index and the leader's
+ // committed_index is smaller than this value, the copyset is considered
+ // fully loaded
 uint32_t finishLoadMargin = 2000;
- // 循环判定copyset是否加载完成的内部睡眠时间
+ // Sleep interval inside the loop that checks whether the copyset has
+ // finished loading
 uint32_t checkLoadMarginIntervalMs = 1000;
 // enable O_DSYNC when open chunkfile
@@ -145,11 +153,11 @@ };
 /**
- * ChunkServiceManager 的依赖项
+ * Dependencies of the ChunkServiceManager
 */
 struct ChunkServiceOptions {
- CopysetNodeManager *copysetNodeManager;
- CloneManager *cloneManager;
+ CopysetNodeManager* copysetNodeManager;
+ CloneManager* cloneManager;
 std::shared_ptr<InflightThrottle> inflightThrottle;
 };
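To make the defaults documented above concrete, here is a hedged wiring sketch; every value and path is illustrative, and only the field names come from the struct above:

    CopysetNodeOptions options;
    options.electionTimeoutMs = 1000;   // follower-to-candidate timeout
    options.snapshotIntervalS = 3600;   // snapshot once per hour
    options.catchupMargin = 1000;       // install snapshot beyond this lag
    options.usercodeInPthread = false;
    // URIs follow ${protocol}://${path}; "local" is the posix protocol.
    options.logUri = "local://./0/copysets/raft_log";
    options.raftMetaUri = "local://./0/copysets/raft_meta";
    options.raftSnapshotUri = "local://./0/copysets/raft_snapshot";
    options.chunkDataUri = "local://./0/copysets/data";
    options.recyclerUri = "local://./0/recycler";
    options.ip = "127.0.0.1";
    options.port = 8200;
    options.maxChunkSize = 16 * 1024 * 1024;
    options.metaPageSize = 4 * 1024;
    options.loadConcurrency = 10;       // 0 would mean "no limit"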
diff --git a/src/chunkserver/copyset_node.cpp b/src/chunkserver/copyset_node.cpp index a00f7aaf9a..87e8d70135 100755 --- a/src/chunkserver/copyset_node.cpp +++ b/src/chunkserver/copyset_node.cpp
@@ -22,33 +22,34 @@
 #include "src/chunkserver/copyset_node.h"
-#include -#include -#include #include -#include #include -#include -#include +#include +#include +#include +#include + #include #include -#include -#include -#include #include #include +#include +#include +#include +#include +#include
-#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h" #include "src/chunkserver/chunk_closure.h" -#include "src/chunkserver/op_request.h" -#include "src/common/concurrent/task_thread_pool.h" -#include "src/fs/fs_common.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/datastore/define.h" #include "src/chunkserver/datastore/datastore_file_helper.h" -#include "src/common/uri_parser.h" +#include "src/chunkserver/datastore/define.h" +#include "src/chunkserver/op_request.h" +#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h" +#include "src/common/concurrent/task_thread_pool.h" #include "src/common/crc32.h" #include "src/common/fs_util.h" +#include "src/common/uri_parser.h" +#include "src/fs/fs_common.h"
 namespace braft { DECLARE_bool(raft_enable_leader_lease);
@@ -59,37 +60,36 @@ namespace chunkserver {
 using curve::fs::FileSystemInfo;
-const char *kCurveConfEpochFilename = "conf.epoch";
+const char* kCurveConfEpochFilename = "conf.epoch";
 uint32_t CopysetNode::syncTriggerSeconds_ = 25;
-std::shared_ptr<common::TaskThreadPool<>> - CopysetNode::copysetSyncPool_ = nullptr;
-
-CopysetNode::CopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &initConf) : - logicPoolId_(logicPoolId), - copysetId_(copysetId), - conf_(initConf), - epoch_(0), - peerId_(), - nodeOptions_(), - raftNode_(nullptr), - chunkDataApath_(), - chunkDataRpath_(), - appliedIndex_(0), - leaderTerm_(-1), - configChange_(std::make_shared<ConfigurationChange>()), - lastSnapshotIndex_(0), - scaning_(false), - lastScanSec_(0), - enableOdsyncWhenOpenChunkFile_(false), - isSyncing_(false), - checkSyncingIntervalMs_(500) { -}
+std::shared_ptr<common::TaskThreadPool<>> CopysetNode::copysetSyncPool_ = + nullptr;
+
+CopysetNode::CopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& initConf) + : logicPoolId_(logicPoolId), + copysetId_(copysetId), + conf_(initConf), + epoch_(0), + peerId_(), + nodeOptions_(), + raftNode_(nullptr), + chunkDataApath_(), + chunkDataRpath_(), + appliedIndex_(0), + leaderTerm_(-1), + configChange_(std::make_shared<ConfigurationChange>()), + lastSnapshotIndex_(0), + scaning_(false), + lastScanSec_(0), + enableOdsyncWhenOpenChunkFile_(false), + isSyncing_(false), + checkSyncingIntervalMs_(500) {}
 CopysetNode::~CopysetNode() {
- // 移除 copyset的metric
+ // Remove the copyset's metric
 ChunkServerMetric::GetInstance()->RemoveCopysetMetric(logicPoolId_, copysetId_); metric_ = nullptr;
@@ -98,17 +98,16 @@ CopysetNode::~CopysetNode() { delete nodeOptions_.snapshot_file_system_adaptor; nodeOptions_.snapshot_file_system_adaptor = nullptr; }
- LOG(INFO) << "release copyset node: " - << GroupIdString();
+ LOG(INFO) << "release copyset node: " << GroupIdString();
 }
-int CopysetNode::Init(const CopysetNodeOptions &options) {
+int CopysetNode::Init(const CopysetNodeOptions& options) {
 std::string groupId = GroupId();
 std::string protocol = curve::common::UriParser::ParseUri( options.chunkDataUri, &copysetDirPath_);
 if (protocol.empty()) {
- // TODO(wudemiao): 增加必要的错误码并返回
+ // TODO(wudemiao): add the necessary error codes and return them
 LOG(ERROR) << "not support chunk data uri's protocol" << " error chunkDataDir is: " << options.chunkDataUri << ". Copyset: " << GroupIdString();
@@ -135,12 +134,11 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { dsOptions.locationLimit = options.locationLimit; dsOptions.enableOdsyncWhenOpenChunkFile = options.enableOdsyncWhenOpenChunkFile;
- dataStore_ = std::make_shared<CSDataStore>(options.localFileSystem, - options.chunkFilePool, - dsOptions);
+ dataStore_ = std::make_shared<CSDataStore>( + options.localFileSystem, options.chunkFilePool, dsOptions);
 CHECK(nullptr != dataStore_);
 if (false == dataStore_->Initialize()) {
- // TODO(wudemiao): 增加必要的错误码并返回
+ // TODO(wudemiao): add the necessary error codes and return them
 LOG(ERROR) << "data store init failed. " << "Copyset: " << GroupIdString(); return -1;
@@ -150,10 +148,10 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { syncThread_.Init(this); dataStore_->SetCacheCondPtr(syncThread_.cond_);
 dataStore_->SetCacheLimits(options.syncChunkLimit, - options.syncThreshold);
+ options.syncThreshold);
 LOG(INFO) << "init sync thread success limit = " - << options.syncChunkLimit << - "syncthreshold = " << options.syncThreshold;
+ << options.syncChunkLimit + << "syncthreshold = " << options.syncThreshold;
 }
 recyclerUri_ = options.recyclerUri;
@@ -166,21 +164,21 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { // initialize raft node options corresponding to the copy set node InitRaftNodeOptions(options);
- /* 初始化 peer id */
+ /* Initialize the peer id */
 butil::ip_t ip; butil::str2ip(options.ip.c_str(), &ip); butil::EndPoint addr(ip, options.port);
 /**
- * idx默认是零,在chunkserver不允许一个进程有同一个copyset的多副本,
- * 这一点注意和不让braft区别开来
+ * The default idx is zero; chunkserver does not allow one process to hold
+ * multiple replicas of the same copyset. Note that this is a point where
+ * it differs from braft
 */
 peerId_ = PeerId(addr, 0);
 raftNode_ = std::make_shared<RaftNode>(groupId, peerId_);
 concurrentapply_ = options.concurrentapply;
- /*
- * 初始化copyset性能metrics
+ /*
+ * Initialize the copyset performance metrics
 */
 int ret = ChunkServerMetric::GetInstance()->CreateCopysetMetric( logicPoolId_, copysetId_);
@@ -189,10 +187,11 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { << "Copyset: " << GroupIdString(); return -1; }
- metric_ = ChunkServerMetric::GetInstance()->GetCopysetMetric( - logicPoolId_, copysetId_);
+ metric_ = ChunkServerMetric::GetInstance()->GetCopysetMetric(logicPoolId_, + copysetId_);
 if (metric_ != nullptr) {
- // TODO(yyk) 后续考虑添加datastore层面的io metric
+ // TODO(yyk) Consider adding datastore-level I/O metrics in the future
 metric_->MonitorDataStore(dataStore_.get());
 }
@@ -213,7 +212,7 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { }
 int CopysetNode::Run() {
- // raft node的初始化实际上让起run起来
+ // Initializing the raft node actually makes it start running
 if (0 != raftNode_->init(nodeOptions_)) { LOG(ERROR) << "Fail to init raft node. " << "Copyset: " << GroupIdString();
@@ -237,19 +236,20 @@ void CopysetNode::Fini() { WaitSnapshotDone();
 if (nullptr != raftNode_) {
- // 关闭所有关于此raft node的服务
+ // Shut down all services on this raft node
 raftNode_->shutdown(nullptr);
- // 等待所有的正在处理的task结束
+ // Wait for all in-flight tasks to finish
 raftNode_->join();
 }
 if (nullptr != concurrentapply_) {
- // 将未刷盘的数据落盘,如果不刷盘
- // 迁移copyset时,copyset移除后再去执行WriteChunk操作可能出错
+ // Flush the data that has not yet reached the disk; otherwise, when a
+ // copyset is migrated, running WriteChunk after the copyset has been
+ // removed may fail
 concurrentapply_->Flush();
 }
 }
-void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions &options) {
+void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions& options) {
 auto groupId = GroupId();
 nodeOptions_.initial_conf = conf_; nodeOptions_.election_timeout_ms = options.electionTimeoutMs;
@@ -257,20 +257,19 @@ nodeOptions_.node_owns_fsm = false; nodeOptions_.snapshot_interval_s = options.snapshotIntervalS; nodeOptions_.log_uri = options.logUri;
- nodeOptions_.log_uri.append("/").append(groupId) - .append("/").append(RAFT_LOG_DIR);
+ nodeOptions_.log_uri.append("/").append(groupId).append("/").append( + RAFT_LOG_DIR);
 nodeOptions_.raft_meta_uri = options.raftMetaUri;
- nodeOptions_.raft_meta_uri.append("/").append(groupId) - .append("/").append(RAFT_META_DIR);
+ nodeOptions_.raft_meta_uri.append("/").append(groupId).append("/").append( + RAFT_META_DIR);
 nodeOptions_.snapshot_uri = options.raftSnapshotUri;
- nodeOptions_.snapshot_uri.append("/").append(groupId) - .append("/").append(RAFT_SNAP_DIR);
+ nodeOptions_.snapshot_uri.append("/").append(groupId).append("/").append( + RAFT_SNAP_DIR);
 nodeOptions_.usercode_in_pthread = options.usercodeInPthread;
 nodeOptions_.snapshot_throttle = options.snapshotThrottle;
- CurveFilesystemAdaptor* cfa = - new CurveFilesystemAdaptor(options.chunkFilePool, - options.localFileSystem);
+ CurveFilesystemAdaptor* cfa = new CurveFilesystemAdaptor( + options.chunkFilePool, options.localFileSystem);
 std::vector<std::string> filterList;
 std::string snapshotMeta(BRAFT_SNAPSHOT_META_FILE);
 filterList.push_back(kCurveConfEpochFilename);
@@ -282,47 +281,52 @@ void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions &options)
 new scoped_refptr<braft::FileSystemAdaptor>(cfa);
 }
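For orientation, InitRaftNodeOptions above derives each copyset's raft directories by appending the group id and a fixed directory name to the configured base URI. Illustratively (base path and group id are made up; the directory-name constants are inferred from the defaults mentioned earlier, e.g. RAFT_SNAP_DIR matching the raft_snapshot path shown in on_snapshot_load below):

    // With options.logUri == "local://./0/copysets" and groupId == "4294967397":
    std::string log_uri = "local://./0/copysets";
    log_uri.append("/").append("4294967397").append("/").append("raft_log");
    // -> "local://./0/copysets/4294967397/raft_log"
    // The raft_meta and raft_snapshot URIs are composed the same way.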
-void CopysetNode::on_apply(::braft::Iterator &iter) {
+void CopysetNode::on_apply(::braft::Iterator& iter) {
 for (; iter.valid(); iter.next()) {
- // 放在bthread中异步执行,避免阻塞当前状态机的执行
+ // Run asynchronously in a bthread to avoid blocking the execution of
+ // the current state machine
 braft::AsyncClosureGuard doneGuard(iter.done());
 /**
- * 获取向braft提交任务时候传递的ChunkClosure,里面包含了
- * Op的所有上下文 ChunkOpRequest
+ * Obtain the ChunkClosure that was passed in when the task was
+ * submitted to braft; it carries the op's entire context, the
+ * ChunkOpRequest
 */
 braft::Closure *closure = iter.done();
 if (nullptr != closure) {
 /**
- * 1.closure不是null,那么说明当前节点正常,直接从内存中拿到Op
- * context进行apply
+ * 1. If the closure is not null, the current node is healthy; the
+ * op context is taken directly from memory and applied
 */
- ChunkClosure - *chunkClosure = dynamic_cast<ChunkClosure *>(iter.done());
+ ChunkClosure* chunkClosure = + dynamic_cast<ChunkClosure*>(iter.done());
 CHECK(nullptr != chunkClosure) << "ChunkClosure dynamic cast failed";
 std::shared_ptr<ChunkOpRequest>& opRequest = chunkClosure->request_;
- concurrentapply_->Push(opRequest->ChunkId(), ChunkOpRequest::Schedule(opRequest->OpType()), // NOLINT - &ChunkOpRequest::OnApply, opRequest, - iter.index(), doneGuard.release());
+ concurrentapply_->Push( + opRequest->ChunkId(), + ChunkOpRequest::Schedule(opRequest->OpType()), // NOLINT + &ChunkOpRequest::OnApply, opRequest, iter.index(), + doneGuard.release());
 } else {
- // 获取log entry
+ // Obtain the log entry
 butil::IOBuf log = iter.data();
 /**
- * 2.closure是null,有两种情况:
- * 2.1. 节点重启,回放apply,这里会将Op log entry进行反序列化,
- * 然后获取Op信息进行apply
- * 2.2. follower apply
+ * 2. If the closure is null, there are two cases:
+ * 2.1. the node restarted and is replaying the log; the op log
+ * entry is deserialized and the recovered op information is
+ * applied;
+ * 2.2. follower apply
 */
 ChunkRequest request;
 butil::IOBuf data;
 auto opReq = ChunkOpRequest::Decode(log, &request, &data, iter.index(), GetLeaderId());
 auto chunkId = request.chunkid();
- concurrentapply_->Push(chunkId, ChunkOpRequest::Schedule(request.optype()), // NOLINT - &ChunkOpRequest::OnApplyFromLog, opReq, - dataStore_, std::move(request), data);
+ concurrentapply_->Push( + chunkId, ChunkOpRequest::Schedule(request.optype()), // NOLINT + &ChunkOpRequest::OnApplyFromLog, opReq, dataStore_, + std::move(request), data);
 }
 }
 }
@@ -331,11 +335,11 @@ void CopysetNode::on_shutdown() { LOG(INFO) << GroupIdString() << " is shutdown"; }
-void CopysetNode::on_snapshot_save(::braft::SnapshotWriter *writer, - ::braft::Closure *done) {
+void CopysetNode::on_snapshot_save(::braft::SnapshotWriter* writer, + ::braft::Closure* done) {
 snapshotFuture_ = - std::async(std::launch::async, - &CopysetNode::save_snapshot_background, this, writer, done);
+ std::async(std::launch::async, &CopysetNode::save_snapshot_background, + this, writer, done);
 }
 void CopysetNode::WaitSnapshotDone() { @@ -345,12 +349,12 @@ }
-void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer, - ::braft::Closure *done) {
+void CopysetNode::save_snapshot_background(::braft::SnapshotWriter* writer, + ::braft::Closure* done) {
 brpc::ClosureGuard doneGuard(done);
 /**
- * 1.flush I/O to disk,确保数据都落盘
+ * 1. Flush I/O to disk to make sure all the data has been persisted
 */
 concurrentapply_->Flush();
@@ -359,37 +363,41 @@ }
 /**
- * 2.保存配置版本: conf.epoch,注意conf.epoch是存放在data目录下
+ * 2. Save the configuration epoch: conf.epoch; note that conf.epoch is
+ * stored under the data directory
 */
- std::string - filePathTemp = writer->get_path() + "/" + kCurveConfEpochFilename;
+ std::string filePathTemp = + writer->get_path() + "/" + kCurveConfEpochFilename;
 if (0 != SaveConfEpoch(filePathTemp)) {
 done->status().set_error(errno, "invalid: %s", strerror(errno));
 LOG(ERROR) << "SaveConfEpoch failed. "
- << "Copyset: " << GroupIdString() - << ", errno: " << errno << ", "
+ << "Copyset: " << GroupIdString() << ", errno: " << errno + << ", "
 << ", error message: " << strerror(errno);
 return;
 }
 /**
- * 3.保存chunk文件名的列表到快照元数据文件中
+ * 3. Save the list of chunk file names into the snapshot metadata file
 */
 std::vector<std::string> files;
 if (0 == fs_->List(chunkDataApath_, &files)) {
 for (const auto& fileName : files) {
- // raft保存快照时,meta信息中不用保存快照文件列表
- // raft下载快照的时候,在下载完chunk以后,会单独获取snapshot列表
+ // When raft saves a snapshot, the list of snapshot files does not
+ // need to be stored in the meta information; when raft downloads a
+ // snapshot, it fetches the snapshot list separately after the
+ // chunks have been downloaded
 bool isSnapshot = DatastoreFileHelper::IsSnapshotFile(fileName);
 if (isSnapshot) { continue; }
 std::string chunkApath;
- // 通过绝对路径,算出相对于快照目录的路径
+ // Compute the path relative to the snapshot directory from the
+ // absolute path
 chunkApath.append(chunkDataApath_);
 chunkApath.append("/").append(fileName);
- std::string filePath = curve::common::CalcRelativePath( - writer->get_path(), chunkApath);
+ std::string filePath = + curve::common::CalcRelativePath(writer->get_path(), chunkApath);
 writer->add_file(filePath);
 }
 } else {
@@ -401,16 +409,16 @@ }
 /**
- * 4. 保存conf.epoch文件到快照元数据文件中
+ * 4. Save the conf.epoch file into the snapshot metadata file
 */
- writer->add_file(kCurveConfEpochFilename);
+ writer->add_file(kCurveConfEpochFilename);
 }
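The file-list step above registers each chunk file by its path relative to the snapshot directory (via curve::common::CalcRelativePath). A standalone illustration of the same path arithmetic, using std::filesystem and the directory layout from the snapshot-path example in on_snapshot_load below:

    #include <filesystem>
    #include <iostream>

    int main() {
        namespace stdfs = std::filesystem;
        // Paths borrowed from the on_snapshot_load comments below.
        stdfs::path snapshotDir = "/mnt/sda/1-10001/raft_snapshot/snapshot_0043";
        stdfs::path chunkFile = "/mnt/sda/1-10001/data/chunk_1";
        // Same idea as CalcRelativePath(writer->get_path(), chunkApath):
        std::cout << stdfs::relative(chunkFile, snapshotDir) << '\n';
        // prints "../../data/chunk_1"
        return 0;
    }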
" << "Copyset: " << GroupIdString() << ", path: " << chunkDataApath_; - ret = nodeOptions_.snapshot_file_system_adaptor->get()-> - rename(snapshotChunkDataDir, chunkDataApath_); + ret = nodeOptions_.snapshot_file_system_adaptor->get()->rename( + snapshotChunkDataDir, chunkDataApath_); if (!ret) { LOG(ERROR) << "rename snapshot data dir " << snapshotChunkDataDir << "to chunk data dir " << chunkDataApath_ << " failed. " @@ -449,13 +461,13 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { << "to chunk data dir " << chunkDataApath_ << " success. " << "Copyset: " << GroupIdString(); } else { - LOG(INFO) << "load snapshot data path: " - << snapshotChunkDataDir << " not exist. " + LOG(INFO) << "load snapshot data path: " << snapshotChunkDataDir + << " not exist. " << "Copyset: " << GroupIdString(); } /** - * 2. 加载配置版本文件 + * 2. Load Configuration Version File */ std::string filePath = reader->get_path() + "/" + kCurveConfEpochFilename; if (fs_->FileExists(filePath)) { @@ -468,20 +480,25 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { } /** - * 3.重新init data store,场景举例: + * 3. Reinitializing the data store, with examples: * - * (1) 例如一个add peer,之后立马read这个时候data store会返回chunk - * not exist,因为这个新增的peer在刚开始起来的时候,没有任何数据,这 - * 个时候data store init了,那么新增的peer在leader恢复了数据之后, - * data store并不感知; + * (1) For instance, when adding a new peer and immediately reading data, + * the data store may return "chunk not exist." This is because the newly + * added peer initially has no data, and when the data store is initialized, + * it is not aware of the data that the new peer receives after the leader + * recovers its data. * - * (2) peer通过install snapshot恢复了所有的数据是通过rename操作的, - * 如果某个file之前被data store打开了,那么rename能成功,但是老的 - * 文件只有等data store close老的文件才能删除,所以需要重新init data - * store,并且close的文件的fd,然后重新open新的文件,不然data store - * 会一直是操作的老的文件,而一旦data store close相应的fd一次之后, - * 后面的write的数据就会丢,除此之外,如果 datastore init没有重新open - * 文件,也将导致read不到恢复过来的数据,而是read到老的数据。 + * (2) When a peer recovers all of its data through an install snapshot + * operation, it is performed through a rename operation. If a file was + * previously open in the data store, the rename operation can succeed, but + * the old file can only be deleted after the data store closes it. + * Therefore, it is necessary to reinitialize the data store, close the + * file's file descriptor (fd), and then reopen the new file. Otherwise, the + * data store will continue to operate on the old file. Once the data store + * closes, the corresponding fd, any subsequent write operations will be + * lost. Additionally, if the datastore is not reinitialized and the new + * file is not reopened, it may result in reading the old data rather than + * the recovered data. */ if (!dataStore_->Initialize()) { LOG(ERROR) << "data store init failed in on snapshot load. " @@ -490,8 +507,9 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { } /** - * 4.如果snapshot中存 conf,那么加载初始化,保证不需要以来 - * on_configuration_committed。需要注意的是这里会忽略joint stage的日志。 + * 4. If conf is stored in the snapshot, load initialization to ensure that + * there is no need for on_configuration_committed. It should be noted that + * the log of the joint stage will be ignored here. */ braft::SnapshotMeta meta; reader->load_meta(&meta); @@ -510,7 +528,7 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { void CopysetNode::on_leader_start(int64_t term) { /* - * Invoke order in on_leader_start: + * Invoke order in on_leader_start: * 1. 
flush concurrent apply queue. * 2. set term in states machine. * @@ -536,7 +554,7 @@ void CopysetNode::on_leader_start(int64_t term) { << " become leader, term is: " << leaderTerm_; } -void CopysetNode::on_leader_stop(const butil::Status &status) { +void CopysetNode::on_leader_stop(const butil::Status& status) { (void)status; leaderTerm_.store(-1, std::memory_order_release); ChunkServerMetric::GetInstance()->DecreaseLeaderCount(); @@ -544,7 +562,7 @@ void CopysetNode::on_leader_stop(const butil::Status &status) { << ", peer id: " << peerId_.to_string() << " stepped down"; } -void CopysetNode::on_error(const ::braft::Error &e) { +void CopysetNode::on_error(const ::braft::Error& e) { LOG(FATAL) << "Copyset: " << GroupIdString() << ", peer id: " << peerId_.to_string() << " meet raft error: " << e; @@ -556,7 +574,7 @@ void CopysetNode::on_configuration_committed(const Configuration& conf, // Loading snapshot should not increase epoch. When loading // snapshot, the index is equal with lastSnapshotIndex_. LOG(INFO) << "index: " << index - << ", lastSnapshotIndex_: " << lastSnapshotIndex_; + << ", lastSnapshotIndex_: " << lastSnapshotIndex_; if (index != lastSnapshotIndex_) { std::unique_lock lock_guard(confLock_); conf_ = conf; @@ -569,63 +587,47 @@ void CopysetNode::on_configuration_committed(const Configuration& conf, << ", epoch: " << epoch_.load(std::memory_order_acquire); } -void CopysetNode::on_stop_following(const ::braft::LeaderChangeContext &ctx) { +void CopysetNode::on_stop_following(const ::braft::LeaderChangeContext& ctx) { LOG(INFO) << "Copyset: " << GroupIdString() - << ", peer id: " << peerId_.to_string() - << " stops following" << ctx; + << ", peer id: " << peerId_.to_string() << " stops following" + << ctx; } -void CopysetNode::on_start_following(const ::braft::LeaderChangeContext &ctx) { +void CopysetNode::on_start_following(const ::braft::LeaderChangeContext& ctx) { LOG(INFO) << "Copyset: " << GroupIdString() - << ", peer id: " << peerId_.to_string() - << "start following" << ctx; + << ", peer id: " << peerId_.to_string() << "start following" + << ctx; } -LogicPoolID CopysetNode::GetLogicPoolId() const { - return logicPoolId_; -} +LogicPoolID CopysetNode::GetLogicPoolId() const { return logicPoolId_; } -CopysetID CopysetNode::GetCopysetId() const { - return copysetId_; -} +CopysetID CopysetNode::GetCopysetId() const { return copysetId_; } -void CopysetNode::SetScan(bool scan) { - scaning_ = scan; -} +void CopysetNode::SetScan(bool scan) { scaning_ = scan; } -bool CopysetNode::GetScan() const { - return scaning_; -} +bool CopysetNode::GetScan() const { return scaning_; } -void CopysetNode::SetLastScan(uint64_t time) { - lastScanSec_ = time; -} +void CopysetNode::SetLastScan(uint64_t time) { lastScanSec_ = time; } -uint64_t CopysetNode::GetLastScan() const { - return lastScanSec_; -} +uint64_t CopysetNode::GetLastScan() const { return lastScanSec_; } std::vector& CopysetNode::GetFailedScanMap() { return failedScanMaps_; } -std::string CopysetNode::GetCopysetDir() const { - return copysetDirPath_; -} +std::string CopysetNode::GetCopysetDir() const { return copysetDirPath_; } uint64_t CopysetNode::GetConfEpoch() const { std::lock_guard lockguard(confLock_); return epoch_.load(std::memory_order_relaxed); } -int CopysetNode::LoadConfEpoch(const std::string &filePath) { +int CopysetNode::LoadConfEpoch(const std::string& filePath) { LogicPoolID loadLogicPoolID = 0; CopysetID loadCopysetID = 0; uint64_t loadEpoch = 0; - int ret = epochFile_->Load(filePath, - &loadLogicPoolID, - 
&loadCopysetID, + int ret = epochFile_->Load(filePath, &loadLogicPoolID, &loadCopysetID, &loadEpoch); if (0 == ret) { if (logicPoolId_ != loadLogicPoolID || copysetId_ != loadCopysetID) { @@ -643,7 +645,7 @@ int CopysetNode::LoadConfEpoch(const std::string &filePath) { return ret; } -int CopysetNode::SaveConfEpoch(const std::string &filePath) { +int CopysetNode::SaveConfEpoch(const std::string& filePath) { return epochFile_->Save(filePath, logicPoolId_, copysetId_, epoch_); } @@ -678,17 +680,17 @@ void CopysetNode::SetCopysetNode(std::shared_ptr node) { raftNode_ = node; } -void CopysetNode::SetSnapshotFileSystem(scoped_refptr *fs) { +void CopysetNode::SetSnapshotFileSystem(scoped_refptr* fs) { nodeOptions_.snapshot_file_system_adaptor = fs; } bool CopysetNode::IsLeaderTerm() const { - if (0 < leaderTerm_.load(std::memory_order_acquire)) - return true; + if (0 < leaderTerm_.load(std::memory_order_acquire)) return true; return false; } -bool CopysetNode::IsLeaseLeader(const braft::LeaderLeaseStatus &lease_status) const { // NOLINT +bool CopysetNode::IsLeaseLeader( + const braft::LeaderLeaseStatus& lease_status) const { // NOLINT /* * Why not use lease_status.state==LEASE_VALID directly to judge? * @@ -707,13 +709,12 @@ bool CopysetNode::IsLeaseLeader(const braft::LeaderLeaseStatus &lease_status) co return term > 0 && term == lease_status.term; } -bool CopysetNode::IsLeaseExpired(const braft::LeaderLeaseStatus &lease_status) const { // NOLINT +bool CopysetNode::IsLeaseExpired( + const braft::LeaderLeaseStatus& lease_status) const { // NOLINT return lease_status.state == braft::LEASE_EXPIRED; } -PeerId CopysetNode::GetLeaderId() const { - return raftNode_->leader_id(); -} +PeerId CopysetNode::GetLeaderId() const { return raftNode_->leader_id(); } butil::Status CopysetNode::TransferLeader(const Peer& peer) { butil::Status status; @@ -722,15 +723,15 @@ butil::Status CopysetNode::TransferLeader(const Peer& peer) { if (raftNode_->leader_id() == peerId) { butil::Status status = butil::Status::OK(); DVLOG(6) << "Skipped transferring leader to leader itself. 
" - << "peerid: " << peerId - << ", Copyset: " << GroupIdString(); + << "peerid: " << peerId << ", Copyset: " << GroupIdString(); return status; } int rc = raftNode_->transfer_leadership_to(peerId); if (rc != 0) { - status = butil::Status(rc, "Failed to transfer leader of copyset " + status = butil::Status(rc, + "Failed to transfer leader of copyset " "%s to peer %s, error: %s", GroupIdString().c_str(), peerId.to_string().c_str(), berror(rc)); @@ -741,9 +742,8 @@ butil::Status CopysetNode::TransferLeader(const Peer& peer) { transferee_ = peer; status = butil::Status::OK(); - LOG(INFO) << "Transferred leader of copyset " - << GroupIdString() - << " to peer " << peerId; + LOG(INFO) << "Transferred leader of copyset " << GroupIdString() + << " to peer " << peerId; return status; } @@ -761,14 +761,13 @@ butil::Status CopysetNode::AddPeer(const Peer& peer) { if (peer == peerId) { butil::Status status = butil::Status::OK(); DVLOG(6) << peerId << " is already a member of copyset " - << GroupIdString() - << ", skip adding peer"; + << GroupIdString() << ", skip adding peer"; return status; } } ConfigurationChangeDone* addPeerDone = - new ConfigurationChangeDone(configChange_); + new ConfigurationChangeDone(configChange_); ConfigurationChange expectedCfgChange(ConfigChangeType::ADD_PEER, peer); addPeerDone->expectedCfgChange = expectedCfgChange; raftNode_->add_peer(peerId, addPeerDone); @@ -797,13 +796,13 @@ butil::Status CopysetNode::RemovePeer(const Peer& peer) { if (!peerValid) { butil::Status status = butil::Status::OK(); - DVLOG(6) << peerId << " is not a member of copyset " - << GroupIdString() << ", skip removing"; + DVLOG(6) << peerId << " is not a member of copyset " << GroupIdString() + << ", skip removing"; return status; } ConfigurationChangeDone* removePeerDone = - new ConfigurationChangeDone(configChange_); + new ConfigurationChangeDone(configChange_); ConfigurationChange expectedCfgChange(ConfigChangeType::REMOVE_PEER, peer); removePeerDone->expectedCfgChange = expectedCfgChange; raftNode_->remove_peer(peerId, removePeerDone); @@ -831,7 +830,7 @@ butil::Status CopysetNode::ChangePeer(const std::vector& newPeers) { return st; } ConfigurationChangeDone* changePeerDone = - new ConfigurationChangeDone(configChange_); + new ConfigurationChangeDone(configChange_); ConfigurationChange expectedCfgChange; expectedCfgChange.type = ConfigChangeType::CHANGE_PEER; expectedCfgChange.alterPeer.set_address(adding.begin()->to_string()); @@ -845,18 +844,22 @@ butil::Status CopysetNode::ChangePeer(const std::vector& newPeers) { void CopysetNode::UpdateAppliedIndex(uint64_t index) { uint64_t curIndex = appliedIndex_.load(std::memory_order_acquire); - // 只更新比自己大的 index + // Only update indexes larger than oneself if (index > curIndex) { /** - * compare_exchange_strong解释: - * 首先比较curIndex是不是等于appliedIndex,如果是,那么说明没有人 - * 修改appliedindex,那么用index去修改appliedIndex,更新成功,完成; - * 如果不等于,说明有人更新了appliedindex,那么通过curIndex返回当前 - * 的appliedindex,并且返回false。整个过程都是原子的 + * Explanation of compare_exchange_strong: + * First, it compares whether curIndex is equal to appliedIndex. If it + * is equal, it means that no one has modified appliedindex. In this + * case, it tries to update appliedIndex with the value of index, and if + * the update is successful, it's done. If curIndex is not equal to + * appliedindex, it indicates that someone else has updated appliedIndex + * in the meantime. In this case, it returns the current value of + * appliedindex through curIndex and returns false. This entire process + * is atomic. 
@@ -845,18 +844,22 @@ void CopysetNode::UpdateAppliedIndex(uint64_t index) {
 uint64_t curIndex = appliedIndex_.load(std::memory_order_acquire);
- // 只更新比自己大的 index
+ // Only update when the new index is larger than the current one
 if (index > curIndex) {
 /**
- * compare_exchange_strong解释:
- * 首先比较curIndex是不是等于appliedIndex,如果是,那么说明没有人
- * 修改appliedindex,那么用index去修改appliedIndex,更新成功,完成;
- * 如果不等于,说明有人更新了appliedindex,那么通过curIndex返回当前
- * 的appliedindex,并且返回false。整个过程都是原子的
+ * How compare_exchange_strong works here:
+ * it first compares curIndex with appliedIndex. If they are equal,
+ * nobody has modified appliedIndex, so appliedIndex is updated to
+ * index and we are done. If they are not equal, someone else has
+ * updated appliedIndex in the meantime; the current value is then
+ * returned through curIndex and the call returns false. The whole
+ * operation is atomic.
 */
- while (!appliedIndex_.compare_exchange_strong(curIndex, - index, - std::memory_order_acq_rel)) { //NOLINT
+ while (!appliedIndex_.compare_exchange_strong( + curIndex, index, + std::memory_order_acq_rel)) { // NOLINT
 if (index <= curIndex) {
 break;
 }
@@ -876,27 +879,29 @@ CurveSegmentLogStorage* CopysetNode::GetLogStorage() const { return logStorage_; }
-ConcurrentApplyModule *CopysetNode::GetConcurrentApplyModule() const {
+ConcurrentApplyModule* CopysetNode::GetConcurrentApplyModule() const {
 return concurrentapply_;
 }
-void CopysetNode::Propose(const braft::Task &task) { - raftNode_->apply(task); -}
+void CopysetNode::Propose(const braft::Task& task) { raftNode_->apply(task); }
-int CopysetNode::GetConfChange(ConfigChangeType *type, - Configuration *oldConf, - Peer *alterPeer) {
+int CopysetNode::GetConfChange(ConfigChangeType* type, Configuration* oldConf, + Peer* alterPeer) {
 /**
- * 避免new leader当选leader之后,提交noop entry之前,epoch和
- * 配置可能不一致的情况。考虑如下情形:
- *
- * 三个成员的复制组{ABC},当前epoch=5,A是leader,收到配置配置+D,
- * 假设B收到了{ABC+D}的配置变更日志,然后leader A挂了,B当选为了
- * new leader,在B提交noop entry之前,B上查询到的epoch值最大可能为5,
- * 而查询到的配置确实{ABCD}了,所以这里在new leader B在提交noop entry
- * 之前,也就是实现隐公提交配置变更日志{ABC+D}之前,不允许向用户返回
- * 配置和配置变更信息,避免epoch和配置信息不一致
+ * To avoid the situation where, after a new leader has been elected but
+ * before it commits a noop entry, the epoch and the configuration could
+ * be inconsistent, consider the following scenario:
+ *
+ * A replication group of three members {ABC}, current epoch = 5, A is the
+ * leader and receives a configuration change +D. Suppose B has received
+ * the configuration change log {ABC+D}, then leader A crashes and B is
+ * elected the new leader. Before B commits the noop entry, the largest
+ * epoch visible on B may still be 5, while the configuration it sees is
+ * already {ABCD}. Therefore, before the new leader B commits the noop
+ * entry, i.e. before the configuration change log {ABC+D} is implicitly
+ * committed, we must not return configuration and configuration-change
+ * information to the user, to avoid an inconsistency between the epoch
+ * and the configuration.
 */
 if (leaderTerm_.load(std::memory_order_acquire) <= 0) {
 *type = ConfigChangeType::NONE;
@@ -922,9 +927,9 @@ uint64_t CopysetNode::LeaderTerm() const { return leaderTerm_.load(std::memory_order_acquire); }
-int CopysetNode::GetHash(std::string *hash) {
+int CopysetNode::GetHash(std::string* hash) {
 int ret = 0;
- int fd = 0;
+ int fd = 0;
 int len = 0;
 uint32_t crc32c = 0;
 std::vector<std::string> files;
 return -1;
 }
- // 计算所有chunk文件crc需要保证计算的顺序是一样的
+ // The CRCs of all chunk files must be computed in a consistent order
 std::sort(files.begin(), files.end());
 for (std::string file : files) {
 len = fileInfo.st_size;
- char *buff = new (std::nothrow) char[len];
+ char* buff = new (std::nothrow) char[len];
 if (nullptr == buff) {
 return -1;
 }
@@ -974,15 +980,15 @@ return 0; }
-void CopysetNode::GetStatus(NodeStatus *status) {
+void CopysetNode::GetStatus(NodeStatus* status) {
 raftNode_->get_status(status);
 }
-void CopysetNode::GetLeaderLeaseStatus(braft::LeaderLeaseStatus *status) {
+void CopysetNode::GetLeaderLeaseStatus(braft::LeaderLeaseStatus* status) {
 raftNode_->get_leader_lease_status(status);
 }
-bool CopysetNode::GetLeaderStatus(NodeStatus *leaderStaus) {
+bool CopysetNode::GetLeaderStatus(NodeStatus* leaderStaus) {
 NodeStatus status;
 GetStatus(&status);
 if (status.leader_id.is_empty()) {
@@ -997,16 +1003,15 @@ brpc::Controller cntl; cntl.set_timeout_ms(500); brpc::Channel channel;
- if (channel.Init(status.leader_id.addr, nullptr) !=0) { - LOG(WARNING) << "can not create channel to " - << status.leader_id.addr
+ if (channel.Init(status.leader_id.addr, nullptr) != 0) { + LOG(WARNING) << "can not create channel to " << status.leader_id.addr
 << ", copyset " << GroupIdString();
 return false;
 }
 CopysetStatusRequest request;
 CopysetStatusResponse response;
- curve::common::Peer *peer = new curve::common::Peer();
+ curve::common::Peer* peer = new curve::common::Peer();
 peer->set_address(status.leader_id.to_string());
 request.set_logicpoolid(logicPoolId_);
 request.set_copysetid(copysetId_);
@@ -1016,16 +1021,15 @@ CopysetService_Stub stub(&channel); stub.GetCopysetStatus(&cntl, &request, &response, nullptr);
 if (cntl.Failed()) {
- LOG(WARNING) << "get leader status failed: " - << cntl.ErrorText()
+ LOG(WARNING) << "get leader status failed: " << cntl.ErrorText()
 << ", copyset " << GroupIdString();
 return false;
 }
 if (response.status() != COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS) {
 LOG(WARNING) << "get leader status failed"
- << ", status: " << response.status() - << ", copyset " << GroupIdString();
+ << ", status: " << response.status() << ", copyset " + << GroupIdString();
 return false;
 }
@@ -1078,9 +1082,8 @@ void CopysetNode::SyncAllChunks() { CSErrorCode r = dataStore_->SyncChunk(chunk);
 if (r != CSErrorCode::Success) {
 LOG(FATAL) << "Sync Chunk failed in Copyset: "
- << GroupIdString() - << ", chunkid: " << chunk - << " data store return: " << r;
+ << GroupIdString() << ", chunkid: " << chunk + << " data store return: " << r;
 }
 });
 }
 void SyncChunkThread::Init(CopysetNode* node) { }
 void SyncChunkThread::Run() {
- syncThread_ = std::thread([this](){
+ syncThread_ = std::thread([this]() {
 while (running_) {
 std::unique_lock<std::mutex> lock(mtx_);
-
cond_->wait_for(lock, - std::chrono::seconds(CopysetNode::syncTriggerSeconds_));
+ cond_->wait_for( + lock, std::chrono::seconds(CopysetNode::syncTriggerSeconds_));
 node_->SyncAllChunks();
 }
 });
 }
@@ -1111,9 +1114,7 @@ void SyncChunkThread::Stop() { }
-SyncChunkThread::~SyncChunkThread() { - Stop(); -}
+SyncChunkThread::~SyncChunkThread() { Stop(); }
 } // namespace chunkserver } // namespace curve
diff --git a/src/chunkserver/copyset_node.h b/src/chunkserver/copyset_node.h index cf7a34aeec..74033cbc80 100755 --- a/src/chunkserver/copyset_node.h +++ b/src/chunkserver/copyset_node.h
@@ -23,53 +23,53 @@ #ifndef SRC_CHUNKSERVER_COPYSET_NODE_H_ #define SRC_CHUNKSERVER_COPYSET_NODE_H_
-#include #include #include +#include +#include #include +#include +#include #include #include -#include -#include -#include
+#include "proto/chunk.pb.h" +#include "proto/common.pb.h" +#include "proto/heartbeat.pb.h" +#include "proto/scan.pb.h" +#include "src/chunkserver/chunkserver_metrics.h" #include "src/chunkserver/concurrent_apply/concurrent_apply.h" -#include "src/chunkserver/datastore/chunkserver_datastore.h" #include "src/chunkserver/conf_epoch_file.h" #include "src/chunkserver/config_info.h" -#include "src/chunkserver/chunkserver_metrics.h" +#include "src/chunkserver/datastore/chunkserver_datastore.h" +#include "src/chunkserver/raft_node.h" #include "src/chunkserver/raftlog/curve_segment_log_storage.h" -#include "src/chunkserver/raftsnapshot/define.h" #include "src/chunkserver/raftsnapshot/curve_snapshot_writer.h" -#include "src/common/string_util.h" +#include "src/chunkserver/raftsnapshot/define.h" #include "src/common/concurrent/task_thread_pool.h" -#include "src/chunkserver/raft_node.h" -#include "proto/heartbeat.pb.h" -#include "proto/chunk.pb.h" -#include "proto/common.pb.h" -#include "proto/scan.pb.h" +#include "src/common/string_util.h"
 namespace curve { namespace chunkserver {
-using ::google::protobuf::RpcController; -using ::google::protobuf::Closure; -using ::curve::mds::heartbeat::ConfigChangeType; using ::curve::common::Peer; using ::curve::common::TaskThreadPool; +using ::curve::mds::heartbeat::ConfigChangeType; +using ::google::protobuf::Closure; +using ::google::protobuf::RpcController;
 class CopysetNodeManager;
-extern const char *kCurveConfEpochFilename;
+extern const char* kCurveConfEpochFilename;
 struct ConfigurationChange {
 ConfigChangeType type;
 Peer alterPeer;
 ConfigurationChange() : type(ConfigChangeType::NONE) {}
- ConfigurationChange(const ConfigChangeType& type2, const Peer& alterPeer2) : - type(type2), alterPeer(alterPeer2) {}
+ ConfigurationChange(const ConfigChangeType& type2, const Peer& alterPeer2) + : type(type2), alterPeer(alterPeer2) {}
 bool IsEmpty() {
 return type == ConfigChangeType::NONE && !alterPeer.has_address();
 }
@@ -79,7 +79,7 @@ }
 bool operator==(const ConfigurationChange& rhs) {
 return type == rhs.type &&
- alterPeer.address() == rhs.alterPeer.address();
+ alterPeer.address() == rhs.alterPeer.address();
 }
 ConfigurationChange& operator=(const ConfigurationChange& rhs) {
 type = rhs.type;
@@ -92,17 +92,18 @@ class ConfigurationChangeDone : public braft::Closure {
 public:
 void Run() {
 if (!expectedCfgChange.IsEmpty() &&
- *curCfgChange == expectedCfgChange) {
+ *curCfgChange == expectedCfgChange) {
 curCfgChange->Reset();
 }
 delete this;
 }
 explicit ConfigurationChangeDone(
- std::shared_ptr<ConfigurationChange> cfgChange) - : curCfgChange(cfgChange) {}
- // copyset node中当前的配置变更信息
+ std::shared_ptr<ConfigurationChange> cfgChange) + : curCfgChange(cfgChange) {}
+ // Current
configuration change information in the copyset node
 std::shared_ptr<ConfigurationChange> curCfgChange;
- // 这次配置变更对应的配置变更信息
+ // The configuration change expected by this particular change request
 ConfigurationChange expectedCfgChange;
 };
@@ -116,6 +117,7 @@ class SyncChunkThread : public curve::common::Uncopyable { void Run(); void Init(CopysetNode* node); void Stop();
+
 private: bool running_; std::mutex mtx_;
 };
 /**
- * 一个Copyset Node就是一个复制组的副本
+ * A Copyset Node is a replica of a replication group
 */
 class CopysetNode : public braft::StateMachine,
 public std::enable_shared_from_this<CopysetNode> {
 // for ut mock
 CopysetNode() = default;
- CopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &initConf);
+ CopysetNode(const LogicPoolID& logicPoolId, const CopysetID& copysetId, + const Configuration& initConf);
 virtual ~CopysetNode();
 /**
- * 初始化copyset node配置
+ * Initialize the copyset node configuration
 * @param options
- * @return 0,成功,-1失败
+ * @return 0 on success, -1 on failure
 */
- virtual int Init(const CopysetNodeOptions &options);
+ virtual int Init(const CopysetNodeOptions& options);
 /**
- * Raft Node init,使得Raft Node运行起来
+ * Initialize the raft node so that it starts running
 * @return
 */
 virtual int Run();
 /**
- * 关闭copyset node
+ * Shut down the copyset node
 */
 virtual void Fini();
 /**
- * 返回复制组的逻辑池ID
+ * Returns the logical pool ID of the replication group
 * @return
 */
 LogicPoolID GetLogicPoolId() const;
 /**
- * 返回复制组的复制组ID
+ * Returns the copyset ID of the replication group
 * @return
 */
 CopysetID GetCopysetId() const;
@@ -180,13 +181,13 @@
 virtual std::vector<ScanMap>& GetFailedScanMap();
 /**
- * 返回复制组数据目录
+ * Returns the copyset data directory
 * @return
 */
 std::string GetCopysetDir() const;
 /**
- * 返回当前副本是否在leader任期
+ * Returns whether the current replica is within a leader term
 * @return
 */
 virtual bool IsLeaderTerm() const;
@@ -195,111 +196,115 @@
 * check if current node is in lease leader
 * @return
 */
- virtual bool IsLeaseLeader(const braft::LeaderLeaseStatus &lease_status) const; // NOLINT
+ virtual bool IsLeaseLeader( + const braft::LeaderLeaseStatus& lease_status) const; // NOLINT
 /**
 * check if current node is expired
 * @return
 */
- virtual bool IsLeaseExpired(const braft::LeaderLeaseStatus &lease_status) const; // NOLINT
+ virtual bool IsLeaseExpired( + const braft::LeaderLeaseStatus& lease_status) const; // NOLINT
 /**
- * 返回当前的任期
- * @return 当前的任期
+ * Returns the current term
+ * @return the current term
 */
 virtual uint64_t LeaderTerm() const;
 /**
- * 返回leader id
+ * Returns the leader id
 * @return
 */
 virtual PeerId GetLeaderId() const;
 /**
- * @brief 切换复制组的Leader
- * @param[in] peerId 目标Leader的成员ID
- * @return 心跳任务的引用
+ * @brief Transfer the leadership of the replication group
+ * @param[in] peerId the member ID of the target leader
+ * @return reference to the heartbeat task
 */
 butil::Status TransferLeader(const Peer& peer);
 /**
- * @brief 复制组添加新成员
- * @param[in] peerId 新成员的ID
- * @return 心跳任务的引用
+ * @brief Add a new member to the replication group
+ * @param[in] peerId the ID of the new member
+ * @return reference to the heartbeat task
 */
 butil::Status AddPeer(const Peer& peer);
 /**
- * @brief 复制组删除成员
- * @param[in] peerId 将要删除成员的ID
- * @return 心跳任务的引用
+ * @brief Remove a member from the replication group
+ * @param[in] peerId the ID of the member
to be deleted
+ * @return reference to the heartbeat task
 */
 butil::Status RemovePeer(const Peer& peer);
 /**
- * @brief 变更复制组成员
- * @param[in] newPeers 新的复制组成员
- * @return 心跳任务的引用
+ * @brief Change the replication group membership
+ * @param[in] newPeers the new replication group members
+ * @return reference to the heartbeat task
 */
 butil::Status ChangePeer(const std::vector<Peer>& newPeers);
 /**
- * 返回copyset的配置版本
+ * Returns the configuration epoch of the copyset
 * @return
 */
 virtual uint64_t GetConfEpoch() const;
 /**
- * 更新applied index,只有比它大的才更新
+ * Update the applied index; only an index larger than the current one is
+ * applied
 * @param index
 */
 virtual void UpdateAppliedIndex(uint64_t index);
 /**
- * 返回当前最新的applied index
+ * Returns the current latest applied index
 * @return
 */
 virtual uint64_t GetAppliedIndex() const;
 /**
- * @brief: 查询配置变更的状态
- * @param type[out]: 配置变更类型
- * @param oldConf[out]: 老的配置
- * @param alterPeer[out]: 变更的peer
- * @return 0查询成功,-1查询异常失败
+ * @brief: Query the state of the ongoing configuration change
+ * @param type[out]: configuration change type
+ * @param oldConf[out]: the old configuration
+ * @param alterPeer[out]: the peer being changed
+ * @return 0 on success, -1 on failure
 */
- virtual int GetConfChange(ConfigChangeType *type, - Configuration *oldConf, - Peer *alterPeer);
+ virtual int GetConfChange(ConfigChangeType* type, Configuration* oldConf, + Peer* alterPeer);
 /**
- * @brief: 获取copyset node的状态值,用于比较多个副本的数据一致性
- * @param hash[out]: copyset node状态值
- * @return 0成功,-1失败
+ * @brief: Get the state hash of the copyset node, used to compare data
+ * consistency across replicas
+ * @param hash[out]: the copyset node state value
+ * @return 0 on success, -1 on failure
 */
- virtual int GetHash(std::string *hash);
+ virtual int GetHash(std::string* hash);
 /**
- * @brief: 获取copyset node的status,实际调用的raft node的get_status接口
+ * @brief: Get the status of the copyset node; this actually calls the
+ * get_status interface of the raft node
 * @param status[out]: copyset node status
 */
- virtual void GetStatus(NodeStatus *status);
+ virtual void GetStatus(NodeStatus* status);
 /**
 * @brief: get raft node leader lease status
 * @param status[out]: raft node leader lease status
 */
- virtual void GetLeaderLeaseStatus(braft::LeaderLeaseStatus *status);
+ virtual void GetLeaderLeaseStatus(braft::LeaderLeaseStatus* status);
 /**
- * 获取此copyset的leader上的status
+ * Obtain the status from this copyset's leader
 * @param leaderStaus[out]: leader copyset node status
- * @return 获取成功返回true,获取失败返回false
+ * @return true on success, false on failure
 */
- virtual bool GetLeaderStatus(NodeStatus *leaderStaus);
+ virtual bool GetLeaderStatus(NodeStatus* leaderStaus);
 /**
- * 返回data store指针
+ * Returns the data store pointer
 * @return
 */
 virtual std::shared_ptr<CSDataStore> GetDataStore() const;
@@ -311,19 +316,19 @@
 virtual CurveSegmentLogStorage* GetLogStorage() const;
 /**
- * 返回ConcurrentApplyModule
+ * Returns the ConcurrentApplyModule
 */
 virtual ConcurrentApplyModule* GetConcurrentApplyModule() const;
 /**
- * 向copyset node propose一个op request
+ * Propose an op request to the copyset node
 * @param task
 */
- virtual void Propose(const braft::Task &task);
+ virtual void Propose(const braft::Task& task);
 /**
- * 获取复制组成员
- * @param peers:返回的成员列表(输出参数)
+ * Get the replication group members
+ * @param peers: the returned member list (output parameter)
 * @return
 */
 virtual void ListPeers(std::vector<Peer>* peers);
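A hedged sketch of driving a membership change through the interface documented above (the peer address is made up, and real callers go through the heartbeat/configuration-change pipeline rather than calling this directly):

    // node is a std::shared_ptr<CopysetNode> that currently holds leadership.
    Peer peer;
    peer.set_address("127.0.0.1:8200:0");
    butil::Status st = node->AddPeer(peer);
    if (!st.ok()) {
        LOG(WARNING) << "add peer failed: " << st.error_str();
    }

    // Poll the in-flight configuration change.
    ConfigChangeType type;
    Configuration oldConf;
    Peer alterPeer;
    if (0 == node->GetConfChange(&type, &oldConf, &alterPeer) &&
        type == ConfigChangeType::ADD_PEER) {
        // the ADD_PEER change is still in progress
    }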
@@ -333,87 +338,95 @@ class CopysetNode : public braft::StateMachine,
 * @param options
 * @return
 */
- void InitRaftNodeOptions(const CopysetNodeOptions &options);
+ void InitRaftNodeOptions(const CopysetNodeOptions& options);
 /**
- * 下面的接口都是继承StateMachine实现的接口
+ * The following interfaces are inherited from StateMachine
 */
 public:
 /**
- * op log apply的时候回调函数
- * @param iter:可以batch的访问已经commit的log entries
+ * Callback function invoked when the op log is applied
+ * @param iter: allows batched access to the already committed log entries
 */
- void on_apply(::braft::Iterator &iter) override;
+ void on_apply(::braft::Iterator& iter) override;
 /**
- * 复制关闭的时候调用此回调
+ * Called when the copyset is shut down
 */
 void on_shutdown() override;
 /**
- * raft snapshot相关的接口,仅仅保存raft snapshot meta
- * 和snapshot文件的list,这里并没有拷贝实际的数据,因为
- * 在块存储场景所有操作是幂等,所以,并不真实的拷贝数据
+ * Raft-snapshot-related interface; it only saves the raft snapshot meta
+ * and the list of snapshot files, and does not copy the actual data,
+ * because in the block-storage scenario all operations are idempotent,
+ * so the data does not really need to be copied
 */
- void on_snapshot_save(::braft::SnapshotWriter *writer, - ::braft::Closure *done) override;
+ void on_snapshot_save(::braft::SnapshotWriter* writer, + ::braft::Closure* done) override;
 /**
- * load日志有两种情况:
- * 1. Follower节点Install snapshot追赶leader,这个时候
- * snapshot目录下面有chunk数据和snapshot数据
- * 2. 节点重启,会执行snapshot load,然后回放日志,这个时
- * 候snapshot目录下面没有数据,什么都不用做
- * TODO(wudemiao): install snapshot的时候会存在空间
- * double的可能性,考虑如下场景,follower落后,然后通过从
- * leader install snapshot恢复数据,其首先会从leader将
- * 所有数据下载过来,然后在调用snapshot load加载快照,这个
- * 期间空间占用了就double了;后期需要通过控制单盘参与install
- * snapshot的数量
+ * There are two scenarios in which logs are loaded:
+ * 1. A follower node installs a snapshot to catch up with the leader. In
+ * this case there are chunk data and snapshot data under the snapshot
+ * directory.
+ * 2. A node restarts, performs a snapshot load and then replays the
+ * logs. In this case there is no data under the snapshot directory, so
+ * nothing needs to be done.
+ * TODO(wudemiao): When installing a snapshot, the space usage may
+ * double. Consider the following scenario: a follower lags behind and
+ * then recovers data by installing a snapshot from the leader. It first
+ * downloads all the data from the leader and then calls snapshot load to
+ * load the snapshot; during this period the space usage doubles. Later
+ * we need to limit the number of disks participating in install
+ * snapshot.
 */
- int on_snapshot_load(::braft::SnapshotReader *reader) override;
+ int on_snapshot_load(::braft::SnapshotReader* reader) override;
 /**
- * new leader在apply noop之后会调用此接口,表示此 leader可
- * 以提供read/write服务了。
+ * The new leader calls this interface after the noop entry has been
+ * applied, indicating that this leader can provide read/write services.
+ * @param term: the current leader term
 */
 void on_leader_start(int64_t term) override;
 /**
- * leader step down的时候调用
- * @param status:复制组的状态
+ * Called when the leader steps down
+ * @param status: the status of the replication group
 */
- void on_leader_stop(const butil::Status &status) override;
+ void on_leader_stop(const butil::Status& status) override;
 /**
- * 复制组发生错误的时候调用
- * @param e:具体的 error
+ * Called when an error occurs in the replication group
+ * @param e: the specific error
 */
- void on_error(const ::braft::Error &e) override;
+ void on_error(const ::braft::Error& e) override;
 /**
- * 配置变更日志entry apply的时候会调用此函数,目前会利用此接口
- * 更新配置epoch值
- * @param conf:当前复制组最新的配置
+ * Called when a configuration-change log entry is applied; currently this
+ * interface is used to update the configuration epoch value
+ * @param conf: the latest configuration of the replication group
 * @param index log index
 */
- void on_configuration_committed(const Configuration& conf, int64_t index) override; //NOLINT
+ void on_configuration_committed(const Configuration& conf, + int64_t index) override; // NOLINT
 /**
- * 当follower停止following主的时候调用
- * @param ctx:可以获取stop following的原因
+ * Called when this follower stops following the leader
+ * @param ctx: the reason for stopping to follow can be obtained from it
 */
- void on_stop_following(const ::braft::LeaderChangeContext &ctx) override;
+ void on_stop_following(const ::braft::LeaderChangeContext& ctx) override;
 /**
- * Follower或者Candidate发现新的leader后调用
- * @param ctx:leader变更上下,可以获取new leader和start following的原因
+ * Called after a follower or candidate sees a new leader
+ * @param ctx: the leader-change context; the new leader and the reason
+ * for starting to follow can be obtained from it
 */
- void on_start_following(const ::braft::LeaderChangeContext &ctx) override;
+ void on_start_following(const ::braft::LeaderChangeContext& ctx) override;
 /**
- * 用于测试注入mock依赖
+ * Used in tests to inject mock dependencies
 */
 public:
 void SetCSDateStore(std::shared_ptr<CSDataStore> datastore);
@@ -435,22 +448,22 @@ class CopysetNode : public braft::StateMachine,
 // shared to sync pool
 static std::shared_ptr<common::TaskThreadPool<>> copysetSyncPool_;
 /**
- * 从文件中解析copyset配置版本信息
- * @param filePath:文件路径
- * @return 0: successs, -1 failed
+ * Parse the copyset configuration epoch information from a file
+ * @param filePath: file path
+ * @return 0 on success, -1 on failure
 */
- int LoadConfEpoch(const std::string &filePath);
+ int LoadConfEpoch(const std::string& filePath);
 /**
- * 保存copyset配置版本信息到文件中
- * @param filePath:文件路径
- * @return 0 成功,-1 failed
+ * Save the copyset configuration epoch information to a file
+ * @param filePath: file path
+ * @return 0 on success, -1 on failure
 */
- int SaveConfEpoch(const std::string &filePath);
+ int SaveConfEpoch(const std::string& filePath);
 public:
- void save_snapshot_background(::braft::SnapshotWriter *writer, - ::braft::Closure *done);
+ void save_snapshot_background(::braft::SnapshotWriter* writer, + ::braft::Closure* done);
 void ShipToSync(ChunkID chunkId) {
 if (enableOdsyncWhenOpenChunkFile_) {
@@ -470,58 +483,58 @@ class CopysetNode : public braft::StateMachine,
 void WaitSnapshotDone();
 private:
- inline std::string GroupId() { - return ToGroupId(logicPoolId_, copysetId_); - }
+ inline std::string GroupId() { return ToGroupId(logicPoolId_, copysetId_); }
 inline std::string GroupIdString() {
 return ToGroupIdString(logicPoolId_, copysetId_);
 }
 private:
- // 逻辑池 id
+ // Logical pool ID
 LogicPoolID logicPoolId_;
- // 复制组 id
+ // Copyset ID
 CopysetID copysetId_;
-
// 复制组的配置 - Configuration conf_; - // 复制组的配置操作锁 - mutable std::mutex confLock_; - // 复制组的配置版本 + // Configuration of the replication group Configuration conf_; + // Configuration operation lock of the replication group mutable std::mutex confLock_; + // Configuration epoch of the replication group std::atomic epoch_; - // 复制组副本的peer id + // Peer ID of the replication group replica PeerId peerId_; - // braft Node的配置参数 + // Configuration parameters for the braft Node NodeOptions nodeOptions_; - // CopysetNode对应的braft Node + // The braft Node corresponding to CopysetNode std::shared_ptr raftNode_; - // chunk file的绝对目录 + // Absolute directory for chunk files std::string chunkDataApath_; - // chunk file的相对目录 + // Relative directory for chunk files std::string chunkDataRpath_; - // copyset绝对路径 + // copyset absolute path std::string copysetDirPath_; - // 文件系统适配器 + // File system adapter std::shared_ptr fs_; - // Chunk持久化操作接口 + // Chunk persistence operation interface std::shared_ptr dataStore_; // The log storage for braft CurveSegmentLogStorage* logStorage_; - // 并发模块 - ConcurrentApplyModule *concurrentapply_ = nullptr; - // 配置版本持久化工具接口 + // Concurrency module ConcurrentApplyModule* concurrentapply_ = nullptr; + // Interface for persisting the configuration epoch std::unique_ptr epochFile_; - // 复制组的apply index + // Applied index of the replication group std::atomic appliedIndex_; - // 复制组当前任期,如果<=0表明不是leader + // Current term of the replication group; if <= 0, this node is not the + // leader std::atomic leaderTerm_; - // 复制组数据回收站目录 + // Recycle bin directory for replication group data std::string recyclerUri_; - // 复制组的metric信息 + // Metric information of the replication group CopysetMetricPtr metric_; - // 正在进行中的配置变更 + // Configuration change currently in progress std::shared_ptr configChange_; - // transfer leader的目标,状态为TRANSFERRING时有效 + // Target peer of a transfer-leader operation; valid when the status is + // TRANSFERRING Peer transferee_; int64_t lastSnapshotIndex_; // scan status diff --git a/src/chunkserver/copyset_node_manager.cpp b/src/chunkserver/copyset_node_manager.cpp index 78f4afec89..9c856ccb50 100755 --- a/src/chunkserver/copyset_node_manager.cpp +++ b/src/chunkserver/copyset_node_manager.cpp @@ -22,27 +22,26 @@ #include "src/chunkserver/copyset_node_manager.h" -#include #include #include +#include -#include #include #include +#include +#include "src/chunkserver/braft_cli_service.h" +#include "src/chunkserver/braft_cli_service2.h" +#include "src/chunkserver/chunk_service.h" #include "src/chunkserver/config_info.h" #include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/copyset_service.h" +#include "src/chunkserver/op_request.h" +#include "src/chunkserver/raftsnapshot/curve_file_service.h" #include "src/common/concurrent/task_thread_pool.h" #include "src/common/string_util.h" #include "src/common/timeutility.h" -#include "src/chunkserver/chunk_service.h" -#include "src/chunkserver/op_request.h" -#include "src/chunkserver/copyset_service.h" -#include "src/chunkserver/braft_cli_service.h" -#include "src/chunkserver/braft_cli_service2.h" #include "src/common/uri_parser.h" -#include "src/chunkserver/raftsnapshot/curve_file_service.h" - namespace curve { namespace chunkserver { @@ -51,7 +50,7 @@ using curve::common::TimeUtility; std::once_flag addServiceFlag; -int CopysetNodeManager::Init(const CopysetNodeOptions &copysetNodeOptions) { +int CopysetNodeManager::Init(const CopysetNodeOptions& copysetNodeOptions) { copysetNodeOptions_ = copysetNodeOptions; CopysetNode::syncTriggerSeconds_ =
copysetNodeOptions.syncTriggerSeconds; CopysetNode::copysetSyncPool_ = @@ -71,10 +70,9 @@ int CopysetNodeManager::Run() { CopysetNode::copysetSyncPool_->Start(copysetNodeOptions_.syncConcurrency); assert(copysetNodeOptions_.syncConcurrency > 0); int ret = 0; - // 启动线程池 + // Start the thread pool if (copysetLoader_ != nullptr) { - ret = copysetLoader_->Start( - copysetNodeOptions_.loadConcurrency); + ret = copysetLoader_->Start(copysetNodeOptions_.loadConcurrency); if (ret < 0) { LOG(ERROR) << "CopysetLoadThrottle start error. ThreadNum: " << copysetNodeOptions_.loadConcurrency; @@ -82,7 +80,7 @@ int CopysetNodeManager::Run() { } } - // 启动加载已有的copyset + // Start loading existing copysets ret = ReloadCopysets(); if (ret == 0) { loadFinished_.exchange(true, std::memory_order_acq_rel); @@ -141,28 +139,26 @@ int CopysetNodeManager::ReloadCopysets() { } uint64_t poolId = GetPoolID(groupId); uint64_t copysetId = GetCopysetID(groupId); - LOG(INFO) << "Parsed groupid " << groupId - << " as " << ToGroupIdString(poolId, copysetId); + LOG(INFO) << "Parsed groupid " << groupId << " as " + << ToGroupIdString(poolId, copysetId); if (copysetLoader_ == nullptr) { LoadCopyset(poolId, copysetId, false); } else { - copysetLoader_->Enqueue( - std::bind(&CopysetNodeManager::LoadCopyset, - this, - poolId, - copysetId, - true)); + copysetLoader_->Enqueue(std::bind(&CopysetNodeManager::LoadCopyset, + this, poolId, copysetId, true)); } } - // 如果加载成功,则等待所有copyset加载完成,关闭线程池 + // If loading is successful, wait for all copysets to load and close the + // thread pool if (copysetLoader_ != nullptr) { while (copysetLoader_->QueueSize() != 0) { ::sleep(1); } - // queue size为0,但是线程池中的线程仍然可能还在执行 - // stop内部会去join thread,以此保证所有任务执行完以后再退出 + // Even when the queue size is 0, the threads in the thread pool may + // still be executing. The 'stop' function internally performs thread + // joining to ensure that all tasks are completed before exiting. copysetLoader_->Stop(); copysetLoader_ = nullptr; } @@ -174,8 +170,8 @@ bool CopysetNodeManager::LoadFinished() { return loadFinished_.load(std::memory_order_acquire); } -void CopysetNodeManager::LoadCopyset(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, +void CopysetNodeManager::LoadCopyset(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, bool needCheckLoadFinished) { LOG(INFO) << "Begin to load copyset " << ToGroupIdString(logicPoolId, copysetId) @@ -183,8 +179,9 @@ void CopysetNodeManager::LoadCopyset(const LogicPoolID &logicPoolId, << (needCheckLoadFinished ? "Yes."
: "No."); uint64_t beginTime = TimeUtility::GetTimeofDayMs(); - // chunkserver启动加载copyset阶段,会拒绝外部的创建copyset请求 - // 因此不会有其他线程加载或者创建相同copyset,此时不需要加锁 + // chunkserver starts the loading copyset phase and will reject external + // requests to create copysets Therefore, no other threads will load or + // create the same copyset, and locking is not necessary at this time Configuration conf; std::shared_ptr copysetNode = CreateCopysetNodeUnlocked(logicPoolId, copysetId, conf); @@ -205,7 +202,7 @@ void CopysetNodeManager::LoadCopyset(const LogicPoolID &logicPoolId, } LOG(INFO) << "Load copyset " << ToGroupIdString(logicPoolId, copysetId) << " end, time used (ms): " - << TimeUtility::GetTimeofDayMs() - beginTime; + << TimeUtility::GetTimeofDayMs() - beginTime; } bool CopysetNodeManager::CheckCopysetUntilLoadFinished( @@ -224,9 +221,12 @@ bool CopysetNodeManager::CheckCopysetUntilLoadFinished( } NodeStatus leaderStaus; bool getSuccess = node->GetLeaderStatus(&leaderStaus); - // 获取leader状态失败一般是由于还没选出leader或者leader心跳还未发送到当前节点 - // 正常通过几次重试可以获取到leader信息,如果重试多次都未获取到 - // 则认为copyset当前可能无法选出leader,直接退出 + // Failure to obtain leader status is usually because a leader has not + // been elected yet, or the leader's heartbeat has not been received by + // the current node. Typically, leader information can be obtained + // through several retries. If multiple retries fail to obtain the + // information, it is assumed that the copyset may not be able to elect + // a leader at the moment, and the operation exits directly. if (!getSuccess) { ++retryTimes; ::usleep(1000 * copysetNodeOptions_.electionTimeoutMs); @@ -235,8 +235,10 @@ bool CopysetNodeManager::CheckCopysetUntilLoadFinished( NodeStatus status; node->GetStatus(&status); - // 当前副本的最后一个日志落后于leader上保存的第一个日志 - // 这种情况下此副本会通过安装快照恢复,可以忽略避免阻塞检查线程 + // When the last log of the current replica lags behind the first log + // saved on the leader, in this situation, the replica will recover by + // installing a snapshot, and it can be safely ignored to avoid blocking + // the checking thread. 
bool mayInstallSnapshot = leaderStaus.first_index > status.last_index; if (mayInstallSnapshot) { LOG(WARNING) << "Copyset " @@ -250,73 +252,73 @@ bool CopysetNodeManager::CheckCopysetUntilLoadFinished( return false; } - // 判断当前副本已经apply的日志是否接近已经committed的日志 - int64_t margin = leaderStaus.committed_index - - status.known_applied_index; - bool catchupLeader = margin - < (int64_t)copysetNodeOptions_.finishLoadMargin; + // Determine whether the logs that have been applied on the current + // replica are close to the logs that have been committed + int64_t margin = + leaderStaus.committed_index - status.known_applied_index; + bool catchupLeader = + margin < (int64_t)copysetNodeOptions_.finishLoadMargin; if (catchupLeader) { LOG(INFO) << "Load copyset " << ToGroupIdString(logicPoolId, copysetId) << " finished, " << "leader CommittedIndex: " << leaderStaus.committed_index - << ", node appliedIndex: " - << status.known_applied_index; + << ", node appliedIndex: " << status.known_applied_index; return true; } retryTimes = 0; ::usleep(1000 * copysetNodeOptions_.checkLoadMarginIntervalMs); } - LOG(WARNING) << "check copyset " - << ToGroupIdString(logicPoolId, copysetId) + LOG(WARNING) << "check copyset " << ToGroupIdString(logicPoolId, copysetId) << " failed."; return false; } std::shared_ptr CopysetNodeManager::GetCopysetNode( - const LogicPoolID &logicPoolId, const CopysetID &copysetId) const { - /* 加读锁 */ + const LogicPoolID& logicPoolId, const CopysetID& copysetId) const { + /* Read lock */ ReadLockGuard readLockGuard(rwLock_); GroupId groupId = ToGroupId(logicPoolId, copysetId); auto it = copysetNodeMap_.find(groupId); - if (copysetNodeMap_.end() != it) - return it->second; + if (copysetNodeMap_.end() != it) return it->second; return nullptr; } void CopysetNodeManager::GetAllCopysetNodes( - std::vector *nodes) const { - /* 加读锁 */ + std::vector* nodes) const { + /* Read lock */ ReadLockGuard readLockGuard(rwLock_); for (auto it = copysetNodeMap_.begin(); it != copysetNodeMap_.end(); ++it) { nodes->push_back(it->second); } } -bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf) { +bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf) { GroupId groupId = ToGroupId(logicPoolId, copysetId); - // 如果本地copyset还未全部加载完成,不允许外部创建copyset + // If the local copysets have not all finished loading yet, external + // copyset creation is not allowed if (!loadFinished_.load(std::memory_order_acquire)) { LOG(WARNING) << "Create copyset failed: load unfinished " << ToGroupIdString(logicPoolId, copysetId); return false; } - // copysetnode析构的时候会去调shutdown,可能导致协程切出 - // 所以创建copysetnode失败的时候,不能占着写锁,等写锁释放后再析构 + // When a copysetnode is destructed, shutdown is called, which may cause + // the current coroutine to be switched out. So when creating a copysetnode + // fails, it must not hold the write lock; destruct it only after the + // write lock has been released. std::shared_ptr copysetNode = nullptr; - /* 加写锁 */ + /* Write lock */ WriteLockGuard writeLockGuard(rwLock_); if (copysetNodeMap_.end() == copysetNodeMap_.find(groupId)) { - copysetNode = std::make_shared(logicPoolId, - copysetId, - conf); + copysetNode = + std::make_shared(logicPoolId, copysetId, conf); if (0 != copysetNode->Init(copysetNodeOptions_)) { LOG(ERROR) << "Copyset " << ToGroupIdString(logicPoolId, copysetId) - << " init failed"; + << " init failed"; return false; } if (0 != copysetNode->Run()) { @@ -325,8 +327,7 @@
bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, return false; } copysetNodeMap_.insert(std::pair>( - groupId, - copysetNode)); + groupId, copysetNode)); LOG(INFO) << "Create copyset success " << ToGroupIdString(logicPoolId, copysetId); return true; @@ -336,8 +337,8 @@ bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, return false; } -bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, +bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const std::vector peers) { Configuration conf; for (Peer peer : peers) { @@ -348,13 +349,10 @@ bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, } std::shared_ptr CopysetNodeManager::CreateCopysetNodeUnlocked( - const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf) { + const LogicPoolID& logicPoolId, const CopysetID& copysetId, + const Configuration& conf) { std::shared_ptr copysetNode = - std::make_shared(logicPoolId, - copysetId, - conf); + std::make_shared(logicPoolId, copysetId, conf); if (0 != copysetNode->Init(copysetNodeOptions_)) { LOG(ERROR) << "Copyset " << ToGroupIdString(logicPoolId, copysetId) << " init failed"; @@ -369,13 +367,13 @@ std::shared_ptr CopysetNodeManager::CreateCopysetNodeUnlocked( return copysetNode; } -int CopysetNodeManager::AddService(brpc::Server *server, - const butil::EndPoint &listenAddress) { +int CopysetNodeManager::AddService(brpc::Server* server, + const butil::EndPoint& listenAddress) { int ret = 0; uint64_t maxInflight = 100; - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); - CopysetNodeManager *copysetNodeManager = this; + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); + CopysetNodeManager* copysetNodeManager = this; ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = copysetNodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle; @@ -401,7 +399,7 @@ int CopysetNodeManager::AddService(brpc::Server *server, ret = server->RemoveService(service); CHECK(0 == ret) << "Fail to remove braft::FileService"; ret = server->AddService(&kCurveFileService, - brpc::SERVER_DOESNT_OWN_SERVICE); + brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add CurveFileService"; // add other services @@ -413,70 +411,71 @@ int CopysetNodeManager::AddService(brpc::Server *server, brpc::SERVER_OWNS_SERVICE); CHECK(0 == ret) << "Fail to add CopysetService"; auto epochMap = std::make_shared(); - ret = server->AddService(new ChunkServiceImpl( - chunkServiceOptions, epochMap), - brpc::SERVER_OWNS_SERVICE); + ret = server->AddService( + new ChunkServiceImpl(chunkServiceOptions, epochMap), + brpc::SERVER_OWNS_SERVICE); CHECK(0 == ret) << "Fail to add ChunkService"; } while (false); return ret; } -bool CopysetNodeManager::DeleteCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +bool CopysetNodeManager::DeleteCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { bool ret = false; GroupId groupId = ToGroupId(logicPoolId, copysetId); { - // 加读锁 + // Read lock ReadLockGuard readLockGuard(rwLock_); auto it = copysetNodeMap_.find(groupId); if (copysetNodeMap_.end() != it) { - // TODO(yyk) 这部分可能存在死锁的风险,后续需要评估 + // TODO(yyk) There may be a risk of deadlock, which needs to be + // evaluated in the future it->second->Fini(); ret = true; } } { - // 加写锁 + // Write lock WriteLockGuard writeLockGuard(rwLock_); auto it =
copysetNodeMap_.find(groupId); if (copysetNodeMap_.end() != it) { copysetNodeMap_.erase(it); ret = true; LOG(INFO) << "Delete copyset " - << ToGroupIdString(logicPoolId, copysetId) - <<" success."; + << ToGroupIdString(logicPoolId, copysetId) << " success."; } } return ret; } -bool CopysetNodeManager::PurgeCopysetNodeData(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +bool CopysetNodeManager::PurgeCopysetNodeData(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { bool ret = false; GroupId groupId = ToGroupId(logicPoolId, copysetId); { - // 加读锁 + // Read lock ReadLockGuard readLockGuard(rwLock_); auto it = copysetNodeMap_.find(groupId); if (copysetNodeMap_.end() != it) { - // TODO(yyk) 这部分可能存在死锁的风险,后续需要评估 + // TODO(yyk) There may be a risk of deadlock, which needs to be + // evaluated in the future it->second->Fini(); ret = true; } } { - // 加写锁 + // Write lock WriteLockGuard writeLockGuard(rwLock_); auto it = copysetNodeMap_.find(groupId); if (copysetNodeMap_.end() != it) { if (0 != copysetNodeOptions_.trash->RecycleCopySet( - it->second->GetCopysetDir())) { + it->second->GetCopysetDir())) { LOG(ERROR) << "Failed to remove copyset " << ToGroupIdString(logicPoolId, copysetId) << " persistently."; @@ -519,18 +518,18 @@ bool CopysetNodeManager::DeleteBrokenCopyset(const LogicPoolID& poolId, return true; } -bool CopysetNodeManager::IsExist(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { - /* 加读锁 */ +bool CopysetNodeManager::IsExist(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { + /* Read lock */ ReadLockGuard readLockGuard(rwLock_); GroupId groupId = ToGroupId(logicPoolId, copysetId); return copysetNodeMap_.end() != copysetNodeMap_.find(groupId); } bool CopysetNodeManager::InsertCopysetNodeIfNotExist( - const LogicPoolID &logicPoolId, const CopysetID &copysetId, + const LogicPoolID& logicPoolId, const CopysetID& copysetId, std::shared_ptr node) { - /* 加写锁 */ + /* Write lock */ WriteLockGuard writeLockGuard(rwLock_); GroupId groupId = ToGroupId(logicPoolId, copysetId); auto it = copysetNodeMap_.find(groupId); diff --git a/src/chunkserver/copyset_node_manager.h b/src/chunkserver/copyset_node_manager.h index 8294b21e0f..5336025227 100755 --- a/src/chunkserver/copyset_node_manager.h +++ b/src/chunkserver/copyset_node_manager.h @@ -23,209 +23,215 @@ #ifndef SRC_CHUNKSERVER_COPYSET_NODE_MANAGER_H_ #define SRC_CHUNKSERVER_COPYSET_NODE_MANAGER_H_ -#include //NOLINT -#include #include +#include //NOLINT #include +#include #include "src/chunkserver/copyset_node.h" #include "src/common/concurrent/rw_lock.h" -#include "src/common/uncopyable.h" #include "src/common/concurrent/task_thread_pool.h" +#include "src/common/uncopyable.h" namespace curve { namespace chunkserver { using curve::common::BthreadRWLock; using curve::common::ReadLockGuard; -using curve::common::WriteLockGuard; using curve::common::TaskThreadPool; +using curve::common::WriteLockGuard; class ChunkOpRequest; /** - * Copyset Node的管理者 + * Manager of copyset nodes */ class CopysetNodeManager : public curve::common::Uncopyable { public: using CopysetNodePtr = std::shared_ptr; - // 单例,仅仅在 c++11或者更高版本下正确 - static CopysetNodeManager &GetInstance() { + // Singleton; only correct under C++11 or later + static CopysetNodeManager& GetInstance() { static CopysetNodeManager instance; return instance; } virtual ~CopysetNodeManager() = default; - int Init(const CopysetNodeOptions &copysetNodeOptions); + int Init(const CopysetNodeOptions& copysetNodeOptions); int Run(); int Fini(); /** - * @brief
加载目录下的所有copyset + * @brief Load all copysets in the directory * - * @return 0表示加载成功,非0表示加载失败 + * @return 0 indicates successful loading, non-0 indicates failed loading */ int ReloadCopysets(); /** - * 创建copyset node,两种情况需要创建copyset node - * TODO(wudemiao): 后期替换之后删除掉 - * 1.集群初始化,创建copyset - * 2.恢复的时候add peer + * Create a copyset node; it is needed in two situations + * TODO(wudemiao): delete after later replacement + * 1. Cluster initialization, creating the copyset + * 2. Adding a peer during recovery */ - bool CreateCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf); + bool CreateCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf); /** - * 都是创建copyset,目前两个同时存在,后期仅仅保留一个 + * Both overloads create a copyset; currently the two coexist, and only + * one will be kept in the future */ - bool CreateCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, + bool CreateCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const std::vector peers); /** - * 删除copyset node内存实例(停止copyset, 销毁copyset内存实例并从copyset - * manager的copyset表中清除copyset表项,并不影响盘上的copyset持久化数据) - * @param logicPoolId:逻辑池id - * @param copysetId:复制组id - * @return true 成功,false失败 + * Delete the in-memory instance of the copyset node (stop the copyset, + * destroy the in-memory instance, and remove its entry from the copyset + * manager's copyset table; the persisted copyset data on disk is not + * affected) + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copyset ID + * @return true succeeded, false failed */ - bool DeleteCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + bool DeleteCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 彻底删除copyset node内存数据(停止copyset, 销毁copyset内存实例并从 - * copyset manager的copyset表中清除copyset表项,并将copyset持久化数据从盘 - * 上彻底删除) - * @param logicPoolId:逻辑池id - * @param copysetId:复制组id - * @return true 成功,false失败 + * Completely delete the copyset node (stop the copyset, destroy the + * in-memory instance, remove its entry from the copyset manager's copyset + * table, and permanently delete the persisted copyset data from the disk) + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copyset ID + * @return true succeeded, false failed */ - bool PurgeCopysetNodeData(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + bool PurgeCopysetNodeData(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** * @brief Delete broken copyset * @param[in] poolId logical pool id * @param[in] copysetId copyset id * @return true if delete success, else return false - */ + */ bool DeleteBrokenCopyset(const LogicPoolID& poolId, const CopysetID& copysetId); /** - * 判断指定的copyset是否存在 - * @param logicPoolId:逻辑池子id - * @param copysetId:复制组id - * @return true存在,false不存在 + * Determine whether the specified copyset exists + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copyset ID + * @return true exists, false does not exist */ - bool IsExist(const LogicPoolID &logicPoolId, const CopysetID &copysetId); + bool IsExist(const LogicPoolID& logicPoolId, const CopysetID& copysetId); /** - * 获取指定的copyset - * @param logicPoolId:逻辑池子id - * @param copysetId:复制组id - * @return nullptr则为没查询到 + * Get the specified copyset + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copyset
ID + * @return nullptr if the copyset is not found */ - virtual CopysetNodePtr GetCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) const; + virtual CopysetNodePtr GetCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) const; /** - * 查询所有的copysets - * @param nodes:出参,返回所有的copyset + * Query all copysets + * @param nodes: output parameter, returns all copysets */ - void GetAllCopysetNodes(std::vector *nodes) const; + void GetAllCopysetNodes(std::vector* nodes) const; /** - * 添加RPC service - * TODO(wudemiao): 目前仅仅用于测试,后期完善了会删除掉 - * @param server:rpc Server - * @param listenAddress:监听的地址 - * @return 0成功,-1失败 + * Add RPC services + * TODO(wudemiao): Currently only used for testing, and will be removed + * after later refinement + * @param server: rpc Server + * @param listenAddress: The address to listen on + * @return 0 succeeded, -1 failed */ - int AddService(brpc::Server *server, - const butil::EndPoint &listenAddress); + int AddService(brpc::Server* server, const butil::EndPoint& listenAddress); - virtual const CopysetNodeOptions &GetCopysetNodeOptions() const { + virtual const CopysetNodeOptions& GetCopysetNodeOptions() const { return copysetNodeOptions_; } /** * @brief: Only for test */ - void SetCopysetNodeOptions( - const CopysetNodeOptions& copysetNodeOptions) { + void SetCopysetNodeOptions(const CopysetNodeOptions& copysetNodeOptions) { copysetNodeOptions_ = copysetNodeOptions; } /** - * 加载copyset,包括新建一个copyset或者重启一个copyset - * @param logicPoolId: 逻辑池id + * Load a copyset: either create a new copyset or restart an existing one + * @param logicPoolId: Logical Pool ID * @param copysetId: copyset id - * @param needCheckLoadFinished: 是否需要判断copyset加载完成 + * @param needCheckLoadFinished: whether to check that the copyset has + * finished loading */ - void LoadCopyset(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, + void LoadCopyset(const LogicPoolID& logicPoolId, const CopysetID& copysetId, bool needCheckLoadFinished); /** - * 检测指定的copyset状态,直到copyset加载完成或出现异常 - * @param node: 指定的copyset node - * @return true表示加载成功,false表示检测过程中出现异常 + * Check the state of the specified copyset until it finishes loading or + * an exception occurs + * @param node: The specified copyset node + * @return true indicates successful loading, while false indicates an + * exception occurred during the check */ bool CheckCopysetUntilLoadFinished(std::shared_ptr node); /** - * 获取copysetNodeManager加载copyset的状态 - * @return false-copyset未加载完成 true-copyset已加载完成 + * Get the copyset-loading status of the copysetNodeManager + * @return false - copysets not fully loaded, true - loading complete */ virtual bool LoadFinished(); protected: CopysetNodeManager() - : copysetLoader_(nullptr) - , running_(false) - , loadFinished_(false) {} + : copysetLoader_(nullptr), running_(false), loadFinished_(false) {} private: /** - * 如果指定copyset不存在,则将copyset插入到map当中(线程安全) - * @param logicPoolId:逻辑池id - * @param copysetId:复制组id - * @param node:要插入的copysetnode - * @return copyset不存在,则插入到map并返回true; - * copyset如果存在,则返回false + * If the specified copyset does not exist, insert the copyset into the map + * (thread safe) + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copyset ID + * @param node: The copysetnode to be inserted + * @return If the copyset does not exist, insert it into the map and return + * true; if it already exists, return false */ - bool
InsertCopysetNodeIfNotExist(const LogicPoolID& logicPoolId, - const CopysetID& copysetId, std::shared_ptr node); /** - * 创建一个新的copyset或加载一个已存在的copyset(非线程安全) - * @param logicPoolId:逻辑池id - * @param copysetId:复制组id - * @param conf:此copyset的配置成员 - * @return 创建或加载成功返回copysetnode,否则返回nullptr + * Create a new copyset or load an existing copyset (not thread-safe) + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copyset ID + * @param conf: The configuration members of this copyset + * @return the copysetnode on successful creation or loading; otherwise + * returns nullptr */ std::shared_ptr CreateCopysetNodeUnlocked( - const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf); + const LogicPoolID& logicPoolId, const CopysetID& copysetId, + const Configuration& conf); private: - using CopysetNodeMap = std::unordered_map>; - // 保护复制组 map的读写锁 + using CopysetNodeMap = + std::unordered_map>; + // Read-write lock protecting the copyset node map mutable BthreadRWLock rwLock_; - // 复制组map + // Copyset node map CopysetNodeMap copysetNodeMap_; - // 复制组配置选项 + // Copyset configuration options CopysetNodeOptions copysetNodeOptions_; - // 控制copyset并发启动的数量 + // Controls how many copysets are started concurrently std::shared_ptr> copysetLoader_; - // 表示copyset node manager当前是否正在运行 + // Indicates whether the copyset node manager is currently running Atomic running_; - // 表示copyset node manager当前是否已经完成加载 + // Indicates whether the copyset node manager has currently completed + // loading Atomic loadFinished_; }; diff --git a/src/chunkserver/copyset_service.cpp b/src/chunkserver/copyset_service.cpp index e09516c0ad..9082024b4c 100755 --- a/src/chunkserver/copyset_service.cpp +++ b/src/chunkserver/copyset_service.cpp @@ -20,36 +20,36 @@ * Author: wudemiao */ +#include "src/chunkserver/copyset_service.h" + #include #include -#include #include +#include -#include "src/chunkserver/copyset_service.h" #include "src/chunkserver/copyset_node_manager.h" namespace curve { namespace chunkserver { -void CopysetServiceImpl::CreateCopysetNode(RpcController *controller, - const CopysetRequest *request, - CopysetResponse *response, - Closure *done) { +void CopysetServiceImpl::CreateCopysetNode(RpcController* controller, + const CopysetRequest* request, + CopysetResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); LOG(INFO) << "Received create copyset request: " << ToGroupIdString(request->logicpoolid(), request->copysetid()); - // 解析request中的peers + // Parse the peers in the request Configuration conf; for (int i = 0; i < request->peerid_size(); ++i) { PeerId peer; int ret = peer.parse(request->peerid(i)); if (ret != 0) { - cntl->SetFailed(EINVAL, - "Fail to parse peer id %s", + cntl->SetFailed(EINVAL, "Fail to parse peer id %s", request->peerid(i).c_str()); return; } @@ -59,12 +59,9 @@ void CopysetServiceImpl::CreateCopysetNode(RpcController *controller, LogicPoolID logicPoolID = request->logicpoolid(); CopysetID copysetID = request->copysetid(); GroupId groupId = ToGroupId(logicPoolID, copysetID); - if (false == copysetNodeManager_->IsExist(logicPoolID, - copysetID)) { - if (true == - copysetNodeManager_->CreateCopysetNode(logicPoolID, - copysetID, - conf)) { + if (false == copysetNodeManager_->IsExist(logicPoolID, copysetID)) { + if (true == copysetNodeManager_->CreateCopysetNode(logicPoolID, + copysetID, conf)) {
response->set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } else { response->set_status( @@ -80,10 +77,10 @@ void CopysetServiceImpl::CreateCopysetNode(RpcController *controller, << COPYSET_OP_STATUS_Name(response->status()); } -void CopysetServiceImpl::CreateCopysetNode2(RpcController *controller, - const CopysetRequest2 *request, - CopysetResponse2 *response, - Closure *done) { +void CopysetServiceImpl::CreateCopysetNode2(RpcController* controller, + const CopysetRequest2* request, + CopysetResponse2* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); @@ -103,31 +100,32 @@ void CopysetServiceImpl::CreateCopysetNode2(RpcController *controller, if (true == copysetNodeManager_->IsExist(copyset.logicpoolid(), copyset.copysetid())) { response->set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST); - LOG(WARNING) << "Create copyset " - << ToGroupIdString(copyset.logicpoolid(), - copyset.copysetid()) - << " failed, response code: " - << COPYSET_OP_STATUS_Name(COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST); //NOLINT + LOG(WARNING) + << "Create copyset " + << ToGroupIdString(copyset.logicpoolid(), + copyset.copysetid()) + << " failed, response code: " + << COPYSET_OP_STATUS_Name( + COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST); // NOLINT return; } - if (false == - copysetNodeManager_->CreateCopysetNode(copyset.logicpoolid(), - copyset.copysetid(), - peers)) { + if (false == copysetNodeManager_->CreateCopysetNode( + copyset.logicpoolid(), copyset.copysetid(), peers)) { response->set_status( COPYSET_OP_STATUS::COPYSET_OP_STATUS_FAILURE_UNKNOWN); LOG(ERROR) << "Create copyset " << ToGroupIdString(copyset.logicpoolid(), copyset.copysetid()) << " failed, response code: " - << COPYSET_OP_STATUS_Name(COPYSET_OP_STATUS::COPYSET_OP_STATUS_FAILURE_UNKNOWN); //NOLINT + << COPYSET_OP_STATUS_Name( + COPYSET_OP_STATUS:: + COPYSET_OP_STATUS_FAILURE_UNKNOWN); // NOLINT return; } LOG(INFO) << "Create copyset " - << ToGroupIdString(copyset.logicpoolid(), - copyset.copysetid()) + << ToGroupIdString(copyset.logicpoolid(), copyset.copysetid()) << " success."; } @@ -151,7 +149,7 @@ void CopysetServiceImpl::DeleteBrokenCopyset(RpcController* controller, // if copyset node exist in the manager means its data is complete if (copysetNodeManager_->IsExist(poolId, copysetId)) { response->set_status(COPYSET_OP_STATUS_COPYSET_IS_HEALTHY); - LOG(WARNING) << "Delete broken copyset, " << groupId << " is healthy"; + LOG(WARNING) << "Delete broken copyset, " << groupId << " is healthy"; } else if (!copysetNodeManager_->DeleteBrokenCopyset(poolId, copysetId)) { response->set_status(COPYSET_OP_STATUS_FAILURE_UNKNOWN); LOG(ERROR) << "Delete broken copyset " << groupId << " failed"; @@ -161,17 +159,17 @@ void CopysetServiceImpl::DeleteBrokenCopyset(RpcController* controller, } } -void CopysetServiceImpl::GetCopysetStatus(RpcController *controller, - const CopysetStatusRequest *request, - CopysetStatusResponse *response, - Closure *done) { +void CopysetServiceImpl::GetCopysetStatus(RpcController* controller, + const CopysetStatusRequest* request, + CopysetStatusResponse* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); LOG(INFO) << "Received GetCopysetStatus request: " << ToGroupIdString(request->logicpoolid(), request->copysetid()); - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -183,14 +181,14 @@ void 
CopysetServiceImpl::GetCopysetStatus(RpcController *controller, return; } - // 获取raft node status + // Obtain the raft node status NodeStatus status; nodePtr->GetStatus(&status); response->set_state(status.state); - Peer *peer = new Peer(); + Peer* peer = new Peer(); response->set_allocated_peer(peer); peer->set_address(status.peer_id.to_string()); - Peer *leader = new Peer(); + Peer* leader = new Peer(); response->set_allocated_leader(leader); leader->set_address(status.leader_id.to_string()); response->set_readonly(status.readonly); @@ -204,13 +202,14 @@ void CopysetServiceImpl::GetCopysetStatus(RpcController *controller, response->set_lastindex(status.last_index); response->set_diskindex(status.disk_index); - // 获取配置的版本 + // Obtain the configuration epoch response->set_epoch(nodePtr->GetConfEpoch()); /** - * 考虑到query hash需要读取copyset的所有chunk数据,然后计算hash值 - * 是一个非常耗时的操作,所以在request会设置query hash字段,如果 - * 为false,那么就不需要查询copyset的hash值 + * Considering that calculating the hash value for query hash requires + * reading all chunk data from a copyset, which is a very time-consuming + * operation, the request will have a "query hash" field. If it is set to + * false, then there is no need to query the hash value of the copyset. */ if (request->queryhash()) { std::string hash; @@ -228,8 +227,7 @@ void CopysetServiceImpl::GetCopysetStatus(RpcController *controller, response->set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); LOG(INFO) << "GetCopysetStatus success: " - << ToGroupIdString(request->logicpoolid(), - request->copysetid()); + << ToGroupIdString(request->logicpoolid(), request->copysetid()); } } // namespace chunkserver diff --git a/src/chunkserver/copyset_service.h b/src/chunkserver/copyset_service.h index fabf6df8fc..7025b6e9dd 100755 --- a/src/chunkserver/copyset_service.h +++ b/src/chunkserver/copyset_service.h @@ -28,51 +28,48 @@ namespace curve { namespace chunkserver { -using ::google::protobuf::RpcController; using ::google::protobuf::Closure; +using ::google::protobuf::RpcController; class CopysetNodeManager; /** - * 复制组管理的Rpc服务,目前仅有创建复制组 + * The RPC service for replication group management; currently it only + * supports creating replication groups */ class CopysetServiceImpl : public CopysetService { public: - explicit CopysetServiceImpl(CopysetNodeManager* copysetNodeManager) : - copysetNodeManager_(copysetNodeManager) {} + explicit CopysetServiceImpl(CopysetNodeManager* copysetNodeManager) + : copysetNodeManager_(copysetNodeManager) {} ~CopysetServiceImpl() {} /** - * 创建复制组,一次只能创建一个 + * Create replication groups; only one can be created at a time */ - void CreateCopysetNode(RpcController *controller, - const CopysetRequest *request, - CopysetResponse *response, - Closure *done); + void CreateCopysetNode(RpcController* controller, + const CopysetRequest* request, + CopysetResponse* response, Closure* done); /* - * 创建复制组,一次可以创建多个 + * Create replication groups; multiple can be created at once */ - void CreateCopysetNode2(RpcController *controller, - const CopysetRequest2 *request, - CopysetResponse2 *response, - Closure *done); + void CreateCopysetNode2(RpcController* controller, + const CopysetRequest2* request, + CopysetResponse2* response, Closure* done); /** * @brief Delete broken copyset */ void DeleteBrokenCopyset(RpcController* controller, const CopysetRequest* request, - CopysetResponse* response, - Closure* done); + CopysetResponse* response, Closure* done); - void GetCopysetStatus(RpcController *controller, - const CopysetStatusRequest *request, - CopysetStatusResponse
*response, - Closure *done); + void GetCopysetStatus(RpcController* controller, + const CopysetStatusRequest* request, + CopysetStatusResponse* response, Closure* done); private: - // 复制组管理者 + // Copyset node manager CopysetNodeManager* copysetNodeManager_; }; diff --git a/src/chunkserver/heartbeat.cpp b/src/chunkserver/heartbeat.cpp index b81fe6bdb3..5ce5a575cd 100644 --- a/src/chunkserver/heartbeat.cpp +++ b/src/chunkserver/heartbeat.cpp @@ -21,606 +21,694 @@ * 2018/12/20 Wenyu Zhou Initial version */ -#include -#include +#include "src/chunkserver/heartbeat.h" + +#include #include #include -#include +#include +#include -#include #include +#include -#include "src/fs/fs_common.h" -#include "src/common/timeutility.h" -#include "src/chunkserver/heartbeat.h" -#include "src/common/uri_parser.h" #include "src/chunkserver/heartbeat_helper.h" #include "src/common/curve_version.h" +#include "src/common/timeutility.h" +#include "src/common/uri_parser.h" +#include "src/fs/fs_common.h" using curve::fs::FileSystemInfo; -namespace curve { -namespace chunkserver { -TaskStatus Heartbeat::PurgeCopyset(LogicPoolID poolId, CopysetID copysetId) { - if (!copysetMan_->PurgeCopysetNodeData(poolId, copysetId)) { - LOG(ERROR) << "Failed to clean copyset " - << ToGroupIdStr(poolId, copysetId) << " and its data."; - - return TaskStatus(-1, "Failed to clean copyset"); - } - - LOG(INFO) << "Successfully cleaned copyset " - << ToGroupIdStr(poolId, copysetId) << " and its data."; - - return TaskStatus::OK(); -} - -int Heartbeat::Init(const HeartbeatOptions &options) { - toStop_.store(false, std::memory_order_release); - options_ = options; - - butil::ip_t csIp; - storePath_ = curve::common::UriParser::GetPathFromUri(options_.storeUri); - if (butil::str2ip(options_.ip.c_str(), &csIp) < 0) { - LOG(ERROR) << "Invalid Chunkserver IP provided: " << options_.ip; - return -1; - } - csEp_ = butil::EndPoint(csIp, options_.port); - LOG(INFO) << "Chunkserver address: " << options_.ip << ":" << options_.port; - - // mdsEps不能为空 - ::curve::common::SplitString(options_.mdsListenAddr, ",", &mdsEps_); - if (mdsEps_.empty()) { - LOG(ERROR) << "Invalid mds ip provided: " << options_.mdsListenAddr; - return -1; - } - // 检查每个地址的合法性 - for (auto addr : mdsEps_) { - butil::EndPoint endpt; - if (butil::str2endpoint(addr.c_str(), &endpt) < 0) { - LOG(ERROR) << "Invalid sub mds ip:port provided: " << addr; - return -1; - } - } - - inServiceIndex_ = 0; - LOG(INFO) << "MDS address: " << options_.mdsListenAddr; - - copysetMan_ = options.copysetNodeManager; - - // 初始化timer - waitInterval_.Init(options_.intervalSec * 1000); - - // 获取当前unix时间戳 - startUpTime_ = ::curve::common::TimeUtility::GetTimeofDaySec(); - - // init scanManager - scanMan_ = options.scanManager; - return 0; -} - -int Heartbeat::Run() { - // start scan thread - hbThread_ = Thread(&Heartbeat::HeartbeatWorker, this); - return 0; -} - -int Heartbeat::Stop() { - LOG(INFO) << "Stopping Heartbeat manager."; - - waitInterval_.StopWait(); - toStop_.store(true, std::memory_order_release); - hbThread_.join(); - - LOG(INFO) << "Stopped Heartbeat manager."; - return 0; -} - -int Heartbeat::Fini() { - Stop(); - // stop scan thread - LOG(INFO) << "Heartbeat manager cleaned up."; - return 0; -} - -int Heartbeat::GetFileSystemSpaces(size_t* capacity, size_t* avail) { - int ret; - struct FileSystemInfo info; - - ret = options_.fs->Statfs(storePath_, &info); - if (ret != 0) { - LOG(ERROR) << "Failed to get file system space information, " - << " error message: " << strerror(errno); - return -1; - } - 
*capacity = info.total; - *avail = info.available; - - return 0; -} - -int Heartbeat::BuildCopysetInfo(curve::mds::heartbeat::CopySetInfo* info, - CopysetNodePtr copyset) { - int ret; - LogicPoolID poolId = copyset->GetLogicPoolId(); - CopysetID copysetId = copyset->GetCopysetId(); - - info->set_logicalpoolid(poolId); - info->set_copysetid(copysetId); - info->set_epoch(copyset->GetConfEpoch()); - - // for scan - info->set_scaning(copyset->GetScan()); - if (copyset->GetLastScan() > 0) { - info->set_lastscansec(copyset->GetLastScan()); - } - auto failedScanMaps = copyset->GetFailedScanMap(); - if (!failedScanMaps.empty()) { - for (auto &map : failedScanMaps) { - info->add_scanmap()->CopyFrom(map); - } - } - - std::vector peers; - copyset->ListPeers(&peers); - for (Peer peer : peers) { - auto replica = info->add_peers(); - replica->set_address(peer.address().c_str()); - } - - PeerId leader = copyset->GetLeaderId(); - auto replica = new ::curve::common::Peer(); - replica->set_address(leader.to_string()); - info->set_allocated_leaderpeer(replica); - - curve::mds::heartbeat::CopysetStatistics* stats = - new curve::mds::heartbeat::CopysetStatistics(); - CopysetMetricPtr copysetMetric = - ChunkServerMetric::GetInstance()->GetCopysetMetric(poolId, copysetId); - if (copysetMetric != nullptr) { - IOMetricPtr readMetric = - copysetMetric->GetIOMetric(CSIOMetricType::READ_CHUNK); - IOMetricPtr writeMetric = - copysetMetric->GetIOMetric(CSIOMetricType::WRITE_CHUNK); - if (readMetric != nullptr && writeMetric != nullptr) { - stats->set_readrate(readMetric->bps_.get_value(1)); - stats->set_writerate(writeMetric->bps_.get_value(1)); - stats->set_readiops(readMetric->iops_.get_value(1)); - stats->set_writeiops(writeMetric->iops_.get_value(1)); - info->set_allocated_stats(stats); - } else { - LOG(ERROR) << "Failed to get copyset io metric." 
- << "logic pool id: " << poolId - << ", copyset id: " << copysetId; +namespace curve +{ + namespace chunkserver + { + TaskStatus Heartbeat::PurgeCopyset(LogicPoolID poolId, CopysetID copysetId) + { + if (!copysetMan_->PurgeCopysetNodeData(poolId, copysetId)) + { + LOG(ERROR) << "Failed to clean copyset " + << ToGroupIdStr(poolId, copysetId) << " and its data."; + + return TaskStatus(-1, "Failed to clean copyset"); + } + + LOG(INFO) << "Successfully cleaned copyset " + << ToGroupIdStr(poolId, copysetId) << " and its data."; + + return TaskStatus::OK(); } - } - - ConfigChangeType type; - Configuration conf; - Peer peer; - - if ((ret = copyset->GetConfChange(&type, &conf, &peer)) != 0) { - LOG(ERROR) << "Failed to get config change state of copyset " - << ToGroupIdStr(poolId, copysetId); - return ret; - } else if (type == curve::mds::heartbeat::NONE) { - return 0; - } - - ConfigChangeInfo* confChxInfo = new ConfigChangeInfo(); - replica = new(std::nothrow) ::curve::common::Peer(); - if (replica == nullptr) { - LOG(ERROR) << "apply memory error"; - return -1; - } - replica->set_address(peer.address()); - confChxInfo->set_allocated_peer(replica); - confChxInfo->set_type(type); - confChxInfo->set_finished(false); - info->set_allocated_configchangeinfo(confChxInfo); - - return 0; -} - -int Heartbeat::BuildRequest(HeartbeatRequest* req) { - int ret; - - req->set_chunkserverid(options_.chunkserverId); - req->set_token(options_.chunkserverToken); - req->set_starttime(startUpTime_); - req->set_ip(options_.ip); - req->set_port(options_.port); - - /* - * TODO(wenyu): DiskState field is not valid yet until disk health feature - * is ready - */ - curve::mds::heartbeat::DiskState* diskState = - new curve::mds::heartbeat::DiskState(); - diskState->set_errtype(0); - diskState->set_errmsg(""); - req->set_allocated_diskstate(diskState); - - ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); - curve::mds::heartbeat::ChunkServerStatisticInfo* stats = - new curve::mds::heartbeat::ChunkServerStatisticInfo(); - IOMetricPtr readMetric = metric->GetIOMetric(CSIOMetricType::READ_CHUNK); - IOMetricPtr writeMetric = metric->GetIOMetric(CSIOMetricType::WRITE_CHUNK); - if (readMetric != nullptr && writeMetric != nullptr) { - stats->set_readrate(readMetric->bps_.get_value(1)); - stats->set_writerate(writeMetric->bps_.get_value(1)); - stats->set_readiops(readMetric->iops_.get_value(1)); - stats->set_writeiops(writeMetric->iops_.get_value(1)); - } - CopysetNodeOptions opt = copysetMan_->GetCopysetNodeOptions(); - uint64_t chunkFileSize = opt.maxChunkSize; - uint64_t walSegmentFileSize = opt.maxWalSegmentSize; - uint64_t usedChunkSize = metric->GetTotalSnapshotCount() * chunkFileSize - + metric->GetTotalChunkCount() * chunkFileSize; - uint64_t usedWalSegmentSize = metric->GetTotalWalSegmentCount() - * walSegmentFileSize; - uint64_t trashedChunkSize = metric->GetChunkTrashedCount() * chunkFileSize; - uint64_t leftChunkSize = metric->GetChunkLeftCount() * chunkFileSize; - - // leftWalSegmentSize will be 0 when CHUNK and WAL share file pool - uint64_t leftWalSegmentSize = metric->GetWalSegmentLeftCount() - * walSegmentFileSize; - uint64_t chunkPoolSize = options_.chunkFilePool->Size() * - options_.chunkFilePool->GetFilePoolOpt().fileSize; - - // compute format progress rate. 
- const ChunkFormatStat& formatStat = - options_.chunkFilePool->GetChunkFormatStat(); // NOLINT - - stats->set_chunkfilepoolsize(chunkPoolSize); - stats->set_chunksizeusedbytes(usedChunkSize+usedWalSegmentSize); - stats->set_chunksizeleftbytes(leftChunkSize+leftWalSegmentSize); - stats->set_chunksizetrashedbytes(trashedChunkSize); - if (formatStat.preAllocateNum != 0) { - stats->set_chunkfilepoolformatpercent( - 100 * formatStat.allocateChunkNum / formatStat.preAllocateNum); - } else { - stats->set_chunkfilepoolformatpercent(100); - } - req->set_allocated_stats(stats); - - size_t cap, avail; - ret = GetFileSystemSpaces(&cap, &avail); - if (ret != 0) { - LOG(ERROR) << "Failed to get file system space information for path " - << storePath_; - return -1; - } - req->set_diskcapacity(cap); - req->set_diskused(cap - avail); - - std::vector copysets; - copysetMan_->GetAllCopysetNodes(©sets); - - req->set_copysetcount(copysets.size()); - int leaders = 0; - - for (CopysetNodePtr copyset : copysets) { - curve::mds::heartbeat::CopySetInfo* info = req->add_copysetinfos(); - - ret = BuildCopysetInfo(info, copyset); - if (ret != 0) { - LOG(ERROR) << "Failed to build heartbeat information of copyset " - << ToGroupIdStr(copyset->GetLogicPoolId(), - copyset->GetCopysetId()); - continue; + + int Heartbeat::Init(const HeartbeatOptions &options) + { + toStop_.store(false, std::memory_order_release); + options_ = options; + + butil::ip_t csIp; + storePath_ = curve::common::UriParser::GetPathFromUri(options_.storeUri); + if (butil::str2ip(options_.ip.c_str(), &csIp) < 0) + { + LOG(ERROR) << "Invalid Chunkserver IP provided: " << options_.ip; + return -1; + } + csEp_ = butil::EndPoint(csIp, options_.port); + LOG(INFO) << "Chunkserver address: " << options_.ip << ":" << options_.port; + + // mdsEps cannot be empty + ::curve::common::SplitString(options_.mdsListenAddr, ",", &mdsEps_); + if (mdsEps_.empty()) + { + LOG(ERROR) << "Invalid mds ip provided: " << options_.mdsListenAddr; + return -1; + } + // Check the legality of each address + for (auto addr : mdsEps_) + { + butil::EndPoint endpt; + if (butil::str2endpoint(addr.c_str(), &endpt) < 0) + { + LOG(ERROR) << "Invalid sub mds ip:port provided: " << addr; + return -1; + } + } + + inServiceIndex_ = 0; + LOG(INFO) << "MDS address: " << options_.mdsListenAddr; + + copysetMan_ = options.copysetNodeManager; + + // Initialize timer + waitInterval_.Init(options_.intervalSec * 1000); + + // Obtain the current Unix timestamp + startUpTime_ = ::curve::common::TimeUtility::GetTimeofDaySec(); + + // init scanManager + scanMan_ = options.scanManager; + return 0; } - if (copyset->IsLeaderTerm()) { - ++leaders; + + int Heartbeat::Run() + { + // start scan thread + hbThread_ = Thread(&Heartbeat::HeartbeatWorker, this); + return 0; } - } - req->set_leadercount(leaders); - req->set_version(curve::common::CurveVersion()); - - return 0; -} - -void Heartbeat::DumpHeartbeatRequest(const HeartbeatRequest& request) { - DVLOG(6) << "Heartbeat request: Chunkserver ID: " - << request.chunkserverid() - << ", IP: " << request.ip() << ", port: " << request.port() - << ", copyset count: " << request.copysetcount() - << ", leader count: " << request.leadercount(); - for (int i = 0; i < request.copysetinfos_size(); i ++) { - const curve::mds::heartbeat::CopySetInfo& info = - request.copysetinfos(i); - - std::string peersStr = ""; - for (int j = 0; j < info.peers_size(); j ++) { - peersStr += info.peers(j).address() + ","; + + int Heartbeat::Stop() + { + LOG(INFO) << "Stopping Heartbeat 
manager."; + + waitInterval_.StopWait(); + toStop_.store(true, std::memory_order_release); + hbThread_.join(); + + LOG(INFO) << "Stopped Heartbeat manager."; + return 0; } - DVLOG(6) << "Copyset " << i << " " - << ToGroupIdStr(info.logicalpoolid(), info.copysetid()) - << ", epoch: " << info.epoch() - << ", leader: " << info.leaderpeer().address() - << ", peers: " << peersStr; - - if (info.has_configchangeinfo()) { - const ConfigChangeInfo& cxInfo = info.configchangeinfo(); - DVLOG(6) << "Config change info: peer: " << cxInfo.peer().address() - << ", finished: " << cxInfo.finished() - << ", errno: " << cxInfo.err().errtype() - << ", errmsg: " << cxInfo.err().errmsg(); + int Heartbeat::Fini() + { + Stop(); + // stop scan thread + LOG(INFO) << "Heartbeat manager cleaned up."; + return 0; } - } -} - -void Heartbeat::DumpHeartbeatResponse(const HeartbeatResponse& response) { - int count = response.needupdatecopysets_size(); - if (count > 0) { - LOG(INFO) << "Received " << count << " config change commands:"; - for (int i = 0; i < count; i ++) { - CopySetConf conf = response.needupdatecopysets(i); - - int type = (conf.has_type()) ? conf.type() : 0; - std::string item = (conf.has_configchangeitem()) ? - conf.configchangeitem().address() : ""; - - std::string peersStr = ""; - for (int j = 0; j < conf.peers_size(); j ++) { - peersStr += conf.peers(j).address(); + + int Heartbeat::GetFileSystemSpaces(size_t *capacity, size_t *avail) + { + int ret; + struct FileSystemInfo info; + + ret = options_.fs->Statfs(storePath_, &info); + if (ret != 0) + { + LOG(ERROR) << "Failed to get file system space information, " + << " error message: " << strerror(errno); + return -1; } - LOG(INFO) << "Config change " << i << ": " - << "Copyset < " << conf.logicalpoolid() - << ", " << conf.copysetid() << ">, epoch: " - << conf.epoch() << ", Peers: " << peersStr - << ", type: " << type << ", item: " << item; - } - } else { - LOG(INFO) << "Received no config change command."; - } -} - -int Heartbeat::SendHeartbeat(const HeartbeatRequest& request, - HeartbeatResponse* response) { - brpc::Channel channel; - if (channel.Init(mdsEps_[inServiceIndex_].c_str(), NULL) != 0) { - LOG(ERROR) << csEp_.ip << ":" << csEp_.port - << " Fail to init channel to MDS " - << mdsEps_[inServiceIndex_]; - return -1; - } - - curve::mds::heartbeat::HeartbeatService_Stub stub(&channel); - brpc::Controller cntl; - cntl.set_timeout_ms(options_.timeout); - - DumpHeartbeatRequest(request); - - stub.ChunkServerHeartbeat(&cntl, &request, response, nullptr); - if (cntl.Failed()) { - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == ETIMEDOUT || - cntl.ErrorCode() == brpc::ELOGOFF || - cntl.ErrorCode() == brpc::ERPCTIMEDOUT) { - LOG(WARNING) << "current mds: " << mdsEps_[inServiceIndex_] - << " is shutdown or going to quit," - << cntl.ErrorText(); - inServiceIndex_ = (inServiceIndex_ + 1) % mdsEps_.size(); - LOG(INFO) << "next heartbeat switch to " - << mdsEps_[inServiceIndex_]; - } else { - LOG(ERROR) << csEp_.ip << ":" << csEp_.port - << " Fail to send heartbeat to MDS " - << mdsEps_[inServiceIndex_] << "," - << " cntl errorCode: " << cntl.ErrorCode() - << " cntl error: " << cntl.ErrorText(); + *capacity = info.total; + *avail = info.available; + + return 0; } - return -1; - } else { - DumpHeartbeatResponse(*response); - } - - return 0; -} - -int Heartbeat::ExecTask(const HeartbeatResponse& response) { - int count = response.needupdatecopysets_size(); - for (int i = 0; i < count; i ++) { - CopySetConf conf = response.needupdatecopysets(i); - 
CopysetNodePtr copyset = copysetMan_->GetCopysetNode( - conf.logicalpoolid(), conf.copysetid()); - - // 判断copyconf是否合法 - if (!HeartbeatHelper::CopySetConfValid(conf, copyset)) { - continue; + + int Heartbeat::BuildCopysetInfo(curve::mds::heartbeat::CopySetInfo *info, + CopysetNodePtr copyset) + { + int ret; + LogicPoolID poolId = copyset->GetLogicPoolId(); + CopysetID copysetId = copyset->GetCopysetId(); + + info->set_logicalpoolid(poolId); + info->set_copysetid(copysetId); + info->set_epoch(copyset->GetConfEpoch()); + + // for scan + info->set_scaning(copyset->GetScan()); + if (copyset->GetLastScan() > 0) + { + info->set_lastscansec(copyset->GetLastScan()); + } + auto failedScanMaps = copyset->GetFailedScanMap(); + if (!failedScanMaps.empty()) + { + for (auto &map : failedScanMaps) + { + info->add_scanmap()->CopyFrom(map); + } + } + + std::vector peers; + copyset->ListPeers(&peers); + for (Peer peer : peers) + { + auto replica = info->add_peers(); + replica->set_address(peer.address().c_str()); + } + + PeerId leader = copyset->GetLeaderId(); + auto replica = new ::curve::common::Peer(); + replica->set_address(leader.to_string()); + info->set_allocated_leaderpeer(replica); + + curve::mds::heartbeat::CopysetStatistics *stats = + new curve::mds::heartbeat::CopysetStatistics(); + CopysetMetricPtr copysetMetric = + ChunkServerMetric::GetInstance()->GetCopysetMetric(poolId, copysetId); + if (copysetMetric != nullptr) + { + IOMetricPtr readMetric = + copysetMetric->GetIOMetric(CSIOMetricType::READ_CHUNK); + IOMetricPtr writeMetric = + copysetMetric->GetIOMetric(CSIOMetricType::WRITE_CHUNK); + if (readMetric != nullptr && writeMetric != nullptr) + { + stats->set_readrate(readMetric->bps_.get_value(1)); + stats->set_writerate(writeMetric->bps_.get_value(1)); + stats->set_readiops(readMetric->iops_.get_value(1)); + stats->set_writeiops(writeMetric->iops_.get_value(1)); + info->set_allocated_stats(stats); + } + else + { + LOG(ERROR) << "Failed to get copyset io metric." 
+ << "logic pool id: " << poolId + << ", copyset id: " << copysetId; + } + } + + ConfigChangeType type; + Configuration conf; + Peer peer; + + if ((ret = copyset->GetConfChange(&type, &conf, &peer)) != 0) + { + LOG(ERROR) << "Failed to get config change state of copyset " + << ToGroupIdStr(poolId, copysetId); + return ret; + } + else if (type == curve::mds::heartbeat::NONE) + { + return 0; + } + + ConfigChangeInfo *confChxInfo = new ConfigChangeInfo(); + replica = new (std::nothrow)::curve::common::Peer(); + if (replica == nullptr) + { + LOG(ERROR) << "apply memory error"; + return -1; + } + replica->set_address(peer.address()); + confChxInfo->set_allocated_peer(replica); + confChxInfo->set_type(type); + confChxInfo->set_finished(false); + info->set_allocated_configchangeinfo(confChxInfo); + + return 0; } - // 解析该chunkserver上的copyset是否需要删除 - // 需要删除则清理copyset - if (HeartbeatHelper::NeedPurge(csEp_, conf, copyset)) { - LOG(INFO) << "Clean peer " << csEp_ << " of copyset(" - << conf.logicalpoolid() << "," << conf.copysetid() - << "), groupId: " - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); - PurgeCopyset(conf.logicalpoolid(), conf.copysetid()); - continue; + int Heartbeat::BuildRequest(HeartbeatRequest *req) + { + int ret; + + req->set_chunkserverid(options_.chunkserverId); + req->set_token(options_.chunkserverToken); + req->set_starttime(startUpTime_); + req->set_ip(options_.ip); + req->set_port(options_.port); + + /* + * TODO(wenyu): DiskState field is not valid yet until disk health feature + * is ready + */ + curve::mds::heartbeat::DiskState *diskState = + new curve::mds::heartbeat::DiskState(); + diskState->set_errtype(0); + diskState->set_errmsg(""); + req->set_allocated_diskstate(diskState); + + ChunkServerMetric *metric = ChunkServerMetric::GetInstance(); + curve::mds::heartbeat::ChunkServerStatisticInfo *stats = + new curve::mds::heartbeat::ChunkServerStatisticInfo(); + IOMetricPtr readMetric = metric->GetIOMetric(CSIOMetricType::READ_CHUNK); + IOMetricPtr writeMetric = metric->GetIOMetric(CSIOMetricType::WRITE_CHUNK); + if (readMetric != nullptr && writeMetric != nullptr) + { + stats->set_readrate(readMetric->bps_.get_value(1)); + stats->set_writerate(writeMetric->bps_.get_value(1)); + stats->set_readiops(readMetric->iops_.get_value(1)); + stats->set_writeiops(writeMetric->iops_.get_value(1)); + } + CopysetNodeOptions opt = copysetMan_->GetCopysetNodeOptions(); + uint64_t chunkFileSize = opt.maxChunkSize; + uint64_t walSegmentFileSize = opt.maxWalSegmentSize; + uint64_t usedChunkSize = metric->GetTotalSnapshotCount() * chunkFileSize + + metric->GetTotalChunkCount() * chunkFileSize; + uint64_t usedWalSegmentSize = + metric->GetTotalWalSegmentCount() * walSegmentFileSize; + uint64_t trashedChunkSize = metric->GetChunkTrashedCount() * chunkFileSize; + uint64_t leftChunkSize = metric->GetChunkLeftCount() * chunkFileSize; + + // leftWalSegmentSize will be 0 when CHUNK and WAL share file pool + uint64_t leftWalSegmentSize = + metric->GetWalSegmentLeftCount() * walSegmentFileSize; + uint64_t chunkPoolSize = options_.chunkFilePool->Size() * + options_.chunkFilePool->GetFilePoolOpt().fileSize; + + // compute format progress rate. 
+ const ChunkFormatStat &formatStat = + options_.chunkFilePool->GetChunkFormatStat(); // NOLINT + + stats->set_chunkfilepoolsize(chunkPoolSize); + stats->set_chunksizeusedbytes(usedChunkSize + usedWalSegmentSize); + stats->set_chunksizeleftbytes(leftChunkSize + leftWalSegmentSize); + stats->set_chunksizetrashedbytes(trashedChunkSize); + if (formatStat.preAllocateNum != 0) + { + stats->set_chunkfilepoolformatpercent( + 100 * formatStat.allocateChunkNum / formatStat.preAllocateNum); + } + else + { + stats->set_chunkfilepoolformatpercent(100); + } + req->set_allocated_stats(stats); + + size_t cap, avail; + ret = GetFileSystemSpaces(&cap, &avail); + if (ret != 0) + { + LOG(ERROR) << "Failed to get file system space information for path " + << storePath_; + return -1; + } + req->set_diskcapacity(cap); + req->set_diskused(cap - avail); + + std::vector copysets; + copysetMan_->GetAllCopysetNodes(©sets); + + req->set_copysetcount(copysets.size()); + int leaders = 0; + + for (CopysetNodePtr copyset : copysets) + { + curve::mds::heartbeat::CopySetInfo *info = req->add_copysetinfos(); + + ret = BuildCopysetInfo(info, copyset); + if (ret != 0) + { + LOG(ERROR) << "Failed to build heartbeat information of copyset " + << ToGroupIdStr(copyset->GetLogicPoolId(), + copyset->GetCopysetId()); + continue; + } + if (copyset->IsLeaderTerm()) + { + ++leaders; + } + } + req->set_leadercount(leaders); + req->set_version(curve::common::CurveVersion()); + + return 0; } - // 解析是否有配置变更需要执行 - if (!conf.has_type()) { - LOG(INFO) << "Failed to parse task for copyset(" - << conf.logicalpoolid() << "," << conf.copysetid() - << "), groupId: " - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); - continue; + void Heartbeat::DumpHeartbeatRequest(const HeartbeatRequest &request) + { + DVLOG(6) << "Heartbeat request: Chunkserver ID: " << request.chunkserverid() + << ", IP: " << request.ip() << ", port: " << request.port() + << ", copyset count: " << request.copysetcount() + << ", leader count: " << request.leadercount(); + for (int i = 0; i < request.copysetinfos_size(); i++) + { + const curve::mds::heartbeat::CopySetInfo &info = + request.copysetinfos(i); + + std::string peersStr = ""; + for (int j = 0; j < info.peers_size(); j++) + { + peersStr += info.peers(j).address() + ","; + } + + DVLOG(6) << "Copyset " << i << " " + << ToGroupIdStr(info.logicalpoolid(), info.copysetid()) + << ", epoch: " << info.epoch() + << ", leader: " << info.leaderpeer().address() + << ", peers: " << peersStr; + + if (info.has_configchangeinfo()) + { + const ConfigChangeInfo &cxInfo = info.configchangeinfo(); + DVLOG(6) << "Config change info: peer: " << cxInfo.peer().address() + << ", finished: " << cxInfo.finished() + << ", errno: " << cxInfo.err().errtype() + << ", errmsg: " << cxInfo.err().errmsg(); + } + } } - // 如果有配置变更需要执行,下发变更到copyset - if (!HeartbeatHelper::PeerVaild(conf.configchangeitem().address())) { - continue; + void Heartbeat::DumpHeartbeatResponse(const HeartbeatResponse &response) + { + int count = response.needupdatecopysets_size(); + if (count > 0) + { + LOG(INFO) << "Received " << count << " config change commands:"; + for (int i = 0; i < count; i++) + { + CopySetConf conf = response.needupdatecopysets(i); + + int type = (conf.has_type()) ? conf.type() : 0; + std::string item = (conf.has_configchangeitem()) + ? 
conf.configchangeitem().address() + : ""; + + std::string peersStr = ""; + for (int j = 0; j < conf.peers_size(); j++) + { + peersStr += conf.peers(j).address(); + } + + LOG(INFO) << "Config change " << i << ": " + << "Copyset < " << conf.logicalpoolid() << ", " + << conf.copysetid() << ">, epoch: " << conf.epoch() + << ", Peers: " << peersStr << ", type: " << type + << ", item: " << item; + } + } + else + { + LOG(INFO) << "Received no config change command."; + } } - if (conf.epoch() != copyset->GetConfEpoch()) { - LOG(WARNING) << "Config change epoch:" << conf.epoch() - << " is not same as current:" << copyset->GetConfEpoch() - << " on copyset(" - << conf.logicalpoolid() << "," << conf.copysetid() - << "), groupId: " - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) - << ", refuse change"; - continue; + int Heartbeat::SendHeartbeat(const HeartbeatRequest &request, + HeartbeatResponse *response) + { + brpc::Channel channel; + if (channel.Init(mdsEps_[inServiceIndex_].c_str(), NULL) != 0) + { + LOG(ERROR) << csEp_.ip << ":" << csEp_.port + << " Fail to init channel to MDS " + << mdsEps_[inServiceIndex_]; + return -1; + } + + curve::mds::heartbeat::HeartbeatService_Stub stub(&channel); + brpc::Controller cntl; + cntl.set_timeout_ms(options_.timeout); + + DumpHeartbeatRequest(request); + + stub.ChunkServerHeartbeat(&cntl, &request, response, nullptr); + if (cntl.Failed()) + { + if (cntl.ErrorCode() == EHOSTDOWN || cntl.ErrorCode() == ETIMEDOUT || + cntl.ErrorCode() == brpc::ELOGOFF || + cntl.ErrorCode() == brpc::ERPCTIMEDOUT) + { + LOG(WARNING) << "current mds: " << mdsEps_[inServiceIndex_] + << " is shutdown or going to quit," + << cntl.ErrorText(); + inServiceIndex_ = (inServiceIndex_ + 1) % mdsEps_.size(); + LOG(INFO) << "next heartbeat switch to " + << mdsEps_[inServiceIndex_]; + } + else + { + LOG(ERROR) << csEp_.ip << ":" << csEp_.port + << " Fail to send heartbeat to MDS " + << mdsEps_[inServiceIndex_] << "," + << " cntl errorCode: " << cntl.ErrorCode() + << " cntl error: " << cntl.ErrorText(); + } + return -1; + } + else + { + DumpHeartbeatResponse(*response); + } + + return 0; } - // 根据不同的变更类型下发配置 - switch (conf.type()) { - case curve::mds::heartbeat::TRANSFER_LEADER: + int Heartbeat::ExecTask(const HeartbeatResponse &response) + { + int count = response.needupdatecopysets_size(); + for (int i = 0; i < count; i++) { - if (!HeartbeatHelper::ChunkServerLoadCopySetFin( - conf.configchangeitem().address())) { + CopySetConf conf = response.needupdatecopysets(i); + CopysetNodePtr copyset = + copysetMan_->GetCopysetNode(conf.logicalpoolid(), conf.copysetid()); + + // Determine whether copyconf is legal + if (!HeartbeatHelper::CopySetConfValid(conf, copyset)) + { + continue; + } + + // Resolve whether the copyset on the chunkserver needs to be deleted + // If deletion is required, clean the copyset + if (HeartbeatHelper::NeedPurge(csEp_, conf, copyset)) + { + LOG(INFO) << "Clean peer " << csEp_ << " of copyset(" + << conf.logicalpoolid() << "," << conf.copysetid() + << "), groupId: " + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); + PurgeCopyset(conf.logicalpoolid(), conf.copysetid()); + continue; + } + + // Resolve if there are any configuration changes that need to be + // executed + if (!conf.has_type()) + { + LOG(INFO) << "Failed to parse task for copyset(" + << conf.logicalpoolid() << "," << conf.copysetid() + << "), groupId: " + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); + continue; + } + + // If there are configuration changes that need to be 
executed, issue + // the changes to the copyset + if (!HeartbeatHelper::PeerVaild(conf.configchangeitem().address())) + { + continue; + } + + if (conf.epoch() != copyset->GetConfEpoch()) + { + LOG(WARNING) << "Config change epoch:" << conf.epoch() + << " is not same as current:" + << copyset->GetConfEpoch() << " on copyset(" + << conf.logicalpoolid() << "," << conf.copysetid() + << "), groupId: " + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) + << ", refuse change"; + continue; + } + + // Distribute configurations based on different change types + switch (conf.type()) + { + case curve::mds::heartbeat::TRANSFER_LEADER: + { + if (!HeartbeatHelper::ChunkServerLoadCopySetFin( + conf.configchangeitem().address())) + { + LOG(INFO) + << "Transfer leader to " + << conf.configchangeitem().address() << " on copyset" + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) + << " reject. target chunkserver is loading copyset"; + break; + } + LOG(INFO) << "Transfer leader to " - << conf.configchangeitem().address() << " on copyset" - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) - << " reject. target chunkserver is loading copyset"; + << conf.configchangeitem().address() << " on copyset" + << ToGroupIdStr(conf.logicalpoolid(), + conf.copysetid()); + copyset->TransferLeader(conf.configchangeitem()); break; } - LOG(INFO) << "Transfer leader to " - << conf.configchangeitem().address() << " on copyset" - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); - copyset->TransferLeader(conf.configchangeitem()); + case curve::mds::heartbeat::ADD_PEER: + LOG(INFO) << "Adding peer " << conf.configchangeitem().address() + << " to copyset" + << ToGroupIdStr(conf.logicalpoolid(), + conf.copysetid()); + copyset->AddPeer(conf.configchangeitem()); + break; + + case curve::mds::heartbeat::REMOVE_PEER: + LOG(INFO) << "Removing peer " + << conf.configchangeitem().address() + << " from copyset" + << ToGroupIdStr(conf.logicalpoolid(), + conf.copysetid()); + copyset->RemovePeer(conf.configchangeitem()); + break; + + case curve::mds::heartbeat::CHANGE_PEER: + { + std::vector newPeers; + if (HeartbeatHelper::BuildNewPeers(conf, &newPeers)) + { + LOG(INFO) + << "Change peer from " << conf.oldpeer().address() + << " to " << conf.configchangeitem().address() + << " on copyset" + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); + copyset->ChangePeer(newPeers); + } + else + { + LOG(ERROR) + << "Build new peer for copyset" + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) + << " failed"; + } + } break; - } - case curve::mds::heartbeat::ADD_PEER: - LOG(INFO) << "Adding peer " << conf.configchangeitem().address() - << " to copyset" - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); - copyset->AddPeer(conf.configchangeitem()); - break; - - case curve::mds::heartbeat::REMOVE_PEER: - LOG(INFO) << "Removing peer " << conf.configchangeitem().address() - << " from copyset" - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); - copyset->RemovePeer(conf.configchangeitem()); - break; - - case curve::mds::heartbeat::CHANGE_PEER: - { - std::vector newPeers; - if (HeartbeatHelper::BuildNewPeers(conf, &newPeers)) { - LOG(INFO) << "Change peer from " - << conf.oldpeer().address() << " to " - << conf.configchangeitem().address() << " on copyset" - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); - copyset->ChangePeer(newPeers); - } else { - LOG(ERROR) << "Build new peer for copyset" - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) - << " failed"; + case 
curve::mds::heartbeat::START_SCAN_PEER: + { + ConfigChangeType type; + Configuration tmpConf; + Peer peer; + LogicPoolID poolId = conf.logicalpoolid(); + CopysetID copysetId = conf.copysetid(); + int ret = copyset->GetConfChange(&type, &tmpConf, &peer); + // if copyset happen conf change, can't scan and wait retry + if (0 != ret) + { + LOG(ERROR) << "Failed to get config change state of copyset" + << ToGroupIdStr(poolId, copysetId); + return ret; + } + else if (type != curve::mds::heartbeat::NONE) + { + LOG(INFO) << "drop scan peer request to copyset: " + << ToGroupIdStr(poolId, copysetId) + << " because exist config" + << " ConfigChangeType: " << type; + } + else + { + LOG(INFO) + << "Scan peer " << conf.configchangeitem().address() + << "to copyset " << ToGroupIdStr(poolId, copysetId); + scanMan_->Enqueue(poolId, copysetId); + } } - } - break; + break; - case curve::mds::heartbeat::START_SCAN_PEER: - { - ConfigChangeType type; - Configuration tmpConf; - Peer peer; - LogicPoolID poolId = conf.logicalpoolid(); - CopysetID copysetId = conf.copysetid(); - int ret = copyset->GetConfChange(&type, &tmpConf, &peer); - // if copyset happen conf change, can't scan and wait retry - if (0 != ret) { - LOG(ERROR) << "Failed to get config change state of copyset" - << ToGroupIdStr(poolId, copysetId); + case curve::mds::heartbeat::CANCEL_SCAN_PEER: + { + // todo Abnormal scenario + LogicPoolID poolId = conf.logicalpoolid(); + CopysetID copysetId = conf.copysetid(); + int ret = scanMan_->CancelScanJob(poolId, copysetId); + if (ret < 0) + { + LOG(ERROR) + << "cancel scan peer failed, " + << "peer address: " << conf.configchangeitem().address() + << "copyset groupId: " + << ToGroupIdStr(poolId, copysetId); + } return ret; - } else if (type != curve::mds::heartbeat::NONE) { - LOG(INFO) << "drop scan peer request to copyset: " - << ToGroupIdStr(poolId, copysetId) - << " because exist config" - << " ConfigChangeType: " << type; - } else { - LOG(INFO) << "Scan peer " - << conf.configchangeitem().address() - << "to copyset " - << ToGroupIdStr(poolId, copysetId); - scanMan_->Enqueue(poolId, copysetId); } - } - break; + break; - case curve::mds::heartbeat::CANCEL_SCAN_PEER: - { - // todo Abnormal scenario - LogicPoolID poolId = conf.logicalpoolid(); - CopysetID copysetId = conf.copysetid(); - int ret = scanMan_->CancelScanJob(poolId, copysetId); - if (ret < 0) { - LOG(ERROR) << "cancel scan peer failed, " - << "peer address: " - << conf.configchangeitem().address() - << "copyset groupId: " - << ToGroupIdStr(poolId, copysetId); + default: + LOG(ERROR) << "Invalid configchange type: " << conf.type(); + break; } - return ret; } - break; - default: - LOG(ERROR) << "Invalid configchange type: " << conf.type(); - break; - } - } - - return 0; -} - -void Heartbeat::HeartbeatWorker() { - int ret; - int errorIntervalSec = 2; - - LOG(INFO) << "Starting Heartbeat worker thread."; - - // 处理配置等于0等异常情况 - if (options_.intervalSec <= 4) { - errorIntervalSec = 2; - } else { - errorIntervalSec = options_.intervalSec / 2; - } - - while (!toStop_.load(std::memory_order_acquire)) { - HeartbeatRequest req; - HeartbeatResponse resp; - - LOG(INFO) << "building heartbeat info"; - ret = BuildRequest(&req); - if (ret != 0) { - LOG(ERROR) << "Failed to build heartbeat request"; - ::sleep(errorIntervalSec); - continue; + return 0; } - LOG(INFO) << "sending heartbeat info"; - ret = SendHeartbeat(req, &resp); - if (ret != 0) { - LOG(WARNING) << "Failed to send heartbeat to MDS"; - ::sleep(errorIntervalSec); - continue; - } + void 
Heartbeat::HeartbeatWorker() + { + int ret; + int errorIntervalSec = 2; - LOG(INFO) << "executing heartbeat info"; - ret = ExecTask(resp); - if (ret != 0) { - LOG(ERROR) << "Failed to execute heartbeat tasks"; - ::sleep(errorIntervalSec); - continue; - } + LOG(INFO) << "Starting Heartbeat worker thread."; + + // Handling abnormal situations such as configuration equal to 0 + if (options_.intervalSec <= 4) + { + errorIntervalSec = 2; + } + else + { + errorIntervalSec = options_.intervalSec / 2; + } + + while (!toStop_.load(std::memory_order_acquire)) + { + HeartbeatRequest req; + HeartbeatResponse resp; + + LOG(INFO) << "building heartbeat info"; + ret = BuildRequest(&req); + if (ret != 0) + { + LOG(ERROR) << "Failed to build heartbeat request"; + ::sleep(errorIntervalSec); + continue; + } + + LOG(INFO) << "sending heartbeat info"; + ret = SendHeartbeat(req, &resp); + if (ret != 0) + { + LOG(WARNING) << "Failed to send heartbeat to MDS"; + ::sleep(errorIntervalSec); + continue; + } - waitInterval_.WaitForNextExcution(); - } + LOG(INFO) << "executing heartbeat info"; + ret = ExecTask(resp); + if (ret != 0) + { + LOG(ERROR) << "Failed to execute heartbeat tasks"; + ::sleep(errorIntervalSec); + continue; + } - LOG(INFO) << "Heartbeat worker thread stopped."; -} + waitInterval_.WaitForNextExcution(); + } + + LOG(INFO) << "Heartbeat worker thread stopped."; + } -} // namespace chunkserver -} // namespace curve + } // namespace chunkserver +} // namespace curve diff --git a/src/chunkserver/heartbeat.h b/src/chunkserver/heartbeat.h index df86d8e88a..16d5c1a1fa 100644 --- a/src/chunkserver/heartbeat.h +++ b/src/chunkserver/heartbeat.h @@ -24,58 +24,58 @@ #ifndef SRC_CHUNKSERVER_HEARTBEAT_H_ #define SRC_CHUNKSERVER_HEARTBEAT_H_ +#include // NodeImpl #include -#include // NodeImpl -#include -#include -#include #include +#include #include +#include #include //NOLINT +#include #include "include/chunkserver/chunkserver_common.h" -#include "src/chunkserver/copyset_node_manager.h" -#include "src/common/wait_interval.h" -#include "src/common/concurrent/concurrent.h" -#include "src/chunkserver/scan_manager.h" #include "proto/heartbeat.pb.h" #include "proto/scan.pb.h" +#include "src/chunkserver/copyset_node_manager.h" +#include "src/chunkserver/scan_manager.h" +#include "src/common/concurrent/concurrent.h" +#include "src/common/wait_interval.h" using ::curve::common::Thread; namespace curve { namespace chunkserver { -using HeartbeatRequest = curve::mds::heartbeat::ChunkServerHeartbeatRequest; +using HeartbeatRequest = curve::mds::heartbeat::ChunkServerHeartbeatRequest; using HeartbeatResponse = curve::mds::heartbeat::ChunkServerHeartbeatResponse; -using ConfigChangeInfo = curve::mds::heartbeat::ConfigChangeInfo; -using CopySetConf = curve::mds::heartbeat::CopySetConf; -using CandidateError = curve::mds::heartbeat::CandidateError; -using TaskStatus = butil::Status; -using CopysetNodePtr = std::shared_ptr; +using ConfigChangeInfo = curve::mds::heartbeat::ConfigChangeInfo; +using CopySetConf = curve::mds::heartbeat::CopySetConf; +using CandidateError = curve::mds::heartbeat::CandidateError; +using TaskStatus = butil::Status; +using CopysetNodePtr = std::shared_ptr; /** - * 心跳子系统选项 + * Heartbeat subsystem options */ struct HeartbeatOptions { - ChunkServerID chunkserverId; - std::string chunkserverToken; - std::string storeUri; - std::string mdsListenAddr; - std::string ip; - uint32_t port; - uint32_t intervalSec; - uint32_t timeout; - CopysetNodeManager* copysetNodeManager; - ScanManager* scanManager; + 
ChunkServerID chunkserverId; + std::string chunkserverToken; + std::string storeUri; + std::string mdsListenAddr; + std::string ip; + uint32_t port; + uint32_t intervalSec; + uint32_t timeout; + CopysetNodeManager* copysetNodeManager; + ScanManager* scanManager; std::shared_ptr fs; std::shared_ptr chunkFilePool; }; /** - * 心跳子系统处理模块 + * Heartbeat subsystem processing module */ class Heartbeat { public: @@ -83,110 +83,110 @@ class Heartbeat { ~Heartbeat() {} /** - * @brief 初始化心跳子系统 - * @param[in] options 心跳子系统选项 - * @return 0:成功,非0失败 + * @brief Initialize heartbeat subsystem + * @param[in] options Heartbeat subsystem options + * @return 0: Success, non 0 failure */ int Init(const HeartbeatOptions& options); /** - * @brief 清理心跳子系统 - * @return 0:成功,非0失败 + * @brief Clean heartbeat subsystem + * @return 0: Success, non 0 failure */ int Fini(); /** - * @brief 启动心跳子系统 - * @return 0:成功,非0失败 + * @brief: Start the heartbeat subsystem + * @return 0: Success, non 0 failure */ int Run(); private: /** - * @brief 停止心跳子系统 - * @return 0:成功,非0失败 + * @brief Stop heartbeat subsystem + * @return 0: Success, non 0 failure */ int Stop(); /* - * 心跳工作线程 + * Heartbeat Worker Thread */ void HeartbeatWorker(); /* - * 获取Chunkserver存储空间信息 + * Obtain Chunkserver storage space information */ int GetFileSystemSpaces(size_t* capacity, size_t* free); /* - * 构建心跳消息的Copyset信息项 + * Building a Copyset information item for heartbeat messages */ int BuildCopysetInfo(curve::mds::heartbeat::CopySetInfo* info, CopysetNodePtr copyset); /* - * 构建心跳请求 + * Build Heartbeat Request */ int BuildRequest(HeartbeatRequest* request); /* - * 发送心跳消息 + * Send heartbeat message */ int SendHeartbeat(const HeartbeatRequest& request, HeartbeatResponse* response); /* - * 执行心跳任务 + * Perform Heartbeat Tasks */ int ExecTask(const HeartbeatResponse& response); /* - * 输出心跳请求信息 + * Output heartbeat request information */ void DumpHeartbeatRequest(const HeartbeatRequest& request); /* - * 输出心跳回应信息 + * Output heartbeat response information */ void DumpHeartbeatResponse(const HeartbeatResponse& response); /* - * 清理复制组实例及持久化数据 + * Clean up replication group instances and persist data */ TaskStatus PurgeCopyset(LogicPoolID poolId, CopysetID copysetId); private: - // 心跳线程 + // Heartbeat Thread Thread hbThread_; - // 控制心跳模块运行或停止 + // Control the heartbeat module to run or stop std::atomic toStop_; - // 使用定时器 + // Using a timer ::curve::common::WaitInterval waitInterval_; - // Copyset管理模块 + // Copyset Management Module CopysetNodeManager* copysetMan_; - // ChunkServer目录 + // ChunkServer directory std::string storePath_; - // 心跳选项 + // Heartbeat Options HeartbeatOptions options_; - // MDS的地址 + // MDS address std::vector mdsEps_; - // 当前供服务的mds + // Current mds for service int inServiceIndex_; - // ChunkServer本身的地址 + // ChunkServer's own address butil::EndPoint csEp_; - // 模块初始化时间, unix时间 + // Module initialization time, unix time uint64_t startUpTime_; - ScanManager *scanMan_; + ScanManager* scanMan_; }; } // namespace chunkserver diff --git a/src/chunkserver/heartbeat_helper.cpp b/src/chunkserver/heartbeat_helper.cpp index 02a2fc65c9..bc9bbd3708 100644 --- a/src/chunkserver/heartbeat_helper.cpp +++ b/src/chunkserver/heartbeat_helper.cpp @@ -20,34 +20,37 @@ * Author: lixiaocui */ -#include +#include "src/chunkserver/heartbeat_helper.h" + #include #include +#include + #include -#include "src/chunkserver/heartbeat_helper.h" + #include "include/chunkserver/chunkserver_common.h" #include "proto/chunkserver.pb.h" namespace curve { namespace chunkserver { -bool 
HeartbeatHelper::BuildNewPeers(
-    const CopySetConf &conf, std::vector *newPeers) {
-    // 检验目标节点和待删除节点是否有效
+bool HeartbeatHelper::BuildNewPeers(const CopySetConf& conf,
+                                    std::vector<Peer>* newPeers) {
+    // Verify that the target node and the node to be deleted are valid
     std::string target(conf.configchangeitem().address());
     std::string old(conf.oldpeer().address());
     if (!PeerVaild(target) || !PeerVaild(old)) {
         return false;
     }

-    // 生成newPeers
+    // Generate newPeers
     for (int i = 0; i < conf.peers_size(); i++) {
         std::string peer = conf.peers(i).address();
-        // 检验conf中的peer是否有效
+        // Verify that each peer in conf is valid
         if (!PeerVaild(peer)) {
             return false;
         }

-        // newPeers中不包含old副本
+        // newPeers must not contain the old replica
         if (conf.peers(i).address() != old) {
             newPeers->emplace_back(conf.peers(i));
         }
@@ -57,49 +60,51 @@ bool HeartbeatHelper::BuildNewPeers(
     return true;
 }

-bool HeartbeatHelper::PeerVaild(const std::string &peer) {
+bool HeartbeatHelper::PeerVaild(const std::string& peer) {
     PeerId peerId;
     return 0 == peerId.parse(peer);
 }

-bool HeartbeatHelper::CopySetConfValid(
-    const CopySetConf &conf, const CopysetNodePtr &copyset) {
-    // chunkserver中不存在需要变更的copyset, 报警
+bool HeartbeatHelper::CopySetConfValid(const CopySetConf& conf,
+                                       const CopysetNodePtr& copyset) {
+    // The copyset to be changed does not exist on this chunkserver; raise an
+    // alarm
     if (copyset == nullptr) {
-        LOG(ERROR) << "Failed to find copyset(" << conf.logicalpoolid()
-            << "," << conf.copysetid() << "), groupId: "
-            << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid());
+        LOG(ERROR) << "Failed to find copyset(" << conf.logicalpoolid() << ","
+                   << conf.copysetid() << "), groupId: "
+                   << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid());
         return false;
     }

-    // 下发的变更epoch < copyset实际的epoch,报错
+    // The epoch of the issued change is smaller than the copyset's actual
+    // epoch; report an error
    if (conf.epoch() < copyset->GetConfEpoch()) {
        LOG(WARNING) << "Config change epoch:" << conf.epoch()
-            << " is smaller than current:" << copyset->GetConfEpoch()
-            << " on copyset("
-            << conf.logicalpoolid() << "," << conf.copysetid()
-            << "), groupId: "
-            << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid())
-            << ", refuse change";
+                     << " is smaller than current:" << copyset->GetConfEpoch()
+                     << " on copyset(" << conf.logicalpoolid() << ","
+                     << conf.copysetid() << "), groupId: "
+                     << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid())
+                     << ", refuse change";
        return false;
    }

    return true;
 }

-bool HeartbeatHelper::NeedPurge(const butil::EndPoint &csEp,
-    const CopySetConf &conf, const CopysetNodePtr &copyset) {
+bool HeartbeatHelper::NeedPurge(const butil::EndPoint& csEp,
+                                const CopySetConf& conf,
+                                const CopysetNodePtr& copyset) {
    (void)copyset;
-    // CLDCFS-1004 bug-fix: mds下发epoch为0, 配置为空的copyset
+    // CLDCFS-1004 bug-fix: mds issued a copyset with epoch 0 and an empty
+    // configuration
    if (0 == conf.epoch() && conf.peers().empty()) {
        LOG(INFO) << "Clean copyset "
-            << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid())
-            << "in peer " << csEp
-            << ", witch is not exist in mds record";
+                  << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid())
+                  << " in peer " << csEp
+                  << ", which does not exist in mds record";
        return true;
    }

-    // 该chunkserrver不在copyset的配置中,需要清理
+    // This chunkserver is not in the copyset's configuration and needs to
+    // be cleaned up
    std::string chunkserverEp = std::string(butil::endpoint2str(csEp).c_str());
    for (int i = 0; i < conf.peers_size(); i++) {
        if (conf.peers(i).address().find(chunkserverEp) != std::string::npos) {
@@ -117,7 +122,7 @@ bool HeartbeatHelper::ChunkServerLoadCopySetFin(const std::string peerId) {
     PeerId peer;
     peer.parse(peerId);
-    const char *ip = butil::ip2str(peer.addr.ip).c_str();
+    const char* ip = butil::ip2str(peer.addr.ip).c_str();
     int port = peer.addr.port;
     brpc::Channel channel;
     if (channel.Init(ip, port, NULL) != 0) {
@@ -133,7 +138,7 @@ bool HeartbeatHelper::ChunkServerLoadCopySetFin(const std::string peerId) {
     stub.ChunkServerStatus(&cntl, &req, &rep, nullptr);
     if (cntl.Failed()) {
         LOG(WARNING) << "Send ChunkServerStatusRequest failed, cntl.errorText ="
-            << cntl.ErrorText();
+                     << cntl.ErrorText();
         return false;
     }

@@ -142,4 +147,3 @@
 } // namespace chunkserver
 } // namespace curve
-
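BuildNewPeers above builds the ChangePeer target configuration by keeping every valid peer from the mds-reported configuration except the replica being removed. A standalone sketch of that filtering step; the addresses are hypothetical, plain strings stand in for common::Peer, and whether the target peer is appended inside the helper or supplied separately is outside this hunk:

    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
        // Hypothetical copyset configuration reported by mds ("ip:port:0").
        std::vector<std::string> peers = {"10.0.0.1:8200:0", "10.0.0.2:8200:0",
                                          "10.0.0.3:8200:0"};
        const std::string old = "10.0.0.3:8200:0";  // replica being replaced

        // Same filtering as BuildNewPeers: drop the old replica, keep the rest.
        std::vector<std::string> newPeers;
        for (const std::string& p : peers) {
            if (p != old) {
                newPeers.push_back(p);
            }
        }

        for (const std::string& p : newPeers) {
            std::cout << p << "\n";  // prints the two remaining peers
        }
        return 0;
    }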
diff --git a/src/chunkserver/heartbeat_helper.h b/src/chunkserver/heartbeat_helper.h
index 43ada5f6ea..c06fedb61b 100644
--- a/src/chunkserver/heartbeat_helper.h
+++ b/src/chunkserver/heartbeat_helper.h
@@ -24,74 +24,83 @@
 #define SRC_CHUNKSERVER_HEARTBEAT_HELPER_H_

 #include
-#include
 #include
 #include
+#include
+
 #include "proto/heartbeat.pb.h"
 #include "src/chunkserver/copyset_node.h"

 namespace curve {
 namespace chunkserver {

-using ::curve::mds::heartbeat::CopySetConf;
-using ::curve::common::Peer;
+using ::curve::common::Peer;
+using ::curve::mds::heartbeat::CopySetConf;

 using CopysetNodePtr = std::shared_ptr<CopysetNode>;

 class HeartbeatHelper {
 public:
    /**
-     * 根据mds下发的conf构建出指定复制组的新配置,给ChangePeer使用
+     * Build the new configuration of the specified replication group from
+     * the conf issued by mds, for use by ChangePeer
     *
-     * @param[in] conf mds下发的变更命令needupdatecopyset[i]
-     * @param[out] newPeers 指定复制组的目标配置
+     * @param[in] conf the change command needupdatecopyset[i] issued by mds
+     * @param[out] newPeers the target configuration of the replication
+     * group
     *
-     * @return false-生成newpeers失败 true-生成newpeers成功
+     * @return false - failed to generate newPeers; true - newPeers
+     * generated successfully
     */
-    static bool BuildNewPeers(
-        const CopySetConf &conf, std::vector *newPeers);
+    static bool BuildNewPeers(const CopySetConf& conf,
+                              std::vector<Peer>* newPeers);

    /**
-     * 判断字符串peer(正确的形式为: ip:port:0)是否有效
+     * Determine whether the string peer (correct form: ip:port:0) is valid
     *
-     * @param[in] peer 指定字符串
+     * @param[in] peer the string to check
     *
-     * @return false-无效 true-有效
+     * @return false - invalid; true - valid
     */
-    static bool PeerVaild(const std::string &peer);
+    static bool PeerVaild(const std::string& peer);

    /**
-     * 判断mds下发过来的copysetConf是否合法,以下两种情况不合法:
-     * 1. chunkserver中不存在该copyset
-     * 2. mds下发的copyset中记录的epoch小于chunkserver上copyset此时的epoch
+     * Determine whether the copysetConf issued by mds is legal; the
+     * following two cases are illegal:
+     * 1. The copyset does not exist on the chunkserver
+     * 2. The epoch recorded in the copyset issued by mds is smaller than
+     * the current epoch of that copyset on the chunkserver
     *
-     * @param[in] conf mds下发的变更命令needupdatecopyset[i]
-     * @param[in] copyset chunkserver上对应的copyset
+     * @param[in] conf the change command needupdatecopyset[i] issued by mds
+     * @param[in] copyset the corresponding copyset on the chunkserver
     *
-     * @return false-copysetConf不合法,true-copysetConf合法
+     * @return false - copysetConf is illegal; true - copysetConf is legal
     */
-    static bool CopySetConfValid(
-        const CopySetConf &conf, const CopysetNodePtr &copyset);
+    static bool CopySetConfValid(const CopySetConf& conf,
+                                 const CopysetNodePtr& copyset);

    /**
-     * 判断chunkserver(csEp)中指定copyset是否需要删除
+     * Determine whether the specified copyset on chunkserver(csEp) needs to
+     * be deleted
     *
-     * @param[in] csEp 该chunkserver的ip:port
-     * @param[in] conf mds下发的变更命令needupdatecopyset[i]
-     * @param[in] copyset chunkserver上对应的copyset
+     * @param[in] csEp the ip:port of this chunkserver
+     * @param[in] conf the change command needupdatecopyset[i] issued by mds
+     * @param[in] copyset the corresponding copyset on the chunkserver
     *
-     * @return false-该chunkserver上的copyset无需清理;
-     *         true-该chunkserver上的copyset需要清理
+     * @return false - the copyset on this chunkserver does not need to be
+     *         cleaned up; true - it needs to be cleaned up
     */
-    static bool NeedPurge(const butil::EndPoint &csEp, const CopySetConf &conf,
-        const CopysetNodePtr &copyset);
+    static bool NeedPurge(const butil::EndPoint& csEp, const CopySetConf& conf,
+                          const CopysetNodePtr& copyset);

    /**
-     * 判断指定chunkserver copyset是否已经加载完毕
+     * Determine whether the specified chunkserver has finished loading its
+     * copysets
     *
-     * @return false-copyset加载完毕 true-copyset未加载完成
+     * @return true - copyset loading has finished; false - copyset loading
+     * has not finished yet
     */
    static bool ChunkServerLoadCopySetFin(const std::string ipPort);
 };
 } // namespace chunkserver
 } // namespace curve

 #endif  // SRC_CHUNKSERVER_HEARTBEAT_HELPER_H_
-
diff --git a/src/chunkserver/inflight_throttle.h b/src/chunkserver/inflight_throttle.h
index 86af93daf7..71462b5e97 100644
--- a/src/chunkserver/inflight_throttle.h
+++ b/src/chunkserver/inflight_throttle.h
@@ -30,18 +30,17 @@ namespace curve {
 namespace chunkserver {

 /**
- * 负责控制最大inflight request数量
+ * Responsible for limiting the maximum number of inflight requests
 */
 class InflightThrottle {
 public:
    explicit InflightThrottle(uint64_t maxInflight)
-        : inflightRequestCount_(0),
-          kMaxInflightRequest_(maxInflight) { }
+        : inflightRequestCount_(0), kMaxInflightRequest_(maxInflight) {}
    virtual ~InflightThrottle() = default;

    /**
-     * @brief: 判断是否过载
-     * @return true,过载,false没有过载
+     * @brief: Determine whether the server is overloaded
+     * @return true - overloaded; false - not overloaded
     */
    inline bool IsOverLoad() {
        if (kMaxInflightRequest_ >=
@@ -53,23 +52,23 @@ class InflightThrottle {
    }

    /**
-     * @brief: inflight request计数加1
+     * @brief: Increase the inflight request count by 1
     */
    inline void Increment() {
        inflightRequestCount_.fetch_add(1, std::memory_order_relaxed);
    }

    /**
-     * @brief: inflight request计数减1
+     * @brief: Decrease the inflight request count by 1
     */
    inline void Decrement() {
        inflightRequestCount_.fetch_sub(1, std::memory_order_relaxed);
    }

 private:
-    // 当前inflight request数量
+    // Current number of inflight requests
    std::atomic<uint64_t> inflightRequestCount_;
-    // 最大的inflight request数量
+    // Maximum number of inflight requests
    const uint64_t kMaxInflightRequest_;
 };
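InflightThrottle above is just a relaxed atomic counter checked against a fixed limit: callers increment before dispatching a request and decrement when it completes. A compact usage sketch; the class is a trimmed copy of the header above and the admission loop is illustrative:

    #include <atomic>
    #include <cstdint>
    #include <iostream>

    // Trimmed copy of InflightThrottle from the header above.
    class InflightThrottle {
     public:
        explicit InflightThrottle(uint64_t maxInflight)
            : inflightRequestCount_(0), kMaxInflightRequest_(maxInflight) {}
        bool IsOverLoad() {
            // Overloaded once the inflight count exceeds the configured maximum.
            return inflightRequestCount_.load(std::memory_order_relaxed) >
                   kMaxInflightRequest_;
        }
        void Increment() {
            inflightRequestCount_.fetch_add(1, std::memory_order_relaxed);
        }
        void Decrement() {
            inflightRequestCount_.fetch_sub(1, std::memory_order_relaxed);
        }

     private:
        std::atomic<uint64_t> inflightRequestCount_;
        const uint64_t kMaxInflightRequest_;
    };

    int main() {
        InflightThrottle throttle(2);
        for (int i = 0; i < 4; ++i) {
            if (throttle.IsOverLoad()) {
                std::cout << "request " << i << " rejected: overloaded\n";
                continue;
            }
            throttle.Increment();  // admitted; a real server would call
                                   // Decrement() in the request's closure
            std::cout << "request " << i << " admitted\n";
        }
        return 0;
    }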
diff --git a/src/chunkserver/op_request.cpp b/src/chunkserver/op_request.cpp
index 817e65c79f..e03c079341 100755
--- a/src/chunkserver/op_request.cpp
+++ b/src/chunkserver/op_request.cpp
@@ -22,44 +22,41 @@

 #include "src/chunkserver/op_request.h"

-#include
+#include
 #include
 #include
-#include
+#include
 #include
 #include
 #include

-#include "src/chunkserver/copyset_node.h"
 #include "src/chunkserver/chunk_closure.h"
 #include "src/chunkserver/clone_manager.h"
 #include "src/chunkserver/clone_task.h"
+#include "src/chunkserver/copyset_node.h"

 namespace curve {
 namespace chunkserver {

-ChunkOpRequest::ChunkOpRequest() :
-    datastore_(nullptr),
-    node_(nullptr),
-    cntl_(nullptr),
-    request_(nullptr),
-    response_(nullptr),
-    done_(nullptr) {
-}
+ChunkOpRequest::ChunkOpRequest()
+    : datastore_(nullptr),
+      node_(nullptr),
+      cntl_(nullptr),
+      request_(nullptr),
+      response_(nullptr),
+      done_(nullptr) {}

 ChunkOpRequest::ChunkOpRequest(std::shared_ptr<CopysetNode> nodePtr,
-                               RpcController *cntl,
-                               const ChunkRequest *request,
-                               ChunkResponse *response,
-                               ::google::protobuf::Closure *done) :
-    datastore_(nodePtr->GetDataStore()),
-    node_(nodePtr),
-    cntl_(dynamic_cast(cntl)),
-    request_(request),
-    response_(response),
-    done_(done) {
-}
+                               RpcController* cntl, const ChunkRequest* request,
+                               ChunkResponse* response,
+                               ::google::protobuf::Closure* done)
+    : datastore_(nodePtr->GetDataStore()),
+      node_(nodePtr),
+      cntl_(dynamic_cast<brpc::Controller*>(cntl)),
+      request_(request),
+      response_(response),
+      done_(done) {}

 void ChunkOpRequest::Process() {
     brpc::ClosureGuard doneGuard(done_);
@@ -71,18 +68,19 @@ void ChunkOpRequest::Process() {
     }

     /**
-     * 如果propose成功,说明request成功交给了raft处理,
-     * 那么done_就不能被调用,只有propose失败了才需要提前返回
+     * If Propose succeeds, the request has been handed over to raft for
+     * processing, so done_ must not be called here; we only need to return
+     * early when Propose fails
     */
-    if (0 == Propose(request_, cntl_ ? &cntl_->request_attachment() :
-                     nullptr)) {
+    if (0 ==
+        Propose(request_, cntl_ ?
&cntl_->request_attachment() : nullptr)) { doneGuard.release(); } } -int ChunkOpRequest::Propose(const ChunkRequest *request, - const butil::IOBuf *data) { - // 打包op request为task +int ChunkOpRequest::Propose(const ChunkRequest* request, + const butil::IOBuf* data) { + // Pack op request as task braft::Task task; butil::IOBuf log; if (0 != Encode(request, data, &log)) { @@ -93,10 +91,13 @@ int ChunkOpRequest::Propose(const ChunkRequest *request, task.data = &log; task.done = new ChunkClosure(shared_from_this()); /** - * 由于apply是异步的,有可能某个节点在term1是leader,apply了一条log, - * 但是中间发生了主从切换,在很短的时间内这个节点又变为term3的leader, - * 之前apply的日志才开始进行处理,这种情况下要实现严格意义上的复制状态 - * 机,需要解决这种ABA问题,可以在apply的时候设置leader当时的term + * Due to the asynchronous nature of the application, it is possible that a + * node in term1 is a leader and has applied a log, But there was a + * master-slave switch in the middle, and in a short period of time, this + * node became the leader of term3 again, Previously applied logs were only + * processed, in which case strict replication status needs to be + * implemented To solve this ABA problem, you can set the term of the leader + * at the time of application */ task.expected_term = node_->LeaderTerm(); @@ -106,8 +107,8 @@ int ChunkOpRequest::Propose(const ChunkRequest *request, } void ChunkOpRequest::RedirectChunkRequest() { - // 编译时加上 --copt -DUSE_BTHREAD_MUTEX - // 否则可能发生死锁: CLDCFS-1120 + // Compile with --copt -DUSE_BTHREAD_MUTEX + // Otherwise, a deadlock may occur: CLDCFS-1120 // PeerId leader = node_->GetLeaderId(); // if (!leader.is_empty()) { // response_->set_redirect(leader.to_string()); @@ -115,9 +116,8 @@ void ChunkOpRequest::RedirectChunkRequest() { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); } -int ChunkOpRequest::Encode(const ChunkRequest *request, - const butil::IOBuf *data, - butil::IOBuf *log) { +int ChunkOpRequest::Encode(const ChunkRequest* request, + const butil::IOBuf* data, butil::IOBuf* log) { // 1.append request length const uint32_t metaSize = butil::HostToNet32(request->ByteSize()); log->append(&metaSize, sizeof(uint32_t)); @@ -135,8 +135,8 @@ int ChunkOpRequest::Encode(const ChunkRequest *request, } std::shared_ptr ChunkOpRequest::Decode(butil::IOBuf log, - ChunkRequest *request, - butil::IOBuf *data, + ChunkRequest* request, + butil::IOBuf* data, uint64_t index, PeerId leaderId) { uint32_t metaSize = 0; @@ -171,35 +171,35 @@ std::shared_ptr ChunkOpRequest::Decode(butil::IOBuf log, return std::make_shared(); case CHUNK_OP_TYPE::CHUNK_OP_SCAN: return std::make_shared(index, leaderId); - default:LOG(ERROR) << "Unknown chunk op"; + default: + LOG(ERROR) << "Unknown chunk op"; return nullptr; } } ApplyTaskType ChunkOpRequest::Schedule(CHUNK_OP_TYPE opType) { switch (opType) { - case CHUNK_OP_READ: - case CHUNK_OP_RECOVER: - return ApplyTaskType::READ; - default: - return ApplyTaskType::WRITE; + case CHUNK_OP_READ: + case CHUNK_OP_RECOVER: + return ApplyTaskType::READ; + default: + return ApplyTaskType::WRITE; } } namespace { uint64_t MaxAppliedIndex( - const std::shared_ptr& node, - uint64_t current) { + const std::shared_ptr& node, + uint64_t current) { return std::max(current, node->GetAppliedIndex()); } } // namespace void DeleteChunkRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - auto ret = datastore_->DeleteChunk(request_->chunkid(), - request_->sn()); + auto ret = datastore_->DeleteChunk(request_->chunkid(), request_->sn()); if 
(CSErrorCode::Success == ret) { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); node_->UpdateAppliedIndex(index); @@ -211,21 +211,19 @@ void DeleteChunkRequest::OnApply(uint64_t index, LOG(ERROR) << "delete chunk failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } response_->set_appliedindex(MaxAppliedIndex(node_, index)); } void DeleteChunkRequest::OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) { + const ChunkRequest& request, + const butil::IOBuf& data) { (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - auto ret = datastore->DeleteChunk(request.chunkid(), - request.sn()); - if (CSErrorCode::Success == ret) - return; + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing + auto ret = datastore->DeleteChunk(request.chunkid(), request.sn()); + if (CSErrorCode::Success == ret) return; if (CSErrorCode::InternalError == ret) { LOG(FATAL) << "delete failed: " @@ -239,16 +237,14 @@ void DeleteChunkRequest::OnApplyFromLog(std::shared_ptr datastore, } ReadChunkRequest::ReadChunkRequest(std::shared_ptr nodePtr, - CloneManager* cloneMgr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, cntl, request, response, done), - cloneMgr_(cloneMgr), - concurrentApplyModule_(nodePtr->GetConcurrentApplyModule()), - applyIndex(0) { -} + CloneManager* cloneMgr, RpcController* cntl, + const ChunkRequest* request, + ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, cntl, request, response, done), + cloneMgr_(cloneMgr), + concurrentApplyModule_(nodePtr->GetConcurrentApplyModule()), + applyIndex(0) {} void ReadChunkRequest::Process() { brpc::ClosureGuard doneGuard(done_); @@ -267,21 +263,20 @@ void ReadChunkRequest::Process() { * extend from std::enable_shared_from_this, * use shared_from_this() to return a shared_ptr */ - auto thisPtr - = std::dynamic_pointer_cast(shared_from_this()); + auto thisPtr = + std::dynamic_pointer_cast(shared_from_this()); /* * why push read requests to concurrent layer: * 1. all I/O operators including read and write requests are executed * in concurrent layer, we can separate disk I/O from other logic. * 2. ensure linear consistency of read semantics. 
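     *
     * For illustration (mirroring the call just below), the enqueue has the
     * shape
     *     concurrentApplyModule_->Push(chunkid, taskType, task);
     * where the chunk id is the dispatch key and the op type selects the
     * task class.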
*/ - auto task = std::bind(&ReadChunkRequest::OnApply, - thisPtr, - node_->GetAppliedIndex(), - doneGuard.release()); - concurrentApplyModule_->Push(request_->chunkid(), - ChunkOpRequest::Schedule(request_->optype()), // NOLINT - task); + auto task = std::bind(&ReadChunkRequest::OnApply, thisPtr, + node_->GetAppliedIndex(), doneGuard.release()); + concurrentApplyModule_->Push( + request_->chunkid(), + ChunkOpRequest::Schedule(request_->optype()), // NOLINT + task); return; } @@ -298,16 +293,19 @@ void ReadChunkRequest::Process() { } void ReadChunkRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { - // 先清除response中的status,以保证CheckForward后的判断的正确性 + ::google::protobuf::Closure* done) { + // Clear the status in the response first to ensure the correctness of the + // judgment after CheckForward response_->clear_status(); CSChunkInfo chunkInfo; - CSErrorCode errorCode = datastore_->GetChunkInfo(request_->chunkid(), - &chunkInfo); + CSErrorCode errorCode = + datastore_->GetChunkInfo(request_->chunkid(), &chunkInfo); do { bool needLazyClone = false; - // 如果需要Read的chunk不存在,但是请求包含Clone源信息,则尝试从Clone源读取数据 + // If the chunk that needs to be read does not exist, but the request + // contains Clone source information, try reading data from the Clone + // source if (CSErrorCode::ChunkNotExistError == errorCode) { if (existCloneInfo(request_)) { needLazyClone = true; @@ -324,14 +322,15 @@ void ReadChunkRequest::OnApply(uint64_t index, CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); break; } - // 如果需要从源端拷贝数据,需要将请求转发给clone manager处理 - if ( needLazyClone || NeedClone(chunkInfo) ) { + // If you need to copy data from the source, you need to forward the + // request to the clone manager for processing + if (needLazyClone || NeedClone(chunkInfo)) { applyIndex = index; - std::shared_ptr cloneTask = - cloneMgr_->GenerateCloneTask( + std::shared_ptr cloneTask = cloneMgr_->GenerateCloneTask( std::dynamic_pointer_cast(shared_from_this()), done); - // TODO(yyk) 尽量不能阻塞队列,后面要具体考虑 + // TODO(yyk) should try not to block the queue, and specific + // considerations should be taken later bool result = cloneMgr_->IssueCloneTask(cloneTask); if (!result) { LOG(ERROR) << "issue clone task failed: " @@ -340,14 +339,16 @@ void ReadChunkRequest::OnApply(uint64_t index, CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); break; } - // 如果请求成功转发给了clone manager就可以直接返回了 + // If the request is successfully forwarded to the clone manager, it + // can be returned directly return; } - // 如果是ReadChunk请求还需要从本地读取数据 + // If it is a ReadChunk request, data needs to be read locally if (request_->optype() == CHUNK_OP_TYPE::CHUNK_OP_READ) { ReadChunk(); } - // 如果是recover请求,说明请求区域已经被写过了,可以直接返回成功 + // If it is a recover request, it indicates that the request area has + // been written and can directly return success if (request_->optype() == CHUNK_OP_TYPE::CHUNK_OP_RECOVER) { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } @@ -362,57 +363,51 @@ void ReadChunkRequest::OnApply(uint64_t index, } void ReadChunkRequest::OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) { + const ChunkRequest& request, + const butil::IOBuf& data) { (void)datastore; (void)request; (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - // read什么都不用做 + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing Read doesn't need to do anything } bool ReadChunkRequest::NeedClone(const CSChunkInfo& chunkInfo) { - // 如果不是 clone chunk,就不需要拷贝 + // If 
it's not a clone chunk, there's no need to copy it
     if (chunkInfo.isClone) {
         off_t offset = request_->offset();
         size_t length = request_->size();
         uint32_t blockSize = chunkInfo.blockSize;
         uint32_t beginIndex = offset / blockSize;
         uint32_t endIndex = (offset + length - 1) / blockSize;
-        // 如果是clone chunk,且存在未被写过的page,就需要拷贝
-        if (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex)
-            != Bitmap::NO_POS) {
+        // If it is a clone chunk and it still has pages that were never
+        // written, the data needs to be copied
+        if (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex) !=
+            Bitmap::NO_POS) {
             return true;
         }
     }
     return false;
 }

-static void ReadBufferDeleter(void* ptr) {
-    delete[] static_cast(ptr);
-}
+static void ReadBufferDeleter(void* ptr) { delete[] static_cast<char*>(ptr); }

 void ReadChunkRequest::ReadChunk() {
-    char *readBuffer = nullptr;
+    char* readBuffer = nullptr;
     size_t size = request_->size();
-    readBuffer = new(std::nothrow)char[size];
-    CHECK(nullptr != readBuffer)
-        << "new readBuffer failed " << strerror(errno);
+    readBuffer = new (std::nothrow) char[size];
+    CHECK(nullptr != readBuffer) << "new readBuffer failed " << strerror(errno);

-    auto ret = datastore_->ReadChunk(request_->chunkid(),
-                                     request_->sn(),
-                                     readBuffer,
-                                     request_->offset(),
-                                     size);
+    auto ret = datastore_->ReadChunk(request_->chunkid(), request_->sn(),
+                                     readBuffer, request_->offset(), size);
     butil::IOBuf wrapper;
     wrapper.append_user_data(readBuffer, size, ReadBufferDeleter);
     if (CSErrorCode::Success == ret) {
         cntl_->response_attachment().append(wrapper);
         response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
     } else if (CSErrorCode::ChunkNotExistError == ret) {
-        response_->set_status(
-            CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST);
+        response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST);
     } else if (CSErrorCode::InternalError == ret) {
         LOG(FATAL) << "read failed: "
                    << " data store return: " << ret
@@ -421,50 +416,47 @@ void ReadChunkRequest::ReadChunk() {
         LOG(ERROR) << "read failed: "
                    << " data store return: " << ret
                    << ", request: " << request_->ShortDebugString();
-        response_->set_status(
-            CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
+        response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
     }
 }

 void WriteChunkRequest::OnApply(uint64_t index,
-                                ::google::protobuf::Closure *done) {
+                                ::google::protobuf::Closure* done) {
     brpc::ClosureGuard doneGuard(done);
     uint32_t cost;
-    std::string cloneSourceLocation;
+    std::string cloneSourceLocation;
     if (existCloneInfo(request_)) {
         auto func = ::curve::common::LocationOperator::GenerateCurveLocation;
-        cloneSourceLocation = func(request_->clonefilesource(),
-            request_->clonefileoffset());
+        cloneSourceLocation =
+            func(request_->clonefilesource(), request_->clonefileoffset());
     }

-    auto ret = datastore_->WriteChunk(request_->chunkid(),
-                                      request_->sn(),
-                                      cntl_->request_attachment(),
-                                      request_->offset(),
-                                      request_->size(),
-                                      &cost,
-                                      cloneSourceLocation);
+    auto ret = datastore_->WriteChunk(
+        request_->chunkid(), request_->sn(), cntl_->request_attachment(),
+        request_->offset(), request_->size(), &cost, cloneSourceLocation);

     if (CSErrorCode::Success == ret) {
         response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
         node_->UpdateAppliedIndex(index);
     } else if (CSErrorCode::BackwardRequestError == ret) {
-        // 打快照那一刻是有可能出现旧版本的请求
-        // 返回错误给客户端,让客户端带新版本来重试
+        // Around the moment a snapshot is taken, requests carrying an older
+        // version may still arrive. Return an error to the client so that it
+        // retries with the new version
         LOG(WARNING) << "write failed: "
                      << " data store return: " << ret
                      << ", request: " << request_->ShortDebugString();
-        response_->set_status(
-            CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD);
+        response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD);
     } else if (CSErrorCode::InternalError == ret ||
                CSErrorCode::CrcCheckError == ret ||
                CSErrorCode::FileFormatError == ret) {
         /**
-         * internalerror一般是磁盘错误,为了防止副本不一致,让进程退出
-         * TODO(yyk): 当前遇到write错误直接fatal退出整个
-         * ChunkServer后期考虑仅仅标坏这个copyset,保证较好的可用性
-         */
+         * An internal error is usually a disk error. To prevent replica
+         * inconsistency, the process is made to exit.
+         * TODO(yyk): A write error currently makes the whole ChunkServer
+         * exit fatally; a later change may only mark this copyset as bad,
+         * to provide better availability
+         */
         LOG(FATAL) << "write failed: "
                    << " data store return: " << ret
                    << ", request: " << request_->ShortDebugString();
     } else {
         LOG(ERROR) << "write failed: "
                    << " data store return: " << ret
                    << ", request: " << request_->ShortDebugString();
-        response_->set_status(
-            CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
+        response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
     }

     response_->set_appliedindex(MaxAppliedIndex(node_, index));
@@ -481,27 +472,24 @@ void WriteChunkRequest::OnApply(uint64_t index,
 }

 void WriteChunkRequest::OnApplyFromLog(std::shared_ptr<CSDataStore> datastore,
-                                       const ChunkRequest &request,
-                                       const butil::IOBuf &data) {
-    // NOTE: 处理过程中优先使用参数传入的datastore/request
+                                       const ChunkRequest& request,
+                                       const butil::IOBuf& data) {
+    // NOTE: During processing, prefer the datastore/request passed in as
+    // parameters
     uint32_t cost;
-    std::string cloneSourceLocation;
+    std::string cloneSourceLocation;
     if (existCloneInfo(&request)) {
         auto func = ::curve::common::LocationOperator::GenerateCurveLocation;
-        cloneSourceLocation = func(request.clonefilesource(),
-            request.clonefileoffset());
+        cloneSourceLocation =
+            func(request.clonefilesource(), request.clonefileoffset());
     }
-    auto ret = datastore->WriteChunk(request.chunkid(),
-                                     request.sn(),
-                                     data,
-                                     request.offset(),
-                                     request.size(),
-                                     &cost,
+    auto ret = datastore->WriteChunk(request.chunkid(), request.sn(), data,
+                                     request.offset(), request.size(), &cost,
                                      cloneSourceLocation);
-    if (CSErrorCode::Success == ret) {
-        return;
-    } else if (CSErrorCode::BackwardRequestError == ret) {
+    if (CSErrorCode::Success == ret) {
+        return;
+    } else if (CSErrorCode::BackwardRequestError == ret) {
         LOG(WARNING) << "write failed: "
                      << " data store return: " << ret
                      << ", request: " << request.ShortDebugString();
@@ -519,24 +507,22 @@ void WriteChunkRequest::OnApplyFromLog(std::shared_ptr<CSDataStore> datastore,
 }

 void ReadSnapshotRequest::OnApply(uint64_t index,
-                                  ::google::protobuf::Closure *done) {
+                                  ::google::protobuf::Closure* done) {
     brpc::ClosureGuard doneGuard(done);
-    char *readBuffer = nullptr;
+    char* readBuffer = nullptr;
     uint32_t size = request_->size();
-    readBuffer = new(std::nothrow)char[size];
-    CHECK(nullptr != readBuffer) << "new readBuffer failed, "
-                                 << errno << ":" << strerror(errno);
-    auto ret = datastore_->ReadSnapshotChunk(request_->chunkid(),
-                                             request_->sn(),
-                                             readBuffer,
-                                             request_->offset(),
-                                             request_->size());
+    readBuffer = new (std::nothrow) char[size];
+    CHECK(nullptr != readBuffer)
+        << "new readBuffer failed, " << errno << ":" << strerror(errno);
+    auto ret = datastore_->ReadSnapshotChunk(
+        request_->chunkid(), request_->sn(),
readBuffer, request_->offset(), + request_->size()); butil::IOBuf wrapper; wrapper.append_user_data(readBuffer, size, ReadBufferDeleter); do { /** - * 1.成功 + * 1. Success */ if (CSErrorCode::Success == ret) { cntl_->response_attachment().append(wrapper); @@ -548,7 +534,8 @@ void ReadSnapshotRequest::OnApply(uint64_t index, * 2.chunk not exist */ if (CSErrorCode::ChunkNotExistError == ret) { - response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); //NOLINT + response_->set_status( + CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); // NOLINT break; } /** @@ -560,30 +547,29 @@ void ReadSnapshotRequest::OnApply(uint64_t index, << ", request: " << request_->ShortDebugString(); } /** - * 4.其他错误 + * 4. Other errors */ LOG(ERROR) << "read snapshot failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } while (0); response_->set_appliedindex(MaxAppliedIndex(node_, index)); } void ReadSnapshotRequest::OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) { + const ChunkRequest& request, + const butil::IOBuf& data) { (void)datastore; (void)request; (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - // read什么都不用做 + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing Read doesn't need to do anything } void DeleteSnapshotRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); CSErrorCode ret = datastore_->DeleteSnapshotChunkOrCorrectSn( request_->chunkid(), request_->correctedsn()); @@ -594,8 +580,7 @@ void DeleteSnapshotRequest::OnApply(uint64_t index, LOG(WARNING) << "delete snapshot or correct sn failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD); } else if (CSErrorCode::InternalError == ret) { LOG(FATAL) << "delete snapshot or correct sn failed: " << " data store return: " << ret @@ -604,20 +589,20 @@ void DeleteSnapshotRequest::OnApply(uint64_t index, LOG(ERROR) << "delete snapshot or correct sn failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } response_->set_appliedindex(MaxAppliedIndex(node_, index)); } -void DeleteSnapshotRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT - const ChunkRequest &request, - const butil::IOBuf &data) { +void DeleteSnapshotRequest::OnApplyFromLog( + std::shared_ptr datastore, // NOLINT + const ChunkRequest& request, const butil::IOBuf& data) { (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - auto ret = datastore->DeleteSnapshotChunkOrCorrectSn( - request.chunkid(), request.correctedsn()); + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing + auto ret = datastore->DeleteSnapshotChunkOrCorrectSn(request.chunkid(), + request.correctedsn()); if (CSErrorCode::Success == ret) { return; } else if (CSErrorCode::BackwardRequestError == ret) { @@ -636,14 +621,12 @@ void DeleteSnapshotRequest::OnApplyFromLog(std::shared_ptr datastor } void 
CreateCloneChunkRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - auto ret = datastore_->CreateCloneChunk(request_->chunkid(), - request_->sn(), - request_->correctedsn(), - request_->size(), - request_->location()); + auto ret = datastore_->CreateCloneChunk( + request_->chunkid(), request_->sn(), request_->correctedsn(), + request_->size(), request_->location()); if (CSErrorCode::Success == ret) { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); @@ -652,44 +635,41 @@ void CreateCloneChunkRequest::OnApply(uint64_t index, CSErrorCode::CrcCheckError == ret || CSErrorCode::FileFormatError == ret) { /** - * TODO(yyk): 当前遇到createclonechunk错误直接fatal退出整个 - * ChunkServer后期考虑仅仅标坏这个copyset,保证较好的可用性 + * TODO(yyk): Currently encountering the createclonechunk error, + * directly fatally exit the entire process ChunkServer will consider + * only flagging this copyset in the later stage to ensure good + * availability */ LOG(FATAL) << "create clone failed: " << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } else if (CSErrorCode::ChunkConflictError == ret) { LOG(WARNING) << "create clone chunk exist: " - << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST); + << ", request: " << request_->ShortDebugString(); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST); } else { LOG(ERROR) << "create clone failed: " << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } response_->set_appliedindex(MaxAppliedIndex(node_, index)); } -void CreateCloneChunkRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT - const ChunkRequest &request, - const butil::IOBuf &data) { +void CreateCloneChunkRequest::OnApplyFromLog( + std::shared_ptr datastore, // NOLINT + const ChunkRequest& request, const butil::IOBuf& data) { (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - auto ret = datastore->CreateCloneChunk(request.chunkid(), - request.sn(), + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing + auto ret = datastore->CreateCloneChunk(request.chunkid(), request.sn(), request.correctedsn(), - request.size(), - request.location()); - if (CSErrorCode::Success == ret) - return; + request.size(), request.location()); + if (CSErrorCode::Success == ret) return; if (CSErrorCode::ChunkConflictError == ret) { LOG(WARNING) << "create clone chunk exist: " - << ", request: " << request.ShortDebugString(); + << ", request: " << request.ShortDebugString(); return; } @@ -714,8 +694,9 @@ void PasteChunkInternalRequest::Process() { } /** - * 如果propose成功,说明request成功交给了raft处理, - * 那么done_就不能被调用,只有propose失败了才需要提前返回 + * If the proposal is successful, it indicates that the request has been + * successfully handed over to the raft for processing, So, done_ cannot be + * called, only if the proposal fails, it needs to be returned in advance */ if (0 == Propose(request_, &data_)) { doneGuard.release(); @@ -723,13 +704,12 @@ void PasteChunkInternalRequest::Process() { } void PasteChunkInternalRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) 
{ brpc::ClosureGuard doneGuard(done); auto ret = datastore_->PasteChunk(request_->chunkid(), - data_.to_string().c_str(), //NOLINT - request_->offset(), - request_->size()); + data_.to_string().c_str(), // NOLINT + request_->offset(), request_->size()); if (CSErrorCode::Success == ret) { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); @@ -746,16 +726,15 @@ void PasteChunkInternalRequest::OnApply(uint64_t index, response_->set_appliedindex(MaxAppliedIndex(node_, index)); } -void PasteChunkInternalRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT - const ChunkRequest &request, - const butil::IOBuf &data) { - // NOTE: 处理过程中优先使用参数传入的datastore/request - auto ret = datastore->PasteChunk(request.chunkid(), - data.to_string().c_str(), - request.offset(), - request.size()); - if (CSErrorCode::Success == ret) - return; +void PasteChunkInternalRequest::OnApplyFromLog( + std::shared_ptr datastore, // NOLINT + const ChunkRequest& request, const butil::IOBuf& data) { + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing + auto ret = + datastore->PasteChunk(request.chunkid(), data.to_string().c_str(), + request.offset(), request.size()); + if (CSErrorCode::Success == ret) return; if (CSErrorCode::InternalError == ret) { LOG(FATAL) << "paste chunk failed: " @@ -767,27 +746,22 @@ void PasteChunkInternalRequest::OnApplyFromLog(std::shared_ptr data } void ScanChunkRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); // read and calculate crc, build scanmap uint32_t crc = 0; size_t size = request_->size(); - std::unique_ptr readBuffer(new(std::nothrow)char[size]); - CHECK(nullptr != readBuffer) - << "new readBuffer failed " << strerror(errno); + std::unique_ptr readBuffer(new (std::nothrow) char[size]); + CHECK(nullptr != readBuffer) << "new readBuffer failed " << strerror(errno); // scan chunk metapage or user data auto ret = 0; if (request_->has_readmetapage() && request_->readmetapage()) { - ret = datastore_->ReadChunkMetaPage(request_->chunkid(), - request_->sn(), + ret = datastore_->ReadChunkMetaPage(request_->chunkid(), request_->sn(), readBuffer.get()); } else { - ret = datastore_->ReadChunk(request_->chunkid(), - request_->sn(), - readBuffer.get(), - request_->offset(), - size); + ret = datastore_->ReadChunk(request_->chunkid(), request_->sn(), + readBuffer.get(), request_->offset(), size); } if (CSErrorCode::Success == ret) { @@ -808,39 +782,32 @@ void ScanChunkRequest::OnApply(uint64_t index, scanManager_->GenScanJobs(jobKey); response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); } else if (CSErrorCode::InternalError == ret) { LOG(FATAL) << "scan chunk failed, read chunk internal error" << ", request: " << request_->ShortDebugString(); } else { - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } } -void ScanChunkRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT - const ChunkRequest &request, - const butil::IOBuf &data) { +void ScanChunkRequest::OnApplyFromLog( + std::shared_ptr datastore, // NOLINT + const ChunkRequest& request, const butil::IOBuf& data) { (void)data; uint32_t crc = 0; size_t size = request.size(); 
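The scan path above reads either the chunk's metapage or a data range into readBuffer and folds it into the crc declared a few lines up; the actual checksum call sits outside this hunk. A standalone sketch of that step using zlib's crc32 in place of the project's own CRC helper (the buffer contents are hypothetical):

    #include <zlib.h>

    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <memory>

    int main() {
        // Hypothetical read result standing in for readBuffer/size above.
        const size_t size = 4096;
        std::unique_ptr<char[]> buf(new (std::nothrow) char[size]);
        if (buf == nullptr) return 1;
        std::memset(buf.get(), 0xab, size);

        // Fold the buffer into a CRC32, as the scan does after reading.
        uLong crc = crc32(0L, Z_NULL, 0);
        crc = crc32(crc, reinterpret_cast<const Bytef*>(buf.get()),
                    static_cast<uInt>(size));
        std::cout << "scan crc: " << crc << "\n";
        return 0;
    }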
- std::unique_ptr readBuffer(new(std::nothrow)char[size]); - CHECK(nullptr != readBuffer) - << "new readBuffer failed " << strerror(errno); + std::unique_ptr readBuffer(new (std::nothrow) char[size]); + CHECK(nullptr != readBuffer) << "new readBuffer failed " << strerror(errno); // scan chunk metapage or user data auto ret = 0; if (request.has_readmetapage() && request.readmetapage()) { - ret = datastore->ReadChunkMetaPage(request.chunkid(), - request.sn(), - readBuffer.get()); + ret = datastore->ReadChunkMetaPage(request.chunkid(), request.sn(), + readBuffer.get()); } else { - ret = datastore->ReadChunk(request.chunkid(), - request.sn(), - readBuffer.get(), - request.offset(), - size); + ret = datastore->ReadChunk(request.chunkid(), request.sn(), + readBuffer.get(), request.offset(), size); } if (CSErrorCode::Success == ret) { @@ -861,10 +828,10 @@ void ScanChunkRequest::OnApplyFromLog(std::shared_ptr datastore, / } } -void ScanChunkRequest::BuildAndSendScanMap(const ChunkRequest &request, +void ScanChunkRequest::BuildAndSendScanMap(const ChunkRequest& request, uint64_t index, uint32_t crc) { // send rpc to leader - brpc::Channel *channel = new brpc::Channel(); + brpc::Channel* channel = new brpc::Channel(); if (channel->Init(peer_.addr, NULL) != 0) { LOG(ERROR) << "Fail to init channel to chunkserver for send scanmap: " << peer_; @@ -873,7 +840,7 @@ void ScanChunkRequest::BuildAndSendScanMap(const ChunkRequest &request, } // build scanmap - ScanMap *scanMap = new ScanMap(); + ScanMap* scanMap = new ScanMap(); scanMap->set_logicalpoolid(request.logicpoolid()); scanMap->set_copysetid(request.copysetid()); scanMap->set_chunkid(request.chunkid()); @@ -882,20 +849,17 @@ void ScanChunkRequest::BuildAndSendScanMap(const ChunkRequest &request, scanMap->set_offset(request.offset()); scanMap->set_len(request.size()); - FollowScanMapRequest *scanMapRequest = new FollowScanMapRequest(); + FollowScanMapRequest* scanMapRequest = new FollowScanMapRequest(); scanMapRequest->set_allocated_scanmap(scanMap); ScanService_Stub stub(channel); brpc::Controller* cntl = new brpc::Controller(); cntl->set_timeout_ms(request.sendscanmaptimeoutms()); - FollowScanMapResponse *scanMapResponse = new FollowScanMapResponse(); - SendScanMapClosure *done = new SendScanMapClosure( - scanMapRequest, - scanMapResponse, - request.sendscanmaptimeoutms(), - request.sendscanmapretrytimes(), - request.sendscanmapretryintervalus(), - cntl, channel); + FollowScanMapResponse* scanMapResponse = new FollowScanMapResponse(); + SendScanMapClosure* done = new SendScanMapClosure( + scanMapRequest, scanMapResponse, request.sendscanmaptimeoutms(), + request.sendscanmapretrytimes(), request.sendscanmapretryintervalus(), + cntl, channel); LOG(INFO) << "logid = " << cntl->log_id() << " Sending scanmap: " << scanMap->ShortDebugString() << " to leader: " << peer_.addr; diff --git a/src/chunkserver/op_request.h b/src/chunkserver/op_request.h index c29484f79b..d83a7ab827 100755 --- a/src/chunkserver/op_request.h +++ b/src/chunkserver/op_request.h @@ -23,21 +23,21 @@ #ifndef SRC_CHUNKSERVER_OP_REQUEST_H_ #define SRC_CHUNKSERVER_OP_REQUEST_H_ -#include -#include #include +#include +#include #include -#include "proto/chunk.pb.h" #include "include/chunkserver/chunkserver_common.h" +#include "proto/chunk.pb.h" #include "src/chunkserver/concurrent_apply/concurrent_apply.h" #include "src/chunkserver/datastore/define.h" #include "src/chunkserver/scan_manager.h" -using ::google::protobuf::RpcController; -using 
::curve::chunkserver::concurrent::ConcurrentApplyModule;
 using ::curve::chunkserver::concurrent::ApplyTaskType;
+using ::curve::chunkserver::concurrent::ConcurrentApplyModule;
+using ::google::protobuf::RpcController;

 namespace curve {
 namespace chunkserver {

@@ -49,12 +49,10 @@ class CloneCore;
 class CloneTask;
 class ScanManager;

-
-inline bool existCloneInfo(const ChunkRequest *request) {
+inline bool existCloneInfo(const ChunkRequest* request) {
    if (request != nullptr) {
-        if (request->has_clonefilesource() &&
-            request->has_clonefileoffset()) {
-            return true;
+        if (request->has_clonefilesource() && request->has_clonefileoffset()) {
+            return true;
        }
    }
    return false;
@@ -63,97 +61,104 @@ class ChunkOpRequest : public std::enable_shared_from_this {
 public:
    ChunkOpRequest();
-    ChunkOpRequest(std::shared_ptr nodePtr,
-                   RpcController *cntl,
-                   const ChunkRequest *request,
-                   ChunkResponse *response,
-                   ::google::protobuf::Closure *done);
+    ChunkOpRequest(std::shared_ptr nodePtr, RpcController* cntl,
+                   const ChunkRequest* request, ChunkResponse* response,
+                   ::google::protobuf::Closure* done);

    virtual ~ChunkOpRequest() = default;

    /**
-     * 处理request,实际上是Propose给相应的copyset
+     * Processing a request is, in effect, proposing it to the
+     * corresponding copyset
     */
    virtual void Process();

    /**
-     * request正常情况从内存中获取上下文on apply逻辑
-     * @param index:此op log entry的index
-     * @param done:对应的ChunkClosure
+     * In the normal case, OnApply obtains the request context from memory
+     * @param index: the index of this op log entry
+     * @param done: the corresponding ChunkClosure
     */
-    virtual void OnApply(uint64_t index,
-                         ::google::protobuf::Closure *done) = 0;
+    virtual void OnApply(uint64_t index, ::google::protobuf::Closure* done) = 0;

    /**
-     * NOTE: 子类实现过程中优先使用参数传入的datastore/request
-     * 从log entry反序列之后得到request详细信息进行处理,request
-     * 相关的上下文和依赖的data store都是从参数传递进去的
-     * 1.重启回放日志,从磁盘读取op log entry然后执行on apply逻辑
-     * 2. follower执行on apply的逻辑
-     * @param datastore:chunk数据持久化层
-     * @param request:反序列化后得到的request 细信息
-     * @param data:反序列化后得到的request要处理的数据
+     * NOTE: Subclass implementations should prefer the datastore/request
+     * passed in as parameters. The detailed request is obtained by
+     * deserializing the log entry; its context and the datastore it
+     * depends on are all passed in through these parameters.
+     * This path is taken when:
+     * 1. the log is replayed on restart: the op log entry is read from
+     * disk, then the on-apply logic is executed
+     * 2.
a follower executes the on-apply logic
+     * @param datastore: chunk data persistence layer
+     * @param request: the detailed request obtained after deserialization
+     * @param data: the data to be processed by the request, obtained after
+     * deserialization
     */
    virtual void OnApplyFromLog(std::shared_ptr datastore,
-                                const ChunkRequest &request,
-                                const butil::IOBuf &data) = 0;
+                                const ChunkRequest& request,
+                                const butil::IOBuf& data) = 0;

    /**
-     * 返回request的done成员
+     * Return the done member of the request
     */
-    ::google::protobuf::Closure *Closure() { return done_; }
+    ::google::protobuf::Closure* Closure() { return done_; }

    /**
-     * 返回chunk id
+     * Return the chunk id
     */
    ChunkID ChunkId() { return request_->chunkid(); }

    /**
-     * 返回请求类型
+     * Return the request type
     */
    CHUNK_OP_TYPE OpType() { return request_->optype(); }

    /**
-     * 返回请求大小
+     * Return the request size
     */
    uint32_t RequestSize() { return request_->size(); }

    /**
-     * 转发request给leader
+     * Forward the request to the leader
     */
    virtual void RedirectChunkRequest();

 public:
    /**
-     * Op序列化工具函数
+     * Op serialization helper
     * |            data           |
     * |   op meta    |   op data  |
     * | op request length | op request |
     * |      32 bit       |   ....     |
-     * 各个字段解释如下:
-     * data: encode之后的数据,实际上就是一条op log entry的data
-     * op meta: 就是op的元数据,这里是op request部分的长度
-     * op data: 就是request通过protobuf序列化后的数据
-     * @param request:Chunk Request
-     * @param data:请求中包含的数据内容
-     * @param log:出参,存放序列化好的数据,用户自己保证data!=nullptr
-     * @return 0成功,-1失败
+     * The fields are:
+     * data: the encoded result, i.e. the data of one op log entry
+     * op meta: the metadata of the op, here the length of the op request
+     * section
+     * op data: the request serialized through protobuf
+     * @param request: Chunk Request
+     * @param data: the data payload carried by the request
+     * @param log: output parameter storing the serialized data; the caller
+     * must guarantee that data != nullptr
+     * @return 0 on success, -1 on failure
     */
-    static int Encode(const ChunkRequest *request,
-                      const butil::IOBuf *data,
-                      butil::IOBuf *log);
+    static int Encode(const ChunkRequest* request, const butil::IOBuf* data,
+                      butil::IOBuf* log);
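    /*
     * For illustration only: a sketch of the layout documented above, not
     * the project's actual Encode implementation. Storing the length field
     * in network byte order is an assumption here.
     *
     *   #include <butil/iobuf.h>
     *   #include <butil/sys_byteorder.h>
     *
     *   int EncodeSketch(const ChunkRequest* request,
     *                    const butil::IOBuf* data, butil::IOBuf* log) {
     *       const uint32_t len = butil::HostToNet32(
     *           static_cast<uint32_t>(request->ByteSizeLong()));
     *       log->append(&len, sizeof(len));   // op meta: 32-bit length
     *       butil::IOBufAsZeroCopyOutputStream wrapper(log);
     *       if (!request->SerializeToZeroCopyStream(&wrapper)) {  // op request
     *           return -1;
     *       }
     *       if (data != nullptr) {
     *           log->append(*data);           // op data payload
     *       }
     *       return 0;
     *   }
     */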
    /**
-     * 反序列化,从log entry得到ChunkOpRequest,当前反序列出的ChunkRequest和data
-     * 都会从出参传出去,而不会放在ChunkOpRequest的成员变量里面
-     * @param log:op log entry
-     * @param request: 出参,存放反序列上下文
-     * @param data:出参,op操作的数据
-     * @return nullptr,失败,否则返回相应的ChunkOpRequest
+     * Deserialize: reconstruct a ChunkOpRequest from a log entry. The
+     * decoded ChunkRequest and data are handed back through the output
+     * parameters rather than being stored in ChunkOpRequest member
+     * variables
+     * @param log: op log entry
+     * @param request: output parameter, stores the deserialized request
+     * @param data: output parameter, the data the op operates on
+     * @return nullptr on failure, otherwise the corresponding
+     * ChunkOpRequest
     */
    static std::shared_ptr Decode(butil::IOBuf log,
-                                  ChunkRequest *request,
-                                  butil::IOBuf *data,
+                                  ChunkRequest* request,
+                                  butil::IOBuf* data,
                                   uint64_t index,
                                   PeerId leaderId);

@@ -161,49 +166,43 @@ class ChunkOpRequest : public std::enable_shared_from_this {
 protected:
    /**
-     * 打包request为braft::task,propose给相应的复制组
-     * @param request:Chunk Request
-     * @param data:请求中包含的数据内容
-     * @return 0成功,-1失败
+     * Wrap the request as a braft::Task and propose it to the
+     * corresponding replication group
+     * @param request: Chunk Request
+     * @param data: the data payload carried by the request
+     * @return 0 on success, -1 on failure
     */
-    int Propose(const ChunkRequest *request,
-                const butil::IOBuf *data);
+    int Propose(const ChunkRequest* request, const butil::IOBuf* data);

 protected:
-    // chunk持久化接口
+    // Chunk persistence layer interface
    std::shared_ptr datastore_;
-    // 复制组
+    // Replication group node
    std::shared_ptr node_;
    // rpc controller
-    brpc::Controller *cntl_;
-    // rpc 请求
-    const ChunkRequest *request_;
-    // rpc 返回
-    ChunkResponse *response_;
+    brpc::Controller* cntl_;
+    // rpc request
+    const ChunkRequest* request_;
+    // rpc response
+    ChunkResponse* response_;
    // rpc done closure
-    ::google::protobuf::Closure *done_;
+    ::google::protobuf::Closure* done_;
 };

 class DeleteChunkRequest : public ChunkOpRequest {
 public:
-    DeleteChunkRequest() :
-        ChunkOpRequest() {}
+    DeleteChunkRequest() : ChunkOpRequest() {}
    DeleteChunkRequest(std::shared_ptr nodePtr,
-                       RpcController *cntl,
-                       const ChunkRequest *request,
-                       ChunkResponse *response,
-                       ::google::protobuf::Closure *done) :
-        ChunkOpRequest(nodePtr,
-                       cntl,
-                       request,
-                       response,
-                       done) {}
+                       RpcController* cntl, const ChunkRequest* request,
+                       ChunkResponse* response,
+                       ::google::protobuf::Closure* done)
+        : ChunkOpRequest(nodePtr, cntl, request, response, done) {}
    virtual ~DeleteChunkRequest() = default;

-    void OnApply(uint64_t index, ::google::protobuf::Closure *done) override;
+    void OnApply(uint64_t index, ::google::protobuf::Closure* done) override;
    void OnApplyFromLog(std::shared_ptr datastore,
-                        const ChunkRequest &request,
-                        const butil::IOBuf &data) override;
+                        const ChunkRequest& request,
+                        const butil::IOBuf& data) override;
 };

 class ReadChunkRequest : public ChunkOpRequest {
@@ -211,154 +210,118 @@ class ReadChunkRequest : public ChunkOpRequest {
    friend class PasteChunkInternalRequest;

 public:
-    ReadChunkRequest() :
-        ChunkOpRequest() {}
+    ReadChunkRequest() : ChunkOpRequest() {}
    ReadChunkRequest(std::shared_ptr nodePtr,
-                     CloneManager* cloneMgr,
-                     RpcController *cntl,
-                     const ChunkRequest *request,
-                     ChunkResponse *response,
-
::google::protobuf::Closure *done); + CloneManager* cloneMgr, RpcController* cntl, + const ChunkRequest* request, ChunkResponse* response, + ::google::protobuf::Closure* done); virtual ~ReadChunkRequest() = default; void Process() override; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; - const ChunkRequest* GetChunkRequest() { - return request_; - } + const ChunkRequest* GetChunkRequest() { return request_; } private: - // 根据chunk信息判断是否需要拷贝数据 + // Determine whether to copy data based on chunk information bool NeedClone(const CSChunkInfo& chunkInfo); - // 从chunk文件中读数据 + // Reading data from chunk file void ReadChunk(); private: CloneManager* cloneMgr_; - // 并发模块 + // Concurrent module ConcurrentApplyModule* concurrentApplyModule_; - // 保存 apply index + // Save the apply index uint64_t applyIndex; }; class WriteChunkRequest : public ChunkOpRequest { public: - WriteChunkRequest() : - ChunkOpRequest() {} - WriteChunkRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - cntl, - request, - response, - done) {} + WriteChunkRequest() : ChunkOpRequest() {} + WriteChunkRequest(std::shared_ptr nodePtr, RpcController* cntl, + const ChunkRequest* request, ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, cntl, request, response, done) {} virtual ~WriteChunkRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done); + void OnApply(uint64_t index, ::google::protobuf::Closure* done); void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; }; class ReadSnapshotRequest : public ChunkOpRequest { public: - ReadSnapshotRequest() : - ChunkOpRequest() {} + ReadSnapshotRequest() : ChunkOpRequest() {} ReadSnapshotRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - cntl, - request, - response, - done) {} + RpcController* cntl, const ChunkRequest* request, + ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, cntl, request, response, done) {} virtual ~ReadSnapshotRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; }; class DeleteSnapshotRequest : public ChunkOpRequest { public: - DeleteSnapshotRequest() : - ChunkOpRequest() {} + DeleteSnapshotRequest() : ChunkOpRequest() {} DeleteSnapshotRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - cntl, - request, - response, - done) {} + RpcController* cntl, const ChunkRequest* request, + ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, 
cntl, request, response, done) {} virtual ~DeleteSnapshotRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; }; class CreateCloneChunkRequest : public ChunkOpRequest { public: - CreateCloneChunkRequest() : - ChunkOpRequest() {} + CreateCloneChunkRequest() : ChunkOpRequest() {} CreateCloneChunkRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - cntl, - request, - response, - done) {} + RpcController* cntl, const ChunkRequest* request, + ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, cntl, request, response, done) {} virtual ~CreateCloneChunkRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; }; class PasteChunkInternalRequest : public ChunkOpRequest { public: - PasteChunkInternalRequest() : - ChunkOpRequest() {} + PasteChunkInternalRequest() : ChunkOpRequest() {} PasteChunkInternalRequest(std::shared_ptr nodePtr, - const ChunkRequest *request, - ChunkResponse *response, - const butil::IOBuf* data, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - nullptr, - request, - response, - done) { - if (data != nullptr) { - data_ = *data; - } + const ChunkRequest* request, + ChunkResponse* response, const butil::IOBuf* data, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, nullptr, request, response, done) { + if (data != nullptr) { + data_ = *data; } + } virtual ~PasteChunkInternalRequest() = default; void Process() override; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; private: butil::IOBuf data_; @@ -366,28 +329,22 @@ class PasteChunkInternalRequest : public ChunkOpRequest { class ScanChunkRequest : public ChunkOpRequest { public: - ScanChunkRequest(uint64_t index, PeerId peer) : - ChunkOpRequest(), index_(index), peer_(peer) {} + ScanChunkRequest(uint64_t index, PeerId peer) + : ChunkOpRequest(), index_(index), peer_(peer) {} ScanChunkRequest(std::shared_ptr nodePtr, - ScanManager* scanManager, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - nullptr, - request, - response, - done), - scanManager_(scanManager) {} + ScanManager* scanManager, const ChunkRequest* request, + ChunkResponse* response, ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, nullptr, request, response, done), + scanManager_(scanManager) {} virtual ~ScanChunkRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void 
OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; private: - void BuildAndSendScanMap(const ChunkRequest &request, uint64_t index, + void BuildAndSendScanMap(const ChunkRequest& request, uint64_t index, uint32_t crc); ScanManager* scanManager_; uint64_t index_; diff --git a/src/chunkserver/passive_getfn.h b/src/chunkserver/passive_getfn.h index ac6655d1b2..56b6cd01eb 100644 --- a/src/chunkserver/passive_getfn.h +++ b/src/chunkserver/passive_getfn.h @@ -23,70 +23,70 @@ #ifndef SRC_CHUNKSERVER_PASSIVE_GETFN_H_ #define SRC_CHUNKSERVER_PASSIVE_GETFN_H_ -#include "src/chunkserver/trash.h" #include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/datastore/file_pool.h" #include "src/chunkserver/raftlog/curve_segment_log_storage.h" +#include "src/chunkserver/trash.h" namespace curve { namespace chunkserver { - /** - * 获取datastore中chunk文件的数量 - * @param arg: datastore的对象指针 - */ - uint32_t GetDatastoreChunkCountFunc(void* arg); - /** - * @brief: Get the number of WAL segment in CurveSegmentLogStorage - * @param arg: The pointer to CurveSegmentLogStorage - */ - uint32_t GetLogStorageWalSegmentCountFunc(void* arg); - /** - * 获取datastore中快照chunk的数量 - * @param arg: datastore的对象指针 - */ - uint32_t GetDatastoreSnapshotCountFunc(void* arg); - /** - * 获取datastore中clone chunk的数量 - * @param arg: datastore的对象指针 - */ - uint32_t GetDatastoreCloneChunkCountFunc(void* arg); - /** - * 获取chunkserver上chunk文件的数量 - * @param arg: nullptr - */ - uint32_t GetTotalChunkCountFunc(void* arg); - /** - * @brief: Get the total number of WAL segment in chunkserver - * @param arg: The pointer to ChunkServerMetric - */ - uint32_t GetTotalWalSegmentCountFunc(void* arg); +/** + * Obtain the number of chunk files in the datastore + * @param arg: Object pointer to datastore + */ +uint32_t GetDatastoreChunkCountFunc(void* arg); +/** + * @brief: Get the number of WAL segment in CurveSegmentLogStorage + * @param arg: The pointer to CurveSegmentLogStorage + */ +uint32_t GetLogStorageWalSegmentCountFunc(void* arg); +/** + * Obtain the number of snapshot chunks in the datastore + * @param arg: Object pointer to datastore + */ +uint32_t GetDatastoreSnapshotCountFunc(void* arg); +/** + * Obtain the number of clone chunks in the datastore + * @param arg: Object pointer to datastore + */ +uint32_t GetDatastoreCloneChunkCountFunc(void* arg); +/** + * Obtain the number of chunk files on the chunkserver + * @param arg: nullptr + */ +uint32_t GetTotalChunkCountFunc(void* arg); +/** + * @brief: Get the total number of WAL segment in chunkserver + * @param arg: The pointer to ChunkServerMetric + */ +uint32_t GetTotalWalSegmentCountFunc(void* arg); - /** - * 获取chunkserver上快照chunk的数量 - * @param arg: nullptr - */ - uint32_t GetTotalSnapshotCountFunc(void* arg); - /** - * 获取chunkserver上clone chunk的数量 - * @param arg: nullptr - */ - uint32_t GetTotalCloneChunkCountFunc(void* arg); - /** - * 获取chunkfilepool中剩余chunk的数量 - * @param arg: chunkfilepool的对象指针 - */ - uint32_t GetChunkLeftFunc(void* arg); - /** - * 获取walfilepool中剩余chunk的数量 - * @param arg: walfilepool的对象指针 - */ - uint32_t GetWalSegmentLeftFunc(void* arg); - /** - * 获取trash中chunk的数量 - * @param arg: trash的对象指针 - */ - uint32_t GetChunkTrashedFunc(void* arg); +/** + * Obtain the number of snapshot chunks on the chunkserver + * @param arg: nullptr + */ +uint32_t GetTotalSnapshotCountFunc(void* arg); +/** + * Obtain the number of clone chunks on the chunkserver 
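 * (Illustration only, not code from this patch: these passive getters are
 * meant to be polled, e.g. via bvar::PassiveStatus, which invokes the
 * callback with the registered argument on every sample or dump. The
 * variable name below is hypothetical:
 *
 *   #include <bvar/passive_status.h>
 *
 *   bvar::PassiveStatus<uint32_t> totalCloneChunkCount(
 *       "total_clonechunk_count", GetTotalCloneChunkCountFunc, nullptr);
 * )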
+ * @param arg: nullptr
+ */
+uint32_t GetTotalCloneChunkCountFunc(void* arg);
+/**
+ * Obtain the number of remaining chunks in the chunkfilepool
+ * @param arg: Object pointer to chunkfilepool
+ */
+uint32_t GetChunkLeftFunc(void* arg);
+/**
+ * Obtain the number of remaining chunks in the walfilepool
+ * @param arg: Object pointer to walfilepool
+ */
+uint32_t GetWalSegmentLeftFunc(void* arg);
+/**
+ * Obtain the number of chunks in the trash
+ * @param arg: Object pointer to trash
+ */
+uint32_t GetChunkTrashedFunc(void* arg);

 }  // namespace chunkserver
 }  // namespace curve

diff --git a/src/chunkserver/raftsnapshot/curve_file_adaptor.h b/src/chunkserver/raftsnapshot/curve_file_adaptor.h
index 2f6b23ec0b..b4467bb268 100644
--- a/src/chunkserver/raftsnapshot/curve_file_adaptor.h
+++ b/src/chunkserver/raftsnapshot/curve_file_adaptor.h
@@ -31,10 +31,9 @@ namespace chunkserver {
 class CurveFileAdaptor : public braft::PosixFileAdaptor {
 public:
    explicit CurveFileAdaptor(int fd) : PosixFileAdaptor(fd) {}
-    // close之前必须先sync,保证数据落盘,其他逻辑不变
-    bool close() override {
-        return sync() && braft::PosixFileAdaptor::close();
-    }
+    // sync() must be called before close() to guarantee the data has been
+    // flushed to disk; all other logic is unchanged
+    bool close() override { return sync() && braft::PosixFileAdaptor::close(); }
 };

 }  // namespace chunkserver
diff --git a/src/chunkserver/raftsnapshot/curve_file_service.cpp b/src/chunkserver/raftsnapshot/curve_file_service.cpp
index f1d5d931e0..4395234d6f 100644
--- a/src/chunkserver/raftsnapshot/curve_file_service.cpp
+++ b/src/chunkserver/raftsnapshot/curve_file_service.cpp
@@ -36,15 +36,17 @@

 // Authors: Zhangyi Chen(chenzhangyi01@baidu.com)

-#include
-#include
-#include
-#include
+#include "src/chunkserver/raftsnapshot/curve_file_service.h"
+
+#include
 #include
 #include
-#include
+#include
+#include
+#include
+#include
+
 #include
-#include "src/chunkserver/raftsnapshot/curve_file_service.h"

 namespace curve {
 namespace chunkserver {
@@ -52,9 +54,9 @@ namespace chunkserver {
 CurveFileService& kCurveFileService = CurveFileService::GetInstance();

 void CurveFileService::get_file(::google::protobuf::RpcController* controller,
-                               const ::braft::GetFileRequest* request,
-                               ::braft::GetFileResponse* response,
-                               ::google::protobuf::Closure* done) {
+                                const ::braft::GetFileRequest* request,
+                                ::braft::GetFileResponse* response,
+                                ::google::protobuf::Closure* done) {
    scoped_refptr reader;
    brpc::ClosureGuard done_gurad(done);
    brpc::Controller* cntl = (brpc::Controller*)controller;
@@ -63,21 +65,23 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller,
    if (iter == _reader_map.end()) {
        lck.unlock();
        /**
-         * 为了和文件不存在的错误区分开来,且考虑到install snapshot
-         * 的uri format为:remote://ip:port/reader_id,所以使用ENXIO
-         * 代表reader id不存在的错误
+         * To distinguish this from a file-not-found error, and given that
+         * the uri format for install snapshot is remote://ip:port/reader_id,
+         * ENXIO is used to represent a non-existent reader id.
*/ cntl->SetFailed(ENXIO, "Fail to find reader=%" PRId64, - request->reader_id()); + request->reader_id()); return; } // Don't touch iter ever after reader = iter->second; lck.unlock(); - LOG(INFO) << "get_file for " << cntl->remote_side() << " path=" - << reader->path() << " filename=" << request->filename() - << " offset=" << request->offset() << " count=" - << request->count(); + LOG(INFO) << "get_file for " << cntl->remote_side() + << " path=" << reader->path() + << " filename=" << request->filename() + << " offset=" << request->offset() + << " count=" << request->count(); if (request->count() <= 0 || request->offset() < 0) { cntl->SetFailed(brpc::EREQUEST, "Invalid request=%s", @@ -88,10 +92,11 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, butil::IOBuf buf; bool is_eof = false; size_t read_count = 0; - // 1. 如果是read attch meta file + // 1. If it is a read attach meta file if (request->filename() == BRAFT_SNAPSHOT_ATTACH_META_FILE) { - // 如果没有设置snapshot attachment,那么read文件的长度为零 - // 表示没有 snapshot attachment文件列表 + // If no snapshot attachment is set, then the length of the read file is + // zero, indicating that there are no snapshot attachment files in the + // list. bool snapshotAttachmentExist = false; { std::unique_lock lck(_mutex); @@ -104,7 +109,7 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, } } if (snapshotAttachmentExist) { - // 否则获取snapshot attachment file list + // Otherwise, obtain the snapshot attachment file list std::vector files; _snapshot_attachment->list_attach_files(&files, reader->path()); CurveSnapshotAttachMetaTable attachMetaTable; @@ -121,7 +126,7 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, request->reader_id()); return; } - CurveSnapshotFileReader *reader = + CurveSnapshotFileReader* reader = dynamic_cast(it->second.get()); if (reader != nullptr) { reader->set_attach_meta_table(attachMetaTable); @@ -135,11 +140,11 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, } if (0 != attachMetaTable.save_to_iobuf_as_remote(&buf)) { - // 内部错误: EINTERNAL + // Internal error: EINTERNAL LOG(ERROR) << "Fail to serialize " - "LocalSnapshotAttachMetaTable as iobuf"; + "LocalSnapshotAttachMetaTable as iobuf"; cntl->SetFailed(brpc::EINTERNAL, - "serialize snapshot attach meta table fail"); + "serialize snapshot attach meta table fail"); return; } else { LOG(INFO) << "LocalSnapshotAttachMetaTable encode buf length = " @@ -149,17 +154,15 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, read_count = buf.size(); } } else { - // 2. 否则其它文件下载继续走raft原先的文件下载流程 + // 2. 
Otherwise, downloads of any other file continue to follow
+        // Raft's original file download process
        const int rc = reader->read_file(
-            &buf, request->filename(),
-            request->offset(), request->count(),
-            request->read_partly(),
-            &read_count,
-            &is_eof);
+            &buf, request->filename(), request->offset(), request->count(),
+            request->read_partly(), &read_count, &is_eof);
        if (rc != 0) {
            cntl->SetFailed(rc, "Fail to read from path=%s filename=%s : %s",
-                            reader->path().c_str(),
-                            request->filename().c_str(), berror(rc));
+                            reader->path().c_str(), request->filename().c_str(),
+                            berror(rc));
            return;
        }
    }
@@ -177,13 +180,13 @@
 }

 void CurveFileService::set_snapshot_attachment(
-    SnapshotAttachment *snapshot_attachment) {
+    SnapshotAttachment* snapshot_attachment) {
    _snapshot_attachment = snapshot_attachment;
 }

 CurveFileService::CurveFileService() {
-    _next_id = ((int64_t)getpid() << 45) |
-               (butil::gettimeofday_us() << 17 >> 17);
+    _next_id =
+        ((int64_t)getpid() << 45) | (butil::gettimeofday_us() << 17 >> 17);
 }

 int CurveFileService::add_reader(braft::FileReader* reader,
diff --git a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp
index 18479b26a6..d46a7f18b9 100644
--- a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp
+++ b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp
@@ -20,16 +20,17 @@
 * Author: tongguangxun
 */

+#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h"
+
 #include
-#include
-#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h"
+#include

 namespace curve {
 namespace chunkserver {
 CurveFilesystemAdaptor::CurveFilesystemAdaptor(
-    std::shared_ptr chunkFilePool,
-    std::shared_ptr lfs) {
+    std::shared_ptr chunkFilePool,
+    std::shared_ptr lfs) {
    lfs_ = lfs;
    chunkFilePool_ = chunkFilePool;
    uint64_t metapageSize = chunkFilePool->GetFilePoolOpt().metaPageSize;
@@ -39,8 +40,7 @@ CurveFilesystemAdaptor::CurveFilesystemAdaptor(
 }

 CurveFilesystemAdaptor::CurveFilesystemAdaptor()
-    : tempMetaPageContent(nullptr) {
-}
+    : tempMetaPageContent(nullptr) {}

 CurveFilesystemAdaptor::~CurveFilesystemAdaptor() {
    if (tempMetaPageContent != nullptr) {
@@ -50,14 +50,14 @@
    LOG(INFO) << "release raftsnapshot filesystem adaptor!";
 }

-braft::FileAdaptor* CurveFilesystemAdaptor::open(const std::string& path,
-    int oflag, const ::google::protobuf::Message* file_meta,
-    butil::File::Error* e) {
-    (void) file_meta;
+braft::FileAdaptor* CurveFilesystemAdaptor::open(
+    const std::string& path, int oflag,
+    const ::google::protobuf::Message* file_meta, butil::File::Error* e) {
+    (void)file_meta;

    static std::once_flag local_s_check_cloexec_once;
    static bool local_s_support_cloexec_on_open = false;
-    std::call_once(local_s_check_cloexec_once, [&](){
+    std::call_once(local_s_check_cloexec_once, [&]() {
        int fd = lfs_->Open("/dev/zero", O_RDONLY | O_CLOEXEC);
        local_s_support_cloexec_on_open = (fd != -1);
        if (fd != -1) {
@@ -69,18 +69,21 @@
    if (cloexec && !local_s_support_cloexec_on_open) {
        oflag &= (~O_CLOEXEC);
    }
-    // Open就使用sync标志是为了避免集中在close一次性sync,对于16MB的chunk文件可能会造成抖动
+    // Opening with the sync flag avoids one big sync concentrated at close
+    // time, which could cause jitter for 16MB chunk files
    oflag |= O_SYNC;
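    /*
     * A small aside, not code from this patch: the tradeoff above in plain
     * POSIX terms. With O_SYNC each write is made durable as it happens,
     * instead of one large blocking flush at close time.
     *
     *   #include <fcntl.h>
     *   #include <unistd.h>
     *
     *   int fd = open("/data/chunk_0001", O_WRONLY | O_CREAT | O_SYNC, 0644);
     *   write(fd, buf, 4096);  // durable before write() returns
     *   close(fd);             // no 16MB burst of dirty pages left to flush
     */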
-    // 先判断当前文件是否需要过滤,如果需要过滤,就直接走下面逻辑,不走chunkfilepool
-    // 如果open操作携带create标志,则从chunkfilepool取,否则保持原来语意
-    // 如果待打开的文件已经存在,则直接使用原有语意
+    // First check whether this file should be filtered; filtered files skip
+    // the chunkfilepool and go straight through the logic below.
+    // If the open call carries the create flag, the file is taken from the
+    // chunkfilepool; otherwise the original open semantics are kept.
+    // If the file to be opened already exists, the original semantics are
+    // also used directly
-    if (!NeedFilter(path) &&
-        (oflag & O_CREAT) &&
+    if (!NeedFilter(path) && (oflag & O_CREAT) &&
        false == lfs_->FileExists(path)) {
-        // 从chunkfile pool中取出chunk返回
+        // Take a chunk out of the chunkfile pool and return it
        int rc = chunkFilePool_->GetFile(path, tempMetaPageContent);
-        // 如果从FilePool中取失败,返回错误。
+        // If retrieving from the FilePool fails, return an error.
        if (rc != 0) {
            LOG(ERROR) << "get chunk from chunkfile pool failed!";
            return NULL;
@@ -93,17 +96,17 @@
    int fd = lfs_->Open(path.c_str(), oflag);
    if (e) {
        *e = (fd < 0) ? butil::File::OSErrorToFileError(errno)
-                     : butil::File::FILE_OK;
+                      : butil::File::FILE_OK;
    }

    if (fd < 0) {
        if (oflag & O_CREAT) {
            LOG(ERROR) << "snapshot create chunkfile failed, filename = "
-                      << path.c_str() << ", errno = " << errno;
+                       << path.c_str() << ", errno = " << errno;
        } else {
            LOG(WARNING) << "snapshot open chunkfile failed,"
-                        << "may be deleted by user, filename = "
-                        << path.c_str() << ",errno = " << errno;
+                         << "may be deleted by user, filename = "
+                         << path.c_str() << ",errno = " << errno;
        }
        return NULL;
    }
@@ -115,10 +118,12 @@
 }

 bool CurveFilesystemAdaptor::delete_file(const std::string& path,
-    bool recursive) {
-    // 1. 如果是目录且recursive=true,那么遍历目录内容回收
-    // 2. 如果是目录且recursive=false,那么判断目录内容是否为空,不为空返回false
-    // 3. 如果是文件直接回收
+                                         bool recursive) {
+    // 1. If it is a directory and recursive=true, traverse the directory
+    // contents and recycle them
+    // 2. If it is a directory and recursive=false, check whether the
+    // directory is empty, and return false if it is not
+    // 3. If it is a file, recycle it directly
    if (lfs_->DirExists(path)) {
        std::vector dircontent;
        lfs_->List(path, &dircontent);
@@ -130,20 +135,21 @@ bool CurveFilesystemAdaptor::delete_file(const std::string& path,
        }
    } else {
        if (lfs_->FileExists(path)) {
-            // 如果在过滤名单里,就直接删除
-            if (NeedFilter(path)) {
-                return lfs_->Delete(path) == 0;
-            } else {
-                // chunkfilepool内部会检查path对应文件合法性,如果不符合就直接删除
+            // If it is on the filter list, delete it directly
+            if (NeedFilter(path)) {
+                return lfs_->Delete(path) == 0;
+            } else {
+                // The chunkfilepool internally checks that the file at path
+                // is a valid chunk; if it is not, the file is simply
+                // deleted
                return chunkFilePool_->RecycleFile(path) == 0;
-            }
+            }
        }
    }
    return true;
 }

-bool CurveFilesystemAdaptor::RecycleDirRecursive(
-    const std::string& path) {
+bool CurveFilesystemAdaptor::RecycleDirRecursive(const std::string& path) {
    std::vector dircontent;
    lfs_->List(path, &dircontent);
    bool rc = true;
@@ -152,7 +158,7 @@ bool CurveFilesystemAdaptor::RecycleDirRecursive(
        if (lfs_->DirExists(todeletePath)) {
            RecycleDirRecursive(todeletePath);
        } else {
-            // 如果在过滤名单里,就直接删除
+            // If it is on the filter list, delete it directly
            if (NeedFilter(todeletePath)) {
                if (lfs_->Delete(todeletePath) != 0) {
                    LOG(ERROR) << "delete " << todeletePath << ", failed!";
@@ -173,16 +179,18 @@
 }

 bool CurveFilesystemAdaptor::rename(const std::string& old_path,
-    const std::string& new_path) {
+                                    const std::string& new_path) {
    if (!NeedFilter(new_path) && lfs_->FileExists(new_path)) {
-        // chunkfilepool内部会检查path对应文件合法性,如果不符合就直接删除
+        // The chunkfilepool internally checks that the file at path is a
+        // valid chunk; if it is not, the file is simply deleted
        chunkFilePool_->RecycleFile(new_path);
    }
    return lfs_->Rename(old_path, new_path) == 0;
 }

 void CurveFilesystemAdaptor::SetFilterList(
-    const std::vector& filter) {
+    const std::vector& filter) {
    filterList_.assign(filter.begin(), filter.end());
 }

diff --git a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h
index 4e6737b8d4..b29a0948a8 100644
--- a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h
+++ b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h
@@ -33,53 +33,59 @@
 #include "src/chunkserver/raftsnapshot/curve_file_adaptor.h"

 /**
- * RaftSnapshotFilesystemAdaptor目的是为了接管braft
- * 内部snapshot创建chunk文件的逻辑,目前curve内部
- * 会从chunkfilepool中直接取出已经格式化好的chunk文件
- * 但是braft内部由于install snapshot也会创建chunk文件
- * 这个创建文件不感知chunkfilepool,因此我们希望install
- * snapshot也能从chunkfilepool中直接取出chunk文件,因此
- * 我们对install snapshot流程中的文件系统做了一层hook,在
- * 创建及删除文件操作上直接使用curve提供的文件系统接口即可。
+ * The purpose of RaftSnapshotFilesystemAdaptor is to take over the logic of
+ * creating chunk files for internal snapshots in braft. Currently, within
+ * Curve, we directly retrieve pre-formatted chunk files from the chunk file
+ * pool. However, within braft, the creation of chunk files during an install
+ * snapshot process does not interact with the chunk file pool. Therefore, we
+ * want the install snapshot process to also be able to retrieve chunk files
+ * directly from the chunk file pool. To achieve this, we have implemented a
+ * hook in the file system operations within the install snapshot process. This
+ * hook allows us to use the file system interface provided by Curve for file
+ * creation and deletion.
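 * For illustration, a sketch of how such a hooked adaptor is typically
 * handed to braft (assuming braft's NodeOptions exposes a
 * snapshot_file_system_adaptor field; the pool/fs handles below are
 * hypothetical):
 *
 *   scoped_refptr<braft::FileSystemAdaptor> fsAdaptor(
 *       new CurveFilesystemAdaptor(chunkFilePool, lfs));
 *   braft::NodeOptions options;
 *   options.snapshot_file_system_adaptor = &fsAdaptor;
 *
 * With this wiring, chunk files created inside braft's install-snapshot
 * flow go through CurveFilesystemAdaptor::open() and can come from the pool.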
*/ -using curve::fs::LocalFileSystem; using curve::chunkserver::FilePool; +using curve::fs::LocalFileSystem; namespace curve { namespace chunkserver { /** - * CurveFilesystemAdaptor继承raft的PosixFileSystemAdaptor类,在raft - * 内部其快照使用PosixFileSystemAdaptor类进行文件操作,因为我们只希望在其创建文件 - * 或者删除文件的时候使用chunkfilepool提供的getchunk和recyclechunk接口,所以这里 - * 我们只实现了open和delete_file两个接口。其他接口在调用的时候仍然使用原来raft的内部 - * 的接口。 + * CurveFilesystemAdaptor inherits from Raft's PosixFileSystemAdaptor class. + * Within the Raft framework, it uses the PosixFileSystemAdaptor class for file + * operations during snapshots. However, we only want to use the `getchunk` and + * `recyclechunk` interfaces provided by the chunkfilepool when creating or + * deleting files. Therefore, in this context, we have only implemented the + * `open` and `delete_file` interfaces. Other interfaces are still used with the + * original internal Raft interfaces when called. */ class CurveFilesystemAdaptor : public braft::PosixFileSystemAdaptor { public: /** - * 构造函数 - * @param: chunkfilepool用于获取和回收chunk文件 - * @param: lfs用于进行一些文件操作,比如打开或者删除目录 + * Constructor + * @param: chunkfilepool is used to retrieve and recycle chunk files + * @param: lfs is used for some file operations, such as opening or deleting + * directories */ CurveFilesystemAdaptor(std::shared_ptr filePool, - std::shared_ptr lfs); + std::shared_ptr lfs); CurveFilesystemAdaptor(); virtual ~CurveFilesystemAdaptor(); /** - * 打开文件,在raft内部使用open来创建一个文件,并返回FileAdaptor结构 - * @param: path是当前待打开的路径 - * @param: oflag为打开文件参数 - * @param: file_meta是当前文件的meta信息,这个参数内部未使用 - * @param: e为打开文件是的错误码 - * @return: FileAdaptor是raft内部封装fd的一个类,fd是open打开path的返回值 - * 后续所有对于该文件的读写都是通过该FileAdaptor指针进行的,其内部封装了 - * 读写操作,其内部定义如下。 - * class PosixFileAdaptor : public FileAdaptor { - * friend class PosixFileSystemAdaptor; - * public: - * PosixFileAdaptor(int fd) : _fd(fd) {} + * Open the file, use open inside the raft to create a file, and return the + * FileAdaptor structure + * @param: path is the current path to be opened + * @param: oflag is the parameter for opening a file + * @param: file_meta is the meta information of the current file, which is + * not used internally + * @param: e is the error code for opening the file + * @return: FileAdaptor is a class within Raft that encapsulates a file + * descriptor (fd). After opening a path with the `open` call, all + * subsequent read and write operations on that file are performed through a + * pointer to this FileAdaptor class. 
It internally defines the following + * operations: class PosixFileAdaptor : public FileAdaptor { friend class + * PosixFileSystemAdaptor; public: PosixFileAdaptor(int fd) : _fd(fd) {} * virtual ~PosixFileAdaptor(); * * virtual ssize_t write(const butil::IOBuf& data, @@ -94,61 +100,70 @@ class CurveFilesystemAdaptor : public braft::PosixFileSystemAdaptor { * int _fd; * }; */ - virtual braft::FileAdaptor* open(const std::string& path, int oflag, - const ::google::protobuf::Message* file_meta, - butil::File::Error* e); + virtual braft::FileAdaptor* open( + const std::string& path, int oflag, + const ::google::protobuf::Message* file_meta, butil::File::Error* e); /** - * 删除path对应的文件或目录 - * @param: path是待删除的文件路径 - * @param: recursive是否递归删除 - * @return: 成功返回true,否则返回false + * Delete the file or directory corresponding to the path + * @param: path is the file path to be deleted + * @param: Recursive whether to recursively delete + * @return: Successfully returns true, otherwise returns false */ virtual bool delete_file(const std::string& path, bool recursive); /** - * rename到新路径 - * 为什么要重载rename? - * 由于raft内部使用的是本地文件系统的rename,如果目标new path - * 已经存在文件,那么就会覆盖该文件。这样raft内部会创建temp_snapshot_meta - * 文件,这个是为了保证原子修改snapshot_meta文件而设置的,然后通过rename保证 - * 修改snapshot_meta文件修改的原子性。如果这个temp_snapshot_meta是从chunkfilpool - * 取的,那么如果直接rename,这个temp_snapshot_meta文件所占用的chunk文件 - * 就永远收不回来了,这种情况下会消耗大量的预分配chunk,所以这里重载rename,先 - * 回收new path,然后再rename, - * @param: old_path旧文件路径 - * @param: new_path新文件路径 + * Rename to a new path. + * Why override the rename function? + * Raft internally uses the rename function of the local file system. If the + * target new path already exists as a file, it will overwrite that file. + * This behavior leads to the creation of a 'temp_snapshot_meta' file, which + * is set up to ensure the atomic modification of the 'snapshot_meta' file. + * Using rename helps ensure the atomicity of modifying the 'snapshot_meta' + * file. However, if the 'temp_snapshot_meta' file is allocated from the + * chunk file pool and renamed directly, the chunk file used by the + * 'temp_snapshot_meta' file will never be released. In this situation, a + * significant number of pre-allocated chunks can be consumed. Therefore, + * the rename function is overridden here to first release the resources + * associated with the new path, and then perform the rename operation. 
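 * A compact illustration of the leak this override prevents (sketch only;
 * the paths are hypothetical, the calls are the ones used in this file):
 *
 *   // the temp file's backing chunk came from the pool
 *   chunkFilePool_->GetFile("/copyset/temp_snapshot_meta", metaPage);
 *   // a plain rename would silently discard the chunk behind the existing
 *   // "/copyset/snapshot_meta"; the override recycles it first:
 *   chunkFilePool_->RecycleFile("/copyset/snapshot_meta");
 *   lfs_->Rename("/copyset/temp_snapshot_meta", "/copyset/snapshot_meta");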
+ * @param: old_path - the old file path
+ * @param: new_path - the new file path
    */
    virtual bool rename(const std::string& old_path,
-                       const std::string& new_path);
+                        const std::string& new_path);

-    // 设置过滤哪些文件,这些文件不从chunkfilepool取
-    // 回收的时候也直接删除这些文件,不进入chunkfilepool
+    // Set which files are filtered: filtered files are not taken from the
+    // chunkfilepool, and on recycling they are deleted directly instead of
+    // being returned to the chunkfilepool
    void SetFilterList(const std::vector& filter);

 private:
-    /**
-     * 递归回收目录内容
-     * @param: path为待回收的目录路径
-     * @return: 成功返回true,否则返回false
-     */
+    /**
+     * Recursively recycle the contents of a directory
+     * @param: path is the directory path to be recycled
+     * @return: returns true on success, otherwise false
+     */
    bool RecycleDirRecursive(const std::string& path);

    /**
-     * 查看文件是否需要过滤
+     * Check whether the file needs to be filtered
     */
    bool NeedFilter(const std::string& filename);

 private:
-    // 由于chunkfile pool获取新的chunk时需要传入metapage信息
-    // 这里创建一个临时的metapage,其内容无关紧要,因为快照会覆盖这部分内容
-    char* tempMetaPageContent;
-    // 我们自己的文件系统,这里文件系统会做一些打开及删除目录操作
+    // The chunkfile pool requires metapage contents when handing out a new
+    // chunk, so a temporary metapage is created here; its contents do not
+    // matter, because the snapshot will overwrite them
+    char* tempMetaPageContent;
+    // Our own file system, used here for operations such as opening and
+    // deleting directories
    std::shared_ptr lfs_;
-    // 操作chunkfilepool的指针,这个FilePool_与copysetnode的
-    // chunkfilepool_应该是全局唯一的,保证操作chunkfilepool的原子性
+    // Pointer for operating on the chunkfilepool; this FilePool_ and the
+    // copyset node's chunkfilepool_ should be globally unique to guarantee
+    // atomic operations on the chunkfilepool
    std::shared_ptr chunkFilePool_;
-    // 过滤名单,在当前vector中的文件名,都不从chunkfilepool中取文件
-    // 回收的时候也直接删除这些文件,不进入chunkfilepool
+    // Filter list: files whose names are in this vector are never taken
+    // from the chunkfilepool, and on recycling they are deleted directly
+    // instead of being returned to the chunkfilepool
    std::vector filterList_;
 };
 }  // namespace chunkserver
diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp
index 93d4a7c324..cbd77403da 100644
--- a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp
+++ b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp
@@ -21,6 +21,7 @@
 */

 #include "src/chunkserver/raftsnapshot/curve_snapshot_attachment.h"
+
 #include "src/common/fs_util.h"

 namespace curve {
 namespace chunkserver {
 CurveSnapshotAttachment::CurveSnapshotAttachment(
    : fileHelper_(fs) {}

 void CurveSnapshotAttachment::list_attach_files(
-    std::vector *files, const std::string& raftSnapshotPath) {
+    std::vector* files, const std::string& raftSnapshotPath) {
    std::string raftBaseDir =
-            getCurveRaftBaseDir(raftSnapshotPath, RAFT_SNAP_DIR);
+        getCurveRaftBaseDir(raftSnapshotPath, RAFT_SNAP_DIR);
    std::string dataDir;
-    if (raftBaseDir[raftBaseDir.length()-1] != '/') {
+    if (raftBaseDir[raftBaseDir.length() - 1] != '/') {
        dataDir = raftBaseDir + "/" + RAFT_DATA_DIR;
    } else {
        dataDir = raftBaseDir + RAFT_DATA_DIR;
@@ -43,23 +44,23 @@
    std::vector snapFiles;
    int rc = fileHelper_.ListFiles(dataDir, nullptr, &snapFiles);
-    // list出错一般认为就是磁盘出现问题了,这种情况直接让进程挂掉
-    // Attention: 这里还需要更仔细考虑
+    // A List failure is generally taken to mean the disk has failed, in
+    // which case the process is simply allowed to crash.
+    // Attention: this still needs more careful consideration.
    CHECK(rc == 0) << "List dir failed.";

    files->clear();
-    // 文件路径格式与snapshot_meta中的格式要相同
+    // The file path format should match the format used in snapshot_meta
    for (const auto& snapFile : snapFiles) {
        std::string snapApath;
-        // 添加绝对路径
+        // Add the absolute path
        snapApath.append(dataDir);
        snapApath.append("/").append(snapFile);
-        std::string filePath = curve::common::CalcRelativePath(
-            raftSnapshotPath, snapApath);
+        std::string filePath =
+            curve::common::CalcRelativePath(raftSnapshotPath, snapApath);
        files->emplace_back(filePath);
    }
 }
-
 }  // namespace chunkserver
 }  // namespace curve
diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h
index 10e2172673..94b6009714 100644
--- a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h
+++ b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h
@@ -23,62 +23,71 @@
 #define SRC_CHUNKSERVER_RAFTSNAPSHOT_CURVE_SNAPSHOT_ATTACHMENT_H_

 #include
+
+#include
 #include
 #include
-#include

-#include "src/chunkserver/raftsnapshot/define.h"
 #include "src/chunkserver/datastore/datastore_file_helper.h"
+#include "src/chunkserver/raftsnapshot/define.h"

 namespace curve {
 namespace chunkserver {

 /**
- * 用于获取snapshot attachment files的接口,一般用于一些下载
- * 快照获取需要额外下载的文件list
+ * Interface for obtaining snapshot attachment files; typically used during
+ * snapshot downloads to get the list of files that must be downloaded in
+ * addition to the snapshot itself
 */
-class SnapshotAttachment :
-    public butil::RefCountedThreadSafe {
+class SnapshotAttachment
+    : public butil::RefCountedThreadSafe {
 public:
    SnapshotAttachment() = default;
    virtual ~SnapshotAttachment() = default;

    /**
-     * 获取snapshot attachment文件列表
-     * @param files[out]: attachment文件列表
-     * @param snapshotPath[in]: braft快照的路径
+     * Obtain the list of snapshot attachment files
+     * @param files[out]: attachment file list
+     * @param snapshotPath[in]: path to the braft snapshot
     */
-    virtual void list_attach_files(std::vector *files,
-        const std::string& raftSnapshotPath) = 0;
+    virtual void list_attach_files(std::vector* files,
+                                   const std::string& raftSnapshotPath) = 0;
 };

-// SnapshotAttachment接口的实现,用于raft加载快照时,获取chunk快照文件列表
+// Implementation of the SnapshotAttachment interface, used to obtain the list
+// of chunk snapshot files when raft loads a snapshot
 class CurveSnapshotAttachment : public SnapshotAttachment {
 public:
    explicit CurveSnapshotAttachment(std::shared_ptr fs);
    virtual ~CurveSnapshotAttachment() = default;
    /**
-     * 获取raft snapshot的attachment,这里就是获取chunk的快照文件列表
-     * @param files[out]: data目录下的chunk快照文件列表
-     * @param raftSnapshotPath: braft快照的路径
-     * 返回的文件路径使用 绝对路径:相对路径 的格式,相对路径包含data目录
+     * Obtain the attachment of the raft snapshot, i.e. the list of chunk
+     * snapshot files
+     * @param files[out]: list of chunk snapshot files under the data
+     * directory
+     * @param raftSnapshotPath: path to the braft snapshot
+     * The returned file paths use the format absolute_path:relative_path,
+     * where the relative path includes the data directory
     */
-    void list_attach_files(std::vector *files,
+    void list_attach_files(std::vector* files,
                           const std::string& raftSnapshotPath) override;
+
 private:
    DatastoreFileHelper fileHelper_;
 };

 /*
-* @brif 通过具体的某个raft的snapshot实例地址获取raft实例基础地址
-* @param[in] specificSnapshotDir 某个具体snapshot的目录
  比如/data/chunkserver1/copysets/4294967812/raft_snapshot/snapshot_805455/
-* @param[in] raftSnapshotRelativeDir 上层业务指的所有snapshot的相对基地址
  比如raft_snapshot
-* @return 返回raft实例的绝对基地址,/data/chunkserver1/copysets/4294967812/
+* @brief
Obtains the base address of a raft instance from the directory of
+one concrete snapshot instance of that raft
+* @param[in] specificSnapshotDir The directory of a specific snapshot, e.g.
+/data/chunkserver1/copysets/4294967812/raft_snapshot/snapshot_805455/
+* @param[in] raftSnapshotRelativeDir The relative base directory of all
+snapshots as referred to by the upper layer, e.g. raft_snapshot
+* @return the absolute base directory of the raft instance, e.g.
+/data/chunkserver1/copysets/4294967812/
*/
 inline std::string getCurveRaftBaseDir(std::string specificSnapshotDir,
-    std::string raftSnapshotRelativeDir) {
+                                       std::string raftSnapshotRelativeDir) {
    std::string::size_type m =
        specificSnapshotDir.find(raftSnapshotRelativeDir);
    if (m == std::string::npos) {
diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp b/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp
index 6a996695bd..5cceb37171 100644
--- a/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp
+++ b/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp
@@ -48,22 +48,19 @@ CurveSnapshotCopier::CurveSnapshotCopier(CurveSnapshotStorage* storage,
                                         bool filter_before_copy_remote,
                                         braft::FileSystemAdaptor* fs,
                                         braft::SnapshotThrottle* throttle)
-    : _tid(INVALID_BTHREAD)
-    , _cancelled(false)
-    , _filter_before_copy_remote(filter_before_copy_remote)
-    , _fs(fs)
-    , _throttle(throttle)
-    , _writer(NULL)
-    , _storage(storage)
-    , _reader(NULL)
-    , _cur_session(NULL)
-{}
-
-CurveSnapshotCopier::~CurveSnapshotCopier() {
-    CHECK(!_writer);
-}
-
-void *CurveSnapshotCopier::start_copy(void* arg) {
+    : _tid(INVALID_BTHREAD),
+      _cancelled(false),
+      _filter_before_copy_remote(filter_before_copy_remote),
+      _fs(fs),
+      _throttle(throttle),
+      _writer(NULL),
+      _storage(storage),
+      _reader(NULL),
+      _cur_session(NULL) {}
+
+CurveSnapshotCopier::~CurveSnapshotCopier() { CHECK(!_writer); }
+
+void* CurveSnapshotCopier::start_copy(void* arg) {
    CurveSnapshotCopier* c = reinterpret_cast(arg);
    c->copy();
    return NULL;
@@ -71,7 +68,7 @@

 void CurveSnapshotCopier::copy() {
    do {
-        // 下载snapshot meta中记录的文件
+        // Download the files recorded in the snapshot meta
        load_meta_table();
        if (!ok()) {
            break;
@@ -86,7 +83,7 @@
            copy_file(files[i]);
        }

-        // 下载snapshot attachment文件
+        // Download the snapshot attachment files
        load_attach_meta_table();
        if (!ok()) {
            break;
@@ -99,8 +96,8 @@
    } while (0);
    if (!ok() && _writer && _writer->ok()) {
        LOG(WARNING) << "Fail to copy, error_code " << error_code()
-                     << " error_msg " << error_cstr()
-                     << " writer path " << _writer->get_path();
+                     << " error_msg " << error_cstr() << " writer path "
+                     << _writer->get_path();
        _writer->set_error(error_code(), error_cstr());
    }
    if (_writer) {
@@ -123,9 +120,9 @@
        set_error(ECANCELED, "%s", berror(ECANCELED));
        return;
    }
-    scoped_refptr session
-        = _copier.start_to_copy_to_iobuf(BRAFT_SNAPSHOT_META_FILE,
-                                         &meta_buf, NULL);
+    scoped_refptr session =
+        _copier.start_to_copy_to_iobuf(BRAFT_SNAPSHOT_META_FILE, &meta_buf,
+                                       NULL);
    _cur_session = session.get();
    lck.unlock();
    session->join();
@@ -153,9 +150,9 @@
        set_error(ECANCELED, "%s", berror(ECANCELED));
        return;
    }
-    scoped_refptr session
-        = _copier.start_to_copy_to_iobuf(BRAFT_SNAPSHOT_ATTACH_META_FILE,
-                                         &meta_buf, NULL);
+    scoped_refptr session =
+        
_copier.start_to_copy_to_iobuf(BRAFT_SNAPSHOT_ATTACH_META_FILE, + &meta_buf, NULL); _cur_session = session.get(); lck.unlock(); session->join(); @@ -169,21 +166,22 @@ void CurveSnapshotCopier::load_attach_meta_table() { return; } - // 如果attach meta table为空,那么说明没有snapshot attachment files + // If the attach_meta_table is empty, then there are no snapshot attachment + // files if (0 == meta_buf.size()) { return; } - if (_remote_snapshot._attach_meta_table.load_from_iobuf_as_remote(meta_buf) - != 0) { + if (_remote_snapshot._attach_meta_table.load_from_iobuf_as_remote( + meta_buf) != 0) { LOG(WARNING) << "Bad attach_meta_table format"; set_error(-1, "Bad attach_meta_table format"); return; } } -int CurveSnapshotCopier::filter_before_copy(CurveSnapshotWriter* writer, - braft::SnapshotReader* last_snapshot) { +int CurveSnapshotCopier::filter_before_copy( + CurveSnapshotWriter* writer, braft::SnapshotReader* last_snapshot) { std::vector existing_files; writer->list_files(&existing_files); std::vector to_remove; @@ -200,8 +198,7 @@ int CurveSnapshotCopier::filter_before_copy(CurveSnapshotWriter* writer, for (size_t i = 0; i < remote_files.size(); ++i) { const std::string& filename = remote_files[i]; braft::LocalFileMeta remote_meta; - CHECK_EQ(0, _remote_snapshot.get_file_meta( - filename, &remote_meta)); + CHECK_EQ(0, _remote_snapshot.get_file_meta(filename, &remote_meta)); if (!remote_meta.has_checksum()) { // Redownload file if this file doen't have checksum writer->remove_file(filename); @@ -214,8 +211,8 @@ int CurveSnapshotCopier::filter_before_copy(CurveSnapshotWriter* writer, if (local_meta.has_checksum() && local_meta.checksum() == remote_meta.checksum()) { LOG(INFO) << "Keep file=" << filename - << " checksum=" << remote_meta.checksum() - << " in " << writer->get_path(); + << " checksum=" << remote_meta.checksum() << " in " + << writer->get_path(); continue; } // Remove files from writer so that the file is to be copied from @@ -232,21 +229,20 @@ int CurveSnapshotCopier::filter_before_copy(CurveSnapshotWriter* writer, continue; } if (!local_meta.has_checksum() || - local_meta.checksum() != remote_meta.checksum()) { + local_meta.checksum() != remote_meta.checksum()) { continue; } LOG(INFO) << "Found the same file=" << filename << " checksum=" << remote_meta.checksum() << " in last_snapshot=" << last_snapshot->get_path(); if (local_meta.source() == braft::FILE_SOURCE_LOCAL) { - std::string source_path = last_snapshot->get_path() + '/' - + filename; - std::string dest_path = writer->get_path() + '/' - + filename; + std::string source_path = + last_snapshot->get_path() + '/' + filename; + std::string dest_path = writer->get_path() + '/' + filename; _fs->delete_file(dest_path, false); if (!_fs->link(source_path, dest_path)) { - PLOG(ERROR) << "Fail to link " << source_path - << " to " << dest_path; + PLOG(ERROR) + << "Fail to link " << source_path << " to " << dest_path; continue; } // Don't delete linked file @@ -272,8 +268,8 @@ int CurveSnapshotCopier::filter_before_copy(CurveSnapshotWriter* writer, } void CurveSnapshotCopier::filter() { - _writer = reinterpret_cast(_storage->create( - !_filter_before_copy_remote)); + _writer = reinterpret_cast( + _storage->create(!_filter_before_copy_remote)); if (_writer == NULL) { set_error(EIO, "Fail to create snapshot writer"); return; @@ -283,12 +279,13 @@ void CurveSnapshotCopier::filter() { braft::SnapshotReader* reader = _storage->open(); if (filter_before_copy(_writer, reader) != 0) { LOG(WARNING) << "Fail to filter writer before copying" - ", 
path: " << _writer->get_path() + ", path: " + << _writer->get_path() << ", destroy and create a new writer"; _writer->set_error(-1, "Fail to filter"); _storage->close(_writer, false); - _writer = reinterpret_cast( - _storage->create(true)); + _writer = + reinterpret_cast(_storage->create(true)); } if (reader) { _storage->close(reader); @@ -319,16 +316,16 @@ void CurveSnapshotCopier::copy_file(const std::string& filename, bool attch) { butil::File::Error e; bool rc = false; if (braft::FLAGS_raft_create_parent_directories) { - butil::FilePath sub_dir = butil::FilePath( - _writer->get_path()).Append(sub_path.DirName()); + butil::FilePath sub_dir = + butil::FilePath(_writer->get_path()).Append(sub_path.DirName()); rc = _fs->create_directory(sub_dir.value(), &e, true); } else { - rc = create_sub_directory( - _writer->get_path(), sub_path.DirName().value(), _fs, &e); + rc = create_sub_directory(_writer->get_path(), + sub_path.DirName().value(), _fs, &e); } if (!rc) { - LOG(ERROR) << "Fail to create directory for " << file_path - << " : " << butil::File::ErrorToString(e); + LOG(ERROR) << "Fail to create directory for " << file_path << " : " + << butil::File::ErrorToString(e); set_error(braft::file_error_to_os_error(e), "Fail to create directory"); } @@ -340,8 +337,8 @@ void CurveSnapshotCopier::copy_file(const std::string& filename, bool attch) { set_error(ECANCELED, "%s", berror(ECANCELED)); return; } - scoped_refptr session - = _copier.start_to_copy_to_file(filename, file_path, NULL); + scoped_refptr session = + _copier.start_to_copy_to_file(filename, file_path, NULL); if (session == NULL) { LOG(WARNING) << "Fail to copy " << filename << " path: " << _writer->get_path(); @@ -355,14 +352,13 @@ void CurveSnapshotCopier::copy_file(const std::string& filename, bool attch) { _cur_session = NULL; lck.unlock(); if (!session->status().ok()) { - // 如果是文件不存在,那么删除刚开始open的文件 + // If the file does not exist, delete the file that was just opened if (session->status().error_code() == ENOENT) { bool rc = _fs->delete_file(file_path, false); if (!rc) { - LOG(ERROR) << "Fail to delete file" << file_path - << " : " << ::berror(errno); - set_error(errno, - "Fail to create delete file " + file_path); + LOG(ERROR) << "Fail to delete file" << file_path << " : " + << ::berror(errno); + set_error(errno, "Fail to create delete file " + file_path); } return; } @@ -371,7 +367,8 @@ void CurveSnapshotCopier::copy_file(const std::string& filename, bool attch) { session->status().error_cstr()); return; } - // 如果是attach file,那么不需要持久化file meta信息 + // If it is an attach file, then there is no need to persist the file meta + // information if (!attch && _writer->add_file(filename, &meta) != 0) { set_error(EIO, "Fail to add file to writer"); return; @@ -394,16 +391,13 @@ std::string CurveSnapshotCopier::get_rfilename(const std::string& filename) { } void CurveSnapshotCopier::start() { - if (bthread_start_background( - &_tid, NULL, start_copy, this) != 0) { + if (bthread_start_background(&_tid, NULL, start_copy, this) != 0) { PLOG(ERROR) << "Fail to start bthread"; copy(); } } -void CurveSnapshotCopier::join() { - bthread_join(_tid, NULL); -} +void CurveSnapshotCopier::join() { bthread_join(_tid, NULL); } void CurveSnapshotCopier::cancel() { BAIDU_SCOPED_LOCK(_mutex); diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_copier.h b/src/chunkserver/raftsnapshot/curve_snapshot_copier.h index 1c991720b0..fdc1ef960a 100644 --- a/src/chunkserver/raftsnapshot/curve_snapshot_copier.h +++ 
b/src/chunkserver/raftsnapshot/curve_snapshot_copier.h @@ -43,8 +43,10 @@ #define SRC_CHUNKSERVER_RAFTSNAPSHOT_CURVE_SNAPSHOT_COPIER_H_ #include -#include + #include +#include + #include "src/chunkserver/raftsnapshot/curve_snapshot.h" #include "src/chunkserver/raftsnapshot/curve_snapshot_storage.h" @@ -75,7 +77,9 @@ class CurveSnapshotCopier : public braft::SnapshotCopier { braft::SnapshotReader* last_snapshot); void filter(); void copy_file(const std::string& filename, bool attach = false); - // 这里的filename是相对于快照目录的路径,为了先把文件下载到临时目录,需要把前面的..去掉 + // The filename here is the path relative to the snapshot directory. In + // order to download the file into the temporary directory first, the + // leading ".." needs to be removed std::string get_rfilename(const std::string& filename); braft::raft_mutex_t _mutex; diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h b/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h index 97c553661c..5221a0df8a 100644 --- a/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h +++ b/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h @@ -44,10 +44,12 @@ #include #include + +#include +#include #include #include -#include -#include + #include "proto/curve_storage.pb.h" #include "src/chunkserver/raftsnapshot/define.h" @@ -55,9 +57,10 @@ namespace curve { namespace chunkserver { /** - * snapshot attachment文件元数据表,同上面的 - * CurveSnapshotAttachMetaTable接口,主要提供attach文件元数据信息 - * 的查询、序列化和反序列等接口 + * Snapshot attachment file metadata table, similar to the above + * CurveSnapshotAttachMetaTable interface. This table primarily provides + * interfaces for querying, serializing, and deserializing attachment file + * metadata */ class CurveSnapshotAttachMetaTable { public: @@ -70,7 +73,7 @@ class CurveSnapshotAttachMetaTable { int get_attach_file_meta(const std::string& filename, braft::LocalFileMeta* file_meta) const; // list files in the attach meta table - void list_files(std::vector *files) const; + void list_files(std::vector* files) const; // deserialize int load_from_iobuf_as_remote(const butil::IOBuf& buf); // serialize @@ -79,39 +82,31 @@ class CurveSnapshotAttachMetaTable { private: typedef std::map Map; // file -> file meta - Map _file_map; + Map _file_map; }; class CurveSnapshotFileReader : public braft::LocalDirReader { public: CurveSnapshotFileReader(braft::FileSystemAdaptor* fs, - const std::string& path, - braft::SnapshotThrottle* snapshot_throttle) - : LocalDirReader(fs, path), - _snapshot_throttle(snapshot_throttle) - {} + const std::string& path, + braft::SnapshotThrottle* snapshot_throttle) + : LocalDirReader(fs, path), _snapshot_throttle(snapshot_throttle) {} virtual ~CurveSnapshotFileReader() = default; void set_meta_table(const braft::LocalSnapshotMetaTable &meta_table) { _meta_table = meta_table; } void set_attach_meta_table( - const CurveSnapshotAttachMetaTable &attach_meta_table) { + const CurveSnapshotAttachMetaTable& attach_meta_table) { _attach_meta_table = attach_meta_table; } - int read_file(butil::IOBuf* out, - const std::string &filename, - off_t offset, - size_t max_count, - bool read_partly, - size_t* read_count, + int read_file(butil::IOBuf* out, const std::string& filename, off_t offset, + size_t max_count, bool read_partly, size_t* read_count, bool* is_eof) const override; - braft::LocalSnapshotMetaTable get_meta_table() { - return _meta_table; - } + braft::LocalSnapshotMetaTable get_meta_table() { return _meta_table; } private: braft::LocalSnapshotMetaTable _meta_table;
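[Editor's note] As a reading aid only, a minimal sketch of the path normalization that the translated get_rfilename comment in the curve_snapshot_copier.h hunk above describes; the helper name and the exact prefix handling are assumptions, not part of this patch:

#include <string>

// Illustrative: drop a leading "../" so the remote file is created under the
// temporary snapshot directory rather than under its parent.
static std::string StripLeadingParentDir(const std::string& filename) {
    const std::string prefix = "../";
    if (filename.compare(0, prefix.size(), prefix) == 0) {
        return filename.substr(prefix.size());
    }
    return filename;
}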
diff --git a/src/chunkserver/raftsnapshot/define.h b/src/chunkserver/raftsnapshot/define.h index 012da7f1ba..79b1dcf355 100644 --- a/src/chunkserver/raftsnapshot/define.h +++ b/src/chunkserver/raftsnapshot/define.h @@ -29,12 +29,13 @@ namespace chunkserver { const char RAFT_DATA_DIR[] = "data"; const char RAFT_META_DIR[] = "raft_meta"; -// TODO(all:fix it): RAFT_SNAP_DIR注意当前这个目录地址不能修改 -// 与当前外部依赖curve-braft代码强耦合(两边硬编码耦合) +// TODO(all:fix it): Note that the RAFT_SNAP_DIR directory address must not be +// modified at this time: it is tightly coupled with the external dependency +// on the curve-braft code (hardcoded on both sides). const char RAFT_SNAP_DIR[] = "raft_snapshot"; -const char RAFT_LOG_DIR[] = "log"; +const char RAFT_LOG_DIR[] = "log"; #define BRAFT_SNAPSHOT_PATTERN "snapshot_%020" PRId64 -#define BRAFT_SNAPSHOT_META_FILE "__raft_snapshot_meta" +#define BRAFT_SNAPSHOT_META_FILE "__raft_snapshot_meta" #define BRAFT_SNAPSHOT_ATTACH_META_FILE "__raft_snapshot_attach_meta" #define BRAFT_PROTOBUF_FILE_TEMP ".tmp" diff --git a/src/chunkserver/register.cpp b/src/chunkserver/register.cpp index 1616800c55..edbf2a27f7 100644 --- a/src/chunkserver/register.cpp +++ b/src/chunkserver/register.cpp @@ -20,29 +20,30 @@ * Author: lixiaocui */ -#include -#include +#include "src/chunkserver/register.h" + #include #include +#include +#include #include #include +#include "proto/topology.pb.h" +#include "src/chunkserver/chunkserver_helper.h" #include "src/common/crc32.h" #include "src/common/string_util.h" -#include "src/chunkserver/register.h" #include "src/common/uri_parser.h" -#include "src/chunkserver/chunkserver_helper.h" -#include "proto/topology.pb.h" namespace curve { namespace chunkserver { -Register::Register(const RegisterOptions &ops) { +Register::Register(const RegisterOptions& ops) { this->ops_ = ops; - // 解析mds的多个地址 + // Parse the multiple addresses of the mds ::curve::common::SplitString(ops.mdsListenAddr, ",", &mdsEps_); - // 检验每个地址的合法性 + // Verify the validity of each address for (auto addr : mdsEps_) { butil::EndPoint endpt; if (butil::str2endpoint(addr.c_str(), &endpt) < 0) { @@ -52,9 +53,9 @@ inServiceIndex_ = 0; } -int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, - ChunkServerMetadata *metadata, - const std::shared_ptr &epochMap) { +int Register::RegisterToMDS(const ChunkServerMetadata* localMetadata, + ChunkServerMetadata* metadata, + const std::shared_ptr& epochMap) { ::curve::mds::topology::ChunkServerRegistRequest req; ::curve::mds::topology::ChunkServerRegistResponse resp; req.set_disktype(ops_.chunkserverDiskType); @@ -105,7 +106,8 @@ curve::mds::topology::TopologyService_Stub stub(&channel); stub.RegistChunkServer(&cntl, &req, &resp, nullptr); - // TODO(lixiaocui): 后续错误码和mds共享后改成枚举类型 + // TODO(lixiaocui): Change this to an enum type once the error codes are + // shared with the mds if (!cntl.Failed() && resp.statuscode() == 0) { break; } else { @@ -158,7 +160,7 @@ return 0; } -int Register::PersistChunkServerMeta(const ChunkServerMetadata &metadata) { +int Register::PersistChunkServerMeta(const ChunkServerMetadata& metadata) { int fd; std::string metaFile = curve::common::UriParser::GetPathFromUri(ops_.chunkserverMetaUri); diff --git a/src/chunkserver/register.h b/src/chunkserver/register.h index
f89683087d..d45a15fdf5 100644 --- a/src/chunkserver/register.h +++ b/src/chunkserver/register.h @@ -23,13 +23,14 @@ #ifndef SRC_CHUNKSERVER_REGISTER_H_ #define SRC_CHUNKSERVER_REGISTER_H_ -#include #include +#include #include -#include "src/fs/local_filesystem.h" + #include "proto/chunkserver.pb.h" -#include "src/chunkserver/epoch_map.h" #include "src/chunkserver/datastore/file_pool.h" +#include "src/chunkserver/epoch_map.h" +#include "src/fs/local_filesystem.h" using ::curve::fs::LocalFileSystem; @@ -37,7 +38,7 @@ namespace curve { namespace chunkserver { const uint32_t CURRENT_METADATA_VERSION = 0x01; -// register配置选项 +// Register Configuration Options struct RegisterOptions { std::string mdsListenAddr; std::string chunkserverInternalIp; @@ -61,7 +62,7 @@ struct RegisterOptions { class Register { public: - explicit Register(const RegisterOptions &ops); + explicit Register(const RegisterOptions& ops); ~Register() {} /** @@ -71,16 +72,16 @@ class Register { * @param[out] metadata chunkserver meta * @param[in,out] epochMap epochMap to update */ - int RegisterToMDS(const ChunkServerMetadata *localMetadata, - ChunkServerMetadata *metadata, - const std::shared_ptr &epochMap); + int RegisterToMDS(const ChunkServerMetadata* localMetadata, + ChunkServerMetadata* metadata, + const std::shared_ptr& epochMap); /** - * @brief 持久化ChunkServer元数据 + * @brief Persisting ChunkServer metadata * * @param[in] metadata */ - int PersistChunkServerMeta(const ChunkServerMetadata &metadata); + int PersistChunkServerMeta(const ChunkServerMetadata& metadata); private: RegisterOptions ops_; @@ -92,4 +93,3 @@ class Register { } // namespace curve #endif // SRC_CHUNKSERVER_REGISTER_H_ - diff --git a/src/chunkserver/trash.cpp b/src/chunkserver/trash.cpp index 2941261240..511ad103f0 100644 --- a/src/chunkserver/trash.cpp +++ b/src/chunkserver/trash.cpp @@ -20,21 +20,24 @@ * Author: lixiaocui */ -#include +#include "src/chunkserver/trash.h" + #include +#include + #include -#include "src/chunkserver/trash.h" -#include "src/common/string_util.h" -#include "src/chunkserver/datastore/filename_operator.h" -#include "src/chunkserver/copyset_node.h" + #include "include/chunkserver/chunkserver_common.h" -#include "src/common/uri_parser.h" +#include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/datastore/filename_operator.h" #include "src/chunkserver/raftlog/define.h" +#include "src/common/string_util.h" +#include "src/common/uri_parser.h" using ::curve::chunkserver::RAFT_DATA_DIR; +using ::curve::chunkserver::RAFT_LOG_DIR; using ::curve::chunkserver::RAFT_META_DIR; using ::curve::chunkserver::RAFT_SNAP_DIR; -using ::curve::chunkserver::RAFT_LOG_DIR; namespace curve { namespace chunkserver { @@ -60,13 +63,13 @@ int Trash::Init(TrashOptions options) { walPool_ = options.walPool; chunkNum_.store(0); - // 读取trash目录下的所有目录 + // Read all directories under the trash directory std::vector files; localFileSystem_->List(trashPath_, &files); - // 遍历trash下的文件 - for (auto &file : files) { - // 如果不是copyset目录,跳过 + // Traverse through files under trash + for (auto& file : files) { + // If it is not a copyset directory, skip if (!IsCopysetInTrash(file)) { continue; } @@ -100,8 +103,8 @@ int Trash::Fini() { return 0; } -int Trash::RecycleCopySet(const std::string &dirPath) { - // 回收站目录不存在,需要创建 +int Trash::RecycleCopySet(const std::string& dirPath) { + // The recycle bin directory does not exist and needs to be created if (!localFileSystem_->DirExists(trashPath_)) { LOG(INFO) << "Copyset recyler directory " << trashPath_ << " does not 
exist, creating it"; @@ -113,10 +116,11 @@ int Trash::RecycleCopySet(const std::string &dirPath) { } } - // 如果回收站已存在该目录,本次删除失败 - std::string dst = trashPath_ + "/" + - dirPath.substr(dirPath.find_last_of('/', dirPath.length()) + 1) + - '.' + std::to_string(std::time(nullptr)); + // If the directory already exists in the recycle bin, this deletion failed + std::string dst = + trashPath_ + "/" + + dirPath.substr(dirPath.find_last_of('/', dirPath.length()) + 1) + '.' + + std::to_string(std::time(nullptr)); if (localFileSystem_->DirExists(dst)) { LOG(WARNING) << "recycle error: " << dst << " already exist in " << trashPath_; @@ -137,28 +141,28 @@ int Trash::RecycleCopySet(const std::string &dirPath) { } void Trash::DeleteEligibleFileInTrashInterval() { - while (sleeper_.wait_for(std::chrono::seconds(scanPeriodSec_))) { - // 扫描回收站 - DeleteEligibleFileInTrash(); - } + while (sleeper_.wait_for(std::chrono::seconds(scanPeriodSec_))) { + // Scan Recycle Bin + DeleteEligibleFileInTrash(); + } } void Trash::DeleteEligibleFileInTrash() { - // trash目录暂不存在 + // The trash directory does not currently exist if (!localFileSystem_->DirExists(trashPath_)) { return; } - // 读取trash目录下的所有目录 + // Read all directories under the trash directory std::vector files; if (0 != localFileSystem_->List(trashPath_, &files)) { LOG(ERROR) << "Trash failed list files in " << trashPath_; return; } - // 遍历trash下的文件 - for (auto &file : files) { - // 如果不是copyset目录,跳过 + // Traverse through files under trash + for (auto& file : files) { + // If it is not a copyset directory, skip if (!IsCopysetInTrash(file)) { continue; } @@ -172,7 +176,7 @@ void Trash::DeleteEligibleFileInTrash() { continue; } - // 删除copyset目录 + // Delete copyset directory if (0 != localFileSystem_->Delete(copysetDir)) { LOG(ERROR) << "Trash fail to delete " << copysetDir; return; @@ -180,10 +184,10 @@ void Trash::DeleteEligibleFileInTrash() { } } -bool Trash::IsCopysetInTrash(const std::string &dirName) { - // 合法的copyset目录: 高32位PoolId(>0)组成, 低32位由copysetId(>0)组成 - // 目录是十进制形式 - // 例如:2860448220024 (poolId: 666, copysetId: 888) +bool Trash::IsCopysetInTrash(const std::string& dirName) { + // Legal copyset directory: composed of high 32-bit PoolId(>0), and low + // 32-bit composed of copysetId(>0) The directory is in decimal form For + // example: 2860448220024 (poolId: 666, copysetId: 888) uint64_t groupId; auto n = dirName.find("."); if (n == std::string::npos) { @@ -196,7 +200,7 @@ bool Trash::IsCopysetInTrash(const std::string &dirName) { return GetPoolID(groupId) >= 1 && GetCopysetID(groupId) >= 1; } -bool Trash::NeedDelete(const std::string ©setDir) { +bool Trash::NeedDelete(const std::string& copysetDir) { int fd = localFileSystem_->Open(copysetDir, O_RDONLY); if (0 > fd) { LOG(ERROR) << "Trash fail open " << copysetDir; @@ -219,15 +223,15 @@ bool Trash::NeedDelete(const std::string ©setDir) { return true; } -bool Trash::IsChunkOrSnapShotFile(const std::string &chunkName) { +bool Trash::IsChunkOrSnapShotFile(const std::string& chunkName) { return FileNameOperator::FileType::UNKNOWN != - FileNameOperator::ParseFileName(chunkName).type; + FileNameOperator::ParseFileName(chunkName).type; } -bool Trash::RecycleChunksAndWALInDir( - const std::string ©setPath, const std::string &filename) { +bool Trash::RecycleChunksAndWALInDir(const std::string& copysetPath, + const std::string& filename) { bool isDir = localFileSystem_->DirExists(copysetPath); - // 是文件看是否需要回收 + // Is it a file to see if it needs to be recycled if (!isDir) { if (IsChunkOrSnapShotFile(filename)) { 
-bool Trash::NeedDelete(const std::string &copysetDir) { +bool Trash::NeedDelete(const std::string& copysetDir) { int fd = localFileSystem_->Open(copysetDir, O_RDONLY); if (0 > fd) { LOG(ERROR) << "Trash fail open " << copysetDir; @@ -219,15 +223,15 @@ return true; } -bool Trash::IsChunkOrSnapShotFile(const std::string &chunkName) { return FileNameOperator::FileType::UNKNOWN != - FileNameOperator::ParseFileName(chunkName).type; + FileNameOperator::ParseFileName(chunkName).type; } -bool Trash::RecycleChunksAndWALInDir( - const std::string &copysetPath, const std::string &filename) { +bool Trash::RecycleChunksAndWALInDir(const std::string& copysetPath, + const std::string& filename) { bool isDir = localFileSystem_->DirExists(copysetPath); - // 是文件看是否需要回收 + // If it is a file, check whether it needs to be recycled if (!isDir) { if (IsChunkOrSnapShotFile(filename)) { return RecycleChunkfile(copysetPath, filename); @@ -238,18 +242,18 @@ } } - // 是目录,继续list + // It is a directory; continue listing it std::vector files; if (0 != localFileSystem_->List(copysetPath, &files)) { LOG(ERROR) << "Trash failed to list files in " << copysetPath; return false; } - // 遍历子文件 + // Traverse the sub-files bool ret = true; - for (auto &file : files) { + for (auto& file : files) { std::string filePath = copysetPath + "/" + file; - // recycle 失败不应该中断其他文件的recycle + // A recycle failure should not interrupt recycling the other files if (!RecycleChunksAndWALInDir(filePath, file)) { ret = false; } @@ -257,13 +261,13 @@ return ret; } -bool Trash::RecycleChunkfile( - const std::string &filepath, const std::string &filename) { +bool Trash::RecycleChunkfile(const std::string& filepath, + const std::string& filename) { (void)filename; LockGuard lg(mtx_); if (0 != chunkFilePool_->RecycleFile(filepath)) { LOG(ERROR) << "Trash failed recycle chunk " << filepath - << " to FilePool"; + << " to FilePool"; return false; } @@ -271,13 +275,12 @@ return true; } -bool Trash::RecycleWAL( - const std::string &filepath, const std::string &filename) { +bool Trash::RecycleWAL(const std::string& filepath, + const std::string& filename) { (void)filename; LockGuard lg(mtx_); if (walPool_ != nullptr && 0 != walPool_->RecycleFile(filepath)) { - LOG(ERROR) << "Trash failed recycle WAL " << filepath - << " to WALPool"; + LOG(ERROR) << "Trash failed recycle WAL " << filepath << " to WALPool"; return false; } @@ -285,12 +288,12 @@ return true; } -bool Trash::IsWALFile(const std::string &fileName) { +bool Trash::IsWALFile(const std::string& fileName) { int match = 0; int64_t first_index = 0; int64_t last_index = 0; - match = sscanf(fileName.c_str(), CURVE_SEGMENT_CLOSED_PATTERN, - &first_index, &last_index); + match = sscanf(fileName.c_str(), CURVE_SEGMENT_CLOSED_PATTERN, &first_index, + &last_index); if (match == 2) { LOG(INFO) << "recycle closed segment wal file, path: " << fileName << " first_index: " << first_index @@ -298,8 +301,7 @@ return true; } - match = sscanf(fileName.c_str(), CURVE_SEGMENT_OPEN_PATTERN, - &first_index); + match = sscanf(fileName.c_str(), CURVE_SEGMENT_OPEN_PATTERN, &first_index); if (match == 1) { LOG(INFO) << "recycle open segment wal file, path: " << fileName << " first_index: " << first_index; @@ -308,7 +310,7 @@ return false; } -uint32_t Trash::CountChunkNumInCopyset(const std::string &copysetPath) { +uint32_t Trash::CountChunkNumInCopyset(const std::string& copysetPath) { std::vector files; if (0 != localFileSystem_->List(copysetPath, &files)) { LOG(ERROR) << "Trash failed to list files in " << copysetPath; @@ -317,15 +319,14 @@ // Traverse subdirectories uint32_t chunkNum = 0; - for (auto &file : files) { + for (auto& file : files) { std::string filePath = copysetPath + "/" + file; bool isDir = localFileSystem_->DirExists(filePath); if (!isDir) { // valid: chunkfile, snapshotfile, walfile - if (!(IsChunkOrSnapShotFile(file) || - IsWALFile(file))) { - LOG(WARNING) << "Trash find a illegal file:" - << file << " in " << copysetPath; + if (!(IsChunkOrSnapShotFile(file) || IsWALFile(file))) { + LOG(WARNING) << "Trash find a illegal file:" << file << " in " + << copysetPath; continue; }
++chunkNum; diff --git a/src/chunkserver/trash.h b/src/chunkserver/trash.h index a3a3c89d53..b35f4aef71 100644 --- a/src/chunkserver/trash.h +++ b/src/chunkserver/trash.h @@ -25,169 +25,179 @@ #include #include -#include "src/fs/local_filesystem.h" + #include "src/chunkserver/datastore/file_pool.h" #include "src/common/concurrent/concurrent.h" #include "src/common/interruptible_sleeper.h" +#include "src/fs/local_filesystem.h" -using ::curve::common::Thread; using ::curve::common::Atomic; -using ::curve::common::Mutex; -using ::curve::common::LockGuard; using ::curve::common::InterruptibleSleeper; +using ::curve::common::LockGuard; +using ::curve::common::Mutex; +using ::curve::common::Thread; -namespace curve { -namespace chunkserver { -struct TrashOptions{ - // copyset的trash路径 - std::string trashPath; - // 文件在放入trash中expiredAfteSec秒后,可以被物理回收 - int expiredAfterSec; - // 扫描trash目录的时间间隔 - int scanPeriodSec; - - std::shared_ptr localFileSystem; - std::shared_ptr chunkFilePool; - std::shared_ptr walPool; -}; - -class Trash { - public: - int Init(TrashOptions options); - - int Run(); - - int Fini(); - - /* - * @brief DeleteEligibleFileInTrash 回收trash目录下的物理空间 - */ - void DeleteEligibleFileInTrash(); - - int RecycleCopySet(const std::string &dirPath); - - /* - * @brief 获取回收站中chunk的个数 - * - * @return chunk个数 - */ - uint32_t GetChunkNum() {return chunkNum_.load();} - - /** - * @brief is WAL or not ? - * - * @param fileName file name - * - * @retval true yes - * @retval false no - */ - static bool IsWALFile(const std::string& fileName); - - /* - * @brief IsChunkOrSnapShotFile 是否为chunk或snapshot文件 - * - * @param[in] chunkName 文件名 - * - * @return true-符合chunk或snapshot文件命名规则 - */ - static bool IsChunkOrSnapShotFile(const std::string& chunkName); - - private: - /* - * @brief DeleteEligibleFileInTrashInterval 每隔一段时间进行trash物理空间回收 - */ - void DeleteEligibleFileInTrashInterval(); - - /* - * @brief NeedDelete 文件是否需要删除,放入trash的时间大于 - * trash中expiredAfterSec可以删除 - * - * @param[in] copysetDir copyset的目录路径 - * - * @return true-可以被删除 - */ - bool NeedDelete(const std::string ©setDir); - - /* - * @brief IsCopysetInTrash 是否为回收站中的copyset的目录 - * - * @param[in] dirName 文目录路径 - * - * @return true-符合copyset目录命名规则 - */ - bool IsCopysetInTrash(const std::string &dirName); - - /* - * @brief Recycle Chunkfile and wal file in Copyset - * - * @param[in] copysetDir copyset dir - * @param[in] filename filename - */ - bool RecycleChunksAndWALInDir( - const std::string ©setDir, const std::string &filename); - - /* - * @brief Recycle Chunkfile - * - * @param[in] filepath 文件路径 - * @param[in] filename 文件名 - */ - bool RecycleChunkfile( - const std::string &filepath, const std::string &filename); - - /** - * @brief Recycle WAL - * - * @param copysetPath copyset dir - * @param filename file name - * - * @retval true success - * @retval false failure - */ - bool RecycleWAL(const std::string& filepath, const std::string& filename); - - /* - * @brief 统计copyset目录中的chunk个数 - * - * @param[in] copysetPath chunk所在目录 - * @return 返回chunk个数 - */ - uint32_t CountChunkNumInCopyset(const std::string ©setPath); - - private: - // 文件在放入trash中expiredAfteSec秒后,可以被物理回收 - int expiredAfterSec_; - - // 扫描trash目录的时间间隔 - int scanPeriodSec_; - - // 回收站中chunk的个数 - Atomic chunkNum_; - - Mutex mtx_; - - // 本地文件系统 - std::shared_ptr localFileSystem_; - - // chunk池子 - std::shared_ptr chunkFilePool_; - - // wal pool - std::shared_ptr walPool_; - - // 回收站全路径 - std::string trashPath_; - - // 后台清理回收站的线程 - Thread recycleThread_; - - // false-开始后台任务,true-停止后台任务 - Atomic 
isStop_; - - InterruptibleSleeper sleeper_; -}; -} // namespace chunkserver -} // namespace curve - -#endif // SRC_CHUNKSERVER_TRASH_H_ - +namespace curve +{ + namespace chunkserver + { + struct TrashOptions + { + // The trash path of the copyset + std::string trashPath; + // The file can be physically recycled after being placed in trash for + // expiredAfterSec seconds + int expiredAfterSec; + // Time interval for scanning the trash directory + int scanPeriodSec; + + std::shared_ptr localFileSystem; + std::shared_ptr chunkFilePool; + std::shared_ptr walPool; + }; + + class Trash + { + public: + int Init(TrashOptions options); + + int Run(); + + int Fini(); + + /* + * @brief DeleteEligibleFileInTrash recycles the physical space in the trash + * directory + */ + void DeleteEligibleFileInTrash(); + + int RecycleCopySet(const std::string &dirPath); + + /* + * @brief Get the number of chunks in the recycle bin + * + * @return Number of chunks + */ + uint32_t GetChunkNum() { return chunkNum_.load(); } + + /** + * @brief is WAL or not ? + * + * @param fileName file name + * + * @retval true yes + * @retval false no + */ + static bool IsWALFile(const std::string &fileName); + + /* + * @brief IsChunkOrSnapShotFile Whether the file is a chunk or snapshot + * file + * + * @param[in] chunkName file name + * + * @return true - conforms to the chunk or snapshot file naming rules + */ + static bool IsChunkOrSnapShotFile(const std::string &chunkName); + + private: + /* + * @brief DeleteEligibleFileInTrashInterval Trash physical space recycling + * at regular intervals + */ + void DeleteEligibleFileInTrashInterval(); + + /* + * @brief NeedDelete Whether the file needs to be deleted; it can be + * deleted once it has been in the trash longer than expiredAfterSec + * + * @param[in] copysetDir copyset directory path + * + * @return true - can be deleted + */ + bool NeedDelete(const std::string &copysetDir); + + /* + * @brief IsCopysetInTrash Is the directory of the copyset in the recycle + * bin + * + * @param[in] dirName directory path + * + * @return true - Complies with copyset directory naming rules + */ + bool IsCopysetInTrash(const std::string &dirName); + + /* + * @brief Recycle Chunkfile and wal file in Copyset + * + * @param[in] copysetDir copyset dir + * @param[in] filename filename + */ + bool RecycleChunksAndWALInDir( + const std::string &copysetDir, const std::string &filename); + + /* + * @brief Recycle Chunkfile + * + * @param[in] filepath file path + * @param[in] filename file name + */ + bool RecycleChunkfile(const std::string &filepath, + const std::string &filename); + + /** + * @brief Recycle WAL + * + * @param copysetPath copyset dir + * @param filename file name + * + * @retval true success + * @retval false failure + */ + bool RecycleWAL(const std::string &filepath, const std::string &filename); + + /* + * @brief counts the number of chunks in the copyset directory + * + * @param[in] copysetPath chunk directory + * @return returns the number of chunks + */ + uint32_t CountChunkNumInCopyset(const std::string &copysetPath); + + private: + // The file can be physically recycled after being placed in trash for + // expiredAfterSec seconds + int expiredAfterSec_; + + // Time interval for scanning the trash directory + int scanPeriodSec_; + + // Number of chunks in the Recycle Bin + Atomic chunkNum_; + + Mutex mtx_; + + // Local File System + std::shared_ptr localFileSystem_; + + // chunk Pool + std::shared_ptr chunkFilePool_; + + // wal pool + std::shared_ptr walPool_; + + // Recycle Bin Full Path + std::string trashPath_; + + // Thread for background cleaning of the recycle bin + Thread recycleThread_; + + // false - start the background task, true - stop the background task + Atomic isStop_; + + InterruptibleSleeper sleeper_; + }; + } // namespace chunkserver +} // namespace curve + +#endif // SRC_CHUNKSERVER_TRASH_H_
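[Editor's note] For readers unfamiliar with the InterruptibleSleeper used by the scan loop in trash.cpp above, here is a minimal stand-in built on the standard library; this sketch is illustrative and its names are invented, not part of this patch:

#include <chrono>
#include <condition_variable>
#include <mutex>

// wait_for() returns true on timeout (keep scanning) and false once
// interrupt() has been called, so the background loop exits promptly.
class SleeperSketch {
 public:
    bool wait_for(std::chrono::seconds d) {
        std::unique_lock<std::mutex> lk(m_);
        return !cv_.wait_for(lk, d, [this] { return stop_; });
    }
    void interrupt() {
        {
            std::lock_guard<std::mutex> lk(m_);
            stop_ = true;
        }
        cv_.notify_all();
    }

 private:
    std::mutex m_;
    std::condition_variable cv_;
    bool stop_ = false;
};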
diff --git a/src/client/chunk_closure.cpp b/src/client/chunk_closure.cpp index 592e9d2a06..d2345e85fc 100644 --- a/src/client/chunk_closure.cpp +++ b/src/client/chunk_closure.cpp @@ -22,48 +22,57 @@ #include "src/client/chunk_closure.h" -#include -#include #include +#include +#include #include "src/client/client_common.h" #include "src/client/copyset_client.h" +#include "src/client/io_tracker.h" #include "src/client/metacache.h" #include "src/client/request_closure.h" #include "src/client/request_context.h" #include "src/client/service_helper.h" -#include "src/client/io_tracker.h" -// TODO(tongguangxun) :优化重试逻辑,将重试逻辑与RPC返回逻辑拆开 +// TODO(tongguangxun): Optimize retry logic by separating the retry logic from +// the RPC return logic namespace curve { namespace client { -ClientClosure::BackoffParam ClientClosure::backoffParam_; -FailureRequestOption ClientClosure::failReqOpt_; +ClientClosure::BackoffParam ClientClosure::backoffParam_; +FailureRequestOption ClientClosure::failReqOpt_; void ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) { RequestClosure* reqDone = static_cast(done_); - // 如果对应的cooysetId leader可能发生变更 - // 那么设置这次重试请求超时时间为默认值 - // 这是为了尽快重试这次请求 - // 从copysetleader迁移到client GetLeader获取到新的leader会有1~2s的延迟 - // 对于一个请求来说,GetLeader仍然可能返回旧的Leader - // rpc timeout时间可能会被设置成2s/4s,等到超时后再去获取leader信息 - // 为了尽快在新的Leader上重试请求,将rpc timeout时间设置为默认值 + // If the leader of the corresponding copyset may have changed, set the + // timeout of this retry request to the default value so that the request + // is retried as soon as possible. After a copyset leader migration it + // takes 1~2s before the client's GetLeader obtains the new leader, so for + // a given request GetLeader may still return the old leader. The rpc + // timeout may have been backed off to 2s/4s, and the leader information + // is only refreshed after that timeout expires. To retry the request on + // the new leader as soon as possible, set the rpc timeout to the default + // value. if (cntlstatus == brpc::ERPCTIMEDOUT || cntlstatus == ETIMEDOUT) { uint64_t nextTimeout = 0; uint64_t retriedTimes = reqDone->GetRetriedTimes(); bool leaderMayChange = metaCache_->IsLeaderMayChange( chunkIdInfo_.lpid_, chunkIdInfo_.cpid_); - // 当某一个IO重试超过一定次数后,超时时间一定进行指数退避 - // 当底层chunkserver压力大时,可能也会触发unstable - // 由于copyset leader may change,会导致请求超时时间设置为默认值 - // 而chunkserver在这个时间内处理不了,导致IO hang - // 真正宕机的情况下,请求重试一定次数后会处理完成 - // 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑 + // Once an IO has been retried more than a certain number of times, the + // timeout must enter exponential backoff. A chunkserver under heavy + // pressure may also be marked unstable; because the copyset leader may + // change, the request timeout would then be reset to the default + // value, which the chunkserver cannot meet in time, and the IO hangs. + // If the chunkserver is really down, the request completes after a + // bounded number of retries; if it keeps being retried, it is not a + // downtime case, and the timeout should still go through the + // exponential backoff logic. - if (retriedTimes < failReqOpt_.chunkserverMinRetryTimesForceTimeoutBackoff && // NOLINT + if (retriedTimes < + failReqOpt_ + .chunkserverMinRetryTimesForceTimeoutBackoff && //
NOLINT leaderMayChange) { nextTimeout = failReqOpt_.chunkserverRPCTimeoutMS; } else { @@ -71,25 +80,23 @@ void ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) { } reqDone->SetNextTimeOutMS(nextTimeout); - LOG(WARNING) << "rpc timeout, next timeout = " << nextTimeout - << ", " << *reqCtx_ - << ", retried times = " << reqDone->GetRetriedTimes() - << ", IO id = " << reqDone->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << "rpc timeout, next timeout = " << nextTimeout << ", " + << *reqCtx_ + << ", retried times = " << reqDone->GetRetriedTimes() + << ", IO id = " << reqDone->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); return; } if (rpcstatus == CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD) { uint64_t nextsleeptime = OverLoadBackOff(reqDone->GetRetriedTimes()); LOG(WARNING) << "chunkserver overload, sleep(us) = " << nextsleeptime - << ", " << *reqCtx_ - << ", retried times = " << reqDone->GetRetriedTimes() - << ", IO id = " << reqDone->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << ", " << *reqCtx_ + << ", retried times = " << reqDone->GetRetriedTimes() + << ", IO id = " << reqDone->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); bthread_usleep(nextsleeptime); return; } @@ -103,19 +110,19 @@ void ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) { } } - LOG(WARNING) - << "Rpc failed " - << (retryDirectly_ ? "retry directly, " - : "sleep " + std::to_string(nextSleepUS) + " us, ") - << *reqCtx_ << ", cntl status = " << cntlstatus - << ", response status = " - << curve::chunkserver::CHUNK_OP_STATUS_Name( - static_cast(rpcstatus)) - << ", retried times = " << reqDone->GetRetriedTimes() - << ", IO id = " << reqDone->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << "Rpc failed " + << (retryDirectly_ + ? 
"retry directly, " + : "sleep " + std::to_string(nextSleepUS) + " us, ") + << *reqCtx_ << ", cntl status = " << cntlstatus + << ", response status = " + << curve::chunkserver::CHUNK_OP_STATUS_Name( + static_cast( + rpcstatus)) + << ", retried times = " << reqDone->GetRetriedTimes() + << ", IO id = " << reqDone->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); if (nextSleepUS != 0) { bthread_usleep(nextSleepUS); @@ -134,8 +141,11 @@ uint64_t ClientClosure::OverLoadBackOff(uint64_t currentRetryTimes) { random_time -= nextsleeptime / 10; nextsleeptime += random_time; - nextsleeptime = std::min(nextsleeptime, failReqOpt_.chunkserverMaxRetrySleepIntervalUS); // NOLINT - nextsleeptime = std::max(nextsleeptime, failReqOpt_.chunkserverOPRetryIntervalUS); // NOLINT + nextsleeptime = + std::min(nextsleeptime, + failReqOpt_.chunkserverMaxRetrySleepIntervalUS); // NOLINT + nextsleeptime = std::max( + nextsleeptime, failReqOpt_.chunkserverOPRetryIntervalUS); // NOLINT return nextsleeptime; } @@ -153,10 +163,11 @@ uint64_t ClientClosure::TimeoutBackOff(uint64_t currentRetryTimes) { return nextTimeout; } -// 统一请求回调函数入口 -// 整体处理逻辑与之前相同 -// 针对不同的请求类型和返回状态码,进行相应的处理 -// 各子类需要实现SendRetryRequest,进行重试请求 +// Unified Request Callback Function Entry +// The overall processing logic is the same as before +// Perform corresponding processing for different request types and return +// status codes Each subclass needs to implement SendRetryRequest for retry +// requests void ClientClosure::Run() { std::unique_ptr selfGuard(this); std::unique_ptr cntlGuard(cntl_); @@ -176,80 +187,81 @@ void ClientClosure::Run() { needRetry = true; OnRpcFailed(); } else { - // 只要rpc正常返回,就清空超时计数器 - metaCache_->GetUnstableHelper().ClearTimeout( - chunkserverID_, chunkserverEndPoint_); + // As long as RPC returns normally, clear the timeout counter + metaCache_->GetUnstableHelper().ClearTimeout(chunkserverID_, + chunkserverEndPoint_); status_ = GetResponseStatus(); switch (status_) { - // 1. 请求成功 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS: - OnSuccess(); - break; - - // 2.1 不是leader - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED: - MetricHelper::IncremRedirectRPCCount(fileMetric_, reqCtx_->optype_); - needRetry = true; - OnRedirected(); - break; - - // 2.2 Copyset不存在,大概率都是配置变更了 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST: - needRetry = true; - OnCopysetNotExist(); - break; - - // 2.3 chunk not exist,直接返回,不用重试 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST: - OnChunkNotExist(); - break; - - // 2.4 非法参数,直接返回,不用重试 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST: - OnInvalidRequest(); - break; + // 1. 
@@ -153,10 +163,11 @@ return nextTimeout; } -// 统一请求回调函数入口 -// 整体处理逻辑与之前相同 -// 针对不同的请求类型和返回状态码,进行相应的处理 -// 各子类需要实现SendRetryRequest,进行重试请求 +// Unified entry point for request callbacks. +// The overall processing logic is the same as before: perform the +// corresponding handling for each request type and response status code. +// Each subclass implements SendRetryRequest to send its retry request. void ClientClosure::Run() { std::unique_ptr selfGuard(this); std::unique_ptr cntlGuard(cntl_); @@ -176,80 +187,81 @@ needRetry = true; OnRpcFailed(); } else { - // 只要rpc正常返回,就清空超时计数器 - metaCache_->GetUnstableHelper().ClearTimeout( - chunkserverID_, chunkserverEndPoint_); + // As long as the RPC returns normally, clear the timeout counter + metaCache_->GetUnstableHelper().ClearTimeout(chunkserverID_, + chunkserverEndPoint_); status_ = GetResponseStatus(); switch (status_) { - // 1. 请求成功 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS: - OnSuccess(); - break; - - // 2.1 不是leader - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED: - MetricHelper::IncremRedirectRPCCount(fileMetric_, reqCtx_->optype_); - needRetry = true; - OnRedirected(); - break; - - // 2.2 Copyset不存在,大概率都是配置变更了 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST: - needRetry = true; - OnCopysetNotExist(); - break; - - // 2.3 chunk not exist,直接返回,不用重试 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST: - OnChunkNotExist(); - break; - - // 2.4 非法参数,直接返回,不用重试 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST: - OnInvalidRequest(); - break; + // 1. Request successful + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS: + OnSuccess(); + break; + + // 2.1 Not the leader + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED: + MetricHelper::IncremRedirectRPCCount(fileMetric_, + reqCtx_->optype_); + needRetry = true; + OnRedirected(); + break; - // 2.5 返回backward - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD: - if (reqCtx_->optype_ == OpType::WRITE) { + // 2.2 Copyset does not exist, most likely due to configuration + // changes + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST: needRetry = true; - OnBackward(); - } else { - LOG(ERROR) << OpTypeToString(reqCtx_->optype_) - << " return backward, " - << *reqCtx_ - << ", status=" << status_ + OnCopysetNotExist(); + break; + + // 2.3 The chunk does not exist; return directly without retrying + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST: + OnChunkNotExist(); + break; + + // 2.4 Invalid parameter; return directly without retrying + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST: + OnInvalidRequest(); + break; + + // 2.5 BACKWARD returned + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD: + if (reqCtx_->optype_ == OpType::WRITE) { + needRetry = true; + OnBackward(); + } else { + LOG(ERROR) + << OpTypeToString(reqCtx_->optype_) + << " return backward, " << *reqCtx_ + << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ + << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); } break; + + // 2.6 The chunk already exists; return directly without retrying + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST: + OnChunkExist(); + break; + + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD: + OnEpochTooOld(); + break; + + default: + needRetry = true; + LOG(WARNING) + << OpTypeToString(reqCtx_->optype_) + << " failed for UNKNOWN reason, " << *reqCtx_ << ", status=" + << curve::chunkserver::CHUNK_OP_STATUS_Name( + static_cast(status_)) << ", retried times = " << reqDone_->GetRetriedTimes() << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " + << ", request id = " << reqCtx_->id_ << ", remote side = " << butil::endpoint2str(cntl_->remote_side()).c_str(); - } - break; - - // 2.6 返回chunk exist,直接返回,不用重试 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST: - OnChunkExist(); - break; - - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD: - OnEpochTooOld(); - break; - - default: - needRetry = true; - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " failed for UNKNOWN reason, " << *reqCtx_ - << ", status=" - << curve::chunkserver::CHUNK_OP_STATUS_Name( - static_cast(status_)) - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); } } @@ -264,22 +276,22 @@ void ClientClosure::OnRpcFailed() { status_ = cntl_->ErrorCode(); - // 如果连接失败,再等一定时间再重试 + // If the connection fails, wait for a certain amount of time before trying + // again if (cntlstatus_ == brpc::ERPCTIMEDOUT) { - // 如果RPC超时, 对应的chunkserver超时请求次数+1 + // If the RPC timed out, increment the timeout request count of the + // corresponding chunkserver metaCache_->GetUnstableHelper().IncreTimeout(chunkserverID_); MetricHelper::IncremTimeOutRPCCount(fileMetric_, reqCtx_->optype_); } - LOG_EVERY_SECOND(WARNING) << OpTypeToString(reqCtx_->optype_) - << " failed, error code: " -
<< cntl_->ErrorCode() - << ", error: " << cntl_->ErrorText() - << ", " << *reqCtx_ + LOG_EVERY_SECOND(WARNING) + << OpTypeToString(reqCtx_->optype_) + << " failed, error code: " << cntl_->ErrorCode() + << ", error: " << cntl_->ErrorText() << ", " << *reqCtx_ << ", retried times = " << reqDone_->GetRetriedTimes() << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " + << ", request id = " << reqCtx_->id_ << ", remote side = " << butil::endpoint2str(cntl_->remote_side()).c_str(); ProcessUnstableState(); @@ -291,26 +303,27 @@ void ClientClosure::ProcessUnstableState() { chunkserverID_, chunkserverEndPoint_); switch (state) { - case UnstableState::ServerUnstable: { - std::string ip = butil::ip2str(chunkserverEndPoint_.ip).c_str(); - int ret = metaCache_->SetServerUnstable(ip); - if (ret != 0) { - LOG(WARNING) << "Set server(" << ip << ") unstable failed, " - << "now set chunkserver(" << chunkserverID_ << ") unstable"; + case UnstableState::ServerUnstable: { + std::string ip = butil::ip2str(chunkserverEndPoint_.ip).c_str(); + int ret = metaCache_->SetServerUnstable(ip); + if (ret != 0) { + LOG(WARNING) + << "Set server(" << ip << ") unstable failed, " + << "now set chunkserver(" << chunkserverID_ << ") unstable"; + metaCache_->SetChunkserverUnstable(chunkserverID_); + } + break; + } + case UnstableState::ChunkServerUnstable: { metaCache_->SetChunkserverUnstable(chunkserverID_); + break; } - break; - } - case UnstableState::ChunkServerUnstable: { - metaCache_->SetChunkserverUnstable(chunkserverID_); - break; - } - case UnstableState::NoUnstable: { - RefreshLeader(); - break; - } - default: - break; + case UnstableState::NoUnstable: { + RefreshLeader(); + break; + } + default: + break; } } @@ -319,64 +332,58 @@ void ClientClosure::OnSuccess() { auto duration = cntl_->latency_us(); MetricHelper::LatencyRecord(fileMetric_, duration, reqCtx_->optype_); - MetricHelper::IncremRPCQPSCount( - fileMetric_, reqCtx_->rawlength_, reqCtx_->optype_); + MetricHelper::IncremRPCQPSCount(fileMetric_, reqCtx_->rawlength_, + reqCtx_->optype_); } void ClientClosure::OnChunkNotExist() { reqDone_->SetFailed(status_); - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " not exists, " << *reqCtx_ - << ", status=" << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " not exists, " + << *reqCtx_ << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); auto duration = cntl_->latency_us(); MetricHelper::LatencyRecord(fileMetric_, duration, reqCtx_->optype_); - MetricHelper::IncremRPCQPSCount( - fileMetric_, reqCtx_->rawlength_, reqCtx_->optype_); + MetricHelper::IncremRPCQPSCount(fileMetric_, reqCtx_->rawlength_, + reqCtx_->optype_); } void ClientClosure::OnChunkExist() { reqDone_->SetFailed(status_); - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " exists, " << *reqCtx_ - << ", status=" << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << 
butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " exists, " << *reqCtx_ + << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); } void ClientClosure::OnEpochTooOld() { reqDone_->SetFailed(status_); LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " epoch too old, reqCtx: " << *reqCtx_ - << ", status: " << status_ - << ", retried times: " << reqDone_->GetRetriedTimes() - << ", IO id: " << reqDone_->GetIOTracker()->GetID() - << ", request id: " << reqCtx_->id_ - << ", remote side: " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << " epoch too old, reqCtx: " << *reqCtx_ + << ", status: " << status_ + << ", retried times: " << reqDone_->GetRetriedTimes() + << ", IO id: " << reqDone_->GetIOTracker()->GetID() + << ", request id: " << reqCtx_->id_ << ", remote side: " + << butil::endpoint2str(cntl_->remote_side()).c_str(); } void ClientClosure::OnRedirected() { LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " redirected, " - << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", redirect leader is " - << (response_->has_redirect() ? response_->redirect() : "empty") - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", redirect leader is " + << (response_->has_redirect() ? 
response_->redirect() + : "empty") + << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); if (response_->has_redirect()) { int ret = UpdateLeaderWithRedirectInfo(response_->redirect()); @@ -390,13 +397,11 @@ void ClientClosure::OnRedirected() { void ClientClosure::OnCopysetNotExist() { LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " copyset not exists, " - << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); RefreshLeader(); } @@ -443,23 +448,20 @@ void ClientClosure::RefreshLeader() { << ", IO id = " << reqDone_->GetIOTracker()->GetID() << ", request id = " << reqCtx_->id_; } else { - // 如果refresh leader获取到了新的leader信息 - // 则重试之前不进行睡眠 + // If the refresh leader obtains new leader information + // Do not sleep before retrying retryDirectly_ = (leaderId != chunkserverID_); } } void ClientClosure::OnBackward() { const auto latestSn = metaCache_->GetLatestFileSn(); - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " return BACKWARD, " - << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " return BACKWARD, " + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); reqCtx_->seq_ = latestSn; } @@ -467,38 +469,26 @@ void ClientClosure::OnBackward() { void ClientClosure::OnInvalidRequest() { reqDone_->SetFailed(status_); LOG(ERROR) << OpTypeToString(reqCtx_->optype_) - << " failed for invalid format, " << *reqCtx_ - << ", status=" << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << " failed for invalid format, " << *reqCtx_ + << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); MetricHelper::IncremFailRPCCount(fileMetric_, reqCtx_->optype_); } void WriteChunkClosure::SendRetryRequest() { - client_->WriteChunk(reqCtx_->idinfo_, - reqCtx_->fileId_, - reqCtx_->epoch_, - reqCtx_->seq_, - reqCtx_->writeData_, - reqCtx_->offset_, - reqCtx_->rawlength_, - reqCtx_->sourceInfo_, - done_); + client_->WriteChunk(reqCtx_->idinfo_, reqCtx_->fileId_, reqCtx_->epoch_, + reqCtx_->seq_, reqCtx_->writeData_, reqCtx_->offset_, + reqCtx_->rawlength_, reqCtx_->sourceInfo_, done_); } -void WriteChunkClosure::OnSuccess() { - ClientClosure::OnSuccess(); -} +void WriteChunkClosure::OnSuccess() { ClientClosure::OnSuccess(); } void 
ReadChunkClosure::SendRetryRequest() { - client_->ReadChunk(reqCtx_->idinfo_, reqCtx_->seq_, - reqCtx_->offset_, - reqCtx_->rawlength_, - reqCtx_->sourceInfo_, - done_); + client_->ReadChunk(reqCtx_->idinfo_, reqCtx_->seq_, reqCtx_->offset_, + reqCtx_->rawlength_, reqCtx_->sourceInfo_, done_); } void ReadChunkClosure::OnSuccess() { @@ -516,9 +506,7 @@ void ReadChunkClosure::OnChunkNotExist() { void ReadChunkSnapClosure::SendRetryRequest() { client_->ReadChunkSnapshot(reqCtx_->idinfo_, reqCtx_->seq_, - reqCtx_->offset_, - reqCtx_->rawlength_, - done_); + reqCtx_->offset_, reqCtx_->rawlength_, done_); } void ReadChunkSnapClosure::OnSuccess() { @@ -528,10 +516,8 @@ void ReadChunkSnapClosure::OnSuccess() { } void DeleteChunkSnapClosure::SendRetryRequest() { - client_->DeleteChunkSnapshotOrCorrectSn( - reqCtx_->idinfo_, - reqCtx_->correctedSeq_, - done_); + client_->DeleteChunkSnapshotOrCorrectSn(reqCtx_->idinfo_, + reqCtx_->correctedSeq_, done_); } void GetChunkInfoClosure::SendRetryRequest() { @@ -548,17 +534,16 @@ void GetChunkInfoClosure::OnSuccess() { } void GetChunkInfoClosure::OnRedirected() { - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " redirected, " << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", redirect leader is " - << (chunkinforesponse_->has_redirect() ? chunkinforesponse_->redirect() - : "empty") - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " redirected, " + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", redirect leader is " + << (chunkinforesponse_->has_redirect() + ? 
chunkinforesponse_->redirect() + : "empty") + << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); if (chunkinforesponse_->has_redirect()) { int ret = UpdateLeaderWithRedirectInfo(chunkinforesponse_->redirect()); @@ -571,19 +556,14 @@ } void CreateCloneChunkClosure::SendRetryRequest() { - client_->CreateCloneChunk(reqCtx_->idinfo_, - reqCtx_->location_, - reqCtx_->seq_, - reqCtx_->correctedSeq_, - reqCtx_->chunksize_, - done_); + client_->CreateCloneChunk(reqCtx_->idinfo_, reqCtx_->location_, + reqCtx_->seq_, reqCtx_->correctedSeq_, + reqCtx_->chunksize_, done_); } void RecoverChunkClosure::SendRetryRequest() { - client_->RecoverChunk(reqCtx_->idinfo_, - reqCtx_->offset_, - reqCtx_->rawlength_, - done_); + client_->RecoverChunk(reqCtx_->idinfo_, reqCtx_->offset_, + reqCtx_->rawlength_, done_); } int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { @@ -601,7 +581,7 @@ ret = metaCache_->UpdateLeader(lpId, cpId, leaderAddr.addr_); if (ret != 0) { LOG(WARNING) << "Update leader of copyset (" << lpId << ", " << cpId - << ") in metaCache fail"; + << ") in metaCache fail"; return -1; } @@ -609,7 +589,7 @@ ret = metaCache_->GetLeader(lpId, cpId, &leaderId, &leaderEp); if (ret != 0) { LOG(INFO) << "Get leader of copyset (" << lpId << ", " << cpId - << ") from metaCache fail"; + << ") from metaCache fail"; return -1; } @@ -617,5 +597,5 @@ return 0; } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/chunk_closure.h b/src/client/chunk_closure.h index f5d9acd220..3ca5a609df 100644 --- a/src/client/chunk_closure.h +++ b/src/client/chunk_closure.h @@ -23,15 +23,16 @@ #ifndef SRC_CLIENT_CHUNK_CLOSURE_H_ #define SRC_CLIENT_CHUNK_CLOSURE_H_ -#include #include #include +#include + #include #include #include "proto/chunk.pb.h" -#include "src/client/client_config.h" #include "src/client/client_common.h" +#include "src/client/client_config.h" #include "src/client/client_metric.h" #include "src/client/request_closure.h" #include "src/common/math_util.h" @@ -42,15 +43,15 @@ namespace client { using curve::chunkserver::CHUNK_OP_STATUS; using curve::chunkserver::ChunkResponse; using curve::chunkserver::GetChunkInfoResponse; -using ::google::protobuf::Message; using ::google::protobuf::Closure; +using ::google::protobuf::Message; class MetaCache; class CopysetClient; /** - * ClientClosure,负责保存Rpc上下文, - * 包含cntl和response已经重试次数 + * ClientClosure, responsible for saving the Rpc context, + * including the cntl, the response, and the retry count */ class ClientClosure : public Closure { public: @@ -59,67 +60,59 @@ virtual ~ClientClosure() = default; - void SetCntl(brpc::Controller* cntl) { - cntl_ = cntl; - } + void SetCntl(brpc::Controller* cntl) { cntl_ = cntl; } virtual void SetResponse(Message* response) { response_.reset(static_cast(response)); } - void SetChunkServerID(ChunkServerID csid) { - chunkserverID_ = csid; - } + void SetChunkServerID(ChunkServerID csid) { chunkserverID_ = csid; } - ChunkServerID GetChunkServerID() const { - return chunkserverID_; - } + ChunkServerID GetChunkServerID() const { return chunkserverID_; } void SetChunkServerEndPoint(const butil::EndPoint& endPoint) {
chunkserverEndPoint_ = endPoint; } - EndPoint GetChunkServerEndPoint() const { - return chunkserverEndPoint_; - } + EndPoint GetChunkServerEndPoint() const { return chunkserverEndPoint_; } - // 统一Run函数入口 + // Unified entry point of Run void Run() override; - // 重试请求 + // Retry the request void OnRetry(); - // Rpc Failed 处理函数 + // Handler for Rpc failure void OnRpcFailed(); - // 返回成功 处理函数 + // Handler for a successful response virtual void OnSuccess(); - // 返回重定向 处理函数 + // Handler for a redirect response virtual void OnRedirected(); - // copyset不存在 + // The copyset does not exist void OnCopysetNotExist(); - // 返回backward + // Handler for a BACKWARD response void OnBackward(); - // 返回chunk不存在 处理函数 + // Handler for a chunk-not-exist response virtual void OnChunkNotExist(); - // 返回chunk存在 处理函数 + // Handler for a chunk-already-exists response void OnChunkExist(); // handle epoch too old void OnEpochTooOld(); - // 非法参数 + // Invalid parameter void OnInvalidRequest(); - // 发送重试请求 + // Send retry request virtual void SendRetryRequest() = 0; - // 获取response返回的状态码 + // Obtain the status code returned by the response virtual CHUNK_OP_STATUS GetResponseStatus() const { return response_->status(); } @@ -132,45 +125,43 @@ SetBackoffParam(); DVLOG(9) << "Client clousre conf info: " - << "chunkserverOPRetryIntervalUS = " - << failReqOpt_.chunkserverOPRetryIntervalUS - << ", chunkserverOPMaxRetry = " - << failReqOpt_.chunkserverOPMaxRetry; + << "chunkserverOPRetryIntervalUS = " + << failReqOpt_.chunkserverOPRetryIntervalUS + << ", chunkserverOPMaxRetry = " + << failReqOpt_.chunkserverOPMaxRetry; } - Closure* GetClosure() const { - return done_; - } + Closure* GetClosure() const { return done_; } - // 测试使用,设置closure - void SetClosure(Closure* done) { - done_ = done; - } + // For testing: set the closure + void SetClosure(Closure* done) { done_ = done; } - static FailureRequestOption GetFailOpt() { - return failReqOpt_; - } + static FailureRequestOption GetFailOpt() { return failReqOpt_; } /** - * 在重试之前根据返回值进行预处理 - * 场景1: rpc timeout,那么这时候会指数增加当前rpc的超时时间,然后直接进行重试 - * 场景2:底层OVERLOAD,那么需要在重试之前睡眠一段时间,睡眠时间根据重试次数指数增长 - * @param: rpcstatue为rpc返回值 - * @param: cntlstatus为本次rpc controller返回值 + * Preprocess according to the return values before retrying. + * Scenario 1: rpc timeout - exponentially increase the current rpc + * timeout, then retry directly. + * Scenario 2: the underlying chunkserver is OVERLOADed - sleep before + * retrying, with the sleep time growing exponentially with the retry + * count. + * @param: rpcstatue is the rpc response status code + * @param: cntlstatus is the status code of this rpc controller */ void PreProcessBeforeRetry(int rpcstatue, int cntlstatus); /** - * 底层chunkserver overload之后需要根据重试次数进行退避 - * @param: currentRetryTimes为当前已重试的次数 - * @return: 返回当前的需要睡眠的时间 + * After the underlying chunkserver reports overload, back off according + * to the number of retries + * @param: currentRetryTimes is the current number of retries + * @return: Returns the time to sleep before the next retry */ static uint64_t OverLoadBackOff(uint64_t currentRetryTimes); /** - * rpc timeout之后需要根据重试次数进行退避 - * @param: currentRetryTimes为当前已重试的次数 - * @return: 返回下一次RPC 超时时间 + * After an rpc timeout, back off according to the number of retries + * @param: currentRetryTimes is the current number of retries + * @return: Returns the next RPC timeout */ static uint64_t TimeoutBackOff(uint64_t currentRetryTimes);
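[Editor's note] A condensed sketch of the retry-timeout policy these doc comments describe: keep the default timeout while the leader may still change and few retries have happened, otherwise back off exponentially up to a cap. Constants and names are illustrative assumptions, not taken from this patch:

#include <algorithm>
#include <cstdint>

uint64_t NextRpcTimeoutMs(bool leaderMayChange, uint64_t retriedTimes) {
    const uint64_t kDefaultMs = 1000;     // assumed default rpc timeout
    const uint64_t kMaxMs = 8000;         // assumed backoff cap
    const uint64_t kMinForceBackoff = 5;  // assumed retry threshold
    if (leaderMayChange && retriedTimes < kMinForceBackoff) {
        return kDefaultMs;  // retry quickly on the (possibly new) leader
    }
    uint64_t t = kDefaultMs << std::min<uint64_t>(retriedTimes, 3);
    return std::min(t, kMaxMs);
}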
@@ -207,32 +198,33 @@ class ClientClosure : public Closure { void RefreshLeader(); - static FailureRequestOption failReqOpt_; - - brpc::Controller* cntl_; - std::unique_ptr response_; - CopysetClient* client_; - Closure* done_; - // 这里保存chunkserverID,是为了区别当前这个rpc是发给哪个chunkserver的 - // 这样方便在rpc closure里直接找到,当前是哪个chunkserver返回的失败 - ChunkServerID chunkserverID_; - butil::EndPoint chunkserverEndPoint_; - - // 记录当前请求的相关信息 - MetaCache* metaCache_; - RequestClosure* reqDone_; - FileMetric* fileMetric_; - RequestContext* reqCtx_; - ChunkIDInfo chunkIdInfo_; - - // 发送重试请求前是否睡眠 + static FailureRequestOption failReqOpt_; + + brpc::Controller* cntl_; + std::unique_ptr response_; + CopysetClient* client_; + Closure* done_; + // The chunkserverID is saved here to record which chunkserver the + // current rpc was sent to, which makes it easy to see in the rpc + // closure which chunkserver returned the failure + ChunkServerID chunkserverID_; + butil::EndPoint chunkserverEndPoint_; + + // Record relevant information for the current request + MetaCache* metaCache_; + RequestClosure* reqDone_; + FileMetric* fileMetric_; + RequestContext* reqCtx_; + ChunkIDInfo chunkIdInfo_; + + // Whether to sleep before sending a retry request bool retryDirectly_ = false; - // response 状态码 - int status_; + // response status code + int status_; - // rpc 状态码 - int cntlstatus_; + // rpc status code + int cntlstatus_; }; class WriteChunkClosure : public ClientClosure { @@ -308,7 +300,7 @@ void SendRetryRequest() override; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_CHUNK_CLOSURE_H_ diff --git a/src/client/client_common.h b/src/client/client_common.h index 8620f050d1..97598a7038 100644 --- a/src/client/client_common.h +++ b/src/client/client_common.h @@ -28,8 +28,8 @@ #include #include -#include #include +#include #include "include/client/libcurve.h" #include "src/common/throttle.h" @@ -53,7 +53,7 @@ constexpr uint64_t KiB = 1024; constexpr uint64_t MiB = 1024 * KiB; constexpr uint64_t GiB = 1024 * MiB; -// 操作类型 +// Operation type enum class OpType { READ = 0, WRITE, @@ -67,7 +67,7 @@ }; /** - * 与nameserver.proto中的FileStatus一一对应 + * Corresponds one-to-one with FileStatus in nameserver.proto */ enum class FileStatus { Created = 0, @@ -90,12 +90,10 @@ typedef struct ChunkIDInfo { ChunkIDInfo(ChunkID cid, LogicPoolID lpid, CopysetID cpid) : cid_(cid), cpid_(cpid), lpid_(lpid) {} - bool Valid() const { - return lpid_ > 0 && cpid_ > 0; - } + bool Valid() const { return lpid_ > 0 && cpid_ > 0; } } ChunkIDInfo_t; -// 保存每个chunk对应的版本信息 +// Save the version information corresponding to each chunk typedef struct ChunkInfoDetail { std::vector chunkSn; } ChunkInfoDetail_t; @@ -106,7 +104,8 @@ typedef struct LeaseSession { uint64_t createTime; } LeaseSession_t; -// 保存logicalpool中segment对应的copysetid信息 +// Save the copysetid information corresponding to the segment in the +// logicalpool typedef struct LogicalPoolCopysetIDInfo { LogicPoolID lpid; std::vector cpidVec; @@ -117,7 +116,7 @@ } } LogicalPoolCopysetIDInfo_t; -// 保存每个segment的基本信息 +// Save basic information for each segment typedef struct SegmentInfo { uint32_t segmentsize; uint32_t chunksize; @@ -147,9 +146,9 @@ typedef struct FInfo { uint64_t length; uint64_t ctime; uint64_t seqnum; - // userinfo是当前操作这个文件的用户信息 + // userinfo is the user information currently operating on this file
@@ -147,9 +146,9 @@ typedef struct FInfo {
    uint64_t length;
    uint64_t ctime;
    uint64_t seqnum;
-    // userinfo是当前操作这个文件的用户信息
+    // userinfo is the information of the user currently operating on this
+    // file
    UserInfo_t userinfo;
-    // owner是当前文件所属信息
+    // owner is the ownership information of the current file
    std::string owner;
    std::string filename;
    std::string fullPathName;
@@ -162,7 +161,7 @@ typedef struct FInfo {
    uint64_t stripeCount;
    std::string poolset;

-    OpenFlags openflags;
+    OpenFlags openflags;
    common::ReadWriteThrottleParams throttleParams;

    FInfo() {
@@ -187,10 +186,10 @@ typedef struct FileEpoch {
    }
} FileEpoch_t;

-// PeerAddr 代表一个copyset group里的一个chunkserver节点
-// 与braft中的PeerID对应
+// PeerAddr represents a chunkserver node in a copyset group
+// Corresponds to PeerID in braft
struct PeerAddr {
-    // 节点的地址信息
+    // The address information of the node
    EndPoint addr_;

    PeerAddr() = default;
@@ -198,17 +197,17 @@ struct PeerAddr {
    bool IsEmpty() const {
        return (addr_.ip == butil::IP_ANY && addr_.port == 0) &&
-            addr_.socket_file.empty();
+               addr_.socket_file.empty();
    }

-    // 重置当前地址信息
+    // Reset the current address information
    void Reset() {
        addr_.ip = butil::IP_ANY;
        addr_.port = 0;
    }

-    // 从字符串中将地址信息解析出来
-    int Parse(const std::string &str) {
+    // Parse the address information from a string
+    int Parse(const std::string& str) {
        int idx;
        char ip_str[64];
        if (2 > sscanf(str.c_str(), "%[^:]%*[:]%d%*[:]%d", ip_str, &addr_.port,
@@ -224,8 +223,9 @@ struct PeerAddr {
        return 0;
    }

-    // 将该节点地址信息转化为字符串形式
-    // 在get leader调用中可以将该值直接传入request
+    // Convert the node address information into string form;
+    // in a get leader call the value can be passed directly into the
+    // request
    std::string ToString() const {
        char str[128];
        snprintf(str, sizeof(str), "%s:%d", butil::endpoint2str(addr_).c_str(),
@@ -233,32 +233,32 @@ struct PeerAddr {
        return std::string(str);
    }

-    bool operator==(const PeerAddr &other) const {
+    bool operator==(const PeerAddr& other) const {
        return addr_ == other.addr_;
    }
};

-inline const char *OpTypeToString(OpType optype) {
+inline const char* OpTypeToString(OpType optype) {
    switch (optype) {
-    case OpType::READ:
-        return "Read";
-    case OpType::WRITE:
-        return "Write";
-    case OpType::READ_SNAP:
-        return "ReadSnapshot";
-    case OpType::DELETE_SNAP:
-        return "DeleteSnapshot";
-    case OpType::CREATE_CLONE:
-        return "CreateCloneChunk";
-    case OpType::RECOVER_CHUNK:
-        return "RecoverChunk";
-    case OpType::GET_CHUNK_INFO:
-        return "GetChunkInfo";
-    case OpType::DISCARD:
-        return "Discard";
-    case OpType::UNKNOWN:
-    default:
-        return "Unknown";
+        case OpType::READ:
+            return "Read";
+        case OpType::WRITE:
+            return "Write";
+        case OpType::READ_SNAP:
+            return "ReadSnapshot";
+        case OpType::DELETE_SNAP:
+            return "DeleteSnapshot";
+        case OpType::CREATE_CLONE:
+            return "CreateCloneChunk";
+        case OpType::RECOVER_CHUNK:
+            return "RecoverChunk";
+        case OpType::GET_CHUNK_INFO:
+            return "GetChunkInfo";
+        case OpType::DISCARD:
+            return "Discard";
+        case OpType::UNKNOWN:
+        default:
+            return "Unknown";
    }
}

@@ -279,16 +279,14 @@ class SnapCloneClosure : public google::protobuf::Closure {

class ClientDummyServerInfo {
 public:
-    static ClientDummyServerInfo &GetInstance() {
+    static ClientDummyServerInfo& GetInstance() {
        static ClientDummyServerInfo clientInfo;
        return clientInfo;
    }

-    void SetIP(const std::string &ip) { localIP_ = ip; }
+    void SetIP(const std::string& ip) { localIP_ = ip; }

-    std::string GetIP() const {
-        return localIP_;
-    }
+    std::string GetIP() const { return localIP_; }

    void SetPort(uint32_t port) { localPort_ = port; }

@@ -309,22 +307,22 @@ class ClientDummyServerInfo {

inline void TrivialDeleter(void*) {}
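A quick round-trip of the PeerAddr helper above (illustrative usage only; the address string is invented, and the trailing index follows the braft peer-id convention):

PeerAddr addr;
if (addr.Parse("127.0.0.1:8200:0") == 0) {
    // ToString() re-appends the peer index, yielding "127.0.0.1:8200:0"
    LOG(INFO) << "peer = " << addr.ToString();
}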
-inline const char *FileStatusToName(FileStatus status) {
+inline const char* FileStatusToName(FileStatus status) {
    switch (status) {
-    case FileStatus::Created:
-        return "Created";
-    case FileStatus::Deleting:
-        return "Deleting";
-    case FileStatus::Cloning:
-        return "Cloning";
-    case FileStatus::CloneMetaInstalled:
-        return "CloneMetaInstalled";
-    case FileStatus::Cloned:
-        return "Cloned";
-    case FileStatus::BeingCloned:
-        return "BeingCloned";
-    default:
-        return "Unknown";
+        case FileStatus::Created:
+            return "Created";
+        case FileStatus::Deleting:
+            return "Deleting";
+        case FileStatus::Cloning:
+            return "Cloning";
+        case FileStatus::CloneMetaInstalled:
+            return "CloneMetaInstalled";
+        case FileStatus::Cloned:
+            return "Cloned";
+        case FileStatus::BeingCloned:
+            return "BeingCloned";
+        default:
+            return "Unknown";
    }
}

@@ -359,7 +357,7 @@ struct CreateFileContext {
    std::string poolset;
};

-} // namespace client
-} // namespace curve
+}  // namespace client
+}  // namespace curve

#endif  // SRC_CLIENT_CLIENT_COMMON_H_
diff --git a/src/client/client_metric.h b/src/client/client_metric.h
index 826b8b9b2d..603a0176b1 100644
--- a/src/client/client_metric.h
+++ b/src/client/client_metric.h
@@ -28,9 +28,9 @@
#include
#include

-#include "src/common/timeutility.h"
#include "src/client/client_common.h"
#include "src/common/string_util.h"
+#include "src/common/timeutility.h"

using curve::common::TimeUtility;

@@ -48,11 +48,11 @@ struct SlowRequestMetric {
        : count(prefix, name + "_total") {}
};

-// 秒级信息统计
+// Per-second statistics
struct PerSecondMetric {
-    // 当前persecond计数总数
+    // Cumulative count backing the per-second value
    bvar::Adder count;
-    // persecond真实数据,这个数据依赖于count
+    // The actual per-second rate, derived from count
    bvar::PerSecond> value;

    PerSecondMetric(const std::string& prefix, const std::string& name)
@@ -60,21 +60,21 @@
          value(prefix, name, &count, 1) {}
};

-// 接口统计信息metric信息统计
+// Per-interface metric statistics
struct InterfaceMetric {
-    // 接口统计信息调用qps
+    // QPS of calls to this interface
    PerSecondMetric qps;
    // error request persecond
    PerSecondMetric eps;
    // receive request persecond
    PerSecondMetric rps;
-    // 调用吞吐
+    // Call throughput
    PerSecondMetric bps;
-    // 调用超时次数qps
+    // Timeouts per second
    PerSecondMetric timeoutQps;
-    // 调用redirect次数qps
+    // Redirects per second
    PerSecondMetric redirectQps;
-    // 调用latency
+    // Call latency
    bvar::LatencyRecorder latency;

    InterfaceMetric(const std::string& prefix, const std::string& name)
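How a PerSecondMetric pair behaves in practice (an illustrative sketch; the prefix and variable name are hypothetical):

PerSecondMetric qps("curve_client", "user_read_qps");
qps.count << 1;  // record one request on the cumulative counter
// qps.value samples `count` once per second and exposes the delta as the rate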
@@ -102,33 +102,36 @@ struct DiscardMetric {
    bvar::Adder pending;
};

-// 文件级别metric信息统计
+// File-level metric statistics
struct FileMetric {
    const std::string prefix = "curve_client";

-    // 当前metric归属于哪个文件
+    // The file this metric belongs to
    std::string filename;

-    // 当前文件inflight io数量
+    // Number of inflight IOs on the current file
    bvar::Adder inflightRPCNum;

-    // 当前文件请求的最大请求字节数,这种统计方式可以很方便的看到最大值,分位值
+    // Request sizes of the current file; recording them this way makes the
+    // maximum and quantile values easy to read
    bvar::LatencyRecorder readSizeRecorder;
    bvar::LatencyRecorder writeSizeRecorder;
    bvar::LatencyRecorder discardSizeRecorder;

-    // libcurve最底层read rpc接口统计信息metric统计
+    // Metrics of libcurve's lowest-level read rpc interface
    InterfaceMetric readRPC;
-    // libcurve最底层write rpc接口统计信息metric统计
+    // Metrics of libcurve's lowest-level write rpc interface
    InterfaceMetric writeRPC;
-    // 用户读请求qps、eps、rps
+    // User read request QPS, EPS, RPS
    InterfaceMetric userRead;
-    // 用户写请求qps、eps、rps
+    // User write request QPS, EPS, RPS
    InterfaceMetric userWrite;
    // user's discard request
    InterfaceMetric userDiscard;

-    // get leader失败重试qps
+    // QPS of get leader retries after failure
    PerSecondMetric getLeaderRetryQPS;

    // Number of slow requests
@@ -153,52 +156,52 @@
          discardMetric(prefix + filename) {}
};

-// 用于全局mds接口统计信息调用信息统计
+// Global statistics of calls to the mds interfaces
struct MDSClientMetric {
    std::string prefix;

-    // mds的地址信息
+    // The address information of mds
    std::string metaserverAddr;
    bvar::PassiveStatus metaserverAddress;

-    // openfile接口统计信息
+    // openFile interface statistics
    InterfaceMetric openFile;
-    // createFile接口统计信息
+    // createFile interface statistics
    InterfaceMetric createFile;
-    // closeFile接口统计信息
+    // closeFile interface statistics
    InterfaceMetric closeFile;
-    // getFileInfo接口统计信息
+    // getFileInfo interface statistics
    InterfaceMetric getFile;
-    // RefreshSession接口统计信息
+    // refreshSession interface statistics
    InterfaceMetric refreshSession;
-    // GetServerList接口统计信息
+    // getServerList interface statistics
    InterfaceMetric getServerList;
-    // GetOrAllocateSegment接口统计信息
+    // getOrAllocateSegment interface statistics
    InterfaceMetric getOrAllocateSegment;
-    // DeAllocateSegment接口统计信息
+    // deAllocateSegment interface statistics
    InterfaceMetric deAllocateSegment;
-    // RenameFile接口统计信息
+    // renameFile interface statistics
    InterfaceMetric renameFile;
-    // Extend接口统计信息
+    // extendFile interface statistics
    InterfaceMetric extendFile;
-    // DeleteFile接口统计信息
+    // deleteFile interface statistics
    InterfaceMetric deleteFile;
    // RecoverFile interface metric
    InterfaceMetric recoverFile;
-    // changeowner接口统计信息
+    // changeOwner interface statistics
    InterfaceMetric changeOwner;
-    // listdir接口统计信息
+    // listDir interface statistics
    InterfaceMetric listDir;
-    // register接口统计信息
+    // registerClient interface statistics
    InterfaceMetric registerClient;
-    // GetChunkServerID接口统计
+    // getChunkServerId interface statistics
    InterfaceMetric getChunkServerId;
-    // ListChunkServerInServer接口统计
+    // listChunkserverInServer interface statistics
    InterfaceMetric listChunkserverInServer;
    // IncreaseEpoch
    InterfaceMetric increaseEpoch;

-    // 切换mds server总次数
+    // Total number of mds server switches
    bvar::Adder mdsServerChangeTimes;

    explicit MDSClientMetric(const std::string& prefix_ = "")
@@ -245,8 +248,8 @@ struct LatencyGuard {
class MetricHelper {
 public:
    /**
-     * 统计getleader重试次数
-     * @param: fm为当前文件的metric指针
+     * Count getleader retries
+     * @param: fm is the metric pointer of the current file
     */
    static void IncremGetLeaderRetryTime(FileMetric* fm) {
        if (fm != nullptr) {
@@ -255,13 +258,14 @@
    }

    /**
-     * 统计用户当前读写请求次数,用于qps计算
-     * @param: fm为当前文件的metric指针
-     * @param: length为当前请求大小
-     * @param: read为当前操作是读操作还是写操作
+     * Count the user's current read/write requests, used for QPS calculation
+     * @param: fm is the metric pointer of the current file
+     * @param: length is the current request size
+     * @param: type is whether the current operation is a read or a write
     */
-    static void IncremUserQPSCount(FileMetric* fm,
-                                   uint64_t length,
+    static void IncremUserQPSCount(FileMetric* fm, uint64_t length,
                                   OpType type) {
        if (fm != nullptr) {
            switch (type) {
@@ -286,9 +290,11 @@
    }
    /**
-     * 统计用户当前读写请求失败次数,用于eps计算
-     * @param: fm为当前文件的metric指针
-     * @param: read为当前操作是读操作还是写操作
+     * Count the user's failed read/write requests, used for EPS calculation
+     * @param: fm is the metric pointer of the current file
+     * @param: type is whether the current operation is a read or a write
     */
    static void IncremUserEPSCount(FileMetric* fm, OpType type) {
        if (fm != nullptr) {
@@ -308,13 +314,18 @@
    }

    /**
-     * 统计用户当前接收到的读写请求次数,用于rps计算
-     * rps: receive request persecond, 就是当前接口每秒接收到的请求数量
-     * qps: query request persecond, 就是当前接口每秒处理的请求数量
-     * eps: error request persecond, 就是当前接口每秒出错的请求数量
-     * rps减去qps就是当前client端每秒钟等待的请求数量,这部分请求会持久占用当前一秒内的内存
-     * @param: fm为当前文件的metric指针
-     * @param: read为当前操作是读操作还是写操作
+     * Count the read/write requests received from the user, used for RPS
+     * calculation.
+     * rps: receive requests per second, the number of requests the interface
+     * receives per second
+     * qps: query requests per second, the number of requests the interface
+     * processes per second
+     * eps: error requests per second, the number of requests that fail per
+     * second on the interface
+     * rps minus qps is the number of requests the client is still waiting on
+     * in the current second; those requests keep occupying memory during
+     * that second
+     * @param: fm is the metric pointer of the current file
+     * @param: type is whether the current operation is a read or a write
     */
    static void IncremUserRPSCount(FileMetric* fm, OpType type) {
        if (fm != nullptr) {
@@ -334,9 +345,10 @@
    }

    /**
-     * 统计当前rpc失败次数,用于eps计算
-     * @param: fm为当前文件的metric指针
-     * @param: read为当前操作是读操作还是写操作
+     * Count the current number of RPC failures, used for EPS calculation
+     * @param: fm is the metric pointer of the current file
+     * @param: type is whether the current operation is a read or a write
     */
    static void IncremFailRPCCount(FileMetric* fm, OpType type) {
        if (fm != nullptr) {
@@ -354,9 +366,11 @@
    }

    /**
-     * 统计用户当前读写请求超时次数,用于timeoutQps计算
-     * @param: fm为当前文件的metric指针
-     * @param: read为当前操作是读操作还是写操作
+     * Count the user's timed-out read/write requests, used for timeoutQps
+     * calculation
+     * @param: fm is the metric pointer of the current file
+     * @param: type is whether the current operation is a read or a write
     */
    static void IncremTimeOutRPCCount(FileMetric* fm, OpType type) {
        if (fm != nullptr) {
@@ -374,9 +388,9 @@
    }

    /**
-     * 统计请求被redirect的次数
-     * @param fileMetric 当前文件的metric指针
-     * @param opType 请求类型
+     * Count how many times requests were redirected
+     * @param fileMetric is the metric pointer of the current file
+     * @param opType is the request type
     */
    static void IncremRedirectRPCCount(FileMetric* fileMetric, OpType opType) {
        if (fileMetric) {
@@ -394,13 +408,14 @@
    }

    /**
-     * 统计读写RPC接口统计信息请求次数及带宽统计,用于qps及bps计算
-     * @param: fm为当前文件的metric指针
-     * @param: length为当前请求大小
-     * @param: read为当前操作是读操作还是写操作
+     * Count the requests and bandwidth of the read/write RPC interfaces,
+     * used for QPS and BPS calculation
+     * @param: fm is the metric pointer of the current file
+     * @param: length is the current request size
+     * @param: type is whether the current operation is a read or a write
     */
-    static void IncremRPCQPSCount(FileMetric* fm,
-                                  uint64_t length,
+    static void IncremRPCQPSCount(FileMetric* fm, uint64_t length,
                                  OpType type) {
        if (fm != nullptr) {
            switch (type) {
@@ -419,13 +434,14 @@
    }
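A sketch of how these counters combine on the user IO path (illustrative call sites, not the actual wiring in the code):

MetricHelper::IncremUserRPSCount(fm, OpType::READ);          // request received
MetricHelper::IncremUserQPSCount(fm, length, OpType::READ);  // request processed
// on failure: MetricHelper::IncremUserEPSCount(fm, OpType::READ);
// at any moment, rps - qps approximates the requests still queued in the client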
    /**
-     * 统计读写RPC接口统计信息请求次数及带宽统计,用于rps计算
-     * @param: fm为当前文件的metric指针
-     * @param: length为当前请求大小
-     * @param: read为当前操作是读操作还是写操作
+     * Count the requests of the read/write RPC interfaces, used for RPS
+     * calculation
+     * @param: fm is the metric pointer of the current file
+     * @param: type is whether the current operation is a read or a write
     */
-    static void IncremRPCRPSCount(FileMetric* fm,
-                                  OpType type) {
+    static void IncremRPCRPSCount(FileMetric* fm, OpType type) {
        if (fm != nullptr) {
            switch (type) {
                case OpType::READ:
@@ -440,9 +456,7 @@
        }
    }

-    static void LatencyRecord(FileMetric* fm,
-                              uint64_t duration,
-                              OpType type) {
+    static void LatencyRecord(FileMetric* fm, uint64_t duration, OpType type) {
        if (fm != nullptr) {
            switch (type) {
                case OpType::READ:
@@ -457,8 +471,7 @@
        }
    }

-    static void UserLatencyRecord(FileMetric* fm,
-                                  uint64_t duration,
+    static void UserLatencyRecord(FileMetric* fm, uint64_t duration,
                                  OpType type) {
        if (fm != nullptr) {
            switch (type) {
@@ -502,7 +515,7 @@
        }
    }
};
-} // namespace client
-} // namespace curve
+}  // namespace client
+}  // namespace curve

#endif  // SRC_CLIENT_CLIENT_METRIC_H_
diff --git a/src/client/config_info.h b/src/client/config_info.h
index 620d464eae..e324a6e8ba 100644
--- a/src/client/config_info.h
+++ b/src/client/config_info.h
@@ -24,6 +24,7 @@
#define SRC_CLIENT_CONFIG_INFO_H_

#include
+
#include
#include

@@ -31,9 +32,9 @@ namespace curve {
namespace client {

/**
- * log的基本配置信息
- * @logLevel: 是log打印等级
- * @logPath: log打印位置
+ * Basic log configuration
+ * @logLevel: the log level
+ * @logPath: the log output path
 */
struct LogInfo {
    int logLevel = 2;
@@ -41,8 +42,9 @@
};

/**
- * in flight IO控制信息
- * @fileMaxInFlightRPCNum: 为一个文件中最大允许的inflight IO数量
+ * Inflight IO control information
+ * @fileMaxInFlightRPCNum: the maximum number of inflight IOs allowed on one
+ * file
 */
struct InFlightIOCntlInfo {
    uint64_t fileMaxInFlightRPCNum = 2048;
};

@@ -78,27 +80,29 @@ struct MetaServerOption {
};

/**
- * 租约基本配置
- * @mdsRefreshTimesPerLease: 一个租约内续约次数,client与mds之间通过租约保持心跳
- *                           如果双方约定租约有效期为10s,那么client会在这10s内
- *                           发送mdsRefreshTimesPerLease次心跳,如果连续失败,
- *                           那么client认为当前mds存在异常,会阻塞后续的IO,直到
- *                           续约成功。
+ * Basic lease configuration
+ * @mdsRefreshTimesPerLease: the number of renewals within one lease period.
+ * The client and mds keep their heartbeat through the lease: if both sides
+ * agree on a 10s lease term, the client sends mdsRefreshTimesPerLease
+ * heartbeats within those 10 seconds. If renewals fail consecutively, the
+ * client assumes the current mds is abnormal and blocks subsequent IO until
+ * a renewal succeeds.
 */
struct LeaseOption {
    uint32_t mdsRefreshTimesPerLease = 5;
};
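The renewal cadence implied above, worked out with the same hypothetical numbers used in the comment:

// lease term 10s, mdsRefreshTimesPerLease = 5
// => one renewal heartbeat every 10s / 5 = 2s;
//    IO blocks only if renewals keep failing for the rest of the lease
const uint64_t kLeaseTermUs = 10ULL * 1000 * 1000;
const uint64_t kRefreshIntervalUs = kLeaseTermUs / 5;  // 2,000,000 us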
/**
- * rpc超时,判断是否unstable的参数
+ * Parameters for deciding, on RPC timeout, whether a chunkserver is
+ * unstable
 * @maxStableChunkServerTimeoutTimes:
- *     一个chunkserver连续超时请求的阈值, 超过之后会检查健康状态,
- *     如果不健康,则标记为unstable
+ *     the threshold of consecutive timed-out requests to one chunkserver;
+ *     once exceeded, its health is checked, and if it is unhealthy it is
+ *     marked unstable
 * @checkHealthTimeoutMS:
- *     检查chunkserver是否健康的http请求超时时间
+ *     timeout of the http request that checks whether a chunkserver is
+ *     healthy
 * @serverUnstableThreashold:
- *     一个server上超过serverUnstableThreashold个chunkserver都标记为unstable,
- *     整个server上的所有chunkserver都标记为unstable
+ *     once more than serverUnstableThreashold chunkservers on one server
+ *     are marked unstable, all chunkservers on that server are marked
+ *     unstable
 */
struct ChunkServerUnstableOption {
    uint32_t maxStableChunkServerTimeoutTimes = 64;
@@ -107,37 +111,40 @@
};

/**
- * 发送失败的chunk request处理
+ * Handling of failed chunk requests:
 * @chunkserverOPMaxRetry:
- *     最大重试次数,一个RPC下发到底层chunkserver,最大允许的失败
- *     次数,超限之后会向上返回用户。
+ * Maximum retry count allowed for an RPC sent to the underlying chunk server.
+ * If exceeded, the failure is returned to the user.
 * @chunkserverOPRetryIntervalUS:
- *     相隔多久再重试,一个rpc失败之后client会根据其返回
- *     状态决定是否需要睡眠一段时间再重试,目前除了
- *     TIMEOUT、REDIRECTED,这两种返回值,其他返回值都是需要
- *     先睡眠一段时间再重试。
+ * Time interval between retries. After a failed RPC, the client will sleep for
+ * a period determined by the RPC response status before retrying. Currently,
+ * except for TIMEOUT and REDIRECTED, all other response
+ * values require sleeping for some time before retrying.
- * @chunkserverRPCTimeoutMS: 为每个rpc发送时,其rpc controller配置的超时时间
+ * @chunkserverRPCTimeoutMS: Timeout configured for each RPC sent when creating
+ * its RPC controller.
 * @chunkserverMaxRPCTimeoutMS:
- *     在底层chunkserver返回TIMEOUT时,说明当前请求在底层
- *     无法及时得到处理,原因可能是底层的排队任务太多,这时候如果
- *     以相同的rpc超时时间再去发送请求,很有可能最后还是超时,
- *     所以为了避免底层处理请求时,rpc在client一侧已经超时的这种
- *     状况,为rpc超时时间增加了指数退避逻辑,超时时间会逐渐增加,
- *     最大不能超过该值。
+ * When the underlying chunkserver returns TIMEOUT, it means the current request
+ * cannot be processed promptly, possibly due to a large number of queued tasks.
+ * In such cases, sending requests with the same RPC timeout again may still
+ * result in timeouts. To avoid this, exponential backoff logic is applied to
+ * increase the timeout gradually, but it cannot exceed this maximum value.
 * @chunkserverMaxRetrySleepIntervalUS:
- *     在底层返回OVERLOAD时,表明当前chunkserver
- *     压力过大,这时候睡眠时间会进行指数退避,睡眠时间会加长,这样
- *     能够保证client的请求不会将底层chunkserver打满,但是睡眠时间
- *     最长不能超过该值。
+ * When the underlying chunk server returns OVERLOAD, indicating excessive
+ * pressure, the sleep interval is exponentially extended to ensure that client
+ * requests do not overwhelm the underlying chunk server.
+ * However, the maximum sleep time cannot exceed this value.
- * @chunkserverMaxStableTimeoutTimes: 一个chunkserver连续超时请求的阈值,
- *     超过之后会标记为unstable。因为一个chunkserver所在的server如果宕机
- *     那么发向该chunkserver的请求都会超时,如果同一个chunkserver
- *     的rpc连续超时超过该阈值,那么client就认为这个chunkserver
- *     所在的server可能宕机了,就将该server上的所有leader copyset
- *     标记为unstable,促使其下次发送rpc前,先去getleader。
+ * @chunkserverMaxStableTimeoutTimes:
+ * Threshold for consecutive timeouts on an RPC from a chunk server. If
+ * exceeded, the chunk server is marked as unstable. This is because if a server
+ * where a chunk server resides crashes, requests sent to
+ * that chunk server will all time out.
+ * If the same chunk server's RPCs
+ * consecutively time out beyond this threshold, the client assumes that the
+ * server where it resides may have crashed and marks all leader copysets on
+ * that server as unstable, prompting a leader retrieval before sending any
+ * RPCs.
 * @chunkserverMinRetryTimesForceTimeoutBackoff:
- *     当一个请求重试次数超过阈值时,还在重试 使其超时时间进行指数退避
+ * When a request exceeds the retry count threshold, it continues to retry with
+ * exponential backoff for its timeout duration.
 */
struct FailureRequestOption {
    uint32_t chunkserverOPMaxRetry = 3;
@@ -154,9 +161,11 @@
};

/**
- * 发送rpc给chunkserver的配置
- * @inflightOpt: 一个文件向chunkserver发送请求时的inflight 请求控制配置
- * @failRequestOpt: rpc发送失败之后,需要进行rpc重试的相关配置
+ * Configuration for sending rpc to chunkserver
+ * @inflightOpt: configuration of inflight request control when a file sends
+ * requests to chunkservers
+ * @failRequestOpt: the retry configuration used after an rpc fails
 */
struct IOSenderOption {
    InFlightIOCntlInfo inflightOpt;
@@ -164,10 +173,12 @@
};

/**
- * scheduler模块基本配置信息,schedule模块是用于分发用户请求,每个文件有自己的schedule
- * 线程池,线程池中的线程各自配置一个队列
- * @scheduleQueueCapacity: schedule模块配置的队列深度
- * @scheduleThreadpoolSize: schedule模块线程池大小
+ * Basic configuration of the scheduler module.
+ * The scheduler module distributes user requests. Each file has its own
+ * scheduler thread pool, and each thread in the pool has its own queue.
+ * @scheduleQueueCapacity: the queue depth configured for the schedule module
+ * @scheduleThreadpoolSize: the thread pool size of the schedule module
 */
struct RequestScheduleOption {
    uint32_t scheduleQueueCapacity = 1024;
@@ -176,26 +187,29 @@
};

/**
- * metaccache模块配置信息
+ * MetaCache Module Configuration
 * @metacacheGetLeaderRetry:
- *     获取leader重试次数,一个rpc发送到chunkserver之前需要先
- *     获取当前copyset的leader,如果metacache中没有这个信息,
- *     就向copyset的peer发送getleader请求,如果getleader失败,
- *     需要重试,最大重试次数为该值。
+ * Number of retries to get the leader. Before an RPC is sent to the
+ * chunkserver, it needs to first obtain the leader for the current copyset. If
+ * this information is not available in the metacache, a getleader request is
+ * sent to a copyset's peers. If getleader fails, it needs to be retried, with a
+ * maximum retry count defined by this value.
 * @metacacheRPCRetryIntervalUS:
- *     如上所述,如果getleader请求失败,会发起重试,但是并
- *     不会立即进行重试,而是选择先睡眠一段时间在重试。该值代表
- *     睡眠长度。
+ * As mentioned above, if a getleader request fails, it will be retried, but not
+ * immediately. Instead, there will be a delay before the retry. This value
+ * represents the length of that delay.
+ * @metacacheGetLeaderRPCTimeOutMS: The maximum timeout duration for the RPC
+ * controller when sending a 'getleader' RPC request
 * @metacacheGetLeaderBackupRequestMS:
- *     因为一个copyset有三个或者更多的peer,getleader
- *     会以backuprequest的方式向这些peer发送rpc,在brpc内部
- *     会串行发送,如果第一个请求超过一定时间还没返回,就直接向
- *     下一个peer发送请求,而不用等待上一次请求返回或超时,这个触发
- *     backup request的时间就为该值。
+ * Since a copyset has three or more peers, getleader requests are
+ * sent to these peers in a backuprequest manner.
+ * Internally, in brpc, these requests are sent
+ * serially.
If the first request takes too long to return, the next request is + * sent to the next peer without waiting for the previous one to return or time + * out. The time at which backup requests are triggered is determined by this + * value. + * @metacacheGetLeaderBackupRequestLbName: Strategy for selecting the underlying + * service nodes for getleader backup RPCs. */ struct MetaCacheOption { uint32_t metacacheGetLeaderRetry = 3; @@ -208,21 +222,23 @@ struct MetaCacheOption { }; /** - * IO 拆分模块配置信息 + * IO Split Module Configuration * @fileIOSplitMaxSizeKB: - * 用户下发IO大小client没有限制,但是client会将用户的IO进行拆分, - * 发向同一个chunkserver的请求锁携带的数据大小不能超过该值。 + * The size of user-issued IOs is not restricted by the client. However, the + * client will split the user's IOs, and the data size carried by requests sent + * to the same chunkserver cannot exceed this value. */ struct IOSplitOption { uint64_t fileIOSplitMaxSizeKB = 64; }; /** - * 线程隔离任务队列配置信息 - * 线程隔离主要是为了上层做异步接口调用时,直接将其调用任务推到线程池中而不是让其阻塞到放入 - * 分发队列线程池。 - * @isolationTaskQueueCapacity: 隔离线程池的队列深度 - * @isolationTaskThreadPoolSize: 隔离线程池容量 + * Configuration information for thread-isolated task queues. + * Thread isolation is primarily used to push asynchronous interface calls + * directly into the thread pool instead of blocking them until they are placed + * in the dispatch queue thread pool. + * @isolationTaskQueueCapacity: The queue depth of the isolation thread pool. + * @isolationTaskThreadPoolSize: The capacity of the isolation thread pool. */ struct TaskThreadOption { uint64_t isolationTaskQueueCapacity = 500000; @@ -250,7 +266,8 @@ struct ThrottleOption { }; /** - * IOOption存储了当前io 操作所需要的所有配置信息 + * IOOption stores all the configuration information required for the current IO + * operation */ struct IOOption { IOSplitOption ioSplitOpt; @@ -264,11 +281,12 @@ struct IOOption { }; /** - * client一侧常规的共同的配置信息 - * @mdsRegisterToMDS: 是否向mds注册client信息,因为client需要通过dummy - * server导出 metric信息,为了配合普罗米修斯的自动服务发现机制,会将其监听的 - * ip和端口信息发送给mds。 - * @turnOffHealthCheck: 是否关闭健康检查 + * Common client-side configuration options: + * @mdsRegisterToMDS: Whether to register client information with the MDS. Since + * the client needs to export metric information through a dummy server to + * support Prometheus's automatic service discovery mechanism, it sends its + * listening IP and port information to the MDS. + * @turnOffHealthCheck: Whether to disable health checks. 
 */
struct CommonConfigOpt {
    bool mdsRegisterToMDS = false;
@@ -284,7 +302,8 @@
};

/**
- * ClientConfigOption是外围快照系统需要设置的配置信息
+ * ClientConfigOption is the configuration that the external snapshot system
+ * needs to set
 */
struct ClientConfigOption {
    LogInfo loginfo;
@@ -296,25 +315,24 @@
struct ChunkServerBroadCasterOption {
    uint32_t broadCastMaxNum;

-    ChunkServerBroadCasterOption()
-        : broadCastMaxNum(200) {}
+    ChunkServerBroadCasterOption() : broadCastMaxNum(200) {}
};

struct ChunkServerClientRetryOptions {
-    uint32_t rpcTimeoutMs;
-    uint32_t rpcMaxTry;
-    uint32_t rpcIntervalUs;
-    uint32_t rpcMaxTimeoutMs;
+    uint32_t rpcTimeoutMs;
+    uint32_t rpcMaxTry;
+    uint32_t rpcIntervalUs;
+    uint32_t rpcMaxTimeoutMs;

    ChunkServerClientRetryOptions()
-        : rpcTimeoutMs(500),
-          rpcMaxTry(3),
-          rpcIntervalUs(100000),
-          rpcMaxTimeoutMs(8000) {}
+        : rpcTimeoutMs(500),
+          rpcMaxTry(3),
+          rpcIntervalUs(100000),
+          rpcMaxTimeoutMs(8000) {}
};

/**
- * FileServiceOption是QEMU侧总体配置信息
+ * FileServiceOption is the overall configuration information on the QEMU side
 */
struct FileServiceOption {
    LogInfo loginfo;
diff --git a/src/client/copyset_client.cpp b/src/client/copyset_client.cpp
index 964929d18f..9211070715 100644
--- a/src/client/copyset_client.cpp
+++ b/src/client/copyset_client.cpp
@@ -24,21 +24,21 @@

#include
#include
+
#include
#include

-#include "src/client/request_sender.h"
-#include "src/client/metacache.h"
#include "src/client/client_config.h"
-#include "src/client/request_scheduler.h"
+#include "src/client/metacache.h"
#include "src/client/request_closure.h"
+#include "src/client/request_scheduler.h"
+#include "src/client/request_sender.h"

namespace curve {
namespace client {

-int CopysetClient::Init(MetaCache *metaCache,
-    const IOSenderOption& ioSenderOpt, RequestScheduler* scheduler,
-    FileMetric* fileMetric) {
+int CopysetClient::Init(MetaCache* metaCache, const IOSenderOption& ioSenderOpt,
+                        RequestScheduler* scheduler, FileMetric* fileMetric) {
    if (nullptr == metaCache || scheduler == nullptr) {
        LOG(ERROR) << "metacache or scheduler is null!";
        return -1;
@@ -47,7 +47,7 @@ int CopysetClient::Init(MetaCache *metaCache,
    metaCache_ = metaCache;
    scheduler_ = scheduler;
    fileMetric_ = fileMetric;
-    senderManager_ = new(std::nothrow) RequestSenderManager();
+    senderManager_ = new (std::nothrow) RequestSenderManager();
    if (nullptr == senderManager_) {
        return -1;
    }
@@ -63,30 +63,33 @@
    return 0;
}

bool CopysetClient::FetchLeader(LogicPoolID lpid, CopysetID cpid,
-    ChunkServerID* leaderid, butil::EndPoint* leaderaddr) {
-    // 1. 先去当前metacache中拉取leader信息
-    if (0 == metaCache_->GetLeader(lpid, cpid, leaderid,
-        leaderaddr, false, fileMetric_)) {
+                                ChunkServerID* leaderid,
+                                butil::EndPoint* leaderaddr) {
+    // 1. First, try to fetch the leader information from the current
+    //    metacache
+    if (0 == metaCache_->GetLeader(lpid, cpid, leaderid, leaderaddr, false,
+                                   fileMetric_)) {
        return true;
    }

-    // 2. 如果metacache中leader信息拉取失败,就发送RPC请求获取新leader信息
-    if (-1 == metaCache_->GetLeader(lpid, cpid, leaderid,
-        leaderaddr, true, fileMetric_)) {
+    // 2. If fetching the leader from the metacache fails, send an RPC
+    //    request to refresh the leader information
+    if (-1 == metaCache_->GetLeader(lpid, cpid, leaderid, leaderaddr, true,
+                                    fileMetric_)) {
        LOG(WARNING) << "Get leader address form cache failed, but "
-                     << "also refresh leader address failed from mds."
- << "(<" << lpid << ", " << cpid << ">)"; + << "also refresh leader address failed from mds." + << "(<" << lpid << ", " << cpid << ">)"; return false; } return true; } -// 因为这里的CopysetClient::ReadChunk(会在两个逻辑里调用 -// 1. 从request scheduler下发的新的请求 -// 2. clientclosure再重试逻辑里调用copyset client重试 -// 这两种状况都会调用该接口,因为对于重试的RPC有可能需要重新push到队列中 -// 非重试的RPC如果重新push到队列中会导致死锁。 +// Because the CopysetClient::ReadChunk (will be called in two logics) here +// 1. New requests issued from the request scheduler +// 2. Calling copyset client to retry in the clientclosure retry logic +// Both of these situations will call the interface, as retrying RPCs may +// require re pushing to the queue If non retrying RPC is pushed back into the +// queue, it will cause a deadlock. int CopysetClient::ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset, size_t length, const RequestSourceInfo& sourceInfo, @@ -94,26 +97,31 @@ int CopysetClient::ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, RequestClosure* reqclosure = static_cast(done); brpc::ClosureGuard doneGuard(done); - // session过期情况下重试有两种场景: - // 1. 正常重试过程,非文件关闭状态,这时候RPC直接重新push到scheduler队列头部 - // 重试调用是在brpc的线程里,所以这里不会卡住重试的RPC,这样 - // 不会阻塞brpc线程,因为brpc线程是所有文件公用的。避免影响其他文件 - // 因为session续约失败可能只是网络问题,等待续约成功之后IO其实还可以 - // 正常下发,所以不能直接向上返回失败,在底层hang住,等续约成功之后继续发送 - // 2. 在关闭文件过程中exitFlag_=true,重试rpc会直接向上通过closure返回给用户 - // return调用之后doneguard会调用closure的run,会释放inflight rpc计数, - // 然后closure向上返回给用户。 + // There are two scenarios for retrying when a session expires: + // 1. During the normal retry process, if the file is not in a closed state, + // RPC will directly re push to the scheduler queue header + // The retry call is in the brpc thread, so there will be no blocking of + // the retry RPC here Will not block the brpc thread as it is common to + // all files. Avoid affecting other files Because the session renewal + // failure may only be a network issue, IO is actually still possible + // after the renewal is successful Normal distribution, so failure cannot + // be directly returned upwards. Hang on at the bottom and continue + // sending after the renewal is successful + // 2. exitFlag_=true during file closing, retrying rpc will directly return + // to the user through closure + // After the return call, doneguard will call the run of the closure, + // releasing the inflight rpc count, Then the closure is returned to the + // user upwards. if (sessionNotValid_ == true) { if (exitFlag_) { LOG(WARNING) << " return directly for session not valid at exit!" 
- << ", copyset id = " << idinfo.cpid_ - << ", logical pool id = " << idinfo.lpid_ - << ", chunk id = " << idinfo.cid_ - << ", offset = " << offset - << ", len = " << length; + << ", copyset id = " << idinfo.cpid_ + << ", logical pool id = " << idinfo.lpid_ + << ", chunk id = " << idinfo.cid_ + << ", offset = " << offset << ", len = " << length; return 0; } else { - // session过期之后需要重新push到队列 + // After the session expires, it needs to be re pushed to the queue LOG(WARNING) << "session not valid, read rpc ReSchedule!"; doneGuard.release(); reqclosure->ReleaseInflightRPCToken(); @@ -123,20 +131,17 @@ int CopysetClient::ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, } auto task = [&](Closure* done, std::shared_ptr senderPtr) { - ReadChunkClosure *readDone = new ReadChunkClosure(this, done); - senderPtr->ReadChunk(idinfo, sn, offset, - length, sourceInfo, readDone); + ReadChunkClosure* readDone = new ReadChunkClosure(this, done); + senderPtr->ReadChunk(idinfo, sn, offset, length, sourceInfo, readDone); }; return DoRPCTask(idinfo, task, doneGuard.release()); } -int CopysetClient::WriteChunk(const ChunkIDInfo& idinfo, - uint64_t fileId, - uint64_t epoch, - uint64_t sn, - const butil::IOBuf& data, - off_t offset, size_t length, +int CopysetClient::WriteChunk(const ChunkIDInfo& idinfo, uint64_t fileId, + uint64_t epoch, uint64_t sn, + const butil::IOBuf& data, off_t offset, + size_t length, const RequestSourceInfo& sourceInfo, google::protobuf::Closure* done) { std::shared_ptr senderPtr = nullptr; @@ -146,23 +151,28 @@ int CopysetClient::WriteChunk(const ChunkIDInfo& idinfo, brpc::ClosureGuard doneGuard(done); - // session过期情况下重试有两种场景: - // 1. 正常重试过程,非文件关闭状态,这时候RPC直接重新push到scheduler队列头部 - // 重试调用是在brpc的线程里,所以这里不会卡住重试的RPC,这样 - // 不会阻塞brpc线程,因为brpc线程是所有文件公用的。避免影响其他文件 - // 因为session续约失败可能只是网络问题,等待续约成功之后IO其实还可以 - // 正常下发,所以不能直接向上返回失败,在底层hang住,等续约成功之后继续发送 - // 2. 在关闭文件过程中exitFlag_=true,重试rpc会直接向上通过closure返回给用户 - // return调用之后doneguard会调用closure的run,会释放inflight rpc计数, - // 然后closure向上返回给用户。 + // There are two scenarios for retrying when a session expires: + // 1. During the normal retry process, if the file is not in a closed state, + // RPC will directly re push to the scheduler queue header + // The retry call is in the brpc thread, so there will be no blocking of + // the retry RPC here Will not block the brpc thread as it is common to + // all files. Avoid affecting other files Because the session renewal + // failure may only be a network issue, IO is actually still possible + // after the renewal is successful Normal distribution, so failure cannot + // be directly returned upwards. Hang on at the bottom and continue + // sending after the renewal is successful + // 2. exitFlag_=true during file closing, retrying rpc will directly return + // to the user through closure + // After the return call, doneguard will call the run of the closure, + // releasing the inflight rpc count, Then the closure is returned to the + // user upwards. if (sessionNotValid_ == true) { if (exitFlag_) { LOG(WARNING) << " return directly for session not valid at exit!" 
- << ", copyset id = " << idinfo.cpid_ - << ", logical pool id = " << idinfo.lpid_ - << ", chunk id = " << idinfo.cid_ - << ", offset = " << offset - << ", len = " << length; + << ", copyset id = " << idinfo.cpid_ + << ", logical pool id = " << idinfo.lpid_ + << ", chunk id = " << idinfo.cid_ + << ", offset = " << offset << ", len = " << length; return 0; } else { LOG(WARNING) << "session not valid, write rpc ReSchedule!"; @@ -175,19 +185,18 @@ int CopysetClient::WriteChunk(const ChunkIDInfo& idinfo, auto task = [&](Closure* done, std::shared_ptr senderPtr) { WriteChunkClosure* writeDone = new WriteChunkClosure(this, done); - senderPtr->WriteChunk(idinfo, fileId, epoch, sn, - data, offset, length, sourceInfo, - writeDone); + senderPtr->WriteChunk(idinfo, fileId, epoch, sn, data, offset, length, + sourceInfo, writeDone); }; return DoRPCTask(idinfo, task, doneGuard.release()); } -int CopysetClient::ReadChunkSnapshot(const ChunkIDInfo& idinfo, - uint64_t sn, off_t offset, size_t length, Closure *done) { - +int CopysetClient::ReadChunkSnapshot(const ChunkIDInfo& idinfo, uint64_t sn, + off_t offset, size_t length, + Closure* done) { auto task = [&](Closure* done, std::shared_ptr senderPtr) { - ReadChunkSnapClosure *readDone = new ReadChunkSnapClosure(this, done); + ReadChunkSnapClosure* readDone = new ReadChunkSnapClosure(this, done); senderPtr->ReadChunkSnapshot(idinfo, sn, offset, length, readDone); }; @@ -195,21 +204,22 @@ int CopysetClient::ReadChunkSnapshot(const ChunkIDInfo& idinfo, } int CopysetClient::DeleteChunkSnapshotOrCorrectSn(const ChunkIDInfo& idinfo, - uint64_t correctedSn, Closure *done) { - + uint64_t correctedSn, + Closure* done) { auto task = [&](Closure* done, std::shared_ptr senderPtr) { - DeleteChunkSnapClosure *deleteDone = new DeleteChunkSnapClosure( - this, done); - senderPtr->DeleteChunkSnapshotOrCorrectSn(idinfo, - correctedSn, deleteDone); + DeleteChunkSnapClosure* deleteDone = + new DeleteChunkSnapClosure(this, done); + senderPtr->DeleteChunkSnapshotOrCorrectSn(idinfo, correctedSn, + deleteDone); }; return DoRPCTask(idinfo, task, done); } -int CopysetClient::GetChunkInfo(const ChunkIDInfo& idinfo, Closure *done) { +int CopysetClient::GetChunkInfo(const ChunkIDInfo& idinfo, Closure* done) { auto task = [&](Closure* done, std::shared_ptr senderPtr) { - GetChunkInfoClosure *chunkInfoDone = new GetChunkInfoClosure(this, done); // NOLINT + GetChunkInfoClosure* chunkInfoDone = + new GetChunkInfoClosure(this, done); // NOLINT senderPtr->GetChunkInfo(idinfo, chunkInfoDone); }; @@ -217,9 +227,9 @@ int CopysetClient::GetChunkInfo(const ChunkIDInfo& idinfo, Closure *done) { } int CopysetClient::CreateCloneChunk(const ChunkIDInfo& idinfo, - const std::string& location, uint64_t sn, - uint64_t correntSn, uint64_t chunkSize, - Closure* done) { + const std::string& location, uint64_t sn, + uint64_t correntSn, uint64_t chunkSize, + Closure* done) { auto task = [&](Closure* done, std::shared_ptr senderPtr) { CreateCloneChunkClosure* createCloneDone = new CreateCloneChunkClosure(this, done); @@ -230,22 +240,22 @@ int CopysetClient::CreateCloneChunk(const ChunkIDInfo& idinfo, return DoRPCTask(idinfo, task, done); } -int CopysetClient::RecoverChunk(const ChunkIDInfo& idinfo, - uint64_t offset, +int CopysetClient::RecoverChunk(const ChunkIDInfo& idinfo, uint64_t offset, uint64_t len, Closure* done) { auto task = [&](Closure* done, std::shared_ptr senderPtr) { RecoverChunkClosure* recoverChunkDone = new RecoverChunkClosure(this, done); - senderPtr->RecoverChunk(idinfo, recoverChunkDone, 
offset, - len); + senderPtr->RecoverChunk(idinfo, recoverChunkDone, offset, len); }; return DoRPCTask(idinfo, task, done); } -int CopysetClient::DoRPCTask(const ChunkIDInfo& idinfo, - std::function senderptr)> task, Closure *done) { +int CopysetClient::DoRPCTask( + const ChunkIDInfo& idinfo, + std::function senderptr)> + task, + Closure* done) { RequestClosure* reqclosure = static_cast(done); ChunkServerID leaderId; @@ -253,30 +263,30 @@ int CopysetClient::DoRPCTask(const ChunkIDInfo& idinfo, brpc::ClosureGuard doneGuard(done); while (reqclosure->GetRetriedTimes() < - iosenderopt_.failRequestOpt.chunkserverOPMaxRetry) { + iosenderopt_.failRequestOpt.chunkserverOPMaxRetry) { reqclosure->IncremRetriedTimes(); - if (false == FetchLeader(idinfo.lpid_, idinfo.cpid_, - &leaderId, &leaderAddr)) { + if (false == + FetchLeader(idinfo.lpid_, idinfo.cpid_, &leaderId, &leaderAddr)) { bthread_usleep( - iosenderopt_.failRequestOpt.chunkserverOPRetryIntervalUS); + iosenderopt_.failRequestOpt.chunkserverOPRetryIntervalUS); continue; } - auto senderPtr = senderManager_->GetOrCreateSender(leaderId, - leaderAddr, iosenderopt_); + auto senderPtr = senderManager_->GetOrCreateSender(leaderId, leaderAddr, + iosenderopt_); if (nullptr != senderPtr) { task(doneGuard.release(), senderPtr); break; } else { LOG(WARNING) << "create or reset sender failed, " - << ", leaderId = " << leaderId; + << ", leaderId = " << leaderId; bthread_usleep( - iosenderopt_.failRequestOpt.chunkserverOPRetryIntervalUS); + iosenderopt_.failRequestOpt.chunkserverOPRetryIntervalUS); continue; } } return 0; } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/copyset_client.h b/src/client/copyset_client.h index 3dc1fc66f7..0881a7ffac 100644 --- a/src/client/copyset_client.h +++ b/src/client/copyset_client.h @@ -23,11 +23,11 @@ #ifndef SRC_CLIENT_COPYSET_CLIENT_H_ #define SRC_CLIENT_COPYSET_CLIENT_H_ -#include #include +#include -#include #include +#include #include "include/curve_compiler_specific.h" #include "src/client/client_common.h" @@ -43,12 +43,14 @@ namespace client { using curve::common::Uncopyable; using ::google::protobuf::Closure; -// TODO(tongguangxun) :后续除了read、write的接口也需要调整重试逻辑 +// TODO(tongguangxun): In addition to the read and write interfaces, the retry +// logic needs to be adjusted in the future class MetaCache; class RequestScheduler; /** - * 负责管理 ChunkServer 的链接,向上层提供访问 - * 指定 copyset 的 chunk 的 read/write 等接口 + * Responsible for managing connections to ChunkServers and providing + * upper-layer access to read/write interfaces for specific chunks within a + * copyset. 
 */
class CopysetClient {
 public:
@@ -68,120 +70,101 @@
        senderManager_ = nullptr;
    }

-    int Init(MetaCache *metaCache,
-        const IOSenderOption& ioSenderOpt,
+    int Init(MetaCache* metaCache, const IOSenderOption& ioSenderOpt,
             RequestScheduler* scheduler = nullptr,
             FileMetric* fileMetic = nullptr);
    /**
-     * 返回依赖的Meta Cache
+     * Return the MetaCache this client depends on
     */
-    MetaCache* GetMetaCache() {
-        return metaCache_;
-    }
+    MetaCache* GetMetaCache() { return metaCache_; }

    /**
-     * 读Chunk
-     * @param idinfo为chunk相关的id信息
-     * @param sn:文件版本号
-     * @param offset:读的偏移
-     * @param length:读的长度
-     * @param souceInfo chunk克隆源信息
-     * @param done:上一层异步回调的closure
+     * Read a chunk
+     * @param idinfo: the chunk's id information
+     * @param sn: file version number
+     * @param offset: read offset
+     * @param length: read length
+     * @param sourceInfo: chunk clone source information
+     * @param done: the closure for the upper layer's asynchronous callback
     */
-    int ReadChunk(const ChunkIDInfo& idinfo,
-                  uint64_t sn,
-                  off_t offset,
-                  size_t length,
-                  const RequestSourceInfo& sourceInfo,
-                  google::protobuf::Closure *done);
+    int ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset,
+                  size_t length, const RequestSourceInfo& sourceInfo,
+                  google::protobuf::Closure* done);

    /**
-     * 写Chunk
-     * @param idinfo为chunk相关的id信息
-     * @param fileId: file id
-     * @param epoch: file epoch
-     * @param sn:文件版本号
-     * @param writeData:要写入的数据
-     * @param offset:写的偏移
-     * @param length:写的长度
-     * @param sourceInfo chunk克隆源信息
-     * @param done:上一层异步回调的closure
-     */
-    int WriteChunk(const ChunkIDInfo& idinfo,
-                   uint64_t fileId,
-                   uint64_t epoch,
-                   uint64_t sn,
-                   const butil::IOBuf& writeData,
-                   off_t offset,
-                   size_t length,
-                   const RequestSourceInfo& sourceInfo,
-                   Closure *done);
+     * Write a chunk
+     * @param idinfo: the chunk's id information
+     * @param fileId: file id
+     * @param epoch: file epoch
+     * @param sn: file version number
+     * @param writeData: the data to write
+     * @param offset: write offset
+     * @param length: write length
+     * @param sourceInfo: chunk clone source information
+     * @param done: the closure for the upper layer's asynchronous callback
+     */
+    int WriteChunk(const ChunkIDInfo& idinfo, uint64_t fileId, uint64_t epoch,
+                   uint64_t sn, const butil::IOBuf& writeData, off_t offset,
+                   size_t length, const RequestSourceInfo& sourceInfo,
+                   Closure* done);

    /**
-     * 读Chunk快照文件
-     * @param idinfo为chunk相关的id信息
-     * @param sn:文件版本号
-     * @param offset:读的偏移
-     * @param length:读的长度
-     * @param done:上一层异步回调的closure
+     * Read a chunk snapshot file
+     * @param idinfo: the chunk's id information
+     * @param sn: file version number
+     * @param offset: read offset
+     * @param length: read length
+     * @param done: the closure for the upper layer's asynchronous callback
     */
-    int ReadChunkSnapshot(const ChunkIDInfo& idinfo,
-                          uint64_t sn,
-                          off_t offset,
-                          size_t length,
-                          Closure *done);
+    int ReadChunkSnapshot(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset,
+                          size_t length, Closure* done);

    /**
-     * 删除此次转储时产生的或者历史遗留的快照
-     * 如果转储过程中没有产生快照,则修改chunk的correctedSn
-     * @param idinfo为chunk相关的id信息
-     * @param correctedSn:需要修正的版本号
-     * @param done:上一层异步回调的closure
+     * Delete snapshots generated during this dump or left over from history.
+     * If no snapshot was generated during the dump, correct the chunk's
+     * correctedSn instead.
+     * @param idinfo: the chunk's id information
+     * @param correctedSn: the version number to be corrected
+     * @param done: the closure for the upper layer's asynchronous callback
     */
    int DeleteChunkSnapshotOrCorrectSn(const ChunkIDInfo& idinfo,
-                                       uint64_t correctedSn,
-                                       Closure *done);
+                                       uint64_t correctedSn, Closure* done);

    /**
-     * 获取chunk文件的信息
-     * @param idinfo为chunk相关的id信息
-     * @param done:上一层异步回调的closure
+     * Obtain information about a chunk file
+     * @param idinfo: the chunk's id information
+     * @param done: the closure for the upper layer's asynchronous callback
     */
-    int GetChunkInfo(const ChunkIDInfo& idinfo,
-                     Closure *done);
+    int GetChunkInfo(const ChunkIDInfo& idinfo, Closure* done);

    /**
-     * @brief lazy 创建clone chunk
-     * @param idinfo为chunk相关的id信息
-     * @param:location 数据源的url
-     * @param:sn chunk的序列号
-     * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn
-     * @param:chunkSize chunk的大小
-     * @param done:上一层异步回调的closure
-     * @return 错误码
-     */
-    int CreateCloneChunk(const ChunkIDInfo& idinfo,
-                         const std::string &location,
-                         uint64_t sn,
-                         uint64_t correntSn,
-                         uint64_t chunkSize,
-                         Closure *done);
-
-    /**
-     * @brief 实际恢复chunk数据
-     * @param idinfo为chunk相关的id信息
-     * @param:offset 偏移
-     * @param:len 长度
-     * @param done:上一层异步回调的closure
-     * @return 错误码
-     */
-    int RecoverChunk(const ChunkIDInfo& idinfo,
-                     uint64_t offset,
-                     uint64_t len,
-                     Closure *done);
+     * @brief Lazily create a clone chunk
+     * @param idinfo: the chunk's id information
+     * @param location: URL of the data source
+     * @param sn: the chunk's sequence number
+     * @param correntSn: the correctedSn applied to the chunk during
+     * CreateCloneChunk
+     * @param chunkSize: chunk size
+     * @param done: the closure for the upper layer's asynchronous callback
+     * @return error code
+     */
+    int CreateCloneChunk(const ChunkIDInfo& idinfo, const std::string& location,
+                         uint64_t sn, uint64_t correntSn, uint64_t chunkSize,
+                         Closure* done);
+
+    /**
+     * @brief Actually recover the chunk data
+     * @param idinfo: the chunk's id information
+     * @param offset: offset
+     * @param len: length
+     * @param done: the closure for the upper layer's asynchronous callback
+     * @return error code
+     */
+    int RecoverChunk(const ChunkIDInfo& idinfo, uint64_t offset, uint64_t len,
+                     Closure* done);

    /**
-     * @brief 如果csId对应的RequestSender不健康,就进行重置
+     * @brief If the RequestSender corresponding to csId is not healthy, reset
+     * it
     * @param csId chunkserver id
     */
    void ResetSenderIfNotHealth(const ChunkServerID& csId) {
@@ -189,24 +172,21 @@
    }

    /**
-     * session过期,需要将重试RPC停住
+     * When the session expires, retried RPCs need to be held back
     */
-    void StartRecycleRetryRPC() {
-        sessionNotValid_ = true;
-    }
+    void StartRecycleRetryRPC() { sessionNotValid_ = true; }

    /**
-     * session恢复通知不再回收重试的RPC
+     * When the session recovers, retried RPCs no longer need to be recycled
     */
-    void ResumeRPCRetry() {
-        sessionNotValid_ = false;
-    }
+    void ResumeRPCRetry() { sessionNotValid_ = false; }
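How these two hooks are expected to be driven around session loss and recovery (an illustrative sketch; the callback names are hypothetical, the real wiring lives in the lease logic):

// on renewal failure: hold retried RPCs until the session is valid again
void OnLeaseInvalid(CopysetClient* client) { client->StartRecycleRetryRPC(); }
// on successful renewal: let retries flow again
void OnLeaseValid(CopysetClient* client) { client->ResumeRPCRetry(); }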
    /**
-     * 在文件关闭的时候接收上层关闭通知, 根据session有效状态
-     * 置位exitFlag, 如果sessio无效状态下再有rpc超时返回,这
-     * 些RPC会直接错误返回,如果session正常,则将继续正常下发
-     * RPC,直到重试次数结束或者成功返回
+     * Receive the upper layer's close notification when the file is closed
+     * and set exitFlag according to the session's validity. If RPC timeouts
+     * occur while the session is invalid, those RPCs return errors directly;
+     * if the session is valid, RPCs keep being issued until the retry limit
+     * is reached or they succeed.
     */
    void ResetExitFlag() {
        if (sessionNotValid_) {
@@ -218,47 +198,49 @@

    friend class WriteChunkClosure;
    friend class ReadChunkClosure;

-    // 拉取新的leader信息
-    bool FetchLeader(LogicPoolID lpid,
-                     CopysetID cpid,
-                     ChunkServerID* leaderid,
+    // Fetch the latest leader information
+    bool FetchLeader(LogicPoolID lpid, CopysetID cpid, ChunkServerID* leaderid,
                     butil::EndPoint* leaderaddr);

    /**
-     * 执行发送rpc task,并进行错误重试
-     * @param[in]: idinfo为当前rpc task的id信息
-     * @param[in]: task为本次要执行的rpc task
-     * @param[in]: done是本次rpc 任务的异步回调
-     * @return: 成功返回0, 否则-1
+     * Execute the rpc-sending task, retrying on failure
+     * @param[in]: idinfo is the id information of the current rpc task
+     * @param[in]: task is the rpc task to execute
+     * @param[in]: done is the asynchronous callback of this rpc task
+     * @return: 0 on success, otherwise -1
     */
-    int DoRPCTask(const ChunkIDInfo& idinfo,
+    int DoRPCTask(
+        const ChunkIDInfo& idinfo,
        std::function)> task,
-        Closure *done);
+        Closure* done);

 private:
-    // 元数据缓存
-    MetaCache *metaCache_;
-    // 所有ChunkServer的链接管理者
-    RequestSenderManager *senderManager_;
-    // 配置
+    // Metadata cache
+    MetaCache* metaCache_;
+    // Connection manager for all ChunkServers
+    RequestSenderManager* senderManager_;
+    // Configuration
    IOSenderOption iosenderopt_;

-    // session是否有效,如果session无效那么需要将重试的RPC停住
-    // RPC停住通过将这个rpc重新push到request scheduler队列,这样不会
-    // 阻塞brpc内部的线程,防止一个文件的操作影响到其他文件
+    // Whether the session is valid. If it is not, retried RPCs must be
+    // paused by re-pushing them into the request scheduler queue; this
+    // avoids blocking brpc's internal threads and prevents one file's
+    // operations from affecting other files.
    bool sessionNotValid_;

-    // request 调度器,在session过期的时候重新将RPC push到调度队列
+    // request scheduler, used to push RPCs back to the scheduling queue when
+    // the session expires
    RequestScheduler* scheduler_;

-    // 当前copyset client对应的文件metric
+    // The file metric corresponding to the current copyset client
    FileMetric* fileMetric_;

-    // 是否在停止状态中,如果是在关闭过程中且session失效,需要将rpc直接返回不下发
+    // Whether the client is exiting: if the file is being closed and the
+    // session has failed, rpcs are returned directly instead of being issued
    bool exitFlag_;
};

-} // namespace client
-} // namespace curve
+}  // namespace client
+}  // namespace curve

#endif  // SRC_CLIENT_COPYSET_CLIENT_H_
diff --git a/src/client/file_instance.cpp b/src/client/file_instance.cpp
index 63836653de..343b6cd5f8 100644
--- a/src/client/file_instance.cpp
+++ b/src/client/file_instance.cpp
@@ -24,21 +24,22 @@

#include
#include
+
#include

#include "src/client/iomanager4file.h"
#include "src/client/mds_client.h"
-#include "src/common/timeutility.h"
#include "src/common/curve_define.h"
#include "src/common/fast_align.h"
+#include "src/common/timeutility.h"

namespace curve {
namespace client {

using curve::client::ClientConfig;
+using curve::common::is_aligned;
using curve::common::TimeUtility;
using curve::mds::SessionStatus;
-using curve::common::is_aligned;

bool CheckAlign(off_t off, size_t length, size_t blocksize) {
    return is_aligned(off, blocksize) && is_aligned(length, blocksize);
}
@@ -105,18 +106,16 @@ void FileInstance::UnInitialize() {

int FileInstance::Read(char* buf, off_t offset, size_t length) {
    if (CURVE_UNLIKELY(!CheckAlign(offset, length, blocksize_))) {
        LOG(ERROR) << "IO not aligned, off: " << offset
-                   << ", length: " << length
-                   << ", block size: " << blocksize_;
+                   << ", length: " << length << ", block size: " << blocksize_;
        return -LIBCURVE_ERROR::NOT_ALIGNED;
    }

-    DLOG_EVERY_SECOND(INFO) << "begin Read "<< finfo_.fullPathName
-                            << ", offset = " << offset
-                            << ", len = " << length;
+    DLOG_EVERY_SECOND(INFO) << "begin Read " << finfo_.fullPathName
+                            << ", offset = " << offset << ", len = " << length;
    return iomanager4file_.Read(buf, offset, length, mdsclient_.get());
}

-int FileInstance::Write(const char *buf, off_t offset, size_t len) {
+int FileInstance::Write(const char* buf, off_t offset, size_t len) {
    if (CURVE_UNLIKELY(readonly_)) {
        DVLOG(9) << "open with read only, do not support write!";
        return -1;
@@ -135,7 +134,7 @@

int FileInstance::AioRead(CurveAioContext* aioctx, UserDataType dataType) {
    if (CURVE_UNLIKELY(
-        !CheckAlign(aioctx->offset, aioctx->length, blocksize_))) {
+            !CheckAlign(aioctx->offset, aioctx->length, blocksize_))) {
        LOG(ERROR) << "IO not aligned, off: " << aioctx->offset
                   << ", length: " << aioctx->length
                   << ", block size: " << blocksize_;
@@ -144,20 +143,20 @@
        return -LIBCURVE_ERROR::NOT_ALIGNED;
    }

-    DLOG_EVERY_SECOND(INFO) << "begin AioRead " << finfo_.fullPathName
-                            << ", offset = " << aioctx->offset
-                            << ", len = " << aioctx->length;
+    DLOG_EVERY_SECOND(INFO)
+        << "begin AioRead " << finfo_.fullPathName
+        << ", offset = " << aioctx->offset << ", len = " << aioctx->length;
    return iomanager4file_.AioRead(aioctx, mdsclient_.get(), dataType);
}

-int FileInstance::AioWrite(CurveAioContext *aioctx, UserDataType dataType) {
+int FileInstance::AioWrite(CurveAioContext* aioctx, UserDataType
dataType) {
    if (CURVE_UNLIKELY(readonly_)) {
        DVLOG(9) << "open with read only, do not support write!";
        return -1;
    }

    if (CURVE_UNLIKELY(
-        !CheckAlign(aioctx->offset, aioctx->length, blocksize_))) {
+            !CheckAlign(aioctx->offset, aioctx->length, blocksize_))) {
        LOG(ERROR) << "IO not aligned, off: " << aioctx->offset
                   << ", length: " << aioctx->length
                   << ", block size: " << blocksize_;
@@ -166,9 +165,9 @@
        return -LIBCURVE_ERROR::NOT_ALIGNED;
    }

-    DLOG_EVERY_SECOND(INFO) << "begin AioWrite " << finfo_.fullPathName
-                            << ", offset = " << aioctx->offset
-                            << ", len = " << aioctx->length;
+    DLOG_EVERY_SECOND(INFO)
+        << "begin AioWrite " << finfo_.fullPathName
+        << ", offset = " << aioctx->offset << ", len = " << aioctx->length;
    return iomanager4file_.AioWrite(aioctx, mdsclient_.get(), dataType);
}

@@ -181,7 +180,7 @@ int FileInstance::Discard(off_t offset, size_t length) {
    return -1;
}

-int FileInstance::AioDiscard(CurveAioContext *aioctx) {
+int FileInstance::AioDiscard(CurveAioContext* aioctx) {
    if (CURVE_LIKELY(!readonly_)) {
        return iomanager4file_.AioDiscard(aioctx, mdsclient_.get());
    }
@@ -190,16 +189,23 @@
    return -1;
}

-// 两种场景会造成在Open的时候返回LIBCURVE_ERROR::FILE_OCCUPIED
-// 1. 强制重启qemu不会调用close逻辑,然后启动的时候原来的文件sessio还没过期.
-//    导致再次去发起open的时候,返回被占用,这种情况可以通过load sessionmap
-//    拿到已有的session,再去执行refresh。
-// 2. 由于网络原因,导致open rpc超时,然后再去重试的时候就会返回FILE_OCCUPIED
-//    这时候当前还没有成功打开,所以还没有存储该session信息,所以无法通过refresh
-//    再去打开,所以这时候需要获取mds一侧session lease时长,然后在client这一侧
-//    等待一段时间再去Open,如果依然失败,就向上层返回失败。
+// Two scenarios can lead to returning LIBCURVE_ERROR::FILE_OCCUPIED when
+// opening:
+// 1. Forcibly restarting QEMU does not trigger the close logic, and at
+//    startup the original file session has not yet expired, so the next
+//    open returns "occupied". This situation can be resolved by loading the
+//    session map, obtaining the existing session, and then performing a
+//    refresh.
+// 2. Due to network issues, the open RPC times out, and when retrying, it
+//    returns FILE_OCCUPIED. At this point, the file hasn't been successfully
+//    opened yet, so the session information isn't stored, and it's
+//    impossible to open it through refresh. In this case, the client needs
+//    to obtain the session lease duration on the MDS side, then wait for a
+//    period on the client side before attempting to Open again. If it still
+//    fails, return a failure to the upper layer.
int FileInstance::Open(std::string* sessionId) { - LeaseSession_t lease; + LeaseSession_t lease; int ret = LIBCURVE_ERROR::FAILED; FileEpoch fEpoch; @@ -218,8 +224,8 @@ int FileInstance::Open(std::string* sessionId) { return -ret; } -int FileInstance::GetFileInfo(const std::string &filename, FInfo_t *fi, - FileEpoch_t *fEpoch) { +int FileInstance::GetFileInfo(const std::string& filename, FInfo_t* fi, + FileEpoch_t* fEpoch) { LIBCURVE_ERROR ret = mdsclient_->GetFileInfo(filename, finfo_.userinfo, fi, fEpoch); return -ret; @@ -240,12 +246,12 @@ int FileInstance::Close() { FileInstance* FileInstance::NewInitedFileInstance( const FileServiceOption& fileServiceOption, - const std::shared_ptr& mdsClient, - const std::string& filename, + const std::shared_ptr& mdsClient, const std::string& filename, const UserInfo& userInfo, - const OpenFlags& openflags, // TODO(all): maybe we can put userinfo and readonly into openflags // NOLINT + const OpenFlags& openflags, // TODO(all): maybe we can put userinfo and + // readonly into openflags // NOLINT bool readonly) { - FileInstance *instance = new (std::nothrow) FileInstance(); + FileInstance* instance = new (std::nothrow) FileInstance(); if (instance == nullptr) { LOG(ERROR) << "Create FileInstance failed, filename: " << filename; return nullptr; @@ -266,10 +272,8 @@ FileInstance* FileInstance::NewInitedFileInstance( } FileInstance* FileInstance::Open4Readonly( - const FileServiceOption& opt, - const std::shared_ptr& mdsclient, - const std::string& filename, - const UserInfo& userInfo, + const FileServiceOption& opt, const std::shared_ptr& mdsclient, + const std::string& filename, const UserInfo& userInfo, const OpenFlags& openflags) { FileInstance* instance = FileInstance::NewInitedFileInstance( opt, mdsclient, filename, userInfo, openflags, true); @@ -279,8 +283,8 @@ FileInstance* FileInstance::Open4Readonly( } FileEpoch_t fEpoch; - int ret = mdsclient->GetFileInfo(filename, userInfo, &instance->finfo_, - &fEpoch); + int ret = + mdsclient->GetFileInfo(filename, userInfo, &instance->finfo_, &fEpoch); if (ret != 0) { LOG(ERROR) << "Get file info failed!"; instance->UnInitialize(); diff --git a/src/client/file_instance.h b/src/client/file_instance.h index 432a3402e4..952fc7e3d4 100644 --- a/src/client/file_instance.h +++ b/src/client/file_instance.h @@ -25,13 +25,13 @@ #include #include -#include "src/client/mds_client.h" #include "include/client/libcurve.h" #include "include/curve_compiler_specific.h" #include "src/client/client_common.h" -#include "src/client/service_helper.h" #include "src/client/iomanager4file.h" #include "src/client/lease_executor.h" +#include "src/client/mds_client.h" +#include "src/client/service_helper.h" namespace curve { namespace client { @@ -42,55 +42,61 @@ class CURVE_CACHELINE_ALIGNMENT FileInstance { ~FileInstance() = default; /** - * 初始化 - * @param: filename文件名用于初始化iomanager的metric信息 - * @param: mdsclient为全局的mds client - * @param: userinfo为user信息 - * @param: fileservicopt fileclient的配置选项 - * @param: clientMetric为client端要统计的metric信息 - * @param: readonly是否以只读方式打开 - * @return: 成功返回true、否则返回false + * Initialize + * @param: filename The filename used to initialize the iomanager's metric + * information. + * @param: mdsclient The global mds client. + * @param: userinfo User information. + * @param: fileservicopt The configuration options for the fileclient. + * @param: clientMetric Metric information to be collected on the client + * side. + * @param: readonly Whether to open in read-only mode. 
+ * @return: Returns true on success, otherwise returns false.
 */
 bool Initialize(const std::string& filename,
 const std::shared_ptr<MDSClient>& mdsclient,
- const UserInfo& userinfo,
- const OpenFlags& openflags,
+ const UserInfo& userinfo, const OpenFlags& openflags,
 const FileServiceOption& fileservicopt,
 bool readonly = false);
 /**
- * 打开文件
- * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED
+ * Open the file
+ * @return: Returns LIBCURVE_ERROR::OK on success, otherwise
+ * LIBCURVE_ERROR::FAILED
 */
 int Open(std::string* sessionId = nullptr);
 /**
- * 同步模式读
- * @param: buf为当前待读取的缓冲区
- * @param:offset文件内的便宜
- * @parma:length为待读取的长度
- * @return: 成功返回读取真实长度,-1为失败
+ * Synchronous mode read
+ * @param: buf The buffer to read into
+ * @param: offset The offset within the file
+ * @param: length The length to be read
+ * @return: Returns the actual length read on success, -1 indicates
+ * failure
 */
 int Read(char* buf, off_t offset, size_t length);
 /**
- * 同步模式写
- * @param: buf为当前待写入的缓冲区
- * @param:offset文件内的便宜
- * @parma:length为待读取的长度
- * @return: 成功返回写入真实长度,-1为失败
+ * Synchronous mode write
+ * @param: buf The buffer to be written
+ * @param: offset The offset within the file
+ * @param: length The length to be written
+ * @return: Returns the actual length written on success, -1 indicates
+ * failure
 */
 int Write(const char* buf, off_t offset, size_t length);
 /**
- * 异步模式读
- * @param: aioctx为异步读写的io上下文,保存基本的io信息
+ * Asynchronous mode read.
+ * @param: aioctx The I/O context for asynchronous read/write, which holds
+ * basic I/O information
 * @param: dataType type of user buffer
- * @return: 0为成功,小于0为失败
+ * @return: 0 on success, less than 0 on failure
 */
 int AioRead(CurveAioContext* aioctx, UserDataType dataType);
 /**
- * 异步模式写
- * @param: aioctx为异步读写的io上下文,保存基本的io信息
+ * Asynchronous mode write.
+ * @param: aioctx An asynchronous read/write IO context that stores basic
+ * IO information
 * @param: dataType type of user buffer
- * @return: 0为成功,小于0为失败
+ * @return: 0 indicates success, less than 0 indicates failure
 */
 int AioWrite(CurveAioContext* aioctx, UserDataType dataType);
@@ -113,69 +119,61 @@ class CURVE_CACHELINE_ALIGNMENT FileInstance {
 void UnInitialize();
- IOManager4File* GetIOManager4File() {
- return &iomanager4file_;
- }
+ IOManager4File* GetIOManager4File() { return &iomanager4file_; }
 /**
- * 获取lease, 测试代码使用
+ * Get the lease; for test code use
 */
- LeaseExecutor* GetLeaseExecutor() const {
- return leaseExecutor_.get();
- }
+ LeaseExecutor* GetLeaseExecutor() const { return leaseExecutor_.get(); }
- int GetFileInfo(const std::string& filename,
- FInfo_t* fi, FileEpoch_t *fEpoch);
+ int GetFileInfo(const std::string& filename, FInfo_t* fi,
+ FileEpoch_t* fEpoch);
- void UpdateFileEpoch(const FileEpoch_t &fEpoch) {
+ void UpdateFileEpoch(const FileEpoch_t& fEpoch) {
 iomanager4file_.UpdateFileEpoch(fEpoch);
 }
 /**
- * @brief 获取当前instance对应的文件信息
+ * @brief Get the file information corresponding to the current instance
 *
- * @return 当前instance对应文件的信息
+ * @return The information of the file corresponding to the current instance
 */
- FInfo GetCurrentFileInfo() const {
- return finfo_;
- }
+ FInfo GetCurrentFileInfo() const { return finfo_; }
 static FileInstance* NewInitedFileInstance(
 const FileServiceOption& fileServiceOption,
 const std::shared_ptr<MDSClient>& mdsclient,
- const std::string& filename,
- const UserInfo& userInfo,
- const OpenFlags& openflags,
- bool readonly);
+ const std::string& filename, const UserInfo& userInfo,
+ const OpenFlags& openflags, bool readonly);
 static FileInstance* Open4Readonly(
 const FileServiceOption& opt,
 const std::shared_ptr<MDSClient>& mdsclient,
- const std::string& filename,
- const UserInfo& userInfo,
+ const std::string& filename, const UserInfo& userInfo,
 const OpenFlags& openflags = DefaultReadonlyOpenFlags());
 private:
 void StopLease();
 private:
- // 保存当前file的文件信息
+ // File information of the current file
 FInfo finfo_;
- // 当前FileInstance的初始化配置信息
- FileServiceOption fileopt_;
+ // The initialization configuration information of the current FileInstance
+ FileServiceOption fileopt_;
- // MDSClient是FileInstance与mds通信的唯一出口
+ // MDSClient is the only channel through which FileInstance communicates
+ // with the mds
 std::shared_ptr<MDSClient> mdsclient_;
- // 每个文件都持有与MDS通信的lease,LeaseExecutor是续约执行者
+ // Each file holds a lease for communication with MDS, and the LeaseExecutor
+ // is the renewal executor
 std::unique_ptr<LeaseExecutor> leaseExecutor_;
- // IOManager4File用于管理所有向chunkserver端发送的IO
- IOManager4File iomanager4file_;
+ // IOManager4File is used to manage all IO sent to chunkservers
+ IOManager4File iomanager4file_;
- // 是否为只读方式
- bool readonly_ = false;
+ // Whether the file is opened in read-only mode
+ bool readonly_ = false;
 // offset and length must align with `blocksize_`
 // 4096 for backward compatibility
@@ -184,7 +182,7 @@ class CURVE_CACHELINE_ALIGNMENT FileInstance {
 bool CheckAlign(off_t off, size_t length, size_t blocksize);
-} // namespace client
-} // namespace curve
+} // namespace client
+} // namespace curve
#endif // SRC_CLIENT_FILE_INSTANCE_H_
diff --git a/src/client/inflight_controller.h b/src/client/inflight_controller.h
index 5c59f4edce..ddef520d0d 100644
--- a/src/client/inflight_controller.h
+++ b/src/client/inflight_controller.h
@@ -28,8 +28,8 @@ namespace curve { namespace client {
-using curve::common::Mutex;
 using curve::common::ConditionVariable;
+using
curve::common::Mutex;
 class InflightControl {
 public:
@@ -40,8 +40,8 @@ class InflightControl {
 }
 /**
- * @brief 调用该接口等待inflight全部回来,这段期间是hang的,
- * 在close文件时调用
+ * @brief Wait until all inflight IOs have come back; the call hangs for
+ * that period. Called when closing the file
 */
 void WaitInflightAllComeBack() {
 LOG(INFO) << "wait inflight to complete, count = " << curInflightIONum_;
@@ -53,7 +53,8 @@
 }
 /**
- * @brief 调用该接口等待inflight回来,这段期间是hang的
+ * @brief Wait for inflight IOs to come back below the limit; the call
+ * hangs for that period
 */
 void WaitInflightComeBack() {
 if (curInflightIONum_.load(std::memory_order_acquire) >=
@@ -67,14 +68,14 @@
 }
 /**
- * @brief 递增inflight num
+ * @brief Increment inflight num
 */
 void IncremInflightNum() {
 curInflightIONum_.fetch_add(1, std::memory_order_release);
 }
 /**
- * @brief 递减inflight num
+ * @brief Decrement inflight num
 */
 void DecremInflightNum() {
 std::lock_guard<Mutex> lk(inflightComeBackmtx_);
@@ -90,24 +91,30 @@
 }
 /**
- * WaitInflightComeBack会检查当前未返回的io数量是否超过我们限制的最大未返回inflight数量
- * 但是真正的inflight数量与上层并发调用的线程数有关。
- * 假设我们设置的maxinflight=100,上层有三个线程在同时调用GetInflightToken,
- * 如果这个时候inflight数量为99,那么并发状况下这3个线程在WaitInflightComeBack
- * 都会通过然后向下并发执行IncremInflightNum,这个时候真正的inflight为102,
- * 下一个下发的时候需要等到inflight数量小于100才能继续,也就是等至少3个IO回来才能继续
- * 下发。这个误差是可以接受的,他与scheduler一侧并发度有关,误差有上限。
- * 如果想要精确控制inflight数量,就需要在接口处加锁,让原本可以并发的逻辑变成了
- * 串行,这样得不偿失。因此我们这里选择容忍一定误差范围。
+ * WaitInflightComeBack checks if the current number of pending IOs exceeds
+ * our maximum allowed inflight limit. However, the actual inflight count is
+ * influenced by concurrent calls from upper-layer threads. Suppose we set
+ * maxinflight to 100, and there are three upper-layer threads
+ * simultaneously calling GetInflightToken. If, at this moment, the inflight
+ * count is 99, then in a concurrent scenario, all three threads in
+ * WaitInflightComeBack will pass and proceed to concurrently execute
+ * IncremInflightNum. Consequently, the actual inflight count becomes 102.
+ * The next dispatch operation will need to wait until the inflight count is
+ * less than 100 to proceed, which means it needs at least 3 IOs to return
+ * before proceeding. This margin of error is acceptable and is related to
+ * the concurrency level on the scheduler side, with a defined upper limit.
+ * If precise control over the inflight count is required, it would
+ * necessitate adding locks at the interface level, converting originally
+ * concurrent logic into serial, which would not be a cost-effective
+ * solution. Therefore, we choose to tolerate a certain margin of error in
+ * this scenario.
*/ void GetInflightToken() { WaitInflightComeBack(); IncremInflightNum(); } - void ReleaseInflightToken() { - DecremInflightNum(); - } + void ReleaseInflightToken() { DecremInflightNum(); } /** * @brief Get current inflight io num, only use in test code @@ -117,16 +124,16 @@ class InflightControl { } private: - uint64_t maxInflightNum_ = 0; + uint64_t maxInflightNum_ = 0; std::atomic curInflightIONum_{0}; - Mutex inflightComeBackmtx_; - ConditionVariable inflightComeBackcv_; - Mutex inflightAllComeBackmtx_; - ConditionVariable inflightAllComeBackcv_; + Mutex inflightComeBackmtx_; + ConditionVariable inflightComeBackcv_; + Mutex inflightAllComeBackmtx_; + ConditionVariable inflightAllComeBackcv_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_INFLIGHT_CONTROLLER_H_ diff --git a/src/client/io_condition_varaiable.h b/src/client/io_condition_varaiable.h index a220168db3..9b721bd60f 100644 --- a/src/client/io_condition_varaiable.h +++ b/src/client/io_condition_varaiable.h @@ -23,12 +23,13 @@ #ifndef SRC_CLIENT_IO_CONDITION_VARAIABLE_H_ #define SRC_CLIENT_IO_CONDITION_VARAIABLE_H_ -#include //NOLINT -#include //NOLINT +#include //NOLINT +#include //NOLINT namespace curve { namespace client { -// IOConditionVariable是用户同步IO场景下IO等待条件变量 +// IOConditionVariable is the IO waiting condition variable in the user +// synchronous IO scenario class IOConditionVariable { public: IOConditionVariable() : retCode_(-1), done_(false), mtx_(), cv_() {} @@ -36,9 +37,10 @@ class IOConditionVariable { ~IOConditionVariable() = default; /** - * 条件变量唤醒函数,因为底层的RPC request是异步的,所以用户下发同步IO的时候需要 - * 在发送读写请求的时候暂停等待IO返回。 - * @param: retcode是当前IO的返回值 + * Condition variable wakeup function. Since the underlying RPC requests are + * asynchronous, when users initiate synchronous IO, they need to pause and + * wait for the IO to return while sending read/write requests. + * @param: retcode is the return value of the current IO. 
*/ void Complete(int retcode) { std::unique_lock lk(mtx_); @@ -48,7 +50,8 @@ class IOConditionVariable { } /** - * 是用户IO需要等待时候调用的函数,这个函数会在Complete被调用的时候返回 + * This is a function called when user IO needs to wait, and this function + * will return when Complete is called */ int Wait() { std::unique_lock lk(mtx_); @@ -58,20 +61,20 @@ class IOConditionVariable { } private: - // 当前IO的返回值 - int retCode_; + // The return value of the current IO + int retCode_; - // 当前IO是否完成 - bool done_; + // Is the current IO completed + bool done_; - // 条件变量使用的锁 - std::mutex mtx_; + // Locks used by conditional variables + std::mutex mtx_; - // 条件变量用于等待 + // Condition variable used for waiting std::condition_variable cv_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_IO_CONDITION_VARAIABLE_H_ diff --git a/src/client/io_tracker.cpp b/src/client/io_tracker.cpp index 85d6dae911..b835ebf503 100644 --- a/src/client/io_tracker.cpp +++ b/src/client/io_tracker.cpp @@ -20,21 +20,22 @@ * Author: tongguangxun */ +#include "src/client/io_tracker.h" + #include #include #include #include -#include "src/client/splitor.h" +#include "src/client/discard_task.h" #include "src/client/iomanager.h" -#include "src/client/io_tracker.h" -#include "src/client/request_scheduler.h" +#include "src/client/metacache_struct.h" #include "src/client/request_closure.h" -#include "src/common/timeutility.h" +#include "src/client/request_scheduler.h" #include "src/client/source_reader.h" -#include "src/client/metacache_struct.h" -#include "src/client/discard_task.h" +#include "src/client/splitor.h" +#include "src/common/timeutility.h" namespace curve { namespace client { @@ -44,24 +45,22 @@ using curve::chunkserver::CHUNK_OP_STATUS; std::atomic IOTracker::tracekerID_(1); DiscardOption IOTracker::discardOption_; -IOTracker::IOTracker(IOManager* iomanager, - MetaCache* mc, - RequestScheduler* scheduler, - FileMetric* clientMetric, +IOTracker::IOTracker(IOManager* iomanager, MetaCache* mc, + RequestScheduler* scheduler, FileMetric* clientMetric, bool disableStripe) : mc_(mc), scheduler_(scheduler), iomanager_(iomanager), fileMetric_(clientMetric), disableStripe_(disableStripe) { - id_ = tracekerID_.fetch_add(1, std::memory_order_relaxed); - scc_ = nullptr; - aioctx_ = nullptr; - data_ = nullptr; - type_ = OpType::UNKNOWN; - errcode_ = LIBCURVE_ERROR::OK; - offset_ = 0; - length_ = 0; + id_ = tracekerID_.fetch_add(1, std::memory_order_relaxed); + scc_ = nullptr; + aioctx_ = nullptr; + data_ = nullptr; + type_ = OpType::UNKNOWN; + errcode_ = LIBCURVE_ERROR::OK; + offset_ = 0; + length_ = 0; reqlist_.clear(); reqcount_.store(0, std::memory_order_release); opStartTimePoint_ = curve::common::TimeUtility::GetTimeofDayUs(); @@ -162,8 +161,7 @@ int IOTracker::ReadFromSource(const std::vector& reqCtxVec, void IOTracker::StartWrite(const void* buf, off_t offset, size_t length, MDSClient* mdsclient, const FInfo_t* fileInfo, - const FileEpoch* fEpoch, - Throttle* throttle) { + const FileEpoch* fEpoch, Throttle* throttle) { data_ = const_cast(buf); offset_ = offset; length_ = length; @@ -190,8 +188,7 @@ void IOTracker::StartAioWrite(CurveAioContext* ctx, MDSClient* mdsclient, } void IOTracker::DoWrite(MDSClient* mdsclient, const FInfo_t* fileInfo, - const FileEpoch* fEpoch, - Throttle* throttle) { + const FileEpoch* fEpoch, Throttle* throttle) { if (nullptr == data_) { ReturnOnFail(); return; @@ -199,8 +196,7 @@ void IOTracker::DoWrite(MDSClient* mdsclient, const FInfo_t* fileInfo, switch 
(userDataType_) { case UserDataType::RawBuffer: - writeData_.append_user_data(data_, length_, - TrivialDeleter); + writeData_.append_user_data(data_, length_, TrivialDeleter); break; case UserDataType::IOBuffer: writeData_ = *reinterpret_cast(data_); @@ -211,9 +207,9 @@ void IOTracker::DoWrite(MDSClient* mdsclient, const FInfo_t* fileInfo, throttle->Add(false, length_); } - int ret = Splitor::IO2ChunkRequests(this, mc_, &reqlist_, &writeData_, - offset_, length_, - mdsclient, fileInfo, fEpoch); + int ret = + Splitor::IO2ChunkRequests(this, mc_, &reqlist_, &writeData_, offset_, + length_, mdsclient, fileInfo, fEpoch); if (ret == 0) { uint32_t subIoIndex = 0; @@ -284,14 +280,14 @@ void IOTracker::DoDiscard(MDSClient* mdsClient, const FInfo* fileInfo, Done(); } -void IOTracker::ReadSnapChunk(const ChunkIDInfo &cinfo, - uint64_t seq, uint64_t offset, uint64_t len, - char *buf, SnapCloneClosure* scc) { - scc_ = scc; - data_ = buf; +void IOTracker::ReadSnapChunk(const ChunkIDInfo& cinfo, uint64_t seq, + uint64_t offset, uint64_t len, char* buf, + SnapCloneClosure* scc) { + scc_ = scc; + data_ = buf; offset_ = offset; length_ = len; - type_ = OpType::READ_SNAP; + type_ = OpType::READ_SNAP; int ret = -1; do { @@ -316,8 +312,8 @@ void IOTracker::ReadSnapChunk(const ChunkIDInfo &cinfo, } } -void IOTracker::DeleteSnapChunkOrCorrectSn(const ChunkIDInfo &cinfo, - uint64_t correctedSeq) { +void IOTracker::DeleteSnapChunkOrCorrectSn(const ChunkIDInfo& cinfo, + uint64_t correctedSeq) { type_ = OpType::DELETE_SNAP; int ret = -1; @@ -343,8 +339,8 @@ void IOTracker::DeleteSnapChunkOrCorrectSn(const ChunkIDInfo &cinfo, } } -void IOTracker::GetChunkInfo(const ChunkIDInfo &cinfo, - ChunkInfoDetail *chunkInfo) { +void IOTracker::GetChunkInfo(const ChunkIDInfo& cinfo, + ChunkInfoDetail* chunkInfo) { type_ = OpType::GET_CHUNK_INFO; int ret = -1; @@ -384,10 +380,10 @@ void IOTracker::CreateCloneChunk(const std::string& location, break; } - newreqNode->seq_ = sn; - newreqNode->chunksize_ = chunkSize; - newreqNode->location_ = location; - newreqNode->correctedSeq_ = correntSn; + newreqNode->seq_ = sn; + newreqNode->chunksize_ = chunkSize; + newreqNode->location_ = location; + newreqNode->correctedSeq_ = correntSn; FillCommonFields(cinfo, newreqNode); reqlist_.push_back(newreqNode); @@ -415,8 +411,8 @@ void IOTracker::RecoverChunk(const ChunkIDInfo& cinfo, uint64_t offset, break; } - newreqNode->rawlength_ = len; - newreqNode->offset_ = offset; + newreqNode->rawlength_ = len; + newreqNode->offset_ = offset; FillCommonFields(cinfo, newreqNode); reqlist_.push_back(newreqNode); @@ -433,8 +429,8 @@ void IOTracker::RecoverChunk(const ChunkIDInfo& cinfo, uint64_t offset, } void IOTracker::FillCommonFields(ChunkIDInfo idinfo, RequestContext* req) { - req->optype_ = type_; - req->idinfo_ = idinfo; + req->optype_ = type_; + req->idinfo_ = idinfo; req->done_->SetIOTracker(this); } @@ -459,9 +455,7 @@ void IOTracker::InitDiscardOption(const DiscardOption& opt) { discardOption_ = opt; } -int IOTracker::Wait() { - return iocv_.Wait(); -} +int IOTracker::Wait() { return iocv_.Wait(); } void IOTracker::Done() { if (type_ == OpType::READ || type_ == OpType::WRITE) { @@ -510,15 +504,15 @@ void IOTracker::Done() { MetricHelper::IncremUserEPSCount(fileMetric_, type_); if (type_ == OpType::READ || type_ == OpType::WRITE) { if (LIBCURVE_ERROR::EPOCH_TOO_OLD == errcode_) { - LOG(WARNING) << "file [" << fileMetric_->filename << "]" - << ", epoch too old, OpType = " << OpTypeToString(type_) - << ", offset = " << offset_ - << ", length = " << 
length_;
+ LOG(WARNING)
+ << "file [" << fileMetric_->filename << "]"
+ << ", epoch too old, OpType = " << OpTypeToString(type_)
+ << ", offset = " << offset_ << ", length = " << length_;
 } else {
 LOG(ERROR) << "file [" << fileMetric_->filename << "]"
- << ", IO Error, OpType = " << OpTypeToString(type_)
- << ", offset = " << offset_
- << ", length = " << length_;
+ << ", IO Error, OpType = " << OpTypeToString(type_)
+ << ", offset = " << offset_
+ << ", length = " << length_;
 }
 } else {
 if (OpType::CREATE_CLONE == type_ &&
@@ -533,13 +527,13 @@
 DestoryRequestList();
- // scc_和aioctx都为空的时候肯定是个同步调用
+ // When both scc_ and aioctx are empty, it is definitely a synchronous call.
 if (scc_ == nullptr && aioctx_ == nullptr) {
 iocv_.Complete(ToReturnCode());
 return;
 }
- // 异步函数调用,在此处发起回调
+ // An asynchronous call: the callback is issued here
 if (aioctx_ != nullptr) {
 aioctx_->ret = ToReturnCode();
 aioctx_->cb(aioctx_);
@@ -548,7 +542,7 @@
 scc_->Run();
 }
- // 回收当前io tracker
+ // Recycle the current io tracker
 iomanager_->HandleAsyncIOResponse(this);
}
@@ -565,12 +559,13 @@ void IOTracker::ReturnOnFail() { }
 void IOTracker::ChunkServerErr2LibcurveErr(CHUNK_OP_STATUS errcode,
- LIBCURVE_ERROR* errout) {
+ LIBCURVE_ERROR* errout) {
 switch (errcode) {
 case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS:
 *errout = LIBCURVE_ERROR::OK;
 break;
- // chunk或者copyset对于用户来说是透明的,所以直接返回错误
+ // Chunks and copysets are transparent to users, so the error is returned
+ // directly
 case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST:
 case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST:
 *errout = LIBCURVE_ERROR::NOTEXIST;
@@ -599,5 +594,5 @@ void IOTracker::ChunkServerErr2LibcurveErr(CHUNK_OP_STATUS errcode, } }
-} // namespace client
-} // namespace curve
+} // namespace client
+} // namespace curve
diff --git a/src/client/io_tracker.h b/src/client/io_tracker.h
index 6369410ae3..e87ffcc23b 100644
--- a/src/client/io_tracker.h
+++ b/src/client/io_tracker.h
@@ -49,44 +49,45 @@ class IOManager; class FileSegment; class DiscardTaskManager;
-// IOTracker用于跟踪一个用户IO,因为一个用户IO可能会跨chunkserver,
-// 因此在真正下发的时候会被拆分成多个小IO并发的向下发送,因此我们需要
-// 跟踪发送的request的执行情况。
+// IOTracker is used to track a single user IO. Because a user IO may span
+// chunkservers, it is split into multiple small IOs that are dispatched
+// concurrently, so we need to track the execution status of every request
+// sent.
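The split-and-track behavior this comment describes follows a common fan-out/fan-in pattern. Here is a self-contained sketch of that pattern; FanOutTracker and onDone are illustrative names, not the real IOTracker members:

#include <atomic>
#include <functional>
#include <utility>

// One user IO fans out into subRequestCount sub-requests; the completion of
// the last sub-request triggers the user-visible callback with the first
// error observed (0 if every sub-request succeeded).
class FanOutTracker {
 public:
    FanOutTracker(int subRequestCount, std::function<void(int)> onDone)
        : pending_(subRequestCount),
          firstError_(0),
          onDone_(std::move(onDone)) {}

    // Called once per sub-request completion, possibly from different threads.
    void HandleSubResponse(int errcode) {
        if (errcode != 0) {
            int expected = 0;
            firstError_.compare_exchange_strong(expected, errcode);
        }
        if (pending_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
            onDone_(firstError_.load(std::memory_order_acquire));
        }
    }

 private:
    std::atomic<int> pending_;
    std::atomic<int> firstError_;
    std::function<void(int)> onDone_;
};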
class CURVE_CACHELINE_ALIGNMENT IOTracker { friend class Splitor; public: - IOTracker(IOManager* iomanager, - MetaCache* mc, - RequestScheduler* scheduler, - FileMetric* clientMetric = nullptr, - bool disableStripe = false); + IOTracker(IOManager* iomanager, MetaCache* mc, RequestScheduler* scheduler, + FileMetric* clientMetric = nullptr, bool disableStripe = false); ~IOTracker() = default; /** - * @brief StartRead同步读 - * @param buf 读缓冲区 - * @param offset 读偏移 - * @param length 读长度 - * @param mdsclient 透传给splitor,与mds通信 - * @param fileInfo 当前io对应文件的基本信息 + * @brief StartRead Sync Read + * @param buf read buffer + * @param offset read offset + * @param length Read length + * @param mdsclient transparently transmits to the splitter for + * communication with mds + * @param fileInfo Basic information of the file corresponding to the + * current io */ void StartRead(void* buf, off_t offset, size_t length, MDSClient* mdsclient, const FInfo_t* fileInfo, Throttle* throttle = nullptr); /** - * @brief StartWrite同步写 - * @param buf 写缓冲区 - * @param offset 写偏移 - * @param length 写长度 - * @param mdsclient 透传给splitor,与mds通信 - * @param fileInfo 当前io对应文件的基本信息 + * @brief StartWrite Sync Write + * @param buf write buffer + * @param offset write offset + * @param length Write length + * @param mdsclient transparently transmits to the splitter for + * communication with mds + * @param fileInfo Basic information of the file corresponding to the + * current io */ void StartWrite(const void* buf, off_t offset, size_t length, MDSClient* mdsclient, const FInfo_t* fileInfo, - const FileEpoch* fEpoch, - Throttle* throttle = nullptr); + const FileEpoch* fEpoch, Throttle* throttle = nullptr); /** * @brief start an async read operation @@ -105,8 +106,8 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { * @param fEpoch file epoch info */ void StartAioWrite(CurveAioContext* ctx, MDSClient* mdsclient, - const FInfo_t* fileInfo, - const FileEpoch* fEpoch, Throttle* throttle = nullptr); + const FInfo_t* fileInfo, const FileEpoch* fEpoch, + Throttle* throttle = nullptr); void StartDiscard(off_t offset, size_t length, MDSClient* mdsclient, const FInfo_t* fileInfo, DiscardTaskManager* taskManager); @@ -116,46 +117,44 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { DiscardTaskManager* taskManager); /** - * chunk相关接口是提供给snapshot使用的,上层的snapshot和file - * 接口是分开的,在IOTracker这里会将其统一,这样对下层来说不用 - * 感知上层的接口类别。 - * @param:chunkidinfo 目标chunk - * @param: seq是快照版本号 - * @param: offset是快照内的offset - * @param: len是要读取的长度 - * @param: buf是读取缓冲区 - * @param: scc是异步回调 + * The chunk-related interfaces are intended for use by snapshots. The + * upper-level snapshot and file interfaces are separate. However, in the + * IOTracker, they are unified so that the lower levels do not need to be + * aware of the upper-level interface category. 
+ * @param: chunkidinfo The target chunk + * @param: seq is the snapshot version number + * @param: offset is the offset within the snapshot + * @param: len is the length to be read + * @param: buf is the read buffer + * @param: scc is the asynchronous callback */ - void ReadSnapChunk(const ChunkIDInfo &cinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc); + void ReadSnapChunk(const ChunkIDInfo& cinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc); /** - * 删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param:chunkidinfo 目标chunk - * @param: seq是需要修正的版本号 + * Delete snapshots generated during this dump or left over from history + * If no snapshot is generated during the dump process, modify the + * correctedSn of the chunk + * @param: chunkidinfo is the target chunk + * @param: seq is the version number that needs to be corrected */ - void DeleteSnapChunkOrCorrectSn(const ChunkIDInfo &cinfo, - uint64_t correctedSeq); + void DeleteSnapChunkOrCorrectSn(const ChunkIDInfo& cinfo, + uint64_t correctedSeq); /** - * 获取chunk的版本信息,chunkInfo是出参 - * @param:chunkidinfo 目标chunk - * @param: chunkInfo是快照的详细信息 + * Obtain the version information of the chunk, where chunkInfo is the + * output parameter + * @param: chunkidinfo target chunk + * @param: chunkInfo is the detailed information of the snapshot */ - void GetChunkInfo(const ChunkIDInfo &cinfo, - ChunkInfoDetail *chunkInfo); + void GetChunkInfo(const ChunkIDInfo& cinfo, ChunkInfoDetail* chunkInfo); /** - * @brief lazy 创建clone chunk - * @param:location 数据源的url - * @param:chunkidinfo 目标chunk - * @param:sn chunk的序列号 - * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn - * @param:chunkSize chunk的大小 - * @param: scc是异步回调 + * @brief lazy Create clone chunk + * @param: location is the URL of the data source + * @param: chunkidinfo target chunk + * @param: sn chunk's serial number + * @param: correntSn used to modify the chunk when CreateCloneChunk + * @param: chunkSize chunk size + * @param: scc is an asynchronous callback */ void CreateCloneChunk(const std::string& location, const ChunkIDInfo& chunkidinfo, uint64_t sn, @@ -163,47 +162,51 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { SnapCloneClosure* scc); /** - * @brief 实际恢复chunk数据 - * @param:chunkidinfo chunkidinfo - * @param:offset 偏移 - * @param:len 长度 - * @param:chunkSize chunk的大小 - * @param: scc是异步回调 + * @brief Actual recovery chunk data + * @param: chunkidinfo chunkidinfo + * @param: offset offset + * @param: len length + * @param: chunkSize Chunk size + * @param: scc is an asynchronous callback */ void RecoverChunk(const ChunkIDInfo& chunkIdInfo, uint64_t offset, uint64_t len, SnapCloneClosure* scc); /** - * Wait用于同步接口等待,因为用户下来的IO被client内部线程接管之后 - * 调用就可以向上返回了,但是用户的同步IO语意是要等到结果返回才能向上 - * 返回的,因此这里的Wait会让用户线程等待。 - * @return: 返回读写信息,异步IO的时候返回0或-1.0代表成功,-1代表失败 - * 同步IO返回length或-1,length代表真实读写长度,-1代表读写失败 + * Wait is used for synchronous interface waiting. When the user's IO is + * taken over by client internal threads, the call can return to the upper + * layer. However, the user's synchronous IO semantics require waiting for + * the result to return before returning to the upper layer, so Wait here + * will make the user thread wait. + * @return: Returns read/write information. For asynchronous IO, it returns + * 0 or -1. 0 means success, -1 means failure. For synchronous IO, it + * returns the length or -1. 'length' represents the actual read/write + * length, and -1 represents read/write failure. 
*/ int Wait(); /** - * 每个request都要有自己的OP类型,这里提供接口可以在io拆分的时候获取类型 + * Each request must have its own OP type, and an interface is provided here + * to obtain the type during IO splitting */ - OpType Optype() {return type_;} + OpType Optype() { return type_; } - // 设置操作类型,测试使用 + // Set operation type, test usage void SetOpType(OpType type) { type_ = type; } /** - * 因为client的IO都是异步发送的,且一个IO被拆分成多个Request,因此在异步 - * IO返回后就应该告诉IOTracker当前request已经返回,这样tracker可以处理 - * 返回的request。 - * @param: 待处理的异步request + * Because client IOs are all sent asynchronously, and a single IO is split + * into multiple Requests, after asynchronous IO returns, it should inform + * the IOTracker that the current request has returned. This way, the + * tracker can handle the returned request. + * @param: The asynchronous request to be processed. */ void HandleResponse(RequestContext* reqctx); /** - * 获取当前tracker id信息 + * Obtain the current tracker ID information */ - uint64_t GetID() const { - return id_; - } + uint64_t GetID() const { return id_; } // set user data type void SetUserDataType(const UserDataType dataType) { @@ -222,9 +225,7 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { readDatas_[subIoIndex] = data; } - bool IsStripeDisabled() const { - return disableStripe_; - } + bool IsStripeDisabled() const { return disableStripe_; } static void InitDiscardOption(const DiscardOption& opt); @@ -232,38 +233,40 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { void ReleaseAllSegmentLocks(); /** - * 当IO返回的时候调用done,由done负责向上返回 + * When IO returns, call done, which is responsible for returning upwards */ void Done(); /** - * 在io拆分或者,io分发失败的时候需要调用,设置返回状态,并向上返回 + * When IO splitting or IO distribution fails, it needs to be called, set + * the return status, and return upwards */ void ReturnOnFail(); /** - * 用户下来的大IO会被拆分成多个子IO,这里在返回之前将子IO资源回收 + * The user's incoming large IO will be split into multiple sub IOs, and the + * sub IO resources will be reclaimed before returning here */ void DestoryRequestList(); /** - * 填充request context common字段 - * @param: idinfo为chunk的id信息 - * @param: req为待填充的request context + * Fill in the request context common field + * @param: IDInfo is the ID information of the chunk + * @param: req is the request context to be filled in */ void FillCommonFields(ChunkIDInfo idinfo, RequestContext* req); /** - * chunkserver errcode转化为libcurve client的errode - * @param: errcode为chunkserver侧的errode - * @param[out]: errout为libcurve自己的errode + * Convert chunkserver errcode to libcurve client errode + * @param: errcode is the error code on the chunkserver side + * @param[out]: errout is libcurve's own errode */ void ChunkServerErr2LibcurveErr(curve::chunkserver::CHUNK_OP_STATUS errcode, LIBCURVE_ERROR* errout); /** - * 获取一个初始化后的RequestContext - * return: 如果分配失败或者初始化失败,返回nullptr - * 反之,返回一个指针 + * Obtain an initialized RequestContext + * @return: If allocation or initialization fails, return nullptr + * On the contrary, return a pointer */ RequestContext* GetInitedRequestContext() const; @@ -283,8 +286,7 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { // perform write operation void DoWrite(MDSClient* mdsclient, const FInfo_t* fileInfo, - const FileEpoch* fEpoch, - Throttle* throttle); + const FileEpoch* fEpoch, Throttle* throttle); void DoDiscard(MDSClient* mdsclient, const FInfo_t* fileInfo, DiscardTaskManager* taskManager); @@ -296,12 +298,13 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { } private: - // io 类型 - OpType type_; + // IO type + OpType type_; - // 当前IO的数据内容,data是读写数据的buffer - off_t offset_; - uint64_t 
length_;
+ // The data of the current IO; data is the buffer for the read/write data
+ off_t offset_;
+ uint64_t length_;
 // user data pointer
 void* data_;
@@ -315,48 +318,52 @@
 // save read data
 std::vector readDatas_;
- // 当用户下发的是同步IO的时候,其需要在上层进行等待,因为client的
- // IO发送流程全部是异步的,因此这里需要用条件变量等待,待异步IO返回
- // 之后才将这个等待的条件变量唤醒,然后向上返回。
- IOConditionVariable iocv_;
+ // When a user issues synchronous IO, the upper layer has to wait, because
+ // the client's IO sending path is entirely asynchronous. A condition
+ // variable is used to wait here; once the asynchronous IO returns, the
+ // waiting condition variable is signalled and the call returns upwards.
+ IOConditionVariable iocv_;
- // 异步IO的context,在异步IO返回时,通过调用aioctx
- // 的异步回调进行返回。
+ // Context of an asynchronous IO; when the asynchronous IO returns, the
+ // result is delivered through aioctx's asynchronous callback.
 CurveAioContext* aioctx_;
- // 当前IO的errorcode
+ // The errorcode of the current IO
 LIBCURVE_ERROR errcode_;
- // 当前IO被拆分成reqcount_个小IO
+ // The current IO is split into reqcount_ sub-IOs
 std::atomic reqcount_;
- // 大IO被拆分成多个request,这些request放在reqlist中国保存
- std::vector<RequestContext*> reqlist_;
+ // A large IO is split into multiple requests, which are stored in
+ // reqlist_
+ std::vector<RequestContext*> reqlist_;
 // store segment indices that can be discarded
 std::unordered_set discardSegments_;
- // metacache为当前fileinstance的元数据信息
+ // metacache is the metadata information of the current fileinstance
 MetaCache* mc_;
- // scheduler用来将用户线程与client自己的线程切分
- // 大IO被切分之后,将切分的reqlist传给scheduler向下发送
+ // The scheduler is used to separate user threads from the client's own
+ // threads; after a large IO is split, the resulting reqlist is passed to
+ // the scheduler to be sent downwards
 RequestScheduler* scheduler_;
- // 对于异步IO,Tracker需要向上层通知当前IO已经处理结束
- // iomanager可以将该tracker释放
+ // For asynchronous IO, the tracker notifies the upper layer that the
+ // current IO has finished so that the iomanager can release the tracker
 IOManager* iomanager_;
- // 发起时间
+ // Time the IO was initiated
 uint64_t opStartTimePoint_;
- // client端的metric统计信息
+ // Metric statistics on the client side
 FileMetric* fileMetric_;
- // 当前tracker的id
+ // The ID of the current tracker
 uint64_t id_;
- // 快照克隆系统异步调用回调指针
+ // Callback pointer for asynchronous calls from the snapshot/clone system
 SnapCloneClosure* scc_;
 bool disableStripe_;
@@ -365,11 +372,11 @@
 // so store corresponding segment lock and release after operations finished
 std::vector segmentLocks_;
- // id生成器
+ // ID generator
 static std::atomic tracekerID_;
 static DiscardOption discardOption_;
};
-} // namespace client
-} // namespace curve
+} // namespace client
+} // namespace curve
#endif // SRC_CLIENT_IO_TRACKER_H_
diff --git a/src/client/iomanager.h b/src/client/iomanager.h
index e985b1527f..04664fe870 100644
--- a/src/client/iomanager.h
+++ b/src/client/iomanager.h
@@ -23,8 +23,8 @@
#ifndef SRC_CLIENT_IOMANAGER_H_
#define SRC_CLIENT_IOMANAGER_H_
-#include "src/client/io_tracker.h"
 #include "src/client/client_common.h"
+#include "src/client/io_tracker.h"
 #include "src/common/concurrent/concurrent.h"
 namespace curve { namespace client {
@@ -34,48 +34,41 @@
 using curve::common::Atomic;
 class IOManager {
 public:
- IOManager() {
- id_ = idRecorder_.fetch_add(1, std::memory_order_relaxed);
- }
+ IOManager() { id_ = idRecorder_.fetch_add(1, std::memory_order_relaxed); }
 virtual ~IOManager() = default;
 /**
- * @brief 获取当前iomanager的ID信息
+ *
@brief Get the ID information of the current iomanager
 */
- virtual IOManagerID ID() const {
- return id_;
- }
+ virtual IOManagerID ID() const { return id_; }
 /**
- * @brief 获取rpc发送令牌
+ * @brief Get rpc send token
 */
- virtual void GetInflightRpcToken() {
- return;
- }
+ virtual void GetInflightRpcToken() { return; }
 /**
- * @brief 释放rpc发送令牌
+ * @brief Release rpc send token
 */
- virtual void ReleaseInflightRpcToken() {
- return;
- }
+ virtual void ReleaseInflightRpcToken() { return; }
 /**
- * @brief 处理异步返回的response
- * @param: iotracker是当前reponse的归属
+ * @brief Handle a response returned asynchronously
+ * @param: iotracker the iotracker that owns the current response
 */
 virtual void HandleAsyncIOResponse(IOTracker* iotracker) = 0;
 protected:
- // iomanager id目的是为了让底层RPC知道自己归属于哪个iomanager
+ // The purpose of the iomanager id is to let the underlying RPC know which
+ // iomanager it belongs to
 IOManagerID id_;
 private:
 // global id recorder
- static Atomic idRecorder_;
+ static Atomic idRecorder_;
};
-} // namespace client
-} // namespace curve
+} // namespace client
+} // namespace curve
#endif // SRC_CLIENT_IOMANAGER_H_
diff --git a/src/client/iomanager4chunk.h b/src/client/iomanager4chunk.h
index f9cedeca02..209829f3ef 100644
--- a/src/client/iomanager4chunk.h
+++ b/src/client/iomanager4chunk.h
@@ -24,15 +24,15 @@
#define SRC_CLIENT_IOMANAGER4CHUNK_H_
#include
-#include // NOLINT
+#include // NOLINT
+#include // NOLINT
#include
-#include // NOLINT
-#include "src/client/metacache.h"
-#include "src/client/iomanager.h"
+#include "include/curve_compiler_specific.h"
#include "src/client/client_common.h"
+#include "src/client/iomanager.h"
+#include "src/client/metacache.h"
#include "src/client/request_scheduler.h"
-#include "include/curve_compiler_specific.h"
 namespace curve { namespace client {
 class IOManager4Chunk : public IOManager {
 public:
 IOManager4Chunk();
 ~IOManager4Chunk() = default;
- bool Initialize(IOOption ioOpt, MDSClient* mdsclient);
+ bool Initialize(IOOption ioOpt, MDSClient* mdsclient);
- /**
- * 读取seq版本号的快照数据
- * @param:chunkidinfo 目标chunk
- * @param: seq是快照版本号
- * @param: offset是快照内的offset
- * @param: len是要读取的长度
- * @param: buf是读取缓冲区
- * @param: scc是异步回调
- * @return:成功返回真实读取长度,失败为-1
- */
- int ReadSnapChunk(const ChunkIDInfo &chunkidinfo,
- uint64_t seq,
- uint64_t offset,
- uint64_t len,
- char *buf,
- SnapCloneClosure* scc);
- /**
- * 删除此次转储时产生的或者历史遗留的快照
- * 如果转储过程中没有产生快照,则修改chunk的correctedSn
- * @param:chunkidinfo 目标chunk
- * @param: correctedSeq是需要修正的版本号
- */
- int DeleteSnapChunkOrCorrectSn(const ChunkIDInfo &chunkidinfo,
+ /**
+ * Read the snapshot data with version number seq
+ * @param: chunkidinfo target chunk
+ * @param: seq is the snapshot version number
+ * @param: offset is the offset within the snapshot
+ * @param: len is the length to be read
+ * @param: buf is the read buffer
+ * @param: scc is an asynchronous callback
+ * @return: Returns the actual read length on success, -1 on failure
+ */
+ int ReadSnapChunk(const ChunkIDInfo& chunkidinfo, uint64_t seq,
+ uint64_t offset, uint64_t len, char* buf,
+ SnapCloneClosure* scc);
+ /**
+ * Delete snapshots generated during this dump or left over from history.
+ * If no snapshot is generated during the dump process, modify the
+ * correctedSn of the chunk
+ * @param: chunkidinfo target chunk
+ * @param: correctedSeq is the version number that needs to be corrected
+ */
+ int DeleteSnapChunkOrCorrectSn(const ChunkIDInfo& chunkidinfo,
 uint64_t correctedSeq);
- /**
- * 获取chunk的版本信息,chunkInfo是出参
- * @param:chunkidinfo 目标chunk
- * @param: chunkInfo是快照的详细信息
- */
- int GetChunkInfo(const ChunkIDInfo &chunkidinfo,
- ChunkInfoDetail *chunkInfo);
+ /**
+ * Obtain the version information of the chunk, where chunkInfo is the
+ * output parameter
+ * @param: chunkidinfo target chunk
+ * @param: chunkInfo is the detailed information of the snapshot
+ */
+ int GetChunkInfo(const ChunkIDInfo& chunkidinfo,
+ ChunkInfoDetail* chunkInfo);
- /**
- * @brief lazy 创建clone chunk
- * @detail
- * - location的格式定义为 A@B的形式。
- * - 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址;
- * - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs
- *
- * @param:location 数据源的url
- * @param:chunkidinfo 目标chunk
- * @param:sn chunk的序列号
- * @param:chunkSize chunk的大小
- * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn
- * @param: scc是异步回调
- * @return 成功返回0, 否则-1
- */
- int CreateCloneChunk(const std::string &location,
- const ChunkIDInfo &chunkidinfo,
- uint64_t sn,
- uint64_t correntSn,
- uint64_t chunkSize,
- SnapCloneClosure* scc);
+ /**
+ * @brief lazy Create clone chunk
+ * @detail
+ * - The format of the location is defined as A@B.
+ * - If the source data is on S3, the location format is uri@s3, where uri
+ * is the actual address of the chunk object.
+ * - If the source data is on CurveFS, the location format is
+ * /filename/chunkindex@cs.
+ *
+ * @param: location URL of the data source
+ * @param: chunkidinfo target chunk
+ * @param: sn chunk's serial number
+ * @param: chunkSize chunk size
+ * @param: correntSn used to modify the correctedSn of the chunk when
+ * CreateCloneChunk is called
+ * @param: scc is an asynchronous callback
+ * @return successfully returns 0, otherwise -1
+ */
+ int CreateCloneChunk(const std::string& location,
+ const ChunkIDInfo& chunkidinfo, uint64_t sn,
+ uint64_t correntSn, uint64_t chunkSize,
+ SnapCloneClosure* scc);
 /**
- * @brief 实际恢复chunk数据
+ * @brief Actually recover chunk data
 * @param chunkidinfo chunkidinfo
- * @param offset 偏移
- * @param len 长度
- * @param scc 异步回调
- * @return 成功返回0, 否则-1
+ * @param offset offset
+ * @param len length
+ * @param scc asynchronous callback
+ * @return successfully returns 0, otherwise -1
 */
 int RecoverChunk(const ChunkIDInfo& chunkIdInfo, uint64_t offset,
 uint64_t len, SnapCloneClosure* scc);
 /**
- * 因为curve client底层都是异步IO,每个IO会分配一个IOtracker跟踪IO
- * 当这个IO做完之后,底层需要告知当前io manager来释放这个IOTracker,
- * HandleAsyncIOResponse负责释放IOTracker
- * @param: 是异步返回的io
+ * Because the underlying curve client IO is all asynchronous, each IO is
+ * assigned an IOTracker to track it. After the IO completes, the lower
+ * layer needs to tell the current IO manager to release this IOTracker;
+ * HandleAsyncIOResponse is responsible for releasing it
+ * @param: iotracker is the asynchronously returned IO
 */
 void HandleAsyncIOResponse(IOTracker* iotracker) override;
- /**
- * 析构,回收资源
- */
+ /**
+ * Tear down and reclaim resources
+ */
 void UnInitialize();
- /**
- * 获取metacache,测试代码使用
- */
- MetaCache* GetMetaCache() {return &mc_;}
- /**
- * 设置scahuler,测试代码使用
- */
+ /**
+ * Get the metacache; for test code use
+ */
+ MetaCache* GetMetaCache() { return &mc_; }
+ /**
+ * Set the scheduler; for test code use
+ */
 void SetRequestScheduler(RequestScheduler* scheduler) {
- scheduler_ = scheduler;
+ scheduler_ = scheduler;
 }
 private:
- // 每个IOManager都有其IO配置,保存在iooption里
+ // Each IOManager has its IO configuration, which is saved in the iooption
 IOOption ioopt_;
- // metacache存储当前snapshot client元数据信息
- MetaCache mc_;
+ // metacache stores the current snapshot client metadata
information + MetaCache mc_; - // IO最后由schedule模块向chunkserver端分发,scheduler由IOManager创建和释放 + // The IO is finally distributed by the schedule module to the chunkserver + // end, and the scheduler is created and released by the IOManager RequestScheduler* scheduler_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_IOMANAGER4CHUNK_H_ diff --git a/src/client/iomanager4file.cpp b/src/client/iomanager4file.cpp index b6f1b09527..992554264d 100644 --- a/src/client/iomanager4file.cpp +++ b/src/client/iomanager4file.cpp @@ -20,14 +20,15 @@ * Author: tongguangxun */ +#include "src/client/iomanager4file.h" + #include -#include // NOLINT +#include // NOLINT -#include "src/client/metacache.h" -#include "src/client/iomanager4file.h" #include "src/client/file_instance.h" #include "src/client/io_tracker.h" +#include "src/client/metacache.h" #include "src/client/splitor.h" namespace curve { @@ -36,8 +37,7 @@ Atomic IOManager::idRecorder_(1); IOManager4File::IOManager4File() : scheduler_(nullptr), exit_(false) {} bool IOManager4File::Initialize(const std::string& filename, - const IOOption& ioOpt, - MDSClient* mdsclient) { + const IOOption& ioOpt, MDSClient* mdsclient) { ioopt_ = ioOpt; disableStripe_ = false; @@ -55,8 +55,9 @@ bool IOManager4File::Initialize(const std::string& filename, return false; } - // IO Manager中不控制inflight IO数量,所以传入UINT64_MAX - // 但是IO Manager需要控制所有inflight IO在关闭的时候都被回收掉 + // The IO Manager does not control the number of inflight IOs, so UINT64_MAX + // is passed. However, the IO Manager needs to ensure that all inflight IOs + // are reclaimed upon shutdown. inflightCntl_.SetMaxInflightNum(UINT64_MAX); scheduler_ = new (std::nothrow) RequestScheduler(); @@ -114,7 +115,7 @@ void IOManager4File::UnInitialize() { { std::unique_lock lk(exitMtx); - exitCv.wait(lk, [&](){ return exitFlag; }); + exitCv.wait(lk, [&]() { return exitFlag; }); } taskPool_.Stop(); @@ -128,8 +129,9 @@ void IOManager4File::UnInitialize() { discardTaskManager_->Stop(); { - // 这个锁保证设置exit_和delete scheduler_是原子的 - // 这样保证在scheduler_被析构的时候lease线程不会使用scheduler_ + // This lock ensures that setting exit_ and deleting scheduler_ are + // atomic. This ensures that the lease thread won't use scheduler_ when + // it is being destructed. 
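The locking discipline this comment describes can be modeled in isolation as follows; TeardownGuardModel and OnLeaseRenewed are illustrative names used only for this sketch, not the actual IOManager4File members beyond exit_ and scheduler_:

#include <mutex>

struct Scheduler { void ResumeIO() {} };

// Simplified model of the guarded teardown above: exit_ and scheduler_ are
// only touched under exitMtx_, so a concurrent lease-thread callback either
// observes exit_ == true or a still-valid scheduler, never a dangling one.
class TeardownGuardModel {
 public:
    void UnInitialize() {
        std::lock_guard<std::mutex> lk(exitMtx_);
        exit_ = true;
        delete scheduler_;  // safe: no callback can hold scheduler_ now
        scheduler_ = nullptr;
    }

    // Invoked from the lease renewal thread when renewal succeeds.
    void OnLeaseRenewed() {
        std::lock_guard<std::mutex> lk(exitMtx_);
        if (!exit_ && scheduler_ != nullptr) {
            scheduler_->ResumeIO();  // scheduler_ is guaranteed alive here
        }
    }

 private:
    std::mutex exitMtx_;
    bool exit_ = false;
    Scheduler* scheduler_ = new Scheduler();
};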
std::unique_lock lk(exitMtx_); exit_ = true; @@ -140,8 +142,8 @@ void IOManager4File::UnInitialize() { } } -int IOManager4File::Read(char* buf, off_t offset, - size_t length, MDSClient* mdsclient) { +int IOManager4File::Read(char* buf, off_t offset, size_t length, + MDSClient* mdsclient) { MetricHelper::IncremUserRPSCount(fileMetric_, OpType::READ); FlightIOGuard guard(this); @@ -162,9 +164,7 @@ int IOManager4File::Read(char* buf, off_t offset, } } -int IOManager4File::Write(const char* buf, - off_t offset, - size_t length, +int IOManager4File::Write(const char* buf, off_t offset, size_t length, MDSClient* mdsclient) { MetricHelper::IncremUserRPSCount(fileMetric_, OpType::WRITE); FlightIOGuard guard(this); @@ -175,8 +175,7 @@ int IOManager4File::Write(const char* buf, IOTracker temp(this, &mc_, scheduler_, fileMetric_, disableStripe_); temp.SetUserDataType(UserDataType::IOBuffer); temp.StartWrite(&data, offset, length, mdsclient, this->GetFileInfo(), - this->GetFileEpoch(), - throttle_.get()); + this->GetFileEpoch(), throttle_.get()); int rc = temp.Wait(); return rc; @@ -223,8 +222,7 @@ int IOManager4File::AioWrite(CurveAioContext* ctx, MDSClient* mdsclient, inflightCntl_.IncremInflightNum(); auto task = [this, ctx, mdsclient, temp]() { temp->StartAioWrite(ctx, mdsclient, this->GetFileInfo(), - this->GetFileEpoch(), - throttle_.get()); + this->GetFileEpoch(), throttle_.get()); }; taskPool_.Enqueue(task); @@ -286,9 +284,7 @@ void IOManager4File::UpdateFileThrottleParams( } } -void IOManager4File::SetDisableStripe() { - disableStripe_ = true; -} +void IOManager4File::SetDisableStripe() { disableStripe_ = true; } void IOManager4File::HandleAsyncIOResponse(IOTracker* iotracker) { inflightCntl_.DecremInflightNum(); @@ -330,5 +326,5 @@ void IOManager4File::GetInflightRpcToken() { inflightRpcCntl_.GetInflightToken(); } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/iomanager4file.h b/src/client/iomanager4file.h index eaecc8497f..9571a3845d 100644 --- a/src/client/iomanager4file.h +++ b/src/client/iomanager4file.h @@ -28,12 +28,13 @@ #include #include // NOLINT -#include // NOLINT -#include #include +#include // NOLINT +#include #include "include/curve_compiler_specific.h" #include "src/client/client_common.h" +#include "src/client/discard_task.h" #include "src/client/inflight_controller.h" #include "src/client/iomanager.h" #include "src/client/mds_client.h" @@ -42,7 +43,6 @@ #include "src/common/concurrent/concurrent.h" #include "src/common/concurrent/task_thread_pool.h" #include "src/common/throttle.h" -#include "src/client/discard_task.h" namespace curve { namespace client { @@ -57,14 +57,13 @@ class IOManager4File : public IOManager { ~IOManager4File() = default; /** - * 初始化函数 - * @param: filename为当前iomanager服务的文件名 - * @param: ioopt为当前iomanager的配置信息 - * @param: mdsclient向下透传给metacache - * @return: 成功true,失败false + * Initialization function + * @param: filename is the file name of the current iomanager service + * @param: ioopt is the configuration information of the current iomanager + * @param: mdsclient penetrates downwards to Metacache + * @return: Success true, failure false */ - bool Initialize(const std::string& filename, - const IOOption& ioOpt, + bool Initialize(const std::string& filename, const IOOption& ioOpt, MDSClient* mdsclient); /** @@ -73,39 +72,47 @@ class IOManager4File : public IOManager { void UnInitialize(); /** - * 同步模式读 - * @param: buf为当前待读取的缓冲区 - * @param:offset文件内的便宜 - * @parma:length为待读取的长度 - * @param: 
mdsclient透传给底层,在必要的时候与mds通信
- * @return: 成功返回读取真实长度,-1为失败
+ * Synchronous mode read
+ * @param: buf is the buffer to read into
+ * @param: offset is the offset in the file
+ * @param: length is the length to be read
+ * @param: mdsclient is passed through to the lower layer to communicate
+ * with the mds when necessary
+ * @return: Returns the actual length read on success, -1 indicates
+ * failure
 */
 int Read(char* buf, off_t offset, size_t length, MDSClient* mdsclient);
 /**
- * 同步模式写
- * @param: mdsclient透传给底层,在必要的时候与mds通信
- * @param: buf为当前待写入的缓冲区
- * @param:offset文件内的便宜
- * @param:length为待读取的长度
- * @return: 成功返回写入真实长度,-1为失败
+ * Synchronous mode write
+ * @param: mdsclient is passed through to the lower layer to communicate
+ * with the mds when necessary
+ * @param: buf is the buffer to be written
+ * @param: offset is the offset within the file
+ * @param: length is the length to be written
+ * @return: Returns the actual length written on success, -1 indicates
+ * failure
 */
 int Write(const char* buf, off_t offset, size_t length,
 MDSClient* mdsclient);
 /**
- * 异步模式读
- * @param: mdsclient透传给底层,在必要的时候与mds通信
- * @param: aioctx为异步读写的io上下文,保存基本的io信息
+ * Asynchronous mode read
+ * @param: mdsclient is passed through to the lower layer to communicate
+ * with the mds when necessary
+ * @param: aioctx is an asynchronous read/write IO context that stores
+ * basic IO information
 * @param dataType type of aioctx->buf
- * @return: 0为成功,小于0为失败
+ * @return: 0 indicates success, less than 0 indicates failure
 */
 int AioRead(CurveAioContext* aioctx, MDSClient* mdsclient,
 UserDataType dataType);
 /**
- * 异步模式写
- * @param: mdsclient透传给底层,在必要的时候与mds通信
- * @param: aioctx为异步读写的io上下文,保存基本的io信息
+ * Asynchronous mode write
+ * @param: mdsclient is passed through to the lower layer to communicate
+ * with the mds when necessary
+ * @param: aioctx is an asynchronous read/write IO context that stores
+ * basic IO information
 * @param dataType type of aioctx->buf
- * @return: 0为成功,小于0为失败
+ * @return: 0 indicates success, less than 0 indicates failure
 */
 int AioWrite(CurveAioContext* aioctx, MDSClient* mdsclient,
 UserDataType dataType);
@@ -128,88 +135,71 @@
 int AioDiscard(CurveAioContext* aioctx, MDSClient* mdsclient);
 /**
- * @brief 获取rpc发送令牌
+ * @brief Get rpc send token
 */
 void GetInflightRpcToken() override;
 /**
- * @brief 释放rpc发送令牌
+ * @brief Release rpc send token
 */
 void ReleaseInflightRpcToken() override;
 /**
- * 获取metacache,测试代码使用
+ * Get the metacache; for test code use
 */
- MetaCache* GetMetaCache() {
- return &mc_;
- }
+ MetaCache* GetMetaCache() { return &mc_; }
 /**
- * 设置scheduler,测试代码使用
+ * Set the scheduler; for test code use
 */
 void SetRequestScheduler(RequestScheduler* scheduler) {
 scheduler_ = scheduler;
 }
 /**
- * 获取metric信息,测试代码使用
+ * Get metric information; for test code use
 */
- FileMetric* GetMetric() {
- return fileMetric_;
- }
+ FileMetric* GetMetric() { return fileMetric_; }
 /**
- * 重新设置io配置信息,测试使用
+ * Reset IO configuration information; for test use
 */
- void SetIOOpt(const IOOption& opt) {
- ioopt_ = opt;
- }
+ void SetIOOpt(const IOOption& opt) { ioopt_ = opt; }
 /**
- * 测试使用,获取request scheduler
+ * Get the request scheduler; for test use
 */
- RequestScheduler* GetScheduler() {
- return scheduler_;
- }
+ RequestScheduler* GetScheduler() { return scheduler_; }
 /**
- * lease excutor在检查到版本更新的时候,需要通知iomanager更新文件版本信息
- * @param: fi为当前需要更新的文件信息
+ * When the lease executor detects a version update, it needs to notify
+ * the iomanager to update the file version information
+ * @param: fi is the current file information that needs to be updated
 */
 void UpdateFileInfo(const FInfo_t& fi);
- const FInfo* GetFileInfo() const {
- return mc_.GetFileInfo();
- }
+ const FInfo* GetFileInfo() const { return mc_.GetFileInfo(); }
 void UpdateFileEpoch(const FileEpoch& fEpoch) {
 mc_.UpdateFileEpoch(fEpoch);
 }
- const FileEpoch* GetFileEpoch() const {
- return mc_.GetFileEpoch();
- }
+ const FileEpoch* GetFileEpoch() const { return mc_.GetFileEpoch(); }
 /**
- * 返回文件最新版本号
+ * Return the latest version number of the file
 */
- uint64_t GetLatestFileSn() const {
- return mc_.GetLatestFileSn();
- }
+ uint64_t GetLatestFileSn() const { return mc_.GetLatestFileSn(); }
 /**
- * 更新文件最新版本号
+ * Update the latest version number of the file
 */
- void SetLatestFileSn(uint64_t newSn) {
- mc_.SetLatestFileSn(newSn);
- }
+ void SetLatestFileSn(uint64_t newSn) { mc_.SetLatestFileSn(newSn); }
 /**
 * @brief get current file inodeid
 * @return file inodeid
 */
- uint64_t InodeId() const {
- return mc_.InodeId();
- }
+ uint64_t InodeId() const { return mc_.InodeId(); }
 void UpdateFileThrottleParams(
 const common::ReadWriteThrottleParams& params);
@@ -220,26 +210,30 @@
 friend class LeaseExecutor;
 friend class FlightIOGuard;
 /**
- * lease相关接口,当LeaseExecutor续约失败的时候,调用LeaseTimeoutDisableIO
- * 将新下发的IO全部失败返回
+ * Lease-related interface: when LeaseExecutor fails to renew the lease,
+ * LeaseTimeoutBlockIO is called to fail all newly issued IOs
 */
 void LeaseTimeoutBlockIO();
 /**
- * 当lease又续约成功的时候,LeaseExecutor调用该接口恢复IO
+ * When the lease is renewed successfully again, the LeaseExecutor calls
+ * this interface to resume IO
 */
 void ResumeIO();
 /**
- * 当lesaeexcutor发现版本变更,调用该接口开始等待inflight回来,这段期间IO是hang的
+ * When the lease executor detects a version change, it calls this
+ * interface and starts waiting for inflight IOs to come back.
During this period, IO is hanging */ void BlockIO(); /** - * 因为curve client底层都是异步IO,每个IO会分配一个IOtracker跟踪IO - * 当这个IO做完之后,底层需要告知当前io manager来释放这个IOTracker, - * HandleAsyncIOResponse负责释放IOTracker - * @param: iotracker是返回的异步io + * Because the bottom layer of the curve client is asynchronous IO, each IO + * is assigned an IOtracker to track IO After this IO is completed, the + * underlying layer needs to inform the current IO manager to release this + * IOTracker, HandleAsyncIOResponse is responsible for releasing the + * IOTracker + * @param: iotracker is an asynchronous io returned */ void HandleAsyncIOResponse(IOTracker* iotracker) override; @@ -250,9 +244,7 @@ class IOManager4File : public IOManager { iomanager->inflightCntl_.IncremInflightNum(); } - ~FlightIOGuard() { - iomanager->inflightCntl_.DecremInflightNum(); - } + ~FlightIOGuard() { iomanager->inflightCntl_.DecremInflightNum(); } private: IOManager4File* iomanager; @@ -261,42 +253,45 @@ class IOManager4File : public IOManager { bool IsNeedDiscard(size_t len) const; private: - // 每个IOManager都有其IO配置,保存在iooption里 + // Each IOManager has its IO configuration, which is saved in the iooption IOOption ioopt_; - // metacache存储当前文件的所有元数据信息 + // metacache stores all metadata information for the current file MetaCache mc_; - // IO最后由schedule模块向chunkserver端分发,scheduler由IOManager创建和释放 + // The IO is finally distributed by the schedule module to the chunkserver + // end, and the scheduler is created and released by the IOManager RequestScheduler* scheduler_; - // client端metric统计信息 + // Metric statistics on the client side FileMetric* fileMetric_; - // task thread pool为了将qemu线程与curve线程隔离 + // The task thread pool is used to isolate the QEMU thread from the curve + // thread curve::common::TaskThreadPool taskPool_; - // inflight IO控制 + // inflight IO control InflightControl inflightCntl_; - // inflight rpc控制 + // inflight rpc control InflightControl inflightRpcCntl_; std::unique_ptr throttle_; - // 是否退出 + // Exit or not bool exit_; - // lease续约线程与qemu一侧线程调用是并发的 - // qemu在调用close的时候会关闭iomanager及其对应 - // 资源。lease续约线程在续约成功或失败的时候会通知iomanager的 - // scheduler线程现在需要block IO或者resume IO,所以 - // 如果在lease续约线程需要通知iomanager的时候,这时候 - // 如果iomanager的资源scheduler已经被释放了,就会 - // 导致crash,所以需要对这个资源加一把锁,在退出的时候 - // 不会有并发的情况,保证在资源被析构的时候lease续约 - // 线程不会再用到这些资源. + // The lease renewal thread and the QEMU-side thread are concurrent. + // When QEMU calls close, it closes the iomanager and its corresponding + // resources. The lease renewal thread notifies the iomanager's scheduler + // thread when renewal succeeds or fails, indicating whether it needs to + // block or resume IO. Therefore, if the lease renewal thread needs to + // notify the iomanager at this point, and if the iomanager's scheduler + // resources have already been released, it may lead to a crash. So, it's + // necessary to add a lock to protect this resource, ensuring that there is + // no concurrency when exiting. This ensures that the lease renewal thread + // won't use these resources when they are being destructed. 
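Taken together, the lease-related hooks in this header amount to a gate on new IO. Here is a rough stand-alone model; LeaseIOGateModel and AdmitIO are hypothetical names, and the real client additionally waits for inflight IO rather than only flipping a flag:

#include <atomic>

// While the lease is invalid, newly issued IOs are failed back immediately;
// a successful renewal re-opens the gate.
class LeaseIOGateModel {
 public:
    void LeaseTimeoutBlockIO() {
        leaseValid_.store(false, std::memory_order_release);
    }
    void ResumeIO() { leaseValid_.store(true, std::memory_order_release); }

    // Returns 0 if the IO may be dispatched, -1 if it must fail upwards.
    int AdmitIO() const {
        return leaseValid_.load(std::memory_order_acquire) ? 0 : -1;
    }

 private:
    std::atomic<bool> leaseValid_{true};
};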
std::mutex exitMtx_; // enable/disable stripe for read/write of stripe file diff --git a/src/client/lease_executor.cpp b/src/client/lease_executor.cpp index c8db8ddd30..797c0f0075 100644 --- a/src/client/lease_executor.cpp +++ b/src/client/lease_executor.cpp @@ -19,11 +19,12 @@ * File Created: Saturday, 23rd February 2019 1:41:31 pm * Author: tongguangxun */ +#include "src/client/lease_executor.h" + #include -#include "src/common/timeutility.h" -#include "src/client/lease_executor.h" #include "src/client/service_helper.h" +#include "src/common/timeutility.h" using curve::common::TimeUtility; @@ -145,9 +146,7 @@ void LeaseExecutor::Stop() { } } -bool LeaseExecutor::LeaseValid() { - return isleaseAvaliable_.load(); -} +bool LeaseExecutor::LeaseValid() { return isleaseAvaliable_.load(); } void LeaseExecutor::IncremRefreshFailed() { failedrefreshcount_.fetch_add(1); @@ -190,7 +189,7 @@ void LeaseExecutor::ResetRefreshSessionTask() { return; } - // 等待前一个任务退出 + // Waiting for the previous task to exit task_->Stop(); task_->WaitTaskExit(); @@ -203,5 +202,5 @@ void LeaseExecutor::ResetRefreshSessionTask() { isleaseAvaliable_.store(true); } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/lease_executor.h b/src/client/lease_executor.h index 2236dc9982..829d264adc 100644 --- a/src/client/lease_executor.h +++ b/src/client/lease_executor.h @@ -41,16 +41,13 @@ namespace client { class RefreshSessionTask; /** - * lease refresh结果,session如果不存在就不需要再续约 - * 如果session存在但是lease续约失败,继续续约 - * 续约成功了FInfo_t中才会有对应的文件信息 + * Please refresh the result. If the session does not exist, there is no need to + * renew it If the session exists but the lease renewal fails, continue to renew + * the contract Successfully renewed the contract, FInfo_ Only in t will there + * be corresponding file information */ struct LeaseRefreshResult { - enum class Status { - OK, - FAILED, - NOT_EXIST - }; + enum class Status { OK, FAILED, NOT_EXIST }; Status status; FInfo_t finfo; }; @@ -62,19 +59,22 @@ class LeaseExecutorBase { }; /** - * 每个vdisk对应的fileinstance都会与mds保持心跳 - * 心跳通过LeaseExecutor实现,LeaseExecutor定期 - * 去mds续约,同时将mds端当前file最新的版本信息带回来 - * 然后检查版本信息是否变更,如果变更就需要通知iomanager - * 更新版本。如果续约失败,就需要将用户新发下来的io直接错误返回 + * The fileinstance corresponding to each vdisk will maintain heartbeat with the + * mds Heartbeat is achieved through LeaseExecutor, which periodically Go to MDS + * to renew the contract and bring back the latest version information of the + * current file on the MDS side Then check if the version information has + * changed, and if so, notify the iomanager Updated version. 
If the renewal fails, IO newly issued by the user must be + * returned directly with an error */ class LeaseExecutor : public LeaseExecutorBase { public: /** - * 构造函数 - * @param: leaseopt为当前lease续约的option配置 - * @param: mdsclient是与mds续约的client - * @param: iomanager会在续约失败或者版本变更的时候进行io调度 + * Constructor + * @param: leaseopt is the option configuration for the current lease + * renewal + * @param: mdsclient is the client used for renewal with the mds + * @param: iomanager will schedule IO in case of renewal failure or + * version change */ LeaseExecutor(const LeaseOption& leaseOpt, const UserInfo& userinfo, MDSClient* mdscllent, IOManager4File* iomanager); @@ -82,26 +82,27 @@ class LeaseExecutor : public LeaseExecutorBase { ~LeaseExecutor(); /** - * LeaseExecutor需要finfo保存filename - * LeaseSession_t是当前leaeexcutor的执行配置 - * @param: fi为当前需要续约的文件版本信息 - * @param: lease为续约的lease信息 - * @return: 成功返回true,否则返回false + * LeaseExecutor needs finfo to save the filename + * LeaseSession_t is the execution configuration of the current + * LeaseExecutor + * @param: fi is the version information of the file that currently needs + * to be renewed + * @param: lease is the lease information for renewal + * @return: Returns true on success, otherwise false */ - bool Start(const FInfo_t& fi, const LeaseSession_t& lease); + bool Start(const FInfo_t& fi, const LeaseSession_t& lease); /** - * 停止续约 + * Stop renewal */ void Stop(); /** - * 当前lease如果续约失败则通知iomanagerdisable io + * Notify the iomanager to disable IO if the current lease renewal fails */ bool LeaseValid(); /** - * 测试使用,主动失效增加刷新失败 + * For testing: actively invalidate the lease by increasing the refresh + * failure count */ void InvalidLease() { for (uint32_t i = 0; i <= leaseoption_.mdsRefreshTimesPerLease; i++) { @@ -110,20 +111,21 @@ } /** - * @brief 续约任务执行者 - * @return 是否继续执行refresh session任务 + * @brief Renewal task executor + * @return Whether to continue executing the refresh session task */ bool RefreshLease() override; /** - * @brief 测试使用,重置refresh session task + * @brief For testing: reset the refresh session task */ void ResetRefreshSessionTask(); private: /** - * 一个lease期间会续约rfreshTimesPerLease次,每次续约失败就递增 - * 当连续续约rfreshTimesPerLease次失败的时候,则disable IO + * During one lease period the lease is renewed refreshTimesPerLease times, + * and the counter is incremented on every failed renewal. When + * refreshTimesPerLease consecutive renewals have failed, IO is disabled */ void IncremRefreshFailed(); @@ -135,44 +137,46 @@ class LeaseExecutor : public LeaseExecutorBase { void CheckNeedUpdateFileInfo(const FInfo& fileInfo); private: - // 与mds进行lease续约的文件名 - std::string fullFileName_; + // File name for lease renewal with the mds + std::string fullFileName_; - // 用于续约的client - MDSClient* mdsclient_; + // Client used for renewal + MDSClient* mdsclient_; - // 用于发起refression的user信息 - UserInfo_t userinfo_; + // User information used to initiate a refresh session + UserInfo_t userinfo_; - // IO管理者,当文件需要更新版本信息或者disable io的时候调用其接口 - IOManager4File* iomanager_; + // IO manager; its interface is called when the file needs to update + // version information or disable IO + IOManager4File* iomanager_; - // 当前lease执行的配置信息 - LeaseOption leaseoption_; + // Configuration information for the current lease execution + LeaseOption leaseoption_; - // mds端传过来的lease信息,包含当前文件的lease时长,及sessionid - LeaseSession_t leasesession_; + // The lease information transmitted from the mds end, including the lease + // duration of the current file and the sessionid + LeaseSession_t leasesession_; - // 记录当前lease是否可用 - std::atomic isleaseAvaliable_; + //
Record whether the current lease is available + std::atomic isleaseAvaliable_; - // 记录当前连续续约失败的次数 - std::atomic failedrefreshcount_; + // Record the current number of consecutive renewal failures + std::atomic failedrefreshcount_; - // refresh session定时任务,会间隔固定时间执行一次 + // The refresh session scheduled task is executed once per fixed interval std::unique_ptr task_; }; -// RefreshSessin定期任务 -// 利用brpc::PeriodicTaskManager进行管理 -// 定时器触发时调用OnTriggeringTask,根据返回值决定是否继续定时触发 -// 如果不再继续触发,调用OnDestroyingTask进行清理操作 +// RefreshSession periodic task +// Managed by brpc::PeriodicTaskManager +// When the timer fires, OnTriggeringTask is called; its return value decides +// whether the task keeps being triggered periodically. Once triggering stops, +// OnDestroyingTask is called for cleanup class RefreshSessionTask : public brpc::PeriodicTask { public: using Task = std::function; - RefreshSessionTask(LeaseExecutorBase* leaseExecutor, - uint64_t intervalUs) + RefreshSessionTask(LeaseExecutorBase* leaseExecutor, uint64_t intervalUs) : leaseExecutor_(leaseExecutor), refreshIntervalUs_(intervalUs), stopped_(false), @@ -193,10 +197,10 @@ class RefreshSessionTask : public brpc::PeriodicTask { virtual ~RefreshSessionTask() = default; /** - * @brief 定时器超时后执行当前函数 - * @param next_abstime 任务下次执行的绝对时间 - * @return true 继续定期执行当前任务 - * false 停止执行当前任务 + * @brief: Execute the current function after the timer expires + * @param next_abstime Absolute time of the task's next execution + * @return true Continue executing the current task periodically + * false Stop executing the current task */ bool OnTriggeringTask(timespec* next_abstime) override { std::lock_guard lk(stopMtx_); @@ -209,7 +213,7 @@ } /** - * @brief 停止再次执行当前任务 + * @brief Stop the current task from being executed again */ void Stop() { std::lock_guard lk(stopMtx_); @@ -217,7 +221,7 @@ } /** - * @brief 任务停止后调用 + * @brief Called after the task stops */ void OnDestroyingTask() override { std::unique_lock lk(terminatedMtx_); @@ -226,7 +230,7 @@ } /** - * @brief 等待任务退出 + * @brief Wait for the task to exit */ void WaitTaskExit() { std::unique_lock lk(terminatedMtx_); @@ -236,12 +240,10 @@ } /** - * @brief 获取refresh session时间间隔(us) - * @return refresh session任务时间间隔(us) + * @brief Get the refresh session time interval (us) + * @return refresh session task time interval (us) */ - uint64_t RefreshIntervalUs() const { - return refreshIntervalUs_; - } + uint64_t RefreshIntervalUs() const { return refreshIntervalUs_; } private: LeaseExecutorBase* leaseExecutor_; @@ -255,7 +257,7 @@ bthread::ConditionVariable terminatedCv_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_LEASE_EXECUTOR_H_ diff --git a/src/client/libcurve_file.cpp b/src/client/libcurve_file.cpp index 06273c5d0b..4c4d3fb632 100644 --- a/src/client/libcurve_file.cpp +++ b/src/client/libcurve_file.cpp @@ -49,7 +49,7 @@ #include "src/common/uuid.h" bool globalclientinited_ = false; -curve::client::FileClient *globalclient = nullptr; +curve::client::FileClient* globalclient = nullptr; using curve::client::UserInfo; @@ -71,9 +71,9 @@ char g_processname[kProcessNameMax]; class LoggerGuard { private: - friend void InitLogging(const std::string &confPath); + friend void InitLogging(const
std::string& confPath); - explicit LoggerGuard(const std::string &confpath) { + explicit LoggerGuard(const std::string& confpath) { InitInternal(confpath); } @@ -83,13 +83,13 @@ class LoggerGuard { } } - void InitInternal(const std::string &confpath); + void InitInternal(const std::string& confpath); private: bool needShutdown_ = false; }; -void LoggerGuard::InitInternal(const std::string &confPath) { +void LoggerGuard::InitInternal(const std::string& confPath) { curve::common::Configuration conf; conf.SetConfigPath(confPath); @@ -127,14 +127,18 @@ void LoggerGuard::InitInternal(const std::string &confPath) { needShutdown_ = true; } -void InitLogging(const std::string &confPath) { +void InitLogging(const std::string& confPath) { static LoggerGuard guard(confPath); } } // namespace FileClient::FileClient() - : rwlock_(), fdcount_(0), fileserviceMap_(), clientconfig_(), mdsClient_(), + : rwlock_(), + fdcount_(0), + fileserviceMap_(), + clientconfig_(), + mdsClient_(), csClient_(std::make_shared()), csBroadCaster_(std::make_shared(csClient_)), inited_(false), @@ -214,8 +218,8 @@ void FileClient::UnInit() { inited_ = false; } -int FileClient::Open(const std::string &filename, const UserInfo_t &userinfo, - const OpenFlags &openflags) { +int FileClient::Open(const std::string& filename, const UserInfo_t& userinfo, + const OpenFlags& openflags) { LOG(INFO) << "Opening filename: " << filename << ", flags: " << openflags; ClientConfig clientConfig; if (openflags.confPath.empty()) { @@ -235,7 +239,7 @@ int FileClient::Open(const std::string &filename, const UserInfo_t &userinfo, return -LIBCURVE_ERROR::FAILED; } - FileInstance *fileserv = FileInstance::NewInitedFileInstance( + FileInstance* fileserv = FileInstance::NewInitedFileInstance( clientConfig.GetFileServiceOption(), mdsClient, filename, userinfo, openflags, false); if (fileserv == nullptr) { @@ -266,9 +270,9 @@ int FileClient::Open(const std::string &filename, const UserInfo_t &userinfo, return fd; } -int FileClient::Open4ReadOnly(const std::string &filename, - const UserInfo_t &userinfo, bool disableStripe) { - FileInstance *instance = FileInstance::Open4Readonly( +int FileClient::Open4ReadOnly(const std::string& filename, + const UserInfo_t& userinfo, bool disableStripe) { + FileInstance* instance = FileInstance::Open4Readonly( clientconfig_.GetFileServiceOption(), mdsClient_, filename, userinfo); if (instance == nullptr) { @@ -293,8 +297,8 @@ int FileClient::Open4ReadOnly(const std::string &filename, return fd; } -int FileClient::IncreaseEpoch(const std::string &filename, - const UserInfo_t &userinfo) { +int FileClient::IncreaseEpoch(const std::string& filename, + const UserInfo_t& userinfo) { LOG(INFO) << "IncreaseEpoch, filename: " << filename; FInfo_t fi; FileEpoch_t fEpoch; @@ -324,8 +328,7 @@ int FileClient::IncreaseEpoch(const std::string &filename, return ret2; } -int FileClient::Create(const std::string& filename, - const UserInfo& userinfo, +int FileClient::Create(const std::string& filename, const UserInfo& userinfo, size_t size) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { @@ -350,8 +353,8 @@ int FileClient::Create2(const CreateFileContext& context) { if (mdsClient_ != nullptr) { ret = mdsClient_->CreateFile(context); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) - << "Create file failed, filename: " << context.name - << ", ret: " << ret; + << "Create file failed, filename: " << context.name + << ", ret: " << ret; } else { LOG(ERROR) << "global mds client not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -359,8 +362,8 @@ int 
FileClient::Create2(const CreateFileContext& context) { return -ret; } -int FileClient::Read(int fd, char *buf, off_t offset, size_t len) { - // 长度为0,直接返回,不做任何操作 +int FileClient::Read(int fd, char* buf, off_t offset, size_t len) { + // Length is 0, returns directly without any operation if (len == 0) { return -LIBCURVE_ERROR::OK; } @@ -374,8 +377,8 @@ int FileClient::Read(int fd, char *buf, off_t offset, size_t len) { return fileserviceMap_[fd]->Read(buf, offset, len); } -int FileClient::Write(int fd, const char *buf, off_t offset, size_t len) { - // 长度为0,直接返回,不做任何操作 +int FileClient::Write(int fd, const char* buf, off_t offset, size_t len) { + // Length is 0, returns directly without any operation if (len == 0) { return -LIBCURVE_ERROR::OK; } @@ -400,9 +403,9 @@ int FileClient::Discard(int fd, off_t offset, size_t length) { return iter->second->Discard(offset, length); } -int FileClient::AioRead(int fd, CurveAioContext *aioctx, +int FileClient::AioRead(int fd, CurveAioContext* aioctx, UserDataType dataType) { - // 长度为0,直接返回,不做任何操作 + // Length is 0, returns directly without any operation if (aioctx->length == 0) { return -LIBCURVE_ERROR::OK; } @@ -420,9 +423,9 @@ int FileClient::AioRead(int fd, CurveAioContext *aioctx, return ret; } -int FileClient::AioWrite(int fd, CurveAioContext *aioctx, +int FileClient::AioWrite(int fd, CurveAioContext* aioctx, UserDataType dataType) { - // 长度为0,直接返回,不做任何操作 + // Length is 0, returns directly without any operation if (aioctx->length == 0) { return -LIBCURVE_ERROR::OK; } @@ -440,7 +443,7 @@ int FileClient::AioWrite(int fd, CurveAioContext *aioctx, return ret; } -int FileClient::AioDiscard(int fd, CurveAioContext *aioctx) { +int FileClient::AioDiscard(int fd, CurveAioContext* aioctx) { ReadLockGuard lk(rwlock_); auto iter = fileserviceMap_.find(fd); if (CURVE_UNLIKELY(iter == fileserviceMap_.end())) { @@ -451,8 +454,8 @@ int FileClient::AioDiscard(int fd, CurveAioContext *aioctx) { } } -int FileClient::Rename(const UserInfo_t &userinfo, const std::string &oldpath, - const std::string &newpath) { +int FileClient::Rename(const UserInfo_t& userinfo, const std::string& oldpath, + const std::string& newpath) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->RenameFile(userinfo, oldpath, newpath); @@ -466,7 +469,7 @@ int FileClient::Rename(const UserInfo_t &userinfo, const std::string &oldpath, return -ret; } -int FileClient::Extend(const std::string &filename, const UserInfo_t &userinfo, +int FileClient::Extend(const std::string& filename, const UserInfo_t& userinfo, uint64_t newsize) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { @@ -481,7 +484,7 @@ int FileClient::Extend(const std::string &filename, const UserInfo_t &userinfo, return -ret; } -int FileClient::Unlink(const std::string &filename, const UserInfo_t &userinfo, +int FileClient::Unlink(const std::string& filename, const UserInfo_t& userinfo, bool deleteforce) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { @@ -496,7 +499,7 @@ int FileClient::Unlink(const std::string &filename, const UserInfo_t &userinfo, return -ret; } -int FileClient::Recover(const std::string &filename, const UserInfo_t &userinfo, +int FileClient::Recover(const std::string& filename, const UserInfo_t& userinfo, uint64_t fileId) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { @@ -510,7 +513,7 @@ int FileClient::Recover(const std::string &filename, const UserInfo_t &userinfo, return -ret; } -int FileClient::StatFile(int fd, FileStatInfo *finfo) { +int FileClient::StatFile(int fd, FileStatInfo* finfo) { 
FInfo_t fi; { ReadLockGuard lk(rwlock_); @@ -519,7 +522,7 @@ int FileClient::StatFile(int fd, FileStatInfo *finfo) { LOG(ERROR) << "StatFile failed not found fd = " << fd; return -LIBCURVE_ERROR::FAILED; } - FileInstance *instance = fileserviceMap_[fd]; + FileInstance* instance = fileserviceMap_[fd]; fi = instance->GetCurrentFileInfo(); } BuildFileStatInfo(fi, finfo); @@ -527,8 +530,8 @@ int FileClient::StatFile(int fd, FileStatInfo *finfo) { return LIBCURVE_ERROR::OK; } -int FileClient::StatFile(const std::string &filename, - const UserInfo_t &userinfo, FileStatInfo *finfo) { +int FileClient::StatFile(const std::string& filename, + const UserInfo_t& userinfo, FileStatInfo* finfo) { FInfo_t fi; FileEpoch_t fEpoch; int ret; @@ -548,8 +551,8 @@ int FileClient::StatFile(const std::string &filename, return -ret; } -int FileClient::Listdir(const std::string &dirpath, const UserInfo_t &userinfo, - std::vector *filestatVec) { +int FileClient::Listdir(const std::string& dirpath, const UserInfo_t& userinfo, + std::vector* filestatVec) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->Listdir(dirpath, userinfo, filestatVec); @@ -563,7 +566,7 @@ int FileClient::Listdir(const std::string &dirpath, const UserInfo_t &userinfo, return -ret; } -int FileClient::Mkdir(const std::string &dirpath, const UserInfo_t &userinfo) { +int FileClient::Mkdir(const std::string& dirpath, const UserInfo_t& userinfo) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { CreateFileContext context; @@ -588,7 +591,7 @@ int FileClient::Mkdir(const std::string &dirpath, const UserInfo_t &userinfo) { return -ret; } -int FileClient::Rmdir(const std::string &dirpath, const UserInfo_t &userinfo) { +int FileClient::Rmdir(const std::string& dirpath, const UserInfo_t& userinfo) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->DeleteFile(dirpath, userinfo); @@ -601,9 +604,9 @@ int FileClient::Rmdir(const std::string &dirpath, const UserInfo_t &userinfo) { return -ret; } -int FileClient::ChangeOwner(const std::string &filename, - const std::string &newOwner, - const UserInfo_t &userinfo) { +int FileClient::ChangeOwner(const std::string& filename, + const std::string& newOwner, + const UserInfo_t& userinfo) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->ChangeOwner(filename, newOwner, userinfo); @@ -651,7 +654,7 @@ int FileClient::Close(int fd) { return -LIBCURVE_ERROR::FAILED; } -int FileClient::GetClusterId(char *buf, int len) { +int FileClient::GetClusterId(char* buf, int len) { std::string result = GetClusterId(); if (result.empty()) { @@ -685,7 +688,7 @@ std::string FileClient::GetClusterId() { return {}; } -int FileClient::GetFileInfo(int fd, FInfo *finfo) { +int FileClient::GetFileInfo(int fd, FInfo* finfo) { int ret = -LIBCURVE_ERROR::FAILED; ReadLockGuard lk(rwlock_); @@ -707,11 +710,11 @@ std::vector FileClient::ListPoolset() { const auto ret = mdsClient_->ListPoolset(&out); LOG_IF(WARNING, ret != LIBCURVE_ERROR::OK) - << "Failed to list poolset, error: " << ret; + << "Failed to list poolset, error: " << ret; return out; } -void FileClient::BuildFileStatInfo(const FInfo_t &fi, FileStatInfo *finfo) { +void FileClient::BuildFileStatInfo(const FInfo_t& fi, FileStatInfo* finfo) { finfo->id = fi.id; finfo->parentid = fi.parentid; finfo->ctime = fi.ctime; @@ -722,9 +725,9 @@ void FileClient::BuildFileStatInfo(const FInfo_t &fi, FileStatInfo *finfo) { finfo->stripeCount = fi.stripeCount; memcpy(finfo->filename, fi.filename.c_str(), - std::min(sizeof(finfo->filename), 
fi.filename.size() + 1)); + std::min(sizeof(finfo->filename), fi.filename.size() + 1)); memcpy(finfo->owner, fi.owner.c_str(), - std::min(sizeof(finfo->owner), fi.owner.size() + 1)); + std::min(sizeof(finfo->owner), fi.owner.size() + 1)); finfo->fileStatus = static_cast(fi.filestatus); } @@ -758,7 +761,7 @@ bool FileClient::StartDummyServer() { return false; } - // 获取本地ip + // Obtain local IP std::string ip; if (!common::NetCommon::GetLocalIP(&ip)) { LOG(ERROR) << "Get local ip failed!"; @@ -775,14 +778,13 @@ bool FileClient::StartDummyServer() { } // namespace client } // namespace curve - -// 全局初始化与反初始化 -int GlobalInit(const char *configpath); +// Global initialization and deinitialization +int GlobalInit(const char* configpath); void GlobalUnInit(); -int Init(const char *path) { return GlobalInit(path); } +int Init(const char* path) { return GlobalInit(path); } -int Open4Qemu(const char *filename) { +int Open4Qemu(const char* filename) { curve::client::UserInfo_t userinfo; std::string realname; bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( @@ -800,7 +802,7 @@ int Open4Qemu(const char *filename) { return globalclient->Open(realname, userinfo); } -int IncreaseEpoch(const char *filename) { +int IncreaseEpoch(const char* filename) { curve::client::UserInfo_t userinfo; std::string realname; bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( @@ -818,7 +820,7 @@ int IncreaseEpoch(const char *filename) { return globalclient->IncreaseEpoch(realname, userinfo); } -int Extend4Qemu(const char *filename, int64_t newsize) { +int Extend4Qemu(const char* filename, int64_t newsize) { curve::client::UserInfo_t userinfo; std::string realname; bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( @@ -840,7 +842,7 @@ int Extend4Qemu(const char *filename, int64_t newsize) { static_cast(newsize)); } -int Open(const char *filename, const C_UserInfo_t *userinfo) { +int Open(const char* filename, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -850,7 +852,7 @@ int Open(const char *filename, const C_UserInfo_t *userinfo) { UserInfo(userinfo->owner, userinfo->password)); } -int Read(int fd, char *buf, off_t offset, size_t length) { +int Read(int fd, char* buf, off_t offset, size_t length) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -859,7 +861,7 @@ int Read(int fd, char *buf, off_t offset, size_t length) { return globalclient->Read(fd, buf, offset, length); } -int Write(int fd, const char *buf, off_t offset, size_t length) { +int Write(int fd, const char* buf, off_t offset, size_t length) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -877,7 +879,7 @@ int Discard(int fd, off_t offset, size_t length) { return globalclient->Discard(fd, offset, length); } -int AioRead(int fd, CurveAioContext *aioctx) { +int AioRead(int fd, CurveAioContext* aioctx) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -888,7 +890,7 @@ int AioRead(int fd, CurveAioContext *aioctx) { return globalclient->AioRead(fd, aioctx); } -int AioWrite(int fd, CurveAioContext *aioctx) { +int AioWrite(int fd, CurveAioContext* aioctx) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -896,11 +898,11 @@ int AioWrite(int fd, CurveAioContext *aioctx) { DVLOG(9) << "offset: " << aioctx->offset << " length: " << aioctx->length << " op: " << 
aioctx->op - << " buf: " << *(unsigned int *)aioctx->buf; + << " buf: " << *(unsigned int*)aioctx->buf; return globalclient->AioWrite(fd, aioctx); } -int AioDiscard(int fd, CurveAioContext *aioctx) { +int AioDiscard(int fd, CurveAioContext* aioctx) { if (globalclient == nullptr) { LOG(ERROR) << "Not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -909,7 +911,7 @@ int AioDiscard(int fd, CurveAioContext *aioctx) { return globalclient->AioDiscard(fd, aioctx); } -int Create(const char *filename, const C_UserInfo_t *userinfo, size_t size) { +int Create(const char* filename, const C_UserInfo_t* userinfo, size_t size) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -919,8 +921,8 @@ int Create(const char *filename, const C_UserInfo_t *userinfo, size_t size) { filename, UserInfo(userinfo->owner, userinfo->password), size); } -int Rename(const C_UserInfo_t* userinfo, - const char* oldpath, const char* newpath) { +int Rename(const C_UserInfo_t* userinfo, const char* oldpath, + const char* newpath) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -930,7 +932,7 @@ int Rename(const C_UserInfo_t* userinfo, oldpath, newpath); } -int Extend(const char *filename, const C_UserInfo_t *userinfo, +int Extend(const char* filename, const C_UserInfo_t* userinfo, uint64_t newsize) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; @@ -941,7 +943,7 @@ int Extend(const char *filename, const C_UserInfo_t *userinfo, filename, UserInfo(userinfo->owner, userinfo->password), newsize); } -int Unlink(const char *filename, const C_UserInfo_t *userinfo) { +int Unlink(const char* filename, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -951,7 +953,7 @@ int Unlink(const char *filename, const C_UserInfo_t *userinfo) { UserInfo(userinfo->owner, userinfo->password)); } -int DeleteForce(const char *filename, const C_UserInfo_t *userinfo) { +int DeleteForce(const char* filename, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -961,7 +963,7 @@ int DeleteForce(const char *filename, const C_UserInfo_t *userinfo) { filename, UserInfo(userinfo->owner, userinfo->password), true); } -int Recover(const char *filename, const C_UserInfo_t *userinfo, +int Recover(const char* filename, const C_UserInfo_t* userinfo, uint64_t fileId) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; @@ -972,21 +974,21 @@ int Recover(const char *filename, const C_UserInfo_t *userinfo, filename, UserInfo(userinfo->owner, userinfo->password), fileId); } -DirInfo_t *OpenDir(const char *dirpath, const C_UserInfo_t *userinfo) { +DirInfo_t* OpenDir(const char* dirpath, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return nullptr; } - DirInfo_t *dirinfo = new (std::nothrow) DirInfo_t; - dirinfo->dirpath = const_cast(dirpath); - dirinfo->userinfo = const_cast(userinfo); + DirInfo_t* dirinfo = new (std::nothrow) DirInfo_t; + dirinfo->dirpath = const_cast(dirpath); + dirinfo->userinfo = const_cast(userinfo); dirinfo->fileStat = nullptr; return dirinfo; } -int Listdir(DirInfo_t *dirinfo) { +int Listdir(DirInfo_t* dirinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1027,7 +1029,7 @@ int Listdir(DirInfo_t *dirinfo) { return ret; } -void CloseDir(DirInfo_t *dirinfo) { +void CloseDir(DirInfo_t* dirinfo) { if 
(dirinfo != nullptr) { if (dirinfo->fileStat != nullptr) { delete[] dirinfo->fileStat; @@ -1037,7 +1039,7 @@ void CloseDir(DirInfo_t *dirinfo) { } } -int Mkdir(const char *dirpath, const C_UserInfo_t *userinfo) { +int Mkdir(const char* dirpath, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1047,7 +1049,7 @@ int Mkdir(const char *dirpath, const C_UserInfo_t *userinfo) { UserInfo(userinfo->owner, userinfo->password)); } -int Rmdir(const char *dirpath, const C_UserInfo_t *userinfo) { +int Rmdir(const char* dirpath, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1066,7 +1068,7 @@ int Close(int fd) { return globalclient->Close(fd); } -int StatFile4Qemu(const char *filename, FileStatInfo *finfo) { +int StatFile4Qemu(const char* filename, FileStatInfo* finfo) { curve::client::UserInfo_t userinfo; std::string realname; bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( @@ -1084,8 +1086,8 @@ int StatFile4Qemu(const char *filename, FileStatInfo *finfo) { return globalclient->StatFile(realname, userinfo, finfo); } -int StatFile(const char *filename, const C_UserInfo_t *cuserinfo, - FileStatInfo *finfo) { +int StatFile(const char* filename, const C_UserInfo_t* cuserinfo, + FileStatInfo* finfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1095,8 +1097,8 @@ int StatFile(const char *filename, const C_UserInfo_t *cuserinfo, return globalclient->StatFile(filename, userinfo, finfo); } -int ChangeOwner(const char *filename, const char *newOwner, - const C_UserInfo_t *cuserinfo) { +int ChangeOwner(const char* filename, const char* newOwner, + const C_UserInfo_t* cuserinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1108,7 +1110,7 @@ int ChangeOwner(const char *filename, const char *newOwner, void UnInit() { GlobalUnInit(); } -int GetClusterId(char *buf, int len) { +int GetClusterId(char* buf, int len) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1117,7 +1119,7 @@ int GetClusterId(char *buf, int len) { return globalclient->GetClusterId(buf, len); } -int GlobalInit(const char *path) { +int GlobalInit(const char* path) { int ret = 0; if (globalclientinited_) { LOG(INFO) << "global cient already inited!"; @@ -1154,74 +1156,74 @@ void GlobalUnInit() { } } -const char *LibCurveErrorName(LIBCURVE_ERROR err) { +const char* LibCurveErrorName(LIBCURVE_ERROR err) { switch (err) { - case LIBCURVE_ERROR::OK: - return "OK"; - case LIBCURVE_ERROR::EXISTS: - return "EXISTS"; - case LIBCURVE_ERROR::FAILED: - return "FAILED"; - case LIBCURVE_ERROR::DISABLEIO: - return "DISABLEIO"; - case LIBCURVE_ERROR::AUTHFAIL: - return "AUTHFAIL"; - case LIBCURVE_ERROR::DELETING: - return "DELETING"; - case LIBCURVE_ERROR::NOTEXIST: - return "NOTEXIST"; - case LIBCURVE_ERROR::UNDER_SNAPSHOT: - return "UNDER_SNAPSHOT"; - case LIBCURVE_ERROR::NOT_UNDERSNAPSHOT: - return "NOT_UNDERSNAPSHOT"; - case LIBCURVE_ERROR::DELETE_ERROR: - return "DELETE_ERROR"; - case LIBCURVE_ERROR::NOT_ALLOCATE: - return "NOT_ALLOCATE"; - case LIBCURVE_ERROR::NOT_SUPPORT: - return "NOT_SUPPORT"; - case LIBCURVE_ERROR::NOT_EMPTY: - return "NOT_EMPTY"; - case LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE: - return "NO_SHRINK_BIGGER_FILE"; - case LIBCURVE_ERROR::SESSION_NOTEXISTS: - return "SESSION_NOTEXISTS"; - case 
LIBCURVE_ERROR::FILE_OCCUPIED: - return "FILE_OCCUPIED"; - case LIBCURVE_ERROR::PARAM_ERROR: - return "PARAM_ERROR"; - case LIBCURVE_ERROR::INTERNAL_ERROR: - return "INTERNAL_ERROR"; - case LIBCURVE_ERROR::CRC_ERROR: - return "CRC_ERROR"; - case LIBCURVE_ERROR::INVALID_REQUEST: - return "INVALID_REQUEST"; - case LIBCURVE_ERROR::DISK_FAIL: - return "DISK_FAIL"; - case LIBCURVE_ERROR::NO_SPACE: - return "NO_SPACE"; - case LIBCURVE_ERROR::NOT_ALIGNED: - return "NOT_ALIGNED"; - case LIBCURVE_ERROR::BAD_FD: - return "BAD_FD"; - case LIBCURVE_ERROR::LENGTH_NOT_SUPPORT: - return "LENGTH_NOT_SUPPORT"; - case LIBCURVE_ERROR::SESSION_NOT_EXIST: - return "SESSION_NOT_EXIST"; - case LIBCURVE_ERROR::STATUS_NOT_MATCH: - return "STATUS_NOT_MATCH"; - case LIBCURVE_ERROR::DELETE_BEING_CLONED: - return "DELETE_BEING_CLONED"; - case LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT: - return "CLIENT_NOT_SUPPORT_SNAPSHOT"; - case LIBCURVE_ERROR::SNAPSTHO_FROZEN: - return "SNAPSTHO_FROZEN"; - case LIBCURVE_ERROR::RETRY_UNTIL_SUCCESS: - return "RETRY_UNTIL_SUCCESS"; - case LIBCURVE_ERROR::EPOCH_TOO_OLD: - return "EPOCH_TOO_OLD"; - case LIBCURVE_ERROR::UNKNOWN: - break; + case LIBCURVE_ERROR::OK: + return "OK"; + case LIBCURVE_ERROR::EXISTS: + return "EXISTS"; + case LIBCURVE_ERROR::FAILED: + return "FAILED"; + case LIBCURVE_ERROR::DISABLEIO: + return "DISABLEIO"; + case LIBCURVE_ERROR::AUTHFAIL: + return "AUTHFAIL"; + case LIBCURVE_ERROR::DELETING: + return "DELETING"; + case LIBCURVE_ERROR::NOTEXIST: + return "NOTEXIST"; + case LIBCURVE_ERROR::UNDER_SNAPSHOT: + return "UNDER_SNAPSHOT"; + case LIBCURVE_ERROR::NOT_UNDERSNAPSHOT: + return "NOT_UNDERSNAPSHOT"; + case LIBCURVE_ERROR::DELETE_ERROR: + return "DELETE_ERROR"; + case LIBCURVE_ERROR::NOT_ALLOCATE: + return "NOT_ALLOCATE"; + case LIBCURVE_ERROR::NOT_SUPPORT: + return "NOT_SUPPORT"; + case LIBCURVE_ERROR::NOT_EMPTY: + return "NOT_EMPTY"; + case LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE: + return "NO_SHRINK_BIGGER_FILE"; + case LIBCURVE_ERROR::SESSION_NOTEXISTS: + return "SESSION_NOTEXISTS"; + case LIBCURVE_ERROR::FILE_OCCUPIED: + return "FILE_OCCUPIED"; + case LIBCURVE_ERROR::PARAM_ERROR: + return "PARAM_ERROR"; + case LIBCURVE_ERROR::INTERNAL_ERROR: + return "INTERNAL_ERROR"; + case LIBCURVE_ERROR::CRC_ERROR: + return "CRC_ERROR"; + case LIBCURVE_ERROR::INVALID_REQUEST: + return "INVALID_REQUEST"; + case LIBCURVE_ERROR::DISK_FAIL: + return "DISK_FAIL"; + case LIBCURVE_ERROR::NO_SPACE: + return "NO_SPACE"; + case LIBCURVE_ERROR::NOT_ALIGNED: + return "NOT_ALIGNED"; + case LIBCURVE_ERROR::BAD_FD: + return "BAD_FD"; + case LIBCURVE_ERROR::LENGTH_NOT_SUPPORT: + return "LENGTH_NOT_SUPPORT"; + case LIBCURVE_ERROR::SESSION_NOT_EXIST: + return "SESSION_NOT_EXIST"; + case LIBCURVE_ERROR::STATUS_NOT_MATCH: + return "STATUS_NOT_MATCH"; + case LIBCURVE_ERROR::DELETE_BEING_CLONED: + return "DELETE_BEING_CLONED"; + case LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT: + return "CLIENT_NOT_SUPPORT_SNAPSHOT"; + case LIBCURVE_ERROR::SNAPSTHO_FROZEN: + return "SNAPSTHO_FROZEN"; + case LIBCURVE_ERROR::RETRY_UNTIL_SUCCESS: + return "RETRY_UNTIL_SUCCESS"; + case LIBCURVE_ERROR::EPOCH_TOO_OLD: + return "EPOCH_TOO_OLD"; + case LIBCURVE_ERROR::UNKNOWN: + break; } static thread_local char message[64]; diff --git a/src/client/libcurve_file.h b/src/client/libcurve_file.h index 1f1202bbbb..cd24b8afc6 100644 --- a/src/client/libcurve_file.h +++ b/src/client/libcurve_file.h @@ -24,19 +24,20 @@ #define SRC_CLIENT_LIBCURVE_FILE_H_ #include + #include +#include #include #include #include -#include 
#include "include/client/libcurve.h" +#include "src/client/chunkserver_broadcaster.h" #include "src/client/client_common.h" #include "src/client/file_instance.h" #include "src/common/concurrent/rw_lock.h" -#include "src/client/chunkserver_broadcaster.h" -// TODO(tongguangxun) :添加关键函数trace功能 +// TODO(tongguangxun): Add key function trace function namespace curve { namespace client { @@ -48,28 +49,28 @@ class FileClient { virtual ~FileClient() = default; /** - * file对象初始化函数 - * @param: 配置文件路径 + * file object initialization function + * @param: Configuration file path */ virtual int Init(const std::string& configpath); /** - * 打开或创建文件 - * @param: filename文件名 - * @param: userinfo是操作文件的用户信息 - * @return: 返回文件fd + * Open or create a file + * @param: filename File name + * @param: userinfo is the user information for operating the file + * @return: Return the file fd */ - virtual int Open(const std::string& filename, - const UserInfo_t& userinfo, + virtual int Open(const std::string& filename, const UserInfo_t& userinfo, const OpenFlags& openflags = {}); /** - * 打开文件,这个打开只是创建了一个fd,并不与mds交互,没有session续约 - * 这个Open接口主要是提供给快照克隆镜像系统做数据拷贝使用 - * @param: filename文件名 - * @param: userinfo当前用户信息 + * Open the file. This only creates an fd and does not interact with mds. + * There is no session renewal This Open interface is mainly provided for + * data copying in snapshot clone image systems + * @param: filename File name + * @param: userinfo Current user information * @param disableStripe enable/disable stripe feature for a stripe file - * @return: 返回文件fd + * @return: Return the file fd */ virtual int Open4ReadOnly(const std::string& filename, const UserInfo_t& userinfo, @@ -83,19 +84,19 @@ class FileClient { * * @return 0 for success, -1 for fail */ - int IncreaseEpoch(const std::string& filename, - const UserInfo_t& userinfo); + int IncreaseEpoch(const std::string& filename, const UserInfo_t& userinfo); /** - * 创建文件 - * @param: filename文件名 - * @param: userinfo是当前打开或创建时携带的user信息 - * @param: size文件长度,当create为true的时候以size长度创建文件 - * @return: 成功返回0, 失败可能有多种可能 - * 比如内部错误,或者文件已存在 + * Create File + * @param: filename File name + * @param: userinfo is the user information that is currently carried when + * opening or creating + * @param: size file length. 
A file of this size is created + * when create is true + * @return: Returns 0 on success; failure can have several causes, + * for example an internal error or a file that already exists */ - virtual int Create(const std::string& filename, - const UserInfo_t& userinfo, + virtual int Create(const std::string& filename, const UserInfo_t& userinfo, size_t size); /** @@ -105,22 +106,24 @@ virtual int Create2(const CreateFileContext& context); /** - * 同步模式读 - * @param: fd为当前open返回的文件描述符 - * @param: buf为当前待读取的缓冲区 - * @param:offset文件内的便宜 - * @parma:length为待读取的长度 - * @return: 成功返回读取字节数,否则返回小于0的错误码 + * Synchronous-mode read + * @param: fd is the file descriptor returned by the current open + * @param: buf is the buffer to read into + * @param: offset is the offset within the file + * @param: length is the length to be read + * @return: Returns the number of bytes read on success, otherwise an + * error code less than 0 is returned */ virtual int Read(int fd, char* buf, off_t offset, size_t length); /** - * 同步模式写 - * @param: fd为当前open返回的文件描述符 - * @param: buf为当前待写入的缓冲区 - * @param:offset文件内的便宜 - * @parma:length为待读取的长度 - * @return: 成功返回写入字节数,否则返回小于0的错误码 + * Synchronous-mode write + * @param: fd is the file descriptor returned by the current open + * @param: buf is the buffer to be written + * @param: offset is the offset within the file + * @param: length is the length to be written + * @return: Returns the number of bytes written on success, otherwise an + * error code less than 0 is returned */ virtual int Write(int fd, const char* buf, off_t offset, size_t length); @@ -135,21 +138,25 @@ virtual int Discard(int fd, off_t offset, size_t length); /** - * 异步模式读 - * @param: fd为当前open返回的文件描述符 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * Asynchronous-mode read + * @param: fd is the file descriptor returned by the current open + * @param: aioctx is the asynchronous read/write IO context that stores + * basic IO information * @param dataType type of aioctx->buf, default is `UserDataType::RawBuffer` - * @return: 成功返回读取字节数,否则返回小于0的错误码 + * @return: Returns the number of bytes read on success, otherwise an + * error code less than 0 is returned */ virtual int AioRead(int fd, CurveAioContext* aioctx, UserDataType dataType = UserDataType::RawBuffer); /** - * 异步模式写 - * @param: fd为当前open返回的文件描述符 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * Asynchronous-mode write + * @param: fd is the file descriptor returned by the current open + * @param: aioctx is the asynchronous read/write IO context that stores + * basic IO information * @param dataType type of aioctx->buf, default is `UserDataType::RawBuffer` - * @return: 成功返回写入字节数,否则返回小于0的错误码 + * @return: Returns the number of bytes written on success, otherwise an + * error code less than 0 is returned */ virtual int AioWrite(int fd, CurveAioContext* aioctx, UserDataType dataType = UserDataType::RawBuffer); @@ -163,33 +170,31 @@ virtual int AioDiscard(int fd, CurveAioContext* aioctx); /** - * 重命名文件 - * @param: userinfo是用户信息 - * @param: oldpath源路劲 - * @param: newpath目标路径 + * Rename a file + * @param: userinfo is the user information + * @param: oldpath is the source path + * @param: newpath is the target path */ - virtual int Rename(const UserInfo_t& userinfo, - const std::string& oldpath, + virtual int Rename(const UserInfo_t& userinfo, const std::string& oldpath, const std::string& newpath); /** - * 扩展文件 - * @param: userinfo是用户信息 - * @param: filename文件名 - * @param: newsize新的size + * Extend a file + * @param: userinfo is the user
information + * @param: filename File name + * @param: newsize New size */ - virtual int Extend(const std::string& filename, - const UserInfo_t& userinfo, + virtual int Extend(const std::string& filename, const UserInfo_t& userinfo, uint64_t newsize); /** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @param: deleteforce=true只能用于从回收站删除,false为放入垃圾箱 + * Delete files + * @param: userinfo is the user information + * @param: filename The file name to be deleted + * @param: deleteforce=true can only be used to delete from the recycle bin, + * false means to put it in the trash can */ - virtual int Unlink(const std::string& filename, - const UserInfo_t& userinfo, + virtual int Unlink(const std::string& filename, const UserInfo_t& userinfo, bool deleteforce = false); /** @@ -198,96 +203,98 @@ class FileClient { * @param: filename * @param: fileId */ - virtual int Recover(const std::string& filename, - const UserInfo_t& userinfo, + virtual int Recover(const std::string& filename, const UserInfo_t& userinfo, uint64_t fileId); /** - * 枚举目录内容 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @param[out]: filestatVec当前文件夹内的文件信息 + * Enumerate directory contents + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @param[out]: filestatVec File information in the current folder */ - virtual int Listdir(const std::string& dirpath, - const UserInfo_t& userinfo, + virtual int Listdir(const std::string& dirpath, const UserInfo_t& userinfo, std::vector* filestatVec); /** - * 创建目录 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 + * Create directory + * @param: userinfo is the user information + * @param: dirpath is the directory path */ virtual int Mkdir(const std::string& dirpath, const UserInfo_t& userinfo); /** - * 删除目录 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 + * Delete directory + * @param: userinfo is the user information + * @param: dirpath is the directory path */ virtual int Rmdir(const std::string& dirpath, const UserInfo_t& userinfo); /** - * 获取文件信息 - * @param: filename文件名 - * @param: userinfo是用户信息 - * @param: finfo是出参,携带当前文件的基础信息 - * @return: 成功返回int::OK,否则返回小于0的错误码 + * Obtain file information + * @param: filename File name + * @param: userinfo is the user information + * @param: finfo is an output parameter that carries the basic information + * of the current file + * @return: Success returns int::OK, otherwise an error code less than 0 + * will be returned */ virtual int StatFile(const std::string& filename, - const UserInfo_t& userinfo, - FileStatInfo* finfo); - - /** - * stat file - * @param: fd is file descriptor. - * @param: finfo is an output para, carry the base info of current file. - * @return: returns int::ok if success, - * otherwise returns an error code less than 0 - */ + const UserInfo_t& userinfo, FileStatInfo* finfo); + + /** + * stat file + * @param: fd is file descriptor. + * @param: finfo is an output para, carry the base info of current file. 
+ * @return: returns int::ok if success, + * otherwise returns an error code less than 0 + */ virtual int StatFile(int fd, FileStatInfo* finfo); /** - * 变更owner - * @param: filename待变更的文件名 - * @param: newOwner新的owner信息 - * @param: userinfo执行此操作的user信息,只有root用户才能执行变更 - * @return: 成功返回0, - * 否则返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Change owner + * @param: filename The file name to be changed + * @param: newOwner New owner information + * @param: userinfo The user information for performing this operation, only + * the root user can perform changes + * @return: Successfully returned 0, + * Otherwise, return to + * -LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED, etc */ virtual int ChangeOwner(const std::string& filename, const std::string& newOwner, const UserInfo_t& userinfo); /** - * close通过fd找到对应的instance进行删除 - * @param: fd为当前open返回的文件描述符 - * @return: 成功返回int::OK,否则返回小于0的错误码 + * close and delete the corresponding instance through fd + * @param: fd is the file descriptor returned by the current open + * @return: Success returns int::OK, otherwise an error code less than 0 + * will be returned */ virtual int Close(int fd); /** - * 析构,回收资源 + * Deconstruct and recycle resources */ virtual void UnInit(); /** - * @brief: 获取集群id - * @param: buf存放集群id - * @param: buf的长度 - * @return: 成功返回0, 失败返回-LIBCURVE_ERROR::FAILED + * @brief: Obtain cluster ID + * @param: buf Storage Cluster ID + * @param: The length of buf + * @return: Success returns 0, failure returns -LIBCURVE_ERROR::FAILED */ int GetClusterId(char* buf, int len); /** - * @brief 获取集群id - * @return 成功返回集群id,失败返回空 + * @brief Get cluster ID + * @return Successfully returned cluster ID, failed returned empty */ std::string GetClusterId(); /** - * @brief 获取文件信息,测试使用 - * @param fd 文件句柄 - * @param[out] finfo 文件信息 - * @return 成功返回0,失败返回-LIBCURVE_ERROR::FAILED + * @brief to obtain file information for testing purposes + * @param fd file handle + * @param[out] finfo file information + * @return success returns 0, failure returns -LIBCURVE_ERROR::FAILED */ int GetFileInfo(int fd, FInfo* finfo); @@ -295,33 +302,33 @@ class FileClient { std::vector ListPoolset(); /** - * 测试使用,获取当前挂载文件数量 - * @return 返回当前挂载文件数量 + * Test usage to obtain the current number of mounted files + * @return Returns the current number of mounted files */ - uint64_t GetOpenedFileNum() const { - return openedFileNum_.get_value(); - } + uint64_t GetOpenedFileNum() const { return openedFileNum_.get_value(); } private: - static void BuildFileStatInfo(const FInfo_t &fi, FileStatInfo *finfo); + static void BuildFileStatInfo(const FInfo_t& fi, FileStatInfo* finfo); bool StartDummyServer(); private: BthreadRWLock rwlock_; - // 向上返回的文件描述符,对于QEMU来说,一个vdisk对应一个文件描述符 + // The file descriptor returned upwards, for QEMU, one vdisk corresponds to + // one file descriptor std::atomic fdcount_; - // 每个vdisk都有一个FileInstance,通过返回的fd映射到对应的instance + // Each vdisk has a FileInstance, which is mapped to the corresponding + // instance through the returned fd std::unordered_map fileserviceMap_; // std::unordered_map fileserviceFileNameMap_; - // FileClient配置 + // FileClient Configuration ClientConfig clientconfig_; - // fileclient对应的全局mdsclient + // Global mdsclient corresponding to fileclient std::shared_ptr mdsClient_; // chunkserver client @@ -329,10 +336,10 @@ class FileClient { // chunkserver broadCaster std::shared_ptr csBroadCaster_; - // 是否初始化成功 + // Is initialization successful bool inited_; - // 挂载文件数量 + // Number of mounted files bvar::Adder openedFileNum_; }; diff 
--git a/src/client/libcurve_snapshot.h b/src/client/libcurve_snapshot.h index d8b2ce841a..24f9d2f163 100644 --- a/src/client/libcurve_snapshot.h +++ b/src/client/libcurve_snapshot.h @@ -27,305 +27,304 @@ #include #include -#include "src/client/mds_client.h" -#include "src/client/config_info.h" #include "src/client/client_common.h" +#include "src/client/config_info.h" #include "src/client/iomanager4chunk.h" +#include "src/client/mds_client.h" namespace curve { namespace client { -// SnapshotClient为外围快照系统与MDS和Chunkserver通信的出口 +// SnapshotClient is the exit for peripheral snapshot systems to communicate +// with MDS and Chunkserver class SnapshotClient { public: - SnapshotClient(); - ~SnapshotClient() = default; - /** - * 初始化函数,外围系统直接传入配置选项 - * @param: opt为外围配置选项 - * @return:0为成功,-1为失败 - */ - int Init(const ClientConfigOption& opt); + SnapshotClient(); + ~SnapshotClient() = default; + /** + * Initialization function, peripheral system directly passes in + * configuration options + * @param: opt is the peripheral configuration option + * @return: 0 indicates success, -1 indicates failure + */ + int Init(const ClientConfigOption& opt); - /** - * file对象初始化函数 - * @param: 配置文件路径 - */ - int Init(const std::string& configpath); + /** + * file object initialization function + * @param: Configuration file path + */ + int Init(const std::string& configpath); - /** - * 创建快照 - * @param: userinfo是用户信息 - * @param: filename为要创建快照的文件名 - * @param: seq是出参,获取该文件的版本信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int CreateSnapShot(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t* seq); - /** - * 删除快照 - * @param: userinfo是用户信息 - * @param: filename为要删除的文件名 - * @param: seq该文件的版本信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int DeleteSnapShot(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t seq); - /** - * 获取快照对应的文件信息 - * @param: userinfo是用户信息 - * @param: filename为对应的文件名 - * @param: seq为该文件打快照时对应的版本信息 - * @param: snapinfo是出参,保存当前文件的基础信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int GetSnapShot(const std::string& fname, - const UserInfo_t& userinfo, - uint64_t seq, - FInfo* snapinfo); - /** - * 列出当前文件对应版本列表的文件信息 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seqvec是当前文件的版本列表 - * @param: snapif是出参,获取多个seq号的文件信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int ListSnapShot(const std::string& filename, - const UserInfo_t& userinfo, - const std::vector* seqvec, - std::map* snapif); - /** - * 获取快照数据segment信息 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seq是文件版本号信息 - * @param: offset是文件的偏移 - * @param:segInfo是出参,保存当前文件的快照segment信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int GetSnapshotSegmentInfo(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t seq, - uint64_t offset, - SegmentInfo *segInfo); + /** + * Create a snapshot + * @param: userinfo is the user information + * @param: filename is the file name to create the snapshot + * @param: seq is the output parameter to obtain the version information of + * the file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED + */ + int CreateSnapShot(const std::string& filename, const UserInfo_t& userinfo, + uint64_t* seq); + /** + * Delete snapshot + * @param: userinfo is the user information + * @param: filename is the file name to be deleted + * @param: seq The version information of this file + * @return: Successfully returned 
LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED + */ + int DeleteSnapShot(const std::string& filename, const UserInfo_t& userinfo, + uint64_t seq); + /** + * Obtain file information corresponding to the snapshot + * @param: userinfo is the user information + * @param: filename is the corresponding file name + * @param: seq corresponds to the version information when taking a snapshot + * of the file + * @param: snapinfo is a parameter that saves the basic information of the + * current file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED + */ + int GetSnapShot(const std::string& fname, const UserInfo_t& userinfo, + uint64_t seq, FInfo* snapinfo); + /** + * List the file information corresponding to the version list of the + * current file + * @param: userinfo is the user information + * @param: filenam file name + * @param: seqvec is the version list of the current file + * @param: snapif is a parameter that obtains file information for multiple + * seq numbers + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED + */ + int ListSnapShot(const std::string& filename, const UserInfo_t& userinfo, + const std::vector* seqvec, + std::map* snapif); + /** + * Obtain snapshot data segment information + * @param: userinfo is the user information + * @param: filenam file name + * @param: seq is the file version number information + * @param: offset is the offset of the file + * @param: segInfo is a parameter that saves the snapshot segment + * information of the current file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED + */ + int GetSnapshotSegmentInfo(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, + uint64_t offset, SegmentInfo* segInfo); - /** - * 读取seq版本号的快照数据 - * @param: cidinfo是当前chunk对应的id信息 - * @param: seq是快照版本号 - * @param: offset是快照内的offset - * @param: len是要读取的长度 - * @param: buf是读取缓冲区 - * @param: scc是异步回调 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, - uint64_t len, char *buf, SnapCloneClosure* scc); - /** - * 删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param: cidinfo是当前chunk对应的id信息 - * @param: correctedSeq是chunk需要修正的版本 - */ - int DeleteChunkSnapshotOrCorrectSn(ChunkIDInfo cidinfo, - uint64_t correctedSeq); - /** - * 获取chunk的版本信息,chunkInfo是出参 - * @param: cidinfo是当前chunk对应的id信息 - * @param: chunkInfo是快照的详细信息 - */ - int GetChunkInfo(ChunkIDInfo cidinfo, ChunkInfoDetail *chunkInfo); - /** - * 获取快照状态 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seq是文件版本号信息 - */ - int CheckSnapShotStatus(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t seq, - FileStatus* filestatus); - /** - * @brief 创建clone文件 - * @detail - * - 若是clone,sn重置为初始值 - * - 若是recover,sn不变 - * - * @param source clone源文件名 - * @param: destination clone目标文件名 - * @param: userinfo 用户信息 - * @param: size 文件大小 - * @param: sn 版本号 - * @param: chunksize是要创建文件的chunk大小 - * @param stripeUnit stripe size - * @param stripeCount stripe count - * @param poolset poolset of destination file - * @param[out] fileinfo 创建的目标文件的文件信息 - * - * @return 错误码 - */ - int CreateCloneFile(const std::string& source, - const std::string& destination, - const UserInfo_t& userinfo, - uint64_t size, - uint64_t sn, - uint32_t chunksize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - FInfo* fileinfo); + /** + * Read snapshot data of seq 
version number + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: seq is the snapshot version number + * @param: offset is the offset within the snapshot + * @param: len is the length to be read + * @param: buf is a read buffer + * @param: scc is an asynchronous callback + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED + */ + int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc); + /** + * Delete snapshots generated during this dump or left over from history + * If no snapshot is generated during the dump process, modify the correntSn + * of the chunk + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: correctedSeq is the version of chunk that needs to be corrected + */ + int DeleteChunkSnapshotOrCorrectSn(ChunkIDInfo cidinfo, + uint64_t correctedSeq); + /** + * Obtain the version information of the chunk, where chunkInfo is the + * output parameter + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: chunkInfo is the detailed information of the snapshot + */ + int GetChunkInfo(ChunkIDInfo cidinfo, ChunkInfoDetail* chunkInfo); + /** + * Get snapshot status + * @param: userinfo is the user information + * @param: filenam file name + * @param: seq is the file version number information + */ + int CheckSnapShotStatus(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, + FileStatus* filestatus); + /** + * @brief Create clone file + * @detail + * - If clone, reset sn to initial value + * - If recover, sn remains unchanged + * + * @param source clone Source file name + * @param: destination clone Destination file name + * @param: userinfo User information + * @param: size File size + * @param: sn version number + * @param: chunksize is the chunk size of the file to be created + * @param stripeUnit stripe size + * @param stripeCount stripe count + * @param poolset poolset of destination file + * @param[out] fileinfo The file information of the target file created + * + * @return error code + */ + int CreateCloneFile(const std::string& source, + const std::string& destination, + const UserInfo_t& userinfo, uint64_t size, uint64_t sn, + uint32_t chunksize, uint64_t stripeUnit, + uint64_t stripeCount, const std::string& poolset, + FInfo* fileinfo); - /** - * @brief lazy 创建clone chunk - * @param:location 数据源的url - * @param:chunkidinfo 目标chunk - * @param:sn chunk的序列号 - * @param:chunkSize chunk的大小 - * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn - * @param: scc是异步回调 - * - * @return 错误码 - */ - int CreateCloneChunk(const std::string &location, - const ChunkIDInfo &chunkidinfo, uint64_t sn, - uint64_t correntSn, uint64_t chunkSize, - SnapCloneClosure* scc); + /** + * @brief lazy Create clone chunk + * @param: location URL of the data source + * @param: chunkidinfo target chunk + * @param: sn chunk's serial number + * @param: chunkSize Chunk size + * @param: correntSn used to modify the chunk when creating CloneChunk + * @param: scc is an asynchronous callback + * + * @return error code + */ + int CreateCloneChunk(const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, + uint64_t correntSn, uint64_t chunkSize, + SnapCloneClosure* scc); - /** - * @brief 实际恢复chunk数据 - * - * @param:chunkidinfo chunkidinfo - * @param:offset 偏移 - * @param:len 长度 - * @param: scc是异步回调 - * - * @return 错误码 - */ - int RecoverChunk(const ChunkIDInfo 
&chunkidinfo, - uint64_t offset, uint64_t len, - SnapCloneClosure* scc); + /** + * @brief Actual recovery chunk data + * + * @param: chunkidinfo chunkidinfo + * @param: offset offset + * @param: len length + * @param: scc is an asynchronous callback + * + * @return error code + */ + int RecoverChunk(const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc); - /** - * @brief 通知mds完成Clone Meta - * - * @param:destination 目标文件 - * @param:userinfo用户信息 - * - * @return 错误码 - */ - int CompleteCloneMeta(const std::string &destination, - const UserInfo_t& userinfo); + /** + * @brief Notify mds to complete Clone Meta + * + * @param: destination target file + * @param: userinfo User Information + * + * @return error code + */ + int CompleteCloneMeta(const std::string& destination, + const UserInfo_t& userinfo); - /** - * @brief 通知mds完成Clone Chunk - * - * @param:destination 目标文件 - * @param:userinfo用户信息 - * - * @return 错误码 - */ - int CompleteCloneFile(const std::string &destination, - const UserInfo_t& userinfo); + /** + * @brief Notify mds to complete Clone Chunk + * + * @param: destination target file + * @param: userinfo User Information + * + * @return error code + */ + int CompleteCloneFile(const std::string& destination, + const UserInfo_t& userinfo); - /** - * 设置clone文件状态 - * @param: filename 目标文件 - * @param: filestatus为要设置的目标状态 - * @param: userinfo用户信息 - * @param: fileId为文件ID信息,非必填 - * - * @return 错误码 - */ - int SetCloneFileStatus(const std::string &filename, - const FileStatus& filestatus, - const UserInfo_t& userinfo, - uint64_t fileID = 0); + /** + * Set clone file status + * @param: filename Target file + * @param: filestatus is the target state to be set + * @param: userinfo User information + * @param: fileId is the file ID information, not required + * + * @return error code + */ + int SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const UserInfo_t& userinfo, uint64_t fileID = 0); - /** - * @brief 获取文件信息 - * - * @param:filename 文件名 - * @param:userinfo 用户信息 - * @param[out] fileInfo 文件信息 - * - * @return 错误码 - */ - int GetFileInfo(const std::string &filename, - const UserInfo_t& userinfo, - FInfo* fileInfo); + /** + * @brief Get file information + * + * @param: filename File name + * @param: userinfo User Information + * @param[out] fileInfo file information + * + * @return error code + */ + int GetFileInfo(const std::string& filename, const UserInfo_t& userinfo, + FInfo* fileInfo); - /** - * @brief 查询或分配文件segment信息 - * - * @param:userinfo 用户信息 - * @param:offset 偏移值 - * @param:segInfo segment信息 - * - * @return 错误码 - */ - int GetOrAllocateSegmentInfo(bool allocate, - uint64_t offset, - const FInfo_t* fi, - SegmentInfo *segInfo); + /** + * @brief Query or allocate file segment information + * + * @param: userinfo User Information + * @param: offset offset value + * @param: segInfo segment information + * + * @return error code + */ + int GetOrAllocateSegmentInfo(bool allocate, uint64_t offset, + const FInfo_t* fi, SegmentInfo* segInfo); - /** - * @brief 为recover rename复制的文件 - * - * @param:userinfo 用户信息 - * @param:originId 被恢复的原始文件Id - * @param:destinationId 克隆出的目标文件Id - * @param:origin 被恢复的原始文件名 - * @param:destination 克隆出的目标文件 - * - * @return 错误码 - */ - int RenameCloneFile(const UserInfo_t& userinfo, - uint64_t originId, - uint64_t destinationId, - const std::string &origin, - const std::string &destination); + /** + * @brief is the file copied for recover rename + * + * @param: userinfo User Information + * @param: originId The 
original file ID that was restored + * @param: destinationId The cloned target file ID + * @param: origin The original file name of the recovered file + * @param: destination The cloned target file + * + * @return error code + */ + int RenameCloneFile(const UserInfo_t& userinfo, uint64_t originId, + uint64_t destinationId, const std::string& origin, + const std::string& destination); - /** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @param: id为文件id,默认值为0,如果用户不指定该值,不会传id到mds - */ - int DeleteFile(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t id = 0); + /** + * Delete files + * @param: userinfo is the user information + * @param: filename The file name to be deleted + * @param: id is the file id, with a default value of 0. If the user does + * not specify this value, the id will not be passed to mds + */ + int DeleteFile(const std::string& filename, const UserInfo_t& userinfo, + uint64_t id = 0); - /** - * 析构,回收资源 - */ - void UnInit(); - /** - * 获取iomanager信息,测试代码使用 - */ - IOManager4Chunk* GetIOManager4Chunk() {return &iomanager4chunk_;} + /** + * Deconstruct and recycle resources + */ + void UnInit(); + /** + * Obtain iomanager information and test code usage + */ + IOManager4Chunk* GetIOManager4Chunk() { return &iomanager4chunk_; } private: - /** - * 获取logicalpool中copyset的serverlist - * @param: lpid是逻辑池id - * @param: csid是逻辑池中的copysetid数据集 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int GetServerList(const LogicPoolID& lpid, - const std::vector& csid); + /** + * Obtain the serverlist of copyset in the logicalpool + * @param: lpid is the logical pool id + * @param: csid is the copysetid dataset in the logical pool + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED + */ + int GetServerList(const LogicPoolID& lpid, + const std::vector& csid); private: - // MDSClient负责与Metaserver通信,所有通信都走这个接口 - MDSClient mdsclient_; + // MDSClient is responsible for communicating with Metaserver, and all + // communication goes through this interface + MDSClient mdsclient_; - // IOManager4Chunk用于管理发向chunkserver端的IO - IOManager4Chunk iomanager4chunk_; + // IOManager4Chunk is used to manage IO sent to the chunkserver end + IOManager4Chunk iomanager4chunk_; - // 用于client 配置读取 - ClientConfig clientconfig_; + // Used for client configuration reading + ClientConfig clientconfig_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_LIBCURVE_SNAPSHOT_H_ diff --git a/src/client/mds_client.cpp b/src/client/mds_client.cpp index 9ace95e823..e8d8a35f6d 100644 --- a/src/client/mds_client.cpp +++ b/src/client/mds_client.cpp @@ -21,11 +21,11 @@ */ #include "src/client/mds_client.h" -#include #include +#include -#include #include +#include #include "src/client/lease_executor.h" #include "src/common/net_common.h" @@ -35,6 +35,7 @@ namespace curve { namespace client { +using curve::common::ChunkServerLocation; using curve::common::NetCommon; using curve::common::TimeUtility; using curve::mds::FileInfo; @@ -42,24 +43,23 @@ using curve::mds::PageFileChunkInfo; using curve::mds::PageFileSegment; using curve::mds::ProtoSession; using curve::mds::StatusCode; -using curve::common::ChunkServerLocation; using curve::mds::topology::CopySetServerInfo; -// rpc发送和mds地址切换状态机 +// Rpc sending and mds address switching state machine int RPCExcutorRetryPolicy::DoRPCTask(RPCFunc rpctask, uint64_t maxRetryTimeMS) { - // 记录上一次正在服务的mds index + // Record the last serving mds 
index int lastWorkingMDSIndex = currentWorkingMDSAddrIndex_; - // 记录当前正在使用的mds index + // Record the currently used mds index int curRetryMDSIndex = currentWorkingMDSAddrIndex_; - // 记录当前mds重试的次数 + // Record the number of current mds retries uint64_t currentMDSRetryCount = 0; - // 执行起始时间点 + // Execution start time point uint64_t startTime = TimeUtility::GetTimeofDayMs(); - // rpc超时时间 + // RPC timeout uint64_t rpcTimeOutMS = retryOpt_.rpcTimeoutMs; // The count of normal retry @@ -68,16 +68,18 @@ int RPCExcutorRetryPolicy::DoRPCTask(RPCFunc rpctask, uint64_t maxRetryTimeMS) { int retcode = -1; bool retryUnlimit = (maxRetryTimeMS == 0); while (GoOnRetry(startTime, maxRetryTimeMS)) { - // 1. 创建当前rpc需要使用的channel和controller,执行rpc任务 + // 1. Create the channels and controllers required for the current RPC + // and execute the RPC task retcode = ExcuteTask(curRetryMDSIndex, rpcTimeOutMS, rpctask); - // 2. 根据rpc返回值进行预处理 + // 2. Preprocessing based on rpc return value if (retcode < 0) { curRetryMDSIndex = PreProcessBeforeRetry( retcode, retryUnlimit, &normalRetryCount, ¤tMDSRetryCount, curRetryMDSIndex, &lastWorkingMDSIndex, &rpcTimeOutMS); continue; - // 3. 此时rpc是正常返回的,更新当前正在服务的mds地址index + // 3. At this point, rpc returns normally and updates the index of + // the currently serving mds address } else { currentWorkingMDSAddrIndex_.store(curRetryMDSIndex); break; @@ -98,11 +100,11 @@ bool RPCExcutorRetryPolicy::GoOnRetry(uint64_t startTimeMS, } int RPCExcutorRetryPolicy::PreProcessBeforeRetry(int status, bool retryUnlimit, - uint64_t *normalRetryCount, - uint64_t *curMDSRetryCount, + uint64_t* normalRetryCount, + uint64_t* curMDSRetryCount, int curRetryMDSIndex, - int *lastWorkingMDSIndex, - uint64_t *timeOutMS) { + int* lastWorkingMDSIndex, + uint64_t* timeOutMS) { int nextMDSIndex = 0; bool rpcTimeout = false; bool needChangeMDS = false; @@ -115,44 +117,48 @@ int RPCExcutorRetryPolicy::PreProcessBeforeRetry(int status, bool retryUnlimit, bthread_usleep(retryOpt_.waitSleepMs * 1000); } - // 1. 访问存在的IP地址,但无人监听:ECONNREFUSED - // 2. 正常发送RPC情况下,对端进程挂掉了:EHOSTDOWN - // 3. 对端server调用了Stop:ELOGOFF - // 4. 对端链接已关闭:ECONNRESET - // 5. 在一个mds节点上rpc失败超过限定次数 - // 在这几种场景下,主动切换mds。 + // 1. Access to an existing IP address, but no one is listening: + // ECONNREFUSED + // 2. In the normal RPC scenario, the remote process has crashed: + // EHOSTDOWN + // 3. The remote server called Stop: ELOGOFF + // 4. The remote connection has been closed: ECONNRESET + // 5. RPC failures on a single MDS node exceed the specified limit. + // In these scenarios, actively switch the MDS. } else if (status == -EHOSTDOWN || status == -ECONNRESET || status == -ECONNREFUSED || status == -brpc::ELOGOFF || *curMDSRetryCount >= retryOpt_.maxFailedTimesBeforeChangeAddr) { needChangeMDS = true; - // 在开启健康检查的情况下,在底层tcp连接失败时 - // rpc请求会本地直接返回 EHOSTDOWN - // 这种情况下,增加一些睡眠时间,避免大量的重试请求占满bthread - // TODO(wuhanqing): 关闭健康检查 + // When health checks are enabled, in the event of a failure in the + // underlying TCP connection, RPC requests will directly return + // EHOSTDOWN locally. In this situation, add some sleep time to avoid a + // large number of retry requests overwhelming bthread. + // TODO(wuhanqing): Disable health checks. 
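The preprocessing above boils down to three small decisions: whether to switch MDS, how to grow the RPC timeout, and whether to keep retrying at all. A minimal self-contained sketch of those rules follows; the function names are illustrative and not part of this patch, and the real policy additionally treats brpc::ELOGOFF as a switch trigger.

```cpp
#include <algorithm>
#include <cerrno>
#include <cstdint>

// Sketch: should the retry loop move to another MDS address?
// Mirrors cases 1-5 in the comment above.
bool ShouldSwitchMds(int status, uint64_t curMdsRetryCount,
                     uint64_t maxFailedTimesBeforeChangeAddr) {
    // Connection-level failures: nobody listening, peer crashed or reset.
    if (status == -ECONNREFUSED || status == -EHOSTDOWN ||
        status == -ECONNRESET) {
        return true;
    }
    // Too many consecutive failures on the current MDS node.
    return curMdsRetryCount >= maxFailedTimesBeforeChangeAddr;
}

// Sketch: clamped exponential backoff for the RPC timeout, matching the
// doubling bounded by retryOpt_.rpcTimeoutMs and retryOpt_.maxRPCTimeoutMS.
uint64_t NextRpcTimeoutMs(uint64_t current, uint64_t baseMs, uint64_t maxMs) {
    uint64_t next = current * 2;
    next = std::min(next, maxMs);   // never above the configured ceiling
    return std::max(next, baseMs);  // never below the configured base
}

// Sketch: the loop guard; maxRetryTimeMS == 0 (the IO path) retries forever,
// otherwise stop once the elapsed time exceeds the budget.
bool GoOnRetrySketch(uint64_t startTimeMS, uint64_t maxRetryTimeMS,
                     uint64_t nowMS) {
    return maxRetryTimeMS == 0 || (nowMS - startTimeMS) < maxRetryTimeMS;
}
```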
if (status == -EHOSTDOWN) { bthread_usleep(retryOpt_.rpcRetryIntervalUS); } } else if (status == -brpc::ERPCTIMEDOUT || status == -ETIMEDOUT) { rpcTimeout = true; needChangeMDS = false; - // 触发超时指数退避 + // Trigger exponential backoff on RPC timeout *timeOutMS *= 2; *timeOutMS = std::min(*timeOutMS, retryOpt_.maxRPCTimeoutMS); *timeOutMS = std::max(*timeOutMS, retryOpt_.rpcTimeoutMs); } - // 获取下一次需要重试的mds索引 + // Obtain the mds index that needs to be retried next time nextMDSIndex = GetNextMDSIndex(needChangeMDS, curRetryMDSIndex, lastWorkingMDSIndex); // NOLINT - // 更新curMDSRetryCount和rpctimeout + // Update curMDSRetryCount and rpctimeout if (nextMDSIndex != curRetryMDSIndex) { *curMDSRetryCount = 0; *timeOutMS = retryOpt_.rpcTimeoutMs; } else { ++(*curMDSRetryCount); - // 还是在当前mds上重试,且rpc不是超时错误,就进行睡眠,然后再重试 + // Still retrying on the current mds; if the rpc error is not a + // timeout, sleep before retrying if (!rpcTimeout) { bthread_usleep(retryOpt_.rpcRetryIntervalUS); } @@ -161,20 +167,21 @@ int RPCExcutorRetryPolicy::PreProcessBeforeRetry(int status, bool retryUnlimit, return nextMDSIndex; } /** - * 根据输入状态获取下一次需要重试的mds索引,mds切换逻辑: - * 记录三个状态:curRetryMDSIndex、lastWorkingMDSIndex、 - * currentWorkingMDSIndex - * 1. 开始的时候curRetryMDSIndex = currentWorkingMDSIndex + * Obtain the next MDS index to retry based on the input state. The MDS + * switching logic is as follows: Record three states: curRetryMDSIndex, + * lastWorkingMDSIndex, currentWorkingMDSIndex + * 1. At the beginning, curRetryMDSIndex = currentWorkingMDSIndex * lastWorkingMDSIndex = currentWorkingMDSIndex - * 2. 如果rpc失败,会触发切换curRetryMDSIndex,如果这时候lastWorkingMDSIndex - * 与currentWorkingMDSIndex相等,这时候会顺序切换到下一个mds索引, - * 如果lastWorkingMDSIndex与currentWorkingMDSIndex不相等,那么 - * 说明有其他接口更新了currentWorkingMDSAddrIndex_,那么本次切换 - * 直接切换到currentWorkingMDSAddrIndex_ + * 2. If an RPC fails, it triggers a switch to curRetryMDSIndex. If at this + * point, lastWorkingMDSIndex is equal to currentWorkingMDSIndex, then + * sequentially switch to the next MDS index. If lastWorkingMDSIndex is not + * equal to currentWorkingMDSIndex, it means that another interface has updated + * currentWorkingMDSAddrIndex_, so this time, switch directly to + * currentWorkingMDSAddrIndex_. */ int RPCExcutorRetryPolicy::GetNextMDSIndex(bool needChangeMDS, int currentRetryIndex, - int *lastWorkingindex) { + int* lastWorkingindex) { int nextMDSIndex = 0; if (std::atomic_compare_exchange_strong( &currentWorkingMDSAddrIndex_, lastWorkingindex, @@ -194,13 +201,14 @@ int RPCExcutorRetryPolicy::ExcuteTask(int mdsindex, uint64_t rpcTimeOutMS, assert(mdsindex >= 0 && mdsindex < static_cast(retryOpt_.addrs.size())); - const std::string &mdsaddr = retryOpt_.addrs[mdsindex]; + const std::string& mdsaddr = retryOpt_.addrs[mdsindex]; brpc::Channel channel; int ret = channel.Init(mdsaddr.c_str(), nullptr); if (ret != 0) { LOG(WARNING) << "Init channel failed! 
addr = " << mdsaddr; - // 返回EHOSTDOWN给上层调用者,促使其切换mds + // Return EHOSTDOWN to the upper level caller, prompting them to switch + // mds return -EHOSTDOWN; } @@ -211,14 +219,15 @@ int RPCExcutorRetryPolicy::ExcuteTask(int mdsindex, uint64_t rpcTimeOutMS, return task(mdsindex, rpcTimeOutMS, &channel, &cntl); } - -MDSClient::MDSClient(const std::string &metricPrefix) - : inited_(false), metaServerOpt_(), mdsClientMetric_(metricPrefix), +MDSClient::MDSClient(const std::string& metricPrefix) + : inited_(false), + metaServerOpt_(), + mdsClientMetric_(metricPrefix), rpcExcutor_() {} MDSClient::~MDSClient() { UnInitialize(); } -LIBCURVE_ERROR MDSClient::Initialize(const MetaServerOption &metaServerOpt) { +LIBCURVE_ERROR MDSClient::Initialize(const MetaServerOption& metaServerOpt) { if (inited_) { LOG(INFO) << "MDSClient already started!"; return LIBCURVE_ERROR::OK; @@ -229,7 +238,7 @@ LIBCURVE_ERROR MDSClient::Initialize(const MetaServerOption &metaServerOpt) { rpcExcutor_.SetOption(metaServerOpt.rpcRetryOpt); std::ostringstream oss; - for (const auto &addr : metaServerOpt_.rpcRetryOpt.addrs) { + for (const auto& addr : metaServerOpt_.rpcRetryOpt.addrs) { oss << " " << addr; } @@ -238,19 +247,15 @@ LIBCURVE_ERROR MDSClient::Initialize(const MetaServerOption &metaServerOpt) { return LIBCURVE_ERROR::OK; } - -void MDSClient::UnInitialize() { - inited_ = false; -} +void MDSClient::UnInitialize() { inited_ = false; } #define RPCTaskDefine \ [&](CURVE_UNUSED int addrindex, CURVE_UNUSED uint64_t rpctimeoutMS, \ brpc::Channel* channel, brpc::Controller* cntl) -> int -LIBCURVE_ERROR MDSClient::OpenFile(const std::string &filename, - const UserInfo_t &userinfo, FInfo_t *fi, - FileEpoch_t *fEpoch, - LeaseSession *lease) { +LIBCURVE_ERROR MDSClient::OpenFile(const std::string& filename, + const UserInfo_t& userinfo, FInfo_t* fi, + FileEpoch_t* fEpoch, LeaseSession* lease) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -278,12 +283,12 @@ LIBCURVE_ERROR MDSClient::OpenFile(const std::string &filename, bool flag = response.has_protosession() && response.has_fileinfo(); if (flag) { - const ProtoSession &leasesession = response.protosession(); + const ProtoSession& leasesession = response.protosession(); lease->sessionID = leasesession.sessionid(); lease->leaseTime = leasesession.leasetime(); lease->createTime = leasesession.createtime(); - const curve::mds::FileInfo &protoFileInfo = response.fileinfo(); + const curve::mds::FileInfo& protoFileInfo = response.fileinfo(); LOG(INFO) << "OpenFile succeeded, filename: " << filename << ", file info " << protoFileInfo.DebugString(); ServiceHelper::ProtoFileInfo2Local(protoFileInfo, fi, fEpoch); @@ -349,9 +354,9 @@ LIBCURVE_ERROR MDSClient::CreateFile(const CreateFileContext& context) { rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::CloseFile(const std::string &filename, - const UserInfo_t &userinfo, - const std::string &sessionid) { +LIBCURVE_ERROR MDSClient::CloseFile(const std::string& filename, + const UserInfo_t& userinfo, + const std::string& sessionid) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -385,9 +390,9 @@ LIBCURVE_ERROR MDSClient::CloseFile(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::GetFileInfo(const std::string &filename, - const UserInfo_t &uinfo, FInfo_t *fi, - FileEpoch_t *fEpoch) { +LIBCURVE_ERROR MDSClient::GetFileInfo(const std::string& filename, + const UserInfo_t& uinfo, FInfo_t* fi, + 
FileEpoch_t* fEpoch) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -421,19 +426,17 @@ LIBCURVE_ERROR MDSClient::GetFileInfo(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::IncreaseEpoch(const std::string& filename, - const UserInfo_t& userinfo, - FInfo_t* fi, - FileEpoch_t *fEpoch, - std::list> *csLocs) { +LIBCURVE_ERROR MDSClient::IncreaseEpoch( + const std::string& filename, const UserInfo_t& userinfo, FInfo_t* fi, + FileEpoch_t* fEpoch, std::list>* csLocs) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; IncreaseFileEpochResponse response; mdsClientMetric_.increaseEpoch.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.increaseEpoch.latency); - MDSClientBase::IncreaseEpoch( - filename, userinfo, &response, cntl, channel); + MDSClientBase::IncreaseEpoch(filename, userinfo, &response, cntl, + channel); if (cntl->Failed()) { mdsClientMetric_.increaseEpoch.eps.count << 1; @@ -445,10 +448,10 @@ LIBCURVE_ERROR MDSClient::IncreaseEpoch(const std::string& filename, LIBCURVE_ERROR retcode; MDSStatusCode2LibcurveError(stcode, &retcode); LOG(ERROR) << "IncreaseEpoch: filename = " << filename - << ", owner = " << userinfo.owner - << ", errocde = " << retcode - << ", error msg = " << StatusCode_Name(stcode) - << ", log id = " << cntl->log_id(); + << ", owner = " << userinfo.owner + << ", errocde = " << retcode + << ", error msg = " << StatusCode_Name(stcode) + << ", log id = " << cntl->log_id(); return retcode; } @@ -466,12 +469,12 @@ LIBCURVE_ERROR MDSClient::IncreaseEpoch(const std::string& filename, csinfo.peerID = response.cslocs(i).chunkserverid(); EndPoint internal; butil::str2endpoint(response.cslocs(i).hostip().c_str(), - response.cslocs(i).port(), &internal); + response.cslocs(i).port(), &internal); EndPoint external; const bool hasExternalIp = response.cslocs(i).has_externalip(); if (hasExternalIp) { butil::str2endpoint(response.cslocs(i).externalip().c_str(), - response.cslocs(i).port(), &external); + response.cslocs(i).port(), &external); } csinfo.internalAddr = PeerAddr(internal); csinfo.externalAddr = PeerAddr(external); @@ -508,10 +511,10 @@ LIBCURVE_ERROR MDSClient::CreateSnapShot(const std::string& filename, if ((stcode == StatusCode::kOK || stcode == StatusCode::kFileUnderSnapShot) && hasinfo) { - FInfo_t *fi = new (std::nothrow) FInfo_t; + FInfo_t* fi = new (std::nothrow) FInfo_t; FileEpoch_t fEpoch; - ServiceHelper::ProtoFileInfo2Local(response.snapshotfileinfo(), - fi, &fEpoch); + ServiceHelper::ProtoFileInfo2Local(response.snapshotfileinfo(), fi, + &fEpoch); *seq = fi->seqnum; delete fi; if (stcode == StatusCode::kOK) { @@ -527,8 +530,8 @@ LIBCURVE_ERROR MDSClient::CreateSnapShot(const std::string& filename, if (hasinfo) { FInfo_t fi; FileEpoch_t fEpoch; - ServiceHelper::ProtoFileInfo2Local(response.snapshotfileinfo(), - &fi, &fEpoch); // NOLINT + ServiceHelper::ProtoFileInfo2Local(response.snapshotfileinfo(), &fi, + &fEpoch); // NOLINT *seq = fi.seqnum; } @@ -545,8 +548,8 @@ LIBCURVE_ERROR MDSClient::CreateSnapShot(const std::string& filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::DeleteSnapShot(const std::string &filename, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::DeleteSnapShot(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq) { auto task = RPCTaskDefine { (void)addrindex; @@ -578,10 +581,10 @@ LIBCURVE_ERROR MDSClient::DeleteSnapShot(const std::string &filename, 
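Every MDS call in this file follows the same pattern: the request is wrapped in an RPCTaskDefine lambda and handed to rpcExcutor_.DoRPCTask, which owns channel creation, address switching, and backoff. A usage sketch of that contract is below; the task body is a placeholder rather than an RPC taken from this patch.

```cpp
#include <brpc/channel.h>
#include <brpc/controller.h>

#include "src/client/mds_client.h"  // RPCExcutorRetryPolicy (this patch)

// Sketch: a caller-side task with the RPCFunc signature used above.
// DoRPCTask retries it until success or until maxRetryTimeMS elapses
// (0 means retry with no time limit, as on the IO path).
int DoSomeRpc(curve::client::RPCExcutorRetryPolicy* executor,
              uint64_t maxRetryTimeMS) {
    auto task = [](int addrindex, uint64_t rpctimeoutMS,
                   brpc::Channel* channel, brpc::Controller* cntl) -> int {
        (void)addrindex;
        (void)channel;  // a concrete stub would issue its RPC through this
        cntl->set_timeout_ms(rpctimeoutMS);
        // ... fill a request and call the generated service stub here ...
        return cntl->Failed() ? -cntl->ErrorCode() : 0;
    };
    return executor->DoRPCTask(task, maxRetryTimeMS);
}
```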
rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string &filename, - const UserInfo_t &userinfo, - const std::vector *seq, - std::map *snapif) { +LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string& filename, + const UserInfo_t& userinfo, + const std::vector* seq, + std::map* snapif) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -612,8 +615,8 @@ LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string &filename, for (int i = 0; i < response.fileinfo_size(); i++) { FInfo_t tempInfo; FileEpoch_t fEpoch; - ServiceHelper::ProtoFileInfo2Local(response.fileinfo(i), - &tempInfo, &fEpoch); + ServiceHelper::ProtoFileInfo2Local(response.fileinfo(i), &tempInfo, + &fEpoch); snapif->insert(std::make_pair(tempInfo.seqnum, tempInfo)); } @@ -628,10 +631,10 @@ LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::GetSnapshotSegmentInfo(const std::string &filename, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::GetSnapshotSegmentInfo(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, uint64_t offset, - SegmentInfo *segInfo) { + SegmentInfo* segInfo) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -692,11 +695,11 @@ LIBCURVE_ERROR MDSClient::GetSnapshotSegmentInfo(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::RefreshSession(const std::string &filename, - const UserInfo_t &userinfo, - const std::string &sessionid, - LeaseRefreshResult *resp, - LeaseSession *lease) { +LIBCURVE_ERROR MDSClient::RefreshSession(const std::string& filename, + const UserInfo_t& userinfo, + const std::string& sessionid, + LeaseRefreshResult* resp, + LeaseSession* lease) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -728,40 +731,39 @@ LIBCURVE_ERROR MDSClient::RefreshSession(const std::string &filename, } switch (stcode) { - case StatusCode::kSessionNotExist: - case StatusCode::kFileNotExists: - resp->status = LeaseRefreshResult::Status::NOT_EXIST; - break; - case StatusCode::kOwnerAuthFail: - resp->status = LeaseRefreshResult::Status::FAILED; - return LIBCURVE_ERROR::AUTHFAIL; - break; - case StatusCode::kOK: - if (response.has_fileinfo()) { - FileEpoch_t fEpoch; - ServiceHelper::ProtoFileInfo2Local(response.fileinfo(), - &resp->finfo, - &fEpoch); - resp->status = LeaseRefreshResult::Status::OK; - } else { - LOG(WARNING) << "session response has no fileinfo!"; - return LIBCURVE_ERROR::FAILED; - } - if (nullptr != lease) { - if (!response.has_protosession()) { - LOG(WARNING) << "session response has no protosession"; + case StatusCode::kSessionNotExist: + case StatusCode::kFileNotExists: + resp->status = LeaseRefreshResult::Status::NOT_EXIST; + break; + case StatusCode::kOwnerAuthFail: + resp->status = LeaseRefreshResult::Status::FAILED; + return LIBCURVE_ERROR::AUTHFAIL; + break; + case StatusCode::kOK: + if (response.has_fileinfo()) { + FileEpoch_t fEpoch; + ServiceHelper::ProtoFileInfo2Local(response.fileinfo(), + &resp->finfo, &fEpoch); + resp->status = LeaseRefreshResult::Status::OK; + } else { + LOG(WARNING) << "session response has no fileinfo!"; return LIBCURVE_ERROR::FAILED; } - ProtoSession leasesession = response.protosession(); - lease->sessionID = leasesession.sessionid(); - lease->leaseTime = leasesession.leasetime(); - lease->createTime = leasesession.createtime(); - } - break; - 
default: - resp->status = LeaseRefreshResult::Status::FAILED; - return LIBCURVE_ERROR::FAILED; - break; + if (nullptr != lease) { + if (!response.has_protosession()) { + LOG(WARNING) << "session response has no protosession"; + return LIBCURVE_ERROR::FAILED; + } + ProtoSession leasesession = response.protosession(); + lease->sessionID = leasesession.sessionid(); + lease->leaseTime = leasesession.leasetime(); + lease->createTime = leasesession.createtime(); + } + break; + default: + resp->status = LeaseRefreshResult::Status::FAILED; + return LIBCURVE_ERROR::FAILED; + break; } return LIBCURVE_ERROR::OK; }; @@ -769,10 +771,10 @@ LIBCURVE_ERROR MDSClient::RefreshSession(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::CheckSnapShotStatus(const std::string &filename, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::CheckSnapShotStatus(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, - FileStatus *filestatus) { + FileStatus* filestatus) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -807,9 +809,9 @@ LIBCURVE_ERROR MDSClient::CheckSnapShotStatus(const std::string &filename, } LIBCURVE_ERROR -MDSClient::GetServerList(const LogicPoolID &logicalpooid, - const std::vector ©setidvec, - std::vector> *cpinfoVec) { +MDSClient::GetServerList(const LogicPoolID& logicalpooid, + const std::vector& copysetidvec, + std::vector>* cpinfoVec) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -875,7 +877,7 @@ MDSClient::GetServerList(const LogicPoolID &logicalpooid, return ReturnError(rpcExcutor_.DoRPCTask(task, 0)); } -LIBCURVE_ERROR MDSClient::GetClusterInfo(ClusterContext *clsctx) { +LIBCURVE_ERROR MDSClient::GetClusterInfo(ClusterContext* clsctx) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -927,19 +929,14 @@ LIBCURVE_ERROR MDSClient::ListPoolset(std::vector* out) { }; return ReturnError( - rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); + rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::CreateCloneFile(const std::string& source, - const std::string& destination, - const UserInfo_t& userinfo, - uint64_t size, - uint64_t sn, - uint32_t chunksize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - FInfo* fileinfo) { +LIBCURVE_ERROR MDSClient::CreateCloneFile( + const std::string& source, const std::string& destination, + const UserInfo_t& userinfo, uint64_t size, uint64_t sn, uint32_t chunksize, + uint64_t stripeUnit, uint64_t stripeCount, const std::string& poolset, + FInfo* fileinfo) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -970,8 +967,8 @@ LIBCURVE_ERROR MDSClient::CreateCloneFile(const std::string& source, if (stcode == StatusCode::kOK) { FileEpoch_t fEpoch; - ServiceHelper::ProtoFileInfo2Local(response.fileinfo(), - fileinfo, &fEpoch); + ServiceHelper::ProtoFileInfo2Local(response.fileinfo(), fileinfo, + &fEpoch); fileinfo->sourceInfo.name = response.fileinfo().clonesource(); fileinfo->sourceInfo.length = response.fileinfo().clonelength(); } @@ -982,20 +979,20 @@ LIBCURVE_ERROR MDSClient::CreateCloneFile(const std::string& source, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::CompleteCloneMeta(const std::string &destination, - const UserInfo_t &userinfo) { +LIBCURVE_ERROR MDSClient::CompleteCloneMeta(const std::string& destination, + const UserInfo_t& userinfo) { return 
SetCloneFileStatus(destination, FileStatus::CloneMetaInstalled, userinfo); } -LIBCURVE_ERROR MDSClient::CompleteCloneFile(const std::string &destination, - const UserInfo_t &userinfo) { +LIBCURVE_ERROR MDSClient::CompleteCloneFile(const std::string& destination, + const UserInfo_t& userinfo) { return SetCloneFileStatus(destination, FileStatus::Cloned, userinfo); } -LIBCURVE_ERROR MDSClient::SetCloneFileStatus(const std::string &filename, - const FileStatus &filestatus, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const UserInfo_t& userinfo, uint64_t fileID) { auto task = RPCTaskDefine { (void)addrindex; @@ -1028,9 +1025,9 @@ LIBCURVE_ERROR MDSClient::SetCloneFileStatus(const std::string &filename, } LIBCURVE_ERROR MDSClient::GetOrAllocateSegment(bool allocate, uint64_t offset, - const FInfo_t *fi, - const FileEpoch_t *fEpoch, - SegmentInfo *segInfo) { + const FInfo_t* fi, + const FileEpoch_t* fEpoch, + SegmentInfo* segInfo) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -1050,23 +1047,23 @@ LIBCURVE_ERROR MDSClient::GetOrAllocateSegment(bool allocate, uint64_t offset, auto statuscode = response.statuscode(); switch (statuscode) { - case StatusCode::kParaError: - LOG(WARNING) << "GetOrAllocateSegment: error param!"; - return LIBCURVE_ERROR::FAILED; - case StatusCode::kOwnerAuthFail: - LOG(WARNING) << "GetOrAllocateSegment: auth failed!"; - return LIBCURVE_ERROR::AUTHFAIL; - case StatusCode::kFileNotExists: - LOG(WARNING) << "GetOrAllocateSegment: file not exists!"; - return LIBCURVE_ERROR::FAILED; - case StatusCode::kSegmentNotAllocated: - LOG(WARNING) << "GetOrAllocateSegment: segment not allocated!"; - return LIBCURVE_ERROR::NOT_ALLOCATE; - case StatusCode::kEpochTooOld: - LOG(WARNING) << "GetOrAllocateSegment return epoch too old!"; - return LIBCURVE_ERROR::EPOCH_TOO_OLD; - default: - break; + case StatusCode::kParaError: + LOG(WARNING) << "GetOrAllocateSegment: error param!"; + return LIBCURVE_ERROR::FAILED; + case StatusCode::kOwnerAuthFail: + LOG(WARNING) << "GetOrAllocateSegment: auth failed!"; + return LIBCURVE_ERROR::AUTHFAIL; + case StatusCode::kFileNotExists: + LOG(WARNING) << "GetOrAllocateSegment: file not exists!"; + return LIBCURVE_ERROR::FAILED; + case StatusCode::kSegmentNotAllocated: + LOG(WARNING) << "GetOrAllocateSegment: segment not allocated!"; + return LIBCURVE_ERROR::NOT_ALLOCATE; + case StatusCode::kEpochTooOld: + LOG(WARNING) << "GetOrAllocateSegment return epoch too old!"; + return LIBCURVE_ERROR::EPOCH_TOO_OLD; + default: + break; } PageFileSegment pfs = response.pagefilesegment(); @@ -1094,7 +1091,7 @@ LIBCURVE_ERROR MDSClient::GetOrAllocateSegment(bool allocate, uint64_t offset, return ReturnError(rpcExcutor_.DoRPCTask(task, 0)); } -LIBCURVE_ERROR MDSClient::DeAllocateSegment(const FInfo *fileInfo, +LIBCURVE_ERROR MDSClient::DeAllocateSegment(const FInfo* fileInfo, uint64_t offset) { auto task = RPCTaskDefine { (void)addrindex; @@ -1133,9 +1130,9 @@ LIBCURVE_ERROR MDSClient::DeAllocateSegment(const FInfo *fileInfo, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::RenameFile(const UserInfo_t &userinfo, - const std::string &origin, - const std::string &destination, +LIBCURVE_ERROR MDSClient::RenameFile(const UserInfo_t& userinfo, + const std::string& origin, + const std::string& destination, uint64_t originId, uint64_t destinationId) { auto task = RPCTaskDefine { @@ -1177,8 +1174,8 @@ LIBCURVE_ERROR 
MDSClient::RenameFile(const UserInfo_t &userinfo, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::Extend(const std::string &filename, - const UserInfo_t &userinfo, uint64_t newsize) { +LIBCURVE_ERROR MDSClient::Extend(const std::string& filename, + const UserInfo_t& userinfo, uint64_t newsize) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -1211,8 +1208,8 @@ LIBCURVE_ERROR MDSClient::Extend(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::DeleteFile(const std::string &filename, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::DeleteFile(const std::string& filename, + const UserInfo_t& userinfo, bool deleteforce, uint64_t fileid) { auto task = RPCTaskDefine { (void)addrindex; @@ -1251,8 +1248,8 @@ LIBCURVE_ERROR MDSClient::DeleteFile(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::RecoverFile(const std::string &filename, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::RecoverFile(const std::string& filename, + const UserInfo_t& userinfo, uint64_t fileid) { auto task = RPCTaskDefine { (void)addrindex; @@ -1285,9 +1282,9 @@ LIBCURVE_ERROR MDSClient::RecoverFile(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::ChangeOwner(const std::string &filename, - const std::string &newOwner, - const UserInfo_t &userinfo) { +LIBCURVE_ERROR MDSClient::ChangeOwner(const std::string& filename, + const std::string& newOwner, + const UserInfo_t& userinfo) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -1326,9 +1323,9 @@ LIBCURVE_ERROR MDSClient::ChangeOwner(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::Listdir(const std::string &dirpath, - const UserInfo_t &userinfo, - std::vector *filestatVec) { +LIBCURVE_ERROR MDSClient::Listdir(const std::string& dirpath, + const UserInfo_t& userinfo, + std::vector* filestatVec) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -1379,8 +1376,8 @@ LIBCURVE_ERROR MDSClient::Listdir(const std::string &dirpath, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, - CopysetPeerInfo *chunkserverInfo) { +LIBCURVE_ERROR MDSClient::GetChunkServerInfo( + const PeerAddr& csAddr, CopysetPeerInfo* chunkserverInfo) { if (!chunkserverInfo) { LOG(ERROR) << "chunkserverInfo pointer is null!"; return LIBCURVE_ERROR::FAILED; @@ -1403,7 +1400,7 @@ LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, std::vector strs; curve::common::SplitString(csAddr.ToString(), ":", &strs); - const std::string &ip = strs[0]; + const std::string& ip = strs[0]; uint64_t port; bool succ = curve::common::StringToUll(strs[1], &port); @@ -1428,7 +1425,7 @@ LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, << ", log id = " << cntl->log_id(); if (statusCode == 0) { - const auto &csInfo = response.chunkserverinfo(); + const auto& csInfo = response.chunkserverinfo(); ChunkServerID csId = csInfo.chunkserverid(); std::string internalIp = csInfo.hostip(); std::string externalIp = internalIp; @@ -1440,9 +1437,8 @@ LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, butil::str2endpoint(internalIp.c_str(), port, &internal); EndPoint external; butil::str2endpoint(externalIp.c_str(), port, &external); 
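GetChunkServerInfo above parses a textual peer address by splitting on ":" before resolving endpoints. A compact sketch of that parse, reusing the helpers this patch already calls (the header path is an assumption), with an explicit size check added:

```cpp
#include <cstdint>
#include <string>
#include <vector>

#include "src/common/string_util.h"  // assumed home of SplitString/StringToUll

// Sketch: split "ip:port" into its parts.
bool ParseIpPort(const std::string& addr, std::string* ip, uint64_t* port) {
    std::vector<std::string> strs;
    curve::common::SplitString(addr, ":", &strs);
    if (strs.size() != 2) {
        return false;  // not of the form "ip:port"
    }
    *ip = strs[0];
    // StringToUll reports malformed numbers, mirroring the check above.
    return curve::common::StringToUll(strs[1], port);
}
```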
- *chunkserverInfo = - CopysetPeerInfo(csId, PeerAddr(internal), - PeerAddr(external)); + *chunkserverInfo = CopysetPeerInfo( + csId, PeerAddr(internal), PeerAddr(external)); return LIBCURVE_ERROR::OK; } else { return LIBCURVE_ERROR::FAILED; @@ -1453,8 +1449,8 @@ LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, } LIBCURVE_ERROR -MDSClient::ListChunkServerInServer(const std::string &serverIp, - std::vector *csIds) { +MDSClient::ListChunkServerInServer(const std::string& serverIp, + std::vector* csIds) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -1497,82 +1493,81 @@ MDSClient::ListChunkServerInServer(const std::string &serverIp, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -void MDSClient::MDSStatusCode2LibcurveError(const StatusCode &status, - LIBCURVE_ERROR *errcode) { +void MDSClient::MDSStatusCode2LibcurveError(const StatusCode& status, + LIBCURVE_ERROR* errcode) { switch (status) { - case StatusCode::kOK: - *errcode = LIBCURVE_ERROR::OK; - break; - case StatusCode::kFileExists: - *errcode = LIBCURVE_ERROR::EXISTS; - break; - case StatusCode::kSnapshotFileNotExists: - case StatusCode::kFileNotExists: - case StatusCode::kDirNotExist: - case StatusCode::kPoolsetNotExist: - *errcode = LIBCURVE_ERROR::NOTEXIST; - break; - case StatusCode::kSegmentNotAllocated: - *errcode = LIBCURVE_ERROR::NOT_ALLOCATE; - break; - case StatusCode::kShrinkBiggerFile: - *errcode = LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE; - break; - case StatusCode::kNotSupported: - *errcode = LIBCURVE_ERROR::NOT_SUPPORT; - break; - case StatusCode::kOwnerAuthFail: - *errcode = LIBCURVE_ERROR::AUTHFAIL; - break; - case StatusCode::kSnapshotFileDeleteError: - *errcode = LIBCURVE_ERROR::DELETE_ERROR; - break; - case StatusCode::kFileUnderSnapShot: - *errcode = LIBCURVE_ERROR::UNDER_SNAPSHOT; - break; - case StatusCode::kFileNotUnderSnapShot: - *errcode = LIBCURVE_ERROR::NOT_UNDERSNAPSHOT; - break; - case StatusCode::kSnapshotDeleting: - *errcode = LIBCURVE_ERROR::DELETING; - break; - case StatusCode::kDirNotEmpty: - *errcode = LIBCURVE_ERROR::NOT_EMPTY; - break; - case StatusCode::kFileOccupied: - *errcode = LIBCURVE_ERROR::FILE_OCCUPIED; - break; - case StatusCode::kSessionNotExist: - *errcode = LIBCURVE_ERROR::SESSION_NOT_EXIST; - break; - case StatusCode::kParaError: - *errcode = LIBCURVE_ERROR::PARAM_ERROR; - break; - case StatusCode::kStorageError: - *errcode = LIBCURVE_ERROR::INTERNAL_ERROR; - break; - case StatusCode::kFileLengthNotSupported: - *errcode = LIBCURVE_ERROR::LENGTH_NOT_SUPPORT; - break; - case ::curve::mds::StatusCode::kCloneStatusNotMatch: - *errcode = LIBCURVE_ERROR::STATUS_NOT_MATCH; - break; - case ::curve::mds::StatusCode::kDeleteFileBeingCloned: - *errcode = LIBCURVE_ERROR::DELETE_BEING_CLONED; - break; - case ::curve::mds::StatusCode::kClientVersionNotMatch: - *errcode = LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT; - break; - case ::curve::mds::StatusCode::kSnapshotFrozen: - *errcode = LIBCURVE_ERROR::SNAPSTHO_FROZEN; - break; - default: - *errcode = LIBCURVE_ERROR::UNKNOWN; - break; + case StatusCode::kOK: + *errcode = LIBCURVE_ERROR::OK; + break; + case StatusCode::kFileExists: + *errcode = LIBCURVE_ERROR::EXISTS; + break; + case StatusCode::kSnapshotFileNotExists: + case StatusCode::kFileNotExists: + case StatusCode::kDirNotExist: + case StatusCode::kPoolsetNotExist: + *errcode = LIBCURVE_ERROR::NOTEXIST; + break; + case StatusCode::kSegmentNotAllocated: + *errcode = LIBCURVE_ERROR::NOT_ALLOCATE; + break; + case 
StatusCode::kShrinkBiggerFile: + *errcode = LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE; + break; + case StatusCode::kNotSupported: + *errcode = LIBCURVE_ERROR::NOT_SUPPORT; + break; + case StatusCode::kOwnerAuthFail: + *errcode = LIBCURVE_ERROR::AUTHFAIL; + break; + case StatusCode::kSnapshotFileDeleteError: + *errcode = LIBCURVE_ERROR::DELETE_ERROR; + break; + case StatusCode::kFileUnderSnapShot: + *errcode = LIBCURVE_ERROR::UNDER_SNAPSHOT; + break; + case StatusCode::kFileNotUnderSnapShot: + *errcode = LIBCURVE_ERROR::NOT_UNDERSNAPSHOT; + break; + case StatusCode::kSnapshotDeleting: + *errcode = LIBCURVE_ERROR::DELETING; + break; + case StatusCode::kDirNotEmpty: + *errcode = LIBCURVE_ERROR::NOT_EMPTY; + break; + case StatusCode::kFileOccupied: + *errcode = LIBCURVE_ERROR::FILE_OCCUPIED; + break; + case StatusCode::kSessionNotExist: + *errcode = LIBCURVE_ERROR::SESSION_NOT_EXIST; + break; + case StatusCode::kParaError: + *errcode = LIBCURVE_ERROR::PARAM_ERROR; + break; + case StatusCode::kStorageError: + *errcode = LIBCURVE_ERROR::INTERNAL_ERROR; + break; + case StatusCode::kFileLengthNotSupported: + *errcode = LIBCURVE_ERROR::LENGTH_NOT_SUPPORT; + break; + case ::curve::mds::StatusCode::kCloneStatusNotMatch: + *errcode = LIBCURVE_ERROR::STATUS_NOT_MATCH; + break; + case ::curve::mds::StatusCode::kDeleteFileBeingCloned: + *errcode = LIBCURVE_ERROR::DELETE_BEING_CLONED; + break; + case ::curve::mds::StatusCode::kClientVersionNotMatch: + *errcode = LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT; + break; + case ::curve::mds::StatusCode::kSnapshotFrozen: + *errcode = LIBCURVE_ERROR::SNAPSTHO_FROZEN; + break; + default: + *errcode = LIBCURVE_ERROR::UNKNOWN; + break; } } - LIBCURVE_ERROR MDSClient::ReturnError(int retcode) { // logic error if (retcode >= 0) { @@ -1581,12 +1576,12 @@ LIBCURVE_ERROR MDSClient::ReturnError(int retcode) { // rpc error or special defined error switch (retcode) { - case -LIBCURVE_ERROR::NOT_SUPPORT: - return LIBCURVE_ERROR::NOT_SUPPORT; - case -LIBCURVE_ERROR::FILE_OCCUPIED: - return LIBCURVE_ERROR::FILE_OCCUPIED; - default: - return LIBCURVE_ERROR::FAILED; + case -LIBCURVE_ERROR::NOT_SUPPORT: + return LIBCURVE_ERROR::NOT_SUPPORT; + case -LIBCURVE_ERROR::FILE_OCCUPIED: + return LIBCURVE_ERROR::FILE_OCCUPIED; + default: + return LIBCURVE_ERROR::FAILED; } } diff --git a/src/client/mds_client.h b/src/client/mds_client.h index 36822fa31c..10c4a682cd 100644 --- a/src/client/mds_client.h +++ b/src/client/mds_client.h @@ -26,10 +26,10 @@ #include #include +#include #include #include #include -#include #include "include/client/libcurve.h" #include "proto/nameserver2.pb.h" @@ -48,28 +48,28 @@ class RPCExcutorRetryPolicy { RPCExcutorRetryPolicy() : retryOpt_(), currentWorkingMDSAddrIndex_(0), cntlID_(1) {} - void SetOption(const MetaServerOption::RpcRetryOption &option) { + void SetOption(const MetaServerOption::RpcRetryOption& option) { retryOpt_ = option; } using RPCFunc = std::function; + brpc::Channel*, brpc::Controller*)>; /** - * 将client与mds的重试相关逻辑抽离 - * @param: task为当前要进行的具体rpc任务 - * @param: maxRetryTimeMS是当前执行最大的重试时间 - * @return: 返回当前RPC的结果 + * Detach the retry related logic between client and mds + * @param: task is the specific rpc task to be carried out currently + * @param: maxRetryTimeMS is the maximum retry time currently executed + * @return: Returns the result of the current RPC */ int DoRPCTask(RPCFunc task, uint64_t maxRetryTimeMS); /** - * 测试使用: 设置当前正在服务的mdsindex + * Test usage: Set the currently serving mdsindex */ void SetCurrentWorkIndex(int index) { 
currentWorkingMDSAddrIndex_.store(index); } /** - * 测试使用:获取当前正在服务的mdsindex + * Test usage: Obtain the currently serving mdsindex */ int GetCurrentWorkIndex() const { return currentWorkingMDSAddrIndex_.load(); @@ -77,105 +77,117 @@ class RPCExcutorRetryPolicy { private: /** - * rpc失败需要重试,根据cntl返回的不同的状态,确定应该做什么样的预处理。 - * 主要做了以下几件事: - * 1. 如果上一次的RPC是超时返回,那么执行rpc 超时指数退避逻辑 - * 2. 如果上一次rpc返回not connect等返回值,会主动触发切换mds地址重试 - * 3. 更新重试信息,比如在当前mds上连续重试的次数 - * @param[in]: status为当前rpc的失败返回的状态 - * @param normalRetryCount The total count of normal retry - * @param[in][out]: curMDSRetryCount当前mds节点上的重试次数,如果切换mds - * 该值会被重置为1. - * @param[in]: curRetryMDSIndex代表当前正在重试的mds索引 - * @param[out]: lastWorkingMDSIndex上一次正在提供服务的mds索引 - * @param[out]: timeOutMS根据status对rpctimeout进行调整 + * When an RPC fails, it needs to be retried, and based on different + * statuses returned by `cntl`, determine what kind of preprocessing should + * be done. The main tasks performed are as follows: + * 1. If the last RPC timed out, execute RPC timeout exponential backoff + * logic. + * 2. If the last RPC returned values like "not connect," it will actively + * trigger MDS address switching and retry. + * 3. Update retry information, such as the number of consecutive retries on + * the current MDS. + * @param[in]: status is the status of the current RPC failure. + * @param[in]: normalRetryCount is the total count of normal retries. + * @param[in][out]: curMDSRetryCount is the number of retries on the current + * MDS node. If MDS switching occurs, this value will be reset to 1. + * @param[in]: curRetryMDSIndex represents the current MDS index being + * retried. + * @param[out]: lastWorkingMDSIndex is the index of the MDS that was + * providing service in the last attempt. + * @param[out]: timeOutMS is adjusted based on the status to control the RPC + * timeout. * - * @return: 返回下一次重试的mds索引 + * @return: Returns the next MDS index for the next retry. */ int PreProcessBeforeRetry(int status, bool retryUnlimit, - uint64_t *normalRetryCount, - uint64_t *curMDSRetryCount, int curRetryMDSIndex, - int *lastWorkingMDSIndex, uint64_t *timeOutMS); + uint64_t* normalRetryCount, + uint64_t* curMDSRetryCount, int curRetryMDSIndex, + int* lastWorkingMDSIndex, uint64_t* timeOutMS); /** - * 执行rpc发送任务 - * @param[in]: mdsindex为mds对应的地址索引 - * @param[in]: rpcTimeOutMS是rpc超时时间 - * @param[in]: task为待执行的任务 - * @return: channel获取成功则返回0,否则-1 + * Execute rpc send task + * @param[in]: mdsindex is the address index corresponding to mds + * @param[in]: rpcTimeOutMS is the rpc timeout time + * @param[in]: task is the task to be executed + * @return: If the channel is successfully obtained, 0 will be returned. + * Otherwise, -1 */ int ExcuteTask(int mdsindex, uint64_t rpcTimeOutMS, RPCExcutorRetryPolicy::RPCFunc task); /** - * 根据输入状态获取下一次需要重试的mds索引,mds切换逻辑: - * 记录三个状态:curRetryMDSIndex、lastWorkingMDSIndex、 - * currentWorkingMDSIndex - * 1. 开始的时候curRetryMDSIndex = currentWorkingMDSIndex - * lastWorkingMDSIndex = currentWorkingMDSIndex - * 2. 
- * 如果rpc失败,会触发切换curRetryMDSIndex,如果这时候lastWorkingMDSIndex - * 与currentWorkingMDSIndex相等,这时候会顺序切换到下一个mds索引, - * 如果lastWorkingMDSIndex与currentWorkingMDSIndex不相等,那么 - * 说明有其他接口更新了currentWorkingMDSAddrIndex_,那么本次切换 - * 直接切换到currentWorkingMDSAddrIndex_ - * @param[in]: needChangeMDS表示当前外围需不需要切换mds,这个值由 - * PreProcessBeforeRetry函数确定 - * @param[in]: currentRetryIndex为当前正在重试的mds索引 - * @param[in][out]: - * lastWorkingindex为上一次正在服务的mds索引,正在重试的mds - * 与正在服务的mds索引可能是不同的mds。 - * @return: 返回下一次要重试的mds索引 + * Get the next MDS index to retry based on the input state. MDS switching + * logic: Record three states: curRetryMDSIndex, lastWorkingMDSIndex, + * currentWorkingMDSIndex. + * 1. At the beginning, curRetryMDSIndex = currentWorkingMDSIndex, + * lastWorkingMDSIndex = currentWorkingMDSIndex. + * 2. If an RPC fails, it will trigger a switch of curRetryMDSIndex. If at + * this point, lastWorkingMDSIndex is equal to currentWorkingMDSIndex, it + * will sequentially switch to the next MDS index. If lastWorkingMDSIndex is + * not equal to currentWorkingMDSIndex, it means that another interface has + * updated currentWorkingMDSAddrIndex_. In this case, the switch will + * directly go to currentWorkingMDSAddrIndex_. + * @param[in]: needChangeMDS indicates whether the caller needs to switch + * MDS. This value is determined by the PreProcessBeforeRetry function. + * @param[in]: currentRetryIndex is the current MDS index being retried. + * @param[in][out]: lastWorkingindex is the index of the MDS that was + * serving most recently. The MDS being retried and the MDS being served + * may be different. + * @return: Returns the next MDS index to retry. */ int GetNextMDSIndex(bool needChangeMDS, int currentRetryIndex, - int *lastWorkingindex); - /** - * 根据输入参数,决定是否继续重试,重试退出条件是重试时间超出最大允许时间 - * IO路径上和非IO路径上的重试时间不一样,非IO路径的重试时间由配置文件的 - * mdsMaxRetryMS参数指定,IO路径为无限循环重试。 + int* lastWorkingindex); + /** + * Based on the input parameters, decide whether to continue retrying. The + * retry loop exits once the retry time exceeds the maximum allowed time. + * The retry time differs between the IO path and non-IO paths: for non-IO + * paths it is specified by the mdsMaxRetryMS parameter in the configuration + * file, while the IO path retries in an infinite loop. 
* @param[in]: startTimeMS - * @param[in]: maxRetryTimeMS为最大重试时间 - * @return:需要继续重试返回true, 否则返回false + * @param[in]: maxRetryTimeMS is the maximum retry time + * @return: Need to continue retrying and return true, otherwise return + * false */ bool GoOnRetry(uint64_t startTimeMS, uint64_t maxRetryTimeMS); /** - * 递增controller id并返回id + *Increment controller id and return id */ uint64_t GetLogId() { return cntlID_.fetch_add(1, std::memory_order_relaxed); } private: - // 执行rpc时必要的配置信息 + // Necessary configuration information for executing rpc MetaServerOption::RpcRetryOption retryOpt_; - // 记录上一次重试过的leader信息 + // Record the leader information from the last retry std::atomic currentWorkingMDSAddrIndex_; - // controller id,用于trace整个rpc IO链路 - // 这里直接用uint64即可,在可预测的范围内,不会溢出 + // controller ID, used to trace the entire RPC IO link + // Simply use uint64 here, within a predictable range, without overflow std::atomic cntlID_; }; - struct LeaseRefreshResult; -// MDSClient是client与MDS通信的唯一窗口 +// MDSClient is the only window where the client communicates with MDS class MDSClient : public MDSClientBase, public std::enable_shared_from_this { public: - explicit MDSClient(const std::string &metricPrefix = ""); + explicit MDSClient(const std::string& metricPrefix = ""); virtual ~MDSClient(); - LIBCURVE_ERROR Initialize(const MetaServerOption &metaopt); + LIBCURVE_ERROR Initialize(const MetaServerOption& metaopt); /** - * 创建文件 - * @param: context创建文件信息 - * @return: 成功返回LIBCURVE_ERROR::OK - * 文件已存在返回LIBCURVE_ERROR::EXIST - * 否则返回LIBCURVE_ERROR::FAILED - * 如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, + * Create File + * @param: context Create file information + * @return: Successfully returned LIBCURVE_ERROR::OK + * File already exists Return LIBCURVE_ERROR::EXIST + * Otherwise, return LIBCURVE_ERROR::FAILED + * If authentication fails, return LIBCURVE_ERROR::AUTHFAIL */ LIBCURVE_ERROR CreateFile(const CreateFileContext& context); /** @@ -190,29 +202,31 @@ class MDSClient : public MDSClientBase, * return LIBCURVE_ERROR::AUTHFAIL for auth fail, * otherwise return LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR OpenFile(const std::string &filename, - const UserInfo_t &userinfo, FInfo_t *fi, - FileEpoch_t *fEpoch, - LeaseSession *lease); + LIBCURVE_ERROR OpenFile(const std::string& filename, + const UserInfo_t& userinfo, FInfo_t* fi, + FileEpoch_t* fEpoch, LeaseSession* lease); /** - * 获取copysetid对应的serverlist信息并更新到metacache - * @param: logicPoolId逻辑池信息 - * @param: csid为要获取的copyset列表 - * @param: cpinfoVec保存获取到的server信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则返回LIBCURVE_ERROR::FAILED + * Obtain the serverlist information corresponding to the copysetid and + * update it to the metacache + * @param: logicPoolId Logical Pool Information + * @param: csid is the list of copysets to obtain + * @param: cpinfoVec saves the obtained server information + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise will be + * returned LIBCURVE_ERROR::FAILED */ LIBCURVE_ERROR - GetServerList(const LogicPoolID &logicPoolId, - const std::vector &csid, - std::vector> *cpinfoVec); + GetServerList(const LogicPoolID& logicPoolId, + const std::vector& csid, + std::vector>* cpinfoVec); /** - * 获取当前mds所属的集群信息 - * @param[out]: clsctx 为要获取的集群信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则返回LIBCURVE_ERROR::FAILED + * Obtain the cluster information to which the current mds belongs + * @param[out]: clsctx is the cluster information to be obtained + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise will be + * returned LIBCURVE_ERROR::FAILED */ - 
LIBCURVE_ERROR GetClusterInfo(ClusterContext *clsctx); + LIBCURVE_ERROR GetClusterInfo(ClusterContext* clsctx); LIBCURVE_ERROR ListPoolset(std::vector* out); @@ -229,9 +243,9 @@ class MDSClient : public MDSClientBase, * otherwise return LIBCURVE_ERROR::FAILED */ LIBCURVE_ERROR GetOrAllocateSegment(bool allocate, uint64_t offset, - const FInfo_t *fi, - const FileEpoch_t *fEpoch, - SegmentInfo *segInfo); + const FInfo_t* fi, + const FileEpoch_t* fEpoch, + SegmentInfo* segInfo); /** * @brief Send DeAllocateSegment request to current working MDS * @param offset segment start offset * @return LIBCURVE_ERROR::OK means success, other value means fail */ - virtual LIBCURVE_ERROR DeAllocateSegment(const FInfo *fileInfo, + virtual LIBCURVE_ERROR DeAllocateSegment(const FInfo* fileInfo, uint64_t offset); /** @@ -253,10 +267,9 @@ class MDSClient : public MDSClientBase, * return LIBCURVE_ERROR::AUTHFAIL for auth fail, * otherwise return LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR GetFileInfo(const std::string &filename, - const UserInfo_t &userinfo, - FInfo_t *fi, - FileEpoch_t *fEpoch); + LIBCURVE_ERROR GetFileInfo(const std::string& filename, + const UserInfo_t& userinfo, FInfo_t* fi, + FileEpoch_t* fEpoch); /** * @brief Increase epoch and return chunkserver locations * @@ -269,29 +282,29 @@ class MDSClient : public MDSClientBase, * * @return LIBCURVE_ERROR::OK for success, LIBCURVE_ERROR::FAILED for fail. */ - LIBCURVE_ERROR IncreaseEpoch(const std::string& filename, - const UserInfo_t& userinfo, - FInfo_t* fi, - FileEpoch_t *fEpoch, - std::list> *csLocs); + LIBCURVE_ERROR IncreaseEpoch( + const std::string& filename, const UserInfo_t& userinfo, FInfo_t* fi, + FileEpoch_t* fEpoch, std::list>* csLocs); /** - * 扩展文件 - * @param: userinfo是用户信息 - * @param: filename文件名 - * @param: newsize新的size + * Extend a file + * @param: userinfo is the user information + * @param: filename File name + * @param: newsize New size */ - LIBCURVE_ERROR Extend(const std::string &filename, - const UserInfo_t &userinfo, uint64_t newsize); + LIBCURVE_ERROR Extend(const std::string& filename, + const UserInfo_t& userinfo, uint64_t newsize); /** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @param: deleteforce是否强制删除而不放入垃圾回收站 - * @param: id为文件id,默认值为0,如果用户不指定该值,不会传id到mds + * Delete a file + * @param: userinfo is the user information + * @param: filename The file name to be deleted + * @param: deleteforce whether to force deletion instead of moving the + * file to the recycle bin + * @param: id is the file id, with a default value of 0. 
If the user does + * not specify this value, the id will not be passed to mds */ - LIBCURVE_ERROR DeleteFile(const std::string &filename, - const UserInfo_t &userinfo, + LIBCURVE_ERROR DeleteFile(const std::string& filename, + const UserInfo_t& userinfo, bool deleteforce = false, uint64_t id = 0); /** @@ -300,253 +313,266 @@ class MDSClient : public MDSClientBase, * @param: filename * @param: fileId is inodeid,default 0 */ - LIBCURVE_ERROR RecoverFile(const std::string &filename, - const UserInfo_t &userinfo, uint64_t fileId); + LIBCURVE_ERROR RecoverFile(const std::string& filename, + const UserInfo_t& userinfo, uint64_t fileId); /** - * 创建版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要创建快照的文件名 - * @param: seq是出参,返回创建快照时文件的版本信息 + * Create a snapshot with version number seq + * @param: userinfo is the user information + * @param: filename is the file name to create the snapshot + * @param: seq is an output parameter that returns the version information + * of the file when creating the snapshot * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * Successfully returned LIBCURVE_ERROR::OK, if authentication fails, return + * LIBCURVE_ERROR::AUTHFAIL, Otherwise, return LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR CreateSnapShot(const std::string &filename, - const UserInfo_t &userinfo, uint64_t *seq); + LIBCURVE_ERROR CreateSnapShot(const std::string& filename, + const UserInfo_t& userinfo, uint64_t* seq); /** - * 删除版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要快照的文件名 - * @param: seq是创建快照时文件的版本信息 + * Delete snapshot with version number seq + * @param: userinfo is the user information + * @param: filename is the file name to be snapshot + * @param: seq is the version information of the file when creating the + * snapshot * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * Successfully returned LIBCURVE_ERROR::OK, if authentication fails, return + * LIBCURVE_ERROR::AUTHFAIL, Otherwise, return LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR DeleteSnapShot(const std::string &filename, - const UserInfo_t &userinfo, uint64_t seq); + LIBCURVE_ERROR DeleteSnapShot(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq); /** - * 以列表的形式获取版本号为seq的snapshot文件信息,snapif是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param: snapif是出参,保存文件的基本信息 + * Obtain snapshot file information with version number seq in the form of a + * list, where snapif is the output parameter + * @param: filename is the file name to be snapshot + * @param: userinfo is the user information + * @param: seq is the version information of the file when creating the + * snapshot + * @param: snapif is a parameter that saves the basic information of the + * file * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED - */ - LIBCURVE_ERROR ListSnapShot(const std::string &filename, - const UserInfo_t &userinfo, - const std::vector *seq, - std::map *snapif); - /** - * 获取快照的chunk信息并更新到metacache,segInfo是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param: offset是文件内的偏移 - * @param: segInfo是出参,保存chunk信息 + * Successfully returned LIBCURVE_ERROR::OK, if authentication fails, return + * LIBCURVE_ERROR::AUTHFAIL, Otherwise, return LIBCURVE_ERROR::FAILED + */ + LIBCURVE_ERROR ListSnapShot(const std::string& filename, + const UserInfo_t& userinfo, + const std::vector* seq, + 
std::map* snapif); + /** + * Obtain the chunk information of the snapshot and update it to the + * metacache, where segInfo is the output parameter + * @param: filename is the file name to be snapshot + * @param: userinfo is the user information + * @param: seq is the version information of the file when creating the + * snapshot + * @param: offset is the offset within the file + * @param: segInfo is the output parameter, saving chunk information * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * Successfully returned LIBCURVE_ERROR::OK, if authentication fails, return + * LIBCURVE_ERROR::AUTHFAIL, Otherwise, return LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR GetSnapshotSegmentInfo(const std::string &filename, - const UserInfo_t &userinfo, + LIBCURVE_ERROR GetSnapshotSegmentInfo(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, uint64_t offset, - SegmentInfo *segInfo); - /** - * 获取快照状态 - * @param: filenam文件名 - * @param: userinfo是用户信息 - * @param: seq是文件版本号信息 - * @param[out]: filestatus为快照状态 - */ - LIBCURVE_ERROR CheckSnapShotStatus(const std::string &filename, - const UserInfo_t &userinfo, uint64_t seq, - FileStatus *filestatus); - - /** - * 文件接口在打开文件的时候需要与mds保持心跳,refresh用来续约 - * 续约结果将会通过LeaseRefreshResult* resp返回给调用层 - * @param: filename是要续约的文件名 - * @param: sessionid是文件的session信息 - * @param: resp是mds端传递过来的lease信息 - * @param[out]: lease当前文件的session信息 + SegmentInfo* segInfo); + /** + * Get snapshot status + * @param: filename file name + * @param: userinfo is the user information + * @param: seq is the file version number information + * @param[out]: filestatus is the snapshot status + */ + LIBCURVE_ERROR CheckSnapShotStatus(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, + FileStatus* filestatus); + + /** + * The file interface needs to maintain a heartbeat with MDS when opening + * files, and refresh is used to renew the lease. The renewal result will + * be returned to the calling layer through LeaseRefreshResult* resp + * @param: filename is the file name to be renewed + * @param: sessionid is the session information of the file + * @param: resp is the lease information passed back from the mds side + * @param[out]: lease the session information of the current file * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED - */ - LIBCURVE_ERROR RefreshSession(const std::string &filename, - const UserInfo_t &userinfo, - const std::string &sessionid, - LeaseRefreshResult *resp, - LeaseSession *lease = nullptr); - /** - * 关闭文件,需要携带sessionid,这样mds端会在数据库删除该session信息 - * @param: filename是要续约的文件名 - * @param: sessionid是文件的session信息 + * Successfully returned LIBCURVE_ERROR::OK, if authentication fails, return + * LIBCURVE_ERROR::AUTHFAIL, Otherwise, return LIBCURVE_ERROR::FAILED + */ + LIBCURVE_ERROR RefreshSession(const std::string& filename, + const UserInfo_t& userinfo, + const std::string& sessionid, + LeaseRefreshResult* resp, + LeaseSession* lease = nullptr); + /** + * To close the file, it is necessary to carry the session ID, so that the + * mds side will delete the session information in the database + * @param: filename is the file name to be closed + * @param: sessionid is the session information of the file * @return: + * Successfully returned LIBCURVE_ERROR::OK, if authentication fails, return + * LIBCURVE_ERROR::AUTHFAIL, Otherwise, return 
LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR CloseFile(const std::string &filename, - const UserInfo_t &userinfo, - const std::string &sessionid); + LIBCURVE_ERROR CloseFile(const std::string& filename, + const UserInfo_t& userinfo, + const std::string& sessionid); /** - * @brief 创建clone文件 + * @brief Create clone file * @detail - * - 若是clone,sn重置为初始值 - * - 若是recover,sn不变 + * - If clone, reset sn to initial value + * - If recover, sn remains unchanged * - * @param source 克隆源文件名 - * @param:destination clone目标文件名 - * @param:userinfo 用户信息 - * @param:size 文件大小 - * @param:sn 版本号 - * @param:chunksize是创建文件的chunk大小 + * @param source Clone source file name + * @param: destination clone Destination file name + * @param: userinfo User Information + * @param: size File size + * @param: sn version number + * @param: chunksize is the chunk size of the created file * @param stripeUnit stripe size * @param stripeCount stripe count - * @param[out] destFileId 创建的目标文件的Id + * @param[out] destFileId The ID of the target file created * - * @return 错误码 + * @return error code */ - LIBCURVE_ERROR CreateCloneFile(const std::string &source, - const std::string &destination, - const UserInfo_t &userinfo, uint64_t size, + LIBCURVE_ERROR CreateCloneFile(const std::string& source, + const std::string& destination, + const UserInfo_t& userinfo, uint64_t size, uint64_t sn, uint32_t chunksize, uint64_t stripeUnit, uint64_t stripeCount, - const std::string& poolset, - FInfo *fileinfo); + const std::string& poolset, FInfo* fileinfo); /** - * @brief 通知mds完成Clone Meta + * @brief Notify mds to complete Clone Meta * - * @param:destination 目标文件 - * @param:userinfo用户信息 + * @param: destination target file + * @param: userinfo User Information * - * @return 错误码 + * @return error code */ - LIBCURVE_ERROR CompleteCloneMeta(const std::string &destination, - const UserInfo_t &userinfo); + LIBCURVE_ERROR CompleteCloneMeta(const std::string& destination, + const UserInfo_t& userinfo); /** - * @brief 通知mds完成Clone Chunk + * @brief Notify mds to complete Clone Chunk * - * @param:destination 目标文件 - * @param:userinfo用户信息 + * @param: destination target file + * @param: userinfo User Information * - * @return 错误码 + * @return error code */ - LIBCURVE_ERROR CompleteCloneFile(const std::string &destination, - const UserInfo_t &userinfo); + LIBCURVE_ERROR CompleteCloneFile(const std::string& destination, + const UserInfo_t& userinfo); /** - * @brief 通知mds完成Clone Meta + * @brief Set clone file status * - * @param: filename 目标文件 - * @param: filestatus为要设置的目标状态 - * @param: userinfo用户信息 - * @param: fileId为文件ID信息,非必填 + * @param: filename Target file + * @param: filestatus is the target state to be set + * @param: userinfo User information + * @param: fileId is the file ID information, not required * - * @return 错误码 + * @return error code */ - LIBCURVE_ERROR SetCloneFileStatus(const std::string &filename, - const FileStatus &filestatus, - const UserInfo_t &userinfo, + LIBCURVE_ERROR SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const UserInfo_t& userinfo, uint64_t fileID = 0); /** - * @brief 重名文件 + * @brief Rename a file * - * @param:userinfo 用户信息 - * @param:originId 被恢复的原始文件Id - * @param:destinationId 克隆出的目标文件Id - * @param:origin 被恢复的原始文件名 - * @param:destination 克隆出的目标文件 + * @param: userinfo User Information + * @param: originId The original file ID that was restored + * @param: destinationId The cloned target file ID + * @param: origin The original file name of the recovered file + * @param: destination The 
/** - * @brief 重名文件 + * @brief Rename a file * - * @param:userinfo 用户信息 - * @param:originId 被恢复的原始文件Id - * @param:destinationId 克隆出的目标文件Id - * @param:origin 被恢复的原始文件名 - * @param:destination 克隆出的目标文件 + * @param: userinfo User information + * @param: originId ID of the original file being recovered + * @param: destinationId ID of the cloned destination file + * @param: origin Name of the original file being recovered + * @param: destination The cloned destination file * - * @return 错误码 + * @return error code */ - LIBCURVE_ERROR RenameFile(const UserInfo_t &userinfo, - const std::string &origin, - const std::string &destination, + LIBCURVE_ERROR RenameFile(const UserInfo_t& userinfo, + const std::string& origin, + const std::string& destination, uint64_t originId = 0, uint64_t destinationId = 0); /** - * 变更owner - * @param: filename待变更的文件名 - * @param: newOwner新的owner信息 - * @param: userinfo执行此操作的user信息,只有root用户才能执行变更 - * @return: 成功返回0, - * 否则返回LIBCURVE_ERROR::FAILED,LIBCURVE_ERROR::AUTHFAILED等 + * Change owner + * @param: filename The file whose owner is to be changed + * @param: newOwner The new owner + * @param: userinfo User information for this operation; only the root user + * can change the owner + * @return: Returns 0 on success; otherwise returns + * LIBCURVE_ERROR::FAILED, LIBCURVE_ERROR::AUTHFAILED, etc. */ - LIBCURVE_ERROR ChangeOwner(const std::string &filename, - const std::string &newOwner, - const UserInfo_t &userinfo); + LIBCURVE_ERROR ChangeOwner(const std::string& filename, + const std::string& newOwner, + const UserInfo_t& userinfo); /** - * 枚举目录内容 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @param[out]: filestatVec当前文件夹内的文件信息 + * Enumerate directory contents + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @param[out]: filestatVec Information about the files in the directory */ - LIBCURVE_ERROR Listdir(const std::string &dirpath, - const UserInfo_t &userinfo, - std::vector<FileStatInfo> *filestatVec); + LIBCURVE_ERROR Listdir(const std::string& dirpath, + const UserInfo_t& userinfo, + std::vector<FileStatInfo>* filestatVec); /** - * 向mds注册client metric监听的地址和端口 - * @param: ip客户端ip - * @param: dummyServerPort为监听端口 - * @return: 成功返回0, - * 否则返回LIBCURVE_ERROR::FAILED,LIBCURVE_ERROR::AUTHFAILED等 + * Register the client metric listening address and port with the mds + * @param: ip is the client IP + * @param: dummyServerPort is the listening port + * @return: Returns 0 on success; otherwise returns + * LIBCURVE_ERROR::FAILED, LIBCURVE_ERROR::AUTHFAILED, etc. */ - LIBCURVE_ERROR Register(const std::string &ip, uint16_t port); + LIBCURVE_ERROR Register(const std::string& ip, uint16_t port); /** - * 获取chunkserver信息 - * @param[in] addr chunkserver地址信息 - * @param[out] chunkserverInfo 待获取的信息 - * @return:成功返回ok + * Obtain chunkserver information + * @param[in] addr chunkserver address information + * @param[out] chunkserverInfo The information to be obtained + * @return: Returns OK on success */ LIBCURVE_ERROR - GetChunkServerInfo(const PeerAddr &addr, - CopysetPeerInfo<ChunkServerID> *chunkserverInfo); + GetChunkServerInfo(const PeerAddr& addr, + CopysetPeerInfo<ChunkServerID>* chunkserverInfo); /** - * 获取server上所有chunkserver的id - * @param[in]: ip为server的ip地址 - * @param[out]: csIds用于保存chunkserver的id - * @return: 成功返回LIBCURVE_ERROR::OK,失败返回LIBCURVE_ERROR::FAILED + * Obtain the IDs of all chunkservers on a server + * @param[in]: ip is the IP address of the server + * @param[out]: csIds holds the returned chunkserver IDs + * @return: LIBCURVE_ERROR::OK on success, LIBCURVE_ERROR::FAILED on failure */ - LIBCURVE_ERROR ListChunkServerInServer(const std::string &ip, - std::vector<ChunkServerID> *csIds); + LIBCURVE_ERROR ListChunkServerInServer(const std::string& ip, + std::vector<ChunkServerID>* csIds); /** - * 析构,回收资源 + * Tear down and release resources */ void UnInitialize(); /** - * 将mds侧错误码对应到libcurve错误码 - * @param: statecode为mds一侧错误码 - * @param[out]: 出参errcode为libcurve一侧的错误码 + * Map the mds-side error code to the corresponding libcurve 
error code + * @param: statecode is the error code on the mds side + * @param[out]: The errcode of the output parameter is the error code on the + * side of libcurve */ - void MDSStatusCode2LibcurveError(const ::curve::mds::StatusCode &statcode, - LIBCURVE_ERROR *errcode); + void MDSStatusCode2LibcurveError(const ::curve::mds::StatusCode& statcode, + LIBCURVE_ERROR* errcode); LIBCURVE_ERROR ReturnError(int retcode); private: - // 初始化标志,放置重复初始化 + // Initialization flag, placing duplicate initialization bool inited_ = false; - // 当前模块的初始化option配置 + // Initialization option configuration for the current module MetaServerOption metaServerOpt_; - // client与mds通信的metric统计 + // Metric statistics of communication between client and mds MDSClientMetric mdsClientMetric_; RPCExcutorRetryPolicy rpcExcutor_; diff --git a/src/client/mds_client_base.h b/src/client/mds_client_base.h index 64178e43e9..6cb3340231 100644 --- a/src/client/mds_client_base.h +++ b/src/client/mds_client_base.h @@ -38,120 +38,120 @@ namespace curve { namespace client { -using curve::mds::OpenFileRequest; -using curve::mds::OpenFileResponse; -using curve::mds::CreateFileRequest; -using curve::mds::CreateFileResponse; +using curve::mds::ChangeOwnerRequest; +using curve::mds::ChangeOwnerResponse; +using curve::mds::CheckSnapShotStatusRequest; +using curve::mds::CheckSnapShotStatusResponse; using curve::mds::CloseFileRequest; using curve::mds::CloseFileResponse; -using curve::mds::RenameFileRequest; -using curve::mds::RenameFileResponse; -using curve::mds::ExtendFileRequest; -using curve::mds::ExtendFileResponse; +using curve::mds::CreateCloneFileRequest; +using curve::mds::CreateCloneFileResponse; +using curve::mds::CreateFileRequest; +using curve::mds::CreateFileResponse; +using curve::mds::CreateSnapShotRequest; +using curve::mds::CreateSnapShotResponse; +using curve::mds::DeAllocateSegmentRequest; +using curve::mds::DeAllocateSegmentResponse; using curve::mds::DeleteFileRequest; using curve::mds::DeleteFileResponse; -using curve::mds::RecoverFileRequest; -using curve::mds::RecoverFileResponse; +using curve::mds::DeleteSnapShotRequest; +using curve::mds::DeleteSnapShotResponse; +using curve::mds::ExtendFileRequest; +using curve::mds::ExtendFileResponse; using curve::mds::GetFileInfoRequest; using curve::mds::GetFileInfoResponse; +using curve::mds::GetOrAllocateSegmentRequest; +using curve::mds::GetOrAllocateSegmentResponse; +using curve::mds::IncreaseFileEpochRequest; using curve::mds::IncreaseFileEpochResponse; -using curve::mds::DeleteSnapShotRequest; -using curve::mds::DeleteSnapShotResponse; -using curve::mds::ReFreshSessionRequest; -using curve::mds::ReFreshSessionResponse; using curve::mds::ListDirRequest; using curve::mds::ListDirResponse; -using curve::mds::ChangeOwnerRequest; -using curve::mds::ChangeOwnerResponse; -using curve::mds::CreateSnapShotRequest; -using curve::mds::CreateSnapShotResponse; -using curve::mds::CreateCloneFileRequest; -using curve::mds::CreateCloneFileResponse; -using curve::mds::SetCloneFileStatusRequest; -using curve::mds::SetCloneFileStatusResponse; -using curve::mds::GetOrAllocateSegmentRequest; -using curve::mds::GetOrAllocateSegmentResponse; -using curve::mds::DeAllocateSegmentRequest; -using curve::mds::DeAllocateSegmentResponse; -using curve::mds::CheckSnapShotStatusRequest; -using curve::mds::CheckSnapShotStatusResponse; using curve::mds::ListSnapShotFileInfoRequest; using curve::mds::ListSnapShotFileInfoResponse; -using curve::mds::GetOrAllocateSegmentRequest; -using 
curve::mds::GetOrAllocateSegmentResponse; +using curve::mds::OpenFileRequest; +using curve::mds::OpenFileResponse; +using curve::mds::RecoverFileRequest; +using curve::mds::RecoverFileResponse; +using curve::mds::ReFreshSessionRequest; +using curve::mds::ReFreshSessionResponse; +using curve::mds::RenameFileRequest; +using curve::mds::RenameFileResponse; +using curve::mds::SetCloneFileStatusRequest; +using curve::mds::SetCloneFileStatusResponse; +using curve::mds::topology::GetChunkServerInfoResponse; using curve::mds::topology::GetChunkServerListInCopySetsRequest; using curve::mds::topology::GetChunkServerListInCopySetsResponse; using curve::mds::topology::GetClusterInfoRequest; using curve::mds::topology::GetClusterInfoResponse; -using curve::mds::topology::GetChunkServerInfoResponse; using curve::mds::topology::ListChunkServerResponse; -using curve::mds::IncreaseFileEpochRequest; -using curve::mds::IncreaseFileEpochResponse; using curve::mds::topology::ListPoolsetRequest; using curve::mds::topology::ListPoolsetResponse; extern const char* kRootUserName; -// MDSClientBase将所有与mds的RPC接口抽离,与业务逻辑解耦 -// 这里只负责rpc的发送,具体的业务处理逻辑通过reponse和controller向上 -// 返回给调用者,有调用者处理 +// MDSClientBase abstracts all RPC interfaces with the MDS, decoupling them from +// business logic. Here, it is responsible only for sending RPC requests, while +// the specific business logic processing is returned to the caller through +// responses and controllers, which are handled by the caller. class MDSClientBase { public: /** - * 打开文件 - * @param: filename是文件名 - * @param: userinfo为user信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Open File + * @param: filename is the file name + * @param: userinfo is the user information + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void OpenFile(const std::string& filename, - const UserInfo_t& userinfo, - OpenFileResponse* response, - brpc::Controller* cntl, + void OpenFile(const std::string& filename, const UserInfo_t& userinfo, + OpenFileResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** - * 创建文件 - * @param: filename创建文件的文件名 - * @param: userinfo为user信息 - * @param: size文件长度 - * @param: normalFile表示创建的是普通文件还是目录文件,如果是目录则忽略size - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Create File + * @param: filename The file name used to create the file + * @param: userinfo is the user information + * @param: size File length + * @param: normalFile indicates whether the created file is a regular file + * or a directory file. 
If it is a directory, size is ignored + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void CreateFile(const CreateFileContext& context, - CreateFileResponse* response, - brpc::Controller* cntl, + CreateFileResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** - * 关闭文件,需要携带sessionid,这样mds端会在数据库删除该session信息 - * @param: filename是要续约的文件名 - * @param: userinfo为user信息 - * @param: sessionid是文件的session信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * To close the file, it is necessary to carry the sessionid, so that the + * mds side will delete the session information in the database + * @param: filename is the file name to be renewed + * @param: userinfo is the user information + * @param: sessionid is the session information of the file + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void CloseFile(const std::string& filename, - const UserInfo_t& userinfo, - const std::string& sessionid, - CloseFileResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void CloseFile(const std::string& filename, const UserInfo_t& userinfo, + const std::string& sessionid, CloseFileResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取文件信息,fi是出参 - * @param: filename是文件名 - * @param: userinfo为user信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain file information, where fi is the output parameter + * @param: filename is the file name + * @param: userinfo is the user information + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void GetFileInfo(const std::string& filename, - const UserInfo_t& userinfo, - GetFileInfoResponse* response, - brpc::Controller* cntl, + void GetFileInfo(const std::string& filename, const UserInfo_t& userinfo, + GetFileInfoResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** @@ -164,175 +164,177 @@ class MDSClientBase { * @param[in] channel rpc channel * */ - void IncreaseEpoch(const std::string& filename, - const UserInfo_t& userinfo, - IncreaseFileEpochResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void IncreaseEpoch(const std::string& filename, const UserInfo_t& userinfo, + IncreaseFileEpochResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 创建版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要创建快照的文件名 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Create a snapshot with version number seq + * @param: userinfo is the user information + * @param: filename is the file name to create the snapshot + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is 
the current channel established with MDS */ - void CreateSnapShot(const std::string& filename, - const UserInfo_t& userinfo, + void CreateSnapShot(const std::string& filename, const UserInfo_t& userinfo, CreateSnapShotResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 删除版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要快照的文件名 - * @param: seq是创建快照时文件的版本信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Delete the snapshot with version number seq + * @param: userinfo is the user information + * @param: filename is the name of the snapshotted file + * @param: seq is the file version at the time the snapshot was created + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void DeleteSnapShot(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t seq, - DeleteSnapShotResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void DeleteSnapShot(const std::string& filename, const UserInfo_t& userinfo, + uint64_t seq, DeleteSnapShotResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 以列表的形式获取版本号为seq的snapshot文件信息,snapif是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain the snapshot file information for the versions in seq as a list; + * snapif is the output parameter + * @param: filename is the name of the snapshotted file + * @param: userinfo is the user information + * @param: seq is the file version at the time the snapshot was created + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void ListSnapShot(const std::string& filename, - const UserInfo_t& userinfo, + void ListSnapShot(const std::string& filename, const UserInfo_t& userinfo, const std::vector<uint64_t>* seq, ListSnapShotFileInfoResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel);
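Every wrapper in this header follows the convention restated in these comments: the method only sends the RPC, and the caller owns the controller and response. A sketch of the caller-side pattern for CreateSnapShot follows; the channel address, timeout, and response handling are assumptions for illustration:

```cpp
// Sketch only: caller-side handling for an MDSClientBase wrapper.
brpc::Channel channel;
if (channel.Init("127.0.0.1:6666", nullptr) != 0) {  // assumed mds address
    return;
}
brpc::Controller cntl;
cntl.set_timeout_ms(500);
curve::mds::CreateSnapShotResponse response;
mdsClientBase.CreateSnapShot("/vol1", userinfo, &response, &cntl, &channel);
if (cntl.Failed()) {
    // RPC-level failure: the caller decides whether to retry.
    LOG(WARNING) << "CreateSnapShot rpc failed: " << cntl.ErrorText();
} else {
    // Business handling stays with the caller, e.g. mapping the response
    // status code to a LIBCURVE_ERROR and reading the snapshot sequence.
}
```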
/** - * 获取快照的chunk信息并更新到metacache,segInfo是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param: offset是文件内的偏移 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain the chunk information of the snapshot and update it to the + * metacache; segInfo is the output parameter + * @param: filename is the name of the snapshotted file + * @param: userinfo is the user information + * @param: seq is the file version at the time the snapshot was created + * @param: offset is the offset within the file + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void GetSnapshotSegmentInfo(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t seq, + const UserInfo_t& userinfo, uint64_t seq, uint64_t offset, GetOrAllocateSegmentResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 文件接口在打开文件的时候需要与mds保持心跳,refresh用来续约 - * 续约结果将会通过LeaseRefreshResult* resp返回给调用层 - * @param: filename是要续约的文件名 - * @param: sessionid是文件的session信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * The file interface needs to maintain a heartbeat with MDS while a file is + * open; refresh is used to renew the lease. The renewal result is returned + * to the calling layer through LeaseRefreshResult* resp + * @param: filename is the name of the file whose lease is to be renewed + * @param: sessionid is the session information of the file + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void RefreshSession(const std::string& filename, - const UserInfo_t& userinfo, + void RefreshSession(const std::string& filename, const UserInfo_t& userinfo, const std::string& sessionid, ReFreshSessionResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取快照状态 - * @param: filenam文件名 - * @param: userinfo是用户信息 - * @param: seq是文件版本号信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Get snapshot status + * @param: filename File name + * @param: userinfo is the user information + * @param: seq is the file version number information + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void CheckSnapShotStatus(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t seq, + const UserInfo_t& userinfo, uint64_t seq, CheckSnapShotStatusResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取copysetid对应的serverlist信息并更新到metacache - * @param: logicPoolId逻辑池信息 - * @param: copysetidvec为要获取的copyset列表 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain the serverlist information corresponding to the copysetid and + * update it to the metacache + * @param: logicPoolId Logical pool information + * @param: copysetidvec is the list of copysets to obtain + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void GetServerList(const LogicPoolID& logicalpooid, const std::vector<CopysetID>& copysetidvec, GetChunkServerListInCopySetsResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取mds对应的cluster id - * @param[out]: response为该rpc的respoonse,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]: channel是当前与mds建立的通道 + * Obtain the cluster ID corresponding to the mds + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl 
is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void GetClusterInfo(GetClusterInfoResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); - void ListPoolset(ListPoolsetResponse* response, - brpc::Controller* cntl, + void ListPoolset(ListPoolsetResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** - * 创建clone文件 - * @param source 克隆源文件名 - * @param:destination clone目标文件名 - * @param:userinfo 用户信息 - * @param:size 文件大小 - * @param:sn 版本号 - * @param:chunksize是创建文件的chunk大小 + * Create clone file + * @param source Clone source file name + * @param: destination clone Destination file name + * @param: userinfo User Information + * @param: size File size + * @param: sn version number + * @param: chunksize is the chunk size of the created file * @param stripeUnit stripe size * @param stripeCount stripe count - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void CreateCloneFile(const std::string& source, const std::string& destination, - const UserInfo_t& userinfo, - uint64_t size, - uint64_t sn, - uint32_t chunksize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, + const UserInfo_t& userinfo, uint64_t size, uint64_t sn, + uint32_t chunksize, uint64_t stripeUnit, + uint64_t stripeCount, const std::string& poolset, CreateCloneFileResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * @brief 通知mds完成Clone Meta - * @param: filename 目标文件 - * @param: filestatus为要设置的目标状态 - * @param: userinfo用户信息 - * @param: fileId为文件ID信息,非必填 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * @brief Notify mds to complete Clone Meta + * @param: filename Target file + * @param: filestatus is the target state to be set + * @param: userinfo User information + * @param: fileId is the file ID information, not required + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void SetCloneFileStatus(const std::string& filename, const FileStatus& filestatus, - const UserInfo_t& userinfo, - uint64_t fileID, + const UserInfo_t& userinfo, uint64_t fileID, SetCloneFileStatusResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** * Get or Alloc SegmentInfo,and update to Metacache @@ -344,68 +346,63 @@ class MDSClientBase { * @param[in|out]: cntl rpc controller * @param[in]:channel rpc channel */ - void GetOrAllocateSegment(bool allocate, - uint64_t offset, - const FInfo_t* fi, - const FileEpoch_t *fEpoch, + void GetOrAllocateSegment(bool allocate, uint64_t offset, const FInfo_t* fi, + const FileEpoch_t* fEpoch, GetOrAllocateSegmentResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); void DeAllocateSegment(const FInfo* fileInfo, uint64_t 
segmentOffset, DeAllocateSegmentResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** - * @brief 重名文件 - * @param:userinfo 用户信息 - * @param:originId 被恢复的原始文件Id - * @param:destinationId 克隆出的目标文件Id - * @param:origin 被恢复的原始文件名 - * @param:destination 克隆出的目标文件 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * @brief Rename a file + * @param: userinfo User information + * @param: originId ID of the original file being recovered + * @param: destinationId ID of the cloned destination file + * @param: origin Name of the original file being recovered + * @param: destination The cloned destination file + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void RenameFile(const UserInfo_t& userinfo, - const std::string &origin, - const std::string &destination, - uint64_t originId, - uint64_t destinationId, - RenameFileResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void RenameFile(const UserInfo_t& userinfo, const std::string& origin, + const std::string& destination, uint64_t originId, + uint64_t destinationId, RenameFileResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 扩展文件 - * @param: userinfo是用户信息 - * @param: filename文件名 - * @param: newsize新的size - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Extend a file + * @param: userinfo is the user information + * @param: filename File name + * @param: newsize New size + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void Extend(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t newsize, - ExtendFileResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void Extend(const std::string& filename, const UserInfo_t& userinfo, + uint64_t newsize, ExtendFileResponse* response, + brpc::Controller* cntl, brpc::Channel* channel);
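DeleteFile and RecoverFile (just below) are two halves of the recycle-bin mechanism: a non-forced delete moves the file into the recycle bin, and RecoverFile brings it back. A sketch of that pairing at the mds-client level; the client object and its exact signatures are assumptions rather than the verbatim API:

```cpp
// Sketch only: soft delete followed by recovery, per the semantics above.
// fileid 0 means "no explicit file id supplied", as the comments describe.
LIBCURVE_ERROR ret = mdsClient.DeleteFile("/vol1", userinfo,
                                          /*deleteforce=*/false,
                                          /*fileid=*/0);
if (ret == LIBCURVE_ERROR::OK) {
    // The file now sits in the recycle bin and can still be recovered.
    ret = mdsClient.RecoverFile("/vol1", userinfo, /*fileid=*/0);
}
```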
/** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @param: deleteforce是否强制删除而不放入垃圾回收站 - * @param: id为文件id,默认值为0,如果用户不指定该值,不会传id到mds - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Delete a file + * @param: userinfo is the user information + * @param: filename The file name to be deleted + * @param: deleteforce Whether to delete the file permanently instead of + * moving it to the recycle bin + * @param: id is the file id, with a default value of 0. If the user does + * not specify this value, the id will not be passed to mds + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void DeleteFile(const std::string& filename, - const UserInfo_t& userinfo, - bool deleteforce, - uint64_t fileid, - DeleteFileResponse* response, - brpc::Controller* cntl, + void DeleteFile(const std::string& filename, const UserInfo_t& userinfo, + bool deleteforce, uint64_t fileid, + DeleteFileResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** @@ -417,61 +414,59 @@ class MDSClientBase { * @param[in|out]: cntl, return RPC status * @param[in]:channel */ - void RecoverFile(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t fileid, - RecoverFileResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void RecoverFile(const std::string& filename, const UserInfo_t& userinfo, + uint64_t fileid, RecoverFileResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 变更owner - * @param: filename待变更的文件名 - * @param: newOwner新的owner信息 - * @param: userinfo执行此操作的user信息,只有root用户才能执行变更 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Change owner + * @param: filename The file whose owner is to be changed + * @param: newOwner The new owner + * @param: userinfo User information for this operation; only the root user + * can change the owner + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void ChangeOwner(const std::string& filename, - const std::string& newOwner, - const UserInfo_t& userinfo, - ChangeOwnerResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void ChangeOwner(const std::string& filename, const std::string& newOwner, + const UserInfo_t& userinfo, ChangeOwnerResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 枚举目录内容 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 - */ - void Listdir(const std::string& dirpath, - const UserInfo_t& userinfo, - ListDirResponse* response, - brpc::Controller* cntl, + * Enumerate directory contents + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS + */ + void Listdir(const std::string& dirpath, const UserInfo_t& userinfo, + ListDirResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取chunkserverID信息 - * @param[in]: ip为当前client的监听地址 - * @param[in]: port为监听端口 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain chunkserver ID information + * @param[in]: ip is the listening address of the current client + * @param[in]: port is the listening port + * @param[out]: response is the response of the rpc, provided for external 
+ * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void GetChunkServerInfo(const std::string& ip, - uint16_t port, + void GetChunkServerInfo(const std::string& ip, uint16_t port, GetChunkServerInfoResponse* reponse, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取server上的所有chunkserver的id - * @param[in]: ip为当前server的地址 - * @param[out]: response是当前rpc调用的response,返回给外部处理 - * @param[in|out]: cntl既是入参也是出参 - * @param[in]: channel是当前与mds建立的通道 + * Obtain the IDs of all chunkservers on the server + * @param[in]: IP is the address of the current server + * @param[out]: response is the response of the current rpc call, returned + * to external processing + * @param[in|out]: cntl is both an input and output parameter + * @param[in]: channel is the current channel established with MDS */ void ListChunkServerInServer(const std::string& ip, ListChunkServerResponse* response, @@ -480,8 +475,8 @@ class MDSClientBase { private: /** - * 为不同的request填充user信息 - * @param: request是待填充的变量指针 + * Fill in user information for different requests + * @param: request is the pointer to the variable to be filled in */ template void FillUserInfo(T* request, const UserInfo_t& userinfo) { @@ -499,7 +494,7 @@ class MDSClientBase { std::string CalcSignature(const UserInfo& userinfo, uint64_t date) const; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_MDS_CLIENT_BASE_H_ diff --git a/src/client/metacache.cpp b/src/client/metacache.cpp index 7c0a25a262..2265f6b6dd 100644 --- a/src/client/metacache.cpp +++ b/src/client/metacache.cpp @@ -19,27 +19,26 @@ * File Created: Tuesday, 25th September 2018 2:06:35 pm * Author: tongguangxun */ -#include +#include "src/client/metacache.h" #include +#include +#include #include #include -#include #include "proto/cli.pb.h" - -#include "src/client/metacache.h" -#include "src/client/mds_client.h" #include "src/client/client_common.h" +#include "src/client/mds_client.h" #include "src/common/concurrent/concurrent.h" namespace curve { namespace client { -using curve::common::WriteLockGuard; -using curve::common::ReadLockGuard; using curve::client::ClientConfig; +using curve::common::ReadLockGuard; +using curve::common::WriteLockGuard; void MetaCache::Init(const MetaCacheOption& metaCacheOpt, MDSClient* mdsclient) { @@ -87,12 +86,9 @@ bool MetaCache::IsLeaderMayChange(LogicPoolID logicPoolId, return flag; } -int MetaCache::GetLeader(LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkServerID* serverId, - EndPoint* serverAddr, - bool refresh, - FileMetric* fm) { +int MetaCache::GetLeader(LogicPoolID logicPoolId, CopysetID copysetId, + ChunkServerID* serverId, EndPoint* serverAddr, + bool refresh, FileMetric* fm) { const auto key = CalcLogicPoolCopysetID(logicPoolId, copysetId); CopysetInfo targetInfo; @@ -123,7 +119,8 @@ int MetaCache::GetLeader(LogicPoolID logicPoolId, << "logicpool id = " << logicPoolId << ", copyset id = " << copysetId; - // 重试失败,这时候需要向mds重新拉取最新的copyset信息了 + // The retry failed. At this point, it is necessary to retrieve the + // latest copyset information from mds again ret = UpdateCopysetInfoFromMDS(logicPoolId, copysetId); if (ret == 0) { continue; @@ -135,8 +132,8 @@ int MetaCache::GetLeader(LogicPoolID logicPoolId, if (ret == -1) { LOG(WARNING) << "get leader failed after retry!" 
- << ", copyset id = " << copysetId - << ", logicpool id = " << logicPoolId; + << ", copyset id = " << copysetId + << ", logicpool id = " << logicPoolId; return -1; } @@ -148,25 +145,24 @@ int MetaCache::UpdateLeaderInternal(LogicPoolID logicPoolId, CopysetInfo* toupdateCopyset, FileMetric* fm) { ChunkServerID csid = 0; - PeerAddr leaderaddr; + PeerAddr leaderaddr; GetLeaderRpcOption rpcOption(metacacheopt_.metacacheGetLeaderRPCTimeOutMS); - GetLeaderInfo getLeaderInfo(logicPoolId, - copysetId, toupdateCopyset->csinfos_, - toupdateCopyset->GetCurrentLeaderIndex(), - rpcOption); - int ret = ServiceHelper::GetLeader( - getLeaderInfo, &leaderaddr, &csid, fm); + GetLeaderInfo getLeaderInfo( + logicPoolId, copysetId, toupdateCopyset->csinfos_, + toupdateCopyset->GetCurrentLeaderIndex(), rpcOption); + int ret = ServiceHelper::GetLeader(getLeaderInfo, &leaderaddr, &csid, fm); if (ret == -1) { LOG(WARNING) << "get leader failed!" - << ", copyset id = " << copysetId - << ", logicpool id = " << logicPoolId; + << ", copyset id = " << copysetId + << ", logicpool id = " << logicPoolId; return -1; } ret = toupdateCopyset->UpdateLeaderInfo(leaderaddr); - // 如果更新失败,说明leader地址不在当前配置组中,从mds获取chunkserver的信息 + // If the update fails, it indicates that the leader address is not in the + // current configuration group. Obtain chunkserver information from MDS if (ret == -1 && !leaderaddr.IsEmpty()) { CopysetPeerInfo csInfo; ret = mdsclient_->GetChunkServerInfo(leaderaddr, &csInfo); @@ -177,8 +173,8 @@ int MetaCache::UpdateLeaderInternal(LogicPoolID logicPoolId, return -1; } - UpdateCopysetInfoIfMatchCurrentLeader( - logicPoolId, copysetId, leaderaddr); + UpdateCopysetInfoIfMatchCurrentLeader(logicPoolId, copysetId, + leaderaddr); *toupdateCopyset = GetCopysetinfo(logicPoolId, copysetId); ret = toupdateCopyset->UpdateLeaderInfo(leaderaddr, csInfo); } @@ -201,18 +197,16 @@ int MetaCache::UpdateCopysetInfoFromMDS(LogicPoolID logicPoolId, return -1; } - // 更新chunkserverid到copyset映射关系 + // Update chunkserverid to copyset mapping relationship UpdateChunkserverCopysetInfo(logicPoolId, copysetInfos[0]); - // 更新logicpool和copysetid到copysetinfo的映射 + // Update the mapping of logicpool and copysetid to copysetinfo UpdateCopysetInfo(logicPoolId, copysetId, copysetInfos[0]); return 0; } void MetaCache::UpdateCopysetInfoIfMatchCurrentLeader( - LogicPoolID logicPoolId, - CopysetID copysetId, - const PeerAddr& leaderAddr) { + LogicPoolID logicPoolId, CopysetID copysetId, const PeerAddr& leaderAddr) { std::vector> copysetInfos; (void)mdsclient_->GetServerList(logicPoolId, {copysetId}, ©setInfos); @@ -224,15 +218,15 @@ void MetaCache::UpdateCopysetInfoIfMatchCurrentLeader( << ", copyset id = " << copysetId << ", current leader = " << leaderAddr.ToString(); - // 更新chunkserverid到copyset的映射关系 + // Update the mapping relationship between chunkserverid and copyset UpdateChunkserverCopysetInfo(logicPoolId, copysetInfos[0]); - // 更新logicpool和copysetid到copysetinfo的映射 + // Update the mapping of logicpool and copysetid to copysetinfo UpdateCopysetInfo(logicPoolId, copysetId, copysetInfos[0]); } } CopysetInfo MetaCache::GetServerList(LogicPoolID logicPoolId, - CopysetID copysetId) { + CopysetID copysetId) { const auto key = CalcLogicPoolCopysetID(logicPoolId, copysetId); CopysetInfo ret; @@ -250,8 +244,7 @@ CopysetInfo MetaCache::GetServerList(LogicPoolID logicPoolId, * the copyset client will call UpdateLeader. 
* return the ChunkServerID to invoker */ -int MetaCache::UpdateLeader(LogicPoolID logicPoolId, - CopysetID copysetId, +int MetaCache::UpdateLeader(LogicPoolID logicPoolId, CopysetID copysetId, const EndPoint& leaderAddr) { const auto key = CalcLogicPoolCopysetID(logicPoolId, copysetId); @@ -329,11 +322,13 @@ void MetaCache::SetChunkserverUnstable(ChunkServerID csid) { ChunkServerID leaderid; if (cpinfo->second.GetCurrentLeaderID(&leaderid)) { if (leaderid == csid) { - // 只设置leaderid为当前serverid的Lcopyset + // Only mark copysets whose leader id equals the current + // server id cpinfo->second.SetLeaderUnstableFlag(); } } else { - // 当前copyset集群信息未知,直接设置LeaderUnStable + // The cluster information of this copyset is unknown, set + // LeaderUnStable directly cpinfo->second.SetLeaderUnstableFlag(); } } @@ -346,24 +341,24 @@ void MetaCache::AddCopysetIDInfo(ChunkServerID csid, chunkserverCopysetIDMap_[csid].emplace(cpidinfo); } -void MetaCache::UpdateChunkserverCopysetInfo(LogicPoolID lpid, - const CopysetInfo<ChunkServerID>& cpinfo) { +void MetaCache::UpdateChunkserverCopysetInfo( + LogicPoolID lpid, const CopysetInfo<ChunkServerID>& cpinfo) { ReadLockGuard rdlk(rwlock4CopysetInfo_); const auto key = CalcLogicPoolCopysetID(lpid, cpinfo.cpid_); - // 先获取原来的chunkserver到copyset映射 + // First, obtain the original chunkserver-to-copyset mapping auto previouscpinfo = lpcsid2CopsetInfoMap_.find(key); if (previouscpinfo != lpcsid2CopsetInfoMap_.end()) { std::vector<ChunkServerID> newID; std::vector<ChunkServerID> changedID; - // 先判断当前copyset有没有变更chunkserverid + // First determine whether the chunkservers of this copyset changed for (auto iter : previouscpinfo->second.csinfos_) { changedID.push_back(iter.peerID); } for (auto iter : cpinfo.csinfos_) { - auto it = std::find(changedID.begin(), changedID.end(), - iter.peerID); + auto it = + std::find(changedID.begin(), changedID.end(), iter.peerID); if (it != changedID.end()) { changedID.erase(it); } else { @@ -371,7 +366,7 @@ void MetaCache::UpdateChunkserverCopysetInfo(LogicPoolID lpid, } } - // 删除变更的copyset信息 + // Delete the changed copyset information for (auto chunkserverid : changedID) { { WriteLockGuard wrlk(rwlock4CSCopysetIDMap_); @@ -382,7 +377,7 @@ } } - // 更新新的copyset信息到chunkserver + // Record the new copyset information for each chunkserver for (auto chunkserverid : newID) { WriteLockGuard wrlk(rwlock4CSCopysetIDMap_); chunkserverCopysetIDMap_[chunkserverid].emplace(lpid, cpinfo.cpid_); @@ -390,8 +385,8 @@ } } -CopysetInfo<ChunkServerID> MetaCache::GetCopysetinfo( - LogicPoolID lpid, CopysetID csid) { +CopysetInfo<ChunkServerID> MetaCache::GetCopysetinfo(LogicPoolID lpid, + CopysetID csid) { ReadLockGuard rdlk(rwlock4CopysetInfo_); const auto key = CalcLogicPoolCopysetID(lpid, csid); auto cpinfo = lpcsid2CopsetInfoMap_.find(key); @@ -412,10 +407,8 @@ FileSegment* MetaCache::GetFileSegment(SegmentIndex segmentIndex) { WriteLockGuard lk(rwlock4Segments_); auto ret = segments_.emplace( - std::piecewise_construct, - std::forward_as_tuple(segmentIndex), - std::forward_as_tuple(segmentIndex, - fileInfo_.segmentsize, + std::piecewise_construct, std::forward_as_tuple(segmentIndex), + std::forward_as_tuple(segmentIndex, fileInfo_.segmentsize, metacacheopt_.discardGranularity)); return &(ret.first->second); @@ -435,5 +428,5 @@ void MetaCache::CleanChunksInSegment(SegmentIndex segmentIndex) { } } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve
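GetFileSegment above constructs the FileSegment in place with std::piecewise_construct, so the value type never has to be copied or moved into the map. A self-contained illustration of the same idiom, independent of the Curve types:

```cpp
#include <cstdint>
#include <iostream>
#include <tuple>
#include <unordered_map>

// A value type built in place, as FileSegment is in the hunk above.
struct Segment {
    Segment(uint32_t index, uint32_t size) : index(index), size(size) {}
    uint32_t index;
    uint32_t size;
};

int main() {
    std::unordered_map<uint32_t, Segment> segments;
    // Key and value constructor arguments are forwarded separately, so
    // Segment needs neither a copy nor a move constructor.
    auto ret = segments.emplace(std::piecewise_construct,
                                std::forward_as_tuple(7u),
                                std::forward_as_tuple(7u, 1u << 20));
    std::cout << ret.first->second.size << "\n";  // prints 1048576
    return 0;
}
```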
diff --git a/src/client/metacache.h b/src/client/metacache.h index a9a6e1fab7..1340a3eb25 100644 --- a/src/client/metacache.h +++ b/src/client/metacache.h @@ -61,69 +61,78 @@ class MetaCache { virtual ~MetaCache() = default; /** - * 初始化函数 - * @param: metacacheopt为当前metacache的配置option信息 - * @param: mdsclient为与mds通信的指针。 - * 为什么这里需要把mdsclient传进来? - * 因为首先metacache充当的角色就是对于MDS一侧的信息缓存 - * 所以对于底层想使用metacache的copyset client或者chunk closure - * 来说,他只需要知道metacache就可以了,不需要再去向mds查询信息, - * 在copyset client或者chunk closure发送IO失败之后会重新获取leader - * 然后再重试,如果leader获取不成功,需要向mds一侧查询当前copyset的最新信息, - * 这里将查询mds封装在内部了,这样copyset client和chunk closure就不感知mds了 + * Initialization function + * @param: metacacheopt is the configuration option information for the + * current metacache + * @param: mdsclient is the pointer used to communicate with the mds. + * Why is mdsclient passed in here? The metacache's role is to cache + * MDS-side information, so a copyset client or chunk closure that uses + * the metacache only needs to know about the metacache and does not + * query the mds itself. After a copyset client or chunk closure fails + * to send IO, it re-fetches the leader and retries; if the leader + * cannot be obtained, the latest information of the current copyset + * must be queried from the mds side. That mds query is encapsulated + * here, so the copyset client and chunk closure stay unaware of the mds */ - void Init(const MetaCacheOption &metaCacheOpt, MDSClient *mdsclient); + void Init(const MetaCacheOption& metaCacheOpt, MDSClient* mdsclient); /** - * 通过chunk index获取chunkid信息 - * @param: chunkidx以index查询chunk对应的id信息 - * @param: chunkinfo是出参,存储chunk的版本信息 - * @param: 成功返回OK, 否则返回UNKNOWN_ERROR + * Obtain chunk id information by chunk index + * @param: chunkidx is the chunk index to look up + * @param: chunkinfo is an output parameter that stores the version + * information of the chunk + * @return: Returns OK on success, otherwise returns UNKNOWN_ERROR */ virtual MetaCacheErrorType GetChunkInfoByIndex(ChunkIndex chunkidx, - ChunkIDInfo_t *chunkinfo); + ChunkIDInfo_t* chunkinfo); /** * @brief Update cached chunk info by chunk index */ virtual void UpdateChunkInfoByIndex(ChunkIndex cindex, - const ChunkIDInfo &chunkinfo); + const ChunkIDInfo& chunkinfo);
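Together with Init, the two methods above form the read path of the cache: the IO path looks a chunk up by index and falls back to the MDS only on a miss, then writes the result back. A sketch of that flow; the calling code and the segment fetch are assumed, not taken from this patch:

```cpp
// Sketch only: typical metacache usage on the IO path.
ChunkIDInfo_t idinfo;
MetaCacheErrorType err = metaCache->GetChunkInfoByIndex(chunkIdx, &idinfo);
if (err != MetaCacheErrorType::OK) {
    // Cache miss: fetch the segment from the MDS (allocating if needed),
    // fill idinfo from the reply, then populate the cache so the next
    // lookup for this index hits.
    // ... mdsclient->GetOrAllocateSegment(...) ...
    metaCache->UpdateChunkInfoByIndex(chunkIdx, idinfo);
}
```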
/** - * sender发送数据的时候需要知道对应的leader然后发送给对应的chunkserver - * 如果get不到的时候,外围设置refresh为true,然后向chunkserver端拉取最新的 - * server信息,然后更新metacache。 - * 如果当前copyset的leaderMayChange置位的时候,即使refresh为false,也需要 - * 先去拉取新的leader信息,才能继续下发IO. - * @param: lpid逻辑池id - * @param: cpid是copysetid - * @param: serverId对应chunkserver的id信息,是出参 - * @param: serverAddr为serverid对应的ip信息 - * @param: refresh,如果get不到的时候,外围设置refresh为true, - * 然后向chunkserver端拉取最新的 - * @param: fm用于统计metric - * @param: 成功返回0, 否则返回-1 + * When the sender sends data, it needs to know the corresponding leader and + * send it to the corresponding chunkserver. If it cannot retrieve the + * leader, and the caller has set "refresh" to true, it will then + * fetch the latest server information from the chunkserver side and update + * the metacache. If the "leaderMayChange" flag of the current copyset is + * set, even if "refresh" is set to false, it is still necessary to fetch + * the new leader information before continuing with IO operations. + * @param: lpid Logical pool ID + * @param: cpid is the copyset ID + * @param: serverId is the ID of the leader chunkserver, an output + * parameter + * @param: serverAddr is the address corresponding to serverId + * @param: refresh. When the leader cannot be obtained, the caller sets + * refresh to true to pull the latest information from the chunkserver + * @param: fm is used for metric statistics + * @return: Returns 0 on success, otherwise returns -1 */ virtual int GetLeader(LogicPoolID logicPoolId, CopysetID copysetId, - ChunkServerID *serverId, butil::EndPoint *serverAddr, - bool refresh = false, FileMetric *fm = nullptr); + ChunkServerID* serverId, butil::EndPoint* serverAddr, + bool refresh = false, FileMetric* fm = nullptr); /** - * 更新某个copyset的leader信息 - * @param logicPoolId 逻辑池id - * @param copysetId 复制组id - * @param leaderAddr leader地址 - * @return: 成功返回0, 否则返回-1 + * Update the leader information of a copyset + * @param logicPoolId Logical pool ID + * @param copysetId Copyset ID + * @param leaderAddr leader address + * @return: Returns 0 on success, otherwise returns -1 */ virtual int UpdateLeader(LogicPoolID logicPoolId, CopysetID copysetId, - const butil::EndPoint &leaderAddr); + const butil::EndPoint& leaderAddr); /** - * 更新copyset数据信息,包含serverlist - * @param: lpid逻辑池id - * @param: cpid是copysetid - * @param: csinfo是要更新的copyset info + * Update copyset data information, including the serverlist + * @param: lpid Logical pool ID + * @param: cpid is the copyset ID + * @param: csinfo is the copyset info to be updated */ virtual void UpdateCopysetInfo(LogicPoolID logicPoolId, CopysetID copysetId, - const CopysetInfo<ChunkServerID> &csinfo); + const CopysetInfo<ChunkServerID>& csinfo); // Add copysets info to cache, and skip already copyset void AddCopysetsInfo( @@ -131,26 +140,26 @@ class MetaCache { std::vector<CopysetInfo<ChunkServerID>>&& copysetsInfo); /** - * 通过chunk id更新chunkid信息 - * @param: cid为chunkid - * @param: cidinfo为当前chunk对应的id信息 + * Update chunk id information by chunk id + * @param: cid is the chunk id + * @param: cidinfo is the id information corresponding to the current chunk */ - virtual void UpdateChunkInfoByID(ChunkID cid, const ChunkIDInfo &cidinfo); + virtual void UpdateChunkInfoByID(ChunkID cid, const ChunkIDInfo& cidinfo); /** - * 获取当前copyset的server list信息 - * @param: lpid逻辑池id - * @param: cpid是copysetid - * @return: 当前copyset的copysetinfo信息 + * Obtain the server list information of the current copyset + * @param: lpid Logical pool ID + * @param: cpid is the copyset ID + * @return: The copysetinfo information of the current copyset */ virtual CopysetInfo<ChunkServerID> GetServerList(LogicPoolID logicPoolId, CopysetID copysetId); /** - * 将ID转化为cache的key - * @param: lpid逻辑池id - * @param: cpid是copysetid - * @return: 为当前的key + * Convert the IDs into the cache key + * @param: lpid Logical pool ID + * @param: cpid is the copyset ID + * @return: the cache key */ static LogicPoolCopysetID CalcLogicPoolCopysetID(LogicPoolID logicPoolId, CopysetID copysetId) { @@ -159,45 +168,45 @@ } /** - * @brief: 标记整个server上的所有chunkserver为unstable状态 + * @brief: Mark all chunkservers on the entire server as unstable * - * @param: serverIp server的ip地址 - * @return: 0 设置成功 / -1 设置失败 + * @param: serverIp The IP address of the server + * @return: 0 on success / -1 on failure */ - virtual int SetServerUnstable(const std::string &endPoint); + virtual int SetServerUnstable(const std::string& endPoint);
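CalcLogicPoolCopysetID merges the logical pool id and copyset id into a single 64-bit cache key (its body is elided by the hunk context above). The usual way to build such a composite key, shown as a standalone sketch rather than the exact Curve implementation:

```cpp
#include <cstdint>

// Sketch: pack two 32-bit ids into one 64-bit key; keeping the pool id in
// the high word makes keys from different logical pools disjoint.
inline uint64_t CalcKey(uint32_t logicPoolId, uint32_t copysetId) {
    return (static_cast<uint64_t>(logicPoolId) << 32) |
           static_cast<uint64_t>(copysetId);
}
```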
主要是通过设置这个copyset - * 的leaderMayChange标志,当该copyset的再次下发IO的时候会查看这个 - * 状态,当这个标志位置位的时候,IO下发需要先进行leader refresh, - * 如果leaderrefresh成功,leaderMayChange会被reset。 - * SetChunkserverUnstable就会遍历当前chunkserver上的所有copyset - * 并设置这个chunkserver的leader copyset的leaderMayChange标志。 - * @param: csid是当前不稳定的chunkserver ID + * If the chunkserver where the leader is located encounters a problem, + * leading to RPC failures, then other leader copysets on this chunkserver + * will also face the same issue. Therefore, it is necessary to notify the + * leader copysets on the current chunkserver. This is primarily done by + * setting the "leaderMayChange" flag for these copysets. When IO is issued + * again for a copyset with this flag set, the system will check this + * status. When this flag is set, IO issuance will first perform a leader + * refresh. If the leader refresh is successful, the "leaderMayChange" flag + * will be reset. The "SetChunkserverUnstable" operation will iterate + * through all the copysets on the current chunkserver and set the + * "leaderMayChange" flag for the leader copysets of that chunkserver. + * @param: csid is the currently unstable chunkserver ID */ virtual void SetChunkserverUnstable(ChunkServerID csid); /** - * 向map中添加对应chunkserver的copyset信息 - * @param: csid为当前chunkserverid - * @param: cpid为当前copyset的id信息 + * Add copyset information for the corresponding chunkserver to the map + * @param: csid is the current chunkserverid + * @param: cpid is the ID information of the current copyset */ virtual void AddCopysetIDInfo(ChunkServerID csid, - const CopysetIDInfo &cpid); + const CopysetIDInfo& cpid); - virtual void - UpdateChunkserverCopysetInfo(LogicPoolID lpid, - const CopysetInfo &cpinfo); + virtual void UpdateChunkserverCopysetInfo( + LogicPoolID lpid, const CopysetInfo& cpinfo); - void UpdateFileInfo(const FInfo &fileInfo) { fileInfo_ = fileInfo; } + void UpdateFileInfo(const FInfo& fileInfo) { fileInfo_ = fileInfo; } - const FInfo *GetFileInfo() const { return &fileInfo_; } + const FInfo* GetFileInfo() const { return &fileInfo_; } - void UpdateFileEpoch(const FileEpoch& fEpoch) { - fEpoch_ = fEpoch; - } + void UpdateFileEpoch(const FileEpoch& fEpoch) { fEpoch_ = fEpoch; } const FileEpoch* GetFileEpoch() const { return &fEpoch_; } @@ -212,26 +221,26 @@ class MetaCache { } /** - * 获取对应的copyset的LeaderMayChange标志 + * Get the LeaderMayChange flag of the corresponding copyset */ virtual bool IsLeaderMayChange(LogicPoolID logicpoolId, CopysetID copysetId); /** - * 测试使用 - * 获取copysetinfo信息 + * Test Usage + * Obtain copysetinfo information */ virtual CopysetInfo GetCopysetinfo(LogicPoolID lpid, CopysetID csid); - UnstableHelper &GetUnstableHelper() { return unstableHelper_; } + UnstableHelper& GetUnstableHelper() { return unstableHelper_; } uint64_t InodeId() const { return fileInfo_.id; } /** * @brief Get file segment info about the segmentIndex */ - FileSegment *GetFileSegment(SegmentIndex segmentIndex); + FileSegment* GetFileSegment(SegmentIndex segmentIndex); /** * @brief Clean chunks of this segment @@ -240,68 +249,71 @@ class MetaCache { private: /** - * @brief 从mds更新copyset复制组信息 - * @param logicPoolId 逻辑池id - * @param copysetId 复制组id - * @return 0 成功 / -1 失败 + * @brief Update copyset replication group information from mds + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @return 0 successful/-1 failed */ int UpdateCopysetInfoFromMDS(LogicPoolID logicPoolId, CopysetID copysetId); /** - * 更新copyset的leader信息 - * @param[in]: logicPoolId逻辑池信息 - * @param[in]: 
copysetId复制组信息 - * @param[out]: toupdateCopyset为metacache中待更新的copyset信息指针 + * Update the leader information of the copyset + * @param[in]: logicPoolId Logical pool information + * @param[in]: copysetId Copyset information + * @param[out]: toupdateCopyset is the pointer to the copyset information to + * be updated in the metacache */ int UpdateLeaderInternal(LogicPoolID logicPoolId, CopysetID copysetId, - CopysetInfo<ChunkServerID> *toupdateCopyset, - FileMetric *fm = nullptr); + CopysetInfo<ChunkServerID>* toupdateCopyset, + FileMetric* fm = nullptr); /** - * 从mds拉去复制组信息,如果当前leader在复制组中 - * 则更新本地缓存,反之则不更新 - * @param: logicPoolId 逻辑池id - * @param: copysetId 复制组id - * @param: leaderAddr 当前的leader address + * Pull the replication group information from the MDS; if the current + * leader is in the replication group, update the local cache, otherwise + * do not update + * @param: logicPoolId Logical pool ID + * @param: copysetId Copyset ID + * @param: leaderAddr The current leader address */ void UpdateCopysetInfoIfMatchCurrentLeader(LogicPoolID logicPoolId, CopysetID copysetId, - const PeerAddr &leaderAddr); + const PeerAddr& leaderAddr); private: - MDSClient *mdsclient_; + MDSClient* mdsclient_; MetaCacheOption metacacheopt_; - // chunkindex到chunkidinfo的映射表 + // Mapping table from chunkindex to chunkidinfo CURVE_CACHELINE_ALIGNMENT ChunkIndexInfoMap chunkindex2idMap_; CURVE_CACHELINE_ALIGNMENT RWLock rwlock4Segments_; CURVE_CACHELINE_ALIGNMENT std::unordered_map<SegmentIndex, FileSegment> segments_; // NOLINT - // logicalpoolid和copysetid到copysetinfo的映射表 + // Mapping table from logicalpoolid and copysetid to copysetinfo CURVE_CACHELINE_ALIGNMENT CopysetInfoMap lpcsid2CopsetInfoMap_; - // chunkid到chunkidinfo的映射表 + // Mapping table from chunkid to chunkidinfo CURVE_CACHELINE_ALIGNMENT ChunkInfoMap chunkid2chunkInfoMap_; - // 三个读写锁分别保护上述三个映射表 + // Three read-write locks, each protecting one of the three mapping + // tables above CURVE_CACHELINE_ALIGNMENT RWLock rwlock4chunkInfoMap_; CURVE_CACHELINE_ALIGNMENT RWLock rwlock4ChunkInfo_; CURVE_CACHELINE_ALIGNMENT RWLock rwlock4CopysetInfo_; - // chunkserverCopysetIDMap_存放当前chunkserver到copyset的映射 - // 当rpc closure设置SetChunkserverUnstable时,会设置该chunkserver - // 的所有copyset处于leaderMayChange状态,后续copyset需要判断该值来看 - // 是否需要刷新leader + // chunkserverCopysetIDMap_ stores the mapping of the current chunkserver to + // copysets. When an RPC closure sets SetChunkserverUnstable, it sets all + // the copysets of that chunkserver to the leaderMayChange state. Subsequent + // copyset operations will check this value to determine whether a leader + // refresh is needed. 
- // chunkserverid到copyset的映射 + // Mapping from chunkserver id to copysets std::unordered_map<ChunkServerID, std::set<CopysetIDInfo>> chunkserverCopysetIDMap_; // NOLINT - // 读写锁保护unStableCSMap + // Read-write lock protecting unstableCSMap CURVE_CACHELINE_ALIGNMENT RWLock rwlock4CSCopysetIDMap_; - // 当前文件信息 + // Current file information FInfo fileInfo_; // epoch info diff --git a/src/client/metacache_struct.h b/src/client/metacache_struct.h index f283687f3c..4b17893a51 100644 --- a/src/client/metacache_struct.h +++ b/src/client/metacache_struct.h @@ -43,24 +43,25 @@ using curve::common::ReadLockGuard; using curve::common::SpinLock; using curve::common::WriteLockGuard; -// copyset内的chunkserver节点的基本信息 -// 包含当前chunkserver的id信息,以及chunkserver的地址信息 +// Basic information of the chunkserver nodes in a copyset, +// containing the ID of the chunkserver and its address +// information template <typename T> struct CopysetPeerInfo { - // 当前chunkserver节点的ID + // The ID of the current chunkserver node T peerID = 0; - // 当前chunkserver节点的内部地址 + // The internal address of the current chunkserver node PeerAddr internalAddr; - // 当前chunkserver节点的外部地址 + // The external address of the current chunkserver node PeerAddr externalAddr; CopysetPeerInfo() = default; - CopysetPeerInfo(const T &cid, const PeerAddr &internal, - const PeerAddr &external) + CopysetPeerInfo(const T& cid, const PeerAddr& internal, + const PeerAddr& external) : peerID(cid), internalAddr(internal), externalAddr(external) {} - bool operator==(const CopysetPeerInfo &other) const { + bool operator==(const CopysetPeerInfo& other) const { return this->internalAddr == other.internalAddr && this->externalAddr == other.externalAddr; } @@ -72,7 +73,7 @@ }; template <typename T> -inline std::ostream &operator<<(std::ostream &os, const CopysetPeerInfo<T> &c) { +inline std::ostream& operator<<(std::ostream& os, const CopysetPeerInfo<T>& c) { os << "peer id : " << c.peerID << ", internal address : " << c.internalAddr.ToString() << ", external address : " << c.externalAddr.ToString(); @@ -81,23 +82,25 @@ }
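The CURVE_CACHELINE_ALIGNMENT attribute on the locks and maps above (and on CopysetInfo below) keeps hot, independently written fields on separate cache lines so concurrent readers and writers do not false-share. The underlying idiom, as a generic sketch; the value 64 assumes the common x86 cache line size:

```cpp
// Sketch: a macro like CURVE_CACHELINE_ALIGNMENT typically expands to an
// alignas of the cache line size, as below.
struct Counters {
    alignas(64) long reads = 0;   // touched only by reader threads
    alignas(64) long writes = 0;  // touched only by writer threads
};
static_assert(sizeof(Counters) >= 128,
              "each field occupies its own cache line");
```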
+          leaderindex_(other.leaderindex_),
+          cpid_(other.cpid_),
           lpid_(other.lpid_) {}

     CopysetInfo(CopysetInfo&& other) noexcept
@@ -142,11 +147,11 @@ template  struct CURVE_CACHELINE_ALIGNMENT CopysetInfo {
     }

     /**
-     * 获取当前leader的索引
+     * Get the index of the current leader
      */
     int16_t GetCurrentLeaderIndex() const { return leaderindex_; }

-    bool GetCurrentLeaderID(T *id) const {
+    bool GetCurrentLeaderID(T* id) const {
         if (leaderindex_ >= 0) {
             if (static_cast(csinfos_.size()) < leaderindex_) {
                 return false;
@@ -160,10 +165,11 @@ template  struct CURVE_CACHELINE_ALIGNMENT CopysetInfo {
     }

     /**
-     * 更新leaderindex,如果leader不在当前配置组中,则返回-1
-     * @param: addr为新的leader的地址信息
+     * Update the leaderindex; return -1 if the leader is not in the current
+     * configuration group
+     * @param: addr is the address information of the new leader
      */
-    int UpdateLeaderInfo(const PeerAddr &addr,
+    int UpdateLeaderInfo(const PeerAddr& addr,
                          CopysetPeerInfo csInfo = CopysetPeerInfo()) {
         VLOG(3) << "update leader info, pool " << lpid_ << ", copyset " << cpid_
                 << ", current leader " << addr.ToString();
@@ -179,7 +185,8 @@ template  struct CURVE_CACHELINE_ALIGNMENT CopysetInfo {
             tempindex++;
         }

-        // 新的addr不在当前copyset内,如果csInfo不为空,那么将其插入copyset
+        // The new addr is not within the current copyset. If csInfo is not
+        // empty, insert it into the copyset
         if (!exists && !csInfo.IsEmpty()) {
             csinfos_.push_back(csInfo);
         } else if (exists == false) {
@@ -198,8 +205,10 @@ template  struct CURVE_CACHELINE_ALIGNMENT CopysetInfo {
      * @param[out]: peer id
      * @param[out]: ep
      */
-    int GetLeaderInfo(T *peerid, EndPoint *ep) {
-        // 第一次获取leader,如果当前leader信息没有确定,返回-1,由外部主动发起更新leader
+    int GetLeaderInfo(T* peerid, EndPoint* ep) {
+        // On the first attempt to get the leader, if the leader information
+        // has not been determined yet, return -1 so that the caller actively
+        // triggers a leader update
         if (leaderindex_ < 0 ||
             leaderindex_ >= static_cast(csinfos_.size())) {
             LOG(INFO) << "GetLeaderInfo pool " << lpid_ << ", copyset " << cpid_
@@ -219,32 +228,32 @@ template  struct CURVE_CACHELINE_ALIGNMENT CopysetInfo {
     }

     /**
-     * 添加copyset的peerinfo
-     * @param: csinfo为待添加的peer信息
+     * Add peer info to the copyset
+     * @param: csinfo is the peer information to be added
      */
-    void AddCopysetPeerInfo(const CopysetPeerInfo &csinfo) {
+    void AddCopysetPeerInfo(const CopysetPeerInfo& csinfo) {
         spinlock_.Lock();
         csinfos_.push_back(csinfo);
         spinlock_.UnLock();
     }

     /**
-     * 当前CopysetInfo是否合法
+     * Whether the current CopysetInfo is valid
      */
     bool IsValid() const { return !csinfos_.empty(); }

     /**
-     * 更新leaderindex
+     * Update the leaderindex
      */
     void UpdateLeaderIndex(int index) { leaderindex_ = index; }

     /**
-     * 当前copyset是否存在对应的chunkserver address
-     * @param: addr需要检测的chunkserver
-     * @return: true存在;false不存在
+     * Whether the given chunkserver address is a peer of the current copyset
+     * @param: addr The chunkserver address to check
+     * @return: true if it exists; false otherwise
      */
-    bool HasPeerInCopyset(const PeerAddr &addr) const {
-        for (const auto &peer : csinfos_) {
+    bool HasPeerInCopyset(const PeerAddr& addr) const {
+        for (const auto& peer : csinfos_) {
             if (peer.internalAddr == addr || peer.externalAddr == addr) {
                 return true;
             }
@@ -255,13 +264,13 @@ template  struct CURVE_CACHELINE_ALIGNMENT CopysetInfo {
 };

 template 
-inline std::ostream &operator<<(std::ostream &os,
-                                const CopysetInfo &copyset) {
+inline std::ostream& operator<<(std::ostream& os,
+                                const CopysetInfo& copyset) {
     os << "pool id : " << copyset.lpid_ << ", copyset id : " << copyset.cpid_
        << ", leader index : " << copyset.leaderindex_
<< ", leader may change : " << copyset.leaderMayChange_ << ", peers : "; - for (auto &p : copyset.csinfos_) { + for (auto& p : copyset.csinfos_) { os << p << " "; } @@ -276,13 +285,13 @@ struct CopysetIDInfo { : lpid(logicpoolid), cpid(copysetid) {} }; -inline bool operator<(const CopysetIDInfo &cpidinfo1, - const CopysetIDInfo &cpidinfo2) { +inline bool operator<(const CopysetIDInfo& cpidinfo1, + const CopysetIDInfo& cpidinfo2) { return cpidinfo1.lpid <= cpidinfo2.lpid && cpidinfo1.cpid < cpidinfo2.cpid; } -inline bool operator==(const CopysetIDInfo &cpidinfo1, - const CopysetIDInfo &cpidinfo2) { +inline bool operator==(const CopysetIDInfo& cpidinfo1, + const CopysetIDInfo& cpidinfo2) { return cpidinfo1.cpid == cpidinfo2.cpid && cpidinfo1.lpid == cpidinfo2.lpid; } @@ -290,9 +299,12 @@ class FileSegment { public: FileSegment(SegmentIndex segmentIndex, uint32_t segmentSize, uint32_t discardGranularity) - : segmentIndex_(segmentIndex), segmentSize_(segmentSize), - discardGranularity_(discardGranularity), rwlock_(), - discardBitmap_(segmentSize_ / discardGranularity_), chunks_() {} + : segmentIndex_(segmentIndex), + segmentSize_(segmentSize), + discardGranularity_(discardGranularity), + rwlock_(), + discardBitmap_(segmentSize_ / discardGranularity_), + chunks_() {} /** * @brief Confirm if all bit was discarded @@ -312,7 +324,7 @@ class FileSegment { * @brief Get internal bitmap for unit-test * @return Internal bitmap */ - Bitmap &GetBitmap() { return discardBitmap_; } + Bitmap& GetBitmap() { return discardBitmap_; } void SetBitmap(const uint64_t offset, const uint64_t length); void ClearBitmap(const uint64_t offset, const uint64_t length); @@ -370,14 +382,15 @@ inline void FileSegment::ClearBitmap(const uint64_t offset, enum class FileSegmentLockType { Read, Write }; -template class FileSegmentLockGuard { +template +class FileSegmentLockGuard { public: - explicit FileSegmentLockGuard(FileSegment *segment) : segment_(segment) { + explicit FileSegmentLockGuard(FileSegment* segment) : segment_(segment) { Lock(); } - FileSegmentLockGuard(const FileSegmentLockGuard &) = delete; - FileSegmentLockGuard &operator=(const FileSegmentLockGuard &) = delete; + FileSegmentLockGuard(const FileSegmentLockGuard&) = delete; + FileSegmentLockGuard& operator=(const FileSegmentLockGuard&) = delete; ~FileSegmentLockGuard() { UnLock(); } @@ -392,7 +405,7 @@ template class FileSegmentLockGuard { void UnLock() { segment_->ReleaseLock(); } private: - FileSegment *segment_; + FileSegment* segment_; }; using FileSegmentReadLockGuard = diff --git a/src/client/request_closure.h b/src/client/request_closure.h index 326f76e10b..753f16aea4 100644 --- a/src/client/request_closure.h +++ b/src/client/request_closure.h @@ -63,83 +63,60 @@ class CURVE_CACHELINE_ALIGNMENT RequestClosure /** * @brief Get error code */ - virtual int GetErrorCode() { - return errcode_; - } + virtual int GetErrorCode() { return errcode_; } /** * @brief Set error code, 0 means success */ - virtual void SetFailed(int errorCode) { - errcode_ = errorCode; - } + virtual void SetFailed(int errorCode) { errcode_ = errorCode; } /** - * @brief 获取当前closure属于哪个request + * @brief to obtain which request the current closure belongs to */ - virtual RequestContext* GetReqCtx() { - return reqCtx_; - } + virtual RequestContext* GetReqCtx() { return reqCtx_; } /** - * @brief 获取当前request属于哪个iotracker + * @brief: Obtain which iotracker the current request belongs to */ - virtual IOTracker* GetIOTracker() { - return tracker_; - } + virtual IOTracker* GetIOTracker() { 
return tracker_; }

     /**
-     * @brief 设置当前属于哪一个iotracker
+     * @brief Set the iotracker this closure belongs to
      */
-    void SetIOTracker(IOTracker* ioTracker) {
-        tracker_ = ioTracker;
-    }
+    void SetIOTracker(IOTracker* ioTracker) { tracker_ = ioTracker; }

     /**
-     * @brief 设置所属的iomanager
+     * @brief Set the iomanager this closure belongs to
      */
-    void SetIOManager(IOManager* ioManager) {
-        ioManager_ = ioManager;
-    }
+    void SetIOManager(IOManager* ioManager) { ioManager_ = ioManager; }

     /**
-     * @brief 设置当前closure重试次数
+     * @brief Increase the retry count of the current closure
      */
-    void IncremRetriedTimes() {
-        retryTimes_++;
-    }
+    void IncremRetriedTimes() { retryTimes_++; }

-    uint64_t GetRetriedTimes() const {
-        return retryTimes_;
-    }
+    uint64_t GetRetriedTimes() const { return retryTimes_; }

     /**
-     * 设置metric
+     * Set the metric
      */
-    void SetFileMetric(FileMetric* fm) {
-        metric_ = fm;
-    }
+    void SetFileMetric(FileMetric* fm) { metric_ = fm; }

     /**
-     * 获取metric指针
+     * Get the metric pointer
      */
-    FileMetric* GetMetric() const {
-        return metric_;
-    }
+    FileMetric* GetMetric() const { return metric_; }

     /**
-     * 获取下一次rpc超时时间, rpc超时时间实现了指数退避的策略
+     * Get the next RPC timeout; the RPC timeout follows an exponential
+     * backoff policy
      */
-    uint64_t GetNextTimeoutMS() const {
-        return nextTimeoutMS_;
-    }
+    uint64_t GetNextTimeoutMS() const { return nextTimeoutMS_; }

     /**
-     * 设置下次重试超时时间
+     * Set the timeout for the next retry
      */
-    void SetNextTimeOutMS(uint64_t timeout) {
-        nextTimeoutMS_ = timeout;
-    }
+    void SetNextTimeOutMS(uint64_t timeout) { nextTimeoutMS_ = timeout; }

     bool IsSlowRequest() const { return slowRequest_; }
@@ -153,25 +130,25 @@ class CURVE_CACHELINE_ALIGNMENT RequestClosure
     // whether own inflight count
     bool ownInflight_ = false;

-    // 当前request的错误码
+    // The error code of the current request
     int errcode_ = -1;

-    // 当前request的tracker信息
+    // Tracker information of the current request
     IOTracker* tracker_ = nullptr;

-    // closure的request信息
+    // The request this closure belongs to
     RequestContext* reqCtx_ = nullptr;

-    // metric信息
+    // Metric information
     FileMetric* metric_ = nullptr;

-    // 重试次数
+    // Number of retries
     uint64_t retryTimes_ = 0;

-    // 当前closure属于的iomanager
+    // The iomanager this closure belongs to
     IOManager* ioManager_ = nullptr;

-    // 下一次rpc超时时间
+    // Timeout of the next RPC
     uint64_t nextTimeoutMS_ = 0;

     // create time of this closure(in millisecond)
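// [Editor's sketch] One plausible shape of the exponential backoff behind
// GetNextTimeoutMS()/SetNextTimeOutMS(); the base, cap, and helper name are
// illustrative assumptions, not the client's actual policy.
#include <algorithm>
#include <cstdint>

uint64_t BackoffTimeoutMS(uint64_t baseMs, uint64_t retriedTimes,
                          uint64_t maxMs) {
    // Double the timeout on every retry; clamp the shift to avoid overflow
    // and the result to an upper bound.
    uint64_t shift = std::min<uint64_t>(retriedTimes, 16);
    return std::min(baseMs << shift, maxMs);
}
// BackoffTimeoutMS(1000, 0, 64000) == 1000; BackoffTimeoutMS(1000, 3, 64000) == 8000.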
diff --git a/src/client/request_context.h b/src/client/request_context.h
index 0b7c9db649..76d2acf4c9 100644
--- a/src/client/request_context.h
+++ b/src/client/request_context.h
@@ -28,9 +28,9 @@
 #include
 #include

+#include "include/curve_compiler_specific.h"
 #include "src/client/client_common.h"
 #include "src/client/request_closure.h"
-#include "include/curve_compiler_specific.h"

 namespace curve {
 namespace client {
@@ -73,13 +73,14 @@ struct CURVE_CACHELINE_ALIGNMENT RequestContext {
         done_ = nullptr;
     }

-    // chunk的ID信息,sender在发送rpc的时候需要附带其ID信息
-    ChunkIDInfo idinfo_;
+    // The ID information of the chunk, which the sender needs to include
+    // when sending the rpc
+    ChunkIDInfo idinfo_;

-    // 用户IO被拆分之后,其小IO有自己的offset和length
-    off_t offset_ = 0;
-    OpType optype_ = OpType::UNKNOWN;
-    size_t rawlength_ = 0;
+    // After a user IO is split, each small IO has its own offset and length
+    off_t offset_ = 0;
+    OpType optype_ = OpType::UNKNOWN;
+    size_t rawlength_ = 0;

     // user's single io request will split into several requests
     // subIoIndex_ is an index of serveral requests
@@ -91,29 +92,31 @@ struct CURVE_CACHELINE_ALIGNMENT RequestContext {

     // write data of current request
     butil::IOBuf writeData_;

-    // 因为RPC都是异步发送,因此在一个Request结束时,RPC回调调用当前的done
-    // 来告知当前的request结束了
-    RequestClosure* done_ = nullptr;
+    // Because RPCs are sent asynchronously, when a request finishes, the RPC
+    // callback invokes the current done to signal that the request is
+    // complete
+    RequestClosure* done_ = nullptr;

     // file id
     uint64_t fileId_;
     // file epoch
     uint64_t epoch_;

-    // request的版本信息
-    uint64_t seq_ = 0;
+    // Version (sequence number) of the request
+    uint64_t seq_ = 0;

-    // 这个对应的GetChunkInfo的出参
-    ChunkInfoDetail* chunkinfodetail_ = nullptr;
+    // Output parameter of the corresponding GetChunkInfo call
+    ChunkInfoDetail* chunkinfodetail_ = nullptr;

-    // clone chunk请求需要携带源chunk的location及所需要创建的chunk的大小
-    uint32_t chunksize_ = 0;
-    std::string location_;
-    RequestSourceInfo sourceInfo_;
-    // create clone chunk时候用于修改chunk的correctedSn
-    uint64_t correctedSeq_ = 0;
+    // The clone chunk request needs to carry the location of the source chunk
+    // and the size of the chunk to be created
+    uint32_t chunksize_ = 0;
+    std::string location_;
+    RequestSourceInfo sourceInfo_;
+    // The correctedSn used to modify the chunk when creating a clone chunk
+    uint64_t correctedSeq_ = 0;

-    // 当前request context id
-    uint64_t id_ = 0;
+    // Current request context id
+    uint64_t id_ = 0;

     static RequestContext* NewInitedRequestContext() {
         RequestContext* ctx = new (std::nothrow) RequestContext();
@@ -139,10 +142,8 @@ inline std::ostream& operator<<(std::ostream& os,
     os << "logicpool id = " << reqCtx.idinfo_.lpid_
        << ", copyset id = " << reqCtx.idinfo_.cpid_
        << ", chunk id = " << reqCtx.idinfo_.cid_
-       << ", offset = " << reqCtx.offset_
-       << ", length = " << reqCtx.rawlength_
-       << ", sub-io index = " << reqCtx.subIoIndex_
-       << ", sn = " << reqCtx.seq_
+       << ", offset = " << reqCtx.offset_ << ", length = " << reqCtx.rawlength_
+       << ", sub-io index = " << reqCtx.subIoIndex_ << ", sn = " << reqCtx.seq_
        << ", source info = " << reqCtx.sourceInfo_;

     return os;
diff --git a/src/client/request_scheduler.cpp b/src/client/request_scheduler.cpp
index e723126235..939115e210 100644
--- a/src/client/request_scheduler.cpp
+++ b/src/client/request_scheduler.cpp
@@ -25,9 +25,9 @@
 #include
 #include

-#include "src/client/request_context.h"
-#include "src/client/request_closure.h"
 #include "src/client/chunk_closure.h"
+#include "src/client/request_closure.h"
+#include "src/client/request_context.h"

 namespace curve {
 namespace client {
@@ -35,8 +35,7 @@ namespace client {
 RequestScheduler::~RequestScheduler() {}

 int RequestScheduler::Init(const RequestScheduleOption& reqSchdulerOpt,
-                           MetaCache* metaCache,
-                           FileMetric* fm) {
+                           MetaCache* metaCache, FileMetric* fm) {
     blockIO_.store(false);
     reqschopt_ = reqSchdulerOpt;
@@ -58,8 +57,7 @@ int RequestScheduler::Init(const RequestScheduleOption& reqSchdulerOpt,
     }

     LOG(INFO) << "RequestScheduler conf info: "
-              << "scheduleQueueCapacity = "
-              << reqschopt_.scheduleQueueCapacity
+              << "scheduleQueueCapacity = " << reqschopt_.scheduleQueueCapacity
               << ", scheduleThreadpoolSize = "
               << reqschopt_.scheduleThreadpoolSize;
     return 0;
@@ -77,7 +75,7 @@ int RequestScheduler::Fini() {
     if (running_.exchange(false, std::memory_order_acq_rel)) {
         for (int i = 0; i < threadPool_.NumOfThreads(); ++i) {
             // notify the wait thread
-            BBQItem stopReq(nullptr, true);
+            BBQItem stopReq(nullptr, true);
             queue_.PutBack(stopReq);
         }
         threadPool_.Stop();
@@ -89,7 +87,7 @@ int RequestScheduler::Fini() {
 int RequestScheduler::ScheduleRequest(
     const std::vector& requests) {
     if (running_.load(std::memory_order_acquire)) {
-        /*
TODO(wudemiao): 后期考虑 qos */
+        /* TODO(wudemiao): consider QoS later */
         for (auto it : requests) {
             // skip the fake request
             if (!it->idinfo_.chunkExist) {
@@ -99,7 +97,7 @@ int RequestScheduler::ScheduleRequest(
                 continue;
             }

-            BBQItem req(it);
+            BBQItem req(it);
             queue_.PutBack(req);
         }
         return 0;
@@ -107,18 +105,18 @@ int RequestScheduler::ScheduleRequest(
     return -1;
 }

-int RequestScheduler::ScheduleRequest(RequestContext *request) {
+int RequestScheduler::ScheduleRequest(RequestContext* request) {
     if (running_.load(std::memory_order_acquire)) {
-        BBQItem req(request);
+        BBQItem req(request);
         queue_.PutBack(req);
         return 0;
     }
     return -1;
 }

-int RequestScheduler::ReSchedule(RequestContext *request) {
+int RequestScheduler::ReSchedule(RequestContext* request) {
     if (running_.load(std::memory_order_acquire)) {
-        BBQItem req(request);
+        BBQItem req(request);
         queue_.PutFront(req);
         return 0;
     }
@@ -126,14 +124,17 @@
 }
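// [Editor's sketch] The shutdown handshake used by Fini()/Process() in
// miniature: one stop item is queued per worker, so every worker wakes up,
// consumes exactly one stop item, and exits. Generic stand-in types, not the
// curve implementation.
#include <condition_variable>
#include <deque>
#include <mutex>

struct MiniItem { void* req; bool stop; };

std::deque<MiniItem> miniQueue;
std::mutex miniMtx;
std::condition_variable miniCv;

void MiniWorker() {
    for (;;) {
        std::unique_lock<std::mutex> lk(miniMtx);
        miniCv.wait(lk, [] { return !miniQueue.empty(); });
        MiniItem it = miniQueue.front();
        miniQueue.pop_front();
        if (it.stop) return;  // threads exit only via an explicit stop item
        // ... process it.req ...
    }
}

void MiniStop(int numWorkers) {
    std::lock_guard<std::mutex> lk(miniMtx);
    for (int i = 0; i < numWorkers; ++i) {
        miniQueue.push_back({nullptr, true});
    }
    miniCv.notify_all();
}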
 void RequestScheduler::WakeupBlockQueueAtExit() {
-    // 在scheduler退出的时候要把队列的内容清空, 通知copyset client
-    // 当前操作是退出状态,copyset client会针对inflight RPC做响应处理
-    // 正常情况下队列内容一定会在Fini调用结束之后全部清空
-    // 但是在session刷新失败的时候,scheduler无法继续下发
-    // RPC请求,所以需要设置blockingQueue_标志,告知scheduler
-    // 把队列里内容统统扔到copyset client,因为在session
-    // 续约失败后copyset client会将IO全部失败返回,scheduler
-    // 模块不需要处理具体RPC请求,由copyset client负责。
+    // When the scheduler exits, the queue must be drained and the copyset
+    // client notified that we are in the exit state, so that the copyset
+    // client can handle the inflight RPCs. Normally the queue is fully
+    // drained after Fini() completes, but when the session refresh fails the
+    // scheduler can no longer issue RPC requests, so the blockingQueue_ flag
+    // is set to tell the scheduler to hand everything in the queue over to
+    // the copyset client. After a session renewal failure the copyset client
+    // fails all IO back to the caller, so the scheduler module does not need
+    // to handle the individual RPC requests; the copyset client is
+    // responsible for them.
     client_.ResetExitFlag();
     blockingQueue_ = false;
     std::atomic_thread_fence(std::memory_order_acquire);
@@ -151,8 +152,8 @@ void RequestScheduler::Process() {
             ProcessOne(req);
         } else {
             /**
-             * 一旦遇到stop item,所有线程都可以退出,因为此时
-             * queue里面所有的request都被处理完了
+             * Once a stop item is encountered, all threads can exit, because
+             * at this point all requests in the queue have been processed
              */
             stop_.store(true, std::memory_order_release);
         }
@@ -172,8 +173,8 @@ void RequestScheduler::ProcessOne(RequestContext* ctx) {
         case OpType::WRITE:
             ctx->done_->GetInflightRPCToken();
             client_.WriteChunk(ctx->idinfo_, ctx->fileId_, ctx->epoch_,
-                               ctx->seq_, ctx->writeData_,
-                               ctx->offset_, ctx->rawlength_, ctx->sourceInfo_,
+                               ctx->seq_, ctx->writeData_, ctx->offset_,
+                               ctx->rawlength_, ctx->sourceInfo_,
                                guard.release());
             break;
         case OpType::READ_SNAP:
@@ -197,11 +198,12 @@ void RequestScheduler::ProcessOne(RequestContext* ctx) {
                               guard.release());
             break;
         default:
-            /* TODO(wudemiao) 后期整个链路错误发统一了在处理 */
+            /* TODO(wudemiao): unify error handling along the whole IO path
+             * later */
            ctx->done_->SetFailed(-1);
             LOG(ERROR) << "unknown op type: OpType::UNKNOWN";
     }
 }

-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve
diff --git a/src/client/request_scheduler.h b/src/client/request_scheduler.h
index 752f72bcb0..f00ded5bc1 100644
--- a/src/client/request_scheduler.h
+++ b/src/client/request_scheduler.h
@@ -25,88 +25,86 @@

 #include

-#include "src/common/uncopyable.h"
+#include "include/curve_compiler_specific.h"
+#include "src/client/client_common.h"
 #include "src/client/config_info.h"
+#include "src/client/copyset_client.h"
 #include "src/common/concurrent/bounded_blocking_queue.h"
 #include "src/common/concurrent/thread_pool.h"
-#include "src/client/client_common.h"
-#include "src/client/copyset_client.h"
-#include "include/curve_compiler_specific.h"
+#include "src/common/uncopyable.h"

 namespace curve {
 namespace client {

-using curve::common::ThreadPool;
-using curve::common::BoundedBlockingDeque;
 using curve::common::BBQItem;
+using curve::common::BoundedBlockingDeque;
+using curve::common::ThreadPool;
 using curve::common::Uncopyable;

 struct RequestContext;
 /**
- * 请求调度器,上层拆分的I/O会交给Scheduler的线程池
- * 分发到具体的ChunkServer,后期QoS也会放在这里处理
+ * The request scheduler: I/O split by the upper layer is handed over to the
+ * scheduler's thread pool and dispatched to the specific ChunkServers; QoS
+ * will also be handled here in the future
 */
 class RequestScheduler : public Uncopyable {
 public:
     RequestScheduler()
-        : running_(false),
-          stop_(true),
-          client_(),
-          blockingQueue_(true) {}
+        : running_(false), stop_(true), client_(), blockingQueue_(true) {}
     virtual ~RequestScheduler();

     /**
-     * 初始化
-     * @param: reqSchdulerOpt为scheduler的配置选项
-     * @param: metacache为meta信息
-     * @param: filematric为文件的metric信息
+     * Initialize
+     * @param: reqSchdulerOpt is the configuration option for the scheduler
+     * @param: metacache is the meta information
+     * @param: filematric is the metric information of the file
      */
     virtual int Init(const RequestScheduleOption& reqSchdulerOpt,
-                     MetaCache *metaCache,
-                     FileMetric* fileMetric = nullptr);
+                     MetaCache* metaCache, FileMetric* fileMetric = nullptr);

     /**
-     * 启动Scheduler的线程池开始处理request
-     * 启动之后才能push request,除此之外,只有当
-     * queue里面的任务都被处理完了,才会Scheduler
-     * 的 thread pool里面的所有线程都退出
-     * @return 0成功,-1失败
+     * Start the Scheduler's thread pool to begin processing requests.
+     * Requests can only be pushed after starting.
Furthermore, only when + * all tasks in the queue have been processed will all threads in the + * Scheduler's thread pool exit. + * @return 0 for success, -1 for failure */ virtual int Run(); /** - * Stop Scheduler,一旦调用了Fini,那么 - * 此Scheduler不再接收新的request - * @return 0成功,-1失败 + * Stop Scheduler, once Fini is called, then + * This scheduler no longer receives new requests + * @return 0 succeeded, -1 failed */ virtual int Fini(); /** - * 将request push到Scheduler处理 - * @param requests:请求列表 - * @return 0成功,-1失败 + * Push the request to the scheduler for processing + * @param requests: Request List + * @return 0 succeeded, -1 failed */ virtual int ScheduleRequest(const std::vector& requests); /** - * 将request push到Scheduler处理 - * @param request:一个request - * @return 0成功,-1失败 + * Push the request to the scheduler for processing + * @param request: A request + * @return 0 succeeded, -1 failed */ - virtual int ScheduleRequest(RequestContext *request); + virtual int ScheduleRequest(RequestContext* request); /** - * 对于需要重新入队的RPC将其放在头部 + * For RPC that need to be re queued, place them at the top */ - virtual int ReSchedule(RequestContext *request); + virtual int ReSchedule(RequestContext* request); /** - * 关闭scheduler之前如果队列在sessionnotvalid睡眠就将其唤醒 + * Before closing the scheduler, if the queue is in sessionnotvalid, wake it + * up */ virtual void WakeupBlockQueueAtExit(); /** - * 当LeaseExecutor续约失败的时候,调用LeaseTimeoutDisableIO - * 后续的IO调度会被阻塞 + * When LeaseExecutor renewal fails, call LeaseTimeoutDisableIO + * Subsequent IO scheduling will be blocked */ void LeaseTimeoutBlockIO() { std::unique_lock lk(leaseRefreshmtx_); @@ -115,8 +113,8 @@ class RequestScheduler : public Uncopyable { } /** - * 当lease又续约成功的时候,LeaseExecutor调用该接口恢复IO, - * IO调度被恢复 + * When the lease is successfully renewed, the LeaseExecutor calls the + * interface to restore IO, IO scheduling restored */ void ResumeIO() { std::unique_lock lk(leaseRefreshmtx_); @@ -126,7 +124,7 @@ class RequestScheduler : public Uncopyable { } /** - * 测试使用,获取队列 + * For testing purposes, get the queue. 
     */
     BoundedBlockingDeque>* GetQueue() {
         return &queue_;
@@ -134,14 +132,16 @@ class RequestScheduler : public Uncopyable {

 private:
     /**
-     * Thread pool的运行函数,会从queue中取request进行处理
+     * Run function of the thread pool; it takes requests from the queue and
+     * processes them
      */
     void Process();

     void ProcessOne(RequestContext* ctx);

     void WaitValidSession() {
-        // lease续约失败的时候需要阻塞IO直到续约成功
+        // When the lease renewal fails, IO needs to be blocked until the
+        // renewal succeeds
         if (blockIO_.load(std::memory_order_acquire) && blockingQueue_) {
             std::unique_lock lk(leaseRefreshmtx_);
             leaseRefreshcv_.wait(lk, [&]() -> bool {
@@ -151,32 +151,34 @@ class RequestScheduler : public Uncopyable {
     }

 private:
-    // 线程池和queue容量的配置参数
+    // Configuration parameters for the thread pool and queue capacity
     RequestScheduleOption reqschopt_;
-    // 存放 request 的队列
-    BoundedBlockingDeque> queue_;
-    // 处理 request 的线程池
+    // Queue that stores the requests
+    BoundedBlockingDeque> queue_;
+    // Thread pool that processes the requests
     ThreadPool threadPool_;
-    // Scheduler 运行标记,只有运行了,才接收 request
+    // Running flag of the Scheduler; requests are accepted only while it is
+    // running
     std::atomic running_;
-    // stop thread pool 标记,当调用 Scheduler Fini
-    // 之后且 queue 里面的 request 都处理完了,就可以
-    // 让所有处理线程退出了
+    // Stop flag of the thread pool: once Scheduler Fini has been called and
+    // all requests in the queue have been processed, all processing threads
+    // may exit
     std::atomic stop_;
-    // 访问复制组Chunk的客户端
+    // Client for accessing the chunks of the copysets
     CopysetClient client_;
-    // 续约失败,卡住IO
+    // Lease renewal failed; IO is blocked
    std::atomic blockIO_;
-    // 此锁与LeaseRefreshcv_条件变量配合使用
-    // 在leasee续约失败的时候,所有新下发的IO被阻塞直到续约成功
-    std::mutex leaseRefreshmtx_;
-    // 条件变量,用于唤醒和hang IO
+    // This lock is used together with the leaseRefreshcv_ condition variable;
+    // when lease renewal fails, all newly issued IO is blocked until the
+    // renewal succeeds
+    std::mutex leaseRefreshmtx_;
+    // Condition variable used to block and wake up IO
     std::condition_variable leaseRefreshcv_;
-    // 阻塞队列
+    // Blocking queue
     bool blockingQueue_;
 };

-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve

 #endif  // SRC_CLIENT_REQUEST_SCHEDULER_H_
diff --git a/src/client/request_sender.h b/src/client/request_sender.h
index f288160267..99bc94b2e3 100644
--- a/src/client/request_sender.h
+++ b/src/client/request_sender.h
@@ -29,23 +29,22 @@

 #include

-#include "src/client/client_config.h"
-#include "src/client/client_common.h"
-#include "src/client/chunk_closure.h"
 #include "include/curve_compiler_specific.h"
+#include "src/client/chunk_closure.h"
+#include "src/client/client_common.h"
+#include "src/client/client_config.h"
 #include "src/client/request_context.h"

 namespace curve {
 namespace client {

 /**
- * 一个RequestSender负责管理一个ChunkServer的所有
- * connection,目前一个ChunkServer仅有一个connection
+ * A RequestSender is responsible for managing all connections of one
+ * ChunkServer; currently there is only one connection per ChunkServer
 */
 class RequestSender {
 public:
-    RequestSender(ChunkServerID chunkServerId,
-                  butil::EndPoint serverEndPoint)
+    RequestSender(ChunkServerID chunkServerId, butil::EndPoint serverEndPoint)
         : chunkServerId_(chunkServerId),
           serverEndPoint_(serverEndPoint),
           channel_() {}
@@ -54,125 +53,111 @@ class RequestSender {
     int Init(const IOSenderOption& ioSenderOpt);

     /**
-     * 读Chunk
-     * @param idinfo为chunk相关的id信息
-     * @param sn:文件版本号
-     * @param offset:读的偏移
-     * @param length:读的长度
-     * @param sourceInfo 数据源信息
-     * @param
done:上一层异步回调的closure + * Reading Chunk + * @param IDInfo is the ID information related to chunk + * @param sn: File version number + * @param offset: Read offset + * @param length: Read length + * @param sourceInfo Data source information + * @param done: closure of asynchronous callback on the previous layer */ - int ReadChunk(const ChunkIDInfo& idinfo, - uint64_t sn, - off_t offset, - size_t length, - const RequestSourceInfo& sourceInfo, - ClientClosure *done); + int ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset, + size_t length, const RequestSourceInfo& sourceInfo, + ClientClosure* done); /** - * 写Chunk - * @param idinfo为chunk相关的id信息 - * @param fileId: file id - * @param epoch: file epoch - * @param sn:文件版本号 - * @param data 要写入的数据 - *@param offset:写的偏移 - * @param length:写的长度 - * @param sourceInfo 数据源信息 - * @param done:上一层异步回调的closure - */ - int WriteChunk(const ChunkIDInfo& idinfo, - uint64_t fileId, - uint64_t epoch, - uint64_t sn, - const butil::IOBuf& data, - off_t offset, - size_t length, - const RequestSourceInfo& sourceInfo, - ClientClosure *done); + * Write Chunk + * @param IDInfo is the ID information related to chunk + * @param fileId: file id + * @param epoch: file epoch + * @param sn: File version number + * @param data The data to be written + * @param offset: write offset + * @param length: The length written + * @param sourceInfo Data source information + * @param done: closure of asynchronous callback on the previous layer + */ + int WriteChunk(const ChunkIDInfo& idinfo, uint64_t fileId, uint64_t epoch, + uint64_t sn, const butil::IOBuf& data, off_t offset, + size_t length, const RequestSourceInfo& sourceInfo, + ClientClosure* done); /** - * 读Chunk快照文件 - * @param idinfo为chunk相关的id信息 - * @param sn:文件版本号 - * @param offset:读的偏移 - * @param length:读的长度 - * @param done:上一层异步回调的closure + * Reading Chunk snapshot files + * @param IDInfo is the ID information related to chunk + * @param sn: File version number + * @param offset: Read offset + * @param length: Read length + * @param done: closure of asynchronous callback on the previous layer */ - int ReadChunkSnapshot(const ChunkIDInfo& idinfo, - uint64_t sn, - off_t offset, - size_t length, - ClientClosure *done); + int ReadChunkSnapshot(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset, + size_t length, ClientClosure* done); /** - * 删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param idinfo为chunk相关的id信息 - * @param correctedSn:chunk需要修正的版本号 - * @param done:上一层异步回调的closure + * Delete snapshots generated during this dump or left over from history + * If no snapshot is generated during the dump process, modify the + * correctedSn of the chunk + * @param IDInfo is the ID information related to chunk + * @param correctedSn: Chunk The version number that needs to be corrected + * @param done: closure of asynchronous callback on the previous layer */ int DeleteChunkSnapshotOrCorrectSn(const ChunkIDInfo& idinfo, - uint64_t correctedSn, - ClientClosure *done); + uint64_t correctedSn, + ClientClosure* done); /** - * 获取chunk文件的信息 - * @param idinfo为chunk相关的id信息 - * @param done:上一层异步回调的closure - * @param retriedTimes:已经重试了几次 + * Obtain information about chunk files + * @param IDInfo is the ID information related to chunk + * @param done: closure of asynchronous callback on the previous layer + * @param retriedTimes: Number of retries */ - int GetChunkInfo(const ChunkIDInfo& idinfo, - ClientClosure *done); + int GetChunkInfo(const ChunkIDInfo& idinfo, ClientClosure* done); /** - * @brief lazy 创建clone chunk 
-     * @detail
-     * - location的格式定义为 A@B的形式。
-     * - 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址;
-     * - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs
-     *
-     * @param idinfo为chunk相关的id信息
-     * @param done:上一层异步回调的closure
-     * @param:location 数据源的url
-     * @param:sn chunk的序列号
-     * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn
-     * @param:chunkSize chunk的大小
-     * @param retriedTimes:已经重试了几次
-     *
-     * @return 错误码
-     */
-    int CreateCloneChunk(const ChunkIDInfo& idinfo,
-                         ClientClosure *done,
-                         const std::string &location,
-                         uint64_t sn,
-                         uint64_t correntSn,
-                         uint64_t chunkSize);
-
-    /**
-     * @brief 实际恢复chunk数据
-     * @param idinfo为chunk相关的id信息
-     * @param done:上一层异步回调的closure
-     * @param:offset 偏移
-     * @param:len 长度
-     * @param retriedTimes:已经重试了几次
-     *
-     * @return 错误码
-     */
-    int RecoverChunk(const ChunkIDInfo& idinfo,
-                     ClientClosure* done, uint64_t offset, uint64_t len);
+     * @brief Lazily create a clone chunk
+     * @detail
+     *  - A location is defined in the form A@B.
+     *  - If the source data is on s3, the location format is uri@s3, where
+     * uri is the address of the actual chunk object;
+     *  - If the source data is on curvefs, the location format is
+     * /filename/chunkindex@cs
+     *
+     * @param idinfo is the chunk-related ID information
+     * @param done: closure of the asynchronous callback on the upper layer
+     * @param: location, URL of the data source
+     * @param: sn The sequence number of the chunk
+     * @param: correntSn The correctedSn used to modify the chunk when
+     * creating the clone chunk
+     * @param: chunkSize Chunk size
+     * @param retriedTimes: Number of retries so far
+     *
+     * @return error code
+     */
+    int CreateCloneChunk(const ChunkIDInfo& idinfo, ClientClosure* done,
+                         const std::string& location, uint64_t sn,
+                         uint64_t correntSn, uint64_t chunkSize);
+
+    /**
+     * @brief Actually recover the chunk data
+     * @param idinfo is the chunk-related ID information
+     * @param done: closure of the asynchronous callback on the upper layer
+     * @param: offset Offset
+     * @param: len Length
+     * @param retriedTimes: Number of retries so far
+     *
+     * @return error code
+     */
+    int RecoverChunk(const ChunkIDInfo& idinfo, ClientClosure* done,
+                     uint64_t offset, uint64_t len);

     /**
-     * 重置和Chunk Server的链接
-     * @param chunkServerId:Chunk Server唯一标识
-     * @param serverEndPoint:Chunk Server
-     * @return 0成功,-1失败
+     * Reset the connection to the Chunk Server
+     * @param chunkServerId: Unique identifier of the Chunk Server
+     * @param serverEndPoint: The Chunk Server endpoint
+     * @return 0 on success, -1 on failure
      */
     int ResetSender(ChunkServerID chunkServerId,
                     butil::EndPoint serverEndPoint);

-    bool IsSocketHealth() {
-        return channel_.CheckHealth() == 0;
-    }
+    bool IsSocketHealth() { return channel_.CheckHealth() == 0; }

 private:
     void UpdateRpcRPS(ClientClosure* done, OpType type) const;

@@ -181,16 +166,17 @@ class RequestSender {
         google::protobuf::Message* rpcResponse) const;

 private:
-    // Rpc stub配置
+    // Rpc stub configuration
     IOSenderOption iosenderopt_;
-    // ChunkServer 的唯一标识 id
+    // Unique ID of the ChunkServer
     ChunkServerID chunkServerId_;
-    // ChunkServer 的地址
+    // Address of the ChunkServer
     butil::EndPoint serverEndPoint_;
-    brpc::Channel channel_; /* TODO(wudemiao): 后期会维护多个 channel */
+    brpc::Channel channel_; /* TODO(wudemiao): multiple channels will be
+                               maintained later */
 };

-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve

 #endif  // SRC_CLIENT_REQUEST_SENDER_H_
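// [Editor's sketch] Composing the CreateCloneChunk location strings in the
// A@B form documented above; a small illustration with assumed helper names.
#include <string>

std::string S3Location(const std::string& uri) {
    return uri + "@s3";  // e.g. "http://bucket/obj-123@s3"
}

std::string CurveLocation(const std::string& filename, uint64_t chunkIndex) {
    // e.g. "/vol1/42@cs" for chunk 42 of /vol1 on curvefs
    return filename + "/" + std::to_string(chunkIndex) + "@cs";
}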
diff --git a/src/client/request_sender_manager.cpp b/src/client/request_sender_manager.cpp
index a5c77a793f..a5f7264e4b 100644
--- a/src/client/request_sender_manager.cpp
+++ b/src/client/request_sender_manager.cpp
@@ -30,8 +30,7 @@ namespace curve {
 namespace client {

 RequestSenderManager::SenderPtr RequestSenderManager::GetOrCreateSender(
-    const ChunkServerID& leaderId,
-    const butil::EndPoint& leaderAddr,
+    const ChunkServerID& leaderId, const butil::EndPoint& leaderAddr,
     const IOSenderOption& senderopt) {
     {
         curve::common::ReadLockGuard guard(rwlock_);
@@ -66,7 +65,7 @@ void RequestSenderManager::ResetSenderIfNotHealth(const ChunkServerID& csId) {
         return;
     }

-    // 检查是否健康
+    // Check whether it is healthy
     if (iter->second->IsSocketHealth()) {
         return;
     }
@@ -74,5 +73,5 @@ void RequestSenderManager::ResetSenderIfNotHealth(const ChunkServerID& csId) {
     senderPool_.erase(iter);
 }

-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve
diff --git a/src/client/request_sender_manager.h b/src/client/request_sender_manager.h
index 530d8c1c82..189fab3cc8 100644
--- a/src/client/request_sender_manager.h
+++ b/src/client/request_sender_manager.h
@@ -38,8 +38,8 @@ using curve::common::Uncopyable;
 class RequestSender;

 /**
- * 所有Chunk Server的request sender管理者,
- * 可以理解为Chunk Server的链接管理者
+ * Manager of the request senders for all Chunk Servers;
+ * it can be understood as the connection manager of the Chunk Servers
 */
 class RequestSenderManager : public Uncopyable {
 public:
@@ -47,30 +47,31 @@ class RequestSenderManager : public Uncopyable {
     RequestSenderManager() : rwlock_(), senderPool_() {}

     /**
-     * 获取指定leader id的sender,如果没有则根据leader
-     * 地址,创建新的 sender并返回
-     * @param leaderId:leader的id
-     * @param leaderAddr:leader的地址
-     * @return nullptr:get或者create失败,否则成功
+     * Get the sender for the specified leader ID; if it does not exist,
+     * create a new sender based on the leader address and return it
+     * @param leaderId: The ID of the leader
+     * @param leaderAddr: The address of the leader
+     * @return nullptr if get or create failed, otherwise the sender
      */
     SenderPtr GetOrCreateSender(const ChunkServerID& leaderId,
                                 const butil::EndPoint& leaderAddr,
                                 const IOSenderOption& senderopt);

     /**
-     * @brief 如果csId对应的RequestSender不健康,就进行重置
+     * @brief If the RequestSender corresponding to csId is not healthy,
+     * reset it
      * @param csId chunkserver id
      */
     void ResetSenderIfNotHealth(const ChunkServerID& csId);

 private:
-    // 读写锁,保护senderPool_
+    // Read-write lock protecting senderPool_
     curve::common::BthreadRWLock rwlock_;
-    // 请求发送链接的map,以ChunkServer ID为key
+    // Map of the request-sending connections, keyed by ChunkServer ID
     std::unordered_map senderPool_;
 };

-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve

 #endif  // SRC_CLIENT_REQUEST_SENDER_MANAGER_H_
diff --git a/src/client/service_helper.cpp b/src/client/service_helper.cpp
index 70a7be6e34..3c8fbee5da 100644
--- a/src/client/service_helper.cpp
+++ b/src/client/service_helper.cpp
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+
 #include "src/client/client_config.h"
 #include "src/client/client_metric.h"
 #include "src/common/curve_define.h"
@@ -164,6 +165,7 @@ void ServiceHelper::ProtoCloneSourceInfo2Local(

 class GetLeaderProxy : public std::enable_shared_from_this {
     friend struct GetLeaderClosure;
+
 public:
     GetLeaderProxy()
         : proxyId_(getLeaderProxyId.fetch_add(1, std::memory_order_relaxed)),
@@ -171,10 +173,10 @@ class GetLeaderProxy : public std::enable_shared_from_this {
           success_(false) {}

     /**
-     * @brief 等待GetLeader返回结果
-     * @param[out] leaderId leader的id
-     * @param[out] leaderAddr leader的ip地址
-     * @return 0 成功 / -1 失败
+     * @brief Wait for GetLeader to return a result
+     * @param[out] leaderId The ID of the leader
+
* @param[out] leaderAddr The IP address of the leader + * @return 0 successful/-1 failed */ int Wait(ChunkServerID* leaderId, PeerAddr* leaderAddr) { { @@ -212,11 +214,11 @@ class GetLeaderProxy : public std::enable_shared_from_this { } /** - * @brief 发起GetLeader请求 - * @param peerAddresses 除当前leader以外的peer地址 - * @param logicPoolId getleader请求的logicpool id - * @param copysetId getleader请求的copyset id - * @param fileMetric metric统计 + * @brief initiates GetLeader request + * @param peerAddresses Peer addresses other than the current leader + * @param logicPoolId getleader requested logicpool ID + * @param copysetId getleader requested copyset id + * @param fileMetric metric statistics */ void StartGetLeader(const std::unordered_set& peerAddresses, const GetLeaderRpcOption& rpcOption, @@ -270,10 +272,10 @@ class GetLeaderProxy : public std::enable_shared_from_this { } /** - * @brief 处理异步请求结果 - * @param callId rpc请求id - * @param success rpc请求是否成功 - * @param peer rpc请求返回的leader信息 + * @brief processing asynchronous request results + * @param callId rpc request id + * @param success rpc request successful + * @param peer The leader information returned by the rpc request */ void HandleResponse(brpc::CallId callId, bool success, const curve::common::Peer& peer) { @@ -289,7 +291,8 @@ class GetLeaderProxy : public std::enable_shared_from_this { continue; } - // cancel以后,后续的rpc请求回调仍然会执行,但是会标记为失败 + // After canceling, subsequent rpc request callbacks will still + // be executed, but will be marked as failed brpc::StartCancel(id); } @@ -301,10 +304,11 @@ class GetLeaderProxy : public std::enable_shared_from_this { success_ = true; finishCv_.notify_one(); } else { - // 删除当前call id + // Delete the current call id callIds_.erase(callId); - // 如果为空,说明是最后一个rpc返回,需要标记请求失败,并向上返回 + // If it is empty, it indicates that it is the last rpc returned, + // and the request needs to be marked as failed and returned upwards if (callIds_.empty()) { std::lock_guard ulk(finishMtx_); finish_ = true; @@ -317,24 +321,25 @@ class GetLeaderProxy : public std::enable_shared_from_this { private: uint64_t proxyId_; - // 是否完成请求 - // 1. 其中一个请求成功 - // 2. 最后一个请求返回 - // 都会标记为true + // Whether to complete the request + // 1. One of the requests was successful + // 2. 
The last request has returned
+    // In either case the flag will be marked as true
     bool finish_;
     bthread::ConditionVariable finishCv_;
     bthread::Mutex finishMtx_;

-    // 记录cntl id
+    // Record the cntl ids
     std::set callIds_;

-    // 请求是否成功
+    // Whether the request succeeded
     bool success_;

-    // leader信息
+    // Leader information
     curve::common::Peer leader_;

-    // 保护callIds_/success_,避免异步rpc回调同时操作
+    // Protects callIds_/success_ against concurrent access from asynchronous
+    // rpc callbacks
     bthread::Mutex mtx_;

     LogicPoolID logicPooldId_;
@@ -367,17 +372,16 @@ void GetLeaderClosure::Run() {
     } else {
         success = true;
         LOG(INFO) << "GetLeader returned from " << cntl.remote_side()
-                << ", logicpool id = " << logicPoolId
-                << ", copyset id = " << copysetId
-                << ", proxy id = " << proxy->proxyId_
-                << ", leader = " << response.DebugString();
+                  << ", logicpool id = " << logicPoolId
+                  << ", copyset id = " << copysetId
+                  << ", proxy id = " << proxy->proxyId_
+                  << ", leader = " << response.DebugString();
     }

     proxy->HandleResponse(cntl.call_id(), success, response.leader());
 }

 int ServiceHelper::GetLeader(const GetLeaderInfo& getLeaderInfo,
-                             PeerAddr* leaderAddr,
-                             ChunkServerID* leaderId,
+                             PeerAddr* leaderAddr, ChunkServerID* leaderId,
                              FileMetric* fileMetric) {
     const auto& peerInfo = getLeaderInfo.copysetPeerInfo;
@@ -423,8 +427,8 @@ bool ServiceHelper::GetUserInfoFromFilename(const std::string& filename,
     return true;
 }

-int ServiceHelper::CheckChunkServerHealth(
-    const butil::EndPoint& endPoint, int32_t requestTimeoutMs) {
+int ServiceHelper::CheckChunkServerHealth(const butil::EndPoint& endPoint,
+                                          int32_t requestTimeoutMs) {
     brpc::Controller cntl;
     brpc::Channel httpChannel;
     brpc::ChannelOptions options;
@@ -437,22 +441,22 @@ int ServiceHelper::CheckChunkServerHealth(
         return -1;
     }

-    // 访问 ip:port/health
+    // Access ip:port/health
     cntl.http_request().uri() = ipPort + "/health";
     cntl.set_timeout_ms(requestTimeoutMs);
     httpChannel.CallMethod(nullptr, &cntl, nullptr, nullptr, nullptr);

     if (cntl.Failed()) {
         LOG(WARNING) << "CheckChunkServerHealth failed, " << cntl.ErrorText()
-                    << ", url = " << cntl.http_request().uri();
+                     << ", url = " << cntl.http_request().uri();
         return -1;
     } else {
         LOG(INFO) << "CheckChunkServerHealth success, "
-                << cntl.response_attachment()
-                << ", url = " << cntl.http_request().uri();
+                  << cntl.response_attachment()
+                  << ", url = " << cntl.http_request().uri();
         return 0;
     }
 }

-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve
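// [Editor's sketch] The GetLeaderProxy bookkeeping above in miniature: fan
// the request out to every peer, let the first success cancel the rest, and
// report failure only when the last outstanding call has returned. Generic
// stand-in code, not the brpc-based implementation.
#include <mutex>
#include <set>

class MiniBroadcast {
 public:
    void OnResponse(int callId, bool success) {
        std::lock_guard<std::mutex> lk(mtx_);
        if (finished_) return;        // a late callback from a cancelled call
        if (success) {
            finished_ = ok_ = true;   // first winner ends the wait
            // ... cancel the other outstanding calls here ...
        } else {
            outstanding_.erase(callId);
            if (outstanding_.empty()) {
                finished_ = true;     // last call failed: report failure
            }
        }
    }

 private:
    std::mutex mtx_;
    std::set<int> outstanding_;  // call ids still in flight
    bool finished_ = false;
    bool ok_ = false;
};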
diff --git a/src/client/service_helper.h b/src/client/service_helper.h
index 279c6a17f5..4de48afbf3 100644
--- a/src/client/service_helper.h
+++ b/src/client/service_helper.h
@@ -25,12 +25,13 @@

 #include
 #include
-
 #include
-#include
+
+#include
 #include
 #include
-#include
+#include
+
 #include "proto/cli2.pb.h"
 #include "proto/nameserver2.pb.h"
 #include "src/client/client_common.h"
@@ -40,7 +41,7 @@
 namespace curve {
 namespace client {

-// GetLeader请求rpc参数信息
+// GetLeader request rpc parameter information
 struct GetLeaderRpcOption {
     uint32_t rpcTimeoutMs;

@@ -48,29 +49,30 @@ struct GetLeaderRpcOption {
         : rpcTimeoutMs(rpcTimeoutMs) {}
 };

-// GetLeader请求对应的copyset信息及rpc相关参数信息
+// The copyset information and rpc-related parameter information
+// corresponding to the GetLeader request
 struct GetLeaderInfo {
     LogicPoolID logicPoolId;
-    CopysetID   copysetId;
+    CopysetID copysetId;
     std::vector> copysetPeerInfo;
-    int16_t     currentLeaderIndex;
+    int16_t currentLeaderIndex;
     GetLeaderRpcOption rpcOption;

     GetLeaderInfo(const LogicPoolID& logicPoolId, const CopysetID& copysetId,
-                  const std::vector>& copysetPeerInfo,  //NOLINT
+                  const std::vector>&
+                      copysetPeerInfo,  // NOLINT
                   int16_t currentLeaderIndex,
                   const GetLeaderRpcOption& rpcOption = GetLeaderRpcOption())
-        : logicPoolId(logicPoolId),
-          copysetId(copysetId),
-          copysetPeerInfo(copysetPeerInfo),
-          currentLeaderIndex(currentLeaderIndex),
-          rpcOption(rpcOption) {}
+        : logicPoolId(logicPoolId),
+          copysetId(copysetId),
+          copysetPeerInfo(copysetPeerInfo),
+          currentLeaderIndex(currentLeaderIndex),
+          rpcOption(rpcOption) {}
 };

 class GetLeaderProxy;

-// GetLeader异步请求回调
+// GetLeader asynchronous request callback
 struct GetLeaderClosure : public google::protobuf::Closure {
     GetLeaderClosure(LogicPoolID logicPoolId, CopysetID copysetId,
                      std::shared_ptr proxy)
@@ -86,7 +88,7 @@ struct GetLeaderClosure : public google::protobuf::Closure {
     curve::chunkserver::GetLeaderResponse2 response;
 };

-// ServiceHelper是client端RPC服务的一些工具
+// ServiceHelper is a set of utilities for the client-side RPC services
 class ServiceHelper {
 public:
     /**
@@ -103,38 +105,41 @@ class ServiceHelper {
                                          CloneSourceInfo* info);

     /**
-     * 从chunkserver端获取最新的leader信息
-     * @param[in]: getLeaderInfo为对应copyset的信息
-     * @param[out]: leaderAddr是出参,返回当前copyset的leader信息
-     * @param[out]: leaderId是出参,返回当前leader的id信息
-     * @param[in]: fileMetric是用于metric的记录
-     * @return: 成功返回0,否则返回-1
+     * Obtain the latest leader information from the chunkserver side
+     * @param[in]: getLeaderInfo is the information of the corresponding
+     * copyset
+     * @param[out]: leaderAddr is the output parameter that returns the leader
+     * information of the current copyset
+     * @param[out]: leaderId is the output parameter that returns the ID
+     * information of the current leader
+     * @param[in]: fileMetric is the record used for metrics
+     * @return: 0 on success, -1 otherwise
      */
     static int GetLeader(const GetLeaderInfo& getLeaderInfo,
-                         PeerAddr *leaderAddr,
+                         PeerAddr* leaderAddr,
                          ChunkServerID* leaderId = nullptr,
                          FileMetric* fileMetric = nullptr);

     /**
-     * 从文件名中获取user信息.
-     * 用户的user信息需要夹在文件名中,比如文件名为temp,用户名为user,
-     * 那么其完整的文件信息是:temp_user_。
-     * 如果文件名为: /temp_temp_,那么完整文件名为/temp_temp__user_。
-     * @param[in]: filename为用户传下来的文件名
-     * @param[out]:realfilename是真正文件名
-     * @param[out]: user信息,出参
-     * @return: 获取到user信息为true,否则false
+     * Obtain the user information from the file name.
+     * The user information is embedded in the file name: for a file named
+     * temp owned by user, the full stored name is temp_user_.
+     * If the file name is /temp_temp_, the full stored name is
+     * /temp_temp__user_.
+     * @param[in]: filename is the file name passed down by the user
+     * @param[out]: realfilename is the real file name
+     * @param[out]: user information, output parameter
+     * @return: true if the user information was obtained, false otherwise
      */
     static bool GetUserInfoFromFilename(const std::string& fname,
                                         std::string* realfilename,
                                         std::string* user);
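// [Editor's sketch] Parsing the "<name>_<user>_" convention documented
// above; simplified, with no handling for names that legitimately end in
// extra underscores.
#include <string>

bool ParseOwner(const std::string& fname, std::string* realname,
                std::string* user) {
    if (fname.size() < 3 || fname.back() != '_') return false;
    size_t sep = fname.find_last_of('_', fname.size() - 2);
    if (sep == std::string::npos) return false;
    *realname = fname.substr(0, sep);
    *user = fname.substr(sep + 1, fname.size() - 2 - sep);
    return true;
}
// ParseOwner("temp_user_", &r, &u) yields r == "temp", u == "user".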
     /**
-     * @brief: 发送http请求,判断chunkserver是否健康
      *
-     * @param: endPoint chunkserver的ip:port
-     * @param: http请求的超时时间
      *
-     * @return: 0 表示健康,-1表示不健康
+     * @brief: Send an HTTP request to determine whether the chunkserver is
+     * healthy
+     *
+     * @param: endPoint chunkserver's ip:port
+     * @param: timeout of the HTTP request
+     *
+     * @return: 0 means healthy, -1 means unhealthy
      */
     static int CheckChunkServerHealth(const butil::EndPoint& endPoint,
                                       int32_t requestTimeoutMs);
@@ -147,6 +152,6 @@ class ServiceHelper {
                                      common::ReadWriteThrottleParams* localParams);
 };

-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve
 #endif  // SRC_CLIENT_SERVICE_HELPER_H_
diff --git a/src/client/splitor.h b/src/client/splitor.h
index eaffa27a62..7e923cb1ea 100644
--- a/src/client/splitor.h
+++ b/src/client/splitor.h
@@ -46,53 +46,51 @@ class Splitor {
     static void Init(const IOSplitOption& ioSplitOpt);

     /**
-     * 用户IO拆分成Chunk级别的IO
-     * @param: iotracker大IO上下文信息
-     * @param: metaCache是io拆分过程中需要使用的缓存信息
-     * @param: targetlist大IO被拆分之后的小IO存储列表
-     * @param: data 是待写的数据
-     * @param: offset用户下发IO的其实偏移
-     * @param: length数据长度
-     * @param: mdsclient在查找metacahe失败时,通过mdsclient查找信息
-     * @param: fi存储当前IO的一些基本信息,比如chunksize等
-     * @param: FileEpoch_t file epoch info
+     * Split user IO into chunk-level IO
+     * @param: iotracker Context information of the large IO
+     * @param: metaCache is the cache information used during IO splitting
+     * @param: targetlist The list storing the small IOs after the large IO
+     * is split
+     * @param: data is the data to be written
+     * @param: offset The actual offset of the IO issued by the user
+     * @param: length Data length
+     * @param: mdsclient is used to look up information when the metacache
+     * lookup fails
+     * @param: fi stores some basic information of the current IO, such as
+     * chunksize
+     * @param: FileEpoch_t file epoch information
      */
-    static int IO2ChunkRequests(IOTracker* iotracker,
-                                MetaCache* metaCache,
-                                std::vector* targetlist,
-                                butil::IOBuf* data,
-                                off_t offset,
-                                size_t length,
-                                MDSClient* mdsclient,
-                                const FInfo_t* fi,
-                                const FileEpoch_t* fEpoch);
+    static int IO2ChunkRequests(IOTracker* iotracker, MetaCache* metaCache,
+                                std::vector* targetlist,
+                                butil::IOBuf* data, off_t offset, size_t length,
+                                MDSClient* mdsclient, const FInfo_t* fi,
+                                const FileEpoch_t* fEpoch);
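// [Editor's sketch] The core arithmetic behind IO2ChunkRequests: walk
// [offset, offset + length) and clip each piece at the next chunk boundary.
// Simplified; the real splitter also resolves copysets, segments and version
// numbers.
#include <algorithm>
#include <cstdint>
#include <vector>

struct MiniPiece { uint64_t chunkIndex, offsetInChunk, length; };

std::vector<MiniPiece> MiniSplit(uint64_t offset, uint64_t length,
                                 uint64_t chunkSize) {
    std::vector<MiniPiece> pieces;
    while (length > 0) {
        uint64_t idx = offset / chunkSize;                 // which chunk
        uint64_t off = offset % chunkSize;                 // offset inside it
        uint64_t len = std::min(length, chunkSize - off);  // clip at boundary
        pieces.push_back({idx, off, len});
        offset += len;
        length -= len;
    }
    return pieces;
}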
     /**
-     * 对单ChunkIO进行细粒度拆分
-     * @param: iotracker大IO上下文信息
-     * @param: metaCache是io拆分过程中需要使用的缓存信息
-     * @param: targetlist大IO被拆分之后的小IO存储列表
-     * @param: cid是当前chunk的ID信息
-     * @param: data是待写的数据
-     * @param: offset是当前chunk内的偏移
-     * @param: length数据长度
-     * @param: seq是当前chunk的版本号
+     * Fine-grained splitting of a single chunk IO
+     * @param: iotracker Context information of the large IO
+     * @param: metaCache is the cache information used during IO splitting
+     * @param: targetlist The list storing the small IOs after the large IO
+     * is split
+     * @param: cid is the ID information of the current chunk
+     * @param: data is the data to be written
+     * @param: offset is the offset within the current chunk
+     * @param: length Data length
+     * @param: seq is the sequence number of the current chunk
      */
-    static int SingleChunkIO2ChunkRequests(IOTracker* iotracker,
-                                           MetaCache* metaCache,
-                                           std::vector* targetlist,
-                                           const ChunkIDInfo& cid,
-                                           butil::IOBuf* data,
-                                           off_t offset,
-                                           size_t length,
-                                           uint64_t seq);
+    static int SingleChunkIO2ChunkRequests(
+        IOTracker* iotracker, MetaCache* metaCache,
+        std::vector* targetlist, const ChunkIDInfo& cid,
+        butil::IOBuf* data, off_t offset, size_t length, uint64_t seq);

     /**
-     * @brief 计算请求的location信息
-     * @param ioTracker io上下文信息
-     * @param metaCache 文件缓存信息
-     * @param chunkIdx 当前chunk信息
-     * @return source信息
+     * @brief Calculate the location information of the request
+     * @param ioTracker IO context information
+     * @param metaCache file cache information
+     * @param chunkIdx Index of the current chunk
+     * @return source information
      */
     static RequestSourceInfo CalcRequestSourceInfo(IOTracker* ioTracker,
                                                    MetaCache* metaCache,
@@ -105,34 +103,33 @@ class Splitor {

 private:
     /**
-     * IO2ChunkRequests内部会调用这个函数,进行真正的拆分操作
-     * @param: iotracker大IO上下文信息
-     * @param: mc是io拆分过程中需要使用的缓存信息
-     * @param: targetlist大IO被拆分之后的小IO存储列表
-     * @param: data 是待写的数据
-     * @param: offset用户下发IO的其实偏移
-     * @param: length数据长度
-     * @param: mdsclient在查找metacahe失败时,通过mdsclient查找信息
-     * @param: fi存储当前IO的一些基本信息,比如chunksize等
-     * @param: chunkidx是当前chunk在vdisk中的索引值
+     * IO2ChunkRequests calls this function internally to perform the actual
+     * splitting
+     * @param: iotracker Context information of the large IO
+     * @param: mc is the cache information used during IO splitting
+     * @param: targetlist The list storing the small IOs after the large IO
+     * is split
+     * @param: data is the data to be written
+     * @param: offset The actual offset of the IO issued by the user
+     * @param: length Data length
+     * @param: mdsclient is used to look up information when the metacache
+     * lookup fails
+     * @param: fi stores some basic information of the current IO, such as
+     * chunksize
+     * @param: chunkidx is the index value of the current chunk in the vdisk
      */
-    static bool AssignInternal(IOTracker* iotracker,
-                               MetaCache* metaCache,
-                               std::vector* targetlist,
-                               butil::IOBuf* data,
-                               off_t offset,
-                               uint64_t length,
-                               MDSClient* mdsclient,
-                               const FInfo_t* fi,
-                               const FileEpoch_t* fEpoch,
-                               ChunkIndex chunkidx);
-
-    static bool GetOrAllocateSegment(bool allocateIfNotExist,
-                                     uint64_t offset,
-                                     MDSClient* mdsClient,
-                                     MetaCache* metaCache,
+    static bool AssignInternal(IOTracker* iotracker, MetaCache* metaCache,
+                               std::vector* targetlist,
+                               butil::IOBuf* data, off_t offset,
+                               uint64_t length, MDSClient* mdsclient,
+                               const FInfo_t* fi, const FileEpoch_t* fEpoch,
+                               ChunkIndex chunkidx);
+
+    static bool GetOrAllocateSegment(bool allocateIfNotExist, uint64_t offset,
+                                     MDSClient* mdsClient, MetaCache* metaCache,
                                      const FInfo* fileInfo,
-                                     const FileEpoch_t *fEpoch,
+                                     const FileEpoch_t* fEpoch,
                                      ChunkIndex chunkidx);

     static int SplitForNormal(IOTracker* iotracker, MetaCache* metaCache,
@@ -149,14 +146,13 @@ class Splitor {

     static bool MarkDiscardBitmap(IOTracker* iotracker,
                                   FileSegment* fileSegment,
-                                  SegmentIndex segmentIndex,
-                                  uint64_t offset,
+                                  SegmentIndex segmentIndex, uint64_t offset,
                                   uint64_t len);

 private:
-    // IO拆分模块所使用的配置信息
+    // Configuration information used by the IO split module
     static IOSplitOption iosplitopt_;
 };
-}  // namespace client
-}  // namespace curve
+}  // namespace client
+}  // namespace curve
 #endif  // SRC_CLIENT_SPLITOR_H_
diff --git a/src/client/unstable_helper.cpp b/src/client/unstable_helper.cpp
index 5cc99945fe..ae330b1294 100644
--- a/src/client/unstable_helper.cpp
+++
b/src/client/unstable_helper.cpp
@@ -24,13 +24,13 @@
 namespace curve {
 namespace client {

-UnstableState
-UnstableHelper::GetCurrentUnstableState(ChunkServerID csId,
-                                        const butil::EndPoint &csEndPoint) {
+UnstableState UnstableHelper::GetCurrentUnstableState(
+    ChunkServerID csId, const butil::EndPoint& csEndPoint) {
     std::string ip = butil::ip2str(csEndPoint.ip).c_str();

     mtx_.lock();
-    // 如果当前ip已经超过阈值,则直接返回chunkserver unstable
+    // If this IP has already exceeded the threshold, return chunkserver
+    // unstable directly
     uint32_t unstabled = serverUnstabledChunkservers_[ip].size();
     if (unstabled >= option_.serverUnstableThreshold) {
         serverUnstabledChunkservers_[ip].emplace(csId);
diff --git a/src/client/unstable_helper.h b/src/client/unstable_helper.h
index 89cc22be8e..47c9be6a25 100644
--- a/src/client/unstable_helper.h
+++ b/src/client/unstable_helper.h
@@ -35,20 +35,17 @@
 namespace curve {
 namespace client {

-enum class UnstableState {
-    NoUnstable,
-    ChunkServerUnstable,
-    ServerUnstable
-};
-
-// 如果chunkserver宕机或者网络不可达, 发往对应chunkserver的rpc会超时
-// 返回之后, 回去refresh leader然后再去发送请求
-// 这种情况下不同copyset上的请求,总会先rpc timedout然后重新refresh leader
-// 为了避免一次多余的rpc timedout
-// 记录一下发往同一个chunkserver上超时请求的次数
-// 如果超过一定的阈值,会发送http请求检查chunkserver是否健康
-// 如果不健康,则通知所有leader在这台chunkserver上的copyset
-// 主动去refresh leader,而不是根据缓存的leader信息直接发送rpc
+enum class UnstableState { NoUnstable, ChunkServerUnstable, ServerUnstable };
+
+// If a chunkserver is down or its network is unreachable, RPCs sent to it
+// will time out; after the timeout the client refreshes the leader and then
+// resends the request. In this situation, requests on different copysets
+// would each hit an rpc timeout before refreshing the leader. To avoid such
+// redundant rpc timeouts, the number of timed-out requests sent to the same
+// chunkserver is recorded; once it exceeds a threshold, an HTTP request is
+// sent to check whether the chunkserver is healthy. If it is unhealthy, all
+// copysets whose leader is on this chunkserver are notified to actively
+// refresh their leader instead of sending RPCs based on the cached leader
+// information.
 class UnstableHelper {
 public:
     UnstableHelper() = default;
@@ -56,9 +53,7 @@ class UnstableHelper {
     UnstableHelper(const UnstableHelper&) = delete;
     UnstableHelper& operator=(const UnstableHelper&) = delete;

-    void Init(const ChunkServerUnstableOption& opt) {
-        option_ = opt;
-    }
+    void Init(const ChunkServerUnstableOption& opt) { option_ = opt; }

     void IncreTimeout(ChunkServerID csId) {
         std::unique_lock guard(mtx_);
@@ -78,10 +73,10 @@

 private:
     /**
-     * @brief 检查chunkserver状态
      *
-     * @param: endPoint chunkserver的ip:port地址
-     * @return: true 健康 / false 不健康
+     * @brief Check the chunkserver status
+     *
+     * @param: endPoint The ip:port address of the chunkserver
+     * @return: true if healthy / false if unhealthy
      */
     bool CheckChunkServerHealth(const butil::EndPoint& endPoint) const {
         return ServiceHelper::CheckChunkServerHealth(
@@ -92,10 +87,10 @@ class UnstableHelper {

     bthread::Mutex mtx_;

-    // 同一chunkserver连续超时请求次数
+    // Number of consecutive timed-out requests to the same chunkserver
     std::unordered_map timeoutTimes_;

-    // 同一server上unstable chunkserver的id
+    // IDs of the unstable chunkservers on the same server
     std::unordered_map> serverUnstabledChunkservers_;
 };
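// [Editor's sketch] The escalation policy described above, reduced to its
// decision function: per-chunkserver timeout counts escalate to an HTTP
// health probe, and enough unstable chunkservers on one server escalate to
// ServerUnstable. Thresholds and names are illustrative only.
enum class MiniState { NoUnstable, ChunkServerUnstable, ServerUnstable };

MiniState Classify(unsigned csTimeouts, unsigned csThreshold,
                   unsigned unstableOnServer, unsigned serverThreshold,
                   bool healthProbeOk) {
    if (unstableOnServer >= serverThreshold) return MiniState::ServerUnstable;
    if (csTimeouts < csThreshold) return MiniState::NoUnstable;
    // Threshold reached: let the health probe decide the verdict.
    return healthProbeOk ? MiniState::NoUnstable
                         : MiniState::ChunkServerUnstable;
}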
diff --git a/src/common/authenticator.h b/src/common/authenticator.h
index 7d9ba319c3..f52560379a 100644
--- a/src/common/authenticator.h
+++ b/src/common/authenticator.h
@@ -30,31 +30,30 @@ namespace common {

 class Authenticator {
 public:
     /**
-     * bref: 获取要进行签名的字符串
-     * @param: date, 当前的时间
-     * @param: owner, 文件所有者
-     * @return: 返回需要进行加密的字符串
+     * brief: Get the string to be signed
+     * @param: date, current time
+     * @param: owner, file owner
+     * @return: the string to be signed
      */
     static std::string GetString2Signature(uint64_t date,
-                                          const std::string& owner);
+                                           const std::string& owner);

     /**
-     * bref: 为字符串计算签名
-     * @param: String2Signature, 需要进行签名计算的字符串
-     * @param: secretKey, 为计算的秘钥
-     * @return: 返回需要进行签名过后的字符串
+     * brief: Calculate the signature for a string
+     * @param: String2Signature, the string to compute the signature over
+     * @param: secretKey, the secret key used for the calculation
+     * @return: the signed string
      */
     static std::string CalcString2Signature(const std::string& String2Signature,
                                             const std::string& secretKey);

 private:
-    static int HMacSha256(const void* key, int key_size,
-                          const void* data, int data_size,
-                          void* digest);
+    static int HMacSha256(const void* key, int key_size, const void* data,
+                          int data_size, void* digest);

-    static std::string Base64(const unsigned char *src, size_t sz);
+    static std::string Base64(const unsigned char* src, size_t sz);
 };

-}   // namespace common
-}   // namespace curve
+}  // namespace common
+}  // namespace curve
 #endif  // SRC_COMMON_AUTHENTICATOR_H_
diff --git a/src/common/bitmap.cpp b/src/common/bitmap.cpp
index dbff32702c..50d33181d9 100644
--- a/src/common/bitmap.cpp
+++ b/src/common/bitmap.cpp
@@ -20,20 +20,22 @@
 * Author: yangyaokai
 */

+#include "src/common/bitmap.h"
+
 #include
 #include
-#include
+
 #include
-#include "src/common/bitmap.h"
+#include

 namespace curve {
 namespace common {

-std::string BitRangeVecToString(const std::vector &ranges) {
+std::string BitRangeVecToString(const std::vector& ranges) {
     std::stringstream ss;
     for (uint32_t i = 0; i < ranges.size(); ++i) {
         if (i != 0) {
-            ss << ", ";
+            ss << ", ";
         }
         ss << "(" << ranges[i].beginIndex << "," << ranges[i].endIndex << ")";
     }
@@ -44,14 +46,14 @@ const uint32_t Bitmap::NO_POS = 0xFFFFFFFF;

 Bitmap::Bitmap(uint32_t bits) : bits_(bits) {
     int count = unitCount();
-    bitmap_ = new(std::nothrow) char[count];
+    bitmap_ = new (std::nothrow) char[count];
     CHECK(bitmap_ != nullptr) << "allocate bitmap failed.";
     memset(bitmap_, 0, count);
 }

 Bitmap::Bitmap(uint32_t bits, const char* bitmap) : bits_(bits) {
     int count = unitCount();
-    bitmap_ = new(std::nothrow) char[count];
+    bitmap_ = new (std::nothrow) char[count];
     CHECK(bitmap_ != nullptr) << "allocate bitmap failed.";
     if (bitmap != nullptr) {
         memcpy(bitmap_, bitmap, count);
@@ -64,7 +66,7 @@ Bitmap::Bitmap(uint32_t bits, char* bitmap, bool transfer) : bits_(bits) {
     int count = unitCount();
     if (!transfer) {
-        bitmap_ = new(std::nothrow) char[count];
+        bitmap_ = new (std::nothrow) char[count];
         CHECK(bitmap_ != nullptr) << "allocate bitmap failed.";
         if (bitmap != nullptr) {
             memcpy(bitmap_, bitmap, count);
@@ -87,18 +89,17 @@ Bitmap::~Bitmap() {
 Bitmap::Bitmap(const Bitmap& bitmap) {
     bits_ = bitmap.Size();
     int count = unitCount();
-    bitmap_ = new(std::nothrow) char[count];
+    bitmap_ = new (std::nothrow) char[count];
     CHECK(bitmap_ != nullptr) << "allocate bitmap failed.";
     memcpy(bitmap_, bitmap.GetBitmap(), count);
 }

-Bitmap& Bitmap::operator = (const Bitmap& bitmap) {
-    if (this == &bitmap)
-        return *this;
+Bitmap& Bitmap::operator=(const Bitmap& bitmap) {
+    if (this == &bitmap) return *this;
     delete[] bitmap_;
     bits_ = bitmap.Size();
     int count = unitCount();
-    bitmap_ = new
(std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; memcpy(bitmap_, bitmap.GetBitmap(), count); return *this; @@ -118,23 +119,19 @@ Bitmap& Bitmap::operator=(Bitmap&& other) noexcept { return *this; } -bool Bitmap::operator == (const Bitmap& bitmap) const { - if (bits_ != bitmap.Size()) - return false; +bool Bitmap::operator==(const Bitmap& bitmap) const { + if (bits_ != bitmap.Size()) return false; return 0 == memcmp(bitmap_, bitmap.GetBitmap(), unitCount()); } -bool Bitmap::operator != (const Bitmap& bitmap) const { +bool Bitmap::operator!=(const Bitmap& bitmap) const { return !(*this == bitmap); } -void Bitmap::Set() { - memset(bitmap_, 0xff, unitCount()); -} +void Bitmap::Set() { memset(bitmap_, 0xff, unitCount()); } void Bitmap::Set(uint32_t index) { - if (index < bits_) - bitmap_[indexOfUnit(index)] |= mask(index); + if (index < bits_) bitmap_[indexOfUnit(index)] |= mask(index); } void Bitmap::Set(uint32_t startIndex, uint32_t endIndex) { @@ -144,13 +141,10 @@ void Bitmap::Set(uint32_t startIndex, uint32_t endIndex) { } } -void Bitmap::Clear() { - memset(bitmap_, 0, unitCount()); -} +void Bitmap::Clear() { memset(bitmap_, 0, unitCount()); } void Bitmap::Clear(uint32_t index) { - if (index < bits_) - bitmap_[indexOfUnit(index)] &= ~mask(index); + if (index < bits_) bitmap_[indexOfUnit(index)] &= ~mask(index); } void Bitmap::Clear(uint32_t startIndex, uint32_t endIndex) { @@ -169,106 +163,93 @@ bool Bitmap::Test(uint32_t index) const { uint32_t Bitmap::NextSetBit(uint32_t index) const { for (; index < bits_; ++index) { - if (Test(index)) - break; + if (Test(index)) break; } - if (index >= bits_) - index = NO_POS; + if (index >= bits_) index = NO_POS; return index; } uint32_t Bitmap::NextSetBit(uint32_t startIndex, uint32_t endIndex) const { uint32_t index = startIndex; - // bitmap中最后一个bit的index值 + // The index value of the last bit in the bitmap uint32_t lastIndex = bits_ - 1; - // endIndex值不能超过lastIndex - if (endIndex > lastIndex) - endIndex = lastIndex; + // The endIndex value cannot exceed lastIndex + if (endIndex > lastIndex) endIndex = lastIndex; for (; index <= endIndex; ++index) { - if (Test(index)) - break; + if (Test(index)) break; } - if (index > endIndex) - index = NO_POS; + if (index > endIndex) index = NO_POS; return index; } uint32_t Bitmap::NextClearBit(uint32_t index) const { for (; index < bits_; ++index) { - if (!Test(index)) - break; + if (!Test(index)) break; } - if (index >= bits_) - index = NO_POS; + if (index >= bits_) index = NO_POS; return index; } uint32_t Bitmap::NextClearBit(uint32_t startIndex, uint32_t endIndex) const { uint32_t index = startIndex; uint32_t lastIndex = bits_ - 1; - // endIndex值不能超过lastIndex - if (endIndex > lastIndex) - endIndex = lastIndex; + // The endIndex value cannot exceed lastIndex + if (endIndex > lastIndex) endIndex = lastIndex; for (; index <= endIndex; ++index) { - if (!Test(index)) - break; + if (!Test(index)) break; } - if (index > endIndex) - index = NO_POS; + if (index > endIndex) index = NO_POS; return index; } -void Bitmap::Divide(uint32_t startIndex, - uint32_t endIndex, +void Bitmap::Divide(uint32_t startIndex, uint32_t endIndex, vector* clearRanges, vector* setRanges) const { - // endIndex的值不能小于startIndex - if (endIndex < startIndex) - return; + // The value of endIndex cannot be less than startIndex + if (endIndex < startIndex) return; - // endIndex值不能超过lastIndex + // The endIndex value cannot exceed lastIndex uint32_t lastIndex = bits_ - 1; - if (endIndex > lastIndex) - endIndex = 
lastIndex; + if (endIndex > lastIndex) endIndex = lastIndex; BitRange clearRange; BitRange setRange; vector tmpClearRanges; vector tmpSetRanges; - // 下一个位为0的index + // Index of the next bit that is 0 uint32_t nextClearIndex; - // 下一个位为1的index + // Index of the next bit that is 1 uint32_t nextSetIndex; - // 划分所有range + // Divide all ranges while (startIndex != NO_POS) { nextClearIndex = NextClearBit(startIndex, endIndex); - // 1.存放当前clear index之前的 set range - // nextClearIndex如果等于startIndex说明前面没有 set range + // 1. Store the set range before the current clear index + // If nextClearIndex is equal to startIndex, it indicates that there + // is no set range before it if (nextClearIndex != startIndex) { setRange.beginIndex = startIndex; - // nextClearIndex等于NO_POS说明已经找到末尾 - // 最后一块连续区域是 set range - setRange.endIndex = nextClearIndex == NO_POS - ? endIndex - : nextClearIndex - 1; + // If nextClearIndex equals NO_POS, the end has been reached and + // the last continuous region is a set range + setRange.endIndex = + nextClearIndex == NO_POS ? endIndex : nextClearIndex - 1; tmpSetRanges.push_back(setRange); } - if (nextClearIndex == NO_POS) - break; + if (nextClearIndex == NO_POS) break; nextSetIndex = NextSetBit(nextClearIndex, endIndex); - // 2.存放当前set index之前的 clear range - // 能到这一步说明前面肯定存在clear range,所以不用像第1步一样做判断 + // 2. Store the clear range before the current set index + // Reaching this step means a clear range must exist ahead, so there + // is no need for the check performed in step 1 clearRange.beginIndex = nextClearIndex; - clearRange.endIndex = nextSetIndex == NO_POS - ? endIndex - : nextSetIndex - 1; + clearRange.endIndex = + nextSetIndex == NO_POS ? endIndex : nextSetIndex - 1; tmpClearRanges.push_back(clearRange); startIndex = nextSetIndex; } - // 根据参数中的clearRanges和setRanges指针是否为空返回结果 + // Return results depending on whether the clearRanges and setRanges + // pointers in the parameters are null if (clearRanges != nullptr) { *clearRanges = std::move(tmpClearRanges); } @@ -277,13 +258,9 @@ void Bitmap::Divide(uint32_t startIndex, } } -uint32_t Bitmap::Size() const { - return bits_; -} +uint32_t Bitmap::Size() const { return bits_; } -const char* Bitmap::GetBitmap() const { - return bitmap_; -} +const char* Bitmap::GetBitmap() const { return bitmap_; } } // namespace common } // namespace curve diff --git a/src/common/bitmap.h b/src/common/bitmap.h index e7a0e1270d..f4b6f76ce7 100644 --- a/src/common/bitmap.h +++ b/src/common/bitmap.h @@ -24,8 +24,9 @@ #define SRC_COMMON_BITMAP_H_ #include -#include + #include +#include namespace curve { namespace common { @@ -36,30 +37,30 @@ const int BITMAP_UNIT_SIZE = 8; const int ALIGN_FACTOR = 3; // 2 ^ ALIGN_FACTOR = BITMAP_UNIT_SIZE /** - * 表示bitmap中的一段连续区域,为闭区间 + * Represents a continuous region in a bitmap, which is a closed interval */ struct BitRange { - // 连续区域起始位置在bitmap中的索引 + // Index of the starting position of a continuous region in the bitmap uint32_t beginIndex; - // 连续区域结束位置在bitmap中的索引 + // Index of the end position of a continuous region in the bitmap uint32_t endIndex; }; - -std::string BitRangeVecToString(const std::vector &ranges); +std::string BitRangeVecToString(const std::vector& ranges); class Bitmap { public: /** - * 新建bitmap时的构造函数 - * @param bits: 要构造的bitmap的位数 + * Constructor when creating a new bitmap + * @param bits: the number of bits of the bitmap to construct */ explicit Bitmap(uint32_t bits); /** - * 从已有的快照文件初始化时的构造函数 - * 构造函数内部会再new一个新的bitmap,然后从参数中的bitmap memcpy过去 - * @param bits: bitmap的位数 - * @param bitmap: 外部提供的用于初始化的bitmap +
* Constructor used when initializing from an existing snapshot file. + * It allocates a new bitmap internally and memcpys the contents of the + * bitmap passed in as a parameter. + * @param bits: the number of bits of the bitmap + * @param bitmap: an externally provided bitmap used for initialization */ explicit Bitmap(uint32_t bits, const char* bitmap); @@ -70,142 +71,158 @@ class Bitmap { ~Bitmap(); /** - * 拷贝构造,使用深拷贝 - * @param bitmap:从该对象拷贝内容 + * Copy constructor, using deep copy + * @param bitmap: the object to copy content from */ Bitmap(const Bitmap& bitmap); /** - * 赋值函数,使用深拷贝 - * @param bitmap:从该对象拷贝内容 - * @reutrn:返回拷贝后对象引用 + * Assignment function, using deep copy + * @param bitmap: the object to copy content from + * @return: a reference to the copied object */ - Bitmap& operator = (const Bitmap& bitmap); + Bitmap& operator=(const Bitmap& bitmap); Bitmap(Bitmap&& other) noexcept; Bitmap& operator=(Bitmap&& other) noexcept; /** - * 比较两个bitmap是否相同 - * @param bitmap:待比较的bitmap - * @return:如果相同返回true,如果不同返回false + * Compare whether two bitmaps are the same + * @param bitmap: the bitmap to compare against + * @return: returns true if they are the same, false if they differ */ - bool operator == (const Bitmap& bitmap) const; + bool operator==(const Bitmap& bitmap) const; /** - * 比较两个bitmap是否不同 - * @param bitmap:待比较的bitmap - * @return:如果不同返回true,如果相同返回false + * Compare whether two bitmaps are different + * @param bitmap: the bitmap to compare against + * @return: returns true if they differ, false if they are the same */ - bool operator != (const Bitmap& bitmap) const; + bool operator!=(const Bitmap& bitmap) const; /** - * 将所有位置1 + * Set all bits to 1 */ void Set(); /** - * 将指定位置1 - * @param index: 指定位的位置 + * Set the bit at the specified position to 1 + * @param index: the position of the bit */ void Set(uint32_t index); /** - * 将指定范围的位置为1 - * @param startIndex: 范围起始位置,包括此位置 - * @param endIndex: 范围结束位置,包括此位置 + * Set all bits in the specified range to 1 + * @param startIndex: the start position of the range, inclusive + * @param endIndex: the end position of the range, inclusive */ void Set(uint32_t startIndex, uint32_t endIndex); /** - * 将所有位置0 + * Clear all bits to 0 */ void Clear(); /** - * 将指定位置0 - * @param index: 指定位的位置 + * Clear the bit at the specified position to 0 + * @param index: the position of the bit */ void Clear(uint32_t index); /** - * 将指定范围的位置为0 - * @param startIndex: 范围起始位置,包括此位置 - * @param endIndex: 范围结束位置,包括此位置 + * Clear all bits in the specified range to 0 + * @param startIndex: the start position of the range, inclusive + * @param endIndex: the end position of the range, inclusive */ void Clear(uint32_t startIndex, uint32_t endIndex); /** - * 获取指定位置位的状态 - * @param index: 指定位的位置 - * @return: true表示当前位状态为1,false表示为0 + * Get the state of the bit at the specified position + * @param index: the position of the bit + * @return: true if the bit is 1, false if it is 0 */ bool Test(uint32_t index) const; /** - * 获取指定位置及之后的首个位为1的位置 - * @param index: 指定位的位置,包含此位置 - * @return: 首个位为1的位置,如果不存在返回NO_POS + * Get the position of the first set (1) bit at or after the specified + * position + * @param index: the position of the bit, inclusive + * @return: the position of the first set bit, or NO_POS if none exists.
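     *
     * Illustrative sketch (editor's addition, not part of this patch):
     *   Bitmap bm(16);      // a fresh bitmap starts with all bits cleared
     *   bm.Set(3);
     *   bm.Set(7);
     *   bm.NextSetBit(0);   // returns 3
     *   bm.NextSetBit(4);   // returns 7
     *   bm.NextSetBit(8);   // returns NO_POS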
 */ uint32_t NextSetBit(uint32_t index) const; /** - * 获取指定起始位置到结束位置之间的的首个位为1的位置 - * @param startIndex: 起始位置,包含此位置 - * @param endIndex: 结束位置,包含此位置 - * @return: 首个位为1的位置,如果指定范围内不存在则返回NO_POS + * Get the position of the first set (1) bit between the specified start + * and end positions + * @param startIndex: the start position, inclusive + * @param endIndex: the end position, inclusive + * @return: the position of the first set bit, or NO_POS if none exists + * within the specified range */ uint32_t NextSetBit(uint32_t startIndex, uint32_t endIndex) const; /** - * 获取指定位置及之后的首个位为0的位置 - * @param index: 指定位的位置,包含此位置 - * @return: 首个位为0的位置,如果不存在返回NO_POS + * Get the position of the first clear (0) bit at or after the specified + * position + * @param index: the position of the bit, inclusive + * @return: the position of the first clear bit, or NO_POS if none exists */ uint32_t NextClearBit(uint32_t index) const; /** - * 获取指定起始位置到结束位置之间的的首个位为0的位置 - * @param startIndex: 起始位置,包含此位置 - * @param endIndex: 结束位置,包含此位置 - * @return: 首个位为0的位置,如果指定范围内不存在则返回NO_POS + * Get the position of the first clear (0) bit between the specified start + * and end positions + * @param startIndex: the start position, inclusive + * @param endIndex: the end position, inclusive + * @return: the position of the first clear bit, or NO_POS if none exists + * within the specified range */ uint32_t NextClearBit(uint32_t startIndex, uint32_t endIndex) const; /** - * 将bitmap的指定区域分割成若干连续区域,划分依据为位状态,连续区域内的位状态一致 - * 例如:00011100会被划分为三个区域,[0,2]、[3,5]、[6,7] - * @param startIndex: 指定区域的起始索引 - * @param endIndex: 指定范围的结束索引 - * @param clearRanges: 存放位状态为0的连续区域的向量,可以指定为nullptr - * @param setRanges: 存放位状态为1的连续区域的向量,可以指定为nullptr - */ - void Divide(uint32_t startIndex, - uint32_t endIndex, + * Divide the specified area of the bitmap into several continuous regions + * based on bit state, so that the bit state is consistent within each + * region. For example, 00011100 is divided into three regions: [0,2], + * [3,5], [6,7] + * @param startIndex: the starting index of the specified region + * @param endIndex: the end index of the specified range + * @param clearRanges: a vector receiving the continuous regions whose bit + * state is 0; may be nullptr + * @param setRanges: a vector receiving the continuous regions whose bit + * state is 1; may be nullptr + */ + void Divide(uint32_t startIndex, uint32_t endIndex, vector* clearRanges, vector* setRanges) const; /** - * bitmap的有效位数 - * @return: 返回位数 + * Number of valid bits in the bitmap + * @return: returns the number of bits */ uint32_t Size() const; /** - * 获取bitmap的内存指针,用于持久化bitmap - * @return: bitmap的内存指针 + * Get the underlying memory pointer of the bitmap, used to persist the + * bitmap + * @return: the memory pointer of the bitmap */ const char* GetBitmap() const; private: - // bitmap的字节数 + // Number of bytes in the bitmap int unitCount() const { - // 同 (bits_ + BITMAP_UNIT_SIZE - 1) / BITMAP_UNIT_SIZE + // Same as (bits_ + BITMAP_UNIT_SIZE - 1) / BITMAP_UNIT_SIZE return (bits_ + BITMAP_UNIT_SIZE - 1) >> ALIGN_FACTOR; } - // 指定位置的bit在其所在字节中的偏移 + // The offset of the bit at the specified position within its byte int indexOfUnit(uint32_t index) const { - // 同 index / BITMAP_UNIT_SIZE + // Same as index / BITMAP_UNIT_SIZE return index >> ALIGN_FACTOR; } - // 逻辑计算掩码值 + // Mask value used for bit operations char mask(uint32_t index)
const { - int indexInUnit = index % BITMAP_UNIT_SIZE; + int indexInUnit = index % BITMAP_UNIT_SIZE; char mask = 0x01 << indexInUnit; return mask; } public: - // 表示不存在的位置,值为0xffffffff + // Represents a non-existent position, with a value of 0xffffffff static const uint32_t NO_POS; private: - uint32_t bits_; - char* bitmap_; + uint32_t bits_; + char* bitmap_; }; } // namespace common diff --git a/src/common/channel_pool.h b/src/common/channel_pool.h index 458baa33d3..fb549023e9 100644 --- a/src/common/channel_pool.h +++ b/src/common/channel_pool.h @@ -24,9 +24,10 @@ #define SRC_COMMON_CHANNEL_POOL_H_ #include -#include -#include + #include +#include +#include #include #include "src/common/concurrent/concurrent.h" @@ -39,18 +40,18 @@ namespace common { class ChannelPool { public: /** - * @brief 从channelMap获取或创建并Init到指定地址的channel + * @brief Obtain or create a channel from channelMap and Init it to the + * specified address * - * @param addr 对端的地址 - * @param[out] channelPtr 到指定地址的channel + * @param addr the address of the peer + * @param[out] channelPtr the channel to the specified address * - * @return 成功返回0,失败返回-1 + * @return returns 0 for success, -1 for failure */ - int GetOrInitChannel(const std::string& addr, - ChannelPtr* channelPtr); + int GetOrInitChannel(const std::string& addr, ChannelPtr* channelPtr); /** - * @brief 清空map + * @brief Clear the map */ void Clear(); @@ -62,5 +63,4 @@ class ChannelPool { } // namespace common } // namespace curve -#endif // SRC_COMMON_CHANNEL_POOL_H_ - +#endif // SRC_COMMON_CHANNEL_POOL_H_ diff --git a/src/common/concurrent/bounded_blocking_queue.h b/src/common/concurrent/bounded_blocking_queue.h index 56c59fcfc1..7d8449d812 100644 --- a/src/common/concurrent/bounded_blocking_queue.h +++ b/src/common/concurrent/bounded_blocking_queue.h @@ -23,12 +23,12 @@ #ifndef SRC_COMMON_CONCURRENT_BOUNDED_BLOCKING_QUEUE_H_ #define SRC_COMMON_CONCURRENT_BOUNDED_BLOCKING_QUEUE_H_ +#include #include +#include //NOLINT #include -#include //NOLINT #include -#include //NOLINT -#include +#include //NOLINT #include #include "src/common/uncopyable.h" @@ -36,18 +36,17 @@ namespace curve { namespace common { -template +template class BBQItem { public: - explicit BBQItem(const T &t, bool stop = false) - : item_(t) { + explicit BBQItem(const T& t, bool stop = false) : item_(t) { stop_.store(stop, std::memory_order_release); } - BBQItem(const BBQItem &bbqItem) { + BBQItem(const BBQItem& bbqItem) { item_ = bbqItem.item_; stop_.store(bbqItem.stop_, std::memory_order_release); } - BBQItem &operator=(const BBQItem &bbqItem) { + BBQItem& operator=(const BBQItem& bbqItem) { if (&bbqItem == this) { return *this; } @@ -56,13 +55,9 @@ class BBQItem { return *this; } - bool IsStop() const { - return stop_.load(std::memory_order_acquire); - } + bool IsStop() const { return stop_.load(std::memory_order_acquire); } - T Item() { - return item_; - } + T Item() { return item_; } private: T item_; @@ -70,18 +65,13 @@ class BBQItem { }; /** - * 有 capacity 限制的阻塞队列,线程安全 + * Bounded blocking queue with a capacity limit, thread safe */ -template +template class BoundedBlockingDeque : public Uncopyable { public: BoundedBlockingDeque() - : mutex_(), - notEmpty_(), - notFull_(), - deque_(), - capacity_(0) { - } + : mutex_(), notEmpty_(), notFull_(), deque_(), capacity_(0) {} int Init(const int capacity) { if (0 >= capacity) { @@ -91,7 +81,7 @@ class BoundedBlockingDeque : public Uncopyable { return 0; } - void PutBack(const T &x) { + void PutBack(const T& x) { std::unique_lock guard(mutex_); while
(deque_.size() == capacity_) { notFull_.wait(guard); @@ -100,7 +90,7 @@ class BoundedBlockingDeque : public Uncopyable { notEmpty_.notify_one(); } - void PutFront(const T &x) { + void PutFront(const T& x) { std::unique_lock guard(mutex_); while (deque_.size() == capacity_) { notFull_.wait(guard); diff --git a/src/common/concurrent/concurrent.h b/src/common/concurrent/concurrent.h index df79ea8ec8..9d87996f2e 100644 --- a/src/common/concurrent/concurrent.h +++ b/src/common/concurrent/concurrent.h @@ -24,39 +24,38 @@ #define SRC_COMMON_CONCURRENT_CONCURRENT_H_ #include -#include // NOLINT -#include // NOLINT +#include // NOLINT +#include // NOLINT #include "src/common/concurrent/count_down_event.h" -#include "src/common/concurrent/spinlock.h" #include "src/common/concurrent/rw_lock.h" - -#include "src/common/concurrent/thread_pool.h" +#include "src/common/concurrent/spinlock.h" #include "src/common/concurrent/task_queue.h" #include "src/common/concurrent/task_thread_pool.h" +#include "src/common/concurrent/thread_pool.h" namespace curve { namespace common { -// curve公共组件命名空间替换 -template -using Atomic = std::atomic; -using Mutex = std::mutex; -using Thread = std::thread; -using LockGuard = std::lock_guard; -using UniqueLock = std::unique_lock; -using ConditionVariable = std::condition_variable; - -// curve内部定义的锁组件 -using RWLock = RWLock; -using SpinLock = SpinLock; -using ReadLockGuard = ReadLockGuard; -using WriteLockGuard = WriteLockGuard; - -// curve内部定义的线程组件 -using TaskQueue = TaskQueue; -using ThreadPool = ThreadPool; - -} // namespace common -} // namespace curve +// Namespace aliases for curve common components +template +using Atomic = std::atomic; +using Mutex = std::mutex; +using Thread = std::thread; +using LockGuard = std::lock_guard; +using UniqueLock = std::unique_lock; +using ConditionVariable = std::condition_variable; + +// Lock components defined internally in curve +using RWLock = RWLock; +using SpinLock = SpinLock; +using ReadLockGuard = ReadLockGuard; +using WriteLockGuard = WriteLockGuard; + +// Thread components defined internally in curve +using TaskQueue = TaskQueue; +using ThreadPool = ThreadPool; + +} // namespace common +} // namespace curve #endif // SRC_COMMON_CONCURRENT_CONCURRENT_H_ diff --git a/src/common/concurrent/count_down_event.h b/src/common/concurrent/count_down_event.h index bfce259351..404fc32681 100644 --- a/src/common/concurrent/count_down_event.h +++ b/src/common/concurrent/count_down_event.h @@ -23,36 +23,30 @@ #ifndef SRC_COMMON_CONCURRENT_COUNT_DOWN_EVENT_H_ #define SRC_COMMON_CONCURRENT_COUNT_DOWN_EVENT_H_ -#include //NOLINT -#include //NOLINT -#include //NOLINT +#include //NOLINT +#include //NOLINT +#include //NOLINT namespace curve { namespace common { /** - * 用于线程间同步,CountDownEvent是通过一个计数器来实现的,计数器的 - * 初始值initCnt为需要等待event的总数,通过接口Wait等待。每当一个 - * event发生,就会调用Signal接口,让计数器的值就会减 1。当计数器值到 - * 达0时,则Wait等待就会结束。一般用于等待一些事件发生 + * Used for inter-thread synchronization, CountDownEvent is implemented using a + * counter with an initial value (initCnt) representing the total number of + * events to wait for. Threads can wait for events using the Wait interface. + * Each time an event occurs, the Signal interface is called, decrementing the + * counter by 1. When the counter reaches 0, the waiting in Wait will conclude. + * It is typically used to wait for certain events to occur.
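 *
 * A minimal usage sketch (editor's addition, not part of this patch):
 *   CountDownEvent e(2);                    // wait for two events
 *   std::thread t1([&e] { e.Signal(); });
 *   std::thread t2([&e] { e.Signal(); });
 *   e.Wait();                               // returns after both Signal() calls
 *   t1.join();
 *   t2.join();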
 */ class CountDownEvent { public: - CountDownEvent() : - mutex_(), - cond_(), - count_() { - } + CountDownEvent() : mutex_(), cond_(), count_() {} - explicit CountDownEvent(int initCnt) : - mutex_(), - cond_(), - count_(initCnt) { - } + explicit CountDownEvent(int initCnt) : mutex_(), cond_(), count_(initCnt) {} /** - * 重新设置event计数 - * @param eventCount:事件计数 + * Reset the event count + * @param eventCount: the event count */ void Reset(int eventCount) { std::unique_lock guard(mutex_); @@ -60,7 +54,7 @@ class CountDownEvent { } /** - * 通知wait event发生了一次,计数减1 + * Signal that one awaited event has occurred; the count decreases by 1 */ void Signal() { std::unique_lock guard(mutex_); @@ -71,7 +65,7 @@ class CountDownEvent { } /** - * 等待initCnt的event发生之后,再唤醒 + * Wait until initCnt events have occurred before waking up */ void Wait() { std::unique_lock guard(mutex_); @@ -81,9 +75,9 @@ class CountDownEvent { } /** - * 等待initCnt的event发生,或者指定时长 - * @param waitMs: 等待的ms数 - * @return:如果所有等待的event都发生,那么就返回true,否则false + * Wait until initCnt events occur, or until the specified duration elapses + * @param waitMs: the number of milliseconds to wait + * @return: true if all awaited events occurred, otherwise false */ bool WaitFor(int waitMs) { std::unique_lock guard(mutex_); @@ -92,11 +86,11 @@ class CountDownEvent { while (count_ > 0) { auto now = std::chrono::high_resolution_clock::now(); std::chrono::duration elapsed = now - start; - // 计算还剩余多少时间 + // Calculate how much time is left int leftMs = waitMs - static_cast(elapsed.count()); if (leftMs > 0) { - auto ret = cond_.wait_for(guard, - std::chrono::milliseconds(leftMs)); + auto ret = + cond_.wait_for(guard, std::chrono::milliseconds(leftMs)); (void)ret; } else { break; @@ -113,7 +107,7 @@ class CountDownEvent { private: mutable std::mutex mutex_; std::condition_variable cond_; - // 需要等待的事件计数 + // Count of events to wait for int count_; }; diff --git a/src/common/concurrent/task_thread_pool.h b/src/common/concurrent/task_thread_pool.h index b9b23eebe3..cfd9524024 100644 --- a/src/common/concurrent/task_thread_pool.h +++ b/src/common/concurrent/task_thread_pool.h @@ -23,27 +23,26 @@ #ifndef SRC_COMMON_CONCURRENT_TASK_THREAD_POOL_H_ #define SRC_COMMON_CONCURRENT_TASK_THREAD_POOL_H_ -#include -#include //NOLINT -#include -#include -#include //NOLINT #include -#include //NOLINT #include +#include //NOLINT +#include +#include #include #include +#include //NOLINT +#include //NOLINT #include +#include #include "src/common/uncopyable.h" namespace curve { namespace common { - using Task = std::function; -// 异步运行回调的线程池 +// Thread pool for asynchronously running callbacks template class TaskThreadPool : public Uncopyable { @@ -58,9 +57,10 @@ class TaskThreadPool : public Uncopyable { } /** - * 启动一个线程池 - * @param numThreads 线程池的线程数量,必须大于 0,不设置就是 INT_MAX (不推荐) - * @param queueCapacity queue 的容量,必须大于 0 + * Start a thread pool + * @param numThreads the number of threads in the pool, must be greater + * than 0 + * @param queueCapacity the capacity of the queue, must be greater than 0; + * it defaults to INT_MAX when not set (not recommended) * @return */ int Start(int numThreads, int queueCapacity = INT_MAX) { @@ -86,7 +86,7 @@ class TaskThreadPool : public Uncopyable { } /** - * 关闭线程池 + * Shut down the thread pool */ void Stop() { if (running_.exchange(false, std::memory_order_acq_rel)) { @@ -101,10 +101,12 @@ class TaskThreadPool : public Uncopyable { } /** - * push 一个 task 给线程池处理,如果队列满,线程阻塞,直到 task push 进去 - * 需要注意的是用户自己需要保证 task 的有效的。除此之外,此 TaskThreadPool - * 并没有提供获取 f 的返回值,所以如果需要获取运行 f 的一些额外信息,需要用户 - * 自己在 f 内部逻辑添加 +
* Push a task to the thread pool for processing. If the queue is full, the + * calling thread blocks until the task has been pushed in. Note that users + * themselves must ensure the task remains valid. In addition, this + * TaskThreadPool does not provide access to the return value of f, so if + * any extra information about running f is needed, users must add that + * logic inside f themselves * @tparam F * @tparam Args * @param f @@ -121,40 +123,39 @@ class TaskThreadPool : public Uncopyable { notEmpty_.notify_one(); } - /* 返回线程池 queue 的容量 */ - int QueueCapacity() const { - return capacity_; - } + /* Returns the capacity of the thread pool queue */ + int QueueCapacity() const { return capacity_; } - /* 返回线程池当前 queue 中的 task 数量,线程安全 */ + /* Returns the number of tasks in the current queue of the thread pool, + * thread safe */ int QueueSize() const { std::lock_guard guard(mutex_); return queue_.size(); } - /* 返回线程池的线程数 */ - int ThreadOfNums() const { - return threads_.size(); - } + /* Returns the number of threads in the thread pool */ + int ThreadOfNums() const { return threads_.size(); } protected: - /*线程工作时执行的函数*/ + /* Function executed by the worker threads */ virtual void ThreadFunc() { while (running_.load(std::memory_order_acquire)) { Task task(Take()); - /* ThreadPool 退出的时候,queue 为空,那么会返回无效的 task */ + /* When the ThreadPool exits and the queue is empty, an invalid + * task is returned */ if (task) { task(); } } } - /* 判断线程池 queue 是否已经满了, 非线程安全,私有内部使用 */ + /* Check whether the thread pool queue is full; not thread safe, for + * private internal use */ bool IsFullUnlock() const { return queue_.size() >= static_cast(capacity_); } - /* 从线程池的 queue 中取一个 task 线程安全 */ + /* Take a task from the thread pool queue; thread safe */ Task Take() { std::unique_lock guard(mutex_); while (queue_.empty() && running_.load(std::memory_order_acquire)) { @@ -170,13 +171,13 @@ class TaskThreadPool : public Uncopyable { } protected: - mutable MutexT mutex_; + mutable MutexT mutex_; CondVarT notEmpty_; CondVarT notFull_; std::vector> threads_; - std::deque queue_; - int capacity_; - std::atomic running_; + std::deque queue_; + int capacity_; + std::atomic running_; }; } // namespace common diff --git a/src/common/configuration.cpp b/src/common/configuration.cpp index a35db6d357..28d699240f 100644 --- a/src/common/configuration.cpp +++ b/src/common/configuration.cpp @@ -53,8 +53,8 @@ bool Configuration::LoadConfig() { SetValue(key, value); } } else { - LOG(ERROR) << "Open config file '" << confFile_ << "' failed: " - << strerror(errno); + LOG(ERROR) << "Open config file '" << confFile_ + << "' failed: " << strerror(errno); return false; } @@ -62,8 +62,10 @@ bool Configuration::LoadConfig() { } bool Configuration::SaveConfig() { - // 当前先只保存配置,原文件的注释等内容先忽略 - // TODO(yyk): 后续考虑改成原文件格式不变,只修改配置值 + // For now, only the configuration is saved; comments and other contents + // of the original file are ignored + // TODO(yyk): later, consider keeping the original file format unchanged + // and only modifying the configuration values std::ofstream wStream(confFile_); if (wStream.is_open()) { for (auto& pair : config_) { @@ -78,14 +80,13 @@ bool Configuration::SaveConfig() { void Configuration::PrintConfig() { LOG(INFO) << std::string(30, '=') << "BEGIN" << std::string(30, '='); - for (auto &item : config_) { + for (auto& item : config_) { LOG(INFO) << item.first << std::string(60 - item.first.size(), ' ') << ": " << item.second; } LOG(INFO) <<
std::string(31, '=') << "END" << std::string(31, '='); } - void Configuration::ExposeMetric(const std::string& exposeName) { if (!exposeName_.empty()) { LOG(WARNING) << "Config metric has been exposed."; @@ -98,20 +99,20 @@ void Configuration::ExposeMetric(const std::string& exposeName) { } } -void Configuration::UpdateMetricIfExposed(const std::string &key, - const std::string &value) { +void Configuration::UpdateMetricIfExposed(const std::string& key, + const std::string& value) { if (exposeName_.empty()) { return; } auto it = configMetric_.find(key); - // 如果配置项不存在,则新建配置项 + // If the configuration item does not exist, create a new configuration item if (it == configMetric_.end()) { ConfigItemPtr configItem = std::make_shared(); configItem->ExposeAs(exposeName_, key); configMetric_[key] = configItem; } - // 更新配置项 + // Update Configuration Items configMetric_[key]->Set("conf_name", key); configMetric_[key]->Set("conf_value", value); configMetric_[key]->Update(); @@ -121,33 +122,29 @@ std::map Configuration::ListConfig() const { return config_; } -void Configuration::SetConfigPath(const std::string &path) { - confFile_ = path; -} +void Configuration::SetConfigPath(const std::string& path) { confFile_ = path; } -std::string Configuration::GetConfigPath() { - return confFile_; -} +std::string Configuration::GetConfigPath() { return confFile_; } -std::string Configuration::GetStringValue(const std::string &key) { +std::string Configuration::GetStringValue(const std::string& key) { return GetValue(key); } -bool Configuration::GetStringValue(const std::string &key, std::string *out) { +bool Configuration::GetStringValue(const std::string& key, std::string* out) { return GetValue(key, out); } -void Configuration::SetStringValue(const std::string &key, - const std::string &value) { +void Configuration::SetStringValue(const std::string& key, + const std::string& value) { SetValue(key, value); } -int Configuration::GetIntValue(const std::string &key, uint64_t defaultvalue) { +int Configuration::GetIntValue(const std::string& key, uint64_t defaultvalue) { std::string value = GetValue(key); return (value == "") ? 
defaultvalue : std::stoi(value); } -bool Configuration::GetIntValue(const std::string &key, int *out) { +bool Configuration::GetIntValue(const std::string& key, int* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoi(res); @@ -156,7 +153,7 @@ bool Configuration::GetIntValue(const std::string &key, int *out) { return false; } -bool Configuration::GetUInt32Value(const std::string &key, uint32_t *out) { +bool Configuration::GetUInt32Value(const std::string& key, uint32_t* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoul(res); @@ -165,7 +162,7 @@ bool Configuration::GetUInt32Value(const std::string &key, uint32_t *out) { return false; } -bool Configuration::GetUInt64Value(const std::string &key, uint64_t *out) { +bool Configuration::GetUInt64Value(const std::string& key, uint64_t* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoull(res); @@ -174,17 +171,16 @@ bool Configuration::GetUInt64Value(const std::string &key, uint64_t *out) { return false; } - -void Configuration::SetIntValue(const std::string &key, const int value) { +void Configuration::SetIntValue(const std::string& key, const int value) { SetValue(key, std::to_string(value)); } -void Configuration::SetUInt64Value( - const std::string &key, const uint64_t value) { +void Configuration::SetUInt64Value(const std::string& key, + const uint64_t value) { SetValue(key, std::to_string(value)); } -void Configuration::SetUInt32Value(const std::string &key, +void Configuration::SetUInt32Value(const std::string& key, const uint32_t value) { SetValue(key, std::to_string(value)); } @@ -203,14 +199,13 @@ void Configuration::SetInt64Value(const std::string& key, const int64_t value) { SetValue(key, std::to_string(value)); } -double Configuration::GetDoubleValue( - const std::string &key, - double defaultvalue) { +double Configuration::GetDoubleValue(const std::string& key, + double defaultvalue) { std::string value = GetValue(key); return (value == "") ? defaultvalue : std::stod(value); } -bool Configuration::GetDoubleValue(const std::string &key, double *out) { +bool Configuration::GetDoubleValue(const std::string& key, double* out) { std::string res; if (GetValue(key, &res)) { *out = std::stod(res); @@ -219,18 +214,17 @@ bool Configuration::GetDoubleValue(const std::string &key, double *out) { return false; } -void Configuration::SetDoubleValue(const std::string &key, const double value) { +void Configuration::SetDoubleValue(const std::string& key, const double value) { SetValue(key, std::to_string(value)); } - -double Configuration::GetFloatValue( - const std::string &key, float defaultvalue) { +double Configuration::GetFloatValue(const std::string& key, + float defaultvalue) { std::string value = GetValue(key); return (value == "") ? 
defaultvalue : std::stof(value); } -bool Configuration::GetFloatValue(const std::string &key, float *out) { +bool Configuration::GetFloatValue(const std::string& key, float* out) { std::string res; if (GetValue(key, &res)) { *out = std::stof(res); @@ -239,11 +233,11 @@ bool Configuration::GetFloatValue(const std::string &key, float *out) { return false; } -void Configuration::SetFloatValue(const std::string &key, const float value) { +void Configuration::SetFloatValue(const std::string& key, const float value) { SetValue(key, std::to_string(value)); } -bool Configuration::GetBoolValue(const std::string &key, bool defaultvalue) { +bool Configuration::GetBoolValue(const std::string& key, bool defaultvalue) { std::string svalue = config_[key]; transform(svalue.begin(), svalue.end(), svalue.begin(), ::tolower); @@ -253,7 +247,7 @@ bool Configuration::GetBoolValue(const std::string &key, bool defaultvalue) { return ret; } -bool Configuration::GetBoolValue(const std::string &key, bool *out) { +bool Configuration::GetBoolValue(const std::string& key, bool* out) { std::string res; if (GetValue(key, &res)) { transform(res.begin(), res.end(), res.begin(), ::tolower); @@ -273,16 +267,15 @@ bool Configuration::GetBoolValue(const std::string &key, bool *out) { return false; } - -void Configuration::SetBoolValue(const std::string &key, const bool value) { +void Configuration::SetBoolValue(const std::string& key, const bool value) { SetValue(key, std::to_string(value)); } -std::string Configuration::GetValue(const std::string &key) { +std::string Configuration::GetValue(const std::string& key) { return config_[key]; } -bool Configuration::GetValue(const std::string &key, std::string *out) { +bool Configuration::GetValue(const std::string& key, std::string* out) { if (config_.find(key) != config_.end()) { *out = config_[key]; return true; @@ -291,51 +284,47 @@ bool Configuration::GetValue(const std::string &key, std::string *out) { return false; } -void Configuration::SetValue(const std::string &key, const std::string &value) { +void Configuration::SetValue(const std::string& key, const std::string& value) { config_[key] = value; UpdateMetricIfExposed(key, value); } -void Configuration::GetValueFatalIfFail(const std::string& key, - int* value) { - LOG_IF(FATAL, !GetIntValue(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; +void Configuration::GetValueFatalIfFail(const std::string& key, int* value) { + LOG_IF(FATAL, !GetIntValue(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } void Configuration::GetValueFatalIfFail(const std::string& key, std::string* value) { - LOG_IF(FATAL, !GetStringValue(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; + LOG_IF(FATAL, !GetStringValue(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } -void Configuration::GetValueFatalIfFail(const std::string& key, - bool* value) { - LOG_IF(FATAL, !GetBoolValue(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; +void Configuration::GetValueFatalIfFail(const std::string& key, bool* value) { + LOG_IF(FATAL, !GetBoolValue(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } void Configuration::GetValueFatalIfFail(const std::string& key, uint32_t* value) { - LOG_IF(FATAL, !GetUInt32Value(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; + LOG_IF(FATAL, !GetUInt32Value(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } void Configuration::GetValueFatalIfFail(const std::string& key, 
uint64_t* value) { - LOG_IF(FATAL, !GetUInt64Value(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; + LOG_IF(FATAL, !GetUInt64Value(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } -void Configuration::GetValueFatalIfFail(const std::string& key, - float* value) { - LOG_IF(FATAL, !GetFloatValue(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; +void Configuration::GetValueFatalIfFail(const std::string& key, float* value) { + LOG_IF(FATAL, !GetFloatValue(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } -void Configuration::GetValueFatalIfFail(const std::string& key, - double* value) { - LOG_IF(FATAL, !GetDoubleValue(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; +void Configuration::GetValueFatalIfFail(const std::string& key, double* value) { + LOG_IF(FATAL, !GetDoubleValue(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } } // namespace common diff --git a/src/common/configuration.h b/src/common/configuration.h index d546995ade..e3a5144e61 100644 --- a/src/common/configuration.h +++ b/src/common/configuration.h @@ -22,9 +22,10 @@ */ #include -#include + #include #include +#include #include #include "src/common/stringstatus.h" @@ -36,7 +37,7 @@ namespace curve { namespace common { using ConfigItemPtr = std::shared_ptr; -using ConfigMetricMap = std::unordered_map; +using ConfigMetricMap = std::unordered_map; class Configuration { public: @@ -45,94 +46,96 @@ class Configuration { void PrintConfig(); std::map ListConfig() const; /** - * 暴露config的metric供采集 - * 如果metric已经暴露,则直接返回 - * @param exposeName: 对外暴露的metric的名字 + * Expose the config's metrics for collection. + * If the metric has already been exposed, return directly. + * @param exposeName: the name of the exposed metric */ void ExposeMetric(const std::string& exposeName); - void SetConfigPath(const std::string &path); + void SetConfigPath(const std::string& path); std::string GetConfigPath(); - std::string GetStringValue(const std::string &key); + std::string GetStringValue(const std::string& key); /* - * @brief GetStringValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetStringValue(const std::string &key, std::string *out); - void SetStringValue(const std::string &key, const std::string &value); - - int GetIntValue(const std::string &key, uint64_t defaultvalue = 0); + * @brief GetStringValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out the value obtained + * + * @return false - not obtained, true - obtained successfully + */ + bool GetStringValue(const std::string& key, std::string* out); + void SetStringValue(const std::string& key, const std::string& value); + + int GetIntValue(const std::string& key, uint64_t defaultvalue = 0); /* - * @brief GetIntValue/GetUInt32Value/GetUInt64Value 获取指定配置项的值 //NOLINT - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetIntValue(const std::string &key, int *out); - bool GetUInt32Value(const std::string &key, uint32_t *out); - bool GetUInt64Value(const std::string &key, uint64_t *out); - void SetIntValue(const std::string &key, const int value); - void SetUInt32Value(const std::string &key, const uint32_t value); - void SetUInt64Value(const std::string &key, const uint64_t value); + * @brief GetIntValue/GetUInt32Value/GetUInt64Value Get the value of the + * specified configuration item //NOLINT
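     *
     * Illustrative usage (editor's sketch, not part of this patch; the key
     * "mds.listen.port" and the path are hypothetical):
     *   curve::common::Configuration conf;
     *   conf.SetConfigPath("/etc/curve/mds.conf");
     *   int port = 0;
     *   if (conf.LoadConfig() && conf.GetIntValue("mds.listen.port", &port)) {
     *       // use port
     *   }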
     *
     * @param[in] key configuration item name
     * @param[out] out the value obtained
     *
     * @return false - not obtained, true - obtained successfully
     */ + bool GetIntValue(const std::string& key, int* out); + bool GetUInt32Value(const std::string& key, uint32_t* out); + bool GetUInt64Value(const std::string& key, uint64_t* out); + void SetIntValue(const std::string& key, const int value); + void SetUInt32Value(const std::string& key, const uint32_t value); + void SetUInt64Value(const std::string& key, const uint64_t value); bool GetInt64Value(const std::string& key, int64_t* out); void SetInt64Value(const std::string& key, const int64_t value); - double GetDoubleValue(const std::string &key, double defaultvalue = 0.0); + double GetDoubleValue(const std::string& key, double defaultvalue = 0.0); /* - * @brief GetDoubleValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetDoubleValue(const std::string &key, double *out); - void SetDoubleValue(const std::string &key, const double value); - - double GetFloatValue(const std::string &key, float defaultvalue = 0.0); + * @brief GetDoubleValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out the value obtained + * + * @return false - not obtained, true - obtained successfully + */ + bool GetDoubleValue(const std::string& key, double* out); + void SetDoubleValue(const std::string& key, const double value); + + double GetFloatValue(const std::string& key, float defaultvalue = 0.0); /* - * @brief GetFloatValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetFloatValue(const std::string &key, float *out); - void SetFloatValue(const std::string &key, const float value); - - bool GetBoolValue(const std::string &key, bool defaultvalue = false); + * @brief GetFloatValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out the value obtained + * + * @return false - not obtained, true - obtained successfully + */ + bool GetFloatValue(const std::string& key, float* out); + void SetFloatValue(const std::string& key, const float value); + + bool GetBoolValue(const std::string& key, bool defaultvalue = false); /* - * @brief GetBoolValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetBoolValue(const std::string &key, bool *out); - void SetBoolValue(const std::string &key, const bool value); - - std::string GetValue(const std::string &key); - bool GetValue(const std::string &key, std::string *out); - void SetValue(const std::string &key, const std::string &value); + * @brief GetBoolValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out the value obtained + * + * @return false - not obtained, true - obtained successfully + */ + bool GetBoolValue(const std::string& key, bool* out); + void SetBoolValue(const std::string& key, const bool value); + + std::string GetValue(const std::string& key); + bool GetValue(const std::string& key, std::string* out); + void SetValue(const std::string& key, const std::string& value); /* - * @brief GetValueFatalIfFail 获取指定配置项的值,失败打FATAL日志 - * - * @param[in] key 配置项名称 - * @param[out] value 获取的值 - * - * @return 无 - */ + * @brief GetValueFatalIfFail Get the value of the specified + * configuration item; log FATAL on failure
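     *
     * Illustrative usage (editor's sketch, not part of this patch; assumes a
     * Configuration object "conf" as above; the key "global.chunk_size" is
     * hypothetical):
     *   uint32_t chunkSize = 0;
     *   conf.GetValueFatalIfFail("global.chunk_size", &chunkSize);
     *   // reaching this line means the key was present; otherwise the
     *   // process aborts with a FATAL log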
     *
     * @param[in] key configuration item name
     * @param[out] value the value obtained
     *
     * @return None
     */ void GetValueFatalIfFail(const std::string& key, int* value); void GetValueFatalIfFail(const std::string& key, std::string* value); void GetValueFatalIfFail(const std::string& key, bool* value); @@ -141,11 +144,11 @@ class Configuration { void GetValueFatalIfFail(const std::string& key, float* value); void GetValueFatalIfFail(const std::string& key, double* value); - bool GetValue(const std::string &key, int *value) { + bool GetValue(const std::string& key, int* value) { return GetIntValue(key, value); } - bool GetValue(const std::string &key, uint32_t *value) { + bool GetValue(const std::string& key, uint32_t* value) { return GetUInt32Value(key, value); } @@ -171,19 +174,19 @@ class Configuration { private: /** - * 更新新的配置到metric - * @param 要更新的metric + * Update a new configuration value to its metric + * @param key the configuration item name + * @param value the new value */ - void UpdateMetricIfExposed(const std::string &key, - const std::string &value); + void UpdateMetricIfExposed(const std::string& key, + const std::string& value); private: - std::string confFile_; - std::map config_; - // metric对外暴露的名字 - std::string exposeName_; - // 每一个配置项使用单独的一个metric,用map管理 - ConfigMetricMap configMetric_; + std::string confFile_; + std::map config_; + // Metric's exposed name + std::string exposeName_; + // Each configuration item uses a separate metric and is managed using a map + ConfigMetricMap configMetric_; }; } // namespace common diff --git a/src/common/crc32.h b/src/common/crc32.h index 99916fe873..7df16e6654 100644 --- a/src/common/crc32.h +++ b/src/common/crc32.h @@ -23,34 +23,36 @@ #ifndef SRC_COMMON_CRC32_H_ #define SRC_COMMON_CRC32_H_ +#include #include #include -#include - namespace curve { namespace common { /** - * 计算数据的CRC32校验码(CRC32C),基于brpc的crc32库进行封装 - * @param pData 待计算的数据 - * @param iLen 待计算的数据长度 - * @return 32位的数据CRC32校验码 + * Calculate the CRC32 checksum (CRC32C) of the data; a wrapper around + * brpc's crc32 library + * @param pData the data to be checksummed + * @param iLen the length of the data to be checksummed + * @return the 32-bit CRC32 checksum of the data */ -inline uint32_t CRC32(const char *pData, size_t iLen) { return butil::crc32c::Value(pData, iLen); } +inline uint32_t CRC32(const char* pData, size_t iLen) { return butil::crc32c::Value(pData, iLen); } /** - * 计算数据的CRC32校验码(CRC32C),基于brpc的crc32库进行封装. 此函数支持继承式 - * 计算,以支持对SGL类型的数据计算单个CRC校验码。满足如下约束: - * CRC32("hello world", 11) == CRC32(CRC32("hello ", 6), "world", 5) - * @param crc 起始的crc校验码 - * @param pData 待计算的数据 - * @param iLen 待计算的数据长度 - * @return 32位的数据CRC32校验码 + * Calculate the CRC32 checksum (CRC32C) of the data; a wrapper around + * brpc's crc32 library. This function supports incremental calculation, so + * that a single CRC checksum can be computed over SGL-type data.
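 *
 * For example (editor's sketch, not part of this patch; part1/len1 and
 * part2/len2 are hypothetical), a buffer received in two parts can be
 * checksummed incrementally:
 *   uint32_t c = curve::common::CRC32(part1, len1);
 *   c = curve::common::CRC32(c, part2, len2);
 * By the constraint below, this equals the CRC32 of the whole buffer.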
 * The following constraint holds: CRC32("hello world", 11) == + * CRC32(CRC32("hello ", 6), "world", 5) + * @param crc the starting crc checksum + * @param pData the data to be checksummed + * @param iLen the length of the data to be checksummed + * @return the 32-bit CRC32 checksum of the data */ -inline uint32_t CRC32(uint32_t crc, const char *pData, size_t iLen) { +inline uint32_t CRC32(uint32_t crc, const char* pData, size_t iLen) { return butil::crc32c::Extend(crc, pData, iLen); } diff --git a/src/common/curve_define.h b/src/common/curve_define.h index 04d07ad5ec..1bea28e298 100644 --- a/src/common/curve_define.h +++ b/src/common/curve_define.h @@ -28,34 +28,35 @@ #include #ifndef DLOG_EVERY_SECOND -#define DLOG_EVERY_SECOND(severity) \ +#define DLOG_EVERY_SECOND(severity) \ BAIDU_LOG_IF_EVERY_SECOND_IMPL(DLOG_IF, severity, true) #endif namespace curve { namespace common { -// curve系统中共用的定义,对于各模块自己独有的放在各模块自己的define中 -using ChunkID = uint64_t; -using CopysetID = uint32_t; -using ChunkIndex = uint32_t; -using LogicPoolID = uint32_t; -using ChunkServerID = uint32_t; -using SnapshotID = uint64_t; -using SequenceNum = uint64_t; +// Definitions shared across the curve system; definitions specific to a +// module live in that module's own define file +using ChunkID = uint64_t; +using CopysetID = uint32_t; +using ChunkIndex = uint32_t; +using LogicPoolID = uint32_t; +using ChunkServerID = uint32_t; +using SnapshotID = uint64_t; +using SequenceNum = uint64_t; -using FileSeqType = uint64_t; -using PageSizeType = uint32_t; -using ChunkSizeType = uint32_t; -using SegmentSizeType = uint32_t; +using FileSeqType = uint64_t; +using PageSizeType = uint32_t; +using ChunkSizeType = uint32_t; +using SegmentSizeType = uint32_t; -using Status = butil::Status; -using EndPoint = butil::EndPoint; +using Status = butil::Status; +using EndPoint = butil::EndPoint; -const uint32_t kKB = 1024; -const uint32_t kMB = 1024*kKB; -const uint32_t kGB = 1024*kMB; +const uint32_t kKB = 1024; +const uint32_t kMB = 1024 * kKB; +const uint32_t kGB = 1024 * kMB; -// maigic number用于FilePool_meta file计算crc +// magic number used for the crc calculation of the FilePool_meta file const char kFilePoolMagic[3] = "01"; constexpr uint32_t kDefaultBlockSize = 4096; diff --git a/src/common/define.h b/src/common/define.h index e3f90d7bd0..6001e48120 100644 --- a/src/common/define.h +++ b/src/common/define.h @@ -23,13 +23,13 @@ #ifndef SRC_COMMON_DEFINE_H_ #define SRC_COMMON_DEFINE_H_ -#include #include +#include namespace curve { namespace snapshotcloneserver { -// snapshotcloneservice字符串常量定义 +// snapshotcloneservice string constant definitions extern const char* kServiceName; // action extern const char* kCreateSnapshotAction; @@ -67,72 +67,67 @@ extern const char* kTotalCountStr; extern const char* kSnapshotsStr; extern const char* kTaskInfosStr; - typedef std::string UUID; using TaskIdType = UUID; -enum class CloneTaskType { - kClone = 0, - kRecover -}; +enum class CloneTaskType { kClone = 0, kRecover }; -// 未初始序列号 +// Uninitialized sequence number const uint64_t kUnInitializeSeqNum = 0; -// 初始序列号 +// Initial sequence number const uint64_t kInitializeSeqNum = 1; -// 错误码:执行成功 +// Error code: Execution successful const int kErrCodeSuccess = 0; -// 错误码: 内部错误 +// Error code: Internal error const int kErrCodeInternalError = -1; -// 错误码:服务器初始化失败 +// Error code: Server initialization failed const int kErrCodeServerInitFail = -2; -// 错误码:服务器启动失败 +// Error code: Server startup failed const int kErrCodeServerStartFail = -3; -// 错误码:服务已停止 +// Error code: Service stopped const int
kErrCodeServiceIsStop = -4; -// 错误码:非法请求 +// Error code: Illegal request const int kErrCodeInvalidRequest = -5; -// 错误码:任务已存在 +// Error code: Task already exists const int kErrCodeTaskExist = -6; -// 错误码:非法的用户 +// Error code: Illegal user const int kErrCodeInvalidUser = -7; -// 错误码:文件不存在 +// Error code: File does not exist const int kErrCodeFileNotExist = -8; -// 错误码:文件状态异常 +// Error code: File status abnormal const int kErrCodeFileStatusInvalid = -9; -// 错误码:chunk大小未按chunk分片大小对齐 +// Error code: Chunk size is not aligned to the chunk split size const int kErrCodeChunkSizeNotAligned = -10; -// 错误码:文件名不匹配 +// Error code: File name mismatch const int kErrCodeFileNameNotMatch = -11; -// 错误码: 不能删除未完成的快照 +// Error code: Unable to delete an unfinished snapshot const int kErrCodeSnapshotCannotDeleteUnfinished = -12; -// 错误码: 不能对存在异常快照的文件打快照,或不能对存在错误的目标文件克隆/恢复 +// Error code: Cannot take a snapshot of a file with abnormal snapshots, or +// cannot clone/recover to a target file with errors const int kErrCodeSnapshotCannotCreateWhenError = -13; -// 错误码:取消的快照已完成 +// Error code: The snapshot to cancel has already completed const int kErrCodeCannotCancelFinished = -14; -// 错误码:不能从未完成或存在错误的快照克隆 +// Error code: Cannot clone from an unfinished snapshot or a snapshot with +// errors const int kErrCodeInvalidSnapshot = -15; -// 错误码:不能删除正在克隆的快照 +// Error code: Unable to delete a snapshot that is being cloned const int kErrCodeSnapshotCannotDeleteCloning = -16; -// 错误码:不能清理未完成的克隆 +// Error code: Unable to clean up an unfinished clone const int kErrCodeCannotCleanCloneUnfinished = -17; -// 错误码:快照到达上限 +// Error code: The snapshot count has reached the upper limit const int kErrCodeSnapshotCountReachLimit = -18; -// 错误码:文件已存在 +// Error code: File already exists const int kErrCodeFileExist = -19; -// 错误码:克隆任务已满 +// Error code: Clone task queue is full const int kErrCodeTaskIsFull = -20; -// 错误码:不支持 +// Error code: Not supported const int kErrCodeNotSupport = -21; extern std::map code2Msg; -std::string BuildErrorMessage( - int errCode, - const std::string &requestId, - const std::string &uuid = ""); - +std::string BuildErrorMessage(int errCode, const std::string& requestId, + const std::string& uuid = ""); // clone progress constexpr uint32_t kProgressCloneStart = 0; @@ -144,8 +139,6 @@ constexpr uint32_t kProgressRecoverChunkBegin = kProgressMetaInstalled; constexpr uint32_t kProgressRecoverChunkEnd = 95; constexpr uint32_t kProgressCloneComplete = 100; - - } // namespace snapshotcloneserver } // namespace curve diff --git a/src/common/fs_util.h b/src/common/fs_util.h index 3e591fd5ca..6b23b9558c 100644 --- a/src/common/fs_util.h +++ b/src/common/fs_util.h @@ -24,16 +24,18 @@ #define SRC_COMMON_FS_UTIL_H_ #include + #include #include + #include "src/common/string_util.h" namespace curve { namespace common { -// 计算path2相对于path1的相对路径 +// Calculate the path of path2 relative to path1 inline std::string CalcRelativePath(const std::string& path1, const std::string& path2) { if (path1.empty() || path2.empty()) { return ""; } @@ -66,7 +68,7 @@ inline std::string CalcRelativePath(const std::string &path1, } // Check whether the path2 is the subpath of path1 -inline bool IsSubPath(const std::string &path1, const std::string &path2) { +inline bool IsSubPath(const std::string& path1, const std::string& path2) { return StringStartWith(CalcRelativePath(path1, path2), "./"); } diff --git a/src/common/interruptible_sleeper.h b/src/common/interruptible_sleeper.h index
73c2cba645..7f0f641674 100644 --- a/src/common/interruptible_sleeper.h +++ b/src/common/interruptible_sleeper.h @@ -24,32 +24,35 @@ #define SRC_COMMON_INTERRUPTIBLE_SLEEPER_H_ #include // NOLINT + #include "src/common/concurrent/concurrent.h" namespace curve { namespace common { /** - * InterruptibleSleeper 实现可 interruptible 的 sleep 功能. - * 正常情况下 wait_for 超时, 接收到退出信号之后, 程序会立即被唤醒, - * 退出 while 循环, 并执行 cleanup 代码. + * InterruptibleSleeper implements an interruptible sleep. + * Normally wait_for exits on timeout; once an exit signal is received, the + * program is woken up immediately, exits the while loop, and executes the + * cleanup code. */ class InterruptibleSleeper { public: /** - * @brief wait_for 等待指定时间,如果接受到退出信号立刻返回 + * @brief wait_for Wait for the specified time, returning immediately if + * an exit signal is received * - * @param[in] time 指定wait时长 + * @param[in] time specifies the wait duration * - * @return false-收到退出信号 true-超时后退出 + * @return false - an exit signal was received; true - exited after the + * timeout */ - template + template bool wait_for(std::chrono::duration const& time) { UniqueLock lock(m); - return !cv.wait_for(lock, time, [&]{return terminate;}); + return !cv.wait_for(lock, time, [&] { return terminate; }); } /** - * @brief interrupt 给当前wait发送退出信号 + * @brief interrupt Send an exit signal to the current wait */ void interrupt() { UniqueLock lock(m); @@ -72,4 +75,3 @@ class InterruptibleSleeper { } // namespace curve #endif // SRC_COMMON_INTERRUPTIBLE_SLEEPER_H_ - diff --git a/src/common/location_operator.cpp b/src/common/location_operator.cpp index f9d5a8f4c8..3571f4e040 100644 --- a/src/common/location_operator.cpp +++ b/src/common/location_operator.cpp @@ -32,22 +32,21 @@ std::string LocationOperator::GenerateS3Location( return location; } -std::string LocationOperator::GenerateCurveLocation( - const std::string& fileName, off_t offset) { +std::string LocationOperator::GenerateCurveLocation(const std::string& fileName, + off_t offset) { std::string location(fileName); location.append(kOriginPathSeprator) - .append(std::to_string(offset)) - .append(kOriginTypeSeprator) - .append(CURVE_TYPE); + .append(std::to_string(offset)) + .append(kOriginTypeSeprator) + .append(CURVE_TYPE); return location; } -OriginType LocationOperator::ParseLocation( - const std::string& location, std::string* originPath) { - // 找到最后一个“@”,不能简单用SplitString - // 因为不能保证OriginPath中不包含“@” - std::string::size_type pos = - location.find_last_of(kOriginTypeSeprator); +OriginType LocationOperator::ParseLocation(const std::string& location, + std::string* originPath) { + // Find the last '@'; SplitString cannot simply be used here, because it + // cannot be guaranteed that OriginPath contains no '@' + std::string::size_type pos = location.find_last_of(kOriginTypeSeprator); if (std::string::npos == pos) { return OriginType::InvalidOrigin; } @@ -67,18 +66,17 @@ OriginType LocationOperator::ParseLocation( return type; } -bool LocationOperator::ParseCurveChunkPath( - const std::string& originPath, std::string* fileName, off_t* offset) { - std::string::size_type pos = - originPath.find_last_of(kOriginPathSeprator); +bool LocationOperator::ParseCurveChunkPath(const std::string& originPath, + std::string* fileName, + off_t* offset) { + std::string::size_type pos = originPath.find_last_of(kOriginPathSeprator); if (std::string::npos == pos) { return false; } std::string file = originPath.substr(0, pos); std::string offStr = originPath.substr(pos + 1); - if (file.empty() || offStr.empty()) - return
diff --git a/src/common/location_operator.cpp b/src/common/location_operator.cpp
index f9d5a8f4c8..3571f4e040 100644
--- a/src/common/location_operator.cpp
+++ b/src/common/location_operator.cpp
@@ -32,22 +32,21 @@ std::string LocationOperator::GenerateS3Location(
     return location;
 }

-std::string LocationOperator::GenerateCurveLocation(
-    const std::string& fileName, off_t offset) {
+std::string LocationOperator::GenerateCurveLocation(const std::string& fileName,
+                                                    off_t offset) {
     std::string location(fileName);
     location.append(kOriginPathSeprator)
-            .append(std::to_string(offset))
-            .append(kOriginTypeSeprator)
-            .append(CURVE_TYPE);
+        .append(std::to_string(offset))
+        .append(kOriginTypeSeprator)
+        .append(CURVE_TYPE);
     return location;
 }

-OriginType LocationOperator::ParseLocation(
-    const std::string& location, std::string* originPath) {
-    // 找到最后一个“@”,不能简单用SplitString
-    // 因为不能保证OriginPath中不包含“@”
-    std::string::size_type pos =
-        location.find_last_of(kOriginTypeSeprator);
+OriginType LocationOperator::ParseLocation(const std::string& location,
+                                           std::string* originPath) {
+    // Find the last '@'; SplitString is not sufficient here, because it
+    // cannot be guaranteed that OriginPath itself contains no '@'
+    std::string::size_type pos = location.find_last_of(kOriginTypeSeprator);
     if (std::string::npos == pos) {
         return OriginType::InvalidOrigin;
     }
@@ -67,18 +66,17 @@ OriginType LocationOperator::ParseLocation(
     return type;
 }

-bool LocationOperator::ParseCurveChunkPath(
-    const std::string& originPath, std::string* fileName, off_t* offset) {
-    std::string::size_type pos =
-        originPath.find_last_of(kOriginPathSeprator);
+bool LocationOperator::ParseCurveChunkPath(const std::string& originPath,
+                                           std::string* fileName,
+                                           off_t* offset) {
+    std::string::size_type pos = originPath.find_last_of(kOriginPathSeprator);
     if (std::string::npos == pos) {
         return false;
     }

     std::string file = originPath.substr(0, pos);
     std::string offStr = originPath.substr(pos + 1);
-    if (file.empty() || offStr.empty())
-        return false;
+    if (file.empty() || offStr.empty()) return false;

     if (fileName != nullptr) {
         *fileName = file;
diff --git a/src/common/location_operator.h b/src/common/location_operator.h
index a86b33d158..2669beb4c3 100644
--- a/src/common/location_operator.h
+++ b/src/common/location_operator.h
@@ -43,43 +43,45 @@ enum class OriginType {
 class LocationOperator {
  public:
     /**
-     * 生成s3的location
-     * location格式:${objectname}@s3
-     * @param objectName:s3上object的名称
-     * @return:生成的location
+     * Generate an s3 location
+     * location format: ${objectname}@s3
+     * @param objectName: the name of the object on s3
+     * @return: the generated location
      */
     static std::string GenerateS3Location(const std::string& objectName);

     /**
-     * 生成curve的location
-     * location格式:${filename}:${offset}@cs
+     * Generate a curve location
+     * location format: ${filename}:${offset}@cs
      */
     static std::string GenerateCurveLocation(const std::string& fileName,
                                              off_t offset);

     /**
-     * 解析数据源的位置信息
-     * location格式:
-     * s3示例:${objectname}@s3
-     * curve示例:${filename}:${offset}@cs
+     * Parse the location of a data source
+     * location format:
+     * s3 example: ${objectname}@s3
+     * curve example: ${filename}:${offset}@cs
      *
-     * @param location[in]:数据源的位置,其格式为originPath@originType
-     * @param originPath[out]:表示数据源在源端的路径
-     * @return:返回OriginType,表示源数据的源端类型是s3还是curve
-     * 如果路径格式不正确或者originType无法识别,则返回InvalidOrigin
+     * @param location[in]: the location of the data source, in the format
+     * originPath@originType
+     * @param originPath[out]: the path of the data source on the source side
+     * @return: the OriginType, indicating whether the source side of the
+     * data is s3 or curve. If the path format is incorrect or the
+     * originType is not recognized, InvalidOrigin is returned
      */
     static OriginType ParseLocation(const std::string& location,
                                     std::string* originPath);

     /**
-     * 解析curvefs的originPath
-     * 格式:${filename}:${offset}
-     * @param originPath[in]:数据源在curvefs上的路径
-     * @param fileName[out]:数据源所属文件名
-     * @param offset[out]:数据源在文件中的偏移
-     * @return: 解析成功返回true,失败返回false
+     * Parse the originPath of curvefs
+     * Format: ${filename}:${offset}
+     * @param originPath[in]: the path of the data source on curvefs
+     * @param fileName[out]: the name of the file the data source belongs to
+     * @param offset[out]: the offset of the data source within the file
+     * @return: returns true on success, false on failure
      */
     static bool ParseCurveChunkPath(const std::string& originPath,
-                                    std::string* fileName,
-                                    off_t* offset);
+                                    std::string* fileName, off_t* offset);
 };

 }  // namespace common
diff --git a/src/common/net_common.h b/src/common/net_common.h
index 8bf058e134..c31cb7b770 100644
--- a/src/common/net_common.h
+++ b/src/common/net_common.h
@@ -23,27 +23,27 @@
 #ifndef SRC_COMMON_NET_COMMON_H_
 #define SRC_COMMON_NET_COMMON_H_

-#include 
-#include 
-#include <netinet/in.h>  // in_addr
-#include <arpa/inet.h>   // inet_pton, inet_ntop
+#include <arpa/inet.h>  // inet_pton, inet_ntop
 #include 
+#include 
+#include <netinet/in.h>  // in_addr
+#include 

 #include 

 namespace curve {
 namespace common {
 class NetCommon {
  public:
-    // addr形式为"ip:port"
+    // addr takes the form "ip:port"
     static bool CheckAddressValid(const std::string& addr) {
         std::string ip;
         uint32_t port;
         return SplitAddrToIpPort(addr, &ip, &port);
     }
-    // addr形式为"ip:port"
-    static bool SplitAddrToIpPort(const std::string& addr,
-                                  std::string* ipstr,
+    // addr takes the form "ip:port"
+    static bool SplitAddrToIpPort(const std::string& addr, std::string* ipstr,
                                   uint32_t* port) {
         size_t splitpos = addr.find(":");
         if (splitpos ==
std::string::npos) { @@ -91,7 +91,7 @@ class NetCommon { return true; } }; -} // namespace common -} // namespace curve +} // namespace common +} // namespace curve #endif // SRC_COMMON_NET_COMMON_H_ diff --git a/src/common/s3_adapter.cpp b/src/common/s3_adapter.cpp index e3b3d917a0..8f8b911405 100644 --- a/src/common/s3_adapter.cpp +++ b/src/common/s3_adapter.cpp @@ -35,655 +35,735 @@ #define AWS_ALLOCATE_TAG __FILE__ ":" STRINGIFY(__LINE__) -namespace curve { -namespace common { - -std::once_flag S3INIT_FLAG; -std::once_flag S3SHUTDOWN_FLAG; -Aws::SDKOptions AWS_SDK_OPTIONS; - -namespace { - -// https://github.com/aws/aws-sdk-cpp/issues/1430 -class PreallocatedIOStream : public Aws::IOStream { - public: - PreallocatedIOStream(char *buf, size_t size) - : Aws::IOStream(new Aws::Utils::Stream::PreallocatedStreamBuf( - reinterpret_cast(buf), size)) {} - - PreallocatedIOStream(const char *buf, size_t size) - : PreallocatedIOStream(const_cast(buf), size) {} - - ~PreallocatedIOStream() { - // corresponding new in constructor - delete rdbuf(); - } -}; - -Aws::String GetObjectRequestRange(uint64_t offset, uint64_t len) { - auto range = - "bytes=" + std::to_string(offset) + "-" + std::to_string(offset + len); - return {range.data(), range.size()}; -} - -} // namespace - -void InitS3AdaptorOption(Configuration* conf, S3AdapterOption* s3Opt) { - InitS3AdaptorOptionExceptS3InfoOption(conf, s3Opt); - LOG_IF(FATAL, !conf->GetStringValue("s3.endpoint", &s3Opt->s3Address)); - LOG_IF(FATAL, !conf->GetStringValue("s3.ak", &s3Opt->ak)); - LOG_IF(FATAL, !conf->GetStringValue("s3.sk", &s3Opt->sk)); - LOG_IF(FATAL, !conf->GetStringValue("s3.bucket_name", &s3Opt->bucketName)); -} - -void InitS3AdaptorOptionExceptS3InfoOption(Configuration* conf, - S3AdapterOption* s3Opt) { - LOG_IF(FATAL, !conf->GetIntValue("s3.logLevel", &s3Opt->loglevel)); - LOG_IF(FATAL, !conf->GetStringValue("s3.logPrefix", &s3Opt->logPrefix)); - LOG_IF(FATAL, !conf->GetIntValue("s3.http_scheme", &s3Opt->scheme)); - LOG_IF(FATAL, !conf->GetBoolValue("s3.verify_SSL", &s3Opt->verifySsl)); - LOG_IF(FATAL, !conf->GetStringValue("s3.user_agent", &s3Opt->userAgent)); - LOG_IF(FATAL, !conf->GetIntValue("s3.maxConnections", - &s3Opt->maxConnections)); - LOG_IF(FATAL, !conf->GetIntValue("s3.connectTimeout", - &s3Opt->connectTimeout)); - LOG_IF(FATAL, !conf->GetIntValue("s3.requestTimeout", - &s3Opt->requestTimeout)); - LOG_IF(FATAL, !conf->GetIntValue("s3.asyncThreadNum", - &s3Opt->asyncThreadNum)); - LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.iopsTotalLimit", - &s3Opt->iopsTotalLimit)); - LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.iopsReadLimit", - &s3Opt->iopsReadLimit)); - LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.iopsWriteLimit", - &s3Opt->iopsWriteLimit)); - LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.bpsTotalMB", - &s3Opt->bpsTotalMB)); - LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.bpsReadMB", - &s3Opt->bpsReadMB)); - LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.bpsWriteMB", - &s3Opt->bpsWriteMB)); - LOG_IF(FATAL, !conf->GetBoolValue("s3.useVirtualAddressing", - &s3Opt->useVirtualAddressing)); - LOG_IF(FATAL, !conf->GetStringValue("s3.region", &s3Opt->region)); - - if (!conf->GetUInt64Value("s3.maxAsyncRequestInflightBytes", - &s3Opt->maxAsyncRequestInflightBytes)) { - LOG(WARNING) << "Not found s3.maxAsyncRequestInflightBytes in conf"; - s3Opt->maxAsyncRequestInflightBytes = 0; - } -} - -void S3Adapter::Init(const std::string& path) { - LOG(INFO) << "Loading s3 configurations"; - conf_.SetConfigPath(path); - 
LOG_IF(FATAL, !conf_.LoadConfig()) - << "Failed to open s3 config file: " << conf_.GetConfigPath(); - S3AdapterOption option; - InitS3AdaptorOption(&conf_, &option); - Init(option); -} - -void S3Adapter::InitExceptFsS3Option(const std::string& path) { - LOG(INFO) << "Loading s3 configurations"; - conf_.SetConfigPath(path); - LOG_IF(FATAL, !conf_.LoadConfig()) - << "Failed to open s3 config file: " << conf_.GetConfigPath(); - S3AdapterOption option; - InitS3AdaptorOptionExceptS3InfoOption(&conf_, &option); - Init(option); -} - -void S3Adapter::Init(const S3AdapterOption &option) { - auto initSDK = [&]() { - AWS_SDK_OPTIONS.loggingOptions.logLevel = - Aws::Utils::Logging::LogLevel(option.loglevel); - AWS_SDK_OPTIONS.loggingOptions.defaultLogPrefix = - option.logPrefix.c_str(); - Aws::InitAPI(AWS_SDK_OPTIONS); - }; - std::call_once(S3INIT_FLAG, initSDK); - s3Address_ = option.s3Address.c_str(); - s3Ak_ = option.ak.c_str(); - s3Sk_ = option.sk.c_str(); - bucketName_ = option.bucketName.c_str(); - clientCfg_ = Aws::New(AWS_ALLOCATE_TAG); - clientCfg_->scheme = Aws::Http::Scheme(option.scheme); - clientCfg_->verifySSL = option.verifySsl; - clientCfg_->userAgent = option.userAgent.c_str(); - clientCfg_->region = option.region.c_str(); - clientCfg_->maxConnections = option.maxConnections; - clientCfg_->connectTimeoutMs = option.connectTimeout; - clientCfg_->requestTimeoutMs = option.requestTimeout; - clientCfg_->endpointOverride = s3Address_; - auto asyncThreadNum = option.asyncThreadNum; - LOG(INFO) << "S3Adapter init thread num = " << asyncThreadNum << std::endl; - clientCfg_->executor = - Aws::MakeShared( - "S3Adapter.S3Client", asyncThreadNum); - s3Client_ = Aws::New(AWS_ALLOCATE_TAG, - Aws::Auth::AWSCredentials(s3Ak_, s3Sk_), - *clientCfg_, - Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, - option.useVirtualAddressing); - - ReadWriteThrottleParams params; - params.iopsTotal.limit = option.iopsTotalLimit; - params.iopsRead.limit = option.iopsReadLimit; - params.iopsWrite.limit = option.iopsWriteLimit; - params.bpsTotal.limit = option.bpsTotalMB * kMB; - params.bpsRead.limit = option.bpsReadMB * kMB; - params.bpsWrite.limit = option.bpsWriteMB * kMB; - - throttle_ = new Throttle(); - throttle_->UpdateThrottleParams(params); - - inflightBytesThrottle_.reset(new AsyncRequestInflightBytesThrottle( - option.maxAsyncRequestInflightBytes == 0 - ? 
UINT64_MAX - : option.maxAsyncRequestInflightBytes)); -} - -void S3Adapter::Deinit() { - // delete s3client in s3adapter - if (clientCfg_ != nullptr) { - Aws::Delete(clientCfg_); - clientCfg_ = nullptr; - } - if (s3Client_ != nullptr) { - Aws::Delete(s3Client_); - s3Client_ = nullptr; - } - if (throttle_ != nullptr) { - delete throttle_; - throttle_ = nullptr; - } - if (inflightBytesThrottle_ != nullptr) - inflightBytesThrottle_.release(); -} - -void S3Adapter::Shutdown() { - // one program should only call once - auto shutdownSDK = [&]() { - Aws::ShutdownAPI(AWS_SDK_OPTIONS); - }; - std::call_once(S3SHUTDOWN_FLAG, shutdownSDK); -} - -void S3Adapter::Reinit(const S3AdapterOption& option) { - Deinit(); - Init(option); -} - -std::string S3Adapter::GetS3Ak() { - return std::string(s3Ak_.c_str(), s3Ak_.size()); -} - -std::string S3Adapter::GetS3Sk() { - return std::string(s3Sk_.c_str(), s3Sk_.size()); -} - -std::string S3Adapter::GetS3Endpoint() { - return std::string(s3Address_.c_str(), s3Address_.size()); -} - -int S3Adapter::CreateBucket() { - Aws::S3::Model::CreateBucketRequest request; - request.SetBucket(bucketName_); - Aws::S3::Model::CreateBucketConfiguration conf; - conf.SetLocationConstraint( - Aws::S3::Model::BucketLocationConstraint::us_east_1); - request.SetCreateBucketConfiguration(conf); - auto response = s3Client_->CreateBucket(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(ERROR) << "CreateBucket error:" - << bucketName_ - << "--" - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} - -int S3Adapter::DeleteBucket() { - Aws::S3::Model::DeleteBucketRequest request; - request.SetBucket(bucketName_); - auto response = s3Client_->DeleteBucket(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(ERROR) << "DeleteBucket error:" - << bucketName_ - << "--" - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} - -bool S3Adapter::BucketExist() { - Aws::S3::Model::HeadBucketRequest request; - request.SetBucket(bucketName_); - auto response = s3Client_->HeadBucket(request); - if (response.IsSuccess()) { - return true; - } else { - LOG(ERROR) << "HeadBucket error:" - << bucketName_ - << "--" - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return false; - } -} - -int S3Adapter::PutObject(const Aws::String &key, const char *buffer, - const size_t bufferSize) { - Aws::S3::Model::PutObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(key); - - request.SetBody(Aws::MakeShared(AWS_ALLOCATE_TAG, - buffer, bufferSize)); - - if (throttle_) { - throttle_->Add(false, bufferSize); - } - - auto response = s3Client_->PutObject(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(ERROR) << "PutObject error, bucket: " << bucketName_ - << ", key: " << key << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} - -int S3Adapter::PutObject(const Aws::String &key, const std::string &data) { - return PutObject(key, data.data(), data.size()); -} -/* - int S3Adapter::GetObject(const Aws::String &key, - void *buffer, - const int bufferSize) { - Aws::S3::Model::GetObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(key); - request.SetResponseStreamFactory( - [buffer, bufferSize](){ - std::unique_ptr - stream(Aws::New("stream")); - stream->rdbuf()->pubsetbuf(buffer, - bufferSize); - return stream.release(); - }); - auto response = 
s3Client_->GetObject(request); - if (response.IsSuccess()) { - *buffer << response.GetResult().GetBody().rdbuf(); - } else { - LOG(ERROR) << "GetObject error: " - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } - } -*/ - -void S3Adapter::PutObjectAsync(std::shared_ptr context) { - Aws::S3::Model::PutObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(Aws::String{context->key.c_str(), context->key.size()}); - - request.SetBody(Aws::MakeShared( - AWS_ALLOCATE_TAG, context->buffer, context->bufferSize)); - - auto originCallback = context->cb; - auto wrapperCallback = - [this, - originCallback](const std::shared_ptr& ctx) { - inflightBytesThrottle_->OnComplete(ctx->bufferSize); - ctx->cb = originCallback; - ctx->cb(ctx); - }; - - Aws::S3::PutObjectResponseReceivedHandler handler = - [context]( - const Aws::S3::S3Client * /*client*/, - const Aws::S3::Model::PutObjectRequest & /*request*/, - const Aws::S3::Model::PutObjectOutcome &response, - const std::shared_ptr - &awsCtx) { - std::shared_ptr ctx = - std::const_pointer_cast( - std::dynamic_pointer_cast( - awsCtx)); - - LOG_IF(ERROR, !response.IsSuccess()) - << "PutObjectAsync error: " - << response.GetError().GetExceptionName() - << "message: " << response.GetError().GetMessage() - << "resend: " << ctx->key; - - ctx->retCode = (response.IsSuccess() ? 0 : -1); - ctx->timer.stop(); - ctx->cb(ctx); - }; - - if (throttle_) { - throttle_->Add(false, context->bufferSize); - } - - inflightBytesThrottle_->OnStart(context->bufferSize); - context->cb = std::move(wrapperCallback); - s3Client_->PutObjectAsync(request, handler, context); -} - -int S3Adapter::GetObject(const Aws::String &key, - std::string *data) { - Aws::S3::Model::GetObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(key); - std::stringstream ss; - if (throttle_) { - throttle_->Add(true, 1); - } - auto response = s3Client_->GetObject(request); - if (response.IsSuccess()) { - ss << response.GetResult().GetBody().rdbuf(); - *data = ss.str(); - return 0; - } else { - LOG(ERROR) << "GetObject error: " - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} - -int S3Adapter::GetObject(const std::string &key, - char *buf, - off_t offset, - size_t len) { - Aws::S3::Model::GetObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(Aws::String{key.c_str(), key.size()}); - request.SetRange(GetObjectRequestRange(offset, len)); - - request.SetResponseStreamFactory([buf, len]() { - return Aws::New(AWS_ALLOCATE_TAG, buf, len); - }); - - if (throttle_) { - throttle_->Add(true, len); - } - auto response = s3Client_->GetObject(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(ERROR) << "GetObject error: " - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} - -void S3Adapter::GetObjectAsync(std::shared_ptr context) { - Aws::S3::Model::GetObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(Aws::String{context->key.c_str(), context->key.size()}); - request.SetRange(GetObjectRequestRange(context->offset, context->len)); - - request.SetResponseStreamFactory([context]() { - return Aws::New(AWS_ALLOCATE_TAG, context->buf, - context->len); - }); - - auto originCallback = context->cb; - auto wrapperCallback = - [this, originCallback]( - const S3Adapter* /*adapter*/, - const std::shared_ptr& ctx) { - inflightBytesThrottle_->OnComplete(ctx->len); - ctx->cb = 
originCallback; - ctx->cb(this, ctx); - }; - - Aws::S3::GetObjectResponseReceivedHandler handler = - [this](const Aws::S3::S3Client * /*client*/, - const Aws::S3::Model::GetObjectRequest & /*request*/, - const Aws::S3::Model::GetObjectOutcome &response, - const std::shared_ptr - &awsCtx) { - std::shared_ptr ctx = - std::const_pointer_cast( - std::dynamic_pointer_cast( - awsCtx)); - - LOG_IF(ERROR, !response.IsSuccess()) - << "GetObjectAsync error: " - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - ctx->actualLen = response.GetResult().GetContentLength(); - ctx->retCode = (response.IsSuccess() ? 0 : -1); - ctx->cb(this, ctx); - }; - - if (throttle_) { - throttle_->Add(true, context->len); - } - - inflightBytesThrottle_->OnStart(context->len); - context->cb = std::move(wrapperCallback); - s3Client_->GetObjectAsync(request, handler, context); -} - -bool S3Adapter::ObjectExist(const Aws::String &key) { - Aws::S3::Model::HeadObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(key); - auto response = s3Client_->HeadObject(request); - if (response.IsSuccess()) { - return true; - } else { - LOG(WARNING) << "HeadObject error:" << bucketName_ << "--" << key - << "--" << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return false; - } -} - -int S3Adapter::DeleteObject(const Aws::String &key) { - Aws::S3::Model::DeleteObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(key); - auto response = s3Client_->DeleteObject(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(WARNING) << "DeleteObject error:" << bucketName_ << "--" << key - << "--" << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} - -int S3Adapter::DeleteObjects(const std::list& keyList) { - Aws::S3::Model::DeleteObjectsRequest deleteObjectsRequest; - Aws::S3::Model::Delete deleteObjects; - for (const auto& key : keyList) { - Aws::S3::Model::ObjectIdentifier ObjIdent; - ObjIdent.SetKey(key); - deleteObjects.AddObjects(ObjIdent); - } - - deleteObjects.SetQuiet(false); - deleteObjectsRequest.WithBucket(bucketName_).WithDelete(deleteObjects); - auto response = s3Client_->DeleteObjects(deleteObjectsRequest); - if (response.IsSuccess()) { - for (auto del : response.GetResult().GetDeleted()) { - LOG(INFO) << "delete ok : " << del.GetKey(); - } - - for (auto err : response.GetResult().GetErrors()) { - LOG(WARNING) << "delete err : " << err.GetKey() << " --> " - << err.GetMessage(); - } - - if (response.GetResult().GetErrors().size() != 0) { - return -1; - } - - return 0; - } else { - LOG(ERROR) << response.GetError().GetMessage() << " failed, " - << deleteObjectsRequest.SerializePayload(); - return -1; - } - return 0; -} -/* - // object元数据单独更新还有问题,需要单独的s3接口来支持 -int S3Adapter::UpdateObjectMeta(const Aws::String &key, - const Aws::Map &meta) { - Aws::S3::Model::PutObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(key); - auto input_data = - Aws::MakeShared("PutObjectInputStream"); - request.SetBody(input_data); - request.SetMetadata(meta); - auto response = s3Client_->PutObject(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(ERROR) << "PutObject error:" - << bucketName_ << key - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} - -int S3Adapter::GetObjectMeta(const Aws::String &key, - Aws::Map *meta) { - Aws::S3::Model::HeadObjectRequest request; - request.SetBucket(bucketName_); 
- request.SetKey(key); - auto response = s3Client_->HeadObject(request); - if (response.IsSuccess()) { - *meta = response.GetResult().GetMetadata(); - return 0; - } else { - LOG(ERROR) << "HeadObject error:" - << bucketName_ << key - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} -*/ -Aws::String S3Adapter::MultiUploadInit(const Aws::String &key) { - Aws::S3::Model::CreateMultipartUploadRequest request; - request.WithBucket(bucketName_).WithKey(key); - auto response = s3Client_->CreateMultipartUpload(request); - if (response.IsSuccess()) { - return response.GetResult().GetUploadId(); - } else { - LOG(ERROR) << "CreateMultipartUploadRequest error: " - << response.GetError().GetMessage(); - return ""; - } -} - -Aws::S3::Model::CompletedPart S3Adapter::UploadOnePart( - const Aws::String &key, - const Aws::String &uploadId, - int partNum, - int partSize, - const char* buf) { - Aws::S3::Model::UploadPartRequest request; - request.SetBucket(bucketName_); - request.SetKey(key); - request.SetUploadId(uploadId); - request.SetPartNumber(partNum); - request.SetContentLength(partSize); - - request.SetBody( - Aws::MakeShared(AWS_ALLOCATE_TAG, buf, partSize)); - - if (throttle_) { - throttle_->Add(false, partSize); - } - auto result = s3Client_->UploadPart(request); - if (result.IsSuccess()) { - return Aws::S3::Model::CompletedPart() - .WithETag(result.GetResult().GetETag()).WithPartNumber(partNum); - } else { - return Aws::S3::Model::CompletedPart() - .WithETag("errorTag").WithPartNumber(-1); - } -} - -int S3Adapter::CompleteMultiUpload(const Aws::String &key, - const Aws::String &uploadId, - const Aws::Vector &cp_v) { - Aws::S3::Model::CompleteMultipartUploadRequest request; - request.WithBucket(bucketName_); - request.SetKey(key); - request.SetUploadId(uploadId); - request.SetMultipartUpload( - Aws::S3::Model::CompletedMultipartUpload().WithParts(cp_v)); - auto response = s3Client_->CompleteMultipartUpload(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(ERROR) << "CompleteMultiUpload error: " - << response.GetError().GetMessage(); - this->AbortMultiUpload(key, uploadId); - return -1; - } -} - -int S3Adapter::AbortMultiUpload(const Aws::String &key, - const Aws::String &uploadId) { - Aws::S3::Model::AbortMultipartUploadRequest request; - request.WithBucket(bucketName_); - request.SetKey(key); - request.SetUploadId(uploadId); - auto response = s3Client_->AbortMultipartUpload(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(ERROR) << "AbortMultiUpload error: " - << response.GetError().GetMessage(); - return -1; - } -} - -void S3Adapter::AsyncRequestInflightBytesThrottle::OnStart(uint64_t len) { - std::unique_lock lock(mtx_); - while (inflightBytes_ + len > maxInflightBytes_) { - cond_.wait(lock); - } - - inflightBytes_ += len; -} - -void S3Adapter::AsyncRequestInflightBytesThrottle::OnComplete(uint64_t len) { - std::unique_lock lock(mtx_); - inflightBytes_ -= len; - cond_.notify_all(); -} -void S3Adapter::SetS3Option(const S3InfoOption& fsS3Opt) { - s3Address_ = fsS3Opt.s3Address.c_str(); - s3Ak_ = fsS3Opt.ak.c_str(); - s3Sk_ = fsS3Opt.sk.c_str(); - bucketName_ = fsS3Opt.bucketName.c_str(); -} - -} // namespace common -} // namespace curve +namespace curve +{ + namespace common + { + + std::once_flag S3INIT_FLAG; + std::once_flag S3SHUTDOWN_FLAG; + Aws::SDKOptions AWS_SDK_OPTIONS; + + namespace + { + + // https://github.com/aws/aws-sdk-cpp/issues/1430 + class PreallocatedIOStream : public Aws::IOStream + { + 
public: + PreallocatedIOStream(char *buf, size_t size) + : Aws::IOStream(new Aws::Utils::Stream::PreallocatedStreamBuf( + reinterpret_cast(buf), size)) {} + + PreallocatedIOStream(const char *buf, size_t size) + : PreallocatedIOStream(const_cast(buf), size) {} + + ~PreallocatedIOStream() + { + // corresponding new in constructor + delete rdbuf(); + } + }; + + Aws::String GetObjectRequestRange(uint64_t offset, uint64_t len) + { + auto range = + "bytes=" + std::to_string(offset) + "-" + std::to_string(offset + len); + return {range.data(), range.size()}; + } + + } // namespace + + void InitS3AdaptorOption(Configuration *conf, S3AdapterOption *s3Opt) + { + InitS3AdaptorOptionExceptS3InfoOption(conf, s3Opt); + LOG_IF(FATAL, !conf->GetStringValue("s3.endpoint", &s3Opt->s3Address)); + LOG_IF(FATAL, !conf->GetStringValue("s3.ak", &s3Opt->ak)); + LOG_IF(FATAL, !conf->GetStringValue("s3.sk", &s3Opt->sk)); + LOG_IF(FATAL, !conf->GetStringValue("s3.bucket_name", &s3Opt->bucketName)); + } + + void InitS3AdaptorOptionExceptS3InfoOption(Configuration *conf, + S3AdapterOption *s3Opt) + { + LOG_IF(FATAL, !conf->GetIntValue("s3.logLevel", &s3Opt->loglevel)); + LOG_IF(FATAL, !conf->GetStringValue("s3.logPrefix", &s3Opt->logPrefix)); + LOG_IF(FATAL, !conf->GetIntValue("s3.http_scheme", &s3Opt->scheme)); + LOG_IF(FATAL, !conf->GetBoolValue("s3.verify_SSL", &s3Opt->verifySsl)); + LOG_IF(FATAL, !conf->GetStringValue("s3.user_agent", &s3Opt->userAgent)); + LOG_IF(FATAL, + !conf->GetIntValue("s3.maxConnections", &s3Opt->maxConnections)); + LOG_IF(FATAL, + !conf->GetIntValue("s3.connectTimeout", &s3Opt->connectTimeout)); + LOG_IF(FATAL, + !conf->GetIntValue("s3.requestTimeout", &s3Opt->requestTimeout)); + LOG_IF(FATAL, + !conf->GetIntValue("s3.asyncThreadNum", &s3Opt->asyncThreadNum)); + LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.iopsTotalLimit", + &s3Opt->iopsTotalLimit)); + LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.iopsReadLimit", + &s3Opt->iopsReadLimit)); + LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.iopsWriteLimit", + &s3Opt->iopsWriteLimit)); + LOG_IF(FATAL, + !conf->GetUInt64Value("s3.throttle.bpsTotalMB", &s3Opt->bpsTotalMB)); + LOG_IF(FATAL, + !conf->GetUInt64Value("s3.throttle.bpsReadMB", &s3Opt->bpsReadMB)); + LOG_IF(FATAL, + !conf->GetUInt64Value("s3.throttle.bpsWriteMB", &s3Opt->bpsWriteMB)); + LOG_IF(FATAL, !conf->GetBoolValue("s3.useVirtualAddressing", + &s3Opt->useVirtualAddressing)); + LOG_IF(FATAL, !conf->GetStringValue("s3.region", &s3Opt->region)); + + if (!conf->GetUInt64Value("s3.maxAsyncRequestInflightBytes", + &s3Opt->maxAsyncRequestInflightBytes)) + { + LOG(WARNING) << "Not found s3.maxAsyncRequestInflightBytes in conf"; + s3Opt->maxAsyncRequestInflightBytes = 0; + } + } + + void S3Adapter::Init(const std::string &path) + { + LOG(INFO) << "Loading s3 configurations"; + conf_.SetConfigPath(path); + LOG_IF(FATAL, !conf_.LoadConfig()) + << "Failed to open s3 config file: " << conf_.GetConfigPath(); + S3AdapterOption option; + InitS3AdaptorOption(&conf_, &option); + Init(option); + } + + void S3Adapter::InitExceptFsS3Option(const std::string &path) + { + LOG(INFO) << "Loading s3 configurations"; + conf_.SetConfigPath(path); + LOG_IF(FATAL, !conf_.LoadConfig()) + << "Failed to open s3 config file: " << conf_.GetConfigPath(); + S3AdapterOption option; + InitS3AdaptorOptionExceptS3InfoOption(&conf_, &option); + Init(option); + } + + void S3Adapter::Init(const S3AdapterOption &option) + { + auto initSDK = [&]() + { + AWS_SDK_OPTIONS.loggingOptions.logLevel = + 
Aws::Utils::Logging::LogLevel(option.loglevel); + AWS_SDK_OPTIONS.loggingOptions.defaultLogPrefix = + option.logPrefix.c_str(); + Aws::InitAPI(AWS_SDK_OPTIONS); + }; + std::call_once(S3INIT_FLAG, initSDK); + s3Address_ = option.s3Address.c_str(); + s3Ak_ = option.ak.c_str(); + s3Sk_ = option.sk.c_str(); + bucketName_ = option.bucketName.c_str(); + clientCfg_ = Aws::New(AWS_ALLOCATE_TAG); + clientCfg_->scheme = Aws::Http::Scheme(option.scheme); + clientCfg_->verifySSL = option.verifySsl; + clientCfg_->userAgent = option.userAgent.c_str(); + clientCfg_->region = option.region.c_str(); + clientCfg_->maxConnections = option.maxConnections; + clientCfg_->connectTimeoutMs = option.connectTimeout; + clientCfg_->requestTimeoutMs = option.requestTimeout; + clientCfg_->endpointOverride = s3Address_; + auto asyncThreadNum = option.asyncThreadNum; + LOG(INFO) << "S3Adapter init thread num = " << asyncThreadNum << std::endl; + clientCfg_->executor = + Aws::MakeShared( + "S3Adapter.S3Client", asyncThreadNum); + s3Client_ = Aws::New( + AWS_ALLOCATE_TAG, Aws::Auth::AWSCredentials(s3Ak_, s3Sk_), *clientCfg_, + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, + option.useVirtualAddressing); + + ReadWriteThrottleParams params; + params.iopsTotal.limit = option.iopsTotalLimit; + params.iopsRead.limit = option.iopsReadLimit; + params.iopsWrite.limit = option.iopsWriteLimit; + params.bpsTotal.limit = option.bpsTotalMB * kMB; + params.bpsRead.limit = option.bpsReadMB * kMB; + params.bpsWrite.limit = option.bpsWriteMB * kMB; + + throttle_ = new Throttle(); + throttle_->UpdateThrottleParams(params); + + inflightBytesThrottle_.reset(new AsyncRequestInflightBytesThrottle( + option.maxAsyncRequestInflightBytes == 0 + ? UINT64_MAX + : option.maxAsyncRequestInflightBytes)); + } + + void S3Adapter::Deinit() + { + // delete s3client in s3adapter + if (clientCfg_ != nullptr) + { + Aws::Delete(clientCfg_); + clientCfg_ = nullptr; + } + if (s3Client_ != nullptr) + { + Aws::Delete(s3Client_); + s3Client_ = nullptr; + } + if (throttle_ != nullptr) + { + delete throttle_; + throttle_ = nullptr; + } + if (inflightBytesThrottle_ != nullptr) + inflightBytesThrottle_.release(); + } + + void S3Adapter::Shutdown() + { + // one program should only call once + auto shutdownSDK = [&]() + { Aws::ShutdownAPI(AWS_SDK_OPTIONS); }; + std::call_once(S3SHUTDOWN_FLAG, shutdownSDK); + } + + void S3Adapter::Reinit(const S3AdapterOption &option) + { + Deinit(); + Init(option); + } + + std::string S3Adapter::GetS3Ak() + { + return std::string(s3Ak_.c_str(), s3Ak_.size()); + } + + std::string S3Adapter::GetS3Sk() + { + return std::string(s3Sk_.c_str(), s3Sk_.size()); + } + + std::string S3Adapter::GetS3Endpoint() + { + return std::string(s3Address_.c_str(), s3Address_.size()); + } + + int S3Adapter::CreateBucket() + { + Aws::S3::Model::CreateBucketRequest request; + request.SetBucket(bucketName_); + Aws::S3::Model::CreateBucketConfiguration conf; + conf.SetLocationConstraint( + Aws::S3::Model::BucketLocationConstraint::us_east_1); + request.SetCreateBucketConfiguration(conf); + auto response = s3Client_->CreateBucket(request); + if (response.IsSuccess()) + { + return 0; + } + else + { + LOG(ERROR) << "CreateBucket error:" << bucketName_ << "--" + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return -1; + } + } + + int S3Adapter::DeleteBucket() + { + Aws::S3::Model::DeleteBucketRequest request; + request.SetBucket(bucketName_); + auto response = s3Client_->DeleteBucket(request); + if 
(response.IsSuccess()) + { + return 0; + } + else + { + LOG(ERROR) << "DeleteBucket error:" << bucketName_ << "--" + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return -1; + } + } + + bool S3Adapter::BucketExist() + { + Aws::S3::Model::HeadBucketRequest request; + request.SetBucket(bucketName_); + auto response = s3Client_->HeadBucket(request); + if (response.IsSuccess()) + { + return true; + } + else + { + LOG(ERROR) << "HeadBucket error:" << bucketName_ << "--" + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return false; + } + } + + int S3Adapter::PutObject(const Aws::String &key, const char *buffer, + const size_t bufferSize) + { + Aws::S3::Model::PutObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(key); + + request.SetBody(Aws::MakeShared(AWS_ALLOCATE_TAG, + buffer, bufferSize)); + + if (throttle_) + { + throttle_->Add(false, bufferSize); + } + + auto response = s3Client_->PutObject(request); + if (response.IsSuccess()) + { + return 0; + } + else + { + LOG(ERROR) << "PutObject error, bucket: " << bucketName_ + << ", key: " << key << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return -1; + } + } + + int S3Adapter::PutObject(const Aws::String &key, const std::string &data) + { + return PutObject(key, data.data(), data.size()); + } + /* + int S3Adapter::GetObject(const Aws::String &key, + void *buffer, + const int bufferSize) { + Aws::S3::Model::GetObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(key); + request.SetResponseStreamFactory( + [buffer, bufferSize](){ + std::unique_ptr + stream(Aws::New("stream")); + stream->rdbuf()->pubsetbuf(buffer, + bufferSize); + return stream.release(); + }); + auto response = s3Client_->GetObject(request); + if (response.IsSuccess()) { + *buffer << response.GetResult().GetBody().rdbuf(); + } else { + LOG(ERROR) << "GetObject error: " + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return -1; + } + } + */ + + void S3Adapter::PutObjectAsync(std::shared_ptr context) + { + Aws::S3::Model::PutObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(Aws::String{context->key.c_str(), context->key.size()}); + + request.SetBody(Aws::MakeShared( + AWS_ALLOCATE_TAG, context->buffer, context->bufferSize)); + + auto originCallback = context->cb; + auto wrapperCallback = + [this, + originCallback](const std::shared_ptr &ctx) + { + inflightBytesThrottle_->OnComplete(ctx->bufferSize); + ctx->cb = originCallback; + ctx->cb(ctx); + }; + + Aws::S3::PutObjectResponseReceivedHandler handler = + [context](const Aws::S3::S3Client * /*client*/, + const Aws::S3::Model::PutObjectRequest & /*request*/, + const Aws::S3::Model::PutObjectOutcome &response, + const std::shared_ptr & + awsCtx) + { + std::shared_ptr ctx = + std::const_pointer_cast( + std::dynamic_pointer_cast( + awsCtx)); + + LOG_IF(ERROR, !response.IsSuccess()) + << "PutObjectAsync error: " + << response.GetError().GetExceptionName() + << "message: " << response.GetError().GetMessage() + << "resend: " << ctx->key; + + ctx->retCode = (response.IsSuccess() ? 
0 : -1); + ctx->timer.stop(); + ctx->cb(ctx); + }; + + if (throttle_) + { + throttle_->Add(false, context->bufferSize); + } + + inflightBytesThrottle_->OnStart(context->bufferSize); + context->cb = std::move(wrapperCallback); + s3Client_->PutObjectAsync(request, handler, context); + } + + int S3Adapter::GetObject(const Aws::String &key, std::string *data) + { + Aws::S3::Model::GetObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(key); + std::stringstream ss; + if (throttle_) + { + throttle_->Add(true, 1); + } + auto response = s3Client_->GetObject(request); + if (response.IsSuccess()) + { + ss << response.GetResult().GetBody().rdbuf(); + *data = ss.str(); + return 0; + } + else + { + LOG(ERROR) << "GetObject error: " + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return -1; + } + } + + int S3Adapter::GetObject(const std::string &key, char *buf, off_t offset, + size_t len) + { + Aws::S3::Model::GetObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(Aws::String{key.c_str(), key.size()}); + request.SetRange(GetObjectRequestRange(offset, len)); + + request.SetResponseStreamFactory([buf, len]() + { return Aws::New(AWS_ALLOCATE_TAG, buf, len); }); + + if (throttle_) + { + throttle_->Add(true, len); + } + auto response = s3Client_->GetObject(request); + if (response.IsSuccess()) + { + return 0; + } + else + { + LOG(ERROR) << "GetObject error: " + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return -1; + } + } + + void S3Adapter::GetObjectAsync(std::shared_ptr context) + { + Aws::S3::Model::GetObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(Aws::String{context->key.c_str(), context->key.size()}); + request.SetRange(GetObjectRequestRange(context->offset, context->len)); + + request.SetResponseStreamFactory([context]() + { return Aws::New(AWS_ALLOCATE_TAG, context->buf, + context->len); }); + + auto originCallback = context->cb; + auto wrapperCallback = + [this, originCallback]( + const S3Adapter * /*adapter*/, + const std::shared_ptr &ctx) + { + inflightBytesThrottle_->OnComplete(ctx->len); + ctx->cb = originCallback; + ctx->cb(this, ctx); + }; + + Aws::S3::GetObjectResponseReceivedHandler handler = + [this](const Aws::S3::S3Client * /*client*/, + const Aws::S3::Model::GetObjectRequest & /*request*/, + const Aws::S3::Model::GetObjectOutcome &response, + const std::shared_ptr & + awsCtx) + { + std::shared_ptr ctx = + std::const_pointer_cast( + std::dynamic_pointer_cast( + awsCtx)); + + LOG_IF(ERROR, !response.IsSuccess()) + << "GetObjectAsync error: " + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + ctx->actualLen = response.GetResult().GetContentLength(); + ctx->retCode = (response.IsSuccess() ? 
0 : -1); + ctx->cb(this, ctx); + }; + + if (throttle_) + { + throttle_->Add(true, context->len); + } + + inflightBytesThrottle_->OnStart(context->len); + context->cb = std::move(wrapperCallback); + s3Client_->GetObjectAsync(request, handler, context); + } + + bool S3Adapter::ObjectExist(const Aws::String &key) + { + Aws::S3::Model::HeadObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(key); + auto response = s3Client_->HeadObject(request); + if (response.IsSuccess()) + { + return true; + } + else + { + LOG(WARNING) << "HeadObject error:" << bucketName_ << "--" << key + << "--" << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return false; + } + } + + int S3Adapter::DeleteObject(const Aws::String &key) + { + Aws::S3::Model::DeleteObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(key); + auto response = s3Client_->DeleteObject(request); + if (response.IsSuccess()) + { + return 0; + } + else + { + LOG(WARNING) << "DeleteObject error:" << bucketName_ << "--" << key + << "--" << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return -1; + } + } + + int S3Adapter::DeleteObjects(const std::list &keyList) + { + Aws::S3::Model::DeleteObjectsRequest deleteObjectsRequest; + Aws::S3::Model::Delete deleteObjects; + for (const auto &key : keyList) + { + Aws::S3::Model::ObjectIdentifier ObjIdent; + ObjIdent.SetKey(key); + deleteObjects.AddObjects(ObjIdent); + } + + deleteObjects.SetQuiet(false); + deleteObjectsRequest.WithBucket(bucketName_).WithDelete(deleteObjects); + auto response = s3Client_->DeleteObjects(deleteObjectsRequest); + if (response.IsSuccess()) + { + for (auto del : response.GetResult().GetDeleted()) + { + LOG(INFO) << "delete ok : " << del.GetKey(); + } + + for (auto err : response.GetResult().GetErrors()) + { + LOG(WARNING) << "delete err : " << err.GetKey() << " --> " + << err.GetMessage(); + } + + if (response.GetResult().GetErrors().size() != 0) + { + return -1; + } + + return 0; + } + else + { + LOG(ERROR) << response.GetError().GetMessage() << " failed, " + << deleteObjectsRequest.SerializePayload(); + return -1; + } + return 0; + } + /* + // There are still issues with updating the object metadata separately, and a + separate s3 interface is needed to support it int + S3Adapter::UpdateObjectMeta(const Aws::String &key, const Aws::Map &meta) { Aws::S3::Model::PutObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(key); + auto input_data = + Aws::MakeShared("PutObjectInputStream"); + request.SetBody(input_data); + request.SetMetadata(meta); + auto response = s3Client_->PutObject(request); + if (response.IsSuccess()) { + return 0; + } else { + LOG(ERROR) << "PutObject error:" + << bucketName_ << key + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return -1; + } + } + + int S3Adapter::GetObjectMeta(const Aws::String &key, + Aws::Map *meta) { + Aws::S3::Model::HeadObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(key); + auto response = s3Client_->HeadObject(request); + if (response.IsSuccess()) { + *meta = response.GetResult().GetMetadata(); + return 0; + } else { + LOG(ERROR) << "HeadObject error:" + << bucketName_ << key + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return -1; + } + } + */ + Aws::String S3Adapter::MultiUploadInit(const Aws::String &key) + { + Aws::S3::Model::CreateMultipartUploadRequest request; + 
request.WithBucket(bucketName_).WithKey(key); + auto response = s3Client_->CreateMultipartUpload(request); + if (response.IsSuccess()) + { + return response.GetResult().GetUploadId(); + } + else + { + LOG(ERROR) << "CreateMultipartUploadRequest error: " + << response.GetError().GetMessage(); + return ""; + } + } + + Aws::S3::Model::CompletedPart S3Adapter::UploadOnePart( + const Aws::String &key, const Aws::String &uploadId, int partNum, + int partSize, const char *buf) + { + Aws::S3::Model::UploadPartRequest request; + request.SetBucket(bucketName_); + request.SetKey(key); + request.SetUploadId(uploadId); + request.SetPartNumber(partNum); + request.SetContentLength(partSize); + + request.SetBody( + Aws::MakeShared(AWS_ALLOCATE_TAG, buf, partSize)); + + if (throttle_) + { + throttle_->Add(false, partSize); + } + auto result = s3Client_->UploadPart(request); + if (result.IsSuccess()) + { + return Aws::S3::Model::CompletedPart() + .WithETag(result.GetResult().GetETag()) + .WithPartNumber(partNum); + } + else + { + return Aws::S3::Model::CompletedPart() + .WithETag("errorTag") + .WithPartNumber(-1); + } + } + + int S3Adapter::CompleteMultiUpload( + const Aws::String &key, const Aws::String &uploadId, + const Aws::Vector &cp_v) + { + Aws::S3::Model::CompleteMultipartUploadRequest request; + request.WithBucket(bucketName_); + request.SetKey(key); + request.SetUploadId(uploadId); + request.SetMultipartUpload( + Aws::S3::Model::CompletedMultipartUpload().WithParts(cp_v)); + auto response = s3Client_->CompleteMultipartUpload(request); + if (response.IsSuccess()) + { + return 0; + } + else + { + LOG(ERROR) << "CompleteMultiUpload error: " + << response.GetError().GetMessage(); + this->AbortMultiUpload(key, uploadId); + return -1; + } + } + + int S3Adapter::AbortMultiUpload(const Aws::String &key, + const Aws::String &uploadId) + { + Aws::S3::Model::AbortMultipartUploadRequest request; + request.WithBucket(bucketName_); + request.SetKey(key); + request.SetUploadId(uploadId); + auto response = s3Client_->AbortMultipartUpload(request); + if (response.IsSuccess()) + { + return 0; + } + else + { + LOG(ERROR) << "AbortMultiUpload error: " + << response.GetError().GetMessage(); + return -1; + } + } + + void S3Adapter::AsyncRequestInflightBytesThrottle::OnStart(uint64_t len) + { + std::unique_lock lock(mtx_); + while (inflightBytes_ + len > maxInflightBytes_) + { + cond_.wait(lock); + } + + inflightBytes_ += len; + } + + void S3Adapter::AsyncRequestInflightBytesThrottle::OnComplete(uint64_t len) + { + std::unique_lock lock(mtx_); + inflightBytes_ -= len; + cond_.notify_all(); + } + void S3Adapter::SetS3Option(const S3InfoOption &fsS3Opt) + { + s3Address_ = fsS3Opt.s3Address.c_str(); + s3Ak_ = fsS3Opt.ak.c_str(); + s3Sk_ = fsS3Opt.sk.c_str(); + bucketName_ = fsS3Opt.bucketName.c_str(); + } + + } // namespace common +} // namespace curve diff --git a/src/common/s3_adapter.h b/src/common/s3_adapter.h index 2adbbfb3bc..4a3bb3c6b6 100644 --- a/src/common/s3_adapter.h +++ b/src/common/s3_adapter.h @@ -105,10 +105,10 @@ struct S3InfoOption { uint32_t objectPrefix; }; -void InitS3AdaptorOptionExceptS3InfoOption(Configuration *conf, - S3AdapterOption *s3Opt); +void InitS3AdaptorOptionExceptS3InfoOption(Configuration* conf, + S3AdapterOption* s3Opt); -void InitS3AdaptorOption(Configuration *conf, S3AdapterOption *s3Opt); +void InitS3AdaptorOption(Configuration* conf, S3AdapterOption* s3Opt); using GetObjectAsyncCallBack = std::function&)>; @@ -185,27 +185,27 @@ class S3Adapter { } virtual ~S3Adapter() { Deinit(); } 
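    // ------------------------------------------------------------------
    // Editorial aside, not part of this header: the multipart-upload
    // methods declared below are meant to be chained. A minimal sketch,
    // assuming an already-initialized adapter; the key and the part
    // buffers (buf1, buf2) are made up:
    //
    //   Aws::String key = "chunk-0001";
    //   Aws::String uploadId = adapter.MultiUploadInit(key);
    //   Aws::Vector<Aws::S3::Model::CompletedPart> parts;
    //   // Part numbers start from 1; here two 4 MB parts are uploaded.
    //   parts.push_back(adapter.UploadOnePart(key, uploadId, 1, 4 << 20, buf1));
    //   parts.push_back(adapter.UploadOnePart(key, uploadId, 2, 4 << 20, buf2));
    //   // CompleteMultiUpload aborts the upload by itself when it fails
    //   // (see the implementation in s3_adapter.cpp above).
    //   int rc = adapter.CompleteMultiUpload(key, uploadId, parts);
    // ------------------------------------------------------------------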
/**
-     * 初始化S3Adapter
+     * Initialize S3Adapter
      */
-    virtual void Init(const std::string &path);
+    virtual void Init(const std::string& path);
     /**
-     * 初始化S3Adapter
-     * 但不包括 S3InfoOption
+     * Initialize S3Adapter,
+     * excluding the S3InfoOption part
      */
-    virtual void InitExceptFsS3Option(const std::string &path);
+    virtual void InitExceptFsS3Option(const std::string& path);
     /**
-     * 初始化S3Adapter
+     * Initialize S3Adapter
      */
-    virtual void Init(const S3AdapterOption &option);
+    virtual void Init(const S3AdapterOption& option);
     /**
      * @brief
      *
      * @details
      */
-    virtual void SetS3Option(const S3InfoOption &fsS3Opt);
+    virtual void SetS3Option(const S3InfoOption& fsS3Opt);
     /**
-     * 释放S3Adapter资源
+     * Release S3Adapter resources
      */
     virtual void Deinit();
     /**
@@ -215,7 +215,7 @@ class S3Adapter {
     /**
      * reinit s3client with new AWSCredentials
      */
-    virtual void Reinit(const S3AdapterOption &option);
+    virtual void Reinit(const S3AdapterOption& option);
     /**
      * get s3 ak
      */
@@ -229,39 +229,40 @@ class S3Adapter {
      */
     virtual std::string GetS3Endpoint();
     /**
-     * 创建存储快照数据的桶(桶名称由配置文件指定,需要全局唯一)
-     * @return: 0 创建成功/ -1 创建失败
+     * Create the bucket that stores snapshot data (the bucket name is
+     * specified by the configuration file and must be globally unique)
+     * @return: 0 created successfully / -1 failed to create
      */
     virtual int CreateBucket();
     /**
-     * 删除桶
-     * @return 0 删除成功/-1 删除失败
+     * Delete the bucket
+     * @return 0 deleted successfully / -1 failed to delete
      */
     virtual int DeleteBucket();
     /**
-     * 判断快照数据的桶是否存在
-     * @return true 桶存在/ false 桶不存在
+     * Check whether the snapshot data bucket exists
+     * @return true the bucket exists / false the bucket does not exist
      */
     virtual bool BucketExist();
     /**
-     * 上传数据到对象存储
-     * @param 对象名
-     * @param 数据内容
-     * @param 数据内容大小
-     * @return:0 上传成功/ -1 上传失败
+     * Upload data to object storage
+     * @param object name
+     * @param data content
+     * @param data content size
+     * @return: 0 uploaded successfully / -1 failed to upload
      */
-    virtual int PutObject(const Aws::String &key, const char *buffer,
+    virtual int PutObject(const Aws::String& key, const char* buffer,
                           const size_t bufferSize);
     // Get object to buffer[bufferSize]
     // int GetObject(const Aws::String &key, void *buffer,
     //               const int bufferSize);
     /**
-     * 上传数据到对象存储
-     * @param 对象名
-     * @param 数据内容
-     * @return:0 上传成功/ -1 上传失败
+     * Upload data to object storage
+     * @param object name
+     * @param data content
+     * @return: 0 uploaded successfully / -1 failed to upload
      */
-    virtual int PutObject(const Aws::String &key, const std::string &data);
+    virtual int PutObject(const Aws::String& key, const std::string& data);
     virtual void PutObjectAsync(std::shared_ptr<PutObjectAsyncContext> context);
     /**
      * Get object from s3,
@@ -273,40 +274,40 @@ class S3Adapter {
      * @param pointer which contain the data
      * @return 0 success / -1 fail
      */
-    virtual int GetObject(const Aws::String &key, std::string *data);
+    virtual int GetObject(const Aws::String& key, std::string* data);
     /**
-     * 从对象存储读取数据
-     * @param 对象名
-     * @param[out] 返回读取的数据
-     * @param 读取的偏移
-     * @param 读取的长度
+     * Read data from object storage
+     * @param object name
+     * @param[out] buffer that receives the data read
+     * @param the offset to read from
+     * @param the length to read
      */
-    virtual int GetObject(const std::string &key, char *buf, off_t offset,
+    virtual int GetObject(const std::string& key, char* buf, off_t offset,
                           size_t len);  // NOLINT
     /**
-     * @brief 异步从对象存储读取数据
+     * @brief Asynchronously read data from object storage
      *
-     * @param context 异步上下文
+     * @param context the asynchronous context
      */
     virtual void GetObjectAsync(std::shared_ptr<GetObjectAsyncContext> context);
     /**
-     * 删除对象
-     * @param 对象名
-     * @return: 0 删除成功/ -
+     * Delete an object
+     * @param object name
+     * @return: 0 deleted successfully / -1 failed to delete
      */
-    virtual int DeleteObject(const Aws::String &key);
+    virtual int DeleteObject(const Aws::String& key);

-    virtual int DeleteObjects(const std::list<Aws::String> &keyList);
+    virtual int DeleteObjects(const std::list<Aws::String>& keyList);
     /**
-     * 判断对象是否存在
-     * @param 对象名
-     * @return: true 对象存在/ false 对象不存在
+     * Check whether an object exists
+     * @param object name
+     * @return: true the object exists / false the object does not exist
      */
-    virtual bool ObjectExist(const Aws::String &key);
+    virtual bool ObjectExist(const Aws::String& key);
     /*
     // Update object meta content
-    // Todo 接口还有问题 need fix
+    // TODO: this interface still has issues and needs a fix
     virtual int UpdateObjectMeta(const Aws::String &key,
                                  const Aws::Map<Aws::String, Aws::String> &meta);
     // Get object meta content
@@ -314,51 +315,53 @@ class S3Adapter {
                               Aws::Map<Aws::String, Aws::String> *meta);
     */
     /**
-     * 初始化对象的分片上传任务
-     * @param 对象名
-     * @return 任务名
+     * Initialize a multipart upload task for an object
+     * @param object name
+     * @return the upload task id
      */
-    virtual Aws::String MultiUploadInit(const Aws::String &key);
+    virtual Aws::String MultiUploadInit(const Aws::String& key);
     /**
-     * 增加一个分片到分片上传任务中
-     * @param 对象名
-     * @param 任务名
-     * @param 第几个分片(从1开始)
-     * @param 分片大小
-     * @param 分片的数据内容
-     * @return: 分片任务管理对象
+     * Add one part to a multipart upload task
+     * @param object name
+     * @param upload task id
+     * @param part number (starting from 1)
+     * @param part size
+     * @param part data
+     * @return: the CompletedPart object describing the uploaded part
      */
-    virtual Aws::S3::Model::CompletedPart
-    UploadOnePart(const Aws::String &key, const Aws::String &uploadId,
-                  int partNum, int partSize, const char* buf);
+    virtual Aws::S3::Model::CompletedPart UploadOnePart(
+        const Aws::String& key, const Aws::String& uploadId, int partNum,
+        int partSize, const char* buf);
     /**
-     * 完成分片上传任务
-     * @param 对象名
-     * @param 分片上传任务id
-     * @管理分片上传任务的vector
-     * @return 0 任务完成/ -1 任务失败
+     * Complete a multipart upload task
+     * @param object name
+     * @param upload task id
+     * @param vector of the completed parts
+     * @return 0 task completed / -1 task failed
      */
-    virtual int
-    CompleteMultiUpload(const Aws::String &key, const Aws::String &uploadId,
-                        const Aws::Vector<Aws::S3::Model::CompletedPart> &cp_v);
+    virtual int CompleteMultiUpload(
+        const Aws::String& key, const Aws::String& uploadId,
+        const Aws::Vector<Aws::S3::Model::CompletedPart>& cp_v);
     /**
-     * 终止一个对象的分片上传任务
-     * @param 对象名
-     * @param 任务id
-     * @return 0 终止成功/ -1 终止失败
+     * Abort the multipart upload task of an object
+     * @param object name
+     * @param upload task id
+     * @return 0 aborted successfully / -1 failed to abort
      */
-    virtual int AbortMultiUpload(const Aws::String &key,
-                                 const Aws::String &uploadId);
-    void SetBucketName(const Aws::String &name) { bucketName_ = name; }
+    virtual int AbortMultiUpload(const Aws::String& key,
+                                 const Aws::String& uploadId);
+    void SetBucketName(const Aws::String& name) { bucketName_ = name; }
     Aws::String GetBucketName() { return bucketName_; }

-    Aws::Client::ClientConfiguration *GetConfig() { return clientCfg_; }
+    Aws::Client::ClientConfiguration* GetConfig() { return clientCfg_; }

  private:
     class AsyncRequestInflightBytesThrottle {
      public:
         explicit AsyncRequestInflightBytesThrottle(uint64_t maxInflightBytes)
-            : maxInflightBytes_(maxInflightBytes), inflightBytes_(0), mtx_(),
+            : maxInflightBytes_(maxInflightBytes),
+              inflightBytes_(0),
+              mtx_(),
               cond_() {}

         void OnStart(uint64_t len);
@@ -373,19 +376,20 @@ class S3Adapter {
     };

  private:
-    // S3服务器地址
+    // S3 server address
     Aws::String
s3Address_; - // 用于用户认证的AK/SK,需要从对象存储的用户管理中申请 + // AK/SK for user authentication needs to be applied for from user + // management in object storage Aws::String s3Ak_; Aws::String s3Sk_; - // 对象的桶名 + // The bucket name of the object Aws::String bucketName_; - // aws sdk的配置 - Aws::Client::ClientConfiguration *clientCfg_; - Aws::S3::S3Client *s3Client_; + // Configuration of AWS SDK + Aws::Client::ClientConfiguration* clientCfg_; + Aws::S3::S3Client* s3Client_; Configuration conf_; - Throttle *throttle_; + Throttle* throttle_; std::unique_ptr inflightBytesThrottle_; }; @@ -397,7 +401,7 @@ class FakeS3Adapter : public S3Adapter { bool BucketExist() override { return true; } - int PutObject(const Aws::String &key, const char *buffer, + int PutObject(const Aws::String& key, const char* buffer, const size_t bufferSize) override { (void)key; (void)buffer; @@ -405,20 +409,20 @@ class FakeS3Adapter : public S3Adapter { return 0; } - int PutObject(const Aws::String &key, const std::string &data) override { + int PutObject(const Aws::String& key, const std::string& data) override { (void)key; (void)data; return 0; } - void - PutObjectAsync(std::shared_ptr context) override { + void PutObjectAsync( + std::shared_ptr context) override { context->retCode = 0; context->timer.stop(); context->cb(context); } - int GetObject(const Aws::String &key, std::string *data) override { + int GetObject(const Aws::String& key, std::string* data) override { (void)key; (void)data; // just return 4M data @@ -426,7 +430,7 @@ class FakeS3Adapter : public S3Adapter { return 0; } - int GetObject(const std::string &key, char *buf, off_t offset, + int GetObject(const std::string& key, char* buf, off_t offset, size_t len) override { (void)key; (void)offset; @@ -435,30 +439,29 @@ class FakeS3Adapter : public S3Adapter { return 0; } - void - GetObjectAsync(std::shared_ptr context) override { + void GetObjectAsync( + std::shared_ptr context) override { memset(context->buf, '1', context->len); context->retCode = 0; context->cb(this, context); } - int DeleteObject(const Aws::String &key) override { + int DeleteObject(const Aws::String& key) override { (void)key; return 0; } - int DeleteObjects(const std::list &keyList) override { + int DeleteObjects(const std::list& keyList) override { (void)keyList; return 0; } - bool ObjectExist(const Aws::String &key) override { + bool ObjectExist(const Aws::String& key) override { (void)key; return true; } }; - } // namespace common } // namespace curve #endif // SRC_COMMON_S3_ADAPTER_H_ diff --git a/src/common/snapshotclone/snapshotclone_define.cpp b/src/common/snapshotclone/snapshotclone_define.cpp index b3b08f8d74..9e2ba8a0a6 100644 --- a/src/common/snapshotclone/snapshotclone_define.cpp +++ b/src/common/snapshotclone/snapshotclone_define.cpp @@ -20,14 +20,14 @@ * Author: xuchaojie */ -#include - #include "src/common/snapshotclone/snapshotclone_define.h" +#include + namespace curve { namespace snapshotcloneserver { -// 字符串常量定义 +// String constant definition const char* kServiceName = "SnapshotCloneService"; const char* kCreateSnapshotAction = "CreateSnapshot"; const char* kDeleteSnapshotAction = "DeleteSnapshot"; @@ -92,10 +92,8 @@ std::map code2Msg = { {kErrCodeNotSupport, "Not support."}, }; -std::string BuildErrorMessage( - int errCode, - const std::string &requestId, - const std::string &uuid) { +std::string BuildErrorMessage(int errCode, const std::string& requestId, + const std::string& uuid) { Json::Value mainObj; mainObj[kCodeStr] = std::to_string(errCode); mainObj[kMessageStr] = 
code2Msg[errCode];
diff --git a/src/common/snapshotclone/snapshotclone_define.h b/src/common/snapshotclone/snapshotclone_define.h
index ffa5428a6e..558fa15f97 100644
--- a/src/common/snapshotclone/snapshotclone_define.h
+++ b/src/common/snapshotclone/snapshotclone_define.h
@@ -23,13 +23,13 @@
 #ifndef SRC_COMMON_SNAPSHOTCLONE_SNAPSHOTCLONE_DEFINE_H_
 #define SRC_COMMON_SNAPSHOTCLONE_SNAPSHOTCLONE_DEFINE_H_

-#include 
 #include 
+#include 

 namespace curve {
 namespace snapshotcloneserver {

-// snapshotcloneservice字符串常量定义
+// snapshotcloneservice string constant definitions
 extern const char* kServiceName;
 // action
 extern const char* kCreateSnapshotAction;
@@ -74,74 +74,66 @@ extern const char* kCloneFileInfoStr;
 typedef std::string UUID;
 using TaskIdType = UUID;

-enum class CloneTaskType {
-    kClone = 0,
-    kRecover
-};
+enum class CloneTaskType { kClone = 0, kRecover };

-enum class CloneRefStatus {
-    kNoRef = 0,
-    kHasRef = 1,
-    kNeedCheck = 2
-};
+enum class CloneRefStatus { kNoRef = 0, kHasRef = 1, kNeedCheck = 2 };

-// 未初始序列号
+// Uninitialized sequence number
 const uint64_t kUnInitializeSeqNum = 0;
-// 初始序列号
+// Initial sequence number
 const uint64_t kInitializeSeqNum = 1;

-// 错误码:执行成功
+// Error code: Executed successfully
 const int kErrCodeSuccess = 0;
-// 错误码: 内部错误
+// Error code: Internal error
 const int kErrCodeInternalError = -1;
-// 错误码:服务器初始化失败
+// Error code: Server initialization failed
 const int kErrCodeServerInitFail = -2;
-// 错误码:服务器启动失败
+// Error code: Server startup failed
 const int kErrCodeServerStartFail = -3;
-// 错误码:服务已停止
+// Error code: The service has stopped
 const int kErrCodeServiceIsStop = -4;
-// 错误码:非法请求
+// Error code: Invalid request
 const int kErrCodeInvalidRequest = -5;
-// 错误码:任务已存在
+// Error code: Task already exists
 const int kErrCodeTaskExist = -6;
-// 错误码:非法的用户
+// Error code: Invalid user
 const int kErrCodeInvalidUser = -7;
-// 错误码:文件不存在
+// Error code: File does not exist
 const int kErrCodeFileNotExist = -8;
-// 错误码:文件状态异常
+// Error code: Abnormal file status
 const int kErrCodeFileStatusInvalid = -9;
-// 错误码:chunk大小未按chunk分片大小对齐
+// Error code: Chunk size is not aligned to the chunk split size
 const int kErrCodeChunkSizeNotAligned = -10;
-// 错误码:文件名不匹配
+// Error code: File name mismatch
 const int kErrCodeFileNameNotMatch = -11;
-// 错误码: 不能删除未完成的快照
+// Error code: Cannot delete an unfinished snapshot
 const int kErrCodeSnapshotCannotDeleteUnfinished = -12;
-// 错误码: 不能对存在异常快照的文件打快照,或不能对存在错误的目标文件克隆/恢复
+// Error code: Cannot snapshot a file that has abnormal snapshots, or
+// clone/recover to a target file that has errors
 const int kErrCodeSnapshotCannotCreateWhenError = -13;
-// 错误码:取消的快照已完成
+// Error code: The snapshot to cancel has already finished
 const int kErrCodeCannotCancelFinished = -14;
-// 错误码:不能从未完成或存在错误的快照克隆
+// Error code: Cannot clone from an unfinished snapshot or a snapshot with
+// errors
 const int kErrCodeInvalidSnapshot = -15;
-// 错误码:不能删除正在克隆的快照
+// Error code: Cannot delete a snapshot that is being cloned
 const int kErrCodeSnapshotCannotDeleteCloning = -16;
-// 错误码:不能清理未完成的克隆
+// Error code: Cannot clean up an unfinished clone
 const int kErrCodeCannotCleanCloneUnfinished = -17;
-// 错误码:快照到达上限
+// Error code: The number of snapshots has reached its upper limit
 const int kErrCodeSnapshotCountReachLimit = -18;
-// 错误码:文件已存在
+// Error code: File already exists
 const int kErrCodeFileExist = -19;
-// 错误码:克隆任务已满
+// Error code: The clone task queue is full
 const int kErrCodeTaskIsFull = -20;
-// 错误码:不支持
+// Error code: Not supported
 const int kErrCodeNotSupport = -21;

 extern std::map<int, std::string> code2Msg;
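// Editorial aside, not part of this header: how these codes surface to
// clients. BuildErrorMessage (declared just below) looks the code up in
// code2Msg and wraps it, together with the request id and an optional uuid,
// into a JSON body; the exact key names come from kCodeStr, kMessageStr and
// friends, so the shape shown here is an assumption. A hedged sketch:
//
//   std::string body =
//       BuildErrorMessage(kErrCodeFileNotExist, "request-id-123");
//   // => something like {"Code": "-8", "Message": "...",
//   //                    "RequestId": "request-id-123"}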
-std::string BuildErrorMessage( - int errCode, - const std::string &requestId, - const std::string &uuid = ""); - +std::string BuildErrorMessage(int errCode, const std::string& requestId, + const std::string& uuid = ""); // clone progress constexpr uint32_t kProgressCloneStart = 0; @@ -153,8 +145,6 @@ constexpr uint32_t kProgressRecoverChunkBegin = kProgressMetaInstalled; constexpr uint32_t kProgressRecoverChunkEnd = 95; constexpr uint32_t kProgressCloneComplete = 100; - - } // namespace snapshotcloneserver } // namespace curve diff --git a/src/common/stringstatus.h b/src/common/stringstatus.h index 203b851bfc..a8ca00e1d8 100644 --- a/src/common/stringstatus.h +++ b/src/common/stringstatus.h @@ -20,28 +20,28 @@ * Author: lixiaocui */ - -#ifndef SRC_COMMON_STRINGSTATUS_H_ -#define SRC_COMMON_STRINGSTATUS_H_ +#ifndef SRC_COMMON_STRINGSTATUS_H_ +#define SRC_COMMON_STRINGSTATUS_H_ #include -#include + #include +#include namespace curve { namespace common { class StringStatus { public: /** - * @brief ExposeAs 用于初始化bvar + * @brief ExposeAs is used to initialize bvar * - * @param[in] prefix, 前缀 - * @param[in] name, 名字 + * @param[in] prefix, prefix + * @param[in] name, the bvar name */ - void ExposeAs(const std::string &prefix, const std::string &name); + void ExposeAs(const std::string& prefix, const std::string& name); /** - * @brief Set 设置每项key-value信息 + * @brief Set sets the key-value information for each item * * @param[in] key * @param[in] value @@ -49,30 +49,31 @@ class StringStatus { void Set(const std::string& key, const std::string& value); /** - * @brief Update 把当前key-value map中的键值对以json string的形式设置到status中 //NOLINT + * @brief Update writes the key-value pairs of the current map into the + * status as a JSON string // NOLINT */ void Update(); /** - * @brief GetValueByKey 获取指定key对应的value + * @brief GetValueByKey Get the value corresponding to the specified key * - * @param[in] key 指定key + * @param[in] key Specify the key */ - std::string GetValueByKey(const std::string &key); + std::string GetValueByKey(const std::string& key); /** - * @brief JsonBody 获取当前key-value map对应的json形式字符串 + * @brief JsonBody obtains the JSON format string corresponding to the + * current key-value map */ std::string JsonBody(); private: - // 需要导出的结构体的key-value map + // The key-value map of the structure to be exported std::map kvs_; - // 该导出项对应的status + // The status corresponding to the exported item bvar::Status status_; }; } // namespace common } // namespace curve #endif // SRC_COMMON_STRINGSTATUS_H_ - diff --git a/src/common/timeutility.h b/src/common/timeutility.h index 1ba3483d34..d3fc2d244c 100644 --- a/src/common/timeutility.h +++ b/src/common/timeutility.h @@ -25,9 +25,10 @@ #include #include #include + +#include #include #include -#include namespace curve { namespace common { @@ -57,7 +58,8 @@ class TimeUtility { return localtime(&now)->tm_hour; } - // 时间戳转成标准时间输出在standard里面,时间戳单位为秒 + // Convert the timestamp to standard time and output it in standard, with + // the timestamp unit in seconds static inline void TimeStampToStandard(time_t timeStamp, std::string* standard) { char now[64]; @@ -67,7 +69,7 @@ class TimeUtility { *standard = std::string(now); } - // 时间戳转成标准时间并返回,时间戳单位为秒 + // Convert the timestamp (in seconds) to standard time and return it static inline std::string TimeStampToStandard(time_t timeStamp) { char now[64]; struct tm p; @@ -85,13 +87,9 @@ class ExpiredTime { public: ExpiredTime() : startUs_(TimeUtility::GetTimeofDayUs()) {} - double ExpiredSec() const { - return ExpiredUs() / 
1000000; - } + double ExpiredSec() const { return ExpiredUs() / 1000000; } - double ExpiredMs() const { - return ExpiredUs() / 1000; - } + double ExpiredMs() const { return ExpiredUs() / 1000; } double ExpiredUs() const { return TimeUtility::GetTimeofDayUs() - startUs_; @@ -101,7 +99,7 @@ class ExpiredTime { uint64_t startUs_; }; -} // namespace common -} // namespace curve +} // namespace common +} // namespace curve -#endif // SRC_COMMON_TIMEUTILITY_H_ +#endif // SRC_COMMON_TIMEUTILITY_H_ diff --git a/src/common/uuid.h b/src/common/uuid.h index 8fbc41f61c..996704c987 100644 --- a/src/common/uuid.h +++ b/src/common/uuid.h @@ -27,26 +27,29 @@ extern "C" { void uuid_generate(uuid_t out); void uuid_generate_random(uuid_t out); void uuid_generate_time(uuid_t out); -// 指明由uuid_generate_time生成的uuid是否使用了时间同步机制,不进行封装。 +// Indicate whether the UUID generated by uuid_generate_time utilizes a time +// synchronization mechanism without encapsulation int uuid_generate_time_safe(uuid_t out); } #include #define BUFF_LEN 36 namespace curve { namespace common { -// 生成uuid的生成器 +// Generator for generating uuid class UUIDGenerator { public: UUIDGenerator() {} /** - * @brief 生成uuid,优先采用的算法 - * 如果存在一个高质量的随机数生成器(/dev/urandom), - * UUID将基于其生成的随机数产生。 - * 备用算法:在高质量的随机数生成器不可用的情况下,如果可以获取到MAC地址, - * 则将利用由随机数生成器产生的随机数、当前时间、MAC地址生成UUID。 + * @brief Generate a UUID with a prioritized algorithm selection + * If a high-quality random number generator (/dev/urandom) is available, + * the UUID will be generated based on the random numbers it generates. + * Backup algorithm: If a high-quality random number generator is not + * available and if the MAC address can be obtained, the UUID will be + * generated using a combination of random numbers, current time, and the + * MAC address. * @param : - * @return 生成的uuid + * @return The generated UUID */ std::string GenerateUUID() { uuid_t out; @@ -58,11 +61,14 @@ class UUIDGenerator { } /** - * @brief 生成uuid - * 使用全局时钟、MAC地址。有MAC地址泄露风险。为了保证唯一性还使用的时间同步机制, - * 如果,时间同步机制不可用,多台机器上生成的uuid可能会重复。 + * @brief Generate a UUID for the specified purpose. + * It utilizes the global clock and MAC address, but there is a risk of MAC + * address leakage. To ensure uniqueness, it also employs a time + * synchronization mechanism. However, if the time synchronization mechanism + * is not available, there is a possibility of UUID duplication when + * generated on multiple machines. * @param : - * @return 生成的uuid + * @return The generated UUID */ std::string GenerateUUIDTime() { uuid_t out; @@ -74,10 +80,11 @@ class UUIDGenerator { } /** - * @brief 生成uuid - * 强制完全使用随机数,优先使用(/dev/urandom),备用(伪随机数生成器)。 - * 在使用伪随机数生成器的情况下,uuid有重复的风险。 - * @return 生成的uuid + * @brief Generate a UUID + * Forcefully utilize random numbers, with a preference for (/dev/urandom) + * and a fallback to pseudo-random number generation. When using the + * pseudo-random number generator, there is a risk of UUID duplication. 
+ * @return The generated UUID */ std::string GenerateUUIDRandom() { uuid_t out; diff --git a/src/common/wait_interval.h b/src/common/wait_interval.h index 69c82143c2..bbf8b21b49 100644 --- a/src/common/wait_interval.h +++ b/src/common/wait_interval.h @@ -20,36 +20,37 @@ * Author: lixiaocui */ -#ifndef SRC_COMMON_WAIT_INTERVAL_H_ -#define SRC_COMMON_WAIT_INTERVAL_H_ +#ifndef SRC_COMMON_WAIT_INTERVAL_H_ +#define SRC_COMMON_WAIT_INTERVAL_H_ #include "src/common/interruptible_sleeper.h" namespace curve { namespace common { -class WaitInterval { +class WaitInterval { public: /** - * Init 初始化任务的执行间隔 + * Init initializes the execution interval of the task * - * @param[in] intervalMs 执行间隔单位是ms + * @param[in] intervalMs The execution interval, in ms */ void Init(uint64_t intervalMs); /** - * WaitForNextExcution 根据最近一次的执行时间点和周期确定需要等待多久之后再执行 + * WaitForNextExcution determines how long to wait before executing based on + * the latest execution time and the period */ void WaitForNextExcution(); /** - * StopWait 退出sleep等待 + * StopWait exits the sleep wait */ void StopWait(); private: - // 最近一次的执行时间 + // Last execution time uint64_t lastSend_; - // 任务的执行周期 + // Task execution period uint64_t intevalMs_; InterruptibleSleeper sleeper_; diff --git a/src/fs/ext4_filesystem_impl.cpp b/src/fs/ext4_filesystem_impl.cpp index f4cd6cfcdb..d649b68ce7 100644 --- a/src/fs/ext4_filesystem_impl.cpp +++ b/src/fs/ext4_filesystem_impl.cpp @@ -20,14 +20,15 @@ * Author: yangyaokai */ +#include "src/fs/ext4_filesystem_impl.h" + +#include #include -#include -#include #include -#include +#include +#include #include "src/common/string_util.h" -#include "src/fs/ext4_filesystem_impl.h" #include "src/fs/wrap_posix.h" #define MIN_KERNEL_VERSION KERNEL_VERSION(3, 15, 0) @@ -40,13 +41,11 @@ std::mutex Ext4FileSystemImpl::mutex_; Ext4FileSystemImpl::Ext4FileSystemImpl( std::shared_ptr posixWrapper) - : posixWrapper_(posixWrapper) - , enableRenameat2_(false) { + : posixWrapper_(posixWrapper), enableRenameat2_(false) { CHECK(posixWrapper_ != nullptr) << "PosixWrapper is null"; } -Ext4FileSystemImpl::~Ext4FileSystemImpl() { -} +Ext4FileSystemImpl::~Ext4FileSystemImpl() {} std::shared_ptr Ext4FileSystemImpl::getInstance() { std::lock_guard lock(mutex_); @@ -54,13 +53,14 @@ std::shared_ptr Ext4FileSystemImpl::getInstance() { std::shared_ptr wrapper = std::make_shared(); self_ = std::shared_ptr( - new(std::nothrow) Ext4FileSystemImpl(wrapper)); + new (std::nothrow) Ext4FileSystemImpl(wrapper)); CHECK(self_ != nullptr) << "Failed to new ext4 local fs."; } return self_; } -void Ext4FileSystemImpl::SetPosixWrapper(std::shared_ptr wrapper) { //NOLINT +void Ext4FileSystemImpl::SetPosixWrapper( + std::shared_ptr wrapper) { // NOLINT CHECK(wrapper != nullptr) << "PosixWrapper is null"; posixWrapper_ = wrapper; } @@ -71,16 +71,17 @@ bool Ext4FileSystemImpl::CheckKernelVersion() { ret = posixWrapper_->uname(&kernel_info); if (ret != 0) { - LOG(ERROR) << "Get kernel info failed."; - return false; + LOG(ERROR) << "Get kernel info failed."; + return false; } LOG(INFO) << "Kernel version: " << kernel_info.release; LOG(INFO) << "System version: " << kernel_info.version; LOG(INFO) << "Machine: " << kernel_info.machine; - // 通过uname获取的版本字符串格式可能为a.b.c-xxx - // a为主版本号,b为此版本号,c为修正号 + // The version string format obtained through uname may be a.b.c-xxx + // a is the major version number, b is the minor version number, and c is + // the revision number vector elements; ::curve::common::SplitString(kernel_info.release, "-", &elements); if (elements.size() == 0) { @@ -90,7 +91,8 
@@ bool Ext4FileSystemImpl::CheckKernelVersion() { vector numbers; ::curve::common::SplitString(elements[0], ".", &numbers); - // 有些系统可能版本格式前面部分是a.b.c.d,但是a.b.c是不变的 + // Some systems may have a version format with the front part being a.b.c.d, + // but a.b.c remains unchanged if (numbers.size() < 3) { LOG(ERROR) << "parse kenel version failed."; return false; @@ -99,11 +101,10 @@ bool Ext4FileSystemImpl::CheckKernelVersion() { int major = std::stoi(numbers[0]); int minor = std::stoi(numbers[1]); int revision = std::stoi(numbers[2]); - LOG(INFO) << "major: " << major - << ", minor: " << minor + LOG(INFO) << "major: " << major << ", minor: " << minor << ", revision: " << revision; - // 内核版本必须大于3.15,用于支持renameat2 + // The kernel version must be greater than 3.15 to support renameat2 if (KERNEL_VERSION(major, minor, revision) < MIN_KERNEL_VERSION) { LOG(ERROR) << "Kernel older than 3.15 is not supported."; return false; @@ -114,14 +115,13 @@ bool Ext4FileSystemImpl::CheckKernelVersion() { int Ext4FileSystemImpl::Init(const LocalFileSystemOption& option) { enableRenameat2_ = option.enableRenameat2; if (enableRenameat2_) { - if (!CheckKernelVersion()) - return -1; + if (!CheckKernelVersion()) return -1; } return 0; } int Ext4FileSystemImpl::Statfs(const string& path, - struct FileSystemInfo *info) { + struct FileSystemInfo* info) { struct statfs diskInfo; int rc = posixWrapper_->statfs(path.c_str(), &diskInfo); if (rc < 0) { @@ -157,7 +157,8 @@ int Ext4FileSystemImpl::Close(int fd) { int Ext4FileSystemImpl::Delete(const string& path) { int rc = 0; - // 如果删除对象是目录的话,需要先删除目录下的子对象 + // If the deleted object is a directory, you need to first delete the sub + // objects under the directory if (DirExists(path)) { vector names; rc = List(path, &names); @@ -165,9 +166,9 @@ int Ext4FileSystemImpl::Delete(const string& path) { LOG(WARNING) << "List " << path << " failed."; return rc; } - for (auto &name : names) { + for (auto& name : names) { string subPath = path + "/" + name; - // 递归删除子对象 + // Recursively delete sub objects rc = Delete(subPath); if (rc < 0) { LOG(WARNING) << "Delete " << subPath << " failed."; @@ -189,20 +190,19 @@ int Ext4FileSystemImpl::Mkdir(const string& dirName) { ::curve::common::SplitString(dirName, "/", &names); // root dir must exists - if (0 == names.size()) - return 0; + if (0 == names.size()) return 0; string path; for (size_t i = 0; i < names.size(); ++i) { - if (0 == i && dirName[0] != '/') // 相对路径 + if (0 == i && dirName[0] != '/') // Relative path path = path + names[i]; else path = path + "/" + names[i]; - if (DirExists(path)) - continue; - // 目录需要755权限,不然会出现“Permission denied” + if (DirExists(path)) continue; + // Directory requires 755 permissions, otherwise 'Permission denied' + // will appear if (posixWrapper_->mkdir(path.c_str(), 0755) < 0) { - LOG(WARNING) << "mkdir " << path << " failed. "<< strerror(errno); + LOG(WARNING) << "mkdir " << path << " failed. " << strerror(errno); return -errno; } } @@ -226,8 +226,7 @@ bool Ext4FileSystemImpl::FileExists(const string& filePath) { return false; } -int Ext4FileSystemImpl::DoRename(const string& oldPath, - const string& newPath, +int Ext4FileSystemImpl::DoRename(const string& oldPath, const string& newPath, unsigned int flags) { int rc = 0; if (enableRenameat2_) { @@ -237,8 +236,7 @@ int Ext4FileSystemImpl::DoRename(const string& oldPath, } if (rc < 0) { LOG(WARNING) << "rename failed: " << strerror(errno) - << ". old path: " << oldPath - << ", new path: " << newPath + << ". 
old path: " << oldPath << ", new path: " << newPath << ", flag: " << flags; return -errno; } @@ -246,21 +244,22 @@ int Ext4FileSystemImpl::DoRename(const string& oldPath, } int Ext4FileSystemImpl::List(const string& dirName, - vector *names) { - DIR *dir = posixWrapper_->opendir(dirName.c_str()); + vector* names) { + DIR* dir = posixWrapper_->opendir(dirName.c_str()); if (nullptr == dir) { LOG(WARNING) << "opendir:" << dirName << " failed:" << strerror(errno); return -errno; } - struct dirent *dirIter; + struct dirent* dirIter; errno = 0; - while ((dirIter=posixWrapper_->readdir(dir)) != nullptr) { - if (strcmp(dirIter->d_name, ".") == 0 - || strcmp(dirIter->d_name, "..") == 0) + while ((dirIter = posixWrapper_->readdir(dir)) != nullptr) { + if (strcmp(dirIter->d_name, ".") == 0 || + strcmp(dirIter->d_name, "..") == 0) continue; names->push_back(dirIter->d_name); } - // 可能存在其他携程改变了errno,但是只能通过此方式判断readdir是否成功 + // There may be other Ctrip changes to errno, but this is the only way to + // determine whether readdir is successful if (errno != 0) { LOG(WARNING) << "readdir failed: " << strerror(errno); } @@ -268,19 +267,14 @@ int Ext4FileSystemImpl::List(const string& dirName, return -errno; } -int Ext4FileSystemImpl::Read(int fd, - char *buf, - uint64_t offset, - int length) { +int Ext4FileSystemImpl::Read(int fd, char* buf, uint64_t offset, int length) { int remainLength = length; int relativeOffset = 0; int retryTimes = 0; while (remainLength > 0) { - int ret = posixWrapper_->pread(fd, - buf + relativeOffset, - remainLength, + int ret = posixWrapper_->pread(fd, buf + relativeOffset, remainLength, offset); - // 如果offset大于文件长度,pread会返回0 + // If the offset is greater than the file length, pread will return 0 if (ret == 0) { LOG(WARNING) << "pread returns zero." 
<< "offset: " << offset @@ -304,17 +298,13 @@ int Ext4FileSystemImpl::Read(int fd, return length - remainLength; } -int Ext4FileSystemImpl::Write(int fd, - const char *buf, - uint64_t offset, +int Ext4FileSystemImpl::Write(int fd, const char* buf, uint64_t offset, int length) { int remainLength = length; int relativeOffset = 0; int retryTimes = 0; while (remainLength > 0) { - int ret = posixWrapper_->pwrite(fd, - buf + relativeOffset, - remainLength, + int ret = posixWrapper_->pwrite(fd, buf + relativeOffset, remainLength, offset); if (ret < 0) { if (errno == EINTR && retryTimes < MAX_RETYR_TIME) { @@ -333,9 +323,7 @@ int Ext4FileSystemImpl::Write(int fd, return length; } -int Ext4FileSystemImpl::Write(int fd, - butil::IOBuf buf, - uint64_t offset, +int Ext4FileSystemImpl::Write(int fd, butil::IOBuf buf, uint64_t offset, int length) { if (length != static_cast(buf.size())) { LOG(ERROR) << "IOBuf::pcut_into_file_descriptor failed, fd: " << fd @@ -376,9 +364,7 @@ int Ext4FileSystemImpl::Sync(int fd) { return 0; } -int Ext4FileSystemImpl::Append(int fd, - const char *buf, - int length) { +int Ext4FileSystemImpl::Append(int fd, const char* buf, int length) { (void)fd; (void)buf; (void)length; @@ -386,10 +372,7 @@ int Ext4FileSystemImpl::Append(int fd, return 0; } -int Ext4FileSystemImpl::Fallocate(int fd, - int op, - uint64_t offset, - int length) { +int Ext4FileSystemImpl::Fallocate(int fd, int op, uint64_t offset, int length) { int rc = posixWrapper_->fallocate(fd, op, offset, length); if (rc < 0) { LOG(ERROR) << "fallocate failed: " << strerror(errno); @@ -398,7 +381,7 @@ int Ext4FileSystemImpl::Fallocate(int fd, return 0; } -int Ext4FileSystemImpl::Fstat(int fd, struct stat *info) { +int Ext4FileSystemImpl::Fstat(int fd, struct stat* info) { int rc = posixWrapper_->fstat(fd, info); if (rc < 0) { LOG(ERROR) << "fstat failed: " << strerror(errno); diff --git a/src/fs/local_filesystem.h b/src/fs/local_filesystem.h index 3072867807..075e273a29 100644 --- a/src/fs/local_filesystem.h +++ b/src/fs/local_filesystem.h @@ -23,22 +23,23 @@ #ifndef SRC_FS_LOCAL_FILESYSTEM_H_ #define SRC_FS_LOCAL_FILESYSTEM_H_ -#include #include -#include #include -#include -#include -#include -#include +#include +#include + #include +#include +#include #include // NOLINT +#include +#include #include "src/fs/fs_common.h" -using std::vector; using std::map; using std::string; +using std::vector; namespace curve { namespace fs { @@ -50,123 +51,130 @@ struct LocalFileSystemOption { class LocalFileSystem { public: - LocalFileSystem() {} + LocalFileSystem() {} virtual ~LocalFileSystem() {} /** - * 初始化文件系统 - * 如果文件系统还未格式化,首先会格式化, - * 然后挂载文件系统, - * 已经格式化或者已经挂载的文件系统不会重复格式化或挂载 - * @param option:初始化参数 + * Initialize file system + * If the file system has not been formatted yet, it will be formatted + * first, Then mount the file system, Formatted or mounted file systems will + * not be repeatedly formatted or mounted + * @param option: initialization parameters */ virtual int Init(const LocalFileSystemOption& option) = 0; /** - * 获取文件或目录所在的文件系统状态信息 - * @param path: 要获取的文件系统下的文件或目录路径 - * @param info[out]: 文件系统状态信息 - * @return 成功返回0 + * Obtain the file system status information where the file or directory is + * located + * @param path: The file or directory path under the file system to obtain + * @param info[out]: File system status information + * @return Successfully returned 0 */ virtual int Statfs(const string& path, struct FileSystemInfo* info) = 0; /** - * 打开文件句柄 - * @param path:文件路径 - * @param flags:操作文件方式的flag - * 
此flag使用POSIX文件系统的定义 - * @return 成功返回文件句柄id,失败返回负值 + * Open file handle + * @param path: File path + * @param flags: flags controlling how the file is operated on + * This flag uses the definitions of the POSIX file system + * @return successfully returns the file handle id, while failure returns a + * negative value */ virtual int Open(const string& path, int flags) = 0; /** - * 关闭文件句柄 - * @param fd: 文件句柄id - * @return 成功返回0 + * Close file handle + * @param fd: file handle id + * @return Successfully returned 0 */ virtual int Close(int fd) = 0; /** - * 删除文件或目录 - * 如果删除对象为目录,会删除目录下的文件或子目录 - * @param path:文件或目录的路径 - * return 成功返回0 + * Delete files or directories + * If the deleted object is a directory, the files or subdirectories under + * the directory will be deleted + * @param path: The path to a file or directory + * @return Successfully returned 0 */ virtual int Delete(const string& path) = 0; /** - * 创建目录 - * @param dirPath: 目录路径 - * @return 成功返回0 + * Create directory + * @param dirPath: Directory path + * @return Successfully returned 0 */ virtual int Mkdir(const string& dirPath) = 0; /** - * 判断目录是否存在 - * @param dirPath:目录路径 - * @return 存在返回true,否则返回false + * Determine if the directory exists + * @param dirPath: Directory path + * @return returns true if it exists, otherwise false */ virtual bool DirExists(const string& dirPath) = 0; /** - * 判断文件是否存在 - * @param dirPath:目录路径 - * @return 存在返回true,否则返回false + * Determine if the file exists + * @param filePath: File path + * @return returns true if it exists, otherwise false */ virtual bool FileExists(const string& filePath) = 0; /** - * 重命名文件/目录 - * 将文件或目录重命名或者移到其他路径,不会覆盖已存在的文件 - * @param oldPath:原文件或目录路径 - * @param newPath:新的文件或目录路径 - * 新的文件或目录在重命名之前不存在,否则返回错误 - * @param flags:重命名使用的模式,默认值为0 - * 可选择RENAME_EXCHANGE、RENAME_EXCHANGE、RENAME_WHITEOUT三种模式 + * Rename File/Directory + * Renaming or moving files or directories to a different path will not + * overwrite existing files + * @param oldPath: Path to the original file or directory + * @param newPath: New file or directory path + * The new file or directory must not exist before renaming, otherwise an + * error will be returned + * @param flags: The mode used for renaming, with a default value of 0 + * Optional modes: RENAME_NOREPLACE, RENAME_EXCHANGE, RENAME_WHITEOUT * https://manpages.debian.org/testing/manpages-dev/renameat2.2.en.html - * @return 成功返回0 + * @return Successfully returned 0 */ - virtual int Rename(const string& oldPath, - const string& newPath, + virtual int Rename(const string& oldPath, const string& newPath, unsigned int flags = 0) { return DoRename(oldPath, newPath, flags); } /** - * 列举指定路径下的所有文件和目录名 - * @param dirPath:目录路径 - * @param name[out]:目录下的所有目录和文件名 - * @return 成功返回0 + * List all files and directory names under the specified path + * @param dirPath: Directory path + * @param names[out]: All directories and file names under the directory + * @return Successfully returned 0 */ virtual int List(const string& dirPath, vector* names) = 0; /** - * 从文件指定区域读取数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:接收读取数据的buffer - * @param offset:读取区域的起始偏移 - * @param length:读取数据的长度 - * @return 返回成功读取到的数据长度,失败返回-1 + * Read data from the specified area of the file + * @param fd: File handle id, obtained through the Open interface + * @param buf: buffer for receiving the read data + * @param offset: The starting offset of the read area + * @param length: The length of the read data + * @return returns the length of the data successfully read, while failure + * returns -1 */ 
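As a usage-oriented aside before the declaration that follows: the ext4 implementation in this patch satisfies this read contract by looping until the requested length is read, retrying on EINTR and advancing past short reads. Below is a minimal standalone sketch of that pattern using raw POSIX pread instead of the project's PosixWrapper indirection; the file path and the retry limit (a stand-in for the MAX_RETYR_TIME constant seen above) are illustrative assumptions.

```cpp
// Standalone sketch of the retrying read loop, not the project code.
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#include <cstdint>
#include <cstdio>

static const int kMaxRetryTimes = 3;  // stand-in for MAX_RETYR_TIME

// Returns the number of bytes actually read, or -1 on error.
int ReadFully(int fd, char* buf, uint64_t offset, int length) {
    int remain = length;
    int done = 0;
    int retries = 0;
    while (remain > 0) {
        ssize_t ret = pread(fd, buf + done, remain, offset + done);
        if (ret == 0) {
            break;  // offset reached EOF; return what we have so far
        }
        if (ret < 0) {
            if (errno == EINTR && retries++ < kMaxRetryTimes) {
                continue;  // interrupted by a signal, retry
            }
            return -1;
        }
        done += ret;    // advance past a possibly short read
        remain -= ret;
    }
    return done;
}

int main() {
    int fd = open("/etc/hostname", O_RDONLY);  // illustrative path
    if (fd < 0) return 1;
    char buf[64] = {0};
    int n = ReadFully(fd, buf, 0, sizeof(buf) - 1);
    if (n >= 0) std::printf("read %d bytes: %s", n, buf);
    close(fd);
    return 0;
}
```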
virtual int Read(int fd, char* buf, uint64_t offset, int length) = 0; /** - * 向文件指定区域写入数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:待写入数据的buffer - * @param offset:写入区域的起始偏移 - * @param length:写入数据的长度 - * @return 返回成功写入的数据长度,失败返回-1 + * Write data to the specified area of the file + * @param fd: File handle id, obtained through the Open interface + * @param buf: The buffer holding the data to be written + * @param offset: The starting offset of the write area + * @param length: The length of the written data + * @return returns the length of successfully written data, while failure + * returns -1 */ virtual int Write(int fd, const char* buf, uint64_t offset, int length) = 0; /** - * 向文件指定区域写入数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:待写入数据 - * @param offset:写入区域的起始偏移 - * @param length:写入数据的长度 - * @return 返回成功写入的数据长度,失败返回-1 + * Write data to the specified area of the file + * @param fd: File handle id, obtained through the Open interface + * @param buf: Data to be written + * @param offset: The starting offset of the write area + * @param length: The length of the written data + * @return returns the length of successfully written data, while failure + * returns -1 */ virtual int Write(int fd, butil::IOBuf buf, uint64_t offset, int length) = 0; @@ -181,59 +189,62 @@ class LocalFileSystem { virtual int Sync(int fd) = 0; /** - * 向文件末尾追加数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:待追加数据的buffer - * @param length:追加数据的长度 - * @return 返回成功追加的数据长度,失败返回-1 + * Append data to the end of the file + * @param fd: File handle id, obtained through the Open interface + * @param buf: buffer holding the data to append + * @param length: The length of the data to append + * @return returns the length of successfully appended data, while failure + * returns -1 */ virtual int Append(int fd, const char* buf, int length) = 0; /** - * 文件预分配/挖洞(未实现) - * @param fd:文件句柄id,通过Open接口获取 - * @param op:指定操作类型,预分配还是挖洞 - * @param offset:操作区域的起始偏移 - * @param length:操作区域的长度 - * @return 成功返回0 + * File preallocation/hole punching (not implemented) + * @param fd: File handle id, obtained through the Open interface + * @param op: Specifies the type of operation, preallocation or hole + * punching + * @param offset: The starting offset of the operating area + * @param length: The length of the operation area + * @return Successfully returned 0 */ virtual int Fallocate(int fd, int op, uint64_t offset, int length) = 0; /** - * 获取指定文件状态信息 - * @param fd:文件句柄id,通过Open接口获取 - * @param info[out]:文件系统的信息 - * stat结构同POSIX接口中使用的stat - * @return 成功返回0 + * Obtain specified file status information + * @param fd: File handle id, obtained through the Open interface + * @param info[out]: The file status information + * The stat structure is the same as the stat used in the POSIX interface + * @return Successfully returned 0 */ virtual int Fstat(int fd, struct stat* info) = 0; /** - * 将文件数据和元数据刷新到磁盘 - * @param fd:文件句柄id,通过Open接口获取 - * @return 成功返回0 + * Flush file data and metadata to disk + * @param fd: File handle id, obtained through the Open interface + * @return Successfully returned 0 */ virtual int Fsync(int fd) = 0; private: virtual int DoRename(const string& /* oldPath */, const string& /* newPath */, - unsigned int /* flags */) { return -1; } + unsigned int /* flags */) { + return -1; + } }; - class LocalFsFactory { public: /** - * 创建文件系统对象 - * 本地文件系统的工厂方法,根据传入的类型,创建相应的对象 - * 由该接口创建的文件系统会自动进行初始化 - * @param type:文件系统类型 - * @param deviceID: 设备的编号 - * @return 返回本地文件系统对象指针 + * Creating File System Objects + * The factory method of the local file 
system creates corresponding objects + * based on the type passed in The file system created by this interface + * will automatically initialize + * @param type: File system type + * @param deviceID: Device number + * @return returns the local file system object pointer */ - static std::shared_ptr CreateFs(FileSystemType type, - const std::string& deviceID); + static std::shared_ptr CreateFs( + FileSystemType type, const std::string& deviceID); }; } // namespace fs diff --git a/src/kvstorageclient/etcd_client.h b/src/kvstorageclient/etcd_client.h index 16aec44e6a..b9c2266d83 100644 --- a/src/kvstorageclient/etcd_client.h +++ b/src/kvstorageclient/etcd_client.h @@ -24,9 +24,10 @@ #define SRC_KVSTORAGECLIENT_ETCD_CLIENT_H_ #include + #include -#include #include +#include namespace curve { namespace kvstorage { @@ -43,7 +44,7 @@ class KVStorageClient { * * @return error code EtcdErrCode */ - virtual int Put(const std::string &key, const std::string &value) = 0; + virtual int Put(const std::string& key, const std::string& value) = 0; /** * @brief PutRewithRevision store key-value @@ -54,8 +55,9 @@ class KVStorageClient { * * @return error code EtcdErrCode */ - virtual int PutRewithRevision(const std::string &key, - const std::string &value, int64_t *revision) = 0; + virtual int PutRewithRevision(const std::string& key, + const std::string& value, + int64_t* revision) = 0; /** * @brief Get Get the value of the specified key @@ -65,7 +67,7 @@ class KVStorageClient { * * @return error code */ - virtual int Get(const std::string &key, std::string *out) = 0; + virtual int Get(const std::string& key, std::string* out) = 0; /** * @brief List Get all the values ​​between [startKey, endKey) @@ -76,15 +78,16 @@ class KVStorageClient { * * @return error code */ - virtual int List(const std::string &startKey, const std::string &endKey, - std::vector *values) = 0; + virtual int List(const std::string& startKey, const std::string& endKey, + std::vector* values) = 0; /** * @brief List all the key and values between [startKey, endKey) * * @param[in] startKey * @param[in] endKey - * @param[out] out store key/value pairs that key is between [startKey, endKey) + * @param[out] out store key/value pairs that key is between [startKey, + * endKey) * * @return error code */ @@ -98,7 +101,7 @@ class KVStorageClient { * * @return error code */ - virtual int Delete(const std::string &key) = 0; + virtual int Delete(const std::string& key) = 0; /** * @brief DeleteRewithRevision Delete the value of the specified key @@ -108,17 +111,18 @@ class KVStorageClient { * * @return error code */ - virtual int DeleteRewithRevision( - const std::string &key, int64_t *revision) = 0; + virtual int DeleteRewithRevision(const std::string& key, + int64_t* revision) = 0; /* - * @brief TxnN Operate transactions in the order of ops[0] ops[1] ..., currently 2 and 3 operations are supported //NOLINT - * - * @param[in] ops Operation set - * - * @return error code - */ - virtual int TxnN(const std::vector &ops) = 0; + * @brief TxnN Operate transactions in the order of ops[0] ops[1] ..., + * currently 2 and 3 operations are supported //NOLINT + * + * @param[in] ops Operation set + * + * @return error code + */ + virtual int TxnN(const std::vector& ops) = 0; /** * @brief CompareAndSwap Transaction, to achieve CAS @@ -129,17 +133,15 @@ class KVStorageClient { * * @return error code */ - virtual int CompareAndSwap(const std::string &key, const std::string &preV, - const std::string &target) = 0; + virtual int CompareAndSwap(const std::string& key, 
const std::string& preV, + const std::string& target) = 0; }; // encapsulate the c header file of etcd generated by go compilation class EtcdClientImp : public KVStorageClient { public: EtcdClientImp() {} - ~EtcdClientImp() { - CloseClient(); - } + ~EtcdClientImp() { CloseClient(); } /** * @brief Init init the etcdclient, a global var in go @@ -154,30 +156,30 @@ class EtcdClientImp : public KVStorageClient { void CloseClient(); - int Put(const std::string &key, const std::string &value) override; + int Put(const std::string& key, const std::string& value) override; - int PutRewithRevision(const std::string &key, const std::string &value, - int64_t *revision) override; + int PutRewithRevision(const std::string& key, const std::string& value, + int64_t* revision) override; - int Get(const std::string &key, std::string *out) override; + int Get(const std::string& key, std::string* out) override; - int List(const std::string &startKey, - const std::string &endKey, std::vector *values) override; + int List(const std::string& startKey, const std::string& endKey, + std::vector* values) override; int List(const std::string& startKey, const std::string& endKey, - std::vector >* out) override; + std::vector>* out) override; - int Delete(const std::string &key) override; + int Delete(const std::string& key) override; - int DeleteRewithRevision( - const std::string &key, int64_t *revision) override; + int DeleteRewithRevision(const std::string& key, + int64_t* revision) override; - int TxnN(const std::vector &ops) override; + int TxnN(const std::vector& ops) override; - int CompareAndSwap(const std::string &key, const std::string &preV, - const std::string &target) override; + int CompareAndSwap(const std::string& key, const std::string& preV, + const std::string& target) override; - virtual int GetCurrentRevision(int64_t *revision); + virtual int GetCurrentRevision(int64_t* revision); /** * @brief ListWithLimitAndRevision @@ -191,9 +193,11 @@ class EtcdClientImp : public KVStorageClient { * @param[out] values the value vector of all the key-value pairs * @param[out] lastKey the last key of the vector */ - virtual int ListWithLimitAndRevision(const std::string &startKey, - const std::string &endKey, int64_t limit, int64_t revision, - std::vector *values, std::string *lastKey); + virtual int ListWithLimitAndRevision(const std::string& startKey, + const std::string& endKey, + int64_t limit, int64_t revision, + std::vector* values, + std::string* lastKey); /** * @brief CampaignLeader Leader campaign through etcd, return directly if @@ -209,14 +213,14 @@ class EtcdClientImp : public KVStorageClient { * leader when the session expired after * client offline. 
* @param[in] electionTimeoutMs the timeout,0 will block always - * @param[out] leaderOid leader的objectId,recorded in objectManager + * @param[out] leaderOid leader's objectId,recorded in objectManager * * @return EtcdErrCode::EtcdCampaignLeaderSuccess success,others fail */ - virtual int CampaignLeader( - const std::string &pfx, const std::string &leaderName, - uint32_t sessionInterSec, uint32_t electionTimeoutMs, - uint64_t *leaderOid); + virtual int CampaignLeader(const std::string& pfx, + const std::string& leaderName, + uint32_t sessionInterSec, + uint32_t electionTimeoutMs, uint64_t* leaderOid); /** * @brief LeaderObserve @@ -228,8 +232,8 @@ class EtcdClientImp : public KVStorageClient { * * @return if returned, the session between mds and etcd expired */ - virtual int LeaderObserve( - uint64_t leaderOid, const std::string &leaderName); + virtual int LeaderObserve(uint64_t leaderOid, + const std::string& leaderName); /** * @brief LeaderResign the leader resigns initiatively, the other peers @@ -241,7 +245,7 @@ class EtcdClientImp : public KVStorageClient { * @return EtcdErrCode::EtcdLeaderResiginSuccess resign seccess * EtcdErrCode::EtcdLeaderResiginErr resign fail */ - virtual int LeaderResign(uint64_t leaderOid, uint64_t timeoutMs); + virtual int LeaderResign(uint64_t leaderOid, uint64_t timeoutMs); // for test void SetTimeout(int time); diff --git a/src/leader_election/leader_election.cpp b/src/leader_election/leader_election.cpp index 76884e0b9c..de2a86c743 100644 --- a/src/leader_election/leader_election.cpp +++ b/src/leader_election/leader_election.cpp @@ -20,11 +20,14 @@ * Author: lixiaocui1 */ +#include "src/leader_election/leader_election.h" + #include -#include -#include //NOLINT + #include -#include "src/leader_election/leader_election.h" +#include +#include //NOLINT + #include "src/common/concurrent/concurrent.h" using ::curve::common::Thread; @@ -32,23 +35,21 @@ using ::curve::common::Thread; namespace curve { namespace election { int LeaderElection::CampaignLeader() { - LOG(INFO) << opt_.leaderUniqueName << " start campaign leader prefix: " - << realPrefix_; + LOG(INFO) << opt_.leaderUniqueName + << " start campaign leader prefix: " << realPrefix_; int resCode = opt_.etcdCli->CampaignLeader( - realPrefix_, - opt_.leaderUniqueName, - opt_.sessionInterSec, - opt_.electionTimeoutMs, - &leaderOid_); + realPrefix_, opt_.leaderUniqueName, opt_.sessionInterSec, + opt_.electionTimeoutMs, &leaderOid_); if (resCode == EtcdErrCode::EtcdCampaignLeaderSuccess) { - LOG(INFO) << opt_.leaderUniqueName << " campaign leader prefix:" - << realPrefix_ << " success"; + LOG(INFO) << opt_.leaderUniqueName + << " campaign leader prefix:" << realPrefix_ << " success"; return 0; } - LOG(WARNING) << opt_.leaderUniqueName << " campaign leader prefix:" - << realPrefix_ << " err: " << resCode; + LOG(WARNING) << opt_.leaderUniqueName + << " campaign leader prefix:" << realPrefix_ + << " err: " << resCode; return -1; } @@ -61,28 +62,29 @@ int LeaderElection::LeaderResign() { int res = opt_.etcdCli->LeaderResign(leaderOid_, 1000 * opt_.sessionInterSec); if (EtcdErrCode::EtcdLeaderResiginSuccess == res) { - LOG(INFO) << opt_.leaderUniqueName << " resign leader prefix:" - << realPrefix_ << " ok"; + LOG(INFO) << opt_.leaderUniqueName + << " resign leader prefix:" << realPrefix_ << " ok"; return 0; } - LOG(WARNING) << opt_.leaderUniqueName << " resign leader prefix:" - << realPrefix_ << " err: " << res; + LOG(WARNING) << opt_.leaderUniqueName + << " resign leader prefix:" << realPrefix_ << " err: " << res; 
return -1; } int LeaderElection::ObserveLeader() { - LOG(INFO) << opt_.leaderUniqueName << " start observe for prefix:" - << realPrefix_; + LOG(INFO) << opt_.leaderUniqueName + << " start observe for prefix:" << realPrefix_; int resCode = opt_.etcdCli->LeaderObserve(leaderOid_, opt_.leaderUniqueName); - LOG(ERROR) << opt_.leaderUniqueName << " leader observe for prefix:" - << realPrefix_ << " occur error, errcode: " << resCode; + LOG(ERROR) << opt_.leaderUniqueName + << " leader observe for prefix:" << realPrefix_ + << " occur error, errcode: " << resCode; // for test fiu_return_on("src/mds/leaderElection/observeLeader", -1); - // 退出当前进程 + // Exit the current process LOG(INFO) << "mds is existing due to the error of leader observation"; raise(SIGTERM); diff --git a/src/leader_election/leader_election.h b/src/leader_election/leader_election.h index 70a28722ec..2188950cf7 100644 --- a/src/leader_election/leader_election.h +++ b/src/leader_election/leader_election.h @@ -24,32 +24,33 @@ #define SRC_LEADER_ELECTION_LEADER_ELECTION_H_ #include + #include #include -#include "src/kvstorageclient/etcd_client.h" #include "src/common/namespace_define.h" +#include "src/kvstorageclient/etcd_client.h" namespace curve { namespace election { -using ::curve::kvstorage::EtcdClientImp; using ::curve::common::LEADERCAMPAIGNNPFX; +using ::curve::kvstorage::EtcdClientImp; struct LeaderElectionOptions { - // etcd客户端 + // etcd client std::shared_ptr etcdCli; - // 带ttl的session,ttl超时时间内 + // session with ttl, within ttl timeout uint32_t sessionInterSec; - // 竞选leader的超时时间 + // Overtime for running for leader uint32_t electionTimeoutMs; - // leader名称,建议使用ip+port以示区分 + // leader name, it is recommended to use ip+port for differentiation std::string leaderUniqueName; - // 需要竞选的key + // key that need to be contested std::string campaginPrefix; }; @@ -61,33 +62,35 @@ class LeaderElection { } /** - * @brief CampaignLeader 竞选leader + * @brief CampaignLeader * - * @return 0表示竞选成功 -1表示竞选失败 + * @return 0 indicates a successful election, -1 indicates a failed election */ int CampaignLeader(); /** - * @brief StartObserverLeader 启动leader节点监测线程 + * @brief StartObserverLeader starts the leader node monitoring thread */ void StartObserverLeader(); /** - * @brief LeaderResign leader主动卸任leader,卸任成功后其他节点可以竞选leader + * @brief LeaderResign Leader proactively resigns from its leadership + * position. After successful resignation, other nodes can compete to become + * the new leader */ int LeaderResign(); /** - * @brief 返回leader name + * @brief returns the leader name */ - const std::string& GetLeaderName() { - return opt_.leaderUniqueName; - } + const std::string& GetLeaderName() { return opt_.leaderUniqueName; } public: /** - * @brief ObserveLeader 监测在etcd中创建的leader节点,正常情况下一直block, - * 退出表示leader change或者从client端角度看etcd异常,进程退出 + * @brief Monitor the leader node created in etcd. Under normal + * circumstances, this function continuously blocks. 
Exiting indicates a + * leader change or, from the client's perspective, an abnormality in etcd, + * which leads to process termination */ int ObserveLeader(); @@ -95,14 +98,13 @@ class LeaderElection { // option LeaderElectionOptions opt_; - // realPrefix_ = leader竞选公共prefix + 自定义prefix + // realPrefix_ = leader campaign public prefix + custom prefix std::string realPrefix_; - // 竞选leader之后记录在objectManager中的id号 + // The ID number recorded in the object manager after leader election uint64_t leaderOid_; }; } // namespace election } // namespace curve #endif // SRC_LEADER_ELECTION_LEADER_ELECTION_H_ - diff --git a/src/mds/nameserver2/clean_core.cpp b/src/mds/nameserver2/clean_core.cpp index 54f743c300..de7b0ae432 100644 --- a/src/mds/nameserver2/clean_core.cpp +++ b/src/mds/nameserver2/clean_core.cpp @@ -24,28 +24,27 @@ namespace curve { namespace mds { -StatusCode CleanCore::CleanSnapShotFile(const FileInfo & fileInfo, +StatusCode CleanCore::CleanSnapShotFile(const FileInfo& fileInfo, TaskProgress* progress) { if (fileInfo.segmentsize() == 0) { LOG(ERROR) << "cleanSnapShot File Error, segmentsize = 0"; return StatusCode::KInternalError; } - uint32_t segmentNum = fileInfo.length() / fileInfo.segmentsize(); + uint32_t segmentNum = fileInfo.length() / fileInfo.segmentsize(); uint64_t segmentSize = fileInfo.segmentsize(); for (uint32_t i = 0; i < segmentNum; i++) { // load segment PageFileSegment segment; StoreStatus storeRet = storage_->GetSegment(fileInfo.parentid(), - i * segmentSize, - &segment); + i * segmentSize, &segment); if (storeRet == StoreStatus::KeyNotExist) { continue; - } else if (storeRet != StoreStatus::OK) { + } else if (storeRet != StoreStatus::OK) { LOG(ERROR) << "cleanSnapShot File Error: " - << "GetSegment Error, inodeid = " << fileInfo.id() - << ", filename = " << fileInfo.filename() - << ", offset = " << i * segmentSize - << ", sequenceNum = " << fileInfo.seqnum(); + << "GetSegment Error, inodeid = " << fileInfo.id() + << ", filename = " << fileInfo.filename() + << ", offset = " << i * segmentSize + << ", sequenceNum = " << fileInfo.seqnum(); progress->SetStatus(TaskStatus::FAILED); return StatusCode::kSnapshotFileDeleteError; } @@ -54,40 +53,40 @@ StatusCode CleanCore::CleanSnapShotFile(const FileInfo & fileInfo, LogicalPoolID logicalPoolID = segment.logicalpoolid(); uint32_t chunkNum = segment.chunks_size(); for (uint32_t j = 0; j != chunkNum; j++) { - // 删除快照时如果chunk不存在快照,则需要修改chunk的correctedSn - // 防止删除快照后,后续的写触发chunk的快照 - // correctSn为创建快照后文件的版本号,也就是快照版本号+1 + // When deleting a snapshot, if the chunk does not have a snapshot, + // the correctedSn of the chunk needs to be modified Prevent + // subsequent writes from triggering Chunk snapshots after deleting + // snapshots CorrectSn is the version number of the file after + // creating the snapshot, which is the snapshot version number+1 SeqNum correctSn = fileInfo.seqnum() + 1; int ret = copysetClient_->DeleteChunkSnapshotOrCorrectSn( - logicalPoolID, - segment.chunks()[j].copysetid(), - segment.chunks()[j].chunkid(), - correctSn); + logicalPoolID, segment.chunks()[j].copysetid(), + segment.chunks()[j].chunkid(), correctSn); if (ret != 0) { LOG(ERROR) << "CleanSnapShotFile Error: " - << "DeleteChunkSnapshotOrCorrectSn Error" - << ", ret = " << ret - << ", inodeid = " << fileInfo.id() - << ", filename = " << fileInfo.filename() - << ", correctSn = " << correctSn; + << "DeleteChunkSnapshotOrCorrectSn Error" + << ", ret = " << ret + << ", inodeid = " << fileInfo.id() + << ", filename = " << fileInfo.filename() + << ", 
correctSn = " << correctSn; progress->SetStatus(TaskStatus::FAILED); return StatusCode::kSnapshotFileDeleteError; } } - progress->SetProgress(100 * (i+1) / segmentNum); + progress->SetProgress(100 * (i + 1) / segmentNum); } // delete the storage - StoreStatus ret = storage_->DeleteSnapshotFile(fileInfo.parentid(), - fileInfo.filename()); + StoreStatus ret = + storage_->DeleteSnapshotFile(fileInfo.parentid(), fileInfo.filename()); if (ret != StoreStatus::OK) { LOG(INFO) << "delete snapshotfile error, retCode = " << ret; progress->SetStatus(TaskStatus::FAILED); return StatusCode::kSnapshotFileDeleteError; } else { LOG(INFO) << "inodeid = " << fileInfo.id() - << ", filename = " << fileInfo.filename() - << ", seq = " << fileInfo.seqnum() << ", deleted"; + << ", filename = " << fileInfo.filename() + << ", seq = " << fileInfo.seqnum() << ", deleted"; } progress->SetProgress(100); @@ -95,27 +94,27 @@ StatusCode CleanCore::CleanSnapShotFile(const FileInfo & fileInfo, return StatusCode::kOK; } -StatusCode CleanCore::CleanFile(const FileInfo & commonFile, +StatusCode CleanCore::CleanFile(const FileInfo& commonFile, TaskProgress* progress) { if (commonFile.segmentsize() == 0) { LOG(ERROR) << "Clean commonFile File Error, segmentsize = 0"; return StatusCode::KInternalError; } - int segmentNum = commonFile.length() / commonFile.segmentsize(); + int segmentNum = commonFile.length() / commonFile.segmentsize(); uint64_t segmentSize = commonFile.segmentsize(); for (int i = 0; i != segmentNum; i++) { // load segment PageFileSegment segment; - StoreStatus storeRet = storage_->GetSegment(commonFile.id(), - i * segmentSize, &segment); + StoreStatus storeRet = + storage_->GetSegment(commonFile.id(), i * segmentSize, &segment); if (storeRet == StoreStatus::KeyNotExist) { continue; - } else if (storeRet != StoreStatus::OK) { + } else if (storeRet != StoreStatus::OK) { LOG(ERROR) << "Clean common File Error: " - << "GetSegment Error, inodeid = " << commonFile.id() - << ", filename = " << commonFile.filename() - << ", offset = " << i * segmentSize; + << "GetSegment Error, inodeid = " << commonFile.id() + << ", filename = " << commonFile.filename() + << ", offset = " << i * segmentSize; progress->SetStatus(TaskStatus::FAILED); return StatusCode::kCommonFileDeleteError; } @@ -123,8 +122,7 @@ StatusCode CleanCore::CleanFile(const FileInfo & commonFile, int ret = DeleteChunksInSegment(segment, commonFile.seqnum()); if (ret != 0) { LOG(ERROR) << "Clean common File Error: " - << ", ret = " << ret - << ", inodeid = " << commonFile.id() + << ", ret = " << ret << ", inodeid = " << commonFile.id() << ", filename = " << commonFile.filename() << ", sequenceNum = " << commonFile.seqnum(); progress->SetStatus(TaskStatus::FAILED); @@ -133,33 +131,33 @@ StatusCode CleanCore::CleanFile(const FileInfo & commonFile, // delete segment int64_t revision; - storeRet = storage_->DeleteSegment( - commonFile.id(), i * segmentSize, &revision); + storeRet = storage_->DeleteSegment(commonFile.id(), i * segmentSize, + &revision); if (storeRet != StoreStatus::OK) { LOG(ERROR) << "Clean common File Error: " - << "DeleteSegment Error, inodeid = " << commonFile.id() - << ", filename = " << commonFile.filename() - << ", offset = " << i * segmentSize - << ", sequenceNum = " << commonFile.seqnum(); + << "DeleteSegment Error, inodeid = " << commonFile.id() + << ", filename = " << commonFile.filename() + << ", offset = " << i * segmentSize + << ", sequenceNum = " << commonFile.seqnum(); progress->SetStatus(TaskStatus::FAILED); return 
StatusCode::kCommonFileDeleteError; } allocStatistic_->DeAllocSpace(segment.logicalpoolid(), - segment.segmentsize(), revision); + segment.segmentsize(), revision); progress->SetProgress(100 * (i + 1) / segmentNum); } // delete the storage - StoreStatus ret = storage_->DeleteFile(commonFile.parentid(), - commonFile.filename()); + StoreStatus ret = + storage_->DeleteFile(commonFile.parentid(), commonFile.filename()); if (ret != StoreStatus::OK) { LOG(INFO) << "delete common file error, retCode = " << ret; progress->SetStatus(TaskStatus::FAILED); return StatusCode::kCommonFileDeleteError; } else { LOG(INFO) << "inodeid = " << commonFile.id() - << ", filename = " << commonFile.filename() - << ", seq = " << commonFile.seqnum() << ", deleted"; + << ", filename = " << commonFile.filename() + << ", seq = " << commonFile.seqnum() << ", deleted"; } progress->SetProgress(100); @@ -223,10 +221,8 @@ int CleanCore::DeleteChunksInSegment(const PageFileSegment& segment, const LogicalPoolID logicalPoolId = segment.logicalpoolid(); for (int i = 0; i < segment.chunks_size(); ++i) { int ret = copysetClient_->DeleteChunk( - logicalPoolId, - segment.chunks()[i].copysetid(), - segment.chunks()[i].chunkid(), - seq); + logicalPoolId, segment.chunks()[i].copysetid(), + segment.chunks()[i].chunkid(), seq); if (ret != 0) { LOG(ERROR) << "DeleteChunk failed, ret = " << ret diff --git a/src/mds/nameserver2/clean_core.h b/src/mds/nameserver2/clean_core.h index 0cb4f3f8ab..8011d10ee8 100644 --- a/src/mds/nameserver2/clean_core.h +++ b/src/mds/nameserver2/clean_core.h @@ -25,12 +25,13 @@ #include #include -#include "src/mds/nameserver2/namespace_storage.h" + +#include "src/mds/chunkserverclient/copyset_client.h" #include "src/mds/common/mds_define.h" +#include "src/mds/nameserver2/allocstatistic/alloc_statistic.h" +#include "src/mds/nameserver2/namespace_storage.h" #include "src/mds/nameserver2/task_progress.h" -#include "src/mds/chunkserverclient/copyset_client.h" #include "src/mds/topology/topology.h" -#include "src/mds/nameserver2/allocstatistic/alloc_statistic.h" using ::curve::mds::chunkserverclient::CopysetClient; using ::curve::mds::topology::Topology; @@ -41,30 +42,32 @@ namespace mds { class CleanCore { public: CleanCore(std::shared_ptr storage, - std::shared_ptr copysetClient, - std::shared_ptr allocStatistic) + std::shared_ptr copysetClient, + std::shared_ptr allocStatistic) : storage_(storage), copysetClient_(copysetClient), allocStatistic_(allocStatistic) {} /** - * @brief 删除快照文件,更新task状态 - * @param snapShotFile: 需要清理的snapshot文件 - * @param progress: CleanSnapShotFile接口属于时间较长的偏异步任务 - * 这里传入进度进行跟踪反馈 + * @brief Delete the snapshot file and update the task status + * @param snapShotFile: The snapshot file that needs to be cleaned + * @param progress: The CleanSnapShotFile interface is a relatively + * asynchronous task that takes a long time Here, progress is transmitted + * for tracking and feedback */ - StatusCode CleanSnapShotFile(const FileInfo & snapShotFile, + StatusCode CleanSnapShotFile(const FileInfo& snapShotFile, TaskProgress* progress); /** - * @brief 删除普通文件,更新task状态 - * @param commonFile: 需要清理的普通文件 - * @param progress: CleanFile接口属于时间较长的偏异步任务 - * 这里传入进度进行跟踪反馈 - * @return 是否执行成功,成功返回StatusCode::kOK + * @brief Delete regular files and update task status + * @param commonFile: A regular file that needs to be cleaned + * @param progress: The CleanFile interface is a relatively asynchronous + * task that takes a long time Here, progress is transmitted for tracking + * and feedback + * @return whether the 
execution was successful, and if successful, return + * StatusCode::kOK */ - StatusCode CleanFile(const FileInfo & commonFile, - TaskProgress* progress); + StatusCode CleanFile(const FileInfo& commonFile, TaskProgress* progress); /** * @brief clean discarded segment and chunks @@ -85,4 +88,4 @@ class CleanCore { } // namespace mds } // namespace curve -#endif // SRC_MDS_NAMESERVER2_CLEAN_CORE_H_ +#endif // SRC_MDS_NAMESERVER2_CLEAN_CORE_H_ diff --git a/src/mds/nameserver2/clean_manager.h b/src/mds/nameserver2/clean_manager.h index 86dbbd3474..223203952a 100644 --- a/src/mds/nameserver2/clean_manager.h +++ b/src/mds/nameserver2/clean_manager.h @@ -26,18 +26,19 @@ #include #include #include + #include "proto/nameserver2.pb.h" -#include "src/mds/nameserver2/clean_task_manager.h" -#include "src/mds/nameserver2/clean_core.h" -#include "src/mds/nameserver2/namespace_storage.h" -#include "src/mds/nameserver2/async_delete_snapshot_entity.h" #include "src/common/concurrent/concurrent.h" #include "src/common/concurrent/dlock.h" +#include "src/mds/nameserver2/async_delete_snapshot_entity.h" +#include "src/mds/nameserver2/clean_core.h" +#include "src/mds/nameserver2/clean_task_manager.h" +#include "src/mds/nameserver2/namespace_storage.h" using curve::common::DLock; using curve::common::DLockOpts; -namespace curve { +namespace curve { namespace mds { class CleanDiscardSegmentTask; @@ -45,8 +46,8 @@ class CleanDiscardSegmentTask; class CleanManagerInterface { public: virtual ~CleanManagerInterface() {} - virtual bool SubmitDeleteSnapShotFileJob(const FileInfo&, - std::shared_ptr entity) = 0; + virtual bool SubmitDeleteSnapShotFileJob( + const FileInfo&, std::shared_ptr entity) = 0; virtual std::shared_ptr GetTask(TaskIDType id) = 0; virtual bool SubmitDeleteCommonFileJob(const FileInfo&) = 0; @@ -56,24 +57,26 @@ class CleanManagerInterface { curve::common::CountDownEvent* counter) = 0; }; /** - * CleanManager 用于异步清理 删除快照对应的数据 - * 1. 接收在线的删除快照请求 - * 2. 线程池异步处理实际的chunk删除任务 + * CleanManager is used for asynchronous cleaning and deleting data + *corresponding to snapshots. + * 1. Receives online requests for snapshot deletion. + * 2. Asynchronously processes the actual chunk deletion tasks in a thread pool. 
**/ class CleanManager : public CleanManagerInterface { public: explicit CleanManager(std::shared_ptr core, - std::shared_ptr taskMgr, - std::shared_ptr storage); + std::shared_ptr taskMgr, + std::shared_ptr storage); bool Start(void); bool Stop(void); - bool SubmitDeleteSnapShotFileJob(const FileInfo &fileInfo, - std::shared_ptr entity) override; + bool SubmitDeleteSnapShotFileJob( + const FileInfo& fileInfo, + std::shared_ptr entity) override; - bool SubmitDeleteCommonFileJob(const FileInfo&fileInfo) override; + bool SubmitDeleteCommonFileJob(const FileInfo& fileInfo) override; bool SubmitCleanDiscardSegmentJob( const std::string& cleanSegmentKey, diff --git a/src/mds/nameserver2/clean_task.h b/src/mds/nameserver2/clean_task.h index 9001312870..c865ff6271 100644 --- a/src/mds/nameserver2/clean_task.h +++ b/src/mds/nameserver2/clean_task.h @@ -23,24 +23,26 @@ #ifndef SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ #define SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ +#include //NOLINT +#include //NOLINT + #include #include //NOLINT #include -#include //NOLINT -#include //NOLINT + #include "proto/nameserver2.pb.h" -#include "src/mds/nameserver2/task_progress.h" -#include "src/mds/nameserver2/clean_core.h" -#include "src/mds/nameserver2/async_delete_snapshot_entity.h" -#include "src/common/concurrent/dlock.h" #include "src/common/concurrent/count_down_event.h" +#include "src/common/concurrent/dlock.h" +#include "src/mds/nameserver2/async_delete_snapshot_entity.h" +#include "src/mds/nameserver2/clean_core.h" +#include "src/mds/nameserver2/task_progress.h" using curve::common::DLock; namespace curve { namespace mds { -typedef uint64_t TaskIDType; +typedef uint64_t TaskIDType; // default clean task retry times const uint32_t kDefaultTaskRetryTimes = 5; @@ -52,56 +54,40 @@ class Task { virtual void Run(void) = 0; std::function Closure() { - return [this] () { - Run(); - }; + return [this]() { Run(); }; } - TaskProgress GetTaskProgress(void) const { - return progress_; - } + TaskProgress GetTaskProgress(void) const { return progress_; } - void SetTaskProgress(TaskProgress progress) { - progress_ = progress; - } + void SetTaskProgress(TaskProgress progress) { progress_ = progress; } - TaskProgress* GetMutableTaskProgress(void) { - return &progress_; - } + TaskProgress* GetMutableTaskProgress(void) { return &progress_; } - void SetTaskID(TaskIDType taskID) { - taskID_ = taskID; - } + void SetTaskID(TaskIDType taskID) { taskID_ = taskID; } - TaskIDType GetTaskID(void) const { - return taskID_; - } + TaskIDType GetTaskID(void) const { return taskID_; } - void SetRetryTimes(uint32_t retry) { - retry_ = retry; - } + void SetRetryTimes(uint32_t retry) { retry_ = retry; } void Retry() { retry_--; progress_ = TaskProgress(); } - bool RetryTimesExceed() { - return retry_ == 0; - } + bool RetryTimesExceed() { return retry_ == 0; } protected: TaskIDType taskID_; TaskProgress progress_; - // 任务最大重试次数 + // Maximum number of task retries uint32_t retry_; }; -class SnapShotCleanTask: public Task { +class SnapShotCleanTask : public Task { public: - SnapShotCleanTask(TaskIDType taskID, std::shared_ptr core, - FileInfo fileInfo, - std::shared_ptr entity = nullptr) { + SnapShotCleanTask( + TaskIDType taskID, std::shared_ptr core, FileInfo fileInfo, + std::shared_ptr entity = nullptr) { cleanCore_ = core; fileInfo_ = fileInfo; SetTaskProgress(TaskProgress()); @@ -110,29 +96,29 @@ class SnapShotCleanTask: public Task { SetRetryTimes(kDefaultTaskRetryTimes); } void Run(void) override { - StatusCode ret = 
cleanCore_->CleanSnapShotFile(fileInfo_, - GetMutableTaskProgress()); + StatusCode ret = + cleanCore_->CleanSnapShotFile(fileInfo_, GetMutableTaskProgress()); if (asyncEntity_ != nullptr) { brpc::ClosureGuard doneGuard(asyncEntity_->GetClosure()); brpc::Controller* cntl = static_cast(asyncEntity_->GetController()); - DeleteSnapShotResponse *response = - asyncEntity_->GetDeleteResponse(); - const DeleteSnapShotRequest *request = - asyncEntity_->GetDeleteRequest(); + DeleteSnapShotResponse* response = + asyncEntity_->GetDeleteResponse(); + const DeleteSnapShotRequest* request = + asyncEntity_->GetDeleteRequest(); response->set_statuscode(ret); if (ret != StatusCode::kOK) { LOG(ERROR) << "logid = " << cntl->log_id() - << ", CleanSnapShotFile fail, filename = " - << request->filename() - << ", sequencenum = " << request->seq() - << ", statusCode = " << ret; + << ", CleanSnapShotFile fail, filename = " + << request->filename() + << ", sequencenum = " << request->seq() + << ", statusCode = " << ret; } else { LOG(INFO) << "logid = " << cntl->log_id() - << ", CleanSnapShotFile ok, filename = " - << request->filename() - << ", sequencenum = " << request->seq(); + << ", CleanSnapShotFile ok, filename = " + << request->filename() + << ", sequencenum = " << request->seq(); } } return; @@ -144,10 +130,10 @@ class SnapShotCleanTask: public Task { std::shared_ptr asyncEntity_; }; -class CommonFileCleanTask: public Task { +class CommonFileCleanTask : public Task { public: CommonFileCleanTask(TaskIDType taskID, std::shared_ptr core, - FileInfo fileInfo) { + FileInfo fileInfo) { cleanCore_ = core; fileInfo_ = fileInfo; SetTaskProgress(TaskProgress()); @@ -211,4 +197,4 @@ class SegmentCleanTask : public Task { } // namespace mds } // namespace curve -#endif // SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ +#endif // SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ diff --git a/src/mds/nameserver2/clean_task_manager.cpp b/src/mds/nameserver2/clean_task_manager.cpp index 2a73ff87b9..3aadf6694c 100644 --- a/src/mds/nameserver2/clean_task_manager.cpp +++ b/src/mds/nameserver2/clean_task_manager.cpp @@ -19,16 +19,17 @@ * Created Date: Wednesday December 19th 2018 * Author: hzsunjianliang */ -#include -#include #include "src/mds/nameserver2/clean_task_manager.h" +#include +#include + namespace curve { namespace mds { CleanTaskManager::CleanTaskManager(std::shared_ptr channelPool, - int threadNum, int checkPeriod) - : channelPool_(channelPool) { + int threadNum, int checkPeriod) + : channelPool_(channelPool) { threadNum_ = threadNum; checkPeriod_ = checkPeriod; stopFlag_ = true; @@ -43,30 +44,29 @@ void CleanTaskManager::CheckCleanResult(void) { auto taskProgress = iter->second->GetTaskProgress(); if (taskProgress.GetStatus() == TaskStatus::SUCCESS) { LOG(INFO) << "going to remove task, taskID = " - << iter->second->GetTaskID(); + << iter->second->GetTaskID(); iter = cleanTasks_.erase(iter); continue; } else if (taskProgress.GetStatus() == TaskStatus::FAILED) { iter->second->Retry(); if (!iter->second->RetryTimesExceed()) { - LOG(WARNING) << "CleanTaskManager find Task Failed," - << " retry," - << " taskID = " - << iter->second->GetTaskID(); + LOG(WARNING) + << "CleanTaskManager find Task Failed," + << " retry," + << " taskID = " << iter->second->GetTaskID(); cleanWorkers_->Enqueue(iter->second->Closure()); } else { LOG(ERROR) << "CleanTaskManager find Task Failed," - << " retry times exceed," - << " going to remove task," - << " taskID = " - << iter->second->GetTaskID(); + << " retry times exceed," + << " going to remove task," + << " 
taskID = " << iter->second->GetTaskID(); iter = cleanTasks_.erase(iter); continue; } } ++iter; } - // clean task为空,清空channelPool + // The clean task list is empty, so clear the channelPool if (cleanTasks_.empty() && notEmptyBefore) { LOG(INFO) << "All tasks completed, clear channel pool"; channelPool_->Clear(); @@ -81,7 +81,7 @@ bool CleanTaskManager::Start(void) { stopFlag_ = false; // start worker thread - cleanWorkers_ = new ::curve::common::TaskThreadPool<>(); + cleanWorkers_ = new ::curve::common::TaskThreadPool<>(); if (cleanWorkers_->Start(threadNum_) != 0) { LOG(ERROR) << "thread pool start error"; @@ -89,8 +89,8 @@ } // start check thread - checkThread_ = new common::Thread(&CleanTaskManager::CheckCleanResult, - this); + checkThread_ = + new common::Thread(&CleanTaskManager::CheckCleanResult, this); LOG(INFO) << "TaskManger check thread started"; return true; } @@ -117,7 +117,7 @@ bool CleanTaskManager::PushTask(std::shared_ptr task) { common::LockGuard lck(mutex_); if (stopFlag_) { LOG(ERROR) << "task manager not started, taskID = " - << task->GetTaskID(); + << task->GetTaskID(); return false; } if (cleanTasks_.find(task->GetTaskID()) != cleanTasks_.end()) { @@ -137,7 +137,7 @@ std::shared_ptr CleanTaskManager::GetTask(TaskIDType id) { auto iter = cleanTasks_.begin(); if ((iter = cleanTasks_.find(id)) == cleanTasks_.end()) { - LOG(INFO) << "taskid = "<< id << ", not found"; + LOG(INFO) << "taskid = " << id << ", not found"; return nullptr; } else { return iter->second; diff --git a/src/mds/nameserver2/clean_task_manager.h b/src/mds/nameserver2/clean_task_manager.h index 9673a0b1c4..409b9df5b8 100644 --- a/src/mds/nameserver2/clean_task_manager.h +++ b/src/mds/nameserver2/clean_task_manager.h @@ -22,20 +22,21 @@ #ifndef SRC_MDS_NAMESERVER2_CLEAN_TASK_MANAGER_H_ #define SRC_MDS_NAMESERVER2_CLEAN_TASK_MANAGER_H_ -#include -#include //NOLINT -#include //NOLINT #include +#include //NOLINT +#include //NOLINT +#include + +#include "src/common/channel_pool.h" +#include "src/common/concurrent/concurrent.h" #include "src/common/concurrent/task_thread_pool.h" #include "src/common/interruptible_sleeper.h" -#include "src/common/concurrent/concurrent.h" -#include "src/common/channel_pool.h" #include "src/mds/common/mds_define.h" #include "src/mds/nameserver2/clean_task.h" using ::curve::common::Atomic; -using ::curve::common::InterruptibleSleeper; using ::curve::common::ChannelPool; +using ::curve::common::InterruptibleSleeper; namespace curve { namespace mds { @@ -43,40 +44,40 @@ class CleanTaskManager { public: /** - * @brief 初始化TaskManager - * @param channelPool: 连接池 - * @param threadNum: worker线程的数量 - * @param checkPeriod: 周期性任务检查线程时间, ms + * @brief Initialize TaskManager + * @param channelPool: Connection pool + * @param threadNum: Number of worker threads + * @param checkPeriod: Period of the task check thread, in ms */ explicit CleanTaskManager(std::shared_ptr channelPool, int threadNum = 10, int checkPeriod = 10000); - ~CleanTaskManager() { - Stop(); - } + ~CleanTaskManager() { Stop(); } /** - * @brief 启动worker线程池、启动检查线程 + * @brief: Start worker thread pool and check thread * */ bool Start(void); /** - * @brief 停止worker线程池、启动检查线程 + * @brief: Stop worker thread pool and check thread * */ bool Stop(void); /** - * @brief 向线程池推送task - * @param task: 对应的工作任务 - * @return 推送task是否成功,如已存在对应的任务,推送是吧 + * @brief Push task to thread pool + * @param task: corresponding work task + * @return: whether the task was pushed successfully; if a task with the + * same ID already exists, the push fails */ bool PushTask(std::shared_ptr task); /** - * @brief 获取当前的task - * @param id: 对应任务的相关文件InodeID - * @return 返回对应task的shared_ptr 或者 不存在返回nullptr + * @brief Get the current task + * @param id: The InodeID of the file associated with the task + * @return the shared_ptr of the corresponding task, or nullptr if it + * does not exist */ std::shared_ptr GetTask(TaskIDType id); @@ -85,20 +86,21 @@ class CleanTaskManager { private: int threadNum_; - ::curve::common::TaskThreadPool<> *cleanWorkers_; + ::curve::common::TaskThreadPool<>* cleanWorkers_; // for period check snapshot delete status std::unordered_map> cleanTasks_; common::Mutex mutex_; - common::Thread *checkThread_; + common::Thread* checkThread_; int checkPeriod_; Atomic stopFlag_; InterruptibleSleeper sleeper_; - // 连接池,和chunkserverClient共享,没有任务在执行时清空 + // Connection pool, shared with chunkserverClient; cleared when no task + // is executing std::shared_ptr channelPool_; }; -} // namespace mds -} // namespace curve +} // namespace mds +} // namespace curve #endif // SRC_MDS_NAMESERVER2_CLEAN_TASK_MANAGER_H_ diff --git a/src/snapshotcloneserver/clone/clone_core.cpp b/src/snapshotcloneserver/clone/clone_core.cpp index 2974ed06c8..021da6b359 100644 --- a/src/snapshotcloneserver/clone/clone_core.cpp +++ b/src/snapshotcloneserver/clone/clone_core.cpp @@ -22,28 +22,27 @@ #include "src/snapshotcloneserver/clone/clone_core.h" +#include #include #include #include -#include -#include "src/snapshotcloneserver/clone/clone_task.h" +#include "src/common/concurrent/name_lock.h" #include "src/common/location_operator.h" #include "src/common/uuid.h" -#include "src/common/concurrent/name_lock.h" +#include "src/snapshotcloneserver/clone/clone_task.h" -using ::curve::common::UUIDGenerator; using ::curve::common::LocationOperator; using ::curve::common::NameLock; using ::curve::common::NameLockGuard; +using ::curve::common::UUIDGenerator; namespace curve { namespace snapshotcloneserver { int CloneCoreImpl::Init() { int ret = client_->Mkdir(cloneTempDir_, mdsRootUser_); - if (ret != LIBCURVE_ERROR::OK && - ret != -LIBCURVE_ERROR::EXISTS) { + if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::EXISTS) { LOG(ERROR) << "Mkdir fail, ret = " << ret << ", dirpath = " << cloneTempDir_; return kErrCodeServerInitFail; @@ -51,22 +50,20 @@ return kErrCodeSuccess; } -int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, - const std::string &user, - const std::string &destination, - bool lazyFlag, - CloneTaskType taskType, - std::string poolset, - CloneInfo *cloneInfo) { - // 查询数据库中是否有任务正在执行 +int CloneCoreImpl::CloneOrRecoverPre(const UUID& source, + const std::string& user, + const std::string& destination, + bool lazyFlag, CloneTaskType taskType, + std::string poolset, + CloneInfo* cloneInfo) { + // Check whether any task is already executing in the database std::vector cloneInfoList; metaStore_->GetCloneInfoByFileName(destination, &cloneInfoList); bool needJudgeFileExist = false; std::vector existCloneInfos; - for (auto &info : cloneInfoList) { + for (auto& info : cloneInfoList) { LOG(INFO) << "CloneOrRecoverPre find same clone task" - << ", source = " << source - << ", user = " << user + << ", source = " << source << ", user = " << user << ", destination = " << destination << ", poolset = " << poolset << ", Exist CloneInfo : " << info; @@ -74,40 +71,42 @@ if (taskType == CloneTaskType::kClone) { if
(info.GetStatus() == CloneStatus::cloning || info.GetStatus() == CloneStatus::retrying) { - if ((info.GetUser() == user) && - (info.GetSrc() == source) && + if ((info.GetUser() == user) && (info.GetSrc() == source) && (info.GetIsLazy() == lazyFlag) && (info.GetTaskType() == taskType)) { - // 视为同一个clone + // Treat as the same clone *cloneInfo = info; return kErrCodeTaskExist; } else { - // 视为不同的克隆,那么文件实际上已被占用,返回文件已存在 + // Treat it as a different clone: the file is actually + // occupied, so return 'file already exists' return kErrCodeFileExist; } } else if (info.GetStatus() == CloneStatus::done || info.GetStatus() == CloneStatus::error || info.GetStatus() == CloneStatus::metaInstalled) { - // 可能已经删除,需要再判断文件存不存在, - // 在已删除的条件下,允许再克隆 + // The file may already have been deleted, so check again whether + // it exists; if it has been deleted, cloning is allowed again existCloneInfos.push_back(info); needJudgeFileExist = true; } else { - // 此时,有个相同的克隆任务正在删除中, 返回文件被占用 + // At this point an identical clone task is being deleted, so + // return 'file occupied' return kErrCodeFileExist; } } else { // is recover if (info.GetStatus() == CloneStatus::recovering || info.GetStatus() == CloneStatus::retrying) { - if ((info.GetUser() == user) && - (info.GetSrc() == source) && + if ((info.GetUser() == user) && (info.GetSrc() == source) && (info.GetIsLazy() == lazyFlag) && (info.GetTaskType() == taskType)) { - // 视为同一个clone,返回任务已存在 + // Treat as the same clone, return task already exists *cloneInfo = info; return kErrCodeTaskExist; } else { - // 视为不同的克隆,那么文件实际上已被占用,返回文件已存在 + // Treat it as a different clone: the file is actually + // occupied, so return 'file already exists' return kErrCodeFileExist; } } else if (info.GetStatus() == CloneStatus::done || @@ -115,13 +114,15 @@ info.GetStatus() == CloneStatus::metaInstalled) { // nothing } else { - // 此时,有个相同的任务正在删除中, 返回文件被占用 + // At this point an identical task is being deleted, so return + // 'file occupied' return kErrCodeFileExist; } } } - // 目标文件已存在不能clone, 不存在不能recover + // If the target file already exists it cannot be cloned; if it does not + // exist it cannot be recovered FInfo destFInfo; int ret = client_->GetFileInfo(destination, mdsRootUser_, &destFInfo); switch (ret) { @@ -129,7 +130,7 @@ if (CloneTaskType::kClone == taskType) { if (needJudgeFileExist) { bool match = false; - // 找出inodeid匹配的cloneInfo + // Find the cloneInfo that matches the inodeid for (auto& existInfo : existCloneInfos) { if (destFInfo.id == existInfo.GetDestId()) { *cloneInfo = existInfo; @@ -140,27 +141,29 @@ if (match) { return kErrCodeTaskExist; } else { - // 如果没找到,那么dest file都不是这些clone任务创建的, - // 意味着文件重名了 - LOG(ERROR) << "Clone dest file exist, " - << "but task not match! " - << "source = " << source - << ", user = " << user - << ", destination = " << destination - << ", poolset = " << poolset; + // If not found, the dest file was not created by any of + // these clone tasks, which means there is a file name + // conflict + LOG(ERROR) + << "Clone dest file exist, " + << "but task not match! " + << "source = " << source << ", user = " << user + << ", destination = " << destination + << ", poolset = " << poolset; return kErrCodeFileExist; } } else { - // 没有对应的cloneInfo,意味着文件重名了 + // There is no corresponding cloneInfo, which means there is a + // file name conflict LOG(ERROR) << "Clone dest file must not exist" - << ", source = " << source - << ", user = " << user + << ", source = " << source << ", user = " << user << ", destination = " << destination << ", poolset = " << poolset; return kErrCodeFileExist; } } else if (CloneTaskType::kRecover == taskType) { - // recover任务,卷的poolset信息不变 + // The recover task keeps the poolset information of the volume + // unchanged poolset = destFInfo.poolset; } else { assert(false); @@ -169,21 +172,19 @@ case -LIBCURVE_ERROR::NOTEXIST: if (CloneTaskType::kRecover == taskType) { LOG(ERROR) << "Recover dest file must exist" - << ", source = " << source - << ", user = " << user + << ", source = " << source << ", user = " << user << ", destination = " << destination; return kErrCodeFileNotExist; } break; default: LOG(ERROR) << "GetFileInfo encounter an error" - << ", ret = " << ret - << ", source = " << source + << ", ret = " << ret << ", source = " << source << ", user = " << user; return kErrCodeInternalError; } - // 是否为快照 + // Is the source a snapshot? SnapshotInfo snapInfo; CloneFileType fileType; @@ -204,8 +205,7 @@ } if (snapInfo.GetUser() != user) { LOG(ERROR) << "Clone snapshot by invalid user" - << ", source = " << source - << ", user = " << user + << ", source = " << source << ", user = " << user << ", destination = " << destination << ", poolset = " << poolset << ", snapshot.user = " << snapInfo.GetUser(); @@ -225,15 +225,13 @@ case -LIBCURVE_ERROR::NOTEXIST: case -LIBCURVE_ERROR::PARAM_ERROR: LOG(ERROR) << "Clone source file not exist" - << ", source = " << source - << ", user = " << user + << ", source = " << source << ", user = " << user << ", destination = " << destination << ", poolset = " << poolset; return kErrCodeFileNotExist; default: LOG(ERROR) << "GetFileInfo encounter an error" - << ", ret = " << ret - << ", source = " << source + << ", ret = " << ret << ", source = " << source << ", user = " << user; return kErrCodeInternalError; } @@ -245,27 +243,26 @@ return kErrCodeFileStatusInvalid; } - // TODO(镜像克隆的用户认证待完善) + // TODO: user authentication for cloning from an image still needs to be + // improved } UUID uuid = UUIDGenerator().GenerateUUID(); - CloneInfo info(uuid, user, taskType, - source, destination, poolset, fileType, lazyFlag); + CloneInfo info(uuid, user, taskType, source, destination, poolset, fileType, + lazyFlag); if (CloneTaskType::kClone == taskType) { info.SetStatus(CloneStatus::cloning); } else { info.SetStatus(CloneStatus::recovering); } - // 这里必须先AddCloneInfo, 因为如果先SetCloneFileStatus,然后AddCloneInfo, - // 如果AddCloneInfo失败又意外重启,将没人知道SetCloneFileStatus调用过,造成 - // 镜像无法删除 + // AddCloneInfo must come first here: if SetCloneFileStatus were called + // first and AddCloneInfo then failed before an unexpected restart, nothing + // would record that SetCloneFileStatus had been called, and the image + // could never be deleted ret = metaStore_->AddCloneInfo(info); if (ret < 0) { LOG(ERROR) << "AddCloneInfo error" - << ", ret = " << ret - << ", taskId = " << uuid - << ", user = " << user - << ", source = " << source + << ", ret = " << ret << ", taskId = " << uuid + << ", user = " << user << ", source = " << source << ", destination = " << destination << ", poolset = " << poolset; if (CloneFileType::kSnapshot == fileType) { @@ -275,20 +272,19 @@ } if (CloneFileType::kFile == fileType) { NameLockGuard lockGuard(cloneRef_->GetLock(), source); - ret = client_->SetCloneFileStatus(source, - FileStatus::BeingCloned, - mdsRootUser_); + ret = client_->SetCloneFileStatus(source, FileStatus::BeingCloned, + mdsRootUser_); if (ret < 0) { - // 这里不处理SetCloneFileStatus的错误, - // 因为SetCloneFileStatus失败的所有结果都是可接受的, - // 相比于处理SetCloneFileStatus失败的情况更直接: - // 比如调用DeleteCloneInfo删除任务, - // 一旦DeleteCloneInfo失败,给用户返回error之后, - // 重启服务将造成Clone继续进行, - // 跟用户结果返回的结果不一致,造成用户的困惑 + // The error from SetCloneFileStatus is deliberately not handled + // here, because every outcome of a SetCloneFileStatus failure is + // acceptable. Handling the failure would be worse. For example, + // the task could be deleted via DeleteCloneInfo; if + // DeleteCloneInfo itself failed after an error had been returned + // to the user, restarting the service would let the clone + // continue, contradicting the result already reported and + // confusing the user LOG(WARNING) << "SetCloneFileStatus encounter an error" - << ", ret = " << ret - << ", source = " << source + << ", ret = " << ret << ", source = " << source << ", user = " << user; } cloneRef_->IncrementRef(source); @@ -298,10 +294,8 @@ return kErrCodeSuccess; } -int CloneCoreImpl::FlattenPre( - const std::string &user, - const TaskIdType &taskId, - CloneInfo *cloneInfo) { +int CloneCoreImpl::FlattenPre(const std::string& user, const TaskIdType& taskId, + CloneInfo* cloneInfo) { (void)user; int ret = metaStore_->GetCloneInfo(taskId, cloneInfo); if (ret < 0) { @@ -311,7 +305,8 @@ case CloneStatus::done: case CloneStatus::cloning: case CloneStatus::recovering: { - // 已经完成的或正在进行中返回task exist, 表示不需要处理 + // Return 'task exists' for tasks that are completed or in + // progress, meaning no further processing is needed return kErrCodeTaskExist; } case CloneStatus::metaInstalled: { @@ -362,7 +357,8 @@ } } - // 在kCreateCloneMeta以后的步骤还需更新CloneChunkInfo信息中的chunkIdInfo + // In the steps after kCreateCloneMeta, the chunkIdInfo in the + // CloneChunkInfo information still needs to be updated if (NeedUpdateCloneMeta(task)) { ret = CreateOrUpdateCloneMeta(task, &newFileInfo, &segInfos); if (ret < 0) { @@ -451,9 +447,8 @@ } int CloneCoreImpl::BuildFileInfoFromSnapshot( - std::shared_ptr task, - FInfo *newFileInfo, - CloneSegmentMap *segInfos) { + std::shared_ptr task, FInfo* newFileInfo, + CloneSegmentMap* segInfos) { segInfos->clear(); UUID source = task->GetCloneInfo().GetSrc(); @@ -477,8 +472,8 @@ return kErrCodeInternalError; } newFileInfo->poolset = !task->GetCloneInfo().GetPoolset().empty() - ? task->GetCloneInfo().GetPoolset() - : snapInfo.GetPoolset(); + ? 
task->GetCloneInfo().GetPoolset() + : snapInfo.GetPoolset(); if (IsRecover(task)) { FInfo fInfo; @@ -504,34 +499,33 @@ int CloneCoreImpl::BuildFileInfoFromSnapshot( << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } - // 从快照恢复的destinationId为目标文件的id + // The destinationId recovered from the snapshot is the ID of the target + // file task->GetCloneInfo().SetDestId(fInfo.id); - // 从快照恢复seqnum+1 + // Restore seqnum+1 from snapshot newFileInfo->seqnum = fInfo.seqnum + 1; } else { newFileInfo->seqnum = kInitializeSeqNum; } newFileInfo->owner = task->GetCloneInfo().GetUser(); - ChunkIndexDataName indexName(snapInfo.GetFileName(), - snapInfo.GetSeqNum()); + ChunkIndexDataName indexName(snapInfo.GetFileName(), snapInfo.GetSeqNum()); ChunkIndexData snapMeta; ret = dataStore_->GetChunkIndexData(indexName, &snapMeta); if (ret < 0) { - LOG(ERROR) << "GetChunkIndexData error" - << ", fileName = " << snapInfo.GetFileName() - << ", seqNum = " << snapInfo.GetSeqNum() - << ", taskid = " << task->GetTaskId(); - return ret; + LOG(ERROR) << "GetChunkIndexData error" + << ", fileName = " << snapInfo.GetFileName() + << ", seqNum = " << snapInfo.GetSeqNum() + << ", taskid = " << task->GetTaskId(); + return ret; } uint64_t segmentSize = snapInfo.GetSegmentSize(); uint64_t chunkSize = snapInfo.GetChunkSize(); uint64_t chunkPerSegment = segmentSize / chunkSize; - std::vector chunkIndexs = - snapMeta.GetAllChunkIndex(); - for (auto &chunkIndex : chunkIndexs) { + std::vector chunkIndexs = snapMeta.GetAllChunkIndex(); + for (auto& chunkIndex : chunkIndexs) { ChunkDataName chunkDataName; snapMeta.GetChunkDataName(chunkIndex, &chunkDataName); uint64_t segmentIndex = chunkIndex / chunkPerSegment; @@ -556,10 +550,9 @@ int CloneCoreImpl::BuildFileInfoFromSnapshot( return kErrCodeSuccess; } -int CloneCoreImpl::BuildFileInfoFromFile( - std::shared_ptr task, - FInfo *newFileInfo, - CloneSegmentMap *segInfos) { +int CloneCoreImpl::BuildFileInfoFromFile(std::shared_ptr task, + FInfo* newFileInfo, + CloneSegmentMap* segInfos) { segInfos->clear(); UUID source = task->GetCloneInfo().GetSrc(); std::string user = task->GetCloneInfo().GetUser(); @@ -568,13 +561,11 @@ int CloneCoreImpl::BuildFileInfoFromFile( int ret = client_->GetFileInfo(source, mdsRootUser_, &fInfo); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "GetFileInfo fail" - << ", ret = " << ret - << ", source = " << source - << ", user = " << user - << ", taskid = " << task->GetTaskId(); + << ", ret = " << ret << ", source = " << source + << ", user = " << user << ", taskid = " << task->GetTaskId(); return kErrCodeFileNotExist; } - // GetOrAllocateSegment依赖fullPathName + // GetOrAllocateSegment depends on fullPathName fInfo.fullPathName = source; newFileInfo->chunksize = fInfo.chunksize; @@ -591,8 +582,8 @@ int CloneCoreImpl::BuildFileInfoFromFile( return kErrCodeInternalError; } newFileInfo->poolset = !task->GetCloneInfo().GetPoolset().empty() - ? task->GetCloneInfo().GetPoolset() - : fInfo.poolset; + ? 
task->GetCloneInfo().GetPoolset() + : fInfo.poolset; uint64_t fileLength = fInfo.length; uint64_t segmentSize = fInfo.segmentsize; @@ -603,33 +594,31 @@ int CloneCoreImpl::BuildFileInfoFromFile( << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } - if (fileLength%segmentSize != 0) { + if (fileLength % segmentSize != 0) { LOG(ERROR) << "GetFileInfo return invalid fileInfo, " << "fileLength is not align to SegmentSize" << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } - for (uint64_t i = 0; i< fileLength/segmentSize; i++) { + for (uint64_t i = 0; i < fileLength / segmentSize; i++) { uint64_t offset = i * segmentSize; SegmentInfo segInfoOut; - ret = client_->GetOrAllocateSegmentInfo( - false, offset, &fInfo, mdsRootUser_, &segInfoOut); - if (ret != LIBCURVE_ERROR::OK && - ret != -LIBCURVE_ERROR::NOT_ALLOCATE) { + ret = client_->GetOrAllocateSegmentInfo(false, offset, &fInfo, + mdsRootUser_, &segInfoOut); + if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOT_ALLOCATE) { LOG(ERROR) << "GetOrAllocateSegmentInfo fail" - << ", ret = " << ret - << ", filename = " << source - << ", user = " << user - << ", offset = " << offset - << ", allocateIfNotExist = " << "false" + << ", ret = " << ret << ", filename = " << source + << ", user = " << user << ", offset = " << offset + << ", allocateIfNotExist = " + << "false" << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } if (segInfoOut.chunkvec.size() != 0) { CloneSegmentInfo segInfo; for (std::vector::size_type j = 0; - j < segInfoOut.chunkvec.size(); j++) { + j < segInfoOut.chunkvec.size(); j++) { CloneChunkInfo info; info.location = std::to_string(offset + j * chunkSize); info.seqNum = kInitializeSeqNum; @@ -642,10 +631,8 @@ int CloneCoreImpl::BuildFileInfoFromFile( return kErrCodeSuccess; } - -int CloneCoreImpl::CreateCloneFile( - std::shared_ptr task, - const FInfo &fInfo) { +int CloneCoreImpl::CreateCloneFile(std::shared_ptr task, + const FInfo& fInfo) { std::string fileName = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); std::string user = fInfo.owner; @@ -657,47 +644,43 @@ int CloneCoreImpl::CreateCloneFile( const auto& poolset = fInfo.poolset; std::string source = ""; - // 只有从文件克隆才带clone source + // Clone source is only available when cloning from a file if (CloneFileType::kFile == task->GetCloneInfo().GetFileType()) { source = task->GetCloneInfo().GetSrc(); } FInfo fInfoOut; - int ret = client_->CreateCloneFile(source, fileName, - mdsRootUser_, fileLength, seqNum, chunkSize, + int ret = client_->CreateCloneFile( + source, fileName, mdsRootUser_, fileLength, seqNum, chunkSize, stripeUnit, stripeCount, poolset, &fInfoOut); if (ret == LIBCURVE_ERROR::OK) { // nothing } else if (ret == -LIBCURVE_ERROR::EXISTS) { - ret = client_->GetFileInfo(fileName, - mdsRootUser_, &fInfoOut); + ret = client_->GetFileInfo(fileName, mdsRootUser_, &fInfoOut); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "GetFileInfo fail" - << ", ret = " << ret - << ", fileName = " << fileName + << ", ret = " << ret << ", fileName = " << fileName << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } } else { LOG(ERROR) << "CreateCloneFile file" - << ", ret = " << ret - << ", destination = " << fileName - << ", user = " << user - << ", fileLength = " << fileLength - << ", seqNum = " << seqNum - << ", chunkSize = " << chunkSize + << ", ret = " << ret << ", destination = " << fileName + << ", user = " << user << ", fileLength = " << fileLength + << ", seqNum = " << seqNum << ", chunkSize = " 
<< chunkSize << ", return fileId = " << fInfoOut.id << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } task->GetCloneInfo().SetOriginId(fInfoOut.id); if (IsClone(task)) { - // 克隆情况下destinationId = originId; + // In the case of cloning, destinationId = originId; task->GetCloneInfo().SetDestId(fInfoOut.id); } task->GetCloneInfo().SetTime(fInfoOut.ctime); - // 如果是lazy&非快照,先不要createCloneMeta,createCloneChunk - // 等后面stage2阶段recoveryChunk之前去createCloneMeta,createCloneChunk + // If the clone is lazy and not from a snapshot, skip createCloneMeta and + // createCloneChunk for now; they are performed in stage 2, just before + // recoverChunk if (IsLazy(task) && IsFile(task)) { task->GetCloneInfo().SetNextStep(CloneStep::kCompleteCloneMeta); } else { @@ -707,17 +690,14 @@ ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after CreateCloneFile error." - << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return ret; } return kErrCodeSuccess; } -int CloneCoreImpl::CreateCloneMeta( - std::shared_ptr task, - FInfo *fInfo, - CloneSegmentMap *segInfos) { +int CloneCoreImpl::CreateCloneMeta(std::shared_ptr task, + FInfo* fInfo, CloneSegmentMap* segInfos) { int ret = CreateOrUpdateCloneMeta(task, fInfo, segInfos); if (ret < 0) { return ret; @@ -728,29 +708,28 @@ ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after CreateCloneMeta error." - << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return ret; } return kErrCodeSuccess; } -int CloneCoreImpl::CreateCloneChunk( - std::shared_ptr task, - const FInfo &fInfo, - CloneSegmentMap *segInfos) { +int CloneCoreImpl::CreateCloneChunk(std::shared_ptr task, + const FInfo& fInfo, + CloneSegmentMap* segInfos) { int ret = kErrCodeSuccess; uint32_t chunkSize = fInfo.chunksize; uint32_t correctSn = 0; - // 克隆时correctSn为0,恢复时为新产生的文件版本 + // When cloning, correctSn is 0; when restoring, it is the newly + // generated file version if (IsClone(task)) { correctSn = 0; } else { correctSn = fInfo.seqnum; } auto tracker = std::make_shared(); - for (auto & cloneSegmentInfo : *segInfos) { - for (auto & cloneChunkInfo : cloneSegmentInfo.second) { + for (auto& cloneSegmentInfo : *segInfos) { + for (auto& cloneChunkInfo : cloneSegmentInfo.second) { std::string location; if (IsSnapshot(task)) { location = LocationOperator::GenerateS3Location( @@ -790,13 +769,13 @@ } } } - // 最后剩余数量不足的任务 + // Finally, handle the remaining tasks that did not fill a full batch do { tracker->WaitSome(1); std::list results = tracker->PopResultContexts(); if (0 == results.size()) { - // 已经完成,没有新的结果了 + // Completed, no new results break; } ret = HandleCreateCloneChunkResultsAndRetry(task, tracker, results); @@ -813,8 +792,7 @@ ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after CreateCloneChunk error."
- << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } return kErrCodeSuccess; @@ -824,33 +802,26 @@ int CloneCoreImpl::StartAsyncCreateCloneChunk( std::shared_ptr task, std::shared_ptr tracker, std::shared_ptr context) { - CreateCloneChunkClosure *cb = - new CreateCloneChunkClosure(tracker, context); + CreateCloneChunkClosure* cb = new CreateCloneChunkClosure(tracker, context); tracker->AddOneTrace(); LOG(INFO) << "Doing CreateCloneChunk" << ", location = " << context->location << ", logicalPoolId = " << context->cidInfo.lpid_ << ", copysetId = " << context->cidInfo.cpid_ << ", chunkId = " << context->cidInfo.cid_ - << ", seqNum = " << context->sn - << ", csn = " << context->csn + << ", seqNum = " << context->sn << ", csn = " << context->csn << ", taskid = " << task->GetTaskId(); - int ret = client_->CreateCloneChunk(context->location, - context->cidInfo, - context->sn, - context->csn, - context->chunkSize, - cb); + int ret = client_->CreateCloneChunk(context->location, context->cidInfo, + context->sn, context->csn, + context->chunkSize, cb); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "CreateCloneChunk fail" - << ", ret = " << ret - << ", location = " << context->location + << ", ret = " << ret << ", location = " << context->location << ", logicalPoolId = " << context->cidInfo.lpid_ << ", copysetId = " << context->cidInfo.cpid_ << ", chunkId = " << context->cidInfo.cid_ - << ", seqNum = " << context->sn - << ", csn = " << context->csn + << ", seqNum = " << context->sn << ", csn = " << context->csn << ", taskid = " << task->GetTaskId(); return ret; } @@ -860,7 +831,7 @@ int CloneCoreImpl::StartAsyncCreateCloneChunk( int CloneCoreImpl::HandleCreateCloneChunkResultsAndRetry( std::shared_ptr task, std::shared_ptr tracker, - const std::list &results) { + const std::list& results) { int ret = kErrCodeSuccess; for (auto context : results) { if (context->retCode == -LIBCURVE_ERROR::EXISTS) { @@ -878,11 +849,9 @@ int CloneCoreImpl::HandleCreateCloneChunkResultsAndRetry( if (nowTime - context->startTime < context->clientAsyncMethodRetryTimeSec) { // retry - std::this_thread::sleep_for( - std::chrono::milliseconds( - clientAsyncMethodRetryIntervalMs_)); - ret = StartAsyncCreateCloneChunk( - task, tracker, context); + std::this_thread::sleep_for(std::chrono::milliseconds( + clientAsyncMethodRetryIntervalMs_)); + ret = StartAsyncCreateCloneChunk(task, tracker, context); if (ret < 0) { return kErrCodeInternalError; } @@ -897,45 +866,37 @@ int CloneCoreImpl::HandleCreateCloneChunkResultsAndRetry( return ret; } -int CloneCoreImpl::CompleteCloneMeta( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos) { +int CloneCoreImpl::CompleteCloneMeta(std::shared_ptr task, + const FInfo& fInfo, + const CloneSegmentMap& segInfos) { (void)fInfo; (void)segInfos; - std::string origin = - cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); std::string user = task->GetCloneInfo().GetUser(); int ret = client_->CompleteCloneMeta(origin, mdsRootUser_); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "CompleteCloneMeta fail" - << ", ret = " << ret - << ", filename = " << origin - << ", user = " << user - << ", taskid = " << task->GetTaskId(); + << ", ret = " << ret << ", filename = " << origin + << ", user = " << user << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } if (IsLazy(task)) { - 
task->GetCloneInfo().SetNextStep( - CloneStep::kChangeOwner); + task->GetCloneInfo().SetNextStep(CloneStep::kChangeOwner); } else { - task->GetCloneInfo().SetNextStep( - CloneStep::kRecoverChunk); + task->GetCloneInfo().SetNextStep(CloneStep::kRecoverChunk); } ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after CompleteCloneMeta error." - << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return ret; } return kErrCodeSuccess; } -int CloneCoreImpl::RecoverChunk( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos) { +int CloneCoreImpl::RecoverChunk(std::shared_ptr task, + const FInfo& fInfo, + const CloneSegmentMap& segInfos) { int ret = kErrCodeSuccess; uint32_t chunkSize = fInfo.chunksize; @@ -945,8 +906,7 @@ double progressPerData = static_cast(totalProgress) / segNum; uint32_t index = 0; - if (0 == cloneChunkSplitSize_ || - chunkSize % cloneChunkSplitSize_ != 0) { + if (0 == cloneChunkSplitSize_ || chunkSize % cloneChunkSplitSize_ != 0) { LOG(ERROR) << "chunk is not align to cloneChunkSplitSize" << ", taskid = " << task->GetTaskId(); return kErrCodeChunkSizeNotAligned; @@ -954,24 +914,25 @@ auto tracker = std::make_shared(); uint64_t workingChunkNum = 0; - // 为避免发往同一个chunk碰撞,异步请求不同的chunk + // To avoid collisions on the same chunk, issue the asynchronous + // requests to different chunks - for (auto & cloneSegmentInfo : segInfos) { - for (auto & cloneChunkInfo : cloneSegmentInfo.second) { + for (auto& cloneSegmentInfo : segInfos) { + for (auto& cloneChunkInfo : cloneSegmentInfo.second) { if (!cloneChunkInfo.second.needRecover) { continue; } - // 当前并发工作的chunk数已大于要求的并发数时,先消化一部分 + // When the number of chunks being worked on concurrently reaches + // the configured concurrency, wait for some of them to finish first while (workingChunkNum >= recoverChunkConcurrency_) { uint64_t completeChunkNum = 0; - ret = ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd(task, - tracker, - &completeChunkNum); + ret = ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( + task, tracker, &completeChunkNum); if (ret < 0) { return kErrCodeInternalError; } workingChunkNum -= completeChunkNum; } - // 加入新的工作的chunk + // Add a new working chunk workingChunkNum++; auto context = std::make_shared(); context->cidInfo = cloneChunkInfo.second.chunkIdInfo; @@ -984,29 +945,27 @@ clientAsyncMethodRetryTimeSec_; LOG(INFO) << "RecoverChunk start" - << ", logicalPoolId = " - << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", chunkId = " << context->cidInfo.cid_ - << ", len = " << context->partSize - << ", taskid = " << task->GetTaskId(); + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", len = " << context->partSize + << ", taskid = " << task->GetTaskId(); ret = StartAsyncRecoverChunkPart(task, tracker, context); if (ret < 0) { return kErrCodeInternalError; } } - task->SetProgress(static_cast( - kProgressRecoverChunkBegin + index * progressPerData)); + task->SetProgress(static_cast(kProgressRecoverChunkBegin + + index * progressPerData)); task->UpdateMetric(); index++; } while (workingChunkNum > 0) { uint64_t completeChunkNum = 0; - ret = ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd(task, - tracker, - &completeChunkNum); + ret = 
ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( + task, tracker, &completeChunkNum); if (ret < 0) { return kErrCodeInternalError; } @@ -1017,8 +976,7 @@ int CloneCoreImpl::RecoverChunk( ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after RecoverChunk error." - << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } return kErrCodeSuccess; @@ -1028,30 +986,25 @@ int CloneCoreImpl::StartAsyncRecoverChunkPart( std::shared_ptr task, std::shared_ptr tracker, std::shared_ptr context) { - RecoverChunkClosure *cb = new RecoverChunkClosure(tracker, context); + RecoverChunkClosure* cb = new RecoverChunkClosure(tracker, context); tracker->AddOneTrace(); uint64_t offset = context->partIndex * context->partSize; LOG_EVERY_SECOND(INFO) << "Doing RecoverChunk" - << ", logicalPoolId = " - << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", chunkId = " << context->cidInfo.cid_ - << ", offset = " << offset - << ", len = " << context->partSize - << ", taskid = " << task->GetTaskId(); - int ret = client_->RecoverChunk(context->cidInfo, - offset, - context->partSize, - cb); + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", offset = " << offset + << ", len = " << context->partSize + << ", taskid = " << task->GetTaskId(); + int ret = + client_->RecoverChunk(context->cidInfo, offset, context->partSize, cb); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "RecoverChunk fail" << ", ret = " << ret - << ", logicalPoolId = " - << context->cidInfo.lpid_ + << ", logicalPoolId = " << context->cidInfo.lpid_ << ", copysetId = " << context->cidInfo.cpid_ << ", chunkId = " << context->cidInfo.cid_ - << ", offset = " << offset - << ", len = " << context->partSize + << ", offset = " << offset << ", len = " << context->partSize << ", taskid = " << task->GetTaskId(); return ret; } @@ -1061,20 +1014,18 @@ int CloneCoreImpl::StartAsyncRecoverChunkPart( int CloneCoreImpl::ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( std::shared_ptr task, std::shared_ptr tracker, - uint64_t *completeChunkNum) { + uint64_t* completeChunkNum) { *completeChunkNum = 0; tracker->WaitSome(1); - std::list results = - tracker->PopResultContexts(); + std::list results = tracker->PopResultContexts(); for (auto context : results) { if (context->retCode != LIBCURVE_ERROR::OK) { uint64_t nowTime = TimeUtility::GetTimeofDaySec(); if (nowTime - context->startTime < context->clientAsyncMethodRetryTimeSec) { // retry - std::this_thread::sleep_for( - std::chrono::milliseconds( - clientAsyncMethodRetryIntervalMs_)); + std::this_thread::sleep_for(std::chrono::milliseconds( + clientAsyncMethodRetryIntervalMs_)); int ret = StartAsyncRecoverChunkPart(task, tracker, context); if (ret < 0) { return ret; @@ -1086,7 +1037,7 @@ int CloneCoreImpl::ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( return context->retCode; } } else { - // 启动一个新的分片,index++,并重置开始时间 + // Start a new shard, index++, and reset the start time context->partIndex++; context->startTime = TimeUtility::GetTimeofDaySec(); if (context->partIndex < context->totalPartNum) { @@ -1096,12 +1047,11 @@ int CloneCoreImpl::ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( } } else { LOG(INFO) << "RecoverChunk Complete" - << ", logicalPoolId = " - << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", 
chunkId = " << context->cidInfo.cid_ - << ", len = " << context->partSize - << ", taskid = " << task->GetTaskId(); + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", len = " << context->partSize + << ", taskid = " << task->GetTaskId(); (*completeChunkNum)++; } } @@ -1109,19 +1059,16 @@ int CloneCoreImpl::ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( return kErrCodeSuccess; } -int CloneCoreImpl::ChangeOwner( - std::shared_ptr task, - const FInfo &fInfo) { +int CloneCoreImpl::ChangeOwner(std::shared_ptr task, + const FInfo& fInfo) { (void)fInfo; std::string user = task->GetCloneInfo().GetUser(); - std::string origin = - cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); int ret = client_->ChangeOwner(origin, user); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "ChangeOwner fail, ret = " << ret - << ", fileName = " << origin - << ", newOwner = " << user + << ", fileName = " << origin << ", newOwner = " << user << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } @@ -1130,31 +1077,25 @@ int CloneCoreImpl::ChangeOwner( ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after ChangeOwner error." - << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } return kErrCodeSuccess; } -int CloneCoreImpl::RenameCloneFile( - std::shared_ptr task, - const FInfo &fInfo) { +int CloneCoreImpl::RenameCloneFile(std::shared_ptr task, + const FInfo& fInfo) { std::string user = fInfo.owner; uint64_t originId = task->GetCloneInfo().GetOriginId(); uint64_t destinationId = task->GetCloneInfo().GetDestId(); - std::string origin = - cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); std::string destination = task->GetCloneInfo().GetDest(); - // 先rename - int ret = client_->RenameCloneFile(mdsRootUser_, - originId, - destinationId, - origin, - destination); + // Rename first + int ret = client_->RenameCloneFile(mdsRootUser_, originId, destinationId, + origin, destination); if (-LIBCURVE_ERROR::NOTEXIST == ret) { - // 有可能是已经rename过了 + // It is possible that it has already been renamed FInfo destFInfo; ret = client_->GetFileInfo(destination, mdsRootUser_, &destFInfo); if (ret != LIBCURVE_ERROR::OK) { @@ -1174,10 +1115,8 @@ int CloneCoreImpl::RenameCloneFile( } } else if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "RenameCloneFile fail" - << ", ret = " << ret - << ", user = " << user - << ", originId = " << originId - << ", origin = " << origin + << ", ret = " << ret << ", user = " << user + << ", originId = " << originId << ", origin = " << origin << ", destination = " << destination << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; @@ -1196,25 +1135,22 @@ int CloneCoreImpl::RenameCloneFile( ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after RenameCloneFile error." 
- << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return ret; } return kErrCodeSuccess; } -int CloneCoreImpl::CompleteCloneFile( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos) { +int CloneCoreImpl::CompleteCloneFile(std::shared_ptr task, + const FInfo& fInfo, + const CloneSegmentMap& segInfos) { (void)fInfo; (void)segInfos; std::string fileName; if (IsLazy(task)) { fileName = task->GetCloneInfo().GetDest(); } else { - fileName = - cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + fileName = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); } std::string user = task->GetCloneInfo().GetUser(); int ret = client_->CompleteCloneFile(fileName, mdsRootUser_); @@ -1224,15 +1160,13 @@ int CloneCoreImpl::CompleteCloneFile( case -LIBCURVE_ERROR::NOTEXIST: LOG(ERROR) << "CompleteCloneFile " << "find dest file not exist, maybe deleted" - << ", ret = " << ret - << ", destination = " << fileName + << ", ret = " << ret << ", destination = " << fileName << ", user = " << user << ", taskid = " << task->GetTaskId(); return kErrCodeFileNotExist; default: LOG(ERROR) << "CompleteCloneFile fail" - << ", ret = " << ret - << ", fileName = " << fileName + << ", ret = " << ret << ", fileName = " << fileName << ", user = " << user << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; @@ -1245,8 +1179,7 @@ int CloneCoreImpl::CompleteCloneFile( ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo after CompleteCloneFile error." - << " ret = " << ret - << ", taskid = " << task->GetTaskId(); + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return ret; } return kErrCodeSuccess; @@ -1271,8 +1204,8 @@ void CloneCoreImpl::HandleCloneSuccess(std::shared_ptr task) { cloneRef_->DecrementRef(source); NameLockGuard lockGuard(cloneRef_->GetLock(), source); if (cloneRef_->GetRef(source) == 0) { - int ret = client_->SetCloneFileStatus(source, - FileStatus::Created, mdsRootUser_); + int ret = client_->SetCloneFileStatus(source, FileStatus::Created, + mdsRootUser_); if (ret < 0) { task->GetCloneInfo().SetStatus(CloneStatus::error); int ret2 = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); @@ -1282,8 +1215,7 @@ void CloneCoreImpl::HandleCloneSuccess(std::shared_ptr task) { << ", uuid = " << task->GetTaskId(); } LOG(ERROR) << "Task Fail cause by SetCloneFileStatus fail" - << ", ret = " << ret - << ", TaskInfo : " << *task; + << ", ret = " << ret << ", TaskInfo : " << *task; task->Finish(); return; } @@ -1293,8 +1225,7 @@ void CloneCoreImpl::HandleCloneSuccess(std::shared_ptr task) { ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo Task Success Fail!" 
- << " ret = " << ret - << ", uuid = " << task->GetTaskId(); + << " ret = " << ret << ", uuid = " << task->GetTaskId(); } task->SetProgress(kProgressCloneComplete); @@ -1305,7 +1236,7 @@ void CloneCoreImpl::HandleCloneSuccess(std::shared_ptr task) { } void CloneCoreImpl::HandleCloneError(std::shared_ptr task, - int retCode) { + int retCode) { int ret = kErrCodeSuccess; if (NeedRetry(task, retCode)) { HandleCloneToRetry(task); @@ -1322,8 +1253,8 @@ void CloneCoreImpl::HandleCloneError(std::shared_ptr task, cloneRef_->DecrementRef(source); NameLockGuard lockGuard(cloneRef_->GetLock(), source); if (cloneRef_->GetRef(source) == 0) { - ret = client_->SetCloneFileStatus(source, - FileStatus::Created, mdsRootUser_); + ret = client_->SetCloneFileStatus(source, FileStatus::Created, + mdsRootUser_); if (ret < 0) { LOG(ERROR) << "SetCloneFileStatus fail, ret = " << ret << ", taskid = " << task->GetTaskId(); @@ -1334,8 +1265,7 @@ void CloneCoreImpl::HandleCloneError(std::shared_ptr task, ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo Task error Fail!" - << " ret = " << ret - << ", uuid = " << task->GetTaskId(); + << " ret = " << ret << ", uuid = " << task->GetTaskId(); } LOG(ERROR) << "Task Fail" << ", TaskInfo : " << *task; @@ -1348,8 +1278,7 @@ void CloneCoreImpl::HandleCloneToRetry(std::shared_ptr task) { int ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo Task retrying Fail!" - << " ret = " << ret - << ", uuid = " << task->GetTaskId(); + << " ret = " << ret << ", uuid = " << task->GetTaskId(); } LOG(WARNING) << "Task Fail, Retrying" << ", TaskInfo : " << *task; @@ -1362,8 +1291,7 @@ void CloneCoreImpl::HandleCleanSuccess(std::shared_ptr task) { int ret = metaStore_->DeleteCloneInfo(taskId); if (ret < 0) { LOG(ERROR) << "DeleteCloneInfo failed" - << ", ret = " << ret - << ", taskId = " << taskId; + << ", ret = " << ret << ", taskId = " << taskId; } else { LOG(INFO) << "Clean Task Success" << ", TaskInfo : " << *task; @@ -1380,8 +1308,7 @@ void CloneCoreImpl::HandleCleanError(std::shared_ptr task) { int ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo Task error Fail!" 
- << " ret = " << ret - << ", uuid = " << task->GetTaskId(); + << " ret = " << ret << ", uuid = " << task->GetTaskId(); } LOG(ERROR) << "Clean Task Fail" << ", TaskInfo : " << *task; @@ -1389,17 +1316,17 @@ void CloneCoreImpl::HandleCleanError(std::shared_ptr task) { return; } -int CloneCoreImpl::GetCloneInfoList(std::vector *taskList) { +int CloneCoreImpl::GetCloneInfoList(std::vector* taskList) { metaStore_->GetCloneInfoList(taskList); return kErrCodeSuccess; } -int CloneCoreImpl::GetCloneInfo(TaskIdType taskId, CloneInfo *cloneInfo) { +int CloneCoreImpl::GetCloneInfo(TaskIdType taskId, CloneInfo* cloneInfo) { return metaStore_->GetCloneInfo(taskId, cloneInfo); } -int CloneCoreImpl::GetCloneInfoByFileName( - const std::string &fileName, std::vector *list) { +int CloneCoreImpl::GetCloneInfoByFileName(const std::string& fileName, + std::vector* list) { return metaStore_->GetCloneInfoByFileName(fileName, list); } @@ -1423,26 +1350,24 @@ inline bool CloneCoreImpl::IsClone(std::shared_ptr task) { return CloneTaskType::kClone == task->GetCloneInfo().GetTaskType(); } -bool CloneCoreImpl::NeedUpdateCloneMeta( - std::shared_ptr task) { +bool CloneCoreImpl::NeedUpdateCloneMeta(std::shared_ptr task) { bool ret = true; CloneStep step = task->GetCloneInfo().GetNextStep(); if (CloneStep::kCreateCloneFile == step || - CloneStep::kCreateCloneMeta == step || - CloneStep::kEnd == step) { + CloneStep::kCreateCloneMeta == step || CloneStep::kEnd == step) { ret = false; } return ret; } bool CloneCoreImpl::NeedRetry(std::shared_ptr task, - int retCode) { + int retCode) { if (IsLazy(task)) { CloneStep step = task->GetCloneInfo().GetNextStep(); if (CloneStep::kRecoverChunk == step || - CloneStep::kCompleteCloneFile == step || - CloneStep::kEnd == step) { - // 文件不存在的场景下不需要再重试,因为可能已经被删除了 + CloneStep::kCompleteCloneFile == step || CloneStep::kEnd == step) { + // In scenarios where the file does not exist, there is no need to + // retry as it may have been deleted if (retCode != kErrCodeFileNotExist) { return true; } @@ -1451,10 +1376,9 @@ bool CloneCoreImpl::NeedRetry(std::shared_ptr task, return false; } -int CloneCoreImpl::CreateOrUpdateCloneMeta( - std::shared_ptr task, - FInfo *fInfo, - CloneSegmentMap *segInfos) { +int CloneCoreImpl::CreateOrUpdateCloneMeta(std::shared_ptr task, + FInfo* fInfo, + CloneSegmentMap* segInfos) { std::string newFileName = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); std::string user = fInfo->owner; @@ -1463,7 +1387,7 @@ int CloneCoreImpl::CreateOrUpdateCloneMeta( if (LIBCURVE_ERROR::OK == ret) { // nothing } else if (-LIBCURVE_ERROR::NOTEXIST == ret) { - // 可能已经rename过了 + // Perhaps it has already been renamed newFileName = task->GetCloneInfo().GetDest(); ret = client_->GetFileInfo(newFileName, mdsRootUser_, &fInfoOut); if (ret != LIBCURVE_ERROR::OK) { @@ -1474,7 +1398,7 @@ int CloneCoreImpl::CreateOrUpdateCloneMeta( << ", taskid = " << task->GetTaskId(); return kErrCodeFileNotExist; } - // 如果是已经rename过,那么id应该一致 + // If it has already been renamed, then the id should be consistent uint64_t originId = task->GetCloneInfo().GetOriginId(); if (fInfoOut.id != originId) { LOG(ERROR) << "File is missing, fileId not equal, " @@ -1487,34 +1411,32 @@ int CloneCoreImpl::CreateOrUpdateCloneMeta( } } else { LOG(ERROR) << "GetFileInfo fail" - << ", ret = " << ret - << ", filename = " << newFileName - << ", user = " << user - << ", taskid = " << task->GetTaskId(); + << ", ret = " << ret << ", filename = " << newFileName + << ", user = " << user << ", taskid = " << task->GetTaskId(); 
return kErrCodeInternalError; } - // 更新fInfo + // Update fInfo *fInfo = fInfoOut; - // GetOrAllocateSegment 依赖fullPathName,需要在此处更新 + // GetOrAllocateSegment depends on fullPathName and needs to be updated here fInfo->fullPathName = newFileName; uint32_t segmentSize = fInfo->segmentsize; - for (auto &segInfo : *segInfos) { + for (auto& segInfo : *segInfos) { SegmentInfo segInfoOut; uint64_t offset = segInfo.first * segmentSize; - ret = client_->GetOrAllocateSegmentInfo( - true, offset, fInfo, mdsRootUser_, &segInfoOut); + ret = client_->GetOrAllocateSegmentInfo(true, offset, fInfo, + mdsRootUser_, &segInfoOut); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "GetOrAllocateSegmentInfo fail" << ", newFileName = " << newFileName - << ", user = " << user - << ", offset = " << offset - << ", allocateIfNotExist = " << "true" + << ", user = " << user << ", offset = " << offset + << ", allocateIfNotExist = " + << "true" << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } - for (auto &cloneChunkInfo : segInfo.second) { + for (auto& cloneChunkInfo : segInfo.second) { if (cloneChunkInfo.first > segInfoOut.chunkvec.size()) { LOG(ERROR) << "can not find chunkIndexInSeg = " << cloneChunkInfo.first @@ -1535,12 +1457,13 @@ return kErrCodeSuccess; } -int CloneCoreImpl::CleanCloneOrRecoverTaskPre(const std::string &user, - const TaskIdType &taskId, - CloneInfo *cloneInfo) { +int CloneCoreImpl::CleanCloneOrRecoverTaskPre(const std::string& user, + const TaskIdType& taskId, + CloneInfo* cloneInfo) { int ret = metaStore_->GetCloneInfo(taskId, cloneInfo); if (ret < 0) { - // 不存在时直接返回成功,使接口幂等 + // Return success directly when the record does not exist, keeping the + // interface idempotent return kErrCodeSuccess; } if (cloneInfo->GetUser() != user) { @@ -1567,8 +1490,7 @@ ret = metaStore_->UpdateCloneInfo(*cloneInfo); if (ret < 0) { LOG(ERROR) << "UpdateCloneInfo fail" - << ", ret = " << ret - << ", taskId = " << taskId; + << ", ret = " << ret << ", taskId = " << taskId; return ret; } return kErrCodeSuccess; @@ -1576,16 +1498,17 @@ void CloneCoreImpl::HandleCleanCloneOrRecoverTask( std::shared_ptr task) { - // 只有错误的clone/recover任务才清理临时文件 + // Only failed clone/recover tasks clean up temporary files if (CloneStatus::errorCleaning == task->GetCloneInfo().GetStatus()) { - // 错误情况下可能未清除镜像被克隆标志 + // On error, the source image's 'being cloned' flag may not have been + // cleared if (IsFile(task)) { - // 重新发送 + // Send the status change again std::string source = task->GetCloneInfo().GetSrc(); NameLockGuard lockGuard(cloneRef_->GetLock(), source); if (cloneRef_->GetRef(source) == 0) { - int ret = client_->SetCloneFileStatus(source, - FileStatus::Created, mdsRootUser_); + int ret = client_->SetCloneFileStatus( + source, FileStatus::Created, mdsRootUser_); if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOTEXIST) { LOG(ERROR) << "SetCloneFileStatus fail, ret = " << ret @@ -1598,16 +1521,12 @@ std::string tempFileName = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); uint64_t fileId = task->GetCloneInfo().GetOriginId(); - std::string user = - task->GetCloneInfo().GetUser(); + std::string user = task->GetCloneInfo().GetUser(); int ret = client_->DeleteFile(tempFileName, mdsRootUser_, fileId); - if (ret != LIBCURVE_ERROR::OK && - ret != -LIBCURVE_ERROR::NOTEXIST) { + if (ret != 
LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOTEXIST) { LOG(ERROR) << "DeleteFile failed" - << ", ret = " << ret - << ", fileName = " << tempFileName - << ", user = " << user - << ", fileId = " << fileId + << ", ret = " << ret << ", fileName = " << tempFileName + << ", user = " << user << ", fileId = " << fileId << ", taskid = " << task->GetTaskId(); HandleCleanError(task); return; @@ -1623,8 +1542,7 @@ int CloneCoreImpl::HandleRemoveCloneOrRecoverTask( int ret = metaStore_->DeleteCloneInfo(taskId); if (ret < 0) { LOG(ERROR) << "DeleteCloneInfo failed" - << ", ret = " << ret - << ", taskId = " << taskId; + << ", ret = " << ret << ", taskId = " << taskId; return kErrCodeInternalError; } @@ -1635,12 +1553,11 @@ int CloneCoreImpl::HandleRemoveCloneOrRecoverTask( cloneRef_->DecrementRef(source); NameLockGuard lockGuard(cloneRef_->GetLock(), source); if (cloneRef_->GetRef(source) == 0) { - int ret = client_->SetCloneFileStatus(source, - FileStatus::Created, mdsRootUser_); + int ret = client_->SetCloneFileStatus(source, FileStatus::Created, + mdsRootUser_); if (ret < 0) { LOG(ERROR) << "Task Fail cause by SetCloneFileStatus fail" - << ", ret = " << ret - << ", TaskInfo : " << *task; + << ", ret = " << ret << ", TaskInfo : " << *task; return kErrCodeInternalError; } } @@ -1649,8 +1566,8 @@ int CloneCoreImpl::HandleRemoveCloneOrRecoverTask( return kErrCodeSuccess; } -int CloneCoreImpl::CheckFileExists(const std::string &filename, - uint64_t inodeId) { +int CloneCoreImpl::CheckFileExists(const std::string& filename, + uint64_t inodeId) { FInfo destFInfo; int ret = client_->GetFileInfo(filename, mdsRootUser_, &destFInfo); if (ret == LIBCURVE_ERROR::OK) { @@ -1668,10 +1585,13 @@ int CloneCoreImpl::CheckFileExists(const std::string &filename, return kErrCodeInternalError; } -// 加减引用计数的时候,接口里面会对引用计数map加锁; -// 加引用计数、处理引用计数减到0的时候,需要额外对修改的那条记录加锁。 -int CloneCoreImpl::HandleDeleteCloneInfo(const CloneInfo &cloneInfo) { - // 先减引用计数,如果是从镜像克隆且引用计数减到0,需要修改源镜像的状态为created +// When adding or subtracting reference counts, the interface will lock the +// reference count map; When adding a reference count and reducing the reference +// count to 0, an additional lock needs to be added to the modified record. +int CloneCoreImpl::HandleDeleteCloneInfo(const CloneInfo& cloneInfo) { + // First, reduce the reference count. If you are cloning from a mirror and + // the reference count is reduced to 0, you need to modify the status of the + // source mirror to 'created' std::string source = cloneInfo.GetSrc(); if (cloneInfo.GetFileType() == CloneFileType::kSnapshot) { snapshotRef_->DecrementSnapshotRef(source); @@ -1679,12 +1599,12 @@ int CloneCoreImpl::HandleDeleteCloneInfo(const CloneInfo &cloneInfo) { cloneRef_->DecrementRef(source); NameLockGuard lockGuard(cloneRef_->GetLock(), source); if (cloneRef_->GetRef(source) == 0) { - int ret = client_->SetCloneFileStatus(source, - FileStatus::Created, mdsRootUser_); + int ret = client_->SetCloneFileStatus(source, FileStatus::Created, + mdsRootUser_); if (ret == -LIBCURVE_ERROR::NOTEXIST) { LOG(WARNING) << "SetCloneFileStatus, file not exist, filename: " << source; - } else if (ret != LIBCURVE_ERROR::OK) { + } else if (ret != LIBCURVE_ERROR::OK) { cloneRef_->IncrementRef(source); LOG(ERROR) << "SetCloneFileStatus fail" << ", ret = " << ret @@ -1694,7 +1614,8 @@ int CloneCoreImpl::HandleDeleteCloneInfo(const CloneInfo &cloneInfo) { } } - // 删除这条记录,如果删除失败,把前面已经减掉的引用计数加回去 + // Delete this record. 
If the deletion fails, add back the previously + // subtracted reference count int ret = metaStore_->DeleteCloneInfo(cloneInfo.GetTaskId()); if (ret != 0) { if (cloneInfo.GetFileType() == CloneFileType::kSnapshot) { @@ -1706,8 +1627,7 @@ cloneRef_->IncrementRef(source); } LOG(ERROR) << "DeleteCloneInfo failed" - << ", ret = " << ret - << ", CloneInfo = " << cloneInfo; + << ", ret = " << ret << ", CloneInfo = " << cloneInfo; return kErrCodeInternalError; } diff --git a/src/snapshotcloneserver/clone/clone_core.h b/src/snapshotcloneserver/clone/clone_core.h index 19c1c20c9d..f33e2f8d5c 100644 --- a/src/snapshotcloneserver/clone/clone_core.h +++ b/src/snapshotcloneserver/clone/clone_core.h @@ -23,20 +23,20 @@ #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_CORE_H_ #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_CORE_H_ +#include +#include #include #include #include -#include -#include -#include "src/snapshotcloneserver/common/curvefs_client.h" +#include "src/common/concurrent/name_lock.h" #include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" -#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" -#include "src/snapshotcloneserver/common/snapshot_reference.h" #include "src/snapshotcloneserver/clone/clone_reference.h" +#include "src/snapshotcloneserver/common/curvefs_client.h" +#include "src/snapshotcloneserver/common/snapshot_reference.h" +#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" #include "src/snapshotcloneserver/common/thread_pool.h" -#include "src/common/concurrent/name_lock.h" +#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" using ::curve::common::NameLock; @@ -51,359 +51,334 @@ class CloneCore { public: virtual ~CloneCore() {} /** - * @brief 克隆或恢复任务前置 + * @brief Pre-step of a clone or restore task * - * @param source 克隆或恢复源 - * @param user 用户名 - * @param destination 克隆或恢复的目标文件名 - * @param lazyFlag 是否lazy - * @param taskType 克隆或恢复 - * @param poolset 克隆时目标文件的poolset - * @param[out] info 克隆或恢复任务信息 + * @param source Clone or restore source + * @param user username + * @param destination The target file name for cloning or restoring + * @param lazyFlag whether the clone or restore is lazy + * @param taskType clone or restore + * @param poolset The poolset of the target file during cloning + * @param[out] info Clone or restore task information * - * @return 错误码 + * @return error code */ - virtual int CloneOrRecoverPre(const UUID &source, - const std::string &user, - const std::string &destination, - bool lazyFlag, - CloneTaskType taskType, - std::string poolset, - CloneInfo *info) = 0; + virtual int CloneOrRecoverPre(const UUID& source, const std::string& user, + const std::string& destination, bool lazyFlag, + CloneTaskType taskType, std::string poolset, + CloneInfo* info) = 0; /** - * @brief 处理克隆或恢复任务 + * @brief Process a clone or restore task * - * @param task 克隆或恢复任务 + * @param task Clone or restore task */ virtual void HandleCloneOrRecoverTask( std::shared_ptr task) = 0; /** - * @brief 清理克隆或恢复任务前置 + * @brief Pre-step of cleaning a clone or restore task * - * @param user 用户名 - * @param taskId 任务Id - * @param[out] cloneInfo 克隆或恢复信息 + * @param user username + * @param taskId Task Id + * @param[out] cloneInfo Clone or restore information * - * @return 错误码 + * @return error code */ - virtual int CleanCloneOrRecoverTaskPre(const 
std::string& user, + const TaskIdType& taskId, + CloneInfo* cloneInfo) = 0; /** - * @brief 异步处理清理克隆或恢复任务 + * @brief Asynchronous processing of clean clone or restore tasks * - * @param task 克隆或恢复任务 + * @param task Clone or restore task */ virtual void HandleCleanCloneOrRecoverTask( std::shared_ptr task) = 0; /** - * @brief 安装克隆文件数据的前置工作 - * - 进行一些必要的检查 - * - 获取并返回克隆信息 - * - 更新数据库状态 + * @brief Pre work for installing clone file data + * - Conduct necessary inspections + * - Obtain and return clone information + * - Update database status * - * @param user 用户名 - * @param taskId 任务Id - * @param[out] cloneInfo 克隆信息 + * @param user username + * @param taskId Task Id + * @param[out] cloneInfo clone information * - * @return 错误码 + * @return error code */ - virtual int FlattenPre( - const std::string &user, - const TaskIdType &taskId, - CloneInfo *cloneInfo) = 0; + virtual int FlattenPre(const std::string& user, const TaskIdType& taskId, + CloneInfo* cloneInfo) = 0; /** - * @brief 获取全部克隆/恢复任务列表,用于重启后恢复执行 + * @brief Get a list of all clone/restore tasks for resuming execution after + * reboot * - * @param[out] cloneInfos 克隆/恢复任务列表 + * @param[out] cloneInfos Clone/Restore Task List * - * @return 错误码 + * @return error code */ - virtual int GetCloneInfoList(std::vector *cloneInfos) = 0; + virtual int GetCloneInfoList(std::vector* cloneInfos) = 0; /** - * @brief 获取指定id的克隆/恢复任务 + * @brief Get the clone/restore task for the specified ID * - * @param taskId  任务id - * @param cloneInfo 克隆/恢复任务 + * @param taskId Task ID + * @param cloneInfo Clone/Restore Task * - * @retVal 0 获取成功 - * @retVal -1 获取失败 + * @retval 0 successfully obtained + * @retval -1 failed to obtain */ - virtual int GetCloneInfo(TaskIdType taskId, CloneInfo *cloneInfo) = 0; + virtual int GetCloneInfo(TaskIdType taskId, CloneInfo* cloneInfo) = 0; /** - * @brief 获取指定文件名的克隆/恢复任务 + * @brief Get the clone/restore task for the specified file name * - * @param fileName  文件名 - * @param list 克隆/恢复任务列表 + * @param fileName File name + * @param list Clone/Restore Task List * - * @retVal 0 获取成功 - * @retVal -1 获取失败 + * @retval 0 successfully obtained + * @retval -1 failed to obtain */ - virtual int GetCloneInfoByFileName( - const std::string &fileName, std::vector *list) = 0; + virtual int GetCloneInfoByFileName(const std::string& fileName, + std::vector* list) = 0; /** - * @brief 获取快照引用管理模块 + * @brief Get snapshot reference management module * - * @return 快照引用管理模块 + * @return Snapshot Reference Management Module */ virtual std::shared_ptr GetSnapshotRef() = 0; - /** - * @brief 获取镜像引用管理模块 + * @brief Get Mirror Reference Management Module * - * @return 镜像引用管理模块 + * @return Image Reference Management Module */ virtual std::shared_ptr GetCloneRef() = 0; - /** - * @brief 移除克隆/恢复任务 + * @brief Remove clone/restore task * - * @param task 克隆任务 + * @param task Clone task * - * @return 错误码 + * @return error code */ virtual int HandleRemoveCloneOrRecoverTask( std::shared_ptr task) = 0; /** - * @brief 检查文件是否存在 + * @brief Check if the file exists * - * @param filename 文件名 + * @param filename File name * - * @return 错误码 + * @return error code */ - virtual int CheckFileExists(const std::string &filename, + virtual int CheckFileExists(const std::string& filename, uint64_t inodeId) = 0; /** - * @brief 删除cloneInfo + * @brief Delete cloneInfo * - * @param cloneInfo 待删除的cloneInfo + * @param cloneInfo CloneInfo to be deleted * - * @return 错误码 + * @return error code */ - virtual int HandleDeleteCloneInfo(const CloneInfo &cloneInfo) = 0; + virtual int 
HandleDeleteCloneInfo(const CloneInfo& cloneInfo) = 0; }; /** - * @brief 克隆/恢复所需chunk信息 + * @brief Chunk information required for cloning/restoring */ struct CloneChunkInfo { - // 该chunk的id信息 + // The ID information of the chunk ChunkIDInfo chunkIdInfo; - // 位置信息,如果在s3上,是objectName,否则在curvefs上,则是offset + // Location information: if on s3 it is the objectName; otherwise, on + // curvefs, it is the offset std::string location; - // 该chunk的版本号 + // The version number of the chunk uint64_t seqNum; - // chunk是否需要recover + // Whether the chunk needs recover bool needRecover; }; -// 克隆/恢复所需segment信息,key是ChunkIndex In Segment, value是chunk信息 +// The segment information required for cloning/recovery, where the key is +// ChunkIndex In Segment and the value is the chunk information using CloneSegmentInfo = std::map; -// 克隆/恢复所需segment信息表,key是segmentIndex +// The segment information table required for cloning/recovery, where the key +// is segmentIndex using CloneSegmentMap = std::map;
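// A small sketch of how the two maps above nest. The template arguments are
// elided in this patch, so the uint64_t keys and the stand-in chunk struct
// below are assumptions for illustration only, not the actual definitions.
#include <cstdint>
#include <map>
#include <string>

struct ChunkInfoSketch {
    std::string location;  // objectName on s3, offset on curvefs
    uint64_t seqNum;
    bool needRecover;
};

// chunk index within a segment -> chunk information
using SegmentInfoSketch = std::map<uint64_t, ChunkInfoSketch>;
// segment index -> per-segment chunk table
using SegmentMapSketch = std::map<uint64_t, SegmentInfoSketch>;

void WalkChunksNeedingRecover(const SegmentMapSketch& segments) {
    for (const auto& seg : segments) {
        for (const auto& chunk : seg.second) {
            if (chunk.second.needRecover) {
                // Issue a recover request for (segment seg.first,
                // chunk chunk.first) here.
            }
        }
    }
}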
class CloneCoreImpl : public CloneCore { public: - static const std::string kCloneTempDir; + static const std::string kCloneTempDir; public: - CloneCoreImpl( - std::shared_ptr client, - std::shared_ptr metaStore, - std::shared_ptr dataStore, - std::shared_ptr snapshotRef, - std::shared_ptr cloneRef, - const SnapshotCloneServerOptions option) - : client_(client), - metaStore_(metaStore), - dataStore_(dataStore), - snapshotRef_(snapshotRef), - cloneRef_(cloneRef), - cloneChunkSplitSize_(option.cloneChunkSplitSize), - cloneTempDir_(option.cloneTempDir), - mdsRootUser_(option.mdsRootUser), - createCloneChunkConcurrency_(option.createCloneChunkConcurrency), - recoverChunkConcurrency_(option.recoverChunkConcurrency), - clientAsyncMethodRetryTimeSec_(option.clientAsyncMethodRetryTimeSec), - clientAsyncMethodRetryIntervalMs_( - option.clientAsyncMethodRetryIntervalMs) {} - - ~CloneCoreImpl() { - } + CloneCoreImpl(std::shared_ptr client, + std::shared_ptr metaStore, + std::shared_ptr dataStore, + std::shared_ptr snapshotRef, + std::shared_ptr cloneRef, + const SnapshotCloneServerOptions option) + : client_(client), + metaStore_(metaStore), + dataStore_(dataStore), + snapshotRef_(snapshotRef), + cloneRef_(cloneRef), + cloneChunkSplitSize_(option.cloneChunkSplitSize), + cloneTempDir_(option.cloneTempDir), + mdsRootUser_(option.mdsRootUser), + createCloneChunkConcurrency_(option.createCloneChunkConcurrency), + recoverChunkConcurrency_(option.recoverChunkConcurrency), + clientAsyncMethodRetryTimeSec_(option.clientAsyncMethodRetryTimeSec), + clientAsyncMethodRetryIntervalMs_( + option.clientAsyncMethodRetryIntervalMs) {} + + ~CloneCoreImpl() {} int Init(); - int CloneOrRecoverPre(const UUID &source, - const std::string &user, - const std::string &destination, - bool lazyFlag, - CloneTaskType taskType, - std::string poolset, - CloneInfo *info) override; + int CloneOrRecoverPre(const UUID& source, const std::string& user, + const std::string& destination, bool lazyFlag, + CloneTaskType taskType, std::string poolset, + CloneInfo* info) override; void HandleCloneOrRecoverTask(std::shared_ptr task) override; - int CleanCloneOrRecoverTaskPre(const std::string &user, - const TaskIdType &taskId, - CloneInfo *cloneInfo) override; + int CleanCloneOrRecoverTaskPre(const std::string& user, + const TaskIdType& taskId, + CloneInfo* cloneInfo) override; void HandleCleanCloneOrRecoverTask( std::shared_ptr task) override; - int FlattenPre( - const std::string &user, - const std::string &fileName, - CloneInfo *cloneInfo) override; + int FlattenPre(const std::string& user, const std::string& fileName, + CloneInfo* cloneInfo) override; - int GetCloneInfoList(std::vector *taskList) override; - int GetCloneInfo(TaskIdType taskId, CloneInfo *cloneInfo) override; + int GetCloneInfoList(std::vector* taskList) override; + int GetCloneInfo(TaskIdType taskId, CloneInfo* cloneInfo) override; - int GetCloneInfoByFileName( - const std::string &fileName, std::vector *list) override; + int GetCloneInfoByFileName(const std::string& fileName, + std::vector* list) override; - std::shared_ptr GetSnapshotRef() { - return snapshotRef_; - } + std::shared_ptr GetSnapshotRef() { return snapshotRef_; } - std::shared_ptr GetCloneRef() { - return cloneRef_; - } + std::shared_ptr GetCloneRef() { return cloneRef_; } int HandleRemoveCloneOrRecoverTask( std::shared_ptr task) override; - int CheckFileExists(const std::string &filename, - uint64_t inodeId) override; - int HandleDeleteCloneInfo(const CloneInfo &cloneInfo) override; + int CheckFileExists(const std::string& filename, uint64_t inodeId) override; + int HandleDeleteCloneInfo(const CloneInfo& cloneInfo) override; private: /** - * @brief 从快照构建克隆/恢复的文件信息 + * @brief Build clone/restore file information from a snapshot * - * @param task 任务信息 - * @param[out] newFileInfo 新构建的文件信息 - * @param[out] segInfos 新构建文件的segment信息 + * @param task task information + * @param[out] newFileInfo Newly constructed file information + * @param[out] segInfos The segment information of the newly constructed + * file * - * @return 错误码 + * @return error code */ - int BuildFileInfoFromSnapshot( - std::shared_ptr task, - FInfo *newFileInfo, - CloneSegmentMap *segInfos); + int BuildFileInfoFromSnapshot(std::shared_ptr task, + FInfo* newFileInfo, + CloneSegmentMap* segInfos); /** - * @brief 从源文件构建克隆/恢复的文件信息 + * @brief Build clone/restore file information from the source file * - * @param task 任务信息 - * @param[out] newFileInfo 新构建的文件信息 - * @param[out] segInfos 新构建文件的segment信息 + * @param task task information + * @param[out] newFileInfo Newly constructed file information + * @param[out] segInfos The segment information of the newly constructed + * file * - * @return 错误码 + * @return error code */ - int BuildFileInfoFromFile( - std::shared_ptr task, - FInfo *newFileInfo, - CloneSegmentMap *segInfos); - + int BuildFileInfoFromFile(std::shared_ptr task, + FInfo* newFileInfo, CloneSegmentMap* segInfos); /** - * @brief 判断是否需要更新CloneChunkInfo信息中的chunkIdInfo + * @brief Determine whether the chunkIdInfo in the CloneChunkInfo + * needs to be updated * - * @param task 任务信息 + * @param task task information * - * @retVal true 需要更新 - * @retVal false 不需要更新 + * @retval true Update is needed + * @retval false No update is needed */ - bool NeedUpdateCloneMeta( - std::shared_ptr task); + bool NeedUpdateCloneMeta(std::shared_ptr task); /** - * @brief 判断clone失败后是否需要重试 + * @brief Determine whether a retry is needed after a clone failure * - * @param task 任务信息 - * @param retCode 错误码 + * @param task task information + * @param retCode error code * - * @retVal true 需要 - * @retVal false 不需要 + * @retval true Retry is needed + * @retval false No retry is needed */ - bool NeedRetry(std::shared_ptr task, - int retCode); + bool NeedRetry(std::shared_ptr task, int retCode); /** - * @brief 创建clone的元数据信息或更新元数据信息 + * @brief Create the clone's metadata information or update the existing + * metadata information * - * @param task 任务信息 - * @param[int][out] fInfo 新创建的文件信息 - * @param[int][out] segInfos 文件的segment信息 + * @param task task information + * @param[in][out] fInfo Newly created file information + * @param[in][out] segInfos The 
segment information of the file * - * @return 错误码 + * @return error code */ - int CreateOrUpdateCloneMeta( - std::shared_ptr task, - FInfo *fInfo, - CloneSegmentMap *segInfos); + int CreateOrUpdateCloneMeta(std::shared_ptr task, + FInfo* fInfo, CloneSegmentMap* segInfos); /** - * @brief 创建新clone文件 + * @brief Create a new clone file * - * @param task 任务信息 - * @param fInfo 需创建的文件信息 + * @param task task information + * @param fInfo File information to be created * - * @return 错误码 + * @return error code */ - int CreateCloneFile( - std::shared_ptr task, - const FInfo &fInfo); + int CreateCloneFile(std::shared_ptr task, + const FInfo& fInfo); /** - * @brief 创建新文件的源信息(创建segment) + * @brief Create source information for new files (create segments) * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information for new files + * @param segInfos The segment information required for a new file * - * @return 错误码 + * @return error code */ - int CreateCloneMeta( - std::shared_ptr task, - FInfo *fInfo, - CloneSegmentMap *segInfos); + int CreateCloneMeta(std::shared_ptr task, FInfo* fInfo, + CloneSegmentMap* segInfos); /** - * @brief 创建新clone文件的chunk + * @brief Create a chunk for a new clone file * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information for new files + * @param segInfos The segment information required for a new file * - * @return 错误码 + * @return error code */ - int CreateCloneChunk( - std::shared_ptr task, - const FInfo &fInfo, - CloneSegmentMap *segInfos); + int CreateCloneChunk(std::shared_ptr task, + const FInfo& fInfo, CloneSegmentMap* segInfos); /** - * @brief 开始CreateCloneChunk的异步请求 + * @brief Start asynchronous request for CreateCloneChunk * - * @param task 任务信息 - * @param tracker CreateCloneChunk任务追踪器 - * @param context CreateCloneChunk上下文 + * @param task task information + * @param tracker CreateCloneChunk Task Tracker + * @param context CreateCloneChunk context * - * @return 错误码 + * @return error code */ int StartAsyncCreateCloneChunk( std::shared_ptr task, @@ -411,55 +386,51 @@ class CloneCoreImpl : public CloneCore { std::shared_ptr context); /** - * @brief 处理CreateCloneChunk的结果并重试 + * @brief Process the results of CreateCloneChunk and try again * - * @param task 任务信息 - * @param tracker CreateCloneChunk任务追踪器 - * @param results CreateCloneChunk结果列表 + * @param task task information + * @param tracker CreateCloneChunk Task Tracker + * @param results CreateCloneChunk result list * - * @return 错误码 + * @return error code */ int HandleCreateCloneChunkResultsAndRetry( std::shared_ptr task, std::shared_ptr tracker, - const std::list &results); + const std::list& results); /** - * @brief 通知mds完成源数据创建步骤 + * @brief Notify mds to complete the step of creating source data * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information for new files + * @param segInfos The segment information required for a new file * - * @return 错误码 + * @return error code */ - int CompleteCloneMeta( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos); + int CompleteCloneMeta(std::shared_ptr task, + const FInfo& fInfo, const CloneSegmentMap& segInfos); /** - * @brief 恢复chunk,即通知chunkserver拷贝数据 + * @brief Restore Chunk, that is, notify Chunkserver to copy data * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * 
@param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information for new files + * @param segInfos The segment information required for a new file * - * @return 错误码 + * @return error code */ - int RecoverChunk( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos); + int RecoverChunk(std::shared_ptr task, const FInfo& fInfo, + const CloneSegmentMap& segInfos); /** - * @brief 开始RecoverChunk的异步请求 + * @brief Start asynchronous request for RecoverChunk * - * @param task 任务信息 - * @param tracker RecoverChunk异步任务跟踪器 - * @param context RecoverChunk上下文 + * @param task task information + * @param tracker RecoverChunk Asynchronous task tracker + * @param context RecoverChunk Context * - * @return 错误码 + * @return error code */ int StartAsyncRecoverChunkPart( std::shared_ptr task, @@ -467,110 +438,103 @@ class CloneCoreImpl : public CloneCore { std::shared_ptr context); /** - * @brief 继续RecoverChunk的其他部分的请求以及等待完成某些RecoverChunk + * @brief Continue requests for other parts of the RecoverChunk and wait for + * certain RecoverChunks to be completed * - * @param task 任务信息 - * @param tracker RecoverChunk异步任务跟踪者 - * @param[out] completeChunkNum 完成的chunk数 + * @param task task information + * @param tracker RecoverChunk Asynchronous task tracker + * @param[out] completeChunkNum Number of chunks completed * - * @return 错误码 + * @return error code */ int ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( std::shared_ptr task, std::shared_ptr tracker, - uint64_t *completeChunkNum); + uint64_t* completeChunkNum); /** - * @brief 修改克隆文件的owner + * @brief Modify the owner of the cloned file * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 + * @param task task information + * @param fInfo File information for new files * - * @return 错误码 + * @return error code */ - int ChangeOwner( - std::shared_ptr task, - const FInfo &fInfo); + int ChangeOwner(std::shared_ptr task, const FInfo& fInfo); /** - * @brief 重命名克隆文件 + * @brief Rename clone file * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 + * @param task task information + * @param fInfo File information for new files * - * @return 错误码 + * @return error code */ - int RenameCloneFile( - std::shared_ptr task, - const FInfo &fInfo); + int RenameCloneFile(std::shared_ptr task, + const FInfo& fInfo); /** - * @brief 通知mds完成数据创建 + * @brief Notify mds to complete data creation * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information for new files + * @param segInfos The segment information required for a new file * - * @return 错误码 + * @return error code */ - int CompleteCloneFile( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos); + int CompleteCloneFile(std::shared_ptr task, + const FInfo& fInfo, const CloneSegmentMap& segInfos); /** - * @brief 从快照克隆时,更新快照状态,通知克隆完成 + * @brief: When cloning from a snapshot, update the snapshot status and + * notify the clone to complete * - * @param task 任务信息 + * @param task task information * - * @return 错误码 + * @return error code */ - int UpdateSnapshotStatus( - std::shared_ptr task); + int UpdateSnapshotStatus(std::shared_ptr task); /** - * @brief 处理Lazy克隆/恢复阶段一结束 + * @brief Processing Lazy Clone/Restore Phase 1 End * - * @param task 任务信息 + * @param task task information */ - void HandleLazyCloneStage1Finish( - std::shared_ptr task); + void HandleLazyCloneStage1Finish(std::shared_ptr task); /** - * @brief 处理克隆/恢复成功 + * @brief Successfully processed clone/restore 
* - * @param task 任务信息 + * @param task task information */ void HandleCloneSuccess(std::shared_ptr task); - /** - * @brief 处理克隆或恢复失败 + * @brief processing clone or restore failed * - * @param task 任务信息 - * @param retCode 待处理的错误码 + * @param task task information + * @param retCode pending error code */ - void HandleCloneError(std::shared_ptr task, - int retCode); + void HandleCloneError(std::shared_ptr task, int retCode); /** - * @brief Lazy Clone 情况下处理Clone任务失败重试 + * @brief Lazy Clone failed to process Clone task and retry * - * @param task 任务信息 + * @param task task information */ void HandleCloneToRetry(std::shared_ptr task); /** - * @brief 处理清理克隆或恢复任务成功 + * @brief Successfully processed cleanup clone or restore task * - * @param task 任务信息 + * @param task task information */ void HandleCleanSuccess(std::shared_ptr task); /** - * @brief 处理清理克隆或恢复任务失败 + * @brief processing cleanup clone or recovery task failed * - * @param task 任务信息 + * @param task task information */ void HandleCleanError(std::shared_ptr task); @@ -587,19 +551,19 @@ class CloneCoreImpl : public CloneCore { std::shared_ptr snapshotRef_; std::shared_ptr cloneRef_; - // clone chunk分片大小 + // Clone chunk shard size uint64_t cloneChunkSplitSize_; - // 克隆临时目录 + // Clone temporary directory std::string cloneTempDir_; // mds root user std::string mdsRootUser_; - // CreateCloneChunk同时进行的异步请求数量 + // Number of asynchronous requests made simultaneously by CreateCloneChunk uint32_t createCloneChunkConcurrency_; - // RecoverChunk同时进行的异步请求数量 + // Number of asynchronous requests simultaneously made by RecoverChunk uint32_t recoverChunkConcurrency_; - // client异步请求重试时间 + // Client asynchronous request retry time uint64_t clientAsyncMethodRetryTimeSec_; - // 调用client异步方法重试时间间隔 + // Call client asynchronous method retry interval uint64_t clientAsyncMethodRetryIntervalMs_; }; diff --git a/src/snapshotcloneserver/clone/clone_service_manager.cpp b/src/snapshotcloneserver/clone/clone_service_manager.cpp index 9b7439fecf..98cf730c25 100644 --- a/src/snapshotcloneserver/clone/clone_service_manager.cpp +++ b/src/snapshotcloneserver/clone/clone_service_manager.cpp @@ -24,19 +24,19 @@ #include -#include #include +#include #include -#include "src/snapshotcloneserver/common/snapshotclone_metric.h" +#include "include/curve_compiler_specific.h" #include "src/common/snapshotclone/snapshotclone_define.h" #include "src/common/string_util.h" -#include "include/curve_compiler_specific.h" +#include "src/snapshotcloneserver/common/snapshotclone_metric.h" namespace curve { namespace snapshotcloneserver { -int CloneServiceManager::Init(const SnapshotCloneServerOptions &option) { +int CloneServiceManager::Init(const SnapshotCloneServerOptions& option) { dlockOpts_ = std::make_shared(option.dlockOpts); std::shared_ptr stage1Pool = std::make_shared(option.stage1PoolThreadNum); @@ -45,8 +45,8 @@ int CloneServiceManager::Init(const SnapshotCloneServerOptions &option) { std::shared_ptr commonPool = std::make_shared(option.commonPoolThreadNum); cloneServiceManagerBackend_->Init( - option.backEndReferenceRecordScanIntervalMs, - option.backEndReferenceFuncScanIntervalMs); + option.backEndReferenceRecordScanIntervalMs, + option.backEndReferenceFuncScanIntervalMs); return cloneTaskMgr_->Init(stage1Pool, stage2Pool, commonPool, option); } @@ -60,38 +60,34 @@ void CloneServiceManager::Stop() { cloneServiceManagerBackend_->Stop(); } -int CloneServiceManager::CloneFile(const UUID &source, - const std::string &user, - const std::string &destination, - const std::string &poolset, 
- bool lazyFlag, - std::shared_ptr closure, - TaskIdType *taskId) { - // 加锁防止并发 +int CloneServiceManager::CloneFile(const UUID& source, const std::string& user, + const std::string& destination, + const std::string& poolset, bool lazyFlag, + std::shared_ptr closure, + TaskIdType* taskId) { + // Locking to prevent concurrency NameLockGuard lockDestFileGuard(*destFileLock_, destination); brpc::ClosureGuard guard(closure.get()); closure->SetDestFileLock(destFileLock_); closure->SetDestFileName(destination); lockDestFileGuard.Release(); CloneInfo cloneInfo; - int ret = cloneCore_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, poolset, &cloneInfo); + int ret = cloneCore_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, poolset, + &cloneInfo); if (ret < 0) { if (kErrCodeTaskExist == ret) { - // 任务已存在的情况下返回成功,使接口幂等 + // Returns success if the task already exists, making the interface + // idempotent *taskId = cloneInfo.GetTaskId(); closure->SetTaskId(*taskId); closure->SetErrCode(kErrCodeSuccess); return kErrCodeSuccess; } LOG(ERROR) << "CloneOrRecoverPre error" - << ", ret = " << ret - << ", source = " << source - << ", user = " << user - << ", destination = " << destination - << ", lazyFlag = " << lazyFlag - << ", poolset = " << poolset; + << ", ret = " << ret << ", source = " << source + << ", user = " << user << ", destination = " << destination + << ", lazyFlag = " << lazyFlag << ", poolset = " << poolset; closure->SetErrCode(ret); return ret; } @@ -106,35 +102,31 @@ int CloneServiceManager::CloneFile(const UUID &source, return ret; } -int CloneServiceManager::RecoverFile(const UUID &source, - const std::string &user, - const std::string &destination, - bool lazyFlag, - std::shared_ptr closure, - TaskIdType *taskId) { - // 加锁防止并发 +int CloneServiceManager::RecoverFile( + const UUID& source, const std::string& user, const std::string& destination, + bool lazyFlag, std::shared_ptr closure, TaskIdType* taskId) { + // Locking to prevent concurrency NameLockGuard lockDestFileGuard(*destFileLock_, destination); brpc::ClosureGuard guard(closure.get()); closure->SetDestFileLock(destFileLock_); closure->SetDestFileName(destination); lockDestFileGuard.Release(); CloneInfo cloneInfo; - int ret = cloneCore_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kRecover, "", &cloneInfo); + int ret = + cloneCore_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kRecover, "", &cloneInfo); if (ret < 0) { if (kErrCodeTaskExist == ret) { - // 任务已存在的情况下返回成功,使接口幂等 + // Returns success if the task already exists, making the interface + // idempotent *taskId = cloneInfo.GetTaskId(); closure->SetTaskId(*taskId); closure->SetErrCode(kErrCodeSuccess); return kErrCodeSuccess; } LOG(ERROR) << "CloneOrRecoverPre error" - << ", ret = " << ret - << ", source = " << source - << ", user = " << user - << ", destination = " << destination + << ", ret = " << ret << ", source = " << source + << ", user = " << user << ", destination = " << destination << ", lazyFlag = " << lazyFlag; closure->SetErrCode(ret); return ret; @@ -151,29 +143,23 @@ int CloneServiceManager::RecoverFile(const UUID &source, } int CloneServiceManager::BuildAndPushCloneOrRecoverLazyTask( - CloneInfo cloneInfo, - std::shared_ptr closure) { + CloneInfo cloneInfo, std::shared_ptr closure) { brpc::ClosureGuard guard(closure.get()); TaskIdType taskId = cloneInfo.GetTaskId(); - auto cloneInfoMetric = - std::make_shared(taskId); + auto cloneInfoMetric 
= std::make_shared(taskId); closure->SetTaskId(taskId); std::shared_ptr taskInfo = - std::make_shared(cloneInfo, - cloneInfoMetric, closure); + std::make_shared(cloneInfo, cloneInfoMetric, closure); taskInfo->UpdateMetric(); std::shared_ptr task = - std::make_shared( - taskId, taskInfo, cloneCore_); + std::make_shared(taskId, taskInfo, cloneCore_); int ret = cloneTaskMgr_->PushStage1Task(task); if (ret < 0) { LOG(ERROR) << "CloneTaskMgr Push Task error" - << ", ret = " << ret - << ", going to remove task info."; - int ret2 = cloneCore_->HandleRemoveCloneOrRecoverTask( - taskInfo); + << ", ret = " << ret << ", going to remove task info."; + int ret2 = cloneCore_->HandleRemoveCloneOrRecoverTask(taskInfo); if (ret2 < 0) { LOG(ERROR) << "CloneServiceManager has encouter an internal error," << "remove taskInfo fail !"; @@ -186,29 +172,23 @@ int CloneServiceManager::BuildAndPushCloneOrRecoverLazyTask( } int CloneServiceManager::BuildAndPushCloneOrRecoverNotLazyTask( - CloneInfo cloneInfo, - std::shared_ptr closure) { + CloneInfo cloneInfo, std::shared_ptr closure) { brpc::ClosureGuard guard(closure.get()); TaskIdType taskId = cloneInfo.GetTaskId(); - auto cloneInfoMetric = - std::make_shared(taskId); + auto cloneInfoMetric = std::make_shared(taskId); closure->SetTaskId(taskId); std::shared_ptr taskInfo = - std::make_shared(cloneInfo, - cloneInfoMetric, nullptr); + std::make_shared(cloneInfo, cloneInfoMetric, nullptr); taskInfo->UpdateMetric(); std::shared_ptr task = - std::make_shared( - taskId, taskInfo, cloneCore_); + std::make_shared(taskId, taskInfo, cloneCore_); int ret = cloneTaskMgr_->PushCommonTask(task); if (ret < 0) { LOG(ERROR) << "CloneTaskMgr Push Task error" - << ", ret = " << ret - << ", going to remove task info."; - int ret2 = cloneCore_->HandleRemoveCloneOrRecoverTask( - taskInfo); + << ", ret = " << ret << ", going to remove task info."; + int ret2 = cloneCore_->HandleRemoveCloneOrRecoverTask(taskInfo); if (ret2 < 0) { LOG(ERROR) << "CloneServiceManager has encouter an internal error," << "remove taskInfo fail !"; @@ -218,17 +198,15 @@ int CloneServiceManager::BuildAndPushCloneOrRecoverNotLazyTask( return kErrCodeSuccess; } -int CloneServiceManager::Flatten( - const std::string &user, - const TaskIdType &taskId) { +int CloneServiceManager::Flatten(const std::string& user, + const TaskIdType& taskId) { CloneInfo cloneInfo; int ret = cloneCore_->FlattenPre(user, taskId, &cloneInfo); if (kErrCodeTaskExist == ret) { return kErrCodeSuccess; } else if (ret < 0) { LOG(ERROR) << "FlattenPre error" - << ", ret = " << ret - << ", user = " << user + << ", ret = " << ret << ", user = " << user << ", taskId = " << taskId; return ret; } @@ -240,10 +218,10 @@ int CloneServiceManager::Flatten( dlock_ = std::make_shared(*dlockOpts_); if (0 == dlock_->Init()) { LOG(ERROR) << "Init DLock error" - << ", pfx = " << dlockOpts_->pfx - << ", retryTimes = " << dlockOpts_->retryTimes - << ", timeout = " << dlockOpts_->ctx_timeoutMS - << ", ttl = " << dlockOpts_->ttlSec; + << ", pfx = " << dlockOpts_->pfx + << ", retryTimes = " << dlockOpts_->retryTimes + << ", timeout = " << dlockOpts_->ctx_timeoutMS + << ", ttl = " << dlockOpts_->ttlSec; return kErrCodeInternalError; } } @@ -253,11 +231,9 @@ int CloneServiceManager::Flatten( closure->SetDLock(dlock_); std::shared_ptr taskInfo = - std::make_shared( - cloneInfo, cloneInfoMetric, closure); - std::shared_ptr task = - std::make_shared( - cloneInfo.GetTaskId(), taskInfo, cloneCore_); + std::make_shared(cloneInfo, cloneInfoMetric, closure); + std::shared_ptr 
task = std::make_shared( + cloneInfo.GetTaskId(), taskInfo, cloneCore_); ret = cloneTaskMgr_->PushStage2Task(task); if (ret < 0) { LOG(ERROR) << "CloneTaskMgr Push Stage2 Task error" @@ -267,8 +243,8 @@ int CloneServiceManager::Flatten( return kErrCodeSuccess; } -int CloneServiceManager::GetCloneTaskInfo(const std::string &user, - std::vector *info) { +int CloneServiceManager::GetCloneTaskInfo(const std::string& user, + std::vector* info) { std::vector cloneInfos; int ret = cloneCore_->GetCloneInfoList(&cloneInfos); if (ret < 0) { @@ -280,16 +256,14 @@ int CloneServiceManager::GetCloneTaskInfo(const std::string &user, } int CloneServiceManager::GetCloneTaskInfoById( - const std::string &user, - const TaskIdType &taskId, - std::vector *info) { + const std::string& user, const TaskIdType& taskId, + std::vector* info) { std::vector cloneInfos; CloneInfo cloneInfo; int ret = cloneCore_->GetCloneInfo(taskId, &cloneInfo); if (ret < 0) { LOG(ERROR) << "GetCloneInfo fail" - << ", ret = " << ret - << ", taskId = " << taskId; + << ", ret = " << ret << ", taskId = " << taskId; return kErrCodeFileNotExist; } if (cloneInfo.GetUser() != user) { @@ -300,23 +274,20 @@ int CloneServiceManager::GetCloneTaskInfoById( } int CloneServiceManager::GetCloneTaskInfoByName( - const std::string &user, - const std::string &fileName, - std::vector *info) { + const std::string& user, const std::string& fileName, + std::vector* info) { std::vector cloneInfos; int ret = cloneCore_->GetCloneInfoByFileName(fileName, &cloneInfos); if (ret < 0) { LOG(ERROR) << "GetCloneInfoByFileName fail" - << ", ret = " << ret - << ", fileName = " << fileName; + << ", ret = " << ret << ", fileName = " << fileName; return kErrCodeFileNotExist; } return GetCloneTaskInfoInner(cloneInfos, user, info); } int CloneServiceManager::GetCloneTaskInfoByFilter( - const CloneFilterCondition &filter, - std::vector *info) { + const CloneFilterCondition& filter, std::vector* info) { std::vector cloneInfos; int ret = cloneCore_->GetCloneInfoList(&cloneInfos); if (ret < 0) { @@ -327,9 +298,9 @@ int CloneServiceManager::GetCloneTaskInfoByFilter( return GetCloneTaskInfoInner(cloneInfos, filter, info); } -int CloneServiceManager::GetCloneRefStatus(const std::string &src, - CloneRefStatus *refStatus, - std::vector *needCheckFiles) { +int CloneServiceManager::GetCloneRefStatus( + const std::string& src, CloneRefStatus* refStatus, + std::vector* needCheckFiles) { std::vector cloneInfos; int ret = cloneCore_->GetCloneInfoList(&cloneInfos); if (ret < 0) { @@ -338,10 +309,10 @@ int CloneServiceManager::GetCloneRefStatus(const std::string &src, } *refStatus = CloneRefStatus::kNoRef; - for (auto &cloneInfo : cloneInfos) { + for (auto& cloneInfo : cloneInfos) { if (cloneInfo.GetSrc() == src) { switch (cloneInfo.GetStatus()) { - case CloneStatus::done : + case CloneStatus::done: case CloneStatus::error: { break; } @@ -370,14 +341,13 @@ int CloneServiceManager::GetCloneRefStatus(const std::string &src, } int CloneServiceManager::GetCloneTaskInfoInner( - std::vector cloneInfos, - CloneFilterCondition filter, - std::vector *info) { + std::vector cloneInfos, CloneFilterCondition filter, + std::vector* info) { int ret = kErrCodeSuccess; - for (auto &cloneInfo : cloneInfos) { + for (auto& cloneInfo : cloneInfos) { if (filter.IsMatchCondition(cloneInfo)) { switch (cloneInfo.GetStatus()) { - case CloneStatus::done : { + case CloneStatus::done: { info->emplace_back(cloneInfo, kProgressCloneComplete); break; } @@ -395,7 +365,7 @@ int CloneServiceManager::GetCloneTaskInfoInner( 
cloneTaskMgr_->GetTask(taskId); if (task != nullptr) { info->emplace_back(cloneInfo, - task->GetTaskInfo()->GetProgress()); + task->GetTaskInfo()->GetProgress()); } else { TaskCloneInfo tcInfo; ret = GetFinishedCloneTask(taskId, &tcInfo); @@ -421,14 +391,13 @@ int CloneServiceManager::GetCloneTaskInfoInner( } int CloneServiceManager::GetCloneTaskInfoInner( - std::vector cloneInfos, - const std::string &user, - std::vector *info) { + std::vector cloneInfos, const std::string& user, + std::vector* info) { int ret = kErrCodeSuccess; - for (auto &cloneInfo : cloneInfos) { + for (auto& cloneInfo : cloneInfos) { if (cloneInfo.GetUser() == user) { switch (cloneInfo.GetStatus()) { - case CloneStatus::done : { + case CloneStatus::done: { info->emplace_back(cloneInfo, kProgressCloneComplete); break; } @@ -446,7 +415,7 @@ int CloneServiceManager::GetCloneTaskInfoInner( cloneTaskMgr_->GetTask(taskId); if (task != nullptr) { info->emplace_back(cloneInfo, - task->GetTaskInfo()->GetProgress()); + task->GetTaskInfo()->GetProgress()); } else { TaskCloneInfo tcInfo; ret = GetFinishedCloneTask(taskId, &tcInfo); @@ -471,7 +440,7 @@ int CloneServiceManager::GetCloneTaskInfoInner( return kErrCodeSuccess; } -bool CloneFilterCondition::IsMatchCondition(const CloneInfo &cloneInfo) { +bool CloneFilterCondition::IsMatchCondition(const CloneInfo& cloneInfo) { if (user_ != nullptr && *user_ != cloneInfo.GetUser()) { return false; } @@ -489,45 +458,39 @@ bool CloneFilterCondition::IsMatchCondition(const CloneInfo &cloneInfo) { } int status; - if (status_ != nullptr - && common::StringToInt(*status_, &status) == false) { + if (status_ != nullptr && common::StringToInt(*status_, &status) == false) { return false; } - if (status_ != nullptr - && common::StringToInt(*status_, &status) == true - && status != static_cast(cloneInfo.GetStatus())) { + if (status_ != nullptr && common::StringToInt(*status_, &status) == true && + status != static_cast(cloneInfo.GetStatus())) { return false; } int type; - if (type_ != nullptr - && common::StringToInt(*type_, &type) == false) { + if (type_ != nullptr && common::StringToInt(*type_, &type) == false) { return false; } - if (type_ != nullptr - && common::StringToInt(*type_, &type) == true - && type != static_cast(cloneInfo.GetTaskType())) { + if (type_ != nullptr && common::StringToInt(*type_, &type) == true && + type != static_cast(cloneInfo.GetTaskType())) { return false; } return true; } -int CloneServiceManager::GetFinishedCloneTask( - const TaskIdType &taskId, - TaskCloneInfo *taskCloneInfoOut) { +int CloneServiceManager::GetFinishedCloneTask(const TaskIdType& taskId, + TaskCloneInfo* taskCloneInfoOut) { CloneInfo newInfo; int ret = cloneCore_->GetCloneInfo(taskId, &newInfo); if (ret < 0) { LOG(ERROR) << "GetCloneInfo fail" - << ", ret = " << ret - << ", taskId = " << taskId; + << ", ret = " << ret << ", taskId = " << taskId; return ret; } switch (newInfo.GetStatus()) { - case CloneStatus::done : { + case CloneStatus::done: { taskCloneInfoOut->SetCloneInfo(newInfo); taskCloneInfoOut->SetCloneProgress(kProgressCloneComplete); break; @@ -544,32 +507,29 @@ int CloneServiceManager::GetFinishedCloneTask( } default: LOG(ERROR) << "can not reach here!" 
- << " status = " << static_cast( - newInfo.GetStatus()); - // 当更新数据库失败时,有可能进入这里 + << " status = " << static_cast(newInfo.GetStatus()); + // When updating the database fails, it is possible to enter here return kErrCodeInternalError; } return kErrCodeSuccess; } -int CloneServiceManager::CleanCloneTask(const std::string &user, - const TaskIdType &taskId) { +int CloneServiceManager::CleanCloneTask(const std::string& user, + const TaskIdType& taskId) { CloneInfo cloneInfo; int ret = cloneCore_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfo); if (kErrCodeTaskExist == ret) { return kErrCodeSuccess; } else if (ret < 0) { LOG(ERROR) << "CleanCloneOrRecoverTaskPre fail" - << ", ret = " << ret - << ", user = " << user + << ", ret = " << ret << ", user = " << user << ", taskid = " << taskId; return ret; } std::shared_ptr taskInfo = std::make_shared(cloneInfo, nullptr, nullptr); - std::shared_ptr task = - std::make_shared( - cloneInfo.GetTaskId(), taskInfo, cloneCore_); + std::shared_ptr task = std::make_shared( + cloneInfo.GetTaskId(), taskInfo, cloneCore_); ret = cloneTaskMgr_->PushCommonTask(task); if (ret < 0) { LOG(ERROR) << "Push Task error, " @@ -579,40 +539,40 @@ int CloneServiceManager::CleanCloneTask(const std::string &user, return kErrCodeSuccess; } -int CloneServiceManager::RecoverCloneTaskInternal(const CloneInfo &cloneInfo) { +int CloneServiceManager::RecoverCloneTaskInternal(const CloneInfo& cloneInfo) { auto cloneInfoMetric = std::make_shared(cloneInfo.GetTaskId()); auto closure = std::make_shared(); std::shared_ptr taskInfo = - std::make_shared( - cloneInfo, cloneInfoMetric, closure); + std::make_shared(cloneInfo, cloneInfoMetric, closure); taskInfo->UpdateMetric(); - std::shared_ptr task = - std::make_shared( - cloneInfo.GetTaskId(), taskInfo, cloneCore_); + std::shared_ptr task = std::make_shared( + cloneInfo.GetTaskId(), taskInfo, cloneCore_); bool isLazy = cloneInfo.GetIsLazy(); int ret = kErrCodeSuccess; - // Lazy 克隆/恢复 + // Lazy Clone/Restore if (isLazy) { CloneStep step = cloneInfo.GetNextStep(); - // 处理kRecoverChunk,kCompleteCloneFile,kEnd这三个阶段的Push到stage2Pool - // 如果克隆source类型是file,阶段为kCreateCloneChunk和kCreateCloneMeta也需要push到stage2Pool // NOLINT + // Process the Push to stage2Pool for the three stages of + // kRecoverChunk,kCompleteCloneFile, and kEnd If the clone source type + // is file and the stages are kCreateCloneChunk and kCreateCloneMeta, + // they also need to be pushed to stage2Pool// NOLINT if (CloneStep::kRecoverChunk == step || - CloneStep::kCompleteCloneFile == step || - CloneStep::kEnd == step || - (CloneStep::kCreateCloneChunk == step - && cloneInfo.GetFileType() == CloneFileType::kFile) || - (CloneStep::kCreateCloneMeta == step - && cloneInfo.GetFileType() == CloneFileType::kFile)) { + CloneStep::kCompleteCloneFile == step || CloneStep::kEnd == step || + (CloneStep::kCreateCloneChunk == step && + cloneInfo.GetFileType() == CloneFileType::kFile) || + (CloneStep::kCreateCloneMeta == step && + cloneInfo.GetFileType() == CloneFileType::kFile)) { ret = cloneTaskMgr_->PushStage2Task(task); if (ret < 0) { LOG(ERROR) << "CloneTaskMgr Push Stage2 Task error" << ", ret = " << ret; return ret; } - // 否则push到stage1Pool + // Otherwise, push to stage1Pool } else { - // stage1的task包含了异步的请求的返回,需要加锁 + // The task of stage1 contains the return of asynchronous requests + // that require locking std::string destination = cloneInfo.GetDest(); NameLockGuard lockDestFileGuard(*destFileLock_, destination); closure->SetDestFileLock(destFileLock_); @@ -625,7 +585,7 @@ int 
CloneServiceManager::RecoverCloneTaskInternal(const CloneInfo &cloneInfo) { return ret; } } - // 非Lazy 克隆/恢复push到commonPool + // Non-Lazy clone/restore is pushed to commonPool } else { ret = cloneTaskMgr_->PushCommonTask(task); if (ret < 0) { @@ -637,13 +597,11 @@ int CloneServiceManager::RecoverCloneTaskInternal(const CloneInfo &cloneInfo) { return kErrCodeSuccess; } -int CloneServiceManager::RecoverCleanTaskInternal(const CloneInfo &cloneInfo) { +int CloneServiceManager::RecoverCleanTaskInternal(const CloneInfo& cloneInfo) { std::shared_ptr taskInfo = - std::make_shared( - cloneInfo, nullptr, nullptr); + std::make_shared(cloneInfo, nullptr, nullptr); - std::shared_ptr task = - std::make_shared( - cloneInfo.GetTaskId(), taskInfo, cloneCore_); + std::shared_ptr task = std::make_shared( + cloneInfo.GetTaskId(), taskInfo, cloneCore_); int ret = cloneTaskMgr_->PushCommonTask(task); if (ret < 0) { LOG(ERROR) << "CloneTaskMgr Push Task error" @@ -660,26 +618,26 @@ int CloneServiceManager::RecoverCloneTask() { LOG(ERROR) << "GetCloneInfoList fail"; return ret; } - for (auto &cloneInfo : list) { + for (auto& cloneInfo : list) { switch (cloneInfo.GetStatus()) { case CloneStatus::retrying: { - // 重置重试任务的状态 + // Reset the status of the retrying task if (cloneInfo.GetTaskType() == CloneTaskType::kClone) { cloneInfo.SetStatus(CloneStatus::cloning); } else { cloneInfo.SetStatus(CloneStatus::recovering); } } - FALLTHROUGH_INTENDED; + FALLTHROUGH_INTENDED; case CloneStatus::cloning: case CloneStatus::recovering: { - // 建立快照或镜像的引用关系 + // Establish the reference relationship for the snapshot or + // image if (CloneFileType::kSnapshot == cloneInfo.GetFileType()) { cloneCore_->GetSnapshotRef()->IncrementSnapshotRef( cloneInfo.GetSrc()); } else { - cloneCore_->GetCloneRef()->IncrementRef( - cloneInfo.GetSrc()); + cloneCore_->GetCloneRef()->IncrementRef(cloneInfo.GetSrc()); } ret = RecoverCloneTaskInternal(cloneInfo); if (ret < 0) { @@ -696,13 +654,13 @@ int CloneServiceManager::RecoverCloneTask() { break; } case CloneStatus::metaInstalled: { - // metaInstalled 状态下的克隆对文件仍然有依赖,需要建立引用关系 + // A clone in the metaInstalled state still depends on the + // source file, so the reference relationship must be established if (CloneFileType::kSnapshot == cloneInfo.GetFileType()) { cloneCore_->GetSnapshotRef()->IncrementSnapshotRef( cloneInfo.GetSrc()); } else { - cloneCore_->GetCloneRef()->IncrementRef( - cloneInfo.GetSrc()); + cloneCore_->GetCloneRef()->IncrementRef(cloneInfo.GetSrc()); } break; } @@ -713,52 +671,59 @@ int CloneServiceManager::RecoverCloneTask() { return kErrCodeSuccess; } -// 当clone处于matainstall状态,且克隆卷已经删除的情况下,原卷的引用计数没有减。 -// 这个后台线程处理函数周期性的检查这个场景,如果发现有clone处于metaintalled状态 -// 且克隆卷已经删除,就去删除这条无效的clone信息,并减去原卷的引用计数。 -// 如果原卷是镜像且引用计数减为0,还需要去mds把原卷的状态改为created。 +// When a clone is in the metaInstalled state and the cloned volume has +// already been deleted, the reference count of the source volume is not +// decremented. This background thread function periodically checks for this +// scenario: if it finds a clone in the metaInstalled state whose cloned +// volume has been deleted, it deletes the invalid clone record and decrements +// the reference count of the source volume. If the source volume is an image +// and its reference count drops to 0, the volume status must also be changed +// back to 'created' on the MDS. 
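// The scan described above reduces to a per-record decision plus a re-check
// before deletion. RecordSketch and its fields are hypothetical stand-ins for
// CloneInfo; the flow mirrors the comment: only lazy clones stuck in
// metaInstalled whose destination volume is gone are removed, and the record
// is re-read before acting so a concurrently resumed task is not deleted.
#include <string>

enum class StatusSketch { metaInstalled, cloning, done, error };

struct RecordSketch {
    StatusSketch status;
    bool isLazy;
    bool destExists;  // result of the CheckFileExists probe
};

// True when the record is stale and should be removed (with the source
// volume's reference count decremented afterwards).
bool IsStaleMetaInstalled(const RecordSketch& r) {
    return r.status == StatusSketch::metaInstalled && r.isLazy &&
           !r.destExists;
}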
void CloneServiceManagerBackendImpl::Func() { LOG(INFO) << "CloneServiceManager BackEndReferenceScanFunc start"; while (!isStop_.load()) { std::vector cloneInfos; int ret = cloneCore_->GetCloneInfoList(&cloneInfos); if (ret < 0) { - LOG(WARNING) << "GetCloneInfoList fail" << ", ret = " << ret; + LOG(WARNING) << "GetCloneInfoList fail" + << ", ret = " << ret; } int deleteCount = 0; - for (auto &it : cloneInfos) { - if (it.GetStatus() == CloneStatus::metaInstalled - && it.GetIsLazy() == true) { - // 检查destination在不在 + for (auto& it : cloneInfos) { + if (it.GetStatus() == CloneStatus::metaInstalled && + it.GetIsLazy() == true) { + // Check if the destination is available if (it.GetTaskType() == CloneTaskType::kClone) { ret = cloneCore_->CheckFileExists(it.GetDest(), - it.GetDestId()); + it.GetDestId()); } else { - // rename时,inodeid恢复成 + // When renaming, the inodeid is restored to ret = cloneCore_->CheckFileExists(it.GetDest(), - it.GetOriginId()); + it.GetOriginId()); } if (ret == kErrCodeFileNotExist) { - // 如果克隆卷是metaInstalled状态,且destination文件不存在, - // 删除这条cloneInfo,并减引用计数 + // If the cloned volume is in a metaInstalled state and the + // destination file does not exist, Delete this cloneInfo + // and subtract the reference count TaskIdType taskId = it.GetTaskId(); CloneInfo cloneInfo; ret = cloneCore_->GetCloneInfo(taskId, &cloneInfo); if (ret != kErrCodeSuccess) { - // cloneInfo已经不存在了 + // CloneInfo no longer exists continue; } - // 再次检查cloneInfo是否是metaInstalled状态 + // Check again if cloneInfo is in the metaInstalled state if (cloneInfo.GetStatus() != CloneStatus::metaInstalled) { continue; } ret = cloneCore_->HandleDeleteCloneInfo(cloneInfo); if (ret != kErrCodeSuccess) { - LOG(WARNING) << "HandleDeleteCloneInfo fail, ret = " - << ret << ", cloneInfo = " << cloneInfo; + LOG(WARNING) + << "HandleDeleteCloneInfo fail, ret = " << ret + << ", cloneInfo = " << cloneInfo; } else { deleteCount++; } @@ -771,14 +736,14 @@ void CloneServiceManagerBackendImpl::Func() { LOG(INFO) << "backend scan list, size = " << cloneInfos.size() << ", delete clone record count = " << deleteCount; - // 控制每轮扫描间隔 + // Control the scanning interval of each round roundWaitInterval_.WaitForNextExcution(); } LOG(INFO) << "CloneServiceManager BackEndReferenceScanFunc exit"; } void CloneServiceManagerBackendImpl::Init(uint32_t recordIntevalMs, - uint32_t roundIntevalMs) { + uint32_t roundIntevalMs) { recordWaitInterval_.Init(recordIntevalMs); roundWaitInterval_.Init(roundIntevalMs); diff --git a/src/snapshotcloneserver/clone/clone_service_manager.h b/src/snapshotcloneserver/clone/clone_service_manager.h index 0cd66e9d09..70268a9942 100644 --- a/src/snapshotcloneserver/clone/clone_service_manager.h +++ b/src/snapshotcloneserver/clone/clone_service_manager.h @@ -22,18 +22,18 @@ #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_SERVICE_MANAGER_H_ #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_SERVICE_MANAGER_H_ +#include #include #include -#include +#include "src/common/concurrent/dlock.h" +#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/common/wait_interval.h" +#include "src/snapshotcloneserver/clone/clone_closure.h" #include "src/snapshotcloneserver/clone/clone_core.h" #include "src/snapshotcloneserver/clone/clone_task.h" #include "src/snapshotcloneserver/clone/clone_task_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/config.h" -#include "src/snapshotcloneserver/clone/clone_closure.h" -#include "src/common/concurrent/dlock.h" namespace curve 
{ namespace snapshotcloneserver { @@ -44,26 +44,16 @@ class TaskCloneInfo { public: TaskCloneInfo() = default; - TaskCloneInfo(const CloneInfo &cloneInfo, - uint32_t progress) - : cloneInfo_(cloneInfo), - cloneProgress_(progress) {} + TaskCloneInfo(const CloneInfo& cloneInfo, uint32_t progress) + : cloneInfo_(cloneInfo), cloneProgress_(progress) {} - void SetCloneInfo(const CloneInfo &cloneInfo) { - cloneInfo_ = cloneInfo; - } + void SetCloneInfo(const CloneInfo& cloneInfo) { cloneInfo_ = cloneInfo; } - CloneInfo GetCloneInfo() const { - return cloneInfo_; - } + CloneInfo GetCloneInfo() const { return cloneInfo_; } - void SetCloneProgress(uint32_t progress) { - cloneProgress_ = progress; - } + void SetCloneProgress(uint32_t progress) { cloneProgress_ = progress; } - uint32_t GetCloneProgress() const { - return cloneProgress_; - } + uint32_t GetCloneProgress() const { return cloneProgress_; } Json::Value ToJsonObj() const { Json::Value cloneTaskObj; @@ -72,88 +62,76 @@ class TaskCloneInfo { cloneTaskObj["User"] = info.GetUser(); cloneTaskObj["File"] = info.GetDest(); cloneTaskObj["Src"] = info.GetSrc(); - cloneTaskObj["TaskType"] = static_cast ( - info.GetTaskType()); - cloneTaskObj["TaskStatus"] = static_cast ( - info.GetStatus()); + cloneTaskObj["TaskType"] = static_cast(info.GetTaskType()); + cloneTaskObj["TaskStatus"] = static_cast(info.GetStatus()); cloneTaskObj["IsLazy"] = info.GetIsLazy(); - cloneTaskObj["NextStep"] = static_cast (info.GetNextStep()); + cloneTaskObj["NextStep"] = static_cast(info.GetNextStep()); cloneTaskObj["Time"] = info.GetTime(); cloneTaskObj["Progress"] = GetCloneProgress(); - cloneTaskObj["FileType"] = static_cast (info.GetFileType()); + cloneTaskObj["FileType"] = static_cast(info.GetFileType()); return cloneTaskObj; } - void LoadFromJsonObj(const Json::Value &jsonObj) { + void LoadFromJsonObj(const Json::Value& jsonObj) { CloneInfo info; info.SetTaskId(jsonObj["UUID"].asString()); info.SetUser(jsonObj["User"].asString()); info.SetDest(jsonObj["File"].asString()); info.SetSrc(jsonObj["Src"].asString()); - info.SetTaskType(static_cast( - jsonObj["TaskType"].asInt())); - info.SetStatus(static_cast( - jsonObj["TaskStatus"].asInt())); + info.SetTaskType( + static_cast(jsonObj["TaskType"].asInt())); + info.SetStatus(static_cast(jsonObj["TaskStatus"].asInt())); info.SetIsLazy(jsonObj["IsLazy"].asBool()); info.SetNextStep(static_cast(jsonObj["NextStep"].asInt())); info.SetTime(jsonObj["Time"].asUInt64()); - info.SetFileType(static_cast( - jsonObj["FileType"].asInt())); + info.SetFileType( + static_cast(jsonObj["FileType"].asInt())); SetCloneInfo(info); } private: - CloneInfo cloneInfo_; - uint32_t cloneProgress_; + CloneInfo cloneInfo_; + uint32_t cloneProgress_; }; class CloneFilterCondition { public: CloneFilterCondition() - : uuid_(nullptr), - source_(nullptr), - destination_(nullptr), - user_(nullptr), - status_(nullptr), - type_(nullptr) {} - - CloneFilterCondition(const std::string *uuid, const std::string *source, - const std::string *destination, const std::string *user, - const std::string *status, const std::string *type) - : uuid_(uuid), - source_(source), - destination_(destination), - user_(user), - status_(status), - type_(type) {} - bool IsMatchCondition(const CloneInfo &cloneInfo); - - void SetUuid(const std::string *uuid) { - uuid_ = uuid; - } - void SetSource(const std::string *source) { - source_ = source; - } - void SetDestination(const std::string *destination) { + : uuid_(nullptr), + source_(nullptr), + destination_(nullptr), + user_(nullptr), + 
status_(nullptr), + type_(nullptr) {} + + CloneFilterCondition(const std::string* uuid, const std::string* source, + const std::string* destination, + const std::string* user, const std::string* status, + const std::string* type) + : uuid_(uuid), + source_(source), + destination_(destination), + user_(user), + status_(status), + type_(type) {} + bool IsMatchCondition(const CloneInfo& cloneInfo); + + void SetUuid(const std::string* uuid) { uuid_ = uuid; } + void SetSource(const std::string* source) { source_ = source; } + void SetDestination(const std::string* destination) { destination_ = destination; } - void SetUser(const std::string *user) { - user_ = user; - } - void SetStatus(const std::string *status) { - status_ = status; - } - void SetType(const std::string *type) { - type_ = type; - } + void SetUser(const std::string* user) { user_ = user; } + void SetStatus(const std::string* status) { status_ = status; } + void SetType(const std::string* type) { type_ = type; } private: - const std::string *uuid_; - const std::string *source_; - const std::string *destination_; - const std::string *user_; - const std::string *status_; - const std::string *type_; + const std::string* uuid_; + const std::string* source_; + const std::string* destination_; + const std::string* user_; + const std::string* status_; + const std::string* type_; }; class CloneServiceManagerBackend { public: @@ -161,7 +139,8 @@ class CloneServiceManagerBackend { virtual ~CloneServiceManagerBackend() {} /** - * @brief 后台扫描线程执行函数,扫描克隆卷是否存在 + * @brief Background scan thread function that checks whether cloned + * volumes still exist * */ virtual void Func() = 0; @@ -177,12 +156,9 @@ class CloneServiceManagerBackendImpl : public CloneServiceManagerBackend { public: explicit CloneServiceManagerBackendImpl( std::shared_ptr cloneCore) - : cloneCore_(cloneCore), - isStop_(true) { - } + : cloneCore_(cloneCore), isStop_(true) {} - ~CloneServiceManagerBackendImpl() { - } + ~CloneServiceManagerBackendImpl() {} void Func() override; void Init(uint32_t recordIntevalMs, uint32_t roundIntevalMs) override; @@ -191,13 +167,14 @@ class CloneServiceManagerBackendImpl : public CloneServiceManagerBackend { private: std::shared_ptr cloneCore_; - // 后台扫描线程,扫描clone卷是否存在 + // Background scan thread that checks whether clone volumes exist std::thread backEndReferenceScanThread_; - // 当前后台扫描是否停止,用于支持start,stop功能 + // Whether the current background scan is stopped; 
used to support the start and + // stop operations std::atomic_bool isStop_; - // 后台扫描线程记录使用定时器 + // Timer used by the background scan thread between records common::WaitInterval recordWaitInterval_; - // 后台扫描线程每轮使用定时器 + // Timer used by the background scan thread between scan rounds common::WaitInterval roundWaitInterval_; }; @@ -207,250 +184,242 @@ class CloneServiceManager { std::shared_ptr cloneTaskMgr, std::shared_ptr cloneCore, std::shared_ptr cloneServiceManagerBackend) - : cloneTaskMgr_(cloneTaskMgr), - cloneCore_(cloneCore), - cloneServiceManagerBackend_(cloneServiceManagerBackend) { + : cloneTaskMgr_(cloneTaskMgr), + cloneCore_(cloneCore), + cloneServiceManagerBackend_(cloneServiceManagerBackend) { destFileLock_ = std::make_shared(); } virtual ~CloneServiceManager() {} /** - * @brief 初始化 + * @brief Initialization * - * @return 错误码 + * @return error code */ - virtual int Init(const SnapshotCloneServerOptions &option); + virtual int Init(const SnapshotCloneServerOptions& option); /** - * @brief 启动服务 + * @brief Start the service * - * @return 错误码 + * @return error code */ virtual int Start(); /** - * @brief 停止服务 + * @brief Stop the service * */ virtual void Stop(); /** - * @brief 从文件或快照克隆出一个文件 + * @brief Clone a file from a file or snapshot * - * @param source 文件或快照的uuid - * @param user 文件或快照的用户 - * @param destination 目标文件 - * @param lazyFlag 是否lazy模式 - * @param closure 异步回调实体 - * @param[out] taskId 任务ID + * @param source UUID of the file or snapshot + * @param user The user of the file or snapshot + * @param destination The destination file + * @param lazyFlag Whether to use lazy mode + * @param closure Asynchronous callback entity + * @param[out] taskId Task ID * - * @return 错误码 + * @return error code */ - virtual int CloneFile(const UUID &source, - const std::string &user, - const std::string &destination, - const std::string &poolset, - bool lazyFlag, - std::shared_ptr closure, - TaskIdType *taskId); + virtual int CloneFile(const UUID& source, const std::string& user, + const std::string& destination, + const std::string& poolset, bool lazyFlag, + std::shared_ptr closure, + TaskIdType* taskId); /** - * @brief 从文件或快照恢复一个文件 + * @brief Restore a file from a file or snapshot * - * @param source 文件或快照的uuid - * @param user 文件或快照的用户 - * @param destination 目标文件名 - * @param lazyFlag 是否lazy模式 - * @param closure 异步回调实体 - * @param[out] taskId 任务ID + * @param source UUID of the file or snapshot + * @param user The user of the file or snapshot + * @param destination The destination file name + * @param lazyFlag Whether to use lazy mode + * @param closure Asynchronous callback entity + * @param[out] taskId Task ID * - * @return 错误码 + * @return error code */ - virtual int RecoverFile(const UUID &source, - const std::string &user, - const std::string &destination, - bool lazyFlag, - std::shared_ptr closure, - TaskIdType *taskId); + virtual int RecoverFile(const UUID& source, const std::string& user, + const std::string& destination, bool lazyFlag, + std::shared_ptr closure, + TaskIdType* taskId); /** - * @brief 安装克隆文件的数据,用于Lazy克隆 + * @brief Install the data of a clone file, used for Lazy cloning * - * @param user 用户 - * @param taskId 任务ID + * @param user User + * @param taskId Task ID * - * @return 错误码 + * @return error code */ - virtual int Flatten( - const std::string &user, - const TaskIdType &taskId); + virtual int Flatten(const std::string& user, const TaskIdType& taskId); /** - * @brief 查询某个用户的克隆/恢复任务信息 + * @brief Query the clone/restore task information of a given user * - * 
@param user username + * @param info Clone/Restore Task Information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfo(const std::string &user, - std::vector *info); + virtual int GetCloneTaskInfo(const std::string& user, + std::vector* info); /** - * @brief 通过Id查询某个用户的克隆/恢复任务信息 + * @brief: Query the clone/restore task information of a certain user + * through ID * - * @param user 用户名 - * @param taskId 指定的任务Id - * @param info 克隆/恢复任务信息 + * @param user username + * @param taskId Task Id specified + * @param info Clone/Restore Task Information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfoById( - const std::string &user, - const TaskIdType &taskId, - std::vector *info); + virtual int GetCloneTaskInfoById(const std::string& user, + const TaskIdType& taskId, + std::vector* info); /** - * @brief 通过文件名查询某个用户的克隆/恢复任务信息 + * @brief Query the clone/restore task information of a certain user through + * a file name * - * @param user 用户名 - * @param fileName 指定的文件名 - * @param info 克隆/恢复任务信息 + * @param user username + * @param fileName The file name specified + * @param info Clone/Restore Task Information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfoByName( - const std::string &user, - const std::string &fileName, - std::vector *info); + virtual int GetCloneTaskInfoByName(const std::string& user, + const std::string& fileName, + std::vector* info); /** - * @brief 通过过滤条件查询某个用户的克隆/恢复任务信息 + * @brief: Query a user's clone/restore task information through filtering + * criteria * - * @param filter 过滤条件 - * @param info 克隆/恢复任务信息 + * @param filter filtering conditions + * @param info Clone/Restore Task Information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfoByFilter(const CloneFilterCondition &filter, - std::vector *info); + virtual int GetCloneTaskInfoByFilter(const CloneFilterCondition& filter, + std::vector* info); /** - * @brief 查询src是否有依赖 + * @brief: Check if src has dependencies * - * @param src 指定的文件名 - * @param refStatus 0表示没有依赖,1表示有依赖,2表示需要进一步确认 - * @param needCheckFiles 需要进一步确认的文件列表 + * @param src specified file name + * @param refStatus 0 indicates no dependencies, 1 indicates dependencies, + * and 2 indicates further confirmation is needed + * @param needCheckFiles List of files that require further confirmation * - * @return 错误码 + * @return error code */ - virtual int GetCloneRefStatus(const std::string &src, - CloneRefStatus *refStatus, - std::vector *needCheckFiles); + virtual int GetCloneRefStatus(const std::string& src, + CloneRefStatus* refStatus, + std::vector* needCheckFiles); /** - * @brief 清除失败的clone/Recover任务、状态、文件 + * @brief Clear failed clone/recover tasks, status, files * - * @param user 用户名 - * @param taskId 任务Id + * @param user username + * @param taskId Task Id * - * @return 错误码 + * @return error code */ - virtual int CleanCloneTask(const std::string &user, - const TaskIdType &taskId); + virtual int CleanCloneTask(const std::string& user, + const TaskIdType& taskId); /** - * @brief 重启后恢复未完成clone和recover任务 + * @brief: Restore unfinished clone and recover tasks after restarting * - * @return 错误码 + * @return error code */ virtual int RecoverCloneTask(); // for test - void SetDLock(std::shared_ptr dlock) { - dlock_ = dlock; - } + void SetDLock(std::shared_ptr dlock) { dlock_ = dlock; } private: /** - * @brief 从给定的任务列表中获取指定用户的任务集 + * @brief Get the task set of the specified user from the given task list * - * @param cloneInfos 克隆/恢复信息 - * @param user 用户信息 - * @param[out] info 
克隆/恢复任务信息 + * @param cloneInfos Clone/Restore Information + * @param user user information + * @param[out] info Clone/restore task information * - * @return 错误码 + * @return error code */ int GetCloneTaskInfoInner(std::vector cloneInfos, - const std::string &user, - std::vector *info); + const std::string& user, + std::vector* info); /** - * @brief 从给定的任务列表中获取符合过滤条件的任务集 + * @brief Retrieve task sets that meet the filtering criteria from the given + * task list * - * @param cloneInfos 克隆/恢复信息 - * @param filter 过滤条件 - * @param[out] info 克隆/恢复任务信息 + * @param cloneInfos Clone/Restore Information + * @param filter filtering conditions + * @param[out] info Clone/restore task information * - * @return 错误码 + * @return error code */ int GetCloneTaskInfoInner(std::vector cloneInfos, - CloneFilterCondition filter, - std::vector *info); + CloneFilterCondition filter, + std::vector* info); /** - * @brief 获取已经完成任务信息 + * @brief to obtain completed task information * - * @param taskId 任务ID - * @param taskCloneInfoOut 克隆任务信息 + * @param taskId Task ID + * @param taskCloneInfoOut Clone task information * - * @return 错误码 + * @return error code */ - int GetFinishedCloneTask( - const TaskIdType &taskId, - TaskCloneInfo *taskCloneInfoOut); + int GetFinishedCloneTask(const TaskIdType& taskId, + TaskCloneInfo* taskCloneInfoOut); /** - * @brief 根据克隆任务信息恢复克隆任务 + * @brief Restore clone task based on clone task information * - * @param cloneInfo 克隆任务信息 + * @param cloneInfo Clone task information * - * @return 错误码 + * @return error code */ - int RecoverCloneTaskInternal(const CloneInfo &cloneInfo); + int RecoverCloneTaskInternal(const CloneInfo& cloneInfo); /** - * @brief 根据克隆任务信息恢复清除克隆任务 + * @brief Restore and clear clone tasks based on clone task information * - * @param cloneInfo 克隆任务信息 + * @param cloneInfo Clone task information * - * @return 错误码 + * @return error code */ - int RecoverCleanTaskInternal(const CloneInfo &cloneInfo); + int RecoverCleanTaskInternal(const CloneInfo& cloneInfo); /** - * @brief 构建和push Lazy的任务 + *Task of building and pushing Lazy @brief * - * @param cloneInfo 克隆任务信息 - * @param closure 异步回调实体 + * @param cloneInfo Clone task information + * @param closure asynchronous callback entity * - * @return 错误码 + * @return error code */ int BuildAndPushCloneOrRecoverLazyTask( - CloneInfo cloneInfo, - std::shared_ptr closure); + CloneInfo cloneInfo, std::shared_ptr closure); /** - * @brief 构建和push 非Lazy的任务 + * @brief Build and push non Lazy tasks * - * @param cloneInfo 克隆任务信息 - * @param closure 异步回调实体 + * @param cloneInfo Clone task information + * @param closure asynchronous callback entity * - * @return 错误码 + * @return error code */ int BuildAndPushCloneOrRecoverNotLazyTask( - CloneInfo cloneInfo, - std::shared_ptr closure); + CloneInfo cloneInfo, std::shared_ptr closure); private: std::shared_ptr dlockOpts_; @@ -461,8 +430,6 @@ class CloneServiceManager { std::shared_ptr cloneServiceManagerBackend_; }; - - } // namespace snapshotcloneserver } // namespace curve diff --git a/src/snapshotcloneserver/clone/clone_task.h b/src/snapshotcloneserver/clone/clone_task.h index 8ea5c6be51..2ddc10976e 100644 --- a/src/snapshotcloneserver/clone/clone_task.h +++ b/src/snapshotcloneserver/clone/clone_task.h @@ -23,17 +23,17 @@ #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_H_ #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_H_ -#include #include +#include -#include "src/snapshotcloneserver/clone/clone_core.h" +#include "src/common/concurrent/dlock.h" #include "src/common/snapshotclone/snapshotclone_define.h" 
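Note for reviewers: a caller-side sketch of the CloneServiceManager interface above, not taken from this patch. The shared_ptr template arguments are elided in the diff, so the closure type (CloneClosure) and its construction are assumed here, and UUID/TaskIdType are assumed to be string-like typedefs.

// Hypothetical caller-side sketch; names follow the interface above, the
// wiring is assumed.
int SubmitClone(CloneServiceManager* mgr,
                std::shared_ptr<CloneClosure> closure) {
    TaskIdType taskId;
    int ret = mgr->CloneFile("source-file-or-snapshot-uuid",  // source
                             "alice",                         // user
                             "/clone/dest",                   // destination
                             "default",                       // poolset
                             true,                            // lazyFlag
                             closure, &taskId);
    if (ret == kErrCodeSuccess) {
        // The clone now proceeds asynchronously; progress can be polled via
        // GetCloneTaskInfoById(user, taskId, &info).
    }
    return ret;
}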
+#include "src/snapshotcloneserver/clone/clone_closure.h" +#include "src/snapshotcloneserver/clone/clone_core.h" +#include "src/snapshotcloneserver/common/curvefs_client.h" +#include "src/snapshotcloneserver/common/snapshotclone_metric.h" #include "src/snapshotcloneserver/common/task.h" #include "src/snapshotcloneserver/common/task_info.h" -#include "src/snapshotcloneserver/common/snapshotclone_metric.h" -#include "src/snapshotcloneserver/common/curvefs_client.h" -#include "src/snapshotcloneserver/clone/clone_closure.h" -#include "src/common/concurrent/dlock.h" using ::curve::common::DLock; @@ -42,33 +42,23 @@ namespace snapshotcloneserver { class CloneTaskInfo : public TaskInfo { public: - CloneTaskInfo(const CloneInfo &cloneInfo, - std::shared_ptr metric, - std::shared_ptr closure) + CloneTaskInfo(const CloneInfo& cloneInfo, + std::shared_ptr metric, + std::shared_ptr closure) : TaskInfo(), cloneInfo_(cloneInfo), metric_(metric), closure_(closure) {} - CloneInfo& GetCloneInfo() { - return cloneInfo_; - } + CloneInfo& GetCloneInfo() { return cloneInfo_; } - const CloneInfo& GetCloneInfo() const { - return cloneInfo_; - } + const CloneInfo& GetCloneInfo() const { return cloneInfo_; } - TaskIdType GetTaskId() const { - return cloneInfo_.GetTaskId(); - } + TaskIdType GetTaskId() const { return cloneInfo_.GetTaskId(); } - void UpdateMetric() { - metric_->Update(this); - } + void UpdateMetric() { metric_->Update(this); } - std::shared_ptr GetClosure() { - return closure_; - } + std::shared_ptr GetClosure() { return closure_; } private: CloneInfo cloneInfo_; @@ -76,20 +66,16 @@ class CloneTaskInfo : public TaskInfo { std::shared_ptr closure_; }; -std::ostream& operator<<(std::ostream& os, const CloneTaskInfo &taskInfo); +std::ostream& operator<<(std::ostream& os, const CloneTaskInfo& taskInfo); class CloneTaskBase : public Task { public: - CloneTaskBase(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) - : Task(taskId), - taskInfo_(taskInfo), - core_(core) {} - - std::shared_ptr GetTaskInfo() const { - return taskInfo_; - } + CloneTaskBase(const TaskIdType& taskId, + std::shared_ptr taskInfo, + std::shared_ptr core) + : Task(taskId), taskInfo_(taskInfo), core_(core) {} + + std::shared_ptr GetTaskInfo() const { return taskInfo_; } protected: std::shared_ptr taskInfo_; @@ -98,9 +84,8 @@ class CloneTaskBase : public Task { class CloneTask : public CloneTaskBase { public: - CloneTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) + CloneTask(const TaskIdType& taskId, std::shared_ptr taskInfo, + std::shared_ptr core) : CloneTaskBase(taskId, taskInfo, core) {} void Run() override { @@ -121,17 +106,14 @@ class CloneTask : public CloneTaskBase { } }; - class CloneCleanTask : public CloneTaskBase { public: - CloneCleanTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) + CloneCleanTask(const TaskIdType& taskId, + std::shared_ptr taskInfo, + std::shared_ptr core) : CloneTaskBase(taskId, taskInfo, core) {} - void Run() override { - core_->HandleCleanCloneOrRecoverTask(taskInfo_); - } + void Run() override { core_->HandleCleanCloneOrRecoverTask(taskInfo_); } }; struct SnapCloneCommonClosure : public SnapCloneClosure { @@ -145,9 +127,9 @@ struct SnapCloneCommonClosure : public SnapCloneClosure { }; struct CreateCloneChunkContext { - // 数据源 + // Data source std::string location; - // chunkid 信息 + // Chunkid information ChunkIDInfo cidInfo; // seqNum uint64_t sn; @@ -155,16 +137,16 @@ struct CreateCloneChunkContext { 
uint64_t csn; // chunk size uint64_t chunkSize; - // 返回值 + // Return value int retCode; // taskid TaskIdType taskid; - // 异步请求开始时间 + // Asynchronous request start time uint64_t startTime; - // 异步请求重试总时间 + // Total retry time for asynchronous requests uint64_t clientAsyncMethodRetryTimeSec; - // chunk信息 - struct CloneChunkInfo *cloneChunkInfo; + // Chunk Information + struct CloneChunkInfo* cloneChunkInfo; }; using CreateCloneChunkContextPtr = std::shared_ptr; @@ -173,21 +155,20 @@ struct CreateCloneChunkClosure : public SnapCloneClosure { CreateCloneChunkClosure( std::shared_ptr tracker, CreateCloneChunkContextPtr context) - : tracker_(tracker), - context_(context) {} + : tracker_(tracker), context_(context) {} void Run() { std::unique_ptr self_guard(this); context_->retCode = GetRetCode(); if (context_->retCode < 0) { LOG(WARNING) << "CreateCloneChunkClosure return fail" - << ", ret = " << context_->retCode - << ", location = " << context_->location - << ", logicalPoolId = " << context_->cidInfo.lpid_ - << ", copysetId = " << context_->cidInfo.cpid_ - << ", chunkId = " << context_->cidInfo.cid_ - << ", seqNum = " << context_->sn - << ", csn = " << context_->csn - << ", taskid = " << context_->taskid; + << ", ret = " << context_->retCode + << ", location = " << context_->location + << ", logicalPoolId = " << context_->cidInfo.lpid_ + << ", copysetId = " << context_->cidInfo.cpid_ + << ", chunkId = " << context_->cidInfo.cid_ + << ", seqNum = " << context_->sn + << ", csn = " << context_->csn + << ", taskid = " << context_->taskid; } tracker_->PushResultContext(context_); tracker_->HandleResponse(context_->retCode); @@ -197,21 +178,21 @@ struct CreateCloneChunkClosure : public SnapCloneClosure { }; struct RecoverChunkContext { - // chunkid 信息 + // Chunkid information ChunkIDInfo cidInfo; - // chunk的分片index + // Chunk's sharding index uint64_t partIndex; - // 总的chunk分片数 + // Total Chunk Fragments uint64_t totalPartNum; - // 分片大小 + // Slice size uint64_t partSize; - // 返回值 + // Return value int retCode; // taskid TaskIdType taskid; - // 异步请求开始时间 + // Asynchronous request start time uint64_t startTime; - // 异步请求重试总时间 + // Total retry time for asynchronous requests uint64_t clientAsyncMethodRetryTimeSec; }; @@ -219,17 +200,15 @@ using RecoverChunkContextPtr = std::shared_ptr; struct RecoverChunkClosure : public SnapCloneClosure { RecoverChunkClosure(std::shared_ptr tracker, - RecoverChunkContextPtr context) - : tracker_(tracker), - context_(context) {} + RecoverChunkContextPtr context) + : tracker_(tracker), context_(context) {} void Run() { std::unique_ptr self_guard(this); context_->retCode = GetRetCode(); if (context_->retCode < 0) { LOG(WARNING) << "RecoverChunkClosure return fail" << ", ret = " << context_->retCode - << ", logicalPoolId = " - << context_->cidInfo.lpid_ + << ", logicalPoolId = " << context_->cidInfo.lpid_ << ", copysetId = " << context_->cidInfo.cpid_ << ", chunkId = " << context_->cidInfo.cid_ << ", partIndex = " << context_->partIndex diff --git a/src/snapshotcloneserver/clone/clone_task_manager.cpp b/src/snapshotcloneserver/clone/clone_task_manager.cpp index be14fc5db6..559c22b315 100644 --- a/src/snapshotcloneserver/clone/clone_task_manager.cpp +++ b/src/snapshotcloneserver/clone/clone_task_manager.cpp @@ -21,8 +21,8 @@ */ #include "src/snapshotcloneserver/clone/clone_task_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/common/snapshotclone/snapshotclone_define.h" namespace curve { namespace snapshotcloneserver { @@ -48,9 +48,8 @@ 
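The partIndex/totalPartNum/partSize fields in RecoverChunkContext above describe how the recovery of one chunk is split into fixed-size parts. A minimal standalone sketch of that arithmetic (the helper name and the short-final-part assumption are illustrative, not taken from this patch):

#include <cstdint>

struct PartRange {
    uint64_t offset;
    uint64_t len;
};

// Offset/length of the partIndex-th part of a chunk split into partSize
// pieces; the final part is truncated when chunkSize is not a multiple of
// partSize.
PartRange NthPart(uint64_t chunkSize, uint64_t partSize, uint64_t partIndex) {
    PartRange r;
    r.offset = partIndex * partSize;
    r.len = (r.offset + partSize <= chunkSize) ? partSize
                                               : chunkSize - r.offset;
    return r;
}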
diff --git a/src/snapshotcloneserver/clone/clone_task_manager.cpp b/src/snapshotcloneserver/clone/clone_task_manager.cpp
index be14fc5db6..559c22b315 100644
--- a/src/snapshotcloneserver/clone/clone_task_manager.cpp
+++ b/src/snapshotcloneserver/clone/clone_task_manager.cpp
@@ -21,8 +21,8 @@
  */

 #include "src/snapshotcloneserver/clone/clone_task_manager.h"
-#include "src/common/snapshotclone/snapshotclone_define.h"

+#include "src/common/snapshotclone/snapshotclone_define.h"

 namespace curve {
 namespace snapshotcloneserver {

@@ -48,9 +48,8 @@ int CloneTaskManager::Start() {
             return ret;
         }
         isStop_.store(false);
-        // isStop_标志先置,防止backEndThread先退出
-        backEndThread =
-            std::thread(&CloneTaskManager::BackEndThreadFunc, this);
+        // Set the isStop_ flag first, to keep backEndThread from exiting early
+        backEndThread = std::thread(&CloneTaskManager::BackEndThreadFunc, this);
     }
     return kErrCodeSuccess;
 }

@@ -66,10 +65,8 @@ void CloneTaskManager::Stop() {
 }

 int CloneTaskManager::PushCommonTask(std::shared_ptr task) {
-    int ret = PushTaskInternal(task,
-        &commonTaskMap_,
-        &commonTasksLock_,
-        commonPool_);
+    int ret =
+        PushTaskInternal(task, &commonTaskMap_, &commonTasksLock_, commonPool_);
     if (ret >= 0) {
         cloneMetric_->UpdateBeforeTaskBegin(
             task->GetTaskInfo()->GetCloneInfo().GetTaskType());
@@ -80,10 +77,8 @@ int CloneTaskManager::PushCommonTask(std::shared_ptr task) {
 }

 int CloneTaskManager::PushStage1Task(std::shared_ptr task) {
-    int ret = PushTaskInternal(task,
-        &stage1TaskMap_,
-        &stage1TasksLock_,
-        stage1Pool_);
+    int ret =
+        PushTaskInternal(task, &stage1TaskMap_, &stage1TasksLock_, stage1Pool_);
     if (ret >= 0) {
         cloneMetric_->UpdateBeforeTaskBegin(
             task->GetTaskInfo()->GetCloneInfo().GetTaskType());
@@ -93,12 +88,9 @@ int CloneTaskManager::PushStage1Task(std::shared_ptr task) {
     return ret;
 }

-int CloneTaskManager::PushStage2Task(
-    std::shared_ptr task) {
-    int ret = PushTaskInternal(task,
-        &stage2TaskMap_,
-        &stage2TasksLock_,
-        stage2Pool_);
+int CloneTaskManager::PushStage2Task(std::shared_ptr task) {
+    int ret =
+        PushTaskInternal(task, &stage2TaskMap_, &stage2TasksLock_, stage2Pool_);
     if (ret >= 0) {
         cloneMetric_->UpdateFlattenTaskBegin();
         LOG(INFO) << "Push Task Into Stage2 Pool for data install success,"
@@ -107,13 +99,13 @@ int CloneTaskManager::PushStage2Task(
     return ret;
 }

-int CloneTaskManager::PushTaskInternal(std::shared_ptr task,
-    std::map > *taskMap,
-    Mutex *taskMapMutex,
-    std::shared_ptr taskPool) {
-    // 同一个clone的Stage1的Task和Stage2的Task的任务ID是一样的,
-    // clean task的ID也是一样的,
-    // 触发一次扫描,将已完成的任务Flush出去
+int CloneTaskManager::PushTaskInternal(
+    std::shared_ptr task,
+    std::map >* taskMap,
+    Mutex* taskMapMutex, std::shared_ptr taskPool) {
+    // Stage1 and Stage2 tasks of the same clone share one task ID, and so
+    // does the corresponding clean task, so trigger a scan first to flush
+    // out the finished tasks
     ScanStage2Tasks();
     ScanStage1Tasks();
     ScanCommonTasks();
@@ -124,12 +116,9 @@ int CloneTaskManager::PushTaskInternal(std::shared_ptr task,
     WriteLockGuard taskMapWlock(cloneTaskMapLock_);
     LockGuard workingTasksLockGuard(*taskMapMutex);

-    std::string destination =
-        task->GetTaskInfo()->GetCloneInfo().GetDest();
+    std::string destination = task->GetTaskInfo()->GetCloneInfo().GetDest();

-    auto ret = taskMap->emplace(
-        destination,
-        task);
+    auto ret = taskMap->emplace(destination, task);
     if (!ret.second) {
         LOG(ERROR) << "CloneTaskManager::PushTaskInternal fail, "
                    << "same destination exist, "
@@ -152,7 +141,7 @@ int CloneTaskManager::PushTaskInternal(std::shared_ptr task,
 }

 std::shared_ptr CloneTaskManager::GetTask(
-    const TaskIdType &taskId) const {
+    const TaskIdType& taskId) const {
     ReadLockGuard taskMapRlock(cloneTaskMapLock_);
     auto it = cloneTaskMap_.find(taskId);
     if (it != cloneTaskMap_.end()) {
@@ -174,16 +163,13 @@ void CloneTaskManager::BackEndThreadFunc() {

 void CloneTaskManager::ScanCommonTasks() {
     WriteLockGuard taskMapWlock(cloneTaskMapLock_);
     LockGuard workingTasksLock(commonTasksLock_);
-    for (auto it = commonTaskMap_.begin();
-        it != commonTaskMap_.end();) {
+    for (auto it = commonTaskMap_.begin(); 
it != commonTaskMap_.end();) { auto taskInfo = it->second->GetTaskInfo(); - // 处理已完成的任务 + // Process completed tasks if (taskInfo->IsFinish()) { - CloneTaskType taskType = - taskInfo->GetCloneInfo().GetTaskType(); - CloneStatus status = - taskInfo->GetCloneInfo().GetStatus(); - // 移除任务并更新metric + CloneTaskType taskType = taskInfo->GetCloneInfo().GetTaskType(); + CloneStatus status = taskInfo->GetCloneInfo().GetStatus(); + // Remove task and update metric cloneMetric_->UpdateAfterTaskFinish(taskType, status); LOG(INFO) << "common task {" << " TaskInfo : " << *taskInfo @@ -200,15 +186,12 @@ void CloneTaskManager::ScanStage1Tasks() { WriteLockGuard taskMapWlock(cloneTaskMapLock_); LockGuard workingTasksLock(stage1TasksLock_); LockGuard workingTasksLockGuard(stage2TasksLock_); - for (auto it = stage1TaskMap_.begin(); - it != stage1TaskMap_.end();) { + for (auto it = stage1TaskMap_.begin(); it != stage1TaskMap_.end();) { auto taskInfo = it->second->GetTaskInfo(); - // 处理已完成的任务 + // Process completed tasks if (taskInfo->IsFinish()) { - CloneTaskType taskType = - taskInfo->GetCloneInfo().GetTaskType(); - CloneStatus status = - taskInfo->GetCloneInfo().GetStatus(); + CloneTaskType taskType = taskInfo->GetCloneInfo().GetTaskType(); + CloneStatus status = taskInfo->GetCloneInfo().GetStatus(); cloneMetric_->UpdateAfterTaskFinish(taskType, status); LOG(INFO) << "stage1 task {" << " TaskInfo : " << *taskInfo @@ -224,27 +207,22 @@ void CloneTaskManager::ScanStage1Tasks() { void CloneTaskManager::ScanStage2Tasks() { WriteLockGuard taskMapWlock(cloneTaskMapLock_); LockGuard workingTasksLockGuard(stage2TasksLock_); - for (auto it = stage2TaskMap_.begin(); - it != stage2TaskMap_.end();) { + for (auto it = stage2TaskMap_.begin(); it != stage2TaskMap_.end();) { auto taskInfo = it->second->GetTaskInfo(); - // 处理完成的任务 + // Process completed tasks if (taskInfo->IsFinish()) { - CloneTaskType taskType = - taskInfo->GetCloneInfo().GetTaskType(); - CloneStatus status = - taskInfo->GetCloneInfo().GetStatus(); - // retrying 状态的任务需要重试 + CloneTaskType taskType = taskInfo->GetCloneInfo().GetTaskType(); + CloneStatus status = taskInfo->GetCloneInfo().GetStatus(); + // Tasks in the retrying state need to be retried if (CloneStatus::retrying == status) { if (CloneTaskType::kClone == taskType) { - taskInfo->GetCloneInfo(). - SetStatus(CloneStatus::cloning); + taskInfo->GetCloneInfo().SetStatus(CloneStatus::cloning); } else { - taskInfo->GetCloneInfo(). 
-                        SetStatus(CloneStatus::recovering);
+                    taskInfo->GetCloneInfo().SetStatus(CloneStatus::recovering);
                 }
                 taskInfo->Reset();
                 stage2Pool_->PushTask(it->second);
-            // 其他任务结束更新metric
+            // For other finished tasks, update the metric
             } else {
                 cloneMetric_->UpdateAfterFlattenTaskFinish(status);
                 LOG(INFO) << "stage2 task {"
@@ -261,4 +239,3 @@ void CloneTaskManager::ScanStage2Tasks() {

 }  // namespace snapshotcloneserver
 }  // namespace curve
-
diff --git a/src/snapshotcloneserver/clone/clone_task_manager.h b/src/snapshotcloneserver/clone/clone_task_manager.h
index d9607ccedc..916d25deae 100644
--- a/src/snapshotcloneserver/clone/clone_task_manager.h
+++ b/src/snapshotcloneserver/clone/clone_task_manager.h
@@ -23,50 +23,46 @@
 #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_MANAGER_H_
 #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_MANAGER_H_

-#include
-#include
 #include
-#include
 #include
+#include
+#include
+#include
 #include   // NOLINT

-#include "src/snapshotcloneserver/clone/clone_task.h"
-#include "src/snapshotcloneserver/common/thread_pool.h"
 #include "src/common/concurrent/rw_lock.h"
 #include "src/common/snapshotclone/snapshotclone_define.h"
+#include "src/snapshotcloneserver/clone/clone_core.h"
+#include "src/snapshotcloneserver/clone/clone_task.h"
 #include "src/snapshotcloneserver/common/config.h"
 #include "src/snapshotcloneserver/common/snapshotclone_metric.h"
-#include "src/snapshotcloneserver/clone/clone_core.h"
+#include "src/snapshotcloneserver/common/thread_pool.h"

-using ::curve::common::RWLock;
+using ::curve::common::LockGuard;
+using ::curve::common::Mutex;
 using ::curve::common::ReadLockGuard;
+using ::curve::common::RWLock;
 using ::curve::common::WriteLockGuard;
-using ::curve::common::Mutex;
-using ::curve::common::LockGuard;

 namespace curve {
 namespace snapshotcloneserver {

 class CloneTaskManager {
 public:
-    explicit CloneTaskManager(
-        std::shared_ptr core,
-        std::shared_ptr cloneMetric)
+    explicit CloneTaskManager(std::shared_ptr core,
+                              std::shared_ptr cloneMetric)
         : isStop_(true),
           core_(core),
           cloneMetric_(cloneMetric),
           cloneTaskManagerScanIntervalMs_(0) {}

-    ~CloneTaskManager() {
-        Stop();
-    }
+    ~CloneTaskManager() { Stop(); }

     int Init(std::shared_ptr stage1Pool,
-        std::shared_ptr stage2Pool,
-        std::shared_ptr commonPool,
-        const SnapshotCloneServerOptions &option) {
-        cloneTaskManagerScanIntervalMs_ =
-            option.cloneTaskManagerScanIntervalMs;
+             std::shared_ptr stage2Pool,
+             std::shared_ptr commonPool,
+             const SnapshotCloneServerOptions& option) {
+        cloneTaskManagerScanIntervalMs_ = option.cloneTaskManagerScanIntervalMs;
         stage1Pool_ = stage1Pool;
         stage2Pool_ = stage2Pool;
         commonPool_ = commonPool;
@@ -78,40 +74,39 @@ class CloneTaskManager {
     void Stop();

     /**
-     * @brief 往任务管理器中加入任务
+     * @brief Add a task to the task manager
      *
-     * 用于非Lazy克隆及其他删除克隆等管控面的请求
+     * Used for control-plane requests such as non-lazy clone and clone
+     * deletion
      *
-     * @param task 任务
+     * @param task task
      *
-     * @return 错误码
+     * @return error code
      */
-    int PushCommonTask(
-        std::shared_ptr task);
+    int PushCommonTask(std::shared_ptr task);

     /**
-     * @brief 往任务管理器中加入LazyClone阶段一的的任务
+     * @brief Add a LazyClone stage-1 task to the task manager
      *
-     * @param task 任务
+     * @param task task
      *
-     * @return 错误码
+     * @return error code
      */
-    int PushStage1Task(
-        std::shared_ptr task);
+    int PushStage1Task(std::shared_ptr task);

     /**
-     * @brief 往任务管理器中加入LazyClone阶段二的的任务
+     * @brief Add a LazyClone stage-2 task to the task manager
      *
-     * 目前只用于重启恢复时,将Lazy克隆恢复克隆数据阶段的任务加入任务管理器
+     * Currently only used during restart recovery, to re-add the data-copy
+     * stage of a lazy clone to the task manager
      *
-     * @param task 任务
+     * @param task task
      *
-     * @return 错误码
+     * @return error code
      */
-    int PushStage2Task(
-        std::shared_ptr task);
+    int PushStage2Task(std::shared_ptr task);

-    std::shared_ptr GetTask(const TaskIdType &taskId) const;
+    std::shared_ptr GetTask(const TaskIdType& taskId) const;

 private:
     void BackEndThreadFunc();
@@ -120,51 +115,52 @@ class CloneTaskManager {
     void ScanStage2Tasks();

     /**
-     * @brief 往对应线程池和map中push任务
+     * @brief Push a task to the corresponding thread pool and map
      *
-     * @param task 任务
-     * @param taskMap 任务表
-     * @param taskMapMutex 任务表和线程池的锁
-     * @param taskPool 线程池
+     * @param task task
+     * @param taskMap task table
+     * @param taskMapMutex lock for the task table and thread pool
+     * @param taskPool thread pool
      *
-     * @return 错误码
+     * @return error code
      */
     int PushTaskInternal(
         std::shared_ptr task,
-        std::map > *taskMap,
-        Mutex *taskMapMutex,
-        std::shared_ptr taskPool);
+        std::map >* taskMap,
+        Mutex* taskMapMutex, std::shared_ptr taskPool);

 private:
-    // 后端线程
+    // Background thread
     std::thread backEndThread;

-    // id->克隆任务表
+    // Task ID -> clone task table
     std::map > cloneTaskMap_;
     mutable RWLock cloneTaskMapLock_;

-    // 存放stage1Pool_池的当前任务,key为destination
+    // Current tasks of the stage1Pool_ pool, keyed by destination
     std::map > stage1TaskMap_;
     mutable Mutex stage1TasksLock_;

-    // 存放stage1Poo2_池的当前任务,key为destination
+    // Current tasks of the stage2Pool_ pool, keyed by destination
     std::map > stage2TaskMap_;
     mutable Mutex stage2TasksLock_;

-    // 存放commonPool_池的当前任务
+    // Current tasks of the commonPool_ pool
     std::map > commonTaskMap_;
     mutable Mutex commonTasksLock_;

-    // 用于Lazy克隆元数据部分的线程池
+    // Thread pool for the metadata part of a lazy clone
    std::shared_ptr stage1Pool_;

-    // 用于Lazy克隆数据部分的线程池
+    // Thread pool for the data part of a lazy clone
     std::shared_ptr stage2Pool_;

-    // 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池
+    // Thread pool for control-plane requests such as non-lazy clone and
+    // clone deletion
     std::shared_ptr commonPool_;

-    // 当前任务管理是否停止,用于支持start,stop功能
+    // Whether task management is currently stopped. 
Used to support start and stop + // functions std::atomic_bool isStop_; // clone core @@ -173,16 +169,11 @@ class CloneTaskManager { // metric std::shared_ptr cloneMetric_; - // CloneTaskManager 后台线程扫描间隔 + // CloneTaskManager backend thread scan interval uint32_t cloneTaskManagerScanIntervalMs_; }; } // namespace snapshotcloneserver } // namespace curve - - - - - #endif // SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_MANAGER_H_ diff --git a/src/snapshotcloneserver/common/config.h b/src/snapshotcloneserver/common/config.h index d5e93a24c1..3c8cc13263 100644 --- a/src/snapshotcloneserver/common/config.h +++ b/src/snapshotcloneserver/common/config.h @@ -23,9 +23,9 @@ #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_CONFIG_H_ #define SRC_SNAPSHOTCLONESERVER_COMMON_CONFIG_H_ - -#include +#include #include + #include "src/common/concurrent/dlock.h" namespace curve { @@ -41,58 +41,61 @@ struct CurveClientOptions { std::string mdsRootUser; // mds root password std::string mdsRootPassword; - // 调用client方法的重试总时间 + // The total retry time for calling the client method uint64_t clientMethodRetryTimeSec; - // 调用client方法重试间隔时间 + // Call client method retry interval uint64_t clientMethodRetryIntervalMs; }; // snapshotcloneserver options struct SnapshotCloneServerOptions { // snapshot&clone server address - std::string addr; - // 调用client异步方法重试总时间 + std::string addr; + // Total retry time for calling client asynchronous methods uint64_t clientAsyncMethodRetryTimeSec; - // 调用client异步方法重试时间间隔 + // Call client asynchronous method retry interval uint64_t clientAsyncMethodRetryIntervalMs; - // 快照工作线程数 + // Number of snapshot worker threads int snapshotPoolThreadNum; - // 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms) + // Scanning cycle of snapshot background thread scanning waiting queue and + // work queue (unit: ms) uint32_t snapshotTaskManagerScanIntervalMs; - // 转储chunk分片大小 + // Dump chunk shard size uint64_t chunkSplitSize; - // CheckSnapShotStatus调用间隔 + // CheckSnapShotStatus call interval uint32_t checkSnapshotStatusIntervalMs; - // 最大快照数 + // Maximum Snapshots uint32_t maxSnapshotLimit; // snapshotcore threadpool threadNum uint32_t snapshotCoreThreadNum; // mdsSessionTimeUs uint32_t mdsSessionTimeUs; - // ReadChunkSnapshot同时进行的异步请求数量 + // The number of asynchronous requests simultaneously processed by + // ReadChunkSnapshot uint32_t readChunkSnapshotConcurrency; - // 用于Lazy克隆元数据部分的线程池线程数 + // Number of thread pool threads used for Lazy clone metadata section int stage1PoolThreadNum; - // 用于Lazy克隆数据部分的线程池线程数 + // Number of thread pool threads used for Lazy clone data section int stage2PoolThreadNum; - // 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池线程数 + // Number of thread pool threads used for requests for non Lazy clones and + // deletion of clones and other control surfaces int commonPoolThreadNum; - // CloneTaskManager 后台线程扫描间隔 + // CloneTaskManager backend thread scan interval uint32_t cloneTaskManagerScanIntervalMs; - // clone chunk分片大小 + // Clone chunk shard size uint64_t cloneChunkSplitSize; - // 克隆临时目录 + // Clone temporary directory std::string cloneTempDir; // mds root user std::string mdsRootUser; - // CreateCloneChunk同时进行的异步请求数量 + // Number of asynchronous requests made simultaneously by CreateCloneChunk uint32_t createCloneChunkConcurrency; - // RecoverChunk同时进行的异步请求数量 + // Number of asynchronous requests simultaneously made by RecoverChunk uint32_t recoverChunkConcurrency; - // 引用计数后台扫描每条记录间隔 + // Reference Count Background Scan Each Record Interval uint32_t backEndReferenceRecordScanIntervalMs; - // 引用计数后台扫描每轮间隔 + // Reference Count 
Background Scan Every Round Interval uint32_t backEndReferenceFuncScanIntervalMs; // dlock options DLockOpts dlockOpts; diff --git a/src/snapshotcloneserver/common/curvefs_client.h b/src/snapshotcloneserver/common/curvefs_client.h index 131f01659c..72db5e3009 100644 --- a/src/snapshotcloneserver/common/curvefs_client.h +++ b/src/snapshotcloneserver/common/curvefs_client.h @@ -15,42 +15,41 @@ */ /************************************************************************* - > File Name: curvefs_client.h - > Author: - > Created Time: Wed Nov 21 11:33:46 2018 + > File Name: curvefs_client.h + > Author: + > Created Time: Wed Nov 21 11:33:46 2018 ************************************************************************/ #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_CURVEFS_CLIENT_H_ #define SRC_SNAPSHOTCLONESERVER_COMMON_CURVEFS_CLIENT_H_ - -#include -#include -#include #include //NOLINT +#include +#include #include //NOLINT -#include "proto/nameserver2.pb.h" -#include "proto/chunk.pb.h" +#include +#include "proto/chunk.pb.h" +#include "proto/nameserver2.pb.h" #include "src/client/client_common.h" -#include "src/client/libcurve_snapshot.h" #include "src/client/libcurve_file.h" +#include "src/client/libcurve_snapshot.h" #include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/snapshotcloneserver/common/config.h" #include "src/common/timeutility.h" +#include "src/snapshotcloneserver/common/config.h" -using ::curve::client::SegmentInfo; -using ::curve::client::LogicPoolID; -using ::curve::client::CopysetID; using ::curve::client::ChunkID; -using ::curve::client::ChunkInfoDetail; using ::curve::client::ChunkIDInfo; -using ::curve::client::FInfo; +using ::curve::client::ChunkInfoDetail; +using ::curve::client::CopysetID; +using ::curve::client::FileClient; using ::curve::client::FileStatus; +using ::curve::client::FInfo; +using ::curve::client::LogicPoolID; +using ::curve::client::SegmentInfo; using ::curve::client::SnapCloneClosure; -using ::curve::client::UserInfo; using ::curve::client::SnapshotClient; -using ::curve::client::FileClient; +using ::curve::client::UserInfo; namespace curve { namespace snapshotcloneserver { @@ -60,15 +59,13 @@ using RetryCondition = std::function; class RetryHelper { public: - RetryHelper(const RetryMethod &retryMethod, - const RetryCondition &condition) { + RetryHelper(const RetryMethod& retryMethod, + const RetryCondition& condition) { retryMethod_ = retryMethod; condition_ = condition; } - int RetryTimeSecAndReturn( - uint64_t retryTimeSec, - uint64_t retryIntervalMs) { + int RetryTimeSecAndReturn(uint64_t retryTimeSec, uint64_t retryIntervalMs) { int ret = -LIBCURVE_ERROR::FAILED; uint64_t startTime = TimeUtility::GetTimeofDaySec(); uint64_t nowTime = startTime; @@ -85,7 +82,7 @@ class RetryHelper { } private: - RetryMethod retryMethod_; + RetryMethod retryMethod_; RetryCondition condition_; }; @@ -95,432 +92,366 @@ class CurveFsClient { virtual ~CurveFsClient() {} /** - * @brief client 初始化 + * @brief client initialization * - * @return 错误码 + * @return error code */ - virtual int Init(const CurveClientOptions &options) = 0; + virtual int Init(const CurveClientOptions& options) = 0; /** - * @brief client 资源回收 + * @brief client resource recycling * - * @return 错误码 + * @return error code */ virtual int UnInit() = 0; /** - * @brief 创建快照 + * @brief Create a snapshot * - * @param filename 文件名 - * @param user 用户信息 - * @param[out] seq 快照版本号 + * @param filename File name + * @param user user information + * @param[out] seq snapshot version number * - * @return 错误码 + 
* @return error code
     */
-    virtual int CreateSnapshot(const std::string &filename,
-                               const std::string &user,
-                               uint64_t *seq) = 0;
+    virtual int CreateSnapshot(const std::string& filename,
+                               const std::string& user, uint64_t* seq) = 0;

     /**
-     * @brief 删除快照
+     * @brief Delete snapshot
      *
-     * @param filename 文件名
-     * @param user 用户信息
-     * @param seq 快照版本号
+     * @param filename File name
+     * @param user user information
+     * @param seq snapshot version number
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int DeleteSnapshot(const std::string &filename,
-                               const std::string &user,
-                               uint64_t seq) = 0;
+    virtual int DeleteSnapshot(const std::string& filename,
+                               const std::string& user, uint64_t seq) = 0;

     /**
-     * @brief 获取快照文件信息
+     * @brief Get snapshot file information
      *
-     * @param filename 文件名
-     * @param user 用户名
-     * @param seq 快照版本号
-     * @param[out] snapInfo 快照文件信息
+     * @param filename File name
+     * @param user username
+     * @param seq snapshot version number
+     * @param[out] snapInfo snapshot file information
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int GetSnapshot(const std::string &filename,
-                            const std::string &user,
-                            uint64_t seq, FInfo* snapInfo) = 0;
+    virtual int GetSnapshot(const std::string& filename,
+                            const std::string& user, uint64_t seq,
+                            FInfo* snapInfo) = 0;

     /**
-     * @brief 查询快照文件segment信息
+     * @brief Query snapshot file segment information
      *
-     * @param filename 文件名
-     * @param user 用户信息
-     * @param seq 快照版本号
-     * @param offset 偏移值
-     * @param segInfo segment信息
+     * @param filename File name
+     * @param user user information
+     * @param seq snapshot version number
+     * @param offset offset value
+     * @param segInfo segment information
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int GetSnapshotSegmentInfo(const std::string &filename,
-                                       const std::string &user,
-                                       uint64_t seq,
-                                       uint64_t offset,
-                                       SegmentInfo *segInfo) = 0;
+    virtual int GetSnapshotSegmentInfo(const std::string& filename,
+                                       const std::string& user, uint64_t seq,
+                                       uint64_t offset,
+                                       SegmentInfo* segInfo) = 0;

     /**
-     * @brief 读取snapshot chunk的数据
+     * @brief Read snapshot chunk data
      *
-     * @param cidinfo chunk ID 信息
-     * @param seq 快照版本号
-     * @param offset 偏移值
-     * @param len 长度
-     * @param[out] buf buffer指针
-     * @param: scc是异步回调
+     * @param cidinfo chunk ID information
+     * @param seq snapshot version number
+     * @param offset offset value
+     * @param len length
+     * @param[out] buf buffer pointer
+     * @param scc asynchronous callback
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int ReadChunkSnapshot(ChunkIDInfo cidinfo,
-                                  uint64_t seq,
-                                  uint64_t offset,
-                                  uint64_t len,
-                                  char *buf,
-                                  SnapCloneClosure* scc) = 0;
+    virtual int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq,
+                                  uint64_t offset, uint64_t len, char* buf,
+                                  SnapCloneClosure* scc) = 0;

     /**
-     * 获取快照状态
-     * @param: userinfo是用户信息
-     * @param: filenam文件名
-     * @param: seq是文件版本号信息
-     * @param: filestatus 快照文件状态
+     * Get snapshot status
+     * @param user user information
+     * @param filename file name
+     * @param seq file version number
+     * @param filestatus snapshot file status
      */
-    virtual int CheckSnapShotStatus(std::string filename,
-                                    std::string user,
-                                    uint64_t seq,
-                                    FileStatus* filestatus) = 0;
+    virtual int CheckSnapShotStatus(std::string filename, std::string user,
+                                    uint64_t seq, FileStatus* filestatus) = 0;

     /**
-     * @brief 获取chunk的版本号信息
+     * @brief Get the version information of a chunk
      *
-     * @param cidinfo chunk ID 信息
-     * @param chunkInfo chunk详细信息
+     * @param cidinfo chunk ID information
+     * @param chunkInfo chunk details
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int GetChunkInfo(const ChunkIDInfo &cidinfo,
-                             ChunkInfoDetail *chunkInfo) = 0;
+    virtual int GetChunkInfo(const ChunkIDInfo& cidinfo,
+                             ChunkInfoDetail* chunkInfo) = 0;

     /**
-     * @brief 创建clone文件
+     * @brief Create a clone file
      * @detail
-     * - 若是clone,sn重置为初始值
-     * - 若是recover,sn不变
+     * - For clone, sn is reset to the initial value
+     * - For recover, sn stays unchanged
      *
-     * @param source clone源文件名
-     * @param filename clone目标文件名
-     * @param user 用户信息
-     * @param size 文件大小
-     * @param sn 版本号
-     * @param chunkSize chunk大小
+     * @param source Clone source file name
+     * @param filename Clone target file name
+     * @param user user information
+     * @param size File size
+     * @param sn version number
+     * @param chunkSize chunk size
      * @param stripeUnit stripe size
      * @param stripeCount stripe count
-     * @param[out] fileInfo 文件信息
+     * @param[out] fileInfo file information
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int CreateCloneFile(
-        const std::string &source,
-        const std::string &filename,
-        const std::string &user,
-        uint64_t size,
-        uint64_t sn,
-        uint32_t chunkSize,
-        uint64_t stripeUnit,
-        uint64_t stripeCount,
-        const std::string& poolset,
-        FInfo* fileInfo) = 0;
+    virtual int CreateCloneFile(const std::string& source,
+                                const std::string& filename,
+                                const std::string& user, uint64_t size,
+                                uint64_t sn, uint32_t chunkSize,
+                                uint64_t stripeUnit, uint64_t stripeCount,
+                                const std::string& poolset,
+                                FInfo* fileInfo) = 0;

     /**
-     * @brief lazy 创建clone chunk
+     * @brief Lazily create a clone chunk
      * @detail
-     * - location的格式定义为 A@B的形式。
-     * - 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址;
-     * - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs
+     * - The location format is defined as A@B.
+     * - If the source data is on S3, the location format is uri@s3, where uri
+     * is the actual chunk object's address.
+     * - If the source data is on CurveFS, the location format is
+     * /filename/chunkindex@cs.
+     *
-     * @param location 数据源的url
-     * @param chunkidinfo 目标chunk
-     * @param sn chunk的序列号
-     * @param csn correct sn
-     * @param chunkSize chunk的大小
-     * @param: scc是异步回调
-     *
-     * @return 错误码
+     * @param location URL of the data source
+     * @param chunkidinfo Target chunk
+     * @param sn chunk's sequence number
+     * @param csn correct sequence number
+     * @param chunkSize Size of the chunk
+     * @param scc asynchronous callback
+     *
+     * @return error code
      */
-    virtual int CreateCloneChunk(
-        const std::string &location,
-        const ChunkIDInfo &chunkidinfo,
-        uint64_t sn,
-        uint64_t csn,
-        uint64_t chunkSize,
-        SnapCloneClosure* scc) = 0;
-
+    virtual int CreateCloneChunk(const std::string& location,
+                                 const ChunkIDInfo& chunkidinfo, uint64_t sn,
+                                 uint64_t csn, uint64_t chunkSize,
+                                 SnapCloneClosure* scc) = 0;
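To make the location grammar just described concrete, two illustrative helpers producing the uri@s3 and /filename/chunkindex@cs forms (the helper names are hypothetical, not part of this patch):

#include <cstdint>
#include <string>

// "objname@s3": the chunk object lives in S3 under uri == "objname".
std::string S3Location(const std::string& uri) { return uri + "@s3"; }

// CurveFsLocation("/file", 12) == "/file/12@cs": chunk index 12 of /file.
std::string CurveFsLocation(const std::string& filename, uint64_t chunkIndex) {
    return filename + "/" + std::to_string(chunkIndex) + "@cs";
}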

     /**
-     * @brief 实际恢复chunk数据
+     * @brief Actually recover chunk data
      *
      * @param chunkidinfo chunkidinfo
-     * @param offset 偏移
-     * @param len 长度
-     * @param: scc是异步回调
+     * @param offset offset
+     * @param len length
+     * @param scc asynchronous callback
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int RecoverChunk(
-        const ChunkIDInfo &chunkidinfo,
-        uint64_t offset,
-        uint64_t len,
-        SnapCloneClosure* scc) = 0;
+    virtual int RecoverChunk(const ChunkIDInfo& chunkidinfo, uint64_t offset,
+                             uint64_t len, SnapCloneClosure* scc) = 0;

     /**
-     * @brief 通知mds完成Clone Meta
+     * @brief Notify mds to complete Clone Meta
      *
-     * @param filename 目标文件名
-     * @param user 用户名
+     * @param filename Target file name
+     * @param user username
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int CompleteCloneMeta(
-        const std::string &filename,
-        const std::string &user) = 0;
+    virtual int CompleteCloneMeta(const std::string& filename,
+                                  const std::string& user) = 0;

     /**
-     * @brief 通知mds完成Clone Chunk
+     * @brief Notify mds to complete Clone Chunk
      *
-     * @param filename 目标文件名
-     * @param user 用户名
+     * @param filename Target file name
+     * @param user username
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int CompleteCloneFile(
-        const std::string &filename,
-        const std::string &user) = 0;
+    virtual int CompleteCloneFile(const std::string& filename,
+                                  const std::string& user) = 0;

     /**
-     * @brief 设置clone文件状态
+     * @brief Set clone file status
      *
-     * @param filename 文件名
-     * @param filestatus 要设置的目标状态
-     * @param user 用户名
+     * @param filename File name
+     * @param filestatus The target state to be set
+     * @param user username
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int SetCloneFileStatus(
-        const std::string &filename,
-        const FileStatus& filestatus,
-        const std::string &user) = 0;
+    virtual int SetCloneFileStatus(const std::string& filename,
+                                   const FileStatus& filestatus,
+                                   const std::string& user) = 0;

     /**
-     * @brief 获取文件信息
+     * @brief Get file information
      *
-     * @param filename 文件名
-     * @param user 用户名
-     * @param[out] fileInfo 文件信息
+     * @param filename File name
+     * @param user username
+     * @param[out] fileInfo file information
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int GetFileInfo(
-        const std::string &filename,
-        const std::string &user,
-        FInfo* fileInfo) = 0;
+    virtual int GetFileInfo(const std::string& filename,
+                            const std::string& user, FInfo* fileInfo) = 0;

     /**
-     * @brief 查询或分配文件segment信息
+     * @brief Query or allocate file segment information
      *
-     * @param allocate 是否分配
-     * @param offset 偏移值
-     * @param fileInfo 文件信息
-     * @param user 用户名
-     * @param segInfo segment信息
+     * @param allocate whether to allocate
+     * @param offset offset value
+     * @param fileInfo file information
+     * @param user username
+     * @param segInfo segment information
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int GetOrAllocateSegmentInfo(
-        bool allocate,
-        uint64_t offset,
-        FInfo* fileInfo,
-        const std::string &user,
-        SegmentInfo *segInfo) = 0;
+    virtual int GetOrAllocateSegmentInfo(bool allocate, uint64_t offset,
+                                         FInfo* fileInfo,
+                                         const std::string& user,
+                                         SegmentInfo* segInfo) = 0;
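For orientation, a rough sketch of the order in which these operations are driven, under simplifying assumptions (a single chunk, no retries; the real sequencing lives in CloneCore and is not part of this hunk, and all parameter values below are placeholders):

// Rough sketch only; error handling is reduced to early returns.
int CloneOneFileSketch(CurveFsClient* fs, const std::string& src,
                       const std::string& dst, const std::string& user,
                       const std::string& location, const ChunkIDInfo& cid,
                       uint64_t chunkSize, SnapCloneClosure* scc) {
    FInfo fileInfo;
    int ret = fs->CreateCloneFile(src, dst, user, /*size=*/chunkSize,
                                  /*sn=*/1, /*chunkSize=*/chunkSize,
                                  /*stripeUnit=*/0, /*stripeCount=*/0,
                                  /*poolset=*/"", &fileInfo);
    if (ret < 0) return ret;
    ret = fs->CreateCloneChunk(location, cid, /*sn=*/1, /*csn=*/0, chunkSize,
                               scc);
    if (ret < 0) return ret;
    ret = fs->CompleteCloneMeta(dst, user);
    if (ret < 0) return ret;
    ret = fs->RecoverChunk(cid, /*offset=*/0, /*len=*/chunkSize, scc);
    if (ret < 0) return ret;
    return fs->CompleteCloneFile(dst, user);
}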

     /**
-     * @brief 为recover rename复制的文件
+     * @brief Rename the file copied for recover
      *
-     * @param user 用户信息
-     * @param originId 被恢复的原始文件Id
-     * @param destinationId 克隆出的目标文件Id
-     * @param origin 被恢复的原始文件名
-     * @param destination 克隆出的目标文件
+     * @param user user information
+     * @param originId ID of the original file being restored
+     * @param destinationId ID of the cloned target file
+     * @param origin Name of the original file being restored
+     * @param destination Cloned target file
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int RenameCloneFile(
-        const std::string &user,
-        uint64_t originId,
-        uint64_t destinationId,
-        const std::string &origin,
-        const std::string &destination) = 0;
-
+    virtual int RenameCloneFile(const std::string& user, uint64_t originId,
+                                uint64_t destinationId,
+                                const std::string& origin,
+                                const std::string& destination) = 0;

     /**
-     * @brief 删除文件
+     * @brief Delete file
      *
-     * @param fileName 文件名
-     * @param user 用户名
-     * @param fileId 删除文件的inodeId
+     * @param fileName File name
+     * @param user username
+     * @param fileId inode ID of the file to delete
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int DeleteFile(
-        const std::string &fileName,
-        const std::string &user,
-        uint64_t fileId) = 0;
+    virtual int DeleteFile(const std::string& fileName, const std::string& user,
+                           uint64_t fileId) = 0;

     /**
-     * @brief 创建目录
+     * @brief Create directory
      *
-     * @param dirpath 目录名
-     * @param user 用户名
+     * @param dirpath directory name
+     * @param user username
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int Mkdir(const std::string& dirpath,
-                      const std::string &user) = 0;
+    virtual int Mkdir(const std::string& dirpath, const std::string& user) = 0;

     /**
-     * @brief 变更文件的owner
+     * @brief Change the owner of the file
      *
-     * @param filename 文件名
-     * @param newOwner 新的owner
+     * @param filename File name
+     * @param newOwner New owner
      *
-     * @return 错误码
+     * @return error code
      */
     virtual int ChangeOwner(const std::string& filename,
-                        const std::string& newOwner) = 0;
+                            const std::string& newOwner) = 0;
 };

 class CurveFsClientImpl : public CurveFsClient {
 public:
     CurveFsClientImpl(std::shared_ptr snapClient,
-                      std::shared_ptr fileClient) :
-        snapClient_(snapClient), fileClient_(fileClient) {}
+                      std::shared_ptr fileClient)
+        : snapClient_(snapClient), fileClient_(fileClient) {}

     virtual ~CurveFsClientImpl() {}

-    // 以下接口定义见CurveFsClient接口注释
-    int Init(const CurveClientOptions &options) override;
+    // See the CurveFsClient interface comments for the definitions of the
+    // interfaces below
+    int Init(const CurveClientOptions& options) override;

     int UnInit() override;

-    int CreateSnapshot(const std::string &filename,
-                       const std::string &user,
-                       uint64_t *seq) override;
-
-    int DeleteSnapshot(const std::string &filename,
-                       const std::string &user,
-                       uint64_t seq) override;
-
-    int GetSnapshot(const std::string &filename,
-                    const std::string &user,
-                    uint64_t seq,
-                    FInfo* snapInfo) override;
-
-    int GetSnapshotSegmentInfo(const std::string &filename,
-                               const std::string &user,
-                               uint64_t seq,
-                               uint64_t offset,
-                               SegmentInfo *segInfo) override;
-
-    int ReadChunkSnapshot(ChunkIDInfo cidinfo,
-                          uint64_t seq,
-                          uint64_t offset,
-                          uint64_t len,
-                          char *buf,
-                          SnapCloneClosure* scc) override;
-
-    int CheckSnapShotStatus(std::string filename,
-                            std::string user,
-                            uint64_t seq,
-                            
FileStatus* filestatus) override; - - int GetChunkInfo(const ChunkIDInfo &cidinfo, - ChunkInfoDetail *chunkInfo) override; - - int CreateCloneFile( - const std::string &source, - const std::string &filename, - const std::string &user, - uint64_t size, - uint64_t sn, - uint32_t chunkSize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - FInfo* fileInfo) override; - - int CreateCloneChunk( - const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure* scc) override; - - int RecoverChunk( - const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure* scc) override; - - int CompleteCloneMeta( - const std::string &filename, - const std::string &user) override; - - int CompleteCloneFile( - const std::string &filename, - const std::string &user) override; - - int SetCloneFileStatus( - const std::string &filename, - const FileStatus& filestatus, - const std::string &user) override; - - int GetFileInfo( - const std::string &filename, - const std::string &user, - FInfo* fileInfo) override; - - int GetOrAllocateSegmentInfo( - bool allocate, - uint64_t offset, - FInfo* fileInfo, - const std::string &user, - SegmentInfo *segInfo) override; - - int RenameCloneFile( - const std::string &user, - uint64_t originId, - uint64_t destinationId, - const std::string &origin, - const std::string &destination) override; - - int DeleteFile( - const std::string &fileName, - const std::string &user, - uint64_t fileId) override; - - int Mkdir(const std::string& dirpath, - const std::string &user) override; + int CreateSnapshot(const std::string& filename, const std::string& user, + uint64_t* seq) override; + + int DeleteSnapshot(const std::string& filename, const std::string& user, + uint64_t seq) override; + + int GetSnapshot(const std::string& filename, const std::string& user, + uint64_t seq, FInfo* snapInfo) override; + + int GetSnapshotSegmentInfo(const std::string& filename, + const std::string& user, uint64_t seq, + uint64_t offset, SegmentInfo* segInfo) override; + + int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, + SnapCloneClosure* scc) override; + + int CheckSnapShotStatus(std::string filename, std::string user, + uint64_t seq, FileStatus* filestatus) override; + + int GetChunkInfo(const ChunkIDInfo& cidinfo, + ChunkInfoDetail* chunkInfo) override; + + int CreateCloneFile(const std::string& source, const std::string& filename, + const std::string& user, uint64_t size, uint64_t sn, + uint32_t chunkSize, uint64_t stripeUnit, + uint64_t stripeCount, const std::string& poolset, + FInfo* fileInfo) override; + + int CreateCloneChunk(const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, + uint64_t csn, uint64_t chunkSize, + SnapCloneClosure* scc) override; + + int RecoverChunk(const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc) override; + + int CompleteCloneMeta(const std::string& filename, + const std::string& user) override; + + int CompleteCloneFile(const std::string& filename, + const std::string& user) override; + + int SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const std::string& user) override; + + int GetFileInfo(const std::string& filename, const std::string& user, + FInfo* fileInfo) override; + + int GetOrAllocateSegmentInfo(bool allocate, uint64_t offset, + FInfo* fileInfo, const std::string& user, + SegmentInfo* segInfo) 
override; + + int RenameCloneFile(const std::string& user, uint64_t originId, + uint64_t destinationId, const std::string& origin, + const std::string& destination) override; + + int DeleteFile(const std::string& fileName, const std::string& user, + uint64_t fileId) override; + + int Mkdir(const std::string& dirpath, const std::string& user) override; int ChangeOwner(const std::string& filename, const std::string& newOwner) override; private: - UserInfo GetUserInfo(const std::string &user) { + UserInfo GetUserInfo(const std::string& user) { if (user == mdsRootUser_) { return UserInfo(mdsRootUser_, mdsRootPassword_); } else { diff --git a/src/snapshotcloneserver/common/snapshotclone_info.h b/src/snapshotcloneserver/common/snapshotclone_info.h index 766ae00e05..fb7804d1f6 100644 --- a/src/snapshotcloneserver/common/snapshotclone_info.h +++ b/src/snapshotcloneserver/common/snapshotclone_info.h @@ -23,10 +23,10 @@ #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_INFO_H_ #define SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_INFO_H_ -#include -#include #include #include +#include +#include #include "src/common/snapshotclone/snapshotclone_define.h" @@ -44,10 +44,7 @@ enum class CloneStatus { metaInstalled = 7, }; -enum class CloneFileType { - kFile = 0, - kSnapshot = 1 -}; +enum class CloneFileType { kFile = 0, kSnapshot = 1 }; enum class CloneStep { kCreateCloneFile = 0, @@ -61,10 +58,10 @@ enum class CloneStep { kEnd }; -// 数据库中clone/recover任务信息 +// Clone/recover task information in the database class CloneInfo { public: - CloneInfo() + CloneInfo() : type_(CloneTaskType::kClone), originId_(0), destinationId_(0), @@ -74,14 +71,10 @@ class CloneInfo { nextStep_(CloneStep::kCreateCloneFile), status_(CloneStatus::error) {} - CloneInfo(const TaskIdType &taskId, - const std::string &user, - CloneTaskType type, - const std::string &source, - const std::string &destination, - const std::string &poolset, - CloneFileType fileType, - bool isLazy) + CloneInfo(const TaskIdType& taskId, const std::string& user, + CloneTaskType type, const std::string& source, + const std::string& destination, const std::string& poolset, + CloneFileType fileType, bool isLazy) : taskId_(taskId), user_(user), type_(type), @@ -96,19 +89,12 @@ class CloneInfo { nextStep_(CloneStep::kCreateCloneFile), status_(CloneStatus::cloning) {} - CloneInfo(const TaskIdType &taskId, - const std::string &user, - CloneTaskType type, - const std::string &source, - const std::string &destination, - const std::string &poolset, - uint64_t originId, - uint64_t destinationId, - uint64_t time, - CloneFileType fileType, - bool isLazy, - CloneStep nextStep, - CloneStatus status) + CloneInfo(const TaskIdType& taskId, const std::string& user, + CloneTaskType type, const std::string& source, + const std::string& destination, const std::string& poolset, + uint64_t originId, uint64_t destinationId, uint64_t time, + CloneFileType fileType, bool isLazy, CloneStep nextStep, + CloneStatus status) : taskId_(taskId), user_(user), type_(type), @@ -123,146 +109,94 @@ class CloneInfo { nextStep_(nextStep), status_(status) {} - TaskIdType GetTaskId() const { - return taskId_; - } + TaskIdType GetTaskId() const { return taskId_; } - void SetTaskId(const TaskIdType &taskId) { - taskId_ = taskId; - } + void SetTaskId(const TaskIdType& taskId) { taskId_ = taskId; } - std::string GetUser() const { - return user_; - } + std::string GetUser() const { return user_; } - void SetUser(const std::string &user) { - user_ = user; - } + void SetUser(const std::string& user) { 
user_ = user; } - CloneTaskType GetTaskType() const { - return type_; - } + CloneTaskType GetTaskType() const { return type_; } - void SetTaskType(CloneTaskType type) { - type_ = type; - } + void SetTaskType(CloneTaskType type) { type_ = type; } - std::string GetSrc() const { - return source_; - } + std::string GetSrc() const { return source_; } - void SetSrc(const std::string &source) { - source_ = source; - } + void SetSrc(const std::string& source) { source_ = source; } - std::string GetDest() const { - return destination_; - } + std::string GetDest() const { return destination_; } - void SetDest(const std::string &dest) { - destination_ = dest; - } + void SetDest(const std::string& dest) { destination_ = dest; } - std::string GetPoolset() const { - return poolset_; - } + std::string GetPoolset() const { return poolset_; } - void SetPoolset(const std::string &poolset) { - poolset_ = poolset; - } + void SetPoolset(const std::string& poolset) { poolset_ = poolset; } - uint64_t GetOriginId() const { - return originId_; - } + uint64_t GetOriginId() const { return originId_; } - void SetOriginId(uint64_t originId) { - originId_ = originId; - } + void SetOriginId(uint64_t originId) { originId_ = originId; } - uint64_t GetDestId() const { - return destinationId_; - } + uint64_t GetDestId() const { return destinationId_; } - void SetDestId(uint64_t destId) { - destinationId_ = destId; - } + void SetDestId(uint64_t destId) { destinationId_ = destId; } - uint64_t GetTime() const { - return time_; - } + uint64_t GetTime() const { return time_; } - void SetTime(uint64_t time) { - time_ = time; - } + void SetTime(uint64_t time) { time_ = time; } - CloneFileType GetFileType() const { - return fileType_; - } + CloneFileType GetFileType() const { return fileType_; } - void SetFileType(CloneFileType fileType) { - fileType_ = fileType; - } + void SetFileType(CloneFileType fileType) { fileType_ = fileType; } - bool GetIsLazy() const { - return isLazy_; - } + bool GetIsLazy() const { return isLazy_; } - void SetIsLazy(bool flag) { - isLazy_ = flag; - } + void SetIsLazy(bool flag) { isLazy_ = flag; } - CloneStep GetNextStep() const { - return nextStep_; - } + CloneStep GetNextStep() const { return nextStep_; } - void SetNextStep(CloneStep nextStep) { - nextStep_ = nextStep; - } - CloneStatus GetStatus() const { - return status_; - } + void SetNextStep(CloneStep nextStep) { nextStep_ = nextStep; } + CloneStatus GetStatus() const { return status_; } - void SetStatus(CloneStatus status) { - status_ = status; - } + void SetStatus(CloneStatus status) { status_ = status; } - bool SerializeToString(std::string *value) const; + bool SerializeToString(std::string* value) const; - bool ParseFromString(const std::string &value); + bool ParseFromString(const std::string& value); private: - // 任务Id - TaskIdType taskId_; - // 用户 + // Task Id + TaskIdType taskId_; + // Users std::string user_; - // 克隆或恢复 + // Clone or Restore CloneTaskType type_; - // 源文件或快照uuid + // Source file or snapshot uuid std::string source_; - // 目标文件名 + // Destination File Name std::string destination_; - // 目标文件所在的poolset + // The poolset where the target file is located std::string poolset_; - // 被恢复的原始文件id, 仅用于恢复 + // The original file ID that has been restored, for recovery purposes only uint64_t originId_; - // 目标文件id + // Target file id uint64_t destinationId_; - // 创建时间 + // Creation time uint64_t time_; - // 克隆/恢复的文件类型 + // Clone/Restore File Types CloneFileType fileType_; - // 是否lazy + // Lazy or not bool isLazy_; - // 克隆进度, 下一个步骤 + // 
Clone progress, next step CloneStep nextStep_; - // 处理的状态 + // Processing status CloneStatus status_; }; -std::ostream& operator<<(std::ostream& os, const CloneInfo &cloneInfo); +std::ostream& operator<<(std::ostream& os, const CloneInfo& cloneInfo); -// 快照处理状态 -enum class Status{ +// Snapshot processing status +enum class Status { done = 0, pending, deleting, @@ -271,187 +205,127 @@ enum class Status{ error }; -// 快照信息 +// Snapshot Information class SnapshotInfo { public: SnapshotInfo() - :uuid_(), - seqNum_(kUnInitializeSeqNum), - chunkSize_(0), - segmentSize_(0), - fileLength_(0), - stripeUnit_(0), - stripeCount_(0), - time_(0), - status_(Status::pending) {} - - SnapshotInfo(UUID uuid, - const std::string &user, - const std::string &fileName, - const std::string &snapshotName) - :uuid_(uuid), - user_(user), - fileName_(fileName), - snapshotName_(snapshotName), - seqNum_(kUnInitializeSeqNum), - chunkSize_(0), - segmentSize_(0), - fileLength_(0), - stripeUnit_(0), - stripeCount_(0), - time_(0), - status_(Status::pending) {} - SnapshotInfo(UUID uuid, - const std::string &user, - const std::string &fileName, - const std::string &desc, - uint64_t seqnum, - uint32_t chunksize, - uint64_t segmentsize, - uint64_t filelength, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - uint64_t time, - Status status) - :uuid_(uuid), - user_(user), - fileName_(fileName), - snapshotName_(desc), - seqNum_(seqnum), - chunkSize_(chunksize), - segmentSize_(segmentsize), - fileLength_(filelength), - stripeUnit_(stripeUnit), - stripeCount_(stripeCount), - poolset_(poolset), - time_(time), - status_(status) {} - - void SetUuid(const UUID &uuid) { - uuid_ = uuid; - } + : uuid_(), + seqNum_(kUnInitializeSeqNum), + chunkSize_(0), + segmentSize_(0), + fileLength_(0), + stripeUnit_(0), + stripeCount_(0), + time_(0), + status_(Status::pending) {} - UUID GetUuid() const { - return uuid_; - } + SnapshotInfo(UUID uuid, const std::string& user, + const std::string& fileName, const std::string& snapshotName) + : uuid_(uuid), + user_(user), + fileName_(fileName), + snapshotName_(snapshotName), + seqNum_(kUnInitializeSeqNum), + chunkSize_(0), + segmentSize_(0), + fileLength_(0), + stripeUnit_(0), + stripeCount_(0), + time_(0), + status_(Status::pending) {} + SnapshotInfo(UUID uuid, const std::string& user, + const std::string& fileName, const std::string& desc, + uint64_t seqnum, uint32_t chunksize, uint64_t segmentsize, + uint64_t filelength, uint64_t stripeUnit, uint64_t stripeCount, + const std::string& poolset, uint64_t time, Status status) + : uuid_(uuid), + user_(user), + fileName_(fileName), + snapshotName_(desc), + seqNum_(seqnum), + chunkSize_(chunksize), + segmentSize_(segmentsize), + fileLength_(filelength), + stripeUnit_(stripeUnit), + stripeCount_(stripeCount), + poolset_(poolset), + time_(time), + status_(status) {} - void SetUser(const std::string &user) { - user_ = user; - } + void SetUuid(const UUID& uuid) { uuid_ = uuid; } - std::string GetUser() const { - return user_; - } + UUID GetUuid() const { return uuid_; } - void SetFileName(const std::string &fileName) { - fileName_ = fileName; - } + void SetUser(const std::string& user) { user_ = user; } - std::string GetFileName() const { - return fileName_; - } + std::string GetUser() const { return user_; } + + void SetFileName(const std::string& fileName) { fileName_ = fileName; } + + std::string GetFileName() const { return fileName_; } - void SetSnapshotName(const std::string &snapshotName) { + void SetSnapshotName(const 
std::string& snapshotName) { snapshotName_ = snapshotName; } - std::string GetSnapshotName() const { - return snapshotName_; - } + std::string GetSnapshotName() const { return snapshotName_; } - void SetSeqNum(uint64_t seqNum) { - seqNum_ = seqNum; - } + void SetSeqNum(uint64_t seqNum) { seqNum_ = seqNum; } - uint64_t GetSeqNum() const { - return seqNum_; - } + uint64_t GetSeqNum() const { return seqNum_; } - void SetChunkSize(uint32_t chunkSize) { - chunkSize_ = chunkSize; - } + void SetChunkSize(uint32_t chunkSize) { chunkSize_ = chunkSize; } - uint32_t GetChunkSize() const { - return chunkSize_; - } + uint32_t GetChunkSize() const { return chunkSize_; } - void SetSegmentSize(uint64_t segmentSize) { - segmentSize_ = segmentSize; - } + void SetSegmentSize(uint64_t segmentSize) { segmentSize_ = segmentSize; } - uint64_t GetSegmentSize() const { - return segmentSize_; - } + uint64_t GetSegmentSize() const { return segmentSize_; } - void SetFileLength(uint64_t fileLength) { - fileLength_ = fileLength; - } + void SetFileLength(uint64_t fileLength) { fileLength_ = fileLength; } - uint64_t GetFileLength() const { - return fileLength_; - } + uint64_t GetFileLength() const { return fileLength_; } - void SetStripeUnit(uint64_t stripeUnit) { - stripeUnit_ = stripeUnit; - } + void SetStripeUnit(uint64_t stripeUnit) { stripeUnit_ = stripeUnit; } - uint64_t GetStripeUnit() const { - return stripeUnit_; - } + uint64_t GetStripeUnit() const { return stripeUnit_; } - void SetStripeCount(uint64_t stripeCount) { - stripeCount_ = stripeCount; - } + void SetStripeCount(uint64_t stripeCount) { stripeCount_ = stripeCount; } - uint64_t GetStripeCount() const { - return stripeCount_; - } + uint64_t GetStripeCount() const { return stripeCount_; } - void SetPoolset(const std::string& poolset) { - poolset_ = poolset; - } + void SetPoolset(const std::string& poolset) { poolset_ = poolset; } - const std::string& GetPoolset() const { - return poolset_; - } + const std::string& GetPoolset() const { return poolset_; } - void SetCreateTime(uint64_t createTime) { - time_ = createTime; - } + void SetCreateTime(uint64_t createTime) { time_ = createTime; } - uint64_t GetCreateTime() const { - return time_; - } + uint64_t GetCreateTime() const { return time_; } - void SetStatus(Status status) { - status_ = status; - } + void SetStatus(Status status) { status_ = status; } - Status GetStatus() const { - return status_; - } + Status GetStatus() const { return status_; } - bool SerializeToString(std::string *value) const; + bool SerializeToString(std::string* value) const; - bool ParseFromString(const std::string &value); + bool ParseFromString(const std::string& value); private: - // 快照uuid + // Snapshot uuid UUID uuid_; - // 租户信息 + // Tenant Information std::string user_; - // 快照目标文件名 + // Snapshot Destination File Name std::string fileName_; - // 快照名 + // Snapshot Name std::string snapshotName_; - // 快照版本号 + // Snapshot version number uint64_t seqNum_; - // 文件的chunk大小 + // Chunk size of the file uint32_t chunkSize_; - // 文件的segment大小 + // The segment size of the file uint64_t segmentSize_; - // 文件大小 + // File size uint64_t fileLength_; // stripe size uint64_t stripeUnit_; @@ -459,16 +333,15 @@ class SnapshotInfo { uint64_t stripeCount_; // poolset std::string poolset_; - // 快照创建时间 + // Snapshot creation time uint64_t time_; - // 快照处理的状态 + // Status of snapshot processing Status status_; }; -std::ostream& operator<<(std::ostream& os, const SnapshotInfo &snapshotInfo); +std::ostream& operator<<(std::ostream& os, const 
SnapshotInfo& snapshotInfo); } // namespace snapshotcloneserver } // namespace curve - #endif // SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_INFO_H_ diff --git a/src/snapshotcloneserver/common/snapshotclone_meta_store.h b/src/snapshotcloneserver/common/snapshotclone_meta_store.h index ff550f5fc7..9e15692eb2 100644 --- a/src/snapshotcloneserver/common/snapshotclone_meta_store.h +++ b/src/snapshotcloneserver/common/snapshotclone_meta_store.h @@ -23,15 +23,15 @@ #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_META_STORE_H_ #define SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_META_STORE_H_ -#include -#include #include #include -#include //NOLINT +#include //NOLINT +#include +#include +#include "src/common/concurrent/concurrent.h" #include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/config.h" -#include "src/common/concurrent/concurrent.h" #include "src/snapshotcloneserver/common/snapshotclone_info.h" namespace curve { @@ -43,25 +43,25 @@ class SnapshotCloneMetaStore { public: SnapshotCloneMetaStore() {} virtual ~SnapshotCloneMetaStore() {} - // 添加一条快照信息记录 + // Add a snapshot information record /** - * 添加一条快照记录到metastore中 - * @param 快照信息结构体 - * @return: 0 插入成功/ -1 插入失败 + * Add a snapshot record to metastore + * @param snapshot information structure + * @return: 0 insertion successful/-1 insertion failed */ - virtual int AddSnapshot(const SnapshotInfo &snapinfo) = 0; + virtual int AddSnapshot(const SnapshotInfo& snapinfo) = 0; /** - * 从metastore删除一条快照记录 - * @param 快照任务的uuid,全局唯一 - * @return 0 删除成功/ -1 删除失败 + * Delete a snapshot record from metastore + * @param The uuid of the snapshot task, globally unique + * @return 0 successfully deleted/-1 failed to delete */ - virtual int DeleteSnapshot(const UUID &uuid) = 0; + virtual int DeleteSnapshot(const UUID& uuid) = 0; /** - * 更新快照记录 - * @param 快照信息结构体 - * @return: 0 更新成功/ -1 更新失败 + * Update snapshot records + * @param snapshot information structure + * @return: 0 successfully updated/-1 failed to update */ - virtual int UpdateSnapshot(const SnapshotInfo &snapinfo) = 0; + virtual int UpdateSnapshot(const SnapshotInfo& snapinfo) = 0; /** * @brief Compare and set snapshot @@ -75,76 +75,76 @@ class SnapshotCloneMetaStore { virtual int CASSnapshot(const UUID& uuid, CASFunc cas) = 0; /** - * 获取指定快照的快照信息 - * @param 快照的uuid - * @param 保存快照信息的指针 - * @return 0 获取成功/ -1 获取失败 + * Obtain snapshot information for the specified snapshot + * @param uuid of snapshot + * @param pointer to save snapshot information + * @return 0 successfully obtained/-1 failed to obtain */ - virtual int GetSnapshotInfo(const UUID &uuid, SnapshotInfo *info) = 0; + virtual int GetSnapshotInfo(const UUID& uuid, SnapshotInfo* info) = 0; /** - * 获取指定文件的快照信息列表 - * @param 文件名 - * @param 保存快照信息的vector指针 - * @return 0 获取成功/ -1 获取失败 + * Obtain a list of snapshot information for the specified file + * @param file name + * @param vector pointer to save snapshot information + * @return 0 successfully obtained/-1 failed to obtain */ - virtual int GetSnapshotList(const std::string &filename, - std::vector *v) = 0; + virtual int GetSnapshotList(const std::string& filename, + std::vector* v) = 0; /** - * 获取全部的快照信息列表 - * @param 保存快照信息的vector指针 - * @return: 0 获取成功/ -1 获取失败 + * Obtain a list of all snapshot information + * @param vector pointer to save snapshot information + * @return: 0 successfully obtained/-1 failed to obtain */ - virtual int GetSnapshotList(std::vector *list) = 0; + virtual int GetSnapshotList(std::vector* list) = 0; /** - * @brief 
获取快照总数
+     * @brief Get the total number of snapshots
      *
-     * @return 快照总数
+     * @return Total number of snapshots
      */
     virtual uint32_t GetSnapshotCount() = 0;

     /**
-     * @brief 插入一条clone任务记录到metastore
-     * @param clone记录信息
-     * @return: 0 插入成功/ -1 插入失败
+     * @brief Insert a clone task record into metastore
+     * @param cloneInfo clone record information
+     * @return: 0 insertion successful/-1 insertion failed
      */
-    virtual int AddCloneInfo(const CloneInfo &cloneInfo) = 0;
+    virtual int AddCloneInfo(const CloneInfo& cloneInfo) = 0;

     /**
-     * @brief 从metastore删除一条clone任务记录
-     * @param clone任务的任务id
-     * @return: 0 删除成功/ -1 删除失败
+     * @brief Delete a clone task record from metastore
+     * @param taskID task ID of the clone task
+     * @return: 0 successfully deleted/-1 failed to delete
      */
-    virtual int DeleteCloneInfo(const std::string &taskID) = 0;
+    virtual int DeleteCloneInfo(const std::string& taskID) = 0;

     /**
-     * @brief 更新一条clone任务记录
-     * @param clone记录信息
-     * @return: 0 更新成功/ -1 更新失败
+     * @brief Update a clone task record
+     * @param cloneInfo clone record information
+     * @return: 0 successfully updated/-1 failed to update
      */
-    virtual int UpdateCloneInfo(const CloneInfo &cloneInfo) = 0;
+    virtual int UpdateCloneInfo(const CloneInfo& cloneInfo) = 0;

     /**
-     * @brief 获取指定task id的clone任务信息
-     * @param clone任务id
-     * @param[out] clone记录信息的指针
-     * @return: 0 获取成功/ -1 获取失败
+     * @brief Get clone task information for the specified task ID
+     * @param taskID clone task ID
+     * @param[out] info pointer to the clone record
+     * @return: 0 successfully obtained/-1 failed to obtain
      */
-    virtual int GetCloneInfo(const std::string &taskID, CloneInfo *info) = 0;
+    virtual int GetCloneInfo(const std::string& taskID, CloneInfo* info) = 0;

     /**
-     * @brief 获取指定文件的clone任务信息
+     * @brief Get clone task information for the specified file
      *
-     * @param fileName 文件名
-     * @param[out] clone记录信息的指针
-     * @return: 0 获取成功/ -1 获取失败
+     * @param fileName file name
+     * @param[out] list pointer to the vector of clone records
+     * @return: 0 successfully obtained/-1 failed to obtain
      */
-    virtual int GetCloneInfoByFileName(
-        const std::string &fileName, std::vector<CloneInfo> *list) = 0;
+    virtual int GetCloneInfoByFileName(const std::string& fileName,
+                                       std::vector<CloneInfo>* list) = 0;

     /**
-     * @brief 获取所有clone任务的信息列表
-     * @param[out] 指向clone任务vector指针
-     * @return: 0 获取成功/ -1 获取失败
+     * @brief Get a list of information for all clone tasks
+     * @param[out] list pointer to the vector of clone tasks
+     * @return: 0 successfully obtained/-1 failed to obtain
      */
-    virtual int GetCloneInfoList(std::vector<CloneInfo> *list) = 0;
+    virtual int GetCloneInfoList(std::vector<CloneInfo>* list) = 0;
 };

 }  // namespace snapshotcloneserver
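Reviewer note: the one non-obvious call in this interface is the compare-and-set hook. A minimal usage sketch follows, under the assumption that CASFunc is the std::function<SnapshotInfo*(SnapshotInfo*)> alias declared alongside this interface and that returning nullptr from the callback means "write nothing"; MarkDoneIfPending is a hypothetical caller, not part of this patch:

    // Flip a snapshot from pending to done only if it is still pending.
    // The callback receives the currently stored record (nullptr if absent).
    int MarkDoneIfPending(SnapshotCloneMetaStore* store, const UUID& uuid) {
        return store->CASSnapshot(uuid, [](SnapshotInfo* snap) -> SnapshotInfo* {
            if (snap == nullptr || snap->GetStatus() != Status::pending) {
                return nullptr;  // skip the write, keep the stored record
            }
            snap->SetStatus(Status::done);
            return snap;  // value to write back atomically
        });
    }

diff --git a/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h b/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h
index 6bc69aca1e..a502042761 100644
--- a/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h
+++ b/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h
@@ -23,21 +23,21 @@
 #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_META_STORE_ETCD_H_
 #define SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_META_STORE_ETCD_H_

-#include
-#include
 #include
+#include
 #include
+#include

-#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h"
-#include "src/kvstorageclient/etcd_client.h"
-#include "src/snapshotcloneserver/common/snapshotclonecodec.h"
 #include "src/common/concurrent/concurrent.h"
 #include "src/common/concurrent/rw_lock.h"
+#include "src/kvstorageclient/etcd_client.h"
+#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h"
+#include 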
"src/snapshotcloneserver/common/snapshotclonecodec.h" -using ::curve::kvstorage::KVStorageClient; -using ::curve::common::RWLock; using ::curve::common::ReadLockGuard; +using ::curve::common::RWLock; using ::curve::common::WriteLockGuard; +using ::curve::kvstorage::KVStorageClient; namespace curve { namespace snapshotcloneserver { @@ -45,54 +45,53 @@ namespace snapshotcloneserver { class SnapshotCloneMetaStoreEtcd : public SnapshotCloneMetaStore { public: SnapshotCloneMetaStoreEtcd(std::shared_ptr client, - std::shared_ptr codec) - : client_(client), - codec_(codec) {} + std::shared_ptr codec) + : client_(client), codec_(codec) {} int Init(); - int AddSnapshot(const SnapshotInfo &info) override; + int AddSnapshot(const SnapshotInfo& info) override; - int DeleteSnapshot(const UUID &uuid) override; + int DeleteSnapshot(const UUID& uuid) override; - int UpdateSnapshot(const SnapshotInfo &info) override; + int UpdateSnapshot(const SnapshotInfo& info) override; int CASSnapshot(const UUID& uuid, CASFunc cas) override; - int GetSnapshotInfo(const UUID &uuid, SnapshotInfo *info) override; + int GetSnapshotInfo(const UUID& uuid, SnapshotInfo* info) override; - int GetSnapshotList(const std::string &filename, - std::vector *v) override; + int GetSnapshotList(const std::string& filename, + std::vector* v) override; - int GetSnapshotList(std::vector *list) override; + int GetSnapshotList(std::vector* list) override; uint32_t GetSnapshotCount() override; - int AddCloneInfo(const CloneInfo &info) override; + int AddCloneInfo(const CloneInfo& info) override; - int DeleteCloneInfo(const std::string &uuid) override; + int DeleteCloneInfo(const std::string& uuid) override; - int UpdateCloneInfo(const CloneInfo &info) override; + int UpdateCloneInfo(const CloneInfo& info) override; - int GetCloneInfo(const std::string &uuid, CloneInfo *info) override; + int GetCloneInfo(const std::string& uuid, CloneInfo* info) override; - int GetCloneInfoByFileName( - const std::string &fileName, std::vector *list) override; + int GetCloneInfoByFileName(const std::string& fileName, + std::vector* list) override; - int GetCloneInfoList(std::vector *list) override; + int GetCloneInfoList(std::vector* list) override; private: /** - * @brief 加载快照信息 + * @brief Load snapshot information * - * @return 0 加载成功/ -1 加载失败 + * @return 0 successfully loaded/ -1 failed to load */ int LoadSnapshotInfos(); /** - * @brief 加载克隆信息 + * @brief Load clone information * - * @return 0 加载成功/ -1 加载失败 + * @return 0 successfully loaded/ -1 failed to load */ int LoadCloneInfos(); @@ -100,11 +99,11 @@ class SnapshotCloneMetaStoreEtcd : public SnapshotCloneMetaStore { std::shared_ptr client_; std::shared_ptr codec_; - // key is UUID, map 需要考虑并发保护 + // Key is UUID, map needs to consider concurrency protection std::map snapInfos_; // snap info lock RWLock snapInfos_mutex; - // key is TaskIdType, map 需要考虑并发保护 + // Key is TaskIdType, map needs to consider concurrency protection std::map cloneInfos_; // clone info map lock RWLock cloneInfos_lock_; diff --git a/src/snapshotcloneserver/common/snapshotclone_metric.h b/src/snapshotcloneserver/common/snapshotclone_metric.h index 410d9b19f9..e4fd013334 100644 --- a/src/snapshotcloneserver/common/snapshotclone_metric.h +++ b/src/snapshotcloneserver/common/snapshotclone_metric.h @@ -24,9 +24,11 @@ #define SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_METRIC_H_ #include -#include + #include #include +#include + #include "src/common/stringstatus.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" @@ 
-39,8 +41,8 @@ namespace snapshotcloneserver { class SnapshotTaskInfo; class CloneTaskInfo; -static uint32_t GetSnapshotTotalNum(void *arg) { - SnapshotCloneMetaStore *metaStore = +static uint32_t GetSnapshotTotalNum(void* arg) { + SnapshotCloneMetaStore* metaStore = reinterpret_cast(arg); uint32_t snapshotCount = 0; if (metaStore != nullptr) { @@ -53,27 +55,27 @@ struct SnapshotMetric { const std::string SnapshotMetricPrefix = "snapshotcloneserver_snapshot_metric_"; - // 正在进行的快照数量 + // Number of snapshots in progress bvar::Adder snapshotDoing; - // 正在等待的快照数量 + // Number of waiting snapshots bvar::Adder snapshotWaiting; - // 累计成功的快照数量 + // Accumulated number of successful snapshots bvar::Adder snapshotSucceed; - // 累计失败的快照数量 + // Accumulated number of failed snapshots bvar::Adder snapshotFailed; std::shared_ptr metaStore_; - // 系统内快照总量 + // Total number of snapshots within the system bvar::PassiveStatus snapshotNum; - explicit SnapshotMetric(std::shared_ptr metaStore) : - snapshotDoing(SnapshotMetricPrefix, "snapshot_doing"), - snapshotWaiting(SnapshotMetricPrefix, "snapshot_waiting"), - snapshotSucceed(SnapshotMetricPrefix, "snapshot_succeed"), - snapshotFailed(SnapshotMetricPrefix, "snapshot_failed"), - metaStore_(metaStore), - snapshotNum(SnapshotMetricPrefix + "snapshot_total_num", - GetSnapshotTotalNum, metaStore_.get()) {} + explicit SnapshotMetric(std::shared_ptr metaStore) + : snapshotDoing(SnapshotMetricPrefix, "snapshot_doing"), + snapshotWaiting(SnapshotMetricPrefix, "snapshot_waiting"), + snapshotSucceed(SnapshotMetricPrefix, "snapshot_succeed"), + snapshotFailed(SnapshotMetricPrefix, "snapshot_failed"), + metaStore_(metaStore), + snapshotNum(SnapshotMetricPrefix + "snapshot_total_num", + GetSnapshotTotalNum, metaStore_.get()) {} }; struct SnapshotInfoMetric { @@ -81,60 +83,56 @@ struct SnapshotInfoMetric { "snapshotcloneserver_snapshotInfo_metric_"; StringStatus metric; - explicit SnapshotInfoMetric(const std::string &snapshotId) { + explicit SnapshotInfoMetric(const std::string& snapshotId) { metric.ExposeAs(SnapshotInfoMetricPrefix, snapshotId); } - void Update(SnapshotTaskInfo *taskInfo); + void Update(SnapshotTaskInfo* taskInfo); }; struct CloneMetric { - const std::string CloneMetricPrefix = - "snapshotcloneserver_clone_metric_"; + const std::string CloneMetricPrefix = "snapshotcloneserver_clone_metric_"; - // 正在执行的克隆任务数量 + // Number of cloning tasks being executed bvar::Adder cloneDoing; - // 累计成功的克隆任务数量 + // Accumulated number of successful cloning tasks bvar::Adder cloneSucceed; - // 累计失败的克隆任务数量 + // Accumulated number of failed clone tasks bvar::Adder cloneFailed; - // 正在执行的恢复任务数量 + // Number of recovery tasks being executed bvar::Adder recoverDoing; - // 累计成功的恢复任务数量 + // Accumulated number of successful recovery tasks bvar::Adder recoverSucceed; - // 累计失败的恢复任务数量 + // Accumulated number of failed recovery tasks bvar::Adder recoverFailed; - // 正在执行的Flatten任务数量 + // Number of Flatten tasks being executed bvar::Adder flattenDoing; - // 累计成功的Flatten任务数量 + // Accumulated number of successful Flatten tasks bvar::Adder flattenSucceed; - // 累计失败的Flatten任务数量 + // Accumulated number of failed Flatten tasks bvar::Adder flattenFailed; - CloneMetric() : - cloneDoing(CloneMetricPrefix, "clone_doing"), - cloneSucceed(CloneMetricPrefix, "clone_succeed"), - cloneFailed(CloneMetricPrefix, "clone_failed"), - recoverDoing(CloneMetricPrefix, "recover_doing"), - recoverSucceed(CloneMetricPrefix, "recover_succeed"), - recoverFailed(CloneMetricPrefix, "recover_failed"), - 
flattenDoing(CloneMetricPrefix, "flatten_doing"),
-        flattenSucceed(CloneMetricPrefix, "flatten_succeed"),
-        flattenFailed(CloneMetricPrefix, "flatten_failed") {}
+    CloneMetric()
+        : cloneDoing(CloneMetricPrefix, "clone_doing"),
+          cloneSucceed(CloneMetricPrefix, "clone_succeed"),
+          cloneFailed(CloneMetricPrefix, "clone_failed"),
+          recoverDoing(CloneMetricPrefix, "recover_doing"),
+          recoverSucceed(CloneMetricPrefix, "recover_succeed"),
+          recoverFailed(CloneMetricPrefix, "recover_failed"),
+          flattenDoing(CloneMetricPrefix, "flatten_doing"),
+          flattenSucceed(CloneMetricPrefix, "flatten_succeed"),
+          flattenFailed(CloneMetricPrefix, "flatten_failed") {}

-    void UpdateBeforeTaskBegin(
-        const CloneTaskType &taskType);
+    void UpdateBeforeTaskBegin(const CloneTaskType& taskType);

-    void UpdateAfterTaskFinish(
-        const CloneTaskType &taskType,
-        const CloneStatus &status);
+    void UpdateAfterTaskFinish(const CloneTaskType& taskType,
+                               const CloneStatus& status);

     void UpdateFlattenTaskBegin();

-    void UpdateAfterFlattenTaskFinish(
-        const CloneStatus &status);
+    void UpdateAfterFlattenTaskFinish(const CloneStatus& status);
 };

 struct CloneInfoMetric {
@@ -142,14 +140,13 @@ struct CloneInfoMetric {
         "snapshotcloneserver_cloneInfo_metric_";
     StringStatus metric;

-    explicit CloneInfoMetric(const std::string &cloneTaskId) {
+    explicit CloneInfoMetric(const std::string& cloneTaskId) {
         metric.ExposeAs(CloneInfoMetricPrefix, cloneTaskId);
     }

-    void Update(CloneTaskInfo *taskInfo);
+    void Update(CloneTaskInfo* taskInfo);
 };

-
 }  // namespace snapshotcloneserver
 }  // namespace curve

diff --git a/src/snapshotcloneserver/common/task.h b/src/snapshotcloneserver/common/task.h
index bc0faa4178..0034230311 100644
--- a/src/snapshotcloneserver/common/task.h
+++ b/src/snapshotcloneserver/common/task.h
@@ -25,6 +25,7 @@

 #include
 #include
+
 #include "src/common/snapshotclone/snapshotclone_define.h"
 #include "src/snapshotcloneserver/common/task_tracker.h"

@@ -33,8 +34,7 @@ namespace snapshotcloneserver {

 class Task {
  public:
-    explicit Task(const TaskIdType &taskId)
-        : taskId_(taskId) {}
+    explicit Task(const TaskIdType& taskId) : taskId_(taskId) {}

     virtual ~Task() {}

@@ -44,47 +44,40 @@ class Task {
     Task& operator=(Task&&) = default;

     /**
-     * @brief 获取快照任务执行体闭包
+     * @brief Get the closure that executes this snapshot task
      *
-     * @return 快照任务执行体
+     * @return the task execution closure
      */
     virtual std::function<void()> clousre() {
-        return [this] () {
-            Run();
-        };
+        return [this]() { Run(); };
     }

     /**
-     * @brief 获取快照任务id
+     * @brief Get the snapshot task ID
      *
-     * @return 快照任务id
+     * @return snapshot task ID
      */
-    TaskIdType GetTaskId() const {
-        return taskId_;
-    }
+    TaskIdType GetTaskId() const { return taskId_; }

     /**
-     * @brief 快照执行函数接口
+     * @brief Snapshot task execution interface
      */
     virtual void Run() = 0;

  private:
-    // 快照id
+    // Snapshot ID
     TaskIdType taskId_;
 };

 class TrackerTask : public Task {
  public:
-    explicit TrackerTask(const TaskIdType &taskId)
-        : Task(taskId) {}
+    explicit TrackerTask(const TaskIdType& taskId) : Task(taskId) {}

     void SetTracker(std::shared_ptr<TaskTracker> tracker) {
         tracker_ = tracker;
     }

-    std::shared_ptr<TaskTracker> GetTracker() {
-        return tracker_;
-    }
+    std::shared_ptr<TaskTracker> GetTracker() { return tracker_; }

  private:
     std::shared_ptr<TaskTracker> tracker_;
@@ -93,5 +86,4 @@ class TrackerTask : public Task {

 }  // namespace snapshotcloneserver
 }  // namespace curve

-
 #endif  // SRC_SNAPSHOTCLONESERVER_COMMON_TASK_H_
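Reviewer note: the Task/closure split above is easiest to read through a tiny example. A sketch only; CountingTask is hypothetical and not part of this patch, and clousre() is the pre-existing spelling of the virtual, kept as-is:

    // Minimal Task subclass: Run() is the unit of work, and clousre() wraps
    // it so a thread pool can store and later invoke it.
    class CountingTask : public curve::snapshotcloneserver::Task {
     public:
        explicit CountingTask(const TaskIdType& taskId)
            : Task(taskId), counter_(0) {}
        void Run() override { ++counter_; }  // the actual work
        int count() const { return counter_; }
     private:
        int counter_;
    };
    // A pool thread eventually does: std::function<void()> fn = task->clousre(); fn();

diff --git a/src/snapshotcloneserver/common/task_info.h b/src/snapshotcloneserver/common/task_info.h
index cc72201d52..2faf6cb1b7 100644
--- 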
a/src/snapshotcloneserver/common/task_info.h
+++ b/src/snapshotcloneserver/common/task_info.h
@@ -23,11 +23,10 @@
 #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_TASK_INFO_H_
 #define SRC_SNAPSHOTCLONESERVER_COMMON_TASK_INFO_H_

-
-#include
-#include
-#include  //NOLINT
 #include
+#include
+#include  //NOLINT
+#include

 #include "src/common/concurrent/concurrent.h"

@@ -36,10 +35,7 @@ namespace snapshotcloneserver {

 class TaskInfo {
  public:
-    TaskInfo()
-        : progress_(0),
-          isFinish_(false),
-          isCanceled_(false) {}
+    TaskInfo() : progress_(0), isFinish_(false), isCanceled_(false) {}

     virtual ~TaskInfo() {}

     TaskInfo(const TaskInfo&) = delete;
@@ -48,59 +44,47 @@ class TaskInfo {
     TaskInfo& operator=(TaskInfo&&) = default;

     /**
-     * @brief 设置任务完成度百分比
+     * @brief Set the task completion percentage
      *
-     * @param persent 任务完成度百分比
+     * @param persent task completion percentage
      */
-    void SetProgress(uint32_t persent) {
-        progress_ = persent;
-    }
+    void SetProgress(uint32_t persent) { progress_ = persent; }

     /**
-     * @brief 获取任务完成度百分比
+     * @brief Get the task completion percentage
      *
-     * @return 任务完成度百分比
+     * @return task completion percentage
      */
-    uint32_t GetProgress() const {
-        return progress_;
-    }
+    uint32_t GetProgress() const { return progress_; }

     /**
-     * @brief 完成任务
+     * @brief Mark the task as finished
      */
-    void Finish() {
-        isFinish_.store(true);
-    }
+    void Finish() { isFinish_.store(true); }

     /**
-     * @brief 获取任务是否完成
+     * @brief Check whether the task has finished
      *
-     * @retval true 任务完成
-     * @retval false 任务未完成
+     * @retval true the task has finished
+     * @retval false the task has not finished
      */
-    bool IsFinish() const {
-        return isFinish_.load();
-    }
+    bool IsFinish() const { return isFinish_.load(); }

     /**
-     * @brief 取消任务
+     * @brief Cancel the task
      */
-    void Cancel() {
-        isCanceled_ = true;
-    }
+    void Cancel() { isCanceled_ = true; }

     /**
-     * @brief 获取任务是否取消
+     * @brief Check whether the task has been canceled
      *
-     * @retval true 任务已取消
-     * @retval false 任务未取消
+     * @retval true the task has been canceled
+     * @retval false the task has not been canceled
      */
-    bool IsCanceled() const {
-        return isCanceled_;
-    }
+    bool IsCanceled() const { return isCanceled_; }

     /**
-     * @brief 重置任务
+     * @brief Reset the task
      */
     void Reset() {
         isFinish_.store(false);
@@ -108,26 +92,24 @@ class TaskInfo {
     }

     /**
-     * @brief 获取任务锁的引用,以便使用LockGuard加锁解锁
+     * @brief Obtain a reference to the task lock, so that it can be locked
+     * and unlocked with LockGuard
      *
-     * 用于同步任务完成和取消功能
-     * 1. 任务完成前,先锁定任务,然后判断任务是否取消,
-     * 若已取消,则释放锁,
-     * 否则执行任务完成逻辑之后释放锁。
-     * 2. 任务取消前,先锁定任务,然后判断任务是否完成,
-     * 若已完成,则释放锁,
-     * 否则执行任务取消逻辑之后释放锁。
+     * Used to synchronize the finish and cancel operations:
+     * 1. Before finishing the task, lock it and check whether it has been
+     * canceled; if so, release the lock, otherwise run the finish logic and
+     * then release the lock.
+     * 2. Before canceling the task, lock it and check whether it has
+     * finished; if so, release the lock, otherwise run the cancel logic and
+     * then release the lock.
*/
-    curve::common::Mutex& GetLockRef() {
-        return lock_;
-    }
+    curve::common::Mutex& GetLockRef() { return lock_; }

  private:
-    // 任务完成度百分比
+    // Task completion percentage
     uint32_t progress_;
-    // 任务任务是否结束
+    // Whether the task has finished
     std::atomic_bool isFinish_;
-    // 任务是否被取消
+    // Whether the task has been canceled
     bool isCanceled_;
     mutable curve::common::Mutex lock_;
 };

diff --git a/src/snapshotcloneserver/common/thread_pool.h b/src/snapshotcloneserver/common/thread_pool.h
index 1f7b4ea697..1e5c664f15 100644
--- a/src/snapshotcloneserver/common/thread_pool.h
+++ b/src/snapshotcloneserver/common/thread_pool.h
@@ -24,6 +24,7 @@
 #define SRC_SNAPSHOTCLONESERVER_COMMON_THREAD_POOL_H_

 #include
+
 #include "src/common/concurrent/task_thread_pool.h"
 #include "src/snapshotcloneserver/common/task.h"

@@ -31,52 +32,49 @@ namespace curve {
 namespace snapshotcloneserver {

 /**
- * @brief 快照线程池
+ * @brief Snapshot thread pool
  */
 class ThreadPool {
  public:
-    /**
-     * @brief 构造函数
-     *
-     * @param threadNum 最大线程数
-     */
-    explicit ThreadPool(int threadNum)
-        : threadNum_(threadNum) {}
     /**
-     * @brief 启动线程池
+     * @brief Constructor
+     *
+     * @param threadNum maximum number of threads
+     */
+    explicit ThreadPool(int threadNum) : threadNum_(threadNum) {}
+    /**
+     * @brief Start the thread pool
      */
     int Start();

     /**
-     * @brief 停止线程池
+     * @brief Stop the thread pool
      */
     void Stop();

     /**
-     * @brief 添加快照任务
+     * @brief Add a snapshot task
      *
-     * @param task 快照任务
+     * @param task snapshot task
      */
     void PushTask(std::shared_ptr<Task> task) {
         threadPool_.Enqueue(task->clousre());
     }

     /**
-     * @brief 添加快照任务
+     * @brief Add a snapshot task
      *
-     * @param task 快照任务
+     * @param task snapshot task
      */
-    void PushTask(Task* task) {
-        threadPool_.Enqueue(task->clousre());
-    }
+    void PushTask(Task* task) { threadPool_.Enqueue(task->clousre()); }

  private:
     /**
-     * @brief 通用线程池
+     * @brief Underlying general-purpose thread pool
      */
     curve::common::TaskThreadPool<> threadPool_;

     /**
-     * @brief 线程数
+     * @brief Number of threads
      */
     int threadNum_;
 };
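Reviewer note: the lock-then-recheck contract documented on TaskInfo::GetLockRef above is easy to invert, so here is a minimal sketch of both sides. The two free functions are hypothetical; LockGuard is the curve::common alias already used this way in snapshot_core.cpp later in this patch:

    // Finish side: take the lock, re-check cancellation, then finish.
    void FinishTask(TaskInfo* info) {
        curve::common::LockGuard guard(info->GetLockRef());
        if (info->IsCanceled()) {
            return;  // cancel won the race; the cancel path owns cleanup
        }
        info->SetProgress(100);
        info->Finish();
    }

    // Cancel side: take the lock, re-check completion, then cancel.
    void CancelTask(TaskInfo* info) {
        curve::common::LockGuard guard(info->GetLockRef());
        if (info->IsFinish()) {
            return;  // already finished; nothing to cancel
        }
        info->Cancel();
    }

diff --git a/src/snapshotcloneserver/main.cpp b/src/snapshotcloneserver/main.cpp
index 3430ff0118..3ae3b44e34 100644
--- a/src/snapshotcloneserver/main.cpp
+++ b/src/snapshotcloneserver/main.cpp
@@ -19,24 +19,27 @@
  * Created Date: Fri Dec 14 2018
  * Author: xuchaojie
  */
-#include
 #include
+#include
+
 #include "src/snapshotcloneserver/snapshotclone_server.h"
 #include "src/common/log_util.h"

-DEFINE_string(conf, "conf/snapshot_clone_server.conf", "snapshot&clone server config file path");  //NOLINT
+DEFINE_string(conf, "conf/snapshot_clone_server.conf",
+              "snapshot&clone server config file path");  // NOLINT
 DEFINE_string(addr, "127.0.0.1:5555", "snapshotcloneserver address");

 using Configuration = ::curve::common::Configuration;
 using SnapShotCloneServer = ::curve::snapshotcloneserver::SnapShotCloneServer;

-void LoadConfigFromCmdline(Configuration *conf) {
-    // 如果命令行有设置, 命令行覆盖配置文件中的字段
+void LoadConfigFromCmdline(Configuration* conf) {
+    // If there are settings on the command line, the command line overwrites
+    // the fields in the configuration file
     google::CommandLineFlagInfo info;
     if (GetCommandLineFlagInfo("addr", &info) && !info.is_default) {
         conf->SetStringValue("server.address", FLAGS_addr);
     }

-    // 设置日志存放文件夹
+    // Set log storage folder
     if (FLAGS_log_dir.empty()) {
         if (!conf->GetStringValue("log.dir", &FLAGS_log_dir)) {
             LOG(WARNING) << "no log.dir in " << FLAGS_conf
@@ -69,13 +72,12 @@ int snapshotcloneserver_main(std::shared_ptr<Configuration> conf) {
     return 0;
 }

-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
    google::ParseCommandLineFlags(&argc, &argv, 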
true); std::shared_ptr conf = std::make_shared(); conf->SetConfigPath(FLAGS_conf); if (!conf->LoadConfig()) { - LOG(ERROR) << "Failed to open config file: " - << conf->GetConfigPath(); + LOG(ERROR) << "Failed to open config file: " << conf->GetConfigPath(); return -1; } LoadConfigFromCmdline(conf.get()); @@ -85,4 +87,3 @@ int main(int argc, char **argv) { google::InitGoogleLogging(argv[0]); snapshotcloneserver_main(conf); } - diff --git a/src/snapshotcloneserver/snapshot/snapshot_core.cpp b/src/snapshotcloneserver/snapshot/snapshot_core.cpp index 6abb94b5e9..ec541c4c80 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_core.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_core.cpp @@ -23,17 +23,17 @@ #include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include -#include + #include +#include #include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/snapshotcloneserver/snapshot/snapshot_task.h" - #include "src/common/uuid.h" +#include "src/snapshotcloneserver/snapshot/snapshot_task.h" -using ::curve::common::UUIDGenerator; -using ::curve::common::NameLockGuard; using ::curve::common::LockGuard; +using ::curve::common::NameLockGuard; +using ::curve::common::UUIDGenerator; namespace curve { namespace snapshotcloneserver { @@ -47,10 +47,10 @@ int SnapshotCoreImpl::Init() { return kErrCodeSuccess; } -int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, - const std::string &user, - const std::string &snapshotName, - SnapshotInfo *snapInfo) { +int SnapshotCoreImpl::CreateSnapshotPre(const std::string& file, + const std::string& user, + const std::string& snapshotName, + SnapshotInfo* snapInfo) { NameLockGuard lockGuard(snapshotNameLock_, file); std::vector fileInfo; metaStore_->GetSnapshotList(file, &fileInfo); @@ -60,11 +60,10 @@ int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, if ((snap.GetUser() == user) && (snap.GetSnapshotName() == snapshotName)) { LOG(INFO) << "CreateSnapshotPre find same snap task" - << ", file = " << file - << ", user = " << user + << ", file = " << file << ", user = " << user << ", snapshotName = " << snapshotName << ", Exist SnapInfo : " << snap; - // 视为同一个快照,返回任务已存在 + // Treat as the same snapshot, return task already exists *snapInfo = snap; return kErrCodeTaskExist; } @@ -85,20 +84,17 @@ int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, break; case -LIBCURVE_ERROR::NOTEXIST: LOG(ERROR) << "create snapshot file not exist" - << ", file = " << file - << ", user = " << user + << ", file = " << file << ", user = " << user << ", snapshotName = " << snapshotName; return kErrCodeFileNotExist; case -LIBCURVE_ERROR::AUTHFAIL: LOG(ERROR) << "create snapshot by invalid user" - << ", file = " << file - << ", user = " << user + << ", file = " << file << ", user = " << user << ", snapshotName = " << snapshotName; return kErrCodeInvalidUser; default: LOG(ERROR) << "GetFileInfo encounter an error" - << ", ret = " << ret - << ", file = " << file + << ", ret = " << ret << ", file = " << file << ", user = " << user; return kErrCodeInternalError; } @@ -117,8 +113,7 @@ int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, ret = metaStore_->AddSnapshot(info); if (ret < 0) { LOG(ERROR) << "AddSnapshot error," - << " ret = " << ret - << ", uuid = " << uuid + << " ret = " << ret << ", uuid = " << uuid << ", fileName = " << file << ", snapshotName = " << snapshotName; return ret; @@ -131,46 +126,56 @@ constexpr uint32_t kProgressCreateSnapshotOnCurvefsComplete = 5; constexpr uint32_t 
kProgressBuildChunkIndexDataComplete = 6;
 constexpr uint32_t kProgressBuildSnapshotMapComplete = 10;
 constexpr uint32_t kProgressTransferSnapshotDataStart =
-        kProgressBuildSnapshotMapComplete;
+    kProgressBuildSnapshotMapComplete;
 constexpr uint32_t kProgressTransferSnapshotDataComplete = 99;
 constexpr uint32_t kProgressComplete = 100;

 /**
- * @brief 异步执行创建快照任务并更新任务进度
+ * @brief Asynchronously execute the create-snapshot task and update the task
+ * progress
  *
- * 快照进度规划如下:
+ * The snapshot progress plan is as follows:
  *
- * |CreateSnapshotOnCurvefs| BuildChunkIndexData | BuildSnapshotMap | TransferSnapshotData | UpdateSnapshot |  //NOLINT
- * |          5%           |         6%          |       10%        |       10%~99%        |      100%      |  //NOLINT
+ * |CreateSnapshotOnCurvefs| BuildChunkIndexData | BuildSnapshotMap | TransferSnapshotData | UpdateSnapshot |  //NOLINT
+ * |          5%           |         6%          |       10%        |       10%~99%        |      100%      |  //NOLINT
  *
  *
- * 异步执行期间发生error与cancel情况说明:
- * 1. 发生error将导致整个异步任务直接中断,并且不做任何清理动作:
- * 发生error时,一般系统存在异常,清理动作很可能不能完成,
- * 因此,不进行任何清理,只置状态,待人工干预排除异常之后,
- * 使用DeleteSnapshot功能去手动删除error状态的快照。
- * 2. 发生cancel时则以创建功能相反的顺序依次进行清理动作,
- * 若清理过程发生error,则立即中断,之后同error过程。
+ * How errors and cancellations are handled during asynchronous execution:
+ * 1. An error aborts the whole asynchronous task immediately, without any
+ * cleanup: when an error occurs the system is usually in an abnormal state
+ * and cleanup would likely fail, so only the status is set. After the
+ * anomaly has been resolved by manual intervention, use DeleteSnapshot to
+ * manually delete the snapshot in error status.
+ * 2. A cancellation runs the cleanup steps in the reverse order of the
+ * creation steps; if an error occurs during cleanup, the task is aborted
+ * immediately and then follows the same path as an error.
  *
- * @param task 快照任务
+ * @param task snapshot task
  */
 void SnapshotCoreImpl::HandleCreateSnapshotTask(
     std::shared_ptr<SnapshotTaskInfo> task) {
     std::string fileName = task->GetFileName();

-    // 如果当前有失败的快照,需先清理失败的快照,否则快照会再次失败
+    // If this file has a failed snapshot, clean it up first; otherwise the
+    // new snapshot will fail again
     int ret = ClearErrorSnapBeforeCreateSnapshot(task);
     if (ret < 0) {
         HandleCreateSnapshotError(task);
         return;
     }

-    // 为支持任务重启,这里有三种情况需要处理
-    // 1. 没打过快照, 没有seqNum且curve上没有快照
-    // 2. 打过快照, 有seqNum且curve上有快照
-    // 3. 打过快照并已经转储完删除快照, 有seqNum但curve上没有快照
+    // To support task restart, there are three cases to handle:
+    // 1. No snapshot has been taken: there is no seqNum and no snapshot on
+    // the curve
+    // 2. A snapshot has been taken: there is a seqNum and a snapshot on the
+    // curve
+    // 3. A snapshot was taken and has already been dumped and deleted:
+ // There is seqNum, but there is no snapshot on the curve - SnapshotInfo *info = &(task->GetSnapshotInfo()); + SnapshotInfo* info = &(task->GetSnapshotInfo()); UUID uuid = task->GetUuid(); uint64_t seqNum = info->GetSeqNum(); bool existIndexData = false; @@ -178,8 +183,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( ret = CreateSnapshotOnCurvefs(fileName, info, task); if (ret < 0) { LOG(ERROR) << "CreateSnapshotOnCurvefs error, " - << " ret = " << ret - << ", fileName = " << fileName + << " ret = " << ret << ", fileName = " << fileName << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; @@ -188,9 +192,8 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( existIndexData = false; } else { FInfo snapInfo; - ret = client_->GetSnapshot(fileName, - info->GetUser(), - seqNum, &snapInfo); + ret = + client_->GetSnapshot(fileName, info->GetUser(), seqNum, &snapInfo); if (-LIBCURVE_ERROR::NOTEXIST == ret) { HandleCreateSnapshotSuccess(task); return; @@ -200,8 +203,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( existIndexData = dataStore_->ChunkIndexDataExist(name); } else { LOG(ERROR) << "GetSnapShot on curvefs fail, " - << " ret = " << ret - << ", fileName = " << fileName + << " ret = " << ret << ", fileName = " << fileName << ", user = " << info->GetUser() << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); @@ -224,8 +226,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( ret = dataStore_->GetChunkIndexData(name, &indexData); if (ret < 0) { LOG(ERROR) << "GetChunkIndexData error, " - << " ret = " << ret - << ", fileName = " << fileName + << " ret = " << ret << ", fileName = " << fileName << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); @@ -238,8 +239,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( ret = BuildSegmentInfo(*info, &segInfos); if (ret < 0) { LOG(ERROR) << "BuildSegmentInfo error," - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -247,8 +247,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( ret = BuildChunkIndexData(*info, &indexData, &segInfos, task); if (ret < 0) { LOG(ERROR) << "BuildChunkIndexData error, " - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -256,8 +255,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( ret = dataStore_->PutChunkIndexData(name, indexData); if (ret < 0) { LOG(ERROR) << "PutChunkIndexData error, " - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -271,14 +269,11 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( } FileSnapMap fileSnapshotMap; - ret = BuildSnapshotMap(fileName, - seqNum, - &fileSnapshotMap); + ret = BuildSnapshotMap(fileName, seqNum, &fileSnapshotMap); if (ret < 0) { LOG(ERROR) << "BuildSnapshotMap error, " << " fileName = " << task->GetFileName() - << ", seqNum = " << seqNum - << ", uuid = " << task->GetUuid(); + << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -286,26 +281,23 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( task->UpdateMetric(); if (existIndexData) { - ret = TransferSnapshotData(indexData, - *info, - segInfos, - [this] (const ChunkDataName &chunkDataName) { + ret = TransferSnapshotData( + indexData, *info, segInfos, + 
[this](const ChunkDataName& chunkDataName) { return dataStore_->ChunkDataExist(chunkDataName); }, task); } else { - ret = TransferSnapshotData(indexData, - *info, - segInfos, - [&fileSnapshotMap] (const ChunkDataName &chunkDataName) { + ret = TransferSnapshotData( + indexData, *info, segInfos, + [&fileSnapshotMap](const ChunkDataName& chunkDataName) { return fileSnapshotMap.IsExistChunk(chunkDataName); }, task); } if (ret < 0) { LOG(ERROR) << "TransferSnapshotData error, " - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -313,8 +305,8 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( task->UpdateMetric(); if (task->IsCanceled()) { - return CancelAfterTransferSnapshotData( - task, indexData, fileSnapshotMap); + return CancelAfterTransferSnapshotData(task, indexData, + fileSnapshotMap); } ret = DeleteSnapshotOnCurvefs(*info); @@ -327,8 +319,8 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( LockGuard lockGuard(task->GetLockRef()); if (task->IsCanceled()) { - return CancelAfterTransferSnapshotData( - task, indexData, fileSnapshotMap); + return CancelAfterTransferSnapshotData(task, indexData, + fileSnapshotMap); } HandleCreateSnapshotSuccess(task); @@ -347,9 +339,9 @@ int SnapshotCoreImpl::ClearErrorSnapBeforeCreateSnapshot( std::make_shared(snap, snapInfoMetric); taskInfo->GetSnapshotInfo().SetStatus(Status::errorDeleting); taskInfo->UpdateMetric(); - // 处理删除快照 + // Processing deletion of snapshots HandleDeleteSnapshotTask(taskInfo); - // 仍然失败,则本次快照失败 + // If it still fails, the current snapshot fails if (taskInfo->GetSnapshotInfo().GetStatus() != Status::done) { LOG(ERROR) << "Find error Snapshot and Delete Fail" << ", error snapshot Id = " << snap.GetUuid() @@ -362,15 +354,13 @@ int SnapshotCoreImpl::ClearErrorSnapBeforeCreateSnapshot( return kErrCodeSuccess; } -int SnapshotCoreImpl::StartCancel( - std::shared_ptr task) { - auto &snapInfo = task->GetSnapshotInfo(); +int SnapshotCoreImpl::StartCancel(std::shared_ptr task) { + auto& snapInfo = task->GetSnapshotInfo(); snapInfo.SetStatus(Status::canceling); int ret = metaStore_->UpdateSnapshot(snapInfo); if (ret < 0) { LOG(ERROR) << "UpdateSnapshot Task Cancel Fail!" 
- << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return kErrCodeInternalError; } @@ -378,18 +368,17 @@ int SnapshotCoreImpl::StartCancel( } void SnapshotCoreImpl::CancelAfterTransferSnapshotData( - std::shared_ptr task, - const ChunkIndexData &indexData, - const FileSnapMap &fileSnapshotMap) { + std::shared_ptr task, const ChunkIndexData& indexData, + const FileSnapMap& fileSnapshotMap) { LOG(INFO) << "Cancel After TransferSnapshotData" << ", uuid = " << task->GetUuid(); std::vector chunkIndexVec = indexData.GetAllChunkIndex(); - for (auto &chunkIndex : chunkIndexVec) { + for (auto& chunkIndex : chunkIndexVec) { ChunkDataName chunkDataName; indexData.GetChunkDataName(chunkIndex, &chunkDataName); if ((!fileSnapshotMap.IsExistChunk(chunkDataName)) && (dataStore_->ChunkDataExist(chunkDataName))) { - int ret = dataStore_->DeleteChunkData(chunkDataName); + int ret = dataStore_->DeleteChunkData(chunkDataName); if (ret < 0) { LOG(ERROR) << "DeleteChunkData error" << "while canceling CreateSnapshot, " @@ -410,19 +399,16 @@ void SnapshotCoreImpl::CancelAfterCreateChunkIndexData( std::shared_ptr task) { LOG(INFO) << "Cancel After CreateChunkIndexData" << ", uuid = " << task->GetUuid(); - SnapshotInfo &info = task->GetSnapshotInfo(); + SnapshotInfo& info = task->GetSnapshotInfo(); UUID uuid = task->GetUuid(); uint64_t seqNum = info.GetSeqNum(); - ChunkIndexDataName name(task->GetFileName(), - seqNum); + ChunkIndexDataName name(task->GetFileName(), seqNum); int ret = dataStore_->DeleteChunkIndexData(name); if (ret < 0) { LOG(ERROR) << "DeleteChunkIndexData error " << "while canceling CreateSnapshot, " - << " ret = " << ret - << ", fileName = " << task->GetFileName() - << ", seqNum = " << seqNum - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", fileName = " << task->GetFileName() + << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -433,7 +419,7 @@ void SnapshotCoreImpl::CancelAfterCreateSnapshotOnCurvefs( std::shared_ptr task) { LOG(INFO) << "Cancel After CreateSnapshotOnCurvefs" << ", uuid = " << task->GetUuid(); - SnapshotInfo &info = task->GetSnapshotInfo(); + SnapshotInfo& info = task->GetSnapshotInfo(); UUID uuid = task->GetUuid(); int ret = DeleteSnapshotOnCurvefs(info); @@ -452,13 +438,12 @@ void SnapshotCoreImpl::HandleClearSnapshotOnMateStore( if (ret < 0) { LOG(ERROR) << "MetaStore DeleteSnapshot error " << "while cancel CreateSnapshot, " - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); LOG(INFO) << "CancelSnapshot Task Success" << ", uuid = " << snapInfo.GetUuid() << ", fileName = " << snapInfo.GetFileName() @@ -472,13 +457,12 @@ void SnapshotCoreImpl::HandleClearSnapshotOnMateStore( void SnapshotCoreImpl::HandleCreateSnapshotSuccess( std::shared_ptr task) { - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); snapInfo.SetStatus(Status::done); int ret = metaStore_->UpdateSnapshot(snapInfo); if (ret < 0) { LOG(ERROR) << "UpdateSnapshot Task Success Fail!" 
-                   << " ret = " << ret
-                   << ", uuid = " << task->GetUuid();
+                   << " ret = " << ret << ", uuid = " << task->GetUuid();
     }

     task->SetProgress(kProgressComplete);
@@ -494,13 +478,12 @@ void SnapshotCoreImpl::HandleCreateSnapshotSuccess(

 void SnapshotCoreImpl::HandleCreateSnapshotError(
     std::shared_ptr<SnapshotTaskInfo> task) {
-    auto &snapInfo = task->GetSnapshotInfo();
+    auto& snapInfo = task->GetSnapshotInfo();
     snapInfo.SetStatus(Status::error);
     int ret = metaStore_->UpdateSnapshot(snapInfo);
     if (ret < 0) {
         LOG(ERROR) << "UpdateSnapshot Task Error Fail!"
-                   << " ret = " << ret
-                   << ", uuid = " << task->GetUuid();
+                   << " ret = " << ret << ", uuid = " << task->GetUuid();
     }

     LOG(INFO) << "CreateSnapshot Task Fail"
@@ -514,14 +497,11 @@ void SnapshotCoreImpl::HandleCreateSnapshotError(
 }

 int SnapshotCoreImpl::CreateSnapshotOnCurvefs(
-    const std::string &fileName,
-    SnapshotInfo *info,
+    const std::string& fileName, SnapshotInfo* info,
     std::shared_ptr<SnapshotTaskInfo> task) {
     uint64_t seqNum = 0;
-    int ret =
-        client_->CreateSnapshot(fileName, info->GetUser(), &seqNum);
-    if (LIBCURVE_ERROR::OK == ret ||
-        -LIBCURVE_ERROR::UNDER_SNAPSHOT == ret) {
+    int ret = client_->CreateSnapshot(fileName, info->GetUser(), &seqNum);
+    if (LIBCURVE_ERROR::OK == ret || -LIBCURVE_ERROR::UNDER_SNAPSHOT == ret) {
         // ok
     } else if (-LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT == ret) {
         LOG(ERROR) << "CreateSnapshot on curvefs fail, "
@@ -530,23 +510,18 @@ int SnapshotCoreImpl::CreateSnapshotOnCurvefs(
         return kErrCodeNotSupport;
     } else {
         LOG(ERROR) << "CreateSnapshot on curvefs fail, "
-                   << " ret = " << ret
-                   << ", uuid = " << task->GetUuid();
+                   << " ret = " << ret << ", uuid = " << task->GetUuid();
         return kErrCodeInternalError;
     }
     LOG(INFO) << "CreateSnapshot on curvefs success, seq = " << seqNum
               << ", uuid = " << task->GetUuid();

     FInfo snapInfo;
-    ret = client_->GetSnapshot(fileName,
-                               info->GetUser(),
-                               seqNum, &snapInfo);
+    ret = client_->GetSnapshot(fileName, info->GetUser(), seqNum, &snapInfo);
     if (ret != LIBCURVE_ERROR::OK) {
         LOG(ERROR) << "GetSnapShot on curvefs fail, "
-                   << " ret = " << ret
-                   << ", fileName = " << fileName
-                   << ", user = " << info->GetUser()
-                   << ", seqNum = " << seqNum
+                   << " ret = " << ret << ", fileName = " << fileName
+                   << ", user = " << info->GetUser() << ", seqNum = " << seqNum
                    << ", uuid = " << task->GetUuid();
         return kErrCodeInternalError;
     }
@@ -573,46 +548,38 @@ int SnapshotCoreImpl::CreateSnapshotOnCurvefs(
     ret = metaStore_->CASSnapshot(uuid, compareAndSet);
     if (ret < 0) {
         LOG(ERROR) << "CASSnapshot error, "
-                   << " ret = " << ret
-                   << ", fileName = " << fileName
+                   << " ret = " << ret << ", fileName = " << fileName
                    << ", uuid = " << task->GetUuid();
         return ret;
     }

-    // 打完快照需等待2个session时间,以保证seq同步到所有client
+    // After taking a snapshot, wait two session periods to ensure that the
+    // new seq has been synchronized to all clients
     std::this_thread::sleep_for(
         std::chrono::microseconds(mdsSessionTimeUs_ * 2));

     return kErrCodeSuccess;
 }

-int SnapshotCoreImpl::DeleteSnapshotOnCurvefs(const SnapshotInfo &info) {
+int SnapshotCoreImpl::DeleteSnapshotOnCurvefs(const SnapshotInfo& info) {
     std::string fileName = info.GetFileName();
     std::string user = info.GetUser();
     uint64_t seqNum = info.GetSeqNum();
-    int ret = client_->DeleteSnapshot(fileName,
-                                      user,
-                                      seqNum);
-    if (ret != LIBCURVE_ERROR::OK &&
-        ret != -LIBCURVE_ERROR::NOTEXIST &&
+    int ret = client_->DeleteSnapshot(fileName, user, seqNum);
+    if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOTEXIST &&
         ret != -LIBCURVE_ERROR::DELETING) {
         LOG(ERROR) << "DeleteSnapshot error, "
-                   << " ret = " << ret
-                   << ", fileName = " << fileName
-                   << ", user = " << user
-                   << ", seqNum = " << seqNum
+                   << " ret = " << ret << ", fileName = " << fileName
+                   << ", user = " << user << ", seqNum = " << seqNum
                    << ", uuid = " << info.GetUuid();
         return kErrCodeInternalError;
     }

     do {
         FileStatus status;
-        ret = client_->CheckSnapShotStatus(info.GetFileName(),
-                                           info.GetUser(),
-                                           seqNum,
-                                           &status);
+        ret = client_->CheckSnapShotStatus(info.GetFileName(), info.GetUser(),
+                                           seqNum, &status);
         LOG(INFO) << "Doing CheckSnapShotStatus, fileName = "
-                  << info.GetFileName()
-                  << ", user = " << info.GetUser()
+                  << info.GetFileName() << ", user = " << info.GetUser()
                   << ", seqNum = " << seqNum
                   << ", status = " << static_cast<int>(status)
                   << ", uuid = " << info.GetUuid();
@@ -631,8 +598,7 @@
             }
         } else {
             LOG(ERROR) << "CheckSnapShotStatus fail"
-                       << ", ret = " << ret
-                       << ", uuid = " << info.GetUuid();
+                       << ", ret = " << ret << ", uuid = " << info.GetUuid();
             return kErrCodeInternalError;
         }
         std::this_thread::sleep_for(
@@ -642,9 +608,8 @@
 }

 int SnapshotCoreImpl::BuildChunkIndexData(
-    const SnapshotInfo &info,
-    ChunkIndexData *indexData,
-    std::map<uint64_t, SegmentInfo> *segInfos,
+    const SnapshotInfo& info, ChunkIndexData* indexData,
+    std::map<uint64_t, SegmentInfo>* segInfos,
     std::shared_ptr<SnapshotTaskInfo> task) {
     std::string fileName = info.GetFileName();
     std::string user = info.GetUser();
@@ -656,25 +621,19 @@
     indexData->SetFileName(fileName);

     uint64_t chunkIndex = 0;
-    for (uint64_t i = 0; i < fileLength/segmentSize; i++) {
+    for (uint64_t i = 0; i < fileLength / segmentSize; i++) {
         uint64_t offset = i * segmentSize;
         SegmentInfo segInfo;
-        int ret = client_->GetSnapshotSegmentInfo(
-            fileName,
-            user,
-            seqNum,
-            offset,
-            &segInfo);
+        int ret = client_->GetSnapshotSegmentInfo(fileName, user, seqNum,
+                                                  offset, &segInfo);

         if (LIBCURVE_ERROR::OK == ret) {
             segInfos->emplace(i, segInfo);
             for (std::vector<ChunkIDInfo>::size_type j = 0;
-                 j < segInfo.chunkvec.size();
-                 j++) {
+                 j < segInfo.chunkvec.size(); j++) {
                 ChunkInfoDetail chunkInfo;
                 ChunkIDInfo cidInfo = segInfo.chunkvec[j];
-                ret = client_->GetChunkInfo(cidInfo,
-                    &chunkInfo);
+                ret = client_->GetChunkInfo(cidInfo, &chunkInfo);
                 if (ret != LIBCURVE_ERROR::OK) {
                     LOG(ERROR) << "GetChunkInfo error, "
                                << " ret = " << ret
                                << ", chunkInfo
                                << ", uuid = " << task->GetUuid();
                     return kErrCodeInternalError;
                 }
-                // 2个sn,小的是snap sn,大的是快照之后的写
-                // 1个sn,有两种情况:
-                //    小于等于seqNum时为snap sn, 且快照之后未写过;
-                //    大于时, 表示打快照时为空,是快照之后首次写的版本(seqNum+1)
-                // 没有sn,从未写过
-                // 大于2个sn,错误,报错
+                // Two sns: the smaller one is the snapshot sn, the larger
+                // one is a write made after the snapshot.
+                // One sn, two cases:
+                //   <= seqNum: it is the snapshot sn, and the chunk has not
+                //   been written since the snapshot;
+                //   > seqNum: the chunk was empty when the snapshot was
+                //   taken, and this is the first version written after it
+                //   (seqNum+1).
+                // No sn: the chunk has never been written.
+                // More than two sns: invalid; report an error.
                 if (chunkInfo.chunkSn.size() == 2) {
                     uint64_t seq =
-                        std::min(chunkInfo.chunkSn[0],
-                                 chunkInfo.chunkSn[1]);
+                        std::min(chunkInfo.chunkSn[0], chunkInfo.chunkSn[1]);
                     chunkIndex = i * (segmentSize / chunkSize) + j;
                     ChunkDataName chunkDataName(fileName, seq, chunkIndex);
                     indexData->PutChunkDataName(chunkDataName);
@@ -708,10 +670,10 @@ int 
SnapshotCoreImpl::BuildChunkIndexData(
                     // nothing
                 } else {
                     // should not reach here
-                    LOG(ERROR) << "GetChunkInfo return chunkInfo.chunkSn.size()"
-                               << " invalid, size = "
-                               << chunkInfo.chunkSn.size()
-                               << ", uuid = " << task->GetUuid();
+                    LOG(ERROR)
+                        << "GetChunkInfo return chunkInfo.chunkSn.size()"
+                        << " invalid, size = " << chunkInfo.chunkSn.size()
+                        << ", uuid = " << task->GetUuid();
                     return kErrCodeInternalError;
                 }
                 if (task->IsCanceled()) {
@@ -722,10 +684,8 @@ int SnapshotCoreImpl::BuildChunkIndexData(
             // nothing
         } else {
             LOG(ERROR) << "GetSnapshotSegmentInfo error,"
-                       << " ret = " << ret
-                       << ", fileName = " << fileName
-                       << ", user = " << user
-                       << ", seq = " << seqNum
+                       << " ret = " << ret << ", fileName = " << fileName
+                       << ", user = " << user << ", seq = " << seqNum
                        << ", offset = " << offset
                        << ", uuid = " << task->GetUuid();
             return kErrCodeInternalError;
@@ -736,25 +696,18 @@
 }

 int SnapshotCoreImpl::BuildSegmentInfo(
-    const SnapshotInfo &info,
-    std::map<uint64_t, SegmentInfo> *segInfos) {
+    const SnapshotInfo& info, std::map<uint64_t, SegmentInfo>* segInfos) {
     int ret = kErrCodeSuccess;
     std::string fileName = info.GetFileName();
     std::string user = info.GetUser();
     uint64_t seq = info.GetSeqNum();
     uint64_t fileLength = info.GetFileLength();
     uint64_t segmentSize = info.GetSegmentSize();
-    for (uint64_t i = 0;
-         i < fileLength/segmentSize;
-         i++) {
+    for (uint64_t i = 0; i < fileLength / segmentSize; i++) {
         uint64_t offset = i * segmentSize;
         SegmentInfo segInfo;
-        ret = client_->GetSnapshotSegmentInfo(
-            fileName,
-            user,
-            seq,
-            offset,
-            &segInfo);
+        ret = client_->GetSnapshotSegmentInfo(fileName, user, seq, offset,
+                                              &segInfo);

         if (LIBCURVE_ERROR::OK == ret) {
             segInfos->emplace(i, std::move(segInfo));
@@ -762,10 +715,8 @@ int SnapshotCoreImpl::BuildSegmentInfo(
             // nothing
         } else {
             LOG(ERROR) << "GetSnapshotSegmentInfo error,"
-                       << " ret = " << ret
-                       << ", fileName = " << fileName
-                       << ", user = " << user
-                       << ", seq = " << seq
+                       << " ret = " << ret << ", fileName = " << fileName
+                       << ", user = " << user << ", seq = " << seq
                        << ", offset = " << offset
                        << ", uuid = " << info.GetUuid();
             return kErrCodeInternalError;
@@ -775,15 +726,14 @@ int SnapshotCoreImpl::BuildSegmentInfo(
 }

 int SnapshotCoreImpl::TransferSnapshotData(
-    const ChunkIndexData indexData,
-    const SnapshotInfo &info,
-    const std::map<uint64_t, SegmentInfo> &segInfos,
-    const ChunkDataExistFilter &filter,
+    const ChunkIndexData indexData, const SnapshotInfo& info,
+    const std::map<uint64_t, SegmentInfo>& segInfos,
+    const ChunkDataExistFilter& filter,
     std::shared_ptr<SnapshotTaskInfo> task) {
     int ret = 0;
     uint64_t segmentSize = info.GetSegmentSize();
     uint64_t chunkSize = info.GetChunkSize();
-    uint64_t chunkPerSegment = segmentSize/chunkSize;
+    uint64_t chunkPerSegment = segmentSize / chunkSize;

     if (0 == chunkSplitSize_ || chunkSize % chunkSplitSize_ != 0) {
         LOG(ERROR) << "error!, ChunkSize is not align to chunkSplitSize"
@@ -794,13 +744,13 @@ int SnapshotCoreImpl::TransferSnapshotData(
     std::vector chunkIndexVec = indexData.GetAllChunkIndex();

     uint32_t totalProgress = kProgressTransferSnapshotDataComplete -
-        kProgressTransferSnapshotDataStart;
+                             kProgressTransferSnapshotDataStart;
     uint32_t transferDataNum = chunkIndexVec.size();
     double progressPerData =
         static_cast<double>(totalProgress) / transferDataNum;
     uint32_t index = 0;

-    for (auto &chunkIndex : chunkIndexVec) {
+    for (auto& chunkIndex : chunkIndexVec) {
         uint64_t segNum = chunkIndex / chunkPerSegment;

         auto it = segInfos.find(segNum);
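Reviewer note: the index arithmetic that this loop relies on (chunkIndex / chunkPerSegment, and presumably chunkIndex % chunkPerSegment for the in-segment slot checked just below) is worth spelling out once. An illustrative helper only, not part of this patch:

    #include <cstdint>

    // Where a global chunk index lands, mirroring the arithmetic above.
    struct ChunkLocation {
        uint64_t segNum;               // which segment the chunk lives in
        uint64_t chunkIndexInSegment;  // slot inside that segment's chunkvec
        uint64_t segmentOffset;        // byte offset of the segment in file
    };

    ChunkLocation LocateChunk(uint64_t chunkIndex, uint64_t segmentSize,
                              uint64_t chunkSize) {
        const uint64_t chunkPerSegment = segmentSize / chunkSize;
        const uint64_t segNum = chunkIndex / chunkPerSegment;
        return ChunkLocation{segNum, chunkIndex % chunkPerSegment,
                             segNum * segmentSize};
    }

@@ -818,17 +768,15 @@ int SnapshotCoreImpl::TransferSnapshotData(
             LOG(ERROR) << 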
"TransferSnapshotData, " << "chunkIndexInSegment >= " << "segInfos[segNum].chunkvec.size()" - << ", chunkIndexInSegment = " - << chunkIndexInSegment - << ", size = " - << it->second.chunkvec.size() + << ", chunkIndexInSegment = " << chunkIndexInSegment + << ", size = " << it->second.chunkvec.size() << ", uuid = " << task->GetUuid(); return kErrCodeInternalError; } } auto tracker = std::make_shared(); - for (auto &chunkIndex : chunkIndexVec) { + for (auto& chunkIndex : chunkIndexVec) { ChunkDataName chunkDataName; indexData.GetChunkDataName(chunkIndex, &chunkDataName); uint64_t segNum = chunkIndex / chunkPerSegment; @@ -836,8 +784,7 @@ int SnapshotCoreImpl::TransferSnapshotData( auto it = segInfos.find(segNum); if (it != segInfos.end()) { - ChunkIDInfo cidInfo = - it->second.chunkvec[chunkIndexInSegment]; + ChunkIDInfo cidInfo = it->second.chunkvec[chunkIndexInSegment]; if (!filter(chunkDataName)) { auto taskInfo = std::make_shared( @@ -847,10 +794,7 @@ int SnapshotCoreImpl::TransferSnapshotData( readChunkSnapshotConcurrency_); UUID taskId = UUIDGenerator().GenerateUUID(); auto task = new TransferSnapshotDataChunkTask( - taskId, - taskInfo, - client_, - dataStore_); + taskId, taskInfo, client_, dataStore_); task->SetTracker(tracker); tracker->AddOneTrace(); threadPool_->PushTask(task); @@ -865,50 +809,45 @@ int SnapshotCoreImpl::TransferSnapshotData( ret = tracker->GetResult(); if (ret < 0) { LOG(ERROR) << "TransferSnapshotDataChunk tracker GetResult fail" - << ", ret = " << ret - << ", uuid = " << task->GetUuid(); + << ", ret = " << ret << ", uuid = " << task->GetUuid(); return ret; } task->SetProgress(static_cast( - kProgressTransferSnapshotDataStart + index * progressPerData)); + kProgressTransferSnapshotDataStart + index * progressPerData)); task->UpdateMetric(); index++; if (task->IsCanceled()) { return kErrCodeSuccess; } } - // 最后剩余数量不足的任务 + // Tasks with insufficient remaining quantity in the end tracker->Wait(); ret = tracker->GetResult(); if (ret < 0) { LOG(ERROR) << "TransferSnapshotDataChunk tracker GetResult fail" - << ", ret = " << ret - << ", uuid = " << task->GetUuid(); + << ", ret = " << ret << ", uuid = " << task->GetUuid(); return ret; } return kErrCodeSuccess; } - -int SnapshotCoreImpl::DeleteSnapshotPre( - UUID uuid, - const std::string &user, - const std::string &fileName, - SnapshotInfo *snapInfo) { +int SnapshotCoreImpl::DeleteSnapshotPre(UUID uuid, const std::string& user, + const std::string& fileName, + SnapshotInfo* snapInfo) { NameLockGuard lockSnapGuard(snapshotRef_->GetSnapshotLock(), uuid); int ret = metaStore_->GetSnapshotInfo(uuid, snapInfo); if (ret < 0) { - // 快照不存在时直接返回删除成功,使接口幂等 + // When the snapshot does not exist, it directly returns deletion + // success, making the interface idempotent return kErrCodeSuccess; } if (snapInfo->GetUser() != user) { LOG(ERROR) << "Can not delete snapshot by different user."; return kErrCodeInvalidUser; } - if ((!fileName.empty()) && - (fileName != snapInfo->GetFileName())) { + if ((!fileName.empty()) && (fileName != snapInfo->GetFileName())) { LOG(ERROR) << "Can not delete, fileName is not matched."; return kErrCodeFileNameNotMatch; } @@ -938,8 +877,7 @@ int SnapshotCoreImpl::DeleteSnapshotPre( ret = metaStore_->UpdateSnapshot(*snapInfo); if (ret < 0) { LOG(ERROR) << "UpdateSnapshot error," - << " ret = " << ret - << ", uuid = " << uuid; + << " ret = " << ret << ", uuid = " << uuid; return ret; } return kErrCodeSuccess; @@ -947,23 +885,24 @@ int SnapshotCoreImpl::DeleteSnapshotPre( constexpr uint32_t 
kDelProgressBuildSnapshotMapComplete = 10;
 constexpr uint32_t kDelProgressDeleteChunkDataStart =
-        kDelProgressBuildSnapshotMapComplete;
+    kDelProgressBuildSnapshotMapComplete;
 constexpr uint32_t kDelProgressDeleteChunkDataComplete = 80;
 constexpr uint32_t kDelProgressDeleteChunkIndexDataComplete = 90;

 /**
- * @brief 异步执行删除快照任务并更新任务进度
+ * @brief Asynchronously execute the delete-snapshot task and update the task
+ * progress
  *
- * 删除快照进度规划如下:
+ * The progress plan for deleting a snapshot is as follows:
  *
  * |BuildSnapshotMap|DeleteChunkData|DeleteChunkIndexData|DeleteSnapshot|
  * |      10%       |    10%~80%    |        90%         |     100%     |
  *
- * @param task 快照任务
+ * @param task snapshot task
  */
 void SnapshotCoreImpl::HandleDeleteSnapshotTask(
     std::shared_ptr<SnapshotTaskInfo> task) {
-    SnapshotInfo &info = task->GetSnapshotInfo();
+    SnapshotInfo& info = task->GetSnapshotInfo();
     UUID uuid = task->GetUuid();
     uint64_t seqNum = info.GetSeqNum();
     FileSnapMap fileSnapshotMap;
@@ -971,15 +910,13 @@ void SnapshotCoreImpl::HandleDeleteSnapshotTask(
     if (ret < 0) {
         LOG(ERROR) << "BuildSnapshotMap error, "
                    << " fileName = " << task->GetFileName()
-                   << ", seqNum = " << seqNum
-                   << ", uuid = " << task->GetUuid();
+                   << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid();
         HandleDeleteSnapshotError(task);
         return;
     }
     task->SetProgress(kDelProgressBuildSnapshotMapComplete);
     task->UpdateMetric();
-    ChunkIndexDataName name(task->GetFileName(),
-        seqNum);
+    ChunkIndexDataName name(task->GetFileName(), seqNum);
     ChunkIndexData indexData;
     if (dataStore_->ChunkIndexDataExist(name)) {
         ret = dataStore_->GetChunkIndexData(name, &indexData);
@@ -995,29 +932,28 @@ void SnapshotCoreImpl::HandleDeleteSnapshotTask(
         auto chunkIndexVec = indexData.GetAllChunkIndex();

         uint32_t totalProgress = kDelProgressDeleteChunkDataComplete -
-            kDelProgressDeleteChunkDataStart;
+                                 kDelProgressDeleteChunkDataStart;
         uint32_t chunkDataNum = chunkIndexVec.size();
-        double progressPerData = static_cast<double> (totalProgress) /
-            chunkDataNum;
+        double progressPerData =
+            static_cast<double>(totalProgress) / chunkDataNum;
         uint32_t index = 0;

         LOG(INFO) << "HandleDeleteSnapshotTask GetChunkIndexData success, "
                   << "begin to DeleteChunkData, "
                   << "chunkDataNum = " << chunkIndexVec.size();

-        for (auto &chunkIndex : chunkIndexVec) {
+        for (auto& chunkIndex : chunkIndexVec) {
             ChunkDataName chunkDataName;
             indexData.GetChunkDataName(chunkIndex, &chunkDataName);
             if ((!fileSnapshotMap.IsExistChunk(chunkDataName)) &&
                 (dataStore_->ChunkDataExist(chunkDataName))) {
-                ret =  dataStore_->DeleteChunkData(chunkDataName);
+                ret = dataStore_->DeleteChunkData(chunkDataName);
                 if (ret < 0) {
                     LOG(ERROR) << "DeleteChunkData error, "
                                << " ret = " << ret
                                << ", fileName = " << task->GetFileName()
                                << ", seqNum = " << seqNum
-                               << ", chunkIndex = "
-                               << chunkDataName.chunkIndex_
+                               << ", chunkIndex = " << chunkDataName.chunkIndex_
                                << ", uuid = " << task->GetUuid();
                     HandleDeleteSnapshotError(task);
                     return;
@@ -1059,8 +995,7 @@ void SnapshotCoreImpl::HandleDeleteSnapshotTask(
     ret = metaStore_->DeleteSnapshot(uuid);
     if (ret < 0) {
         LOG(ERROR) << "DeleteSnapshot error, "
-                   << " ret = " << ret
-                   << ", uuid = " << uuid;
+                   << " ret = " << ret << ", uuid = " << uuid;
         HandleDeleteSnapshotError(task);
         return;
     }
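Reviewer note: a worked instance of the interpolation above may help; the chunk count is hypothetical, the constants are the kDelProgress* values in this hunk:

    // With kDelProgressDeleteChunkDataStart = 10 and ...Complete = 80,
    // deleting chunkDataNum = 200 chunks gives:
    constexpr uint32_t kStart = 10, kEnd = 80;
    constexpr uint32_t chunkDataNum = 200;  // illustrative only
    constexpr double progressPerData =
        static_cast<double>(kEnd - kStart) / chunkDataNum;  // = 0.35
    // After k chunks are deleted the task reports 10 + k * 0.35 percent,
    // i.e. 10% at the start and ~80% once the last chunk is gone.

@@ -1068,7 +1003,7 @@ void SnapshotCoreImpl::HandleDeleteSnapshotTask(
     task->SetProgress(kProgressComplete);
     task->GetSnapshotInfo().SetStatus(Status::done);

-    auto &snapInfo = task->GetSnapshotInfo();
+    auto& snapInfo = task->GetSnapshotInfo();
     LOG(INFO) << "DeleteSnapshot Task Success"
               << ", uuid = " << snapInfo.GetUuid()
               << ", fileName = " << 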
snapInfo.GetFileName() @@ -1079,19 +1014,17 @@ void SnapshotCoreImpl::HandleDeleteSnapshotTask( return; } - void SnapshotCoreImpl::HandleDeleteSnapshotError( std::shared_ptr task) { - SnapshotInfo &info = task->GetSnapshotInfo(); + SnapshotInfo& info = task->GetSnapshotInfo(); info.SetStatus(Status::error); int ret = metaStore_->UpdateSnapshot(info); if (ret < 0) { LOG(ERROR) << "UpdateSnapshot Task Error Fail!" - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); } - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); LOG(INFO) << "DeleteSnapshot Task Fail" << ", uuid = " << snapInfo.GetUuid() << ", fileName = " << snapInfo.GetFileName() @@ -1102,23 +1035,22 @@ void SnapshotCoreImpl::HandleDeleteSnapshotError( return; } -int SnapshotCoreImpl::GetFileSnapshotInfo(const std::string &file, - std::vector *info) { +int SnapshotCoreImpl::GetFileSnapshotInfo(const std::string& file, + std::vector* info) { metaStore_->GetSnapshotList(file, info); return kErrCodeSuccess; } -int SnapshotCoreImpl::GetSnapshotInfo(const UUID uuid, - SnapshotInfo *info) { +int SnapshotCoreImpl::GetSnapshotInfo(const UUID uuid, SnapshotInfo* info) { return metaStore_->GetSnapshotInfo(uuid, info); } -int SnapshotCoreImpl::BuildSnapshotMap(const std::string &fileName, - uint64_t seqNum, - FileSnapMap *fileSnapshotMap) { +int SnapshotCoreImpl::BuildSnapshotMap(const std::string& fileName, + uint64_t seqNum, + FileSnapMap* fileSnapshotMap) { std::vector snapInfos; int ret = metaStore_->GetSnapshotList(fileName, &snapInfos); - for (auto &snap : snapInfos) { + for (auto& snap : snapInfos) { if (snap.GetSeqNum() != seqNum && snap.GetSeqNum() != kUnInitializeSeqNum) { ChunkIndexDataName name(snap.GetFileName(), snap.GetSeqNum()); @@ -1127,10 +1059,11 @@ int SnapshotCoreImpl::BuildSnapshotMap(const std::string &fileName, if (ret < 0) { LOG(ERROR) << "GetChunkIndexData error, " << " ret = " << ret - << ", fileName = " << snap.GetFileName() + << ", fileName = " << snap.GetFileName() << ", seqNum = " << snap.GetSeqNum(); - // 此处不能返回错误, - // 否则一旦某个失败的快照没有indexdata,所有快照都无法删除 + // An error cannot be returned here, + // Otherwise, once a failed snapshot does not have indexdata, + // all snapshots cannot be deleted } else { fileSnapshotMap->maps.push_back(std::move(indexData)); } @@ -1139,19 +1072,18 @@ int SnapshotCoreImpl::BuildSnapshotMap(const std::string &fileName, return kErrCodeSuccess; } -int SnapshotCoreImpl::GetSnapshotList(std::vector *list) { +int SnapshotCoreImpl::GetSnapshotList(std::vector* list) { metaStore_->GetSnapshotList(list); return kErrCodeSuccess; } int SnapshotCoreImpl::HandleCancelUnSchduledSnapshotTask( std::shared_ptr task) { - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); int ret = metaStore_->DeleteSnapshot(snapInfo.GetUuid()); if (ret < 0) { LOG(ERROR) << "HandleCancelUnSchduledSnapshotTask fail, " - << " ret = " << ret - << ", uuid = " << snapInfo.GetUuid() + << " ret = " << ret << ", uuid = " << snapInfo.GetUuid() << ", fileName = " << snapInfo.GetFileName() << ", snapshotName = " << snapInfo.GetSnapshotName() << ", seqNum = " << snapInfo.GetSeqNum() @@ -1161,7 +1093,6 @@ int SnapshotCoreImpl::HandleCancelUnSchduledSnapshotTask( return kErrCodeSuccess; } - int SnapshotCoreImpl::HandleCancelScheduledSnapshotTask( std::shared_ptr task) { LockGuard lockGuard(task->GetLockRef()); @@ -1176,8 +1107,7 @@ int SnapshotCoreImpl::HandleCancelScheduledSnapshotTask( } else { auto& 
snapInfo = task->GetSnapshotInfo();
        LOG(ERROR) << "HandleCancelSchduledSnapshotTask failed: "
-                   << ", ret = " << ret
-                   << ", uuid = " << snapInfo.GetUuid()
+                   << ", ret = " << ret << ", uuid = " << snapInfo.GetUuid()
                   << ", fileName = " << snapInfo.GetFileName()
                   << ", snapshotName = " << snapInfo.GetSnapshotName()
                   << ", seqNum = " << snapInfo.GetSeqNum()
diff --git a/src/snapshotcloneserver/snapshot/snapshot_core.h b/src/snapshotcloneserver/snapshot/snapshot_core.h
index 747e02ea2f..9667b64d39 100644
--- a/src/snapshotcloneserver/snapshot/snapshot_core.h
+++ b/src/snapshotcloneserver/snapshot/snapshot_core.h
@@ -23,19 +23,19 @@
 #ifndef SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_CORE_H_
 #define SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_CORE_H_

+#include
 #include
 #include
 #include
-#include

-#include "src/snapshotcloneserver/common/curvefs_client.h"
-#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h"
-#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h"
+#include "src/common/concurrent/name_lock.h"
 #include "src/common/snapshotclone/snapshotclone_define.h"
 #include "src/snapshotcloneserver/common/config.h"
+#include "src/snapshotcloneserver/common/curvefs_client.h"
 #include "src/snapshotcloneserver/common/snapshot_reference.h"
-#include "src/common/concurrent/name_lock.h"
+#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h"
 #include "src/snapshotcloneserver/common/thread_pool.h"
+#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h"

using ::curve::common::NameLock;

@@ -45,22 +45,23 @@ namespace snapshotcloneserver {
class SnapshotTaskInfo;

/**
- * @brief 文件的快照索引块映射表
+ * @brief Snapshot index block mapping table of a file
 */
struct FileSnapMap {
    std::vector maps;

    /**
-     * @brief 获取当前映射表中是否存在当前chunk数据
+     * @brief Check whether the given chunk data exists in the current
+     * mapping table
     *
-     * @param name chunk数据对象
+     * @param name chunk data object
     *
-     * @retval true 存在
-     * @retval false 不存在
+     * @retval true exists
+     * @retval false does not exist
     */
-    bool IsExistChunk(const ChunkDataName &name) const {
+    bool IsExistChunk(const ChunkDataName& name) const {
        bool find = false;
-        for (auto &v : maps) {
+        for (auto& v : maps) {
            find = v.IsExistChunkDataName(name);
            if (find) {
                break;
            }
@@ -71,7 +72,7 @@ struct FileSnapMap {
};

/**
- * @brief 快照核心模块
+ * @brief Snapshot core module
 */
class SnapshotCore {
 public:
@@ -79,80 +80,76 @@ class SnapshotCore {
    virtual ~SnapshotCore() {}

    /**
-     * @brief 创建快照前置操作
+     * @brief Pre-operation for creating a snapshot
     *
-     * @param file 文件名
-     * @param user 用户名
-     * @param snapshotName 快照名
-     * @param[out] snapInfo 快照信息
+     * @param file file name
+     * @param user username
+     * @param snapshotName snapshot name
+     * @param[out] snapInfo snapshot information
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int CreateSnapshotPre(const std::string &file,
-        const std::string &user,
-        const std::string &snapshotName,
-        SnapshotInfo *snapInfo) = 0;
+    virtual int CreateSnapshotPre(const std::string& file,
+                                  const std::string& user,
+                                  const std::string& snapshotName,
+                                  SnapshotInfo* snapInfo) = 0;

    /**
-     * @brief 执行创建快照任务并更新progress
-     * 第一步,构建快照文件映射, put MateObj
-     * 第二步,从curvefs读取chunk文件,并put DataObj
-     * 第三步,删除curvefs中的临时快照
-     * 第四步,update status
+     * @brief Execute the task of creating a snapshot and update the progress
+     * Step 1, build a snapshot file mapping and put MetaObj
+     * Step 2, read the chunk file from curvefs and put DataObj
+     * Step 3, delete the temporary snapshot in curvefs
+     * Step 4, update status
     *
-     * @param task 
快照任务信息 + * @param task snapshot task information */ virtual void HandleCreateSnapshotTask( std::shared_ptr task) = 0; /** - * @brief 删除快照前置操作 - * 更新数据库中的快照记录为deleting状态 + * @brief Delete snapshot pre operation + * Update the snapshot records in the database to a deleting state * - * @param uuid 快照uuid - * @param user 用户名 - * @param fileName 文件名 - * @param[out] snapInfo 快照信息 + * @param uuid Snapshot uuid + * @param user username + * @param fileName File name + * @param[out] snapInfo snapshot information * - * @return 错误码 + * @return error code */ - virtual int DeleteSnapshotPre( - UUID uuid, - const std::string &user, - const std::string &fileName, - SnapshotInfo *snapInfo) = 0; + virtual int DeleteSnapshotPre(UUID uuid, const std::string& user, + const std::string& fileName, + SnapshotInfo* snapInfo) = 0; /** - * @brief 执行删除快照任务并更新progress + * @brief Execute the delete snapshot task and update the progress * - * @param task 快照任务信息 + * @param task snapshot task information */ virtual void HandleDeleteSnapshotTask( std::shared_ptr task) = 0; /** - * @brief 获取文件的快照信息 + * @brief Get snapshot information of files * - * @param file 文件名 - * @param info 快照信息列表 + * @param file file name + * @param info snapshot information list * - * @return 错误码 + * @return error code */ - virtual int GetFileSnapshotInfo(const std::string &file, - std::vector *info) = 0; + virtual int GetFileSnapshotInfo(const std::string& file, + std::vector* info) = 0; /** - * @brief 获取全部快照信息 + * @brief Get all snapshot information * - * @param list 快照信息列表 + * @param list snapshot information list * - * @return 错误码 + * @return error code */ - virtual int GetSnapshotList(std::vector *list) = 0; - + virtual int GetSnapshotList(std::vector* list) = 0; - virtual int GetSnapshotInfo(const UUID uuid, - SnapshotInfo *info) = 0; + virtual int GetSnapshotInfo(const UUID uuid, SnapshotInfo* info) = 0; virtual int HandleCancelUnSchduledSnapshotTask( std::shared_ptr task) = 0; @@ -170,66 +167,61 @@ class SnapshotCore { class SnapshotCoreImpl : public SnapshotCore { public: - /** - * @brief 构造函数 - * - * @param client curve客户端对象 - * @param metaStore meta存储对象 - * @param dataStore data存储对象 - */ - SnapshotCoreImpl( - std::shared_ptr client, - std::shared_ptr metaStore, - std::shared_ptr dataStore, - std::shared_ptr snapshotRef, - const SnapshotCloneServerOptions &option) - : client_(client), - metaStore_(metaStore), - dataStore_(dataStore), - snapshotRef_(snapshotRef), - chunkSplitSize_(option.chunkSplitSize), - checkSnapshotStatusIntervalMs_(option.checkSnapshotStatusIntervalMs), - maxSnapshotLimit_(option.maxSnapshotLimit), - snapshotCoreThreadNum_(option.snapshotCoreThreadNum), - mdsSessionTimeUs_(option.mdsSessionTimeUs), - clientAsyncMethodRetryTimeSec_(option.clientAsyncMethodRetryTimeSec), - clientAsyncMethodRetryIntervalMs_( - option.clientAsyncMethodRetryIntervalMs), - readChunkSnapshotConcurrency_(option.readChunkSnapshotConcurrency) { - threadPool_ = std::make_shared( - option.snapshotCoreThreadNum); + /** + * @brief constructor + * + * @param client curve client object + * @param metaStore MetaStorage Object + * @param dataStore data storage object + */ + SnapshotCoreImpl(std::shared_ptr client, + std::shared_ptr metaStore, + std::shared_ptr dataStore, + std::shared_ptr snapshotRef, + const SnapshotCloneServerOptions& option) + : client_(client), + metaStore_(metaStore), + dataStore_(dataStore), + snapshotRef_(snapshotRef), + chunkSplitSize_(option.chunkSplitSize), + 
checkSnapshotStatusIntervalMs_(option.checkSnapshotStatusIntervalMs),
+          maxSnapshotLimit_(option.maxSnapshotLimit),
+          snapshotCoreThreadNum_(option.snapshotCoreThreadNum),
+          mdsSessionTimeUs_(option.mdsSessionTimeUs),
+          clientAsyncMethodRetryTimeSec_(option.clientAsyncMethodRetryTimeSec),
+          clientAsyncMethodRetryIntervalMs_(
+              option.clientAsyncMethodRetryIntervalMs),
+          readChunkSnapshotConcurrency_(option.readChunkSnapshotConcurrency) {
+        threadPool_ =
+            std::make_shared(option.snapshotCoreThreadNum);
    }

    int Init();

-    ~SnapshotCoreImpl() {
-        threadPool_->Stop();
-    }
+    ~SnapshotCoreImpl() { threadPool_->Stop(); }

-    // 公有接口定义见SnapshotCore接口注释
-    int CreateSnapshotPre(const std::string &file,
-        const std::string &user,
-        const std::string &snapshotName,
-        SnapshotInfo *snapInfo) override;
+    // See the SnapshotCore interface comments for the public interface
+    // definitions
+    int CreateSnapshotPre(const std::string& file, const std::string& user,
+                          const std::string& snapshotName,
+                          SnapshotInfo* snapInfo) override;

    void HandleCreateSnapshotTask(
        std::shared_ptr task) override;

-    int DeleteSnapshotPre(UUID uuid,
-        const std::string &user,
-        const std::string &fileName,
-        SnapshotInfo *snapInfo) override;
+    int DeleteSnapshotPre(UUID uuid, const std::string& user,
+                          const std::string& fileName,
+                          SnapshotInfo* snapInfo) override;

    void HandleDeleteSnapshotTask(
        std::shared_ptr task) override;

-    int GetFileSnapshotInfo(const std::string &file,
-        std::vector *info) override;
+    int GetFileSnapshotInfo(const std::string& file,
+                            std::vector* info) override;

-    int GetSnapshotInfo(const UUID uuid,
-        SnapshotInfo *info) override;
+    int GetSnapshotInfo(const UUID uuid, SnapshotInfo* info) override;

-    int GetSnapshotList(std::vector *list) override;
+    int GetSnapshotList(std::vector* list) override;

    int HandleCancelUnSchduledSnapshotTask(
        std::shared_ptr task) override;

@@ -239,201 +231,188 @@ class SnapshotCoreImpl : public SnapshotCore {

 private:
    /**
-     * @brief 构建快照文件映射
+     * @brief Build snapshot file mapping
     *
-     * @param fileName 文件名
-     * @param seqNum 快照版本号
-     * @param fileSnapshotMap 快照文件映射表
+     * @param fileName file name
+     * @param seqNum snapshot version number
+     * @param fileSnapshotMap snapshot file mapping table
     *
-     * @return 错误码
+     * @return error code
     */
-    int BuildSnapshotMap(const std::string &fileName,
-        uint64_t seqNum,
-        FileSnapMap *fileSnapshotMap);
+    int BuildSnapshotMap(const std::string& fileName, uint64_t seqNum,
+                         FileSnapMap* fileSnapshotMap);

    /**
-     * @brief 构建Segment信息
+     * @brief Build segment information
     *
-     * @param info 快照信息
-     * @param segInfos Segment信息表
+     * @param info snapshot information
+     * @param segInfos segment information table
     *
-     * @return 错误码
+     * @return error code
     */
-    int BuildSegmentInfo(
-        const SnapshotInfo &info,
-        std::map *segInfos);
+    int BuildSegmentInfo(const SnapshotInfo& info,
+                         std::map* segInfos);

    /**
-     * @brief 在curvefs上创建快照
+     * @brief Create a snapshot on curvefs
     *
-     * @param fileName 文件名
-     * @param info 快照信息
-     * @param task 快照任务信息
+     * @param fileName file name
+     * @param info snapshot information
+     * @param task snapshot task information
     *
-     * @return 错误码
+     * @return error code
     */
-    int CreateSnapshotOnCurvefs(
-        const std::string &fileName,
-        SnapshotInfo *info,
-        std::shared_ptr task);
+    int CreateSnapshotOnCurvefs(const std::string& fileName, SnapshotInfo* info,
+                                std::shared_ptr task);

    /**
-     * @brief 删除curvefs上的快照
+     * @brief Delete the snapshot on curvefs
     *
-     * @param info 快照信息
+     * @param info snapshot information
     *
-     * @return 错误码
+     * @return error code
     */
-    int DeleteSnapshotOnCurvefs(const SnapshotInfo &info);
+    int DeleteSnapshotOnCurvefs(const SnapshotInfo& info);

    /**
-     * @brief 构建索引块
+     * @brief Build the index block
     *
-     * @param info 快照信息
-     * @param[out] indexData 索引块
-     * @param[out] segInfos Segment信息
-     * @param task 快照任务信息
+     * @param info snapshot information
+     * @param[out] indexData index block
+     * @param[out] segInfos segment information
+     * @param task snapshot task information
     *
-     * @return 错误码
+     * @return error code
     */
-    int BuildChunkIndexData(
-        const SnapshotInfo &info,
-        ChunkIndexData *indexData,
-        std::map *segInfos,
-        std::shared_ptr task);
+    int BuildChunkIndexData(const SnapshotInfo& info, ChunkIndexData* indexData,
+                            std::map* segInfos,
+                            std::shared_ptr task);

-    using ChunkDataExistFilter =
-        std::function;
+    using ChunkDataExistFilter = std::function;

    /**
-     * @brief 转储快照过程
+     * @brief Snapshot data dump process
     *
-     * @param indexData 索引块
-     * @param info 快照信息
-     * @param segInfos Segment信息
-     * @param filter 转储数据块过滤器
-     * @param task 快照任务信息
+     * @param indexData index block
+     * @param info snapshot information
+     * @param segInfos segment information
+     * @param filter dump data chunk filter
+     * @param task snapshot task information
     *
-     * @return 错误码
+     * @return error code
     */
-    int TransferSnapshotData(
-        const ChunkIndexData indexData,
-        const SnapshotInfo &info,
-        const std::map &segInfos,
-        const ChunkDataExistFilter &filter,
-        std::shared_ptr task);
+    int TransferSnapshotData(const ChunkIndexData indexData,
+                             const SnapshotInfo& info,
+                             const std::map& segInfos,
+                             const ChunkDataExistFilter& filter,
+                             std::shared_ptr task);

    /**
-     * @brief 开始cancel,更新任务状态,更新数据库状态
+     * @brief Start cancel, update task status, update database status
     *
-     * @param task 快照任务信息
+     * @param task snapshot task information
     *
-     * @return 错误码
+     * @return error code
     */
-    int StartCancel(
-        std::shared_ptr task);
+    int StartCancel(std::shared_ptr task);

    /**
-     * @brief 转储数据之后取消快照过程
+     * @brief Cancel the snapshot process after dumping data
     *
-     * @param task 快照任务信息
-     * @param indexData 索引块
-     * @param fileSnapshotMap 快照文件映射表
+     * @param task snapshot task information
+     * @param indexData index block
+     * @param fileSnapshotMap snapshot file mapping table
     */
-    void CancelAfterTransferSnapshotData(
-        std::shared_ptr task,
-        const ChunkIndexData &indexData,
-        const FileSnapMap &fileSnapshotMap);
+    void CancelAfterTransferSnapshotData(std::shared_ptr task,
+                                         const ChunkIndexData& indexData,
+                                         const FileSnapMap& fileSnapshotMap);

    /**
-     * @brief 创建索引块之后取消快照过程
+     * @brief Cancel the snapshot process after creating the index block
     *
-     * @param task 快照任务信息
+     * @param task snapshot task information
     */
    void CancelAfterCreateChunkIndexData(
        std::shared_ptr task);

    /**
-     * @brief 在curvefs上创建快照之后取消快照过程
+     * @brief Cancel the snapshot process after creating a snapshot on curvefs
     *
-     * @param task 快照任务信息
     */
    void CancelAfterCreateSnapshotOnCurvefs(
        std::shared_ptr task);

    /**
-     * @brief 在Mate数据存储在删除快照
+     * @brief Delete the snapshot from the meta data store
     *
-     * @param task 快照任务信息
+     * @param task snapshot task information
     */
-    void HandleClearSnapshotOnMateStore(
-        std::shared_ptr task);
+    void HandleClearSnapshotOnMateStore(std::shared_ptr task);

    /**
-     * @brief 处理创建快照任务成功
+     * @brief Handle the success of a create-snapshot task
     *
-     * @param task 快照任务信息
+     * @param task snapshot task information
     */
-    void HandleCreateSnapshotSuccess(
-        std::shared_ptr task);
+    void HandleCreateSnapshotSuccess(std::shared_ptr task);
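TransferSnapshotData receives the ChunkDataExistFilter declared above so the same dump loop can skip chunks the data store already holds, for example chunks shared with an older snapshot or chunks dumped before a retry. The following self-contained sketch shows the idea with reduced stand-in types; in the real code the filter wraps dataStore_->ChunkDataExist() and operates on full ChunkDataName objects, not bare indices.

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <set>

    // Stand-ins: a chunk is identified by its index only.
    using ChunkDataName = uint32_t;
    using ChunkDataExistFilter = std::function<bool(const ChunkDataName&)>;

    // Transfer every chunk the filter does not rule out.
    void TransferSnapshotData(const std::set<ChunkDataName>& chunks,
                              const ChunkDataExistFilter& filter) {
        for (const auto& c : chunks) {
            if (filter(c)) continue;  // already dumped, skip
            std::cout << "transferring chunk " << c << "\n";
        }
    }

    int main() {
        std::set<ChunkDataName> alreadyStored = {1, 3};
        // The real code builds such a lambda around the data store lookup.
        ChunkDataExistFilter filter = [&](const ChunkDataName& name) {
            return alreadyStored.count(name) > 0;
        };
        TransferSnapshotData({0, 1, 2, 3}, filter);  // transfers 0 and 2 only
        return 0;
    }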
    /**
-     * @brief 处理创建快照任务失败过程
+     * @brief Handle the failure of a create-snapshot task
     *
-     * @param task 快照任务信息
+     * @param task snapshot task information
     */
-    void HandleCreateSnapshotError(
-        std::shared_ptr task);
+    void HandleCreateSnapshotError(std::shared_ptr task);

    /**
-     * @brief 处理删除快照任务失败过程
+     * @brief Handle the failure of a delete-snapshot task
     *
-     * @param task 快照任务信息
+     * @param task snapshot task information
     */
-    void HandleDeleteSnapshotError(
-        std::shared_ptr task);
-
+    void HandleDeleteSnapshotError(std::shared_ptr task);

    /**
-     * @brief 创建快照前尝试清理失败的快照,否则可能会再次失败
+     * @brief Attempt to clean up failed snapshots before creating a new one;
+     * otherwise the creation may fail again
     *
-     * @param task 快照任务信息
-     * @return 错误码
+     * @param task snapshot task information
+     * @return error code
     */
    int ClearErrorSnapBeforeCreateSnapshot(
        std::shared_ptr task);

 private:
-    // curvefs客户端对象
+    // Curvefs client object
    std::shared_ptr client_;
-    // meta数据存储
+    // Meta data storage
    std::shared_ptr metaStore_;
-    // data数据存储
+    // Data storage
    std::shared_ptr dataStore_;
-    // 快照引用计数管理模块
+    // Snapshot reference count management module
    std::shared_ptr snapshotRef_;
-    // 执行并发步骤的线程池
+    // Thread pool for executing concurrent steps
    std::shared_ptr threadPool_;
-    // 锁住打快照的文件名,防止并发同时对其打快照,同一文件的快照需排队
+    // Lock on the file name being snapshotted, preventing concurrent
+    // snapshots of it; snapshots of the same file must be queued
    NameLock snapshotNameLock_;
-    // 转储chunk分片大小
+    // Split size of a chunk when dumping
    uint64_t chunkSplitSize_;
-    // CheckSnapShotStatus调用间隔
+    // CheckSnapShotStatus call interval
    uint32_t checkSnapshotStatusIntervalMs_;
-    // 最大快照数
+    // Maximum number of snapshots
    uint32_t maxSnapshotLimit_;
-    // 线程数
+    // Number of threads
    uint32_t snapshotCoreThreadNum_;
-    // session超时时间
+    // Session timeout
    uint32_t mdsSessionTimeUs_;
-    // client异步回调请求的重试总时间
+    // Total retry time for client asynchronous callback requests
    uint64_t clientAsyncMethodRetryTimeSec_;
-    // 调用client异步方法重试时间间隔
+    // Retry interval for client asynchronous method calls
    uint64_t clientAsyncMethodRetryIntervalMs_;
-    // 异步ReadChunkSnapshot的并发数
+    // Concurrency of asynchronous ReadChunkSnapshot requests
    uint32_t readChunkSnapshotConcurrency_;
};

diff --git a/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp b/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp
index 8401af3b82..2c9fd2e28c 100644
--- a/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp
+++ b/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp
@@ -27,10 +27,10 @@
namespace curve {
namespace snapshotcloneserver {

-bool ToChunkDataName(const std::string &name, ChunkDataName *cName) {
-    // 逆向解析string,以支持文件名具有分隔字符的情况
-    std::string::size_type pos =
-        name.find_last_of(kChunkDataNameSeprator);
+bool ToChunkDataName(const std::string& name, ChunkDataName* cName) {
+    // Parse the string from the end, so that file names may contain the
+    // separator character
+    std::string::size_type pos = name.find_last_of(kChunkDataNameSeprator);
    std::string::size_type lastPos = std::string::npos;
    if (std::string::npos == pos) {
        LOG(ERROR) << "ToChunkDataName error, namestr = " << name;
        return false;
@@ -40,8 +40,7 @@ bool ToChunkDataName(const std::string &name, ChunkDataName *cName) {
    cName->chunkSeqNum_ = std::stoll(seqNumStr);

    lastPos = pos - 1;
-    pos =
-        name.find_last_of(kChunkDataNameSeprator, lastPos);
+    pos = name.find_last_of(kChunkDataNameSeprator, lastPos);
    if (std::string::npos == pos) {
        LOG(ERROR) << "ToChunkDataName error, namestr = " << name;
        return false;
    }
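These hunks keep the original parsing direction: because the file name itself may contain the separator '-', ToChunkDataName scans from the right, peeling off the sequence number first and then the chunk index, so whatever remains is the file name. A runnable sketch of that logic under simplified assumptions (plain asserts, no logging, no handling of malformed numbers):

    #include <cassert>
    #include <cstdint>
    #include <string>

    struct ChunkDataName {
        std::string fileName_;
        uint64_t chunkSeqNum_ = 0;
        uint32_t chunkIndex_ = 0;
    };

    // Parse "file-chunkIndex-seqNum" from the right, as ToChunkDataName does.
    bool ParseChunkDataName(const std::string& name, ChunkDataName* out) {
        auto pos = name.find_last_of('-');
        if (pos == std::string::npos) return false;
        out->chunkSeqNum_ = std::stoull(name.substr(pos + 1));
        auto pos2 = name.find_last_of('-', pos - 1);
        if (pos2 == std::string::npos) return false;
        out->chunkIndex_ = static_cast<uint32_t>(
            std::stoul(name.substr(pos2 + 1, pos - pos2 - 1)));
        out->fileName_ = name.substr(0, pos2);  // may itself contain '-'
        return true;
    }

    int main() {
        ChunkDataName n;
        assert(ParseChunkDataName("my-volume-7-42", &n));  // name contains '-'
        assert(n.fileName_ == "my-volume" && n.chunkIndex_ == 7 &&
               n.chunkSeqNum_ == 42);
        return 0;
    }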
@@ -57,27 +56,26 @@ bool ToChunkDataName(const std::string &name, ChunkDataName *cName) {
    return true;
}

-bool ChunkIndexData::Serialize(std::string *data) const {
+bool ChunkIndexData::Serialize(std::string* data) const {
    ChunkMap map;
-    for (const auto &m : this->chunkMap_) {
-        map.mutable_indexmap()->
-            insert({m.first,
-                ChunkDataName(fileName_, m.second, m.first).
-                ToDataChunkKey()});
+    for (const auto& m : this->chunkMap_) {
+        map.mutable_indexmap()->insert(
+            {m.first,
+             ChunkDataName(fileName_, m.second, m.first).ToDataChunkKey()});
    }
-    // Todo:可以转化为stream给adpater接口使用SerializeToOstream
+    // Todo: could be converted to a stream so the adapter interface can use
+    // SerializeToOstream
    return map.SerializeToString(data);
}

-bool ChunkIndexData::Unserialize(const std::string &data) {
-    ChunkMap map;
+bool ChunkIndexData::Unserialize(const std::string& data) {
+    ChunkMap map;
    if (map.ParseFromString(data)) {
-        for (const auto &m : map.indexmap()) {
+        for (const auto& m : map.indexmap()) {
            ChunkDataName chunkDataName;
            if (ToChunkDataName(m.second, &chunkDataName)) {
                this->fileName_ = chunkDataName.fileName_;
-                this->chunkMap_.emplace(m.first,
-                    chunkDataName.chunkSeqNum_);
+                this->chunkMap_.emplace(m.first, chunkDataName.chunkSeqNum_);
            } else {
                return false;
            }
@@ -89,7 +87,7 @@ bool ChunkIndexData::Unserialize(const std::string &data) {
}

bool ChunkIndexData::GetChunkDataName(ChunkIndexType index,
-    ChunkDataName* nameOut) const {
+                                      ChunkDataName* nameOut) const {
    auto it = chunkMap_.find(index);
    if (it != chunkMap_.end()) {
        *nameOut = ChunkDataName(fileName_, it->second, index);
@@ -99,7 +97,7 @@ bool ChunkIndexData::GetChunkDataName(ChunkIndexType index,
    }
}

-bool ChunkIndexData::IsExistChunkDataName(const ChunkDataName &name) const {
+bool ChunkIndexData::IsExistChunkDataName(const ChunkDataName& name) const {
    if (fileName_ != name.fileName_) {
        return false;
    }
@@ -120,5 +118,5 @@ std::vector ChunkIndexData::GetAllChunkIndex() const {
    return ret;
}

-}  // namespace snapshotcloneserver
-}  // namespace curve
+}  // namespace snapshotcloneserver
+}  // namespace curve
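Serialize writes a full object key for every entry into the protobuf map, and Unserialize re-parses those keys, which is how fileName_ is recovered without being stored separately. A runnable sketch of that round trip, with a plain std::map standing in for the ChunkMap protobuf message:

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <string>

    // Stand-in for the ChunkMap protobuf: chunk index -> full object key.
    using ChunkMapDemo = std::map<uint32_t, std::string>;

    int main() {
        const std::string fileName = "vol1";
        std::map<uint32_t, uint64_t> chunkMap = {{0, 5}, {7, 6}};  // idx -> seq

        // Serialize: store "file-index-seq" keys, as ToDataChunkKey builds.
        ChunkMapDemo wire;
        for (const auto& m : chunkMap)
            wire[m.first] = fileName + "-" + std::to_string(m.first) + "-" +
                            std::to_string(m.second);

        // Unserialize: re-parse each key from the right, recovering the
        // sequence number and the file name.
        std::map<uint32_t, uint64_t> decoded;
        std::string decodedFile;
        for (const auto& m : wire) {
            auto p = m.second.find_last_of('-');
            uint64_t seq = std::stoull(m.second.substr(p + 1));
            auto p2 = m.second.find_last_of('-', p - 1);
            decodedFile = m.second.substr(0, p2);
            decoded.emplace(m.first, seq);
        }
        assert(decoded == chunkMap && decodedFile == fileName);
        return 0;
    }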
diff --git a/src/snapshotcloneserver/snapshot/snapshot_data_store.h b/src/snapshotcloneserver/snapshot/snapshot_data_store.h
index ae88b7694b..ed7d675450 100644
--- a/src/snapshotcloneserver/snapshot/snapshot_data_store.h
+++ b/src/snapshotcloneserver/snapshot/snapshot_data_store.h
@@ -26,16 +26,16 @@
 #include
 #include
-#include
-#include
 #include
-#include
+#include
 #include
+#include
+#include

 #include "src/common/concurrent/concurrent.h"

-using ::curve::common::SpinLock;
 using ::curve::common::LockGuard;
+using ::curve::common::SpinLock;

namespace curve {
namespace snapshotcloneserver {
@@ -47,25 +47,19 @@ const char kChunkDataNameSeprator[] = "-";

class ChunkDataName {
 public:
-    ChunkDataName()
-        : chunkSeqNum_(0),
-          chunkIndex_(0) {}
-    ChunkDataName(const std::string &fileName,
-                  SnapshotSeqType seq,
+    ChunkDataName() : chunkSeqNum_(0), chunkIndex_(0) {}
+    ChunkDataName(const std::string& fileName, SnapshotSeqType seq,
                  ChunkIndexType chunkIndex)
-        : fileName_(fileName),
-          chunkSeqNum_(seq),
-          chunkIndex_(chunkIndex) {}
+        : fileName_(fileName), chunkSeqNum_(seq), chunkIndex_(chunkIndex) {}
    /**
-     * 构建datachunk对象的名称 文件名-chunk索引-版本号
-     * @return: 对象名称字符串
+     * Build the name of the data chunk object: file name - chunk index -
+     * version number
+     * @return: Object name string
     */
    std::string ToDataChunkKey() const {
-        return fileName_
-            + kChunkDataNameSeprator
-            + std::to_string(this->chunkIndex_)
-            + kChunkDataNameSeprator
-            + std::to_string(this->chunkSeqNum_);
+        return fileName_ + kChunkDataNameSeprator +
+               std::to_string(this->chunkIndex_) + kChunkDataNameSeprator +
+               std::to_string(this->chunkSeqNum_);
    }

    std::string fileName_;
@@ -73,45 +67,41 @@ class ChunkDataName {
    ChunkIndexType chunkIndex_;
};

-inline bool operator==(const ChunkDataName &lhs, const ChunkDataName &rhs) {
+inline bool operator==(const ChunkDataName& lhs, const ChunkDataName& rhs) {
    return (lhs.fileName_ == rhs.fileName_) &&
           (lhs.chunkSeqNum_ == rhs.chunkSeqNum_) &&
           (lhs.chunkIndex_ == rhs.chunkIndex_);
}

/**
- * @brief 根据对象名称解析生成chunkdataname对象
+ * @brief Parse an object name into a ChunkDataName object
 *
- * @param name 对象名
- * @param[out] cName chunkDataName对象
+ * @param name Object name
+ * @param[out] cName chunkDataName object
 *
- * @retVal true 成功
- * @retVal false 失败
+ * @retval true succeeded
+ * @retval false failed
 */
-bool ToChunkDataName(const std::string &name, ChunkDataName *cName);
+bool ToChunkDataName(const std::string& name, ChunkDataName* cName);

class ChunkIndexDataName {
 public:
-    ChunkIndexDataName()
-        : fileSeqNum_(0) {}
-    ChunkIndexDataName(std::string filename,
-                       SnapshotSeqType seq) {
+    ChunkIndexDataName() : fileSeqNum_(0) {}
+    ChunkIndexDataName(std::string filename, SnapshotSeqType seq) {
        fileName_ = filename;
        fileSeqNum_ = seq;
    }
    /**
-     * 构建索引chunk的名称 文件名+文件版本号
-     * @return: 索引chunk的名称字符串
+     * Build the name of the index chunk: file name + file version number
+     * @return: The name string of the index chunk
     */
    std::string ToIndexDataChunkKey() const {
-        return this->fileName_
-            + "-"
-            + std::to_string(this->fileSeqNum_);
+        return this->fileName_ + "-" + std::to_string(this->fileSeqNum_);
    }

-    // 文件名
+    // File name
    std::string fileName_;
-    // 文件版本号
+    // File version number
    SnapshotSeqType fileSeqNum_;
};

@@ -119,46 +109,41 @@ class ChunkIndexData {
 public:
    ChunkIndexData() {}
    /**
-     * 索引chunk数据序列化(使用protobuf实现)
-     * @param 保存序列化后数据的指针
-     * @return: true 序列化成功/ false 序列化失败
+     * Index chunk data serialization (implemented using protobuf)
+     * @param data pointer to the buffer that receives the serialized data
+     * @return: true serialization succeeded / false serialization failed
     */
-    bool Serialize(std::string *data) const;
+    bool Serialize(std::string* data) const;

    /**
-     * 反序列化索引chunk的数据到map中
-     * @param 索引chunk存储的数据
-     * @return: true 反序列化成功/ false 反序列化失败
+     * Deserialize the data of the index chunk into the map
+     * @param data The data stored in the index chunk
+     * @return: true deserialization succeeded / false deserialization failed
     */
-    bool Unserialize(const std::string &data);
+    bool Unserialize(const std::string& data);

-    void PutChunkDataName(const ChunkDataName &name) {
+    void PutChunkDataName(const ChunkDataName& name) {
        chunkMap_.emplace(name.chunkIndex_, name.chunkSeqNum_);
    }

    bool GetChunkDataName(ChunkIndexType index, ChunkDataName* nameOut) const;

-    bool IsExistChunkDataName(const ChunkDataName &name) const;
+    bool IsExistChunkDataName(const ChunkDataName& name) const;

    std::vector GetAllChunkIndex() const;

-    void SetFileName(const std::string &fileName) {
-        fileName_ = fileName;
-    }
+    void SetFileName(const std::string& fileName) { fileName_ = fileName; }

-    std::string GetFileName() {
-        return fileName_;
-    }
+    std::string GetFileName() { return fileName_; }

 private:
-    // 文件名
+    // File name
    std::string fileName_;
-    // 快照文件索引信息map
+    // Snapshot file index information map
    std::map chunkMap_;
};

-
-class ChunkData{
+class ChunkData {
 public:
    ChunkData() {}
    std::string data_;
@@ -166,132 +151,131 @@ class ChunkData{

class TransferTask {
 public:
-    TransferTask() {}
-    std::string uploadId_;
+    TransferTask() {}
+    std::string uploadId_;

-    void AddPartInfo(int partNum, std::string etag) {
-        m_.Lock();
-        partInfo_.emplace(partNum, etag);
-        m_.UnLock();
-    }
+    void AddPartInfo(int partNum, std::string etag) {
+        m_.Lock();
+        partInfo_.emplace(partNum, etag);
+        m_.UnLock();
+    }

-    std::map GetPartInfo() {
-        return partInfo_;
-    }
+    std::map GetPartInfo() { return partInfo_; }

 private:
-    mutable SpinLock m_;
-    // partnumber <=> etag
-    std::map partInfo_;
+    mutable SpinLock m_;
+    // partnumber <=> etag
+    std::map partInfo_;
};

class SnapshotDataStore {
 public:
-    SnapshotDataStore() {}
+    SnapshotDataStore() {}
    virtual ~SnapshotDataStore() {}
    /**
-     * 快照的datastore初始化,根据存储的类型有不同的实现
-     * @param s3配置文件路径
-     * @return 0 初始化成功/ -1 初始化失败
+     * The datastore initialization of snapshots can be implemented differently
+     * depending on the type of storage
+     * @param confpath s3 configuration file path
+     * @return 0 initialization successful/-1 initialization failed
     */
-    virtual int Init(const std::string &confpath) = 0;
+    virtual int Init(const std::string& confpath) = 0;
    /**
-     * 存储快照文件的元数据信息到datastore中
-     * @param 元数据对象名
-     * @param 元数据对象的数据内容
-     * @return 0 保存成功/ -1 保存失败
+     * Store the metadata information of the snapshot file in the datastore
+     * @param name Metadata object name
+     * @param meta the data content of the metadata object
+     * @return 0 saved successfully/-1 failed to save
     */
-    virtual int PutChunkIndexData(const ChunkIndexDataName &name,
-                                  const ChunkIndexData &meta) = 0;
+    virtual int PutChunkIndexData(const ChunkIndexDataName& name,
+                                  const ChunkIndexData& meta) = 0;
    /**
-     * 获取快照文件的元数据信息
-     * @param 元数据对象名
-     * @param 保存元数据数据内容的指针
-     * return: 0 获取成功/ -1 获取失败
+     * Obtain metadata information for snapshot files
+     * @param name Metadata object name
+     * @param meta pointer for receiving the metadata content
+     * @return: 0 successfully obtained/-1 failed to obtain
     */
-    virtual int GetChunkIndexData(const ChunkIndexDataName &name,
-                                  ChunkIndexData *meta) = 0;
+    virtual int GetChunkIndexData(const ChunkIndexDataName& name,
+                                  ChunkIndexData* meta) = 0;
    /**
-     * 删除快照文件的元数据
-     * @param 元数据对象名
-     * @return: 0 删除成功/ -1 删除失败
+     * Delete metadata for snapshot files
+     * @param name Metadata object name
+     * @return: 0 successfully deleted/-1 failed to delete
     */
-    virtual int DeleteChunkIndexData(const ChunkIndexDataName &name) = 0;
-    // 快照元数据chunk是否存在
+    virtual int DeleteChunkIndexData(const ChunkIndexDataName& name) = 0;
+    // Does the snapshot metadata chunk exist
    /**
-     * 判断快照元数据是否存在
-     * @param 元数据对象名
-     * @return: true 存在/ false 不存在
+     * Determine whether snapshot metadata exists
+     * @param name Metadata object name
+     * @return: true exists/false does not exist
     */
-    virtual bool ChunkIndexDataExist(const ChunkIndexDataName &name) = 0;
-/*
-    // 存储快照文件的数据信息到datastore
-    virtual int PutChunkData(const ChunkDataName &name,
-                             const ChunkData &data) = 0;
-
-    // 读取快照文件的数据信息
-    virtual int GetChunkData(const ChunkDataName &name,
-                             ChunkData *data) = 0;
-*/
+    virtual bool ChunkIndexDataExist(const ChunkIndexDataName& name) = 0;
+    /*
+        // Store the data information of the snapshot file in the datastore
+        virtual int PutChunkData(const ChunkDataName &name,
+                                 const ChunkData &data) = 0;

+        // Reading data information from snapshot files
+        virtual int GetChunkData(const ChunkDataName &name,
+                                 ChunkData *data) = 0;
+    */
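The DataChunkTranfer* methods further below form a multipart-upload style lifecycle: Init creates a TransferTask, AddPart records one shard per part number (an etag per part in the S3 case, as TransferTask::AddPartInfo above suggests), Complete commits, and Abort rolls back after any failure. A minimal sketch of how a caller drives that lifecycle, using an in-memory stand-in store rather than the real S3-backed implementation:

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>

    struct TransferTask { std::map<int, std::string> parts; };

    // In-memory stand-in for SnapshotDataStore's transfer interface.
    struct DemoStore {
        int DataChunkTranferInit(const std::string&,
                                 std::shared_ptr<TransferTask>) { return 0; }
        int DataChunkTranferAddPart(const std::string&,
                                    std::shared_ptr<TransferTask> t,
                                    int partNum, int, const char*) {
            t->parts.emplace(partNum, "etag-" + std::to_string(partNum));
            return 0;
        }
        int DataChunkTranferComplete(const std::string&,
                                     std::shared_ptr<TransferTask> t) {
            return t->parts.size() == 4 ? 0 : -1;  // all parts must be there
        }
        int DataChunkTranferAbort(const std::string&,
                                  std::shared_ptr<TransferTask>) { return 0; }
    };

    int main() {
        DemoStore store;
        auto task = std::make_shared<TransferTask>();
        const std::string name = "vol1-0-5";   // file-chunkIndex-seqNum key
        char buf[4096] = {};
        int ret = store.DataChunkTranferInit(name, task);
        // Four shards stand in for chunkSize / chunkSplitSize parts.
        for (int part = 0; ret >= 0 && part < 4; ++part) {
            ret = store.DataChunkTranferAddPart(name, task, part,
                                                sizeof(buf), buf);
        }
        if (ret >= 0) ret = store.DataChunkTranferComplete(name, task);
        if (ret < 0) store.DataChunkTranferAbort(name, task);  // roll back
        std::cout << "transfer " << (ret < 0 ? "aborted" : "completed") << "\n";
        return 0;
    }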
    /**
-     * 删除快照的数据chunk
-     * @param 数据chunk名
-     * @return: 0 删除成功/ -1 删除失败
+     * Delete the data chunk of the snapshot
+     * @param name chunk data name
+     * @return: 0 successfully deleted/-1 failed to delete
     */
-    virtual int DeleteChunkData(const ChunkDataName &name) = 0;
+    virtual int DeleteChunkData(const ChunkDataName& name) = 0;
    /**
-     * 判断快照的数据chunk是否存在
-     * @param 数据chunk名称
-     * @return: true 存在/ false 不存在
+     * Determine whether the data chunk of the snapshot exists
+     * @param name chunk data name
+     * @return: true exists/false does not exist
     */
-    virtual bool ChunkDataExist(const ChunkDataName &name) = 0;
-    // 设置快照转储完成标志
-/*
-    virtual int SetSnapshotFlag(const ChunkIndexDataName &name, int flag) = 0;
-    // 获取快照转储完成标志
-    virtual int GetSnapshotFlag(const ChunkIndexDataName &name) = 0;
-*/
+    virtual bool ChunkDataExist(const ChunkDataName& name) = 0;
+    // Set snapshot dump completion flag
+    /*
+        virtual int SetSnapshotFlag(const ChunkIndexDataName &name, int flag) =
+        0;
+        // Get snapshot dump completion flag
+        virtual int GetSnapshotFlag(const ChunkIndexDataName &name) = 0;
+    */
    /**
-     * 初始化数据库chunk的分片转储任务
-     * @param 数据chunk名称
-     * @param 管理转储任务的指针
-     * @return 0 任务初始化成功/ -1 任务初始化失败
+     * Initialize the sharded dump task of a data chunk
+     * @param name chunk data name
+     * @param task pointer to the task that manages the dump
+     * @return 0 task initialization successful/-1 task initialization failed
     */
-    virtual int DataChunkTranferInit(const ChunkDataName &name,
-                                     std::shared_ptr task) = 0;
+    virtual int DataChunkTranferInit(const ChunkDataName& name,
+                                     std::shared_ptr task) = 0;
    /**
-     * 添加数据chunk的一个分片到转储任务中
-     * @param 数据chunk名
-     * @转储任务
-     * @第几个分片
-     * @分片大小
-     * @分片的数据内容
-     * @return: 0 添加成功/ -1 添加失败
+     * Add a shard of a data chunk to a dumping task.
+     * @param name chunk name
+     * @param task Dumping task
+     * @param partNum Index of the shard
+     * @param partSize Shard size
+     * @param buf Shard data content
+     * @return: 0 for successful addition / -1 for failure to add
     */
-    virtual int DataChunkTranferAddPart(const ChunkDataName &name,
+    virtual int DataChunkTranferAddPart(const ChunkDataName& name,
                                        std::shared_ptr task,
-                                        int partNum,
-                                        int partSize,
-                                        const char* buf) = 0;
+                                        int partNum, int partSize,
+                                        const char* buf) = 0;
    /**
-     * 完成数据chunk的转储任务
-     * @param 数据chunk名
-     * @param 转储任务管理结构
-     * @return: 0 转储任务完成/ 转储任务失败 -1
+     * Complete the dump task of a data chunk
+     * @param name chunk data name
+     * @param task dump task management structure
+     * @return: 0 dump task completed / -1 dump task failed
     */
-    virtual int DataChunkTranferComplete(const ChunkDataName &name,
-                                         std::shared_ptr task) = 0;
+    virtual int DataChunkTranferComplete(
+        const ChunkDataName& name, std::shared_ptr task) = 0;
    /**
-     * 终止数据chunk的分片转储任务
-     * @param 数据chunk名
-     * @param 转储任务管理结构
-     * @return: 0 任务终止成功/ -1 任务终止失败
+     * Abort the sharded dump task of a data chunk
+     * @param name chunk data name
+     * @param task dump task management structure
+     * @return: 0 task aborted successfully / -1 task abort failed
     */
-    virtual int DataChunkTranferAbort(const ChunkDataName &name,
+    virtual int DataChunkTranferAbort(const ChunkDataName& name,
                                      std::shared_ptr task) = 0;
};

-}  // namespace snapshotcloneserver
-}  // namespace curve
+}  // namespace snapshotcloneserver
+}  // namespace curve

#endif  // SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_DATA_STORE_H_
diff --git a/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h b/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h
index d1324243e4..d43add3f96 100644
--- a/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h
+++ b/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h
@@ -23,13 +23,14 @@
 #ifndef SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_DATA_STORE_S3_H_
 #define 
SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_DATA_STORE_S3_H_ -#include -#include #include -#include +#include #include -#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" +#include +#include + #include "src/common/s3_adapter.h" +#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" using ::curve::common::S3Adapter; namespace curve { @@ -37,59 +38,53 @@ namespace snapshotcloneserver { class S3SnapshotDataStore : public SnapshotDataStore { public: - S3SnapshotDataStore() { + S3SnapshotDataStore() { s3Adapter4Meta_ = std::make_shared(); s3Adapter4Data_ = std::make_shared(); } ~S3SnapshotDataStore() {} - int Init(const std::string &path) override; - int PutChunkIndexData(const ChunkIndexDataName &name, - const ChunkIndexData &meta) override; - int GetChunkIndexData(const ChunkIndexDataName &name, - ChunkIndexData *meta) override; - int DeleteChunkIndexData(const ChunkIndexDataName &name) override; - bool ChunkIndexDataExist(const ChunkIndexDataName &name) override; + int Init(const std::string& path) override; + int PutChunkIndexData(const ChunkIndexDataName& name, + const ChunkIndexData& meta) override; + int GetChunkIndexData(const ChunkIndexDataName& name, + ChunkIndexData* meta) override; + int DeleteChunkIndexData(const ChunkIndexDataName& name) override; + bool ChunkIndexDataExist(const ChunkIndexDataName& name) override; // int PutChunkData(const ChunkDataName &name, // const ChunkData &data) override; // int GetChunkData(const ChunkDataName &name, // ChunkData *data) override; - int DeleteChunkData(const ChunkDataName &name) override; - bool ChunkDataExist(const ChunkDataName &name) override; -/* nos暂时不支持,后续增加 - int SetSnapshotFlag(const ChunkIndexDataName &name, int flag) override; - int GetSnapshotFlag(const ChunkIndexDataName &name) override; -*/ - int DataChunkTranferInit(const ChunkDataName &name, - std::shared_ptr task) override; - int DataChunkTranferAddPart(const ChunkDataName &name, - std::shared_ptr task, - int partNum, - int partSize, - const char* buf) override; - int DataChunkTranferComplete(const ChunkDataName &name, - std::shared_ptr task) override; - int DataChunkTranferAbort(const ChunkDataName &name, - std::shared_ptr task) override; + int DeleteChunkData(const ChunkDataName& name) override; + bool ChunkDataExist(const ChunkDataName& name) override; + /* NOS is currently not supported, but will be added in the future + int SetSnapshotFlag(const ChunkIndexDataName &name, int flag) override; + int GetSnapshotFlag(const ChunkIndexDataName &name) override; + */ + int DataChunkTranferInit(const ChunkDataName& name, + std::shared_ptr task) override; + int DataChunkTranferAddPart(const ChunkDataName& name, + std::shared_ptr task, int partNum, + int partSize, const char* buf) override; + int DataChunkTranferComplete(const ChunkDataName& name, + std::shared_ptr task) override; + int DataChunkTranferAbort(const ChunkDataName& name, + std::shared_ptr task) override; - void SetMetaAdapter(std::shared_ptr adapter) { - s3Adapter4Meta_ = adapter; - } - std::shared_ptr GetMetaAdapter(void) { - return s3Adapter4Meta_; - } - void SetDataAdapter(std::shared_ptr adapter) { - s3Adapter4Data_ = adapter; - } - std::shared_ptr GetDataAdapter(void) { - return s3Adapter4Data_; - } + void SetMetaAdapter(std::shared_ptr adapter) { + s3Adapter4Meta_ = adapter; + } + std::shared_ptr GetMetaAdapter(void) { return s3Adapter4Meta_; } + void SetDataAdapter(std::shared_ptr adapter) { + s3Adapter4Data_ = adapter; + } + std::shared_ptr GetDataAdapter(void) { return 
s3Adapter4Data_; } private: std::shared_ptr s3Adapter4Data_; std::shared_ptr s3Adapter4Meta_; }; -} // namespace snapshotcloneserver -} // namespace curve +} // namespace snapshotcloneserver +} // namespace curve #endif // SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_DATA_STORE_S3_H_ diff --git a/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp b/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp index 747b666350..6846b10e16 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp @@ -23,46 +23,39 @@ #include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" #include + #include "src/common/string_util.h" namespace curve { namespace snapshotcloneserver { -int SnapshotServiceManager::Init(const SnapshotCloneServerOptions &option) { +int SnapshotServiceManager::Init(const SnapshotCloneServerOptions& option) { std::shared_ptr pool = std::make_shared(option.snapshotPoolThreadNum); return taskMgr_->Init(pool, option); } -int SnapshotServiceManager::Start() { - return taskMgr_->Start(); -} +int SnapshotServiceManager::Start() { return taskMgr_->Start(); } -void SnapshotServiceManager::Stop() { - taskMgr_->Stop(); -} +void SnapshotServiceManager::Stop() { taskMgr_->Stop(); } -int SnapshotServiceManager::CreateSnapshot(const std::string &file, - const std::string &user, - const std::string &snapshotName, - UUID *uuid) { +int SnapshotServiceManager::CreateSnapshot(const std::string& file, + const std::string& user, + const std::string& snapshotName, + UUID* uuid) { SnapshotInfo snapInfo; int ret = core_->CreateSnapshotPre(file, user, snapshotName, &snapInfo); if (ret < 0) { if (kErrCodeTaskExist == ret) { - // 任务已存在的情况下返回成功,使接口幂等 + // Returns success if the task already exists, making the interface + // idempotent *uuid = snapInfo.GetUuid(); return kErrCodeSuccess; } LOG(ERROR) << "CreateSnapshotPre error, " - << " ret =" - << ret - << ", file = " - << file - << ", snapshotName = " - << snapshotName - << ", uuid = " - << snapInfo.GetUuid(); + << " ret =" << ret << ", file = " << file + << ", snapshotName = " << snapshotName + << ", uuid = " << snapInfo.GetUuid(); return ret; } *uuid = snapInfo.GetUuid(); @@ -72,30 +65,27 @@ int SnapshotServiceManager::CreateSnapshot(const std::string &file, std::make_shared(snapInfo, snapInfoMetric); taskInfo->UpdateMetric(); std::shared_ptr task = - std::make_shared( - snapInfo.GetUuid(), taskInfo, core_); + std::make_shared(snapInfo.GetUuid(), taskInfo, + core_); ret = taskMgr_->PushTask(task); if (ret < 0) { LOG(ERROR) << "Push Task error, " - << " ret = " - << ret; + << " ret = " << ret; return ret; } return kErrCodeSuccess; } -int SnapshotServiceManager::CancelSnapshot( - const UUID &uuid, - const std::string &user, - const std::string &file) { +int SnapshotServiceManager::CancelSnapshot(const UUID& uuid, + const std::string& user, + const std::string& file) { std::shared_ptr task = taskMgr_->GetTask(uuid); if (task != nullptr) { if (user != task->GetTaskInfo()->GetSnapshotInfo().GetUser()) { LOG(ERROR) << "Can not cancel snapshot by different user."; return kErrCodeInvalidUser; } - if ((!file.empty()) && - (file != task->GetTaskInfo()->GetFileName())) { + if ((!file.empty()) && (file != task->GetTaskInfo()->GetFileName())) { LOG(ERROR) << "Can not cancel, fileName is not matched."; return kErrCodeFileNameNotMatch; } @@ -104,35 +94,30 @@ int SnapshotServiceManager::CancelSnapshot( int ret = taskMgr_->CancelTask(uuid); if (ret < 0) { LOG(ERROR) 
<< "CancelSnapshot error, " - << " ret =" - << ret - << ", uuid = " - << uuid - << ", file =" - << file; + << " ret =" << ret << ", uuid = " << uuid + << ", file =" << file; return ret; } return kErrCodeSuccess; } -int SnapshotServiceManager::DeleteSnapshot( - const UUID &uuid, - const std::string &user, - const std::string &file) { +int SnapshotServiceManager::DeleteSnapshot(const UUID& uuid, + const std::string& user, + const std::string& file) { SnapshotInfo snapInfo; int ret = core_->DeleteSnapshotPre(uuid, user, file, &snapInfo); if (kErrCodeTaskExist == ret) { return kErrCodeSuccess; } else if (kErrCodeSnapshotCannotDeleteUnfinished == ret) { - // 转Cancel + // Transfer to Cancel ret = CancelSnapshot(uuid, user, file); if (kErrCodeCannotCancelFinished == ret) { - // 防止这一过程中又执行完了 + // To prevent the execution from completing again during this + // process ret = core_->DeleteSnapshotPre(uuid, user, file, &snapInfo); if (ret < 0) { LOG(ERROR) << "DeleteSnapshotPre fail" - << ", ret = " << ret - << ", uuid = " << uuid + << ", ret = " << ret << ", uuid = " << uuid << ", file =" << file; return ret; } @@ -141,8 +126,7 @@ int SnapshotServiceManager::DeleteSnapshot( } } else if (ret < 0) { LOG(ERROR) << "DeleteSnapshotPre fail" - << ", ret = " << ret - << ", uuid = " << uuid + << ", ret = " << ret << ", uuid = " << uuid << ", file =" << file; return ret; } @@ -151,8 +135,8 @@ int SnapshotServiceManager::DeleteSnapshot( std::make_shared(snapInfo, snapInfoMetric); taskInfo->UpdateMetric(); std::shared_ptr task = - std::make_shared( - snapInfo.GetUuid(), taskInfo, core_); + std::make_shared(snapInfo.GetUuid(), taskInfo, + core_); ret = taskMgr_->PushTask(task); if (ret < 0) { LOG(ERROR) << "Push Task error, " @@ -162,31 +146,28 @@ int SnapshotServiceManager::DeleteSnapshot( return kErrCodeSuccess; } -int SnapshotServiceManager::GetFileSnapshotInfo(const std::string &file, - const std::string &user, - std::vector *info) { +int SnapshotServiceManager::GetFileSnapshotInfo( + const std::string& file, const std::string& user, + std::vector* info) { std::vector snapInfos; int ret = core_->GetFileSnapshotInfo(file, &snapInfos); if (ret < 0) { LOG(ERROR) << "GetFileSnapshotInfo error, " - << " ret = " << ret - << ", file = " << file; + << " ret = " << ret << ", file = " << file; return ret; } return GetFileSnapshotInfoInner(snapInfos, user, info); } -int SnapshotServiceManager::GetFileSnapshotInfoById(const std::string &file, - const std::string &user, - const UUID &uuid, - std::vector *info) { +int SnapshotServiceManager::GetFileSnapshotInfoById( + const std::string& file, const std::string& user, const UUID& uuid, + std::vector* info) { std::vector snapInfos; SnapshotInfo snap; int ret = core_->GetSnapshotInfo(uuid, &snap); if (ret < 0) { LOG(ERROR) << "GetSnapshotInfo error, " - << " ret = " << ret - << ", file = " << file + << " ret = " << ret << ", file = " << file << ", uuid = " << uuid; return kErrCodeFileNotExist; } @@ -201,11 +182,10 @@ int SnapshotServiceManager::GetFileSnapshotInfoById(const std::string &file, } int SnapshotServiceManager::GetFileSnapshotInfoInner( - std::vector snapInfos, - const std::string &user, - std::vector *info) { + std::vector snapInfos, const std::string& user, + std::vector* info) { int ret = kErrCodeSuccess; - for (auto &snap : snapInfos) { + for (auto& snap : snapInfos) { if (snap.GetUser() == user) { Status st = snap.GetStatus(); switch (st) { @@ -226,15 +206,15 @@ int SnapshotServiceManager::GetFileSnapshotInfoInner( taskMgr_->GetTask(uuid); if (task != 
nullptr) { info->emplace_back(snap, - task->GetTaskInfo()->GetProgress()); + task->GetTaskInfo()->GetProgress()); } else { - // 刚刚完成 + // Just completed SnapshotInfo newInfo; ret = core_->GetSnapshotInfo(uuid, &newInfo); if (ret < 0) { - LOG(ERROR) << "GetSnapshotInfo fail" - << ", ret = " << ret - << ", uuid = " << uuid; + LOG(ERROR) + << "GetSnapshotInfo fail" + << ", ret = " << ret << ", uuid = " << uuid; return ret; } switch (newInfo.GetStatus()) { @@ -248,7 +228,8 @@ int SnapshotServiceManager::GetFileSnapshotInfoInner( } default: LOG(ERROR) << "can not reach here!"; - // 当更新数据库失败时,有可能进入这里 + // When updating the database fails, it is + // possible to enter here return kErrCodeInternalError; } } @@ -263,7 +244,7 @@ int SnapshotServiceManager::GetFileSnapshotInfoInner( return kErrCodeSuccess; } -bool SnapshotFilterCondition::IsMatchCondition(const SnapshotInfo &snapInfo) { +bool SnapshotFilterCondition::IsMatchCondition(const SnapshotInfo& snapInfo) { if (user_ != nullptr && *user_ != snapInfo.GetUser()) { return false; } @@ -277,14 +258,12 @@ bool SnapshotFilterCondition::IsMatchCondition(const SnapshotInfo &snapInfo) { } int status; - if (status_ != nullptr - && common::StringToInt(*status_, &status) == false) { + if (status_ != nullptr && common::StringToInt(*status_, &status) == false) { return false; } - if (status_ != nullptr - && common::StringToInt(*status_, &status) == true - && status != static_cast(snapInfo.GetStatus())) { + if (status_ != nullptr && common::StringToInt(*status_, &status) == true && + status != static_cast(snapInfo.GetStatus())) { return false; } @@ -292,11 +271,10 @@ bool SnapshotFilterCondition::IsMatchCondition(const SnapshotInfo &snapInfo) { } int SnapshotServiceManager::GetSnapshotListInner( - std::vector snapInfos, - SnapshotFilterCondition filter, - std::vector *info) { + std::vector snapInfos, SnapshotFilterCondition filter, + std::vector* info) { int ret = kErrCodeSuccess; - for (auto &snap : snapInfos) { + for (auto& snap : snapInfos) { if (filter.IsMatchCondition(snap)) { Status st = snap.GetStatus(); switch (st) { @@ -317,15 +295,15 @@ int SnapshotServiceManager::GetSnapshotListInner( taskMgr_->GetTask(uuid); if (task != nullptr) { info->emplace_back(snap, - task->GetTaskInfo()->GetProgress()); + task->GetTaskInfo()->GetProgress()); } else { - // 刚刚完成 + // Just completed SnapshotInfo newInfo; ret = core_->GetSnapshotInfo(uuid, &newInfo); if (ret < 0) { - LOG(ERROR) << "GetSnapshotInfo fail" - << ", ret = " << ret - << ", uuid = " << uuid; + LOG(ERROR) + << "GetSnapshotInfo fail" + << ", ret = " << ret << ", uuid = " << uuid; return ret; } switch (newInfo.GetStatus()) { @@ -339,7 +317,8 @@ int SnapshotServiceManager::GetSnapshotListInner( } default: LOG(ERROR) << "can not reach here!"; - // 当更新数据库失败时,有可能进入这里 + // When updating the database fails, it is + // possible to enter here return kErrCodeInternalError; } } @@ -355,8 +334,8 @@ int SnapshotServiceManager::GetSnapshotListInner( } int SnapshotServiceManager::GetSnapshotListByFilter( - const SnapshotFilterCondition &filter, - std::vector *info) { + const SnapshotFilterCondition& filter, + std::vector* info) { std::vector snapInfos; int ret = core_->GetSnapshotList(&snapInfos); if (ret < 0) { @@ -374,50 +353,44 @@ int SnapshotServiceManager::RecoverSnapshotTask() { LOG(ERROR) << "GetSnapshotList error"; return ret; } - for (auto &snap : list) { + for (auto& snap : list) { Status st = snap.GetStatus(); switch (st) { - case Status::pending : { + case Status::pending: { auto snapInfoMetric = 
std::make_shared(snap.GetUuid()); std::shared_ptr taskInfo = std::make_shared(snap, snapInfoMetric); taskInfo->UpdateMetric(); std::shared_ptr task = - std::make_shared( - snap.GetUuid(), - taskInfo, - core_); + std::make_shared(snap.GetUuid(), + taskInfo, core_); ret = taskMgr_->PushTask(task); if (ret < 0) { - LOG(ERROR) << "RecoverSnapshotTask push task error, ret = " - << ret - << ", uuid = " - << snap.GetUuid(); + LOG(ERROR) + << "RecoverSnapshotTask push task error, ret = " << ret + << ", uuid = " << snap.GetUuid(); return ret; } break; } - // 重启恢复的canceling等价于errorDeleting - case Status::canceling : - case Status::deleting : - case Status::errorDeleting : { + // canceling restart recovery is equivalent to errorDeleting + case Status::canceling: + case Status::deleting: + case Status::errorDeleting: { auto snapInfoMetric = std::make_shared(snap.GetUuid()); std::shared_ptr taskInfo = std::make_shared(snap, snapInfoMetric); taskInfo->UpdateMetric(); std::shared_ptr task = - std::make_shared( - snap.GetUuid(), - taskInfo, - core_); + std::make_shared(snap.GetUuid(), + taskInfo, core_); ret = taskMgr_->PushTask(task); if (ret < 0) { - LOG(ERROR) << "RecoverSnapshotTask push task error, ret = " - << ret - << ", uuid = " - << snap.GetUuid(); + LOG(ERROR) + << "RecoverSnapshotTask push task error, ret = " << ret + << ", uuid = " << snap.GetUuid(); return ret; } break; @@ -431,4 +404,3 @@ int SnapshotServiceManager::RecoverSnapshotTask() { } // namespace snapshotcloneserver } // namespace curve - diff --git a/src/snapshotcloneserver/snapshot/snapshot_service_manager.h b/src/snapshotcloneserver/snapshot/snapshot_service_manager.h index 1aa7143e9f..9c7944f17f 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_service_manager.h +++ b/src/snapshotcloneserver/snapshot/snapshot_service_manager.h @@ -27,49 +27,39 @@ #include #include +#include "json/json.h" +#include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/common/config.h" #include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include "src/snapshotcloneserver/snapshot/snapshot_task.h" #include "src/snapshotcloneserver/snapshot/snapshot_task_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/snapshotcloneserver/common/config.h" -#include "json/json.h" namespace curve { namespace snapshotcloneserver { /** - * @brief 文件单个快照信息 + * @brief file single snapshot information */ class FileSnapshotInfo { public: FileSnapshotInfo() = default; - /** - * @brief 构造函数 - * - * @param snapInfo 快照信息 - * @param snapProgress 快照完成度百分比 - */ - FileSnapshotInfo(const SnapshotInfo &snapInfo, - uint32_t snapProgress) - : snapInfo_(snapInfo), - snapProgress_(snapProgress) {} - - void SetSnapshotInfo(const SnapshotInfo &snapInfo) { - snapInfo_ = snapInfo; - } + /** + * @brief constructor + * + * @param snapInfo snapshot information + * @param snapProgress snapshot completion percentage + */ + FileSnapshotInfo(const SnapshotInfo& snapInfo, uint32_t snapProgress) + : snapInfo_(snapInfo), snapProgress_(snapProgress) {} - SnapshotInfo GetSnapshotInfo() const { - return snapInfo_; - } + void SetSnapshotInfo(const SnapshotInfo& snapInfo) { snapInfo_ = snapInfo; } - void SetSnapProgress(uint32_t progress) { - snapProgress_ = progress; - } + SnapshotInfo GetSnapshotInfo() const { return snapInfo_; } - uint32_t GetSnapProgress() const { - return snapProgress_; - } + void SetSnapProgress(uint32_t progress) { snapProgress_ = progress; } + + uint32_t GetSnapProgress() const { return snapProgress_; } 
Json::Value ToJsonObj() const { Json::Value fileSnapObj; @@ -86,7 +76,7 @@ class FileSnapshotInfo { return fileSnapObj; } - void LoadFromJsonObj(const Json::Value &jsonObj) { + void LoadFromJsonObj(const Json::Value& jsonObj) { SnapshotInfo snapInfo; snapInfo.SetUuid(jsonObj["UUID"].asString()); snapInfo.SetUser(jsonObj["User"].asString()); @@ -101,209 +91,185 @@ class FileSnapshotInfo { } private: - // 快照信息 + // Snapshot Information SnapshotInfo snapInfo_; - // 快照处理进度百分比 + // Snapshot processing progress percentage uint32_t snapProgress_; }; class SnapshotFilterCondition { public: SnapshotFilterCondition() - : uuid_(nullptr), - file_(nullptr), - user_(nullptr), - status_(nullptr) {} - - SnapshotFilterCondition(const std::string *uuid, const std::string *file, - const std::string *user, - const std::string *status) - : uuid_(uuid), - file_(file), - user_(user), - status_(status) {} - bool IsMatchCondition(const SnapshotInfo &snapInfo); - - void SetUuid(const std::string *uuid) { - uuid_ = uuid; - } + : uuid_(nullptr), file_(nullptr), user_(nullptr), status_(nullptr) {} - void SetFile(const std::string *file) { - file_ = file; - } + SnapshotFilterCondition(const std::string* uuid, const std::string* file, + const std::string* user, const std::string* status) + : uuid_(uuid), file_(file), user_(user), status_(status) {} + bool IsMatchCondition(const SnapshotInfo& snapInfo); - void SetUser(const std::string *user) { - user_ = user; - } + void SetUuid(const std::string* uuid) { uuid_ = uuid; } - void SetStatus(const std::string *status) { - status_ = status; - } + void SetFile(const std::string* file) { file_ = file; } + + void SetUser(const std::string* user) { user_ = user; } + void SetStatus(const std::string* status) { status_ = status; } private: - const std::string *uuid_; - const std::string *file_; - const std::string *user_; - const std::string *status_; + const std::string* uuid_; + const std::string* file_; + const std::string* user_; + const std::string* status_; }; class SnapshotServiceManager { public: - /** - * @brief 构造函数 - * - * @param taskMgr 快照任务管理类对象 - * @param core 快照核心模块 - */ - SnapshotServiceManager( - std::shared_ptr taskMgr, - std::shared_ptr core) - : taskMgr_(taskMgr), - core_(core) {} + /** + * @brief constructor + * + * @param taskMgr snapshot task management class object + * @param core snapshot core module + */ + SnapshotServiceManager(std::shared_ptr taskMgr, + std::shared_ptr core) + : taskMgr_(taskMgr), core_(core) {} virtual ~SnapshotServiceManager() {} /** - * @brief 初始化 + * @brief initialization * - * @return 错误码 + * @return error code */ - virtual int Init(const SnapshotCloneServerOptions &option); + virtual int Init(const SnapshotCloneServerOptions& option); /** - * @brief 启动服务 + * @brief Start Service * - * @return 错误码 + * @return error code */ virtual int Start(); /** - * @brief 停止服务 + * @brief Stop service * */ virtual void Stop(); /** - * @brief 创建快照服务 + * @brief Create snapshot service * - * @param file 文件名 - * @param user 文件所属用户 - * @param snapshotName 快照名 - * @param uuid 快照uuid + * @param file file name + * @param user The user to whom the file belongs + * @param snapshotName SnapshotName + * @param uuid Snapshot uuid * - * @return 错误码 + * @return error code */ - virtual int CreateSnapshot(const std::string &file, - const std::string &user, - const std::string &snapshotName, - UUID *uuid); + virtual int CreateSnapshot(const std::string& file, const std::string& user, + const std::string& snapshotName, UUID* uuid); /** - * @brief 删除快照服务 + * @brief 
Delete snapshot service * - * @param uuid 快照uuid - * @param user 快照文件的用户 - * @param file 快照所属文件的文件名 + * @param uuid Snapshot uuid + * @param user The user of the snapshot file + * @param file The file name of the file to which the snapshot belongs * - * @return 错误码 + * @return error code */ - virtual int DeleteSnapshot(const UUID &uuid, - const std::string &user, - const std::string &file); + virtual int DeleteSnapshot(const UUID& uuid, const std::string& user, + const std::string& file); /** - * @brief 取消快照服务 + * @brief Cancel snapshot service * - * @param uuid 快照的uuid - * @param user 快照的用户 - * @param file 快照所属文件的文件名 + * @param uuid The uuid of the snapshot + * @param user snapshot user + * @param file The file name of the file to which the snapshot belongs * - * @return 错误码 + * @return error code */ - virtual int CancelSnapshot(const UUID &uuid, - const std::string &user, - const std::string &file); + virtual int CancelSnapshot(const UUID& uuid, const std::string& user, + const std::string& file); /** - * @brief 获取文件的快照信息服务接口 + * @brief Gets the snapshot information service interface for files * - * @param file 文件名 - * @param user 用户名 - * @param info 快照信息列表 + * @param file file name + * @param user username + * @param info snapshot information list * - * @return 错误码 + * @return error code */ - virtual int GetFileSnapshotInfo(const std::string &file, - const std::string &user, - std::vector *info); + virtual int GetFileSnapshotInfo(const std::string& file, + const std::string& user, + std::vector* info); /** - * @brief 根据Id获取文件的快照信息 + * @brief Obtain snapshot information of the file based on the ID * - * @param file 文件名 - * @param user 用户名 - * @param uuid 快照Id - * @param info 快照信息列表 + * @param file file name + * @param user username + * @param uuid SnapshotId + * @param info snapshot information list * - * @return 错误码 + * @return error code */ - virtual int GetFileSnapshotInfoById(const std::string &file, - const std::string &user, - const UUID &uuid, - std::vector *info); + virtual int GetFileSnapshotInfoById(const std::string& file, + const std::string& user, + const UUID& uuid, + std::vector* info); /** - * @brief 获取快照列表 + * @brief Get snapshot list * - * @param filter 过滤条件 - * @param info 快照信息列表 + * @param filter filtering conditions + * @param info snapshot information list * - * @return 错误码 + * @return error code */ - virtual int GetSnapshotListByFilter(const SnapshotFilterCondition &filter, - std::vector *info); + virtual int GetSnapshotListByFilter(const SnapshotFilterCondition& filter, + std::vector* info); /** - * @brief 恢复快照任务接口 + * @brief Restore Snapshot Task Interface * - * @return 错误码 + * @return error code */ virtual int RecoverSnapshotTask(); private: /** - * @brief 根据快照信息获取快照任务信息 + * @brief Obtain snapshot task information based on snapshot information * - * @param snapInfos 快照信息 - * @param user 用户名 - * @param[out] info 快照任务信息 + * @param snapInfos snapshot information + * @param user username + * @param[out] info snapshot task information * - * @return 错误码 + * @return error code */ - int GetFileSnapshotInfoInner( - std::vector snapInfos, - const std::string &user, - std::vector *info); + int GetFileSnapshotInfoInner(std::vector snapInfos, + const std::string& user, + std::vector* info); /** - * @brief 根据快照信息获取快照任务信息 + * @brief Obtain snapshot task information based on snapshot information * - * @param snapInfos 快照信息 - * @param filter 过滤条件 - * @param[out] info 快照任务信息 + * @param snapInfos snapshot information + * @param filter filtering conditions + * @param[out] info 
snapshot task information
     *
-     * @return 错误码
+     * @return error code
     */
-    int GetSnapshotListInner(
-        std::vector<SnapshotInfo> snapInfos,
-        SnapshotFilterCondition filter,
-        std::vector<FileSnapshotInfo> *info);
+    int GetSnapshotListInner(std::vector<SnapshotInfo> snapInfos,
+                             SnapshotFilterCondition filter,
+                             std::vector<FileSnapshotInfo>* info);

 private:
-    // 快照任务管理类对象
+    // Snapshot task manager object
     std::shared_ptr<SnapshotTaskManager> taskMgr_;
-    // 快照核心模块
+    // Snapshot core module
     std::shared_ptr<SnapshotCore> core_;
 };
diff --git a/src/snapshotcloneserver/snapshot/snapshot_task.cpp b/src/snapshotcloneserver/snapshot/snapshot_task.cpp
index 179f2b4617..a66bf4c4ca 100644
--- a/src/snapshotcloneserver/snapshot/snapshot_task.cpp
+++ b/src/snapshotcloneserver/snapshot/snapshot_task.cpp
@@ -20,10 +20,11 @@
 * Author: xuchaojie
 */

+#include "src/snapshotcloneserver/snapshot/snapshot_task.h"
+
 #include <glog/logging.h>

 #include "src/common/timeutility.h"
-#include "src/snapshotcloneserver/snapshot/snapshot_task.h"

 namespace curve {
 namespace snapshotcloneserver {

@@ -46,18 +47,20 @@ void ReadChunkSnapshotClosure::Run() {
 }

 /**
- * @brief 转储快照的单个chunk
+ * @brief Dump a single chunk of a snapshot
  * @detail
- * 由于单个chunk过大,chunk转储分片进行,分片大小为chunkSplitSize_,
- * 步骤如下:
- * 1. 创建一个转储任务transferTask,并调用DataChunkTranferInit初始化
- * 2. 调用ReadChunkSnapshot从curvefs读取chunk的一个分片
- * 3. 调用DataChunkTranferAddPart转储一个分片
- * 4. 重复2、3直到所有分片转储完成,调用DataChunkTranferComplete结束转储任务
- * 5. 中间如有读取或转储发生错误,则调用DataChunkTranferAbort放弃转储,
- *    并返回错误码
+ * Since a single chunk can be too large to dump in one piece, it is dumped
+ * in segments of chunkSplitSize_ bytes each. The steps are:
+ * 1. Create a dump task transferTask and initialize it with
+ *    DataChunkTranferInit.
+ * 2. Call ReadChunkSnapshot to read one segment of the chunk from CurveFS.
+ * 3. Call DataChunkTranferAddPart to dump that segment.
+ * 4. Repeat steps 2 and 3 until every segment has been dumped, then call
+ *    DataChunkTranferComplete to finish the dump task.
+ * 5. If any read or dump error occurs along the way, call
+ *    DataChunkTranferAbort to abandon the dump and return an error code.
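+ *
+ * Condensed illustration of the above flow (illustrative shorthand only, not
+ * part of this change; arguments are abbreviated and the concurrency and
+ * retry handling of the real implementation below are omitted):
+ *
+ *     DataChunkTranferInit(name, &transferTask);                 // step 1
+ *     for (i = 0; i < chunkSize / chunkSplitSize; i++) {
+ *         ReadChunkSnapshot(cidInfo, seqNum, i * chunkSplitSize,
+ *                           chunkSplitSize, buf);                // step 2
+ *         DataChunkTranferAddPart(name, transferTask, i,
+ *                                 chunkSplitSize, buf);          // step 3
+ *     }
+ *     ok ? DataChunkTranferComplete(name, transferTask)          // step 4
+ *        : DataChunkTranferAbort(name, transferTask);            // step 5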
* - * @return 错误码 + * @return Error code */ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { ChunkDataName name = taskInfo_->name_; @@ -67,8 +70,7 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { std::shared_ptr transferTask = std::make_shared(); - int ret = dataStore_->DataChunkTranferInit(name, - transferTask); + int ret = dataStore_->DataChunkTranferInit(name, transferTask); if (ret < 0) { LOG(ERROR) << "DataChunkTranferInit error, " << " ret = " << ret @@ -80,9 +82,7 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { } auto tracker = std::make_shared(); - for (uint64_t i = 0; - i < chunkSize / chunkSplitSize; - i++) { + for (uint64_t i = 0; i < chunkSize / chunkSplitSize; i++) { auto context = std::make_shared(); context->cidInfo = taskInfo_->cidInfo_; context->seqNum = taskInfo_->name_.chunkSeqNum_; @@ -101,8 +101,8 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { } std::list results = tracker->PopResultContexts(); - ret = HandleReadChunkSnapshotResultsAndRetry( - tracker, transferTask, results); + ret = HandleReadChunkSnapshotResultsAndRetry(tracker, transferTask, + results); if (ret < 0) { break; } @@ -113,18 +113,17 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { std::list results = tracker->PopResultContexts(); if (0 == results.size()) { - // 已经完成,没有新的结果了 + // Completed, no new results break; } - ret = HandleReadChunkSnapshotResultsAndRetry( - tracker, transferTask, results); + ret = HandleReadChunkSnapshotResultsAndRetry(tracker, transferTask, + results); if (ret < 0) { break; } } while (true); if (ret >= 0) { - ret = - dataStore_->DataChunkTranferComplete(name, transferTask); + ret = dataStore_->DataChunkTranferComplete(name, transferTask); if (ret < 0) { LOG(ERROR) << "DataChunkTranferComplete fail" << ", ret = " << ret @@ -136,18 +135,15 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { } } if (ret < 0) { - int ret2 = - dataStore_->DataChunkTranferAbort( - name, - transferTask); - if (ret2 < 0) { - LOG(ERROR) << "DataChunkTranferAbort fail" - << ", ret = " << ret2 - << ", chunkDataName = " << name.ToDataChunkKey() - << ", logicalPool = " << cidInfo.lpid_ - << ", copysetId = " << cidInfo.cpid_ - << ", chunkId = " << cidInfo.cid_; - } + int ret2 = dataStore_->DataChunkTranferAbort(name, transferTask); + if (ret2 < 0) { + LOG(ERROR) << "DataChunkTranferAbort fail" + << ", ret = " << ret2 + << ", chunkDataName = " << name.ToDataChunkKey() + << ", logicalPool = " << cidInfo.lpid_ + << ", copysetId = " << cidInfo.cpid_ + << ", chunkId = " << cidInfo.cid_; + } return ret; } return kErrCodeSuccess; @@ -156,7 +152,7 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { int TransferSnapshotDataChunkTask::StartAsyncReadChunkSnapshot( std::shared_ptr tracker, std::shared_ptr context) { - ReadChunkSnapshotClosure *cb = + ReadChunkSnapshotClosure* cb = new ReadChunkSnapshotClosure(tracker, context); tracker->AddOneTrace(); uint64_t offset = context->partIndex * context->len; @@ -166,13 +162,9 @@ int TransferSnapshotDataChunkTask::StartAsyncReadChunkSnapshot( << ", chunkId = " << context->cidInfo.cid_ << ", seqNum = " << context->seqNum << ", offset = " << offset; - int ret = client_->ReadChunkSnapshot( - context->cidInfo, - context->seqNum, - offset, - context->len, - context->buf.get(), - cb); + int ret = + client_->ReadChunkSnapshot(context->cidInfo, context->seqNum, offset, + context->len, context->buf.get(), cb); if (ret < 0) { LOG(ERROR) << "ReadChunkSnapshot 
error, " << " ret = " << ret @@ -189,7 +181,7 @@ int TransferSnapshotDataChunkTask::StartAsyncReadChunkSnapshot( int TransferSnapshotDataChunkTask::HandleReadChunkSnapshotResultsAndRetry( std::shared_ptr tracker, std::shared_ptr transferTask, - const std::list &results) { + const std::list& results) { int ret = kErrCodeSuccess; for (auto context : results) { if (context->retCode < 0) { @@ -197,9 +189,8 @@ int TransferSnapshotDataChunkTask::HandleReadChunkSnapshotResultsAndRetry( if (nowTime - context->startTime < context->clientAsyncMethodRetryTimeSec) { // retry - std::this_thread::sleep_for( - std::chrono::milliseconds( - taskInfo_->clientAsyncMethodRetryIntervalMs_)); + std::this_thread::sleep_for(std::chrono::milliseconds( + taskInfo_->clientAsyncMethodRetryIntervalMs_)); ret = StartAsyncReadChunkSnapshot(tracker, context); if (ret < 0) { return ret; @@ -212,15 +203,11 @@ int TransferSnapshotDataChunkTask::HandleReadChunkSnapshotResultsAndRetry( } } else { ret = dataStore_->DataChunkTranferAddPart( - taskInfo_->name_, - transferTask, - context->partIndex, - context->len, - context->buf.get()); + taskInfo_->name_, transferTask, context->partIndex, + context->len, context->buf.get()); if (ret < 0) { LOG(ERROR) << "DataChunkTranferAddPart fail" - << ", ret = " << ret - << ", chunkDataName = " + << ", ret = " << ret << ", chunkDataName = " << taskInfo_->name_.ToDataChunkKey() << ", index = " << context->partIndex; return ret; diff --git a/src/snapshotcloneserver/snapshot/snapshot_task.h b/src/snapshotcloneserver/snapshot/snapshot_task.h index bf53993a61..23102eb4f5 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_task.h +++ b/src/snapshotcloneserver/snapshot/snapshot_task.h @@ -23,172 +23,153 @@ #ifndef SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_TASK_H_ #define SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_TASK_H_ -#include -#include #include +#include +#include -#include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/common/snapshotclone_metric.h" #include "src/snapshotcloneserver/common/task.h" #include "src/snapshotcloneserver/common/task_info.h" -#include "src/snapshotcloneserver/common/snapshotclone_metric.h" #include "src/snapshotcloneserver/common/task_tracker.h" +#include "src/snapshotcloneserver/snapshot/snapshot_core.h" namespace curve { namespace snapshotcloneserver { /** - * @brief 快照任务信息 + * @brief snapshot task information */ class SnapshotTaskInfo : public TaskInfo { public: - /** - * @brief 构造函数 - * - * @param snapInfo 快照信息 - */ - explicit SnapshotTaskInfo(const SnapshotInfo &snapInfo, - std::shared_ptr metric) - : TaskInfo(), - snapshotInfo_(snapInfo), - metric_(metric) {} + /** + * @brief constructor + * + * @param snapInfo snapshot information + */ + explicit SnapshotTaskInfo(const SnapshotInfo& snapInfo, + std::shared_ptr metric) + : TaskInfo(), snapshotInfo_(snapInfo), metric_(metric) {} /** - * @brief 获取快照信息 + * @brief Get snapshot information * - * @return 快照信息 + * @return snapshot information */ - SnapshotInfo& GetSnapshotInfo() { - return snapshotInfo_; - } + SnapshotInfo& GetSnapshotInfo() { return snapshotInfo_; } /** - * @brief 获取快照uuid + * @brief Get snapshot uuid * - * @return 快照uuid + * @return snapshot uuid */ - UUID GetUuid() const { - return snapshotInfo_.GetUuid(); - } + UUID GetUuid() const { return snapshotInfo_.GetUuid(); } /** - * @brief 获取文件名 + * @brief Get file name * - * @return 文件名 + * @return file name */ - std::string GetFileName() const { - return 
snapshotInfo_.GetFileName(); - } + std::string GetFileName() const { return snapshotInfo_.GetFileName(); } - void UpdateMetric() { - metric_->Update(this); - } + void UpdateMetric() { metric_->Update(this); } private: - // 快照信息 + // Snapshot Information SnapshotInfo snapshotInfo_; - // metric 信息 + // Metric Information std::shared_ptr metric_; }; - class SnapshotTask : public Task { public: /** - * @brief 构造函数 - * - * @param taskId 快照任务id - * @param taskInfo 快照任务信息 - */ - SnapshotTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) - : Task(taskId), - taskInfo_(taskInfo), - core_(core) {} + * @brief constructor + * + * @param taskId Snapshot task ID + * @param taskInfo snapshot task information + */ + SnapshotTask(const TaskIdType& taskId, + std::shared_ptr taskInfo, + std::shared_ptr core) + : Task(taskId), taskInfo_(taskInfo), core_(core) {} /** - * @brief 获取快照任务信息对象指针 + * @brief Get snapshot task information object pointer * - * @return 快照任务信息对象指针 + * @return Snapshot task information object pointer */ - std::shared_ptr GetTaskInfo() const { - return taskInfo_; - } + std::shared_ptr GetTaskInfo() const { return taskInfo_; } protected: - // 快照任务信息 + // Snapshot Task Information std::shared_ptr taskInfo_; - // 快照核心逻辑对象 + // Snapshot Core Logical Object std::shared_ptr core_; }; /** - * @brief 创建快照任务 + * @brief Create snapshot task */ class SnapshotCreateTask : public SnapshotTask { public: - /** - * @brief 构造函数 - * - * @param taskId 快照任务id - * @param taskInfo 快照任务信息 - * @param core 快照核心逻辑对象 - */ - SnapshotCreateTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) + /** + * @brief constructor + * + * @param taskId Snapshot task ID + * @param taskInfo snapshot task information + * @param core snapshot core logical object + */ + SnapshotCreateTask(const TaskIdType& taskId, + std::shared_ptr taskInfo, + std::shared_ptr core) : SnapshotTask(taskId, taskInfo, core) {} /** - * @brief 快照执行函数 + * @brief snapshot execution function */ - void Run() override { - core_->HandleCreateSnapshotTask(taskInfo_); - } + void Run() override { core_->HandleCreateSnapshotTask(taskInfo_); } }; /** - * @brief 删除快照任务 + * @brief Delete snapshot task */ class SnapshotDeleteTask : public SnapshotTask { public: - /** - * @brief 构造函数 - * - * @param taskId 快照任务id - * @param taskInfo 快照任务信息 - * @param core 快照核心逻辑对象 - */ - SnapshotDeleteTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) + /** + * @brief constructor + * + * @param taskId Snapshot task ID + * @param taskInfo snapshot task information + * @param core snapshot core logical object + */ + SnapshotDeleteTask(const TaskIdType& taskId, + std::shared_ptr taskInfo, + std::shared_ptr core) : SnapshotTask(taskId, taskInfo, core) {} /** - * @brief 快照执行函数 + * @brief snapshot execution function */ - void Run() override { - core_->HandleDeleteSnapshotTask(taskInfo_); - } + void Run() override { core_->HandleDeleteSnapshotTask(taskInfo_); } }; struct ReadChunkSnapshotContext { - // chunkid 信息 + // Chunkid information ChunkIDInfo cidInfo; // seq uint64_t seqNum; - // 分片的索引 + // Fragmented index uint64_t partIndex; - // 分片的buffer + // Sliced buffer std::unique_ptr buf; - // 分片长度 + // Slice length uint64_t len; - // 返回值 + // Return value int retCode; - // 异步请求开始时间 + // Asynchronous request start time uint64_t startTime; - // 异步请求重试总时间 + // Total retry time for asynchronous requests uint64_t clientAsyncMethodRetryTimeSec; }; @@ -200,8 +181,7 @@ struct ReadChunkSnapshotClosure : public 
SnapCloneClosure {
     ReadChunkSnapshotClosure(
        std::shared_ptr<TaskTracker> tracker,
        std::shared_ptr<ReadChunkSnapshotContext> context)
-        : tracker_(tracker),
-          context_(context) {}
+        : tracker_(tracker), context_(context) {}
     void Run() override;
     std::shared_ptr<TaskTracker> tracker_;
     std::shared_ptr<ReadChunkSnapshotContext> context_;
@@ -216,13 +196,13 @@ struct TransferSnapshotDataChunkTaskInfo : public TaskInfo {
     uint64_t clientAsyncMethodRetryIntervalMs_;
     uint32_t readChunkSnapshotConcurrency_;

-    TransferSnapshotDataChunkTaskInfo(const ChunkDataName &name,
-        uint64_t chunkSize,
-        const ChunkIDInfo &cidInfo,
-        uint64_t chunkSplitSize,
-        uint64_t clientAsyncMethodRetryTimeSec,
-        uint64_t clientAsyncMethodRetryIntervalMs,
-        uint32_t readChunkSnapshotConcurrency)
+    TransferSnapshotDataChunkTaskInfo(const ChunkDataName& name,
+                                      uint64_t chunkSize,
+                                      const ChunkIDInfo& cidInfo,
+                                      uint64_t chunkSplitSize,
+                                      uint64_t clientAsyncMethodRetryTimeSec,
+                                      uint64_t clientAsyncMethodRetryIntervalMs,
+                                      uint32_t readChunkSnapshotConcurrency)
         : name_(name),
           chunkSize_(chunkSize),
           cidInfo_(cidInfo),
@@ -234,7 +214,8 @@ class TransferSnapshotDataChunkTask : public TrackerTask {
 public:
-    TransferSnapshotDataChunkTask(const TaskIdType &taskId,
+    TransferSnapshotDataChunkTask(
+        const TaskIdType& taskId,
         std::shared_ptr<TransferSnapshotDataChunkTaskInfo> taskInfo,
         std::shared_ptr<CurveFsClient> client,
         std::shared_ptr<SnapshotDataStore> dataStore)
@@ -255,37 +236,37 @@ class TransferSnapshotDataChunkTask : public TrackerTask {
 private:
     /**
-     * @brief 转储快照单个chunk
+     * @brief Dump a single chunk of the snapshot
      *
-     * @return 错误码
+     * @return error code
      */
     int TransferSnapshotDataChunk();

     /**
-     * @brief 开始异步ReadSnapshotChunk
+     * @brief Start an asynchronous ReadSnapshotChunk
      *
-     * @param tracker 异步ReadSnapshotChunk追踪器
-     * @param context ReadSnapshotChunk上下文
+     * @param tracker asynchronous ReadSnapshotChunk tracker
+     * @param context ReadSnapshotChunk context
      *
-     * @return 错误码
+     * @return error code
      */
     int StartAsyncReadChunkSnapshot(
        std::shared_ptr<TaskTracker> tracker,
        std::shared_ptr<ReadChunkSnapshotContext> context);

     /**
-     * @brief 处理ReadChunkSnapshot的结果并重试
+     * @brief Handle the ReadChunkSnapshot results and retry failed reads
      *
-     * @param tracker 异步ReadSnapshotChunk追踪器
-     * @param transferTask 转储任务
-     * @param results ReadChunkSnapshot结果列表
+     * @param tracker asynchronous ReadSnapshotChunk tracker
+     * @param transferTask dump task
+     * @param results ReadChunkSnapshot result list
      *
-     * @return 错误码
+     * @return error code
      */
     int HandleReadChunkSnapshotResultsAndRetry(
        std::shared_ptr<TaskTracker> tracker,
        std::shared_ptr<TransferTask> transferTask,
-        const std::list<std::shared_ptr<ReadChunkSnapshotContext>> &results);
+        const std::list<std::shared_ptr<ReadChunkSnapshotContext>>& results);

 protected:
     std::shared_ptr<TransferSnapshotDataChunkTaskInfo> taskInfo_;
@@ -293,7 +274,6 @@ class TransferSnapshotDataChunkTask : public TrackerTask {
     std::shared_ptr<SnapshotDataStore> dataStore_;
 };

-
 }  // namespace snapshotcloneserver
 }  // namespace curve

diff --git a/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp b/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp
index aa57505b9f..2c82ae1d0f 100644
--- a/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp
+++ b/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp
@@ -21,9 +21,9 @@
 */

 #include "src/snapshotcloneserver/snapshot/snapshot_task_manager.h"
-#include "src/common/snapshotclone/snapshotclone_define.h"
-#include "src/common/concurrent/concurrent.h"

+#include "src/common/concurrent/concurrent.h"
+#include "src/common/snapshotclone/snapshotclone_define.h"

 using curve::common::LockGuard;

@@ -39,7 +39,7 @@ int SnapshotTaskManager::Start() {
         return ret;
     }
     isStop_.store(false);
-    // isStop_标志先置,防止backEndThread先退出
+    // Set the isStop_ flag first 
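so that the background thread started next
+    // does not exit prematurely. As a rough sketch (assumed shape, condensed
+    // from BackEndThreadFunc and the Scan* helpers below, for illustration
+    // only), that thread then does:
+    //     while (!isStop_.load()) {
+    //         ScanWaitingTask();   // move runnable tasks into the work queue
+    //         ScanWorkingTask();   // reap tasks that have finished
+    //         sleep_for(milliseconds(snapshotTaskManagerScanIntervalMs_));
+    //     }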
    backEndThread =
        std::thread(&SnapshotTaskManager::BackEndThreadFunc, this);
 }

@@ -58,7 +58,7 @@ int SnapshotTaskManager::PushTask(std::shared_ptr<SnapshotTask> task) {
     if (isStop_.load()) {
         return kErrCodeServiceIsStop;
     }
-    // 移除实际已完成的task,防止uuid冲突
+    // Remove tasks that have actually finished, to avoid uuid conflicts
     ScanWorkingTask();

     {
@@ -73,13 +73,13 @@
     }
     snapshotMetric_->snapshotWaiting << 1;

-    // 立即执行task
+    // Try to run the task immediately
     ScanWaitingTask();
     return kErrCodeSuccess;
 }

 std::shared_ptr<SnapshotTask> SnapshotTaskManager::GetTask(
-    const TaskIdType &taskId) const {
+    const TaskIdType& taskId) const {
     ReadLockGuard taskMapRlock(taskMapLock_);
     auto it = taskMap_.find(taskId);
     if (it != taskMap_.end()) {
@@ -88,14 +88,12 @@
     return nullptr;
 }

-int SnapshotTaskManager::CancelTask(const TaskIdType &taskId) {
+int SnapshotTaskManager::CancelTask(const TaskIdType& taskId) {
     {
-        // 还在等待队列的Cancel直接移除
+        // A task still in the waiting queue is cancelled by removing it directly
         WriteLockGuard taskMapWlock(taskMapLock_);
         LockGuard waitingTasksLock(waitingTasksLock_);
-        for (auto it = waitingTasks_.begin();
-            it != waitingTasks_.end();
-            it++) {
+        for (auto it = waitingTasks_.begin(); it != waitingTasks_.end(); it++) {
             if ((*it)->GetTaskId() == taskId) {
                 int ret = core_->HandleCancelUnSchduledSnapshotTask(
                     (*it)->GetTaskInfo());
@@ -131,12 +129,10 @@ void SnapshotTaskManager::BackEndThreadFunc() {

 void SnapshotTaskManager::ScanWaitingTask() {
     LockGuard waitingTasksLock(waitingTasksLock_);
     LockGuard workingTasksLock(workingTasksLock_);
-    for (auto it = waitingTasks_.begin();
-        it != waitingTasks_.end();) {
-        if (workingTasks_.find((*it)->GetTaskInfo()->GetFileName())
-            == workingTasks_.end()) {
-            workingTasks_.emplace((*it)->GetTaskInfo()->GetFileName(),
-                *it);
+    for (auto it = waitingTasks_.begin(); it != waitingTasks_.end();) {
+        if (workingTasks_.find((*it)->GetTaskInfo()->GetFileName()) ==
+            workingTasks_.end()) {
+            workingTasks_.emplace((*it)->GetTaskInfo()->GetFileName(), *it);
             threadpool_->PushTask(*it);
             snapshotMetric_->snapshotDoing << 1;
             snapshotMetric_->snapshotWaiting << -1;
@@ -150,13 +146,11 @@ void SnapshotTaskManager::ScanWaitingTask() {

 void SnapshotTaskManager::ScanWorkingTask() {
     WriteLockGuard taskMapWlock(taskMapLock_);
     LockGuard workingTasksLock(workingTasksLock_);
-    for (auto it = workingTasks_.begin();
-        it != workingTasks_.end();) {
+    for (auto it = workingTasks_.begin(); it != workingTasks_.end();) {
         auto taskInfo = it->second->GetTaskInfo();
         if (taskInfo->IsFinish()) {
             snapshotMetric_->snapshotDoing << -1;
-            if (taskInfo->GetSnapshotInfo().GetStatus()
-                != Status::done) {
+            if (taskInfo->GetSnapshotInfo().GetStatus() != Status::done) {
                 snapshotMetric_->snapshotFailed << 1;
             } else {
                 snapshotMetric_->snapshotSucceed << 1;
@@ -171,4 +165,3 @@ void SnapshotTaskManager::ScanWorkingTask() {

 }  // namespace snapshotcloneserver
 }  // namespace curve
-
diff --git a/src/snapshotcloneserver/snapshot/snapshot_task_manager.h b/src/snapshotcloneserver/snapshot/snapshot_task_manager.h
index a22eb0e2ae..c2cee2baa3 100644
--- a/src/snapshotcloneserver/snapshot/snapshot_task_manager.h
+++ b/src/snapshotcloneserver/snapshot/snapshot_task_manager.h
@@ -23,54 +23,51 @@
 #ifndef SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_TASK_MANAGER_H_
 #define SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_TASK_MANAGER_H_

-#include
-#include
 #include
-#include
 #include
+#include
+#include
+#include
 #include <thread>  // 
NOLINT

-#include "src/snapshotcloneserver/snapshot/snapshot_task.h"
-#include "src/snapshotcloneserver/common/thread_pool.h"
 #include "src/common/concurrent/rw_lock.h"
 #include "src/common/snapshotclone/snapshotclone_define.h"
 #include "src/snapshotcloneserver/common/config.h"
 #include "src/snapshotcloneserver/common/snapshotclone_metric.h"
+#include "src/snapshotcloneserver/common/thread_pool.h"
 #include "src/snapshotcloneserver/snapshot/snapshot_core.h"
+#include "src/snapshotcloneserver/snapshot/snapshot_task.h"

-using ::curve::common::RWLock;
+using ::curve::common::Mutex;
 using ::curve::common::ReadLockGuard;
+using ::curve::common::RWLock;
 using ::curve::common::WriteLockGuard;
-using ::curve::common::Mutex;

 namespace curve {
 namespace snapshotcloneserver {

 /**
- * @brief 快照任务管理器类
+ * @brief Snapshot task manager
 */
 class SnapshotTaskManager {
 public:
-    /**
-     * @brief 默认构造函数
-     */
-    SnapshotTaskManager(
-        std::shared_ptr core,
-        std::shared_ptr snapshotMetric)
+    /**
+     * @brief Constructor
+     */
+    SnapshotTaskManager(std::shared_ptr<SnapshotCore> core,
+                        std::shared_ptr<SnapshotMetric> snapshotMetric)
         : isStop_(true),
           core_(core),
           snapshotMetric_(snapshotMetric),
           snapshotTaskManagerScanIntervalMs_(0) {}

     /**
-     * @brief 析构函数
+     * @brief Destructor
      */
-    ~SnapshotTaskManager() {
-        Stop();
-    }
+    ~SnapshotTaskManager() { Stop(); }

     int Init(std::shared_ptr<ThreadPool> pool,
-        const SnapshotCloneServerOptions &option) {
+             const SnapshotCloneServerOptions& option) {
         snapshotTaskManagerScanIntervalMs_ =
             option.snapshotTaskManagerScanIntervalMs;
         threadpool_ = pool;
@@ -78,88 +75,92 @@ class SnapshotTaskManager {
     }

     /**
-     * @brief 启动
+     * @brief Start the manager
      *
-     * @return 错误码
+     * @return error code
      */
     int Start();

     /**
-     * @brief 停止服务
+     * @brief Stop the service
      *
      */
     void Stop();

     /**
-     * @brief 添加任务
+     * @brief Add a task
      *
-     * @param task 快照任务
+     * @param task snapshot task
      *
-     * @return 错误码
+     * @return error code
      */
     int PushTask(std::shared_ptr<SnapshotTask> task);

     /**
-     * @brief 获取任务
+     * @brief Get a task
      *
-     * @param taskId 任务id
+     * @param taskId task ID
      *
-     * @return 快照任务指针
+     * @return snapshot task pointer
      */
-    std::shared_ptr<SnapshotTask> GetTask(const TaskIdType &taskId) const;
+    std::shared_ptr<SnapshotTask> GetTask(const TaskIdType& taskId) const;

     /**
-     * @brief 取消任务
+     * @brief Cancel a task
      *
-     * @param taskId 任务id
+     * @param taskId task ID
      *
-     * @return 错误码
+     * @return error code
      */
-    int CancelTask(const TaskIdType &taskId);
+    int CancelTask(const TaskIdType& taskId);

 private:
     /**
-     * @brief 后台线程执行函数
+     * @brief Background thread function
      *
-     * 定期执行扫描等待队列函数与扫描工作队列函数。
+     * Periodically runs the waiting-queue scan and the work-queue scan.
      */
     void BackEndThreadFunc();

     /**
-     * @brief 扫描等待任务队列函数
+     * @brief Scan the waiting task queue
      *
-     * 扫描等待队列,判断工作队列中当前文件
-     * 是否有正在执行的快照,若没有则放入工作队列
+     * Scan the waiting queue; if a waiting task's file has no snapshot
+     * currently running in the work queue, move that task into the work
+     * queue.
      *
      */
     void ScanWaitingTask();

     /**
-     * @brief 扫描工作队列函数
+     * @brief Scan the work queue
      *
-     * 扫描工作队列,判断工作队列中当前
-     * 快照任务是否已完成,若完成则移出工作队列
+     * Scan the work queue and remove the snapshot tasks that have already
+     * finished.
      *
      */
     void ScanWorkingTask();

 private:
-    // 后端线程
+    // Background thread
     std::thread backEndThread;

-    // id->快照任务表
+    // Map from task id to snapshot task
     std::map<TaskIdType, std::shared_ptr<SnapshotTask> > taskMap_;
     mutable RWLock taskMapLock_;

-    // 快照等待队列
+    // Snapshot waiting queue
    std::list<std::shared_ptr<SnapshotTask> > waitingTasks_;
    mutable Mutex waitingTasksLock_;

-    // 
快照工作队列,实际是个map,其中key是文件名,以便于查询 + // The snapshot work queue is actually a map, where key is the file name for + // easy query std::map > workingTasks_; mutable Mutex workingTasksLock_; std::shared_ptr threadpool_; - // 当前任务管理是否停止,用于支持start,stop功能 + // Indicates whether the current task management is stopped, used to support + // start and stop functions. std::atomic_bool isStop_; // snapshot core @@ -168,7 +169,8 @@ class SnapshotTaskManager { // metric std::shared_ptr snapshotMetric_; - // 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms) + // Scanning cycle of snapshot background thread scanning waiting queue and + // work queue (unit: ms) int snapshotTaskManagerScanIntervalMs_; }; diff --git a/src/snapshotcloneserver/snapshotclone_server.cpp b/src/snapshotcloneserver/snapshotclone_server.cpp index be92a61d9d..b403054f38 100644 --- a/src/snapshotcloneserver/snapshotclone_server.cpp +++ b/src/snapshotcloneserver/snapshotclone_server.cpp @@ -19,365 +19,383 @@ * Created Date: Monday March 9th 2020 * Author: hzsunjianliang */ -#include +#include "src/snapshotcloneserver/snapshotclone_server.h" + #include #include -#include +#include + #include +#include -#include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/snapshotcloneserver/snapshotclone_server.h" #include "src/common/curve_version.h" +#include "src/common/snapshotclone/snapshotclone_define.h" using LeaderElectionOptions = ::curve::election::LeaderElectionOptions; -namespace curve { -namespace snapshotcloneserver { - -const char metricExposePrefix[] = "snapshotcloneserver"; -const char configMetricName[] = "snapshotcloneserver_config"; -const char statusMetricName[] = "snapshotcloneserver_status"; -const char ACTIVE[] = "active"; -const char STANDBY[] = "standby"; - -void InitClientOption(std::shared_ptr conf, - CurveClientOptions *clientOption) { - conf->GetValueFatalIfFail("client.config_path", - &clientOption->configPath); - conf->GetValueFatalIfFail("mds.rootUser", - &clientOption->mdsRootUser); - conf->GetValueFatalIfFail("mds.rootPassword", - &clientOption->mdsRootPassword); - conf->GetValueFatalIfFail("client.methodRetryTimeSec", - &clientOption->clientMethodRetryTimeSec); - conf->GetValueFatalIfFail("client.methodRetryIntervalMs", - &clientOption->clientMethodRetryIntervalMs); -} - -void InitSnapshotCloneServerOptions(std::shared_ptr conf, - SnapshotCloneServerOptions *serverOption) { - conf->GetValueFatalIfFail("server.address", - &serverOption->addr); - conf->GetValueFatalIfFail("server.clientAsyncMethodRetryTimeSec", - &serverOption->clientAsyncMethodRetryTimeSec); - conf->GetValueFatalIfFail( - "server.clientAsyncMethodRetryIntervalMs", - &serverOption->clientAsyncMethodRetryIntervalMs); - conf->GetValueFatalIfFail("server.snapshotPoolThreadNum", - &serverOption->snapshotPoolThreadNum); - conf->GetValueFatalIfFail( - "server.snapshotTaskManagerScanIntervalMs", - &serverOption->snapshotTaskManagerScanIntervalMs); - conf->GetValueFatalIfFail("server.chunkSplitSize", - &serverOption->chunkSplitSize); - conf->GetValueFatalIfFail( - "server.checkSnapshotStatusIntervalMs", - &serverOption->checkSnapshotStatusIntervalMs); - conf->GetValueFatalIfFail("server.maxSnapshotLimit", - &serverOption->maxSnapshotLimit); - conf->GetValueFatalIfFail("server.snapshotCoreThreadNum", - &serverOption->snapshotCoreThreadNum); - conf->GetValueFatalIfFail("server.mdsSessionTimeUs", - &serverOption->mdsSessionTimeUs); - conf->GetValueFatalIfFail("server.readChunkSnapshotConcurrency", - &serverOption->readChunkSnapshotConcurrency); - - 
conf->GetValueFatalIfFail("server.stage1PoolThreadNum", - &serverOption->stage1PoolThreadNum); - conf->GetValueFatalIfFail("server.stage2PoolThreadNum", - &serverOption->stage2PoolThreadNum); - conf->GetValueFatalIfFail("server.commonPoolThreadNum", - &serverOption->commonPoolThreadNum); - - conf->GetValueFatalIfFail( - "server.cloneTaskManagerScanIntervalMs", - &serverOption->cloneTaskManagerScanIntervalMs); - conf->GetValueFatalIfFail("server.cloneChunkSplitSize", - &serverOption->cloneChunkSplitSize); - conf->GetValueFatalIfFail("server.cloneTempDir", - &serverOption->cloneTempDir); - conf->GetValueFatalIfFail("mds.rootUser", - &serverOption->mdsRootUser); - conf->GetValueFatalIfFail("server.createCloneChunkConcurrency", - &serverOption->createCloneChunkConcurrency); - conf->GetValueFatalIfFail("server.recoverChunkConcurrency", - &serverOption->recoverChunkConcurrency); - conf->GetValueFatalIfFail("server.backEndReferenceRecordScanIntervalMs", - &serverOption->backEndReferenceRecordScanIntervalMs); - conf->GetValueFatalIfFail("server.backEndReferenceFuncScanIntervalMs", - &serverOption->backEndReferenceFuncScanIntervalMs); - - conf->GetValueFatalIfFail("etcd.retry.times", - &(serverOption->dlockOpts.retryTimes)); - conf->GetValueFatalIfFail("etcd.dlock.timeoutMs", - &(serverOption->dlockOpts.ctx_timeoutMS)); - conf->GetValueFatalIfFail("etcd.dlock.ttlSec", - &(serverOption->dlockOpts.ttlSec)); -} - -void SnapShotCloneServer::InitEtcdConf(EtcdConf* etcdConf) { - conf_->GetValueFatalIfFail("etcd.endpoint", &etcdEndpoints_); - etcdConf->len = etcdEndpoints_.size(); - etcdConf->Endpoints = &etcdEndpoints_[0]; - conf_->GetValueFatalIfFail( - "etcd.dailtimeoutMs", &etcdConf->DialTimeout); - // etcd auth config - bool authEnable = false; - conf_->GetBoolValue("etcd.auth.enable", &authEnable); - etcdConf->authEnable = authEnable ? 
1 : 0; - if (authEnable) { - conf_->GetValueFatalIfFail("etcd.auth.username", &etcdUsername_); - etcdConf->username = &etcdUsername_[0]; - etcdConf->usernameLen = etcdUsername_.size(); - conf_->GetValueFatalIfFail("etcd.auth.password", &etcdPassword_); - etcdConf->password = &etcdPassword_[0]; - etcdConf->passwordLen = etcdPassword_.size(); - } -} - -void SnapShotCloneServer::InitAllSnapshotCloneOptions(void) { - InitClientOption(conf_, &(snapshotCloneServerOptions_.clientOptions)); - InitSnapshotCloneServerOptions(conf_, - &(snapshotCloneServerOptions_.serverOption)); - InitEtcdConf(&(snapshotCloneServerOptions_.etcdConf)); - - conf_->GetValueFatalIfFail("etcd.operation.timeoutMs", - &(snapshotCloneServerOptions_.etcdClientTimeout)); - - conf_->GetValueFatalIfFail("etcd.retry.times", - &(snapshotCloneServerOptions_.etcdRetryTimes)); - - conf_->GetValueFatalIfFail("server.dummy.listen.port", - &(snapshotCloneServerOptions_.dummyPort)); - - conf_->GetValueFatalIfFail("leader.campagin.prefix", - &(snapshotCloneServerOptions_.campaginPrefix)); - - conf_->GetValueFatalIfFail("leader.session.intersec", - &(snapshotCloneServerOptions_.sessionInterSec)); - - conf_->GetValueFatalIfFail("leader.election.timeoutms", - &(snapshotCloneServerOptions_.electionTimeoutMs)); - - conf_->GetValueFatalIfFail("s3.config_path", - &(snapshotCloneServerOptions_.s3ConfPath)); -} - -void SnapShotCloneServer::StartDummy() { - // Expose conf and version and role(standby or active) - LOG(INFO) << "snapshotCloneServer version: " - << curve::common::CurveVersion(); - curve::common::ExposeCurveVersion(); - conf_->ExposeMetric(configMetricName); - status_.expose(statusMetricName); - status_.set_value(STANDBY); - - int ret = brpc::StartDummyServerAt(snapshotCloneServerOptions_.dummyPort); - if (ret != 0) { - LOG(FATAL) << "StartDummyServer error"; - } else { - LOG(INFO) << "StartDummyServer ok"; - } -} - -bool SnapShotCloneServer::InitEtcdClient(void) { - etcdClient_ = std::make_shared(); - auto res = etcdClient_->Init(snapshotCloneServerOptions_.etcdConf, - snapshotCloneServerOptions_.etcdClientTimeout, - snapshotCloneServerOptions_.etcdRetryTimes); - if (res != EtcdErrCode::EtcdOK) { - LOG(ERROR) - << "init etcd client err! " - << "etcdaddr: " << snapshotCloneServerOptions_.etcdConf.Endpoints - << ", etcdaddr len: " << snapshotCloneServerOptions_.etcdConf.len - << ", etcdtimeout: " << snapshotCloneServerOptions_.etcdConf.DialTimeout - << ", operation timeout: " - << snapshotCloneServerOptions_.etcdClientTimeout - << ", etcd retrytimes: " - << snapshotCloneServerOptions_.etcdRetryTimes; - return false; - } - - std::string out; - res = etcdClient_->Get("test", &out); - if (res != EtcdErrCode::EtcdOK && res != EtcdErrCode::EtcdKeyNotExist) { - LOG(ERROR) << - "Run snapsthotcloneserver err. Check if etcd is running."; - return false; - } - - LOG(INFO) << "init etcd client ok! 
" - << "etcdaddr: " << snapshotCloneServerOptions_.etcdConf.Endpoints - << ", etcdaddr len: " << snapshotCloneServerOptions_.etcdConf.len - << ", etcdtimeout: " << - snapshotCloneServerOptions_.etcdConf.DialTimeout - << ", operation timeout: " << - snapshotCloneServerOptions_.etcdClientTimeout - << ", etcd retrytimes: " << - snapshotCloneServerOptions_.etcdRetryTimes; - return true; -} - -void SnapShotCloneServer::StartCompaginLeader(void) { - if (!InitEtcdClient()) { - LOG(FATAL) << "InitEtcdClient error"; - } - // init leader election options - LeaderElectionOptions option; - option.etcdCli = etcdClient_; - option.leaderUniqueName = snapshotCloneServerOptions_.serverOption.addr; - option.electionTimeoutMs = snapshotCloneServerOptions_.electionTimeoutMs; - option.sessionInterSec = snapshotCloneServerOptions_.sessionInterSec; - option.campaginPrefix = snapshotCloneServerOptions_.campaginPrefix; - leaderElection_ = std::make_shared(option); - - // compagin leader and observe self then return - while (0 != leaderElection_->CampaignLeader()) { - LOG(INFO) << option.leaderUniqueName - << " campaign for leader again"; - } - LOG(INFO) << "Campain leader ok, I am the active member now"; - status_.set_value(ACTIVE); - leaderElection_->StartObserverLeader(); -} - -bool SnapShotCloneServer::Init() { - snapClient_ = std::make_shared(); - fileClient_ = std::make_shared(); - client_ = std::make_shared(snapClient_, fileClient_); - - if (client_->Init(snapshotCloneServerOptions_.clientOptions) < 0) { - LOG(ERROR) << "curvefs_client init fail."; - return false; - } - auto codec = std::make_shared(); - - metaStore_ = std::make_shared(etcdClient_, - codec); - if (metaStore_->Init() < 0) { - LOG(ERROR) << "metaStore init fail."; - return false; - } - - dataStore_ = std::make_shared(); - if (dataStore_->Init(snapshotCloneServerOptions_.s3ConfPath) < 0) { - LOG(ERROR) << "dataStore init fail."; - return false; - } - - - snapshotRef_ = std::make_shared(); - snapshotMetric_ = std::make_shared(metaStore_); - snapshotCore_ = std::make_shared( - client_, - metaStore_, - dataStore_, - snapshotRef_, - snapshotCloneServerOptions_.serverOption); - if (snapshotCore_->Init() < 0) { - LOG(ERROR) << "SnapshotCore init fail."; - return false; - } - - snapshotTaskManager_ = std::make_shared(snapshotCore_, - snapshotMetric_); - snapshotServiceManager_ = - std::make_shared(snapshotTaskManager_, - snapshotCore_); - if (snapshotServiceManager_->Init( - snapshotCloneServerOptions_.serverOption) < 0) { - LOG(ERROR) << "SnapshotServiceManager init fail."; - return false; - } - - cloneMetric_ = std::make_shared(); - cloneRef_ = std::make_shared(); - cloneCore_ = std::make_shared( - client_, - metaStore_, - dataStore_, - snapshotRef_, - cloneRef_, - snapshotCloneServerOptions_.serverOption); - if (cloneCore_->Init() < 0) { - LOG(ERROR) << "CloneCore init fail."; - return false; - } - cloneTaskMgr_ = std::make_shared(cloneCore_, - cloneMetric_); - - cloneServiceManagerBackend_ = - std::make_shared(cloneCore_); - cloneServiceManager_ = std::make_shared( - cloneTaskMgr_, - cloneCore_, - cloneServiceManagerBackend_); - if (cloneServiceManager_->Init( - snapshotCloneServerOptions_.serverOption) < 0) { - LOG(ERROR) << "CloneServiceManager init fail."; - return false; - } - service_ = std::make_shared( - snapshotServiceManager_, - cloneServiceManager_); - server_ = std::make_shared(); - if (server_->AddService(service_.get(), - brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { - LOG(ERROR) << "Failed to add snapshot_service!\n"; - return false; - } 
- return true; -} - -bool SnapShotCloneServer::Start(void) { - // 先启动clone服务再启动snapshot服务,因为删除快照依赖是否有clone引用 - int ret = cloneServiceManager_->Start(); - if (ret < 0) { - LOG(ERROR) << "cloneServiceManager start fail" - << ", ret = " << ret; - return false; - } - ret = cloneServiceManager_->RecoverCloneTask(); - if (ret < 0) { - LOG(ERROR) << "RecoverCloneTask fail" - << ", ret = " << ret; - return false; - } - ret = snapshotServiceManager_->Start(); - if (ret < 0) { - LOG(ERROR) << "snapshotServiceManager start fail" - << ", ret = " << ret; - return false; - } - ret = snapshotServiceManager_->RecoverSnapshotTask(); - if (ret < 0) { - LOG(ERROR) << "RecoverSnapshotTask fail" - << ", ret = " << ret; - return false; - } - - brpc::ServerOptions option; - option.idle_timeout_sec = -1; - if (server_->Start(snapshotCloneServerOptions_.serverOption.addr.c_str(), - &option) != 0) { - LOG(FATAL) << "snapshotclone rpc server start fail."; - } - LOG(INFO) << "snapshotclone service start ok ..."; - return true; -} - -void SnapShotCloneServer::RunUntilQuit(void) { - server_->RunUntilAskedToQuit(); -} - -void SnapShotCloneServer::Stop(void) { - LOG(INFO) << "snapshotcloneserver stopping ..."; - server_->Stop(0); - server_->Join(); - snapshotServiceManager_->Stop(); - cloneServiceManager_->Stop(); - LOG(INFO) << "snapshorcloneserver stopped"; -} - -} // namespace snapshotcloneserver -} // namespace curve +namespace curve +{ + namespace snapshotcloneserver + { + + const char metricExposePrefix[] = "snapshotcloneserver"; + const char configMetricName[] = "snapshotcloneserver_config"; + const char statusMetricName[] = "snapshotcloneserver_status"; + const char ACTIVE[] = "active"; + const char STANDBY[] = "standby"; + + void InitClientOption(std::shared_ptr conf, + CurveClientOptions *clientOption) + { + conf->GetValueFatalIfFail("client.config_path", &clientOption->configPath); + conf->GetValueFatalIfFail("mds.rootUser", &clientOption->mdsRootUser); + conf->GetValueFatalIfFail("mds.rootPassword", + &clientOption->mdsRootPassword); + conf->GetValueFatalIfFail("client.methodRetryTimeSec", + &clientOption->clientMethodRetryTimeSec); + conf->GetValueFatalIfFail("client.methodRetryIntervalMs", + &clientOption->clientMethodRetryIntervalMs); + } + + void InitSnapshotCloneServerOptions(std::shared_ptr conf, + SnapshotCloneServerOptions *serverOption) + { + conf->GetValueFatalIfFail("server.address", &serverOption->addr); + conf->GetValueFatalIfFail("server.clientAsyncMethodRetryTimeSec", + &serverOption->clientAsyncMethodRetryTimeSec); + conf->GetValueFatalIfFail("server.clientAsyncMethodRetryIntervalMs", + &serverOption->clientAsyncMethodRetryIntervalMs); + conf->GetValueFatalIfFail("server.snapshotPoolThreadNum", + &serverOption->snapshotPoolThreadNum); + conf->GetValueFatalIfFail("server.snapshotTaskManagerScanIntervalMs", + &serverOption->snapshotTaskManagerScanIntervalMs); + conf->GetValueFatalIfFail("server.chunkSplitSize", + &serverOption->chunkSplitSize); + conf->GetValueFatalIfFail("server.checkSnapshotStatusIntervalMs", + &serverOption->checkSnapshotStatusIntervalMs); + conf->GetValueFatalIfFail("server.maxSnapshotLimit", + &serverOption->maxSnapshotLimit); + conf->GetValueFatalIfFail("server.snapshotCoreThreadNum", + &serverOption->snapshotCoreThreadNum); + conf->GetValueFatalIfFail("server.mdsSessionTimeUs", + &serverOption->mdsSessionTimeUs); + conf->GetValueFatalIfFail("server.readChunkSnapshotConcurrency", + &serverOption->readChunkSnapshotConcurrency); + + 
conf->GetValueFatalIfFail("server.stage1PoolThreadNum", + &serverOption->stage1PoolThreadNum); + conf->GetValueFatalIfFail("server.stage2PoolThreadNum", + &serverOption->stage2PoolThreadNum); + conf->GetValueFatalIfFail("server.commonPoolThreadNum", + &serverOption->commonPoolThreadNum); + + conf->GetValueFatalIfFail("server.cloneTaskManagerScanIntervalMs", + &serverOption->cloneTaskManagerScanIntervalMs); + conf->GetValueFatalIfFail("server.cloneChunkSplitSize", + &serverOption->cloneChunkSplitSize); + conf->GetValueFatalIfFail("server.cloneTempDir", + &serverOption->cloneTempDir); + conf->GetValueFatalIfFail("mds.rootUser", &serverOption->mdsRootUser); + conf->GetValueFatalIfFail("server.createCloneChunkConcurrency", + &serverOption->createCloneChunkConcurrency); + conf->GetValueFatalIfFail("server.recoverChunkConcurrency", + &serverOption->recoverChunkConcurrency); + conf->GetValueFatalIfFail( + "server.backEndReferenceRecordScanIntervalMs", + &serverOption->backEndReferenceRecordScanIntervalMs); + conf->GetValueFatalIfFail( + "server.backEndReferenceFuncScanIntervalMs", + &serverOption->backEndReferenceFuncScanIntervalMs); + + conf->GetValueFatalIfFail("etcd.retry.times", + &(serverOption->dlockOpts.retryTimes)); + conf->GetValueFatalIfFail("etcd.dlock.timeoutMs", + &(serverOption->dlockOpts.ctx_timeoutMS)); + conf->GetValueFatalIfFail("etcd.dlock.ttlSec", + &(serverOption->dlockOpts.ttlSec)); + } + + void SnapShotCloneServer::InitEtcdConf(EtcdConf *etcdConf) + { + conf_->GetValueFatalIfFail("etcd.endpoint", &etcdEndpoints_); + etcdConf->len = etcdEndpoints_.size(); + etcdConf->Endpoints = &etcdEndpoints_[0]; + conf_->GetValueFatalIfFail( + "etcd.dailtimeoutMs", &etcdConf->DialTimeout); + // etcd auth config + bool authEnable = false; + conf_->GetBoolValue("etcd.auth.enable", &authEnable); + etcdConf->authEnable = authEnable ? 
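1 : 0;
+            // For reference, the matching entries in the snapshot clone
+            // server's conf file would look like this (illustrative values
+            // only, not part of this change):
+            //     etcd.auth.enable=true
+            //     etcd.auth.username=<user>
+            //     etcd.auth.password=<password>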
+            if (authEnable)
+            {
+                conf_->GetValueFatalIfFail("etcd.auth.username", &etcdUsername_);
+                etcdConf->username = &etcdUsername_[0];
+                etcdConf->usernameLen = etcdUsername_.size();
+                conf_->GetValueFatalIfFail("etcd.auth.password", &etcdPassword_);
+                etcdConf->password = &etcdPassword_[0];
+                etcdConf->passwordLen = etcdPassword_.size();
+            }
+        }
+
+        void SnapShotCloneServer::InitAllSnapshotCloneOptions(void)
+        {
+            InitClientOption(conf_, &(snapshotCloneServerOptions_.clientOptions));
+            InitSnapshotCloneServerOptions(conf_,
+                                           &(snapshotCloneServerOptions_.serverOption));
+            InitEtcdConf(&(snapshotCloneServerOptions_.etcdConf));
+
+            conf_->GetValueFatalIfFail(
+                "etcd.operation.timeoutMs",
+                &(snapshotCloneServerOptions_.etcdClientTimeout));
+
+            conf_->GetValueFatalIfFail("etcd.retry.times",
+                                       &(snapshotCloneServerOptions_.etcdRetryTimes));
+
+            conf_->GetValueFatalIfFail("server.dummy.listen.port",
+                                       &(snapshotCloneServerOptions_.dummyPort));
+
+            conf_->GetValueFatalIfFail("leader.campagin.prefix",
+                                       &(snapshotCloneServerOptions_.campaginPrefix));
+
+            conf_->GetValueFatalIfFail("leader.session.intersec",
+                                       &(snapshotCloneServerOptions_.sessionInterSec));
+
+            conf_->GetValueFatalIfFail(
+                "leader.election.timeoutms",
+                &(snapshotCloneServerOptions_.electionTimeoutMs));
+
+            conf_->GetValueFatalIfFail("s3.config_path",
+                                       &(snapshotCloneServerOptions_.s3ConfPath));
+        }
+
+        void SnapShotCloneServer::StartDummy()
+        {
+            // Expose conf and version and role (standby or active)
+            LOG(INFO) << "snapshotCloneServer version: "
+                      << curve::common::CurveVersion();
+            curve::common::ExposeCurveVersion();
+            conf_->ExposeMetric(configMetricName);
+            status_.expose(statusMetricName);
+            status_.set_value(STANDBY);
+
+            int ret = brpc::StartDummyServerAt(snapshotCloneServerOptions_.dummyPort);
+            if (ret != 0)
+            {
+                LOG(FATAL) << "StartDummyServer error";
+            }
+            else
+            {
+                LOG(INFO) << "StartDummyServer ok";
+            }
+        }
+
+        bool SnapShotCloneServer::InitEtcdClient(void)
+        {
+            etcdClient_ = std::make_shared<EtcdClientImp>();
+            auto res = etcdClient_->Init(snapshotCloneServerOptions_.etcdConf,
+                                         snapshotCloneServerOptions_.etcdClientTimeout,
+                                         snapshotCloneServerOptions_.etcdRetryTimes);
+            if (res != EtcdErrCode::EtcdOK)
+            {
+                LOG(ERROR) << "init etcd client err! "
+                           << "etcdaddr: "
+                           << snapshotCloneServerOptions_.etcdConf.Endpoints
+                           << ", etcdaddr len: "
+                           << snapshotCloneServerOptions_.etcdConf.len
+                           << ", etcdtimeout: "
+                           << snapshotCloneServerOptions_.etcdConf.DialTimeout
+                           << ", operation timeout: "
+                           << snapshotCloneServerOptions_.etcdClientTimeout
+                           << ", etcd retrytimes: "
+                           << snapshotCloneServerOptions_.etcdRetryTimes;
+                return false;
+            }
+
+            std::string out;
+            res = etcdClient_->Get("test", &out);
+            if (res != EtcdErrCode::EtcdOK && res != EtcdErrCode::EtcdKeyNotExist)
+            {
+                LOG(ERROR) << "Run snapshotcloneserver err. Check if etcd is running.";
+                return false;
+            }
+
+            LOG(INFO) << "init etcd client ok! "
+                      << "etcdaddr: " << snapshotCloneServerOptions_.etcdConf.Endpoints
+                      << ", etcdaddr len: " << snapshotCloneServerOptions_.etcdConf.len
+                      << ", etcdtimeout: "
+                      << snapshotCloneServerOptions_.etcdConf.DialTimeout
+                      << ", operation timeout: "
+                      << snapshotCloneServerOptions_.etcdClientTimeout
+                      << ", etcd retrytimes: "
+                      << snapshotCloneServerOptions_.etcdRetryTimes;
+            return true;
+        }
+
+        void SnapShotCloneServer::StartCompaginLeader(void)
+        {
+            if (!InitEtcdClient())
+            {
+                LOG(FATAL) << "InitEtcdClient error";
+            }
+            // init leader election options
+            LeaderElectionOptions option;
+            option.etcdCli = etcdClient_;
+            option.leaderUniqueName = snapshotCloneServerOptions_.serverOption.addr;
+            option.electionTimeoutMs = snapshotCloneServerOptions_.electionTimeoutMs;
+            option.sessionInterSec = snapshotCloneServerOptions_.sessionInterSec;
+            option.campaginPrefix = snapshotCloneServerOptions_.campaginPrefix;
+            leaderElection_ = std::make_shared<LeaderElection>(option);
+
+            // campaign for leader, observe ourselves once elected, then return
+            while (0 != leaderElection_->CampaignLeader())
+            {
+                LOG(INFO) << option.leaderUniqueName << " campaign for leader again";
+            }
+            LOG(INFO) << "Campaign leader ok, I am the active member now";
+            status_.set_value(ACTIVE);
+            leaderElection_->StartObserverLeader();
+        }
+
+        bool SnapShotCloneServer::Init()
+        {
+            snapClient_ = std::make_shared();
+            fileClient_ = std::make_shared();
+            client_ = std::make_shared(snapClient_, fileClient_);
+
+            if (client_->Init(snapshotCloneServerOptions_.clientOptions) < 0)
+            {
+                LOG(ERROR) << "curvefs_client init fail.";
+                return false;
+            }
+            auto codec = std::make_shared();
+
+            metaStore_ =
+                std::make_shared<SnapshotCloneMetaStoreEtcd>(etcdClient_, codec);
+            if (metaStore_->Init() < 0)
+            {
+                LOG(ERROR) << "metaStore init fail.";
+                return false;
+            }
+
+            dataStore_ = std::make_shared();
+            if (dataStore_->Init(snapshotCloneServerOptions_.s3ConfPath) < 0)
+            {
+                LOG(ERROR) << "dataStore init fail.";
+                return false;
+            }
+
+            snapshotRef_ = std::make_shared();
+            snapshotMetric_ = std::make_shared<SnapshotMetric>(metaStore_);
+            snapshotCore_ = std::make_shared(
+                client_, metaStore_, dataStore_, snapshotRef_,
+                snapshotCloneServerOptions_.serverOption);
+            if (snapshotCore_->Init() < 0)
+            {
+                LOG(ERROR) << "SnapshotCore init fail.";
+                return false;
+            }
+
+            snapshotTaskManager_ =
+                std::make_shared<SnapshotTaskManager>(snapshotCore_, snapshotMetric_);
+            snapshotServiceManager_ = std::make_shared<SnapshotServiceManager>(
+                snapshotTaskManager_, snapshotCore_);
+            if (snapshotServiceManager_->Init(
+                    snapshotCloneServerOptions_.serverOption) < 0)
+            {
+                LOG(ERROR) << "SnapshotServiceManager init fail.";
+                return false;
+            }
+
+            cloneMetric_ = std::make_shared();
+            cloneRef_ = std::make_shared();
+            cloneCore_ = std::make_shared(
+                client_, metaStore_, dataStore_, snapshotRef_, cloneRef_,
+                snapshotCloneServerOptions_.serverOption);
+            if (cloneCore_->Init() < 0)
+            {
+                LOG(ERROR) << "CloneCore init fail.";
+                return false;
+            }
+            cloneTaskMgr_ =
+                std::make_shared(cloneCore_, cloneMetric_);
+
+            cloneServiceManagerBackend_ =
+                std::make_shared(cloneCore_);
+            cloneServiceManager_ = std::make_shared(
+                cloneTaskMgr_, cloneCore_, cloneServiceManagerBackend_);
+            if (cloneServiceManager_->Init(snapshotCloneServerOptions_.serverOption) <
+                0)
+            {
+                LOG(ERROR) << "CloneServiceManager init fail.";
+                return false;
+            }
+            service_ = std::make_shared<SnapshotCloneServiceImpl>(
+                snapshotServiceManager_, cloneServiceManager_);
+            server_ = std::make_shared<brpc::Server>();
+            if (server_->AddService(service_.get(), brpc::SERVER_DOESNT_OWN_SERVICE) !=
+                0)
+            {
+                LOG(ERROR) << "Failed to add snapshot_service!\n";
+                return false;
+            }
+            return true;
+        }
+
+        bool SnapShotCloneServer::Start(void)
+        {
+            // Start the clone service before the snapshot service, because
+            // deleting a snapshot depends on whether it is referenced by a clone
+            int ret = cloneServiceManager_->Start();
+            if (ret < 0)
+            {
+                LOG(ERROR) << "cloneServiceManager start fail"
+                           << ", ret = " << ret;
+                return false;
+            }
+            ret = cloneServiceManager_->RecoverCloneTask();
+            if (ret < 0)
+            {
+                LOG(ERROR) << "RecoverCloneTask fail"
+                           << ", ret = " << ret;
+                return false;
+            }
+            ret = snapshotServiceManager_->Start();
+            if (ret < 0)
+            {
+                LOG(ERROR) << "snapshotServiceManager start fail"
+                           << ", ret = " << ret;
+                return false;
+            }
+            ret = snapshotServiceManager_->RecoverSnapshotTask();
+            if (ret < 0)
+            {
+                LOG(ERROR) << "RecoverSnapshotTask fail"
+                           << ", ret = " << ret;
+                return false;
+            }
+
+            brpc::ServerOptions option;
+            option.idle_timeout_sec = -1;
+            if (server_->Start(snapshotCloneServerOptions_.serverOption.addr.c_str(),
+                               &option) != 0)
+            {
+                LOG(FATAL) << "snapshotclone rpc server start fail.";
+            }
+            LOG(INFO) << "snapshotclone service start ok ...";
+            return true;
+        }
+
+        void SnapShotCloneServer::RunUntilQuit(void) { server_->RunUntilAskedToQuit(); }
+
+        void SnapShotCloneServer::Stop(void)
+        {
+            LOG(INFO) << "snapshotcloneserver stopping ...";
+            server_->Stop(0);
+            server_->Join();
+            snapshotServiceManager_->Stop();
+            cloneServiceManager_->Stop();
+            LOG(INFO) << "snapshotcloneserver stopped";
+        }
+
+    }  // namespace snapshotcloneserver
+}  // namespace curve
diff --git a/src/snapshotcloneserver/snapshotclone_server.h b/src/snapshotcloneserver/snapshotclone_server.h
index cb9a35d086..7131f5f011 100644
--- a/src/snapshotcloneserver/snapshotclone_server.h
+++ b/src/snapshotcloneserver/snapshotclone_server.h
@@ -23,144 +23,146 @@
 #ifndef SRC_SNAPSHOTCLONESERVER_SNAPSHOTCLONE_SERVER_H_
 #define SRC_SNAPSHOTCLONESERVER_SNAPSHOTCLONE_SERVER_H_

-#include
 #include
+#include

+#include "src/client/libcurve_file.h"
+#include "src/client/libcurve_snapshot.h"
 #include "src/common/configuration.h"
+#include "src/common/snapshotclone/snapshotclone_define.h"
 #include "src/leader_election/leader_election.h"
-
-#include "src/client/libcurve_snapshot.h"
-#include "src/client/libcurve_file.h"
-
+#include "src/snapshotcloneserver/clone/clone_service_manager.h"
 #include "src/snapshotcloneserver/common/config.h"
-#include "src/common/snapshotclone/snapshotclone_define.h"
 #include "src/snapshotcloneserver/common/curvefs_client.h"
 #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h"
+#include "src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h"
 #include "src/snapshotcloneserver/common/snapshotclone_metric.h"
-
+#include "src/snapshotcloneserver/snapshot/snapshot_core.h"
 #include "src/snapshotcloneserver/snapshot/snapshot_data_store.h"
 #include "src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h"
+#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h"
 #include "src/snapshotcloneserver/snapshot/snapshot_task_manager.h"
-#include "src/snapshotcloneserver/snapshot/snapshot_core.h"
 #include "src/snapshotcloneserver/snapshotclone_service.h"
-#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h"
-#include "src/snapshotcloneserver/clone/clone_service_manager.h"
-#include "src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h"

-namespace curve {
-namespace snapshotcloneserver {
-
-extern const char metricExposePrefix[];
-extern const char configMetricName[];
-extern const char statusMetricName[];
-extern 
const char ACTIVE[]; -extern const char STANDBY[]; - - -using EtcdClientImp = ::curve::kvstorage::EtcdClientImp; -using Configuration = ::curve::common::Configuration; -using LeaderElection = ::curve::election::LeaderElection; - -struct SnapShotCloneServerOptions { - CurveClientOptions clientOptions; - SnapshotCloneServerOptions serverOption; - - // etcd options - EtcdConf etcdConf; - int etcdClientTimeout; - int etcdRetryTimes; - - // leaderelections options - std::string campaginPrefix; - int sessionInterSec; - int electionTimeoutMs; - - int dummyPort; - - // s3 - std::string s3ConfPath; -}; - -class SnapShotCloneServer { - public: - explicit SnapShotCloneServer(std::shared_ptr config) - :conf_(config) {} - /** - * @brief 通过配置初始化snapshotcloneserver所需要的所有配置 - */ - void InitAllSnapshotCloneOptions(void); - - /** - * @brief leader选举,未选中持续等待,选中情况下建立watch并返回 - */ - void StartCompaginLeader(void); - - /** - * @brief 启动dummyPort 用于检查主备snapshotserver - * 存活和各种config metric 和版本信息 - */ - void StartDummy(void); - - /** - * @brief 初始化clone与snapshot 各种核心结构 - */ - bool Init(void); - - /** - * @brief 启动各个组件的逻辑和线程池 - */ - bool Start(void); - - /** - * @brief 停止所有服务 - */ - void Stop(void); - - /** - * @brief 启动RPC服务直到外部kill - */ - void RunUntilQuit(void); - - private: - void InitEtcdConf(EtcdConf* etcdConf); - bool InitEtcdClient(void); - - private: - std::shared_ptr conf_; - SnapShotCloneServerOptions snapshotCloneServerOptions_; - // 标记自己为active/standby - bvar::Status status_; - // 与etcd交互的client - std::shared_ptr etcdClient_; - std::shared_ptr leaderElection_; - - std::shared_ptr snapClient_; - std::shared_ptr fileClient_; - std::shared_ptr client_; - - std::shared_ptr metaStore_; - std::shared_ptr dataStore_; - std::shared_ptr snapshotRef_; - std::shared_ptr snapshotMetric_; - std::shared_ptr snapshotCore_; - std::shared_ptr snapshotTaskManager_; - std::shared_ptr snapshotServiceManager_; - - std::shared_ptr cloneMetric_; - std::shared_ptr cloneRef_; - std::shared_ptr cloneCore_; - std::shared_ptr cloneTaskMgr_; - std::shared_ptr cloneServiceManagerBackend_; - std::shared_ptr cloneServiceManager_; - std::shared_ptr service_; - std::shared_ptr server_; - - std::string etcdEndpoints_; - std::string etcdUsername_; - std::string etcdPassword_; -}; - -} // namespace snapshotcloneserver -} // namespace curve - -#endif // SRC_SNAPSHOTCLONESERVER_SNAPSHOTCLONE_SERVER_H_ +namespace curve +{ + namespace snapshotcloneserver + { + + extern const char metricExposePrefix[]; + extern const char configMetricName[]; + extern const char statusMetricName[]; + extern const char ACTIVE[]; + extern const char STANDBY[]; + + using EtcdClientImp = ::curve::kvstorage::EtcdClientImp; + using Configuration = ::curve::common::Configuration; + using LeaderElection = ::curve::election::LeaderElection; + + struct SnapShotCloneServerOptions + { + CurveClientOptions clientOptions; + SnapshotCloneServerOptions serverOption; + + // etcd options + EtcdConf etcdConf; + int etcdClientTimeout; + int etcdRetryTimes; + + // leaderelections options + std::string campaginPrefix; + int sessionInterSec; + int electionTimeoutMs; + + int dummyPort; + + // s3 + std::string s3ConfPath; + }; + + class SnapShotCloneServer + { + public: + explicit SnapShotCloneServer(std::shared_ptr config) + : conf_(config) {} + /** + * @brief: Initialize all configurations required for snapshotcloneserver + * through configuration + */ + void InitAllSnapshotCloneOptions(void); + + /** + * @brief leader election, if not selected, continue to wait. 
If selected, + * establish a watch and return + */ + void StartCompaginLeader(void); + + /** + * @brief: Start dummyPort to check the active and standby snapshotserver + * Survival and various configuration metrics and version information + */ + void StartDummy(void); + + /** + * @brief initializes various core structures of clone and snapshot + */ + bool Init(void); + + /** + * @brief: Start the logic and thread pool of each component + */ + bool Start(void); + + /** + * @brief Stop all services + */ + void Stop(void); + + /** + * @brief Start RPC service until external kill + */ + void RunUntilQuit(void); + + private: + void InitEtcdConf(EtcdConf *etcdConf); + bool InitEtcdClient(void); + + private: + std::shared_ptr conf_; + SnapShotCloneServerOptions snapshotCloneServerOptions_; + // Mark yourself as active/standby + bvar::Status status_; + // Client interacting with ETCD + std::shared_ptr etcdClient_; + std::shared_ptr leaderElection_; + + std::shared_ptr snapClient_; + std::shared_ptr fileClient_; + std::shared_ptr client_; + + std::shared_ptr metaStore_; + std::shared_ptr dataStore_; + std::shared_ptr snapshotRef_; + std::shared_ptr snapshotMetric_; + std::shared_ptr snapshotCore_; + std::shared_ptr snapshotTaskManager_; + std::shared_ptr snapshotServiceManager_; + + std::shared_ptr cloneMetric_; + std::shared_ptr cloneRef_; + std::shared_ptr cloneCore_; + std::shared_ptr cloneTaskMgr_; + std::shared_ptr cloneServiceManagerBackend_; + std::shared_ptr cloneServiceManager_; + std::shared_ptr service_; + std::shared_ptr server_; + + std::string etcdEndpoints_; + std::string etcdUsername_; + std::string etcdPassword_; + }; + + } // namespace snapshotcloneserver +} // namespace curve + +#endif // SRC_SNAPSHOTCLONESERVER_SNAPSHOTCLONE_SERVER_H_ diff --git a/src/snapshotcloneserver/snapshotclone_service.cpp b/src/snapshotcloneserver/snapshotclone_service.cpp index 72f6b04683..f8505b03fe 100644 --- a/src/snapshotcloneserver/snapshotclone_service.cpp +++ b/src/snapshotcloneserver/snapshotclone_service.cpp @@ -22,14 +22,14 @@ #include "src/snapshotcloneserver/snapshotclone_service.h" +#include #include #include -#include #include "json/json.h" #include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/common/uuid.h" #include "src/common/string_util.h" +#include "src/common/uuid.h" #include "src/snapshotcloneserver/clone/clone_closure.h" using ::curve::common::UUIDGenerator; @@ -38,15 +38,14 @@ namespace curve { namespace snapshotcloneserver { void SnapshotCloneServiceImpl::default_method(RpcController* cntl, - const HttpRequest* req, - HttpResponse* resp, - Closure* done) { + const HttpRequest* req, + HttpResponse* resp, + Closure* done) { (void)req; (void)resp; brpc::ClosureGuard done_guard(done); - brpc::Controller* bcntl = - static_cast(cntl); - const std::string *action = + brpc::Controller* bcntl = static_cast(cntl); + const std::string* action = bcntl->http_request().uri().GetQuery(kActionStr); std::string requestId = UUIDGenerator().GenerateUUID(); @@ -91,39 +90,27 @@ void SnapshotCloneServiceImpl::default_method(RpcController* cntl, } LOG(INFO) << "SnapshotCloneServiceImpl Return : " - << "action = " << *action - << ", requestId = " << requestId - << ", context = " << bcntl->response_attachment(); + << "action = " << *action << ", requestId = " << requestId + << ", context = " << bcntl->response_attachment(); return; } void SnapshotCloneServiceImpl::HandleCreateSnapshotAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + 
brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - const std::string *name = - bcntl->http_request().uri().GetQuery(kNameStr); - if ((version == nullptr) || - (user == nullptr) || - (file == nullptr) || - (name == nullptr) || - (version->empty()) || - (user->empty()) || - (file->empty()) || - (name->empty())) { + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + const std::string* name = bcntl->http_request().uri().GetQuery(kNameStr); + if ((version == nullptr) || (user == nullptr) || (file == nullptr) || + (name == nullptr) || (version->empty()) || (user->empty()) || + (file->empty()) || (name->empty())) { HandleBadRequestError(bcntl, requestId); return; } LOG(INFO) << "CreateSnapshot:" - << " Version = " << *version - << ", User = " << *user - << ", File = " << *file - << ", Name = " << *name + << " Version = " << *version << ", User = " << *user + << ", File = " << *file << ", Name = " << *name << ", requestId = " << requestId; UUID uuid; int ret = snapshotManager_->CreateSnapshot(*file, *user, *name, &uuid); @@ -146,22 +133,14 @@ void SnapshotCloneServiceImpl::HandleCreateSnapshotAction( } void SnapshotCloneServiceImpl::HandleDeleteSnapshotAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - if ((version == nullptr) || - (user == nullptr) || - (uuid == nullptr) || - (version->empty()) || - (user->empty()) || - (uuid->empty())) { + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + if ((version == nullptr) || (user == nullptr) || (uuid == nullptr) || + (version->empty()) || (user->empty()) || (uuid->empty())) { HandleBadRequestError(bcntl, requestId); return; } @@ -172,10 +151,8 @@ void SnapshotCloneServiceImpl::HandleDeleteSnapshotAction( fileName = *file; } LOG(INFO) << "DeleteSnapshot:" - << " Version = " << *version - << ", User = " << *user - << ", UUID = " << *uuid - << ", File = " << fileStr + << " Version = " << *version << ", User = " << *user + << ", UUID = " << *uuid << ", File = " << fileStr << ", requestId = " << requestId; int ret = snapshotManager_->DeleteSnapshot(*uuid, *user, fileName); if (ret < 0) { @@ -196,32 +173,21 @@ void SnapshotCloneServiceImpl::HandleDeleteSnapshotAction( } void SnapshotCloneServiceImpl::HandleCancelSnapshotAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - const 
std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - if ((version == nullptr) || - (user == nullptr) || - (uuid == nullptr) || - (file == nullptr) || - (version->empty()) || - (user->empty()) || - (uuid->empty()) || - (file->empty())) { + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + if ((version == nullptr) || (user == nullptr) || (uuid == nullptr) || + (file == nullptr) || (version->empty()) || (user->empty()) || + (uuid->empty()) || (file->empty())) { HandleBadRequestError(bcntl, requestId); return; } LOG(INFO) << "CancelSnapshot:" - << " Version = " << *version - << ", User = " << *user - << ", UUID = " << *uuid - << ", File = " << *file + << " Version = " << *version << ", User = " << *user + << ", UUID = " << *uuid << ", File = " << *file << ", requestId = " << requestId; int ret = snapshotManager_->CancelSnapshot(*uuid, *user, *file); if (ret < 0) { @@ -242,28 +208,21 @@ void SnapshotCloneServiceImpl::HandleCancelSnapshotAction( } void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - const std::string *limit = - bcntl->http_request().uri().GetQuery(kLimitStr); - const std::string *offset = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + const std::string* limit = bcntl->http_request().uri().GetQuery(kLimitStr); + const std::string* offset = bcntl->http_request().uri().GetQuery(kOffsetStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - if ((version == nullptr) || - (user == nullptr) || - (version->empty()) || + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + if ((version == nullptr) || (user == nullptr) || (version->empty()) || (user->empty())) { HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -271,7 +230,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { @@ -291,22 +250,18 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( fileName = *file; } LOG(INFO) << "GetFileSnapshotInfo:" - << " Version = " << *version - << ", User = " << *user - << ", File = " << fileStr - << ", Limit = " << limitNum - << ", Offset = " << offsetNum - << ", UUID = " << uuidStr + << " Version = " << *version << ", User = " << *user + << ", File = " << fileStr << ", Limit = " << limitNum + << ", Offset = " << offsetNum << ", UUID = " << uuidStr << ", requestId = " << requestId; std::vector info; int ret = kErrCodeSuccess; if (uuid != nullptr) { - ret = snapshotManager_->GetFileSnapshotInfoById( - fileName, *user, *uuid, &info); + ret = snapshotManager_->GetFileSnapshotInfoById(fileName, 
*user, *uuid, + &info); } else { - ret = snapshotManager_->GetFileSnapshotInfo( - fileName, *user, &info); + ret = snapshotManager_->GetFileSnapshotInfo(fileName, *user, &info); } if (ret < 0) { bcntl->http_response().set_status_code( @@ -323,8 +278,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( mainObj[kTotalCountStr] = info.size(); Json::Value listSnapObj; for (std::vector::size_type i = offsetNum; - i < info.size() && i < limitNum + offsetNum; - i++) { + i < info.size() && i < limitNum + offsetNum; i++) { Json::Value fileSnapObj = info[i].ToJsonObj(); listSnapObj.append(fileSnapObj); } @@ -334,32 +288,22 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( return; } -void SnapshotCloneServiceImpl::HandleCloneAction( - brpc::Controller* bcntl, - const std::string &requestId, - Closure* done) { +void SnapshotCloneServiceImpl::HandleCloneAction(brpc::Controller* bcntl, + const std::string& requestId, + Closure* done) { brpc::ClosureGuard done_guard(done); - const std::string *version = + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *source = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* source = bcntl->http_request().uri().GetQuery(kSourceStr); - const std::string *destination = + const std::string* destination = bcntl->http_request().uri().GetQuery(kDestinationStr); - const std::string *lazy = - bcntl->http_request().uri().GetQuery(kLazyStr); - const std::string *poolset = - bcntl->http_request().uri().GetQuery(kPoolset); - if ((version == nullptr) || - (user == nullptr) || - (source == nullptr) || - (destination == nullptr) || - (lazy == nullptr) || - (version->empty()) || - (user->empty()) || - (source->empty()) || - (destination->empty()) || + const std::string* lazy = bcntl->http_request().uri().GetQuery(kLazyStr); + const std::string* poolset = bcntl->http_request().uri().GetQuery(kPoolset); + if ((version == nullptr) || (user == nullptr) || (source == nullptr) || + (destination == nullptr) || (lazy == nullptr) || (version->empty()) || + (user->empty()) || (source->empty()) || (destination->empty()) || (lazy->empty()) || // poolset is optional, but if it exists, it should not be empty (poolset != nullptr && poolset->empty())) { @@ -381,15 +325,12 @@ void SnapshotCloneServiceImpl::HandleCloneAction( return; } LOG(INFO) << "Clone:" - << " Version = " << *version - << ", User = " << *user - << ", Source = " << *source - << ", Destination = " << *destination + << " Version = " << *version << ", User = " << *user + << ", Source = " << *source << ", Destination = " << *destination << ", Lazy = " << *lazy << ", Poolset = " << (poolset != nullptr ? 
*poolset : "") << ", requestId = " << requestId; - TaskIdType taskId; auto closure = std::make_shared(bcntl, done); closure->SetRequestId(requestId); @@ -400,30 +341,21 @@ void SnapshotCloneServiceImpl::HandleCloneAction( return; } -void SnapshotCloneServiceImpl::HandleRecoverAction( - brpc::Controller* bcntl, - const std::string &requestId, - Closure* done) { +void SnapshotCloneServiceImpl::HandleRecoverAction(brpc::Controller* bcntl, + const std::string& requestId, + Closure* done) { brpc::ClosureGuard done_guard(done); - const std::string *version = + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *source = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* source = bcntl->http_request().uri().GetQuery(kSourceStr); - const std::string *destination = + const std::string* destination = bcntl->http_request().uri().GetQuery(kDestinationStr); - const std::string *lazy = - bcntl->http_request().uri().GetQuery(kLazyStr); - if ((version == nullptr) || - (user == nullptr) || - (source == nullptr) || - (destination == nullptr) || - (lazy == nullptr) || - (version->empty()) || - (user->empty()) || - (source->empty()) || - (destination->empty()) || + const std::string* lazy = bcntl->http_request().uri().GetQuery(kLazyStr); + if ((version == nullptr) || (user == nullptr) || (source == nullptr) || + (destination == nullptr) || (lazy == nullptr) || (version->empty()) || + (user->empty()) || (source->empty()) || (destination->empty()) || (lazy->empty())) { HandleBadRequestError(bcntl, requestId); LOG(INFO) << "SnapshotCloneServiceImpl Return : " @@ -443,37 +375,27 @@ void SnapshotCloneServiceImpl::HandleRecoverAction( return; } LOG(INFO) << "Recover:" - << " Version = " << *version - << ", User = " << *user - << ", Source = " << *source - << ", Destination = " << *destination - << ", Lazy = " << *lazy - << ", requestId = " << requestId; + << " Version = " << *version << ", User = " << *user + << ", Source = " << *source << ", Destination = " << *destination + << ", Lazy = " << *lazy << ", requestId = " << requestId; TaskIdType taskId; auto closure = std::make_shared(bcntl, done); closure->SetRequestId(requestId); - cloneManager_->RecoverFile( - *source, *user, *destination, lazyFlag, closure, &taskId); + cloneManager_->RecoverFile(*source, *user, *destination, lazyFlag, closure, + &taskId); done_guard.release(); return; } void SnapshotCloneServiceImpl::HandleFlattenAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *taskId = - bcntl->http_request().uri().GetQuery(kUUIDStr); - if ((version == nullptr) || - (user == nullptr) || - (taskId == nullptr) || - (version->empty()) || - (user->empty()) || - (taskId->empty())) { + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* taskId = bcntl->http_request().uri().GetQuery(kUUIDStr); + if ((version == nullptr) || (user == nullptr) || (taskId == nullptr) || + (version->empty()) || (user->empty()) || (taskId->empty())) { HandleBadRequestError(bcntl, requestId); LOG(INFO) << "SnapshotCloneServiceImpl Return : " << "action = Flatten" @@ -482,10 +404,8 @@ void 
SnapshotCloneServiceImpl::HandleFlattenAction( return; } LOG(INFO) << "Flatten:" - << " Version = " << *version - << ", User = " << *user - << ", UUID = " << *taskId - << ", requestId = " << requestId; + << " Version = " << *version << ", User = " << *user + << ", UUID = " << *taskId << ", requestId = " << requestId; int ret = cloneManager_->Flatten(*user, *taskId); if (ret < 0) { bcntl->http_response().set_status_code( @@ -505,28 +425,21 @@ void SnapshotCloneServiceImpl::HandleFlattenAction( } void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *limit = - bcntl->http_request().uri().GetQuery(kLimitStr); - const std::string *offset = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* limit = bcntl->http_request().uri().GetQuery(kLimitStr); + const std::string* offset = bcntl->http_request().uri().GetQuery(kOffsetStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - if ((version == nullptr) || - (user == nullptr) || - (version->empty()) || + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + if ((version == nullptr) || (user == nullptr) || (version->empty()) || (user->empty())) { HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -534,7 +447,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { @@ -554,25 +467,21 @@ void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( } LOG(INFO) << "GetTasks:" - << " Version = " << *version - << ", User = " << *user - << ", Limit = " << limitNum - << ", Offset = " << offsetNum - << ", UUID = " << uuidStr - << ", File = " << fileStr + << " Version = " << *version << ", User = " << *user + << ", Limit = " << limitNum << ", Offset = " << offsetNum + << ", UUID = " << uuidStr << ", File = " << fileStr << ", requestId = " << requestId; std::vector cloneTaskInfos; int ret = kErrCodeSuccess; if (uuid != nullptr) { - ret = cloneManager_->GetCloneTaskInfoById( - *user, *uuid, &cloneTaskInfos); + ret = + cloneManager_->GetCloneTaskInfoById(*user, *uuid, &cloneTaskInfos); } else if (file != nullptr) { - ret = cloneManager_->GetCloneTaskInfoByName( - *user, *file, &cloneTaskInfos); + ret = cloneManager_->GetCloneTaskInfoByName(*user, *file, + &cloneTaskInfos); } else { - ret = cloneManager_->GetCloneTaskInfo( - *user, &cloneTaskInfos); + ret = cloneManager_->GetCloneTaskInfo(*user, &cloneTaskInfos); } if (ret < 0) { bcntl->http_response().set_status_code( @@ -589,8 +498,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( mainObj[kTotalCountStr] = cloneTaskInfos.size(); Json::Value listObj; for (std::vector::size_type i = offsetNum; - i < cloneTaskInfos.size() && i < limitNum + offsetNum; - i++) { + i < cloneTaskInfos.size() && i < 
limitNum + offsetNum; i++) { Json::Value cloneTaskObj = cloneTaskInfos[i].ToJsonObj(); listObj.append(cloneTaskObj); } @@ -601,16 +509,12 @@ void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( return; } -bool SnapshotCloneServiceImpl::CheckBoolParamter( - const std::string *param, bool *valueOut) { - if (*param == "true" || - *param == "True" || - *param == "TRUE" || +bool SnapshotCloneServiceImpl::CheckBoolParamter(const std::string* param, + bool* valueOut) { + if (*param == "true" || *param == "True" || *param == "TRUE" || *param == "1") { *valueOut = true; - } else if (*param == "false" || - *param == "False" || - *param == "FALSE" || + } else if (*param == "false" || *param == "False" || *param == "FALSE" || *param == "0") { *valueOut = false; } else { @@ -620,30 +524,20 @@ bool SnapshotCloneServiceImpl::CheckBoolParamter( } void SnapshotCloneServiceImpl::HandleCleanCloneTaskAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *taskId = - bcntl->http_request().uri().GetQuery(kUUIDStr); - if ((version == nullptr) || - (user == nullptr) || - (taskId == nullptr) || - (version->empty()) || - (user->empty()) || - (taskId->empty())) { + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* taskId = bcntl->http_request().uri().GetQuery(kUUIDStr); + if ((version == nullptr) || (user == nullptr) || (taskId == nullptr) || + (version->empty()) || (user->empty()) || (taskId->empty())) { HandleBadRequestError(bcntl, requestId); return; } LOG(INFO) << "CleanCloneTask:" - << ", Version = " << *version - << ", User = " << *user - << ", UUID = " << *taskId - << ", requestId = " << requestId; - + << ", Version = " << *version << ", User = " << *user + << ", UUID = " << *taskId << ", requestId = " << requestId; int ret = cloneManager_->CleanCloneTask(*user, *taskId); if (ret < 0) { @@ -664,27 +558,22 @@ void SnapshotCloneServiceImpl::HandleCleanCloneTaskAction( } void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( - brpc::Controller* bcntl, const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - const std::string *limit = - bcntl->http_request().uri().GetQuery(kLimitStr); - const std::string *offset = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + const std::string* limit = bcntl->http_request().uri().GetQuery(kLimitStr); + const std::string* offset = bcntl->http_request().uri().GetQuery(kOffsetStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - const std::string *status = + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + const std::string* status = bcntl->http_request().uri().GetQuery(kStatusStr); - if ((version == nullptr) || - (version->empty())) { + if ((version == nullptr) || (version->empty())) { HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t 
limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -692,7 +581,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { @@ -719,14 +608,10 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( } LOG(INFO) << "GetFileSnapshotInfo:" - << " Version = " << *version - << ", User = " << userStr - << ", File = " << fileStr - << ", Limit = " << limitNum - << ", Offset = " << offsetNum - << ", UUID = " << uuidStr - << ", Status = " << statusStr - << ", requestId = " << requestId; + << " Version = " << *version << ", User = " << userStr + << ", File = " << fileStr << ", Limit = " << limitNum + << ", Offset = " << offsetNum << ", UUID = " << uuidStr + << ", Status = " << statusStr << ", requestId = " << requestId; std::vector info; int ret = kErrCodeSuccess; @@ -748,8 +633,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( mainObj[kTotalCountStr] = info.size(); Json::Value listSnapObj; for (std::vector::size_type i = offsetNum; - i < info.size() && i < limitNum + offsetNum; - i++) { + i < info.size() && i < limitNum + offsetNum; i++) { Json::Value fileSnapObj = info[i].ToJsonObj(); listSnapObj.append(fileSnapObj); } @@ -760,31 +644,26 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( } void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( - brpc::Controller* bcntl, const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *limit = - bcntl->http_request().uri().GetQuery(kLimitStr); - const std::string *offset = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* limit = bcntl->http_request().uri().GetQuery(kLimitStr); + const std::string* offset = bcntl->http_request().uri().GetQuery(kOffsetStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - const std::string *source = + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + const std::string* source = bcntl->http_request().uri().GetQuery(kSourceStr); - const std::string *destination = + const std::string* destination = bcntl->http_request().uri().GetQuery(kDestinationStr); - const std::string *status = + const std::string* status = bcntl->http_request().uri().GetQuery(kStatusStr); - const std::string *type = - bcntl->http_request().uri().GetQuery(kTypeStr); - if ((version == nullptr) || - (version->empty())) { + const std::string* type = bcntl->http_request().uri().GetQuery(kTypeStr); + if ((version == nullptr) || (version->empty())) { HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -792,7 +671,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { @@ -832,15 +711,11 @@ void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( } LOG(INFO) << "GetTaskList:" - << " 
Version = " << *version - << ", User = " << userStr - << ", Limit = " << limitNum - << ", Offset = " << offsetNum - << ", UUID = " << uuidStr - << ", Source = " << sourceStr + << " Version = " << *version << ", User = " << userStr + << ", Limit = " << limitNum << ", Offset = " << offsetNum + << ", UUID = " << uuidStr << ", Source = " << sourceStr << ", Destination = " << destinationStr - << ", Status = " << statusStr - << ", Type = " << typeStr + << ", Status = " << statusStr << ", Type = " << typeStr << ", requestId = " << requestId; std::vector cloneTaskInfos; @@ -862,8 +737,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( mainObj[kTotalCountStr] = cloneTaskInfos.size(); Json::Value listObj; for (std::vector::size_type i = offsetNum; - i < cloneTaskInfos.size() && i < limitNum + offsetNum; - i++) { + i < cloneTaskInfos.size() && i < limitNum + offsetNum; i++) { Json::Value cloneTaskObj = cloneTaskInfos[i].ToJsonObj(); listObj.append(cloneTaskObj); } @@ -876,33 +750,26 @@ void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( } void SnapshotCloneServiceImpl::HandleGetCloneRefStatusAction( - brpc::Controller* bcntl, const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *source = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* source = bcntl->http_request().uri().GetQuery(kSourceStr); - if ((version == nullptr) || - (user == nullptr) || - (source == nullptr) || - (version->empty()) || - (source->empty()) || - (user->empty())) { + if ((version == nullptr) || (user == nullptr) || (source == nullptr) || + (version->empty()) || (source->empty()) || (user->empty())) { HandleBadRequestError(bcntl, requestId); return; } LOG(INFO) << "GetCloneRefStatus:" - << " Version = " << *version - << ", User = " << *user - << ", Source = " << *source - << ", requestId = " << requestId; + << " Version = " << *version << ", User = " << *user + << ", Source = " << *source << ", requestId = " << requestId; std::vector cloneInfos; CloneRefStatus refStatus; - int ret = cloneManager_->GetCloneRefStatus(*source, &refStatus, - &cloneInfos); + int ret = + cloneManager_->GetCloneRefStatus(*source, &refStatus, &cloneInfos); if (ret < 0) { bcntl->http_response().set_status_code( brpc::HTTP_STATUS_INTERNAL_SERVER_ERROR); @@ -916,7 +783,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneRefStatusAction( mainObj[kCodeStr] = std::to_string(kErrCodeSuccess); mainObj[kMessageStr] = code2Msg[kErrCodeSuccess]; mainObj[kRequestIdStr] = requestId; - mainObj[kRefStatusStr] = static_cast (refStatus); + mainObj[kRefStatusStr] = static_cast(refStatus); mainObj[kTotalCountStr] = 0; if (refStatus == CloneRefStatus::kNeedCheck) { mainObj[kTotalCountStr] = cloneInfos.size(); @@ -943,20 +810,19 @@ void SnapshotCloneServiceImpl::HandleGetCloneRefStatusAction( } void SnapshotCloneServiceImpl::SetErrorMessage(brpc::Controller* bcntl, - int errCode, - const std::string &requestId, - const std::string &uuid) { + int errCode, + const std::string& requestId, + const std::string& uuid) { butil::IOBufBuilder os; - std::string msg = BuildErrorMessage(errCode, - requestId, uuid); + std::string msg = BuildErrorMessage(errCode, requestId, uuid); os << msg; os.move_to(bcntl->response_attachment()); return; } -void 
SnapshotCloneServiceImpl::HandleBadRequestError(brpc::Controller* bcntl,
-                                                const std::string &requestId,
-                                                const std::string &uuid) {
+void SnapshotCloneServiceImpl::HandleBadRequestError(
+    brpc::Controller* bcntl, const std::string& requestId,
+    const std::string& uuid) {
     bcntl->http_response().set_status_code(brpc::HTTP_STATUS_BAD_REQUEST);
     SetErrorMessage(bcntl, kErrCodeInvalidRequest, requestId, uuid);
 }
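All of the handlers above are reached through the service's default_method and dispatched on the Action query parameter. For reference, a minimal sketch of driving the CreateSnapshot endpoint with a brpc HTTP client; the address, user, file, and snapshot names are made-up values, and the /SnapshotCloneService path assumes brpc's default mapping of a service's default_method to /<service-name>:

#include <brpc/channel.h>
#include <iostream>

int main() {
    brpc::Channel channel;
    brpc::ChannelOptions options;
    options.protocol = brpc::PROTOCOL_HTTP;  // speak HTTP instead of baidu_std
    if (channel.Init("127.0.0.1:5555", &options) != 0) {  // assumed address
        std::cerr << "init channel failed" << std::endl;
        return -1;
    }
    brpc::Controller cntl;
    cntl.http_request().uri() =
        "/SnapshotCloneService?Action=CreateSnapshot&Version=0.0.6"
        "&User=curve&File=/test1&Name=snap1";
    // No pb request/response objects: the payload travels in the attachments.
    channel.CallMethod(nullptr, &cntl, nullptr, nullptr, nullptr);
    if (cntl.Failed()) {
        std::cerr << cntl.ErrorText() << std::endl;
        return -1;
    }
    std::cout << cntl.response_attachment() << std::endl;  // JSON reply
    return 0;
}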
diff --git a/src/snapshotcloneserver/snapshotclone_service.h b/src/snapshotcloneserver/snapshotclone_service.h
index 6ba1f34f48..c9d15fc222 100644
--- a/src/snapshotcloneserver/snapshotclone_service.h
+++ b/src/snapshotcloneserver/snapshotclone_service.h
@@ -24,87 +24,82 @@
 #define SRC_SNAPSHOTCLONESERVER_SNAPSHOTCLONE_SERVICE_H_
 
 #include
+
 #include
 #include
 
 #include "proto/snapshotcloneserver.pb.h"
-#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h"
 #include "src/snapshotcloneserver/clone/clone_service_manager.h"
+#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h"
 
 namespace curve {
 namespace snapshotcloneserver {
 
-using ::google::protobuf::RpcController;
 using ::google::protobuf::Closure;
+using ::google::protobuf::RpcController;
 
 /**
- * @brief 快照转储rpc服务实现
+ * @brief Snapshot dump RPC service implementation
 */
 class SnapshotCloneServiceImpl : public SnapshotCloneService {
 public:
-    /**
-     * @brief 构造函数
-     *
-     * @param manager 快照转储服务管理对象
-     */
+    /**
+     * @brief Constructor
+     *
+     * @param manager Snapshot dump service management object
+     */
    SnapshotCloneServiceImpl(
        std::shared_ptr snapshotManager,
        std::shared_ptr cloneManager)
-        : snapshotManager_(snapshotManager),
-          cloneManager_(cloneManager) {}
+        : snapshotManager_(snapshotManager), cloneManager_(cloneManager) {}
 
    virtual ~SnapshotCloneServiceImpl() {}
 
    /**
-     * @brief http服务默认方法
+     * @brief HTTP service default method
     *
     * @param cntl rpc controller
-     * @param req http请求报文
-     * @param resp http回复报文
-     * @param done http异步回调闭包
+     * @param req HTTP request message
+     * @param resp HTTP reply message
+     * @param done HTTP asynchronous callback closure
     */
-    void default_method(RpcController* cntl,
-                        const HttpRequest* req,
-                        HttpResponse* resp,
-                        Closure* done);
+    void default_method(RpcController* cntl, const HttpRequest* req,
+                        HttpResponse* resp, Closure* done);
 
 private:
    void HandleCreateSnapshotAction(brpc::Controller* bcntl,
-                                    const std::string &requestId);
+                                    const std::string& requestId);
    void HandleDeleteSnapshotAction(brpc::Controller* bcntl,
-                                    const std::string &requestId);
+                                    const std::string& requestId);
    void HandleCancelSnapshotAction(brpc::Controller* bcntl,
-                                    const std::string &requestId);
+                                    const std::string& requestId);
    void HandleGetFileSnapshotInfoAction(brpc::Controller* bcntl,
-                                         const std::string &requestId);
+                                         const std::string& requestId);
    void HandleCloneAction(brpc::Controller* bcntl,
-                           const std::string &requestId,
-                           Closure* done);
+                           const std::string& requestId, Closure* done);
    void HandleRecoverAction(brpc::Controller* bcntl,
-                             const std::string &requestId,
-                             Closure* done);
+                             const std::string& requestId, Closure* done);
    void HandleFlattenAction(brpc::Controller* bcntl,
-                             const std::string &requestId);
+                             const std::string& requestId);
    void HandleGetCloneTasksAction(brpc::Controller* bcntl,
-                                   const std::string &requestId);
+                                   const std::string& requestId);
    void HandleCleanCloneTaskAction(brpc::Controller* bcntl,
-                                    const std::string &requestId);
+                                    const std::string& requestId);
    void HandleGetFileSnapshotListAction(brpc::Controller* bcntl,
-                                         const std::string &requestId);
+                                         const std::string& requestId);
    void HandleGetCloneTaskListAction(brpc::Controller* bcntl,
-                                      const std::string &requestId);
+                                      const std::string& requestId);
    void HandleGetCloneRefStatusAction(brpc::Controller* bcntl,
-                                       const std::string &requestId);
-    bool CheckBoolParamter(
-        const std::string *param, bool *valueOut);
+                                       const std::string& requestId);
+    bool CheckBoolParamter(const std::string* param, bool* valueOut);
    void SetErrorMessage(brpc::Controller* bcntl, int errCode,
-                         const std::string &requestId,
-                         const std::string &uuid = "");
+                         const std::string& requestId,
+                         const std::string& uuid = "");
    void HandleBadRequestError(brpc::Controller* bcntl,
-                               const std::string &requestId,
-                               const std::string &uuid = "");
+                               const std::string& requestId,
+                               const std::string& uuid = "");
 
 private:
-    // 快照转储服务管理对象
+    // Snapshot dump service management objects
    std::shared_ptr snapshotManager_;
    std::shared_ptr cloneManager_;
 };
diff --git a/src/tools/chunkserver_client.cpp b/src/tools/chunkserver_client.cpp
index 69eb492d5c..8ecd7036cd 100644
--- a/src/tools/chunkserver_client.cpp
+++ b/src/tools/chunkserver_client.cpp
@@ -28,11 +28,10 @@ namespace curve {
 namespace tool {
 
 std::ostream& operator<<(std::ostream& os, const Chunk& chunk) {
-    uint64_t groupId = (static_cast(chunk.logicPoolId) << 32) |
-                       chunk.copysetId;
+    uint64_t groupId =
+        (static_cast(chunk.logicPoolId) << 32) | chunk.copysetId;
     os << "logicalPoolId:" << chunk.logicPoolId
-       << ",copysetId:" << chunk.copysetId
-       << ",groupId:" << groupId
+       << ",copysetId:" << chunk.copysetId << ",groupId:" << groupId
        << ",chunkId:" << chunk.chunkId;
     return os;
 }
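As the operator<< above shows, the printed groupId packs the logical pool id into the high 32 bits and the copyset id into the low 32 bits. A quick worked example:

// For logicPoolId = 2 and copysetId = 101:
uint64_t groupId = (static_cast<uint64_t>(2) << 32) | 101;
// (2 << 32) = 8589934592, so groupId = 8589934592 + 101 = 8589934693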
@@ -40,8 +39,8 @@ std::ostream& operator<<(std::ostream& os, const Chunk& chunk) {
 
 int ChunkServerClient::Init(const std::string& csAddr) {
     csAddr_ = csAddr;
     if (channel_.Init(csAddr.c_str(), nullptr) != 0) {
-        std::cout << "Init channel to chunkserver: " << csAddr
-                  << " failed!" << std::endl;
+        std::cout << "Init channel to chunkserver: " << csAddr << " failed!"
+                  << std::endl;
         return -1;
     }
     return 0;
 }
@@ -69,7 +68,7 @@ int ChunkServerClient::GetRaftStatus(butil::IOBuf* iobuf) {
         }
         retryTimes++;
     }
-    // 只打最后一次失败的原因
+    // Only print the reason for the last failure
     std::cout << "Send RPC to chunkserver fail, error content: "
               << cntl.ErrorText() << std::endl;
     return -1;
@@ -97,9 +96,8 @@ bool ChunkServerClient::CheckChunkServerOnline() {
     return false;
 }
 
-int ChunkServerClient::GetCopysetStatus(
-    const CopysetStatusRequest& request,
-    CopysetStatusResponse* response) {
+int ChunkServerClient::GetCopysetStatus(const CopysetStatusRequest& request,
+                                        CopysetStatusResponse* response) {
     brpc::Controller cntl;
     curve::chunkserver::CopysetService_Stub stub(&channel_);
     uint64_t retryTimes = 0;
@@ -112,17 +110,16 @@
             continue;
         }
         if (response->status() !=
-                COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS) {
+            COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS) {
             std::cout << "GetCopysetStatus fail, request: "
                       << request.DebugString()
-                      << ", errCode: "
-                      << response->status() << std::endl;
+                      << ", errCode: " << response->status() << std::endl;
             return -1;
         } else {
             return 0;
         }
     }
-    // 只打最后一次失败的原因
+    // Only print the reason for the last failure
     std::cout << "Send RPC to chunkserver fail, error content: "
               << cntl.ErrorText() << std::endl;
     return -1;
@@ -151,15 +148,14 @@ int ChunkServerClient::GetChunkHash(const Chunk& chunk,
         if (response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) {
             std::cout << "GetCopysetStatus fail, request: "
                       << request.DebugString()
-                      << ", errCode: "
-                      << response.status() << std::endl;
+                      << ", errCode: " << response.status() << std::endl;
             return -1;
         } else {
             *chunkHash = response.hash();
             return 0;
         }
     }
-    // 只打最后一次失败的原因
+    // Only print the reason for the last failure
     std::cout << "Send RPC to chunkserver fail, error content: "
               << cntl.ErrorText() << std::endl;
     return -1;
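Taken together, a typical use of this client from an ops tool looks like the following sketch; the address and chunk coordinates are placeholders, not values from this patch:

#include <iostream>
#include <string>
#include "src/tools/chunkserver_client.h"

int CheckOneChunk() {
    curve::tool::ChunkServerClient client;
    if (client.Init("127.0.0.1:8200") != 0) {  // one Init per address
        return -1;
    }
    if (!client.CheckChunkServerOnline()) {
        std::cout << "chunkserver offline" << std::endl;
        return -1;
    }
    curve::tool::Chunk chunk(/*poolId=*/2, /*csId=*/101, /*chunkId2=*/1001);
    std::string hash;
    if (client.GetChunkHash(chunk, &hash) != 0) {
        return -1;
    }
    std::cout << chunk << ", hash = " << hash << std::endl;
    return 0;
}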
diff --git a/src/tools/chunkserver_client.h b/src/tools/chunkserver_client.h
index 5945737ae8..400755cb30 100644
--- a/src/tools/chunkserver_client.h
+++ b/src/tools/chunkserver_client.h
@@ -23,30 +23,30 @@
 #ifndef SRC_TOOLS_CHUNKSERVER_CLIENT_H_
 #define SRC_TOOLS_CHUNKSERVER_CLIENT_H_
 
-#include
-#include
 #include
+#include
+#include
 
-#include
 #include
+#include
 
 #include "proto/chunk.pb.h"
 #include "proto/copyset.pb.h"
 #include "src/tools/curve_tool_define.h"
 
+using curve::chunkserver::CHUNK_OP_STATUS;
+using curve::chunkserver::COPYSET_OP_STATUS;
 using curve::chunkserver::CopysetStatusRequest;
 using curve::chunkserver::CopysetStatusResponse;
-using curve::chunkserver::COPYSET_OP_STATUS;
 using curve::chunkserver::GetChunkHashRequest;
 using curve::chunkserver::GetChunkHashResponse;
-using curve::chunkserver::CHUNK_OP_STATUS;
 
 namespace curve {
 namespace tool {
 
 struct Chunk {
-    Chunk(uint32_t poolId, uint32_t csId, uint64_t chunkId2) :
-        logicPoolId(poolId), copysetId(csId), chunkId(chunkId2) {}
+    Chunk(uint32_t poolId, uint32_t csId, uint64_t chunkId2)
+        : logicPoolId(poolId), copysetId(csId), chunkId(chunkId2) {}
     uint32_t logicPoolId;
     uint32_t copysetId;
     uint64_t chunkId;
@@ -58,39 +58,43 @@ class ChunkServerClient {
 public:
     virtual ~ChunkServerClient() = default;
     /**
-     * @brief 初始化channel,对一个地址,初始化一次就好
-     * @param csAddr chunkserver地址
-     * @return 成功返回0,失败返回-1
-     */
+     * @brief Initialize the channel; for a given address, initializing it
+     * once is enough
+     * @param csAddr chunkserver address
+     * @return returns 0 for success, -1 for failure
+     */
     virtual int Init(const std::string& csAddr);
 
     /**
-     * @brief 调用braft的RaftStat接口获取复制组的详细信息,放到iobuf里面
-     * @param iobuf 复制组详细信息,返回值为0时有效
-     * @return 成功返回0,失败返回-1
-     */
+     * @brief Call braft's RaftStat interface to obtain detailed information
+     * about the replication group, and place it in iobuf
+     * @param iobuf replication group details, valid when the return value is 0
+     * @return returns 0 for success, -1 for failure
+     */
     virtual int GetRaftStatus(butil::IOBuf* iobuf);
 
     /**
-     * @brief 检查chunkserver是否在线,只检查controller,不检查response
-     * @return 在线返回true,不在线返回false
-     */
+     * @brief Check whether the chunkserver is online; only the controller is
+     * checked, not the response
+     * @return true if online, false if not
+     */
     virtual bool CheckChunkServerOnline();
 
     /**
-     * @brief 调用chunkserver的GetCopysetStatus接口
-     & @param request 查询copyset的request
-     * @param response 返回的response,里面有复制组详细信息,返回值为0时有效
-     * @return 成功返回0,失败返回-1
+     * @brief Call the GetCopysetStatus interface of the chunkserver
+     * @param request The request to query the copyset
+     * @param response The response returned, which contains detailed
+     * information about the replication group, valid when the return value
+     * is 0
+     * @return returns 0 for success, -1 for failure
     */
     virtual int GetCopysetStatus(const CopysetStatusRequest& request,
                                  CopysetStatusResponse* response);
 
     /**
-     * @brief 从chunkserver获取chunk的hash值
-     & @param chunk 要查询的chunk
-     * @param[out] chunkHash chunk的hash值,返回值为0时有效
-     * @return 成功返回0,失败返回-1
+     * @brief Get the hash value of a chunk from the chunkserver
+     * @param chunk The chunk to query
+     * @param[out] chunkHash The hash value of the chunk, valid when the
+     * return value is 0
+     * @return returns 0 for success, -1 for failure
     */
     virtual int GetChunkHash(const Chunk& chunk, std::string* chunkHash);
diff --git a/src/tools/chunkserver_tool_factory.h b/src/tools/chunkserver_tool_factory.h
index 9a1e344b3c..55505eccf0 100644
--- a/src/tools/chunkserver_tool_factory.h
+++ b/src/tools/chunkserver_tool_factory.h
@@ -26,9 +26,9 @@
 #include
 #include
 
+#include "src/fs/ext4_filesystem_impl.h"
 #include "src/tools/curve_meta_tool.h"
 #include "src/tools/raft_log_tool.h"
-#include "src/fs/ext4_filesystem_impl.h"
 
 namespace curve {
 namespace tool {
@@ -38,20 +38,21 @@ using curve::fs::Ext4FileSystemImpl;
 class ChunkServerToolFactory {
 public:
     /**
-     * @brief 根据输入的command获取CurveTool对象
-     * @param command 要执行的命令的名称
-     * @return CurveTool实例
+     * @brief Retrieve the CurveTool object based on the input command
+     * @param command The name of the command to be executed
+     * @return CurveTool instance
     */
     static std::shared_ptr GenerateChunkServerTool(
-            const std::string& command);
+        const std::string& command);
+
 private:
     /**
-     * @brief 获取CurveMetaTool实例
+     * @brief Get CurveMetaTool instance
     */
     static std::shared_ptr GenerateCurveMetaTool();
 
     /**
-     * @brief 获取RaftLogMetaTool实例
+     * @brief Get RaftLogMetaTool instance
     */
     static std::shared_ptr GenerateRaftLogTool();
 };
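The dispatch body of GenerateChunkServerTool is not part of this hunk. A plausible shape, assuming each tool exposes a static SupportCommand predicate the way ConsistencyCheck and CopysetCheck do below, would be:

// Sketch only; the SupportCommand predicates on CurveMetaTool and
// RaftLogTool are assumed, not shown in this patch.
std::shared_ptr<CurveTool> ChunkServerToolFactory::GenerateChunkServerTool(
    const std::string& command) {
    if (CurveMetaTool::SupportCommand(command)) {
        return GenerateCurveMetaTool();
    }
    if (RaftLogTool::SupportCommand(command)) {
        return GenerateRaftLogTool();
    }
    return nullptr;  // unknown command
}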
diff --git a/src/tools/common.cpp b/src/tools/common.cpp
index 35f29bf738..cdcdc369ba 100644
--- a/src/tools/common.cpp
+++ b/src/tools/common.cpp
@@ -29,11 +29,11 @@ namespace curve {
 namespace tool {
 
 void TrimMetricString(std::string* str) {
-    // 去掉头部空格
+    // Remove leading spaces
     str->erase(0, str->find_first_not_of(" "));
-    // 去掉尾部回车
+    // Remove trailing carriage returns and line feeds
     str->erase(str->find_last_not_of("\r\n") + 1);
-    // 去掉两边双引号
+    // Remove double quotes on both sides
     str->erase(0, str->find_first_not_of("\""));
     str->erase(str->find_last_not_of("\"") + 1);
 }
diff --git a/src/tools/common.h b/src/tools/common.h
index 1465a76ac7..cea600eb5f 100644
--- a/src/tools/common.h
+++ b/src/tools/common.h
@@ -24,8 +24,9 @@
 #define SRC_TOOLS_COMMON_H_
 
 #include
-#include
+
 #include
+#include
 
 DECLARE_uint32(logicalPoolId);
 DECLARE_uint32(copysetId);
@@ -34,9 +35,9 @@ namespace curve {
 namespace tool {
 
 /**
- * @brief 格式化,从metric获取的string
- * 去掉string两边的双引号以及空格和回车
- * @param[out] str 要格式化的string
+ * @brief Format a string obtained from a metric:
+ * remove the double quotes, spaces, and carriage returns around it
+ * @param[out] str The string to format
 */
 void TrimMetricString(std::string* str);
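The effect of TrimMetricString on a raw metric line, using a made-up input:

std::string s = "  \"curve_version\"\r\n";
curve::tool::TrimMetricString(&s);
// s is now: curve_version
// (leading spaces, trailing \r\n, and the surrounding quotes are stripped)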
diff --git a/src/tools/consistency_check.cpp b/src/tools/consistency_check.cpp
index e3a84366ae..7cc1d50ed1 100644
--- a/src/tools/consistency_check.cpp
+++ b/src/tools/consistency_check.cpp
@@ -20,16 +20,18 @@
  * Author: tongguangxun
 */
 
-#include
-
 #include "src/tools/consistency_check.h"
 
+#include
+
 DEFINE_string(filename, "", "filename to check consistency");
-DEFINE_bool(check_hash, true, R"(用户需要先确认copyset的applyindex一致之后
-    再去查copyset内容是不是一致。通常需要先设置
-    check_hash = false先检查copyset的applyindex是否一致
-    如果一致了再设置check_hash = true,
-    检查copyset内容是不是一致)");
+DEFINE_bool(
+    check_hash, true,
+    R"(Users need to confirm that the apply index of the copyset is consistent
+    before checking whether the copyset contents are consistent. Usually, first set
+    check_hash = false to check whether the apply index of the copyset is consistent;
+    once confirmed, set check_hash = true
+    to check whether the copyset contents are consistent)");
 DEFINE_uint32(chunkServerBasePort, 8200, "base port of chunkserver");
 DECLARE_string(mdsAddr);
@@ -48,8 +50,8 @@ std::ostream& operator<<(std::ostream& os, const CsAddrsType& csAddrs) {
     for (uint32_t i = 0; i < csAddrs.size(); ++i) {
         std::string ip;
         uint32_t port;
-        if (curve::common::NetCommon::SplitAddrToIpPort(csAddrs[i],
-                                                        &ip, &port)) {
+        if (curve::common::NetCommon::SplitAddrToIpPort(csAddrs[i], &ip,
+                                                        &port)) {
             uint32_t csSeq = port - FLAGS_chunkServerBasePort;
             ipVec.emplace_back(ip);
             seqVec.emplace_back(csSeq);
@@ -75,12 +77,11 @@
 }
 
 ConsistencyCheck::ConsistencyCheck(
-    std::shared_ptr nameSpaceToolCore,
-    std::shared_ptr csClient) :
-    nameSpaceToolCore_(nameSpaceToolCore),
-    csClient_(csClient),
-    inited_(false) {
-}
+    std::shared_ptr nameSpaceToolCore,
+    std::shared_ptr csClient)
+    : nameSpaceToolCore_(nameSpaceToolCore),
+      csClient_(csClient),
+      inited_(false) {}
 
 bool ConsistencyCheck::SupportCommand(const std::string& command) {
     return (command == kCheckConsistencyCmd);
@@ -98,7 +99,7 @@ int ConsistencyCheck::Init() {
     return 0;
 }
 
-int ConsistencyCheck::RunCommand(const std::string &cmd) {
+int ConsistencyCheck::RunCommand(const std::string& cmd) {
     if (Init() != 0) {
         std::cout << "Init ConsistencyCheck failed" << std::endl;
         return -1;
@@ -131,13 +132,15 @@ int ConsistencyCheck::CheckFileConsistency(const std::string& fileName,
     return 0;
 }
 
-void ConsistencyCheck::PrintHelp(const std::string &cmd) {
+void ConsistencyCheck::PrintHelp(const std::string& cmd) {
     if (!SupportCommand(cmd)) {
         std::cout << "Command not supported!" << std::endl;
         return;
     }
     std::cout << "Example: " << std::endl;
-    std::cout << "curve_ops_tool check-consistency -filename=/test [-check_hash=false]" << std::endl;  // NOLINT
+    std::cout << "curve_ops_tool check-consistency -filename=/test "
+                 "[-check_hash=false]"
+              << std::endl;  // NOLINT
 }
 
 int ConsistencyCheck::FetchFileCopyset(const std::string& fileName,
@@ -160,14 +163,11 @@
     return 0;
 }
 
-int ConsistencyCheck::CheckCopysetConsistency(
-    const CopySet copyset,
-    bool checkHash) {
+int ConsistencyCheck::CheckCopysetConsistency(const CopySet copyset,
+                                              bool checkHash) {
     std::vector csLocs;
     int res = nameSpaceToolCore_->GetChunkServerListInCopySet(
-        copyset.first,
-        copyset.second,
-        &csLocs);
+        copyset.first, copyset.second, &csLocs);
     if (res != 0) {
         std::cout << "GetServerList info failed, exit consistency check!"
                   << std::endl;
@@ -180,9 +180,9 @@
         std::string csAddr = hostIp + ":" + std::to_string(port);
         csAddrs.emplace_back(csAddr);
     }
-    // 检查当前copyset的chunkserver内容是否一致
+    // Check whether the contents of the current copyset are consistent
+    // across its chunkservers
     if (checkHash) {
-        // 先检查apply index是否一致
+        // First, check whether the apply index is consistent
         res = CheckApplyIndex(copyset, csAddrs);
         if (res != 0) {
             std::cout << "Apply index not match when check hash!" << std::endl;
@@ -196,17 +195,16 @@
 }
 
 int ConsistencyCheck::GetCopysetStatusResponse(
-    const std::string& csAddr,
-    const CopySet copyset,
-    CopysetStatusResponse* response) {
+    const std::string& csAddr, const CopySet copyset,
+    CopysetStatusResponse* response) {
     int res = csClient_->Init(csAddr);
     if (res != 0) {
-        std::cout << "Init chunkserverClient to " << csAddr
-                  << " fail!" << std::endl;
+        std::cout << "Init chunkserverClient to " << csAddr << " fail!"
+                  << std::endl;
         return -1;
     }
     CopysetStatusRequest request;
     curve::common::Peer *peer = new curve::common::Peer();
     peer->set_address(csAddr);
     request.set_logicpoolid(copyset.first);
     request.set_copysetid(copyset.second);
     request.set_queryhash(false);
     res = csClient_->GetCopysetStatus(request, response);
     if (res != 0) {
-        std::cout << "GetCopysetStatus from " << csAddr
-                  << " fail!" << std::endl;
+        std::cout << "GetCopysetStatus from " << csAddr << " fail!"
+                  << std::endl;
         return -1;
     }
     return 0;
 }
@@ -226,8 +225,7 @@ int ConsistencyCheck::CheckCopysetHash(const CopySet& copyset,
         Chunk chunk(copyset.first, copyset.second, chunkId);
         int res = CheckChunkHash(chunk, csAddrs);
         if (res != 0) {
-            std::cout << "{" << chunk
-                      << "," << csAddrs << "}" << std::endl;
+            std::cout << "{" << chunk << "," << csAddrs << "}" << std::endl;
             return -1;
         }
     }
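As the check_hash flag text above describes, apply indexes should be verified before hashes are compared. A minimal sketch of that two-phase sequencing, assuming an already-initialized checker instance:

int RunTwoPhaseCheck(curve::tool::ConsistencyCheck* checker,
                     const std::string& fileName) {
    // Phase 1: cheap check that the apply indexes of all replicas converge.
    if (checker->CheckFileConsistency(fileName, /*checkHash=*/false) != 0) {
        return -1;  // replicas are still catching up; retry later
    }
    // Phase 2: only then compare the chunk hashes across the replicas.
    return checker->CheckFileConsistency(fileName, /*checkHash=*/true);
}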
@@ -242,8 +240,8 @@ int ConsistencyCheck::CheckChunkHash(const Chunk& chunk,
     for (const auto& csAddr : csAddrs) {
         int res = csClient_->Init(csAddr);
         if (res != 0) {
-            std::cout << "Init chunkserverClient to " << csAddr
-                      << " fail!" << std::endl;
+            std::cout << "Init chunkserverClient to " << csAddr << " fail!"
+                      << std::endl;
             return -1;
         }
         res = csClient_->GetChunkHash(chunk, &curHash);
@@ -276,8 +274,8 @@ int ConsistencyCheck::CheckApplyIndex(const CopySet copyset,
         CopysetStatusResponse response;
         int res = GetCopysetStatusResponse(csAddr, copyset, &response);
         if (res != 0) {
-            std::cout << "GetCopysetStatusResponse from " << csAddr
-                      << " fail" << std::endl;
+            std::cout << "GetCopysetStatusResponse from " << csAddr << " fail"
+                      << std::endl;
             ret = -1;
             break;
         }
diff --git a/src/tools/consistency_check.h b/src/tools/consistency_check.h
index 12e12346b9..aad241306f 100644
--- a/src/tools/consistency_check.h
+++ b/src/tools/consistency_check.h
@@ -23,25 +23,25 @@
 #ifndef SRC_TOOLS_CONSISTENCY_CHECK_H_
 #define SRC_TOOLS_CONSISTENCY_CHECK_H_
 
-#include
-#include
 #include
 #include
+#include
+#include
 
-#include
-#include
 #include
+#include
 #include
 #include
+#include
 #include
-#include
+#include
 
 #include "proto/copyset.pb.h"
 #include "src/common/net_common.h"
-#include "src/tools/namespace_tool_core.h"
 #include "src/tools/chunkserver_client.h"
 #include "src/tools/curve_tool.h"
 #include "src/tools/curve_tool_define.h"
+#include "src/tools/namespace_tool_core.h"
 
 DECLARE_string(filename);
 DECLARE_bool(check_hash);
@@ -57,115 +57,118 @@ std::ostream& operator<<(std::ostream& os, const CsAddrsType& csAddrs);
 class ConsistencyCheck : public CurveTool {
 public:
     ConsistencyCheck(std::shared_ptr nameSpaceToolCore,
-                    std::shared_ptr csClient);
+                     std::shared_ptr csClient);
     ~ConsistencyCheck() = default;
 
     /**
-     * @brief 打印help信息
-     * @param cmd:执行的命令
-     * @return 无
+     * @brief Print help information
+     * @param cmd: Command executed
+     * @return None
     */
-    void PrintHelp(const std::string &cmd) override;
+    void PrintHelp(const std::string& cmd) override;
 
     /**
-     * @brief 执行命令
-     * @param cmd:执行的命令
-     * @return 成功返回0,失败返回-1
+     * @brief Execute command
+     * @param cmd: Command executed
+     * @return returns 0 for success, -1 for failure
     */
-    int RunCommand(const std::string &cmd) override;
+    int RunCommand(const std::string& cmd) override;
 
     /**
-     * @brief 检查三副本一致性
-     * @param fileName 要检查一致性的文件名
-     * @param checkHash 是否检查hash,如果为false,检查apply index而不是hash
-     * @return 一致返回0,否则返回-1
+     * @brief Check consistency of three replicas
+     * @param fileName The file name to check for consistency
+     * @param checkHash Whether to check the hash; if false, check the apply
+     * index instead of the hash
+     * @return 0 if consistent, -1 otherwise
     */
     int CheckFileConsistency(const std::string& fileName, bool checkHash);
 
     /**
-     * @brief 检查copyset的三副本一致性
-     * @param copysetId 要检查的copysetId
-     * @param checkHash 是否检查hash,如果为false,检查apply index而不是hash
-     * @return 成功返回0,失败返回-1
+     * @brief Check the consistency of the three replicas of the copyset
+     * @param copysetId The copysetId to be checked
+     * @param checkHash Whether to check the hash; if false, check the apply
+     * index instead of the hash
+     * @return returns 0 for success, -1 for failure
     */
-    int CheckCopysetConsistency(const CopySet copysetId,
-                                bool checkHash);
+    int CheckCopysetConsistency(const CopySet copysetId, bool checkHash);
 
     /**
-     * @brief 打印帮助信息
+     * @brief Print help information
     */
     void PrintHelp();
 
     /**
-     * @brief 返回是否支持该命令
-     * @param command:执行的命令
-     * @return true / false
+     * @brief Return whether the command is supported
+     * @param command: The command executed
+     * @return true/false
     */
     static bool SupportCommand(const std::string& command);
 
 private:
-    /**
-     * @brief 初始化
+    /**
+     * @brief Initialization
     */
     int Init();
 
     /**
-     * @brief 从mds获取文件所在的copyset列表
-     * @param fileName 文件名
-     * @param[out] copysetIds copysetId的列表,返回值为0时有效
-     * @return 成功返回0,失败返回-1
+     * @brief Get the list of copysets where the file is located from mds
+     * @param fileName File name
+     * @param[out] copysets The list of copysets, valid when the return
+     * value is 0
+     * @return returns 0 for success, -1 for failure
     */
     int FetchFileCopyset(const std::string& fileName,
                          std::set* copysets);
 
     /**
-     * @brief 从chunkserver获取copyset的状态
-     * @param csAddr chunkserver地址
-     * @param copysetId 要获取的copysetId
-     * @param[out] response 返回的response
-     * @return 成功返回0,失败返回-1
+     * @brief Get the status of a copyset from the chunkserver
+     * @param csAddr chunkserver address
+     * @param copysetId The copysetId to obtain
+     * @param[out] response The response returned
+     * @return returns 0 for success, -1 for failure
    */
     int GetCopysetStatusResponse(const std::string& csAddr,
                                  const CopySet copyset,
                                  CopysetStatusResponse* response);
 
     /**
-     * @brief 检查copyset中指定chunk的hash的一致性
-     * @param copysetId 要检查的copysetId
-     * @param csAddrs copyset对应的chunkserver的地址
-     * @return 一致返回0,否则返回-1
+     * @brief Check the consistency of the hash of the specified chunks in
+     * the copyset
+     * @param copysetId The copysetId to be checked
+     * @param csAddrs The addresses of the chunkservers corresponding to the
+     * copyset
+     * @return 0 if consistent, -1 otherwise
    */
-    int CheckCopysetHash(const CopySet& copyset,
-                         const CsAddrsType& csAddrs);
+    int CheckCopysetHash(const CopySet& copyset, const CsAddrsType& csAddrs);
 
     /**
-     * @brief chunk在三个副本的hash的一致性
-     * @param chunk 要检查的chunk
-     * @param csAddrs copyset对应的chunkserver的地址
-     * @return 一致返回0,否则返回-1
+     * @brief Check the consistency of a chunk's hash across its three
+     * replicas
+     * @param chunk The chunk to be checked
+     * @param csAddrs The addresses of the chunkservers corresponding to the
+     * copyset
+     * @return 0 if consistent, -1 otherwise
    */
-    int CheckChunkHash(const Chunk& chunk,
-                       const CsAddrsType& csAddrs);
+    int CheckChunkHash(const Chunk& chunk, const CsAddrsType& csAddrs);
 
     /**
-     * @brief 检查副本间applyindex的一致性
-     * @param copysetId 要检查的copysetId
-     * @param csAddrs copyset对应的chunkserver的地址
-     * @return 一致返回0,否则返回-1
+     * @brief Check the consistency of the apply index across replicas
+     * @param copysetId The copysetId to be checked
+     * @param csAddrs The addresses of the chunkservers corresponding to the
+     * copyset
+     * @return 0 if consistent, -1 otherwise
    */
-    int CheckApplyIndex(const CopySet copyset,
-                        const CsAddrsType& csAddrs);
+    int CheckApplyIndex(const CopySet copyset, const CsAddrsType& csAddrs);
 
 private:
-    // 文件所在的逻辑池id
-    PoolIdType lpid_;
-    // 用来与mds的nameservice接口交互
+    // The logical pool ID where the file is located
+    PoolIdType lpid_;
+    // Used to interact with the nameservice interface of mds
     std::shared_ptr nameSpaceToolCore_;
-    // 向chunkserver发送RPC的client
+    // Client for sending RPCs to chunkservers
     std::shared_ptr csClient_;
-    // copyset中需要检查hash的chunk
+    // Chunks in the copyset whose hash needs to be checked
     std::map> chunksInCopyset_;
-    // 是否初始化成功过
+    // Whether initialization has ever succeeded
     bool inited_;
 };
 }  // namespace tool
diff --git a/src/tools/copyset_check.cpp b/src/tools/copyset_check.cpp
index 06341e5291..1d38b1d691 100644
--- a/src/tools/copyset_check.cpp
+++ b/src/tools/copyset_check.cpp
@@ -20,42 +20,44 @@
  * Author: charisu
 */
 #include "src/tools/copyset_check.h"
+
 #include "src/tools/common.h"
 #include "src/tools/metric_name.h"
 
 DEFINE_bool(detail, false, "list the copyset detail or not");
 DEFINE_uint32(chunkserverId, 0, "chunkserver id");
-DEFINE_string(chunkserverAddr, "", "if specified, chunkserverId is not required");  // NOLINT
+DEFINE_string(chunkserverAddr, "",
+              "if specified, chunkserverId is not required");  // NOLINT
 DEFINE_uint32(serverId, 0, "server id");
 DEFINE_string(serverIp, "", "server ip");
 DEFINE_string(opName, curve::tool::kTotalOpName, "operator name");
 DECLARE_string(mdsAddr);
-DEFINE_uint64(opIntervalExceptLeader, 5, "Operator generation interval other "
-                                         "than transfer leader");
+DEFINE_uint64(opIntervalExceptLeader, 5,
+              "Operator generation interval other "
+              "than transfer leader");
 DEFINE_uint64(leaderOpInterval, 30,
-            "tranfer leader operator generation interval");
+              "tranfer leader operator generation interval");
 
 namespace curve {
 namespace tool {
 
-#define CHECK_ONLY_ONE_SHOULD_BE_SPECIFIED(flagname1, flagname2) \
-    do { \
-        if ((FLAGS_ ## flagname1).empty() && (FLAGS_ ## flagname2) == 0) { \
-            std::cout << # flagname1 << " OR " << # flagname2 \
-                " should be secified!" << std::endl; \
-            return -1; \
-        } \
-        if (!(FLAGS_ ## flagname1).empty() && (FLAGS_ ## flagname2) != 0) { \
-            std::cout << "Only one of " # flagname1 << " OR " << # flagname2 \
-                " should be secified!" << std::endl; \
-            return -1; \
-        } \
-    } while (0); \
+#define CHECK_ONLY_ONE_SHOULD_BE_SPECIFIED(flagname1, flagname2)         \
+    do {                                                                 \
+        if ((FLAGS_##flagname1).empty() && (FLAGS_##flagname2) == 0) {   \
+            std::cout << #flagname1 << " OR "                            \
+                      << #flagname2 " should be secified!" << std::endl; \
+            return -1;                                                   \
+        }                                                                \
+        if (!(FLAGS_##flagname1).empty() && (FLAGS_##flagname2) != 0) {  \
+            std::cout << "Only one of " #flagname1 << " OR "             \
+                      << #flagname2 " should be secified!" << std::endl; \
+            return -1;                                                   \
+        }                                                                \
+    } while (0);
 
 bool CopysetCheck::SupportCommand(const std::string& command) {
-    return (command == kCheckCopysetCmd || command == kCheckChunnkServerCmd
-            || command == kCheckServerCmd || command == kCopysetsStatusCmd
-            || command == kCheckOperatorCmd
-            || command == kListMayBrokenVolumes);
+    return (command == kCheckCopysetCmd || command == kCheckChunnkServerCmd ||
+            command == kCheckServerCmd || command == kCopysetsStatusCmd ||
+            command == kCheckOperatorCmd || command == kListMayBrokenVolumes);
 }
 
 int CopysetCheck::Init() {
@@ -76,7 +78,7 @@ int CopysetCheck::RunCommand(const std::string& command) {
         return -1;
     }
     if (command == kCheckCopysetCmd) {
-        // 检查某个copyset的状态
+        // Check the status of a copyset
         if (FLAGS_logicalPoolId == 0 || FLAGS_copysetId == 0) {
             std::cout << "logicalPoolId AND copysetId should be specified!"
<< std::endl; @@ -84,7 +86,7 @@ int CopysetCheck::RunCommand(const std::string& command) { } return CheckCopyset(); } else if (command == kCheckChunnkServerCmd) { - // 检查某个chunkserver上的所有copyset + // Check all copysets on a certain chunkserver CHECK_ONLY_ONE_SHOULD_BE_SPECIFIED(chunkserverAddr, chunkserverId); return CheckChunkServer(); } else if (command == kCheckServerCmd) { @@ -159,8 +161,8 @@ int CopysetCheck::CheckServer() { if (FLAGS_detail) { PrintDetail(); std::ostream_iterator out(std::cout, ", "); - std::cout << "unhealthy chunkserver list (total: " - << unhealthyCs.size() <<"): {"; + std::cout << "unhealthy chunkserver list (total: " << unhealthyCs.size() + << "): {"; std::copy(unhealthyCs.begin(), unhealthyCs.end(), out); std::cout << "}" << std::endl; } @@ -188,11 +190,10 @@ int CopysetCheck::CheckOperator(const std::string& opName) { } else { res = core_->CheckOperator(opName, FLAGS_opIntervalExceptLeader); } - if (res < 0) { + if (res < 0) { std::cout << "Check operator fail!" << std::endl; } else { - std::cout << "Operator num is " - << res << std::endl; + std::cout << "Operator num is " << res << std::endl; res = 0; } return res; @@ -202,27 +203,33 @@ void CopysetCheck::PrintHelp(const std::string& command) { std::cout << "Example: " << std::endl << std::endl; if (command == kCheckCopysetCmd) { std::cout << "curve_ops_tool check-copyset -logicalPoolId=2 " - << "-copysetId=101 [-mdsAddr=127.0.0.1:6666] [-margin=1000] " - << "[-confPath=/etc/curve/tools.conf]" << std::endl << std::endl; // NOLINT + << "-copysetId=101 [-mdsAddr=127.0.0.1:6666] [-margin=1000] " + << "[-confPath=/etc/curve/tools.conf]" << std::endl + << std::endl; // NOLINT } else if (command == kCheckChunnkServerCmd) { - std::cout << "curve_ops_tool check-chunkserver " + std::cout + << "curve_ops_tool check-chunkserver " << "-chunkserverId=1 [-mdsAddr=127.0.0.1:6666] [-margin=1000] " << "[-confPath=/etc/curve/tools.conf]" << std::endl; std::cout << "curve_ops_tool check-chunkserver " - << "[-mdsAddr=127.0.0.1:6666] " - << "[-chunkserverAddr=127.0.0.1:8200] [-margin=1000] " - << "[-confPath=/etc/curve/tools.conf]" << std::endl << std::endl; // NOLINT + << "[-mdsAddr=127.0.0.1:6666] " + << "[-chunkserverAddr=127.0.0.1:8200] [-margin=1000] " + << "[-confPath=/etc/curve/tools.conf]" << std::endl + << std::endl; // NOLINT } else if (command == kCheckServerCmd) { std::cout << "curve_ops_tool check-server -serverId=1 " - << "[-mdsAddr=127.0.0.1:6666] [-margin=1000] " - << "[-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + << "[-mdsAddr=127.0.0.1:6666] [-margin=1000] " + << "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT std::cout << "curve_ops_tool check-server [-mdsAddr=127.0.0.1:6666] " - << "[-serverIp=127.0.0.1] [-margin=1000] " - << "[-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + << "[-serverIp=127.0.0.1] [-margin=1000] " + << "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT } else if (command == kCopysetsStatusCmd) { std::cout << "curve_ops_tool copysets-status [-mdsAddr=127.0.0.1:6666] " << "[-margin=1000] [-operatorMaxPeriod=30] [-checkOperator] " - << "[-confPath=/etc/curve/tools.conf]" << std::endl << std::endl; // NOLINT + << "[-confPath=/etc/curve/tools.conf]" << std::endl + << std::endl; // NOLINT } else if (command == kCheckOperatorCmd) { std::cout << "curve_ops_tool check-operator -opName=" << kTotalOpName << "/" << kChangeOpName << "/" << kAddOpName << "/" @@ -233,26 +240,32 @@ void CopysetCheck::PrintHelp(const std::string& command) { std::cout << 
"Command not supported!" << std::endl; } std::cout << std::endl; - std::cout << "Standard of healthy is no copyset in the following state:" << std::endl; // NOLINT + std::cout << "Standard of healthy is no copyset in the following state:" + << std::endl; // NOLINT std::cout << "1、copyset has no leader" << std::endl; std::cout << "2、number of replicas less than expected" << std::endl; std::cout << "3、some replicas not online" << std::endl; std::cout << "4、installing snapshot" << std::endl; std::cout << "5、gap of log index between peers exceed margin" << std::endl; - std::cout << "6、for check-cluster, it will also check whether the mds is scheduling if -checkOperator specified" // NOLINT - "(if no operators in operatorMaxPeriod, it considered healthy)" << std::endl; // NOLINT - std::cout << "By default, if the number of replicas is less than 3, it is considered unhealthy, " // NOLINT - "you can change it by specify -replicasNum" << std::endl; - std::cout << "The order is sorted by priority, if the former is satisfied, the rest will not be checked" << std::endl; // NOLINT + std::cout << "6、for check-cluster, it will also check whether the mds is " + "scheduling if -checkOperator specified" // NOLINT + "(if no operators in operatorMaxPeriod, it considered healthy)" + << std::endl; // NOLINT + std::cout << "By default, if the number of replicas is less than 3, it is " + "considered unhealthy, " // NOLINT + "you can change it by specify -replicasNum" + << std::endl; + std::cout << "The order is sorted by priority, if the former is satisfied, " + "the rest will not be checked" + << std::endl; // NOLINT } - void CopysetCheck::PrintStatistic() { const auto& statistics = core_->GetCopysetStatistics(); std::cout << "total copysets: " << statistics.totalNum << ", unhealthy copysets: " << statistics.unhealthyNum - << ", unhealthy_ratio: " - << statistics.unhealthyRatio * 100 << "%" << std::endl; + << ", unhealthy_ratio: " << statistics.unhealthyRatio * 100 << "%" + << std::endl; } void CopysetCheck::PrintDetail() { @@ -282,7 +295,7 @@ void CopysetCheck::PrintDetail() { PrintCopySet(item.second); } std::cout << std::endl; - // 打印有问题的chunkserver + // Printing problematic chunkservers PrintExcepChunkservers(); } @@ -300,32 +313,30 @@ void CopysetCheck::PrintCopySet(const std::set& set) { } PoolIdType lgId = GetPoolID(groupId); CopySetIdType csId = GetCopysetID(groupId); - std::cout << "(grouId: " << gid << ", logicalPoolId: " - << std::to_string(lgId) << ", copysetId: " - << std::to_string(csId) << ")"; + std::cout << "(grouId: " << gid + << ", logicalPoolId: " << std::to_string(lgId) + << ", copysetId: " << std::to_string(csId) << ")"; } std::cout << "}" << std::endl; } void CopysetCheck::PrintExcepChunkservers() { - auto serviceExceptionChunkServers = - core_->GetServiceExceptionChunkServer(); + auto serviceExceptionChunkServers = core_->GetServiceExceptionChunkServer(); if (!serviceExceptionChunkServers.empty()) { std::ostream_iterator out(std::cout, ", "); std::cout << "service-exception chunkservers (total: " << serviceExceptionChunkServers.size() << "): {"; std::copy(serviceExceptionChunkServers.begin(), - serviceExceptionChunkServers.end(), out); + serviceExceptionChunkServers.end(), out); std::cout << "}" << std::endl; } - auto copysetLoadExceptionCS = - core_->GetCopysetLoadExceptionChunkServer(); + auto copysetLoadExceptionCS = core_->GetCopysetLoadExceptionChunkServer(); if (!copysetLoadExceptionCS.empty()) { std::ostream_iterator out(std::cout, ", "); std::cout << "copyset-load-exception 
chunkservers (total: " << copysetLoadExceptionCS.size() << "): {"; - std::copy(copysetLoadExceptionCS.begin(), - copysetLoadExceptionCS.end(), out); + std::copy(copysetLoadExceptionCS.begin(), copysetLoadExceptionCS.end(), + out); std::cout << "}" << std::endl; } } diff --git a/src/tools/copyset_check.h b/src/tools/copyset_check.h index b4fa76c28f..54d5e46d36 100644 --- a/src/tools/copyset_check.h +++ b/src/tools/copyset_check.h @@ -25,23 +25,23 @@ #include -#include +#include #include +#include #include -#include -#include -#include #include -#include +#include +#include +#include #include "src/mds/common/mds_define.h" #include "src/tools/copyset_check_core.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" -using curve::mds::topology::PoolIdType; -using curve::mds::topology::CopySetIdType; using curve::mds::topology::ChunkServerIdType; +using curve::mds::topology::CopySetIdType; +using curve::mds::topology::PoolIdType; using curve::mds::topology::ServerIdType; namespace curve { @@ -49,94 +49,101 @@ namespace tool { class CopysetCheck : public CurveTool { public: - explicit CopysetCheck(std::shared_ptr core) : - core_(core), inited_(false) {} + explicit CopysetCheck(std::shared_ptr core) + : core_(core), inited_(false) {} ~CopysetCheck() = default; /** - * @brief 根据flag检查复制组健康状态 - * 复制组健康的标准,没有任何副本处于以下状态,下面的顺序按优先级排序, - * 即满足上面一条,就不会检查下面一条 - * 1、leader为空(复制组的信息以leader处的为准,没有leader无法检查) - * 2、配置中的副本数量不足 - * 3、有副本不在线 - * 4、有副本在安装快照 - * 5、副本间log index差距太大 - * 6、对于集群来说,还要判断一下chunkserver上的copyset数量和leader数量是否均衡, - * 避免后续会有调度使得集群不稳定 - * @param command 要执行的命令,目前有check-copyset,check-chunkserver, - * check-server,check-cluster等 - * @return 成功返回0,失败返回-1 + * @brief Check the health status of the replication group based on the flag + * The standard for replication group health is that no replica is in the + * following state. The list below is sorted by priority; if an earlier + * condition is met, the later ones are not checked + * 1. The leader is empty (the information of the replication group is + * based on the leader, and cannot be checked without a leader) + * 2. Insufficient number of replicas in the configuration + * 3. Some replicas are not online + * 4. A replica is installing a snapshot + * 5. The log index difference between replicas is too large + * 6.
For a cluster, it is also necessary to determine whether the number + * of copysets and the number of leaders on the chunkserver are balanced, + * to avoid scheduling that may destabilize the cluster later + * @param command The command to execute; currently includes check-copyset, + * check-chunkserver, check-server, check-cluster, etc. + * @return returns 0 for success, -1 for failure */ int RunCommand(const std::string& command) override; /** - * @brief 打印帮助信息 - * @param command 要执行的命令,目前有check-copyset,check-chunkserver, - * check-server,check-cluster等 + * @brief Print help information + * @param command The command to execute; currently includes check-copyset, + * check-chunkserver, check-server, check-cluster, etc. */ void PrintHelp(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true / false */ static bool SupportCommand(const std::string& command); private: - /** - * @brief 初始化 + /** + * @brief Initialization */ int Init(); /** - * @brief 检查单个copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check a single copyset + * @return 0 if healthy, -1 otherwise */ int CheckCopyset(); /** - * @brief 检查chunkserver上所有copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check all copysets on chunkserver + * @return 0 if healthy, -1 otherwise */ int CheckChunkServer(); /** - * @brief 检查server上所有copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check all copysets on the server + * @return 0 if healthy, -1 otherwise */ int CheckServer(); /** - * @brief 检查集群所有copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check all copysets in the cluster + * @return 0 if healthy, -1 otherwise */ int CheckCopysetsInCluster(); /** - * @brief 检查mds端的operator - * @return 无operator返回0,其他情况返回-1 + * @brief Check the operator on the mds side + * @return 0 if there is no operator, -1 otherwise */ int CheckOperator(const std::string& opName); - // 打印copyset检查的详细结果 + // Print detailed results of copyset check void PrintDetail(); void PrintCopySet(const std::set& set); - // 打印检查的结果,一共多少copyset,有多少不健康 + // Print the check results: how many copysets there are in total + // and how many of them are unhealthy void PrintStatistic(); - // 打印有问题的chunkserver列表 + // Print a list of problematic chunkservers void PrintExcepChunkservers(); - // 打印大多数不在线的副本上面的卷 + // Print the volumes on copysets whose majority of replicas are offline int PrintMayBrokenVolumes(); private: - // 检查copyset的核心逻辑 + // Core logic for checking copysets std::shared_ptr core_; - // 是否初始化成功过 + // Whether initialization has ever succeeded bool inited_; }; } // namespace tool diff --git a/src/tools/copyset_check_core.cpp b/src/tools/copyset_check_core.cpp index f32a7a923d..8a7a3165d9 100644 --- a/src/tools/copyset_check_core.cpp +++ b/src/tools/copyset_check_core.cpp @@ -20,15 +20,19 @@ * Author: charisu */ #include "src/tools/copyset_check_core.h" + #include + #include DEFINE_uint64(margin, 1000, "The threshold of the gap between peers"); DEFINE_uint64(replicasNum, 3, "the number of replicas that required"); -DEFINE_uint64(operatorMaxPeriod, 30, "max period of operator generating, " - "if no operators in a period, it considered to be healthy"); -DEFINE_bool(checkOperator, false, "if true, the operator number of " - "mds will be considered"); +DEFINE_uint64(operatorMaxPeriod, 30, + "max period of operator generating, " + "if no operators in a period, it is
considered to be healthy"); +DEFINE_bool(checkOperator, false, + "if true, the operator number of " + "mds will be considered"); namespace curve { namespace tool { @@ -38,24 +42,22 @@ int CopysetCheckCore::Init(const std::string& mdsAddr) { } CopysetStatistics::CopysetStatistics(uint64_t total, uint64_t unhealthy) - : totalNum(total), unhealthyNum(unhealthy) { + : totalNum(total), unhealthyNum(unhealthy) { if (total != 0) { - unhealthyRatio = - static_cast(unhealthyNum) / totalNum; + unhealthyRatio = static_cast(unhealthyNum) / totalNum; } else { unhealthyRatio = 0; } } CheckResult CopysetCheckCore::CheckOneCopyset(const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId) { + const CopySetIdType& copysetId) { Clear(); std::vector chunkserverLocation; - int res = mdsClient_->GetChunkServerListInCopySet(logicalPoolId, - copysetId, &chunkserverLocation); + int res = mdsClient_->GetChunkServerListInCopySet(logicalPoolId, copysetId, + &chunkserverLocation); if (res != 0) { - std::cout << "GetChunkServerListInCopySet from mds fail!" - << std::endl; + std::cout << "GetChunkServerListInCopySet from mds fail!" << std::endl; return CheckResult::kOtherErr; } int majority = chunkserverLocation.size() / 2 + 1; @@ -69,7 +71,7 @@ CheckResult CopysetCheckCore::CheckOneCopyset(const PoolIdType& logicalPoolId, butil::IOBuf iobuf; int res = QueryChunkServer(csAddr, &iobuf); if (res != 0) { - // 如果查询chunkserver失败,认为不在线 + // If the query for chunkserver fails, it is considered offline serviceExceptionChunkServers_.emplace(csAddr); chunkserverCopysets_[csAddr] = {}; ++offlinePeers; @@ -92,7 +94,7 @@ CheckResult CopysetCheckCore::CheckOneCopyset(const PoolIdType& logicalPoolId, } } else { if (copysetInfo.count(kLeader) == 0 || - copysetInfo[kLeader] == kEmptyAddr) { + copysetInfo[kLeader] == kEmptyAddr) { checkRes = CheckResult::kOtherErr; } } @@ -106,20 +108,20 @@ CheckResult CopysetCheckCore::CheckOneCopyset(const PoolIdType& logicalPoolId, } int CopysetCheckCore::CheckCopysetsOnChunkServer( - const ChunkServerIdType& chunkserverId) { + const ChunkServerIdType& chunkserverId) { Clear(); return CheckCopysetsOnChunkServer(chunkserverId, ""); } int CopysetCheckCore::CheckCopysetsOnChunkServer( - const std::string& chunkserverAddr) { + const std::string& chunkserverAddr) { Clear(); return CheckCopysetsOnChunkServer(0, chunkserverAddr); } int CopysetCheckCore::CheckCopysetsOnChunkServer( - const ChunkServerIdType& chunkserverId, - const std::string& chunkserverAddr) { + const ChunkServerIdType& chunkserverId, + const std::string& chunkserverAddr) { curve::mds::topology::ChunkServerInfo csInfo; int res = 0; if (chunkserverId > 0) { @@ -131,7 +133,7 @@ int CopysetCheckCore::CheckCopysetsOnChunkServer( std::cout << "GetChunkServerInfo from mds fail!" << std::endl; return -1; } - // 如果chunkserver retired的话不发送请求 + // If the chunkserver is retired, do not send the request if (csInfo.status() == ChunkServerStatus::RETIRED) { std::cout << "ChunkServer is retired!"
<< std::endl; return 0; @@ -139,7 +141,7 @@ int CopysetCheckCore::CheckCopysetsOnChunkServer( std::string hostIp = csInfo.hostip(); uint64_t port = csInfo.port(); std::string csAddr = hostIp + ":" + std::to_string(port); - // 向chunkserver发送RPC请求获取raft state + // Send RPC request to chunkserver to obtain raft state ChunkServerHealthStatus csStatus = CheckCopysetsOnChunkServer(csAddr, {}); if (csStatus == ChunkServerHealthStatus::kHealthy) { return 0; @@ -149,11 +151,8 @@ int CopysetCheckCore::CheckCopysetsOnChunkServer( } ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( - const std::string& chunkserverAddr, - const std::set& groupIds, - bool queryLeader, - std::pair *record, - bool queryCs) { + const std::string& chunkserverAddr, const std::set& groupIds, + bool queryLeader, std::pair* record, bool queryCs) { bool isHealthy = true; int res = 0; butil::IOBuf iobuf; @@ -165,33 +164,38 @@ ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( } if (res != 0) { - // 如果查询chunkserver失败,认为不在线,把它上面所有的 - // copyset都添加到peerNotOnlineCopysets_里面 + // If querying the chunkserver fails, consider it offline and add all + // its copysets to the peerNotOnlineCopysets_. UpdatePeerNotOnlineCopysets(chunkserverAddr); serviceExceptionChunkServers_.emplace(chunkserverAddr); chunkserverCopysets_[chunkserverAddr] = {}; return ChunkServerHealthStatus::kNotOnline; } - // 存储每一个copyset的详细信息 + // Store detailed information for each copyset CopySetInfosType copysetInfos; ParseResponseAttachment(groupIds, &iobuf, ©setInfos); - // 只有查询全部chunkserver的时候才更新chunkServer上的copyset列表 + // Only update the copyset list on the chunkserver when querying all + // chunkservers if (groupIds.empty()) { UpdateChunkServerCopysets(chunkserverAddr, copysetInfos); } - // 对应的chunkserver上没有要找的leader的copyset,可能已经迁移出去了, - // 但是follower这边还没更新,这种情况也认为chunkserver不健康 + // The copysets whose leader we are looking for are not on the + // corresponding chunkserver; they may have been migrated away while the + // follower side has not been updated yet. This case also marks the + // chunkserver as unhealthy if (copysetInfos.empty() || - (!groupIds.empty() && copysetInfos.size() != groupIds.size())) { + (!groupIds.empty() && copysetInfos.size() != groupIds.size())) { std::cout << "Some copysets not found on chunkserver, may be tranfered" << std::endl; return ChunkServerHealthStatus::kNotHealthy; } - // 存储需要发送消息的chunkserver的地址和对应的groupId - // key是chunkserver地址,value是groupId的列表 + // Store the addresses of the chunkservers that still need to be queried + // and the corresponding groupIds. The key is the chunkserver address and + // the value is a list of groupIds std::map> csAddrMap; - // 存储没有leader的copyset对应的peers,key为groupId,value为配置 + // Store the peers corresponding to the copysets without a leader, with key + // as groupId and value as configuration std::map> noLeaderCopysetsPeers; for (auto& copysetInfo : copysetInfos) { std::string groupId = copysetInfo[kGroupId]; @@ -228,17 +232,17 @@ ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( break; } } else if (state == kStateFollower) { - // 如果没有leader,检查是否是大多数不在线 - // 是的话标记为大多数不在线,否则标记为No leader + // If there is no leader, check whether a majority of peers are + // offline; if so, mark it as majority-offline, otherwise mark it + // as no-leader if (copysetInfo.count(kLeader) == 0 || - copysetInfo[kLeader] == kEmptyAddr) { + copysetInfo[kLeader] == kEmptyAddr) { std::vector peers; curve::common::SplitString(copysetInfo[kPeers], " ", &peers); noLeaderCopysetsPeers[groupId] = peers; continue; }
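            // Editorial note, not part of the original patch: at this point
            // the follower reported no leader for this copyset, so its peers
            // were queued in noLeaderCopysetsPeers for the majority-offline
            // check performed later by CheckCopysetsNoLeader(). The branch
            // below handles the opposite case: the leader is known, so the
            // copyset's groupId is queued in csAddrMap and the leader's
            // chunkserver is queried directly.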
if (queryLeader) { - // 向leader发送rpc请求 + // Send an rpc request to the leader auto pos = copysetInfo[kLeader].rfind(":"); auto csAddr = copysetInfo[kLeader].substr(0, pos); csAddrMap[csAddr].emplace(groupId); @@ -247,25 +251,25 @@ ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( copysets_[kNoLeader].emplace(groupId); isHealthy = false; } else { - // 其他情况有ERROR,UNINITIALIZED,SHUTTING和SHUTDOWN,这种都认为不健康,统计到 - // copyset里面 + // In other cases such as ERROR, UNINITIALIZED, SHUTTING, and + // SHUTDOWN, they are considered unhealthy and are counted within + // the copyset. std::string key = "state " + copysetInfo[kState]; copysets_[key].emplace(groupId); isHealthy = false; } } - // 遍历没有leader的copyset - bool health = CheckCopysetsNoLeader(chunkserverAddr, - noLeaderCopysetsPeers); + // Traverse copysets without leaders + bool health = CheckCopysetsNoLeader(chunkserverAddr, noLeaderCopysetsPeers); if (!health) { isHealthy = false; } - // 遍历chunkserver发送请求 + // Traverse chunkserver to send requests for (const auto& item : csAddrMap) { - ChunkServerHealthStatus res = CheckCopysetsOnChunkServer(item.first, - item.second); + ChunkServerHealthStatus res = + CheckCopysetsOnChunkServer(item.first, item.second); if (res != ChunkServerHealthStatus::kHealthy) { isHealthy = false; } @@ -277,10 +281,9 @@ ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( } } -bool CopysetCheckCore::CheckCopysetsNoLeader(const std::string& csAddr, - const std::map>& - copysetsPeers) { +bool CopysetCheckCore::CheckCopysetsNoLeader( + const std::string& csAddr, + const std::map>& copysetsPeers) { if (copysetsPeers.empty()) { return true; } @@ -296,13 +299,12 @@ bool CopysetCheckCore::CheckCopysetsNoLeader(const std::string& csAddr, return false; } for (const auto& item : result) { - // 如果在配置组中,检查是否是majority offline + // If in the configuration group, check if it is a majority offline if (item.second) { isHealthy = false; std::string groupId = item.first; - CheckResult checkRes = CheckPeerOnlineStatus( - groupId, - copysetsPeers.at(item.first)); + CheckResult checkRes = + CheckPeerOnlineStatus(groupId, copysetsPeers.at(item.first)); if (checkRes == CheckResult::kMajorityPeerNotOnline) { copysets_[kMajorityPeerNotOnline].emplace(groupId); continue; @@ -313,9 +315,9 @@ bool CopysetCheckCore::CheckCopysetsNoLeader(const std::string& csAddr, return isHealthy; } -int CopysetCheckCore::CheckIfChunkServerInCopysets(const std::string& csAddr, - const std::set copysets, - std::map* result) { +int CopysetCheckCore::CheckIfChunkServerInCopysets( + const std::string& csAddr, const std::set copysets, + std::map* result) { PoolIdType logicPoolId; std::vector copysetIds; for (const auto& gId : copysets) { @@ -330,8 +332,8 @@ int CopysetCheckCore::CheckIfChunkServerInCopysets(const std::string& csAddr, } std::vector csServerInfos; - int res = mdsClient_->GetChunkServerListInCopySets(logicPoolId, - copysetIds, &csServerInfos); + int res = mdsClient_->GetChunkServerListInCopySets(logicPoolId, copysetIds, + &csServerInfos); if (res != 0) { std::cout << "GetChunkServerListInCopySets fail!" 
<< std::endl; return res; @@ -340,8 +342,8 @@ int CopysetCheckCore::CheckIfChunkServerInCopysets(const std::string& csAddr, CopySetIdType copysetId = info.copysetid(); std::string groupId = ToGroupId(logicPoolId, copysetId); for (const auto& csLoc : info.cslocs()) { - std::string addr = csLoc.hostip() + ":" - + std::to_string(csLoc.port()); + std::string addr = + csLoc.hostip() + ":" + std::to_string(csLoc.port()); if (addr == csAddr) { (*result)[groupId] = true; break; @@ -351,22 +353,23 @@ int CopysetCheckCore::CheckIfChunkServerInCopysets(const std::string& csAddr, return 0; } -int CopysetCheckCore::CheckCopysetsOnServer(const ServerIdType& serverId, - std::vector* unhealthyChunkServers) { +int CopysetCheckCore::CheckCopysetsOnServer( + const ServerIdType& serverId, + std::vector* unhealthyChunkServers) { Clear(); return CheckCopysetsOnServer(serverId, "", true, unhealthyChunkServers); } -int CopysetCheckCore::CheckCopysetsOnServer(const std::string& serverIp, - std::vector* unhealthyChunkServers) { +int CopysetCheckCore::CheckCopysetsOnServer( + const std::string& serverIp, + std::vector* unhealthyChunkServers) { Clear(); return CheckCopysetsOnServer(0, serverIp, true, unhealthyChunkServers); } void CopysetCheckCore::ConcurrentCheckCopysetsOnServer( - const std::vector &chunkservers, - uint32_t *index, std::map> *result) { + const std::vector& chunkservers, uint32_t* index, + std::map>* result) { while (1) { indexMutex.lock(); if (*index + 1 > chunkservers.size()) { @@ -386,11 +389,11 @@ void CopysetCheckCore::ConcurrentCheckCopysetsOnServer( } } -int CopysetCheckCore::CheckCopysetsOnServer(const ServerIdType& serverId, - const std::string& serverIp, bool queryLeader, - std::vector* unhealthyChunkServers) { +int CopysetCheckCore::CheckCopysetsOnServer( + const ServerIdType& serverId, const std::string& serverIp, bool queryLeader, + std::vector* unhealthyChunkServers) { bool isHealthy = true; - // 向mds发送RPC + // Send RPC to mds int res = 0; std::vector chunkservers; if (serverId > 0) { @@ -406,16 +409,15 @@ int CopysetCheckCore::CheckCopysetsOnServer(const ServerIdType& serverId, std::map> queryCsResult; uint32_t index = 0; for (uint64_t i = 0; i < FLAGS_rpcConcurrentNum; i++) { - threadpool.emplace_back(Thread( - &CopysetCheckCore::ConcurrentCheckCopysetsOnServer, - this, std::ref(chunkservers), &index, - &queryCsResult)); + threadpool.emplace_back( + Thread(&CopysetCheckCore::ConcurrentCheckCopysetsOnServer, this, + std::ref(chunkservers), &index, &queryCsResult)); } - for (auto &thread : threadpool) { + for (auto& thread : threadpool) { thread.join(); } - for (auto &record : queryCsResult) { + for (auto& record : queryCsResult) { std::string chunkserverAddr = record.first; auto res = CheckCopysetsOnChunkServer(chunkserverAddr, {}, queryLeader, &record.second, false); @@ -429,7 +431,7 @@ int CopysetCheckCore::CheckCopysetsOnServer(const ServerIdType& serverId, if (isHealthy) { return 0; - } else { + } else { return -1; } } @@ -450,18 +452,21 @@ int CopysetCheckCore::CheckCopysetsInCluster() { isHealthy = false; } } - // 检查从chunkserver上获取的copyset数量与mds记录的数量是否一致 + // Check if the number of copysets obtained from chunkserver matches the + // number of mds records res = CheckCopysetsWithMds(); if (res != 0) { std::cout << "CheckCopysetNumWithMds fail!" << std::endl; return -1; } - // 如果不健康,直接返回,如果健康,还需要对operator作出判断 + // If not healthy, return directly. 
If healthy, make a judgment on the + // operator if (!isHealthy) { return -1; } - // 默认不检查operator,在测试脚本之类的要求比较严格的地方才检查operator,不然 - // 每次执行命令等待30秒很不方便 + // By default, operators are not checked, and only checked in areas with + // strict requirements such as test scripts, otherwise waiting for 30 + // seconds each time executing a command is inconvenient if (FLAGS_checkOperator) { int res = CheckOperator(kTotalOpName, FLAGS_operatorMaxPeriod); if (res != 0) { @@ -482,21 +487,22 @@ int CopysetCheckCore::CheckCopysetsWithMds() { if (copysetsInMds.size() != copysets_[kTotal].size()) { std::cout << "Copyset numbers in chunkservers not consistent" " with mds, please check! copysets on chunkserver: " - << copysets_[kTotal].size() << ", copysets in mds: " - << copysetsInMds.size() << std::endl; + << copysets_[kTotal].size() + << ", copysets in mds: " << copysetsInMds.size() << std::endl; return -1; } std::set copysetsInMdsGid; for (const auto& copyset : copysetsInMds) { - std::string gId = ToGroupId(copyset.logicalpoolid(), - copyset.copysetid()); + std::string gId = + ToGroupId(copyset.logicalpoolid(), copyset.copysetid()); copysetsInMdsGid.insert(gId); } int ret = 0; std::vector copysetsInMdsNotInCs(10); - auto iter = std::set_difference(copysetsInMdsGid.begin(), - copysetsInMdsGid.end(), copysets_[kTotal].begin(), - copysets_[kTotal].end(), copysetsInMdsNotInCs.begin()); + auto iter = + std::set_difference(copysetsInMdsGid.begin(), copysetsInMdsGid.end(), + copysets_[kTotal].begin(), copysets_[kTotal].end(), + copysetsInMdsNotInCs.begin()); copysetsInMdsNotInCs.resize(iter - copysetsInMdsNotInCs.begin()); if (!copysetsInMdsNotInCs.empty()) { std::cout << "There are " << copysetsInMdsNotInCs.size() @@ -508,9 +514,10 @@ int CopysetCheckCore::CheckCopysetsWithMds() { ret = -1; } std::vector copysetsInCsNotInMds(10); - iter = std::set_difference(copysets_[kTotal].begin(), - copysets_[kTotal].end(), copysetsInMdsGid.begin(), - copysetsInMdsGid.end(), copysetsInCsNotInMds.begin()); + iter = + std::set_difference(copysets_[kTotal].begin(), copysets_[kTotal].end(), + copysetsInMdsGid.begin(), copysetsInMdsGid.end(), + copysetsInCsNotInMds.begin()); copysetsInCsNotInMds.resize(iter - copysetsInCsNotInMds.begin()); if (!copysetsInCsNotInMds.empty()) { std::cout << "There are " << copysetsInCsNotInMds.size() @@ -542,8 +549,8 @@ int CopysetCheckCore::CheckScanStatus( continue; } - auto groupId = ToGroupId(copysetInfo.logicalpoolid(), - copysetInfo.copysetid()); + auto groupId = + ToGroupId(copysetInfo.logicalpoolid(), copysetInfo.copysetid()); copysets_[kThreeCopiesInconsistent].emplace(groupId); count++; } @@ -565,37 +572,41 @@ int CopysetCheckCore::CheckOperator(const std::string& opName, if (opNum != 0) { return opNum; } - if (curve::common::TimeUtility::GetTimeofDaySec() - - startTime >= checkTimeSec) { + if (curve::common::TimeUtility::GetTimeofDaySec() - startTime >= + checkTimeSec) { break; } sleep(1); - } while (curve::common::TimeUtility::GetTimeofDaySec() - - startTime < checkTimeSec); + } while (curve::common::TimeUtility::GetTimeofDaySec() - startTime < + checkTimeSec); return 0; } -// 每个copyset的信息都会存储在一个map里面,map的key有 -// groupId: 复制组的groupId -// peer_id: 10.182.26.45:8210:0格式的peer id -// state: 节点的状态,LEADER,FOLLOWER,CANDIDATE等等 -// peers: 配置组里的成员,通过空格分隔 -// last_log_id: 最后一个log entry的index -// leader: state为LEADER时才存在这个key,指向复制组leader +// Information for each copyset is stored in a map. The map's keys include: +// - groupId: The groupId of the replication group. 
+// - peer_id: The peer id in the format 10.182.26.45:8210:0. +// - state: The node's state, which can be LEADER, FOLLOWER, CANDIDATE, etc. +// - peers: Members in the configuration group, separated by spaces. +// - last_log_id: The index of the last log entry. +// - leader: This key exists only when the state is LEADER and points to the +// leader of the replication group. // -// replicator_1: 第一个follower的复制状态,value如下: -// next_index=6349842 flying_append_entries_size=0 idle hc=1234 ac=123 ic=0 -// next_index为下一个要发送给该follower的index -// flying_append_entries_size是发出去还未确认的entry的数量 -// idle表明没有在安装快照,如果在安装快照的话是installing snapshot {12, 3}, -// 1234和3分别是快照包含的最后一个log entry的index和term -// hc,ac,ic分别是发向follower的heartbeat,append entry, -// 和install snapshot的rpc的数量 +// replicator_1: The replication status of the first follower, with values as +// follows: next_index=6349842 flying_append_entries_size=0 idle hc=1234 ac=123 +// ic=0 +// - next_index: The next index to be sent to this follower. +// - flying_append_entries_size: The number of unconfirmed entries that have +// been sent. +// - idle: Indicates whether there is no snapshot installation. If a +// snapshot is being installed, it will show as "installing snapshot {12, +// 3}", +// where 1234 and 3 are the last log entry's index and term included in +// the snapshot. +// - hc, ac, ic: The counts of RPCs sent to the follower for heartbeat, +// append entry, and install snapshot, respectively. void CopysetCheckCore::ParseResponseAttachment( - const std::set& gIds, - butil::IOBuf* iobuf, - CopySetInfosType* copysetInfos, - bool saveIobufStr) { + const std::set& gIds, butil::IOBuf* iobuf, + CopySetInfosType* copysetInfos, bool saveIobufStr) { butil::IOBuf copyset; iobuf->append("\r\n"); while (iobuf->cut_until(©set, "\r\n\r\n") == 0) { @@ -629,7 +640,7 @@ void CopysetCheckCore::ParseResponseAttachment( continue; } } - // 找到了copyset + // Found copyset auto pos = line.npos; if (line.find(kReplicator) != line.npos) { pos = line.rfind(":"); @@ -640,7 +651,7 @@ void CopysetCheckCore::ParseResponseAttachment( continue; } std::string key = line.substr(0, pos); - // 如果是replicator,把key简化一下 + // If it's a replicator, simplify the key if (key.find(kReplicator) != key.npos) { key = kReplicator + std::to_string(i); ++i; @@ -660,10 +671,11 @@ void CopysetCheckCore::ParseResponseAttachment( } int CopysetCheckCore::QueryChunkServer(const std::string& chunkserverAddr, - butil::IOBuf* iobuf) { + butil::IOBuf* iobuf) { // unit test will set csClient_ to mock - auto csClient = (csClient_ == nullptr) ? - std::make_shared() : csClient_; + auto csClient = (csClient_ == nullptr) + ? std::make_shared() + : csClient_; int res = csClient->Init(chunkserverAddr); if (res != 0) { std::cout << "Init chunkserverClient fail!" << std::endl; @@ -673,8 +685,7 @@ int CopysetCheckCore::QueryChunkServer(const std::string& chunkserverAddr, } void CopysetCheckCore::UpdateChunkServerCopysets( - const std::string& csAddr, - const CopySetInfosType& copysetInfos) { + const std::string& csAddr, const CopySetInfosType& copysetInfos) { std::set copysetIds; for (const auto& copyset : copysetInfos) { copysetIds.emplace(copyset.at(kGroupId)); @@ -682,11 +693,12 @@ void CopysetCheckCore::UpdateChunkServerCopysets( chunkserverCopysets_[csAddr] = copysetIds; } -// 通过发送RPC检查chunkserver是否在线 +// Check if chunkserver is online by sending RPC bool CopysetCheckCore::CheckChunkServerOnline( - const std::string& chunkserverAddr) { - auto csClient = (csClient_ == nullptr) ? 
- std::make_shared() : csClient_; + const std::string& chunkserverAddr) { + auto csClient = (csClient_ == nullptr) + ? std::make_shared() + : csClient_; int res = csClient->Init(chunkserverAddr); if (res != 0) { std::cout << "Init chunkserverClient fail!" << std::endl; @@ -718,7 +730,7 @@ bool CopysetCheckCore::CheckCopySetOnline(const std::string& csAddr, butil::IOBuf iobuf; int res = QueryChunkServer(csAddr, &iobuf); if (res != 0) { - // 如果查询chunkserver失败,认为不在线 + // If the query for chunkserver fails, it is considered offline serviceExceptionChunkServers_.emplace(csAddr); chunkserverCopysets_[csAddr] = {}; return false; @@ -727,7 +739,7 @@ bool CopysetCheckCore::CheckCopySetOnline(const std::string& csAddr, ParseResponseAttachment({}, &iobuf, ©setInfos); UpdateChunkServerCopysets(csAddr, copysetInfos); bool online = (chunkserverCopysets_[csAddr].find(groupId) != - chunkserverCopysets_[csAddr].end()); + chunkserverCopysets_[csAddr].end()); if (!online) { copysetLoacExceptionChunkServers_.emplace(csAddr); } @@ -735,8 +747,7 @@ bool CopysetCheckCore::CheckCopySetOnline(const std::string& csAddr, } CheckResult CopysetCheckCore::CheckPeerOnlineStatus( - const std::string& groupId, - const std::vector& peers) { + const std::string& groupId, const std::vector& peers) { int notOnlineNum = 0; for (const auto& peer : peers) { auto pos = peer.rfind(":"); @@ -762,20 +773,20 @@ CheckResult CopysetCheckCore::CheckPeerOnlineStatus( } CheckResult CopysetCheckCore::CheckHealthOnLeader( - std::map* map) { - // 先判断peers是否小于3 + std::map* map) { + // First, determine if the peers are less than 3 std::vector peers; curve::common::SplitString((*map)[kPeers], " ", &peers); if (peers.size() < FLAGS_replicasNum) { return CheckResult::kPeersNoSufficient; } std::string groupId = (*map)[kGroupId]; - // 检查不在线peer的数量 + // Check the number of offline peers CheckResult checkRes = CheckPeerOnlineStatus(groupId, peers); if (checkRes != CheckResult::kHealthy) { return checkRes; } - // 根据replicator的情况判断log index之间的差距 + // Judging the gap between log indices based on the replicator's situation uint64_t lastLogId; std::string str = (*map)[kStorage]; auto pos1 = str.find("="); @@ -785,7 +796,7 @@ CheckResult CopysetCheckCore::CheckHealthOnLeader( return CheckResult::kParseError; } bool res = curve::common::StringToUll(str.substr(pos1 + 1, pos2 - pos1 - 1), - &lastLogId); + &lastLogId); if (!res) { std::cout << "parse last log id from string fail!" << std::endl; return CheckResult::kParseError; @@ -805,16 +816,15 @@ CheckResult CopysetCheckCore::CheckHealthOnLeader( } } if (info.substr(0, pos) == kNextIndex) { - res = curve::common::StringToUll( - info.substr(pos + 1), &nextIndex); + res = curve::common::StringToUll(info.substr(pos + 1), + &nextIndex); if (!res) { std::cout << "parse next index fail!" << std::endl; return CheckResult::kParseError; } } if (info.substr(0, pos) == "flying_append_entries_size") { - res = curve::common::StringToUll(info.substr(pos + 1), - &flying); + res = curve::common::StringToUll(info.substr(pos + 1), &flying); if (!res) { std::cout << "parse flying_size fail!" << std::endl; return CheckResult::kParseError; @@ -835,8 +845,8 @@ void CopysetCheckCore::UpdatePeerNotOnlineCopysets(const std::string& csAddr) { std::vector copysets; int res = mdsClient_->GetCopySetsInChunkServer(csAddr, ©sets); if (res != 0) { - std::cout << "GetCopySetsInChunkServer " << csAddr - << " fail!" << std::endl; + std::cout << "GetCopySetsInChunkServer " << csAddr << " fail!" 
+ << std::endl; return; } else if (copysets.empty()) { std::cout << "No copysets on chunkserver " << csAddr << std::endl; @@ -849,26 +859,24 @@ void CopysetCheckCore::UpdatePeerNotOnlineCopysets(const std::string& csAddr) { copysetIds.emplace_back(csInfo.copysetid()); } - // 获取每个copyset的成员 + // Get the members of each copyset std::vector csServerInfos; - res = mdsClient_->GetChunkServerListInCopySets(logicalPoolId, - copysetIds, + res = mdsClient_->GetChunkServerListInCopySets(logicalPoolId, copysetIds, &csServerInfos); if (res != 0) { std::cout << "GetChunkServerListInCopySets fail" << std::endl; return; } - // 遍历每个copyset + // Traverse each copyset for (const auto& info : csServerInfos) { std::vector peers; for (const auto& csLoc : info.cslocs()) { - std::string peer = csLoc.hostip() + ":" - + std::to_string(csLoc.port()) + ":0"; + std::string peer = + csLoc.hostip() + ":" + std::to_string(csLoc.port()) + ":0"; peers.emplace_back(peer); } CopySetIdType copysetId = info.copysetid(); - std::string groupId = ToGroupId(logicalPoolId, - copysetId); + std::string groupId = ToGroupId(logicalPoolId, copysetId); CheckResult checkRes = CheckPeerOnlineStatus(groupId, peers); if (checkRes == CheckResult::kMinorityPeerNotOnline) { copysets_[kMinorityPeerNotOnline].emplace(groupId); @@ -889,9 +897,8 @@ CopysetStatistics CopysetCheckCore::GetCopysetStatistics() { if (item.first == kTotal) { total = item.second.size(); } else { - // 求并集 - unhealthyCopysets.insert(item.second.begin(), - item.second.end()); + // Union + unhealthyCopysets.insert(item.second.begin(), item.second.end()); } } uint64_t unhealthyNum = unhealthyCopysets.size(); @@ -907,7 +914,7 @@ void CopysetCheckCore::Clear() { } int CopysetCheckCore::ListMayBrokenVolumes( - std::vector* fileNames) { + std::vector* fileNames) { int res = CheckCopysetsOnOfflineChunkServer(); if (res != 0) { std::cout << "CheckCopysetsOnOfflineChunkServer fail" << std::endl; @@ -928,10 +935,10 @@ int CopysetCheckCore::ListMayBrokenVolumes( } void CopysetCheckCore::GetCopysetInfos(const char* key, - std::vector* copysets) { + std::vector* copysets) { (void)key; for (auto iter = copysets_[kMajorityPeerNotOnline].begin(); - iter != copysets_[kMajorityPeerNotOnline].end(); ++iter) { + iter != copysets_[kMajorityPeerNotOnline].end(); ++iter) { std::string gid = *iter; uint64_t groupId; if (!curve::common::StringToUll(gid, &groupId)) { diff --git a/src/tools/copyset_check_core.h b/src/tools/copyset_check_core.h index 6e93a373c7..157ddf2458 100644 --- a/src/tools/copyset_check_core.h +++ b/src/tools/copyset_check_core.h @@ -25,38 +25,38 @@ #include #include -#include +#include #include +#include #include -#include -#include -#include #include -#include +#include +#include #include +#include +#include "include/chunkserver/chunkserver_common.h" #include "proto/topology.pb.h" -#include "src/mds/common/mds_define.h" +#include "src/common/concurrent/concurrent.h" #include "src/common/string_util.h" -#include "src/tools/mds_client.h" +#include "src/mds/common/mds_define.h" #include "src/tools/chunkserver_client.h" -#include "src/tools/metric_name.h" #include "src/tools/curve_tool_define.h" -#include "include/chunkserver/chunkserver_common.h" -#include "src/common/concurrent/concurrent.h" +#include "src/tools/mds_client.h" +#include "src/tools/metric_name.h" -using curve::mds::topology::PoolIdType; -using curve::mds::topology::CopySetIdType; -using curve::mds::topology::ChunkServerIdType; -using curve::mds::topology::ServerIdType; -using 
curve::mds::topology::kTopoErrCodeSuccess; -using curve::mds::topology::OnlineState; -using curve::mds::topology::ChunkServerStatus; -using curve::chunkserver::ToGroupId; -using curve::chunkserver::GetPoolID; using curve::chunkserver::GetCopysetID; +using curve::chunkserver::GetPoolID; +using curve::chunkserver::ToGroupId; using curve::common::Mutex; using curve::common::Thread; +using curve::mds::topology::ChunkServerIdType; +using curve::mds::topology::ChunkServerStatus; +using curve::mds::topology::CopySetIdType; +using curve::mds::topology::kTopoErrCodeSuccess; +using curve::mds::topology::OnlineState; +using curve::mds::topology::PoolIdType; +using curve::mds::topology::ServerIdType; namespace curve { namespace tool { @@ -65,32 +65,31 @@ using CopySet = std::pair; using CopySetInfosType = std::vector>; enum class CheckResult { - // copyset健康 + // The copyset is healthy kHealthy = 0, - // 解析结果失败 + // Failed to parse the result kParseError = -1, - // peer数量小于预期 - kPeersNoSufficient = -2, - // 副本间的index差距太大 + // The number of peers is less than expected + kPeersNoSufficient = -2, + // The index difference between replicas is too large kLogIndexGapTooBig = -3, - // 有副本在安装快照 + // A replica is installing a snapshot kInstallingSnapshot = -4, - // 少数副本不在线 + // A minority of replicas are not online kMinorityPeerNotOnline = -5, - // 大多数副本不在线 + // A majority of replicas are not online kMajorityPeerNotOnline = -6, kOtherErr = -7 }; enum class ChunkServerHealthStatus { - kHealthy = 0, // chunkserver上所有copyset健康 - kNotHealthy = -1, // chunkserver上有copyset不健康 - kNotOnline = -2 // chunkserver不在线 + kHealthy = 0, // All copysets on the chunkserver are healthy + kNotHealthy = -1, // Some copysets on the chunkserver are unhealthy + kNotOnline = -2 // The chunkserver is not online }; struct CopysetStatistics { - CopysetStatistics() : - totalNum(0), unhealthyNum(0), unhealthyRatio(0) {} + CopysetStatistics() : totalNum(0), unhealthyNum(0), unhealthyRatio(0) {} CopysetStatistics(uint64_t total, uint64_t unhealthy); uint64_t totalNum; uint64_t unhealthyNum; @@ -109,102 +108,108 @@ const char kThreeCopiesInconsistent[] = "Three copies inconsistent"; class CopysetCheckCore { public: CopysetCheckCore(std::shared_ptr mdsClient, - std::shared_ptr csClient = nullptr) : - mdsClient_(mdsClient), csClient_(csClient) {} + std::shared_ptr csClient = nullptr) + : mdsClient_(mdsClient), csClient_(csClient) {} virtual ~CopysetCheckCore() = default; /** - * @brief 初始化mds client - * @param mdsAddr mds的地址,支持多地址,用","分隔 - * @return 成功返回0,失败返回-1 + * @brief Initialize mds client + * @param mdsAddr Address of mds, supporting multiple addresses separated by + * ',' + * @return returns 0 for success, -1 for failure */ virtual int Init(const std::string& mdsAddr); /** - * @brief check health of one copyset - * - * @param logicalPoolId - * @param copysetId - * - * @return error code - */ + * @brief check health of one copyset + * + * @param logicalPoolId + * @param copysetId + * + * @return error code + */ virtual CheckResult CheckOneCopyset(const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId); + const CopySetIdType& copysetId); /** - * @brief 
检查某个chunkserver上的所有copyset的健康状态 - * - * @param chunkserId chunkserverId - * - * @return 健康返回0,不健康返回-1 - */ + * @brief Check the health status of all copysets on a certain chunkserver + * + * @param chunkserverId The chunkserver id + * + * @return 0 if healthy, -1 otherwise + */ virtual int CheckCopysetsOnChunkServer( - const ChunkServerIdType& chunkserverId); + const ChunkServerIdType& chunkserverId); /** - * @brief 检查某个chunkserver上的所有copyset的健康状态 - * - * @param chunkserAddr chunkserver地址 - * - * @return 健康返回0,不健康返回-1 - */ + * @brief Check the health status of all copysets on a certain chunkserver + * + * @param chunkserverAddr The chunkserver address + * + * @return 0 if healthy, -1 otherwise + */ virtual int CheckCopysetsOnChunkServer(const std::string& chunkserverAddr); /** - * @brief Check copysets on offline chunkservers - */ + * @brief Check copysets on offline chunkservers + */ virtual int CheckCopysetsOnOfflineChunkServer(); /** - * @brief 检查某个server上的所有copyset的健康状态 - * - * @param serverId server的id - * @param[out] unhealthyChunkServers 可选参数,server上copyset不健康的chunkserver的列表 - * - * @return 健康返回0,不健康返回-1 - */ - virtual int CheckCopysetsOnServer(const ServerIdType& serverId, - std::vector* unhealthyChunkServers = nullptr); + * @brief Check the health status of all copysets on a server + * + * @param serverId Server ID + * @param[out] unhealthyChunkServers optional output: chunkservers on the + * server that have unhealthy copysets + * + * @return 0 if healthy, -1 otherwise + */ + virtual int CheckCopysetsOnServer( + const ServerIdType& serverId, + std::vector* unhealthyChunkServers = nullptr); /** - * @brief 检查某个server上的所有copyset的健康状态 - * - * @param serverId server的ip - * @param[out] unhealthyChunkServers 可选参数,server上copyset不健康的chunkserver的列表 - * - * @return 健康返回0,不健康返回-1 - */ - virtual int CheckCopysetsOnServer(const std::string& serverIp, - std::vector* unhealthyChunkServers = nullptr); + * @brief Check the health status of all copysets on a server + * + * @param serverIp The server ip + * @param[out] unhealthyChunkServers optional output: chunkservers on the + * server that have unhealthy copysets + * + * @return 0 if healthy, -1 otherwise + */ + virtual int CheckCopysetsOnServer( + const std::string& serverIp, + std::vector* unhealthyChunkServers = nullptr); /** - * @brief 检查集群中所有copyset的健康状态 - * - * @return 健康返回0,不健康返回-1 - */ + * @brief Check the health status of all copysets in the cluster + * + * @return 0 if healthy, -1 otherwise + */ virtual int CheckCopysetsInCluster(); /** - * @brief 检查集群中的operator - * @param opName operator的名字 - * @param checkTimeSec 检查时间 - * @return 检查正常返回0,检查失败或存在operator返回-1 - */ - virtual int CheckOperator(const std::string& opName, - uint64_t checkTimeSec); + * @brief Check the operators in the cluster + * @param opName The name of the operator + * @param checkTimeSec check time + * @return 0 if there is no operator; -1 if the check fails or an operator + * exists + */ + virtual int CheckOperator(const std::string& opName, uint64_t checkTimeSec); /** - * @brief 计算不健康的copyset的比例,检查后调用 - * @return 不健康的copyset的比例 + * @brief Calculate the proportion of unhealthy copysets; call after a check + * @return The proportion of unhealthy copysets */ virtual CopysetStatistics GetCopysetStatistics(); /** - * @brief 获取copyset的列表,通常检查后会调用,然后打印出来 - * @return copyset的列表 + * @brief Get the list of copysets; usually called after a check and then + * printed + * @return List of copysets */ virtual const std::map>& GetCopysetsRes() - const { + const { return copysets_; } @@ -212,112 +217,119 @@ class CopysetCheckCore { * @brief Get copysets info for specified copysets */ virtual void GetCopysetInfos(const char* key, - std::vector* copysets); + std::vector* copysets); /** - * @brief 获取copyset的详细信息 - * @return copyset的详细信息 + * @brief Get detailed information about the copyset + * @return Details of the copyset */
virtual const std::string& GetCopysetDetail() const { return copysetsDetail_; } /** - * @brief 获取检查过程中服务异常的chunkserver列表,通常检查后会调用,然后打印出来 - * @return 服务异常的chunkserver的列表 + * @brief Get the chunkservers that hit service exceptions during the + * check; usually called after a check and then printed + * @return List of chunkservers with service exceptions */ virtual const std::set& GetServiceExceptionChunkServer() - const { + const { return serviceExceptionChunkServers_; } /** - * @brief 获取检查过程中copyset寻找失败的chunkserver列表,通常检查后会调用,然后打印出来 - * @return copyset加载异常的chunkserver的列表 + * @brief Get the chunkservers on which copysets failed to load during the + * check; usually called after a check and then printed + * @return List of chunkservers with copyset loading exceptions */ virtual const std::set& GetCopysetLoadExceptionChunkServer() - const { + const { return copysetLoacExceptionChunkServers_; } /** - * @brief 通过发送RPC检查chunkserver是否在线 - * - * @param chunkserverAddr chunkserver的地址 - * - * @return 在线返回true,不在线返回false - */ + * @brief Check if chunkserver is online by sending RPC + * + * @param chunkserverAddr Address of chunkserver + * + * @return true if online, false if offline + */ virtual bool CheckChunkServerOnline(const std::string& chunkserverAddr); /** - * @brief List volumes on majority peers offline copysets - * - * @param fileNames affected volumes - * - * @return return 0 when sucess, otherwise return -1 - */ + * @brief List volumes on majority peers offline copysets + * + * @param fileNames affected volumes + * + * @return return 0 when success, otherwise return -1 + */ virtual int ListMayBrokenVolumes(std::vector* fileNames); private: /** - * @brief 从iobuf分析出指定groupId的复制组的信息, - * 每个复制组的信息都放到一个map里面 - * - * @param gIds 要查询的复制组的groupId,为空的话全部查询 - * @param iobuf 要分析的iobuf - * @param[out] maps copyset信息的列表,每个copyset的信息都是一个map - * @param saveIobufStr 是否要把iobuf里的详细内容存下来 - * - */ + * @brief Parse the information of the replication groups with the + * specified groupIds from iobuf; each replication group's information is + * placed in a map + * + * @param gIds: The groupIds of the replication groups to be queried. If + * empty, all groups are parsed + * @param iobuf The iobuf to analyze + * @param[out] maps A list of copyset information, where each copyset's + * information is a map + * @param saveIobufStr Whether to save the detailed content of the iobuf + * + */ void ParseResponseAttachment(const std::set& gIds, - butil::IOBuf* iobuf, - CopySetInfosType* copysetInfos, - bool saveIobufStr = false); + butil::IOBuf* iobuf, + CopySetInfosType* copysetInfos, + bool saveIobufStr = false); /** - * @brief 检查某个chunkserver上的所有copyset的健康状态 - * - * @param chunkserId chunkserverId - * @param chunkserverAddr chunkserver的地址,两者指定一个就好 - * - * @return 健康返回0,不健康返回-1 - */ + * @brief Check the health status of all copysets on a certain chunkserver + * + * @param chunkserverId The chunkserver id + * @param chunkserverAddr The chunkserver address; just specify one of the + * two + * + * @return 0 if healthy, -1 otherwise + */ int CheckCopysetsOnChunkServer(const ChunkServerIdType& chunkserverId, const std::string& chunkserverAddr); /** - * @brief check copysets' healthy status on chunkserver - * - * @param[in] chunkserAddr: chunkserver address - * @param[in] groupIds: groupId for check, default is null, check all the copysets - * @param[in] queryLeader: whether send rpc to chunkserver which copyset leader on.
- * All the chunkserves will be check when check clusters status. - * @param[in] record: raft state rpc response from chunkserver - * @param[in] queryCs: whether send rpc to chunkserver - * - * @return error code - */ + * @brief check copysets' healthy status on chunkserver + * + * @param[in] chunkserverAddr: chunkserver address + * @param[in] groupIds: groupIds for check, default is null, check all the + * copysets + * @param[in] queryLeader: whether to send rpc to the chunkserver the + * copyset leader is on. All chunkservers will be checked when checking + * cluster status. + * @param[in] record: raft state rpc response from chunkserver + * @param[in] queryCs: whether to send rpc to the chunkserver + * + * @return error code + */ ChunkServerHealthStatus CheckCopysetsOnChunkServer( - const std::string& chunkserverAddr, - const std::set& groupIds, - bool queryLeader = true, - std::pair *record = nullptr, - bool queryCs = true); + const std::string& chunkserverAddr, + const std::set& groupIds, bool queryLeader = true, + std::pair* record = nullptr, bool queryCs = true); /** - * @brief 检查某个server上的所有copyset的健康状态 - * - * @param serverId server的id - * @param serverIp server的ip,serverId或serverIp指定一个就好 - * @param queryLeader 是否向leader所在的server发送RPC查询, - * 对于检查cluster来说,所有server都会遍历到,不用查询 - * - * @return 健康返回0,不健康返回-1 - */ - int CheckCopysetsOnServer(const ServerIdType& serverId, - const std::string& serverIp, - bool queryLeader = true, - std::vector* unhealthyChunkServers = nullptr); + * @brief Check the health status of all copysets on a server + * + * @param serverId The server id + * @param serverIp The server ip; specify either serverId or serverIp + * @param queryLeader Whether to send RPC queries to the server where the + * leader is located. When checking the whole cluster, all servers are + * traversed anyway, so no extra query is needed + * + * @return 0 if healthy, -1 otherwise + */ + int CheckCopysetsOnServer( + const ServerIdType& serverId, const std::string& serverIp, + bool queryLeader = true, + std::vector* unhealthyChunkServers = nullptr); /** * @brief concurrent check copyset on server * * @param[in] chunkservers: chunkservers on server * @param[in] result: rpc response from chunkserver */ void ConcurrentCheckCopysetsOnServer( - const std::vector &chunkservers, - uint32_t *index, - std::map> *result); + const std::vector& chunkservers, uint32_t* index, + std::map>* result); /** - * @brief 根据leader的map里面的copyset信息分析出copyset是否健康,健康返回0,否则 - * 否则返回错误码 - * - * @param map leader的copyset信息,以键值对的方式存储 - * - * @return 返回错误码 - */ + * @brief Analyze whether the copyset is healthy based on the copyset + * information in the leader's map, and return 0 if it is healthy.
+ * Otherwise, an error code is returned + * + * @param map The copyset information of the leader, stored as key-value + * pairs + * + * @return an error code + */ CheckResult CheckHealthOnLeader(std::map* map); /** - * @brief 向chunkserver发起raft state rpc - * - * @param chunkserverAddr chunkserver的地址 - * @param[out] iobuf 返回的responseattachment,返回0的时候有效 - * - * @return 成功返回0,失败返回-1 - */ + * @brief Initiate raft state rpc to chunkserver + * + * @param chunkserverAddr Address of chunkserver + * @param[out] iobuf The returned response attachment; valid when 0 is + * returned + * + * @return returns 0 for success, -1 for failure + */ int QueryChunkServer(const std::string& chunkserverAddr, butil::IOBuf* iobuf); /** - * @brief 把chunkserver上所有的copyset更新到peerNotOnline里面 - * - * @param csAddr chunkserver的地址 - * - * @return 无 - */ + * @brief Add all copysets on the chunkserver to peerNotOnline + * + * @param csAddr Address of the chunkserver + * + * @return None + */ void UpdatePeerNotOnlineCopysets(const std::string& csAddr); /** - * @brief 以mds中的copyset配置组为参照,检查chunkserver是否在copyset的配置组中 - * - * @param csAddr chunkserver的地址 - * @param copysets copyset列表 - * @param[out] result 检查结果,copyset到存在与否的映射 - * - * @return 包含返回true,否则返回false - */ + * @brief Using the copyset configuration group in mds as a reference, + * check if the chunkserver is in the copyset configuration group + * + * @param csAddr Address of the chunkserver + * @param copysets copyset list + * @param[out] result check result, mapping from copyset to presence or + * absence + * + * @return true if contained, false otherwise + */ int CheckIfChunkServerInCopysets(const std::string& csAddr, const std::set copysets, std::map* result); /** - * @brief 检查没有leader的copyset是否健康 - * - * @param csAddr chunkserver 地址 - * @param copysetsPeers copyset的groupId到peers的映射 - * - * @return 健康返回true,不健康返回false - */ - bool CheckCopysetsNoLeader(const std::string& csAddr, - const std::map>& - copysetsPeers); + * @brief Check whether the copysets without a leader are healthy + * + * @param csAddr chunkserver address + * @param copysetsPeers mapping from copyset groupId to its peers + * + * @return true if healthy, false if unhealthy + */ + bool CheckCopysetsNoLeader( + const std::string& csAddr, + const std::map>& copysetsPeers); /** - * @brief 清空统计信息 - * - * @return 无 - */ + * @brief Clear statistics + * + * @return None + */ void Clear(); /** - * @brief 获取chunkserver上的copyset的在线状态 - * - * @param csAddr chunkserver地址 - * @param groupId copyset的groupId - * - * @return 在线返回true - */ + * @brief Obtain the online status of the copyset on the chunkserver + * + * @param csAddr chunkserver address + * @param groupId The copyset's groupId + * + * @return true if online + */ bool CheckCopySetOnline(const std::string& csAddr, const std::string& groupId); /** - * @brief 获取不在线的peer的数量 - * - * - * @param peers 副本peer的列表ip:port:id的形式 - * - * @return 返回错误码 - */ + * @brief Count the peers that are not online + * + * + * @param peers The list of replica peers in the form of ip:port:id + * + * @return an error code + */ CheckResult CheckPeerOnlineStatus(const std::string& groupId, const std::vector& peers); /** - * @brief 更新chunkserver上的copyset的groupId列表 - * - * @param csAddr chunkserver地址 - * @param copysetInfos copyset信息列表 - */ + * @brief Update the groupId list of copysets on the chunkserver + * + * @param csAddr chunkserver address + * @param copysetInfos copyset information list + */ void UpdateChunkServerCopysets(const std::string& csAddr, - const
CopySetInfosType& copysetInfos); + const CopySetInfosType& copysetInfos); int CheckCopysetsWithMds(); int CheckScanStatus(const std::vector& copysetInfos); private: - // 向mds发送RPC的client + // Client for sending RPCs to mds std::shared_ptr mdsClient_; // for unittest mock csClient std::shared_ptr csClient_; - // 保存copyset的信息 + // Saves copyset information std::map> copysets_; - // 用来保存发送RPC失败的那些chunkserver + // Used to save chunkservers to which sending RPCs failed std::set serviceExceptionChunkServers_; - // 用来保存一些copyset加载有问题的chunkserver + // Used to save chunkservers that had problems loading copysets std::set copysetLoacExceptionChunkServers_; - // 用来存放访问过的chunkserver上的copyset列表,避免重复RPC + // Used to store the copyset list on accessed chunkservers to avoid + // duplicate RPCs std::map> chunkserverCopysets_; - // 查询单个copyset的时候,保存复制组的详细信息 + // When querying a single copyset, save the detailed information of the + // replication group std::string copysetsDetail_; const std::string kEmptyAddr = "0.0.0.0:0:0"; diff --git a/src/tools/curve_cli.cpp b/src/tools/curve_cli.cpp index 0dc5dcf46e..60bb516b86 100644 --- a/src/tools/curve_cli.cpp +++ b/src/tools/curve_cli.cpp @@ -20,23 +20,21 @@ * Author: wudemiao */ -#include #include "src/tools/curve_cli.h" + +#include + #include "src/tools/common.h" -DEFINE_int32(timeout_ms, - -1, "Timeout (in milliseconds) of the operation"); -DEFINE_int32(max_retry, - 3, "Max retry times of each operation"); -DEFINE_string(conf, - "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0", +DEFINE_int32(timeout_ms, -1, "Timeout (in milliseconds) of the operation"); +DEFINE_int32(max_retry, 3, "Max retry times of each operation"); +DEFINE_string(conf, "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0", "Initial configuration of the replication group"); -DEFINE_string(peer, - "", "Id of the operating peer"); -DEFINE_string(new_conf, - "", "new conf to reset peer"); -DEFINE_bool(remove_copyset, false, "Whether need to remove broken copyset " - "after remove peer (default: false)"); +DEFINE_string(peer, "", "Id of the operating peer"); +DEFINE_string(new_conf, "", "new conf to reset peer"); +DEFINE_bool(remove_copyset, false, + "Whether need to remove broken copyset " + "after remove peer (default: false)"); DEFINE_bool(affirm, true, "If true, command line interactive affirmation is required."
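(Reviewer's aside, not part of the patch: the flags reformatted above feed the braft cli helpers used later in this file. Below is a minimal sketch of the usual parsing step, assuming braft's Configuration::parse_from() and PeerId::parse(), both of which return 0 on success; the helper name ParseCliFlags is illustrative only.)

    #include <braft/configuration.h>  // sketch assumes this braft header

    // Parse -conf (a comma-separated "ip:port:index" list) and -peer
    // (a single "ip:port:index" id) before issuing a cli request.
    static bool ParseCliFlags(braft::Configuration* conf, braft::PeerId* peer) {
        if (conf->parse_from(FLAGS_conf) != 0) {
            return false;  // malformed replication group configuration
        }
        return peer->parse(FLAGS_peer) == 0;
    }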
@@ -45,26 +43,22 @@ DECLARE_string(mdsAddr); namespace curve { namespace tool { -#define CHECK_FLAG(flagname) \ - do { \ - if ((FLAGS_ ## flagname).empty()) { \ - std::cout << __FUNCTION__ << " requires --" # flagname \ - << std::endl; \ - return -1; \ - } \ - } while (0); \ - +#define CHECK_FLAG(flagname) \ + do { \ + if ((FLAGS_##flagname).empty()) { \ + std::cout << __FUNCTION__ << " requires --" #flagname \ + << std::endl; \ + return -1; \ + } \ + } while (0); bool CurveCli::SupportCommand(const std::string& command) { - return (command == kResetPeerCmd || command == kRemovePeerCmd - || command == kTransferLeaderCmd - || command == kDoSnapshot - || command == kDoSnapshotAll); + return (command == kResetPeerCmd || command == kRemovePeerCmd || + command == kTransferLeaderCmd || command == kDoSnapshot || + command == kDoSnapshotAll); } -int CurveCli::Init() { - return mdsClient_->Init(FLAGS_mdsAddr); -} +int CurveCli::Init() { return mdsClient_->Init(FLAGS_mdsAddr); } butil::Status CurveCli::DeleteBrokenCopyset(braft::PeerId peerId, const LogicPoolID& poolId, @@ -121,13 +115,13 @@ int CurveCli::RemovePeer() { } // STEP 1: remove peer - butil::Status status = curve::chunkserver::RemovePeer( - poolId, copysetId, conf, peer, opt); + butil::Status status = + curve::chunkserver::RemovePeer(poolId, copysetId, conf, peer, opt); auto succ = status.ok(); - std::cout << "Remove peer " << peerId << " for copyset(" - << poolId << ", " << copysetId << ") " - << (succ ? "success" : "fail") << ", original conf: " << conf - << ", status: " << status << std::endl; + std::cout << "Remove peer " << peerId << " for copyset(" << poolId << ", " + << copysetId << ") " << (succ ? "success" : "fail") + << ", original conf: " << conf << ", status: " << status + << std::endl; if (!succ || !FLAGS_remove_copyset) { return succ ? 0 : -1; @@ -138,8 +132,8 @@ int CurveCli::RemovePeer() { succ = status.ok(); std::cout << "Delete copyset(" << poolId << ", " << copysetId << ")" << " in " << peerId << (succ ? "success" : "fail") - << ", original conf: " << conf - << ", status: " << status << std::endl; + << ", original conf: " << conf << ", status: " << status + << std::endl; return succ ? 
0 : -1; } @@ -164,25 +158,19 @@ int CurveCli::TransferLeader() { opt.timeout_ms = FLAGS_timeout_ms; opt.max_retry = FLAGS_max_retry; butil::Status st = curve::chunkserver::TransferLeader( - FLAGS_logicalPoolId, - FLAGS_copysetId, - conf, - targetPeer, - opt); + FLAGS_logicalPoolId, FLAGS_copysetId, conf, targetPeer, opt); if (!st.ok()) { std::cout << "Transfer leader of copyset " - << "(" << FLAGS_logicalPoolId << ", " - << FLAGS_copysetId << ")" - << " to " << targetPeerId - << " fail, original conf: " << conf + << "(" << FLAGS_logicalPoolId << ", " << FLAGS_copysetId + << ")" + << " to " << targetPeerId << " fail, original conf: " << conf << ", detail: " << st << std::endl; return -1; } std::cout << "Transfer leader of copyset " - << "(" << FLAGS_logicalPoolId << ", " - << FLAGS_copysetId << ")" - << " to " << targetPeerId - << " success, original conf: " << conf << std::endl; + << "(" << FLAGS_logicalPoolId << ", " << FLAGS_copysetId << ")" + << " to " << targetPeerId << " success, original conf: " << conf + << std::endl; return 0; } @@ -217,13 +205,14 @@ int CurveCli::ResetPeer() { } curve::common::Peer requestPeer; requestPeer.set_address(requestPeerId.to_string()); - // 目前reset peer只支持reset为1一个副本,不支持增加副本, - // 因为不能通过工具在chunkserver上创建copyset + // Currently, reset peer only supports resetting to a single replica; + // adding replicas is not supported, because a copyset cannot be created + // on a chunkserver through this tool if (newConf.size() != 1) { std::cout << "New conf can only specify one peer!" << std::endl; return -1; } - // 新的配置必须包含发送RPC的peer + // The new configuration must include the peer that sends the RPC if (*newConf.begin() != requestPeerId) { std::cout << "New conf must include the target peer!" << std::endl; return -1; @@ -233,25 +222,20 @@ opt.max_retry = FLAGS_max_retry; butil::Status st = curve::chunkserver::ResetPeer( - FLAGS_logicalPoolId, - FLAGS_copysetId, - newConf, - requestPeer, - opt); + FLAGS_logicalPoolId, FLAGS_copysetId, newConf, requestPeer, opt); if (!st.ok()) { std::cout << "Reset peer of copyset " - << "(" << FLAGS_logicalPoolId << ", " - << FLAGS_copysetId << ")" + << "(" << FLAGS_logicalPoolId << ", " << FLAGS_copysetId + << ")" << " to " << newConf << " fail, requestPeer: " << requestPeerId << ", detail: " << st << std::endl; return -1; } std::cout << "Reset peer of copyset " - << "(" << FLAGS_logicalPoolId << ", " - << FLAGS_copysetId << ")" - << " to " << newConf - << " success, requestPeer: " << requestPeerId << std::endl; + << "(" << FLAGS_logicalPoolId << ", " << FLAGS_copysetId << ")" + << " to " << newConf << " success, requestPeer: " << requestPeerId + << std::endl; return 0; } @@ -274,15 +258,12 @@ int CurveCli::DoSnapshot(uint32_t lgPoolId, uint32_t copysetId, braft::cli::CliOptions opt; opt.timeout_ms = FLAGS_timeout_ms; opt.max_retry = FLAGS_max_retry; - butil::Status st = curve::chunkserver::Snapshot( - FLAGS_logicalPoolId, - FLAGS_copysetId, - peer, - opt); + butil::Status st = curve::chunkserver::Snapshot(FLAGS_logicalPoolId, + FLAGS_copysetId, peer, opt); if (!st.ok()) { std::cout << "Do snapshot of copyset " - << "(" << FLAGS_logicalPoolId << ", " - << FLAGS_copysetId << ")" + << "(" << FLAGS_logicalPoolId << ", " << FLAGS_copysetId + << ")" << " fail, requestPeer: " << peer.address() << ", detail: " << st << std::endl; return -1; } @@ -301,8 +282,8 @@ int CurveCli::DoSnapshotAll() { braft::cli::CliOptions opt; opt.timeout_ms = FLAGS_timeout_ms; opt.max_retry = FLAGS_max_retry; - std::string csAddr =
chunkserver.hostip() + ":" + - std::to_string(chunkserver.port()); + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); curve::common::Peer peer; peer.set_address(csAddr); butil::Status st = curve::chunkserver::SnapshotAll(peer, opt); @@ -315,17 +296,27 @@ int CurveCli::DoSnapshotAll() { return res; } -void CurveCli::PrintHelp(const std::string &cmd) { +void CurveCli::PrintHelp(const std::string& cmd) { std::cout << "Example " << std::endl; if (cmd == kResetPeerCmd) { - std::cout << "curve_ops_tool " << cmd << " -logicalPoolId=1 -copysetId=10001 -peer=127.0.0.1:8080:0 " // NOLINT - "-new_conf=127.0.0.1:8080:0 -max_retry=3 -timeout_ms=100" << std::endl; // NOLINT + std::cout << "curve_ops_tool " << cmd + << " -logicalPoolId=1 -copysetId=10001 " + "-peer=127.0.0.1:8080:0 " // NOLINT + "-new_conf=127.0.0.1:8080:0 -max_retry=3 -timeout_ms=100" + << std::endl; // NOLINT } else if (cmd == kRemovePeerCmd || cmd == kTransferLeaderCmd) { - std::cout << "curve_ops_tool " << cmd << " -logicalPoolId=1 -copysetId=10001 -peer=127.0.0.1:8080:0 " // NOLINT - "-conf=127.0.0.1:8080:0,127.0.0.1:8081:0,127.0.0.1:8082:0 -max_retry=3 -timeout_ms=100 -remove_copyset=true/false" << std::endl; // NOLINT + std::cout << "curve_ops_tool " << cmd + << " -logicalPoolId=1 -copysetId=10001 " + "-peer=127.0.0.1:8080:0 " // NOLINT + "-conf=127.0.0.1:8080:0,127.0.0.1:8081:0,127.0.0.1:8082:0 " + "-max_retry=3 -timeout_ms=100 -remove_copyset=true/false" + << std::endl; // NOLINT } else if (cmd == kDoSnapshot) { - std::cout << "curve_ops_tool " << cmd << " -logicalPoolId=1 -copysetId=10001 -peer=127.0.0.1:8080:0 " // NOLINT - "-max_retry=3 -timeout_ms=100" << std::endl; + std::cout << "curve_ops_tool " << cmd + << " -logicalPoolId=1 -copysetId=10001 " + "-peer=127.0.0.1:8080:0 " // NOLINT + "-max_retry=3 -timeout_ms=100" + << std::endl; } else if (cmd == kDoSnapshotAll) { std::cout << "curve_ops_tool " << cmd << std::endl; } else { @@ -333,7 +324,7 @@ void CurveCli::PrintHelp(const std::string &cmd) { } } -int CurveCli::RunCommand(const std::string &cmd) { +int CurveCli::RunCommand(const std::string& cmd) { if (Init() != 0) { std::cout << "Init CurveCli tool failed" << std::endl; return -1; @@ -358,4 +349,3 @@ int CurveCli::RunCommand(const std::string &cmd) { } } // namespace tool } // namespace curve - diff --git a/src/tools/curve_cli.h b/src/tools/curve_cli.h index 24a4944cee..7267262893 100644 --- a/src/tools/curve_cli.h +++ b/src/tools/curve_cli.h @@ -23,64 +23,65 @@ #ifndef SRC_TOOLS_CURVE_CLI_H_ #define SRC_TOOLS_CURVE_CLI_H_ -#include -#include #include #include +#include +#include -#include -#include #include +#include #include +#include #include "include/chunkserver/chunkserver_common.h" -#include "src/chunkserver/copyset_node.h" +#include "proto/copyset.pb.h" #include "src/chunkserver/cli2.h" +#include "src/chunkserver/copyset_node.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" #include "src/tools/mds_client.h" -#include "proto/copyset.pb.h" namespace curve { namespace tool { -using ::curve::chunkserver::LogicPoolID; using ::curve::chunkserver::CopysetID; using ::curve::chunkserver::CopysetRequest; using ::curve::chunkserver::CopysetResponse; using ::curve::chunkserver::CopysetService_Stub; +using ::curve::chunkserver::LogicPoolID; +using ::curve::chunkserver::COPYSET_OP_STATUS:: + COPYSET_OP_STATUS_FAILURE_UNKNOWN; // NOLINT using ::curve::chunkserver::COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS; -using 
::curve::chunkserver::COPYSET_OP_STATUS::COPYSET_OP_STATUS_FAILURE_UNKNOWN; // NOLINT class CurveCli : public CurveTool { public: - explicit CurveCli(std::shared_ptr mdsClient) : - mdsClient_(mdsClient) {} + explicit CurveCli(std::shared_ptr mdsClient) + : mdsClient_(mdsClient) {} /** - * @brief 初始化mds client - * @return 成功返回0,失败返回-1 + * @brief Initialize mds client + * @return returns 0 for success, -1 for failure */ int Init(); /** - * @brief 打印help信息 - * @param 无 - * @return 无 + * @brief Print help information + * @param None + * @return None */ - void PrintHelp(const std::string &cmd) override; + void PrintHelp(const std::string& cmd) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param cmd: Command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &cmd) override; + int RunCommand(const std::string& cmd) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief Returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); @@ -97,47 +98,48 @@ class CurveCli : public CurveTool { const CopysetID& copysetId); /** - * @brief 删除peer - * @param 无 - * @return 成功返回0,失败返回-1 + * @brief Delete peer + * @param None + * @return returns 0 for success, -1 for failure */ int RemovePeer(); /** - * @brief 转移leader - * @param 无 - * @return 成功返回0,失败返回-1 + * @brief Transfer leader + * @param None + * @return returns 0 for success, -1 for failure */ int TransferLeader(); /** - * @brief 触发打快照 - * @param 无 - * @return 成功返回0,失败返回-1 + * @brief Trigger a snapshot + * @param None + * @return returns 0 for success, -1 for failure */ int DoSnapshot(); /** - * @brief 触发打快照 - * @param lgPoolId 逻辑池id - * @param copysetId 复制组id - * @param peer 复制组成员 - * @return 成功返回0,失败返回-1 + * @brief Trigger a snapshot + * @param lgPoolId Logical pool ID + * @param copysetId Copyset ID + * @param peer Replication group member + * @return returns 0 for success, -1 for failure */ int DoSnapshot(uint32_t lgPoolId, uint32_t copysetId, const curve::common::Peer& peer); /** - * @brief 给集群中全部copyset node触发打快照 - * @param 无 - * @return 成功返回0,失败返回-1 + * @brief Trigger a snapshot of all copyset nodes in the cluster + * @param None + * @return returns 0 for success, -1 for failure */ int DoSnapshotAll(); /** - * @brief 重置配置组成员,目前只支持reset成一个成员 - * @param 无 - * @return 成功返回0,失败返回-1 + * @brief Reset configuration group members, currently only supports + * resetting to one member + * @param None + * @return returns 0 for success, -1 for failure */ int ResetPeer(); diff --git a/src/tools/curve_format_main.cpp b/src/tools/curve_format_main.cpp index 08aa1f62ed..d5f30d9b7b 100644 --- a/src/tools/curve_format_main.cpp +++ b/src/tools/curve_format_main.cpp @@ -20,45 +20,41 @@ * Author: tongguangxun */ -#include +#include #include +#include #include -#include - -#include -#include // NOLINT -#include // NOLINT #include +#include // NOLINT +#include +#include // NOLINT #include -#include "src/fs/fs_common.h" -#include "src/fs/local_filesystem.h" -#include "src/common/crc32.h" +#include "include/chunkserver/chunkserver_common.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/common/bitmap.h" +#include "src/common/crc32.h" #include "src/common/curve_define.h" -#include "src/chunkserver/datastore/file_pool.h" #include "src/common/fast_align.h" - -#include
"include/chunkserver/chunkserver_common.h" +#include "src/fs/fs_common.h" +#include "src/fs/local_filesystem.h" using ::curve::common::align_up; using ::curve::common::is_aligned; /** - * chunkfile pool预分配工具,提供两种分配方式 - * 1. 以磁盘空间百分比方式,指定需要分配的百分比 - * 2. 指定以chunk数量分配 - * 默认的分配方式是以磁盘空间百分比作为分配方式,可以通过-allocateByPercent=false/true - * 调整分配方式。 + * chunkfile pool pre allocation tool, providing two allocation methods + * 1. Specify the percentage to be allocated as a percentage of disk space + * 2. Specify allocation by chunk quantity + * The default allocation method is based on the percentage of disk space, which + * can be achieved by -allocateByPercent=false/true Adjust the allocation + * method. */ -DEFINE_bool(allocateByPercent, - true, +DEFINE_bool(allocateByPercent, true, "allocate filePool by percent of disk size or by chunk num!"); -DEFINE_uint32(fileSize, - 16 * 1024 * 1024, - "chunk size"); +DEFINE_uint32(fileSize, 16 * 1024 * 1024, "chunk size"); DEFINE_uint32(blockSize, 4096, "minimum io alignment supported"); @@ -69,41 +65,34 @@ static bool ValidateBlockSize(const char* /*name*/, uint32_t blockSize) { DEFINE_validator(blockSize, &ValidateBlockSize); -DEFINE_string(fileSystemPath, - "./", - "chunkserver disk path"); +DEFINE_string(fileSystemPath, "./", "chunkserver disk path"); -DEFINE_string(filePoolDir, - "./filePool/", - "chunkfile pool dir"); +DEFINE_string(filePoolDir, "./filePool/", "chunkfile pool dir"); -DEFINE_string(filePoolMetaPath, - "./filePool.meta", +DEFINE_string(filePoolMetaPath, "./filePool.meta", "chunkfile pool meta info file path."); -// preallocateNum仅在测试的时候使用,测试提前预分配固定数量的chunk -// 当设置这个值的时候可以不用设置allocatepercent -DEFINE_uint32(preAllocateNum, - 0, +// preallocateNum is only used during testing, and a fixed number of chunks are +// pre allocated in advance during testing When setting this value, there is no +// need to set allocatepercent +DEFINE_uint32(preAllocateNum, 0, "preallocate chunk nums, this is JUST for curve test"); -// 在系统初始化的时候,管理员需要预先格式化磁盘,并进行预分配 -// 这时候只需要指定allocatepercent,allocatepercent是占整个盘的空间的百分比 -DEFINE_uint32(allocatePercent, - 80, - "preallocate storage percent of total disk"); +// During system initialization, the administrator needs to pre format the disk +// and pre allocate it At this point, only allocate percentage needs to be +// specified, which is the percentage of the entire disk space occupied by +// allocate percentage +DEFINE_uint32(allocatePercent, 80, "preallocate storage percent of total disk"); -// 测试情况下置为false,加快测试速度 -DEFINE_bool(needWriteZero, - true, - "not write zero for test."); +// Set to false during testing to accelerate testing speed +DEFINE_bool(needWriteZero, true, "not write zero for test."); -using curve::fs::FileSystemType; -using curve::fs::LocalFsFactory; +using curve::chunkserver::FilePoolMeta; +using curve::common::kFilePoolMagic; using curve::fs::FileSystemInfo; +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; -using curve::common::kFilePoolMagic; -using curve::chunkserver::FilePoolMeta; +using curve::fs::LocalFsFactory; class CompareInternal { public: @@ -128,7 +117,7 @@ struct AllocateStruct { static int AllocateFiles(AllocateStruct* allocatestruct) { const size_t actualFileSize = allocatestruct->actualFileSize; - char* data = new(std::nothrow)char[actualFileSize]; + char* data = new (std::nothrow) char[actualFileSize]; memset(data, 0, actualFileSize); uint64_t count = 0; @@ -137,14 +126,13 @@ static int AllocateFiles(AllocateStruct* allocatestruct) { { std::unique_lock 
lk(*allocatestruct->mtx); allocatestruct->allocateChunknum->fetch_add(1); - filename = std::to_string( - allocatestruct->allocateChunknum->load()); + filename = std::to_string(allocatestruct->allocateChunknum->load()); } - std::string tmpchunkfilepath = FLAGS_filePoolDir + "/" - + filename + allocatestruct->cleanChunkSuffix; + std::string tmpchunkfilepath = FLAGS_filePoolDir + "/" + filename + + allocatestruct->cleanChunkSuffix; - int ret = allocatestruct->fsptr->Open(tmpchunkfilepath, - O_RDWR | O_CREAT); + int ret = + allocatestruct->fsptr->Open(tmpchunkfilepath, O_RDWR | O_CREAT); if (ret < 0) { *allocatestruct->checkwrong = true; LOG(ERROR) << "file open failed, " << tmpchunkfilepath; @@ -205,12 +193,12 @@ static bool CanBitmapFitInMetaPage() { constexpr size_t kMaximumBitmapBytes = 1024; auto bitmapBytes = - FLAGS_fileSize / FLAGS_blockSize / curve::common::BITMAP_UNIT_SIZE; + FLAGS_fileSize / FLAGS_blockSize / curve::common::BITMAP_UNIT_SIZE; LOG(INFO) << "bitmap bytes is " << bitmapBytes; return bitmapBytes <= kMaximumBitmapBytes; } -// TODO(tongguangxun) :添加单元测试 +// TODO(tongguangxun): Adding unit tests int main(int argc, char** argv) { google::ParseCommandLineFlags(&argc, &argv, false); google::InitGoogleLogging(argv[0]); @@ -247,7 +235,9 @@ int main(int argc, char** argv) { } tmpChunkSet_.insert(tmpvec.begin(), tmpvec.end()); - uint64_t size = tmpChunkSet_.size() ? atoi((*(--tmpChunkSet_.end())).c_str()) : 0; // NOLINT + uint64_t size = tmpChunkSet_.size() + ? atoi((*(--tmpChunkSet_.end())).c_str()) + : 0; // NOLINT allocateChunknum_.store(size + 1); FileSystemInfo finfo; @@ -278,7 +268,7 @@ int main(int argc, char** argv) { bool checkwrong = false; // two threads concurrent, can reach the bandwidth of disk. - uint64_t threadAllocateNum = preAllocateChunkNum/2; + uint64_t threadAllocateNum = preAllocateChunkNum / 2; std::vector thvec; AllocateStruct allocateStruct; allocateStruct.fsptr = fsptr; @@ -316,7 +306,7 @@ int main(int argc, char** argv) { return -1; } - // 读取meta文件,检查是否写入正确 + // Read the meta file and check if it is written correctly FilePoolMeta recordMeta; ret = curve::chunkserver::FilePoolHelper::DecodeMetaInfoFromMetaFile( fsptr, FLAGS_filePoolMetaPath, 4096, &recordMeta); @@ -345,8 +335,8 @@ int main(int argc, char** argv) { if (recordMeta.filePoolPath != FLAGS_filePoolDir) { LOG(ERROR) << "meta info persistency failed!" 
- << ", read chunkpath = " << recordMeta.filePoolPath - << ", real chunkpath = " << FLAGS_filePoolDir; + << ", read chunkpath = " << recordMeta.filePoolPath + << ", real chunkpath = " << FLAGS_filePoolDir; break; } diff --git a/src/tools/curve_meta_tool.cpp b/src/tools/curve_meta_tool.cpp index 5d9da78ec0..6a4bd0af6f 100644 --- a/src/tools/curve_meta_tool.cpp +++ b/src/tools/curve_meta_tool.cpp @@ -40,8 +40,7 @@ std::ostream& operator<<(std::ostream& os, const vector& ranges) { } uint64_t startOff = ranges[i].beginIndex * FLAGS_pageSize; uint64_t endOff = (ranges[i].endIndex + 1) * FLAGS_pageSize; - os << "[" << startOff << "," - << endOff << ")"; + os << "[" << startOff << "," << endOff << ")"; } return os; } @@ -105,26 +104,24 @@ int CurveMetaTool::RunCommand(const std::string& cmd) { } } - - int CurveMetaTool::PrintChunkMeta(const std::string& chunkFileName) { - // 打开chunk文件 - int fd = localFS_->Open(chunkFileName.c_str(), O_RDONLY|O_NOATIME); + // Open chunk file + int fd = localFS_->Open(chunkFileName.c_str(), O_RDONLY | O_NOATIME); if (fd < 0) { - std::cout << "Fail to open " << chunkFileName << ", " - << berror() << std::endl; + std::cout << "Fail to open " << chunkFileName << ", " << berror() + << std::endl; return -1; } - // 读取chunk头部 + // Read chunk header std::unique_ptr buf(new char[FLAGS_pageSize]); memset(buf.get(), 0, FLAGS_pageSize); int rc = localFS_->Read(fd, buf.get(), 0, FLAGS_pageSize); localFS_->Close(fd); if (rc != static_cast(FLAGS_pageSize)) { if (rc < 0) { - std::cout << "Fail to read metaPage from " - << chunkFileName << ", " << berror() << std::endl; + std::cout << "Fail to read metaPage from " << chunkFileName << ", " + << berror() << std::endl; } else { std::cout << "Read size not match, page size: " << FLAGS_pageSize << ", read size: " << rc << ", " << berror() << std::endl; @@ -138,29 +135,29 @@ int CurveMetaTool::PrintChunkMeta(const std::string& chunkFileName) { return -1; } - // 打印metaPage + // Print MetaPage std::cout << metaPage; return 0; } int CurveMetaTool::PrintSnapshotMeta(const std::string& snapFileName) { - // 打开快照文件 - int fd = localFS_->Open(snapFileName.c_str(), O_RDONLY|O_NOATIME); + // Open snapshot file + int fd = localFS_->Open(snapFileName.c_str(), O_RDONLY | O_NOATIME); if (fd < 0) { - std::cout << "Fail to open " << snapFileName << ", " - << berror() << std::endl; + std::cout << "Fail to open " << snapFileName << ", " << berror() + << std::endl; return -1; } - // 读取快照文件头部 + // Read snapshot file header std::unique_ptr buf(new char[FLAGS_pageSize]); memset(buf.get(), 0, FLAGS_pageSize); int rc = localFS_->Read(fd, buf.get(), 0, FLAGS_pageSize); localFS_->Close(fd); if (rc != static_cast(FLAGS_pageSize)) { if (rc < 0) { - std::cout << "Fail to read metaPage from " - << snapFileName << ", " << berror() << std::endl; + std::cout << "Fail to read metaPage from " << snapFileName << ", " + << berror() << std::endl; } else { std::cout << "Read size not match, page size: " << FLAGS_pageSize << ", read size: " << rc << std::endl; @@ -174,7 +171,7 @@ int CurveMetaTool::PrintSnapshotMeta(const std::string& snapFileName) { return -1; } - // 打印metaPage + // Print MetaPage std::cout << metaPage; return 0; } diff --git a/src/tools/curve_meta_tool.h b/src/tools/curve_meta_tool.h index fe2b040c58..2125679022 100644 --- a/src/tools/curve_meta_tool.h +++ b/src/tools/curve_meta_tool.h @@ -24,24 +24,26 @@ #define SRC_TOOLS_CURVE_META_TOOL_H_ #include + #include #include #include #include + +#include "src/chunkserver/datastore/chunkserver_chunkfile.h" #include 
"src/common/bitmap.h" #include "src/fs/local_filesystem.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" -#include "src/chunkserver/datastore/chunkserver_chunkfile.h" namespace curve { namespace tool { -using curve::common::BitRange; -using curve::fs::LocalFileSystem; using curve::chunkserver::ChunkFileMetaPage; -using curve::chunkserver::SnapshotMetaPage; using curve::chunkserver::CSErrorCode; +using curve::chunkserver::SnapshotMetaPage; +using curve::common::BitRange; +using curve::fs::LocalFileSystem; std::ostream& operator<<(std::ostream& os, const vector& ranges); std::ostream& operator<<(std::ostream& os, const ChunkFileMetaPage& metaPage); @@ -49,40 +51,40 @@ std::ostream& operator<<(std::ostream& os, const SnapshotMetaPage& metaPage); class CurveMetaTool : public CurveTool { public: - explicit CurveMetaTool(std::shared_ptr localFs) : - localFS_(localFs) {} + explicit CurveMetaTool(std::shared_ptr localFs) + : localFS_(localFs) {} /** - * @brief 执行命令 - * @param command 要执行的命令 - * @return 成功返回0,失败返回-1 - */ + * @brief Execute command + * @param command The command to be executed + * @return returns 0 for success, -1 for failure + */ int RunCommand(const std::string& command) override; /** - * @brief 打印帮助信息 - */ + * @brief Print help information + */ void PrintHelp(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); private: /** - * @brief 打印chunk文件元数据 - * @param chunkFileName chunk文件的文件名 - * @return 成功返回0,否则返回-1 + * @brief Print chunk file metadata + * @param chunkFileName The file name of the chunk file + * @return successfully returns 0, otherwise returns -1 */ int PrintChunkMeta(const std::string& chunkFileName); /** - * @brief 打印快照文件元数据 - * @param snapFileName 快照文件的文件名 - * @return 成功返回0,否则返回-1 + * @brief Print snapshot file metadata + * @param snapFileName The file name of the snapshot file + * @return successfully returns 0, otherwise returns -1 */ int PrintSnapshotMeta(const std::string& snapFileName); diff --git a/src/tools/curve_tool_define.h b/src/tools/curve_tool_define.h index 8800bf847c..e2261550d5 100644 --- a/src/tools/curve_tool_define.h +++ b/src/tools/curve_tool_define.h @@ -24,6 +24,7 @@ #define SRC_TOOLS_CURVE_TOOL_DEFINE_H_ #include + #include DECLARE_string(mdsAddr); @@ -40,10 +41,10 @@ DECLARE_string(password); namespace curve { namespace tool { -// 显示版本命令 +// Display Version Command const char kVersionCmd[] = "version"; -// StatusTool相关命令 +// StatusTool related commands const char kStatusCmd[] = "status"; const char kSpaceCmd[] = "space"; const char kChunkserverStatusCmd[] = "chunkserver-status"; @@ -59,7 +60,7 @@ const char kClusterStatusCmd[] = "cluster-status"; const char kScanStatusCmd[] = "scan-status"; const char kFormatStatusCmd[] = "format-status"; -// NameSpaceTool相关命令 +// NameSpaceTool related commands const char kGetCmd[] = "get"; const char kListCmd[] = "list"; const char kSegInfoCmd[] = "seginfo"; @@ -71,7 +72,7 @@ const char kChunkLocatitonCmd[] = "chunk-location"; const char kUpdateThrottle[] = "update-throttle"; const char kListPoolsets[] = "list-poolsets"; -// CopysetCheck相关命令 +// CopysetCheck related commands const char kCheckCopysetCmd[] = "check-copyset"; const char kCheckChunnkServerCmd[] = "check-chunkserver"; const char kCheckServerCmd[] = "check-server"; @@ -79,13 +80,13 @@ 
const char kCopysetsStatusCmd[] = "copysets-status"; const char kCheckOperatorCmd[] = "check-operator"; const char kListMayBrokenVolumes[] = "list-may-broken-vol"; -// CopysetTool相关命令 +// CopysetTool related commands const char kSetCopysetAvailFlag[] = "set-copyset-availflag"; -// 一致性检查命令 +// Consistency check command const char kCheckConsistencyCmd[] = "check-consistency"; -// 配置变更命令 +// Configuration change command const char kRemovePeerCmd[] = "remove-peer"; const char kTransferLeaderCmd[] = "transfer-leader"; const char kResetPeerCmd[] = "reset-peer"; @@ -96,18 +97,18 @@ const char kDoSnapshotAll[] = "do-snapshot-all"; const char kRapidLeaderSchedule[] = "rapid-leader-schedule"; const char kSetScanState[] = "set-scan-state"; -// curve文件meta相关的命令 +// Meta-related commands for curve files const char kChunkMeta[] = "chunk-meta"; const char kSnapshotMeta[] = "snapshot-meta"; -// raft log相关命令 +// raft log related commands const char kRaftLogMeta[] = "raft-log-meta"; const char kOffline[] = "offline"; const char kVars[] = "/vars/"; const char kConfValue[] = "conf_value"; -// raft state 相关常量 +// raft state related constants const char kState[] = "state"; const char kStateLeader[] = "LEADER"; const char kStateFollower[] = "FOLLOWER"; diff --git a/src/tools/curve_tool_factory.h b/src/tools/curve_tool_factory.h index dc48778713..a863bce5fb 100644 --- a/src/tools/curve_tool_factory.h +++ b/src/tools/curve_tool_factory.h @@ -23,18 +23,18 @@ #ifndef SRC_TOOLS_CURVE_TOOL_FACTORY_H_ #define SRC_TOOLS_CURVE_TOOL_FACTORY_H_ -#include #include #include +#include -#include "src/tools/curve_tool.h" -#include "src/tools/status_tool.h" -#include "src/tools/namespace_tool.h" #include "src/tools/consistency_check.h" -#include "src/tools/curve_cli.h" #include "src/tools/copyset_check.h" -#include "src/tools/schedule_tool.h" #include "src/tools/copyset_tool.h" +#include "src/tools/curve_cli.h" +#include "src/tools/curve_tool.h" +#include "src/tools/namespace_tool.h" +#include "src/tools/schedule_tool.h" +#include "src/tools/status_tool.h" namespace curve { namespace tool { @@ -42,41 +42,41 @@ namespace tool { class CurveToolFactory { public: /** - * @brief 根据输入的command获取CurveTool对象 - * @param command 要执行的命令的名称 - * @return CurveTool实例 + * @brief Retrieve the CurveTool object based on the input command + * @param command The name of the command to be executed + * @return CurveTool instance */ static std::shared_ptr GenerateCurveTool( - const std::string& command); + const std::string& command); private: /** - * @brief 获取StatusTool实例 + * @brief Get StatusTool instance */ static std::shared_ptr GenerateStatusTool(); /** - * @brief 获取NameSpaceTool实例 + * @brief Get NameSpaceTool instance */ static std::shared_ptr GenerateNameSpaceTool(); /** - * @brief 获取ConsistencyCheck实例 + * @brief Get ConsistencyCheck instance */ static std::shared_ptr GenerateConsistencyCheck(); /** - * @brief 获取CurveCli实例 + * @brief Get CurveCli instance */ static std::shared_ptr GenerateCurveCli(); /** - * @brief 获取CopysetCheck实例 + * @brief Get CopysetCheck instance */ static std::shared_ptr GenerateCopysetCheck(); /** - * @brief 获取ScheduleTool实例 + * @brief Get ScheduleTool instance */ static std::shared_ptr GenerateScheduleTool(); diff --git a/src/tools/curve_tool_main.cpp b/src/tools/curve_tool_main.cpp index 8e516dc0e7..5f57f718c1 100644 --- a/src/tools/curve_tool_main.cpp +++ b/src/tools/curve_tool_main.cpp @@ -21,12 +21,16 @@ */ #include + #include "src/common/curve_version.h" #include "src/tools/curve_tool_factory.h" -static const
char* kHelpStr = "Usage: curve_ops_tool [Command] [OPTIONS...]\n" +static const char* + kHelpStr = + "Usage: curve_ops_tool [Command] [OPTIONS...]\n" "COMMANDS:\n" - "space : show curve all disk type space, include total space and used space\n" //NOLINT + "space : show curve all disk type space, include total space and used " + "space\n" // NOLINT "status : show the total status of the cluster\n" "chunkserver-status : show the chunkserver online status\n" "mds-status : show the mds status\n" @@ -35,22 +39,26 @@ static const char* kHelpStr = "Usage: curve_ops_tool [Command] [OPTIONS...]\n" "etcd-status : show the etcd status\n" "snapshot-clone-status : show the snapshot clone server status\n" "copysets-status : check the health state of all copysets\n" - "chunkserver-list : show curve chunkserver-list, list all chunkserver information\n" //NOLINT + "chunkserver-list : show curve chunkserver-list, list all chunkserver " + "information\n" // NOLINT "server-list : list all server information\n" "logical-pool-list : list all logical pool information\n" "cluster-status : show cluster status\n" "get : show the file info and the actual space of file\n" "list : list the file info of files in the directory\n" "seginfo : list the segments info of the file\n" - "delete : delete the file, to force delete, should specify the --forcedelete=true\n" //NOLINT + "delete : delete the file, to force delete, should specify the " + "--forcedelete=true\n" // NOLINT "clean-recycle : clean the RecycleBin\n" "create : create file, file length unit is GB\n" "extend : extend volume of file\n" - "chunk-location : query the location of the chunk corresponding to the offset\n" //NOLINT + "chunk-location : query the location of the chunk corresponding to the " + "offset\n" // NOLINT "check-consistency : check the consistency of three copies\n" "remove-peer : remove the peer from the copyset\n" - "transfer-leader : transfer the leader of the copyset to the peer\n" //NOLINT - "reset-peer : reset the configuration of copyset, only reset to one peer is supported\n" //NOLINT + "transfer-leader : transfer the leader of the copyset to the peer\n" // NOLINT + "reset-peer : reset the configuration of copyset, only reset to one " + "peer is supported\n" // NOLINT "do-snapshot : do snapshot of the peer of the copyset\n" "do-snapshot-all : do snapshot of all peers of all copysets\n" "check-chunkserver : check the health state of the chunkserver\n" @@ -60,11 +68,13 @@ static const char* kHelpStr = "Usage: curve_ops_tool [Command] [OPTIONS...]\n" "list-may-broken-vol: list all volumes on majority offline copysets\n" "set-copyset-availflag: set copysets available flags\n" "update-throttle: update file throttle params\n" - "rapid-leader-schedule: rapid leader schedule in cluster in logicalpool\n" //NOLINT + "rapid-leader-schedule: rapid leader schedule in cluster in " + "logicalpool\n" // NOLINT "set-scan-state: set scan state for specify logical pool\n" "scan-status: show scan status\n" "list-poolsets: list all poolsets in cluster\n\n" - "You can specify the config path by -confPath to avoid typing too many options\n"; //NOLINT + "You can specify the config path by -confPath to avoid typing too many " + "options\n"; // NOLINT DEFINE_bool(example, false, "print the example of usage"); DEFINE_string(confPath, "/etc/curve/tools.conf", "config file path of tools"); @@ -80,8 +90,10 @@ extern std::string rootUserPassword; } // namespace curve void UpdateFlagsFromConf(curve::common::Configuration* conf) { - // 如果配置文件不存在的话不报错,以命令行为准,这是为了不强依赖配置 
- // 如果配置文件存在并且没有指定命令行的话,就以配置文件为准 + // If the configuration file does not exist, no error is reported and the + // command line values prevail; this avoids a hard dependency on the + // configuration file. If the configuration file exists and a flag is not + // specified on the command line, the configuration file prevails google::CommandLineFlagInfo info; if (GetCommandLineFlagInfo("mdsAddr", &info) && info.is_default) { conf->GetStringValue("mdsAddr", &FLAGS_mdsAddr); @@ -98,27 +110,23 @@ void UpdateFlagsFromConf(curve::common::Configuration* conf) { if (GetCommandLineFlagInfo("rpcRetryTimes", &info) && info.is_default) { conf->GetUInt64Value("rpcRetryTimes", &FLAGS_rpcRetryTimes); } - if (GetCommandLineFlagInfo("rpcConcurrentNum", &info) && - info.is_default) { + if (GetCommandLineFlagInfo("rpcConcurrentNum", &info) && info.is_default) { conf->GetUInt64Value("rpcConcurrentNum", &FLAGS_rpcConcurrentNum); } - if (GetCommandLineFlagInfo("snapshotCloneAddr", &info) && - info.is_default) { + if (GetCommandLineFlagInfo("snapshotCloneAddr", &info) && info.is_default) { conf->GetStringValue("snapshotCloneAddr", &FLAGS_snapshotCloneAddr); } if (GetCommandLineFlagInfo("snapshotCloneDummyPort", &info) && - info.is_default) { + info.is_default) { conf->GetStringValue("snapshotCloneDummyPort", - &FLAGS_snapshotCloneDummyPort); + &FLAGS_snapshotCloneDummyPort); } - if (GetCommandLineFlagInfo("userName", &info) && - info.is_default) { + if (GetCommandLineFlagInfo("userName", &info) && info.is_default) { conf->GetStringValue("rootUserName", &FLAGS_userName); } - if (GetCommandLineFlagInfo("password", &info) && - info.is_default) { + if (GetCommandLineFlagInfo("password", &info) && info.is_default) { conf->GetStringValue("rootUserPassword", &FLAGS_password); } } @@ -168,7 +176,8 @@ int main(int argc, char** argv) { UpdateFlagsFromConf(&conf); - // 关掉健康检查,否则Not Connect to的时候重试没有意义 + // Turn off the health check; otherwise retrying on 'Not Connect to' + // errors is meaningless brpc::FLAGS_health_check_interval = -1; auto curveTool = curve::tool::CurveToolFactory::GenerateCurveTool(command); if (!curveTool) { diff --git a/src/tools/etcd_client.h b/src/tools/etcd_client.h index b7d8f56964..5392a1c6b3 100644 --- a/src/tools/etcd_client.h +++ b/src/tools/etcd_client.h @@ -27,9 +27,9 @@ #include #include +#include #include #include -#include #include "src/common/string_util.h" #include "src/tools/version_tool.h" @@ -49,26 +49,29 @@ class EtcdClient { virtual ~EtcdClient() = default; /** - * @brief 初始化etcdAddrVec - * @param etcdAddr etcd的地址,支持多地址,用","分隔 - * @return 成功返回0,失败返回-1 + * @brief Initialize etcdAddrVec + * @param etcdAddr etcd addresses, supporting multiple addresses separated + * by ',' + * @return returns 0 for success, -1 for failure */ virtual int Init(const std::string& etcdAddr); /** - * @brief 获取etcd集群的leader - * @param[out] leaderAddrVec etcd的leader的地址列表,返回值为0时有效 - * @param[out] onlineState etcd集群中每个节点的在线状态,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the leader of the etcd cluster + * @param[out] leaderAddrVec The address list of the etcd leaders, valid + * when the return value is 0 + * @param[out] onlineState The online state of each node in the etcd + * cluster, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetEtcdClusterStatus(std::vector* leaderAddrVec, - std::map* onlineState); + std::map* onlineState); /** - * @brief 获取etcd的版本并检查版本一致性 - * @param[out] version 版本 - * @param[out] failedList 查询version失败的地址列表 - * @return 成功返回0,失败返回-1 + *
@brief Get the version of etcd and check version consistency + * @param[out] version Version + * @param[out] failedList List of addresses for which the version query + * failed + * @return returns 0 for success, -1 for failure */ virtual int GetAndCheckEtcdVersion(std::string* version, std::vector* failedList); diff --git a/src/tools/mds_client.cpp b/src/tools/mds_client.cpp index 4db8bb81f0..50c0eb448b 100644 --- a/src/tools/mds_client.cpp +++ b/src/tools/mds_client.cpp @@ -34,12 +34,11 @@ int MDSClient::Init(const std::string& mdsAddr) { return Init(mdsAddr, std::to_string(kDefaultMdsDummyPort)); } -int MDSClient::Init(const std::string& mdsAddr, - const std::string& dummyPort) { +int MDSClient::Init(const std::string& mdsAddr, const std::string& dummyPort) { if (isInited_) { return 0; } - // 初始化channel + // Initialize channel curve::common::SplitString(mdsAddr, ",", &mdsAddrVec_); if (mdsAddrVec_.empty()) { std::cout << "Split mds address fail!" << std::endl; return -1; } @@ -57,7 +56,7 @@ int MDSClient::Init(const std::string& mdsAddr, std::cout << "Init channel to " << mdsAddr << "fail!" << std::endl; continue; } - // 寻找哪个mds存活 + // Find which mds is alive curve::mds::topology::ListPhysicalPoolRequest request; curve::mds::topology::ListPhysicalPoolResponse response; curve::mds::topology::TopologyService_Stub stub(&channel_); @@ -83,7 +82,7 @@ int MDSClient::InitDummyServerMap(const std::string& dummyPort) { std::cout << "split dummy server fail!" << std::endl; return -1; } - // 只指定了一个端口,对所有mds采用这个端口 + // Only one port is specified, so use this port for all mds if (dummyPortVec.size() == 1) { for (uint64_t i = 0; i < mdsAddrVec_.size() - 1; ++i) { dummyPortVec.emplace_back(dummyPortVec[0]); @@ -92,7 +91,8 @@ if (dummyPortVec.size() != mdsAddrVec_.size()) { std::cout << "mds dummy port list must be correspond as" - " mds addr list" << std::endl; + " mds addr list" + << std::endl; return -1; } @@ -109,8 +109,7 @@ return 0; } -int MDSClient::GetFileInfo(const std::string &fileName, - FileInfo* fileInfo) { +int MDSClient::GetFileInfo(const std::string& fileName, FileInfo* fileInfo) { assert(fileInfo != nullptr); curve::mds::GetFileInfoRequest request; curve::mds::GetFileInfoResponse response; @@ -123,13 +122,12 @@ int MDSClient::GetFileInfo(const std::string &fileName, std::cout << "GetFileInfo info from all mds fail!"
<< std::endl; return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { fileInfo->CopyFrom(response.fileinfo()); return 0; } - std::cout << "GetFileInfo fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "GetFileInfo fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -150,19 +148,18 @@ int MDSClient::GetAllocatedSize(const std::string& fileName, *allocSize = response.allocatedsize(); if (allocMap) { for (auto it = response.allocsizemap().begin(); - it != response.allocsizemap().end(); ++it) { + it != response.allocsizemap().end(); ++it) { allocMap->emplace(it->first, it->second); } } return 0; } - std::cout << "GetAllocatedSize fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "GetAllocatedSize fail with errCode: " << response.statuscode() + << std::endl; return -1; } -int MDSClient::GetFileSize(const std::string& fileName, - uint64_t* fileSize) { +int MDSClient::GetFileSize(const std::string& fileName, uint64_t* fileSize) { assert(fileSize != nullptr); curve::mds::GetFileSizeRequest request; curve::mds::GetFileSizeResponse response; @@ -178,8 +175,8 @@ int MDSClient::GetFileSize(const std::string& fileName, *fileSize = response.filesize(); return 0; } - std::cout << "GetAllocatedSize fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "GetAllocatedSize fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -200,8 +197,7 @@ int MDSClient::ListDir(const std::string& dirName, std::cout << "ListDir from all mds fail!" << std::endl; return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { for (int i = 0; i < response.fileinfo_size(); ++i) { files->emplace_back(response.fileinfo(i)); } @@ -213,8 +209,8 @@ int MDSClient::ListDir(const std::string& dirName, } GetSegmentRes MDSClient::GetSegmentInfo(const std::string& fileName, - uint64_t offset, - PageFileSegment* segment) { + uint64_t offset, + PageFileSegment* segment) { if (!segment) { std::cout << "The argument is a null pointer!" << std::endl; return GetSegmentRes::kOtherError; @@ -260,13 +256,13 @@ int MDSClient::DeleteFile(const std::string& fileName, bool forcedelete) { } if (response.has_statuscode() && - (response.statuscode() == StatusCode::kOK || - response.statuscode() == StatusCode::kFileNotExists || - response.statuscode() == StatusCode::kFileUnderDeleting)) { + (response.statuscode() == StatusCode::kOK || + response.statuscode() == StatusCode::kFileNotExists || + response.statuscode() == StatusCode::kFileUnderDeleting)) { return 0; } - std::cout << "DeleteFile fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "DeleteFile fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -294,8 +290,7 @@ int MDSClient::CreateFile(const CreateFileContext& context) { return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { return 0; } std::cout << "CreateFile fail with errCode: " @@ -316,19 +311,18 @@ int MDSClient::ExtendVolume(const std::string& fileName, uint64_t newSize) { return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { - std::cout << "extendFile success!" 
<< std::endl; + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { + std::cout << "extendFile success!" << std::endl; return 0; } - std::cout << "extendFile fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "extendFile fail with errCode: " << response.statuscode() + << std::endl; return -1; } int MDSClient::ListVolumesOnCopyset( - const std::vector& copysets, - std::vector* fileNames) { + const std::vector& copysets, + std::vector* fileNames) { curve::mds::ListVolumesOnCopysetsRequest request; curve::mds::ListVolumesOnCopysetsResponse response; for (const auto& copyset : copysets) { @@ -343,8 +337,7 @@ int MDSClient::ListVolumesOnCopyset( return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { for (int i = 0; i < response.filenames_size(); ++i) { fileNames->emplace_back(response.filenames(i)); } @@ -373,31 +366,30 @@ int MDSClient::ListClient(std::vector* clientAddrs, return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { for (int i = 0; i < response.clientinfos_size(); ++i) { const auto& clientInfo = response.clientinfos(i); - std::string clientAddr = clientInfo.ip() + ":" + - std::to_string(clientInfo.port()); + std::string clientAddr = + clientInfo.ip() + ":" + std::to_string(clientInfo.port()); clientAddrs->emplace_back(clientAddr); } return 0; } - std::cout << "ListClient fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "ListClient fail with errCode: " << response.statuscode() + << std::endl; return -1; } -int MDSClient::GetChunkServerListInCopySet(const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId, - std::vector* csLocs) { +int MDSClient::GetChunkServerListInCopySet( + const PoolIdType& logicalPoolId, const CopySetIdType& copysetId, + std::vector* csLocs) { if (!csLocs) { std::cout << "The argument is a null pointer!" << std::endl; return -1; } std::vector csServerInfos; - int res = GetChunkServerListInCopySets(logicalPoolId, - {copysetId}, &csServerInfos); + int res = GetChunkServerListInCopySets(logicalPoolId, {copysetId}, + &csServerInfos); if (res != 0) { std::cout << "GetChunkServerListInCopySets fail" << std::endl; return -1; @@ -409,9 +401,10 @@ int MDSClient::GetChunkServerListInCopySet(const PoolIdType& logicalPoolId, return 0; } -int MDSClient::GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, - const std::vector& copysetIds, - std::vector* csServerInfos) { +int MDSClient::GetChunkServerListInCopySets( + const PoolIdType& logicalPoolId, + const std::vector& copysetIds, + std::vector* csServerInfos) { if (!csServerInfos) { std::cout << "The argument is a null pointer!" << std::endl; return -1; @@ -424,7 +417,8 @@ int MDSClient::GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, } curve::mds::topology::TopologyService_Stub stub(&channel_); - auto fp = &curve::mds::topology::TopologyService_Stub::GetChunkServerListInCopySets; // NOLINT + auto fp = &curve::mds::topology::TopologyService_Stub:: + GetChunkServerListInCopySets; // NOLINT if (SendRpcToMds(&request, &response, &stub, fp) != 0) { std::cout << "GetChunkServerListInCopySets from all mds fail!" 
<< std::endl; @@ -432,7 +426,7 @@ int MDSClient::GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.csinfo_size(); ++i) { csServerInfos->emplace_back(response.csinfo(i)); } @@ -444,7 +438,7 @@ int MDSClient::GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, } int MDSClient::ListPhysicalPoolsInCluster( - std::vector* pools) { + std::vector* pools) { if (!pools) { std::cout << "The argument is a null pointer!" << std::endl; return -1; @@ -455,20 +449,19 @@ int MDSClient::ListPhysicalPoolsInCluster( auto fp = &curve::mds::topology::TopologyService_Stub::ListPhysicalPool; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "ListPhysicalPool from all mds fail!" - << std::endl; + std::cout << "ListPhysicalPool from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.physicalpoolinfos_size(); ++i) { pools->emplace_back(response.physicalpoolinfos(i)); } return 0; } - std::cout << "ListPhysicalPool fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "ListPhysicalPool fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -481,8 +474,8 @@ int MDSClient::ListLogicalPoolsInCluster(std::vector* pools) { } for (const auto& phyPool : phyPools) { std::vector lgPools; - ret = ListLogicalPoolsInPhysicalPool(phyPool.physicalpoolid(), - &lgPools); + ret = + ListLogicalPoolsInPhysicalPool(phyPool.physicalpoolid(), &lgPools); if (ret != 0) { std::cout << "ListLogicalPoolsInPhysicalPool " << phyPool.physicalpoolid() << " fail" << std::endl; @@ -493,8 +486,8 @@ int MDSClient::ListLogicalPoolsInCluster(std::vector* pools) { return 0; } -int MDSClient::ListLogicalPoolsInPhysicalPool(const PoolIdType& id, - std::vector* pools) { +int MDSClient::ListLogicalPoolsInPhysicalPool( + const PoolIdType& id, std::vector* pools) { assert(pools != nullptr); curve::mds::topology::ListLogicalPoolRequest request; curve::mds::topology::ListLogicalPoolResponse response; @@ -503,20 +496,19 @@ int MDSClient::ListLogicalPoolsInPhysicalPool(const PoolIdType& id, auto fp = &curve::mds::topology::TopologyService_Stub::ListLogicalPool; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "ListLogicalPool from all mds fail!" - << std::endl; + std::cout << "ListLogicalPool from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.logicalpoolinfos_size(); ++i) { pools->emplace_back(response.logicalpoolinfos(i)); } return 0; } - std::cout << "ListLogicalPool fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "ListLogicalPool fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -530,20 +522,19 @@ int MDSClient::ListZoneInPhysicalPool(const PoolIdType& id, auto fp = &curve::mds::topology::TopologyService_Stub::ListPoolZone; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "ListPoolZone from all mds fail!" - << std::endl; + std::cout << "ListPoolZone from all mds fail!" 
<< std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.zones_size(); ++i) { zones->emplace_back(response.zones(i)); } return 0; } - std::cout << "ListPoolZone fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "ListPoolZone fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -557,55 +548,54 @@ int MDSClient::ListServersInZone(const ZoneIdType& id, auto fp = &curve::mds::topology::TopologyService_Stub::ListZoneServer; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "ListZoneServer from all mds fail!" - << std::endl; + std::cout << "ListZoneServer from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.serverinfo_size(); ++i) { servers->emplace_back(response.serverinfo(i)); } return 0; } - std::cout << "ListZoneServer fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "ListZoneServer fail with errCode: " << response.statuscode() + << std::endl; return -1; } -int MDSClient::ListChunkServersOnServer(const ServerIdType& id, - std::vector* chunkservers) { +int MDSClient::ListChunkServersOnServer( + const ServerIdType& id, std::vector* chunkservers) { assert(chunkservers != nullptr); curve::mds::topology::ListChunkServerRequest request; request.set_serverid(id); return ListChunkServersOnServer(&request, chunkservers); } -int MDSClient::ListChunkServersOnServer(const std::string& ip, - std::vector* chunkservers) { +int MDSClient::ListChunkServersOnServer( + const std::string& ip, std::vector* chunkservers) { assert(chunkservers != nullptr); curve::mds::topology::ListChunkServerRequest request; request.set_ip(ip); return ListChunkServersOnServer(&request, chunkservers); } -int MDSClient::ListChunkServersOnServer(ListChunkServerRequest* request, - std::vector* chunkservers) { +int MDSClient::ListChunkServersOnServer( + ListChunkServerRequest* request, + std::vector* chunkservers) { curve::mds::topology::ListChunkServerResponse response; curve::mds::topology::TopologyService_Stub stub(&channel_); auto fp = &curve::mds::topology::TopologyService_Stub::ListChunkServer; if (SendRpcToMds(request, &response, &stub, fp) != 0) { - std::cout << "ListChunkServer from all mds fail!" - << std::endl; + std::cout << "ListChunkServer from all mds fail!" 
<< std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.chunkserverinfos_size(); ++i) { const auto& chunkserver = response.chunkserverinfos(i); - // 跳过retired状态的chunkserver + // Skipping chunkserver in Retired State if (chunkserver.status() == ChunkServerStatus::RETIRED) { continue; } @@ -613,9 +603,9 @@ int MDSClient::ListChunkServersOnServer(ListChunkServerRequest* request, } return 0; } - std::cout << "ListChunkServer fail with errCode: " - << response.statuscode() << std::endl; - return -1; + std::cout << "ListChunkServer fail with errCode: " << response.statuscode() + << std::endl; + return -1; } int MDSClient::GetChunkServerInfo(const ChunkServerIdType& id, @@ -653,23 +643,22 @@ int MDSClient::GetChunkServerInfo(GetChunkServerInfoRequest* request, auto fp = &curve::mds::topology::TopologyService_Stub::GetChunkServer; if (SendRpcToMds(request, &response, &stub, fp) != 0) { - std::cout << "GetChunkServer from all mds fail!" - << std::endl; + std::cout << "GetChunkServer from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { chunkserver->CopyFrom(response.chunkserverinfo()); return 0; } - std::cout << "GetChunkServer fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "GetChunkServer fail with errCode: " << response.statuscode() + << std::endl; return -1; } int MDSClient::GetCopySetsInChunkServer(const ChunkServerIdType& id, - std::vector* copysets) { + std::vector* copysets) { assert(copysets != nullptr); curve::mds::topology::GetCopySetsInChunkServerRequest request; curve::mds::topology::GetCopySetsInChunkServerResponse response; @@ -678,7 +667,7 @@ int MDSClient::GetCopySetsInChunkServer(const ChunkServerIdType& id, } int MDSClient::GetCopySetsInChunkServer(const std::string& csAddr, - std::vector* copysets) { + std::vector* copysets) { assert(copysets != nullptr); curve::mds::topology::GetCopySetsInChunkServerRequest request; curve::mds::topology::GetCopySetsInChunkServerResponse response; @@ -697,7 +686,7 @@ int MDSClient::GetCopySetsInChunkServer(const std::string& csAddr, } int MDSClient::SetCopysetsAvailFlag(const std::vector copysets, - bool availFlag) { + bool availFlag) { curve::mds::topology::SetCopysetsAvailFlagRequest request; curve::mds::topology::SetCopysetsAvailFlagResponse response; curve::mds::topology::TopologyService_Stub stub(&channel_); @@ -708,13 +697,12 @@ int MDSClient::SetCopysetsAvailFlag(const std::vector copysets, request.set_availflag(availFlag); auto fp = &curve::mds::topology::TopologyService_Stub::SetCopysetsAvailFlag; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "SetCopysetsAvailFlag from all mds fail!" - << std::endl; + std::cout << "SetCopysetsAvailFlag from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { return 0; } std::cout << "SetCopysetsAvailFlag fail with errCode: " @@ -728,13 +716,12 @@ int MDSClient::ListUnAvailCopySets(std::vector* copysets) { curve::mds::topology::TopologyService_Stub stub(&channel_); auto fp = &curve::mds::topology::TopologyService_Stub::ListUnAvailCopySets; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "ListUnAvailCopySets from all mds fail!" 
- << std::endl; + std::cout << "ListUnAvailCopySets from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.copysets_size(); ++i) { copysets->emplace_back(response.copysets(i)); } @@ -746,21 +733,21 @@ int MDSClient::ListUnAvailCopySets(std::vector* copysets) { } int MDSClient::GetCopySetsInChunkServer( - GetCopySetsInChunkServerRequest* request, - std::vector* copysets) { + GetCopySetsInChunkServerRequest* request, + std::vector* copysets) { curve::mds::topology::GetCopySetsInChunkServerResponse response; curve::mds::topology::TopologyService_Stub stub(&channel_); - auto fp = &curve::mds::topology::TopologyService_Stub::GetCopySetsInChunkServer; // NOLINT + auto fp = &curve::mds::topology::TopologyService_Stub:: + GetCopySetsInChunkServer; // NOLINT if (SendRpcToMds(request, &response, &stub, fp) != 0) { - std::cout << "GetCopySetsInChunkServer from all mds fail!" - << std::endl; + std::cout << "GetCopySetsInChunkServer from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { - for (int i =0; i < response.copysetinfos_size(); ++i) { + response.statuscode() == kTopoErrCodeSuccess) { + for (int i = 0; i < response.copysetinfos_size(); ++i) { copysets->emplace_back(response.copysetinfos(i)); } return 0; @@ -783,14 +770,13 @@ int MDSClient::GetCopySetsInCluster(std::vector* copysets, auto fp = &curve::mds::topology::TopologyService_Stub::GetCopySetsInCluster; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "GetCopySetsInCluster from all mds fail!" - << std::endl; + std::cout << "GetCopySetsInCluster from all mds fail!" 
<< std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { - for (int i =0; i < response.copysetinfos_size(); ++i) { + response.statuscode() == kTopoErrCodeSuccess) { + for (int i = 0; i < response.copysetinfos_size(); ++i) { copysets->emplace_back(response.copysetinfos(i)); } return 0; @@ -800,9 +786,7 @@ int MDSClient::GetCopySetsInCluster(std::vector* copysets, return -1; } - -int MDSClient::GetCopyset(PoolIdType lpid, - CopySetIdType copysetId, +int MDSClient::GetCopyset(PoolIdType lpid, CopySetIdType copysetId, CopysetInfo* copysetInfo) { curve::mds::topology::GetCopysetRequest request; curve::mds::topology::GetCopysetResponse response; @@ -843,8 +827,8 @@ int MDSClient::ListServersInCluster(std::vector* servers) { } for (const auto& zone : zones) { if (ListServersInZone(zone.zoneid(), servers) != 0) { - std::cout << "ListServersInZone fail, zoneId :" - << zone.zoneid() << std::endl; + std::cout << "ListServersInZone fail, zoneId :" << zone.zoneid() + << std::endl; return -1; } } @@ -853,7 +837,7 @@ int MDSClient::ListServersInCluster(std::vector* servers) { } int MDSClient::ListChunkServersInCluster( - std::vector* chunkservers) { + std::vector* chunkservers) { assert(chunkservers != nullptr); std::vector servers; if (ListServersInCluster(&servers) != 0) { @@ -869,8 +853,8 @@ int MDSClient::ListChunkServersInCluster( return 0; } -int MDSClient::ListChunkServersInCluster(std::map>* chunkservers) { +int MDSClient::ListChunkServersInCluster( + std::map>* chunkservers) { assert(chunkservers != nullptr); std::vector servers; if (ListServersInCluster(&servers) != 0) { @@ -880,8 +864,8 @@ int MDSClient::ListChunkServersInCluster(std::map chunkserverList; - if (ListChunkServersOnServer(server.serverid(), - &chunkserverList) != 0) { + if (ListChunkServersOnServer(server.serverid(), &chunkserverList) != + 0) { std::cout << "ListChunkServersOnServer fail!" 
<< std::endl; return -1; } @@ -889,7 +873,7 @@ int MDSClient::ListChunkServersInCluster(std::mapfind(server.physicalpoolid()); if (iter != chunkservers->end()) { iter->second.insert(iter->second.end(), chunkserverList.begin(), - chunkserverList.end()); + chunkserverList.end()); } else { chunkservers->emplace(server.physicalpoolid(), chunkserverList); } @@ -900,8 +884,8 @@ int MDSClient::ListChunkServersInCluster(std::map* onlineStatus) { assert(onlineStatus != nullptr); onlineStatus->clear(); - for (const auto &item : dummyServerMap_) { + for (const auto& item : dummyServerMap_) { std::string listenAddr; int res = GetListenAddrFromDummyPort(item.second, &listenAddr); - // 如果获取到的监听地址与记录的mds地址不一致,也认为不在线 + // If the obtained listening address does not match the recorded MDS + // address, it is also considered offline if (res != 0 || listenAddr != item.first) { onlineStatus->emplace(item.first, false); continue; @@ -943,7 +928,7 @@ int MDSClient::GetMetric(const std::string& metricName, std::string* value) { while (changeTimeLeft >= 0) { brpc::Controller cntl; MetricRet res = metricClient_.GetMetric(mdsAddrVec_[currentMdsIndex_], - metricName, value); + metricName, value); if (res == MetricRet::kOK) { return 0; } @@ -962,8 +947,7 @@ bool MDSClient::ChangeMDServer() { if (currentMdsIndex_ > static_cast(mdsAddrVec_.size() - 1)) { currentMdsIndex_ = 0; } - if (channel_.Init(mdsAddrVec_[currentMdsIndex_].c_str(), - nullptr) != 0) { + if (channel_.Init(mdsAddrVec_[currentMdsIndex_].c_str(), nullptr) != 0) { return false; } return true; @@ -971,14 +955,14 @@ bool MDSClient::ChangeMDServer() { std::vector MDSClient::GetCurrentMds() { std::vector leaderAddrs; - for (const auto &item : dummyServerMap_) { - // 获取status来判断正在服务的地址 + for (const auto& item : dummyServerMap_) { + // Obtain status to determine the address being served std::string status; - MetricRet ret = metricClient_.GetMetric(item.second, - kMdsStatusMetricName, &status); + MetricRet ret = + metricClient_.GetMetric(item.second, kMdsStatusMetricName, &status); if (ret != MetricRet::kOK) { - std::cout << "Get status metric from " << item.second - << " fail" << std::endl; + std::cout << "Get status metric from " << item.second << " fail" + << std::endl; continue; } if (status == kMdsStatusLeader) { @@ -995,7 +979,8 @@ int MDSClient::RapidLeaderSchedule(PoolIdType lpoolId) { request.set_logicalpoolid(lpoolId); - auto fp = &::curve::mds::schedule::ScheduleService_Stub::RapidLeaderSchedule; // NOLINT + auto fp = &::curve::mds::schedule::ScheduleService_Stub:: + RapidLeaderSchedule; // NOLINT if (0 != SendRpcToMds(&request, &response, &stub, fp)) { std::cout << "RapidLeaderSchedule fail" << std::endl; return -1; @@ -1006,7 +991,7 @@ int MDSClient::RapidLeaderSchedule(PoolIdType lpoolId) { return 0; } std::cout << "RapidLeaderSchedule fail with errCode: " - << response.statuscode() << std::endl; + << response.statuscode() << std::endl; return -1; } @@ -1027,8 +1012,8 @@ int MDSClient::SetLogicalPoolScanState(PoolIdType lpid, bool scanEnable) { auto retCode = response.statuscode(); if (retCode != ::curve::mds::topology::kTopoErrCodeSuccess) { - std::cout << "SetLogicalPoolScanState fail with retCode: " - << retCode << std::endl; + std::cout << "SetLogicalPoolScanState fail with retCode: " << retCode + << std::endl; return -1; } @@ -1037,7 +1022,7 @@ int MDSClient::SetLogicalPoolScanState(PoolIdType lpid, bool scanEnable) { int MDSClient::QueryChunkServerRecoverStatus( const std::vector& cs, - std::map *statusMap) { + std::map* statusMap) { 
assert(statusMap != nullptr); ::curve::mds::schedule::QueryChunkServerRecoverStatusRequest request; ::curve::mds::schedule::QueryChunkServerRecoverStatusResponse response; @@ -1047,7 +1032,8 @@ int MDSClient::QueryChunkServerRecoverStatus( request.add_chunkserverid(id); } - auto fp = &::curve::mds::schedule::ScheduleService_Stub::QueryChunkServerRecoverStatus; // NOLINT + auto fp = &::curve::mds::schedule::ScheduleService_Stub:: + QueryChunkServerRecoverStatus; // NOLINT if (0 != SendRpcToMds(&request, &response, &stub, fp)) { std::cout << "QueryChunkServerRecoverStatus fail" << std::endl; return -1; @@ -1056,13 +1042,13 @@ int MDSClient::QueryChunkServerRecoverStatus( if (response.statuscode() == ::curve::mds::schedule::kScheduleErrCodeSuccess) { for (auto it = response.recoverstatusmap().begin(); - it != response.recoverstatusmap().end(); ++it) { + it != response.recoverstatusmap().end(); ++it) { (*statusMap)[it->first] = it->second; } return 0; } std::cout << "QueryChunkServerRecoverStatus fail with errCode: " - << response.statuscode() << std::endl; + << response.statuscode() << std::endl; return -1; } @@ -1095,21 +1081,22 @@ int MDSClient::UpdateFileThrottleParams( template int MDSClient::SendRpcToMds(Request* request, Response* response, T* obp, - void (T::*func)(google::protobuf::RpcController*, - const Request*, Response*, - google::protobuf::Closure*)) { + void (T::*func)(google::protobuf::RpcController*, + const Request*, Response*, + google::protobuf::Closure*)) { int changeTimeLeft = mdsAddrVec_.size() - 1; while (changeTimeLeft >= 0) { brpc::Controller cntl; cntl.set_timeout_ms(FLAGS_rpcTimeout); (obp->*func)(&cntl, request, response, nullptr); if (!cntl.Failed()) { - // 如果成功了,就返回0,对response的判断放到上一层 + // If the RPC succeeds, return 0; validation of the response is + // left to the caller return 0; } - bool needRetry = (cntl.ErrorCode() != EHOSTDOWN && - cntl.ErrorCode() != ETIMEDOUT && - cntl.ErrorCode() != brpc::ELOGOFF); + bool needRetry = + (cntl.ErrorCode() != EHOSTDOWN && cntl.ErrorCode() != ETIMEDOUT && + cntl.ErrorCode() != brpc::ELOGOFF); uint64_t retryTimes = 0; while (needRetry && retryTimes < FLAGS_rpcRetryTimes) { cntl.Reset(); @@ -1120,10 +1107,13 @@ int MDSClient::SendRpcToMds(Request* request, Response* response, T* obp, } return 0; } - // 对于需要重试的错误,重试次数用完了还没成功就返回错误不切换 - // ERPCTIMEDOUT比较特殊,这种情况下,mds可能切换了也可能没切换,所以 - // 需要重试并且重试次数用完后切换 - // 只有不需要重试的,也就是mds不在线的才会去切换mds + // For retryable errors, if the retry budget is exhausted without + // success, return an error without switching mds. ERPCTIMEDOUT is a + // special case: the mds may or may not have switched, so retry first + // and switch once the retries are used up. Only non-retryable errors, + // i.e. the mds being offline, trigger an immediate switch to another + // mds.
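// A minimal standalone sketch of the failover policy described in the
// comment above, reduced to the error-code classification. NextStep and
// NextStepAfterRpcFailure are hypothetical names used for illustration
// only; the real logic lives inline in SendRpcToMds.
//
//     #include <cerrno>           // EHOSTDOWN, ETIMEDOUT
//     #include <brpc/errno.pb.h>  // brpc::ELOGOFF, brpc::ERPCTIMEDOUT
//
//     enum class NextStep { kRetryThenFail, kRetryThenSwitch, kSwitchNow };
//
//     inline NextStep NextStepAfterRpcFailure(int errCode) {
//         bool needRetry = errCode != EHOSTDOWN && errCode != ETIMEDOUT &&
//                          errCode != brpc::ELOGOFF;
//         if (!needRetry) return NextStep::kSwitchNow;     // mds offline
//         if (errCode == brpc::ERPCTIMEDOUT)               // ambiguous case
//             return NextStep::kRetryThenSwitch;
//         return NextStep::kRetryThenFail;                 // plain retryable
//     }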
if (needRetry && cntl.ErrorCode() != brpc::ERPCTIMEDOUT) { std::cout << "Send RPC to mds fail, error content: " << cntl.ErrorText() << std::endl; diff --git a/src/tools/mds_client.h b/src/tools/mds_client.h index fbbc94ffab..46948a8a2a 100644 --- a/src/tools/mds_client.h +++ b/src/tools/mds_client.h @@ -23,29 +23,29 @@ #ifndef SRC_TOOLS_MDS_CLIENT_H_ #define SRC_TOOLS_MDS_CLIENT_H_ -#include #include +#include #include -#include #include -#include #include #include +#include #include +#include #include "proto/nameserver2.pb.h" -#include "proto/topology.pb.h" #include "proto/schedule.pb.h" +#include "proto/topology.pb.h" #include "src/common/authenticator.h" -#include "src/mds/common/mds_define.h" +#include "src/common/net_common.h" #include "src/common/string_util.h" #include "src/common/timeutility.h" -#include "src/common/net_common.h" -#include "src/tools/metric_name.h" -#include "src/tools/metric_client.h" +#include "src/mds/common/mds_define.h" #include "src/tools/common.h" #include "src/tools/curve_tool_define.h" +#include "src/tools/metric_client.h" +#include "src/tools/metric_name.h" using curve::common::ChunkServerLocation; using curve::common::CopysetInfo; @@ -74,502 +74,545 @@ using curve::mds::topology::ZoneInfo; using curve::mds::schedule::RapidLeaderScheduleRequst; using curve::mds::schedule::RapidLeaderScheduleResponse; -using curve::common::Authenticator; - -namespace curve { -namespace tool { - -using curve::mds::topology::PoolsetInfo; - -enum class GetSegmentRes { - kOK = 0, // 获取segment成功 - kSegmentNotAllocated = -1, // segment不存在 - kFileNotExists = -2, // 文件不存在 - kOtherError = -3 // 其他错误 -}; - -using AllocMap = std::unordered_map; - -struct CreateFileContext { - curve::mds::FileType type; - std::string name; - uint64_t length; - uint64_t stripeUnit; - uint64_t stripeCount; - std::string poolset; -}; - -class MDSClient { - public: - MDSClient() : currentMdsIndex_(0), userName_(""), - password_(""), isInited_(false) {} - virtual ~MDSClient() = default; - - /** - * @brief 初始化channel - * @param mdsAddr mds的地址,支持多地址,用","分隔 - * @return 成功返回0,失败返回-1 - */ - virtual int Init(const std::string& mdsAddr); - - /** - * @brief 初始化channel - * @param mdsAddr mds的地址,支持多地址,用","分隔 - * @param dummyPort dummy port列表,只输入一个的话 - * 所有mds用同样的dummy port,用字符串分隔有多个的话 - * 为每个mds设置不同的dummy port - * @return 成功返回0,失败返回-1 - */ - virtual int Init(const std::string& mdsAddr, - const std::string& dummyPort); - - /** - * @brief 获取文件fileInfo - * @param fileName 文件名 - * @param[out] fileInfo 文件fileInfo,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetFileInfo(const std::string& fileName, FileInfo* fileInfo); - - /** - * @brief 获取文件或目录分配大小 - * @param fileName 文件名 - * @param[out] allocSize 文件或目录分配大小,返回值为0时有效 - * @param[out] allocMap 文件在各个池子分配的情况 - * @return 成功返回0,失败返回-1 - */ - virtual int GetAllocatedSize(const std::string& fileName, - uint64_t* allocSize, - AllocMap* allocMap = nullptr); - - /** - * @brief 获取文件或目录的大小 - * @param fileName 文件名 - * @param[out] fileSize 文件或目录分配大小,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetFileSize(const std::string& fileName, - uint64_t* fileSize); - - /** - * @brief 将目录下所有的fileInfo列出来 - * @param dirName 目录名 - * @param[out] files 目录下的所有文件fileInfo,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListDir(const std::string& dirName, - std::vector* files); - - /** - * @brief 获取指定偏移的segment放到segment里面 - * @param fileName 文件名 - * @param offset 偏移值 - * @param[out] segment 文件中指定偏移的segmentInfo,返回值为0时有效 - * @return 返回GetSegmentRes,区分segment未分配和其他错误 - */ - virtual 
GetSegmentRes GetSegmentInfo(const std::string& fileName, - uint64_t offset, - PageFileSegment* segment); - - /** - * @brief 删除文件 - * @param fileName 文件名 - * @param forcedelete 是否强制删除 - * @return 成功返回0,失败返回-1 - */ - virtual int DeleteFile(const std::string& fileName, - bool forcedelete = false); - - /** - * @brief create pageFile or directory - * @param fileName file name or dir name - * @param length 文件长度 - * @param normalFile is file or dir - * @param stripeUnit stripe unit size - * @param stripeCount the amount of stripes - * @return 成功返回0,失败返回-1 - */ - virtual int CreateFile(const CreateFileContext& context); - - /** - * @brief List all volumes on copysets - * @param copysets - * @param[out] fileNames volumes name - * @return return 0 when success, -1 when fail - */ - virtual int ListVolumesOnCopyset( - const std::vector& copysets, - std::vector* fileNames); - - /** - * @brief 扩容卷 - * @param fileName 文件名 - * @param newSize 扩容后的卷大小 - * @return 成功返回0,失败返回-1 - */ - virtual int ExtendVolume(const std::string& fileName, uint64_t newSize); - - /** - * @brief 列出client的dummyserver的地址 - * @param[out] clientAddrs client地址列表,返回0时有效 - * @param[out] listClientsInRepo 把数据库里的client也列出来 - * @return 成功返回0,失败返回-1 - */ - virtual int ListClient(std::vector* clientAddrs, - bool listClientsInRepo = false); - - /** - * @brief 获取copyset中的chunkserver列表 - * @param logicalPoolId 逻辑池id - * @param copysetId copyset id - * @param[out] csLocs chunkserver位置的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetChunkServerListInCopySet(const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId, - std::vector* csLocs); - - /** - * @brief 获取copyset中的chunkserver列表 - * @param logicalPoolId 逻辑池id - * @param copysetIds 要查询的copysetId的列表 - * @param[out] csServerInfos copyset成员的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, - const std::vector& copysetIds, - std::vector* csServerInfos); - - /** - * @brief 获取集群中的物理池列表 - * @param[out] pools 物理池信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListPhysicalPoolsInCluster( - std::vector* pools); - - - /** - * @brief 获取物理池中的逻辑池列表 - * @param id 物理池id - * @param[out] pools 逻辑池信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListLogicalPoolsInPhysicalPool(const PoolIdType& id, - std::vector* pools); - - /** - * @brief 集群中的逻辑池列表 - * @param[out] pools 逻辑池信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListLogicalPoolsInCluster(std::vector* pools); - - /** - * @brief 获取物理池中的zone列表 - * @param id 物理池id - * @param[out] zones zone信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListZoneInPhysicalPool(const PoolIdType& id, - std::vector* zones); - - /** - * @brief 获取zone中的server列表 - * @param id zone id - * @param[out] servers server信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListServersInZone(const ZoneIdType& id, - std::vector* servers); - - /** - * @brief 获取server上的chunkserver的列表 - * @param id server id - * @param[out] chunkservers chunkserver信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListChunkServersOnServer(const ServerIdType& id, - std::vector* chunkservers); - - /** - * @brief 获取server上的chunkserver的列表 - * @param ip server ip - * @param[out] chunkservers chunkserver信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListChunkServersOnServer(const std::string& ip, - std::vector* chunkservers); - - /** - * @brief 获取chunkserver的详细信息 - * @param id chunkserver id - * @param[out] chunkserver chunkserver的详细信息,返回值为0时有效 - * @return 
成功返回0,失败返回-1 - */ - virtual int GetChunkServerInfo(const ChunkServerIdType& id, - ChunkServerInfo* chunkserver); - - /** - * @brief 获取chunkserver的详细信息 - * @param csAddr chunkserver的地址,ip:port的格式 - * @param[out] chunkserver chunkserver的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetChunkServerInfo(const std::string& csAddr, - ChunkServerInfo* chunkserver); - - /** - * @brief 获取chunkserver上的所有copyset - * @param id chunkserver的id - * @param[out] copysets chunkserver上copyset的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetCopySetsInChunkServer(const ChunkServerIdType& id, - std::vector* copysets); - - /** - * @brief 获取chunkserver上的所有copyset - * @param csAddr chunkserver的地址,ip:port的格式 - * @param[out] copysets chunkserver上copyset的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetCopySetsInChunkServer(const std::string& csAddr, - std::vector* copysets); - - /** - * @brief Get all copysets in cluster - * @param[out] the copyset list - * @param[in] filterScaning whether need to filter copyset which in scaning - * @return 0 if success, else return -1 - */ - virtual int GetCopySetsInCluster(std::vector* copysetInfos, - bool filterScaning = false); - - /** - * @brief Get specify copyset - * @param[in] lpid logical pool id - * @param[in] copysetId copyset id - * @param[out] copysetInfo the copyset - * @return 0 if success, else return -1 - */ - virtual int GetCopyset(PoolIdType lpid, - CopySetIdType copysetId, - CopysetInfo* copysetInfo); - - /** - * @brief 列出集群中的所有server - * @param[out] servers server信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListServersInCluster(std::vector* servers); - - /** - * @brief 列出集群中的所有chunkserver - * @param[out] chunkservers server信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListChunkServersInCluster( - std::vector* chunkservers); - - /** - * @brief list all the chunkservers with poolid in cluster - * @param[out] chunkservers chunkserver info - * @return succeed return 0; failed return -1; - */ - virtual int ListChunkServersInCluster(std::map>* chunkservers); - - /** - * @brief set copysets available flag - * @param copysets copysets going to be set available flag - * @param availFlag availble or not - * @return succeed return 0; failed return -1; - */ - virtual int SetCopysetsAvailFlag(const std::vector copysets, - bool availFlag); - - /** - * @brief list all copysets that are unavailable - * @param[out] copysets copysets that are not availble currently - * @return succeed return 0; failed return -1; - */ - virtual int ListUnAvailCopySets(std::vector* copysets); - - /** - * @brief 获取mds的某个metric的值 - * @param metricName metric的名字 - * @param[out] value metric的值,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetMetric(const std::string& metricName, uint64_t* value); - - /** - * @brief 获取mds的某个metric的值 - * @param metricName metric的名子 - * @param[out] value metric的值,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetMetric(const std::string& metricName, std::string* value); - - /** - * @brief 设置userName,访问namespace接口的时候调用 - * @param userName 用户名 - */ - void SetUserName(const std::string& userName) { - userName_ = userName; - } - - /** - * @brief 设置password,访问namespace接口的时候调用 - * @param password 密码 - */ - void SetPassword(const std::string& password) { - password_ = password; - } - - /** - * @brief 获取mds地址列表 - * @return mds地址的列表 - */ - virtual const std::vector& GetMdsAddrVec() const { - return mdsAddrVec_; - } - - virtual const std::map& GetDummyServerMap() - const { - return dummyServerMap_; - } - - 
/** - * @brief 获取当前mds的地址 - */ - virtual std::vector GetCurrentMds(); - - /** - * @brief 向mds发送rpc触发快速leader均衡 - */ - virtual int RapidLeaderSchedule(PoolIdType lpid); - - /** - * @brief Set specify logical pool to enable/disable scan - * @param[in] lpid logical pool id - * @param[in] scanEnable enable(true)/disable(false) scan - * @return 0 if set success, else return -1 - */ - virtual int SetLogicalPoolScanState(PoolIdType lpid, bool scanEnable); - - /** - * @brief 获取mds在线状态, - * dummyserver在线且dummyserver记录的listen addr - * 与mds地址一致才认为在线 - * @param[out] onlineStatus mds在线状态,返回0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual void GetMdsOnlineStatus(std::map* onlineStatus); - - /** - * @brief 获取指定chunkserver的恢复状态 - * @param[in] cs 需要查询的chunkserver列表 - * @param[out] statusMap 返回各chunkserver对应的恢复状态 - * @return 成功返回0,失败返回-1 - */ - int QueryChunkServerRecoverStatus( - const std::vector& cs, - std::map *statusMap); - - virtual int UpdateFileThrottleParams( - const std::string& fileName, const curve::mds::ThrottleParams& params); - - int ListPoolset(std::vector* poolsets); - - int ListChunkFormatStatus(std::vector* formatStatuses); - - private: - /** - * @brief 切换mds - * @return 切换成功返回true,所有mds都失败则返回false - */ - bool ChangeMDServer(); - - /** - * @brief 向mds发送RPC,为了复用代码 - * @param - * @return 成功返回0,失败返回-1 - */ - template - int SendRpcToMds(Request* request, Response* response, T* obp, - void (T::*func)(google::protobuf::RpcController*, - const Request*, Response*, - google::protobuf::Closure*)); - - /** - * @brief 获取server上的chunkserver的列表 - * @param request 要发送的request - * @param[out] chunkservers chunkserver信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - int ListChunkServersOnServer(ListChunkServerRequest* request, - std::vector* chunkservers); - - /** - * @brief 获取chunkserver的详细信息 - * @param request 要发送的request - * @param[out] chunkserver chunkserver的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - int GetChunkServerInfo(GetChunkServerInfoRequest* request, - ChunkServerInfo* chunkserver); - - /** - * @brief 获取chunkserver的详细信息 - * @param request 要发送的request - * @param[out] copysets chunkserver上copyset的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - int GetCopySetsInChunkServer( - GetCopySetsInChunkServerRequest* request, - std::vector* copysets); - - /** - * @brief 初始化dummy server地址 - * @param dummyPort dummy server端口列表 - * @return 成功返回0,失败返回-1 - */ - int InitDummyServerMap(const std::string& dummyPort); - - /** - * @brief 通过dummyServer获取mds的监听地址 - * @param dummyAddr dummyServer的地址 - * @param[out] listenAddr mds的监听地址 - * @return 成功返回0,失败返回-1 - */ - int GetListenAddrFromDummyPort(const std::string& dummyAddr, - std::string* listenAddr); - - - // 填充signature - template - void FillUserInfo(T* request); - - // 用于发送http请求的client - MetricClient metricClient_; - // 向mds发送RPC的channel - brpc::Channel channel_; - // 保存mds地址的vector - std::vector mdsAddrVec_; - // 保存mds地址对应的dummy server的地址 - std::map dummyServerMap_; - // 保存当前mds在mdsAddrVec_中的索引 - int currentMdsIndex_; - // 用户名 - std::string userName_; - // 密码 - std::string password_; - // 避免重复初始化 - bool isInited_; -}; -} // namespace tool -} // namespace curve - -#endif // SRC_TOOLS_MDS_CLIENT_H_ +using curve::mds::topology::ChunkServerIdType; +using curve::mds::topology::ChunkServerInfo; +using curve::mds::topology::ChunkServerStatus; +using curve::mds::topology::CopySetIdType; +using curve::mds::topology::CopySetServerInfo; +using curve::mds::topology::GetChunkServerInfoRequest; +using curve::mds::topology::GetCopySetsInChunkServerRequest; +using 
curve::mds::topology::kTopoErrCodeSuccess; +using curve::mds::topology::ListChunkServerRequest; +using curve::mds::topology::LogicalPoolInfo; +using curve::mds::topology::PhysicalPoolInfo; +using curve::mds::topology::PoolIdType; +using curve::mds::topology::ServerIdType; +using curve::mds::topology::ServerInfo; +using curve::mds::topology::ZoneIdType; +using curve::mds::topology::ZoneInfo; + +namespace curve +{ + namespace tool + { + + using curve::mds::topology::PoolsetInfo; + + enum class GetSegmentRes + { + kOK = 0, // Successfully obtained segment + kSegmentNotAllocated = -1, // segment does not exist + kFileNotExists = -2, // File does not exist + kOtherError = -3 // Other errors + }; + + using AllocMap = std::unordered_map; + + struct CreateFileContext + { + curve::mds::FileType type; + std::string name; + uint64_t length; + uint64_t stripeUnit; + uint64_t stripeCount; + std::string poolset; + }; + + class MDSClient + { + public: + MDSClient() + : currentMdsIndex_(0), userName_(""), password_(""), isInited_(false) {} + virtual ~MDSClient() = default; + + /** + * @brief Initialize channel + * @param mdsAddr Address of mds, supporting multiple addresses separated by + * ',' + * @return returns 0 for success, -1 for failure + */ + virtual int Init(const std::string &mdsAddr); + + /** + * @brief Initialize channel + * @param mdsAddr Address of mds, supporting multiple addresses separated by + * ',' + * @param dummyPort dummy port list; if a single port is given, every mds + * uses the same dummy port; if multiple ports separated by + * ',' are given, each mds gets its own dummy port + * @return returns 0 for success, -1 for failure + */ + virtual int Init(const std::string &mdsAddr, const std::string &dummyPort); + + /** + * @brief Get file fileInfo + * @param fileName File name + * @param[out] fileInfo the file's fileInfo, valid when the return value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int GetFileInfo(const std::string &fileName, FileInfo *fileInfo); + + /** + * @brief Get file or directory allocation size + * @param fileName File name + * @param[out] allocSize file or directory allocation size, valid when the + * return value is 0 + * @param[out] allocMap the file's allocation in each logical pool + * @return returns 0 for success, -1 for failure + */ + virtual int GetAllocatedSize(const std::string &fileName, + uint64_t *allocSize, + AllocMap *allocMap = nullptr); + + /** + * @brief Get the size of a file or directory + * @param fileName File name + * @param[out] fileSize file or directory size, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int GetFileSize(const std::string &fileName, uint64_t *fileSize); + + /** + * @brief List all fileInfo in the directory + * @param dirName directory name + * @param[out] files fileInfo of every file in the directory, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int ListDir(const std::string &dirName, + std::vector *files); + + /** + * @brief Get the segment at the specified offset and store it in segment + * @param fileName File name + * @param offset offset value + * @param[out] segment segmentInfo at the specified offset of the file, + * valid when the return value is 0 + * @return returns GetSegmentRes, distinguishing between unassigned segments + * and other errors + */ + virtual GetSegmentRes GetSegmentInfo(const std::string &fileName, + uint64_t offset, + PageFileSegment *segment);
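// A minimal usage sketch of the MDSClient interface above (hypothetical
// driver code, not part of this patch; error handling reduced to early
// returns):
//
//     curve::tool::MDSClient client;
//     // two mds addresses, comma separated, as documented for Init()
//     if (client.Init("127.0.0.1:6666,127.0.0.1:6667") != 0) return -1;
//     FileInfo info;  // curve::mds::FileInfo from proto/nameserver2.pb.h
//     if (client.GetFileInfo("/test", &info) != 0) return -1;
//     std::cout << "length: " << info.length() << std::endl;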
+ /** + * @brief Delete file + * @param fileName File name + * @param forcedelete whether to force delete the file + * @return returns 0 for success, -1 for failure + */ + virtual int DeleteFile(const std::string &fileName, + bool forcedelete = false); + + /** + * @brief create pageFile or directory + * @param context file-creation parameters (type, name, length, stripe + * settings and poolset), see CreateFileContext + * @return returns 0 for success, -1 for failure + */ + virtual int CreateFile(const CreateFileContext &context); + + /** + * @brief List all volumes on copysets + * @param copysets copysets to query + * @param[out] fileNames volume names + * @return return 0 when success, -1 when fail + */ + virtual int ListVolumesOnCopyset( + const std::vector &copysets, + std::vector *fileNames); + + /** + * @brief Expand volume + * @param fileName File name + * @param newSize The volume size after expansion + * @return returns 0 for success, -1 for failure + */ + virtual int ExtendVolume(const std::string &fileName, uint64_t newSize); + + /** + * @brief List the address of the client's dummyserver + * @param[out] clientAddrs client address list, valid when 0 is returned + * @param[out] listClientsInRepo also lists the clients in the database + * @return returns 0 for success, -1 for failure + */ + virtual int ListClient(std::vector *clientAddrs, + bool listClientsInRepo = false); + + /** + * @brief Get the list of chunkservers in the copyset + * @param logicalPoolId Logical Pool id + * @param copysetId copyset id + * @param[out] csLocs List of chunkserver locations, valid when the return + * value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int GetChunkServerListInCopySet( + const PoolIdType &logicalPoolId, const CopySetIdType &copysetId, + std::vector *csLocs); + + /** + * @brief Get the list of chunkservers in the copysets + * @param logicalPoolId Logical Pool ID + * @param copysetIds List of copysetIds to query + * @param[out] csServerInfos A list of copyset members, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int GetChunkServerListInCopySets( + const PoolIdType &logicalPoolId, + const std::vector &copysetIds, + std::vector *csServerInfos); + + /** + * @brief Get a list of physical pools in the cluster + * @param[out] pools A list of physical pool information, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int ListPhysicalPoolsInCluster( + std::vector *pools); + + /** + * @brief Get a list of logical pools in the physical pool + * @param id Physical pool id + * @param[out] pools List of logical pool information, valid when the return + * value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int ListLogicalPoolsInPhysicalPool( + const PoolIdType &id, std::vector *pools); + + /** + * @brief List logical pools in the cluster + * @param[out] pools List of logical pool information, valid when the return + * value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int ListLogicalPoolsInCluster(std::vector *pools); + + /** + * @brief Get the list of zones in the physical pool + * @param id Physical pool id + * @param[out] zones A list of zone information, valid when the return value + * is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int ListZoneInPhysicalPool(const PoolIdType &id, + std::vector *zones); +
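// A minimal sketch of walking the topology tree with the listing
// interfaces above (hypothetical snippet, not part of this patch; the
// accessor names physicalpoolid()/physicalpoolname() are assumed from the
// topology proto):
//
//     std::vector<PhysicalPoolInfo> pools;
//     if (client.ListPhysicalPoolsInCluster(&pools) != 0) return -1;
//     for (const auto& pool : pools) {
//         std::vector<ZoneInfo> zones;
//         if (client.ListZoneInPhysicalPool(pool.physicalpoolid(),
//                                           &zones) != 0) return -1;
//         std::cout << "pool " << pool.physicalpoolname() << " has "
//                   << zones.size() << " zones" << std::endl;
//     }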
+ /** + * @brief Get the list of servers in the zone + * @param id zone id + * @param[out] servers List of server information, valid when the return + * value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int ListServersInZone(const ZoneIdType &id, + std::vector *servers); + + /** + * @brief Get a list of chunkservers on the server + * @param id server id + * @param[out] chunkservers A list of chunkserver information, valid when + * the return value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int ListChunkServersOnServer( + const ServerIdType &id, std::vector *chunkservers); + + /** + * @brief Get a list of chunkservers on the server + * @param ip server ip + * @param[out] chunkservers A list of chunkserver information, valid when + * the return value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int ListChunkServersOnServer( + const std::string &ip, std::vector *chunkservers); + + /** + * @brief Get detailed information about chunkserver + * @param id chunkserver id + * @param[out] chunkserver detailed chunkserver information, valid when + * the return value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int GetChunkServerInfo(const ChunkServerIdType &id, + ChunkServerInfo *chunkserver); + + /** + * @brief Get detailed information about chunkserver + * @param csAddr The address of chunkserver, in the format of ip:port + * @param[out] chunkserver detailed chunkserver information, valid when + * the return value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int GetChunkServerInfo(const std::string &csAddr, + ChunkServerInfo *chunkserver); + + /** + * @brief Get all copysets on chunkserver + * @param id The id of chunkserver + * @param[out] copysets Details of copysets on chunkserver, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int GetCopySetsInChunkServer(const ChunkServerIdType &id, + std::vector *copysets); + + /** + * @brief Get all copysets on chunkserver + * @param csAddr The address of chunkserver, in the format of ip:port + * @param[out] copysets Details of copysets on chunkserver, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int GetCopySetsInChunkServer(const std::string &csAddr, + std::vector *copysets); + + /** + * @brief Get all copysets in cluster + * @param[out] copysetInfos the copyset list + * @param[in] filterScaning whether to filter out copysets being scanned + * @return 0 if success, else return -1 + */ + virtual int GetCopySetsInCluster(std::vector *copysetInfos, + bool filterScaning = false); + + /** + * @brief Get the specified copyset + * @param[in] lpid logical pool id + * @param[in] copysetId copyset id + * @param[out] copysetInfo the copyset + * @return 0 if success, else return -1 + */ + virtual int GetCopyset(PoolIdType lpid, CopySetIdType copysetId, + CopysetInfo *copysetInfo); + + /** + * @brief List all servers in the cluster + * @param[out] servers List of server information, valid when the return + * value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int ListServersInCluster(std::vector *servers); + + /** + * @brief List all chunkservers in the cluster + * @param[out] chunkservers A list of chunkserver information, valid when + * the return value is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int ListChunkServersInCluster( + std::vector *chunkservers); +
+ /** + * @brief List all the chunkservers in the cluster, grouped by + * poolid + * @param[out] chunkservers chunkserver info + * @return succeed return 0; failed return -1; + */ + virtual int ListChunkServersInCluster( + std::map> *chunkservers); + + /** + * @brief set copysets available flag + * @param copysets copysets whose available flag is to be set + * @param availFlag available or not + * @return succeed return 0; failed return -1; + */ + virtual int SetCopysetsAvailFlag(const std::vector copysets, + bool availFlag); + + /** + * @brief list all copysets that are unavailable + * @param[out] copysets copysets that are not available currently + * @return succeed return 0; failed return -1; + */ + virtual int ListUnAvailCopySets(std::vector *copysets); + + /** + * @brief Get the value of a metric for mds + * @param metricName The name of the metric + * @param[out] value The value of the metric, valid when the return value + * is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int GetMetric(const std::string &metricName, uint64_t *value); + + /** + * @brief Get the value of a metric for mds + * @param metricName The name of the metric + * @param[out] value The value of the metric, valid when the return value + * is 0 + * @return returns 0 for success, -1 for failure + */ + virtual int GetMetric(const std::string &metricName, std::string *value); + + /** + * @brief Set userName; called when accessing the namespace interfaces + * @param userName username + */ + void SetUserName(const std::string &userName) { userName_ = userName; } + + /** + * @brief Set password; called when accessing the namespace + * interfaces + * @param password password + */ + void SetPassword(const std::string &password) { password_ = password; } + + /** + * @brief Get mds address list + * @return List of mds addresses + */ + virtual const std::vector &GetMdsAddrVec() const + { + return mdsAddrVec_; + } + + virtual const std::map &GetDummyServerMap() + const + { + return dummyServerMap_; + } + + /** + * @brief Get the address of the current mds + */ + virtual std::vector GetCurrentMds(); + + /** + * @brief Send rpc to mds to trigger rapid leader balancing + */ + virtual int RapidLeaderSchedule(PoolIdType lpid); + + /** + * @brief Set the specified logical pool to enable/disable scan + * @param[in] lpid logical pool id + * @param[in] scanEnable enable(true)/disable(false) scan + * @return 0 if set success, else return -1 + */ + virtual int SetLogicalPoolScanState(PoolIdType lpid, bool scanEnable); + + /** + * @brief Get mds online status; an mds is considered online only if + * its dummyserver is reachable and the listen addr recorded by + * the dummyserver matches the mds address + * @param[out] onlineStatus mds online status + */ + virtual void GetMdsOnlineStatus(std::map *onlineStatus); + + /** + * @brief Get the recovery status of the specified chunkservers + * @param[in] cs List of chunkservers to query + * @param[out] statusMap returns the recovery status corresponding to each + * chunkserver + * @return returns 0 for success, -1 for failure + */ + int QueryChunkServerRecoverStatus( + const std::vector &cs, + std::map *statusMap); + + virtual int UpdateFileThrottleParams( + const std::string &fileName, const curve::mds::ThrottleParams &params); + + int ListPoolset(std::vector *poolsets); + + int ListChunkFormatStatus(std::vector *formatStatuses); +
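// The online rule above, condensed from the mds_client.cpp change earlier
// in this patch (GetMdsOnlineStatus): for each (mdsAddr, dummyAddr) pair,
// an mds counts as online only if its dummy server answers and reports
// the same listen address.
//
//     for (const auto& item : dummyServerMap_) {
//         std::string listenAddr;
//         int res = GetListenAddrFromDummyPort(item.second, &listenAddr);
//         (*onlineStatus)[item.first] =
//             (res == 0 && listenAddr == item.first);
//     }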
+ private: + /** + * @brief Switch mds + * @return returns true if the switch succeeds, false if all mds fail + */ + bool ChangeMDServer(); + + /** + * @brief Send RPC to mds; a shared helper to avoid duplicated code + * @return returns 0 for success, -1 for failure + */ + template + int SendRpcToMds(Request *request, Response *response, T *obp, + void (T::*func)(google::protobuf::RpcController *, + const Request *, Response *, + google::protobuf::Closure *)); + + /** + * @brief Get a list of chunkservers on the server + * @param request The request to be sent + * @param[out] chunkservers A list of chunkserver information, valid when + * the return value is 0 + * @return returns 0 for success, -1 for failure + */ + int ListChunkServersOnServer(ListChunkServerRequest *request, + std::vector *chunkservers); + + /** + * @brief Get detailed information about chunkserver + * @param request The request to be sent + * @param[out] chunkserver detailed chunkserver information, valid when + * the return value is 0 + * @return returns 0 for success, -1 for failure + */ + int GetChunkServerInfo(GetChunkServerInfoRequest *request, + ChunkServerInfo *chunkserver); + + /** + * @brief Get all copysets on the chunkserver + * @param request The request to be sent + * @param[out] copysets Details of copysets on chunkserver, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure + */ + int GetCopySetsInChunkServer(GetCopySetsInChunkServerRequest *request, + std::vector *copysets); + + /** + * @brief Initialize dummy server address + * @param dummyPort dummy server port list + * @return returns 0 for success, -1 for failure + */ + int InitDummyServerMap(const std::string &dummyPort); + + /** + * @brief Obtain the listening address of mds through dummyServer + * @param dummyAddr Address of dummyServer + * @param[out] listenAddr mds listening address + * @return returns 0 for success, -1 for failure + */ + int GetListenAddrFromDummyPort(const std::string &dummyAddr, + std::string *listenAddr); + + // Fill in the signature + template + void FillUserInfo(T *request); + + // client used to send HTTP requests + MetricClient metricClient_; + // Channel for sending RPCs to mds + brpc::Channel channel_; + // Vector of mds addresses + std::vector mdsAddrVec_; + // The address of the dummy server corresponding to each mds address + std::map dummyServerMap_; + // Index of the current mds in mdsAddrVec_ + int currentMdsIndex_; + // User name + std::string userName_; + // Password + std::string password_; + // Avoid duplicate initialization + bool isInited_; + }; + } // namespace tool +} // namespace curve + +#endif // SRC_TOOLS_MDS_CLIENT_H_
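// A minimal standalone sketch of what MetricClient::GetMetric does below:
// fetch a single bvar over brpc's HTTP interface and split the
// "name : value" attachment at the colon. Illustrative only; the
// "/vars/<name>" path is brpc's bvar service, error handling is reduced
// to a bool, and whitespace trimming is omitted.
//
//     #include <brpc/channel.h>
//     #include <string>
//
//     bool FetchMetric(const std::string& addr, const std::string& name,
//                      std::string* value) {
//         brpc::Channel channel;
//         brpc::ChannelOptions options;
//         options.protocol = brpc::PROTOCOL_HTTP;
//         if (channel.Init(addr.c_str(), &options) != 0) return false;
//         brpc::Controller cntl;
//         cntl.http_request().uri() = addr + "/vars/" + name;
//         channel.CallMethod(nullptr, &cntl, nullptr, nullptr, nullptr);
//         if (cntl.Failed()) return false;
//         const std::string body = cntl.response_attachment().to_string();
//         const auto pos = body.find(':');
//         if (pos == std::string::npos) return false;
//         *value = body.substr(pos + 1);
//         return true;
//     }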
diff --git a/src/tools/metric_client.cpp b/src/tools/metric_client.cpp index 776347f738..fc5012d58a 100644 --- a/src/tools/metric_client.cpp +++ b/src/tools/metric_client.cpp @@ -30,9 +30,9 @@ DECLARE_uint64(rpcRetryTimes); namespace curve { namespace tool { -MetricRet MetricClient::GetMetric(const std::string &addr, - const std::string &metricName, - std::string *value) { +MetricRet MetricClient::GetMetric(const std::string& addr, + const std::string& metricName, + std::string* value) { brpc::Channel httpChannel; brpc::ChannelOptions options; brpc::Controller cntl; @@ -70,15 +70,16 @@ MetricRet MetricClient::GetMetric(const std::string &addr, res = GetValueFromAttachment(attachment, value); return (res == 0) ? MetricRet::kOK : MetricRet::kOtherErr; } - // 这里不输出错误,因为对mds有切换的可能,把打印的处理交给外部 + // Don't print the error here: the mds may have switched, so error + // reporting is left to the caller bool notExist = cntl.ErrorCode() == brpc::EHTTP && cntl.http_response().status_code() == kHttpCodeNotFound; return notExist ? MetricRet::kNotFound : MetricRet::kOtherErr; } -MetricRet MetricClient::GetMetricUint(const std::string &addr, - const std::string &metricName, - uint64_t *value) { +MetricRet MetricClient::GetMetricUint(const std::string& addr, + const std::string& metricName, + uint64_t* value) { std::string str; MetricRet res = GetMetric(addr, metricName, &str); if (res != MetricRet::kOK) { @@ -92,9 +93,9 @@ MetricRet MetricClient::GetMetricUint(const std::string &addr, return MetricRet::kOK; } -MetricRet MetricClient::GetConfValueFromMetric(const std::string &addr, - const std::string &metricName, - std::string *confValue) { +MetricRet MetricClient::GetConfValueFromMetric(const std::string& addr, + const std::string& metricName, + std::string* confValue) { std::string jsonString; brpc::Controller cntl; MetricRet res = GetMetric(addr, metricName, &jsonString); @@ -118,8 +119,8 @@ MetricRet MetricClient::GetConfValueFromMetric(const std::string &addr, return MetricRet::kOK; } -int MetricClient::GetValueFromAttachment(const std::string &attachment, - std::string *value) { +int MetricClient::GetValueFromAttachment(const std::string& attachment, + std::string* value) { auto pos = attachment.find(":"); if (pos == std::string::npos) { std::cout << "parse response attachment fail!" << std::endl; diff --git a/src/tools/metric_client.h b/src/tools/metric_client.h index 94e29a545f..103f8da7f3 100644 --- a/src/tools/metric_client.h +++ b/src/tools/metric_client.h @@ -25,65 +25,68 @@ #include #include + #include #include -#include "src/tools/common.h" + #include "src/common/string_util.h" +#include "src/tools/common.h" #include "src/tools/curve_tool_define.h" namespace curve { namespace tool { enum class MetricRet { - // 成功 + // Success kOK = 0, - // metric未找到 + // Metric not found kNotFound = -1, - // 其他错误 - kOtherErr = -2, + // Other errors + kOtherErr = -2, }; const int kHttpCodeNotFound = 404; class MetricClient { public: - virtual ~MetricClient() {} + virtual ~MetricClient() {} - /** - * @brief 从指定地址获取metric - * @param addr 要访问的地址 - * @param metricName 要获取的metric name - * @param[out] value metric的值 - * @return 错误码 + /** + * @brief Get metric from specified address + * @param addr Address to access + * @param metricName The metric name to obtain + * @param[out] value The value of metric + * @return error code */ virtual MetricRet GetMetric(const std::string& addr, const std::string& metricName, std::string* value); - /** - * @brief 从指定地址获取metric,并转换成uint - * @param addr 要访问的地址 - * @param metricName 要获取的metric name - * @param[out] value metric的值 - * @return 错误码 + /** + * @brief Get metric from the specified address and convert it to + * uint + * @param addr Address to access + * @param metricName The metric name to obtain + * @param[out] value The value of metric + * @return error code */ virtual MetricRet GetMetricUint(const std::string& addr, const std::string& metricName, uint64_t* value); /** - * @brief 从metric获取配置的值 - * @param addr 要访问的地址 - * @param metricName 要获取的metric name - * @param[out] confValue metric中配置的值 - * @return 错误码 + /** + * @brief Get the configured value from metric + * @param addr Address to access + * @param metricName The metric name to
obtain + * @param[out] confValue The value configured in metric + * @return error code */ virtual MetricRet GetConfValueFromMetric(const std::string& addr, const std::string& metricName, std::string* confValue); private: - // 从response attachment解析出metric值 + // Parse the metric value from the response attachment int GetValueFromAttachment(const std::string& attachment, std::string* value); }; diff --git a/src/tools/metric_name.h b/src/tools/metric_name.h index 3f85d971a4..e576481ff5 100644 --- a/src/tools/metric_name.h +++ b/src/tools/metric_name.h @@ -22,131 +22,135 @@ #include -#include #include +#include #ifndef SRC_TOOLS_METRIC_NAME_H_ #define SRC_TOOLS_METRIC_NAME_H_ - -namespace curve { -namespace tool { - -// common metric name -const char kCurveVersionMetricName[] = "curve_version"; - -// snapshot clone server metric name -const char kSnapshotCloneConfMetricName[] = - "snapshot_clone_server_config_server_address"; -const char kSnapshotCloneStatusMetricName[] = "snapshotcloneserver_status"; -const char kSnapshotCloneStatusActive[] = "active"; - -// mds metric name -const char kLogicalPoolMetricPrefix[] = "topology_metric_logicalPool_"; -const char kChunkServerMetricPrefix[] = "chunkserver_"; -const char kOperatorNumMetricName[] = "mds_scheduler_metric_operator_num"; -const char kProcessCmdLineMetricName[] = "process_cmdline"; -const char kSechduleOpMetricpPrefix[] = "mds_scheduler_metric_"; -const char kMdsListenAddrMetricName[] = "mds_config_mds_listen_addr"; -const char kMdsStatusMetricName[] = "mds_status"; -const char kMdsStatusLeader[] = "leader"; -// operator名称 -const char kTotalOpName[] = "operator"; -const char kChangeOpName[] = "change_peer"; -const char kAddOpName[] = "add_peer"; -const char kRemoveOpName[] = "remove_peer"; -const char kTransferOpName[] = "transfer_leader"; - - -inline std::string GetPoolTotalChunkSizeName( - const std::string& poolName) { - std::string tmpName = kLogicalPoolMetricPrefix + - poolName + "_chunkSizeTotalBytes"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetPoolUsedChunkSizeName( - const std::string& poolName) { - std::string tmpName = kLogicalPoolMetricPrefix + - poolName + "_chunkSizeUsedBytes"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetPoolLogicalCapacityName( - const std::string& poolName) { - std::string tmpName = kLogicalPoolMetricPrefix + - poolName + "_logicalCapacity"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetPoolLogicalAllocName( - const std::string& poolName) { - std::string tmpName = kLogicalPoolMetricPrefix + - poolName + "_logicalAlloc"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetCSLeftChunkName(const std::string& csAddr) { - std::string tmpName = kChunkServerMetricPrefix + - csAddr + "_chunkfilepool_left"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetCSLeftWalSegmentName(const std::string& csAddr) { - std::string tmpName = kChunkServerMetricPrefix + - csAddr + "_walfilepool_left"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetUseWalPoolName(const std::string& csAddr) { - std::string tmpName = kChunkServerMetricPrefix + - csAddr + 
"_config_copyset_raft_log_uri"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetUseChunkFilePoolAsWalPoolName(const std::string& csAddr) { - std::string tmpName = kChunkServerMetricPrefix + - csAddr + "_config_walfilepool_use_chunk_file_pool"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetOpNumMetricName(const std::string& opName) { - std::string tmpName = kSechduleOpMetricpPrefix + - opName + "_num"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline bool SupportOpName(const std::string& opName) { - return opName == kTotalOpName || opName == kChangeOpName - || opName == kAddOpName || opName == kRemoveOpName - || opName == kTransferOpName; -} - -inline void PrintSupportOpName() { - std::cout << kTotalOpName << ", " << kChangeOpName - << ", " << kAddOpName << ", " << kRemoveOpName - << ", " << kTransferOpName << std::endl; -} - -} // namespace tool -} // namespace curve - -#endif // SRC_TOOLS_METRIC_NAME_H_ +namespace curve +{ + namespace tool + { + + // common metric name + const char kCurveVersionMetricName[] = "curve_version"; + + // snapshot clone server metric name + const char kSnapshotCloneConfMetricName[] = + "snapshot_clone_server_config_server_address"; + const char kSnapshotCloneStatusMetricName[] = "snapshotcloneserver_status"; + const char kSnapshotCloneStatusActive[] = "active"; + + // mds metric name + const char kLogicalPoolMetricPrefix[] = "topology_metric_logicalPool_"; + const char kChunkServerMetricPrefix[] = "chunkserver_"; + const char kOperatorNumMetricName[] = "mds_scheduler_metric_operator_num"; + const char kProcessCmdLineMetricName[] = "process_cmdline"; + const char kSechduleOpMetricpPrefix[] = "mds_scheduler_metric_"; + const char kMdsListenAddrMetricName[] = "mds_config_mds_listen_addr"; + const char kMdsStatusMetricName[] = "mds_status"; + const char kMdsStatusLeader[] = "leader"; + // operator Name + const char kTotalOpName[] = "operator"; + const char kChangeOpName[] = "change_peer"; + const char kAddOpName[] = "add_peer"; + const char kRemoveOpName[] = "remove_peer"; + const char kTransferOpName[] = "transfer_leader"; + + inline std::string GetPoolTotalChunkSizeName(const std::string &poolName) + { + std::string tmpName = + kLogicalPoolMetricPrefix + poolName + "_chunkSizeTotalBytes"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetPoolUsedChunkSizeName(const std::string &poolName) + { + std::string tmpName = + kLogicalPoolMetricPrefix + poolName + "_chunkSizeUsedBytes"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetPoolLogicalCapacityName(const std::string &poolName) + { + std::string tmpName = + kLogicalPoolMetricPrefix + poolName + "_logicalCapacity"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetPoolLogicalAllocName(const std::string &poolName) + { + std::string tmpName = kLogicalPoolMetricPrefix + poolName + "_logicalAlloc"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetCSLeftChunkName(const std::string &csAddr) + { + std::string tmpName = + kChunkServerMetricPrefix + csAddr + "_chunkfilepool_left"; + std::string 
metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetCSLeftWalSegmentName(const std::string &csAddr) + { + std::string tmpName = + kChunkServerMetricPrefix + csAddr + "_walfilepool_left"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetUseWalPoolName(const std::string &csAddr) + { + std::string tmpName = + kChunkServerMetricPrefix + csAddr + "_config_copyset_raft_log_uri"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetUseChunkFilePoolAsWalPoolName(const std::string &csAddr) + { + std::string tmpName = kChunkServerMetricPrefix + csAddr + + "_config_walfilepool_use_chunk_file_pool"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetOpNumMetricName(const std::string &opName) + { + std::string tmpName = kSechduleOpMetricpPrefix + opName + "_num"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline bool SupportOpName(const std::string &opName) + { + return opName == kTotalOpName || opName == kChangeOpName || + opName == kAddOpName || opName == kRemoveOpName || + opName == kTransferOpName; + } + + inline void PrintSupportOpName() + { + std::cout << kTotalOpName << ", " << kChangeOpName << ", " << kAddOpName + << ", " << kRemoveOpName << ", " << kTransferOpName << std::endl; + } + + } // namespace tool +} // namespace curve + +#endif // SRC_TOOLS_METRIC_NAME_H_ diff --git a/src/tools/namespace_tool.cpp b/src/tools/namespace_tool.cpp index 8d6119b75d..b0b039a835 100644 --- a/src/tools/namespace_tool.cpp +++ b/src/tools/namespace_tool.cpp @@ -28,8 +28,9 @@ DEFINE_string(fileName, "", "file name"); DEFINE_string(dirName, "", "directory name"); -DEFINE_string(expireTime, "7d", "Time for file in recyclebin exceed expire time " // NOLINT - "will be deleted (default: 7d)"); +DEFINE_string(expireTime, "7d", + "Time for file in recyclebin exceed expire time " // NOLINT + "will be deleted (default: 7d)"); DEFINE_bool(forcedelete, false, "force delete file or not"); DEFINE_uint64(fileLength, 20, "file length (GB)"); DEFINE_uint64(newSize, 30, "the new size of expanded volume(GB)"); @@ -37,11 +38,14 @@ DEFINE_string(poolset, "", "specify the poolset name"); DEFINE_bool(isTest, false, "is unit test or not"); DEFINE_uint64(offset, 0, "offset to query chunk location"); DEFINE_uint64(rpc_timeout, 3000, "millisecond for rpc timeout"); -DEFINE_bool(showAllocSize, true, "If specified, the allocated size will not be computed"); // NOLINT -DEFINE_bool(showFileSize, true, "If specified, the file size will not be computed"); // NOLINT +DEFINE_bool(showAllocSize, true, + "If specified, the allocated size will not be computed"); // NOLINT +DEFINE_bool(showFileSize, true, + "If specified, the file size will not be computed"); // NOLINT DECLARE_string(mdsAddr); -DEFINE_bool(showAllocMap, false, "If specified, the allocated size in each" - " logical pool will be print"); +DEFINE_bool(showAllocMap, false, + "If specified, the allocated size in each" + " logical pool will be print"); DEFINE_string(throttleType, "", "throttle type"); DEFINE_uint64(limit, 0, "throttle limit"); @@ -66,19 +70,15 @@ int NameSpaceTool::Init() { } bool NameSpaceTool::SupportCommand(const std::string& command) { - return (command == kGetCmd || command == kListCmd - || command == kSegInfoCmd - || 
command == kDeleteCmd - || command == kCreateCmd - || command == kExtendCmd - || command == kCleanRecycleCmd - || command == kChunkLocatitonCmd - || command == kUpdateThrottle - || command == kListPoolsets); + return (command == kGetCmd || command == kListCmd || + command == kSegInfoCmd || command == kDeleteCmd || + command == kCreateCmd || command == kExtendCmd || + command == kCleanRecycleCmd || command == kChunkLocatitonCmd || + command == kUpdateThrottle || command == kListPoolsets); } -// 根据命令行参数选择对应的操作 -int NameSpaceTool::RunCommand(const std::string &cmd) { +// Select the corresponding operation based on command line parameters +int NameSpaceTool::RunCommand(const std::string& cmd) { if (Init() != 0) { std::cout << "Init NameSpaceTool failed" << std::endl; return -1; @@ -92,12 +92,12 @@ int NameSpaceTool::RunCommand(const std::string &cmd) { } else if (cmd == kSegInfoCmd) { return PrintSegmentInfo(fileName); } else if (cmd == kDeleteCmd) { - // 单元测试不判断输入 + // Unit tests skip the interactive confirmation if (FLAGS_isTest) { return core_->DeleteFile(fileName, FLAGS_forcedelete); } - std::cout << "Are you sure you want to delete " - << fileName << "?" << "(yes/no)" << std::endl; + std::cout << "Are you sure you want to delete " << fileName << "?" + << "(yes/no)" << std::endl; std::string str; std::cin >> str; if (str == "yes") { @@ -163,29 +163,71 @@ int NameSpaceTool::RunCommand(const std::string &cmd) { } } -void NameSpaceTool::PrintHelp(const std::string &cmd) { +void NameSpaceTool::PrintHelp(const std::string& cmd) { std::cout << "Example: " << std::endl; if (cmd == kGetCmd || cmd == kListCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test [-mdsAddr=127.0.0.1:6666]" // NOLINT " [-showAllocSize=false] [-showFileSize=false] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + std::cout << "curve_ops_tool " << cmd + << " -fileName=/test [-mdsAddr=127.0.0.1:6666]" // NOLINT " [-showAllocSize=false] [-showFileSize=false] " "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT } else if (cmd == kSegInfoCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + std::cout << "curve_ops_tool " << cmd + << " -fileName=/test [-mdsAddr=127.0.0.1:6666] " "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT } else if (cmd == kCleanRecycleCmd) { - std::cout << "curve_ops_tool " << cmd << " [-fileName=/cinder] [-expireTime=1(s|m|h|d|M|y)] [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT - std::cout << "If -fileName is specified, delete the files in recyclebin that the original directory is fileName" << std::endl; // NOLINT - std::cout << "expireTime: s=second, m=minute, h=hour, d=day, M=month, y=year" << std::endl; // NOLINT + std::cout + << "curve_ops_tool " << cmd + << " [-fileName=/cinder] [-expireTime=1(s|m|h|d|M|y)] " "[-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT + std::cout << "If -fileName is specified, delete the files in " "recyclebin that the original directory is fileName" + << std::endl; // NOLINT + std::cout + << "expireTime: s=second, m=minute, h=hour, d=day, M=month, y=year" + << std::endl; // NOLINT } else if (cmd == kCreateCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test -userName=test -password=123 -fileLength=20 [--poolset=default] [-stripeUnit=32768] [-stripeCount=32] [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; //
NOLINT - std::cout << "curve_ops_tool " << cmd << " -dirName=/dir -userName=test -password=123 [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT - std::cout << "The first example can create a volume and the second create a directory." << std::endl; // NOLINT + std::cout + << "curve_ops_tool " << cmd + << " -fileName=/test -userName=test -password=123 -fileLength=20 " + "[--poolset=default] [-stripeUnit=32768] [-stripeCount=32] " + "[-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT + std::cout + << "curve_ops_tool " << cmd + << " -dirName=/dir -userName=test -password=123 " + "[-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT + std::cout << "The first example can create a volume and the second " + "create a directory." + << std::endl; // NOLINT } else if (cmd == kExtendCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test -userName=test -password=123 -newSize=30 [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + std::cout + << "curve_ops_tool " << cmd + << " -fileName=/test -userName=test -password=123 -newSize=30 " + "[-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT } else if (cmd == kDeleteCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test -userName=test -password=123 -forcedelete=true [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + std::cout << "curve_ops_tool " << cmd + << " -fileName=/test -userName=test -password=123 " + "-forcedelete=true [-mdsAddr=127.0.0.1:6666] " + "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT } else if (cmd == kChunkLocatitonCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test -offset=16777216 [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + std::cout + << "curve_ops_tool " << cmd + << " -fileName=/test -offset=16777216 [-mdsAddr=127.0.0.1:6666] " + "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT } else if (cmd == kUpdateThrottle) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test -throttleType=(IOPS_TOTAL|IOPS_READ|IOPS_WRITE|BPS_TOTAL|BPS_READ|BPS_WRITE) -limit=20000 [-burst=30000] [-burstLength=10]" << std::endl; // NOLINT + std::cout + << "curve_ops_tool " << cmd + << " -fileName=/test " + "-throttleType=(IOPS_TOTAL|IOPS_READ|IOPS_WRITE|BPS_TOTAL|BPS_" + "READ|BPS_WRITE) -limit=20000 [-burst=30000] [-burstLength=10]" + << std::endl; // NOLINT } else { std::cout << "command not found!" 
<< std::endl; } @@ -204,7 +246,8 @@ int NameSpaceTool::PrintFileInfoAndActualSize(const std::string& fullName, const FileInfo& fileInfo) { PrintFileInfo(fileInfo); int ret = GetAndPrintAllocSize(fullName); - // 如果是目录的话,计算目录中的文件大小(用户创建时指定的) + // If it is a directory, calculate the file size in the directory (specified + // by the user when creating it) if (fileInfo.filetype() == curve::mds::FileType::INODE_DIRECTORY) { ret = GetAndPrintFileSize(fullName); } @@ -255,14 +298,14 @@ void NameSpaceTool::PrintFileInfo(const FileInfo& fileInfo) { curve::common::SplitString(fileInfoStr, "\n", &items); for (const auto& item : items) { if (item.compare(0, 5, "ctime") == 0) { - // ctime是微妙,打印的时候只打印到秒 + // ctime is in microseconds; only print it down to the second time_t ctime = fileInfo.ctime() / 1000000; std::string standard; curve::common::TimeUtility::TimeStampToStandard(ctime, &standard); std::cout << "ctime: " << standard << std::endl; continue; } - // 把length转换成GB + // Convert length to GB if (item.compare(0, 6, "length") == 0) { uint64_t length = fileInfo.length(); double fileSize = static_cast<double>(length) / curve::mds::kGB;
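For reference, the two conversions in the PrintFileInfo hunk above amount to the following minimal sketch (names taken from the hunk; curve::mds::kGB is assumed to be one gibibyte):

    // ctime is stored in microseconds, so truncate to seconds before formatting.
    time_t ctimeSec = fileInfo.ctime() / 1000000;
    // length is in bytes; report it in GB for readability.
    double fileSizeGB = static_cast<double>(fileInfo.length()) / curve::mds::kGB;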
@@ -315,15 +358,15 @@ int NameSpaceTool::PrintPoolsets() { for (const auto& poolset : poolsets) { const std::string str = absl::StrFormat( - "id: %3d, name: %s, type: %s, desc: `%s`", poolset.poolsetid(), - poolset.poolsetname(), poolset.type(), poolset.desc()); + "id: %3d, name: %s, type: %s, desc: `%s`", poolset.poolsetid(), + poolset.poolsetname(), poolset.type(), poolset.desc()); std::cout << str << std::endl; } return 0; } -int NameSpaceTool::PrintSegmentInfo(const std::string &fileName) { +int NameSpaceTool::PrintSegmentInfo(const std::string& fileName) { std::vector<PageFileSegment> segments; if (core_->GetFileSegments(fileName, &segments) != 0) { std::cout << "GetFileSegments fail!" << std::endl; @@ -358,14 +401,13 @@ void NameSpaceTool::PrintSegment(const PageFileSegment& segment) { if (segment.chunks(i).has_copysetid()) { copysetId = segment.chunks(i).copysetid(); } - std::cout << "chunkID: " << chunkId << ", copysetID: " - << copysetId << std::endl; + std::cout << "chunkID: " << chunkId << ", copysetID: " << copysetId + << std::endl; } } - int NameSpaceTool::PrintChunkLocation(const std::string& fileName, - uint64_t offset) { + uint64_t offset) { uint64_t chunkId; std::pair<uint32_t, uint32_t> copyset; if (core_->QueryChunkCopyset(fileName, offset, &chunkId, &copyset) != 0) { @@ -375,13 +417,12 @@ int NameSpaceTool::PrintChunkLocation(const std::string& fileName, uint32_t logicPoolId = copyset.first; uint32_t copysetId = copyset.second; uint64_t groupId = (static_cast<uint64_t>(logicPoolId) << 32) | copysetId; - std::cout << "chunkId: " << chunkId - << ", logicalPoolId: " << logicPoolId - << ", copysetId: " << copysetId - << ", groupId: " << groupId << std::endl; + std::cout << "chunkId: " << chunkId << ", logicalPoolId: " << logicPoolId + << ", copysetId: " << copysetId << ", groupId: " << groupId + << std::endl; std::vector<ChunkServerLocation> csLocs; - int res = core_->GetChunkServerListInCopySet(logicPoolId, - copysetId, &csLocs); + int res = + core_->GetChunkServerListInCopySet(logicPoolId, copysetId, &csLocs); if (res != 0) { std::cout << "GetChunkServerListInCopySet fail!" << std::endl; return -1; @@ -400,7 +441,7 @@ int NameSpaceTool::PrintChunkLocation(const std::string& fileName, } void NameSpaceTool::TrimEndingSlash(std::string* fileName) { - // 如果最后面有/,去掉 + // If there is a '/' at the end, remove it if (fileName->size() > 1 && fileName->back() == '/') { fileName->pop_back(); }
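PrintChunkLocation above derives the braft group id by packing the two 32-bit ids into one 64-bit value; the round trip looks like this (a sketch using the names from the hunk):

    // Pack: high 32 bits hold the logical pool id, low 32 bits the copyset id.
    uint64_t groupId = (static_cast<uint64_t>(logicPoolId) << 32) | copysetId;
    // Unpack:
    uint32_t poolId = static_cast<uint32_t>(groupId >> 32);
    uint32_t csId = static_cast<uint32_t>(groupId & 0xFFFFFFFFu);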
diff --git a/src/tools/namespace_tool.h b/src/tools/namespace_tool.h index 1af7f8ca8f..3594afafa6 100644 --- a/src/tools/namespace_tool.h +++ b/src/tools/namespace_tool.h @@ -26,22 +26,22 @@ #include #include -#include -#include -#include -#include #include #include +#include +#include +#include #include +#include #include "proto/nameserver2.pb.h" #include "proto/topology.pb.h" -#include "src/common/timeutility.h" #include "src/common/string_util.h" +#include "src/common/timeutility.h" #include "src/mds/common/mds_define.h" -#include "src/tools/namespace_tool_core.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" +#include "src/tools/namespace_tool_core.h" using curve::mds::FileInfo; using curve::mds::PageFileSegment; @@ -52,71 +52,72 @@ namespace tool { class NameSpaceTool : public CurveTool { public: - explicit NameSpaceTool(std::shared_ptr<NameSpaceToolCore> core) : - core_(core), inited_(false) {} + explicit NameSpaceTool(std::shared_ptr<NameSpaceToolCore> core) + : core_(core), inited_(false) {} /** - * @brief 打印用法 - * @param command:查询的命令 - * @return 无 + * @brief Print usage + * @param command: The command to query + * @return None */ - void PrintHelp(const std::string &command) override; + void PrintHelp(const std::string& command) override; /** - * @brief 执行命令 - * @param command:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param command: The command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &command) override; + int RunCommand(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); private: - // 初始化 + // Initialize int Init(); - // 打印fileInfo和文件占用的实际空间 + // Print fileInfo and the actual space occupied by the file int PrintFileInfoAndActualSize(const std::string& fileName); - // 打印fileInfo和文件占用的实际空间 + // Print fileInfo and the actual space occupied by the file int PrintFileInfoAndActualSize(const std::string& fullName, const FileInfo& fileInfo); - // 打印目录中的文件信息 + // Print file information in the directory int PrintListDir(const std::string& dirName); - // 打印出文件的segment信息 - int PrintSegmentInfo(const std::string &fileName); + // Print out the segment information of the file + int PrintSegmentInfo(const std::string& fileName); - // 打印fileInfo,把时间转化为易读的格式输出 + // Print fileInfo and convert the time into a readable format for output void PrintFileInfo(const FileInfo& fileInfo); - // 打印PageFileSegment,把同一个chunk的信息打在同一行 + // Print PageFileSegment, putting the information of the same chunk on the + // same line void PrintSegment(const PageFileSegment& segment); - // 打印chunk的位置信息 + // Print the location information of the chunk - int PrintChunkLocation(const std::string& fileName, - uint64_t offset); + int PrintChunkLocation(const std::string& fileName, uint64_t offset); - // 打印文件的分配大小 + // Print the allocated size of the file int GetAndPrintAllocSize(const std::string& fileName); - // 打印目录的file size + // Print the file size of the directory int GetAndPrintFileSize(const std::string& fileName); - // 目前curve mds不支持/test/格式的文件名,需要把末尾的/去掉 + // Currently, curve mds does not support file names in the /test/ format, + // so the trailing '/' needs to be removed void TrimEndingSlash(std::string* fileName); int PrintPoolsets(); private: - // 核心逻辑 + // Core logic std::shared_ptr<NameSpaceToolCore> core_; - // 是否初始化成功过 + // Whether initialization has ever succeeded bool inited_; }; } // namespace tool diff --git a/src/tools/namespace_tool_core.cpp b/src/tools/namespace_tool_core.cpp index b69a6ecacc..4c1f8ff1a4 100644 --- a/src/tools/namespace_tool_core.cpp +++ b/src/tools/namespace_tool_core.cpp @@ -27,8 +27,8 @@ DEFINE_string(password, "", "password of administrator"); namespace curve { namespace tool { -NameSpaceToolCore::NameSpaceToolCore(std::shared_ptr<MDSClient> client) : - client_(client) { +NameSpaceToolCore::NameSpaceToolCore(std::shared_ptr<MDSClient> client) + : client_(client) { client_->SetUserName(FLAGS_userName); client_->SetPassword(FLAGS_password); } @@ -37,7 +37,7 @@ int NameSpaceToolCore::Init(const std::string& mdsAddr) { return client_->Init(mdsAddr); } -int NameSpaceToolCore::GetFileInfo(const std::string &fileName, +int NameSpaceToolCore::GetFileInfo(const std::string& fileName, FileInfo* fileInfo) { return client_->GetFileInfo(fileName, fileInfo); } @@ -48,11 +48,10 @@ int NameSpaceToolCore::ListDir(const std::string& dirName, } int NameSpaceToolCore::GetChunkServerListInCopySet( - const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId, - std::vector<ChunkServerLocation>* csLocs) { - return client_->GetChunkServerListInCopySet(logicalPoolId, - copysetId, csLocs); + const PoolIdType& logicalPoolId, const CopySetIdType& copysetId, + std::vector<ChunkServerLocation>* csLocs) { + return client_->GetChunkServerListInCopySet(logicalPoolId, copysetId, + csLocs); } int NameSpaceToolCore::DeleteFile(const std::string& fileName, @@ -65,7 +64,7 @@ int NameSpaceToolCore::CreateFile(const CreateFileContext& ctx) { } int NameSpaceToolCore::ExtendVolume(const std::string& fileName, - uint64_t newSize) { + uint64_t newSize) { return client_->ExtendVolume(fileName, newSize); } int NameSpaceToolCore::GetAllocatedSize(const std::string& fileName, @@ -85,7 +84,7 @@ int NameSpaceToolCore::GetFileSize(const std::string& fileName, } int NameSpaceToolCore::GetFileSegments(const std::string& fileName, - std::vector<PageFileSegment>* segments) { + std::vector<PageFileSegment>* segments) { FileInfo fileInfo; int res = GetFileInfo(fileName, &fileInfo); if (res != 0) { @@ -96,28 +95,30 @@ int NameSpaceToolCore::GetFileSegments(const std::string& fileName, - const FileInfo& fileInfo, - std::vector<PageFileSegment>* segments) { + const FileInfo& fileInfo, + std::vector<PageFileSegment>* segments) { - // 只能获取page file的segment + // Only segments of page files can be obtained if (fileInfo.filetype() != curve::mds::FileType::INODE_PAGEFILE) { std::cout << "It is not a page file!" 
<< std::endl; return -1; } - // 获取文件的segment数,并打印每个segment的详细信息 + // Obtain the number of segments in the file and print detailed information + // for each segment uint64_t segmentNum = fileInfo.length() / fileInfo.segmentsize(); uint64_t segmentSize = fileInfo.segmentsize(); for (uint64_t i = 0; i < segmentNum; i++) { // load segment PageFileSegment segment; - GetSegmentRes res = client_->GetSegmentInfo(fileName, - i * segmentSize, &segment); + GetSegmentRes res = + client_->GetSegmentInfo(fileName, i * segmentSize, &segment); if (res == GetSegmentRes::kOK) { segments->emplace_back(segment); } else if (res == GetSegmentRes::kSegmentNotAllocated) { continue; } else if (res == GetSegmentRes::kFileNotExists) { - // 查询过程中文件被删掉了,清空segment并返回0 + // During the query the file was deleted; clear the segments and + // return 0 segments->clear(); return 0; } else { @@ -137,8 +138,7 @@ int NameSpaceToolCore::CleanRecycleBin(const std::string& dirName, return -1; } - auto needDelete = [](const FileInfo &fileInfo, - uint64_t now, + auto needDelete = [](const FileInfo& fileInfo, uint64_t now, uint64_t expireTime) -> bool { auto filename = fileInfo.filename(); std::vector<std::string> items; @@ -147,9 +147,9 @@ int NameSpaceToolCore::CleanRecycleBin(const std::string& dirName, uint64_t dtime; auto n = items.size(); auto id = std::to_string(fileInfo.id()); - if (n >= 2 && items[n - 2] == id - && ::curve::common::StringToUll(items[n - 1], &dtime) - && now - dtime < expireTime) { + if (n >= 2 && items[n - 2] == id && + ::curve::common::StringToUll(items[n - 1], &dtime) && + now - dtime < expireTime) { return false; } @@ -210,10 +210,9 @@ int NameSpaceToolCore::UpdateFileThrottle(const std::string& fileName, return client_->UpdateFileThrottleParams(fileName, params); } -int NameSpaceToolCore::QueryChunkCopyset(const std::string& fileName, - uint64_t offset, - uint64_t* chunkId, - std::pair<uint32_t, uint32_t>* copyset) { +int NameSpaceToolCore::QueryChunkCopyset( + const std::string& fileName, uint64_t offset, uint64_t* chunkId, + std::pair<uint32_t, uint32_t>* copyset) { if (!chunkId || !copyset) { std::cout << "The argument is a null pointer!" << std::endl; return -1; @@ -229,11 +228,11 @@ int NameSpaceToolCore::QueryChunkCopyset(const std::string& fileName, return -1; } uint64_t segmentSize = fileInfo.segmentsize(); - // segment对齐的offset + // segment aligned offset uint64_t segOffset = (offset / segmentSize) * segmentSize; PageFileSegment segment; - GetSegmentRes segRes = client_->GetSegmentInfo(fileName, - segOffset, &segment); + GetSegmentRes segRes = + client_->GetSegmentInfo(fileName, segOffset, &segment); if (segRes != GetSegmentRes::kOK) { if (segRes == GetSegmentRes::kSegmentNotAllocated) { std::cout << "Chunk has not been allocated!" << std::endl; @@ -243,7 +242,7 @@ int NameSpaceToolCore::QueryChunkCopyset(const std::string& fileName, return -1; } } - // 在segment里面的chunk的索引 + // Index of the chunk within the segment if (segment.chunksize() == 0) { std::cout << "No chunks in segment!" << std::endl; return -1;
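QueryChunkCopyset above locates a chunk in two steps: it rounds the file offset down to the owning segment, then divides the remainder by the chunk size (the zero check on segment.chunksize() guards the second step). Schematically, with the names from the hunk:

    // Segment-aligned offset of the segment that contains `offset`.
    uint64_t segOffset = (offset / segmentSize) * segmentSize;
    // Index of the chunk within that segment, assuming chunksize() is the
    // per-chunk size in bytes.
    uint64_t chunkIndex = (offset - segOffset) / segment.chunksize();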
diff --git a/src/tools/namespace_tool_core.h b/src/tools/namespace_tool_core.h index febf0882f8..60e702e3f7 100644 --- a/src/tools/namespace_tool_core.h +++ b/src/tools/namespace_tool_core.h @@ -26,28 +26,28 @@ #include #include -#include -#include -#include -#include #include #include +#include +#include +#include #include +#include #include "proto/nameserver2.pb.h" #include "proto/topology.pb.h" -#include "src/common/timeutility.h" -#include "src/common/string_util.h" #include "src/common/fs_util.h" +#include "src/common/string_util.h" +#include "src/common/timeutility.h" #include "src/mds/common/mds_define.h" #include "src/tools/mds_client.h" +using curve::common::ChunkServerLocation; using curve::mds::FileInfo; +using curve::mds::PageFileChunkInfo; using curve::mds::PageFileSegment; using curve::mds::StatusCode; -using curve::mds::PageFileChunkInfo; using curve::mds::topology::kTopoErrCodeSuccess; -using curve::common::ChunkServerLocation; namespace curve { namespace tool { @@ -60,107 +60,116 @@ class NameSpaceToolCore { public: virtual ~NameSpaceToolCore() = default; /** - * @brief 初始化mds client - * @param mdsAddr mds的地址,支持多地址,用","分隔 - * @return 成功返回0,失败返回-1 + * @brief Initialize mds client + * @param mdsAddr Address of mds, supporting multiple addresses separated by + * ',' + * @return returns 0 for success, -1 for failure */ virtual int Init(const std::string& mdsAddr); /** - * @brief 获取文件fileInfo - * @param fileName 文件名 - * @param[out] fileInfo 文件fileInfo,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get file fileInfo + * @param fileName File name + * @param[out] fileInfo fileInfo of the file, valid when the return value + * is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetFileInfo(const std::string& fileName, FileInfo* fileInfo); /** - * @brief 将目录下所有的fileInfo列出来 - * @param dirName 目录名 - * @param[out] files 目录下的所有文件fileInfo,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief List all fileInfo in the directory + * @param dirName directory name + * @param[out] files fileInfo of all files in the directory, valid when + * the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListDir(const std::string& dirName, std::vector<FileInfo>* files); /** - * @brief 获取copyset中的chunkserver列表 - * @param logicalPoolId 逻辑池id - * @param copysetId copyset id - * @param[out] csLocs chunkserver位置的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the list of chunkservers in the copyset + * @param logicalPoolId Logical Pool ID + * @param copysetId copyset ID + * @param[out] csLocs List of chunkserver locations, valid when the return + * value is 0 + * @return returns 0 for success, -1 for failure */ - virtual int GetChunkServerListInCopySet(const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId, - std::vector<ChunkServerLocation>* csLocs); + virtual int GetChunkServerListInCopySet( + const PoolIdType& logicalPoolId, const CopySetIdType& copysetId, + std::vector<ChunkServerLocation>* csLocs); /** - * @brief 删除文件 - * @param fileName 文件名 - * @param forcedelete 是否强制删除 - * @return 成功返回0,失败返回-1 + * @brief Delete file + * @param fileName File name + * @param forcedelete whether to force the deletion + * @return returns 0 for success, -1 for failure */ virtual int DeleteFile(const std::string& fileName, bool forcedelete = false); /** - * @brief create pageFile or directory - * @param fileName file name or dir name - * @param length 文件长度 - * @param normalFile is file or dir - * @param stripeUnit stripe unit size - * @param stripeCount the amount of stripes - * @return 成功返回0,失败返回-1 + * @brief 
create pageFile or directory + * @param fileName file name or dir name + * @param length File length + * @param normalFile is file or dir + * @param stripeUnit stripe unit size + * @param stripeCount the amount of stripes + * @return returns 0 for success, -1 for failure */ virtual int CreateFile(const CreateFileContext& ctx); - /** - * @brief 扩容卷 - * @param fileName 文件名 - * @param newSize 扩容后的文件长度 - * @return 成功返回0,失败返回-1 + /** + * @brief Extend the volume + * @param fileName File name + * @param newSize The file length after expansion + * @return returns 0 for success, -1 for failure */ virtual int ExtendVolume(const std::string& fileName, uint64_t newSize); /** - * @brief 计算文件或目录实际分配的空间 - * @param fileName 文件名 - * @param[out] allocSize 文件或目录已分配大小,返回值为0是有效 - * @param[out] allocMap 在每个池子的分配量,返回值0时有效 - * @return 成功返回0,失败返回-1 + * @brief Calculate the actual allocated space of a file or directory + * @param fileName File name + * @param[out] allocSize Allocated size of the file or directory, valid + * when the return value is 0 + * @param[out] allocMap The allocation amount in each pool, valid when the + * return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetAllocatedSize(const std::string& fileName, uint64_t* allocSize, AllocMap* allocMap = nullptr); /** - * @brief 返回文件或目录的中的文件的用户申请的大小 - * @param fileName 文件名 - * @param[out] fileSize 文件或目录中用户申请的大小,返回值为0是有效 - * @return 成功返回0,失败返回-1 + * @brief Returns the user requested size of files in a file or directory + * @param fileName File name + * @param[out] fileSize The size requested by the user in the file or + * directory, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetFileSize(const std::string& fileName, uint64_t* fileSize); /** - * @brief 获取文件的segment信息并输出到segments里面 - * @param fileName 文件名 - * @param[out] segments 文件segment的列表 - * @return 返回文件实际分配大小,失败则为-1 + * @brief Get the segment information of the file and output it to segments + * @param fileName File name + * @param[out] segments List of segments in the file + * @return the actual allocated size of the file, or -1 on failure */ virtual int GetFileSegments(const std::string& fileName, - std::vector<PageFileSegment>* segments); + std::vector<PageFileSegment>* segments); /** - * @brief 查询offset对应的chunk的id和所属的copyset - * @param fileName 文件名 - * @param offset 文件中的偏移 - * @param[out] chunkId chunkId,返回值为0时有效 - * @param[out] copyset chunk对应的copyset,是logicalPoolId和copysetId的pair - * @return 成功返回0,失败返回-1 + * @brief Query the id of the chunk corresponding to the offset and the + * copyset it belongs to + * @param fileName File name + * @param offset Offset in file + * @param[out] chunkId chunkId, valid when the return value is 0 + * @param[out] copyset The copyset the chunk belongs to, a pair of + * logicalPoolId and copysetId + * @return returns 0 for success, -1 for failure */ virtual int QueryChunkCopyset(const std::string& fileName, uint64_t offset, - uint64_t* chunkId, - std::pair<uint32_t, uint32_t>* copyset); + uint64_t* chunkId, + std::pair<uint32_t, uint32_t>* copyset); /** * @brief clean recycle bin @@ -174,25 +183,24 @@ class NameSpaceToolCore { virtual int UpdateFileThrottle(const std::string& fileName, const std::string& throttleType, - const uint64_t limit, - const int64_t burst, + const uint64_t limit, const int64_t burst, const int64_t burstLength); virtual int ListPoolset(std::vector* poolsets); private: /** - * @brief 获取文件的segment信息并输出到segments里面 - * @param fileName 文件名 - * @param fileInfo 文件的fileInfo - * 
@param[out] segments 文件segment的列表 - * @return 返回文件实际分配大小,失败则为-1 + * @brief Get the segment information of the file and output it to segments + * @param fileName File name + * @param fileInfo The fileInfo of the file + * @param[out] segments List of segments in the file + * @return the actual allocated size of the file, or -1 on failure */ - int GetFileSegments(const std::string& fileName, - const FileInfo& fileInfo, + int GetFileSegments(const std::string& fileName, const FileInfo& fileInfo, std::vector<PageFileSegment>* segments); - // 向mds发送RPC的client + // Client used to send RPCs to the mds std::shared_ptr<MDSClient> client_; }; } // namespace tool diff --git a/src/tools/raft_log_tool.cpp b/src/tools/raft_log_tool.cpp index a4fb97e142..cbe40eb2b5 100644 --- a/src/tools/raft_log_tool.cpp +++ b/src/tools/raft_log_tool.cpp @@ -35,33 +35,31 @@ enum class CheckSumType { CHECKSUM_CRC32 = 1, }; -inline bool VerifyCheckSum(int type, - const char* data, size_t len, uint32_t value) { +inline bool VerifyCheckSum(int type, const char* data, size_t len, + uint32_t value) { CheckSumType checkSunType = static_cast<CheckSumType>(type); switch (checkSunType) { - case CheckSumType::CHECKSUM_MURMURHASH32: - return (value == braft::murmurhash32(data, len)); - case CheckSumType::CHECKSUM_CRC32: - return (value == braft::crc32(data, len)); - default: - std::cout << "Unknown checksum_type=" << type <Fstat(fd_, &stBuf) != 0) { - std::cout << "Fail to get the stat of " << fileName - << ", " << berror() << std::endl; + std::cout << "Fail to get the stat of " << fileName << ", " << berror() + << std::endl; localFS_->Close(fd_); return -1; } @@ -135,9 +133,7 @@ int SegmentParser::Init(const std::string& fileName) { return 0; } -void SegmentParser::UnInit() { - localFS_->Close(fd_); -} +void SegmentParser::UnInit() { localFS_->Close(fd_); } bool SegmentParser::GetNextEntryHeader(EntryHeader* head) { if (off_ >= fileLen_) { @@ -147,12 +143,11 @@ bool SegmentParser::GetNextEntryHeader(EntryHeader* head) { const ssize_t n = localFS_->Read(fd_, buf, off_, ENTRY_HEADER_SIZE); if (n != (ssize_t)ENTRY_HEADER_SIZE) { if (n < 0) { - std::cout << "read header from file, fd: " << fd_ << ", offset: " - << off_ << ", " << berror() << std::endl; + std::cout << "read header from file, fd: " << fd_ + << ", offset: " << off_ << ", " << berror() << std::endl; } else { std::cout << "Read size not match, header size: " - << ENTRY_HEADER_SIZE << ", read size: " - << n << std::endl; + << ENTRY_HEADER_SIZE << ", read size: " << n << std::endl; } return false; } @@ -162,19 +157,20 @@ bool SegmentParser::GetNextEntryHeader(EntryHeader* head) { uint32_t data_len = 0; uint32_t data_checksum = 0; uint32_t header_checksum = 0; - butil::RawUnpacker(buf).unpack64((uint64_t&)term) - .unpack32(meta_field) - .unpack32(data_len) - .unpack32(data_checksum) - .unpack32(header_checksum); + butil::RawUnpacker(buf) + .unpack64((uint64_t&)term) + .unpack32(meta_field) + .unpack32(data_len) + .unpack32(data_checksum) + .unpack32(header_checksum); EntryHeader tmp; tmp.term = term; tmp.type = meta_field >> 24; tmp.checksum_type = (meta_field << 8) >> 24; tmp.data_len = data_len; tmp.data_checksum = data_checksum; - if (!VerifyCheckSum(tmp.checksum_type, - buf, ENTRY_HEADER_SIZE - 4, header_checksum)) { + if (!VerifyCheckSum(tmp.checksum_type, buf, ENTRY_HEADER_SIZE - 4, + header_checksum)) { std::cout << "Found corrupted header at offset=" << off_ << ", header=" << tmp; return false;
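The RawUnpacker calls above imply the on-disk layout of a braft segment entry header; the last field is a checksum over the preceding ones, which is why VerifyCheckSum is passed ENTRY_HEADER_SIZE - 4. As a sketch (assuming ENTRY_HEADER_SIZE is 24):

    // int64_t  term             bytes  0..7
    // uint32_t meta_field       bytes  8..11  type = meta_field >> 24,
    //                                         checksum_type = (meta_field << 8) >> 24
    // uint32_t data_len         bytes 12..15
    // uint32_t data_checksum    bytes 16..19
    // uint32_t header_checksum  bytes 20..23  over the first ENTRY_HEADER_SIZE - 4 bytes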
@@ -189,30 +185,28 @@ int RaftLogTool::ParseFirstIndexFromFileName(const std::string& fileName, int64_t* firstIndex) { int match = 0; - int64_t lastIndex = 0; + int64_t lastIndex = 0; std::string name; - auto pos = fileName.find_last_of("/"); + auto pos = fileName.find_last_of("/"); if (pos == std::string::npos) { name = fileName; } else { name = fileName.substr(pos + 1); } - match = sscanf(name.c_str(), BRAFT_SEGMENT_CLOSED_PATTERN, - firstIndex, &lastIndex); + match = sscanf(name.c_str(), BRAFT_SEGMENT_CLOSED_PATTERN, firstIndex, + &lastIndex); if (match == 2) { std::cout << "it is a closed segment, path: " << fileName << " first index: " << *firstIndex << " last index: " << lastIndex << std::endl; } else { - match = sscanf(name.c_str(), BRAFT_SEGMENT_OPEN_PATTERN, - firstIndex); + match = sscanf(name.c_str(), BRAFT_SEGMENT_OPEN_PATTERN, firstIndex); if (match == 1) { - std::cout << "it is a opening segment, path: " - << fileName + std::cout << "it is a opening segment, path: " << fileName << " first index: " << *firstIndex << std::endl; } else { - std::cout << "filename = " << fileName << - " is not a raft segment pattern!" << std::endl; + std::cout << "filename = " << fileName + << " is not a raft segment pattern!" << std::endl; return -1; } } diff --git a/src/tools/raft_log_tool.h b/src/tools/raft_log_tool.h index d056608bb9..d445b9a280 100644 --- a/src/tools/raft_log_tool.h +++ b/src/tools/raft_log_tool.h @@ -23,14 +23,16 @@ #ifndef SRC_TOOLS_RAFT_LOG_TOOL_H_ #define SRC_TOOLS_RAFT_LOG_TOOL_H_ -#include #include #include #include +#include + #include #include #include #include + #include "src/fs/local_filesystem.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" @@ -49,48 +51,46 @@ struct EntryHeader { uint32_t data_len; uint32_t data_checksum; - bool operator== (const EntryHeader& rhs) const; + bool operator==(const EntryHeader& rhs) const; }; std::ostream& operator<<(std::ostream& os, const EntryHeader& h); class SegmentParser { public: - explicit SegmentParser(std::shared_ptr<curve::fs::LocalFileSystem> localFS) : - localFS_(localFS) {} + explicit SegmentParser(std::shared_ptr<curve::fs::LocalFileSystem> localFS) + : localFS_(localFS) {} /** - * @brief 初始化 - * @param fileName segmnet文件的文件名 - * @return 获取成功返回0,失败返回-1 + * @brief Initialize + * @param fileName The file name of the segment file + * @return returns 0 if successful, -1 if unsuccessful */ virtual int Init(const std::string& fileName); /** - * @brief 反初始化 + * @brief Deinitialize */ virtual void UnInit(); /** - * @brief 获取下一个EntryHeader - * @param[out] header log entry header - * @return 获取成功返回true,失败返回false + * @brief Get the next EntryHeader + * @param[out] header log entry header + * @return returns true for success, false for failure */ virtual bool GetNextEntryHeader(EntryHeader* header); /** - * @brief 判断读取是否成功完成 + * @brief Determine if the read was successfully completed */ - virtual bool SuccessfullyFinished() { - return off_ >= fileLen_; - } + virtual bool SuccessfullyFinished() { return off_ >= fileLen_; } private: - // 文件描述符 + // File descriptor int fd_; - // 下一个Entry的偏移 + // Offset of the next entry int64_t off_; - // 文件长度 + // File length int64_t fileLen_; std::shared_ptr<curve::fs::LocalFileSystem> localFS_; @@ -98,50 +98,52 @@ class RaftLogTool : public CurveTool { public: - explicit RaftLogTool(std::shared_ptr<SegmentParser> parser) : - parser_(parser) {} + explicit RaftLogTool(std::shared_ptr<SegmentParser> parser) + : parser_(parser) {} /** - * @brief 执行命令 - * @param command 要执行的命令 - * @return 成功返回0,失败返回-1 - */ + * @brief Execute command + * @param command The command to be executed + 
* @return returns 0 for success, -1 for failure + */ int RunCommand(const std::string& command) override; /** - * @brief 打印帮助信息 - */ + * @brief Print help information + */ void PrintHelp(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); private: /** - * @brief 打印文件中所有raft log的头部信息 - * @param fileName raft log文件名 - * @return 成功返回0,否则返回-1 + * @brief Print the header information of all raft logs in the file + * @param fileName raft log file name + * @return successfully returns 0, otherwise returns -1 */ int PrintHeaders(const std::string& fileName); /** - * @brief 从文件解析出entry header - * @param fd 文件描述符 - * @param offset 文件中的偏移 - * @param[out] head entry头部信息,返回值为0时有效 - * @return 成功返回0,否则返回-1 + * @brief Parse the entry header from the file + * @param fd file descriptor + * @param offset Offset in file + * @param[out] head entry header information, valid when the return value is + * 0 + * @return successfully returns 0, otherwise returns -1 */ - int ParseEntryHeader(int fd, off_t offset, EntryHeader *head); + int ParseEntryHeader(int fd, off_t offset, EntryHeader* head); /** - * @brief 从文件名解析first index - * @param fileName raft log文件名 - * @param[out] firstIndex segment文件包含的log entry的第一个index - * @return 成功返回0,否则返回-1 + * @brief Parsing first index from file name + * @param fileName raft log file name + * @param[out] firstIndex The first index of the log entry contained in the + * segment file + * @return successfully returns 0, otherwise returns -1 */ int ParseFirstIndexFromFileName(const std::string& fileName, int64_t* firstIndex); diff --git a/src/tools/schedule_tool.cpp b/src/tools/schedule_tool.cpp index 25cd976382..2370bdd6ca 100644 --- a/src/tools/schedule_tool.cpp +++ b/src/tools/schedule_tool.cpp @@ -20,23 +20,26 @@ * Author: lixiaocui */ +#include "src/tools/schedule_tool.h" + #include + #include -#include "src/tools/schedule_tool.h" + #include "src/tools/curve_tool_define.h" DEFINE_uint32(logical_pool_id, 1, "logical pool"); DECLARE_string(mdsAddr); DEFINE_bool(scheduleAll, true, "schedule all logical pool or not"); -DEFINE_bool(scanEnable, true, "Enable(true)/Disable(false) scan " - "for specify logical pool"); +DEFINE_bool(scanEnable, true, + "Enable(true)/Disable(false) scan " + "for specify logical pool"); namespace curve { namespace tool { bool ScheduleTool::SupportCommand(const std::string& command) { - return command == kRapidLeaderSchedule || - command == kSetScanState; + return command == kRapidLeaderSchedule || command == kSetScanState; } void ScheduleTool::PrintHelp(const std::string& cmd) { @@ -50,31 +53,28 @@ void ScheduleTool::PrintHelp(const std::string& cmd) { } void ScheduleTool::PrintRapidLeaderScheduleHelp() { - std::cout << "Example :" << std::endl + std::cout + << "Example :" << std::endl << "curve_ops_tool " << kRapidLeaderSchedule << " -logical_pool_id=1 -scheduleAll=false [-mdsAddr=127.0.0.1:6666]" - << " [-confPath=/etc/curve/tools.conf]" - << std::endl; + << " [-confPath=/etc/curve/tools.conf]" << std::endl; std::cout << "curve_ops_tool " << kRapidLeaderSchedule - << " [-mdsAddr=127.0.0.1:6666]" - << " [-confPath=/etc/curve/tools.conf]" - << std::endl; + << " [-mdsAddr=127.0.0.1:6666]" + << " [-confPath=/etc/curve/tools.conf]" << std::endl; } void ScheduleTool::PrintSetScanStateHelp() { - std::cout - << "Example:" << std::endl 
- << " curve_ops_tool " << kSetScanState - << " -logical_pool_id=1 -scanEnable=true/false" - << " [-mdsAddr=127.0.0.1:6666]" - << " [-confPath=/etc/curve/tools.conf]" - << std::endl; + std::cout << "Example:" << std::endl + << " curve_ops_tool " << kSetScanState + << " -logical_pool_id=1 -scanEnable=true/false" + << " [-mdsAddr=127.0.0.1:6666]" + << " [-confPath=/etc/curve/tools.conf]" << std::endl; } -int ScheduleTool::RunCommand(const std::string &cmd) { +int ScheduleTool::RunCommand(const std::string& cmd) { if (kRapidLeaderSchedule == cmd) { return DoRapidLeaderSchedule(); - } else if (cmd == kSetScanState) { + } else if (cmd == kSetScanState) { return DoSetScanState(); } std::cout << "Command not supported!" << std::endl; @@ -90,14 +90,14 @@ int ScheduleTool::DoSetScanState() { auto lpid = FLAGS_logical_pool_id; auto scanEnable = FLAGS_scanEnable; auto retCode = mdsClient_->SetLogicalPoolScanState(lpid, scanEnable); - std::cout << (scanEnable ? "Enable" : "Disable") - << " scan for logicalpool(" << lpid << ")" - << (retCode == 0 ? " success" : " fail") << std::endl; + std::cout << (scanEnable ? "Enable" : "Disable") << " scan for logicalpool(" + << lpid << ")" << (retCode == 0 ? " success" : " fail") + << std::endl; return retCode; } int ScheduleTool::DoRapidLeaderSchedule() { - if (0 != mdsClient_->Init(FLAGS_mdsAddr)) { + if (0 != mdsClient_->Init(FLAGS_mdsAddr)) { std::cout << "Init mds client fail!" << std::endl; return -1; } @@ -109,11 +109,11 @@ int ScheduleTool::DoRapidLeaderSchedule() { } int ScheduleTool::ScheduleOne(PoolIdType lpoolId) { - // 给mds发送rpc + // Send rpc to mds int res = mdsClient_->RapidLeaderSchedule(lpoolId); if (res != 0) { - std::cout << "RapidLeaderSchedule pool " << lpoolId - << " fail" << std::endl; + std::cout << "RapidLeaderSchedule pool " << lpoolId << " fail" + << std::endl; return -1; } return 0; diff --git a/src/tools/schedule_tool.h b/src/tools/schedule_tool.h index edc9bf44dc..094475bafc 100644 --- a/src/tools/schedule_tool.h +++ b/src/tools/schedule_tool.h @@ -25,8 +25,9 @@ #include #include -#include "src/tools/mds_client.h" + #include "src/tools/curve_tool.h" +#include "src/tools/mds_client.h" namespace curve { namespace tool { @@ -39,36 +40,37 @@ class ScheduleTool : public CurveTool { : mdsClient_(mdsClient) {} /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); /** - * @brief 打印help信息 - * @param cmd:执行的命令 - * @return 无 + * @brief Print help information + * @param cmd: Command executed + * @return None */ - void PrintHelp(const std::string &command) override; + void PrintHelp(const std::string& command) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param cmd: Command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &command) override; + int RunCommand(const std::string& command) override; private: /** - * @brief PrintRapidLeaderSchedule 打印rapid-leader-schdule的help信息 + * @brief PrintRapidLeaderSchedule Print help information for + * rapid-leader-schdule */ void PrintRapidLeaderScheduleHelp(); void PrintSetScanStateHelp(); /** - * @brief DoRapidLeaderSchedule 向mds发送rpc进行快速transfer leader + * @brief DoRapidLeaderSchedule sends rpc to mds for fast transfer leader */ int DoRapidLeaderSchedule(); diff --git a/src/tools/snapshot_check.h 
b/src/tools/snapshot_check.h index 87bf512758..0750cf5b50 100644 --- a/src/tools/snapshot_check.h +++ b/src/tools/snapshot_check.h @@ -25,60 +25,60 @@ #include #include + +#include #include #include #include -#include -#include "src/client/libcurve_file.h" #include "src/client/client_common.h" +#include "src/client/libcurve_file.h" #include "src/common/configuration.h" -#include "src/common/s3_adapter.h" #include "src/common/crc32.h" -#include "src/tools/snapshot_read.h" +#include "src/common/s3_adapter.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" +#include "src/tools/snapshot_read.h" namespace curve { namespace tool { class SnapshotCheck : public CurveTool { public: SnapshotCheck(std::shared_ptr client, - std::shared_ptr snapshot) : - client_(client), snapshot_(snapshot), inited_(false) {} + std::shared_ptr snapshot) + : client_(client), snapshot_(snapshot), inited_(false) {} ~SnapshotCheck(); - /** - * @brief 打印用法 - * @param command:查询的命令 - * @return 无 + * @brief printing usage + * @param command: Query command + * @return None */ - void PrintHelp(const std::string &command) override; + void PrintHelp(const std::string& command) override; /** - * @brief 执行命令 - * @param command:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param command: The command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &command) override; + int RunCommand(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); /** - * @brief 比较文件和快照的一致性 - * @return 成功返回0,失败返回-1 + * @brief Compare file and snapshot consistency + * @return returns 0 for success, -1 for failure */ int Check(); private: /** - * 初始化 + * Initialize */ int Init(); diff --git a/src/tools/snapshot_clone_client.cpp b/src/tools/snapshot_clone_client.cpp index 2b8be3c739..847027aab3 100644 --- a/src/tools/snapshot_clone_client.cpp +++ b/src/tools/snapshot_clone_client.cpp @@ -48,7 +48,7 @@ int SnapshotCloneClient::InitDummyServerMap(const std::string& dummyPort) { std::cout << "split dummy server fail!" 
<< std::endl; return -1; } - // 只指定了一个端口,对所有mds采用这个端口 + // Only one port has been specified, and this port is used for all mds if (dummyPortVec.size() == 1) { for (uint64_t i = 0; i < serverAddrVec_.size() - 1; ++i) { dummyPortVec.emplace_back(dummyPortVec[0]); } @@ -57,7 +57,8 @@ int SnapshotCloneClient::InitDummyServerMap(const std::string& dummyPort) { if (dummyPortVec.size() != serverAddrVec_.size()) { std::cout << "snapshot clone server dummy port list must be correspond" - " as snapshot clone addr list" << std::endl; + " as snapshot clone addr list" + << std::endl; return -1; } @@ -76,23 +77,23 @@ std::vector<std::string> SnapshotCloneClient::GetActiveAddrs() { std::vector<std::string> activeAddrs; - for (const auto &item : dummyServerMap_) { - // 获取status来判断正在服务的地址 + for (const auto& item : dummyServerMap_) { + // Obtain status to determine the address being served std::string status; - MetricRet ret = metricClient_->GetMetric(item.second, - kSnapshotCloneStatusMetricName, &status); + MetricRet ret = metricClient_->GetMetric( + item.second, kSnapshotCloneStatusMetricName, &status); if (ret != MetricRet::kOK) { - std::cout << "Get status metric from " << item.second - << " fail" << std::endl; + std::cout << "Get status metric from " << item.second << " fail" + << std::endl; continue; } if (status == kSnapshotCloneStatusActive) { - // 如果是active状态,再访问一下服务端口 - MetricRet ret = metricClient_->GetMetric(item.first, - kSnapshotCloneStatusMetricName, &status); + // If it is active, also query the service port + MetricRet ret = metricClient_->GetMetric( + item.first, kSnapshotCloneStatusMetricName, &status); if (ret != MetricRet::kOK) { - std::cout << "Get status metric from " << item.first - << " fail" << std::endl; + std::cout << "Get status metric from " << item.first << " fail" + << std::endl; continue; } activeAddrs.emplace_back(item.first); @@ -102,12 +103,13 @@ std::vector<std::string> SnapshotCloneClient::GetActiveAddrs() { void SnapshotCloneClient::GetOnlineStatus( - std::map<std::string, bool>* onlineStatus) { + std::map<std::string, bool>* onlineStatus) { onlineStatus->clear(); - for (const auto &item : dummyServerMap_) { + for (const auto& item : dummyServerMap_) { std::string listenAddr; int res = GetListenAddrFromDummyPort(item.second, &listenAddr); - // 如果获取到的监听地址与记录的mds地址不一致,也认为不在线 + // If the obtained listening address does not match the recorded MDS + // address, it is also considered offline if (res != 0 || listenAddr != item.first) { onlineStatus->emplace(item.first, false); continue; } @@ -117,10 +119,9 @@ void SnapshotCloneClient::GetOnlineStatus( } int SnapshotCloneClient::GetListenAddrFromDummyPort( - const std::string& dummyAddr, - std::string* listenAddr) { - MetricRet res = metricClient_->GetConfValueFromMetric(dummyAddr, - kSnapshotCloneConfMetricName, listenAddr); + const std::string& dummyAddr, std::string* listenAddr) { + MetricRet res = metricClient_->GetConfValueFromMetric( + dummyAddr, kSnapshotCloneConfMetricName, listenAddr); if (res != MetricRet::kOK) { return -1; }
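InitDummyServerMap above accepts either a single dummy port, which is then reused for every snapshot clone server address, or exactly one port per address; any other count is rejected. Schematically:

    // "8081"           -> every server uses dummy port 8081
    // "8081,8082,8083" -> server i uses dummyPortVec[i]
    if (dummyPortVec.size() == 1) {
        for (uint64_t i = 0; i < serverAddrVec_.size() - 1; ++i) {
            dummyPortVec.emplace_back(dummyPortVec[0]);
        }
    }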
diff --git a/src/tools/snapshot_clone_client.h b/src/tools/snapshot_clone_client.h index 295134bd50..711952686a 100644 --- a/src/tools/snapshot_clone_client.h +++ b/src/tools/snapshot_clone_client.h @@ -23,10 +23,10 @@ #ifndef SRC_TOOLS_SNAPSHOT_CLONE_CLIENT_H_ #define SRC_TOOLS_SNAPSHOT_CLONE_CLIENT_H_ -#include -#include #include +#include #include +#include #include "src/tools/metric_client.h" #include "src/tools/metric_name.h" @@ -36,66 +36,69 @@ namespace tool { class SnapshotCloneClient { public: - explicit SnapshotCloneClient(std::shared_ptr<MetricClient> metricClient) : - metricClient_(metricClient) {} + explicit SnapshotCloneClient(std::shared_ptr<MetricClient> metricClient) + : metricClient_(metricClient) {} virtual ~SnapshotCloneClient() = default; /** - * @brief 初始化,从字符串解析出地址和dummy port - * @param serverAddr snapshot clone server的地址,支持多地址,用","分隔 - * @param dummyPort dummy port列表,只输入一个的话 - * 所有server用同样的dummy port,用字符串分隔有多个的话 - * 为每个server设置不同的dummy port - * @return - * success: 0 - * failed: -1 - * no snapshot server: 1 + * @brief Initialize: parse the addresses and dummy ports from the strings + * @param serverAddr Address of snapshot clone server, supporting multiple + * addresses separated by ',' + * @param dummyPort dummy port list; if only one port is given, all + * servers use that dummy port; if several are given (comma + * separated), each server gets its own dummy port + * @return + * Success: 0 + * Failed: -1 + * No snapshot server: 1 * */ virtual int Init(const std::string& serverAddr, const std::string& dummyPort); /** - * @brief 获取当前服务的snapshot clone server的地址 + * @brief Get the addresses of the snapshot clone servers that are + * currently serving */ virtual std::vector<std::string> GetActiveAddrs(); /** - * @brief 获取snapshot clone server的在线状态 - * dummyserver在线且dummyserver记录的listen addr - * 与服务地址一致才认为在线 - * @param[out] onlineStatus 每个节点的在线状态 + * @brief Get the online status of the snapshot clone servers: a server is + * considered online only when its dummy server is online and the + * listen addr recorded by the dummy server matches the service + * address + * @param[out] onlineStatus The online status of each node */ virtual void GetOnlineStatus(std::map<std::string, bool>* onlineStatus); virtual const std::map<std::string, std::string>& GetDummyServerMap() - const { + const { return dummyServerMap_; } private: /** - * @brief 初始化dummy server地址 - * @param dummyPort dummy server端口列表 - * @return 成功返回0,失败返回-1 + * @brief Initialize the dummy server addresses + * @param dummyPort dummy server port list + * @return returns 0 for success, -1 for failure */ int InitDummyServerMap(const std::string& dummyPort); /** - * @brief 通过dummyServer获取server的监听地址 - * @param dummyAddr dummyServer的地址 - * @param[out] listenAddr 服务地址 - * @return 成功返回0,失败返回-1 + * @brief Obtain the listening address of the server through the dummy + * server + * @param dummyAddr Address of the dummy server + * @param[out] listenAddr service address + * @return returns 0 for success, -1 for failure */ int GetListenAddrFromDummyPort(const std::string& dummyAddr, std::string* listenAddr); private: - // 用于获取metric + // Used to fetch metrics std::shared_ptr<MetricClient> metricClient_; - // 保存server地址的vector + // Vector holding the server addresses std::vector<std::string> serverAddrVec_; - // 保存server地址对应的dummy server的地址 + // Map from each server address to the address of its dummy server std::map<std::string, std::string> dummyServerMap_; }; diff --git a/src/tools/status_tool.cpp b/src/tools/status_tool.cpp index 4444f51fd2..bfb01015d9 100644 --- a/src/tools/status_tool.cpp +++ b/src/tools/status_tool.cpp @@ -20,1143 +20,1377 @@ * Author: charisu */ #include "src/tools/status_tool.h" + #include DEFINE_bool(offline, false, "if true, only list offline chunskervers"); -DEFINE_bool(unhealthy, false, "if true, only list chunkserver that unhealthy " - "ratio greater than 0"); -DEFINE_bool(checkHealth, true, "if true, it will check the health " - "state of chunkserver in chunkserver-list"); -DEFINE_bool(checkCSAlive, false, "if true, it will check the online state of " - "chunkservers with rpc in chunkserver-list"); 
-DEFINE_bool(listClientInRepo, true, "if true, list-client will list all clients" - " include that in repo"); +DEFINE_bool(unhealthy, false, + "if true, only list chunkserver that unhealthy " + "ratio greater than 0"); +DEFINE_bool(checkHealth, true, + "if true, it will check the health " + "state of chunkserver in chunkserver-list"); +DEFINE_bool(checkCSAlive, false, + "if true, it will check the online state of " + "chunkservers with rpc in chunkserver-list"); +DEFINE_bool(listClientInRepo, true, + "if true, list-client will list all clients" + " include that in repo"); DEFINE_uint64(walSegmentSize, 8388608, "wal segment size"); DECLARE_string(mdsAddr); DECLARE_string(etcdAddr); DECLARE_string(mdsDummyPort); DECLARE_bool(detail); -const char* kProtocalCurve = "curve"; +const char *kProtocalCurve = "curve"; -namespace curve { -namespace tool { +namespace curve +{ + namespace tool + { -std::ostream& operator<<(std::ostream& os, - std::vector strs) { - for (uint32_t i = 0; i < strs.size(); ++i) { - if (i != 0) { - os << ", "; - } - os << strs[i]; - } - return os; -} - -std::string ToString(ServiceName name) { - static std::map serviceNameMap = - {{ServiceName::kMds, "mds"}, - {ServiceName::kEtcd, "etcd"}, - {ServiceName::kSnapshotCloneServer, - "snapshot-clone-server"}}; - return serviceNameMap[name]; -} - -int StatusTool::Init(const std::string& command) { - if (CommandNeedMds(command) && !mdsInited_) { - if (mdsClient_->Init(FLAGS_mdsAddr, FLAGS_mdsDummyPort) != 0) { - std::cout << "Init mdsClient failed!" << std::endl; - return -1; - } - if (copysetCheckCore_->Init(FLAGS_mdsAddr) != 0) { - std::cout << "Init copysetCheckCore failed!" << std::endl; - return -1; - } - mdsInited_ = true; - } - if (CommandNeedEtcd(command) && !etcdInited_) { - if (etcdClient_->Init(FLAGS_etcdAddr) != 0) { - std::cout << "Init etcdClient failed!" << std::endl; - return -1; - } - etcdInited_ = true; - } - if (CommandNeedSnapshotClone(command)) { - int snapshotRet = snapshotClient_->Init(FLAGS_snapshotCloneAddr, - FLAGS_snapshotCloneDummyPort); - switch (snapshotRet) { - case 0: - // success - break; - case 1: - // no snapshot clone server - noSnapshotServer_ = true; - break; - default: - // -1 and other - std::cout << "Init snapshotClient failed!" 
<< std::endl; - return -1; - } - } - return 0; -} - -bool StatusTool::CommandNeedEtcd(const std::string& command) { - return (command == kEtcdStatusCmd || command == kStatusCmd); -} - -bool StatusTool::CommandNeedMds(const std::string& command) { - return (command != kEtcdStatusCmd && command != kSnapshotCloneStatusCmd); -} - -bool StatusTool::CommandNeedSnapshotClone(const std::string& command) { - return (command == kSnapshotCloneStatusCmd || command == kStatusCmd); -} - -bool StatusTool::SupportCommand(const std::string& command) { - return (command == kSpaceCmd || command == kStatusCmd || - command == kChunkserverListCmd || - command == kChunkserverStatusCmd || command == kMdsStatusCmd || - command == kEtcdStatusCmd || command == kClientStatusCmd || - command == kClientListCmd || command == kSnapshotCloneStatusCmd || - command == kClusterStatusCmd || command == kServerListCmd || - command == kLogicalPoolList || command == kScanStatusCmd || - command == kFormatStatusCmd); -} - -void StatusTool::PrintHelp(const std::string& cmd) { - std::cout << "Example :" << std::endl; - std::cout << "curve_ops_tool " << cmd; - if (CommandNeedMds(cmd)) { - std::cout << " [-mdsAddr=127.0.0.1:6666]" - << " [-confPath=/etc/curve/tools.conf]"; - } - if (CommandNeedEtcd(cmd)) { - std::cout << " [-etcdAddr=127.0.0.1:6666]" - << " [-confPath=/etc/curve/tools.conf]"; - } - if (CommandNeedSnapshotClone(cmd)) { - std::cout << " [-snapshotCloneAddr=127.0.0.1:5555]" - << " [-confPath=/etc/curve/tools.conf]"; - } - - if (cmd == kChunkserverListCmd) { - std::cout << " [-offline] [-unhealthy] [-checkHealth=false]" - << " [-confPath=/etc/curve/tools.conf]" - << " [-checkCSAlive]"; - } else if (cmd == kClientStatusCmd) { - std::cout << " [-detail] [-confPath=/etc/curve/tools.conf]"; - } else if (cmd == kClientListCmd) { - std::cout << " [-listClientInRepo=false]" - << " [-confPath=/etc/curve/tools.conf]"; - } else if (cmd == kScanStatusCmd) { - std::cout << " [-logicalPoolId=1] [-copysetId=1]" << std::endl; - } - - std::cout << std::endl; -} - -int StatusTool::SpaceCmd() { - SpaceInfo spaceInfo; - int res = GetSpaceInfo(&spaceInfo); - if (res != 0) { - std::cout << "GetSpaceInfo fail!" 
<< std::endl; - return -1; - } - double physicalUsedRatio = 0; - if (spaceInfo.totalChunkSize != 0) { - physicalUsedRatio = static_cast(spaceInfo.usedChunkSize) / - spaceInfo.totalChunkSize; - } - - double logicalUsedRatio = 0; - double logicalLeftRatio = 0; - double canBeRecycledRatio = 0; - double createdFileRatio = 0; - if (spaceInfo.totalCapacity != 0) { - logicalUsedRatio = static_cast(spaceInfo.allocatedSize) / - spaceInfo.totalCapacity; - logicalLeftRatio = static_cast( - spaceInfo.totalCapacity - spaceInfo.allocatedSize) / - spaceInfo.totalCapacity; - createdFileRatio = static_cast(spaceInfo.currentFileSize) / - spaceInfo.totalCapacity; - } - if (spaceInfo.allocatedSize != 0) { - canBeRecycledRatio = static_cast(spaceInfo.recycleAllocSize) / - spaceInfo.allocatedSize; - } - std:: cout.setf(std::ios::fixed); - std::cout << std::setprecision(2); - std::cout << "Space info:" << std::endl; - std::cout << "physical: total = " - << spaceInfo.totalChunkSize / mds::kGB << "GB" - << ", used = " << spaceInfo.usedChunkSize / mds::kGB - << "GB(" << physicalUsedRatio * 100 << "%), left = " - << (spaceInfo.totalChunkSize - spaceInfo.usedChunkSize) / mds::kGB - << "GB(" << (1 - physicalUsedRatio) * 100 << "%)" << std::endl; - std::cout << "logical: total = " - << spaceInfo.totalCapacity / mds::kGB << "GB" - << ", used = " << spaceInfo.allocatedSize / mds::kGB << "GB" - << "(" << logicalUsedRatio * 100 << "%, can be recycled = " - << spaceInfo.recycleAllocSize / mds::kGB << "GB(" - << canBeRecycledRatio * 100 << "%))" - << ", left = " - << (spaceInfo.totalCapacity - spaceInfo.allocatedSize) / mds::kGB - << "GB(" << logicalLeftRatio * 100 << "%)" - << ", created file size = " - << spaceInfo.currentFileSize / mds::kGB - << "GB(" << createdFileRatio * 100 << "%)" << std::endl; - - std::cout << "Every Logicalpool Space info:" << std::endl; - for (const auto &i : spaceInfo.lpoolspaceinfo) { - std::cout << "logicalPool: name = "<< i.second.poolName - << ", poolid = " << i.first - << ", total = "<< i.second.totalCapacity / mds::kGB << "GB" - << ", used = " << i.second.allocatedSize / mds::kGB << "GB" - << ", left = " << (i.second.totalCapacity - - i.second.allocatedSize) / mds::kGB - << "GB"<< std::endl; - } - return 0; -} - -int StatusTool::FormatStatusCmd() { - std::vector formatStatus; - int res = mdsClient_->ListChunkFormatStatus(&formatStatus); - if (res != 0) { - std::cout << "ListChunkserversInCluster fail!" << std::endl; - return -1; - } - for (auto stat : formatStatus) { - std::cout << "ip:" << stat.ip() << " port:" << stat.port() - << " id:" << stat.chunkserverid() - << " format percent:" << stat.formatpercent() << std::endl; - } - return 0; -} - -int StatusTool::ChunkServerListCmd() { - std::vector chunkservers; - int res = mdsClient_->ListChunkServersInCluster(&chunkservers); - if (res != 0) { - std::cout << "ListChunkserversInCluster fail!" 
<< std::endl; - return -1; - } - - std::cout << "curve chunkserver list: " << std::endl; - uint64_t total = 0; - uint64_t online = 0; - uint64_t offline = 0; - uint64_t unstable = 0; - uint64_t pendding = 0; - uint64_t retired = 0; - uint64_t penddingCopyset = 0; - for (auto& chunkserver : chunkservers) { - auto csId = chunkserver.chunkserverid(); - std::vector copysets; - int ret = mdsClient_->GetCopySetsInChunkServer(csId, ©sets); - if (ret != 0) { - std::cout << "GetCopySetsInChunkServer fail, chunkserver id = " - << csId; - return -1; + std::ostream &operator<<(std::ostream &os, std::vector strs) + { + for (uint32_t i = 0; i < strs.size(); ++i) + { + if (i != 0) + { + os << ", "; + } + os << strs[i]; + } + return os; } - double unhealthyRatio = 0.0; - if (FLAGS_checkCSAlive) { - // 发RPC重置online状态 - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); - bool isOnline = copysetCheckCore_->CheckChunkServerOnline(csAddr); - if (isOnline) { - chunkserver.set_onlinestate(OnlineState::ONLINE); - } else { - chunkserver.set_onlinestate(OnlineState::OFFLINE); - } + std::string ToString(ServiceName name) + { + static std::map serviceNameMap = { + {ServiceName::kMds, "mds"}, + {ServiceName::kEtcd, "etcd"}, + {ServiceName::kSnapshotCloneServer, "snapshot-clone-server"}}; + return serviceNameMap[name]; } - if (chunkserver.onlinestate() != OnlineState::ONLINE) { - if (chunkserver.onlinestate() == OnlineState::OFFLINE) { - offline++; - } - if (chunkserver.onlinestate() == OnlineState::UNSTABLE) { - unstable++; + int StatusTool::Init(const std::string &command) + { + if (CommandNeedMds(command) && !mdsInited_) + { + if (mdsClient_->Init(FLAGS_mdsAddr, FLAGS_mdsDummyPort) != 0) + { + std::cout << "Init mdsClient failed!" << std::endl; + return -1; + } + if (copysetCheckCore_->Init(FLAGS_mdsAddr) != 0) + { + std::cout << "Init copysetCheckCore failed!" << std::endl; + return -1; + } + mdsInited_ = true; } - unhealthyRatio = 1; - } else { - if (FLAGS_offline) { - continue; + if (CommandNeedEtcd(command) && !etcdInited_) + { + if (etcdClient_->Init(FLAGS_etcdAddr) != 0) + { + std::cout << "Init etcdClient failed!" << std::endl; + return -1; + } + etcdInited_ = true; } - if (FLAGS_checkHealth) { - copysetCheckCore_->CheckCopysetsOnChunkServer(csId); - const auto& statistics = - copysetCheckCore_->GetCopysetStatistics(); - unhealthyRatio = statistics.unhealthyRatio; - if (FLAGS_unhealthy && unhealthyRatio == 0) { - continue; + if (CommandNeedSnapshotClone(command)) + { + int snapshotRet = snapshotClient_->Init(FLAGS_snapshotCloneAddr, + FLAGS_snapshotCloneDummyPort); + switch (snapshotRet) + { + case 0: + // success + break; + case 1: + // no snapshot clone server + noSnapshotServer_ = true; + break; + default: + // -1 and other + std::cout << "Init snapshotClient failed!" 
<< std::endl; + return -1; } } - online++; - } - if (chunkserver.status() == ChunkServerStatus::PENDDING) { - pendding++; - penddingCopyset += copysets.size(); + return 0; } - if (chunkserver.status() == ChunkServerStatus::RETIRED) { - retired++; + + bool StatusTool::CommandNeedEtcd(const std::string &command) + { + return (command == kEtcdStatusCmd || command == kStatusCmd); } - total++; - std::cout << "chunkServerID = " << csId - << ", diskType = " << chunkserver.disktype() - << ", hostIP = " << chunkserver.hostip() - << ", port = " << chunkserver.port() - << ", rwStatus = " - << ChunkServerStatus_Name(chunkserver.status()) - << ", diskState = " - << DiskState_Name(chunkserver.diskstatus()) - << ", onlineState = " - << OnlineState_Name(chunkserver.onlinestate()) - << ", copysetNum = " << copysets.size() - << ", mountPoint = " << chunkserver.mountpoint() - << ", diskCapacity = " << chunkserver.diskcapacity() - / curve::mds::kGB << " GB" - << ", diskUsed = " << chunkserver.diskused() - / curve::mds::kGB << " GB"; - if (FLAGS_checkHealth) { - std::cout << ", unhealthyCopysetRatio = " - << unhealthyRatio * 100 << "%"; + + bool StatusTool::CommandNeedMds(const std::string &command) + { + return (command != kEtcdStatusCmd && command != kSnapshotCloneStatusCmd); } - if (chunkserver.has_externalip()) { - std::cout << ", externalIP = " << chunkserver.externalip(); + + bool StatusTool::CommandNeedSnapshotClone(const std::string &command) + { + return (command == kSnapshotCloneStatusCmd || command == kStatusCmd); } - std::cout << std::endl; - } - std::cout << "total: " << total << ", online: " << online; - if (!FLAGS_checkCSAlive) { - std::cout <<", unstable: " << unstable; - } - std::cout << ", offline: " << offline << std::endl; - - std::cout << "pendding: " << pendding - << ", penddingCopyset: " << penddingCopyset - << ", retired:" << retired << std::endl; - return 0; -} - -int StatusTool::ServerListCmd() { - std::vector servers; - int res = mdsClient_->ListServersInCluster(&servers); - if (res != 0) { - std::cout << "ListServersInCluster fail!" << std::endl; - return -1; - } - std::cout << "curve server list: " << std::endl; - uint64_t total = 0; - for (auto& server : servers) { - total++; - std::cout << "serverID = " << server.serverid() - << ", hostName = " << server.hostname() - << ", internalIP = " << server.internalip() - << ", internalPort = " << server.internalport() - << ", externalIp = " << server.externalip() - << ", externalPort = " << server.externalport() - << ", zoneID = " << server.zoneid() - << ", poolID = " << server.physicalpoolid() << std::endl; - } - std::cout << "total: " << total << std::endl; - return 0; -} - -int StatusTool::LogicalPoolListCmd() { - std::vector lgPools; - int res = mdsClient_->ListLogicalPoolsInCluster(&lgPools); - if (res != 0) { - std::cout << "ListLogicalPoolsInCluster fail!" << std::endl; - return -1; - } - std::cout << "curve logical pool list: " << std::endl; - uint64_t total = 0; - uint64_t allocSize; - AllocMap allocMap; - res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, - &allocSize, &allocMap); - if (res != 0) { - std::cout << "GetAllocatedSize of recycle bin fail!" << std::endl; - return -1; - } - for (auto& lgPool : lgPools) { - total++; - std::string poolName = lgPool.logicalpoolname(); - uint64_t totalSize; - std::string metricName = GetPoolLogicalCapacityName(poolName); - res = mdsClient_->GetMetric(metricName, &totalSize); - if (res != 0) { - std::cout << "Get logical capacity from mds fail!" 
<< std::endl; - return -1; + + bool StatusTool::SupportCommand(const std::string &command) + { + return (command == kSpaceCmd || command == kStatusCmd || + command == kChunkserverListCmd || + command == kChunkserverStatusCmd || command == kMdsStatusCmd || + command == kEtcdStatusCmd || command == kClientStatusCmd || + command == kClientListCmd || command == kSnapshotCloneStatusCmd || + command == kClusterStatusCmd || command == kServerListCmd || + command == kLogicalPoolList || command == kScanStatusCmd || + command == kFormatStatusCmd); } - uint64_t usedSize; - metricName = GetPoolLogicalAllocName(poolName); - res = mdsClient_->GetMetric(metricName, &usedSize); - if (res != 0) { - std::cout << "Get logical alloc size from mds fail!" << std::endl; - return -1; + + void StatusTool::PrintHelp(const std::string &cmd) + { + std::cout << "Example :" << std::endl; + std::cout << "curve_ops_tool " << cmd; + if (CommandNeedMds(cmd)) + { + std::cout << " [-mdsAddr=127.0.0.1:6666]" + << " [-confPath=/etc/curve/tools.conf]"; + } + if (CommandNeedEtcd(cmd)) + { + std::cout << " [-etcdAddr=127.0.0.1:6666]" + << " [-confPath=/etc/curve/tools.conf]"; + } + if (CommandNeedSnapshotClone(cmd)) + { + std::cout << " [-snapshotCloneAddr=127.0.0.1:5555]" + << " [-confPath=/etc/curve/tools.conf]"; + } + + if (cmd == kChunkserverListCmd) + { + std::cout << " [-offline] [-unhealthy] [-checkHealth=false]" + << " [-confPath=/etc/curve/tools.conf]" + << " [-checkCSAlive]"; + } + else if (cmd == kClientStatusCmd) + { + std::cout << " [-detail] [-confPath=/etc/curve/tools.conf]"; + } + else if (cmd == kClientListCmd) + { + std::cout << " [-listClientInRepo=false]" + << " [-confPath=/etc/curve/tools.conf]"; + } + else if (cmd == kScanStatusCmd) + { + std::cout << " [-logicalPoolId=1] [-copysetId=1]" << std::endl; + } + + std::cout << std::endl; } - double usedRatio = 0; - if (total != 0) { - usedRatio = static_cast(usedSize) / totalSize; + + int StatusTool::SpaceCmd() + { + SpaceInfo spaceInfo; + int res = GetSpaceInfo(&spaceInfo); + if (res != 0) + { + std::cout << "GetSpaceInfo fail!" 
<< std::endl; + return -1; + } + double physicalUsedRatio = 0; + if (spaceInfo.totalChunkSize != 0) + { + physicalUsedRatio = static_cast<double>(spaceInfo.usedChunkSize) / + spaceInfo.totalChunkSize; + } + + double logicalUsedRatio = 0; + double logicalLeftRatio = 0; + double canBeRecycledRatio = 0; + double createdFileRatio = 0; + if (spaceInfo.totalCapacity != 0) + { + logicalUsedRatio = static_cast<double>(spaceInfo.allocatedSize) / + spaceInfo.totalCapacity; + logicalLeftRatio = static_cast<double>(spaceInfo.totalCapacity - + spaceInfo.allocatedSize) / + spaceInfo.totalCapacity; + createdFileRatio = static_cast<double>(spaceInfo.currentFileSize) / + spaceInfo.totalCapacity; + } + if (spaceInfo.allocatedSize != 0) + { + canBeRecycledRatio = static_cast<double>(spaceInfo.recycleAllocSize) / + spaceInfo.allocatedSize; + } + std::cout.setf(std::ios::fixed); + std::cout << std::setprecision(2); + std::cout << "Space info:" << std::endl; + std::cout << "physical: total = " << spaceInfo.totalChunkSize / mds::kGB + << "GB" + << ", used = " << spaceInfo.usedChunkSize / mds::kGB << "GB(" + << physicalUsedRatio * 100 << "%), left = " + << (spaceInfo.totalChunkSize - spaceInfo.usedChunkSize) / mds::kGB + << "GB(" << (1 - physicalUsedRatio) * 100 << "%)" << std::endl; + std::cout << "logical: total = " << spaceInfo.totalCapacity / mds::kGB + << "GB" + << ", used = " << spaceInfo.allocatedSize / mds::kGB << "GB" + << "(" << logicalUsedRatio * 100 << "%, can be recycled = " + << spaceInfo.recycleAllocSize / mds::kGB << "GB(" + << canBeRecycledRatio * 100 << "%))" + << ", left = " + << (spaceInfo.totalCapacity - spaceInfo.allocatedSize) / mds::kGB + << "GB(" << logicalLeftRatio * 100 << "%)" + << ", created file size = " + << spaceInfo.currentFileSize / mds::kGB << "GB(" + << createdFileRatio * 100 << "%)" << std::endl; + + std::cout << "Every Logicalpool Space info:" << std::endl; + for (const auto &i : spaceInfo.lpoolspaceinfo) + { + std::cout << "logicalPool: name = " << i.second.poolName + << ", poolid = " << i.first + << ", total = " << i.second.totalCapacity / mds::kGB << "GB" + << ", used = " << i.second.allocatedSize / mds::kGB << "GB" + << ", left = " + << (i.second.totalCapacity - i.second.allocatedSize) / + mds::kGB + << "GB" << std::endl; + } + return 0; } - + int StatusTool::FormatStatusCmd() + { + std::vector formatStatus; + int res = mdsClient_->ListChunkFormatStatus(&formatStatus); + if (res != 0) + { + std::cout << "ListChunkFormatStatus fail!"
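
Each percentage SpaceCmd prints is a used/total quotient, guarded against a zero denominator and rendered in fixed two-decimal form. The guard-then-format idiom in isolation, with made-up values:

#include <cstdint>
#include <iomanip>
#include <iostream>

int main() {
    const uint64_t kGB = 1024ULL * 1024 * 1024;
    uint64_t total = 500 * kGB, used = 123 * kGB;  // sample values only
    double usedRatio = 0;
    if (total != 0) {  // avoid division by zero on an empty pool
        usedRatio = static_cast<double>(used) / total;
    }
    std::cout.setf(std::ios::fixed);
    std::cout << std::setprecision(2) << "used = " << used / kGB << "GB("
              << usedRatio * 100 << "%)" << std::endl;  // used = 123GB(24.60%)
    return 0;
}
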
<< std::endl; + return -1; + } + for (auto stat : formatStatus) + { + std::cout << "ip:" << stat.ip() << " port:" << stat.port() + << " id:" << stat.chunkserverid() + << " format percent:" << stat.formatpercent() << std::endl; + } + return 0; } - std::cout << "id = " << lgPool.logicalpoolid() - << ", name = " << lgPool.logicalpoolname() - << ", physicalPoolID = " << lgPool.physicalpoolid() - << ", type = " - << curve::mds::topology::LogicalPoolType_Name(lgPool.type()) - << ", scanEnable = " << lgPool.scanenable() - << ", allocateStatus = " - << curve::mds::topology:: - AllocateStatus_Name(lgPool.allocatestatus()) - << ", total space = " << totalSize / curve::mds::kGB << "GB" - << ", used space = " << usedSize / curve::mds::kGB << "GB" - << "(" << usedRatio * 100 << "%, can be recycled = " - << canBeRecycle / curve::mds::kGB << "GB" - << "(" << recycleRatio * 100 << "%))" << ", left space = " - << (totalSize - usedSize) / curve::mds::kGB - << "GB(" << (1 - usedRatio) * 100 << "%)" << std::endl; - } - std::cout << "total: " << total << std::endl; - return 0; -} - -int StatusTool::StatusCmd() { - int res = PrintClusterStatus(); - bool success = true; - if (res != 0) { - success = false; - } - std::cout << std::endl; - res = PrintClientStatus(); - if (res != 0) { - success = false; - } - std::cout << std::endl; - res = PrintMdsStatus(); - if (res != 0) { - success = false; - } - std::cout << std::endl; - res = PrintEtcdStatus(); - if (res != 0) { - success = false; - } - std::cout << std::endl; - res = PrintSnapshotCloneStatus(); - if (res != 0) { - success = false; - } - std::cout << std::endl; - res = PrintChunkserverStatus(); - if (res != 0) { - success = false; - } - if (success) { - return 0; - } else { - return -1; - } -} - -int StatusTool::ChunkServerStatusCmd() { - return PrintChunkserverStatus(false); -} - -int StatusTool::PrintClusterStatus() { - int ret = 0; - std::cout << "Cluster status:" << std::endl; - bool healthy = IsClusterHeatlhy(); - if (healthy) { - std::cout << "cluster is healthy" << std::endl; - } else { - std::cout << "cluster is not healthy" << std::endl; - ret = -1; - } - const auto& statistics = copysetCheckCore_->GetCopysetStatistics(); - std::cout << "total copysets: " << statistics.totalNum - << ", unhealthy copysets: " << statistics.unhealthyNum - << ", unhealthy_ratio: " - << statistics.unhealthyRatio * 100 << "%" << std::endl; - std::vector phyPools; - std::vector lgPools; - int res = GetPoolsInCluster(&phyPools, &lgPools); - if (res != 0) { - std::cout << "GetPoolsInCluster fail!" << std::endl; - ret = -1; - } - std::cout << "physical pool number: " << phyPools.size() - << ", logical pool number: " << lgPools.size() << std::endl; - res = SpaceCmd(); - if (res != 0) { - ret = -1; - } - return ret; -} - -bool StatusTool::IsClusterHeatlhy() { - bool ret = true; - // 1、检查copyset健康状态 - int res = copysetCheckCore_->CheckCopysetsInCluster(); - if (res != 0) { - std::cout << "Copysets are not healthy!" 
<< std::endl; - ret = false; - } - - // 2、检查mds状态 - if (!CheckServiceHealthy(ServiceName::kMds)) { - ret = false; - } - - // 3、检查etcd在线状态 - if (!CheckServiceHealthy(ServiceName::kEtcd)) { - ret = false; - } - - // 4、检查snapshot clone server状态 - if (!noSnapshotServer_ && - !CheckServiceHealthy(ServiceName::kSnapshotCloneServer)) { - ret = false; - } - - return ret; -} - -bool StatusTool::CheckServiceHealthy(const ServiceName& name) { - std::vector leaderVec; - std::map onlineStatus; - switch (name) { - case ServiceName::kMds: { - leaderVec = mdsClient_->GetCurrentMds(); - mdsClient_->GetMdsOnlineStatus(&onlineStatus); - break; + + int StatusTool::ChunkServerListCmd() + { + std::vector chunkservers; + int res = mdsClient_->ListChunkServersInCluster(&chunkservers); + if (res != 0) + { + std::cout << "ListChunkserversInCluster fail!" << std::endl; + return -1; + } + + std::cout << "curve chunkserver list: " << std::endl; + uint64_t total = 0; + uint64_t online = 0; + uint64_t offline = 0; + uint64_t unstable = 0; + uint64_t pendding = 0; + uint64_t retired = 0; + uint64_t penddingCopyset = 0; + for (auto &chunkserver : chunkservers) + { + auto csId = chunkserver.chunkserverid(); + std::vector copysets; + int ret = mdsClient_->GetCopySetsInChunkServer(csId, ©sets); + if (ret != 0) + { + std::cout << "GetCopySetsInChunkServer fail, chunkserver id = " + << csId; + return -1; + } + + double unhealthyRatio = 0.0; + if (FLAGS_checkCSAlive) + { + // Send RPC to reset online status + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); + bool isOnline = copysetCheckCore_->CheckChunkServerOnline(csAddr); + if (isOnline) + { + chunkserver.set_onlinestate(OnlineState::ONLINE); + } + else + { + chunkserver.set_onlinestate(OnlineState::OFFLINE); + } + } + if (chunkserver.onlinestate() != OnlineState::ONLINE) + { + if (chunkserver.onlinestate() == OnlineState::OFFLINE) + { + offline++; + } + + if (chunkserver.onlinestate() == OnlineState::UNSTABLE) + { + unstable++; + } + unhealthyRatio = 1; + } + else + { + if (FLAGS_offline) + { + continue; + } + if (FLAGS_checkHealth) + { + copysetCheckCore_->CheckCopysetsOnChunkServer(csId); + const auto &statistics = + copysetCheckCore_->GetCopysetStatistics(); + unhealthyRatio = statistics.unhealthyRatio; + if (FLAGS_unhealthy && unhealthyRatio == 0) + { + continue; + } + } + online++; + } + if (chunkserver.status() == ChunkServerStatus::PENDDING) + { + pendding++; + penddingCopyset += copysets.size(); + } + if (chunkserver.status() == ChunkServerStatus::RETIRED) + { + retired++; + } + total++; + std::cout << "chunkServerID = " << csId + << ", diskType = " << chunkserver.disktype() + << ", hostIP = " << chunkserver.hostip() + << ", port = " << chunkserver.port() << ", rwStatus = " + << ChunkServerStatus_Name(chunkserver.status()) + << ", diskState = " + << DiskState_Name(chunkserver.diskstatus()) + << ", onlineState = " + << OnlineState_Name(chunkserver.onlinestate()) + << ", copysetNum = " << copysets.size() + << ", mountPoint = " << chunkserver.mountpoint() + << ", diskCapacity = " + << chunkserver.diskcapacity() / curve::mds::kGB << " GB" + << ", diskUsed = " << chunkserver.diskused() / curve::mds::kGB + << " GB"; + if (FLAGS_checkHealth) + { + std::cout << ", unhealthyCopysetRatio = " << unhealthyRatio * 100 + << "%"; + } + if (chunkserver.has_externalip()) + { + std::cout << ", externalIP = " << chunkserver.externalip(); + } + std::cout << std::endl; + } + std::cout << "total: " << total << ", online: " << online; + if 
(!FLAGS_checkCSAlive) + { + std::cout << ", unstable: " << unstable; + } + std::cout << ", offline: " << offline << std::endl; + + std::cout << "pendding: " << pendding + << ", penddingCopyset: " << penddingCopyset + << ", retired:" << retired << std::endl; + return 0; } - case ServiceName::kEtcd: { - int res = etcdClient_->GetEtcdClusterStatus(&leaderVec, - &onlineStatus); - if (res != 0) { - std:: cout << "GetEtcdClusterStatus fail!" << std::endl; - return false; + + int StatusTool::ServerListCmd() + { + std::vector servers; + int res = mdsClient_->ListServersInCluster(&servers); + if (res != 0) + { + std::cout << "ListServersInCluster fail!" << std::endl; + return -1; } - break; + std::cout << "curve server list: " << std::endl; + uint64_t total = 0; + for (auto &server : servers) + { + total++; + std::cout << "serverID = " << server.serverid() + << ", hostName = " << server.hostname() + << ", internalIP = " << server.internalip() + << ", internalPort = " << server.internalport() + << ", externalIp = " << server.externalip() + << ", externalPort = " << server.externalport() + << ", zoneID = " << server.zoneid() + << ", poolID = " << server.physicalpoolid() << std::endl; + } + std::cout << "total: " << total << std::endl; + return 0; } - case ServiceName::kSnapshotCloneServer: { - leaderVec = snapshotClient_->GetActiveAddrs(); - snapshotClient_->GetOnlineStatus(&onlineStatus); - break; + + int StatusTool::LogicalPoolListCmd() + { + std::vector lgPools; + int res = mdsClient_->ListLogicalPoolsInCluster(&lgPools); + if (res != 0) + { + std::cout << "ListLogicalPoolsInCluster fail!" << std::endl; + return -1; + } + std::cout << "curve logical pool list: " << std::endl; + uint64_t total = 0; + uint64_t allocSize; + AllocMap allocMap; + res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, &allocSize, + &allocMap); + if (res != 0) + { + std::cout << "GetAllocatedSize of recycle bin fail!" << std::endl; + return -1; + } + for (auto &lgPool : lgPools) + { + total++; + std::string poolName = lgPool.logicalpoolname(); + uint64_t totalSize; + std::string metricName = GetPoolLogicalCapacityName(poolName); + res = mdsClient_->GetMetric(metricName, &totalSize); + if (res != 0) + { + std::cout << "Get logical capacity from mds fail!" << std::endl; + return -1; + } + uint64_t usedSize; + metricName = GetPoolLogicalAllocName(poolName); + res = mdsClient_->GetMetric(metricName, &usedSize); + if (res != 0) + { + std::cout << "Get logical alloc size from mds fail!" 
<< std::endl; + return -1; + } + double usedRatio = 0; + if (total != 0) + { + usedRatio = static_cast(usedSize) / totalSize; + } + uint64_t canBeRecycle = allocMap[lgPool.logicalpoolid()]; + double recycleRatio = 0; + if (usedSize != 0) + { + recycleRatio = static_cast(canBeRecycle) / usedSize; + } + std::cout << "id = " << lgPool.logicalpoolid() + << ", name = " << lgPool.logicalpoolname() + << ", physicalPoolID = " << lgPool.physicalpoolid() + << ", type = " + << curve::mds::topology::LogicalPoolType_Name(lgPool.type()) + << ", scanEnable = " << lgPool.scanenable() + << ", allocateStatus = " + << curve::mds::topology::AllocateStatus_Name( + lgPool.allocatestatus()) + << ", total space = " << totalSize / curve::mds::kGB << "GB" + << ", used space = " << usedSize / curve::mds::kGB << "GB" + << "(" << usedRatio * 100 + << "%, can be recycled = " << canBeRecycle / curve::mds::kGB + << "GB" + << "(" << recycleRatio * 100 << "%))" + << ", left space = " + << (totalSize - usedSize) / curve::mds::kGB << "GB(" + << (1 - usedRatio) * 100 << "%)" << std::endl; + } + std::cout << "total: " << total << std::endl; + return 0; } - default: { - std::cout << "Unknown service" << std::endl; - return false; + + int StatusTool::StatusCmd() + { + int res = PrintClusterStatus(); + bool success = true; + if (res != 0) + { + success = false; + } + std::cout << std::endl; + res = PrintClientStatus(); + if (res != 0) + { + success = false; + } + std::cout << std::endl; + res = PrintMdsStatus(); + if (res != 0) + { + success = false; + } + std::cout << std::endl; + res = PrintEtcdStatus(); + if (res != 0) + { + success = false; + } + std::cout << std::endl; + res = PrintSnapshotCloneStatus(); + if (res != 0) + { + success = false; + } + std::cout << std::endl; + res = PrintChunkserverStatus(); + if (res != 0) + { + success = false; + } + if (success) + { + return 0; + } + else + { + return -1; + } } - } - bool ret = true; - if (leaderVec.empty()) { - std::cout << "No " << ToString(name) << " is active" << std::endl; - ret = false; - } else if (leaderVec.size() != 1) { - std::cout << "More than one " << ToString(name) << " is active" - << std::endl; - ret = false; - } - for (const auto& item : onlineStatus) { - if (!item.second) { - std::cout << ToString(name) << " " << item.first << " is offline" + + int StatusTool::ChunkServerStatusCmd() { return PrintChunkserverStatus(false); } + + int StatusTool::PrintClusterStatus() + { + int ret = 0; + std::cout << "Cluster status:" << std::endl; + bool healthy = IsClusterHeatlhy(); + if (healthy) + { + std::cout << "cluster is healthy" << std::endl; + } + else + { + std::cout << "cluster is not healthy" << std::endl; + ret = -1; + } + const auto &statistics = copysetCheckCore_->GetCopysetStatistics(); + std::cout << "total copysets: " << statistics.totalNum + << ", unhealthy copysets: " << statistics.unhealthyNum + << ", unhealthy_ratio: " << statistics.unhealthyRatio * 100 << "%" << std::endl; - ret = false; - } - } - return ret; -} - -void StatusTool::PrintOnlineStatus(const std::string& name, - const std::map& onlineStatus) { - std::vector online; - std::vector offline; - for (const auto& item : onlineStatus) { - if (item.second) { - online.emplace_back(item.first); - } else { - offline.emplace_back(item.first); + std::vector phyPools; + std::vector lgPools; + int res = GetPoolsInCluster(&phyPools, &lgPools); + if (res != 0) + { + std::cout << "GetPoolsInCluster fail!" 
<< std::endl; + ret = -1; + } + std::cout << "physical pool number: " << phyPools.size() + << ", logical pool number: " << lgPools.size() << std::endl; + res = SpaceCmd(); + if (res != 0) + { + ret = -1; + } + return ret; } - } - std::cout << "online " << name << " list: "; - for (uint64_t i = 0; i < online.size(); ++i) { - if (i != 0) { - std::cout << ", "; + + bool StatusTool::IsClusterHeatlhy() + { + bool ret = true; + // 1. Check the health status of copyset + int res = copysetCheckCore_->CheckCopysetsInCluster(); + if (res != 0) + { + std::cout << "Copysets are not healthy!" << std::endl; + ret = false; + } + + // 2. Check the mds status + if (!CheckServiceHealthy(ServiceName::kMds)) + { + ret = false; + } + + // 3. Check the online status of ETCD + if (!CheckServiceHealthy(ServiceName::kEtcd)) + { + ret = false; + } + + // 4. Check the status of the snapshot clone server + if (!noSnapshotServer_ && + !CheckServiceHealthy(ServiceName::kSnapshotCloneServer)) + { + ret = false; + } + + return ret; } - std::cout << online[i]; - } - std::cout << std::endl; - - std::cout << "offline " << name << " list: "; - for (uint64_t i = 0; i < offline.size(); ++i) { - if (i != 0) { - std::cout << ", "; + + bool StatusTool::CheckServiceHealthy(const ServiceName &name) + { + std::vector leaderVec; + std::map onlineStatus; + switch (name) + { + case ServiceName::kMds: + { + leaderVec = mdsClient_->GetCurrentMds(); + mdsClient_->GetMdsOnlineStatus(&onlineStatus); + break; + } + case ServiceName::kEtcd: + { + int res = + etcdClient_->GetEtcdClusterStatus(&leaderVec, &onlineStatus); + if (res != 0) + { + std::cout << "GetEtcdClusterStatus fail!" << std::endl; + return false; + } + break; + } + case ServiceName::kSnapshotCloneServer: + { + leaderVec = snapshotClient_->GetActiveAddrs(); + snapshotClient_->GetOnlineStatus(&onlineStatus); + break; + } + default: + { + std::cout << "Unknown service" << std::endl; + return false; + } + } + bool ret = true; + if (leaderVec.empty()) + { + std::cout << "No " << ToString(name) << " is active" << std::endl; + ret = false; + } + else if (leaderVec.size() != 1) + { + std::cout << "More than one " << ToString(name) << " is active" + << std::endl; + ret = false; + } + for (const auto &item : onlineStatus) + { + if (!item.second) + { + std::cout << ToString(name) << " " << item.first << " is offline" + << std::endl; + ret = false; + } + } + return ret; } - std::cout << offline[i]; - } - std::cout << std::endl; -} - -int StatusTool::PrintMdsStatus() { - std::cout << "MDS status:" << std::endl; - std::string version; - std::vector failedList; - int res = versionTool_->GetAndCheckMdsVersion(&version, &failedList); - int ret = 0; - if (res != 0) { - std::cout << "GetAndCheckMdsVersion fail" << std::endl; - ret = -1; - } else { - std::cout << "version: " << version << std::endl; - if (!failedList.empty()) { - versionTool_->PrintFailedList(failedList); - ret = -1; + + void StatusTool::PrintOnlineStatus( + const std::string &name, const std::map &onlineStatus) + { + std::vector online; + std::vector offline; + for (const auto &item : onlineStatus) + { + if (item.second) + { + online.emplace_back(item.first); + } + else + { + offline.emplace_back(item.first); + } + } + std::cout << "online " << name << " list: "; + for (uint64_t i = 0; i < online.size(); ++i) + { + if (i != 0) + { + std::cout << ", "; + } + std::cout << online[i]; + } + std::cout << std::endl; + + std::cout << "offline " << name << " list: "; + for (uint64_t i = 0; i < offline.size(); ++i) + { + if (i != 0) + { + 
std::cout << ", "; + } + std::cout << offline[i]; + } + std::cout << std::endl; } - } - std::vector mdsAddrs = mdsClient_->GetCurrentMds(); - std::cout << "current MDS: " << mdsAddrs << std::endl; - std::map onlineStatus; - mdsClient_->GetMdsOnlineStatus(&onlineStatus); - if (res != 0) { - std::cout << "GetMdsOnlineStatus fail!" << std::endl; - ret = -1; - } else { - PrintOnlineStatus("mds", onlineStatus); - } - return ret; -} - -int StatusTool::PrintEtcdStatus() { - std::cout << "Etcd status:" << std::endl; - std::string version; - std::vector failedList; - int res = etcdClient_->GetAndCheckEtcdVersion(&version, &failedList); - int ret = 0; - if (res != 0) { - std::cout << "GetAndCheckEtcdVersion fail" << std::endl; - ret = -1; - } else { - std::cout << "version: " << version << std::endl; - if (!failedList.empty()) { - VersionTool::PrintFailedList(failedList); - ret = -1; + + int StatusTool::PrintMdsStatus() + { + std::cout << "MDS status:" << std::endl; + std::string version; + std::vector failedList; + int res = versionTool_->GetAndCheckMdsVersion(&version, &failedList); + int ret = 0; + if (res != 0) + { + std::cout << "GetAndCheckMdsVersion fail" << std::endl; + ret = -1; + } + else + { + std::cout << "version: " << version << std::endl; + if (!failedList.empty()) + { + versionTool_->PrintFailedList(failedList); + ret = -1; + } + } + std::vector mdsAddrs = mdsClient_->GetCurrentMds(); + std::cout << "current MDS: " << mdsAddrs << std::endl; + std::map onlineStatus; + mdsClient_->GetMdsOnlineStatus(&onlineStatus); + if (res != 0) + { + std::cout << "GetMdsOnlineStatus fail!" << std::endl; + ret = -1; + } + else + { + PrintOnlineStatus("mds", onlineStatus); + } + return ret; } - } - std::vector leaderAddrVec; - std::map onlineStatus; - res = etcdClient_->GetEtcdClusterStatus(&leaderAddrVec, &onlineStatus); - if (res != 0) { - std::cout << "GetEtcdClusterStatus fail!" << std::endl; - return -1; - } - std::cout << "current etcd: " << leaderAddrVec << std::endl; - PrintOnlineStatus("etcd", onlineStatus); - return ret; -} - -int StatusTool::PrintSnapshotCloneStatus() { - std::cout << "SnapshotCloneServer status:" << std::endl; - if (noSnapshotServer_) { - std::cout << "No SnapshotCloneServer" << std::endl; - return 0; - } - std::string version; - std::vector failedList; - int res = versionTool_->GetAndCheckSnapshotCloneVersion(&version, - &failedList); - int ret = 0; - if (res != 0) { - std::cout << "GetAndCheckSnapshotCloneVersion fail" << std::endl; - ret = -1; - } else { - std::cout << "version: " << version << std::endl; - if (!failedList.empty()) { - versionTool_->PrintFailedList(failedList); - ret = -1; + + int StatusTool::PrintEtcdStatus() + { + std::cout << "Etcd status:" << std::endl; + std::string version; + std::vector failedList; + int res = etcdClient_->GetAndCheckEtcdVersion(&version, &failedList); + int ret = 0; + if (res != 0) + { + std::cout << "GetAndCheckEtcdVersion fail" << std::endl; + ret = -1; + } + else + { + std::cout << "version: " << version << std::endl; + if (!failedList.empty()) + { + VersionTool::PrintFailedList(failedList); + ret = -1; + } + } + std::vector leaderAddrVec; + std::map onlineStatus; + res = etcdClient_->GetEtcdClusterStatus(&leaderAddrVec, &onlineStatus); + if (res != 0) + { + std::cout << "GetEtcdClusterStatus fail!" 
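
CheckServiceHealthy above reduces every service to the same two-part predicate: exactly one active leader, and no member reported offline. Distilled into a standalone function, with plain address strings standing in for the client lookups:

#include <map>
#include <string>
#include <vector>

// Healthy iff exactly one instance is active and all members are online.
bool ServiceHealthy(const std::vector<std::string>& activeLeaders,
                    const std::map<std::string, bool>& onlineStatus) {
    if (activeLeaders.size() != 1) {
        return false;  // none active, or more than one claiming leadership
    }
    for (const auto& item : onlineStatus) {
        if (!item.second) {
            return false;  // item.first is the offline member's address
        }
    }
    return true;
}
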
<< std::endl; + return -1; + } + std::cout << "current etcd: " << leaderAddrVec << std::endl; + PrintOnlineStatus("etcd", onlineStatus); + return ret; } - } - std::vector activeAddrs = snapshotClient_->GetActiveAddrs(); - std::map onlineStatus; - snapshotClient_->GetOnlineStatus(&onlineStatus); - std::cout << "current snapshot-clone-server: " << activeAddrs << std::endl; - PrintOnlineStatus("snapshot-clone-server", onlineStatus); - return ret; -} - -int StatusTool::PrintClientStatus() { - std::cout << "Client status: " << std::endl; - ClientVersionMapType versionMap; - int res = versionTool_->GetClientVersion(&versionMap); - if (res != 0) { - std::cout << "GetClientVersion fail" << std::endl; - return -1; - } - for (const auto& item : versionMap) { - std::cout << item.first << ": "; - bool first = true; - for (const auto& item2 : item.second) { - if (!first) { - std::cout << ", "; - } - std::cout << "version-" << item2.first << ": " - << item2.second.size(); - first = false; + + int StatusTool::PrintSnapshotCloneStatus() + { + std::cout << "SnapshotCloneServer status:" << std::endl; + if (noSnapshotServer_) + { + std::cout << "No SnapshotCloneServer" << std::endl; + return 0; + } + std::string version; + std::vector failedList; + int res = + versionTool_->GetAndCheckSnapshotCloneVersion(&version, &failedList); + int ret = 0; + if (res != 0) + { + std::cout << "GetAndCheckSnapshotCloneVersion fail" << std::endl; + ret = -1; + } + else + { + std::cout << "version: " << version << std::endl; + if (!failedList.empty()) + { + versionTool_->PrintFailedList(failedList); + ret = -1; + } + } + std::vector activeAddrs = snapshotClient_->GetActiveAddrs(); + std::map onlineStatus; + snapshotClient_->GetOnlineStatus(&onlineStatus); + std::cout << "current snapshot-clone-server: " << activeAddrs << std::endl; + PrintOnlineStatus("snapshot-clone-server", onlineStatus); + return ret; } - std::cout << std::endl; - if (FLAGS_detail) { - std::cout << "version map: " << std::endl; - versionTool_->PrintVersionMap(item.second); + + int StatusTool::PrintClientStatus() + { + std::cout << "Client status: " << std::endl; + ClientVersionMapType versionMap; + int res = versionTool_->GetClientVersion(&versionMap); + if (res != 0) + { + std::cout << "GetClientVersion fail" << std::endl; + return -1; + } + for (const auto &item : versionMap) + { + std::cout << item.first << ": "; + bool first = true; + for (const auto &item2 : item.second) + { + if (!first) + { + std::cout << ", "; + } + std::cout << "version-" << item2.first << ": " + << item2.second.size(); + first = false; + } + std::cout << std::endl; + if (FLAGS_detail) + { + std::cout << "version map: " << std::endl; + versionTool_->PrintVersionMap(item.second); + } + } + return 0; } - } - return 0; -} - -int StatusTool::ClientListCmd() { - std::vector clientAddrs; - int res = mdsClient_->ListClient(&clientAddrs, FLAGS_listClientInRepo); - if (res != 0) { - std::cout << "ListClient from mds fail!" << std::endl; - return -1; - } - for (const auto& addr : clientAddrs) { - std::cout << addr << std::endl; - } - return 0; -} - -int StatusTool::ScanStatusCmd() { - if (FLAGS_logicalPoolId != 0 && FLAGS_copysetId != 0) { - CopysetInfo copysetInfo; - auto lpid = FLAGS_logicalPoolId; - auto copysetId = FLAGS_copysetId; - if (mdsClient_->GetCopyset(lpid, copysetId, ©setInfo) != 0) { - std::cout << "GetCopyset fail!" 
<< std::endl; - return -1; + + int StatusTool::ClientListCmd() + { + std::vector clientAddrs; + int res = mdsClient_->ListClient(&clientAddrs, FLAGS_listClientInRepo); + if (res != 0) + { + std::cout << "ListClient from mds fail!" << std::endl; + return -1; + } + for (const auto &addr : clientAddrs) + { + std::cout << addr << std::endl; + } + return 0; } - std::cout - << "Scan status for copyset(" - << lpid << "," << copysetId << "):" << std::endl - << " scaning=" << copysetInfo.scaning() - << " lastScanSec=" << copysetInfo.lastscansec() - << " lastScanConsistent=" << copysetInfo.lastscanconsistent() - << std::endl; - - return 0; - } - - std::vector copysetInfos; - if (mdsClient_->GetCopySetsInCluster(©setInfos, true) != 0) { - std::cout << "GetCopySetsInCluster fail!" << std::endl; - return -1; - } - - int count = 0; - std::cout << "Scaning copysets: " << copysetInfos.size(); - for (auto& copysetInfo : copysetInfos) { - if (count % 5 == 0) { + int StatusTool::ScanStatusCmd() + { + if (FLAGS_logicalPoolId != 0 && FLAGS_copysetId != 0) + { + CopysetInfo copysetInfo; + auto lpid = FLAGS_logicalPoolId; + auto copysetId = FLAGS_copysetId; + if (mdsClient_->GetCopyset(lpid, copysetId, ©setInfo) != 0) + { + std::cout << "GetCopyset fail!" << std::endl; + return -1; + } + + std::cout << "Scan status for copyset(" << lpid << "," << copysetId + << "):" << std::endl + << " scaning=" << copysetInfo.scaning() + << " lastScanSec=" << copysetInfo.lastscansec() + << " lastScanConsistent=" << copysetInfo.lastscanconsistent() + << std::endl; + + return 0; + } + + std::vector copysetInfos; + if (mdsClient_->GetCopySetsInCluster(©setInfos, true) != 0) + { + std::cout << "GetCopySetsInCluster fail!" << std::endl; + return -1; + } + + int count = 0; + std::cout << "Scaning copysets: " << copysetInfos.size(); + for (auto ©setInfo : copysetInfos) + { + if (count % 5 == 0) + { + std::cout << std::endl; + } + std::cout << " (" << copysetInfo.logicalpoolid() << "," + << copysetInfo.copysetid() << ")"; + count++; + } + std::cout << std::endl; + + return 0; } - std::cout << " (" << copysetInfo.logicalpoolid() - << "," << copysetInfo.copysetid() << ")"; - count++; - } - - std::cout << std::endl; - - return 0; -} - -int CheckUseWalPool(const std::map> - &poolChunkservers, - bool *useWalPool, - bool *useChunkFilePoolAsWalPool, - std::shared_ptr metricClient) { - int ret = 0; - if (!poolChunkservers.empty()) { - ChunkServerInfo chunkserver = poolChunkservers.begin()->second[0]; - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); - // check whether use chunkfilepool - std::string metricValue; - std::string metricName = GetUseWalPoolName(csAddr); - MetricRet res = metricClient->GetConfValueFromMetric(csAddr, - metricName, &metricValue); - if (res != MetricRet::kOK) { - std::cout << "Get use chunkfilepool conf " - << csAddr << " fail!" << std::endl; - ret = -1; - } - std::string raftLogProtocol = - curve::common::UriParser ::GetProtocolFromUri(metricValue); - *useWalPool = kProtocalCurve == raftLogProtocol ? true : false; - - // check whether use chunkfilepool as walpool from chunkserver conf metric // NOLINT - metricName = GetUseChunkFilePoolAsWalPoolName(csAddr); - res = metricClient->GetConfValueFromMetric(csAddr, metricName, - &metricValue); - if (res != MetricRet::kOK) { - std::cout << "Get use chunkfilepool as walpool conf " - << csAddr << " fail!" 
<< std::endl; - ret = -1; - } - *useChunkFilePoolAsWalPool = StringToBool(metricValue, - useChunkFilePoolAsWalPool); - } - return ret; -} - -int PrintChunkserverOnlineStatus( - const std::map> &poolChunkservers, - std::shared_ptr copysetCheckCore, - std::shared_ptr mdsClient) { - int ret = 0; - uint64_t total = 0; - uint64_t online = 0; - uint64_t offline = 0; - std::vector offlineCs; - for (const auto& poolChunkserver : poolChunkservers) { - for (const auto& chunkserver : poolChunkserver.second) { - total++; - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); - if (copysetCheckCore->CheckChunkServerOnline(csAddr)) { - online++; - } else { - offline++; - offlineCs.emplace_back(chunkserver.chunkserverid()); + + int CheckUseWalPool( + const std::map> &poolChunkservers, + bool *useWalPool, bool *useChunkFilePoolAsWalPool, + std::shared_ptr metricClient) + { + int ret = 0; + if (!poolChunkservers.empty()) + { + ChunkServerInfo chunkserver = poolChunkservers.begin()->second[0]; + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); + // check whether use chunkfilepool + std::string metricValue; + std::string metricName = GetUseWalPoolName(csAddr); + MetricRet res = metricClient->GetConfValueFromMetric(csAddr, metricName, + &metricValue); + if (res != MetricRet::kOK) + { + std::cout << "Get use chunkfilepool conf " << csAddr << " fail!" + << std::endl; + ret = -1; + } + std::string raftLogProtocol = + curve::common::UriParser ::GetProtocolFromUri(metricValue); + *useWalPool = kProtocalCurve == raftLogProtocol ? true : false; + + // check whether use chunkfilepool as walpool from chunkserver conf + // metric // NOLINT + metricName = GetUseChunkFilePoolAsWalPoolName(csAddr); + res = metricClient->GetConfValueFromMetric(csAddr, metricName, + &metricValue); + if (res != MetricRet::kOK) + { + std::cout << "Get use chunkfilepool as walpool conf " << csAddr + << " fail!" 
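
CheckUseWalPool above infers WAL-pool usage from a configuration metric: it takes the scheme of the configured pool URI and compares it with kProtocalCurve (spelled as in the source; judging by the comparison, it holds the curve:// scheme string). A rough stand-in for the UriParser call, assuming the usual scheme://path shape:

#include <string>

// Sketch of what curve::common::UriParser::GetProtocolFromUri does here:
// return the scheme of "scheme://path", or an empty string when absent.
std::string ProtocolOf(const std::string& uri) {
    auto pos = uri.find("://");
    return pos == std::string::npos ? "" : uri.substr(0, pos);
}

// ProtocolOf("curve://./walfilepool/") == "curve" -> WAL pool in use;
// any other scheme (or none) is treated as no WAL file pool.
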
<< std::endl; + ret = -1; + } + *useChunkFilePoolAsWalPool = + StringToBool(metricValue, useChunkFilePoolAsWalPool); } + return ret; } - } - // get the recover status of offline chunkservers - std::vector offlineRecover; - if (offlineCs.size() > 0) { - std::map statusMap; - int res = mdsClient->QueryChunkServerRecoverStatus( - offlineCs, &statusMap); - if (res != 0) { - std::cout << "query offlinne chunkserver recover status fail"; - ret = -1; - } else { - // Distinguish between recovering and unrecovered - for (auto it = statusMap.begin(); it != statusMap.end(); ++it) { - if (it->second) { - offlineRecover.emplace_back(it->first); + + int PrintChunkserverOnlineStatus( + const std::map> &poolChunkservers, + std::shared_ptr copysetCheckCore, + std::shared_ptr mdsClient) + { + int ret = 0; + uint64_t total = 0; + uint64_t online = 0; + uint64_t offline = 0; + std::vector offlineCs; + for (const auto &poolChunkserver : poolChunkservers) + { + for (const auto &chunkserver : poolChunkserver.second) + { + total++; + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); + if (copysetCheckCore->CheckChunkServerOnline(csAddr)) + { + online++; + } + else + { + offline++; + offlineCs.emplace_back(chunkserver.chunkserverid()); + } + } + } + // get the recover status of offline chunkservers + std::vector offlineRecover; + if (offlineCs.size() > 0) + { + std::map statusMap; + int res = + mdsClient->QueryChunkServerRecoverStatus(offlineCs, &statusMap); + if (res != 0) + { + std::cout << "query offlinne chunkserver recover status fail"; + ret = -1; + } + else + { + // Distinguish between recovering and unrecovered + for (auto it = statusMap.begin(); it != statusMap.end(); ++it) + { + if (it->second) + { + offlineRecover.emplace_back(it->first); + } + } } } + std::cout << "chunkserver: total num = " << total << ", online = " << online + << ", offline = " << offline + << "(recoveringout = " << offlineRecover.size() + << ", chunkserverlist: ["; + + int i = 0; + for (ChunkServerIdType csId : offlineRecover) + { + i++; + if (i == static_cast(offlineRecover.size())) + { + std::cout << csId; + } + else + { + std::cout << csId << ", "; + } + } + std::cout << "])" << std::endl; + return ret; } - } - std::cout << "chunkserver: total num = " << total - << ", online = " << online - << ", offline = " << offline - << "(recoveringout = " << offlineRecover.size() - << ", chunkserverlist: ["; - - int i = 0; - for (ChunkServerIdType csId : offlineRecover) { - i++; - if (i == static_cast(offlineRecover.size())) { - std::cout << csId; - } else { - std::cout << csId << ", "; + + int GetChunkserverLeftSize( + const std::map> &poolChunkservers, + std::map> *poolChunkLeftSize, + std::map> *poolWalSegmentLeftSize, + bool useWalPool, bool useChunkFilePoolAsWalPool, + std::shared_ptr metricClient) + { + int ret = 0; + for (const auto &poolChunkserver : poolChunkservers) + { + std::vector chunkLeftSize; + std::vector walSegmentLeftSize; + for (const auto &chunkserver : poolChunkserver.second) + { + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); + std::string metricName = GetCSLeftChunkName(csAddr); + uint64_t chunkNum; + MetricRet res = + metricClient->GetMetricUint(csAddr, metricName, &chunkNum); + if (res != MetricRet::kOK) + { + std::cout << "Get left chunk size of chunkserver " << csAddr + << " fail!" 
<< std::endl; + ret = -1; + continue; + } + uint64_t size = chunkNum * FLAGS_chunkSize; + chunkLeftSize.emplace_back(size / mds::kGB); + + // walfilepool left size + if (useWalPool && !useChunkFilePoolAsWalPool) + { + metricName = GetCSLeftWalSegmentName(csAddr); + uint64_t walSegmentNum; + res = metricClient->GetMetricUint(csAddr, metricName, + &walSegmentNum); + if (res != MetricRet::kOK) + { + std::cout << "Get left wal segment size of chunkserver " + << csAddr << " fail!" << std::endl; + ret = -1; + continue; + } + size = walSegmentNum * FLAGS_walSegmentSize; + walSegmentLeftSize.emplace_back(size / mds::kGB); + } + } + poolChunkLeftSize->emplace(poolChunkserver.first, chunkLeftSize); + poolWalSegmentLeftSize->emplace(poolChunkserver.first, + walSegmentLeftSize); + } + return ret; } - } - std::cout << "])" << std::endl; - return ret; -} - -int GetChunkserverLeftSize( - const std::map> &poolChunkservers, - std::map> *poolChunkLeftSize, - std::map> *poolWalSegmentLeftSize, - bool useWalPool, - bool useChunkFilePoolAsWalPool, - std::shared_ptr metricClient) { - int ret = 0; - for (const auto& poolChunkserver : poolChunkservers) { - std::vector chunkLeftSize; - std::vector walSegmentLeftSize; - for (const auto& chunkserver : poolChunkserver.second) { - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); - std::string metricName = GetCSLeftChunkName(csAddr); - uint64_t chunkNum; - MetricRet res = metricClient->GetMetricUint(csAddr, - metricName, &chunkNum); - if (res != MetricRet::kOK) { - std::cout << "Get left chunk size of chunkserver " << csAddr - << " fail!" << std::endl; + + int StatusTool::PrintChunkserverStatus(bool checkLeftSize) + { + // get and check chunkserver version + std::cout << "ChunkServer status:" << std::endl; + std::string version; + std::vector failedList; + int res = + versionTool_->GetAndCheckChunkServerVersion(&version, &failedList); + int ret = 0; + if (res != 0) + { + std::cout << "GetAndCheckChunkserverVersion fail" << std::endl; ret = -1; - continue; - } - uint64_t size = chunkNum * FLAGS_chunkSize; - chunkLeftSize.emplace_back(size / mds::kGB); - - // walfilepool left size - if (useWalPool && !useChunkFilePoolAsWalPool) { - metricName = GetCSLeftWalSegmentName(csAddr); - uint64_t walSegmentNum; - res = metricClient->GetMetricUint(csAddr, metricName, - &walSegmentNum); - if (res != MetricRet::kOK) { - std::cout << "Get left wal segment size of chunkserver " - << csAddr << " fail!" 
<< std::endl; + } + else + { + std::cout << "version: " << version << std::endl; + if (!failedList.empty()) + { + versionTool_->PrintFailedList(failedList); ret = -1; - continue; } - size = walSegmentNum * FLAGS_walSegmentSize; - walSegmentLeftSize.emplace_back(size / mds::kGB); } - } - poolChunkLeftSize->emplace(poolChunkserver.first, chunkLeftSize); - poolWalSegmentLeftSize->emplace(poolChunkserver.first, - walSegmentLeftSize); - } - return ret; -} - -int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { - // get and check chunkserver version - std::cout << "ChunkServer status:" << std::endl; - std::string version; - std::vector failedList; - int res = versionTool_->GetAndCheckChunkServerVersion(&version, - &failedList); - int ret = 0; - if (res != 0) { - std::cout << "GetAndCheckChunkserverVersion fail" << std::endl; - ret = -1; - } else { - std::cout << "version: " << version << std::endl; - if (!failedList.empty()) { - versionTool_->PrintFailedList(failedList); - ret = -1; - } - } - // list chunkservers in cluster group by poolid - std::map> poolChunkservers; - res = mdsClient_->ListChunkServersInCluster(&poolChunkservers); - if (res != 0) { - std::cout << "ListChunkServersInCluster fail!" << std::endl; - return -1; - } - - // get chunkserver online status - ret = PrintChunkserverOnlineStatus(poolChunkservers, - copysetCheckCore_, - mdsClient_); - if (!checkLeftSize) { - return ret; - } - - bool useWalPool = false; - bool useChunkFilePoolAsWalPool = true; - // check use walpool - ret = CheckUseWalPool(poolChunkservers, &useWalPool, - &useChunkFilePoolAsWalPool, metricClient_); - - // get chunkserver left size - std::map> poolChunkLeftSize; - std::map> poolWalSegmentLeftSize; - ret = GetChunkserverLeftSize(poolChunkservers, - &poolChunkLeftSize, - &poolWalSegmentLeftSize, - useWalPool, - useChunkFilePoolAsWalPool, - metricClient_); - if (0 != ret) { - return ret; - } - // print filepool left size - PrintCsLeftSizeStatistics("chunkfilepool", poolChunkLeftSize); - if (useWalPool && !useChunkFilePoolAsWalPool) { - PrintCsLeftSizeStatistics("walfilepool", poolWalSegmentLeftSize); - } else if (useChunkFilePoolAsWalPool) { - std::cout << "No walpool left size found, " - << "use chunkfilepool as walpool!\n"; - } else { - std::cout << "No walpool left size found, " - << "no walpool used!\n"; - } - return ret; -} - -void StatusTool::PrintCsLeftSizeStatistics(const std::string& name, - const std::map>& poolLeftSize) { - if (poolLeftSize.empty()) { - std::cout << "No " << name << " left size found!" << std::endl; - return; - } - for (const auto& leftSize : poolLeftSize) { - if (leftSize.second.empty()) { - continue; - } - uint64_t min = leftSize.second[0]; - uint64_t max = leftSize.second[0]; - double sum = 0; - for (const auto& size : leftSize.second) { - sum += size; - if (size < min) { - min = size; + // list chunkservers in cluster group by poolid + std::map> poolChunkservers; + res = mdsClient_->ListChunkServersInCluster(&poolChunkservers); + if (res != 0) + { + std::cout << "ListChunkServersInCluster fail!" 
<< std::endl; + return -1; } - if (size > max) { - max = size; + + // get chunkserver online status + ret = PrintChunkserverOnlineStatus(poolChunkservers, copysetCheckCore_, + mdsClient_); + if (!checkLeftSize) + { + return ret; } - } - uint64_t range = max - min; - double avg = sum / leftSize.second.size(); - sum = 0; - for (const auto& size : leftSize.second) { - sum += (size - avg) * (size - avg); - } - double var = sum / leftSize.second.size(); - std:: cout.setf(std::ios::fixed); - std::cout<< std::setprecision(2); - std::cout<< "pool" << leftSize.first << " " << name; - std::cout << " left size: min = " << min << "GB" - << ", max = " << max << "GB" - << ", average = " << avg << "GB" - << ", range = " << range << "GB" - << ", variance = " << var << std::endl; - } -} - -int StatusTool::GetPoolsInCluster(std::vector* phyPools, - std::vector* lgPools) { - int res = mdsClient_->ListPhysicalPoolsInCluster(phyPools); - if (res != 0) { - std::cout << "ListPhysicalPoolsInCluster fail!" << std::endl; - return -1; - } - for (const auto& phyPool : *phyPools) { - int res = mdsClient_->ListLogicalPoolsInPhysicalPool( - phyPool.physicalpoolid(), lgPools) != 0; - if (res != 0) { - std::cout << "ListLogicalPoolsInPhysicalPool fail!" << std::endl; - return -1; + bool useWalPool = false; + bool useChunkFilePoolAsWalPool = true; + // check use walpool + ret = CheckUseWalPool(poolChunkservers, &useWalPool, + &useChunkFilePoolAsWalPool, metricClient_); + + // get chunkserver left size + std::map> poolChunkLeftSize; + std::map> poolWalSegmentLeftSize; + ret = GetChunkserverLeftSize(poolChunkservers, &poolChunkLeftSize, + &poolWalSegmentLeftSize, useWalPool, + useChunkFilePoolAsWalPool, metricClient_); + if (0 != ret) + { + return ret; + } + // print filepool left size + PrintCsLeftSizeStatistics("chunkfilepool", poolChunkLeftSize); + if (useWalPool && !useChunkFilePoolAsWalPool) + { + PrintCsLeftSizeStatistics("walfilepool", poolWalSegmentLeftSize); + } + else if (useChunkFilePoolAsWalPool) + { + std::cout << "No walpool left size found, " + << "use chunkfilepool as walpool!\n"; + } + else + { + std::cout << "No walpool left size found, " + << "no walpool used!\n"; + } + return ret; } - } - return 0; -} - -int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) { - std::vector lgPools; - int res = mdsClient_->ListLogicalPoolsInCluster(&lgPools); - if (res != 0) { - std::cout << "ListLogicalPoolsInCluster fail!" << std::endl; - return -1; - } - res = mdsClient_->GetFileSize(curve::mds::ROOTFILENAME, - &spaceInfo->currentFileSize); - if (res != 0) { - std::cout << "Get root directory file size from mds fail!" << std::endl; - return -1; - } - // 从metric获取space信息 - for (const auto& lgPool : lgPools) { - LogicalpoolSpaceInfo lpinfo; - std::string poolName = lgPool.logicalpoolname(); - lpinfo.poolName = poolName; - std::string metricName = GetPoolTotalChunkSizeName(poolName); - uint64_t size; - int res = mdsClient_->GetMetric(metricName, &size); - if (res != 0) { - std::cout << "Get total chunk byte from mds fail!" << std::endl; - return -1; + + void StatusTool::PrintCsLeftSizeStatistics( + const std::string &name, + const std::map> &poolLeftSize) + { + if (poolLeftSize.empty()) + { + std::cout << "No " << name << " left size found!" 
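
The tail of PrintChunkserverStatus above folds the two WAL booleans into three mutually exclusive reporting paths. The branch in isolation, with prints standing in for the real statistics call:

#include <iostream>

// useWalPool: a WAL file pool is configured (curve:// protocol).
// useChunkFilePoolAsWalPool: the chunk file pool doubles as the WAL pool.
void ReportWalPool(bool useWalPool, bool useChunkFilePoolAsWalPool) {
    if (useWalPool && !useChunkFilePoolAsWalPool) {
        std::cout << "print walfilepool statistics\n";  // standalone pool
    } else if (useChunkFilePoolAsWalPool) {
        std::cout << "No walpool left size found, use chunkfilepool as walpool!\n";
    } else {
        std::cout << "No walpool left size found, no walpool used!\n";
    }
}
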
<< std::endl; + return; + } + for (const auto &leftSize : poolLeftSize) + { + if (leftSize.second.empty()) + { + continue; + } + uint64_t min = leftSize.second[0]; + uint64_t max = leftSize.second[0]; + double sum = 0; + for (const auto &size : leftSize.second) + { + sum += size; + if (size < min) + { + min = size; + } + if (size > max) + { + max = size; + } + } + uint64_t range = max - min; + double avg = sum / leftSize.second.size(); + sum = 0; + for (const auto &size : leftSize.second) + { + sum += (size - avg) * (size - avg); + } + + double var = sum / leftSize.second.size(); + std::cout.setf(std::ios::fixed); + std::cout << std::setprecision(2); + std::cout << "pool" << leftSize.first << " " << name; + std::cout << " left size: min = " << min << "GB" + << ", max = " << max << "GB" + << ", average = " << avg << "GB" + << ", range = " << range << "GB" + << ", variance = " << var << std::endl; + } } - spaceInfo->totalChunkSize += size; - lpinfo.totalChunkSize +=size; - metricName = GetPoolUsedChunkSizeName(poolName); - res = mdsClient_->GetMetric(metricName, &size); - if (res != 0) { - std::cout << "Get used chunk byte from mds fail!" << std::endl; - return -1; + + int StatusTool::GetPoolsInCluster(std::vector *phyPools, + std::vector *lgPools) + { + int res = mdsClient_->ListPhysicalPoolsInCluster(phyPools); + if (res != 0) + { + std::cout << "ListPhysicalPoolsInCluster fail!" << std::endl; + return -1; + } + for (const auto &phyPool : *phyPools) + { + int res = mdsClient_->ListLogicalPoolsInPhysicalPool( + phyPool.physicalpoolid(), lgPools) != 0; + if (res != 0) + { + std::cout << "ListLogicalPoolsInPhysicalPool fail!" << std::endl; + return -1; + } + } + return 0; } - spaceInfo->usedChunkSize += size; - lpinfo.usedChunkSize += size; - metricName = GetPoolLogicalCapacityName(poolName); - res = mdsClient_->GetMetric(metricName, &size); - if (res != 0) { - std::cout << "Get logical capacity from mds fail!" << std::endl; - return -1; + + int StatusTool::GetSpaceInfo(SpaceInfo *spaceInfo) + { + std::vector lgPools; + int res = mdsClient_->ListLogicalPoolsInCluster(&lgPools); + if (res != 0) + { + std::cout << "ListLogicalPoolsInCluster fail!" << std::endl; + return -1; + } + res = mdsClient_->GetFileSize(curve::mds::ROOTFILENAME, + &spaceInfo->currentFileSize); + if (res != 0) + { + std::cout << "Get root directory file size from mds fail!" << std::endl; + return -1; + } + // Obtain space information from metric + for (const auto &lgPool : lgPools) + { + LogicalpoolSpaceInfo lpinfo; + std::string poolName = lgPool.logicalpoolname(); + lpinfo.poolName = poolName; + std::string metricName = GetPoolTotalChunkSizeName(poolName); + uint64_t size; + int res = mdsClient_->GetMetric(metricName, &size); + if (res != 0) + { + std::cout << "Get total chunk byte from mds fail!" << std::endl; + return -1; + } + spaceInfo->totalChunkSize += size; + lpinfo.totalChunkSize += size; + metricName = GetPoolUsedChunkSizeName(poolName); + res = mdsClient_->GetMetric(metricName, &size); + if (res != 0) + { + std::cout << "Get used chunk byte from mds fail!" << std::endl; + return -1; + } + spaceInfo->usedChunkSize += size; + lpinfo.usedChunkSize += size; + metricName = GetPoolLogicalCapacityName(poolName); + res = mdsClient_->GetMetric(metricName, &size); + if (res != 0) + { + std::cout << "Get logical capacity from mds fail!" 
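
PrintCsLeftSizeStatistics computes its summary in two passes: min, max and mean first, then squared deviations for a population variance. The same arithmetic lifted into a standalone helper (struct and names are illustrative):

#include <cstdint>
#include <vector>

struct LeftSizeStats {
    uint64_t min, max, range;
    double avg, variance;
};

// Precondition: sizes is non-empty (the caller skips empty pools).
LeftSizeStats Summarize(const std::vector<uint64_t>& sizes) {
    LeftSizeStats s{sizes[0], sizes[0], 0, 0.0, 0.0};
    double sum = 0;
    for (uint64_t v : sizes) {  // pass 1: extremes and total
        sum += v;
        if (v < s.min) s.min = v;
        if (v > s.max) s.max = v;
    }
    s.range = s.max - s.min;
    s.avg = sum / sizes.size();
    double sq = 0;
    for (uint64_t v : sizes) {  // pass 2: squared deviations from the mean
        sq += (v - s.avg) * (v - s.avg);
    }
    s.variance = sq / sizes.size();  // population variance, as in the tool
    return s;
}
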
<< std::endl; + return -1; + } + spaceInfo->totalCapacity += size; + lpinfo.totalCapacity += size; + metricName = GetPoolLogicalAllocName(poolName); + res = mdsClient_->GetMetric(metricName, &size); + if (res != 0) + { + std::cout << "Get logical alloc size from mds fail!" << std::endl; + return -1; + } + spaceInfo->allocatedSize += size; + lpinfo.allocatedSize += size; + spaceInfo->lpoolspaceinfo.insert( + std::pair(lgPool.logicalpoolid(), + lpinfo)); + } + // Obtain the allocation size of RecycleBin + res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, + &spaceInfo->recycleAllocSize); + if (res != 0) + { + std::cout << "GetAllocatedSize of RecycleBin fail!" << std::endl; + return -1; + } + return 0; } - spaceInfo->totalCapacity += size; - lpinfo.totalCapacity += size; - metricName = GetPoolLogicalAllocName(poolName); - res = mdsClient_->GetMetric(metricName, &size); - if (res != 0) { - std::cout << "Get logical alloc size from mds fail!" << std::endl; - return -1; + + int StatusTool::RunCommand(const std::string &cmd) + { + if (Init(cmd) != 0) + { + std::cout << "Init StatusTool failed" << std::endl; + return -1; + } + if (cmd == kSpaceCmd) + { + return SpaceCmd(); + } + else if (cmd == kStatusCmd) + { + return StatusCmd(); + } + else if (cmd == kChunkserverListCmd) + { + return ChunkServerListCmd(); + } + else if (cmd == kServerListCmd) + { + return ServerListCmd(); + } + else if (cmd == kLogicalPoolList) + { + return LogicalPoolListCmd(); + } + else if (cmd == kChunkserverStatusCmd) + { + return ChunkServerStatusCmd(); + } + else if (cmd == kMdsStatusCmd) + { + return PrintMdsStatus(); + } + else if (cmd == kEtcdStatusCmd) + { + return PrintEtcdStatus(); + } + else if (cmd == kClientStatusCmd) + { + return PrintClientStatus(); + } + else if (cmd == kSnapshotCloneStatusCmd) + { + return PrintSnapshotCloneStatus(); + } + else if (cmd == kClusterStatusCmd) + { + return PrintClusterStatus(); + } + else if (cmd == kClientListCmd) + { + return ClientListCmd(); + } + else if (cmd == kScanStatusCmd) + { + return ScanStatusCmd(); + } + else if (cmd == kFormatStatusCmd) + { + return FormatStatusCmd(); + } + else + { + std::cout << "Command not supported!" << std::endl; + return -1; + } + + return 0; } - spaceInfo->allocatedSize += size; - lpinfo.allocatedSize += size; - spaceInfo->lpoolspaceinfo.insert( - std::pair( - lgPool.logicalpoolid(), lpinfo)); - } - // 获取RecycleBin的分配大小 - res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, - &spaceInfo->recycleAllocSize); - if (res != 0) { - std::cout << "GetAllocatedSize of RecycleBin fail!" 
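
RunCommand above routes through a long if/else chain over the command constants. Purely as a design note, the same routing can be table-driven with pointers to members; this sketch is hypothetical and is not what the patch does:

#include <iostream>
#include <map>
#include <string>

class Tool {  // stand-in for StatusTool
 public:
    int RunCommand(const std::string& cmd) {
        static const std::map<std::string, int (Tool::*)()> table = {
            {"space", &Tool::SpaceCmd},
            {"status", &Tool::StatusCmd}};
        auto it = table.find(cmd);
        if (it == table.end()) {
            std::cout << "Command not supported!" << std::endl;
            return -1;
        }
        return (this->*(it->second))();  // invoke the matched handler
    }

 private:
    int SpaceCmd() { return 0; }   // placeholder handlers
    int StatusCmd() { return 0; }
};

A chain keeps each gflags-documented command visible at its branch; a table would let SupportCommand and the dispatch share one definition.
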
<< std::endl; - return -1; - } - return 0; -} - -int StatusTool::RunCommand(const std::string &cmd) { - if (Init(cmd) != 0) { - std::cout << "Init StatusTool failed" << std::endl; - return -1; - } - if (cmd == kSpaceCmd) { - return SpaceCmd(); - } else if (cmd == kStatusCmd) { - return StatusCmd(); - } else if (cmd == kChunkserverListCmd) { - return ChunkServerListCmd(); - } else if (cmd == kServerListCmd) { - return ServerListCmd(); - } else if (cmd == kLogicalPoolList) { - return LogicalPoolListCmd(); - } else if (cmd == kChunkserverStatusCmd) { - return ChunkServerStatusCmd(); - } else if (cmd == kMdsStatusCmd) { - return PrintMdsStatus(); - } else if (cmd == kEtcdStatusCmd) { - return PrintEtcdStatus(); - } else if (cmd == kClientStatusCmd) { - return PrintClientStatus(); - } else if (cmd == kSnapshotCloneStatusCmd) { - return PrintSnapshotCloneStatus(); - } else if (cmd == kClusterStatusCmd) { - return PrintClusterStatus(); - } else if (cmd == kClientListCmd) { - return ClientListCmd(); - } else if (cmd == kScanStatusCmd) { - return ScanStatusCmd(); - } else if (cmd == kFormatStatusCmd) { - return FormatStatusCmd(); - } else { - std::cout << "Command not supported!" << std::endl; - return -1; - } - - return 0; -} -} // namespace tool -} // namespace curve + } // namespace tool +} // namespace curve diff --git a/src/tools/status_tool.h b/src/tools/status_tool.h index 82b776fa73..37e0546050 100644 --- a/src/tools/status_tool.h +++ b/src/tools/status_tool.h @@ -23,31 +23,33 @@ #ifndef SRC_TOOLS_STATUS_TOOL_H_ #define SRC_TOOLS_STATUS_TOOL_H_ +#include #include #include -#include -#include + #include -#include -#include -#include +#include #include +#include +#include #include +#include + #include "proto/topology.pb.h" #include "src/common/timeutility.h" +#include "src/common/uri_parser.h" #include "src/mds/common/mds_define.h" -#include "src/tools/mds_client.h" #include "src/tools/chunkserver_client.h" -#include "src/tools/namespace_tool_core.h" #include "src/tools/copyset_check_core.h" -#include "src/tools/etcd_client.h" -#include "src/tools/version_tool.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" +#include "src/tools/etcd_client.h" +#include "src/tools/mds_client.h" #include "src/tools/metric_client.h" #include "src/tools/metric_name.h" +#include "src/tools/namespace_tool_core.h" #include "src/tools/snapshot_clone_client.h" -#include "src/common/uri_parser.h" +#include "src/tools/version_tool.h" using curve::mds::topology::ChunkServerInfo; using curve::mds::topology::ChunkServerStatus; @@ -63,22 +65,22 @@ struct LogicalpoolSpaceInfo { std::string poolName = ""; uint64_t totalChunkSize = 0; uint64_t usedChunkSize = 0; - // 总体能容纳的文件大小 + // The overall file size that can be accommodated uint64_t totalCapacity = 0; - // 分配大小 + // Allocation size uint64_t allocatedSize = 0; }; struct SpaceInfo { uint64_t totalChunkSize = 0; uint64_t usedChunkSize = 0; - // 总体能容纳的文件大小 + // The overall file size that can be accommodated uint64_t totalCapacity = 0; - // 分配大小 + // Allocation size uint64_t allocatedSize = 0; - // recycleBin的分配大小 + // Allocation size of recycleBin uint64_t recycleAllocSize = 0; - // 系统中存在的文件大小 + // File size present in the system uint64_t currentFileSize = 0; std::unordered_map lpoolspaceinfo; }; @@ -100,49 +102,54 @@ class StatusTool : public CurveTool { std::shared_ptr versionTool, std::shared_ptr metricClient, std::shared_ptr snapshotClient) - : mdsClient_(mdsClient), copysetCheckCore_(copysetCheckCore), - etcdClient_(etcdClient), 
metricClient_(metricClient), - snapshotClient_(snapshotClient), versionTool_(versionTool), - mdsInited_(false), etcdInited_(false), noSnapshotServer_(false) {} + : mdsClient_(mdsClient), + copysetCheckCore_(copysetCheckCore), + etcdClient_(etcdClient), + metricClient_(metricClient), + snapshotClient_(snapshotClient), + versionTool_(versionTool), + mdsInited_(false), + etcdInited_(false), + noSnapshotServer_(false) {} ~StatusTool() = default; /** - * @brief 打印help信息 - * @param cmd:执行的命令 - * @return 无 + * @brief Print help information + * @param cmd: Command executed + * @return None */ - void PrintHelp(const std::string &command) override; + void PrintHelp(const std::string& command) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param cmd: Command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &command) override; + int RunCommand(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ - static bool SupportCommand(const std::string &command); + static bool SupportCommand(const std::string& command); /** - * @brief 判断集群是否健康 + * @brief to determine whether the cluster is healthy */ bool IsClusterHeatlhy(); private: - int Init(const std::string &command); + int Init(const std::string& command); int SpaceCmd(); int StatusCmd(); int ChunkServerListCmd(); int ServerListCmd(); int LogicalPoolListCmd(); int ChunkServerStatusCmd(); - int GetPoolsInCluster(std::vector *phyPools, - std::vector *lgPools); - int GetSpaceInfo(SpaceInfo *spaceInfo); + int GetPoolsInCluster(std::vector* phyPools, + std::vector* lgPools); + int GetSpaceInfo(SpaceInfo* spaceInfo); int PrintClusterStatus(); int PrintMdsStatus(); int PrintEtcdStatus(); @@ -152,67 +159,67 @@ class StatusTool : public CurveTool { int ScanStatusCmd(); int FormatStatusCmd(); void PrintCsLeftSizeStatistics( - const std::string &name, - const std::map> &poolLeftSize); + const std::string& name, + const std::map>& poolLeftSize); int PrintSnapshotCloneStatus(); /** - * @brief 判断命令是否需要和etcd交互 - * @param command:执行的命令 - * @return 需要返回true,否则返回false + * @brief to determine if the command needs to interact with ETCD + * @param command: The command executed + * @return needs to return true, otherwise it will return false */ - bool CommandNeedEtcd(const std::string &command); - + bool CommandNeedEtcd(const std::string& command); /** - * @brief 判断命令是否需要mds - * @param command:执行的命令 - * @return 需要返回true,否则返回false + * @brief to determine if the command requires mds + * @param command: The command executed + * @return needs to return true, otherwise it will return false */ - bool CommandNeedMds(const std::string &command); + bool CommandNeedMds(const std::string& command); /** - * @brief 判断命令是否需要snapshot clone server - * @param command:执行的命令 - * @return 需要返回true,否则返回false + * @brief: Determine if the command requires a snapshot clone server + * @param command: The command executed + * @return needs to return true, otherwise it will return false */ - bool CommandNeedSnapshotClone(const std::string &command); + bool CommandNeedSnapshotClone(const std::string& command); /** - * @brief 打印在线状态 - * @param name : 在线状态对应的名字 - * @param onlineStatus 在线状态的map + * @brief Print online status + * @param name: The name corresponding to the online status + * @param onlineStatus Map of online 
status */ - void PrintOnlineStatus(const std::string &name, - const std::map &onlineStatus); + void PrintOnlineStatus(const std::string& name, + const std::map& onlineStatus); /** - * @brief 获取并打印mds version信息 + * @brief Get and print mds version information */ int GetAndPrintMdsVersion(); /** - * @brief 检查服务是否健康 - * @param name 服务名 + * @brief Check if the service is healthy + * @param name Service Name */ - bool CheckServiceHealthy(const ServiceName &name); + bool CheckServiceHealthy(const ServiceName& name); private: - // 向mds发送RPC的client + // Client sending RPC to mds std::shared_ptr mdsClient_; - // Copyset检查工具,用于检查集群和chunkserver的健康状态 + // Copyset checking tool, used to check the health status of clusters and + // chunkservers std::shared_ptr copysetCheckCore_; - // etcd client,用于调etcd API获取状态 + // ETCD client, used to call the ETCD API to obtain status std::shared_ptr etcdClient_; - // 用于获取metric + // Used to obtain metric std::shared_ptr metricClient_; - // 用于获取snapshot clone的状态 + // Used to obtain the status of snapshot clones std::shared_ptr snapshotClient_; - // version client,用于获取version信息 + // Version client, used to obtain version information std::shared_ptr versionTool_; - // mds是否初始化过 + // Has the mds been initialized bool mdsInited_; - // etcd是否初始化过 + // Has ETCD been initialized bool etcdInited_; // Is there a snapshot service or not bool noSnapshotServer_; diff --git a/src/tools/version_tool.cpp b/src/tools/version_tool.cpp index 6e519bab4a..42b1d3e9a5 100644 --- a/src/tools/version_tool.cpp +++ b/src/tools/version_tool.cpp @@ -48,8 +48,8 @@ int VersionTool::GetAndCheckMdsVersion(std::string* version, return ret; } -int VersionTool::GetAndCheckChunkServerVersion(std::string* version, - std::vector* failedList) { +int VersionTool::GetAndCheckChunkServerVersion( + std::string* version, std::vector* failedList) { std::vector chunkServers; int res = mdsClient_->ListChunkServersInCluster(&chunkServers); if (res != 0) { @@ -78,8 +78,8 @@ int VersionTool::GetAndCheckChunkServerVersion(std::string* version, return ret; } -int VersionTool::GetAndCheckSnapshotCloneVersion(std::string* version, - std::vector* failedList) { +int VersionTool::GetAndCheckSnapshotCloneVersion( + std::string* version, std::vector* failedList) { const auto& dummyServerMap = snapshotClient_->GetDummyServerMap(); std::vector dummyServers; for (const auto& item : dummyServerMap) { @@ -123,9 +123,8 @@ void VersionTool::FetchClientProcessMap(const std::vector& addrVec, ProcessMapType* processMap) { for (const auto& addr : addrVec) { std::string cmd; - MetricRet res = metricClient_->GetMetric(addr, - kProcessCmdLineMetricName, - &cmd); + MetricRet res = + metricClient_->GetMetric(addr, kProcessCmdLineMetricName, &cmd); if (res != MetricRet::kOK) { continue; } @@ -156,10 +155,11 @@ void VersionTool::GetVersionMap(const std::vector& addrVec, failedList->clear(); for (const auto& addr : addrVec) { std::string version; - MetricRet res = metricClient_->GetMetric(addr, kCurveVersionMetricName, - &version); + MetricRet res = + metricClient_->GetMetric(addr, kCurveVersionMetricName, &version); if (res != MetricRet::kOK) { - // 0.0.5.2版本之前没有curve_version的metric,因此再判断一下 + // Before version 0.0.5.2, there was no "curve_version" metric, so + // let's double-check. 
if (res == MetricRet::kNotFound) { version = kOldVersion; } else { diff --git a/src/tools/version_tool.h b/src/tools/version_tool.h index 9231d1e4fc..eb293433e6 100644 --- a/src/tools/version_tool.h +++ b/src/tools/version_tool.h @@ -23,13 +23,14 @@ #ifndef SRC_TOOLS_VERSION_TOOL_H_ #define SRC_TOOLS_VERSION_TOOL_H_ -#include #include -#include #include +#include +#include + +#include "src/common/string_util.h" #include "src/tools/mds_client.h" #include "src/tools/metric_client.h" -#include "src/common/string_util.h" #include "src/tools/snapshot_clone_client.h" namespace curve { namespace tool { @@ -49,95 +50,97 @@ class VersionTool { explicit VersionTool(std::shared_ptr mdsClient, std::shared_ptr metricClient, std::shared_ptr snapshotClient) - : mdsClient_(mdsClient), snapshotClient_(snapshotClient), + : mdsClient_(mdsClient), + snapshotClient_(snapshotClient), metricClient_(metricClient) {} virtual ~VersionTool() {} /** - * @brief 获取mds的版本并检查版本一致性 - * @param[out] version 版本 - * @return 成功返回0,失败返回-1 + * @brief Get the version of mds and check version consistency + * @param[out] version version + * @return 0 on success, -1 on failure */ - virtual int GetAndCheckMdsVersion(std::string *version, - std::vector *failedList); + virtual int GetAndCheckMdsVersion(std::string* version, + std::vector* failedList); /** - * @brief 获取chunkserver的版本并检查版本一致性 - * @param[out] version 版本 - * @return 成功返回0,失败返回-1 + * @brief Get the version of chunkserver and check version consistency + * @param[out] version version + * @return 0 on success, -1 on failure */ - virtual int - GetAndCheckChunkServerVersion(std::string *version, - std::vector *failedList); + virtual int GetAndCheckChunkServerVersion( + std::string* version, std::vector* failedList); /** - * @brief 获取snapshot clone server的版本 - * @param[out] version 版本 - * @return 成功返回0,失败返回-1 + * @brief Get the version of the snapshot clone server + * @param[out] version version + * @return 0 on success, -1 on failure */ - virtual int - GetAndCheckSnapshotCloneVersion(std::string *version, - std::vector *failedList); + virtual int GetAndCheckSnapshotCloneVersion( + std::string* version, std::vector* failedList); /** - * @brief 获取client的版本 - * @param[out] versionMap process->版本->地址的映射表 - * @return 成功返回0,失败返回-1 + * @brief Get the version of the client + * @param[out] versionMap process -> version -> address mapping table + * @return 0 on success, -1 on failure */ - virtual int GetClientVersion(ClientVersionMapType *versionMap); + virtual int GetClientVersion(ClientVersionMapType* versionMap); /** - * @brief 打印每个version对应的地址 - * @param versionMap version到地址列表的map + * @brief Print the address corresponding to each version + * @param versionMap Map from version to address list */ - static void PrintVersionMap(const VersionMapType &versionMap); + static void PrintVersionMap(const VersionMapType& versionMap); /** - * @brief 打印访问失败的地址 - * @param failedList 访问失败的地址列表 + * @brief Print the addresses that failed to be accessed + * @param failedList List of addresses that failed to be accessed */ - static void PrintFailedList(const std::vector &failedList); private: /** - * @brief 获取addrVec对应地址的version,并把version和地址对应关系存在map中 - * @param addrVec 地址列表 - * @param[out] versionMap version到地址的map - * @param[out] failedList 查询version失败的地址列表 + * @brief Obtain the version of the address corresponding to addrVec and + * store the version and address correspondence in the map + * @param addrVec Address list + * @param[out] versionMap Map from version to address + * @param[out] failedList List of addresses for which the version query failed */ - void GetVersionMap(const std::vector &addrVec, - VersionMapType *versionMap, - std::vector *failedList); + void GetVersionMap(const std::vector& addrVec, + VersionMapType* versionMap, + std::vector* failedList); /** - * @brief 获取addrVec对应地址的version,并把version和地址对应关系存在map中 - * @param addrVec 地址列表 - * @param[out] processMap 不同的process对应的client的地址列表 + * @brief Obtain the version of the address corresponding to addrVec and + * store the version and address correspondence in the map + * @param addrVec Address list + * @param[out] processMap The address list of clients corresponding to + * different processes */ - void FetchClientProcessMap(const std::vector &addrVec, - ProcessMapType *processMap); + void FetchClientProcessMap(const std::vector& addrVec, + ProcessMapType* processMap); /** - * @brief 从启动server的命令行获取对应的程序的名字 - * 比如nebd的命令行为 + * @brief Get the name of the corresponding program from the command line + * that started the server. For example, the command line of nebd is * process_cmdline : "/usr/bin/nebd-server * -confPath=/etc/nebd/nebd-server.conf * -log_dir=/data/log/nebd/server * -graceful_quit_on_sigterm=true * -stderrthreshold=3 * " - * 那么我们要解析出的名字是nebd-server - * @param addrVec 地址列表 - * @return 进程的名字 + * So the name we need to parse out is nebd-server + * @param addrVec Address list + * @return The name of the process */ - std::string GetProcessNameFromCmd(const std::string &cmd); + std::string GetProcessNameFromCmd(const std::string& cmd); private: - // 向mds发送RPC的client + // Client sending RPC to mds std::shared_ptr mdsClient_; - // 用于获取snapshotClone状态 + // Used to obtain snapshot clone status std::shared_ptr snapshotClient_; - // 获取metric的client + // Client used to obtain metrics std::shared_ptr metricClient_; }; diff --git a/test/chunkserver/braft_cli_service2_test.cpp b/test/chunkserver/braft_cli_service2_test.cpp index cc97980aa2..110a0923b8 100644 --- a/test/chunkserver/braft_cli_service2_test.cpp +++ b/test/chunkserver/braft_cli_service2_test.cpp @@ -20,25 +20,26 @@ * Author: wudemiao */ -#include -#include -#include -#include +#include "src/chunkserver/braft_cli_service2.h" + #include #include #include +#include +#include +#include +#include #include +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/braft_cli_service2.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" -#include "src/common/uuid.h" #include "src/common/timeutility.h" +#include "src/common/uuid.h" #include "src/fs/local_filesystem.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -48,10 +49,12 @@ using curve::common::UUIDGenerator; class BraftCliService2Test : public testing::Test { protected: static void SetUpTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "SetUpTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "SetUpTestCase"; } static void TearDownTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "TearDownTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "TearDownTestCase"; } virtual void SetUp() { peer1.set_address("127.0.0.1:9310:0"); @@ -75,10 +78,10 @@ class BraftCliService2Test : public testing::Test { } public: - const char *ip = "127.0.0.1"; - int port = 9310; - const char *confs = "127.0.0.1:9310:0,127.0.0.1:9311:0,127.0.0.1:9312:0"; - int snapshotInterval
= 3600; // 防止自动打快照 + const char* ip = "127.0.0.1"; + int port = 9310; + const char* confs = "127.0.0.1:9310:0,127.0.0.1:9311:0,127.0.0.1:9312:0"; + int snapshotInterval = 3600; // Prevent automatic snapshot taking int electionTimeoutMs = 3000; pid_t pid1; @@ -128,12 +131,8 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dirMap[peer1.address()]; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -143,12 +142,8 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dirMap[peer2.address()]; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -158,16 +153,12 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dirMap[peer3.address()]; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ WaitpidGuard waitpidGuard(pid1, pid2, pid3); ::usleep(1.2 * 1000 * electionTimeoutMs); @@ -182,15 +173,15 @@ TEST_F(BraftCliService2Test, basic2) { options.timeout_ms = 3000; options.max_retry = 3; - /* add peer - 非法copyset */ + /*Add peer - illegal copyset*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); AddPeerRequest2 request; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_leader(leaderPeer); @@ -210,10 +201,10 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* add peer - 非法peerid */ + /*Add peer - illegal peer id*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -223,7 +214,7 @@ TEST_F(BraftCliService2Test, basic2) { request.set_allocated_leader(leaderPeer); *leaderPeer = gLeader; request.set_allocated_addpeer(peer); - // request中的peer id是非法的 + // The peer id in the request is illegal peer->set_address("127.0.0"); AddPeerResponse2 response; @@ -237,13 +228,14 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "add peer: " << cntl.ErrorText(); } - /* add peer - 发送给不是leader的peer */ + /*Add peer - sent to peers who are not leaders*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); PeerId leaderId; LOG(INFO) << "true leader is: " << gLeader.address(); - // 找一个不是leader的peer,然后将配置变更请求发送给它处理 + // Find a peer that is not a leader and send the configuration change + // request to it for processing if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { leaderId.parse(peer2.address()); *leaderPeer = peer2; @@ -274,15 +266,15 @@ 
TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* remove peer - 非法copyset */ + /*Remove peer - illegal copyset*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); RemovePeerRequest2 request; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_leader(leaderPeer); @@ -302,10 +294,10 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* remove peer - 非法peer id */ + /*Remove peer - illegal peer id*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -315,7 +307,7 @@ TEST_F(BraftCliService2Test, basic2) { request.set_allocated_leader(leaderPeer); *leaderPeer = gLeader; request.set_allocated_removepeer(peer); - // request中的peer id是非法的 + // The peer id in the request is illegal peer->set_address("127.0.0"); RemovePeerResponse2 response; @@ -329,15 +321,15 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "remove peer: " << cntl.ErrorText(); } - /* remove peer - 发送给不是leader的peer */ + /*Remove peer - sent to peers who are not leaders*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); PeerId leaderId; LOG(INFO) << "true leader is: " << gLeader.address(); - // 找一个不是leader的peer,然后将配置变更请求发送给它处理 - if (0 - == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { + // Find a peer that is not a leader and send the configuration change + // request to it for processing + if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { leaderId.parse(peer2.address()); *leaderPeer = peer2; } else { @@ -367,15 +359,15 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* transfer leader - 非法copyset */ + /* Transfer leader - illegal copyset*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); TransferLeaderRequest2 request; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_leader(leaderPeer); @@ -395,8 +387,8 @@ TEST_F(BraftCliService2Test, basic2) { } /* transfer leader to leader */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -417,10 +409,10 @@ TEST_F(BraftCliService2Test, basic2) { stub.TransferLeader(&cntl, &request, &response, NULL); ASSERT_FALSE(cntl.Failed()); } - /* transfer leader - 非法peer */ + /*Transfer leader - illegal peer*/ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -430,7 +422,7 @@ TEST_F(BraftCliService2Test, basic2) { request.set_allocated_leader(leaderPeer); *leaderPeer = gLeader; request.set_allocated_transferee(peer); - // request中的peer id是非法的 + // The peer id in the 
request is illegal peer->set_address("127.0.0"); TransferLeaderResponse2 response; @@ -444,18 +436,17 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "Transfer leader peer: " << cntl.ErrorText(); } - /* get leader - 非法copyset */ + /*Get leader - illegal copyset*/ { PeerId leaderId = leaderId; brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); - GetLeaderRequest2 request; GetLeaderResponse2 response; brpc::Controller cntl; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); @@ -467,14 +458,13 @@ TEST_F(BraftCliService2Test, basic2) { /* remove peer then add peer */ { // 1 remove peer - Peer *removePeer = new Peer(); - Peer *leaderPeer1 = new Peer(); - Peer *leaderPeer2 = new Peer(); - Peer *addPeer = new Peer(); + Peer* removePeer = new Peer(); + Peer* leaderPeer1 = new Peer(); + Peer* leaderPeer2 = new Peer(); + Peer* addPeer = new Peer(); PeerId removePeerId; - // 找一个不是leader的peer,作为remove peer - if (0 - == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { + // Find a peer that is not a leader as a remove peer + if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { removePeerId.parse(peer2.address()); *removePeer = peer2; } else { @@ -508,7 +498,6 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_FALSE(cntl1.Failed()); ASSERT_EQ(0, cntl1.ErrorCode()); - // add peer AddPeerRequest2 request2; request2.set_logicpoolid(logicPoolId); @@ -529,17 +518,17 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_FALSE(cntl2.Failed()); ASSERT_EQ(0, cntl2.ErrorCode()); } - /* snapshot - 非法copyset */ + /*Snapshot - illegal copyset*/ { PeerId peer(peer1.address()); brpc::Channel channel; ASSERT_EQ(0, channel.Init(peer.addr, NULL)); SnapshotRequest2 request; - /* 非法 copyset */ + /*Illegal copyset*/ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); - Peer *peerPtr = new Peer(); + Peer* peerPtr = new Peer(); *peerPtr = peer1; request.set_allocated_peer(peerPtr); @@ -557,11 +546,12 @@ TEST_F(BraftCliService2Test, basic2) { } /* snapshot - normal */ { - // 初始状态快照不为空 + // The initial state snapshot is not empty std::string copysetDataDir = dirMap[gLeader.address()] + "/" + - ToGroupId(logicPoolId, copysetId) + "/" + RAFT_LOG_DIR; + ToGroupId(logicPoolId, copysetId) + "/" + + RAFT_LOG_DIR; std::shared_ptr fs( - LocalFsFactory::CreateFs(curve::fs::FileSystemType::EXT4, "")); + LocalFsFactory::CreateFs(curve::fs::FileSystemType::EXT4, "")); std::vector files; fs->List(copysetDataDir.c_str(), &files); ASSERT_GE(files.size(), 1); @@ -574,7 +564,7 @@ TEST_F(BraftCliService2Test, basic2) { SnapshotRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *peerPtr = new Peer(); + Peer* peerPtr = new Peer(); peerPtr->set_address(leaderId.to_string()); request.set_allocated_peer(peerPtr); @@ -586,19 +576,20 @@ TEST_F(BraftCliService2Test, basic2) { LOG(INFO) << "Start do snapshot"; CliService2_Stub stub(&channel); stub.Snapshot(&cntl, &request, &response, NULL); - ASSERT_FALSE(cntl.Failed()) << "Do snapshot fail, error: " - << cntl.ErrorText(); - // 需要连续打两次快照才能删除第一次快照时的log + ASSERT_FALSE(cntl.Failed()) + << "Do snapshot fail, error: " << cntl.ErrorText(); + // Two consecutive snapshots are required to delete the log from the + // first snapshot sleep(5); cntl.Reset(); LOG(INFO) << "Start do snapshot"; stub.Snapshot(&cntl, &request, &response, NULL); - ASSERT_FALSE(cntl.Failed()) << "Do 
snapshot fail, error: " - << cntl.ErrorText(); + ASSERT_FALSE(cntl.Failed()) + << "Do snapshot fail, error: " << cntl.ErrorText(); for (int i = 0; i < 60; ++i) { files.clear(); fs->List(copysetDataDir.c_str(), &files); - // 打完快照应该只剩下meta信息 + // After taking the snapshot, only meta information should be left if (files.size() == 1) { break; } @@ -619,18 +610,18 @@ TEST_F(BraftCliService2Test, basic2) { CliService2_Stub stub(&channel); stub.SnapshotAll(&cntl, &request, &response, NULL); - ASSERT_FALSE(cntl.Failed()) << "Do snapshot all fail, error: " - << cntl.ErrorText(); + ASSERT_FALSE(cntl.Failed()) + << "Do snapshot all fail, error: " << cntl.ErrorText(); } - /* reset peer - 非法 copyset */ + /*Reset peer - illegal copyset*/ { - Peer *targetPeer = new Peer(); + Peer* targetPeer = new Peer(); *targetPeer = peer1; PeerId peer(peer1.address()); brpc::Channel channel; ASSERT_EQ(0, channel.Init(peer.addr, NULL)); ResetPeerRequest2 request; - /* 非法 copyset */ + /*Illegal copyset*/ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_requestpeer(targetPeer); @@ -646,9 +637,9 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* reset peer - new peer为空 */ + /*Reset peer - new peer is empty*/ { - Peer *targetPeer = new Peer(); + Peer* targetPeer = new Peer(); *targetPeer = peer1; PeerId peer(peer1.address()); brpc::Channel channel; @@ -669,7 +660,7 @@ TEST_F(BraftCliService2Test, basic2) { } /* reset peer - normal */ { - Peer *targetPeer = new Peer(); + Peer* targetPeer = new Peer(); *targetPeer = peer1; PeerId peer(peer1.address()); brpc::Channel channel; diff --git a/test/chunkserver/braft_cli_service_test.cpp b/test/chunkserver/braft_cli_service_test.cpp index 50f04588af..63a83cfe9d 100644 --- a/test/chunkserver/braft_cli_service_test.cpp +++ b/test/chunkserver/braft_cli_service_test.cpp @@ -20,21 +20,22 @@ * Author: wudemiao */ -#include -#include -#include -#include +#include "src/chunkserver/braft_cli_service.h" + #include #include #include +#include +#include +#include +#include #include +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/braft_cli_service.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" #include "test/chunkserver/chunkserver_test_util.h" namespace curve { @@ -43,10 +44,12 @@ namespace chunkserver { class BraftCliServiceTest : public testing::Test { protected: static void SetUpTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "SetUpTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "SetUpTestCase"; } static void TearDownTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "TearDownTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "TearDownTestCase"; } virtual void SetUp() { Exec("mkdir 6"); @@ -68,9 +71,9 @@ class BraftCliServiceTest : public testing::Test { butil::AtExitManager atExitManager; TEST_F(BraftCliServiceTest, basic) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9015; - const char *confs = "127.0.0.1:9015:0,127.0.0.1:9016:0,127.0.0.1:9017:0"; + const char* confs = "127.0.0.1:9015:0,127.0.0.1:9016:0,127.0.0.1:9017:0"; int snapshotInterval = 600; PeerId peer1("127.0.0.1:9015:0"); PeerId peer2("127.0.0.1:9016:0"); @@ -87,12 +90,8 @@ TEST_F(BraftCliServiceTest, basic) { std::cerr << "fork chunkserver 1 failed" << std::endl; ASSERT_TRUE(false); } else if (0 == pid1) { - const 
char *copysetdir = "local://./6"; - StartChunkserver(ip, - port + 0, - copysetdir, - confs, - snapshotInterval, + const char* copysetdir = "local://./6"; + StartChunkserver(ip, port + 0, copysetdir, confs, snapshotInterval, electionTimeoutMs); return; } @@ -102,12 +101,8 @@ TEST_F(BraftCliServiceTest, basic) { std::cerr << "fork chunkserver 2 failed" << std::endl; ASSERT_TRUE(false); } else if (0 == pid2) { - const char *copysetdir = "local://./7"; - StartChunkserver(ip, - port + 1, - copysetdir, - confs, - snapshotInterval, + const char* copysetdir = "local://./7"; + StartChunkserver(ip, port + 1, copysetdir, confs, snapshotInterval, electionTimeoutMs); return; } @@ -117,17 +112,13 @@ TEST_F(BraftCliServiceTest, basic) { std::cerr << "fork chunkserver 3 failed" << std::endl; ASSERT_TRUE(false); } else if (0 == pid3) { - const char *copysetdir = "local://./8"; - StartChunkserver(ip, - port + 2, - copysetdir, - confs, - snapshotInterval, + const char* copysetdir = "local://./8"; + StartChunkserver(ip, port + 2, copysetdir, confs, snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /* Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -144,6 +135,7 @@ TEST_F(BraftCliServiceTest, basic) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -166,7 +158,7 @@ TEST_F(BraftCliServiceTest, basic) { options.timeout_ms = 1500; options.max_retry = 3; - /* add peer - 非法 copyset */ + /* Add peer - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; @@ -188,7 +180,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* add peer - 非法 peerid */ + /* add peer - illegal peerid */ { PeerId leaderId = leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leaderId); @@ -210,12 +202,12 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "add peer: " << cntl.ErrorText(); } - /* add peer - 发送给不是leader的peer */ + /* add peer - sent to peers who are not leader */ { PeerId leaderId; LOG(INFO) << "true leader is: " << leader.to_string(); - if (0 - == strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { + if (0 == + strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { leaderId = peer2; } else { leaderId = peer1; @@ -240,13 +232,13 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* remove peer - 非法 copyset */ + /* remove peer - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); RemovePeerRequest request; - /* 非法 copyset */ + /* Illegal copyset */ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_leader_id(leaderId.to_string()); @@ -261,7 +253,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* remove peer - 非法 peer id */ + /* remove peer - illegal peer id */ { PeerId leaderId = leader; brpc::Channel channel; @@ -281,12 +273,12 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EINVAL, cntl.ErrorCode()); } - /* remove peer - 发送给不是 leader 的 peer */ + /* remove peer - sent to peers who are not leaders */ { PeerId leaderId; LOG(INFO) << "true leader is: " << leader.to_string(); - if (0 - == strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { + if (0 == + 
strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { leaderId = peer2; } else { leaderId = peer1; @@ -309,7 +301,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* transfer leader - 非法 copyset */ + /* transfer leader - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; @@ -346,7 +338,7 @@ TEST_F(BraftCliServiceTest, basic) { stub.transfer_leader(&cntl, &request, &response, NULL); ASSERT_FALSE(cntl.Failed()); } - /* transfer leader - 非法 peer */ + /* transfer leader - illegal peer */ { PeerId leaderId = leader; brpc::Channel channel; @@ -365,7 +357,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EINVAL, cntl.ErrorCode()); } - /* get leader - 非法 copyset */ + /* get leader - illegal copyset */ { PeerId leaderId = leaderId; brpc::Channel channel; diff --git a/test/chunkserver/chunk_service_test.cpp b/test/chunkserver/chunk_service_test.cpp index 3968766d91..c1191bde5b 100644 --- a/test/chunkserver/chunk_service_test.cpp +++ b/test/chunkserver/chunk_service_test.cpp @@ -20,24 +20,24 @@ * Author: wudemiao */ +#include "src/chunkserver/chunk_service.h" -#include -#include -#include -#include -#include #include #include #include +#include +#include +#include +#include +#include #include "include/chunkserver/chunkserver_common.h" +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" -#include "src/chunkserver/chunk_service.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -75,11 +75,10 @@ class ChunkserverTest : public testing::Test { butil::AtExitManager atExitManager; - TEST_F(ChunkserverTest, normal_read_write_test) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9020; - const char *confs = "127.0.0.1:9020:0,127.0.0.1:9021:0,127.0.0.1:9022:0"; + const char* confs = "127.0.0.1:9020:0,127.0.0.1:9021:0,127.0.0.1:9022:0"; int rpcTimeoutMs = 3000; int snapshotInterval = 600; @@ -96,12 +95,8 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -111,12 +106,8 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -126,16 +117,12 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -152,6 +139,7 @@ 
TEST_F(ChunkserverTest, normal_read_write_test) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -313,7 +301,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* delete 一个不存在的 chunk(重复删除) */ + /*Delete a non-existent chunk (duplicate deletion)*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -329,7 +317,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response.status()); } - /* Read 一个不存在的 Chunk */ + /*Read a non-existent Chunk*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -347,7 +335,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, response.status()); } - /* Applied index Read 一个不存在的 Chunk */ + /*Applied index Read a non-existent Chunk*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -416,9 +404,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { request.set_copysetid(copysetId); request.set_chunkid(chunkId); request.set_correctedsn(sn); - stub.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -435,9 +421,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { request.set_copysetid(copysetId); request.set_chunkid(chunkId); request.set_correctedsn(sn); - stub.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, @@ -467,7 +451,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(0, channel.Init(leader.addr, NULL)); ChunkService_Stub stub(&channel); - // get hash : 访问不存在的chunk + // Get hash: Access non-existent chunks { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -485,7 +469,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_STREQ("0", response.hash().c_str()); } - // get hash : 非法的offset和length + // Get hash: illegal offset and length { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -560,7 +544,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(1, response.chunksn().size()); } - // get hash : 访问存在的chunk + // Get hash: Access existing chunks { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -579,7 +563,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { } } - /* 多 chunk read/write/delete */ + /*Multi chunk read/write/delete*/ { brpc::Channel channel; if (channel.Init(leader.addr, NULL) != 0) { @@ -685,7 +669,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* delete 一个不存在的 chunk(重复删除) */ + /*Delete a non-existent chunk (duplicate deletion)*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -703,7 +687,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { } } } - /* read 一个不存在的 chunk */ + /*Read a non-existent chunk*/ { brpc::Channel channel; uint32_t requestSize = kOpRequestAlignSize; @@ -770,7 +754,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* read 一个不存在的 chunk */ + /*Read a non-existent chunk*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); diff --git 
a/test/chunkserver/chunk_service_test2.cpp b/test/chunkserver/chunk_service_test2.cpp index 674220d91a..9d3c136e14 100644 --- a/test/chunkserver/chunk_service_test2.cpp +++ b/test/chunkserver/chunk_service_test2.cpp @@ -20,24 +20,23 @@ * Author: wudemiao */ - -#include -#include -#include -#include -#include #include #include #include +#include +#include +#include +#include +#include #include "include/chunkserver/chunkserver_common.h" +#include "proto/copyset.pb.h" +#include "src/chunkserver/chunk_service.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" -#include "src/chunkserver/chunk_service.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -76,9 +75,9 @@ class ChunkService2Test : public testing::Test { butil::AtExitManager atExitManager; TEST_F(ChunkService2Test, illegial_parameters_test) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9023; - const char *confs = "127.0.0.1:9023:0,127.0.0.1:9024:0,127.0.0.1:9025:0"; + const char* confs = "127.0.0.1:9023:0,127.0.0.1:9024:0,127.0.0.1:9025:0"; int rpcTimeoutMs = 3000; int snapshotInterval = 600; @@ -95,12 +94,8 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -110,12 +105,8 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -125,16 +116,12 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -151,6 +138,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -177,13 +165,13 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); - /* 非法参数 request 测试 */ + /*Illegal parameter request test*/ brpc::Channel channel; if (channel.Init(leader.addr, NULL) != 0) { LOG(ERROR) << "Fail to init channel to " << leader; } ChunkService_Stub stub(&channel); - /* read 溢出 */ + /*Read overflow*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -201,7 +189,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read offset没对齐 */ + /*Read offset not aligned*/ { brpc::Controller cntl; 
cntl.set_timeout_ms(rpcTimeoutMs); @@ -219,7 +207,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read size没对齐 */ + /*Read size not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -237,7 +225,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read copyset 不存在 */ + /*Read copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -256,7 +244,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* read snapshot 溢出 */ + /*Read snapshot overflow*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -274,7 +262,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot offset没对齐 */ + /*Read snapshot offset not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -293,7 +281,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot size没对齐 */ + /*Read snapshot size not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -312,7 +300,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot copyset 不存在 */ + /*Read snapshot copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -331,7 +319,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* write 溢出 */ + /*Write overflow*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -350,7 +338,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write offset没对齐 */ + /*Write offset not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -369,7 +357,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write size没对齐 */ + /*Write size not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -388,7 +376,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write copyset 不存在 */ + /*The write copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -407,7 +395,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* delete copyset 不存在*/ + /*Delete copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -423,7 +411,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* delete snapshot copyset 不存在*/ + /*Delete snapshot copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -434,9 +422,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { request.set_copysetid(copysetId + 1); request.set_chunkid(chunkId); request.set_correctedsn(sn); - 
stub.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, @@ -456,7 +442,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* 不是 leader */ + /* Not a leader */ { PeerId peer1; PeerId peer2; @@ -562,13 +548,12 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { class ChunkServiceTestClosure : public ::google::protobuf::Closure { public: - explicit ChunkServiceTestClosure(int sleepUs = 0) : sleep_(sleepUs) { - } + explicit ChunkServiceTestClosure(int sleepUs = 0) : sleep_(sleepUs) {} virtual ~ChunkServiceTestClosure() = default; void Run() override { if (0 != sleep_) { - // 睡眠一会方面测试,overload + // Sleep for a while to facilitate the overload test ::usleep(sleep_); LOG(INFO) << "return rpc"; } @@ -580,13 +565,12 @@ class ChunkServiceTestClosure : public ::google::protobuf::Closure { class UpdateEpochTestClosure : public ::google::protobuf::Closure { public: - explicit UpdateEpochTestClosure(int sleepUs = 0) : sleep_(sleepUs) { - } + explicit UpdateEpochTestClosure(int sleepUs = 0) : sleep_(sleepUs) {} virtual ~UpdateEpochTestClosure() = default; void Run() override { if (0 != sleep_) { - // 睡眠一会方面测试,overload + // Sleep for a while to facilitate the overload test ::usleep(sleep_); LOG(INFO) << "return rpc"; } @@ -602,12 +586,12 @@ TEST_F(ChunkService2Test, overload_test) { // inflight throttle uint64_t maxInflight = 0; - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service - CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance(); + CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance(); ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = &nodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle; @@ -690,9 +674,7 @@ TEST_F(ChunkService2Test, overload_test) { request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } @@ -750,12 +732,12 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { // inflight throttle uint64_t maxInflight = 10; - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service - CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance(); + CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance(); ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = &nodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle; @@ -780,17 +762,17 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { }; std::vector threads; - // 启动10个线程,将chunkserver压满 + // Start 10 threads to fully load the chunkserver for (int i = 0; i < 10; ++i) { std::thread t1(writeFunc); threads.push_back(std::move(t1)); } - // 等待进程启动起来 + // Wait for the threads to start ::usleep(500 * 1000); ASSERT_FALSE(inflightThrottle->IsOverLoad()); - // 压满之后chunkserver后面收到的request都会被拒绝 +
// All requests received after the chunkserver is filled will be rejected // write chunk { brpc::Controller cntl; @@ -863,9 +845,7 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } @@ -916,7 +896,8 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } - // 等待request处理完成,之后chunkserver又重新可以接收新的request + // Wait for the request processing to complete, and then chunkserver can + // receive new requests again for (auto it = threads.begin(); it != threads.end(); ++it) { it->join(); } @@ -995,9 +976,7 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, &done); ASSERT_NE(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } @@ -1055,12 +1034,12 @@ TEST_F(ChunkService2Test, CheckEpochTest) { // inflight throttle uint64_t maxInflight = 10000; - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service - CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance(); + CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance(); ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = &nodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle; @@ -1083,7 +1062,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_chunkid(chunkId); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - response.status()); + response.status()); } // write chunk request have epoch, but epoch map have no epoch @@ -1100,7 +1079,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - response.status()); + response.status()); } // update epoch map to {(1, 1) , (2, 2)} { @@ -1130,7 +1109,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - response.status()); + response.status()); } // write chunk check epoch failed { @@ -1146,7 +1125,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD, - response.status()); + response.status()); } // update epoch map to {(1, 2) , (2, 2)} @@ -1174,7 +1153,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD, - response.status()); + response.status()); } } diff --git a/test/chunkserver/chunkserver_helper_test.cpp b/test/chunkserver/chunkserver_helper_test.cpp index 
e9d538bf0c..d401a22185 100644 --- a/test/chunkserver/chunkserver_helper_test.cpp +++ b/test/chunkserver/chunkserver_helper_test.cpp @@ -20,14 +20,16 @@ * Author: lixiaocui */ -#include #include "src/chunkserver/chunkserver_helper.h" + +#include + #include "src/chunkserver/register.h" namespace curve { namespace chunkserver { TEST(ChunkServerMeta, test_encode_and_decode) { - // 1. 正常编解码 + // 1. Normal encoding and decoding ChunkServerMetadata metadata; metadata.set_version(CURRENT_METADATA_VERSION); metadata.set_id(1); @@ -43,13 +45,13 @@ TEST(ChunkServerMeta, test_encode_and_decode) { ASSERT_EQ(metadata.id(), metaOut.id()); ASSERT_EQ(metadata.token(), metaOut.token()); - // 2. 编码异常 + // 2. Encoding anomaly metadata.clear_token(); strOut.clear(); ASSERT_FALSE( ChunkServerMetaHelper::EncodeChunkServerMeta(metadata, &strOut)); - // 3. 解码异常 + // 3. Decoding exception metadata.set_token("hello"); metadata.set_checksum(9999); ASSERT_TRUE( diff --git a/test/chunkserver/chunkserver_service_test.cpp b/test/chunkserver/chunkserver_service_test.cpp index 106501e267..4b834a5037 100644 --- a/test/chunkserver/chunkserver_service_test.cpp +++ b/test/chunkserver/chunkserver_service_test.cpp @@ -20,40 +20,41 @@ * Author: lixiaocui1 */ -#include -#include +#include "src/chunkserver/chunkserver_service.h" + #include +#include #include +#include #include -#include "src/chunkserver/chunkserver_service.h" -#include "test/chunkserver/mock_copyset_node_manager.h" + #include "proto/chunkserver.pb.h" +#include "test/chunkserver/mock_copyset_node_manager.h" namespace curve { namespace chunkserver { -using ::testing::Return; using ::testing::_; +using ::testing::Return; TEST(ChunkServerServiceImplTest, test_ChunkServerStatus) { - // 启动ChunkServerService + // Start ChunkServerService auto server = new brpc::Server(); MockCopysetNodeManager* copysetNodeManager = new MockCopysetNodeManager(); ChunkServerServiceImpl* chunkserverService = new ChunkServerServiceImpl(copysetNodeManager); - ASSERT_EQ(0, - server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE)); + ASSERT_EQ( + 0, server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE)); ASSERT_EQ(0, server->Start("127.0.0.1", {5900, 5999}, nullptr)); auto listenAddr = butil::endpoint2str(server->listen_address()).c_str(); - brpc::Channel channel; ASSERT_EQ(0, channel.Init(listenAddr, NULL)); ChunkServerService_Stub stub(&channel); ChunkServerStatusRequest request; ChunkServerStatusResponse response; - // 1. 指定chunkserver加载copyset完成 + // 1. Specify chunkserver to load copyset complete { EXPECT_CALL(*copysetNodeManager, LoadFinished()) .WillOnce(Return(false)); @@ -63,23 +64,22 @@ TEST(ChunkServerServiceImplTest, test_ChunkServerStatus) { ASSERT_FALSE(response.copysetloadfin()); } - // 2. 指定chunkserver加载copyset未完成 + // 2. The specified chunkserver loading copyset did not complete { - EXPECT_CALL(*copysetNodeManager, LoadFinished()) - .WillOnce(Return(true)); + EXPECT_CALL(*copysetNodeManager, LoadFinished()).WillOnce(Return(true)); brpc::Controller cntl; stub.ChunkServerStatus(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_TRUE(response.copysetloadfin()); } - // 停止chunkserver service + // Stop chunkserver service server->Stop(0); server->Join(); delete server; server = nullptr; - // 3. 未获取到指定chunkserver加载copyset状态 + // 3. 
Unable to obtain the specified chunkserver loading copyset status { brpc::Controller cntl; stub.ChunkServerStatus(&cntl, &request, &response, nullptr); diff --git a/test/chunkserver/chunkserver_snapshot_test.cpp b/test/chunkserver/chunkserver_snapshot_test.cpp index b534ca2ee3..a05a9e6498 100644 --- a/test/chunkserver/chunkserver_snapshot_test.cpp +++ b/test/chunkserver/chunkserver_snapshot_test.cpp @@ -21,25 +21,25 @@ */ #include -#include #include +#include #include -#include "test/chunkserver/chunkserver_test_util.h" -#include "src/chunkserver/copyset_node_manager.h" +#include "proto/common.pb.h" +#include "proto/copyset.pb.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "proto/common.pb.h" -#include "proto/copyset.pb.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; static constexpr uint32_t kOpRequestAlignSize = 4096; @@ -64,7 +64,7 @@ class ChunkServerSnapshotTest : public testing::Test { Exec(TestCluster::RemoveCopysetDirCmd(peer3).c_str()); Exec(TestCluster::RemoveCopysetDirCmd(peer4).c_str()); /* wait for process exit */ - ::usleep(100*1000); + ::usleep(100 * 1000); } public: @@ -77,26 +77,22 @@ class ChunkServerSnapshotTest : public testing::Test { }; /** - * TODO(wudemiao) 后期将发 I/O 和验证再抽象一下 + * TODO(wudemiao) will further abstract I/O and verification in the later stage */ /** - * 正常 I/O 验证,先写进去,再读出来验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * Normal I/O verification, write it in first, then read it out for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -static void WriteThenReadVerify(PeerId leaderId, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +static void WriteThenReadVerify(PeerId leaderId, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, + int length, char fillCh, int loop) { brpc::Channel channel; uint64_t sn = 1; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); @@ -112,14 +108,13 @@ static void WriteThenReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(sn); cntl.request_attachment().resize(length, fillCh); stub.WriteChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode() + << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); @@ -134,13 +129,12 @@ static void WriteThenReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); 
request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode() + << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); @@ -152,22 +146,18 @@ static void WriteThenReadVerify(PeerId leaderId, } /** - * 正常 I/O 验证,read 数据验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * Normal I/O verification, read data verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -static void ReadVerify(PeerId leaderId, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +static void ReadVerify(PeerId leaderId, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { brpc::Channel channel; uint64_t sn = 1; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); @@ -181,16 +171,14 @@ static void ReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); std::string expectRead(length, fillCh); ASSERT_STREQ(expectRead.c_str(), cntl.response_attachment().to_string().c_str()); @@ -198,22 +186,18 @@ static void ReadVerify(PeerId leaderId, } /** - * 异常 I/O 验证,验证集群是否处于不可用状态 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * Abnormal I/O verification to verify if the cluster is in an unavailable state + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -static void ReadVerifyNotAvailable(PeerId leaderId, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +static void ReadVerifyNotAvailable(PeerId leaderId, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, + int length, char fillCh, int loop) { brpc::Channel channel; uint64_t sn = 1; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); @@ -228,30 +212,29 @@ static void ReadVerifyNotAvailable(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + 
request.set_offset(length * i); request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); LOG(INFO) << "read: " << CHUNK_OP_STATUS_Name(response.status()); ASSERT_TRUE(cntl.Failed() || - response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response.status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } } /** - * 验证copyset status是否符合预期 + * Verify if the copyset status meets expectations * @param peerId: peer id - * @param logicPoolID: 逻辑池id - * @param copysetId: 复制组id - * @param expectResp: 期待的copyset status + * @param logicPoolID: Logical Pool ID + * @param copysetId: Copy group ID + * @param expectResp: Expected copyset status */ -static void CopysetStatusVerify(PeerId peerId, - LogicPoolID logicPoolID, +static void CopysetStatusVerify(PeerId peerId, LogicPoolID logicPoolID, CopysetID copysetId, - CopysetStatusResponse *expectResp) { + CopysetStatusResponse* expectResp) { brpc::Channel channel; ASSERT_EQ(0, channel.Init(peerId.addr, NULL)); CopysetService_Stub stub(&channel); @@ -261,7 +244,7 @@ static void CopysetStatusVerify(PeerId peerId, cntl.set_timeout_ms(5000); request.set_logicpoolid(logicPoolID); request.set_copysetid(copysetId); - Peer *peer = new Peer(); + Peer* peer = new Peer(); request.set_allocated_peer(peer); peer->set_address(peerId.to_string()); request.set_queryhash(true); @@ -279,14 +262,13 @@ static void CopysetStatusVerify(PeerId peerId, } /** - * 验证几个副本的copyset status是否一致 - * @param peerIds: 待验证的peers - * @param logicPoolID: 逻辑池id - * @param copysetId: 复制组id + * Verify if the copyset status of several replicas is consistent + * @param peerIds: Peers to be verified + * @param logicPoolID: Logical Pool ID + * @param copysetId: Copy group ID */ -static void CopysetStatusVerify(const std::vector &peerIds, - LogicPoolID logicPoolID, - CopysetID copysetId, +static void CopysetStatusVerify(const std::vector& peerIds, + LogicPoolID logicPoolID, CopysetID copysetId, uint64_t expectEpoch = 0) { std::vector resps; for (PeerId peerId : peerIds) { @@ -300,7 +282,7 @@ static void CopysetStatusVerify(const std::vector &peerIds, cntl.set_timeout_ms(5000); request.set_logicpoolid(logicPoolID); request.set_copysetid(copysetId); - Peer *peer = new Peer(); + Peer* peer = new Peer(); request.set_allocated_peer(peer); peer->set_address(peerId.to_string()); request.set_queryhash(true); @@ -309,7 +291,8 @@ static void CopysetStatusVerify(const std::vector &peerIds, ASSERT_FALSE(cntl.Failed()); LOG(INFO) << peerId.to_string() << "'s status is: \n" << response.DebugString(); - // 多个副本的state是不一样的,因为有leader,也有follower + // The states of multiple replicas are different because there are + // leaders and followers response.clear_state(); response.clear_peer(); response.clear_firstindex(); @@ -333,9 +316,11 @@ static void CopysetStatusVerify(const std::vector &peerIds, butil::AtExitManager atExitManager; /** - * 验证1个节点的复制组是否能够正常提供服务 - * 1. 创建一个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 + * Verify whether the replication group of one node can provide services + * normally + * 1. Create a replication group for a replica + * 2. 
Wait for the leader to generate, write the data, and then read it out for + * verification */ TEST_F(ChunkServerSnapshotTest, OneNode) { LogicPoolID logicPoolId = 2; @@ -355,23 +340,18 @@ TEST_F(ChunkServerSnapshotTest, OneNode) { ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); ASSERT_STREQ(peer1.to_string().c_str(), leaderId.to_string().c_str()); - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); CopysetStatusResponse expectResp; - // read、write、1次配置变更 + // read, write, 1 configuration change int64_t commitedIndex = loop + 1; expectResp.set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); expectResp.set_state(braft::STATE_LEADER); - Peer *peer = new Peer(); + Peer* peer = new Peer(); expectResp.set_allocated_peer(peer); peer->set_address(peer1.to_string()); - Peer *leader = new Peer(); + Peer* leader = new Peer(); expectResp.set_allocated_leader(leader); leader->set_address(peer1.to_string()); expectResp.set_readonly(false); @@ -390,12 +370,15 @@ TEST_F(ChunkServerSnapshotTest, OneNode) { } /** - * 验证1个节点的关闭 leader 后重启是否能够正常服务 - * 1. 创建1个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether the shutdown of the leader and restart of one node can provide + * normal service + * 1. Create a replication group for 1 replica + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written + * before the read + * 5. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, OneNodeShutdown) { LogicPoolID logicPoolId = 2; @@ -415,45 +398,30 @@ TEST_F(ChunkServerSnapshotTest, OneNodeShutdown) { ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); ASSERT_STREQ(peer1.to_string().c_str(), leaderId.to_string().c_str()); - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); ASSERT_EQ(0, cluster.ShutdownPeer(peer1)); - // 测试发现集群不可用 - ReadVerifyNotAvailable(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); + // Testing found that the cluster is not available + ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, chunkId, length, + ch, 1); ASSERT_EQ(0, cluster.StartPeer(peer1)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); ASSERT_STREQ(peer1.to_string().c_str(), leaderId.to_string().c_str()); ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); CopysetStatusResponse expectResp; int64_t commitedIndex = 2 * loop + 2; expectResp.set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); expectResp.set_state(braft::STATE_LEADER); - Peer *peer = new Peer(); + Peer* peer = new Peer(); expectResp.set_allocated_peer(peer); peer->set_address(peer1.to_string()); - Peer *leader = new Peer(); + Peer* leader = new Peer(); expectResp.set_allocated_leader(leader); leader->set_address(peer1.to_string()); expectResp.set_readonly(false); @@ -473,9 +441,10 @@ TEST_F(ChunkServerSnapshotTest, OneNodeShutdown) { } /** - * 验证2个节点是否能够正常提供服务 - * 1. 
创建2个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 + * Verify whether two nodes can provide services normally + * 1. Create a replication group of 2 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification */ TEST_F(ChunkServerSnapshotTest, TwoNodes) { LogicPoolID logicPoolId = 2; @@ -498,12 +467,7 @@ TEST_F(ChunkServerSnapshotTest, TwoNodes) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); ::usleep(2000 * 1000); @@ -511,12 +475,15 @@ TEST_F(ChunkServerSnapshotTest, TwoNodes) { } /** - * 验证2个节点的关闭非 leader 节点 后重启是否能够正常服务 - * 1. 创建2个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether restarting two nodes after closing non leader nodes can + * provide normal service + * 1. Create a replication group of 2 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown is not a leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written + * before the read + * 5. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownOnePeer) { LogicPoolID logicPoolId = 2; @@ -539,19 +506,14 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownOnePeer) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { shutdownPeerid = peer2; } else { shutdownPeerid = peer1; @@ -563,40 +525,33 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownOnePeer) { ::usleep(2000 * electionTimeoutMs); - // 测试发现集群不可用 - ReadVerifyNotAvailable(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); + // Testing found that the cluster is not available + ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, chunkId, length, + ch, 1); ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证2个节点的关闭 leader 后重启是否能够正常服务 - * 1. 创建2个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether the shutdown of the leader and restart of two nodes can + * provide normal service + * 1. Create a replication group of 2 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. 
Shutdown leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written + * before the read + * 5. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownLeader) { LogicPoolID logicPoolId = 2; @@ -619,48 +574,34 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownLeader) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); // shutdown leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderId)); - // 测试发现集群不可用 - ReadVerifyNotAvailable(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); + // Testing found that the cluster is not available + ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, chunkId, length, + ch, 1); ASSERT_EQ(0, cluster.StartPeer(leaderId)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点是否能够正常提供服务 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 + * Verify whether the three nodes can provide services normally + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification */ TEST_F(ChunkServerSnapshotTest, ThreeNodes) { LogicPoolID logicPoolId = 2; @@ -685,26 +626,24 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodes) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的关闭非 leader 节点 后重启是否能够正常服务 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether restarting after closing non leader nodes on three nodes can + * provide normal service + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown is not a leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written + * before the read + * 5. 
Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownOnePeer) { LogicPoolID logicPoolId = 2; @@ -729,19 +668,14 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownOnePeer) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { shutdownPeerid = peer2; } else { shutdownPeerid = peer1; @@ -750,28 +684,26 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownOnePeer) { ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的关闭 leader 节点 后重启是否能够正常服务 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether the shutdown of the leader node and restart of three nodes can + * provide normal service + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written + * before the read + * 5. 
Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownLeader) { LogicPoolID logicPoolId = 2; @@ -796,62 +728,49 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownLeader) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // shutdown leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderId)); - // 测试发现集群暂时不可用 - ReadVerifyNotAvailable(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); + // Testing found that the cluster is temporarily unavailable + ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, chunkId, length, + ch, 1); ASSERT_EQ(0, cluster.StartPeer(leaderId)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 5. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 6. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 7. transfer leader 到shut down 的peer 上 - * 8. 在 read 之前写入的数据验证 - * 9. 再 write 数据,再 read 出来验证一遍 + * Verify the shutdown of non leader nodes on three nodes, restart, and control + * the recovery from install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown non leader + * 4. Then sleep exceeds one snapshot interval and write read data + * 5. Then sleep for more than one snapshot interval and write read data; 4,5 + * two-step It is to ensure that at least two snapshots are taken, so that when + * the node restarts again, it must pass the install snapshot, Because the log + * has been deleted + * 6. Wait for the leader to be generated, and then verify the data written + * before the read + * 7. transfer leader to shut down peer + * 8. Verification of data written before read + * 9. 
Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -877,19 +796,14 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { shutdownPeerid = peer2; } else { shutdownPeerid = peer1; @@ -900,40 +814,25 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { leaderId.to_string().c_str())); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid)); - // wait snapshot, 保证能够触发安装快照 - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 2, - loop); - - // restart, 需要从 install snapshot 恢复 + // Wait snapshot to ensure that the installation snapshot can be triggered + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 2, loop); + + // Restart, needs to be restored from install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 2, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 3, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 3, loop); // Wait shutdown peer recovery, and then transfer leader to it ::sleep(3); @@ -944,10 +843,7 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - shutdownPeerid, + status = TransferLeader(logicPoolId, copysetId, conf, shutdownPeerid, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderId); @@ -961,37 +857,35 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), shutdownPeerid.to_string().c_str())); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 3, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 4, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 4, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 
等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. read 之前 write 的数据验证一遍 - * 5. 再 write 数据,然后 read 出来验证一遍 - * 6. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 7. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 9. 删除 shutdown peer 的数据目录,然后再拉起来 - * 10. 然后 read 之前写入的数据验证一遍 - * 11. transfer leader 到shut down 的 peer 上 - * 12. 在 read 之前写入的数据验证 - * 13. 再 write 数据,再 read 出来验证一遍 + * Verify the shutdown of non leader nodes on three nodes, restart, and control + * the recovery from install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown non leader + * 4. Verify the data written before read + * 5. Write the data again, and then read it out for verification + * 6. Then sleep exceeds one snapshot interval and write read data + * 7. Then sleep for more than one snapshot interval and write read data; 4,5 + * two-step It is to ensure that at least two snapshots are taken, so that when + * the node restarts again, it must pass the install snapshot, Because the log + * has been deleted + * 9. Delete the data directory of the shutdown peer and then pull it up again + * 10. Then verify the data written before read + * 11. Transfer leader to shut down peer + * 12. Verification of data written before read + * 13. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { LogicPoolID logicPoolId = 2; @@ -1017,19 +911,14 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { shutdownPeerid = peer2; } else { shutdownPeerid = peer1; @@ -1040,54 +929,40 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { leaderId.to_string().c_str())); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); - - // wait snapshot, 保证能够触发安装快照 - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 2, - loop); - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 3, - loop); - - // 删除此 peer 的数据,然后重启 + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); + + // Wait snapshot to ensure that the installation snapshot can be triggered + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 2, loop); + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 3, loop); + + // 
Delete the data for this peer and restart it ASSERT_EQ(0, ::system(TestCluster::RemoveCopysetDirCmd(shutdownPeerid) - .c_str())); //NOLINT + .c_str())); // NOLINT LOG(INFO) << "remove data cmd: " << TestCluster::RemoveCopysetDirCmd(shutdownPeerid); - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); Exec(TestCluster::CopysetDirWithoutProtocol(shutdownPeerid).c_str()); LOG(INFO) << "remove data dir: " << TestCluster::CopysetDirWithoutProtocol(shutdownPeerid); - ASSERT_FALSE(fs->DirExists(TestCluster::CopysetDirWithoutProtocol( - shutdownPeerid).c_str())); //NOLINT + ASSERT_FALSE( + fs->DirExists(TestCluster::CopysetDirWithoutProtocol(shutdownPeerid) + .c_str())); // NOLINT ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 3, loop); // Wait shutdown peer recovery, and then transfer leader to it @@ -1099,10 +974,7 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - shutdownPeerid, + status = TransferLeader(logicPoolId, copysetId, conf, shutdownPeerid, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderId); @@ -1116,38 +988,36 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), shutdownPeerid.to_string().c_str())); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 3, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 4, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 4, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. read 之前 write 的数据验证一遍 - * 5. 再 write 数据,然后 read 出来验证一遍 - * 6. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 7. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 9. 通过配置变更 add peer - * 10. 然后 read 之前写入的数据验证一遍 - * 11. 在发起 write,再 read 读出来验证一遍 - * 12. transfer leader 到 add 的 peer 上 - * 13. 在 read 之前写入的数据验证 - * 14. 再 write 数据,再 read 出来验证一遍 + * Verify the shutdown of non leader nodes on three nodes, restart, and control + * the recovery from install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown non leader + * 4. Verify the data written before read + * 5. Write the data again, and then read it out for verification + * 6. Then sleep exceeds one snapshot interval and write read data + * 7. Then sleep for more than one snapshot interval and write read data; 4,5 + * two-step It is to ensure that at least two snapshots are taken, so that when + * the node restarts again, it must pass the install snapshot, Because the log + * has been deleted + * 9. Add peer through configuration changes + * 10. Then verify the data written before read + * 11. Initiate write and read again to verify + * 12. 
Transfer leader to add's peer + * 13. Verification of data written before read + * 14. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -1173,21 +1043,16 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write,多个 chunk file + // Initiate read/write, multiple chunk files for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch, loop); } - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { shutdownPeerid = peer2; } else { shutdownPeerid = peer1; @@ -1198,46 +1063,31 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { leaderId.to_string().c_str())); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid)); - // 读出来验证一遍 + // Read it out and verify it again for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 1, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 1, loop); } - // wait snapshot, 保证能够触发安装快照 - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Wait snapshot to ensure that the installation snapshot can be triggered + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 2, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 2, loop); } - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 3, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 3, loop); } - // add 一个 peer + // Add a peer { ASSERT_EQ(0, cluster.StartPeer(peer4, true)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); @@ -1245,26 +1095,18 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = 80000; - butil::Status status = AddPeer(logicPoolId, - copysetId, - cluster.CopysetConf(), - peer4, - options); + butil::Status status = AddPeer(logicPoolId, copysetId, + cluster.CopysetConf(), peer4, options); ASSERT_EQ(0, status.error_code()); } - // 读出来验证一遍 + // Read it out and verify it again for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 3, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 4, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 4, loop); } @@ -1277,11 +1119,7 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - 
copysetId, - conf, - peer4, - options); + status = TransferLeader(logicPoolId, copysetId, conf, peer4, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderId); if (leaderId == peer4) { @@ -1291,21 +1129,16 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { ::sleep(1); } - ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), - peer4.to_string().c_str())); + ASSERT_EQ( + 0, ::strcmp(leaderId.to_string().c_str(), peer4.to_string().c_str())); - // 读出来验证一遍 + // Read it out and verify it again for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 4, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 5, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 5, loop); } @@ -1321,20 +1154,23 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { } /** - * * 验证3个节点的 remove 一个节点,然后再 add 回来,并控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. 通过配置变更 remove 一个 非 leader - * 4. read 之前 write 的数据验证一遍 - * 5. 再 write 数据,然后 read 出来验证一遍 - * 6. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 7. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 9. 通过配置变更再将之前 remove 的 peer add 回来 - * 10. transfer leader 到此 peer - * 11. 在 read 之前写入的数据验证 - * 12. 再 write 数据,再 read 出来验证一遍 + * Verify the removal of one node from three nodes, then add it back and control + * it to recover from the install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Remove a non leader through configuration changes + * 4. Verify the data written before read + * 5. Write the data again, and then read it out for verification + * 6. Then sleep exceeds one snapshot interval and write read data + * 7. Then sleep for more than one snapshot interval and write read data; 4,5 + * two-step It is to ensure that at least two snapshots are taken, so that when + * the node restarts again, it must pass the install snapshot, Because the log + * has been deleted + * 9. Add the previously removed peer back through configuration changes + * 10. Transfer leader to this peer + * 11. Verification of data written before read + * 12. 
Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -1360,21 +1196,16 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write,多个 chunk file + // Initiate read/write, multiple chunk files for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch, loop); } - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId removePeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { removePeerid = peer2; } else { removePeerid = peer1; @@ -1383,70 +1214,51 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { LOG(INFO) << "leader peer: " << leaderId.to_string(); ASSERT_NE(0, ::strcmp(removePeerid.to_string().c_str(), leaderId.to_string().c_str())); - // remove 一个 peer + // Remove a peer { Configuration conf = cluster.CopysetConf(); braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = 8000; - butil::Status status = RemovePeer(logicPoolId, - copysetId, - cluster.CopysetConf(), - removePeerid, - options); + butil::Status status = + RemovePeer(logicPoolId, copysetId, cluster.CopysetConf(), + removePeerid, options); ASSERT_EQ(0, status.error_code()); } - // 读出来验证一遍 + // Read it out and verify it again for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 1, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 1, loop); } - // wait snapshot, 保证能够触发安装快照 - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Wait snapshot to ensure that the installation snapshot can be triggered + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 2, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 2, loop); } - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 3, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 3, loop); } - // add 回来 + // Add, come back { Configuration conf = cluster.CopysetConf(); braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = 80000; - butil::Status status = AddPeer(logicPoolId, - copysetId, - cluster.CopysetConf(), - removePeerid, - options); + butil::Status status = + AddPeer(logicPoolId, copysetId, cluster.CopysetConf(), removePeerid, + options); ASSERT_EQ(0, status.error_code()); } @@ -1459,11 +1271,8 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - removePeerid, - options); + status = + TransferLeader(logicPoolId, copysetId, conf, removePeerid, options); if (0 == 
status.error_code()) { cluster.WaitLeader(&leaderId); if (leaderId == removePeerid) { @@ -1476,18 +1285,13 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), removePeerid.to_string().c_str())); - // 读出来验证一遍 + // Read it out and verify it again for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 3, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 4, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 4, loop); } diff --git a/test/chunkserver/chunkserver_test_util.cpp b/test/chunkserver/chunkserver_test_util.cpp index cb2d020048..612594e9ac 100644 --- a/test/chunkserver/chunkserver_test_util.cpp +++ b/test/chunkserver/chunkserver_test_util.cpp @@ -22,27 +22,27 @@ #include "test/chunkserver/chunkserver_test_util.h" -#include -#include -#include #include #include #include +#include #include #include +#include +#include #include #include #include -#include "src/common/concurrent/task_thread_pool.h" -#include "src/common/crc32.h" +#include "src/chunkserver/cli.h" +#include "src/chunkserver/concurrent_apply/concurrent_apply.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "test/chunkserver/fake_datastore.h" +#include "src/common/concurrent/task_thread_pool.h" +#include "src/common/crc32.h" #include "src/common/uri_parser.h" -#include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "test/chunkserver/fake_datastore.h" using ::curve::chunkserver::concurrent::ConcurrentApplyOption; using ::curve::common::UriParser; @@ -50,25 +50,22 @@ using ::curve::common::UriParser; namespace curve { namespace chunkserver { -std::string Exec(const char *cmd) { - FILE *pipe = popen(cmd, "r"); +std::string Exec(const char* cmd) { + FILE* pipe = popen(cmd, "r"); if (!pipe) return "ERROR"; char buffer[4096]; std::string result = ""; while (!feof(pipe)) { - if (fgets(buffer, 1024, pipe) != NULL) - result += buffer; + if (fgets(buffer, 1024, pipe) != NULL) result += buffer; } pclose(pipe); return result; } -std::shared_ptr InitFilePool(std::shared_ptr fsptr, //NOLINT - int chunkfileCount, - int chunkfileSize, - int metaPageSize, - std::string poolpath, - std::string metaPath) { +std::shared_ptr InitFilePool( + std::shared_ptr fsptr, // NOLINT + int chunkfileCount, int chunkfileSize, int metaPageSize, + std::string poolpath, std::string metaPath) { auto filePoolPtr = std::make_shared(fsptr); if (filePoolPtr == nullptr) { LOG(FATAL) << "allocate chunkfile pool failed!"; @@ -76,10 +73,10 @@ std::shared_ptr InitFilePool(std::shared_ptr fsptr, int count = 1; std::string dirname = poolpath; while (count <= chunkfileCount) { - std::string filename = poolpath + std::to_string(count); + std::string filename = poolpath + std::to_string(count); fsptr->Mkdir(poolpath); int fd = fsptr->Open(filename.c_str(), O_RDWR | O_CREAT); - char *data = new char[chunkfileSize + 4096]; + char* data = new char[chunkfileSize + 4096]; memset(data, 'a', chunkfileSize + 4096); fsptr->Write(fd, data, 0, chunkfileSize + 4096); fsptr->Close(fd); @@ -87,7 +84,7 @@ std::shared_ptr InitFilePool(std::shared_ptr fsptr, delete[] data; } /** - * 持久化FilePool meta file + * Persisting FilePool meta file */ FilePoolMeta meta; @@ -107,11 +104,8 @@ std::shared_ptr 
InitFilePool(std::shared_ptr fsptr, return filePoolPtr; } -int StartChunkserver(const char *ip, - int port, - const char *copysetdir, - const char *confs, - const int snapshotInterval, +int StartChunkserver(const char* ip, int port, const char* copysetdir, + const char* confs, const int snapshotInterval, const int electionTimeoutMs) { LOG(INFO) << "Going to start chunk server"; @@ -123,13 +117,14 @@ int StartChunkserver(const char *ip, return -1; } if (server.Start(port, NULL) != 0) { - LOG(ERROR) << "Fail to start Server, port: " << port << ", errno: " - << errno << ", " << strerror(errno); + LOG(ERROR) << "Fail to start Server, port: " << port + << ", errno: " << errno << ", " << strerror(errno); return -1; } LOG(INFO) << "start rpc server success"; - std::shared_ptr fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT const uint32_t kMaxChunkSize = 16 * 1024 * 1024; CopysetNodeOptions copysetNodeOptions; copysetNodeOptions.ip = ip; @@ -188,12 +183,10 @@ int StartChunkserver(const char *ip, CopysetID copysetId = 100001; CopysetNodeManager::GetInstance().Init(copysetNodeOptions); CopysetNodeManager::GetInstance().Run(); - CHECK(CopysetNodeManager::GetInstance().CreateCopysetNode(logicPoolId, - copysetId, - peers)); + CHECK(CopysetNodeManager::GetInstance().CreateCopysetNode( + logicPoolId, copysetId, peers)); auto copysetNode = CopysetNodeManager::GetInstance().GetCopysetNode( - logicPoolId, - copysetId); + logicPoolId, copysetId); DataStoreOptions options; options.baseDir = "./test-temp"; options.chunkSize = 16 * 1024 * 1024; @@ -214,18 +207,16 @@ int StartChunkserver(const char *ip, return 0; } -butil::Status WaitLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - PeerId *leaderId, - int electionTimeoutMs) { +butil::Status WaitLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + PeerId* leaderId, int electionTimeoutMs) { butil::Status status; const int kMaxLoop = (5 * electionTimeoutMs) / 100; for (int i = 0; i < kMaxLoop; ++i) { status = GetLeader(logicPoolId, copysetId, conf, leaderId); if (status.ok()) { /** - * 等待 flush noop entry + * Waiting for flush noop entry */ ::usleep(electionTimeoutMs * 1000); return status; @@ -239,14 +230,14 @@ butil::Status WaitLeader(const LogicPoolID &logicPoolId, return status; } -TestCluster::TestCluster(const std::string &clusterName, +TestCluster::TestCluster(const std::string& clusterName, const LogicPoolID logicPoolID, const CopysetID copysetID, - const std::vector &peers) : - clusterName_(clusterName), - snapshotIntervalS_(1), - electionTimeoutMs_(1000), - catchupMargin_(10) { + const std::vector& peers) + : clusterName_(clusterName), + snapshotIntervalS_(1), + electionTimeoutMs_(1000), + catchupMargin_(10) { logicPoolID_ = logicPoolID; copysetID_ = copysetID; for (auto it = peers.begin(); it != peers.end(); ++it) { @@ -255,10 +246,8 @@ TestCluster::TestCluster(const std::string &clusterName, } } -int TestCluster::StartPeer(const PeerId &peerId, - const bool empty, - bool getChunkFromPool, - bool createChunkFilePool) { +int TestCluster::StartPeer(const PeerId& peerId, const bool empty, + bool getChunkFromPool, bool createChunkFilePool) { LOG(INFO) << "going start peer: " << peerId.to_string(); auto it = peersMap_.find(peerId.to_string()); if (it != peersMap_.end()) { @@ -299,30 +288,29 @@ int TestCluster::StartPeer(const PeerId &peerId, LOG(ERROR) << "start peer 
fork failed"; return -1; } else if (0 == pid) { - /* 在子进程起一个 ChunkServer */ - StartPeerNode(peer->options, peer->conf, - getChunkFromPool, createChunkFilePool); + /*Starting a ChunkServer in a child process*/ + StartPeerNode(peer->options, peer->conf, getChunkFromPool, + createChunkFilePool); exit(0); } LOG(INFO) << "Start peer success, pid: " << pid; peer->pid = pid; peer->state = PeerNodeState::RUNNING; - peersMap_.insert(std::pair>(peerId.to_string(), - std::move(peer))); + peersMap_.insert(std::pair>( + peerId.to_string(), std::move(peer))); return 0; } -int TestCluster::ShutdownPeer(const PeerId &peerId) { +int TestCluster::ShutdownPeer(const PeerId& peerId) { LOG(INFO) << "going to shutdown peer: " << peerId.to_string(); auto it = peersMap_.find(peerId.to_string()); if (it != peersMap_.end()) { int waitState; if (0 != kill(it->second->pid, SIGINT)) { LOG(ERROR) << "Stop peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } waitpid(it->second->pid, &waitState, 0); @@ -335,7 +323,7 @@ int TestCluster::ShutdownPeer(const PeerId &peerId) { } } -int TestCluster::StopPeer(const PeerId &peerId) { +int TestCluster::StopPeer(const PeerId& peerId) { auto it = peersMap_.find(peerId.to_string()); if (it != peersMap_.end()) { if (it->second->state != PeerNodeState::RUNNING) { @@ -345,8 +333,8 @@ int TestCluster::StopPeer(const PeerId &peerId) { } if (0 != kill(it->second->pid, SIGSTOP)) { LOG(ERROR) << "Hang peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } it->second->state = PeerNodeState::STOP; @@ -358,7 +346,7 @@ int TestCluster::StopPeer(const PeerId &peerId) { } } -int TestCluster::ContPeer(const PeerId &peerId) { +int TestCluster::ContPeer(const PeerId& peerId) { auto it = peersMap_.find(peerId.to_string()); if (it != peersMap_.end()) { if (it->second->state != PeerNodeState::STOP) { @@ -368,8 +356,8 @@ int TestCluster::ContPeer(const PeerId &peerId) { } if (0 != kill(it->second->pid, SIGCONT)) { LOG(ERROR) << "Cont peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } it->second->state = PeerNodeState::RUNNING; @@ -381,10 +369,10 @@ int TestCluster::ContPeer(const PeerId &peerId) { } } -int TestCluster::WaitLeader(PeerId *leaderId) { +int TestCluster::WaitLeader(PeerId* leaderId) { butil::Status status; /** - * 等待选举结束 + * Waiting for the election to end */ ::usleep(2 * electionTimeoutMs_ * 1000); const int kMaxLoop = (3 * electionTimeoutMs_) / 100; @@ -393,8 +381,10 @@ int TestCluster::WaitLeader(PeerId *leaderId) { status = GetLeader(logicPoolID_, copysetID_, conf_, leaderId); if (status.ok()) { /** - * 由于选举之后还需要提交应用 noop entry 之后才能提供服务, - * 所以这里需要等待 noop apply,这里等太短,可能容易失败,后期改进 + * Due to the need to submit the application noop entry after the + * election to provide services, So we need to wait for the noop + * application here. 
If the wait time is too short, it may be easy + * to fail, so we need to improve it later */ usleep(electionTimeoutMs_ * 1000); LOG(INFO) << "Wait leader success, leader is: " @@ -417,9 +407,7 @@ int TestCluster::StopAllPeers() { return 0; } -const Configuration TestCluster::CopysetConf() const { - return conf_; -} +const Configuration TestCluster::CopysetConf() const { return conf_; } int TestCluster::SetsnapshotIntervalS(int snapshotIntervalS) { snapshotIntervalS_ = snapshotIntervalS; @@ -441,7 +429,7 @@ int TestCluster::StartPeerNode(CopysetNodeOptions options, bool enableGetchunkFromPool, bool createChunkFilePool) { /** - * 用于注释,说明 cmd format + * Used for annotation to explain the cmd format */ std::string cmdFormat = R"( ./bazel-bin/test/chunkserver/server-test @@ -466,7 +454,7 @@ int TestCluster::StartPeerNode(CopysetNodeOptions options, confStr += it->to_string(); confStr += ","; } - // 去掉最后的逗号 + // Remove the last comma confStr.pop_back(); std::string cmd_dir("./bazel-bin/test/chunkserver/server-test"); @@ -478,28 +466,22 @@ int TestCluster::StartPeerNode(CopysetNodeOptions options, std::string confs; butil::string_printf(&confs, "-conf=%s", confStr.c_str()); std::string copyset_dir; - butil::string_printf(©set_dir, - "-copyset_dir=%s", + butil::string_printf(©set_dir, "-copyset_dir=%s", options.chunkDataUri.c_str()); std::string election_timeout_ms; - butil::string_printf(&election_timeout_ms, - "-election_timeout_ms=%d", + butil::string_printf(&election_timeout_ms, "-election_timeout_ms=%d", options.electionTimeoutMs); std::string snapshot_interval_s; - butil::string_printf(&snapshot_interval_s, - "-snapshot_interval_s=%d", + butil::string_printf(&snapshot_interval_s, "-snapshot_interval_s=%d", options.snapshotIntervalS); std::string catchup_margin; - butil::string_printf(&catchup_margin, - "-catchup_margin=%d", + butil::string_printf(&catchup_margin, "-catchup_margin=%d", options.catchupMargin); std::string getchunk_from_pool; - butil::string_printf(&getchunk_from_pool, - "-enable_getchunk_from_pool=%d", + butil::string_printf(&getchunk_from_pool, "-enable_getchunk_from_pool=%d", enableGetchunkFromPool); std::string create_pool; - butil::string_printf(&create_pool, - "-create_chunkfilepool=%d", + butil::string_printf(&create_pool, "-create_chunkfilepool=%d", createChunkFilePool); std::string logic_pool_id; butil::string_printf(&logic_pool_id, "-logic_pool_id=%d", logicPoolID_); @@ -508,59 +490,51 @@ int TestCluster::StartPeerNode(CopysetNodeOptions options, std::string raft_sync; butil::string_printf(&raft_sync, "-raft_sync=%s", "true"); - char *arg[] = { - const_cast(cmd.c_str()), - const_cast(ip.c_str()), - const_cast(port.c_str()), - const_cast(confs.c_str()), - const_cast(copyset_dir.c_str()), - const_cast(election_timeout_ms.c_str()), - const_cast(snapshot_interval_s.c_str()), - const_cast(catchup_margin.c_str()), - const_cast(logic_pool_id.c_str()), - const_cast(copyset_id.c_str()), - const_cast(getchunk_from_pool.c_str()), - const_cast(create_pool.c_str()), - NULL - }; + char* arg[] = {const_cast(cmd.c_str()), + const_cast(ip.c_str()), + const_cast(port.c_str()), + const_cast(confs.c_str()), + const_cast(copyset_dir.c_str()), + const_cast(election_timeout_ms.c_str()), + const_cast(snapshot_interval_s.c_str()), + const_cast(catchup_margin.c_str()), + const_cast(logic_pool_id.c_str()), + const_cast(copyset_id.c_str()), + const_cast(getchunk_from_pool.c_str()), + const_cast(create_pool.c_str()), + NULL}; ::execv(cmd_dir.c_str(), arg); return 0; } -const std::string 
TestCluster::CopysetDirWithProtocol(const PeerId &peerId) { +const std::string TestCluster::CopysetDirWithProtocol(const PeerId& peerId) { std::string copysetdir; - butil::string_printf(©setdir, - "local://./%s-%d-%d", + butil::string_printf(©setdir, "local://./%s-%d-%d", butil::ip2str(peerId.addr.ip).c_str(), - peerId.addr.port, - 0); + peerId.addr.port, 0); return copysetdir; } -const std::string TestCluster::CopysetDirWithoutProtocol(const PeerId &peerId) { +const std::string TestCluster::CopysetDirWithoutProtocol(const PeerId& peerId) { std::string copysetdir; - butil::string_printf(©setdir, - "./%s-%d-%d", + butil::string_printf(©setdir, "./%s-%d-%d", butil::ip2str(peerId.addr.ip).c_str(), - peerId.addr.port, - 0); + peerId.addr.port, 0); return copysetdir; } -const std::string TestCluster::RemoveCopysetDirCmd(const PeerId &peerId) { +const std::string TestCluster::RemoveCopysetDirCmd(const PeerId& peerId) { std::string cmd; - butil::string_printf(&cmd, - "rm -fr %s-%d-%d", + butil::string_printf(&cmd, "rm -fr %s-%d-%d", butil::ip2str(peerId.addr.ip).c_str(), - peerId.addr.port, - 0); + peerId.addr.port, 0); return cmd; } LogicPoolID TestCluster::logicPoolID_ = 0; -CopysetID TestCluster::copysetID_ = 0; +CopysetID TestCluster::copysetID_ = 0; } // namespace chunkserver } // namespace curve diff --git a/test/chunkserver/chunkserver_test_util.h b/test/chunkserver/chunkserver_test_util.h index b329e069cd..eaf423bbd4 100644 --- a/test/chunkserver/chunkserver_test_util.h +++ b/test/chunkserver/chunkserver_test_util.h @@ -26,188 +26,182 @@ #include #include -#include -#include -#include #include +#include +#include #include +#include -#include "src/chunkserver/datastore/file_pool.h" #include "include/chunkserver/chunkserver_common.h" -#include "src/fs/local_filesystem.h" #include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/datastore/file_pool.h" +#include "src/fs/local_filesystem.h" namespace curve { namespace chunkserver { using curve::fs::LocalFileSystem; -std::string Exec(const char *cmd); +std::string Exec(const char* cmd); /** - * 当前FilePool需要事先格式化,才能使用,此函数用于事先格式化FilePool - * @param fsptr:本文文件系统指针 - * @param chunkfileSize:chunk文件的大小 - * @param metaPageSize:chunk文件的meta page大小 - * @param poolpath:文件池的路径,例如./chunkfilepool/ - * @param metaPath:meta文件路径,例如./chunkfilepool/chunkfilepool.meta - * @return 初始化成功返回FilePool指针,否则返回null + * The current FilePool needs to be formatted in advance before it can be used. + * This function is used to format the FilePool in advance + * @param fsptr: This article's file system pointer + * @param chunkfileSize: Chunk file size + * @param metaPageSize: The metapage size of the chunk file + * @param poolpath: The path to the file pool, for example ./chunkfilepool/ + * @param metaPath: meta file path, for example + * ./chunkfilepool/chunkfilepool.meta + * @return successfully initializes and returns the FilePool pointer. 
Otherwise, + * it returns null */ -std::shared_ptr InitFilePool(std::shared_ptr fsptr, //NOLINT - int chunkfileCount, - int chunkfileSize, - int metaPageSize, - std::string poolpath, - std::string metaPath); - -int StartChunkserver(const char *ip, - int port, - const char *copysetdir, - const char *confs, - const int snapshotInterval, +std::shared_ptr InitFilePool( + std::shared_ptr fsptr, // NOLINT + int chunkfileCount, int chunkfileSize, int metaPageSize, + std::string poolpath, std::string metaPath); + +int StartChunkserver(const char* ip, int port, const char* copysetdir, + const char* confs, const int snapshotInterval, const int electionTimeoutMs); -butil::Status WaitLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - PeerId *leaderId, - int electionTimeoutMs); +butil::Status WaitLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + PeerId* leaderId, int electionTimeoutMs); /** - * PeerNode 状态 - * 1. exit:未启动,或者被关闭 - * 2. running:正在运行 - * 3. stop:hang 住了 + * PeerNode status + * 1. exit: Not started or closed + * 2. running: Running + * 3. stop: hang */ enum class PeerNodeState { - EXIT = 0, // 退出 - RUNNING = 1, // 正在运行 - STOP = 2, // hang住 + EXIT = 0, // Exit + RUNNING = 1, // Running + STOP = 2, // Hang Stay }; /** - * 一个 ChunkServer 进程,包含某个 Copyset 的某个副本 + * A ChunkServer process that contains a copy of a Copyset */ struct PeerNode { PeerNode() : pid(0), options(), state(PeerNodeState::EXIT) {} - // Peer对应的进程id + // Process ID corresponding to Peer pid_t pid; - // Peer的地址 + // Peer's address PeerId peerId; - // copyset的集群配置 + // Cluster configuration for copyset Configuration conf; - // copyset的基本配置 + // Basic configuration of copyset CopysetNodeOptions options; - // PeerNode的状态 + // Status of PeerNode PeerNodeState state; }; /** - * 封装模拟 cluster 测试相关的接口 + * Package simulation cluster testing related interfaces */ class TestCluster { public: - TestCluster(const std::string &clusterName, - const LogicPoolID logicPoolID, - const CopysetID copysetID, - const std::vector &peers); + TestCluster(const std::string& clusterName, const LogicPoolID logicPoolID, + const CopysetID copysetID, const std::vector& peers); virtual ~TestCluster() { StopAllPeers(); } public: /** - * 启动一个 Peer + * Start a Peer * @param peerId - * @param empty 初始化配置是否为空 - * @param: get_chunk_from_pool是否从FilePool获取chunk - * @param: createFilePool是否创建FilePool,重启的情况下不需要 - * @return 0:成功,-1 失败 + * @param empty Is the initialization configuration empty + * @param: get_chunk_from_pool Does obtain a chunk from FilePool + * @param: createFilePool: create a FilePool? 
It is not necessary to restart + * it + * @return 0: Success, -1 failed */ - int StartPeer(const PeerId &peerId, - const bool empty = false, - bool getChunkFrom_pool = false, - bool createFilePool = true); + int StartPeer(const PeerId& peerId, const bool empty = false, + bool getChunkFrom_pool = false, bool createFilePool = true); /** - * 关闭一个 peer,使用 SIGINT + * Close a peer and use SIGINT * @param peerId - * @return 0:成功,-1 失败 + * @return 0: Success, -1 failed */ - int ShutdownPeer(const PeerId &peerId); - + int ShutdownPeer(const PeerId& peerId); /** - * hang 住一个 peer,使用 SIGSTOP + * Hang lives in a peer and uses SIGSTOP * @param peerId - * @return 0:成功,-1 失败 + * @return 0: Success, -1 failed */ - int StopPeer(const PeerId &peerId); + int StopPeer(const PeerId& peerId); /** - * 恢复 hang 住的 peer,使用 SIGCONT - * @param peerId - * @return 0:成功,-1 失败 - */ - int ContPeer(const PeerId &peerId); + * Restore the peer where Hang lives and use SIGCONT + * @param peerId + * @return 0: Success, -1 failed + */ + int ContPeer(const PeerId& peerId); /** - * 反复重试直到等到新的 leader 产生 - * @param leaderId 出参,返回 leader id - * @return 0:成功,-1 失败 + * Try again and again until a new leader is generated + * @param leaderId takes a parameter and returns the leader id + * @return 0: Success, -1 failed */ - int WaitLeader(PeerId *leaderId); + int WaitLeader(PeerId* leaderId); /** - * Stop 所有的 peer - * @return 0:成功,-1 失败 + * Stop all peers + * @return 0: Success, -1 failed */ int StopAllPeers(); public: - /* 返回集群当前的配置 */ + /*Returns the current configuration of the cluster*/ const Configuration CopysetConf() const; - /* 修改 PeerNode 配置相关的接口,单位: s */ + /*Modify the interface related to PeerNode configuration, unit: s*/ int SetsnapshotIntervalS(int snapshotIntervalS); int SetElectionTimeoutMs(int electionTimeoutMs); int SetCatchupMargin(int catchupMargin); static int StartPeerNode(CopysetNodeOptions options, - const Configuration conf, - bool from_chunkfile_pool = false, - bool createFilePool = true); + const Configuration conf, + bool from_chunkfile_pool = false, + bool createFilePool = true); public: /** - * 返回执行 peer 的 copyset 路径 with protocol, ex: local://./127.0.0.1:9101:0 - */ - static const std::string CopysetDirWithProtocol(const PeerId &peerId); + * Returns the copyset path for executing peer with protocol, ex: + * local://./127.0.0.1:9101:0 + */ + static const std::string CopysetDirWithProtocol(const PeerId& peerId); /** - * 返回执行 peer 的 copyset 路径 without protocol, ex: ./127.0.0.1:9101:0 + * Returns the copyset path for executing peer without protocol, ex: + * ./127.0.0.1:9101:0 */ - static const std::string CopysetDirWithoutProtocol(const PeerId &peerId); + static const std::string CopysetDirWithoutProtocol(const PeerId& peerId); /** * remove peer's copyset dir's cmd */ - static const std::string RemoveCopysetDirCmd(const PeerId &peerid); + static const std::string RemoveCopysetDirCmd(const PeerId& peerid); private: - // 集群名字 - std::string clusterName_; - // 集群的peer集合 - std::set peers_; - // peer集合的映射map + // Cluster Name + std::string clusterName_; + // The peer set of the cluster + std::set peers_; + // Mapping Map of Peer Set std::unordered_map> peersMap_; - // 快照间隔 + // Snapshot interval int snapshotIntervalS_; - // 选举超时时间 + // Election timeout int electionTimeoutMs_; - // catchup margin配置 + // Catchup margin configuration int catchupMargin_; - // 集群成员配置 + // Cluster member configuration Configuration conf_; - // 逻辑池id - static LogicPoolID logicPoolID_; - // 复制组id - static CopysetID copysetID_; + // Logical 
diff --git a/test/chunkserver/cli2_test.cpp b/test/chunkserver/cli2_test.cpp index d4d482d118..41d3b75ada 100644 --- a/test/chunkserver/cli2_test.cpp +++ b/test/chunkserver/cli2_test.cpp @@ -20,23 +20,24 @@ * Author: wudemiao */ -#include -#include -#include -#include -#include +#include "src/chunkserver/cli2.h" + #include #include #include +#include +#include +#include +#include +#include #include +#include "proto/copyset.pb.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli2.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -46,10 +47,12 @@ using curve::common::UUIDGenerator; class Cli2Test : public testing::Test { protected: static void SetUpTestCase() { - LOG(INFO) << "CliTest " << "SetUpTestCase"; + LOG(INFO) << "CliTest " + << "SetUpTestCase"; } static void TearDownTestCase() { - LOG(INFO) << "CliTest " << "TearDownTestCase"; + LOG(INFO) << "CliTest " + << "TearDownTestCase"; } virtual void SetUp() { UUIDGenerator uuidGenerator; @@ -84,13 +87,14 @@ class Cli2Test : public testing::Test { butil::AtExitManager atExitManager; TEST_F(Cli2Test, basic) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9033; - const char *confs = "127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:0"; + const char* confs = "127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:0"; int snapshotInterval = 600; /** - * 设置更大的默认选举超时时间,因为当前 ci 环境很容易出现超时 + * Set a larger default election timeout because the current CI environment + * is prone to timeouts */ int electionTimeoutMs = 3000; @@ -103,12 +107,8 @@ TEST_F(Cli2Test, basic) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -118,12 +118,8 @@ TEST_F(Cli2Test, basic) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -133,12 +129,8 @@ TEST_F(Cli2Test, basic) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -148,16 +140,12 @@ TEST_F(Cli2Test, basic) { ASSERT_TRUE(false); } else if (0 == pid4) { std::string copysetdir = "local://./" + dir4; - StartChunkserver(ip, - port + 3, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 3, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /* Ensure that the child processes will exit */ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3, pid_t pid4) { @@ -177,6 +165,7 @@ TEST_F(Cli2Test, basic) { kill(pid4_, SIGINT); waitpid(pid4_, &waitState, 0); } +
private: pid_t pid1_; pid_t pid2_; @@ -197,11 +186,12 @@ TEST_F(Cli2Test, basic) { WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); ASSERT_TRUE(status.ok()); - /* 等待 transfer leader 成功 */ + /* Wait for transfer leader to succeed */ int waitTransferLeader = 3000 * 1000; /** - * 配置变更因为设置一条 log entry 的完成复制,所以设置较长的 timeout - * 时间,以免在 ci 环境偶尔会出现超时出错 + * Configuration changes wait for one log entry to finish replicating, so + * use a longer timeout to avoid occasional timeouts in the CI environment */ braft::cli::CliOptions opt; opt.timeout_ms = 6000; @@ -211,23 +201,18 @@ TEST_F(Cli2Test, basic) { { Peer peer; peer.set_address("127.0.0.1:9035:0"); - butil::Status st = curve::chunkserver::RemovePeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "remove peer: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::RemovePeer( + logicPoolId, copysetId, conf, peer, opt); + LOG(INFO) << "remove peer: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); - /* 可能移除的是 leader,如果移除的是 leader,那么需要等到新的 leader 产生, - * 否则下面的 add peer 测试就会失败, wait 较长时间,是为了保证 remove - * leader 之后新 leader 选举成功,切 become leader 的 flush config - * 完成 */ + /* The removed peer may be the leader. If it is, we must wait until a + * new leader emerges, otherwise the add peer test below will fail. + * The long wait ensures that after the leader is removed a new leader + * is elected and the become-leader flush of the config completes. */ ::usleep(1.5 * 1000 * electionTimeoutMs); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); ASSERT_TRUE(status.ok()); } @@ -237,28 +222,21 @@ TEST_F(Cli2Test, basic) { conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0"); Peer peer; peer.set_address("127.0.0.1:9035:0"); - butil::Status st = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "add peer: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::AddPeer(logicPoolId, copysetId, + conf, peer, opt); + LOG(INFO) << "add peer: " << st.error_code() << ", " << st.error_str(); ASSERT_TRUE(st.ok()); } - /* 重复 add 同一个 peer */ + /* Add the same peer repeatedly */ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0"); Peer peer; peer.set_address("127.0.0.1:9035:0"); - butil::Status st = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "add one peer repeat: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::AddPeer(logicPoolId, copysetId, + conf, peer, opt); + LOG(INFO) << "add one peer repeat: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); } /* transfer leader */ @@ -273,90 +251,70 @@ TEST_F(Cli2Test, basic) { peer3.set_address("127.0.0.1:9035:0"); { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer1, - opt); - LOG(INFO) << "transfer leader: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer1, opt); + LOG(INFO) << "transfer leader: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); - /* transfer leader 只是讲 rpc 发送给leader,并不会等 leader transfer - * 成功才返回,所以这里需要等,除此之外,并不能立马去查 leader,因为 - * leader transfer 之后,可能返回之前的 leader,除此之外 transfer - * leader 成功了之后,become leader 进行时,leader 已经可查,但是 - * become leader 会执行 flush 当前 conf 来充当 noop,如果这个时候 - * 立马进行下一个 transfer leader,会被组织,因为同时只能有一个配置 - * 变更在进行 */ + /* TransferLeader only sends the rpc to the leader; it does not wait + * for the transfer to succeed before returning, so we have to wait + * here. Nor can we query the leader right away, because immediately + * after the transfer the previous leader may still be returned. And + * once the transfer has succeeded, while become-leader is in + * progress the leader is already queryable, but become-leader + * flushes the current conf to act as a noop, so starting the next + * transfer leader right away would be rejected: only one + * configuration change may be in progress at a time. */
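The sleep-and-wait pattern around leader changes reduces to a polling loop. A sketch of the idea behind WaitLeader, with an illustrative retry count; GetLeader is the helper exercised in the exception-branch cases below:

    // Sketch: poll GetLeader until a leader is visible, backing off one
    // election round between attempts.
    Peer leader;
    butil::Status st;
    for (int retry = 0; retry < 5; ++retry) {
        st = GetLeader(logicPoolId, copysetId, conf, &leader);
        if (st.ok()) {
            break;  // a leader has been elected and is queryable
        }
        ::usleep(1000 * electionTimeoutMs);
    }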
::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer1.address().c_str(), leader.to_string().c_str()); } { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer2, - opt); - LOG(INFO) << "transfer leader: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer2, opt); + LOG(INFO) << "transfer leader: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer2.address().c_str(), leader.to_string().c_str()); } { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer3, - opt); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer3, opt); LOG(INFO) << "transfer leader: " << st.error_str(); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer3.address().c_str(), leader.to_string().c_str()); } - /* transfer 给 leader 给 leader,仍然返回成功 */ + /* Transferring leadership to the current leader still returns success */ { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer3, - opt); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer3, opt);
ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer3.address().c_str(), leader.to_string().c_str()); } @@ -366,33 +324,29 @@ TEST_F(Cli2Test, basic) { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:0"); Configuration newConf; - newConf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9036:0"); // NOLINT - butil::Status st = curve::chunkserver::ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - opt); - LOG(INFO) << "change peers: " - << st.error_code() << ", " << st.error_str(); + newConf.parse_from( + "127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9036:0"); // NOLINT + butil::Status st = curve::chunkserver::ChangePeers( + logicPoolId, copysetId, conf, newConf, opt); + LOG(INFO) << "change peers: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); } /* reset peer */ { - // 等待change peer完成,否则用例会失败 + // Wait for the change peer to complete, otherwise the test case will + // fail sleep(3); Peer peer; peer.set_address("127.0.0.1:9033:0"); - butil::Status status = curve::chunkserver::ResetPeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "reset peer: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = curve::chunkserver::ResetPeer( + logicPoolId, copysetId, conf, peer, opt); + LOG(INFO) << "reset peer: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); } - /* 异常分支测试 */ + /* Exception branch tests */ /* get leader - conf empty */ { Configuration conf; @@ -401,127 +355,105 @@ TEST_F(Cli2Test, basic) { ASSERT_FALSE(status.ok()); ASSERT_EQ(EINVAL, status.error_code()); } - /* get leader - 非法的地址 */ + /* get leader - illegal address */ { Configuration conf; Peer leader; - conf.parse_from("127.0.0.1:65540:0,127.0.0.1:65541:0,127.0.0.1:65542:0"); //NOLINT + conf.parse_from( + "127.0.0.1:65540:0,127.0.0.1:65541:0,127.0.0.1:65542:0"); // NOLINT butil::Status status = GetLeader(logicPoolId, copysetId, conf, &leader); ASSERT_FALSE(status.ok()); ASSERT_EQ(-1, status.error_code()); } - /* add peer - 不存在的 peer */ + /* add peer - non-existent peer */ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:2"); - /* 添加一个根本不存在的节点 */ + /* Add a node that does not exist at all */ Peer peer; peer.set_address("127.0.0.1:9039:2"); - butil::Status status = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peer, - opt); + butil::Status status = curve::chunkserver::AddPeer( + logicPoolId, copysetId, conf, peer, opt); ASSERT_FALSE(status.ok()); LOG(INFO) << "add peer: " << status.error_code() << ", " << status.error_str(); } - /* transfer leader - 不存在的 peer */ + /* transfer leader - non-existent peer */ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:2"); Peer peer; peer.set_address("127.0.0.1:9039:0"); { - butil::Status - status = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer, - opt); + butil::Status status = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer, opt); ASSERT_FALSE(status.ok()); LOG(INFO) << "transfer leader: " << status.error_code() << ", " << status.error_str(); } } - /* change peers - 不存在的 peer */ + /* change peers - non-existent peer */ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9036:0"); Configuration newConf; - newConf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9039:0"); // NOLINT - butil::Status status = curve::chunkserver::ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - opt); + newConf.parse_from( + "127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9039:0"); // NOLINT + butil::Status status = curve::chunkserver::ChangePeers( + logicPoolId, copysetId, conf, newConf, opt); ASSERT_FALSE(status.ok()); LOG(INFO) << "change peers: " << status.error_code() << ", " << status.error_str(); } - /* reset peer - newConf为空 */ + /* reset peer - newConf is empty */ { Configuration conf; Peer peer; peer.set_address("127.0.0.1:9033:0"); - butil::Status - status = curve::chunkserver::ResetPeer(logicPoolId, - copysetId, - conf, - peer, - opt); + butil::Status status = curve::chunkserver::ResetPeer( + logicPoolId, copysetId, conf, peer, opt); LOG(INFO) << "reset peer: " << status.error_code() << ", " << status.error_str(); ASSERT_EQ(EINVAL, status.error_code()); } - /* reset peer peer地址非法 */ + /* reset peer - peer address is illegal */ { Peer peer; peer.set_address("127.0.0.1:65540:0"); - butil::Status status = curve::chunkserver::ResetPeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "reset peer: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = curve::chunkserver::ResetPeer( + logicPoolId, copysetId, conf, peer, opt); + LOG(INFO) << "reset peer: " << status.error_code() << ", " + << status.error_str(); ASSERT_EQ(-1, status.error_code()); } - /* reset peer peer地址不存在 */ + /* reset peer - peer address does not exist */ { Peer peer; peer.set_address("127.0.0.1:9040:0"); - butil::Status status = curve::chunkserver::ResetPeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "reset peer: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = curve::chunkserver::ResetPeer( + logicPoolId, copysetId, conf, peer, opt); + LOG(INFO) << "reset peer: " << status.error_code() << ", " + << status.error_str(); ASSERT_EQ(EHOSTDOWN, status.error_code()); } - /* snapshot peer地址非法 */ + /* snapshot - peer address is illegal */ { Peer peer; peer.set_address("127.0.0.1:65540:0"); - butil::Status status = curve::chunkserver::Snapshot(logicPoolId, - copysetId, - peer, - opt); - LOG(INFO) << "snapshot: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = + curve::chunkserver::Snapshot(logicPoolId, copysetId, peer, opt); + LOG(INFO) << "snapshot: " << status.error_code() << ", " + << status.error_str(); ASSERT_EQ(-1, status.error_code()); } - /* snapshot peer地址不存在 */ + /* snapshot - peer address does not exist */ { Peer peer; peer.set_address("127.0.0.1:9040:0"); - butil::Status status = curve::chunkserver::Snapshot(logicPoolId, - copysetId, - peer, - opt); - LOG(INFO) << "snapshot: " - << status.error_code() << ", " <<
status.error_str(); + LOG(INFO) << "snapshot: " << status.error_code() << ", " + << status.error_str(); ASSERT_EQ(EHOSTDOWN, status.error_code()); } } diff --git a/test/chunkserver/cli_test.cpp b/test/chunkserver/cli_test.cpp index 111ec23773..7aa218a446 100644 --- a/test/chunkserver/cli_test.cpp +++ b/test/chunkserver/cli_test.cpp @@ -20,22 +20,23 @@ * Author: wudemiao */ -#include -#include -#include -#include +#include "src/chunkserver/cli.h" + #include #include #include +#include +#include +#include +#include #include +#include "proto/copyset.pb.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -45,10 +46,12 @@ using curve::common::UUIDGenerator; class CliTest : public testing::Test { protected: static void SetUpTestCase() { - LOG(INFO) << "CliTest " << "SetUpTestCase"; + LOG(INFO) << "CliTest " + << "SetUpTestCase"; } static void TearDownTestCase() { - LOG(INFO) << "CliTest " << "TearDownTestCase"; + LOG(INFO) << "CliTest " + << "TearDownTestCase"; } virtual void SetUp() { UUIDGenerator uuidGenerator; @@ -78,13 +81,14 @@ class CliTest : public testing::Test { butil::AtExitManager atExitManager; TEST_F(CliTest, basic) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9030; - const char *confs = "127.0.0.1:9030:0,127.0.0.1:9031:0,127.0.0.1:9032:0"; + const char* confs = "127.0.0.1:9030:0,127.0.0.1:9031:0,127.0.0.1:9032:0"; int snapshotInterval = 600; /** - * 设置更大的默认选举超时时间,因为当前 ci 环境很容易出现超时 + * Set a larger default election timeout because the current CI environment + * is prone to timeout */ int electionTimeoutMs = 3000; @@ -97,12 +101,8 @@ TEST_F(CliTest, basic) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -112,12 +112,8 @@ TEST_F(CliTest, basic) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -127,16 +123,12 @@ TEST_F(CliTest, basic) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -153,6 +145,7 @@ TEST_F(CliTest, basic) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -172,11 +165,12 @@ TEST_F(CliTest, basic) { WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); ASSERT_TRUE(status.ok()); - /* 等待 transfer leader 成功 */ + /* Waiting for transfer leader to succeed*/ int waitTransferLeader = 3000 * 1000; /** - * 配置变更因为设置一条 log entry 的完成复制,所以设置较长的 timeout - * 时间,以免在 ci 环境偶尔会出现超时出错 + * 
Configuration changes wait for one log entry to finish replicating, so use + * a longer timeout to avoid occasional timeouts in the CI environment */ braft::cli::CliOptions opt; opt.timeout_ms = 6000; opt.max_retry = 3; /* remove peer */ { PeerId peerId("127.0.0.1:9032:0"); - butil::Status st = curve::chunkserver::RemovePeer(logicPoolId, - copysetId, - conf, - peerId, - opt); - LOG(INFO) << "remove peer: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::RemovePeer( + logicPoolId, copysetId, conf, peerId, opt); + LOG(INFO) << "remove peer: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); - /* 可能移除的是 leader,如果移除的是 leader,那么需要等到新的 leader 产生, - * 否则下面的 add peer 测试就会失败, wait 较长时间,是为了保证 remove - * leader 之后新 leader 选举成功,切 become leader 的 flush config - * 完成 */ + /* The removed peer may be the leader. If it is, we must wait until a + * new leader emerges, otherwise the add peer test below will fail. + * The long wait ensures that after the leader is removed a new leader + * is elected and the become-leader flush of the config completes. */ ::usleep(1.5 * 1000 * electionTimeoutMs); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); ASSERT_TRUE(status.ok()); } @@ -210,27 +199,20 @@ TEST_F(CliTest, basic) { Configuration conf; conf.parse_from("127.0.0.1:9030:0,127.0.0.1:9031:0"); PeerId peerId("127.0.0.1:9032:0"); - butil::Status st = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peerId, - opt); - LOG(INFO) << "add peer: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::AddPeer(logicPoolId, copysetId, + conf, peerId, opt); + LOG(INFO) << "add peer: " << st.error_code() << ", " << st.error_str(); ASSERT_TRUE(st.ok()); } - /* 重复 add 同一个 peer */ + /* Add the same peer repeatedly */ { Configuration conf; conf.parse_from("127.0.0.1:9030:0,127.0.0.1:9031:0"); PeerId peerId("127.0.0.1:9032:0"); - butil::Status st = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peerId, - opt); - LOG(INFO) << "add one peer repeat: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::AddPeer(logicPoolId, copysetId, + conf, peerId, opt); + LOG(INFO) << "add one peer repeat: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); } /* transfer leader */ @@ -242,95 +224,75 @@ TEST_F(CliTest, basic) { PeerId peer3("127.0.0.1:9032:0"); { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer1, - opt); - LOG(INFO) << "transfer leader: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer1, opt); + LOG(INFO) << "transfer leader: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); - /* transfer leader 只是讲 rpc 发送给leader,并不会等 leader transfer - * 成功才返回,所以这里需要等,除此之外,并不能立马去查 leader,因为 - * leader transfer 之后,可能返回之前的 leader,除此之外 transfer - * leader 成功了之后,become leader 进行时,leader 已经可查,但是 - * become leader 会执行 flush 当前 conf 来充当 noop,如果这个时候 - * 立马进行下一个 transfer leader,会被组织,因为同时只能有一个配置 - * 变更在进行 */ + /* TransferLeader only sends the rpc to the leader; it does not wait + * for the transfer to succeed before returning, so we have to wait + * here. Nor can we query the leader right away, because immediately + * after the transfer the previous leader may still be returned. And + * once the transfer has succeeded, while become-leader is in + * progress the leader is already queryable, but become-leader + * flushes the current conf to act as a noop, so starting the next + * transfer leader right away would be rejected: only one + * configuration change may be in progress at a time. */
::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer1.to_string().c_str(), leader.to_string().c_str()); } { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer2, - opt); - LOG(INFO) << "transfer leader: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer2, opt); + LOG(INFO) << "transfer leader: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer2.to_string().c_str(), leader.to_string().c_str()); } { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer3, - opt); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer3, opt); LOG(INFO) << "transfer leader: " << st.error_str(); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer3.to_string().c_str(), leader.to_string().c_str()); } - /* transfer 给 leader 给 leader,仍然返回成功 */ + /* Transferring leadership to the current leader still returns success */ { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer3, - opt); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer3, opt); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", "
+ << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer3.to_string().c_str(), leader.to_string().c_str()); } } - /* 异常分支测试 */ + /* Exception branch tests */ /* get leader - conf empty */ { Configuration conf; @@ -338,41 +300,35 @@ TEST_F(CliTest, basic) { ASSERT_FALSE(status.ok()); ASSERT_EQ(EINVAL, status.error_code()); } - /* get leader - 非法的地址 */ + /* get leader - illegal address */ { Configuration conf; - conf.parse_from("127.0.0.1:65540:0,127.0.0.1:65541:0,127.0.0.1:65542:0"); //NOLINT + conf.parse_from( + "127.0.0.1:65540:0,127.0.0.1:65541:0,127.0.0.1:65542:0"); // NOLINT butil::Status status = GetLeader(logicPoolId, copysetId, conf, &leader); ASSERT_FALSE(status.ok()); ASSERT_EQ(-1, status.error_code()); } - /* add peer - 不存在的 peer */ + /* add peer - non-existent peer */ { Configuration conf; conf.parse_from("127.0.0.1:9030:0,127.0.0.1:9031:0,127.0.0.1:9030:2"); - /* 添加一个根本不存在的节点 */ + /* Add a node that does not exist at all */ PeerId peerId("127.0.0.1:9039:2"); - butil::Status status = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peerId, - opt); + butil::Status status = curve::chunkserver::AddPeer( + logicPoolId, copysetId, conf, peerId, opt); ASSERT_FALSE(status.ok()); LOG(INFO) << "add peer: " << status.error_code() << ", " << status.error_str(); } - /* transfer leader - 不存在的 peer */ + /* transfer leader - non-existent peer */ { Configuration conf; conf.parse_from("127.0.0.1:9030:0,127.0.0.1:9031:0,127.0.0.1:9032:0"); PeerId peer1("127.0.0.1:9039:0"); { - butil::Status - status = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer1, - opt); + butil::Status status = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer1, opt); ASSERT_FALSE(status.ok()); LOG(INFO) << "add peer: " << status.error_code() << ", " << status.error_str(); diff --git a/test/chunkserver/client.cpp b/test/chunkserver/client.cpp index 7f8c2e6243..1452c24e72 100644 --- a/test/chunkserver/client.cpp +++ b/test/chunkserver/client.cpp @@ -20,49 +20,47 @@ * Author: wudemiao */ -#include -#include -#include #include #include +#include +#include +#include -#include "src/chunkserver/copyset_node.h" #include "proto/chunk.pb.h" #include "proto/copyset.pb.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node.h" #include "test/chunkserver/chunkserver_test_util.h" DEFINE_int32(request_size, 10, "Size of each requst"); DEFINE_int32(timeout_ms, 500, "Timeout for each request"); DEFINE_int32(election_timeout_ms, 3000, "election timeout ms"); DEFINE_int32(write_percentage, 100, "Percentage of fetch_add"); -DEFINE_string(confs, - "127.0.0.1:18200:0,127.0.0.1:18201:0,127.0.0.1:18202:0", +DEFINE_string(confs, "127.0.0.1:18200:0,127.0.0.1:18201:0,127.0.0.1:18202:0", "Configuration of the raft group"); -using curve::chunkserver::CopysetRequest; -using curve::chunkserver::CopysetResponse; -using curve::chunkserver::CopysetService_Stub; +using curve::chunkserver::CHUNK_OP_STATUS; +using curve::chunkserver::CHUNK_OP_TYPE; using curve::chunkserver::ChunkRequest; using curve::chunkserver::ChunkResponse; using curve::chunkserver::ChunkService_Stub; -using curve::chunkserver::PeerId; -using curve::chunkserver::LogicPoolID; -using curve::chunkserver::CopysetID; using curve::chunkserver::Configuration; -using curve::chunkserver::CHUNK_OP_TYPE; -using curve::chunkserver::CHUNK_OP_STATUS; using curve::chunkserver::COPYSET_OP_STATUS; +using curve::chunkserver::CopysetID; +using curve::chunkserver::CopysetRequest; +using curve::chunkserver::CopysetResponse; +using
curve::chunkserver::CopysetService_Stub; +using curve::chunkserver::LogicPoolID; +using curve::chunkserver::PeerId; -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { gflags::ParseCommandLineFlags(&argc, &argv, true); - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - uint64_t chunkId = 1; - uint64_t sn = 1; - char fillCh = 'a'; + CopysetID copysetId = 100001; + uint64_t chunkId = 1; + uint64_t sn = 1; + char fillCh = 'a'; PeerId leader; curve::chunkserver::Configuration conf; @@ -70,9 +68,7 @@ int main(int argc, char *argv[]) { LOG(FATAL) << "conf parse failed: " << FLAGS_confs; } - - - // 创建 copyset + // Create copyset { std::vector peers; conf.list_peers(&peers); @@ -105,8 +101,10 @@ int main(int argc, char *argv[]) { if (cntl.Failed()) { LOG(FATAL) << "create copyset fialed: " << cntl.ErrorText(); } - if (response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS //NOLINT - || response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { //NOLINT + if (response.status() == + COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS // NOLINT + || response.status() == + COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { // NOLINT LOG(INFO) << "create copyset success: " << response.status(); } else { LOG(FATAL) << "create copyset failed: "; @@ -116,11 +114,9 @@ int main(int argc, char *argv[]) { // wait leader ::usleep(1000 * FLAGS_election_timeout_ms); - butil::Status status = curve::chunkserver::WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - FLAGS_election_timeout_ms); //NOLINT + butil::Status status = + curve::chunkserver::WaitLeader(logicPoolId, copysetId, conf, &leader, + FLAGS_election_timeout_ms); // NOLINT LOG(INFO) << "leader is: " << leader.to_string(); if (0 != status.error_code()) { LOG(FATAL) << "Wait leader failed"; @@ -176,8 +172,5 @@ int main(int argc, char *argv[]) { } } - return 0; } - - diff --git a/test/chunkserver/clone/clone_copyer_test.cpp b/test/chunkserver/clone/clone_copyer_test.cpp index 3c15969d9a..033664c6a3 100644 --- a/test/chunkserver/clone/clone_copyer_test.cpp +++ b/test/chunkserver/clone/clone_copyer_test.cpp @@ -20,12 +20,13 @@ * Author: yangyaokai */ -#include -#include +#include "src/chunkserver/clone_copyer.h" + #include +#include +#include #include "include/client/libcurve.h" -#include "src/chunkserver/clone_copyer.h" #include "src/chunkserver/clone_core.h" #include "test/chunkserver/clone/clone_test_util.h" #include "test/client/mock/mock_file_client.h" @@ -46,21 +47,16 @@ const uint64_t EXPIRED_USE = 5; class MockDownloadClosure : public DownloadClosure { public: explicit MockDownloadClosure(AsyncDownloadContext* context) - : DownloadClosure(nullptr, nullptr, context, nullptr) - , isRun_(false) {} + : DownloadClosure(nullptr, nullptr, context, nullptr), isRun_(false) {} void Run() { CHECK(!isRun_) << "closure has been invoked."; isRun_ = true; } - bool IsFailed() { - return isFailed_; - } + bool IsFailed() { return isFailed_; } - bool IsRun() { - return isRun_; - } + bool IsRun() { return isRun_; } void Reset() { isFailed_ = false; @@ -71,16 +67,14 @@ class MockDownloadClosure : public DownloadClosure { bool isRun_; }; -class CloneCopyerTest : public testing::Test { +class CloneCopyerTest : public testing::Test { public: void SetUp() { curveClient_ = std::make_shared(); s3Client_ = std::make_shared(); Aws::InitAPI(awsOptions_); } - void TearDown() { - Aws::ShutdownAPI(awsOptions_); - } + void TearDown() { Aws::ShutdownAPI(awsOptions_); } protected: std::shared_ptr curveClient_; @@ -133,8 +127,8 @@ 
TEST_F(CloneCopyerTest, BasicTest) { ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 用例:读curve上的数据,读取成功 - * 预期:调用Open和Read读取数据 + /* Case: read data on curve and the read succeeds + * Expect: Open and Read are called to fetch the data */ context.location = "test:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test", _, true)) .WillOnce(Return(1)); EXPECT_CALL(*curveClient_, AioRead(_, _, _)) .WillOnce(Invoke([](int fd, CurveAioContext* context, curve::client::UserDataType dataType) { context->ret = 1024; context->cb(context); return LIBCURVE_ERROR::OK; })); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); ASSERT_FALSE(closure.IsFailed()); closure.Reset(); - /* 用例:再次读前面的文件,但是ret值为-1 - * 预期:直接Read,返回失败 + /* Case: read the same file again, but ret is -1 + * Expect: Read directly, the request fails */ context.location = "test:0@cs"; - EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)) - .Times(0); + EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)).Times(0); EXPECT_CALL(*curveClient_, AioRead(_, _, _)) .WillOnce(Invoke([](int fd, CurveAioContext* context, curve::client::UserDataType dataType) { @@ -169,21 +162,20 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 用例:读curve上的数据,Open的时候失败 - * 预期:返回-1 + /* Case: read data on curve, Open fails + * Expect: returns -1 */ context.location = "test2:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test2", _, true)) .WillOnce(Return(-1)); - EXPECT_CALL(*curveClient_, AioRead(_, _, _)) - .Times(0); + EXPECT_CALL(*curveClient_, AioRead(_, _, _)).Times(0); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 用例:读curve上的数据,Read的时候失败 - * 预期:返回-1 + /* Case: read data on curve, Read fails + * Expect: returns -1 */ context.location = "test2:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test2", _, true)) @@ -195,14 +187,13 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - - /* 用例:读s3上的数据,读取成功 - * 预期:返回0 + /* Case: read data on s3 and the read succeeds + * Expect: returns 0 */ context.location = "test@s3"; EXPECT_CALL(*s3Client_, GetObjectAsync(_)) .WillOnce(Invoke( - [&] (const std::shared_ptr<GetObjectAsyncContext>& context) { + [&](const std::shared_ptr<GetObjectAsyncContext>& context) { context->retCode = 0; context->cb(s3Client_.get(), context); })); copyer.DownloadAsync(&closure); ASSERT_FALSE(closure.IsFailed()); closure.Reset(); - /* 用例:读s3上的数据,读取失败 - * 预期:返回-1 + /* Case: read data on s3, the read fails + * Expect: returns -1 */ context.location = "test@s3"; EXPECT_CALL(*s3Client_, GetObjectAsync(_)) .WillOnce(Invoke( - [&] (const std::shared_ptr<GetObjectAsyncContext>& context) { + [&](const std::shared_ptr<GetObjectAsyncContext>& context) { context->retCode = -1; context->cb(s3Client_.get(), context); })); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - delete [] buf; + delete[] buf; } // fini test { - EXPECT_CALL(*curveClient_, Close(1)) - .Times(1); - EXPECT_CALL(*curveClient_, Close(2)) - .Times(1); - EXPECT_CALL(*curveClient_, UnInit()) - .Times(1); - EXPECT_CALL(*s3Client_, Deinit()) - .Times(1); + EXPECT_CALL(*curveClient_, Close(1)).Times(1); + EXPECT_CALL(*curveClient_, Close(2)).Times(1); + EXPECT_CALL(*curveClient_, UnInit()).Times(1); + EXPECT_CALL(*s3Client_, Deinit()).Times(1); ASSERT_EQ(0, copyer.Fini()); } }
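For orientation between these tests: the location string encodes the clone source, and each download goes through a context/closure pair. A minimal sketch with an illustrative file name, mirroring the fields used above:

    // Sketch: one async download through the copyer these tests construct.
    // "<file>:<offset>@cs" targets curve; "<object>@s3" targets s3.
    char* buf = new char[4096];
    AsyncDownloadContext context;
    context.location = "volume1:0@cs";
    context.offset = 0;
    context.size = 4096;
    context.buf = buf;
    MockDownloadClosure closure(&context);
    copyer.DownloadAsync(&closure);   // closure.Run() fires on completion
    delete[] buf;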
@@ -250,16 +237,15 @@ TEST_F(CloneCopyerTest, DisableTest) { options.curveUser.owner = ROOT_OWNER; options.curveUser.password = ROOT_PWD; options.curveFileTimeoutSec = EXPIRED_USE; - // 禁用curveclient和s3adapter + // Disable curveclient and s3adapter options.curveClient = nullptr; options.s3Client = nullptr; // curvefs init success - EXPECT_CALL(*curveClient_, Init(_)) - .Times(0); + EXPECT_CALL(*curveClient_, Init(_)).Times(0); ASSERT_EQ(0, copyer.Init(options)); - // 从上s3或者curve请求下载数据会返回失败 + // Requests to download data from s3 or curve will fail { char* buf = new char[4096]; AsyncDownloadContext context; @@ -268,30 +254,27 @@ TEST_F(CloneCopyerTest, DisableTest) { context.buf = buf; MockDownloadClosure closure(&context); - /* 用例:读curve上的数据,读取失败 + /* Case: read data on curve, the read fails */ context.location = "test:0@cs"; - EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)) - .Times(0); - EXPECT_CALL(*curveClient_, AioRead(_, _, _)) - .Times(0); + EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)).Times(0); + EXPECT_CALL(*curveClient_, AioRead(_, _, _)).Times(0); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 用例:读s3上的数据,读取失败 + /* Case: read data on s3, the read fails */ context.location = "test@s3"; - EXPECT_CALL(*s3Client_, GetObjectAsync(_)) - .Times(0); + EXPECT_CALL(*s3Client_, GetObjectAsync(_)).Times(0); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - delete [] buf; + delete[] buf; } - // fini 可以成功 + // Fini still succeeds ASSERT_EQ(0, copyer.Fini()); } @@ -308,7 +291,7 @@ TEST_F(CloneCopyerTest, ExpiredTest) { // curvefs init success EXPECT_CALL(*curveClient_, Init(StrEq(CURVE_CONF))) - .WillOnce(Return(LIBCURVE_ERROR::OK)); + .WillOnce(Return(LIBCURVE_ERROR::OK)); ASSERT_EQ(0, copyer.Init(options)); { @@ ... MockDownloadClosure closure(&context); /* Case: Read the same chunk after it expired - * Expect: Re-Open the curve file - */ + * Expect: Re-Open the curve file + */ context.location = "test:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test", _, true)) - .WillOnce(Return(1)); + .WillOnce(Return(1)); EXPECT_CALL(*curveClient_, AioRead(_, _, _)) - .WillOnce(Invoke([](int fd, CurveAioContext* context, - curve::client::UserDataType dataType) { - context->ret = 1024; - context->cb(context); - return LIBCURVE_ERROR::OK; - })); + .WillOnce(Invoke([](int fd, CurveAioContext* context, + curve::client::UserDataType dataType) { + context->ret = 1024; + context->cb(context); + return LIBCURVE_ERROR::OK; + })); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); ASSERT_FALSE(closure.IsFailed()); @@ -341,26 +324,23 @@ TEST_F(CloneCopyerTest, ExpiredTest) { context.location = "test:0@cs"; std::this_thread::sleep_for(std::chrono::seconds(1)); EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)) - .WillOnce(Return(2)); + .WillOnce(Return(2)); EXPECT_CALL(*curveClient_, AioRead(_, _, _)) - .WillOnce(Invoke([](int fd, CurveAioContext* context, - curve::client::UserDataType dataType) { - context->ret = 1024; - context->cb(context); - return LIBCURVE_ERROR::OK; - })); + .WillOnce(Invoke([](int fd, CurveAioContext* context, + curve::client::UserDataType dataType) { + context->ret = 1024; + context->cb(context); + return LIBCURVE_ERROR::OK; + })); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); closure.Reset(); - delete [] buf; + delete[] buf; } // fini - EXPECT_CALL(*curveClient_, Close(2)) - .Times(1); - EXPECT_CALL(*curveClient_, UnInit()) - .Times(1); - EXPECT_CALL(*s3Client_, Deinit()) - .Times(1); + EXPECT_CALL(*curveClient_, Close(2)).Times(1); + EXPECT_CALL(*curveClient_, UnInit()).Times(1); + EXPECT_CALL(*s3Client_, Deinit()).Times(1); ASSERT_EQ(0, copyer.Fini()); } diff --git
a/test/chunkserver/clone/clone_core_test.cpp b/test/chunkserver/clone/clone_core_test.cpp index 86d6a70898..2632acb635 100644 --- a/test/chunkserver/clone/clone_core_test.cpp +++ b/test/chunkserver/clone/clone_core_test.cpp @@ -20,21 +20,22 @@ * Author: yangyaokai */ -#include -#include +#include "src/chunkserver/clone_core.h" + #include +#include #include +#include #include -#include "src/chunkserver/clone_core.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/op_request.h" -#include "test/chunkserver/mock_copyset_node.h" +#include "src/fs/local_filesystem.h" #include "test/chunkserver/clone/clone_test_util.h" #include "test/chunkserver/clone/mock_clone_copyer.h" #include "test/chunkserver/datastore/mock_datastore.h" -#include "src/fs/local_filesystem.h" +#include "test/chunkserver/mock_copyset_node.h" namespace curve { namespace chunkserver { @@ -46,7 +47,7 @@ using curve::fs::LocalFsFactory; ACTION_TEMPLATE(SaveBraftTask, HAS_1_TEMPLATE_PARAMS(int, k), AND_1_VALUE_PARAMS(value)) { auto input = static_cast(::testing::get(args)); - auto output = static_cast(value); + auto output = static_cast(value); output->data->swap(*input.data); output->done = input.done; } @@ -83,18 +84,19 @@ class CloneCoreTest .WillRepeatedly(Return(LAST_INDEX)); } - std::shared_ptr<ReadChunkRequest> - GenerateReadRequest(CHUNK_OP_TYPE optype, off_t offset, size_t length) { - ChunkRequest *readRequest = new ChunkRequest(); + std::shared_ptr<ReadChunkRequest> GenerateReadRequest(CHUNK_OP_TYPE optype, + off_t offset, + size_t length) { + ChunkRequest* readRequest = new ChunkRequest(); readRequest->set_logicpoolid(LOGICPOOL_ID); readRequest->set_copysetid(COPYSET_ID); readRequest->set_chunkid(CHUNK_ID); readRequest->set_optype(optype); readRequest->set_offset(offset); readRequest->set_size(length); - brpc::Controller *cntl = new brpc::Controller(); - ChunkResponse *response = new ChunkResponse(); - FakeChunkClosure *closure = new FakeChunkClosure(); + brpc::Controller* cntl = new brpc::Controller(); + ChunkResponse* response = new ChunkResponse(); + FakeChunkClosure* closure = new FakeChunkClosure(); closure->SetCntl(cntl); closure->SetRequest(readRequest); closure->SetResponse(response); @@ -105,19 +107,19 @@ class CloneCoreTest } void SetCloneParam(std::shared_ptr<ReadChunkRequest> readRequest) { - ChunkRequest *request = - const_cast<ChunkRequest *>(readRequest->GetChunkRequest()); + ChunkRequest* request = + const_cast<ChunkRequest*>(readRequest->GetChunkRequest()); request->set_clonefilesource("/test"); request->set_clonefileoffset(0); } - void CheckTask(const braft::Task &task, off_t offset, size_t length, - char *buf) { + void CheckTask(const braft::Task& task, off_t offset, size_t length, + char* buf) { butil::IOBuf data; ChunkRequest request; auto req = ChunkOpRequest::Decode(*task.data, &request, &data, 0, PeerId("127.0.0.1:8200:0")); - auto preq = dynamic_cast(req.get()); + auto preq = dynamic_cast(req.get()); ASSERT_TRUE(preq != nullptr); ASSERT_EQ(LOGICPOOL_ID, request.logicpoolid()); @@ -139,19 +141,20 @@ class CloneCoreTest }; /** - * 测试CHUNK_OP_READ类型请求,请求读取的chunk不是clone chunk - * result:不会从远端拷贝数据,直接从本地读取数据,结果返回成功 + * Tests a CHUNK_OP_READ request where the chunk to read is not a clone + * chunk. + * Result: no data is copied from the remote end; the data is read directly + * from the local chunk and success is returned */ TEST_P(CloneCoreTest, ReadChunkTest1) { off_t offset = 0; size_t length = 5 * blocksize_; - std::shared_ptr<CloneCore> core - = std::make_shared<CloneCore>(SLICE_SIZE, true, copyer_); - std::shared_ptr<ReadChunkRequest> readRequest - =
GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + std::shared_ptr<CloneCore> core = + std::make_shared<CloneCore>(SLICE_SIZE, true, copyer_); + std::shared_ptr<ReadChunkRequest> readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - // 不会从源端拷贝数据 + // Will not copy data from the source EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); - // 获取chunk信息 + // Obtain chunk information CSChunkInfo info; info.isClone = false; info.metaPageSize = pagesize_; @@ -159,16 +162,16 @@ TEST_P(CloneCoreTest, ReadChunkTest1) { info.blockSize = blocksize_; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(1); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 不会产生PasteChunkRequest + // No PasteChunkRequest will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->resContent_.status); } /** - * 测试CHUNK_OP_READ类型请求,请求读取的chunk是clone chunk - * case1:请求读取的区域全部被写过 - * result1:全部从本地chunk读取 - * case2:请求读取的区域都未被写过 - * result2:全部从源端读取,产生paste请求 - * case3:请求读取的区域有部分被写过,部分未被写过 - * result3:写过区域从本地chunk读取,未写过区域从源端读取,产生paste请求 - * case4:请求读取的区域部分被写过,请求的偏移未与pagesize对齐 - * result4:返回错误 + * Tests CHUNK_OP_READ requests where the chunk to read is a clone chunk. + * case1: the whole requested region has been written + * result1: everything is read from the local chunk + * case2: none of the requested region has been written + * result2: everything is read from the source, and a paste request is + * generated + * case3: the requested region is partially written, partially unwritten + * result3: written regions are read from the local chunk, unwritten + * regions are read from the source, and a paste request is generated + * case4: the requested region is partially written and the requested offset + * is not aligned to pagesize + * result4: an error is returned */
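All four cases reduce to a walk over the chunk's block bitmap. A sketch of the classification being exercised; illustrative only, since the real logic lives in CloneCore, and Bitmap::Test is assumed to query one block's written bit:

    // Sketch: walk the bitmap over [offset, offset + length) and decide,
    // per block, whether the read is served locally or from the source.
    uint32_t beginIndex = offset / info.blockSize;
    uint32_t endIndex = (offset + length - 1) / info.blockSize;
    for (uint32_t i = beginIndex; i <= endIndex; ++i) {
        if (info.bitmap->Test(i)) {
            // written block: read from the local chunk (case1/case3)
        } else {
            // unwritten block: download from the source, then paste
            // (case2/case3)
        }
    }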
TEST_P(CloneCoreTest, ReadChunkTest2) { off_t offset = 0; size_t length = 5 * blocksize_; CSChunkInfo info; info.isClone = true; info.metaPageSize = pagesize_; info.chunkSize = chunksize_; info.blockSize = blocksize_; info.bitmap = std::make_shared<Bitmap>(chunksize_ / blocksize_); - std::shared_ptr<CloneCore> core - = std::make_shared<CloneCore>(SLICE_SIZE, true, copyer_); + std::shared_ptr<CloneCore> core = + std::make_shared<CloneCore>(SLICE_SIZE, true, copyer_); // case1 { info.bitmap->Set(); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each HandleReadRequest + // call std::shared_ptr<ReadChunkRequest> readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 - char *chunkData = new char[length]; + // Read the chunk file + char* chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), Return(CSErrorCode::Success))); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 不会产生PasteChunkRequest + // No PasteChunkRequest will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -237,26 +243,27 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { // case2 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each HandleReadRequest + // call std::shared_ptr<ReadChunkRequest> readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) .WillRepeatedly( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveBraftTask<0>(&task)); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal Propose the closure is handed to the concurrency + // layer. Since the node here is a mock, task.done->Run() must be + // invoked manually to release the resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_EQ( @@ -289,33 +297,34 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { { info.bitmap->Clear(); info.bitmap->Set(0, 2); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each HandleReadRequest + // call std::shared_ptr<ReadChunkRequest> readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) .WillRepeatedly( DoAll(SetArgPointee<1>(info),
Return(CSErrorCode::Success))); - // 读chunk文件 - char chunkData[pagesize_ + 2 * blocksize_]; // NOLINT(runtime/arrays) - memset(chunkData, 'a', pagesize_ + 2 * blocksize_); + // Read the chunk file + char chunkData[pagesize_ + 2 * blocksize_]; // NOLINT(runtime/arrays) + memset(chunkData, 'a', pagesize_ + 2 * blocksize_); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, 0, pagesize_ + 2 * blocksize_)) .WillOnce( DoAll(SetArrayArgument<2>( chunkData, chunkData + pagesize_ + 2 * blocksize_), Return(CSErrorCode::Success))); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -323,24 +332,30 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal Propose the closure is handed to the concurrency + // layer. Since the node here is a mock, task.done->Run() must be + // invoked manually to release the resources ASSERT_NE(nullptr, task.done); task.done->Run(); - ASSERT_EQ(memcmp(chunkData, - closure->resContent_.attachment.to_string().c_str(), //NOLINT - 3 * blocksize_), 0); + ASSERT_EQ( + memcmp( + chunkData, + closure->resContent_.attachment.to_string().c_str(), // NOLINT + 3 * blocksize_), + 0); ASSERT_EQ(memcmp(cloneData, - closure->resContent_.attachment.to_string().c_str() + 3 * blocksize_, //NOLINT - 2 * blocksize_), 0); + closure->resContent_.attachment.to_string().c_str() + + 3 * blocksize_, // NOLINT + 2 * blocksize_), + 0); } // case4 { @@ -349,7 +364,8 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { length = 4 * blocksize_; info.bitmap->Clear(); info.bitmap->Set(0, 2); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each HandleReadRequest + // call std::shared_ptr<ReadChunkRequest> readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); - // 读chunk文件 + // Read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(0); - // 不产生PasteChunkRequest + // Do not generate PasteChunkRequest braft::Task task; EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(-1, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast<FakeChunkClosure *>(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast<FakeChunkClosure*>(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, closure->resContent_.status); } } /** - * 测试CHUNK_OP_READ类型请求,请求读取的chunk不存在,但是请求中包含源端数据地址 - * 预期结果:从源端下载数据,产生paste请求 + * Tests a CHUNK_OP_READ request where the chunk to read does not exist but
the request contains the source data address. Expected result: + * Download data from the source and generate a paste request */ TEST_P(CloneCoreTest, ReadChunkTest3) { off_t offset = 0; size_t length = 5 * blocksize_; CSChunkInfo info; info.isClone = true; info.metaPageSize = pagesize_; info.chunkSize = chunksize_; info.blockSize = blocksize_; info.bitmap = std::make_shared(chunksize_ / pagesize_); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); // case1 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each call to + // HandleReadRequest std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); SetCloneParam(readRequest); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) .WillRepeatedly(Return(CSErrorCode::ChunkNotExistError)); - // 读chunk文件 + // Read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -422,16 +440,17 @@ TEST_P(CloneCoreTest, ReadChunkTest3) { ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal propose, the closure is handed over to the + // concurrency layer for processing. Since the node here is mocked, we + // must call task.done.Run ourselves to release the resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_EQ( @@ -445,13 +464,13 @@ TEST_P(CloneCoreTest, ReadChunkTest3) { } /** - * 执行HandleReadRequest过程中出现错误 - * case1:GetChunkInfo时出错 - * result1:返回-1,response状态改为CHUNK_OP_STATUS_FAILURE_UNKNOWN - * case2:Download时出错 - * result2:返回-1,response状态改为CHUNK_OP_STATUS_FAILURE_UNKNOWN - * case3:ReadChunk时出错 - * result3:返回-1,response状态改为CHUNK_OP_STATUS_FAILURE_UNKNOWN + * An error occurs while executing HandleReadRequest + * Case1: GetChunkInfo fails + * Result1: returns -1, response status set to CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Case2: Download fails + * Result2: returns -1, response status set to CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Case3: ReadChunk fails + * Result3: returns -1, response status set to CHUNK_OP_STATUS_FAILURE_UNKNOWN */ TEST_P(CloneCoreTest, ReadChunkErrorTest) { off_t offset = 0; @@ -479,8 +498,8 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { ASSERT_EQ(-1, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure
*closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, @@ -494,10 +513,10 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); closure->SetFailed(); })); @@ -505,8 +524,8 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, @@ -522,17 +541,17 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { .Times(2) .WillRepeatedly( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) .WillOnce(Return(CSErrorCode::InternalError)); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -540,16 +559,17 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal propose, the closure is handed over to the + // concurrency layer for processing. Since the node here is mocked, we + // must call task.done.Run ourselves to release the resources ASSERT_NE(nullptr, task.done); task.done->Run(); delete[] cloneData; @@ -557,19 +577,20 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { } /** - * 测试CHUNK_OP_RECOVER类型请求,请求的chunk不是clone chunk - * result:不会从远端拷贝数据,也不会从本地读取数据,直接返回成功 + * Test CHUNK_OP_RECOVER type request, the requested chunk is not a clone + * chunk. Result: does not copy data from the remote end or read data + * locally, returns success directly */ TEST_P(CloneCoreTest, RecoverChunkTest1) { off_t offset = 0; size_t length = 5 * pagesize_; - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); - // 不会从源端拷贝数据 + std::shared_ptr
core = + std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); + // Will not copy data from the source EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); - // 获取chunk信息 + // Obtain chunk information CSChunkInfo info; info.isClone = false; info.metaPageSize = pagesize_; @@ -577,14 +598,14 @@ TEST_P(CloneCoreTest, RecoverChunkTest1) { info.blockSize = blocksize_; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); - // 不会产生PasteChunkRequest + // No PasteChunkRequest will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -592,11 +613,11 @@ TEST_P(CloneCoreTest, RecoverChunkTest1) { } /** - * 测试CHUNK_OP_RECOVER类型请求,请求的chunk是clone chunk - * case1:请求恢复的区域全部被写过 - * result1:不会拷贝数据,直接返回成功 - * case2:请求恢复的区域全部或部分未被写过 - * result2:从远端拷贝数据,并产生paste请求 + * Test CHUNK_OP_RECOVER type request, the requested chunk is a clone chunk + * Case1: The whole area requested for recovery has been written + * Result1: Will not copy data, returns success directly + * Case2: The area requested for recovery is wholly or partially unwritten + * Result2: Copies data from the remote end and generates a paste request */ TEST_P(CloneCoreTest, RecoverChunkTest2) { off_t offset = 0; @@ -607,26 +628,27 @@ TEST_P(CloneCoreTest, RecoverChunkTest2) { info.chunkSize = chunksize_; info.blockSize = blocksize_; info.bitmap = std::make_shared(chunksize_ / blocksize_); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); // case1 { info.bitmap->Set(); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each call to + // HandleReadRequest std::shared_ptr readRequest = GenerateReadRequest( CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不会读chunk文件 + // Will not read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); - // 不会产生PasteChunkRequest + // No PasteChunkRequest will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -636,23 +658,24 @@ TEST_P(CloneCoreTest, RecoverChunkTest2) { // case2 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each call to + // HandleReadRequest std::shared_ptr readRequest = GenerateReadRequest( CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT - char *cloneData = new char[length]; + char* cloneData = new char[length];
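The recover cases above all reduce to one bitmap check: if every block in the requested range has been written, the request succeeds without any I/O; otherwise the data is downloaded from the source and pasted. A minimal sketch of that check, assuming the NextClearBit/NO_POS helpers of curve::common::Bitmap and a hypothetical function name:

// Illustrative only: IsRangeWritten is not the actual CloneCore API.
bool IsRangeWritten(const CSChunkInfo& info, off_t offset, size_t length) {
    uint32_t beginIndex = offset / info.blockSize;
    uint32_t endIndex = (offset + length - 1) / info.blockSize;
    // A clear bit marks an unwritten block; NO_POS means no clear bit was
    // found, so the whole range is written and no download is needed.
    return info.bitmap->NextClearBit(beginIndex, endIndex) ==
           curve::common::Bitmap::NO_POS;
}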
memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不会读chunk文件 + // Will not read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -660,14 +683,16 @@ TEST_P(CloneCoreTest, RecoverChunkTest2) { ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); - // closure被转交给PasteRequest处理,这里closure还未执行 + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); + // The closure is handed over to the PasteRequest for processing and + // has not been executed yet ASSERT_FALSE(closure->isDone_); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal propose, the closure is handed over to the + // concurrency layer for processing. Since the node here is mocked, we + // must call task.done.Run ourselves to release the resources ASSERT_NE(nullptr, task.done); task.done->Run(); @@ -678,8 +703,9 @@ TEST_P(CloneCoreTest, RecoverChunkTest2) { } } -// case1: read chunk时,从远端拷贝数据,但是不会产生paste请求 -// case2: recover chunk时,从远端拷贝数据,会产生paste请求 +// Case1: When reading a chunk, data is copied from the remote end, but no +// paste request is generated. Case2: When recovering a chunk, data copied +// from the remote end generates a paste request TEST_P(CloneCoreTest, DisablePasteTest) { off_t offset = 0; size_t length = 5 * blocksize_; @@ -689,39 +715,40 @@ TEST_P(CloneCoreTest, DisablePasteTest) { info.chunkSize = chunksize_; info.blockSize = blocksize_; info.bitmap = std::make_shared(chunksize_ / blocksize_); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, false, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, false, copyer_); // case1 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each call to + // HandleReadRequest std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) .WillRepeatedly( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 不会产生paste chunk请求 + // No paste chunk request will be generated EXPECT_CALL(*node_, Propose(_)).Times(0);
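The toggle DisablePasteTest exercises is the second constructor argument shown above (true in the earlier suites, false here; the template argument stripped by extraction is assumed to be CloneCore, and the flag name below is assumed, not confirmed by the source). A condensed sketch of the contract the two cases assert:

// Paste disabled: a read returns the downloaded data without
// materializing it in the local chunk.
std::shared_ptr<CloneCore> core =
    std::make_shared<CloneCore>(SLICE_SIZE, /*enablePaste=*/false, copyer_);
// CHUNK_OP_READ    -> DownloadAsync once, Propose() is never called
// CHUNK_OP_RECOVER -> DownloadAsync once, then a PasteChunkRequest is
//                     still proposed so the chunk actually gets recovered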
ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -732,23 +759,24 @@ TEST_P(CloneCoreTest, DisablePasteTest) { // case2 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // The request is released by the closure after each call to + // HandleReadRequest std::shared_ptr readRequest = GenerateReadRequest( CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不会读chunk文件 + // Will not read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -756,14 +784,16 @@ TEST_P(CloneCoreTest, DisablePasteTest) { ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); - // closure被转交给PasteRequest处理,这里closure还未执行 + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); + // The closure is handed over to the PasteRequest for processing and + // has not been executed yet ASSERT_FALSE(closure->isDone_); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal propose, the closure is handed over to the + // concurrency layer for processing. Since the node here is mocked, we + // must call task.done.Run ourselves to release the resources ASSERT_NE(nullptr, task.done); task.done->Run(); @@ -775,8 +805,7 @@ TEST_P(CloneCoreTest, DisablePasteTest) { } INSTANTIATE_TEST_CASE_P( - CloneCoreTest, - CloneCoreTest, + CloneCoreTest, CloneCoreTest, ::testing::Values( // chunk size block size, metapagesize std::make_tuple(16U * 1024 * 1024, 4096U, 4096U), diff --git a/test/chunkserver/clone/clone_manager_test.cpp b/test/chunkserver/clone/clone_manager_test.cpp index f41bc1bed2..6b29058364 100644 --- a/test/chunkserver/clone/clone_manager_test.cpp +++ b/test/chunkserver/clone/clone_manager_test.cpp @@ -20,12 +20,14 @@ * Author: yangyaokai */ -#include -#include +#include "src/chunkserver/clone_manager.h" + #include +#include +#include + #include -#include "src/chunkserver/clone_manager.h" #include "src/chunkserver/op_request.h" namespace curve { @@ -33,22 +35,18 @@ namespace chunkserver { class UTCloneTask : public CloneTask { public: - UTCloneTask() : CloneTask(nullptr, nullptr, nullptr) - , sleepTime_(0) {} + UTCloneTask() : CloneTask(nullptr, nullptr, nullptr), sleepTime_(0) {} void Run() { - std::this_thread::sleep_for( - std::chrono::milliseconds(sleepTime_)); + std::this_thread::sleep_for(std::chrono::milliseconds(sleepTime_)); isComplete_ =
true; } - void SetSleepTime(uint32_t sleepTime) { - sleepTime_ = sleepTime; - } + void SetSleepTime(uint32_t sleepTime) { sleepTime_ = sleepTime; } private: uint32_t sleepTime_; }; -class CloneManagerTest : public testing::Test { +class CloneManagerTest : public testing::Test { public: void SetUp() {} void TearDown() {} @@ -58,32 +56,34 @@ TEST_F(CloneManagerTest, BasicTest) { CloneOptions options; options.checkPeriod = 100; CloneManager cloneMgr; - // 如果线程数设置为0,启动线程池失败 + // If the number of threads is set to 0, starting the thread pool fails { options.threadNum = 0; ASSERT_EQ(cloneMgr.Init(options), 0); ASSERT_EQ(cloneMgr.Run(), -1); } - // 队列深度为0,启动线程池会失败 + // Queue depth is 0, starting thread pool will fail { options.threadNum = 5; options.queueCapacity = 0; ASSERT_EQ(cloneMgr.Init(options), 0); ASSERT_EQ(cloneMgr.Run(), -1); } - // 线程数和队列深度都大于0,可以启动线程池 + // If the number of threads and queue depth are both greater than 0, the + // thread pool can be started { options.threadNum = 5; options.queueCapacity = 100; ASSERT_EQ(cloneMgr.Init(options), 0); ASSERT_EQ(cloneMgr.Run(), 0); - // 线程池启动运行后,重复Run直接返回成功 + // After the thread pool starts running, repeating the run directly + // returns success ASSERT_EQ(cloneMgr.Run(), 0); } - // 通过Fini暂停任务 + // Pause tasks through Fini { ASSERT_EQ(cloneMgr.Fini(), 0); - // 重复Fini直接返回成功 + // Repeated Fini directly returns success ASSERT_EQ(cloneMgr.Fini(), 0); } } @@ -99,9 +99,9 @@ TEST_F(CloneManagerTest, TaskTest) { std::shared_ptr req = std::make_shared(); - // 测试 GenerateCloneTask 和 IssueCloneTask + // Testing GenerateCloneTask and IssueCloneTask { - // options.core为nullptr,则产生的任务也是nullptr + // If options.core is nullptr, the resulting task is also nullptr std::shared_ptr task = cloneMgr.GenerateCloneTask(req, nullptr); ASSERT_EQ(task, nullptr); @@ -111,55 +111,58 @@ TEST_F(CloneManagerTest, TaskTest) { task = cloneMgr.GenerateCloneTask(req, nullptr); ASSERT_NE(task, nullptr); - // 自定义任务测试 + // Custom task testing task = std::make_shared(); ASSERT_FALSE(task->IsComplete()); - // 如果clone manager还未启动,则无法发布任务 + // If the clone manager has not yet started, the task cannot be + // published ASSERT_FALSE(cloneMgr.IssueCloneTask(task)); - // 启动以后就可以发布任务 + // After startup, tasks can be published ASSERT_EQ(cloneMgr.Run(), 0); ASSERT_TRUE(cloneMgr.IssueCloneTask(task)); - // 等待一点时间,任务执行完成,检查任务状态以及是否从队列中移除 - std::this_thread::sleep_for( - std::chrono::milliseconds(200)); + // Wait for a moment, the task execution is completed, check the task + // status and whether it has been removed from the queue + std::this_thread::sleep_for(std::chrono::milliseconds(200)); ASSERT_TRUE(task->IsComplete()); - // 无法发布空任务 + // Unable to publish empty task ASSERT_FALSE(cloneMgr.IssueCloneTask(nullptr)); } - // 测试自定义的测试任务 + // Test custom test tasks { - // 初始化执行时间各不相同的任务 + // Initialize tasks with varying execution times std::shared_ptr task1 = std::make_shared(); std::shared_ptr task2 = std::make_shared(); std::shared_ptr task3 = std::make_shared(); task1->SetSleepTime(100); task2->SetSleepTime(300); task3->SetSleepTime(500); - // 同时发布所有任务 + // Publish all tasks simultaneously ASSERT_TRUE(cloneMgr.IssueCloneTask(task1)); ASSERT_TRUE(cloneMgr.IssueCloneTask(task2)); ASSERT_TRUE(cloneMgr.IssueCloneTask(task3)); - // 此时任务还在执行中,此时引用计数为2 + // At this point, the task is still executing and the reference count is + // 2 ASSERT_FALSE(task1->IsComplete()); ASSERT_FALSE(task2->IsComplete()); ASSERT_FALSE(task3->IsComplete()); - // 等待220ms,task1执行成功,其他还没完成;220ms基本可以保证task1执行完 - 
std::this_thread::sleep_for( - std::chrono::milliseconds(220)); + // Waiting for 220ms, task1 successfully executed, but other tasks have + // not been completed yet; 220ms basically guarantees the completion of + // task1 execution + std::this_thread::sleep_for(std::chrono::milliseconds(220)); ASSERT_TRUE(task1->IsComplete()); ASSERT_FALSE(task2->IsComplete()); ASSERT_FALSE(task3->IsComplete()); - // 再等待200ms,task2执行成功,task3还在执行中 - std::this_thread::sleep_for( - std::chrono::milliseconds(200)); + // Wait another 200ms, task2 successfully executed, and task3 is still + // executing + std::this_thread::sleep_for(std::chrono::milliseconds(200)); ASSERT_TRUE(task1->IsComplete()); ASSERT_TRUE(task2->IsComplete()); ASSERT_FALSE(task3->IsComplete()); - // 再等待200ms,所有任务执行成功,任务全被移出队列 - std::this_thread::sleep_for( - std::chrono::milliseconds(200)); + // Wait for another 200ms, all tasks are successfully executed, and all + // tasks are moved out of the queue + std::this_thread::sleep_for(std::chrono::milliseconds(200)); ASSERT_TRUE(task1->IsComplete()); ASSERT_TRUE(task2->IsComplete()); ASSERT_TRUE(task3->IsComplete()); diff --git a/test/chunkserver/clone/op_request_test.cpp b/test/chunkserver/clone/op_request_test.cpp index 6746594097..1b509e4b0f 100644 --- a/test/chunkserver/clone/op_request_test.cpp +++ b/test/chunkserver/clone/op_request_test.cpp @@ -20,16 +20,18 @@ * Author: yangyaokai */ -#include -#include +#include "src/chunkserver/op_request.h" + #include +#include +#include + #include -#include "src/chunkserver/op_request.h" #include "test/chunkserver/clone/clone_test_util.h" #include "test/chunkserver/clone/mock_clone_manager.h" -#include "test/chunkserver/mock_copyset_node.h" #include "test/chunkserver/datastore/mock_datastore.h" +#include "test/chunkserver/mock_copyset_node.h" namespace curve { namespace chunkserver { @@ -67,28 +69,23 @@ class OpRequestTest FakeCopysetNode(); FakeCloneManager(); } - void TearDown() { - } + void TearDown() {} void FakeCopysetNode() { - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); - EXPECT_CALL(*node_, GetDataStore()) - .WillRepeatedly(Return(datastore_)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, GetDataStore()).WillRepeatedly(Return(datastore_)); EXPECT_CALL(*node_, GetConcurrentApplyModule()) .WillRepeatedly(Return(concurrentApplyModule_.get())); EXPECT_CALL(*node_, GetAppliedIndex()) .WillRepeatedly(Return(LAST_INDEX)); PeerId peer(PEER_STRING); - EXPECT_CALL(*node_, GetLeaderId()) - .WillRepeatedly(Return(peer)); + EXPECT_CALL(*node_, GetLeaderId()).WillRepeatedly(Return(peer)); } void FakeCloneManager() { EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) .WillRepeatedly(Return(nullptr)); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillRepeatedly(Return(true)); } protected: @@ -99,11 +96,11 @@ class OpRequestTest std::shared_ptr node_; std::shared_ptr datastore_; std::shared_ptr cloneMgr_; - std::shared_ptr concurrentApplyModule_; + std::shared_ptr concurrentApplyModule_; }; TEST_P(OpRequestTest, CreateCloneTest) { - // 创建CreateCloneChunkRequest + // Create CreateCloneChunkRequest LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -118,20 +115,17 @@ TEST_P(OpRequestTest, CreateCloneTest) { request->set_location(location); request->set_size(size); request->set_sn(sn); - brpc::Controller *cntl = new brpc::Controller(); - ChunkResponse *response = new ChunkResponse(); 
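Condensed from the CloneManager cases above, the lifecycle under test is Init -> Run -> IssueCloneTask -> IsComplete -> Fini; a minimal sketch reusing the same UTCloneTask from this file:

CloneOptions options;
options.threadNum = 5;        // thread count and queue depth must both be
options.queueCapacity = 100;  // greater than 0, otherwise Run() fails
options.checkPeriod = 100;
CloneManager cloneMgr;
ASSERT_EQ(0, cloneMgr.Init(options));
ASSERT_EQ(0, cloneMgr.Run());
auto task = std::make_shared<UTCloneTask>();
ASSERT_TRUE(cloneMgr.IssueCloneTask(task));
// UTCloneTask::Run() sleeps for sleepTime_ (0 by default) and then marks
// itself complete, so a short wait suffices before checking the state.
std::this_thread::sleep_for(std::chrono::milliseconds(200));
ASSERT_TRUE(task->IsComplete());
ASSERT_EQ(0, cloneMgr.Fini());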
- UnitTestClosure *closure = new UnitTestClosure(); + brpc::Controller* cntl = new brpc::Controller(); + ChunkResponse* response = new ChunkResponse(); + UnitTestClosure* closure = new UnitTestClosure(); closure->SetCntl(cntl); closure->SetRequest(request); closure->SetResponse(response); std::shared_ptr opReq = - std::make_shared(node_, - cntl, - request, - response, - closure); + std::make_shared(node_, cntl, request, + response, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -152,23 +146,22 @@ TEST_P(OpRequestTest, CreateCloneTest) { ASSERT_EQ(sn, request->sn()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: Request to forward request and return + * CHUNK_OP_STATUS_REDIRECTED */ { - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(false)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(false)); // PeerId leaderId(PEER_STRING); // EXPECT_CALL(*node_, GetLeaderId()) // .WillOnce(Return(leaderId)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); opReq->Process(); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, @@ -176,28 +169,27 @@ TEST_P(OpRequestTest, CreateCloneTest) { // ASSERT_STREQ(closure->response_->redirect().c_str(), PEER_STRING); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == true - * 预期: 会调用Propose,且不会调用closure + * Test Process + * Scenario: node_->IsLeaderTerm() == true + * Expected: Propose will be called and closure will not be called */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::Task task; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveArg<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveArg<0>(&task)); opReq->Process(); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(closure->response_->has_status()); - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // Since the node here is mock, it is necessary to proactively execute + // task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_TRUE(closure->isDone_); @@ -251,8 +243,7 @@ TEST_P(OpRequestTest, CreateCloneTest) { // set expection EXPECT_CALL(*datastore_, CreateCloneChunk(_, _, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InvalidArgError)); - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(0); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(0); opReq->OnApply(3, closure); @@ -264,15 +255,15 @@ TEST_P(OpRequestTest, CreateCloneTest) { closure->response_->status()); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk成功 - * 预期:无返回 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk successful + * Expected: No return */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, CreateCloneChunk(_, _, _, _, _)) .WillOnce(Return(CSErrorCode::Success)); @@ -280,15 +271,15 @@ TEST_P(OpRequestTest, CreateCloneTest) { opReq->OnApplyFromLog(datastore_, *request, data); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回InternalError - * 预期:进程退出 + * Testing OnApplyFromLog 
+ * Scenario: CreateCloneChunk failed, returning InternalError + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, CreateCloneChunk(_, _, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InternalError)); @@ -296,37 +287,33 @@ TEST_P(OpRequestTest, CreateCloneTest) { ASSERT_DEATH(opReq->OnApplyFromLog(datastore_, *request, data), ""); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回其他错误 - * 预期:进程退出 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk failed with other errors returned + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, CreateCloneChunk(_, _, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InvalidArgError)); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } TEST_P(OpRequestTest, PasteChunkTest) { - // 生成临时的readrequest - ChunkResponse *response = new ChunkResponse(); + // Generate temporary readrequest + ChunkResponse* response = new ChunkResponse(); std::shared_ptr readChunkRequest = - std::make_shared(node_, - nullptr, - nullptr, - nullptr, - response, - nullptr); - - // 创建PasteChunkRequest + std::make_shared(node_, nullptr, nullptr, nullptr, + response, nullptr); + + // Create PasteChunkRequest LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -343,17 +330,14 @@ TEST_P(OpRequestTest, PasteChunkTest) { butil::IOBuf cloneData; cloneData.append(str); - UnitTestClosure *closure = new UnitTestClosure(); + UnitTestClosure* closure = new UnitTestClosure(); closure->SetRequest(request); closure->SetResponse(response); std::shared_ptr opReq = - std::make_shared(node_, - request, - response, - &cloneData, - closure); + std::make_shared(node_, request, response, + &cloneData, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -376,23 +360,22 @@ TEST_P(OpRequestTest, PasteChunkTest) { ASSERT_STREQ(str.c_str(), data.to_string().c_str()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: Request to forward request and return + * CHUNK_OP_STATUS_REDIRECTED */ { - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(false)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(false)); // PeerId leaderId(PEER_STRING); // EXPECT_CALL(*node_, GetLeaderId()) // .WillOnce(Return(leaderId)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); opReq->Process(); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, @@ -400,85 +383,83 @@ TEST_P(OpRequestTest, PasteChunkTest) { // ASSERT_STREQ(closure->response_->redirect().c_str(), PEER_STRING); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == true - * 预期: 会调用Propose,且不会调用closure + * Test Process + * Scenario: node_->IsLeaderTerm() == true + * Expected: Propose will be called and closure will not be called */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::Task task; - EXPECT_CALL(*node_, Propose(_)) - 
.WillOnce(SaveArg<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveArg<0>(&task)); opReq->Process(); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(response->has_status()); - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // Since the node here is mock, it is necessary to proactively execute + // task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_TRUE(closure->isDone_); } /** - * 测试OnApply - * 用例:CreateCloneChunk成功 - * 预期:返回 CHUNK_OP_STATUS_SUCCESS ,并更新apply index + * Test OnApply + * Scenario: CreateCloneChunk successful + * Expected: return CHUNK_OP_STATUS_SUCCESS and update the apply index */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillOnce(Return(CSErrorCode::Success)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response->status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response->status()); } /** - * 测试OnApply - * 用例:CreateCloneChunk失败,返回InternalError - * 预期:进程退出 + * Test OnApply + * Scenario: CreateCloneChunk failed, returning InternalError + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InternalError)); ASSERT_DEATH(opReq->OnApply(3, closure), ""); } /** - * 测试OnApply - * 用例:CreateCloneChunk失败,返回其他错误 - * 预期:进程退出 + * Test OnApply + * Scenario: CreateCloneChunk failed with other errors returned + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InvalidArgError)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -486,15 +467,15 @@ TEST_P(OpRequestTest, PasteChunkTest) { response->status()); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk成功 - * 预期:无返回 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk successful + * Expected: No return */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillOnce(Return(CSErrorCode::Success)); @@ -502,15 +483,15 @@ TEST_P(OpRequestTest, PasteChunkTest) { opReq->OnApplyFromLog(datastore_, *request, data); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回InternalError - * 预期:进程退出 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk failed, returning InternalError + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InternalError)); @@ -518,27 +499,27 @@ TEST_P(OpRequestTest, PasteChunkTest) { ASSERT_DEATH(opReq->OnApplyFromLog(datastore_, *request, data), ""); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回其他错误 - * 预期:进程退出 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk failed with other errors returned + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations 
EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InvalidArgError)); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } TEST_P(OpRequestTest, ReadChunkTest) { - // 创建CreateCloneChunkRequest + // Create CreateCloneChunkRequest LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -551,21 +532,17 @@ TEST_P(OpRequestTest, ReadChunkTest) { request->set_optype(CHUNK_OP_READ); request->set_offset(offset); request->set_size(length); - brpc::Controller *cntl = new brpc::Controller(); - ChunkResponse *response = new ChunkResponse(); - UnitTestClosure *closure = new UnitTestClosure(); + brpc::Controller* cntl = new brpc::Controller(); + ChunkResponse* response = new ChunkResponse(); + UnitTestClosure* closure = new UnitTestClosure(); closure->SetCntl(cntl); closure->SetRequest(request); closure->SetResponse(response); std::shared_ptr opReq = - std::make_shared(node_, - cloneMgr_.get(), - cntl, - request, - response, - closure); + std::make_shared(node_, cloneMgr_.get(), cntl, + request, response, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -585,17 +562,16 @@ TEST_P(OpRequestTest, ReadChunkTest) { ASSERT_EQ(length, request->size()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: Request to forward request and return + * CHUNK_OP_STATUS_REDIRECTED */ { // set expection - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(false)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(false)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); opReq->Process(); @@ -616,19 +592,16 @@ TEST_P(OpRequestTest, ReadChunkTest) { closure->Reset(); // set expection - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::LeaderLeaseStatus status; status.state = braft::LEASE_EXPIRED; EXPECT_CALL(*node_, GetLeaderLeaseStatus(_)) .WillOnce(SetArgPointee<0>(status)); - EXPECT_CALL(*node_, IsLeaseLeader(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*node_, IsLeaseLeader(_)).WillOnce(Return(false)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); opReq->Process(); @@ -649,20 +622,17 @@ TEST_P(OpRequestTest, ReadChunkTest) { closure->Reset(); // set expection - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::LeaderLeaseStatus status; status.state = braft::LEASE_NOT_READY; EXPECT_CALL(*node_, GetLeaderLeaseStatus(_)) .WillOnce(SetArgPointee<0>(status)); - EXPECT_CALL(*node_, IsLeaseLeader(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*node_, IsLeaseLeader(_)).WillOnce(Return(false)); braft::Task task; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveArg<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveArg<0>(&task)); opReq->Process(); @@ -688,20 +658,17 @@ TEST_P(OpRequestTest, ReadChunkTest) { closure->Reset(); // set expection - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::LeaderLeaseStatus status; status.state = braft::LEASE_DISABLED; EXPECT_CALL(*node_, GetLeaderLeaseStatus(_)) .WillOnce(SetArgPointee<0>(status)); - 
EXPECT_CALL(*node_, IsLeaseLeader(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*node_, IsLeaseLeader(_)).WillOnce(Return(false)); braft::Task task; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveArg<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveArg<0>(&task)); opReq->Process(); @@ -724,36 +691,34 @@ TEST_P(OpRequestTest, ReadChunkTest) { info.bitmap = std::make_shared(chunksize_ / blocksize_); /** - * 测试Process - * 用例: node_->IsLeaderTerm() == true, - * 请求的 apply index 小于等于 node的 apply index - * 预期: 不会走一致性协议,请求提交给concurrentApplyModule_处理 + * Test Process + * Scenario: node_->IsLeaderTerm() == true, + * the requested applied index is less than or equal to the node's + * applied index. Expected: skips the consistency protocol and submits + * the request to concurrentApplyModule_ for handling */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::LeaderLeaseStatus status; status.state = braft::LEASE_VALID; EXPECT_CALL(*node_, GetLeaderLeaseStatus(_)) .WillOnce(SetArgPointee<0>(status)); - EXPECT_CALL(*node_, IsLeaseLeader(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*node_, IsLeaseLeader(_)).WillOnce(Return(true)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - char *chunkData = new char[length]; + char* chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), Return(CSErrorCode::Success))); @@ -775,21 +740,21 @@ TEST_P(OpRequestTest, ReadChunkTest) { } /** - * 测试OnApply - * 用例:请求的 chunk 不是 clone chunk - * 预期:从本地读chunk,返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is not a clone chunk + * Expected: the chunk is read locally, returning CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 - char *chunkData = new char[length]; + // Read the chunk file + char* chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), Return(CSErrorCode::Success))); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -811,22 +776,23 @@ TEST_P(OpRequestTest, ReadChunkTest) { } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap都为1 - * 预期:从本地读chunk,返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and all bitmap bits in + * the requested range are 1. Expected: the chunk is read locally, + * returning CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Set(); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 - char *chunkData = new char[length]; + // Read the chunk file + char* chunkData = new
char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), @@ -834,7 +800,7 @@ TEST_P(OpRequestTest, ReadChunkTest) { opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->response_->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -850,31 +816,29 @@ TEST_P(OpRequestTest, ReadChunkTest) { } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0 - * 预期:将请求转发给clone manager处理 + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmap in the + * request area has a bit of 0 Expected: Forward request to clone manager + * for processing */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Clear(1); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); - EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) - .Times(1); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillOnce(Return(true)); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Reading Chunk Files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); + EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)).Times(1); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillOnce(Return(true)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(closure->response_->has_status()); @@ -883,54 +847,50 @@ TEST_P(OpRequestTest, ReadChunkTest) { ASSERT_TRUE(closure->isDone_); } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 ChunkNotExistError - * 预期:请求失败,返回 CHUNK_OP_STATUS_CHUNK_NOTEXIST + * Test OnApply + * Scenario: GetChunkInfo returns ChunkNotExistError + * Expected: Request failed, returning CHUNK_OP_STATUS_CHUNK_NOTEXIST */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::ChunkNotExistError)); - // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + // Unable to read chunk files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, response->status()); } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 ChunkNotExistError - * 但是请求中包含源chunk的信息 - * 预期:将请求转发给clone manager处理 + * Test OnApply + * Scenario: GetChunkInfo returns ChunkNotExistError + * But the request contains information about the source chunk + * Expected: Forward request to clone manager for processing */ { - // 重置closure + // Reset closure closure->Reset(); request->set_clonefilesource("/test"); request->set_clonefileoffset(0); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::ChunkNotExistError)); - // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); - EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) - .Times(1); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillOnce(Return(true)); + // Reading Chunk Files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); + 
EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)).Times(1); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillOnce(Return(true)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(closure->response_->has_status()); @@ -939,137 +899,135 @@ TEST_P(OpRequestTest, ReadChunkTest) { ASSERT_TRUE(closure->isDone_); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap都为1 - * 请求中包含源chunk的信息 - * 预期:从本地读chunk,返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmaps in the + * request area are all 1 The request contains information about the source + * chunk Expected: Chunk read locally, returning CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); request->set_clonefilesource("/test"); request->set_clonefileoffset(0); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Set(); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件 - char *chunkData = new char[length]; + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Reading Chunk Files + char* chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .WillOnce(DoAll(SetArrayArgument<2>(chunkData, - chunkData + length), + .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), Return(CSErrorCode::Success))); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->response_->appliedindex()); ASSERT_TRUE(response->has_status()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->response_->status()); ASSERT_EQ(memcmp(chunkData, - closure->cntl_->response_attachment().to_string().c_str(), //NOLINT - length), 0); + closure->cntl_->response_attachment() + .to_string() + .c_str(), // NOLINT + length), + 0); delete[] chunkData; } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 非ChunkNotExistError错误 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: GetChunkInfo returns a non ChunkNotExistError error + * Expected: Request failed, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::InternalError)); - // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + // Unable to read chunk files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response->status()); } /** - * 测试OnApply - * 用例:读本地chunk的时候返回错误 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: Error returned when reading local chunk + * Expected: Request failed, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件失败 + .WillRepeatedly( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Failed to read chunk file 
EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillRepeatedly(Return(CSErrorCode::InternalError)); ASSERT_DEATH(opReq->OnApply(3, closure), ""); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0 - * 转发请求给clone manager时出错 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmap in the + * request area has a bit of 0 Error forwarding request to clone manager + * Expected: Request failed, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Clear(1); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); - EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) - .Times(1); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillOnce(Return(false)); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Reading Chunk Files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); + EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)).Times(1); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillOnce(Return(false)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response->status()); } /** - * 测试 OnApplyFromLog - * 预期:啥也没做 + * Testing OnApplyFromLog + * Expected: Nothing done */ { - // 重置closure + // Reset closure closure->Reset(); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } TEST_P(OpRequestTest, RecoverChunkTest) { - // 创建CreateCloneChunkRequest + // Create CreateCloneChunkRequest LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -1082,21 +1040,17 @@ TEST_P(OpRequestTest, RecoverChunkTest) { request->set_optype(CHUNK_OP_RECOVER); request->set_offset(offset); request->set_size(length); - brpc::Controller *cntl = new brpc::Controller(); - ChunkResponse *response = new ChunkResponse(); - UnitTestClosure *closure = new UnitTestClosure(); + brpc::Controller* cntl = new brpc::Controller(); + ChunkResponse* response = new ChunkResponse(); + UnitTestClosure* closure = new UnitTestClosure(); closure->SetCntl(cntl); closure->SetRequest(request); closure->SetResponse(response); std::shared_ptr opReq = - std::make_shared(node_, - cloneMgr_.get(), - cntl, - request, - response, - closure); + std::make_shared(node_, cloneMgr_.get(), cntl, + request, response, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -1116,23 +1070,22 @@ TEST_P(OpRequestTest, RecoverChunkTest) { ASSERT_EQ(length, request->size()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: Request to forward request and return + * CHUNK_OP_STATUS_REDIRECTED */ { - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(false)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(false)); // PeerId leaderId(PEER_STRING); // EXPECT_CALL(*node_, GetLeaderId()) // .WillOnce(Return(leaderId)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); opReq->Process(); 
- // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, @@ -1154,29 +1107,25 @@ TEST_P(OpRequestTest, RecoverChunkTest) { * expect: don't propose to raft,request commit to concurrentApplyModule_ */ { - // 重置closure + // Reset closure closure->Reset(); info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + // Do not read chunk files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, Propose(_)).Times(0); braft::LeaderLeaseStatus status; status.state = braft::LEASE_VALID; EXPECT_CALL(*node_, GetLeaderLeaseStatus(_)) .WillOnce(SetArgPointee<0>(status)); - EXPECT_CALL(*node_, IsLeaseLeader(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*node_, IsLeaseLeader(_)).WillOnce(Return(true)); opReq->Process(); @@ -1193,54 +1142,52 @@ TEST_P(OpRequestTest, RecoverChunkTest) { } /** - * 测试OnApply - * 用例:请求的 chunk 不是 clone chunk - * 预期:直接返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is not a clone chunk + * Expected: Returns CHUNK_OP_STATUS_SUCCESS directly */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 不读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Do not read chunk files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response->status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response->status()); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap都为1 - * 预期:直接返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and all bitmap bits in + * the requested range are 1. Expected: Returns + * CHUNK_OP_STATUS_SUCCESS directly */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Set(); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 不读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Do not read chunk files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->response_->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -1248,31 +1195,29 @@ TEST_P(OpRequestTest, RecoverChunkTest) { closure->response_->status()); } /** - *
测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0 - * 预期:将请求转发给clone manager处理 + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmap in the + * request area has a bit of 0 Expected: Forward request to clone manager + * for processing */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Clear(1); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); - EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) - .Times(1); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillOnce(Return(true)); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Reading Chunk Files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); + EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)).Times(1); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillOnce(Return(true)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(closure->response_->has_status()); @@ -1281,103 +1226,97 @@ TEST_P(OpRequestTest, RecoverChunkTest) { ASSERT_TRUE(closure->isDone_); } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 ChunkNotExistError - * 预期:请求失败,返回 CHUNK_OP_STATUS_CHUNK_NOTEXIST + * Test OnApply + * Scenario: GetChunkInfo returns ChunkNotExistError + * Expected: Request failed, returning CHUNK_OP_STATUS_CHUNK_NOTEXIST */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::ChunkNotExistError)); - // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + // Unable to read chunk files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, response->status()); } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 非ChunkNotExistError错误 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: GetChunkInfo returns a non ChunkNotExistError error + * Expected: Request failed, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::InternalError)); - // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + // Unable to read chunk files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response->status()); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0 - * 转发请求给clone manager时出错 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmap in the + * request area has a bit of 0 Error forwarding request to clone manager + * Expected: Request failed, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; 
info.bitmap->Clear(1); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); - EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) - .Times(1); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillOnce(Return(false)); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Reading Chunk Files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); + EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)).Times(1); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillOnce(Return(false)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response->status()); } /** - * 测试 OnApplyFromLog - * 预期:啥也没做 + * Testing OnApplyFromLog + * Expected: Nothing done */ { - // 重置closure + // Reset closure closure->Reset(); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } INSTANTIATE_TEST_CASE_P( - OpRequestTest, - OpRequestTest, + OpRequestTest, OpRequestTest, ::testing::Values( // chunk size block size, metapagesize std::make_tuple(16U * 1024 * 1024, 4096U, 4096U), diff --git a/test/chunkserver/copyset_epoch_test.cpp b/test/chunkserver/copyset_epoch_test.cpp index f9f80ad50f..810b9c3c5d 100644 --- a/test/chunkserver/copyset_epoch_test.cpp +++ b/test/chunkserver/copyset_epoch_test.cpp @@ -20,26 +20,25 @@ * Author: wudemiao */ - -#include -#include -#include -#include -#include #include #include #include +#include +#include +#include +#include +#include #include "include/chunkserver/chunkserver_common.h" -#include "src/chunkserver/copyset_node.h" -#include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" #include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" -#include "src/common/uuid.h" #include "src/chunkserver/chunk_service.h" +#include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/common/concurrent/concurrent.h" +#include "src/common/uuid.h" #include "src/fs/fs_common.h" +#include "test/chunkserver/chunkserver_test_util.h" #define BRAFT_SNAPSHOT_PATTERN "snapshot_%020" PRId64 @@ -59,9 +58,7 @@ class CopysetEpochTest : public testing::Test { dir1 = uuidGenerator.GenerateUUID(); Exec(("mkdir " + dir1).c_str()); } - virtual void TearDown() { - Exec(("rm -fr " + dir1).c_str()); - } + virtual void TearDown() { Exec(("rm -fr " + dir1).c_str()); } public: std::string dir1; @@ -70,27 +67,23 @@ class CopysetEpochTest : public testing::Test { butil::AtExitManager atExitManager; TEST_F(CopysetEpochTest, DISABLED_basic) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9026; - const char *confs = "127.0.0.1:9026:0"; + const char* confs = "127.0.0.1:9026:0"; int snapshotInterval = 1; int electionTimeoutMs = 3000; - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); std::string snapshotPath = dir1 + "/4295067297/raft_snapshot"; uint64_t lastIncludeIndex = 0; /** - * 启动一个chunkserver + * Start a chunkserver */ std::string copysetdir = "local://./" + dir1; auto startChunkServerFunc = [&] { - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - 
snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); }; Thread t1(startChunkServerFunc); @@ -105,111 +98,95 @@ TEST_F(CopysetEpochTest, DISABLED_basic) { ::usleep(1000 * electionTimeoutMs); { - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); } - CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance(); + CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance(); auto node = nodeManager.GetCopysetNode(logicPoolId, copysetId); ASSERT_EQ(1, node->GetConfEpoch()); std::string confEpochPath1 = snapshotPath; - butil::string_appendf(&confEpochPath1, - "/" BRAFT_SNAPSHOT_PATTERN, + butil::string_appendf(&confEpochPath1, "/" BRAFT_SNAPSHOT_PATTERN, ++lastIncludeIndex); confEpochPath1.append("/"); confEpochPath1.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath1)); - // 等待生成快照 + // Waiting for snapshot generation ::sleep(2 * snapshotInterval); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be + // executed to load the epoch from the snapshot node->Fini(); node->Run(); { - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); } ASSERT_EQ(2, node->GetConfEpoch()); std::string confEpochPath2 = snapshotPath; - butil::string_appendf(&confEpochPath2, - "/" BRAFT_SNAPSHOT_PATTERN, + butil::string_appendf(&confEpochPath2, "/" BRAFT_SNAPSHOT_PATTERN, ++lastIncludeIndex); confEpochPath2.append("/"); confEpochPath2.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath2)); - // 等待生成快照 + // Waiting for snapshot generation ::sleep(2 * snapshotInterval); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be + // executed to load the epoch from the snapshot node->Fini(); node->Run(); { - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); } ASSERT_EQ(3, node->GetConfEpoch()); std::string confEpochPath3 = snapshotPath; - butil::string_appendf(&confEpochPath3, - "/" BRAFT_SNAPSHOT_PATTERN, + butil::string_appendf(&confEpochPath3, "/" BRAFT_SNAPSHOT_PATTERN, ++lastIncludeIndex); confEpochPath3.append("/"); confEpochPath3.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath3)); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be + // executed to load the epoch from the snapshot node->Fini(); node->Run(); { - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); } ASSERT_EQ(4, node->GetConfEpoch()); std::string confEpochPath4 = snapshotPath; - butil::string_appendf(&confEpochPath4, - "/" 
BRAFT_SNAPSHOT_PATTERN, + butil::string_appendf(&confEpochPath4, "/" BRAFT_SNAPSHOT_PATTERN, ++lastIncludeIndex); confEpochPath4.append("/"); confEpochPath4.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath4)); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be + // executed to load the epoch from the snapshot node->Fini(); node->Run(); { - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); } ASSERT_EQ(5, node->GetConfEpoch()); std::string confEpochPath5 = snapshotPath; - butil::string_appendf(&confEpochPath5, - "/" BRAFT_SNAPSHOT_PATTERN, + butil::string_appendf(&confEpochPath5, "/" BRAFT_SNAPSHOT_PATTERN, ++lastIncludeIndex); confEpochPath5.append("/"); confEpochPath5.append(kCurveConfEpochFilename); diff --git a/test/chunkserver/copyset_node_manager_test.cpp b/test/chunkserver/copyset_node_manager_test.cpp index 7103ba0697..fe4f0472e3 100644 --- a/test/chunkserver/copyset_node_manager_test.cpp +++ b/test/chunkserver/copyset_node_manager_test.cpp @@ -20,14 +20,15 @@ * Author: wudemiao */ +#include "src/chunkserver/copyset_node_manager.h" + +#include #include #include -#include #include #include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/copyset_node.h" #include "test/chunkserver/mock_copyset_node.h" @@ -35,10 +36,10 @@ namespace curve { namespace chunkserver { using ::testing::_; -using ::testing::Return; -using ::testing::NotNull; -using ::testing::Mock; using ::testing::DoAll; +using ::testing::Mock; +using ::testing::NotNull; +using ::testing::Return; using ::testing::ReturnArg; using ::testing::SetArgPointee; @@ -72,20 +73,19 @@ class CopysetNodeManagerTest : public ::testing::Test { LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); ASSERT_TRUE(nullptr != fs); defaultOptions_.localFileSystem = fs; - defaultOptions_.chunkFilePool = - std::make_shared(fs); + defaultOptions_.chunkFilePool = std::make_shared(fs); defaultOptions_.trash = std::make_shared(); } void TearDown() { - CopysetNodeManager *copysetNodeManager = + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); copysetNodeManager->Fini(); ::system("rm -rf node_manager_test"); } protected: - CopysetNodeOptions defaultOptions_; + CopysetNodeOptions defaultOptions_; ConcurrentApplyModule concurrentModule_; }; @@ -93,34 +93,32 @@ TEST_F(CopysetNodeManagerTest, ErrorOptionsTest) { LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; Configuration conf; - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); defaultOptions_.chunkDataUri = "//."; defaultOptions_.logUri = "//."; ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); - ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId, - conf)); + ASSERT_FALSE( + copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); } TEST_F(CopysetNodeManagerTest, ServiceNotStartTest) { LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; Configuration conf; - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); 
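    // Init() does not reload copysets, so LoadFinished() is still false
    // here; Run() completes the reload, but CreateCopysetNode() below is
    // still expected to fail because the copyset service was never started.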
ASSERT_FALSE(copysetNodeManager->LoadFinished()); ASSERT_EQ(0, copysetNodeManager->Run()); - ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId, - conf)); + ASSERT_FALSE( + copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); ASSERT_TRUE(copysetNodeManager->LoadFinished()); /* null server */ { - brpc::Server *server = nullptr; + brpc::Server* server = nullptr; int port = 9000; butil::EndPoint addr(butil::IP_ANY, port); ASSERT_EQ(-1, copysetNodeManager->AddService(server, addr)); @@ -131,7 +129,7 @@ TEST_F(CopysetNodeManagerTest, NormalTest) { LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; Configuration conf; - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); // start server brpc::Server server; @@ -143,21 +141,19 @@ TEST_F(CopysetNodeManagerTest, NormalTest) { ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); - // 本地 copyset 未加载完成,则无法创建新的copyset - ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId, - conf)); + // A new copyset cannot be created until the local copysets have + // finished loading + ASSERT_FALSE( + copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); ASSERT_EQ(0, copysetNodeManager->Run()); - ASSERT_TRUE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId, - conf)); + ASSERT_TRUE( + copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); ASSERT_TRUE(copysetNodeManager->IsExist(logicPoolId, copysetId)); - // 重复创建 - ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId, - conf)); + // Duplicate creation + ASSERT_FALSE( + copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); auto copysetNode1 = - copysetNodeManager->GetCopysetNode(logicPoolId, copysetId); + copysetNodeManager->GetCopysetNode(logicPoolId, copysetId); ASSERT_TRUE(nullptr != copysetNode1); auto copysetNode2 = copysetNodeManager->GetCopysetNode(logicPoolId + 1, copysetId + 1); @@ -168,8 +164,7 @@ TEST_F(CopysetNodeManagerTest, NormalTest) { copysetNodeManager->GetAllCopysetNodes(&copysetNodes); ASSERT_EQ(1, copysetNodes.size()); - ASSERT_TRUE(copysetNodeManager->DeleteCopysetNode(logicPoolId, - copysetId)); + ASSERT_TRUE(copysetNodeManager->DeleteCopysetNode(logicPoolId, copysetId)); ASSERT_FALSE(copysetNodeManager->IsExist(logicPoolId, copysetId)); ASSERT_EQ(0, copysetNodeManager->Fini()); @@ -178,46 +173,49 @@ TEST_F(CopysetNodeManagerTest, NormalTest) { } TEST_F(CopysetNodeManagerTest, CheckCopysetTest) { - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); - std::shared_ptr mockNode - = std::make_shared(); + std::shared_ptr mockNode = + std::make_shared(); - // 测试copyset node manager还没运行 + // Test the case where the copyset node manager has not been run yet EXPECT_CALL(*mockNode, GetStatus(_)).Times(0); EXPECT_CALL(*mockNode, GetLeaderStatus(_)).Times(0); ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(mockNode)); - // 启动copyset node manager + // Start the copyset node manager ASSERT_EQ(0, copysetNodeManager->Run()); - // 测试node为空 + // Test the case where the node is null EXPECT_CALL(*mockNode, GetStatus(_)).Times(0); EXPECT_CALL(*mockNode, GetLeaderStatus(_)).Times(0); ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(nullptr)); - // 测试无法获取到leader status的情况 + // Test the case where the leader status cannot be obtained
EXPECT_CALL(*mockNode, GetStatus(_)).Times(0); NodeStatus leaderStatus; EXPECT_CALL(*mockNode, GetLeaderStatus(_)) - .Times(defaultOptions_.checkRetryTimes) - .WillRepeatedly(DoAll(SetArgPointee<0>(leaderStatus), Return(false))); + .Times(defaultOptions_.checkRetryTimes) + .WillRepeatedly(DoAll(SetArgPointee<0>(leaderStatus), Return(false))); ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(mockNode)); leaderStatus.leader_id.parse("127.0.0.1:9043:0"); - // 测试leader first_index 大于 follower last_index的情况 + // Test the case where the leader first_index is greater than the + // follower last_index leaderStatus.first_index = 1000; NodeStatus followerStatus; followerStatus.last_index = 999; - EXPECT_CALL(*mockNode, GetStatus(_)).Times(1) - .WillOnce(SetArgPointee<0>(followerStatus)); + EXPECT_CALL(*mockNode, GetStatus(_)) + .Times(1) + .WillOnce(SetArgPointee<0>(followerStatus)); EXPECT_CALL(*mockNode, GetLeaderStatus(_)) - .WillOnce(DoAll(SetArgPointee<0>(leaderStatus), Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(leaderStatus), Return(true))); ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(mockNode)); - // 测试可以获取到leader status,且follower当前不在安装快照 的情况 + // Test the case where the leader status can be obtained and the + // follower is not currently installing a snapshot leaderStatus.first_index = 1; leaderStatus.committed_index = 2000; NodeStatus status1; @@ -233,14 +231,14 @@ TEST_F(CopysetNodeManagerTest, CheckCopysetTest) { status4.last_index = 1666; status4.known_applied_index = 1001; EXPECT_CALL(*mockNode, GetStatus(_)) - .Times(4) - .WillOnce(SetArgPointee<0>(status1)) - .WillOnce(SetArgPointee<0>(status2)) - .WillOnce(SetArgPointee<0>(status3)) - .WillOnce(SetArgPointee<0>(status4)); + .Times(4) + .WillOnce(SetArgPointee<0>(status1)) + .WillOnce(SetArgPointee<0>(status2)) + .WillOnce(SetArgPointee<0>(status3)) + .WillOnce(SetArgPointee<0>(status4)); EXPECT_CALL(*mockNode, GetLeaderStatus(_)) - .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<0>(leaderStatus), Return(true))); + .Times(4) + .WillRepeatedly(DoAll(SetArgPointee<0>(leaderStatus), Return(true))); ASSERT_TRUE(copysetNodeManager->CheckCopysetUntilLoadFinished(mockNode)); } @@ -248,7 +246,7 @@ TEST_F(CopysetNodeManagerTest, ReloadTest) { LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; Configuration conf; - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); // start server brpc::Server server; @@ -258,15 +256,14 @@ TEST_F(CopysetNodeManagerTest, ReloadTest) { LOG(FATAL) << "Fail to start Server"; } - // 构造初始环境 + // Construct the initial environment ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); ASSERT_EQ(0, copysetNodeManager->Run()); - // 创建多个copyset + // Create multiple copysets int copysetNum = 5; for (int i = 0; i < copysetNum; ++i) { ASSERT_TRUE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId + i, - conf)); + copysetId + i, conf)); } std::vector> copysetNodes; copysetNodeManager->GetAllCopysetNodes(&copysetNodes); @@ -276,11 +273,10 @@ TEST_F(CopysetNodeManagerTest, ReloadTest) { copysetNodeManager->GetAllCopysetNodes(&copysetNodes); ASSERT_EQ(0, copysetNodes.size()); - - // 本地 copyset 未加载完成,则无法创建新的copyset + // A new copyset cannot be created until the local copysets have + // finished loading ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId + 5, - conf)); + copysetId + 5, conf)); // reload copysets when loadConcurrency <
copysetNum std::cout << "Test ReloadCopysets when loadConcurrency=3" << std::endl; diff --git a/test/chunkserver/copyset_node_test.cpp b/test/chunkserver/copyset_node_test.cpp index 46ed6a4fdb..c81a4b9358 100644 --- a/test/chunkserver/copyset_node_test.cpp +++ b/test/chunkserver/copyset_node_test.cpp @@ -20,46 +20,46 @@ * Author: wudemiao */ +#include "src/chunkserver/copyset_node.h" + +#include #include #include -#include -#include #include -#include -#include #include +#include +#include +#include -#include "test/fs/mock_local_filesystem.h" -#include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/copyset_node.h" -#include "test/chunkserver/fake_datastore.h" -#include "test/chunkserver/mock_node.h" -#include "src/chunkserver/conf_epoch_file.h" #include "proto/heartbeat.pb.h" +#include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "src/chunkserver/conf_epoch_file.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/raftsnapshot/curve_snapshot_attachment.h" +#include "test/chunkserver/fake_datastore.h" #include "test/chunkserver/mock_curve_filesystem_adaptor.h" -#include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "test/chunkserver/mock_node.h" +#include "test/fs/mock_local_filesystem.h" namespace curve { namespace chunkserver { using ::testing::_; -using ::testing::Invoke; -using ::testing::Return; using ::testing::AnyNumber; -using ::testing::Matcher; +using ::testing::AtLeast; using ::testing::DoAll; -using ::testing::SetArgPointee; -using ::testing::SetArgReferee; using ::testing::InSequence; -using ::testing::AtLeast; +using ::testing::Invoke; +using ::testing::Matcher; +using ::testing::Return; using ::testing::SaveArgPointee; +using ::testing::SetArgPointee; +using ::testing::SetArgReferee; -using curve::fs::MockLocalFileSystem; +using curve::chunkserver::concurrent::ConcurrentApplyOption; using curve::fs::FileSystemType; using curve::fs::MockLocalFileSystem; -using curve::chunkserver::concurrent::ConcurrentApplyOption; const char copysetUri[] = "local://./copyset_node_test"; const int port = 9044; @@ -67,52 +67,36 @@ const int port = 9044; class FakeSnapshotReader : public braft::SnapshotReader { public: std::string get_path() { - /* 返回一个不存在的 path */ + /*Returns a non-existent path*/ return std::string("/1002093939/temp/238408034"); } - void list_files(std::vector *files) { - return; - } - int load_meta(braft::SnapshotMeta *meta) { - return 1; - } - std::string generate_uri_for_copy() { - return std::string(""); - } + void list_files(std::vector* files) { return; } + int load_meta(braft::SnapshotMeta* meta) { return 1; } + std::string generate_uri_for_copy() { return std::string(""); } }; class FakeSnapshotWriter : public braft::SnapshotWriter { public: std::string get_path() { - /* 返回一个不存在的 path */ + /*Returns a non-existent path*/ return std::string("."); } - void list_files(std::vector *files) { - return; - } - virtual int save_meta(const braft::SnapshotMeta &meta) { - return 0; - } + void list_files(std::vector* files) { return; } + virtual int save_meta(const braft::SnapshotMeta& meta) { return 0; } - virtual int add_file(const std::string &filename) { - return 0; - } + virtual int add_file(const std::string& filename) { return 0; } - virtual int add_file(const std::string &filename, - const ::google::protobuf::Message *file_meta) { + virtual int add_file(const std::string& filename, + const ::google::protobuf::Message* file_meta) { return 0; } - virtual int remove_file(const std::string 
&filename) { - return 0; - } + virtual int remove_file(const std::string& filename) { return 0; } }; class FakeClosure : public braft::Closure { public: - void Run() { - std::cerr << "FakeClosure run" << std::endl; - } + void Run() { std::cerr << "FakeClosure run" << std::endl; } }; class CopysetNodeTest : public ::testing::Test { @@ -140,24 +124,21 @@ class CopysetNodeTest : public ::testing::Test { LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); ASSERT_TRUE(nullptr != fs); defaultOptions_.localFileSystem = fs; - defaultOptions_.chunkFilePool = - std::make_shared(fs); + defaultOptions_.chunkFilePool = std::make_shared(fs); defaultOptions_.trash = std::make_shared(); defaultOptions_.enableOdsyncWhenOpenChunkFile = true; } - void TearDown() { - ::system("rm -rf copyset_node_test"); - } + void TearDown() { ::system("rm -rf copyset_node_test"); } protected: - CopysetNodeOptions defaultOptions_; + CopysetNodeOptions defaultOptions_; ConcurrentApplyModule concurrentModule_; }; TEST_F(CopysetNodeTest, error_test) { - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT std::string rmCmd("rm -f "); rmCmd += kCurveConfEpochFilename; @@ -170,23 +151,24 @@ TEST_F(CopysetNodeTest, error_test) { files.push_back("test-1.txt"); files.push_back("test-2.txt"); - const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT + const char* json = + "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":" + "774340440}"; // NOLINT std::string jsonStr(json); CopysetNode copysetNode(logicPoolID, copysetID, conf); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); FakeClosure closure; FakeSnapshotWriter writer; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs)); - + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)).Times(1) + EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)) + .Times(1) .WillOnce(Return(jsonStr.size())); EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0)); @@ -210,10 +192,10 @@ TEST_F(CopysetNodeTest, error_test) { ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); FakeClosure closure; FakeSnapshotWriter writer; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); @@ -232,26 +214,30 @@ TEST_F(CopysetNodeTest, error_test) { files.push_back("test-1.txt"); files.push_back("test-2.txt"); - const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT + const char* json = + "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":" + "774340440}"; // NOLINT std::string jsonStr(json); CopysetNode copysetNode(logicPoolID, copysetID, conf); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); FakeClosure closure; FakeSnapshotWriter writer; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + 
std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)).Times(1) + EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)) + .Times(1) .WillOnce(Return(jsonStr.size())); EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0)); - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); copysetNode.on_snapshot_save(&writer, &closure); @@ -267,7 +253,9 @@ TEST_F(CopysetNodeTest, error_test) { files.push_back("test-1.txt"); files.push_back("test-2.txt"); - const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT + const char* json = + "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":" + "774340440}"; // NOLINT std::string jsonStr(json); CopysetNode copysetNode(logicPoolID, copysetID, conf); @@ -278,19 +266,21 @@ TEST_F(CopysetNodeTest, error_test) { ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); FakeClosure closure; FakeSnapshotWriter writer; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)).Times(1) + EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)) + .Times(1) .WillOnce(Return(jsonStr.size())); EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0)); - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); copysetNode.on_snapshot_save(&writer, &closure); @@ -328,10 +318,10 @@ TEST_F(CopysetNodeTest, error_test) { CopysetNode copysetNode(logicPoolID, copysetID, conf); FakeClosure closure; FakeSnapshotReader reader; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); DataStoreOptions options; @@ -355,10 +345,10 @@ TEST_F(CopysetNodeTest, error_test) { CopysetID copysetID = 1345; Configuration conf; CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; FakeClosure closure; FakeSnapshotReader reader; copysetNode.SetLocalFileSystem(mockfs); @@ -387,10 +377,10 @@ TEST_F(CopysetNodeTest, error_test) { CopysetNode copysetNode(logicPoolID, copysetID, conf); FakeClosure closure; FakeSnapshotReader reader; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; 
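        // Inject the mock filesystem and a ConfEpochFile built on top of it,
        // so the EXPECT_CALLs below can stub Open/Write/Fsync/Close and List
        // during on_snapshot_save().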
copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); @@ -409,19 +399,17 @@ TEST_F(CopysetNodeTest, error_test) { CopysetNode copysetNode(logicPoolID, copysetID, conf); FakeClosure closure; FakeSnapshotReader reader; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; - MockCurveFilesystemAdaptor* cfa = - new MockCurveFilesystemAdaptor(); + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + MockCurveFilesystemAdaptor* cfa = new MockCurveFilesystemAdaptor(); auto sfs = new scoped_refptr(cfa); copysetNode.SetSnapshotFileSystem(sfs); copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*cfa, - delete_file(_, _)).Times(1).WillOnce(Return(false)); + EXPECT_CALL(*cfa, delete_file(_, _)).Times(1).WillOnce(Return(false)); ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); } @@ -434,22 +422,19 @@ TEST_F(CopysetNodeTest, error_test) { CopysetNode copysetNode(logicPoolID, copysetID, conf); FakeClosure closure; FakeSnapshotReader reader; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; defaultOptions_.localFileSystem = mockfs; - MockCurveFilesystemAdaptor* cfa = - new MockCurveFilesystemAdaptor(); + MockCurveFilesystemAdaptor* cfa = new MockCurveFilesystemAdaptor(); auto sfs = new scoped_refptr(cfa); copysetNode.SetSnapshotFileSystem(sfs); copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*cfa, - delete_file(_, _)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*cfa, - rename(_, _)).Times(1).WillOnce(Return(false)); + EXPECT_CALL(*cfa, delete_file(_, _)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*cfa, rename(_, _)).Times(1).WillOnce(Return(false)); ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); } @@ -466,27 +451,21 @@ TEST_F(CopysetNodeTest, error_test) { CopysetNode copysetNode(logicPoolID, copysetID, conf); FakeClosure closure; FakeSnapshotReader reader; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; defaultOptions_.localFileSystem = mockfs; - MockCurveFilesystemAdaptor* cfa = - new MockCurveFilesystemAdaptor(); + MockCurveFilesystemAdaptor* cfa = new MockCurveFilesystemAdaptor(); auto sfs = new scoped_refptr(cfa); copysetNode.SetSnapshotFileSystem(sfs); copysetNode.SetLocalFileSystem(mockfs); copysetNode.SetConfEpochFile(std::move(epochFile)); - EXPECT_CALL(*mockfs, DirExists(_)).Times(1) - .WillOnce(Return(true)); - EXPECT_CALL(*cfa, - delete_file(_, _)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*cfa, - rename(_, _)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*mockfs, FileExists(_)).Times(1) - .WillOnce(Return(true)); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*cfa, delete_file(_, _)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*cfa, rename(_, _)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*mockfs, 
FileExists(_)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); LOG(INFO) << "OK"; @@ -545,7 +524,7 @@ TEST_F(CopysetNodeTest, error_test) { copysetNode.Fini(); ::system(rmCmd.c_str()); } - /* load: logic pool id 错误 */ + /* Load: logic pool id error */ { LogicPoolID logicPoolID = 123; CopysetID copysetID = 1345; @@ -554,18 +533,15 @@ TEST_F(CopysetNodeTest, error_test) { CopysetNode copysetNode(logicPoolID, copysetID, conf); auto fs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); ConfEpochFile confEpochFile(fs); - ASSERT_EQ(0, - confEpochFile.Save(kCurveConfEpochFilename, - logicPoolID + 1, - copysetID, - epoch)); + ASSERT_EQ(0, confEpochFile.Save(kCurveConfEpochFilename, + logicPoolID + 1, copysetID, epoch)); defaultOptions_.localFileSystem = fs; ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename)); copysetNode.Fini(); ::system(rmCmd.c_str()); } - /* load: copyset id 错误 */ + /* Load: copyset id error */ { LogicPoolID logicPoolID = 123; CopysetID copysetID = 1345; @@ -575,11 +551,8 @@ TEST_F(CopysetNodeTest, error_test) { ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); auto fs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); ConfEpochFile confEpochFile(fs); - ASSERT_EQ(0, - confEpochFile.Save(kCurveConfEpochFilename, - logicPoolID, - copysetID + 1, - epoch)); + ASSERT_EQ(0, confEpochFile.Save(kCurveConfEpochFilename, logicPoolID, + copysetID + 1, epoch)); defaultOptions_.localFileSystem = fs; ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename)); @@ -589,8 +562,8 @@ TEST_F(CopysetNodeTest, error_test) { } TEST_F(CopysetNodeTest, get_conf_change) { - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT std::string rmCmd("rm -f "); rmCmd += kCurveConfEpochFilename; @@ -607,12 +580,11 @@ TEST_F(CopysetNodeTest, get_conf_change) { conf1.add_peer(peer1); conf2.add_peer(peer1); - // 当前没有在做配置变更 + // There are currently no configuration changes in progress { CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); copysetNode.SetCopysetNode(mockNode); @@ -628,12 +600,11 @@ TEST_F(CopysetNodeTest, get_conf_change) { EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer)); EXPECT_EQ(ConfigChangeType::NONE, type); } - // 当前正在Add Peer + // Currently adding Peer { CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); copysetNode.SetCopysetNode(mockNode); @@ -643,14 +614,12 @@ TEST_F(CopysetNodeTest, get_conf_change) { copysetNode.on_leader_start(8); - EXPECT_CALL(*mockNode, add_peer(_, _)) - .Times(1); + EXPECT_CALL(*mockNode, add_peer(_, _)).Times(1); EXPECT_CALL(*mockNode, remove_peer(_, _)) - .WillOnce( - Invoke([](const PeerId& peer, braft::Closure* done) { - done->status().set_error(-1, - "another config change is ongoing"); - })); + .WillOnce(Invoke([](const PeerId& peer, braft::Closure* done) { + done->status().set_error(-1, + "another config change 
is ongoing"); + })); Peer addPeer; addPeer.set_address("127.0.0.1:3202:0"); Peer removePeer; @@ -666,12 +635,11 @@ TEST_F(CopysetNodeTest, get_conf_change) { EXPECT_EQ(ConfigChangeType::ADD_PEER, type); EXPECT_EQ(addPeer.address(), alterPeer.address()); } - // 当前正在Remove Peer + // Currently removing Peer { CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); copysetNode.SetCopysetNode(mockNode); @@ -681,13 +649,12 @@ TEST_F(CopysetNodeTest, get_conf_change) { copysetNode.on_leader_start(8); - EXPECT_CALL(*mockNode, remove_peer(_, _)) - .Times(1); + EXPECT_CALL(*mockNode, remove_peer(_, _)).Times(1); EXPECT_CALL(*mockNode, add_peer(_, _)) .WillOnce( Invoke([](const braft::PeerId& peer, braft::Closure* done) { - done->status().set_error(-1, - "another config change is ongoing"); + done->status().set_error( + -1, "another config change is ongoing"); })); Peer addPeer1; addPeer1.set_address("127.0.0.1:3202:0"); @@ -704,12 +671,11 @@ TEST_F(CopysetNodeTest, get_conf_change) { EXPECT_EQ(ConfigChangeType::REMOVE_PEER, type); EXPECT_EQ(removePeer.address(), alterPeer.address()); } - // 当前正在Transfer leader + // Currently transferring leader { CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); copysetNode.SetCopysetNode(mockNode); @@ -742,12 +708,11 @@ TEST_F(CopysetNodeTest, get_conf_change) { EXPECT_EQ(ConfigChangeType::TRANSFER_LEADER, type); EXPECT_EQ(transferee1.address(), alterPeer.address()); } - // 当前正在Change Peer + // Currently changing Peer { CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); copysetNode.SetCopysetNode(mockNode); @@ -757,8 +722,7 @@ TEST_F(CopysetNodeTest, get_conf_change) { copysetNode.on_leader_start(8); - EXPECT_CALL(*mockNode, change_peers(_, _)) - .Times(1); + EXPECT_CALL(*mockNode, change_peers(_, _)).Times(1); Peer addPeer1; addPeer1.set_address("127.0.0.1:3201:0"); @@ -778,7 +742,7 @@ TEST_F(CopysetNodeTest, get_conf_change) { EXPECT_EQ(ConfigChangeType::CHANGE_PEER, type); EXPECT_EQ(addPeer1.address(), alterPeer.address()); } - // leader term小于0 + // leader term is less than 0 { CopysetNode copysetNode(logicPoolID, copysetID, conf); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); @@ -792,8 +756,8 @@ TEST_F(CopysetNodeTest, get_conf_change) { } TEST_F(CopysetNodeTest, get_hash) { - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT std::string rmCmd("rm -f "); rmCmd += kCurveConfEpochFilename; @@ -816,21 +780,26 @@ TEST_F(CopysetNodeTest, get_hash) { ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - // 生成多个有数据的文件 - ::system("echo \"abcddddddddd333\" >" - "copyset_node_test/8589934594/data/test-2.txt"); - ::system("echo \"mmmmmmmm\" >" - "copyset_node_test/8589934594/data/test-4.txt"); - ::system("dd if=/dev/zero of=" - "copyset_node_test/8589934594/data/test-3.txt bs=512 count=15"); // NOLINT - ::system("echo \"eeeeeeeeeee\" > " - 
"copyset_node_test/8589934594/data/test-5.txt"); + // Generate multiple files with data + ::system( + "echo \"abcddddddddd333\" >" + "copyset_node_test/8589934594/data/test-2.txt"); + ::system( + "echo \"mmmmmmmm\" >" + "copyset_node_test/8589934594/data/test-4.txt"); + ::system( + "dd if=/dev/zero of=" + "copyset_node_test/8589934594/data/test-3.txt bs=512 count=15"); // NOLINT + ::system( + "echo \"eeeeeeeeeee\" > " + "copyset_node_test/8589934594/data/test-5.txt"); ::system("touch copyset_node_test/8589934594/data/test-1.txt"); - ::system("echo \"wwwww\" > " - "copyset_node_test/8589934594/data/test-1.txt"); + ::system( + "echo \"wwwww\" > " + "copyset_node_test/8589934594/data/test-1.txt"); - // 获取hash + // Get hash ASSERT_EQ(0, copysetNode.GetHash(&hash)); ASSERT_STREQ(hashValue.c_str(), hash.c_str()); ::system("rm -fr copyset_node_test/8589934594"); @@ -838,26 +807,32 @@ TEST_F(CopysetNodeTest, get_hash) { { std::string hash; - // 使用不同的copyset id,让目录不一样 + // Using different copyset IDs to make the directory different CopysetNode copysetNode(logicPoolID, copysetID + 1, conf); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - // 生成多个有数据的文件,并且交换生成文件的顺序 + // Generate multiple files with data and exchange the order of generated + // files ::system("touch copyset_node_test/8589934595/data/test-1.txt"); - ::system("echo \"wwwww\" > " - "copyset_node_test/8589934595/data/test-1.txt"); - - ::system("echo \"mmmmmmmm\" > " - "copyset_node_test/8589934595/data/test-4.txt"); - ::system("echo \"eeeeeeeeeee\" > " - "copyset_node_test/8589934595/data/test-5.txt"); - ::system("dd if=/dev/zero of=" - "copyset_node_test/8589934595/data/test-3.txt bs=512 count=15"); // NOLINT - ::system("echo \"abcddddddddd333\" > " - "copyset_node_test/8589934595/data/test-2.txt"); - - // 获取hash + ::system( + "echo \"wwwww\" > " + "copyset_node_test/8589934595/data/test-1.txt"); + + ::system( + "echo \"mmmmmmmm\" > " + "copyset_node_test/8589934595/data/test-4.txt"); + ::system( + "echo \"eeeeeeeeeee\" > " + "copyset_node_test/8589934595/data/test-5.txt"); + ::system( + "dd if=/dev/zero of=" + "copyset_node_test/8589934595/data/test-3.txt bs=512 count=15"); // NOLINT + ::system( + "echo \"abcddddddddd333\" > " + "copyset_node_test/8589934595/data/test-2.txt"); + + // Get hash ASSERT_EQ(0, copysetNode.GetHash(&hash)); ASSERT_STREQ(hashValue.c_str(), hash.c_str()); ::system("rm -fr copyset_node_test/8589934595"); @@ -868,18 +843,17 @@ TEST_F(CopysetNodeTest, get_hash) { std::string hash; CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); + std::shared_ptr mockfs = + std::make_shared(); copysetNode.SetLocalFileSystem(mockfs); std::vector files; files.push_back("test-1.txt"); - - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetNode.GetHash(&hash)); } @@ -888,13 +862,14 @@ TEST_F(CopysetNodeTest, get_hash) { { std::string hash; CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); + std::shared_ptr mockfs = + std::make_shared(); copysetNode.SetLocalFileSystem(mockfs); std::vector files; - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); ASSERT_EQ(0, copysetNode.GetHash(&hash)); @@ 
-905,18 +880,17 @@ TEST_F(CopysetNodeTest, get_hash) { { std::string hash; CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); + std::shared_ptr mockfs = + std::make_shared(); copysetNode.SetLocalFileSystem(mockfs); std::vector files; files.push_back("test-1.txt"); - - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetNode.GetHash(&hash)); } @@ -925,20 +899,18 @@ TEST_F(CopysetNodeTest, get_hash) { { std::string hash; CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); + std::shared_ptr mockfs = + std::make_shared(); copysetNode.SetLocalFileSystem(mockfs); std::vector files; files.push_back("test-1.txt"); - - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(3)); - EXPECT_CALL(*mockfs, Fstat(_, _)).Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(3)); + EXPECT_CALL(*mockfs, Fstat(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetNode.GetHash(&hash)); } @@ -949,49 +921,49 @@ TEST_F(CopysetNodeTest, get_hash) { struct stat fileInfo; fileInfo.st_size = 1024; CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); + std::shared_ptr mockfs = + std::make_shared(); copysetNode.SetLocalFileSystem(mockfs); std::vector files; files.push_back("test-1.txt"); - - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(3)); - EXPECT_CALL(*mockfs, Fstat(_, _)).Times(1) + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(3)); + EXPECT_CALL(*mockfs, Fstat(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); - EXPECT_CALL(*mockfs, Read(_, _, _, _)).Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*mockfs, Read(_, _, _, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetNode.GetHash(&hash)); } // List success, open success, fstat success, read success { - char *buff = new (std::nothrow) char[1024]; + char* buff = new (std::nothrow) char[1024]; ::memset(buff, 'a', 1024); std::string hash; struct stat fileInfo; fileInfo.st_size = 1024; CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); + std::shared_ptr mockfs = + std::make_shared(); copysetNode.SetLocalFileSystem(mockfs); std::vector files; files.push_back("test-1.txt"); - - EXPECT_CALL(*mockfs, List(_, _)).Times(1) + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(3)); - EXPECT_CALL(*mockfs, Fstat(_, _)).Times(1) + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(3)); + EXPECT_CALL(*mockfs, Fstat(_, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); - EXPECT_CALL(*mockfs, Read(_, _, _, _)).Times(1) + EXPECT_CALL(*mockfs, Read(_, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<1>(*buff), Return(1024))); ASSERT_EQ(0, copysetNode.GetHash(&hash)); @@ -1002,40 +974,38 @@ 
TEST_F(CopysetNodeTest, get_leader_status) { LogicPoolID logicPoolID = 1; CopysetID copysetID = 1; Configuration conf; - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); CopysetNode copysetNode(logicPoolID, copysetID, conf); copysetNode.SetCopysetNode(mockNode); - // 当前peer不是leader,且当前无leader + // The current peer is not a leader, and there is currently no leader { NodeStatus status; EXPECT_CALL(*mockNode, get_status(_)) - .WillOnce(SetArgPointee<0>(status)); + .WillOnce(SetArgPointee<0>(status)); NodeStatus leaderStatus; ASSERT_FALSE(copysetNode.GetLeaderStatus(&leaderStatus)); } - // 当前peer为leader + // The current peer is the leader { NodeStatus status; status.leader_id.parse("127.0.0.1:3200:0"); status.peer_id = status.leader_id; status.committed_index = 6666; EXPECT_CALL(*mockNode, get_status(_)) - .WillOnce(SetArgPointee<0>(status)); + .WillOnce(SetArgPointee<0>(status)); NodeStatus leaderStatus; ASSERT_TRUE(copysetNode.GetLeaderStatus(&leaderStatus)); - ASSERT_EQ(status.committed_index, - leaderStatus.committed_index); + ASSERT_EQ(status.committed_index, leaderStatus.committed_index); } - // 存在leader,但不是当前peer + // There is a leader, but it is not the current peer { - // 模拟启动chunkserver - CopysetNodeManager* copysetNodeManager - = &CopysetNodeManager::GetInstance(); + // Simulate starting chunkserver + CopysetNodeManager* copysetNodeManager = + &CopysetNodeManager::GetInstance(); ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); ASSERT_EQ(0, copysetNodeManager->Run()); PeerId leader_peer("127.0.0.1:9044:0"); @@ -1044,17 +1014,15 @@ TEST_F(CopysetNodeTest, get_leader_status) { if (server.Start(port, NULL) != 0) { LOG(FATAL) << "Fail to start Server"; } - // 构造leader copyset + // Construct a leader copyset ASSERT_TRUE(copysetNodeManager->CreateCopysetNode(logicPoolID, - copysetID, - conf)); - auto leaderNode = copysetNodeManager->GetCopysetNode(logicPoolID, - copysetID); + copysetID, conf)); + auto leaderNode = + copysetNodeManager->GetCopysetNode(logicPoolID, copysetID); ASSERT_TRUE(nullptr != leaderNode); - // 设置预期值 - std::shared_ptr mockLeader - = std::make_shared(logicPoolID, - copysetID); + // Set expected values + std::shared_ptr mockLeader = + std::make_shared(logicPoolID, copysetID); leaderNode->SetCopysetNode(mockLeader); NodeStatus mockLeaderStatus; mockLeaderStatus.leader_id = leader_peer; @@ -1062,16 +1030,17 @@ TEST_F(CopysetNodeTest, get_leader_status) { mockLeaderStatus.committed_index = 10000; mockLeaderStatus.known_applied_index = 6789; EXPECT_CALL(*mockLeader, get_status(_)) - .WillRepeatedly(SetArgPointee<0>(mockLeaderStatus)); + .WillRepeatedly(SetArgPointee<0>(mockLeaderStatus)); - // 测试通过follower的node获取leader的committed index + // Test obtaining the committed index of the leader through the node of + // the follower NodeStatus followerStatus; followerStatus.leader_id = leader_peer; followerStatus.peer_id.parse("127.0.0.1:3201:0"); followerStatus.committed_index = 3456; followerStatus.known_applied_index = 3456; EXPECT_CALL(*mockNode, get_status(_)) - .WillOnce(SetArgPointee<0>(followerStatus)); + .WillOnce(SetArgPointee<0>(followerStatus)); NodeStatus leaderStatus; ASSERT_TRUE(copysetNode.GetLeaderStatus(&leaderStatus)); @@ -1086,9 +1055,8 @@ TEST_F(CopysetNodeTest, is_lease_leader) { LogicPoolID logicPoolID = 1; CopysetID copysetID = 1; Configuration conf; - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); + std::shared_ptr mockNode = + 
std::make_shared(logicPoolID, copysetID); CopysetNode copysetNode(logicPoolID, copysetID, conf); copysetNode.Init(defaultOptions_); copysetNode.SetCopysetNode(mockNode); @@ -1099,13 +1067,10 @@ TEST_F(CopysetNodeTest, is_lease_leader) { // not leader now { std::vector states = { - braft::LEASE_DISABLED, - braft::LEASE_VALID, - braft::LEASE_NOT_READY, - braft::LEASE_EXPIRED - }; + braft::LEASE_DISABLED, braft::LEASE_VALID, braft::LEASE_NOT_READY, + braft::LEASE_EXPIRED}; braft::LeaderLeaseStatus status; - for (auto &state : states) { + for (auto& state : states) { status.state = state; ASSERT_FALSE(copysetNode.IsLeaseLeader(status)); } diff --git a/test/chunkserver/copyset_service_test.cpp b/test/chunkserver/copyset_service_test.cpp index 973529366b..d456b2a361 100644 --- a/test/chunkserver/copyset_service_test.cpp +++ b/test/chunkserver/copyset_service_test.cpp @@ -20,35 +20,34 @@ * Author: wudemiao */ -#include -#include -#include #include #include #include +#include +#include +#include #include -#include "src/chunkserver/trash.h" +#include "proto/chunk.pb.h" +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "proto/chunk.pb.h" +#include "src/chunkserver/trash.h" namespace curve { namespace chunkserver { using curve::fs::FileSystemType; -static std::string Exec(const char *cmd) { - FILE *pipe = popen(cmd, "r"); +static std::string Exec(const char* cmd) { + FILE* pipe = popen(cmd, "r"); if (!pipe) return "ERROR"; char buffer[4096]; std::string result = ""; while (!feof(pipe)) { - if (fgets(buffer, 1024, pipe) != NULL) - result += buffer; + if (fgets(buffer, 1024, pipe) != NULL) result += buffer; } pclose(pipe); return result; @@ -72,9 +71,7 @@ class CopysetServiceTest : public testing::Test { trash_->Init(opt); } - void TearDown() { - Exec(rmCmd.c_str()); - } + void TearDown() { Exec(rmCmd.c_str()); } protected: std::string testDir; @@ -87,7 +84,7 @@ class CopysetServiceTest : public testing::Test { butil::AtExitManager atExitManager; TEST_F(CopysetServiceTest, basic) { - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); LogicPoolID logicPoolId = 1; CopysetID copysetId = 100002; std::string ip = "127.0.0.1"; @@ -99,7 +96,8 @@ TEST_F(CopysetServiceTest, basic) { ASSERT_EQ(0, copysetNodeManager->AddService(&server, addr)); ASSERT_EQ(0, server.Start(port, NULL)); - std::shared_ptr fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT ASSERT_TRUE(nullptr != fs); butil::string_printf(©setDir, copysetDirPattern.c_str(), port); @@ -115,8 +113,7 @@ TEST_F(CopysetServiceTest, basic) { copysetNodeOptions.raftSnapshotUri = copysetDir; copysetNodeOptions.concurrentapply = new ConcurrentApplyModule(); copysetNodeOptions.localFileSystem = fs; - copysetNodeOptions.chunkFilePool = - std::make_shared(fs); + copysetNodeOptions.chunkFilePool = std::make_shared(fs); copysetNodeOptions.trash = trash_; copysetNodeOptions.enableOdsyncWhenOpenChunkFile = true; ASSERT_EQ(0, copysetNodeManager->Init(copysetNodeOptions)); @@ -128,7 +125,7 @@ TEST_F(CopysetServiceTest, basic) { LOG(FATAL) << "Fail to init channel to " << peerId.addr; } - /* 测试创建一个新的 copyset */ + /* Test creating a new copyset */ CopysetService_Stub stub(&channel); { 
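        // Creating a brand-new copyset through the service stub should
        // succeed with COPYSET_OP_STATUS_SUCCESS.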
brpc::Controller cntl; @@ -149,7 +146,7 @@ TEST_F(CopysetServiceTest, basic) { COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } - /* 测试创建一个重复 copyset */ + /* Test creating a duplicate copyset */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -169,7 +166,7 @@ TEST_F(CopysetServiceTest, basic) { response.status()); } - /* 非法参数测试 */ + /* Illegal parameter testing */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -213,8 +210,8 @@ TEST_F(CopysetServiceTest, basic) { ASSERT_EQ(response.status(), COPYSET_OP_STATUS_FAILURE_UNKNOWN); // CASE 3: delete broken copyset success - ASSERT_TRUE(copysetNodeManager-> - DeleteCopysetNode(logicPoolId, copysetId)); + ASSERT_TRUE( + copysetNodeManager->DeleteCopysetNode(logicPoolId, copysetId)); cntl.Reset(); request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); @@ -228,8 +225,8 @@ TEST_F(CopysetServiceTest, basic) { } TEST_F(CopysetServiceTest, basic2) { - /********************* 设置初始环境 ***********************/ - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + /********************* Set Up Initial Environment ***********************/ + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); LogicPoolID logicPoolId = 2; CopysetID copysetId = 100003; std::string ip = "127.0.0.1"; @@ -241,7 +238,8 @@ TEST_F(CopysetServiceTest, basic2) { ASSERT_EQ(0, copysetNodeManager->AddService(&server, addr)); ASSERT_EQ(0, server.Start(port, NULL)); - std::shared_ptr fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT ASSERT_TRUE(nullptr != fs); butil::string_printf(©setDir, copysetDirPattern.c_str(), port); @@ -257,8 +255,7 @@ TEST_F(CopysetServiceTest, basic2) { copysetNodeOptions.raftSnapshotUri = copysetDir; copysetNodeOptions.concurrentapply = new ConcurrentApplyModule(); copysetNodeOptions.localFileSystem = fs; - copysetNodeOptions.chunkFilePool = - std::make_shared(fs); + copysetNodeOptions.chunkFilePool = std::make_shared(fs); copysetNodeOptions.enableOdsyncWhenOpenChunkFile = true; ASSERT_EQ(0, copysetNodeManager->Init(copysetNodeOptions)); ASSERT_EQ(0, copysetNodeManager->Run()); @@ -269,9 +266,9 @@ TEST_F(CopysetServiceTest, basic2) { LOG(FATAL) << "Fail to init channel to " << peerId.addr; } - /********************** 跑测试cases ************************/ + /********************** Run Test Cases ************************/ - /* 测试创建一个新的 copyset */ + /* Test creating a new copyset */ CopysetService_Stub stub(&channel); { brpc::Controller cntl; @@ -279,15 +276,15 @@ TEST_F(CopysetServiceTest, basic2) { CopysetRequest2 request; CopysetResponse2 response; - Copyset *copyset; + Copyset* copyset; copyset = request.add_copysets(); copyset->set_logicpoolid(logicPoolId); copyset->set_copysetid(copysetId); - Peer *peer1 = copyset->add_peers(); + Peer* peer1 = copyset->add_peers(); peer1->set_address("127.0.0.1:9040:0"); - Peer *peer2 = copyset->add_peers(); + Peer* peer2 = copyset->add_peers(); peer2->set_address("127.0.0.1:9041:0"); - Peer *peer3 = copyset->add_peers(); + Peer* peer3 = copyset->add_peers(); peer3->set_address("127.0.0.1:9042:0"); stub.CreateCopysetNode2(&cntl, &request, &response, nullptr); @@ -298,22 +295,22 @@ TEST_F(CopysetServiceTest, basic2) { COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } - /* 测试创建一个重复 copyset */ + /* Test creating a duplicate copyset */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); CopysetRequest2 request; CopysetResponse2 response; - Copyset 
*copyset; + Copyset* copyset; copyset = request.add_copysets(); copyset->set_logicpoolid(logicPoolId); copyset->set_copysetid(copysetId); - Peer *peer1 = copyset->add_peers(); + Peer* peer1 = copyset->add_peers(); peer1->set_address("127.0.0.1:9040:0"); - Peer *peer2 = copyset->add_peers(); + Peer* peer2 = copyset->add_peers(); peer2->set_address("127.0.0.1:9041:0"); - Peer *peer3 = copyset->add_peers(); + Peer* peer3 = copyset->add_peers(); peer3->set_address("127.0.0.1:9042:0"); stub.CreateCopysetNode2(&cntl, &request, &response, nullptr); @@ -324,7 +321,7 @@ TEST_F(CopysetServiceTest, basic2) { response.status()); } - /* 创建多个copyset */ + /* Create multiple copysets */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -332,31 +329,31 @@ TEST_F(CopysetServiceTest, basic2) { CopysetRequest2 request; CopysetResponse2 response; - // 准备第1个copyset + // Prepare the first copyset { - Copyset *copyset; + Copyset* copyset; copyset = request.add_copysets(); copyset->set_logicpoolid(logicPoolId); copyset->set_copysetid(copysetId + 1); - Peer *peer1 = copyset->add_peers(); + Peer* peer1 = copyset->add_peers(); peer1->set_address("127.0.0.1:9040:0"); - Peer *peer2 = copyset->add_peers(); + Peer* peer2 = copyset->add_peers(); peer2->set_address("127.0.0.1:9041:0"); - Peer *peer3 = copyset->add_peers(); + Peer* peer3 = copyset->add_peers(); peer3->set_address("127.0.0.1:9042:0"); } - // 准备第2个copyset + // Prepare the second copyset { - Copyset *copyset; + Copyset* copyset; copyset = request.add_copysets(); copyset->set_logicpoolid(logicPoolId); copyset->set_copysetid(copysetId + 2); - Peer *peer1 = copyset->add_peers(); + Peer* peer1 = copyset->add_peers(); peer1->set_address("127.0.0.1:9040:0"); - Peer *peer2 = copyset->add_peers(); + Peer* peer2 = copyset->add_peers(); peer2->set_address("127.0.0.1:9041:0"); - Peer *peer3 = copyset->add_peers(); + Peer* peer3 = copyset->add_peers(); peer3->set_address("127.0.0.1:9042:0"); } @@ -370,18 +367,18 @@ TEST_F(CopysetServiceTest, basic2) { // get status { - // 创建一个copyset + // Create a copyset { brpc::Controller cntl; cntl.set_timeout_ms(3000); CopysetRequest2 request; CopysetResponse2 response; - Copyset *copyset; + Copyset* copyset; copyset = request.add_copysets(); copyset->set_logicpoolid(logicPoolId); copyset->set_copysetid(copysetId + 3); - Peer *peer1 = copyset->add_peers(); + Peer* peer1 = copyset->add_peers(); peer1->set_address("127.0.0.1:9040:0"); stub.CreateCopysetNode2(&cntl, &request, &response, nullptr); @@ -392,11 +389,11 @@ TEST_F(CopysetServiceTest, basic2) { COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } - // 睡眠等待leader产生 + // Sleep waiting for leader generation ::usleep(2 * 1000 * 1000); { - // query hash为false + // query hash is false std::string peerStr("127.0.0.1:9040:0"); brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -404,7 +401,7 @@ TEST_F(CopysetServiceTest, basic2) { CopysetStatusResponse response; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId + 3); - Peer *peer = new Peer(); + Peer* peer = new Peer(); request.set_allocated_peer(peer); peer->set_address(peerStr); request.set_queryhash(false); @@ -432,7 +429,7 @@ TEST_F(CopysetServiceTest, basic2) { ASSERT_FALSE(response.has_hash()); } { - // query hash为true + // query hash is true std::string peerStr("127.0.0.1:9040:0"); brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -440,7 +437,7 @@ TEST_F(CopysetServiceTest, basic2) { CopysetStatusResponse response; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId + 3); - Peer *peer = 
new Peer(); + Peer* peer = new Peer(); request.set_allocated_peer(peer); peer->set_address(peerStr); request.set_queryhash(true); @@ -476,4 +473,3 @@ TEST_F(CopysetServiceTest, basic2) { } // namespace chunkserver } // namespace curve - diff --git a/test/chunkserver/datastore/datastore_mock_unittest.cpp b/test/chunkserver/datastore/datastore_mock_unittest.cpp index 5910df808e..26cdd8fb9b 100644 --- a/test/chunkserver/datastore/datastore_mock_unittest.cpp +++ b/test/chunkserver/datastore/datastore_mock_unittest.cpp @@ -20,43 +20,44 @@ * Author: tongguangxun */ -#include #include -#include +#include + #include +#include #include #include "include/chunkserver/chunkserver_common.h" +#include "src/chunkserver/datastore/chunkserver_datastore.h" +#include "src/chunkserver/datastore/define.h" +#include "src/chunkserver/datastore/filename_operator.h" #include "src/common/bitmap.h" #include "src/common/crc32.h" #include "src/fs/local_filesystem.h" -#include "src/chunkserver/datastore/define.h" -#include "src/chunkserver/datastore/filename_operator.h" -#include "src/chunkserver/datastore/chunkserver_datastore.h" #include "test/chunkserver/datastore/mock_file_pool.h" #include "test/fs/mock_local_filesystem.h" +using curve::common::Bitmap; using curve::fs::LocalFileSystem; using curve::fs::MockLocalFileSystem; -using curve::common::Bitmap; using ::testing::_; +using ::testing::DoAll; +using ::testing::ElementsAre; using ::testing::Ge; using ::testing::Gt; -using ::testing::Return; -using ::testing::NotNull; +using ::testing::Invoke; using ::testing::Matcher; using ::testing::Mock; -using ::testing::Truly; -using ::testing::Invoke; -using ::testing::DoAll; +using ::testing::NotNull; +using ::testing::Return; using ::testing::ReturnArg; -using ::testing::ElementsAre; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; +using ::testing::Truly; -using std::shared_ptr; using std::make_shared; +using std::shared_ptr; using std::string; namespace curve { @@ -67,27 +68,21 @@ const char baseDir[] = "/home/chunkserver/copyset/data"; const char chunk1[] = "chunk_1"; const char chunk1Path[] = "/home/chunkserver/copyset/data/chunk_1"; const char chunk1snap1[] = "chunk_1_snap_1"; -const char chunk1snap1Path[] - = "/home/chunkserver/copyset/data/chunk_1_snap_1"; +const char chunk1snap1Path[] = "/home/chunkserver/copyset/data/chunk_1_snap_1"; const char chunk1snap2[] = "chunk_1_snap_2"; -const char chunk1snap2Path[] - = "/home/chunkserver/copyset/data/chunk_1_snap_2"; +const char chunk1snap2Path[] = "/home/chunkserver/copyset/data/chunk_1_snap_2"; const char chunk2[] = "chunk_2"; -const char chunk2Path[] - = "/home/chunkserver/copyset/data/chunk_2"; +const char chunk2Path[] = "/home/chunkserver/copyset/data/chunk_2"; const char chunk2snap1[] = "chunk_2_snap_1"; -const char chunk2snap1Path[] - = "/home/chunkserver/copyset/data/chunk_2_snap_1"; +const char chunk2snap1Path[] = "/home/chunkserver/copyset/data/chunk_2_snap_1"; const char temp1[] = "chunk_1_tmp"; -const char temp1Path[] - = "/home/chunkserver/copyset/data/chunk_1_tmp"; +const char temp1Path[] = "/home/chunkserver/copyset/data/chunk_1_tmp"; const char location[] = "/file1/0@curve"; const int UT_ERRNO = 1234; -bool hasCreatFlag(int flag) {return flag & O_CREAT;} +bool hasCreatFlag(int flag) { return flag & O_CREAT; } -ACTION_TEMPLATE(SetVoidArrayArgument, - HAS_1_TEMPLATE_PARAMS(int, k), +ACTION_TEMPLATE(SetVoidArrayArgument, HAS_1_TEMPLATE_PARAMS(int, k), AND_2_VALUE_PARAMS(first, last)) { auto output = reinterpret_cast(::testing::get(args)); 
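// Note on this helper (descriptive comment, inferred from the action's name and
// parameters): SetVoidArrayArgument is a custom gmock action that copies the
// byte range [first, last) into the k-th call argument, which arrives as a
// void*; the fake Read expectations below rely on it to fill caller-supplied
// buffers with prepared metapage contents.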
auto input = first; @@ -100,163 +95,140 @@ class CSDataStore_test : public testing::TestWithParam< std::tuple> { public: - void SetUp() { - chunksize_ = std::get<0>(GetParam()); - blocksize_ = std::get<1>(GetParam()); - metapagesize_ = std::get<2>(GetParam()); - - chunk1MetaPage = new char[metapagesize_]; - chunk2MetaPage = new char[metapagesize_]; - chunk1SnapMetaPage = new char[metapagesize_]; - - lfs_ = std::make_shared(); - fpool_ = std::make_shared(lfs_); - DataStoreOptions options; - options.baseDir = baseDir; - options.chunkSize = chunksize_; - options.blockSize = blocksize_; - options.metaPageSize = metapagesize_; - options.locationLimit = kLocationLimit; - options.enableOdsyncWhenOpenChunkFile = true; - dataStore = std::make_shared(lfs_, - fpool_, - options); - fdMock = 100; - memset(chunk1MetaPage, 0, metapagesize_); - memset(chunk2MetaPage, 0, metapagesize_); - memset(chunk1SnapMetaPage, 0, metapagesize_); - } - - void TearDown() override { - delete[] chunk1MetaPage; - delete[] chunk2MetaPage; - delete[] chunk1SnapMetaPage; - } - - inline void FakeEncodeChunk(char* buf, - SequenceNum correctedSn, - SequenceNum sn, - shared_ptr bitmap = nullptr, - const std::string& location = "") { - ChunkFileMetaPage metaPage; - metaPage.version = FORMAT_VERSION; - metaPage.sn = sn; - metaPage.correctedSn = correctedSn; - metaPage.bitmap = bitmap; - metaPage.location = location; - metaPage.encode(buf); - } - - inline void FakeEncodeSnapshot(char* buf, - SequenceNum sn) { - uint32_t bits = chunksize_ / blocksize_; - SnapshotMetaPage metaPage; - metaPage.version = FORMAT_VERSION; - metaPage.sn = sn; - metaPage.bitmap = std::make_shared(bits); - metaPage.encode(buf); - } - - /** - * 构造初始环境 - * datastore存在两个chunk,分别为chunk1、chunk2 - * chunk1 和 chunk2的sn都为2,correctSn为0 - * chunk1存在快照文件,快照文件版本号为1 - * chunk2不存在快照文件 - */ - void FakeEnv() { - // fake DirExists - EXPECT_CALL(*lfs_, DirExists(baseDir)) - .WillRepeatedly(Return(true)); - // fake List - vector fileNames; - fileNames.push_back(chunk1); - fileNames.push_back(chunk1snap1); - fileNames.push_back(chunk2); - EXPECT_CALL(*lfs_, List(baseDir, NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - // fake FileExists - ON_CALL(*lfs_, FileExists(_)) - .WillByDefault(Return(false)); - EXPECT_CALL(*lfs_, FileExists(chunk1Path)) - .WillRepeatedly(Return(true)); - EXPECT_CALL(*lfs_, FileExists(chunk2Path)) - .WillRepeatedly(Return(true)); - // fake Open - ON_CALL(*lfs_, Open(_, _)) - .WillByDefault(Return(fdMock++)); - EXPECT_CALL(*lfs_, Open(_, Truly(hasCreatFlag))) - .Times(0); - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Open(chunk1Path, Truly(hasCreatFlag))) - .Times(0); - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillRepeatedly(Return(2)); - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, Truly(hasCreatFlag))) - .Times(0); - EXPECT_CALL(*lfs_, Open(chunk2Path, _)) - .WillRepeatedly(Return(3)); - EXPECT_CALL(*lfs_, Open(chunk2Path, Truly(hasCreatFlag))) - .Times(0); - // fake fpool->GetFile() - ON_CALL(*fpool_, GetFileImpl(_, NotNull())) - .WillByDefault(Return(0)); - EXPECT_CALL(*fpool_, RecycleFile(_)) - .WillRepeatedly(Return(0)); - // fake Close - ON_CALL(*lfs_, Close(_)) - .WillByDefault(Return(0)); - // fake Delete - ON_CALL(*lfs_, Delete(_)) - .WillByDefault(Return(0)); - // fake Fsync - ON_CALL(*lfs_, Fsync(_)) - .WillByDefault(Return(0)); - // fake Fstat - struct stat fileInfo; - fileInfo.st_size = chunksize_ + metapagesize_; - EXPECT_CALL(*lfs_, Fstat(_, _)) - 
.WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - // fake Read - ON_CALL(*lfs_, Read(Ge(1), NotNull(), Ge(0), Gt(0))) - .WillByDefault(ReturnArg<3>()); - // fake Write - ON_CALL(*lfs_, - Write(Ge(1), Matcher(NotNull()), Ge(0), Gt(0))) - .WillByDefault(ReturnArg<3>()); - ON_CALL(*lfs_, Write(Ge(1), Matcher(_), Ge(0), Gt(0))) - .WillByDefault(ReturnArg<3>()); - // fake read chunk1 metapage - FakeEncodeChunk(chunk1MetaPage, 0, 2); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) - .WillRepeatedly( - DoAll(SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); - // fake read chunk1's snapshot1 metapage - FakeEncodeSnapshot(chunk1SnapMetaPage, 1); - EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); - // fake read chunk2 metapage - FakeEncodeChunk(chunk2MetaPage, 0, 2); - EXPECT_CALL(*lfs_, Read(3, NotNull(), 0, metapagesize_)) - .WillRepeatedly( - DoAll(SetArrayArgument<1>(chunk2MetaPage, - chunk2MetaPage + metapagesize_), - Return(metapagesize_))); - } + void SetUp() { + chunksize_ = std::get<0>(GetParam()); + blocksize_ = std::get<1>(GetParam()); + metapagesize_ = std::get<2>(GetParam()); + + chunk1MetaPage = new char[metapagesize_]; + chunk2MetaPage = new char[metapagesize_]; + chunk1SnapMetaPage = new char[metapagesize_]; + + lfs_ = std::make_shared(); + fpool_ = std::make_shared(lfs_); + DataStoreOptions options; + options.baseDir = baseDir; + options.chunkSize = chunksize_; + options.blockSize = blocksize_; + options.metaPageSize = metapagesize_; + options.locationLimit = kLocationLimit; + options.enableOdsyncWhenOpenChunkFile = true; + dataStore = std::make_shared(lfs_, fpool_, options); + fdMock = 100; + memset(chunk1MetaPage, 0, metapagesize_); + memset(chunk2MetaPage, 0, metapagesize_); + memset(chunk1SnapMetaPage, 0, metapagesize_); + } + + void TearDown() override { + delete[] chunk1MetaPage; + delete[] chunk2MetaPage; + delete[] chunk1SnapMetaPage; + } + + inline void FakeEncodeChunk(char* buf, SequenceNum correctedSn, + SequenceNum sn, + shared_ptr bitmap = nullptr, + const std::string& location = "") { + ChunkFileMetaPage metaPage; + metaPage.version = FORMAT_VERSION; + metaPage.sn = sn; + metaPage.correctedSn = correctedSn; + metaPage.bitmap = bitmap; + metaPage.location = location; + metaPage.encode(buf); + } + + inline void FakeEncodeSnapshot(char* buf, SequenceNum sn) { + uint32_t bits = chunksize_ / blocksize_; + SnapshotMetaPage metaPage; + metaPage.version = FORMAT_VERSION; + metaPage.sn = sn; + metaPage.bitmap = std::make_shared(bits); + metaPage.encode(buf); + } + + /** + * Construct initial environment + * There are two chunks in the datastore, chunk1 and chunk2 + * The sn of chunk1 and chunk2 are both 2, and correctSn is 0 + * chunk1 has a snapshot file with version number 1 + * chunk2 does not have a snapshot file + */ + void FakeEnv() { + // fake DirExists + EXPECT_CALL(*lfs_, DirExists(baseDir)).WillRepeatedly(Return(true)); + // fake List + vector fileNames; + fileNames.push_back(chunk1); + fileNames.push_back(chunk1snap1); + fileNames.push_back(chunk2); + EXPECT_CALL(*lfs_, List(baseDir, NotNull())) + .WillRepeatedly(DoAll(SetArgPointee<1>(fileNames), Return(0))); + // fake FileExists + ON_CALL(*lfs_, FileExists(_)).WillByDefault(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk1Path)).WillRepeatedly(Return(true)); + EXPECT_CALL(*lfs_, 
FileExists(chunk2Path)).WillRepeatedly(Return(true)); + // fake Open + ON_CALL(*lfs_, Open(_, _)).WillByDefault(Return(fdMock++)); + EXPECT_CALL(*lfs_, Open(_, Truly(hasCreatFlag))).Times(0); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillRepeatedly(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, Truly(hasCreatFlag))).Times(0); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillRepeatedly(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, Truly(hasCreatFlag))).Times(0); + EXPECT_CALL(*lfs_, Open(chunk2Path, _)).WillRepeatedly(Return(3)); + EXPECT_CALL(*lfs_, Open(chunk2Path, Truly(hasCreatFlag))).Times(0); + // fake fpool->GetFile() + ON_CALL(*fpool_, GetFileImpl(_, NotNull())).WillByDefault(Return(0)); + EXPECT_CALL(*fpool_, RecycleFile(_)).WillRepeatedly(Return(0)); + // fake Close + ON_CALL(*lfs_, Close(_)).WillByDefault(Return(0)); + // fake Delete + ON_CALL(*lfs_, Delete(_)).WillByDefault(Return(0)); + // fake Fsync + ON_CALL(*lfs_, Fsync(_)).WillByDefault(Return(0)); + // fake Fstat + struct stat fileInfo; + fileInfo.st_size = chunksize_ + metapagesize_; + EXPECT_CALL(*lfs_, Fstat(_, _)) + .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + // fake Read + ON_CALL(*lfs_, Read(Ge(1), NotNull(), Ge(0), Gt(0))) + .WillByDefault(ReturnArg<3>()); + // fake Write + ON_CALL(*lfs_, + Write(Ge(1), Matcher(NotNull()), Ge(0), Gt(0))) + .WillByDefault(ReturnArg<3>()); + ON_CALL(*lfs_, Write(Ge(1), Matcher(_), Ge(0), Gt(0))) + .WillByDefault(ReturnArg<3>()); + // fake read chunk1 metapage + FakeEncodeChunk(chunk1MetaPage, 0, 2); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1MetaPage, + chunk1MetaPage + metapagesize_), + Return(metapagesize_))); + // fake read chunk1's snapshot1 metapage + FakeEncodeSnapshot(chunk1SnapMetaPage, 1); + EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); + // fake read chunk2 metapage + FakeEncodeChunk(chunk2MetaPage, 0, 2); + EXPECT_CALL(*lfs_, Read(3, NotNull(), 0, metapagesize_)) + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk2MetaPage, + chunk2MetaPage + metapagesize_), + Return(metapagesize_))); + } protected: int fdMock; std::shared_ptr lfs_; std::shared_ptr fpool_; - std::shared_ptr dataStore; + std::shared_ptr dataStore; char* chunk1MetaPage; char* chunk2MetaPage; char* chunk1SnapMetaPage; @@ -267,8 +239,8 @@ class CSDataStore_test }; /** * ConstructorTest - * case:测试构造参数为空的情况 - * 预期结果:进程退出 + * Case: Construction parameters are null + * Expected result: The process exits */ TEST_P(CSDataStore_test, ConstructorTest) { // null param test @@ -277,86 +249,66 @@ TEST_P(CSDataStore_test, ConstructorTest) { options.chunkSize = chunksize_; options.blockSize = blocksize_; options.metaPageSize = metapagesize_; - ASSERT_DEATH(std::make_shared(nullptr, - fpool_, - options), - ""); - ASSERT_DEATH(std::make_shared(lfs_, - nullptr, - options), - ""); + ASSERT_DEATH(std::make_shared(nullptr, fpool_, options), ""); + ASSERT_DEATH(std::make_shared(lfs_, nullptr, options), ""); options.baseDir = ""; - ASSERT_DEATH(std::make_shared(lfs_, - fpool_, - options), - ""); + ASSERT_DEATH(std::make_shared(lfs_, fpool_, options), ""); } /** * InitializeTest - * case:存在未知类型的文件 - * 预期结果:删除该文件,返回true + * Case: A file of unknown type exists + * Expected result: The file is deleted and true is returned */ TEST_P(CSDataStore_test, InitializeTest1) { // test 
unknown file - EXPECT_CALL(*lfs_, DirExists(baseDir)) - .Times(1) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Mkdir(baseDir)) - .Times(0); + EXPECT_CALL(*lfs_, DirExists(baseDir)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Mkdir(baseDir)).Times(0); vector fileNames; fileNames.push_back(temp1); EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); // unknown file will be deleted EXPECT_TRUE(dataStore->Initialize()); } /** * InitializeTest - * case:存在快照文件,但是快照文件没有对应的chunk - * 预期结果:删除快照文件,返回true + * Case: A snapshot file exists but has no corresponding chunk + * Expected result: The snapshot file is deleted and true is returned */ TEST_P(CSDataStore_test, InitializeTest2) { // test snapshot without chunk - EXPECT_CALL(*lfs_, DirExists(baseDir)) - .Times(1) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, DirExists(baseDir)).Times(1).WillOnce(Return(true)); vector fileNames; fileNames.push_back(chunk2snap1); EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(chunk2Path)) - .WillRepeatedly(Return(false)); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(chunk2Path)).WillRepeatedly(Return(false)); EXPECT_TRUE(dataStore->Initialize()); } /** * InitializeTest - * case:存在chunk文件,chunk文件存在快照文件 - * 预期结果:正常加载文件,返回true + * Case: A chunk file exists and has a snapshot file + * Expected result: The files are loaded normally and true is returned */ TEST_P(CSDataStore_test, InitializeTest3) { // test chunk with snapshot FakeEnv(); EXPECT_TRUE(dataStore->Initialize()); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * InitializeTest - * case:存在chunk文件,chunk文件存在snapshot文件, - * List的时候snapshot先于chunk文件被list - * 预期结果:返回true + * Case: A chunk file exists and has a snapshot file; + * the snapshot is listed before the chunk file during List + * Expected result: Returns true */ TEST_P(CSDataStore_test, InitializeTest4) { // test snapshot founded before chunk file , @@ -368,19 +320,16 @@ TEST_P(CSDataStore_test, InitializeTest4) { fileNames.push_back(chunk1); EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); EXPECT_TRUE(dataStore->Initialize()); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); } /** * InitializeTest - * case:存在chunk文件,chunk文件存在两个冲突的快照文件 - * 预期结果:返回false + * Case: A chunk file exists and has two conflicting snapshot files + * Expected result: Returns false */ TEST_P(CSDataStore_test, InitializeTest5) { // test snapshot conflict @@ -391,47 +340,35 @@ TEST_P(CSDataStore_test, InitializeTest5) { fileNames.push_back(chunk1snap2); EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); EXPECT_FALSE(dataStore->Initialize());
- EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); } /** * InitializeErrorTest - * case:data目录不存在,创建目录时失败 - * 预期结果:返回false + * Case: The data directory does not exist and creating it fails + * Expected result: Returns false */ TEST_P(CSDataStore_test, InitializeErrorTest1) { // dir not exist and mkdir failed - EXPECT_CALL(*lfs_, DirExists(baseDir)) - .Times(1) - .WillOnce(Return(false)); - EXPECT_CALL(*lfs_, Mkdir(baseDir)) - .Times(1) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, DirExists(baseDir)).Times(1).WillOnce(Return(false)); + EXPECT_CALL(*lfs_, Mkdir(baseDir)).Times(1).WillOnce(Return(-UT_ERRNO)); // List should not be called - EXPECT_CALL(*lfs_, List(baseDir, _)) - .Times(0); + EXPECT_CALL(*lfs_, List(baseDir, _)).Times(0); EXPECT_FALSE(dataStore->Initialize()); } /** * InitializeErrorTest - * case:List目录时失败 - * 预期结果:返回false + * Case: Listing the directory fails + * Expected result: Returns false */ TEST_P(CSDataStore_test, InitializeErrorTest2) { // List dir failed - EXPECT_CALL(*lfs_, DirExists(baseDir)) - .Times(1) - .WillOnce(Return(false)); - EXPECT_CALL(*lfs_, Mkdir(baseDir)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*lfs_, DirExists(baseDir)).Times(1).WillOnce(Return(false)); + EXPECT_CALL(*lfs_, Mkdir(baseDir)).Times(1).WillOnce(Return(0)); // List failed EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) @@ -441,220 +378,182 @@ TEST_P(CSDataStore_test, InitializeErrorTest2) { /** * InitializeErrorTest - * case:open chunk文件的时候出错 - * 预期结果:返回false + * Case: Opening a chunk file fails + * Expected result: Returns false */ TEST_P(CSDataStore_test, InitializeErrorTest3) { // test chunk open failed FakeEnv(); // set open chunk file failed - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); // open success - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(1)); // expect call close - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); // stat failed - EXPECT_CALL(*lfs_, Fstat(1, NotNull())) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Fstat(1, NotNull())).WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); // open success - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(1)); // expect call close - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); // stat success but file size not equal chunksize_ + metapagesize_ struct stat fileInfo; fileInfo.st_size = chunksize_; EXPECT_CALL(*lfs_, Fstat(1, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_FALSE(dataStore->Initialize()); // open success - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(1)); // expect call close - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(1, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage failed EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); 
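// Descriptive note: a failed metapage read must abort initialization, just
// like the open and Fstat failures above. Initialize() validates every chunk
// file through the chain open -> Fstat (size check) -> read metapage ->
// version check -> crc check, and this test trips each step of that chain in
// turn.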
EXPECT_FALSE(dataStore->Initialize()); // open success - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(1)); // expect call close - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(1, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage success, but version incompatible uint8_t version = FORMAT_VERSION + 1; memcpy(chunk1MetaPage, &version, sizeof(uint8_t)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + .WillOnce(DoAll( + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_FALSE(dataStore->Initialize()); // open success - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(1)); // expect call close - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(1, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage success, but crc check failed version = FORMAT_VERSION; chunk1MetaPage[1] += 1; // change the page data memcpy(chunk1MetaPage, &version, sizeof(uint8_t)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + .WillOnce(DoAll( + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_FALSE(dataStore->Initialize()); } /** * InitializeErrorTest - * case:open 快照文件的时候出错 - * 预期结果:返回false + * Case: Error opening snapshot file + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest4) { // test chunk open failed FakeEnv(); // set open snapshot file failed - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); + // Each reinitialization will release the original resources and reload them + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); // open success - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(2)); // expect call close - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // stat failed - EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Fstat(2, NotNull())).WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); + // Each reinitialization will release the original resources and reload them + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); // open success - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(2)); // expect call close - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, 
Close(2)).Times(1); // stat success but file size not equal chunksize_ + metapagesize_ struct stat fileInfo; fileInfo.st_size = chunksize_; EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); + // Each reinitialization will release the original resources and reload them + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); // open success - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(2)); // expect call close - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage failed EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); + // Each reinitialization will release the original resources and reload them + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); // open success - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(2)); // expect call close - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage success, but version incompatible uint8_t version = FORMAT_VERSION + 1; memcpy(chunk1SnapMetaPage, &version, sizeof(uint8_t)); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillOnce(DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); + // Each reinitialization will release the original resources and reload them + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); // open success - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(2)); // expect call close - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage success, but crc check failed version = FORMAT_VERSION; chunk1SnapMetaPage[1] += 1; // change the page data memcpy(chunk1SnapMetaPage, &version, sizeof(uint8_t)); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillOnce(DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); 
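// Descriptive note: the byte flipped above corrupts the snapshot metapage, so
// the crc check fails while loading the snapshot and Initialize() must return
// false once more.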
EXPECT_FALSE(dataStore->Initialize()); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); } /** * InitializeErrorTest - * case:存在chunk文件,chunk文件存在snapshot文件, - * List的时候snapshot先于chunk文件被list - * open chunk文件的时候出错 - * 预期结果:返回false + * Case: A chunk file exists and has a snapshot file; + * the snapshot is listed before the chunk file during List, + * and opening the chunk file fails + * Expected result: Returns false */ TEST_P(CSDataStore_test, InitializeErrorTest5) { // test snapshot founded before chunk file , @@ -666,18 +565,16 @@ TEST_P(CSDataStore_test, InitializeErrorTest5) { fileNames.push_back(chunk1); EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); // set open chunk file failed - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillRepeatedly(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillRepeatedly(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); } /** * Test - * case:chunk 不存在 - * 预期结果:创建chunk文件,并成功写入数据 + * Case: The chunk does not exist + * Expected result: The chunk file is created and the data is written + * successfully */ TEST_P(CSDataStore_test, WriteChunkTest1) { // initialize @@ -691,47 +588,34 @@ TEST_P(CSDataStore_test, WriteChunkTest1) { char* buf = new char[length]; memset(buf, 0, length); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); - - // 如果sn为0,返回InvalidArgError - EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->WriteChunk(id, - 0, - buf, - offset, - length, - nullptr)); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); + + // If sn is 0, returns InvalidArgError + EXPECT_EQ(CSErrorCode::InvalidArgError, + dataStore->WriteChunk(id, 0, buf, offset, length, nullptr)); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + .WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage char chunk3MetaPage[metapagesize_]; // NOLINT memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); FakeEncodeChunk(chunk3MetaPage, 0, 1); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), - Return(metapagesize_))); + .WillOnce(DoAll( + SetArrayArgument<1>(chunk3MetaPage, chunk3MetaPage + metapagesize_), + Return(metapagesize_))); // will write data EXPECT_CALL(*lfs_, Write(4, Matcher(_), metapagesize_ + offset, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); - EXPECT_CALL(*lfs_, Sync(4)) - .WillOnce(Return(0)) - .WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Sync(4)).WillOnce(Return(0)).WillOnce(Return(-1)); // sync chunk success EXPECT_EQ(CSErrorCode::Success, dataStore->SyncChunk(id)); @@ -744,21 +628,17 @@ TEST_P(CSDataStore_test, WriteChunkTest1) { ASSERT_EQ(1, info.curSn); ASSERT_EQ(0, info.snapSn);
- EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在,请求sn小于chunk的sn - * 预期结果:拒绝写入,返回BackwardRequestError + * Case: The chunk exists and the request sn is smaller than the chunk's sn + * Expected result: The write is rejected and BackwardRequestError is returned */ TEST_P(CSDataStore_test, WriteChunkTest2) { // initialize @@ -776,46 +656,28 @@ TEST_P(CSDataStore_test, WriteChunkTest2) { // snchunk.correctedsn EXPECT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // snWriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // snWriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在,请求sn小于chunk的correctedSn - * 预期结果:拒绝写入,返回BackwardRequestError + * Case: The chunk exists and the request sn is smaller than the chunk's + * correctedSn + * Expected result: The write is rejected and BackwardRequestError is returned */ TEST_P(CSDataStore_test, WriteChunkTest3) { // initialize @@ -824,9 +686,8 @@ TEST_P(CSDataStore_test, WriteChunkTest3) { FakeEncodeChunk(chunk2MetaPage, 4, 2); EXPECT_CALL(*lfs_, Read(3, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk2MetaPage, - chunk2MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk2MetaPage, chunk2MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 2; @@ -838,47 +699,29 @@ TEST_P(CSDataStore_test, WriteChunkTest3) { // sn>chunk.sn snWriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // sn==chunk.sn snWriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // sn==chunk.sn snWriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在,请求sn等于chunk的sn且不小于correctSn - * chunk不存在快照 - * 预期结果:直接写数据到chunk文件 + * Case: The chunk exists, the request sn equals the chunk's sn and is not + * less than correctedSn, and the chunk has no snapshot + * Expected result: The data is written directly to the chunk file */ TEST_P(CSDataStore_test, WriteChunkTest4) { // initialize @@ -898,12 +741,7 @@ TEST_P(CSDataStore_test, WriteChunkTest4) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn); ASSERT_EQ(0, info.snapSn); @@ -914,52 +752,33 @@ TEST_P(CSDataStore_test, WriteChunkTest4) {
EXPECT_CALL(*lfs_, Write(3, Matcher(NotNull()), _, _)) .Times(0); EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // return InvalidArgError if length not aligned offset = blocksize_; length = blocksize_ - 1; EXPECT_CALL(*lfs_, Write(3, Matcher(NotNull()), _, _)) .Times(0); EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // return InvalidArgError if offset not aligned offset = blocksize_ + 1; length = blocksize_; EXPECT_CALL(*lfs_, Write(3, Matcher(NotNull()), _, _)) .Times(0); EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在,请求sn大于chunk的sn,等于correctSn, - * chunk不存在快照 - * 预期结果:会更新metapage,然后写数据到chunk文件 + * Case: The chunk exists, the request sn is greater than the chunk's sn and + * equal to correctedSn, and the chunk has no snapshot + * Expected result: The metapage is updated and the data is then written to + * the chunk file */ TEST_P(CSDataStore_test, WriteChunkTest6) { // initialize @@ -968,9 +787,8 @@ TEST_P(CSDataStore_test, WriteChunkTest6) { FakeEncodeChunk(chunk2MetaPage, 3, 2); EXPECT_CALL(*lfs_, Read(3, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk2MetaPage, - chunk2MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk2MetaPage, chunk2MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 2; @@ -989,32 +807,25 @@ TEST_P(CSDataStore_test, WriteChunkTest6) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); ASSERT_EQ(0, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在,请求sn大于chunk的sn以及correctSn, - * chunk不存在快照、 - * 预期结果:会创建快照文件,更新metapage, - * 写数据时先cow到snapshot,再写chunk文件 + * Case: The chunk exists, the request sn is greater than both the chunk's sn + * and correctedSn, and the chunk has no snapshot + * Expected result: A snapshot file is created and the metapage is updated; + * when writing data, a copy-on-write to the snapshot is performed first, + * and then the chunk file is written */ TEST_P(CSDataStore_test, WriteChunkTest7) { // initialize @@ -1028,23 +839,19 @@ TEST_P(CSDataStore_test, WriteChunkTest7) { char* buf = new char[length]; memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 - string snapPath = string(baseDir) + "/" + - FileNameOperator::GenerateSnapshotName(id, 2); + string snapPath = + string(baseDir) + "/" + 
FileNameOperator::GenerateSnapshotName(id, 2); // snapshot not exists - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); // expect call chunkfile pool GetFile - EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(4)); // will read snapshot metapage char metapage[metapagesize_]; // NOLINT(runtime/arrays) memset(metapage, 0, sizeof(metapage)); FakeEncodeSnapshot(metapage, 2); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(metapage, - metapage + metapagesize_), + .WillOnce(DoAll(SetArrayArgument<1>(metapage, metapage + metapagesize_), Return(metapagesize_))); // will update metapage EXPECT_CALL(*lfs_, @@ -1066,54 +873,37 @@ TEST_P(CSDataStore_test, WriteChunkTest7) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); ASSERT_EQ(2, info.snapSn); - // 再次写同一个block的数据,不再进行cow,而是直接写入数据 + // Write to the same block again: no further copy-on-write is done, the + // data is written directly EXPECT_CALL(*lfs_, Write(3, Matcher(_), metapagesize_ + offset, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - // sn - 1 < chunk.sn , 返回 BackwardRequestError + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + // sn - 1 < chunk.sn, returns BackwardRequestError EXPECT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn - 1, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + dataStore->WriteChunk(id, sn - 1, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在,请求sn等于chunk的sn且不小于correctSn - * chunk存在快照 - * 预期结果:先cow到snapshot,再写chunk文件 + * Case: The chunk exists, the request sn equals the chunk's sn and is not + * less than correctedSn, and the chunk has a snapshot + * Expected result: A copy-on-write to the snapshot is performed first, and + * then the chunk file is written */ TEST_P(CSDataStore_test, WriteChunkTest9) { // initialize @@ -1143,31 +933,23 @@ TEST_P(CSDataStore_test, WriteChunkTest9) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在,请求sn大于chunk的sn,等于correctSn - * chunk存在快照 - * 预期结果:更新metapage,然后写chunk文件 + * Case: The chunk exists, 
the request sn is greater than the chunk's sn and + * equal to correctedSn, and the chunk has a snapshot + * Expected result: The metapage is updated and the chunk file is then written */ TEST_P(CSDataStore_test, WriteChunkTest10) { // initialize @@ -1176,9 +958,8 @@ TEST_P(CSDataStore_test, WriteChunkTest10) { FakeEncodeChunk(chunk1MetaPage, 3, 2); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -1198,31 +979,24 @@ TEST_P(CSDataStore_test, WriteChunkTest10) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); ASSERT_EQ(1, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在,请求sn大于chunk的sn和correctSn - * chunk存在快照,snapsn(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -1245,38 +1018,29 @@ TEST_P(CSDataStore_test, WriteChunkTest11) { // sn>chunk.sn, sn>chunk.correctedsn EXPECT_EQ(CSErrorCode::SnapshotConflictError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * WriteChunkTest - * 写clone chunk,模拟克隆 - * case1:clone chunk存在,写入区域之前未写过 - * 预期结果1:写入数据并更新bitmap - * case2:clone chunk存在,写入区域之前已写过 - * 预期结果2:写入数据但不会更新bitmap - * case3:chunk存在,且是clone chunk,部分区域已写过,部分未写过 - * 预期结果3:写入数据并更新bitmap - * case4:遍写整个chunk - * 预期结果4:写入数据,然后clone chunk会被转为普通chunk + * Write a clone chunk to simulate cloning + * Case1: The clone chunk exists and the target region has not been written + * Expected result 1: The data is written and the bitmap is updated + * Case2: The clone chunk exists and the target region has already been written + * Expected result 2: The data is written but the bitmap is not updated + * Case3: The chunk exists and is a clone chunk; some regions have been + * written, while others have not + * Expected result 3: The data is written and the bitmap is updated + * Case4: The entire chunk is overwritten + * Expected result 4: The data is written, and the clone chunk is then + * converted to a regular chunk */ TEST_P(CSDataStore_test, WriteChunkTest13) { // initialize @@ -1291,7 +1055,7 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { char* buf = new char[length]; // NOLINT memset(buf, 0, length); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { LOG(INFO) << "case 1"; char chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) @@ -1300,30 +1064,25 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { make_shared(chunksize_ / blocksize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); } - // case1:chunk存在,且是clone chunk,写入区域之前未写过 + // Case1: The chunk exists and is a clone chunk; the target region has + // not been written before { LOG(INFO) << "case 2"; id = 3; // not exist @@ -1338,13 +1097,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { .Times(1); ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - // 检查paste后chunk的状态 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // Check the chunk's status after the paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0)); @@ -1352,7 +1106,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case2:chunk存在,且是clone chunk,写入区域之前已写过 + // Case2: The chunk exists and is a clone chunk; the target region has + // already been written { LOG(INFO) << "case 3"; id = 3; // not exist @@ -1366,13 +1121,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { .Times(0); ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - // paste后,chunk的状态不变 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // After the paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0)); @@ -1380,7 +1130,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case3:chunk存在,且是clone chunk,部分区域已写过,部分未写过 + // Case3: The chunk exists and is a clone chunk; some regions have been
written, + // while others have not { LOG(INFO) << "case 4"; id = 3; // not exist @@ -1389,8 +1140,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { std::unique_ptr buf(new char[length]); - // [2 * blocksize_, 4 * blocksize_)区域已写过 - // [0, metapagesize_)为metapage + // The [2 * blocksize_, 4 * blocksize_) area has been written + // [0, metapagesize_) is the metapage EXPECT_CALL(*lfs_, Write(4, Matcher(_), offset + metapagesize_, length)) .Times(1); @@ -1398,14 +1149,10 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(1); - ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf.get(), - offset, - length, - nullptr)); - // paste后,chunk的状态不变 + ASSERT_EQ( + CSErrorCode::Success, + dataStore->WriteChunk(id, sn, buf.get(), offset, length, nullptr)); + // After the paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(0, info.bitmap->NextSetBit(0)); @@ -1413,7 +1160,7 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(4)); } - // case4:遍写整个chunk + // Case4: Overwrite the entire chunk { LOG(INFO) << "case 5"; id = 3; // not exist @@ -1422,8 +1169,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { std::unique_ptr buf(new char[length]); - // [blocksize_, 4 * blocksize_)区域已写过 - // [0, metapagesize_)为metapage + // The [blocksize_, 4 * blocksize_) area has been written + // [0, metapagesize_) is the metapage EXPECT_CALL(*lfs_, Write(4, Matcher(_), offset + metapagesize_, length)) .Times(1); @@ -1431,41 +1178,33 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(1); - ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf.get(), - offset, - length, - nullptr)); - // paste后,chunk的状态不变 + ASSERT_EQ( + CSErrorCode::Success, + dataStore->WriteChunk(id, sn, buf.get(), offset, length, nullptr)); + // After the paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /** * WriteChunkTest - * 写clone chunk,模拟恢复 - * case1:clone chunk 存在,sn<chunk.sn,sn<chunk.correctedsn - * 预期结果1:返回BackwardRequestError - * case2:clone chunk存在,sn>chunk.sn,sn==chunk.correctedsn - * 预期结果2:写入数据并更新bitmap,更新chunk.sn为sn - * case3:clone chunk存在,sn==chunk.sn,sn==chunk.correctedsn - * 预期结果3:写入数据并更新bitmap - * case4:clone chunk 存在,sn>chunk.sn, sn>chunk.correctedsn - * 预期结果4:返回StatusConflictError + * Write a clone chunk to simulate recovery + * Case1: clone chunk exists, sn<chunk.sn, sn<chunk.correctedsn + * Expected result 1: Returns BackwardRequestError + * Case2: clone chunk exists, sn>chunk.sn, sn==chunk.correctedsn + * Expected result 2: Write data and update bitmap, update chunk.sn to sn + * Case3: clone chunk exists, sn==chunk.sn, sn==chunk.correctedsn + * Expected result 3: Write data and update bitmap + * Case4: clone chunk exists, sn>chunk.sn, sn>chunk.correctedsn + * Expected result 4: Returns StatusConflictError */ TEST_P(CSDataStore_test, WriteChunkTest14) { // initialize @@ -1480,7 +1219,7 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { char* buf = new char[length]; // NOLINT memset(buf, 0, length); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { char 
chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); @@ -1488,26 +1227,20 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { make_shared(chunksize_ / blocksize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(2, info.curSn); @@ -1518,32 +1251,22 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case1:clone chunk存在 + // Case1: clone chunk exists { LOG(INFO) << "case 1"; // sn == chunk.sn, sn < chunk.correctedSn sn = 2; ASSERT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // sn < chunk.sn, sn < chunk.correctedSn sn = 1; ASSERT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); } - // case2:chunk存在,且是clone chunk, + // Case2: chunk exists and is a clone chunk, { LOG(INFO) << "case 2"; id = 3; @@ -1559,13 +1282,8 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { .Times(2); ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - // 检查paste后chunk的状态 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(3, info.curSn); ASSERT_EQ(3, info.correctedSn); @@ -1576,7 +1294,7 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case3:chunk存在,且是clone chunk + // Case3: chunk exists and is a clone chunk // sn > chunk.sn;sn == correctedsn { LOG(INFO) << "case 3"; @@ -1585,8 +1303,8 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { std::unique_ptr buf(new char[length]); - // [2 * blocksize_, 4 * blocksize_)区域已写过 - // [0, blocksize_)为metapage + // The [2 * blocksize_, 4 * blocksize_) area has been written + // [0, blocksize_) is the metapage EXPECT_CALL(*lfs_, Write(4, Matcher(_), offset + metapagesize_, length)) .Times(1); @@ -1594,14 +1312,10 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(1); - ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf.get(), - offset, - length, - nullptr)); - // paste后,chunk的状态不变 + ASSERT_EQ( + 
CSErrorCode::Success, + dataStore->WriteChunk(id, sn, buf.get(), offset, length, nullptr)); + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(3, info.curSn); ASSERT_EQ(3, info.correctedSn); @@ -1612,25 +1326,20 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(4)); } - // case3:chunk存在,且是clone chunk + // Case3: chunk exists and is a clone chunk // sn > chunk.sn;sn > correctedsn { LOG(INFO) << "case 4"; sn = 4; - // 不会写数据 - EXPECT_CALL(*lfs_, Write(4, Matcher(_), _, _)) - .Times(0); + // No data will be written + EXPECT_CALL(*lfs_, Write(4, Matcher(_), _, _)).Times(0); std::unique_ptr buf(new char[length]); - ASSERT_EQ(CSErrorCode::StatusConflictError, - dataStore->WriteChunk(id, - sn, - buf.get(), - offset, - length, - nullptr)); - // chunk的状态不变 + ASSERT_EQ( + CSErrorCode::StatusConflictError, + dataStore->WriteChunk(id, sn, buf.get(), offset, length, nullptr)); + // The state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(3, info.curSn); ASSERT_EQ(3, info.correctedSn); @@ -1641,25 +1350,22 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(4)); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在, + * Case: chunk exists, * sn==chunk.sn * sn>chunk.correctedSn * chunk.sn(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); // fake read chunk1's snapshot1 metapage,chunk.sn(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -1696,30 +1401,23 @@ TEST_P(CSDataStore_test, WriteChunkTest15) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在, - * sn>chunk.sn - * sn>chunk.correctedSn - * chunk.sn==snap.sn - * chunk存在快照 - * 预期结果:先cow到snapshot,再写chunk文件 + * Case: chunk exists, + * sn>chunk.sn + * sn>chunk.correctedSn + * chunk.sn==snap.sn + * chunk has a snapshot + * Expected result: When writing data, first perform a Copy-On-Write operation + * to the snapshot, and then write to the chunk file */ TEST_P(CSDataStore_test, WriteChunkTest16) { // initialize @@ -1728,16 +1426,15 @@ TEST_P(CSDataStore_test, WriteChunkTest16) { FakeEncodeChunk(chunk1MetaPage, 0, 2); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + 
SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); // fake read chunk1's snapshot1 metapage FakeEncodeSnapshot(chunk1SnapMetaPage, 3); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -1760,26 +1457,18 @@ TEST_P(CSDataStore_test, WriteChunkTest16) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** - * WriteChunkTest 异常测试 - * case:创建快照文件时出错 - * 预期结果:写失败,不会改变当前chunk状态 + * WriteChunkTest exception test + * Case: Error creating snapshot file + * Expected result: Write failed and will not change the current chunk state */ TEST_P(CSDataStore_test, WriteChunkErrorTest1) { // initialize @@ -1792,80 +1481,56 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest1) { size_t length = blocksize_; char* buf = new char[length]; // NOLINT memset(buf, 0, length); - string snapPath = string(baseDir) + "/" + - FileNameOperator::GenerateSnapshotName(id, 2); + string snapPath = + string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); // getchunk failed - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn); ASSERT_EQ(0, info.snapSn); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) - .WillOnce(Return(0)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); + EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())).WillOnce(Return(0)); // open snapshot failed - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn); ASSERT_EQ(0, info.snapSn); // open success but read snapshot metapage failed - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(4)); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); EXPECT_EQ(CSErrorCode::InternalError, - 
dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn); ASSERT_EQ(0, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** - * WriteChunkTest 异常测试 - * case:创建快照文件成功,更新metapage失败 - * 预期结果:写失败,产生快照文件,但是chunk版本号不会改变 - * 再次写入,不会生成新的快照文件 + * WriteChunkTest exception test + * Case: The snapshot file is created successfully, but updating the metapage + * fails + * Expected result: The write fails and a snapshot file is produced, but the + * chunk version number does not change; writing again does not generate a new + * snapshot file */ TEST_P(CSDataStore_test, WriteChunkErrorTest2) { // initialize @@ -1879,22 +1544,18 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest2) { char* buf = new char[length]; memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 - string snapPath = string(baseDir) + "/" + - FileNameOperator::GenerateSnapshotName(id, 2); + string snapPath = + string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); // expect call chunk file pool GetFile - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); + EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(4)); // will read snapshot metapage char metapage[metapagesize_]; // NOLINT(runtime/arrays) memset(metapage, 0, sizeof(metapage)); FakeEncodeSnapshot(metapage, 2); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(metapage, - metapage + metapagesize_), + .WillOnce(DoAll(SetArrayArgument<1>(metapage, metapage + metapagesize_), Return(metapagesize_))); // write chunk metapage failed EXPECT_CALL(*lfs_, @@ -1902,34 +1563,26 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest2) { .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); // chunk sn not changed ASSERT_EQ(2, info.curSn); ASSERT_EQ(2, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; }
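The error tests above and the two that follow inject failures at successive stages of the snapshot branch of WriteChunk: allocating the snapshot file from the pool, opening it, reading its metapage, updating the chunk metapage, cow, and finally the data write. As a minimal sketch of that staging, under assumed interface and function names (FilePoolIf, FsIf, and OpenSnapshotFile are illustrative, not the datastore's real API):

```cpp
#include <cstdint>
#include <string>

// Hypothetical stand-ins for the mocked FilePool / LocalFileSystem.
struct FilePoolIf {
    virtual int GetFile(const std::string& path) = 0;
    virtual ~FilePoolIf() = default;
};
struct FsIf {
    virtual int Open(const std::string& path) = 0;
    virtual int Read(int fd, char* buf, uint64_t offset, uint64_t length) = 0;
    virtual ~FsIf() = default;
};

enum class Code { Success, InternalError };

// Each early return matches one injected failure in the tests: pool
// allocation, open, and metapage read all surface as InternalError, and
// none of them touches the chunk's sn/snapSn, which the tests re-check
// after every failure.
Code OpenSnapshotFile(FilePoolIf* pool, FsIf* fs, const std::string& snapPath,
                      char* metapage, uint64_t metaPageSize, int* fdOut) {
    if (pool->GetFile(snapPath) < 0) return Code::InternalError;
    int fd = fs->Open(snapPath);
    if (fd < 0) return Code::InternalError;
    if (fs->Read(fd, metapage, 0, metaPageSize) < 0) return Code::InternalError;
    *fdOut = fd;
    return Code::Success;
}
```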
/** - * WriteChunkTest 异常测试 - * case:创建快照文件成功,更新metapage成功,cow失败 - * 预期结果:写失败,产生快照文件,chunk版本号发生变更, - * 快照的bitmap未发生变化,再次写入,仍会进行cow + * WriteChunkTest exception test + * Case: The snapshot file is created and the metapage is updated successfully, + * but cow fails + * Expected result: The write fails, a snapshot file is produced and the chunk + * version number changes, but the snapshot's bitmap is unchanged; writing + * again still performs cow */ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { // initialize @@ -1943,22 +1596,18 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { char* buf = new char[length]; memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 - string snapPath = string(baseDir) + "/" + - FileNameOperator::GenerateSnapshotName(id, 2); + string snapPath = + string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); // expect call chunk file pool GetFile - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); + EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(4)); // will read snapshot metapage char metapage[metapagesize_]; // NOLINT(runtime/arrays) memset(metapage, 0, sizeof(metapage)); FakeEncodeSnapshot(metapage, 2); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(metapage, - metapage + metapagesize_), + .WillOnce(DoAll(SetArrayArgument<1>(metapage, metapage + metapagesize_), Return(metapagesize_))); // will update metapage EXPECT_CALL(*lfs_, @@ -1971,12 +1620,7 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); @@ -1991,12 +1635,7 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { metapagesize_ + offset, length)) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); ASSERT_EQ(2, info.snapSn); @@ -2014,17 +1653,12 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); ASSERT_EQ(2, info.snapSn); - // 再次写入仍会cow + // Writing again will still perform cow // will copy on write LOG(INFO) << "case 4"; EXPECT_CALL(*lfs_, Read(3, NotNull(), metapagesize_ + offset, length)) @@ -2043,29 +1677,21 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { LOG(INFO) << "case 5"; EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; }
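WriteChunkErrorTest3 and WriteChunkErrorTest4 differ only in where the failure lands relative to the cow step, and the retry behaviour falls out of the snapshot bitmap: a page is copied again on retry only while its bit is still clear. A compact sketch of that rule (kPagesPerChunk and the function name are illustrative assumptions):

```cpp
#include <bitset>
#include <cstddef>

constexpr std::size_t kPagesPerChunk = 4096;  // illustrative page count

// Cow is repeated for a page until the copy has succeeded and its bit is
// set in the snapshot bitmap; afterwards writes go straight to the chunk
// file. This is why ErrorTest3 (cow failed, bit still clear) cows again on
// retry, while ErrorTest4 (cow succeeded, bit set) writes directly on retry.
bool NeedCow(const std::bitset<kPagesPerChunk>& snapBitmap, std::size_t page) {
    return !snapBitmap.test(page);
}
```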
/** - * WriteChunkTest 异常测试 - * case:创建快照文件成功,更新metapage成功,cow成功,写数据失败 - * 预期结果:写失败,产生快照文件,chunk版本号发生变更, - * 快照的bitmap发生变化,再次写入,直接写chunk文件 + * WriteChunkTest exception test + * Case: The snapshot file is created, the metapage is updated, and cow + * succeeds, but writing the data fails + * Expected result: The write fails, a snapshot file is produced, and the chunk + * version number changes; the snapshot's bitmap has changed, so writing again + * goes directly to the chunk file */ TEST_P(CSDataStore_test, WriteChunkErrorTest4) { // initialize @@ -2079,22 +1705,18 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest4) { char buf[length]; // NOLINT memset(buf, 0, sizeof(buf)); // will Open snapshot file, snap sn equals 2 - string snapPath = string(baseDir) + "/" + - FileNameOperator::GenerateSnapshotName(id, 2); + string snapPath = + string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); // expect call chunk file pool GetFile - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); + EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(4)); // will read snapshot metapage char metapage[metapagesize_]; // NOLINT(runtime/arrays) memset(metapage, 0, sizeof(metapage)); FakeEncodeSnapshot(metapage, 2); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(metapage, - metapage + metapagesize_), + .WillOnce(DoAll(SetArrayArgument<1>(metapage, metapage + metapagesize_), Return(metapagesize_))); // will update metapage EXPECT_CALL(*lfs_, @@ -2116,39 +1738,25 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest4) { .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - // 再次写入直接写chunk文件 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // Writing again writes directly to the chunk file // will write data EXPECT_CALL(*lfs_, Write(3, Matcher(_), metapagesize_ + offset, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); } /** * WriteChunkTest - * case:chunk 不存在 - * 预期结果:创建chunk文件的时候失败 + * Case: chunk does not exist + * Expected result: Fails when creating the chunk file */ TEST_P(CSDataStore_test, WriteChunkErrorTest5) { // initialize @@ -2162,117 +1770,78 @@ char* buf = new char[length]; memset(buf, 0, length); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunk file pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(-UT_ERRNO)); - EXPECT_EQ(CSErrorCode::InternalError, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + EXPECT_EQ(CSErrorCode::InternalError, + dataStore->WriteChunk(id, sn, buf, offset, length,
nullptr)); // getchunk success - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); // set open chunk file failed - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .WillOnce(Return(-UT_ERRNO)); - EXPECT_EQ(CSErrorCode::InternalError, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).WillOnce(Return(-UT_ERRNO)); + EXPECT_EQ(CSErrorCode::InternalError, + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(true)); // open success - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).WillOnce(Return(4)); // expect call close - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); // stat failed - EXPECT_CALL(*lfs_, Fstat(4, NotNull())) - .WillOnce(Return(-UT_ERRNO)); - EXPECT_EQ(CSErrorCode::InternalError, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Fstat(4, NotNull())).WillOnce(Return(-UT_ERRNO)); + EXPECT_EQ(CSErrorCode::InternalError, + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(true)); // open success - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).WillOnce(Return(4)); // expect call close - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); // stat success but file size not equal chunksize_ + metapagesize_ struct stat fileInfo; fileInfo.st_size = chunksize_; EXPECT_CALL(*lfs_, Fstat(4, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_EQ(CSErrorCode::FileFormatError, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(true)); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_EQ(CSErrorCode::FileFormatError, + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(true)); // open success - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).WillOnce(Return(4)); // expect call close - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(4, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage failed EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); - EXPECT_EQ(CSErrorCode::InternalError, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_EQ(CSErrorCode::InternalError, + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /* * 
WriteChunkErrorTest - 所写chunk为clone chunk - case1:请求location过长,导致metapage size超出page size - 预期结果1:create clone chunk失败 - case2:写数据时失败 - 预期结果2:返回InternalError,chunk状态不变 - case3:更新metapage时失败 - 预期结果3:返回InternalError,chunk状态不变 + * The chunk being written is a clone chunk + * Case1: The requested location is too long, making the metapage size exceed + * the page size + * Expected result 1: Creating the clone chunk fails + * Case2: Writing the data fails + * Expected result 2: InternalError returned, chunk state unchanged + * Case3: Updating the metapage fails + * Expected result 3: InternalError returned, chunk state unchanged */ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { // initialize @@ -2287,17 +1856,14 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { char buf[length]; // NOLINT memset(buf, 0, sizeof(buf)); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { - string longLocation(kLocationLimit+1, 'a'); + string longLocation(kLocationLimit + 1, 'a'); EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, longLocation)); } - // 创建 clone chunk + // Create clone chunk { char chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); shared_ptr bitmap = make_shared(chunksize_ / metapagesize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); } - // case1:写数据时失败 + // Case1: Failed to write data { id = 3; // not exist offset = blocksize_; @@ -2340,18 +1900,13 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(0); ASSERT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - // 检查paste后chunk的状态 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case2:更新metapage时失败 + // Case2: Failed to update metapage { id = 3; // not exist offset = blocksize_; @@ -2364,32 +1919,23 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); ASSERT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr));
- // 检查paste后chunk的状态 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); } /** * ReadChunkTest - * case:chunk不存在 - * 预期结果:返回ChunkNotExistError错误码 + * Case: chunk does not exist + * Expected result: ChunkNotExistError error code returned */ TEST_P(CSDataStore_test, ReadChunkTest1) { // initialize @@ -2404,24 +1950,17 @@ TEST_P(CSDataStore_test, ReadChunkTest1) { memset(buf, 0, sizeof(buf)); // test chunk not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * ReadChunkTest - * case:chunk存在,读取区域超过chunk大小或者offset和length未对齐 - * 预期结果:返回InvalidArgError错误码 + * Case: chunk exists, reading area exceeds chunk size or offset and length are + * not aligned Expected result: InvalidArgError error code returned */ TEST_P(CSDataStore_test, ReadChunkTest2) { // initialize @@ -2436,42 +1975,27 @@ TEST_P(CSDataStore_test, ReadChunkTest2) { memset(buf, 0, sizeof(buf)); // test read out of range EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); // return InvalidArgError if length not aligned offset = blocksize_; length = blocksize_ - 1; EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); // return InvalidArgError if offset not aligned offset = blocksize_ + 1; length = blocksize_; EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * ReadChunkTest - * case:正常读取存在的chunk - * 预期结果:读取成功 + * Case: Normal reading of existing chunks + * Expected result: read successfully */ TEST_P(CSDataStore_test, ReadChunkTest3) { // initialize @@ -2488,30 +2012,23 @@ TEST_P(CSDataStore_test, ReadChunkTest3) { EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + metapagesize_, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * ReadChunkTest - * 读取 clone chunk - * 
case1:读取区域未被写过 - * 预期结果:返回PageNerverWrittenError - * case2:读取区域部分被写过 - * 预期结果:返回PageNerverWrittenError - * case3:读取区域已被写过 - * 预期结果:返回Success,数据成功写入 + * Read clone chunk + * Case1: The read area has not been written + * Expected result: PageNerverWrittenError returned + * Case2: The read area part has been written + * Expected result: PageNerverWrittenError returned + * Case3: The read area has been written + * Expected result: Success returned, data successfully written */ TEST_P(CSDataStore_test, ReadChunkTest4) { // initialize @@ -2529,80 +2046,56 @@ TEST_P(CSDataStore_test, ReadChunkTest4) { FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), - Return(metapagesize_))); - EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, - location)); - - // case1: 读取未写过区域 + .WillOnce(DoAll( + SetArrayArgument<1>(chunk3MetaPage, chunk3MetaPage + metapagesize_), + Return(metapagesize_))); + EXPECT_EQ( + CSErrorCode::Success, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); + + // Case1: Read unwritten area off_t offset = 1 * blocksize_; size_t length = blocksize_; char buf[2 * length]; // NOLINT memset(buf, 0, sizeof(buf)); - EXPECT_CALL(*lfs_, Read(_, _, _, _)) - .Times(0); + EXPECT_CALL(*lfs_, Read(_, _, _, _)).Times(0); EXPECT_EQ(CSErrorCode::PageNerverWrittenError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - // case2: 读取区域部分被写过 + // Case2: The read area part has been written offset = 0; length = 2 * blocksize_; - EXPECT_CALL(*lfs_, Read(_, _, _, _)) - .Times(0); + EXPECT_CALL(*lfs_, Read(_, _, _, _)).Times(0); EXPECT_EQ(CSErrorCode::PageNerverWrittenError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - // case3: 读取区域已写过 + // Case3: The read area has been written offset = 0; length = blocksize_; EXPECT_CALL(*lfs_, Read(4, NotNull(), offset + metapagesize_, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); } /** * ReadChunkErrorTest - * case:读chunk文件时出错 - * 预期结果:读取失败,返回InternalError + * Case: Error reading chunk file + * Expected result: Read failed, returned InternalError */ TEST_P(CSDataStore_test, ReadChunkErrorTest1) { // initialize @@ 
-2619,25 +2112,18 @@ TEST_P(CSDataStore_test, ReadChunkErrorTest1) { EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + metapagesize_, length)) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * ReadSnapshotChunkTest - * case:chunk不存在 - * 预期结果:返回ChunkNotExistError错误码 + * Case: chunk does not exist + * Expected result: ChunkNotExistError error code returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest1) { // initialize @@ -2652,25 +2138,18 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest1) { memset(buf, 0, length); // test chunk not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * ReadSnapshotChunkTest - * case:chunk存在,请求版本号等于chunk版本号 - * 预期结果:读chunk的数据 + * Case: chunk exists, request version number equal to Chunk version number + * Expected result: Read chunk data */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest2) { // initialize @@ -2685,54 +2164,35 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest2) { memset(buf, 0, length); // test out of range EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); // test offset not aligned offset = chunksize_ - 1; length = chunksize_; EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); // test length not aligned offset = chunksize_; length = chunksize_ + 1; EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); // test in range offset = blocksize_; length = 2 * blocksize_; EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + metapagesize_, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * ReadSnapshotChunkTest - * case:chunk存在,请求版本号等于snapshot版本号 - * 预期结果:读快照的数据 + * Case: chunk exists, request version number equal to snapshot version number + * Expected result: Read data from snapshot */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest3) { // initialize @@ -2760,12 +2220,7 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest3) { offset + metapagesize_, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - writeBuf, - offset, - length, - nullptr)); + 
dataStore->WriteChunk(id, sn, writeBuf, offset, length, nullptr)); // test out of range sn = 1; @@ -2774,16 +2229,11 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest3) { char* readBuf = new char[length]; memset(readBuf, 0, length); EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadSnapshotChunk(id, - sn, - readBuf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, readBuf, offset, length)); // test in range, read [0, 4*blocksize_) offset = 0; // read chunk in[0, blocksize_) and [3*blocksize_, 4*blocksize_) - EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_, blocksize_)) - .Times(1); + EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_, blocksize_)).Times(1); EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_ + 3 * blocksize_, blocksize_)) .Times(1); @@ -2792,26 +2242,19 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest3) { 2 * blocksize_)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->ReadSnapshotChunk(id, - sn, - readBuf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, readBuf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] writeBuf; delete[] readBuf; } /** * ReadSnapshotChunkTest - * case:chunk存在,但是请求的版本号不存在 - * 预期结果:返回ChunkNotExistError错误码 + * Case: chunk exists, but the requested version number does not exist + * Expected result: ChunkNotExistError error code returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest4) { // initialize @@ -2826,25 +2269,18 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest4) { memset(buf, 0, length); // test sn not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * ReadSnapshotChunkErrorTest - * case:读快照时失败 - * 预期结果:返回InternalError + * Case: Failed to read snapshot + * Expected result: InternalError returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest1) { // initialize @@ -2872,12 +2308,7 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest1) { offset + metapagesize_, length)) .Times(1); ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - writeBuf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, writeBuf, offset, length, nullptr)); // test in range, read [0, 4*blocksize_) sn = 1; @@ -2889,15 +2320,10 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest1) { EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_, blocksize_)) .WillOnce(Return(-UT_ERRNO)); ASSERT_EQ(CSErrorCode::InternalError, - dataStore->ReadSnapshotChunk(id, - sn, - readBuf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, readBuf, offset, length)); // read snapshot failed - EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_, blocksize_)) - .Times(1); + EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_, blocksize_)).Times(1); EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_ + 3 * blocksize_, blocksize_)) .Times(1); @@ -2907,20 +2333,17 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest1) { ASSERT_EQ(CSErrorCode::InternalError, 
dataStore->ReadSnapshotChunk(id, sn, readBuf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] writeBuf; delete[] readBuf; } /** * ReadSnapshotChunkErrorTest - * case:chunk存在,请求版本号等于chunk版本号,读数据时失败 - * 预期结果:返回InternalError + * Case: chunk exists, the request version number equals the chunk version + * number, and reading the data fails + * Expected result: InternalError returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest2) { // initialize @@ -2938,18 +2361,11 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest2) { EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + metapagesize_, length)) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } @@ -2971,12 +2387,9 @@ TEST_P(CSDataStore_test, ReadChunkMetaDataTest1) { EXPECT_EQ(CSErrorCode::ChunkNotExistError, dataStore->ReadChunkMetaPage(id, sn, buf)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** @@ -2994,24 +2407,18 @@ TEST_P(CSDataStore_test, ReadChunkMetaDataTest2) { char buf[blocksize_]; // NOLINT(runtime/arrays) memset(buf, 0, blocksize_); // test chunk exists - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) - .Times(1); - EXPECT_EQ(CSErrorCode::Success, - dataStore->ReadChunkMetaPage(id, sn, buf)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)).Times(1); + EXPECT_EQ(CSErrorCode::Success, dataStore->ReadChunkMetaPage(id, sn, buf)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } - /** * DeleteChunkTest - * case:chunk不存在 - * 预期结果:返回成功 + * Case: chunk does not exist + * Expected result: Success returned */ TEST_P(CSDataStore_test, DeleteChunkTest1) { // initialize @@ -3022,21 +2429,17 @@ TEST_P(CSDataStore_test, DeleteChunkTest1) { SequenceNum sn = 2; // test chunk not exists - EXPECT_EQ(CSErrorCode::Success, - dataStore->DeleteChunk(id, sn)); + EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteChunk(id, sn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteChunkTest - * case:chunk存在快照文件 - * 预期结果:返回Success, chunk被删除,快照被删除 + * Case: The chunk has a snapshot file + * Expected result: Success returned, chunk deleted, snapshot deleted */ TEST_P(CSDataStore_test, DeleteChunkTest2) { // initialize @@ -3046,25 +2449,21 @@ TEST_P(CSDataStore_test, DeleteChunkTest2) { ChunkID id = 1; SequenceNum sn = 2; - EXPECT_CALL(*lfs_, Close(1)) - .Times(1);
- EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // delete chunk with snapshot - EXPECT_EQ(CSErrorCode::Success, - dataStore->DeleteChunk(id, sn)); + EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteChunk(id, sn)); CSChunkInfo info; ASSERT_EQ(CSErrorCode::ChunkNotExistError, dataStore->GetChunkInfo(id, &info)); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** - * case:chunk存在,快照文件不存在 - * 预期结果:返回成功 + * Case: chunk exists, snapshot file does not exist + * Expected result: Success returned */ TEST_P(CSDataStore_test, DeleteChunkTest3) { // initialize @@ -3075,27 +2474,22 @@ TEST_P(CSDataStore_test, DeleteChunkTest3) { SequenceNum sn = 2; // chunk will be closed - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) - .WillOnce(Return(0)); - EXPECT_EQ(CSErrorCode::Success, - dataStore->DeleteChunk(id, sn)); + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)).WillOnce(Return(0)); + EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteChunk(id, sn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); } /** * DeleteChunkTest - * chunk存在,快照文件不存在 - * case1: sn<chunkinfo.sn - * 预期结果1:返回BackwardRequestError - * case2: sn>chunkinfo.sn - * 预期结果2:返回成功 + * chunk exists, snapshot file does not exist + * Case1: sn < chunkinfo.sn + * Expected result 1: BackwardRequestError returned + * Case2: sn > chunkinfo.sn + * Expected result 2: Success returned */ TEST_P(CSDataStore_test, DeleteChunkTest4) { // initialize @@ -3107,37 +2501,30 @@ TEST_P(CSDataStore_test, DeleteChunkTest4) { // case1 { // chunk will be closed - EXPECT_CALL(*lfs_, Close(3)) - .Times(0); + EXPECT_CALL(*lfs_, Close(3)).Times(0); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) - .Times(0); + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)).Times(0); EXPECT_EQ(CSErrorCode::BackwardRequestError, - dataStore->DeleteChunk(id, 1)); + dataStore->DeleteChunk(id, 1)); } // case2 { // chunk will be closed - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) - .WillOnce(Return(0)); - EXPECT_EQ(CSErrorCode::Success, - dataStore->DeleteChunk(id, 3)); + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)).WillOnce(Return(0)); + EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteChunk(id, 3)); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); } /** * DeleteChunkErrorTest - * case:chunk存在,快照文件不存在,recyclechunk时出错 - * 预期结果:返回成功 + * Case: chunk exists, snapshot file does not exist, and an error occurs while + * recycling the chunk + * Expected result: InternalError returned */ TEST_P(CSDataStore_test, DeleteChunkErrorTest1) { // initialize @@ -3147,24 +2534,19 @@ TEST_P(CSDataStore_test, DeleteChunkErrorTest1) { ChunkID id = 2; SequenceNum sn = 2; // chunk will be closed - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) - .WillOnce(Return(-1)); - EXPECT_EQ(CSErrorCode::InternalError, - dataStore->DeleteChunk(id, sn)); + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)).WillOnce(Return(-1)); + EXPECT_EQ(CSErrorCode::InternalError, dataStore->DeleteChunk(id, sn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); }
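DeleteChunkTest4's two branches hinge on a single comparison between the sequence number carried by the delete request and the chunk's current sn. A one-line distillation, under assumed names (CheckDeleteSn is illustrative, not the datastore's real function):

```cpp
#include <cstdint>

enum class Code { Success, BackwardRequestError };

// A stale delete (request sn lower than the chunk's current sn) is rejected
// as a backward request; an equal or newer sn is allowed to proceed.
Code CheckDeleteSn(uint64_t requestSn, uint64_t chunkSn) {
    return requestSn < chunkSn ? Code::BackwardRequestError : Code::Success;
}
```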
/** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk不存在 - * 预期结果:返回成功 + * Case: chunk does not exist + * Expected result: Success returned */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest1) { // initialize @@ -3177,27 +2559,26 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest1) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } -// 对于DeleteSnapshotChunkOrCorrectSn来说,内部主要有两个操作 -// 一个是删除快照文件,一个是修改correctedSn -// 当存在快照文件时,fileSn>=chunk的sn是判断是否要删除快照的唯一条件 -// 对于correctedSn来说,fileSn大于chunk的sn以及correctedSn是判断 -// 是否要修改correctedSn的唯一条件 +// DeleteSnapshotChunkOrCorrectSn internally performs two main operations: +// one is deleting the snapshot file, the other is modifying correctedSn. +// When a snapshot file exists, fileSn >= chunk's sn is the only condition that +// determines whether the snapshot is deleted. For correctedSn, fileSn being +// greater than both chunk's sn and correctedSn is the only condition that +// determines whether correctedSn is modified. /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn >= chunk的sn - * fileSn == chunk的correctedSn - * chunk.sn>snap.sn - * 预期结果:删除快照,不会修改correctedSn,返回成功 + * Case: chunk exists, snapshot exists + * fileSn >= chunk's sn + * fileSn == chunk's correctedSn + * chunk.sn>snap.sn + * Expected result: The snapshot is deleted, correctedSn is not modified, and + * success is returned */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest2) { // initialize @@ -3206,9 +2587,8 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest2) { FakeEncodeChunk(chunk1MetaPage, 3, 2); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -3216,11 +2596,9 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest2) { // fileSn == correctedSn SequenceNum fileSn = 3; // snapshot will be closed - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) - .Times(1); + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).Times(1); // chunk's metapage should not be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, metapagesize_)) @@ -3228,18 +2606,17 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest2) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); }
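The comment block above pins down both rules; restated as predicates over assumed field names (ChunkMeta is illustrative, not the real CSChunkInfo layout):

```cpp
#include <cstdint>

struct ChunkMeta {
    uint64_t sn;           // current chunk version
    uint64_t correctedSn;  // corrected version recorded in the metapage
    bool hasSnapshot;      // whether a snapshot file exists
};

// Deleting the snapshot depends only on fileSn >= chunk.sn.
bool ShouldDeleteSnapshot(const ChunkMeta& c, uint64_t fileSn) {
    return c.hasSnapshot && fileSn >= c.sn;
}

// Rewriting correctedSn depends only on fileSn exceeding both values.
bool ShouldUpdateCorrectedSn(const ChunkMeta& c, uint64_t fileSn) {
    return fileSn > c.sn && fileSn > c.correctedSn;
}
```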
/** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn < chunk的sn - * 此时无论correctSn为何值都不会修改correctedSn - * 预期结果:返回成功,不会删除快照,不会修改correctedSn + * Case: chunk exists, snapshot exists + * fileSn < chunk's sn + * In this case correctedSn is not modified, regardless of its current value + * Expected result: BackwardRequestError returned; the snapshot is not deleted + * and correctedSn is not modified */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) { // initialize @@ -3248,9 +2625,8 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) { FakeEncodeChunk(chunk1MetaPage, 0, 3); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -3258,8 +2634,7 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) { // 2 > correctedSn SequenceNum fileSn = 2; // snapshot should not be closed - EXPECT_CALL(*lfs_, Close(2)) - .Times(0); + EXPECT_CALL(*lfs_, Close(2)).Times(0); // chunk's metapage should not be updated EXPECT_CALL(*lfs_, Write(3, Matcher(NotNull()), 0, metapagesize_)) @@ -3267,17 +2642,14 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) { EXPECT_EQ(CSErrorCode::BackwardRequestError, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - // 下则用例用于补充DeleteSnapshotChunkOrCorrectSnTest2用例中 - // 当 fileSn == sn 时的边界情况 - // fileSn == sn - // fileSn > correctedSn + // The following case supplements DeleteSnapshotChunkOrCorrectSnTest2, + // covering the boundary situation when fileSn == sn: + // fileSn == sn + // fileSn > correctedSn fileSn = 3; // snapshot will be closed - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) - .Times(1); + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).Times(1); // chunk's metapage should not be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, metapagesize_)) @@ -3285,17 +2657,16 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn > chunk的sn以及correctedSn - * 预期结果:删除快照,并修改correctedSn,返回成功 + * Case: chunk exists, snapshot exists + * fileSn > chunk's sn and correctedSn + * Expected result: The snapshot is deleted, correctedSn is modified, and + * success is returned */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest4) { // initialize @@ -3307,11 +2678,9 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest4) { // fileSn > correctedSn SequenceNum fileSn = 3; // snapshot will be closed - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) - .Times(1); + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).Times(1); // chunk's metapage will be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, metapagesize_)) @@ -3319,17 +2688,15 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest4) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); }
/** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot不存在 - * fileSn <= chunk的sn或correctedSn - * 预期结果:不会修改correctedSn,返回成功 + * Case: chunk exists, snapshot does not exist + * fileSn <= chunk's sn or correctedSn + * Expected result: correctedSn is not modified, and success is returned */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest5) { // initialize @@ -3347,19 +2714,16 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest5) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot不存在 - * fileSn > chunk的sn及correctedSn - * 预期结果:修改correctedSn,返回成功 + * Case: chunk exists, snapshot does not exist + * fileSn > chunk's sn and correctedSn + * Expected result: correctedSn is modified and success is returned */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest6) { // initialize @@ -3377,18 +2741,15 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest6) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot不存在,chunk为clone chunk - * 预期结果:返回StatusConflictError + * Case: chunk exists, snapshot does not exist, and the chunk is a clone chunk + * Expected result: StatusConflictError returned */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest7) { // initialize @@ -3405,29 +2766,23 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest7) { FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), - Return(metapagesize_))); - EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, - location)); - - // 无论correctedSn为多少,都返回StatusConflictError + .WillOnce(DoAll( + SetArrayArgument<1>(chunk3MetaPage, chunk3MetaPage + metapagesize_), + Return(metapagesize_))); + EXPECT_EQ( + CSErrorCode::Success, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); + + // StatusConflictError is returned regardless of the value of correctedSn EXPECT_EQ(CSErrorCode::StatusConflictError, dataStore->DeleteSnapshotChunkOrCorrectSn(id, 1)); EXPECT_EQ(CSErrorCode::StatusConflictError, @@ -3439,23 +2794,20 @@
EXPECT_EQ(CSErrorCode::StatusConflictError, dataStore->DeleteSnapshotChunkOrCorrectSn(id, 5)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn > chunk的sn - * fileSn > chunk的correctedSn + * Case: chunk exists, snapshot exists + * fileSn > chunk's sn + * fileSn > chunk's correctedSn * chunk.sn==snap.sn - * 预期结果:删除快照,不会修改correctedSn,返回成功 + * Expected result: Delete snapshot without modifying correctedSn, return + * success */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest8) { // initialize @@ -3464,16 +2816,15 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest8) { FakeEncodeChunk(chunk1MetaPage, 0, 2); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); // fake read chunk1's snapshot1 metapage,chunk.sn==snap.sn FakeEncodeSnapshot(chunk1SnapMetaPage, 2); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -3481,11 +2832,9 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest8) { // fileSn > correctedSn SequenceNum fileSn = 3; // snapshot will not be closed - EXPECT_CALL(*lfs_, Close(2)) - .Times(0); + EXPECT_CALL(*lfs_, Close(2)).Times(0); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) - .Times(0); + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).Times(0); // chunk's metapage should be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, metapagesize_)) @@ -3493,21 +2842,19 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest8) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn == chunk的sn - * fileSn == chunk的correctedSn - * chunk.sn(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); // fake read chunk1's snapshot1 metapage,chunk.sn==snap.sn FakeEncodeSnapshot(chunk1SnapMetaPage, 3); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -3533,11 +2879,9 @@ 
TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest9) { // fileSn == correctedSn SequenceNum fileSn = 2; // snapshot will not be closed - EXPECT_CALL(*lfs_, Close(2)) - .Times(0); + EXPECT_CALL(*lfs_, Close(2)).Times(0); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) - .Times(0); + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).Times(0); // chunk's metapage should not be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, metapagesize_)) @@ -3545,18 +2889,15 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest9) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnErrorTest - * case:修改correctedSn时失败 - * 预期结果:返回失败,correctedSn的值未改变 + * Case: Modifying correctedSn fails + * Expected result: Failure returned and the value of correctedSn is unchanged */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest1) { // initialize @@ -3582,18 +2923,15 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest1) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnErrorTest - * case:回收snapshot的chunk的时候失败 - * 预期结果:返回失败 + * Case: Recycling the snapshot's chunk file fails + * Expected result: Failure returned */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest2) { // initialize @@ -3605,11 +2943,9 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest2) { // fileSn > correctedSn SequenceNum fileSn = 3; // snapshot will be closed - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) - .WillOnce(Return(-1)); + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).WillOnce(Return(-1)); // chunk's metapage will be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, metapagesize_)) @@ -3617,26 +2953,26 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest2) { EXPECT_EQ(CSErrorCode::InternalError, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); }
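CreateCloneChunkTest, which follows, enumerates six parameter/conflict cases. The pure argument checks from Case1 can be sketched as below (the signature is assumed and kLocationLimit's value is illustrative; only the checks themselves mirror the test's expectations):

```cpp
#include <cstddef>
#include <cstdint>
#include <string>

enum class Code { Success, InvalidArgError };

constexpr std::size_t kLocationLimit = 3000;  // illustrative limit only

// Case1 of the test: a size differing from the configured chunk size,
// sn == 0, an empty location, or an over-long location (which would push
// the encoded metapage past the page size) are all rejected up front.
Code ValidateCloneArgs(uint64_t sn, uint64_t size, uint64_t chunkSize,
                       const std::string& location) {
    if (size != chunkSize || sn == 0 || location.empty() ||
        location.size() > kLocationLimit) {
        return Code::InvalidArgError;
    }
    return Code::Success;
}
```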
successful + * Case3: The specified chunk exists, and the parameters are consistent with the + * original chunk Expected result 3: Success returned Case4: The specified chunk + * exists, and the parameters are inconsistent with the original chunk Expected + * result 4: ChunkConflictError returned without changing the original chunk + * information Case5: The specified chunk exists, but the specified chunk size + * is inconsistent with the configuration Expected result 5: InvalidArgError + * returned without changing the original chunk information Case6: The specified + * chunk exists, but the chunk is not a clone chunk. The parameters are + * consistent with the chunk information Expected result: ChunkConflictError + * returned without changing the original chunk information */ TEST_P(CSDataStore_test, CreateCloneChunkTest) { // initialize @@ -3652,58 +2988,44 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { shared_ptr bitmap = make_shared(chunksize_ / blocksize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); - // case1:输入错误的参数 + // Case1: Input incorrect parameters { // size != chunksize EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - blocksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, blocksize_, location)); // sn == 0 EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->CreateCloneChunk(id, - 0, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, 0, correctedSn, chunksize_, location)); // location is empty - EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, - "")); + EXPECT_EQ( + CSErrorCode::InvalidArgError, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, "")); } - // case2:指定的chunk不存在,指定chunksize与配置一致 + // Case2: The specified chunk does not exist, the specified chunksize is + // consistent with the configuration { // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); - // 检查生成的clone chunk信息 + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(id, info.chunkId); ASSERT_EQ(sn, info.curSn); @@ -3714,15 +3036,13 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case3:指定的chunk存在,参数与原chunk一致 + // Case3: The specified chunk exists, and the parameters are consistent with + // the original chunk { EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, 
sn, correctedSn, chunksize_, location)); - // 检查生成的clone chunk信息 + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(id, info.chunkId); ASSERT_EQ(sn, info.curSn); @@ -3733,31 +3053,23 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case4:指定的chunk存在,参数与原chunk不一致 - // 返回ChunkConflictError,但是不会改变原chunk信息 + // Case4: The specified chunk exists, and the parameters are inconsistent + // with the original chunk Returns ChunkConflictError, but does not change + // the original chunk information { - // 版本不一致 + // Version inconsistency EXPECT_EQ(CSErrorCode::ChunkConflictError, - dataStore->CreateCloneChunk(id, - sn + 1, - correctedSn, - chunksize_, - location)); - // correctedSn不一致 + dataStore->CreateCloneChunk(id, sn + 1, correctedSn, + chunksize_, location)); + // Inconsistent correctedSn EXPECT_EQ(CSErrorCode::ChunkConflictError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn + 1, - chunksize_, - location)); - // location不一致 + dataStore->CreateCloneChunk(id, sn, correctedSn + 1, + chunksize_, location)); + // Inconsistent location EXPECT_EQ(CSErrorCode::ChunkConflictError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, "temp")); - // 检查生成的clone chunk信息 + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(id, info.chunkId); ASSERT_EQ(sn, info.curSn); @@ -3768,16 +3080,15 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case5:指定的chunk存在,指定chunksize与配置不一致 - // 返回InvalidArgError,但是不会改变原chunk信息 + // Case5: The specified chunk exists, but the specified chunksize is + // inconsistent with the configuration Returns InvalidArgError, but does not + // change the original chunk information { - EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_ + metapagesize_, - location)); - // 检查生成的clone chunk信息 + EXPECT_EQ( + CSErrorCode::InvalidArgError, + dataStore->CreateCloneChunk(id, sn, correctedSn, + chunksize_ + metapagesize_, location)); + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(id, info.chunkId); ASSERT_EQ(sn, info.curSn); @@ -3788,39 +3099,33 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case6:已存在chunk,chunk不是clone chunk + // Case6: Chunk already exists, chunk is not a clone chunk { - // location 为空 + // location is empty EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->CreateCloneChunk(1, // id 2, // sn 0, // correctedSn - chunksize_, - "")); + chunksize_, "")); - // location 不为空 + // location is not empty EXPECT_EQ(CSErrorCode::ChunkConflictError, dataStore->CreateCloneChunk(1, // id 2, // sn 0, // correctedSn - chunksize_, - location)); + chunksize_, location)); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); } /** * CreateCloneChunkErrorTest - * case:chunk不存在,调chunkFile->Open的时候失败 - * 预期结果:创建clone chunk失败 + * Case: chunk does not exist, 
failed when calling chunkFile->Open + * Expected result: Failed to create clone chunk */ TEST_P(CSDataStore_test, CreateCloneChunkErrorTest) { // initialize @@ -3832,47 +3137,40 @@ TEST_P(CSDataStore_test, CreateCloneChunkErrorTest) { SequenceNum correctedSn = 2; CSChunkInfo info; // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunk file pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(-UT_ERRNO)); - EXPECT_EQ(CSErrorCode::InternalError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, - location)); - // 检查生成的clone chunk信息 + EXPECT_EQ( + CSErrorCode::InternalError, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::ChunkNotExistError, dataStore->GetChunkInfo(id, &info)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * PasteChunkTest - * case1:chunk 不存在 - * 预期结果1:返回ChunkNotExistError - * case2:chunk存在,请求偏移超过chunk文件大小或偏移长度未对齐 - * 预期结果2:返回InvalidArgError - * case3:chunk存在,但不是clone chunk - * 预期结果3:返回成功 - * case4:chunk存在,且是clone chunk,写入区域之前未写过 - * 预期结果4:写入数据并更新bitmap - * case5:chunk存在,且是clone chunk,写入区域之前已写过 - * 预期结果5:无数据写入,且不会更新bitmap - * case6:chunk存在,且是clone chunk,部分区域已写过,部分未写过 - * 预期结果6:只写入未写过数据,并更新bitmap - * case7:遍写整个chunk - * 预期结果7:数据写入未写过区域,然后clone chunk会被转为普通chunk + * Case1: Chunk does not exist + * Expected result 1: ChunkNotExistError returned + * Case2: chunk exists, requested offset exceeds chunk file size or offset + * length is not aligned Expected result 2: InvalidArgError returned Case3: + * chunk exists, but not clone chunk Expected result 3: Success returned Case4: + * chunk exists and is a clone chunk, which has not been written before writing + * to the region Expected result 4: Write data and update bitmap Case5: chunk + * exists and is a clone chunk, which has been written before writing to the + * region Expected result 5: No data written and Bitmap will not be updated + * Case6: chunk exists and is a clone chunk. 
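Note: taken together, CreateCloneChunkTest and CreateCloneChunkErrorTest describe an idempotent create: bad arguments fail fast with InvalidArgError, re-issuing the identical request against an existing clone chunk succeeds, and any mismatch with the existing chunk is ChunkConflictError. A hedged restatement of the argument check those cases pin down (editor's code, not the datastore's; CSErrorCode and SequenceNum are the types used in the tests):

#include <cstdint>
#include <string>

CSErrorCode CheckCreateCloneArgs(SequenceNum sn, uint32_t size,
                                 uint32_t configuredChunkSize,
                                 const std::string& location) {
    if (size != configuredChunkSize) return CSErrorCode::InvalidArgError;  // case1/case5
    if (sn == 0) return CSErrorCode::InvalidArgError;                      // case1
    if (location.empty()) return CSErrorCode::InvalidArgError;             // case1/case6
    return CSErrorCode::Success;  // conflicts with an existing chunk are detected later
}

Treating the byte-identical retry as Success is what lets the clone RPC be resent safely after a timeout; anything else conflicts.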
Some areas have been written, while + * others have not Expected result 6: Only write unwritten data and update + * bitmap Case7: Overwrite the entire chunk Expected result 7: Data is written + * to an unwritten area, and then the clone chunk will be converted to a regular + * chunk */ TEST_P(CSDataStore_test, PasteChunkTest1) { // initialize @@ -3887,7 +3185,7 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { char* buf = new char[length]; memset(buf, 0, length); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { char chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); @@ -3895,90 +3193,68 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { make_shared(chunksize_ / blocksize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); } - // case1:chunk 不存在 + // Case1: chunk does not exist { id = 4; // not exist ASSERT_EQ(CSErrorCode::ChunkNotExistError, - dataStore->PasteChunk(id, - buf, - offset, - length)); + dataStore->PasteChunk(id, buf, offset, length)); } - // case2:chunk存在,请求偏移超过chunk文件大小或偏移长度未对齐 + // Case2: chunk exists, requested offset exceeds chunk file size or offset + // length is not aligned { id = 3; // not exist offset = chunksize_; ASSERT_EQ(CSErrorCode::InvalidArgError, - dataStore->PasteChunk(id, - buf, - offset, - length)); + dataStore->PasteChunk(id, buf, offset, length)); offset = blocksize_ - 1; length = blocksize_; ASSERT_EQ(CSErrorCode::InvalidArgError, - dataStore->PasteChunk(id, - buf, - offset, - length)); + dataStore->PasteChunk(id, buf, offset, length)); offset = blocksize_; length = blocksize_ + 1; ASSERT_EQ(CSErrorCode::InvalidArgError, - dataStore->PasteChunk(id, - buf, - offset, - length)); + dataStore->PasteChunk(id, buf, offset, length)); } - // case3:chunk存在,但不是clone chunk + // Case3: chunk exists, but not clone chunk { EXPECT_CALL(*lfs_, Write(_, Matcher(NotNull()), _, _)) .Times(0); - // 快照不存在 + // The snapshot does not exist id = 2; offset = 0; length = blocksize_; ASSERT_EQ(CSErrorCode::Success, - dataStore->PasteChunk(id, - buf, - offset, - length)); + dataStore->PasteChunk(id, buf, offset, length)); - // 快照存在 + // Snapshot exists id = 1; offset = 0; ASSERT_EQ(CSErrorCode::Success, - dataStore->PasteChunk(id, - buf, - offset, - length)); + dataStore->PasteChunk(id, buf, offset, length)); } - // case4:chunk存在,且是clone chunk,写入区域之前未写过 + // Case4: chunk exists and is a clone chunk, which has not been written + // before writing to the region { id = 3; // not exist offset 
= blocksize_; @@ -3991,11 +3267,8 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(1); ASSERT_EQ(CSErrorCode::Success, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // 检查paste后chunk的状态 + dataStore->PasteChunk(id, buf, offset, length)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0)); @@ -4003,7 +3276,8 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case5:chunk存在,且是clone chunk,写入区域之前已写过 + // Case5: chunk exists and is a clone chunk, which has been written before + // writing to the region { id = 3; // not exist offset = blocksize_; @@ -4015,23 +3289,22 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(0); ASSERT_EQ(CSErrorCode::Success, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // paste后,chunk的状态不变 + dataStore->PasteChunk(id, buf, offset, length)); + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0)); ASSERT_EQ(3, info.bitmap->NextClearBit(1)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case6:chunk存在,且是clone chunk,部分区域已写过,部分未写过 + // Case6: chunk exists and is a clone chunk. Some areas have been written, + // while others have not { id = 3; // not exist offset = 0; length = 4 * blocksize_; - // [2 * blocksize_, 4 * blocksize_)区域已写过,[0, blocksize_)为metapage + // [2 * blocksize_, 4 * blocksize_) area has been written, [0, + // blocksize_) is a metapage EXPECT_CALL(*lfs_, Write(4, Matcher(NotNull()), metapagesize_, blocksize_)) .Times(1); @@ -4043,21 +3316,21 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { .Times(1); ASSERT_EQ(CSErrorCode::Success, dataStore->PasteChunk(id, buf, offset, length)); - // paste后,chunk的状态不变 + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(4, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(4)); } - // case7:遍写整个chunk + // Case7: Overwrite the entire chunk { id = 3; // not exist offset = 0; length = chunksize_; - // [blocksize_, 4 * blocksize_)区域已写过,[0, blocksize_)为metapage - EXPECT_CALL(*lfs_, Write(4, - Matcher(NotNull()), + // [blocksize_, 4 * blocksize_) area has been written, [0, blocksize_) + // is a metapage + EXPECT_CALL(*lfs_, Write(4, Matcher(NotNull()), metapagesize_ + 4 * blocksize_, chunksize_ - 4 * blocksize_)) .Times(1); @@ -4065,33 +3338,26 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(1); ASSERT_EQ(CSErrorCode::Success, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // paste后,chunk的状态不变 + dataStore->PasteChunk(id, buf, offset, length)); + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, 
Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /* * PasteChunkErrorTest - * case1:写数据时失败 - * 预期结果1:返回InternalError,chunk状态不变 - * case2:更新metapage时失败 - * 预期结果2:返回InternalError,chunk状态不变 + * Case1: Failed to write data + * Expected result 1: InternalError returned, chunk status remains unchanged + * Case2: Failed to update metapage + * Expected result 2: InternalError returned, chunk status remains unchanged */ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { // initialize @@ -4106,7 +3372,7 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { char* buf = new char[length]; // NOLINT memset(buf, 0, length); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { char chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); @@ -4114,29 +3380,23 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { make_shared(chunksize_ / blocksize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); } - // case1:写数据时失败 + // Case1: Failed to write data { id = 3; // not exist offset = blocksize_; @@ -4149,16 +3409,13 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(0); ASSERT_EQ(CSErrorCode::InternalError, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // 检查paste后chunk的状态 + dataStore->PasteChunk(id, buf, offset, length)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case2:更新metapage时失败 + // Case2: Failed to update metapage { id = 3; // not exist offset = blocksize_; @@ -4171,29 +3428,22 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); ASSERT_EQ(CSErrorCode::InternalError, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // 检查paste后chunk的状态 + dataStore->PasteChunk(id, buf, offset, length)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + 
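Note: the PasteChunk cases above all reduce to one rule: a clone chunk's bitmap records which blocks already hold data, a paste writes only the still-clear ranges (rewriting the metapage whenever the bitmap changes), and once every block is set the chunk stops being a clone (isClone == false, bitmap == nullptr). A sketch of the range-splitting loop, using only the Bitmap calls these tests themselves rely on (NextClearBit, NextSetBit, Bitmap::NO_POS); the loop body is the editor's simplification, not the datastore's code:

// Assumes offset/length are already block-aligned, as case2 enforces.
uint32_t beginIndex = offset / blocksize_;
uint32_t endIndex = (offset + length) / blocksize_;  // exclusive
uint32_t cur = beginIndex;
while (cur < endIndex) {
    uint32_t clearBegin = bitmap->NextClearBit(cur);
    if (clearBegin == Bitmap::NO_POS || clearBegin >= endIndex) break;  // nothing left unwritten
    uint32_t clearEnd = bitmap->NextSetBit(clearBegin);
    if (clearEnd == Bitmap::NO_POS || clearEnd > endIndex) clearEnd = endIndex;
    // write blocks [clearBegin, clearEnd) from the source buffer, then set them
    cur = clearEnd;
}

This is why case5 expects zero Write calls and case6 expects exactly one data write per unwritten run.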
EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /* - * chunk不存在 + * Chunk does not exist */ TEST_P(CSDataStore_test, GetHashErrorTest1) { // initialize @@ -4205,21 +3455,15 @@ TEST_P(CSDataStore_test, GetHashErrorTest1) { // test chunk not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, - dataStore->GetChunkHash(id, - 0, - 4096, - &hash)); + dataStore->GetChunkHash(id, 0, 4096, &hash)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /* - * read报错 + * Read error */ TEST_P(CSDataStore_test, GetHashErrorTest2) { // initialize @@ -4231,23 +3475,16 @@ TEST_P(CSDataStore_test, GetHashErrorTest2) { off_t offset = 0; size_t length = metapagesize_ + chunksize_; // test read chunk failed - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, 4096)) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, 4096)).WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->GetChunkHash(id, - 0, - 4096, - &hash)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + dataStore->GetChunkHash(id, 0, 4096, &hash)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /* - * 获取datastore状态测试 + * Obtain Datastore Status Test */ TEST_P(CSDataStore_test, GetStatusTest) { // initialize @@ -4259,17 +3496,13 @@ TEST_P(CSDataStore_test, GetStatusTest) { ASSERT_EQ(2, status.chunkFileCount); // ASSERT_EQ(1, status.snapshotCount); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } INSTANTIATE_TEST_CASE_P( - CSDataStoreTest, - CSDataStore_test, + CSDataStoreTest, CSDataStore_test, ::testing::Values( // chunk size block size, metapagesize std::make_tuple(16U * 1024 * 1024, 4096U, 4096U), diff --git a/test/chunkserver/datastore/file_helper_unittest.cpp b/test/chunkserver/datastore/file_helper_unittest.cpp index 0f7ca39b95..359d7303d7 100644 --- a/test/chunkserver/datastore/file_helper_unittest.cpp +++ b/test/chunkserver/datastore/file_helper_unittest.cpp @@ -20,10 +20,11 @@ * Author: yangyaokai */ -#include #include -#include +#include + #include +#include #include "src/chunkserver/datastore/datastore_file_helper.h" #include "test/fs/mock_local_filesystem.h" @@ -32,17 +33,17 @@ using curve::fs::LocalFileSystem; using curve::fs::MockLocalFileSystem; using ::testing::_; +using ::testing::DoAll; +using ::testing::ElementsAre; using ::testing::Ge; using ::testing::Gt; -using ::testing::Return; -using ::testing::NotNull; using ::testing::Mock; -using ::testing::Truly; -using ::testing::DoAll; +using ::testing::NotNull; +using ::testing::Return; using ::testing::ReturnArg; -using ::testing::ElementsAre; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; +using ::testing::Truly; namespace curve { namespace chunkserver { @@ -54,6 +55,7 @@ class FileHelper_MockTest : public testing::Test { fileHelper_ = std::make_shared(fs_); } void TearDown() {} + protected: std::shared_ptr fs_; std::shared_ptr fileHelper_; @@ -64,29 +66,26 @@ TEST_F(FileHelper_MockTest, ListFilesTest) { vector chunkFiles; vector snapFiles; - // 
case1:List失败,返回-1 - EXPECT_CALL(*fs_, List(_, _)) - .WillOnce(Return(-1)); + // Case1: List failed, returned -1 + EXPECT_CALL(*fs_, List(_, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, fileHelper_->ListFiles(baseDir, &chunkFiles, &snapFiles)); - // 如果返回ENOENT错误,直接返回成功 - EXPECT_CALL(*fs_, List(_, _)) - .WillOnce(Return(-ENOENT)); + // If an ENOENT error is returned, success is returned directly + EXPECT_CALL(*fs_, List(_, _)).WillOnce(Return(-ENOENT)); ASSERT_EQ(0, fileHelper_->ListFiles(baseDir, &chunkFiles, &snapFiles)); vector files; string chunk1 = "chunk_1"; string chunk2 = "chunk_2"; string snap1 = "chunk_1_snap_1"; - string other = "chunk_1_S"; // 非法文件名 + string other = "chunk_1_S"; // Illegal file name files.emplace_back(chunk1); files.emplace_back(chunk2); files.emplace_back(snap1); files.emplace_back(other); EXPECT_CALL(*fs_, List(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); - // case2:List成功,返回chunk文件和snapshot文件 + // Case2: List successful, returning chunk file and snapshot file ASSERT_EQ(0, fileHelper_->ListFiles(baseDir, &chunkFiles, &snapFiles)); ASSERT_EQ(2, chunkFiles.size()); ASSERT_STREQ(chunk1.c_str(), chunkFiles[0].c_str()); @@ -94,7 +93,7 @@ TEST_F(FileHelper_MockTest, ListFilesTest) { ASSERT_EQ(1, snapFiles.size()); ASSERT_STREQ(snap1.c_str(), snapFiles[0].c_str()); - // case3:允许vector为空指针 + // Case3: Allow vector to be a null pointer ASSERT_EQ(0, fileHelper_->ListFiles(baseDir, nullptr, nullptr)); } diff --git a/test/chunkserver/datastore/filepool_mock_unittest.cpp b/test/chunkserver/datastore/filepool_mock_unittest.cpp index 5a70d46551..182b1e90ac 100644 --- a/test/chunkserver/datastore/filepool_mock_unittest.cpp +++ b/test/chunkserver/datastore/filepool_mock_unittest.cpp @@ -20,943 +20,844 @@ * Author: yangyaokai */ +#include #include #include #include -#include + #include #include #include "include/chunkserver/chunkserver_common.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/common/crc32.h" #include "src/common/curve_define.h" -#include "src/chunkserver/datastore/file_pool.h" #include "test/fs/mock_local_filesystem.h" using ::testing::_; +using ::testing::DoAll; +using ::testing::ElementsAre; using ::testing::Ge; using ::testing::Gt; -using ::testing::Return; -using ::testing::NotNull; +using ::testing::Invoke; using ::testing::Matcher; using ::testing::Mock; -using ::testing::Truly; -using ::testing::DoAll; +using ::testing::NotNull; +using ::testing::Return; using ::testing::ReturnArg; -using ::testing::Invoke; -using ::testing::ElementsAre; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; +using ::testing::Truly; -using curve::fs::MockLocalFileSystem; using curve::common::kFilePoolMagic; +using curve::fs::MockLocalFileSystem; + +namespace curve +{ + namespace chunkserver + { + + const ChunkSizeType CHUNK_SIZE = 16 * 1024 * 1024; + const PageSizeType PAGE_SIZE = 4096; + const uint32_t metaFileSize = 4096; + const uint32_t blockSize = 4096; + const uint32_t fileSize = CHUNK_SIZE + PAGE_SIZE; + const std::string poolDir = "./chunkfilepool_dat"; // NOLINT + const std::string poolMetaPath = "./chunkfilepool_dat.meta"; // NOLINT + const std::string filePath1 = poolDir + "/1"; // NOLINT + const std::string targetPath = "./data/chunk_1"; // NOLINT + const char *kChunkSize = "chunkSize"; + const char *kMetaPageSize = "metaPageSize"; + const char *kChunkFilePoolPath = "chunkfilepool_path"; + const char *kCRC = "crc"; + const char *kBlockSize = 
"blockSize"; + + class CSChunkfilePoolMockTest : public testing::Test + { + public: + void SetUp() { lfs_ = std::make_shared(); } + + void TearDown() {} + + static Json::Value GenerateMetaJson(bool hasBlockSize = false) + { + // JSON format for normal meta files + FilePoolMeta meta; + meta.chunkSize = CHUNK_SIZE; + meta.metaPageSize = PAGE_SIZE; + meta.hasBlockSize = hasBlockSize; + if (hasBlockSize) + { + meta.blockSize = blockSize; + } + meta.filePoolPath = poolDir; + + Json::Value jsonContent; + jsonContent[kChunkSize] = CHUNK_SIZE; + jsonContent[kMetaPageSize] = PAGE_SIZE; + + if (hasBlockSize) + { + jsonContent[kBlockSize] = blockSize; + } + + jsonContent[kChunkFilePoolPath] = poolDir; + jsonContent[kCRC] = meta.Crc32(); + return jsonContent; + } + + void FakeMetaFile() + { + EXPECT_CALL(*lfs_, FileExists(poolMetaPath)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) + .WillOnce(Return(100)); + EXPECT_CALL(*lfs_, Read(100, NotNull(), 0, metaFileSize)) + .WillOnce(Invoke( + [this](int /*fd*/, char *buf, uint64_t offset, int length) + { + EXPECT_EQ(offset, 0); + EXPECT_EQ(length, metaFileSize); + + Json::Value root = GenerateMetaJson(); + auto json = root.toStyledString(); + strncpy(buf, json.c_str(), json.size() + 1); + return metaFileSize; + })); + + EXPECT_CALL(*lfs_, Close(100)).Times(1); + } + + void FakePool(FilePool *pool, const FilePoolOptions &options, + uint32_t fileNum) + { + if (options.getFileFromPool) + { + FakeMetaFile(); + std::vector fileNames; + struct stat fileInfo; + fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE; + for (int i = 1; i <= fileNum; ++i) + { + std::string name = std::to_string(i); + std::string filePath = poolDir + "/" + name; + fileNames.push_back(name); + EXPECT_CALL(*lfs_, FileExists(filePath)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(filePath, _)).WillOnce(Return(i)); + EXPECT_CALL(*lfs_, Fstat(i, NotNull())) + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_CALL(*lfs_, Close(i)).Times(1); + } + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, List(_, _)) + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + + ASSERT_EQ(true, pool->Initialize(options)); + ASSERT_EQ(fileNum, pool->Size()); + } + else + { + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + ASSERT_EQ(true, pool->Initialize(options)); + } + } + + protected: + std::shared_ptr lfs_; + }; + + // Exception testing for PersistEnCodeMetaInfo interface + TEST_F(CSChunkfilePoolMockTest, PersistEnCodeMetaInfoTest) + { + FilePoolMeta meta; + meta.chunkSize = CHUNK_SIZE; + meta.metaPageSize = PAGE_SIZE; + meta.hasBlockSize = false; + meta.filePoolPath = poolDir; + + // open failed + { + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Write(_, Matcher(_), _, _)).Times(0); + EXPECT_CALL(*lfs_, Close(_)).Times(0); + ASSERT_EQ(-1, FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta, + poolMetaPath)); + } + // open successful, write failed + { + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, 4096)) + .WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta, + poolMetaPath)); + } + // open successful, write successful + { + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, 4096)) + .WillOnce(Return(4096)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ( + 0, 
FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta, poolMetaPath)); + } + } + + // Exception testing for DecodeMetaInfoFromMetaFile interface + TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) + { + FilePoolMeta meta; + + // open failed + { + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Read(_, _, _, _)).Times(0); + EXPECT_CALL(*lfs_, Close(_)).Times(0); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // read failed + { + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // read successful, parsing Json format failed + { + char buf[metaFileSize] = {0}; + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // parsing Json format succeeded, chunksize is empty + { + char buf[metaFileSize] = {0}; + Json::Value root = GenerateMetaJson(); + root.removeMember(kChunkSize); + memcpy(buf, root.toStyledString().c_str(), + root.toStyledString().size()); + + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // parsing Json format succeeded, metapagesize is empty + { + char buf[metaFileSize] = {0}; + Json::Value root = GenerateMetaJson(); + root.removeMember(kMetaPageSize); + memcpy(buf, root.toStyledString().c_str(), + root.toStyledString().size()); + + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // parsing Json format succeeded, kFilePoolPath is empty + { + char buf[metaFileSize] = {0}; + Json::Value root = GenerateMetaJson(); + root.removeMember(kChunkFilePoolPath); + memcpy(buf, root.toStyledString().c_str(), + root.toStyledString().size()); + + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // Successfully parsed Json format, kCRC is empty + { + char buf[metaFileSize] = {0}; + Json::Value root = GenerateMetaJson(); + root.removeMember(kCRC); + memcpy(buf, root.toStyledString().c_str(), + root.toStyledString().size()); + + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + 
Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // Successfully parsed Json format, crc mismatch + { + char buf[metaFileSize] = {0}; + Json::Value root = GenerateMetaJson(); + root[kCRC] = 0; + memcpy(buf, root.toStyledString().c_str(), + root.toStyledString().size()); + + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // Normal process + { + char buf[metaFileSize] = {0}; + Json::Value root = GenerateMetaJson(); + memcpy(buf, root.toStyledString().c_str(), + root.toStyledString().size()); + + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(0, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } -namespace curve { -namespace chunkserver { - -const ChunkSizeType CHUNK_SIZE = 16 * 1024 * 1024; -const PageSizeType PAGE_SIZE = 4096; -const uint32_t metaFileSize = 4096; -const uint32_t blockSize = 4096; -const uint32_t fileSize = CHUNK_SIZE + PAGE_SIZE; -const std::string poolDir = "./chunkfilepool_dat"; // NOLINT -const std::string poolMetaPath = "./chunkfilepool_dat.meta"; // NOLINT -const std::string filePath1 = poolDir + "/1"; // NOLINT -const std::string targetPath = "./data/chunk_1"; // NOLINT -const char* kChunkSize = "chunkSize"; -const char* kMetaPageSize = "metaPageSize"; -const char* kChunkFilePoolPath = "chunkfilepool_path"; -const char* kCRC = "crc"; -const char* kBlockSize = "blockSize"; - -class CSChunkfilePoolMockTest : public testing::Test { - public: - void SetUp() { - lfs_ = std::make_shared(); - } - - void TearDown() {} - - static Json::Value GenerateMetaJson(bool hasBlockSize = false) { - // 正常的meta文件的json格式 - FilePoolMeta meta; - meta.chunkSize = CHUNK_SIZE; - meta.metaPageSize = PAGE_SIZE; - meta.hasBlockSize = hasBlockSize; - if (hasBlockSize) { - meta.blockSize = blockSize; + // Normal process + { + char buf[metaFileSize] = {0}; + Json::Value root = GenerateMetaJson(true); + memcpy(buf, root.toStyledString().c_str(), + root.toStyledString().size()); + + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(0, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } } - meta.filePoolPath = poolDir; - Json::Value jsonContent; - jsonContent[kChunkSize] = CHUNK_SIZE; - jsonContent[kMetaPageSize] = PAGE_SIZE; + TEST_F(CSChunkfilePoolMockTest, InitializeTest) + { + // Initialize options + FilePoolOptions options; + options.getFileFromPool = true; + memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); + options.fileSize = CHUNK_SIZE; + options.metaPageSize = PAGE_SIZE; + memcpy(options.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); + options.metaFileSize = metaFileSize; + options.retryTimes = 3; + + /****************getFileFromPool is 
true**************/ + // Failed while checking valid + { + // DecodeMetaInfoFromMetaFile has been tested separately on it + // Here, select a set of uncommon examples from the above to test + // parsing JSON format failed + FilePool pool(lfs_); + char buf[metaFileSize] = {0}; + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) + .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, FileExists(poolMetaPath)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(false, pool.Initialize(options)); + } + // getFileFromPool is true, checkvalid succeeded, current directory does not + // exist + { + FilePool pool(lfs_); + FakeMetaFile(); + EXPECT_CALL(*lfs_, DirExists(_)) + .WillOnce(Return(false)); + ASSERT_EQ(true, pool.Initialize(options)); + pool.WaitoFormatDoneForTesting(); + } + // The current directory exists, list directory failed + { + FilePool pool(lfs_); + FakeMetaFile(); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, List(_, _)).WillOnce(Return(-1)); + ASSERT_EQ(false, pool.Initialize(options)); + } + // list directory successful, file name contains non numeric characters + { + FilePool pool(lfs_); + FakeMetaFile(); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + std::vector fileNames; + fileNames.push_back("aaa"); + EXPECT_CALL(*lfs_, List(_, _)) + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + ASSERT_EQ(false, pool.Initialize(options)); + } + // list directory succeeded, it contains objects of non ordinary file types + { + FilePool pool(lfs_); + FakeMetaFile(); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + std::vector fileNames; + fileNames.push_back("1"); + EXPECT_CALL(*lfs_, List(_, _)) + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(filePath1)).WillOnce(Return(false)); + ASSERT_EQ(false, pool.Initialize(options)); + } + // list directory successful, open file failed + { + FilePool pool(lfs_); + FakeMetaFile(); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + std::vector fileNames; + fileNames.push_back("1"); + EXPECT_CALL(*lfs_, List(_, _)) + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(filePath1)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(filePath1, _)).WillOnce(Return(-1)); + ASSERT_EQ(false, pool.Initialize(options)); + } + // Failed to retrieve stat file information + { + FilePool pool(lfs_); + FakeMetaFile(); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + std::vector fileNames; + fileNames.push_back("1"); + EXPECT_CALL(*lfs_, List(_, _)) + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(filePath1)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(filePath1, _)).WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Fstat(2, NotNull())).WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + ASSERT_EQ(false, pool.Initialize(options)); + } + // stat file information successful, file size mismatch + { + FilePool pool(lfs_); + FakeMetaFile(); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + std::vector fileNames; + fileNames.push_back("1"); + EXPECT_CALL(*lfs_, List(_, _)) + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(filePath1)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(filePath1, _)).WillOnce(Return(2)); + + struct stat fileInfo; + 
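Note: the Initialize cases enumerated above pin down the directory scan: every name listed in the pool directory must be purely numeric, must be a regular file, must open, and must stat to exactly chunk size plus metapage size, otherwise Initialize fails as a whole. A minimal sketch of the per-file size check (editor's code; the Fstat/Close calls mirror the LocalFileSystem interface mocked here):

#include <sys/stat.h>
#include <memory>

// fd was returned by lfs->Open(filePath); constants as in this test file.
bool CheckPoolFileSize(const std::shared_ptr<curve::fs::LocalFileSystem>& lfs, int fd) {
    struct stat info;
    if (lfs->Fstat(fd, &info) != 0) return false;   // stat failure fails Initialize
    return info.st_size == CHUNK_SIZE + PAGE_SIZE;  // any other size is rejected
}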
fileInfo.st_size = CHUNK_SIZE; + EXPECT_CALL(*lfs_, Fstat(2, NotNull())) + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + ASSERT_EQ(false, pool.Initialize(options)); + } + // File information matching + { + FilePool pool(lfs_); + FakeMetaFile(); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + std::vector fileNames; + fileNames.push_back("1"); + EXPECT_CALL(*lfs_, List(_, _)) + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(filePath1)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(filePath1, _)).WillOnce(Return(2)); + + struct stat fileInfo; + fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE; + EXPECT_CALL(*lfs_, Fstat(2, NotNull())) + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + ASSERT_EQ(true, pool.Initialize(options)); + ASSERT_EQ(1, pool.Size()); + } - if (hasBlockSize) { - jsonContent[kBlockSize] = blockSize; + /****************getFileFromPool is false**************/ + options.getFileFromPool = false; + // The current directory does not exist, creating directory failed + { + FilePool pool(lfs_); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(false)); + EXPECT_CALL(*lfs_, Mkdir(_)).WillOnce(Return(-1)); + ASSERT_EQ(false, pool.Initialize(options)); + } + // The current directory does not exist, creating the directory succeeded + { + FilePool pool(lfs_); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(false)); + EXPECT_CALL(*lfs_, Mkdir(_)).WillOnce(Return(0)); + ASSERT_EQ(true, pool.Initialize(options)); + } + // The current directory exists + { + FilePool pool(lfs_); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + ASSERT_EQ(true, pool.Initialize(options)); + } } - jsonContent[kChunkFilePoolPath] = poolDir; - jsonContent[kCRC] = meta.Crc32(); - return jsonContent; - } - - void FakeMetaFile() { - EXPECT_CALL(*lfs_, FileExists(poolMetaPath)).WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(100)); - EXPECT_CALL(*lfs_, Read(100, NotNull(), 0, metaFileSize)) - .WillOnce(Invoke( - [this](int /*fd*/, char* buf, uint64_t offset, int length) { - EXPECT_EQ(offset, 0); - EXPECT_EQ(length, metaFileSize); - - Json::Value root = GenerateMetaJson(); - auto json = root.toStyledString(); - strncpy(buf, json.c_str(), json.size() + 1); - return metaFileSize; - })); - - EXPECT_CALL(*lfs_, Close(100)) - .Times(1); - } - - void FakePool(FilePool* pool, - const FilePoolOptions& options, - uint32_t fileNum) { - if (options.getFileFromPool) { - FakeMetaFile(); - std::vector fileNames; - struct stat fileInfo; - fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE; - for (int i = 1; i <= fileNum; ++i) { - std::string name = std::to_string(i); - std::string filePath = poolDir + "/" + name; - fileNames.push_back(name); - EXPECT_CALL(*lfs_, FileExists(filePath)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(filePath, _)) - .WillOnce(Return(i)); - EXPECT_CALL(*lfs_, Fstat(i, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(i)) - .Times(1); - } - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - - ASSERT_EQ(true, pool->Initialize(options)); - ASSERT_EQ(fileNum, pool->Size()); - } else { - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - ASSERT_EQ(true, pool->Initialize(options)); + TEST_F(CSChunkfilePoolMockTest, GetFileTest) + { + // 
Initialize options + FilePoolOptions options; + options.getFileFromPool = true; + memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); + options.fileSize = CHUNK_SIZE; + options.metaPageSize = PAGE_SIZE; + memcpy(options.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); + options.metaFileSize = metaFileSize; + int retryTimes = 3; + options.retryTimes = retryTimes; + + char metapage[PAGE_SIZE] = {0}; + + /****************getFileFromPool is true**************/ + // There is no remaining chunk situation + { + FilePool pool(lfs_); + FakePool(&pool, options, 0); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); + } + // Chunk present, open failed + { + FilePool pool(lfs_); + FakePool(&pool, options, 10); + EXPECT_CALL(*lfs_, Open(_, _)) + .Times(retryTimes) + .WillRepeatedly(Return(-1)); + EXPECT_CALL(*lfs_, Close(_)).Times(0); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); + ASSERT_EQ(10 - retryTimes, pool.Size()); + } + // Chunk exists, write failed + { + FilePool pool(lfs_); + FakePool(&pool, options, 10); + EXPECT_CALL(*lfs_, Open(_, _)) + .Times(retryTimes) + .WillRepeatedly(Return(1)); + EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) + .Times(retryTimes) + .WillRepeatedly(Return(-1)); + EXPECT_CALL(*lfs_, Close(1)).Times(retryTimes); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); + ASSERT_EQ(10 - retryTimes, pool.Size()); + } + // Chunk present, fsync failed + { + FilePool pool(lfs_); + FakePool(&pool, options, 10); + EXPECT_CALL(*lfs_, Open(_, _)) + .Times(retryTimes) + .WillRepeatedly(Return(1)); + EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) + .Times(retryTimes) + .WillRepeatedly(Return(PAGE_SIZE)); + EXPECT_CALL(*lfs_, Fsync(1)) + .Times(retryTimes) + .WillRepeatedly(Return(-1)); + EXPECT_CALL(*lfs_, Close(1)).Times(retryTimes); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); + ASSERT_EQ(10 - retryTimes, pool.Size()); + } + // Chunk exists, closing failed + { + FilePool pool(lfs_); + FakePool(&pool, options, 10); + EXPECT_CALL(*lfs_, Open(_, _)) + .Times(retryTimes) + .WillRepeatedly(Return(1)); + EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) + .Times(retryTimes) + .WillRepeatedly(Return(PAGE_SIZE)); + EXPECT_CALL(*lfs_, Fsync(1)) + .Times(retryTimes) + .WillRepeatedly(Return(0)); + EXPECT_CALL(*lfs_, Close(1)) + .Times(retryTimes) + .WillRepeatedly(Return(-1)); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); + ASSERT_EQ(10 - retryTimes, pool.Size()); + } + // Chunk exists, EEXIST error returned when renaming + { + FilePool pool(lfs_); + FakePool(&pool, options, 10); + EXPECT_CALL(*lfs_, Open(_, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) + .WillOnce(Return(PAGE_SIZE)); + EXPECT_CALL(*lfs_, Fsync(1)).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Rename(_, _, _)).WillOnce(Return(-EEXIST)); + ASSERT_EQ(-EEXIST, pool.GetFile(targetPath, metapage)); + ASSERT_EQ(9, pool.Size()); + } + // Chunk exists, non EEXIST error returned when renaming + { + FilePool pool(lfs_); + FakePool(&pool, options, 10); + EXPECT_CALL(*lfs_, Open(_, _)) + .Times(retryTimes) + .WillRepeatedly(Return(1)); + EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) + .Times(retryTimes) + .WillRepeatedly(Return(PAGE_SIZE)); + EXPECT_CALL(*lfs_, Fsync(1)) + .Times(retryTimes) + .WillRepeatedly(Return(0)); + EXPECT_CALL(*lfs_, Close(1)) + .Times(retryTimes) + .WillRepeatedly(Return(0)); + EXPECT_CALL(*lfs_, Rename(_, _, _)) + .Times(retryTimes) + .WillRepeatedly(Return(-1)); + 
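Note: the GetFile cases assert two things at once: the return code, and that every failed attempt permanently consumes one pooled file, so Size() drops from 10 to 10 - retryTimes. The one exception is -EEXIST from Rename, which is returned after a single attempt because retrying cannot help. A hedged sketch of that loop (editor's reconstruction; both helper names are hypothetical):

int ret = -1;
for (int i = 0; i < retryTimes; ++i) {
    std::string srcFile = PopOneFileFromPool();                   // hypothetical: each attempt consumes a file
    ret = WriteMetaPageAndRename(srcFile, targetPath, metapage);  // hypothetical helper
    if (ret == 0) break;        // success: the target now owns the file
    if (ret == -EEXIST) break;  // target already exists: abort immediately
}
return ret;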
ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); + ASSERT_EQ(10 - retryTimes, pool.Size()); + } + // Chunk exists, rename successful + { + FilePool pool(lfs_); + FakePool(&pool, options, 10); + EXPECT_CALL(*lfs_, Open(_, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) + .WillOnce(Return(PAGE_SIZE)); + EXPECT_CALL(*lfs_, Fsync(1)).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Rename(_, _, _)).WillOnce(Return(0)); + ASSERT_EQ(0, pool.GetFile(targetPath, metapage)); + ASSERT_EQ(9, pool.Size()); + } + + options.getFileFromPool = false; + /****************getFileFromPool is false**************/ + // Failed on open + { + FilePool pool(lfs_); + FakePool(&pool, options, 0); + EXPECT_CALL(*lfs_, Open(_, _)) + .Times(retryTimes) + .WillRepeatedly(Return(-1)); + EXPECT_CALL(*lfs_, Close(1)).Times(0); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); + } + // Failed during fallocate + { + FilePool pool(lfs_); + FakePool(&pool, options, 0); + EXPECT_CALL(*lfs_, Open(_, _)) + .Times(retryTimes) + .WillRepeatedly(Return(1)); + EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize)) + .Times(retryTimes) + .WillRepeatedly(Return(-1)); + EXPECT_CALL(*lfs_, Close(1)).Times(retryTimes); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); + } + // Failed while writing + { + FilePool pool(lfs_); + FakePool(&pool, options, 0); + EXPECT_CALL(*lfs_, Open(_, _)) + .Times(retryTimes) + .WillRepeatedly(Return(1)); + EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize)) + .Times(retryTimes) + .WillRepeatedly(Return(0)); + EXPECT_CALL(*lfs_, + Write(1, Matcher(NotNull()), 0, fileSize)) + .Times(retryTimes) + .WillRepeatedly(Return(-1)); + EXPECT_CALL(*lfs_, Close(1)).Times(retryTimes); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); + } + // Fsync failed + { + FilePool pool(lfs_); + FakePool(&pool, options, 0); + EXPECT_CALL(*lfs_, Open(_, _)) + .Times(retryTimes) + .WillRepeatedly(Return(1)); + EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize)) + .Times(retryTimes) + .WillRepeatedly(Return(0)); + EXPECT_CALL(*lfs_, + Write(1, Matcher(NotNull()), 0, fileSize)) + .Times(retryTimes) + .WillRepeatedly(Return(fileSize)); + EXPECT_CALL(*lfs_, Fsync(1)) + .Times(retryTimes) + .WillRepeatedly(Return(-1)); + EXPECT_CALL(*lfs_, Close(1)).Times(retryTimes); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); + } + // Failed to close + { + FilePool pool(lfs_); + FakePool(&pool, options, 0); + EXPECT_CALL(*lfs_, Open(_, _)) + .Times(retryTimes) + .WillRepeatedly(Return(1)); + EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize)) + .Times(retryTimes) + .WillRepeatedly(Return(0)); + EXPECT_CALL(*lfs_, + Write(1, Matcher(NotNull()), 0, fileSize)) + .Times(retryTimes) + .WillRepeatedly(Return(fileSize)); + EXPECT_CALL(*lfs_, Fsync(1)) + .Times(retryTimes) + .WillRepeatedly(Return(0)); + EXPECT_CALL(*lfs_, Close(1)) + .Times(retryTimes) + .WillRepeatedly(Return(-1)); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); + } + } + + TEST_F(CSChunkfilePoolMockTest, RecycleFileTest) + { + // Initialize options + FilePoolOptions options; + options.getFileFromPool = true; + memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); + options.fileSize = CHUNK_SIZE; + options.metaPageSize = PAGE_SIZE; + memcpy(options.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); + options.metaFileSize = metaFileSize; + int retryTimes = 3; + options.retryTimes = retryTimes; + + /****************getFileFromPool is false**************/ + 
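Note: RecycleFileTest, which begins here, distinguishes the two recycling modes. Without a pool (getFileFromPool == false) the file is simply Delete()d and the Delete result is returned. With a pool, a file that cannot even be opened is unlinked as a fallback, and that Delete's result becomes the return value, as the open-failure case shows. A minimal sketch of the branch (editor's code; the open flags and the elided validation step are assumptions, since the real RecycleFile also checks the size and renames usable files back into the pool directory):

#include <memory>
#include <string>

int RecycleSketch(const std::shared_ptr<curve::fs::LocalFileSystem>& lfs,
                  bool getFileFromPool, const std::string& path) {
    if (!getFileFromPool)
        return lfs->Delete(path);  // no pool: just unlink
    int fd = lfs->Open(path, 0);   // flags assumed
    if (fd < 0)
        return lfs->Delete(path);  // unusable file: best-effort unlink, return its result
    // ... validate the size, Close(fd), move the file back into the pool ...
    return 0;
}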
options.getFileFromPool = false; + // Failed to delete file + { + FilePool pool(lfs_); + FakePool(&pool, options, 0); + EXPECT_CALL(*lfs_, Delete(filePath1)).WillOnce(Return(-1)); + ASSERT_EQ(-1, pool.RecycleFile(filePath1)); + } + // Successfully deleted file + { + FilePool pool(lfs_); + FakePool(&pool, options, 0); + EXPECT_CALL(*lfs_, Delete(filePath1)).WillOnce(Return(0)); + ASSERT_EQ(0, pool.RecycleFile(filePath1)); + } -// PersistEnCodeMetaInfo接口的异常测试 -TEST_F(CSChunkfilePoolMockTest, PersistEnCodeMetaInfoTest) { - FilePoolMeta meta; - meta.chunkSize = CHUNK_SIZE; - meta.metaPageSize = PAGE_SIZE; - meta.hasBlockSize = false; - meta.filePoolPath = poolDir; + /****************getFileFromPool is true**************/ + options.getFileFromPool = true; + // open failed + { + FilePool pool(lfs_); + FakePool(&pool, options, 0); + + EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(-1)); + // Failed to delete directly + EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(0)); + // If Delete is successful, return 0 + ASSERT_EQ(0, pool.RecycleFile(targetPath)); + + EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(-1)); + // Failed to delete directly + EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(-1)); + // If Delete fails, an error code will be returned + ASSERT_EQ(-1, pool.RecycleFile(targetPath)); + } - // open失败 - { - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Write(_, Matcher(_), _, _)) - .Times(0); - EXPECT_CALL(*lfs_, Close(_)) - .Times(0); - ASSERT_EQ(-1, FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta, - poolMetaPath)); - } - // open成功,write失败 - { - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, 4096)) - .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(-1, FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta, - poolMetaPath)); - } - // open成功,write成功 - { - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, 4096)) - .WillOnce(Return(4096)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ( - 0, FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta, poolMetaPath)); - } -} - -// DecodeMetaInfoFromMetaFile接口的异常测试 -TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { - FilePoolMeta meta; - - // open失败 - { - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Read(_, _, _, _)) - .Times(0); - EXPECT_CALL(*lfs_, Close(_)) - .Times(0); - ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } - // read失败 - { - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } - // read成功,解析Json格式失败 - { - char buf[metaFileSize] = {0}; - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } - // 解析Json格式成功,chunksize为空 - { - char buf[metaFileSize] = {0}; - Json::Value root = GenerateMetaJson(); - root.removeMember(kChunkSize); - 
memcpy(buf, root.toStyledString().c_str(), - root.toStyledString().size()); - - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } - // 解析Json格式成功,metapagesize为空 - { - char buf[metaFileSize] = {0}; - Json::Value root = GenerateMetaJson(); - root.removeMember(kMetaPageSize); - memcpy(buf, root.toStyledString().c_str(), - root.toStyledString().size()); - - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } - // 解析Json格式成功,kFilePoolPath为空 - { - char buf[metaFileSize] = {0}; - Json::Value root = GenerateMetaJson(); - root.removeMember(kChunkFilePoolPath); - memcpy(buf, root.toStyledString().c_str(), - root.toStyledString().size()); - - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } - // 解析Json格式成功,kCRC为空 - { - char buf[metaFileSize] = {0}; - Json::Value root = GenerateMetaJson(); - root.removeMember(kCRC); - memcpy(buf, root.toStyledString().c_str(), - root.toStyledString().size()); - - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } - // 解析Json格式成功,crc不匹配 - { - char buf[metaFileSize] = {0}; - Json::Value root = GenerateMetaJson(); - root[kCRC] = 0; - memcpy(buf, root.toStyledString().c_str(), - root.toStyledString().size()); - - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } - // 正常流程 - { - char buf[metaFileSize] = {0}; - Json::Value root = GenerateMetaJson(); - memcpy(buf, root.toStyledString().c_str(), - root.toStyledString().size()); - - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(0, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } - - // 正常流程 - { - char buf[metaFileSize] = {0}; - Json::Value root = GenerateMetaJson(true); - memcpy(buf, root.toStyledString().c_str(), - root.toStyledString().size()); - - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - 
.WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(0, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } -} - -TEST_F(CSChunkfilePoolMockTest, InitializeTest) { - // 初始化options - FilePoolOptions options; - options.getFileFromPool = true; - memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); - options.fileSize = CHUNK_SIZE; - options.metaPageSize = PAGE_SIZE; - memcpy(options.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); - options.metaFileSize = metaFileSize; - options.retryTimes = 3; - - /****************getFileFromPool为true**************/ - // checkvalid时失败 - { - // DecodeMetaInfoFromMetaFile在上面已经单独测试过了 - // 这里选上面中的一组异常用例来检验即可 - // 解析json格式失败 - FilePool pool(lfs_); - char buf[metaFileSize] = {0}; - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, FileExists(poolMetaPath)).WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(false, pool.Initialize(options)); - } - // getFileFromPool为true,checkvalid成功,当前目录不存在 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(false)); - ASSERT_EQ(true, pool.Initialize(options)); - pool.WaitoFormatDoneForTesting(); - } - // 当前目录存在,list目录失败 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(Return(-1)); - ASSERT_EQ(false, pool.Initialize(options)); - } - // list目录成功,文件名中包含非数字字符 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - std::vector fileNames; - fileNames.push_back("aaa"); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - ASSERT_EQ(false, pool.Initialize(options)); - } - // list目录成功,目录中包含非普通文件类型的对象 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - std::vector fileNames; - fileNames.push_back("1"); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(filePath1)) - .WillOnce(Return(false)); - ASSERT_EQ(false, pool.Initialize(options)); - } - // list目录成功,open文件时失败 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - std::vector fileNames; - fileNames.push_back("1"); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(filePath1)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(filePath1, _)) - .WillOnce(Return(-1)); - ASSERT_EQ(false, pool.Initialize(options)); - } - // stat文件信息时失败 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - std::vector fileNames; - fileNames.push_back("1"); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(filePath1)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(filePath1, _)) - .WillOnce(Return(2)); - EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - ASSERT_EQ(false, 
pool.Initialize(options)); - } - // stat文件信息成功,文件大小不匹配 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - std::vector fileNames; - fileNames.push_back("1"); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(filePath1)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(filePath1, _)) - .WillOnce(Return(2)); - - struct stat fileInfo; - fileInfo.st_size = CHUNK_SIZE; - EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - ASSERT_EQ(false, pool.Initialize(options)); - } - // 文件信息匹配 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - std::vector fileNames; - fileNames.push_back("1"); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(filePath1)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(filePath1, _)) - .WillOnce(Return(2)); - - struct stat fileInfo; - fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE; - EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - ASSERT_EQ(true, pool.Initialize(options)); - ASSERT_EQ(1, pool.Size()); - } - - /****************getFileFromPool为false**************/ - options.getFileFromPool = false; - // 当前目录不存在,创建目录失败 - { - FilePool pool(lfs_); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(false)); - EXPECT_CALL(*lfs_, Mkdir(_)) - .WillOnce(Return(-1)); - ASSERT_EQ(false, pool.Initialize(options)); - } - // 当前目录不存在,创建目录成功 - { - FilePool pool(lfs_); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(false)); - EXPECT_CALL(*lfs_, Mkdir(_)) - .WillOnce(Return(0)); - ASSERT_EQ(true, pool.Initialize(options)); - } - // 当前目录存在 - { - FilePool pool(lfs_); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - ASSERT_EQ(true, pool.Initialize(options)); - } -} - -TEST_F(CSChunkfilePoolMockTest, GetFileTest) { - // 初始化options - FilePoolOptions options; - options.getFileFromPool = true; - memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); - options.fileSize = CHUNK_SIZE; - options.metaPageSize = PAGE_SIZE; - memcpy(options.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); - options.metaFileSize = metaFileSize; - int retryTimes = 3; - options.retryTimes = retryTimes; - - char metapage[PAGE_SIZE] = {0}; - - /****************getFileFromPool为true**************/ - // 没有剩余chunk的情况 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - } - // 存在chunk,open时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 10); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(_)) - .Times(0); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - ASSERT_EQ(10 - retryTimes, pool.Size()); - } - // 存在chunk,write时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 10); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - ASSERT_EQ(10 - retryTimes, pool.Size()); - } - // 存在chunk,fsync时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 10); - 
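A detail these GetFile failure cases pin down: every retry consumes a fresh pre-allocated file, so after retryTimes failed attempts the pool shrinks from 10 to 10 - retryTimes, while the -EEXIST rename result is returned immediately and costs only one file. A rough sketch of that loop, with FetchOneFromPool and WriteMetapageAndRename as illustrative stand-ins for the pool internals:

```cpp
#include <cerrno>
#include <string>

// Assumed helpers standing in for FilePool internals.
std::string FetchOneFromPool();                       // pops a file; Size() drops by one
int WriteMetapageAndRename(const std::string& file);  // open/write/fsync/close/rename

int GetFileSketch(int retryTimes) {
    int ret = -1;
    for (int i = 0; i < retryTimes; ++i) {
        std::string candidate = FetchOneFromPool();
        ret = WriteMetapageAndRename(candidate);
        if (ret == 0) break;        // success
        if (ret == -EEXIST) break;  // target already exists: not retried
        // any other failure: the candidate is lost, the loop takes another one
    }
    return ret;
}
```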
EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) - .Times(retryTimes) - .WillRepeatedly(Return(PAGE_SIZE)); - EXPECT_CALL(*lfs_, Fsync(1)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - ASSERT_EQ(10 - retryTimes, pool.Size()); - } - // 存在chunk,close时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 10); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) - .Times(retryTimes) - .WillRepeatedly(Return(PAGE_SIZE)); - EXPECT_CALL(*lfs_, Fsync(1)) - .Times(retryTimes) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - ASSERT_EQ(10 - retryTimes, pool.Size()); - } - // 存在chunk,rename时返回EEXIST错误 - { - FilePool pool(lfs_); - FakePool(&pool, options, 10); - EXPECT_CALL(*lfs_, Open(_, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) - .WillOnce(Return(PAGE_SIZE)); - EXPECT_CALL(*lfs_, Fsync(1)) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Rename(_, _, _)) - .WillOnce(Return(-EEXIST)); - ASSERT_EQ(-EEXIST, pool.GetFile(targetPath, metapage)); - ASSERT_EQ(9, pool.Size()); - } - // 存在chunk,rename时返回非EEXIST错误 - { - FilePool pool(lfs_); - FakePool(&pool, options, 10); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) - .Times(retryTimes) - .WillRepeatedly(Return(PAGE_SIZE)); - EXPECT_CALL(*lfs_, Fsync(1)) - .Times(retryTimes) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*lfs_, Rename(_, _, _)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - ASSERT_EQ(10 - retryTimes, pool.Size()); - } - // 存在chunk,rename成功 - { - FilePool pool(lfs_); - FakePool(&pool, options, 10); - EXPECT_CALL(*lfs_, Open(_, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) - .WillOnce(Return(PAGE_SIZE)); - EXPECT_CALL(*lfs_, Fsync(1)) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Rename(_, _, _)) - .WillOnce(Return(0)); - ASSERT_EQ(0, pool.GetFile(targetPath, metapage)); - ASSERT_EQ(9, pool.Size()); - } - - options.getFileFromPool = false; - /****************getFileFromPool为false**************/ - // open 时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(0); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - } - // fallocate 时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - } - // write 时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize)) - 
.Times(retryTimes) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*lfs_, - Write(1, Matcher(NotNull()), 0, fileSize)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - } - // fsync 时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize)) - .Times(retryTimes) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*lfs_, - Write(1, Matcher(NotNull()), 0, fileSize)) - .Times(retryTimes) - .WillRepeatedly(Return(fileSize)); - EXPECT_CALL(*lfs_, Fsync(1)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - } - // close 时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize)) - .Times(retryTimes) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*lfs_, - Write(1, Matcher(NotNull()), 0, fileSize)) - .Times(retryTimes) - .WillRepeatedly(Return(fileSize)); - EXPECT_CALL(*lfs_, Fsync(1)) - .Times(retryTimes) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - } -} - -TEST_F(CSChunkfilePoolMockTest, RecycleFileTest) { - // 初始化options - FilePoolOptions options; - options.getFileFromPool = true; - memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); - options.fileSize = CHUNK_SIZE; - options.metaPageSize = PAGE_SIZE; - memcpy(options.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); - options.metaFileSize = metaFileSize; - int retryTimes = 3; - options.retryTimes = retryTimes; - - /****************getFileFromPool为false**************/ - options.getFileFromPool = false; - // delete文件时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Delete(filePath1)) - .WillOnce(Return(-1)); - ASSERT_EQ(-1, pool.RecycleFile(filePath1)); - } - // delete文件成功 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Delete(filePath1)) - .WillOnce(Return(0)); - ASSERT_EQ(0, pool.RecycleFile(filePath1)); - } - - /****************getFileFromPool为true**************/ - options.getFileFromPool = true; - // open失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(-1)); - // 失败直接Delete - EXPECT_CALL(*lfs_, Delete(targetPath)) - .WillOnce(Return(0)); - // Delete 成功就返回0 - ASSERT_EQ(0, pool.RecycleFile(targetPath)); - - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(-1)); - // 失败直接Delete - EXPECT_CALL(*lfs_, Delete(targetPath)) - .WillOnce(Return(-1)); - // Delete 失败就返回错误码 - ASSERT_EQ(-1, pool.RecycleFile(targetPath)); - } - - // Fstat失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Fstat(1, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - // 失败直接Delete - EXPECT_CALL(*lfs_, Delete(targetPath)) - .WillOnce(Return(0)); - // Delete 成功就返回0 - ASSERT_EQ(0, pool.RecycleFile(targetPath)); - - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Fstat(1, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - // 失败直接Delete - 
EXPECT_CALL(*lfs_, Delete(targetPath)) - .WillOnce(Return(-1)); - // Delete 失败就返回错误码 - ASSERT_EQ(-1, pool.RecycleFile(targetPath)); - } - - // Fstat成功,大小不匹配 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - struct stat fileInfo; - fileInfo.st_size = CHUNK_SIZE; - - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Fstat(1, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - // 失败直接Delete - EXPECT_CALL(*lfs_, Delete(targetPath)) - .WillOnce(Return(0)); - // Delete 成功就返回0 - ASSERT_EQ(0, pool.RecycleFile(targetPath)); - - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Fstat(1, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - // 失败直接Delete - EXPECT_CALL(*lfs_, Delete(targetPath)) - .WillOnce(Return(-1)); - // Delete 失败就返回错误码 - ASSERT_EQ(-1, pool.RecycleFile(targetPath)); - } - - // Fstat信息匹配,rename失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - struct stat fileInfo; - fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE; - - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Fstat(1, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Rename(_, _, _)) - .WillOnce(Return(-1)); - ASSERT_EQ(-1, pool.RecycleFile(targetPath)); - ASSERT_EQ(0, pool.Size()); - } - - // Fstat信息匹配,rename成功 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - struct stat fileInfo; - fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE; - - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Fstat(1, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Rename(_, _, _)) - .WillOnce(Return(0)); - ASSERT_EQ(0, pool.RecycleFile(targetPath)); - ASSERT_EQ(1, pool.Size()); - } -} - -} // namespace chunkserver -} // namespace curve + // Fstat failed + { + FilePool pool(lfs_); + FakePool(&pool, options, 0); + + EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Fstat(1, _)).WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + // Failed to delete directly + EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(0)); + // If Delete is successful, return 0 + ASSERT_EQ(0, pool.RecycleFile(targetPath)); + + EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Fstat(1, _)).WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + // Failed to delete directly + EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(-1)); + // If Delete fails, an error code will be returned + ASSERT_EQ(-1, pool.RecycleFile(targetPath)); + } + + // Fstat successful, size mismatch + { + FilePool pool(lfs_); + FakePool(&pool, options, 0); + struct stat fileInfo; + fileInfo.st_size = CHUNK_SIZE; + + EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Fstat(1, _)) + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + // Failed to delete directly + EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(0)); + // If Delete is successful, return 0 + ASSERT_EQ(0, pool.RecycleFile(targetPath)); + + EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Fstat(1, _)) + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + 
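Taken together, the RecycleFile cases in this hunk pin down a small decision tree: a file that cannot be opened or stat'ed, or whose size differs from chunkSize + metaPageSize, is deleted; only a well-formed chunk is renamed back into the pool. A sketch under those assumptions (RenameIntoPool is a hypothetical helper, and the LocalFileSystem calls simply mirror the mocked ones above):

```cpp
#include <fcntl.h>
#include <sys/stat.h>

#include <string>

#include "src/fs/local_filesystem.h"

using curve::fs::LocalFileSystem;

int RenameIntoPool(const std::string& file);  // assumed helper

int RecycleSketch(LocalFileSystem* lfs, const std::string& file,
                  off_t chunkSize, off_t metaPageSize) {
    int fd = lfs->Open(file, O_RDWR);
    if (fd < 0) {
        return lfs->Delete(file);  // unusable file: delete it outright
    }
    struct stat info;
    int rc = lfs->Fstat(fd, &info);
    lfs->Close(fd);
    if (rc != 0 || info.st_size != chunkSize + metaPageSize) {
        return lfs->Delete(file);  // wrong size: also deleted
    }
    // Only a well-formed chunk goes back into the pool (pool.Size() grows).
    return RenameIntoPool(file);
}
```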
// On failure, Delete the file directly
+ EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(-1));
+ // If Delete fails, an error code will be returned
+ ASSERT_EQ(-1, pool.RecycleFile(targetPath));
+ }
+
+ // Fstat information matches, rename failed
+ {
+ FilePool pool(lfs_);
+ FakePool(&pool, options, 0);
+ struct stat fileInfo;
+ fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE;
+
+ EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1));
+ EXPECT_CALL(*lfs_, Fstat(1, _))
+ .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
+ EXPECT_CALL(*lfs_, Close(1)).Times(1);
+ EXPECT_CALL(*lfs_, Rename(_, _, _)).WillOnce(Return(-1));
+ ASSERT_EQ(-1, pool.RecycleFile(targetPath));
+ ASSERT_EQ(0, pool.Size());
+ }
+
+ // Fstat information matches, rename successful
+ {
+ FilePool pool(lfs_);
+ FakePool(&pool, options, 0);
+ struct stat fileInfo;
+ fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE;
+
+ EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1));
+ EXPECT_CALL(*lfs_, Fstat(1, _))
+ .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
+ EXPECT_CALL(*lfs_, Close(1)).Times(1);
+ EXPECT_CALL(*lfs_, Rename(_, _, _)).WillOnce(Return(0));
+ ASSERT_EQ(0, pool.RecycleFile(targetPath));
+ ASSERT_EQ(1, pool.Size());
+ }
+ }
+
+ } // namespace chunkserver
+} // namespace curve
diff --git a/test/chunkserver/datastore/filepool_unittest.cpp b/test/chunkserver/datastore/filepool_unittest.cpp
index ea1592f62b..e7297f7224 100644
--- a/test/chunkserver/datastore/filepool_unittest.cpp
+++ b/test/chunkserver/datastore/filepool_unittest.cpp
@@ -51,9 +51,9 @@ using ::testing::StrEq;
 using curve::chunkserver::FilePool;
 using curve::chunkserver::FilePoolHelper;
+using curve::chunkserver::FilePoolMeta;
 using curve::chunkserver::FilePoolOptions;
 using curve::chunkserver::FilePoolState;
-using curve::chunkserver::FilePoolMeta;
 using curve::common::kFilePoolMagic;
 using curve::fs::FileSystemType;
 using curve::fs::LocalFileSystem;
@@ -183,8 +183,9 @@ TEST_P(CSFilePool_test, InitializeNomalTest) {
 // initialize
 ASSERT_TRUE(chunkFilePoolPtr_->Initialize(cfop));
 ASSERT_EQ(100, chunkFilePoolPtr_->Size());
- // 初始化阶段会扫描FilePool内的所有文件,在扫描结束之后需要关闭这些文件
- // 防止过多的文件描述符被占用
+ // Initialization scans every file in the FilePool; once the scan finishes
+ // these files must be closed so that they do not occupy an excessive
+ // number of file descriptors
 ASSERT_FALSE(CheckFileOpenOrNot(filePoolPath + "1"));
 ASSERT_FALSE(CheckFileOpenOrNot(filePoolPath + "2"));
 ASSERT_FALSE(CheckFileOpenOrNot(filePoolPath + "50.clean"));
@@ -291,8 +292,7 @@ TEST_P(CSFilePool_test, GetFileTest) {
 ASSERT_EQ(-2, fsptr->Delete("test0"));
 // CASE 2: get dirty chunk
- auto checkBytes = [this](const std::string& filename,
- char byte,
+ auto checkBytes = [this](const std::string& filename, char byte,
 bool isCleaned = false) {
 ASSERT_TRUE(fsptr->FileExists(filename));
 int fd = fsptr->Open(filename, O_RDWR);
@@ -631,8 +631,7 @@ TEST_P(CSFilePool_test, CleanChunkTest) {
 }
 }
-INSTANTIATE_TEST_CASE_P(CSFilePoolTest,
- CSFilePool_test,
+INSTANTIATE_TEST_CASE_P(CSFilePoolTest, CSFilePool_test,
 ::testing::Values(false, true));
 TEST(CSFilePool, GetFileDirectlyTest) {
@@ -641,8 +640,9 @@ TEST(CSFilePool, GetFileDirectlyTest) {
 fsptr = LocalFsFactory::CreateFs(FileSystemType::EXT4, "");
 const std::string filePoolPath = FILEPOOL_DIR;
 // create chunkfile in chunkfile pool dir
- // if chunkfile pool 的getFileFromPool开关关掉了,那么
- // FilePool的size是一直为0,不会从pool目录中找
+ // if the getFileFromPool switch of the chunkfile pool is turned off, the
+ // FilePool size stays at 0 and chunks are never looked up in the pool
+ // directory
 std::string filename = filePoolPath + "1000";
 fsptr->Mkdir(filePoolPath);
 int fd = fsptr->Open(filename.c_str(), O_RDWR | O_CREAT);
@@ -666,7 +666,8 @@ TEST(CSFilePool, GetFileDirectlyTest) {
 ASSERT_TRUE(chunkFilePoolPtr_->Initialize(cspopt));
 ASSERT_EQ(0, chunkFilePoolPtr_->Size());
- // 测试获取chunk,chunkfile pool size不变一直为0
+ // Getting chunks: the chunkfile pool size stays unchanged
+ // at 0
 char metapage[4096];
 memset(metapage, '1', 4096);
@@ -683,12 +684,12 @@
 ASSERT_EQ(buf[i], '1');
 }
- // 测试回收chunk,文件被删除,FilePool Size不受影响
+ // Recycle the chunk: the file is deleted, the FilePool size is unaffected
 chunkFilePoolPtr_->RecycleFile("./new1");
 ASSERT_EQ(0, chunkFilePoolPtr_->Size());
 ASSERT_FALSE(fsptr->FileExists("./new1"));
- // 删除测试文件及目录
+ // Delete test files and directories
 ASSERT_EQ(0, fsptr->Close(fd));
 ASSERT_EQ(0, fsptr->Delete(filePoolPath + "1000"));
 ASSERT_EQ(0, fsptr->Delete(filePoolPath));
diff --git a/test/chunkserver/fake_datastore.h b/test/chunkserver/fake_datastore.h
index 75b5c80330..6d26815bc8 100644
--- a/test/chunkserver/fake_datastore.h
+++ b/test/chunkserver/fake_datastore.h
@@ -24,27 +24,25 @@
 #define TEST_CHUNKSERVER_FAKE_DATASTORE_H_
 #include
-#include
 #include
+#include
 #include
-#include "src/chunkserver/datastore/chunkserver_datastore.h"
 #include "include/chunkserver/chunkserver_common.h"
+#include "src/chunkserver/datastore/chunkserver_datastore.h"
 namespace curve {
 namespace chunkserver {
+using curve::fs::FileSystemType;
 using curve::fs::LocalFileSystem;
 using curve::fs::LocalFsFactory;
-using curve::fs::FileSystemType;
 class FakeCSDataStore : public CSDataStore {
 public:
 FakeCSDataStore(DataStoreOptions options,
- std::shared_ptr fs) :
- CSDataStore(fs,
- std::make_shared(fs),
- options) {
+ std::shared_ptr fs)
+ : CSDataStore(fs, std::make_shared(fs), options) {
 chunk_ = new (std::nothrow) char[options.chunkSize];
 ::memset(chunk_, 0, options.chunkSize);
 sn_ = 0;
@@ -93,10 +91,7 @@ class FakeCSDataStore : public CSDataStore {
 }
 }
- CSErrorCode ReadChunk(ChunkID id,
- SequenceNum sn,
- char *buf,
- off_t offset,
+ CSErrorCode ReadChunk(ChunkID id, SequenceNum sn, char* buf, off_t offset,
 size_t length) override {
 CSErrorCode errorCode = HasInjectError();
 if (errorCode != CSErrorCode::Success) {
@@ -105,18 +100,15 @@ class FakeCSDataStore : public CSDataStore {
 if (chunkIds_.find(id) == chunkIds_.end()) {
 return CSErrorCode::ChunkNotExistError;
 }
- ::memcpy(buf, chunk_+offset, length);
+ ::memcpy(buf, chunk_ + offset, length);
 if (HasInjectError()) {
 return CSErrorCode::InternalError;
 }
 return CSErrorCode::Success;
 }
- CSErrorCode ReadSnapshotChunk(ChunkID id,
- SequenceNum sn,
- char *buf,
- off_t offset,
- size_t length) override {
+ CSErrorCode ReadSnapshotChunk(ChunkID id, SequenceNum sn, char* buf,
+ off_t offset, size_t length) override {
 CSErrorCode errorCode = HasInjectError();
 if (errorCode != CSErrorCode::Success) {
 return errorCode;
@@ -124,32 +116,26 @@ class FakeCSDataStore : public CSDataStore {
 if (chunkIds_.find(id) == chunkIds_.end()) {
 return CSErrorCode::ChunkNotExistError;
 }
- ::memcpy(buf, chunk_+offset, length);
+ ::memcpy(buf, chunk_ + offset, length);
 return CSErrorCode::Success;
 }
- CSErrorCode WriteChunk(ChunkID id,
- SequenceNum sn,
- const butil::IOBuf& buf,
- off_t offset,
- size_t length,
- uint32_t *cost,
- const std::string & csl = "") override {
+ CSErrorCode WriteChunk(ChunkID id, SequenceNum sn, 
const butil::IOBuf& buf, + off_t offset, size_t length, uint32_t* cost, + const std::string& csl = "") override { CSErrorCode errorCode = HasInjectError(); if (errorCode != CSErrorCode::Success) { return errorCode; } - ::memcpy(chunk_+offset, buf.to_string().c_str(), length); + ::memcpy(chunk_ + offset, buf.to_string().c_str(), length); *cost = length; chunkIds_.insert(id); sn_ = sn; return CSErrorCode::Success; } - CSErrorCode CreateCloneChunk(ChunkID id, - SequenceNum sn, - SequenceNum correctedSn, - ChunkSizeType size, + CSErrorCode CreateCloneChunk(ChunkID id, SequenceNum sn, + SequenceNum correctedSn, ChunkSizeType size, const string& location) override { CSErrorCode errorCode = HasInjectError(); if (errorCode != CSErrorCode::Success) { @@ -160,9 +146,7 @@ class FakeCSDataStore : public CSDataStore { return CSErrorCode::Success; } - CSErrorCode PasteChunk(ChunkID id, - const char * buf, - off_t offset, + CSErrorCode PasteChunk(ChunkID id, const char* buf, off_t offset, size_t length) { CSErrorCode errorCode = HasInjectError(); if (errorCode != CSErrorCode::Success) { @@ -171,12 +155,11 @@ class FakeCSDataStore : public CSDataStore { if (chunkIds_.find(id) == chunkIds_.end()) { return CSErrorCode::ChunkNotExistError; } - ::memcpy(chunk_+offset, buf, length); + ::memcpy(chunk_ + offset, buf, length); return CSErrorCode::Success; } - CSErrorCode GetChunkInfo(ChunkID id, - CSChunkInfo* info) override { + CSErrorCode GetChunkInfo(ChunkID id, CSChunkInfo* info) override { CSErrorCode errorCode = HasInjectError(); if (errorCode != CSErrorCode::Success) { return errorCode; @@ -190,10 +173,8 @@ class FakeCSDataStore : public CSDataStore { } } - CSErrorCode GetChunkHash(ChunkID id, - off_t offset, - size_t length, - std::string *hash) { + CSErrorCode GetChunkHash(ChunkID id, off_t offset, size_t length, + std::string* hash) { uint32_t crc32c = 0; if (chunkIds_.find(id) != chunkIds_.end()) { crc32c = curve::common::CRC32(chunk_ + offset, length); @@ -213,14 +194,14 @@ class FakeCSDataStore : public CSDataStore { if (errorCode == CSErrorCode::Success) { return error_; } else { - // 注入错误自动恢复 + // Automatic recovery of injection errors error_ = CSErrorCode::Success; return errorCode; } } private: - char *chunk_; + char* chunk_; std::set chunkIds_; bool snapDeleteFlag_; SequenceNum sn_; @@ -234,14 +215,14 @@ class FakeFilePool : public FilePool { : FilePool(lfs) {} ~FakeFilePool() {} - bool Initialize(const FilePoolOptions &cfop) { + bool Initialize(const FilePoolOptions& cfop) { LOG(INFO) << "FakeFilePool init success"; return true; } - int GetChunk(const std::string &chunkpath, char *metapage) { return 0; } - int RecycleChunk(const std::string &chunkpath) { return 0; } + int GetChunk(const std::string& chunkpath, char* metapage) { return 0; } + int RecycleChunk(const std::string& chunkpath) { return 0; } size_t Size() { return 4; } - void UnInitialize() { } + void UnInitialize() {} }; } // namespace chunkserver diff --git a/test/chunkserver/heartbeat_helper_test.cpp b/test/chunkserver/heartbeat_helper_test.cpp index 7b9f9a9c6b..57d88c6c45 100644 --- a/test/chunkserver/heartbeat_helper_test.cpp +++ b/test/chunkserver/heartbeat_helper_test.cpp @@ -20,22 +20,24 @@ * Author: lixiaocui */ -#include -#include +#include "src/chunkserver/heartbeat_helper.h" + #include +#include #include -#include #include -#include "src/chunkserver/heartbeat_helper.h" +#include +#include + #include "src/chunkserver/chunkserver_service.h" #include "test/chunkserver/mock_copyset_node.h" #include 
"test/chunkserver/mock_copyset_node_manager.h" using ::testing::_; -using ::testing::Return; -using ::testing::SetArgPointee; using ::testing::DoAll; using ::testing::Mock; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace chunkserver { @@ -46,12 +48,12 @@ TEST(HeartbeatHelperTest, test_BuildNewPeers) { conf.set_epoch(2); std::vector newPeers; - // 1. 目标节点格式错误 + // 1. Destination node format error { - // 目标节点为空 + // The target node is empty ASSERT_FALSE(HeartbeatHelper::BuildNewPeers(conf, &newPeers)); - // 目标节点不为空但格式有误 + // The target node is not empty but has incorrect format auto replica = new ::curve::common::Peer(); replica->set_address("192.0.0.4"); conf.set_allocated_configchangeitem(replica); @@ -63,12 +65,12 @@ TEST(HeartbeatHelperTest, test_BuildNewPeers) { conf.set_allocated_configchangeitem(replica); } - // 2. 待删除节点格式错误 + // 2. The format of the node to be deleted is incorrect { - // 待删除节点为空 + // The node to be deleted is empty ASSERT_FALSE(HeartbeatHelper::BuildNewPeers(conf, &newPeers)); - // 待删除接节点不为空但格式有误 + // The node to be deleted is not empty but has incorrect format auto replica = new ::curve::common::Peer(); replica->set_address("192.0.0.1"); conf.set_allocated_oldpeer(replica); @@ -80,13 +82,13 @@ TEST(HeartbeatHelperTest, test_BuildNewPeers) { conf.set_allocated_oldpeer(replica); } - // 3. 生成新配置成功 + // 3. Successfully generated new configuration { for (int i = 0; i < 3; i++) { - auto replica = conf.add_peers(); - replica->set_id(i + 1); - replica->set_address( - "192.0.0." + std::to_string(i + 1) + ":8200:0"); + auto replica = conf.add_peers(); + replica->set_id(i + 1); + replica->set_address("192.0.0." + std::to_string(i + 1) + + ":8200:0"); } ASSERT_TRUE(HeartbeatHelper::BuildNewPeers(conf, &newPeers)); ASSERT_EQ(3, newPeers.size()); @@ -110,19 +112,17 @@ TEST(HeartbeatHelperTest, test_CopySetConfValid) { std::shared_ptr copyset; - // 1. chunkserver中不存在需要变更的copyset - { - ASSERT_FALSE(HeartbeatHelper::CopySetConfValid(conf, copyset)); - } + // 1. There is no copyset that needs to be changed in chunkserver + { ASSERT_FALSE(HeartbeatHelper::CopySetConfValid(conf, copyset)); } - // 2. mds下发copysetConf的epoch是落后的 + // 2. The epoch of copysetConf issued by mds is outdated { copyset = std::make_shared(); EXPECT_CALL(*copyset, GetConfEpoch()).Times(2).WillOnce(Return(3)); ASSERT_FALSE(HeartbeatHelper::CopySetConfValid(conf, copyset)); } - // 3. mds下发copysetConf正常 + // 3. Mds sends copysetConf normally { EXPECT_CALL(*copyset, GetConfEpoch()).WillOnce(Return(2)); ASSERT_TRUE(HeartbeatHelper::CopySetConfValid(conf, copyset)); @@ -140,24 +140,24 @@ TEST(HeartbeatHelperTest, test_NeedPurge) { auto copyset = std::make_shared(); - // 1. mds下发空配置 + // 1. MDS issued empty configuration { conf.set_epoch(0); ASSERT_TRUE(HeartbeatHelper::NeedPurge(csEp, conf, copyset)); } - // 2. 该副本不在复制组中 + // 2. The replica is not in the replication group { conf.set_epoch(2); for (int i = 2; i <= 4; i++) { - auto replica = conf.add_peers(); - replica->set_id(i); - replica->set_address("192.0.0." + std::to_string(i) + ":8200:0"); + auto replica = conf.add_peers(); + replica->set_id(i); + replica->set_address("192.0.0." + std::to_string(i) + ":8200:0"); } ASSERT_TRUE(HeartbeatHelper::NeedPurge(csEp, conf, copyset)); } - // 3. 该副本在复制组中 + // 3. 
This replica is in the replication group
 {
 butil::str2endpoint("192.0.0.4:8200", &csEp);
 ASSERT_FALSE(HeartbeatHelper::NeedPurge(csEp, conf, copyset));
@@ -165,39 +165,37 @@ TEST(HeartbeatHelperTest, test_NeedPurge) {
 }
 TEST(HeartbeatHelperTest, test_ChunkServerLoadCopySetFin) {
- // 1. peerId的格式不对
+ // 1. The format of peerId is incorrect
 {
 std::string peerId = "127.0.0:5555:0";
 ASSERT_FALSE(HeartbeatHelper::ChunkServerLoadCopySetFin(peerId));
 }
- // 2. 对端的chunkserver_service未起起来
+ // 2. The peer's chunkserver_service is not up
 {
 std::string peerId = "127.0.0.1:8888:0";
 ASSERT_FALSE(HeartbeatHelper::ChunkServerLoadCopySetFin(peerId));
 }
-
 auto server = new brpc::Server();
 MockCopysetNodeManager* copysetNodeManager = new MockCopysetNodeManager();
 ChunkServerServiceImpl* chunkserverService =
 new ChunkServerServiceImpl(copysetNodeManager);
- ASSERT_EQ(0,
- server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE));
+ ASSERT_EQ(
+ 0, server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE));
 ASSERT_EQ(0, server->Start("127.0.0.1", {5900, 5999}, nullptr));
 string listenAddr(butil::endpoint2str(server->listen_address()).c_str());
- // 3. 对端copyset未加载完成
+ // 3. The peer's copysets have not finished loading
 {
 EXPECT_CALL(*copysetNodeManager, LoadFinished())
 .WillOnce(Return(false));
 ASSERT_FALSE(HeartbeatHelper::ChunkServerLoadCopySetFin(listenAddr));
 }
- // 4. 对端copyset加载完成
+ // 4. The peer's copysets have finished loading
 {
- EXPECT_CALL(*copysetNodeManager, LoadFinished())
- .WillOnce(Return(true));
+ EXPECT_CALL(*copysetNodeManager, LoadFinished()).WillOnce(Return(true));
 ASSERT_TRUE(HeartbeatHelper::ChunkServerLoadCopySetFin(listenAddr));
 }
@@ -210,4 +208,3 @@ TEST(HeartbeatHelperTest, test_ChunkServerLoadCopySetFin) {
 } // namespace chunkserver
 } // namespace curve
-
diff --git a/test/chunkserver/heartbeat_test.cpp b/test/chunkserver/heartbeat_test.cpp
index fcfcae375a..eabadce0ee 100644
--- a/test/chunkserver/heartbeat_test.cpp
+++ b/test/chunkserver/heartbeat_test.cpp
@@ -20,25 +20,26 @@
 * Author: lixiaocui
 */
-#include
+#include "src/chunkserver/heartbeat.h"
+
 #include
 #include
+#include
+
 #include
-#include "test/chunkserver/heartbeat_test_common.h"
 #include "include/chunkserver/chunkserver_common.h"
-#include "src/common/configuration.h"
-#include "src/chunkserver/heartbeat.h"
 #include "src/chunkserver/cli.h"
-
+#include "src/common/configuration.h"
+#include "test/chunkserver/heartbeat_test_common.h"
 #include "test/client/fake/fakeMDS.h"
-std::string mdsMetaServerAddr = "127.0.0.1:9300"; // NOLINT
+std::string mdsMetaServerAddr = "127.0.0.1:9300"; // NOLINT
 namespace curve {
 namespace chunkserver {
-const LogicPoolID poolId = 666;
-const CopysetID copysetId = 888;
+const LogicPoolID poolId = 666;
+const CopysetID copysetId = 888;
 class HeartbeatTest : public ::testing::Test {
 public:
@@ -57,27 +58,27 @@ class HeartbeatTest : public ::testing::Test {
 hbtest_->UnInitializeMds();
 }
- protected:
 std::shared_ptr hbtest_;
 };
 TEST_F(HeartbeatTest, TransferLeader) {
- // 创建copyset
- std::vector cslist{
- "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"};
+ // Create copyset
+ std::vector cslist{"127.0.0.1:8200", "127.0.0.1:8201",
+ "127.0.0.1:8202"};
 std::string confStr = "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0";
- std::string dest1 = "127.0.0.1:8200:0";
- std::string dest2 = "127.0.0.1:8201:0";
+ std::string dest1 = "127.0.0.1:8200:0";
+ std::string dest2 = "127.0.0.1:8201:0";
 hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr);
 hbtest_->WaitCopysetReady(poolId, 
copysetId, confStr); - // 构造req中期望的CopySetInfo,expectleader是dst1 + // Construct the expected CopySetInfo for req, with the expectleader being + // dst1 ::curve::mds::heartbeat::CopySetInfo expect; expect.set_logicalpoolid(poolId); expect.set_copysetid(copysetId); - for (int j = 0; j < 3; j ++) { + for (int j = 0; j < 3; j++) { auto replica = expect.add_peers(); replica->set_address("127.0.0.1:820" + std::to_string(j) + ":0"); } @@ -86,11 +87,11 @@ TEST_F(HeartbeatTest, TransferLeader) { peer->set_address(dest1); expect.set_allocated_leaderpeer(peer); - // 构造resp中的CopySetConf, transfer到dst1 + // Construct CopySetConf in resp, transfer to dst CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); - for (int j = 0; j < 3; j ++) { + for (int j = 0; j < 3; j++) { auto replica = conf.add_peers(); replica->set_address("127.0.0.1:820" + std::to_string(j) + ":0"); } @@ -99,27 +100,28 @@ TEST_F(HeartbeatTest, TransferLeader) { conf.set_allocated_configchangeitem(peer); conf.set_type(curve::mds::heartbeat::TRANSFER_LEADER); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); - // 构造req中期望的CopySetInfo,expectleader是dst2 + // Construct the expected CopySetInfo for req, with the expectleader being + // dst2 peer = new ::curve::common::Peer(); peer->set_address(dest2); expect.set_allocated_leaderpeer(peer); - // 构造resp中的CopySetConf, transfer到dst2 + // Construct CopySetConf in resp, transfer to dst2 peer = new ::curve::common::Peer(); peer->set_address(dest2); conf.set_allocated_configchangeitem(peer); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } TEST_F(HeartbeatTest, RemovePeer) { - // 创建copyset - std::vector cslist{ - "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"}; + // Create copyset + std::vector cslist{"127.0.0.1:8200", "127.0.0.1:8201", + "127.0.0.1:8202"}; std::string confStr = "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0"; std::string leaderPeer = "127.0.0.1:8200:0"; std::string destPeer = "127.0.0.1:8202:0"; @@ -128,21 +130,21 @@ TEST_F(HeartbeatTest, RemovePeer) { hbtest_->WaitCopysetReady(poolId, copysetId, confStr); hbtest_->TransferLeaderSync(poolId, copysetId, confStr, leaderPeer); - // 构造req中期望的CopySetInfo + // Construct the CopySetInfo expected in req ::curve::mds::heartbeat::CopySetInfo expect; expect.set_logicalpoolid(poolId); expect.set_copysetid(copysetId); - for (int j = 0; j < 2; j ++) { + for (int j = 0; j < 2; j++) { auto replica = expect.add_peers(); replica->set_address("127.0.0.1:820" + std::to_string(j) + ":0"); } expect.set_epoch(2); - // 构造resp中的CopySetConf + // Construct CopySetConf in resp CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); - for (int j = 0; j < 3; j ++) { + for (int j = 0; j < 3; j++) { auto replica = conf.add_peers(); replica->set_address("127.0.0.1:820" + std::to_string(j) + ":0"); } @@ -151,62 +153,62 @@ TEST_F(HeartbeatTest, RemovePeer) { conf.set_allocated_configchangeitem(peer); conf.set_type(curve::mds::heartbeat::REMOVE_PEER); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } TEST_F(HeartbeatTest, CleanPeer_after_Configchange) { - // 创建copyset + // Create copyset std::vector cslist{"127.0.0.1:8200"}; std::string confStr = "127.0.0.1:8200:0"; hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr); hbtest_->WaitCopysetReady(poolId, copysetId, confStr); - // 
构造req中期望的CopySetInfo + // Construct the CopySetInfo expected in req ::curve::mds::heartbeat::CopySetInfo expect; - // 构造resp中的CopySetConf + // Construct CopySetConf in resp CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } TEST_F(HeartbeatTest, CleanPeer_not_exist_in_MDS) { - // 在chunkserver上创建一个copyset + // Create a copyset on chunkserver std::vector cslist{"127.0.0.1:8202"}; std::string confStr = "127.0.0.1:8202:0"; hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr); hbtest_->WaitCopysetReady(poolId, copysetId, confStr); - // 构造req中期望的CopySetInfo + // Construct the CopySetInfo expected in req ::curve::mds::heartbeat::CopySetInfo expect; - // 构造resp中的CopySetConf + // Construct CopySetConf in resp CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); conf.set_epoch(0); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } TEST_F(HeartbeatTest, AddPeer) { - // 创建copyset - std::vector cslist{ - "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"}; + // Create copyset + std::vector cslist{"127.0.0.1:8200", "127.0.0.1:8201", + "127.0.0.1:8202"}; std::string confStr = "127.0.0.1:8200:0,127.0.0.1:8201:0"; std::string addPeer = "127.0.0.1:8202:0"; hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr); hbtest_->WaitCopysetReady(poolId, copysetId, confStr); - // 构造req中期望的CopySetInfo + // Construct the CopySetInfo expected in req ::curve::mds::heartbeat::CopySetInfo expect; expect.set_logicalpoolid(poolId); expect.set_copysetid(copysetId); @@ -216,11 +218,11 @@ TEST_F(HeartbeatTest, AddPeer) { } expect.set_epoch(2); - // 构造resp中的CopySetConf + // Construct CopySetConf in resp CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); - for (int j = 0; j < 2; j ++) { + for (int j = 0; j < 2; j++) { auto replica = conf.add_peers(); replica->set_address("127.0.0.1:820" + std::to_string(j) + ":0"); } @@ -229,14 +231,14 @@ TEST_F(HeartbeatTest, AddPeer) { conf.set_allocated_configchangeitem(peer); conf.set_type(curve::mds::heartbeat::ADD_PEER); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } TEST_F(HeartbeatTest, ChangePeer) { - // 创建copyset - std::vector cslist{ - "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"}; + // Create copyset + std::vector cslist{"127.0.0.1:8200", "127.0.0.1:8201", + "127.0.0.1:8202"}; std::string oldConf = "127.0.0.1:8200:0,127.0.0.1:8202:0"; std::string addOne = "127.0.0.1:8201:0"; std::string rmOne = "127.0.0.1:8202:0"; @@ -244,7 +246,7 @@ TEST_F(HeartbeatTest, ChangePeer) { hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, oldConf); hbtest_->WaitCopysetReady(poolId, copysetId, oldConf); - // 构造req中期望的CopySetInfo + // Construct the CopySetInfo expected in req ::curve::mds::heartbeat::CopySetInfo expect; expect.set_logicalpoolid(poolId); expect.set_copysetid(copysetId); @@ -254,7 +256,7 @@ TEST_F(HeartbeatTest, ChangePeer) { replica->set_address("127.0.0.1:8201:0"); expect.set_epoch(2); - // 构造resp中的CopySetConf + // Construct CopySetConf in resp CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); @@ -271,7 +273,7 @@ TEST_F(HeartbeatTest, ChangePeer) { conf.set_allocated_oldpeer(peer); conf.set_type(curve::mds::heartbeat::CHANGE_PEER); - // 等待变更成功 + // Waiting for successful change 
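Each of these heartbeat tests converges on the same final step, asserted just below: WailForConfigChangeOk keeps answering the chunkserver's heartbeats with the desired conf until the reported CopySetInfo matches the expectation or the 30-second budget expires. Roughly, as a sketch of that polling contract (NextHeartbeat, ReportedInfoMatches and RespondWith are illustrative stand-ins for the harness plumbing, not the helper's real internals):

```cpp
#include <butil/time.h>

#include <cstdint>

#include "proto/heartbeat.pb.h"

using ::curve::mds::heartbeat::CopySetConf;
using ::curve::mds::heartbeat::CopySetInfo;
using ::curve::mds::heartbeat::HeartbeatRequest;

// Assumed harness helpers.
HeartbeatRequest NextHeartbeat();  // blocks until the next heartbeat arrives
bool ReportedInfoMatches(const HeartbeatRequest& req,
                         const CopySetInfo& expected);
void RespondWith(const CopySetConf& conf);  // puts conf in the heartbeat resp

bool WaitForConfigChangeSketch(const CopySetConf& conf,
                               const CopySetInfo& expected, int timeLimitMs) {
    const int64_t start = butil::monotonic_time_ms();
    while (butil::monotonic_time_ms() - start < timeLimitMs) {
        HeartbeatRequest req = NextHeartbeat();
        if (ReportedInfoMatches(req, expected)) {
            return true;  // the change took effect and was reported back
        }
        RespondWith(conf);  // keep instructing the chunkserver to change
    }
    return false;  // expectation never reported within the time limit
}
```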
ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } diff --git a/test/chunkserver/heartbeat_test_common.cpp b/test/chunkserver/heartbeat_test_common.cpp index 20d6b444f8..5a24d3dac9 100644 --- a/test/chunkserver/heartbeat_test_common.cpp +++ b/test/chunkserver/heartbeat_test_common.cpp @@ -23,8 +23,8 @@ #include "test/chunkserver/heartbeat_test_common.h" -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT static const char* confPath[3] = { "./8200/chunkserver.conf", @@ -37,12 +37,12 @@ namespace chunkserver { HeartbeatTestCommon* HeartbeatTestCommon::hbtestCommon_ = nullptr; -void HeartbeatTestCommon::CleanPeer( - LogicPoolID poolId, CopysetID copysetId, const std::string& peer) { - ::google::protobuf::RpcController* cntl; - ::google::protobuf::Closure* done; - const HeartbeatRequest* req; - HeartbeatResponse* resp; +void HeartbeatTestCommon::CleanPeer(LogicPoolID poolId, CopysetID copysetId, + const std::string& peer) { + ::google::protobuf::RpcController* cntl; + ::google::protobuf::Closure* done; + const HeartbeatRequest* req; + HeartbeatResponse* resp; LOG(INFO) << "Cleaning peer " << peer; @@ -52,16 +52,16 @@ void HeartbeatTestCommon::CleanPeer( GetHeartbeat(&cntl, &req, &resp, &done); brpc::ClosureGuard done_guard(done); - std::string sender = req->ip() + ":" + std::to_string(req->port()) - + ":0"; + std::string sender = + req->ip() + ":" + std::to_string(req->port()) + ":0"; if (sender != peer) { continue; } if (req->copysetinfos_size() >= 1) { int i = 0; - for (; i < req->copysetinfos_size(); i ++) { - if ( req->copysetinfos(i).logicalpoolid() == poolId && - req->copysetinfos(i).copysetid() == copysetId ) { + for (; i < req->copysetinfos_size(); i++) { + if (req->copysetinfos(i).logicalpoolid() == poolId && + req->copysetinfos(i).copysetid() == copysetId) { break; } } @@ -94,7 +94,7 @@ void HeartbeatTestCommon::CleanPeer( void HeartbeatTestCommon::CreateCopysetPeers( LogicPoolID poolId, CopysetID copysetId, - const std::vector &cslist, const std::string& confStr) { + const std::vector& cslist, const std::string& confStr) { braft::Configuration conf; ASSERT_EQ(0, conf.parse_from(confStr)); std::vector confPeers; @@ -113,8 +113,8 @@ void HeartbeatTestCommon::CreateCopysetPeers( cntl.set_timeout_ms(3000); request.set_logicpoolid(poolId); request.set_copysetid(copysetId); - for (auto peer = confPeers.begin(); - peer != confPeers.end(); peer++) { + for (auto peer = confPeers.begin(); peer != confPeers.end(); + peer++) { request.add_peerid(peer->to_string()); } @@ -122,11 +122,11 @@ void HeartbeatTestCommon::CreateCopysetPeers( copyset_stub.CreateCopysetNode(&cntl, &request, &response, nullptr); if (cntl.Failed()) { - LOG(ERROR) << "Creating copyset failed: " - << cntl.ErrorCode() << " " << cntl.ErrorText(); + LOG(ERROR) << "Creating copyset failed: " << cntl.ErrorCode() + << " " << cntl.ErrorText(); } else if (COPYSET_OP_STATUS_EXIST == response.status()) { - LOG(INFO) << "Skipped creating existed copyset <" - << poolId << ", " << copysetId << ">: " << conf + LOG(INFO) << "Skipped creating existed copyset <" << poolId + << ", " << copysetId << ">: " << conf << " on peer: " << *it; break; } else if (COPYSET_OP_STATUS_SUCCESS == response.status()) { @@ -141,8 +141,9 @@ void HeartbeatTestCommon::CreateCopysetPeers( } } -void HeartbeatTestCommon::WaitCopysetReady( - LogicPoolID poolId, CopysetID 
copysetId, const std::string& confStr) { +void HeartbeatTestCommon::WaitCopysetReady(LogicPoolID poolId, + CopysetID copysetId, + const std::string& confStr) { braft::PeerId peerId; butil::Status status; Configuration conf; @@ -160,9 +161,10 @@ void HeartbeatTestCommon::WaitCopysetReady( } } -void HeartbeatTestCommon::TransferLeaderSync( - LogicPoolID poolId, CopysetID copysetId, - const std::string& confStr, const std::string& newLeader) { +void HeartbeatTestCommon::TransferLeaderSync(LogicPoolID poolId, + CopysetID copysetId, + const std::string& confStr, + const std::string& newLeader) { braft::PeerId peerId; butil::Status status; Configuration conf; @@ -198,21 +200,18 @@ void HeartbeatTestCommon::ReleaseHeartbeat() { } void HeartbeatTestCommon::SetHeartbeatInfo( - ::google::protobuf::RpcController* cntl, - const HeartbeatRequest* request, - HeartbeatResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController* cntl, const HeartbeatRequest* request, + HeartbeatResponse* response, ::google::protobuf::Closure* done) { cntl_ = cntl; req_ = request; resp_ = response; done_ = done; } -void HeartbeatTestCommon::GetHeartbeat( - ::google::protobuf::RpcController** cntl, - const HeartbeatRequest** request, - HeartbeatResponse** response, - ::google::protobuf::Closure** done) { +void HeartbeatTestCommon::GetHeartbeat(::google::protobuf::RpcController** cntl, + const HeartbeatRequest** request, + HeartbeatResponse** response, + ::google::protobuf::Closure** done) { std::unique_lock lock(hbtestCommon_->GetMutex()); handlerReady_.store(true, std::memory_order_release); @@ -230,10 +229,8 @@ void HeartbeatTestCommon::GetHeartbeat( } void HeartbeatTestCommon::HeartbeatCallback( - ::google::protobuf::RpcController* cntl, - const HeartbeatRequest* request, - HeartbeatResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController* cntl, const HeartbeatRequest* request, + HeartbeatResponse* response, ::google::protobuf::Closure* done) { { std::unique_lock lock(hbtestCommon_->GetMutex()); if (!hbtestCommon_->GetReady().load(std::memory_order_acquire)) { @@ -250,8 +247,8 @@ void HeartbeatTestCommon::HeartbeatCallback( } bool HeartbeatTestCommon::SameCopySetInfo( - const ::curve::mds::heartbeat::CopySetInfo &orig, - const ::curve::mds::heartbeat::CopySetInfo &expect) { + const ::curve::mds::heartbeat::CopySetInfo& orig, + const ::curve::mds::heartbeat::CopySetInfo& expect) { if (!expect.IsInitialized()) { if (!orig.IsInitialized()) { return true; @@ -301,13 +298,12 @@ bool HeartbeatTestCommon::SameCopySetInfo( } bool HeartbeatTestCommon::WailForConfigChangeOk( - const ::curve::mds::heartbeat::CopySetConf &conf, - ::curve::mds::heartbeat::CopySetInfo expectedInfo, - int timeLimit) { - ::google::protobuf::RpcController* cntl; - ::google::protobuf::Closure* done; - const HeartbeatRequest* req; - HeartbeatResponse* resp; + const ::curve::mds::heartbeat::CopySetConf& conf, + ::curve::mds::heartbeat::CopySetInfo expectedInfo, int timeLimit) { + ::google::protobuf::RpcController* cntl; + ::google::protobuf::Closure* done; + const HeartbeatRequest* req; + HeartbeatResponse* resp; int64_t startTime = butil::monotonic_time_ms(); bool leaderPeerSet = expectedInfo.has_leaderpeer(); @@ -316,8 +312,8 @@ bool HeartbeatTestCommon::WailForConfigChangeOk( GetHeartbeat(&cntl, &req, &resp, &done); brpc::ClosureGuard done_guard(done); - // 获取当前copyset的leader - std::string sender = + // Get the leader of the current copyset + std::string sender = req->ip() + ":" + 
std::to_string(req->port()) + ":0";
 if (1 == req->copysetinfos_size()) {
 leader = req->copysetinfos(0).leaderpeer().address();
@@ -333,8 +329,10 @@
 }
 }
- // 如果当前req是leader发送的,判断req中的内容是否符合要求
- // 如果符合要求,返回true; 如果不符合要求,设置resp中的内容
+ // If the current req was sent by the leader, check whether its
+ // content matches what is expected. If it does, return true;
+ // if it does not, set the desired configuration change in
+ // the resp
 if (leader == sender) {
 if (!leaderPeerSet) {
 auto peer = new ::curve::common::Peer();
@@ -342,22 +340,23 @@
 expectedInfo.set_allocated_leaderpeer(peer);
 }
- // 判断req是否符合要求, 符合要求返回true
+ // Check whether the req matches the expected info; return true
+ // if it does
 if (req->copysetinfos_size() == 1) {
 if (SameCopySetInfo(req->copysetinfos(0), expectedInfo)) {
 return true;
 }
 LOG(INFO) << "req->copysetinfos:"
- << req->copysetinfos(0).DebugString()
- << ", expectedInfo: " << expectedInfo.DebugString();
+ << req->copysetinfos(0).DebugString()
+ << ", expectedInfo: " << expectedInfo.DebugString();
 } else if (req->copysetinfos_size() == 0) {
- if (SameCopySetInfo(
- ::curve::mds::heartbeat::CopySetInfo{}, expectedInfo)) {
+ if (SameCopySetInfo(::curve::mds::heartbeat::CopySetInfo{},
+ expectedInfo)) {
 return true;
 }
 }
- // 不符合要求设置resp
+ // Expectation not met: fill in resp
 if (req->copysetinfos_size() == 1) {
 auto build = resp->add_needupdatecopysets();
 if (!build->has_epoch()) {
@@ -388,7 +387,7 @@ int RmDirData(std::string uri) {
 int RemovePeersData(bool rmChunkServerMeta) {
 common::Configuration conf;
- for (int i = 0; i < 3; i ++) {
+ for (int i = 0; i < 3; i++) {
 conf.SetConfigPath(confPath[i]);
 CHECK(conf.LoadConfig()) << "load conf err";
@@ -396,35 +395,35 @@
 LOG_IF(FATAL, !conf.GetStringValue("copyset.chunk_data_uri", &res));
 if (RmDirData(res)) {
 LOG(ERROR) << "Failed to remove node " << i
- << " data dir: " << strerror(errno);
+ << " data dir: " << strerror(errno);
 return -1;
 }
 LOG_IF(FATAL, !conf.GetStringValue("copyset.raft_log_uri", &res));
 if (RmDirData(res)) {
 LOG(ERROR) << "Failed to remove node " << i
- << " log dir: " << strerror(errno);
+ << " log dir: " << strerror(errno);
 return -1;
 }
 LOG_IF(FATAL, !conf.GetStringValue("copyset.raft_log_uri", &res));
 if (RmDirData(res)) {
 LOG(ERROR) << "Failed to remove node " << i
- << " raft meta dir: " << strerror(errno);
+ << " raft meta dir: " << strerror(errno);
 return -1;
 }
 LOG_IF(FATAL, !conf.GetStringValue("copyset.raft_snapshot_uri", &res));
 if (RmDirData(res)) {
 LOG(ERROR) << "Failed to remove node " << i
- << " raft snapshot dir: " << strerror(errno);
+ << " raft snapshot dir: " << strerror(errno);
 return -1;
 }
 LOG_IF(FATAL, !conf.GetStringValue("copyset.recycler_uri", &res));
 if (RmDirData(res)) {
 LOG(ERROR) << "Failed to remove node " << i
- << " raft recycler dir: " << strerror(errno);
+ << " raft recycler dir: " << strerror(errno);
 return -1;
 }
@@ -432,7 +431,7 @@
 if (rmChunkServerMeta) {
 if (RmFile(res)) {
 LOG(ERROR) << "Failed to remove node " << i
- << " chunkserver meta file: " << strerror(errno);
+ << " chunkserver meta file: " << strerror(errno);
 return -1;
 }
 }
diff --git a/test/chunkserver/heartbeat_test_common.h b/test/chunkserver/heartbeat_test_common.h
index 433f7119eb..744dbe78d3 100644
--- a/test/chunkserver/heartbeat_test_common.h
+++ 
b/test/chunkserver/heartbeat_test_common.h @@ -23,20 +23,20 @@ #ifndef TEST_CHUNKSERVER_HEARTBEAT_TEST_COMMON_H_ #define TEST_CHUNKSERVER_HEARTBEAT_TEST_COMMON_H_ -#include #include +#include +#include #include +#include //NOLINT #include -#include -#include //NOLINT #include "include/chunkserver/chunkserver_common.h" -#include "src/chunkserver/copyset_node_manager.h" #include "proto/heartbeat.pb.h" -#include "src/common/configuration.h" -#include "src/chunkserver/heartbeat.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" +#include "src/chunkserver/heartbeat.h" +#include "src/common/configuration.h" #include "src/common/uri_parser.h" #include "test/client/fake/fakeMDS.h" @@ -47,7 +47,7 @@ using ::curve::common::UriParser; class HeartbeatTestCommon { public: - explicit HeartbeatTestCommon(const std::string &filename) { + explicit HeartbeatTestCommon(const std::string& filename) { hbtestCommon_ = this; handlerReady_.store(false, std::memory_order_release); @@ -57,17 +57,11 @@ class HeartbeatTestCommon { mds_->StartService(); } - std::atomic& GetReady() { - return handlerReady_; - } + std::atomic& GetReady() { return handlerReady_; } - std::mutex& GetMutex() { - return hbMtx_; - } + std::mutex& GetMutex() { return hbMtx_; } - std::condition_variable& GetCV() { - return hbCV_; - } + std::condition_variable& GetCV() { return hbCV_; } void UnInitializeMds() { mds_->UnInitialize(); @@ -75,105 +69,110 @@ class HeartbeatTestCommon { } /** - * CleanPeer 清空peer上指定copyset数据 + * CleanPeer: Clear the specified copyset data on the peer * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] peer chunkserver ip + * @param[in] poolId Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] peer chunkserver IP */ - void CleanPeer( - LogicPoolID poolId, CopysetID copysetId, const std::string& peer); + void CleanPeer(LogicPoolID poolId, CopysetID copysetId, + const std::string& peer); /** - * CreateCopysetPeers 在指定chunkserverlist上创建指定配置的copyset + * CreateCopysetPeers: Create a copyset of the specified configuration on + * the specified chunkserverlist * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] cslist 待创建copyset的chunkserver列表 - * @param[in] conf 使用该配置作为初始配置创建copyset + * @param[in] poolId Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] cslist The chunkserver list for the copyset to be created + * @param[in] conf Use this configuration as the initial configuration to + * create a copyset */ void CreateCopysetPeers(LogicPoolID poolId, CopysetID copysetId, - const std::vector &cslist, const std::string& conf); + const std::vector& cslist, + const std::string& conf); /** - * WaitCopysetReady 等待指定copyset选出leader + * WaitCopysetReady: Wait for the specified copyset to select the leader * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] conf 指定copyset复制组成员 + * @param[in] poolId Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] conf specifies the copyset replication group members */ - void WaitCopysetReady( - LogicPoolID poolId, CopysetID copysetId, const std::string& conf); + void WaitCopysetReady(LogicPoolID poolId, CopysetID copysetId, + const std::string& conf); /** - * TransferLeaderSync 触发transferleader并等待完成 + * TransferLeaderSync: Trigger transferleader and waits for completion * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] conf 指定copyset复制组成员 - * @param[in] newLeader 目标leader + * @param[in] poolId 
Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] conf specifies the copyset replication group members + * @param[in] newLeader Target Leader */ void TransferLeaderSync(LogicPoolID poolId, CopysetID copysetId, - const std::string& conf, const std::string& newLeader); + const std::string& conf, + const std::string& newLeader); /** - * WailForConfigChangeOk 指定时间内(timeLimitMs),chunkserver是否上报了 - * 符合预期的copyset信息 + * WailForConfigChangeOk: Determine whether the chunkserver has reported the + * expected copyset information within the specified time limit + * (timeLimitMs). * - * @param[in] conf mds需要下发给指定copyset的变更命令 - * @param[in] expectedInfo 变更之后期望复制组配置 - * @param[in] timeLimitMs 等待时间 + * @param[in] conf mds needs to issue a change command to the specified + * copyset + * @param[in] expectedInfo replication group configuration after change + * @param[in] timeLimitMs waiting time * - * @return false-指定时间内copyset配置未能达到预期, true-达到预期 + * @return false - Copyset configuration failed to meet expectations within + * the specified time, true - met expectations */ bool WailForConfigChangeOk( - const ::curve::mds::heartbeat::CopySetConf &conf, - ::curve::mds::heartbeat::CopySetInfo expectedInfo, - int timeLimitMs); + const ::curve::mds::heartbeat::CopySetConf& conf, + ::curve::mds::heartbeat::CopySetInfo expectedInfo, int timeLimitMs); /** - * SameCopySetInfo 比较两个copysetInfo是否一致 + * SameCopySetInfo: Compare two copysetInfo structures to check if they are + * identical. * - * @param[in] orig 待比较的copysetInfo - * @param[in] expect 期望copysetInfo + * @param[in] orig The copysetInfo to compare. + * @param[in] expect The expected copysetInfo for comparison. * - * @return true-一致 false-不一致 + * @return true if they are identical, false if they are not. */ - bool SameCopySetInfo( - const ::curve::mds::heartbeat::CopySetInfo &orig, - const ::curve::mds::heartbeat::CopySetInfo &expect); + bool SameCopySetInfo(const ::curve::mds::heartbeat::CopySetInfo& orig, + const ::curve::mds::heartbeat::CopySetInfo& expect); /** - * ReleaseHeartbeat heartbeat中的会掉设置为nullptr + * ReleaseHeartbeat: Set the callback in the heartbeat to nullptr. */ void ReleaseHeartbeat(); /** - * SetHeartbeatInfo 把mds接受到的cntl等信息复制到成员变量 + * SetHeartbeatInfo: Copy the cntl and other information received by mds to + * the member variable */ - void SetHeartbeatInfo( - ::google::protobuf::RpcController* cntl, - const HeartbeatRequest* request, - HeartbeatResponse* response, - ::google::protobuf::Closure* done); + void SetHeartbeatInfo(::google::protobuf::RpcController* cntl, + const HeartbeatRequest* request, + HeartbeatResponse* response, + ::google::protobuf::Closure* done); /** - * GetHeartbeat 把当前成员中的cntl等变量设置到rpc中 + * GetHeartbeat: Set the current member's cntl and other variables into the + * RPC. 
     */
-    void GetHeartbeat(
-        ::google::protobuf::RpcController** cntl,
-        const HeartbeatRequest** request,
-        HeartbeatResponse** response,
-        ::google::protobuf::Closure** done);
+    void GetHeartbeat(::google::protobuf::RpcController** cntl,
+                      const HeartbeatRequest** request,
+                      HeartbeatResponse** response,
+                      ::google::protobuf::Closure** done);

     /**
-     * HeartbeatCallback heartbeat回掉
+     * HeartbeatCallback: heartbeat callback
      */
-    static void HeartbeatCallback(
-        ::google::protobuf::RpcController* controller,
-        const HeartbeatRequest* request,
-        HeartbeatResponse* response,
-        ::google::protobuf::Closure* done);
+    static void HeartbeatCallback(::google::protobuf::RpcController* controller,
+                                  const HeartbeatRequest* request,
+                                  HeartbeatResponse* response,
+                                  ::google::protobuf::Closure* done);

  private:
     FakeMDS* mds_;
diff --git a/test/chunkserver/heartbeat_test_main.cpp b/test/chunkserver/heartbeat_test_main.cpp
index de06bcc255..d2d517bfc4 100644
--- a/test/chunkserver/heartbeat_test_main.cpp
+++ b/test/chunkserver/heartbeat_test_main.cpp
@@ -21,9 +21,9 @@
  * 2018/12/23 Wenyu Zhou Initial version
 */

-#include
 #include
 #include
+#include

 #include "include/chunkserver/chunkserver_common.h"
 #include "src/chunkserver/chunkserver.h"
@@ -31,7 +31,7 @@
 #include "test/chunkserver/heartbeat_test_common.h"
 #include "test/integration/common/config_generator.h"

-static const char *param[3][15] = {
+static const char* param[3][15] = {
     {
         "heartbeat_test",
         "-chunkServerIp=127.0.0.1",
@@ -89,7 +89,7 @@ using ::curve::chunkserver::ChunkServer;

 butil::AtExitManager atExitManager;

-static int RunChunkServer(int i, int argc, char **argv) {
+static int RunChunkServer(int i, int argc, char** argv) {
     auto chunkserver = new curve::chunkserver::ChunkServer();
     if (chunkserver == nullptr) {
         LOG(ERROR) << "Failed to create chunkserver " << i;
@@ -104,7 +104,7 @@ static int RunChunkServer(int i, int argc, char **argv) {
     return 0;
 }

-int main(int argc, char *argv[]) {
+int main(int argc, char* argv[]) {
     int ret;
     pid_t pids[3];
     testing::InitGoogleTest(&argc, argv);
@@ -133,10 +133,11 @@ int main(int argc, char *argv[]) {
             LOG(FATAL) << "Failed to create chunkserver process 0";
         } else if (pids[i] == 0) {
             /*
-             * RunChunkServer内部会调用LOG(), 有较低概率因不兼容fork()而卡死
+             * RunChunkServer calls LOG() internally; there is a small chance
+             * it hangs because LOG() is not compatible with fork()
             */
-            return RunChunkServer(i, sizeof(param[i]) / sizeof(char *),
-                const_cast(param[i]));
+            return RunChunkServer(i, sizeof(param[i]) / sizeof(char*),
+                                  const_cast(param[i]));
         }
     }

@@ -148,8 +149,9 @@ int main(int argc, char *argv[]) {
         LOG(FATAL) << "Failed to create test proccess";
     } else if (pid == 0) {
         /*
-         * RUN_ALL_TESTS内部可能会调用LOG(),
-         * 有较低概率因不兼容fork()而卡死
+         * RUN_ALL_TESTS may call LOG() internally; there is a small chance
+         * it hangs because LOG() is not compatible with fork()
         */
         ret = RUN_ALL_TESTS();
         return ret;
@@ -171,10 +173,11 @@ int main(int argc, char *argv[]) {
         LOG(FATAL) << "Failed to restart chunkserver process 1";
     } else if (pid == 0) {
         /*
-         * RunChunkServer内部会调用LOG(), 有较低概率因不兼容fork()而卡死
+         * RunChunkServer calls LOG() internally; there is a small chance
+         * it hangs because LOG() is not compatible with fork()
         */
-        ret = RunChunkServer(1, sizeof(param[1]) / sizeof(char *),
-            const_cast(param[1]));
+        ret = RunChunkServer(1, sizeof(param[1]) / sizeof(char*),
+                             const_cast(param[1]));
         return ret;
     }
     sleep(2);
diff --git a/test/chunkserver/inflight_throttle_test.cpp b/test/chunkserver/inflight_throttle_test.cpp
index 8faa18d76e..333e1f6934 100644
---
a/test/chunkserver/inflight_throttle_test.cpp +++ b/test/chunkserver/inflight_throttle_test.cpp @@ -20,10 +20,11 @@ * Author: wudemiao */ +#include "src/chunkserver/inflight_throttle.h" + #include #include "src/common/concurrent/concurrent.h" -#include "src/chunkserver/inflight_throttle.h" namespace curve { namespace chunkserver { @@ -31,7 +32,7 @@ namespace chunkserver { using curve::common::Thread; TEST(InflightThrottleTest, basic) { - // 基本测试 + // Basic testing { uint64_t maxInflight = 1; InflightThrottle inflightThrottle(maxInflight); @@ -45,7 +46,7 @@ TEST(InflightThrottleTest, basic) { ASSERT_FALSE(inflightThrottle.IsOverLoad()); } - // 并发加 + // Concurrent addition { uint64_t maxInflight = 10000; InflightThrottle inflightThrottle(maxInflight); @@ -78,7 +79,7 @@ TEST(InflightThrottleTest, basic) { ASSERT_FALSE(inflightThrottle.IsOverLoad()); } - // 并发减 + // Concurrent reduction { uint64_t maxInflight = 16; InflightThrottle inflightThrottle(maxInflight); diff --git a/test/chunkserver/metrics_test.cpp b/test/chunkserver/metrics_test.cpp index 282802336f..57c7a79c33 100644 --- a/test/chunkserver/metrics_test.cpp +++ b/test/chunkserver/metrics_test.cpp @@ -20,24 +20,25 @@ * Author: yangyaokai */ -#include -#include +#include +#include #include #include -#include -#include -#include -#include +#include +#include + #include +#include +#include #include -#include "src/common/configuration.h" #include "src/chunkserver/chunkserver_metrics.h" -#include "src/chunkserver/trash.h" #include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/datastore/file_pool.h" -#include "src/fs/local_filesystem.h" #include "src/chunkserver/raftlog/curve_segment_log_storage.h" +#include "src/chunkserver/trash.h" +#include "src/common/configuration.h" +#include "src/fs/local_filesystem.h" #include "test/chunkserver/datastore/filepool_helper.h" namespace curve { @@ -55,15 +56,14 @@ const PageSizeType PAGE_SIZE = 4 * 1024; const int chunkNum = 10; const LogicPoolID logicId = 1; -const string baseDir = "./data_csmetric"; // NOLINT -const string copysetDir = "local://./data_csmetric"; // NOLINT -const string logDir = "curve://./data_csmetric"; // NOLINT -const string chunkPoolDir = "./chunkfilepool_csmetric"; // NOLINT +const string baseDir = "./data_csmetric"; // NOLINT +const string copysetDir = "local://./data_csmetric"; // NOLINT +const string logDir = "curve://./data_csmetric"; // NOLINT +const string chunkPoolDir = "./chunkfilepool_csmetric"; // NOLINT const string chunkPoolMetaPath = "./chunkfilepool_csmetric.meta"; // NOLINT -const string walPoolDir = "./walfilepool_csmetric"; // NOLINT -const string walPoolMetaPath = "./walfilepool_csmetric.meta"; // NOLINT -const string trashPath = "./trash_csmetric"; // NOLINT - +const string walPoolDir = "./walfilepool_csmetric"; // NOLINT +const string walPoolMetaPath = "./walfilepool_csmetric.meta"; // NOLINT +const string trashPath = "./trash_csmetric"; // NOLINT class CSMetricTest : public ::testing::Test { public: @@ -90,8 +90,7 @@ class CSMetricTest : public ::testing::Test { cfop.blockSize = BLOCK_SIZE; memcpy(cfop.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); - if (lfs_->DirExists(poolDir)) - lfs_->Delete(poolDir); + if (lfs_->DirExists(poolDir)) lfs_->Delete(poolDir); allocateChunk(lfs_, chunkNum, poolDir, CHUNK_SIZE); ASSERT_TRUE(filePool->Initialize(cfop)); ASSERT_EQ(chunkNum, filePool->Size()); @@ -147,8 +146,8 @@ class CSMetricTest : public ::testing::Test { } void CreateConfigFile() { - confFile_ = "csmetric.conf"; - // 创建配置文件 + 
confFile_ = "csmetric.conf";
+        // Create the configuration file
         std::string confItem;
         std::ofstream cFile(confFile_);
         ASSERT_TRUE(cFile.is_open());
@@ -210,18 +209,18 @@ TEST_F(CSMetricTest, CopysetMetricTest) {
     int rc = metric_->CreateCopysetMetric(logicId, copysetId);
     ASSERT_EQ(rc, 0);

-    // 如果copyset的metric已经存在,返回-1
+    // If the metric for the copyset already exists, return -1
     rc = metric_->CreateCopysetMetric(logicId, copysetId);
     ASSERT_EQ(rc, -1);

-    // 获取不存在的copyset metric,返回nullptr
+    // Getting a non-existent copyset metric returns nullptr
     CopysetMetricPtr copysetMetric = metric_->GetCopysetMetric(logicId, 2);
     ASSERT_EQ(copysetMetric, nullptr);

     copysetMetric = metric_->GetCopysetMetric(logicId, copysetId);
     ASSERT_NE(copysetMetric, nullptr);

-    // 删除copyset metric后,再去获取返回nullptr
+    // After the copyset metric has been removed, getting it returns nullptr
     rc = metric_->RemoveCopysetMetric(logicId, copysetId);
     ASSERT_EQ(rc, 0);
     copysetMetric = metric_->GetCopysetMetric(logicId, copysetId);
@@ -233,7 +232,8 @@ TEST_F(CSMetricTest, OnRequestTest) {
     int rc = metric_->CreateCopysetMetric(logicId, copysetId);
     ASSERT_EQ(rc, 0);

-    CopysetMetricPtr copysetMetric = metric_->GetCopysetMetric(logicId, copysetId);  // NOLINT
+    CopysetMetricPtr copysetMetric =
+        metric_->GetCopysetMetric(logicId, copysetId);  // NOLINT
     ASSERT_NE(copysetMetric, nullptr);

     const IOMetricPtr serverWriteMetric =
@@ -257,7 +257,7 @@ TEST_F(CSMetricTest, OnRequestTest) {
     const IOMetricPtr cpDownloadMetric =
         copysetMetric->GetIOMetric(CSIOMetricType::DOWNLOAD);

-    // 统计写入成功的情况
+    // Count the success of writing
     metric_->OnRequest(logicId, copysetId, CSIOMetricType::WRITE_CHUNK);
     ASSERT_EQ(1, serverWriteMetric->reqNum_.get_value());
     ASSERT_EQ(0, serverWriteMetric->ioNum_.get_value());
@@ -268,7 +268,7 @@
     ASSERT_EQ(0, cpWriteMetric->ioBytes_.get_value());
     ASSERT_EQ(0, cpWriteMetric->errorNum_.get_value());

-    // 统计读取成功的情况
+    // Statistics on successful reads
     metric_->OnRequest(logicId, copysetId, CSIOMetricType::READ_CHUNK);
     ASSERT_EQ(1, serverReadMetric->reqNum_.get_value());
     ASSERT_EQ(0, serverReadMetric->ioNum_.get_value());
@@ -279,7 +279,7 @@
     ASSERT_EQ(0, cpReadMetric->ioBytes_.get_value());
     ASSERT_EQ(0, cpReadMetric->errorNum_.get_value());

-    // 统计恢复成功的情况
+    // Statistics on successful recovery
     metric_->OnRequest(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK);
     ASSERT_EQ(1, serverRecoverMetric->reqNum_.get_value());
     ASSERT_EQ(0, serverRecoverMetric->ioNum_.get_value());
@@ -290,7 +290,7 @@
     ASSERT_EQ(0, cpRecoverMetric->ioBytes_.get_value());
     ASSERT_EQ(0, cpRecoverMetric->errorNum_.get_value());

-    // 统计paste成功的情况
+    // Count the success of paste
     metric_->OnRequest(logicId, copysetId, CSIOMetricType::PASTE_CHUNK);
     ASSERT_EQ(1, serverPasteMetric->reqNum_.get_value());
     ASSERT_EQ(0, serverPasteMetric->ioNum_.get_value());
@@ -301,7 +301,7 @@
     ASSERT_EQ(0, cpPasteMetric->ioBytes_.get_value());
     ASSERT_EQ(0, cpPasteMetric->errorNum_.get_value());

-    // 统计下载成功的情况
+    // Statistics on successful downloads
     metric_->OnRequest(logicId, copysetId, CSIOMetricType::DOWNLOAD);
     ASSERT_EQ(1, serverDownloadMetric->reqNum_.get_value());
     ASSERT_EQ(0, serverDownloadMetric->ioNum_.get_value());
@@ -318,7 +318,8 @@ TEST_F(CSMetricTest, OnResponseTest) {
     int rc = metric_->CreateCopysetMetric(logicId, copysetId);
     ASSERT_EQ(rc, 0);

-    CopysetMetricPtr copysetMetric = metric_->GetCopysetMetric(logicId, copysetId);  // NOLINT
+    CopysetMetricPtr copysetMetric =
+        metric_->GetCopysetMetric(logicId, copysetId);  // NOLINT
     ASSERT_NE(copysetMetric, nullptr);

     const IOMetricPtr serverWriteMetric =
@@ -345,9 +346,9 @@ TEST_F(CSMetricTest, OnResponseTest) {
     size_t size = PAGE_SIZE;
     int64_t latUs = 100;
     bool hasError = false;

-    // 统计写入成功的情况
-    metric_->OnResponse(
-        logicId, copysetId, CSIOMetricType::WRITE_CHUNK, size, latUs, hasError);
+    // Count the success of writing
+    metric_->OnResponse(logicId, copysetId, CSIOMetricType::WRITE_CHUNK, size,
+                        latUs, hasError);
     ASSERT_EQ(0, serverWriteMetric->reqNum_.get_value());
     ASSERT_EQ(1, serverWriteMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, serverWriteMetric->ioBytes_.get_value());
@@ -357,9 +358,9 @@
     ASSERT_EQ(1, cpWriteMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, cpWriteMetric->ioBytes_.get_value());
     ASSERT_EQ(0, cpWriteMetric->errorNum_.get_value());

-    // 统计读取成功的情况
-    metric_->OnResponse(
-        logicId, copysetId, CSIOMetricType::READ_CHUNK, size, latUs, hasError);
+    // Statistics on successful reads
+    metric_->OnResponse(logicId, copysetId, CSIOMetricType::READ_CHUNK, size,
+                        latUs, hasError);
     ASSERT_EQ(0, serverReadMetric->reqNum_.get_value());
     ASSERT_EQ(1, serverReadMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, serverReadMetric->ioBytes_.get_value());
@@ -369,9 +370,9 @@
     ASSERT_EQ(1, cpReadMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, cpReadMetric->ioBytes_.get_value());
     ASSERT_EQ(0, cpReadMetric->errorNum_.get_value());

-    // 统计恢复成功的情况
-    metric_->OnResponse(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK,
-                        size, latUs, hasError);
+    // Statistics on successful recovery
+    metric_->OnResponse(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK, size,
+                        latUs, hasError);
     ASSERT_EQ(0, serverRecoverMetric->reqNum_.get_value());
     ASSERT_EQ(1, serverRecoverMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, serverRecoverMetric->ioBytes_.get_value());
@@ -381,9 +382,9 @@
     ASSERT_EQ(1, cpRecoverMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, cpRecoverMetric->ioBytes_.get_value());
     ASSERT_EQ(0, cpRecoverMetric->errorNum_.get_value());

-    // 统计paste成功的情况
-    metric_->OnResponse(
-        logicId, copysetId, CSIOMetricType::PASTE_CHUNK, size, latUs, hasError);
+    // Count the success of paste
+    metric_->OnResponse(logicId, copysetId, CSIOMetricType::PASTE_CHUNK, size,
+                        latUs, hasError);
     ASSERT_EQ(0, serverPasteMetric->reqNum_.get_value());
     ASSERT_EQ(1, serverPasteMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, serverPasteMetric->ioBytes_.get_value());
@@ -393,9 +394,9 @@
     ASSERT_EQ(1, cpPasteMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, cpPasteMetric->ioBytes_.get_value());
     ASSERT_EQ(0, cpPasteMetric->errorNum_.get_value());

-    // 统计下载成功的情况
-    metric_->OnResponse(
-        logicId, copysetId, CSIOMetricType::DOWNLOAD, size, latUs, hasError);
+    // Statistics on successful downloads
+    metric_->OnResponse(logicId, copysetId, CSIOMetricType::DOWNLOAD, size,
+                        latUs, hasError);
     ASSERT_EQ(0, serverDownloadMetric->reqNum_.get_value());
     ASSERT_EQ(1, serverDownloadMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, serverDownloadMetric->ioBytes_.get_value());
@@ -406,9 +407,10 @@
     ASSERT_EQ(0, cpDownloadMetric->errorNum_.get_value());

     hasError = true;
-    // 统计写入失败的情况,错误数增加,其他不变
-    metric_->OnResponse(
-        logicId, copysetId, CSIOMetricType::WRITE_CHUNK, size, latUs, hasError);
+    // On a write failure the error count increases and everything else
+    // stays unchanged
+    metric_->OnResponse(logicId, copysetId, CSIOMetricType::WRITE_CHUNK, size,
+                        latUs, hasError);
     ASSERT_EQ(0, serverWriteMetric->reqNum_.get_value());
     ASSERT_EQ(1, serverWriteMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, serverWriteMetric->ioBytes_.get_value());
@@ -418,9 +420,10 @@
     ASSERT_EQ(1, cpWriteMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, cpWriteMetric->ioBytes_.get_value());
     ASSERT_EQ(1, cpWriteMetric->errorNum_.get_value());

-    // 统计读取失败的情况,错误数增加,其他不变
-    metric_->OnResponse(
-        logicId, copysetId, CSIOMetricType::READ_CHUNK, size, latUs, hasError);
+    // On a read failure the error count increases and everything else
+    // stays unchanged
+    metric_->OnResponse(logicId, copysetId, CSIOMetricType::READ_CHUNK, size,
+                        latUs, hasError);
     ASSERT_EQ(0, serverReadMetric->reqNum_.get_value());
     ASSERT_EQ(1, serverReadMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, serverReadMetric->ioBytes_.get_value());
@@ -430,9 +433,9 @@
     ASSERT_EQ(1, cpReadMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, cpReadMetric->ioBytes_.get_value());
     ASSERT_EQ(1, cpReadMetric->errorNum_.get_value());

-    // 统计恢复失败的情况
-    metric_->OnResponse(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK,
-                        size, latUs, hasError);
+    // Statistics on recovery failures
+    metric_->OnResponse(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK, size,
+                        latUs, hasError);
     ASSERT_EQ(0, serverRecoverMetric->reqNum_.get_value());
     ASSERT_EQ(1, serverRecoverMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, serverRecoverMetric->ioBytes_.get_value());
@@ -442,9 +445,9 @@
     ASSERT_EQ(1, cpRecoverMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, cpRecoverMetric->ioBytes_.get_value());
     ASSERT_EQ(1, cpRecoverMetric->errorNum_.get_value());

-    // 统计paste失败的情况
-    metric_->OnResponse(
-        logicId, copysetId, CSIOMetricType::PASTE_CHUNK, size, latUs, hasError);
+    // Count paste failures
+    metric_->OnResponse(logicId, copysetId, CSIOMetricType::PASTE_CHUNK, size,
+                        latUs, hasError);
     ASSERT_EQ(0, serverPasteMetric->reqNum_.get_value());
     ASSERT_EQ(1, serverPasteMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, serverPasteMetric->ioBytes_.get_value());
@@ -454,9 +457,9 @@
     ASSERT_EQ(1, cpPasteMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, cpPasteMetric->ioBytes_.get_value());
     ASSERT_EQ(1, cpPasteMetric->errorNum_.get_value());

-    // 统计下载失败的情况
-    metric_->OnResponse(
-        logicId, copysetId, CSIOMetricType::DOWNLOAD, size, latUs, hasError);
+    // Statistics on download failures
+    metric_->OnResponse(logicId, copysetId, CSIOMetricType::DOWNLOAD, size,
+                        latUs, hasError);
     ASSERT_EQ(0, serverDownloadMetric->reqNum_.get_value());
     ASSERT_EQ(1, serverDownloadMetric->ioNum_.get_value());
     ASSERT_EQ(PAGE_SIZE, serverDownloadMetric->ioBytes_.get_value());
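    // [Annotation, not part of the patch: the assertions above pin down the
    //  metric accounting contract. A minimal sketch of what they imply, using
    //  hypothetical names:
    //
    //      struct IOMetricSketch {
    //          uint64_t reqNum = 0, ioNum = 0, ioBytes = 0, errorNum = 0;
    //          void OnRequest() { ++reqNum; }
    //          void OnResponse(size_t size, bool hasError) {
    //              if (hasError) ++errorNum;           // failures only bump errorNum
    //              else { ++ioNum; ioBytes += size; }  // successes bump IO counters
    //          }
    //      };
    //
    //  i.e. OnRequest() counts arrivals only, while the byte/IO counters move
    //  on successful completion.]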
@@ -468,19 +471,21 @@
 }

 TEST_F(CSMetricTest, CountTest) {
-    // 初始状态下,没有copyset,FilePool中有chunkNum个chunk
+    // In the initial state, there is no copyset and there are chunkNum chunks
+    // in FilePool
     ASSERT_EQ(0, metric_->GetCopysetCount());
     ASSERT_EQ(10, metric_->GetChunkLeftCount());  // Shared with chunk file pool
     ASSERT_EQ(0, metric_->GetWalSegmentLeftCount());

-    // 创建copyset
+    // Create copyset
     Configuration conf;
     CopysetID copysetId = 1;
     ASSERT_TRUE(copysetMgr_->CreateCopysetNode(logicId, copysetId, conf));
     ASSERT_EQ(1, metric_->GetCopysetCount());

-    // 此时copyset下面没有chunk和快照
+    // At this point, there are no chunks or snapshots under the copyset
-    CopysetMetricPtr copysetMetric = metric_->GetCopysetMetric(logicId, copysetId);  // NOLINT
+    CopysetMetricPtr copysetMetric =
+        metric_->GetCopysetMetric(logicId, copysetId);  // NOLINT
     ASSERT_EQ(0, copysetMetric->GetChunkCount());
     ASSERT_EQ(0, copysetMetric->GetSnapshotCount());
     ASSERT_EQ(0, copysetMetric->GetCloneChunkCount());
@@ -522,7 +527,8 @@ TEST_F(CSMetricTest, CountTest) {
     ASSERT_EQ(10, metric_->GetWalSegmentLeftCount());
     ASSERT_TRUE(copysetMgr_->CreateCopysetNode(logicId, copysetId2, conf));
     ASSERT_EQ(2, metric_->GetCopysetCount());
-    CopysetMetricPtr copysetMetric2 = metric_->GetCopysetMetric(logicId, copysetId2);  // NOLINT
+    CopysetMetricPtr copysetMetric2 =
+        metric_->GetCopysetMetric(logicId, copysetId2);  // NOLINT
     ASSERT_EQ(0, copysetMetric2->GetWalSegmentCount());
     ASSERT_EQ(1, metric_->GetTotalWalSegmentCount());
@@ -534,7 +540,7 @@ TEST_F(CSMetricTest, CountTest) {
     ASSERT_EQ(1, copysetMetric2->GetWalSegmentCount());
     ASSERT_EQ(2, metric_->GetTotalWalSegmentCount());

-    // 写入数据生成chunk
+    // Write data to generate a chunk
     std::shared_ptr datastore =
         copysetMgr_->GetCopysetNode(logicId, copysetId)->GetDataStore();
     ChunkID id = 1;
@@ -553,7 +559,7 @@ TEST_F(CSMetricTest, CountTest) {
     ASSERT_EQ(0, metric_->GetTotalSnapshotCount());
     ASSERT_EQ(0, metric_->GetTotalCloneChunkCount());

-    // 增加版本号,生成快照
+    // Bump the version number to generate a snapshot
     seq = 2;
     ASSERT_EQ(CSErrorCode::Success,
               datastore->WriteChunk(id, seq, dataBuf, offset, length, nullptr));
     ASSERT_EQ(1, copysetMetric->GetSnapshotCount());
     ASSERT_EQ(0, copysetMetric->GetCloneChunkCount());

-    // 删除快照
+    // Delete the snapshot
     ASSERT_EQ(CSErrorCode::Success,
               datastore->DeleteSnapshotChunkOrCorrectSn(id, seq));
     ASSERT_EQ(1, copysetMetric->GetChunkCount());
     ASSERT_EQ(0, copysetMetric->GetSnapshotCount());
     ASSERT_EQ(0, copysetMetric->GetCloneChunkCount());

-    // 创建 clone chunk
+    // Create clone chunks
     ChunkID id2 = 2;
     ChunkID id3 = 3;
     std::string location = "test@cs";
@@ -580,7 +586,8 @@ TEST_F(CSMetricTest, CountTest) {
     ASSERT_EQ(0, copysetMetric->GetSnapshotCount());
     ASSERT_EQ(2, copysetMetric->GetCloneChunkCount());

-    // clone chunk被覆盖写一遍,clone chun转成普通chunk
+    // Once the clone chunk has been fully overwritten, it is converted into
+    // a regular chunk
     char* buf2 = new char[CHUNK_SIZE];
     butil::IOBuf dataBuf2;
     dataBuf2.append(buf2, CHUNK_SIZE);
@@ -591,15 +598,14 @@ TEST_F(CSMetricTest, CountTest) {
     ASSERT_EQ(0, copysetMetric->GetSnapshotCount());
     ASSERT_EQ(1, copysetMetric->GetCloneChunkCount());

-    // 删除上面的chunk
-    ASSERT_EQ(CSErrorCode::Success,
-              datastore->DeleteChunk(id2, 1));
+    // Delete the chunk above
+    ASSERT_EQ(CSErrorCode::Success, datastore->DeleteChunk(id2, 1));
     ASSERT_EQ(2, copysetMetric->GetChunkCount());
     ASSERT_EQ(0, copysetMetric->GetSnapshotCount());
     ASSERT_EQ(1, copysetMetric->GetCloneChunkCount());

-    // 模拟copyset重新加载datastore,重新初始化后,chunk数量不变
-    // for bug fix: CLDCFS-1473
+    // Simulate the copyset reloading the datastore: after reinitialization
+    // the chunk count stays unchanged (regression for bug fix CLDCFS-1473)
     datastore->Initialize();
     ASSERT_EQ(2, copysetMetric->GetChunkCount());
     ASSERT_EQ(0, copysetMetric->GetSnapshotCount());
@@ -608,7 +614,7 @@ TEST_F(CSMetricTest, CountTest) {
     ASSERT_EQ(0, metric_->GetTotalSnapshotCount());
     ASSERT_EQ(1, metric_->GetTotalCloneChunkCount());

-    // 模拟copyset放入回收站测试
+    // Simulate moving the copyset into the recycle bin
     ASSERT_TRUE(copysetMgr_->PurgeCopysetNodeData(logicId, copysetId));
     ASSERT_TRUE(copysetMgr_->PurgeCopysetNodeData(logicId, copysetId2));
     ASSERT_EQ(nullptr, metric_->GetCopysetMetric(logicId, copysetId));
@@ -619,7 +625,7 @@ TEST_F(CSMetricTest, CountTest) {
     // copysetId2: 1(wal)
     ASSERT_EQ(4, metric_->GetChunkTrashedCount());

-    // 测试leader count计数
+    // Test the leader count
     ASSERT_EQ(0, metric_->GetLeaderCount());
     metric_->IncreaseLeaderCount();
     ASSERT_EQ(1, metric_->GetLeaderCount());
@@ -639,11 +645,11 @@ TEST_F(CSMetricTest, ConfigTest) {
                  "{\"conf_name\":\"chunksize\",\"conf_value\":\"1234\"}");
     ASSERT_STREQ(bvar::Variable::describe_exposed(prefix + "timeout").c_str(),
                  "{\"conf_name\":\"timeout\",\"conf_value\":\"100\"}");
-    // 修改新增配置信息
+    // Modify and add configuration items
     conf.SetStringValue("chunksize", "4321");
     conf.SetStringValue("port", "9999");
     metric_->ExposeConfigMetric(&conf);
-    // // 验证修改后信息
+    // Verify the modified information
     ASSERT_STREQ(bvar::Variable::describe_exposed(prefix + "chunksize").c_str(),
                  "{\"conf_name\":\"chunksize\",\"conf_value\":\"4321\"}");
     ASSERT_STREQ(bvar::Variable::describe_exposed(prefix + "timeout").c_str(),
@@ -657,7 +663,7 @@ TEST_F(CSMetricTest, OnOffTest) {
     ChunkServerMetricOptions metricOptions;
     metricOptions.port = PORT;
     metricOptions.ip = IP;
-    // 关闭metric开关后进行初始化
+    // Initialize after turning off the metric switch
     {
         metricOptions.collectMetric = false;
         ASSERT_EQ(0, metric_->Init(metricOptions));
@@ -669,7 +675,7 @@ TEST_F(CSMetricTest, OnOffTest) {
         ASSERT_EQ(ret, true);
         metric_->ExposeConfigMetric(&conf);
     }
-    // 初始化后获取所有指标项都为空
+    // After initialization, every metric fetched is null
     {
         ASSERT_EQ(metric_->GetIOMetric(CSIOMetricType::READ_CHUNK), nullptr);
         ASSERT_EQ(metric_->GetIOMetric(CSIOMetricType::WRITE_CHUNK), nullptr);
@@ -685,7 +691,8 @@ TEST_F(CSMetricTest, OnOffTest) {
         ASSERT_EQ(metric_->GetTotalCloneChunkCount(), 0);
         ASSERT_EQ(metric_->GetTotalWalSegmentCount(), 0);
     }
-    // 创建copyset的metric返回成功,但实际并未创建
+    // Creating the metric for the copyset returns success, but nothing is
+    // actually created
     {
         CopysetID copysetId = 1;
         ASSERT_EQ(0, metric_->CreateCopysetMetric(logicId, copysetId));
@@ -696,7 +703,7 @@ TEST_F(CSMetricTest, OnOffTest) {
                             PAGE_SIZE, 100, false);
         ASSERT_EQ(0, metric_->RemoveCopysetMetric(logicId, copysetId));
     }
-    // 增加leader count,但是实际未计数
+    // Increase the leader count, but nothing is actually counted
     {
         metric_->IncreaseLeaderCount();
         ASSERT_EQ(metric_->GetLeaderCount(), 0);
diff --git a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp
index b33d196d95..1329b919a6 100644
--- a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp
+++ b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp
@@ -20,38 +20,38 @@
  * Author: tongguangxun
 */

-#include
-#include
 #include
 #include
+#include
+#include
 #include

-#include "src/fs/local_filesystem.h"
-#include "test/fs/mock_local_filesystem.h"
 #include "src/chunkserver/datastore/file_pool.h"
 #include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h"
 #include "src/chunkserver/raftsnapshot/define.h"
+#include "src/fs/local_filesystem.h"
+#include "test/fs/mock_local_filesystem.h"

 using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::DoAll;
+using ::testing::ElementsAre;
 using ::testing::Ge;
 using ::testing::Gt;
 using ::testing::Mock;
-using ::testing::DoAll;
+using ::testing::NotNull;
 using ::testing::Return;
+using ::testing::ReturnArg;
 using ::testing::ReturnPointee;
-using ::testing::NotNull;
-using ::testing::StrEq;
-using ::testing::ElementsAre;
 using ::testing::SetArgPointee;
-using ::testing::ReturnArg;
 using ::testing::SetArgReferee;
-using ::testing::AtLeast;
+using ::testing::StrEq;

+using curve::chunkserver::FilePool;
 using curve::fs::FileSystemType;
 using curve::fs::LocalFileSystem;
 using curve::fs::LocalFsFactory;
-using curve::chunkserver::FilePool;
 using curve::fs::MockLocalFileSystem;

 namespace curve {
 namespace chunkserver {
@@ -63,7 +63,7 @@ class RaftSnapshotFilesystemAdaptorMockTest : public testing::Test {
  public:
     void SetUp() {
         fsptr = curve::fs::LocalFsFactory::CreateFs(
-                        curve::fs::FileSystemType::EXT4, "/dev/sda");
+            curve::fs::FileSystemType::EXT4, "/dev/sda");
         FilePoolPtr_ = std::make_shared(fsptr);
         ASSERT_TRUE(FilePoolPtr_);
@@ -146,32 +146,33 @@
     }

     scoped_refptr fsadaptor;
-    std::shared_ptr  FilePoolPtr_;
-    std::shared_ptr  fsptr;
-    std::shared_ptr  lfs;
-    CurveFilesystemAdaptor*  rfa;
+    std::shared_ptr FilePoolPtr_;
+    std::shared_ptr fsptr;
+    std::shared_ptr lfs;
+    CurveFilesystemAdaptor* rfa;
 };

 TEST_F(RaftSnapshotFilesystemAdaptorMockTest, open_file_mock_test) {
-    // 1. open flag不带CREAT, open失败
+    // 1. open flag without CREAT: open fails
     CreateChunkFile("./10");
     std::string path = "./10";
     butil::File::Error e;
     ASSERT_EQ(FilePoolPtr_->Size(), 3);
     EXPECT_CALL(*lfs, Open(_, _)).Times(AtLeast(1)).WillRepeatedly(Return(-1));
-    braft::FileAdaptor* fa = fsadaptor->open(path,
-                                             O_RDONLY | O_CLOEXEC,
-                                             nullptr,
-                                             &e);
+    braft::FileAdaptor* fa =
+        fsadaptor->open(path, O_RDONLY | O_CLOEXEC, nullptr, &e);
     ASSERT_EQ(FilePoolPtr_->Size(), 3);
     ASSERT_EQ(nullptr, fa);

-    // 2. open flag带CREAT, 从FilePool取文件,但是FilePool打开文件失败
-    // 所以还是走原有逻辑,本地创建文件成功
+    // 2. open flag with CREAT: a file is fetched from FilePool, but FilePool
+    // fails to open it, so the original logic is used instead and the file
+    // is created locally
-    EXPECT_CALL(*lfs, Open(_, _)).Times(3).WillOnce(Return(-1))
-                                 .WillOnce(Return(-1))
-                                 .WillOnce(Return(-1));
+    EXPECT_CALL(*lfs, Open(_, _))
+        .Times(3)
+        .WillOnce(Return(-1))
+        .WillOnce(Return(-1))
+        .WillOnce(Return(-1));
     EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(0));
     ASSERT_EQ(FilePoolPtr_->Size(), 3);
     path = "./11";
@@ -182,7 +183,8 @@
     ASSERT_FALSE(fsptr->FileExists("./10"));
     ASSERT_EQ(nullptr, fa);

-    // 3. 待创建文件在Filter中,但是直接本地创建该文件,创建成功
+    // 3. The file to be created is in the filter list, so it is created
+    // directly on the local filesystem, which succeeds
     EXPECT_CALL(*lfs, Open(_, _)).Times(1).WillOnce(Return(0));
     EXPECT_CALL(*lfs, FileExists(_)).Times(0);
     path = BRAFT_SNAPSHOT_META_FILE;
@@ -191,14 +193,16 @@
 }

 TEST_F(RaftSnapshotFilesystemAdaptorMockTest, delete_file_mock_test) {
-    // 1. 删除文件,文件存在且在过滤名单里,但delete失败,返回false
+    // 1. Delete a file that exists and is on the filter list, but Delete
+    // fails, so false is returned
     EXPECT_CALL(*lfs, DirExists(_)).Times(1).WillRepeatedly(Return(false));
     EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(true));
     EXPECT_CALL(*lfs, Delete(_)).Times(1).WillRepeatedly(Return(-1));
     bool ret = fsadaptor->delete_file(BRAFT_SNAPSHOT_META_FILE, true);
     ASSERT_FALSE(ret);

-    // 2. 删除文件,文件存在且不在过滤名单里,但recycle chunk失败,返回false
+    // 2. Delete a file that exists and is not on the filter list, but
+    // recycling the chunk fails, so false is returned
     EXPECT_CALL(*lfs, Delete(_)).Times(1).WillRepeatedly(Return(-1));
     EXPECT_CALL(*lfs, DirExists(_)).Times(1).WillRepeatedly(Return(false));
     EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(true));
@@ -206,29 +210,35 @@
     ret = fsadaptor->delete_file("temp", true);
     ASSERT_FALSE(ret);

-    // 3. 删除目录,文件存在且不在过滤名单里,但recycle chunk失败,返回false
+    // 3. Delete a directory that exists and is not on the filter list, but
+    // recycling the chunk fails, so false is returned
     std::vector dircontent;
     dircontent.push_back("/2");
     dircontent.push_back("/1");
     dircontent.push_back(BRAFT_SNAPSHOT_META_FILE);
-    EXPECT_CALL(*lfs, DirExists(_)).Times(2).WillOnce(Return(true))
-                                   .WillOnce(Return(false));
+    EXPECT_CALL(*lfs, DirExists(_))
+        .Times(2)
+        .WillOnce(Return(true))
+        .WillOnce(Return(false));
     EXPECT_CALL(*lfs, Delete(_)).Times(1).WillRepeatedly(Return(-1));
     EXPECT_CALL(*lfs, Open(_, _)).Times(1).WillRepeatedly(Return(-1));
-    EXPECT_CALL(*lfs, List(_, _)).Times(2).WillRepeatedly(DoAll(
-                SetArgPointee<1>(dircontent), Return(-1)));
+    EXPECT_CALL(*lfs, List(_, _))
+        .Times(2)
+        .WillRepeatedly(DoAll(SetArgPointee<1>(dircontent), Return(-1)));
     ret = fsadaptor->delete_file("1", true);
     ASSERT_FALSE(ret);
 }

 TEST_F(RaftSnapshotFilesystemAdaptorMockTest, rename_mock_test) {
-    // 1. 重命名文件,文件存在且在过滤名单里,但Rename失败,返回false
+    // 1. Rename a file that exists and is on the filter list, but Rename
+    // fails, so false is returned
     EXPECT_CALL(*lfs, Rename(_, _, _)).Times(1).WillRepeatedly(Return(-1));
     EXPECT_CALL(*lfs, FileExists(_)).Times(0);
     bool ret = fsadaptor->rename("1", BRAFT_SNAPSHOT_META_FILE);
     ASSERT_FALSE(ret);

-    // 2. 重命名文件,文件存在且不在过滤名单里,但Rename失败,返回false
+    // 2. Rename a file that exists and is not on the filter list; the target
+    // chunk is recycled and the rename succeeds, returning true
     EXPECT_CALL(*lfs, Rename(_, _, _)).Times(1).WillRepeatedly(Return(0));
     EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(true));
     EXPECT_CALL(*lfs, Open(_, _)).Times(1).WillRepeatedly(Return(0));
@@ -237,5 +247,5 @@
     ASSERT_TRUE(ret);
 }

-}  // namespace chunkserver
-}  // namespace curve
+}  // namespace chunkserver
+}  // namespace curve
diff --git a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp
index 926ccc76c5..a7de21c7fe 100644
--- a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp
+++ b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp
@@ -20,20 +20,21 @@
  * Author: tongguangxun
*/

-#include
+#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h"
+
 #include
 #include
+#include
 #include

-#include "src/fs/local_filesystem.h"
 #include "src/chunkserver/datastore/file_pool.h"
-#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h"
 #include "src/chunkserver/raftsnapshot/define.h"
+#include "src/fs/local_filesystem.h"

+using curve::chunkserver::FilePool;
 using curve::fs::FileSystemType;
 using curve::fs::LocalFileSystem;
-using curve::chunkserver::FilePool;

 namespace curve {
 namespace chunkserver {
@@ -44,7 +45,7 @@ class CurveFilesystemAdaptorTest : public testing::Test {
  public:
     void SetUp() {
         fsptr = curve::fs::LocalFsFactory::CreateFs(
-                        curve::fs::FileSystemType::EXT4, "/dev/sda");
+            curve::fs::FileSystemType::EXT4, "/dev/sda");
         chunkFilePoolPtr_ = std::make_shared(fsptr);
         ASSERT_TRUE(chunkFilePoolPtr_);
@@ -124,42 +125,39 @@
     }

     scoped_refptr fsadaptor;
-    std::shared_ptr  chunkFilePoolPtr_;
-    std::shared_ptr  fsptr;
-    CurveFilesystemAdaptor*  rfa;
+    std::shared_ptr chunkFilePoolPtr_;
+    std::shared_ptr fsptr;
+    CurveFilesystemAdaptor* rfa;
 };

 TEST_F(CurveFilesystemAdaptorTest, open_file_test) {
-    // 1. open flag不带CREAT
+    // 1. Open flag without CREAT
     std::string path = "./raftsnap/10";
     butil::File::Error e;
     ASSERT_EQ(chunkFilePoolPtr_->Size(), 3);
-    braft::FileAdaptor* fa = fsadaptor->open(path,
-                                             O_RDONLY | O_CLOEXEC,
-                                             nullptr,
-                                             &e);
+    braft::FileAdaptor* fa =
+        fsadaptor->open(path, O_RDONLY | O_CLOEXEC, nullptr, &e);
     ASSERT_EQ(chunkFilePoolPtr_->Size(), 3);
     ASSERT_FALSE(fsptr->FileExists("./raftsnap/10"));
     ASSERT_EQ(nullptr, fa);

-    // 2. open flag待CREAT, 从FilePool取文件
+    // 2. Open flag with CREAT: a file is taken from FilePool
     ASSERT_EQ(chunkFilePoolPtr_->Size(), 3);
     fa = fsadaptor->open(path, O_RDONLY | O_CLOEXEC | O_CREAT, nullptr, &e);
     ASSERT_EQ(chunkFilePoolPtr_->Size(), 2);
     ASSERT_TRUE(fsptr->FileExists("./raftsnap/10"));
     ASSERT_NE(nullptr, fa);

-    // 3. open flag待CREAT,FilePool为空时,从FilePool取文件
+    // 3. Open flag with CREAT while FilePool is empty: taking a file from
+    // FilePool fails
     ClearFilePool();
-    fa = fsadaptor->open("./raftsnap/11",
-                         O_RDONLY | O_CLOEXEC | O_CREAT,
-                         nullptr,
-                         &e);
+    fa = fsadaptor->open("./raftsnap/11", O_RDONLY | O_CLOEXEC | O_CREAT,
+                         nullptr, &e);
     ASSERT_EQ(nullptr, fa);
 }

 TEST_F(CurveFilesystemAdaptorTest, delete_file_test) {
-    // 1. 
Create a multi-level directory with chunk files in it ASSERT_EQ(0, fsptr->Mkdir("./test_temp")); ASSERT_EQ(0, fsptr->Mkdir("./test_temp/test_temp1")); ASSERT_EQ(0, fsptr->Mkdir("./test_temp/test_temp1/test_temp2")); @@ -169,11 +167,11 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { CreateChunkFile("./test_temp/test_temp1/2"); CreateChunkFile("./test_temp/test_temp1/test_temp2/1"); CreateChunkFile("./test_temp/test_temp1/test_temp2/2"); - // 非递归删除非空文件夹,返回false + // Non recursive deletion of non empty folders, returning false ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); ASSERT_FALSE(fsadaptor->delete_file("./test_temp", false)); ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); - // 递归删除文件夹,chunk被回收到FilePool + // Recursively delete folder, chunk is recycled to FilePool ASSERT_TRUE(fsadaptor->delete_file("./test_temp", true)); ASSERT_EQ(chunkFilePoolPtr_->Size(), 9); ASSERT_FALSE(fsptr->DirExists("./test_temp")); @@ -186,7 +184,7 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { ASSERT_FALSE(fsptr->FileExists("./test_temp/test_temp1/test_temp2/1")); ASSERT_FALSE(fsptr->FileExists("./test_temp/test_temp1/test_temp2/2")); - // 2. 创建一个单层空目录 + // 2. Create a single level empty directory ASSERT_EQ(0, fsptr->Mkdir("./test_temp3")); ASSERT_TRUE(fsadaptor->delete_file("./test_temp3", false)); ASSERT_EQ(0, fsptr->Mkdir("./test_temp4")); @@ -195,7 +193,7 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { ASSERT_FALSE(fsptr->DirExists("./test_temp3")); ASSERT_FALSE(fsptr->DirExists("./test_temp4")); - // 3. 删除一个常规chunk文件, 会被回收到FilePool + // 3. Deleting a regular chunk file will be recycled to FilePool ASSERT_EQ(0, fsptr->Mkdir("./test_temp5")); CreateChunkFile("./test_temp5/3"); ASSERT_TRUE(fsadaptor->delete_file("./test_temp5/3", false)); @@ -211,8 +209,7 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { ASSERT_EQ(0, fsptr->Delete("./test_temp5")); ASSERT_EQ(0, fsptr->Delete("./test_temp6")); - - // 4. 删除一个非chunk大小的文件,会直接删除该文件 + // 4. Deleting a file of a non chunk size will directly delete the file ASSERT_EQ(0, fsptr->Mkdir("./test_temp7")); int fd = fsptr->Open("./test_temp7/5", O_RDWR | O_CREAT); char data[4096]; @@ -226,12 +223,13 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { } TEST_F(CurveFilesystemAdaptorTest, rename_test) { - // 1. 创建一个多层目录,且目录中含有chunk文件 + // 1. 
Create a multi-level directory with chunk files in it ASSERT_EQ(0, fsptr->Mkdir("./test_temp")); std::string filename = "./test_temp/"; filename.append(BRAFT_SNAPSHOT_META_FILE); - // 目标文件size是chunksize,但是目标文件在过滤名单里,所以直接过滤 + // The target file size is chunksize, but it is on the filtering list, so it + // is directly filtered CreateChunkFile(filename); int poolSize = chunkFilePoolPtr_->Size(); std::string temppath = "./temp"; @@ -243,7 +241,8 @@ TEST_F(CurveFilesystemAdaptorTest, rename_test) { ASSERT_EQ(poolSize - 1, chunkFilePoolPtr_->Size()); ASSERT_EQ(0, fsptr->Delete(filename)); - // 目标文件size是chunksize,但是目标文件不在过滤名单里,所以先回收再rename + // The target file size is chunksize, but it is not on the filter list, so + // recycle it first and rename it again filename = "./test_temp/"; filename.append("test"); CreateChunkFile(filename); @@ -254,9 +253,8 @@ TEST_F(CurveFilesystemAdaptorTest, rename_test) { ASSERT_TRUE(fsptr->FileExists(filename)); ASSERT_EQ(0, fsptr->Delete(filename)); - ASSERT_EQ(0, fsptr->Delete("./test_temp")); } -} // namespace chunkserver -} // namespace curve +} // namespace chunkserver +} // namespace curve diff --git a/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp b/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp index 9e3ca39605..8b72b7f84e 100644 --- a/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp +++ b/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp @@ -20,12 +20,14 @@ * Author: yangyaokai */ -#include +#include "src/chunkserver/raftsnapshot/curve_snapshot_attachment.h" + #include +#include + #include #include -#include "src/chunkserver/raftsnapshot/curve_snapshot_attachment.h" #include "test/fs/mock_local_filesystem.h" namespace curve { @@ -34,11 +36,11 @@ namespace chunkserver { using curve::fs::MockLocalFileSystem; using ::testing::_; -using ::testing::Return; -using ::testing::Mock; using ::testing::DoAll; -using ::testing::ReturnArg; using ::testing::ElementsAre; +using ::testing::Mock; +using ::testing::Return; +using ::testing::ReturnArg; using ::testing::SetArgPointee; using ::testing::UnorderedElementsAre; @@ -53,13 +55,14 @@ class CurveSnapshotAttachmentMockTest : public testing::Test { new CurveSnapshotAttachment(fs_)); } void TearDown() {} + protected: std::shared_ptr fs_; scoped_refptr attachment_; }; TEST_F(CurveSnapshotAttachmentMockTest, ListTest) { - // 返回成功 + // Return successful vector fileNames; fileNames.emplace_back("chunk_1"); fileNames.emplace_back("chunk_1_snap_1"); @@ -69,24 +72,21 @@ TEST_F(CurveSnapshotAttachmentMockTest, ListTest) { vector snapFiles; attachment_->list_attach_files(&snapFiles, kRaftSnapDir); - std::string snapPath1 = - "../../data/chunk_1_snap_1"; - std::string snapPath2 = - "../../data/chunk_2_snap_1"; - EXPECT_THAT(snapFiles, UnorderedElementsAre(snapPath1.c_str(), - snapPath2.c_str())); + std::string snapPath1 = "../../data/chunk_1_snap_1"; + std::string snapPath2 = "../../data/chunk_2_snap_1"; + EXPECT_THAT(snapFiles, + UnorderedElementsAre(snapPath1.c_str(), snapPath2.c_str())); - // 路径结尾添加反斜杠 + // Add a backslash at the end of the path EXPECT_CALL(*fs_, List(kDataDir, _)) .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); attachment_->list_attach_files(&snapFiles, std::string(kRaftSnapDir) + "/"); - EXPECT_THAT(snapFiles, UnorderedElementsAre(snapPath1.c_str(), - snapPath2.c_str())); - // 返回失败 - EXPECT_CALL(*fs_, List(kDataDir, _)) - .WillRepeatedly(Return(-1)); + EXPECT_THAT(snapFiles, + UnorderedElementsAre(snapPath1.c_str(), 
snapPath2.c_str())); + // Return failed + EXPECT_CALL(*fs_, List(kDataDir, _)).WillRepeatedly(Return(-1)); ASSERT_DEATH(attachment_->list_attach_files(&snapFiles, kRaftSnapDir), ""); } -} // namespace chunkserver -} // namespace curve +} // namespace chunkserver +} // namespace curve diff --git a/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp b/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp index 94bcc4d5a8..66891bc031 100644 --- a/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp +++ b/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp @@ -21,23 +21,23 @@ */ #include -#include #include +#include #include -#include "test/chunkserver/chunkserver_test_util.h" -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; static constexpr uint32_t kOpRequestAlignSize = 4096; @@ -61,7 +61,7 @@ class RaftSnapFilePoolTest : public testing::Test { Exec(TestCluster::RemoveCopysetDirCmd(peer2).c_str()); Exec(TestCluster::RemoveCopysetDirCmd(peer3).c_str()); Exec(TestCluster::RemoveCopysetDirCmd(peer4).c_str()); - ::usleep(100*1000); + ::usleep(100 * 1000); } public: @@ -74,26 +74,22 @@ class RaftSnapFilePoolTest : public testing::Test { }; /** - * TODO(wudemiao) 后期将发 I/O 和验证再抽象一下 + * TODO(wudemiao) will further abstract I/O and verification in the later stage */ /** - * 正常 I/O 验证,先写进去,再读出来验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * Normal I/O verification, write it in first, then read it out for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -static void WriteThenReadVerify(PeerId leaderId, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +static void WriteThenReadVerify(PeerId leaderId, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, + int length, char fillCh, int loop) { brpc::Channel* channel = new brpc::Channel; uint64_t sn = 1; ASSERT_EQ(0, channel->Init(leaderId.addr, NULL)); @@ -108,18 +104,16 @@ static void WriteThenReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(sn); cntl.request_attachment().resize(length, fillCh); ChunkService_Stub stub(channel); stub.WriteChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - if (response.status() == - CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED) { + if (response.status() == 
CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED) { std::string redirect = response.redirect(); leaderId.parse(redirect); delete channel; @@ -127,8 +121,7 @@ static void WriteThenReadVerify(PeerId leaderId, ASSERT_EQ(0, channel->Init(leaderId.addr, NULL)); continue; } - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); // read { @@ -140,13 +133,12 @@ static void WriteThenReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode() + << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); @@ -158,22 +150,18 @@ static void WriteThenReadVerify(PeerId leaderId, } /** - * 正常 I/O 验证,read 数据验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, read data verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -static void ReadVerify(PeerId leaderId, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +static void ReadVerify(PeerId leaderId, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { brpc::Channel channel; uint64_t sn = 1; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); @@ -187,16 +175,14 @@ static void ReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); std::string expectRead(length, fillCh); ASSERT_STREQ(expectRead.c_str(), cntl.response_attachment().to_string().c_str()); @@ -204,18 +190,23 @@ static void ReadVerify(PeerId leaderId, } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 5. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了, install snapshot的数据从FilePool中取文件 - * 6. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 7. transfer leader 到shut down 的peer 上 - * 8. 在 read 之前写入的数据验证 - * 9. 
再 write 数据,再 read 出来验证一遍 + * Verify the shutdown and restart of non-leader nodes in a cluster of 3 nodes, + * and control them to recover from installing snapshots. + * 1. Create a replication group with 3 replicas. + * 2. Wait for the leader to emerge, write data, and then read to verify. + * 3. Shutdown a non-leader node. + * 4. Sleep for a duration longer than a snapshot interval, then write and read + * data. + * 5. Sleep for a duration longer than a snapshot interval again, then write and + * read data. Steps 4 and 5 are to ensure that at least two snapshots are taken. + * Therefore, when the node restarts, it must recover via an install snapshot + * because the log has already been deleted. The data for the install snapshot + * is retrieved from the FilePool. + * 6. Wait for the leader to emerge, then read the previously written data for + * verification. + * 7. Transfer leadership to the shut down peer. + * 8. Verify the data written before the transfer of leadership. + * 9. Write data again, then read it to verify. */ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -238,75 +229,67 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, cluster.StartPeer(peer2, false, true, true)); ASSERT_EQ(0, cluster.StartPeer(peer3, false, true, true)); - // 等待FilePool创建成功 + // Waiting for FilePool creation to succeed std::this_thread::sleep_for(std::chrono::seconds(60)); PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 获取三个chunkserver的FilePool的pool容量 - std::shared_ptr fs(LocalFsFactory::CreateFs( - FileSystemType::EXT4, "")); + // Obtain the pool capacity of FilePool for three chunkservers + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); std::vector Peer1ChunkPoolSize; std::vector Peer2ChunkPoolSize; std::vector Peer3ChunkPoolSize; std::string copysetdir1, copysetdir2, copysetdir3; - butil::string_printf(©setdir1, - "./%s-%d-%d", - butil::ip2str(peer1.addr.ip).c_str(), - peer1.addr.port, + butil::string_printf(©setdir1, "./%s-%d-%d", + butil::ip2str(peer1.addr.ip).c_str(), peer1.addr.port, 0); - butil::string_printf(©setdir2, - "./%s-%d-%d", - butil::ip2str(peer2.addr.ip).c_str(), - peer2.addr.port, + butil::string_printf(©setdir2, "./%s-%d-%d", + butil::ip2str(peer2.addr.ip).c_str(), peer2.addr.port, 0); - butil::string_printf(©setdir3, - "./%s-%d-%d", - butil::ip2str(peer3.addr.ip).c_str(), - peer3.addr.port, + butil::string_printf(©setdir3, "./%s-%d-%d", + butil::ip2str(peer3.addr.ip).c_str(), peer3.addr.port, 0); - fs->List(copysetdir1+"/chunkfilepool", &Peer1ChunkPoolSize); - fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); - fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); + fs->List(copysetdir1 + "/chunkfilepool", &Peer1ChunkPoolSize); + fs->List(copysetdir2 + "/chunkfilepool", &Peer2ChunkPoolSize); + fs->List(copysetdir3 + "/chunkfilepool", &Peer3ChunkPoolSize); - // 目前只有chunk文件才会从FilePool中取 - // raft snapshot meta 和 conf epoch文件直接从文件系统创建 + // Currently, only chunk files are retrieved from FilePool + // raft snapshot meta and conf epoch files are created directly from the + // file system ASSERT_EQ(20, Peer1ChunkPoolSize.size()); ASSERT_EQ(20, Peer2ChunkPoolSize.size()); ASSERT_EQ(20, Peer3ChunkPoolSize.size()); LOG(INFO) << "write 1 start"; - // 发起 read/write, 写数据会触发chunkserver从FilePool取chunk - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write, writing data will 
trigger chunkserver to fetch
+    // chunks from FilePool
     WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch,
                         loop);

     LOG(INFO) << "write 1 end";
-    // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作
-    // 所以先睡一会,防止并发统计文件信息
+    // Operations are not fully synchronized across the raft replicas; some
+    // replicas may still lag behind. Sleep for a moment first so that the
+    // file statistics below do not race with those operations.
-    ::sleep(1*snapshotTimeoutS);
+    ::sleep(1 * snapshotTimeoutS);

     Peer1ChunkPoolSize.clear();
     Peer2ChunkPoolSize.clear();
     Peer3ChunkPoolSize.clear();
-    fs->List(copysetdir1+"/chunkfilepool", &Peer1ChunkPoolSize);
-    fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize);
-    fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize);
+    fs->List(copysetdir1 + "/chunkfilepool", &Peer1ChunkPoolSize);
+    fs->List(copysetdir2 + "/chunkfilepool", &Peer2ChunkPoolSize);
+    fs->List(copysetdir3 + "/chunkfilepool", &Peer3ChunkPoolSize);

-    // 写完数据后,ChunkFilePool容量少一个
+    // After the data is written, the ChunkFilePool holds one file less
     ASSERT_EQ(19, Peer1ChunkPoolSize.size());
     ASSERT_EQ(19, Peer2ChunkPoolSize.size());
     ASSERT_EQ(19, Peer3ChunkPoolSize.size());

-    // shutdown 某个非 leader 的 peer
+    // Shut down a non-leader peer
     PeerId shutdownPeerid;
-    if (0 == ::strcmp(leaderId.to_string().c_str(),
-                      peer1.to_string().c_str())) {
+    if (0 ==
+        ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) {
         shutdownPeerid = peer2;
     } else {
         shutdownPeerid = peer1;
@@ -317,68 +300,61 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) {
                           leaderId.to_string().c_str()));
     ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid));

-    // wait snapshot, 保证能够触发打快照
-    // 本次打快照,raft会从FilePool取一个文件作为快照文件
-    // 然后会把上一次的快照文件删除,删除过的文件会被回收到FilePool
-    // 所以总体上本次写入只会导致datastore从FilePool取文件
-    // 但是快照取了一个又放回去了一个
-    ::sleep(1.5*snapshotTimeoutS);
-    // 再次发起 read/write
+    // Wait for a snapshot, to make sure snapshot creation is triggered.
+    // For this snapshot, raft takes one file from the FilePool as the new
+    // snapshot file and deletes the previous snapshot file, which is then
+    // recycled back into the FilePool. So overall this write only causes the
+    // datastore to take one file from the FilePool: the snapshot takes one
+    // file and returns another.
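    // [Annotation, not part of the patch: the pool-size arithmetic the
    //  surrounding assertions rely on, starting from 20 files per pool:
    //      write 1 (new chunk)     20 -> 19   one file taken for the chunk
    //      snapshot                19 -> 19   takes one file, recycles the
    //                                         previous snapshot file
    //      write 2 (same chunk)    19 -> 19   chunk already allocated
    //      write 3 (chunkId + 1)   19 -> 18   one more chunk file taken
    //  The shut-down peer misses write 3 and therefore stays at 19, which is
    //  exactly what the ASSERT_EQ(19/18, ...) checks below verify.]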
+ ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again LOG(INFO) << "write 2 start"; - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); LOG(INFO) << "write 2 end"; - ::sleep(1*snapshotTimeoutS); + ::sleep(1 * snapshotTimeoutS); Peer1ChunkPoolSize.clear(); Peer2ChunkPoolSize.clear(); Peer3ChunkPoolSize.clear(); - fs->List(copysetdir1+"/chunkfilepool", &Peer1ChunkPoolSize); - fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); - fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); + fs->List(copysetdir1 + "/chunkfilepool", &Peer1ChunkPoolSize); + fs->List(copysetdir2 + "/chunkfilepool", &Peer2ChunkPoolSize); + fs->List(copysetdir3 + "/chunkfilepool", &Peer3ChunkPoolSize); - // 写完数据后,FilePool容量少一个 + // After writing the data, the FilePool capacity is reduced by one ASSERT_EQ(19, Peer1ChunkPoolSize.size()); ASSERT_EQ(19, Peer2ChunkPoolSize.size()); ASSERT_EQ(19, Peer3ChunkPoolSize.size()); - // wait snapshot, 保证能够触发打快照 - // 本次打快照,raft会从FilePool取一个文件作为快照文件 - // 然后会把上一次的快照文件删除,删除过的文件会被回收到FilePool - // 所以总体上本次写入只会导致datastore从FilePool取文件 - // 但是快照取了一个又放回去了一个 - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // wait snapshot, to ensure it triggers snapshot creation. + // In this snapshot creation, Raft will retrieve a file from the FilePool as + // the snapshot file. Then, it will delete the previous snapshot file, and + // the deleted file will be reclaimed into the FilePool. So, overall, this + // snapshot creation will only result in the datastore retrieving a file + // from the FilePool, but it involves taking one snapshot and returning + // another to the FilePool. + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again LOG(INFO) << "write 3 start"; - // 增加chunkid,使chunkserver端的chunk又被取走一个 - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId + 1, - length, - ch + 2, - loop); + // Add a chunkid to remove another chunk from the chunkserver side + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId + 1, length, + ch + 2, loop); LOG(INFO) << "write 3 end"; ::sleep(snapshotTimeoutS); Peer1ChunkPoolSize.clear(); Peer2ChunkPoolSize.clear(); Peer3ChunkPoolSize.clear(); - fs->List(copysetdir1+"/chunkfilepool", &Peer1ChunkPoolSize); - fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); - fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); + fs->List(copysetdir1 + "/chunkfilepool", &Peer1ChunkPoolSize); + fs->List(copysetdir2 + "/chunkfilepool", &Peer2ChunkPoolSize); + fs->List(copysetdir3 + "/chunkfilepool", &Peer3ChunkPoolSize); LOG(INFO) << "chunk pool1 size = " << Peer1ChunkPoolSize.size(); LOG(INFO) << "chunk pool2 size = " << Peer2ChunkPoolSize.size(); LOG(INFO) << "chunk pool3 size = " << Peer3ChunkPoolSize.size(); - // 写完数据后,FilePool容量少一个 + // After writing the data, the FilePool capacity is reduced by one if (shutdownPeerid == peer1) { ASSERT_EQ(19, Peer1ChunkPoolSize.size()); ASSERT_EQ(18, Peer2ChunkPoolSize.size()); @@ -388,22 +364,17 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { } ASSERT_EQ(18, Peer3ChunkPoolSize.size()); - // restart, 需要从 install snapshot 恢复 + // restart, needs to be restored from install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid, false, true, false)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 - ReadVerify(leaderId, logicPoolId, copysetId, chunkId + 1, - length, ch + 2, loop); + // Read it 
out and verify it again + ReadVerify(leaderId, logicPoolId, copysetId, chunkId + 1, length, ch + 2, + loop); LOG(INFO) << "write 4 start"; - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 3, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 3, loop); LOG(INFO) << "write 4 end"; @@ -416,10 +387,7 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - shutdownPeerid, + status = TransferLeader(logicPoolId, copysetId, conf, shutdownPeerid, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderId); @@ -433,20 +401,21 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), shutdownPeerid.to_string().c_str())); - ::sleep(5*snapshotTimeoutS); + ::sleep(5 * snapshotTimeoutS); Peer1ChunkPoolSize.clear(); Peer2ChunkPoolSize.clear(); Peer3ChunkPoolSize.clear(); - fs->List(copysetdir1+"/chunkfilepool", &Peer1ChunkPoolSize); - fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); - fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); + fs->List(copysetdir1 + "/chunkfilepool", &Peer1ChunkPoolSize); + fs->List(copysetdir2 + "/chunkfilepool", &Peer2ChunkPoolSize); + fs->List(copysetdir3 + "/chunkfilepool", &Peer3ChunkPoolSize); LOG(INFO) << "chunk pool1 size = " << Peer1ChunkPoolSize.size(); LOG(INFO) << "chunk pool2 size = " << Peer2ChunkPoolSize.size(); LOG(INFO) << "chunk pool3 size = " << Peer3ChunkPoolSize.size(); - // 当前的raftsnapshot filesystem只存取chunk文件 - // meta文件遵守原有逻辑,直接通过文件系统创建,所以这里只有两个chunk被取出 + // The current raftsnapshot filesystem only accesses chunk files + // The meta file follows the original logic and is created directly through + // the file system, so only two chunks are extracted here ASSERT_EQ(18, Peer1ChunkPoolSize.size()); ASSERT_EQ(18, Peer2ChunkPoolSize.size()); ASSERT_EQ(18, Peer3ChunkPoolSize.size()); diff --git a/test/chunkserver/server.cpp b/test/chunkserver/server.cpp index d6f5d9aa97..50f6f46c1d 100644 --- a/test/chunkserver/server.cpp +++ b/test/chunkserver/server.cpp @@ -20,20 +20,20 @@ * Author: wudemiao */ -#include -#include -#include #include +#include +#include +#include -#include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/copyset_node.h" #include "src/chunkserver/chunk_service.h" -#include "src/fs/fs_common.h" -#include "src/fs/local_filesystem.h" #include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/datastore/file_pool.h" -#include "src/common/uri_parser.h" #include "src/chunkserver/raftsnapshot/curve_snapshot_storage.h" +#include "src/common/uri_parser.h" +#include "src/fs/fs_common.h" +#include "src/fs/local_filesystem.h" using curve::chunkserver::ConcurrentApplyModule; using curve::chunkserver::Configuration; @@ -42,6 +42,7 @@ using curve::chunkserver::CopysetNodeManager; using curve::chunkserver::CopysetNodeOptions; using curve::chunkserver::FilePool; using curve::chunkserver::FilePoolHelper; +using curve::chunkserver::FilePoolMeta; using curve::chunkserver::FilePoolOptions; using curve::chunkserver::LogicPoolID; using curve::chunkserver::PeerId; @@ -52,9 +53,6 @@ using curve::common::UriParser; using 
curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; -using curve::chunkserver::FilePoolHelper; -using curve::chunkserver::FilePoolMeta; DEFINE_string(ip, "127.0.0.1", "Initial configuration of the replication group"); @@ -73,7 +71,7 @@ DEFINE_bool(create_chunkfilepool, true, "create chunkfile pool"); butil::AtExitManager atExitManager; -void CreateChunkFilePool(const std::string &dirname, uint64_t chunksize, +void CreateChunkFilePool(const std::string& dirname, uint64_t chunksize, std::shared_ptr fsptr) { std::string datadir = dirname + "/chunkfilepool"; std::string metapath = dirname + "/chunkfilepool.meta"; @@ -110,7 +108,7 @@ void CreateChunkFilePool(const std::string &dirname, uint64_t chunksize, memcpy(cpopt.metaPath, metapath.c_str(), metapath.size()); FilePoolMeta meta; - meta.chunkSize = cpopt.fileSize; + meta.chunkSize = cpopt.fileSize; meta.metaPageSize = cpopt.metaFileSize; meta.hasBlockSize = true; meta.blockSize = cpopt.blockSize; @@ -120,7 +118,7 @@ void CreateChunkFilePool(const std::string &dirname, uint64_t chunksize, (void)FilePoolHelper::PersistEnCodeMetaInfo(fsptr, meta, metapath); } -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { gflags::ParseCommandLineFlags(&argc, &argv, true); /* Generally you only need one Server. */ @@ -142,7 +140,8 @@ int main(int argc, char *argv[]) { std::shared_ptr fs( LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); const uint32_t kMaxChunkSize = 16 * 1024 * 1024; - // TODO(yyk) 这部分实现不太优雅,后续进行重构 + // TODO(yyk): this part of the implementation is not elegant and will be + // refactored later std::string copysetUri = FLAGS_copyset_dir + "/copysets"; CopysetNodeOptions copysetNodeOptions; copysetNodeOptions.ip = FLAGS_ip; diff --git a/test/chunkserver/trash_test.cpp b/test/chunkserver/trash_test.cpp index 3ddf32f27e..2c28a6015c 100644 --- a/test/chunkserver/trash_test.cpp +++ b/test/chunkserver/trash_test.cpp @@ -512,7 +512,7 @@ TEST_F(TrashTest, recycle_wal_failed) { "curve_log_inprogress_10088")) .WillOnce(Return(-1)); - // 失败的情况下不应删除 + // Nothing should be deleted in the failure case EXPECT_CALL(*lfs, Delete("./runlog/trash_test0/trash/4294967493.55555")) .Times(0); @@ -556,8 +556,7 @@ TEST_F(TrashTest, recycle_copyset_dir_list_err) { .WillOnce(Return(false)); EXPECT_CALL(*lfs, Mkdir(trashPath)).WillOnce(Return(0)); EXPECT_CALL(*lfs, Rename(dirPath, _, 0)).WillOnce(Return(0)); - EXPECT_CALL(*lfs, List(_, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*lfs, List(_, _)).WillOnce(Return(-1)); ASSERT_EQ(0, trash->RecycleCopySet(dirPath)); } @@ -569,8 +568,7 @@ TEST_F(TrashTest, recycle_copyset_dir_ok) { .WillOnce(Return(false)); EXPECT_CALL(*lfs, Mkdir(trashPath)).WillOnce(Return(0)); EXPECT_CALL(*lfs, Rename(dirPath, _, 0)).WillOnce(Return(0)); - EXPECT_CALL(*lfs, List(_, _)) - .WillOnce(Return(0)); + EXPECT_CALL(*lfs, List(_, _)).WillOnce(Return(0)); ASSERT_EQ(0, trash->RecycleCopySet(dirPath)); } @@ -607,18 +605,18 @@ TEST_F(TrashTest, test_chunk_num_statistic) { // chunk_200_snap_1, abc +1 // log/ - using item4list = struct{ + using item4list = struct { std::string subdir; std::vector& names; }; std::vector action4List{ - { "", copysets }, - { "/4294967493.55555", dirs}, - { "/4294967493.55555/data", chunks1 }, - { "/4294967493.55555/log", logfiles1 }, - { "/4294967494.55555", dirs}, - { "/4294967494.55555/data", chunks2 }, - { "/4294967494.55555/log", logfiles2 }, + {"", copysets}, + {"/4294967493.55555", dirs}, + {"/4294967493.55555/data",
chunks1}, + {"/4294967493.55555/log", logfiles1}, + {"/4294967494.55555", dirs}, + {"/4294967494.55555/data", chunks2}, + {"/4294967494.55555/log", logfiles2}, }; for (auto& it : action4List) { @@ -627,18 +625,18 @@ TEST_F(TrashTest, test_chunk_num_statistic) { } EXPECT_CALL(*lfs, DirExists(_)) - .WillOnce(Return(true)) // data - .WillOnce(Return(false)) // chunk_100 - .WillOnce(Return(false)) // chunk_101 - .WillOnce(Return(true)) // log - .WillOnce(Return(false)) // curve_log_10086_10087 - .WillOnce(Return(false)) // curve_log_inprogress_10088_10088 - .WillOnce(Return(false)) // log_10083_10084 - .WillOnce(Return(false)) // log_inprogress_10085 - .WillOnce(Return(true)) // data - .WillOnce(Return(false)) // chunk_200_snap_1 - .WillOnce(Return(false)) // abc - .WillOnce(Return(true)); // log + .WillOnce(Return(true)) // data + .WillOnce(Return(false)) // chunk_100 + .WillOnce(Return(false)) // chunk_101 + .WillOnce(Return(true)) // log + .WillOnce(Return(false)) // curve_log_10086_10087 + .WillOnce(Return(false)) // curve_log_inprogress_10088_10088 + .WillOnce(Return(false)) // log_10083_10084 + .WillOnce(Return(false)) // log_inprogress_10085 + .WillOnce(Return(true)) // data + .WillOnce(Return(false)) // chunk_200_snap_1 + .WillOnce(Return(false)) // abc + .WillOnce(Return(true)); // log trash->Init(ops); ASSERT_EQ(5, trash->GetChunkNum()); @@ -657,14 +655,14 @@ TEST_F(TrashTest, test_chunk_num_statistic) { EXPECT_CALL(*lfs, DirExists(_)) .WillOnce(Return(true)) .WillOnce(Return(false)) - .WillOnce(Return(true)) // data + .WillOnce(Return(true)) // data .WillOnce(Return(false)) .WillOnce(Return(false)) - .WillOnce(Return(true)) // log + .WillOnce(Return(true)) // log .WillOnce(Return(false)) - .WillOnce(Return(true)) // raft_snapshot - .WillOnce(Return(true)) // temp - .WillOnce(Return(true)) // data + .WillOnce(Return(true)) // raft_snapshot + .WillOnce(Return(true)) // temp + .WillOnce(Return(true)) // data .WillOnce(Return(false)); std::string trashedCopysetDir = "/trash_test0/copysets/4294967495"; @@ -695,21 +693,21 @@ TEST_F(TrashTest, test_chunk_num_statistic) { std::vector raftfiles{RAFT_DATA_DIR, RAFT_LOG_DIR}; // DirExists - using item4dirExists = struct{ + using item4dirExists = struct { std::string subdir; bool exist; }; std::vector action4DirExists{ - { "", true }, - { "/4294967493.55555", true }, - { "/4294967493.55555/data", true }, - { "/4294967493.55555/log", true }, - { "/4294967493.55555/data/chunk_100", false }, - { "/4294967493.55555/data/chunk_101", false }, - { "/4294967493.55555/log/curve_log_10086_10087", false }, - { "/4294967493.55555/log/curve_log_inprogress_10088", false }, - { "/4294967493.55555/log/log_10083_10084", false }, - { "/4294967493.55555/log/log_inprogress_10085", false }, + {"", true}, + {"/4294967493.55555", true}, + {"/4294967493.55555/data", true}, + {"/4294967493.55555/log", true}, + {"/4294967493.55555/data/chunk_100", false}, + {"/4294967493.55555/data/chunk_101", false}, + {"/4294967493.55555/log/curve_log_10086_10087", false}, + {"/4294967493.55555/log/curve_log_inprogress_10088", false}, + {"/4294967493.55555/log/log_10083_10084", false}, + {"/4294967493.55555/log/log_inprogress_10085", false}, }; for (auto& it : action4DirExists) { @@ -719,10 +717,10 @@ TEST_F(TrashTest, test_chunk_num_statistic) { // List std::vector action4List2{ - { "", copysets }, - { "/4294967493.55555", raftfiles }, - { "/4294967493.55555/data", chunks1 }, - { "/4294967493.55555/log", logfiles1 }, + {"", copysets}, + {"/4294967493.55555", raftfiles}, + 
{"/4294967493.55555/data", chunks1}, + {"/4294967493.55555/log", logfiles1}, }; for (auto& it : action4List2) { @@ -735,16 +733,16 @@ TEST_F(TrashTest, test_chunk_num_statistic) { SetCopysetNeedDelete(trashPath + "/" + copysets[2], notNeedDelete); // RecycleFile - using item4CycleFile = struct{ + using item4CycleFile = struct { std::shared_ptr pool; std::string subdir; int ret; }; std::vector action4CycleFile{ - { pool, "/4294967493.55555/data/chunk_100", 0 }, - { pool, "/4294967493.55555/data/chunk_101", -1 }, - { walPool, "/4294967493.55555/log/curve_log_10086_10087", 0 }, - { walPool, "/4294967493.55555/log/curve_log_inprogress_10088", -1 }, + {pool, "/4294967493.55555/data/chunk_100", 0}, + {pool, "/4294967493.55555/data/chunk_101", -1}, + {walPool, "/4294967493.55555/log/curve_log_10086_10087", 0}, + {walPool, "/4294967493.55555/log/curve_log_inprogress_10088", -1}, }; for (auto& it : action4CycleFile) { diff --git a/test/client/client_common_unittest.cpp b/test/client/client_common_unittest.cpp index d7601e19f1..6052bf93f1 100644 --- a/test/client/client_common_unittest.cpp +++ b/test/client/client_common_unittest.cpp @@ -20,28 +20,29 @@ * Author: tongguangxun */ -#include - #include "src/client/client_common.h" +#include + namespace curve { namespace client { TEST(ClientCommon, PeerAddrTest) { - // 默认构造函数创建的成员变量内容为空 + // The member variable content created by the default constructor is empty PeerAddr chunkaddr; ASSERT_TRUE(chunkaddr.IsEmpty()); EndPoint ep; str2endpoint("127.0.0.1:8000", &ep); - // 从已有的endpoint创建PeerAddr,变量内容非空 + // Create PeerAddr from an existing endpoint, with non empty variable + // content PeerAddr caddr(ep); ASSERT_FALSE(caddr.IsEmpty()); ASSERT_EQ(caddr.addr_.port, 8000); ASSERT_STREQ("127.0.0.1:8000:0", caddr.ToString().c_str()); - // reset置位后成员变量内容为空 + // After resetting, the member variable content is empty caddr.Reset(); ASSERT_TRUE(caddr.IsEmpty()); @@ -49,7 +50,8 @@ TEST(ClientCommon, PeerAddrTest) { PeerAddr caddr2; ASSERT_TRUE(caddr2.IsEmpty()); - // 从字符串中解析出地址信息,字符串不符合解析格式返回-1,"ip:port:index" + // Resolve address information from the string, if the string does not + // conform to the parsing format, return -1, "ip:port:index" std::string ipaddr1("127.0.0.1"); ASSERT_EQ(-1, caddr2.Parse(ipaddr1)); std::string ipaddr2("127.0.0.q:9000:0"); @@ -61,11 +63,12 @@ TEST(ClientCommon, PeerAddrTest) { std::string ipaddr5("127.0.0.1001:9000:0"); ASSERT_EQ(-1, caddr2.Parse(ipaddr5)); - // 从字符串解析地址成功后,成员变量即为非空 + // After successfully resolving the address from the string, the member + // variable becomes non empty ASSERT_EQ(0, caddr2.Parse(ipaddr)); ASSERT_FALSE(caddr2.IsEmpty()); - // 验证非空成员变量是否为预期值 + // Verify if the non empty member variable is the expected value EndPoint ep1; str2endpoint("127.0.0.1:9000", &ep1); ASSERT_EQ(caddr2.addr_, ep1); diff --git a/test/client/client_mdsclient_metacache_unittest.cpp b/test/client/client_mdsclient_metacache_unittest.cpp index cfae5506e1..6f7fd3fdf3 100644 --- a/test/client/client_mdsclient_metacache_unittest.cpp +++ b/test/client/client_mdsclient_metacache_unittest.cpp @@ -20,48 +20,47 @@ * Author: tongguangxun */ -#include -#include -#include +#include #include +#include #include #include -#include -#include +#include +#include +#include +#include +#include //NOLINT #include #include //NOLINT -#include //NOLINT #include -#include +#include "absl/memory/memory.h" +#include "include/client/libcurve.h" #include "src/client/client_common.h" +#include "src/client/client_config.h" +#include "src/client/config_info.h" #include 
"src/client/file_instance.h" -#include "test/client/fake/mockMDS.h" -#include "src/client/metacache.h" -#include "test/client/fake/mock_schedule.h" -#include "include/client/libcurve.h" #include "src/client/libcurve_file.h" -#include "src/client/client_config.h" -#include "src/client/service_helper.h" #include "src/client/mds_client.h" -#include "src/client/config_info.h" -#include "test/client/fake/fakeMDS.h" +#include "src/client/metacache.h" #include "src/client/metacache_struct.h" +#include "src/client/service_helper.h" #include "src/common/net_common.h" +#include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mockMDS.h" +#include "test/client/fake/mock_schedule.h" +#include "test/client/mock/mock_namespace_service.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" -#include "test/client/mock/mock_namespace_service.h" - -#include "absl/memory/memory.h" uint32_t chunk_size = 4 * 1024 * 1024; uint32_t segment_size = 1 * 1024 * 1024 * 1024; -std::string mdsMetaServerAddr = "127.0.0.1:29104"; // NOLINT -std::string configpath = // NOLINT - "./test/client/configs/client_mdsclient_metacache.conf"; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:29104"; // NOLINT +std::string configpath = // NOLINT + "./test/client/configs/client_mdsclient_metacache.conf"; // NOLINT -extern curve::client::FileClient *globalclient; +extern curve::client::FileClient* globalclient; namespace curve { namespace client { @@ -96,10 +95,10 @@ class MDSClientTest : public ::testing::Test { ASSERT_TRUE(false) << "Fail to add service"; } - curve::mds::topology::GetChunkServerInfoResponse *response = + curve::mds::topology::GetChunkServerInfoResponse* response = new curve::mds::topology::GetChunkServerInfoResponse(); response->set_statuscode(0); - curve::mds::topology::ChunkServerInfo *serverinfo = + curve::mds::topology::ChunkServerInfo* serverinfo = new curve::mds::topology::ChunkServerInfo(); serverinfo->set_chunkserverid(888); serverinfo->set_disktype("nvme"); @@ -113,8 +112,8 @@ class MDSClientTest : public ::testing::Test { serverinfo->set_diskcapacity(11111); serverinfo->set_diskused(1111); response->set_allocated_chunkserverinfo(serverinfo); - FakeReturn *getcsret = - new FakeReturn(nullptr, static_cast(response)); // NOLINT + FakeReturn* getcsret = + new FakeReturn(nullptr, static_cast(response)); // NOLINT topologyservice.SetGetChunkserverFakeReturn(getcsret); brpc::ServerOptions options; @@ -150,8 +149,8 @@ TEST_F(MDSClientTest, Createfile) { ::curve::mds::CreateFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateFileFakeReturn(fakeret); @@ -163,19 +162,18 @@ TEST_F(MDSClientTest, Createfile) { ::curve::mds::CreateFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetCreateFileFakeReturn(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Create(filename.c_str(), userinfo, len)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); 
curvefsservice.SetCreateFileFakeReturn(fakeret2); curvefsservice.CleanRetryTimes(); @@ -197,8 +195,8 @@ TEST_F(MDSClientTest, MkDir) { ::curve::mds::CreateFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateFileFakeReturn(fakeret); @@ -215,20 +213,18 @@ TEST_F(MDSClientTest, MkDir) { ::curve::mds::CreateFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetCreateFileFakeReturn(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Mkdir(dirpath.c_str(), userinfo)); - - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetCreateFileFakeReturn(fakeret2); curvefsservice.CleanRetryTimes(); @@ -250,8 +246,8 @@ TEST_F(MDSClientTest, Closefile) { ::curve::mds::CloseFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCloseFile(fakeret); LOG(INFO) << "now create file!"; @@ -259,25 +255,23 @@ TEST_F(MDSClientTest, Closefile) { mdsclient_.CloseFile(filename.c_str(), userinfo, "sessid"); ASSERT_EQ(ret, LIBCURVE_ERROR::NOTEXIST); - // file close ok ::curve::mds::CloseFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetCloseFile(fakeret1); LOG(INFO) << "now create file!"; ret = mdsclient_.CloseFile(filename.c_str(), userinfo, "sessid"); ASSERT_EQ(ret, LIBCURVE_ERROR::OK); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetCloseFile(fakeret2); curvefsservice.CleanRetryTimes(); @@ -296,8 +290,8 @@ TEST_F(MDSClientTest, Openfile) { ::curve::mds::OpenFileResponse openresponse; openresponse.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice.SetOpenFile(fakeret); FInfo finfo; @@ -308,7 +302,7 @@ TEST_F(MDSClientTest, Openfile) { // has protosession no fileinfo ::curve::mds::OpenFileResponse openresponse1; - ::curve::mds::ProtoSession *se = new ::curve::mds::ProtoSession; + ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession; se->set_sessionid("1"); se->set_createtime(12345); se->set_leasetime(10000000); @@ -317,8 +311,8 @@ TEST_F(MDSClientTest, Openfile) { openresponse1.set_statuscode(::curve::mds::StatusCode::kOK); openresponse1.set_allocated_protosession(se); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&openresponse1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, 
static_cast(&openresponse1)); curvefsservice.SetOpenFile(fakeret1); ASSERT_EQ(globalclient->Open(filename, userinfo), -LIBCURVE_ERROR::FAILED); @@ -326,13 +320,13 @@ TEST_F(MDSClientTest, Openfile) { // has protosession and finfo ::curve::mds::OpenFileResponse openresponse2; - ::curve::mds::ProtoSession *se2 = new ::curve::mds::ProtoSession; + ::curve::mds::ProtoSession* se2 = new ::curve::mds::ProtoSession; se2->set_sessionid("1"); se2->set_createtime(12345); se2->set_leasetime(10000000); se2->set_sessionstatus(::curve::mds::SessionStatus::kSessionOK); - ::curve::mds::FileInfo *fin = new ::curve::mds::FileInfo; + ::curve::mds::FileInfo* fin = new ::curve::mds::FileInfo; fin->set_filename("_filename_"); fin->set_id(1); fin->set_parentid(0); @@ -347,21 +341,21 @@ TEST_F(MDSClientTest, Openfile) { openresponse2.set_allocated_protosession(se2); openresponse2.set_allocated_fileinfo(fin); - FakeReturn *fakeret2 = - new FakeReturn(nullptr, static_cast(&openresponse2)); + FakeReturn* fakeret2 = + new FakeReturn(nullptr, static_cast(&openresponse2)); curvefsservice.SetOpenFile(fakeret2); ASSERT_EQ(globalclient->Open(filename, userinfo), LIBCURVE_ERROR::OK); ASSERT_EQ(LIBCURVE_ERROR::OK, Write(0, nullptr, 0, 0)); ASSERT_EQ(LIBCURVE_ERROR::OK, Read(0, nullptr, 0, 0)); - ::curve::mds::ProtoSession *socupied = new ::curve::mds::ProtoSession; + ::curve::mds::ProtoSession* socupied = new ::curve::mds::ProtoSession; socupied->set_sessionid("1"); socupied->set_createtime(12345); socupied->set_leasetime(10000000); socupied->set_sessionstatus(::curve::mds::SessionStatus::kSessionOK); - ::curve::mds::FileInfo *focupied = new ::curve::mds::FileInfo; + ::curve::mds::FileInfo* focupied = new ::curve::mds::FileInfo; focupied->set_filename("_filename_"); focupied->set_id(1); focupied->set_parentid(0); @@ -381,14 +375,14 @@ TEST_F(MDSClientTest, Openfile) { refreshresponse.set_statuscode(::curve::mds::StatusCode::kOK); refreshresponse.set_sessionid("2"); - FakeReturn *r = - new FakeReturn(nullptr, static_cast(&responseOccupied)); + FakeReturn* r = + new FakeReturn(nullptr, static_cast(&responseOccupied)); curvefsservice.SetOpenFile(r); - FakeReturn *refreshret = - new FakeReturn(nullptr, static_cast(&refreshresponse)); + FakeReturn* refreshret = + new FakeReturn(nullptr, static_cast(&refreshresponse)); curvefsservice.SetRefreshSession(refreshret, []() {}); - curve::mds::FileInfo *info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::GetFileInfoResponse getinforesponse; info->set_filename("_filename_"); info->set_id(1); @@ -402,8 +396,8 @@ TEST_F(MDSClientTest, Openfile) { getinforesponse.set_allocated_fileinfo(info); getinforesponse.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakegetinfo = - new FakeReturn(nullptr, static_cast(&getinforesponse)); + FakeReturn* fakegetinfo = + new FakeReturn(nullptr, static_cast(&getinforesponse)); curvefsservice.SetGetFileInfoFakeReturn(fakegetinfo); int fd = globalclient->Open(filename, userinfo); @@ -411,12 +405,12 @@ TEST_F(MDSClientTest, Openfile) { ASSERT_EQ(LIBCURVE_ERROR::OK, Write(fd, nullptr, 0, 0)); ASSERT_EQ(LIBCURVE_ERROR::OK, Read(fd, nullptr, 0, 0)); - // 测试关闭文件 + // Test closing file ::curve::mds::CloseFileResponse closeresp; closeresp.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakecloseret = - new FakeReturn(nullptr, static_cast(&closeresp)); + FakeReturn* fakecloseret = + new FakeReturn(nullptr, static_cast(&closeresp)); curvefsservice.SetCloseFile(fakecloseret); 
globalclient->Close(fd); @@ -426,12 +420,12 @@ TEST_F(MDSClientTest, Openfile) { ASSERT_EQ(LIBCURVE_ERROR::OK, AioWrite(fd, &aioctx)); ASSERT_EQ(LIBCURVE_ERROR::OK, AioRead(fd, &aioctx)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret3 = - new FakeReturn(&cntl, static_cast(&openresponse2)); + FakeReturn* fakeret3 = + new FakeReturn(&cntl, static_cast(&openresponse2)); curvefsservice.SetOpenFile(fakeret3); curvefsservice.CleanRetryTimes(); @@ -441,8 +435,8 @@ TEST_F(MDSClientTest, Openfile) { ::curve::mds::CloseFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kSessionNotExist); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetCloseFile(fakeret4); globalclient->Close(0); @@ -458,8 +452,8 @@ TEST_F(MDSClientTest, Renamefile) { ::curve::mds::RenameFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetRenameFile(fakeret); @@ -475,8 +469,8 @@ TEST_F(MDSClientTest, Renamefile) { ::curve::mds::RenameFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetRenameFile(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, @@ -486,8 +480,8 @@ TEST_F(MDSClientTest, Renamefile) { ::curve::mds::RenameFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret3 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret3 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetRenameFile(fakeret3); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST, @@ -497,8 +491,8 @@ TEST_F(MDSClientTest, Renamefile) { ::curve::mds::RenameFileResponse response3; response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response3)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response3)); curvefsservice.SetRenameFile(fakeret4); ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL, @@ -508,19 +502,18 @@ TEST_F(MDSClientTest, Renamefile) { ::curve::mds::RenameFileResponse response4; response4.set_statuscode(::curve::mds::StatusCode::kStorageError); - FakeReturn *fakeret5 = - new FakeReturn(nullptr, static_cast(&response4)); + FakeReturn* fakeret5 = + new FakeReturn(nullptr, static_cast(&response4)); curvefsservice.SetRenameFile(fakeret5); ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, globalclient->Rename(userinfo, filename1, filename2)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetRenameFile(fakeret2); curvefsservice.CleanRetryTimes(); @@ -543,8 +536,8 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, 
static_cast(&response)); curvefsservice.SetExtendFile(fakeret); @@ -560,8 +553,8 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetExtendFile(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, @@ -571,8 +564,8 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret3 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret3 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetExtendFile(fakeret3); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST, @@ -582,8 +575,8 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response3; response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response3)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response3)); curvefsservice.SetExtendFile(fakeret4); ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL, @@ -593,8 +586,8 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response4; response4.set_statuscode(::curve::mds::StatusCode::kStorageError); - FakeReturn *fakeret5 = - new FakeReturn(nullptr, static_cast(&response4)); + FakeReturn* fakeret5 = + new FakeReturn(nullptr, static_cast(&response4)); curvefsservice.SetExtendFile(fakeret5); ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, @@ -604,19 +597,18 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response5; response5.set_statuscode(::curve::mds::StatusCode::kShrinkBiggerFile); - FakeReturn *fakeret6 = - new FakeReturn(nullptr, static_cast(&response5)); + FakeReturn* fakeret6 = + new FakeReturn(nullptr, static_cast(&response5)); curvefsservice.SetExtendFile(fakeret6); ASSERT_EQ(-1 * LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE, globalclient->Extend(filename1, userinfo, newsize)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetExtendFile(fakeret2); curvefsservice.CleanRetryTimes(); @@ -640,8 +632,8 @@ TEST_F(MDSClientTest, Deletefile) { ::curve::mds::DeleteFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetDeleteFile(fakeret); @@ -652,8 +644,8 @@ TEST_F(MDSClientTest, Deletefile) { ::curve::mds::DeleteFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetDeleteFile(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Unlink(filename1, userinfo)); @@ -662,8 +654,8 @@ TEST_F(MDSClientTest, Deletefile) { ::curve::mds::DeleteFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret3 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret3 = + new FakeReturn(nullptr, 
static_cast(&response2)); curvefsservice.SetDeleteFile(fakeret3); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST, @@ -673,8 +665,8 @@ TEST_F(MDSClientTest, Deletefile) { ::curve::mds::DeleteFileResponse response3; response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response3)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response3)); curvefsservice.SetDeleteFile(fakeret4); ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL, @@ -684,26 +676,25 @@ TEST_F(MDSClientTest, Deletefile) { ::curve::mds::DeleteFileResponse response4; response4.set_statuscode(::curve::mds::StatusCode::kStorageError); - FakeReturn *fakeret5 = - new FakeReturn(nullptr, static_cast(&response4)); + FakeReturn* fakeret5 = + new FakeReturn(nullptr, static_cast(&response4)); curvefsservice.SetDeleteFile(fakeret5); ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, globalclient->Unlink(filename1, userinfo)); - // 设置delete force + // Set delete force fiu_init(0); fiu_enable("test/client/fake/fakeMDS/forceDeleteFile", 1, nullptr, 0); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOT_SUPPORT, globalclient->Unlink(filename1, userinfo, true)); fiu_disable("test/client/fake/fakeMDS/forceDeleteFile"); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetDeleteFile(fakeret2); curvefsservice.CleanRetryTimes(); @@ -727,8 +718,8 @@ TEST_F(MDSClientTest, Rmdir) { ::curve::mds::DeleteFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetDeleteFile(fakeret); @@ -744,8 +735,8 @@ TEST_F(MDSClientTest, Rmdir) { ::curve::mds::DeleteFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetDeleteFile(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Rmdir(filename1, userinfo)); @@ -754,8 +745,8 @@ TEST_F(MDSClientTest, Rmdir) { ::curve::mds::DeleteFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret3 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret3 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetDeleteFile(fakeret3); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST, @@ -765,8 +756,8 @@ TEST_F(MDSClientTest, Rmdir) { ::curve::mds::DeleteFileResponse response3; response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response3)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response3)); curvefsservice.SetDeleteFile(fakeret4); ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL, @@ -776,19 +767,18 @@ TEST_F(MDSClientTest, Rmdir) { ::curve::mds::DeleteFileResponse response4; response4.set_statuscode(::curve::mds::StatusCode::kStorageError); - FakeReturn *fakeret5 = - new FakeReturn(nullptr, static_cast(&response4)); + FakeReturn* fakeret5 = + new FakeReturn(nullptr, static_cast(&response4)); curvefsservice.SetDeleteFile(fakeret5); ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, 
globalclient->Rmdir(filename1, userinfo)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetDeleteFile(fakeret2); curvefsservice.CleanRetryTimes(); @@ -802,7 +792,7 @@ TEST_F(MDSClientTest, Rmdir) { TEST_F(MDSClientTest, StatFile) { std::string filename = "/1_userinfo_"; - curve::mds::FileInfo *info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::GetFileInfoResponse response; info->set_filename("_filename_"); info->set_id(1); @@ -816,11 +806,11 @@ TEST_F(MDSClientTest, StatFile) { response.set_allocated_fileinfo(info); response.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetGetFileInfoFakeReturn(fakeret); - curve::client::FInfo_t *finfo = new curve::client::FInfo_t; + curve::client::FInfo_t* finfo = new curve::client::FInfo_t; FileStatInfo fstat; globalclient->StatFile(filename, userinfo, &fstat); @@ -831,12 +821,11 @@ TEST_F(MDSClientTest, StatFile) { ASSERT_EQ(fstat.ctime, 12345678); ASSERT_EQ(fstat.length, 4 * 1024 * 1024 * 1024ul); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetGetFileInfoFakeReturn(fakeret2); curvefsservice.CleanRetryTimes(); @@ -872,7 +861,7 @@ TEST_F(MDSClientTest, GetFileInfo) { response.set_statuscode(::curve::mds::StatusCode::kOK); auto fakeret = absl::make_unique( - nullptr, static_cast(&response)); + nullptr, static_cast(&response)); curvefsservice.SetGetFileInfoFakeReturn(fakeret.get()); curve::client::FileEpoch_t fEpoch; @@ -890,19 +879,19 @@ TEST_F(MDSClientTest, GetFileInfo) { ASSERT_EQ(finfo->segmentsize, 1 * 1024 * 1024 * 1024ul); ASSERT_EQ(finfo->blocksize, hasBlockSize ? 
blocksize : 4096); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - auto fakeret2 = absl::make_unique( - &cntl, static_cast(&response)); + auto fakeret2 = + absl::make_unique(&cntl, static_cast(&response)); curvefsservice.SetGetFileInfoFakeReturn(fakeret2.get()); curvefsservice.CleanRetryTimes(); - ASSERT_EQ(LIBCURVE_ERROR::FAILED, - mdsclient_.GetFileInfo(filename, userinfo, finfo.get(), - &fEpoch)); + ASSERT_EQ( + LIBCURVE_ERROR::FAILED, + mdsclient_.GetFileInfo(filename, userinfo, finfo.get(), &fEpoch)); } } @@ -940,7 +929,7 @@ TEST_F(MDSClientTest, GetOrAllocateSegment) { // checkTimer(10000, 11000); curve::mds::GetOrAllocateSegmentResponse response; - curve::mds::PageFileSegment *pfs = new curve::mds::PageFileSegment; + curve::mds::PageFileSegment* pfs = new curve::mds::PageFileSegment; response.set_statuscode(::curve::mds::StatusCode::kOK); response.set_allocated_pagefilesegment(pfs); response.mutable_pagefilesegment()->set_logicalpoolid(1234); @@ -953,8 +942,8 @@ TEST_F(MDSClientTest, GetOrAllocateSegment) { chunk->set_copysetid(i); chunk->set_chunkid(i); } - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetGetOrAllocateSegmentFakeReturn(fakeret); ::curve::mds::topology::GetChunkServerListInCopySetsResponse response_1; @@ -971,8 +960,8 @@ TEST_F(MDSClientTest, GetOrAllocateSegment) { cslocs->set_port(5000 + j); } } - FakeReturn *faktopologyeret = - new FakeReturn(nullptr, static_cast(&response_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(&response_1)); topologyservice.SetFakeReturn(faktopologyeret); curve::client::MetaCache mc; @@ -1035,8 +1024,8 @@ TEST_F(MDSClientTest, GetOrAllocateSegment) { GetChunkServerListInCopySetsResponse response_2; response_2.set_statuscode(-1); - FakeReturn *faktopologyeret_2 = - new FakeReturn(nullptr, static_cast(&response_2)); + FakeReturn* faktopologyeret_2 = + new FakeReturn(nullptr, static_cast(&response_2)); topologyservice.SetFakeReturn(faktopologyeret_2); uint32_t csid; @@ -1097,8 +1086,8 @@ TEST_F(MDSClientTest, GetServerList) { response_1.set_statuscode(0); uint32_t chunkserveridc = 1; - ::curve::common::ChunkServerLocation *cslocs; - ::curve::mds::topology::CopySetServerInfo *csinfo; + ::curve::common::ChunkServerLocation* cslocs; + ::curve::mds::topology::CopySetServerInfo* csinfo; for (int j = 0; j < 256; j++) { csinfo = response_1.add_csinfo(); csinfo->set_copysetid(j); @@ -1111,8 +1100,8 @@ TEST_F(MDSClientTest, GetServerList) { } } - FakeReturn *faktopologyeret = - new FakeReturn(nullptr, static_cast(&response_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(&response_1)); topologyservice.SetFakeReturn(faktopologyeret); std::vector cpidvec; @@ -1222,12 +1211,12 @@ TEST_F(MDSClientTest, GetLeaderTest) { mc.UpdateCopysetInfo(1234, 1234, cslist); - // 测试复制组里第三个addr为leader + // The third addr in the test replication group is the leader curve::chunkserver::GetLeaderResponse2 response1; - curve::common::Peer *peer1 = new curve::common::Peer(); + curve::common::Peer* peer1 = new curve::common::Peer(); peer1->set_address(peerinfo_3.internalAddr.ToString()); response1.set_allocated_leader(peer1); - FakeReturn fakeret1(nullptr, static_cast(&response1)); + FakeReturn fakeret1(nullptr, static_cast(&response1)); cliservice1.SetFakeReturn(&fakeret1); cliservice2.SetFakeReturn(&fakeret1); 
cliservice3.SetFakeReturn(&fakeret1); @@ -1245,12 +1234,14 @@ TEST_F(MDSClientTest, GetLeaderTest) { butil::str2endpoint("127.0.0.1", 29122, &expected); EXPECT_EQ(expected, leaderep); - // 测试拉取新leader失败,需要到mds重新fetch新的serverlist - // 当前新leader是3,尝试再刷新leader,这个时候会从1, 2获取leader - // 但是这时候leader找不到了,于是就会触发向mds重新拉取最新的server list + // The test failed to retrieve the new leader, and a new serverlist needs to + // be retrieved from the mds The current new leader is 3. Try refreshing the + // leader again, and at this time, the leader will be obtained from 1 and 2 + // But at this point, the leader cannot be found, so it will trigger a new + // pull of the latest server list from the mds brpc::Controller controller11; controller11.SetFailed(-1, "error"); - FakeReturn fakeret111(&controller11, static_cast(&response1)); + FakeReturn fakeret111(&controller11, static_cast(&response1)); cliservice1.SetFakeReturn(&fakeret111); cliservice2.SetFakeReturn(&fakeret111); cliservice3.SetFakeReturn(&fakeret111); @@ -1259,8 +1250,8 @@ TEST_F(MDSClientTest, GetLeaderTest) { response_1.set_statuscode(0); uint32_t chunkserveridc = 1; - ::curve::common::ChunkServerLocation *cslocs; - ::curve::mds::topology::CopySetServerInfo *csinfo; + ::curve::common::ChunkServerLocation* cslocs; + ::curve::mds::topology::CopySetServerInfo* csinfo; csinfo = response_1.add_csinfo(); csinfo->set_copysetid(1234); for (int i = 0; i < 4; i++) { @@ -1271,28 +1262,31 @@ TEST_F(MDSClientTest, GetLeaderTest) { cslocs->set_port(29120 + i); } - FakeReturn *faktopologyeret = - new FakeReturn(nullptr, static_cast(&response_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(&response_1)); topologyservice.SetFakeReturn(faktopologyeret); cliservice1.CleanInvokeTimes(); cliservice2.CleanInvokeTimes(); cliservice3.CleanInvokeTimes(); - // 向当前集群中拉取leader,然后会从mds一侧获取新server list + // Pull the leader from the current cluster, and then obtain a new server + // list from the mds side EXPECT_EQ(0, mc.GetLeader(1234, 1234, &ckid, &leaderep, true)); - // getleader请求会跳过当前leader + // The getleader request will skip the current leader EXPECT_EQ(0, cliservice3.GetInvokeTimes()); - // 因为从mds获取新的copyset信息了,所以其leader信息被重置了,需要重新获取新leader - // 获取新新的leader,这时候会从1,2,3,4这四个server拉取新leader,并成功获取新leader + // Because the new copyset information was obtained from the mds, its leader + // information has been reset and a new leader needs to be obtained Obtain a + // new leader, which will be pulled from servers 1, 2, 3, and 4 and + // successfully obtain the new leader std::string leader = "10.182.26.2:29123:0"; peer1 = new curve::common::Peer(); peer1->set_address(leader); peer1->set_id(4321); response1.set_allocated_leader(peer1); - fakeret1 = FakeReturn(nullptr, static_cast(&response1)); + fakeret1 = FakeReturn(nullptr, static_cast(&response1)); cliservice1.SetFakeReturn(&fakeret1); cliservice2.SetFakeReturn(&fakeret1); @@ -1309,7 +1303,8 @@ TEST_F(MDSClientTest, GetLeaderTest) { cliservice3.CleanInvokeTimes(); cliservice4.CleanInvokeTimes(); - // refresh为false,所以只会从metacache中获取,不会发起rpc请求 + // Refresh is false, so it will only be obtained from the metacache and will + // not initiate rpc requests EXPECT_EQ(0, mc.GetLeader(1234, 1234, &ckid, &leaderep, false)); EXPECT_EQ(expected, leaderep); EXPECT_EQ(0, cliservice1.GetInvokeTimes()); @@ -1317,13 +1312,14 @@ TEST_F(MDSClientTest, GetLeaderTest) { EXPECT_EQ(0, cliservice3.GetInvokeTimes()); EXPECT_EQ(0, cliservice4.GetInvokeTimes()); - // 测试新增一个leader,该节点不在配置组内, 然后通过向mds - // 查询其chunkserverInfo之后, 
将其成功插入metacache - curve::common::Peer *peer7 = new curve::common::Peer(); + // Test adding a new leader whose node is not in the configuration group: + // after its chunkserverInfo is queried from the mds, it is successfully + // inserted into the metacache + curve::common::Peer* peer7 = new curve::common::Peer(); leader = "10.182.26.2:29124:0"; peer7->set_address(leader); response1.set_allocated_leader(peer7); - FakeReturn fakeret44(nullptr, static_cast(&response1)); + FakeReturn fakeret44(nullptr, static_cast(&response1)); cliservice1.SetFakeReturn(&fakeret44); cliservice2.SetFakeReturn(&fakeret44); cliservice3.SetFakeReturn(&fakeret44); @@ -1355,19 +1351,18 @@ TEST_F(MDSClientTest, GetLeaderTest) { LOG(INFO) << "GetLeaderTest stopped"; } - TEST_F(MDSClientTest, GetFileInfoException) { std::string filename = "/1_userinfo_"; - FakeReturn *fakeret = nullptr; - curve::client::FInfo_t *finfo = nullptr; + FakeReturn* fakeret = nullptr; + curve::client::FInfo_t* finfo = nullptr; FileEpoch_t fEpoch; { - curve::mds::FileInfo *info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::GetFileInfoResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); response.set_allocated_fileinfo(info); - fakeret = new FakeReturn(nullptr, static_cast(&response)); + fakeret = new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetGetFileInfoFakeReturn(fakeret); finfo = new curve::client::FInfo_t; @@ -1376,7 +1371,7 @@ TEST_F(MDSClientTest, GetFileInfoException) { } { - curve::mds::FileInfo *info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::GetFileInfoResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); info->clear_parentid(); @@ -1389,7 +1384,7 @@ TEST_F(MDSClientTest, GetFileInfoException) { info->clear_segmentsize(); response.set_allocated_fileinfo(info); - fakeret = new FakeReturn(nullptr, static_cast(&response)); + fakeret = new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetGetFileInfoFakeReturn(fakeret); finfo = new curve::client::FInfo_t; @@ -1405,17 +1400,17 @@ TEST_F(MDSClientTest, GetFileInfoException) { } TEST_F(MDSClientTest, CreateCloneFile) { std::string filename = "/1_userinfo_"; FInfo finfo; - curve::mds::FileInfo *info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; - // 设置rpc失败,触发重试 + // Make the rpc fail to trigger a retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); curve::mds::CreateCloneFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); - FakeReturn *fakecreateclone = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakecreateclone = + new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetCreateCloneFile(fakecreateclone); curvefsservice.CleanRetryTimes(); @@ -1424,12 +1419,12 @@ TEST_F(MDSClientTest, CreateCloneFile) { mdsclient_.CreateCloneFile("source", "destination", userinfo, 10 * 1024 * 1024, 0, 4 * 1024 * 1024, 0, 0, "default", &finfo)); - // 认证失败 + // Authentication failed curve::mds::CreateCloneFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakecreateclone1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakecreateclone1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetCreateCloneFile(fakecreateclone1); @@ -1437,14 +1432,14 @@ TEST_F(MDSClientTest, CreateCloneFile) { mdsclient_.CreateCloneFile("source", "destination", userinfo, 10 * 1024 * 1024, 0, 4 *
1024 * 1024, 0, 0, "default", &finfo)); - // 请求成功 + // Request successful info->set_id(5); curve::mds::CreateCloneFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kOK); response2.set_allocated_fileinfo(info); - FakeReturn *fakecreateclone2 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakecreateclone2 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetCreateCloneFile(fakecreateclone2); @@ -1463,15 +1458,15 @@ TEST_F(MDSClientTest, CreateCloneFile) { TEST_F(MDSClientTest, CompleteCloneMeta) { std::string filename = "/1_userinfo_"; - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); curve::mds::SetCloneFileStatusResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); - FakeReturn *fakecreateclone = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakecreateclone = + new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetCloneFileStatus(fakecreateclone); curvefsservice.CleanRetryTimes(); @@ -1479,23 +1474,23 @@ TEST_F(MDSClientTest, CompleteCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::FAILED, mdsclient_.CompleteCloneMeta("destination", userinfo)); - // 认证失败 + // Authentication failed curve::mds::SetCloneFileStatusResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakecreateclone1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakecreateclone1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetCloneFileStatus(fakecreateclone1); ASSERT_EQ(LIBCURVE_ERROR::AUTHFAIL, mdsclient_.CompleteCloneMeta("destination", userinfo)); - // 请求成功 + // Request successful curve::mds::SetCloneFileStatusResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakecreateclone2 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakecreateclone2 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetCloneFileStatus(fakecreateclone2); @@ -1506,15 +1501,15 @@ TEST_F(MDSClientTest, CompleteCloneMeta) { TEST_F(MDSClientTest, CompleteCloneFile) { std::string filename = "/1_userinfo_"; - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); curve::mds::SetCloneFileStatusResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); - FakeReturn *fakecreateclone = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakecreateclone = + new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetCloneFileStatus(fakecreateclone); curvefsservice.CleanRetryTimes(); @@ -1522,23 +1517,23 @@ TEST_F(MDSClientTest, CompleteCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::FAILED, mdsclient_.CompleteCloneFile("destination", userinfo)); - // 认证失败 + // Authentication failed curve::mds::SetCloneFileStatusResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakecreateclone1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakecreateclone1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetCloneFileStatus(fakecreateclone1); ASSERT_EQ(LIBCURVE_ERROR::AUTHFAIL, mdsclient_.CompleteCloneFile("destination", userinfo)); - // 请求成功 + // Request successful curve::mds::SetCloneFileStatusResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakecreateclone2 = - new FakeReturn(nullptr, static_cast(&response2)); + 
FakeReturn* fakecreateclone2 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetCloneFileStatus(fakecreateclone2); @@ -1556,8 +1551,8 @@ TEST_F(MDSClientTest, ChangeOwner) { ::curve::mds::ChangeOwnerResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetChangeOwner(fakeret); @@ -1568,8 +1563,8 @@ TEST_F(MDSClientTest, ChangeOwner) { ::curve::mds::ChangeOwnerResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetChangeOwner(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, @@ -1579,8 +1574,8 @@ TEST_F(MDSClientTest, ChangeOwner) { ::curve::mds::ChangeOwnerResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret3 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret3 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetChangeOwner(fakeret3); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST, @@ -1590,8 +1585,8 @@ TEST_F(MDSClientTest, ChangeOwner) { ::curve::mds::ChangeOwnerResponse response3; response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response3)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response3)); curvefsservice.SetChangeOwner(fakeret4); ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL, @@ -1601,19 +1596,18 @@ TEST_F(MDSClientTest, ChangeOwner) { ::curve::mds::ChangeOwnerResponse response4; response4.set_statuscode(::curve::mds::StatusCode::kStorageError); - FakeReturn *fakeret5 = - new FakeReturn(nullptr, static_cast(&response4)); + FakeReturn* fakeret5 = + new FakeReturn(nullptr, static_cast(&response4)); curvefsservice.SetChangeOwner(fakeret5); ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, globalclient->ChangeOwner(filename1, "newowner", userinfo)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetChangeOwner(fakeret2); curvefsservice.CleanRetryTimes(); @@ -1634,7 +1628,7 @@ TEST_F(MDSClientTest, ListChunkServerTest_CntlFailed) { cntl.SetFailed(-1, "Failed"); std::unique_ptr fakeret( - new FakeReturn(&cntl, static_cast(&response))); + new FakeReturn(&cntl, static_cast(&response))); topologyservice.SetFakeReturn(fakeret.get()); auto startTime = curve::common::TimeUtility::GetTimeofDayMs(); @@ -1652,7 +1646,7 @@ TEST_F(MDSClientTest, ListChunkServerTest_ResponseError) { std::string ip = "127.0.0.1:6666"; std::unique_ptr fakeret( - new FakeReturn(nullptr, static_cast(&response))); + new FakeReturn(nullptr, static_cast(&response))); topologyservice.SetFakeReturn(fakeret.get()); ASSERT_EQ(LIBCURVE_ERROR::FAILED, @@ -1680,7 +1674,7 @@ TEST_F(MDSClientTest, ListChunkServerTest_ResponseOK) { std::string ip = "127.0.0.1:6666"; std::unique_ptr fakeret( - new FakeReturn(nullptr, static_cast(&response))); + new FakeReturn(nullptr, static_cast(&response))); topologyservice.SetFakeReturn(fakeret.get()); std::vector returnIds; @@ -1697,8 +1691,8 @@ TEST_F(MDSClientTest, ListDir) { 
::curve::mds::ListDirResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetListDir(fakeret); @@ -1724,16 +1718,16 @@ TEST_F(MDSClientTest, ListDir) { fin->set_owner("test"); } - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetListDir(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Listdir(filename1, userinfo, &filestatVec)); C_UserInfo_t cuserinfo; memcpy(cuserinfo.owner, "test", 5); - FileStatInfo *filestat = new FileStatInfo[5]; - DirInfo_t *dir = OpenDir(filename1.c_str(), &cuserinfo); + FileStatInfo* filestat = new FileStatInfo[5]; + DirInfo_t* dir = OpenDir(filename1.c_str(), &cuserinfo); ASSERT_NE(dir, nullptr); ASSERT_EQ(-LIBCURVE_ERROR::FAILED, Listdir(nullptr)); ASSERT_EQ(LIBCURVE_ERROR::OK, Listdir(dir)); @@ -1767,8 +1761,8 @@ TEST_F(MDSClientTest, ListDir) { ::curve::mds::ListDirResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret3 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret3 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetListDir(fakeret3); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST, @@ -1778,8 +1772,8 @@ TEST_F(MDSClientTest, ListDir) { ::curve::mds::ListDirResponse response3; response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response3)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response3)); curvefsservice.SetListDir(fakeret4); ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL, @@ -1789,19 +1783,18 @@ TEST_F(MDSClientTest, ListDir) { ::curve::mds::ListDirResponse response4; response4.set_statuscode(::curve::mds::StatusCode::kStorageError); - FakeReturn *fakeret5 = - new FakeReturn(nullptr, static_cast(&response4)); + FakeReturn* fakeret5 = + new FakeReturn(nullptr, static_cast(&response4)); curvefsservice.SetListDir(fakeret5); ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, globalclient->Listdir(filename1, userinfo, &filestatVec)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetListDir(fakeret2); curvefsservice.CleanRetryTimes(); @@ -1816,7 +1809,7 @@ TEST_F(MDSClientTest, ListDir) { TEST(LibcurveInterface, InvokeWithOutInit) { CurveAioContext aioctx; UserInfo_t userinfo; - C_UserInfo_t *ui = nullptr; + C_UserInfo_t* ui = nullptr; FileClient fc; ASSERT_EQ(-LIBCURVE_ERROR::FAILED, fc.Create("", userinfo, 0)); @@ -1859,10 +1852,10 @@ class ServiceHelperGetLeaderTest : public MDSClientTest { using GetLeaderResponse2 = curve::chunkserver::GetLeaderResponse2; void SetUp() override { - // 添加service,并启动server + // Add a service and start the server for (int i = 0; i < kChunkServerNum; ++i) { - auto &chunkserver = chunkServers[i]; - auto &fakeCliService = fakeCliServices[i]; + auto& chunkserver = chunkServers[i]; + auto& fakeCliService = fakeCliServices[i]; ASSERT_EQ(0, chunkserver.AddService( &fakeCliService, brpc::SERVER_DOESNT_OWN_SERVICE)) << "Fail to add service"; @@ -1870,7 +1863,7 @@ class ServiceHelperGetLeaderTest : public 
MDSClientTest { brpc::ServerOptions options; options.idle_timeout_sec = -1; - const auto &ipPort = + const auto& ipPort = "127.0.0.1:" + std::to_string(chunkserverPorts[i]); ASSERT_EQ(0, chunkserver.Start(ipPort.c_str(), &options)) << "Fail to start server"; @@ -1886,7 +1879,7 @@ class ServiceHelperGetLeaderTest : public MDSClientTest { externalAddrs[i] = PeerAddr(endpoint); } - // 设置copyset peer信息 + // Set copyset peer information for (int i = 0; i < kChunkServerNum; ++i) { curve::client::CopysetPeerInfo peerinfo; peerinfo.peerID = i + 1; @@ -1900,7 +1893,7 @@ class ServiceHelperGetLeaderTest : public MDSClientTest { } void ResetAllFakeCliService() { - for (auto &cliService : fakeCliServices) { + for (auto& cliService : fakeCliServices) { cliService.CleanInvokeTimes(); cliService.ClearDelay(); cliService.ClearErrorCode(); @@ -1909,7 +1902,7 @@ class ServiceHelperGetLeaderTest : public MDSClientTest { int GetAllInvokeTimes() { int total = 0; - for (auto &cliService : fakeCliServices) { + for (auto& cliService : fakeCliServices) { total += cliService.GetInvokeTimes(); } @@ -1917,29 +1910,29 @@ class ServiceHelperGetLeaderTest : public MDSClientTest { } void TearDown() override { - for (auto &server : chunkServers) { + for (auto& server : chunkServers) { server.Stop(0); server.Join(); } } - GetLeaderResponse2 MakeResponse(const curve::client::PeerAddr &addr) { + GetLeaderResponse2 MakeResponse(const curve::client::PeerAddr& addr) { GetLeaderResponse2 response; - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address(addr.ToString()); response.set_allocated_leader(peer); return response; } - void SetGetLeaderResponse(const curve::client::PeerAddr &addr) { + void SetGetLeaderResponse(const curve::client::PeerAddr& addr) { static GetLeaderResponse2 response; response = MakeResponse(addr); static FakeReturn fakeret(nullptr, nullptr); - fakeret = FakeReturn(nullptr, static_cast(&response)); + fakeret = FakeReturn(nullptr, static_cast(&response)); - for (auto &cliService : fakeCliServices) { + for (auto& cliService : fakeCliServices) { cliService.SetFakeReturn(&fakeret); } @@ -1971,16 +1964,16 @@ class ServiceHelperGetLeaderTest : public MDSClientTest { }; TEST_F(ServiceHelperGetLeaderTest, NormalTest) { - // 测试复制组里第一个chunkserver为leader + // Test with the first chunkserver in the replication group as the leader GetLeaderResponse2 response = MakeResponse(internalAddrs[0]); - FakeReturn fakeret0(nullptr, static_cast(&response)); + FakeReturn fakeret0(nullptr, static_cast(&response)); fakeCliServices[0].SetFakeReturn(&fakeret0); - FakeReturn fakeret1(nullptr, static_cast(&response)); + FakeReturn fakeret1(nullptr, static_cast(&response)); fakeCliServices[1].SetFakeReturn(&fakeret1); - FakeReturn fakeret2(nullptr, static_cast(&response)); + FakeReturn fakeret2(nullptr, static_cast(&response)); fakeCliServices[2].SetFakeReturn(&fakeret2); GetLeaderRpcOption rpcOption; @@ -1993,14 +1986,15 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) { ResetAllFakeCliService(); - // 测试第二次拉取新的leader,直接跳过第一个chunkserver,查找第2,3两个 + // Test pulling the new leader a second time: the first chunkserver is + // skipped and only the second and third are queried int32_t currentLeaderIndex = 0; curve::client::PeerAddr currentLeader = internalAddrs[currentLeaderIndex]; response = MakeResponse(currentLeader); - fakeret1 = FakeReturn(nullptr, static_cast(&response)); + fakeret1 = FakeReturn(nullptr, static_cast(&response));
fakeCliServices[1].SetFakeReturn(&fakeret1);
-    fakeret2 = FakeReturn(nullptr, static_cast<void *>(&response));
+    fakeret2 = FakeReturn(nullptr, static_cast<void*>(&response));
     fakeCliServices[2].SetFakeReturn(&fakeret2);
     getLeaderInfo = GetLeaderInfo(kLogicPoolId, kCopysetId, copysetPeerInfos,
@@ -2012,15 +2006,16 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) {
     ResetAllFakeCliService();
-    // 测试第三次获取leader,会跳过第二个chunkserver,重试1/3
+    // On the third fetch of the leader, skip the second chunkserver and
+    // retry on chunkservers 1 and 3
     currentLeaderIndex = 1;
     currentLeader = internalAddrs[currentLeaderIndex];
     response = MakeResponse(currentLeader);
-    fakeret1 = FakeReturn(nullptr, static_cast<void *>(&response));
+    fakeret1 = FakeReturn(nullptr, static_cast<void*>(&response));
     fakeCliServices[1].SetFakeReturn(&fakeret1);
-    fakeret2 = FakeReturn(nullptr, static_cast<void *>(&response));
+    fakeret2 = FakeReturn(nullptr, static_cast<void*>(&response));
     fakeCliServices[2].SetFakeReturn(&fakeret2);
     getLeaderInfo = GetLeaderInfo(kLogicPoolId, kCopysetId, copysetPeerInfos,
@@ -2034,13 +2029,14 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) {
 }
 TEST_F(ServiceHelperGetLeaderTest, RpcDelayTest) {
-    // 设置第三个chunkserver为leader
+    // Set the third chunkserver as the leader
     const auto currentLeaderIndex = 2;
-    const auto &currentLeader = internalAddrs[2];
+    const auto& currentLeader = internalAddrs[2];
     SetGetLeaderResponse(currentLeader);
-    // 再次GetLeader会向chunkserver 1/2 发送请求
-    // 在chunksever GetLeader service 中加入sleep,触发backup request
+    // Calling GetLeader again sends requests to chunkservers 1 and 2
+    // Add a sleep to the chunkserver GetLeader service to trigger the
+    // backup request
     fakeCliServices[0].SetDelayMs(200);
     fakeCliServices[1].SetDelayMs(200);
@@ -2063,25 +2059,26 @@ TEST_F(ServiceHelperGetLeaderTest, RpcDelayAndExceptionTest) {
     std::vector<int> exceptionErrCodes{ENOENT,       EAGAIN,     EHOSTDOWN,
                                        ECONNREFUSED, ECONNRESET, brpc::ELOGOFF};
-    // 设置第三个chunkserver为leader,GetLeader会向chunkserver 1/2发送请求
+    // Set the third chunkserver as the leader; GetLeader will send requests
+    // to chunkservers 1 and 2
     const auto currentLeaderIndex = 2;
-    const auto &currentLeader = internalAddrs[currentLeaderIndex];
+    const auto& currentLeader = internalAddrs[currentLeaderIndex];
     SetGetLeaderResponse(currentLeader);
-    // 设置第一个chunkserver GetLeader service 延迟
+    // Add a delay to the first chunkserver's GetLeader service
     fakeCliServices[0].SetDelayMs(200);
-    // 设置第二个chunkserver 返回对应的错误码
+    // Make the second chunkserver return the corresponding error code
     for (auto errCode : exceptionErrCodes) {
         fakeCliServices[1].SetErrorCode(errCode);
         brpc::Controller controller;
         controller.SetFailed(errCode, "Failed");
-        curve::common::Peer *peer = new curve::common::Peer();
+        curve::common::Peer* peer = new curve::common::Peer();
         peer->set_address(currentLeader.ToString());
         GetLeaderResponse2 response;
         response.set_allocated_leader(peer);
-        FakeReturn fakeret(&controller, static_cast<void *>(&response));
+        FakeReturn fakeret(&controller, static_cast<void*>(&response));
         fakeCliServices[1].SetFakeReturn(&fakeret);
         GetLeaderRpcOption rpcOption;
@@ -2095,7 +2092,7 @@ TEST_F(ServiceHelperGetLeaderTest, RpcDelayAndExceptionTest) {
         std::this_thread::sleep_for(std::chrono::seconds(1));
         ASSERT_EQ(currentLeader, leaderAddr);
-        for (auto &cliservice : fakeCliServices) {
+        for (auto& cliservice : fakeCliServices) {
            cliservice.CleanInvokeTimes();
         }
     }
@@ -2105,25 +2102,25 @@ TEST_F(ServiceHelperGetLeaderTest, AllChunkServerExceptionTest) {
     std::vector<int> exceptionErrCodes{ENOENT,       EAGAIN,     EHOSTDOWN,
                                        ECONNREFUSED, ECONNRESET,
brpc::ELOGOFF};
-    // 设置第三个chunkserver为leader
+    // Set the third chunkserver as the leader
     const auto currentLeaderIndex = 2;
-    const auto &currentLeader = internalAddrs[currentLeaderIndex];
+    const auto& currentLeader = internalAddrs[currentLeaderIndex];
     SetGetLeaderResponse(currentLeader);
-    // 另外两个chunkserver都返回对应的错误码
+    // The other two chunkservers both return the corresponding error code
     for (auto errCode : exceptionErrCodes) {
         fakeCliServices[0].SetErrorCode(errCode);
         fakeCliServices[1].SetErrorCode(errCode);
         brpc::Controller controller;
         controller.SetFailed(errCode, "Failed");
-        curve::common::Peer *peer = new curve::common::Peer();
+        curve::common::Peer* peer = new curve::common::Peer();
         peer->set_address(currentLeader.ToString());
         GetLeaderResponse2 response;
         response.set_allocated_leader(peer);
-        FakeReturn fakeret(&controller, static_cast<void *>(&response));
+        FakeReturn fakeret(&controller, static_cast<void*>(&response));
         fakeCliServices[0].SetFakeReturn(&fakeret);
         fakeCliServices[1].SetFakeReturn(&fakeret);
@@ -2178,8 +2175,8 @@ TEST_F(MDSClientTest, StatFileStatusTest) {
     response.set_allocated_fileinfo(info.release());
     response.set_statuscode(::curve::mds::StatusCode::kOK);
-    FakeReturn *fakeret =
-        new FakeReturn(nullptr, static_cast<void *>(&response));
+    FakeReturn* fakeret =
+        new FakeReturn(nullptr, static_cast<void*>(&response));
     curvefsservice.SetGetFileInfoFakeReturn(fakeret);
     std::unique_ptr finfo(
@@ -2208,7 +2205,7 @@ TEST_F(MDSClientTest, DeAllocateSegmentTest) {
         brpc::Controller cntl;
         cntl.SetFailed(-1, "rpc failed");
-        FakeReturn *fakeRet = new FakeReturn(&cntl, nullptr);
+        FakeReturn* fakeRet = new FakeReturn(&cntl, nullptr);
         curvefsservice.SetDeAllocateSegmentFakeReturn(fakeRet);
         uint64_t startMs = curve::common::TimeUtility::GetTimeofDayMs();
@@ -2222,7 +2219,7 @@ TEST_F(MDSClientTest, DeAllocateSegmentTest) {
     {
         curve::mds::DeAllocateSegmentResponse response;
         response.set_statuscode(curve::mds::StatusCode::kOK);
-        FakeReturn *fakeRet = new FakeReturn(nullptr, &response);
+        FakeReturn* fakeRet = new FakeReturn(nullptr, &response);
         curvefsservice.SetDeAllocateSegmentFakeReturn(fakeRet);
         ASSERT_EQ(LIBCURVE_ERROR::OK,
@@ -2233,7 +2230,7 @@ TEST_F(MDSClientTest, DeAllocateSegmentTest) {
     {
         curve::mds::DeAllocateSegmentResponse response;
         response.set_statuscode(curve::mds::StatusCode::kSegmentNotAllocated);
-        FakeReturn *fakeRet = new FakeReturn(nullptr, &response);
+        FakeReturn* fakeRet = new FakeReturn(nullptr, &response);
         curvefsservice.SetDeAllocateSegmentFakeReturn(fakeRet);
         ASSERT_EQ(LIBCURVE_ERROR::OK,
@@ -2251,7 +2248,7 @@ TEST_F(MDSClientTest, DeAllocateSegmentTest) {
     for (auto err : errorCodes) {
         curve::mds::DeAllocateSegmentResponse response;
         response.set_statuscode(err);
-        FakeReturn *fakeRet = new FakeReturn(nullptr, &response);
+        FakeReturn* fakeRet = new FakeReturn(nullptr, &response);
         curvefsservice.SetDeAllocateSegmentFakeReturn(fakeRet);
         ASSERT_NE(LIBCURVE_ERROR::OK,
@@ -2272,10 +2269,10 @@ using ::testing::SaveArgPointee;
 using ::testing::SetArgPointee;
 using ::testing::SetArrayArgument;
-static void MockRefreshSession(::google::protobuf::RpcController *controller,
-                               const curve::mds::ReFreshSessionRequest *request,
-                               curve::mds::ReFreshSessionResponse *response,
-                               ::google::protobuf::Closure *done) {
+static void MockRefreshSession(::google::protobuf::RpcController* controller,
+                               const curve::mds::ReFreshSessionRequest* request,
+                               curve::mds::ReFreshSessionResponse* response,
+                               ::google::protobuf::Closure* done) {
     brpc::ClosureGuard guard(done);
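Every FakeReturn in the hunks above follows one convention: a null brpc::Controller means "answer with the canned response", while a controller that has had SetFailed() called on it simulates an RPC-level failure so the client is forced to retry. Below is a minimal self-contained sketch of just that convention; FakeReturnSketch is invented for illustration, while the real FakeReturn lives with the client test fakes (see test/client/fake/) and differs in detail.

#include <brpc/controller.h>
#include <iostream>

// Sketch only: pairs an optional failed controller with a canned response,
// mirroring how these tests drive the fake MDS and chunkserver services.
struct FakeReturnSketch {
    brpc::Controller* controller;  // non-null and Failed() => simulated error
    void* response;                // protobuf response object cast to void*

    FakeReturnSketch(brpc::Controller* cntl, void* resp)
        : controller(cntl), response(resp) {}

    bool SimulatesRpcFailure() const {
        return controller != nullptr && controller->Failed();
    }
};

int main() {
    brpc::Controller cntl;
    cntl.SetFailed(-1, "failed");
    int canned = 0;  // stands in for a real protobuf response

    FakeReturnSketch ok(nullptr, static_cast<void*>(&canned));
    FakeReturnSketch bad(&cntl, static_cast<void*>(&canned));
    std::cout << ok.SimulatesRpcFailure() << " "
              << bad.SimulatesRpcFailure() << std::endl;  // prints: 0 1
}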
response->set_statuscode(curve::mds::StatusCode::kOK); @@ -2317,7 +2314,7 @@ TEST_F(MDSClientRefreshSessionTest, StartDummyServerTest) { curve::mds::ReFreshSessionRequest request; curve::mds::ReFreshSessionResponse response; - curve::mds::FileInfo *fileInfo = new curve::mds::FileInfo(); + curve::mds::FileInfo* fileInfo = new curve::mds::FileInfo(); response.set_allocated_fileinfo(fileInfo); EXPECT_CALL(curveFsService_, RefreshSession(_, _, _, _)) .WillOnce(DoAll(SaveArgPointee<1>(&request), SetArgPointee<2>(response), @@ -2344,7 +2341,7 @@ TEST_F(MDSClientRefreshSessionTest, NoStartDummyServerTest) { curve::mds::ReFreshSessionRequest request; curve::mds::ReFreshSessionResponse response; - curve::mds::FileInfo *fileInfo = new curve::mds::FileInfo(); + curve::mds::FileInfo* fileInfo = new curve::mds::FileInfo(); response.set_allocated_fileinfo(fileInfo); EXPECT_CALL(curveFsService_, RefreshSession(_, _, _, _)) .WillOnce(DoAll(SaveArgPointee<1>(&request), SetArgPointee<2>(response), @@ -2376,7 +2373,7 @@ const std::vector clientConf{ std::string("throttle.enable=true"), }; -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); diff --git a/test/client/client_metric_test.cpp b/test/client/client_metric_test.cpp index 2f092fc79f..4072bd60f4 100644 --- a/test/client/client_metric_test.cpp +++ b/test/client/client_metric_test.cpp @@ -20,37 +20,38 @@ * Author: tongguangxun */ -#include +#include "src/client/client_metric.h" + #include #include +#include -#include // NOLINT -#include // NOLINT -#include // NOLINT -#include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT -#include "proto/nameserver2.pb.h" #include "include/client/libcurve.h" -#include "src/client/client_metric.h" -#include "src/client/file_instance.h" -#include "test/client/fake/mock_schedule.h" -#include "test/client/fake/fakeMDS.h" -#include "src/client/libcurve_file.h" +#include "proto/nameserver2.pb.h" #include "src/client/client_common.h" #include "src/client/client_config.h" +#include "src/client/file_instance.h" +#include "src/client/libcurve_file.h" +#include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mock_schedule.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" DECLARE_string(chunkserver_list); -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string mdsMetaServerAddr = "127.0.0.1:9150"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9150"; // NOLINT namespace curve { namespace client { -const std::vector clientConf { +const std::vector clientConf{ std::string("mds.listen.addr=127.0.0.1:9150"), std::string("global.logPath=./runlog/"), std::string("chunkserver.rpcTimeoutMS=1000"), @@ -64,7 +65,7 @@ const std::vector clientConf { }; TEST(MetricTest, ChunkServer_MetricTest) { - MetaServerOption metaopt; + MetaServerOption metaopt; metaopt.rpcRetryOpt.addrs.push_back(mdsMetaServerAddr); metaopt.rpcRetryOpt.rpcTimeoutMs = 500; metaopt.rpcRetryOpt.rpcRetryIntervalUS = 200; @@ -72,25 +73,26 @@ TEST(MetricTest, ChunkServer_MetricTest) { std::shared_ptr mdsclient = std::make_shared(); ASSERT_EQ(0, mdsclient->Initialize(metaopt)); - FLAGS_chunkserver_list = "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT + FLAGS_chunkserver_list = + 
"127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT std::string configpath("./test/client/configs/client_metric.conf"); curve::CurveCluster* cluster = new curve::CurveCluster(); - cluster->PrepareConfig( - configpath, clientConf); + cluster->PrepareConfig(configpath, + clientConf); ClientConfig cc; ASSERT_EQ(0, cc.Init(configpath.c_str())); - // filename必须是全路径 + // The filename must be a full path std::string filename = "/1_userinfo_"; // init mds service FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9130, &ep); PeerId pd(ep); @@ -147,13 +149,13 @@ TEST(MetricTest, ChunkServer_MetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(4096, ret); - // 先睡眠,确保采样 + // Sleep first to ensure sampling std::this_thread::sleep_for(std::chrono::seconds(2)); ASSERT_GT(fm->writeRPC.latency.max_latency(), 0); ASSERT_GT(fm->readRPC.latency.max_latency(), 0); - // read write超时重试 + // Read write timeout retry mds.EnableNetUnstable(8000); ret = fi.Write(buffer, 0, 4096); ASSERT_EQ(-2, ret); @@ -165,8 +167,8 @@ TEST(MetricTest, ChunkServer_MetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(-2, ret); - - // 4次正确读写,4次超时读写,超时会引起重试,重试次数为3,数据量最大是8192 + // 4 correct reads and writes, 4 timeout reads and writes, timeout will + // cause retries, retry count is 3, and the maximum data volume is 8192 ASSERT_EQ(fm->inflightRPCNum.get_value(), 0); ASSERT_EQ(fm->userRead.qps.count.get_value(), 2); ASSERT_EQ(fm->userWrite.qps.count.get_value(), 2); @@ -204,7 +206,7 @@ void cb(CurveAioContext* ctx) { } // namespace TEST(MetricTest, SlowRequestMetricTest) { - MetaServerOption metaopt; + MetaServerOption metaopt; metaopt.rpcRetryOpt.addrs.push_back(mdsMetaServerAddr); metaopt.rpcRetryOpt.rpcTimeoutMs = 500; metaopt.rpcRetryOpt.rpcRetryIntervalUS = 200; @@ -212,16 +214,17 @@ TEST(MetricTest, SlowRequestMetricTest) { std::shared_ptr mdsclient = std::make_shared(); ASSERT_EQ(0, mdsclient->Initialize(metaopt)); - FLAGS_chunkserver_list = "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT + FLAGS_chunkserver_list = + "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT - // filename必须是全路径 + // The filename must be a full path std::string filename = "/1_userinfo_"; // init mds service FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9130, &ep); PeerId pd(ep); @@ -267,13 +270,13 @@ TEST(MetricTest, SlowRequestMetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(4096, ret); - // 先睡眠,确保采样 + // Sleep first to ensure sampling std::this_thread::sleep_for(std::chrono::seconds(2)); ASSERT_GT(fm->writeRPC.latency.max_latency(), 0); ASSERT_GT(fm->readRPC.latency.max_latency(), 0); - // read write超时重试 + // Read write timeout retry mds.EnableNetUnstable(100); ret = fi.Write(buffer, 0, 4096); ASSERT_EQ(-2, ret); @@ -383,5 +386,5 @@ TEST(MetricTest, MetricHelperTest) { ASSERT_NO_THROW(MetricHelper::IncremSlowRequestNum(nullptr)); } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/test/client/client_session_unittest.cpp b/test/client/client_session_unittest.cpp index 5606204b83..680d80ce93 100644 --- a/test/client/client_session_unittest.cpp +++ b/test/client/client_session_unittest.cpp @@ -20,30 +20,29 @@ * Author: tongguangxun */ -#include -#include +#include +#include #include #include -#include +#include +#include +#include #include #include -#include -#include - 
-#include // NOLINT -#include // NOLINT #include +#include // NOLINT +#include // NOLINT #include -#include // NOLINT +#include // NOLINT #include #include "src/client/client_config.h" -#include "test/client/fake/fakeMDS.h" #include "src/client/file_instance.h" #include "src/client/iomanager4file.h" #include "src/client/libcurve_file.h" #include "test/client/fake/fakeChunkserver.h" +#include "test/client/fake/fakeMDS.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" @@ -72,7 +71,7 @@ void sessioncallback(CurveAioContext* aioctx) { TEST(ClientSession, LeaseTaskTest) { FLAGS_chunkserver_list = - "127.0.0.1:9176:0,127.0.0.1:9177:0,127.0.0.1:9178:0"; + "127.0.0.1:9176:0,127.0.0.1:9177:0,127.0.0.1:9178:0"; std::string filename = "/1"; @@ -80,7 +79,7 @@ TEST(ClientSession, LeaseTaskTest) { FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid curve::client::EndPoint ep; butil::str2endpoint("127.0.0.1", 9176, &ep); PeerId pd(ep); @@ -104,7 +103,7 @@ TEST(ClientSession, LeaseTaskTest) { // set openfile response ::curve::mds::OpenFileResponse openresponse; - curve::mds::FileInfo * finfo = new curve::mds::FileInfo; + curve::mds::FileInfo* finfo = new curve::mds::FileInfo; ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession; se->set_sessionid("1"); se->set_createtime(12345); @@ -117,8 +116,8 @@ TEST(ClientSession, LeaseTaskTest) { openresponse.set_allocated_protosession(se); openresponse.set_allocated_fileinfo(finfo); - FakeReturn* openfakeret - = new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* openfakeret = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice->SetOpenFile(openfakeret); // 2. set refresh response @@ -129,7 +128,7 @@ TEST(ClientSession, LeaseTaskTest) { std::unique_lock lk(mtx); refreshcv.notify_one(); }; - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; info->set_filename(filename); info->set_seqnum(2); info->set_id(1); @@ -143,8 +142,8 @@ TEST(ClientSession, LeaseTaskTest) { refreshresp.set_statuscode(::curve::mds::StatusCode::kOK); refreshresp.set_sessionid("1234"); refreshresp.set_allocated_fileinfo(info); - FakeReturn* refreshfakeret - = new FakeReturn(nullptr, static_cast(&refreshresp)); + FakeReturn* refreshfakeret = + new FakeReturn(nullptr, static_cast(&refreshresp)); curvefsservice->SetRefreshSession(refreshfakeret, refresht); // 3. open the file @@ -253,10 +252,9 @@ TEST(ClientSession, LeaseTaskTest) { refreshresp.set_allocated_fileinfo(newFileInfo); refreshresp.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* refreshFakeRetWithNewInodeId = new FakeReturn( - nullptr, static_cast(&refreshresp)); - curvefsservice->SetRefreshSession( - refreshFakeRetWithNewInodeId, refresht); + FakeReturn* refreshFakeRetWithNewInodeId = + new FakeReturn(nullptr, static_cast(&refreshresp)); + curvefsservice->SetRefreshSession(refreshFakeRetWithNewInodeId, refresht); { std::unique_lock lk(mtx); @@ -302,8 +300,8 @@ TEST(ClientSession, LeaseTaskTest) { // 11. 
set fake close return ::curve::mds::CloseFileResponse closeresp; closeresp.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* closefileret - = new FakeReturn(nullptr, static_cast(&closeresp)); + FakeReturn* closefileret = + new FakeReturn(nullptr, static_cast(&closeresp)); curvefsservice->SetCloseFile(closefileret); LOG(INFO) << "uninit fileinstance"; @@ -321,12 +319,12 @@ TEST(ClientSession, LeaseTaskTest) { } // namespace client } // namespace curve -std::string mdsMetaServerAddr = "127.0.0.1:9101"; // NOLINT -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string configpath = "./test/client/configs/client_session.conf"; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9101"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string configpath = "./test/client/configs/client_session.conf"; // NOLINT -const std::vector clientConf { +const std::vector clientConf{ std::string("mds.listen.addr=127.0.0.1:9101,127.0.0.1:9102"), std::string("global.logPath=./runlog/"), std::string("chunkserver.rpcTimeoutMS=1000"), @@ -337,18 +335,17 @@ const std::vector clientConf { std::string("metacache.rpcRetryIntervalUS=500"), std::string("mds.rpcRetryIntervalUS=500"), std::string("schedule.threadpoolSize=2"), - std::string("mds.maxRetryMS=5000") -}; + std::string("mds.maxRetryMS=5000")}; -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); google::ParseCommandLineFlags(&argc, &argv, false); curve::CurveCluster* cluster = new curve::CurveCluster(); - cluster->PrepareConfig( - configpath, clientConf); + cluster->PrepareConfig(configpath, + clientConf); int ret = RUN_ALL_TESTS(); return ret; diff --git a/test/client/client_unstable_helper_test.cpp b/test/client/client_unstable_helper_test.cpp index cbb62891a5..4ef1c6487c 100644 --- a/test/client/client_unstable_helper_test.cpp +++ b/test/client/client_unstable_helper_test.cpp @@ -20,10 +20,11 @@ * Author: wuhanqing */ -#include -#include -#include #include +#include +#include +#include + #include #include "src/client/unstable_helper.h" @@ -48,50 +49,51 @@ TEST(UnstableHelperTest, normal_test) { chunkservers.emplace_back(std::make_pair(i, ep)); } - // 先对每个chunkserver进行10次连续超时 + // First, perform 10 consecutive timeouts on each chunkserver for (const auto& cs : chunkservers) { for (int i = 1; i <= opt.maxStableChunkServerTimeoutTimes; ++i) { helper.IncreTimeout(cs.first); ASSERT_EQ(UnstableState::NoUnstable, - helper.GetCurrentUnstableState( - cs.first, cs.second)); + helper.GetCurrentUnstableState(cs.first, cs.second)); } } - // 再对每个chunkserver增加一次超时 - // 前两个是chunkserver unstable状态,第三个是server unstable + // Add another timeout to each chunkserver + // The first two are in the chunkserver unstable state, and the third is in + // the server unstable state helper.IncreTimeout(chunkservers[0].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[0].first, chunkservers[0].second)); + helper.GetCurrentUnstableState(chunkservers[0].first, + chunkservers[0].second)); helper.IncreTimeout(chunkservers[1].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[1].first, chunkservers[1].second)); + helper.GetCurrentUnstableState(chunkservers[1].first, + chunkservers[1].second)); helper.IncreTimeout(chunkservers[2].first); 
ASSERT_EQ(UnstableState::ServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[2].first, chunkservers[2].second)); + helper.GetCurrentUnstableState(chunkservers[2].first, + chunkservers[2].second)); - // 继续增加超时次数 - // 这种情况下,每次都是chunkserver unstable + // Continue to increase the number of timeouts + // In this case, it is always chunkserver unstable helper.IncreTimeout(chunkservers[0].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[0].first, chunkservers[0].second)); + helper.GetCurrentUnstableState(chunkservers[0].first, + chunkservers[0].second)); helper.IncreTimeout(chunkservers[1].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[1].first, chunkservers[1].second)); + helper.GetCurrentUnstableState(chunkservers[1].first, + chunkservers[1].second)); helper.IncreTimeout(chunkservers[2].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[2].first, chunkservers[2].second)); + helper.GetCurrentUnstableState(chunkservers[2].first, + chunkservers[2].second)); - // 新chunkserver第一次超时,根据ip判断,可以直接设置为chunkserver unstable + // The first timeout of a new chunkserver can be directly set to chunkserver + // unstable based on the IP address butil::EndPoint ep; butil::str2endpoint("127.100.0.1:60999", &ep); auto chunkserver4 = std::make_pair(4, ep); @@ -99,22 +101,22 @@ TEST(UnstableHelperTest, normal_test) { helper.IncreTimeout(chunkserver4.first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkserver4.first, chunkserver4.second)); + helper.GetCurrentUnstableState(chunkserver4.first, + chunkserver4.second)); - // 其他ip的chunkserver + // Chunkservers for other IPs butil::str2endpoint("127.200.0.1:60999", &ep); auto chunkserver5 = std::make_pair(5, ep); for (int i = 1; i <= opt.maxStableChunkServerTimeoutTimes; ++i) { helper.IncreTimeout(chunkserver5.first); ASSERT_EQ(UnstableState::NoUnstable, - helper.GetCurrentUnstableState( - chunkserver5.first, chunkserver5.second)); + helper.GetCurrentUnstableState(chunkserver5.first, + chunkserver5.second)); } helper.IncreTimeout(chunkserver5.first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkserver5.first, chunkserver5.second)); + helper.GetCurrentUnstableState(chunkserver5.first, + chunkserver5.second)); } } // namespace client diff --git a/test/client/client_userinfo_unittest.cpp b/test/client/client_userinfo_unittest.cpp index 6153f23e5e..442af59c6f 100644 --- a/test/client/client_userinfo_unittest.cpp +++ b/test/client/client_userinfo_unittest.cpp @@ -20,23 +20,23 @@ * Author: tongguangxun */ -#include +#include #include #include -#include +#include -#include // NOLINT #include +#include // NOLINT #include -#include // NOLINT +#include // NOLINT #include #include "include/client/libcurve.h" #include "src/client/client_common.h" -#include "test/client/fake/fakeMDS.h" -#include "src/client/libcurve_file.h" #include "src/client/iomanager4chunk.h" +#include "src/client/libcurve_file.h" #include "src/client/libcurve_snapshot.h" +#include "test/client/fake/fakeMDS.h" extern std::string mdsMetaServerAddr; extern std::string configpath; @@ -70,8 +70,8 @@ class CurveClientUserAuthFail : public ::testing::Test { ASSERT_EQ(0, server.Join()); } - brpc::Server server; - MetaServerOption metaopt; + brpc::Server server; + MetaServerOption metaopt; FakeMDSCurveFSService curvefsservice; FakeMDSTopologyService topologyservice; }; 
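Stepping back to the unstable-helper hunks above: the assertions there encode a small counting state machine. A chunkserver stays NoUnstable until it exceeds maxStableChunkServerTimeoutTimes consecutive timeouts, then reports ChunkServerUnstable; once enough chunkservers behind one IP are unstable, the next one reports ServerUnstable, and from then on any timeout on that IP, including a brand-new chunkserver's first one, reports ChunkServerUnstable immediately. The following is a self-contained sketch of that decision logic under stated assumptions: the names are invented, the server-level threshold of 3 is assumed, and the real implementation (src/client/unstable_helper.h) reads these values from its option struct.

#include <map>
#include <set>
#include <string>

enum class UnstableState { NoUnstable, ChunkServerUnstable, ServerUnstable };

class UnstableHelperSketch {
 public:
    explicit UnstableHelperSketch(int maxTimeouts) : maxTimeouts_(maxTimeouts) {}

    void IncreTimeout(int csId) { ++timeouts_[csId]; }

    UnstableState GetCurrentUnstableState(int csId, const std::string& ip) {
        // A chunkserver on a server already judged unstable is flagged on
        // its very first timeout, which is the "new chunkserver on the same
        // IP" case the test verifies.
        if (unstableServers_.count(ip) > 0) {
            return UnstableState::ChunkServerUnstable;
        }
        if (timeouts_[csId] <= maxTimeouts_) {
            return UnstableState::NoUnstable;  // still within the threshold
        }
        unstablePerIp_[ip].insert(csId);
        // Once enough chunkservers behind one IP are unstable, promote the
        // whole server to unstable exactly once.
        if (unstablePerIp_[ip].size() >= kServerThreshold) {
            unstableServers_.insert(ip);
            return UnstableState::ServerUnstable;
        }
        return UnstableState::ChunkServerUnstable;
    }

 private:
    static constexpr size_t kServerThreshold = 3;  // assumption for the sketch
    int maxTimeouts_;
    std::map<int, int> timeouts_;
    std::map<std::string, std::set<int>> unstablePerIp_;
    std::set<std::string> unstableServers_;
};

int main() {
    UnstableHelperSketch helper(10);
    for (int i = 0; i < 11; ++i) helper.IncreTimeout(1);
    // The 11th timeout exceeds the threshold of 10 -> ChunkServerUnstable.
    return helper.GetCurrentUnstableState(1, "127.100.0.1") ==
                   UnstableState::ChunkServerUnstable
               ? 0
               : 1;
}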
@@ -102,7 +102,7 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { // set openfile response ::curve::mds::OpenFileResponse openresponse; - curve::mds::FileInfo * finfo = new curve::mds::FileInfo; + curve::mds::FileInfo* finfo = new curve::mds::FileInfo; ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession; se->set_sessionid("1"); se->set_createtime(12345); @@ -115,16 +115,16 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { openresponse.mutable_fileinfo()->set_seqnum(2); openresponse.mutable_fileinfo()->set_filename(filename); - FakeReturn* openfakeret - = new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* openfakeret = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice.SetOpenFile(openfakeret); // 1. create a File authfailed ::curve::mds::CreateFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* fakeret - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateFileFakeReturn(fakeret); size_t len = 4 * 1024 * 1024ul; @@ -138,7 +138,7 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { LOG(INFO) << "get refresh session request!"; refreshcv.notify_one(); }; - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::ReFreshSessionResponse refreshresp; refreshresp.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); refreshresp.set_sessionid("1234"); @@ -147,12 +147,13 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { refreshresp.mutable_fileinfo()->set_filename(filename); refreshresp.mutable_fileinfo()->set_id(1); refreshresp.mutable_fileinfo()->set_parentid(0); - refreshresp.mutable_fileinfo()->set_filetype(curve::mds::FileType::INODE_PAGEFILE); // NOLINT + refreshresp.mutable_fileinfo()->set_filetype( + curve::mds::FileType::INODE_PAGEFILE); // NOLINT refreshresp.mutable_fileinfo()->set_chunksize(4 * 1024 * 1024); refreshresp.mutable_fileinfo()->set_length(4 * 1024 * 1024 * 1024ul); refreshresp.mutable_fileinfo()->set_ctime(12345678); - FakeReturn* refreshfakeret - = new FakeReturn(nullptr, static_cast(&refreshresp)); + FakeReturn* refreshfakeret = + new FakeReturn(nullptr, static_cast(&refreshresp)); curvefsservice.SetRefreshSession(refreshfakeret, refresht); // 3. open the file auth failed @@ -161,47 +162,47 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { // 4. open file success openresponse.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* openfakeret2 - = new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* openfakeret2 = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice.SetOpenFile(openfakeret2); openret = fileinstance.Open(); ASSERT_EQ(openret, LIBCURVE_ERROR::OK); -/* - // 5. wait for refresh - for (int i = 0; i < 4; i++) { - { - std::unique_lock lk(mtx); - refreshcv.wait(lk); + /* + // 5. 
wait for refresh + for (int i = 0; i < 4; i++) { + { + std::unique_lock lk(mtx); + refreshcv.wait(lk); + } } - } - CurveAioContext aioctx; - aioctx.offset = 4 * 1024 * 1024 - 4 * 1024; - aioctx.length = 4 * 1024 * 1024 + 8 * 1024; - aioctx.ret = LIBCURVE_ERROR::OK; - aioctx.cb = sessioncallback; - aioctx.buf = nullptr; - - fileinstance.AioRead(&aioctx); - fileinstance.AioWrite(&aioctx); - - for (int i = 0; i < 1; i++) { - { - std::unique_lock lk(mtx); - refreshcv.wait(lk); + CurveAioContext aioctx; + aioctx.offset = 4 * 1024 * 1024 - 4 * 1024; + aioctx.length = 4 * 1024 * 1024 + 8 * 1024; + aioctx.ret = LIBCURVE_ERROR::OK; + aioctx.cb = sessioncallback; + aioctx.buf = nullptr; + + fileinstance.AioRead(&aioctx); + fileinstance.AioWrite(&aioctx); + + for (int i = 0; i < 1; i++) { + { + std::unique_lock lk(mtx); + refreshcv.wait(lk); + } } - } - char buffer[10]; - ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Write(buffer, 0, 0)); - ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Read(buffer, 0, 0)); -*/ + char buffer[10]; + ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Write(buffer, 0, 0)); + ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Read(buffer, 0, 0)); + */ // 6. set fake close return ::curve::mds::CloseFileResponse closeresp; closeresp.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* closefileret - = new FakeReturn(nullptr, static_cast(&closeresp)); + FakeReturn* closefileret = + new FakeReturn(nullptr, static_cast(&closeresp)); curvefsservice.SetCloseFile(closefileret); ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, fileinstance.Close()); @@ -235,12 +236,11 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { ::curve::mds::CreateSnapShotResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); response.clear_snapshotfileinfo(); - FakeReturn* fakeret - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret); - ASSERT_EQ(-LIBCURVE_ERROR::FAILED, cl.CreateSnapShot(filename, - emptyuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::FAILED, + cl.CreateSnapShot(filename, emptyuserinfo, &seq)); // set response response.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); @@ -255,54 +255,51 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { finf->set_seqnum(2); finf->set_segmentsize(1 * 1024 * 1024 * 1024); response.set_allocated_snapshotfileinfo(finf); - FakeReturn* fakeret1 - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret1); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.CreateSnapShot(filename, - emptyuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.CreateSnapShot(filename, emptyuserinfo, &seq)); // test delete // normal delete test ::curve::mds::DeleteSnapShotResponse delresponse; delresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* delfakeret - = new FakeReturn(nullptr, static_cast(&delresponse)); + FakeReturn* delfakeret = + new FakeReturn(nullptr, static_cast(&delresponse)); curvefsservice.SetDeleteSnapShot(delfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.DeleteSnapShot(filename, - emptyuserinfo, - seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.DeleteSnapShot(filename, emptyuserinfo, seq)); // test get SegmentInfo // normal getinfo curve::mds::GetOrAllocateSegmentResponse* getresponse = - new 
curve::mds::GetOrAllocateSegmentResponse(); + new curve::mds::GetOrAllocateSegmentResponse(); curve::mds::PageFileSegment* pfs = new curve::mds::PageFileSegment; pfs->set_logicalpoolid(0); - pfs->set_segmentsize(1ull*1024*1024*1024); - pfs->set_chunksize(16*1024*1024); + pfs->set_segmentsize(1ull * 1024 * 1024 * 1024); + pfs->set_chunksize(16 * 1024 * 1024); pfs->set_startoffset(0); getresponse->set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); getresponse->set_allocated_pagefilesegment(pfs); - FakeReturn* getfakeret = new FakeReturn(nullptr, - static_cast(getresponse)); + FakeReturn* getfakeret = + new FakeReturn(nullptr, static_cast(getresponse)); curvefsservice.SetGetSnapshotSegmentInfo(getfakeret); ::curve::mds::topology::GetChunkServerListInCopySetsResponse* geresponse_1 = - new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); + new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); geresponse_1->set_statuscode(0); - FakeReturn* faktopologyeret = new FakeReturn(nullptr, - static_cast(geresponse_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(geresponse_1)); topologyservice.SetFakeReturn(faktopologyeret); SegmentInfo seginfo; LogicalPoolCopysetIDInfo lpcsIDInfo; - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.GetSnapshotSegmentInfo(filename, - emptyuserinfo, - 0, 0, &seginfo)); + ASSERT_EQ( + -LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapshotSegmentInfo(filename, emptyuserinfo, 0, 0, &seginfo)); // test list snapshot // normal delete test @@ -311,7 +308,8 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { listresponse.mutable_fileinfo(0)->set_filename(filename); listresponse.mutable_fileinfo(0)->set_id(1); listresponse.mutable_fileinfo(0)->set_parentid(0); - listresponse.mutable_fileinfo(0)->set_filetype(curve::mds::FileType::INODE_PAGEFILE); // NOLINT + listresponse.mutable_fileinfo(0)->set_filetype( + curve::mds::FileType::INODE_PAGEFILE); // NOLINT listresponse.mutable_fileinfo(0)->set_chunksize(4 * 1024 * 1024); listresponse.mutable_fileinfo(0)->set_length(4 * 1024 * 1024 * 1024ul); listresponse.mutable_fileinfo(0)->set_ctime(12345678); @@ -319,20 +317,19 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { listresponse.mutable_fileinfo(0)->set_segmentsize(1 * 1024 * 1024 * 1024ul); listresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* listfakeret - = new FakeReturn(nullptr, static_cast(&listresponse)); + FakeReturn* listfakeret = + new FakeReturn(nullptr, static_cast(&listresponse)); curve::client::FInfo_t sinfo; curvefsservice.SetListSnapShot(listfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.GetSnapShot(filename, - emptyuserinfo, - seq, &sinfo)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapShot(filename, emptyuserinfo, seq, &sinfo)); std::vector seqvec; std::map fivec; seqvec.push_back(seq); curve::client::FInfo_t ffinfo; ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.ListSnapShot(filename, emptyuserinfo, &seqvec, &fivec)); + cl.ListSnapShot(filename, emptyuserinfo, &seqvec, &fivec)); cl.UnInit(); delete fakeret; @@ -341,7 +338,7 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { delete delfakeret; } -// root user测试 +// Root user testing TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ClientConfigOption opt; opt.metaServerOpt.rpcRetryOpt.rpcTimeoutMs = 500; @@ -359,7 +356,7 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ASSERT_TRUE(!cl.Init(opt)); UserInfo_t rootuserinfo; - rootuserinfo.owner 
="root"; + rootuserinfo.owner = "root"; rootuserinfo.password = "123"; std::string filename = "./1_usertest_.img"; @@ -370,12 +367,11 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ::curve::mds::CreateSnapShotResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); response.clear_snapshotfileinfo(); - FakeReturn* fakeret - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret); - ASSERT_EQ(-LIBCURVE_ERROR::FAILED, cl.CreateSnapShot(filename, - rootuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::FAILED, + cl.CreateSnapShot(filename, rootuserinfo, &seq)); // set response response.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); @@ -390,54 +386,51 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { finf->set_seqnum(2); finf->set_segmentsize(1 * 1024 * 1024 * 1024); response.set_allocated_snapshotfileinfo(finf); - FakeReturn* fakeret1 - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret1); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.CreateSnapShot(filename, - rootuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.CreateSnapShot(filename, rootuserinfo, &seq)); // test delete // normal delete test ::curve::mds::DeleteSnapShotResponse delresponse; delresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* delfakeret - = new FakeReturn(nullptr, static_cast(&delresponse)); + FakeReturn* delfakeret = + new FakeReturn(nullptr, static_cast(&delresponse)); curvefsservice.SetDeleteSnapShot(delfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.DeleteSnapShot(filename, - rootuserinfo, - seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.DeleteSnapShot(filename, rootuserinfo, seq)); // test get SegmentInfo // normal getinfo curve::mds::GetOrAllocateSegmentResponse* getresponse = - new curve::mds::GetOrAllocateSegmentResponse(); + new curve::mds::GetOrAllocateSegmentResponse(); curve::mds::PageFileSegment* pfs = new curve::mds::PageFileSegment; pfs->set_logicalpoolid(0); - pfs->set_segmentsize(1ull*1024*1024*1024); - pfs->set_chunksize(16ull*1024*1024); + pfs->set_segmentsize(1ull * 1024 * 1024 * 1024); + pfs->set_chunksize(16ull * 1024 * 1024); pfs->set_startoffset(0); getresponse->set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); getresponse->set_allocated_pagefilesegment(pfs); - FakeReturn* getfakeret = new FakeReturn(nullptr, - static_cast(getresponse)); + FakeReturn* getfakeret = + new FakeReturn(nullptr, static_cast(getresponse)); curvefsservice.SetGetSnapshotSegmentInfo(getfakeret); ::curve::mds::topology::GetChunkServerListInCopySetsResponse* geresponse_1 = - new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); + new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); geresponse_1->set_statuscode(0); - FakeReturn* faktopologyeret = new FakeReturn(nullptr, - static_cast(geresponse_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(geresponse_1)); topologyservice.SetFakeReturn(faktopologyeret); SegmentInfo seginfo; LogicalPoolCopysetIDInfo lpcsIDInfo; - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.GetSnapshotSegmentInfo(filename, - rootuserinfo, - 0, 0, &seginfo)); + ASSERT_EQ( + -LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapshotSegmentInfo(filename, rootuserinfo, 0, 0, &seginfo)); // test list snapshot // normal delete 
test @@ -446,7 +439,8 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { listresponse.mutable_fileinfo(0)->set_filename(filename); listresponse.mutable_fileinfo(0)->set_id(1); listresponse.mutable_fileinfo(0)->set_parentid(0); - listresponse.mutable_fileinfo(0)->set_filetype(curve::mds::FileType::INODE_PAGEFILE); // NOLINT + listresponse.mutable_fileinfo(0)->set_filetype( + curve::mds::FileType::INODE_PAGEFILE); // NOLINT listresponse.mutable_fileinfo(0)->set_chunksize(4 * 1024 * 1024); listresponse.mutable_fileinfo(0)->set_length(4 * 1024 * 1024 * 1024ul); listresponse.mutable_fileinfo(0)->set_ctime(12345678); @@ -454,21 +448,19 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { listresponse.mutable_fileinfo(0)->set_segmentsize(1 * 1024 * 1024 * 1024ul); listresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* listfakeret - = new FakeReturn(nullptr, static_cast(&listresponse)); + FakeReturn* listfakeret = + new FakeReturn(nullptr, static_cast(&listresponse)); curve::client::FInfo_t sinfo; curvefsservice.SetListSnapShot(listfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.GetSnapShot(filename, - rootuserinfo, - seq, &sinfo)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapShot(filename, rootuserinfo, seq, &sinfo)); std::vector seqvec; std::map fivec; seqvec.push_back(seq); curve::client::FInfo_t ffinfo; ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.ListSnapShot(filename, rootuserinfo, - &seqvec, &fivec)); + cl.ListSnapShot(filename, rootuserinfo, &seqvec, &fivec)); cl.UnInit(); delete fakeret; diff --git a/test/client/copyset_client_test.cpp b/test/client/copyset_client_test.cpp index b71383ec9d..548db4f6d0 100644 --- a/test/client/copyset_client_test.cpp +++ b/test/client/copyset_client_test.cpp @@ -20,26 +20,27 @@ * Author: wudemiao */ -#include +#include "src/client/copyset_client.h" + #include +#include #include -#include #include +#include -#include //NOLINT -#include // NOLINT +#include // NOLINT +#include //NOLINT -#include "src/client/copyset_client.h" -#include "test/client/mock/mock_meta_cache.h" -#include "src/common/concurrent/count_down_event.h" -#include "test/client/mock/mock_chunkservice.h" -#include "test/client/mock/mock_request_context.h" #include "src/client/chunk_closure.h" +#include "src/client/metacache.h" +#include "src/client/request_closure.h" +#include "src/common/concurrent/count_down_event.h" #include "src/common/timeutility.h" #include "test/client/fake/fakeChunkserver.h" +#include "test/client/mock/mock_chunkservice.h" +#include "test/client/mock/mock_meta_cache.h" +#include "test/client/mock/mock_request_context.h" #include "test/client/mock/mock_request_scheduler.h" -#include "src/client/request_closure.h" -#include "src/client/metacache.h" namespace curve { namespace client { @@ -47,18 +48,18 @@ namespace client { using curve::chunkserver::CHUNK_OP_STATUS; using curve::chunkserver::ChunkRequest; +using curve::client::MetaCache; +using curve::common::TimeUtility; using ::testing::_; -using ::testing::Invoke; -using ::testing::Return; using ::testing::AnyNumber; +using ::testing::AtLeast; using ::testing::DoAll; -using ::testing::SetArgPointee; -using ::testing::SetArgReferee; using ::testing::InSequence; -using ::testing::AtLeast; +using ::testing::Invoke; +using ::testing::Return; using ::testing::SaveArgPointee; -using curve::client::MetaCache; -using curve::common::TimeUtility; +using ::testing::SetArgPointee; +using ::testing::SetArgReferee; class CopysetClientTest : public 
testing::Test { protected: @@ -76,60 +77,62 @@ class CopysetClientTest : public testing::Test { public: std::string listenAddr_; - brpc::Server *server_; + brpc::Server* server_; }; -/* TODO(wudemiao) 当前 controller 错误不能通过 mock 返回 */ +/* TODO(wudemiao) current controller error cannot be returned through mock */ int gWriteCntlFailedCode = 0; int gReadCntlFailedCode = 0; -static void WriteChunkFunc(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { +static void WriteChunkFunc(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { /* return response */ brpc::ClosureGuard doneGuard(done); if (0 != gWriteCntlFailedCode) { if (gWriteCntlFailedCode == brpc::ERPCTIMEDOUT) { std::this_thread::sleep_for(std::chrono::milliseconds(3500)); } - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); cntl->SetFailed(gWriteCntlFailedCode, "write controller error"); } } -static void ReadChunkFunc(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { +static void ReadChunkFunc(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); if (0 != gReadCntlFailedCode) { if (gReadCntlFailedCode == brpc::ERPCTIMEDOUT) { std::this_thread::sleep_for(std::chrono::milliseconds(4000)); } - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); cntl->SetFailed(gReadCntlFailedCode, "read controller error"); } } -static void ReadChunkSnapshotFunc(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, //NOLINT - ::curve::chunkserver::ChunkResponse *response, //NOLINT - google::protobuf::Closure *done) { +static void ReadChunkSnapshotFunc( + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, // NOLINT + ::curve::chunkserver::ChunkResponse* response, // NOLINT + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); if (0 != gReadCntlFailedCode) { - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); cntl->SetFailed(-1, "read snapshot controller error"); } } -static void DeleteChunkSnapshotFunc(::google::protobuf::RpcController *controller, //NOLINT - const ::curve::chunkserver::ChunkRequest *request, //NOLINT - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { +static void DeleteChunkSnapshotFunc( + ::google::protobuf::RpcController* controller, // NOLINT + const ::curve::chunkserver::ChunkRequest* request, // NOLINT + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); if (0 != gReadCntlFailedCode) { - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); cntl->SetFailed(-1, "delete snapshot controller error"); } } @@ -146,32 +149,35 @@ static void CreateCloneChunkFunc( } } -static void RecoverChunkFunc(::google::protobuf::RpcController *controller, //NOLINT - const 
::curve::chunkserver::ChunkRequest *request, //NOLINT - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { +static void RecoverChunkFunc( + ::google::protobuf::RpcController* controller, // NOLINT + const ::curve::chunkserver::ChunkRequest* request, // NOLINT + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); if (0 != gReadCntlFailedCode) { - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); cntl->SetFailed(-1, "recover chunk controller error"); } } -static void GetChunkInfoFunc(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::GetChunkInfoRequest *request, //NOLINT - ::curve::chunkserver::GetChunkInfoResponse *response, //NOLINT - google::protobuf::Closure *done) { +static void GetChunkInfoFunc( + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::GetChunkInfoRequest* request, // NOLINT + ::curve::chunkserver::GetChunkInfoResponse* response, // NOLINT + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); if (0 != gReadCntlFailedCode) { - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); cntl->SetFailed(-1, "get chunk info controller error"); } } TEST_F(CopysetClientTest, normal_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -215,7 +221,7 @@ TEST_F(CopysetClientTest, normal_test) { // write success for (int i = 0; i < 10; ++i) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -225,29 +231,29 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -256,30 +262,30 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). - Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -288,65 +294,62 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(Return(-1)) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } // read success for (int i = 0; i < 10; ++i) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = i * 8; reqCtx->rawlength_ = len; reqCtx->subIoIndex_ = 0; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). - Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -355,30 +358,28 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -387,25 +388,23 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). - Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(Return(-1)) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -417,8 +416,9 @@ TEST_F(CopysetClientTest, normal_test) { */ TEST_F(CopysetClientTest, write_error_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -465,9 +465,9 @@ TEST_F(CopysetClientTest, write_error_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -476,7 +476,7 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -485,21 +485,22 @@ TEST_F(CopysetClientTest, write_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); 
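The normal_test blocks above all use the same gmock recipe: queue a few leader-lookup failures with WillOnce(Return(-1)), end the sequence with a success, and assert that the request still completes. A stripped-down, self-contained version of that recipe is sketched below; LeaderSource, MockLeaderSource, and RetryGetLeader are invented for this sketch and are not Curve types (link against gtest_main and gmock to run it).

#include <gmock/gmock.h>
#include <gtest/gtest.h>

class LeaderSource {
 public:
    virtual ~LeaderSource() = default;
    virtual int GetLeader(int copysetId) = 0;  // 0 on success, -1 on failure
};

class MockLeaderSource : public LeaderSource {
 public:
    MOCK_METHOD(int, GetLeader, (int), (override));
};

// Retry until GetLeader succeeds, at most maxRetry attempts.
int RetryGetLeader(LeaderSource* src, int copysetId, int maxRetry) {
    for (int i = 0; i < maxRetry; ++i) {
        if (src->GetLeader(copysetId) == 0) return 0;
    }
    return -1;
}

TEST(RetrySketch, TwoFailuresThenSuccess) {
    MockLeaderSource mock;
    // Same shape as the EXPECT_CALLs above: two failures, then a success.
    EXPECT_CALL(mock, GetLeader(1))
        .WillOnce(::testing::Return(-1))
        .WillOnce(::testing::Return(-1))
        .WillOnce(::testing::Return(0));
    ASSERT_EQ(0, RetryGetLeader(&mock, 1, 3));
}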
        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                                     SetArgPointee<3>(leaderAddr),
-                                     Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(response),
-                            Invoke(WriteChunkFunc)));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
+            .Times(AtLeast(1))
+            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
+                            SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc)));
+        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                 offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST,
                   reqDone->GetErrorCode());
     }
     /* controller error */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -507,24 +508,27 @@ TEST_F(CopysetClientTest, write_error_test) {
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

-        // 配置文件设置的重试睡眠时间为5000,因为没有触发底层指数退避,所以重试之间不会睡眠
+        // The retry sleep time set in the configuration file is 5000; since
+        // the underlying exponential backoff is not triggered, there is no
+        // sleep between retries
        uint64_t start = TimeUtility::GetTimeofDayUs();

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);

         reqCtx->done_ = reqDone;
         gWriteCntlFailedCode = -1;
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6)
+        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+            .Times(6)
             .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3)
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+            .Times(3)
             .WillRepeatedly(Invoke(WriteChunkFunc));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
+        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                 offset, len, {}, reqDone);
         cond.Wait();

         ASSERT_NE(0, reqDone->GetErrorCode());
@@ -534,7 +538,7 @@ TEST_F(CopysetClientTest, write_error_test) {
     }
     /* controller set timeout */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -543,14 +547,17 @@ TEST_F(CopysetClientTest, write_error_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);

-        // 配置文件设置的重试超时时间为5000,因为chunkserver设置返回timeout
-        // 导致触发底层超时时间指数退避,每次重试间隔增大。重试三次正常只需要睡眠3*1000
-        // 但是增加指数退避之后,超时时间将增加到1000 + 2000 + 2000 = 5000
-        // 加上随机因子,三次重试时间应该大于7000, 且小于8000
+        // The retry timeout set in the configuration file is 5000. Because
+        // the chunkserver returns a timeout, exponential backoff of the
+        // underlying timeout is triggered and the interval grows with each
+        // retry. Three retries would normally need only 3 * 1000, but with
+        // exponential backoff the timeouts add up to 1000 + 2000 + 2000 =
+        // 5000. With the random factor, the three retries should take more
+        // than 7000 and less than 8000
         uint64_t start = TimeUtility::GetTimeofDayMs();

         reqCtx->done_ = reqDone;
@@ -558,12 +565,12 @@ TEST_F(CopysetClientTest, write_error_test) {
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
             .Times(AtLeast(3))
             .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3)
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+            .Times(3)
             .WillRepeatedly(Invoke(WriteChunkFunc));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
+        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                 offset, len, {}, reqDone);
         cond.Wait();

         ASSERT_NE(0, reqDone->GetErrorCode());
@@ -577,7 +584,7 @@ TEST_F(CopysetClientTest, write_error_test) {

     /* controller set timeout */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -586,31 +593,35 @@ TEST_F(CopysetClientTest, write_error_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);

-        // 配置文件设置的重试睡眠时间为5000,因为chunkserver设置返回timeout
-        // 导致触发底层指数退避,每次重试间隔增大。重试三次正常只需要睡眠3*5000
-        // 但是增加指数退避之后,睡眠间隔将增加到10000 + 20000 = 30000
-        // 加上随机因子,三次重试时间应该大于29000, 且小于50000
+        // The retry sleep time set in the configuration file is 5000. Because
+        // the chunkserver returns a timeout, the underlying exponential
+        // backoff is triggered and the sleep interval grows with each retry.
+        // Three retries would normally sleep only 3 * 5000, but with
+        // exponential backoff the sleep intervals add up to 10000 + 20000 =
+        // 30000. With the random factor, the three retries should take more
+        // than 29000 and less than 50000
         uint64_t start = TimeUtility::GetTimeofDayUs();

         reqCtx->done_ = reqDone;
         ChunkResponse response;
         response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD);
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                            Invoke(WriteChunkFunc)));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
+            .Times(3)
+            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+            .Times(3)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc)));
+        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                 offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD,
-                      reqDone->GetErrorCode());
+                  reqDone->GetErrorCode());

         uint64_t end = TimeUtility::GetTimeofDayUs();
         ASSERT_GT(end - start, 28000);
@@ -618,9 +629,9 @@ TEST_F(CopysetClientTest, write_error_test) {
         gWriteCntlFailedCode = 0;
     }

-    /* 其他错误 */
+    /* Other errors */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -629,7 +640,7 @@ TEST_F(CopysetClientTest, write_error_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);
@@ -637,21 +648,22 @@ TEST_F(CopysetClientTest, write_error_test) {
         ChunkResponse response;
         response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                            Invoke(WriteChunkFunc)));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
+            .Times(3)
+            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+            .Times(3)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc)));
+        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                 offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN,
-                      reqDone->GetErrorCode());
+                  reqDone->GetErrorCode());
     }
-    /* 不是 leader,返回正确的 leader */
+    /* Not a leader, returning the correct leader */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -660,7 +672,7 @@ TEST_F(CopysetClientTest, write_error_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new
FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -670,28 +682,31 @@ TEST_F(CopysetClientTest, write_error_test) { response1.set_redirect(leaderStr); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(1) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(WriteChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); ASSERT_EQ(1, fm.writeRPC.redirectQps.count.get_value()); } - /* 不是 leader,没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, did not return a leader, refreshing the meta cache + * succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -700,35 +715,37 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response1; response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); -// response1.set_redirect(leaderStr2); + // response1.set_redirect(leaderStr2); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(WriteChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(WriteChunkFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,没有返回 leader,刷新 meta cache 失败 
*/ + /* Not a leader, did not return a leader, refreshing the meta cache failed + */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -737,38 +754,38 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response1; response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); -// response1.set_redirect(leaderStr2); + // response1.set_redirect(leaderStr2); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(WriteChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -777,7 +794,7 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); FileMetric fm("test"); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -786,33 +803,36 @@ TEST_F(CopysetClientTest, write_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); + SetArgPointee<3>(leaderAddr), Return(0))); EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) .Times(3) .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); auto 
startTimeUs = curve::common::TimeUtility::GetTimeofDayUs();
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
+        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                 offset, len, {}, reqDone);
         cond.Wait();
-        auto elpased = curve::common::TimeUtility::GetTimeofDayUs() -
-                       startTimeUs;
+        auto elpased =
+            curve::common::TimeUtility::GetTimeofDayUs() - startTimeUs;
         // chunkserverOPRetryIntervalUS = 5000
-        // 每次redirect睡眠500us,共重试2次(chunkserverOPMaxRetry=3,判断时大于等于就返回,所以共只重试了两次)
-        // 所以总共耗费时间大于1000us
+        // Each redirect sleeps 500us, and there are 2 retries in total
+        // (chunkserverOPMaxRetry=3, and the request returns once the retry
+        // count is greater than or equal to it, so only two retries are
+        // made), so the total time spent is greater than 1000us
         ASSERT_GE(elpased, 1000);
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED,
                   reqDone->GetErrorCode());
         ASSERT_EQ(3, fm.writeRPC.redirectQps.count.get_value());
     }
-    /* copyset 不存在,更新 leader 依然失败 */
+    /* copyset does not exist, updating leader still failed */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -821,7 +841,7 @@ TEST_F(CopysetClientTest, write_error_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);
@@ -829,22 +849,23 @@ TEST_F(CopysetClientTest, write_error_test) {
         ChunkResponse response;
         response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST);
         response.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6)
+        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+            .Times(6)
             .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                            Invoke(WriteChunkFunc)));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+            .Times(3)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc)));
+        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                 offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST,
                   reqDone->GetErrorCode());
     }
-    /* copyset 不存在,更新 leader 成功 */
+    /* copyset does not exist, updating leader succeeded */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -853,7 +874,7 @@ TEST_F(CopysetClientTest, write_error_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);
@@ -864,24 +885,25 @@ TEST_F(CopysetClientTest, write_error_test) {
         ChunkResponse response2;
         response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
         response2.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
+
EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(WriteChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } // epoch too old { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -890,7 +912,7 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -898,17 +920,18 @@ TEST_F(CopysetClientTest, write_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(1) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD, - reqDone->GetErrorCode()); + reqDone->GetErrorCode()); } scheduler.Fini(); @@ -919,8 +942,9 @@ TEST_F(CopysetClientTest, write_error_test) { */ TEST_F(CopysetClientTest, write_failed_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -968,7 +992,7 @@ TEST_F(CopysetClientTest, write_failed_test) { /* controller set timeout */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -977,13 +1001,16 @@ TEST_F(CopysetClientTest, write_failed_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); 
reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);

-        // 配置文件设置的重试超时时间为500,因为chunkserver设置返回timeout
-        // 导致触发底层超时时间指数退避,每次重试间隔增大。重试50次正常只需要超时49*500
-        // 但是增加指数退避之后,超时时间将增加到49*1000 = 49000
+        // The retry timeout set in the configuration file is 500. Because the
+        // chunkserver returns a timeout, exponential backoff of the underlying
+        // timeout is triggered and the interval grows with each retry. Fifty
+        // retries would normally need timeouts of only 49 * 500, but with
+        // exponential backoff the total grows to 49 * 1000 = 49000
         uint64_t start = TimeUtility::GetTimeofDayMs();

         reqCtx->done_ = reqDone;
@@ -991,12 +1018,12 @@ TEST_F(CopysetClientTest, write_failed_test) {
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
             .Times(AtLeast(50))
             .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(50)
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+            .Times(50)
             .WillRepeatedly(Invoke(WriteChunkFunc));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
+        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                 offset, len, {}, reqDone);
         cond.Wait();

         ASSERT_NE(0, reqDone->GetErrorCode());
@@ -1009,7 +1036,7 @@ TEST_F(CopysetClientTest, write_failed_test) {
     }

     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -1018,31 +1045,34 @@ TEST_F(CopysetClientTest, write_failed_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);

-        // 配置文件设置的重试睡眠时间为5000us,因为chunkserver设置返回timeout
-        // 导致触发底层指数退避,每次重试间隔增大。重试50次正常只需要睡眠49*5000us
-        // 但是增加指数退避之后,睡眠间隔将增加到
-        // 10000 + 20000 + 40000... ~= 4650000
+        // The retry sleep time set in the configuration file is 5000us.
+        // Because the chunkserver returns a timeout, the underlying
+        // exponential backoff is triggered and the sleep interval grows with
+        // each retry. Fifty retries would normally sleep only 49 * 5000us,
+        // but with exponential backoff the sleep intervals add up to
+        // 10000 + 20000 + 40000... ~= 4650000
         uint64_t start = TimeUtility::GetTimeofDayUs();

         reqCtx->done_ = reqDone;
         ChunkResponse response;
         response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD);
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(50).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(50)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                            Invoke(WriteChunkFunc)));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
+            .Times(50)
+            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+            .Times(50)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc)));
+        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                 offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD,
-                      reqDone->GetErrorCode());
+                  reqDone->GetErrorCode());

         uint64_t end = TimeUtility::GetTimeofDayUs();
         ASSERT_GT(end - start, 250000);
@@ -1052,14 +1082,14 @@ TEST_F(CopysetClientTest, write_failed_test) {
     scheduler.Fini();
 }

-
 /**
  * read failed testing
  */
 TEST_F(CopysetClientTest, read_failed_test) {
     MockChunkServiceImpl mockChunkService;
-    ASSERT_EQ(server_->AddService(&mockChunkService,
-                                  brpc::SERVER_DOESNT_OWN_SERVICE), 0);
+    ASSERT_EQ(
+        server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE),
+        0);
     ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0);

     IOSenderOption ioSenderOpt;
@@ -1105,7 +1135,7 @@ TEST_F(CopysetClientTest, read_failed_test) {

     /* controller set timeout */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -1113,13 +1143,16 @@ TEST_F(CopysetClientTest, read_failed_test) {
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

-        // 配置文件设置的重试超时时间为500,因为chunkserver设置返回timeout
-        // 导致触发底层超时时间指数退避,每次重试间隔增大。重试50次正常只需要50*500
-        // 但是增加指数退避之后,超时时间将增加到500 + 1000 + 2000... ~= 60000
+        // The retry timeout set in the configuration file is 500. Because the
+        // chunkserver returns a timeout, exponential backoff of the underlying
+        // timeout is triggered and the interval grows with each retry. Fifty
+        // retries would normally need only 50 * 500, but with exponential
+        // backoff the timeouts add up to 500 + 1000 + 2000... ~= 60000
         uint64_t start = TimeUtility::GetTimeofDayMs();

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);
@@ -1128,12 +1161,11 @@ TEST_F(CopysetClientTest, read_failed_test) {
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
             .Times(AtLeast(50))
             .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(50)
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _))
+            .Times(50)
             .WillRepeatedly(Invoke(ReadChunkFunc));
-        copysetClient.ReadChunk(reqCtx->idinfo_, sn,
-                                offset, len, {}, reqDone);
+        copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone);
         cond.Wait();

         ASSERT_NE(0, reqDone->GetErrorCode());
@@ -1146,9 +1178,9 @@ TEST_F(CopysetClientTest, read_failed_test) {
         gReadCntlFailedCode = 0;
     }

-    /* 设置 overload */
+    /* Set overload */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -1157,29 +1189,32 @@ TEST_F(CopysetClientTest, read_failed_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);

-        // 配置文件设置的重试睡眠时间为5000us,因为chunkserver设置返回timeout
-        // 导致触发底层指数退避,每次重试间隔增大。重试50次正常只需要睡眠49*5000
-        // 但是增加指数退避之后,睡眠间隔将增加到
-        // 10000 + 20000 + 40000 ... = 4650000
-        // 加上随机因子,三次重试时间应该大于2900, 且小于5000
+        // The retry sleep time set in the configuration file is 5000us.
+        // Because the chunkserver returns a timeout, the underlying
+        // exponential backoff is triggered and the sleep interval grows with
+        // each retry. Fifty retries would normally sleep only 49 * 5000, but
+        // with exponential backoff the sleep intervals add up to
+        // 10000 + 20000 + 40000 ... = 4650000. With the random factor, the
+        // three retry times should be greater than 2900 and less than 5000
         uint64_t start = TimeUtility::GetTimeofDayUs();

         reqCtx->done_ = reqDone;
         ChunkResponse response;
         response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD);
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(50).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(50)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                            Invoke(ReadChunkFunc)));
-        copysetClient.ReadChunk(reqCtx->idinfo_, sn,
-                                offset, len, {}, reqDone);
+            .Times(50)
+            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _))
+            .Times(50)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc)));
+        copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD,
                   reqDone->GetErrorCode());
@@ -1196,8 +1231,9 @@
 */
 TEST_F(CopysetClientTest, read_error_test) {
     MockChunkServiceImpl mockChunkService;
-    ASSERT_EQ(server_->AddService(&mockChunkService,
-                                  brpc::SERVER_DOESNT_OWN_SERVICE), 0);
+    ASSERT_EQ(
+        server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE),
+        0);
     ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0);

     IOSenderOption ioSenderOpt;
@@ -1242,9 +1278,9 @@ TEST_F(CopysetClientTest, read_error_test) {
     IOTracker iot(nullptr, nullptr, nullptr, &fm);
     iot.PrepareReadIOBuffers(1);

-    /* 非法参数 */
+    /* Illegal parameter */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -1253,7 +1289,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);
@@ -1261,21 +1297,20 @@ TEST_F(CopysetClientTest, read_error_test) {
         ChunkResponse response;
         response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST);
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                                     SetArgPointee<3>(leaderAddr),
-                                     Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(response),
-                            Invoke(ReadChunkFunc)));
-        copysetClient.ReadChunk(reqCtx->idinfo_, sn,
-                                offset, len, {}, reqDone);
+            .Times(AtLeast(1))
+            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
+                            SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc)));
+        copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST,
                   reqDone->GetErrorCode());
     }
     /* chunk not exist */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -1284,7 +1319,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->rawlength_ = len;

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);
@@ -1292,20 +1327,19 @@ TEST_F(CopysetClientTest, read_error_test) {
         ChunkResponse response;
         response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST);
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                                     SetArgPointee<3>(leaderAddr),
-                                     Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(response),
-                            Invoke(ReadChunkFunc)));
-        copysetClient.ReadChunk(reqCtx->idinfo_, sn,
-                                offset, len, {}, reqDone);
+            .Times(AtLeast(1))
+            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
+                            SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc)));
+        copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_EQ(0, reqDone->GetErrorCode());
     }
     /* controller error */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -1313,11 +1347,13 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

-        // 配置文件设置的重试睡眠时间为5000,因为没有触发底层指数退避,所以重试之间不会睡眠
+        // The retry sleep time set in the configuration file is 5000; since
+        // the underlying exponential backoff is not triggered, there is no
+        // sleep between retries
         uint64_t start = TimeUtility::GetTimeofDayUs();

         curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+        RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
         reqDone->SetFileMetric(&fm);
         reqDone->SetIOTracker(&iot);
@@ -1326,12 +1362,11 @@ TEST_F(CopysetClientTest, read_error_test) {
         EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
             .Times(AtLeast(3))
             .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3)
+                                  SetArgPointee<3>(leaderAddr), Return(0)));
+        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _))
+            .Times(3)
             .WillRepeatedly(Invoke(ReadChunkFunc));
-        copysetClient.ReadChunk(reqCtx->idinfo_, sn,
-                                offset, len, {}, reqDone);
+        copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone);
         cond.Wait();

         ASSERT_NE(0, reqDone->GetErrorCode());
@@ -1342,7 +1377,7 @@ TEST_F(CopysetClientTest, read_error_test) {

     /* controller set timeout */
     {
-        RequestContext *reqCtx = new FakeRequestContext();
+        RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -1350,14 +1385,17 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

-        // 配置文件设置的超时时间为1000,因为chunkserver设置返回timeout
-        // 导致触发底层超时时间指数退避,每次重试间隔增大。重试三次正常只需要睡眠3*1000
-        // 但是增加指数退避之后,超时时间将增加到1000 + 2000 + 2000 = 5000
-        // 加上随机因子,三次重试时间应该大于7000, 且小于8000
+        // The timeout configured in the settings file is 1000, but due to chunk
+        // server timeout, it triggers exponential backoff, increasing the
+        // interval between retries. In normal conditions, three retries would
+        // only require a sleep time of 3 * 1000.
However, with the added + // exponential backoff, the timeout intervals will increase to 1000 + + // 2000 + 2000 = 5000. Considering the random factor, the total time for + // three retries should be greater than 7000 and less than 8000. uint64_t start = TimeUtility::GetTimeofDayMs(); curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1366,12 +1404,11 @@ TEST_F(CopysetClientTest, read_error_test) { EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) .Times(AtLeast(3)) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) .WillRepeatedly(Invoke(ReadChunkFunc)); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); @@ -1384,9 +1421,9 @@ TEST_F(CopysetClientTest, read_error_test) { gReadCntlFailedCode = 0; } - /* 设置 overload */ + /* Set overload */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1395,28 +1432,31 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); - // 配置文件设置的重试睡眠时间为500,因为chunkserver设置返回timeout - // 导致触发底层指数退避,每次重试间隔增大。重试三次正常只需要睡眠3*500 - // 但是增加指数退避之后,睡眠间隔将增加到1000 + 2000 = 3000 - // 加上随机因子,三次重试时间应该大于2900, 且小于5000 + // The retry sleep time set in the configuration file is 500, but due to + // chunkserver timeouts, it triggers exponential backoff, increasing the + // interval between retries. In normal conditions, three retries would + // only require a sleep time of 3 * 500. However, with the added + // exponential backoff, the sleep intervals will increase to 1000 + 2000 + // = 3000. Considering the random factor, the total time for three + // retries should be greater than 2900 and less than 5000. 
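        // [Editor's note, not part of the original patch] The timing bounds
        // asserted in these backoff cases all come from a doubling interval
        // that is capped by a configured maximum. A minimal sketch of that
        // arithmetic, with hypothetical names (this is not Curve's actual
        // backoff code):
        //
        //   uint64_t BackoffIntervalUs(uint64_t baseUs, uint64_t maxUs,
        //                              int retried) {
        //       uint64_t interval = baseUs;
        //       for (int i = 0; i < retried; ++i) {
        //           interval = std::min(interval * 2, maxUs);
        //       }
        //       return interval;
        //   }
        //
        // With baseUs = 500 and a cap of at least 2000, the two sleeps between
        // three attempts are 1000 and 2000, i.e. 3000 in total, which is why
        // the window above is 2900..5000 once the random factor is added.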
uint64_t start = TimeUtility::GetTimeofDayUs(); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, reqDone->GetErrorCode()); @@ -1426,9 +1466,9 @@ TEST_F(CopysetClientTest, read_error_test) { ASSERT_LT(end - start, 3 * 5000); } - /* 其他错误 */ + /* Other errors */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1437,7 +1477,7 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1445,21 +1485,21 @@ TEST_F(CopysetClientTest, read_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1468,7 +1508,7 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1478,26 +1518,27 @@ TEST_F(CopysetClientTest, read_error_test) { response1.set_redirect(leaderStr); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) 
.WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(1) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1506,40 +1547,38 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response1; response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); -// response1.set_redirect(leaderStr2); + // response1.set_redirect(leaderStr2); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1548,40 +1587,37 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = 
len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response1; response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); -// response1.set_redirect(leaderStr2); + // response1.set_redirect(leaderStr2); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(-1))) + SetArgPointee<3>(leaderAddr), Return(-1))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1590,7 +1626,7 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1598,24 +1634,25 @@ TEST_F(CopysetClientTest, read_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* 
copyset does not exist, updating leader still failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1624,7 +1661,7 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1632,22 +1669,22 @@ TEST_F(CopysetClientTest, read_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -1656,7 +1693,7 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1667,23 +1704,20 @@ TEST_F(CopysetClientTest, read_error_test) { ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); cond.Wait(); 
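        // [Editor's note, not part of the original patch] The redirect and
        // leader-refresh cases in this test all exercise one client-side retry
        // policy. A simplified sketch of what they imply (hypothetical names,
        // not the actual CopysetClient code):
        //
        //   for (int retried = 0; retried < chunkserverOPMaxRetry; ++retried) {
        //       if (metaCache->GetLeader(lpid, cpid, &leader, &addr, ...) != 0)
        //           continue;                          // refresh failed, retry
        //       SendRpcToLeader(addr, req, &resp);
        //       if (resp.status() == CHUNK_OP_STATUS_REDIRECTED) {
        //           if (resp.has_redirect())
        //               metaCache->UpdateLeader(...);  // adopt returned leader
        //           continue;                          // retry new leader
        //       }
        //       break;                                 // success or fatal error
        //   }
        //
        // The Times(6) / Times(3) pairing in the redirect cases suggests the
        // leader is also resolved a second time while the redirect is being
        // handled; the sketch above omits that second lookup for brevity.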
ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -1696,8 +1730,9 @@ TEST_F(CopysetClientTest, read_error_test) { */ TEST_F(CopysetClientTest, read_snapshot_error_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -1732,19 +1767,18 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1752,31 +1786,31 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(1) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, reqDone->GetErrorCode()); } /* chunk snapshot not exist */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1784,61 +1818,61 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(1) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); 
ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, reqDone->GetErrorCode()); } /* controller error */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) .WillRepeatedly(Invoke(ReadChunkSnapshotFunc)); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); gReadCntlFailedCode = 0; } - /* 其他错误 */ + /* Other errors */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1846,31 +1880,31 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(3) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1880,36 +1914,38 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { 
response1.set_redirect(leaderStr); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(1) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(2) + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1918,34 +1954,35 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(2) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1954,38 +1991,37 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { 
response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(2) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -1993,34 +2029,35 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(3) + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2028,32 +2065,32 @@ TEST_F(CopysetClientTest, 
read_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2064,51 +2101,51 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(2) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(Return(-1)) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(1) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -2120,8 +2157,9 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { */ TEST_F(CopysetClientTest, delete_snapshot_error_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -2148,17 +2186,16 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2166,59 +2203,59 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(1) .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, reqDone->GetErrorCode()); } /* controller error */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) 
.WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(3) .WillRepeatedly(Invoke(DeleteChunkSnapshotFunc)); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); gReadCntlFailedCode = 0; } - /* 其他错误 */ + /* Other errors */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2226,30 +2263,30 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(response), Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2259,38 +2296,39 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { response1.set_redirect(leaderStr); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(1) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + EXPECT_CALL(mockChunkService, + 
DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2299,33 +2337,34 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(DeleteChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2334,73 +2373,73 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(-1))) + SetArgPointee<3>(leaderAddr), Return(-1))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - 
EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(DeleteChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response.set_redirect(leaderStr);; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + response.set_redirect(leaderStr); + ; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2408,31 +2447,31 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + 
EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2443,55 +2482,53 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(2) .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(DeleteChunkSnapshotFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(-1))) + SetArgPointee<3>(leaderAddr), Return(-1))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(-1))) + SetArgPointee<3>(leaderAddr), Return(-1))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT .Times(1) .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, - sn, reqDone); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -2503,8 +2540,9 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { */ TEST_F(CopysetClientTest, create_s3_clone_error_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -2531,17 +2569,16 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::CREATE_CLONE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2549,57 +2586,57 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(1) // NOLINT + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(1) // NOLINT .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, reqDone->GetErrorCode()); } /* controller error */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); 
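
The cond.Wait() closing every case is the synchronizing half of the FakeRequestClosure pattern: the closure counts the one-shot event down when the asynchronous RPC path finishes, and only then may the test inspect reqDone->GetErrorCode(). The real curve::common::CountDownEvent is not shown in this patch; it is assumed to behave like this minimal stand-in.

    #include <condition_variable>
    #include <mutex>

    // Minimal latch: Wait() blocks until Signal() has been called `count`
    // times. These tests construct it with count == 1, and the request
    // closure signals it exactly once, on completion.
    class CountDownEvent {
     public:
        explicit CountDownEvent(int count) : count_(count) {}

        void Signal() {
            std::lock_guard<std::mutex> lk(mtx_);
            if (--count_ <= 0) {
                cv_.notify_all();
            }
        }

        void Wait() {
            std::unique_lock<std::mutex> lk(mtx_);
            cv_.wait(lk, [this] { return count_ <= 0; });
        }

     private:
        std::mutex mtx_;
        std::condition_variable cv_;
        int count_;
    };

Without the Wait(), the assertion on the error code would race against the closure still running on the RPC thread.
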
reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(3) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT .WillRepeatedly(Invoke(CreateCloneChunkFunc)); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); gReadCntlFailedCode = 0; } - // /* 其他错误 */ + // /* Other errors */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2607,29 +2644,29 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(3) // NOLINT + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT .WillRepeatedly(DoAll(SetArgPointee<2>(response), Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } /* op success */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2637,33 +2674,33 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(1) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(1) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + 
.Times(1) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2672,32 +2709,33 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(2) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(2) // NOLINT .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(CreateCloneChunkFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2706,36 +2744,35 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(2) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(2) // NOLINT 
.WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(CreateCloneChunkFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2743,32 +2780,33 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(3) // NOLINT + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2776,30 +2814,30 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(3) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", 
sn, 1, 1024, reqDone); + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2810,69 +2848,67 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(2) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(2) // NOLINT .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(CreateCloneChunkFunc))) .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(Return(-1)) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(1) // NOLINT + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(1) // NOLINT .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } } - /** * recover chunk error testing */ TEST_F(CopysetClientTest, recover_chunk_error_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -2899,17 +2935,16 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2917,12 +2952,13 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(RecoverChunkFunc))); + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, @@ -2930,42 +2966,41 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { } /* controller error */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - 
SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(3) .WillRepeatedly(Invoke(RecoverChunkFunc)); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); gReadCntlFailedCode = 0; } - /* 其他错误 */ + /* Other errors */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -2973,28 +3008,28 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(RecoverChunkFunc))); + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -3004,29 +3039,30 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { response1.set_redirect(leaderStr); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(1) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(RecoverChunkFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); 
reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -3035,37 +3071,36 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(RecoverChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(RecoverChunkFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(RecoverChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -3074,35 +3109,34 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(RecoverChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(RecoverChunkFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(RecoverChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); 
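
Taken together, the Times(3)/Times(6) expectations and the REDIRECTED and COPYSET_NOTEXIST scenarios pin down a bounded client-side retry loop. The sketch below only illustrates the behavior these cases imply; it is not the real CopysetClient logic, and SendWithRetry with its callback parameters is a hypothetical name.

    #include <string>

    enum class Status { kSuccess, kRedirected, kCopysetNotExist, kRpcError };

    struct RpcResult {
        Status status;
        std::string redirect;  // may be empty when no leader hint is given
    };

    template <typename SendFn, typename RefreshFn, typename UpdateFn>
    Status SendWithRetry(SendFn send, RefreshFn refreshLeader,
                         UpdateFn updateLeader, int maxAttempts = 3) {
        Status last = Status::kRpcError;
        for (int attempt = 0; attempt < maxAttempts; ++attempt) {
            // Re-resolve the leader before sending; the expectations above
            // allow GetLeader to run more than once per attempt.
            refreshLeader();
            RpcResult r = send();
            last = r.status;
            if (last == Status::kSuccess) {
                return last;
            }
            if (last == Status::kRedirected && !r.redirect.empty()) {
                updateLeader(r.redirect);  // adopt the hinted leader
            }
            // COPYSET_NOTEXIST or an empty redirect: fall through and let
            // the next refreshLeader() re-resolve the leader.
        }
        return last;  // after maxAttempts the last error reaches the closure
    }

After three failed attempts the last status is what the closure observes, which is exactly what the COPYSET_NOTEXIST cases assert.
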
ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -3110,31 +3144,32 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(RecoverChunkFunc))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -3142,29 +3177,29 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(RecoverChunkFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, 
copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); @@ -3175,47 +3210,46 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ChunkResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(RecoverChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(RecoverChunkFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(RecoverChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; ChunkResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(Return(-1)) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(RecoverChunkFunc))); - copysetClient.RecoverChunk(reqCtx->idinfo_, - 0, 4096, reqDone); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -3227,8 +3261,9 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { */ TEST_F(CopysetClientTest, get_chunk_info_test) { MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); IOSenderOption ioSenderOpt; @@ -3254,28 +3289,27 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; GetChunkInfoResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(GetChunkInfoFunc))); + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, @@ -3283,66 +3317,62 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { } /* controller error */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, 
GetChunkInfo(_, _, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) .WillRepeatedly(Invoke(GetChunkInfoFunc)); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); gReadCntlFailedCode = 0; } - /* 其他错误 */ + /* Other errors */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; GetChunkInfoResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(GetChunkInfoFunc))); + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -3351,32 +3381,33 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { response1.set_redirect(leaderStr); GetChunkInfoResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(1) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(GetChunkInfoFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(GetChunkInfoFunc))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing 
the meta cache + * succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -3384,30 +3415,30 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); GetChunkInfoResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(GetChunkInfoFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(GetChunkInfoFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -3415,92 +3446,89 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); GetChunkInfoResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(GetChunkInfoFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(GetChunkInfoFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an 
incorrect leader */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; GetChunkInfoResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3) + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(GetChunkInfoFunc))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; GetChunkInfoResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(GetChunkInfoFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -3510,54 +3538,49 @@ 
TEST_F(CopysetClientTest, get_chunk_info_test) { GetChunkInfoResponse response2; response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) + SetArgPointee<3>(leaderAddr), Return(0))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(GetChunkInfoFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(GetChunkInfoFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; GetChunkInfoResponse response; response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(-1))) + SetArgPointee<3>(leaderAddr), Return(-1))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(-1))) + SetArgPointee<3>(leaderAddr), Return(-1))) .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(GetChunkInfoFunc))); + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -3574,23 +3597,22 @@ void WriteCallBack(CurveAioContext* aioctx) { delete aioctx; } -void PrepareOpenFile(FakeCurveFSService *service, - OpenFileResponse *openresp, - FakeReturn *fakeReturn) { +void PrepareOpenFile(FakeCurveFSService* service, OpenFileResponse* openresp, + FakeReturn* fakeReturn) { openresp->set_statuscode(curve::mds::StatusCode::kOK); - auto *session = openresp->mutable_protosession(); + auto* session = openresp->mutable_protosession(); session->set_sessionid("xxx"); session->set_leasetime(10000); session->set_createtime(10000); session->set_sessionstatus(curve::mds::SessionStatus::kSessionOK); - auto *fileinfo = openresp->mutable_fileinfo(); + auto* fileinfo = openresp->mutable_fileinfo(); fileinfo->set_id(1); fileinfo->set_filename("filename"); fileinfo->set_parentid(0); fileinfo->set_length(10ULL * 1024 * 1024 * 1024); fileinfo->set_blocksize(4096); - *fakeReturn = FakeReturn(nullptr, static_cast(openresp)); + *fakeReturn = FakeReturn(nullptr, static_cast(openresp)); service->SetOpenFile(fakeReturn); } @@ -3620,7 +3642,7 @@ TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) { // create fake chunkserver service FakeChunkServerService fakechunkservice; - // 设置cli服务 + // Set up cli service CliServiceFake fakeCliservice; FakeCurveFSService curvefsService; @@ -3631,9 +3653,11 @@ TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) { brpc::Server server; ASSERT_EQ(0, server.AddService(&fakechunkservice, - brpc::SERVER_DOESNT_OWN_SERVICE)) << "Fail to add fakechunkservice"; - ASSERT_EQ(0, server.AddService(&fakeCliservice, - brpc::SERVER_DOESNT_OWN_SERVICE)) << "Fail to add fakecliservice"; + brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Fail to add fakechunkservice"; + ASSERT_EQ( + 0, server.AddService(&fakeCliservice, brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Fail to add fakecliservice"; ASSERT_EQ( 0, server.AddService(&curvefsService, brpc::SERVER_DOESNT_OWN_SERVICE)) << "Fail to add curvefsService"; @@ -3670,11 +3694,12 @@ TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) { ASSERT_EQ(LIBCURVE_ERROR::OK, fileinstance.Open()); - // 设置文件版本号 + // Set file version number fileinstance.GetIOManager4File()->SetLatestFileSn(kNewFileSn); - // 发送写请求,并等待sec秒后检查io是否返回 - auto startWriteAndCheckResult = [&fileinstance](int sec)-> bool { // NOLINT + // Send a write request and wait for seconds to check if IO returns + auto startWriteAndCheckResult = + [&fileinstance](int sec) -> bool { // NOLINT CurveAioContext* aioctx = new CurveAioContext(); char buffer[4096]; @@ -3684,29 +3709,30 @@ TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) { aioctx->op = 
LIBCURVE_OP::LIBCURVE_OP_WRITE; aioctx->cb = WriteCallBack; - // 下发写请求 + // Send write request fileinstance.AioWrite(aioctx, UserDataType::RawBuffer); std::this_thread::sleep_for(std::chrono::seconds(sec)); return gWriteSuccessFlag; }; - // 第一次写成功,并更新chunkserver端的文件版本号 + // The first write succeeds and updates the file version number + // on the chunkserver side ASSERT_TRUE(startWriteAndCheckResult(3)); - // 设置一个旧的版本号去写 + // Set an old version number to write fileinstance.GetIOManager4File()->SetLatestFileSn(kOldFileSn); gWriteSuccessFlag = false; - // chunkserver返回backward,重新获取版本号后还是旧的版本 - // IO hang + // chunkserver returns backward; the version number fetched again is + // still the old one, so the IO hangs ASSERT_FALSE(startWriteAndCheckResult(3)); - // 更新版本号为正常状态 + // Update version number to normal state fileinstance.GetIOManager4File()->SetLatestFileSn(kNewFileSn); std::this_thread::sleep_for(std::chrono::seconds(1)); - // 上次写请求成功 + // The last write request succeeded ASSERT_EQ(true, gWriteSuccessFlag); server.Stop(0); @@ -3763,8 +3789,8 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { IOTracker iot(nullptr, nullptr, nullptr, &fm); { - // redirect情况下, chunkserver返回新的leader - // 重试之前不会睡眠 + // In the redirect case, chunkserver returns a new leader + // No sleep before the retry RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -3791,7 +3817,7 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { .WillOnce(DoAll(SetArgPointee<2>(leaderId), SetArgPointee<3>(leaderAddr), Return(0))) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId2), - SetArgPointee<3>(leaderAddr), Return(0))); + SetArgPointee<3>(leaderAddr), Return(0))); EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) .Times(1) .WillOnce(Return(0)); @@ -3803,21 +3829,20 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, - reqDone); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); - // 返回同样的leader id,重试之前会进行睡眠 + // The same leader ID is returned, so it sleeps before retrying ASSERT_GE(endUs - startUs, sleepUsBeforeRetry / 10); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); }
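// The case above and the cases below all exercise one retry rule in the
// client: before re-sending, sleep only when the retry would hit the same
// leader again. A minimal sketch of that decision, using hypothetical names
// (currentLeader, newLeader, retryIntervalUs) rather than the real
// CopysetClient internals:
//
//     bool NeedSleepBeforeRetry(ChunkServerID currentLeader,
//                               ChunkServerID newLeader) {
//         // Same leader: the copyset is likely still electing or catching
//         // up, so back off instead of hammering the same chunkserver.
//         return currentLeader == newLeader;
//     }
//     if (NeedSleepBeforeRetry(currentLeader, newLeader)) {
//         bthread_usleep(retryIntervalUs);  // sleepUsBeforeRetry in this test
//     }
//
// The tests observe the rule only through timing: ASSERT_GE on
// (endUs - startUs) shows the sleep happened, ASSERT_LE shows it did not.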
{ - // redirect情况下,chunkserver未返回leader - // 主动refresh获取到新leader + // In the redirect case, chunkserver did not return a leader + // Actively refresh to obtain a new leader RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -3893,7 +3917,7 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { .WillOnce(DoAll(SetArgPointee<2>(leaderId), SetArgPointee<3>(leaderAddr), Return(0))) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId2), - SetArgPointee<3>(leaderAddr), Return(0))); + SetArgPointee<3>(leaderAddr), Return(0))); EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(0); EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) .Times(2) @@ -3902,21 +3926,20 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { .WillOnce( DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, - reqDone); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); - // 返回新的leader id,所以重试之前不会进行睡眠 + // Returns a new leader id, so there will be no sleep before retrying ASSERT_LE(endUs - startUs, sleepUsBeforeRetry / 10); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - // redirect情况下,chunkserver未返回leader - // 主动refresh获取到旧leader + // In the redirect case, chunkserver did not return a leader + // Actively refresh to obtain the old leader RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -3940,7 +3963,7 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); + SetArgPointee<3>(leaderAddr), Return(0))); EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(0); EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) .Times(2) @@ -3949,9 +3972,8 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { .WillOnce( DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, - reqDone); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); cond.Wait(); auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); @@ -3966,19 +3988,16 @@ class TestRunnedRequestClosure : public RequestClosure { public: TestRunnedRequestClosure() : RequestClosure(nullptr) {} - void Run() override { - runned_ = true; - } + void Run() override { runned_ = true; } - bool IsRunned() const { - return runned_; - } + bool IsRunned() const { return runned_; } private: bool runned_ = false; }; -// 测试session失效后,重试请求会被重新放入请求队列 +// Tests that after the session becomes invalid, the retried request is put +// back into the request queue
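// A rough sketch of the behavior verified below, with sessionNotValid_ and
// scheduler_ as assumed names rather than the exact CopysetClient members:
//
//     void CopysetClient::ReadChunk(..., google::protobuf::Closure* done) {
//         if (sessionNotValid_) {
//             // The lease has expired, so no RPC may be sent. Hand the
//             // request back to the scheduler's queue; it will be retried
//             // once the session is refreshed. Note that done must not run
//             // yet, which is what TestRunnedRequestClosure asserts.
//             scheduler_->ReSchedule(request);
//             return;
//         }
//         // normal path: issue the RPC to the chunkserver
//     }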
TEST(CopysetClientBasicTest, TestReScheduleWhenSessionNotValid) { MockRequestScheduler requestScheduler; CopysetClient copysetClient; @@ -3988,12 +4007,11 @@ TEST(CopysetClientBasicTest, TestReScheduleWhenSessionNotValid) { ASSERT_EQ(0, copysetClient.Init(&metaCache, ioSenderOption, &requestScheduler, nullptr)); - // 设置session not valid + // Set session not valid copysetClient.StartRecycleRetryRPC(); { - EXPECT_CALL(requestScheduler, ReSchedule(_)) - .Times(1); + EXPECT_CALL(requestScheduler, ReSchedule(_)).Times(1); TestRunnedRequestClosure closure; copysetClient.ReadChunk({}, 0, 0, 0, {}, &closure); @@ -4001,8 +4019,7 @@ TEST(CopysetClientBasicTest, TestReScheduleWhenSessionNotValid) { } { - EXPECT_CALL(requestScheduler, ReSchedule(_)) - .Times(1); + EXPECT_CALL(requestScheduler, ReSchedule(_)).Times(1); TestRunnedRequestClosure closure; copysetClient.WriteChunk({}, 1, 1, 0, {}, 0, 0, {}, &closure); @@ -4010,5 +4027,5 @@ TEST(CopysetClientBasicTest, TestReScheduleWhenSessionNotValid) { } } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/test/client/fake/client_workflow_test.cpp b/test/client/fake/client_workflow_test.cpp index c42a9371ba..fdab88f1ed 100644 --- a/test/client/fake/client_workflow_test.cpp +++ b/test/client/fake/client_workflow_test.cpp @@ -19,28 +19,28 @@ * File Created: Saturday, 13th October 2018 1:59:08 pm * Author: tongguangxun */ +#include // NOLINT #include #include -#include // NOLINT -#include -#include #include -#include // NOLINT -#include // NOLINT +#include // NOLINT +#include +#include +#include // NOLINT #include "include/client/libcurve.h" +#include "src/client/client_common.h" #include "src/client/file_instance.h" -#include "test/client/fake/mock_schedule.h" #include "test/client/fake/fakeMDS.h" -#include "src/client/client_common.h" +#include "test/client/fake/mock_schedule.h" -using curve::client::PeerAddr; using curve::client::EndPoint; +using curve::client::PeerAddr; -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT -std::string mdsMetaServerAddr = "127.0.0.1:9104"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9104"; // NOLINT DECLARE_uint64(test_disk_size); DEFINE_uint32(io_time, 5, "Duration for I/O test"); @@ -67,7 +67,7 @@ void readcallbacktest(CurveAioContext* context) { delete context; } -int main(int argc, char ** argv) { +int main(int argc, char** argv) { // google::InitGoogleLogging(argv[0]); google::ParseCommandLineFlags(&argc, &argv, false); std::string configpath = "./test/client/configs/client.conf"; @@ -76,7 +76,7 @@ int main(int argc, char ** argv) { LOG(FATAL) << "Fail to init config"; } - // filename必须是全路径 + // The filename must be a full path std::string filename = "/1_userinfo_"; // uint64_t size = FLAGS_test_disk_size; @@ -86,7 +86,7 @@ int main(int argc, char ** argv) { mds.Initialize(); mds.StartService(); if (FLAGS_create_copysets) { - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9106, &ep); PeerId pd(ep); @@ -127,12 +127,11 @@ int main(int argc, char ** argv) { memset(buffer + 7 * 1024, 'h', 1024); uint64_t offset_base; - for (int i = 0; i < 16; i ++) { + for (int i = 0; i < 16; i++) { uint64_t offset = i * chunk_size; Write(fd, buffer, offset, 4096); } - char* buf2 = new char[128 * 1024]; char* buf1 = new char[128 * 1024]; @@ -155,7 +154,7 @@ int main(int argc, 
char ** argv) { aioctx2->op = LIBCURVE_OP_READ; aioctx2->cb = readcallbacktest; AioRead(fd, aioctx2); - if (j%10 == 0) { + if (j % 10 == 0) { mds.EnableNetUnstable(600); } else { mds.EnableNetUnstable(100); @@ -185,18 +184,18 @@ int main(int argc, char ** argv) { CurveAioContext readaioctx; { std::unique_lock lk(writeinterfacemtx); - writeinterfacecv.wait(lk, []()->bool{return writeflag;}); + writeinterfacecv.wait(lk, []() -> bool { return writeflag; }); } writeflag = false; AioWrite(fd, &writeaioctx); { std::unique_lock lk(writeinterfacemtx); - writeinterfacecv.wait(lk, []()->bool{return writeflag;}); + writeinterfacecv.wait(lk, []() -> bool { return writeflag; }); } { std::unique_lock lk(interfacemtx); - interfacecv.wait(lk, []()->bool{return readflag;}); + interfacecv.wait(lk, []() -> bool { return readflag; }); } for (int i = 0; i < 1024; i++) { @@ -204,31 +203,31 @@ int main(int argc, char ** argv) { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 1024] != 'b') { + if (readbuffer[i + 1024] != 'b') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 2 * 1024] != 'c') { + if (readbuffer[i + 2 * 1024] != 'c') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 3 * 1024] != 'd') { + if (readbuffer[i + 3 * 1024] != 'd') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 4 * 1024] != 'e') { + if (readbuffer[i + 4 * 1024] != 'e') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 5 * 1024] != 'f') { + if (readbuffer[i + 5 * 1024] != 'f') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 6 * 1024] != 'g') { + if (readbuffer[i + 6 * 1024] != 'g') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 7 * 1024] != 'h') { + if (readbuffer[i + 7 * 1024] != 'h') { LOG(FATAL) << "read wrong data!"; break; } @@ -236,7 +235,7 @@ int main(int argc, char ** argv) { LOG(INFO) << "LibCurve I/O verified for stage 1, going to read repeatedly"; -// skip_write_io: + // skip_write_io: std::atomic stop(false); auto testfunc = [&]() { while (!stop.load()) { @@ -247,44 +246,44 @@ int main(int argc, char ** argv) { AioRead(fd, &readaioctx); { std::unique_lock lk(interfacemtx); - interfacecv.wait(lk, []()->bool{return readflag;}); + interfacecv.wait(lk, []() -> bool { return readflag; }); } for (int i = 0; i < 1024; i++) { if (readbuffer[i] != 'a') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 1024] != 'b') { + if (readbuffer[i + 1024] != 'b') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 2 * 1024] != 'c') { + if (readbuffer[i + 2 * 1024] != 'c') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 3 * 1024] != 'd') { + if (readbuffer[i + 3 * 1024] != 'd') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 4 * 1024] != 'e') { + if (readbuffer[i + 4 * 1024] != 'e') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 5 * 1024] != 'f') { + if (readbuffer[i + 5 * 1024] != 'f') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 6 * 1024] != 'g') { + if (readbuffer[i + 6 * 1024] != 'g') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 7 * 1024] != 'h') { + if (readbuffer[i + 7 * 1024] != 'h') { LOG(FATAL) << "read wrong data!"; break; } } -skip_read_io: + skip_read_io: std::this_thread::sleep_for(std::chrono::milliseconds(50)); } }; diff --git a/test/client/fake/client_workflow_test4snap.cpp b/test/client/fake/client_workflow_test4snap.cpp index 9aa9a75e23..4dcb77aec9 100644 --- 
a/test/client/fake/client_workflow_test4snap.cpp +++ b/test/client/fake/client_workflow_test4snap.cpp @@ -19,26 +19,26 @@ * File Created: Monday, 7th January 2019 10:04:50 pm * Author: tongguangxun */ +#include // NOLINT #include #include -#include // NOLINT -#include -#include #include -#include //NOLINT -#include //NOLINT +#include //NOLINT +#include +#include +#include //NOLINT -#include "src/client/client_common.h" #include "include/client/libcurve.h" -#include "src/client/libcurve_snapshot.h" +#include "src/client/client_common.h" #include "src/client/file_instance.h" -#include "test/client/fake/mock_schedule.h" +#include "src/client/libcurve_snapshot.h" #include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mock_schedule.h" -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT -std::string mdsMetaServerAddr = "127.0.0.1:6666"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:6666"; // NOLINT DECLARE_uint64(test_disk_size); DEFINE_uint32(io_time, 5, "Duration for I/O test"); @@ -55,21 +55,21 @@ std::condition_variable interfacecv; DECLARE_uint64(test_disk_size); -using curve::client::UserInfo_t; -using curve::client::PeerAddr; -using curve::client::EndPoint; -using curve::client::SegmentInfo; -using curve::client::ChunkInfoDetail; -using curve::client::SnapshotClient; using curve::client::ChunkID; -using curve::client::LogicPoolID; -using curve::client::CopysetID; using curve::client::ChunkIDInfo; +using curve::client::ChunkInfoDetail; +using curve::client::CopysetID; using curve::client::CopysetPeerInfo; -using curve::client::MetaCache; +using curve::client::EndPoint; using curve::client::LogicalPoolCopysetIDInfo; +using curve::client::LogicPoolID; +using curve::client::MetaCache; +using curve::client::PeerAddr; +using curve::client::SegmentInfo; +using curve::client::SnapshotClient; +using curve::client::UserInfo_t; -int main(int argc, char ** argv) { +int main(int argc, char** argv) { google::ParseCommandLineFlags(&argc, &argv, false); std::string filename = "/1_userinfo_test.txt"; @@ -79,7 +79,7 @@ int main(int argc, char ** argv) { mds.Initialize(); mds.StartService(); if (FLAGS_create_copysets) { - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 8200, &ep); PeerId pd(ep); @@ -116,10 +116,8 @@ int main(int argc, char ** argv) { SegmentInfo seginfo; LogicalPoolCopysetIDInfo lpcsIDInfo; - if (LIBCURVE_ERROR::FAILED == cl.GetSnapshotSegmentInfo(filename, - userinfo, - 0, 0, - &seginfo)) { + if (LIBCURVE_ERROR::FAILED == + cl.GetSnapshotSegmentInfo(filename, userinfo, 0, 0, &seginfo)) { LOG(ERROR) << "GetSnapshotSegmentInfo failed!"; return -1; } @@ -140,7 +138,7 @@ int main(int argc, char ** argv) { cl.DeleteChunkSnapshotOrCorrectSn(ChunkIDInfo(1, 10000, 1), 2); - ChunkInfoDetail *chunkInfo = new ChunkInfoDetail; + ChunkInfoDetail* chunkInfo = new ChunkInfoDetail; cl.GetChunkInfo(ChunkIDInfo(1, 10000, 1), chunkInfo); for (auto iter : chunkInfo->chunkSn) { if (iter != 1111) { diff --git a/test/client/fake/fakeChunkserver.h b/test/client/fake/fakeChunkserver.h index 6ebbbeffcf..0841e18d7d 100644 --- a/test/client/fake/fakeChunkserver.h +++ b/test/client/fake/fakeChunkserver.h @@ -23,15 +23,15 @@ #ifndef TEST_CLIENT_FAKE_FAKECHUNKSERVER_H_ #define TEST_CLIENT_FAKE_FAKECHUNKSERVER_H_ +#include #include #include #include -#include #include -#include +#include -#include // 
NOLINT #include +#include // NOLINT #include "proto/chunk.pb.h" #include "proto/cli2.pb.h" @@ -40,8 +40,8 @@ #include "test/client/fake/mockMDS.h" using braft::PeerId; -using curve::chunkserver::ChunkService; using curve::chunkserver::CHUNK_OP_STATUS; +using curve::chunkserver::ChunkService; class FakeChunkService : public ChunkService { public: @@ -53,20 +53,19 @@ class FakeChunkService : public ChunkService { } virtual ~FakeChunkService() {} - void WriteChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void WriteChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); retryTimes.fetch_add(1); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); if (rpcFailed) { cntl->SetFailed(-1, "set rpc failed!"); } - ::memcpy(chunk_, - cntl->request_attachment().to_string().c_str(), + ::memcpy(chunk_, cntl->request_attachment().to_string().c_str(), request->size()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response->set_appliedindex(2); @@ -75,13 +74,13 @@ class FakeChunkService : public ChunkService { } } - void ReadChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void ReadChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); retryTimes.fetch_add(1); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); if (rpcFailed) { cntl->SetFailed(EHOSTDOWN, "set rpc failed!"); } @@ -97,67 +96,69 @@ class FakeChunkService : public ChunkService { } void DeleteChunkSnapshotOrCorrectSn( - ::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakedeletesnapchunkret_->controller_ != nullptr && - fakedeletesnapchunkret_->controller_->Failed()) { + fakedeletesnapchunkret_->controller_->Failed()) { controller->SetFailed("failed"); } auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( - fakedeletesnapchunkret_->response_); + fakedeletesnapchunkret_->response_); response->CopyFrom(*resp); } void ReadChunkSnapshot(::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakereadchunksnapret_->controller_ != nullptr && - fakereadchunksnapret_->controller_->Failed()) { + fakereadchunksnapret_->controller_->Failed()) { controller->SetFailed("failed"); } - brpc::Controller *cntl = 
dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); char buff[8192] = {1}; ::memset(buff, 1, 8192); cntl->response_attachment().append(buff, request->size()); auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( - fakereadchunksnapret_->response_); + fakereadchunksnapret_->response_); response->CopyFrom(*resp); } - void GetChunkInfo(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::GetChunkInfoRequest *request, // NOLINT - ::curve::chunkserver::GetChunkInfoResponse *response, - google::protobuf::Closure *done) { + void GetChunkInfo( + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::GetChunkInfoRequest* request, // NOLINT + ::curve::chunkserver::GetChunkInfoResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeGetChunkInforet_->controller_ != nullptr && - fakeGetChunkInforet_->controller_->Failed()) { + fakeGetChunkInforet_->controller_->Failed()) { controller->SetFailed("failed"); } auto resp = static_cast<::curve::chunkserver::GetChunkInfoResponse*>( - fakeGetChunkInforet_->response_); + fakeGetChunkInforet_->response_); response->CopyFrom(*resp); } - void GetChunkHash(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::GetChunkHashRequest *request, // NOLINT - ::curve::chunkserver::GetChunkHashResponse *response, - google::protobuf::Closure *done) { + void GetChunkHash( + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::GetChunkHashRequest* request, // NOLINT + ::curve::chunkserver::GetChunkHashResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeGetChunkHashRet_->controller_ != nullptr && - fakeGetChunkHashRet_->controller_->Failed()) { + fakeGetChunkHashRet_->controller_->Failed()) { controller->SetFailed("failed"); } auto resp = static_cast<::curve::chunkserver::GetChunkHashResponse*>( - fakeGetChunkHashRet_->response_); + fakeGetChunkHashRet_->response_); response->CopyFrom(*resp); } @@ -177,13 +178,9 @@ class FakeChunkService : public ChunkService { fakeGetChunkHashRet_ = fakeret; } - void SetRPCFailed() { - rpcFailed = true; - } + void SetRPCFailed() { rpcFailed = true; } - void ReSetRPCFailed() { - rpcFailed = false; - } + void ReSetRPCFailed() { rpcFailed = false; } FakeReturn* fakedeletesnapchunkret_; FakeReturn* fakereadchunksnapret_; @@ -200,16 +197,13 @@ class FakeChunkService : public ChunkService { waittimeMS = 0; } - void CleanRetryTimes() { - retryTimes.store(0); - } + void CleanRetryTimes() { retryTimes.store(0); } - uint64_t GetRetryTimes() { - return retryTimes.load(); - } + uint64_t GetRetryTimes() { return retryTimes.load(); } private: - // wait4netunstable用来模拟网络延时,当打开之后,每个读写rpc会停留一段时间再返回 + // wait4netunstable is used to simulate network latency. 
When turned on, + // each read/write rpc will pause for a period of time before returning bool wait4netunstable; uint64_t waittimeMS; bool rpcFailed; @@ -219,32 +213,24 @@ class FakeChunkService : public ChunkService { class CliServiceFake : public curve::chunkserver::CliService2 { public: - CliServiceFake() { - invokeTimes = 0; - } + CliServiceFake() { invokeTimes = 0; } void GetLeader(::google::protobuf::RpcController* controller, - const curve::chunkserver::GetLeaderRequest2* request, - curve::chunkserver::GetLeaderResponse2* response, - ::google::protobuf::Closure* done) { + const curve::chunkserver::GetLeaderRequest2* request, + curve::chunkserver::GetLeaderResponse2* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address(leaderid_.to_string()); response->set_allocated_leader(peer); invokeTimes++; } - void SetPeerID(PeerId peerid) { - leaderid_ = peerid; - } + void SetPeerID(PeerId peerid) { leaderid_ = peerid; } - uint64_t GetInvokeTimes() { - return invokeTimes; - } + uint64_t GetInvokeTimes() { return invokeTimes; } - void ReSetInvokeTimes() { - invokeTimes = 0; - } + void ReSetInvokeTimes() { invokeTimes = 0; } private: PeerId leaderid_; @@ -253,17 +239,19 @@ class CliServiceFake : public curve::chunkserver::CliService2 { class FakeChunkServerService : public ChunkService { public: - void WriteChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void WriteChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - if (fakewriteret_->controller_ != nullptr && fakewriteret_->controller_->Failed()) { // NOLINT + if (fakewriteret_->controller_ != nullptr && + fakewriteret_->controller_->Failed()) { // NOLINT controller->SetFailed("failed"); } - auto resp = static_cast<::curve::chunkserver::ChunkResponse*>(fakewriteret_->response_); // NOLINT + auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( + fakewriteret_->response_); // NOLINT response->CopyFrom(*resp); static uint64_t latestSn = 0; @@ -274,13 +262,13 @@ class FakeChunkServerService : public ChunkService { latestSn = std::max(latestSn, request->sn()); } - void ReadChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void ReadChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); char buff[8192] = {0}; if (request->has_appliedindex()) { memset(buff, 'a', 4096); @@ -290,17 +278,14 @@ class FakeChunkServerService : public ChunkService { memset(buff + 4096, 'd', 4096); } cntl->response_attachment().append(buff, request->size()); - auto resp = static_cast<::curve::chunkserver::ChunkResponse*>(fakereadret_->response_); // NOLINT + auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( + fakereadret_->response_); // NOLINT 
response->CopyFrom(*resp); } - void SetFakeWriteReturn(FakeReturn* ret) { - fakewriteret_ = ret; - } + void SetFakeWriteReturn(FakeReturn* ret) { fakewriteret_ = ret; } - void SetFakeReadReturn(FakeReturn* ret) { - fakereadret_ = ret; - } + void SetFakeReadReturn(FakeReturn* ret) { fakereadret_ = ret; } private: FakeReturn* fakewriteret_; @@ -310,23 +295,20 @@ class FakeChunkServerService : public ChunkService { class FakeRaftStateService : public braft::raft_stat { public: void default_method(::google::protobuf::RpcController* controller, - const ::braft::IndexRequest*, - ::braft::IndexResponse*, + const ::braft::IndexRequest*, ::braft::IndexResponse*, ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); // NOLINT + brpc::Controller* cntl = + dynamic_cast(controller); // NOLINT if (failed_) { cntl->SetFailed("failed for test"); return; } cntl->response_attachment().append(buf_); } - void SetBuf(const butil::IOBuf& iobuf) { - buf_ = iobuf; - } - void SetFailed(bool failed) { - failed_ = failed; - } + void SetBuf(const butil::IOBuf& iobuf) { buf_ = iobuf; } + void SetFailed(bool failed) { failed_ = failed; } + private: butil::IOBuf buf_; bool failed_ = false; diff --git a/test/client/fake/fakeMDS.h b/test/client/fake/fakeMDS.h index e29f251c26..6daed2e5ed 100644 --- a/test/client/fake/fakeMDS.h +++ b/test/client/fake/fakeMDS.h @@ -22,73 +22,68 @@ #ifndef TEST_CLIENT_FAKE_FAKEMDS_H_ #define TEST_CLIENT_FAKE_FAKEMDS_H_ -#include -#include -#include #include +#include +#include +#include -#include -#include #include -#include #include -#include "src/client/client_common.h" -#include "test/client/fake/mockMDS.h" -#include "test/client/fake/fakeChunkserver.h" +#include +#include +#include -#include "proto/nameserver2.pb.h" -#include "proto/topology.pb.h" #include "proto/copyset.pb.h" -#include "proto/schedule.pb.h" -#include "src/common/timeutility.h" -#include "src/common/authenticator.h" #include "proto/heartbeat.pb.h" +#include "proto/nameserver2.pb.h" +#include "proto/schedule.pb.h" +#include "proto/topology.pb.h" +#include "src/client/client_common.h" #include "src/client/mds_client_base.h" +#include "src/common/authenticator.h" +#include "src/common/timeutility.h" #include "src/common/uuid.h" +#include "test/client/fake/fakeChunkserver.h" +#include "test/client/fake/mockMDS.h" using curve::common::Authenticator; using braft::PeerId; -using curve::common::Authenticator; using curve::chunkserver::COPYSET_OP_STATUS; -using ::curve::mds::topology::GetChunkServerListInCopySetsResponse; -using ::curve::mds::topology::GetChunkServerListInCopySetsRequest; +using curve::common::Authenticator; +using ::curve::mds::schedule::QueryChunkServerRecoverStatusRequest; +using ::curve::mds::schedule::QueryChunkServerRecoverStatusResponse; +using ::curve::mds::schedule::RapidLeaderScheduleRequst; +using ::curve::mds::schedule::RapidLeaderScheduleResponse; using ::curve::mds::topology::ChunkServerRegistRequest; using ::curve::mds::topology::ChunkServerRegistResponse; -using ::curve::mds::topology::GetClusterInfoRequest; -using ::curve::mds::topology::GetClusterInfoResponse; using ::curve::mds::topology::GetChunkServerInfoRequest; using ::curve::mds::topology::GetChunkServerInfoResponse; +using ::curve::mds::topology::GetChunkServerListInCopySetsRequest; +using ::curve::mds::topology::GetChunkServerListInCopySetsResponse; +using ::curve::mds::topology::GetClusterInfoRequest; +using ::curve::mds::topology::GetClusterInfoResponse; +using 
::curve::mds::topology::GetCopySetsInChunkServerRequest; +using ::curve::mds::topology::GetCopySetsInChunkServerResponse; using ::curve::mds::topology::ListChunkServerRequest; using ::curve::mds::topology::ListChunkServerResponse; +using ::curve::mds::topology::ListLogicalPoolRequest; +using ::curve::mds::topology::ListLogicalPoolResponse; using ::curve::mds::topology::ListPhysicalPoolRequest; using ::curve::mds::topology::ListPhysicalPoolResponse; using ::curve::mds::topology::ListPoolZoneRequest; using ::curve::mds::topology::ListPoolZoneResponse; using ::curve::mds::topology::ListZoneServerRequest; using ::curve::mds::topology::ListZoneServerResponse; -using ::curve::mds::topology::GetCopySetsInChunkServerRequest; -using ::curve::mds::topology::GetCopySetsInChunkServerResponse; -using ::curve::mds::topology::ListLogicalPoolRequest; -using ::curve::mds::topology::ListLogicalPoolResponse; -using ::curve::mds::topology::GetClusterInfoRequest; -using ::curve::mds::topology::GetClusterInfoResponse; -using ::curve::mds::schedule::RapidLeaderScheduleRequst; -using ::curve::mds::schedule::RapidLeaderScheduleResponse; -using ::curve::mds::schedule::QueryChunkServerRecoverStatusRequest; -using ::curve::mds::schedule::QueryChunkServerRecoverStatusResponse; -using HeartbeatRequest = curve::mds::heartbeat::ChunkServerHeartbeatRequest; +using HeartbeatRequest = curve::mds::heartbeat::ChunkServerHeartbeatRequest; using HeartbeatResponse = curve::mds::heartbeat::ChunkServerHeartbeatResponse; - DECLARE_bool(start_builtin_service); class FakeMDSCurveFSService : public curve::mds::CurveFSService { public: - FakeMDSCurveFSService() { - retrytimes_ = 0; - } + FakeMDSCurveFSService() { retrytimes_ = 0; } void ListClient(::google::protobuf::RpcController* controller, const ::curve::mds::ListClientRequest* request, @@ -96,39 +91,39 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeListClient_->controller_ != nullptr && - fakeListClient_->controller_->Failed()) { + fakeListClient_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::ListClientResponse*>( - fakeListClient_->response_); + fakeListClient_->response_); response->CopyFrom(*resp); } void CreateFile(::google::protobuf::RpcController* controller, - const ::curve::mds::CreateFileRequest* request, - ::curve::mds::CreateFileResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::CreateFileRequest* request, + ::curve::mds::CreateFileResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeCreateFileret_->controller_ != nullptr - && fakeCreateFileret_->controller_->Failed()) { + if (fakeCreateFileret_->controller_ != nullptr && + fakeCreateFileret_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::CreateFileResponse*>( - fakeCreateFileret_->response_); + fakeCreateFileret_->response_); response->CopyFrom(*resp); } void GetFileInfo(::google::protobuf::RpcController* controller, - const ::curve::mds::GetFileInfoRequest* request, - ::curve::mds::GetFileInfoResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::GetFileInfoRequest* request, + ::curve::mds::GetFileInfoResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeGetFileInforet_->controller_ != nullptr && 
fakeGetFileInforet_->controller_->Failed()) {
@@ -138,14 +133,15 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService {
         retrytimes_++;
 
         auto resp = static_cast<::curve::mds::GetFileInfoResponse*>(
-                        fakeGetFileInforet_->response_);
+            fakeGetFileInforet_->response_);
         response->CopyFrom(*resp);
     }
 
-    void IncreaseFileEpoch(::google::protobuf::RpcController* controller,
-        const ::curve::mds::IncreaseFileEpochRequest* request,
-        ::curve::mds::IncreaseFileEpochResponse* response,
-        ::google::protobuf::Closure* done) {
+    void IncreaseFileEpoch(
+        ::google::protobuf::RpcController* controller,
+        const ::curve::mds::IncreaseFileEpochRequest* request,
+        ::curve::mds::IncreaseFileEpochResponse* response,
+        ::google::protobuf::Closure* done) {
         brpc::ClosureGuard done_guard(done);
         if (fakeIncreaseFileEpochret_->controller_ != nullptr &&
             fakeIncreaseFileEpochret_->controller_->Failed()) {
@@ -155,7 +151,7 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService {
         retrytimes_++;
 
         auto resp = static_cast<::curve::mds::IncreaseFileEpochResponse*>(
-                        fakeIncreaseFileEpochret_->response_);
+            fakeIncreaseFileEpochret_->response_);
         response->CopyFrom(*resp);
     }
 
@@ -165,41 +161,42 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService {
                           ::google::protobuf::Closure* done) {
         brpc::ClosureGuard done_guard(done);
         if (fakeGetAllocatedSizeRet_->controller_ != nullptr &&
-                fakeGetAllocatedSizeRet_->controller_->Failed()) {
+            fakeGetAllocatedSizeRet_->controller_->Failed()) {
             controller->SetFailed("failed");
         }
 
         retrytimes_++;
 
         auto resp = static_cast<::curve::mds::GetAllocatedSizeResponse*>(
-                        fakeGetAllocatedSizeRet_->response_);
+            fakeGetAllocatedSizeRet_->response_);
         response->CopyFrom(*resp);
     }
 
-    void GetOrAllocateSegment(::google::protobuf::RpcController* controller,
-                    const ::curve::mds::GetOrAllocateSegmentRequest* request,
-                    ::curve::mds::GetOrAllocateSegmentResponse* response,
-                    ::google::protobuf::Closure* done) {
+    void GetOrAllocateSegment(
+        ::google::protobuf::RpcController* controller,
+        const ::curve::mds::GetOrAllocateSegmentRequest* request,
+        ::curve::mds::GetOrAllocateSegmentResponse* response,
+        ::google::protobuf::Closure* done) {
         brpc::ClosureGuard done_guard(done);
         if (fakeGetOrAllocateSegmentret_->controller_ != nullptr &&
-            fakeGetOrAllocateSegmentret_->controller_->Failed()) {
+            fakeGetOrAllocateSegmentret_->controller_->Failed()) {
             controller->SetFailed("failed");
         }
 
         if (!strcmp(request->owner().c_str(), "root")) {
-            // 当user为root用户的时候需要检查其signature信息
+            // When the user is root, its signature information must be
+            // checked
             std::string str2sig = Authenticator::GetString2Signature(
-                request->date(),
-                request->owner());
-            std::string sig = Authenticator::CalcString2Signature(str2sig,
-                "root_password");
+                request->date(), request->owner());
+            std::string sig =
+                Authenticator::CalcString2Signature(str2sig, "root_password");
             ASSERT_STREQ(request->signature().c_str(), sig.c_str());
             LOG(INFO) << "GetOrAllocateSegment with password!";
         }
 
         retrytimes_++;
 
-        // 检查请求内容是全路径
+        // Check that the request content is a full path
         auto checkFullpath = [&]() {
             LOG(INFO) << "request filename = " << request->filename();
             ASSERT_EQ(request->filename()[0], '/');
@@ -207,14 +204,14 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService {
         (void)checkFullpath;
 
         fiu_do_on("test/client/fake/fakeMDS.GetOrAllocateSegment",
-                checkFullpath());
+                  checkFullpath());
 
         curve::mds::GetOrAllocateSegmentResponse* resp;
-        if (request->filename() == "/clonesource") {
+        if 
(request->filename() == "/clonesource") { resp = static_cast<::curve::mds::GetOrAllocateSegmentResponse*>( - fakeGetOrAllocateSegmentretForClone_->response_); + fakeGetOrAllocateSegmentretForClone_->response_); } else { resp = static_cast<::curve::mds::GetOrAllocateSegmentResponse*>( - fakeGetOrAllocateSegmentret_->response_); + fakeGetOrAllocateSegmentret_->response_); } response->CopyFrom(*resp); } @@ -236,26 +233,26 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { } void OpenFile(::google::protobuf::RpcController* controller, - const ::curve::mds::OpenFileRequest* request, - ::curve::mds::OpenFileResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::OpenFileRequest* request, + ::curve::mds::OpenFileResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeopenfile_->controller_ != nullptr && - fakeopenfile_->controller_->Failed()) { + fakeopenfile_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::OpenFileResponse*>( - fakeopenfile_->response_); + fakeopenfile_->response_); response->CopyFrom(*resp); } void RefreshSession(::google::protobuf::RpcController* controller, const curve::mds::ReFreshSessionRequest* request, curve::mds::ReFreshSessionResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::Closure* done) { { brpc::ClosureGuard done_guard(done); if (fakeRefreshSession_->controller_ != nullptr && @@ -266,10 +263,10 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { static int seq = 1; auto resp = static_cast<::curve::mds::ReFreshSessionResponse*>( - fakeRefreshSession_->response_); + fakeRefreshSession_->response_); if (resp->statuscode() == ::curve::mds::StatusCode::kOK) { - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; info->set_seqnum(seq++); info->set_filename("_filename_"); info->set_id(resp->fileinfo().id()); @@ -279,13 +276,13 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { info->set_length(4 * 1024 * 1024 * 1024ul); info->set_ctime(12345678); - curve::mds::ProtoSession *protoSession = - new curve::mds::ProtoSession(); + curve::mds::ProtoSession* protoSession = + new curve::mds::ProtoSession(); protoSession->set_sessionid("1234"); protoSession->set_createtime(12345); protoSession->set_leasetime(10000000); protoSession->set_sessionstatus( - ::curve::mds::SessionStatus::kSessionOK); + ::curve::mds::SessionStatus::kSessionOK); response->set_statuscode(::curve::mds::StatusCode::kOK); response->set_sessionid("1234"); @@ -299,175 +296,166 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { retrytimes_++; - if (refreshtask_) - refreshtask_(); + if (refreshtask_) refreshtask_(); } void CreateSnapShot(::google::protobuf::RpcController* controller, - const ::curve::mds::CreateSnapShotRequest* request, - ::curve::mds::CreateSnapShotResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::CreateSnapShotRequest* request, + ::curve::mds::CreateSnapShotResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakecreatesnapshotret_->controller_ != nullptr && - fakecreatesnapshotret_->controller_->Failed()) { + fakecreatesnapshotret_->controller_->Failed()) { controller->SetFailed("failed"); } if (request->has_signature()) { - CheckAuth(request->signature(), - request->filename(), - request->owner(), - request->date()); + 
CheckAuth(request->signature(), request->filename(), + request->owner(), request->date()); } retrytimes_++; auto resp = static_cast<::curve::mds::CreateSnapShotResponse*>( - fakecreatesnapshotret_->response_); + fakecreatesnapshotret_->response_); response->CopyFrom(*resp); } void ListSnapShot(::google::protobuf::RpcController* controller, - const ::curve::mds::ListSnapShotFileInfoRequest* request, - ::curve::mds::ListSnapShotFileInfoResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::ListSnapShotFileInfoRequest* request, + ::curve::mds::ListSnapShotFileInfoResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakelistsnapshotret_->controller_ != nullptr && - fakelistsnapshotret_->controller_->Failed()) { + fakelistsnapshotret_->controller_->Failed()) { controller->SetFailed("failed"); } if (request->has_signature()) { - CheckAuth(request->signature(), - request->filename(), - request->owner(), - request->date()); + CheckAuth(request->signature(), request->filename(), + request->owner(), request->date()); } retrytimes_++; auto resp = static_cast<::curve::mds::ListSnapShotFileInfoResponse*>( - fakelistsnapshotret_->response_); + fakelistsnapshotret_->response_); response->CopyFrom(*resp); } void DeleteSnapShot(::google::protobuf::RpcController* controller, - const ::curve::mds::DeleteSnapShotRequest* request, - ::curve::mds::DeleteSnapShotResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::DeleteSnapShotRequest* request, + ::curve::mds::DeleteSnapShotResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakedeletesnapshotret_->controller_ != nullptr && - fakedeletesnapshotret_->controller_->Failed()) { + fakedeletesnapshotret_->controller_->Failed()) { controller->SetFailed("failed"); } if (request->has_signature()) { - CheckAuth(request->signature(), - request->filename(), - request->owner(), - request->date()); + CheckAuth(request->signature(), request->filename(), + request->owner(), request->date()); } retrytimes_++; auto resp = static_cast<::curve::mds::DeleteSnapShotResponse*>( - fakedeletesnapshotret_->response_); + fakedeletesnapshotret_->response_); response->CopyFrom(*resp); } - void CheckSnapShotStatus(::google::protobuf::RpcController* controller, - const ::curve::mds::CheckSnapShotStatusRequest* request, - ::curve::mds::CheckSnapShotStatusResponse* response, - ::google::protobuf::Closure* done) { + void CheckSnapShotStatus( + ::google::protobuf::RpcController* controller, + const ::curve::mds::CheckSnapShotStatusRequest* request, + ::curve::mds::CheckSnapShotStatusResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakechecksnapshotret_->controller_ != nullptr && - fakechecksnapshotret_->controller_->Failed()) { + fakechecksnapshotret_->controller_->Failed()) { controller->SetFailed("failed"); } if (request->has_signature()) { - CheckAuth(request->signature(), - request->filename(), - request->owner(), - request->date()); + CheckAuth(request->signature(), request->filename(), + request->owner(), request->date()); } auto resp = static_cast<::curve::mds::DeleteSnapShotResponse*>( - fakechecksnapshotret_->response_); + fakechecksnapshotret_->response_); response->CopyFrom(*resp); } - void GetSnapShotFileSegment(::google::protobuf::RpcController* controller, - const ::curve::mds::GetOrAllocateSegmentRequest* request, - ::curve::mds::GetOrAllocateSegmentResponse* response, - 
::google::protobuf::Closure* done) { + void GetSnapShotFileSegment( + ::google::protobuf::RpcController* controller, + const ::curve::mds::GetOrAllocateSegmentRequest* request, + ::curve::mds::GetOrAllocateSegmentResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakegetsnapsegmentinforet_->controller_ != nullptr && - fakegetsnapsegmentinforet_->controller_->Failed()) { + fakegetsnapsegmentinforet_->controller_->Failed()) { controller->SetFailed("failed"); } if (request->has_signature()) { - CheckAuth(request->signature(), - request->filename(), - request->owner(), - request->date()); + CheckAuth(request->signature(), request->filename(), + request->owner(), request->date()); } retrytimes_++; auto resp = static_cast<::curve::mds::GetOrAllocateSegmentResponse*>( - fakegetsnapsegmentinforet_->response_); + fakegetsnapsegmentinforet_->response_); response->CopyFrom(*resp); } void DeleteChunkSnapshotOrCorrectSn( - ::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakedeletesnapchunkret_->controller_ != nullptr && - fakedeletesnapchunkret_->controller_->Failed()) { + fakedeletesnapchunkret_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( - fakedeletesnapchunkret_->response_); + fakedeletesnapchunkret_->response_); response->CopyFrom(*resp); } void ReadChunkSnapshot(::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakereadchunksnapret_->controller_ != nullptr && - fakereadchunksnapret_->controller_->Failed()) { + fakereadchunksnapret_->controller_->Failed()) { controller->SetFailed("failed"); } auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( - fakereadchunksnapret_->response_); + fakereadchunksnapret_->response_); response->CopyFrom(*resp); } void CloseFile(::google::protobuf::RpcController* controller, - const ::curve::mds::CloseFileRequest* request, - ::curve::mds::CloseFileResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::CloseFileRequest* request, + ::curve::mds::CloseFileResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeclosefile_->controller_ != nullptr && - fakeclosefile_->controller_->Failed()) { + fakeclosefile_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::CloseFileResponse*>( - fakeclosefile_->response_); + fakeclosefile_->response_); response->CopyFrom(*resp); if (closeFileTask_) { @@ -481,14 +469,14 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakerenamefile_->controller_ != nullptr && - fakerenamefile_->controller_->Failed()) { + fakerenamefile_->controller_->Failed()) { controller->SetFailed("failed"); 
} retrytimes_++; auto resp = static_cast<::curve::mds::CloseFileResponse*>( - fakerenamefile_->response_); + fakerenamefile_->response_); response->CopyFrom(*resp); } @@ -498,7 +486,7 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakedeletefile_->controller_ != nullptr && - fakedeletefile_->controller_->Failed()) { + fakedeletefile_->controller_->Failed()) { controller->SetFailed("failed"); } @@ -509,12 +497,13 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { retrytimes_++; auto resp = static_cast<::curve::mds::CloseFileResponse*>( - fakedeletefile_->response_); + fakedeletefile_->response_); if (request->forcedelete()) { LOG(INFO) << "force delete file!"; - fiu_do_on("test/client/fake/fakeMDS/forceDeleteFile", - resp->set_statuscode(curve::mds::StatusCode::kNotSupported)); + fiu_do_on( + "test/client/fake/fakeMDS/forceDeleteFile", + resp->set_statuscode(curve::mds::StatusCode::kNotSupported)); } response->CopyFrom(*resp); @@ -526,103 +515,97 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeextendfile_->controller_ != nullptr && - fakeextendfile_->controller_->Failed()) { + fakeextendfile_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::ExtendFileResponse*>( - fakeextendfile_->response_); + fakeextendfile_->response_); response->CopyFrom(*resp); } void CreateCloneFile(::google::protobuf::RpcController* controller, - const ::curve::mds::CreateCloneFileRequest* request, - ::curve::mds::CreateCloneFileResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::CreateCloneFileRequest* request, + ::curve::mds::CreateCloneFileResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeCreateCloneFile_->controller_ != nullptr - && fakeCreateCloneFile_->controller_->Failed()) { + if (fakeCreateCloneFile_->controller_ != nullptr && + fakeCreateCloneFile_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::CreateCloneFileResponse*>( - fakeCreateCloneFile_->response_); + fakeCreateCloneFile_->response_); response->CopyFrom(*resp); } - void SetCloneFileStatus(::google::protobuf::RpcController* controller, - const ::curve::mds::SetCloneFileStatusRequest* request, - ::curve::mds::SetCloneFileStatusResponse* response, - ::google::protobuf::Closure* done) { + void SetCloneFileStatus( + ::google::protobuf::RpcController* controller, + const ::curve::mds::SetCloneFileStatusRequest* request, + ::curve::mds::SetCloneFileStatusResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeSetCloneFileStatus_->controller_ != nullptr - && fakeSetCloneFileStatus_->controller_->Failed()) { + if (fakeSetCloneFileStatus_->controller_ != nullptr && + fakeSetCloneFileStatus_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::SetCloneFileStatusResponse*>( - fakeSetCloneFileStatus_->response_); + fakeSetCloneFileStatus_->response_); response->CopyFrom(*resp); } void ChangeOwner(::google::protobuf::RpcController* controller, - const ::curve::mds::ChangeOwnerRequest* request, - ::curve::mds::ChangeOwnerResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::ChangeOwnerRequest* 
request, + ::curve::mds::ChangeOwnerResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeChangeOwner_->controller_ != nullptr && - fakeChangeOwner_->controller_->Failed()) { + fakeChangeOwner_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::ChangeOwnerResponse*>( - fakeChangeOwner_->response_); + fakeChangeOwner_->response_); response->CopyFrom(*resp); } void ListDir(::google::protobuf::RpcController* controller, - const ::curve::mds::ListDirRequest* request, - ::curve::mds::ListDirResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::ListDirRequest* request, + ::curve::mds::ListDirResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeListDir_->controller_ != nullptr && - fakeListDir_->controller_->Failed()) { + fakeListDir_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::ListDirResponse*>( - fakeListDir_->response_); + fakeListDir_->response_); response->CopyFrom(*resp); } - void SetListDir(FakeReturn* fakeret) { - fakeListDir_ = fakeret; - } + void SetListDir(FakeReturn* fakeret) { fakeListDir_ = fakeret; } - void SetListClient(FakeReturn* fakeret) { - fakeListClient_ = fakeret; - } + void SetListClient(FakeReturn* fakeret) { fakeListClient_ = fakeret; } void SetCreateCloneFile(FakeReturn* fakeret) { fakeCreateCloneFile_ = fakeret; } - void SetExtendFile(FakeReturn* fakeret) { - fakeextendfile_ = fakeret; - } - + void SetExtendFile(FakeReturn* fakeret) { fakeextendfile_ = fakeret; } void SetCreateFileFakeReturn(FakeReturn* fakeret) { fakeCreateFileret_ = fakeret; @@ -652,9 +635,7 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { fakeDeAllocateSegment_ = fakeret; } - void SetOpenFile(FakeReturn* fakeret) { - fakeopenfile_ = fakeret; - } + void SetOpenFile(FakeReturn* fakeret) { fakeopenfile_ = fakeret; } void SetRefreshSession(FakeReturn* fakeret, std::function t) { fakeRefreshSession_ = fakeret; @@ -685,61 +666,41 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { fakedeletesnapchunkret_ = fakeret; } - void SetCloseFile(FakeReturn* fakeret) { - fakeclosefile_ = fakeret; - } + void SetCloseFile(FakeReturn* fakeret) { fakeclosefile_ = fakeret; } - void SetCheckSnap(FakeReturn* fakeret) { - fakechecksnapshotret_ = fakeret; - } + void SetCheckSnap(FakeReturn* fakeret) { fakechecksnapshotret_ = fakeret; } - void SetRenameFile(FakeReturn* fakeret) { - fakerenamefile_ = fakeret; - } + void SetRenameFile(FakeReturn* fakeret) { fakerenamefile_ = fakeret; } - void SetDeleteFile(FakeReturn* fakeret) { - fakedeletefile_ = fakeret; - } + void SetDeleteFile(FakeReturn* fakeret) { fakedeletefile_ = fakeret; } - void SetRegistRet(FakeReturn* fakeret) { - fakeRegisterret_ = fakeret; - } + void SetRegistRet(FakeReturn* fakeret) { fakeRegisterret_ = fakeret; } void SetCloneFileStatus(FakeReturn* fakeret) { fakeSetCloneFileStatus_ = fakeret; } - void SetChangeOwner(FakeReturn* fakeret) { - fakeChangeOwner_ = fakeret; - } + void SetChangeOwner(FakeReturn* fakeret) { fakeChangeOwner_ = fakeret; } void SetCloseFileTask(std::function task) { closeFileTask_ = task; } - void CleanRetryTimes() { - retrytimes_ = 0; - } + void CleanRetryTimes() { retrytimes_ = 0; } - uint64_t GetRetryTimes() { - return retrytimes_; - } + uint64_t GetRetryTimes() { return retrytimes_; } - std::string GetIP() { - return ip_; - } + std::string 
GetIP() { return ip_; } - uint16_t GetPort() { - return port_; - } + uint16_t GetPort() { return port_; } - void CheckAuth(const std::string& signature, - const std::string& filename, - const std::string& owner, - uint64_t date) { + void CheckAuth(const std::string& signature, const std::string& filename, + const std::string& owner, uint64_t date) { if (owner == curve::client::kRootUserName) { - std::string str2sig = Authenticator::GetString2Signature(date, owner); // NOLINT - std::string sigtest = Authenticator::CalcString2Signature(str2sig, "123"); // NOLINT + std::string str2sig = + Authenticator::GetString2Signature(date, owner); // NOLINT + std::string sigtest = + Authenticator::CalcString2Signature(str2sig, "123"); // NOLINT ASSERT_STREQ(sigtest.c_str(), signature.c_str()); } else { ASSERT_STREQ("", signature.c_str()); @@ -785,18 +746,17 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { class FakeMDSTopologyService : public curve::mds::topology::TopologyService { public: void GetChunkServerListInCopySets( - ::google::protobuf::RpcController* controller, - const GetChunkServerListInCopySetsRequest* request, - GetChunkServerListInCopySetsResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController* controller, + const GetChunkServerListInCopySetsRequest* request, + GetChunkServerListInCopySetsResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); int statcode = 0; if (response->has_statuscode()) { statcode = response->statuscode(); } - if (statcode == -1 || - (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed())) { + if (statcode == -1 || (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed())) { controller->SetFailed("failed"); } @@ -805,11 +765,10 @@ class FakeMDSTopologyService : public curve::mds::topology::TopologyService { response->CopyFrom(*resp); } - void RegistChunkServer( - ::google::protobuf::RpcController* controller, - const ChunkServerRegistRequest* request, - ChunkServerRegistResponse* response, - ::google::protobuf::Closure* done) { + void RegistChunkServer(::google::protobuf::RpcController* controller, + const ChunkServerRegistRequest* request, + ChunkServerRegistResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); response->set_statuscode(0); @@ -818,87 +777,87 @@ class FakeMDSTopologyService : public curve::mds::topology::TopologyService { } void GetChunkServer(::google::protobuf::RpcController* controller, - const GetChunkServerInfoRequest* request, - GetChunkServerInfoResponse* response, - ::google::protobuf::Closure* done) { + const GetChunkServerInfoRequest* request, + GetChunkServerInfoResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { controller->SetFailed("failed"); return; } - auto resp = static_cast( - fakeret_->response_); + auto resp = + static_cast(fakeret_->response_); response->CopyFrom(*resp); } void ListChunkServer(::google::protobuf::RpcController* controller, - const ListChunkServerRequest* request, - ListChunkServerResponse* response, - ::google::protobuf::Closure* done) { + const ListChunkServerRequest* request, + ListChunkServerResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeret_->controller_ != nullptr - && 
fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { controller->SetFailed("failed"); return; } - auto resp = static_cast( - fakeret_->response_); + auto resp = static_cast(fakeret_->response_); response->CopyFrom(*resp); } void ListPhysicalPool(::google::protobuf::RpcController* controller, - const ListPhysicalPoolRequest* request, - ListPhysicalPoolResponse* response, - ::google::protobuf::Closure* done) { + const ListPhysicalPoolRequest* request, + ListPhysicalPoolResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakelistpoolret_->controller_ != nullptr - && fakelistpoolret_->controller_->Failed()) { + if (fakelistpoolret_->controller_ != nullptr && + fakelistpoolret_->controller_->Failed()) { controller->SetFailed("failed"); return; } - auto resp = static_cast( - fakelistpoolret_->response_); + auto resp = + static_cast(fakelistpoolret_->response_); response->CopyFrom(*resp); } void ListPoolZone(::google::protobuf::RpcController* controller, - const ListPoolZoneRequest* request, - ListPoolZoneResponse* response, - ::google::protobuf::Closure* done) { + const ListPoolZoneRequest* request, + ListPoolZoneResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakelistzoneret_->controller_ != nullptr - && fakelistzoneret_->controller_->Failed()) { + if (fakelistzoneret_->controller_ != nullptr && + fakelistzoneret_->controller_->Failed()) { controller->SetFailed("failed"); return; } - auto resp = static_cast( - fakelistzoneret_->response_); + auto resp = + static_cast(fakelistzoneret_->response_); response->CopyFrom(*resp); } void ListZoneServer(::google::protobuf::RpcController* controller, - const ListZoneServerRequest* request, - ListZoneServerResponse* response, - ::google::protobuf::Closure* done) { + const ListZoneServerRequest* request, + ListZoneServerResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakelistserverret_->controller_ != nullptr - && fakelistserverret_->controller_->Failed()) { + if (fakelistserverret_->controller_ != nullptr && + fakelistserverret_->controller_->Failed()) { controller->SetFailed("failed"); return; } - auto resp = static_cast( - fakelistserverret_->response_); + auto resp = + static_cast(fakelistserverret_->response_); response->CopyFrom(*resp); } - void GetCopySetsInChunkServer(::google::protobuf::RpcController* controller, - const GetCopySetsInChunkServerRequest* request, - GetCopySetsInChunkServerResponse* response, - ::google::protobuf::Closure* done) { + void GetCopySetsInChunkServer( + ::google::protobuf::RpcController* controller, + const GetCopySetsInChunkServerRequest* request, + GetCopySetsInChunkServerResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakegetcopysetincsret_->controller_ != nullptr - && fakegetcopysetincsret_->controller_->Failed()) { + if (fakegetcopysetincsret_->controller_ != nullptr && + fakegetcopysetincsret_->controller_->Failed()) { controller->SetFailed("failed"); return; } @@ -908,12 +867,12 @@ class FakeMDSTopologyService : public curve::mds::topology::TopologyService { } void ListLogicalPool(::google::protobuf::RpcController* controller, - const ListLogicalPoolRequest* request, - ListLogicalPoolResponse* response, - ::google::protobuf::Closure* done) { + const ListLogicalPoolRequest* request, + ListLogicalPoolResponse* response, + 
::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakelistlogicalpoolret_->controller_ != nullptr - && fakelistlogicalpoolret_->controller_->Failed()) { + if (fakelistlogicalpoolret_->controller_ != nullptr && + fakelistlogicalpoolret_->controller_->Failed()) { controller->SetFailed("failed"); return; } @@ -933,9 +892,7 @@ class FakeMDSTopologyService : public curve::mds::topology::TopologyService { response->set_clusterid(uuid); } - void SetFakeReturn(FakeReturn* fakeret) { - fakeret_ = fakeret; - } + void SetFakeReturn(FakeReturn* fakeret) { fakeret_ = fakeret; } FakeReturn* fakeret_; FakeReturn* fakelistpoolret_; @@ -945,11 +902,10 @@ class FakeMDSTopologyService : public curve::mds::topology::TopologyService { FakeReturn* fakelistlogicalpoolret_; }; -typedef void (*HeartbeatCallback) ( - ::google::protobuf::RpcController* controller, - const HeartbeatRequest* request, - HeartbeatResponse* response, - ::google::protobuf::Closure* done); +typedef void (*HeartbeatCallback)(::google::protobuf::RpcController* controller, + const HeartbeatRequest* request, + HeartbeatResponse* response, + ::google::protobuf::Closure* done); class FakeMDSHeartbeatService : public curve::mds::heartbeat::HeartbeatService { public: @@ -975,19 +931,18 @@ class FakeMDSHeartbeatService : public curve::mds::heartbeat::HeartbeatService { private: HeartbeatCallback cb_; - mutable std::mutex cbMtx_; + mutable std::mutex cbMtx_; }; class FakeCreateCopysetService : public curve::chunkserver::CopysetService { public: - void CreateCopysetNode( - ::google::protobuf::RpcController* controller, - const ::curve::chunkserver::CopysetRequest* request, - ::curve::chunkserver::CopysetResponse* response, - ::google::protobuf::Closure* done) { + void CreateCopysetNode(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::CopysetRequest* request, + ::curve::chunkserver::CopysetResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { controller->SetFailed("failed"); } @@ -996,22 +951,23 @@ class FakeCreateCopysetService : public curve::chunkserver::CopysetService { response->CopyFrom(*resp); } - void GetCopysetStatus(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::CopysetStatusRequest *request, - ::curve::chunkserver::CopysetStatusResponse *response, - google::protobuf::Closure *done) { + void GetCopysetStatus( + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::CopysetStatusRequest* request, + ::curve::chunkserver::CopysetStatusResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - if (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { controller->SetFailed("failed"); return; } response->set_state(::braft::State::STATE_LEADER); - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); response->set_allocated_peer(peer); peer->set_address("127.0.0.1:1111"); - curve::common::Peer *leader = new curve::common::Peer(); + curve::common::Peer* leader = new curve::common::Peer(); response->set_allocated_leader(leader); leader->set_address("127.0.0.1:1111"); response->set_readonly(1); @@ -1029,21 +985,13 @@ class FakeCreateCopysetService : public 
curve::chunkserver::CopysetService { response->set_status(status_); } - void SetHash(uint64_t hash) { - hash_ = hash; - } + void SetHash(uint64_t hash) { hash_ = hash; } - void SetApplyindex(uint64_t index) { - applyindex_ = index; - } + void SetApplyindex(uint64_t index) { applyindex_ = index; } - void SetStatus(const COPYSET_OP_STATUS& status) { - status_ = status; - } + void SetStatus(const COPYSET_OP_STATUS& status) { status_ = status; } - void SetFakeReturn(FakeReturn* fakeret) { - fakeret_ = fakeret; - } + void SetFakeReturn(FakeReturn* fakeret) { fakeret_ = fakeret; } public: uint64_t applyindex_; @@ -1054,30 +1002,29 @@ class FakeCreateCopysetService : public curve::chunkserver::CopysetService { class FakeScheduleService : public ::curve::mds::schedule::ScheduleService { public: - void RapidLeaderSchedule( - google::protobuf::RpcController* cntl_base, - const RapidLeaderScheduleRequst* request, - RapidLeaderScheduleResponse* response, - google::protobuf::Closure* done) { + void RapidLeaderSchedule(google::protobuf::RpcController* cntl_base, + const RapidLeaderScheduleRequst* request, + RapidLeaderScheduleResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { cntl_base->SetFailed("failed"); return; } - auto resp = static_cast( - fakeret_->response_); + auto resp = + static_cast(fakeret_->response_); response->CopyFrom(*resp); } void QueryChunkServerRecoverStatus( google::protobuf::RpcController* cntl_base, - const QueryChunkServerRecoverStatusRequest *request, - QueryChunkServerRecoverStatusResponse *response, + const QueryChunkServerRecoverStatusRequest* request, + QueryChunkServerRecoverStatusResponse* response, google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { cntl_base->SetFailed("failed"); return; } @@ -1086,9 +1033,7 @@ class FakeScheduleService : public ::curve::mds::schedule::ScheduleService { response->CopyFrom(*resp); } - void SetFakeReturn(FakeReturn* fakeret) { - fakeret_ = fakeret; - } + void SetFakeReturn(FakeReturn* fakeret) { fakeret_ = fakeret; } FakeReturn* fakeret_; }; @@ -1118,15 +1063,11 @@ class FakeMDS { std::vector conf; }; - FakeScheduleService* GetScheduleService() { - return &fakeScheduleService_; - } + FakeScheduleService* GetScheduleService() { return &fakeScheduleService_; } - FakeMDSCurveFSService* GetMDSService() { - return &fakecurvefsservice_; - } + FakeMDSCurveFSService* GetMDSService() { return &fakecurvefsservice_; } - std::vector GetCreateCopysetService() { + std::vector GetCreateCopysetService() { return copysetServices_; } @@ -1134,15 +1075,11 @@ class FakeMDS { return chunkServices_; } - CliServiceFake* GetCliService() { - return &fakeCliService_; - } + CliServiceFake* GetCliService() { return &fakeCliService_; } - std::vector GetChunkservice() { - return chunkServices_; - } + std::vector GetChunkservice() { return chunkServices_; } - std::vector GetRaftStateService() { + std::vector GetRaftStateService() { return raftStateServices_; } @@ -1159,23 +1096,23 @@ class FakeMDS { private: std::vector copysetnodeVec_; brpc::Server* server_; - std::vector chunkservers_; + std::vector chunkservers_; std::vector server_addrs_; std::vector peers_; - std::vector chunkServices_; - 
std::vector copysetServices_;
-    std::vector raftStateServices_;
-    std::vector fakeChunkServerServices_;
+    std::vector chunkServices_;
+    std::vector copysetServices_;
+    std::vector raftStateServices_;
+    std::vector fakeChunkServerServices_;
     std::string filename_;
     uint64_t size_;
 
-    CliServiceFake fakeCliService_;
+    CliServiceFake fakeCliService_;
     FakeMDSCurveFSService fakecurvefsservice_;
     FakeMDSTopologyService faketopologyservice_;
     FakeMDSHeartbeatService fakeHeartbeatService_;
     FakeScheduleService fakeScheduleService_;
 
-    std::map metrics_;
+    std::map metrics_;
 };
 
-#endif  // TEST_CLIENT_FAKE_FAKEMDS_H_
+#endif  // TEST_CLIENT_FAKE_FAKEMDS_H_
diff --git a/test/client/inflight_rpc_control_test.cpp b/test/client/inflight_rpc_control_test.cpp
index 8d6d4de1ee..717211348f 100644
--- a/test/client/inflight_rpc_control_test.cpp
+++ b/test/client/inflight_rpc_control_test.cpp
@@ -72,7 +72,7 @@ TEST(InflightRPCTest, TestInflightRPC) {
     int maxInflightNum = 8;
 
     {
-        // 测试inflight数量
+        // Test the number of inflight requests
         InflightControl control;
         control.SetMaxInflightNum(maxInflightNum);
         ASSERT_EQ(0, control.GetCurrentInflightNum());
@@ -89,7 +89,7 @@ TEST(InflightRPCTest, TestInflightRPC) {
     }
 
     {
-        // 测试GetInflightTokan与ReleaseInflightToken的并发
+        // Test the concurrency of GetInflightToken and ReleaseInflightToken
        InflightControl control;
         control.SetMaxInflightNum(maxInflightNum);
 
@@ -123,7 +123,7 @@ TEST(InflightRPCTest, TestInflightRPC) {
     }
 
     {
-        // 测试WaitInflightAllComeBack
+        // Test WaitInflightAllComeBack
         InflightControl control;
         control.SetMaxInflightNum(maxInflightNum);
         for (int i = 1; i <= maxInflightNum; ++i) {
@@ -148,13 +148,15 @@ TEST(InflightRPCTest, TestInflightRPC) {
 }
 
 TEST(InflightRPCTest, FileCloseTest) {
-    // 测试在文件关闭的时候,lese续约失败不会调用iomanager已析构的资源
-    // lease时长10s,在lease期间仅续约一次,一次失败就会调用iomanager
-    // block IO,这时候其实调用的是scheduler的LeaseTimeoutBlockIO
+    // Test that when the lease renewal fails at the time of file closure, it
+    // will not invoke the already destructed resources of the IO manager. The
+    // lease duration is 10 seconds, and only one renewal is allowed during the
+    // lease period. If the renewal fails, it will trigger the IO manager's
+    // block IO, which actually calls the LeaseTimeoutBlockIO of the scheduler.
     IOOption ioOption;
     ioOption.reqSchdulerOpt.ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS =
         10000;
-    // 设置inflight RPC最大数量为1
+    // Set the maximum number of inflight RPCs to 1
     ioOption.ioSenderOpt.inflightOpt.fileMaxInFlightRPCNum = 1;
 
     std::condition_variable cv;
@@ -200,7 +202,8 @@ TEST(InflightRPCTest, FileCloseTest) {
         LeaseExecutor lease(lopt, userinfo, nullptr, iomanager);
 
         for (int j = 0; j < 5; j++) {
-            // 测试iomanager退出之后,lease再去调用其scheduler资源不会crash
+            // Test that after the iomanager exits, the lease calling its
+            // scheduler resources again does not crash
             lease.InvalidLease();
         }
 
@@ -214,11 +217,12 @@ TEST(InflightRPCTest, FileCloseTest) {
         }
     };
 
-    // 并发两个线程,一个线程启动iomanager初始化,然后反初始化
-    // 另一个线程启动lease续约,然后调用iomanager使其block IO
-    // 预期:并发两个线程,lease线程续约失败即使在iomanager线程
-    // 退出的同时去调用其block IO接口也不会出现并发竞争共享资源的
-    // 场景。
+    // Concurrently run two threads: one thread initializes the IO manager and
+    // then deinitializes it, while the other thread initiates lease renewal and
+    // then calls the IO manager to make it block IO. Expectation: Concurrent
+    // execution of the two threads should not result in concurrent competition
+    // for shared resources, even if the lease thread fails to renew while the
+    // IO manager thread exits.
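The InflightControl assertions above pin down a small token-counting contract: GetInflightToken blocks once maxInflightNum tokens are outstanding, ReleaseInflightToken returns one, and WaitInflightAllComeBack blocks until the count drops back to zero. A minimal sketch of that contract follows; it is not the curve implementation, and the class and member names are invented for illustration:

    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    // Illustrative stand-in for the inflight controller exercised above.
    class InflightLimiter {
     public:
        explicit InflightLimiter(uint64_t maxInflight) : max_(maxInflight) {}

        // Blocks while max_ tokens are already outstanding.
        void GetInflightToken() {
            std::unique_lock<std::mutex> lk(mtx_);
            cv_.wait(lk, [this] { return inflight_ < max_; });
            ++inflight_;
        }

        // Returns a token and wakes both blocked senders and waiters.
        void ReleaseInflightToken() {
            std::lock_guard<std::mutex> lk(mtx_);
            --inflight_;
            cv_.notify_all();
        }

        // Blocks until every outstanding token has come back.
        void WaitInflightAllComeBack() {
            std::unique_lock<std::mutex> lk(mtx_);
            cv_.wait(lk, [this] { return inflight_ == 0; });
        }

     private:
        std::mutex mtx_;
        std::condition_variable cv_;
        uint64_t max_;
        uint64_t inflight_ = 0;
    };

A single condition variable suffices here because ReleaseInflightToken uses notify_all; a production version would likely separate the two wait conditions to cut down spurious wakeups under load.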
std::thread t1(f1); std::thread t2(f2); diff --git a/test/client/iotracker_splitor_unittest.cpp b/test/client/iotracker_splitor_unittest.cpp index 1f423250fa..10dae34e55 100644 --- a/test/client/iotracker_splitor_unittest.cpp +++ b/test/client/iotracker_splitor_unittest.cpp @@ -31,22 +31,22 @@ #include //NOLINT #include // NOLINT #include -#include //NOLINT +#include //NOLINT +#include "include/client/libcurve.h" #include "src/client/client_common.h" #include "src/client/client_config.h" #include "src/client/config_info.h" #include "src/client/file_instance.h" #include "src/client/io_tracker.h" #include "src/client/iomanager4file.h" -#include "include/client/libcurve.h" #include "src/client/libcurve_file.h" #include "src/client/mds_client.h" #include "src/client/metacache.h" #include "src/client/metacache_struct.h" #include "src/client/request_context.h" -#include "src/client/splitor.h" #include "src/client/source_reader.h" +#include "src/client/splitor.h" #include "test/client/fake/fakeMDS.h" #include "test/client/fake/mockMDS.h" #include "test/client/fake/mock_schedule.h" @@ -90,7 +90,8 @@ class IOTrackerSplitorTest : public ::testing::Test { fopt.ioOpt.ioSplitOpt.fileIOSplitMaxSizeKB = 64; fopt.ioOpt.ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 1000; fopt.ioOpt.ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - fopt.ioOpt.ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; // NOLINT + fopt.ioOpt.ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = + 500; // NOLINT fopt.ioOpt.metaCacheOpt.metacacheGetLeaderRetry = 3; fopt.ioOpt.metaCacheOpt.metacacheRPCRetryIntervalUS = 500; fopt.ioOpt.reqSchdulerOpt.scheduleQueueCapacity = 4096; @@ -131,11 +132,11 @@ class IOTrackerSplitorTest : public ::testing::Test { void InsertMetaCache() { if (server.AddService(&curvefsservice, - brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { LOG(FATAL) << "Fail to add service"; } if (server.AddService(&topologyservice, - brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { LOG(FATAL) << "Fail to add service"; } brpc::ServerOptions options; @@ -148,7 +149,7 @@ class IOTrackerSplitorTest : public ::testing::Test { * 1. set openfile response */ ::curve::mds::OpenFileResponse* openresponse = - new ::curve::mds::OpenFileResponse(); + new ::curve::mds::OpenFileResponse(); ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession; se->set_sessionid("1"); se->set_createtime(12345); @@ -170,21 +171,23 @@ class IOTrackerSplitorTest : public ::testing::Test { openresponse->set_statuscode(::curve::mds::StatusCode::kOK); openresponse->set_allocated_protosession(se); openresponse->set_allocated_fileinfo(fin); - FakeReturn* openfakeret = new FakeReturn(nullptr, static_cast(openresponse)); // NOLINT + FakeReturn* openfakeret = new FakeReturn( + nullptr, static_cast(openresponse)); // NOLINT curvefsservice.SetOpenFile(openfakeret); fileinstance_->Open(); /** * 2. set closefile response */ - ::curve::mds::CloseFileResponse* closeresp = new ::curve::mds::CloseFileResponse; // NOLINT + ::curve::mds::CloseFileResponse* closeresp = + new ::curve::mds::CloseFileResponse; // NOLINT closeresp->set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* closefileret - = new FakeReturn(nullptr, static_cast(closeresp)); + FakeReturn* closefileret = + new FakeReturn(nullptr, static_cast(closeresp)); curvefsservice.SetCloseFile(closefileret); /** - * 3. 设置GetOrAllocateSegmentresponse + * 3. 
Set GetOrAllocateSegmentresponse */ curve::mds::GetOrAllocateSegmentResponse* response = new curve::mds::GetOrAllocateSegmentResponse(); @@ -192,30 +195,27 @@ class IOTrackerSplitorTest : public ::testing::Test { response->set_statuscode(::curve::mds::StatusCode::kOK); response->set_allocated_pagefilesegment(pfs); - response->mutable_pagefilesegment()-> - set_logicalpoolid(1234); - response->mutable_pagefilesegment()-> - set_segmentsize(1 * 1024 * 1024 * 1024); - response->mutable_pagefilesegment()-> - set_chunksize(4 * 1024 * 1024); - response->mutable_pagefilesegment()-> - set_startoffset(0); - - for (int i = 0; i < 256; i ++) { + response->mutable_pagefilesegment()->set_logicalpoolid(1234); + response->mutable_pagefilesegment()->set_segmentsize(1 * 1024 * 1024 * + 1024); + response->mutable_pagefilesegment()->set_chunksize(4 * 1024 * 1024); + response->mutable_pagefilesegment()->set_startoffset(0); + + for (int i = 0; i < 256; i++) { auto chunk = response->mutable_pagefilesegment()->add_chunks(); chunk->set_copysetid(i); chunk->set_chunkid(i); } - getsegmentfakeret = new FakeReturn(nullptr, - static_cast(response)); + getsegmentfakeret = + new FakeReturn(nullptr, static_cast(response)); curvefsservice.SetGetOrAllocateSegmentFakeReturn(getsegmentfakeret); curve::mds::GetOrAllocateSegmentResponse* notallocateresponse = - new curve::mds::GetOrAllocateSegmentResponse(); - notallocateresponse->set_statuscode(::curve::mds::StatusCode - ::kSegmentNotAllocated); - notallocatefakeret = new FakeReturn(nullptr, - static_cast(notallocateresponse)); + new curve::mds::GetOrAllocateSegmentResponse(); + notallocateresponse->set_statuscode( + ::curve::mds::StatusCode ::kSegmentNotAllocated); + notallocatefakeret = + new FakeReturn(nullptr, static_cast(notallocateresponse)); // set GetOrAllocateSegmentResponse for read from clone source curve::mds::GetOrAllocateSegmentResponse* cloneSourceResponse = @@ -224,28 +224,27 @@ class IOTrackerSplitorTest : public ::testing::Test { cloneSourceResponse->set_statuscode(::curve::mds::StatusCode::kOK); cloneSourceResponse->set_allocated_pagefilesegment(clonepfs); - cloneSourceResponse->mutable_pagefilesegment()-> - set_logicalpoolid(1); - cloneSourceResponse->mutable_pagefilesegment()-> - set_segmentsize(1 * 1024 * 1024 * 1024); - cloneSourceResponse->mutable_pagefilesegment()-> - set_chunksize(4 * 1024 * 1024); - cloneSourceResponse->mutable_pagefilesegment()-> - set_startoffset(1 * 1024 * 1024 * 1024); + cloneSourceResponse->mutable_pagefilesegment()->set_logicalpoolid(1); + cloneSourceResponse->mutable_pagefilesegment()->set_segmentsize( + 1 * 1024 * 1024 * 1024); + cloneSourceResponse->mutable_pagefilesegment()->set_chunksize(4 * 1024 * + 1024); + cloneSourceResponse->mutable_pagefilesegment()->set_startoffset( + 1 * 1024 * 1024 * 1024); for (int i = 256; i < 512; i++) { - auto chunk = cloneSourceResponse->mutable_pagefilesegment() - ->add_chunks(); + auto chunk = + cloneSourceResponse->mutable_pagefilesegment()->add_chunks(); chunk->set_copysetid(i); chunk->set_chunkid(i); } - getsegmentfakeretclone = new FakeReturn(nullptr, - static_cast(cloneSourceResponse)); + getsegmentfakeretclone = + new FakeReturn(nullptr, static_cast(cloneSourceResponse)); /** * 4. 
set refresh response */ - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; info->set_filename("1_userinfo_.txt"); info->set_seqnum(2); info->set_id(1); @@ -260,18 +259,19 @@ class IOTrackerSplitorTest : public ::testing::Test { refreshresp->set_statuscode(::curve::mds::StatusCode::kOK); refreshresp->set_sessionid("1234"); refreshresp->set_allocated_fileinfo(info); - FakeReturn* refreshfakeret - = new FakeReturn(nullptr, static_cast(refreshresp)); + FakeReturn* refreshfakeret = + new FakeReturn(nullptr, static_cast(refreshresp)); curvefsservice.SetRefreshSession(refreshfakeret, nullptr); /** - * 5. 设置topology返回值 + * 5. Set topology return value */ - ::curve::mds::topology::GetChunkServerListInCopySetsResponse* response_1 - = new ::curve::mds::topology::GetChunkServerListInCopySetsResponse; + ::curve::mds::topology::GetChunkServerListInCopySetsResponse* + response_1 = new ::curve::mds::topology:: + GetChunkServerListInCopySetsResponse; response_1->set_statuscode(0); uint64_t chunkserveridc = 1; - for (int i = 0; i < 256; i ++) { + for (int i = 0; i < 256; i++) { auto csinfo = response_1->add_csinfo(); csinfo->set_copysetid(i); @@ -282,23 +282,23 @@ class IOTrackerSplitorTest : public ::testing::Test { cslocs->set_port(9104); } } - FakeReturn* faktopologyeret = new FakeReturn(nullptr, - static_cast(response_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(response_1)); topologyservice.SetFakeReturn(faktopologyeret); - curve::client::MetaCache* mc = fileinstance_->GetIOManager4File()-> - GetMetaCache(); + curve::client::MetaCache* mc = + fileinstance_->GetIOManager4File()->GetMetaCache(); curve::client::FInfo_t fi; fi.userinfo = userinfo; - fi.chunksize = 4 * 1024 * 1024; + fi.chunksize = 4 * 1024 * 1024; fi.segmentsize = 1 * 1024 * 1024 * 1024ul; SegmentInfo sinfo; LogicalPoolCopysetIDInfo_t lpcsIDInfo; mdsclient_->GetOrAllocateSegment(true, 0, &fi, nullptr, &sinfo); int count = 0; for (auto iter : sinfo.chunkvec) { - uint64_t index = (sinfo.startoffset + count*fi.chunksize ) - / fi.chunksize; + uint64_t index = + (sinfo.startoffset + count * fi.chunksize) / fi.chunksize; mc->UpdateChunkInfoByIndex(index, iter); ++count; } @@ -339,17 +339,17 @@ class IOTrackerSplitorTest : public ::testing::Test { curvefsservice.SetOpenFile(fakeOpen_.get()); } - FileClient *fileClient_; + FileClient* fileClient_; UserInfo_t userinfo; std::shared_ptr mdsclient_; FileServiceOption fopt; - FileInstance *fileinstance_; + FileInstance* fileinstance_; brpc::Server server; FakeMDSCurveFSService curvefsservice; FakeTopologyService topologyservice; - FakeReturn *getsegmentfakeret; - FakeReturn *notallocatefakeret; - FakeReturn *getsegmentfakeretclone; + FakeReturn* getsegmentfakeret; + FakeReturn* notallocatefakeret; + FakeReturn* getsegmentfakeretclone; OpenFileResponse openResp_; std::unique_ptr fakeOpen_; @@ -376,7 +376,7 @@ TEST_F(IOTrackerSplitorTest, AsyncStartRead) { { std::unique_lock lk(readmtx); - readcv.wait(lk, []()->bool{return ioreadflag;}); + readcv.wait(lk, []() -> bool { return ioreadflag; }); } LOG(ERROR) << "address = " << &data; ASSERT_EQ('a', data[0]); @@ -521,7 +521,7 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartRead) { { std::unique_lock lk(readmtx); - readcv.wait(lk, []()->bool{return ioreadflag;}); + readcv.wait(lk, []() -> bool { return ioreadflag; }); } ASSERT_EQ('a', data[0]); ASSERT_EQ('a', data[4 * 1024 - 1]); @@ -557,7 +557,7 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWrite) { { 
std::unique_lock lk(writemtx);
-        writecv.wait(lk, []()->bool{return iowriteflag;});
+        writecv.wait(lk, []() -> bool { return iowriteflag; });
     }
 
     std::unique_ptr writebuffer(new char[aioctx->length]);
@@ -603,13 +603,11 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWriteReadGetSegmentFail) {
     memset(data + 4 * 1024, 'b', chunk_size);
     memset(data + 4 * 1024 + chunk_size, 'c', 4 * 1024);
 
-    // 设置mds一侧get segment接口返回失败,底层task thread层会一直重试,
-    // 但是不会阻塞上层继续向下发送IO请求
-    int reqcount = 32;
-    auto threadFunc1 = [&]() {
-        while (reqcount > 0) {
-            fileinstance_->AioWrite(aioctx);
-            reqcount--;
+    // When the 'get segment' interface on the MDS side fails, the underlying
+    // task thread layer keeps retrying, but this does not block the upper
+    // layer from continuing to send IO requests downward.
+    int reqcount = 32;
+    auto threadFunc1 = [&]() {
+        while (reqcount > 0) {
+            fileinstance_->AioWrite(aioctx);
+            reqcount--;
         }
     };
@@ -636,15 +634,12 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWriteReadGetServerlistFail) {
     ioctxmana->SetRequestScheduler(mockschuler);
     ioctxmana->SetIOOpt(fopt.ioOpt);
 
-    // offset 10*1024*1024*1024ul 不在metacache里
-    // client回去mds拿segment和serverlist
-    CurveAioContext* aioctx = new CurveAioContext;
-    aioctx->offset = 10*1024*1024*1024ul;
-    aioctx->length = chunk_size + 8 * 1024;
-    aioctx->ret = LIBCURVE_ERROR::OK;
-    aioctx->cb = writecallback;
-    aioctx->buf = new char[aioctx->length];
-    aioctx->op = LIBCURVE_OP::LIBCURVE_OP_WRITE;
+    // The offset 10*1024*1024*1024ul is not in the metacache, so the client
+    // will fetch the segment and server list from the MDS.
+    CurveAioContext* aioctx = new CurveAioContext;
+    aioctx->offset = 10*1024*1024*1024ul;
+    aioctx->length = chunk_size + 8 * 1024;
+    aioctx->ret = LIBCURVE_ERROR::OK;
+    aioctx->cb = writecallback;
+    aioctx->buf = new char[aioctx->length];
+    aioctx->op = LIBCURVE_OP::LIBCURVE_OP_WRITE;
 
     char* data = static_cast(aioctx->buf);
@@ -652,10 +647,10 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWriteReadGetServerlistFail) {
     memset(data + 4 * 1024, 'b', chunk_size);
     memset(data + 4 * 1024 + chunk_size, 'c', 4 * 1024);
 
-    // 设置mds一侧get server list接口返回失败,底层task thread层会一直重试
-    // 但是不会阻塞,上层继续向下发送IO请求
-    int reqcount = 32;
-    auto threadFunc1 = [&]() {
+    // If the 'get server list' interface on the MDS side fails, the underlying
+    // task thread layer keeps retrying, but this does not block the upper
+    // layer from continuing to send IO requests downstream.
+    int reqcount = 32;
+    auto threadFunc1 = [&]() {
         while (reqcount > 0) {
             fileinstance_->AioWrite(aioctx);
             reqcount--;
@@ -722,7 +717,7 @@ TEST_F(IOTrackerSplitorTest, ManagerStartWrite) {
         process.join();
     }
 
-    std::unique_ptr writebuffer(new char[length]);
+    std::unique_ptr writebuffer(new char[length]);
     memcpy(writebuffer.get(), writeData.to_string().c_str(), length);
 
     ASSERT_EQ('a', writebuffer[0]);
@@ -768,8 +763,8 @@ TEST_F(IOTrackerSplitorTest, ExceptionTest_TEST) {
 
     auto threadfunc = [&]() {
         iotracker->SetUserDataType(UserDataType::RawBuffer);
-        iotracker->StartWrite(nullptr, offset, length, mdsclient_.get(),
-                              &fi, nullptr);
+        iotracker->StartWrite(nullptr, offset, length, mdsclient_.get(), &fi,
+                              nullptr);
     };
     std::thread process(threadfunc);
 
@@ -800,8 +795,7 @@ TEST_F(IOTrackerSplitorTest, BoundaryTEST) {
 
     // this offset and length will make splitor split fail.
     // we set disk size = 1G.
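The BoundaryTEST comment above is easy to verify by hand: the write starts 4 MiB + 4 KiB before the end of the 1 GiB volume and is 4 MiB + 8 KiB long, so it ends 4 KiB past end-of-volume, which is exactly why the splitor has to fail. A self-contained restatement of that arithmetic (the constants are duplicated here for illustration, not taken from the test fixture):

    #include <cstdint>

    constexpr uint64_t kKiB = 1024;
    constexpr uint64_t kMiB = 1024 * kKiB;
    constexpr uint64_t kGiB = 1024 * kMiB;

    constexpr uint64_t kDiskSize = 1 * kGiB;  // test volume size
    constexpr uint64_t kOffset = kDiskSize - 4 * kMiB - 4 * kKiB;
    constexpr uint64_t kLength = 4 * kMiB + 8 * kKiB;

    // The request ends 4 KiB past the end of the volume, so it must be rejected.
    static_assert(kOffset + kLength == kDiskSize + 4 * kKiB, "ends past EOF");
    static_assert(kOffset + kLength > kDiskSize, "crosses the volume boundary");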
- uint64_t offset = 1 * 1024 * 1024 * 1024 - - 4 * 1024 * 1024 - 4 *1024; + uint64_t offset = 1 * 1024 * 1024 * 1024 - 4 * 1024 * 1024 - 4 * 1024; uint64_t length = 4 * 1024 * 1024 + 8 * 1024; char* buf = new char[length]; @@ -828,11 +822,10 @@ TEST_F(IOTrackerSplitorTest, largeIOTest) { /** * this offset and length will make splitor split into two 8k IO. */ - uint64_t length = 2 * 64 * 1024; // 128KB + uint64_t length = 2 * 64 * 1024; // 128KB uint64_t offset = 4 * 1024 * 1024 - length; // 4MB - 128KB char* buf = new char[length]; - memset(buf, 'a', 64 * 1024); // 64KB memset(buf + 64 * 1024, 'b', 64 * 1024); // 64KB butil::IOBuf writeData; @@ -902,37 +895,33 @@ TEST_F(IOTrackerSplitorTest, InvalidParam) { mdsclient_.get(), &fi, nullptr)); ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests( - nullptr, mc, - &reqlist, cid, &iobuf, offset, length, 0)); + nullptr, mc, &reqlist, cid, &iobuf, offset, length, 0)); ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( iotracker, nullptr, &reqlist, &iobuf, offset, length, mdsclient_.get(), nullptr, nullptr)); - ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests( - iotracker, nullptr, - &reqlist, cid, &iobuf, offset, length, 0)); + ASSERT_EQ( + -1, curve::client::Splitor::SingleChunkIO2ChunkRequests( + iotracker, nullptr, &reqlist, cid, &iobuf, offset, length, 0)); ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( iotracker, mc, &reqlist, &iobuf, offset, length, mdsclient_.get(), nullptr, nullptr)); - ASSERT_EQ( - -1, curve::client::Splitor::IO2ChunkRequests( - iotracker, mc, &reqlist, &iobuf, offset, length, nullptr, - &fi, nullptr)); + ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( + iotracker, mc, &reqlist, &iobuf, offset, length, nullptr, + &fi, nullptr)); ASSERT_EQ(0, curve::client::Splitor::SingleChunkIO2ChunkRequests( - iotracker, mc, - &reqlist, cid, &iobuf, offset, length, 0)); + iotracker, mc, &reqlist, cid, &iobuf, offset, length, 0)); ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( iotracker, mc, nullptr, &iobuf, offset, length, mdsclient_.get(), nullptr, nullptr)); ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests( - iotracker, mc, - nullptr, cid, &iobuf, offset, length, 0)); + iotracker, mc, nullptr, cid, &iobuf, offset, length, 0)); ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( iotracker, mc, &reqlist, nullptr, offset, length, @@ -961,7 +950,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { MetaCache metaCache; FInfo_t fileInfo; - fileInfo.chunksize = 16 * 1024 * 1024; // 16M + fileInfo.chunksize = 16 * 1024 * 1024; // 16M fileInfo.filestatus = FileStatus::CloneMetaInstalled; CloneSourceInfo cloneSourceInfo; @@ -969,7 +958,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { cloneSourceInfo.length = 10ull * 1024 * 1024 * 1024; // 10GB cloneSourceInfo.segmentSize = 1ull * 1024 * 1024 * 1024; // 1GB - // 源卷只分配了第一个和最后一个segment + // The source volume has only allocated the first and last segments cloneSourceInfo.allocatedSegmentOffsets.insert(0); cloneSourceInfo.allocatedSegmentOffsets.insert(cloneSourceInfo.length - cloneSourceInfo.segmentSize); @@ -980,14 +969,14 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { ChunkIndex chunkIdx = 0; RequestSourceInfo sourceInfo; - // 第一个chunk + // First chunk sourceInfo = Splitor::CalcRequestSourceInfo(&ioTracker, &metaCache, chunkIdx); ASSERT_TRUE(sourceInfo.IsValid()); ASSERT_EQ(sourceInfo.cloneFileSource, fileInfo.sourceInfo.name); ASSERT_EQ(sourceInfo.cloneFileOffset, 0); - // 克隆卷最后一个chunk + 
// The last chunk of the clone volume (10 GiB / 16 MiB = 640 chunks,
    // so index 639 maps to source offset 639 * 16 MiB = 10720641024)
     chunkIdx = fileInfo.sourceInfo.length / fileInfo.chunksize - 1;
     LOG(INFO) << "clone length = " << fileInfo.sourceInfo.length
               << ", chunk size = " << fileInfo.chunksize
@@ -1000,19 +989,19 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) {
     ASSERT_EQ(sourceInfo.cloneFileSource, fileInfo.sourceInfo.name);
     ASSERT_EQ(sourceInfo.cloneFileOffset, 10720641024);
 
-    // 源卷未分配segment
-    // 读取每个segment的第一个chunk
+    // These segments are not allocated on the source volume;
+    // read the first chunk of each such segment
     for (int i = 1; i < 9; ++i) {
         ChunkIndex chunkIdx =
             i * cloneSourceInfo.segmentSize / fileInfo.chunksize;
 
-        RequestSourceInfo sourceInfo = Splitor::CalcRequestSourceInfo(
-            &ioTracker, &metaCache, chunkIdx);
+        RequestSourceInfo sourceInfo =
+            Splitor::CalcRequestSourceInfo(&ioTracker, &metaCache, chunkIdx);
         ASSERT_FALSE(sourceInfo.IsValid());
         ASSERT_TRUE(sourceInfo.cloneFileSource.empty());
         ASSERT_EQ(sourceInfo.cloneFileOffset, 0);
     }
 
-    // 超过长度
+    // Chunk index beyond the source volume length
     chunkIdx = fileInfo.sourceInfo.length / fileInfo.chunksize;
 
     sourceInfo =
@@ -1021,7 +1010,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) {
     ASSERT_TRUE(sourceInfo.cloneFileSource.empty());
     ASSERT_EQ(sourceInfo.cloneFileOffset, 0);
 
-    // 源卷长度为0
+    // Source volume length is 0
     chunkIdx = 0;
     fileInfo.sourceInfo.length = 0;
     metaCache.UpdateFileInfo(fileInfo);
@@ -1031,7 +1020,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) {
     ASSERT_TRUE(sourceInfo.cloneFileSource.empty());
     ASSERT_EQ(sourceInfo.cloneFileOffset, 0);
 
-    // 不是read/write请求
+    // Not a read/write request
     chunkIdx = 1;
     ioTracker.SetOpType(OpType::READ_SNAP);
     sourceInfo =
@@ -1045,7 +1034,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) {
 
     chunkIdx = 0;
 
-    // 不是克隆卷
+    // Not a clone volume
     sourceInfo =
         Splitor::CalcRequestSourceInfo(&ioTracker, &metaCache, chunkIdx);
     ASSERT_FALSE(sourceInfo.IsValid());
@@ -1068,7 +1057,7 @@ TEST_F(IOTrackerSplitorTest, stripeTest) {
     fi.segmentsize = 1 * 1024 * 1024 * 1024ul;
     fi.stripeUnit = 1 * 1024 * 1024;
     fi.stripeCount = 4;
-    memset(buf, 'a', length);        // 64KB
+    memset(buf, 'a', length);  // 64KB
     dataCopy.append(buf, length);
     curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File();
     MetaCache* mc = iomana->GetMetaCache();
@@ -1162,9 +1151,9 @@ TEST_F(IOTrackerSplitorTest, TestDisableStripeForStripeFile) {
     IOTracker ioTracker(iomanager, cache, &scheduler, nullptr, true);
 
     std::vector reqlist;
-    ASSERT_EQ(0,
-        Splitor::IO2ChunkRequests(&ioTracker, cache, &reqlist, &dataCopy,
-            offset, length, mdsclient_.get(), &fi, nullptr));
+    ASSERT_EQ(0, Splitor::IO2ChunkRequests(&ioTracker, cache, &reqlist,
+                                           &dataCopy, offset, length,
+                                           mdsclient_.get(), &fi, nullptr));
     ASSERT_EQ(2, reqlist.size());
 
     auto* first = reqlist[0];
@@ -1206,7 +1195,7 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegment) {
     }
 
     for (int i = 0; i < length; i++) {
-        ASSERT_EQ(0, data[i]);
+        ASSERT_EQ(0, data[i]);
     }
     delete[] data;
 }
@@ -1233,11 +1222,11 @@ TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegment) {
 
     {
         std::unique_lock lk(readmtx);
-        readcv.wait(lk, []()->bool{return ioreadflag;});
+        readcv.wait(lk, []() -> bool { return ioreadflag; });
     }
 
     for (int i = 0; i < aioctx.length; i++) {
-        ASSERT_EQ(0, data[i]);
+        ASSERT_EQ(0, data[i]);
     }
     delete[] data;
 }
@@ -1303,7 +1292,7 @@ TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegment2) {
 
     {
         std::unique_lock lk(readmtx);
-        readcv.wait(lk, []()->bool{return ioreadflag;});
+        readcv.wait(lk, []() -> bool { return ioreadflag; });
     }
 
     for (int i = 0; i < 4 * 1024; i++) {
@@ -1342,8 +1331,7 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegmentFromOrigin) { auto& handlers = SourceReader::GetInstance().GetReadHandlers(); handlers.emplace( - std::piecewise_construct, - std::forward_as_tuple("/clonesource"), + std::piecewise_construct, std::forward_as_tuple("/clonesource"), std::forward_as_tuple(fileinstance2, ::time(nullptr), false)); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); @@ -1352,7 +1340,7 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegmentFromOrigin) { mc->UpdateChunkInfoByIndex(257, chunkIdInfo); FInfo_t fileInfo; - fileInfo.chunksize = 4 * 1024 * 1024; // 4M + fileInfo.chunksize = 4 * 1024 * 1024; // 4M fileInfo.fullPathName = "/1_userinfo_.txt"; fileInfo.owner = "userinfo"; fileInfo.filestatus = FileStatus::CloneMetaInstalled; @@ -1389,7 +1377,6 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegmentFromOrigin) { ASSERT_EQ('a', data[4 * 1024 + chunk_size]); ASSERT_EQ('a', data[length - 1]); - fileinstance2->UnInitialize(); delete fileinstance2; @@ -1398,8 +1385,8 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegmentFromOrigin) { TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegmentFromOrigin) { curvefsservice.SetGetOrAllocateSegmentFakeReturn(notallocatefakeret); - curvefsservice.SetGetOrAllocateSegmentFakeReturnForClone - (getsegmentfakeretclone); + curvefsservice.SetGetOrAllocateSegmentFakeReturnForClone( + getsegmentfakeretclone); PrepareOpenFile(); MockRequestScheduler* mockschuler = new MockRequestScheduler; @@ -1420,8 +1407,7 @@ TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegmentFromOrigin) { auto& handlers = SourceReader::GetInstance().GetReadHandlers(); handlers.emplace( - std::piecewise_construct, - std::forward_as_tuple("/clonesource"), + std::piecewise_construct, std::forward_as_tuple("/clonesource"), std::forward_as_tuple(fileinstance2, ::time(nullptr), false)); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); @@ -1460,7 +1446,7 @@ TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegmentFromOrigin) { { std::unique_lock lk(readmtx); - readcv.wait(lk, []()->bool{return ioreadflag;}); + readcv.wait(lk, []() -> bool { return ioreadflag; }); } LOG(ERROR) << "address = " << &data; ASSERT_EQ('a', data[0]); @@ -1478,28 +1464,22 @@ TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegmentFromOrigin) { TEST_F(IOTrackerSplitorTest, TimedCloseFd) { std::unordered_map fakeHandlers; fakeHandlers.emplace( - std::piecewise_construct, - std::forward_as_tuple("/1"), + std::piecewise_construct, std::forward_as_tuple("/1"), std::forward_as_tuple( - nullptr, - ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, + nullptr, ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, true)); fakeHandlers.emplace( - std::piecewise_construct, - std::forward_as_tuple("/2"), + std::piecewise_construct, std::forward_as_tuple("/2"), std::forward_as_tuple( - nullptr, - ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, + nullptr, ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, false)); FileInstance* instance = new FileInstance(); fakeHandlers.emplace( - std::piecewise_construct, - std::forward_as_tuple("/3"), + std::piecewise_construct, std::forward_as_tuple("/3"), std::forward_as_tuple( instance, - ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, - false)); + ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, false)); SourceReader::GetInstance().SetReadHandlers(fakeHandlers); diff --git 
a/test/client/lease_executor_test.cpp b/test/client/lease_executor_test.cpp index 4f5629ad8b..e008abd8f6 100644 --- a/test/client/lease_executor_test.cpp +++ b/test/client/lease_executor_test.cpp @@ -16,17 +16,18 @@ /* * Project: curve - * File Created: 2019年11月20日 + * File Created: November 20, 2019 * Author: wuhanqing */ +#include "src/client/lease_executor.h" + +#include #include #include #include -#include #include "src/client/iomanager4file.h" -#include "src/client/lease_executor.h" #include "src/client/mds_client.h" #include "test/client/mock/mock_namespace_service.h" @@ -81,8 +82,8 @@ class LeaseExecutorTest : public ::testing::Test { response_.set_allocated_fileinfo(fileInfo); EXPECT_CALL(curveFsService_, RefreshSession(_, _, _, _)) - .WillRepeatedly(DoAll(SetArgPointee<2>(response_), - Invoke(MockRefreshSession))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(response_), Invoke(MockRefreshSession))); } protected: diff --git a/test/client/libcbd_libcurve_test.cpp b/test/client/libcbd_libcurve_test.cpp index 3f582b8a3c..82fe048992 100644 --- a/test/client/libcbd_libcurve_test.cpp +++ b/test/client/libcbd_libcurve_test.cpp @@ -21,33 +21,32 @@ * 2018/11/23 Wenyu Zhou Initial version */ -#include +#include #include #include -#include +#include #include // #define CBD_BACKEND_FAKE #include "include/client/libcbd.h" - -#include "src/client/libcurve_file.h" #include "include/client/libcurve.h" +#include "src/client/client_common.h" #include "src/client/file_instance.h" -#include "test/client/fake/mock_schedule.h" +#include "src/client/libcurve_file.h" #include "test/client/fake/fakeMDS.h" -#include "src/client/client_common.h" +#include "test/client/fake/mock_schedule.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" using curve::client::EndPoint; -#define BUFSIZE 4 * 1024 -#define FILESIZE 10uL * 1024 * 1024 * 1024 -#define NEWSIZE 20uL * 1024 * 1024 * 1024 +#define BUFSIZE 4 * 1024 +#define FILESIZE 10uL * 1024 * 1024 * 1024 +#define NEWSIZE 20uL * 1024 * 1024 * 1024 -#define filename "1_userinfo_test.img" +#define filename "1_userinfo_test.img" const uint64_t GiB = 1024ull * 1024 * 1024; @@ -68,11 +67,11 @@ class TestLibcbdLibcurve : public ::testing::Test { public: void SetUp() { FLAGS_chunkserver_list = - "127.0.0.1:9110:0,127.0.0.1:9111:0,127.0.0.1:9112:0"; + "127.0.0.1:9110:0,127.0.0.1:9111:0,127.0.0.1:9112:0"; mds_ = new FakeMDS(filename); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9110, &ep); braft::PeerId pd(ep); @@ -381,7 +380,8 @@ TEST_F(TestLibcbdLibcurve, ReadAndCloseConcurrencyTest) { auto readThread = [buffer](int fd) { auto start = curve::common::TimeUtility::GetTimeofDayMs(); - ASSERT_EQ(BUFSIZE, cbd_lib_pread(fd, (void*)buffer, 0, BUFSIZE)); // NOLINT + ASSERT_EQ(BUFSIZE, + cbd_lib_pread(fd, (void*)buffer, 0, BUFSIZE)); // NOLINT auto end = curve::common::TimeUtility::GetTimeofDayMs(); ASSERT_LE(end - start, 1000); @@ -429,12 +429,12 @@ TEST_F(TestLibcbdLibcurve, IncreaseEpochTest) { ASSERT_EQ(ret, LIBCURVE_ERROR::OK); } -std::string mdsMetaServerAddr = "127.0.0.1:9951"; // NOLINT -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string configpath = "./test/client/configs/client_libcbd.conf"; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9951"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string configpath = 
"./test/client/configs/client_libcbd.conf"; // NOLINT -const std::vector clientConf { +const std::vector clientConf{ std::string("mds.listen.addr=127.0.0.1:9951"), std::string("global.logPath=./runlog/"), std::string("chunkserver.rpcTimeoutMS=1000"), @@ -445,17 +445,16 @@ const std::vector clientConf { std::string("metacache.rpcRetryIntervalUS=500"), std::string("mds.rpcRetryIntervalUS=500"), std::string("schedule.threadpoolSize=2"), - std::string("discard.discardTaskDelayMs=10") -}; + std::string("discard.discardTaskDelayMs=10")}; -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); google::ParseCommandLineFlags(&argc, &argv, false); curve::CurveCluster* cluster = new curve::CurveCluster(); - cluster->PrepareConfig( - configpath, clientConf); + cluster->PrepareConfig(configpath, + clientConf); int ret = RUN_ALL_TESTS(); return ret; diff --git a/test/client/libcurve_interface_unittest.cpp b/test/client/libcurve_interface_unittest.cpp index 99d35696b4..8a0c7a4b90 100644 --- a/test/client/libcurve_interface_unittest.cpp +++ b/test/client/libcurve_interface_unittest.cpp @@ -24,6 +24,7 @@ #include #include #include + #include // NOLINT #include // NOLINT #include @@ -58,14 +59,14 @@ std::condition_variable writeinterfacecv; std::mutex interfacemtx; std::condition_variable interfacecv; -void writecallbacktest(CurveAioContext *context) { +void writecallbacktest(CurveAioContext* context) { std::lock_guard lk(writeinterfacemtx); writeflag = true; writeinterfacecv.notify_one(); LOG(INFO) << "aio call back here, errorcode = " << context->ret; } -void readcallbacktest(CurveAioContext *context) { +void readcallbacktest(CurveAioContext* context) { std::lock_guard lk(writeinterfacemtx); readflag = true; interfacecv.notify_one(); @@ -88,7 +89,7 @@ TEST_F(TestLibcurveInterface, InterfaceTest) { memcpy(userinfo.owner, "userinfo", 9); memcpy(userinfo.password, "", 1); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9115, &ep); PeerId pd(ep); @@ -128,7 +129,7 @@ TEST_F(TestLibcurveInterface, InterfaceTest) { ASSERT_NE(fd, -1); - char *buffer = new char[8 * 1024]; + char* buffer = new char[8 * 1024]; memset(buffer, 'a', 1024); memset(buffer + 1024, 'b', 1024); memset(buffer + 2 * 1024, 'c', 1024); @@ -155,7 +156,7 @@ TEST_F(TestLibcurveInterface, InterfaceTest) { std::unique_lock lk(writeinterfacemtx); writeinterfacecv.wait(lk, []() -> bool { return writeflag; }); } - char *readbuffer = new char[8 * 1024]; + char* readbuffer = new char[8 * 1024]; CurveAioContext readaioctx; readaioctx.buf = readbuffer; readaioctx.offset = 0; @@ -244,7 +245,7 @@ TEST_F(TestLibcurveInterface, FileClientTest) { FileClient fc; - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9115, &ep); PeerId pd(ep); @@ -279,7 +280,7 @@ TEST_F(TestLibcurveInterface, FileClientTest) { fiu_enable("test/client/fake/fakeMDS.GetOrAllocateSegment", 1, nullptr, 0); - char *buffer = new char[8 * 1024]; + char* buffer = new char[8 * 1024]; memset(buffer, 'a', 1024); memset(buffer + 1024, 'b', 1024); memset(buffer + 2 * 1024, 'c', 1024); @@ -303,7 +304,7 @@ TEST_F(TestLibcurveInterface, FileClientTest) { std::unique_lock lk(writeinterfacemtx); writeinterfacecv.wait(lk, []() -> bool { return writeflag; }); } - char *readbuffer = new char[8 * 1024]; + char* readbuffer = new char[8 * 1024]; memset(readbuffer, 0xFF, 8 * 1024); CurveAioContext readaioctx; readaioctx.buf = readbuffer; @@ -375,7 +376,7 @@ TEST(TestLibcurveInterface, 
ChunkserverUnstableTest) { mdsclient_.Initialize(fopt.metaServerOpt); fileinstance_.Initialize("/test", &mdsclient_, userinfo, fopt); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9151, &ep); PeerId pd(ep); @@ -413,12 +414,11 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { ASSERT_EQ(length, fileinstance_.Write(buffer, offset, length)); ASSERT_EQ(length, fileinstance_.Read(buffer, offset, length)); - // 正常情况下只有第一次会去get leader + // Normally, the leader is fetched only on the first request. ASSERT_EQ(1, cliservice->GetInvokeTimes()); - // metacache中被写过的copyset leadermaychange都处于正常状态 - ChunkIDInfo_t chunkinfo1; - MetaCacheErrorType rc = mc->GetChunkInfoByIndex(0, &chunkinfo1); - ASSERT_EQ(rc, MetaCacheErrorType::OK); + // In the metacache, LeaderMayChange is still in the normal state for every copyset that has been written. ChunkIDInfo_t chunkinfo1; MetaCacheErrorType rc = mc->GetChunkInfoByIndex(0, &chunkinfo1); ASSERT_EQ(rc, MetaCacheErrorType::OK); for (int i = 0; i < FLAGS_copyset_num; i++) { CopysetPeerInfo ci = mc->GetCopysetinfo(FLAGS_logic_pool_id, i); if (i == chunkinfo1.cpid_) { @@ -430,17 +430,21 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { } } - // 设置chunkservice返回失败,那么mds每次重试都会去拉新的leader - // 127.0.0.1:9151:0,127.0.0.1:9152:0,127.0.0.1:9153:0是当前集群信息 - // 127.0.0.1:9151对应第一个chunkservice - // 设置rpc失败,会导致client将该chunkserverid上的leader copyset都标记为 + // Make the chunkservice return failure, so every retry fetches a new leader. + // The current cluster information is: 127.0.0.1:9151:0, 127.0.0.1:9152:0, 127.0.0.1:9153:0. + // 127.0.0.1:9151 corresponds to the first chunkservice. + // An RPC failure causes the client to mark every leader copyset on that chunkserver id as leaderMayChange. chunkservice[0]->SetRPCFailed(); - // 现在写第二个chunk,第二个chunk与第一个chunk不在同一个copyset里,这次读写失败 - ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); - ASSERT_EQ(-2, fileinstance_.Read(buffer, 1 * chunk_size, length)); - // 获取第2个chunk的chunkid信息 + // Now write the second chunk; it is not in the same copyset as the first chunk, and this read and write fail. ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(-2, fileinstance_.Read(buffer, 1 * chunk_size, length)); + // Obtain the chunkid information of the second chunk. ChunkIDInfo_t chunkinfo2; rc = mc->GetChunkInfoByIndex(1, &chunkinfo2); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -449,33 +453,33 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { CopysetPeerInfo ci = mc->GetCopysetinfo(FLAGS_logic_pool_id, i); if (i == chunkinfo1.cpid_ || i == chunkinfo2.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // 这两个leader为该chunkserver的copyset的LeaderMayChange置位 - ASSERT_TRUE(ci.LeaderMayChange()); - } else { - // 对于当前copyset没有leader信息的就直接置位LeaderMayChange - ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); + // These two leaders have LeaderMayChange set for this chunkserver's copysets. ASSERT_TRUE(ci.LeaderMayChange()); } else { + // For copysets with no current leader information, LeaderMayChange is set directly. ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); ASSERT_TRUE(ci.LeaderMayChange()); } } chunkservice[0]->ReSetRPCFailed();
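As an aside for readers tracing the expectations above, here is a self-contained toy model (not Curve's client code; all names are invented) of the invariant being asserted: an RPC failure on one chunkserver flags leaderMayChange for every copyset whose leader sits on that server or whose leader is unknown, and a later successful leader refresh clears the flag.

    #include <vector>

    struct CopysetState {
        int leaderIndex = -1;     // -1 means no cached leader information
        int leaderServerId = -1;  // chunkserver hosting the current leader
        bool leaderMayChange = false;
    };

    // On an RPC failure from one chunkserver, flag the affected copysets.
    void OnChunkServerRpcFailed(std::vector<CopysetState>* copysets,
                                int failedServerId) {
        for (auto& cs : *copysets) {
            if (cs.leaderIndex == -1 || cs.leaderServerId == failedServerId) {
                cs.leaderMayChange = true;
            }
        }
    }

    // A subsequent successful GetLeader on one copyset clears its flag.
    void OnLeaderRefreshed(CopysetState* cs, int leaderIndex, int serverId) {
        cs->leaderIndex = leaderIndex;
        cs->leaderServerId = serverId;
        cs->leaderMayChange = false;
    }
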
- // 再次写第二个chunk,这时候获取leader成功后,会将LeaderMayChange置位fasle - // 第一个chunk对应的copyset依然LeaderMayChange为true - ASSERT_EQ(8192, fileinstance_.Write(buffer, 1 * chunk_size, length)); + // Write the second chunk again; after the leader is fetched successfully, LeaderMayChange is set to false. + // The copyset of the first chunk still has LeaderMayChange == true. ASSERT_EQ(8192, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Read(buffer, 1 * chunk_size, length)); for (int i = 0; i < FLAGS_copyset_num; i++) { CopysetPeerInfo ci = mc->GetCopysetinfo(FLAGS_logic_pool_id, i); if (i == chunkinfo2.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset2的LeaderMayChange置位 + // copyset2's LeaderMayChange has been cleared. ASSERT_FALSE(ci.LeaderMayChange()); } else if (i == chunkinfo1.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset1的LeaderMayChange保持原有状态 + // copyset1's LeaderMayChange remains unchanged. ASSERT_TRUE(ci.LeaderMayChange()); } else { - // 对于当前copyset没有leader信息的就直接置位LeaderMayChange - ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); + // For copysets with no current leader information, set LeaderMayChange directly. ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); ASSERT_TRUE(ci.LeaderMayChange()); } } @@ -485,33 +489,33 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { butil::str2endpoint("127.0.0.1", 9152, &ep2); PeerId pd2(ep2); cliservice->SetPeerID(pd2); - // 设置rpc失败,迫使copyset切换leader,切换leader后读写成功 - chunkservice[0]->SetRPCFailed(); - // 读写第一个和第二个chunk + // Force an RPC failure so the copyset switches leader; reads and writes succeed after the switch. chunkservice[0]->SetRPCFailed(); + // Read and write the first and second chunks. ASSERT_EQ(8192, fileinstance_.Write(buffer, 0 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Read(buffer, 0 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Write(buffer, 0 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Read(buffer, 0 * chunk_size, length)); ASSERT_EQ(1, cliservice->GetInvokeTimes()); - // 这个时候 + // At this point, check each copyset's state. for (int i = 0; i < FLAGS_copyset_num; i++) { CopysetPeerInfo ci = mc->GetCopysetinfo(FLAGS_logic_pool_id, i); if (i == chunkinfo2.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset2的LeaderMayChange置位 + // copyset2's LeaderMayChange is cleared. ASSERT_FALSE(ci.LeaderMayChange()); } else if (i == chunkinfo1.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset1的LeaderMayChange置位 + // copyset1's LeaderMayChange is cleared as well. ASSERT_FALSE(ci.LeaderMayChange()); } else { - // 对于当前copyset没有leader信息的就直接置位LeaderMayChange - ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); + // For copysets with no current leader information, set LeaderMayChange directly. ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); ASSERT_TRUE(ci.LeaderMayChange()); } } - // 验证copyset id信息更新 + // Verify the update of copyset ID information. // copyset id = 888, chunkserver id = 100 101 102 // copyset id = 999, chunkserver id = 102 103 104 CopysetPeerInfo csinfo1; @@ -568,8 +572,8 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { curve::client::CopysetPeerInfo peer9(103, addr); csinfo3.csinfos_.push_back(peer9); - // 更新copyset信息,chunkserver 104的信息被清除 - // 100,和 101上添加了新的copyset信息 + // Update the copyset information; chunkserver 104's information is cleared. + // New copyset information is added on chunkservers 100 and 101. 
mc->UpdateChunkserverCopysetInfo(FLAGS_logic_pool_id, csinfo3); ASSERT_TRUE(mc->CopysetIDInfoIn(100, FLAGS_logic_pool_id, 888)); ASSERT_TRUE(mc->CopysetIDInfoIn(100, FLAGS_logic_pool_id, 999)); @@ -596,7 +600,7 @@ TEST_F(TestLibcurveInterface, InterfaceExceptionTest) { // open not create file ASSERT_EQ(-1 * LIBCURVE_ERROR::FAILED, Open(filename.c_str(), &userinfo)); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9106, &ep); PeerId pd(ep); @@ -610,7 +614,7 @@ ASSERT_EQ(0, Init(configpath.c_str())); - char *buffer = new char[8 * 1024]; + char* buffer = new char[8 * 1024]; memset(buffer, 'a', 8 * 1024); CurveAioContext writeaioctx; @@ -623,7 +627,7 @@ ASSERT_EQ(-LIBCURVE_ERROR::BAD_FD, AioWrite(1234, &writeaioctx)); // aioread not opened file - char *readbuffer = new char[8 * 1024]; + char* readbuffer = new char[8 * 1024]; CurveAioContext readaioctx; readaioctx.buf = readbuffer; readaioctx.offset = 0; @@ -681,10 +685,10 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { LOG(INFO) << "here"; mdsclient_->Initialize(fopt.metaServerOpt); - fileinstance_.Initialize( - "/UnstableChunkserverTest", mdsclient_, userinfo, OpenFlags{}, fopt); + fileinstance_.Initialize("/UnstableChunkserverTest", mdsclient_, userinfo, + OpenFlags{}, fopt); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9151, &ep); PeerId pd(ep); @@ -699,14 +703,14 @@ std::this_thread::sleep_for(std::chrono::milliseconds(1000)); int fd = fileinstance_.Open(); - MetaCache *mc = fileinstance_.GetIOManager4File()->GetMetaCache(); + MetaCache* mc = fileinstance_.GetIOManager4File()->GetMetaCache(); ASSERT_NE(fd, -1); - CliServiceFake *cliservice = mds.GetCliService(); - std::vector chunkservice = mds.GetFakeChunkService(); + CliServiceFake* cliservice = mds.GetCliService(); + std::vector chunkservice = mds.GetFakeChunkService(); - char *buffer = new char[8 * 1024]; + char* buffer = new char[8 * 1024]; uint64_t offset = 0; uint64_t length = 8 * 1024; @@ -722,7 +726,8 @@ ASSERT_EQ(length, fileinstance_.Write(buffer, offset, length)); ASSERT_EQ(length, fileinstance_.Read(buffer, offset, length)); - // metacache中被写过的copyset leadermaychange都处于正常状态 + // In the metacache, LeaderMayChange is still in the normal state for every + // copyset that has been written. ChunkIDInfo_t chunkinfo1; MetaCacheErrorType rc = mc->GetChunkInfoByIndex(0, &chunkinfo1); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -740,19 +745,20 @@ mds.EnableNetUnstable(10000); - // 写2次,读2次,每次请求重试3次 - // 因为在chunkserver端设置了延迟,导致每次请求都会超时 - // unstable阈值为10,所以第11次请求返回时,对应的chunkserver被标记为unstable - // leader在对应chunkserver上的copyset会设置leaderMayChange为true - // 下次发起请求时,会先去刷新leader信息, - // 由于leader没有发生改变,而且延迟仍然存在 - // 所以第12次请求仍然超时,leaderMayChange仍然为true + // Write twice and read twice; each request is retried 3 times. + // Because of the delay injected on the chunkserver side, every request times out. + // The unstable threshold is 10, so when the 11th request returns, the + // corresponding chunkserver is marked unstable, and every copyset whose + // leader is on that chunkserver sets leaderMayChange to true. + // The next request first refreshes the leader information; since the leader + // has not changed and the delay persists, the 12th request times out as + // well and leaderMayChange stays true.
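A compact model of the threshold rule described in the comment above, using invented names (the real client keeps similar per-chunkserver state): consecutive timeouts are counted per chunkserver, crossing the threshold of 10 marks it unstable, and any success resets the count.

    #include <map>

    struct UnstableCounterSketch {
        static constexpr int kMaxStableTimeoutTimes = 10;  // assumed threshold
        std::map<int, int> timeoutCount;  // chunkserver id -> consecutive timeouts

        // Returns true when this timeout pushes the server over the threshold,
        // i.e. on the 11th consecutive timeout, matching the comment above.
        bool OnRequestTimeout(int chunkserverId) {
            return ++timeoutCount[chunkserverId] > kMaxStableTimeoutTimes;
        }
        void OnRequestSuccess(int chunkserverId) { timeoutCount[chunkserverId] = 0; }
    };
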
ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(-2, fileinstance_.Read(buffer, 1 * chunk_size, length)); ASSERT_EQ(-2, fileinstance_.Read(buffer, 1 * chunk_size, length)); - // 获取第2个chunk的chunkid信息 + // Obtain the chunkid information of the second chunk. ChunkIDInfo_t chunkinfo2; rc = mc->GetChunkInfoByIndex(1, &chunkinfo2); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -769,9 +775,10 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { } } - // 当copyset处于unstable状态时 - // 不进入超时时间指数退避逻辑,rpc超时时间设置为默认值 - // 所以每个请求总时间为3s,4个请求需要12s + // When a copyset is in the unstable state, the exponential timeout backoff + // logic is skipped and the RPC timeout keeps its default value, so each + // request takes 3 s in total and 4 requests need 12 s. auto start = TimeUtility::GetTimeofDayMs(); ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); @@ -783,9 +790,10 @@ mds.DisableNetUnstable(); - // 取消延迟,再次读写第2个chunk - // 获取leader信息后,会将leaderMayChange置为false - // 第一个chunk对应的copyset依赖leaderMayChange为true + // Cancel the delay and read/write the second chunk again. + // After the leader information is obtained, leaderMayChange is set to false; + // the copyset of the first chunk still has leaderMayChange == true. ASSERT_EQ(8192, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Read(buffer, 1 * chunk_size, length)); for (int i = 0; i < FLAGS_copyset_num; ++i) { @@ -809,7 +817,8 @@ PeerId pd2(ep2); cliservice->SetPeerID(pd2); - // 设置rcp返回失败,迫使copyset切换leader, 切换leader后读写成功 + // Set the RPC to return failure, forcing the copyset to switch leader; + // reads and writes succeed after the switch.
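The timing expectations here and in the ResumeTimeoutBackoff test below follow from one timeout schedule. A back-of-envelope model, with illustrative parameter names rather than Curve's real ones: early retries keep the small default timeout while the leader may change, and later retries jump to the capped backoff value.

    #include <algorithm>
    #include <cstdint>

    uint64_t RetryTimeoutMs(int retry,               // 1-based retry number
                            uint64_t baseTimeoutMs,  // e.g. 1000
                            uint64_t maxTimeoutMs,   // e.g. 8000
                            int forceBackoffAfter) { // e.g. 5
        if (retry <= forceBackoffAfter) {
            return baseTimeoutMs;  // leader may change: keep the default
        }
        // Exponential growth capped at the maximum; the shift saturates
        // quickly, so retries past the threshold all pay the 8 s cap here.
        uint64_t t = baseTimeoutMs << std::min(retry, 20);
        return std::min(t, maxTimeoutMs);
    }
    // With base 1 s, cap 8 s and threshold 5, eleven retries cost
    // 5 * 1 s + 6 * 8 s = 53 s, the figure quoted in ResumeTimeoutBackoff.
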
chunkservice[0]->SetRPCFailed(); ASSERT_EQ(8192, fileinstance_.Write(buffer, 0 * chunk_size, length)); @@ -872,7 +881,7 @@ TEST_F(TestLibcurveInterface, ResumeTimeoutBackoff) { fileinstance_.Initialize("/ResumeTimeoutBackoff", mdsclient_, userinfo, OpenFlags{}, fopt); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9151, &ep); PeerId pd(ep); @@ -887,13 +896,13 @@ std::this_thread::sleep_for(std::chrono::milliseconds(1000)); int fd = fileinstance_.Open(); - MetaCache *mc = fileinstance_.GetIOManager4File()->GetMetaCache(); + MetaCache* mc = fileinstance_.GetIOManager4File()->GetMetaCache(); ASSERT_NE(fd, -1); - std::vector chunkservice = mds.GetFakeChunkService(); + std::vector chunkservice = mds.GetFakeChunkService(); - char *buffer = new char[8 * 1024]; + char* buffer = new char[8 * 1024]; uint64_t offset = 0; uint64_t length = 8 * 1024; @@ -909,7 +918,8 @@ ASSERT_EQ(length, fileinstance_.Write(buffer, offset, length)); ASSERT_EQ(length, fileinstance_.Read(buffer, offset, length)); - // metacache中被写过的copyset leadermaychange都处于正常状态 + // In the metacache, LeaderMayChange is still in the normal state for every + // copyset that has been written. ChunkIDInfo_t chunkinfo1; MetaCacheErrorType rc = mc->GetChunkInfoByIndex(0, &chunkinfo1); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -927,17 +937,18 @@ mds.EnableNetUnstable(10000); - // 写2次, 每次请求重试11次 - // 因为在chunkserver端设置了延迟,导致每次请求都会超时 - // 第一个请求重试11次,会把chunkserver标记为unstable + // Write twice; each request is retried 11 times. + // Because of the delay injected on the chunkserver side, every request + // times out. The first request is retried 11 times and marks the + // chunkserver unstable. ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); - // 第二个写请求,由于其对应的copyset leader may change - // 第1次请求超时时间为1s - // 后面4次重试由于leader may change所以超时时间也是1s - // 第5-11次请求由于重试次数超过minRetryTimesForceTimeoutBackoff - // 所以超时时间都进入指数退避,为8s * 6 = 48s - // 所以第二次写请求,总共耗时53s,并写入失败 + // For the second write request, the corresponding copyset leader may + // change: the 1st attempt has a 1 s timeout, and the next 4 retries also + // use 1 s because the leader may change. Retries 5-11 exceed + // minRetryTimesForceTimeoutBackoff, so their timeouts enter exponential + // backoff: 8 s * 6 = 48 s. The second write request therefore takes 53 s + // in total and fails. auto start = TimeUtility::GetTimeofDayMs(); ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); auto elapsedMs = TimeUtility::GetTimeofDayMs() - start; @@ -961,7 +972,7 @@ TEST_F(TestLibcurveInterface, InterfaceStripeTest) { uint64_t size = 100 * 1024 * 1024 * 1024ul; FileClient fc; - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9115, &ep); PeerId pd(ep); @@ -975,12 +986,12 @@ ASSERT_EQ(0, fc.Init(configpath)); - FakeMDSCurveFSService *service = NULL; + FakeMDSCurveFSService* service = NULL; service = mds.GetMDSService(); ::curve::mds::CreateFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); 
service->SetCreateFileFakeReturn(fakeret); CreateFileContext context; context.pagefile = true; @@ -991,7 +1002,7 @@ TEST_F(TestLibcurveInterface, InterfaceStripeTest) { ASSERT_EQ(LIBCURVE_ERROR::OK, ret); response.set_statuscode(::curve::mds::StatusCode::kFileExists); - fakeret = new FakeReturn(nullptr, static_cast(&response)); + fakeret = new FakeReturn(nullptr, static_cast(&response)); service->SetCreateFileFakeReturn(fakeret); context.pagefile = true; context.name = filename2; @@ -1003,7 +1014,7 @@ TEST_F(TestLibcurveInterface, InterfaceStripeTest) { ASSERT_EQ(LIBCURVE_ERROR::EXISTS, -ret); FileStatInfo_t fsinfo; - ::curve::mds::FileInfo *info = new curve::mds::FileInfo; + ::curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::GetFileInfoResponse getinforesponse; info->set_filename(filename2); info->set_id(1); @@ -1017,8 +1028,8 @@ TEST_F(TestLibcurveInterface, InterfaceStripeTest) { info->set_stripecount(4); getinforesponse.set_allocated_fileinfo(info); getinforesponse.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakegetinfo = - new FakeReturn(nullptr, static_cast(&getinforesponse)); + FakeReturn* fakegetinfo = + new FakeReturn(nullptr, static_cast(&getinforesponse)); service->SetGetFileInfoFakeReturn(fakegetinfo); ret = fc.StatFile(filename2, userinfo, &fsinfo); ASSERT_EQ(1024 * 1024, fsinfo.stripeUnit); diff --git a/test/client/mds_failover_test.cpp b/test/client/mds_failover_test.cpp index e95912f610..c466457d99 100644 --- a/test/client/mds_failover_test.cpp +++ b/test/client/mds_failover_test.cpp @@ -20,41 +20,41 @@ * Author: tongguangxun */ -#include #include #include +#include +#include +#include //NOLINT #include -#include //NOLINT -#include //NOLINT +#include //NOLINT #include -#include +#include "include/client/libcurve.h" #include "src/client/client_common.h" +#include "src/client/client_config.h" +#include "src/client/config_info.h" #include "src/client/file_instance.h" -#include "test/client/fake/mockMDS.h" -#include "src/client/metacache.h" -#include "test/client/fake/mock_schedule.h" -#include "include/client/libcurve.h" #include "src/client/libcurve_file.h" -#include "src/client/client_config.h" -#include "src/client/service_helper.h" #include "src/client/mds_client.h" -#include "src/client/config_info.h" -#include "test/client/fake/fakeMDS.h" +#include "src/client/metacache.h" #include "src/client/metacache_struct.h" +#include "src/client/service_helper.h" #include "src/common/net_common.h" +#include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mockMDS.h" +#include "test/client/fake/mock_schedule.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" namespace curve { namespace client { -// 测试mds failover切换状态机 +// Testing mds failover switching state machine TEST(MDSChangeTest, MDSFailoverTest) { RPCExcutorRetryPolicy rpcexcutor; - MetaServerOption metaopt; + MetaServerOption metaopt; metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9903"); metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9904"); metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9905"); @@ -70,12 +70,15 @@ TEST(MDSChangeTest, MDSFailoverTest) { int mds1RetryTimes = 0; int mds2RetryTimes = 0; - // 场景1: mds0、1、2, currentworkindex = 0, mds0, mds1, mds2都宕机, - // 发到其rpc都以EHOSTDOWN返回,导致上层client会一直切换mds重试 - // 按照0-->1-->2持续进行 - // 每次rpc返回-EHOSTDOWN,会直接触发RPC切换。最终currentworkindex没有切换 + // Scenario 1: mds0, 1, 2, currentworkindex=0, mds0, mds1, and mds2 are all + // down, + // All RPCs sent to them are returned as EHOSTDOWN, resulting 
in the upper-layer client switching MDS and retrying continuously, + // cycling through 0-->1-->2. + // Every time an RPC returns -EHOSTDOWN, an MDS switch is triggered + // immediately; in the end currentworkindex has not switched. auto task1 = [&](int mdsindex, uint64_t rpctimeoutMS, - brpc::Channel* channel, brpc::Controller* cntl)->int { + brpc::Channel* channel, brpc::Controller* cntl) -> int { if (mdsindex == 0) { mds0RetryTimes++; } @@ -91,12 +94,13 @@ TEST(MDSChangeTest, MDSFailoverTest) { }; uint64_t startMS = TimeUtility::GetTimeofDayMs(); - // 控制面接口调用, 1000为本次rpc的重试总时间 + // Control-plane interface call; 1000 ms is the total retry time of this RPC. rpcexcutor.DoRPCTask(task1, 1000); uint64_t endMS = TimeUtility::GetTimeofDayMs(); ASSERT_GT(endMS - startMS, 1000 - 1); - // 本次重试为轮询重试,每个mds的重试次数应该接近,不超过总的mds数量 + // This is a round-robin retry: the retry counts of the MDSes should be + // close to each other and not exceed the total number of MDSes. ASSERT_LT(abs(mds0RetryTimes - mds1RetryTimes), 3); ASSERT_LT(abs(mds2RetryTimes - mds1RetryTimes), 3); @@ -106,16 +110,18 @@ TEST(MDSChangeTest, MDSFailoverTest) { ASSERT_GT(endMS - startMS, 3000 - 1); ASSERT_EQ(0, rpcexcutor.GetCurrentWorkIndex()); - // 场景2:mds0、1、2, currentworkindex = 0, mds0宕机,并且这时候将正在工作的 - // mds索引切换到index2,预期client在index=0重试之后会直接切换到index 2 - // mds2这这时候直接返回OK,rpc停止重试。 - // 预期client总共发送两次rpc,一次发送到mds0,另一次发送到mds2,跳过中间的 - // mds1。 + // Scenario 2: mds0, 1, 2, currentworkindex = 0; mds0 is down, and meanwhile + // the working MDS index is switched to 2. The client is expected to switch + // straight to index 2 after retrying index 0. mds2 then returns OK and the + // RPC stops retrying. The client should send exactly two RPCs, one to mds0 + // and one to mds2, skipping mds1 in between. mds0RetryTimes = 0; mds1RetryTimes = 0; mds2RetryTimes = 0; auto task2 = [&](int mdsindex, uint64_t rpctimeoutMS, - brpc::Channel* channel, brpc::Controller* cntl)->int { + brpc::Channel* channel, brpc::Controller* cntl) -> int { if (mdsindex == 0) { mds0RetryTimes++; rpcexcutor.SetCurrentWorkIndex(2); @@ -129,7 +135,8 @@ if (mdsindex == 2) { mds2RetryTimes++; - // 本次返回ok,那么RPC应该成功了,不会再重试 + // OK is returned this time, so the RPC has succeeded and there + // will be no further retries. return LIBCURVE_ERROR::OK; } @@ -144,16 +151,17 @@ ASSERT_EQ(mds1RetryTimes, 0); ASSERT_EQ(mds2RetryTimes, 1); - // 场景3:mds0、1、2,currentworkindex = 1,且mds1宕机了, - // 这时候会切换到mds0和mds2 - // 在切换到2之后,mds1又恢复了,这时候切换到mds1,然后rpc发送成功。 - // 这时候的切换顺序为1->2->0, 1->2->0, 1。 + // Scenario 3: mds0, 1, 2, currentworkindex = 1, and mds1 is down, so the + // client switches to mds2 and mds0. After switching to 2, mds1 recovers; + // the client then switches back to mds1 and the RPC is sent successfully. + // The switching order is 1->2->0, 1->2->0, 1.
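Before scenario 4, it may help to see the switching policy these scenarios exercise in one place. An illustrative model with assumed names (not RPCExcutorRetryPolicy's actual code): -EHOSTDOWN switches the MDS index immediately, while timeouts switch only after maxFailedTimesBeforeChangeMDS consecutive failures on the same MDS.

    #include <cerrno>

    // rc is the task's return code; brpc's ERPCTIMEDOUT would be handled like
    // ETIMEDOUT. failedTimes tracks consecutive failures on currentIndex.
    int NextMdsIndexSketch(int currentIndex, int totalMds, int rc,
                           int* failedTimes, int maxFailedTimesBeforeChangeMDS) {
        if (rc == -EHOSTDOWN) {
            *failedTimes = 0;
            return (currentIndex + 1) % totalMds;  // switch right away
        }
        if (rc == -ETIMEDOUT) {
            if (++(*failedTimes) >= maxFailedTimesBeforeChangeMDS) {
                *failedTimes = 0;
                return (currentIndex + 1) % totalMds;  // 0->0->1 with threshold 2
            }
            return currentIndex;  // retry the same MDS first
        }
        return currentIndex;  // other errors: retry in place (simplified)
    }
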
mds0RetryTimes = 0; mds1RetryTimes = 0; mds2RetryTimes = 0; rpcexcutor.SetCurrentWorkIndex(1); auto task3 = [&](int mdsindex, uint64_t rpctimeoutMS, - brpc::Channel* channel, brpc::Controller* cntl)->int { + brpc::Channel* channel, brpc::Controller* cntl) -> int { if (mdsindex == 0) { mds0RetryTimes++; return -ECONNRESET; } @@ -161,7 +169,8 @@ TEST(MDSChangeTest, MDSFailoverTest) { if (mdsindex == 1) { mds1RetryTimes++; - // 当在mds1上重试到第三次的时候向上返回成功,停止重试 + // On the third retry against mds1, return success upward and stop + // retrying. if (mds1RetryTimes == 3) { return LIBCURVE_ERROR::OK; } @@ -186,22 +195,24 @@ ASSERT_EQ(1, rpcexcutor.GetCurrentWorkIndex()); - // 场景4:mds0、1、2, currentWorkindex = 0, 但是发往mds1的rpc请求一直超时 - // 最后rpc返回结果是超时. - // 对于超时的mds节点会连续重试mds.maxFailedTimesBeforeChangeMDS后切换 - // 当前mds.maxFailedTimesBeforeChangeMDS=2。 - // 所以重试逻辑应该是:0->0->1->2, 0->0->1->2, 0->0->1->2, ... + // Scenario 4: mds0, 1, 2, currentWorkindex = 0, but RPC requests sent to + // mds0 keep timing out, and the final RPC result is a timeout. + // A timed-out MDS node is retried mds.maxFailedTimesBeforeChangeMDS times + // in a row before switching; currently mds.maxFailedTimesBeforeChangeMDS=2, + // so the retry order should be: 0->0->1->2, 0->0->1->2, 0->0->1->2, ... LOG(INFO) << "case 4"; mds0RetryTimes = 0; mds1RetryTimes = 0; mds2RetryTimes = 0; rpcexcutor.SetCurrentWorkIndex(0); auto task4 = [&](int mdsindex, uint64_t rpctimeoutMS, - brpc::Channel* channel, brpc::Controller* cntl)->int { + brpc::Channel* channel, brpc::Controller* cntl) -> int { if (mdsindex == 0) { mds0RetryTimes++; - return mds0RetryTimes % 2 == 0 ? -brpc::ERPCTIMEDOUT - : -ETIMEDOUT; + return mds0RetryTimes % 2 == 0 ? 
-brpc::ERPCTIMEDOUT : -ETIMEDOUT; } if (mdsindex == 1) { @@ -222,17 +233,17 @@ TEST(MDSChangeTest, MDSFailoverTest) { endMS = TimeUtility::GetTimeofDayMs(); ASSERT_GT(endMS - startMS, 3000 - 1); ASSERT_EQ(0, rpcexcutor.GetCurrentWorkIndex()); - // 本次重试为轮询重试,每个mds的重试次数应该接近,不超过总的mds数量 + // This is a round-robin retry: the retry counts of the MDSes should be + // close to each other and not exceed the total number of MDSes. ASSERT_GT(mds0RetryTimes, mds1RetryTimes + mds2RetryTimes); - // 场景5:mds0、1、2,currentWorkIndex = 0 - // 但是rpc请求前10次全部返回EHOSTDOWN - // mds重试睡眠10ms,所以总共耗时100ms时间 + // Scenario 5: mds0, 1, 2, currentWorkIndex = 0, + // but the first 10 RPC requests all return EHOSTDOWN. + // Each MDS retry sleeps 10 ms, so this takes 100 ms in total. rpcexcutor.SetCurrentWorkIndex(0); int hostDownTimes = 10; auto task5 = [&](int mdsindex, uint64_t rpctimeoutMs, - brpc::Channel* channel, - brpc::Controller* cntl) { + brpc::Channel* channel, brpc::Controller* cntl) { static int count = 0; if (++count <= hostDownTimes) { return -EHOSTDOWN; } @@ -241,27 +252,28 @@ TEST(MDSChangeTest, MDSFailoverTest) { return 0; }; startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task5, 10000); // 总重试时间10s + rpcexcutor.DoRPCTask(task5, 10000); // Total retry time 10 s endMS = TimeUtility::GetTimeofDayMs(); ASSERT_GE(endMS - startMS, 100); - // 场景6: mds在重试过程中一直返回EHOSTDOWN,总共重试5s + // Scenario 6: the MDS keeps returning EHOSTDOWN during retries, for a + // total retry time of 5 s. rpcexcutor.SetCurrentWorkIndex(0); int calledTimes = 0; auto task6 = [&](int mdsindex, uint64_t rpctimeoutMs, - brpc::Channel* channel, - brpc::Controller* cntl) { + brpc::Channel* channel, brpc::Controller* cntl) { ++calledTimes; return -EHOSTDOWN; }; startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task6, 5 * 1000); // 总重试时间5s + rpcexcutor.DoRPCTask(task6, 5 * 1000); // Total retry time 5 s endMS = TimeUtility::GetTimeofDayMs(); ASSERT_GE(endMS - startMS, 5 * 1000 - 1); - // 每次hostdown情况下,睡眠10ms,总重试时间5s,所以总共重试次数小于等于500次 - // 为了尽量减少误判,所以加入10次冗余 + // Each EHOSTDOWN sleeps 10 ms and the total retry time is 5 s, so there + // are at most 500 retries. To minimize false positives, 10 extra calls + // are allowed for. LOG(INFO) << "called times " << calledTimes; ASSERT_LE(calledTimes, 510); } @@ -269,7 +281,7 @@ } // namespace client } // namespace curve -const std::vector registConfOff { +const std::vector registConfOff{ std::string("mds.listen.addr=127.0.0.1:9903,127.0.0.1:9904,127.0.0.1:9905"), std::string("rpcRetryTimes=3"), std::string("global.logPath=./runlog/"), @@ -281,10 +293,9 @@ const std::vector registConfOff { std::string("metacache.rpcRetryIntervalUS=500"), std::string("mds.rpcRetryIntervalUS=500"), std::string("schedule.threadpoolSize=2"), - std::string("mds.registerToMDS=false") -}; + std::string("mds.registerToMDS=false")}; -const std::vector registConfON { +const std::vector registConfON{ std::string("mds.listen.addr=127.0.0.1:9903,127.0.0.1:9904,127.0.0.1:9905"), std::string("global.logPath=./runlog/"), std::string("synchronizeRPCTimeoutMS=500"), @@ -297,14 +308,14 @@ const std::vector registConfON { std::string("metacache.rpcRetryIntervalUS=500"), std::string("mds.rpcRetryIntervalUS=500"), std::string("schedule.threadpoolSize=2"), - std::string("mds.registerToMDS=true") -}; - -std::string mdsMetaServerAddr = "127.0.0.1:9903,127.0.0.1:9904,127.0.0.1:9905"; // NOLINT -uint32_t segment_size = 1 * 1024 * 1024 * 
1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string configpath = "./test/client/configs/mds_failover.conf"; // NOLINT -int main(int argc, char ** argv) { + std::string("mds.registerToMDS=true")}; + +std::string mdsMetaServerAddr = + "127.0.0.1:9903,127.0.0.1:9904,127.0.0.1:9905"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string configpath = "./test/client/configs/mds_failover.conf"; // NOLINT +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); google::ParseCommandLineFlags(&argc, &argv, false); diff --git a/test/client/mock/mock_chunkservice.h b/test/client/mock/mock_chunkservice.h index 3891ce60bf..134f404a85 100644 --- a/test/client/mock/mock_chunkservice.h +++ b/test/client/mock/mock_chunkservice.h @@ -25,8 +25,8 @@ #include #include -#include #include +#include #include @@ -39,48 +39,48 @@ namespace client { using ::testing::_; using ::testing::Invoke; -using curve::chunkserver::ChunkService; using curve::chunkserver::CHUNK_OP_STATUS; +using curve::chunkserver::ChunkService; -/* 当前仅仅模拟单 chunk read/write */ +/*Currently, only single chunk read/write is simulated*/ class FakeChunkServiceImpl : public ChunkService { public: virtual ~FakeChunkServiceImpl() {} - void WriteChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void WriteChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); chunkIds_.insert(request->chunkid()); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); ::memcpy(chunk_ + request->offset(), cntl->request_attachment().to_string().c_str(), request->size()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } - void ReadChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void ReadChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); char buff[4096] = {0}; ::memcpy(buff, chunk_ + request->offset(), request->size()); cntl->response_attachment().append(buff, request->size()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } - void ReadChunkSnapshot(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void ReadChunkSnapshot(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); char buff[4096] = {0}; ::memcpy(buff, chunk_ + request->offset(), request->size()); 
cntl->response_attachment().append(buff, request->size()); @@ -88,113 +88,114 @@ class FakeChunkServiceImpl : public ChunkService { } void DeleteChunkSnapshotOrCorrectSn( - ::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); LOG(INFO) << "delete chunk snapshot: " << request->chunkid(); if (chunkIds_.find(request->chunkid()) == chunkIds_.end()) { - response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); //NOLINT - LOG(INFO) << "delete chunk snapshot: " - << request->chunkid() << " not exist"; + response->set_status( + CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); // NOLINT + LOG(INFO) << "delete chunk snapshot: " << request->chunkid() + << " not exist"; return; } chunkIds_.erase(request->chunkid()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } - void GetChunkInfo(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::GetChunkInfoRequest *request, - ::curve::chunkserver::GetChunkInfoResponse *response, - google::protobuf::Closure *done) { + void GetChunkInfo(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::GetChunkInfoRequest* request, + ::curve::chunkserver::GetChunkInfoResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); response->add_chunksn(1); response->add_chunksn(2); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } - void CreateCloneChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void CreateCloneChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } - void RecoverChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void RecoverChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } private: std::set chunkIds_; - /* 由于 bthread 栈空间的限制,这里不会开很大的空间,如果测试需要更大的空间 - * 请在堆上申请 */ + /* Due to the limitations of the bthread stack space, there will not be a + * large amount of space opened here. 
If a test needs more space, please + * allocate it on the heap. */ char chunk_[4096] = {0}; }; class MockChunkServiceImpl : public ChunkService { public: - MOCK_METHOD4(WriteChunk, void(::google::protobuf::RpcController - *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done)); - MOCK_METHOD4(ReadChunk, void(::google::protobuf::RpcController - *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done)); - MOCK_METHOD4(ReadChunkSnapshot, void(::google::protobuf::RpcController - *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done)); - MOCK_METHOD4(DeleteChunkSnapshotOrCorrectSn, void( - ::google::protobuf::RpcController - *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done)); - MOCK_METHOD4(GetChunkInfo, void(::google::protobuf::RpcController - *controller, - const ::curve::chunkserver::GetChunkInfoRequest *request, - ::curve::chunkserver::GetChunkInfoResponse *response, - google::protobuf::Closure *done)); + MOCK_METHOD4(WriteChunk, + void(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done)); + MOCK_METHOD4(ReadChunk, + void(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done)); + MOCK_METHOD4(ReadChunkSnapshot, + void(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done)); + MOCK_METHOD4(DeleteChunkSnapshotOrCorrectSn, + void(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done)); + MOCK_METHOD4(GetChunkInfo, + void(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::GetChunkInfoRequest* request, + ::curve::chunkserver::GetChunkInfoResponse* response, + google::protobuf::Closure* done)); MOCK_METHOD4(CreateCloneChunk, void(::google::protobuf::RpcController* controller, const ::curve::chunkserver::ChunkRequest* request, ::curve::chunkserver::ChunkResponse* response, google::protobuf::Closure* done)); - MOCK_METHOD4(RecoverChunk, void(::google::protobuf::RpcController - *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done)); - MOCK_METHOD4(UpdateEpoch, void(::google::protobuf::RpcController - *controller, - const ::curve::chunkserver::UpdateEpochRequest *request, - ::curve::chunkserver::UpdateEpochResponse *response, - google::protobuf::Closure *done)); + MOCK_METHOD4(RecoverChunk, + void(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done)); + MOCK_METHOD4(UpdateEpoch, + void(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::UpdateEpochRequest* request, + const ::curve::chunkserver::UpdateEpochResponse* response, + google::protobuf::Closure* 
done)); void DelegateToFake() { ON_CALL(*this, WriteChunk(_, _, _, _)) - .WillByDefault(Invoke(&fakeChunkService, - &FakeChunkServiceImpl::WriteChunk)); + .WillByDefault( + Invoke(&fakeChunkService, &FakeChunkServiceImpl::WriteChunk)); ON_CALL(*this, ReadChunk(_, _, _, _)) - .WillByDefault(Invoke(&fakeChunkService, - &FakeChunkServiceImpl::ReadChunk)); + .WillByDefault( + Invoke(&fakeChunkService, &FakeChunkServiceImpl::ReadChunk)); } private: FakeChunkServiceImpl fakeChunkService; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // TEST_CLIENT_MOCK_MOCK_CHUNKSERVICE_H_ diff --git a/test/client/request_scheduler_test.cpp b/test/client/request_scheduler_test.cpp index 9ff0636530..bf75580957 100644 --- a/test/client/request_scheduler_test.cpp +++ b/test/client/request_scheduler_test.cpp @@ -20,18 +20,19 @@ * Author: wudemiao */ -#include -#include -#include +#include "src/client/request_scheduler.h" + #include +#include #include +#include +#include -#include "src/client/request_scheduler.h" #include "src/client/client_common.h" -#include "test/client/mock/mock_meta_cache.h" +#include "src/common/concurrent/count_down_event.h" #include "test/client/mock/mock_chunkservice.h" +#include "test/client/mock/mock_meta_cache.h" #include "test/client/mock/mock_request_context.h" -#include "src/common/concurrent/count_down_event.h" namespace curve { namespace client { @@ -49,8 +50,9 @@ TEST(RequestSchedulerTest, fake_server_test) { brpc::Server server; std::string listenAddr = "127.0.0.1:9109"; FakeChunkServiceImpl fakeChunkService; - ASSERT_EQ(server.AddService(&fakeChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&fakeChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; ASSERT_EQ(server.Start(listenAddr.c_str(), &option), 0); @@ -94,7 +96,7 @@ TEST(RequestSchedulerTest, fake_server_test) { /* error request schedule test when scheduler not run */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); reqCtx->writeData_.append(writebuff, len); @@ -102,17 +104,17 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(0); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(-1, requestScheduler.ScheduleRequest(reqCtxs)); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); reqCtx->writeData_.append(writebuff, len); @@ -120,7 +122,7 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(0); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -147,7 +149,7 @@ TEST(RequestSchedulerTest, fake_server_test) { const uint64_t len1 = 16; /* write should with attachment size */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* 
reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -157,18 +159,18 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -177,12 +179,12 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -190,11 +192,10 @@ TEST(RequestSchedulerTest, fake_server_test) { ASSERT_EQ(0, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; ::memset(writebuff1, 'a', 8); ::memset(writebuff1 + 8, '\0', 8); @@ -203,34 +204,33 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; memset(readbuff1, '0', 16); reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -258,9 +258,9 @@ TEST(RequestSchedulerTest, fake_server_test) { } // read snapshot - // 1. 先 write snapshot + // 1. 
Write snapshot first { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -272,35 +272,34 @@ reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); } - // 2. 再 read snapshot 验证一遍 + // 2. Then read the snapshot to verify it { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; memset(readbuff1, '0', 16); reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -309,47 +308,45 @@ ASSERT_EQ(reqCtx->readData_, expectReadData); ASSERT_EQ(0, reqDone->GetErrorCode()); } - // 3. 在 delete snapshot + // 3. Then delete the snapshot { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); ASSERT_EQ(0, reqDone->GetErrorCode()); } - // 4. 重复 delete snapshot + // 4. 
Repeat delete snapshot { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -357,22 +354,22 @@ TEST(RequestSchedulerTest, fake_server_test) { reqDone->GetErrorCode()); } - // 测试 get chunk info + // Test get chunk info { ChunkInfoDetail chunkInfo; - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); reqCtx->chunkinfodetail_ = &chunkInfo; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -383,9 +380,9 @@ TEST(RequestSchedulerTest, fake_server_test) { reqDone->GetErrorCode()); } - // 测试createClonechunk + // Test createClonechunk { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::CREATE_CLONE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -395,36 +392,35 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->location_ = "destination"; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); ASSERT_EQ(0, reqDone->GetErrorCode()); } - // 测试recoverChunk + // Testing recoverChunk { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::RECOVER_CHUNK; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -434,7 +430,7 @@ TEST(RequestSchedulerTest, fake_server_test) { /* read/write chunk test */ const int kMaxLoop = 100; for (int i = 0; i < kMaxLoop; ++i) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -444,35 +440,34 @@ 
TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); } for (int i = 0; i < kMaxLoop; ++i) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; memset(readbuff, '0', 8); reqCtx->offset_ = offset + i; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -482,34 +477,33 @@ TEST(RequestSchedulerTest, fake_server_test) { ASSERT_EQ(0, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::UNKNOWN; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - memset(readbuff, '0', 8); // reqCtx->readBuffer_ = readbuff; reqCtx->offset_ = offset; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); ASSERT_EQ(-1, reqDone->GetErrorCode()); } - /* 2. 并发测试 */ + /* 2. 
Concurrent testing */ curve::common::CountDownEvent cond(4 * kMaxLoop); auto func = [&]() { for (int i = 0; i < kMaxLoop; ++i) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -518,7 +512,7 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->offset_ = offset + i; reqCtx->rawlength_ = len; - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -538,18 +532,17 @@ TEST(RequestSchedulerTest, fake_server_test) { cond.Wait(); for (int i = 0; i < kMaxLoop; i += 1) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, 1000, copysetId); - reqCtx->seq_ = sn; memset(readbuff, '0', 8); // reqCtx->readBuffer_ = readbuff; reqCtx->offset_ = offset + i; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -578,11 +571,11 @@ TEST(RequestSchedulerTest, CommonTest) { MetaCache metaCache; FileMetric fm("test"); - // scheduleQueueCapacity 设置为 0 + // scheduleQueueCapacity set to 0 opt.scheduleQueueCapacity = 0; ASSERT_EQ(-1, sche.Init(opt, &metaCache, &fm)); - // threadpoolsize 设置为 0 + // threadpoolsize set to 0 opt.scheduleQueueCapacity = 4096; opt.scheduleThreadpoolSize = 0; ASSERT_EQ(-1, sche.Init(opt, &metaCache, &fm)); @@ -597,5 +590,5 @@ TEST(RequestSchedulerTest, CommonTest) { ASSERT_EQ(0, sche.Fini()); } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/test/client/request_sender_test.cpp b/test/client/request_sender_test.cpp index 92882bac79..c453fd2468 100644 --- a/test/client/request_sender_test.cpp +++ b/test/client/request_sender_test.cpp @@ -20,11 +20,12 @@ * Author: wudemiao */ +#include "src/client/request_sender.h" + #include #include #include "src/client/client_common.h" -#include "src/client/request_sender.h" #include "src/common/concurrent/count_down_event.h" #include "test/client/mock/mock_chunkservice.h" @@ -54,9 +55,7 @@ class FakeChunkClosure : public ClientClosure { SetClosure(&reqeustClosure); } - void Run() override { - event->Signal(); - } + void Run() override { event->Signal(); } void SendRetryRequest() override {} @@ -96,7 +95,7 @@ class RequestSenderTest : public ::testing::Test { }; TEST_F(RequestSenderTest, BasicTest) { - // 非法的 port + // Illegal port std::string leaderStr = "127.0.0.1:65539"; butil::EndPoint leaderAddr; ChunkServerID leaderId = 1; @@ -126,8 +125,8 @@ TEST_F(RequestSenderTest, TestWriteChunkSourceInfo) { FakeChunkClosure closure(&event); sourceInfo.cloneFileSource.clear(); - requestSender.WriteChunk(ChunkIDInfo(), 1, 1, 0, {}, 0, 0, - sourceInfo, &closure); + requestSender.WriteChunk(ChunkIDInfo(), 1, 1, 0, {}, 0, 0, sourceInfo, + &closure); event.Wait(); ASSERT_FALSE(chunkRequest.has_clonefilesource()); @@ -148,8 +147,8 @@ TEST_F(RequestSenderTest, TestWriteChunkSourceInfo) { sourceInfo.cloneFileOffset = 0; sourceInfo.valid = true; - requestSender.WriteChunk(ChunkIDInfo(), 1, 1, 0, {}, 0, 0, - sourceInfo, &closure); + 
requestSender.WriteChunk(ChunkIDInfo(), 1, 1, 0, {}, 0, 0, sourceInfo, + &closure); event.Wait(); ASSERT_TRUE(chunkRequest.has_clonefilesource()); diff --git a/test/common/bitmap_test.cpp b/test/common/bitmap_test.cpp index 8bb85b01ad..2bfbed38ca 100644 --- a/test/common/bitmap_test.cpp +++ b/test/common/bitmap_test.cpp @@ -20,10 +20,10 @@ * Author: yangyaokai */ -#include - #include "src/common/bitmap.h" +#include + namespace curve { namespace common { @@ -62,7 +62,7 @@ TEST(BitmapTEST, constructor_test) { delete[] mem; } - // 测试拷贝构造 + // Test copy construction { Bitmap bitmap1(32); Bitmap bitmap2(bitmap1); @@ -72,7 +72,7 @@ TEST(BitmapTEST, constructor_test) { } } - // 测试赋值操作 + // Test assignment operation { Bitmap bitmap1(32); Bitmap bitmap2(16); @@ -88,7 +88,7 @@ TEST(BitmapTEST, constructor_test) { } } - // 测试比较操作符 + // Test Comparison Operator { Bitmap bitmap1(16); Bitmap bitmap2(16); @@ -229,7 +229,7 @@ TEST(BitmapTEST, divide_test) { vector clearRanges; vector setRanges; - // 所有位为0 + // All bits are 0 { bitmap.Clear(); bitmap.Divide(0, 31, &clearRanges, &setRanges); @@ -241,7 +241,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 所有位为1 + // All bits are 1 { bitmap.Set(); bitmap.Divide(0, 31, &clearRanges, &setRanges); @@ -253,7 +253,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 两个range,起始为clear range,末尾为set range + // Two ranges, starting with a clear range and ending with a set range { bitmap.Clear(0, 16); bitmap.Set(17, 31); @@ -268,7 +268,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 两个range,起始为 set range,末尾为 clear range + // Two ranges, starting with set range and ending with clear range { bitmap.Set(0, 16); bitmap.Clear(17, 31); @@ -283,7 +283,8 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 三个range,头尾为 set range,中间为 clear range + // Three ranges, with set ranges at the beginning and end, and clear ranges + // in the middle { bitmap.Set(0, 8); bitmap.Clear(9, 25); @@ -301,7 +302,8 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 三个range,头尾为 clear range,中间为 set range + // Three ranges, with clear ranges at the beginning and end, and set ranges + // in the middle { bitmap.Clear(0, 8); bitmap.Set(9, 25); @@ -319,7 +321,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 四个range,头为 clear range,末尾为 set range + // Four ranges, starting with a clear range and ending with a set range { bitmap.Clear(0, 7); bitmap.Set(8, 15); @@ -340,7 +342,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 四个range,头为 set range,末尾为 clear range + // Four ranges, starting with set range and ending with clear range { bitmap.Set(0, 7); bitmap.Clear(8, 15); @@ -361,7 +363,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 复杂场景随机偏移测试 + // Random offset testing for complex scenes { bitmap.Set(0, 5); bitmap.Clear(6, 9); diff --git a/test/common/channel_pool_test.cpp b/test/common/channel_pool_test.cpp index e327f6f82f..d573142cf0 100644 --- a/test/common/channel_pool_test.cpp +++ b/test/common/channel_pool_test.cpp @@ -20,30 +20,30 @@ * Author: charisu */ -#include - #include "src/common/channel_pool.h" +#include + namespace curve { namespace common { TEST(Common, ChannelPool) { ChannelPool channelPool; ChannelPtr channelPtr; - // 地址非法,init失败 + // Illegal address, init failed std::string addr = "127.0.0.1:80000"; ASSERT_EQ(-1, channelPool.GetOrInitChannel(addr, &channelPtr)); ASSERT_FALSE(channelPtr); - // 地址合法,init成功 + // The address is legal, init succeeded addr = "127.0.0.1:8000"; ASSERT_EQ(0, 
channelPool.GetOrInitChannel(addr, &channelPtr)); ASSERT_TRUE(channelPtr); - // 同一个地址应该返回同一个channelPtr + // The same address should return the same channelPtr ChannelPtr channelPtr2; ASSERT_EQ(0, channelPool.GetOrInitChannel(addr, &channelPtr2)); ASSERT_TRUE(channelPtr2); ASSERT_EQ(channelPtr, channelPtr2); - // 清空 + // Clear channelPool.Clear(); } diff --git a/test/common/configuration_test.cpp b/test/common/configuration_test.cpp index 9dc770bcc8..d51c2c84f4 100644 --- a/test/common/configuration_test.cpp +++ b/test/common/configuration_test.cpp @@ -21,17 +21,17 @@ * 2018/11/23 Wenyu Zhou Initial version */ -#include +#include "src/common/configuration.h" + #include +#include -#include -#include #include +#include #include +#include #include -#include "src/common/configuration.h" - namespace curve { namespace common { @@ -87,9 +87,7 @@ class ConfigurationTest : public ::testing::Test { cFile << confItem; } - void TearDown() { - ASSERT_EQ(0, unlink(confFile_.c_str())); - } + void TearDown() { ASSERT_EQ(0, unlink(confFile_.c_str())); } std::string confFile_; }; @@ -129,52 +127,54 @@ TEST_F(ConfigurationTest, ListConfig) { std::map configs; configs = conf.ListConfig(); ASSERT_NE(0, configs.size()); - // 抽几个key来校验以下 + // Pick a few keys for validation. ASSERT_EQ(configs["test.int1"], "12345"); ASSERT_EQ(configs["test.bool1"], "0"); - // 如果key不存在,返回为空 + // If the key does not exist, return empty ASSERT_EQ(configs["xxx"], ""); } -// 覆盖原有配置 +// Overwrite the original configuration TEST_F(ConfigurationTest, SaveConfig) { bool ret; Configuration conf; conf.SetConfigPath(confFile_); - // 自定义配置项并保存 + // Customize configuration items and save them conf.SetStringValue("test.str1", "new"); ret = conf.SaveConfig(); ASSERT_EQ(ret, true); - // 重新加载配置项 + // Reload Configuration Items Configuration conf2; conf2.SetConfigPath(confFile_); ret = conf2.LoadConfig(); ASSERT_EQ(ret, true); - // 可以读取自定义配置项,原有配置项被覆盖,读取不到 + // Custom configuration items can be read, but the original configuration + // items are overwritten and cannot be read ASSERT_EQ(conf2.GetValue("test.str1"), "new"); ASSERT_EQ(conf2.GetValue("test.int1"), ""); } -// 读取当前配置写到其他路径 +// Read the current configuration and write to another path TEST_F(ConfigurationTest, SaveConfigToFileNotExist) { bool ret; - // 加载当前配置 + // Load current configuration Configuration conf; conf.SetConfigPath(confFile_); ret = conf.LoadConfig(); ASSERT_EQ(ret, true); - // 写配置到其他位置 + // Write configuration to another location std::string newFile("curve.conf.test2"); conf.SetConfigPath(newFile); ret = conf.SaveConfig(); ASSERT_EQ(ret, true); - // 从新配置文件加载,并读取某项配置来进行校验 + // Load from a new configuration file and read a certain configuration for + // verification Configuration newConf; newConf.SetConfigPath(newFile); ret = newConf.LoadConfig(); @@ -337,11 +337,11 @@ TEST_F(ConfigurationTest, TestMetric) { "{\"conf_name\":\"key1\",\"conf_value\":\"123\"}"); ASSERT_STREQ(bvar::Variable::describe_exposed("conf_metric_key2").c_str(), "{\"conf_name\":\"key2\",\"conf_value\":\"1.230000\"}"); - // 还未设置时,返回空 + // When not yet set, return empty ASSERT_STREQ(bvar::Variable::describe_exposed("conf_metric_key3").c_str(), ""); - // 支持自动更新metric + // Support for automatic updating of metrics conf.SetIntValue("key1", 234); ASSERT_STREQ(bvar::Variable::describe_exposed("conf_metric_key1").c_str(), "{\"conf_name\":\"key1\",\"conf_value\":\"234\"}"); diff --git a/test/common/count_down_event_test.cpp b/test/common/count_down_event_test.cpp index 8bdc5c9681..41633c6425 100644 --- 
a/test/common/count_down_event_test.cpp
+++ b/test/common/count_down_event_test.cpp
@@ -20,13 +20,13 @@
  * Author: wudemiao
  */
+#include "src/common/concurrent/count_down_event.h"
+
 #include <gtest/gtest.h>
-#include <chrono>   //NOLINT
 #include <atomic>
-#include <thread>   //NOLINT
-
-#include "src/common/concurrent/count_down_event.h"
+#include <chrono>  //NOLINT
+#include <thread>  //NOLINT
 namespace curve {
 namespace common {
@@ -62,7 +62,7 @@ TEST(CountDownEventTest, basic) {
         };
         std::thread t1(func);
-        std::this_thread::sleep_for(std::chrono::milliseconds(3*sleepMs));
+        std::this_thread::sleep_for(std::chrono::milliseconds(3 * sleepMs));
         ASSERT_TRUE(isRun.load());
         t1.join();
@@ -89,8 +89,7 @@ TEST(CountDownEventTest, basic) {
         cond.WaitFor(1000);
     }
-
-    /* 1. initCnt==Signal次数 */
+    /* 1. initCnt == number of Signal() calls */
     {
         std::atomic<int> signalCount;
         signalCount.store(0, std::memory_order_release);
@@ -111,13 +110,13 @@ TEST(CountDownEventTest, basic) {
         t1.join();
     }
-    /* 2. initCnt<Signal次数 */
+    /* 2. initCnt < number of Signal() calls */
     {
         std::atomic<int> signalCount;
         signalCount.store(0, std::memory_order_release);
         const int kEventNum = 20;
-        const int kInitCnt  = kEventNum - 10;
+        const int kInitCnt = kEventNum - 10;
         CountDownEvent cond(kInitCnt);
         auto func = [&] {
             for (int i = 0; i < kEventNum; ++i) {
@@ -128,7 +127,7 @@ TEST(CountDownEventTest, basic) {
         std::thread t1(func);
-        /* 等到Signal次数>initCnt */
+        /* Wait until the number of Signal() calls > initCnt */
         while (true) {
             ::usleep(5);
             if (signalCount.load(std::memory_order_acquire) > kInitCnt) {
@@ -141,13 +140,13 @@ TEST(CountDownEventTest, basic) {
         t1.join();
     }
-    /* 3. initCnt>Signal次数 */
+    /* 3. initCnt > number of Signal() calls */
     {
         std::atomic<int> signalCount;
         signalCount.store(0, std::memory_order_release);
         const int kEventNum = 10;
-        /* kSignalEvent1 + kSignalEvent2等于kEventNum */
+        /* kSignalEvent1 + kSignalEvent2 = kEventNum */
         const int kSignalEvent1 = kEventNum - 5;
         const int kSignalEvent2 = 5;
         CountDownEvent cond(kEventNum);
@@ -167,7 +166,8 @@ TEST(CountDownEventTest, basic) {
         };
         std::thread waitThread(waitFunc);
-        /* 由于t1 唤醒的次数不够,所以waitThread会阻塞在wait那里 */
+        /* t1 has not signaled enough times, so waitThread blocks inside
+         * Wait() */
         ASSERT_EQ(false, passWait.load(std::memory_order_acquire));
         auto func2 = [&] {
@@ -176,7 +176,7 @@ TEST(CountDownEventTest, basic) {
                 cond.Signal();
             }
         };
-        /* 运行t2,补上不够的唤醒次数 */
+        /* Run t2 to supply the missing Signal() calls */
         std::thread t2(func2);
         t1.join();
@@ -203,8 +203,9 @@ TEST(CountDownEventTest, basic) {
         std::chrono::duration<double, std::milli> elpased = end - start;
         std::cerr << "elapsed: " << elpased.count() << std::endl;
-        // 事件未到达,超时返回,可以容许在一定的误差
-        ASSERT_GT(static_cast<int>(elpased.count()), waitForMs-1000);
+        // The count was not reached, so WaitFor returned on timeout; allow
+        // some timing error
+        ASSERT_GT(static_cast<int>(elpased.count()), waitForMs - 1000);
         t1.join();
     }
@@ -226,7 +227,7 @@ TEST(CountDownEventTest, basic) {
         std::chrono::duration<double, std::milli> elpased = end - start;
         std::cerr << "elapsed: " << elpased.count() << std::endl;
-        // 事件达到,提前返回
+        // The count was reached, so WaitFor returned early
         ASSERT_GT(waitForMs, static_cast<int>(elpased.count()));
         t1.join();
diff --git a/test/common/lru_cache_test.cpp b/test/common/lru_cache_test.cpp
index a5e9d65e19..773d42e153 100644
--- a/test/common/lru_cache_test.cpp
+++ b/test/common/lru_cache_test.cpp
@@ -20,11 +20,13 @@
  * Author: xuchaojie,lixiaocui
  */
-#include
+#include "src/common/lru_cache.h"
+
 #include
+#include
+
 #include
-#include "src/common/lru_cache.h"
 #include "src/common/timeutility.h"
 namespace curve {
 namespace common {
@@ -33,26 +35,26 @@ TEST(TestCacheMetrics, testall) {
     CacheMetrics cacheMetrics("LRUCache");
-    // 1. 新增数据项
+    // 1.
Add Data Item cacheMetrics.UpdateAddToCacheCount(); ASSERT_EQ(1, cacheMetrics.cacheCount.get_value()); cacheMetrics.UpdateAddToCacheBytes(1000); ASSERT_EQ(1000, cacheMetrics.cacheBytes.get_value()); - // 2. 移除数据项 + // 2. Remove Data Item cacheMetrics.UpdateRemoveFromCacheCount(); ASSERT_EQ(0, cacheMetrics.cacheCount.get_value()); cacheMetrics.UpdateRemoveFromCacheBytes(200); ASSERT_EQ(800, cacheMetrics.cacheBytes.get_value()); - // 3. cache命中 + // 3. cache hit ASSERT_EQ(0, cacheMetrics.cacheHit.get_value()); cacheMetrics.OnCacheHit(); ASSERT_EQ(1, cacheMetrics.cacheHit.get_value()); - // 4. cache未命中 + // 4. cache Misses ASSERT_EQ(0, cacheMetrics.cacheMiss.get_value()); cacheMetrics.OnCacheMiss(); ASSERT_EQ(1, cacheMetrics.cacheMiss.get_value()); @@ -60,10 +62,10 @@ TEST(TestCacheMetrics, testall) { TEST(CaCheTest, test_cache_with_capacity_limit) { int maxCount = 5; - auto cache = std::make_shared>(maxCount, - std::make_shared("LruCache")); + auto cache = std::make_shared>( + maxCount, std::make_shared("LruCache")); - // 1. 测试 put/get + // 1. Test put/get uint64_t cacheSize = 0; for (int i = 1; i <= maxCount + 1; i++) { std::string eliminated; @@ -74,8 +76,8 @@ TEST(CaCheTest, test_cache_with_capacity_limit) { } else { cacheSize += std::to_string(i).size() * 2 - std::to_string(1).size() * 2; - ASSERT_EQ( - cacheSize, cache->GetCacheMetrics()->cacheBytes.get_value()); + ASSERT_EQ(cacheSize, + cache->GetCacheMetrics()->cacheBytes.get_value()); } std::string res; @@ -83,7 +85,7 @@ TEST(CaCheTest, test_cache_with_capacity_limit) { ASSERT_EQ(std::to_string(i), res); } - // 2. 第一个元素被剔出 + // 2. The first element is removed std::string res; ASSERT_FALSE(cache->Get(std::to_string(1), &res)); for (int i = 2; i <= maxCount + 1; i++) { @@ -91,17 +93,17 @@ TEST(CaCheTest, test_cache_with_capacity_limit) { ASSERT_EQ(std::to_string(i), res); } - // 3. 测试删除元素 - // 删除不存在的元素 + // 3. Test Delete Element + // Delete non-existent elements cache->Remove("1"); - // 删除list中存在的元素 + // Delete elements present in the list cache->Remove("2"); ASSERT_FALSE(cache->Get("2", &res)); cacheSize -= std::to_string(2).size() * 2; ASSERT_EQ(maxCount - 1, cache->GetCacheMetrics()->cacheCount.get_value()); ASSERT_EQ(cacheSize, cache->GetCacheMetrics()->cacheBytes.get_value()); - // 4. 重复put + // 4. Repeat put std::string eliminated; cache->Put("4", "hello", &eliminated); ASSERT_TRUE(cache->Get("4", &res)); @@ -116,7 +118,7 @@ TEST(CaCheTest, test_cache_with_capacity_no_limit) { auto cache = std::make_shared>( std::make_shared("LruCache")); - // 1. 测试 put/get + // 1. Test put/get std::string res; for (int i = 1; i <= 10; i++) { std::string eliminated; @@ -125,7 +127,7 @@ TEST(CaCheTest, test_cache_with_capacity_no_limit) { ASSERT_EQ(std::to_string(i), res); } - // 2. 测试元素删除 + // 2. Test element deletion cache->Remove("1"); ASSERT_FALSE(cache->Get("1", &res)); } @@ -192,9 +194,7 @@ TEST(CaCheTest, TestCacheGetLastKV) { ASSERT_EQ(1, k); ASSERT_EQ(1, v); } -bool TestFunction(const int& a) { - return a > 1; -} +bool TestFunction(const int& a) { return a > 1; } TEST(CaCheTest, TestCacheGetLastKVWithFunction) { auto cache = std::make_shared>( std::make_shared("LruCache")); @@ -228,10 +228,10 @@ TEST(SglCaCheTest, TestGetBefore) { TEST(SglCaCheTest, test_cache_with_capacity_limit) { int maxCount = 5; - auto cache = std::make_shared>(maxCount, - std::make_shared("LruCache")); + auto cache = std::make_shared>( + maxCount, std::make_shared("LruCache")); - // 1. 测试 put/IsCached + // 1. 
Test put/IsCached
     uint64_t cacheSize = 0;
     for (int i = 1; i <= maxCount; i++) {
         cache->Put(std::to_string(i));
@@ -240,19 +240,19 @@ TEST(SglCaCheTest, test_cache_with_capacity_limit) {
         ASSERT_TRUE(cache->IsCached(std::to_string(i)));
     }
-    // 2. 第一个元素被剔出
+    // 2. The first element is evicted
     cache->Put(std::to_string(11));
     ASSERT_FALSE(cache->IsCached(std::to_string(1)));
-    // 3. 测试删除元素
-    // 删除不存在的元素
+    // 3. Test element removal
+    // Remove a non-existent element
     cache->Remove("1");
-    // 删除list中存在的元素
+    // Remove an element present in the list
     cache->Remove("2");
     ASSERT_FALSE(cache->IsCached("2"));
     ASSERT_EQ(maxCount - 1, cache->GetCacheMetrics()->cacheCount.get_value());
-    // 4. 重复put
+    // 4. Repeat put
     cache->Put("4");
     ASSERT_TRUE(cache->IsCached("4"));
     ASSERT_EQ(maxCount - 1, cache->GetCacheMetrics()->cacheCount.get_value());
@@ -262,7 +262,7 @@ TEST(SglCaCheTest, test_cache_with_capacity_no_limit) {
     auto cache = std::make_shared<SglLRUCache<std::string>>(
         std::make_shared<CacheMetrics>("LruCache"));
-    // 1. 测试 put/IsCached
+    // 1. Test put/IsCached
     std::string res;
     for (int i = 1; i <= 10; i++) {
         std::string eliminated;
         cache->Put(std::to_string(i));
         ASSERT_TRUE(cache->IsCached(std::to_string(i)));
         ASSERT_FALSE(cache->IsCached(std::to_string(100)));
     }
-    // 2. 测试元素删除
+    // 2. Test element removal
     cache->Remove("1");
     ASSERT_FALSE(cache->IsCached("1"));
 }
@@ -315,7 +315,7 @@ TEST(TimedCaCheTest, test_base) {
             ASSERT_EQ(i, cache->GetCacheMetrics()->cacheCount.get_value());
         } else {
             ASSERT_EQ(maxCount,
-                  cache->GetCacheMetrics()->cacheCount.get_value());
+                      cache->GetCacheMetrics()->cacheCount.get_value());
         }
         std::string res;
         ASSERT_TRUE(cache->Get(std::to_string(i), &res));
@@ -355,5 +355,3 @@ TEST(TimedCaCheTest, test_timeout) {
 }  // namespace common
 }  // namespace curve
-
-
diff --git a/test/common/task_thread_pool_test.cpp b/test/common/task_thread_pool_test.cpp
index cb44a36b09..fcb7791d54 100644
--- a/test/common/task_thread_pool_test.cpp
+++ b/test/common/task_thread_pool_test.cpp
@@ -20,26 +20,27 @@
  * Author: wudemiao
 */
+#include "src/common/concurrent/task_thread_pool.h"
+
 #include
-#include
 #include
+#include
 #include "src/common/concurrent/count_down_event.h"
-#include "src/common/concurrent/task_thread_pool.h"
 namespace curve {
 namespace common {
 using curve::common::CountDownEvent;
-void TestAdd1(int a, double b, CountDownEvent *cond) {
+void TestAdd1(int a, double b, CountDownEvent* cond) {
     double c = a + b;
     (void)c;
     cond->Signal();
 }
-int TestAdd2(int a, double b, CountDownEvent *cond) {
+int TestAdd2(int a, double b, CountDownEvent* cond) {
     double c = a + b;
     (void)c;
     cond->Signal();
     return 0;
 }
 TEST(TaskThreadPool, basic) {
-    /* 测试线程池 start 入参 */
+    /* Test the thread pool Start() parameters */
     {
         TaskThreadPool<> taskThreadPool;
         ASSERT_EQ(-1, taskThreadPool.Start(2, 0));
@@ -74,7 +75,7 @@
     }
     {
-        /* 测试不设置,此时为 INT_MAX */
+        /* If not set, it defaults to INT_MAX */
         TaskThreadPool<> taskThreadPool;
         ASSERT_EQ(0, taskThreadPool.Start(4));
         ASSERT_EQ(INT_MAX, taskThreadPool.QueueCapacity());
@@ -92,7 +93,7 @@
         CountDownEvent cond1(1);
         taskThreadPool.Enqueue(TestAdd1, 1, 1.234, &cond1);
         cond1.Wait();
-        /* TestAdd2 是有返回值的 function */
+        /* TestAdd2 is a function with a return value */
         CountDownEvent cond2(1);
         taskThreadPool.Enqueue(TestAdd2, 1, 1.234, &cond2);
         cond2.Wait();
@@ -100,7 +101,7 @@
         taskThreadPool.Stop();
     }
-    /* 基本运行 task 测试 */
+    /* Basic task-running test */
     {
         std::atomic<int> runTaskCount;
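+        // [Editor's sketch -- not part of the original change] This block and
+        // the queue-full test further down rely on TaskThreadPool's
+        // bounded-queue semantics: Enqueue() blocks once QueueSize() reaches
+        // QueueCapacity(). A minimal, hypothetical bounded blocking queue
+        // with the same behavior (needs <mutex>, <condition_variable>,
+        // <deque>; not Curve's actual implementation) could look like:
+        //
+        //   template <typename T>
+        //   class BoundedQueue {
+        //    public:
+        //     explicit BoundedQueue(size_t cap) : cap_(cap) {}
+        //     void Push(T v) {
+        //       std::unique_lock<std::mutex> lk(mu_);
+        //       notFull_.wait(lk, [&] { return q_.size() < cap_; });
+        //       q_.push_back(std::move(v));
+        //       notEmpty_.notify_one();
+        //     }
+        //     T Pop() {
+        //       std::unique_lock<std::mutex> lk(mu_);
+        //       notEmpty_.wait(lk, [&] { return !q_.empty(); });
+        //       T v = std::move(q_.front());
+        //       q_.pop_front();
+        //       notFull_.notify_one();
+        //       return v;
+        //     }
+        //    private:
+        //     size_t cap_;
+        //     std::mutex mu_;
+        //     std::condition_variable notFull_, notEmpty_;
+        //     std::deque<T> q_;
+        //   };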
        runTaskCount.store(0, std::memory_order_release);
@@ -133,14 +134,14 @@ TEST(TaskThreadPool, basic) {
         t2.join();
         t3.join();
-        /* 等待所有 task 执行完毕 */
+        /* Wait for all tasks to finish */
         cond.Wait();
         ASSERT_EQ(3 * kMaxLoop, runTaskCount.load(std::memory_order_acquire));
         taskThreadPool.Stop();
     }
-    /* 测试队列满了,push会阻塞 */
+    /* Test that Enqueue blocks once the queue is full */
     {
         std::atomic<int> runTaskCount;
         runTaskCount.store(0, std::memory_order_release);
@@ -157,8 +158,7 @@ TEST(TaskThreadPool, basic) {
         CountDownEvent cond4(1);
         CountDownEvent startRunCond4(1);
-        auto waitTask = [&](CountDownEvent* sigCond,
-                            CountDownEvent* waitCond) {
+        auto waitTask = [&](CountDownEvent* sigCond, CountDownEvent* waitCond) {
             sigCond->Signal();
             waitCond->Wait();
             runTaskCount.fetch_add(1, std::memory_order_acq_rel);
@@ -169,12 +169,13 @@ TEST(TaskThreadPool, basic) {
         ASSERT_EQ(kQueueCapacity, taskThreadPool.QueueCapacity());
         ASSERT_EQ(kThreadNums, taskThreadPool.ThreadOfNums());
-        /* 把线程池的所有处理线程都卡住了 */
+        /* Block every worker thread in the thread pool */
         taskThreadPool.Enqueue(waitTask, &startRunCond1, &cond1);
         taskThreadPool.Enqueue(waitTask, &startRunCond2, &cond2);
         taskThreadPool.Enqueue(waitTask, &startRunCond3, &cond3);
         taskThreadPool.Enqueue(waitTask, &startRunCond4, &cond4);
-        /* 等待 waitTask1、waitTask2、waitTask3、waitTask4 都开始运行 */
+        /* Wait for waitTask1, waitTask2, waitTask3, and waitTask4 to start
+         * running */
         startRunCond1.Wait();
         startRunCond2.Wait();
         startRunCond3.Wait();
@@ -186,7 +187,7 @@ TEST(TaskThreadPool, basic) {
             runTaskCount.fetch_add(1, std::memory_order_acq_rel);
         };
-        /* 记录线程 push 到线程池 queue 的 task 数量 */
+        /* Record how many tasks each thread pushed into the pool's queue */
        std::atomic<int> pushTaskCount1;
        std::atomic<int> pushTaskCount2;
        std::atomic<int> pushTaskCount3;
@@ -208,7 +209,7 @@ TEST(TaskThreadPool, basic) {
         std::thread t2(std::bind(threadFunc, &pushTaskCount2));
         std::thread t3(std::bind(threadFunc, &pushTaskCount3));
-        /* 等待线程池 queue 被 push 满 */
+        /* Wait for the thread pool queue to fill up */
         int pushTaskCount;
         while (true) {
             ::usleep(50);
@@ -222,32 +223,33 @@ TEST(TaskThreadPool, basic) {
             }
         }
-        /* push 进去的 task 都没有被执行 */
+        /* None of the pushed tasks have been executed yet */
         ASSERT_EQ(0, runTaskCount.load(std::memory_order_acquire));
         /**
-         * 此时,thread pool 的 queue 肯定 push 满了,且 push
-         * 满了之后就没法再 push 了
+         * At this point the thread pool queue must be full, and once it is
+         * full nothing more can be pushed
          */
         ASSERT_EQ(pushTaskCount, taskThreadPool.QueueCapacity());
         ASSERT_EQ(taskThreadPool.QueueCapacity(), taskThreadPool.QueueSize());
-        /* 将线程池中的线程都唤醒 */
+        /* Wake up all the threads in the thread pool */
         cond1.Signal();
         cond2.Signal();
         cond3.Signal();
         cond4.Signal();
-        /* 等待所有 task 执行完成 */
+        /* Wait until every task has finished executing */
         while (true) {
             ::usleep(10);
-            if (runTaskCount.load(std::memory_order_acquire)
-                >= 4 + 3 * kMaxLoop) {
+            if (runTaskCount.load(std::memory_order_acquire) >=
+                4 + 3 * kMaxLoop) {
                 break;
            }
        }
        /**
-         * 等待所有的 push thread 退出,这样才能保证 pushThreadCount 计数更新了
+         * Wait for all push threads to exit so that pushThreadCount is
+         * up to date
         */
        pushThreadCond.Wait();
diff --git a/test/common/test_name_lock.cpp b/test/common/test_name_lock.cpp
index e5520e0a1a..074dd885ce 100644
--- a/test/common/test_name_lock.cpp
+++ b/test/common/test_name_lock.cpp
@@ -21,6 +21,7 @@
  */
 #include
+
 #include
 #include "src/common/concurrent/name_lock.h"
@@ -31,29 +32,27 @@ namespace common {
 TEST(TestNameLock, TestNameLockBasic) {
     NameLock lock1, lock2, lock3;
-    // lock测试
+    //
Lock test
     lock1.Lock("str1");
-    // 同锁不同str可lock不死锁
+    // Same lock, different strs can be locked without deadlock
     lock1.Lock("str2");
-    // 不同锁同str可lock不死锁
+    // Different locks with the same str can be locked without deadlock
     lock2.Lock("str1");
-
-
-    // 同锁同str TryLock失败
+    // Same lock, same str: TryLock fails
     ASSERT_FALSE(lock1.TryLock("str1"));
-    // 同锁不同str TryLock成功
+    // Same lock, different str: TryLock succeeds
     ASSERT_TRUE(lock1.TryLock("str3"));
-    // 不同锁同str TryLock成功
+    // Different locks, same str: TryLock succeeds
     ASSERT_TRUE(lock3.TryLock("str1"));
-    // unlock测试
+    // Unlock test
     lock1.Unlock("str1");
     lock1.Unlock("str2");
     lock1.Unlock("str3");
     lock2.Unlock("str1");
     lock3.Unlock("str1");
-    // 未锁unlock ok
+    // Unlocking a name that was never locked is OK
     lock2.Unlock("str2");
 }
@@ -63,12 +62,13 @@ TEST(TestNameLock, TestNameLockGuardBasic) {
     NameLock lock1, lock2;
     {
         NameLockGuard guard1(lock1, "str1");
         NameLockGuard guard2(lock1, "str2");
         NameLockGuard guard3(lock2, "str1");
-        // 作用域内加锁成功,不可再加锁
+        // Successfully locked within the scope; cannot be locked again
         ASSERT_FALSE(lock1.TryLock("str1"));
         ASSERT_FALSE(lock1.TryLock("str2"));
         ASSERT_FALSE(lock2.TryLock("str1"));
     }
-    // 作用域外自动解锁,可再加锁
+    // Automatically unlocked outside the scope; can be locked
+    // again
     ASSERT_TRUE(lock1.TryLock("str1"));
     ASSERT_TRUE(lock1.TryLock("str2"));
     ASSERT_TRUE(lock2.TryLock("str1"));
@@ -79,14 +79,14 @@ TEST(TestNameLock, TestNameLockConcurrent) {
     NameLock lock1;
-    auto worker = [&] (const std::string &str) {
+    auto worker = [&](const std::string& str) {
         for (int i = 0; i < 10000; i++) {
             NameLockGuard guard(lock1, str);
         }
     };
     std::vector<Thread> threadpool;
-    for (auto &t : threadpool) {
+    for (auto& t : threadpool) {
         std::string str1 = "aaaa";
         std::string str2 = "bbbb";
         std::srand(std::time(nullptr));
         t = Thread(worker, rstr);
     }
-    for (auto &t : threadpool) {
+    for (auto& t : threadpool) {
         t.join();
     }
 }
-
-
-}  // namespace common
-}  // namespace curve
+}  // namespace common
+}  // namespace curve
diff --git a/test/failpoint/failpoint_test.cpp b/test/failpoint/failpoint_test.cpp
index f0096b0ea4..c77f3b6e52 100644
--- a/test/failpoint/failpoint_test.cpp
+++ b/test/failpoint/failpoint_test.cpp
@@ -19,56 +19,56 @@
  * Created Date: Monday May 13th 2019
  * Author: hzsunjianliang
 */
-#include
-#include
 #include
+#include
+#include
+
 #include "test/failpoint/fiu_local.h"
 /*
- * libfiu 使用文档详见:https://blitiri.com.ar/p/libfiu/doc/man-libfiu.html
- * 分为2个部分,一部分是core API,包括fiu_do_on/fiu_return_on/fiu_init
- * core API 用于作用与注入在业务代码处,并由外部control API控制触发。
- * control API 包括:fiu_enable\fiu_disable\fiu_enable_random等等
- * 用于在测试代码处用户进行错误的注入,具体使用方式和方法如下示例代码所示
+ * For detailed documentation on how to use libfiu, see:
+ * https://blitiri.com.ar/p/libfiu/doc/man-libfiu.html. libfiu is split into
+ * two parts. The core API (fiu_do_on/fiu_return_on/fiu_init) is woven into
+ * the business code to mark injection points, and firing them is driven from
+ * outside through the control API. The control API (fiu_enable, fiu_disable,
+ * fiu_enable_random, and so on) is used in test code to inject the errors.
+ * Concrete usage is shown in the code snippets below.
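+ *
+ * [Editor's addition] As a concrete illustration, the enable/disable calls
+ * used by the tests below follow this shape (only APIs already exercised in
+ * this file):
+ *
+ *   fiu_init(0);                              // once, e.g. in SetUp()
+ *   fiu_enable("no_free_space", 1, NULL, 0);  // arm the failure point
+ *   ASSERT_EQ(free_space(), 0);               // fiu_return_on now fires
+ *   fiu_disable("no_free_space");             // disarm it
+ *   ASSERT_EQ(free_space(), 100);             // normal path again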
 */
 namespace curve {
 namespace failpint {
-class FailPointTest: public ::testing::Test {
+class FailPointTest : public ::testing::Test {
 protected:
-    void SetUp() override {
-        fiu_init(0);
-    }
+    void SetUp() override { fiu_init(0); }
     void TearDown() override {
         // noop
     }
 };
-// 注入方式: 通过返回值的方式进行注入
+// Injection method: inject via the return value
 size_t free_space() {
-  fiu_return_on("no_free_space", 0);
-  return 100;
+    fiu_return_on("no_free_space", 0);
+    return 100;
 }
-// 注入方式: 通过side_effet 进行注入
+// Injection method: inject via a side effect
-void modify_state(int *val) {
+void modify_state(int* val) {
     *val += 1;
     fiu_do_on("side_effect", *val += 1);
     return;
 }
-// 注入方式: 通过side_effet 进行注入(lambda方式)
-void modify_state_with_lamda(int &val) {  //NOLINT
-    fiu_do_on("side_effect_2",
-        auto func = [&] () {
-            val++;
-        };
-        func(););
+// Injection method: inject via a side effect (lambda form)
+void modify_state_with_lamda(int& val) {  // NOLINT
+    fiu_do_on(
+        "side_effect_2", auto func = [&]() { val++; }; func(););
     return;
 }
-// 错误触发方式: 总是触发
+// Error triggering method: always triggered
 TEST_F(FailPointTest, alwaysfail) {
     if (fiu_enable("no_free_space", 1, NULL, 0) == 0) {
         ASSERT_EQ(free_space(), 0);
@@ -80,7 +80,7 @@ TEST_F(FailPointTest, alwaysfail) {
     ASSERT_EQ(free_space(), 100);
 }
-// 错误触发方式: 随机触发错误
+// Error triggering method: randomly triggered
 TEST_F(FailPointTest, nondeterministic) {
     if (fiu_enable_random("no_free_space", 1, NULL, 0, 1) == 0) {
         ASSERT_EQ(free_space(), 0);
@@ -144,6 +144,5 @@ TEST_F(FailPointTest, WildZard) {
     }
 }
-
 }  // namespace failpint
 }  // namespace curve
diff --git a/test/fs/ext4_filesystem_test.cpp b/test/fs/ext4_filesystem_test.cpp
index f2c6cfa520..65540555c5 100644
--- a/test/fs/ext4_filesystem_test.cpp
+++ b/test/fs/ext4_filesystem_test.cpp
@@ -21,34 +21,34 @@
 */
 #include
-#include
 #include
-#include
-#include
 #include
+#include
+#include
+
 #include
-#include "test/fs/mock_posix_wrapper.h"
 #include "src/fs/ext4_filesystem_impl.h"
+#include "test/fs/mock_posix_wrapper.h"
 using ::testing::_;
+using ::testing::DoAll;
+using ::testing::ElementsAre;
 using ::testing::Ge;
 using ::testing::Gt;
 using ::testing::Mock;
-using ::testing::DoAll;
+using ::testing::NotNull;
 using ::testing::Return;
+using ::testing::ReturnArg;
 using ::testing::ReturnPointee;
-using ::testing::NotNull;
-using ::testing::StrEq;
-using ::testing::ElementsAre;
 using ::testing::SetArgPointee;
-using ::testing::ReturnArg;
+using ::testing::StrEq;
 namespace curve {
 namespace fs {
-ACTION_TEMPLATE(SetVoidArgPointee,
-                HAS_1_TEMPLATE_PARAMS(int, k),
+ACTION_TEMPLATE(SetVoidArgPointee, HAS_1_TEMPLATE_PARAMS(int, k),
                 AND_1_VALUE_PARAMS(first)) {
     auto output = reinterpret_cast(::testing::get(args));
     *output = first;
 }
 class Ext4LocalFileSystemTest : public testing::Test {
 public:
-    void SetUp() {
-        wrapper = std::make_shared<MockPosixWrapper>();
-        lfs = Ext4FileSystemImpl::getInstance();
-        lfs->SetPosixWrapper(wrapper);
-        errno = 1234;
-    }
+    void SetUp() {
+        wrapper = std::make_shared<MockPosixWrapper>();
+        lfs = Ext4FileSystemImpl::getInstance();
+        lfs->SetPosixWrapper(wrapper);
+        errno = 1234;
+    }
-    void TearDown() {
-        errno = 0;
-        // allows the destructor of lfs_ to be invoked correctly
-        Mock::VerifyAndClear(wrapper.get());
-    }
+    void TearDown() {
+        errno = 0;
+        // allows the destructor of lfs_ to be invoked correctly
+        Mock::VerifyAndClear(wrapper.get());
+    }
 protected:
    std::shared_ptr<MockPosixWrapper> wrapper;
    std::shared_ptr<Ext4FileSystemImpl> lfs;
 };
 TEST_F(Ext4LocalFileSystemTest, InitTest) {
     LocalFileSystemOption option;
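+    // [Editor's sketch -- not part of the original change] InitTest drives
+    // uname() to verify that enableRenameat2 requires a kernel that has
+    // renameat2 (added in Linux 3.15). A hypothetical check consistent with
+    // the expectations below:
+    //
+    //   bool SupportsRenameat2(const char* release) {
+    //     int major = 0, minor = 0;
+    //     if (sscanf(release, "%d.%d", &major, &minor) != 2) return false;
+    //     return major > 3 || (major == 3 && minor >= 15);
+    //   }
+    //
+    // so "2.16.0" and "3.14.19-sss" fail while "3.15.0-sss" and "4.16.0"
+    // pass.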
option.enableRenameat2 = true; struct utsname kernel_info; - // 测试版本偏低的情况 - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", - "2.16.0"); + // Testing with a lower version + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "2.16.0"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(lfs->Init(option), -1); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.14.0-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(lfs->Init(option), -1); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.14.19-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(lfs->Init(option), -1); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.15.0-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(0, lfs->Init(option)); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.15.1-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(0, lfs->Init(option)); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.16.0-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(0, lfs->Init(option)); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", - "4.16.0"); + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "4.16.0"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(0, lfs->Init(option)); } // test Statfs TEST_F(Ext4LocalFileSystemTest, StatfsTest) { FileSystemInfo fsinfo; - EXPECT_CALL(*wrapper, statfs(NotNull(), NotNull())) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, statfs(NotNull(), NotNull())).WillOnce(Return(0)); ASSERT_EQ(lfs->Statfs("./", &fsinfo), 0); - EXPECT_CALL(*wrapper, statfs(NotNull(), NotNull())) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, statfs(NotNull(), NotNull())).WillOnce(Return(-1)); ASSERT_EQ(lfs->Statfs("./", &fsinfo), -errno); } // test Open TEST_F(Ext4LocalFileSystemTest, OpenTest) { - EXPECT_CALL(*wrapper, open(NotNull(), _, _)) - .WillOnce(Return(666)); + EXPECT_CALL(*wrapper, open(NotNull(), _, _)).WillOnce(Return(666)); ASSERT_EQ(lfs->Open("/a", 0), 666); - EXPECT_CALL(*wrapper, open(NotNull(), _, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, open(NotNull(), _, _)).WillOnce(Return(-1)); 
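+    // [Editor's note] SetUp() pins errno to 1234, which makes the expected
+    // failure value below deterministic. The wrappers appear to follow the
+    // usual negative-errno convention, roughly (sketch, not the verified
+    // implementation):
+    //
+    //   int fd = wrapper_->open(path, flags, mode);
+    //   return fd < 0 ? -errno : fd;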
ASSERT_EQ(lfs->Open("/a", 0), -errno); } // test Close TEST_F(Ext4LocalFileSystemTest, CloseTest) { - EXPECT_CALL(*wrapper, close(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, close(_)).WillOnce(Return(0)); ASSERT_EQ(lfs->Close(666), 0); - EXPECT_CALL(*wrapper, close(_)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, close(_)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Close(666), -errno); } @@ -185,32 +156,26 @@ TEST_F(Ext4LocalFileSystemTest, DeleteTest) { fileInfo.st_mode = S_IFREG; // /a is a file EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // /b is a dir EXPECT_CALL(*wrapper, stat(StrEq("/b"), NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<1>(dirInfo), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(dirInfo), Return(0))); // /b/1 is a file EXPECT_CALL(*wrapper, stat(StrEq("/b/1"), NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), Return(0))); DIR* dirp = reinterpret_cast(0x01); struct dirent entryArray[1]; memset(entryArray, 0, sizeof(entryArray)); memcpy(entryArray[0].d_name, "1", 1); - EXPECT_CALL(*wrapper, opendir(StrEq("/b"))) - .WillOnce(Return(dirp)); + EXPECT_CALL(*wrapper, opendir(StrEq("/b"))).WillOnce(Return(dirp)); EXPECT_CALL(*wrapper, readdir(dirp)) .Times(2) .WillOnce(Return(entryArray)) .WillOnce(Return(nullptr)); - EXPECT_CALL(*wrapper, closedir(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, closedir(_)).WillOnce(Return(0)); - EXPECT_CALL(*wrapper, remove(NotNull())) - .WillRepeatedly(Return(0)); + EXPECT_CALL(*wrapper, remove(NotNull())).WillRepeatedly(Return(0)); } // test delete dir @@ -219,8 +184,7 @@ TEST_F(Ext4LocalFileSystemTest, DeleteTest) { ASSERT_EQ(lfs->Delete("/b"), 0); // opendir failed - EXPECT_CALL(*wrapper, opendir(StrEq("/b"))) - .WillOnce(Return(nullptr)); + EXPECT_CALL(*wrapper, opendir(StrEq("/b"))).WillOnce(Return(nullptr)); // List will failed ASSERT_EQ(lfs->Delete("/b"), -errno); } @@ -229,8 +193,7 @@ TEST_F(Ext4LocalFileSystemTest, DeleteTest) { { ASSERT_EQ(lfs->Delete("/a"), 0); // error occured when remove file - EXPECT_CALL(*wrapper, remove(NotNull())) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, remove(NotNull())).WillOnce(Return(-1)); ASSERT_EQ(lfs->Delete("/a"), -errno); } } @@ -242,32 +205,25 @@ TEST_F(Ext4LocalFileSystemTest, MkdirTest) { info.st_mode = S_IFDIR; // success EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); - EXPECT_CALL(*wrapper, mkdir(NotNull(), _)) - .Times(0); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); + EXPECT_CALL(*wrapper, mkdir(NotNull(), _)).Times(0); ASSERT_EQ(lfs->Mkdir("/a"), 0); // stat failed ,mkdir success - EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(Return(-1)); - EXPECT_CALL(*wrapper, mkdir(StrEq("/a"), _)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())).WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, mkdir(StrEq("/a"), _)).WillOnce(Return(0)); ASSERT_EQ(lfs->Mkdir("/a"), 0); // test relative path EXPECT_CALL(*wrapper, stat(_, NotNull())) .Times(2) .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))) .WillOnce(Return(-1)); - EXPECT_CALL(*wrapper, mkdir(StrEq("aaa/bbb"), _)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, mkdir(StrEq("aaa/bbb"), _)).WillOnce(Return(0)); ASSERT_EQ(lfs->Mkdir("aaa/bbb"), 0); // is not a dir, mkdir failed info.st_mode = 
S_IFREG; EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); - EXPECT_CALL(*wrapper, mkdir(NotNull(), _)) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); + EXPECT_CALL(*wrapper, mkdir(NotNull(), _)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Mkdir("/a"), -errno); } @@ -277,19 +233,16 @@ TEST_F(Ext4LocalFileSystemTest, DirExistsTest) { info.st_mode = S_IFDIR; // is dir EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); ASSERT_EQ(lfs->DirExists("/a"), true); // stat failed EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(-1))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(-1))); ASSERT_EQ(lfs->DirExists("/a"), false); // not dir info.st_mode = S_IFREG; EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); ASSERT_EQ(lfs->DirExists("/a"), false); } @@ -299,19 +252,16 @@ TEST_F(Ext4LocalFileSystemTest, FileExistsTest) { info.st_mode = S_IFREG; // is file EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); ASSERT_EQ(lfs->FileExists("/a"), true); // stat failed EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(-1))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(-1))); ASSERT_EQ(lfs->FileExists("/a"), false); // not file info.st_mode = S_IFDIR; EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); ASSERT_EQ(lfs->FileExists("/a"), false); } @@ -320,11 +270,9 @@ TEST_F(Ext4LocalFileSystemTest, RenameTest) { LocalFileSystemOption option; option.enableRenameat2 = false; ASSERT_EQ(0, lfs->Init(option)); - EXPECT_CALL(*wrapper, rename(NotNull(), NotNull())) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, rename(NotNull(), NotNull())).WillOnce(Return(0)); ASSERT_EQ(lfs->Rename("/a", "/b"), 0); - EXPECT_CALL(*wrapper, rename(NotNull(), NotNull())) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, rename(NotNull(), NotNull())).WillOnce(Return(-1)); ASSERT_EQ(lfs->Rename("/a", "/b"), -errno); } @@ -333,13 +281,10 @@ TEST_F(Ext4LocalFileSystemTest, Renameat2Test) { LocalFileSystemOption option; option.enableRenameat2 = true; struct utsname kernel_info; - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.15.1-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(0, lfs->Init(option)); EXPECT_CALL(*wrapper, renameat2(NotNull(), NotNull(), 0)) .WillOnce(Return(0)); @@ -359,20 +304,17 @@ TEST_F(Ext4LocalFileSystemTest, ListTest) { memcpy(entryArray[2].d_name, "1", 1); vector names; // opendir failed - EXPECT_CALL(*wrapper, opendir(StrEq("/a"))) - .WillOnce(Return(nullptr)); + EXPECT_CALL(*wrapper, opendir(StrEq("/a"))).WillOnce(Return(nullptr)); ASSERT_EQ(lfs->List("/a", &names), -errno); // success - EXPECT_CALL(*wrapper, opendir(StrEq("/a"))) - .WillOnce(Return(dirp)); + EXPECT_CALL(*wrapper, opendir(StrEq("/a"))).WillOnce(Return(dirp)); 
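+    // [Editor's sketch] entryArray holds ".", ".." and "1", and the
+    // assertions below expect List() to surface only "1" -- i.e. a readdir
+    // loop that skips the dot entries, plausibly (assumption, not the
+    // verified implementation):
+    //
+    //   struct dirent* ent;
+    //   while ((ent = wrapper_->readdir(dirp)) != nullptr) {
+    //     if (strcmp(ent->d_name, ".") != 0 &&
+    //         strcmp(ent->d_name, "..") != 0) {
+    //       names->push_back(ent->d_name);
+    //     }
+    //   }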
EXPECT_CALL(*wrapper, readdir(dirp)) .Times(4) .WillOnce(Return(entryArray)) .WillOnce(Return(entryArray + 1)) .WillOnce(Return(entryArray + 2)) .WillOnce(Return(nullptr)); - EXPECT_CALL(*wrapper, closedir(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, closedir(_)).WillOnce(Return(0)); ASSERT_EQ(lfs->List("/a", &names), 0); ASSERT_THAT(names, ElementsAre("1")); } @@ -397,13 +339,11 @@ TEST_F(Ext4LocalFileSystemTest, ReadTest) { ASSERT_EQ(lfs->Read(666, buf, 0, 3), 2); ASSERT_STREQ(buf, "12"); // pread failed - EXPECT_CALL(*wrapper, pread(_, NotNull(), _, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, pread(_, NotNull(), _, _)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Read(666, buf, 0, 3), -errno); // set errno = EINTR,and will repeatedly return -1 errno = EINTR; - EXPECT_CALL(*wrapper, pread(_, NotNull(), _, _)) - .WillRepeatedly(Return(-1)); + EXPECT_CALL(*wrapper, pread(_, NotNull(), _, _)).WillRepeatedly(Return(-1)); ASSERT_EQ(lfs->Read(666, buf, 0, 3), -errno); // set errno = EINTR,but only return -1 once errno = EINTR; @@ -418,16 +358,12 @@ TEST_F(Ext4LocalFileSystemTest, ReadTest) { TEST_F(Ext4LocalFileSystemTest, WriteTest) { char buf[4] = {0}; // success - EXPECT_CALL(*wrapper, pwrite(_, buf, _, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*wrapper, pwrite(_, buf + 1, _, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*wrapper, pwrite(_, buf + 2, _, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*wrapper, pwrite(_, buf, _, _)).WillOnce(Return(1)); + EXPECT_CALL(*wrapper, pwrite(_, buf + 1, _, _)).WillOnce(Return(1)); + EXPECT_CALL(*wrapper, pwrite(_, buf + 2, _, _)).WillOnce(Return(1)); ASSERT_EQ(lfs->Write(666, buf, 0, 3), 3); // pwrite failed - EXPECT_CALL(*wrapper, pwrite(_, NotNull(), _, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, pwrite(_, NotNull(), _, _)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Write(666, buf, 0, 3), -errno); // set errno = EINTR,and will repeatedly return -1 errno = EINTR; @@ -509,12 +445,10 @@ TEST_F(Ext4LocalFileSystemTest, WriteIOBufTest) { // test Fallocate TEST_F(Ext4LocalFileSystemTest, FallocateTest) { // success - EXPECT_CALL(*wrapper, fallocate(_, _, _, _)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, fallocate(_, _, _, _)).WillOnce(Return(0)); ASSERT_EQ(lfs->Fallocate(666, 0, 0, 4096), 0); // fallocate failed - EXPECT_CALL(*wrapper, fallocate(_, _, _, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, fallocate(_, _, _, _)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Fallocate(666, 0, 0, 4096), -errno); } @@ -522,31 +456,27 @@ TEST_F(Ext4LocalFileSystemTest, FallocateTest) { TEST_F(Ext4LocalFileSystemTest, FstatTest) { struct stat info; // success - EXPECT_CALL(*wrapper, fstat(_, _)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, fstat(_, _)).WillOnce(Return(0)); ASSERT_EQ(lfs->Fstat(666, &info), 0); // fallocate failed - EXPECT_CALL(*wrapper, fstat(_, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, fstat(_, _)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Fstat(666, &info), -errno); } // test Fsync TEST_F(Ext4LocalFileSystemTest, FsyncTest) { // success - EXPECT_CALL(*wrapper, fsync(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, fsync(_)).WillOnce(Return(0)); ASSERT_EQ(lfs->Fsync(666), 0); // fallocate failed - EXPECT_CALL(*wrapper, fsync(_)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, fsync(_)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Fsync(666), -errno); } TEST_F(Ext4LocalFileSystemTest, ReadRealTest) { std::shared_ptr pw = std::make_shared(); lfs->SetPosixWrapper(pw); - int fd = lfs->Open("a", O_CREAT|O_RDWR); + int fd = lfs->Open("a", 
O_CREAT | O_RDWR); ASSERT_LT(0, fd); // 0 < fd char buf[8192] = {0}; ASSERT_EQ(4096, lfs->Write(fd, buf, 0, 4096)); diff --git a/test/integration/chunkserver/chunkserver_basic_test.cpp b/test/integration/chunkserver/chunkserver_basic_test.cpp index a36bfedcee..30c40442aa 100644 --- a/test/integration/chunkserver/chunkserver_basic_test.cpp +++ b/test/integration/chunkserver/chunkserver_basic_test.cpp @@ -24,8 +24,8 @@ #include #include -#include #include +#include #include "test/chunkserver/datastore/filepool_helper.h" #include "test/integration/common/chunkservice_op.h" @@ -49,24 +49,23 @@ static constexpr uint32_t kOpRequestAlignSize = 4096; const char* kFakeMdsAddr = "127.0.0.1:9079"; -static const char *chunkServerParams[1][16] = { - { "chunkserver", "-chunkServerIp=127.0.0.1", - "-chunkServerPort=" BASIC_TEST_CHUNK_SERVER_PORT, - "-chunkServerStoreUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/", - "-chunkServerMetaUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT - "/chunkserver.dat", - "-copySetUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", - "-raftSnapshotUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", - "-raftLogUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", - "-recycleUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/recycler", - "-chunkFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkfilepool/", - "-chunkFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT - "/chunkfilepool.meta", - "-walFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool/", - "-walFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT - "/walfilepool.meta", - "-conf=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkserver.conf", - "-raft_sync_segments=true", NULL }, +static const char* chunkServerParams[1][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", + "-chunkServerPort=" BASIC_TEST_CHUNK_SERVER_PORT, + "-chunkServerStoreUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/", + "-chunkServerMetaUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT + "/chunkserver.dat", + "-copySetUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-raftSnapshotUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-raftLogUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-recycleUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/recycler", + "-chunkFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkfilepool/", + "-chunkFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT + "/chunkfilepool.meta", + "-walFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool/", + "-walFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool.meta", + "-conf=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkserver.conf", + "-raft_sync_segments=true", NULL}, }; butil::AtExitManager atExitManager; @@ -107,7 +106,7 @@ class ChunkServerIoTest : public testing::Test { paramsIndexs_[PeerCluster::PeerToId(peer1_)] = 0; params_.push_back(const_cast(chunkServerParams[0])); - // 初始化chunkfilepool,这里会预先分配一些chunk + // Initialize chunkfilepool, where some chunks will be pre allocated lfs_ = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); poolDir_ = "./" + std::to_string(PeerCluster::PeerToId(peer1_)) + "/chunkfilepool/"; @@ -125,11 +124,11 @@ class ChunkServerIoTest : public testing::Test { ::system(rmdir1.c_str()); - // 等待进程结束 + // Waiting for the process to end ::usleep(100 * 1000); } - int InitCluster(PeerCluster *cluster) { + int InitCluster(PeerCluster* cluster) { PeerId leaderId; Peer leaderPeer; cluster->SetElectionTimeoutMs(electionTimeoutMs_); @@ -139,7 +138,7 @@ class ChunkServerIoTest : public testing::Test { return 
-1; } - // 等待leader产生 + // Waiting for the leader to be generated if (cluster->WaitLeader(&leaderPeer_)) { LOG(ERROR) << "WaiteLeader failed"; return -1; @@ -168,45 +167,46 @@ class ChunkServerIoTest : public testing::Test { std::string leader = ""; PeerCluster cluster("InitShutdown-cluster", logicPoolId_, copysetId_, - peers_, params_, paramsIndexs_); + peers_, params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, InitCluster(&cluster)); - /* 场景一:新建的文件,Chunk文件不存在 */ + /* Scenario 1: Newly created file, Chunk file does not exist*/ ASSERT_EQ(0, verify->VerifyReadChunk(chunkId, sn1, 0, length, nullptr)); - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunkId, NULL_SN, NULL_SN, leader)); + ASSERT_EQ( + 0, verify->VerifyGetChunkInfo(chunkId, NULL_SN, NULL_SN, leader)); ASSERT_EQ(0, verify->VerifyDeleteChunk(chunkId, sn1)); - /* 场景二:通过WriteChunk产生chunk文件后操作 */ + /* Scenario 2: After generating a chunk file through WriteChunk, perform + * the operation*/ data.assign(length, 'a'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 0, 4 * KB, - data.c_str(), &chunkData)); + data.c_str(), &chunkData)); ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunkId, sn1, NULL_SN, leader)); - ASSERT_EQ(0, verify->VerifyReadChunk( - chunkId, sn1, 0, 4 * KB, &chunkData)); + ASSERT_EQ(0, + verify->VerifyReadChunk(chunkId, sn1, 0, 4 * KB, &chunkData)); ASSERT_EQ(0, verify->VerifyReadChunk(chunkId, sn1, kChunkSize - 4 * KB, - 4 * KB, nullptr)); + 4 * KB, nullptr)); data.assign(length, 'b'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 0, 4 * KB, - data.c_str(), &chunkData)); - ASSERT_EQ(0, - verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); + data.c_str(), &chunkData)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); data.assign(length, 'c'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 4 * KB, 4 * KB, - data.c_str(), &chunkData)); - ASSERT_EQ(0, - verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); + data.c_str(), &chunkData)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); data.assign(length * 2, 'd'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 4 * KB, 8 * KB, - data.c_str(), &chunkData)); - ASSERT_EQ(0, - verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); + data.c_str(), &chunkData)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); - /* 场景三:用户删除文件 */ + /* Scenario 3: User deletes files*/ ASSERT_EQ(0, verify->VerifyDeleteChunk(chunkId, sn1)); - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunkId, NULL_SN, NULL_SN, leader)); + ASSERT_EQ( + 0, verify->VerifyGetChunkInfo(chunkId, NULL_SN, NULL_SN, leader)); } void TestSnapshotIO(std::shared_ptr verify) { @@ -217,150 +217,164 @@ class ChunkServerIoTest : public testing::Test { const SequenceNum sn3 = 3; int length = kOpRequestAlignSize; std::string data(length * 4, 0); - std::string chunkData1a(kChunkSize, 0); // chunk1版本1预期数据 - std::string chunkData1b(kChunkSize, 0); // chunk1版本2预期数据 - std::string chunkData1c(kChunkSize, 0); // chunk1版本3预期数据 - std::string chunkData2(kChunkSize, 0); // chunk2预期数据 + std::string chunkData1a(kChunkSize, + 0); // chunk1 version 1 expected data + std::string chunkData1b(kChunkSize, + 0); // chunk1 version 2 expected data + std::string chunkData1c(kChunkSize, + 0); // chunk1 version 3 expected data + std::string chunkData2(kChunkSize, 0); // chunk2 expected data std::string leader = ""; PeerCluster cluster("InitShutdown-cluster", 
logicPoolId_, copysetId_, - peers_, params_, paramsIndexs_); + peers_, params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, InitCluster(&cluster)); - // 构造初始环境 - // 写chunk1产生chunk1,chunk1版本为1,chunk2开始不存在。 + // Construct initial environment + // Writing chunk1 creates chunk1 at version 1; chunk2 does not exist + // at the start. data.assign(length, 'a'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn1, 0, 12 * KB, - data.c_str(), &chunkData1a)); + data.c_str(), &chunkData1a)); /* - * 场景一:第一次给文件打快照 - */ - chunkData1b.assign(chunkData1a); // 模拟对chunk1数据进行COW + * Scenario 1: Taking a snapshot of a file for the first time + */ + chunkData1b.assign(chunkData1a); // Simulate COW on chunk1 data data.assign(length, 'b'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 4 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b)); - // 重复写入同一区域,用于验证不会重复cow + // Write repeatedly to the same area to verify that COW is not + // repeated data.assign(length, 'c'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 4 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b)); - // 读取chunk1快照,预期读到版本1数据 + // Reading chunk1 snapshot, expected to read version 1 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn1, 0, 12 * KB, - &chunkData1a)); + &chunkData1a)); - // chunk1写[0, 4KB] + // Chunk1 write [0, 4KB] data.assign(length, 'd'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 0, 4 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b)); - // chunk1写[4KB, 16KB] + // Chunk1 write [4KB, 16KB] data.assign(length, 'e'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 12 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b)); - // 获取chunk1信息,预期其版本为2,快照版本为1, + // Obtain chunk1 information, with expected version 2 and snapshot + // version 1 ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn2, sn1, leader)); - // chunk1读[0, 12KB], 预期读到版本2数据 - ASSERT_EQ(0, - verify->VerifyReadChunk(chunk1, sn2, 0, 12 * KB, &chunkData1b)); + // Chunk1 read [0, 12KB], expected to read version 2 data + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunk1, sn2, 0, 12 * KB, &chunkData1b)); - // 读取chunk1的快照, 预期读到版本1数据 + // Reading snapshot of chunk1, expected to read version 1 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn1, 0, 12 * KB, - &chunkData1a)); + &chunkData1a)); - // 读取chunk2的快照, 预期chunk不存在 - ASSERT_EQ(0, verify->VerifyReadChunkSnapshot( - chunk2, sn1, 0, 12 * KB, nullptr)); + // Reading snapshot of chunk2, expected chunk not to exist + ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk2, sn1, 0, 12 * KB, + nullptr)); /* - * 场景二:第一次快照结束,删除快照 - */ - // 删除chunk1快照 + * Scenario 2: The first snapshot ends and the snapshot is deleted + */ + // Delete chunk1 snapshot ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn2)); - // 获取chunk1信息,预期其版本为2,无快照版本 + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn2)); + // Obtain chunk1 information, expect its version to be 2, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn2, NULL_SN, leader)); - // 删chunk2快照,预期成功 + // Delete chunk2 snapshot, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn2)); + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn2)); - // chunk2写[0, 8KB] + // Chunk2 write [0, 8KB] data.assign(length, 'f'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn2, 0, 8 *
KB, - data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为2,无快照版本 + data.c_str(), &chunkData2)); + // Obtain chunk2 information, expect its version to be 2, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn2, NULL_SN, leader)); /* - * 场景三:第二次打快照 - */ - // chunk1写[0, 8KB] - chunkData1c.assign(chunkData1b); // 模拟对chunk1数据进行COW + * Scenario 3: Taking a second snapshot + */ + // Chunk1 write [0, 8KB] + chunkData1c.assign(chunkData1b); // Simulate COW on chunk1 data data.assign(length, 'g'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn3, 0, 8 * KB, - data.c_str(), &chunkData1c)); - // 获取chunk1信息,预期其版本为3,快照版本为2 + data.c_str(), &chunkData1c)); + // Obtain chunk1 information, expect its version to be 3 and snapshot + // version to be 2 ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn3, sn2, leader)); - // 读取chunk1的快照, 预期读到版本2数据 + // Reading snapshot of chunk1, expected to read version 2 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn2, 0, 12 * KB, - &chunkData1b)); + &chunkData1b)); - // 读取chunk2的快照, 预期读到版本2数据 + // Reading snapshot of chunk2, expected to read version 2 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk2, sn2, 0, 8 * KB, - &chunkData2)); + &chunkData2)); - // 删除chunk1文件,预期成功,本地快照存在的情况下,会将快照也一起删除 + // Delete chunk1 file, expected success. If the local snapshot exists, + // the snapshot will also be deleted together ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunk(chunk1, sn3)); + verify->VerifyDeleteChunk(chunk1, sn3)); /* - * 场景四:第二次快照结束,删除快照 - */ - // 删除chunk1快照,因为chunk1及其快照上一步已经删除,预期成功 + * Scenario 4: The second snapshot ends and the snapshot is deleted + */ + // Delete chunk1 snapshot because chunk1 and its snapshot have been + // deleted in the previous step and are expected to succeed ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn3)); - // 获取chunk1信息,预期不存在 - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunk1, NULL_SN, NULL_SN, leader)); + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn3)); + // Obtaining chunk1 information, expected not to exist + ASSERT_EQ(0, + verify->VerifyGetChunkInfo(chunk1, NULL_SN, NULL_SN, leader)); - // 删除chunk2快照,预期成功 + // Delete chunk2 snapshot, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn3)); - // 获取chunk2信息,预期其版本为2,无快照版本 + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn3)); + // Obtain chunk2 information, expect its version to be 2, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn2, NULL_SN, leader)); - // chunk2写[0, 4KB] + // Chunk2 write [0, 4KB] data.assign(length, 'h'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn3, 0, 4 * KB, - data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为3,无快照版本 + data.c_str(), &chunkData2)); + // Obtain chunk2 information, expect its version to be 3, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn3, NULL_SN, leader)); - // chunk2写[0, 4KB] + // Chunk2 write [0, 4KB] data.assign(length, 'i'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn3, 0, 4 * KB, data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为3,无快照版本 + // Obtain chunk2 information, expect its version to be 3, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn3, NULL_SN, leader)); /* - * 场景五:用户删除文件 - */ - // 删除chunk1,已不存在,预期成功 + * Scenario 5: User deletes files + */ + // Delete chunk1, it no longer exists, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunk(chunk1, 
sn3)); - // 获取chunk1信息,预期不存在 - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunk1, NULL_SN, NULL_SN, leader)); - // 删除chunk2,预期成功 + verify->VerifyDeleteChunk(chunk1, sn3)); + // Obtaining chunk1 information, expected not to exist + ASSERT_EQ(0, + verify->VerifyGetChunkInfo(chunk1, NULL_SN, NULL_SN, leader)); + // Delete chunk2, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunk(chunk2, sn3)); - // 获取chunk2信息,预期不存在 - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunk2, NULL_SN, NULL_SN, leader)); + verify->VerifyDeleteChunk(chunk2, sn3)); + // Obtaining chunk2 information, expected not to exist + ASSERT_EQ(0, + verify->VerifyGetChunkInfo(chunk2, NULL_SN, NULL_SN, leader)); } public: @@ -370,7 +384,7 @@ class ChunkServerIoTest : public testing::Test { CopysetID copysetId_; std::map paramsIndexs_; - std::vector params_; + std::vector params_; std::string externalIp_; private: @@ -391,8 +405,8 @@ class ChunkServerIoTest : public testing::Test { * */ TEST_F(ChunkServerIoTest, BasicIO) { - struct ChunkServiceOpConf opConf = { &leaderPeer_, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&leaderPeer_, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestBasicIO(verify); } @@ -401,15 +415,15 @@ TEST_F(ChunkServerIoTest, BasicIO_from_external_ip) { Peer exPeer; exPeer.set_address(externalIp_ + ":" + BASIC_TEST_CHUNK_SERVER_PORT + ":0"); - struct ChunkServiceOpConf opConf = { &exPeer, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&exPeer, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestBasicIO(verify); } TEST_F(ChunkServerIoTest, SnapshotIO) { - struct ChunkServiceOpConf opConf = { &leaderPeer_, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&leaderPeer_, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestSnapshotIO(verify); } @@ -417,8 +431,8 @@ TEST_F(ChunkServerIoTest, SnapshotIO) { TEST_F(ChunkServerIoTest, SnapshotIO_from_external_ip) { Peer exPeer; exPeer.set_address(externalIp_ + ":" + BASIC_TEST_CHUNK_SERVER_PORT + ":0"); - struct ChunkServiceOpConf opConf = { &exPeer, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&exPeer, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestSnapshotIO(verify); } diff --git a/test/integration/chunkserver/chunkserver_clone_recover.cpp b/test/integration/chunkserver/chunkserver_clone_recover.cpp index 58ce282336..0aae174746 100644 --- a/test/integration/chunkserver/chunkserver_clone_recover.cpp +++ b/test/integration/chunkserver/chunkserver_clone_recover.cpp @@ -20,9 +20,9 @@ * Author: qinyi */ -#include -#include #include +#include +#include #include #include @@ -30,14 +30,14 @@ #include #include "include/client/libcurve.h" -#include "src/common/s3_adapter.h" -#include "src/common/timeutility.h" -#include "src/client/inflight_controller.h" #include "src/chunkserver/cli2.h" +#include "src/client/inflight_controller.h" #include "src/common/concurrent/count_down_event.h" -#include "test/integration/common/chunkservice_op.h" +#include "src/common/s3_adapter.h" +#include "src/common/timeutility.h" #include "test/integration/client/common/file_operation.h" #include "test/integration/cluster_common/cluster.h" +#include "test/integration/common/chunkservice_op.h" #include "test/util/config_generator.h" using curve::CurveCluster; @@ -91,11 +91,11 @@ const uint32_t kChunkSize = 16 * 1024 * 1024; const uint32_t 
kChunkServerMaxIoSize = 64 * 1024; const std::vector mdsConf0{ - { "--confPath=" + MDS0_CONF_PATH }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--mdsDbName=" + CSCLONE_TEST_MDS_DBNAME }, - { "--sessionInterSec=20" }, - { "--etcdAddr=" + ETCD_CLIENT_IP_PORT }, + {"--confPath=" + MDS0_CONF_PATH}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--mdsDbName=" + CSCLONE_TEST_MDS_DBNAME}, + {"--sessionInterSec=20"}, + {"--etcdAddr=" + ETCD_CLIENT_IP_PORT}, }; const std::vector mdsFileConf0{ @@ -129,73 +129,67 @@ const std::vector csCommonConf{ }; const std::vector chunkserverConf1{ - { "-chunkServerStoreUri=local://" + CHUNKSERVER0_BASE_DIR }, - { "-chunkServerMetaUri=local://" + CHUNKSERVER0_BASE_DIR + - "/chunkserver.dat" }, - { "-copySetUri=local://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, - { "-raftSnapshotUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, - { "-raftLogUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, - { "-recycleUri=local://" + CHUNKSERVER0_BASE_DIR + "/recycler" }, - { "-chunkFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool" }, - { "-chunkFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + - "/chunkfilepool.meta" }, - { "-conf=" + CHUNKSERVER_CONF_PATH }, - { "-raft_sync_segments=true" }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=" + CHUNK_SERVER0_PORT }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/walfilepool" }, - { "-walFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://" + CHUNKSERVER0_BASE_DIR}, + {"-chunkServerMetaUri=local://" + CHUNKSERVER0_BASE_DIR + + "/chunkserver.dat"}, + {"-copySetUri=local://" + CHUNKSERVER0_BASE_DIR + "/copysets"}, + {"-raftSnapshotUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets"}, + {"-raftLogUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets"}, + {"-recycleUri=local://" + CHUNKSERVER0_BASE_DIR + "/recycler"}, + {"-chunkFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool"}, + {"-chunkFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool.meta"}, + {"-conf=" + CHUNKSERVER_CONF_PATH}, + {"-raft_sync_segments=true"}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=" + CHUNK_SERVER0_PORT}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/walfilepool"}, + {"-walFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/walfilepool.meta"}}; const std::vector chunkserverConf2{ - { "-chunkServerStoreUri=local://" + CHUNKSERVER1_BASE_DIR }, - { "-chunkServerMetaUri=local://" + CHUNKSERVER1_BASE_DIR + - "/chunkserver.dat" }, - { "-copySetUri=local://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, - { "-raftSnapshotUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, - { "-raftLogUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, - { "-recycleUri=local://" + CHUNKSERVER1_BASE_DIR + "/recycler" }, - { "-chunkFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/filepool" }, - { "-chunkFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + - "/chunkfilepool.meta" }, - { "-conf=" + CHUNKSERVER_CONF_PATH }, - { "-raft_sync_segments=true" }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=" + CHUNK_SERVER1_PORT }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=" + CHUNKSERVER1_BASE_DIR + 
"/walfilepool" }, - { "-walFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://" + CHUNKSERVER1_BASE_DIR}, + {"-chunkServerMetaUri=local://" + CHUNKSERVER1_BASE_DIR + + "/chunkserver.dat"}, + {"-copySetUri=local://" + CHUNKSERVER1_BASE_DIR + "/copysets"}, + {"-raftSnapshotUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets"}, + {"-raftLogUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets"}, + {"-recycleUri=local://" + CHUNKSERVER1_BASE_DIR + "/recycler"}, + {"-chunkFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/filepool"}, + {"-chunkFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/chunkfilepool.meta"}, + {"-conf=" + CHUNKSERVER_CONF_PATH}, + {"-raft_sync_segments=true"}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=" + CHUNK_SERVER1_PORT}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/walfilepool"}, + {"-walFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/walfilepool.meta"}}; const std::vector chunkserverConf3{ - { "-chunkServerStoreUri=local://" + CHUNKSERVER2_BASE_DIR }, - { "-chunkServerMetaUri=local://" + CHUNKSERVER2_BASE_DIR + - "/chunkserver.dat" }, - { "-copySetUri=local://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, - { "-raftSnapshotUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, - { "-raftLogUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, - { "-recycleUri=local://" + CHUNKSERVER2_BASE_DIR + "/recycler" }, - { "-chunkFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/filepool" }, - { "-chunkFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + - "/chunkfilepool.meta" }, - { "-conf=" + CHUNKSERVER_CONF_PATH }, - { "-raft_sync_segments=true" }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=" + CHUNK_SERVER2_PORT }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/walfilepool" }, - { "-walFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://" + CHUNKSERVER2_BASE_DIR}, + {"-chunkServerMetaUri=local://" + CHUNKSERVER2_BASE_DIR + + "/chunkserver.dat"}, + {"-copySetUri=local://" + CHUNKSERVER2_BASE_DIR + "/copysets"}, + {"-raftSnapshotUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets"}, + {"-raftLogUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets"}, + {"-recycleUri=local://" + CHUNKSERVER2_BASE_DIR + "/recycler"}, + {"-chunkFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/filepool"}, + {"-chunkFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/chunkfilepool.meta"}, + {"-conf=" + CHUNKSERVER_CONF_PATH}, + {"-raft_sync_segments=true"}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=" + CHUNK_SERVER2_PORT}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/walfilepool"}, + {"-walFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/walfilepool.meta"}}; namespace curve { namespace chunkserver { @@ -203,7 +197,9 @@ namespace chunkserver { class CSCloneRecoverTest : public ::testing::Test { public: CSCloneRecoverTest() - : logicPoolId_(1), copysetId_(1), chunkData1_(kChunkSize, 'X'), + : logicPoolId_(1), + copysetId_(1), + chunkData1_(kChunkSize, 'X'), chunkData2_(kChunkSize, 'Y') {} void SetUp() { @@ -217,11 +213,11 @@ class CSCloneRecoverTest : public ::testing::Test { s3Conf); 
cluster_->PrepareConfig(MDS0_CONF_PATH, mdsFileConf0); - // 生成chunkserver配置文件 + // Generate chunkserver configuration file cluster_->PrepareConfig(CHUNKSERVER_CONF_PATH, csCommonConf); - // 1. 启动etcd + // 1. Start etcd LOG(INFO) << "begin to start etcd"; pid_t pid = cluster_->StartSingleEtcd( 1, ETCD_CLIENT_IP_PORT, ETCD_PEER_IP_PORT, @@ -231,19 +227,20 @@ class CSCloneRecoverTest : public ::testing::Test { ASSERT_GT(pid, 0); ASSERT_TRUE(cluster_->WaitForEtcdClusterAvalible(5)); - // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 + // 2. Start one mds first, make it a leader, and then start the other + // two mds nodes pid = cluster_->StartSingleMDS(0, MDS0_IP_PORT, MDS0_DUMMY_PORT, - mdsConf0, true); + mdsConf0, true); LOG(INFO) << "mds 0 started on " + MDS0_IP_PORT + ", pid = " << pid; ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); - // 生成topo.json + // Generate topo.json Json::Value topo; Json::Value servers; std::string chunkServerIpPort[] = {CHUNK_SERVER0_IP_PORT, - CHUNK_SERVER1_IP_PORT, - CHUNK_SERVER2_IP_PORT}; + CHUNK_SERVER1_IP_PORT, + CHUNK_SERVER2_IP_PORT}; for (int i = 0; i < 3; ++i) { Json::Value server; std::vector ipPort; @@ -278,7 +275,7 @@ class CSCloneRecoverTest : public ::testing::Test { topoConf << topo.toStyledString(); topoConf.close(); - // 3. 创建物理池 + // 3. Create a physical pool string createPPCmd = string("./bazel-bin/tools/curvefsTool") + string(" -cluster_map=" + CSCLONE_BASE_DIR + "/topo.json") + @@ -291,13 +288,12 @@ class CSCloneRecoverTest : public ::testing::Test { while (retry < 5) { LOG(INFO) << "exec createPPCmd: " << createPPCmd; ret = system(createPPCmd.c_str()); - if (ret == 0) - break; + if (ret == 0) break; retry++; } ASSERT_EQ(ret, 0); - // 4. 创建chunkserver + // 4. Create chunkservers pid = cluster_->StartSingleChunkServer(1, CHUNK_SERVER0_IP_PORT, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " + CHUNK_SERVER0_IP_PORT + @@ -319,7 +315,8 @@ class CSCloneRecoverTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // 5. Create a logical pool and sleep for a period of time to let the + // underlying copysets elect leaders first string createLPCmd = string("./bazel-bin/tools/curvefsTool") + string(" -cluster_map=" + CSCLONE_BASE_DIR + "/topo.json") + @@ -331,27 +328,26 @@ class CSCloneRecoverTest : public ::testing::Test { while (retry < 5) { LOG(INFO) << "exec createLPCmd: " << createLPCmd; ret = system(createLPCmd.c_str()); - if (ret == 0) - break; + if (ret == 0) break; retry++; } ASSERT_EQ(ret, 0); std::this_thread::sleep_for(std::chrono::seconds(5)); - // 获取chunkserver主节点 + // Obtain the chunkserver leader logicPoolId_ = 1; copysetId_ = 1; ASSERT_EQ(0, chunkSeverGetLeader()); - struct ChunkServiceOpConf conf0 = { &leaderPeer_, logicPoolId_, - copysetId_, 5000 }; + struct ChunkServiceOpConf conf0 = {&leaderPeer_, logicPoolId_, + copysetId_, 5000}; opConf_ = conf0; - // 6. 初始化client配置 + // 6. Initialize client configuration LOG(INFO) << "init globalclient"; ret = Init(clientConfPath.c_str()); ASSERT_EQ(ret, 0); - // 7. 先睡眠5s,让chunkserver选出leader + // 7.
Sleep for 5 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(5)); s3Adapter_.Init(kS3ConfigPath); @@ -417,10 +413,10 @@ class CSCloneRecoverTest : public ::testing::Test { system(("mkdir " + CHUNKSERVER2_BASE_DIR + "/filepool").c_str())); } - /**下发一个写请求并等待完成 - * @param: offset是当前需要下发IO的偏移 - * @param: size是下发IO的大小 - * @return: IO是否成功完成 + /** Send a write request and wait for completion + * @param: offset is the offset that currently requires issuing IO + * @param: size is the size of the issued IO + * @return: Is IO successfully completed */ bool HandleAioWriteRequest(uint64_t offset, uint64_t size, const char* data) { @@ -432,7 +428,8 @@ class CSCloneRecoverTest : public ::testing::Test { char* buffer = reinterpret_cast(context->buf); delete[] buffer; delete context; - // 无论IO是否成功,只要返回,就触发cond + // Regardless of whether IO is successful or not, as long as it + // returns, it triggers cond gCond.Signal(); }; @@ -447,8 +444,7 @@ class CSCloneRecoverTest : public ::testing::Test { int ret; if ((ret = AioWrite(fd_, context))) { - LOG(ERROR) << "failed to send aio write request, err=" - << ret; + LOG(ERROR) << "failed to send aio write request, err=" << ret; return false; } @@ -460,11 +456,11 @@ class CSCloneRecoverTest : public ::testing::Test { return true; } - /**下发一个读请求并等待完成 - * @param: offset是当前需要下发IO的偏移 - * @param: size是下发IO的大小 - * @data: 读出的数据 - * @return: IO是否成功完成 + /** Send a read request and wait for completion + * @param: offset is the offset that currently requires issuing IO + * @param: size is the size of the issued IO + * @data: Read out data + * @return: Is IO successfully completed */ bool HandleAioReadRequest(uint64_t offset, uint64_t size, char* data) { gCond.Reset(1); @@ -473,7 +469,8 @@ class CSCloneRecoverTest : public ::testing::Test { auto readCallBack = [](CurveAioContext* context) { gIoRet = context->ret; delete context; - // 无论IO是否成功,只要返回,就触发cond + // Regardless of whether IO is successful or not, as long as it + // returns, it triggers cond gCond.Signal(); }; @@ -485,8 +482,7 @@ class CSCloneRecoverTest : public ::testing::Test { context->cb = readCallBack; int ret; if ((ret = AioRead(fd_, context))) { - LOG(ERROR) << "failed to send aio read request, err=" - << ret; + LOG(ERROR) << "failed to send aio read request, err=" << ret; return false; } @@ -547,7 +543,7 @@ class CSCloneRecoverTest : public ::testing::Test { return -1; } - // 先睡眠5s,让chunkserver选出leader + // Sleep for 5 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(5)); status = curve::chunkserver::GetLeader(logicPoolId_, copysetId_, csConf, &leaderPeer_); @@ -559,26 +555,26 @@ class CSCloneRecoverTest : public ::testing::Test { } void prepareSourceDataInCurve() { - // 创建一个curveFS文件 + // Create a curveFS file LOG(INFO) << "create source curveFS file: " << CURVEFS_FILENAME; fd_ = curve::test::FileCommonOperation::Open(CURVEFS_FILENAME, "curve"); ASSERT_NE(fd_, -1); - // 写数据到curveFS的第1个chunk + // Write data to the first chunk of curveFS LOG(INFO) << "Write first 16MB of source curveFS file"; ASSERT_TRUE(HandleAioWriteRequest(0, kChunkSize, chunkData1_.c_str())); - // 读出数据进行验证 + // Read data for verification std::unique_ptr temp(new char[kChunkSize]); ASSERT_TRUE(HandleAioReadRequest(0, kChunkSize, temp.get())); ASSERT_EQ(0, strncmp(chunkData1_.c_str(), temp.get(), kChunkSize)); - // 写数据到curveFS的第2个chunk + // Write data to the second chunk of curveFS LOG(INFO) << "Write second 16MB of source curveFS file"; 
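For reference, the gCond used by HandleAioWriteRequest and HandleAioReadRequest above is the CountDownEvent pulled in via src/common/concurrent/count_down_event.h: each aio callback calls Signal() exactly once, whether the IO succeeded or not, and the issuing thread blocks until the count reaches zero. A minimal stand-in with the same Reset/Signal/Wait surface, for illustration only and not Curve's implementation:

#include <condition_variable>
#include <mutex>

// Simplified latch mirroring how the tests use gCond: Reset(n) arms it,
// each callback Signal()s it, Wait() blocks until the count hits zero.
class CountDownLatch {
 public:
    void Reset(int n) {
        std::lock_guard<std::mutex> lk(mu_);
        count_ = n;
    }
    void Signal() {
        std::lock_guard<std::mutex> lk(mu_);
        if (count_ > 0 && --count_ == 0) cv_.notify_all();
    }
    void Wait() {
        std::unique_lock<std::mutex> lk(mu_);
        cv_.wait(lk, [this] { return count_ == 0; });
    }

 private:
    std::mutex mu_;
    std::condition_variable cv_;
    int count_ = 0;
};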
ASSERT_TRUE( HandleAioWriteRequest(kChunkSize, kChunkSize, chunkData2_.c_str())); - // 读出数据进行验证 + // Read data for verification ASSERT_TRUE(HandleAioReadRequest(kChunkSize, kChunkSize, temp.get())); ASSERT_EQ(0, strncmp(chunkData2_.c_str(), temp.get(), kChunkSize)); @@ -613,14 +609,14 @@ class CSCloneRecoverTest : public ::testing::Test { bool s3ObjExisted_; }; -// 场景一:通过ReadChunk从curve恢复克隆文件 +// Scenario 1: Restore clone files from curve through ReadChunk TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { LOG(INFO) << "current case: CloneFromCurveByReadChunk"; - // 0. 在curve中写入源数据 + // 0. Write source data in curve prepareSourceDataInCurve(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 331; ChunkID cloneChunk2 = 332; @@ -633,7 +629,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -647,7 +643,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过readchunk恢复克隆文件 + // 2. Restoring cloned files through readchunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, cloneData1.get())); @@ -667,17 +663,18 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { - ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, - kChunkServerMaxIoSize, - cloneData1.get())); + ASSERT_EQ( + 0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, + kChunkServerMaxIoSize, cloneData1.get())); } /** - * clone文件遍读后不会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Clone files will not be converted to regular chunk1 files after being + * read through. Write by increasing the version: if it is a clone chunk, + * the write will fail; if it is a regular chunk, a snapshot file will be + * generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -685,19 +682,19 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn1)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景二:通过RecoverChunk从curve恢复克隆文件 +// Scenario 2: Restore cloned files from curve through RecoverChunk TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { LOG(INFO) << "current case: CloneFromCurveByRecoverChunk"; - // 0. 在curve中写入源数据 + // 0. Write source data in curve prepareSourceDataInCurve(); - // 1. 创建克隆文件 + // 1.
Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 333; ChunkID cloneChunk2 = 334; @@ -710,7 +707,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -724,7 +721,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过RecoverChunk恢复克隆文件 + // 2. Recovering cloned files through RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, @@ -750,16 +747,17 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过RecoverChunk恢复clone chunk1的所有pages + // Restore all pages of clone chunk1 through RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** - * 预期clone文件会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件,写成功。 + * Expected clone file to be converted to regular chunk1 file + * Write by increasing the version, + * If it is a clone chunk, the write will fail; If it is a regular chunk, a + * snapshot file will be generated and successfully written. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -767,19 +765,19 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景三:lazy allocate场景下读克隆文件 +// Scenario 3: Lazy allocate scenario: Reading clone files TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { LOG(INFO) << "current case: CloneFromCurveByReadChunkWhenLazyAlloc"; - // 0. 在curve中写入源数据 + // 0. Write source data in curve prepareSourceDataInCurve(); - // 1. chunk文件不存在 + // 1. Chunk file does not exist ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 331; SequenceNum sn1 = 1; @@ -802,9 +800,9 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 将leader切换到follower + // Switch leader to follower ASSERT_EQ(0, TransferLeaderToFollower()); - // 2. 通过readchunk恢复克隆文件 + // 2. 
Restoring cloned files through readchunk ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get(), CURVEFS_FILENAME, 0)); @@ -817,7 +815,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get(), CURVEFS_FILENAME, 0)); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk string ioBuf(kChunkServerMaxIoSize, 'c'); for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyWriteChunk( @@ -828,9 +826,10 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { cloneData1.get(), CURVEFS_FILENAME, 0)); /** - * clone文件遍写后会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * The clone file will be converted to a regular chunk1 file after being + * fully overwritten. Write by increasing the version: if it is a clone + * chunk, the write will fail; if it is a regular chunk, a snapshot file + * will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -838,18 +837,18 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); } -// 场景四:通过ReadChunk从S3恢复克隆文件 +// Scenario 4: Restore cloned files from S3 through ReadChunk TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { LOG(INFO) << "current case: CloneFromS3ByReadChunk"; - // 0. 在S3中写入源数据 + // 0. Write source data in S3 prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 335; ChunkID cloneChunk2 = 336; @@ -862,7 +861,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -875,7 +874,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过readchunk恢复克隆文件 + // 2. Restoring cloned files through readchunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, cloneData1.get())); @@ -895,17 +894,18 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { - ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, - kChunkServerMaxIoSize, - cloneData1.get())); + ASSERT_EQ( + 0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, + kChunkServerMaxIoSize, cloneData1.get())); } /** - * 预期clone文件遍读后不会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * It is expected that the clone file will not be converted to a regular + * chunk1 file after being read through. Write by increasing the version: + * if it is a clone chunk, the write will fail; if it is a regular chunk, + * a snapshot file will be generated.
*/ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -913,19 +913,19 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn1)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景五:通过RecoverChunk从S3恢复克隆文件 +// Scenario 5: Restore cloned files from S3 through RecoverChunk TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { LOG(INFO) << "current case: CloneFromS3ByRecoverChunk"; - // 0. 在S3中写入源数据 + // 0. Write source data in S3 prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 337; ChunkID cloneChunk2 = 338; @@ -938,7 +938,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -951,7 +951,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过RecoverChunk恢复克隆文件 + // 2. Recovering cloned files through RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, @@ -977,16 +977,17 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过RecoverChunk恢复clone chunk1的所有pages + // Restore all pages of clone chunk1 through RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** - * 预期clone文件会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Expected clone file to be converted to regular chunk1 file + * Write by increasing the version, + * If it is a clone chunk, the write will fail; If it is a regular chunk, a + * snapshot file will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -994,19 +995,19 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景六:通过ReadChunk从S3恢复 +// Scenario 6: Restore from S3 through ReadChunk TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { LOG(INFO) << "current case: RecoverFromS3ByReadChunk"; - // 0. 构造数据上传到S3,模拟转储 + // 0. Upload construction data to S3 and simulate dump prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 339; SequenceNum sn2 = 2; @@ -1018,13 +1019,13 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn3, sn2, kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 2. 通过readchunk恢复克隆文件 + // 2. 
Restoring cloned files through readchunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 8 * KB, cloneData1.get())); @@ -1044,17 +1045,18 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 12 * KB, cloneData1.get())); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { - ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, offset, - kChunkServerMaxIoSize, - cloneData1.get())); + ASSERT_EQ( + 0, verify.VerifyReadChunk(cloneChunk1, sn3, offset, + kChunkServerMaxIoSize, cloneData1.get())); } /** - * 预期clone文件不会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Expected clone file not to be converted to regular chunk1 file + * Write by increasing the version, + * If it is a clone chunk, the write will fail; If it is a regular chunk, a + * snapshot file will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn3, NULL_SN, string(""))); @@ -1062,18 +1064,18 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { verify.VerifyWriteChunk(cloneChunk1, sn4, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn3)); } -// 场景七:通过RecoverChunk从S3恢复 +// Scenario 7: Recovering from S3 through RecoverChunk TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { LOG(INFO) << "current case: RecoverFromS3ByRecoverChunk"; - // 0. 在S3中写入源数据 + // 0. Write source data in S3 prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 341; SequenceNum sn2 = 2; @@ -1085,13 +1087,13 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn3, sn2, kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 2. 通过RecoverChunk恢复克隆文件 + // 2. Recovering cloned files through RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 8 * KB, @@ -1117,16 +1119,17 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 12 * KB, cloneData1.get())); - // 通过RecoverChunk恢复clone chunk1的所有pages + // Restore all pages of clone chunk1 through RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** - * 预期clone文件会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Expected clone file to be converted to regular chunk1 file + * Write by increasing the version, + * If it is a clone chunk, the write will fail; If it is a regular chunk, a + * snapshot file will be generated. 
*/ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn3, NULL_SN, string(""))); @@ -1134,7 +1137,7 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn4, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn4)); } diff --git a/test/integration/chunkserver/chunkserver_concurrent_test.cpp b/test/integration/chunkserver/chunkserver_concurrent_test.cpp index a5ac75a823..acf24fc63a 100644 --- a/test/integration/chunkserver/chunkserver_concurrent_test.cpp +++ b/test/integration/chunkserver/chunkserver_concurrent_test.cpp @@ -21,72 +21,60 @@ */ #include -#include #include +#include #include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" +#include "src/common/concurrent/concurrent.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "src/common/concurrent/concurrent.h" -#include "test/integration/common/peer_cluster.h" #include "test/chunkserver/datastore/filepool_helper.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::common::Thread; +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; -using curve::common::Thread; static const char* kFakeMdsAddr = "127.0.0.1:9329"; constexpr uint32_t kOpRequestAlignSize = 4096; -static const char *chunkConcurrencyParams1[1][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9076", - "-chunkServerStoreUri=local://./9076/", - "-chunkServerMetaUri=local://./9076/chunkserver.dat", - "-copySetUri=local://./9076/copysets", - "-raftSnapshotUri=curve://./9076/copysets", - "-raftLogUri=curve://./9076/copysets", - "-recycleUri=local://./9076/recycler", - "-chunkFilePoolDir=./9076/chunkfilepool/", - "-chunkFilePoolMetaPath=./9076/chunkfilepool.meta", - "-walFilePoolDir=./9076/walfilepool/", - "-walFilePoolMetaPath=./9076/walfilepool.meta", - "-conf=./9076/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, +static const char* chunkConcurrencyParams1[1][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9076", + "-chunkServerStoreUri=local://./9076/", + "-chunkServerMetaUri=local://./9076/chunkserver.dat", + "-copySetUri=local://./9076/copysets", + "-raftSnapshotUri=curve://./9076/copysets", + "-raftLogUri=curve://./9076/copysets", + "-recycleUri=local://./9076/recycler", + "-chunkFilePoolDir=./9076/chunkfilepool/", + "-chunkFilePoolMetaPath=./9076/chunkfilepool.meta", + "-walFilePoolDir=./9076/walfilepool/", + "-walFilePoolMetaPath=./9076/walfilepool.meta", + "-conf=./9076/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; -static const char *chunkConcurrencyParams2[1][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9077", - "-chunkServerStoreUri=local://./9077/", - "-chunkServerMetaUri=local://./9077/chunkserver.dat", - "-copySetUri=local://./9077/copysets", - "-raftSnapshotUri=curve://./9077/copysets", - "-raftLogUri=curve://./9077/copysets", - "-recycleUri=local://./9077/recycler", - "-chunkFilePoolDir=./9077/chunkfilepool/", - "-chunkFilePoolMetaPath=./9077/chunkfilepool.meta", - "-walFilePoolDir=./9077/walfilepool/", - "-walFilePoolMetaPath=./9077/walfilepool.meta", - "-conf=./9077/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, +static const 
char* chunkConcurrencyParams2[1][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9077", + "-chunkServerStoreUri=local://./9077/", + "-chunkServerMetaUri=local://./9077/chunkserver.dat", + "-copySetUri=local://./9077/copysets", + "-raftSnapshotUri=curve://./9077/copysets", + "-raftLogUri=curve://./9077/copysets", + "-recycleUri=local://./9077/recycler", + "-chunkFilePoolDir=./9077/chunkfilepool/", + "-chunkFilePoolMetaPath=./9077/chunkfilepool.meta", + "-walFilePoolDir=./9077/walfilepool/", + "-walFilePoolMetaPath=./9077/walfilepool.meta", + "-conf=./9077/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; butil::AtExitManager atExitManager; @@ -94,7 +82,7 @@ const int kChunkNum = 10; const ChunkSizeType kChunkSize = 16 * 1024 * 1024; const PageSizeType kPageSize = kOpRequestAlignSize; -// chunk不从FilePool获取的chunkserver并发测试 +// Chunkserver concurrency testing for chunks not obtained from FilePool class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { protected: virtual void SetUp() { @@ -136,14 +124,14 @@ class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { ::usleep(100 * 1000); } - void InitCluster(PeerCluster *cluster) { + void InitCluster(PeerCluster* cluster) { PeerId leaderId; Peer leaderPeer; cluster->SetElectionTimeoutMs(electionTimeoutMs); cluster->SetsnapshotIntervalS(snapshotIntervalS); ASSERT_EQ(0, cluster->StartPeer(peer1, PeerCluster::PeerToId(peer1))); - // 等待leader产生 + // Waiting for the leader to be generated ASSERT_EQ(0, cluster->WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); @@ -162,10 +150,10 @@ class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { CopysetID copysetId; std::map paramsIndexs; - std::vector params; + std::vector params; }; -// chunk从FilePool获取的chunkserver并发测试 +// Chunkserver concurrency test obtained by chunk from FilePool class ChunkServerConcurrentFromFilePoolTest : public testing::Test { protected: virtual void SetUp() { @@ -182,7 +170,6 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { electionTimeoutMs = 3000; snapshotIntervalS = 60; - ASSERT_TRUE(cg1.Init("9077")); cg1.SetKV("copyset.election_timeout_ms", "3000"); cg1.SetKV("copyset.snapshot_interval_s", "60"); @@ -198,14 +185,12 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { params.push_back(const_cast(chunkConcurrencyParams2[0])); - // 初始化FilePool,这里会预先分配一些chunk + // Initialize FilePool, where some chunks will be pre allocated lfs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); - poolDir = "./" - + std::to_string(PeerCluster::PeerToId(peer1)) - + "/chunkfilepool/"; - metaDir = "./" - + std::to_string(PeerCluster::PeerToId(peer1)) - + "/chunkfilepool.meta"; + poolDir = "./" + std::to_string(PeerCluster::PeerToId(peer1)) + + "/chunkfilepool/"; + metaDir = "./" + std::to_string(PeerCluster::PeerToId(peer1)) + + "/chunkfilepool.meta"; FilePoolMeta meta(kChunkSize, kPageSize, poolDir); FilePoolHelper::PersistEnCodeMetaInfo(lfs, meta, metaDir); @@ -213,7 +198,7 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { // There maybe one chunk in cleaning, so you should allocate // (kChunkNum + 1) chunks in start if you want to use kChunkNum chunks. 
// This situation will not occur in the production environment - allocateChunk(lfs, kChunkNum+1, poolDir, kChunkSize); + allocateChunk(lfs, kChunkNum + 1, poolDir, kChunkSize); } virtual void TearDown() { std::string rmdir1("rm -fr "); @@ -224,14 +209,14 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { // wait for process exit ::usleep(100 * 1000); } - void InitCluster(PeerCluster *cluster) { + void InitCluster(PeerCluster* cluster) { PeerId leaderId; Peer leaderPeer; cluster->SetElectionTimeoutMs(electionTimeoutMs); cluster->SetsnapshotIntervalS(snapshotIntervalS); ASSERT_EQ(0, cluster->StartPeer(peer1, PeerCluster::PeerToId(peer1))); - // 等待leader产生 + // Waiting for the leader to be generated ASSERT_EQ(0, cluster->WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); @@ -243,28 +228,23 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { std::vector peers; PeerId leaderId; Peer leaderPeer; - int electionTimeoutMs; - int snapshotIntervalS; + int electionTimeoutMs; + int snapshotIntervalS; LogicPoolID logicPoolId; - CopysetID copysetId; + CopysetID copysetId; - std::map paramsIndexs; - std::vector params; + std::map paramsIndexs; + std::vector params; std::string poolDir; std::string metaDir; - std::shared_ptr lfs; + std::shared_ptr lfs; }; -// 写chunk -int WriteChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - off_t offset, - size_t len, - const char *data, +// Write chunk +int WriteChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkId, off_t offset, size_t len, const char* data, const int sn = 1) { PeerId leaderId(leader.address()); brpc::Channel channel; @@ -299,13 +279,9 @@ int WriteChunk(Peer leader, return 0; } -// 随机选择一个chunk的随机offset进行read -void RandReadChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkIdRange, - const int loop, - const int sn = 1) { +// Randomly select a chunk's random offset for read +void RandReadChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkIdRange, const int loop, const int sn = 1) { int ret = 0; uint64_t appliedIndex = 1; PeerId leaderId(leader.address()); @@ -314,7 +290,7 @@ void RandReadChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -330,7 +306,7 @@ void RandReadChunk(Peer leader, request.set_size(kOpRequestAlignSize); request.set_appliedindex(appliedIndex); - // 随机选择一个offset + // Randomly select an offset uint64_t pageIndex = butil::fast_rand_less_than(kChunkSize / kPageSize); request.set_offset(pageIndex * kPageSize); @@ -342,7 +318,8 @@ void RandReadChunk(Peer leader, } if (response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS && - response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST) { //NOLINT + response.status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST) { // NOLINT LOG(INFO) << "read failed: " << CHUNK_OP_STATUS_Name(response.status()); ret = -1; @@ -352,13 +329,9 @@ void RandReadChunk(Peer leader, } } -// 随机选择一个chunk的随机offset进行write -void RandWriteChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkIdRange, - const int loop, - const int sn = 1) { +// Randomly select a chunk's random offset for writing +void RandWriteChunk(Peer leader, LogicPoolID logicPoolId, 
CopysetID copysetId, + ChunkID chunkIdRange, const int loop, const int sn = 1) { int ret = 0; char data[kOpRequestAlignSize] = {'a'}; int length = kOpRequestAlignSize; @@ -369,7 +342,7 @@ void RandWriteChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -385,7 +358,7 @@ void RandWriteChunk(Peer leader, request.set_size(kOpRequestAlignSize); cntl.request_attachment().append(data, length); - // 随机选择一个offset + // Randomly select an offset uint64_t pageIndex = butil::fast_rand_less_than(kChunkSize / kPageSize); request.set_offset(pageIndex * kPageSize); @@ -406,12 +379,9 @@ void RandWriteChunk(Peer leader, } } -// 随机选择一个chunk删除 -void RandDeleteChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkIdRange, - const int loop) { +// Randomly select a chunk to delete +void RandDeleteChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkIdRange, const int loop) { int ret = 0; PeerId leaderId(leader.address()); @@ -420,7 +390,7 @@ void RandDeleteChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -450,12 +420,9 @@ void RandDeleteChunk(Peer leader, } } -// 创建clone chunk -void CreateCloneChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID start, - ChunkID end) { +// Create clone chunk +void CreateCloneChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID start, ChunkID end) { int ret = 0; SequenceNum sn = 2; SequenceNum correctedSn = 1; @@ -497,10 +464,10 @@ void CreateCloneChunk(Peer leader, } /** - * chunk不是事先在FilePool分配好的 + *Chunks are not pre allocated in FilePool */ -// 多线程并发随机读同一个chunk +// Multiple threads simultaneously randomly read the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { uint64_t chunkId = 1; off_t offset = 0; @@ -511,37 +478,21 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); + // 2. Initiate a write to the chunk to ensure that it has been generated + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, chunkId, offset, + length, data.c_str(), sn)); - // 2. 对chunk发起一次写,保证chunk已经产生 - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - data.c_str(), - sn)); - - // 3. 起多个线程执行随机read chunk + // 3. 
Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -549,33 +500,24 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { } } -// 多线程并发随机写同一个chunk +// Multiple threads concurrently randomly write the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteOneChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -583,8 +525,9 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteOneChunk) { } } -// 多线程并发写同一个chunk同一个offset -TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT +// Multiple threads simultaneously writing the same chunk and offset +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, + WriteOneChunkOnTheSameOffset) { // NOLINT const int kThreadNum = 10; std::vector datas; ChunkID chunkId = 1; @@ -592,29 +535,19 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { int length = 2 * kOpRequestAlignSize; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { std::string data(length, 'a' + i); datas.push_back(data); - threads.push_back(Thread(WriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - datas[i].c_str(), + threads.push_back(Thread(WriteChunk, leaderPeer, logicPoolId, copysetId, + chunkId, offset, length, datas[i].c_str(), sn)); } @@ -622,7 +555,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { threads[j].join(); } - // 3. 将数据read出来验证 + // 3. 
Read out the data for verification brpc::Channel channel; channel.Init(leaderId.addr, NULL); ChunkService_Stub stub(&channel); @@ -646,7 +579,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { std::string result = cntl.response_attachment().to_string(); ASSERT_EQ(length, result.size()); - // 读出来的数据的字符>='a' 且<= 'a' + kThreadNum - 1 + // The characters of the read data are >= 'a' and <= 'a' + kThreadNum - 1 ASSERT_GE(result[0] - 'a', 0); ASSERT_LE(result[0] - 'a', kThreadNum - 1); for (int i = 1; i < length - 1; ++i) { @@ -654,7 +587,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { } } -// 多线程并发随机读写同一个chunk +// Multiple threads concurrently randomly read and write the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -664,50 +597,30 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 3. 起多个线程执行随机read write chunk + // 3. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -716,7 +629,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { } } -// 多线程并发读不同的chunk +// Multiple threads concurrently reading different chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -726,38 +639,23 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2.
Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 3. 起多个线程执行随机read chunk + // 3. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -765,33 +663,26 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { } } -// 多线程并发读不同的chunk,注意这些chunk都还没有被写过 -TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiNotExistChunk) { //NOLINT +// Multiple threads simultaneously read different chunks, please note that none +// of these chunks have been written yet +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, + RandReadMultiNotExistChunk) { // NOLINT const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read chunk + // 2. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -799,7 +690,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiNotExistChunk) { } } -// 多线程并发随机写同多个chunk +// Multiple threads concurrently randomly write to the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -809,39 +700,26 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生,避免下面同时从 - // chunkfile pool生成new chunk导致write 超时失败 + // 2. Initiate a write to the chunk to ensure that the chunk has already + // been generated, avoiding the need for both + // Chunkfile pool generates new chunks, resulting in write timeout + // failure for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 4. 起多个线程执行随机write chunk + // 4. 
Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -849,7 +727,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { } } -// 多线程并发随机读写同多个chunk +// Multi thread concurrent random read and write of the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -857,38 +735,24 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read write chunk + // 2. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -897,7 +761,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { } } -// 多线程并发删除不同的chunk +// Simultaneous deletion of different chunks through multiple threads TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -907,38 +771,24 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 3. 起多个线程执行随机delete chunk + // 3. 
Starting multiple threads to execute random delete chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - // 起delete线程 - threads.push_back(Thread(RandDeleteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop)); + // Start delete thread + threads.push_back(Thread(RandDeleteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop)); } for (int j = 0; j < kThreadNum; ++j) { @@ -946,30 +796,23 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { } } -// 多线程并发create clone不同的chunk +// Multiple threads concurrent create clones with different chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, CreateCloneMultiChunk) { const int kThreadNum = 10; ChunkID chunkIdRange = kChunkNum; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机create clone chunk + // 2. Starting multiple threads to execute random create clone chunks std::vector threads; int chunksPerThread = chunkIdRange / kThreadNum; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(CreateCloneChunk, - leaderPeer, - logicPoolId, - copysetId, - i * chunksPerThread + 1, + threads.push_back(Thread(CreateCloneChunk, leaderPeer, logicPoolId, + copysetId, i * chunksPerThread + 1, (i + 1) * chunksPerThread)); } @@ -979,10 +822,10 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, CreateCloneMultiChunk) { } /** - * chunk是事先在FilePool分配好的 + * Chunks are pre allocated in FilePool */ -// 多线程并发随机读同一个chunk +// Multiple threads simultaneously randomly read the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { uint64_t chunkId = 1; off_t offset = 0; @@ -993,36 +836,21 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - data.c_str(), - sn)); - - // 3. 起多个线程执行随机read chunk + // 2. Initiate a write to the chunk to ensure that it has been generated + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, chunkId, offset, + length, data.c_str(), sn)); + + // 3. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1030,33 +858,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { } } -// 多线程并发随机写同一个chunk +// Multiple threads concurrently randomly write the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteOneChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 
启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunk std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1064,8 +883,9 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteOneChunk) { } } -// 多线程并发写同一个chunk同一个offset -TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT +// Multiple threads simultaneously writing the same chunk and offset +TEST_F(ChunkServerConcurrentFromFilePoolTest, + WriteOneChunkOnTheSameOffset) { // NOLINT const int kThreadNum = 10; std::vector datas; ChunkID chunkId = 1; @@ -1073,29 +893,19 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { int length = 2 * kOpRequestAlignSize; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunk std::vector threads; for (int i = 0; i < kThreadNum; ++i) { std::string data(length, 'a' + i); datas.push_back(data); - threads.push_back(Thread(WriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - datas[i].c_str(), + threads.push_back(Thread(WriteChunk, leaderPeer, logicPoolId, copysetId, + chunkId, offset, length, datas[i].c_str(), sn)); } @@ -1103,7 +913,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { threads[j].join(); } - // 4. 将数据read出来验证 + // 4. Read out the data for verification brpc::Channel channel; channel.Init(leaderId.addr, NULL); ChunkService_Stub stub(&channel); @@ -1127,7 +937,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { std::string result = cntl.response_attachment().to_string(); ASSERT_EQ(length, result.size()); - // 读出来的数据的字符>='a' 且<= 'a' + kThreadNum - 1 + // The characters of the read data >='a' and <= 'a' + kThreadNum - 1 ASSERT_GE(result[0] - 'a', 0); ASSERT_LE(result[0] - 'a', kThreadNum - 1); for (int i = 1; i < length - 1; ++i) { @@ -1135,7 +945,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { } } -// 多线程并发随机读写同一个chunk +// Multiple threads concurrently randomly read and write the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -1143,38 +953,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. 
Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read write chunk + // 2. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -1183,7 +979,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { } } -// 多线程并发读不同的chunk +// Multiple threads concurrently reading different chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -1193,38 +989,23 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 4. 起多个线程执行随机read chunk + // 4. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1232,33 +1013,25 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { } } -// 多线程并发读不同的chunk,注意这些chunk都还没有被写过 +// Multiple threads simultaneously read different chunks, please note that none +// of these chunks have been written yet TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiNotExistChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read chunk + // 2. 
Starting multiple threads to execute random read chunks
     std::vector<Thread> threads;
     for (int i = 0; i < kThreadNum; ++i) {
-        threads.push_back(Thread(RandReadChunk,
-                                 leaderPeer,
-                                 logicPoolId,
-                                 copysetId,
-                                 chunkIdRange,
-                                 kMaxLoop,
-                                 sn));
+        threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId,
+                                 copysetId, chunkIdRange, kMaxLoop, sn));
     }

     for (int j = 0; j < kThreadNum; ++j) {
@@ -1266,7 +1039,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiNotExistChunk) {
     }
 }

-// 多线程并发随机写同多个chunk
+// Multiple threads concurrently write multiple chunks at random
 TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) {
     std::string data(kOpRequestAlignSize, 'a');
     const int kThreadNum = 10;
@@ -1274,26 +1047,17 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) {
     ChunkID chunkIdRange = kChunkNum;
     const int sn = 1;

-    // 1. 启动一个成员的复制组
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    // 1. Start a replication group for a member
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     InitCluster(&cluster);

-    // 2. 起多个线程执行随机write chunk
+    // 2. Starting multiple threads to execute random write chunks
     std::vector<Thread> threads;
     for (int i = 0; i < kThreadNum; ++i) {
-        threads.push_back(Thread(RandWriteChunk,
-                                 leaderPeer,
-                                 logicPoolId,
-                                 copysetId,
-                                 chunkIdRange,
-                                 kMaxLoop,
-                                 sn));
+        threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId,
+                                 copysetId, chunkIdRange, kMaxLoop, sn));
     }

     for (int j = 0; j < kThreadNum; ++j) {
@@ -1301,7 +1065,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) {
     }
 }

-// 多线程并发随机读写同多个chunk
+// Multiple threads concurrently read and write multiple chunks at random
 TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) {
     std::string data(kOpRequestAlignSize, 'a');
     const int kThreadNum = 10;
@@ -1309,38 +1073,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) {
     ChunkID chunkIdRange = kChunkNum;
     const int sn = 1;

-    // 1. 启动一个成员的复制组
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    // 1. Start a replication group for a member
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     InitCluster(&cluster);

-    // 2. 起多个线程执行随机read write chunk
+    // 2. 
Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -1349,7 +1099,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { } } -// 多线程并发删除不同的chunk +// Simultaneous deletion of different chunks through multiple threads TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -1359,38 +1109,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 3. 起多个线程执行随机delete chunk + // 3. Starting multiple threads to execute random delete chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - // 起delete线程 - threads.push_back(Thread(RandDeleteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop)); + // Start delete thread + threads.push_back(Thread(RandDeleteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1398,30 +1134,23 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { } } -// 多线程并发create clone不同的chunk +// Multiple threads concurrent create clones with different chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) { const int kThreadNum = 10; ChunkID chunkIdRange = kChunkNum; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机create clone chunk + // 2. 
Starting multiple threads to execute random create clone chunks
     std::vector<Thread> threads;
     int chunksPerThread = chunkIdRange / kThreadNum;
     for (int i = 0; i < kThreadNum; ++i) {
-        threads.push_back(Thread(CreateCloneChunk,
-                                 leaderPeer,
-                                 logicPoolId,
-                                 copysetId,
-                                 i * chunksPerThread + 1,
+        threads.push_back(Thread(CreateCloneChunk, leaderPeer, logicPoolId,
+                                 copysetId, i * chunksPerThread + 1,
                                  (i + 1) * chunksPerThread));
     }

@@ -1430,7 +1159,8 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) {
     }
 }

-// 多线程并发随机读写同多个chunk，同事伴随这并发的COW
+// Multiple threads concurrently read and write multiple chunks at random,
+// accompanied by concurrent COW
 TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) {
     off_t offset = 0;
     int length = kOpRequestAlignSize;
@@ -1440,52 +1170,32 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) {
     ChunkID chunkIdRange = kChunkNum / 2;
     int sn = 1;

-    // 1. 启动一个成员的复制组
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    // 1. Start a replication group for a member
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     InitCluster(&cluster);

-    // 2. 用低版本的sn写一遍chunk
+    // 2. Write every chunk once using the lower version of sn
     for (int k = 1; k <= chunkIdRange; ++k) {
-        ASSERT_EQ(0, WriteChunk(leaderPeer,
-                                logicPoolId,
-                                copysetId,
-                                k,
-                                offset,
-                                length,
-                                data.c_str(),
-                                sn));
+        ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset,
+                                length, data.c_str(), sn));
     }

-    // sn加1，保证后面的write会产生COW
+    // Add 1 to sn to ensure that subsequent writes will generate COW
     sn += 1;

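// Why bumping sn forces COW here: the datastore compares each write's
// sequence number against the chunk's current sn, and the first write that
// arrives with a newer sn must copy the touched pages into a snapshot file
// before overwriting them, so readers at the old sn still see the old data.
// A minimal sketch of that decision, using illustrative fields rather than
// the real CSDataStore state:
struct ChunkVersionState {
    uint64_t curSn;   // sn of the last write applied to the chunk
    uint64_t snapSn;  // sn preserved in the snapshot file, 0 if none
};

// True when a write carrying writeSn must perform copy-on-write first.
inline bool WriteNeedsCow(const ChunkVersionState& s, uint64_t writeSn) {
    return writeSn > s.curSn;  // first write after a version bump
}

-    // 3. 起多个线程执行随机read write chunk
+    // 3. 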
Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(10); if (read <= 1) { - // 起read线程,20%概率 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread with a 20% probability + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } diff --git a/test/integration/chunkserver/datastore/datastore_basic_test.cpp b/test/integration/chunkserver/datastore/datastore_basic_test.cpp index 14fdc3901c..a7367253c5 100644 --- a/test/integration/chunkserver/datastore/datastore_basic_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_basic_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_bas"; // NOLINT -const string poolDir = "./chunkfilepool_int_bas"; // NOLINT +const string baseDir = "./data_int_bas"; // NOLINT +const string poolDir = "./chunkfilepool_int_bas"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_bas.meta"; // NOLINT class BasicTestSuit : public DatastoreIntegrationBase { @@ -36,51 +36,49 @@ class BasicTestSuit : public DatastoreIntegrationBase { }; /** - * 基本功能测试验证 - * 读、写、删、获取文件信息 + * Basic functional testing verification + * Read, write, delete, and obtain file information */ TEST_F(BasicTestSuit, BasicTest) { ChunkID id = 1; SequenceNum sn = 1; off_t offset = 0; size_t length = PAGE_SIZE; - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(id); + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(id); CSErrorCode errorCode; CSChunkInfo info; - /******************场景一:新建的文件,Chunk文件不存在******************/ + /******************Scene One: New file created, Chunk file does not + * exist******************/ - // 文件不存在 + // File does not exist ASSERT_FALSE(lfs_->FileExists(chunkPath)); - // 读chunk时返回ChunkNotExistError + // ChunkNotExistError returned when reading chunk char readbuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 无法获取到chunk的版本号 + // Unable to obtain the version number of the chunk errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 删除chunk返回Success + // Delete chunk and return Success errorCode = dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); - /******************场景二:通过WriteChunk产生chunk文件后操作**************/ + /****************** Scene Two: Operations after generating chunk files + * through WriteChunk **************/ char buf1_1_1[PAGE_SIZE]; memset(buf1_1_1, 'a', length); - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_1, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk的信息,且各项信息符合预期 + // Chunk information can be obtained and all information meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, info.curSn); @@ -93,69 +91,53 @@ 
TEST_F(BasicTestSuit, BasicTest) { ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); - // 读取写入的4KB验证一下,应当与写入数据相等 + // Verify that the 4KB read and written should be equal to the data written memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_1, readbuf, length)); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content + // read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - CHUNK_SIZE - PAGE_SIZE, - length); + errorCode = + dataStore_->ReadChunk(id, sn, readbuf, CHUNK_SIZE - PAGE_SIZE, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // chunk 存在时,覆盖写 + // Overwrite when chunk exists char buf1_1_2[PAGE_SIZE]; memset(buf1_1_2, 'b', length); - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_2, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content + // read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - offset, - 3 * PAGE_SIZE); + errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, length)); - // chunk 存在时,写入未写过区域 + // When a chunk exists, write to an unwritten area char buf1_1_3[PAGE_SIZE]; memset(buf1_1_3, 'c', length); offset = PAGE_SIZE; length = PAGE_SIZE; - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_3, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_3, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content + // read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - 0, - 3 * PAGE_SIZE); + errorCode = dataStore_->ReadChunk(id, sn, readbuf, 0, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_1_3, readbuf + PAGE_SIZE, PAGE_SIZE)); - // chunk 存在时,覆盖部分区域 + // When a chunk exists, it covers some areas char buf1_1_4[2 * PAGE_SIZE]; memset(buf1_1_4, 'd', length); offset = PAGE_SIZE; @@ -164,27 +146,19 @@ TEST_F(BasicTestSuit, BasicTest) { butil::IOBuf iobuf1_1_4; iobuf1_1_4.append(buf1_1_4, length); - errorCode = dataStore_->WriteChunk(id, - sn, - iobuf1_1_4, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, iobuf1_1_4, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content + // read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - 0, - 3 * PAGE_SIZE); + errorCode = dataStore_->ReadChunk(id, sn, readbuf, 0, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_1_4, readbuf + PAGE_SIZE, 2 * PAGE_SIZE)); - - /******************场景三:用户删除文件******************/ + /******************Scene Three: User deletes file******************/ errorCode = 
dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); diff --git a/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp b/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp index 3b0d635652..6db8375ff2 100644 --- a/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_clo"; // NOLINT -const string poolDir = "./chunkfilepool_int_clo"; // NOLINT +const string baseDir = "./data_int_clo"; // NOLINT +const string poolDir = "./chunkfilepool_int_clo"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_clo.meta"; // NOLINT class CloneTestSuit : public DatastoreIntegrationBase { @@ -36,7 +36,7 @@ class CloneTestSuit : public DatastoreIntegrationBase { }; /** - * 克隆场景测试 + * Clone scenario testing */ TEST_F(CloneTestSuit, CloneTest) { ChunkID id = 1; @@ -48,16 +48,14 @@ TEST_F(CloneTestSuit, CloneTest) { CSChunkInfo info; std::string location("test@s3"); - /******************场景一:创建克隆文件******************/ + /******************Scenario 1: Creating Cloned Files******************/ - // 创建克隆文件chunk1 - errorCode = dataStore_->CreateCloneChunk(id, // chunk id - sn, - correctedSn, - CHUNK_SIZE, - location); + // Create clone file chunk1 + errorCode = + dataStore_->CreateCloneChunk(id, // chunk id + sn, correctedSn, CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -71,14 +69,13 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 再次调该接口,仍返回成功,chunk的信息不变 - errorCode = dataStore_->CreateCloneChunk(id, // chunk id - sn, - correctedSn, - CHUNK_SIZE, - location); + // Call the interface again, but still return success. 
Chunk information + // remains unchanged + errorCode = + dataStore_->CreateCloneChunk(id, // chunk id + sn, correctedSn, CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -92,14 +89,12 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 创建克隆文件chunk2 - errorCode = dataStore_->CreateCloneChunk(2, // chunk id - sn, - correctedSn, - CHUNK_SIZE, - location); + // Create clone file chunk2 + errorCode = + dataStore_->CreateCloneChunk(2, // chunk id + sn, correctedSn, CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(2, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -113,23 +108,19 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - /******************场景二:恢复克隆文件******************/ - // 构造原始数据 + /******************Scene 2: Restoring Cloned Files******************/ + // Construct raw data char pasteBuf[4 * PAGE_SIZE]; memset(pasteBuf, '1', 4 * PAGE_SIZE); - // WriteChunk写数据到clone chunk的[0, 8KB]区域 + // WriteChunk writes data to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; char writeBuf1[2 * PAGE_SIZE]; memset(writeBuf1, 'a', length); - errorCode = dataStore_->WriteChunk(id, - sn, - writeBuf1, - offset, - length, - nullptr); - ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + errorCode = + dataStore_->WriteChunk(id, sn, writeBuf1, offset, length, nullptr); + ASSERT_EQ(errorCode, CSErrorCode::Success); + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -137,26 +128,23 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘1’ + // Reading Chunk data, [0, 8KB] data should be '1' size_t readSize = 2 * PAGE_SIZE; char readBuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(writeBuf1, readBuf, readSize)); - // PasteChunk再次写数据到clone chunk的[0, 8KB]区域 + // PasteChunk writes data again to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; - errorCode = dataStore_->PasteChunk(id, - pasteBuf, - offset, - length); + errorCode = dataStore_->PasteChunk(id, pasteBuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -164,30 +152,26 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); 
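// The bitmap assertions in these clone tests all follow from page-granularity
// dirty tracking: with 4KB pages, a write or paste covering [0, 8KB) sets
// bits 0 and 1, so NextSetBit(0) == 0, NextClearBit(0) == 2, and
// NextSetBit(2) == Bitmap::NO_POS. A minimal sketch of that bookkeeping,
// assuming only a per-index Bitmap::Set(uint32_t) setter (illustrative, not
// the CSDataStore internals):
void MarkPagesWritten(Bitmap* bitmap, off_t offset, size_t length) {
    uint32_t first = offset / PAGE_SIZE;
    uint32_t last = (offset + length - 1) / PAGE_SIZE;
    for (uint32_t i = first; i <= last; ++i) {
        bitmap->Set(i);  // mark page i as written
    }
}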
ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘a’ + // Reading Chunk data, [0, 8KB] data should be 'a' readSize = 2 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(writeBuf1, readBuf, readSize)); - // WriteChunk再次写数据到clone chunk的[4KB, 12KB]区域 + // WriteChunk writes data again to the [4KB, 12KB] area of the clone chunk offset = PAGE_SIZE; length = 2 * PAGE_SIZE; char writeBuf3[2 * PAGE_SIZE]; memset(writeBuf3, 'c', length); - errorCode = dataStore_->WriteChunk(id, - sn, - writeBuf3, - offset, - length, - nullptr); - ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + errorCode = + dataStore_->WriteChunk(id, sn, writeBuf3, offset, length, nullptr); + ASSERT_EQ(errorCode, CSErrorCode::Success); + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -195,11 +179,12 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(3, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); - // 读Chunk数据,[0, 4KB]数据应为‘a’,[4KB, 12KB]数据应为‘c’ + // Reading Chunk data, [0, 4KB] data should be 'a', [4KB, 12KB] data should + // be 'c' readSize = 3 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); @@ -207,17 +192,18 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(0, memcmp(writeBuf1, readBuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(writeBuf3, readBuf + PAGE_SIZE, 2 * PAGE_SIZE)); - /******************场景三:clone文件遍写后转换为普通chunk文件*************/ + /******************Scene 3: Conversion of Cloned Files after Iterative + * Writing into Regular Chunk Files*************/ char overBuf[1 * kMB] = {0}; for (int i = 0; i < 16; ++i) { - errorCode = dataStore_->PasteChunk(id, - overBuf, + errorCode = dataStore_->PasteChunk(id, overBuf, i * kMB, // offset 1 * kMB); // length ASSERT_EQ(errorCode, CSErrorCode::Success); } - // 检查chunk的各项信息,都符合预期,chunk转为了普通的chunk + // Check all the information of the chunk and ensure it meets expectations. 
+ // The chunk will be converted to a regular chunk errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -226,15 +212,15 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); - /******************场景三:删除文件****************/ + /******************Scene 3: Delete File****************/ - // 此时删除Chunk1,返回Success + // At this point, delete Chunk1 and return to Success errorCode = dataStore_->DeleteChunk(1, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 此时删除Chunk2,返回Success + // At this point, delete Chunk2 and return to Success errorCode = dataStore_->DeleteChunk(2, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->GetChunkInfo(2, &info); @@ -242,7 +228,7 @@ TEST_F(CloneTestSuit, CloneTest) { } /** - * 恢复场景测试 + * Recovery scenario testing */ TEST_F(CloneTestSuit, RecoverTest) { ChunkID id = 1; @@ -254,16 +240,15 @@ TEST_F(CloneTestSuit, RecoverTest) { CSChunkInfo info; std::string location("test@s3"); - /******************场景一:创建克隆文件******************/ + /******************Scenario 1: Creating Cloned Files******************/ - // 创建克隆文件chunk1 + // Create clone file chunk1 errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, correctedSn, // corrected sn - CHUNK_SIZE, - location); + CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -277,14 +262,14 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 再次调该接口,仍返回成功,chunk的信息不变 + // Call the interface again, but still return success. 
Chunk information + // remains unchanged errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, 3, // corrected sn - CHUNK_SIZE, - location); + CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -298,20 +283,17 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - /******************场景二:恢复克隆文件******************/ + /******************Scene 2: Restoring Cloned Files******************/ sn = 3; - // 构造原始数据 + // Construct raw data char pasteBuf[4 * PAGE_SIZE]; memset(pasteBuf, '1', 4 * PAGE_SIZE); - // PasteChunk写数据到clone chunk的[0, 8KB]区域 + // PasteChunk writes data to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; - errorCode = dataStore_->PasteChunk(id, - pasteBuf, - offset, - length); + errorCode = dataStore_->PasteChunk(id, pasteBuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); @@ -319,30 +301,26 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘1’ + // Reading Chunk data, [0, 8KB] data should be '1' size_t readSize = 2 * PAGE_SIZE; char readBuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(pasteBuf, readBuf, readSize)); - // WriteChunk再次写数据到clone chunk的[0, 8KB]区域 + // WriteChunk writes data again to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; char writeBuf2[2 * PAGE_SIZE]; memset(writeBuf2, 'b', length); - errorCode = dataStore_->WriteChunk(id, - sn, - writeBuf2, - offset, - length, - nullptr); - ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + errorCode = + dataStore_->WriteChunk(id, sn, writeBuf2, offset, length, nullptr); + ASSERT_EQ(errorCode, CSErrorCode::Success); + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -350,26 +328,23 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘b’ + // Reading Chunk data, [0, 8KB] data should be 'b' readSize = 2 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(writeBuf2, readBuf, readSize)); - // PasteChunk再次写数据到clone chunk的[4KB, 
12KB]区域 + // PasteChunk writes data again to the [4KB, 12KB] area of the clone chunk offset = PAGE_SIZE; length = 2 * PAGE_SIZE; - errorCode = dataStore_->PasteChunk(id, - pasteBuf, - offset, - length); + errorCode = dataStore_->PasteChunk(id, pasteBuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -377,11 +352,12 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(3, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); - // 读Chunk数据,[0, 8KB]数据应为‘b’,[8KB, 12KB]数据应为‘1’ + // Reading Chunk data, [0, 8KB] data should be 'b', [8KB, 12KB] data should + // be '1' readSize = 3 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); @@ -389,19 +365,19 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_EQ(0, memcmp(writeBuf2, readBuf, 2 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(pasteBuf, readBuf + 2 * PAGE_SIZE, PAGE_SIZE)); - /******************场景三:clone文件遍写后转换为普通chunk文件*************/ + /******************Scene 3: Convert Cloned Files from Sequential Write to + * Regular Chunk Files*************/ char overBuf[1 * kMB] = {0}; for (int i = 0; i < 16; ++i) { - errorCode = dataStore_->WriteChunk(id, - sn, - overBuf, - i * kMB, // offset + errorCode = dataStore_->WriteChunk(id, sn, overBuf, + i * kMB, // offset 1 * kMB, nullptr); // length ASSERT_EQ(errorCode, CSErrorCode::Success); } - // 检查chunk的各项信息,都符合预期,chunk转为了普通的chunk + // Check all the information of the chunk and ensure it meets expectations. 
+ // The chunk will be converted to a regular chunk errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); diff --git a/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp b/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp index e873cdb667..e1ded2ef1a 100644 --- a/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_con"; // NOLINT -const string poolDir = "./chunkfilepool_int_con"; // NOLINT +const string baseDir = "./data_int_con"; // NOLINT +const string poolDir = "./chunkfilepool_int_con"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_con.meta"; // NOLINT class ConcurrencyTestSuit : public DatastoreIntegrationBase { @@ -46,9 +46,8 @@ TEST_F(ConcurrencyTestSuit, ConcurrencyTest) { const int kThreadNum = 10; auto readFunc = [&](ChunkID id) { - // 五分之一概率增加版本号 - if (rand_r(&seed) % 5 == 0) - ++sn; + // One fifth probability of increasing version number + if (rand_r(&seed) % 5 == 0) ++sn; uint64_t pageIndex = rand_r(&seed) % (CHUNK_SIZE / PAGE_SIZE); offset = pageIndex * PAGE_SIZE; dataStore_->ReadChunk(id, sn, buf, offset, length); @@ -60,9 +59,7 @@ TEST_F(ConcurrencyTestSuit, ConcurrencyTest) { dataStore_->WriteChunk(id, sn, buf, offset, length, nullptr); }; - auto deleteFunc = [&](ChunkID id) { - dataStore_->DeleteChunk(id, sn); - }; + auto deleteFunc = [&](ChunkID id) { dataStore_->DeleteChunk(id, sn); }; auto deleteSnapFunc = [&](ChunkID id) { dataStore_->DeleteSnapshotChunkOrCorrectSn(id, sn); @@ -107,7 +104,7 @@ TEST_F(ConcurrencyTestSuit, ConcurrencyTest) { Thread threads[kThreadNum]; printf("===============TEST CHUNK1===================\n"); - // 测试并发对同一chunk进行随机操作 + // Testing concurrent random operations on the same chunk for (int i = 0; i < kThreadNum; ++i) { threads[i] = std::thread(Run, 1, kLoopNum); } @@ -118,7 +115,7 @@ TEST_F(ConcurrencyTestSuit, ConcurrencyTest) { printf("===============TEST RANDOM==================\n"); - // 测试并发对不同chunk进行随机操作 + // Test and perform random operations on different chunks simultaneously int idRange = 10; for (int i = 0; i < kThreadNum; ++i) { threads[i] = std::thread(Run, idRange, kLoopNum); diff --git a/test/integration/chunkserver/datastore/datastore_exception_test.cpp b/test/integration/chunkserver/datastore/datastore_exception_test.cpp index 5405b03e8c..cc020c395b 100644 --- a/test/integration/chunkserver/datastore/datastore_exception_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_exception_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_exc"; // NOLINT -const string poolDir = "./chunkfilepool_int_exc"; // NOLINT +const string baseDir = "./data_int_exc"; // NOLINT +const string poolDir = "./chunkfilepool_int_exc"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_exc.meta"; // NOLINT class ExceptionTestSuit : public DatastoreIntegrationBase { @@ -36,9 +36,9 @@ class ExceptionTestSuit : public DatastoreIntegrationBase { }; /** - * 异常测试1 - * 用例:chunk的metapage数据损坏,然后启动DataStore - * 预期:重启失败 + * Exception test 1 + * Scenario: Chunk's metapage data is corrupt, and then start DataStore + * Expected: Reboot failed */ TEST_F(ExceptionTestSuit, ExceptionTest1) { SequenceNum fileSn = 1; @@ -47,46 +47,41 @@ TEST_F(ExceptionTestSuit, ExceptionTest1) { CSErrorCode 
errorCode;
     CSChunkInfo chunk1Info;

-    // 生成chunk1
+    // Generate chunk1
     char buf[PAGE_SIZE];
     memset(buf, '1', length);
     errorCode = dataStore_->WriteChunk(1,  // id
-                                       fileSn,
-                                       buf,
-                                       offset,
-                                       length,
-                                       nullptr);
+                                       fileSn, buf, offset, length, nullptr);
     ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 通过lfs修改chunk1的metapage
-    std::string chunkPath = baseDir + "/" +
-                            FileNameOperator::GenerateChunkFileName(1);
+    // Modifying the metapage of chunk1 through lfs
+    std::string chunkPath =
+        baseDir + "/" + FileNameOperator::GenerateChunkFileName(1);
     char metapage[PAGE_SIZE];
-    int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC);
+    int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC);
     ASSERT_GT(fd, 0);
     lfs_->Read(fd, metapage, 0, PAGE_SIZE);
-    // 修改metapage
+    // Modify the metapage
     metapage[0]++;
     lfs_->Write(fd, metapage, 0, PAGE_SIZE);
     lfs_->Close(fd);

-    // 模拟重启
+    // Simulate restart
     DataStoreOptions options;
     options.baseDir = baseDir;
     options.chunkSize = CHUNK_SIZE;
     options.metaPageSize = PAGE_SIZE;
     options.blockSize = BLOCK_SIZE;
-    // 构造新的dataStore_，并重新初始化，重启失败
-    dataStore_ = std::make_shared<CSDataStore>(lfs_,
-                                               filePool_,
-                                               options);
+    // Construct a new dataStore_ and reinitialize; the restart should fail
+    dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options);
     ASSERT_FALSE(dataStore_->Initialize());
 }

 /**
- * 异常测试2
- * 用例：chunk的metapage数据损坏，然后更新了metapage，然后重启DataStore
- * 预期：重启datastore可以成功
+ * Exception Test 2
+ * Scenario: chunk's metapage data is corrupted, then the metapage is updated,
+ * and then the DataStore is restarted
+ * Expected: Reboot successful
  */
 TEST_F(ExceptionTestSuit, ExceptionTest2) {
     SequenceNum fileSn = 1;
@@ -95,55 +90,45 @@ TEST_F(ExceptionTestSuit, ExceptionTest2) {
     CSErrorCode errorCode;
     CSChunkInfo chunk1Info;

-    // 生成chunk1
+    // Generate chunk1
     char buf[PAGE_SIZE];
     memset(buf, '1', length);
     errorCode = dataStore_->WriteChunk(1,  // id
-                                       fileSn,
-                                       buf,
-                                       offset,
-                                       length,
-                                       nullptr);
+                                       fileSn, buf, offset, length, nullptr);
     ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 通过lfs修改chunk1的metapage
-    std::string chunkPath = baseDir + "/" +
-                            FileNameOperator::GenerateChunkFileName(1);
+    // Modifying the metapage of chunk1 through lfs
+    std::string chunkPath =
+        baseDir + "/" + FileNameOperator::GenerateChunkFileName(1);
     char metapage[PAGE_SIZE];
-    int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC);
+    int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC);
     ASSERT_GT(fd, 0);
     lfs_->Read(fd, metapage, 0, PAGE_SIZE);
-    // 修改metapage
+    // Modify the metapage
     metapage[0]++;
     lfs_->Write(fd, metapage, 0, PAGE_SIZE);
     lfs_->Close(fd);

-    // 触发metapage更新
+    // Trigger a metapage update
     errorCode = dataStore_->WriteChunk(1,  // id
-                                       ++fileSn,
-                                       buf,
-                                       offset,
-                                       length,
-                                       nullptr);
+                                       ++fileSn, buf, offset, length, nullptr);
     ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 模拟重启
+    // Simulate restart
     DataStoreOptions options;
     options.baseDir = baseDir;
     options.chunkSize = CHUNK_SIZE;
     options.metaPageSize = PAGE_SIZE;
     options.blockSize = BLOCK_SIZE;
-    // 构造新的dataStore_，并重新初始化，重启失败
-    dataStore_ = std::make_shared<CSDataStore>(lfs_,
-                                               filePool_,
-                                               options);
+    // Construct a new dataStore_ and reinitialize; the restart should succeed
+    dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options);
     ASSERT_TRUE(dataStore_->Initialize());
 }

 /**
- * 异常测试3
- * 用例：chunk快照的metapage数据损坏，然后重启DataStore
- * 预期：重启失败
+ * Exception Test 3
+ * Scenario: a chunk snapshot's metapage data is corrupted, then the DataStore
+ * is restarted
+ * Expected: Reboot failed
  */
 TEST_F(ExceptionTestSuit, ExceptionTest3) {
     SequenceNum fileSn = 1;
@@ 
-152,55 +137,45 @@ TEST_F(ExceptionTestSuit, ExceptionTest3) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf[PAGE_SIZE]; memset(buf, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf, - offset, - length, - nullptr); + fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 生成快照文件 + // Generate snapshot files errorCode = dataStore_->WriteChunk(1, // id - ++fileSn, - buf, - offset, - length, - nullptr); + ++fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1快照的metapage - std::string snapPath = baseDir + "/" + - FileNameOperator::GenerateSnapshotName(1, 1); + // Modifying the metapage of chunk1 snapshot through lfs + std::string snapPath = + baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); char metapage[PAGE_SIZE]; - int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(snapPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metapage, 0, PAGE_SIZE); - // 修改metapage + // Modify metapage metapage[0]++; lfs_->Write(fd, metapage, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared(lfs_, - filePool_, - options); + // Construct a new dataStore_, And reinitialize, restart failed + dataStore_ = std::make_shared(lfs_, filePool_, options); ASSERT_FALSE(dataStore_->Initialize()); } /** - * 异常测试4 - * 用例:chunk快照的metapage数据损坏,但是更新了metapage,然后重启DataStore - * 预期:重启成功 + * Exception Test 4 + * Scenario: Chunk snapshot's metapage data is corrupt, but the metapage is + * updated, and then restart the DataStore Expected: Reboot successful */ TEST_F(ExceptionTestSuit, ExceptionTest4) { SequenceNum fileSn = 1; @@ -209,64 +184,52 @@ TEST_F(ExceptionTestSuit, ExceptionTest4) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf[PAGE_SIZE]; memset(buf, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf, - offset, - length, - nullptr); + fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 生成快照文件 + // Generate snapshot files errorCode = dataStore_->WriteChunk(1, // id - ++fileSn, - buf, - offset, - length, - nullptr); + ++fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 触发快照metapage更新 + // Trigger snapshot metadata update errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf, - offset + PAGE_SIZE, - length, + fileSn, buf, offset + PAGE_SIZE, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1快照的metapage - std::string snapPath = baseDir + "/" + - FileNameOperator::GenerateSnapshotName(1, 1); + // Modifying the metapage of chunk1 snapshot through lfs + std::string snapPath = + baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); char metapage[PAGE_SIZE]; - int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(snapPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metapage, 0, PAGE_SIZE); - // 修改metapage + // Modify Metapage metapage[0]++; lfs_->Write(fd, metapage, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; 
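// What the simulated restarts in these exception tests exercise: every chunk
// and snapshot file begins with a metapage whose payload is protected by a
// checksum, so flipping a byte makes a reloaded DataStore reject the file
// unless a later write already rewrote (and re-checksummed) the metapage.
// A hedged sketch of the load-time check, with illustrative names; the real
// parsing lives in the chunkserver's metapage codec and is assumed here:
bool MetapageCrcMatches(const char* page, size_t payloadLen,
                        uint32_t storedCrc) {
    // Assumes a helper like curve::common::CRC32(data, len) is available.
    return ::curve::common::CRC32(page, payloadLen) == storedCrc;
}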
 /** - * 异常测试4 - * 用例:chunk快照的metapage数据损坏,但是更新了metapage,然后重启DataStore - * 预期:重启成功 + * Exception Test 4 + * Scenario: the chunk snapshot's metapage data is corrupted, but the metapage + * has been updated; the DataStore is then restarted. + * Expected: the restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest4) { SequenceNum fileSn = 1; @@ -209,64 +184,52 @@ TEST_F(ExceptionTestSuit, ExceptionTest4) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf[PAGE_SIZE]; memset(buf, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf, - offset, - length, - nullptr); + fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 生成快照文件 + // Generate a snapshot file errorCode = dataStore_->WriteChunk(1, // id - ++fileSn, - buf, - offset, - length, - nullptr); + ++fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 触发快照metapage更新 + // Trigger a snapshot metapage update errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf, - offset + PAGE_SIZE, - length, + fileSn, buf, offset + PAGE_SIZE, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1快照的metapage - std::string snapPath = baseDir + "/" + - FileNameOperator::GenerateSnapshotName(1, 1); + // Modify the metapage of chunk1's snapshot through lfs + std::string snapPath = + baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); char metapage[PAGE_SIZE]; - int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(snapPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metapage, 0, PAGE_SIZE); - // 修改metapage + // Modify metapage metapage[0]++; lfs_->Write(fd, metapage, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize; the restart should fail + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_FALSE(dataStore_->Initialize()); } /** - * 异常测试5 - * 用例:WriteChunk数据写到一半重启 - * 预期:重启成功,重新执行上一条操作成功 + * Exception Test 5 + * Scenario: WriteChunk is restarted when its data is only half written + * Expected: the restart succeeds and re-executing the previous operation + * succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest5) { SequenceNum fileSn = 1; @@ -275,66 +238,54 @@ TEST_F(ExceptionTestSuit, ExceptionTest5) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 构造要写入的数据和请求偏移 + // Construct the data to be written and the request offset char buf2[2 * PAGE_SIZE]; memset(buf2, '2', length); offset = 0; length = 2 * PAGE_SIZE; - // 通过lfs写一半数据到chunk文件 - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(1); - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + // Write half of the data to the chunk file through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf2, offset + PAGE_SIZE, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize; the restart should succeed + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 模拟日志恢复 + // Simulate log recovery errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读数据校验 + // Read data verification char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); }
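Editorial note: Test 5 relies on WriteChunk being idempotent under raft-log replay: the torn write leaves garbage in part of the range, but re-applying the same log entry rewrites the entire [offset, offset + length) range, so the final bytes no longer depend on how far the first attempt got. A standalone sketch of that property with plain buffers, not the curve API:

#include <cassert>
#include <cstring>
#include <vector>

// Re-applying a whole-range write converges to the same content regardless
// of how much of the first attempt survived the crash.
void ApplyWrite(std::vector<char>* chunk, const char* data, size_t offset,
                size_t length) {
    std::memcpy(chunk->data() + offset, data, length);
}

int main() {
    std::vector<char> chunk(8192, 0);
    std::vector<char> data(8192, '2');
    ApplyWrite(&chunk, data.data(), 4096, 4096);  // torn write: second page only
    ApplyWrite(&chunk, data.data(), 0, 8192);     // replayed log entry
    assert(std::memcmp(chunk.data(), data.data(), 8192) == 0);
    return 0;
}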
 /** - * 异常测试6 - * 用例:WriteChunk更新metapage后重启,sn>chunk.sn,sn==chunk.correctedSn - * 预期:重启成功,重新执行上一条操作成功 + * Exception Test 6 + * Scenario: WriteChunk updates the metapage and restarts, + * sn>chunk.sn, sn==chunk.correctedSn. Expected: the restart succeeds and + * re-executing the previous operation succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest6) { SequenceNum fileSn = 1; @@ -343,84 +294,70 @@ TEST_F(ExceptionTestSuit, ExceptionTest6) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 更新 correctedsn 为2 + // Update correctedsn to 2 errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(1, 2); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 构造要写入的请求参数 + // Construct the request parameters to write char buf2[2 * PAGE_SIZE]; memset(buf2, '2', length); offset = 0; length = 2 * PAGE_SIZE; fileSn = 2; // sn > chunk.sn; sn == chunk.correctedSn - // 通过lfs修改chunk1的metapage - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(1); + // Modify the metapage of chunk1 through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); char metabuf[PAGE_SIZE]; - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metabuf, 0, PAGE_SIZE); - // 模拟更新metapage成功 + // Simulate a successful metapage update ChunkFileMetaPage metaPage; errorCode = metaPage.decode(metabuf); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, metaPage.sn); metaPage.sn = fileSn; metaPage.encode(metabuf); - // 更新metapage + // Update metapage lfs_->Write(fd, metabuf, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize; the restart should succeed + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 模拟日志恢复 + // Simulate log recovery errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读数据校验 + // Read data verification char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); } /** - * 异常测试7 - * 用例:WriteChunk产生快照后重启,恢复历史操作和当前操作 - * sn>chunk.sn, sn>chunk.correctedSn - * 测chunk.sn>chunk.correctedSn - * 预期:重启成功 + * Exception Test 7 + * Scenario: WriteChunk generates a snapshot and restarts, recovering the + * historical operation and the current one. sn>chunk.sn, sn>chunk.correctedSn, + * testing chunk.sn>chunk.correctedSn. + * Expected: the restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest7) { SequenceNum fileSn = 1; @@ -429,18 +366,15 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where + // chunk.sn>chunk.correctedSn char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟创建快照文件 + // Simulate creating a snapshot file ChunkOptions chunkOption; chunkOption.id = 1; chunkOption.sn = 1; @@ -452,19 +386,17 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { errorCode = snapshot.Open(true); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize; the restart should succeed + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check if snapshot information is loaded CSChunkInfo info; errorCode =
dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -472,61 +404,47 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复前一条操作 + // Simulate log recovery of the previous operation errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读快照文件来校验是否有cow + // Read the snapshot file to verify whether COW happened char readbuf[2 * PAGE_SIZE]; snapshot.Read(readbuf, offset, length); - // 预期未发生cow + // COW is not expected to have happened ASSERT_NE(0, memcmp(buf1, readbuf, length)); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation fileSn++; char buf2[PAGE_SIZE]; memset(buf2, '2', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查是否更新了版本号 + // Check if the version number has been updated errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // The chunk data is overwritten errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data was COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id - 1, - readbuf, - offset, - length); + 1, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); }
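Editorial note: Test 7 exercises both sides of the copy-on-write decision: replaying the old write (sn equal to the chunk version) must not COW, while the newer write (sn greater than both the chunk version and correctedSn) must first copy the old page into the snapshot. One plausible reading of the rule these tests pin down, as a sketch with simplified state rather than the real datastore logic:

#include <cstdint>

struct ChunkStateSketch {
    uint64_t sn;           // current chunk version
    uint64_t correctedSn;  // version recorded by DeleteSnapshotChunkOrCorrectSn
};

// COW is needed only for the first write of a newer version, and only when
// the current version has not already been superseded by correctedSn
// (Test 8 below shows no COW when chunk.sn == chunk.correctedSn).
bool NeedCow(const ChunkStateSketch& c, uint64_t requestSn) {
    return requestSn > c.sn && c.sn > c.correctedSn;
}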
 /** - * 异常测试8 - * 用例:WriteChunk产生快照后重启, + * Exception Test 8 + * Scenario: WriteChunk generates a snapshot and restarts, * sn>chunk.sn, sn>chunk.correctedSn - * 测chunk.sn==chunk.correctedSn - * 预期:重启成功 + * Test chunk.sn==chunk.correctedSn + * Expected: the restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest8) { SequenceNum fileSn = 1; @@ -535,27 +453,20 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,构造chunk.sn==chunk.correctedsn的场景 + // Generate chunk1 and construct a scenario where + // chunk.sn==chunk.correctedsn char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(1, 2); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->WriteChunk(1, // id - ++fileSn, - buf1, - offset, - length, - nullptr); + ++fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟创建快照文件 + // Simulate creating a snapshot file ChunkOptions chunkOption; chunkOption.id = 1; chunkOption.sn = 2; @@ -567,19 +478,17 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { errorCode = snapshot.Open(true); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize; the restart should succeed + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check if snapshot information is loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -587,60 +496,46 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { ASSERT_EQ(2, info.snapSn); ASSERT_EQ(2, info.correctedSn); - // 模拟日志恢复前一条操作 + // Simulate log recovery of the previous operation errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读快照文件来校验是否有cow + // Read the snapshot file to verify whether COW happened char readbuf[2 * PAGE_SIZE]; snapshot.Read(readbuf, offset, length); - // 预期未发生cow + // COW is not expected to have happened ASSERT_NE(0, memcmp(buf1, readbuf, length)); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation fileSn++; char buf2[PAGE_SIZE]; memset(buf2, '2', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查是否更新了版本号 + // Check if the version number has been updated errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(3, info.curSn); ASSERT_EQ(2, info.snapSn); ASSERT_EQ(2, info.correctedSn); - // chunk数据被覆盖 + // The chunk data is overwritten errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data was COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id - 2, - readbuf, - offset, - length); + 2, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); } /** - * 异常测试9 - * 用例:WriteChunk产生快照并更新metapage后重启,恢复历史操作和当前操作 - * sn>chunk.sn, sn>chunk.correctedSn - * 预期:重启成功 + * Exception Test 9 + * Scenario: WriteChunk generates a snapshot and updates the metapage before + * restarting, recovering the historical operation and the current one. + * sn>chunk.sn, sn>chunk.correctedSn. Expected: the restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest9) { SequenceNum fileSn = 1; @@ -649,18 +544,15 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where + // chunk.sn>chunk.correctedSn char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟创建快照文件 + // Simulate creating a snapshot file ChunkOptions chunkOption; chunkOption.id = 1; chunkOption.sn = 1; @@ -672,38 +564,36 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { errorCode = snapshot.Open(true); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1的metapage - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(1); + // Modify the metapage of chunk1 through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); char metabuf[PAGE_SIZE]; - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metabuf, 0, PAGE_SIZE); - // 模拟更新metapage成功 + // Simulate a successful metapage update ChunkFileMetaPage metaPage; errorCode =
metaPage.decode(metabuf); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, metaPage.sn); metaPage.sn = 2; metaPage.encode(metabuf); - // 更新metapage + // Update metapage lfs_->Write(fd, metabuf, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize; the restart should succeed + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check if snapshot information is loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -711,56 +601,42 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复前一条操作 + // Simulate log recovery of the previous operation errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::BackwardRequestError); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation fileSn++; char buf2[PAGE_SIZE]; memset(buf2, '2', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查是否更新了版本号 + // Check if the version number has been updated errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // The chunk data is overwritten char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data was COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id - 1, - readbuf, - offset, - length); + 1, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); }
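Editorial note: Test 9 (and Tests 10 and 11 below) expect BackwardRequestError when a replayed entry carries a sequence number older than the on-disk chunk version: that write was already absorbed before the crash, so applying it again would roll the data backwards. A sketch of that guard, simplified from the check that lives inside the datastore's WriteChunk path:

#include <cstdint>

enum class WriteVerdict { Apply, BackwardRequest };

// Stale log entries are rejected instead of silently overwriting newer data.
WriteVerdict CheckSequence(uint64_t requestSn, uint64_t chunkSn,
                           uint64_t correctedSn) {
    if (requestSn < chunkSn || requestSn < correctedSn)
        return WriteVerdict::BackwardRequest;
    return WriteVerdict::Apply;
}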
 /** - * 异常测试10 - * 用例:WriteChunk更新快照metapage前重启,恢复历史操作和当前操作 - * sn>chunk.sn, sn>chunk.correctedSn - * 预期:重启成功 + * Exception Test 10 + * Scenario: WriteChunk restarts before updating the snapshot metapage, + * recovering the historical operation and the current one. sn>chunk.sn, sn>chunk.correctedSn + * Expected: the restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest10) { SequenceNum fileSn = 1; @@ -769,42 +645,35 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where + // chunk.sn>chunk.correctedSn char buf1[2 * PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 产生快照文件 + // Generate a snapshot file fileSn++; length = PAGE_SIZE; char buf2[2 * PAGE_SIZE]; memset(buf2, '2', 2 * PAGE_SIZE); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟cow - std::string snapPath = baseDir + "/" + - FileNameOperator::GenerateSnapshotName(1, 1); - int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); + // Simulate COW + std::string snapPath = + baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); + int fd = lfs_->Open(snapPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf1, 2 * PAGE_SIZE, PAGE_SIZE); - // 更新metapage + // Update metapage char metabuf[PAGE_SIZE]; lfs_->Read(fd, metabuf, 0, PAGE_SIZE); - // 修改metapage + // Modify metapage SnapshotMetaPage metaPage; errorCode = metaPage.decode(metabuf); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -813,19 +682,17 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { lfs_->Write(fd, metabuf, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize; the restart should succeed + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check if snapshot information is loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -833,67 +700,52 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复 + // Simulate log recovery offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 1, // sn - buf1, - offset, - length, - nullptr); + buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::BackwardRequestError); - // 模拟恢复下一个操作 + // Simulate recovery of the next operation length = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn - buf2, - offset, - length, - nullptr); + buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation offset = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn - buf2, - offset, - length, - nullptr); + buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk 信息是否正确 + // Check if the chunk information is correct errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // The chunk data is overwritten char readbuf[2 * PAGE_SIZE]; offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data was COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id - 1, - readbuf, - offset, - length); + 1, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); } /** - * 异常测试11 - * 用例:WriteChunk更新快照metapage后重启,恢复历史操作和当前操作 - * sn>chunk.sn, sn>chunk.correctedSn - * 预期:重启成功 + * Exception Test 11 + * Scenario: WriteChunk updates the snapshot metapage and restarts, + * recovering the historical operation and the current one. sn>chunk.sn, + * sn>chunk.correctedSn. Expected: the restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest11) { SequenceNum fileSn = 1; @@ -902,53 +754,44 @@ TEST_F(ExceptionTestSuit, ExceptionTest11) { CSErrorCode errorCode; CSChunkInfo
chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where + // chunk.sn>chunk.correctedSn char buf1[2 * PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 产生快照文件 + // Generate a snapshot file fileSn++; length = PAGE_SIZE; char buf2[2 * PAGE_SIZE]; memset(buf2, '2', 2 * PAGE_SIZE); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟cow - std::string snapPath = baseDir + "/" + - FileNameOperator::GenerateSnapshotName(1, 1); - int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); + // Simulate COW + std::string snapPath = + baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); + int fd = lfs_->Open(snapPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf1, 2 * PAGE_SIZE, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize; the restart should succeed + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check if snapshot information is loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -956,66 +799,51 @@ TEST_F(ExceptionTestSuit, ExceptionTest11) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复 + // Simulate log recovery offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 1, // sn - buf1, - offset, - length, - nullptr); + buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::BackwardRequestError); - // 模拟恢复下一个操作 + // Simulate recovery of the next operation length = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn - buf2, - offset, - length, - nullptr); + buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation offset = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn - buf2, - offset, - length, - nullptr); + buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk 信息是否正确 + // Check if the chunk information is correct errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // The chunk data is overwritten char readbuf[2 * PAGE_SIZE]; offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data was COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id - 1, - readbuf, - offset, - length); + 1, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); } /** - * 异常测试12 - * 用例:PasteChunk,数据写入一半时,还未更新metapage重启/崩溃 - * 预期:重启成功,paste成功 + * Exception Test 12 + *
Scenario: PasteChunk restarts/crashes when data is half written and the + * metapage has not yet been updated. Expected: the restart succeeds and the + * paste succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest12) { ChunkID id = 1; @@ -1027,14 +855,13 @@ TEST_F(ExceptionTestSuit, ExceptionTest12) { CSChunkInfo info; std::string location("test@s3"); - // 创建克隆文件chunk1 + // Create the clone chunk chunk1 errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, correctedSn, // corrected sn - CHUNK_SIZE, - location); + CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check that all chunk information meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -1048,58 +875,50 @@ TEST_F(ExceptionTestSuit, ExceptionTest12) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 构造要写入的数据和请求偏移 + // Construct the data to be written and the request offset char buf1[PAGE_SIZE]; memset(buf1, '1', length); offset = 0; length = PAGE_SIZE; - // 通过lfs写数据到chunk文件 - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(1); - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + // Write data to the chunk file through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf1, offset + PAGE_SIZE, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.blockSize = BLOCK_SIZE; options.metaPageSize = PAGE_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize; the restart should succeed + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 模拟日志恢复 + // Simulate log recovery errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, correctedSn, // corrected sn - CHUNK_SIZE, - location); + CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->PasteChunk(1, // id - buf1, - offset, - length); + buf1, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查bitmap + // Check the bitmap errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(1, info.bitmap->NextClearBit(0)); - // 读数据校验 + // Read data verification char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id - sn, - readbuf, - offset, - length); + sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); }
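Editorial note: Test 12 works because a clone chunk's bitmap is only advanced by a successful PasteChunk: the bytes written directly through lfs_ before the simulated crash are not marked as present, so the replayed CreateCloneChunk + PasteChunk still apply and the bitmap ends up with exactly one page set. A sketch of page-granularity bookkeeping; the types are illustrative, not the curve Bitmap API:

#include <cstdint>
#include <vector>

// Mark the pages covered by a paste as present; reads of a clone chunk
// should only trust ranges whose bits are set. The caller sizes the bitmap
// to chunkSize / pageSize entries.
void MarkPasted(std::vector<bool>* bitmap, uint64_t offset, uint64_t length,
                uint64_t pageSize) {
    uint64_t first = offset / pageSize;
    uint64_t last = (offset + length + pageSize - 1) / pageSize;  // exclusive
    for (uint64_t page = first; page < last; ++page) (*bitmap)[page] = true;
}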
"src/common/concurrent/concurrent.h" #include "src/common/timeutility.h" #include "src/fs/local_filesystem.h" -#include "src/chunkserver/datastore/define.h" -#include "src/chunkserver/datastore/file_pool.h" -#include "src/chunkserver/datastore/chunkserver_datastore.h" #include "test/chunkserver/datastore/filepool_helper.h" -using curve::fs::FileSystemType; -using curve::fs::LocalFileSystem; -using curve::fs::LocalFsFactory; using curve::common::Atomic; using curve::common::Thread; using curve::common::TimeUtility; +using curve::fs::FileSystemType; +using curve::fs::LocalFileSystem; +using curve::fs::LocalFsFactory; using ::testing::UnorderedElementsAre; @@ -55,12 +56,12 @@ const ChunkSizeType CHUNK_SIZE = 16 * kMB; const ChunkSizeType BLOCK_SIZE = 4096; const PageSizeType PAGE_SIZE = 4 * 1024; -extern const string baseDir; // NOLINT -extern const string poolDir; // NOLINT +extern const string baseDir; // NOLINT +extern const string poolDir; // NOLINT extern const string poolMetaPath; // NOLINT /** - * DataStore层集成LocalFileSystem层测试 + * Datastore layer integration LocalFileSystem layer testing */ class DatastoreIntegrationBase : public testing::Test { public: @@ -79,9 +80,7 @@ class DatastoreIntegrationBase : public testing::Test { options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - dataStore_ = std::make_shared(lfs_, - filePool_, - options); + dataStore_ = std::make_shared(lfs_, filePool_, options); if (dataStore_ == nullptr) { LOG(FATAL) << "allocate chunkfile pool failed!"; } @@ -105,8 +104,7 @@ class DatastoreIntegrationBase : public testing::Test { cfop.metaPageSize = PAGE_SIZE; memcpy(cfop.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); - if (lfs_->DirExists(poolDir)) - lfs_->Delete(poolDir); + if (lfs_->DirExists(poolDir)) lfs_->Delete(poolDir); allocateChunk(lfs_, chunkNum, poolDir, CHUNK_SIZE); ASSERT_TRUE(filePool_->Initialize(cfop)); ASSERT_EQ(chunkNum, filePool_->Size()); @@ -121,8 +119,8 @@ class DatastoreIntegrationBase : public testing::Test { } protected: - std::shared_ptr filePool_; - std::shared_ptr lfs_; + std::shared_ptr filePool_; + std::shared_ptr lfs_; std::shared_ptr dataStore_; }; diff --git a/test/integration/chunkserver/datastore/datastore_integration_test.cpp b/test/integration/chunkserver/datastore/datastore_integration_test.cpp index 52693dfa9e..a5f0316ba9 100644 --- a/test/integration/chunkserver/datastore/datastore_integration_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_integration_test.cpp @@ -28,8 +28,8 @@ namespace chunkserver { const uint64_t kMB = 1024 * 1024; const ChunkSizeType CHUNK_SIZE = 16 * kMB; const PageSizeType PAGE_SIZE = 4 * 1024; -const string baseDir = "./data_int"; // NOLINT -const string poolDir = "./chunkfilepool_int"; // NOLINT +const string baseDir = "./data_int"; // NOLINT +const string poolDir = "./chunkfilepool_int"; // NOLINT const string poolMetaPath = "./chunkfilepool_int.meta"; // NOLINT class DatastoreIntegrationTest : public DatastoreIntegrationBase { @@ -39,51 +39,49 @@ class DatastoreIntegrationTest : public DatastoreIntegrationBase { }; /** - * 基本功能测试验证 - * 读、写、删、获取文件信息 + * Basic functional testing verification + * Read, write, delete, and obtain file information */ TEST_F(DatastoreIntegrationTest, BasicTest) { ChunkID id = 1; SequenceNum sn = 1; off_t offset = 0; size_t length = PAGE_SIZE; - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(id); + std::string chunkPath = + baseDir + "/" + 
FileNameOperator::GenerateChunkFileName(id); CSErrorCode errorCode; CSChunkInfo info; - /******************场景一:新建的文件,Chunk文件不存在******************/ + /******************Scenario 1: Newly created file, the chunk file does not + * exist******************/ - // 文件不存在 + // The file does not exist ASSERT_FALSE(lfs_->FileExists(chunkPath)); - // 读chunk时返回ChunkNotExistError + // ChunkNotExistError is returned when reading the chunk char readbuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 无法获取到chunk的版本号 + // The version number of the chunk cannot be obtained errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 删除chunk返回Success + // Deleting the chunk returns Success errorCode = dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); - /******************场景二:通过WriteChunk产生chunk文件后操作**************/ + /******************Scenario 2: Operations after the chunk file is generated + * via WriteChunk**************/ char buf1_1_1[PAGE_SIZE]; memset(buf1_1_1, 'a', length); - // 第一次WriteChunk会产生chunk文件 - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_1, - offset, - length, - nullptr); + // The first WriteChunk generates the chunk file + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk的信息,且各项信息符合预期 + // Chunk information can be obtained and all of it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, info.curSn); @@ -95,87 +93,63 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); - // 读取写入的4KB验证一下,应当与写入数据相等 + // Read back the written 4KB to verify it equals the written data errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_1, readbuf, length)); - // 没被写过的区域也可以读,但是不保证读到的数据内容 - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - CHUNK_SIZE - PAGE_SIZE, - length); + // Unwritten areas can also be read, but the content read is not + // guaranteed + errorCode = + dataStore_->ReadChunk(id, sn, readbuf, CHUNK_SIZE - PAGE_SIZE, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // chunk 存在时,覆盖写 + // Overwrite write when the chunk exists char buf1_1_2[PAGE_SIZE]; memset(buf1_1_2, 'b', length); - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_2, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Unwritten areas can also be read, but the content read is not + // guaranteed errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, length)); - // chunk 存在时,写入未写过区域 + // When the chunk exists, write to an unwritten area char buf1_1_3[PAGE_SIZE]; memset(buf1_1_3, 'c', length); offset = PAGE_SIZE; length = PAGE_SIZE; - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_3, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_3, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 - errorCode =
dataStore_->ReadChunk(id, - sn, - readbuf, - 0, - 3 * PAGE_SIZE); + // Unwritten areas can also be read, but the content read is not + // guaranteed + errorCode = dataStore_->ReadChunk(id, sn, readbuf, 0, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_1_3, readbuf + PAGE_SIZE, PAGE_SIZE)); - // chunk 存在时,覆盖部分区域 + // When the chunk exists, overwrite part of the area char buf1_1_4[2 * PAGE_SIZE]; memset(buf1_1_4, 'd', length); offset = PAGE_SIZE; length = 2 * PAGE_SIZE; - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_4, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_4, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - 0, - 3 * PAGE_SIZE); + // Unwritten areas can also be read, but the content read is not + // guaranteed + errorCode = dataStore_->ReadChunk(id, sn, readbuf, 0, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_1_4, readbuf + PAGE_SIZE, 2 * PAGE_SIZE)); - - /******************场景三:用户删除文件******************/ + /******************Scenario 3: The user deletes the file******************/ errorCode = dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -185,7 +159,7 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { } /** - * 重启恢复测试 + * Restart recovery test */ TEST_F(DatastoreIntegrationTest, RestartTest) { SequenceNum fileSn = 1; @@ -196,7 +170,7 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { CSChunkInfo info3; std::string location("test@s3"); - // 构造要用到的读写缓冲区 + // Construct the read and write buffers to be used char buf1_1[2 * PAGE_SIZE]; memset(buf1_1, 'a', length); char buf2_1[2 * PAGE_SIZE]; @@ -212,7 +186,8 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { size_t readSize = 4 * PAGE_SIZE; char readBuf[4 * PAGE_SIZE]; - // 各个操作对应的错误码返回值,错误码命名格式为 e_optype_chunid_sn + // The error code returned by each operation; the naming format is + // e_optype_chunkid_sn CSErrorCode e_write_1_1; CSErrorCode e_write_2_1; CSErrorCode e_write_2_2; CSErrorCode e_write_2_4; CSErrorCode e_write_3_1; CSErrorCode e_del_1_1; CSErrorCode e_delsnap_2_2; CSErrorCode e_delsnap_2_3; CSErrorCode e_clone_3_1; - // 模拟所有用户请求,用lamdba函数可以用于验证日志恢复时重用这部分代码 - // 如果后面要加用例,只需要在函数内加操作即可 + // Simulate all user requests; a lambda is used so this code can be reused + // when verifying log recovery. To add more cases later, simply add + // operations inside the function auto ApplyRequests = [&]() { fileSn = 1; - // 模拟普通文件操作,WriteChunk产生chunk1、chunk2 + // Simulate ordinary file operations; WriteChunk generates chunk1 and + // chunk2 offset = 0; length = 2 * PAGE_SIZE; - // 产生chunk1 - e_write_1_1 = dataStore_->WriteChunk(1, // chunk id - fileSn, - buf1_1, - offset, - length, - nullptr); - // 产生chunk2 - e_write_2_1 = dataStore_->WriteChunk(2, // chunk id - fileSn, - buf1_1, - offset, - length, - nullptr); - // 删除chunk1 + // Generate chunk1 + e_write_1_1 = + dataStore_->WriteChunk(1, // chunk id + fileSn, buf1_1, offset, length, nullptr); + // Generate chunk2 + e_write_2_1 = + dataStore_->WriteChunk(2, // chunk id + fileSn, buf1_1, offset, length, nullptr); + // Delete chunk1 e_del_1_1 = dataStore_->DeleteChunk(1, fileSn); - // 模拟快照操作 + // Simulate snapshot operations ++fileSn; offset = 1 * PAGE_SIZE; length = 2 *
PAGE_SIZE; - // 写chunk2,产生快照文件 - e_write_2_2 = dataStore_->WriteChunk(2, // chunk id - fileSn, - buf2_2, - offset, - length, - nullptr); - // 删除chunk2快照 + // Write chunk2 to generate a snapshot file + e_write_2_2 = + dataStore_->WriteChunk(2, // chunk id + fileSn, buf2_2, offset, length, nullptr); + // Delete the chunk2 snapshot e_delsnap_2_2 = dataStore_->DeleteSnapshotChunkOrCorrectSn(2, fileSn); - // 模拟再次快照,然后删除chunk2快照 + // Simulate taking another snapshot and then delete the chunk2 snapshot ++fileSn; e_delsnap_2_3 = dataStore_->DeleteSnapshotChunkOrCorrectSn(2, fileSn); - // 模拟再次快照,然后写数据到chunk2产生快照 + // Simulate another snapshot, then write data to chunk2 to generate a + // snapshot ++fileSn; offset = 2 * PAGE_SIZE; length = 2 * PAGE_SIZE; - // 写chunk2,产生快照文件 + // Write chunk2 to generate a snapshot file + e_write_2_4 = + dataStore_->WriteChunk(2, // chunk id + fileSn, buf2_4, offset, length, nullptr); - - // 模拟克隆操作 + + // Simulate clone operations e_clone_3_1 = dataStore_->CreateCloneChunk(3, // chunk id - 1, // sn - 0, // corrected sn - CHUNK_SIZE, - location); + 1, // sn + 0, // corrected sn + CHUNK_SIZE, location); - // 写数据到chunk3 + // Write data to chunk3 offset = 0; length = 2 * PAGE_SIZE; - // 写chunk3 - e_write_3_1 = dataStore_->WriteChunk(3, // chunk id - 1, // sn - writeBuf, - offset, - length, - nullptr); - // paste数据到chunk3 + // Write chunk3 + e_write_3_1 = dataStore_->WriteChunk(3, // chunk id + 1, // sn + writeBuf, offset, length, nullptr); + // Paste data to chunk3 offset = 1 * PAGE_SIZE; length = 2 * PAGE_SIZE; - e_paste_3_1 = dataStore_->PasteChunk(3, // chunk id - pasteBuf, - offset, - length); + e_paste_3_1 = dataStore_->PasteChunk(3, // chunk id + pasteBuf, offset, length); }; - // 检查上面用户操作以后,DataStore层各文件的状态,可重用 + // Check the status of each file in the DataStore layer after the user + // operations above; reusable auto CheckStatus = [&]() { CSErrorCode errorCode; - // chunk1 不存在 + // Chunk1 does not exist errorCode = dataStore_->GetChunkInfo(1, &info1); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // chunk2存在,版本为4,correctedSn为3,存在快照,快照版本为2 + // Chunk2 exists, version 4, correctedSn is 3, a snapshot exists, and + // the snapshot version is 2 errorCode = dataStore_->GetChunkInfo(2, &info2); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(4, info2.curSn); ASSERT_EQ(2, info2.snapSn); ASSERT_EQ(3, info2.correctedSn); - // 检查chunk2数据,[0, 1KB]:a , [1KB, 2KB]:b , [2KB, 4KB]:c + // Check chunk2 data, [0, 1KB]:a , [1KB, 2KB]:b , [2KB, 4KB]:c errorCode = dataStore_->ReadChunk(2, fileSn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2_1, readBuf, 1 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf2_2, readBuf + 1 * PAGE_SIZE, 1 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf2_4, readBuf + 2 * PAGE_SIZE, 2 * PAGE_SIZE)); - // 检查chunk2快照数据,[0, 1KB]:a , [1KB, 3KB]:b + // Check chunk2 snapshot data, [0, 1KB]:a , [1KB, 3KB]:b errorCode = dataStore_->ReadSnapshotChunk(2, 2, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2_1, readBuf, 1 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf2_2, readBuf + 1 * PAGE_SIZE, 2 * PAGE_SIZE)); }; - /******************构造重启前的数据******************/ - // 提交操作 + /******************Construct the data before the restart******************/ + // Apply the operations ApplyRequests(); - // 检查每次操作的返回值是否符合预期 + // Check if the return value of each operation meets expectations ASSERT_EQ(e_write_1_1,
CSErrorCode::Success); ASSERT_EQ(e_write_2_1, CSErrorCode::Success); ASSERT_EQ(e_del_1_1, CSErrorCode::Success); @@ -340,27 +302,27 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { ASSERT_EQ(e_clone_3_1, CSErrorCode::Success); ASSERT_EQ(e_write_3_1, CSErrorCode::Success); ASSERT_EQ(e_paste_3_1, CSErrorCode::Success); - // 检查此时各个文件的状态 + // Check the status of each file at this time CheckStatus(); - /******************场景一:重启重新加载文件******************/ - // 模拟重启 + /******************Scenario 1: Restart and reload files******************/ + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.pageSize = PAGE_SIZE; - // 构造新的dataStore_,并重新初始化 - dataStore_ = std::make_shared(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查各个chunk的状态,应该与前面的一致 + // Check the status of each chunk; it should be consistent with the + // previous one CheckStatus(); - /******************场景二:恢复日志,重放之前的操作******************/ - // 模拟日志回放 + /******************Scenario 2: Recover the log and replay the previous + * operations******************/ + // Simulate log replay ApplyRequests(); - // 检查每次操作的返回值是否符合预期 + // Check if the return value of each operation meets expectations ASSERT_EQ(e_write_1_1, CSErrorCode::Success); ASSERT_EQ(e_write_2_1, CSErrorCode::BackwardRequestError); ASSERT_EQ(e_del_1_1, CSErrorCode::Success);
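Editorial note: the RestartTest above is the hand-rolled version of a general pattern, apply a workload, restart, replay, compare state. The datastore_restart_test.cpp diff that follows generalizes it into a StepList; its core verification boils down to the skeleton below, where ResetEnvAndApply, Apply and StateMatchesExpectation are hypothetical names for what ClearEnv/ConstructEnv, Exec and CheckStatus do:

#include <cassert>

// Hypothetical helpers standing in for the StepList machinery below.
void ResetEnvAndApply(int first, int last);
void Apply(int first, int last);
bool StateMatchesExpectation(int last);

// Every step may be the crash point (lastStep), and after the restart the
// raft log may be replayed from any earlier entry (beginStep); the final
// state must match the expectation captured for lastStep in every case.
void VerifyReplayIdempotence(int stepCount) {
    for (int lastStep = 0; lastStep < stepCount; ++lastStep) {
        ResetEnvAndApply(0, lastStep);       // state at the moment of the crash
        for (int beginStep = 0; beginStep <= lastStep; ++beginStep) {
            Apply(beginStep, lastStep);      // replay a suffix of the log
            assert(StateMatchesExpectation(lastStep));
        }
    }
}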
 diff --git a/test/integration/chunkserver/datastore/datastore_restart_test.cpp b/test/integration/chunkserver/datastore/datastore_restart_test.cpp index f7a9d9ae5a..8d8a64812b 100644 --- a/test/integration/chunkserver/datastore/datastore_restart_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_restart_test.cpp @@ -27,10 +27,10 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_res"; // NOLINT -const string poolDir = "./chunfilepool_int_res"; // NOLINT +const string baseDir = "./data_int_res"; // NOLINT +const string poolDir = "./chunfilepool_int_res"; // NOLINT const string poolMetaPath = "./chunfilepool_int_res.meta"; // NOLINT -// 以下的测试读写数据都在[0, 32kb]范围内 +// The following tests read and write data within the [0, 32kb] range const uint64_t kMaxSize = 8 * PAGE_SIZE; struct RangeData { @@ -39,9 +39,7 @@ struct RangeData { size_t length; RangeData() = default; RangeData(char ch, off_t off, size_t len) - : data(ch) - , offset(off) - , length(len) {} + : data(ch), offset(off), length(len) {} }; struct ExpectStatus { @@ -52,12 +50,12 @@ struct ExpectStatus { ExpectStatus() : exist(false), chunkData(nullptr), snapshotData(nullptr) {} ~ExpectStatus() { if (chunkData != nullptr) { - delete [] chunkData; + delete[] chunkData; chunkData = nullptr; } if (snapshotData != nullptr) { - delete [] snapshotData; + delete[] snapshotData; snapshotData = nullptr; } } @@ -66,26 +64,16 @@ class ExecStep { public: explicit ExecStep(std::shared_ptr<CSDataStore>* datastore, ChunkID id) - : datastore_(datastore) - , id_(id) - , statusAfterExec_(nullptr) {} + : datastore_(datastore), id_(id), statusAfterExec_(nullptr) {} virtual ~ExecStep() {} - std::shared_ptr GetDataStore() { - return (*datastore_); - } + std::shared_ptr<CSDataStore> GetDataStore() { return (*datastore_); } - ChunkID GetChunkID() { - return id_; - } + ChunkID GetChunkID() { return id_; } - std::shared_ptr GetStatus() { - return statusAfterExec_; - } + std::shared_ptr<ExpectStatus> GetStatus() { return statusAfterExec_; } - void ClearStatus() { - statusAfterExec_ = nullptr; - } + void ClearStatus() { statusAfterExec_ = nullptr; } virtual void SetExpectStatus() { statusAfterExec_ = std::make_shared<ExpectStatus>(); @@ -100,29 +88,25 @@ class ExecStep { uint32_t endIndex = kMaxSize / PAGE_SIZE - 1; std::vector<BitRange> setRanges; info.bitmap->Divide(0, endIndex, nullptr, &setRanges); - for (auto &range : setRanges) { + for (auto& range : setRanges) { off_t offset = range.beginIndex * PAGE_SIZE; size_t length = (range.endIndex - range.beginIndex + 1) * PAGE_SIZE; - (*datastore_)->ReadChunk(id_, - info.curSn, - (chunkData + offset), - offset, - length); + (*datastore_) + ->ReadChunk(id_, info.curSn, (chunkData + offset), + offset, length); } } else { - (*datastore_)->ReadChunk(id_, - info.curSn, - chunkData, - 0, - kMaxSize); + (*datastore_) + ->ReadChunk(id_, info.curSn, chunkData, 0, kMaxSize); } statusAfterExec_->chunkData = chunkData; - // 快照存在,读取快照数据 + // The snapshot exists; read the snapshot data if (info.snapSn > 0) { char* snapData = new char[kMaxSize]; - (*datastore_)->ReadSnapshotChunk( - id_, info.snapSn, snapData, 0, kMaxSize); + (*datastore_) + ->ReadSnapshotChunk(id_, info.snapSn, snapData, 0, + kMaxSize); statusAfterExec_->snapshotData = snapData; } } // if (err == CSErrorCode::Success) @@ -142,23 +126,22 @@ class ExecWrite : public ExecStep { public: ExecWrite(std::shared_ptr<CSDataStore>* datastore, ChunkID id, SequenceNum sn, RangeData data) - : ExecStep(datastore, id) - , sn_(sn) - , data_(data) {} + : ExecStep(datastore, id), sn_(sn), data_(data) {} ~ExecWrite() {} void Exec() override { char* buf = new char[data_.length]; memset(buf, data_.data, data_.length); - (*datastore_)->WriteChunk(id_, sn_, buf, - data_.offset, data_.length, nullptr); + (*datastore_) + ->WriteChunk(id_, sn_, buf, data_.offset, data_.length, nullptr); } void Dump() override { - printf("WriteChunk, id = %lu, sn = %lu, offset = %lu, " - "size = %lu, data = %c.\n", - id_, sn_, data_.offset, data_.length, data_.data); + printf( + "WriteChunk, id = %lu, sn = %lu, offset = %lu, " + "size = %lu, data = %c.\n", + id_, sn_, data_.offset, data_.length, data_.data); } private: @@ -170,21 +153,21 @@ class ExecPaste : public ExecStep { public: ExecPaste(std::shared_ptr<CSDataStore>* datastore, ChunkID id, RangeData data) - : ExecStep(datastore, id) - , data_(data) {} + : ExecStep(datastore, id), data_(data) {} ~ExecPaste() {} void Exec() override { char* buf = new char[data_.length]; memset(buf, data_.data, data_.length); (*datastore_)->PasteChunk(id_, buf, data_.offset, data_.length); - delete [] buf; + delete[] buf; } void Dump() override { - printf("PasteChunk, id = %lu, offset = %lu, " - "size = %lu, data = %c.\n", - id_, data_.offset, data_.length, data_.data); + printf( + "PasteChunk, id = %lu, offset = %lu, " + "size = %lu, data = %c.\n", + id_, data_.offset, data_.length, data_.data); } private: @@ -195,13 +178,10 @@ class ExecDelete : public ExecStep { public: ExecDelete(std::shared_ptr<CSDataStore>* datastore, ChunkID id, SequenceNum sn) - : ExecStep(datastore, id) - , sn_(sn) {} + : ExecStep(datastore, id), sn_(sn) {} ~ExecDelete() {} - void Exec() override { - (*datastore_)->DeleteChunk(id_, sn_); - } + void Exec() override { (*datastore_)->DeleteChunk(id_, sn_); } void Dump() override { printf("DeleteChunk, id = %lu, sn = %lu.\n", id_, sn_); @@ -213,11 +193,9 @@ class ExecDelete : public ExecStep { class ExecDeleteSnapshot : public ExecStep { public: - ExecDeleteSnapshot(std::shared_ptr<CSDataStore>* datastore, - ChunkID id, - SequenceNum correctedSn) - : ExecStep(datastore, id) - , correctedSn_(correctedSn) {}
ExecDeleteSnapshot(std::shared_ptr<CSDataStore>* datastore, ChunkID id, + SequenceNum correctedSn) + : ExecStep(datastore, id), correctedSn_(correctedSn) {} ~ExecDeleteSnapshot() {} void Exec() override { @@ -225,8 +203,10 @@ class ExecDeleteSnapshot : public ExecStep { } void Dump() override { - printf("DeleteSnapshotChunkOrCorrectSn, " - "id = %lu, correctedSn = %lu.\n", id_, correctedSn_); + printf( + "DeleteSnapshotChunkOrCorrectSn, " + "id = %lu, correctedSn = %lu.\n", + id_, correctedSn_); } private: @@ -238,22 +218,23 @@ class ExecCreateClone : public ExecStep { ExecCreateClone(std::shared_ptr<CSDataStore>* datastore, ChunkID id, SequenceNum sn, SequenceNum correctedSn, ChunkSizeType size, std::string location) - : ExecStep(datastore, id) - , sn_(sn) - , correctedSn_(correctedSn) - , size_(size) - , location_(location) {} + : ExecStep(datastore, id), + sn_(sn), + correctedSn_(correctedSn), + size_(size), + location_(location) {} ~ExecCreateClone() {} void Exec() override { - (*datastore_)->CreateCloneChunk( - id_, sn_, correctedSn_, size_, location_); + (*datastore_) + ->CreateCloneChunk(id_, sn_, correctedSn_, size_, location_); } void Dump() override { - printf("CreateCloneChunk, id = %lu, sn = %lu, correctedSn = %lu, " - "chunk size = %u, location = %s.\n", - id_, sn_, correctedSn_, size_, location_.c_str()); + printf( + "CreateCloneChunk, id = %lu, sn = %lu, correctedSn = %lu, " + "chunk size = %u, location = %s.\n", + id_, sn_, correctedSn_, size_, location_.c_str()); } private: @@ -269,41 +250,41 @@ class StepList { explicit StepList(ClearFunc clearFunc) : clearFunc_(clearFunc) {} ~StepList() {} - void Add(std::shared_ptr<ExecStep> step) { - steps.push_back(step); - } + void Add(std::shared_ptr<ExecStep> step) { steps.push_back(step); } - int GetStepCount() { - return steps.size(); - } + int GetStepCount() { return steps.size(); } void ClearEnv() { clearFunc_(); - // 清理每一步的预期状态,因为清理环境后,读取到的数据内容可能会不一样 - // 因为通过FilePool分配的chunk初始内容是不确定的 + // Clear the expected state of each step: after the environment is + // cleaned, the data read back may differ, because the initial content + // of a chunk allocated from the FilePool is undetermined for (auto& step : steps) { step->ClearStatus(); } } - // 重启前,用户最后执行的操作可能为任意步骤, - // 需要验证每个步骤作为最后执行操作时,日志从该步骤前任意步骤进行恢复的幂等性 - // 对于未执行的步骤可以不必验证,只要保证已执行步骤的恢复是幂等的 - // 未执行的步骤恢复一定是幂等的 + // Before the restart, the last operation the user executed may be any + // step. For each step taken as the last executed operation, verify that + // replaying the log from any step before it is idempotent. Steps that + // were never executed need no verification: as long as recovery of the + // executed steps is idempotent, recovery of unexecuted steps is + // trivially idempotent bool VerifyLogReplay() { - // 验证每个步骤作为最后执行操作时日志恢复的幂等性 + // Verify the idempotence of log recovery with each step as the final + // operation for (int lastStep = 0; lastStep < steps.size(); ++lastStep) { - // 重新初始化环境 + // Reinitialize the environment ClearEnv(); printf("==============Verify log replay to step%d==============\n", - lastStep + 1); + lastStep + 1); - // 构造重启前环境 + // Construct the pre-restart environment if (!ConstructEnv(lastStep)) { LOG(ERROR) << "Construct env failed."; Dump(); return false; } - // 验证日志恢复后的幂等性 + // Verify the idempotence of log recovery if (!ReplayLog(lastStep)) { LOG(ERROR) << "Replay log failed."
<< "last step: step" << lastStep + 1; @@ -322,15 +303,16 @@ class StepList { } private: - // 构造初始状态 + // Construction initial state bool ConstructEnv(int lastStep) { - // 模拟日志恢复前执行,用于构造初始Chunk状态,并初始化每一步的预期状态 + // Execute before simulating log recovery to construct the initial Chunk + // state and initialize the expected state for each step for (int curStep = 0; curStep <= lastStep; ++curStep) { std::shared_ptr step = steps[curStep]; step->Exec(); step->SetExpectStatus(); } - // 检查构造出来的状态是否符合预期 + // Check if the constructed state meets expectations if (!CheckStatus(lastStep)) { LOG(ERROR) << "Check chunk status failed." << "last step: step" << lastStep + 1; @@ -339,16 +321,18 @@ class StepList { return true; } - // 从最后步骤前任意一个步骤进行恢复都应该保证幂等性 + // Restoring from any step before the final step should ensure idempotence bool ReplayLog(int lastStep) { - // 模拟从不同的起始位置进行日志恢复 + // Simulate log recovery from different starting locations for (int beginStep = 0; beginStep <= lastStep; ++beginStep) { - // 执行恢复前,chunk的状态保证为预期的状态 + // Before performing the recovery, the state of the chunk is + // guaranteed to be the expected state for (int curStep = beginStep; curStep <= lastStep; ++curStep) { std::shared_ptr step = steps[curStep]; step->Exec(); } - // 每次日志恢复完成检查Chunk状态是否符合预期 + // Check if the Chunk status meets expectations after each log + // recovery is completed if (!CheckStatus(lastStep)) { LOG(ERROR) << "Check chunk status failed." << "begin step: step" << beginStep + 1 @@ -361,8 +345,7 @@ class StepList { bool CheckChunkData(std::shared_ptr step) { std::shared_ptr expectStatus = step->GetStatus(); - std::shared_ptr datastore = - step->GetDataStore(); + std::shared_ptr datastore = step->GetDataStore(); ChunkID id = step->GetChunkID(); CSChunkInfo info; datastore->GetChunkInfo(id, &info); @@ -373,50 +356,41 @@ class StepList { uint32_t endIndex = kMaxSize / PAGE_SIZE - 1; std::vector setRanges; info.bitmap->Divide(0, endIndex, nullptr, &setRanges); - for (auto &range : setRanges) { + for (auto& range : setRanges) { off_t offset = range.beginIndex * PAGE_SIZE; size_t length = (range.endIndex - range.beginIndex + 1) * PAGE_SIZE; - datastore->ReadChunk(id, - info.curSn, - (actualData + offset), - offset, - length); + datastore->ReadChunk(id, info.curSn, (actualData + offset), + offset, length); } } else { - datastore->ReadChunk(id, - info.curSn, - actualData, - 0, - kMaxSize); + datastore->ReadChunk(id, info.curSn, actualData, 0, kMaxSize); } int ret = memcmp(expectStatus->chunkData, actualData, kMaxSize); if (ret != 0) { LOG(ERROR) << "Data readed not as expect." 
- << "chunk id: " << id - << ", ret: " << ret; + << "chunk id: " << id << ", ret: " << ret; for (int i = 0; i < kMaxSize; ++i) { if (*(expectStatus->chunkData + i) != *(actualData + i)) { - LOG(ERROR) << "diff pos: " << i - << ", expect data: " - << *(expectStatus->chunkData + i) - << ", actual data: " << *(actualData + i); + LOG(ERROR) + << "diff pos: " << i + << ", expect data: " << *(expectStatus->chunkData + i) + << ", actual data: " << *(actualData + i); break; } } - delete [] actualData; + delete[] actualData; return false; } - delete [] actualData; + delete[] actualData; return true; } bool CheckSnapData(std::shared_ptr step) { std::shared_ptr expectStatus = step->GetStatus(); - std::shared_ptr datastore = - step->GetDataStore(); + std::shared_ptr datastore = step->GetDataStore(); ChunkID id = step->GetChunkID(); CSChunkInfo info; datastore->GetChunkInfo(id, &info); @@ -424,23 +398,22 @@ class StepList { char* actualData = new char[kMaxSize]; CSErrorCode err; - err = datastore->ReadSnapshotChunk( - id, info.snapSn, actualData, 0, kMaxSize); + err = datastore->ReadSnapshotChunk(id, info.snapSn, actualData, 0, + kMaxSize); if (err != CSErrorCode::Success) { LOG(ERROR) << "Read snapshot failed." - << "Error Code: " << err - << ", chunk id: " << id; - delete [] actualData; + << "Error Code: " << err << ", chunk id: " << id; + delete[] actualData; return false; } if (memcmp(expectStatus->snapshotData, actualData, kMaxSize) != 0) { LOG(ERROR) << "Data readed not as expect." - << "chunk id: " << id; - delete [] actualData; + << "chunk id: " << id; + delete[] actualData; return false; } - delete [] actualData; + delete[] actualData; return true; } @@ -448,57 +421,51 @@ class StepList { std::shared_ptr step = steps[lastStep]; std::shared_ptr expectStatus = step->GetStatus(); - // 获取chunk信息 - std::shared_ptr datastore = - step->GetDataStore(); + // Obtain chunk information + std::shared_ptr datastore = step->GetDataStore(); ChunkID id = step->GetChunkID(); CSChunkInfo info; CSErrorCode err = datastore->GetChunkInfo(id, &info); - // 返回Success说明chunk存在 + // Returning Success indicates that the chunk exists if (err == CSErrorCode::Success) { - // 检查chunk的状态 - if (!expectStatus->exist || - expectStatus->chunkInfo != info) { + // Check the status of the chunk + if (!expectStatus->exist || expectStatus->chunkInfo != info) { LOG(ERROR) << "Chunk info is not as expected!"; LOG(ERROR) << "Expect status(" << "chunk exist: " << expectStatus->exist << ", sn: " << expectStatus->chunkInfo.curSn - << ", correctedSn: " << expectStatus->chunkInfo.correctedSn // NOLINT + << ", correctedSn: " + << expectStatus->chunkInfo.correctedSn // NOLINT << ", snap sn: " << expectStatus->chunkInfo.snapSn << ", isClone: " << expectStatus->chunkInfo.isClone << ", location: " << expectStatus->chunkInfo.location << ")."; LOG(ERROR) << "Actual status(" - << "chunk exist: " << true - << ", sn: " << info.curSn - << ", correctedSn: " << info.correctedSn + << "chunk exist: " << true << ", sn: " << info.curSn + << ", correctedSn: " << info.correctedSn << ", isClone: " << info.isClone - << ", location: " << info.location - << ")."; + << ", location: " << info.location << ")."; return false; } - // 检查chunk的数据状态 - if (!CheckChunkData(step)) - return false; + // Check the data status of the chunk + if (!CheckChunkData(step)) return false; - // 检查快照状态 + // Check snapshot status if (info.snapSn > 0) { - // 检查快照的数据状态 - if (!CheckSnapData(step)) - return false; + // Check the data status of the snapshot + if (!CheckSnapData(step)) return 
false;
            }
        } else if (err == CSErrorCode::ChunkNotExistError) {
-            // 预期chunk存在,实际却不存在
+            // The chunk is expected to exist, but actually it does not
            if (expectStatus->exist) {
                LOG(ERROR) << "Chunk is expected to exist, but actual not.";
                return false;
            }
        } else {
            LOG(ERROR) << "Get chunk info failed."
-                       << "chunk id: " << id
-                       << ", error code: " << err;
+                       << "chunk id: " << id << ", error code: " << err;
            return false;
        }
        return true;
@@ -529,7 +496,7 @@ TEST_F(RestartTestSuit, BasicTest) {
    ChunkID id = 1;
    SequenceNum sn = 1;

-    // 第一步:WriteChunk,写[0, 8kb]区域
+    // Step 1: WriteChunk, write the [0, 8kb] area
    RangeData step1Data;
    step1Data.offset = 0;
    step1Data.length = 2 * PAGE_SIZE;
@@ -538,7 +505,7 @@ TEST_F(RestartTestSuit, BasicTest) {
        std::make_shared(&dataStore_, id, sn, step1Data);
    list.Add(step1);

-    // 第二步:WriteChunk,写[4kb, 12kb]区域
+    // Step 2: WriteChunk, write the [4kb, 12kb] area
    RangeData step2Data;
    step2Data.offset = PAGE_SIZE;
    step2Data.length = 2 * PAGE_SIZE;
@@ -547,7 +514,7 @@ TEST_F(RestartTestSuit, BasicTest) {
        std::make_shared(&dataStore_, id, sn, step2Data);
    list.Add(step2);

-    // 第三步:DeleteChunk
+    // Step 3: DeleteChunk
    std::shared_ptr step3 =
        std::make_shared(&dataStore_, id, sn);
    list.Add(step3);
@@ -561,7 +528,7 @@ TEST_F(RestartTestSuit, SnapshotTest) {
    ChunkID id = 1;
    SequenceNum sn = 1;

-    // 第一步:WriteChunk,写[0, 8kb]区域
+    // Step 1: WriteChunk, write the [0, 8kb] area
    RangeData step1Data;
    step1Data.offset = 0;
    step1Data.length = 2 * PAGE_SIZE;
@@ -570,10 +537,10 @@ TEST_F(RestartTestSuit, SnapshotTest) {
        std::make_shared(&dataStore_, id, sn, step1Data);
    list.Add(step1);

-    // 模拟用户打了快照,此时sn +1
+    // Simulate the user taking a snapshot; sn increases by 1
    ++sn;

-    // 第二步:WriteChunk,写[4kb, 12kb]区域
+    // Step 2: WriteChunk, write the [4kb, 12kb] area
    RangeData step2Data;
    step2Data.offset = PAGE_SIZE;
    step2Data.length = 2 * PAGE_SIZE;
@@ -582,20 +549,21 @@ TEST_F(RestartTestSuit, SnapshotTest) {
        std::make_shared(&dataStore_, id, sn, step2Data);
    list.Add(step2);

-    // 第三步:用户请求删除快照
+    // Step 3: User requests to delete the snapshot
    std::shared_ptr step3 =
        std::make_shared(&dataStore_, id, sn);
    list.Add(step3);

-    // 模拟再次打快照 sn +1
+    // Simulate taking another snapshot; sn increases by 1
    ++sn;

-    // 第四步:此次快照过程中没有数据写入,直接DeleteSnapshotOrCorrectedSn
+    // Step 4: No data was written during this snapshot, so call
+    // DeleteSnapshotOrCorrectedSn directly
    std::shared_ptr step4 =
        std::make_shared(&dataStore_, id, sn);
    list.Add(step4);

-    // 第五步:WriteChunk,写[8kb, 16kb]区域
+    // Step 5: WriteChunk, write the [8kb, 16kb] area
    RangeData step5Data;
    step5Data.offset = 2 * PAGE_SIZE;
    step5Data.length = 2 * PAGE_SIZE;
@@ -604,10 +572,10 @@ TEST_F(RestartTestSuit, SnapshotTest) {
        std::make_shared(&dataStore_, id, sn, step5Data);
    list.Add(step5);

-    // 模拟再次打快照 sn +1
+    // Simulate taking another snapshot; sn increases by 1
    ++sn;

-    // 第六步:WriteChunk,写[4kb, 12kb]区域
+    // Step 6: WriteChunk, write the [4kb, 12kb] area
    RangeData step6Data;
    step6Data.offset = PAGE_SIZE;
    step6Data.length = 2 * PAGE_SIZE;
@@ -616,20 +584,20 @@ TEST_F(RestartTestSuit, SnapshotTest) {
        std::make_shared(&dataStore_, id, sn, step6Data);
    list.Add(step6);

-    // 第七步:用户请求删除快照
+    // Step 7: User requests to delete the snapshot
    std::shared_ptr step7 =
        std::make_shared(&dataStore_, id, sn);
    list.Add(step7);

-    // 模拟再次打快照 sn +1
+    // Simulate taking another snapshot; sn increases by 1
    ++sn;

-    // 第八步:用户请求删除快照
+    // Step 8: User requests to delete the snapshot
    std::shared_ptr step8 =
        std::make_shared(&dataStore_, id, sn);
    list.Add(step8);

-    // 第九步:用户请求删除chunk
+    // Step 9: User requests to delete the
chunk
    std::shared_ptr step9 =
        std::make_shared(&dataStore_, id, sn);
    list.Add(step9);

@@ -637,7 +605,8 @@ TEST_F(RestartTestSuit, SnapshotTest) {
    ASSERT_TRUE(list.VerifyLogReplay());
}

-// 测试克隆场景,以及克隆后打快照的组合场景
+// Test the clone scenario, and the combined scenario of taking a snapshot
+// after cloning
TEST_F(RestartTestSuit, CloneTest) {
    StepList list(clearFunc);

@@ -646,17 +615,12 @@ TEST_F(RestartTestSuit, CloneTest) {
    SequenceNum correctedSn = 0;
    std::string location("test@s3");

-    // 第一步:通过CreateCloneChunk创建clone chunk
-    std::shared_ptr step1 =
-        std::make_shared(&dataStore_,
-                         id,
-                         sn,
-                         correctedSn,
-                         CHUNK_SIZE,
-                         location);
+    // Step 1: Create a clone chunk through CreateCloneChunk
+    std::shared_ptr step1 = std::make_shared(
+        &dataStore_, id, sn, correctedSn, CHUNK_SIZE, location);
    list.Add(step1);

-    // 第二步:WriteChunk,写[0kb, 8kb]区域
+    // Step 2: WriteChunk, write the [0kb, 8kb] area
    RangeData step2Data;
    step2Data.offset = 0;
    step2Data.length = 2 * PAGE_SIZE;
@@ -665,7 +629,7 @@ TEST_F(RestartTestSuit, CloneTest) {
        std::make_shared(&dataStore_, id, sn, step2Data);
    list.Add(step2);

-    // 第三步:PasteChunk,写[4kb, 12kb]区域
+    // Step 3: PasteChunk, write the [4kb, 12kb] area
    RangeData step3Data;
    step3Data.offset = PAGE_SIZE;
    step3Data.length = 2 * PAGE_SIZE;
@@ -674,7 +638,7 @@ TEST_F(RestartTestSuit, CloneTest) {
        std::make_shared(&dataStore_, id, step3Data);
    list.Add(step3);

-    // 第四步:通过PasteChunk 遍写chunk
+    // Step 4: Overwrite the entire chunk through PasteChunk
    RangeData step4Data;
    step4Data.offset = 0;
    step4Data.length = CHUNK_SIZE;
@@ -683,10 +647,10 @@ TEST_F(RestartTestSuit, CloneTest) {
        std::make_shared(&dataStore_, id, step4Data);
    list.Add(step4);

-    // 模拟打快照
+    // Simulate taking a snapshot
    ++sn;

-    // 第五步:WriteChunk,写[4kb, 12kb]区域
+    // Step 5: WriteChunk, write the [4kb, 12kb] area
    RangeData step5Data;
    step5Data.offset = PAGE_SIZE;
    step5Data.length = 2 * PAGE_SIZE;
@@ -695,12 +659,12 @@ TEST_F(RestartTestSuit, CloneTest) {
        std::make_shared(&dataStore_, id, sn, step5Data);
    list.Add(step5);

-    // 第六步:用户请求删除快照
+    // Step 6: User requests to delete the snapshot
    std::shared_ptr step6 =
        std::make_shared(&dataStore_, id, sn);
    list.Add(step6);

-    // 第七步:DeleteChunk
+    // Step 7: DeleteChunk
    std::shared_ptr step7 =
        std::make_shared(&dataStore_, id, sn);
    list.Add(step7);

@@ -708,7 +672,7 @@ TEST_F(RestartTestSuit, CloneTest) {
    ASSERT_TRUE(list.VerifyLogReplay());
}

-// 测试恢复场景
+// Test the recovery scenario
TEST_F(RestartTestSuit, RecoverTest) {
    StepList list(clearFunc);

@@ -717,20 +681,15 @@ TEST_F(RestartTestSuit, RecoverTest) {
    SequenceNum correctedSn = 5;
    std::string location("test@s3");

-    // 第一步:通过CreateCloneChunk创建clone chunk
-    std::shared_ptr step1 =
-        std::make_shared(&dataStore_,
-                         id,
-                         sn,
-                         correctedSn,
-                         CHUNK_SIZE,
-                         location);
+    // Step 1: Create a clone chunk through CreateCloneChunk
+    std::shared_ptr step1 = std::make_shared(
+        &dataStore_, id, sn, correctedSn, CHUNK_SIZE, location);
    list.Add(step1);

-    // 数据写入的版本应为最新的版本
+    // Data should be written with the latest version
    sn = correctedSn;

-    // 第二步:PasteChunk,写[0kb, 8kb]区域
+    // Step 2: PasteChunk, write the [0kb, 8kb] area
    RangeData step2Data;
    step2Data.offset = 0;
    step2Data.length = 2 * PAGE_SIZE;
@@ -739,7 +698,7 @@ TEST_F(RestartTestSuit, RecoverTest) {
        std::make_shared(&dataStore_, id, step2Data);
    list.Add(step2);

-    // 第三步:PasteChunk,写[4kb, 12kb]区域
+    // Step 3: PasteChunk, write the [4kb, 12kb] area
    RangeData step3Data;
    step3Data.offset = PAGE_SIZE;
    step3Data.length = 2 * PAGE_SIZE;
@@ -748,7 +707,7 @@ TEST_F(RestartTestSuit,
RecoverTest) {
        std::make_shared(&dataStore_, id, sn, step3Data);
    list.Add(step3);

-    // 第四步:通过PasteChunk 遍写chunk
+    // Step 4: Overwrite the entire chunk through PasteChunk
    RangeData step4Data;
    step4Data.offset = 0;
    step4Data.length = CHUNK_SIZE;
@@ -757,7 +716,7 @@ TEST_F(RestartTestSuit, RecoverTest) {
        std::make_shared(&dataStore_, id, sn, step4Data);
    list.Add(step4);

-    // 第五步:DeleteChunk
+    // Step 5: DeleteChunk
    std::shared_ptr step5 =
        std::make_shared(&dataStore_, id, sn);
    list.Add(step5);

@@ -765,7 +724,9 @@ TEST_F(RestartTestSuit, RecoverTest) {
    ASSERT_TRUE(list.VerifyLogReplay());
}

-// 按照实际用户使用从场景随机产生每一步的操作,校验一定操作个数下都能保证幂等性
+// Randomly generate each step of the operation based on real user scenarios,
+// and verify that idempotence is guaranteed for a given number of operations
TEST_F(RestartTestSuit, RandomCombine) {
    StepList list(clearFunc);

@@ -775,7 +736,7 @@ TEST_F(RestartTestSuit, RandomCombine) {
    std::string location("test@s3");
    std::srand(std::time(nullptr));

-    // 写随机地址的数据,在[0, kMaxSize]范围内写
+    // Write data at random addresses within the [0, kMaxSize] range
    auto randWriteOrPaste = [&](bool isPaste) {
        int pageCount = kMaxSize / PAGE_SIZE;
        RangeData stepData;
@@ -793,21 +754,17 @@ TEST_F(RestartTestSuit, RandomCombine) {
        }
    };

-    // 随机的克隆过程
+    // Random cloning process
    auto randClone = [&]() {
-        // 二分之一概率,模拟恢复过程
-        if (std::rand() % 2 == 0)
-            correctedSn = 2;
+        // With one-half probability, simulate the recovery process
+        if (std::rand() % 2 == 0) correctedSn = 2;

        std::shared_ptr createStep =
-            std::make_shared(&dataStore_,
-                             id,
-                             sn,
-                             correctedSn,
-                             CHUNK_SIZE,
-                             location);
+            std::make_shared(&dataStore_, id, sn, correctedSn,
+                             CHUNK_SIZE, location);
        list.Add(createStep);

-        // 克隆过程模拟5个操作,Write或者Paste,三分之一概率Write
+        // The cloning process simulates 5 operations, Write or Paste, with a
+        // one-third probability of Write
        for (int i = 0; i < 5; ++i) {
            if (std::rand() % 3 == 0) {
                randWriteOrPaste(false);
@@ -816,7 +773,8 @@ TEST_F(RestartTestSuit, RandomCombine) {
            }
        }

-        // 遍写一遍chunk,可以用于模拟后续写入创建快照
+        // Overwrite the entire chunk once; this can be used to simulate
+        // snapshot creation on subsequent writes
        RangeData pasteData;
        pasteData.offset = 0;
        pasteData.length = CHUNK_SIZE;
@@ -826,11 +784,12 @@ TEST_F(RestartTestSuit, RandomCombine) {
        list.Add(pasteStep);
    };

-    // 随机的快照过程
+    // Random snapshot process
    auto randSnapshot = [&](int* stepCount) {
-        // 快照需要将版本+1
+        // Taking a snapshot requires incrementing the version by 1
        ++sn;
-        // 三分之一的概率调DeleteSnapshot,一旦调了DeleteSnapshot就退出快照
+        // With one-third probability call DeleteSnapshot; once
+        // DeleteSnapshot has been called, exit the snapshot process
        while (true) {
            if (std::rand() % 3 == 0) {
                std::shared_ptr step =
@@ -844,14 +803,14 @@ TEST_F(RestartTestSuit, RandomCombine) {
        }
    };

-    // 创建clone chunk,
+    // Create a clone chunk
    randClone();

-    // 设置最长执行步数
+    // Set the maximum number of execution steps
    int maxSteps = 30;
    int stepCount = 0;
    while (stepCount < maxSteps) {
-        // 三分之一的概率会模拟快照过程
+        // Simulate the snapshot process with one-third probability
        if (std::rand() % 3 == 0) {
            randSnapshot(&stepCount);
        } else {
@@ -860,7 +819,7 @@ TEST_F(RestartTestSuit, RandomCombine) {
        }
    }

-    // 最后删除chunk
+    // Finally, delete the chunk
    std::shared_ptr lastStep =
        std::make_shared(&dataStore_, id, sn);
    list.Add(lastStep);
diff --git a/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp b/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp
index 61dc402c21..f1dfa68b26 100644
--- 
a/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_sna"; // NOLINT -const string poolDir = "./chunkfilepool_int_sna"; // NOLINT +const string baseDir = "./data_int_sna"; // NOLINT +const string poolDir = "./chunkfilepool_int_sna"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_sna.meta"; // NOLINT class SnapshotTestSuit : public DatastoreIntegrationBase { @@ -36,14 +36,16 @@ class SnapshotTestSuit : public DatastoreIntegrationBase { }; /** - * 快照场景测试 - * 构造存在两个chunk的文件,分别为chunk1和chunk2,做如下操作 - * 1.写chunk1 - * 2.模拟第一次打快照,转储过程中写chunk1并产生快照,chunk2未发生数据写入 - * 3.删除快照,然后向chunk2中写入数据 - * 4.模拟第二次打快照,转储过程中写chunk1,但是不写chunk2 - * 5.删除快照,再次向chunk2写入数据 - * 6.删除文件 + * Snapshot scenario testing + * Construct a file with two chunks, chunk1 and chunk2, as follows + * 1. Write chunk1 + * 2. Simulate the first snapshot taken, write chunk1 during the dump process + * and generate a snapshot, but chunk2 does not have data write + * 3. Delete the snapshot and write data to chunk2 + * 4. Simulate taking a second snapshot, writing chunk1 during the dump process, + * but not chunk2 + * 5. Delete the snapshot and write data to chunk2 again + * 6. Delete files */ TEST_F(SnapshotTestSuit, SnapshotTest) { SequenceNum fileSn = 1; @@ -55,39 +57,34 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { CSChunkInfo chunk1Info; CSChunkInfo chunk2Info; - /******************构造初始环境,创建chunk1******************/ + /****************** Creating Initial Environment, Creating Chunk1 + * ******************/ - // 向chunk1的[0, 12KB)区域写入数据 "1" + // Write data '1' to the [0, 12KB) area of chunk1 offset = 0; length = 3 * PAGE_SIZE; // 12KB char buf1_1[3 * PAGE_SIZE]; memset(buf1_1, '1', length); errorCode = dataStore_->WriteChunk(id1, // id - fileSn, - buf1_1, - offset, - length, - nullptr); + fileSn, buf1_1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - /******************场景一:第一次给文件打快照******************/ + /******************Scene 1: Take the first snapshot of the + * file******************/ - // 模拟打快照,此时文件版本递增 - ++fileSn; // fileSn == 2 + // Simulate taking a snapshot, where the file version increases + ++fileSn; // fileSn == 2 - // 向chunk1的[4KB, 8KB)区域写入数据 “2” + // Write data '2' to the [4KB, 8KB] area of chunk1 offset = 1 * PAGE_SIZE; length = 1 * PAGE_SIZE; char buf1_2[3 * PAGE_SIZE]; memset(buf1_2, '2', 3 * PAGE_SIZE); errorCode = dataStore_->WriteChunk(id1, // id - fileSn, - buf1_2, - offset, - length, - nullptr); + fileSn, buf1_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk1的信息,且各项信息符合预期 + // Information on chunk1 can be obtained, and all information meets + // expectations errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk1Info.curSn); @@ -96,256 +93,242 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { size_t readSize = 3 * PAGE_SIZE; char readbuf[3 * PAGE_SIZE]; - // 读chunk1快照文件的[0, 12KB)区域,读出来数据应该都是‘1’ + // Read the [0, 12KB) area of the chunk1 snapshot file, and the data read + // should all be '1' errorCode = dataStore_->ReadSnapshotChunk(id1, // chunk id - 1, // snap sn + 1, // snap sn readbuf, 0, // offset readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1, readbuf, readSize)); - // 重复写入,验证不会重复cow,读快照时[4KB, 8KB)区域的数据应为“1” + // Repeat write, verify that there will be 
no repeated COW; when reading the
+    // snapshot, the data in the [4KB, 8KB) area should still be '1'
    errorCode = dataStore_->WriteChunk(id1,  // id
-                                       fileSn,
-                                       buf1_2,
-                                       offset,
-                                       length,
-                                       nullptr);
+                                       fileSn, buf1_2, offset, length, nullptr);
    ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 写未cow过的区域,写入[0,4kb]区域
+    // Write to an area that has not been COWed: write the [0, 4kb] area
    offset = 0;
    length = PAGE_SIZE;
    errorCode = dataStore_->WriteChunk(id1,  // id
-                                       fileSn,
-                                       buf1_2,
-                                       offset,
-                                       length,
-                                       nullptr);
+                                       fileSn, buf1_2, offset, length, nullptr);
    ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 写部分cow过的区域,写入[4kb,12kb]区域
+    // Write to a partially COWed area: write the [4kb, 12kb] area
    offset = PAGE_SIZE;
    length = 2 * PAGE_SIZE;
    errorCode = dataStore_->WriteChunk(id1,  // id
-                                       fileSn,
-                                       buf1_2,
-                                       offset,
-                                       length,
-                                       nullptr);
+                                       fileSn, buf1_2, offset, length, nullptr);
    ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 可以获取到chunk1的信息,且各项信息符合预期
+    // The chunk1 info can be obtained, and every field meets expectations
    errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(2, chunk1Info.curSn);
    ASSERT_EQ(1, chunk1Info.snapSn);
    ASSERT_EQ(0, chunk1Info.correctedSn);

-    // 此时读chunk1返回数据内容应该为[0,12KB]:2
-    // 读chunk1快照返回的数据内容应该为[0, 12KB):1
-    // 其余地址空间的数据可以不用保证
+    // At this point, reading chunk1 should return [0, 12KB]: '2', and
+    // reading the chunk1 snapshot should return [0, 12KB): '1'; the data in
+    // the remaining address space need not be guaranteed
    readSize = 3 * PAGE_SIZE;
    memset(readbuf, 0, sizeof(readbuf));
-    errorCode = dataStore_->ReadChunk(id1,  // chunk id
-                                      fileSn,
-                                      readbuf,
-                                      0,  // offset
+    errorCode = dataStore_->ReadChunk(id1,  // chunk id
+                                      fileSn, readbuf,
+                                      0,  // offset
                                      readSize);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(0, memcmp(buf1_2, readbuf, readSize));

-    // 读chunk1快照文件的[0, 12KB)区域,读出来数据应该还是‘1’
+    // When reading the [0, 12KB) area of the chunk1 snapshot file, the data
+    // read should still be '1'
    readSize = 3 * PAGE_SIZE;
    memset(readbuf, 0, sizeof(readbuf));
    errorCode = dataStore_->ReadSnapshotChunk(id1,  // chunk id
-                                              1,  // snap sn
+                                              1,    // snap sn
                                              readbuf,
                                              0,  // offset
                                              readSize);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(0, memcmp(buf1_1, readbuf, readSize));

-    // ReadSnapshotChun,请求offset+length > page size
+    // ReadSnapshotChunk with a request where offset + length exceeds the
+    // chunk size
    offset = CHUNK_SIZE - PAGE_SIZE;
    readSize = 2 * PAGE_SIZE;
    memset(readbuf, 0, sizeof(readbuf));
    errorCode = dataStore_->ReadSnapshotChunk(id1,  // chunk id
-                                              1,  // snap sn
+                                              1,    // snap sn
                                              readbuf,
                                              offset,  // offset
                                              readSize);
    ASSERT_EQ(errorCode, CSErrorCode::InvalidArgError);

-    // 读chunk2快照文件,返回ChunkNotExistError
+    // Reading the chunk2 snapshot file returns ChunkNotExistError
    readSize = 2 * PAGE_SIZE;
    memset(readbuf, 0, sizeof(readbuf));
    errorCode = dataStore_->ReadSnapshotChunk(id2,  // chunk id
-                                              1,  // snap sn
+                                              1,    // snap sn
                                              readbuf,
                                              0,  // offset
                                              readSize);
    ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError);

-    /******************场景二:第一次快照结束,删除快照******************/
+    /****************** Scene 2: The first snapshot completes; delete the
+     * snapshot ******************/

-    // 请求删chunk1的快照,返回成功,并删除快照
+    // Request to delete the snapshot of chunk1; it returns success and the
+    // snapshot is deleted
    errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id1, fileSn);
    ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 检查chunk1信息,符合预期
+    // Check the chunk1 info; it meets expectations
    errorCode =
dataStore_->GetChunkInfo(id1, &chunk1Info);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(fileSn, chunk1Info.curSn);
    ASSERT_EQ(0, chunk1Info.snapSn);
    ASSERT_EQ(0, chunk1Info.correctedSn);

-    // 请求删chunk2的快照,返回成功
+    // Request to delete the snapshot of chunk2; it returns success
    errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id2, fileSn);
    ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 向chunk2的[0, 8KB)区域写入数据 "a"
+    // Write data 'a' to the [0, 8KB) area of chunk2
    offset = 0;
    length = 2 * PAGE_SIZE;  // 8KB
    char buf2_2[2 * PAGE_SIZE];
    memset(buf2_2, 'a', length);
    errorCode = dataStore_->WriteChunk(id2,  // id
-                                       fileSn,
-                                       buf2_2,
-                                       offset,
-                                       length,
-                                       nullptr);
+                                       fileSn, buf2_2, offset, length, nullptr);
    ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 检查chunk1信息,符合预期
+    // Check the chunk2 info; it meets expectations
    errorCode = dataStore_->GetChunkInfo(id2, &chunk2Info);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(fileSn, chunk2Info.curSn);
    ASSERT_EQ(0, chunk2Info.snapSn);
    ASSERT_EQ(0, chunk2Info.correctedSn);

-    /******************场景三:第二次打快照******************/
+    /****************** Scene 3: Take the second snapshot ******************/

-    // 模拟第二次打快照,版本递增
+    // Simulate taking a second snapshot; the version increases
    ++fileSn;  // fileSn == 3

-    // 向chunk1的[0KB, 8KB)区域写入数据 "3"
+    // Write data '3' to the [0KB, 8KB) area of chunk1
    offset = 0;
    length = 2 * PAGE_SIZE;
    char buf1_3[2 * PAGE_SIZE];
    memset(buf1_3, '3', length);
    errorCode = dataStore_->WriteChunk(id1,  // id
-                                       fileSn,
-                                       buf1_3,
-                                       offset,
-                                       length,
-                                       nullptr);
+                                       fileSn, buf1_3, offset, length, nullptr);
    ASSERT_EQ(errorCode, CSErrorCode::Success);

-    // 可以获取到chunk1的信息,且各项信息符合预期
+    // The chunk1 info can be obtained, and every field meets expectations
    errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(fileSn, chunk1Info.curSn);
    ASSERT_EQ(2, chunk1Info.snapSn);
    ASSERT_EQ(0, chunk1Info.correctedSn);

-    // 此时读chunk1返回数据内容应该为[0,8KB]:3,[8KB, 12KB]:2
-    // 读chunk1快照返回的数据内容应该为[0, 12KB]:2
-    // 其余地址空间的数据可以不用保证
+    // At this point, reading chunk1 should return [0, 8KB]: '3' and
+    // [8KB, 12KB]: '2'; reading the chunk1 snapshot should return
+    // [0, 12KB]: '2'; the data in the remaining address space need not be
+    // guaranteed
    readSize = 3 * PAGE_SIZE;
    memset(readbuf, 0, sizeof(readbuf));
-    errorCode = dataStore_->ReadChunk(id1,  // chunk id
-                                      fileSn,
-                                      readbuf,
-                                      0,  // offset
+    errorCode = dataStore_->ReadChunk(id1,  // chunk id
+                                      fileSn, readbuf,
+                                      0,  // offset
                                      readSize);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(0, memcmp(buf1_3, readbuf, 2 * PAGE_SIZE));
    ASSERT_EQ(0, memcmp(buf1_2, readbuf + 2 * PAGE_SIZE, 1 * PAGE_SIZE));

-    // 读chunk1快照文件的[0, 12KB)区域,数据内容为‘2’
+    // Read the [0, 12KB) area of the chunk1 snapshot file; the data content
+    // is '2'
    readSize = 3 * PAGE_SIZE;
    memset(readbuf, 0, sizeof(readbuf));
    errorCode = dataStore_->ReadSnapshotChunk(id1,  // chunk id
-                                              2,  // snap sn
+                                              2,    // snap sn
                                              readbuf,
                                              0,  // offset
                                              readSize);
    ASSERT_EQ(errorCode, CSErrorCode::Success);
    ASSERT_EQ(0, memcmp(buf1_2, readbuf, readSize));

-    // 读chunk2快照返回的数据内容应该为[0, 8KB):a,其余地址空间的数据可以不用保证
+    // Reading the chunk2 snapshot should return [0, 8KB): 'a'; the data in
+    // the remaining address space need not be guaranteed
    readSize = 2 * PAGE_SIZE;
    memset(readbuf, 0, sizeof(readbuf));
    errorCode = dataStore_->ReadSnapshotChunk(id2,  // chunk id
-                                              2,  // snap sn
+                                              2,    // snap sn
                                              readbuf,
                                              0,  // offset
readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2_2, readbuf, readSize)); - /******************场景四:第二次快照结束,删除快照******************/ + /******************Scene 4: Second snapshot completes, delete + * snapshot******************/ - // 请求删chunk1的快照,返回成功 + // Request to delete snapshot of chunk1, returned success errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id1, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk1信息,符合预期 + // Check chunk1 information, as expected errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk1Info.curSn); ASSERT_EQ(0, chunk1Info.snapSn); ASSERT_EQ(0, chunk1Info.correctedSn); - // 请求删chunk2的快照,返回成功 + // Request to delete the snapshot of chunk2, returned success errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id2, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk2信息,符合预期 + // Check chunk2 information, as expected errorCode = dataStore_->GetChunkInfo(id2, &chunk2Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, chunk2Info.curSn); ASSERT_EQ(0, chunk2Info.snapSn); ASSERT_EQ(fileSn, chunk2Info.correctedSn); - // 向chunk2的[0KB, 4KB)区域写入数据 “b” + // Write data 'b' to the [0KB, 4KB) area of chunk2 offset = 0; length = 1 * PAGE_SIZE; char buf2_3[1 * PAGE_SIZE]; memset(buf2_3, 'b', length); errorCode = dataStore_->WriteChunk(id2, // id - fileSn, - buf2_3, - offset, - length, - nullptr); - // 检查chunk2信息,符合预期,curSn变为3,不会产生快照 + fileSn, buf2_3, offset, length, nullptr); + // Check chunk2 information, as expected, curSn becomes 3, no snapshot will + // be generated errorCode = dataStore_->GetChunkInfo(id2, &chunk2Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk2Info.curSn); ASSERT_EQ(0, chunk2Info.snapSn); ASSERT_EQ(fileSn, chunk2Info.correctedSn); - // 再次向chunk2的[0KB, 8KB)区域写入数据 + // Write data to the [0KB, 8KB) area of chunk2 again errorCode = dataStore_->WriteChunk(id2, // id - fileSn, - buf2_3, - offset, - length, - nullptr); - // 检查chunk2信息,chunk信息不变,不会产生快照 + fileSn, buf2_3, offset, length, nullptr); + // Check chunk2 information, chunk information remains unchanged and no + // snapshot will be generated errorCode = dataStore_->GetChunkInfo(id2, &chunk2Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk2Info.curSn); ASSERT_EQ(0, chunk2Info.snapSn); ASSERT_EQ(fileSn, chunk2Info.correctedSn); - /******************场景五:用户删除文件******************/ + /******************Scene 5: User Deletes File******************/ - // 此时删除Chunk1,返回Success + // At this point, delete Chunk1 and return to Success errorCode = dataStore_->DeleteChunk(id1, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 此时删除Chunk2,返回Success + // At this point, delete Chunk2 and return to Success errorCode = dataStore_->DeleteChunk(id2, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->GetChunkInfo(id2, &chunk1Info); diff --git a/test/integration/chunkserver/datastore/datastore_stress_test.cpp b/test/integration/chunkserver/datastore/datastore_stress_test.cpp index 2364d61dd2..ae59850db5 100644 --- a/test/integration/chunkserver/datastore/datastore_stress_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_stress_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_str"; // NOLINT -const string poolDir = 
"./chunkfilepool_int_str"; // NOLINT +const string baseDir = "./data_int_str"; // NOLINT +const string poolDir = "./chunkfilepool_int_str"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_str.meta"; // NOLINT class StressTestSuit : public DatastoreIntegrationBase { @@ -64,7 +64,7 @@ TEST_F(StressTestSuit, StressTest) { auto RunStress = [&](int threadNum, int rwPercent, int ioNum) { uint64_t beginTime = TimeUtility::GetTimeofDayUs(); - Thread *threads = new Thread[threadNum]; + Thread* threads = new Thread[threadNum]; int readThreadNum = threadNum * rwPercent / 100; int ioNumAvg = ioNum / threadNum; int idRange = 100; @@ -92,27 +92,27 @@ TEST_F(StressTestSuit, StressTest) { printf("===============TEST WRITE==================\n"); - // 测试单线程性能 + // Testing Single Thread Performance RunStress(1, 0, 10000); - // 10个线程 + // 10 threads RunStress(10, 0, 50000); - // 50个线程 + // 50 threads RunStress(50, 0, 100000); printf("===============TEST READ==================\n"); - // 测试单线程性能 + // Testing Single Thread Performance RunStress(1, 100, 10000); - // 10个线程 + // 10 threads RunStress(10, 100, 50000); - // 50个线程 + // 50 threads RunStress(50, 100, 100000); printf("===============TEST READWRITE==================\n"); - // 测试单线程性能 + // Testing Single Thread Performance RunStress(1, 50, 10000); - // 10个线程 + // 10 threads RunStress(10, 50, 50000); - // 50个线程 + // 50 threads RunStress(50, 50, 100000); } diff --git a/test/integration/client/chunkserver_exception_test.cpp b/test/integration/client/chunkserver_exception_test.cpp index aa676fc718..df41c9b07b 100644 --- a/test/integration/client/chunkserver_exception_test.cpp +++ b/test/integration/client/chunkserver_exception_test.cpp @@ -20,22 +20,22 @@ * Author: tongguangxun */ -#include -#include #include +#include +#include -#include +#include #include -#include // NOLINT -#include // NOLINT -#include +#include // NOLINT +#include +#include // NOLINT #include -#include -#include // NOLINT +#include +#include // NOLINT -#include "src/common/timeutility.h" #include "include/client/libcurve.h" #include "src/client/inflight_controller.h" +#include "src/common/timeutility.h" #include "test/integration/client/common/file_operation.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" @@ -48,12 +48,12 @@ curve::client::InflightControl inflightContl; using curve::CurveCluster; const std::vector mdsConf{ - { "--confPath=./conf/mds.conf" }, - { "--log_dir=./runlog/ChunkserverException" }, - { "--mdsDbName=module_exception_curve_chunkserver" }, - { "--sessionInterSec=20" }, - { "--etcdAddr=127.0.0.1:22233" }, - { "--updateToRepoSec=5" }, + {"--confPath=./conf/mds.conf"}, + {"--log_dir=./runlog/ChunkserverException"}, + {"--mdsDbName=module_exception_curve_chunkserver"}, + {"--sessionInterSec=20"}, + {"--etcdAddr=127.0.0.1:22233"}, + {"--updateToRepoSec=5"}, }; const std::vector chunkserverConf4{ @@ -119,9 +119,9 @@ const std::vector chunkserverConf6{ {"-walFilePoolDir=./moduleException6/walfilepool/"}, {"-walFilePoolMetaPath=./moduleException6/walfilepool.meta"}}; -std::string mdsaddr = // NOLINT +std::string mdsaddr = // NOLINT "127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124"; -std::string logpath = "./runlog/ChunkserverException"; // NOLINT +std::string logpath = "./runlog/ChunkserverException"; // NOLINT const std::vector clientConf{ std::string("mds.listen.addr=") + mdsaddr, @@ -129,9 +129,11 @@ const std::vector clientConf{ std::string("chunkserver.rpcTimeoutMS=1000"), 
std::string("chunkserver.opMaxRetry=10"), }; -class CSModuleException : public ::testing::Test { - public: - void SetUp() { +class CSModuleException : public ::testing::Test +{ +public: + void SetUp() + { std::string confPath = "./test/integration/client/config/client.conf.1"; system("mkdir ./runlog/ChunkserverException"); system("rm -rf module_exception_test_chunkserver.etcd"); @@ -143,15 +145,16 @@ class CSModuleException : public ::testing::Test { cluster->PrepareConfig(confPath, clientConf); - // 1. 启动etcd + // 1. Start etcd pid_t pid = cluster->StartSingleEtcd( 1, "127.0.0.1:22233", "127.0.0.1:22234", std::vector{ - "--name=module_exception_test_chunkserver" }); + "--name=module_exception_test_chunkserver"}); LOG(INFO) << "etcd 1 started on 127.0.0.1:22233:22234, pid = " << pid; ASSERT_GT(pid, 0); - // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 + // 2. Start one mds first, make it a leader, and then start the other + // two mds nodes pid = cluster->StartSingleMDS(1, "127.0.0.1:22122", 22128, mdsConf, true); LOG(INFO) << "mds 1 started on 127.0.0.1:22122, pid = " << pid; @@ -168,7 +171,7 @@ class CSModuleException : public ::testing::Test { ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); - // 3. 创建物理池 + // 3. Creating a physical pool std::string createPPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( " -cluster_map=./test/integration/client/" @@ -184,13 +187,15 @@ class CSModuleException : public ::testing::Test { LOG(INFO) << "exec cmd: " << createPPCmd; int ret = 0; int retry = 0; - while (retry < 5) { + while (retry < 5) + { ret = system(createPPCmd.c_str()); - if (ret == 0) break; + if (ret == 0) + break; retry++; } - // 4. 创建chunkserver + // 4. Create chunkserver pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid; @@ -207,7 +212,8 @@ class CSModuleException : public ::testing::Test { ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // 5. Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first std::string createLPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( @@ -221,26 +227,29 @@ class CSModuleException : public ::testing::Test { ret = 0; retry = 0; - while (retry < 5) { + while (retry < 5) + { ret = system(createLPCmd.c_str()); - if (ret == 0) break; + if (ret == 0) + break; retry++; } ASSERT_EQ(ret, 0); - // 6. 初始化client配置 + // 6. Initialize client configuration ret = Init(confPath.c_str()); ASSERT_EQ(ret, 0); - // 7. 创建一个文件 + // 7. Create a file fd = curve::test::FileCommonOperation::Open("/test1", "curve"); ASSERT_NE(fd, -1); - // 8. 先睡眠10s,让chunkserver选出leader + // 8. Sleep for 10 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(10)); } - void TearDown() { + void TearDown() + { ::Close(fd); UnInit(); ASSERT_EQ(0, cluster->StopCluster()); @@ -250,19 +259,25 @@ class CSModuleException : public ::testing::Test { "module_exception_test_chunkserver.etcd"); } - void CreateOpenFileBackend() { + void CreateOpenFileBackend() + { createDone = false; createOrOpenFailed = false; - auto func = [&]() { - for (int i = 0; i < 20; i++) { + auto func = [&]() + { + for (int i = 0; i < 20; i++) + { std::string filename = "/" + std::to_string(i); int ret = curve::test::FileCommonOperation::Open(filename, "curve"); ret == -1 ? 
createOrOpenFailed = true : 0;
-                if (ret != -1) {
+                if (ret != -1)
+                {
                    ::Close(ret);
-                } else {
+                }
+                else
+                {
                    break;
                }
            }
@@ -276,44 +291,55 @@ class CSModuleException : public ::testing::Test {
        t.detach();
    }

-    void WaitBackendCreateDone() {
+    void WaitBackendCreateDone()
+    {
        std::unique_lock lk(createMtx);
-        createCV.wait(lk, [&]() { return createDone; });
+        createCV.wait(lk, [&]()
+                      { return createDone; });
    }

    /**
-     * 监测client io能否在预期时间内正常下发
-     * @param: off是当前需要下发IO的偏移
-     * @param: size是下发io的大小
-     * @param: predictTimeS是预期在多少秒内IO可以恢复
-     * @param[out]: failCount为当前io下发中错误返回的数量
-     * @return: 如果io在预期时间内能够正常下发,则返true,否则返回false
+     * Monitor whether client IO can be issued normally within the expected
+     * time
+     * @param: off is the offset of the IO to be issued
+     * @param: size is the size of the issued IO
+     * @param: predictTimeS is the number of seconds within which IO is
+     * expected to recover
+     * @param[out]: failCount is the number of errors returned while issuing
+     * the current IO
+     * @return: true if the IO can be issued normally within the expected
+     * time; otherwise false
     */
    bool MonitorResume(uint64_t off, uint64_t size, uint64_t predictTimeS,
-                       uint64_t* failCount = nullptr) {
+                       uint64_t *failCount = nullptr)
+    {
        inflightContl.SetMaxInflightNum(16);
        resumeFlag = false;
        ioFailedCount = 0;

-        auto wcb = [](CurveAioContext* context) {
+        auto wcb = [](CurveAioContext *context)
+        {
            inflightContl.DecremInflightNum();
-            if (context->ret == context->length) {
+            if (context->ret == context->length)
+            {
                std::unique_lock lk(resumeMtx);
                resumeFlag = true;
                resumeCV.notify_all();
-            } else {
+            }
+            else
+            {
                ioFailedCount++;
            }
            delete context;
        };

-        char* writebuf = new char[size];
+        char *writebuf = new char[size];
        memset(writebuf, 'a', size);

-        auto iofunc = [&]() {
+        auto iofunc = [&]()
+        {
            std::this_thread::sleep_for(std::chrono::seconds(predictTimeS));
            inflightContl.WaitInflightComeBack();

-            CurveAioContext* context = new CurveAioContext;
+            CurveAioContext *context = new CurveAioContext;
            context->op = LIBCURVE_OP::LIBCURVE_OP_WRITE;
            context->offset = off;
            context->length = size;
@@ -335,7 +361,7 @@ class CSModuleException : public ::testing::Test {

        failCount == nullptr ? 0 : (*failCount = ioFailedCount);

-        // 唤醒io线程
+        // Wake up the IO thread
        iothread.join();
        inflightContl.WaitInflightAllComeBack();

@@ -345,182 +371,195 @@ class CSModuleException : public ::testing::Test {

    int fd;

-    // 是否出现挂卸载失败
+    // Whether mount/unmount failed
    bool createOrOpenFailed;
    bool createDone;
    std::mutex createMtx;
    std::condition_variable createCV;

-    CurveCluster* cluster;
+    CurveCluster *cluster;
};

-// 测试环境拓扑:在单节点上启动一个client、三个chunkserver、三个mds、一个etcd
+// Test environment topology: start one client, three chunkservers, three mds,
+// and one etcd on a single node

-TEST_F(CSModuleException, ChunkserverException) {
+TEST_F(CSModuleException, ChunkserverException)
+{
    LOG(INFO) << "current case: KillOneChunkserverThenRestartTheChunkserver";

    /********* KillOneChunkserverThenRestartTheChunkserver **********/
-    // 1. 测试重启一个chunkserver
-    // 2.预期:
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. kill一台chunkserver:client 读写请求最多卡顿
-    //    election_timeout*2s可以正常读写
-    //    c. 恢复chunkserver:client 读写请求无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Test restarting one chunkserver
+    // 2. Expected:
+    //    a. When the cluster is healthy: client read/write requests are
+    //       issued normally
+    //    b. Kill one chunkserver: client read/write requests stall for
+    //       at most election_timeout*2s, then reads and writes resume
+    //    c. Recover the chunkserver: client read/write requests are
+    //       unaffected
+    // 1. In the initial cluster state, IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. kill掉一个chunkserver
+    // 2. Kill one chunkserver
    ASSERT_EQ(0, cluster->StopChunkServer(1));

-    // 3. kill掉一个chunkserver之后,client的io预期最多会在2*electtime后恢复
+    // 3. After killing one chunkserver, client IO is expected to recover
+    // within at most 2 * election_timeout
    ASSERT_TRUE(MonitorResume(0, 4096, 2));

-    // 4. 拉起刚才被kill的chunkserver
+    // 4. Restart the chunkserver that was just killed
    pid_t pid =
        cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4);
    LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 5. 重新拉起对client IO没有影响
+    // 5. Restarting it has no impact on client IO
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: HangOneChunkserverThenResumeTheChunkserver";

    /********* HangOneChunkserverThenResumeTheChunkserver ***********/
-    // 1. hang一台chunkserver,然后恢复hang的chunkserver
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. hang一台chunkserver:client
-    //    读写请求最多卡顿election_timeout*2s可以正常读写
-    //    c. 恢复chunkserver:client 读写请求无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang one chunkserver, then recover it
+    // 2. Expected:
+    //    a. When the cluster is healthy: client read/write requests are
+    //       issued normally
+    //    b. Hang one chunkserver: client read/write requests stall for at
+    //       most election_timeout*2s, then reads and writes resume
+    //    c. Recover the chunkserver: client read/write requests are
+    //       unaffected
+    // 1. In the initial cluster state, IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. hang一个chunkserver
+    // 2. Hang one chunkserver
    ASSERT_EQ(0, cluster->HangChunkServer(1));

-    // 3. hang一个chunkserver之后,client的io预期最多会在2*electtime后恢复
+    // 3. After hanging one chunkserver, client IO is expected to recover
+    // within at most 2 * election_timeout
    ASSERT_TRUE(MonitorResume(0, 4096, 2));

-    // 4. 拉起刚才被hang的chunkserver
+    // 4. Recover the chunkserver that was just hung
    ASSERT_EQ(0, cluster->RecoverHangChunkServer(1));

-    // 5. 重新拉起对client IO没有影响
+    // 5. Recovering it has no impact on client IO
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: KillTwoChunkserverThenRestartTheChunkserver";

    /******** KillTwoChunkserverThenRestartTheChunkserver *********/
-    // 1. 测试重启两个chunkserver
-    // 2.预期:
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. kill两台chunkserver:预期client IO持续hang,新写IO和覆盖写都hang
-    //    拉起被kill中的一台chunkserver:client IO预期在最多在
-    //    (chunkserver启动回放数据+2*election_timeout)时间内恢复读写
-    //    c. 拉起另外一台kill的chunkserver:client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Test restarting two chunkservers
+    // 2. Expected:
+    //    a. When the cluster is healthy: client read/write requests are
+    //       issued normally
+    //    b. Kill two chunkservers: client IO is expected to keep hanging;
+    //       both new writes and overwrites hang. Restart one of the killed
+    //       chunkservers: client IO is expected to resume reads and writes
+    //       within at most (time for the chunkserver to replay its data on
+    //       startup + 2 * election_timeout)
+    //    c. Restart the other killed chunkserver: client IO is unaffected
+    // 1. In the initial cluster state, IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. kill掉两个chunkserver
+    // 2. Kill two chunkservers
    ASSERT_EQ(0, cluster->StopChunkServer(1));
    ASSERT_EQ(0, cluster->StopChunkServer(2));

-    // 3. kill掉两个chunkserver, io无法正常下发
+    // 3. After killing two chunkservers, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 30));

-    // 4. 拉起刚才被kill的chunkserver的第一个
+    // 4. Restart the first of the chunkservers that were just killed
    pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125",
                                          chunkserverConf4);
    LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 5. 拉起刚才被kill的chunkserver的第一个,
-    // client的io预期最多会在2*electtime后恢复
-    // 如果配置了慢启动,则需要等待
+    // 5. After the first killed chunkserver is restarted, client IO is
+    // expected to recover within at most 2 * election_timeout.
+    // If slow start is configured, it is necessary to wait
    // (copysetNum / load_concurrency) * election_timeout
    ASSERT_TRUE(MonitorResume(0, 4096, 80));

-    // 6. 拉起刚才被kill的chunkserver的第二个
+    // 6. Restart the second of the killed chunkservers
    pid = cluster->StartSingleChunkServer(2, "127.0.0.1:22126",
                                          chunkserverConf5);
    LOG(INFO) << "chunkserver 2 started on 127.0.0.1:22126, pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 7. 集群io不影响,正常下发
+    // 7. Cluster IO is unaffected and issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: HangTwoChunkserverThenResumeTheChunkserver";

    /******* HangTwoChunkserverThenResumeTheChunkserver **********/
-    // 1. hang两台chunkserver,然后恢复hang的chunkserver
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. hang两台chunkserver:client IO持续hang,新写IO和覆盖写都hang
-    //    c. 恢复其中的一台chunkserver:client IO 恢复读写,
-    //    从恢复chunkserver到client IO恢复时间在election_timeout*2
-    //    d. 恢复另外一台hang的chunkserver:client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang two chunkservers, then recover them
+    // 2. Expected:
+    //    a. When the cluster is healthy: client read/write requests are
+    //       issued normally
+    //    b. Hang two chunkservers: client IO keeps hanging; both new writes
+    //       and overwrites hang
+    //    c. Recover one of the chunkservers: client IO resumes reads and
+    //       writes; the time from recovering the chunkserver to client IO
+    //       recovery is within election_timeout * 2
+    //    d. Recover the other hung chunkserver: client IO is unaffected
+    // 1. In the initial cluster state, IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. hang掉两个个chunkserver
+    // 2. Hang two chunkservers
    ASSERT_EQ(0, cluster->HangChunkServer(1));
    ASSERT_EQ(0, cluster->HangChunkServer(2));

-    // 3. hang两个chunkserver, io无法正常下发
+    // 3. After hanging two chunkservers, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 2));

-    // 4. 拉起刚才被hang的chunkserver的第一个
+    // 4. Recover the first of the hung chunkservers
    ASSERT_EQ(0, cluster->RecoverHangChunkServer(1));

-    // 5. 拉起刚才被hang的chunkserver的第一个,
-    // client的io预期最多会在2*electtime后恢复
-    // 如果配置了慢启动,则需要等待
+    // 5. After the first hung chunkserver is recovered, client IO is
+    // expected to recover within at most 2 * election_timeout.
+    // If slow start is configured, it is necessary to wait
    // (copysetNum / load_concurrency) * election_timeout
    ASSERT_TRUE(MonitorResume(0, 4096, 80));

-    // 6. 拉起刚才被hang的chunkserver的第二个
+    // 6. Recover the second of the hung chunkservers
    ASSERT_EQ(0, cluster->RecoverHangChunkServer(2));

-    // 7. 集群io不影响,正常下发
+    // 7. Cluster IO is unaffected and issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: KillThreeChunkserverThenRestartTheChunkserver";

    /******** KillThreeChunkserverThenRestartTheChunkserver ******/
-    // 1. 测试重启三个chunkserver
-    // 2.预期:
-    // a. 
集群状态正常时:client读写请求可以正常下发
-    // b. 关闭三台chunkserver:client IO hang
-    // c. 重启一台chunkserver:client IO hang
-    // d. 重启第二台chunkserver:client IO hang,
-    // 直到chunkserver完全恢复,IO恢复。
-    // 恢复时间约等于(chunkserver启动回放数据+2*election_timeout)
-    // e. 重启第三台chunkserver:client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Test restarting three chunkservers
+    // 2. Expected:
+    //    a. When the cluster is healthy: client read/write requests are
+    //       issued normally
+    //    b. Stop three chunkservers: client IO hangs
+    //    c. Restart one chunkserver: client IO still hangs
+    //    d. Restart the second chunkserver: client IO hangs until the
+    //       chunkserver fully recovers, then IO recovers. The recovery time
+    //       is roughly (time for the chunkserver to replay its data on
+    //       startup + 2 * election_timeout)
+    //    e. Restart the third chunkserver: client IO is unaffected
+    // 1. In the initial cluster state, IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. kill掉三个chunkserver
+    // 2. Kill three chunkservers
    ASSERT_EQ(0, cluster->StopChunkServer(1));
    ASSERT_EQ(0, cluster->StopChunkServer(2));
    ASSERT_EQ(0, cluster->StopChunkServer(3));

-    // 3. kill掉三个chunkserver, io无法正常下发
+    // 3. After killing three chunkservers, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 2));

-    // 4. 拉起刚才被kill的chunkserver的第一个
+    // 4. Restart the first of the killed chunkservers
    pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125",
                                          chunkserverConf4);
    LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 5. 只有一个chunkserver工作, io无法正常下发
+    // 5. With only one chunkserver working, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 80));

-    // 6. 拉起刚才被kill的chunkserver的第二个
+    // 6. Restart the second of the killed chunkservers
    pid = cluster->StartSingleChunkServer(2, "127.0.0.1:22126",
                                          chunkserverConf5);
    LOG(INFO) << "chunkserver 2 started on 127.0.0.1:22126, pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 7. client的io恢复
+    // 7. Client IO recovers
    ASSERT_TRUE(MonitorResume(0, 4096, 80));

-    // 8. 拉起其他被kil的chunkserver
+    // 8. Restart the remaining killed chunkserver
    pid = cluster->StartSingleChunkServer(3, "127.0.0.1:22127",
                                          chunkserverConf6);
    LOG(INFO) << "chunkserver 3 started on 127.0.0.1:22127, pid = " << pid;
@@ -528,37 +567,37 @@ TEST_F(CSModuleException, ChunkserverException) {
    LOG(INFO) << "current case: HangThreeChunkserverThenResumeTheChunkserver";

    /******** HangThreeChunkserverThenResumeTheChunkserver **********/
-    // 1. hang三台chunkserver,然后恢复hang的chunkserver
-    // 2.预期
-    // a. 集群状态正常时:client读写请求可以正常下发
-    // b. hang三台chunkserver:client IO hang
-    // c. 恢复一台chunkserver:client IO hang
-    // d. 再恢复一台chunkserver:预期在
-    // election_timeout*2左右的时间,client IO恢复
-    // e. 恢复最后一台chunkserver:预期client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang three chunkservers, then recover them
+    // 2. Expected:
+    //    a. When the cluster is healthy: client read/write requests are
+    //       issued normally
+    //    b. Hang three chunkservers: client IO hangs
+    //    c. Recover one chunkserver: client IO still hangs
+    //    d. Recover another chunkserver: client IO is expected to recover
+    //       in roughly election_timeout * 2
+    //    e. Recover the last chunkserver: client IO is expected to be
+    //       unaffected
+    // 1. In the initial cluster state, IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. hang掉三个chunkserver
+    // 2. Hang three chunkservers
    ASSERT_EQ(0, cluster->HangChunkServer(1));
    ASSERT_EQ(0, cluster->HangChunkServer(2));
    ASSERT_EQ(0, cluster->HangChunkServer(3));
-    // 3. 
hang三个chunkserver, io无法正常下发 + // 3. Hang three chunkservers, IO cannot be distributed normally ASSERT_FALSE(MonitorResume(0, 4096, 30)); - // 4. 拉起刚才被hang的chunkserver的第一个 + // 4. Pull up the first chunkserver that was just hung ASSERT_EQ(0, cluster->RecoverHangChunkServer(1)); - // 5. 只有一个chunkserver工作, io无法正常下发 + // 5. Only one chunkserver is working, IO cannot be issued normally ASSERT_FALSE(MonitorResume(0, 4096, 80)); - // 6. 拉起刚才被hang的chunkserver的第二个 + // 6. Pull up the second chunkserver that was just hung ASSERT_EQ(0, cluster->RecoverHangChunkServer(2)); ASSERT_EQ(0, cluster->RecoverHangChunkServer(3)); - // 7. client的io预期最多会在2*electtime s内恢复 - // 如果配置了慢启动,则需要等待 - // (copysetNum / load_concurrency) * election_timeout + // 7. The client's IO is expected to recover within a maximum of 2 * + // electtime seconds If slow start is configured, wait (copysetNum / + // load_concurrency) * election_timeout ASSERT_TRUE(MonitorResume(0, 4096, 80)); } diff --git a/test/integration/client/common/file_operation.cpp b/test/integration/client/common/file_operation.cpp index 44dfc186a5..c5943a629f 100644 --- a/test/integration/client/common/file_operation.cpp +++ b/test/integration/client/common/file_operation.cpp @@ -43,15 +43,15 @@ int FileCommonOperation::Open(const std::string& filename, memset(userinfo.owner, 0, 256); memcpy(userinfo.owner, owner.c_str(), owner.size()); - // 先创建文件 - int ret = Create(filename.c_str(), &userinfo, 100*1024*1024*1024ul); + // Create a file first + int ret = Create(filename.c_str(), &userinfo, 100 * 1024 * 1024 * 1024ul); if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::EXISTS) { LOG(ERROR) << "file create failed! " << ret << ", filename = " << filename; return -1; } - // 再打开文件 + // Reopen File int fd = ::Open(filename.c_str(), &userinfo); if (fd < 0 && ret != -LIBCURVE_ERROR::FILE_OCCUPIED) { LOG(ERROR) << "Open file failed!"; @@ -68,8 +68,8 @@ void FileCommonOperation::Close(int fd) { } int FileCommonOperation::Open(const std::string& filename, - const std::string& owner, - uint64_t stripeUnit, uint64_t stripeCount) { + const std::string& owner, uint64_t stripeUnit, + uint64_t stripeCount) { assert(globalclient != nullptr); C_UserInfo_t userinfo; @@ -84,7 +84,7 @@ int FileCommonOperation::Open(const std::string& filename, context.stripeUnit = stripeUnit; context.stripeCount = stripeCount; - // 先创建文件 + // Create a file first int ret = globalclient->Create2(context); if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::EXISTS) { LOG(ERROR) << "file create failed! 
" << ret @@ -92,7 +92,7 @@ int FileCommonOperation::Open(const std::string& filename, return -1; } - // 再打开文件 + // Reopen File int fd = ::Open(filename.c_str(), &userinfo); if (fd < 0 && ret != -LIBCURVE_ERROR::FILE_OCCUPIED) { LOG(ERROR) << "Open file failed!"; @@ -102,5 +102,5 @@ int FileCommonOperation::Open(const std::string& filename, return fd; } -} // namespace test -} // namespace curve +} // namespace test +} // namespace curve diff --git a/test/integration/client/common/file_operation.h b/test/integration/client/common/file_operation.h index 0414146eff..c46b7add46 100644 --- a/test/integration/client/common/file_operation.h +++ b/test/integration/client/common/file_operation.h @@ -30,17 +30,18 @@ namespace curve { namespace test { class FileCommonOperation { public: - /** - * 指定文件名,打开文件,如果没创建则先创建,返回fd - */ + /** + * Specify a file name, open the file, if not created, create it first, + * return fd + */ static int Open(const std::string& filename, const std::string& owner); static void Close(int fd); static int Open(const std::string& filename, const std::string& owner, - uint64_t stripeUnit, uint64_t stripeCount); + uint64_t stripeUnit, uint64_t stripeCount); }; -} // namespace test -} // namespace curve +} // namespace test +} // namespace curve #endif // TEST_INTEGRATION_CLIENT_COMMON_FILE_OPERATION_H_ diff --git a/test/integration/client/mds_exception_test.cpp b/test/integration/client/mds_exception_test.cpp index ad0d82b093..6eb665621a 100644 --- a/test/integration/client/mds_exception_test.cpp +++ b/test/integration/client/mds_exception_test.cpp @@ -20,22 +20,22 @@ * Author: tongguangxun */ -#include -#include #include +#include +#include -#include +#include #include -#include // NOLINT -#include // NOLINT -#include +#include // NOLINT +#include +#include // NOLINT #include -#include -#include // NOLINT +#include +#include // NOLINT -#include "src/common/timeutility.h" #include "include/client/libcurve.h" #include "src/client/inflight_controller.h" +#include "src/common/timeutility.h" #include "test/integration/client/common/file_operation.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" @@ -51,11 +51,11 @@ bool testIORead = false; using curve::CurveCluster; const std::vector mdsConf{ - { "--confPath=./conf/mds.conf" }, - { "--log_dir=./runlog/MDSExceptionTest" }, - { "--mdsDbName=module_exception_curve_mds" }, - { "--sessionInterSec=20" }, - { "--etcdAddr=127.0.0.1:22230" }, + {"--confPath=./conf/mds.conf"}, + {"--log_dir=./runlog/MDSExceptionTest"}, + {"--mdsDbName=module_exception_curve_mds"}, + {"--sessionInterSec=20"}, + {"--etcdAddr=127.0.0.1:22230"}, }; const std::vector chunkserverConf1{ @@ -124,9 +124,9 @@ const std::vector chunkserverConf3{ {"-walFilePoolDir=./moduleException3/walfilepool/"}, {"-walFilePoolMetaPath=./moduleException3/walfilepool.meta"}}; -std::string mdsaddr = // NOLINT - "127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224"; // NOLINT -std::string logpath = "./runlog/MDSExceptionTest"; // NOLINT +std::string mdsaddr = // NOLINT + "127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224"; // NOLINT +std::string logpath = "./runlog/MDSExceptionTest"; // NOLINT const std::vector clientConf{ std::string("mds.listen.addr=") + mdsaddr, @@ -135,9 +135,11 @@ const std::vector clientConf{ std::string("chunkserver.opMaxRetry=10"), }; -class MDSModuleException : public ::testing::Test { - public: - void SetUp() { +class MDSModuleException : public ::testing::Test +{ +public: + void SetUp() + { std::string confPath = 
"./test/integration/client/config/client.conf"; system("mkdir ./runlog/MDSExceptionTest"); system("rm -rf module_exception_test_mds.etcd"); @@ -149,14 +151,15 @@ class MDSModuleException : public ::testing::Test { cluster->PrepareConfig(confPath, clientConf); - // 1. 启动etcd + // 1. Start etcd pid_t pid = cluster->StartSingleEtcd( 1, "127.0.0.1:22230", "127.0.0.1:22231", std::vector{"--name=module_exception_test_mds"}); LOG(INFO) << "etcd 1 started on 127.0.0.1:22230:22231, pid = " << pid; ASSERT_GT(pid, 0); - // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 + // 2. Start one mds first, make it a leader, and then start the other + // two mds nodes pid = cluster->StartSingleMDS(0, "127.0.0.1:22222", 22240, mdsConf, true); LOG(INFO) << "mds 0 started on 127.0.0.1:22222, pid = " << pid; @@ -173,7 +176,7 @@ class MDSModuleException : public ::testing::Test { ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); - // 3. 创建物理池 + // 3. Creating a physical pool std::string createPPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( " -cluster_map=./test/integration/client/" @@ -189,14 +192,16 @@ class MDSModuleException : public ::testing::Test { LOG(INFO) << "exec cmd: " << createPPCmd; int ret = 0; int retry = 0; - while (retry < 5) { + while (retry < 5) + { ret = system(createPPCmd.c_str()); - if (ret == 0) break; + if (ret == 0) + break; retry++; } ASSERT_EQ(ret, 0); - // 4. 创建chunkserver + // 4. Create chunkserver pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22225", chunkserverConf1); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22225, pid = " << pid; @@ -212,7 +217,8 @@ class MDSModuleException : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // 5. Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first std::string createLPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( @@ -226,22 +232,24 @@ class MDSModuleException : public ::testing::Test { ret = 0; retry = 0; - while (retry < 5) { + while (retry < 5) + { ret = system(createLPCmd.c_str()); - if (ret == 0) break; + if (ret == 0) + break; retry++; } ASSERT_EQ(ret, 0); - // 6. 初始化client配置 + // 6. Initialize client configuration ret = Init(confPath.c_str()); ASSERT_EQ(ret, 0); - // 7. 创建一个文件 + // 7. Create a file fd = curve::test::FileCommonOperation::Open("/test1", "curve"); ASSERT_NE(fd, -1); - // 8. 先睡眠10s,让chunkserver选出leader + // 8. Sleep for 10 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(5)); ipmap[0] = "127.0.0.1:22222"; @@ -253,7 +261,8 @@ class MDSModuleException : public ::testing::Test { configmap[2] = mdsConf; } - void TearDown() { + void TearDown() + { ::Close(fd); UnInit(); @@ -263,22 +272,28 @@ class MDSModuleException : public ::testing::Test { system("rm -rf moduleException1 moduleException2 moduleException3"); } - void CreateOpenFileBackend() { + void CreateOpenFileBackend() + { createDone = false; createOrOpenFailed = false; - auto func = [&]() { + auto func = [&]() + { static int num = 0; - for (int i = num; i < num + 20; i++) { + for (int i = num; i < num + 20; i++) + { std::string filename = "/" + std::to_string(i); LOG(INFO) << "now create file: " << filename; int ret = curve::test::FileCommonOperation::Open(filename, "curve"); ret == -1 ? 
createOrOpenFailed = true : 0; - if (ret != -1) { + if (ret != -1) + { ::Close(ret); std::this_thread::sleep_for(std::chrono::milliseconds(500)); - } else { + } + else + { break; } } @@ -293,44 +308,55 @@ class MDSModuleException : public ::testing::Test { t.detach(); } - void WaitBackendCreateDone() { + void WaitBackendCreateDone() + { std::unique_lock lk(createMtx); - createCV.wait(lk, [&]() { return createDone; }); + createCV.wait(lk, [&]() + { return createDone; }); } /** - * 监测client io能否在预期时间内正常下发 - * @param: off是当前需要下发IO的偏移 - * @param: size是下发io的大小 - * @param: predictTimeS是预期在多少秒内IO可以恢复 - * @param[out]: failCount为当前io下发中错误返回的数量 - * @return: 如果io在预期时间内嫩够正常下发,则返true,否则返回false + * Monitor whether client io can be issued normally within the expected time + * @param: off is the offset that currently requires issuing IO + * @param: size is the size of the distributed io + * @param: predictTimeS is the expected number of seconds in which IO can be + * restored + * @param[out]: failCount is the number of error returns in the current io + * distribution + * @return: If the io is issued normally within the expected time, return + * true; otherwise, return false */ - bool MonitorResume(uint64_t off, uint64_t size, uint64_t predictTimeS) { + bool MonitorResume(uint64_t off, uint64_t size, uint64_t predictTimeS) + { inflightContl.SetMaxInflightNum(16); resumeFlag = false; ioFailedCount = 0; - auto wcb = [](CurveAioContext* context) { + auto wcb = [](CurveAioContext *context) + { inflightContl.DecremInflightNum(); - if (context->ret == context->length) { + if (context->ret == context->length) + { std::unique_lock lk(resumeMtx); resumeFlag = true; resumeCV.notify_all(); - } else { + } + else + { ioFailedCount++; } LOG(INFO) << "end aiowrite with ret = " << context->ret; delete context; }; - char* writebuf = new char[size]; + char *writebuf = new char[size]; memset(writebuf, 'a', size); - auto iofunc = [&]() { + auto iofunc = [&]() + { std::this_thread::sleep_for(std::chrono::seconds(predictTimeS)); inflightContl.WaitInflightComeBack(); - CurveAioContext* context = new CurveAioContext; + CurveAioContext *context = new CurveAioContext; context->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; context->offset = off; context->length = size; @@ -352,7 +378,7 @@ class MDSModuleException : public ::testing::Test { ret = resumeFlag; } - // 唤醒io线程 + // Wake up IO thread iothread.join(); inflightContl.WaitInflightAllComeBack(); @@ -360,25 +386,28 @@ class MDSModuleException : public ::testing::Test { return ret; } - /**下发一个写请求 - * @param: offset是当前需要下发IO的偏移 - * @param: size是下发IO的大小 - * @return: IO是否下发成功 + /** Send a write request + * @param: offset is the offset that currently requires issuing IO + * @param: size is the size of the issued IO + * @return: Whether the IO was successfully issued */ - bool SendAioWriteRequest(uint64_t offset, uint64_t size) { + bool SendAioWriteRequest(uint64_t offset, uint64_t size) + { writeIOReturnFlag = false; - auto writeCallBack = [](CurveAioContext* context) { - // 无论IO是否成功,只要返回,就置为true + auto writeCallBack = [](CurveAioContext *context) + { + // Regardless of whether IO is successful or not, as long as it + // returns, it is set to true writeIOReturnFlag = true; - char* buffer = reinterpret_cast(context->buf); + char *buffer = reinterpret_cast(context->buf); delete[] buffer; delete context; }; - char* buffer = new char[size]; + char *buffer = new char[size]; memset(buffer, 'a', size); - CurveAioContext* context = new CurveAioContext(); + CurveAioContext *context = new 
CurveAioContext(); context->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; context->offset = offset; context->length = size; @@ -388,26 +417,30 @@ class MDSModuleException : public ::testing::Test { return AioWrite(fd, context) == 0; } - /** 下发一个写请求并读取进行数据验证 - * @param: fd 卷fd - * @param: 当前需要下发io的偏移 - * @param:下发io的大小 - * @return: 数据是否一致 - */ - void VerifyDataConsistency(int fd, uint64_t offset, uint64_t size) { - char* writebuf = new char[size]; - char* readbuf = new char[size]; + /** Send a write request and read for data validation + * @param: fd volume fd + * @param: The offset that currently needs to be issued for IO + * @param: The size of the distributed IO + * @return: Whether the data is consistent + */ + void VerifyDataConsistency(int fd, uint64_t offset, uint64_t size) + { + char *writebuf = new char[size]; + char *readbuf = new char[size]; unsigned int i; - LOG(INFO) << "VerifyDataConsistency(): offset " << - offset << ", size " << size; - for (i = 0; i < size; i++) { + LOG(INFO) << "VerifyDataConsistency(): offset " << offset << ", size " + << size; + for (i = 0; i < size; i++) + { writebuf[i] = ('a' + std::rand() % 26); } - // 开始写 - auto wcb = [](CurveAioContext* context) { - if (context->ret == context->length) { + // Start writing + auto wcb = [](CurveAioContext *context) + { + if (context->ret == context->length) + { testIOWrite = true; } std::unique_lock lk(resumeMtx); @@ -415,8 +448,10 @@ class MDSModuleException : public ::testing::Test { delete context; }; - auto writefunc = [&]() { - CurveAioContext* context = new CurveAioContext;; + auto writefunc = [&]() + { + CurveAioContext *context = new CurveAioContext; + ; context->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; context->offset = offset; context->length = size; @@ -434,9 +469,11 @@ class MDSModuleException : public ::testing::Test { writeThread.join(); ASSERT_TRUE(testIOWrite); - // 开始读 - auto rcb = [](CurveAioContext* context) { - if (context->ret == context->length) { + // Start reading + auto rcb = [](CurveAioContext *context) + { + if (context->ret == context->length) + { testIORead = true; } std::unique_lock lk(resumeMtx); @@ -444,8 +481,10 @@ class MDSModuleException : public ::testing::Test { delete context; }; - auto readfunc = [&]() { - CurveAioContext* context = new CurveAioContext;; + auto readfunc = [&]() + { + CurveAioContext *context = new CurveAioContext; + ; context->op = LIBCURVE_OP::LIBCURVE_OP_READ; context->offset = offset; context->length = size; @@ -471,53 +510,63 @@ class MDSModuleException : public ::testing::Test { int fd; - // 是否出现挂卸载失败 + // Whether mounting or unmounting fails. bool createOrOpenFailed; bool createDone; std::mutex createMtx; std::condition_variable createCV; - CurveCluster* cluster; + CurveCluster *cluster; std::map ipmap; std::map> configmap; }; #define segment_size 1 * 1024 * 1024 * 1024ul -// 测试环境拓扑:在单节点上启动一个client、三个chunkserver、三个mds、一个etcd +// Test environment topology: Start one client, three chunkservers, three mds, +// and one etcd on a single node -TEST_F(MDSModuleException, MDSExceptionTest) { +TEST_F(MDSModuleException, MDSExceptionTest) +{ LOG(INFO) << "current case: KillOneInserviceMDSThenRestartTheMDS"; /********** KillOneInserviceMDSThenRestartTheMDS *************/ - // 1. 重启一台正在服务的mds - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. 关闭一台mds,在mds服务切换到另一台mds之前, - // client 新写IO会hang,挂卸载服务会异常 - // c. mds服务切换后,预期client IO无影响,挂卸载服务正常 - // d. 重新拉起mds,client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Restarting a currently serving MDS. + // 2. Expectations: + // a. 
When the cluster is in a normal state, client read and write + // requests can be issued normally. b. When shutting down an MDS, before + // the MDS service switches to another MDS, + // new write IO from clients will hang, and mount/unmount services + // will behave abnormally. + // c. After the MDS service switches, it is expected that client IO will + // be unaffected, and mount/unmount services will be normal. d. When + // bringing the MDS back up, client IO will be unaffected. + // 1. In the initial state of the cluster, IO can be issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. kill一台正在服务的mds,在启动的时候第一台mds当选leader + // 2. Kill an MDS that is currently in service, and when it is started, the + // first MDS is selected as the leader int serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); ASSERT_EQ(0, cluster->StopMDS(serviceMDSID)); - // 3. 启动后台挂卸载线程,预期挂卸载会出现失败 + // 3. Start the background suspend and unload thread, and expect the suspend + // and unload to fail CreateOpenFileBackend(); - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. Start background IO monitoring and start writing from the next segment + // to trigger the getorallocate logic + // follower mds cluster normal service after renewing session expiration + // (20s renewal) ASSERT_TRUE(MonitorResume(segment_size, 4096, 25)); - // 5. 等待后台挂卸载监测结束 + // 5. Waiting for the end of backend suspension and uninstallation + // monitoring WaitBackendCreateDone(); - // 6. 判断当前挂卸载情况 + // 6. Determine the current suspension and uninstallation situation ASSERT_TRUE(createOrOpenFailed); - // 7. 拉起被kill的进程 + // 7. Pulling up the process of being killed pid_t pid = cluster->StartSingleMDS(serviceMDSID, ipmap[serviceMDSID], 22240 + serviceMDSID, configmap[serviceMDSID], false); @@ -525,85 +574,103 @@ TEST_F(MDSModuleException, MDSExceptionTest) { << ", pid = " << pid; ASSERT_GT(pid, 0); - // 8. 再拉起被kill的mds,对集群没有影响 + // 8. Pulling up the killed mds again has no impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: KillOneNotInserviceMDSThenRestartTheMDS"; /*********** KillOneNotInserviceMDSThenRestartTheMDS *******/ - // 1. 重启一台不在服务的mds - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. 关闭一台不在服务的mds,预期client IO无影响,挂卸载服务正常 - // 1. 集群最初状态,io正常下发 + // 1. Restart an MDS that is not in service + // 2. Expectations + // a. When the cluster status is normal: client read and write requests + // can be issued normally b. Turn off an MDS that is not in service, + // expect no impact on client IO, and suspend and uninstall the service + // normally + // 1. The initial state of the cluster, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. kill一台不在服务的mds,在启动的时候第一台mds当选leader, kill第二台 + // 2. Kill an MDS that is not in service. When starting, the first MDS is + // selected as the leader, and kill the second MDS serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); int killid = (serviceMDSID + 1) % 3; ASSERT_EQ(0, cluster->StopMDS(killid)); - // 3. 启动后台挂卸载线程,预期挂卸载服务不会受影响 + // 3. Start the backend suspend and uninstall thread, and it is expected + // that the suspend and uninstall service will not be affected CreateOpenFileBackend(); - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. 
Start background IO monitoring, writing from the next segment so
+    //    the write triggers the getorallocate logic; the cluster serves
+    //    normally again once a follower mds renews its etcd session (20 s
+    //    lease).
    ASSERT_TRUE(MonitorResume(2 * segment_size, 4096, 25));

-    // 5. 等待挂卸载监测结束
+    // 5. Wait for the background mount/unmount check to finish
    WaitBackendCreateDone();

-    // 6. 挂卸载服务正常
+    // 6. The mount/unmount service worked normally
    ASSERT_FALSE(createOrOpenFailed);

-    // 7. 拉起被kill的进程
+    // 7. Restart the killed process
    pid = cluster->StartSingleMDS(killid, ipmap[killid], 22240 + killid,
                                  configmap[killid], false);
    LOG(INFO) << "mds " << killid << " started on " << ipmap[killid]
              << ", pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 8. 再拉起被kill的mds,对集群没有影响
+    // 8. Restarting the killed mds has no impact on the cluster
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: hangOneInserviceMDSThenResumeTheMDS";
    /************ hangOneInserviceMDSThenResumeTheMDS ********/
-    // 1. hang一台正在服务的mds
-    // 2.预期
-    //     a. 集群状态正常时:client读写请求可以正常下发
-    //     b. mds hang期间且在与etcd续约超时之前,这时候新写IO会失败,
-    //        因为新写触发getorallocate,这个RPC发到mds会出现一直超时,然后重试
-    //        最后重试失败。
-    //     c. client session续约时长总比mds与etcd之间续约时长大,所以在
-    //        session续约失败之前mds预期可以完成切换,所以client的session
-    //        不会过期,覆盖写不会出现异常。
-    //     d. 恢复被hang的mds,预期对client io无影响
-    // 0. 先睡眠一段时间等待mds集群选出leader
+    // 1. Hang the mds that is currently serving.
+    // 2. Expectations:
+    //     a. While the cluster is healthy, client reads and writes are
+    //        issued normally.
+    //     b. While the mds hangs, and before its etcd lease times out, new
+    //        writes fail: a new write triggers getorallocate, and that RPC
+    //        keeps timing out against the hung mds until the retries are
+    //        exhausted.
+    //     c. The client session renewal period is always longer than the
+    //        mds-etcd lease, so the mds switch is expected to finish before
+    //        a client session renewal can fail; the session does not expire
+    //        and overwrites see no errors.
+    //     d. Recovering the hung mds is expected to have no impact on
+    //        client IO.
+    // 0. Sleep for a while first so the mds cluster can elect a leader.
    std::this_thread::sleep_for(std::chrono::seconds(10));
-    // 1. 集群最初状态,io正常下发
+    // 1. In the initial cluster state, IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. hang一台正在服务的mds,在启动的时候第一台mds当选leader
+    // 2. Hang the serving mds; at startup the first mds was elected leader
    serviceMDSID = 0;
    cluster->CurrentServiceMDS(&serviceMDSID);
    ASSERT_EQ(0, cluster->HangMDS(serviceMDSID));

-    // 3. 启动后台挂卸载线程,预期挂卸载会出现失败
+    // 3. Start the background mount/unmount thread; mount/unmount is
+    //    expected to fail
    CreateOpenFileBackend();

-    // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑
-    //    follower mds在session过期后重新续约后集群正常服务(20s续约)
+    // 4. Start background IO monitoring, writing from the next segment so
+    //    the write triggers the getorallocate logic; the cluster serves
+    //    normally again once a follower mds renews its etcd session (20 s
+    //    lease).
    auto ret = MonitorResume(3 * segment_size, 4096, 25);
-    if (!ret) {
+    if (!ret)
+    {
        ASSERT_EQ(0, cluster->RecoverHangMDS(serviceMDSID));
        ASSERT_TRUE(false);
    }

-    // 5. 等待后台挂卸载监测结束
+    // 5. Wait for the background mount/unmount check to finish
    WaitBackendCreateDone();

-    // 6. 判断当前挂卸载情况
+    // 6. Check the mount/unmount results
    ASSERT_EQ(0, cluster->RecoverHangMDS(serviceMDSID));
    ASSERT_EQ(0, cluster->StopMDS(serviceMDSID));
    pid = cluster->StartSingleMDS(serviceMDSID, ipmap[serviceMDSID],
@@ -614,39 +681,46 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
    ASSERT_GT(pid, 0);

    ASSERT_TRUE(createOrOpenFailed);

-    // 7. 再拉起被kill的mds,对集群没有影响
+    // 7. Restarting the killed mds has no impact on the cluster
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: hangOneNotInserviceMDSThenResumeTheMDS";
    /********** hangOneNotInserviceMDSThenResumeTheMDS ***********/
-    // 1. hang一台不在服务的mds
-    // 2.预期
-    //     a. 集群状态正常时:client读写请求可以正常下发
-    //     b. hang一台不在服务的mds,预期client IO无影响,挂卸载服务正常
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang an mds that is not serving.
+    // 2. Expectations:
+    //     a. While the cluster is healthy, client reads and writes are
+    //        issued normally.
+    //     b. Hanging an mds that is not serving is expected to have no
+    //        impact on client IO; the mount/unmount service stays normal.
+    // 1. In the initial cluster state, IO is issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. hang一台不在服务的mds,在启动的时候第一台mds当选leader, hang第二台
+    // 2. Hang an mds that is not serving: at startup the first mds was
+    //    elected leader, so hang the second one
    serviceMDSID = 0;
    cluster->CurrentServiceMDS(&serviceMDSID);
    int hangid = (serviceMDSID + 1) % 3;
    ASSERT_EQ(0, cluster->HangMDS(hangid));

-    // 3. 启动后台挂卸载线程,预期挂卸载服务不会受影响
+    // 3. Start the background mount/unmount thread; the mount/unmount
+    //    service is expected to be unaffected
    CreateOpenFileBackend();

-    // 4. 启动后台iops监测, 从下一个segment开始写,使其触发getorallocate逻辑
-    //    follower mds在session过期后重新续约后集群正常服务(20s续约)
+    // 4. Start background iops monitoring, writing from the next segment so
+    //    the write triggers the getorallocate logic; the cluster serves
+    //    normally again once a follower mds renews its etcd session (20 s
+    //    lease).
    ret = MonitorResume(4 * segment_size, 4096, 25);
-    if (!ret) {
+    if (!ret)
+    {
        ASSERT_EQ(0, cluster->RecoverHangMDS(hangid));
        ASSERT_TRUE(false);
    }

-    // 5. 等待挂卸载监测结束
+    // 5. Wait for the mount/unmount check to finish
    WaitBackendCreateDone();

-    // 6. 挂卸载服务正常
+    // 6. The mount/unmount service worked normally
    ASSERT_EQ(0, cluster->RecoverHangMDS(hangid));
    ASSERT_EQ(0, cluster->StopMDS(hangid));
    pid = cluster->StartSingleMDS(hangid, ipmap[hangid], 22240 + hangid,
@@ -657,42 +731,50 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
    ASSERT_GT(pid, 0);

    ASSERT_FALSE(createOrOpenFailed);

-    // 7. 集群没有影响
+    // 7. The cluster is unaffected
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: KillTwoInserviceMDSThenRestartTheMDS";
    /************* KillTwoInserviceMDSThenRestartTheMDS ***********/
-    // 1. 重启两台mds,其中一台正在服务的mds
-    // 2.预期
-    //     a. 集群状态正常时:client读写请求可以正常下发
-    //     b. 关闭两台mds,在mds服务切换到另一台mds之前,
-    //        client 新写IO会出现失败,挂卸载服务会异常
-    //     c. mds服务切换后,预期client IO恢复,挂卸载服务正常
-    //     d. 重新拉起mds,client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Restart two mds, one of which is currently serving.
+    // 2. Expectations:
+    //     a. While the cluster is healthy, client reads and writes are
+    //        issued normally.
+    //     b. After shutting down two mds, and before the mds service
+    //        switches to the remaining mds, new client writes fail and the
+    //        mount/unmount service misbehaves.
+    //     c. After the mds service switches, client IO is expected to
+    //        recover and the mount/unmount service to return to normal.
+    //     d.
When bringing + // the MDS nodes back up, client IO will be unaffected. + // 1. In the initial state of the cluster, IO can be issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. kill两台mds,在启动的时候第一台mds当选leader, kill前二台 + // 2. Kill two MDSs. When starting, the first MDS is selected as the leader, + // and kill the first two MDSs serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); int secondid = (serviceMDSID + 1) % 3; ASSERT_EQ(0, cluster->StopMDS(serviceMDSID)); ASSERT_EQ(0, cluster->StopMDS(secondid)); - // 3. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 3. Starting the backend suspend and uninstall thread, it is expected that + // the suspend and uninstall service will be affected CreateOpenFileBackend(); - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. Start background IO monitoring and start writing from the next segment + // to trigger the getorallocate logic + // follower mds cluster normal service after renewing session expiration + // (20s renewal) ASSERT_TRUE(MonitorResume(5 * segment_size, 4096, 25)); - // 5. 等待后台挂卸载监测结束 + // 5. Waiting for the end of backend suspension and uninstallation + // monitoring WaitBackendCreateDone(); - // 6. 判断当前挂卸载情况 + // 6. Determine the current suspension and uninstallation situation ASSERT_TRUE(createOrOpenFailed); - // 7. 拉起被kill的进程 + // 7. Pulling up the process of being killed pid = cluster->StartSingleMDS(serviceMDSID, ipmap[serviceMDSID], 22240 + serviceMDSID, configmap[serviceMDSID], false); @@ -700,10 +782,10 @@ TEST_F(MDSModuleException, MDSExceptionTest) { << ", pid = " << pid; ASSERT_GT(pid, 0); - // 8. 再拉起被kill的mds,对集群没有影响 + // 8. Pulling up the killed mds again has no impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 9. 拉起被kill的其他mds + // 9. Pull up other mds killed pid = cluster->StartSingleMDS(secondid, ipmap[secondid], 22240 + secondid, configmap[secondid], false); LOG(INFO) << "mds " << secondid << " started on " << ipmap[secondid] @@ -712,18 +794,22 @@ TEST_F(MDSModuleException, MDSExceptionTest) { LOG(INFO) << "current case: KillTwoNotInserviceMDSThenRestartTheMDS"; /******** KillTwoNotInserviceMDSThenRestartTheMDS ***********/ - // 1. 重启两台mds,其中两台都不在服务 - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. 关闭两台mds,预期client IO无影响,挂卸载服务正常 - // c. 重启这两台mds,client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Restart two MDS nodes, with both nodes not currently serving. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write + // requests can be issued normally. b. When shutting down two MDS nodes, + // it is expected that client IO will be unaffected, and mount/unmount + // services will be normal. c. When restarting these two MDS nodes, it is + // expected that client IO will be unaffected. + // 1. In the initial state of the cluster, IO can be issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 2. Starting the backend suspend and uninstall thread, it is expected that + // the suspend and uninstall service will be affected CreateOpenFileBackend(); - // 3. kill两台mds,在启动的时候第一台mds当选leader, kill后二台 + // 3. Kill two MDSs. When starting, the first MDS is selected as the leader, + // and kill the second two MDSs serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); int tempid_1 = (serviceMDSID + 1) % 3; @@ -731,27 +817,28 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_EQ(0, cluster->StopMDS(tempid_1)); ASSERT_EQ(0, cluster->StopMDS(tempid_2)); - // 4. 
启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // 不在服务的mds被kill对集群没有影响 + // 4. Start background IO monitoring and start writing from the next segment + // to trigger the getorallocate logic + // Killing mds that are not in service has no impact on the cluster ASSERT_TRUE(MonitorResume(6 * segment_size, 4096, 10)); - // 5. 等待挂卸载监测结束 + // 5. Waiting for the end of suspend/unload monitoring WaitBackendCreateDone(); - // 6. 挂卸载服务正常 + // 6. Hanging and uninstalling service is normal ASSERT_FALSE(createOrOpenFailed); - // 7. 拉起被kill的进程 + // 7. Pulling up the process of being killed pid = cluster->StartSingleMDS(tempid_1, ipmap[tempid_1], 22240 + tempid_1, configmap[tempid_1], false); LOG(INFO) << "mds " << tempid_1 << " started on " << ipmap[tempid_1] << ", pid = " << pid; ASSERT_GT(pid, 0); - // 8. 集群没有影响 + // 8. Cluster has no impact ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 9. 拉起其他mds,使集群恢复正常 + // 9. Pull up other mds to restore the cluster to normal pid = cluster->StartSingleMDS(tempid_2, ipmap[tempid_2], 22240 + tempid_2, configmap[tempid_2], false); LOG(INFO) << "mds " << tempid_2 << " started on " << ipmap[tempid_2] @@ -760,17 +847,24 @@ TEST_F(MDSModuleException, MDSExceptionTest) { LOG(INFO) << "current case: hangTwoInserviceMDSThenResumeTheMDS"; /******** hangTwoInserviceMDSThenResumeTheMDS ************/ - // 1. hang两台mds,其中包含一台正在服务的mds,然后恢复 - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. mds hang期间且在与etcd续约超时之前,这时候新写IO会失败, - // 因为新写触发getorallocate,这个RPC发到mds会出现一直超时,然后重试 - // 最后重试失败。 - // c. client session续约时长总比mds与etcd之间续约时长大,所以在 - // session续约失败之前mds预期可以完成切换,所以client的session - // 不会过期,覆盖写不会出现异常。 - // d. 恢复被hang的mds,预期对client io无影响 - // 1. hang两台mds,在启动的时候第一台mds当选leader, hang前二台 + // 1. Hang two MDS nodes, one of which is currently serving, and then + // recover them. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write + // requests can be issued normally. b. During the MDS hang period and + // before the lease renewal with etcd times out, new write IO will fail. + // This is because a new write triggers getorallocate, and the RPC + // sent to the MDS will keep timing out, leading to retries that + // eventually fail. + // c. The client session renewal duration is longer than the lease + // renewal duration between MDS and etcd. + // So, MDS is expected to complete the switch before session renewal + // failure occurs. Therefore, the client's session will not expire, + // and overwrite writes will not result in exceptions. + // d. When the hung MDS nodes are recovered, it is expected to have no + // impact on client IO. + // 1. Hang two MDS nodes, with the first MDS being elected as leader during + // startup, and both being hung before the process. serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); tempid_1 = serviceMDSID; @@ -778,25 +872,30 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_EQ(0, cluster->HangMDS(tempid_1)); ASSERT_EQ(0, cluster->HangMDS(tempid_2)); - // 2. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 2. Starting the backend suspend and uninstall thread, it is expected that + // the suspend and uninstall service will be affected CreateOpenFileBackend(); LOG(INFO) << "monitor resume start!"; - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. 
Start background IO monitoring, writing from the next segment so
+    //    the write triggers the getorallocate logic; the cluster serves
+    //    normally again once a follower mds renews its etcd session (20 s
+    //    lease).
    ret = MonitorResume(7 * segment_size, 4096, 25);
-    if (!ret) {
+    if (!ret)
+    {
        ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1));
        ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_2));
        ASSERT_TRUE(false);
    }
    LOG(INFO) << "monitor resume done!";

-    // 5. 等待后台挂卸载监测结束
+    // 5. Wait for the background mount/unmount check to finish
    WaitBackendCreateDone();
    LOG(INFO) << "wait backend create thread done!";

-    // 6. 判断当前挂卸载情况
+    // 6. Check the mount/unmount results
    ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1));
    ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_2));
    ASSERT_EQ(0, cluster->StopMDS(tempid_1));
@@ -814,20 +913,24 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
    ASSERT_GT(pid, 0);

    ASSERT_TRUE(createOrOpenFailed);

-    // 7. 再拉起被hang的mds,对集群没有影响
+    // 7. Restarting the previously hung mds has no impact on the cluster
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: hangTwoNotInserviceMDSThenResumeTheMDS";
    /********** hangTwoNotInserviceMDSThenResumeTheMDS ********/
-    // 1. hang两台mds,其中不包含正在服务的mds,然后恢复
-    // 2.预期
-    //     a. 集群状态正常时:client读写请求可以正常下发
-    //     b. hang一台不在服务的mds,预期client IO无影响,挂卸载服务正常
-    //     c. 恢复这两台mds,client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang two mds, neither of which is serving, then recover them.
+    // 2. Expectations:
+    //     a. While the cluster is healthy, client reads and writes are
+    //        issued normally.
+    //     b. Hanging mds that are not serving is expected to have no impact
+    //        on client IO; the mount/unmount service stays normal.
+    //     c. Recovering the two mds has no impact on client IO.
+    // 1. In the initial cluster state, IO is issued normally.
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. hang两台mds,在启动的时候第一台mds当选leader, kill后二台
+    // 2. Hang two mds: at startup the first mds was elected leader, so hang
+    //    the other two
    serviceMDSID = 0;
    cluster->CurrentServiceMDS(&serviceMDSID);
    tempid_1 = (serviceMDSID + 1) % 3;
@@ -835,22 +938,25 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
    ASSERT_EQ(0, cluster->HangMDS(tempid_1));
    ASSERT_EQ(0, cluster->HangMDS(tempid_2));

-    // 3. 启动后台挂卸载线程,预期挂卸载服务会受影响
+    // 3. Start the background mount/unmount thread; the mount/unmount
+    //    service is expected to be affected
    CreateOpenFileBackend();

-    // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑
-    //    不在服务的mds被kill对集群没有影响
+    // 4. Start background IO monitoring, writing from the next segment so
+    //    the write triggers the getorallocate logic. Hanging mds that are
+    //    not serving has no impact on the cluster.
    ret = MonitorResume(8 * segment_size, 4096, 10);
-    if (!ret) {
+    if (!ret)
+    {
        ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1));
        ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_2));
        ASSERT_TRUE(false);
    }

-    // 5. 等待挂卸载监测结束
+    // 5. Wait for the mount/unmount check to finish
    WaitBackendCreateDone();

-    // 6. 挂卸载服务正常
+    // 6. The mount/unmount service worked normally
    ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1));
    ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_2));
    ASSERT_EQ(0, cluster->StopMDS(tempid_1));
@@ -868,178 +974,197 @@ TEST_F(MDSModuleException, MDSExceptionTest) {
    ASSERT_GT(pid, 0);

    ASSERT_FALSE(createOrOpenFailed);

-    // 7. 集群没有影响
+    // 7.
Cluster has no impact ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: KillThreeMDSThenRestartTheMDS"; /********* KillThreeMDSThenRestartTheMDS *********/ - // 1. 重启三台mds - // 2.预期 - // a. 集群状态正常:client读写请求可以正常下发 - // b. kill三台mds:client 在session过期之后出现IO 失败 - // c. client session过期之前这段时间的新写会失败,覆盖写不影响 - // d. 恢复其中hang的一台mds:client session重新续约成功,io恢复正常 - // e. 恢复另外两台hang的mds,client io无影响 - - // 1. kill三台mds + // 1. Restart three MDS nodes. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write + // requests can be issued normally. b. Kill all three MDS nodes: Client + // IO failures occur after session expiration. c. During the period + // before the client session expires, new writes will fail, but overwrite + // writes will not be affected. d. Recover one of the hung MDS nodes: + // Client session renewal succeeds, and IO returns to normal. e. Recover + // the other two hung MDS nodes: Client IO remains unaffected. + + // 1. Kill three MDSs ASSERT_EQ(0, cluster->StopAllMDS()); - // 确保mds确实退出了 + // Ensure that the mds has indeed exited std::this_thread::sleep_for(std::chrono::seconds(10)); - // 2. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 2. Starting the backend suspend and uninstall thread, it is expected that + // the suspend and uninstall service will be affected CreateOpenFileBackend(); - // 3. 下发一个io,sleep一段时间后判断是否返回 - // 由于从下一个segment开始写,使其触发getorallocate逻辑 - // MDS全部不在服务,写请求一直hang,无法返回 + // 3. Send an IO and sleep for a period of time to determine whether to + // return + // Due to writing from the next segment, it triggers the getorallocate + // logic MDS is no longer in service, write requests are constantly + // hanging, unable to return ASSERT_TRUE(SendAioWriteRequest(9 * segment_size, 4096)); std::this_thread::sleep_for(std::chrono::seconds(30)); ASSERT_FALSE(writeIOReturnFlag); - // 4. 等待后台挂卸载监测结束 + // 4. Waiting for the end of backend suspension and uninstallation + // monitoring WaitBackendCreateDone(); - // 5. 判断当前挂卸载情况 + // 5. Determine the current suspension and uninstallation situation ASSERT_TRUE(createOrOpenFailed); - // 6. 拉起被kill的进程 + // 6. Pulling up the process of being killed pid = -1; - while (pid < 0) { + while (pid < 0) + { pid = cluster->StartSingleMDS(0, "127.0.0.1:22222", 22240, mdsConf, true); LOG(INFO) << "mds 0 started on 127.0.0.1:22222, pid = " << pid; std::this_thread::sleep_for(std::chrono::seconds(3)); } - // 7. 检测上次IO是否返回 + // 7. Check if the last IO returned std::this_thread::sleep_for(std::chrono::seconds(20)); ASSERT_TRUE(writeIOReturnFlag); - // 8. 新的mds开始提供服务 + // 8. New mds starts offering services ASSERT_TRUE(MonitorResume(segment_size, 4096, 10)); - // 9. 再拉起被kill的进程 + // 9. Pull up the process of being killed again pid = cluster->StartSingleMDS(1, "127.0.0.1:22223", 22229, mdsConf, false); LOG(INFO) << "mds 1 started on 127.0.0.1:22223, pid = " << pid; ASSERT_GT(pid, 0); - // 10. 对集群没有影响 + // 10. No impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 11. 拉起其他被kill的mds + // 11. Pull up other killed mds pid = cluster->StartSingleMDS(2, "127.0.0.1:22224", 22232, mdsConf, false); LOG(INFO) << "mds 2 started on 127.0.0.1:22224, pid = " << pid; ASSERT_GT(pid, 0); LOG(INFO) << "current case: hangThreeMDSThenResumeTheMDS"; /********** hangThreeMDSThenResumeTheMDS **************/ - // 1. hang三台mds,然后恢复 - // 2.预期 - // a. 集群状态正常:client读写请求可以正常下发 - // b. hang三台mds:client 在session过期之后出现IO hang - // c. client session过期之前这段时间的新写会一直hang,覆盖写不影响 - // e. 恢复其中hang的一台mds:client session重新续约成功,io恢复正常 - // f. 
恢复另外两台hang的mds,client io无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang three mds, then recover them.
+    // 2. Expectations:
+    //     a. While the cluster is healthy, client reads and writes are
+    //        issued normally.
+    //     b. With all three mds hung, client IO hangs once the session
+    //        expires.
+    //     c. Before the client session expires, new writes hang
+    //        continuously, while overwrites are unaffected.
+    //     e. Recovering one of the hung mds lets the client session renew
+    //        again and IO returns to normal.
+    //     f. Recovering the other two hung mds has no impact on client IO.
+    // 1. In the initial cluster state, IO is issued normally.
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. hang三台mds
+    // 2. Hang all three mds
    ASSERT_EQ(0, cluster->HangMDS(0));
    ASSERT_EQ(0, cluster->HangMDS(1));
    ASSERT_EQ(0, cluster->HangMDS(2));

-    // 3. 启动后台挂卸载线程,预期挂卸载服务会受影响
+    // 3. Start the background mount/unmount thread; the mount/unmount
+    //    service is expected to be affected
    CreateOpenFileBackend();

-    // 4. 下发一个io,sleep一段时间后判断是否返回
-    //    由于从下一个segment开始写,使其触发getorallocate逻辑
-    //    MDS全部不在服务,写请求一直hang,无法返回
+    // 4. Issue one IO, sleep for a while, then check whether it returned.
+    //    Writing from the next segment triggers the getorallocate logic;
+    //    with no mds in service the write request hangs and cannot return.
    ASSERT_TRUE(SendAioWriteRequest(10 * segment_size, 4096));
    std::this_thread::sleep_for(std::chrono::seconds(3));
    ret = writeIOReturnFlag;
-    if (ret) {
+    if (ret)
+    {
        ASSERT_EQ(0, cluster->RecoverHangMDS(2));
        ASSERT_EQ(0, cluster->RecoverHangMDS(1));
        ASSERT_EQ(0, cluster->RecoverHangMDS(0));
        ASSERT_TRUE(false);
    }

-    // 5. 等待监测结束
+    // 5. Wait for the monitoring to finish
    WaitBackendCreateDone();

-    // 6. 判断当前挂卸载情况
-    if (!createOrOpenFailed) {
+    // 6. Check the mount/unmount results
+    if (!createOrOpenFailed)
+    {
        ASSERT_EQ(0, cluster->RecoverHangMDS(2));
        ASSERT_EQ(0, cluster->RecoverHangMDS(1));
        ASSERT_EQ(0, cluster->RecoverHangMDS(0));
        ASSERT_TRUE(false);
    }

-    // 7. 拉起被hang的进程, 有可能hang的进程因为长时间未与etcd握手,
-    //    导致其被拉起后就退出了,所以这里在recover之后再启动该mds,
-    //    这样保证集群中至少有一个mds在提供服务
+    // 7. Restart the hung process. A hung mds may have gone so long without
+    //    an etcd handshake that it exits right after being recovered, so
+    //    this mds is stopped and started again after the recover; that
+    //    guarantees at least one mds in the cluster is serving.
    ASSERT_EQ(0, cluster->RecoverHangMDS(1));
    ASSERT_EQ(0, cluster->StopMDS(1));
    pid = -1;
-    while (pid < 0) {
+    while (pid < 0)
+    {
        pid = cluster->StartSingleMDS(1, "127.0.0.1:22223", 22229, mdsConf,
                                      true);
        LOG(INFO) << "mds 1 started on 127.0.0.1:22223, pid = " << pid;
        std::this_thread::sleep_for(std::chrono::seconds(3));
    }

-    // 检测上次IO是否返回
+    // Check whether the earlier IO has returned
    std::this_thread::sleep_for(std::chrono::seconds(20));
    ASSERT_TRUE(writeIOReturnFlag);

-    // 8. 新的mds开始提供服务
+    // 8. The new mds starts serving
    ret = MonitorResume(segment_size, 4096, 1);
-    if (!ret) {
+    if (!ret)
+    {
        ASSERT_EQ(0, cluster->RecoverHangMDS(2));
        ASSERT_EQ(0, cluster->RecoverHangMDS(0));
        ASSERT_TRUE(false);
    }

-    // 9. 再拉起被hang的进程
+    // 9. Recover the remaining hung processes
    ASSERT_EQ(0, cluster->RecoverHangMDS(2));
    ASSERT_EQ(0, cluster->RecoverHangMDS(0));

-    // 10. 对集群没有影响
+    // 10.
No impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); } -TEST_F(MDSModuleException, StripeMDSExceptionTest) { +TEST_F(MDSModuleException, StripeMDSExceptionTest) +{ LOG(INFO) << "current case: StripeMDSExceptionTest"; - // 1. 创建一个条带的卷 - int stripefd = curve::test::FileCommonOperation::Open("/test2", - "curve", 1024 * 1024, 8); + // 1. Create a striped volume + int stripefd = curve::test::FileCommonOperation::Open("/test2", "curve", + 1024 * 1024, 8); ASSERT_NE(stripefd, -1); uint64_t offset = std::rand() % 5 * segment_size; - // 2. 进行数据的读写校验 - VerifyDataConsistency(stripefd, offset, 128 *1024 *1024); + // 2. Perform data read and write verification + VerifyDataConsistency(stripefd, offset, 128 * 1024 * 1024); std::this_thread::sleep_for(std::chrono::seconds(60)); - // 3. kill 一台当前为leader的mds + // 3. Kill an MDS that is currently the leader LOG(INFO) << "stop mds."; int serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); ASSERT_EQ(0, cluster->StopMDS(serviceMDSID)); - // 4. 启动后台挂卸载线程 + // 4. Start the background suspend and unload thread CreateOpenFileBackend(); - // 5. 继续随机写数据进行校验 + // 5. Continue to randomly write data for verification offset = std::rand() % 5 * segment_size; LOG(INFO) << "when stop mds, write and read data."; - VerifyDataConsistency(stripefd, offset, 128 *1024 *1024); + VerifyDataConsistency(stripefd, offset, 128 * 1024 * 1024); - // 6. 等待挂卸载检测结果 + // 6. Waiting for the results of pending uninstallation detection WaitBackendCreateDone(); - // 7. 挂卸载服务正常 + // 7. Hanging and uninstalling service is normal ASSERT_TRUE(createOrOpenFailed); - LOG(INFO) <<"start mds."; + LOG(INFO) << "start mds."; pid_t pid = cluster->StartSingleMDS(serviceMDSID, ipmap[serviceMDSID], 22240 + serviceMDSID, configmap[serviceMDSID], false); @@ -1047,10 +1172,9 @@ TEST_F(MDSModuleException, StripeMDSExceptionTest) { << ", pid = " << pid; ASSERT_GT(pid, 0); - LOG(INFO) << "start mds, write and read data."; offset = std::rand() % 5 * segment_size; - VerifyDataConsistency(stripefd, offset, 128 *1024 *1024); + VerifyDataConsistency(stripefd, offset, 128 * 1024 * 1024); ::Close(stripefd); } diff --git a/test/integration/client/unstable_chunkserver_exception_test.cpp b/test/integration/client/unstable_chunkserver_exception_test.cpp index ea5c7e4c37..6ffca843bb 100644 --- a/test/integration/client/unstable_chunkserver_exception_test.cpp +++ b/test/integration/client/unstable_chunkserver_exception_test.cpp @@ -20,54 +20,52 @@ * Author: wuhanqing */ -#include -#include #include +#include +#include -#include -#include -#include // NOLINT -#include // NOLINT +#include #include -#include +#include +#include // NOLINT +#include #include -#include +#include // NOLINT #include -#include -#include // NOLINT +#include +#include // NOLINT +#include #include "include/client/libcurve.h" -#include "src/common/timeutility.h" #include "src/client/client_metric.h" #include "src/client/inflight_controller.h" +#include "src/common/timeutility.h" #include "test/integration/client/common/file_operation.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" using curve::CurveCluster; -const char* kMdsConfPath = "./test/integration/unstable_test_mds.conf"; -const char* kCSConfPath = "./test/integration/unstable_test_cs.conf"; -const char* kClientConfPath = "./test/integration/unstable_test_client.conf"; +const char *kMdsConfPath = "./test/integration/unstable_test_mds.conf"; +const char *kCSConfPath = "./test/integration/unstable_test_cs.conf"; +const char 
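+// None of the three *.conf paths above and below exist in the repository:
+// they are generated from templates by cluster->PrepareConfig() in
+// SetUpTestCase() and deleted again (::unlink) in TearDownTestCase().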
*kClientConfPath = "./test/integration/unstable_test_client.conf"; -const char* kEtcdClientIpPort = "127.0.0.1:21000"; -const char* kEtcdPeerIpPort = "127.0.0.1:20999"; -const char* kMdsIpPort = "127.0.0.1:30010"; -const char* kClientInflightNum = "6"; -const char* kLogPath = "./runlog/"; +const char *kEtcdClientIpPort = "127.0.0.1:21000"; +const char *kEtcdPeerIpPort = "127.0.0.1:20999"; +const char *kMdsIpPort = "127.0.0.1:30010"; +const char *kClientInflightNum = "6"; +const char *kLogPath = "./runlog/"; curve::client::PerSecondMetric iops("test", "iops"); -std::atomic running{ false }; +std::atomic running{false}; const std::vector chunkserverConfigOpts{ "chunkfilepool.enable_get_chunk_from_pool=false", - "walfilepool.enable_get_segment_from_pool=false" -}; + "walfilepool.enable_get_segment_from_pool=false"}; -const std::vector mdsConfigOpts{ - std::string("mds.etcd.endpoint=") + std::string(kEtcdClientIpPort) -}; +const std::vector mdsConfigOpts{std::string("mds.etcd.endpoint=") + + std::string(kEtcdClientIpPort)}; const std::vector clientConfigOpts{ std::string("mds.listen.addr=") + kMdsIpPort, @@ -81,9 +79,8 @@ const std::vector mdsConf{ std::string("--confPath=") + kMdsConfPath, std::string("--mdsAddr=") + kMdsIpPort, std::string("--etcdAddr=") + kEtcdClientIpPort, - { "--log_dir=./runlog/mds" }, - { "--stderrthreshold=3" } -}; + {"--log_dir=./runlog/mds"}, + {"--stderrthreshold=3"}}; const std::vector chunkserverConfTemplate{ {"-raft_sync_segments=true"}, @@ -105,14 +102,21 @@ const std::vector chunkserverConfTemplate{ {"--stderrthreshold=3"}}; const std::vector chunkserverPorts{ - 31000, 31001, 31010, 31011, 31020, 31021, + 31000, + 31001, + 31010, + 31011, + 31020, + 31021, }; -std::vector GenChunkserverConf(int port) { +std::vector GenChunkserverConf(int port) +{ std::vector conf(chunkserverConfTemplate); char temp[NAME_MAX_SIZE]; - auto formatter = [&](const std::string& format, int port) { + auto formatter = [&](const std::string &format, int port) + { snprintf(temp, sizeof(temp), format.c_str(), port); return temp; }; @@ -138,23 +142,21 @@ std::vector GenChunkserverConf(int port) { return conf; } -off_t RandomWriteOffset() { - return rand() % 32 * (16 * 1024 * 1024); -} +off_t RandomWriteOffset() { return rand() % 32 * (16 * 1024 * 1024); } -size_t RandomWriteLength() { - return rand() % 32 * 4096; -} +size_t RandomWriteLength() { return rand() % 32 * 4096; } static char buffer[1024 * 4096]; -struct ChunkserverParam { +struct ChunkserverParam +{ int id; int port; - std::string addr{ "127.0.0.1:" }; + std::string addr{"127.0.0.1:"}; std::vector conf; - ChunkserverParam(int id, int port) { + ChunkserverParam(int id, int port) + { this->id = id; this->port = port; this->addr.append(std::to_string(port)); @@ -162,10 +164,12 @@ struct ChunkserverParam { } }; -class UnstableCSModuleException : public ::testing::Test { - protected: - static void SetUpTestCase() { - // 清理文件夹 +class UnstableCSModuleException : public ::testing::Test +{ +protected: + static void SetUpTestCase() + { + // Clean Up Folder system("rm -rf module_exception_curve_unstable_cs.etcd"); system("rm -rf ttt"); system("mkdir -p ttt"); @@ -175,7 +179,7 @@ class UnstableCSModuleException : public ::testing::Test { cluster.reset(new CurveCluster()); ASSERT_NE(nullptr, cluster.get()); - // 生成配置文件 + // Generate Configuration File cluster->PrepareConfig(kMdsConfPath, mdsConfigOpts); cluster->PrepareConfig(kCSConfPath, @@ -183,50 +187,53 @@ class UnstableCSModuleException : public ::testing::Test { 
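+        // PrepareConfig is the templated helper declared in cluster.h: the
+        // template argument selects a ConfigGenerator (mds / chunkserver /
+        // client), and the second parameter lists the options to override
+        // in the generated file.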
cluster->PrepareConfig(kClientConfPath, clientConfigOpts); - // 1. 启动etcd + // 1. Start etcd pid_t pid = cluster->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, std::vector{ - "--name=module_exception_curve_unstable_cs" }); + "--name=module_exception_curve_unstable_cs"}); LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort << ":" << kEtcdPeerIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 2. 启动一个mds + // 2. Start an mds pid = cluster->StartSingleMDS(1, kMdsIpPort, 30013, mdsConf, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(2)); - // 3. 创建物理池 - ASSERT_EQ( - 0, - cluster->PreparePhysicalPool( - 1, - "./test/integration/client/config/unstable/" - "topo_unstable.json")); + // 3. Creating a physical pool + ASSERT_EQ(0, cluster->PreparePhysicalPool( + 1, + "./test/integration/client/config/unstable/" + "topo_unstable.json")); - // 4. 创建chunkserver + // 4. Create chunkserver StartAllChunkserver(); std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池,并睡眠一段时间让底层copyset先选主 - ASSERT_EQ(0, cluster->PrepareLogicalPool( - 1, "test/integration/client/config/unstable/topo_unstable.json")); + // 5. Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first + ASSERT_EQ( + 0, + cluster->PrepareLogicalPool( + 1, + "test/integration/client/config/unstable/topo_unstable.json")); std::this_thread::sleep_for(std::chrono::seconds(10)); - // 6. 初始化client配置 + // 6. Initialize client configuration int ret = Init(kClientConfPath); ASSERT_EQ(ret, 0); - // 7. 先睡眠10s,让chunkserver选出leader + // 7. Sleep for 10 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(10)); } - static void TearDownTestCase() { + static void TearDownTestCase() + { UnInit(); ASSERT_EQ(0, cluster->StopCluster()); - // 清理文件夹 + // Clean Up Folder system("rm -rf module_exception_curve_unstable_cs.etcd"); system("rm -rf module_exception_curve_unstable_cs"); system("rm -rf ttt"); @@ -236,9 +243,11 @@ class UnstableCSModuleException : public ::testing::Test { ::unlink(kClientConfPath); } - static void StartAllChunkserver() { + static void StartAllChunkserver() + { int id = 1; - for (auto port : chunkserverPorts) { + for (auto port : chunkserverPorts) + { ChunkserverParam param(id, port); chunkServers.emplace(id, param); @@ -252,18 +261,21 @@ class UnstableCSModuleException : public ::testing::Test { } } - static void OpenAndWrite(const std::string& filename) { + static void OpenAndWrite(const std::string &filename) + { int fd = curve::test::FileCommonOperation::Open(filename, "curve"); ASSERT_NE(-1, fd); std::vector writeThs; - for (int i = 0; i < 5; ++i) { + for (int i = 0; i < 5; ++i) + { writeThs.emplace_back(AioWriteFunc, fd); LOG(INFO) << "write " << filename << ", thread " << (i + 1) << " started"; } - for (auto& th : writeThs) { + for (auto &th : writeThs) + { th.join(); } @@ -271,14 +283,17 @@ class UnstableCSModuleException : public ::testing::Test { LOG(INFO) << "stop all write thread, filename " << filename; } - static void AioWriteFunc(int fd) { - auto cb = [](CurveAioContext* ctx) { + static void AioWriteFunc(int fd) + { + auto cb = [](CurveAioContext *ctx) + { iops.count << 1; delete ctx; }; - while (running) { - CurveAioContext* context = new CurveAioContext; + while (running) + { + CurveAioContext *context = new CurveAioContext; context->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; context->cb = cb; context->offset = 
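+            // RandomWriteOffset() returns rand() % 32 * 16 MiB
+            // (0..496 MiB, segment aligned) and RandomWriteLength() returns
+            // rand() % 32 * 4 KiB (0..124 KiB), so a zero-length request is
+            // possible.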
RandomWriteOffset(); @@ -300,9 +315,11 @@ class UnstableCSModuleException : public ::testing::Test { int UnstableCSModuleException::fd = 0; std::unique_ptr UnstableCSModuleException::cluster; -std::unordered_map UnstableCSModuleException::chunkServers; // NOLINT +std::unordered_map + UnstableCSModuleException::chunkServers; // NOLINT -TEST_F(UnstableCSModuleException, TestCommonReadAndWrite) { +TEST_F(UnstableCSModuleException, TestCommonReadAndWrite) +{ const std::string filename = "/TestCommonReadAndWrite"; constexpr size_t length = 4ull * 1024 * 1024; constexpr off_t offset = 4ull * 1024 * 1024; @@ -323,28 +340,31 @@ TEST_F(UnstableCSModuleException, TestCommonReadAndWrite) { ::Close(fd); } -// 集群拓扑结构 -// 1个client -// 1个etcd -// 1个mds -// 3个zone,每个里面2个chunkserver -TEST_F(UnstableCSModuleException, HangOneZone) { +// Cluster topology +// 1 client +// 1 ETCD +// 1 mds +// 3 zones, each with 2 chunkservers inside +TEST_F(UnstableCSModuleException, HangOneZone) +{ srand(time(nullptr)); - // 开启多个线程写文件 + // Enable multiple threads to write files LOG(INFO) << "starting write..."; running = true; std::vector openAndWriteThreads; - for (int i = 0; i < 2; ++i) { + for (int i = 0; i < 2; ++i) + { openAndWriteThreads.emplace_back( &UnstableCSModuleException::OpenAndWrite, "/test" + std::to_string(i)); } - // 正常写入60s, 并记录后30秒的iops + // Write normally for 60 seconds and record the IOPS for the next 30 seconds std::vector beforeRecords; std::this_thread::sleep_for(std::chrono::seconds(30)); - for (int i = 1; i <= 30; ++i) { + for (int i = 1; i <= 30; ++i) + { std::this_thread::sleep_for(std::chrono::seconds(1)); beforeRecords.push_back(iops.value.get_value(1)); } @@ -353,19 +373,21 @@ TEST_F(UnstableCSModuleException, HangOneZone) { beforeRecords.size(); LOG(INFO) << "iops before hang: " << beforeAvgIOps; - // hang一个zone的chunkserver + // Hang a chunkserver for a zone LOG(INFO) << "hang one zone"; ASSERT_EQ(0, cluster->HangChunkServer(1)); ASSERT_EQ(0, cluster->HangChunkServer(2)); std::vector afterRecords; - // 打印每一秒的iops情况 - for (int i = 1; i <= 10; ++i) { + // Print IOPS per second + for (int i = 1; i <= 10; ++i) + { std::this_thread::sleep_for(std::chrono::seconds(1)); auto tmp = iops.value.get_value(1); LOG(INFO) << "after " << i << "s, iops: " << tmp; - // 记录后5s的iops值 - if (i >= 5) { + // Record the iops value for 5 seconds after recording + if (i >= 5) + { afterRecords.push_back(tmp); } } @@ -381,7 +403,8 @@ TEST_F(UnstableCSModuleException, HangOneZone) { ASSERT_EQ(0, cluster->RecoverHangChunkServer(2)); running = false; - for (auto& th : openAndWriteThreads) { + for (auto &th : openAndWriteThreads) + { th.join(); } LOG(INFO) << "all write thread stoped"; diff --git a/test/integration/cluster_common/cluster.cpp b/test/integration/cluster_common/cluster.cpp index 5d32ef8539..72410a5ca7 100644 --- a/test/integration/cluster_common/cluster.cpp +++ b/test/integration/cluster_common/cluster.cpp @@ -20,26 +20,28 @@ * Author: lixiaocui */ -#include +#include "test/integration/cluster_common/cluster.h" + #include -#include -#include -#include #include #include #include -#include -#include //NOLINT +#include +#include +#include +#include + #include //NOLINT +#include #include +#include +#include //NOLINT #include #include -#include -#include "test/integration/cluster_common/cluster.h" +#include "src/client/client_common.h" #include "src/common/string_util.h" #include "src/common/timeutility.h" -#include "src/client/client_common.h" #include "src/kvstorageclient/etcd_client.h" using 
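+// A note on the exec helpers defined below: VecStr2VecChar() copies a
+// vector of strings into a NULL-terminated argv array for exec*(), and
+// ClearArgv() frees those copies; in the forked child branches ClearArgv()
+// only runs when exec itself fails and returns.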
::curve::client::UserInfo_t; @@ -50,29 +52,29 @@ namespace curve { using ::curve::client::CreateFileContext; -int CurveCluster::InitMdsClient(const curve::client::MetaServerOption &op) { +int CurveCluster::InitMdsClient(const curve::client::MetaServerOption& op) { mdsClient_ = std::make_shared(); return mdsClient_->Initialize(op); } -std::vector VecStr2VecChar(std::vector args) { - std::vector argv(args.size() + 1); // for the NULL terminator +std::vector VecStr2VecChar(std::vector args) { + std::vector argv(args.size() + 1); // for the NULL terminator for (std::size_t i = 0; i < args.size(); ++i) { // not include cmd - argv[i] = new char[args[i].size()+1]; + argv[i] = new char[args[i].size() + 1]; snprintf(argv[i], args[i].size() + 1, "%s", args[i].c_str()); } argv[args.size()] = NULL; return argv; } -void ClearArgv(const std::vector &argv) { - for (auto const &item : argv) { - delete [] item; +void ClearArgv(const std::vector& argv) { + for (auto const& item : argv) { + delete[] item; } } int CurveCluster::InitSnapshotCloneMetaStoreEtcd( - const std::string &etcdEndpoints) { + const std::string& etcdEndpoints) { EtcdConf conf; conf.Endpoints = new char[etcdEndpoints.size()]; std::memcpy(conf.Endpoints, etcdEndpoints.c_str(), etcdEndpoints.size()); @@ -88,8 +90,8 @@ int CurveCluster::InitSnapshotCloneMetaStoreEtcd( } auto codec = std::make_shared(); - metaStore_ = std::make_shared(etcdClient, - codec); + metaStore_ = + std::make_shared(etcdClient, codec); if (metaStore_->Init() < 0) { LOG(ERROR) << "metaStore init fail."; return -1; @@ -106,17 +108,13 @@ int CurveCluster::StopCluster() { LOG(INFO) << "stop cluster begin..."; int ret = 0; - if (StopAllMDS() < 0) - ret = -1; + if (StopAllMDS() < 0) ret = -1; - if (StopAllChunkServer() < 0) - ret = -1; + if (StopAllChunkServer() < 0) ret = -1; - if (StopAllSnapshotCloneServer() < 0) - ret = -1; + if (StopAllSnapshotCloneServer() < 0) ret = -1; - if (StopAllEtcd() < 0) - ret = -1; + if (StopAllEtcd() < 0) ret = -1; if (!ret) LOG(INFO) << "success stop cluster"; @@ -125,9 +123,9 @@ int CurveCluster::StopCluster() { return ret; } -int CurveCluster::StartSingleMDS(int id, const std::string &ipPort, +int CurveCluster::StartSingleMDS(int id, const std::string& ipPort, int dummyPort, - const std::vector &mdsConf, + const std::vector& mdsConf, bool expectLeader) { LOG(INFO) << "start mds " << ipPort << " begin..."; pid_t pid = ::fork(); @@ -135,20 +133,21 @@ int CurveCluster::StartSingleMDS(int id, const std::string &ipPort, LOG(ERROR) << "start mds " << ipPort << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个mds + // Start an mds in a child process // ./bazel-bin/src/mds/main/curvemds std::vector args; args.emplace_back("./bazel-bin/src/mds/main/curvemds"); args.emplace_back("--mdsAddr=" + ipPort); args.emplace_back("--dummyPort=" + std::to_string(dummyPort)); - for (auto &item : mdsConf) { + for (auto& item : mdsConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! 
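+         * (Rationale: glog serializes its sinks with a mutex, and fork()
+         * clones only the calling thread; if any other thread held that
+         * mutex at fork time, the child inherits it locked and its next
+         * LOG() call can block forever.)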
*/ execv("./bazel-bin/src/mds/main/curvemds", argv.data()); ClearArgv(argv); @@ -221,26 +220,27 @@ int CurveCluster::StopAllMDS() { } int CurveCluster::StartSnapshotCloneServer( - int id, const std::string &ipPort, - const std::vector &snapshotcloneConf) { + int id, const std::string& ipPort, + const std::vector& snapshotcloneConf) { LOG(INFO) << "start snapshotcloneserver " << ipPort << " begin ..."; pid_t pid = ::fork(); if (0 > pid) { LOG(ERROR) << "start snapshotcloneserver " << ipPort << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个snapshotcloneserver + // Starting a snapshotcloneserver in a child process std::vector args; args.emplace_back( "./bazel-bin/src/snapshotcloneserver/snapshotcloneserver"); args.emplace_back("--addr=" + ipPort); - for (auto &item : snapshotcloneConf) { + for (auto& item : snapshotcloneConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! */ execv("./bazel-bin/src/snapshotcloneserver/snapshotcloneserver", argv.data()); @@ -317,19 +317,18 @@ int CurveCluster::StopAllSnapshotCloneServer() { int ret = 0; auto tempMap = snapPidMap_; for (auto pair : tempMap) { - if (StopSnapshotCloneServer(pair.first) < 0) - ret = -1; + if (StopSnapshotCloneServer(pair.first) < 0) ret = -1; } - // 等待进程完全退出 + // Wait for the process to completely exit ::sleep(2); LOG(INFO) << "stop all snapshotcloneservver end."; return ret; } -int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, - const std::string &peerIpPort, - const std::vector &etcdConf) { +int CurveCluster::StartSingleEtcd(int id, const std::string& clientIpPort, + const std::string& peerIpPort, + const std::vector& etcdConf) { LOG(INFO) << "start etcd " << clientIpPort << " begin..."; pid_t pid = ::fork(); @@ -337,7 +336,7 @@ int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, LOG(ERROR) << "start etcd " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个etcd + // Start an ETCD in a child process // ip netns exec integ_etcd1 etcd std::vector args{"etcd"}; args.emplace_back("--listen-peer-urls=http://" + peerIpPort); @@ -348,14 +347,15 @@ int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, args.emplace_back("--initial-cluster-token=etcd-cluster-1"); args.emplace_back("--election-timeout=3000"); args.emplace_back("--heartbeat-interval=300"); - for (auto &item : etcdConf) { + for (auto& item : etcdConf) { args.push_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! 
*/ execvp("etcd", argv.data()); ClearArgv(argv); @@ -380,7 +380,7 @@ bool CurveCluster::WaitForEtcdClusterAvalible(int waitSec) { return false; } else { int i = 0; - for (auto &item : etcdClientIpPort_) { + for (auto& item : etcdClientIpPort_) { i++; if (i == etcdClientIpPort_.size()) { endpoint += "http://" + item.second; @@ -464,9 +464,9 @@ int CurveCluster::StopAllEtcd() { return ret; } -int CurveCluster::FormatFilePool(const std::string &filePooldir, - const std::string &filePoolmetapath, - const std::string &filesystempath, +int CurveCluster::FormatFilePool(const std::string& filePooldir, + const std::string& filePoolmetapath, + const std::string& filesystempath, uint32_t size) { LOG(INFO) << "FormatFilePool begin..."; @@ -475,8 +475,7 @@ int CurveCluster::FormatFilePool(const std::string &filePooldir, " -filePoolMetaPath=" + filePoolmetapath + " -fileSystemPath=" + filesystempath + " -allocateByPercent=false -preAllocateNum=" + - std::to_string(size * 300) + - " -needWriteZero=false"; + std::to_string(size * 300) + " -needWriteZero=false"; RETURN_IF_NOT_ZERO(system(cmd.c_str())); @@ -485,8 +484,8 @@ int CurveCluster::FormatFilePool(const std::string &filePooldir, } int CurveCluster::StartSingleChunkServer( - int id, const std::string &ipPort, - const std::vector &chunkserverConf) { + int id, const std::string& ipPort, + const std::vector& chunkserverConf) { LOG(INFO) << "start chunkserver " << id << ", " << ipPort << " begin..."; std::vector split; ::curve::common::SplitString(ipPort, ":", &split); @@ -500,19 +499,20 @@ int CurveCluster::StartSingleChunkServer( LOG(ERROR) << "start chunkserver " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个chunkserver + // Starting a chunkserver in a child process std::vector args; args.emplace_back("./bazel-bin/src/chunkserver/chunkserver"); args.emplace_back("-chunkServerIp=" + split[0]); args.emplace_back("-chunkServerPort=" + split[1]); - for (auto &item : chunkserverConf) { + for (auto& item : chunkserverConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! */ execv("./bazel-bin/src/chunkserver/chunkserver", argv.data()); ClearArgv(argv); @@ -530,7 +530,7 @@ int CurveCluster::StartSingleChunkServer( } int CurveCluster::StartSingleChunkServerInBackground( - int id, const std::vector &chunkserverConf) { + int id, const std::vector& chunkserverConf) { std::vector ipPort; ::curve::common::SplitString(ChunkServerIpPortInBackground(id), ":", &ipPort); @@ -547,7 +547,7 @@ int CurveCluster::StartSingleChunkServerInBackground( LOG(ERROR) << "start chunkserver " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个chunkserver + // Starting a chunkserver in a child process std::vector args; args.emplace_back("netns"); args.emplace_back("exec"); @@ -555,13 +555,14 @@ int CurveCluster::StartSingleChunkServerInBackground( args.emplace_back("./bazel-bin/src/chunkserver/chunkserver"); args.emplace_back("-chunkServerIp=" + ipPort[0]); args.emplace_back("-chunkServerPort=" + ipPort[1]); - for (auto &item : chunkserverConf) { + for (auto& item : chunkserverConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! 
+ * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! */ execvp("ip", argv.data()); ClearArgv(argv); @@ -723,7 +724,7 @@ std::string CurveCluster::ChunkServerIpPortInBackground(int id) { } int CurveCluster::PreparePhysicalPool(int mdsId, - const std::string &clusterMap) { + const std::string& clusterMap) { LOG(INFO) << "create physicalpool begin..."; std::string createPPCmd = std::string("./bazel-bin/tools/curvefsTool") + @@ -741,15 +742,14 @@ int CurveCluster::PreparePhysicalPool(int mdsId, return 0; } -int CurveCluster::PrepareLogicalPool(int mdsId, const std::string &clusterMap) { +int CurveCluster::PrepareLogicalPool(int mdsId, const std::string& clusterMap) { LOG(INFO) << "create logicalpool begin..."; - std::string createLPCmd = - std::string("./bazel-bin/tools/curvefsTool") + - std::string(" -cluster_map=") + clusterMap + - std::string(" -mds_addr=") + MDSIpPort(mdsId) + - std::string(" -op=create_logicalpool") + - std::string(" -stderrthreshold=0 -minloglevel=0"); + std::string createLPCmd = std::string("./bazel-bin/tools/curvefsTool") + + std::string(" -cluster_map=") + clusterMap + + std::string(" -mds_addr=") + MDSIpPort(mdsId) + + std::string(" -op=create_logicalpool") + + std::string(" -stderrthreshold=0 -minloglevel=0"); LOG(INFO) << "exec cmd: " << createLPCmd; RETURN_IF_NOT_ZERO(system(createLPCmd.c_str())); @@ -758,7 +758,7 @@ int CurveCluster::PrepareLogicalPool(int mdsId, const std::string &clusterMap) { return 0; } -bool CurveCluster::CurrentServiceMDS(int *curId) { +bool CurveCluster::CurrentServiceMDS(int* curId) { for (auto mdsId : mdsPidMap_) { if (0 == ProbePort(mdsIpPort_[mdsId.first], 20000, true)) { *curId = mdsId.first; @@ -772,8 +772,8 @@ bool CurveCluster::CurrentServiceMDS(int *curId) { return false; } -int CurveCluster::CreateFile(const std::string &user, const std::string &pwd, - const std::string &fileName, uint64_t fileSize, +int CurveCluster::CreateFile(const std::string& user, const std::string& pwd, + const std::string& fileName, uint64_t fileSize, bool normalFile, const std::string& poolset) { LOG(INFO) << "create file: " << fileName << ", size: " << fileSize << " begin..."; @@ -785,13 +785,12 @@ int CurveCluster::CreateFile(const std::string &user, const std::string &pwd, context.length = fileSize; context.poolset = poolset; - RETURN_IF_NOT_ZERO( - mdsClient_->CreateFile(context)); + RETURN_IF_NOT_ZERO(mdsClient_->CreateFile(context)); LOG(INFO) << "success create file"; return 0; } -int CurveCluster::ProbePort(const std::string &ipPort, int64_t timeoutMs, +int CurveCluster::ProbePort(const std::string& ipPort, int64_t timeoutMs, bool expectOpen) { int socket_fd = socket(AF_INET, SOCK_STREAM, 0); if (-1 == socket_fd) { @@ -819,7 +818,7 @@ int CurveCluster::ProbePort(const std::string &ipPort, int64_t timeoutMs, uint64_t start = ::curve::common::TimeUtility::GetTimeofDayMs(); while (::curve::common::TimeUtility::GetTimeofDayMs() - start < timeoutMs) { int connectRes = - connect(socket_fd, (struct sockaddr *)&addr, sizeof(addr)); + connect(socket_fd, (struct sockaddr*)&addr, sizeof(addr)); if (expectOpen && connectRes == 0) { LOG(INFO) << "probe " << ipPort << " success."; close(socket_fd); diff --git a/test/integration/cluster_common/cluster.h b/test/integration/cluster_common/cluster.h index e5b04d30c7..71777d5241 100644 --- a/test/integration/cluster_common/cluster.h +++ b/test/integration/cluster_common/cluster.h @@ -23,215 +23,219 @@ #ifndef TEST_INTEGRATION_CLUSTER_COMMON_CLUSTER_H_ #define 
TEST_INTEGRATION_CLUSTER_COMMON_CLUSTER_H_ -#include #include -#include #include -#include "src/client/mds_client.h" +#include +#include + #include "src/client/config_info.h" -#include "test/util/config_generator.h" +#include "src/client/mds_client.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h" +#include "test/util/config_generator.h" -using ::curve::snapshotcloneserver::SnapshotCloneMetaStoreEtcd; using ::curve::client::MDSClient; +using ::curve::snapshotcloneserver::SnapshotCloneMetaStoreEtcd; namespace curve { -#define RETURN_IF_NOT_ZERO(x) \ - do { \ - int ret = (x); \ - if (ret != 0) { \ - LOG(ERROR) << __FILE__ << ":" << __LINE__ \ - << "-> get non-ZERO, return -1"; \ - return ret; \ - } \ +#define RETURN_IF_NOT_ZERO(x) \ + do { \ + int ret = (x); \ + if (ret != 0) { \ + LOG(ERROR) << __FILE__ << ":" << __LINE__ \ + << "-> get non-ZERO, return -1"; \ + return ret; \ + } \ } while (0) -#define RETURN_IF_FALSE(x) \ - do { \ - bool ret = (x); \ - if (!ret) { \ - LOG(ERROR) << __FILE__ << ":" << __LINE__ \ - << "-> get FALSE, return -1"; \ - return -1; \ - } \ +#define RETURN_IF_FALSE(x) \ + do { \ + bool ret = (x); \ + if (!ret) { \ + LOG(ERROR) << __FILE__ << ":" << __LINE__ \ + << "-> get FALSE, return -1"; \ + return -1; \ + } \ } while (0) class CurveCluster { public: /** - * CurveCluster 构造函数 + * CurveCluster constructor * - * @param[in] netWorkSegment 网桥的网络地址,默认为"192.168.200." - * @param[in] nsPrefix 网络命名空间的前缀,默认为"integ_" + * @param[in] netWorkSegment The network address of the bridge, which + * defaults to "192.168.200." + * @param[in] nsPrefix The prefix of the network namespace, which defaults + * to "integ_" */ - CurveCluster(const std::string &netWorkSegment = "192.168.200.", - const std::string &nsPrefix = "integ_") + CurveCluster(const std::string& netWorkSegment = "192.168.200.", + const std::string& nsPrefix = "integ_") : networkSegment_(netWorkSegment), nsPrefix_(nsPrefix) {} /** - * InitMdsClient 初始化mdsclient, 用于和mds交互 + * InitMdsClient initializes mdsclient for interaction with mds * - * @param op 参数设置 - * @return 0.成功; 非0.失败 + * @param op option settings + * @return 0 on success; non-zero on failure */ - int InitMdsClient(const curve::client::MetaServerOption &op); - + int InitMdsClient(const curve::client::MetaServerOption& op); /** - * @brief 初始化metastore + * @brief Initialize metastore * - * @param[in] etcdEndpoints etcd client的ip port + * @param[in] etcdEndpoints etcd client's IP port * - * @return 返回错误码 + * @return returns an error code */ - int InitSnapshotCloneMetaStoreEtcd( - const std::string &etcdEndpoints); + int InitSnapshotCloneMetaStoreEtcd(const std::string& etcdEndpoints); /** - * BuildNetWork 如果需要是用不同的ip来起chunkserver, - * 需要在测试用例的SetUp中先 调用该函数 - * @return 0.成功; 非0.失败 + * BuildNetWork: if chunkservers need to be started with different IPs, + * this function must be called first in the SetUp of the test case + * @return 0 on success; non-zero on failure */ int BuildNetWork();
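Both macros early-return from the enclosing function, so they only fit test helpers that return int. A minimal usage sketch under that assumption (the helper below is hypothetical, not part of this header):

    // Hypothetical helper: run a shell step, then an etcd availability check,
    // bailing out on the first failure via the macros above.
    int PrepareDirAndProbeEtcd(CurveCluster* cluster, const std::string& dir) {
        RETURN_IF_NOT_ZERO(system(("mkdir -p " + dir).c_str()));  // returns ret if non-zero
        RETURN_IF_FALSE(cluster->WaitForEtcdClusterAvalible());   // returns -1 if false
        return 0;
    }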
/** - * StopCluster 停止该集群中所有的进程 - * @return 0.成功; -1.失败 + * StopCluster stops all processes in the cluster + * @return 0.Success; -1.Failure */ int StopCluster(); /** - * @brief 生成各模块配置文件 + * @brief Generate configuration files for each module * - * @tparam T 任一ConfigGenerator - * @param configPath 配置文件路径 - * @param options 修改的配置项 + * @tparam T any ConfigGenerator + * @param configPath Configuration file path + * @param options Configuration items to modify */ - template - void PrepareConfig(const std::string &configPath, - const std::vector &options) { + template + void PrepareConfig(const std::string& configPath, + const std::vector& options) { T gentor(configPath); gentor.SetConfigOptions(options); gentor.Generate(); } /** - * StartSingleMDS 启动一个mds - * 如果需要不同ip的chunkserver,ipPort请设置为192.168.200.1:XXXX + * StartSingleMDS starts an mds + * If chunkservers with different IPs are needed, set ipPort to + 192.168.200.1:XXXX * * @param[in] id mdsId - * @param[in] ipPort 指定mds的ipPort - * @param[in] mdsConf mds启动参数项, 示例: + * @param[in] ipPort specifies the ipPort of the mds + * @param[in] mdsConf mds startup parameter items, example: * const std::vector mdsConf{ {"--graceful_quit_on_sigterm"}, {"--confPath=./test/integration/cluster_common/mds.basic.conf"}, }; - * @param[in] expectLeader 是否预期是leader - * @return 成功则返回pid; 失败则返回-1 + * @param[in] expectLeader whether this mds is expected to become the leader + * @return success returns pid; Failure returns -1 */ - int StartSingleMDS(int id, const std::string &ipPort, int dummyPort, - const std::vector &mdsConf, + int StartSingleMDS(int id, const std::string& ipPort, int dummyPort, + const std::vector& mdsConf, bool expectLeader); /** - * StopMDS 停止指定id的mds - * @return 0.成功; -1.失败 + * StopMDS stops the mds with the specified id + * @return 0.Success; -1.Failure */ int StopMDS(int id); /** - * StopAllMDS 停止所有mds - * @return 0.成功; -1.失败 + * StopAllMDS stops all mds + * @return 0.Success; -1.Failure */ int StopAllMDS(); /** - * @brief 启动一个snapshotcloneserver + * @brief Start a snapshotcloneserver * - * @param id snapshotcloneserver 的Id - * @param ipPort ip端口 - * @param snapshotcloneConf 参数项 - * @return 成功则返回pid; 失败则返回-1 + * @param id The ID of the snapshotcloneserver + * @param ipPort IP Port + * @param snapshotcloneConf startup parameter items + * @return success returns pid; Failure returns -1 */ - int - StartSnapshotCloneServer(int id, const std::string &ipPort, - const std::vector &snapshotcloneConf); + int StartSnapshotCloneServer( + int id, const std::string& ipPort, + const std::vector& snapshotcloneConf); /** - * @brief 停止指定Id的snapshotcloneserver + * @brief Stop the snapshotcloneserver with the specified Id * - * @param id snapshotcloneserver的id - * @param force 为true时使用kill -9 - * @return 成功返回0,失败返回-1 + * @param id The ID of the snapshotcloneserver + * @param force Use kill -9 when it is true + * @return returns 0 for success, -1 for failure */ int StopSnapshotCloneServer(int id, bool force = false); /** - * @brief 重启指定Id的snapshotcloneserver + * @brief Restart the snapshotcloneserver with the specified Id * - * @param id snapshotcloneserver的id - * @param force 为true时使用kill -9 - * @return 成功则返回pid; 失败则返回-1 + * @param id The ID of the snapshotcloneserver + * @param force Use kill -9 when it is true + * @return success returns pid; Failure returns -1 */ int RestartSnapshotCloneServer(int id, bool force = false); /** - * @brief 停止所有的snapshotcloneserver - * @return 成功返回0,失败返回-1 + * @brief Stop all snapshotcloneservers + * @return returns 0 for success, -1 for failure */ int StopAllSnapshotCloneServer();
/** - * StartSingleEtcd 启动一个etcd节点 + * StartSingleEtcd starts an etcd node * * @param clientIpPort * @param peerIpPort - * @param etcdConf etcd启动项参数, 建议按照模块指定name,防止并发运行时冲突 + * @param etcdConf etcd startup parameters; it is recommended to include the + * module name in --name to avoid conflicts between concurrent runs * std::vector{"--name basic_test_start_stop_module1"} - * @return 成功则返回pid; 失败则返回-1 + * @return success returns pid; Failure returns -1 */ - int StartSingleEtcd(int id, const std::string &clientIpPort, - const std::string &peerIpPort, - const std::vector &etcdConf); + int StartSingleEtcd(int id, const std::string& clientIpPort, + const std::string& peerIpPort, + const std::vector& etcdConf); /** * WaitForEtcdClusterAvalible - * 在一定时间内等待etcd集群leader选举成功,处于可用状态 + * Waits up to a certain time for the etcd cluster to elect a leader and + * become available */ bool WaitForEtcdClusterAvalible(int waitSec = 20); /** - * StopEtcd 停止指定id的etcd节点 - * @return 0.成功; -1.失败 + * StopEtcd stops the etcd node with the specified id + * @return 0.Success; -1.Failure */ int StopEtcd(int id); /** - * StopAllEtcd 停止所有etcd节点 - * @return 0.成功; -1.失败 + * StopAllEtcd stops all etcd nodes + * @return 0.Success; -1.Failure */ int StopAllEtcd(); /** - * @brief 格式化FilePool + * @brief Format FilePool * - * @param filePooldir FilePool目录 - * @param filePoolmetapath FilePool元数据目录 - * @param filesystemPath 文件系统目录 + * @param filePooldir FilePool directory + * @param filePoolmetapath FilePool metadata directory + * @param filesystemPath file system directory * @param size FilePool size (GB) - * @return 成功返回0,失败返回-1 + * @return returns 0 for success, -1 for failure */ - int FormatFilePool(const std::string &filePooldir, - const std::string &filePoolmetapath, - const std::string &filesystemPath, uint32_t size); + int FormatFilePool(const std::string& filePooldir, + const std::string& filePoolmetapath, + const std::string& filesystemPath, uint32_t size); /** - * StartSingleChunkServer 启动一个chunkserver节点 + * StartSingleChunkServer starts a chunkserver node * * @param[in] id * @param[in] ipPort - * @param[in] chunkserverConf chunkserver启动项,示例: + * @param[in] chunkserverConf chunkserver startup items, example: * const std::vector chunkserverConf1{ {"--graceful_quit_on_sigterm"}, {"-chunkServerStoreUri=local://./basic1/"}, @@ -243,209 +247,218 @@ class CurveCluster { {"-conf=./test/integration/cluster_common/chunkserver.basic.conf"}, {"-raft_sync_segments=true"}, }; - 建议文件名也按模块的缩写来,文件名不能太长,否则注册到数据库会失败 - * @return 成功则返回pid; 失败则返回-1 + It is recommended to name the file after the module's abbreviation; the + file name must not be too long, otherwise registering it to the database + will fail
+ * @return success returns pid; Failure returns -1 */ - int StartSingleChunkServer(int id, const std::string &ipPort, - const std::vector &chunkserverConf); + int StartSingleChunkServer(int id, const std::string& ipPort, + const std::vector& chunkserverConf); /** - * StartSingleChunkServer 在网络命名空间内启动一个指定id的chunkserver - * 无需指定ipPort + * StartSingleChunkServerInBackground starts a chunkserver with the + * specified id inside the network namespace; no ipPort needs to be + * specified * * @param id - * @param chunkserverConf, 同StartSingleChunkServer的示例 - * @return 成功则返回pid; 失败则返回-1 + * @param chunkserverConf same as the StartSingleChunkServer example + * @return success returns pid; Failure returns -1 */ int StartSingleChunkServerInBackground( - int id, const std::vector &chunkserverConf); + int id, const std::vector& chunkserverConf); /** - * StopChunkServer 停掉指定id的chunkserver进程 - * @return 0.成功; -1.失败 + * StopChunkServer stops the chunkserver process with the specified id + * @return 0.Success; -1.Failure */ int StopChunkServer(int id); /** - * StopAllChunkServer 停止所有chunkserver - * @return 0.成功; -1.失败 + * StopAllChunkServer stops all chunkservers + * @return 0.Success; -1.Failure */ int StopAllChunkServer(); /** - * PreparePhysicalPool 创建物理池 + * PreparePhysicalPool creates the physical pool * - * @param[in] id 给指定id的mds发送命令 - * @param[in] clusterMap 拓扑信息,示例: - * ./test/integration/cluster_common/cluster_common_topo_1.txt (不同ip) + * @param[in] mdsId Send the command to the mds with the specified id + * @param[in] clusterMap topology information, example: + * ./test/integration/cluster_common/cluster_common_topo_1.txt (different + * IPs) * ./test/integration/cluster_common/cluster_common_topo_2.txt - * (相同ip, 一定要加上port加以区分, - * chunkserver也必须和clusterMap中server的ipPort相同) - * @return 0.成功; -1.失败 + * (same IP: ports must be used to distinguish servers, and each + * chunkserver must match the ipPort of its server in the clusterMap) + * @return 0.Success; -1.Failure */ - int PreparePhysicalPool(int mdsId, const std::string &clusterMap); + int PreparePhysicalPool(int mdsId, const std::string& clusterMap); /** - * @return 0.成功; -1.失败 + * @return 0.Success; -1.Failure */ - int PrepareLogicalPool(int mdsId, const std::string &clusterMap); + int PrepareLogicalPool(int mdsId, const std::string& clusterMap); /** - * MDSIpPort 获取指定id的mds地址 + * MDSIpPort retrieves the mds address of the specified id */ std::string MDSIpPort(int id); /** - * EtcdClientIpPort 获取指定id的etcd client地址 + * EtcdClientIpPort retrieves the etcd client address for the specified id */ std::string EtcdClientIpPort(int id); /** - * EtcdPeersIpPort 获取指定id的etcd peers地址 + * EtcdPeersIpPort retrieves the etcd peers address of the specified id */ std::string EtcdPeersIpPort(int id); /** - * ChunkServerIpPort 获取指定id的chunkserver地址 + * ChunkServerIpPort retrieves the chunkserver address for the specified id */ std::string ChunkServerIpPort(int id); /** - * HangMDS hang住指定mds进程 - * @return 0.成功; -1.失败 + * HangMDS hangs the specified mds process + * @return 0.Success; -1.Failure */ int HangMDS(int id); /** - * RecoverHangMDS 恢复hang住的mds进程 - * @return 0.成功; -1.失败 + * RecoverHangMDS resumes a hung mds process + * @return 0.Success; -1.Failure */ int RecoverHangMDS(int id); /** - * HangEtcd hang住指定etcd进程 - * @return 0.成功; -1.失败 + * HangEtcd hangs the specified etcd process + * @return 0.Success; -1.Failure */ int HangEtcd(int id); /** - * RecoverHangEtcd 恢复hang住的mds进程 - * @return 0.成功; -1.失败 + * RecoverHangEtcd resumes a hung etcd process + * @return 0.Success; -1.Failure */ int RecoverHangEtcd(int id); /** - * HangChunkServer hang住指定chunkserver进程 - * @return 0.成功; -1.失败 + * HangChunkServer hangs the specified chunkserver process + * @return 0.Success; -1.Failure */ int HangChunkServer(int id); /** - * RecoverHangChunkServer 恢复hang住的chunkserver进程 - * @return 0.成功; -1.失败 + * RecoverHangChunkServer resumes a hung chunkserver process + * @return 0.Success; -1.Failure */ int RecoverHangChunkServer(int id); /** - * CurrentServiceMDS 获取当前正在提供服务的mds + * CurrentServiceMDS obtains the mds currently providing service * - * @param[out] curId 当前正在服务的mds编号 + * @param[out] curId the id of the currently serving mds * - * @return true表示有正在服务的mds, false表示没有正在服务的mds + * @return true if an mds is currently serving, false otherwise */ - bool CurrentServiceMDS(int *curId); + bool CurrentServiceMDS(int* curId); /** - * CreateFile 在curve中创建文件 + * CreateFile creates a file in Curve. * - * @param[in] user 用户 - * @param[in] pwd 密码 - * @param[in] fileName 文件名 - * @param[in] fileSize 文件大小 - * @param[in] normalFile 是否为normal file - * @return 0.成功; -1.失败 - */ - int CreateFile(const std::string &user, const std::string &pwd, - const std::string &fileName, uint64_t fileSize = 0, + * @param[in] user User + * @param[in] pwd Password + * @param[in] fileName File name + * @param[in] fileSize File size + * @param[in] normalFile Whether it is a normal file + * @return 0. Success; -1. Failure + */ + int CreateFile(const std::string& user, const std::string& pwd, + const std::string& fileName, uint64_t fileSize = 0, bool normalFile = true, const std::string& poolset = ""); private: /** - * ProbePort 探测指定ipPort是否处于监听状态 + * ProbePort checks if the specified ipPort is in a listening state. * - * @param[in] ipPort 指定的ipPort值 - * @param[in] timeoutMs 探测的超时时间,单位是ms - * @param[in] expectOpen 是否希望是监听状态 + * @param[in] ipPort The specified ipPort value. + * @param[in] timeoutMs The timeout for probing in milliseconds. + * @param[in] expectOpen Whether it is expected to be in a listening state. * - * @return 0表示指定时间内的探测符合预期. -1表示指定时间内的探测不符合预期 + * @return 0 if the probe result matches the expectation within the given + * time; -1 otherwise */ - int ProbePort(const std::string &ipPort, int64_t timeoutMs, + int ProbePort(const std::string& ipPort, int64_t timeoutMs, bool expectOpen); /** * ChunkServerIpPortInBackground - * 在需要不同ip的chunkserver的情况下,用于生成chunkserver ipPort + * Used to generate the chunkserver ipPort when chunkservers with + * different IPs are required */ std::string ChunkServerIpPortInBackground(int id); /** - * HangProcess hang住一个进程 + * HangProcess hangs a process * - * @param pid 进程id - * @return 0.成功; -1.失败 + * @param pid process id + * @return 0.Success; -1.Failure */ int HangProcess(pid_t pid); /** - * RecoverHangProcess 恢复hang住的进程 + * RecoverHangProcess resumes a hung process * - * @param pid 进程id - * @return 0.成功; -1.失败 + * @param pid process id + * @return 0.Success; -1.Failure */ int RecoverHangProcess(pid_t pid);
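The implementation of these hang/recover helpers is not shown in this patch; a minimal sketch of the usual mechanism (an assumption for illustration, not a quote of CurveCluster's code) is to deliver job-control signals:

    #include <signal.h>     // kill(), SIGSTOP, SIGCONT
    #include <sys/types.h>  // pid_t

    // Sketch only: SIGSTOP suspends the target process without killing it,
    // and SIGCONT resumes it, which is what makes HangMDS-style fault
    // injection reversible (unlike kill -9).
    int HangProcessSketch(pid_t pid) { return kill(pid, SIGSTOP); }
    int RecoverHangProcessSketch(pid_t pid) { return kill(pid, SIGCONT); }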
private: - // 网络号 + // Network number std::string networkSegment_; - // 网络命名空间前缀 + // Network namespace prefix std::string nsPrefix_; - // mds的id对应的进程号 + // The pid corresponding to the mds id std::map mdsPidMap_; - // mds的id对应的ipport + // The ipPort corresponding to the mds id std::map mdsIpPort_; - // snapshotcloneserver id对应的pid + // The pid corresponding to the snapshotcloneserver id std::map snapPidMap_; - // snapshotcloneserver id对应的ipPort + // The ipPort corresponding to the snapshotcloneserver id std::map snapIpPort_; - // snapshotcloneserver id对应的conf + // The conf corresponding to the snapshotcloneserver id std::map> snapConf_; - // etcd的id对应的进程号 + // The pid corresponding to the etcd id std::map etcdPidMap_; - // etcd的id对应的client ipport + // The client ipPort corresponding to the etcd id std::map etcdClientIpPort_; - // etcd的id对应的peer ipport + // The peer ipPort corresponding to the etcd id std::map etcdPeersIpPort_; - // chunkserver的id对应的进程号 + // The pid corresponding to the chunkserver id std::map chunkserverPidMap_; - // chunkserver的id对应的ipport + // The ipPort corresponding to the chunkserver id std::map chunkserverIpPort_; // mdsClient std::shared_ptr mdsClient_; public: - // SnapshotCloneMetaStore用于测试过程中灌数据 + // SnapshotCloneMetaStore for filling data during testing std::shared_ptr metaStore_; }; } // namespace curve diff --git a/test/integration/cluster_common/cluster_basic_test.cpp b/test/integration/cluster_common/cluster_basic_test.cpp index 8f49b1ebe0..071bc58e1f 100644 --- a/test/integration/cluster_common/cluster_basic_test.cpp +++ b/test/integration/cluster_common/cluster_basic_test.cpp @@ -14,7 +14,6 @@ * limitations under the License.
*/ - /* * Project: curve * Created Date: 19-09-02 @@ -22,113 +21,110 @@ */ #include + +#include //NOLINT #include #include -#include #include //NOLINT -#include //NOLINT +#include + #include "test/integration/cluster_common/cluster.h" namespace curve { const std::vector mdsConf{ - { "--graceful_quit_on_sigterm" }, - { "--confPath=./conf/mds.conf" }, - { "--mdsDbName=cluster_common_curve_mds" }, - { "--sessionInterSec=30" }, + {"--graceful_quit_on_sigterm"}, + {"--confPath=./conf/mds.conf"}, + {"--mdsDbName=cluster_common_curve_mds"}, + {"--sessionInterSec=30"}, }; const std::vector chunkserverConf1{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./basic1/" }, - { "-chunkServerMetaUri=local://./basic1/chunkserver.dat" }, - { "-copySetUri=local://./basic1/copysets" }, - { "-raftSnapshotUri=curve://./basic1/copysets" }, - { "-raftLogUri=curve://./basic1/copysets" }, - { "-recycleUri=local://./basic1/recycler" }, - { "-chunkFilePoolDir=./basic1/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./basic1/chunkfilepool.meta" }, - { "-conf=./conf/chunkserver.conf.example" }, - { "-raft_sync_segments=true" }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=./basic1/walfilepool/" }, - { "-walFilePoolMetaPath=./basic1/walfilepool.meta" } -}; + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./basic1/"}, + {"-chunkServerMetaUri=local://./basic1/chunkserver.dat"}, + {"-copySetUri=local://./basic1/copysets"}, + {"-raftSnapshotUri=curve://./basic1/copysets"}, + {"-raftLogUri=curve://./basic1/copysets"}, + {"-recycleUri=local://./basic1/recycler"}, + {"-chunkFilePoolDir=./basic1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./basic1/chunkfilepool.meta"}, + {"-conf=./conf/chunkserver.conf.example"}, + {"-raft_sync_segments=true"}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=./basic1/walfilepool/"}, + {"-walFilePoolMetaPath=./basic1/walfilepool.meta"}}; const std::vector chunkserverConf2{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./basic2/" }, - { "-chunkServerMetaUri=local://./basic2/chunkserver.dat" }, - { "-copySetUri=local://./basic2/copysets" }, - { "-raftSnapshotUri=curve://./basic2/copysets" }, - { "-raftLogUri=curve://./basic2/copysets" }, - { "-recycleUri=local://./basic2/recycler" }, - { "-chunkFilePoolDir=./basic2/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./basic2/chunkfilepool.meta" }, - { "-conf=./conf/chunkserver.conf.example" }, - { "-raft_sync_segments=true" }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=./basic2/walfilepool/" }, - { "-walFilePoolMetaPath=./basic2/walfilepool.meta" } -}; + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./basic2/"}, + {"-chunkServerMetaUri=local://./basic2/chunkserver.dat"}, + {"-copySetUri=local://./basic2/copysets"}, + {"-raftSnapshotUri=curve://./basic2/copysets"}, + {"-raftLogUri=curve://./basic2/copysets"}, + {"-recycleUri=local://./basic2/recycler"}, + {"-chunkFilePoolDir=./basic2/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./basic2/chunkfilepool.meta"}, + {"-conf=./conf/chunkserver.conf.example"}, + {"-raft_sync_segments=true"}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=./basic2/walfilepool/"}, + {"-walFilePoolMetaPath=./basic2/walfilepool.meta"}}; const std::vector chunkserverConf3{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./basic3/" }, - { 
"-chunkServerMetaUri=local://./basic3/chunkserver.dat" }, - { "-copySetUri=local://./basic3/copysets" }, - { "-raftSnapshotUri=curve://./basic3/copysets" }, - { "-raftLogUri=curve://./basic3/copysets" }, - { "-recycleUri=local://./basic3/recycler" }, - { "-chunkFilePoolDir=./basic3/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./basic3/chunkfilepool.meta" }, - { "-conf=./conf/chunkserver.conf.example" }, - { "-raft_sync_segments=true" }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=./basic3/walfilepool/" }, - { "-walFilePoolMetaPath=./basic3/walfilepool.meta" } -}; + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./basic3/"}, + {"-chunkServerMetaUri=local://./basic3/chunkserver.dat"}, + {"-copySetUri=local://./basic3/copysets"}, + {"-raftSnapshotUri=curve://./basic3/copysets"}, + {"-raftLogUri=curve://./basic3/copysets"}, + {"-recycleUri=local://./basic3/recycler"}, + {"-chunkFilePoolDir=./basic3/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./basic3/chunkfilepool.meta"}, + {"-conf=./conf/chunkserver.conf.example"}, + {"-raft_sync_segments=true"}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=./basic3/walfilepool/"}, + {"-walFilePoolMetaPath=./basic3/walfilepool.meta"}}; class ClusterBasicTest : public ::testing::Test { protected: void SetUp() { curveCluster_ = std::make_shared(); - // TODO(lixiaocui): 需要用sudo去运行,后续打开 + // TODO(lixiaocui): It needs to be run with sudo and opened later // curveCluster_->BuildNetWork(); } - void TearDown() { - ASSERT_EQ(0, curveCluster_->StopCluster()); - } + void TearDown() { ASSERT_EQ(0, curveCluster_->StopCluster()); } protected: std::shared_ptr curveCluster_; }; -// TODO(lixiaocui): 需要sudo运行,ci变更后打开 +// TODO(lixiaocui): Requires sudo to run and open after ci changes TEST_F(ClusterBasicTest, DISABLED_test_start_stop_module1) { - // 起etcd + // Starting etcd pid_t pid = curveCluster_->StartSingleEtcd( 1, "127.0.0.1:2221", "127.0.0.1:2222", - std::vector{ "--name=basic_test_start_stop_module1" }); + std::vector{"--name=basic_test_start_stop_module1"}); LOG(INFO) << "etcd 1 started on 127.0.0.1:2221:2222, pid = " << pid; ASSERT_GT(pid, 0); - // 起mds + // Starting mds pid = curveCluster_->StartSingleMDS(1, "192.168.200.1:3333", 3334, mdsConf, true); LOG(INFO) << "mds 1 started on 192.168.200.1:3333, pid = " << pid; ASSERT_GT(pid, 0); - // 创建物理池 + // Creating a physical pool ASSERT_EQ( 0, curveCluster_->PreparePhysicalPool( 1, "./test/integration/cluster_common/cluster_common_topo_1.json")); - // 创建chunkserver + // Create chunkserver pid = curveCluster_->StartSingleChunkServerInBackground(1, chunkserverConf1); LOG(INFO) << "chunkserver 1 started in background, pid = " << pid; @@ -142,17 +138,19 @@ TEST_F(ClusterBasicTest, DISABLED_test_start_stop_module1) { LOG(INFO) << "chunkserver 3 started in background, pid = " << pid; ASSERT_GT(pid, 0); - // 创建逻辑池和copyset - ASSERT_EQ(0, curveCluster_->PrepareLogicalPool( - 1, "./test/integration/cluster_common/cluster_common_topo_1.json")); + // Creating logical pools and copysets + ASSERT_EQ( + 0, + curveCluster_->PrepareLogicalPool( + 1, "./test/integration/cluster_common/cluster_common_topo_1.json")); - // 停掉chunkserver + // Stop chunkserver ASSERT_EQ(0, curveCluster_->StopChunkServer(1)); ASSERT_EQ(0, curveCluster_->StopChunkServer(2)); ASSERT_EQ(0, curveCluster_->StopChunkServer(3)); - // 停掉mds + // Stop mds ASSERT_EQ(0, curveCluster_->StopMDS(1)); - // 停掉etcd + // Stop etcd ASSERT_EQ(0, curveCluster_->StopEtcd(1)); 
system("rm -r test_start_stop_module1.etcd"); @@ -165,16 +163,16 @@ TEST_F(ClusterBasicTest, test_start_stop_module2) { ASSERT_EQ(0, system("rm -fr basic*")); ASSERT_EQ(0, system((std::string("mkdir -p ") + commonDir).c_str())); - // 起etcd + // Starting etcd std::string etcdDir = commonDir + "/etcd.log"; pid_t pid = curveCluster_->StartSingleEtcd( 1, "127.0.0.1:2221", "127.0.0.1:2222", - std::vector{ "--name=test_start_stop_module2" }); + std::vector{"--name=test_start_stop_module2"}); LOG(INFO) << "etcd 1 started on 127.0.0.1:2221:2222, pid = " << pid; ASSERT_GT(pid, 0); ASSERT_TRUE(curveCluster_->WaitForEtcdClusterAvalible()); - // 起mds + // Starting mds auto mdsConfbak = mdsConf; auto mdsDir = commonDir + "/mds"; ASSERT_EQ(0, system((std::string("mkdir ") + mdsDir).c_str())); @@ -184,19 +182,19 @@ TEST_F(ClusterBasicTest, test_start_stop_module2) { true); LOG(INFO) << "mds 1 started on 127.0.0.1:3333, pid = " << pid; ASSERT_GT(pid, 0); - // 初始化mdsclient + // Initialize mdsclient curve::client::MetaServerOption op; op.rpcRetryOpt.rpcTimeoutMs = 4000; - op.rpcRetryOpt.addrs = std::vector{ "127.0.0.1:3333" }; + op.rpcRetryOpt.addrs = std::vector{"127.0.0.1:3333"}; ASSERT_EQ(0, curveCluster_->InitMdsClient(op)); - // 创建物理池 + // Creating a physical pool ASSERT_EQ( 0, curveCluster_->PreparePhysicalPool( 1, "./test/integration/cluster_common/cluster_common_topo_2.json")); - // 创建chunkserver + // Create chunkserver auto copy1 = chunkserverConf1; std::string chunkserver1Dir = commonDir + "/chunkserver1"; ASSERT_EQ(0, system((std::string("mkdir ") + chunkserver1Dir).c_str())); @@ -224,40 +222,42 @@ TEST_F(ClusterBasicTest, test_start_stop_module2) { LOG(INFO) << "chunkserver 3 started on 127.0.0.1:2004, pid = " << pid; ASSERT_GT(pid, 0); - // 创建逻辑池和copyset - ASSERT_EQ(0, curveCluster_->PrepareLogicalPool( - 1, "./test/integration/cluster_common/cluster_common_topo_2.json")); + // Creating logical pools and copysets + ASSERT_EQ( + 0, + curveCluster_->PrepareLogicalPool( + 1, "./test/integration/cluster_common/cluster_common_topo_2.json")); - // 创建文件 + // Create File ASSERT_EQ(0, curveCluster_->CreateFile("test", "test", "/basic_test", 10 * 1024 * 1024 * 1024UL, /*normalFile=*/true, "SSD_2")); - // 获取当前正在服务的mds + // Obtain the currently serving mds int curMds; ASSERT_TRUE(curveCluster_->CurrentServiceMDS(&curMds)); ASSERT_EQ(1, curMds); - // hang mds进程 + // hang mds process ASSERT_EQ(0, curveCluster_->HangMDS(1)); - // 创建文件失败 + // Failed to create file ASSERT_NE(0, curveCluster_->CreateFile("test1", "test1", "/basic_test1", 10 * 1024 * 1024 * 1024UL, /*normalFile=*/true, "SSD_2")); - // 恢复mds进程 + // Resume mds process ASSERT_EQ(0, curveCluster_->RecoverHangMDS(1)); - // 创建文件成功 + // Successfully created file ASSERT_EQ(0, curveCluster_->CreateFile("test2", "test2", "/basic_test2", 10 * 1024 * 1024 * 1024UL, /*normalFile=*/true, "SSD_2")); - // 停掉chunkserver + // Stop chunkserver ASSERT_EQ(0, curveCluster_->StopChunkServer(1)); ASSERT_EQ(0, curveCluster_->StopChunkServer(2)); ASSERT_EQ(0, curveCluster_->StopChunkServer(3)); - // 停掉mds + // Stop mds ASSERT_EQ(0, curveCluster_->StopMDS(1)); - // 停掉etcd + // Stop etcd ASSERT_EQ(0, curveCluster_->StopEtcd(1)); system((std::string("rm -fr ") + commonDir).c_str()); @@ -271,7 +271,7 @@ TEST_F(ClusterBasicTest, test_multi_mds_and_etcd) { ASSERT_EQ(0, system("rm -fr test_multi_etcd_node*.etcd")); ASSERT_EQ(0, system((std::string("mkdir ") + commonDir).c_str())); - // 起三个etcd + // Start three ETCDs std::string etcdDir = commonDir + "/etcd"; ASSERT_EQ(0, 
system((std::string("mkdir ") + etcdDir).c_str())); std::vector etcdCluster{ @@ -307,7 +307,7 @@ TEST_F(ClusterBasicTest, test_multi_mds_and_etcd) { ASSERT_GT(pid, 0); ASSERT_TRUE(curveCluster_->WaitForEtcdClusterAvalible()); - // 起三mds + // Starting three mds std::string mds1Dir = commonDir + "/mds1"; std::string mds2Dir = commonDir + "/mds2"; std::string mds3Dir = commonDir + "/mds3"; @@ -340,16 +340,16 @@ TEST_F(ClusterBasicTest, test_multi_mds_and_etcd) { LOG(INFO) << "mds 3 started on 127.0.0.1:2312, pid = " << pid; ASSERT_GT(pid, 0); - // 获取当前正在服务的mds + // Obtain the currently serving mds int curMds; ASSERT_TRUE(curveCluster_->CurrentServiceMDS(&curMds)); ASSERT_EQ(1, curMds); - // 停掉mds + // Stop mds ASSERT_EQ(0, curveCluster_->StopMDS(1)); ASSERT_EQ(0, curveCluster_->StopMDS(2)); ASSERT_EQ(0, curveCluster_->StopMDS(3)); - // 停掉etcd + // Stop etcd ASSERT_EQ(0, curveCluster_->StopEtcd(1)); ASSERT_EQ(0, curveCluster_->StopEtcd(2)); ASSERT_EQ(0, curveCluster_->StopEtcd(3)); diff --git a/test/integration/cluster_common/mds.basic.conf b/test/integration/cluster_common/mds.basic.conf index 9486982bf5..b0cb16d055 100644 --- a/test/integration/cluster_common/mds.basic.conf +++ b/test/integration/cluster_common/mds.basic.conf @@ -15,196 +15,196 @@ # # -# mds服务端口 +# Mds service port # mds.listen.addr=127.0.0.1:6666 # -# etcd相关配置 +# ETCD related configurations # -# etcd地址 +# ETCD address mds.etcd.endpoint=localhost:2221 -# client建立连接的超时时间 +# The timeout period for establishing a connection with a client mds.etcd.dailtimeoutMs=5000 -# client进行put/get/txn等操作的超时时间 +# The timeout period for client to perform put/get/txn and other operations mds.etcd.operation.timeoutMs=5000 -# client操作失败可以重试的次数 +# The number of times a client operation failed and can be retried mds.etcd.retry.times=3 # -# segment分配量统计相关配置 +# Configuration related to segment allocation statistics # -# 将内存中的数据持久化到etcd的间隔, 单位ms +# The interval between persisting data in memory to ETCD, in milliseconds mds.segment.alloc.periodic.persistInterMs=1000 -# 出错情况下的重试间隔,单位ms +# The retry interval in ms in case of an error mds.segment.alloc.retryInterMs=1000 -# leader竞选时会创建session, 单位是秒, 因为go端代码的接口这个值得单位就是s +# During the leader campaign, a session will be created in seconds, as the value unit for the interface of the go side code is seconds mds.leader.sessionInterSec=5 -# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间 -# 内未当选leader会返回错误。这里设置10分钟超时,超时后mds会继续竞选 +# The timeout for leader election. If set to 0, the election will block indefinitely if unsuccessful. If set to a value greater than 0, an error will be returned if not elected as leader within the electionTimeoutMs duration. +# Here, a timeout of 10 minutes is set, and if it times out, the MDS will continue the election process. 
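# For illustration only (a hypothetical value, not set by this patch): the
# 10-minute bound described above would correspond to
# mds.leader.electionTimeoutMs=600000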
mds.leader.electionTimeoutMs=0 # -# scheduler相关配置 +# Scheduler related configurations # -# copysetScheduler开关 +# copysetScheduler switch mds.enable.copyset.scheduler=true -# leaderScheduler开关 +# leaderScheduler switch mds.enable.leader.scheduler=true -# recoverScheduler开关 +# recoverScheduler switch mds.enable.recover.scheduler=true -# replicaScheduler开关 +# replicaScheduler switch mds.enable.replica.scheduler=true -# copysetScheduler 轮次间隔,单位是s +# copysetScheduler round interval, in seconds mds.copyset.scheduler.intervalSec=5 -# replicaScheduler 轮次间隔,单位是s +# replicaScheduler round interval, in seconds mds.replica.scheduler.intervalSec=5 -# leaderScheduler 轮次间隔,单位是s +# leaderScheduler round interval, in seconds mds.leader.scheduler.intervalSec=30 -# recoverScheduler 轮次间隔,单位是s +# recoverScheduler round interval, in seconds mds.recover.scheduler.intervalSec=5 -# 每块磁盘上operator的并发度 +# The concurrency of operators on each disk mds.schduler.operator.concurrent=4 -# leader变更超时时间, 超时后mds从内存移除该operator +# Timeout for a leader transfer; after the timeout the mds removes the operator from memory mds.schduler.transfer.limitSec=1800 -# 减一个副本超时时间, 超时后mds从内存移除该operator +# Timeout for removing a replica; after the timeout the mds removes the operator from memory mds.scheduler.remove.limitSec=1800 -# 增加一个副本超时时间, 超时后mds从内存移除该operator +# Timeout for adding a replica; after the timeout the mds removes the operator from memory mds.scheduler.add.limitSec=1800 -# copyset数量极差不能超过均值的百分比 +# The range of copyset counts must not exceed this percentage of the mean mds.scheduler.copysetNumRangePercent=0.05 -# chunkserver上copyset的scatte-rwidth不能超过最小值的百分比 +# The scatter-width of copysets on a chunkserver must not exceed the minimum value by more than this percentage mds.schduler.scatterWidthRangePerent=0.2 -# 一个server上超过一定数量的chunkserver offline, 不做恢复 +# If more than this number of chunkservers are offline on one server, no recovery is performed mds.chunkserver.failure.tolerance=3 -# chunkserver启动coolingTimeSec_后才可以作为target leader, 单位是s -# TODO(lixiaocui): 续得一定程度上与快照的时间间隔方面做到相关 +# A chunkserver can be chosen as a target leader only coolingTimeSec_ seconds after it starts +# TODO(lixiaocui): It needs to be related to the snapshot interval to some extent. mds.scheduler.chunkserver.cooling.timeSec=1800 # -# 心跳相关配置,单位为ms +# Heartbeat related configuration, in ms # -# chunkserver和mds的心跳间隔 +# Heartbeat interval between chunkserver and mds mds.heartbeat.intervalMs=1000 -# chunkserver和mds间心跳miss的时间 +# Time after which a heartbeat between chunkserver and mds counts as missed mds.heartbeat.misstimeoutMs=3000 -# mds在心跳miss后offlinetimeout被标记为offline +# After heartbeat misses last offlinetimeout, the mds marks the chunkserver offline mds.heartbeat.offlinetimeoutMs=1800000 -# mds启动后延迟一定时间开始指导chunkserver删除物理数据 -# 需要延迟删除的原因在代码中备注 +# After the mds starts, it waits for a period before directing chunkservers to delete physical data +# The reason for the delayed deletion is noted in the code mds.heartbeat.clean_follower_afterMs=1200000 # -# namespace cache相关 +# namespace cache related # -# namestorage的缓存大小,为0表示不缓存 -# 按照每个文件最小10GB的空间预算。算上超售(2倍) -# 文件数量 = 5PB/10GB ~= 524288 个文件 -# sizeof(namespace对象) * 524288 ~= 89Byte *524288 ~= 44MB 空间 -# 16MB chunk大小, 1个segment 1GB -# sizeof(segment 对象) * 2621440 ~=(32 + (1024/16)*12)* 2621440 ~= 1.95 GB -# 数据量:3GB左右 -# 记录数量:524288+2621440 ~= 300w左右 +# The cache size of namestorage, where 0 indicates no caching +# Based on a minimum space budget of 10GB per file.
Including oversold (2x) +# Number of files = 5PB/10GB ~= 524288 files +# sizeof(namespace object) * 524288 ~= 89Byte * 524288 ~= 44MB space +# 16MB chunk size, 1 segment 1GB +# sizeof(segment object) * 2621440 ~= (32+(1024/16) * 12) * 2621440 ~= 1.95 GB +# Data volume: about 3GB +# Record quantity: 524288+2621440 ~= about 300w mds.cache.count=100000 # # mysql Database config # -# 数据库使用的database名称 +# The database name used by the database mds.DbName=cluster_common_curve_mds -# 数据库用户名 +# Database username mds.DbUser=root -# 数据库地址 +# Database address mds.DbUrl=localhost -# 数据库登录密码 +# Database login password mds.DbPassword=qwer mds.DbPoolSize=128 # # mds.session settings # -# mds.session过期时间,单位us +# mds.session expiration time, in us mds.session.leaseTimeUs=5000000 -# 能够容忍的client和mds之间的时钟不同步的时间,单位us +# Tolerable time of clock asynchrony between client and mds, in units of us mds.session.toleranceTimeUs=500000 -# mds.session后台扫描线程扫描间隔时间,单位us +# mds.session Background Scan Thread Scan Interval Time, Unit: us mds.session.intevalTimeUs=500000 # # auth settings # -# root用户密码 +# root User Password mds.auth.rootPassword=root_password # # file lock setting # -# mds的文件锁桶大小 +# File lock bucket size for mds mds.filelock.bucketNum=8 # # topology config # -# Toplogy 定期刷新入数据库的时间间隔 +# The time interval for Toplogy to periodically refresh into the database mds.topology.TopologyUpdateToRepoSec=60 -# 请求chunkserver上创建全部copyset的超时时间 +# Request timeout for creating all copysets on chunkserver mds.topology.CreateCopysetRpcTimeoutMs=10000 -# 请求chunkserver上创建copyset重试次数 +# Request to create copyset on chunkserver retry count mds.topology.CreateCopysetRpcRetryTimes=20 -# 请求chunkserver上创建copyset重试间隔 +# Request to create copyset on chunkserver retry interval mds.topology.CreateCopysetRpcRetrySleepTimeMs=1000 -# Topology模块刷新metric时间间隔 +# Topology module refresh metric interval mds.topology.UpdateMetricIntervalSec=1 -# 物理池使用百分比,即使用量超过这个值即不再往这个池分配 +# The percentage of physical pool usage, even if the usage exceeds this value, it will no longer be allocated to this pool mds.topology.PoolUsagePercentLimit=90 -# 多pool选pool策略 0:Random, 1:Weight +# Multi pool selection pool strategy 0:Random, 1:Weight mds.topology.choosePoolPolicy=0 # # copyset config -# 默认值,为0时不启用 +# Default value, not enabled when 0 # -# 生成copyset重试次数 +# Generate copyset retry count mds.copyset.copysetRetryTimes=10 -# 所有chunkserver的scatterWidth需满足的最大方差 +# The maximum variance that the scatterWidth of all chunkservers must meet mds.copyset.scatterWidthVariance=0 -# 所有chunkserver的scatterWidth需满足的最大标准差 +# The maximum standard deviation that the scatterWidth of all chunkservers must meet mds.copyset.scatterWidthStandardDevation=0 -# 所有chunkserver的scatterWidth需满足的最大极差 +# The maximum range that the scatterWidth of all chunkservers needs to meet mds.copyset.scatterWidthRange=0 -# 所有chunkserver的scatterWidth偏离均值的百分比 -# scatterwidth偏移百分比设置过大,导致部分机器scatterwidth过小,影响机器恢复时间,恢复 -# 时间会导致集群的可靠性降低;其次,会导致部分机器scatterwith过大,某些chunkserver上的 -# copyset散落在各机器上,其他机器一旦写入,这些scatter-with较大的机器成为热点 -# scatterwidth偏移百分比设置过小,导致scatterwidth平均程度要求更大,copyset算法要求越高, -# 导致算法可能算不出理想结果,建议设置值为20 +# Percentage of Deviation from the Mean ScatterWidth of All Chunk Servers. Setting a high percentage for scatterWidth deviation can lead to some machines having +# excessively small scatterWidth, which impacts machine recovery times and reduces the overall reliability of the cluster. 
Additionally, it can result in certain machines +# having excessively large scatterWidth values, causing copysets on these chunk servers to be scattered across various machines. When other machines write data, these servers +# with larger scatterWidth can become performance bottlenecks. +# Conversely, setting a low percentage for scatterWidth deviation requires a higher degree of scatterWidth uniformity, demanding more from the copyset algorithm. This +# can lead to the algorithm being unable to produce optimal results. It is recommended to set the value at 20 for a balance between these factors. mds.copyset.scatterWidthFloatingPercentage=20 # # curvefs config # -# curvefs的默认chunk size大小,16MB = 16*1024*1024 = 16777216 +# The default chunk size for curvefs is 16MB = 16*1024*1024 = 16777216 mds.curvefs.defaultChunkSize=16777216 # # chunkseverclient config # -# rpc 超时时间 +# RPC timeout mds.chunkserverclient.rpcTimeoutMs=500 -# rpc 重试次数 +# RPC retry count mds.chunkserverclient.rpcRetryTimes=5 -# rpc 重试时间间隔 +# RPC retry interval mds.chunkserverclient.rpcRetryIntervalMs=500 -# 从copyset的每个chunkserver getleader的重试的最大轮次 +# The maximum number of retries from each chunkserver getleader in the copyset mds.chunkserverclient.updateLeaderRetryTimes=5 -# 从copyset的每个chunkserver getleader的每一轮的间隔,需大于raft选主的时间 +# The interval between each round of each chunkserver getleader in the copyset must be greater than the time for selecting the master in the raft mds.chunkserverclient.updateLeaderRetryIntervalMs=5000 # # common options # -# 日志存放文件夹 +# Log storage folder mds.common.logDir=./runlog/ diff --git a/test/integration/common/chunkservice_op.cpp b/test/integration/common/chunkservice_op.cpp index d359d5e294..13e9f05954 100644 --- a/test/integration/common/chunkservice_op.cpp +++ b/test/integration/common/chunkservice_op.cpp @@ -31,9 +31,9 @@ namespace chunkserver { static constexpr uint32_t kOpRequestAlignSize = 4096; const PageSizeType kPageSize = kOpRequestAlignSize; -int ChunkServiceOp::WriteChunk(struct ChunkServiceOpConf *opConf, +int ChunkServiceOp::WriteChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn, off_t offset, - size_t len, const char *data, + size_t len, const char* data, const std::string& cloneFileSource, off_t cloneFileOffset) { PeerId leaderId(opConf->leaderPeer->address()); @@ -69,9 +69,9 @@ int ChunkServiceOp::WriteChunk(struct ChunkServiceOpConf *opConf, return status; } -int ChunkServiceOp::ReadChunk(struct ChunkServiceOpConf *opConf, +int ChunkServiceOp::ReadChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn, off_t offset, - size_t len, std::string *data, + size_t len, std::string* data, const std::string& cloneFileSource, off_t cloneFileOffset) { PeerId leaderId(opConf->leaderPeer->address()); @@ -105,7 +105,7 @@ int ChunkServiceOp::ReadChunk(struct ChunkServiceOpConf *opConf, CHUNK_OP_STATUS status = response.status(); LOG_IF(ERROR, status) << "read failed: " << CHUNK_OP_STATUS_Name(status); - // 读成功,复制内容到data + // Successfully read, copy content to data if (status == CHUNK_OP_STATUS_SUCCESS && data != nullptr) { cntl.response_attachment().copy_to(data, cntl.response_attachment().size()); @@ -114,10 +114,10 @@ int ChunkServiceOp::ReadChunk(struct ChunkServiceOpConf *opConf, return status; } -int ChunkServiceOp::ReadChunkSnapshot(struct ChunkServiceOpConf *opConf, +int ChunkServiceOp::ReadChunkSnapshot(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, - std::string *data) { + std::string* data) { 
PeerId leaderId(opConf->leaderPeer->address()); brpc::Channel channel; channel.Init(leaderId.addr, NULL); @@ -145,7 +145,7 @@ int ChunkServiceOp::ReadChunkSnapshot(struct ChunkServiceOpConf *opConf, LOG_IF(ERROR, status) << "readchunksnapshot failed: " << CHUNK_OP_STATUS_Name(status); - // 读成功,复制内容到data + // Successfully read, copy content to data if (status == CHUNK_OP_STATUS_SUCCESS && data != nullptr) { cntl.response_attachment().copy_to(data, cntl.response_attachment().size()); @@ -154,7 +154,7 @@ int ChunkServiceOp::ReadChunkSnapshot(struct ChunkServiceOpConf *opConf, return status; } -int ChunkServiceOp::DeleteChunk(struct ChunkServiceOpConf *opConf, +int ChunkServiceOp::DeleteChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn) { PeerId leaderId(opConf->leaderPeer->address()); brpc::Channel channel; @@ -184,7 +184,7 @@ int ChunkServiceOp::DeleteChunk(struct ChunkServiceOpConf *opConf, } int ChunkServiceOp::DeleteChunkSnapshotOrCorrectSn( - struct ChunkServiceOpConf *opConf, ChunkID chunkId, uint64_t correctedSn) { + struct ChunkServiceOpConf* opConf, ChunkID chunkId, uint64_t correctedSn) { PeerId leaderId(opConf->leaderPeer->address()); brpc::Channel channel; channel.Init(leaderId.addr, NULL); @@ -213,9 +213,9 @@ int ChunkServiceOp::DeleteChunkSnapshotOrCorrectSn( return status; } -int ChunkServiceOp::CreateCloneChunk(struct ChunkServiceOpConf *opConf, +int ChunkServiceOp::CreateCloneChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, - const std::string &location, + const std::string& location, uint64_t correctedSn, uint64_t sn, uint64_t chunkSize) { PeerId leaderId(opConf->leaderPeer->address()); @@ -249,7 +249,7 @@ int ChunkServiceOp::CreateCloneChunk(struct ChunkServiceOpConf *opConf, return status; } -int ChunkServiceOp::RecoverChunk(struct ChunkServiceOpConf *opConf, +int ChunkServiceOp::RecoverChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, off_t offset, size_t len) { PeerId leaderId(opConf->leaderPeer->address()); brpc::Channel channel; @@ -280,10 +280,10 @@ int ChunkServiceOp::RecoverChunk(struct ChunkServiceOpConf *opConf, return status; } -int ChunkServiceOp::GetChunkInfo(struct ChunkServiceOpConf *opConf, - ChunkID chunkId, SequenceNum *curSn, - SequenceNum *snapSn, - std::string *redirectedLeader) { +int ChunkServiceOp::GetChunkInfo(struct ChunkServiceOpConf* opConf, + ChunkID chunkId, SequenceNum* curSn, + SequenceNum* snapSn, + std::string* redirectedLeader) { PeerId leaderId(opConf->leaderPeer->address()); brpc::Channel channel; channel.Init(leaderId.addr, NULL); @@ -305,18 +305,18 @@ int ChunkServiceOp::GetChunkInfo(struct ChunkServiceOpConf *opConf, CHUNK_OP_STATUS status = response.status(); if (status == CHUNK_OP_STATUS_SUCCESS) { switch (response.chunksn().size()) { - case 2: - *snapSn = response.chunksn(1); - FALLTHROUGH_INTENDED; - case 1: - *curSn = response.chunksn(0); - break; - case 0: - return CHUNK_OP_STATUS_CHUNK_NOTEXIST; - default: - LOG(ERROR) << "GetChunkInfo failed, invalid chunkSn size: " - << response.chunksn().size(); - return -1; + case 2: + *snapSn = response.chunksn(1); + FALLTHROUGH_INTENDED; + case 1: + *curSn = response.chunksn(0); + break; + case 0: + return CHUNK_OP_STATUS_CHUNK_NOTEXIST; + default: + LOG(ERROR) << "GetChunkInfo failed, invalid chunkSn size: " + << response.chunksn().size(); + return -1; } } @@ -331,7 +331,7 @@ int ChunkServiceOp::GetChunkInfo(struct ChunkServiceOpConf *opConf, int ChunkServiceVerify::VerifyWriteChunk(ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, - const 
char *data, string *chunkData, + const char* data, string* chunkData, const std::string& cloneFileSource, off_t cloneFileOffset) { int ret = @@ -342,7 +342,8 @@ int ChunkServiceVerify::VerifyWriteChunk(ChunkID chunkId, SequenceNum sn, << ", offset=" << offset << ", len=" << len << ", cloneFileSource=" << cloneFileSource << ", cloneFileOffset=" << cloneFileOffset << ", ret=" << ret; - // chunk写成功,同步更新chunkData内容和existChunks_ + // Chunk successfully written, synchronously updating chunkData content and + // existChunks_ if (ret == CHUNK_OP_STATUS_SUCCESS && chunkData != nullptr) chunkData->replace(offset, len, data); existChunks_.insert(chunkId); @@ -352,7 +353,7 @@ int ChunkServiceVerify::VerifyWriteChunk(ChunkID chunkId, SequenceNum sn, int ChunkServiceVerify::VerifyReadChunk(ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, - string *chunkData, + string* chunkData, const std::string& cloneFileSource, off_t cloneFileOffset) { std::string data(len, 0); @@ -369,8 +370,7 @@ int ChunkServiceVerify::VerifyReadChunk(ChunkID chunkId, SequenceNum sn, if (ret != CHUNK_OP_STATUS_SUCCESS && ret != CHUNK_OP_STATUS_CHUNK_NOTEXIST) { return -1; - } else if (ret == CHUNK_OP_STATUS_SUCCESS && - !chunk_existed && + } else if (ret == CHUNK_OP_STATUS_SUCCESS && !chunk_existed && cloneFileSource.empty()) { LOG(ERROR) << "Unexpected read success, chunk " << chunkId << " should not existed"; @@ -381,20 +381,19 @@ int ChunkServiceVerify::VerifyReadChunk(ChunkID chunkId, SequenceNum sn, return -1; } - // 读成功,则判断内容是否与chunkData吻合 + // If read successfully, determine if the content matches chunkData if (ret == CHUNK_OP_STATUS_SUCCESS && chunkData != nullptr) { - // 查找数据有差异的位置 + // Find locations with data differences uint32_t i = 0; while (i < len && data[i] == (*chunkData)[offset + i]) ++i; - // 读取数据与预期相符,返回0 - if (i == len) - return 0; + // Read data that matches expectations, return 0 + if (i == len) return 0; LOG(ERROR) << "read data missmatch for chunk " << chunkId << ", from offset " << offset + i << ", read " << static_cast(data[i]) << ", expected " << static_cast((*chunkData)[offset + i]); - // 打印每个page的第一个字节 + // Print the first byte of each page uint32_t j = i / kPageSize * kPageSize; for (; j < len; j += kPageSize) { LOG(ERROR) << "chunk offset " << offset + j << ": read " @@ -409,7 +408,7 @@ int ChunkServiceVerify::VerifyReadChunk(ChunkID chunkId, SequenceNum sn, int ChunkServiceVerify::VerifyReadChunkSnapshot(ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, - string *chunkData) { + string* chunkData) { std::string data(len, 0); bool chunk_existed = existChunks_.find(chunkId) != std::end(existChunks_); @@ -431,20 +430,19 @@ int ChunkServiceVerify::VerifyReadChunkSnapshot(ChunkID chunkId, SequenceNum sn, return -1; } - // 读成功,则判断内容是否与chunkData吻合 + // If read successfully, determine if the content matches chunkData if (ret == CHUNK_OP_STATUS_SUCCESS && chunkData != nullptr) { - // 查找数据有差异的位置 + // Find locations with data differences int i = 0; while (i < len && data[i] == (*chunkData)[offset + i]) ++i; - // 读取数据与预期相符,返回0 - if (i == len) - return 0; + // Read data that matches expectations, return 0 + if (i == len) return 0; LOG(ERROR) << "read data missmatch for chunk " << chunkId << ", from offset " << offset + i << ", read " << static_cast(data[i]) << ", expected " << static_cast((*chunkData)[offset + i]); - // 打印每个4KB的第一个字节 + // Print the first byte of each 4KB int j = i / kPageSize * kPageSize; for (; j < len; j += kPageSize) { LOG(ERROR) << "chunk offset " << offset + j << ": read " 
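A note on how the Verify* helpers above compose in a test. A minimal usage sketch, assuming a healthy copyset with an elected leader; the peer address, pool/copyset ids and chunk parameters below are hypothetical:

    // Build the shared op config, write 4KB, then read it back and compare.
    Peer leader;
    leader.set_address("127.0.0.1:8200:0");  // hypothetical leader address
    struct ChunkServiceOpConf conf = {&leader, /*logicPoolId=*/1,
                                      /*copysetId=*/1, /*rpcTimeout=*/5000};
    ChunkServiceVerify verify(&conf);

    std::string expected(16 * 1024 * 1024, 0);  // expected whole-chunk content
    std::string payload(4096, 'a');             // kOpRequestAlignSize-aligned
    // VerifyWriteChunk returns the op status (CHUNK_OP_STATUS_SUCCESS == 0)
    // and patches `expected` so later reads can be checked against it.
    ASSERT_EQ(0, verify.VerifyWriteChunk(/*chunkId=*/1, /*sn=*/1, /*offset=*/0,
                                         /*len=*/4096, payload.c_str(),
                                         &expected));
    // VerifyReadChunk returns 0 only if the bytes read match `expected`.
    ASSERT_EQ(0, verify.VerifyReadChunk(/*chunkId=*/1, /*sn=*/1, /*offset=*/0,
                                        /*len=*/4096, &expected));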
@@ -461,8 +459,7 @@ int ChunkServiceVerify::VerifyDeleteChunk(ChunkID chunkId, SequenceNum sn) { int ret = ChunkServiceOp::DeleteChunk(opConf_, chunkId, sn); LOG(INFO) << "Delete Chunk " << chunkId << ", sn " << sn << ", ret=" << ret; - if (ret == CHUNK_OP_STATUS_SUCCESS) - existChunks_.erase(chunkId); + if (ret == CHUNK_OP_STATUS_SUCCESS) existChunks_.erase(chunkId); return ret; } @@ -477,7 +474,7 @@ int ChunkServiceVerify::VerifyDeleteChunkSnapshotOrCorrectSn( } int ChunkServiceVerify::VerifyCreateCloneChunk(ChunkID chunkId, - const std::string &location, + const std::string& location, uint64_t correctedSn, uint64_t sn, uint64_t chunkSize) { @@ -487,8 +484,7 @@ int ChunkServiceVerify::VerifyCreateCloneChunk(ChunkID chunkId, << location << ", correctedSn=" << correctedSn << ", sn=" << sn << ", chunkSize=" << chunkSize << ", ret=" << ret; - if (ret == CHUNK_OP_STATUS_SUCCESS) - existChunks_.insert(chunkId); + if (ret == CHUNK_OP_STATUS_SUCCESS) existChunks_.insert(chunkId); return ret; } @@ -517,31 +513,33 @@ int ChunkServiceVerify::VerifyGetChunkInfo(ChunkID chunkId, bool chunk_existed = existChunks_.find(chunkId) != std::end(existChunks_); switch (ret) { - case CHUNK_OP_STATUS_SUCCESS: - // 如果curSn或snapSn与预期不符,则返回-1 - LOG_IF(ERROR, (curSn != expCurSn || snapSn != expSnapSn)) - << "GetChunkInfo for " << chunkId << " failed, curSn=" << curSn - << ", expected " << expCurSn << "; snapSn=" << snapSn - << ", expected " << expSnapSn; - return (curSn != expCurSn || snapSn != expSnapSn) ? -1 : 0; - - case CHUNK_OP_STATUS_CHUNK_NOTEXIST: - // 如果chunk预期存在,则返回-1 - LOG_IF(ERROR, chunk_existed) - << "Unexpected GetChunkInfo NOTEXIST, chunk " << chunkId - << " must be existed"; - return chunk_existed ? -1 : 0; - - case CHUNK_OP_STATUS_REDIRECTED: - // 如果返回的redirectedLeader与给定的不符,则返回-1 - LOG_IF(ERROR, expLeader != redirectedLeader) - << "GetChunkInfo failed, redirected to " << redirectedLeader - << ", expected " << expLeader; - return (expLeader != redirectedLeader) ? -1 : 0; - - default: - LOG(ERROR) << "GetChunkInfo for " << chunkId << "failed, ret=" << ret; - return -1; + case CHUNK_OP_STATUS_SUCCESS: + // If curSn or snapSn does not match expectations, return -1 + LOG_IF(ERROR, (curSn != expCurSn || snapSn != expSnapSn)) + << "GetChunkInfo for " << chunkId << " failed, curSn=" << curSn + << ", expected " << expCurSn << "; snapSn=" << snapSn + << ", expected " << expSnapSn; + return (curSn != expCurSn || snapSn != expSnapSn) ? -1 : 0; + + case CHUNK_OP_STATUS_CHUNK_NOTEXIST: + // If chunk is expected to exist, return -1 + LOG_IF(ERROR, chunk_existed) + << "Unexpected GetChunkInfo NOTEXIST, chunk " << chunkId + << " must be existed"; + return chunk_existed ? -1 : 0; + + case CHUNK_OP_STATUS_REDIRECTED: + // If the redirectedLeader returned does not match the given, then + // -1 is returned + LOG_IF(ERROR, expLeader != redirectedLeader) + << "GetChunkInfo failed, redirected to " << redirectedLeader + << ", expected " << expLeader; + return (expLeader != redirectedLeader) ? 
-1 : 0; + + default: + LOG(ERROR) << "GetChunkInfo for " << chunkId + << "failed, ret=" << ret; + return -1; } LOG(ERROR) << "GetChunkInfo for " << chunkId << "failed, Illgal branch"; diff --git a/test/integration/common/chunkservice_op.h b/test/integration/common/chunkservice_op.h index 28f32c6891..b6338ba888 100644 --- a/test/integration/common/chunkservice_op.h +++ b/test/integration/common/chunkservice_op.h @@ -24,9 +24,11 @@ #define TEST_INTEGRATION_COMMON_CHUNKSERVICE_OP_H_ #include -#include -#include + #include +#include +#include + #include "include/chunkserver/chunkserver_common.h" #include "proto/common.pb.h" @@ -40,7 +42,7 @@ using std::string; #define NULL_SN -1 struct ChunkServiceOpConf { - Peer *leaderPeer; + Peer* leaderPeer; LogicPoolID logicPoolId; CopysetID copysetId; uint32_t rpcTimeout; @@ -49,221 +51,246 @@ struct ChunkServiceOpConf { class ChunkServiceOp { public: /** - * @brief 通过chunkService写chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Write a chunk through chunkService + * @param opConf Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId - * @param sn chunk版本 + * @param sn chunk version * @param offset * @param len - * @param data 待写数据 - * @param cloneFileSource clone源的文件路径 - * @param cloneFileOffset clone chunk在clone源中的相对偏移 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param data to be written + * @param cloneFileSource The file path of the clone source + * @param cloneFileOffset Relative offset of clone chunk in clone source + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int WriteChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, + static int WriteChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, - const char *data, + const char* data, const std::string& cloneFileSource = "", off_t cloneFileOffset = 0); /** - * @brief 通过chunkService读chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Read chunk through chunkService + * @param opConf Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId - * @param sn chunk版本 + * @param sn chunk version * @param offset * @param len - * @param data 读取内容 - * @param cloneFileSource clone源的文件路径 - * @param cloneFileOffset clone chunk在clone源中的相对偏移 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param data reading content + * @param cloneFileSource The file path of the clone source + * @param cloneFileOffset Relative offset of clone chunk in clone source + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int ReadChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, - SequenceNum sn, off_t offset, size_t len, - string *data, + static int ReadChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, + SequenceNum sn, off_t offset, size_t len, string* data, const std::string& cloneFileSource = "", off_t cloneFileOffset = 0); /** - * @brief 通过chunkService读chunk快照 - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Read chunk snapshot through chunkService + * @param opConf Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId - * @param sn chunk版本 + * @param sn chunk version * @param offset * @param len - * @param data 读取内容 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param data reading content + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int 
ReadChunkSnapshot(struct ChunkServiceOpConf *opConf, + static int ReadChunkSnapshot(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn, off_t offset, - size_t len, std::string *data); + size_t len, std::string* data); /** - * @brief 通过chunkService删除chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Delete chunk through chunkService + * @param opConf Common configuration parameters such as + * leaderPeer/copysetid, etc. * @param chunkId - * @param sn chunk版本 + * @param sn chunk version - * @return 请求执行失败则返回-1,否则返回错误码 + * @return -1 if the request fails to execute, otherwise an error code */ - static int DeleteChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, + static int DeleteChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn); /** - * @brief 通过chunkService删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Delete, through chunkService, the snapshot generated during this + * dump or left over from history. If no snapshot was generated during the + * dump, modify the chunk's correctedSn instead + * @param opConf Common configuration parameters such as + * leaderPeer/copysetid, etc. * @param chunkId * @param correctedSn - * @return 请求执行失败则返回-1,否则返回错误码 + * @return -1 if the request fails to execute, otherwise an error code */ - static int DeleteChunkSnapshotOrCorrectSn(struct ChunkServiceOpConf *opConf, + static int DeleteChunkSnapshotOrCorrectSn(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum correctedSn); /** - * @brief 通过chunkService创建clone chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Create a clone chunk through chunkService + * @param opConf Common configuration parameters such as + * leaderPeer/copysetid, etc. * @param chunkId - * @param location 源chunk在源端的位置,可能在curve或S3上 + * @param location The location of the source chunk on the source side, + * possibly on curve or S3 * @param correctedSn * @param sn * @param chunkSize - * @return 请求执行失败则返回-1,否则返回错误码 + * @return -1 if the request fails to execute, otherwise an error code */ - static int CreateCloneChunk(struct ChunkServiceOpConf *opConf, - ChunkID chunkId, const std::string &location, + static int CreateCloneChunk(struct ChunkServiceOpConf* opConf, + ChunkID chunkId, const std::string& location, uint64_t correctedSn, uint64_t sn, uint64_t chunkSize); /** - * @brief 通过chunkService恢复chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Recover a chunk through chunkService + * @param opConf Common configuration parameters such as + * leaderPeer/copysetid, etc. * @param chunkId * @param offset * @param len - * @return 请求执行失败则返回-1,否则返回错误码 + * @return -1 if the request fails to execute, otherwise an error code */ - static int RecoverChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, + static int RecoverChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, off_t offset, size_t len); /** - * @brief 通过chunkService获取chunk元数据 - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Obtain chunk metadata through chunkService + * @param opConf Common configuration parameters such as + * leaderPeer/copysetid, etc. * @param chunkId - * @param curSn 返回当前chunk版本 - * @param snapSn 返回快照chunk版本 - * @param redirectedLeader 返回重定向主节点 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param curSn Returns the current chunk version + * @param snapSn Returns the snapshot chunk version + * @param redirectedLeader Returns the redirected leader + * @return -1 if the request fails to execute, otherwise an error code */ - static int GetChunkInfo(struct ChunkServiceOpConf *opConf, ChunkID chunkId, - SequenceNum *curSn, SequenceNum *snapSn, - string *redirectedLeader); + static int GetChunkInfo(struct ChunkServiceOpConf* opConf, ChunkID chunkId, + SequenceNum* curSn, SequenceNum* snapSn, + string* redirectedLeader); }; class ChunkServiceVerify { public: - explicit ChunkServiceVerify(struct ChunkServiceOpConf *opConf) + explicit ChunkServiceVerify(struct ChunkServiceOpConf* opConf) : opConf_(opConf) {} /** - * @brief 执行写chunk, 并将数据写入到chunkdata对应区域,以便于后续验证数据。 + * @brief Execute a chunk write, and also write the data into the + * corresponding region of chunkData for later verification. * @param chunkId - * @param sn chunk版本 + * @param sn chunk version * @param offset * @param len - * @param data 待写数据 - * @param chunkData 整个chunk的预期数据 - * @param cloneFileSource clone源的文件路径 - * @param cloneFileOffset clone chunk在clone源中的相对偏移 - * @return 返回写操作的错误码 + * @param data Data to be written + * @param chunkData Expected data for the entire chunk + * @param cloneFileSource The file path of the clone source + * @param cloneFileOffset Relative offset of clone chunk in clone source + * @return Error code of the write operation */ int VerifyWriteChunk(ChunkID chunkId, SequenceNum sn, off_t offset, - size_t len, const char *data, string *chunkData, + size_t len, const char* data, string* chunkData, const std::string& cloneFileSource = "", off_t cloneFileOffset = 0); /** - * @brief 执行读chunk, 并验证读取内容是否与chunkdata对应区域的预期数据吻合。 + * @brief Execute a chunk read, and verify that the content read matches + * the expected data in the corresponding region of chunkData. * @param chunkId - * @param sn chunk版本 + * @param sn chunk version * @param offset * @param len - * @param chunkData 整个chunk的预期数据 - * @param cloneFileSource clone源的文件路径 - * @param cloneFileOffset clone chunk在clone源中的相对偏移 - * @return 读请求结果符合预期返回0,否则返回-1 + * @param chunkData Expected data for the entire chunk + * @param cloneFileSource The file path of the clone source + * @param cloneFileOffset Relative offset of clone chunk in clone source + * @return 0 if the read result matches expectations, otherwise -1 */ int VerifyReadChunk(ChunkID chunkId, SequenceNum sn, off_t offset, - size_t len, string *chunkData, + size_t len, string* chunkData, const std::string& cloneFileSource = "", off_t cloneFileOffset = 0); /** - * @brief 执行读chunk快照, - * 并验证读取内容是否与chunkdata对应区域的预期数据吻合。 + * @brief Execute a chunk snapshot read, and verify that the content read + * matches the expected data in the corresponding region of chunkData. * @param chunkId - * @param sn chunk版本 + * @param sn chunk version * @param offset * @param len - * @param chunkData 整个chunk的预期数据 - * @return 读请求结果符合预期返回0,否则返回-1 + * @param chunkData Expected data for the entire chunk + * @return 0 if the read result matches expectations, otherwise -1 */ int VerifyReadChunkSnapshot(ChunkID chunkId, SequenceNum sn, off_t offset, - size_t len, string *chunkData); + size_t len, string* chunkData); /** - * @brief 删除chunk + * @brief Delete chunk * @param chunkId - * @param sn chunk版本 - * @return 返回删除操作的错误码 + * @param sn chunk version + * @return Error code of the delete operation */ int VerifyDeleteChunk(ChunkID chunkId, SequenceNum sn); /** - * @brief 删除chunk的快照 + * @brief Delete the snapshot of the chunk * @param chunkId * @param correctedSn - * @return 返回删除操作的错误码 + * @return Error code of the delete operation */ int VerifyDeleteChunkSnapshotOrCorrectSn(ChunkID chunkId, SequenceNum correctedSn); /** - * @brief 创建clone chunk + * @brief Create clone chunk * @param chunkId - * @param location 源地址 + * @param location Source address * @param correctedSn * @param sn * @param chunkSize - * @return 返回创建操作的错误码 + * @return Error code of the create operation */ - int VerifyCreateCloneChunk(ChunkID chunkId, const std::string &location, + int VerifyCreateCloneChunk(ChunkID chunkId, const std::string& location, uint64_t correctedSn, uint64_t sn, uint64_t chunkSize); /** - * @brief 恢复chunk + * @brief Recover chunk * @param chunkId * @param offset * @param len - * @return 请求执行失败则返回-1,否则返回错误码 + * @return -1 if the request fails to execute, otherwise an error code */ int VerifyRecoverChunk(ChunkID chunkId, off_t offset, size_t len); /** - * @brief 获取chunk元数据,并检验结果是否符合预期 + * @brief Obtain chunk metadata and verify that the results meet + * expectations * @param chunkId - * @param expCurSn 预期chunk版本,-1表示不存在 - * @param expSanpSn 预期快照版本,-1表示不存在 - * @param expLeader 预期redirectedLeader - * @return 验证成功返回0,否则返回-1 + * @param expCurSn Expected chunk version, -1 indicates non-existent + * @param expSnapSn Expected snapshot version, -1 indicates non-existent + * @param expLeader Expected redirectedLeader + * @return 0 if verification succeeds, otherwise -1 */ int VerifyGetChunkInfo(ChunkID chunkId, SequenceNum expCurSn, SequenceNum expSnapSn, string expLeader); private: - struct ChunkServiceOpConf *opConf_; - // 记录写过的chunkId(预期存在),用于判断请求的返回值是否符合预期 + struct ChunkServiceOpConf* opConf_; + // Records the chunkIds that have been written (and are expected to + // exist), used to check whether a request's return value meets + // expectations std::set existChunks_; }; diff --git a/test/integration/common/config_generator.h b/test/integration/common/config_generator.h index e838aed61f..84e32f47d1 100644 --- a/test/integration/common/config_generator.h +++ b/test/integration/common/config_generator.h @@ -40,7 +40,7 @@ class CSTConfigGenerator : public ConfigGenerator { CSTConfigGenerator() {} ~CSTConfigGenerator() {} bool Init(const std::string& port) { - // 加载配置文件模板 + // Load the configuration file template config_.SetConfigPath(DEFAULT_CHUNKSERVER_CONF); if (!config_.LoadConfig()) { return false;
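For orientation, a minimal usage sketch of the ChunkServiceVerify helper above — illustrative only, not part of the patch. opConf, kChunkSize and leaderAddr stand in for values a real test fixture would prepare; the concrete ids are hypothetical.

// Illustrative sketch (not part of the patch): driving ChunkServiceVerify
// from a test body. opConf, kChunkSize and leaderAddr come from the fixture.
ChunkServiceVerify verify(&opConf);
const ChunkID chunkId = 1;
const SequenceNum sn = 1;
std::string chunkData(kChunkSize, 0);  // expected content of the whole chunk
std::string toWrite(4096, 'a');
// Write 4KB at offset 0; the helper also patches chunkData so that later
// reads can be checked against it.
ASSERT_EQ(0, verify.VerifyWriteChunk(chunkId, sn, 0, toWrite.size(),
                                     toWrite.c_str(), &chunkData));
// Read the same range back and compare it against chunkData.
ASSERT_EQ(0, verify.VerifyReadChunk(chunkId, sn, 0, toWrite.size(),
                                    &chunkData));
// The chunk should now exist at version sn, with no snapshot (-1).
ASSERT_EQ(0, verify.VerifyGetChunkInfo(chunkId, sn, -1, leaderAddr));

diff --git a/test/integration/common/peer_cluster.cpp b/test/integration/common/peer_cluster.cpp index f09db13283..ab335a4328 100644 --- a/test/integration/common/peer_cluster.cpp +++ b/test/integration/common/peer_cluster.cpp @@ -22,39 +22,38 @@ #include 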
"test/integration/common/peer_cluster.h" -#include -#include #include +#include +#include +#include #include #include -#include -#include "src/chunkserver/cli2.h" -#include "src/chunkserver/register.h" +#include "proto/cli2.pb.h" #include "proto/copyset.pb.h" #include "src/chunkserver/chunkserver_helper.h" +#include "src/chunkserver/cli2.h" +#include "src/chunkserver/register.h" #include "src/fs/fs_common.h" -#include "proto/cli2.pb.h" namespace curve { namespace chunkserver { using curve::fs::FileSystemType; - -PeerCluster::PeerCluster(const std::string &clusterName, +PeerCluster::PeerCluster(const std::string& clusterName, const LogicPoolID logicPoolID, const CopysetID copysetID, - const std::vector &peers, - std::vector params, - std::map paramsIndexs) : - clusterName_(clusterName), - snapshotIntervalS_(1), - electionTimeoutMs_(1000), - paramsIndexs_(paramsIndexs), - params_(params), - isFakeMdsStart_(false) { + const std::vector& peers, + std::vector params, + std::map paramsIndexs) + : clusterName_(clusterName), + snapshotIntervalS_(1), + electionTimeoutMs_(1000), + paramsIndexs_(paramsIndexs), + params_(params), + isFakeMdsStart_(false) { logicPoolID_ = logicPoolID; copysetID_ = copysetID; for (auto it = peers.begin(); it != peers.end(); ++it) { @@ -63,7 +62,7 @@ PeerCluster::PeerCluster(const std::string &clusterName, } } -int PeerCluster::StartFakeTopoloyService(const std::string &listenAddr) { +int PeerCluster::StartFakeTopoloyService(const std::string& listenAddr) { if (isFakeMdsStart_) { return 0; } @@ -81,9 +80,7 @@ int PeerCluster::StartFakeTopoloyService(const std::string &listenAddr) { return ret; } -int PeerCluster::StartPeer(const Peer &peer, - int id, - const bool empty) { +int PeerCluster::StartPeer(const Peer& peer, int id, const bool empty) { LOG(INFO) << "going start peer: " << peer.address() << " " << id; auto it = peersMap_.find(peer.address()); if (it != peersMap_.end()) { @@ -109,18 +106,17 @@ int PeerCluster::StartPeer(const Peer &peer, LOG(ERROR) << "start peer fork failed"; return -1; } else if (0 == pid) { - /* 在子进程起一个 ChunkServer */ + /* Starting a ChunkServer in a child process */ StartPeerNode(id, params_[paramsIndexs_[id]]); exit(0); } LOG(INFO) << "start peer success, peer id = " << pid; peerNode->pid = pid; peerNode->state = PeerNodeState::RUNNING; - peersMap_.insert(std::pair>(peerId.to_string(), - std::move(peerNode))); + peersMap_.insert(std::pair>( + peerId.to_string(), std::move(peerNode))); - // 在创建copyset之前,先等chunkserver启动 + // Before creating a copyset, wait for chunkserver to start ::usleep(1500 * 1000); int ret = CreateCopyset(logicPoolID_, copysetID_, peer, peers_); @@ -133,7 +129,7 @@ int PeerCluster::StartPeer(const Peer &peer, return 0; } -int PeerCluster::ShutdownPeer(const Peer &peer) { +int PeerCluster::ShutdownPeer(const Peer& peer) { PeerId peerId(peer.address()); LOG(INFO) << "going to shutdown peer: " << peerId.to_string(); auto it = peersMap_.find(peerId.to_string()); @@ -141,8 +137,8 @@ int PeerCluster::ShutdownPeer(const Peer &peer) { int waitState; if (0 != kill(it->second->pid, SIGKILL)) { LOG(ERROR) << "Stop peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } waitpid(it->second->pid, &waitState, 0); @@ -156,7 +152,7 @@ int PeerCluster::ShutdownPeer(const Peer &peer) { } } -int PeerCluster::HangPeer(const Peer &peer) { +int PeerCluster::HangPeer(const Peer& peer) { LOG(INFO) << "peer cluster: hang 
" << peer.address(); PeerId peerId(peer.address()); auto it = peersMap_.find(peerId.to_string()); @@ -168,8 +164,8 @@ int PeerCluster::HangPeer(const Peer &peer) { } if (0 != kill(it->second->pid, SIGSTOP)) { LOG(ERROR) << "Hang peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } int waitState; @@ -184,7 +180,7 @@ int PeerCluster::HangPeer(const Peer &peer) { } } -int PeerCluster::SignalPeer(const Peer &peer) { +int PeerCluster::SignalPeer(const Peer& peer) { LOG(INFO) << "peer cluster: signal " << peer.address(); PeerId peerId(peer.address()); auto it = peersMap_.find(peerId.to_string()); @@ -196,8 +192,8 @@ int PeerCluster::SignalPeer(const Peer &peer) { } if (0 != kill(it->second->pid, SIGCONT)) { LOG(ERROR) << "Cont peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } int waitState; @@ -212,18 +208,17 @@ int PeerCluster::SignalPeer(const Peer &peer) { } } -int PeerCluster:: ConfirmLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const std::string& leaderAddr, - Peer *leader) { +int PeerCluster::ConfirmLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const std::string& leaderAddr, Peer* leader) { brpc::Channel channel; auto pos = leaderAddr.rfind(":"); std::string addr = leaderAddr.substr(0, pos); if (channel.Init(addr.c_str(), NULL) != 0) { - LOG(ERROR) <<"Fail to init channel to " << leaderAddr.c_str(); + LOG(ERROR) << "Fail to init channel to " << leaderAddr.c_str(); return -1; } - Peer *peer = new Peer(); + Peer* peer = new Peer(); CliService2_Stub stub(&channel); GetLeaderRequest2 request; GetLeaderResponse2 response; @@ -235,7 +230,7 @@ int PeerCluster:: ConfirmLeader(const LogicPoolID &logicPoolId, stub.GetLeader(&cntl, &request, &response, NULL); if (cntl.Failed()) { - LOG(ERROR) <<"confirm leader fail"; + LOG(ERROR) << "confirm leader fail"; return -1; } Peer leader2 = response.leader(); @@ -244,21 +239,21 @@ int PeerCluster:: ConfirmLeader(const LogicPoolID &logicPoolId, PeerId leaderId1; leaderId1.parse(leader->address()); if (leaderId2.is_empty()) { - LOG(ERROR) <<"Confirmed leaderId is null"; + LOG(ERROR) << "Confirmed leaderId is null"; return -1; } if (leaderId2 != leaderId1) { - LOG(INFO) << "twice leaderId is inconsistent, first is " - << leaderId1 << " second is " << leaderId2; + LOG(INFO) << "twice leaderId is inconsistent, first is " << leaderId1 + << " second is " << leaderId2; return -1; } return 0; } -int PeerCluster::WaitLeader(Peer *leaderPeer) { +int PeerCluster::WaitLeader(Peer* leaderPeer) { butil::Status status; /** - * 等待选举结束 + * Waiting for the election to end */ ::usleep(3 * electionTimeoutMs_ * 1000); const int kMaxLoop = (3 * electionTimeoutMs_) / 100; @@ -267,15 +262,17 @@ int PeerCluster::WaitLeader(Peer *leaderPeer) { status = GetLeader(logicPoolID_, copysetID_, conf_, leaderPeer); if (status.ok()) { /** - * 由于选举之后还需要提交应用 noop entry 之后才能提供服务, - * 所以这里需要等待 noop apply,这里等太短,可能容易失败,后期改进 + * Due to the need to submit the application noop entry after the + * election to provide services, So we need to wait for the noop + * application here. 
If the wait time is too short, it may be easy + * to fail, so we need to improve it later */ usleep(electionTimeoutMs_ * 1000); LOG(INFO) << "Wait leader success, leader is: " << leaderPeer->address(); std::string leaderAddr = leaderPeer->address(); - int ret = ConfirmLeader(logicPoolID_, copysetID_, - leaderAddr, leaderPeer); + int ret = + ConfirmLeader(logicPoolID_, copysetID_, leaderAddr, leaderPeer); if (ret == 0) { return ret; } @@ -299,9 +296,7 @@ int PeerCluster::StopAllPeers() { return 0; } -Configuration PeerCluster::CopysetConf() const { - return conf_; -} +Configuration PeerCluster::CopysetConf() const { return conf_; } int PeerCluster::SetsnapshotIntervalS(int snapshotIntervalS) { snapshotIntervalS_ = snapshotIntervalS; @@ -313,10 +308,10 @@ int PeerCluster::SetElectionTimeoutMs(int electionTimeoutMs) { return 0; } -int PeerCluster::StartPeerNode(int id, char *arg[]) { +int PeerCluster::StartPeerNode(int id, char* arg[]) { struct RegisterOptions opt; - opt.chunkserverMetaUri = "local://./" + std::to_string(id) + - "/chunkserver.dat"; + opt.chunkserverMetaUri = + "local://./" + std::to_string(id) + "/chunkserver.dat"; opt.fs = fs_; Register regist(opt); @@ -334,52 +329,43 @@ int PeerCluster::StartPeerNode(int id, char *arg[]) { return 0; } -const std::string PeerCluster::CopysetDirWithProtocol(const Peer &peer) { +const std::string PeerCluster::CopysetDirWithProtocol(const Peer& peer) { PeerId peerId(peer.address()); std::string copysetdir; - butil::string_printf(©setdir, - "local://./%s-%d-%d", + butil::string_printf(©setdir, "local://./%s-%d-%d", butil::ip2str(peerId.addr.ip).c_str(), - peerId.addr.port, - 0); + peerId.addr.port, 0); return copysetdir; } -const std::string PeerCluster::CopysetDirWithoutProtocol(const Peer &peer) { +const std::string PeerCluster::CopysetDirWithoutProtocol(const Peer& peer) { PeerId peerId(peer.address()); std::string copysetdir; - butil::string_printf(©setdir, - "./%s-%d-%d", + butil::string_printf(©setdir, "./%s-%d-%d", butil::ip2str(peerId.addr.ip).c_str(), - peerId.addr.port, - 0); + peerId.addr.port, 0); return copysetdir; } -const std::string PeerCluster::RemoveCopysetDirCmd(const Peer &peer) { +const std::string PeerCluster::RemoveCopysetDirCmd(const Peer& peer) { PeerId peerId(peer.address()); std::string cmd; - butil::string_printf(&cmd, - "rm -fr %d/copysets", peerId.addr.port); + butil::string_printf(&cmd, "rm -fr %d/copysets", peerId.addr.port); return cmd; } -const std::string PeerCluster::RemoveCopysetLogDirCmd(const Peer &peer, +const std::string PeerCluster::RemoveCopysetLogDirCmd(const Peer& peer, LogicPoolID logicPoolID, CopysetID copysetID) { PeerId peerId(peer.address()); std::string cmd; - butil::string_printf(&cmd, - "rm -fr %d/copysets/%s", - peerId.addr.port, + butil::string_printf(&cmd, "rm -fr %d/copysets/%s", peerId.addr.port, ToGroupIdString(logicPoolID, copysetID).c_str()); return cmd; } -int PeerCluster::CreateCopyset(LogicPoolID logicPoolID, - CopysetID copysetID, - Peer peer, - const std::vector& peers) { +int PeerCluster::CreateCopyset(LogicPoolID logicPoolID, CopysetID copysetID, + Peer peer, const std::vector& peers) { LOG(INFO) << "PeerCluster begin create copyset: " << ToGroupIdString(logicPoolID, copysetID); @@ -403,17 +389,17 @@ int PeerCluster::CreateCopyset(LogicPoolID logicPoolID, CopysetService_Stub stub(&channel); stub.CreateCopysetNode(&cntl, &request, &response, nullptr); if (cntl.Failed()) { - LOG(ERROR) << "failed create copsyet, " - << cntl.ErrorText() << std::endl; + LOG(ERROR) << "failed create 
copsyet, " << cntl.ErrorText() + << std::endl; ::usleep(1000 * 1000); continue; } - if (response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS - || response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { //NOLINT - LOG(INFO) << "create copyset " << ToGroupIdString(logicPoolID, - copysetID) - << " success."; + if (response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS || + response.status() == + COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { // NOLINT + LOG(INFO) << "create copyset " + << ToGroupIdString(logicPoolID, copysetID) << " success."; return 0; } @@ -423,14 +409,13 @@ int PeerCluster::CreateCopyset(LogicPoolID logicPoolID, return -1; } -int PeerCluster::PeerToId(const Peer &peer) { +int PeerCluster::PeerToId(const Peer& peer) { PeerId peerId(peer.address()); return peerId.addr.port; } -int PeerCluster::GetFollwerPeers(const std::vector& peers, - Peer leader, - std::vector *followers) { +int PeerCluster::GetFollwerPeers(const std::vector& peers, Peer leader, + std::vector* followers) { for (auto& peer : peers) { if (leader.address() != peer.address()) { followers->push_back(peer); @@ -442,28 +427,23 @@ int PeerCluster::GetFollwerPeers(const std::vector& peers, ChunkServerID PeerCluster::chunkServerId_ = 0; -std::shared_ptr PeerCluster::fs_ - = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); +std::shared_ptr PeerCluster::fs_ = + LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); /** - * 正常 I/O 验证,先写进去,再读出来验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, write it in first, then read it out for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 - * @param sn 本次写入的版本号 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO + * @param sn The version number written this time */ -void WriteThenReadVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop, - uint64_t sn) { +void WriteThenReadVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop, uint64_t sn) { LOG(INFO) << "Write then read verify: " << fillCh; PeerId leaderId(leaderPeer.address()); @@ -486,9 +466,8 @@ void WriteThenReadVerify(Peer leaderPeer, request.set_sn(sn); cntl.request_attachment().resize(length, fillCh); stub.WriteChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode() + << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); @@ -507,9 +486,8 @@ void WriteThenReadVerify(Peer leaderPeer, request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode() + << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); @@ -521,22 +499,17 @@ void WriteThenReadVerify(Peer leaderPeer, } /** - * 正常 I/O 验证,read 数据验证 - * 
@param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, read data verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +void ReadVerify(Peer leaderPeer, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkId, int length, char fillCh, int loop) { LOG(INFO) << "Read verify: " << fillCh; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -556,12 +529,10 @@ void ReadVerify(Peer leaderPeer, request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); std::string expectRead(length, fillCh); ASSERT_STREQ(expectRead.c_str(), cntl.response_attachment().to_string().c_str()); @@ -569,22 +540,18 @@ void ReadVerify(Peer leaderPeer, } /** - * 读chunk的snapshot进行验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify by reading the snapshot of the chunk + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadSnapshotVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +void ReadSnapshotVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { LOG(INFO) << "Read snapshot verify: " << fillCh; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -592,7 +559,7 @@ void ReadSnapshotVerify(Peer leaderPeer, ChunkService_Stub stub(&channel); - // 获取chunk的快照版本 + // Obtain the snapshot version of the chunk uint64_t snapSn; { brpc::Controller cntl; @@ -603,12 +570,10 @@ void ReadSnapshotVerify(Peer leaderPeer, request.set_copysetid(copysetId); request.set_chunkid(chunkId); stub.GetChunkInfo(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); ASSERT_EQ(2, response.chunksn_size()); snapSn = std::min(response.chunksn(0), response.chunksn(1)); } @@ -622,16 +587,14 @@ void ReadSnapshotVerify(Peer leaderPeer, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); 
request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(snapSn); stub.ReadChunkSnapshot(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); std::string expectRead(length, fillCh); ASSERT_STREQ(expectRead.c_str(), cntl.response_attachment().to_string().c_str()); @@ -639,18 +602,15 @@ void ReadSnapshotVerify(Peer leaderPeer, } /** - * 删除chunk的snapshot进行验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Delete snapshot of chunk for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id * @param csn corrected sn */ -void DeleteSnapshotVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - uint64_t csn) { +void DeleteSnapshotVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, uint64_t csn) { LOG(INFO) << "Delete snapshot verify, csn: " << csn; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -668,31 +628,25 @@ void DeleteSnapshotVerify(Peer leaderPeer, request.set_chunkid(chunkId); request.set_correctedsn(csn); stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } /** - * 异常I/O验证,read数据不符合预期 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Abnormal I/O verification, read data does not meet expectations + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadNotVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +void ReadNotVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { LOG(INFO) << "Read not verify: " << fillCh; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -712,12 +666,10 @@ void ReadNotVerify(Peer leaderPeer, request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); std::string 
expectRead(length, fillCh); ASSERT_STRNE(expectRead.c_str(), cntl.response_attachment().to_string().c_str()); @@ -725,22 +677,18 @@ void ReadNotVerify(Peer leaderPeer, } /** - * 通过read验证可用性 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify availability through read + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadVerifyNotAvailable(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +void ReadVerifyNotAvailable(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { LOG(INFO) << "Read verify not available: " << fillCh; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -760,32 +708,28 @@ void ReadVerifyNotAvailable(Peer leaderPeer, request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); LOG(INFO) << "read: " << CHUNK_OP_STATUS_Name(response.status()); ASSERT_TRUE(cntl.Failed() || - response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response.status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } } /** - * 通过write验证可用性 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify availability through write + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void WriteVerifyNotAvailable(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +void WriteVerifyNotAvailable(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { LOG(INFO) << "Write verify not available: " << fillCh; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -807,23 +751,22 @@ void WriteVerifyNotAvailable(Peer leaderPeer, request.set_sn(sn); cntl.request_attachment().resize(length, fillCh); stub.WriteChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_TRUE(cntl.Failed() || - response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response.status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } } /** - * 验证几个副本的copyset status是否一致 - * @param peerIds: 待验证的peers - * @param logicPoolID: 逻辑池id - * @param copysetId: 复制组id + * Verify if the copyset status of several replicas is consistent + * @param peerIds: peers to be verified + * @param logicPoolID: Logical Pool ID + * @param copysetId: Copy group ID */ -void CopysetStatusVerify(const 
std::vector &peers, - LogicPoolID logicPoolID, - CopysetID copysetId, +void CopysetStatusVerify(const std::vector& peers, + LogicPoolID logicPoolID, CopysetID copysetId, uint64_t expectEpoch) { std::vector resps; for (Peer peer : peers) { @@ -838,7 +781,7 @@ void CopysetStatusVerify(const std::vector &peers, cntl.set_timeout_ms(2000); request.set_logicpoolid(logicPoolID); request.set_copysetid(copysetId); - Peer *peerP = new Peer(); + Peer* peerP = new Peer(); request.set_allocated_peer(peerP); peerP->set_address(peerId.to_string()); request.set_queryhash(true); @@ -847,7 +790,8 @@ ASSERT_FALSE(cntl.Failed()); LOG(INFO) << peerId.to_string() << "'s status is: \n" << response.DebugString(); - // 多个副本的state是不一样的,因为有leader,也有follower + // The states of multiple replicas are different because there are + // leaders and followers response.clear_state(); response.clear_peer(); response.clear_firstindex(); @@ -869,20 +813,15 @@ } } - - -void TransferLeaderAssertSuccess(PeerCluster *cluster, - const Peer &targetLeader, +void TransferLeaderAssertSuccess(PeerCluster* cluster, const Peer& targetLeader, braft::cli::CliOptions opt) { Peer leaderPeer; const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(cluster->GetLogicPoolId(), - cluster->GetCopysetId(), - cluster->CopysetConf(), - targetLeader, - opt); + status = + TransferLeader(cluster->GetLogicPoolId(), cluster->GetCopysetId(), + cluster->CopysetConf(), targetLeader, opt); if (0 == status.error_code()) { cluster->WaitLeader(&leaderPeer); if (leaderPeer.address() == targetLeader.address()) { @@ -891,8 +830,7 @@ } ::sleep(1); } - ASSERT_STREQ(targetLeader.address().c_str(), - leaderPeer.address().c_str()); + ASSERT_STREQ(targetLeader.address().c_str(), leaderPeer.address().c_str()); } } // namespace chunkserver
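A rough sketch of the end-to-end test flow these helpers support — illustrative only, not part of the patch. peers, params and paramsIndexs are assumed to be prepared by the test fixture; the port, pool and copyset ids are hypothetical.

// Illustrative sketch (not part of the patch): a typical PeerCluster flow.
PeerCluster cluster("demo-cluster", /*logicPoolID=*/2, /*copysetID=*/10001,
                    peers, params, paramsIndexs);
ASSERT_EQ(0, cluster.StartFakeTopoloyService("127.0.0.1:9080"));
for (const Peer& peer : peers) {
    // Each peer runs as a forked chunkserver process.
    ASSERT_EQ(0, cluster.StartPeer(peer, PeerCluster::PeerToId(peer)));
}
Peer leader;
ASSERT_EQ(0, cluster.WaitLeader(&leader));  // blocks until a leader is elected
// Normal I/O: write a pattern through the leader, then read it back.
WriteThenReadVerify(leader, cluster.GetLogicPoolId(), cluster.GetCopysetId(),
                    /*chunkId=*/1, /*length=*/4096, 'a', /*loop=*/10,
                    /*sn=*/1);
cluster.StopAllPeers();  // also runs from ~PeerCluster(), so optional here

diff --git a/test/integration/common/peer_cluster.h b/test/integration/common/peer_cluster.h index 4a5fcacb58..24b2c2d63e 100644 --- a/test/integration/common/peer_cluster.h +++ b/test/integration/common/peer_cluster.h @@ -23,29 +23,29 @@ #ifndef TEST_INTEGRATION_COMMON_PEER_CLUSTER_H_ #define TEST_INTEGRATION_COMMON_PEER_CLUSTER_H_ +#include +#include #include #include #include -#include -#include -#include -#include -#include #include -#include #include +#include +#include +#include +#include -#include "src/chunkserver/datastore/file_pool.h" #include "include/chunkserver/chunkserver_common.h" -#include "src/fs/local_filesystem.h" -#include "src/chunkserver/copyset_node.h" #include "proto/common.pb.h" #include "proto/topology.pb.h" +#include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/datastore/file_pool.h" +#include "src/fs/local_filesystem.h" -using ::curve::mds::topology::TopologyService; using ::curve::mds::topology::ChunkServerRegistRequest; using ::curve::mds::topology::ChunkServerRegistResponse; +using ::curve::mds::topology::TopologyService; namespace curve { namespace chunkserver { @@ -53,37 +53,37 @@ namespace chunkserver { using curve::common::Peer; /** - * PeerNode 状态 - * 1. exit:未启动,或者被关闭 - * 2. running:正在运行 - * 3. stop:hang 住了 + * PeerNode status + * 1. exit: Not started or closed + * 2. running: Running + * 3. 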
stop: hung */ enum class PeerNodeState { - EXIT = 0, // 退出 - RUNNING = 1, // 正在运行 - STOP = 2, // hang住 + EXIT = 0, // Exit + RUNNING = 1, // Running + STOP = 2, // Hung }; /** - * 一个 ChunkServer 进程,包含某个 Copyset 的某个副本 + * A ChunkServer process holding one replica of a copyset */ struct PeerNode { PeerNode() : pid(0), state(PeerNodeState::EXIT) {} - // Peer对应的进程id + // Process ID corresponding to Peer pid_t pid; // Peer Peer peer; - // copyset的集群配置 + // Cluster configuration for copyset Configuration conf; - // PeerNode的状态 + // Status of PeerNode PeerNodeState state; }; class FakeTopologyService : public TopologyService { void RegistChunkServer(google::protobuf::RpcController* cntl_base, - const ChunkServerRegistRequest* request, - ChunkServerRegistResponse* response, - google::protobuf::Closure* done) { + const ChunkServerRegistRequest* request, + ChunkServerRegistResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); response->set_statuscode(0); response->set_chunkserverid(request->chunkserverid()); @@ -92,16 +92,13 @@ class FakeTopologyService : public TopologyService { }; /** - * 封装模拟cluster测试相关的接口 + * Wraps the interfaces used for simulated-cluster testing */ class PeerCluster { public: - PeerCluster(const std::string &clusterName, - const LogicPoolID logicPoolID, - const CopysetID copysetID, - const std::vector &peers, - std::vector params, - std::map paramsIndexs); + PeerCluster(const std::string& clusterName, const LogicPoolID logicPoolID, + const CopysetID copysetID, const std::vector& peers, + std::vector params, std::map paramsIndexs); virtual ~PeerCluster() { StopAllPeers(); if (isFakeMdsStart_) { @@ -116,139 +113,134 @@ class PeerCluster { * * @return 0 for success, -1 for failed */ - int StartFakeTopoloyService(const std::string &listenAddr); + int StartFakeTopoloyService(const std::string& listenAddr); /** - * 启动一个 Peer + * Start a Peer * @param peer - * @param empty初始化配置是否为空 - * @return 0,成功;-1,失败 + * @param empty Whether the initial configuration is empty + * @return 0 on success, -1 on failure */ - int StartPeer(const Peer &peer, - int id, - const bool empty = false); + int StartPeer(const Peer& peer, int id, const bool empty = false); /** - * 关闭一个peer,使用SIGINT + * Shut down a peer using SIGINT * @param peer - * @return 0 成功;-1 失败 + * @return 0 on success, -1 on failure */ - int ShutdownPeer(const Peer &peer); - + int ShutdownPeer(const Peer& peer); /** - * hang住一个peer,使用SIGSTOP + * Hang a peer using SIGSTOP * @param peer - * @return 0成功;-1失败 + * @return 0 on success, -1 on failure */ - int HangPeer(const Peer &peer); + int HangPeer(const Peer& peer); /** - * 恢复hang住的peer,使用SIGCONT - * @param peer - * @return 0:成功,-1 失败 - */ - int SignalPeer(const Peer &peer); + * Resume a hung peer using SIGCONT + * @param peer + * @return 0 on success, -1 on failure + */ + int SignalPeer(const Peer& peer); /** - * 反复重试直到等到新的leader产生 - * @param leaderPeer出参,返回leader info - * @return 0,成功;-1 失败 + * Retry repeatedly until a new leader is elected + * @param leaderPeer Output parameter, returns the leader info + * @return 0 on success, -1 on failure */ - int WaitLeader(Peer *leaderPeer); + int WaitLeader(Peer* leaderPeer); /** - * confirm leader + * confirm leader * @param: LogicPoolID logicalPool id * @param: copysetId copyset id * @param: leaderAddr leader address - * @param: leader leader info + * @param: leader leader information + * @return 0 on success, -1 on failure */ - int ConfirmLeader(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const std::string& leaderAddr, - Peer *leader); - + int ConfirmLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const std::string& leaderAddr, + Peer* leader); /** - * Stop所有的peer - * @return 0,成功;-1 失败 + * Stop all peers + * @return 0 on success, -1 on failure */ int StopAllPeers(); public: - /* 返回集群当前的配置 */ + /* Returns the current configuration of the cluster */ Configuration CopysetConf() const; - LogicPoolID GetLogicPoolId() const {return logicPoolID_;} + LogicPoolID GetLogicPoolId() const { return logicPoolID_; } - CopysetID GetCopysetId() const {return copysetID_;} + CopysetID GetCopysetId() const { return copysetID_; } - void SetWorkingCopyset(CopysetID copysetID) {copysetID_ = copysetID;} + void SetWorkingCopyset(CopysetID copysetID) { copysetID_ = copysetID; } - /* 修改 PeerNode 配置相关的接口,单位: s */ + /* Interfaces for modifying the PeerNode configuration, unit: s */ int SetsnapshotIntervalS(int snapshotIntervalS); int SetElectionTimeoutMs(int electionTimeoutMs); - static int StartPeerNode(int id, char *arg[]); + static int StartPeerNode(int id, char* arg[]); - static int PeerToId(const Peer &peer); + static int PeerToId(const Peer& peer); - static int GetFollwerPeers(const std::vector& peers, - Peer leader, - std::vector *followers); + static int GetFollwerPeers(const std::vector& peers, Peer leader, + std::vector* followers); public: /** - * 返回执行peer的copyset路径with protocol, ex: local://./127.0.0.1:9101:0 + * Returns the copyset path of the given peer, with protocol, ex: + * local://./127.0.0.1:9101:0 */ - static const std::string CopysetDirWithProtocol(const Peer &peer); + static const std::string CopysetDirWithProtocol(const Peer& peer); /** - * 返回执行peer的copyset路径without protocol, ex: ./127.0.0.1:9101:0 + * Returns the copyset path of the given peer, without protocol, ex: + * ./127.0.0.1:9101:0 */ - static const std::string CopysetDirWithoutProtocol(const Peer &peer); + static const std::string CopysetDirWithoutProtocol(const Peer& peer); /** * remove peer's copyset 
dir's cmd */ - static const std::string RemoveCopysetDirCmd(const Peer &peer); + static const std::string RemoveCopysetDirCmd(const Peer& peer); - static const std::string RemoveCopysetLogDirCmd(const Peer &peer, + static const std::string RemoveCopysetLogDirCmd(const Peer& peer, LogicPoolID logicPoolID, CopysetID copysetID); - static int CreateCopyset(LogicPoolID logicPoolID, - CopysetID copysetID, - Peer peer, - const std::vector& peers); + static int CreateCopyset(LogicPoolID logicPoolID, CopysetID copysetID, + Peer peer, const std::vector& peers); private: - // 集群名字 - std::string clusterName_; - // 集群的peer集合 - std::vector peers_; - // peer集合的映射map + // Cluster Name + std::string clusterName_; + // The peer set of the cluster + std::vector peers_; + // Mapping Map of Peer Set std::unordered_map> peersMap_; - // 快照间隔 - int snapshotIntervalS_; - // 选举超时时间 - int electionTimeoutMs_; - // 集群成员配置 - Configuration conf_; - - // 逻辑池id - LogicPoolID logicPoolID_; - // 复制组id - CopysetID copysetID_; + // Snapshot interval + int snapshotIntervalS_; + // Election timeout + int electionTimeoutMs_; + // Cluster member configuration + Configuration conf_; + + // Logical Pool ID + LogicPoolID logicPoolID_; + // Copy Group ID + CopysetID copysetID_; // chunkserver id - static ChunkServerID chunkServerId_; - // 文件系统适配层 + static ChunkServerID chunkServerId_; + // File System Adaptation Layer static std::shared_ptr fs_; - // chunkserver启动传入参数的映射关系(chunkserver id: params_'s index) + // chunkserver starts the mapping relationship of incoming parameters + // (chunkserver id: params_'s index) std::map paramsIndexs_; - // chunkserver启动需要传递的参数列表 - std::vector params_; + // List of parameters to be passed for chunkserver startup + std::vector params_; // fake mds server brpc::Server fakeMdsServer_; @@ -259,148 +251,117 @@ class PeerCluster { }; /** - * 正常 I/O 验证,先写进去,再读出来验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, write it in first, then read it out for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 - * @param sn 本次写入的版本号 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO + * @param sn The version number written this time */ -void WriteThenReadVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop, - uint64_t sn = 1); +void WriteThenReadVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop, uint64_t sn = 1); /** - * 正常 I/O 验证,read 数据验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, read data verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop); +void ReadVerify(Peer leaderPeer, 
LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkId, int length, char fillCh, int loop); /** - * 读chunk的snapshot进行验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify by reading the snapshot of the chunk + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadSnapshotVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop); +void ReadSnapshotVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop); /** - * 删除chunk的snapshot进行验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param csn corrected sn + * Delete snapshot of chunk for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param csn corrected sn */ -void DeleteSnapshotVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - uint64_t csn); +void DeleteSnapshotVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, uint64_t csn); /** - * 异常I/O验证,read数据不符合预期 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Abnormal I/O verification, read data does not meet expectations + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadNotVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop); +void ReadNotVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop); /** - * 通过read验证可用性 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify availability through read + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 
填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void WriteVerifyNotAvailable(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop); +void WriteVerifyNotAvailable(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop); /** - * 验证几个副本的copyset status是否一致 - * @param peerIds: 待验证的peers - * @param logicPoolID: 逻辑池id - * @param copysetId: 复制组id + * Verify if the copyset status of several replicas is consistent + * @param peerIds: Peers to be verified + * @param logicPoolID: Logical Pool ID + * @param copysetId: Copy group ID */ -void CopysetStatusVerify(const std::vector &peers, - LogicPoolID logicPoolID, - CopysetID copysetId, +void CopysetStatusVerify(const std::vector& peers, + LogicPoolID logicPoolID, CopysetID copysetId, uint64_t expectEpoch = 0); /** - * transfer leader,并且预期能够成功 - * @param cluster: 集群的指针 - * @param targetLeader: 期望tranfer的目标节点 - * @param opt: tranfer 请求使用的 clioption + * Transfer leader, expecting it to succeed + * @param cluster: Pointer to the cluster + * @param targetLeader: The target node for the expected transfer + * @param opt: The clioption used in the transfer request */ -void TransferLeaderAssertSuccess(PeerCluster *cluster, - const Peer &targetLeader, +void TransferLeaderAssertSuccess(PeerCluster* cluster, const Peer& targetLeader, braft::cli::CliOptions opt); } // namespace chunkserver
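A companion sketch for the fault-injection primitives declared above — illustrative only, not part of the patch. It continues the hypothetical cluster, peers and leader from the previous sketch and assumes a 3-replica copyset.

// Illustrative sketch (not part of the patch): hang/resume followers and
// check availability with the declared verifiers.
std::vector<Peer> followers;
PeerCluster::GetFollwerPeers(peers, leader, &followers);
// With one follower SIGSTOPped, the remaining majority (2/3) still serves
// I/O.
ASSERT_EQ(0, cluster.HangPeer(followers[0]));
WriteThenReadVerify(leader, cluster.GetLogicPoolId(), cluster.GetCopysetId(),
                    /*chunkId=*/1, /*length=*/4096, 'b', /*loop=*/1, /*sn=*/1);
// Hanging a second follower costs the copyset its majority, so writes must
// fail.
ASSERT_EQ(0, cluster.HangPeer(followers[1]));
WriteVerifyNotAvailable(leader, cluster.GetLogicPoolId(),
                        cluster.GetCopysetId(), /*chunkId=*/1,
                        /*length=*/4096, 'c', /*loop=*/1);
// SIGCONT both followers; the copyset becomes writable again.
ASSERT_EQ(0, cluster.SignalPeer(followers[0]));
ASSERT_EQ(0, cluster.SignalPeer(followers[1]));

diff --git a/test/integration/heartbeat/common.cpp b/test/integration/heartbeat/common.cpp index 5d09293287..ae597506bc 100644 --- a/test/integration/heartbeat/common.cpp +++ b/test/integration/heartbeat/common.cpp @@ -21,44 +21,44 @@ */ #include "test/integration/heartbeat/common.h" + #include "test/mds/mock/mock_alloc_statistic.h" namespace curve { namespace mds { -void HeartbeatIntegrationCommon::PrepareAddPoolset( - const Poolset &poolset) { +void HeartbeatIntegrationCommon::PrepareAddPoolset(const Poolset& poolset) { int ret = topology_->AddPoolset(poolset); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret); } void HeartbeatIntegrationCommon::PrepareAddLogicalPool( - const LogicalPool &lpool) { + const LogicalPool& lpool) { int ret = topology_->AddLogicalPool(lpool); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; } void HeartbeatIntegrationCommon::PrepareAddPhysicalPool( - const PhysicalPool &ppool) { + const PhysicalPool& ppool) { int ret = topology_->AddPhysicalPool(ppool); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret); } -void HeartbeatIntegrationCommon::PrepareAddZone(const Zone &zone) { +void HeartbeatIntegrationCommon::PrepareAddZone(const Zone& zone) { int ret = topology_->AddZone(zone); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } -void HeartbeatIntegrationCommon::PrepareAddServer(const Server &server) { +void HeartbeatIntegrationCommon::PrepareAddServer(const Server& server) { int ret = topology_->AddServer(server); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } void HeartbeatIntegrationCommon::PrepareAddChunkServer( - const ChunkServer &chunkserver) { + const ChunkServer& chunkserver) { ChunkServer cs(chunkserver); cs.SetOnlineState(OnlineState::ONLINE); int ret = topology_->AddChunkServer(cs); @@ -68,7 +68,7 @@ void 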
HeartbeatIntegrationCommon::PrepareAddCopySet( CopySetIdType copysetId, PoolIdType logicalPoolId, - const std::set &members) { + const std::set& members) { CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); int ret = topology_->AddCopySet(cs); @@ -78,10 +78,10 @@ void HeartbeatIntegrationCommon::PrepareAddCopySet( void HeartbeatIntegrationCommon::UpdateCopysetTopo( CopySetIdType copysetId, PoolIdType logicalPoolId, uint64_t epoch, - ChunkServerIdType leader, const std::set &members, + ChunkServerIdType leader, const std::set& members, ChunkServerIdType candidate) { ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE(topology_->GetCopySet(CopySetKey{ logicalPoolId, copysetId }, + ASSERT_TRUE(topology_->GetCopySet(CopySetKey{logicalPoolId, copysetId}, ©setInfo)); copysetInfo.SetEpoch(epoch); copysetInfo.SetLeader(leader); @@ -93,8 +93,8 @@ void HeartbeatIntegrationCommon::UpdateCopysetTopo( } void HeartbeatIntegrationCommon::SendHeartbeat( - const ChunkServerHeartbeatRequest &request, bool expectFailed, - ChunkServerHeartbeatResponse *response) { + const ChunkServerHeartbeatRequest& request, bool expectFailed, + ChunkServerHeartbeatResponse* response) { // init brpc client brpc::Channel channel; ASSERT_EQ(0, channel.Init(listenAddr_.c_str(), NULL)); @@ -109,7 +109,7 @@ void HeartbeatIntegrationCommon::SendHeartbeat( } void HeartbeatIntegrationCommon::BuildBasicChunkServerRequest( - ChunkServerIdType id, ChunkServerHeartbeatRequest *req) { + ChunkServerIdType id, ChunkServerHeartbeatRequest* req) { ChunkServer out; EXPECT_TRUE(topology_->GetChunkServer(id, &out)) << "get chunkserver: " << id << " fail"; @@ -139,7 +139,7 @@ void HeartbeatIntegrationCommon::BuildBasicChunkServerRequest( } void HeartbeatIntegrationCommon::AddCopySetToRequest( - ChunkServerHeartbeatRequest *req, const CopySetInfo &csInfo, + ChunkServerHeartbeatRequest* req, const CopySetInfo& csInfo, ConfigChangeType type) { auto info = req->add_copysetinfos(); info->set_logicalpoolid(csInfo.GetLogicalPoolId()); @@ -170,7 +170,7 @@ void HeartbeatIntegrationCommon::AddCopySetToRequest( << "get chunkserver: " << csInfo.GetCandidate() << " error"; std::string ipport = out.GetHostIp() + ":" + std::to_string(out.GetPort()) + ":0"; - ConfigChangeInfo *confChxInfo = new ConfigChangeInfo(); + ConfigChangeInfo* confChxInfo = new ConfigChangeInfo(); auto replica = new ::curve::common::Peer(); replica->set_address(ipport.c_str()); confChxInfo->set_allocated_peer(replica); @@ -180,13 +180,13 @@ void HeartbeatIntegrationCommon::AddCopySetToRequest( } } -void HeartbeatIntegrationCommon::AddOperatorToOpController(const Operator &op) { +void HeartbeatIntegrationCommon::AddOperatorToOpController(const Operator& op) { auto opController = coordinator_->GetOpController(); ASSERT_TRUE(opController->AddOperator(op)); } void HeartbeatIntegrationCommon::RemoveOperatorFromOpController( - const CopySetKey &id) { + const CopySetKey& id) { auto opController = coordinator_->GetOpController(); opController->RemoveOperator(id); } @@ -243,11 +243,11 @@ void HeartbeatIntegrationCommon::PrepareBasicCluseter() { PrepareAddChunkServer(cs3); // add copyset - PrepareAddCopySet(1, 1, std::set{ 1, 2, 3 }); + PrepareAddCopySet(1, 1, std::set{1, 2, 3}); } void HeartbeatIntegrationCommon::InitHeartbeatOption( - Configuration *conf, HeartbeatOption *heartbeatOption) { + Configuration* conf, HeartbeatOption* heartbeatOption) { heartbeatOption->heartbeatIntervalMs = conf->GetIntValue("mds.heartbeat.intervalMs"); 
heartbeatOption->heartbeatMissTimeOutMs = @@ -259,7 +259,7 @@ void HeartbeatIntegrationCommon::InitHeartbeatOption( } void HeartbeatIntegrationCommon::InitSchedulerOption( - Configuration *conf, ScheduleOption *scheduleOption) { + Configuration* conf, ScheduleOption* scheduleOption) { scheduleOption->enableCopysetScheduler = conf->GetBoolValue("mds.enable.copyset.scheduler"); scheduleOption->enableLeaderScheduler = @@ -305,22 +305,20 @@ void HeartbeatIntegrationCommon::BuildBasicCluster() { auto idGen = std::make_shared(); auto tokenGen = std::make_shared(); - auto topologyStorage = - std::make_shared(); + auto topologyStorage = std::make_shared(); topology_ = std::make_shared(idGen, tokenGen, topologyStorage); ASSERT_EQ(kTopoErrCodeSuccess, topology_->Init(topologyOption)); // init topology manager - topologyStat_ = - std::make_shared(topology_); + topologyStat_ = std::make_shared(topology_); topologyStat_->Init(); auto copysetManager = std::make_shared(CopysetOption()); auto allocStat = std::make_shared(); auto topologyServiceManager = std::make_shared( topology_, topologyStat_, nullptr, copysetManager, nullptr); - // 初始化basic集群 + // Initialize basic cluster PrepareBasicCluseter(); // init coordinator @@ -341,7 +339,7 @@ heartbeatManager_->Init(); heartbeatManager_->Run(); - // 启动心跳rpc + // Start heartbeat rpc listenAddr_ = conf_.GetStringValue("mds.listen.addr"); heartbeatService_ = std::make_shared(heartbeatManager_);
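Finally, a minimal sketch of driving the heartbeat harness built above — illustrative only, not part of the patch. conf is assumed to be a Configuration loaded from an mds conf file by the fixture, and the response field name is assumed from proto/heartbeat.proto.

// Illustrative sketch (not part of the patch): one heartbeat round trip.
// `conf` comes from the fixture; chunkserver id 1 exists in the basic
// cluster prepared by BuildBasicCluster().
HeartbeatIntegrationCommon common(conf);
common.BuildBasicCluster();  // topology + coordinator + heartbeat service

ChunkServerHeartbeatRequest req;
common.BuildBasicChunkServerRequest(/*id=*/1, &req);

ChunkServerHeartbeatResponse rep;
common.SendHeartbeat(req, /*expectFailed=*/false, &rep);
// A plain heartbeat should trigger no copyset configuration changes
// (field name assumed from proto/heartbeat.proto).
ASSERT_EQ(0, rep.needupdatecopysets_size());

diff --git a/test/integration/heartbeat/common.h b/test/integration/heartbeat/common.h index b281d5a9ab..7787a22910 100644 --- a/test/integration/heartbeat/common.h +++ b/test/integration/heartbeat/common.h @@ -23,41 +23,41 @@ #ifndef TEST_INTEGRATION_HEARTBEAT_COMMON_H_ #define TEST_INTEGRATION_HEARTBEAT_COMMON_H_ -#include -#include #include #include +#include +#include -#include -#include //NOLINT -#include //NOLINT -#include +#include //NOLINT #include +#include +#include #include +#include //NOLINT +#include #include -#include +#include "proto/common.pb.h" +#include "proto/heartbeat.pb.h" +#include "proto/topology.pb.h" #include "src/common/configuration.h" -#include "src/mds/topology/topology_config.h" +#include "src/common/timeutility.h" #include "src/mds/common/mds_define.h" -#include "src/mds/topology/topology_item.h" -#include "src/mds/topology/topology.h" -#include "src/mds/topology/topology_token_generator.h" -#include "src/mds/topology/topology_service_manager.h" -#include "src/mds/topology/topology_storge.h" -#include "src/mds/nameserver2/idgenerator/chunk_id_generator.h" +#include "src/mds/copyset/copyset_config.h" +#include "src/mds/copyset/copyset_manager.h" +#include "src/mds/heartbeat/chunkserver_healthy_checker.h" #include "src/mds/heartbeat/heartbeat_manager.h" #include "src/mds/heartbeat/heartbeat_service.h" -#include "src/mds/heartbeat/chunkserver_healthy_checker.h" -#include "src/mds/schedule/topoAdapter.h" +#include "src/mds/nameserver2/idgenerator/chunk_id_generator.h" #include "src/mds/schedule/operator.h" -#include "src/mds/copyset/copyset_manager.h" -#include "src/mds/copyset/copyset_config.h" #include "src/mds/schedule/scheduleMetrics.h" -#include "proto/topology.pb.h" -#include "proto/heartbeat.pb.h" -#include "proto/common.pb.h" -#include "src/common/timeutility.h" +#include "src/mds/schedule/topoAdapter.h" +#include "src/mds/topology/topology.h" +#include "src/mds/topology/topology_config.h" +#include "src/mds/topology/topology_item.h" +#include 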
"src/mds/topology/topology_service_manager.h" +#include "src/mds/topology/topology_storge.h" +#include "src/mds/topology/topology_token_generator.h" using ::curve::common::Configuration; using std::string; @@ -65,15 +65,17 @@ using std::string; using ::curve::mds::topology::ChunkServerIdType; using ::curve::mds::topology::ChunkServerState; using ::curve::mds::topology::CopySetIdType; +using ::curve::mds::topology::CopySetKey; using ::curve::mds::topology::DefaultIdGenerator; using ::curve::mds::topology::DefaultTokenGenerator; using ::curve::mds::topology::kTopoErrCodeSuccess; using ::curve::mds::topology::LogicalPool; using ::curve::mds::topology::LogicalPoolType; -using ::curve::mds::topology::Poolset; using ::curve::mds::topology::PhysicalPool; -using ::curve::mds::topology::PoolsetIdType; using ::curve::mds::topology::PoolIdType; +using ::curve::mds::topology::Poolset; +using ::curve::mds::topology::PoolsetIdType; +using ::curve::mds::topology::Server; using ::curve::mds::topology::ServerIdType; using ::curve::mds::topology::TopologyImpl; using ::curve::mds::topology::TopologyOption; @@ -82,8 +84,6 @@ using ::curve::mds::topology::TopologyStatImpl; using ::curve::mds::topology::UNINTIALIZE_ID; using ::curve::mds::topology::Zone; using ::curve::mds::topology::ZoneIdType; -using ::curve::mds::topology::Server; -using ::curve::mds::topology::CopySetKey; using ::curve::mds::heartbeat::ChunkServerHeartbeatRequest; using ::curve::mds::heartbeat::ChunkServerHeartbeatResponse; @@ -120,247 +120,206 @@ class FakeTopologyStorage : public TopologyStorage { public: FakeTopologyStorage() {} - bool - LoadPoolset(std::unordered_map *PoolsetMap, - PoolsetIdType *maxPoolsetId) { + bool LoadPoolset(std::unordered_map* PoolsetMap, + PoolsetIdType* maxPoolsetId) { return true; } - bool - LoadLogicalPool(std::unordered_map *logicalPoolMap, - PoolIdType *maxLogicalPoolId) { + bool LoadLogicalPool( + std::unordered_map* logicalPoolMap, + PoolIdType* maxLogicalPoolId) { return true; } bool LoadPhysicalPool( - std::unordered_map *physicalPoolMap, - PoolIdType *maxPhysicalPoolId) { + std::unordered_map* physicalPoolMap, + PoolIdType* maxPhysicalPoolId) { return true; } - bool LoadZone(std::unordered_map *zoneMap, - ZoneIdType *maxZoneId) { + bool LoadZone(std::unordered_map* zoneMap, + ZoneIdType* maxZoneId) { return true; } - bool LoadServer(std::unordered_map *serverMap, - ServerIdType *maxServerId) { + bool LoadServer(std::unordered_map* serverMap, + ServerIdType* maxServerId) { return true; } bool LoadChunkServer( - std::unordered_map *chunkServerMap, - ChunkServerIdType *maxChunkServerId) { + std::unordered_map* chunkServerMap, + ChunkServerIdType* maxChunkServerId) { return true; } - bool LoadCopySet(std::map *copySetMap, - std::map *copySetIdMaxMap) { + bool LoadCopySet(std::map* copySetMap, + std::map* copySetIdMaxMap) { return true; } - bool StoragePoolset(const Poolset &data) { - return true; - } - bool StorageLogicalPool(const LogicalPool &data) { - return true; - } - bool StoragePhysicalPool(const PhysicalPool &data) { - return true; - } - bool StorageZone(const Zone &data) { - return true; - } - bool StorageServer(const Server &data) { - return true; - } - bool StorageChunkServer(const ChunkServer &data) { - return true; - } - bool StorageCopySet(const CopySetInfo &data) { - return true; - } - - bool DeletePoolset(PoolsetIdType id) { - return true; - } - bool DeleteLogicalPool(PoolIdType id) { - return true; - } - bool DeletePhysicalPool(PoolIdType id) { - return true; - } - bool 
DeleteZone(ZoneIdType id) { - return true; - } - bool DeleteServer(ServerIdType id) { - return true; - } - bool DeleteChunkServer(ChunkServerIdType id) { - return true; - } - bool DeleteCopySet(CopySetKey key) { - return true; - } - - bool UpdateLogicalPool(const LogicalPool &data) { - return true; - } - bool UpdatePhysicalPool(const PhysicalPool &data) { - return true; - } - bool UpdateZone(const Zone &data) { - return true; - } - bool UpdateServer(const Server &data) { - return true; - } - bool UpdateChunkServer(const ChunkServer &data) { - return true; - } - bool UpdateCopySet(const CopySetInfo &data) { - return true; - } - - bool LoadClusterInfo(std::vector *info) { - return true; - } - bool StorageClusterInfo(const ClusterInformation &info) { - return true; - } + bool StoragePoolset(const Poolset& data) { return true; } + bool StorageLogicalPool(const LogicalPool& data) { return true; } + bool StoragePhysicalPool(const PhysicalPool& data) { return true; } + bool StorageZone(const Zone& data) { return true; } + bool StorageServer(const Server& data) { return true; } + bool StorageChunkServer(const ChunkServer& data) { return true; } + bool StorageCopySet(const CopySetInfo& data) { return true; } + + bool DeletePoolset(PoolsetIdType id) { return true; } + bool DeleteLogicalPool(PoolIdType id) { return true; } + bool DeletePhysicalPool(PoolIdType id) { return true; } + bool DeleteZone(ZoneIdType id) { return true; } + bool DeleteServer(ServerIdType id) { return true; } + bool DeleteChunkServer(ChunkServerIdType id) { return true; } + bool DeleteCopySet(CopySetKey key) { return true; } + + bool UpdateLogicalPool(const LogicalPool& data) { return true; } + bool UpdatePhysicalPool(const PhysicalPool& data) { return true; } + bool UpdateZone(const Zone& data) { return true; } + bool UpdateServer(const Server& data) { return true; } + bool UpdateChunkServer(const ChunkServer& data) { return true; } + bool UpdateCopySet(const CopySetInfo& data) { return true; } + + bool LoadClusterInfo(std::vector* info) { return true; } + bool StorageClusterInfo(const ClusterInformation& info) { return true; } }; } // namespace topology class HeartbeatIntegrationCommon { public: - /* HeartbeatIntegrationCommon 构造函数 + /* HeartbeatIntegrationCommon constructor * - * @param[in] conf 配置信息 + * @param[in] conf configuration information */ - explicit HeartbeatIntegrationCommon(const Configuration &conf) { + explicit HeartbeatIntegrationCommon(const Configuration& conf) { conf_ = conf; } - /* PrepareAddPoolset 在集群中添加物理池集合 + /* PrepareAddPoolset adds a physical pool collection to the cluster * - * @param[in] poolset 物理池集合(池组) + * @param[in] poolset physical pool set (pool group) */ - void PrepareAddPoolset(const Poolset &poolset); + void PrepareAddPoolset(const Poolset& poolset); - /* PrepareAddLogicalPool 在集群中添加逻辑池 + /* PrepareAddLogicalPool adds a logical pool to the cluster * - * @param[in] lpool 逻辑池 + * @param[in] lpool logical pool */ - void PrepareAddLogicalPool(const LogicalPool &lpool); + void PrepareAddLogicalPool(const LogicalPool& lpool); - /* PrepareAddPhysicalPool 在集群中添加物理池 + /* PrepareAddPhysicalPool adds a physical pool to the cluster * - * @param[in] ppool 物理池 + * @param[in] ppool physical pool */ - void PrepareAddPhysicalPool(const PhysicalPool &ppool); + void PrepareAddPhysicalPool(const PhysicalPool& ppool); - /* PrepareAddZone 在集群中添加zone + /* PrepareAddZone adds a zone to the cluster * * @param[in] zone */ - void PrepareAddZone(const Zone &zone); + void PrepareAddZone(const Zone& zone); - /*
PrepareAddServer 在集群中添加server + /* PrepareAddServer adds a server to the cluster * * @param[in] server */ - void PrepareAddServer(const Server &server); + void PrepareAddServer(const Server& server); - /* PrepareAddChunkServer 在集群中添加chunkserver节点 + /* PrepareAddChunkServer adds a chunkserver node to the cluster * * @param[in] chunkserver */ - void PrepareAddChunkServer(const ChunkServer &chunkserver); + void PrepareAddChunkServer(const ChunkServer& chunkserver); - /* PrepareAddCopySet 在集群中添加copyset + /* PrepareAddCopySet adds a copyset to the cluster * - * @param[in] copysetId copyset id - * @param[in] logicalPoolId 逻辑池id - * @param[in] members copyset成员 + * @param[in] copysetId copyset ID + * @param[in] logicalPoolId logical pool ID + * @param[in] members copyset members */ void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, - const std::set &members); + const std::set& members); - /* UpdateCopysetTopo 更新topology中copyset的状态 + /* UpdateCopysetTopo updates the status of the copyset in topology * - * @param[in] copysetId copyset的id - * @param[in] logicalPoolId 逻辑池id - * @param[in] epoch copyset的epoch - * @param[in] leader copyset的leader - * @param[in] members copyset的成员 - * @param[in] candidate copyset的candidate信息 + * @param[in] copysetId ID of the copyset + * @param[in] logicalPoolId logical pool ID + * @param[in] epoch epoch of the copyset + * @param[in] leader leader of the copyset + * @param[in] members members of the copyset + * @param[in] candidate candidate information of the copyset */ void UpdateCopysetTopo(CopySetIdType copysetId, PoolIdType logicalPoolId, uint64_t epoch, ChunkServerIdType leader, - const std::set &members, + const std::set& members, ChunkServerIdType candidate = UNINTIALIZE_ID); - /* SendHeartbeat 发送心跳 + /* SendHeartbeat sends a heartbeat * * @param[in] req - * @param[in] expectedFailed 为true表示希望发送成功,为false表示希望发送失败 + * @param[in] expectFailed true means the heartbeat RPC is expected to + * fail, false means it is expected to succeed * @param[out] response */ - void SendHeartbeat(const ChunkServerHeartbeatRequest &request, + void SendHeartbeat(const ChunkServerHeartbeatRequest& request, bool expectFailed, - ChunkServerHeartbeatResponse *response); + ChunkServerHeartbeatResponse* response); - /* BuildBasicChunkServerRequest 构建最基本的request + /* BuildBasicChunkServerRequest builds the most basic request * - * @param[in] id chunkserver的id - * @param[out] req 构造好的指定id的request + * @param[in] id chunkserver ID + * @param[out] req the constructed request for the specified id */ void BuildBasicChunkServerRequest(ChunkServerIdType id, - ChunkServerHeartbeatRequest *req); + ChunkServerHeartbeatRequest* req); - /* AddCopySetToRequest 向request中添加copyset + /* AddCopySetToRequest adds a copyset to the request * * @param[in] req - * @param[in] csInfo copyset信息 - * @param[in] type copyset当前变更类型 + * @param[in] csInfo copyset information + * @param[in] type current config change type of the copyset */ - void AddCopySetToRequest(ChunkServerHeartbeatRequest *req, - const CopySetInfo &csInfo, + void AddCopySetToRequest(ChunkServerHeartbeatRequest* req, + const CopySetInfo& csInfo, ConfigChangeType type = ConfigChangeType::NONE); - /* AddOperatorToOpController 向调度模块添加op + /* AddOperatorToOpController adds an op to the scheduling module * * @param[in] op */ - void AddOperatorToOpController(const Operator &op); + void AddOperatorToOpController(const Operator& op); - /* RemoveOperatorFromOpController 从调度模块移除指定copyset上的op + /* RemoveOperatorFromOpController removes the op on
the specified copyset + * from the scheduling module * - * @param[in] id 需要移除op的copysetId + * @param[in] id copysetId of the copyset whose op should be removed */ - void RemoveOperatorFromOpController(const CopySetKey &id); + void RemoveOperatorFromOpController(const CopySetKey& id); /* - * PrepareBasicCluseter 在topology中构建最基本的拓扑结构 - * 一个物理池,一个逻辑池,三个zone,每个zone一个chunkserver, - * 集群中有一个copyset + * PrepareBasicCluseter builds the most basic topology structure in topology: + * one physical pool, one logical pool, three zones with one chunkserver in + * each zone, and one copyset in the cluster */ void PrepareBasicCluseter(); /** - * InitHeartbeatOption 初始化heartbeatOption + * InitHeartbeatOption initializes heartbeatOption * - * @param[in] conf 配置模块 - * @param[out] heartbeatOption 赋值完成的心跳option + * @param[in] conf configuration module + * @param[out] heartbeatOption the initialized heartbeat option */ - void InitHeartbeatOption(Configuration *conf, - HeartbeatOption *heartbeatOption); + void InitHeartbeatOption(Configuration* conf, + HeartbeatOption* heartbeatOption); /** - * InitSchedulerOption 初始化scheduleOption + * InitSchedulerOption initializes scheduleOption * - * @param[in] conf 配置模块 - * @param[out] heartbeatOption 赋值完成的调度option + * @param[in] conf configuration module + * @param[out] scheduleOption the initialized schedule option */ - void InitSchedulerOption(Configuration *conf, - ScheduleOption *scheduleOption); + void InitSchedulerOption(Configuration* conf, + ScheduleOption* scheduleOption); /** - * BuildBasicCluster 运行heartbeat/topology/scheduler模块 + * BuildBasicCluster runs the heartbeat/topology/scheduler modules */ void BuildBasicCluster(); diff --git a/test/integration/heartbeat/heartbeat_basic_test.cpp b/test/integration/heartbeat/heartbeat_basic_test.cpp index c9a2ae416d..4144a9d53b 100644 --- a/test/integration/heartbeat/heartbeat_basic_test.cpp +++ b/test/integration/heartbeat/heartbeat_basic_test.cpp @@ -20,8 +20,8 @@ * Author: lixiaocui */ -#include #include +#include #include "test/integration/heartbeat/common.h" @@ -32,19 +32,19 @@ namespace mds { class HeartbeatBasicTest : public ::testing::Test { protected: - void InitConfiguration(Configuration *conf) { + void InitConfiguration(Configuration* conf) { conf->SetIntValue("mds.topology.ChunkServerStateUpdateSec", 0); - // heartbeat相关配置设置 + // heartbeat related configuration settings conf->SetIntValue("mds.heartbeat.intervalMs", 100); conf->SetIntValue("mds.heartbeat.misstimeoutMs", 300); conf->SetIntValue("mds.heartbeat.offlinetimeoutMs", 500); conf->SetIntValue("mds.heartbeat.clean_follower_afterMs", 0); - // mds监听端口号 + // mds listening port number conf->SetStringValue("mds.listen.addr", "127.0.0.1:6879"); - // scheduler相关的内容 + // scheduler related configuration conf->SetBoolValue("mds.enable.copyset.scheduler", false); conf->SetBoolValue("mds.enable.leader.scheduler", false); conf->SetBoolValue("mds.enable.recover.scheduler", false); @@ -65,14 +65,14 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithCandidateOpOnGoing() { - // 构造mds中copyset当前状 + // Construct the current state of the copyset in mds ChunkServer cs(10, "testtoekn", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }, 10); + std::set{1, 2, 3}, 10); - // 构造scheduler当前的状态 - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + // Construct the current state of the scheduler + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority,
std::chrono::steady_clock::now(), std::make_shared(10)); op.timeLimit = std::chrono::seconds(3); @@ -80,15 +80,15 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsNoCnandidateOpOnGoing() { - // 构造mds中copyset当前状态 + // Construct the current state of the copyset in mds // copyset-1(epoch=5, peers={1,2,3}, leader=1); ChunkServer cs(10, "testtoekn", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // 构造scheduler当前的状态 - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + // Construct the current state of the scheduler + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(10)); op.timeLimit = std::chrono::seconds(3); @@ -96,14 +96,15 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithRemoveOp() { - // mds存在copyset-1(epoch=5, peers={1,2,3,4}, leader=1); - // scheduler中copyset-1有operator: startEpoch=5, step=RemovePeer<4> + // mds has copyset-1(epoch=5, peers={1,2,3,4}, leader=1); + // There is an operator in copyset-1 in the scheduler: startEpoch=5, + // step=RemovePeer<4> ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3, 4 }); + std::set{1, 2, 3, 4}); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(4)); op.timeLimit = std::chrono::seconds(3); @@ -111,14 +112,15 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithRemoveOpOnGoing() { - // mds存在copyset-1(epoch=5, peers={1,2,3,4}, leader=1, , candidate=4); - // scheduler中copyset-1有operator: startEpoch=5, step=RemovePeer<4> + // mds has copyset-1(epoch=5, peers={1,2,3,4}, leader=1, candidate=4); + // There is an operator in copyset-1 in the scheduler: startEpoch=5, + // step=RemovePeer<4> ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - hbtest_->UpdateCopysetTopo( - 1, 1, 5, 1, std::set{ 1, 2, 3, 4 }, 4); + hbtest_->UpdateCopysetTopo(1, 1, 5, 1, + std::set{1, 2, 3, 4}, 4); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(4)); op.timeLimit = std::chrono::seconds(3); @@ -126,12 +128,13 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithTransferOp() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1); - // scheduler中copyset-1有operator:startEpoch=5,step=TransferLeader{1>2} + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1); + // Copyset-1 in the scheduler has + // operator:startEpoch=5,step=TransferLeader{1>2} hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(1, 2)); op.timeLimit = std::chrono::seconds(3); @@ -139,12 +142,13 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithTransferOpOnGoing() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=2); - // scheduler中copyset-1有operator:startEpoch=5,step=TransferLeader{1>2} + // mds has
copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=2); + // Copyset-1 in the scheduler has + // operator:startEpoch=5,step=TransferLeader{1>2} hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }, 2); + std::set{1, 2, 3}, 2); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(1, 2)); op.timeLimit = std::chrono::seconds(3); @@ -152,22 +156,23 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrePareMdsWithCandidateNoOp() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }, 4); + std::set{1, 2, 3}, 4); } void PrepareMdsWithChangeOp() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1); - // scheduler中copyset-1有operator:startEpoch=5,step=ChangePeer{3>4} + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1); + // Copyset-1 in the scheduler has + // operator:startEpoch=5,step=ChangePeer{3>4} ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(3, 4)); op.timeLimit = std::chrono::seconds(3); @@ -175,24 +180,25 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithChangeOpOnGoing() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); - // scheduler中copyset-1有operator:startEpoch=5,step=step=ChangePeer{3>4} + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); + // In the scheduler, copyset-1 has + // operator:startEpoch=5,step=ChangePeer{3>4} ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }, 4); + std::set{1, 2, 3}, 4); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(3, 4)); op.timeLimit = std::chrono::seconds(3); hbtest_->AddOperatorToOpController(op); } - bool ValidateCopySet(const ::curve::mds::topology::CopySetInfo &expected) { + bool ValidateCopySet(const ::curve::mds::topology::CopySetInfo& expected) { ::curve::mds::topology::CopySetInfo copysetInfo; if (!hbtest_->topology_->GetCopySet( - CopySetKey{ expected.GetLogicalPoolId(), expected.GetId() }, + CopySetKey{expected.GetLogicalPoolId(), expected.GetId()}, &copysetInfo)) { return false; } @@ -226,9 +232,9 @@ class HeartbeatBasicTest : public ::testing::Test { return true; } - void BuildCopySetInfo(CopySetInfo *info, uint64_t epoch, + void BuildCopySetInfo(CopySetInfo* info, uint64_t epoch, ChunkServerIdType leader, - const std::set &members, + const std::set& members, ChunkServerIdType candidateId = UNINTIALIZE_ID) { info->SetEpoch(epoch); info->SetLeader(leader); @@ -255,14 +261,14 @@ class HeartbeatBasicTest : public ::testing::Test { }; TEST_F(HeartbeatBasicTest, test_request_no_chunkserverID) { - // 空的HeartbeatRequest + // Empty HeartbeatRequest ChunkServerHeartbeatRequest req;
ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBFAIL, &rep); } TEST_F(HeartbeatBasicTest, test_mds_donnot_has_this_chunkserver) { - // mds不存在该chunkserver + // The chunkserver does not exist in the mds ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(3, &req); req.set_chunkserverid(4); @@ -273,8 +279,8 @@ TEST_F(HeartbeatBasicTest, test_mds_donnot_has_this_chunkserver) { } TEST_F(HeartbeatBasicTest, test_chunkserver_ip_port_not_match) { - // chunkserver上报的id相同,ip和port不匹配 - // ip不匹配 + // The chunkserver reports the same id, but the ip and port do not match + // ip mismatch ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(3, &req); req.set_ip("127.0.0.1"); @@ -283,14 +289,14 @@ TEST_F(HeartbeatBasicTest, test_chunkserver_ip_port_not_match) { ASSERT_EQ(::curve::mds::heartbeat::hbChunkserverIpPortNotMatch, rep.statuscode()); - // port不匹配 + // Port mismatch req.set_ip("10.198.100.3"); req.set_port(1111); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(::curve::mds::heartbeat::hbChunkserverIpPortNotMatch, rep.statuscode()); - // token不匹配 + // Token mismatch req.set_ip("10.198.100.3"); req.set_port(9000); req.set_token("youdao"); @@ -300,20 +306,20 @@ } TEST_F(HeartbeatBasicTest, test_chunkserver_offline_then_online) { - // chunkserver上报心跳时间间隔大于offline - // sleep 800ms, 该chunkserver onffline状态 + // The interval since the chunkserver last reported a heartbeat exceeds + // the offline timeout; after sleeping 800ms the chunkserver is offline std::this_thread::sleep_for(std::chrono::milliseconds(800)); ChunkServer out; hbtest_->topology_->GetChunkServer(1, &out); ASSERT_EQ(OnlineState::OFFLINE, out.GetOnlineState()); - // chunkserver上报心跳,chunkserver online + // Chunkserver reports a heartbeat, chunkserver goes online ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(out.GetId(), &req); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 后台健康检查程序把chunksrver更新为onlinne状态 + // The background health checker updates the chunkserver to online status uint64_t now = ::curve::common::TimeUtility::GetTimeofDaySec(); bool updateSuccess = false; while (::curve::common::TimeUtility::GetTimeofDaySec() - now <= 2) { @@ -330,8 +336,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition1) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); // copyset-1(epoch=1, peers={1,2,3}, leader=1) CopySetInfo csInfo(1, 1); @@ -339,8 +344,7 @@ hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); } @@ -349,8 +353,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition2) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); ::curve::mds::topology::CopySetInfo copysetInfo; -
ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); @@ -361,8 +364,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition2) { hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::ADD_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_TRUE(copysetInfo.HasCandidate()); @@ -373,7 +375,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition3) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - CopySetKey key{ 1, 1 }; + CopySetKey key{1, 1}; ASSERT_TRUE(hbtest_->topology_->GetCopySet(key, ©setInfo)); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); @@ -387,11 +389,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition3) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3, 5 }; + std::set res{1, 2, 3, 5}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -400,8 +401,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition4) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ChunkServer cs4(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs4); ChunkServer cs5(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); @@ -417,11 +417,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition4) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3, 5 }; + std::set res{1, 2, 3, 5}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(4, copysetInfo.GetCandidate()); ASSERT_EQ(0, rep.needupdatecopysets_size()); @@ -431,8 +430,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition5) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); // copyset-1(epoch=0, peers={1,2,3}, leader=0) CopySetInfo csInfo(1, 1); @@ -441,11 +439,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition5) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - 
hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -454,8 +451,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition6) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); // copyset-1(epoch=0, peers={1,2,3}, leader=1) CopySetInfo csInfo(1, 1); @@ -464,11 +460,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition6) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -477,8 +472,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition7) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); // copyset-1(epoch=0, peers={1,2,3}, leader=1) CopySetInfo csInfo(1, 1); @@ -487,11 +481,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition7) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -500,8 +493,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition8) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); // copyset-1(epoch=1, peers={1,2,3}, leader=0) CopySetInfo csInfo(1, 1); @@ -510,11 +502,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition8) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -523,12 +514,11 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition9) { ChunkServerHeartbeatRequest req; 
hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // 上报copyset-1(epoch=2, peers={1,2,3,4}, leader=1) + // Report copyset-1(epoch=2, peers={1,2,3,4}, leader=1) auto copysetMembers = copysetInfo.GetCopySetMembers(); copysetMembers.emplace(4); CopySetInfo csInfo(1, 1); @@ -537,11 +527,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition9) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -550,8 +539,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition10) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); @@ -564,415 +552,398 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition10) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } -// 上报的是leader +// Reported as the leader TEST_F(HeartbeatBasicTest, test_leader_report_consistent_with_mds) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver1上报的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // chunkserver1 report copyset-1(epoch=2, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 2, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 2, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_leader_report_epoch_bigger) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update 
copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver1上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 report copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空,mds更新epoch为5 + // response is empty, mds updates epoch to 5 ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_leader_report_epoch_bigger_leader_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=5, peers={1,2,3}, leader=2) + // chunkserver2 report copyset-1(epoch=5, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空,mds更新epoch为5,leader为2 + // response is empty, mds updates epoch to 5, and leader to 2 ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(2, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } -// 上报的是follower +// Reported as a follower TEST_F(HeartbeatBasicTest, test_follower_report_consistent_with_mds) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=2, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 2, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 2, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空 + // response is empty ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); 
ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } -// 上报的是follower +// Reported as a follower TEST_F(HeartbeatBasicTest, test_follower_report_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=2, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=2, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 2, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 2, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空 + // response is empty ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_bigger) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=3, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=3, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 3, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 3, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_bigger_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=3, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=3, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 3, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 3, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + 
ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_bigger_peers_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=3, peers={1,2,3,4}, leader=1) + // chunkserver2 report copyset-1(epoch=3, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 3, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 3, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=1, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 1, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=1, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 1, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); 
::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller_peers_not_same1) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3,4}, leader=1) + // chunkserver2 report copyset-1(epoch=1, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 1, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller_peers_not_same2) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3,4}, leader=0) + // chunkserver2 report copyset-1(epoch=1, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 1, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 
1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + std::set{1, 2, 3}); + // chunkserver2 report copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0_peers_not_same1) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver2 report copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0_peers_not_same2) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0, 
peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } -// 上报的不是复制组成员 +// The reporter is not a member of the replication group TEST_F(HeartbeatBasicTest, test_other_report_consistent_with_mds) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // chunkserver4 reports copyset-1(epoch=2, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 2, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 2, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -980,29 +951,28 @@ TEST_F(HeartbeatBasicTest, test_other_report_consistent_with_mds) { ASSERT_EQ(3, conf.peers_size()); ASSERT_EQ(2, conf.epoch()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=1, peers={1,2,3}, leader=1) + // chunkserver4 reports copyset-1(epoch=1, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 1, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1010,29 +980,28 @@
TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller) { ASSERT_EQ(3, conf.peers_size()); ASSERT_EQ(2, conf.epoch()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller_peers_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=1, peers={1,2}, leader=1) + // chunkserver4 report copyset-1(epoch=1, peers={1,2}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 1, std::set{ 1, 2 }); + BuildCopySetInfo(&csInfo, 1, 1, std::set{1, 2}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1040,30 +1009,29 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller_peers_not_same) { ASSERT_EQ(3, conf.peers_size()); ASSERT_EQ(2, conf.epoch()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver4 report copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1071,32 +1039,31 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0) { ASSERT_EQ(3, conf.peers_size()); ASSERT_EQ(2, conf.epoch()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; 
+ std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0_peers_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs4(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs4); ChunkServer cs5(5, "testtoken", "nvme", 3, "10.198.100.3", 9090, "/"); hbtest_->PrepareAddChunkServer(cs5); - // chunkserver4上报的copyset-1(epoch=0, peers={1,2,3,5}, leader=0) + // chunkserver4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1104,29 +1071,28 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0_peers_not_same) { ASSERT_EQ(3, conf.peers_size()); ASSERT_EQ(2, conf.epoch()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition1) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1140,24 +1106,23 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition1) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition2) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // conf.gChangeInfo={peer: 10, type: AddPeer} ) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }, 10); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}, 10); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::ADD_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology
::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(10, copysetInfo.GetCandidate()); } @@ -1165,246 +1130,238 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition2) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition3) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(6, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition4) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(6, copysetInfo.GetEpoch()); ASSERT_EQ(2, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition5) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver1 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep;
hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(6, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3, 10 }; + std::set res{1, 2, 3, 10}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition6) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition7) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition8) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); }
TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition9) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition10) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition11) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition12) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req;
hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition13) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1413,16 +1370,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition14) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1431,48 +1388,48 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition15) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition16) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition17) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req;
hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1481,16 +1438,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition18) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1499,128 +1456,126 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition19) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition20) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition21) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition22) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver10 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req;
hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition23) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver10 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition24) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition25) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1629,16 +1584,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition26) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver10 reports
copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1647,48 +1602,48 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition27) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition28) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition29) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1697,16 +1652,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition30) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1715,16 +1670,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition31) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); -
// chunkserver10上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1733,16 +1688,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition32) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1756,16 +1711,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition33) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1781,16 +1736,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition34) { ChunkServer cs5(5, "testtoekn", "nvme", 3, "10.198.100.3", 9003, "/"); hbtest_->PrepareAddChunkServer(cs5); - // chunkserver4上报的copyset-1(epoch=4, peers={1,2,3,5}, leader=0) + // chunkserver4 reports copyset-1(epoch=4, peers={1,2,3,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1804,16 +1759,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition35) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); -
BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1825,16 +1780,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition35) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition1) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(ConfigChangeType::ADD_PEER, rep.needupdatecopysets(0).type()); ASSERT_EQ("10.198.100.3:9001:0", @@ -1844,42 +1799,41 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition1) { TEST_F(HeartbeatBasicTest, test_test_mdsWithCandidate_OpOnGoing_condition2) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver1上报 + // chunkserver1 reports // copyset-1(epoch=5, peers={1,2,3}, leader=1, // conf.gChangeInfo={peer: 10, type: AddPeer} ) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }, 10); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}, 10); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::ADD_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition3) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology中copyset的状态 + // Check the status of copyset in topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetLeader()); ASSERT_EQ(6, copysetInfo.GetEpoch()); ASSERT_EQ(UNINTIALIZE_ID, copysetInfo.GetCandidate()); @@ -1888,206 +1842,200 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition3) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition4) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver1上报copyset-1(epoch=6,
peers={1,2,3,10}, leader=2) + // chunkserver1 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology中copyset的状态 + // Check the status of copyset in topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(6, copysetInfo.GetEpoch()); ASSERT_EQ(UNINTIALIZE_ID, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3, 10 }; + std::set peers{1, 2, 3, 10}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition5) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=7, peers={1,2,3, 10}, leader=2) + // chunkserver2 reports copyset-1(epoch=7, peers={1,2,3, 10}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 2, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 7, 2, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology中copyset的状态 + // Check the status of copyset in topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetLeader()); ASSERT_EQ(7, copysetInfo.GetEpoch()); ASSERT_EQ(UNINTIALIZE_ID, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3, 10 }; + std::set peers{1, 2, 3, 10}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition6) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition7) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=5,
peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition8) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition9) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition10) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req;
hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition11) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2096,28 +2044,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition12) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10,
copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2126,84 +2073,81 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition13) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition14) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition15) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1,
ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2212,28 +2156,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition16) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2242,196 +2185,189 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition17) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition18) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver10 reports
copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition19) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition20) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver10 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10,
copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition21) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver10 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition22) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition23) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0,
rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2440,28 +2376,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition24) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2470,86 +2405,83 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition25) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition26) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition27) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2558,19 +2490,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition28) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -2580,14 +2512,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition28) {
rep.needupdatecopysets(0).peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", rep.needupdatecopysets(0).peers(2).address()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2596,19 +2527,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition29) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -2618,14 +2549,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition29) { rep.needupdatecopysets(0).peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", rep.needupdatecopysets(0).peers(2).address()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2634,19 +2564,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition30) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -2656,30 +2586,29 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition30) { rep.needupdatecopysets(0).peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", rep.needupdatecopysets(0).peers(2).address()); - // 
检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_1) { PrepareMdsWithRemoveOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -2694,48 +2623,47 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_1) { ASSERT_EQ(ConfigChangeType::REMOVE_PEER, rep.needupdatecopysets(0).type()); ASSERT_EQ("10.198.100.3:9001:0", rep.needupdatecopysets(0).configchangeitem().address()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_2) { PrepareMdsWithRemoveOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1, + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1, // configChangeInfo={peer: 4, type:REMOVE_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }, - 4); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}, 4); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::REMOVE_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_3) { PrepareMdsWithRemoveOp(); - // chunkserver-1上报上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(6); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -2743,22 +2671,22 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_3) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_4) { PrepareMdsWithRemoveOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req;
hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(6); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -2766,23 +2694,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_4) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_5) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(7); csInfo.SetLeader(2); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -2790,23 +2718,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_5) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_6) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -2814,24 +2742,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_6) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_7) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check
response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -2839,24 +2767,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_7) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_8) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -2864,25 +2792,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_8) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_9) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -2890,25 +2818,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_9) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_10) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); +
csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -2916,26 +2844,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_10) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_11) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -2943,43 +2871,43 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_11) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_12) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + step = dynamic_cast<RemovePeer*>(ops[0].step.get());
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -2987,44 +2915,44 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_12) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_13) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports (epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-2上报(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports (epoch=4, peers={1,2,3}, leader=0) hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -3032,43 +2960,43 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_13) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_14) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -3076,45 +3004,45 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_14) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_15) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -3124,17 +3052,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_16) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // chunkserver-5, which is not in the replication group, reports // copyset-1(epoch=5, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf =
rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -3145,13 +3073,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_16) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -3161,17 +3089,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_17) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // chunkserver-5, which is not in the replication group, reports // copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -3182,14 +3110,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_17) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -3199,17 +3127,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_18) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // chunkserver-5, which is not in the replication group, reports // copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -3220,14 +3148,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_18) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step =
dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -3237,17 +3165,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_19) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // chunkserver-5, which is not in the replication group, reports // copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -3258,847 +3186,846 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_19) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_1) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1, + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1, // configChangeInfo={peer: 4, type: REMOVE_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }, - 4); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}, 4); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::REMOVE_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_2) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(6); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 
检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_3) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_4) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_5) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_6) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler
auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_7) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_8) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_9) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1,
ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_10) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_11) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_12) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1,
ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_13) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_14) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_15) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step =
dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_16) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_17) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_18) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_19) { PrepareMdsWithRemoveOpOnGoing();
- // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4}); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_20) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_21) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<RemovePeer *>(ops[0].step.get()); + auto step = dynamic_cast<RemovePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_22) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4,
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_23) {
    PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(4, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_24) {
    PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(4, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_25) {
    PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(4, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_26) {
    PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(4, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_27) {
    PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(4, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_28) {
    PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(4, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_29) {
    PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(4, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_30) {
    PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(4, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_31) {
    PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(4, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_32) {
    PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(4, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_33) {
    PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(4, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());
}
@@ -4108,17 +4035,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_34) {
    ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
    hbtest_->PrepareAddChunkServer(cs);
-    // 非复制组成员chunkserver-5上报
+    // chunkserver-5, not a member of the replication group, reports
    // copyset-1(epoch=5, peers={1,2,3,4}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(5, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(1, rep.needupdatecopysets_size());
    CopySetConf conf = rep.needupdatecopysets(0);
    ASSERT_EQ(1, conf.logicalpoolid());
@@ -4129,14 +4056,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_34) {
    ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
    ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address());
    ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
    csInfo.SetLeader(1);
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());
}
@@ -4146,17 +4073,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_35) {
    ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
    hbtest_->PrepareAddChunkServer(cs);
-    // 非复制组成员chunkserver-5上报
+    // chunkserver-5, not a member of the replication group, reports
    // copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(5, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(1, rep.needupdatecopysets_size());
    CopySetConf conf = rep.needupdatecopysets(0);
    ASSERT_EQ(1, conf.logicalpoolid());
@@ -4167,27 +4094,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_35) {
    ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
    ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address());
    ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());

-    // 非复制组成员chunkserver-5上报
+    // chunkserver-5, not a member of the replication group, reports
    // copyset-1(epoch=4, peers={1,2,3}, leader=0)
    rep.Clear();
    req.Clear();
    hbtest_->BuildBasicChunkServerRequest(5, &req);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(1, rep.needupdatecopysets_size());
    conf = rep.needupdatecopysets(0);
    ASSERT_EQ(1, conf.logicalpoolid());
@@ -4198,16 +4125,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_35) {
    ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
    ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address());
    ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());
}
@@ -4217,75 +4144,75 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_36) {
    ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
    hbtest_->PrepareAddChunkServer(cs);
-    // 非复制组成员chunkserver-5上报
+    // chunkserver-5, not a member of the replication group, reports
    // copyset-1(epoch=0, peers={1,2,3,4}, leader=0
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(5, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(1, rep.needupdatecopysets_size());
    CopySetConf conf = rep.needupdatecopysets(0);
    ASSERT_EQ(1, conf.logicalpoolid());
    ASSERT_EQ(1, conf.copysetid());
    ASSERT_EQ(4, conf.peers_size());
    ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());

-    // 非复制组成员chunkserver-5上报
+    // chunkserver-5, not a member of the replication group, reports
    // copyset-1(epoch=0, peers={1,2,3}, leader=0)
    rep.Clear();
    req.Clear();
    hbtest_->BuildBasicChunkServerRequest(5, &req);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(1, rep.needupdatecopysets_size());
    conf = rep.needupdatecopysets(0);
    ASSERT_EQ(1, conf.logicalpoolid());
    ASSERT_EQ(1, conf.copysetid());
    ASSERT_EQ(4, conf.peers_size());
    ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
    csInfo.SetCandidate(4);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    step = dynamic_cast<RemovePeer*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(4, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_1) {
    PrepareMdsWithTransferOp();
-    // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(1, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(1, rep.needupdatecopysets_size());
    ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid());
    ASSERT_EQ(5, rep.needupdatecopysets(0).epoch());
@@ -4299,350 +4226,350 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_1) {
              rep.needupdatecopysets(0).type());
    ASSERT_EQ("10.198.100.2:9000:0",
              rep.needupdatecopysets(0).configchangeitem().address());
-    // 检查copyset
+    // Check copyset
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_2) {
    PrepareMdsWithTransferOp();
-    // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
    // configChangeInfo={peer: 2, type: TRANSFER_LEADER})
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(1, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }, 2);
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}, 2);
    hbtest_->AddCopySetToRequest(&req, csInfo,
                                 ConfigChangeType::TRANSFER_LEADER);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_3) {
    PrepareMdsWithTransferOp();
-    // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(1, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(0, ops.size());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_4) {
    PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(0, ops.size());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_5) {
    PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_6) {
    PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetLeader(1);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_7) {
    PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_8) {
    PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_9) {
    PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());

-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
    ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
    hbtest_->PrepareAddChunkServer(cs);
    rep.Clear();
    req.Clear();
    hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_10) {
    PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());

-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
    ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
    hbtest_->PrepareAddChunkServer(cs);
    rep.Clear();
    req.Clear();
    hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_11) {
    PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());

-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
    ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
    hbtest_->PrepareAddChunkServer(cs);
    rep.Clear();
    req.Clear();
    hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_12) {
    PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());

-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
    ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
    hbtest_->PrepareAddChunkServer(cs);
    rep.Clear();
    req.Clear();
    hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}
@@ -4651,15 +4578,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_13) {
    PrepareMdsWithTransferOp();
    ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
    hbtest_->PrepareAddChunkServer(cs);
-    // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(4, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(1, rep.needupdatecopysets_size());
    auto conf = rep.needupdatecopysets(0);
    ASSERT_EQ(1, conf.logicalpoolid());
@@ -4669,13 +4596,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_13) {
    ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
    ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
    ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
    csInfo.SetLeader(1);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}
@@ -4684,15 +4611,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_14) {
    PrepareMdsWithTransferOp();
    ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
    hbtest_->PrepareAddChunkServer(cs);
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(4, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(1, rep.needupdatecopysets_size());
    auto conf = rep.needupdatecopysets(0);
    ASSERT_EQ(1, conf.logicalpoolid());
@@ -4702,14 +4629,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_14) {
    ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
    ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
    ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}
@@ -4720,15 +4647,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_15) {
    hbtest_->PrepareAddChunkServer(cs1);
    ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
    hbtest_->PrepareAddChunkServer(cs2);
-    // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
+    // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(5, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(1, rep.needupdatecopysets_size());
    auto conf = rep.needupdatecopysets(0);
    ASSERT_EQ(1, conf.logicalpoolid());
@@ -4738,15 +4665,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_15) {
    ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
    ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
    ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}
@@ -4758,15 +4685,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_16) {
    ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
    hbtest_->PrepareAddChunkServer(cs2);
-    // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
+    // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(5, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(1, rep.needupdatecopysets_size());
    auto conf = rep.needupdatecopysets(0);
    ASSERT_EQ(1, conf.logicalpoolid());
@@ -4776,25 +4703,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_16) {
    ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
    ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
    ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());

-    // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
+    // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
    req.Clear();
    rep.Clear();
    hbtest_->BuildBasicChunkServerRequest(5, &req);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(1, rep.needupdatecopysets_size());
    conf = rep.needupdatecopysets(0);
    ASSERT_EQ(1, conf.logicalpoolid());
@@ -4804,346 +4731,346 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_16) {
    ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
    ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
    ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_1) {
    PrepareMdsWithTransferOpOnGoing();
-    // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1,
+    // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1,
    // configChangeInfo={peer: 2, type: TRANSFER_LEADER})
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(1, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }, 2);
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}, 2);
    hbtest_->AddCopySetToRequest(&req, csInfo,
                                 ConfigChangeType::TRANSFER_LEADER);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_2) {
    PrepareMdsWithTransferOpOnGoing();
-    // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(1, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(0, ops.size());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_3) {
    PrepareMdsWithTransferOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(0, ops.size());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_4) {
    PrepareMdsWithTransferOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetCandidate(2);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_5) {
    PrepareMdsWithTransferOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetLeader(1);
    csInfo.SetCandidate(2);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_6) {
    PrepareMdsWithTransferOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetCandidate(2);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_7) {
    PrepareMdsWithTransferOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetCandidate(2);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());

-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
    ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
    hbtest_->PrepareAddChunkServer(cs);
    rep.Clear();
    req.Clear();
    hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetCandidate(2);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_8) {
    PrepareMdsWithTransferOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
    csInfo.SetCandidate(2);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());

-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
    ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
    hbtest_->PrepareAddChunkServer(cs);
    rep.Clear();
    req.Clear();
    hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
    csInfo.SetCandidate(2);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_9) {
    PrepareMdsWithTransferOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetCandidate(2);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());

-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
    ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
    hbtest_->PrepareAddChunkServer(cs);
    rep.Clear();
    req.Clear();
    hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetCandidate(2);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}

TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_10) {
    PrepareMdsWithTransferOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
    csInfo.SetCandidate(2);
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());

-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
    ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
    hbtest_->PrepareAddChunkServer(cs);
    rep.Clear();
    req.Clear();
    hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
    csInfo.SetEpoch(5);
    csInfo.SetLeader(1);
    csInfo.SetCandidate(2);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
    ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
    ops = hbtest_->coordinator_->GetOpController()->GetOperators();
    ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    step = dynamic_cast<TransferLeader*>(ops[0].step.get());
    ASSERT_TRUE(nullptr != step);
    ASSERT_EQ(2, step->GetTargetPeer());
}
@@ -5152,15 +5079,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_11) {
    PrepareMdsWithTransferOpOnGoing();
    ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
    hbtest_->PrepareAddChunkServer(cs);
-    // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0)
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(4, &req);
    CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3});
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
    ASSERT_EQ(1, rep.needupdatecopysets_size());
    auto conf = rep.needupdatecopysets(0);
    ASSERT_EQ(1, conf.logicalpoolid());
-5170,14 +5097,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_11) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } @@ -5186,15 +5113,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_12) { PrepareMdsWithTransferOpOnGoing(); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5204,15 +5131,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_12) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } @@ -5224,15 +5151,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_13) { ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5242,26 +5169,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_13) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step =
dynamic_cast<TransferLeader *>(ops[0].step.get()); + auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5271,661 +5198,661 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_13) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_1) { PrePareMdsWithCandidateNoOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_2) { PrePareMdsWithCandidateNoOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 4, type: ADD_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }, 4); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}, 4); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::ADD_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_3) { PrePareMdsWithCandidateNoOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req;
hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_4) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_5) { PrePareMdsWithCandidateNoOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_6) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,3,4}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,3,4}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 2, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 7, 2, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_7) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - 
BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_8) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_9) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_10) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_11) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest 
req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_12) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_13) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check 
op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_14) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_15) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check 
op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_16) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_17) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_18) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); 
ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_19) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_20) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_21) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_22) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); 
CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_23) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=1) ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_24) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=0, 
peers={1,2,3,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=0) ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -5934,16 +5861,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_25) { PrePareMdsWithCandidateNoOp(); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5953,11 +5880,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_25) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -5966,17 +5893,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_26) { PrePareMdsWithCandidateNoOp(); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5986,12 +5913,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_26) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops 
= hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -6000,17 +5927,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_27) { PrePareMdsWithCandidateNoOp(); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6020,13 +5947,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_27) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -6035,15 +5962,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_28) { PrePareMdsWithCandidateNoOp(); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6053,23 +5980,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_28) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6079,468 +6006,467 @@ TEST_F(HeartbeatBasicTest, 
test_mdsWithCandidateNoOp_28) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_1) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set{ 1, 2, 3 }); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + std::set{1, 2, 3}); + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_2) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set{ 1, 2, 3 }); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + std::set{1, 2, 3}); + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 2, type: TRANSFER_LEADER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }, 2); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}, 2); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::TRANSFER_LEADER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_3) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set{ 1, 2, 3 }); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + std::set{1, 2, 3}); + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, 
rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_4) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set{ 1, 2, 3 }); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + std::set{1, 2, 3}); + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) // configChangeInfo={peer: 2, type: TRANSFER_LEADER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }, 2); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}, 2); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::TRANSFER_LEADER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_5) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_6) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) // configChangeInfo={peer: 2, type: TRANSFER_LEADER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 4 }, - 2); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 4}, 2); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::TRANSFER_LEADER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, 
rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_7) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + std::set{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_8) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + std::set{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_9) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + std::set{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, 
peers={1,2,3}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_10) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + std::set{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_11) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + std::set{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, 
rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_12) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + std::set{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_13) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + std::set{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) 
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_14) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + std::set{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, 
test_mdsCopysetNoLeader_15) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6550,29 +6476,29 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_15) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_16) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6582,118 +6508,118 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_16) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_17) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, 
std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,5}, leader=1) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_18) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,5}, leader=0) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = 
hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_19) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); + std::set<ChunkServerIdType>{1, 2, 3}); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6703,24 +6629,24 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_19) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,5}, leader=1) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6730,31 +6656,31 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_19) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_20) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 
2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6764,23 +6690,23 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_20) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,5}, leader=0) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6790,31 +6716,31 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_20) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_21) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); + std::set<ChunkServerIdType>{1, 2, 3}); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6824,24 +6750,24 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_21) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,5}, leader=1) + // 
chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=1) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6851,31 +6777,31 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_21) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6885,23 +6811,23 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=0) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6911,11 +6837,11 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, 
conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -6923,15 +6849,15 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_1) { PrepareMdsWithChangeOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6944,14 +6870,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_1) { ASSERT_EQ(ConfigChangeType::CHANGE_PEER, conf.type()); ASSERT_EQ("10.198.100.3:9001:0", conf.configchangeitem().address()); ASSERT_EQ("10.198.100.3:9000:0", conf.oldpeer().address()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(3, step->GetOldPeer()); ASSERT_EQ(4, step->GetTargetPeer()); @@ -6960,24 +6886,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_2) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_2) { PrepareMdsWithChangeOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 4, type: CHANGE_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }, 4); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}, 4); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::CHANGE_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -6985,20 +6911,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_2) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_3) { PrepareMdsWithChangeOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3}); 
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7006,20 +6932,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_3) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_4) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7029,20 +6955,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_5) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,5}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7050,20 +6976,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_5) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_6) { PrepareMdsWithChangeOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7071,20 +6997,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_6) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_7) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,4}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,4}, leader=2) 
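// Context for the test_mdsWithChangePeerOp_* cases: PrepareMdsWithChangeOp()
// leaves the scheduler holding a ChangePeer operator on copyset-1 (old peer 3,
// target peer 4, per test_mdsWithChangePeerOp_1). Reports that already show
// the change applied (cases _3 through _8) drop the operator, so ops.size()
// is 0; stale or follower reports keep it pending. A condensed sketch of the
// recurring pending-operator check, assuming ChangePeer is the schedule-module
// step type:
//
//   auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
//   ASSERT_EQ(1, ops.size());
//   auto step = dynamic_cast<ChangePeer*>(ops[0].step.get());
//   ASSERT_TRUE(nullptr != step);         // the queued step really is ChangePeer
//   ASSERT_EQ(4, step->GetTargetPeer());  // still aimed at chunkserver-4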
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7092,20 +7018,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_7) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_8) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=7, peers={1,2,4}, leader=4) + // chunkserver-4 reports copyset-1(epoch=7, peers={1,2,4}, leader=4) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 4, std::set<ChunkServerIdType>{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 7, 4, std::set<ChunkServerIdType>{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7113,23 +7039,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_8) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_9) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7137,24 +7063,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_9) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_10) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = 
hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7162,24 +7088,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_10) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_11) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7187,25 +7113,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_11) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_12) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7213,25 +7139,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_12) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_13) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, 
ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7239,26 +7165,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_13) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_14) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7268,25 +7194,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_15) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7296,26 +7222,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_16) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + 
csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7323,24 +7249,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_16) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_17) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7348,25 +7274,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_17) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_18) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7376,25 +7302,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_19) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7404,26 +7330,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_20) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7431,24 +7357,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_20) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_21) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7456,25 +7382,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_21) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_22) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); 
ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7484,25 +7410,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_23) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7512,26 +7438,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_24) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7539,23 +7465,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_24) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_25) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, 
rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7563,24 +7489,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_25) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_26) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7588,25 +7514,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_26) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_27) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7614,26 +7540,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_27) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_28) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - 
csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7641,24 +7567,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_28) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_29) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7666,25 +7592,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_29) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_30) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7694,25 +7620,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_31) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 
1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7722,26 +7648,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_32) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7749,24 +7675,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_32) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_33) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7774,25 +7700,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_33) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_34) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset 
csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7802,25 +7728,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_35) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7830,26 +7756,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_36) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7859,16 +7785,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_37) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 
检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7878,14 +7804,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_37) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7895,16 +7821,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7914,13 +7840,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); @@ -7928,13 +7854,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { hbtest_->PrepareAddChunkServer(cs2); req.Clear(); rep.Clear(); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=1) hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 6 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 6}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7944,14 +7870,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, 
ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7961,16 +7887,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_39) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7980,28 +7906,28 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); ChunkServer cs2(6, "testtoken", "nvme", 3, "10.198.100.3", 9003, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 6 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 6}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8011,15 +7937,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8029,16 +7955,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, 
std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8048,26 +7974,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8077,15 +8003,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8093,15 +8019,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_1) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8114,14 +8040,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_1) { ASSERT_EQ(ConfigChangeType::CHANGE_PEER, conf.type()); ASSERT_EQ("10.198.100.3:9001:0", conf.configchangeitem().address()); ASSERT_EQ("10.198.100.3:9000:0", conf.oldpeer().address()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); 
ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(3, step->GetOldPeer()); ASSERT_EQ(4, step->GetTargetPeer()); @@ -8130,24 +8056,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_1) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_2) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 4, type: CHANGE_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }, 4); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}, 4); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::CHANGE_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8155,20 +8081,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_2) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_3) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8176,20 +8102,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_3) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_4) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8199,20 +8125,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_5) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 
chunkserver-1上报copyset-1(epoch=6, peers={1,2,5}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8220,20 +8146,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_5) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_6) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8241,20 +8167,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_6) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_7) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,4}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,4}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 2, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 7, 2, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8262,20 +8188,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_7) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_8) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=7, peers={1,2,4}, leader=4) + // chunkserver-4 reports copyset-1(epoch=7, peers={1,2,4}, leader=4) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 4, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 7, 4, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops 
= hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8283,24 +8209,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_8) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_9) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8308,25 +8234,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_9) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_10) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8334,25 +8260,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_10) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_11) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, 
step->GetTargetPeer()); } @@ -8360,26 +8286,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_11) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_12) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8387,26 +8313,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_12) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_13) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8414,27 +8340,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_13) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_14) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = 
hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8444,26 +8370,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_15) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8473,27 +8399,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_16) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8501,25 +8427,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_16) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_17) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 
检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8527,26 +8453,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_17) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_18) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8556,26 +8482,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_19) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8585,27 +8511,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_20) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 
检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8613,25 +8539,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_20) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_21) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8639,26 +8565,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_21) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_22) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8668,26 +8594,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_23) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, 
csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8697,27 +8623,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_24) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8725,24 +8651,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_24) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_25) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8750,25 +8676,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_25) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_26) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - 
BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8776,26 +8702,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_26) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_27) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8803,27 +8729,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_27) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_28) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8831,25 +8757,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_28) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_29) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-4 
reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8857,26 +8783,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_29) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_30) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8886,26 +8812,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_31) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8915,27 +8841,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_32) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 
chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8943,25 +8869,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_32) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_33) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8969,26 +8895,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_33) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_34) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8998,26 +8924,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_35) { ChunkServer cs(5, 
"testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -9027,27 +8953,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_36) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -9057,16 +8983,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_37) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9076,15 +9002,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_37) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset 
csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -9094,16 +9020,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9113,15 +9039,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); @@ -9129,13 +9055,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) { hbtest_->PrepareAddChunkServer(cs2); req.Clear(); rep.Clear(); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=0) hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 6 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 6}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9145,16 +9071,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -9164,16 +9090,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_39) { ChunkServer cs(5, 
"testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9183,28 +9109,28 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); ChunkServer cs2(6, "testtoken", "nvme", 3, "10.198.100.3", 9003, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=1) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 6 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 6}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9214,15 +9140,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -9232,16 +9158,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_40) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf 
= rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9251,27 +9177,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_40) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9281,16 +9207,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_40) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } diff --git a/test/integration/heartbeat/heartbeat_exception_test.cpp b/test/integration/heartbeat/heartbeat_exception_test.cpp index 67ac0bcf01..3b04c79390 100644 --- a/test/integration/heartbeat/heartbeat_exception_test.cpp +++ b/test/integration/heartbeat/heartbeat_exception_test.cpp @@ -20,8 +20,8 @@ * Author: lixiaocui */ -#include #include +#include #include "test/integration/heartbeat/common.h" @@ -31,19 +31,19 @@ namespace curve { namespace mds { class HeartbeatExceptionTest : public ::testing::Test { protected: - void InitConfiguration(Configuration *conf) { + void InitConfiguration(Configuration* conf) { conf->SetIntValue("mds.topology.ChunkServerStateUpdateSec", 0); - // heartbeat相关配置设置 + // heartbeat related configuration settings conf->SetIntValue("mds.heartbeat.intervalMs", 100); conf->SetIntValue("mds.heartbeat.misstimeoutMs", 3000); conf->SetIntValue("mds.heartbeat.offlinetimeoutMs", 5000); conf->SetIntValue("mds.heartbeat.clean_follower_afterMs", sleepTimeMs_); - // mds监听端口号 + // Mds listening port number conf->SetStringValue("mds.listen.addr", "127.0.0.1:6880"); - // scheduler相关的内容 + // scheduler related content conf->SetBoolValue("mds.enable.copyset.scheduler", false); conf->SetBoolValue("mds.enable.leader.scheduler", false); conf->SetBoolValue("mds.enable.recover.scheduler", false); @@ -64,9 +64,9 @@ class HeartbeatExceptionTest : public ::testing::Test { conf->SetIntValue("mds.scheduler.minScatterWidth", 50); } - void BuildCopySetInfo(CopySetInfo 
*info, uint64_t epoch, + void BuildCopySetInfo(CopySetInfo* info, uint64_t epoch, ChunkServerIdType leader, - const std::set &members, + const std::set& members, ChunkServerIdType candidateId = UNINTIALIZE_ID) { info->SetEpoch(epoch); info->SetLeader(leader); @@ -95,35 +95,51 @@ class HeartbeatExceptionTest : public ::testing::Test { }; /* - * bug说明:稳定性测试环境,宕机一台机器之后设置pending,副本恢复过程中mds有切换 - * 最终发现有5个pending状态的chunkserver没有完成迁移 - * 分析: - * 1. mds1提供服务时产生operator并下发给copyset-1{A,B,C} + - * D的变更,C是offline状态 - * 2. copyset-1完成配置变更,此时leader上的配置更新为epoch=2/{A,B,C,D}, - * candidate上的配置为epoch=1/{A,B,C}, mds1中记录的配置为epoch=1/{A,B,C} - * 3. mds1挂掉,mds2提供服务, 并从数据库加载copyset,mds2中copyset-1的配置 - * epoch=1/{A,B,C} - * 4. candidate-D上报心跳,copyset-1的配置为epoch=1/{A,B,C}。mds2发现D上报的 - * copyset中epoch和mds2记录的相同,但D并不在mds2记录的复制组中且调度模块也没有 - * 对应的operator,下发命令把D上的copyset-1删除导致D被误删 + * Bug Description: In a stability testing environment, when one machine + crashes, it is set to "pending," and during the replica recovery process, there + is MDS switching. Eventually, it was found that there were 5 "pending" chunk + servers that did not complete migration. + + * Analysis: + * 1. When MDS1 is providing services, it generates an operator and sends it to + * copyset-1 {A, B, C} + D for modification, where C is in an offline state. + * 2. Copyset-1 completes the configuration change. At this point, the + configuration on the leader is updated to epoch=2/{A, B, C, D}, + * the configuration on the candidate is epoch=1/{A, B, C}, and the + configuration recorded in MDS1 is epoch=1/{A, B, C}. + * 3. MDS1 crashes, and MDS2 takes over the service. MDS2 loads copysets from + the database, and the configuration for copyset-1 in MDS2 is epoch=1/{A, B, C}. + * 4. Candidate-D reports a heartbeat, and the configuration for copyset-1 is + epoch=1/{A, B, C}. + * MDS2 finds that the epoch reported by D matches the one recorded in MDS2, + but D is not in the replication group recorded by MDS2, + * and there is no corresponding operator in the scheduling module. As a + result, a command is issued to delete copyset-1 on D, leading to an accidental + deletion of D. * - * 解决方法: - * 正常情况下,heartbeat模块会在mds启动一定时间(目前配置20min)后才可以下发删除copyset - * 的命令,极大概率保证这段时间内copyset-leader上的配置更新到mds, - * 防止刚加入复制组 副本上的数据被误删 + * Solution: + * Under normal circumstances, the heartbeat module should wait for a certain + period (currently configured as 20 minutes) after MDS starts before issuing a + command to delete a copyset. + * This greatly ensures that during this time, the configuration on the + copyset-leader is updated in MDS, + * preventing the accidental deletion of data on replicas that have just joined + the replication group. * - * 这个时间的起始点应该是mds正式对外提供服务的时间,而不是mds的启动时间。如果设置为mds的启动 - * 时间,备mds启动很久后如果能够提供服务,就立马可以删除,导致bug + * The starting point for this time should be when MDS officially starts + providing external services, + * rather than the MDS startup time. If it is set based on the MDS startup time, + then if the standby MDS starts much later but can still provide services, it + could be deleted immediately, leading to the bug. */ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { // 1. 
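The fix described above boils down to a single time guard in the heartbeat module. A minimal sketch of that guard with hypothetical names; the key point is that serveSince_ is stamped when the MDS actually begins serving (e.g. after winning the MDS leader election), not at process startup:

    #include <chrono>
    #include <cstdint>

    class CleanFollowerGuard {
     public:
        // Call this when the MDS instance actually begins serving requests,
        // not in main().
        void MarkServing() { serveSince_ = std::chrono::steady_clock::now(); }

        // A delete-copyset command may be issued only after
        // mds.heartbeat.clean_follower_afterMs has elapsed since serving began.
        bool CanCleanFollower(int64_t cleanFollowerAfterMs) const {
            auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
                               std::chrono::steady_clock::now() - serveSince_)
                               .count();
            return elapsed >= cleanFollowerAfterMs;
        }

     private:
        std::chrono::steady_clock::time_point serveSince_ =
            std::chrono::steady_clock::now();
    };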
copyset-1(epoch=2, peers={1,2,3}, leader=1) - // scheduler中有+4的operator - CopySetKey key{ 1, 1 }; + // There is a +4 operator (add peer 4) in the scheduler + CopySetKey key{1, 1}; int startEpoch = 2; ChunkServerIdType leader = 1; ChunkServerIdType candidate = 4; - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); Operator op(2, key, OperatorPriority::NormalPriority, @@ -131,8 +147,8 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { op.timeLimit = std::chrono::seconds(3); hbtest_->AddOperatorToOpController(op); - // 2. leader上报copyset-1(epoch=2, peers={1,2,3}, leader=1) - // mds下发配置变更 + // 2. leader reports copyset-1(epoch=2, peers={1,2,3}, leader=1) + // mds issues a configuration change ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(leader, &req); CopySetInfo csInfo(key.first, key.second); @@ -140,7 +156,7 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response, 下发+D的配置变更 + // Check the response; a +D configuration change is issued ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(key.first, conf.logicalpoolid()); @@ -150,25 +166,28 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { ASSERT_EQ(ConfigChangeType::ADD_PEER, conf.type()); ASSERT_EQ("10.198.100.3:9001:0", conf.configchangeitem().address()); - // 3. 清除mds中的operrator(模拟mds重启) + // 3. Clear the operator in mds (simulate mds restart) hbtest_->RemoveOperatorFromOpController(key); - // 4. canndidate上报落后的与mds的配置(candidate回放日志时会一一apply旧配置): + // 4. The candidate reports the outdated configuration compared to MDS (the + // candidate replays logs one by one to apply the old configuration): // copyset-1(epoch=1, peers={1,2,3}, leader=1) - // 由于mds.heartbeat.clean_follower_afterMs时间还没有到,mds还不能下发 - // 删除命令。mds下发为空,candidate上的数据不会被误删 + // Because mds.heartbeat.clean_follower_afterMs time has not yet elapsed, + // MDS cannot issue deletion commands. MDS issues no commands, so the + // data on the candidate will not be accidentally deleted. rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(candidate, &req); BuildCopySetInfo(&csInfo, startEpoch - 1, leader, peers); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response, 为空 + // Check the response; it is empty ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 5. 睡眠mds.heartbeat.clean_follower_afterMs + 10ms后 - // canndidate上报staled copyset-1(epoch=1, peers={1,2,3}, leader=1) - // mds下发删除配置,candidate上的数据会被误删 + // 5. Sleep mds.heartbeat.clean_follower_afterMs + 10ms, then the + // candidate reports stale copyset-1(epoch=1, peers={1,2,3}, leader=1); + // mds issues a deletion configuration, and the data on the candidate + // will be mistakenly deleted usleep((sleepTimeMs_ + 10) * 1000); rep.Clear(); req.Clear(); @@ -183,7 +202,8 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { ASSERT_EQ(peers.size(), conf.peers_size()); ASSERT_EQ(startEpoch, conf.epoch()); - // 6. leader上报最新配置copyset-1(epoch=3, peers={1,2,3,4}, leader=1) + // 6.
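Step 5 gets past the clean_follower_afterMs window with a fixed usleep. On a loaded machine a polling loop makes the same intent explicit; a sketch using only the helpers already visible in this test plus <chrono> (it assumes re-sending the stale heartbeat has no side effect until MDS decides to issue the delete):

    // Poll until MDS is willing to clean the stale candidate instead of
    // sleeping a fixed interval; stop as soon as a delete conf is issued.
    auto deadline = std::chrono::steady_clock::now() +
                    std::chrono::milliseconds(sleepTimeMs_ + 1000);
    while (std::chrono::steady_clock::now() < deadline) {
        rep.Clear();
        req.Clear();
        hbtest_->BuildBasicChunkServerRequest(candidate, &req);
        BuildCopySetInfo(&csInfo, startEpoch - 1, leader, peers);
        hbtest_->AddCopySetToRequest(&req, csInfo);
        hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
        if (rep.needupdatecopysets_size() > 0) {
            break;  // MDS issued the delete configuration
        }
        ::usleep(100 * 1000);  // probe every 100ms
    }
    ASSERT_EQ(1, rep.needupdatecopysets_size());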
leader reports the latest configuration copyset-1(epoch=3, + // peers={1,2,3,4}, leader=1) auto newPeers = peers; newPeers.emplace(candidate); auto newEpoch = startEpoch + 1; @@ -193,24 +213,25 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { BuildCopySetInfo(&csInfo, startEpoch + 1, leader, newPeers); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response, 为空 + // Check the response, it is empty ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查mdstopology的数据 + // Check the data of mdstopology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE(hbtest_->topology_->GetCopySet(key, ©setInfo)); ASSERT_EQ(newEpoch, copysetInfo.GetEpoch()); ASSERT_EQ(leader, copysetInfo.GetLeader()); ASSERT_EQ(newPeers, copysetInfo.GetCopySetMembers()); - // 7. canndidate上报staled copyset-1(epoch=1, peers={1,2,3}, leader=1) - // mds不下发配置 + // 7. candidate reports staled copyset-1(epoch=1, peers={1,2,3}, leader=1) + // mds does not distribute configuration rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(candidate, &req); BuildCopySetInfo(&csInfo, startEpoch - 1, leader, peers); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response, 下发copyset当前配置指导candidate删除数据 + // Check the response and issue the copyset current configuration guide + // candidate to delete data ASSERT_EQ(0, rep.needupdatecopysets_size()); } diff --git a/test/integration/raft/raft_config_change_test.cpp b/test/integration/raft/raft_config_change_test.cpp index 5660617558..ca34604820 100644 --- a/test/integration/raft/raft_config_change_test.cpp +++ b/test/integration/raft/raft_config_change_test.cpp @@ -21,24 +21,24 @@ */ #include -#include #include +#include #include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli2.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "test/integration/common/peer_cluster.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; const char kRaftConfigChangeTestLogDir[] = "./runlog/RaftConfigChange"; const char* kFakeMdsAddr = "127.0.0.1:9080"; @@ -46,96 +46,66 @@ const char* kFakeMdsAddr = "127.0.0.1:9080"; static constexpr uint32_t kOpRequestAlignSize = 4096; static const char* raftConfigParam[5][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9081", - "-chunkServerStoreUri=local://./9081/", - "-chunkServerMetaUri=local://./9081/chunkserver.dat", - "-copySetUri=local://./9081/copysets", - "-raftSnapshotUri=curve://./9081/copysets", - "-raftLogUri=curve://./9081/copysets", - "-recycleUri=local://./9081/recycler", - "-chunkFilePoolDir=./9081/chunkfilepool/", - "-chunkFilePoolMetaPath=./9081/chunkfilepool.meta", - "-walFilePoolDir=./9081/walfilepool/", - "-walFilePoolMetaPath=./9081/walfilepool.meta", - "-conf=./9081/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9082", - "-chunkServerStoreUri=local://./9082/", - "-chunkServerMetaUri=local://./9082/chunkserver.dat", - "-copySetUri=local://./9082/copysets", - "-raftSnapshotUri=curve://./9082/copysets", - "-raftLogUri=curve://./9082/copysets", - "-recycleUri=local://./9082/recycler", 
- "-chunkFilePoolDir=./9082/chunkfilepool/", - "-chunkFilePoolMetaPath=./9082/chunkfilepool.meta", - "-walFilePoolDir=./9082/walfilepool/", - "-walFilePoolMetaPath=./9082/walfilepool.meta", - "-conf=./9082/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9083", - "-chunkServerStoreUri=local://./9083/", - "-chunkServerMetaUri=local://./9083/chunkserver.dat", - "-copySetUri=local://./9083/copysets", - "-raftSnapshotUri=curve://./9083/copysets", - "-raftLogUri=curve://./9083/copysets", - "-recycleUri=local://./9083/recycler", - "-chunkFilePoolDir=./9083/chunkfilepool/", - "-chunkFilePoolMetaPath=./9083/chunkfilepool.meta", - "-walFilePoolDir=./9083/walfilepool/", - "-walFilePoolMetaPath=./9083/walfilepool.meta", - "-conf=./9083/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9084", - "-chunkServerStoreUri=local://./9084/", - "-chunkServerMetaUri=local://./9084/chunkserver.dat", - "-copySetUri=local://./9084/copysets", - "-raftSnapshotUri=curve://./9084/copysets", - "-raftLogUri=curve://./9084/copysets", - "-recycleUri=local://./9084/recycler", - "-chunkFilePoolDir=./9084/chunkfilepool/", - "-chunkFilePoolMetaPath=./9084/chunkfilepool.meta", - "-walFilePoolDir=./9084/walfilepool/", - "-walFilePoolMetaPath=./9084/walfilepool.meta", - "-conf=./9084/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9085", - "-chunkServerStoreUri=local://./9085/", - "-chunkServerMetaUri=local://./9085/chunkserver.dat", - "-copySetUri=local://./9085/copysets", - "-raftSnapshotUri=curve://./9085/copysets", - "-raftLogUri=curve://./9085/copysets", - "-recycleUri=local://./9085/recycler", - "-chunkFilePoolDir=./9085/chunkfilepool/", - "-chunkFilePoolMetaPath=./9085/chunkfilepool.meta", - "-walFilePoolDir=./9085/walfilepool/", - "-walFilePoolMetaPath=./9085/walfilepool.meta", - "-conf=./9085/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9081", + "-chunkServerStoreUri=local://./9081/", + "-chunkServerMetaUri=local://./9081/chunkserver.dat", + "-copySetUri=local://./9081/copysets", + "-raftSnapshotUri=curve://./9081/copysets", + "-raftLogUri=curve://./9081/copysets", + "-recycleUri=local://./9081/recycler", + "-chunkFilePoolDir=./9081/chunkfilepool/", + "-chunkFilePoolMetaPath=./9081/chunkfilepool.meta", + "-walFilePoolDir=./9081/walfilepool/", + "-walFilePoolMetaPath=./9081/walfilepool.meta", + "-conf=./9081/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9082", + "-chunkServerStoreUri=local://./9082/", + "-chunkServerMetaUri=local://./9082/chunkserver.dat", + "-copySetUri=local://./9082/copysets", + "-raftSnapshotUri=curve://./9082/copysets", + "-raftLogUri=curve://./9082/copysets", + "-recycleUri=local://./9082/recycler", + "-chunkFilePoolDir=./9082/chunkfilepool/", + "-chunkFilePoolMetaPath=./9082/chunkfilepool.meta", + "-walFilePoolDir=./9082/walfilepool/", + "-walFilePoolMetaPath=./9082/walfilepool.meta", + "-conf=./9082/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9083", + "-chunkServerStoreUri=local://./9083/", + "-chunkServerMetaUri=local://./9083/chunkserver.dat", + "-copySetUri=local://./9083/copysets", + "-raftSnapshotUri=curve://./9083/copysets", 
+ "-raftLogUri=curve://./9083/copysets", + "-recycleUri=local://./9083/recycler", + "-chunkFilePoolDir=./9083/chunkfilepool/", + "-chunkFilePoolMetaPath=./9083/chunkfilepool.meta", + "-walFilePoolDir=./9083/walfilepool/", + "-walFilePoolMetaPath=./9083/walfilepool.meta", + "-conf=./9083/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9084", + "-chunkServerStoreUri=local://./9084/", + "-chunkServerMetaUri=local://./9084/chunkserver.dat", + "-copySetUri=local://./9084/copysets", + "-raftSnapshotUri=curve://./9084/copysets", + "-raftLogUri=curve://./9084/copysets", + "-recycleUri=local://./9084/recycler", + "-chunkFilePoolDir=./9084/chunkfilepool/", + "-chunkFilePoolMetaPath=./9084/chunkfilepool.meta", + "-walFilePoolDir=./9084/walfilepool/", + "-walFilePoolMetaPath=./9084/walfilepool.meta", + "-conf=./9084/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9085", + "-chunkServerStoreUri=local://./9085/", + "-chunkServerMetaUri=local://./9085/chunkserver.dat", + "-copySetUri=local://./9085/copysets", + "-raftSnapshotUri=curve://./9085/copysets", + "-raftLogUri=curve://./9085/copysets", + "-recycleUri=local://./9085/recycler", + "-chunkFilePoolDir=./9085/chunkfilepool/", + "-chunkFilePoolMetaPath=./9085/chunkfilepool.meta", + "-walFilePoolDir=./9085/walfilepool/", + "-walFilePoolMetaPath=./9085/walfilepool.meta", + "-conf=./9085/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; class RaftConfigChangeTest : public testing::Test { @@ -179,39 +149,34 @@ class RaftConfigChangeTest : public testing::Test { ASSERT_TRUE(cg4.Init("9084")); ASSERT_TRUE(cg5.Init("9085")); cg1.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg1.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg1.SetKV("chunkserver.common.logDir", - kRaftConfigChangeTestLogDir); + std::to_string(snapshotIntervalS)); + cg1.SetKV("chunkserver.common.logDir", kRaftConfigChangeTestLogDir); cg1.SetKV("mds.listen.addr", kFakeMdsAddr); cg2.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg2.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg2.SetKV("chunkserver.common.logDir", - kRaftConfigChangeTestLogDir); + std::to_string(snapshotIntervalS)); + cg2.SetKV("chunkserver.common.logDir", kRaftConfigChangeTestLogDir); cg2.SetKV("mds.listen.addr", kFakeMdsAddr); cg3.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg3.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg3.SetKV("chunkserver.common.logDir", - kRaftConfigChangeTestLogDir); + std::to_string(snapshotIntervalS)); + cg3.SetKV("chunkserver.common.logDir", kRaftConfigChangeTestLogDir); cg3.SetKV("mds.listen.addr", kFakeMdsAddr); cg4.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg4.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg4.SetKV("chunkserver.common.logDir", - kRaftConfigChangeTestLogDir); + std::to_string(snapshotIntervalS)); + cg4.SetKV("chunkserver.common.logDir", kRaftConfigChangeTestLogDir); cg4.SetKV("mds.listen.addr", kFakeMdsAddr); cg5.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); 
cg5.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg5.SetKV("chunkserver.common.logDir", - kRaftConfigChangeTestLogDir); + std::to_string(snapshotIntervalS)); + cg5.SetKV("chunkserver.common.logDir", kRaftConfigChangeTestLogDir); cg5.SetKV("mds.listen.addr", kFakeMdsAddr); ASSERT_TRUE(cg1.Generate()); ASSERT_TRUE(cg2.Generate()); @@ -268,22 +233,20 @@ class RaftConfigChangeTest : public testing::Test { int confChangeTimeoutMs; int snapshotIntervalS; std::map paramsIndexs; - std::vector params; + std::vector params; int maxWaitInstallSnapshotMs; - // 等待多个副本数据一致的时间 + // Waiting for multiple replica data to be consistent int waitMultiReplicasBecomeConsistent; }; - - butil::AtExitManager atExitManager; /** - * 1. 3个节点正常启动 - * 2. 移除一个follower - * 3. 重复移除上一个follower - * 4. 再添加回来 - * 5. 重复添加回来 + * 1. 3 nodes start normally + * 2. Remove a follower + * 3. Repeatedly remove the previous follower + * 4. Add it back again + * 5. Repeatedly add back */ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { LogicPoolID logicPoolId = 2; @@ -293,7 +256,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 member LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -301,12 +264,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -317,15 +276,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 移除1个follower + // 2. Remove 1 follower LOG(INFO) << "remove 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -335,61 +289,40 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, removePeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, removePeer, options); ASSERT_TRUE(st1.ok()); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 3. 重复移除,验证重复移除的逻辑是否正常 - butil::Status - st2 = RemovePeer(logicPoolId, copysetId, conf, removePeer, options); + // 3. Duplicate removal, verify if the logic of duplicate removal is normal + butil::Status st2 = + RemovePeer(logicPoolId, copysetId, conf, removePeer, options); ASSERT_TRUE(st2.ok()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. add回来 + // 4. 
Add it back conf.remove_peer(removePeer.address()); - butil::Status - st3 = AddPeer(logicPoolId, copysetId, conf, removePeer, options); + butil::Status st3 = + AddPeer(logicPoolId, copysetId, conf, removePeer, options); ASSERT_TRUE(st3.ok()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 5. 重复add回来,验证重复添加的逻辑是否正常 + // 5. Repeat the add and verify if the logic added repeatedly is normal conf.add_peer(removePeer.address()); - butil::Status - st4 = AddPeer(logicPoolId, copysetId, conf, removePeer, options); + butil::Status st4 = + AddPeer(logicPoolId, copysetId, conf, removePeer, options); ASSERT_TRUE(st4.ok()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 3); } @@ -402,7 +335,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveShutdownPeer) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -410,12 +343,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveShutdownPeer) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -426,75 +355,50 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveShutdownPeer) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 移除此follower + // 3. Remove this follower braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st1.ok()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 拉起follower + // 4. 
Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 5. add回来 + // 5. Add it back conf.remove_peer(shutdownPeer.address()); - butil::Status - st2 = AddPeer(logicPoolId, copysetId, conf, shutdownPeer, options); + butil::Status st2 = + AddPeer(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st2.ok()) << st2.error_str(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } @@ -507,7 +411,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveHangPeer) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -515,12 +419,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveHangPeer) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -531,81 +431,56 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveHangPeer) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. hang 1个follower + // 2. Hang 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.HangPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 移除此follower + // 3. Remove this follower braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st1.ok()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 恢复follower + // 4. Restore follower LOG(INFO) << "recover hang follower"; ASSERT_EQ(0, cluster.SignalPeer(shutdownPeer)); - // 5. add回来 + // 5. 
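ShutdownPeer() kills the chunkserver process, whereas HangPeer()/SignalPeer() leave it alive but unresponsive, so a hung peer keeps its raft state and can rejoin mid-term. The PeerCluster implementation is not shown in this hunk; a plausible sketch, assuming it uses job-control signals:

    #include <csignal>
    #include <sys/types.h>

    // Suspend the chunkserver process: it stops responding but keeps all of
    // its raft state, unlike a shutdown.
    int HangPeerSketch(pid_t pid) { return ::kill(pid, SIGSTOP); }

    // Resume it: the peer rejoins the group with its pre-hang log intact.
    int SignalPeerSketch(pid_t pid) { return ::kill(pid, SIGCONT); }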
Add it back conf.remove_peer(shutdownPeer.address()); - butil::Status - st2 = AddPeer(logicPoolId, copysetId, conf, shutdownPeer, options); + butil::Status st2 = + AddPeer(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st2.ok()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 1. 3个节点正常启动 - * 2. 移除leader - * 3. 再将old leader添加回来 + * 1. 3 nodes start normally + * 2. Remove leader + * 3. Add the old leader back again */ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { LogicPoolID logicPoolId = 2; @@ -615,7 +490,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -623,12 +498,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -638,22 +509,17 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 移除leader + // 2. Remove leader LOG(INFO) << "remove leader"; braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); ASSERT_TRUE(st1.ok()); Peer oldLeader = leaderPeer; @@ -661,50 +527,35 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STRNE(oldLeader.address().c_str(), leaderPeer.address().c_str()); - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. add回来 + // 3. 
Add it back conf.remove_peer(oldLeader.address()); - butil::Status - st3 = AddPeer(logicPoolId, copysetId, conf, oldLeader, options); + butil::Status st3 = + AddPeer(logicPoolId, copysetId, conf, oldLeader, options); ASSERT_TRUE(st3.ok()); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STRNE(oldLeader.address().c_str(), leaderPeer.address().c_str()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 3); } /** - * 1. 3个节点正常启动 - * 2. 挂一个follower - * 3. 再将leader移除掉 - * 4. follower拉起来 + * 1. 3 nodes start normally + * 2. Hang a follower + * 3. Then remove the leader + * 4. Pull up the follower */ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { LogicPoolID logicPoolId = 2; @@ -714,7 +565,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; std::vector<Peer> peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -738,79 +585,57 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector<Peer> followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 移除leader + // 3. Remove leader LOG(INFO) << "remove leader: " << leaderPeer.address(); braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); Peer oldLeader = leaderPeer; /** - * 一般能够移除成功,但是因为一个follower已经down了,那么 - * leader会自动进行check term,会发现已经有大多数的follower - * 已经失联,此时leader会主动step down,所以的request会提前 - * 返回失败,所以下面的断言会失败,但是移除本身会成功 + * Removal is generally successful, but if one follower has already gone + * down, the leader will automatically perform a term check and discover + * that the majority of followers are no longer connected.
At this point, + * the leader will proactively step down, causing all requests to return as + * failures prematurely. Therefore, the assertions below may fail, but the + * removal itself will be successful. */ -// ASSERT_TRUE(st1.ok()); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + // ASSERT_TRUE(st1.ok()); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ::usleep(1000 * electionTimeoutMs * 2); - // 4. 拉起follower + // 4. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // leader已经移除,所以只用验证2个副本数据一致性 + // The leader has been removed, so only the consistency of the data for two + // replicas is verified ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; for (Peer peer : peers) { @@ -822,10 +647,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { } /** - * 1. 3个节点正常启动 - * 2. hang一个follower - * 3. 再将leader移除掉 - * 4. follower拉起来 + * 1. 3 nodes start normally + * 2. Hang a follower + * 3. Remove the leader again + * 4. pull up follower */ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { LogicPoolID logicPoolId = 2; @@ -835,7 +660,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -843,12 +668,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -859,78 +680,56 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. hang1个follower + // 2. Hang 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer hangPeer = followerPeers[0]; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 移除leader + // 3. 
Remove leader LOG(INFO) << "remove leader: " << leaderPeer.address(); braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); Peer oldLeader = leaderPeer; /** - * 一般能够移除成功,但是因为一个follower已经down了,那么 - * leader会自动进行check term,会发现已经有大多数的follower - * 已经失联,此时leader会主动step down,所以的request会提前 - * 返回失败,所以下面的断言会失败,但是移除本身会成功 + * Removal is generally successful, but if one follower has already gone + * down, the leader will automatically perform a term check and discover + * that the majority of followers are no longer connected. At this point, + * the leader will proactively step down, causing all requests to return as + * failures prematurely. Therefore, the assertions below may fail, but the + * removal itself will be successful. */ -// ASSERT_TRUE(st1.ok()); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + // ASSERT_TRUE(st1.ok()); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ::usleep(1000 * electionTimeoutMs * 2); - // 4. 拉起follower + // 4. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.SignalPeer(hangPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // leader已经移除,所以验证2个副本数据一致性 + // The leader has been removed, so verify the data consistency of the two + // replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; for (Peer peer : peers) { @@ -942,9 +741,9 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { } /** - * 1. {A、B、C} 3个节点正常启动,假设A是leader - * 2. 挂掉B,transfer leader给B - * 3. 拉起B,transfer leader给B + * 1. {A, B, C} three nodes start normally, assuming A is the leader + * 2. Hang up B, transfer leader to B + * 3. Pull up B, transfer leader to B */ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { LogicPoolID logicPoolId = 2; @@ -954,7 +753,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -962,12 +761,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -978,28 +773,18 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 3. transfer leader to shutdown peer braft::cli::CliOptions options; @@ -1009,19 +794,14 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { butil::Status st1 = TransferLeader(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st1.ok()); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch -1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STRNE(shutdownPeer.address().c_str(), leaderId.to_string().c_str()); - // 4. 拉起follower,然后再把leader transfer过去 + // 4. 
Pull up the follower and then transfer the leader over LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); @@ -1032,11 +812,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { butil::Status status; LOG(INFO) << "start transfer leader to " << shutdownPeer.address(); for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - shutdownPeer, - options); + status = + TransferLeader(logicPoolId, copysetId, conf, shutdownPeer, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderPeer); if (leaderPeer.address() == shutdownPeer.address()) { @@ -1050,32 +827,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { ASSERT_STREQ(shutdownPeer.address().c_str(), leaderPeer.address().c_str()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 1. {A、B、C} 3个节点正常启动,假设A是leader - * 2. hang B,transfer leader给B - * 3. 恢复 B,transfer leader给B + * 1. {A, B, C} three nodes start normally, assuming A is the leader + * 2. Hang B, transfer leader to B + * 3. Restore B, transfer leader to B */ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { LogicPoolID logicPoolId = 2; @@ -1085,7 +852,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1093,12 +860,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1109,28 +872,18 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. hang1个follower + // 2. Hang 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer hangPeer = followerPeers[0]; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 3. 
transfer leader to hang peer braft::cli::CliOptions options; @@ -1145,7 +898,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STRNE(hangPeer.address().c_str(), leaderId.to_string().c_str()); - // 4. 恢复follower,然后再把leader transfer过去 + // 4. Restore the follower and then transfer the leader LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.SignalPeer(hangPeer)); @@ -1155,54 +908,43 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { butil::Status status; LOG(INFO) << "start transfer leader to " << hangPeer.address(); for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - hangPeer, - options); + status = + TransferLeader(logicPoolId, copysetId, conf, hangPeer, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderPeer); if (leaderPeer.address() == hangPeer.address()) { break; } } - LOG(INFO) << i + 1 << " th transfer leader to " - << hangPeer.address() << " failed"; + LOG(INFO) << i + 1 << " th transfer leader to " << hangPeer.address() + << " failed"; ::sleep(1); } ASSERT_STREQ(hangPeer.address().c_str(), leaderPeer.address().c_str()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** * - * 1. {A、B、C} 3个节点正常启 - * 2. 挂掉一个follower - * 3. 起一个节点D,Add D(需要额外确保通过snapshot恢复) - * 4. remove挂掉的follower + * 1. {A, B, C} three nodes start normally + * 2. Hang up a follower + * 3. Start a node D, Add D (additional ensure recovery through snapshot) + * 4. Remove the failed follower */ -TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInstallSnapshot) { // NOLINT +TEST_F( + RaftConfigChangeTest, + ThreeNodeShutdownPeerAndThenAddNewFollowerFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1210,7 +952,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1218,12 +960,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1234,54 +972,34 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 通过打两次快照确保后面的恢复必须走安装快照 + // Wait snapshot, ensuring that subsequent restores must follow the + // installation snapshot by taking two snapshots LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起peer4 - ASSERT_EQ(0, cluster.StartPeer(peer4, - PeerCluster::PeerToId(peer4))); + // 3. 
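The two snapshot waits are what force recovery through InstallSnapshot: after the second snapshot the leader has compacted away log entries the offline peer still needs, so plain log replication can no longer catch it up. An illustrative sketch of the decision (not braft's real API):

    // peerNextIndex: first log entry the returning peer still needs.
    // leaderFirstIndex: oldest entry the leader retains after compaction.
    bool NeedsInstallSnapshot(int64_t peerNextIndex, int64_t leaderFirstIndex) {
        // Once compaction has discarded entries the peer needs, replaying
        // the log can no longer catch it up; a full snapshot must be shipped.
        return peerNextIndex < leaderFirstIndex;
    }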
Pull up peer4 + ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ::sleep(1); Configuration conf = cluster.CopysetConf(); @@ -1291,25 +1009,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta butil::Status st = AddPeer(logicPoolId, copysetId, conf, peer4, options); ASSERT_TRUE(st.ok()) << st.error_str(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); ::usleep(waitMultiReplicasBecomeConsistent * 1000); peers.push_back(peer4); @@ -1324,12 +1032,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta /** * - * 1. {A、B、C} 3个节点正常启 - * 2. hang一个follower - * 3. 起一个节点D,Add D(需要额外确保通过snapshot恢复) - * 4. remove挂掉的follower + * 1. {A, B, C} three nodes start normally + * 2. Hang a follower + * 3. Start a node D, Add D (additional ensure recovery through snapshot) + * 4. Remove the failed follower */ -TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSnapshot) { // NOLINT +TEST_F(RaftConfigChangeTest, + ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1337,7 +1046,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1345,12 +1054,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1361,54 +1066,33 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer hangPeer = followerPeers[0]; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起peer4 - ASSERT_EQ(0, cluster.StartPeer(peer4, - PeerCluster::PeerToId(peer4))); + // 3. Pull up peer4 + ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ::sleep(1); @@ -1419,25 +1103,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn butil::Status st = AddPeer(logicPoolId, copysetId, conf, peer4, options); ASSERT_TRUE(st.ok()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, hangPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, hangPeer, options); ::usleep(waitMultiReplicasBecomeConsistent * 1000); peers.push_back(peer4); @@ -1451,11 +1125,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn } /** - * 1. {A、B、C} 3个节点正常启 - * 2. 挂了follower,并删除其所有raft log和数据 - * 3. 重启follower,follower能够通过数据恢复最终追上leader + * 1. {A, B, C} three nodes start normally + * 2. Hang up the follower and delete all its raft logs and data + * 3. Restart the follower, and the follower can eventually catch up with the + * leader through data recovery */ -TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapshot) { // NOLINT +TEST_F(RaftConfigChangeTest, + ThreeNodeRemoveDataAndThenRecoverFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1463,7 +1139,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1471,12 +1147,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1487,92 +1159,63 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 删除此peer的数据,然后重启 + // Delete the data for this peer and restart it ASSERT_EQ(0, ::system(PeerCluster::RemoveCopysetDirCmd(shutdownPeer).c_str())); - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); - ASSERT_FALSE(fs->DirExists(PeerCluster::CopysetDirWithoutProtocol( - shutdownPeer))); + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + ASSERT_FALSE( + fs->DirExists(PeerCluster::CopysetDirWithoutProtocol(shutdownPeer))); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 1. {A、B、C} 3个节点正常启 - * 2. 
挂了follower,并删除其所有raft log - * 3. 重启follower + * 1. {A, B, C} three nodes start normally + * 2. Hang up the follower and delete all its raft logs + * 3. Restart follower */ -TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnapshot) { // NOLINT +TEST_F(RaftConfigChangeTest, + ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1580,7 +1223,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1588,12 +1231,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1604,96 +1243,69 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 删除此peer的log,然后重启 - ::system(PeerCluster::RemoveCopysetLogDirCmd(shutdownPeer, - logicPoolId, - copysetId).c_str()); - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); - ASSERT_FALSE(fs->DirExists(PeerCluster::RemoveCopysetLogDirCmd(shutdownPeer, - logicPoolId, - copysetId))); - - // wait snapshot, 保证能够触发安装快照 + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // Delete the log of this peer and restart it + ::system(PeerCluster::RemoveCopysetLogDirCmd(shutdownPeer, logicPoolId, + copysetId) + .c_str()); + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + ASSERT_FALSE(fs->DirExists(PeerCluster::RemoveCopysetLogDirCmd( + shutdownPeer, logicPoolId, copysetId))); + + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. 
拉起follower + // 3. 
Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中挂掉leader - * 本次install snapshot失败,但是new leader会被选出来,new leader继续给 - * follower恢复数据,最终follower数据追上leader并一致 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot), and shut down the leader during the recovery. + * This install snapshot attempt fails, but a new leader is elected and + * continues restoring data to the follower; the follower's data eventually + * catches up with the leader's and stays consistent */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderShutdown) { // NOLINT +TEST_F( + RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderShutdown) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1701,7 +1313,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1709,12 +1321,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1725,91 +1333,57 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader挂掉 + // 4. After a period of random sleep, hang up the leader and simulate the + // installation snapshot when the leader hangs up int sleepMs = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs); ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer oldLeader = leaderPeer; - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); LOG(INFO) << "new leader is: " << leaderPeer.address(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; @@ -1822,11 +1396,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中leader重启 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot), and restart the leader during the recovery process */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderRestart) { // NOLINT +TEST_F(RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderRestart) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1834,7 +1410,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1842,12 +1418,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1858,93 +1430,59 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader挂掉 + // 4. 
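(Editor's note, not part of the patch: step 4 below injects the leader fault at a random point inside the install-snapshot window. The verification pattern is condensed here from the surrounding tests, using only the helpers this file already calls -- reads against the killed leader must fail until a new leader is elected, after which the previously written data must still be readable:)

// Condensed from the surrounding tests; all names are the test file's own.
ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer));  // kill the current leader
ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
                       ch - 1, 1);               // old address serves nothing
ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));   // wait for re-election
// The data written earlier must survive the leader change.
ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop);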
After a period of random sleep, hang up the leader and simulate the + // installation snapshot when the leader hangs up int sleepMs = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs); ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer oldLeader = leaderPeer; - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); LOG(INFO) << "new leader is: " << leaderPeer.address(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; @@ -1957,11 +1495,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中hang leader + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot), and hang the leader during the recovery process */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderHang) { // NOLINT +TEST_F(RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderHang) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1969,7 +1509,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1977,12 +1517,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1993,91 +1529,57 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. 
挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader hang + // 4. After a period of random sleep, hang up the leader and simulate the + // leader hang during installation snapshot int sleepMs = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs); ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); Peer oldLeader = leaderPeer; - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); LOG(INFO) << "new leader is: " << leaderPeer.address(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; @@ -2090,11 +1592,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中leader hang一会 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot), and during the recovery process, the leader will + * hang for a while */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderHangMoment) { // NOLINT +TEST_F( + RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderHangMoment) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -2102,7 +1608,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2110,12 +1616,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2126,58 +1628,39 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader挂掉 + // 4. 
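(Editor's note, not part of the patch: the suite distinguishes two fault styles -- cluster.ShutdownPeer() kills the chunkserver process, while cluster.HangPeer() only freezes it and cluster.SignalPeer() later resumes it, as used further below. The assumed semantics of these PeerCluster helpers are summarized in this sketch:)

// Assumed semantics of the fault-injection helpers used in this file:
cluster.ShutdownPeer(peer);  // process exits; needs StartPeer() to come back
cluster.HangPeer(peer);      // process frozen in place; state kept in memory
cluster.SignalPeer(peer);    // unfreezes a peer previously hung by HangPeer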
After a period of random sleep, hang up the leader and simulate the + // installation snapshot when the leader hangs up int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs1); @@ -2191,22 +1674,12 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); LOG(INFO) << "new leader is: " << leaderPeer.address(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; @@ -2219,12 +1692,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower, - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中follower挂了 - * 4. 一段时间后拉起来 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower, + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot), but the follower hung during the recovery process + * 4. After a period of time, pull it up */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerShutdown) { // NOLINT +TEST_F( + RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerShutdown) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -2232,7 +1708,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2240,12 +1716,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2256,63 +1728,43 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. 
挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); - // 4. 随机睡眠一段时间后,挂掉follower,模拟install snapshot的时候 - // follower出现问题 + // 4. After a random period of sleep, shut down the follower to simulate a + // follower failure during the install snapshot int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs1); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // 5. 把follower拉来 + // 5. Bring the follower back up int sleepMs2 = butil::fast_rand_less_than(1000) + 1; ::usleep(1000 * sleepMs2); ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, @@ -2321,33 +1773,27 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower, - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中follower重启了 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower, + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot). During the recovery process, the follower + * restarts */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerRestart) { // NOLINT +TEST_F( + RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerRestart) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -2355,7 +1801,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2379,97 +1821,70 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); - // 4. 随机睡眠一段时间后,挂掉follower,模拟install snapshot的时候 - // follower出现问题 + // 4. After a random period of sleep, shut down the follower to simulate a + // follower failure during the install snapshot int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs1); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // 5. 把follower拉来 + // 5. Bring the follower back up ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower, - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中follower hang了 - * 4. 一段时间后恢复 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower, + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot), but the follower hangs during the recovery + * process + * 4. It recovers after a period of time */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerHang) { // NOLINT +TEST_F(RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerHang) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -2477,7 +1892,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2485,12 +1900,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2501,63 +1912,43 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); - // 4. 随机睡眠一段时间后,hang follower,模拟install snapshot的时候 - // follower出现问题 + // 4. After a period of random sleep, hang the follower to simulate a + // follower failure during the install snapshot int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs1); ASSERT_EQ(0, cluster.HangPeer(shutdownPeer)); - // 5. 把follower恢复 + // 5. Restore the follower int sleepMs2 = butil::fast_rand_less_than(1000) + 1; ::usleep(1000 * sleepMs2); ASSERT_EQ(0, cluster.SignalPeer(shutdownPeer)); @@ -2565,32 +1956,23 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的复制组,并挂掉follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 恢复follower + * Verify the replication groups of three nodes and hang the follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the follower + * 3. Restore the follower */ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -2600,7 +1982,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
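(Editor's note, not part of the patch: the ThreeNodeRecoverFollowerFromInstallSnapshot test starting here ends by transferring leadership to the just-recovered peer. Because a transfer request can be rejected while the target is still catching up, the test retries in a loop; the sketch below condenses the pattern that appears further down, using only names from this file:)

// Retry TransferLeader until the recovered peer actually holds leadership.
for (int i = 0; i < kMaxLoop; ++i) {
    butil::Status status =
        TransferLeader(logicPoolId, copysetId, conf, shutdownPeer, options);
    if (0 == status.error_code()) {
        cluster.WaitLeader(&leaderPeer);
        if (leaderPeer.address() == shutdownPeer.address()) {
            break;  // the transfer took effect
        }
    }
    ::sleep(1);  // give the target time to catch up before retrying
}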
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2608,12 +1990,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2624,52 +2002,32 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. 
拉起follower + // 3. 
Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); @@ -2684,11 +2042,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { butil::Status status; LOG(INFO) << "start transfer leader to " << shutdownPeer.address(); for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - shutdownPeer, - options); + status = + TransferLeader(logicPoolId, copysetId, conf, shutdownPeer, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderPeer); if (leaderPeer.address() == shutdownPeer.address()) { @@ -2703,31 +2058,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { ASSERT_EQ(0, ::strcmp(leaderPeer.address().c_str(), shutdownPeer.address().c_str())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 1. 创建5个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉两个follower - * 3. 让两个follower从installsnapshot恢复 + * 1. Create a replication group of 5 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up two followers + * 3. Restore two followers from installsnapshot */ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -2737,7 +2083,7 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { char ch = 'a'; int loop = 25; - // 1. 启动5个成员的复制组 + // 1. Start a replication group of 5 member LOG(INFO) << "start 5 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2747,12 +2093,8 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { peers.push_back(peer3); peers.push_back(peer4); peers.push_back(peer5); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2765,15 +2107,10 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉2个follower + // 2. 
挂掉2个follower + // 2. 
Hang up 2 followers LOG(INFO) << "shutdown 2 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -2782,37 +2119,22 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { Peer shutdownPeer2 = followerPeers[1]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer1)); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer2)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown 2 follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer1, PeerCluster::PeerToId(shutdownPeer1))); @@ -2820,33 +2142,24 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { PeerCluster::PeerToId(shutdownPeer2))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的复制组{A、B、C},并挂掉follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the follower + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -2856,19 +2169,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2878,30 +2187,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // a + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // a loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // b + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -2912,33 +2213,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { Configuration newConf = conf; newConf.remove_peer(PeerId(shutdownPeer.address())); newConf.add_peer(PeerId(peer4.address())); - butil::Status st = ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - options); + butil::Status st = + ChangePeers(logicPoolId, copysetId, conf, newConf, options); ASSERT_TRUE(st.ok()) << st.error_str(); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); @@ -2954,11 +2244,12 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { } /** - * 验证3个节点的复制组{A、B、C},并Hang follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang follower - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang follower + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -2968,19 +2259,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2990,30 +2277,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // a + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // a loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "hang 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer hangPeer = followerPeers[0]; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // b + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -3024,33 +2303,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { Configuration newConf = conf; newConf.remove_peer(PeerId(hangPeer.address())); newConf.add_peer(PeerId(peer4.address())); - butil::Status st = ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - options); + butil::Status st = + ChangePeers(logicPoolId, copysetId, conf, newConf, options); ASSERT_TRUE(st.ok()) << st.error_str(); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); @@ -3066,11 +2334,12 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { } /** - * 验证3个节点的复制组{A、B、C},并挂掉leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the leader + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -3080,19 +2349,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -3102,29 +2367,21 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // a + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // a loop); - // 2. 挂掉leader + // 2. Hang up the leader LOG(INFO) << "shutdown 1 leader"; Peer shutdownPeer = leaderPeer; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // 等待新的leader产生 + // Waiting for a new leader to be generated ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // b + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -3135,33 +2392,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { Configuration newConf = conf; newConf.remove_peer(PeerId(shutdownPeer.address())); newConf.add_peer(PeerId(peer4.address())); - butil::Status st = ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - options); + butil::Status st = + ChangePeers(logicPoolId, copysetId, conf, newConf, options); ASSERT_TRUE(st.ok()) << st.error_str(); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); @@ -3177,11 +2423,12 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { } /** - * 验证3个节点的复制组{A、B、C},并Hang leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang leader - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang leader + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -3191,19 +2438,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
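// "Transfer leader to the newly added node" goes through the braft CLI; a
// rough sketch of what TransferLeaderAssertSuccess wraps (retry loop
// omitted; the exact signature is assumed from this suite's usage):
//   braft::cli::CliOptions opt;
//   opt.max_retry = 3;
//   opt.timeout_ms = 10000;
//   butil::Status st =
//       TransferLeader(logicPoolId, copysetId, newConf, peer4, opt);
//   ASSERT_TRUE(st.ok()) << st.error_str();
//   // reads from the new leader must still return the pre-change data
// 1. 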
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -3213,29 +2456,21 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // a + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // a loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "hang 1 leader"; Peer hangPeer = leaderPeer; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - // 等待新的leader产生 + // Waiting for a new leader to be generated ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // b + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -3246,33 +2481,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { Configuration newConf = conf; newConf.remove_peer(PeerId(hangPeer.address())); newConf.add_peer(PeerId(peer4.address())); - butil::Status st = ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - options); + butil::Status st = + ChangePeers(logicPoolId, copysetId, conf, newConf, options); ASSERT_TRUE(st.ok()) << st.error_str(); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); diff --git a/test/integration/raft/raft_log_replication_test.cpp b/test/integration/raft/raft_log_replication_test.cpp index f6a39c3436..15b731e329 100644 --- a/test/integration/raft/raft_log_replication_test.cpp +++ b/test/integration/raft/raft_log_replication_test.cpp @@ -21,121 +21,91 @@ */ #include -#include #include +#include -#include #include +#include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "test/integration/common/peer_cluster.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; const char kRaftLogRepTestLogDir[] = "./runlog/RaftLogRep"; const char* kFakeMdsAddr = 
"127.0.0.1:9070"; static constexpr uint32_t kOpRequestAlignSize = 4096; static const char* raftLogParam[5][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9071", - "-chunkServerStoreUri=local://./9071/", - "-chunkServerMetaUri=local://./9071/chunkserver.dat", - "-copySetUri=local://./9071/copysets", - "-raftSnapshotUri=curve://./9071/copysets", - "-raftLogUri=curve://./9071/copysets", - "-recycleUri=local://./9071/recycler", - "-chunkFilePoolDir=./9071/chunkfilepool/", - "-chunkFilePoolMetaPath=./9071/chunkfilepool.meta", - "-walFilePoolDir=./9071/walfilepool/", - "-walFilePoolMetaPath=./9071/walfilepool.meta", - "-conf=./9071/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9072", - "-chunkServerStoreUri=local://./9072/", - "-chunkServerMetaUri=local://./9072/chunkserver.dat", - "-copySetUri=local://./9072/copysets", - "-raftSnapshotUri=curve://./9072/copysets", - "-raftLogUri=curve://./9072/copysets", - "-recycleUri=local://./9072/recycler", - "-chunkFilePoolDir=./9072/chunkfilepool/", - "-chunkFilePoolMetaPath=./9072/chunkfilepool.meta", - "-walFilePoolDir=./9072/walfilepool/", - "-walFilePoolMetaPath=./9072/walfilepool.meta", - "-conf=./9072/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9073", - "-chunkServerStoreUri=local://./9073/", - "-chunkServerMetaUri=local://./9073/chunkserver.dat", - "-copySetUri=local://./9073/copysets", - "-raftSnapshotUri=curve://./9073/copysets", - "-raftLogUri=curve://./9073/copysets", - "-recycleUri=local://./9073/recycler", - "-chunkFilePoolDir=./9073/chunkfilepool/", - "-chunkFilePoolMetaPath=./9073/chunkfilepool.meta", - "-walFilePoolDir=./9073/walfilepool/", - "-walFilePoolMetaPath=./9073/walfilepool.meta", - "-conf=./9073/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9074", - "-chunkServerStoreUri=local://./9074/", - "-chunkServerMetaUri=local://./9074/chunkserver.dat", - "-copySetUri=local://./9074/copysets", - "-raftSnapshotUri=curve://./9074/copysets", - "-raftLogUri=curve://./9074/copysets", - "-recycleUri=local://./9074/recycler", - "-chunkFilePoolDir=./9074/chunkfilepool/", - "-chunkFilePoolMetaPath=./9074/chunkfilepool.meta", - "-walFilePoolDir=./9074/walfilepool/", - "-walFilePoolMetaPath=./9074/walfilepool.meta", - "-conf=./9074/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9075", - "-chunkServerStoreUri=local://./9075/", - "-chunkServerMetaUri=local://./9075/chunkserver.dat", - "-copySetUri=local://./9075/copysets", - "-raftSnapshotUri=curve://./9075/copysets", - "-raftLogUri=curve://./9075/copysets", - "-recycleUri=local://./9075/recycler", - "-chunkFilePoolDir=./9075/chunkfilepool/", - "-chunkFilePoolMetaPath=./9075/chunkfilepool.meta", - "-walFilePoolDir=./9075/walfilepool/", - "-walFilePoolMetaPath=./9075/walfilepool.meta", - "-conf=./9075/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9071", + "-chunkServerStoreUri=local://./9071/", + "-chunkServerMetaUri=local://./9071/chunkserver.dat", + "-copySetUri=local://./9071/copysets", + "-raftSnapshotUri=curve://./9071/copysets", + "-raftLogUri=curve://./9071/copysets", + "-recycleUri=local://./9071/recycler", + 
"-chunkFilePoolDir=./9071/chunkfilepool/", + "-chunkFilePoolMetaPath=./9071/chunkfilepool.meta", + "-walFilePoolDir=./9071/walfilepool/", + "-walFilePoolMetaPath=./9071/walfilepool.meta", + "-conf=./9071/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9072", + "-chunkServerStoreUri=local://./9072/", + "-chunkServerMetaUri=local://./9072/chunkserver.dat", + "-copySetUri=local://./9072/copysets", + "-raftSnapshotUri=curve://./9072/copysets", + "-raftLogUri=curve://./9072/copysets", + "-recycleUri=local://./9072/recycler", + "-chunkFilePoolDir=./9072/chunkfilepool/", + "-chunkFilePoolMetaPath=./9072/chunkfilepool.meta", + "-walFilePoolDir=./9072/walfilepool/", + "-walFilePoolMetaPath=./9072/walfilepool.meta", + "-conf=./9072/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9073", + "-chunkServerStoreUri=local://./9073/", + "-chunkServerMetaUri=local://./9073/chunkserver.dat", + "-copySetUri=local://./9073/copysets", + "-raftSnapshotUri=curve://./9073/copysets", + "-raftLogUri=curve://./9073/copysets", + "-recycleUri=local://./9073/recycler", + "-chunkFilePoolDir=./9073/chunkfilepool/", + "-chunkFilePoolMetaPath=./9073/chunkfilepool.meta", + "-walFilePoolDir=./9073/walfilepool/", + "-walFilePoolMetaPath=./9073/walfilepool.meta", + "-conf=./9073/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9074", + "-chunkServerStoreUri=local://./9074/", + "-chunkServerMetaUri=local://./9074/chunkserver.dat", + "-copySetUri=local://./9074/copysets", + "-raftSnapshotUri=curve://./9074/copysets", + "-raftLogUri=curve://./9074/copysets", + "-recycleUri=local://./9074/recycler", + "-chunkFilePoolDir=./9074/chunkfilepool/", + "-chunkFilePoolMetaPath=./9074/chunkfilepool.meta", + "-walFilePoolDir=./9074/walfilepool/", + "-walFilePoolMetaPath=./9074/walfilepool.meta", + "-conf=./9074/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9075", + "-chunkServerStoreUri=local://./9075/", + "-chunkServerMetaUri=local://./9075/chunkserver.dat", + "-copySetUri=local://./9075/copysets", + "-raftSnapshotUri=curve://./9075/copysets", + "-raftLogUri=curve://./9075/copysets", + "-recycleUri=local://./9075/recycler", + "-chunkFilePoolDir=./9075/chunkfilepool/", + "-chunkFilePoolMetaPath=./9075/chunkfilepool.meta", + "-walFilePoolDir=./9075/walfilepool/", + "-walFilePoolMetaPath=./9075/walfilepool.meta", + "-conf=./9075/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; class RaftLogReplicationTest : public testing::Test { @@ -177,39 +147,34 @@ class RaftLogReplicationTest : public testing::Test { ASSERT_TRUE(cg4.Init("9074")); ASSERT_TRUE(cg5.Init("9075")); cg1.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg1.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg1.SetKV("chunkserver.common.logDir", - kRaftLogRepTestLogDir); + std::to_string(snapshotIntervalS)); + cg1.SetKV("chunkserver.common.logDir", kRaftLogRepTestLogDir); cg1.SetKV("mds.listen.addr", kFakeMdsAddr); cg2.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg2.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg2.SetKV("chunkserver.common.logDir", - kRaftLogRepTestLogDir); + 
std::to_string(snapshotIntervalS));
+        cg2.SetKV("chunkserver.common.logDir", kRaftLogRepTestLogDir);
         cg2.SetKV("mds.listen.addr", kFakeMdsAddr);
         cg3.SetKV("copyset.election_timeout_ms",
-                  std::to_string(electionTimeoutMs));
+                  std::to_string(electionTimeoutMs));
         cg3.SetKV("copyset.snapshot_interval_s",
-                  std::to_string(snapshotIntervalS));
-        cg3.SetKV("chunkserver.common.logDir",
-                  kRaftLogRepTestLogDir);
+                  std::to_string(snapshotIntervalS));
+        cg3.SetKV("chunkserver.common.logDir", kRaftLogRepTestLogDir);
         cg3.SetKV("mds.listen.addr", kFakeMdsAddr);
         cg4.SetKV("copyset.election_timeout_ms",
-                  std::to_string(electionTimeoutMs));
+                  std::to_string(electionTimeoutMs));
         cg4.SetKV("copyset.snapshot_interval_s",
-                  std::to_string(snapshotIntervalS));
-        cg4.SetKV("chunkserver.common.logDir",
-                  kRaftLogRepTestLogDir);
+                  std::to_string(snapshotIntervalS));
+        cg4.SetKV("chunkserver.common.logDir", kRaftLogRepTestLogDir);
         cg4.SetKV("mds.listen.addr", kFakeMdsAddr);
         cg5.SetKV("copyset.election_timeout_ms",
-                  std::to_string(electionTimeoutMs));
+                  std::to_string(electionTimeoutMs));
         cg5.SetKV("copyset.snapshot_interval_s",
-                  std::to_string(snapshotIntervalS));
-        cg5.SetKV("chunkserver.common.logDir",
-                  kRaftLogRepTestLogDir);
+                  std::to_string(snapshotIntervalS));
+        cg5.SetKV("chunkserver.common.logDir", kRaftLogRepTestLogDir);
         cg5.SetKV("mds.listen.addr", kFakeMdsAddr);
         ASSERT_TRUE(cg1.Generate());
         ASSERT_TRUE(cg2.Generate());
@@ -265,19 +230,20 @@ class RaftLogReplicationTest : public testing::Test {
     int electionTimeoutMs;
     int snapshotIntervalS;
     std::map<int, int> paramsIndexs;
-    std::vector<char **> params;
-    // 等待多个副本数据一致的时间
+    std::vector<char**> params;
+    // Time to wait for the replicas' data to become consistent
     int waitMultiReplicasBecomeConsistent;
 };

 butil::AtExitManager atExitManager;

 /**
- * 验证3个节点的复制组,测试隐式提交
- * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍
- * 2. 挂掉2个follower
- * 3. 等带step down
- * 3. 拉起1个follower
+ * Verify a replication group of 3 nodes and test implicit commit
+ * 1. Create a replication group of 3 members, wait for the leader to generate,
+ * write the data, and then read it out for verification
+ * 2. Kill 2 followers
+ * 3. Wait for step down
+ * 4. Pull up 1 follower
 */
 TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) {
     LogicPoolID logicPoolId = 2;
@@ -287,19 +253,15 @@ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) {
     char ch = 'a';
     int loop = 10;

-    // 1. 启动3个成员的复制组
+    // 1. Start a replication group of 3 members
     PeerId leaderId;
     Peer leaderPeer;
     std::vector<Peer> peers;
     peers.push_back(peer1);
     peers.push_back(peer2);
     peers.push_back(peer3);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     cluster.SetElectionTimeoutMs(electionTimeoutMs);
     cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -310,62 +272,38 @@ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) {
     ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
     ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

-    // 2. 挂掉2个Follower
+    // 2. Kill 2 followers
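// Background for the kill steps below (not part of the patch's changes):
// with both followers down the leader still appends the write to its own
// log, but the entry cannot reach a 2-of-3 majority, so the client call
// fails and the leader eventually steps down. Once one follower is back
// and the old leader wins the election again, committing the first entry
// of its new term implicitly commits the stranded entry too, which is why
// ReadVerify later finds data whose write was never acknowledged. Quorum
// arithmetic the test leans on (illustrative helper):
//   constexpr int QuorumOf(int n) { return n / 2 + 1; }
//   static_assert(QuorumOf(3) == 2, "one live replica cannot commit");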
    std::vector<Peer> followerPeers;
    PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers);
    ASSERT_GE(followerPeers.size(), 2);
    ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0]));
    ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[1]));
-    WriteVerifyNotAvailable(leaderPeer,
-                            logicPoolId,
-                            copysetId,
-                            chunkId,
-                            length,
-                            ch ++,
-                            1);
+    WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                            ch++, 1);

-    // 3. 等待step down,等待2个选举超时,保证一定step down
+    // 3. Wait for step down; wait two election timeouts to guarantee the
+    // leader has stepped down
    ::usleep(1000 * electionTimeoutMs * 2);
-    ReadVerifyNotAvailable(leaderPeer,
-                           logicPoolId,
-                           copysetId,
-                           chunkId,
-                           length,
-                           ch - 1,
-                           1);
-
-    // 4. 拉起1个follower
+    ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                           ch - 1, 1);
+
+    // 4. Pull up 1 follower
    ASSERT_EQ(0, cluster.StartPeer(followerPeers[0],
                                   PeerCluster::PeerToId(followerPeers[0])));
    Peer newLeader;
    ASSERT_EQ(0, cluster.WaitLeader(&newLeader));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
-    // new leader就是old leader
+    // The new leader is the old leader
    ASSERT_STREQ(leaderPeer.address().c_str(), newLeader.address().c_str());

-    // read step down之前append进去的log entry,测试隐式提交
-    ReadVerify(leaderPeer,
-               logicPoolId,
-               copysetId,
-               chunkId,
-               length,
-               ch - 1,
-               1);
-
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    // Read the log entry appended before step down, testing implicit
+    // commit.
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, 1);
+
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    ::usleep(waitMultiReplicasBecomeConsistent * 1000);
    std::vector<Peer> newPeers;
@@ -378,11 +316,12 @@ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) {
 }

 /**
- * 验证3个节点的复制组,测试日志截断
- * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍
- * 2. 挂掉2个follower
- * 3. 挂掉leader
- * 3. 拉起2个follower
+ * Verify a replication group of 3 nodes and test log truncation
+ * 1. Create a replication group of 3 members, wait for the leader to generate,
+ * write the data, and then read it out for verification
+ * 2. Kill 2 followers
+ * 3. Kill the leader
+ * 4. Pull up 2 followers
 */
 TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) {
     LogicPoolID logicPoolId = 2;
@@ -392,19 +331,15 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) {
     char ch = 'a';
     int loop = 10;

-    // 1. 启动3个成员的复制组
+    // 1. Start a replication group of 3 members
     PeerId leaderId;
     Peer leaderPeer;
     std::vector<Peer> peers;
     peers.push_back(peer1);
     peers.push_back(peer2);
     peers.push_back(peer3);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     cluster.SetElectionTimeoutMs(electionTimeoutMs);
     cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -415,33 +350,23 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) {
     ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
     ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

-    // 2. 挂掉2个Follower
+    // 2. Kill 2 followers
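// How the truncation case below plays out (background note, arguments
// abbreviated): the entry written with only the leader alive stays local,
// unreplicated and uncommitted. After that leader is killed and the two
// restarted followers elect a new leader, the orphaned entry conflicts
// with the new leader's log and is truncated away. That is why the test
// reads generation ch-1 with ReadNotVerify (the read succeeds but the
// payload is not guaranteed) and then verifies a fresh write normally:
//   ReadNotVerify(leader, lpid, cid, chunk, len, ch - 1, 2);
//   WriteThenReadVerify(leader, lpid, cid, chunk, len, ch++, loop);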
    std::vector<Peer> followerPeers;
    PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers);
    ASSERT_GE(followerPeers.size(), 2);
    ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0]));
    ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[1]));
-    WriteVerifyNotAvailable(leaderPeer,
-                            logicPoolId,
-                            copysetId,
-                            chunkId,
-                            length,
-                            ch++,
-                            2);
-
-    // 3. 挂掉leader
+    WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                            ch++, 2);
+
+    // 3. Kill the leader
    ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer));
    Peer oldLeader = leaderPeer;

-    // 4. 拉起2个follower
+    // 4. Pull up 2 followers
    ASSERT_EQ(0, cluster.StartPeer(followerPeers[0],
                                   PeerCluster::PeerToId(followerPeers[0])));
    ASSERT_EQ(0, cluster.StartPeer(followerPeers[1],
@@ -449,22 +374,12 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) {
                                   PeerCluster::PeerToId(followerPeers[1])));
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
-    // 日志截断
-    ReadNotVerify(leaderPeer,
-                  logicPoolId,
-                  copysetId,
-                  chunkId,
-                  length,
-                  ch - 1,
+    // Log truncation
+    ReadNotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1,
                  2);

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    ::usleep(waitMultiReplicasBecomeConsistent * 1000);
    std::vector<Peer> newPeers;
@@ -477,12 +392,14 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) {
 }

 /**
- * 验证3个节点的复制组,测试向落后多个term的follower复制日志
- * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍
- * 2. 挂掉一个follower
- * 3. 挂掉leader,等待2个ET重启
- * 4. 挂掉leader,等待2个ET重启
- * 3. 拉起挂掉的follower
+ * Verify a replication group of 3 nodes, testing log replication to a
+ * follower that has fallen behind by multiple terms
+ * 1. Create a replication group of 3 members, wait for the leader to generate,
+ * write the data, and then read it out for verification
+ * 2. Kill a follower
+ * 3. Kill the leader, wait 2 election timeouts (ET), then restart it
+ * 4. Kill the leader, wait 2 election timeouts, then restart it
+ * 5. Pull up the killed follower
 */
 TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) {
     LogicPoolID logicPoolId = 2;
@@ -492,19 +409,15 @@ TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) {
     char ch = 'a';
     int loop = 10;

-    // 1. 启动3个成员的复制组
+    // 1. Start a replication group of 3 members
     PeerId leaderId;
     Peer leaderPeer;
     std::vector<Peer> peers;
     peers.push_back(peer1);
     peers.push_back(peer2);
     peers.push_back(peer3);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     cluster.SetElectionTimeoutMs(electionTimeoutMs);
     cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -515,89 +428,64 @@ TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) {
     ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
     ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

-    // 2. 挂掉1个Follower
+    // 2. 
Hang up 1 Follower std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 挂掉leader,等待2个ET重启 + // 3. Hang up the leader and wait for 2 ETs to restart ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); ::usleep(1000 * electionTimeoutMs * 2); - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 挂掉leader,等待2个ET重启 + // 4. Hang up the leader and wait for 2 ETs to restart ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); ::usleep(1000 * electionTimeoutMs * 2); - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 5. 拉起挂掉的follower + // 5. Pull up the hanging follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 多等一会,保证安装快照成功 + // Wait a little longer to ensure successful installation of the snapshot ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证4个成员的复制组日志复制 - * 1. 4个成员正常启动 - * 2. 挂掉leader - * 3. leader拉起来 - * 4. 挂1一个follower - * 5. follower拉起来 - * 6. 挂2个follower - * 7. 拉起1个follower - * 8. 挂掉leader - * 9. 拉起上一步挂的leader - * 10. 挂掉leader和两个follower - * 11. 逐个拉起来 - * 12. 挂掉3个follower - * 13. 逐个拉起来 + * Verify replication group log replication for 4 members + * 1. 4 members started normally + * 2. Hang up the leader + * 3. Pull up the leader + * 4. Hang 1 follower + * 5. Follower, pull it up + * 6. Hang 2 followers + * 7. Pull up 1 follower + * 8. Hang up the leader + * 9. Pull up the leader from the previous step + * 10. Hang up the leader and two followers + * 11. Pull up one by one + * 12. Hang up three followers + * 13. Pull up one by one */ TEST_F(RaftLogReplicationTest, FourNodeKill) { LogicPoolID logicPoolId = 2; @@ -607,7 +495,7 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { char ch = 'a'; int loop = 10; - // 1. 启动4个成员的复制组 + // 1. 
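// The 4-member sequence below leans on quorum = N/2 + 1 = 3: one failure
// is tolerated, two stall the group. The unavailability idiom used at
// those points (mirroring the suite's helpers):
//   WriteVerifyNotAvailable(leader, ..., 1);  // write must time out
//   ::usleep(1000 * electionTimeoutMs * 2);   // two election timeouts
//   ReadVerifyNotAvailable(leader, ..., 1);   // leader has stepped down
// 1. 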
Start a replication group of 4 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -615,12 +503,8 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { peers.push_back(peer2); peers.push_back(peer3); peers.push_back(peer4); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -632,124 +516,81 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // a loop); - // 2. 挂掉leader + // 2. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); Peer newLeader; ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); ASSERT_EQ(0, leaderId.parse(newLeader.address())); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // b loop); - - // 3. old leader拉起来 - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + // 3. Pull up the old leader + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // c loop); - // 4. 挂1一个follower + // 4. Hang 1 follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 3); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // d loop); - // 5. follower拉起来 + // 5. Pull up the follower ASSERT_EQ(0, cluster.StartPeer(followerPeers1[0], PeerCluster::PeerToId(followerPeers1[0]))); ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // e loop); - // 6. 挂2个follower + // 6. Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 3); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers2[1])); - WriteVerifyNotAvailable(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // f 1); - // 7. 拉起1个follower + // 7. 
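// Step 7 brings the group back to 3 live members of 4, exactly quorum, so
// a leader re-emerges. Contrast with step 11 further down, where after the
// first restart only 2 of 4 members are alive: no majority is possible,
// and the suite encodes "election must fail" as WaitLeader returning -1:
//   ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer));  // 2 of 4, no quorum
// 7. 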
Pull up 1 follower ASSERT_EQ(0, cluster.StartPeer(followerPeers2[0], PeerCluster::PeerToId(followerPeers2[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // g loop); - // 8. 挂掉leader + // 8. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 9. 拉起上一步挂的leader - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 9. Pull up the leader from the previous step + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // h loop); - // 10. 挂掉leader和两个follower + // 10. Hang up the leader and two followers ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -758,117 +599,81 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { shutdownFollower = followerPeers2[2]; } ASSERT_EQ(0, cluster.ShutdownPeer(shutdownFollower)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ::usleep(1000 * electionTimeoutMs * 2); - // 11. 逐个拉起来 - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + // 11. Pull up one by one + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.StartPeer(shutdownFollower, PeerCluster::PeerToId(shutdownFollower))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // i loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers2[1], PeerCluster::PeerToId(followerPeers2[1]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // i loop); - // 12. 挂掉3个follower + // 12. Hang up three followers std::vector followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[1])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[2])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // j 1); ::usleep(1000 * electionTimeoutMs * 2); - // 13. 逐个拉起来 + // 13. 
Pull up one by one
    ASSERT_EQ(0, cluster.StartPeer(followerPeers3[0],
                                   PeerCluster::PeerToId(followerPeers3[0])));
    ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer));
-    ReadVerifyNotAvailable(leaderPeer,
-                           logicPoolId,
-                           copysetId,
-                           chunkId,
-                           length,
-                           ch - 1,
-                           1);
+    ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                           ch - 1, 1);
    ASSERT_EQ(0, cluster.StartPeer(followerPeers3[1],
                                   PeerCluster::PeerToId(followerPeers3[1])));
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
                        ch++,  // k
                        loop);
    ASSERT_EQ(0, cluster.StartPeer(followerPeers3[2],
                                   PeerCluster::PeerToId(followerPeers3[2])));
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    ::usleep(waitMultiReplicasBecomeConsistent * 1000);
    CopysetStatusVerify(peers, logicPoolId, copysetId, 2);
 }

 /**
- * 验证4个成员的复制组日志复制
- * 1. 4个成员正常启动
- * 2. hang leader
- * 3. 恢复leader
- * 4. hang1一个follower
- * 5. 恢复follower
- * 6. hang2个follower
- * 7. 恢复1个follower
- * 8. hangleader
- * 9. hang上一步hang的leader
- * 10. hang leader和两个follower
- * 11. 逐个恢复
- * 12. hang3个follower
- * 13. 逐个恢复
+ * Verify replication group log replication for 4 members
+ * 1. 4 members started normally
+ * 2. Hang the leader
+ * 3. Restore the leader
+ * 4. Hang 1 follower
+ * 5. Restore the follower
+ * 6. Hang 2 followers
+ * 7. Restore 1 follower
+ * 8. Hang the leader
+ * 9. Restore the leader hung in the previous step
+ * 10. Hang the leader and two followers
+ * 11. Restore one by one
+ * 12. Hang 3 followers
+ * 13. Restore one by one
 */
 TEST_F(RaftLogReplicationTest, FourNodeHang) {
     LogicPoolID logicPoolId = 2;
@@ -878,7 +683,7 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) {
     char ch = 'a';
     int loop = 10;

-    // 1. 启动4个成员的复制组
+    // 1. Start a replication group of 4 members
     PeerId leaderId;
     Peer leaderPeer;
     std::vector<Peer> peers;
@@ -886,12 +691,8 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) {
     peers.push_back(peer2);
     peers.push_back(peer3);
     peers.push_back(peer4);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     cluster.SetElectionTimeoutMs(electionTimeoutMs);
     cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -903,119 +704,76 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) {
     ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
     ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
                        ch++,  // a
                        loop);
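// FourNodeHang replays the FourNodeKill scenario with frozen rather than
// crashed peers. HangPeer() makes a chunkserver stop responding while
// keeping its in-memory state, and SignalPeer() resumes it (a
// SIGSTOP/SIGCONT style mechanism is assumed here, not verified from this
// patch); a hung peer therefore rejoins with its log intact, while a
// killed one must replay the log or install a snapshot. The recurring
// pairing below:
//   ASSERT_EQ(0, cluster.HangPeer(peer));    // freeze, peer stays "up"
//   ASSERT_EQ(0, cluster.SignalPeer(peer));  // thaw, catches up at once
    // 2. 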
hang leader ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); Peer oldLeader = leaderPeer; - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); Peer newLeader; ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); ASSERT_EQ(0, leaderId.parse(newLeader.address())); ASSERT_STRNE(oldLeader.address().c_str(), newLeader.address().c_str()); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // b loop); - - // 3. 恢复old leader + // 3. Restore old leader ASSERT_EQ(0, cluster.SignalPeer(oldLeader)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // c loop); - // 4. hang 1一个follower + // 4. Hang 1, one follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 1); ASSERT_EQ(0, cluster.HangPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // d loop); - // 5. 恢复follower + // 5. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // e loop); - // 6. hang 2个follower + // 6. Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 3); ASSERT_EQ(0, cluster.HangPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.HangPeer(followerPeers2[1])); - WriteVerifyNotAvailable(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // f 1); -// 7. 恢复1个follower + // 7. Restore 1 follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // g loop); // 8. hang leader ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 9. 恢复上一步挂的leader + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 9. Restore the previous suspended leader ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // h loop); - // 10. hang leader和两个follower + // 10. 
Hang leader and two followers ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -1024,107 +782,70 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { shutdownFollower = followerPeers2[2]; } ASSERT_EQ(0, cluster.HangPeer(shutdownFollower)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ::usleep(1000 * electionTimeoutMs * 2); - // 11. 逐个恢复 + // 11. Restore one by one ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.SignalPeer(shutdownFollower)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // i loop); ASSERT_EQ(0, cluster.SignalPeer(followerPeers2[1])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // j loop); - // 12. hang 3个follower + // 12. Hang 3 followers std::vector followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[1])); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[2])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // k 1); - - // 13. 逐个恢复 + // 13. Restore one by one ::usleep(1000 * electionTimeoutMs * 2); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[0])); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[1])); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // l loop); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[2])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); } /** - * 验证5个成员的复制组日志复制 - * 1. 5个成员正常启动 - * 2. 挂 leader - * 3. 恢复leader - * 4. 挂1一个follower - * 5. 恢复follower - * 6. 挂2个follower - * 7. 恢复1个follower - * 8. 挂leader - * 9. 恢复一步挂的leader - * 10. 挂leader和两个follower - * 11. 逐个恢复 - * 12. 挂3个follower - * 13. 逐个恢复 + * Verify replication group log replication for 5 members + * 1. 5 members started normally + * 2. Hang the leader + * 3. Restore leader + * 4. Hang 1 follower + * 5. Restore follower + * 6. Hang 2 followers + * 7. Restore 1 follower + * 8. Hang the leader + * 9. Restore one-step suspended leaders + * 10. Hang the leader and two followers + * 11. Restore one by one + * 12. Hang 3 followers + * 13. 
Restore one by one */ TEST_F(RaftLogReplicationTest, FiveNodeKill) { LogicPoolID logicPoolId = 2; @@ -1134,7 +855,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { char ch = 'a'; int loop = 10; - // 1. 启动5个成员的复制组 + // 1. Start a replication group of 5 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1143,12 +864,8 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { peers.push_back(peer3); peers.push_back(peer4); peers.push_back(peer5); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1161,122 +878,79 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // a loop); - // 2. 挂掉leader + // 2. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); Peer newLeader; ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); ASSERT_EQ(0, leaderId.parse(newLeader.address())); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // b loop); - - // 3. old leader拉起来 - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + // 3. Pull up the old leader + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // c loop); - // 4. 挂1一个follower + // 4. Hang 1 follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 1); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // d loop); - // 5. follower拉起来 + // 5. Follower, pull it up ASSERT_EQ(0, cluster.StartPeer(followerPeers1[0], PeerCluster::PeerToId(followerPeers1[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // e loop); - // 6. 挂2个follower + // 6. Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 4); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers2[1])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // f loop); - // 7. 拉起1个follower + // 7. 
Pull up 1 follower ASSERT_EQ(0, cluster.StartPeer(followerPeers2[0], PeerCluster::PeerToId(followerPeers2[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // g loop); - // 8. 挂掉leader + // 8. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(newLeader)); - ReadVerifyNotAvailable(newLeader, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 9. 拉起上一步挂的leader - ASSERT_EQ(0, cluster.StartPeer(newLeader, - PeerCluster::PeerToId(newLeader))); + ReadVerifyNotAvailable(newLeader, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 9. Pull up the leader from the previous step + ASSERT_EQ(0, + cluster.StartPeer(newLeader, PeerCluster::PeerToId(newLeader))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // h loop); - // 10. 挂掉leader和两个follower + // 10. Hang up the leader and two followers ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -1285,113 +959,78 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { shutdownFollower = followerPeers2[2]; } ASSERT_EQ(0, cluster.ShutdownPeer(shutdownFollower)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 11. 逐个拉起来 - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 11. Pull up one by one + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // i loop); ASSERT_EQ(0, cluster.StartPeer(shutdownFollower, PeerCluster::PeerToId(shutdownFollower))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // j loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers2[1], PeerCluster::PeerToId(followerPeers2[1]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // k loop); - // 12. 挂掉3个follower + // 12. Hang up three followers std::vector followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[1])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[2])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // l 1); - - // 13. 逐个拉起来 + // 13. 
Pull up one by one ASSERT_EQ(0, cluster.StartPeer(followerPeers3[0], PeerCluster::PeerToId(followerPeers3[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // m loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers3[1], PeerCluster::PeerToId(followerPeers3[1]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // n loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers3[2], PeerCluster::PeerToId(followerPeers3[2]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // o loop); } - /** - * 验证5个成员的复制组日志复制 - * 1. 5个成员正常启动 - * 2. hang leader - * 3. 恢复leader - * 4. hang 1一个follower - * 5. 恢复follower - * 6. hang 2个follower - * 7. 恢复1个follower - * 8. hang leader - * 9. hang上一步hang的leader - * 10. hang leader和两个follower - * 11. 逐个恢复 - * 12. hang3个follower - * 13. 逐个恢复 + * Verify replication group log replication for 5 members + * 1. 5 members started normally + * 2. Hang leader + * 3. Restore leader + * 4. Hang 1, one follower + * 5. Restore follower + * 6. Hang 2 followers + * 7. Restore 1 follower + * 8. Hang leader + * 9. The leader of the previous step of hang + * 10. Hang leader and two followers + * 11. Restore one by one + * 12. Hang3 followers + * 13. Restore one by one */ TEST_F(RaftLogReplicationTest, FiveNodeHang) { LogicPoolID logicPoolId = 2; @@ -1401,7 +1040,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { char ch = 'a'; int loop = 10; - // 1. 启动5个成员的复制组 + // 1. Start a replication group of 5 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1410,12 +1049,8 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { peers.push_back(peer3); peers.push_back(peer4); peers.push_back(peer5); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1428,115 +1063,72 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // a loop); // 2. hang leader ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); Peer newLeader; ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); ASSERT_EQ(0, leaderId.parse(newLeader.address())); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // b loop); - - // 3. 恢复old leader + // 3. 
Restore old leader ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // c loop); - // 4. hang 1一个follower + // 4. Hang 1, one follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 1); ASSERT_EQ(0, cluster.HangPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // d loop); - // 5. 恢复follower + // 5. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // e loop); - // 6. hang 2个follower + // 6. Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 4); ASSERT_EQ(0, cluster.HangPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.HangPeer(followerPeers2[1])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // f loop); - // 7. 恢复1个follower + // 7. Restore 1 follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers2[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // g loop); // 8. hang leader ASSERT_EQ(0, cluster.HangPeer(newLeader)); - ReadVerifyNotAvailable(newLeader, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 9. 恢复上一步挂的leader + ReadVerifyNotAvailable(newLeader, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 9. Restore the previous suspended leader ASSERT_EQ(0, cluster.SignalPeer(newLeader)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // h loop); - // 10. hang leader和两个follower + // 10. Hang leader and two followers ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -1545,83 +1137,49 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { shutdownFollower = followerPeers2[2]; } ASSERT_EQ(0, cluster.HangPeer(shutdownFollower)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 11. 逐个恢复 + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 11. 
Restore one by one ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // i loop); ASSERT_EQ(0, cluster.SignalPeer(shutdownFollower)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // j loop); ASSERT_EQ(0, cluster.SignalPeer(followerPeers2[1])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // k loop); - // 12. hang 3个follower + // 12. Hang 3 followers std::vector followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[1])); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[2])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // l 1); - - // 13. 逐个恢复 + // 13. Restore one by one ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // m loop); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[1])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // n loop); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[2])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // o loop); } diff --git a/test/integration/raft/raft_snapshot_test.cpp b/test/integration/raft/raft_snapshot_test.cpp index a8e57aaa3f..d6cd2981dc 100644 --- a/test/integration/raft/raft_snapshot_test.cpp +++ b/test/integration/raft/raft_snapshot_test.cpp @@ -21,102 +21,78 @@ */ #include -#include #include +#include #include -#include "test/integration/common/peer_cluster.h" -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli2.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; const char kRaftSnapshotTestLogDir[] = "./runlog/RaftSnapshot"; const char* kFakeMdsAddr = "127.0.0.1:9320"; static constexpr uint32_t kOpRequestAlignSize = 4096; -static const char *raftVoteParam[4][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9321", - "-chunkServerStoreUri=local://./9321/", - "-chunkServerMetaUri=local://./9321/chunkserver.dat", - "-copySetUri=local://./9321/copysets", - "-raftSnapshotUri=curve://./9321/copysets", - "-recycleUri=local://./9321/recycler", - "-chunkFilePoolDir=./9321/chunkfilepool/", - "-chunkFilePoolMetaPath=./9321/chunkfilepool.meta", - "-conf=./9321/chunkserver.conf", - 
"-raft_sync_segments=true", - "-raftLogUri=curve://./9321/copysets", - "-walFilePoolDir=./9321/walfilepool/", - "-walFilePoolMetaPath=./9321/walfilepool.meta", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9322", - "-chunkServerStoreUri=local://./9322/", - "-chunkServerMetaUri=local://./9322/chunkserver.dat", - "-copySetUri=local://./9322/copysets", - "-raftSnapshotUri=curve://./9322/copysets", - "-recycleUri=local://./9322/recycler", - "-chunkFilePoolDir=./9322/chunkfilepool/", - "-chunkFilePoolMetaPath=./9322/chunkfilepool.meta", - "-conf=./9322/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9322/copysets", - "-walFilePoolDir=./9322/walfilepool/", - "-walFilePoolMetaPath=./9322/walfilepool.meta", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9323", - "-chunkServerStoreUri=local://./9323/", - "-chunkServerMetaUri=local://./9323/chunkserver.dat", - "-copySetUri=local://./9323/copysets", - "-raftSnapshotUri=curve://./9323/copysets", - "-recycleUri=local://./9323/recycler", - "-chunkFilePoolDir=./9323/chunkfilepool/", - "-chunkFilePoolMetaPath=./9323/chunkfilepool.meta", - "-conf=./9323/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9323/copysets", - "-walFilePoolDir=./9323/walfilepool/", - "-walFilePoolMetaPath=./9323/walfilepool.meta", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9324", - "-chunkServerStoreUri=local://./9324/", - "-chunkServerMetaUri=local://./9324/chunkserver.dat", - "-copySetUri=local://./9324/copysets", - "-raftSnapshotUri=curve://./9324/copysets", - "-recycleUri=local://./9324/recycler", - "-chunkFilePoolDir=./9324/chunkfilepool/", - "-chunkFilePoolMetaPath=./9324/chunkfilepool.meta", - "-conf=./9324/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9324/copysets", - "-walFilePoolDir=./9324/walfilepool/", - "-walFilePoolMetaPath=./9324/walfilepool.meta", - NULL - }, +static const char* raftVoteParam[4][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9321", + "-chunkServerStoreUri=local://./9321/", + "-chunkServerMetaUri=local://./9321/chunkserver.dat", + "-copySetUri=local://./9321/copysets", + "-raftSnapshotUri=curve://./9321/copysets", + "-recycleUri=local://./9321/recycler", + "-chunkFilePoolDir=./9321/chunkfilepool/", + "-chunkFilePoolMetaPath=./9321/chunkfilepool.meta", + "-conf=./9321/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9321/copysets", + "-walFilePoolDir=./9321/walfilepool/", + "-walFilePoolMetaPath=./9321/walfilepool.meta", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9322", + "-chunkServerStoreUri=local://./9322/", + "-chunkServerMetaUri=local://./9322/chunkserver.dat", + "-copySetUri=local://./9322/copysets", + "-raftSnapshotUri=curve://./9322/copysets", + "-recycleUri=local://./9322/recycler", + "-chunkFilePoolDir=./9322/chunkfilepool/", + "-chunkFilePoolMetaPath=./9322/chunkfilepool.meta", + "-conf=./9322/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9322/copysets", + "-walFilePoolDir=./9322/walfilepool/", + "-walFilePoolMetaPath=./9322/walfilepool.meta", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9323", + "-chunkServerStoreUri=local://./9323/", + "-chunkServerMetaUri=local://./9323/chunkserver.dat", + "-copySetUri=local://./9323/copysets", + "-raftSnapshotUri=curve://./9323/copysets", + "-recycleUri=local://./9323/recycler", + 
"-chunkFilePoolDir=./9323/chunkfilepool/", + "-chunkFilePoolMetaPath=./9323/chunkfilepool.meta", + "-conf=./9323/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9323/copysets", + "-walFilePoolDir=./9323/walfilepool/", + "-walFilePoolMetaPath=./9323/walfilepool.meta", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9324", + "-chunkServerStoreUri=local://./9324/", + "-chunkServerMetaUri=local://./9324/chunkserver.dat", + "-copySetUri=local://./9324/copysets", + "-raftSnapshotUri=curve://./9324/copysets", + "-recycleUri=local://./9324/recycler", + "-chunkFilePoolDir=./9324/chunkfilepool/", + "-chunkFilePoolMetaPath=./9324/chunkfilepool.meta", + "-conf=./9324/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9324/copysets", + "-walFilePoolDir=./9324/walfilepool/", + "-walFilePoolMetaPath=./9324/walfilepool.meta", NULL}, }; class RaftSnapshotTest : public testing::Test { @@ -152,32 +128,28 @@ class RaftSnapshotTest : public testing::Test { ASSERT_TRUE(cg3_.Init("9323")); ASSERT_TRUE(cg4_.Init("9324")); cg1_.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs_)); + std::to_string(electionTimeoutMs_)); cg1_.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS_)); - cg1_.SetKV("chunkserver.common.logDir", - kRaftSnapshotTestLogDir); + std::to_string(snapshotIntervalS_)); + cg1_.SetKV("chunkserver.common.logDir", kRaftSnapshotTestLogDir); cg1_.SetKV("mds.listen.addr", kFakeMdsAddr); cg2_.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs_)); + std::to_string(electionTimeoutMs_)); cg2_.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS_)); - cg2_.SetKV("chunkserver.common.logDir", - kRaftSnapshotTestLogDir); + std::to_string(snapshotIntervalS_)); + cg2_.SetKV("chunkserver.common.logDir", kRaftSnapshotTestLogDir); cg2_.SetKV("mds.listen.addr", kFakeMdsAddr); cg3_.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs_)); + std::to_string(electionTimeoutMs_)); cg3_.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS_)); - cg3_.SetKV("chunkserver.common.logDir", - kRaftSnapshotTestLogDir); + std::to_string(snapshotIntervalS_)); + cg3_.SetKV("chunkserver.common.logDir", kRaftSnapshotTestLogDir); cg3_.SetKV("mds.listen.addr", kFakeMdsAddr); cg4_.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs_)); + std::to_string(electionTimeoutMs_)); cg4_.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS_)); - cg4_.SetKV("chunkserver.common.logDir", - kRaftSnapshotTestLogDir); + std::to_string(snapshotIntervalS_)); + cg4_.SetKV("chunkserver.common.logDir", kRaftSnapshotTestLogDir); cg4_.SetKV("mds.listen.addr", kFakeMdsAddr); ASSERT_TRUE(cg1_.Generate()); ASSERT_TRUE(cg2_.Generate()); @@ -194,7 +166,7 @@ class RaftSnapshotTest : public testing::Test { params_.push_back(const_cast(raftVoteParam[2])); params_.push_back(const_cast(raftVoteParam[3])); - // 配置默认raft client option + // Configure default raft client option defaultCliOpt_.max_retry = 3; defaultCliOpt_.timeout_ms = 10000; } @@ -232,20 +204,20 @@ class RaftSnapshotTest : public testing::Test { braft::cli::CliOptions defaultCliOpt_; std::map paramsIndexs_; - std::vector params_; + std::vector params_; }; - /** - * 验证连续通过快照恢复copyset - * 1.创建3个副本的复制组 - * 2.挂掉一个follower - * 3.写入数据,并等待raft snapshot 产生 - * 4.启动挂掉的follower,使其通过snapshot恢复 - * 5.transfer leader到刚启动的follower,读数据验证 - * 6.remove old leader,主要为了删除其copyset目录 - * 
7.添加新的peer,使其通过快照加载数据 - * 8.transfer leader到新加入的peer,读数据验证 + * Verify continuous recovery of a copyset through snapshots + * 1. Create a replication group of 3 replicas + * 2. Hang up a follower + * 3. Write data and wait for a raft snapshot to be generated + * 4. Start the failed follower and let it recover through the snapshot + * 5. Transfer the leader to the newly started follower and read the data to + * verify + * 6. Remove the old leader, mainly to delete its copyset directory + * 7. Add a new peer and let it load data through a snapshot + * 8. Transfer the leader to the newly added peer and read the data to verify */ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { LogicPoolID logicPoolId = 2; @@ -261,12 +233,8 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { peers.push_back(peer2_); peers.push_back(peer3_); - PeerCluster cluster("ThreeNode-cluster", - logicPoolId, - copysetId, - peers, - params_, - paramsIndexs_); + PeerCluster cluster("ThreeNode-cluster", logicPoolId, copysetId, peers, + params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, cluster.StartPeer(peer1_, PeerCluster::PeerToId(peer1_))); ASSERT_EQ(0, cluster.StartPeer(peer2_, PeerCluster::PeerToId(peer2_))); @@ -276,7 +244,7 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); Peer oldLeader = leaderPeer; - // 挂掉一个follower + // Hang up a follower Peer shutdownPeer; if (leaderPeer.address() == peer1_.address()) { shutdownPeer = peer2_; @@ -288,21 +256,15 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); LOG(INFO) << "write 1 start"; - // 发起 read/write,产生chunk文件 - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, - loop, - initsn); + // Initiate read/write to generate a chunk file + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, + loop, initsn); LOG(INFO) << "write 1 end"; - // wait snapshot,保证能够触发打快照 - ::sleep(1.5*snapshotIntervalS_); + // Wait out the snapshot interval to make sure a snapshot is triggered + ::sleep(1.5 * snapshotIntervalS_); - // restart, 需要从 install snapshot 恢复 + // Restart; it needs to recover from the install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); @@ -310,43 +272,44 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { ::sleep(3); TransferLeaderAssertSuccess(&cluster, shutdownPeer, defaultCliOpt_); leaderPeer = shutdownPeer; - // 读数据验证 - ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch, loop); + // Read data validation + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); Configuration conf = cluster.CopysetConf(); - // 删除旧leader及其目录 + // Delete the old leader and its directory butil::Status status = RemovePeer(logicPoolId, copysetId, conf, oldLeader, defaultCliOpt_); ASSERT_TRUE(status.ok()); std::string rmdir("rm -fr "); - rmdir += std::to_string(PeerCluster::PeerToId(oldLeader)); + rmdir += std::to_string(PeerCluster::PeerToId(oldLeader)); ::system(rmdir.c_str()); - // 添加新的peer - ASSERT_EQ(0, cluster.StartPeer(peer4_, - PeerCluster::PeerToId(peer4_))); + // Add a new peer + ASSERT_EQ(0, cluster.StartPeer(peer4_, PeerCluster::PeerToId(peer4_))); status = AddPeer(logicPoolId, copysetId, conf, peer4_, defaultCliOpt_); ASSERT_TRUE(status.ok()) << status; - // transfer leader 到peer4_,并读出来验证 + // Transfer leader to peer4_, and read it back to verify TransferLeaderAssertSuccess(&cluster, peer4_, defaultCliOpt_); 
leaderPeer = peer4_; - // 读数据验证 - ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch, loop); + // Read data validation + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. 然后 sleep 超过一个 snapshot interval,write read 数据, - * 5. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot - * 6. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 7. transfer leader 到shut down 的peer 上 - * 8. 在 read 之前写入的数据验证 - * 9. 再 write 数据,再 read 出来验证一遍 + * Verify shutting down a non-leader node among three nodes, restarting it, + * and making it recover from an install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to emerge, write data, then read it back to verify + * 3. Shut down a non-leader node + * 4. Sleep for more than one snapshot interval, then write and read data + * 5. Sleep for more than one snapshot interval again, then write and read + * data; steps 4 and 5 ensure that at least two snapshots are taken, so the + * node must go through an install snapshot when it restarts + * 6. Wait for the leader to emerge, then read back and verify the previously + * written data + * 7. Transfer the leader to the peer that was shut down + * 8. Verify the previously written data by reading it + * 9. Write data again and read it back to verify */ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -362,12 +325,8 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { peers.push_back(peer2_); peers.push_back(peer3_); - PeerCluster cluster("ThreeNode-cluster", - logicPoolId, - copysetId, - peers, - params_, - paramsIndexs_); + PeerCluster cluster("ThreeNode-cluster", logicPoolId, copysetId, peers, + params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, cluster.StartPeer(peer1_, PeerCluster::PeerToId(peer1_))); ASSERT_EQ(0, cluster.StartPeer(peer2_, PeerCluster::PeerToId(peer2_))); @@ -377,22 +336,17 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); LOG(INFO) << "write 1 start"; - // 发起 read/write,产生chunk文件 - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, - loop, - initsn); + // Initiate read/write to generate a chunk file + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, + loop, initsn); LOG(INFO) << "write 1 end"; - // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作 - // 所以先睡一会,防止并发统计文件信息 + // Operations between raft replicas are not fully synchronized, and some + // replicas may lag behind, so sleep for a while first to avoid collecting + // file statistics while operations are still in flight ::sleep(2); - // shutdown 某个follower + // Shut down one of the followers Peer shutdownPeer; if (leaderPeer.address() == peer1_.address()) { shutdownPeer = peer2_; @@ -403,47 +357,31 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { LOG(INFO) << "leader peer: " << leaderPeer.address(); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // wait snapshot, 保证能够触发打快照 - // 此外通过增加chunk版本号,触发chunk文件产生快照文件 - ::sleep(1.5*snapshotIntervalS_); - // 再次发起 read/write + // Wait out the snapshot interval to make sure a snapshot is triggered + // In addition, increasing the chunk version number makes the chunk file + // generate a snapshot file + 
::sleep(1.5 * snapshotIntervalS_); + // Initiate read/write again LOG(INFO) << "write 2 start"; - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop, - initsn + 1); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch + 1, loop, initsn + 1); LOG(INFO) << "write 2 end"; - // 验证chunk快照数据正确性 - ReadSnapshotVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Verify the correctness of chunk snapshot data + ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); - // wait snapshot, 保证能够触发打快照 - ::sleep(1.5*snapshotIntervalS_); + // Wait out the snapshot interval to make sure a snapshot is triggered + ::sleep(1.5 * snapshotIntervalS_); - // restart, 需要从 install snapshot 恢复 + // Restart; it needs to recover from the install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); LOG(INFO) << "write 3 start"; - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch + 2, - loop, - initsn + 1); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch + 2, loop, initsn + 1); LOG(INFO) << "write 3 end"; @@ -451,24 +389,29 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { ::sleep(3); TransferLeaderAssertSuccess(&cluster, shutdownPeer, defaultCliOpt_); leaderPeer = shutdownPeer; - ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch + 2, loop); - ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch, loop); + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch + 2, + loop); + ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, + loop); } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,并更新写版本,产生chunk快照 - * 3. shutdown 非 leader - * 4. 然后 sleep 超过一个 snapshot interval, - * 5. 删除chunk快照,再次用新版本write 数据,产生新的chunk快照 - * 6. 然后再 sleep 超过一个 snapshot interval;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot - * 7. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 8. transfer leader 到shut down 的peer 上 - * 9. 在 read 之前写入的数据验证 + * Verify shutting down a non-leader node among three nodes, restarting it, + * and making it recover from an install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to emerge, write data, and bump the write version to + * generate a chunk snapshot + * 3. Shut down a non-leader node + * 4. Sleep for more than one snapshot interval + * 5. Delete the chunk snapshot and write data again with a new version to + * generate a new chunk snapshot + * 6. Sleep for more than one snapshot interval again; steps 4 and 5 ensure + * that at least two snapshots are taken, so the node must go through an + * install snapshot when it restarts + * 7. Wait for the leader to emerge, then read back and verify the previously + * written data + * 8. Transfer the leader to the peer that was shut down + * 9. 
Verify the previously written data by reading it */ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { LogicPoolID logicPoolId = 2; @@ -484,12 +427,8 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { peers.push_back(peer2_); peers.push_back(peer3_); - PeerCluster cluster("ThreeNode-cluster", - logicPoolId, - copysetId, - peers, - params_, - paramsIndexs_); + PeerCluster cluster("ThreeNode-cluster", logicPoolId, copysetId, peers, + params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, cluster.StartPeer(peer1_, PeerCluster::PeerToId(peer1_))); ASSERT_EQ(0, cluster.StartPeer(peer2_, PeerCluster::PeerToId(peer2_))); @@ -499,43 +438,31 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); LOG(INFO) << "write 1 start"; - // 发起 read/write,产生chunk文件 - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Initiate read/write to generate a chunk file + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, // a - loop, - initsn); + loop, initsn); LOG(INFO) << "write 1 end"; LOG(INFO) << "write 2 start"; - // 发起 read/write,产生chunk文件,并产生快照文件 - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Initiate read/write, generate chunk files, and generate snapshot files + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ++ch, // b loop, - initsn+1); // sn = 2 - // 验证chunk快照数据正确性 - ReadSnapshotVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch-1, // a + initsn + 1); // sn = 2 + // Verify the correctness of chunk snapshot data + ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, // a loop); LOG(INFO) << "write 2 end"; - // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作 - // 所以先睡一会,防止并发统计文件信息 + // Operations between raft replicas are not fully synchronized, and some + // replicas may lag behind, so sleep for a while first to avoid collecting + // file statistics while operations are still in flight ::sleep(2); - // shutdown 某个follower + // Shut down one of the followers Peer shutdownPeer; if (leaderPeer.address() == peer1_.address()) { shutdownPeer = peer2_; @@ -546,41 +473,31 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { LOG(INFO) << "leader peer: " << leaderPeer.address(); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // wait snapshot, 保证能够触发打快照 - // 此外通过增加chunk版本号,触发chunk文件产生快照文件 - ::sleep(1.5*snapshotIntervalS_); + // Wait out the snapshot interval to make sure a snapshot is triggered + // In addition, increasing the chunk version number makes the chunk file + // generate a snapshot file + ::sleep(1.5 * snapshotIntervalS_); - // 删除旧的快照 - DeleteSnapshotVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, + // Delete old snapshots + DeleteSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, initsn + 1); // csn = 2 - // 再次发起 read/write + // Initiate read/write again LOG(INFO) << "write 3 start"; - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ++ch, // c loop, initsn + 2); // sn = 3 LOG(INFO) << "write 3 end"; - // 验证chunk快照数据正确性 - ReadSnapshotVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch-1, // b + // Verify the correctness of chunk snapshot data + ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, // b loop); - 
// wait snapshot, 保证能够触发打快照 - ::sleep(1.5*snapshotIntervalS_); + // Wait out the snapshot interval to make sure a snapshot is triggered + ::sleep(1.5 * snapshotIntervalS_); - // restart, 需要从 install snapshot 恢复 + // Restart; it needs to recover from the install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -589,24 +506,29 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { ::sleep(3); TransferLeaderAssertSuccess(&cluster, shutdownPeer, defaultCliOpt_); leaderPeer = shutdownPeer; - ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch, loop); - ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch-1, loop); + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); + ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, loop); } /** - * 验证curve快照转储过程当中,chunkserver存在多个copyset情况下, - * 1. 创建3个副本的复制组 - * 2. 为每个复制组的chunkserver生成新的copyset,并作为后续操作对象 - * 3. 等待 leader 产生,write 数据 - * 4. sleep 超过一个 snapshot interval,确保产生raft快照 - * 5. 更新写版本,产生chunk快照 - * 6. 然后 sleep 超过一个 snapshot interval,确保产生raft快照 - * 7. shutdown 非 leader - * 8. AddPeer添加一个新节点使其通过加载快照恢复,然后remove掉shutdown的peer - * 9. 切换leader到新添加的peer - * 10. 等待 leader 产生,然后 read 之前产生的数据和chunk快照进行验证 + * Verify the case where the chunkserver holds multiple copysets during a + * curve snapshot dump + * 1. Create a replication group of 3 replicas + * 2. Create a new copyset on each replication group's chunkserver and use it + * as the target of the subsequent operations + * 3. Wait for the leader to emerge and write data + * 4. Sleep for more than one snapshot interval to make sure a raft snapshot + * is generated + * 5. Bump the write version to generate a chunk snapshot + * 6. Sleep for more than one snapshot interval again to make sure another + * raft snapshot is generated + * 7. Shut down a non-leader node + * 8. AddPeer a new node and let it recover by loading a snapshot, then + * remove the shut-down peer + * 9. Switch the leader to the newly added peer + * 10. 
Wait for the leader to emerge, then read back and verify the previously + * generated data and chunk snapshot */ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { LogicPoolID logicPoolId = 2; @@ -622,18 +544,14 @@ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { peers.push_back(peer2_); peers.push_back(peer3_); - PeerCluster cluster("ThreeNode-cluster", - logicPoolId, - copysetId, - peers, - params_, - paramsIndexs_); + PeerCluster cluster("ThreeNode-cluster", logicPoolId, copysetId, peers, + params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, cluster.StartPeer(peer1_, PeerCluster::PeerToId(peer1_))); ASSERT_EQ(0, cluster.StartPeer(peer2_, PeerCluster::PeerToId(peer2_))); ASSERT_EQ(0, cluster.StartPeer(peer3_, PeerCluster::PeerToId(peer3_))); - // 创建新的copyset + // Create a new copyset LOG(INFO) << "create new copyset."; ++copysetId; int ret = cluster.CreateCopyset(logicPoolId, copysetId, peer1_, peers); @@ -643,57 +561,46 @@ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { ret = cluster.CreateCopyset(logicPoolId, copysetId, peer3_, peers); ASSERT_EQ(0, ret); - // 使用新的copyset作为操作对象 + // Use the new copyset as the target of subsequent operations cluster.SetWorkingCopyset(copysetId); Peer leaderPeer; ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); LOG(INFO) << "write 1 start"; - // 发起 read/write,产生chunk文件 - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Initiate read/write to generate a chunk file + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, // a - loop, - initsn); + loop, initsn); LOG(INFO) << "write 1 end"; - // wait snapshot, 保证能够触发打快照 - ::sleep(1.5*snapshotIntervalS_); + // Wait out the snapshot interval to make sure a snapshot is triggered + ::sleep(1.5 * snapshotIntervalS_); LOG(INFO) << "write 2 start"; - // 发起 read/write,产生chunk文件,并产生快照文件 - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Initiate read/write, generate chunk files, and generate snapshot files + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ++ch, // b loop, - initsn+1); // sn = 2 - // 验证chunk快照数据正确性 - ReadSnapshotVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch-1, // a + initsn + 1); // sn = 2 + // Verify the correctness of chunk snapshot data + ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, // a loop); LOG(INFO) << "write 2 end"; - // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作 - // 所以先睡一会,防止并发统计文件信息 + // Operations between raft replicas are not fully synchronized, and some + // replicas may lag behind, so sleep for a while first to avoid collecting + // file statistics while operations are still in flight ::sleep(2); - // wait snapshot, 保证能够触发打快照 - // 通过至少两次快照,保证新加的peer通过下载快照安装 - ::sleep(1.5*snapshotIntervalS_); + // Wait out the snapshot interval to make sure a snapshot is triggered + // Take at least two snapshots so that the newly added peer must install by + // downloading a snapshot + ::sleep(1.5 * snapshotIntervalS_); - // shutdown 某个follower + // Shut down one of the followers Peer shutdownPeer; if (leaderPeer.address() == peer1_.address()) { shutdownPeer = peer2_; @@ -704,30 +611,28 @@ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { LOG(INFO) << "leader peer: " << leaderPeer.address(); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // 添加新的peer,并移除shutdown的peer + // Add a new peer and remove the shutdown peer Configuration conf 
= cluster.CopysetConf(); - ASSERT_EQ(0, cluster.StartPeer(peer4_, - PeerCluster::PeerToId(peer4_))); + ASSERT_EQ(0, cluster.StartPeer(peer4_, PeerCluster::PeerToId(peer4_))); butil::Status status = AddPeer(logicPoolId, copysetId, conf, peer4_, defaultCliOpt_); ASSERT_TRUE(status.ok()); - // 删除旧leader及其目录 + // Remove the shut-down peer and delete its directory status = RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, defaultCliOpt_); ASSERT_TRUE(status.ok()); std::string rmdir("rm -fr "); - rmdir += std::to_string(PeerCluster::PeerToId(shutdownPeer)); + rmdir += std::to_string(PeerCluster::PeerToId(shutdownPeer)); ::system(rmdir.c_str()); - // transfer leader 到peer4_,并读出来验证 + // Transfer leader to peer4_, and read it back to verify TransferLeaderAssertSuccess(&cluster, peer4_, defaultCliOpt_); leaderPeer = peer4_; - // 读数据验证 - ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch, loop); - ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch-1, loop); + // Read data validation + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); + ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, loop); } } // namespace chunkserver diff --git a/test/integration/raft/raft_vote_test.cpp b/test/integration/raft/raft_vote_test.cpp index 5f87a1495f..9b5d97b98f 100644 --- a/test/integration/raft/raft_vote_test.cpp +++ b/test/integration/raft/raft_vote_test.cpp @@ -21,84 +21,66 @@ */ #include -#include #include +#include #include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "test/integration/common/peer_cluster.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; const char kRaftVoteTestLogDir[] = "./runlog/RaftVote"; const char* kFakeMdsAddr = "127.0.0.1:9089"; static constexpr uint32_t kOpRequestAlignSize = 4096; static const char* raftVoteParam[3][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9091", - "-chunkServerStoreUri=local://./9091/", - "-chunkServerMetaUri=local://./9091/chunkserver.dat", - "-copySetUri=local://./9091/copysets", - "-raftSnapshotUri=curve://./9091/copysets", - "-recycleUri=local://./9091/recycler", - "-chunkFilePoolDir=./9091/chunkfilepool/", - "-chunkFilePoolMetaPath=./9091/chunkfilepool.meta", - "-conf=./9091/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9091/copysets", - "-walFilePoolDir=./9091/walfilepool/", - "-walFilePoolMetaPath=./9091/walfilepool.meta", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9092", - "-chunkServerStoreUri=local://./9092/", - "-chunkServerMetaUri=local://./9092/chunkserver.dat", - "-copySetUri=local://./9092/copysets", - "-raftSnapshotUri=curve://./9092/copysets", - "-recycleUri=local://./9092/recycler", - "-chunkFilePoolDir=./9092/chunkfilepool/", - "-chunkFilePoolMetaPath=./9092/chunkfilepool.meta", - "-conf=./9092/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9092/copysets", - "-walFilePoolDir=./9092/walfilepool/", - "-walFilePoolMetaPath=./9092/walfilepool.meta", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - 
"-chunkServerPort=9093", - "-chunkServerStoreUri=local://./9093/", - "-chunkServerMetaUri=local://./9093/chunkserver.dat", - "-copySetUri=local://./9093/copysets", - "-raftSnapshotUri=curve://./9093/copysets", - "-recycleUri=local://./9093/recycler", - "-chunkFilePoolDir=./9093/chunkfilepool/", - "-chunkFilePoolMetaPath=./9093/chunkfilepool.meta", - "-conf=./9093/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9093/copysets", - "-walFilePoolDir=./9093/walfilepool/", - "-walFilePoolMetaPath=./9093/walfilepool.meta", - NULL - }, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9091", + "-chunkServerStoreUri=local://./9091/", + "-chunkServerMetaUri=local://./9091/chunkserver.dat", + "-copySetUri=local://./9091/copysets", + "-raftSnapshotUri=curve://./9091/copysets", + "-recycleUri=local://./9091/recycler", + "-chunkFilePoolDir=./9091/chunkfilepool/", + "-chunkFilePoolMetaPath=./9091/chunkfilepool.meta", + "-conf=./9091/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9091/copysets", + "-walFilePoolDir=./9091/walfilepool/", + "-walFilePoolMetaPath=./9091/walfilepool.meta", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9092", + "-chunkServerStoreUri=local://./9092/", + "-chunkServerMetaUri=local://./9092/chunkserver.dat", + "-copySetUri=local://./9092/copysets", + "-raftSnapshotUri=curve://./9092/copysets", + "-recycleUri=local://./9092/recycler", + "-chunkFilePoolDir=./9092/chunkfilepool/", + "-chunkFilePoolMetaPath=./9092/chunkfilepool.meta", + "-conf=./9092/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9092/copysets", + "-walFilePoolDir=./9092/walfilepool/", + "-walFilePoolMetaPath=./9092/walfilepool.meta", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9093", + "-chunkServerStoreUri=local://./9093/", + "-chunkServerMetaUri=local://./9093/chunkserver.dat", + "-copySetUri=local://./9093/copysets", + "-raftSnapshotUri=curve://./9093/copysets", + "-recycleUri=local://./9093/recycler", + "-chunkFilePoolDir=./9093/chunkfilepool/", + "-chunkFilePoolMetaPath=./9093/chunkfilepool.meta", + "-conf=./9093/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9093/copysets", + "-walFilePoolDir=./9093/walfilepool/", + "-walFilePoolMetaPath=./9093/walfilepool.meta", NULL}, }; class RaftVoteTest : public testing::Test { @@ -130,25 +112,22 @@ class RaftVoteTest : public testing::Test { ASSERT_TRUE(cg2.Init("9092")); ASSERT_TRUE(cg3.Init("9093")); cg1.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg1.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg1.SetKV("chunkserver.common.logDir", - kRaftVoteTestLogDir); + std::to_string(snapshotIntervalS)); + cg1.SetKV("chunkserver.common.logDir", kRaftVoteTestLogDir); cg1.SetKV("mds.listen.addr", kFakeMdsAddr); cg2.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg2.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg2.SetKV("chunkserver.common.logDir", - kRaftVoteTestLogDir); + std::to_string(snapshotIntervalS)); + cg2.SetKV("chunkserver.common.logDir", kRaftVoteTestLogDir); cg2.SetKV("mds.listen.addr", kFakeMdsAddr); cg3.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg3.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - 
cg3.SetKV("chunkserver.common.logDir", - kRaftVoteTestLogDir); + std::to_string(snapshotIntervalS)); + cg3.SetKV("chunkserver.common.logDir", kRaftVoteTestLogDir); cg3.SetKV("mds.listen.addr", kFakeMdsAddr); ASSERT_TRUE(cg1.Generate()); ASSERT_TRUE(cg2.Generate()); @@ -189,22 +168,21 @@ class RaftVoteTest : public testing::Test { int snapshotIntervalS; std::map paramsIndexs; - std::vector params; - // 等待多个副本数据一致的时间 + std::vector params; + // Time to wait for data on multiple replicas to become consistent int waitMultiReplicasBecomeConsistent; }; - - butil::AtExitManager atExitManager; /** - * 验证1个节点的复制组 - * 1. 创建1个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader,验证可用性 - * 3. 拉起leader - * 4. hang住leader - * 5. 恢复leader + * Verify the replication group of 1 node + * 1. Create a replication group of 1 member, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the leader and verify availability + * 3. Pull up the leader + * 4. Hang the leader + * 5. Restore leader */ TEST_F(RaftVoteTest, OneNode) { LogicPoolID logicPoolId = 2; @@ -214,17 +192,13 @@ TEST_F(RaftVoteTest, OneNode) { char ch = 'a'; int loop = 25; - // 1. 启动一个成员的复制组 + // 1. Start a replication group with one member PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -234,85 +208,51 @@ TEST_F(RaftVoteTest, OneNode) { ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉这个节点 + // 2. Hang up this node ASSERT_EQ(0, cluster.ShutdownPeer(peer1)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 将节点拉起来 + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Pull up the node ASSERT_EQ(0, cluster.StartPeer(peer1, PeerCluster::PeerToId(peer1))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. hang住此节点 + // 4. Hang this node ASSERT_EQ(0, cluster.HangPeer(peer1)); ::usleep(200 * 1000); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 5. 
Restore nodes ASSERT_EQ(0, cluster.SignalPeer(peer1)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); } /** - * 验证2个节点的复制组,并挂掉leader - * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader - * 3. 恢复leader + * Verify the replication groups of two nodes and hang the leader + * 1. Create a replication group of 2 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the leader + * 3. Restore leader */ TEST_F(RaftVoteTest, TwoNodeKillLeader) { LogicPoolID logicPoolId = 2; @@ -322,18 +262,14 @@ TEST_F(RaftVoteTest, TwoNodeKillLeader) { char ch = 'a'; int loop = 25; - // 1. 启动2个成员的复制组 + // 1. Start a replication group of 2 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -343,55 +279,36 @@ TEST_F(RaftVoteTest, TwoNodeKillLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉leader + // 2. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 拉起leader + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Pull up the leader ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证2个节点的复制组,并挂掉follower - * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 恢复follower + * Verify the replication groups of two nodes and hang the follower + * 1. Create a replication group of 2 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the follower + * 3. 
Restore follower */ TEST_F(RaftVoteTest, TwoNodeKillFollower) { LogicPoolID logicPoolId = 2; @@ -401,19 +318,15 @@ TEST_F(RaftVoteTest, TwoNodeKillFollower) { char ch = 'a'; int loop = 25; - // 1. 启动2个成员的复制组 + // 1. Start a replication group of 2 members LOG(INFO) << "init 2 members copyset"; PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -423,15 +336,10 @@ TEST_F(RaftVoteTest, TwoNodeKillFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉follower + // 2. Hang up the follower Peer followerPeer; if (leaderPeer.address() == peer1.address()) { followerPeer = peer2; @@ -441,57 +349,37 @@ TEST_F(RaftVoteTest, TwoNodeKillFollower) { LOG(INFO) << "kill follower " << followerPeer.address(); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeer)); LOG(INFO) << "fill ch: " << std::to_string(ch - 1); - // step down之前的request,最终会被提交 - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); - // 等待leader step down,之后,也不支持read了 + // Requests issued before the step down will eventually be committed + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch, 1); + // Wait for the leader to step down; after that, reads are no longer + // supported ::usleep(1000 * electionTimeoutMs * 2); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); - - // 3. 拉起follower + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch, 1); + + // 3. Pull up the follower LOG(INFO) << "restart follower " << followerPeer.address(); - ASSERT_EQ(0, - cluster.StartPeer(followerPeer, - PeerCluster::PeerToId(followerPeer))); + ASSERT_EQ(0, cluster.StartPeer(followerPeer, + PeerCluster::PeerToId(followerPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证,step down之前的write - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); - - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + // Verify the data written earlier, including the write before step down + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, 1); + + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证2个节点的复制组,并hang leader - * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang leader - * 3. 恢复leader + * Verify the replication group of 2 nodes and hang the leader + * 1. Create a replication group of 2 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang leader + * 3. 
Restore leader */ TEST_F(RaftVoteTest, TwoNodeHangLeader) { LogicPoolID logicPoolId = 2; @@ -501,18 +389,14 @@ TEST_F(RaftVoteTest, TwoNodeHangLeader) { char ch = 'a'; int loop = 25; - // 1. 启动2个成员的复制组 + // 1. Start a replication group of 2 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -522,56 +406,37 @@ TEST_F(RaftVoteTest, TwoNodeHangLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 2. Hang leader LOG(INFO) << "hang leader peer: " << leaderPeer.address(); ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 恢复leader + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Restore leader LOG(INFO) << "recover leader peer: " << leaderPeer.address(); ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证2个节点的复制组,并发Hang一个follower - * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang follower - * 3. 恢复follower + * Verify the replication group of two nodes and concurrently hang a follower + * 1. Create a replication group of 2 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang follower + * 3. Restore follower */ TEST_F(RaftVoteTest, TwoNodeHangFollower) { LogicPoolID logicPoolId = 2; @@ -581,19 +446,15 @@ TEST_F(RaftVoteTest, TwoNodeHangFollower) { char ch = 'a'; int loop = 25; - // 1. 启动2个成员的复制组 + // 1. 
Start a replication group of 2 members LOG(INFO) << "init 2 members copyset"; PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -603,13 +464,8 @@ TEST_F(RaftVoteTest, TwoNodeHangFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 2. hang follower Peer followerPeer; @@ -621,53 +477,33 @@ TEST_F(RaftVoteTest, TwoNodeHangFollower) { LOG(INFO) << "hang follower " << followerPeer.address(); ASSERT_EQ(0, cluster.HangPeer(followerPeer)); LOG(INFO) << "fill ch: " << std::to_string(ch - 1); - // step down之前的request,最终会被提交 - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); - // 等待leader step down之后,也不支持read了 + // Requests issued before the step down will eventually be committed + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch, 1); + // After the leader steps down, reads are no longer supported either ::usleep(1000 * electionTimeoutMs * 2); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); - - // 3. 恢复follower + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch, 1); + + // 3. Restore follower LOG(INFO) << "recover follower " << followerPeer.address(); ASSERT_EQ(0, cluster.SignalPeer(followerPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证,step down之前的write - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); - - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + // Verify the data written earlier, including the write before step down + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, 1); + + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点是否能够正常提供服务 - * 1. 创建3个副本的复制组,等待leader产生,write数据,然后read出来验证一遍 + * Verify whether the three nodes can provide services normally + * 1. 
Create a replication group of three replicas, wait for the leader to + * generate, write the data, and then read it out for verification */ TEST_F(RaftVoteTest, ThreeNodesNormal) { LogicPoolID logicPoolId = 2; @@ -682,12 +518,8 @@ TEST_F(RaftVoteTest, ThreeNodesNormal) { peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -700,24 +532,20 @@ TEST_F(RaftVoteTest, ThreeNodesNormal) { PeerId leaderId; ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的复制组,并挂掉leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader - * 3. 恢复leader + * Verify the replication groups of three nodes and hang the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the leader + * 3. Restore leader */ TEST_F(RaftVoteTest, ThreeNodeKillLeader) { LogicPoolID logicPoolId = 2; @@ -727,19 +555,15 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -750,55 +574,36 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉leader + // 2. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 
Pull up the leader ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点的复制组,并挂掉follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 恢复follower + * Verify the replication groups of three nodes and hang the follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the follower + * 3. Restore follower */ TEST_F(RaftVoteTest, ThreeNodeKillOneFollower) { LogicPoolID logicPoolId = 2; @@ -808,19 +613,15 @@ TEST_F(RaftVoteTest, ThreeNodeKillOneFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -831,57 +632,37 @@ TEST_F(RaftVoteTest, ThreeNodeKillOneFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 3. 拉起follower - ASSERT_EQ(0, - cluster.StartPeer(followerPeers[0], - PeerCluster::PeerToId(followerPeers[0]))); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // 3. Pull up the follower + ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], + PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的复制组,反复restart leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 
反复restart leader + * Verify the replication group of three nodes and repeatedly restart the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Repeatedly restart the leader */ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) { LogicPoolID logicPoolId = 2; @@ -891,19 +672,15 @@ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -914,13 +691,8 @@ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 2. restart leader for (int i = 0; i < 5; ++i) { @@ -928,32 +700,17 @@ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) { ::sleep(3); ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, + length, ch - 1, 1); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); } ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); @@ -961,9 +718,11 @@ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) { } /** - * 验证3个节点的复制组,反复重启一个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 反复重启follower + * Verify the replication groups of three nodes and restart a follower + * repeatedly + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Repeatedly restart the follower */ TEST_F(RaftVoteTest, ThreeNodeRestartFollower) { LogicPoolID logicPoolId = 2; @@ -973,19 +732,15 @@ TEST_F(RaftVoteTest, ThreeNodeRestartFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -996,27 +751,17 @@ TEST_F(RaftVoteTest, ThreeNodeRestartFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 反复 restart follower + // 2. Repeatedly restart follower for (int i = 0; i < 5; ++i) { std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); @@ -1028,11 +773,13 @@ TEST_F(RaftVoteTest, ThreeNodeRestartFollower) { } /** - * 验证3个节点的复制组,并挂掉leader和1个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader和1个follwoer - * 3. 拉起leader - * 4. 拉起follower + * Verify the replication groups of three nodes and hang the leader and one + * follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the leader and 1 follower + * 3. Pull up the leader + * 4. Pull up the follower */ TEST_F(RaftVoteTest, ThreeNodeKillLeaderAndOneFollower) { LogicPoolID logicPoolId = 2; @@ -1042,19 +789,15 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeaderAndOneFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1065,72 +808,48 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeaderAndOneFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉leader和Follower + // 2. Hang up the leader and follower ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 
拉起leader + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Pull up the leader ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 拉起follower + // 4. Pull up the follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(2 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点的复制组,并挂掉2个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉2个follower - * 3. 拉起1个follower - * 4. 拉起1个follower + * Verify the replication groups of three nodes and hang two followers + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up 2 followers + * 3. Pull up 1 follower + * 4. Pull up 1 follower */ TEST_F(RaftVoteTest, ThreeNodeKillTwoFollower) { LogicPoolID logicPoolId = 2; @@ -1140,19 +859,15 @@ TEST_F(RaftVoteTest, ThreeNodeKillTwoFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1163,73 +878,49 @@ TEST_F(RaftVoteTest, ThreeNodeKillTwoFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉2个Follower + // 2. Hang 2 Followers std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 2); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[1])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 拉起1个follower + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. 
Pull up 1 follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 拉起follower + // 4. Pull up the follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[1], PeerCluster::PeerToId(followerPeers[1]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的复制组,并挂掉3个成员 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉3个成员 - * 3. 拉起1个成员 - * 4. 拉起1个成员 - * 5. 拉起1个成员 + * Verify the replication group of 3 nodes and suspend 3 members + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang 3 members + * 3. Pull up 1 member + * 4. Pull up 1 member + * 5. Pull up 1 member */ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { LogicPoolID logicPoolId = 2; @@ -1239,19 +930,15 @@ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1262,80 +949,50 @@ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉3个成员 + // 2. Hang 3 members std::vector followerPeers; ASSERT_EQ(0, cluster.ShutdownPeer(peer1)); ASSERT_EQ(0, cluster.ShutdownPeer(peer2)); ASSERT_EQ(0, cluster.ShutdownPeer(peer3)); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 拉起1个成员 - ASSERT_EQ(0, cluster.StartPeer(peer1, - PeerCluster::PeerToId(peer1))); + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Pull up 1 member + ASSERT_EQ(0, cluster.StartPeer(peer1, PeerCluster::PeerToId(peer1))); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); - ReadVerifyNotAvailable(peer1, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - - // 4. 
拉起1个成员 - ASSERT_EQ(0, cluster.StartPeer(peer2, - PeerCluster::PeerToId(peer2))); + ReadVerifyNotAvailable(peer1, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 4. Pull up 1 member + ASSERT_EQ(0, cluster.StartPeer(peer2, PeerCluster::PeerToId(peer2))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 5. 再拉起1个成员 - ASSERT_EQ(0, cluster.StartPeer(peer3, - PeerCluster::PeerToId(peer3))); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // 5. Pull up one more member + ASSERT_EQ(0, cluster.StartPeer(peer3, PeerCluster::PeerToId(peer3))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } - - /** - * 验证3个节点的复制组,并hang leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang leader - * 3. 恢复leader + * Verify the replication groups of three nodes and hang the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang leader + * 3. Restore leader */ TEST_F(RaftVoteTest, ThreeNodeHangLeader) { LogicPoolID logicPoolId = 2; @@ -1345,19 +1002,15 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1368,65 +1021,40 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 2. hang leader Peer oldPeer = leaderPeer; ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 等待new leader产生 + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // Waiting for new leader generation ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 3. 恢复 old leader + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // 3. 
Restore old leader ASSERT_EQ(0, cluster.SignalPeer(oldPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } - /** - * 验证3个节点的复制组,并hang1个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 恢复follower + * Verify the replication groups of 3 nodes and hang 1 follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the follower + * 3. Restore follower */ TEST_F(RaftVoteTest, ThreeNodeHangOneFollower) { LogicPoolID logicPoolId = 2; @@ -1436,19 +1064,15 @@ TEST_F(RaftVoteTest, ThreeNodeHangOneFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1459,56 +1083,38 @@ TEST_F(RaftVoteTest, ThreeNodeHangOneFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. hang 1个follower + // 2. Hang 1 follower std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.HangPeer(followerPeers[0])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 3. 恢复follower + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // 3. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers[0])); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的复制组,并hang leader和1个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang leader和1个follower - * 3. 恢复old leader - * 4. 恢复follower + * Verify the replication groups of three nodes and hang the leader and one + * follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. 
Hang leader and 1 follower + * 3. Restore old leader + * 4. Restore follower */ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { LogicPoolID logicPoolId = 2; @@ -1518,19 +1124,15 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1541,13 +1143,8 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 2. hang leader ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); @@ -1555,63 +1152,39 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.HangPeer(followerPeers[0])); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 恢复 old leader + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Restore old leader ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 恢复follower + // 4. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers[0])); - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 0); } /** - * 验证3个节点的复制组,并hang 2个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang两个follower - * 3. 恢复old leader - * 4. 恢复follower + * Verify the replication groups of 3 nodes and hang 2 followers + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang two followers + * 3. Restore old leader + * 4. Restore follower */ TEST_F(RaftVoteTest, ThreeNodeHangTwoFollower) { LogicPoolID logicPoolId = 2; @@ -1621,19 +1194,15 @@ TEST_F(RaftVoteTest, ThreeNodeHangTwoFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector<Peer> peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1644,89 +1213,54 @@ ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. hang 2个follower + // 2. Hang 2 followers std::vector<Peer> followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 2); ASSERT_EQ(0, cluster.HangPeer(followerPeers[0])); ASSERT_EQ(0, cluster.HangPeer(followerPeers[1])); - // step down之前提交request会超时 - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - 1); - - // 等待step down之后,读也不可提供服务 + // Requests submitted before the leader steps down will time out + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, 1); + + // After the leader has stepped down, reads are not served either + ::usleep(1000 * electionTimeoutMs * 2); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 恢复1个follower + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Restore 1 follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers[0])); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 4. 恢复1个follower + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, 1); + + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // 4. Restore 1 follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers[1])); - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(2 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点的复制组,并hang 3个成员 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang 3个成员 - * 3. 恢复1个成员 - * 4. 恢复1个成员 - * 5. 恢复1个成员 + * Verify the replication group of 3 nodes and hang 3 members + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang 3 members + * 3. Restore 1 member + * 4. Restore 1 member + * 5. 
Restore 1 member */ TEST_F(RaftVoteTest, ThreeNodeHangThreeMember) { LogicPoolID logicPoolId = 2; @@ -1736,19 +1270,15 @@ TEST_F(RaftVoteTest, ThreeNodeHangThreeMember) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1759,77 +1289,41 @@ TEST_F(RaftVoteTest, ThreeNodeHangThreeMember) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉3个成员 + // 2. Hang 3 members std::vector followerPeers; ASSERT_EQ(0, cluster.HangPeer(peer1)); ASSERT_EQ(0, cluster.HangPeer(peer2)); ASSERT_EQ(0, cluster.HangPeer(peer3)); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 恢复1个成员 + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Restore 1 member ASSERT_EQ(0, cluster.SignalPeer(peer1)); ::usleep(1000 * electionTimeoutMs * 2); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); - // 4. 恢复1个成员 + // 4. Restore 1 member ASSERT_EQ(0, cluster.SignalPeer(peer2)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 5. 再恢复1个成员 + // 5. 
Restore 1 more member ASSERT_EQ(0, cluster.SignalPeer(peer3)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); diff --git a/test/integration/snapshotcloneserver/fake_curvefs_client.cpp b/test/integration/snapshotcloneserver/fake_curvefs_client.cpp index 49191fdd40..af6be699fd 100644 --- a/test/integration/snapshotcloneserver/fake_curvefs_client.cpp +++ b/test/integration/snapshotcloneserver/fake_curvefs_client.cpp @@ -20,11 +20,11 @@ * Author: xuchaojie */ +#include "test/integration/snapshotcloneserver/fake_curvefs_client.h" + #include #include -#include "test/integration/snapshotcloneserver/fake_curvefs_client.h" - namespace curve { namespace snapshotcloneserver { @@ -36,9 +36,8 @@ const uint64_t chunkSize = 16ULL * 1024 * 1024; const uint64_t segmentSize = 32ULL * 1024 * 1024; const uint64_t fileLength = 64ULL * 1024 * 1024; - -int FakeCurveFsClient::Init(const CurveClientOptions &options) { - // 初始化一个文件用打快照和克隆 +int FakeCurveFsClient::Init(const CurveClientOptions& options) { + // Initialize a file for snapshot and cloning FInfo fileInfo; fileInfo.id = 100; fileInfo.parentid = 3; @@ -59,15 +58,13 @@ int FakeCurveFsClient::Init(const CurveClientOptions &options) { return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::UnInit() { - return LIBCURVE_ERROR::OK; -} +int FakeCurveFsClient::UnInit() { return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::CreateSnapshot(const std::string &filename, - const std::string &user, - uint64_t *seq) { +int FakeCurveFsClient::CreateSnapshot(const std::string& filename, + const std::string& user, uint64_t* seq) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateSnapshot", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateSnapshot", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(filename); if (it != fileMap_.end()) { @@ -77,8 +74,8 @@ int FakeCurveFsClient::CreateSnapshot(const std::string &filename, snapInfo.filetype = FileType::INODE_SNAPSHOT_PAGEFILE; snapInfo.id = fileId_++; snapInfo.parentid = it->second.id; - snapInfo.filename = (it->second.filename + "-" - + std::to_string(it->second.seqnum)); + snapInfo.filename = + (it->second.filename + "-" + std::to_string(it->second.seqnum)); snapInfo.filestatus = FileStatus::Created; it->second.seqnum++; @@ -89,11 +86,11 @@ int FakeCurveFsClient::CreateSnapshot(const std::string &filename, } } -int FakeCurveFsClient::DeleteSnapshot(const std::string &filename, - const std::string &user, - uint64_t seq) { +int FakeCurveFsClient::DeleteSnapshot(const std::string& filename, + const std::string& user, uint64_t seq) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.DeleteSnapshot", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.DeleteSnapshot", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileSnapInfoMap_.find(filename); if (it != fileSnapInfoMap_.end()) { fileSnapInfoMap_.erase(it); @@ -102,12 +99,12 @@ int FakeCurveFsClient::DeleteSnapshot(const std::string &filename, return -LIBCURVE_ERROR::NOTEXIST; } -int FakeCurveFsClient::GetSnapshot(const std::string &filename, - const std::string &user, - uint64_t seq, +int FakeCurveFsClient::GetSnapshot(const std::string& filename, + const std::string& user, 
uint64_t seq, FInfo* snapInfo) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot", + -LIBCURVE_ERROR::FAILED); // NOLINT if (fileSnapInfoMap_.find(filename) != fileSnapInfoMap_.end()) { *snapInfo = fileSnapInfoMap_[filename]; return LIBCURVE_ERROR::OK; @@ -115,17 +112,18 @@ int FakeCurveFsClient::GetSnapshot(const std::string &filename, return -LIBCURVE_ERROR::NOTEXIST; } -int FakeCurveFsClient::GetSnapshotSegmentInfo(const std::string &filename, - const std::string &user, - uint64_t seq, - uint64_t offset, - SegmentInfo *segInfo) { +int FakeCurveFsClient::GetSnapshotSegmentInfo(const std::string& filename, + const std::string& user, + uint64_t seq, uint64_t offset, + SegmentInfo* segInfo) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshotSegmentInfo", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetSnapshotSegmentInfo", + -LIBCURVE_ERROR::FAILED); // NOLINT segInfo->segmentsize = segmentSize; segInfo->chunksize = chunkSize; segInfo->startoffset = offset; - // 一共2个segment + // 2 segments in total if (offset == 0) { segInfo->chunkvec = {{1, 1, 1}, {2, 2, 1}}; } else { @@ -134,50 +132,47 @@ int FakeCurveFsClient::GetSnapshotSegmentInfo(const std::string &filename, return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::ReadChunkSnapshot(ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure *scc) { +int FakeCurveFsClient::ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, + uint64_t offset, uint64_t len, + char* buf, SnapCloneClosure* scc) { scc->SetRetCode(LIBCURVE_ERROR::OK); scc->Run(); fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.ReadChunkSnapshot", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.ReadChunkSnapshot", + -LIBCURVE_ERROR::FAILED); // NOLINT memset(buf, 'x', len); return LIBCURVE_ERROR::OK; } int FakeCurveFsClient::CheckSnapShotStatus(std::string filename, - std::string user, - uint64_t seq, - FileStatus* filestatus) { + std::string user, uint64_t seq, + FileStatus* filestatus) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CheckSnapShotStatus", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CheckSnapShotStatus", + -LIBCURVE_ERROR::FAILED); // NOLINT return -LIBCURVE_ERROR::NOTEXIST; } -int FakeCurveFsClient::GetChunkInfo(const ChunkIDInfo &cidinfo, - ChunkInfoDetail *chunkInfo) { +int FakeCurveFsClient::GetChunkInfo(const ChunkIDInfo& cidinfo, + ChunkInfoDetail* chunkInfo) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo", + -LIBCURVE_ERROR::FAILED); // NOLINT chunkInfo->chunkSn.push_back(1); return LIBCURVE_ERROR::OK; } int FakeCurveFsClient::CreateCloneFile( - const std::string &source, - const std::string &filename, - const std::string &user, - uint64_t size, - uint64_t sn, - uint32_t chunkSize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, + const std::string& source, const std::string& filename, + const std::string& user, uint64_t size, uint64_t sn, uint32_t chunkSize, + uint64_t stripeUnit, uint64_t stripeCount, const std::string& poolset, FInfo* fileInfo) { 
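
// The fiu_return_on() guards used throughout this fake are libfiu
// failpoints: fiu_return_on(name, retval) makes the enclosing function
// return `retval` immediately when the named failpoint has been enabled,
// and is a no-op otherwise. This is how the integration tests force an
// individual fake-client call to fail. A test would drive it roughly like
// this (a sketch, assuming fiu_init() has already been called once in the
// test process):
//
//   fiu_enable("test/integration/snapshotcloneserver/"
//              "FakeCurveFsClient.CreateCloneFile",
//              1, NULL, 0);                       // arm the failpoint
//   // ... exercise the clone path; CreateCloneFile() now returns
//   // -LIBCURVE_ERROR::FAILED ...
//   fiu_disable("test/integration/snapshotcloneserver/"
//               "FakeCurveFsClient.CreateCloneFile");  // disarm it
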
fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile", + -LIBCURVE_ERROR::FAILED); // NOLINT fileInfo->id = fileId_++; fileInfo->parentid = 2; @@ -202,37 +197,37 @@ int FakeCurveFsClient::CreateCloneFile( return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::CreateCloneChunk( - const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure* scc) { +int FakeCurveFsClient::CreateCloneChunk(const std::string& location, + const ChunkIDInfo& chunkidinfo, + uint64_t sn, uint64_t csn, + uint64_t chunkSize, + SnapCloneClosure* scc) { scc->SetRetCode(LIBCURVE_ERROR::OK); scc->Run(); fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk", + -LIBCURVE_ERROR::FAILED); // NOLINT return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::RecoverChunk( - const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure *scc) { +int FakeCurveFsClient::RecoverChunk(const ChunkIDInfo& chunkidinfo, + uint64_t offset, uint64_t len, + SnapCloneClosure* scc) { scc->SetRetCode(LIBCURVE_ERROR::OK); scc->Run(); fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", + -LIBCURVE_ERROR::FAILED); // NOLINT return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::CompleteCloneMeta( - const std::string &filename, - const std::string &user) { +int FakeCurveFsClient::CompleteCloneMeta(const std::string& filename, + const std::string& user) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(filename); if (it != fileMap_.end()) { it->second.filestatus = FileStatus::CloneMetaInstalled; @@ -242,11 +237,12 @@ int FakeCurveFsClient::CompleteCloneMeta( } } -int FakeCurveFsClient::CompleteCloneFile( - const std::string &filename, - const std::string &user) { +int FakeCurveFsClient::CompleteCloneFile(const std::string& filename, + const std::string& user) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(filename); if (it != fileMap_.end()) { it->second.filestatus = FileStatus::Cloned; @@ -256,12 +252,13 @@ int FakeCurveFsClient::CompleteCloneFile( } } -int FakeCurveFsClient::SetCloneFileStatus( - const std::string &filename, - const FileStatus& filestatus, - const std::string &user) { +int FakeCurveFsClient::SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const std::string& user) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.SetCloneFileStatus", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.SetCloneFileStatus", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(filename); if (it != fileMap_.end()) { it->second.filestatus = filestatus; @@ 
-271,12 +268,11 @@ int FakeCurveFsClient::SetCloneFileStatus( } } -int FakeCurveFsClient::GetFileInfo( - const std::string &filename, - const std::string &user, - FInfo* fileInfo) { +int FakeCurveFsClient::GetFileInfo(const std::string& filename, + const std::string& user, FInfo* fileInfo) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", + -LIBCURVE_ERROR::FAILED); // NOLINT if (fileMap_.find(filename) != fileMap_.end()) { *fileInfo = fileMap_[filename]; return LIBCURVE_ERROR::OK; @@ -284,18 +280,18 @@ int FakeCurveFsClient::GetFileInfo( return -LIBCURVE_ERROR::NOTEXIST; } -int FakeCurveFsClient::GetOrAllocateSegmentInfo( - bool allocate, - uint64_t offset, - FInfo* fileInfo, - const std::string &user, - SegmentInfo *segInfo) { +int FakeCurveFsClient::GetOrAllocateSegmentInfo(bool allocate, uint64_t offset, + FInfo* fileInfo, + const std::string& user, + SegmentInfo* segInfo) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo", + -LIBCURVE_ERROR::FAILED); // NOLINT segInfo->segmentsize = segmentSize; segInfo->chunksize = chunkSize; segInfo->startoffset = offset; - // 一共2个segment + // 2 segments in total if (offset == 0) { segInfo->chunkvec = {{1, 1, 1}, {2, 2, 1}}; } else { @@ -304,16 +300,16 @@ int FakeCurveFsClient::GetOrAllocateSegmentInfo( return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::RenameCloneFile( - const std::string &user, - uint64_t originId, - uint64_t destinationId, - const std::string &origin, - const std::string &destination) { - LOG(INFO) << "RenameCloneFile from " << origin - << " to " << destination; +int FakeCurveFsClient::RenameCloneFile(const std::string& user, + uint64_t originId, + uint64_t destinationId, + const std::string& origin, + const std::string& destination) { + LOG(INFO) << "RenameCloneFile from " << origin << " to " << destination; fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(origin); if (it != fileMap_.end()) { it->second.parentid = 3; @@ -326,10 +322,8 @@ int FakeCurveFsClient::RenameCloneFile( } } -int FakeCurveFsClient::DeleteFile( - const std::string &fileName, - const std::string &user, - uint64_t fileId) { +int FakeCurveFsClient::DeleteFile(const std::string& fileName, + const std::string& user, uint64_t fileId) { auto it = fileMap_.find(fileName); if (it != fileMap_.end()) { fileMap_.erase(it); @@ -340,14 +334,15 @@ int FakeCurveFsClient::DeleteFile( } int FakeCurveFsClient::Mkdir(const std::string& dirpath, - const std::string &user) { + const std::string& user) { return -LIBCURVE_ERROR::EXISTS; } int FakeCurveFsClient::ChangeOwner(const std::string& filename, const std::string& newOwner) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(filename); if (it != fileMap_.end()) { it->second.owner = newOwner; @@ -358,7 +353,7 @@ int FakeCurveFsClient::ChangeOwner(const std::string& filename, } bool 
FakeCurveFsClient::JudgeCloneDirHasFile() { - for (auto &f : fileMap_) { + for (auto& f : fileMap_) { if (2 == f.second.parentid) { LOG(INFO) << "Clone dir has file, fileinfo is :" << " id = " << f.second.id diff --git a/test/integration/snapshotcloneserver/fake_curvefs_client.h b/test/integration/snapshotcloneserver/fake_curvefs_client.h index 0f3a0a6107..c93d76daa4 100644 --- a/test/integration/snapshotcloneserver/fake_curvefs_client.h +++ b/test/integration/snapshotcloneserver/fake_curvefs_client.h @@ -23,15 +23,13 @@ #ifndef TEST_INTEGRATION_SNAPSHOTCLONESERVER_FAKE_CURVEFS_CLIENT_H_ #define TEST_INTEGRATION_SNAPSHOTCLONESERVER_FAKE_CURVEFS_CLIENT_H_ -#include #include +#include #include "src/snapshotcloneserver/common/curvefs_client.h" - using ::curve::client::UserInfo_t; - namespace curve { namespace snapshotcloneserver { @@ -43,122 +41,84 @@ extern const char* testFile1; class FakeCurveFsClient : public CurveFsClient { public: - FakeCurveFsClient() : - fileId_(101) {} + FakeCurveFsClient() : fileId_(101) {} virtual ~FakeCurveFsClient() {} - int Init(const CurveClientOptions &options) override; + int Init(const CurveClientOptions& options) override; int UnInit() override; - int CreateSnapshot(const std::string &filename, - const std::string &user, - uint64_t *seq) override; - - int DeleteSnapshot(const std::string &filename, - const std::string &user, - uint64_t seq) override; - - int GetSnapshot(const std::string &filename, - const std::string &user, - uint64_t seq, - FInfo* snapInfo) override; - - int GetSnapshotSegmentInfo(const std::string &filename, - const std::string &user, - uint64_t seq, - uint64_t offset, - SegmentInfo *segInfo) override; - - int ReadChunkSnapshot(ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure *scc) override; - - int CheckSnapShotStatus(std::string filename, - std::string user, - uint64_t seq, - FileStatus* filestatus) override; - - int GetChunkInfo(const ChunkIDInfo &cidinfo, - ChunkInfoDetail *chunkInfo) override; - - int CreateCloneFile( - const std::string &source, - const std::string &filename, - const std::string &user, - uint64_t size, - uint64_t sn, - uint32_t chunkSize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - FInfo* fileInfo) override; - - int CreateCloneChunk( - const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure *scc) override; - - int RecoverChunk( - const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure *scc) override; - - int CompleteCloneMeta( - const std::string &filename, - const std::string &user) override; - - int CompleteCloneFile( - const std::string &filename, - const std::string &user) override; - - int SetCloneFileStatus( - const std::string &filename, - const FileStatus& filestatus, - const std::string &user) override; - - int GetFileInfo( - const std::string &filename, - const std::string &user, - FInfo* fileInfo) override; - - int GetOrAllocateSegmentInfo( - bool allocate, - uint64_t offset, - FInfo* fileInfo, - const std::string &user, - SegmentInfo *segInfo) override; - - int RenameCloneFile( - const std::string &user, - uint64_t originId, - uint64_t destinationId, - const std::string &origin, - const std::string &destination) override; - - int DeleteFile( - const std::string &fileName, - const std::string &user, - uint64_t fileId) override; - - int Mkdir(const std::string& dirpath, - const std::string &user) override; + 
int CreateSnapshot(const std::string& filename, const std::string& user, + uint64_t* seq) override; + + int DeleteSnapshot(const std::string& filename, const std::string& user, + uint64_t seq) override; + + int GetSnapshot(const std::string& filename, const std::string& user, + uint64_t seq, FInfo* snapInfo) override; + + int GetSnapshotSegmentInfo(const std::string& filename, + const std::string& user, uint64_t seq, + uint64_t offset, SegmentInfo* segInfo) override; + + int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, + SnapCloneClosure* scc) override; + + int CheckSnapShotStatus(std::string filename, std::string user, + uint64_t seq, FileStatus* filestatus) override; + + int GetChunkInfo(const ChunkIDInfo& cidinfo, + ChunkInfoDetail* chunkInfo) override; + + int CreateCloneFile(const std::string& source, const std::string& filename, + const std::string& user, uint64_t size, uint64_t sn, + uint32_t chunkSize, uint64_t stripeUnit, + uint64_t stripeCount, const std::string& poolset, + FInfo* fileInfo) override; + + int CreateCloneChunk(const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, + uint64_t csn, uint64_t chunkSize, + SnapCloneClosure* scc) override; + + int RecoverChunk(const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc) override; + + int CompleteCloneMeta(const std::string& filename, + const std::string& user) override; + + int CompleteCloneFile(const std::string& filename, + const std::string& user) override; + + int SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const std::string& user) override; + + int GetFileInfo(const std::string& filename, const std::string& user, + FInfo* fileInfo) override; + + int GetOrAllocateSegmentInfo(bool allocate, uint64_t offset, + FInfo* fileInfo, const std::string& user, + SegmentInfo* segInfo) override; + + int RenameCloneFile(const std::string& user, uint64_t originId, + uint64_t destinationId, const std::string& origin, + const std::string& destination) override; + + int DeleteFile(const std::string& fileName, const std::string& user, + uint64_t fileId) override; + + int Mkdir(const std::string& dirpath, const std::string& user) override; int ChangeOwner(const std::string& filename, const std::string& newOwner) override; /** - * @brief 判断/clone目录下是否存在临时文件 + * @brief Check if there are temporary files under the /clone directory. * - * @retval true 存在 - * @retval false 不存在 + * @retval true If they exist. + * @retval false If they do not exist. */ bool JudgeCloneDirHasFile(); @@ -169,11 +129,11 @@ class FakeCurveFsClient : public CurveFsClient { // fileName -> snapshot fileInfo std::map fileSnapInfoMap_; - // inodeid 从101开始,100以内预留 - // 快照所属文件Id一律为100, parentid = 99 - // "/" 目录的Id为1 - // "/clone" 目录的Id为2 - // "/user1" 目录的Id为3 + // Inode IDs start from 101, with numbers under 100 reserved. + // Snapshot file IDs are always 100, with a parentid = 99. + // The ID for the "/" directory is 1. + // The ID for the "/clone" directory is 2. + // The ID for the "/user1" directory is 3. 
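
// fileId_ below is the id allocator for everything this fake creates:
// CreateSnapshot() and CreateCloneFile() both take fileId_++ as the new
// inode id, so starting the counter at 101 keeps generated ids clear of
// the reserved range documented above.
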
std::atomic fileId_; }; diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp index 8f312b9a88..4bcc65f8e7 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp @@ -20,24 +20,24 @@ * Author: xuchaojie */ -#include -#include #include +#include +#include #include -#include "test/integration/cluster_common/cluster.h" #include "src/client/libcurve_file.h" -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" -#include "src/snapshotcloneserver/clone/clone_service_manager.h" -#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" +#include "src/client/source_reader.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/clone/clone_service_manager.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" -#include "src/client/source_reader.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" +#include "test/integration/cluster_common/cluster.h" +#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" using curve::CurveCluster; using curve::client::FileClient; -using curve::client::UserInfo_t; using curve::client::SourceReader; +using curve::client::UserInfo_t; const std::string kTestPrefix = "SCSTest"; // NOLINT @@ -65,27 +65,26 @@ const std::string kMdsConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_mds.conf"; -const std::string kCSConfigPath = // NOLINT +const std::string kCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_chunkserver.conf"; -const std::string kCsClientConfigPath = // NOLINT +const std::string kCsClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_cs_client.conf"; -const std::string kSnapClientConfigPath = // NOLINT +const std::string kSnapClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_snap_client.conf"; -const std::string kS3ConfigPath = // NOLINT - "./test/integration/snapshotcloneserver/config/" + kTestPrefix + - "_s3.conf"; +const std::string kS3ConfigPath = // NOLINT + "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_s3.conf"; -const std::string kSCSConfigPath = // NOLINT +const std::string kSCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_scs.conf"; -const std::string kClientConfigPath = // NOLINT +const std::string kClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_client.conf"; @@ -99,11 +98,11 @@ const std::vector mdsConfigOptions{ }; const std::vector mdsConf1{ - { "--graceful_quit_on_sigterm" }, + {"--graceful_quit_on_sigterm"}, std::string("--confPath=") + kMdsConfigPath, std::string("--log_dir=") + kLogPath, std::string("--segmentSize=") + std::to_string(segmentSize), - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector chunkserverConfigOptions{ @@ -128,66 +127,63 @@ const std::vector snapClientConfigOptions{ const std::vector s3ConfigOptions{}; const std::vector chunkserverConf1{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "1/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "1/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + 
kTestPrefix + "1/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "1/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "1/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "1/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "1/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "1/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "1/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "1/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "1/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "1/walfilepool.meta"}, }; const std::vector chunkserverConf2{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "2/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "2/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "2/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "2/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "2/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "2/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "2/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "2/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "2/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "2/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "2/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "2/walfilepool.meta"}, }; const std::vector chunkserverConf3{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "3/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "3/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "3/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "3/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/" }, - { 
"-chunkFilePoolMetaPath=./" + kTestPrefix + - "3/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "3/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "3/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "3/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "3/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "3/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "3/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "3/walfilepool.meta"}, }; const std::vector snapshotcloneserverConfigOptions{ @@ -212,7 +208,7 @@ const std::vector snapshotcloneserverConfigOptions{ const std::vector snapshotcloneConf{ std::string("--conf=") + kSCSConfigPath, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector clientConfigOptions{ @@ -241,16 +237,16 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); - // 初始化db + // Initialize db system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); system(std::string("rm -rf " + kTestPrefix + "1").c_str()); system(std::string("rm -rf " + kTestPrefix + "2").c_str()); system(std::string("rm -rf " + kTestPrefix + "3").c_str()); - // 启动etcd + // Start etcd pid_t pid = cluster_->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, - std::vector{ "--name=" + kTestPrefix }); + std::vector{"--name=" + kTestPrefix}); LOG(INFO) << "etcd 1 started on " << kEtcdPeerIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); @@ -260,13 +256,13 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions); - // 启动一个mds + // Start an mds pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 创建物理池 + // Creating a physical pool ASSERT_EQ(0, cluster_->PreparePhysicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -275,21 +271,18 @@ class SnapshotCloneServerTest : public ::testing::Test { // format chunkfilepool and walfilepool std::vector threadpool(3); - threadpool[0] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "1/chunkfilepool/", - "./" + kTestPrefix + "1/chunkfilepool.meta", - "./" + kTestPrefix + "1/chunkfilepool/", 1); - threadpool[1] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "2/chunkfilepool/", - "./" + kTestPrefix + "2/chunkfilepool.meta", - "./" + kTestPrefix + "2/chunkfilepool/", 1); - threadpool[2] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "3/chunkfilepool/", - "./" + kTestPrefix + "3/chunkfilepool.meta", - "./" + kTestPrefix + "3/chunkfilepool/", 1); + threadpool[0] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + 
"1/chunkfilepool/", + "./" + kTestPrefix + "1/chunkfilepool.meta", + "./" + kTestPrefix + "1/chunkfilepool/", 1); + threadpool[1] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "2/chunkfilepool/", + "./" + kTestPrefix + "2/chunkfilepool.meta", + "./" + kTestPrefix + "2/chunkfilepool/", 1); + threadpool[2] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "3/chunkfilepool/", + "./" + kTestPrefix + "3/chunkfilepool.meta", + "./" + kTestPrefix + "3/chunkfilepool/", 1); for (int i = 0; i < 3; i++) { threadpool[i].join(); } @@ -303,7 +296,7 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kCSConfigPath, chunkserverConfigOptions); - // 创建chunkserver + // Create chunkserver pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1 @@ -322,7 +315,8 @@ class SnapshotCloneServerTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first ASSERT_EQ(0, cluster_->PrepareLogicalPool( 1, "./test/integration/snapshotcloneserver/config/" @@ -396,7 +390,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(ERROR) << "Open fail, ret = " << testfd1_; return false; } - // 每个chunk写前面4k数据, 写两个segment + // Write the first 4k data and two segments for each chunk uint64_t totalChunk = 2ULL * segmentSize / chunkSize; for (uint64_t i = 0; i < totalChunk / chunkGap; i++) { ret = @@ -496,23 +490,23 @@ class SnapshotCloneServerTest : public ::testing::Test { CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr; FileClient* SnapshotCloneServerTest::fileClient_ = nullptr; -// 常规测试用例 -// 场景一:快照增加删除查找 +// Regular test cases +// Scenario 1: Adding, deleting, and searching snapshots TEST_F(SnapshotCloneServerTest, TestSnapshotAddDeleteGet) { std::string uuid1; int ret = 0; - // 操作1:用户testUser1_对不存在的文件打快照 - // 预期1:返回文件不存在 + // Step1: User testUser1_ Take a snapshot of non-existent files + // Expected 1: Return file does not exist ret = MakeSnapshot(testUser1_, "/ItUser1/notExistFile", "snap1", &uuid1); ASSERT_EQ(kErrCodeFileNotExist, ret); - // 操作2:用户testUser2_对testFile1_打快照 - // 预期2:返回用户认证失败 + // Step2: User testUser2_ For testFile1_ Take a snapshot + // Expected 2: Failed to return user authentication ret = MakeSnapshot(testUser2_, testFile1_, "snap1", &uuid1); ASSERT_EQ(kErrCodeInvalidUser, ret); - // 操作3:用户testUser1_对testFile1_打快照snap1。 - // 预期3:打快照成功 + // Step3: User testUser1_ For testFile1_ Take a snapshot snap1. 
+    // Expected 3: the snapshot succeeds
     ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1);
     ASSERT_EQ(0, ret);
@@ -520,56 +514,56 @@ TEST_F(SnapshotCloneServerTest, TestSnapshotAddDeleteGet) {
     ASSERT_TRUE(WriteFile(testFile1_, testUser1_, fakeData));
     ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData));

-    // 操作4: 获取快照信息,user=testUser1_,filename=testFile1_
-    // 预期4:返回快照snap1的信息
+    // Step 4: get snapshot info with user=testUser1_, filename=testFile1_
+    // Expected 4: returns the information of snapshot snap1
     bool success1 = CheckSnapshotSuccess(testUser1_, testFile1_, uuid1);
     ASSERT_TRUE(success1);

-    // 操作5:获取快照信息,user=testUser2_,filename=testFile1_
-    // 预期5:返回用户认证失败
+    // Step 5: get snapshot info with user=testUser2_, filename=testFile1_
+    // Expected 5: returns user-authentication failure
     FileSnapshotInfo info1;
     ret = GetSnapshotInfo(testUser2_, testFile1_, uuid1, &info1);
     ASSERT_EQ(kErrCodeInvalidUser, ret);

-    // 操作6:获取快照信息,user=testUser2_,filename=testFile2_
-    // 预期6:返回空
+    // Step 6: get snapshot info with user=testUser2_, filename=testFile2_
+    // Expected 6: returns an empty list
     std::vector infoVec;
     ret = ListFileSnapshotInfo(testUser2_, testFile2_, 10, 0, &infoVec);
     ASSERT_EQ(0, ret);
     ASSERT_EQ(0, infoVec.size());

-    // 操作7:testUser2_删除快照snap1
-    // 预期7:返回用户认证失败
+    // Step 7: testUser2_ deletes snapshot snap1
+    // Expected 7: returns user-authentication failure
     ret = DeleteSnapshot(testUser2_, testFile1_, uuid1);
     ASSERT_EQ(kErrCodeInvalidUser, ret);

-    // 操作8:testUser1_删除testFile2_的快照,ID为snap1
-    // 预期8:返回文件名不匹配
+    // Step 8: testUser1_ deletes the snapshot of testFile2_ with ID snap1
+    // Expected 8: returns file-name mismatch
     ret = DeleteSnapshot(testUser1_, testFile2_, uuid1);
     ASSERT_EQ(kErrCodeFileNameNotMatch, ret);

-    // 操作9:testUser1_删除快照snap1
-    // 预期9:返回删除成功
+    // Step 9: testUser1_ deletes snapshot snap1
+    // Expected 9: deletion succeeds
     ret = DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, uuid1);
     ASSERT_EQ(0, ret);

-    // 操作10:获取快照信息,user=testUser1_,filename=testFile1_
-    // 预期10:返回空
+    // Step 10: get snapshot info with user=testUser1_, filename=testFile1_
+    // Expected 10: returns an empty list
     ret = ListFileSnapshotInfo(testUser1_, testFile1_, 10, 0, &infoVec);
     ASSERT_EQ(0, ret);
     ASSERT_EQ(0, infoVec.size());

-    // 操作11:testUser1_删除快照snap1(重复删除)
-    // 预期11:返回删除成功
+    // Step 11: testUser1_ deletes snapshot snap1 again (duplicate deletion)
+    // Expected 11: deletion succeeds
     ret = DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, uuid1);
     ASSERT_EQ(0, ret);

-    // 复原testFile1_
+    // Restore testFile1_
     std::string fakeData2(4096, 'x');
     ASSERT_TRUE(WriteFile(testFile1_, testUser1_, fakeData2));
 }

-// 场景二:取消快照
+// Scenario 2: cancel a snapshot
 TEST_F(SnapshotCloneServerTest, TestCancelSnapshot) {
     std::string uuid1;
     int ret = MakeSnapshot(testUser1_, testFile1_, "snapToCancle", &uuid1);
@@ -584,29 +578,35 @@ TEST_F(SnapshotCloneServerTest, TestCancelSnapshot) {
         if (info1.GetSnapshotInfo().GetStatus() == Status::pending ||
             info1.GetSnapshotInfo().GetStatus() == Status::canceling) {
             if (!isCancel) {
-                // 操作1:用户testUser1_对testFile1_打快照snap1,
-                // 在快照未完成前testUser2_取消testFile1_的快照snap1
-                // 预期1:取消用户认证失败
+                // Step 1: user testUser1_ takes snapshot snap1 of testFile1_,
+                //         and testUser2_ cancels snap1 of testFile1_ before
+                //         the snapshot completes
+                // Expected 1: cancel fails with user-authentication failure
                 int retCode = CancelSnapshot(testUser2_, testFile1_, uuid1);
                 ASSERT_EQ(kErrCodeInvalidUser, retCode);

-                // 操作2:用户testUser1_对testFile1_打快照snap1,
-                // 在快照未完成前testUser1_取消testFile1_
-                // 的不存在的快照
-                // 预期2:返回kErrCodeCannotCancelFinished
+                // Step 2: user testUser1_ takes snapshot snap1 of testFile1_,
+                //         and testUser1_ cancels a nonexistent snapshot of
+                //         testFile1_ before the snapshot completes
+                // Expected 2: returns kErrCodeCannotCancelFinished
                 retCode = CancelSnapshot(testUser1_, testFile1_, "notExistUUId");
                 ASSERT_EQ(kErrCodeCannotCancelFinished, retCode);

-                // 操作3:用户testUser1_对testFile1_打快照snap1,
-                // 在快照未完成前testUser1_取消testFile2_的快照snap1
-                // 预期3: 返回文件名不匹配
+                // Step 3: user testUser1_ takes snapshot snap1 of testFile1_,
+                //         and testUser1_ cancels snap1 of testFile2_ before
+                //         the snapshot completes
+                // Expected 3: returns file-name mismatch
                 retCode = CancelSnapshot(testUser1_, testFile2_, uuid1);
                 ASSERT_EQ(kErrCodeFileNameNotMatch, retCode);

-                // 操作4:用户testUser1_对testFile1_打快照,
-                // 在快照未完成前testUser1_取消快照snap1
-                // 预期4:取消快照成功
+                // Step 4: user testUser1_ takes a snapshot of testFile1_,
+                //         and testUser1_ cancels snapshot snap1 before the
+                //         snapshot completes
+                // Expected 4: the snapshot is cancelled successfully
                 retCode = CancelSnapshot(testUser1_, testFile1_, uuid1);
                 ASSERT_EQ(0, retCode);
                 isCancel = true;
@@ -621,47 +621,48 @@
                     << static_cast(info1.GetSnapshotInfo().GetStatus());
             }
         } else if (retCode == -8) {
-            // 操作5:获取快照信息,user=testUser1_,filename=testFile1_
-            // 预期5:返回空
+            // Step 5: get snapshot info with user=testUser1_,
+            //         filename=testFile1_
+            // Expected 5: returns empty
            success1 = true;
            break;
        }
    }
    ASSERT_TRUE(success1);

-    // 操作6: 在快照已完成后,testUser1_取消testFile1_的快照snap1
-    // 预期6: 返回待取消的快照不存在或已完成
+    // Step 6: after the snapshot has completed, testUser1_ cancels snap1 of
+    //         testFile1_
+    // Expected 6: returns that the snapshot to cancel does not exist or has
+    //             already completed
     ret = CancelSnapshot(testUser1_, testFile1_, uuid1);
     ASSERT_EQ(kErrCodeCannotCancelFinished, ret);
 }

-// 场景三:lazy快照克隆场景
+// Scenario 3: lazy snapshot clone
 TEST_F(SnapshotCloneServerTest, TestSnapLazyClone) {
     std::string snapId;
     PrepareSnapshotForTestFile1(&snapId);

-    // 操作1: testUser1_ clone不存在的快照,fileName=SnapLazyClone1
-    // 预期1:返回快照不存在
+    // Step 1: testUser1_ clones a nonexistent snapshot,
+    //         fileName=SnapLazyClone1
+    // Expected 1: returns snapshot-not-exist
     std::string uuid1, uuid2, uuid3, uuid4, uuid5;
     int ret;
     ret = CloneOrRecover("Clone", testUser1_, "UnExistSnapId1",
                          "/ItUser1/SnapLazyClone1", true, &uuid1);
     ASSERT_EQ(kErrCodeFileNotExist, ret);

-    // 操作2:testUser2_ clone快照snap1,fileName=SnapLazyClone1
-    // 预期2: 返回用户认证失败
+    // Step 2: testUser2_ clones snapshot snap1, fileName=SnapLazyClone1
+    // Expected 2: returns user-authentication failure
     ret = CloneOrRecover("Clone", testUser2_, snapId, "/ItUser2/SnapLazyClone1",
                          true, &uuid2);
     ASSERT_EQ(kErrCodeInvalidUser, ret);

-    // 操作3:testUser1_ clone 快照snap1,fileName=SnapLazyClone1
-    // 预期3 返回克隆成功
+    // Step 3: testUser1_ clones snapshot snap1, fileName=SnapLazyClone1
+    // Expected 3: clone succeeds
     std::string dstFile = "/ItUser1/SnapLazyClone1";
     ret = CloneOrRecover("Clone", testUser1_, snapId, dstFile, true, &uuid3);
     ASSERT_EQ(0, ret);

-    // 操作4: testUser1_ clone 块照snap1,fileName=SnapLazyClone1 (重复克隆)
-    // 预期4:返回克隆成功(幂等)
+    // Step 4: testUser1_ clones snapshot snap1 again,
+    //         fileName=SnapLazyClone1 (duplicate clone)
+    // Expected 4: clone succeeds (idempotent)
     ret = CloneOrRecover("Clone", testUser1_, snapId, "/ItUser1/SnapLazyClone1",
                          true, &uuid4);
     ASSERT_EQ(0, ret);
@@ -670,68 +671,68 @@ TEST_F(SnapshotCloneServerTest, TestSnapLazyClone) {
     ret = Flatten(testUser1_, uuid3);
     ASSERT_EQ(0, ret);

-    // 操作5: testUser1_ GetCloneTask
-    // 预期5:返回SnapLazyClone1的clone 任务
+    // Step 5: testUser1_ calls GetCloneTask
+    // Expected 5: returns the clone task of SnapLazyClone1
     bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid3, true);
     ASSERT_TRUE(success1);

-    // 操作6: testUser2_ GetCloneTask
-    // 预期6: 返回空
+    // Step 6: testUser2_ calls GetCloneTask
+    // Expected 6: returns an empty list
     std::vector infoVec;
     ret = ListCloneTaskInfo(testUser2_, 10, 0, &infoVec);
     ASSERT_EQ(0, ret);
     ASSERT_EQ(0, infoVec.size());

-    // 操作7: testUser2_ CleanCloneTask UUID为SnapLazyClone1的UUID
-    // 预期7:返回用户认证失败
+    // Step 7: testUser2_ calls CleanCloneTask with the UUID of SnapLazyClone1
+    // Expected 7: returns user-authentication failure
     ret = CleanCloneTask(testUser2_, uuid3);
     ASSERT_EQ(kErrCodeInvalidUser, ret);

-    // 操作8: testUser1_ CleanCloneTask UUID为SnapLazyClone1的UUID
-    // 预期8:返回执行成功
+    // Step 8: testUser1_ calls CleanCloneTask with the UUID of SnapLazyClone1
+    // Expected 8: the call succeeds
     ret = CleanCloneTask(testUser1_, uuid3);
     ASSERT_EQ(0, ret);

-    // 等待清理完成
+    // Wait for the cleanup to finish
     std::this_thread::sleep_for(std::chrono::seconds(3));

-    // 操作9: testUser1_ CleanCloneTask UUID为SnapLazyClone1的UUID(重复执行)
-    // 预期9:返回执行成功
+    // Step 9: testUser1_ calls CleanCloneTask with the UUID of SnapLazyClone1
+    //         again (duplicate execution)
+    // Expected 9: the call succeeds
     ret = CleanCloneTask(testUser1_, uuid3);
     ASSERT_EQ(0, ret);

-    // 操作10:testUser1_ GetCloneTask
-    // 预期10:返回空
+    // Step 10: testUser1_ calls GetCloneTask
+    // Expected 10: returns nothing
     TaskCloneInfo info;
     ret = GetCloneTaskInfo(testUser1_, uuid3, &info);
     ASSERT_EQ(kErrCodeFileNotExist, ret);

-    // 验证数据正确性
+    // Verify data correctness
     std::string fakeData(4096, 'x');
     ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));
 }

-// 场景四:非lazy快照克隆场景
+// Scenario 4: non-lazy snapshot clone
 TEST_F(SnapshotCloneServerTest, TestSnapNotLazyClone) {
     std::string snapId;
     PrepareSnapshotForTestFile1(&snapId);

-    // 操作1: testUser1_ clone不存在的快照,fileName=SnapNotLazyClone1
-    // 预期1:返回快照不存在
+    // Step 1: testUser1_ clones a nonexistent snapshot,
+    //         fileName=SnapNotLazyClone1
+    // Expected 1: returns snapshot-not-exist
     std::string uuid1;
     int ret;
     ret = CloneOrRecover("Clone", testUser1_, "UnExistSnapId2",
                          "/ItUser1/SnapNotLazyClone1", false, &uuid1);
     ASSERT_EQ(kErrCodeFileNotExist, ret);

-    // 操作2:testUser2_ clone快照snap1,fileName=SnapNotLazyClone1
-    // 预期2: 返回用户认证失败
+    // Step 2: testUser2_ clones snapshot snap1, fileName=SnapNotLazyClone1
+    // Expected 2: returns user-authentication failure
     ret = CloneOrRecover("Clone", testUser2_, snapId,
                          "/ItUser2/SnapNotLazyClone1", false, &uuid1);
     ASSERT_EQ(kErrCodeInvalidUser, ret);

-    // 操作3:testUser1_ clone 快照snap1,fileName=SnapNotLazyClone1
-    // 预期3 返回克隆成功
+    // Step 3: testUser1_ clones snapshot snap1, fileName=SnapNotLazyClone1
+    // Expected 3: clone succeeds
     std::string dstFile = "/ItUser1/SnapNotLazyClone1";
     ret = CloneOrRecover("Clone", testUser1_, snapId, dstFile, false, &uuid1);
     ASSERT_EQ(0, ret);
@@ -739,39 +740,39 @@ TEST_F(SnapshotCloneServerTest, TestSnapNotLazyClone) {
     bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true);
     ASSERT_TRUE(success1);

-    // 操作4: testUser1_ clone 块照snap1,
-    // fileName=SnapNotLazyClone1 (重复克隆)
-    // 预期4:返回克隆成功(幂等)
+    // Step 4: testUser1_ clones snapshot snap1 again,
+    //         fileName=SnapNotLazyClone1 (duplicate clone)
+    // Expected 4: clone succeeds (idempotent)
     ret = CloneOrRecover("Clone", testUser1_, snapId,
                          "/ItUser1/SnapNotLazyClone1", false, &uuid1);
     ASSERT_EQ(0, ret);

-    // 验证数据正确性
+    // Verify data correctness
     std::string fakeData(4096, 'x');
     ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));
 }

-// 场景五:lazy快照恢复场景
+// Scenario 5: lazy snapshot recovery
 TEST_F(SnapshotCloneServerTest, TestSnapLazyRecover) {
     std::string snapId;
     PrepareSnapshotForTestFile1(&snapId);

-    // 操作1: testUser1_ Recover不存在的快照,fileName=testFile1_
-    // 预期1:返回快照不存在
+    // Step 1: testUser1_ recovers from a nonexistent snapshot,
+    //         fileName=testFile1_
+    // Expected 1: returns snapshot-not-exist
     std::string uuid1;
     int ret;
     ret = CloneOrRecover("Recover", testUser1_, "UnExistSnapId3", testFile1_,
                          true, &uuid1);
     ASSERT_EQ(kErrCodeFileNotExist, ret);

-    // 操作2:testUser2_ Recover快照snap1,fileName=testFile1_
-    // 预期2: 返回用户认证失败
+    // Step 2: testUser2_ recovers from snapshot snap1, fileName=testFile1_
+    // Expected 2: returns user-authentication failure
     ret = CloneOrRecover("Recover", testUser2_, snapId, testFile1_, true,
                          &uuid1);
     ASSERT_EQ(kErrCodeInvalidUser, ret);

-    // 操作3:testUser1_ Recover快照snap1,fileName=testFile1_
-    // 预期3 返回恢复成功
+    // Step 3: testUser1_ recovers from snapshot snap1, fileName=testFile1_
+    // Expected 3: recovery succeeds
     ret = CloneOrRecover("Recover", testUser1_, snapId, testFile1_, true,
                          &uuid1);
     ASSERT_EQ(0, ret);
@@ -783,38 +784,38 @@ TEST_F(SnapshotCloneServerTest, TestSnapLazyRecover) {
     bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false);
     ASSERT_TRUE(success1);

-    // 验证数据正确性
+    // Verify data correctness
     std::string fakeData(4096, 'x');
     ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData));

-    // 操作4:testUser1_ recover 快照snap1,目标文件为不存在的文件
-    // 预期4: 返回目标文件不存在
+    // Step 4: testUser1_ recovers from snapshot snap1 to a nonexistent
+    //         target file
+    // Expected 4: returns target-file-not-exist
     ret = CloneOrRecover("Recover", testUser1_, snapId, "/ItUser1/notExistFile",
                          true, &uuid1);
     ASSERT_EQ(kErrCodeFileNotExist, ret);
 }

-// 场景六:非lazy快照恢复场景
+// Scenario 6: non-lazy snapshot recovery
 TEST_F(SnapshotCloneServerTest, TestSnapNotLazyRecover) {
     std::string snapId;
     PrepareSnapshotForTestFile1(&snapId);

-    // 操作1: testUser1_ Recover不存在的快照,fileName=testFile1_
-    // 预期1:返回快照不存在
+    // Step 1: testUser1_ recovers from a nonexistent snapshot,
+    //         fileName=testFile1_
+    // Expected 1: returns snapshot-not-exist
     std::string uuid1;
     int ret;
     ret = CloneOrRecover("Recover", testUser1_, "UnExistSnapId4", testFile1_,
                          false, &uuid1);
     ASSERT_EQ(kErrCodeFileNotExist, ret);

-    // 操作2:testUser2_ Recover快照snap1,fileName=testFile1_
-    // 预期2: 返回用户认证失败
+    // Step 2: testUser2_ recovers from snapshot snap1, fileName=testFile1_
+    // Expected 2: returns user-authentication failure
     ret = CloneOrRecover("Recover", testUser2_, snapId, testFile1_, false,
                          &uuid1);
     ASSERT_EQ(kErrCodeInvalidUser, ret);

-    // 操作3:testUser1_ Recover快照snap1,fileName=testFile1_
-    // 预期3 返回恢复成功
+    // Step 3: testUser1_ recovers from snapshot snap1, fileName=testFile1_
+    // Expected 3: recovery succeeds
     ret = CloneOrRecover("Recover", testUser1_, snapId, testFile1_, false,
                          &uuid1);
     ASSERT_EQ(0, ret);
@@ -822,43 +823,43 @@ TEST_F(SnapshotCloneServerTest, TestSnapNotLazyRecover) {
     bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false);
     ASSERT_TRUE(success1);

-    // 验证数据正确性
+    // Verify data correctness
     std::string fakeData(4096, 'x');
     ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData));

-    // 操作4:testUser1_ recover 快照snap1,目标文件为不存在的文件
-    // 预期4: 返回目标文件不存在
+    // Step 4: testUser1_ recovers from snapshot snap1 to a nonexistent
+    //         target file
+    // Expected 4: returns target-file-not-exist
     ret = CloneOrRecover("Recover", testUser1_, snapId, "/ItUser1/notExistFile",
                          false, &uuid1);
     ASSERT_EQ(kErrCodeFileNotExist, ret);
 }

-// 场景七: lazy镜像克隆场景
+// Scenario 7: lazy image clone
 TEST_F(SnapshotCloneServerTest, TestImageLazyClone) {
-    // 操作1: testUser1_ clone不存在的镜像,fileName=ImageLazyClone1
-    // 预期1:返回文件不存在
+    // Step 1: testUser1_ clones a nonexistent image,
+    //         fileName=ImageLazyClone1
+    // Expected 1: returns file-not-exist
     std::string uuid1, uuid2, uuid3, uuid4;
     int ret;
     ret = CloneOrRecover("Clone", testUser1_, "/UnExistFile",
                          "/ItUser1/ImageLazyClone1", true, &uuid1);
     ASSERT_EQ(kErrCodeFileNotExist, ret);

-    // 操作2:testUser1_ clone 镜像testFile1_,fileName=ImageLazyClone1
-    // 预期2 返回克隆成功
+    // Step 2: testUser1_ clones image testFile1_, fileName=ImageLazyClone1
+    // Expected 2: clone succeeds
     std::string dstFile = "/ItUser1/ImageLazyClone1";
     ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true,
                          &uuid2);
     ASSERT_EQ(0, ret);

-    // 操作3: testUser1_ clone 镜像testFile1_,
-    // fileName=ImageLazyClone1 (重复克隆)
-    // 预期3:返回克隆成功(幂等)
+    // Step 3: testUser1_ clones image testFile1_ again,
+    //         fileName=ImageLazyClone1 (duplicate clone)
+    // Expected 3: clone succeeds (idempotent)
     ret = CloneOrRecover("Clone", testUser1_, testFile1_,
                          "/ItUser1/ImageLazyClone1", true, &uuid3);
     ASSERT_EQ(0, ret);

-    // 操作4:对未完成lazy克隆的文件ImageLazyClone1打快照snap1
-    // 预期4:返回文件状态异常
+    // Step 4: take snapshot snap1 of file ImageLazyClone1 while its lazy
+    //         clone has not finished
+    // Expected 4: returns invalid file status
     ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid4);
     ASSERT_EQ(kErrCodeFileStatusInvalid, ret);
     FileSnapshotInfo info2;
@@ -867,7 +868,7 @@ TEST_F(SnapshotCloneServerTest, TestImageLazyClone) {
     ASSERT_TRUE(WaitMetaInstalledSuccess(testUser1_, uuid2, true));

-    // Flatten之前验证数据正确性
+    // Verify data correctness before Flatten
     std::string fakeData1(4096, 'x');
     ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData1));
@@ -878,23 +879,23 @@
     bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid2, true);
     ASSERT_TRUE(success1);

-    // Flatten之后验证数据正确性
+    // Verify data correctness after Flatten
     std::string fakeData2(4096, 'x');
     ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData2));
 }

-// 场景八:非lazy镜像克隆场景
+// Scenario 8: non-lazy image clone
 TEST_F(SnapshotCloneServerTest, TestImageNotLazyClone) {
-    // 操作1: testUser1_ clone不存在的镜像,fileName=ImageNotLazyClone1
-    // 预期1:返回快照不存在
+    // Step 1: testUser1_ clones a nonexistent image,
+    //         fileName=ImageNotLazyClone1
+    // Expected 1: returns snapshot-not-exist
     std::string uuid1;
     int ret;
     ret = CloneOrRecover("Clone", testUser1_, "/UnExistFile",
                          "/ItUser1/ImageNotLazyClone1", false, &uuid1);
     ASSERT_EQ(kErrCodeFileNotExist, ret);

-    // 操作2:testUser1_ clone 镜像testFile1_,fileName=ImageNotLazyClone1
-    // 预期2 返回克隆成功
+    // Step 2: testUser1_ clones image testFile1_, fileName=ImageNotLazyClone1
+    // Expected 2: clone succeeds
     std::string dstFile = "/ItUser1/ImageNotLazyClone1";
     ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, false,
                          &uuid1);
@@ -903,23 +904,23 @@ TEST_F(SnapshotCloneServerTest, TestImageNotLazyClone) {
     bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true);
     ASSERT_TRUE(success1);

-    // 操作3: testUser1_ clone 镜像testFile1_,
-    // fileName=ImageNotLazyClone1 (重复克隆)
-    // 预期3:返回克隆成功(幂等)
+    // Step 3: testUser1_ clones image testFile1_ again,
+    //         fileName=ImageNotLazyClone1 (duplicate clone)
+    // Expected 3: clone succeeds (idempotent)
     ret = CloneOrRecover("Clone", testUser1_, testFile1_,
                          "/ItUser1/ImageNotLazyClone1", false, &uuid1);
     ASSERT_EQ(0, ret);

-    // 验证数据正确性
+    // Verify data correctness
     std::string fakeData(4096, 'x');
     ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));
 }

-// 场景九:快照存在失败场景
+// Scenario 9: a failed snapshot already exists
 TEST_F(SnapshotCloneServerTest, TestSnapAndCloneWhenSnapHasError) {
     std::string snapId = "errorSnapUuid";
     SnapshotInfo snapInfo(snapId, testUser1_, testFile4_, "snapxxx", 0, 0, 0, 0,
-                        0, 0, kDefaultPoolset, 0, Status::error);
+                          0, 0, kDefaultPoolset, 0, Status::error);

     cluster_->metaStore_->AddSnapshot(snapInfo);
@@ -928,114 +929,113 @@
     ASSERT_GT(pid, 0);
     std::string uuid1, uuid2;

-    // 操作1: lazy clone 快照snap1
-    // 预期1:返回快照存在异常
+    // Step 1: lazy-clone snapshot snap1
+    // Expected 1: returns invalid snapshot
     int ret = CloneOrRecover("Clone", testUser1_, snapId,
                              "/ItUser2/SnapLazyClone1", true, &uuid2);
     ASSERT_EQ(kErrCodeInvalidSnapshot, ret);

-    // 操作2:非lazy clone 快照snap1
-    // 预期2:返回快照存在异常
+    // Step 2: non-lazy-clone snapshot snap1
+    // Expected 2: returns invalid snapshot
     ret = CloneOrRecover("Clone", testUser1_, snapId,
                          "/ItUser2/SnapNotLazyClone1", false, &uuid2);
     ASSERT_EQ(kErrCodeInvalidSnapshot, ret);

-    // 操作3:lazy 从 快照snap1 recover
-    // 预期3:返回快照存在异常
+    // Step 3: lazy-recover from snapshot snap1
+    // Expected 3: returns invalid snapshot
     ret = CloneOrRecover("Recover", testUser1_, snapId, testFile4_, true,
                          &uuid2);
     ASSERT_EQ(kErrCodeInvalidSnapshot, ret);

-    // 操作4:非lazy 从 快照snap1 recover
-    // 预期4:返回快照存在异常
+    // Step 4: non-lazy-recover from snapshot snap1
+    // Expected 4: returns invalid snapshot
    ret = CloneOrRecover("Recover", testUser1_, snapId, testFile4_, false,
                          &uuid2);
     ASSERT_EQ(kErrCodeInvalidSnapshot, ret);

-    // 操作5:用户testUser1_对testFile4_打快照snap1
-    // 预期5:清理失败快照,并打快照成功
+    // Step 5: user testUser1_ takes snapshot snap1 of testFile4_
+    // Expected 5: the failed snapshot is cleaned up and the new snapshot
+    //             succeeds
     ret = MakeSnapshot(testUser1_, testFile4_, "snap1", &uuid1);
     ASSERT_EQ(0, ret);

-    // 校验快照成功
+    // Verify that the snapshot succeeded
     bool success1 = CheckSnapshotSuccess(testUser1_, testFile4_, uuid1);
     ASSERT_TRUE(success1);

-    // 校验清理失败快照成功
+    // Verify that the failed snapshot was cleaned up
     FileSnapshotInfo info1;
     int retCode = GetSnapshotInfo(testUser1_, testFile4_, snapId, &info1);
     ASSERT_EQ(kErrCodeFileNotExist, retCode);
 }

-// [线上问题修复]克隆失败,回滚删除克隆卷,再次创建同样的uuid的卷的场景
+// [Online issue fix] A clone fails, the rollback deletes the clone volume,
+// and a volume with the same uuid is created again
 TEST_F(SnapshotCloneServerTest, TestCloneHasSameDest) {
     std::string uuid1, uuid2, uuid3, uuid4, uuid5, uuid6, uuid7;
-    // 操作1:testUser1_ clone 镜像testFile1_,fileName=CloneHasSameDestUUID
-    // 预期1 返回克隆成功
+    // Step 1: testUser1_ clones image testFile1_,
+    //         fileName=CloneHasSameDestUUID
+    // Expected 1: clone succeeds
     std::string dstFile = "/ItUser1/CloneHasSameDest";
     int ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true,
                              &uuid1);
     ASSERT_EQ(0, ret);

-    // 删除克隆卷
+    // Delete the clone volume
     UserInfo_t userinfo;
     userinfo.owner = testUser1_;
     int ret2 = fileClient_->Unlink(dstFile, userinfo, false);
     ASSERT_EQ(0, ret2);
-
-    // 操作2:testUser1_ 再次clone 镜像testFile1_,
+    // Step 2: testUser1_ clones image testFile1_ again,
     //        fileName=CloneHasSameDestUUID
-    // 预期2 返回克隆成功
+    // Expected 2: clone succeeds
     ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true,
                          &uuid2);
     ASSERT_EQ(0, ret);

-    // 验证数据正确性
+    // Verify data correctness
     std::string fakeData(4096, 'x');
     ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));

-    // 操作3:testUser1_ clone 镜像testFile1_,fileName=CloneHasSameDest2
-    // 预期3 返回克隆成功
+    // Step 3: testUser1_ clones image testFile1_, fileName=CloneHasSameDest2
+    // Expected 3: clone succeeds
     dstFile = "/ItUser1/CloneHasSameDest2";
     ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true,
                          &uuid3);
     ASSERT_EQ(0, ret);

-    // 删除克隆卷
+    // Delete the clone volume
     UserInfo_t userinfo2;
     userinfo2.owner = testUser1_;
     ret2 = fileClient_->Unlink(dstFile, userinfo2, false);
     ASSERT_EQ(0, ret2);
-
-    // 操作4:testUser1_ 再次clone 镜像testFile2_,
+    // Step 4: testUser1_ clones image testFile2_ again,
     //        fileName=CloneHasSameDest2
-    // 预期4 返回克隆成功
+    // Expected 4: clone succeeds
     ret = CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, true,
                          &uuid4);
     ASSERT_EQ(0, ret);

-    // 验证数据正确性
+    // Verify data correctness
     ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));

-    // 验证再次克隆lazyflag不同的情况
-    // 操作5:testUser1_ clone 镜像testFile1_,fileName=CloneHasSameDest3
-    // 预期5 返回克隆成功
+    // Verify cloning again with a different lazy flag
+    // Step 5: testUser1_ clones image testFile1_, fileName=CloneHasSameDest3
+    // Expected 5: clone succeeds
     dstFile = "/ItUser1/CloneHasSameDest3";
     ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true,
                          &uuid5);
     ASSERT_EQ(0, ret);

-    // 删除克隆卷
+    // Delete the clone volume
     UserInfo_t userinfo3;
     userinfo2.owner = testUser1_;
     ret2 = fileClient_->Unlink(dstFile, userinfo2, false);
     ASSERT_EQ(0, ret2);

-    // 操作6:testUser1_ 再次非lazy clone 镜像testFile2_,
+    // Step 6: testUser1_ non-lazy-clones image testFile2_ again,
     //        fileName=CloneHasSameDest3
-    // 预期6 返回克隆成功
+    // Expected 6: clone succeeds
     ret = CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, false,
                          &uuid6);
     ASSERT_EQ(0, ret);
@@ -1043,30 +1043,31 @@ TEST_F(SnapshotCloneServerTest, TestCloneHasSameDest) {
     bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid6, true);
     ASSERT_TRUE(success1);

-    // 验证数据正确性
+    // Verify data correctness
     ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));

-    // 删除克隆卷
+    // Delete the clone volume
     UserInfo_t userinfo4;
     userinfo2.owner = testUser1_;
     ret2 = fileClient_->Unlink(dstFile, userinfo2, false);
     ASSERT_EQ(0, ret2);

-    // 操作7:testUser1_ 再次非lazy clone 镜像testFile2_,
+    // Step 7: testUser1_ clones image testFile2_ again, this time lazily,
     //        fileName=CloneHasSameDest3
-    // 预期7 返回克隆成功
+    // Expected 7: clone succeeds
     ret = CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, true,
                          &uuid7);
     ASSERT_EQ(0, ret);

-    // 验证数据正确性
+    // Verify data correctness
     ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));
 }

-// lazy克隆卷,删除克隆卷,再删除源卷,源卷需要可以删除
+// Lazy-clone a volume, delete the clone volumes, then delete the source
+// volume; the source volume must still be deletable afterwards
 TEST_F(SnapshotCloneServerTest, TestDeleteLazyCloneDestThenDeleteSrc) {
-    // 操作1:testUser1_ clone 镜像testFile5_,lazy克隆两个卷dstFile1,dstFile2
-    // 预期1 返回克隆成功
+    // Step 1: testUser1_ clones image testFile5_, lazy-cloning two volumes
+    //         dstFile1 and dstFile2
+    // Expected 1: both clones succeed
     std::string uuid1;
     std::string uuid2;
     std::string dstFile1 = "/dest1";
@@ -1081,29 +1082,29 @@ TEST_F(SnapshotCloneServerTest, TestDeleteLazyCloneDestThenDeleteSrc) {
         CloneOrRecover("Clone", testUser1_, testFile5_, dstFile2, true, &uuid2);
     ASSERT_EQ(0, ret);

-    // 删除源卷,删除失败,卷被占用
+    // Deleting the source volume fails because the volume is still in use
     ret = fileClient_->Unlink(testFile5_, userinfo, false);
     ASSERT_EQ(-27, ret);

-    // 操作2:删除目的卷dstFile1成功,再次删除源卷
-    // 预期2 删除失败,卷被占用
+    // Step 2: delete destination volume dstFile1 successfully, then delete
+    //         the source volume again
+    // Expected 2: deletion fails, the volume is still in use
     ret = fileClient_->Unlink(dstFile1, userinfo, false);
     ASSERT_EQ(0, ret);

     ret = fileClient_->Unlink(testFile5_, userinfo, false);
     ASSERT_EQ(-27, ret);
-
-    // 操作3:删除目的卷dstFile2成功,再次删除源卷
-    // 预期3 删除成功
+    // Step 3: delete destination volume dstFile2 successfully, then delete
+    //         the source volume again
+    // Expected 3: deletion succeeds
     ret = fileClient_->Unlink(dstFile2, userinfo, false);
     ASSERT_EQ(0, ret);

     ret = fileClient_->Unlink(testFile5_, userinfo, false);
     ASSERT_EQ(0, ret);

-    // 操作4: 等待一段时间,看垃圾记录后台能否删除
+    // Step 4: wait for a while to see whether the garbage record is deleted
+    //         in the background
     bool noRecord = false;
     for (int i = 0; i < 100; i++) {
         TaskCloneInfo info;
diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp
index a725cbe12f..cc0a47698d 100644
--- a/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp
+++ b/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp
@@ -20,24 +20,24 @@
  * Author: xuchaojie
  */

-#include
-#include
 #include
+#include
+#include
 #include

-#include "test/integration/cluster_common/cluster.h"
 #include "src/client/libcurve_file.h"
-#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h"
-#include "src/snapshotcloneserver/clone/clone_service_manager.h"
-#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h"
+#include "src/client/source_reader.h"
 #include "src/common/snapshotclone/snapshotclone_define.h"
+#include "src/snapshotcloneserver/clone/clone_service_manager.h"
 #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h"
-#include "src/client/source_reader.h"
+#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h"
+#include "test/integration/cluster_common/cluster.h"
+#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h"

 using curve::CurveCluster;
 using curve::client::FileClient;
-using curve::client::UserInfo_t;
 using curve::client::SourceReader;
+using curve::client::UserInfo_t;

 const std::string kTestPrefix = "ConSCSTest";  // NOLINT

@@ -96,11 +96,11 @@ const std::vector mdsConfigOptions{
 };

 const std::vector mdsConf1{
-    { "--graceful_quit_on_sigterm" },
+    {"--graceful_quit_on_sigterm"},
     std::string("--confPath=") + kMdsConfigPath,
     std::string("--log_dir=") + kLogPath,
     std::string("--segmentSize=") + std::to_string(segmentSize),
-    { "--stderrthreshold=3" },
+    {"--stderrthreshold=3"},
 };

 const std::vector chunkserverConfigOptions{
@@ -125,66 +125,63 @@
const std::vector snapClientConfigOptions{ const std::vector s3ConfigOptions{}; const std::vector chunkserverConf1{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "1/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "1/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "1/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "1/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "1/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "1/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "1/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "1/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "1/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "1/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "1/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "1/walfilepool.meta"}, }; const std::vector chunkserverConf2{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "2/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "2/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "2/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "2/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "2/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "2/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "2/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "2/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "2/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "2/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "2/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "2/walfilepool.meta"}, }; const std::vector chunkserverConf3{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "3/" }, - { "-chunkServerMetaUri=local://./" 
+ kTestPrefix + - "3/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "3/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "3/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "3/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "3/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "3/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "3/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "3/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "3/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "3/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "3/walfilepool.meta"}, }; const std::vector snapshotcloneserverConfigOptions{ @@ -196,7 +193,8 @@ const std::vector snapshotcloneserverConfigOptions{ std::string("server.clonePoolThreadNum=8"), std::string("server.createCloneChunkConcurrency=2"), std::string("server.recoverChunkConcurrency=2"), - // 最大快照数修改为3,以测试快照达到上限的用例 + // Modify the maximum number of snapshots to 3 to test cases where snapshots + // reach the upper limit std::string("server.maxSnapshotLimit=3"), std::string("client.methodRetryTimeSec=1"), std::string("server.clientAsyncMethodRetryTimeSec=1"), @@ -211,7 +209,7 @@ const std::vector snapshotcloneserverConfigOptions{ const std::vector snapshotcloneConf{ std::string("--conf=") + kSCSConfigPath, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector clientConfigOptions{ @@ -222,7 +220,8 @@ const std::vector clientConfigOptions{ const char* testFile1_ = "/concurrentItUser1/file1"; const char* testFile2_ = - "/concurrentItUser1/file2"; // 将在TestImage2Clone2Success中删除 //NOLINT + "/concurrentItUser1/file2"; // Will be removed from + // TestImage2Clone2Success//NOLINT const char* testFile3_ = "/concurrentItUser2/file3"; const char* testFile4_ = "/concurrentItUser1/file3"; const char* testUser1_ = "concurrentItUser1"; @@ -240,16 +239,16 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); - // 初始化db + // Initialize db system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); system(std::string("rm -rf " + kTestPrefix + "1").c_str()); system(std::string("rm -rf " + kTestPrefix + "2").c_str()); system(std::string("rm -rf " + kTestPrefix + "3").c_str()); - // 启动etcd + // Start etcd pid_t pid = cluster_->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, - std::vector{ "--name=" + kTestPrefix }); + std::vector{"--name=" + kTestPrefix}); LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort << "::" << kEtcdPeerIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); @@ -257,13 +256,13 @@ class SnapshotCloneServerTest : public 
::testing::Test {
         cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions);

-        // 启动一个mds
+        // Start an mds
         pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1,
                                        true);
         LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid;
         ASSERT_GT(pid, 0);

-        // 创建物理池
+        // Create a physical pool
         ASSERT_EQ(0, cluster_->PreparePhysicalPool(
                          1,
                          "./test/integration/snapshotcloneserver/"
@@ -272,21 +271,18 @@ class SnapshotCloneServerTest : public ::testing::Test {

         // format chunkfilepool and walfilepool
         std::vector threadpool(3);

-        threadpool[0] =
-            std::thread(&CurveCluster::FormatFilePool, cluster_,
-                        "./" + kTestPrefix + "1/chunkfilepool/",
-                        "./" + kTestPrefix + "1/chunkfilepool.meta",
-                        "./" + kTestPrefix + "1/chunkfilepool/", 1);
-        threadpool[1] =
-            std::thread(&CurveCluster::FormatFilePool, cluster_,
-                        "./" + kTestPrefix + "2/chunkfilepool/",
-                        "./" + kTestPrefix + "2/chunkfilepool.meta",
-                        "./" + kTestPrefix + "2/chunkfilepool/", 1);
-        threadpool[2] =
-            std::thread(&CurveCluster::FormatFilePool, cluster_,
-                        "./" + kTestPrefix + "3/chunkfilepool/",
-                        "./" + kTestPrefix + "3/chunkfilepool.meta",
-                        "./" + kTestPrefix + "3/chunkfilepool/", 1);
+        threadpool[0] = std::thread(&CurveCluster::FormatFilePool, cluster_,
+                                    "./" + kTestPrefix + "1/chunkfilepool/",
+                                    "./" + kTestPrefix + "1/chunkfilepool.meta",
+                                    "./" + kTestPrefix + "1/chunkfilepool/", 1);
+        threadpool[1] = std::thread(&CurveCluster::FormatFilePool, cluster_,
+                                    "./" + kTestPrefix + "2/chunkfilepool/",
+                                    "./" + kTestPrefix + "2/chunkfilepool.meta",
+                                    "./" + kTestPrefix + "2/chunkfilepool/", 1);
+        threadpool[2] = std::thread(&CurveCluster::FormatFilePool, cluster_,
+                                    "./" + kTestPrefix + "3/chunkfilepool/",
+                                    "./" + kTestPrefix + "3/chunkfilepool.meta",
+                                    "./" + kTestPrefix + "3/chunkfilepool/", 1);
         for (int i = 0; i < 3; i++) {
             threadpool[i].join();
         }
@@ -300,7 +296,7 @@ class SnapshotCloneServerTest : public ::testing::Test {

         cluster_->PrepareConfig(kCSConfigPath, chunkserverConfigOptions);

-        // 创建chunkserver
+        // Create the chunkservers
         pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1,
                                                chunkserverConf1);
         LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1
@@ -319,7 +315,8 @@ class SnapshotCloneServerTest : public ::testing::Test {

         std::this_thread::sleep_for(std::chrono::seconds(5));

-        // 创建逻辑池, 并睡眠一段时间让底层copyset先选主
+        // Create the logical pool, then sleep for a while so the underlying
+        // copysets can elect their leaders first
         ASSERT_EQ(0, cluster_->PrepareLogicalPool(
                          1,
                          "./test/integration/snapshotcloneserver/"
@@ -387,7 +384,7 @@ class SnapshotCloneServerTest : public ::testing::Test {
             LOG(ERROR) << "Open fail, ret = " << testfd1_;
             return false;
         }
-        // 每个chunk写前面4k数据, 写两个segment
+        // Write the first 4KB of each chunk, spanning two segments
         uint64_t totalChunk = 2ULL * segmentSize / chunkSize;
         for (uint64_t i = 0; i < totalChunk / chunkGap; i++) {
             ret =
@@ -487,9 +484,9 @@ class SnapshotCloneServerTest : public ::testing::Test {
 CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr;
 FileClient* SnapshotCloneServerTest::fileClient_ = nullptr;

-// 并发测试用例
+// Concurrent test cases

-// 这个用例测试快照层数,放在最前面
+// This case tests the snapshot depth limit, so it is placed first
 TEST_F(SnapshotCloneServerTest, TestSameFile3Snapshot) {
     std::string uuid1, uuid2, uuid3;
     int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1);
@@ -507,7 +504,8 @@ TEST_F(SnapshotCloneServerTest, TestSameFile3Snapshot) {
     bool success3 = CheckSnapshotSuccess(testUser1_, testFile1_, uuid3);
     ASSERT_TRUE(success3);

-    // 快照层数设置为3,尝试再打一次快照,超过层数失败
+    // The snapshot depth limit is set to 3; taking one more snapshot fails
+    // because it exceeds the limit
     ret = MakeSnapshot(testUser1_, testFile1_, "snap3", &uuid3);
     ASSERT_EQ(kErrCodeSnapshotCountReachLimit, ret);
@@ -586,7 +584,7 @@ TEST_F(SnapshotCloneServerTest, TestSnapSameClone1Success) {
     ret1 = CloneOrRecover("Clone", testUser1_, snapId, dstFile, true, &uuid1);
     ASSERT_EQ(0, ret1);

-    // 幂等
+    // Idempotent
     ret2 = CloneOrRecover("Clone", testUser1_, snapId, dstFile, true, &uuid2);
     ASSERT_EQ(0, ret2);
@@ -733,7 +731,7 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyCloneSnap) {
     ASSERT_TRUE(WriteFile(dstFile, testUser1_, fakeData));
     ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));

-    // 判断是否clone成功
+    // Check whether the clone succeeded
     bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true);
     ASSERT_TRUE(success1);
 }
@@ -748,7 +746,8 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyCloneImage) {
     ASSERT_TRUE(WaitMetaInstalledSuccess(testUser1_, uuid1, true));

-    // clone完成stage1之后即可对外提供服务,测试克隆卷是否能正常读取数据
+    // Once stage 1 of the clone completes, the volume can serve I/O; test
+    // whether the cloned volume can be read correctly
     std::string fakeData1(4096, 'x');
     ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData1));
@@ -760,7 +759,7 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyCloneImage) {
     ASSERT_TRUE(WriteFile(dstFile, testUser1_, fakeData2));
     ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData2));

-    // 判断是否clone成功
+    // Check whether the clone succeeded
     bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true);
     ASSERT_TRUE(success1);
 }
@@ -783,7 +782,7 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyRecoverSnap) {
     ASSERT_TRUE(WriteFile(dstFile, testUser1_, fakeData));
     ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData));

-    // 判断是否clone成功
+    // Check whether the recovery succeeded
     bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false);
     ASSERT_TRUE(success1);
 }
diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp
index 326ebe66c0..6da5478c86 100644
--- a/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp
+++ b/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp
@@ -20,15 +20,15 @@
  * Author: xuchaojie
 */

-#include
-#include
-#include
 #include
 #include
+#include
+#include
+#include

+#include "test/integration/cluster_common/cluster.h"
 #include "test/integration/snapshotcloneserver/snapshotcloneserver_module.h"
 #include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h"
-#include "test/integration/cluster_common/cluster.h"

 using curve::CurveCluster;
@@ -73,9 +73,9 @@ class SnapshotCloneServerTest : public ::testing::Test {
         cluster_ = new CurveCluster();
         ASSERT_NE(nullptr, cluster_);
         system(std::string("rm -rf ExcSCSTest.etcd").c_str());
-        pid_t pid = cluster_->StartSingleEtcd(1, kEtcdClientIpPort,
-                                              kEtcdPeerIpPort,
-                                              std::vector{ "--name=ExcSCSTest"});
+        pid_t pid = cluster_->StartSingleEtcd(
+            1, kEtcdClientIpPort, kEtcdPeerIpPort,
+            std::vector{"--name=ExcSCSTest"});
         LOG(INFO) << "etcd 1 started on " << kEtcdPeerIpPort
                   << ", pid = " << pid;
         ASSERT_GT(pid, 0);
@@ -92,22 +92,18 @@ class SnapshotCloneServerTest : public ::testing::Test {
         system(std::string("rm -rf ExcSCSTest.etcd").c_str());
     }

-    void SetUp() override {
-        fiu_init(0);
-
-    }
+    void SetUp() override { fiu_init(0); }

     void TearDown() override {
         // noop
     }

-    bool JudgeSnapTaskFailCleanTaskAndCheck(
-        const std::string &user,
-        const std::string &file,
-        const std::string &uuid,
-        SnapshotInfo *snapInfo) {
-        // 验证任务失败
+    bool JudgeSnapTaskFailCleanTaskAndCheck(const std::string& user,
+                                            const std::string& file,
+                                            const std::string& uuid,
+                                            SnapshotInfo* snapInfo) {
+        // Verify that the task failed
         FileSnapshotInfo info1;
-        int ret = GetSnapshotInfo(
-            user, file, uuid, &info1);
+        int ret = GetSnapshotInfo(user, file, uuid, &info1);
         if (ret < 0) {
             LOG(INFO) << "GetSnapshotInfo Fail"
                       << ", ret = " << ret;
@@ -126,7 +122,7 @@ class SnapshotCloneServerTest : public ::testing::Test {
             return false;
         }

-        // 验证任务不存在
+        // Verify that the task does not exist
         SnapshotInfo sinfo;
         ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo);
         if (ret != -1) {
@@ -137,28 +133,27 @@ class SnapshotCloneServerTest : public ::testing::Test {
         return true;
     }

-    bool JudgeSnapTaskFailCleanEnvAndCheck(
-        const std::string &user,
-        const std::string &file,
-        const std::string &uuid) {
+    bool JudgeSnapTaskFailCleanEnvAndCheck(const std::string& user,
+                                           const std::string& file,
+                                           const std::string& uuid) {
         SnapshotInfo snapInfo;
-        bool success = JudgeSnapTaskFailCleanTaskAndCheck(
-            user, file, uuid, &snapInfo);
+        bool success =
+            JudgeSnapTaskFailCleanTaskAndCheck(user, file, uuid, &snapInfo);
         if (!success) {
             return false;
         }
         int seqNum = snapInfo.GetSeqNum();

-        // 验证curve上无快照
+        // Verify that no snapshot remains on Curve
         FInfo fInfo;
-        int ret = server_->GetCurveFsClient()->GetSnapshot(
-            file, user, seqNum, &fInfo);
+        int ret = server_->GetCurveFsClient()->GetSnapshot(file, user, seqNum,
+                                                           &fInfo);
         if (ret != -LIBCURVE_ERROR::NOTEXIST) {
             LOG(INFO) << "AssertEnvClean Fail, snapshot exist on curve"
                       << ", ret = " << ret;
             return false;
         }

-        // 验证nos上无快照
+        // Verify that no snapshot remains on NOS
         ChunkIndexDataName indexData(file, seqNum);
         if (server_->GetDataStore()->ChunkIndexDataExist(indexData)) {
             LOG(INFO) << "AssertEnvClean Fail, snapshot exist on nos.";
@@ -167,13 +162,11 @@ class SnapshotCloneServerTest : public ::testing::Test {
         return true;
     }

-    bool JudgeCloneTaskFailCleanEnvAndCheck(
-        const std::string &user,
-        const std::string &uuid) {
-        // 验证任务状态为error
+    bool JudgeCloneTaskFailCleanEnvAndCheck(const std::string& user,
+                                            const std::string& uuid) {
+        // Verify that the task status is error
         TaskCloneInfo info1;
-        int ret = GetCloneTaskInfo(
-            user, uuid, &info1);
+        int ret = GetCloneTaskInfo(user, uuid, &info1);
         if (ret < 0) {
             LOG(INFO) << "GetCloneTask fail"
                       << ", ret = " << ret;
@@ -188,31 +181,28 @@ class SnapshotCloneServerTest : public ::testing::Test {
         return CleanCloneTaskAndCheckEnvClean(user, uuid);
     }

-    bool JudgeCloneTaskNotExistCleanEnvAndCheck(
-        const std::string &user,
-        const std::string &uuid) {
-        // 验证任务不存在
+    bool JudgeCloneTaskNotExistCleanEnvAndCheck(const std::string& user,
+                                                const std::string& uuid) {
+        // Verify that the task does not exist
         TaskCloneInfo info1;
-        int ret = GetCloneTaskInfo(
-            user, uuid, &info1);
+        int ret = GetCloneTaskInfo(user, uuid, &info1);
         if (ret != kErrCodeFileNotExist) {
             LOG(INFO) << "AsserTaskNotExist fail"
                       << ", ret = " << ret;
             return false;
         }

-        // 验证curvefs上无临时文件
+        // Verify that there are no temporary files on curvefs
         if (server_->GetCurveFsClient()->JudgeCloneDirHasFile()) {
             LOG(INFO) << "AssertEnvClean fail"
-                      << ", ret = " << ret;
+                      << ", ret = " << ret;
             return false;
         }
         return true;
     }

-    bool CleanCloneTaskAndCheckEnvClean(
-
const std::string &user, - const std::string &uuid) { + bool CleanCloneTaskAndCheckEnvClean(const std::string& user, + const std::string& uuid) { int ret = CleanCloneTask(user, uuid); if (ret < 0) { LOG(INFO) << "CleanCloneTask fail" @@ -222,7 +212,7 @@ class SnapshotCloneServerTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - // 验证任务不存在 + // Verification task does not exist TaskCloneInfo info; ret = GetCloneTaskInfo(user, uuid, &info); if (kErrCodeFileNotExist != ret) { @@ -231,34 +221,29 @@ class SnapshotCloneServerTest : public ::testing::Test { return false; } - // 验证curvefs上无临时文件 + // Verify that there are no temporary files on curvefs if (server_->GetCurveFsClient()->JudgeCloneDirHasFile()) { LOG(INFO) << "AssertEnvClean fail" - << ", ret = " << ret; + << ", ret = " << ret; return false; } return true; } - bool PrepreTestSnapshot( - const std::string &user, - const std::string &file, - const std::string &snapName, - std::string *uuid) { - int ret = MakeSnapshot(user, - file , snapName, uuid); + bool PrepreTestSnapshot(const std::string& user, const std::string& file, + const std::string& snapName, std::string* uuid) { + int ret = MakeSnapshot(user, file, snapName, uuid); if (ret < 0) { return false; } - bool success1 = CheckSnapshotSuccess(user, file, - *uuid); + bool success1 = CheckSnapshotSuccess(user, file, *uuid); return success1; } bool PrepreTestSnapshotIfNotExist() { if (testSnapId_.empty()) { - bool ret = PrepreTestSnapshot(testUser1, - testFile1, "testSnap", &testSnapId_); + bool ret = PrepreTestSnapshot(testUser1, testFile1, "testSnap", + &testSnapId_); return ret; } return true; @@ -266,53 +251,56 @@ class SnapshotCloneServerTest : public ::testing::Test { std::string testSnapId_; - static SnapshotCloneServerModule *server_; - static SnapshotCloneServerOptions *options_; + static SnapshotCloneServerModule* server_; + static SnapshotCloneServerOptions* options_; static CurveCluster* cluster_; }; -SnapshotCloneServerModule * SnapshotCloneServerTest::server_ = nullptr; -SnapshotCloneServerOptions * SnapshotCloneServerTest::options_ = nullptr; -CurveCluster * SnapshotCloneServerTest::cluster_ = nullptr; +SnapshotCloneServerModule* SnapshotCloneServerTest::server_ = nullptr; +SnapshotCloneServerOptions* SnapshotCloneServerTest::options_ = nullptr; +CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr; TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnCurvefs) { std::string uuid; std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateSnapshot", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap1", &uuid); + int ret = MakeSnapshot(user, file, "snap1", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateSnapshot"); // NOLINT SnapshotInfo snapInfo; - ASSERT_TRUE(JudgeSnapTaskFailCleanTaskAndCheck( - user, file, uuid, &snapInfo)); + ASSERT_TRUE( + JudgeSnapTaskFailCleanTaskAndCheck(user, file, uuid, &snapInfo)); } - TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnGetSnapshot) { std::string uuid; std::string user = testUser1; std::string file = testFile1; - 
fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap2", &uuid); + int ret = MakeSnapshot(user, file, "snap2", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnDeleteSnapshot) { @@ -320,18 +308,20 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnDeleteSnapshot) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.DeleteSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.DeleteSnapshot", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap3", &uuid); + int ret = MakeSnapshot(user, file, "snap3", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.DeleteSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.DeleteSnapshot"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnCheckSnapShotStatus) { @@ -339,38 +329,44 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnCheckSnapShotStatus) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CheckSnapShotStatus", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CheckSnapShotStatus", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap4", &uuid); + int ret = MakeSnapshot(user, file, "snap4", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CheckSnapShotStatus"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CheckSnapShotStatus"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, - TestCreateSnapshotFailOnGetSnapshotSegmentInfo) { + TestCreateSnapshotFailOnGetSnapshotSegmentInfo) { std::string uuid; std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshotSegmentInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetSnapshotSegmentInfo", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap5", &uuid); + int ret = MakeSnapshot(user, file, "snap5", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshotSegmentInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetSnapshotSegmentInfo"); // 
NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnReadChunkSnapshot) { @@ -378,18 +374,21 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnReadChunkSnapshot) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ReadChunkSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.ReadChunkSnapshot", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap6", &uuid); + int ret = MakeSnapshot(user, file, "snap6", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ReadChunkSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.ReadChunkSnapshot"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnGetChunkInfo) { @@ -397,18 +396,19 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnGetChunkInfo) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap7", &uuid); + int ret = MakeSnapshot(user, file, "snap7", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnAddSnapshot) { @@ -416,16 +416,20 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnAddSnapshot) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddSnapshot", // NOLINT 1, NULL, 0); - // 验证任务失败 - int ret = MakeSnapshot(user, file , "snap8", &uuid); + // Verification task failed + int ret = MakeSnapshot(user, file, "snap8", &uuid); ASSERT_EQ(-1, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddSnapshot"); // NOLINT - // 验证任务不存在 + // Verification task does not exist SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(-1, ret); @@ -436,20 +440,23 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnUpdateSnapshot) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.UpdateSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.UpdateSnapshot", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap9", &uuid); + int ret = 
MakeSnapshot(user, file, "snap9", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.UpdateSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.UpdateSnapshot"); // NOLINT - // 验证任务失败 + // Verify that the task failed FileSnapshotInfo info1; - ret = GetSnapshotInfo( - user, file, uuid, &info1); + ret = GetSnapshotInfo(user, file, uuid, &info1); ASSERT_EQ(kErrCodeInternalError, ret); @@ -462,38 +469,44 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnPutChunkIndexData) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.PutChunkIndexData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.PutChunkIndexData", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap10", &uuid); + int ret = MakeSnapshot(user, file, "snap10", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.PutChunkIndexData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.PutChunkIndexData"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, - TestCreateSnapshotFailOnDataChunkTranferComplete) { + TestCreateSnapshotFailOnDataChunkTranferComplete) { std::string uuid; std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DataChunkTranferComplete", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DataChunkTranferComplete", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap11", &uuid); + int ret = MakeSnapshot(user, file, "snap11", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DataChunkTranferComplete"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DataChunkTranferComplete"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnGetChunkIndexData) { @@ -503,16 +516,20 @@ TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnGetChunkIndexData) { ASSERT_TRUE(PrepreTestSnapshot(user, file, "snap12", &uuid)); - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verify that the deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT - // 验证任务失败 + // Verify that the task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -526,16 +543,20 @@ TEST_F(SnapshotCloneServerTest,
TestDeleteSnapshotFailOnDeleteChunkData) { ASSERT_TRUE(PrepreTestSnapshot(user, file, "snap13", &uuid)); - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DeleteChunkData", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verify that the deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DeleteChunkData"); // NOLINT - // 验证任务失败 + // Verify that the task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -549,16 +570,20 @@ TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnDeleteChunkIndexData) { ASSERT_TRUE(PrepreTestSnapshot(user, file, "snap14", &uuid)); - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkIndexData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DeleteChunkIndexData", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verify that the deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkIndexData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DeleteChunkIndexData"); // NOLINT - // 验证任务失败 + // Verify that the task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -572,16 +597,20 @@ TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnDeleteSnapshot) { ASSERT_TRUE(PrepreTestSnapshot(user, file, "snap15", &uuid)); - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.DeleteSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.DeleteSnapshot", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verify that the deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.DeleteSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.DeleteSnapshot"); // NOLINT - // 验证任务失败 + // Verify that the task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -592,220 +621,234 @@ TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnCreateCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); +
ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnCompleteCloneMeta) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnGetFileInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyCloneSnapFailOnGetOrAllocateSegmentInfo) { + TestLazyCloneSnapFailOnGetOrAllocateSegmentInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnRenameCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - 
"/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnCreateCloneChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnChangeOwner) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnGetChunkIndexData) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, 
uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnAddCloneInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyCloneSnapFailOnFileNotExistWhenRecoverChunk) { + TestLazyCloneSnapFailOnFileNotExistWhenRecoverChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(0, ret); // Flatten ret = Flatten(testUser1, uuid1); ASSERT_EQ(0, ret); - // 克隆未完成前删除目标文件 + // Delete target file before cloning is completed ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); + server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT } TEST_F(SnapshotCloneServerTest, - TestLazyCloneSnapSuccessWhenRecoverChunkFailOneTime) { + TestLazyCloneSnapSuccessWhenRecoverChunkFailOneTime) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, FIU_ONETIME); int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/cloneSuccess1", true, - &uuid1); + "/user1/cloneSuccess1", true, &uuid1); ASSERT_EQ(0, ret); // Flatten @@ -815,238 +858,251 @@ TEST_F(SnapshotCloneServerTest, bool success1 = CheckCloneOrRecoverSuccess(testUser1, uuid1, true); ASSERT_TRUE(success1); - ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile("/user1/cloneSuccess1", "", 0)); + ASSERT_EQ(LIBCURVE_ERROR::OK, server_->GetCurveFsClient()->DeleteFile( + "/user1/cloneSuccess1", "", 0)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT - ASSERT_TRUE(CleanCloneTaskAndCheckEnvClean( - testUser1, uuid1)); + 
ASSERT_TRUE(CleanCloneTaskAndCheckEnvClean(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyCloneSnapFailOnRecoverChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyCloneSnapFailOnRenameCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyCloneSnapFailOnChangeOwner) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyCloneSnapFailOnCompleteCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); 
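// Every test in this file drives the same libfiu fault-injection pattern that
// the reformatting above keeps re-wrapping: arm a named failure point, run
// the operation, then disarm the point. A minimal self-contained sketch of
// that pattern follows; the failure-point name "example/write" and the
// function WriteData() are illustrative placeholders, not code from this
// repository. Build such a sketch with -DFIU_ENABLE and link against -lfiu.
#if 0  // sketch only, not compiled as part of this test
#include <assert.h>
#include <fiu.h>          // fiu_init(), fiu_return_on()
#include <fiu-control.h>  // fiu_enable(), fiu_disable(), FIU_ONETIME

static int WriteData() {
    // Instrumented code: return -1 whenever "example/write" is armed.
    fiu_return_on("example/write", -1);
    return 0;
}

int main() {
    fiu_init(0);  // initialize the libfiu runtime once per process
    // flags == 0: the point stays armed until fiu_disable(), as in most
    // tests in this file; failnum 1 is the value reported via fiu_fail().
    fiu_enable("example/write", 1, NULL, 0);
    assert(WriteData() == -1);  // injected failure
    fiu_disable("example/write");
    // FIU_ONETIME: the point disarms itself after firing once, as in the
    // *FailOneTime tests in this file.
    fiu_enable("example/write", 1, NULL, FIU_ONETIME);
    assert(WriteData() == -1);  // fails exactly once
    assert(WriteData() == 0);   // then succeeds again
    return 0;
}
#endif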
std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnCreateCloneFile) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnCompleteCloneMeta) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnGetFileInfo) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyCloneImageFailOnGetOrAllocateSegmentInfo) { + TestLazyCloneImageFailOnGetOrAllocateSegmentInfo) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT + fiu_enable( + 
"test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } -TEST_F(SnapshotCloneServerTest, - TestLazyCloneImageFailOnRenameCloneFile) { +TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnRenameCloneFile) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnAddCloneInfo) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapImageOnChangeOwner) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + 
ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } -TEST_F(SnapshotCloneServerTest, - TestLazyCloneImageFailOnCreateCloneChunk) { +TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnCreateCloneChunk) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(0, ret); // Flatten @@ -1056,52 +1112,53 @@ TEST_F(SnapshotCloneServerTest, std::this_thread::sleep_for(std::chrono::milliseconds(3000)); ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); + server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyCloneImageFailOnFileNotExistWhenRecoverChunk) { + TestLazyCloneImageFailOnFileNotExistWhenRecoverChunk) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(0, ret); // Flatten ret = Flatten(testUser1, uuid1); ASSERT_EQ(0, ret); - // 克隆未完成前删除目标文件 + // Delete target file before cloning is completed ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); + server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT } TEST_F(SnapshotCloneServerTest, - TestLazyCloneImageSuccessWhenRecoverChunkFailOneTime) { + TestLazyCloneImageSuccessWhenRecoverChunkFailOneTime) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, FIU_ONETIME); int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/cloneSuccess2", true, - &uuid1); + "/user1/cloneSuccess2", true, &uuid1); ASSERT_EQ(0, ret); // Flatten @@ -1111,276 +1168,299 @@ TEST_F(SnapshotCloneServerTest, bool success1 = CheckCloneOrRecoverSuccess(testUser1, uuid1, true); ASSERT_TRUE(success1); - ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile("/user1/cloneSuccess2", "", 0)); + ASSERT_EQ(LIBCURVE_ERROR::OK, server_->GetCurveFsClient()->DeleteFile( + "/user1/cloneSuccess2", "", 0)); - 
fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT - ASSERT_TRUE(CleanCloneTaskAndCheckEnvClean( - testUser1, uuid1)); + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + ASSERT_TRUE(CleanCloneTaskAndCheckEnvClean(testUser1, uuid1)); } -TEST_F(SnapshotCloneServerTest, - TestNotLazyCloneImageFailOnRecoverChunk) { +TEST_F(SnapshotCloneServerTest, TestNotLazyCloneImageFailOnRecoverChunk) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } -TEST_F(SnapshotCloneServerTest, - TestNotLazyCloneImageFailOnRenameCloneFile) { +TEST_F(SnapshotCloneServerTest, TestNotLazyCloneImageFailOnRenameCloneFile) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyCloneSnapImageOnChangeOwner) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } -TEST_F(SnapshotCloneServerTest, - TestNotLazyCloneImageFailOnCompleteCloneFile) { +TEST_F(SnapshotCloneServerTest, TestNotLazyCloneImageFailOnCompleteCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile", // NOLINT + 
fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnCreateCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnCompleteCloneMeta) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnGetFileInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyRecoverSnapFailOnGetOrAllocateSegmentInfo) { + 
TestLazyRecoverSnapFailOnGetOrAllocateSegmentInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnRenameCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnCreateCloneChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnGetChunkIndexData) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT - 
ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnAddCloneInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnChangeOwner) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } - TEST_F(SnapshotCloneServerTest, - TestLazyRecoverSnapSuccessWhenRecoverChunkFailOneTime) { + TestLazyRecoverSnapSuccessWhenRecoverChunkFailOneTime) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, FIU_ONETIME); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, true, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, + &uuid1); ASSERT_EQ(0, ret); // Flatten @@ -1390,116 +1470,121 @@ TEST_F(SnapshotCloneServerTest, bool success1 = CheckCloneOrRecoverSuccess(testUser1, uuid1, false); ASSERT_TRUE(success1); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT } TEST_F(SnapshotCloneServerTest, TestNotLazyRecoverSnapFailOnRecoverChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, false, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - 
fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyRecoverSnapFailOnRenameCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, false, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyRecoverSnapFailOnChangeOwner) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, false, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyRecoverSnapFailOnCompleteCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, false, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyRecoverSnapFailOnFileNotExistWhenRecoverChunk) { + TestLazyRecoverSnapFailOnFileNotExistWhenRecoverChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( 
+ "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, true, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, + &uuid1); ASSERT_EQ(0, ret); // Flatten ret = Flatten(testUser1, uuid1); ASSERT_EQ(0, ret); - // 恢复未完成前删除目标文件 + // Delete target files before recovery is complete ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile(testFile1, "", 0)); + server_->GetCurveFsClient()->DeleteFile(testFile1, "", 0)); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT } } // namespace snapshotcloneserver diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp index 2e549688b8..b1b99953ae 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp @@ -20,16 +20,14 @@ * Author: xuchaojie */ -#include - #include "test/integration/snapshotcloneserver/snapshotcloneserver_module.h" +#include namespace curve { namespace snapshotcloneserver { -int SnapshotCloneServerModule::Start( - const SnapshotCloneServerOptions &option) { +int SnapshotCloneServerModule::Start(const SnapshotCloneServerOptions& option) { serverOption_ = option; client_ = std::make_shared(); @@ -45,13 +43,8 @@ int SnapshotCloneServerModule::Start( auto cloneRef_ = std::make_shared(); - auto core = - std::make_shared( - client_, - metaStore_, - dataStore_, - snapshotRef_, - serverOption_); + auto core = std::make_shared( + client_, metaStore_, dataStore_, snapshotRef_, serverOption_); if (core->Init() < 0) { LOG(ERROR) << "SnapshotCore init fail."; @@ -61,8 +54,7 @@ int SnapshotCloneServerModule::Start( auto taskMgr = std::make_shared(core, snapshotMetric); snapshotServiceManager_ = - std::make_shared(taskMgr, - core); + std::make_shared(taskMgr, core); if (snapshotServiceManager_->Init(serverOption_) < 0) { LOG(ERROR) << "SnapshotServiceManager init fail."; @@ -71,13 +63,9 @@ int SnapshotCloneServerModule::Start( auto cloneMetric = std::make_shared(); - auto cloneCore = std::make_shared( - client_, - metaStore_, - dataStore_, - snapshotRef_, - cloneRef_, - serverOption_); + auto cloneCore = + std::make_shared(client_, metaStore_, dataStore_, + snapshotRef_, cloneRef_, serverOption_); if (cloneCore->Init() < 0) { LOG(ERROR) << "CloneCore init fail."; return kErrCodeServerInitFail; @@ -87,28 +75,26 @@ int SnapshotCloneServerModule::Start( std::make_shared(cloneCore, cloneMetric); auto cloneServiceManagerBackend = - std::make_shared(cloneCore); + std::make_shared(cloneCore); - cloneServiceManager_ = - std::make_shared(cloneTaskMgr, - cloneCore, cloneServiceManagerBackend); + cloneServiceManager_ = std::make_shared( + cloneTaskMgr, cloneCore, cloneServiceManagerBackend); if (cloneServiceManager_->Init(serverOption_) < 0) { LOG(ERROR) << "CloneServiceManager init fail."; return kErrCodeServerInitFail; } server_ = std::make_shared(); - service_ = - std::make_shared( - snapshotServiceManager_, - cloneServiceManager_); + service_ = std::make_shared( + 
snapshotServiceManager_, cloneServiceManager_); - if (server_->AddService(service_.get(), - brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + if (server_->AddService(service_.get(), brpc::SERVER_DOESNT_OWN_SERVICE) != + 0) { LOG(ERROR) << "Failed to add snapshot_service!\n"; return kErrCodeServerInitFail; } - // 先启动clone服务再启动snapshot服务,因为删除快照依赖是否有clone引用 + // Start the clone service before the snapshot service, because deleting a + // snapshot depends on whether any clone still references it int ret = cloneServiceManager_->Start(); if (ret < 0) { LOG(ERROR) << "cloneServiceManager start fail" diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp index d4ccb66c65..8ed3364576 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp @@ -20,20 +20,20 @@ * Author: xuchaojie */ -#include -#include #include +#include +#include #include -#include "src/common/uuid.h" -#include "src/common/location_operator.h" -#include "test/integration/cluster_common/cluster.h" #include "src/client/libcurve_file.h" -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" -#include "src/snapshotcloneserver/clone/clone_service_manager.h" -#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" +#include "src/common/location_operator.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/common/uuid.h" +#include "src/snapshotcloneserver/clone/clone_service_manager.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" +#include "test/integration/cluster_common/cluster.h" +#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" using curve::CurveCluster; using curve::client::FileClient; @@ -49,27 +49,27 @@ const uint64_t chunkSize = 16ULL * 1024 * 1024; const uint64_t segmentSize = 32ULL * 1024 * 1024; const uint64_t chunkSplitSize = 8388608; -// 测试文件只写2个segment +// The test file only writes 2 segments const uint64_t testFile1AllocSegmentNum = 2; -// 一些常数定义 -const char *cloneTempDir_ = "/clone"; -const char *mdsRootUser_ = "root"; -const char *mdsRootPassword_ = "root_password"; +// Some constant definitions +const char* cloneTempDir_ = "/clone"; +const char* mdsRootUser_ = "root"; +const char* mdsRootPassword_ = "root_password"; constexpr uint32_t kProgressTransferSnapshotDataStart = 10; -const char *kEtcdClientIpPort = "127.0.0.1:10021"; -const char *kEtcdPeerIpPort = "127.0.0.1:10022"; -const char *kMdsIpPort = "127.0.0.1:10023"; -const char *kChunkServerIpPort1 = "127.0.0.1:10024"; -const char *kChunkServerIpPort2 = "127.0.0.1:10025"; -const char *kChunkServerIpPort3 = "127.0.0.1:10026"; -const char *kSnapshotCloneServerIpPort = "127.0.0.1:10027"; -const char *kSnapshotCloneServerDummyServerPort = "12002"; -const char *kLeaderCampaginPrefix = "snapshotcloneserverleaderlock1"; +const char* kEtcdClientIpPort = "127.0.0.1:10021"; +const char* kEtcdPeerIpPort = "127.0.0.1:10022"; +const char* kMdsIpPort = "127.0.0.1:10023"; +const char* kChunkServerIpPort1 = "127.0.0.1:10024"; +const char* kChunkServerIpPort2 = "127.0.0.1:10025"; +const char* kChunkServerIpPort3 = "127.0.0.1:10026"; +const char* kSnapshotCloneServerIpPort = "127.0.0.1:10027"; +const char* kSnapshotCloneServerDummyServerPort = "12002"; +const char*
kLeaderCampaginPrefix = "snapshotcloneserverleaderlock1"; -static const char *kDefaultPoolset = "default"; +static const char* kDefaultPoolset = "default"; const int kMdsDummyPort = 10028; @@ -79,27 +79,26 @@ const std::string kMdsConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_mds.conf"; -const std::string kCSConfigPath = // NOLINT +const std::string kCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_chunkserver.conf"; -const std::string kCsClientConfigPath = // NOLINT +const std::string kCsClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_cs_client.conf"; -const std::string kSnapClientConfigPath = // NOLINT +const std::string kSnapClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_snap_client.conf"; -const std::string kS3ConfigPath = // NOLINT - "./test/integration/snapshotcloneserver/config/" + kTestPrefix + - "_s3.conf"; +const std::string kS3ConfigPath = // NOLINT + "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_s3.conf"; -const std::string kSCSConfigPath = // NOLINT +const std::string kSCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_scs.conf"; -const std::string kClientConfigPath = // NOLINT +const std::string kClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_client.conf"; @@ -120,11 +119,11 @@ const std::vector mdsConfigOptions{ }; const std::vector mdsConf1{ - { "--graceful_quit_on_sigterm" }, + {"--graceful_quit_on_sigterm"}, std::string("--confPath=") + kMdsConfigPath, std::string("--log_dir=") + kLogPath, std::string("--segmentSize=") + std::to_string(segmentSize), - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector chunkserverConfigOptions{ @@ -151,66 +150,63 @@ const std::vector snapClientConfigOptions{ const std::vector s3ConfigOptions{}; const std::vector chunkserverConf1{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "1/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "1/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "1/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "1/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "1/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "1/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "1/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "1/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "1/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "1/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "1/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + 
"1/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "1/walfilepool.meta"}, }; const std::vector chunkserverConf2{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "2/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "2/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "2/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "2/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "2/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "2/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "2/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "2/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "2/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "2/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "2/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "2/walfilepool.meta"}, }; const std::vector chunkserverConf3{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "3/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "3/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "3/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "3/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "3/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "3/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "3/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "3/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "3/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "3/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "3/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "3/walfilepool.meta"}, }; const std::vector snapshotcloneserverConfigOptions{ @@ -237,7 +233,7 @@ const std::vector snapshotcloneserverConfigOptions{ const std::vector snapshotcloneConf{ 
std::string("--conf=") + kSCSConfigPath, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector clientConfigOptions{ @@ -246,8 +242,8 @@ const std::vector clientConfigOptions{ std::string("mds.rpcTimeoutMS=4000"), }; -const char *testFile1_ = "/RcvItUser1/file1"; -const char *testUser1_ = "RcvItUser1"; +const char* testFile1_ = "/RcvItUser1/file1"; +const char* testUser1_ = "RcvItUser1"; int testFd1_ = 0; namespace curve { @@ -262,16 +258,16 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); - // 初始化db + // Initialize db system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); system(std::string("rm -rf " + kTestPrefix + "1").c_str()); system(std::string("rm -rf " + kTestPrefix + "2").c_str()); system(std::string("rm -rf " + kTestPrefix + "3").c_str()); - // 启动etcd + // Start etcd pid_t pid = cluster_->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, - std::vector{ "--name=" + kTestPrefix }); + std::vector{"--name=" + kTestPrefix}); LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort << "::" << kEtcdPeerIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); @@ -281,13 +277,13 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions); - // 启动一个mds + // Start an mds pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 创建物理池 + // Creating a physical pool ASSERT_EQ(0, cluster_->PreparePhysicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -296,21 +292,18 @@ class SnapshotCloneServerTest : public ::testing::Test { // format chunkfilepool and walfilepool std::vector threadpool(3); - threadpool[0] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "1/chunkfilepool/", - "./" + kTestPrefix + "1/chunkfilepool.meta", - "./" + kTestPrefix + "1/chunkfilepool/", 2); - threadpool[1] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "2/chunkfilepool/", - "./" + kTestPrefix + "2/chunkfilepool.meta", - "./" + kTestPrefix + "2/chunkfilepool/", 2); - threadpool[2] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "3/chunkfilepool/", - "./" + kTestPrefix + "3/chunkfilepool.meta", - "./" + kTestPrefix + "3/chunkfilepool/", 2); + threadpool[0] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "1/chunkfilepool/", + "./" + kTestPrefix + "1/chunkfilepool.meta", + "./" + kTestPrefix + "1/chunkfilepool/", 2); + threadpool[1] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "2/chunkfilepool/", + "./" + kTestPrefix + "2/chunkfilepool.meta", + "./" + kTestPrefix + "2/chunkfilepool/", 2); + threadpool[2] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "3/chunkfilepool/", + "./" + kTestPrefix + "3/chunkfilepool.meta", + "./" + kTestPrefix + "3/chunkfilepool/", 2); for (int i = 0; i < 3; i++) { threadpool[i].join(); } @@ -324,7 +317,7 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kCSConfigPath, chunkserverConfigOptions); - // 创建chunkserver + // Create chunkserver pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1 @@ -343,7 +336,8 @@ class SnapshotCloneServerTest : public ::testing::Test { 
std::this_thread::sleep_for(std::chrono::seconds(5)); - // 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // Create a logical pool, then sleep for a while to let the + // underlying copysets elect their leaders first ASSERT_EQ(0, cluster_->PrepareLogicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -381,9 +375,9 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(INFO) << "Write testFile1_ success."; } - static bool CreateAndWriteFile(const std::string &fileName, - const std::string &user, - const std::string &dataSample, int *fdOut) { + static bool CreateAndWriteFile(const std::string& fileName, + const std::string& user, + const std::string& dataSample, int* fdOut) { UserInfo_t userinfo; userinfo.owner = user; int ret = fileClient_->Create(fileName, userinfo, testFile1Length); @@ -394,8 +388,8 @@ class SnapshotCloneServerTest : public ::testing::Test { return WriteFile(fileName, user, dataSample, fdOut); } - static bool WriteFile(const std::string &fileName, const std::string &user, - const std::string &dataSample, int *fdOut) { + static bool WriteFile(const std::string& fileName, const std::string& user, + const std::string& dataSample, int* fdOut) { int ret = 0; UserInfo_t userinfo; userinfo.owner = user; @@ -404,7 +398,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(ERROR) << "Open fail, ret = " << *fdOut; return false; } - // 2个segment,每个写第一个chunk + // 2 segments; write only the first chunk of each for (uint64_t i = 0; i < testFile1AllocSegmentNum; i++) { ret = fileClient_->Write(*fdOut, dataSample.c_str(), i * segmentSize, dataSample.size()); @@ -421,14 +415,14 @@ class SnapshotCloneServerTest : public ::testing::Test { return true; } - static bool CheckFileData(const std::string &fileName, - const std::string &user, - const std::string &dataSample) { + static bool CheckFileData(const std::string& fileName, + const std::string& user, + const std::string& dataSample) { UserInfo_t userinfo; userinfo.owner = user; int ret = 0; - // 检查文件状态 + // Check the file status FInfo fileInfo; ret = snapClient_->GetFileInfo(fileName, userinfo, &fileInfo); if (ret < 0) { @@ -490,7 +484,7 @@ class SnapshotCloneServerTest : public ::testing::Test { void TearDown() {} - void PrepareSnapshotForTestFile1(std::string *uuid1) { + void PrepareSnapshotForTestFile1(std::string* uuid1) { if (!hasSnapshotForTestFile1_) { int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", uuid1); ASSERT_EQ(0, ret); @@ -509,23 +503,23 @@ class SnapshotCloneServerTest : public ::testing::Test { } } - int PrepareCreateCloneFile(const std::string &fileName, FInfo *fInfoOut, + int PrepareCreateCloneFile(const std::string& fileName, FInfo* fInfoOut, bool IsRecover = false) { uint64_t seqNum = 1; if (IsRecover) { - seqNum = 2; // 恢复新文件使用版本号+1 + seqNum = 2; // Recovery uses version number + 1 for the new file } else { - seqNum = 1; // 克隆新文件使用初始版本号1 + seqNum = 1; // Cloning uses the initial version number 1 } int ret = snapClient_->CreateCloneFile( - testFile1_, fileName, - UserInfo_t(mdsRootUser_, mdsRootPassword_), testFile1Length, - seqNum, chunkSize, 0, 0, kDefaultPoolset, fInfoOut); + testFile1_, fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_), + testFile1Length, seqNum, chunkSize, 0, 0, kDefaultPoolset, + fInfoOut); return ret; } - int PrepareCreateCloneMeta(FInfo *fInfoOut, const std::string &newFileName, - std::vector *segInfoOutVec) { + int PrepareCreateCloneMeta(FInfo* fInfoOut, const std::string& newFileName, + std::vector* segInfoOutVec) { fInfoOut->fullPathName = newFileName; fInfoOut->userinfo =
UserInfo_t(mdsRootUser_, mdsRootPassword_); for (int i = 0; i < testFile1AllocSegmentNum; i++) { @@ -540,7 +534,7 @@ class SnapshotCloneServerTest : public ::testing::Test { return LIBCURVE_ERROR::OK; } - int PrepareCreateCloneChunk(const std::vector &segInfoVec, + int PrepareCreateCloneChunk(const std::vector& segInfoVec, bool IsRecover = false) { if (segInfoVec.size() != testFile1AllocSegmentNum) { LOG(ERROR) << "internal error!"; @@ -555,13 +549,14 @@ class SnapshotCloneServerTest : public ::testing::Test { name.chunkIndex_ = i * segmentSize / chunkSize; std::string location = LocationOperator::GenerateS3Location(name.ToDataChunkKey()); - // 由于测试文件每个segment只写了第一个chunk, - // 快照可以做到只转储当前写过的chunk, - // 所以从快照克隆每个segment只Create第一个chunk。 - // 而从文件克隆,由于mds不知道chunk写没写过, - // 所以需要Create全部的chunk。 + // Since only the first chunk of each segment in the test file + // was written, a snapshot dumps only the chunks that were + // actually written, so cloning from a snapshot creates only + // the first chunk of each segment. When cloning from a file, + // mds does not know which chunks have been written, so all + // chunks must be created. ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[0]; - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); LOG(INFO) << "CreateCloneChunk, location = " << location @@ -571,8 +566,10 @@ class SnapshotCloneServerTest : public ::testing::Test { << ", seqNum = " << 1 << ", csn = " << 2; int ret = snapClient_->CreateCloneChunk( location, cidInfo, - 1, // 恢复使用快照中chunk的版本号 - 2, // 恢复使用新文件的版本号, 即原文件版本号+1 + 1, // Recovery uses the chunk's version number from the + // snapshot + 2, // Recovery uses the new file's version number, i.e. the + // original file's version number + 1 chunkSize, cb); if (ret != LIBCURVE_ERROR::OK) { return ret; } @@ -585,7 +582,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LocationOperator::GenerateCurveLocation( testFile1_, i * segmentSize + j * chunkSize); ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[j]; - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); LOG(INFO) << "CreateCloneChunk, location = " << location @@ -593,11 +590,11 @@ class SnapshotCloneServerTest : public ::testing::Test { << ", copysetId = " << cidInfo.cpid_ << ", chunkId = " << cidInfo.cid_ << ", seqNum = " << 1 << ", csn = " << 0; - int ret = - snapClient_->CreateCloneChunk(location, cidInfo, - 1, // 克隆使用初始版本号1 - 0, // 克隆使用0 - chunkSize, cb); + int ret = snapClient_->CreateCloneChunk( + location, cidInfo, + 1, // Cloning uses the initial version number 1 + 0, // Cloning uses 0 + chunkSize, cb); if (ret != LIBCURVE_ERROR::OK) { return ret; } @@ -614,14 +611,14 @@ class SnapshotCloneServerTest : public ::testing::Test { return LIBCURVE_ERROR::OK; } - int PrepareCompleteCloneMeta(const std::string &uuid) { + int PrepareCompleteCloneMeta(const std::string& uuid) { std::string fileName = std::string(cloneTempDir_) + "/" + uuid; int ret = snapClient_->CompleteCloneMeta( fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_)); return ret; } - int PrepareRecoverChunk(const std::vector &segInfoVec, + int PrepareRecoverChunk(const std::vector& segInfoVec, bool IsSnapshot = false) { if (segInfoVec.size() != testFile1AllocSegmentNum) { LOG(ERROR) << "internal error!"; @@ -630,14 +627,15 @@ class SnapshotCloneServerTest : public ::testing::Test { auto tracker = std::make_shared(); if (IsSnapshot) { for (int i = 0; i < testFile1AllocSegmentNum;
i++) { - // 由于测试文件每个segment只写了第一个chunk, - // 快照可以做到只转储当前写过的chunk, - // 所以从快照克隆每个segment只Recover第一个chunk。 - // 而从文件克隆,由于mds不知道chunk写没写过, - // 所以需要Recover全部的chunk。 + // Since only the first chunk of each segment in the test file + // was written, a snapshot dumps only the chunks that were + // actually written, so cloning from a snapshot recovers only + // the first chunk of each segment. When cloning from a file, + // mds does not know which chunks have been written, so all + // chunks must be recovered. ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[0]; for (uint64_t k = 0; k < chunkSize / chunkSplitSize; k++) { - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); uint64_t offset = k * chunkSplitSize; @@ -658,7 +656,7 @@ class SnapshotCloneServerTest : public ::testing::Test { for (uint64_t j = 0; j < segmentSize / chunkSize; j++) { ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[j]; for (uint64_t k = 0; k < chunkSize / chunkSplitSize; k++) { - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); uint64_t offset = k * chunkSplitSize; @@ -686,44 +684,42 @@ class SnapshotCloneServerTest : public ::testing::Test { return LIBCURVE_ERROR::OK; } - int PrepareCompleteCloneFile(const std::string &fileName) { + int PrepareCompleteCloneFile(const std::string& fileName) { return snapClient_->CompleteCloneFile( fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_)); } - int PrepareChangeOwner(const std::string &fileName) { + int PrepareChangeOwner(const std::string& fileName) { return fileClient_->ChangeOwner( fileName, testUser1_, UserInfo_t(mdsRootUser_, mdsRootPassword_)); } int PrepareRenameCloneFile(uint64_t originId, uint64_t destinationId, - const std::string &fileName, - const std::string &newFileName) { + const std::string& fileName, + const std::string& newFileName) { return snapClient_->RenameCloneFile( UserInfo_t(mdsRootUser_, mdsRootPassword_), originId, destinationId, fileName, newFileName); } - static CurveCluster *cluster_; - static FileClient *fileClient_; - static SnapshotClient *snapClient_; + static CurveCluster* cluster_; + static FileClient* fileClient_; + static SnapshotClient* snapClient_; bool hasSnapshotForTestFile1_ = false; std::string snapIdForTestFile1_; }; -CurveCluster *SnapshotCloneServerTest::cluster_ = nullptr; -FileClient *SnapshotCloneServerTest::fileClient_ = nullptr; -SnapshotClient *SnapshotCloneServerTest::snapClient_ = nullptr; +CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr; +FileClient* SnapshotCloneServerTest::fileClient_ = nullptr; +SnapshotClient* SnapshotCloneServerTest::snapClient_ = nullptr; -// 未在curve中创建快照阶段,重启恢复 +// The snapshot has not yet been created in curve; restart and recover TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenNotCreateSnapOnCurvefs) { std::string uuid1 = UUIDGenerator().GenerateUUID(); - SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, - "snapxxx", 0, chunkSize, - segmentSize, testFile1Length, - 0, 0, kDefaultPoolset, 0, - Status::pending); + SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", 0, + chunkSize, segmentSize, testFile1Length, 0, 0, + kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); pid_t pid = cluster_->RestartSnapshotCloneServer(1); @@ -740,19 +736,18 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenNotCreateSnapOnCurvefs) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// 
已在curve中创建快照,但成功结果未返回,重启恢复 +// A snapshot has been created in curve, but the success result was not +// returned; restart and recover TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasCreateSnapOnCurvefsNotReturn) { - // 调用client接口创建快照 + // Call the client interface to create a snapshot uint64_t seq = 0; snapClient_->CreateSnapShot(testFile1_, UserInfo_t(testUser1_, ""), &seq); std::string uuid1 = UUIDGenerator().GenerateUUID(); - SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, - "snapxxx", 0, chunkSize, - segmentSize, testFile1Length, - 0, 0, kDefaultPoolset, 0, - Status::pending); + SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", 0, + chunkSize, segmentSize, testFile1Length, 0, 0, + kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); pid_t pid = cluster_->RestartSnapshotCloneServer(1); @@ -769,18 +764,18 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// 已在curve中创建快照,结果已返回,重启恢复 +// A snapshot has been created in curve and the result has been returned; +// restart and recover TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasCreateSnapOnCurvefsReturn) { - // 调用client接口创建快照 + // Call the client interface to create a snapshot uint64_t seq = 0; snapClient_->CreateSnapShot(testFile1_, UserInfo_t(testUser1_, ""), &seq); std::string uuid1 = UUIDGenerator().GenerateUUID(); SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", seq, - chunkSize, segmentSize, testFile1Length, - 0, 0, kDefaultPoolset, 0, - Status::pending); + chunkSize, segmentSize, testFile1Length, 0, 0, + kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); pid_t pid = cluster_->RestartSnapshotCloneServer(1); @@ -797,7 +792,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// 已在curve中创建快照阶段,nos上传部分快照,重启恢复 +// The snapshot has been created in curve and NOS has uploaded part of the +// snapshot data; restart and recover
TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { std::string uuid1; int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1); @@ -812,7 +808,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { } if (info1.GetSnapshotInfo().GetStatus() == Status::pending) { if (info1.GetSnapProgress() > kProgressTransferSnapshotDataStart) { - // 当进度到达转储的百分比时重启 + // Restart when the progress reaches the dump percentage pid_t pid = cluster_->RestartSnapshotCloneServer(1, true); LOG(INFO) << "SnapshotCloneServer 1 restarted, pid = " << pid; ASSERT_GT(pid, 0); @@ -836,16 +832,14 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上未创建文件 +// Restart during the CreateCloneFile phase; no file was created on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneFile"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, 0, 0, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, 0, 0, 0, CloneFileType::kFile, + false, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -860,7 +854,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上创建文件成功未返回 +// Restart during the CreateCloneFile phase; the file was created successfully +// on mds but the result was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasCreateCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -870,12 +865,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneHasCreateCloneFileSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, 0, 0, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, 0, 0, 0, CloneFileType::kFile, + false, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -890,7 +883,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上未创建segment +// Restart during the CreateCloneMeta phase; no segment was created on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -898,12 +891,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneFile(fileName, &fInfoOut)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneMeta"; - CloneInfo cloneInfo(uuid1, testUser1_, 
CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -918,7 +909,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上创建segment成功未返回 +// Restart during the CreateCloneMeta phase; the segment was created +// successfully on mds but the result was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneCreateCloneMetaSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -932,12 +924,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCreateCloneMetaSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -952,7 +942,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,未在chunkserver上创建clonechunk +// Restart during the CreateCloneChunk phase; no clone chunk was created on +// the chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -964,12 +955,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { PrepareCreateCloneMeta(&fInfoOut, fileName, &segInfoOutVec)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneChunk"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneChunk, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -984,7 +973,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,在chunkserver上创建部分clonechunk +// Restart during the CreateCloneChunk phase; only some clone chunks were +// created on the chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneCreateCloneChunkSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1000,12 +990,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCreateCloneChunkSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneChunk, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1020,7 +1008,7 @@ 
TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启 +// CompleteCloneMeta phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1034,12 +1022,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneChunk(segInfoOutVec)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCompleteCloneMeta"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1054,7 +1040,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启,同时在mds上调用CompleteCloneMeta成功但未返回 +// Restart during the CompleteCloneMeta phase; the call to CompleteCloneMeta +// on mds succeeded but the result was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneCompleteCloneMetaSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1072,12 +1059,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCompleteCloneMetaSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1092,7 +1077,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上未调用RecoverChunk +// RecoverChunk phase restarted; RecoverChunk was not called on the chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1108,12 +1093,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneMeta(uuid1)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotRecoverChunk"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRecoverChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kRecoverChunk, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1128,7 +1111,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上部分调用RecoverChunk +// Restart during the RecoverChunk phase; RecoverChunk was partially called
+// on the chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1147,12 +1131,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { std::string dstFile = "/RcvItUser1/TestRecoverCloneRecoverChunkSuccssNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRecoverChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kRecoverChunk, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1167,7 +1149,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启 +// CompleteCloneFile phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1185,12 +1167,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRecoverChunk(segInfoOutVec)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCompleteCloneFile"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1205,7 +1185,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启,但mds上CompleteCloneFile已成功未返回 +// Restart during the CompleteCloneFile phase; CompleteCloneFile succeeded on +// mds but the result was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneCompleteCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1227,12 +1208,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCompleteCloneFileSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1247,7 +1226,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// ChangeOwner阶段重启 +// ChangeOwner phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1267,12 +1246,10 @@ 
TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneFile(fileName)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotChangeOwner"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kChangeOwner, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kChangeOwner, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1287,7 +1264,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// ChangeOwner阶段重启,但mds上ChangeOwner成功未返回 +// Restart during the ChangeOwner phase; ChangeOwner succeeded on mds but the +// result was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1310,12 +1288,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { std::string dstFile = "/RcvItUser1/TestRecoverCloneChangeOwnerSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kChangeOwner, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kChangeOwner, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1330,7 +1306,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RenameCloneFile阶段重启 +// RenameCloneFile phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1352,12 +1328,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareChangeOwner(fileName)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotRenameCloneFile"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRenameCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kRenameCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1372,7 +1346,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RenameCloneFile阶段重启,但mds上已RenameCloneFile成功未返回 +// Restart during the RenameCloneFile phase; RenameCloneFile succeeded on mds +// but the result was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneRenameCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1400,12 +1375,10 @@ TEST_F(SnapshotCloneServerTest, LIBCURVE_ERROR::OK, PrepareRenameCloneFile(fInfoOut.id, fInfoOut.id, fileName, dstFile)); - 
CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRenameCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kRenameCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1420,18 +1393,16 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// 以下为Lazy模式用例 -// CreateCloneFile阶段重启,mds上未创建文件 +// The following are the Lazy mode test cases +// Restart during the CreateCloneFile phase; no file was created on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); std::string uuid1 = UUIDGenerator().GenerateUUID(); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, 0, 0, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, 0, 0, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1451,7 +1422,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上创建文件成功未返回 +// Restart during the CreateCloneFile phase; the file was created successfully +// on mds but the result was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasCreateCloneFileSuccessNotReturn) { std::string snapId; @@ -1462,12 +1434,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneFile(fileName, &fInfoOut, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, 0, 0, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, 0, 0, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1487,7 +1457,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上未创建segment +// Restart during the CreateCloneMeta phase; no segment was created on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneMeta) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1497,12 +1467,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneFile(fileName, &fInfoOut, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1522,7 +1490,8 @@ TEST_F(SnapshotCloneServerTest, 
TestRecoverCloneLazyHasNotCreateCloneMeta) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上创建segment成功未返回 +// Restart during the CreateCloneMeta phase; the segment was created +// successfully on mds but the result was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCreateCloneMetaSuccessNotReturn) { std::string snapId; @@ -1537,12 +1506,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneMeta(&fInfoOut, fileName, &segInfoOutVec)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1562,7 +1529,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,未在chunkserver上创建clonechunk +// Restart during the CreateCloneChunk phase; no clone chunk was created on +// the chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1576,12 +1544,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneMeta(&fInfoOut, fileName, &segInfoOutVec)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1601,7 +1567,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,在chunkserver上创建部分clonechunk +// Restart during the CreateCloneChunk phase; only some clone chunks were +// created on the chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCreateCloneChunkSuccessNotReturn) { std::string snapId; @@ -1618,12 +1585,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1643,7 +1608,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启 +// CompleteCloneMeta phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneMeta) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1659,12 +1624,10 @@ TEST_F(SnapshotCloneServerTest, 
TestRecoverCloneLazyHasNotCompleteCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1684,7 +1647,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneMeta) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启,同时在mds上调用CompleteCloneMeta成功但未返回 +// Restart during the CompleteCloneMeta phase; the call to CompleteCloneMeta +// on mds succeeded but the result was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCompleteCloneMetaSuccessNotReturn) { std::string snapId; @@ -1703,12 +1667,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneMeta(uuid1)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1728,7 +1690,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// ChangeOwner阶段重启 +// ChangeOwner phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1746,12 +1708,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneMeta(uuid1)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kChangeOwner, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, CloneStep::kChangeOwner, + CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1771,7 +1731,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// ChangeOwner阶段重启,但mds上ChangeOwner成功未返回 +// Restart during the ChangeOwner phase; ChangeOwner succeeded on mds but the +// result was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyChangeOwnerSuccessNotReturn) { std::string snapId; @@ -1792,12 +1753,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareChangeOwner(fileName)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kChangeOwner, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, 
CloneStep::kChangeOwner, + CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1817,7 +1776,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RenameCloneFile阶段重启 +// RenameCloneFile phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1837,12 +1796,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareChangeOwner(fileName)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRenameCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRenameCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1862,7 +1819,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RenameCloneFile阶段重启,但mds上已RenameCloneFile成功未返回 +// Restart during the RenameCloneFile phase; RenameCloneFile succeeded on mds +// but the result was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyRenameCloneFileSuccessNotReturn) { std::string snapId; @@ -1886,12 +1844,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRenameCloneFile(fInfoOut.id, testFd1_, fileName, testFile1_)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRenameCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRenameCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1911,7 +1867,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上未调用RecoverChunk +// RecoverChunk phase restarted; RecoverChunk was not called on the chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1934,12 +1890,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRenameCloneFile(fInfoOut.id, testFd1_, fileName, testFile1_)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRecoverChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRecoverChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1954,7 +1908,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上部分调用RecoverChunk +// Restart during the RecoverChunk phase; RecoverChunk was partially called +// on the chunkserver TEST_F(SnapshotCloneServerTest, 
TestRecoverCloneLazyRecoverChunkSuccssNotReturn) { std::string snapId; @@ -1980,12 +1935,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRecoverChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRecoverChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRecoverChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -2000,7 +1953,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启 +// CompleteCloneFile phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -2025,12 +1978,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRecoverChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -2045,7 +1996,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启,但mds上CompleteCloneFile已成功未返回 +// Restart during the CompleteCloneFile phase; CompleteCloneFile succeeded on +// mds but the result was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCompleteCloneFileSuccessNotReturn) { std::string snapId; @@ -2073,12 +2025,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneFile(testFile1_)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp index f56bae71e7..94d648ab86 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp @@ -20,22 +20,23 @@ * Author: hzsunjianliang */ -#include -#include -#include #include +#include +#include +#include + #include // NOLINT #include // NOLINT -#include "test/integration/cluster_common/cluster.h" #include "src/client/libcurve_file.h" #include "src/snapshotcloneserver/snapshotclone_server.h" +#include "test/integration/cluster_common/cluster.h" #include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" #include "test/util/config_generator.h" const 
std::string kTestPrefix = "MainSCSTest"; // NOLINT -// 一些常数定义 +// Some constant definitions const char* cloneTempDir_ = "/clone"; const char* mdsRootUser_ = "root"; const char* mdsRootPassword_ = "root_password"; @@ -56,13 +57,12 @@ const std::string kEtcdName = kTestPrefix; // NOLINT const std::string kMdsConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_mds.conf"; -const std::string kSnapClientConfigPath = // NOLINT +const std::string kSnapClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_snap_client.conf"; -const std::string kS3ConfigPath = // NOLINT - "./test/integration/snapshotcloneserver/config/" + kTestPrefix + - "_s3.conf"; -const std::string kSCSConfigPath = // NOLINT +const std::string kS3ConfigPath = // NOLINT + "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_s3.conf"; +const std::string kSCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_scs.conf"; @@ -81,11 +81,11 @@ const std::vector mdsConfigOptions{ }; const std::vector mdsConf1{ - { "--graceful_quit_on_sigterm" }, + {"--graceful_quit_on_sigterm"}, std::string("--confPath=") + kMdsConfigPath, std::string("--log_dir=") + kLogPath, std::string("--segmentSize=") + std::to_string(segmentSize), - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector snapClientConfigOptions{ @@ -119,7 +119,7 @@ const std::vector snapshotcloneserverConfigOptions{ const std::vector snapshotcloneConf{ std::string("--conf=") + kSCSConfigPath, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; namespace curve { @@ -135,11 +135,11 @@ class SnapshotCloneServerMainTest : public ::testing::Test { cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); - // 初始化db + // Initialize db std::string rmcmd = "rm -rf " + std::string(kEtcdName) + ".etcd"; system(rmcmd.c_str()); - // 启动etcd + // Start etcd pid_t pid = cluster_->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, std::vector{"--name=" + std::string(kEtcdName)}); @@ -150,7 +150,7 @@ class SnapshotCloneServerMainTest : public ::testing::Test { cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions); - // 启动一个mds + // Start an mds pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; @@ -201,7 +201,7 @@ TEST_F(SnapshotCloneServerMainTest, testmain) { std::this_thread::sleep_for(std::chrono::seconds(2)); - // 测试验证是否状态为active + // Test and verify if the status is active // "curl "127.0.0.1:port/vars/snapshotcloneserver_status""; std::string cmd = "curl \"127.0.0.1:" + std::string(kSnapshotCloneServerDummyServerPort) + diff --git a/test/kvstorageclient/etcdclient_test.cpp b/test/kvstorageclient/etcdclient_test.cpp index ff92a579f3..8bb7f66138 100644 --- a/test/kvstorageclient/etcdclient_test.cpp +++ b/test/kvstorageclient/etcdclient_test.cpp @@ -20,18 +20,20 @@ * Author: lixiaocui1 */ -#include #include -#include //NOLINT +#include + #include //NOLINT #include #include -#include "src/kvstorageclient/etcd_client.h" -#include "src/mds/nameserver2/helper/namespace_helper.h" -#include "src/common/timeutility.h" +#include //NOLINT + +#include "proto/nameserver2.pb.h" #include "src/common/concurrent/concurrent.h" +#include "src/common/timeutility.h" +#include "src/kvstorageclient/etcd_client.h" #include "src/mds/common/mds_define.h" -#include "proto/nameserver2.pb.h" +#include 
"src/mds/nameserver2/helper/namespace_helper.h" namespace curve { namespace kvstorage { @@ -43,7 +45,7 @@ using ::curve::mds::NameSpaceStorageCodec; using ::curve::mds::PageFileChunkInfo; using ::curve::mds::PageFileSegment; -// 接口测试 +// Interface testing class TestEtcdClinetImp : public ::testing::Test { protected: TestEtcdClinetImp() {} @@ -63,8 +65,9 @@ class TestEtcdClinetImp : public ::testing::Test { ASSERT_TRUE(false); } else if (0 == etcdPid) { /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, + * as it may cause deadlock!!! */ ASSERT_EQ(0, execlp("etcd", "etcd", "--listen-client-urls", @@ -75,7 +78,7 @@ class TestEtcdClinetImp : public ::testing::Test { exit(0); } - // 一定时间内尝试init直到etcd完全起来 + // Try init for a certain period of time until etcd is fully recovered uint64_t now = ::curve::common::TimeUtility::GetTimeofDaySec(); bool initSuccess = false; while (::curve::common::TimeUtility::GetTimeofDaySec() - now <= 50) { @@ -108,8 +111,8 @@ class TestEtcdClinetImp : public ::testing::Test { TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { // 1. put file - // - file0~file9 put到etcd中 - // - file6有快照 + // - file0~file9 put into etcd + // - file6 has a snapshot std::map keyMap; std::map fileName; FileInfo fileInfo7, fileInfo8; @@ -170,7 +173,7 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { } } - // 2. get file, 可以正确获取并解码file0~file9 + // 2. get file, which can correctly obtain and decode file0~file9 for (int i = 0; i < keyMap.size(); i++) { std::string out; int errCode = client_->Get(keyMap[i], &out); @@ -180,7 +183,7 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(fileName[i], fileinfo.filename()); } - // 3. list file, 可以list到file0~file9 + // 3. list file, which can be listed to file0~file9 std::vector listRes; std::vector> listRes2; int errCode = client_->List("01", "02", &listRes2); @@ -193,7 +196,8 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(fileName[i], finfo.filename()); } - // 4. delete file, 删除file0~file4,这部分文件不能再获取到 + // 4. Delete file, delete file0~file4, these files cannot be retrieved + // anymore for (int i = 0; i < keyMap.size() / 2; i++) { ASSERT_EQ(EtcdErrCode::EtcdOK, client_->Delete(keyMap[i])); // can not get delete file @@ -201,13 +205,13 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(EtcdErrCode::EtcdKeyNotExist, client_->Get(keyMap[i], &out)); } - // 5. rename file: rename file9 ~ file10, file10本来不存在 - Operation op1{OpType::OpDelete, const_cast(keyMap[9].c_str()), - const_cast(fileInfo9.c_str()), + // 5. Rename file: rename file9~file10, file10 does not originally exist + Operation op1{OpType::OpDelete, const_cast(keyMap[9].c_str()), + const_cast(fileInfo9.c_str()), static_cast(keyMap[9].size()), static_cast(fileInfo9.size())}; - Operation op2{OpType::OpPut, const_cast(fileKey10.c_str()), - const_cast(fileInfo10.c_str()), + Operation op2{OpType::OpPut, const_cast(fileKey10.c_str()), + const_cast(fileInfo10.c_str()), static_cast(fileKey10.size()), static_cast(fileInfo10.size())}; std::vector ops{op1, op2}; @@ -222,12 +226,12 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(fileName10, fileinfo.filename()); // 6. 
snapshot of keyMap[6] - Operation op3{OpType::OpPut, const_cast(keyMap[6].c_str()), - const_cast(fileInfo6.c_str()), + Operation op3{OpType::OpPut, const_cast(keyMap[6].c_str()), + const_cast(fileInfo6.c_str()), static_cast(keyMap[6].size()), static_cast(fileInfo6.size())}; - Operation op4{OpType::OpPut, const_cast(snapshotKey6.c_str()), - const_cast(snapshotInfo6.c_str()), + Operation op4{OpType::OpPut, const_cast(snapshotKey6.c_str()), + const_cast(snapshotInfo6.c_str()), static_cast(snapshotKey6.size()), static_cast(snapshotInfo6.size())}; ops.clear(); @@ -258,9 +262,8 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ("200", out); // 8. rename file: rename file7 ~ file8 - Operation op8{OpType::OpDelete, const_cast(keyMap[7].c_str()), - const_cast(""), static_cast(keyMap[7].size()), - 0}; + Operation op8{OpType::OpDelete, const_cast(keyMap[7].c_str()), + const_cast(""), static_cast(keyMap[7].size()), 0}; FileInfo newFileInfo7; newFileInfo7.CopyFrom(fileInfo7); newFileInfo7.set_parentid(fileInfo8.parentid()); @@ -271,17 +274,17 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { std::string encodeNewFileInfo7; ASSERT_TRUE(newFileInfo7.SerializeToString(&encodeNewFileInfo7)); Operation op9{OpType::OpPut, - const_cast(encodeNewFileInfo7Key.c_str()), - const_cast(encodeNewFileInfo7.c_str()), + const_cast(encodeNewFileInfo7Key.c_str()), + const_cast(encodeNewFileInfo7.c_str()), static_cast(encodeNewFileInfo7Key.size()), static_cast(encodeNewFileInfo7.size())}; ops.clear(); ops.emplace_back(op8); ops.emplace_back(op9); ASSERT_EQ(EtcdErrCode::EtcdOK, client_->TxnN(ops)); - // 不能获取 file7 + // Unable to obtain file7 ASSERT_EQ(EtcdErrCode::EtcdKeyNotExist, client_->Get(keyMap[7], &out)); - // 成功获取rename以后的file7 + // Successfully obtained file7 after renam ASSERT_EQ(EtcdErrCode::EtcdOK, client_->Get(keyMap[8], &out)); ASSERT_TRUE(NameSpaceStorageCodec::DecodeFileInfo(out, &fileinfo)); ASSERT_EQ(newFileInfo7.filename(), fileinfo.filename()); @@ -304,8 +307,8 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(EtcdErrCode::EtcdDeadlineExceeded, client_->TxnN(ops)); client_->SetTimeout(5000); - Operation op5{OpType(5), const_cast(snapshotKey6.c_str()), - const_cast(snapshotInfo6.c_str()), + Operation op5{OpType(5), const_cast(snapshotKey6.c_str()), + const_cast(snapshotInfo6.c_str()), static_cast(snapshotKey6.size()), static_cast(snapshotInfo6.size())}; ops.clear(); @@ -321,7 +324,7 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { } TEST_F(TestEtcdClinetImp, test_ListWithLimitAndRevision) { - // 准备一批数据 + // Prepare a batch of data // "011" "013" "015" "017" "019" for (int i = 1; i <= 9; i += 2) { std::string key = std::string("01") + std::to_string(i); @@ -336,13 +339,13 @@ TEST_F(TestEtcdClinetImp, test_ListWithLimitAndRevision) { ASSERT_EQ(EtcdErrCode::EtcdOK, client_->Put(key, value)); } - // 获取当前revision - // 通过GetCurrentRevision获取 + // Obtain the current revision + // Obtained through GetCurrentRevision int64_t curRevision; ASSERT_EQ(EtcdErrCode::EtcdOK, client_->GetCurrentRevision(&curRevision)); LOG(INFO) << "get current revision: " << curRevision; - // 根据当前revision获取前5个key-value + // Obtain the top 5 key values based on the current revision std::vector out; std::string lastKey; int res = client_->ListWithLimitAndRevision("01", "", 5, curRevision, &out, @@ -355,7 +358,7 @@ TEST_F(TestEtcdClinetImp, test_ListWithLimitAndRevision) { ASSERT_EQ(value, out[i - 1]); } - // 根据当前revision获取后5个key-value + // Obtain the last 5 key values based on the current 
revision out.clear(); res = client_->ListWithLimitAndRevision(lastKey, "", 5, curRevision, &out, &lastKey); @@ -395,37 +398,41 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { uint64_t leaderOid; { - // 1. leader1竞选成功,client退出后leader2竞选成功 + // 1. leader1 wins the campaign; after the client exits, leader2 wins + // the campaign LOG(INFO) << "test case1 start..."; - // 启动一个线程竞选leader + // Start a thread to campaign for leader int electionTimeoutMs = 0; uint64_t targetOid; common::Thread thread1(&EtcdClientImp::CampaignLeader, client_, pfx, leaderName1, sessionnInterSec, electionTimeoutMs, &targetOid); - // 等待线程1执行完成, 线程1执行完成就说明竞选成功, - // 否则electionTimeoutMs为0的情况下会一直hung在里面 + // Wait for thread 1 to finish; its completion means the campaign + // succeeded. Otherwise, with electionTimeoutMs set to 0, it would + // hang in there forever thread1.join(); LOG(INFO) << "thread 1 exit."; client_->CloseClient(); - // 启动第二个线程竞选leader + // Start a second thread to campaign for leader auto client2 = std::make_shared(); ASSERT_EQ(0, client2->Init(conf, dialtTimeout, retryTimes)); common::Thread thread2(&EtcdClientImp::CampaignLeader, client2, pfx, leaderName2, sessionnInterSec, electionTimeoutMs, &leaderOid); - // 线程1退出后,leader2会当选 + // After thread1 exits, leader2 will be elected thread2.join(); LOG(INFO) << "thread 2 exit."; - // leader2为leader的情况下此时观察leader1的key应该发现session过期 + // If leader2 is the leader, observing the key of leader1 at this time + // should reveal that the session has expired ASSERT_EQ(EtcdErrCode::EtcdObserverLeaderInternal, client2->LeaderObserve(targetOid, leaderName1)); client2->CloseClient(); } { - // 2. leader1竞选成功后,不退出; leader2竞选超时 + // 2. After leader1 wins the campaign, it does not quit; leader2's + // campaign times out LOG(INFO) << "test case2 start..."; int electionTimeoutMs = 1000; auto client1 = std::make_shared(); @@ -436,7 +443,7 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { thread1.join(); LOG(INFO) << "thread 1 exit."; - // leader2再次竞选 + // leader2 campaigns again common::Thread thread2(&EtcdClientImp::CampaignLeader, client1, pfx, leaderName2, sessionnInterSec, electionTimeoutMs, &leaderOid); @@ -446,8 +453,9 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { } { - // 3. leader1竞选成功后,删除key; leader2竞选成功; observe leader1改变; - // observer leader2的过程中etcd挂掉 + // 3. 
After leader1 wins the campaign, delete the key; leader2 wins + // the campaign; observe that leader1 changed; + // etcd crashes while observing leader2 LOG(INFO) << "test case3 start..."; uint64_t targetOid; int electionTimeoutMs = 0; @@ -458,17 +466,17 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { &targetOid); thread1.join(); LOG(INFO) << "thread 1 exit."; - // leader1卸任leader + // leader1 resigns as leader ASSERT_EQ(EtcdErrCode::EtcdLeaderResiginSuccess, client1->LeaderResign(targetOid, 1000)); - // leader2当选 + // leader2 is elected common::Thread thread2(&EtcdClientImp::CampaignLeader, client1, pfx, leaderName2, sessionnInterSec, electionTimeoutMs, &leaderOid); thread2.join(); - // leader2启动线程observe + // leader2 starts an observe thread common::Thread thread3(&EtcdClientImp::LeaderObserve, client1, targetOid, leaderName2); std::this_thread::sleep_for(std::chrono::seconds(1)); @@ -477,7 +485,7 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { client1->CloseClient(); LOG(INFO) << "thread 2 exit."; - // 使得etcd完全停掉 + // Let etcd stop completely std::this_thread::sleep_for(std::chrono::seconds(2)); } } @@ -490,12 +498,13 @@ TEST_F(TestEtcdClinetImp, test_ListSegment) { segment.set_logicalpoolid(11); int size = segment.segmentsize() / segment.chunksize(); for (uint32_t i = 0; i < size; i++) { - PageFileChunkInfo *chunkinfo = segment.add_chunks(); + PageFileChunkInfo* chunkinfo = segment.add_chunks(); chunkinfo->set_chunkid(i + 1); chunkinfo->set_copysetid(i + 1); } - // 放入segment,前三个属于文件1,后四个属于文件2 + // Put the segments in, with the first three belonging to file1 and the + // last four belonging to file2 uint64_t id1 = 101; uint64_t id2 = 100001; for (uint32_t i = 0; i < 7; ++i) { @@ -514,7 +523,7 @@ TEST_F(TestEtcdClinetImp, test_ListSegment) { LOG(INFO) << segment.startoffset(); } - // 获取文件1的segment + // Obtain the segments of file1 std::string startKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id1, 0); std::string endKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id1 + 1, 0); @@ -527,7 +536,7 @@ TEST_F(TestEtcdClinetImp, test_ListSegment) { ASSERT_EQ(i * 1024, segment2.startoffset()); } - // 获取文件2的segment + // Obtain the segments of file2 startKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id2, 0); endKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id2 + 1, 0); out.clear(); diff --git a/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp b/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp index 7fa055321b..222f76a6bc 100644 --- a/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp +++ b/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp @@ -20,24 +20,26 @@ * Author: lixiaocui */ -#include -#include #include "src/mds/heartbeat/chunkserver_healthy_checker.h" + +#include +#include + #include "src/mds/topology/topology_item.h" #include "test/mds/mock/mock_topology.h" +using ::curve::mds::topology::MockTopology; +using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; -using ::testing::_; -using ::curve::mds::topology::MockTopology; using ::curve::mds::topology::ChunkServer; using ::curve::mds::topology::ChunkServerStatus; -using ::curve::mds::topology::OnlineState; using ::curve::mds::topology::CopySetKey; -using ::curve::mds::topology::kTopoErrCodeSuccess; using ::curve::mds::topology::kTopoErrCodeInternalError; +using ::curve::mds::topology::kTopoErrCodeSuccess; +using ::curve::mds::topology::OnlineState; namespace curve { namespace mds { @@ -53,7 +55,7 
@@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { HeartbeatInfo info; { - // chunkserver首次更新heartbeatInfo + // Chunkserver updates heartbeatInfo for the first time checker->UpdateLastReceivedHeartbeatTime(1, steady_clock::now()); checker->UpdateLastReceivedHeartbeatTime( 2, steady_clock::now() - std::chrono::milliseconds(4000)); @@ -65,8 +67,7 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { 6, steady_clock::now() - std::chrono::milliseconds(10000)); checker->UpdateLastReceivedHeartbeatTime( 7, steady_clock::now() - std::chrono::milliseconds(10000)); - checker->UpdateLastReceivedHeartbeatTime( - 8, steady_clock::now()); + checker->UpdateLastReceivedHeartbeatTime(8, steady_clock::now()); checker->UpdateLastReceivedHeartbeatTime( 9, steady_clock::now() - std::chrono::milliseconds(4000)); checker->UpdateLastReceivedHeartbeatTime( @@ -94,30 +95,31 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { } { - // chunkserver-1 更新为online - // chunkserver-2 心跳miss,保持unstable - // chunkserver-3,chunkserver-5,chunkserver-6心跳offline, - // chunkserver-3的retired状态会被更新, 从心跳map中移除 - // chunkserver-5已经是retired状态,无需更新 - // chunkserver-6 get info失败, 未成功更新状态 - // chunnkserver-7 update失败, 未成功更新状态 - // chunkserver-8, pendding && online, 更新为onLine - // chunkserver-9, pendding && unstable, 更新为retired - // chunkserver-10, pendding && offline, 更新为retired + // chunkserver-1 is updated to online + // chunkserver-2 misses the heartbeat and stays unstable + // chunkserver-3, chunkserver-5, chunkserver-6 heartbeat offline: + // the retired status of chunkserver-3 will be updated, and it is + // removed from the heartbeat map; chunkserver-5 is already retired + // and needs no update; chunkserver-6 get info failed, status not + // successfully updated; chunkserver-7 update failed, status not + // successfully updated; chunkserver-8, pendding && online, updated to + // online; chunkserver-9, pendding && unstable, updated to retired; + // chunkserver-10, pendding && offline, updated to retired EXPECT_CALL(*topology, UpdateChunkServerOnlineState(_, _)) - .Times(7).WillRepeatedly(Return(kTopoErrCodeSuccess)); - ChunkServer cs2(2, "", "", 1, "", 0, "", - ChunkServerStatus::READWRITE, OnlineState::UNSTABLE); - ChunkServer cs3(3, "", "", 1, "", 0, "", - ChunkServerStatus::READWRITE, OnlineState::UNSTABLE); - ChunkServer cs5(5, "", "", 1, "", 0, "", - ChunkServerStatus::RETIRED, OnlineState::UNSTABLE); - ChunkServer cs7(7, "", "", 1, "", 0, "", - ChunkServerStatus::READWRITE, OnlineState::UNSTABLE); - ChunkServer cs9(9, "", "", 1, "", 0, "", - ChunkServerStatus::PENDDING, OnlineState::UNSTABLE); - ChunkServer cs10(10, "", "", 1, "", 0, "", - ChunkServerStatus::PENDDING, OnlineState::UNSTABLE); + .Times(7) + .WillRepeatedly(Return(kTopoErrCodeSuccess)); + ChunkServer cs2(2, "", "", 1, "", 0, "", ChunkServerStatus::READWRITE, + OnlineState::UNSTABLE); + ChunkServer cs3(3, "", "", 1, "", 0, "", ChunkServerStatus::READWRITE, + OnlineState::UNSTABLE); + ChunkServer cs5(5, "", "", 1, "", 0, "", ChunkServerStatus::RETIRED, + OnlineState::UNSTABLE); + ChunkServer cs7(7, "", "", 1, "", 0, "", ChunkServerStatus::READWRITE, + OnlineState::UNSTABLE); + ChunkServer cs9(9, "", "", 1, "", 0, "", ChunkServerStatus::PENDDING, + OnlineState::UNSTABLE); + ChunkServer cs10(10, "", "", 1, "", 0, "", ChunkServerStatus::PENDDING, + OnlineState::UNSTABLE); EXPECT_CALL(*topology, GetChunkServer(2, _)) .WillOnce(DoAll(SetArgPointee<1>(cs2), Return(true))); EXPECT_CALL(*topology, GetChunkServer(3, _)) @@ -128,8 
+130,7 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { .WillOnce(Return(std::vector{})); EXPECT_CALL(*topology, GetChunkServer(5, _)) .WillOnce(DoAll(SetArgPointee<1>(cs5), Return(true))); - EXPECT_CALL(*topology, GetChunkServer(6, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*topology, GetChunkServer(6, _)).WillOnce(Return(false)); EXPECT_CALL(*topology, GetChunkServer(7, _)) .WillOnce(DoAll(SetArgPointee<1>(cs7), Return(true))); EXPECT_CALL(*topology, GetChunkServer(9, _)) @@ -164,15 +165,13 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { } { - // chunkserver 2, 6 ,7 收到心跳 - checker->UpdateLastReceivedHeartbeatTime( - 2, steady_clock::now()); - checker->UpdateLastReceivedHeartbeatTime( - 6, steady_clock::now()); - checker->UpdateLastReceivedHeartbeatTime( - 7, steady_clock::now()); + // chunkserver 2, 6, 7 Heartbeat received + checker->UpdateLastReceivedHeartbeatTime(2, steady_clock::now()); + checker->UpdateLastReceivedHeartbeatTime(6, steady_clock::now()); + checker->UpdateLastReceivedHeartbeatTime(7, steady_clock::now()); EXPECT_CALL(*topology, UpdateChunkServerOnlineState(_, _)) - .Times(3).WillRepeatedly(Return(kTopoErrCodeSuccess)); + .Times(3) + .WillRepeatedly(Return(kTopoErrCodeSuccess)); checker->CheckHeartBeatInterval(); ASSERT_TRUE(checker->GetHeartBeatInfo(2, &info)); ASSERT_EQ(OnlineState::ONLINE, info.state); diff --git a/test/mds/heartbeat/heartbeat_manager_test.cpp b/test/mds/heartbeat/heartbeat_manager_test.cpp index 54c4397287..6f1b539405 100644 --- a/test/mds/heartbeat/heartbeat_manager_test.cpp +++ b/test/mds/heartbeat/heartbeat_manager_test.cpp @@ -20,52 +20,54 @@ * Author: lixiaocui */ -#include +#include "src/mds/heartbeat/heartbeat_manager.h" + #include +#include #include -#include "src/mds/heartbeat/heartbeat_manager.h" -#include "src/mds/heartbeat/chunkserver_healthy_checker.h" + #include "src/common/timeutility.h" +#include "src/mds/heartbeat/chunkserver_healthy_checker.h" +#include "test/mds/heartbeat/common.h" #include "test/mds/mock/mock_coordinator.h" -#include "test/mds/mock/mock_topology.h" #include "test/mds/mock/mock_topoAdapter.h" -#include "test/mds/heartbeat/common.h" +#include "test/mds/mock/mock_topology.h" -using ::testing::Return; -using ::testing::SetArgPointee; -using ::testing::DoAll; -using ::testing::_; using ::curve::mds::topology::MockTopology; using ::curve::mds::topology::MockTopologyStat; +using ::testing::_; +using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace mds { namespace heartbeat { class TestHeartbeatManager : public ::testing::Test { protected: - TestHeartbeatManager() {} - ~TestHeartbeatManager() {} - - void SetUp() override { - HeartbeatOption option; - option.cleanFollowerAfterMs = 0; - option.heartbeatMissTimeOutMs = 10000; - option.offLineTimeOutMs = 30000; - option.mdsStartTime = steady_clock::now(); - topology_ = std::make_shared(); - coordinator_ = std::make_shared(); - topologyStat_ = std::make_shared(); - heartbeatManager_ = std::make_shared( - option, topology_, topologyStat_, coordinator_); - } - - void TearDown() override {} + TestHeartbeatManager() {} + ~TestHeartbeatManager() {} + + void SetUp() override { + HeartbeatOption option; + option.cleanFollowerAfterMs = 0; + option.heartbeatMissTimeOutMs = 10000; + option.offLineTimeOutMs = 30000; + option.mdsStartTime = steady_clock::now(); + topology_ = std::make_shared(); + coordinator_ = std::make_shared(); + topologyStat_ = std::make_shared(); + heartbeatManager_ = 
std::make_shared( + option, topology_, topologyStat_, coordinator_); + } + + void TearDown() override {} protected: - std::shared_ptr topology_; - std::shared_ptr topologyStat_; - std::shared_ptr coordinator_; - std::shared_ptr heartbeatManager_; + std::shared_ptr topology_; + std::shared_ptr topologyStat_; + std::shared_ptr coordinator_; + std::shared_ptr heartbeatManager_; }; TEST_F(TestHeartbeatManager, test_stop_and_run) { @@ -124,9 +126,10 @@ TEST_F(TestHeartbeatManager, test_checkReuqest_abnormal) { ASSERT_EQ(0, response.needupdatecopysets_size()); // 7. startTime not initialized - // TODO(lixiaocui): 后续考虑心跳加上错误码 - ::curve::mds::topology::ChunkServer normalCs( - 1, "hello", "", 1, "192.168.10.1", 9000, ""); + // TODO(lixiaocui): Consider adding an error code to the heartbeat in the + // future + ::curve::mds::topology::ChunkServer normalCs(1, "hello", "", 1, + "192.168.10.1", 9000, ""); EXPECT_CALL(*topology_, GetChunkServer(_, _)) .WillOnce(DoAll(SetArgPointee<1>(normalCs), Return(true))); heartbeatManager_->ChunkServerHeartbeat(req, &response); @@ -138,7 +141,7 @@ TEST_F(TestHeartbeatManager, test_checkReuqest_abnormal) { EXPECT_CALL(*topology_, GetChunkServer(_, _)) .WillOnce(DoAll(SetArgPointee<1>(normalCs), Return(true))); EXPECT_CALL(*topology_, - UpdateChunkServerStartUpTime(t, req.chunkserverid())) + UpdateChunkServerStartUpTime(t, req.chunkserverid())) .WillOnce(Return(::curve::mds::topology::kTopoErrCodeSuccess)); heartbeatManager_->ChunkServerHeartbeat(req, &response); ASSERT_EQ(0, response.needupdatecopysets_size()); @@ -148,7 +151,7 @@ TEST_F(TestHeartbeatManager, test_checkReuqest_abnormal) { EXPECT_CALL(*topology_, GetChunkServer(_, _)) .WillOnce(DoAll(SetArgPointee<1>(normalCs), Return(true))); EXPECT_CALL(*topology_, - UpdateChunkServerStartUpTime(0, req.chunkserverid())) + UpdateChunkServerStartUpTime(0, req.chunkserverid())) .WillOnce(Return(::curve::mds::topology::kTopoErrCodeSuccess)); heartbeatManager_->ChunkServerHeartbeat(req, &response); ASSERT_EQ(0, response.needupdatecopysets_size()); @@ -394,8 +397,7 @@ TEST_F(TestHeartbeatManager, 2, "hello", "", 1, "192.168.10.2", 9000, "", ::curve::mds::topology::ChunkServerStatus::READWRITE); EXPECT_CALL(*topology_, GetChunkServerNotRetired("192.168.10.2", _, _)) - .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), - Return(true))); + .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), Return(true))); EXPECT_CALL(*topology_, GetChunkServerNotRetired("192.168.10.1", _, _)) .WillOnce(DoAll(SetArgPointee<2>(chunkServer1), Return(true))); ::curve::mds::topology::ChunkServer chunkServer3( @@ -450,8 +452,7 @@ TEST_F(TestHeartbeatManager, 2, "hello", "", 1, "192.168.10.2", 9000, "", ::curve::mds::topology::ChunkServerStatus::READWRITE); EXPECT_CALL(*topology_, GetChunkServerNotRetired("192.168.10.2", _, _)) - .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), - Return(true))); + .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), Return(true))); EXPECT_CALL(*topology_, GetChunkServerNotRetired("192.168.10.1", _, _)) .WillOnce(DoAll(SetArgPointee<2>(chunkServer1), Return(true))); ::curve::mds::topology::ChunkServer chunkServer3( @@ -509,8 +510,7 @@ TEST_F(TestHeartbeatManager, 2, "hello", "", 1, "192.168.10.2", 9000, "", ::curve::mds::topology::ChunkServerStatus::READWRITE); EXPECT_CALL(*topology_, GetChunkServerNotRetired("192.168.10.2", _, _)) - .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), - Return(true))); + .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), Return(true))); EXPECT_CALL(*topology_, 
GetChunkServerNotRetired("192.168.10.1", _, _)) .WillOnce(DoAll(SetArgPointee<2>(chunkServer1), Return(true))); ::curve::mds::topology::ChunkServer chunkServer3( @@ -626,7 +626,8 @@ TEST_F(TestHeartbeatManager, test_chunkServer_heartbeat_get_copySetInfo_err) { .WillOnce(DoAll(SetArgPointee<2>(chunkServer2), Return(true))) .WillOnce(DoAll(SetArgPointee<2>(chunkServer3), Return(true))); EXPECT_CALL(*topology_, GetCopySet(_, _)) - .Times(2).WillRepeatedly(Return(false)); + .Times(2) + .WillRepeatedly(Return(false)); heartbeatManager_->ChunkServerHeartbeat(request, &response); ASSERT_EQ(1, response.needupdatecopysets_size()); ASSERT_EQ(1, response.needupdatecopysets(0).logicalpoolid()); @@ -634,8 +635,7 @@ TEST_F(TestHeartbeatManager, test_chunkServer_heartbeat_get_copySetInfo_err) { ASSERT_EQ(0, response.needupdatecopysets(0).epoch()); } -TEST_F(TestHeartbeatManager, - test_handle_copySetInfo_stale_epoch_update_err) { +TEST_F(TestHeartbeatManager, test_handle_copySetInfo_stale_epoch_update_err) { auto request = GetChunkServerHeartbeatRequestForTest(); ChunkServerHeartbeatResponse response; ::curve::mds::topology::ChunkServer chunkServer1( @@ -937,5 +937,3 @@ TEST_F(TestHeartbeatManager, test_patrol_copySetInfo_return_order) { } // namespace heartbeat } // namespace mds } // namespace curve - - diff --git a/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp b/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp index 11c70f8572..2a388c8944 100644 --- a/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp +++ b/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp @@ -20,23 +20,26 @@ * Author: lixiaocui */ +#include "src/mds/nameserver2/allocstatistic/alloc_statistic_helper.h" + #include + #include -#include "src/mds/nameserver2/helper/namespace_helper.h" -#include "src/mds/nameserver2/allocstatistic/alloc_statistic_helper.h" + #include "src/common/namespace_define.h" +#include "src/mds/nameserver2/helper/namespace_helper.h" #include "test/mds/mock/mock_etcdclient.h" using ::testing::_; -using ::testing::Return; -using ::testing::SetArgPointee; using ::testing::DoAll; using ::testing::Matcher; +using ::testing::Return; +using ::testing::SetArgPointee; -using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTALLOCSIZEKEY; -using ::curve::common::SEGMENTINFOKEYPREFIX; +using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTINFOKEYEND; +using ::curve::common::SEGMENTINFOKEYPREFIX; namespace curve { namespace mds { @@ -44,18 +47,18 @@ TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) { auto mockEtcdClient = std::make_shared(); { - // 1. list失败 + // 1. list failed EXPECT_CALL(*mockEtcdClient, List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, Matcher*>(_))) .WillOnce(Return(EtcdErrCode::EtcdCanceled)); std::map out; ASSERT_EQ(-1, AllocStatisticHelper::GetExistSegmentAllocValues( - &out, mockEtcdClient)); + &out, mockEtcdClient)); } { - // 2. list成功,解析失败 + // 2. list successful, parsing failed std::vector values{"hello"}; EXPECT_CALL(*mockEtcdClient, List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, @@ -64,10 +67,10 @@ TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) { DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); std::map out; ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues( - &out, mockEtcdClient)); + &out, mockEtcdClient)); } { - // 3. 获取已有的segment alloc value成功 + // 3. 
Successfully obtained the existing segment alloc value std::vector values{ NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; EXPECT_CALL(*mockEtcdClient, @@ -77,7 +80,7 @@ TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) { DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); std::map out; ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues( - &out, mockEtcdClient)); + &out, mockEtcdClient)); ASSERT_EQ(1, out.size()); ASSERT_EQ(1024, out[1]); } @@ -89,32 +92,35 @@ TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { // 1. CalculateSegmentAlloc ok LOG(INFO) << "start test1......"; EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) .WillOnce(Return(EtcdErrCode::EtcdUnknown)); std::map out; ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); + 2, mockEtcdClient, &out)); } { - // 2. ListWithLimitAndRevision成功,但是解析失败 + // 2. ListWithLimitAndRevision succeeded, but parsing failed LOG(INFO) << "start test2......"; std::vector values{"hello"}; std::string lastKey = "021"; EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) .WillOnce( DoAll(SetArgPointee<4>(values), Return(EtcdErrCode::EtcdOK))); std::map out; ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); + 2, mockEtcdClient, &out)); } { - // 3. ListWithLimitAndRevision成功, 解析成功, bundle=1000, 获取个数为1 + // 3. ListWithLimitAndRevision successful, parsing successful, + // bundle=1000, number obtained is 1 LOG(INFO) << "start test3......"; PageFileSegment segment; segment.set_segmentsize(1 << 30); segment.set_logicalpoolid(1); - segment.set_chunksize(16*1024*1024); + segment.set_chunksize(16 * 1024 * 1024); segment.set_startoffset(0); std::string encodeSegment; ASSERT_TRUE( @@ -123,23 +129,24 @@ TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { std::string lastKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 0); EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) .WillOnce(DoAll(SetArgPointee<4>(values), SetArgPointee<5>(lastKey), - Return(EtcdErrCode::EtcdOK))); + Return(EtcdErrCode::EtcdOK))); std::map out; ASSERT_EQ(0, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); + 2, mockEtcdClient, &out)); ASSERT_EQ(1, out.size()); ASSERT_EQ(1 << 30, out[1]); } { - // 4. ListWithLimitAndRevision成功, 解析成功 - // bundle=1000, 获取个数为1001 + // 4. 
ListWithLimitAndRevision succeeds and parsing succeeds, + // bundle=1000, 1001 entries are obtained LOG(INFO) << "start test4......"; PageFileSegment segment; segment.set_segmentsize(1 << 30); segment.set_logicalpoolid(1); - segment.set_chunksize(16*1024*1024); + segment.set_chunksize(16 * 1024 * 1024); segment.set_startoffset(0); std::string encodeSegment; std::vector values; @@ -160,20 +167,22 @@ TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { std::string lastKey2 = NameSpaceStorageCodec::EncodeSegmentStoreKey(501, 1000); EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) .WillOnce(DoAll(SetArgPointee<4>(values), SetArgPointee<5>(lastKey1), Return(EtcdErrCode::EtcdOK))); - EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - lastKey1, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) - .WillOnce(DoAll(SetArgPointee<4>( - std::vector{encodeSegment, encodeSegment}), + EXPECT_CALL(*mockEtcdClient, + ListWithLimitAndRevision(lastKey1, SEGMENTINFOKEYEND, + GETBUNDLE, 2, _, _)) + .WillOnce(DoAll(SetArgPointee<4>(std::vector{ + encodeSegment, encodeSegment}), SetArgPointee<5>(lastKey2), Return(EtcdErrCode::EtcdOK))); std::map out; ASSERT_EQ(0, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); + 2, mockEtcdClient, &out)); ASSERT_EQ(2, out.size()); ASSERT_EQ(500L * (1 << 30), out[1]); ASSERT_EQ(501L * (1 << 30), out[2]); @@ -181,5 +190,3 @@ TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { } } // namespace mds } // namespace curve - - diff --git a/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp b/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp index c51e91587c..f250e7e401 100644 --- a/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp +++ b/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp @@ -60,14 +60,14 @@ class AllocStatisticTest : public ::testing::Test { TEST_F(AllocStatisticTest, test_Init) { { - // 1. 从etcd中获取当前revision失败 + // 1. Failed to obtain the current revision from etcd LOG(INFO) << "test1......"; EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)). WillOnce(Return(EtcdErrCode::EtcdCanceled)); ASSERT_EQ(-1, allocStatistic_->Init()); } { - // 2. 获取已经存在的logicalPool对应的alloc大小失败 + // 2. Failed to obtain the alloc size corresponding to the existing logicalPool LOG(INFO) << "test2......"; EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)). WillOnce(Return(EtcdErrCode::EtcdOK)); @@ -80,7 +80,7 @@ TEST_F(AllocStatisticTest, test_Init) { ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); } { - // 3. init成功 + // 3. init successful LOG(INFO) << "test3......"; std::vector values{ NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; @@ -99,8 +99,8 @@ TEST_F(AllocStatisticTest, test_Init) { } TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { - // 初始化 allocStatistic - // 旧值: logicalPooId(1):1024 + // Initialize allocStatistic + // Old value: logicalPoolId(1):1024 std::vector values{ NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) @@ -124,19 +124,19 @@ TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { values.emplace_back(encodeSegment); } - // 1. 
在定期持久化线程和统计线程启动前,只能获取旧值 + // 1. Before the periodic persistence thread and the statistics thread start, only the old values can be obtained int64_t alloc; ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); ASSERT_EQ(1024, alloc); ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); - // 2. 更新segment的值 + // 2. Update the segment values allocStatistic_->DeAllocSpace(1, 64, 1); allocStatistic_->AllocSpace(1, 32, 1); ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); ASSERT_EQ(1024 - 32, alloc); - // 设置mock的etcd中segment的值 + // Set the segment values in the mocked etcd // logicalPoolId(1):500 * (1<<30) // logicalPoolId(2):501 * (1<<30) segment.set_logicalpoolid(2); @@ -167,7 +167,7 @@ TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { .WillOnce(Return(EtcdErrCode::EtcdCanceled)) .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); - // 设置mock的Put结果 + // Set the Put results of the mock EXPECT_CALL(*mockEtcdClient_, Put( NameSpaceStorageCodec::EncodeSegmentAllocKey(1), NameSpaceStorageCodec::EncodeSegmentAllocValue( @@ -198,7 +198,7 @@ TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { NameSpaceStorageCodec::EncodeSegmentAllocValue(3, 1L << 30))) .WillOnce(Return(EtcdErrCode::EtcdOK)); - // 2. 启动定期持久化线程和统计线程 + // 2. Start the periodic persistence thread and the statistics thread for (int i = 1; i <= 2; i++) { allocStatistic_->AllocSpace(i, 1L << 30, i + 3); } @@ -211,7 +211,7 @@ TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { ASSERT_EQ(502L *(1 << 30), alloc); std::this_thread::sleep_for(std::chrono::milliseconds(30)); - // 再通过alloc进行更新 + // Update through alloc again for (int i = 1; i <= 2; i++) { allocStatistic_->DeAllocSpace(i, 1L << 30, i + 4); } diff --git a/test/mds/nameserver2/clean_core_test.cpp b/test/mds/nameserver2/clean_core_test.cpp index 5288fd83d6..ca568b7209 100644 --- a/test/mds/nameserver2/clean_core_test.cpp +++ b/test/mds/nameserver2/clean_core_test.cpp @@ -20,23 +20,25 @@ * Author: hzsunjianliang */ -#include -#include -#include #include "src/mds/nameserver2/clean_core.h" -#include "test/mds/nameserver2/mock/mock_namespace_storage.h" -#include "test/mds/mock/mock_topology.h" + +#include +#include +#include + #include "src/mds/chunkserverclient/copyset_client.h" #include "test/mds/mock/mock_alloc_statistic.h" #include "test/mds/mock/mock_chunkserverclient.h" +#include "test/mds/mock/mock_topology.h" +#include "test/mds/nameserver2/mock/mock_namespace_storage.h" +using ::curve::mds::chunkserverclient::ChunkServerClientOption; +using ::curve::mds::chunkserverclient::MockChunkServerClient; +using curve::mds::topology::MockTopology; using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; -using curve::mds::topology::MockTopology; -using ::curve::mds::chunkserverclient::ChunkServerClientOption; -using ::curve::mds::chunkserverclient::MockChunkServerClient; namespace curve { namespace mds { @@ -56,8 +58,8 @@ class CleanCoreTest : public testing::Test { cleanCore_ = std::make_shared(storage_, client_, allocStatistic_); - csClient_ = std::make_shared( - topology_, option_, channelPool_); + csClient_ = std::make_shared(topology_, option_, + channelPool_); } void TearDown() override {} @@ -81,7 +83,7 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { cleanFile.set_segmentsize(0); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress),
StatusCode::KInternalError); } { @@ -89,19 +91,19 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(_, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .WillOnce(Return(StoreStatus::KeyNotExist)); } EXPECT_CALL(*storage_, DeleteSnapshotFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::kOK); + StatusCode::kOK); ASSERT_EQ(progress.GetStatus(), TaskStatus::SUCCESS); ASSERT_EQ(progress.GetProgress(), 100); @@ -111,47 +113,48 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(_, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .WillOnce(Return(StoreStatus::KeyNotExist)); } EXPECT_CALL(*storage_, DeleteSnapshotFile(_, _)) - .WillOnce(Return(StoreStatus::InternalError)); + .WillOnce(Return(StoreStatus::InternalError)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::kSnapshotFileDeleteError); + StatusCode::kSnapshotFileDeleteError); } { // get segment error EXPECT_CALL(*storage_, GetSegment(_, 0, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::kSnapshotFileDeleteError); + StatusCode::kSnapshotFileDeleteError); } { - // 联调Bug修复:快照文件共享源文件的segment,所以在查询segment的时候需要使用 - // ParentID 进行查找 + // Joint debugging bug fix: the snapshot file shares the segments of the + // source file, so the ParentID must be used when querying the + // segments uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; uint64_t expectParentID = 101; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(expectParentID, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .WillOnce(Return(StoreStatus::KeyNotExist)); } EXPECT_CALL(*storage_, DeleteSnapshotFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); @@ -159,7 +162,7 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { cleanFile.set_parentid(expectParentID); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::kOK); + StatusCode::kOK); ASSERT_EQ(progress.GetStatus(), TaskStatus::SUCCESS); ASSERT_EQ(progress.GetProgress(), 100); @@ -173,19 +176,19 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(_, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::OK)); + .WillOnce(Return(StoreStatus::OK)); } EXPECT_CALL(*storage_, DeleteSnapshotFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK));
+ .Times(1) + .WillOnce(Return(StoreStatus::OK)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::kOK); + StatusCode::kOK); ASSERT_EQ(progress.GetStatus(), TaskStatus::SUCCESS); ASSERT_EQ(progress.GetProgress(), 100); @@ -200,7 +203,7 @@ TEST_F(CleanCoreTest, testcleanfile) { cleanFile.set_segmentsize(0); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), - StatusCode::KInternalError); + StatusCode::KInternalError); } { @@ -208,19 +211,18 @@ TEST_F(CleanCoreTest, testcleanfile) { uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(_, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .WillOnce(Return(StoreStatus::KeyNotExist)); } EXPECT_CALL(*storage_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; - ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), - StatusCode::kOK); + ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), StatusCode::kOK); ASSERT_EQ(progress.GetStatus(), TaskStatus::SUCCESS); ASSERT_EQ(progress.GetProgress(), 100); @@ -231,52 +233,51 @@ TEST_F(CleanCoreTest, testcleanfile) { uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(_, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .WillOnce(Return(StoreStatus::KeyNotExist)); } EXPECT_CALL(*storage_, DeleteFile(_, _)) - .WillOnce(Return(StoreStatus::InternalError)); + .WillOnce(Return(StoreStatus::InternalError)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), - StatusCode::kCommonFileDeleteError); + StatusCode::kCommonFileDeleteError); ASSERT_EQ(progress.GetStatus(), TaskStatus::FAILED); } { // get segment error EXPECT_CALL(*storage_, GetSegment(_, 0, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), - StatusCode::kCommonFileDeleteError); + StatusCode::kCommonFileDeleteError); ASSERT_EQ(progress.GetStatus(), TaskStatus::FAILED); } { // get segment ok, DeleteSnapShotChunk Error - } - { + } { // get segment ok, DeleteSnapShotChunk ok, DeleteSegment error EXPECT_CALL(*storage_, GetSegment(_, 0, _)) - .WillOnce(Return(StoreStatus::OK)); + .WillOnce(Return(StoreStatus::OK)); EXPECT_CALL(*storage_, DeleteSegment(_, _, _)) - .WillOnce(Return(StoreStatus::InternalError)); + .WillOnce(Return(StoreStatus::InternalError)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), - StatusCode::kCommonFileDeleteError); + StatusCode::kCommonFileDeleteError); ASSERT_EQ(progress.GetStatus(), TaskStatus::FAILED); } } @@ -310,12 +311,9 @@ TEST_F(CleanCoreTest, TestCleanDiscardSegment) { // CopysetClient DeleteChunk failed { 
- EXPECT_CALL(*topology_, GetCopySet(_, _)) - .WillOnce(Return(false)); - EXPECT_CALL(*storage_, CleanDiscardSegment(_, _, _)) - .Times(0); - EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)) - .Times(0); + EXPECT_CALL(*topology_, GetCopySet(_, _)).WillOnce(Return(false)); + EXPECT_CALL(*storage_, CleanDiscardSegment(_, _, _)).Times(0); + EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)).Times(0); TaskProgress progress; ASSERT_EQ(StatusCode::KInternalError, cleanCore_->CleanDiscardSegment(fakeKey, discardSegmentInfo, @@ -333,16 +331,14 @@ TEST_F(CleanCoreTest, TestCleanDiscardSegment) { EXPECT_CALL(*topology_, GetCopySet(_, _)) .Times(segment.chunks_size()) - .WillRepeatedly( - DoAll(SetArgPointee<1>(copyset), Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(copyset), Return(true))); EXPECT_CALL(*csClient_, DeleteChunk(_, _, _, _, _)) .Times(segment.chunks_size()) .WillRepeatedly(Return(kMdsSuccess)); EXPECT_CALL(*storage_, CleanDiscardSegment(_, _, _)) .WillOnce(Return(StoreStatus::InternalError)); - EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)) - .Times(0); + EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)).Times(0); TaskProgress progress; ASSERT_EQ(StatusCode::KInternalError, @@ -361,16 +357,14 @@ TEST_F(CleanCoreTest, TestCleanDiscardSegment) { EXPECT_CALL(*topology_, GetCopySet(_, _)) .Times(segment.chunks_size()) - .WillRepeatedly( - DoAll(SetArgPointee<1>(copyset), Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(copyset), Return(true))); EXPECT_CALL(*csClient_, DeleteChunk(_, _, _, _, _)) .Times(segment.chunks_size()) .WillRepeatedly(Return(kMdsSuccess)); EXPECT_CALL(*storage_, CleanDiscardSegment(_, _, _)) .WillOnce(Return(StoreStatus::OK)); - EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)) - .Times(1); + EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)).Times(1); TaskProgress progress; ASSERT_EQ(StatusCode::kOK, cleanCore_->CleanDiscardSegment( diff --git a/test/mds/nameserver2/curvefs_test.cpp b/test/mds/nameserver2/curvefs_test.cpp index 899b942ee8..7ce79cb724 100644 --- a/test/mds/nameserver2/curvefs_test.cpp +++ b/test/mds/nameserver2/curvefs_test.cpp @@ -19,46 +19,47 @@ * Created Date: Wednesday September 12th 2018 * Author: hzsunjianliang */ -#include -#include #include "src/mds/nameserver2/curvefs.h" -#include "src/mds/nameserver2/idgenerator/inode_id_generator.h" -#include "src/mds/nameserver2/namespace_storage.h" + +#include +#include + +#include "src/common/namespace_define.h" #include "src/common/timeutility.h" #include "src/mds/common/mds_define.h" +#include "src/mds/nameserver2/idgenerator/inode_id_generator.h" +#include "src/mds/nameserver2/namespace_storage.h" #include "src/mds/topology/topology_item.h" -#include "src/common/namespace_define.h" - -#include "test/mds/nameserver2/mock/mock_namespace_storage.h" -#include "test/mds/nameserver2/mock/mock_inode_id_generator.h" +#include "test/mds/mock/mock_alloc_statistic.h" +#include "test/mds/mock/mock_topology.h" #include "test/mds/nameserver2/mock/mock_chunk_allocate.h" #include "test/mds/nameserver2/mock/mock_clean_manager.h" -#include "test/mds/nameserver2/mock/mock_snapshotclone_client.h" #include "test/mds/nameserver2/mock/mock_file_record_manager.h" -#include "test/mds/mock/mock_alloc_statistic.h" -#include "test/mds/mock/mock_topology.h" +#include "test/mds/nameserver2/mock/mock_inode_id_generator.h" +#include "test/mds/nameserver2/mock/mock_namespace_storage.h" +#include "test/mds/nameserver2/mock/mock_snapshotclone_client.h" -using ::testing::AtLeast; -using 
::testing::StrEq; +using curve::common::Authenticator; using ::testing::_; +using ::testing::AtLeast; +using ::testing::DoAll; +using ::testing::Invoke; using ::testing::Return; using ::testing::ReturnArg; -using ::testing::DoAll; -using ::testing::SetArgPointee; using ::testing::SaveArg; -using ::testing::Invoke; -using curve::common::Authenticator; +using ::testing::SetArgPointee; +using ::testing::StrEq; +using curve::common::kDefaultPoolsetName; using curve::common::TimeUtility; -using curve::mds::topology::MockTopology; -using curve::mds::snapshotcloneclient::MockSnapshotCloneClient; using curve::mds::snapshotcloneclient::DestFileInfo; -using curve::common::kDefaultPoolsetName; +using curve::mds::snapshotcloneclient::MockSnapshotCloneClient; +using curve::mds::topology::MockTopology; namespace curve { namespace mds { -class CurveFSTest: public ::testing::Test { +class CurveFSTest : public ::testing::Test { protected: void SetUp() override { storage_ = std::make_shared(); @@ -68,7 +69,8 @@ class CurveFSTest: public ::testing::Test { mockcleanManager_ = std::make_shared(); topology_ = std::make_shared(); snapshotClient_ = std::make_shared(); - // session repo已经mock,数据库相关参数不需要 + // The session repo has been mocked, and database related parameters are + // not required fileRecordManager_ = std::make_shared(); fileRecordOptions_.fileRecordExpiredTimeUs = 5 * 1000; fileRecordOptions_.scanIntervalTimeUs = 1 * 1000; @@ -83,7 +85,7 @@ class CurveFSTest: public ::testing::Test { curveFSOptions_.authOptions = authOptions_; curveFSOptions_.fileRecordOptions = fileRecordOptions_; - curvefs_ = &kCurveFS; + curvefs_ = &kCurveFS; allocStatistic_ = std::make_shared(); FileInfo fileInfo; @@ -95,16 +97,12 @@ class CurveFSTest: public ::testing::Test { fileInfo.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(AtLeast(1)) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); curvefs_->Init(storage_, inodeIdGenerator_, mockChunkAllocator_, - mockcleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions_, - topology_, - snapshotClient_); + mockcleanManager_, fileRecordManager_, allocStatistic_, + curveFSOptions_, topology_, snapshotClient_); DefaultSegmentSize = curvefs_->GetDefaultSegmentSize(); kMiniFileLength = curvefs_->GetMinFileLength(); kMaxFileLength = curvefs_->GetMaxFileLength(); @@ -115,11 +113,9 @@ class CurveFSTest: public ::testing::Test { Return(std::vector{kDefaultPoolsetName})); } - void TearDown() override { - curvefs_->Uninit(); - } + void TearDown() override { curvefs_->Uninit(); } - CurveFS *curvefs_; + CurveFS* curvefs_; std::shared_ptr storage_; std::shared_ptr inodeIdGenerator_; std::shared_ptr mockChunkAllocator_; @@ -140,108 +136,112 @@ class CurveFSTest: public ::testing::Test { TEST_F(CurveFSTest, testCreateFile1) { // test parm error std::map spacePools; - spacePools.insert(std::pair(1, - kMaxFileLength - kMiniFileLength)); - EXPECT_CALL(*mockChunkAllocator_, - GetRemainingSpaceInLogicalPool(_, _, _)) + spacePools.insert( + std::pair(1, kMaxFileLength - kMiniFileLength)); + EXPECT_CALL(*mockChunkAllocator_, GetRemainingSpaceInLogicalPool(_, _, _)) .Times(AtLeast(1)) .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); - ASSERT_EQ(curvefs_->CreateFile("/file1", "", - "owner1", FileType::INODE_PAGEFILE, - kMiniFileLength - 1, 0, 0), - StatusCode::kFileLengthNotSupported); - - ASSERT_EQ(curvefs_->CreateFile("/file1", "", - 
"owner1", FileType::INODE_PAGEFILE, - kMaxFileLength + 1, 0, 0), - StatusCode::kFileLengthNotSupported); - - ASSERT_EQ(curvefs_->CreateFile("/flie1", "", - "owner1", FileType::INODE_PAGEFILE, - kMiniFileLength + 1, 0, 0), - StatusCode::kFileLengthNotSupported); + ASSERT_EQ( + curvefs_->CreateFile("/file1", "", "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength - 1, 0, 0), + StatusCode::kFileLengthNotSupported); + + ASSERT_EQ( + curvefs_->CreateFile("/file1", "", "owner1", FileType::INODE_PAGEFILE, + kMaxFileLength + 1, 0, 0), + StatusCode::kFileLengthNotSupported); + + ASSERT_EQ( + curvefs_->CreateFile("/flie1", "", "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength + 1, 0, 0), + StatusCode::kFileLengthNotSupported); - ASSERT_EQ(curvefs_->CreateFile("/flie1", "", "owner1", - FileType::INODE_PAGEFILE, - kMaxFileLength - kMiniFileLength + DefaultSegmentSize, - 0, 0), StatusCode::kFileLengthNotSupported); + ASSERT_EQ(curvefs_->CreateFile( + "/flie1", "", "owner1", FileType::INODE_PAGEFILE, + kMaxFileLength - kMiniFileLength + DefaultSegmentSize, 0, 0), + StatusCode::kFileLengthNotSupported); - ASSERT_EQ(curvefs_->CreateFile("/", "", "", FileType::INODE_DIRECTORY, - 0, 0, 0), StatusCode::kFileExists); + ASSERT_EQ( + curvefs_->CreateFile("/", "", "", FileType::INODE_DIRECTORY, 0, 0, 0), + StatusCode::kFileExists); { // test file exist EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kFileExists); } { // test get storage error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::InternalError)); auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { // test put storage error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::InternalError)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(true)); + .Times(1) + .WillOnce(Return(true)); auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { // test put storage ok EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(true)); - + .Times(1) + .WillOnce(Return(true)); auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); 
ASSERT_EQ(statusCode, StatusCode::kOK); } { // test inode allocate error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(false)); + .Times(1) + .WillOnce(Return(false)); auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kStorageError); } @@ -253,17 +253,15 @@ TEST_F(CurveFSTest, testCreateFile1) { FileInfo fileInfo; EXPECT_CALL(*storage_, PutFile(_)) - .WillOnce(DoAll( - SaveArg<0>(&fileInfo), - Return(StoreStatus::OK))); + .WillOnce(DoAll(SaveArg<0>(&fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) .Times(1) .WillOnce(Return(true)); - auto statusCode = curvefs_->CreateFile( - "/dir1", kDefaultPoolsetName, "owner1", - FileType::INODE_DIRECTORY, 0, 0, 0); + auto statusCode = + curvefs_->CreateFile("/dir1", kDefaultPoolsetName, "owner1", + FileType::INODE_DIRECTORY, 0, 0, 0); ASSERT_EQ(statusCode, StatusCode::kOK); ASSERT_FALSE(fileInfo.has_throttleparams()); } @@ -276,18 +274,15 @@ TEST_F(CurveFSTest, testCreateFile1) { FileInfo fileInfo; EXPECT_CALL(*storage_, PutFile(_)) - .WillOnce(DoAll( - SaveArg<0>(&fileInfo), - Return(StoreStatus::OK))); + .WillOnce(DoAll(SaveArg<0>(&fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) .Times(1) .WillOnce(Return(true)); - auto statusCode = - curvefs_->CreateFile("/file1", kDefaultPoolsetName, - "owner1", FileType::INODE_PAGEFILE, - kMiniFileLength, 0, 0); + auto statusCode = curvefs_->CreateFile( + "/file1", kDefaultPoolsetName, "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kOK); ASSERT_TRUE(fileInfo.has_throttleparams()); } @@ -300,71 +295,76 @@ TEST_F(CurveFSTest, testCreateStripeFile) { spacePools.insert(std::pair(1, kMaxFileLength)); spacePools.insert(std::pair(2, kMaxFileLength)); EXPECT_CALL(*mockChunkAllocator_, - GetRemainingSpaceInLogicalPool(_, _, _)) - .Times(AtLeast(1)) - .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); + GetRemainingSpaceInLogicalPool(_, _, _)) + .Times(AtLeast(1)) + .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(true)); + .Times(1) + .WillOnce(Return(true)); ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, - 1 * 1024 * 1024, 4), StatusCode::kOK); + FileType::INODE_PAGEFILE, + kMiniFileLength, 1 * 1024 * 1024, 4), + StatusCode::kOK); } { // test stripeStripe and stripeCount is not all zero ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 1), - StatusCode::kParaError); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 1), + StatusCode::kParaError); ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 1024*1024ul, - 0), StatusCode::kParaError); + FileType::INODE_PAGEFILE, + 
+                                       kMiniFileLength, 1024 * 1024ul, 0),
+                  StatusCode::kParaError);
     }
     {
         // test stripeUnit more than chunksize
-        ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1",
-                FileType::INODE_PAGEFILE, kMiniFileLength, 16*1024*1024ul + 1,
-                0), StatusCode::kParaError);
+        ASSERT_EQ(curvefs_->CreateFile(
+                      "/file1", "", "owner1", FileType::INODE_PAGEFILE,
+                      kMiniFileLength, 16 * 1024 * 1024ul + 1, 0),
+                  StatusCode::kParaError);
     }
     {
         // test stripeUnit is not divisible by chunksize
-        ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1",
-                FileType::INODE_PAGEFILE, kMiniFileLength,
-                4*1024*1024ul + 1, 0), StatusCode::kParaError);
+        ASSERT_EQ(curvefs_->CreateFile(
+                      "/file1", "", "owner1", FileType::INODE_PAGEFILE,
+                      kMiniFileLength, 4 * 1024 * 1024ul + 1, 0),
+                  StatusCode::kParaError);
     }
 }
 
 TEST_F(CurveFSTest, testCreateFileWithPoolset) {
     const std::map spacePools{
-            {1, kMaxFileLength},
-            {2, kMaxFileLength},
+        {1, kMaxFileLength},
+        {2, kMaxFileLength},
     };
     EXPECT_CALL(*mockChunkAllocator_, GetRemainingSpaceInLogicalPool(_, _, _))
-            .Times(AtLeast(1))
-            .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return()));
+        .Times(AtLeast(1))
+        .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return()));
     EXPECT_CALL(*storage_, GetFile(_, _, _))
-            .Times(AtLeast(1))
-            .WillRepeatedly(Return(StoreStatus::KeyNotExist));
+        .Times(AtLeast(1))
+        .WillRepeatedly(Return(StoreStatus::KeyNotExist));
     EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_))
-            .WillRepeatedly(Invoke([](uint64_t* id) {
-                static std::atomic<uint64_t> counter{0};
-                *id = counter++;
-                return true;
-            }));
+        .WillRepeatedly(Invoke([](uint64_t* id) {
+            static std::atomic<uint64_t> counter{0};
+            *id = counter++;
+            return true;
+        }));
 
     // create file without poolset, assign to default poolset
     {
@@ -382,8 +382,8 @@ TEST_F(CurveFSTest, testCreateFileWithPoolset) {
     // create file with a poolset that doesn't match any existing one
     {
         EXPECT_CALL(*topology_, GetPoolsetNameInCluster(_))
-            .WillOnce(Return(
-                std::vector<std::string>{kDefaultPoolsetName, "SSD"}));
+            .WillOnce(
+                Return(std::vector<std::string>{kDefaultPoolsetName, "SSD"}));
 
         ASSERT_EQ(StatusCode::kPoolsetNotExist,
                   curvefs_->CreateFile("/file1", "HDD", "owner",
@@ -393,8 +393,7 @@ TEST_F(CurveFSTest, testCreateFileWithPoolset) {
     // create file with poolset and poolset exists
     {
-        EXPECT_CALL(*storage_, PutFile(_))
-            .WillOnce(Return(StoreStatus::OK));
+        EXPECT_CALL(*storage_, PutFile(_)).WillOnce(Return(StoreStatus::OK));
         EXPECT_CALL(*topology_, GetPoolsetNameInCluster(_));
 
         ASSERT_EQ(StatusCode::kOK,
@@ -406,8 +405,7 @@ TEST_F(CurveFSTest, testCreateFileWithPoolset) {
     // cluster doesn't have poolset
     {
         EXPECT_CALL(*topology_, GetPoolsetNameInCluster(_))
-            .WillOnce(Return(
-                std::vector<std::string>{}));
+            .WillOnce(Return(std::vector<std::string>{}));
         ASSERT_EQ(StatusCode::kPoolsetNotExist,
                   curvefs_->CreateFile("/file1", "SSD", "owner",
                                        FileType::INODE_PAGEFILE,
@@ -419,23 +417,19 @@ TEST(TestSelectPoolsetByRules, Test) {
     ASSERT_EQ(kDefaultPoolsetName, SelectPoolsetByRules("/filename", {}));
 
     {
-        std::map<std::string, std::string> rules{
-            {"/system/", "system"}
-        };
+        std::map<std::string, std::string> rules{{"/system/", "system"}};
         ASSERT_EQ("system", SelectPoolsetByRules("/system/file", rules));
     }
 
     {
-        std::map<std::string, std::string> rules{
-            {"/system/", "system"}
-        };
+        std::map<std::string, std::string> rules{{"/system/", "system"}};
         ASSERT_EQ(kDefaultPoolsetName,
                   SelectPoolsetByRules("/systems", rules));
     }
 
     {
         std::map<std::string, std::string> rules{
-                {"/system/", "system"},
-                {"/systems/", "system1"},
+            {"/system/", "system"},
+            {"/systems/", "system1"},
         };
         ASSERT_EQ("system1", SelectPoolsetByRules("/systems/file", rules));
     }
@@ -443,9 +437,7 @@ TEST(TestSelectPoolsetByRules, Test) {
     // subdir rules
     {
         std::map<std::string, std::string> rules{
-            {"/system/", "system"},
-            {"/system/sub/", "system-sub"}
-        };
+            {"/system/", "system"}, {"/system/sub/", "system-sub"}};
         ASSERT_EQ("system-sub",
                   SelectPoolsetByRules("/system/sub/file", rules));
@@ -462,15 +454,15 @@ TEST_F(CurveFSTest, testGetFileInfo) {
     FileInfo rootFileInfo = curvefs_->GetRootFileInfo();
     ASSERT_EQ(fileInfo.id(), rootFileInfo.id());
-    ASSERT_EQ(fileInfo.filename(), rootFileInfo.filename()); 
+    ASSERT_EQ(fileInfo.filename(), rootFileInfo.filename());
     ASSERT_EQ(fileInfo.filetype(), rootFileInfo.filetype());
     {
         // test path not exist
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
         EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::KeyNotExist));
         ASSERT_EQ(curvefs_->GetFileInfo("/file1/file2", &fileInfo),
                   StatusCode::kFileNotExists);
     }
     {
         // test storage error
         FileInfo fileInfo;
         EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::InternalError));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::InternalError));
         ASSERT_EQ(curvefs_->GetFileInfo("/file1/file2", &fileInfo),
                   StatusCode::kStorageError);
     }
@@ -487,134 +479,134 @@
     {
         // test ok
         FileInfo fileInfo;
         EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillRepeatedly(Return(StoreStatus::OK));
+            .Times(2)
+            .WillRepeatedly(Return(StoreStatus::OK));
         ASSERT_EQ(curvefs_->GetFileInfo("/file1/file2", &fileInfo),
                   StatusCode::kOK);
     }
     {
         // test WalkPath NOT DIRECTORY
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
         fileInfo.set_filetype(FileType::INODE_PAGEFILE);
         EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
         FileInfo retFileInfo;
         std::string lastEntry;
         ASSERT_EQ(curvefs_->GetFileInfo("/testdir/file1", &retFileInfo),
-                        StatusCode::kFileNotExists);
+                  StatusCode::kFileNotExists);
     }
     {
         // test LookUpFile internal Error
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
         fileInfo.set_filetype(FileType::INODE_DIRECTORY);
         EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)))
-                .WillOnce(Return(StoreStatus::InternalError));
+            .Times(2)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)))
+            .WillOnce(Return(StoreStatus::InternalError));
         FileInfo fileInfo1;
         ASSERT_EQ(curvefs_->GetFileInfo("testdir/file1", &fileInfo1),
-                        StatusCode::kStorageError);
+                  StatusCode::kStorageError);
     }
 }
 
 TEST_F(CurveFSTest, testDeleteFile) {
     // test remove root
     ASSERT_EQ(curvefs_->DeleteFile("/", kUnitializedFileID, false),
-                    StatusCode::kParaError);
+              StatusCode::kParaError);
 
     // test delete directory ok
     {
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_DIRECTORY);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        std::vector<FileInfo> fileInfoList;
        EXPECT_CALL(*storage_, ListFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfoList),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, DeleteFile(_, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
        ASSERT_EQ(curvefs_->DeleteFile("/dir1", kUnitializedFileID, false),
-                        StatusCode::kOK);
+                  StatusCode::kOK);
    }
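     // The cases in this file all drive the mocked storage with the same
     // gmock recipe; a minimal sketch of it (illustrative only, relying on
     // gmock's documented DoAll/SetArgPointee/Return actions, not code from
     // this patch):
     //
     //   FileInfo stub;
     //   EXPECT_CALL(*storage_, GetFile(_, _, _))
     //       .Times(1)
     //       .WillOnce(DoAll(SetArgPointee<2>(stub),     // fill out-param
     //                       Return(StoreStatus::OK)));  // then return status
     //
     // SetArgPointee<2> writes `stub` through the call's third (pointer)
     // argument, so the code under test observes a populated FileInfo while
     // the mocked call itself returns StoreStatus::OK.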
     // test delete directory, directory is not empty
     {
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_DIRECTORY);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        std::vector<FileInfo> fileInfoList;
        fileInfoList.push_back(fileInfo);
        EXPECT_CALL(*storage_, ListFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfoList),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK)));
        ASSERT_EQ(curvefs_->DeleteFile("/dir1", kUnitializedFileID, false),
-                        StatusCode::kDirNotEmpty);
+                  StatusCode::kDirNotEmpty);
    }
 
     // test delete directory, delete file fail
     {
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_DIRECTORY);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        std::vector<FileInfo> fileInfoList;
        EXPECT_CALL(*storage_, ListFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfoList),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, DeleteFile(_, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::InternalError));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::InternalError));
        ASSERT_EQ(curvefs_->DeleteFile("/dir1", kUnitializedFileID, false),
-                        StatusCode::kStorageError);
+                  StatusCode::kStorageError);
    }
 
     // test delete pagefile ok
     {
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_PAGEFILE);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo),
-                                      Return(StoreStatus::OK)));
+            .Times(2)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        std::vector<FileInfo> fileInfoList;
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfoList),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, MoveFileToRecycle(_, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
        ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false),
-                        StatusCode::kOK);
+                  StatusCode::kOK);
    }
 
     // test delete recyclebin pagefile, cleanManager fail
@@ -623,44 +615,42 @@ TEST_F(CurveFSTest, testDeleteFile) {
        recycleBindir.set_parentid(ROOTINODEID);
        recycleBindir.set_filetype(FileType::INODE_DIRECTORY);
 
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_parentid(RECYCLEBININODEID);
        fileInfo.set_filetype(FileType::INODE_PAGEFILE);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(4)
-                .WillOnce(DoAll(SetArgPointee<2>(recycleBindir),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(recycleBindir),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(4)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)))
+            .WillOnce(
                DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, PutFile(_))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
        std::vector<FileInfo> fileInfoList;
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfoList),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK)));
 
-        EXPECT_CALL(*mockcleanManager_,
-                    GetTask(_))
+        EXPECT_CALL(*mockcleanManager_, GetTask(_))
            .Times(1)
            .WillOnce(Return(nullptr));
 
-        EXPECT_CALL(*mockcleanManager_,
-                    SubmitDeleteCommonFileJob(_))
-                .Times(1)
-                .WillOnce(Return(false));
+        EXPECT_CALL(*mockcleanManager_, SubmitDeleteCommonFileJob(_))
+            .Times(1)
+            .WillOnce(Return(false));
        ASSERT_EQ(curvefs_->DeleteFile(RECYCLEBINDIR + "/file1",
-                                       kUnitializedFileID, true),
-                        StatusCode::KInternalError);
+                                       kUnitializedFileID, true),
+                  StatusCode::KInternalError);
    }
 
     // test force delete recyclebin file ok
@@ -669,44 +659,42 @@ TEST_F(CurveFSTest, testDeleteFile) {
        recycleBindir.set_parentid(ROOTINODEID);
        recycleBindir.set_filetype(FileType::INODE_DIRECTORY);
 
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_parentid(RECYCLEBININODEID);
        fileInfo.set_filetype(FileType::INODE_PAGEFILE);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(4)
-                .WillOnce(DoAll(SetArgPointee<2>(recycleBindir),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(recycleBindir),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(4)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        std::vector<FileInfo> fileInfoList;
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfoList),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, PutFile(_))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
 
-        EXPECT_CALL(*mockcleanManager_,
-                    GetTask(_))
+        EXPECT_CALL(*mockcleanManager_, GetTask(_))
            .Times(1)
            .WillOnce(Return(nullptr));
 
-        EXPECT_CALL(*mockcleanManager_,
-                    SubmitDeleteCommonFileJob(_))
-                .Times(1)
-                .WillOnce(Return(true));
+        EXPECT_CALL(*mockcleanManager_, SubmitDeleteCommonFileJob(_))
+            .Times(1)
+            .WillOnce(Return(true));
        ASSERT_EQ(curvefs_->DeleteFile(RECYCLEBINDIR + "/file1",
-                                       kUnitializedFileID, true),
-                        StatusCode::kOK);
+                                       kUnitializedFileID, true),
+                  StatusCode::kOK);
    }
 
     // test force delete already deleting
@@ -715,250 +703,245 @@ TEST_F(CurveFSTest, testDeleteFile) {
        recycleBindir.set_parentid(ROOTINODEID);
        recycleBindir.set_filetype(FileType::INODE_DIRECTORY);
 
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_parentid(RECYCLEBININODEID);
        fileInfo.set_filetype(FileType::INODE_PAGEFILE);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(4)
-                .WillOnce(DoAll(SetArgPointee<2>(recycleBindir),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(recycleBindir),
                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(4)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        std::vector<FileInfo> fileInfoList;
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfoList),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK)));
 
        // mockcleanManager_ = std::make_shared();
        auto notNullTask = std::make_shared(1, nullptr, fileInfo);
-        EXPECT_CALL(*mockcleanManager_,
-                    GetTask(_))
+        EXPECT_CALL(*mockcleanManager_, GetTask(_))
            .Times(1)
            .WillOnce(Return(notNullTask));
        ASSERT_EQ(curvefs_->DeleteFile(RECYCLEBINDIR + "/file1",
-                                       kUnitializedFileID, true),
-                        StatusCode::kOK);
+                                       kUnitializedFileID, true),
+                  StatusCode::kOK);
    }
 
-    // test force delete file not in recyclebin 
+    // test force delete file not in recyclebin
    {
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_parentid(USERSTARTINODEID);
        fileInfo.set_filetype(FileType::INODE_PAGEFILE);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo),
-                                      Return(StoreStatus::OK)));
+            .Times(2)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        std::vector<FileInfo> fileInfoList;
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfoList),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK)));
        ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, true),
-                        StatusCode::kNotSupported);
+                  StatusCode::kNotSupported);
    }
 
     // test delete pagefile, file under snapshot
    {
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_PAGEFILE);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo),
-                                      Return(StoreStatus::OK)));
+            .Times(2)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        std::vector<FileInfo> fileInfoList;
        fileInfoList.push_back(fileInfo);
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfoList),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK)));
        ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false),
-                        StatusCode::kFileUnderSnapShot);
+                  StatusCode::kFileUnderSnapShot);
    }
 
     // test delete pagefile, storage error
    {
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_PAGEFILE);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo),
-                                      Return(StoreStatus::OK)));
+            .Times(2)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        std::vector<FileInfo> fileInfoList;
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfoList),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, MoveFileToRecycle(_, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::InternalError));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::InternalError));
        ASSERT_EQ(curvefs_->DeleteFile("/file1",
                                       kUnitializedFileID, false),
-                        StatusCode::kStorageError);
+                  StatusCode::kStorageError);
    }
 
     // test file not exist
    {
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(AtLeast(1))
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(AtLeast(1))
+            .WillOnce(Return(StoreStatus::KeyNotExist));
        ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false),
-                        StatusCode::kFileNotExists);
+                  StatusCode::kFileNotExists);
    }
 
     // delete unsupported file type
    {
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_APPENDFILE);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false),
-                        StatusCode::kNotSupported);
+                  StatusCode::kNotSupported);
    }
 
     // test delete pagefile, file under clone
    {
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo.set_filestatus(FileStatus::kFileBeingCloned);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo),
-                                      Return(StoreStatus::OK)));
+            .Times(2)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
        CloneRefStatus status = CloneRefStatus::kHasRef;
        EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(status),
-                                Return(StatusCode::kOK)));
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(status), Return(StatusCode::kOK)));
        ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false),
-                        StatusCode::kDeleteFileBeingCloned);
+                  StatusCode::kDeleteFileBeingCloned);
    }
 
     // test delete pagefile, file under clone but has no ref but delete fail
    {
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo.set_filestatus(FileStatus::kFileBeingCloned);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo),
-                                      Return(StoreStatus::OK)));
+            .Times(2)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
        CloneRefStatus status = CloneRefStatus::kNoRef;
        EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(status),
-                                Return(StatusCode::kOK)));
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(status), Return(StatusCode::kOK)));
        EXPECT_CALL(*storage_, MoveFileToRecycle(_, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::InternalError));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::InternalError));
        ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false),
-                        StatusCode::kStorageError);
+                  StatusCode::kStorageError);
    }
 
     // test delete pagefile, file under clone but has no ref success
    {
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo.set_filestatus(FileStatus::kFileBeingCloned);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo),
-                                      Return(StoreStatus::OK)));
+            .Times(2)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, ListSnapshotFile(_,
 _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
        CloneRefStatus status = CloneRefStatus::kNoRef;
        EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(status),
-                                Return(StatusCode::kOK)));
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(status), Return(StatusCode::kOK)));
        EXPECT_CALL(*storage_, MoveFileToRecycle(_, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
        ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false),
-                        StatusCode::kOK);
+                  StatusCode::kOK);
    }
 
     // test delete pagefile, file under clone but need check list empty
    {
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo.set_filestatus(FileStatus::kFileBeingCloned);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo),
-                                      Return(StoreStatus::OK)));
+            .Times(2)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
        CloneRefStatus status = CloneRefStatus::kNeedCheck;
        EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(status),
-                                Return(StatusCode::kOK)));
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(status), Return(StatusCode::kOK)));
        EXPECT_CALL(*storage_, MoveFileToRecycle(_, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
        ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false),
-                        StatusCode::kOK);
+                  StatusCode::kOK);
    }
 
     // test delete pagefile, file under clone but need check, ref file not exist
    {
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo.set_filestatus(FileStatus::kFileBeingCloned);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(3)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::KeyNotExist)));
+            .Times(3)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)))
+            .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
+                            Return(StoreStatus::KeyNotExist)));
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
        CloneRefStatus status = CloneRefStatus::kNeedCheck;
        std::vector fileCheckList;
@@ -967,37 +950,37 @@ TEST_F(CurveFSTest, testDeleteFile) {
        info.inodeid = 100;
        fileCheckList.push_back(info);
        EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(status),
-                                SetArgPointee<3>(fileCheckList),
-                                Return(StatusCode::kOK)));
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(status),
+                            SetArgPointee<3>(fileCheckList),
+                            Return(StatusCode::kOK)));
        EXPECT_CALL(*storage_, MoveFileToRecycle(_, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
        ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false),
-                        StatusCode::kOK);
+                  StatusCode::kOK);
    }
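     // For the kNeedCheck cases here, the delete only has to be refused while
     // some entry in the returned check list still resolves to a live file
     // with the same inode id; a rough sketch of the decision these cases
     // exercise (illustrative pseudocode inferred from the expectations, not
     // code from this patch):
     //
     //   for (const auto& ref : fileCheckList) {   // from GetCloneRefStatus
     //       FileInfo info;
     //       if (LookUpFile(ref.filename, &info) == StoreStatus::OK &&
     //           info.id() == ref.inodeid) {
     //           return StatusCode::kDeleteFileBeingCloned;
     //       }
     //   }
     //   // no live reference found, so the delete may proceed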
     // test delete pagefile, file under clone but need check, inode mismatch
    {
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo.set_filestatus(FileStatus::kFileBeingCloned);
        fileInfo.set_id(10);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(3)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(3)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
        CloneRefStatus status = CloneRefStatus::kNeedCheck;
        std::vector fileCheckList;
@@ -1006,37 +989,37 @@ TEST_F(CurveFSTest, testDeleteFile) {
        info.inodeid = 100;
        fileCheckList.push_back(info);
        EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(status),
-                                SetArgPointee<3>(fileCheckList),
-                                Return(StatusCode::kOK)));
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(status),
+                            SetArgPointee<3>(fileCheckList),
+                            Return(StatusCode::kOK)));
        EXPECT_CALL(*storage_, MoveFileToRecycle(_, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
        ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false),
-                        StatusCode::kOK);
+                  StatusCode::kOK);
    }
 
     // test delete pagefile, file under clone but need check, has ref
    {
-        FileInfo fileInfo; 
+        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo.set_filestatus(FileStatus::kFileBeingCloned);
        fileInfo.set_id(100);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(3)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(3)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
        CloneRefStatus status = CloneRefStatus::kNeedCheck;
        std::vector fileCheckList;
@@ -1045,17 +1028,17 @@ TEST_F(CurveFSTest, testDeleteFile) {
        info.inodeid = 100;
        fileCheckList.push_back(info);
        EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(status),
-                                SetArgPointee<3>(fileCheckList),
-                                Return(StatusCode::kOK)));
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(status),
+                            SetArgPointee<3>(fileCheckList),
+                            Return(StatusCode::kOK)));
 
        // EXPECT_CALL(*storage_, MoveFileToRecycle(_, _))
        //     .Times(1)
        //     .WillOnce(Return(StoreStatus::OK));
        ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false),
-                        StatusCode::kDeleteFileBeingCloned);
+                  StatusCode::kDeleteFileBeingCloned);
    }
 
     // test delete failed when mds hasn't been running long enough
@@ -1113,7 +1096,7 @@ TEST_F(CurveFSTest, testDeleteFile) {
 
 TEST_F(CurveFSTest, testGetAllocatedSize) {
     AllocatedSize allocSize;
-    FileInfo fileInfo; 
+    FileInfo fileInfo;
     uint64_t segmentSize = 1 * 1024 * 1024 * 1024ul;
     fileInfo.set_id(0);
     fileInfo.set_filetype(FileType::INODE_PAGEFILE);
@@ -1130,22 +1113,21 @@ TEST_F(CurveFSTest, testGetAllocatedSize) {
     segment.set_logicalpoolid(2);
     segments.emplace_back(segment);
-
     // test page file normal
     {
         EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, ListSegment(_, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<1>(segments),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<1>(segments), Return(StoreStatus::OK)));
        ASSERT_EQ(StatusCode::kOK,
-                        curvefs_->GetAllocatedSize("/tests", &allocSize));
+                  curvefs_->GetAllocatedSize("/tests", &allocSize));
        ASSERT_EQ(3 * segmentSize, allocSize.total);
-        std::unordered_map expected =
-            {{1, 2 * segmentSize}, {2, segmentSize}};
+        std::unordered_map expected = {
+            {1, 2 * segmentSize}, {2, segmentSize}};
        ASSERT_EQ(expected, allocSize.allocSizeMap);
    }
     // test directory normal
@@ -1157,73 +1139,72 @@ TEST_F(CurveFSTest, testGetAllocatedSize) {
            files.emplace_back(fileInfo);
        }
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillRepeatedly(DoAll(SetArgPointee<2>(dirInfo),
-                                      Return(StoreStatus::OK)));
+            .Times(2)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(dirInfo), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, ListFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(files),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(files), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, ListSegment(_, _))
-                .Times(3)
-                .WillRepeatedly(DoAll(SetArgPointee<1>(segments),
-                                      Return(StoreStatus::OK)));
+            .Times(3)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<1>(segments), Return(StoreStatus::OK)));
        ASSERT_EQ(StatusCode::kOK,
-                        curvefs_->GetAllocatedSize("/tests", &allocSize));
+                  curvefs_->GetAllocatedSize("/tests", &allocSize));
        ASSERT_EQ(9 * segmentSize, allocSize.total);
-        std::unordered_map expected =
-            {{1, 6 * segmentSize}, {2, 3 * segmentSize}};
+        std::unordered_map expected = {
+            {1, 6 * segmentSize}, {2, 3 * segmentSize}};
    }
     // test GetFile fail
    {
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::KeyNotExist));
        ASSERT_EQ(StatusCode::kFileNotExists,
-                        curvefs_->GetAllocatedSize("/tests", &allocSize));
+                  curvefs_->GetAllocatedSize("/tests", &allocSize));
    }
     // test file type not supported
    {
        FileInfo appendFileInfo;
        appendFileInfo.set_filetype(INODE_APPENDFILE);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(appendFileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(appendFileInfo),
+                            Return(StoreStatus::OK)));
        ASSERT_EQ(StatusCode::kNotSupported,
-                        curvefs_->GetAllocatedSize("/tests", &allocSize));
+                  curvefs_->GetAllocatedSize("/tests", &allocSize));
    }
     // test list segment fail
    {
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, ListSegment(_, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::InternalError));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::InternalError));
        ASSERT_EQ(StatusCode::kStorageError,
-                        curvefs_->GetAllocatedSize("/tests", &allocSize));
+                  curvefs_->GetAllocatedSize("/tests", &allocSize));
    }
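     // The totals asserted in the two "normal" cases above come straight from
     // the fixture: each file reports three 1 GB segments, two in logical
     // pool 1 and one in pool 2 (illustrative arithmetic, using only values
     // set up in this test):
     //
     //   single file:  total = 3 * segmentSize,
     //                 per pool = {1: 2 * segmentSize, 2: 1 * segmentSize}
     //   dir, 3 files: total = 9 * segmentSize,
     //                 per pool = {1: 6 * segmentSize, 2: 3 * segmentSize}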
     // test list directory fail
    {
        FileInfo dirInfo;
        dirInfo.set_filetype(FileType::INODE_DIRECTORY);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillRepeatedly(DoAll(SetArgPointee<2>(dirInfo),
-                                      Return(StoreStatus::OK)));
+            .Times(2)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(dirInfo), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, ListFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::InternalError));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::InternalError));
        ASSERT_EQ(StatusCode::kStorageError,
-                        curvefs_->GetAllocatedSize("/tests", &allocSize));
+                  curvefs_->GetAllocatedSize("/tests", &allocSize));
    }
 }
 
 TEST_F(CurveFSTest, testGetFileSize) {
     uint64_t fileSize;
-    FileInfo fileInfo; 
+    FileInfo fileInfo;
     fileInfo.set_id(0);
     fileInfo.set_filetype(FileType::INODE_PAGEFILE);
     fileInfo.set_length(10 * kGB);
@@ -1231,11 +1212,10 @@ TEST_F(CurveFSTest, testGetFileSize) {
     // test page file normal
     {
         EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
-        ASSERT_EQ(StatusCode::kOK,
-                  curvefs_->GetFileSize("/tests", &fileSize));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
+        ASSERT_EQ(StatusCode::kOK, curvefs_->GetFileSize("/tests", &fileSize));
        ASSERT_EQ(10 * kGB, fileSize);
    }
     // test directory normal
@@ -1247,49 +1227,47 @@ TEST_F(CurveFSTest, testGetFileSize) {
            files.emplace_back(fileInfo);
        }
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillRepeatedly(DoAll(SetArgPointee<2>(dirInfo),
-                                      Return(StoreStatus::OK)));
+            .Times(2)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(dirInfo), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, ListFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(files),
-                                Return(StoreStatus::OK)));
-        ASSERT_EQ(StatusCode::kOK,
-                  curvefs_->GetFileSize("/tests", &fileSize));
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(files), Return(StoreStatus::OK)));
+        ASSERT_EQ(StatusCode::kOK, curvefs_->GetFileSize("/tests", &fileSize));
        ASSERT_EQ(30 * kGB, fileSize);
    }
     // test GetFile fail
    {
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::KeyNotExist));
        ASSERT_EQ(StatusCode::kFileNotExists,
-                        curvefs_->GetFileSize("/tests", &fileSize));
+                  curvefs_->GetFileSize("/tests", &fileSize));
    }
     // test file type not supported
    {
        FileInfo appendFileInfo;
        appendFileInfo.set_filetype(INODE_APPENDFILE);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(appendFileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(appendFileInfo),
+                            Return(StoreStatus::OK)));
        ASSERT_EQ(StatusCode::kNotSupported,
-                        curvefs_->GetFileSize("/tests", &fileSize));
+                  curvefs_->GetFileSize("/tests", &fileSize));
    }
     // test list directory fail
    {
        FileInfo dirInfo;
        dirInfo.set_filetype(FileType::INODE_DIRECTORY);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillRepeatedly(DoAll(SetArgPointee<2>(dirInfo),
-                                      Return(StoreStatus::OK)));
+            .Times(2)
+            .WillRepeatedly(
+                DoAll(SetArgPointee<2>(dirInfo), Return(StoreStatus::OK)));
        EXPECT_CALL(*storage_, ListFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::InternalError));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::InternalError));
        ASSERT_EQ(StatusCode::kStorageError,
-                        curvefs_->GetFileSize("/tests", &fileSize));
+                  curvefs_->GetFileSize("/tests", &fileSize));
    }
 }
 
@@ -1301,9 +1279,9 @@ TEST_F(CurveFSTest, testReadDir) {
     {
         fileInfo.set_filetype(FileType::INODE_PAGEFILE);
         EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
 
        ASSERT_EQ(curvefs_->ReadDir("/file1", &items),
                  StatusCode::kNotDirectory);
@@ -1313,8 +1291,8 @@ TEST_F(CurveFSTest, testReadDir) {
     // test getFile not exist
     {
         EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::KeyNotExist));
 
        ASSERT_EQ(curvefs_->ReadDir("/file1", &items),
                  StatusCode::kDirNotExist);
@@ -1324,9 +1302,9 @@ TEST_F(CurveFSTest, testReadDir) {
     {
         fileInfo.set_filetype(FileType::INODE_DIRECTORY);
         EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
 
        std::vector<FileInfo> sideEffectArgs;
        sideEffectArgs.clear();
@@ -1335,9 +1313,9 @@ TEST_F(CurveFSTest, testReadDir) {
        sideEffectArgs.push_back(fileInfo);
 
        EXPECT_CALL(*storage_, ListFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(sideEffectArgs),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(sideEffectArgs),
+                            Return(StoreStatus::OK)));
 
        auto ret = curvefs_->ReadDir("/file1", &items);
        ASSERT_EQ(ret, StatusCode::kOK);
@@ -1355,16 +1333,16 @@ TEST_F(CurveFSTest, testRecoverFile) {
     fileInfo1.set_filetype(FileType::INODE_PAGEFILE);
     fileInfo2.set_filetype(FileType::INODE_DIRECTORY);
     EXPECT_CALL(*storage_, GetFile(_, _, _))
-            .Times(3)
-            .WillOnce(Return(StoreStatus::KeyNotExist))
-            .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                            Return(StoreStatus::OK)))
-            .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                            Return(StoreStatus::OK)));
+        .Times(3)
+        .WillOnce(Return(StoreStatus::KeyNotExist))
+        .WillOnce(
+            DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+        .WillOnce(
+            DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)));
 
     EXPECT_CALL(*storage_, RenameFile(_, _))
-            .Times(1)
-            .WillOnce(Return(StoreStatus::OK));
+        .Times(1)
+        .WillOnce(Return(StoreStatus::OK));
 
     ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0),
               StatusCode::kOK);
@@ -1373,19 +1351,19 @@ TEST_F(CurveFSTest, testRecoverFile) {
     // the upper dir does not exist, can not recover
     {
         EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::KeyNotExist));
 
-        ASSERT_EQ(curvefs_->RecoverFile("/k8s/file1",
-                                        "/RecycleBin/k8s/file1-10", 2),
-                        StatusCode::kFileNotExists);
+        ASSERT_EQ(
+            curvefs_->RecoverFile("/k8s/file1", "/RecycleBin/k8s/file1-10", 2),
+            StatusCode::kFileNotExists);
    }
 
     // the same file exists, can not recover
    {
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(AtLeast(1))
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(AtLeast(1))
+            .WillOnce(Return(StoreStatus::OK));
 
        ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0),
                  StatusCode::kFileExists);
@@ -1400,12 +1378,12 @@ TEST_F(CurveFSTest, testRecoverFile) {
        fileInfo2.set_filetype(FileType::INODE_DIRECTORY);
 
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(3)
-                .WillOnce(Return(StoreStatus::KeyNotExist))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
                                Return(StoreStatus::OK)));
+            .Times(3)
+            .WillOnce(Return(StoreStatus::KeyNotExist))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)));
 
        ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 2),
                  StatusCode::kFileIdNotMatch);
@@ -1420,12 +1398,12 @@ TEST_F(CurveFSTest, testRecoverFile) {
        fileInfo2.set_filetype(FileType::INODE_DIRECTORY);
 
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(3)
-                .WillOnce(Return(StoreStatus::KeyNotExist))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)));
+            .Times(3)
+            .WillOnce(Return(StoreStatus::KeyNotExist))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)));
 
        ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0),
                  StatusCode::kFileUnderDeleting);
@@ -1440,12 +1418,12 @@ TEST_F(CurveFSTest, testRecoverFile) {
        fileInfo2.set_filetype(FileType::INODE_DIRECTORY);
 
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(3)
-                .WillOnce(Return(StoreStatus::KeyNotExist))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)));
+            .Times(3)
+            .WillOnce(Return(StoreStatus::KeyNotExist))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)));
 
        ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0),
                  StatusCode::kRecoverFileCloneMetaInstalled);
@@ -1460,12 +1438,12 @@ TEST_F(CurveFSTest, testRecoverFile) {
        fileInfo2.set_filetype(FileType::INODE_DIRECTORY);
 
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(3)
-                .WillOnce(Return(StoreStatus::KeyNotExist))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)));
+            .Times(3)
+            .WillOnce(Return(StoreStatus::KeyNotExist))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)));
 
        ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0),
                  StatusCode::kRecoverFileError);
@@ -1478,16 +1456,16 @@ TEST_F(CurveFSTest, testRecoverFile) {
        fileInfo1.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo2.set_filetype(FileType::INODE_DIRECTORY);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(3)
-                .WillOnce(Return(StoreStatus::KeyNotExist))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)));
+            .Times(3)
+            .WillOnce(Return(StoreStatus::KeyNotExist))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)));
 
        EXPECT_CALL(*storage_, RenameFile(_, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::InternalError));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::InternalError));
 
        ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0),
                  StatusCode::kStorageError);
@@ -1502,22 +1480,22 @@ TEST_F(CurveFSTest, testRenameFile) {
        fileInfo1.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo2.set_filetype(FileType::INODE_DIRECTORY);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(AtLeast(4))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(AtLeast(4))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(Return(StoreStatus::KeyNotExist));
 
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::KeyNotExist));
 
        EXPECT_CALL(*storage_, RenameFile(_, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
 
        ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0),
                  StatusCode::kOK);
@@ -1528,18 +1506,18 @@
        FileInfo fileInfo1;
        fileInfo1.set_filetype(FileType::INODE_PAGEFILE);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)));
+            .Times(2)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)));
 
        std::vector<FileInfo> snapshotFileInfos;
        snapshotFileInfos.push_back(fileInfo1);
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos),
-                                Return(StoreStatus::OK)));
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos),
+                            Return(StoreStatus::OK)));
 
        ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0),
                  StatusCode::kFileUnderSnapShot);
@@ -1548,8 +1526,8 @@
     // old file not exist
     {
         EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::KeyNotExist));
 
        ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0),
                  StatusCode::kFileNotExists);
@@ -1560,16 +1538,16 @@
        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_PAGEFILE);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(3)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)))
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(3)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)))
+            .WillOnce(Return(StoreStatus::KeyNotExist));
 
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::KeyNotExist));
 
        ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0),
                  StatusCode::kFileNotExists);
@@ -1582,22 +1560,22 @@
        fileInfo1.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo2.set_filetype(FileType::INODE_DIRECTORY);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(AtLeast(4))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(AtLeast(4))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(Return(StoreStatus::KeyNotExist));
 
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::KeyNotExist));
 
        EXPECT_CALL(*storage_, RenameFile(_, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::InternalError));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::InternalError));
 
        ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0),
                  StatusCode::kStorageError);
@@ -1625,9 +1603,9 @@
        FileInfo fileInfo;
        fileInfo.set_filetype(FileType::INODE_DIRECTORY);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(AtLeast(1))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
-                                Return(StoreStatus::OK)));
+            .Times(AtLeast(1))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
 
        ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0),
                  StatusCode::kNotSupported);
@@ -1644,33 +1622,33 @@
        fileInfo1.set_id(10);
        fileInfo3.set_id(11);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(6)
-                // 查找/file1
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // check /file1是否有快照
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // 查找/trash/file2
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo3),
-                                Return(StoreStatus::OK)))
-                // check /trash/file2是否有快照
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo3),
-                                Return(StoreStatus::OK)));
+            .Times(6)
+            // Find /file1
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            // Check if /file1 has a snapshot
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            // Find /trash/file2
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo3), Return(StoreStatus::OK)))
+            // Check if /trash/file2 has a snapshot
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo3), Return(StoreStatus::OK)));
 
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(3)
-                .WillOnce(Return(StoreStatus::KeyNotExist))
-                .WillOnce(Return(StoreStatus::KeyNotExist))
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(3)
+            .WillOnce(Return(StoreStatus::KeyNotExist))
+            .WillOnce(Return(StoreStatus::KeyNotExist))
+            .WillOnce(Return(StoreStatus::KeyNotExist));
 
        EXPECT_CALL(*storage_, ReplaceFileAndRecycleOldFile(_, _, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
 
        ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 10, 11),
                  StatusCode::kOK);
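     // The rename cases below script storage_->GetFile with an ordered list
     // of WillOnce clauses: gmock consumes one clause per call, so each
     // clause answers exactly one lookup step inside RenameFile. A minimal
     // sketch of the idea (illustrative only, not code from this patch):
     //
     //   EXPECT_CALL(*storage_, GetFile(_, _, _))
     //       .Times(6)
     //       .WillOnce(...)   // 1st call: resolve the source path
     //       .WillOnce(...)   // 2nd call: snapshot check on the source
     //       .WillOnce(...);  // 3rd call: resolve the destination, and so on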
@@ -1683,18 +1661,18 @@
        fileInfo1.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo2.set_filetype(FileType::INODE_DIRECTORY);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(4)
-                // 查找/file1
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // check /file1是否有快照
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // 查找/trash/file2
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)));
+            .Times(4)
+            // Find /file1
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            // Check if /file1 has a snapshot
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            // Find /trash/file2
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)));
 
        ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0),
                  StatusCode::kFileExists);
@@ -1707,31 +1685,31 @@
        fileInfo1.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo2.set_filetype(FileType::INODE_DIRECTORY);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(6)
-                // 查找/file1
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // check /file1是否有快照
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // 查找/trash/file2
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // check /trash/file2是否有快照
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)));
+            .Times(6)
+            // Find /file1
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            // Check if /file1 has a snapshot
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            // Find /trash/file2
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            // Check if /trash/file2 has a snapshot
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)));
 
        std::vector<FileInfo> snapshotFileInfos;
        snapshotFileInfos.push_back(fileInfo1);
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(2)
-                .WillOnce(Return(StoreStatus::KeyNotExist))
-                .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos),
-                                Return(StoreStatus::OK)));
+            .Times(2)
+            .WillOnce(Return(StoreStatus::KeyNotExist))
+            .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos),
+                            Return(StoreStatus::OK)));
 
        ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0),
                  StatusCode::kFileUnderSnapShot);
@@ -1744,32 +1722,32 @@
        fileInfo1.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo2.set_filetype(FileType::INODE_DIRECTORY);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(6)
-                // 查找/file1
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // check /file1是否有快照
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // 查找/trash/file2
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // check /trash/file2是否有快照
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)));
+            .Times(6)
+            // Find /file1
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            // Check if /file1 has a snapshot
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            // Find /trash/file2
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            // Check if /trash/file2 has a snapshot
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)));
 
        EXPECT_CALL(*storage_,
 ReplaceFileAndRecycleOldFile(_, _, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::InternalError));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::InternalError));
 
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(2)
-                .WillOnce(Return(StoreStatus::KeyNotExist))
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(2)
+            .WillOnce(Return(StoreStatus::KeyNotExist))
+            .WillOnce(Return(StoreStatus::KeyNotExist));
 
        ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0),
                  StatusCode::kStorageError);
@@ -1782,32 +1760,32 @@
        fileInfo1.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo2.set_filetype(FileType::INODE_DIRECTORY);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(6)
-                // 查找/file1
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // check /file1是否有快照
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // 查找/trash/file2
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // check /trash/file2是否有快照
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)));
+            .Times(6)
+            // Find /file1
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            // Check if /file1 has a snapshot
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            // Find /trash/file2
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            // Check if /trash/file2 has a snapshot
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)));
 
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(2)
-                .WillOnce(Return(StoreStatus::KeyNotExist))
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(2)
+            .WillOnce(Return(StoreStatus::KeyNotExist))
+            .WillOnce(Return(StoreStatus::KeyNotExist));
 
        EXPECT_CALL(*storage_, ReplaceFileAndRecycleOldFile(_, _, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::InternalError));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::InternalError));
 
        ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0),
                  StatusCode::kStorageError);
@@ -1820,32 +1798,32 @@
        fileInfo1.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo2.set_filetype(FileType::INODE_DIRECTORY);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(6)
-                // 查找/file1
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // check /file1是否有快照
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // 查找/trash/file2
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                // check /trash/file2是否有快照
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)));
+            .Times(6)
+            // Find /file1
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            // Check if /file1 has a snapshot
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            // Find /trash/file2
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            //
 Check if /trash/file2 has a snapshot
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)));
 
        EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
-                .Times(2)
-                .WillOnce(Return(StoreStatus::KeyNotExist))
-                .WillOnce(Return(StoreStatus::KeyNotExist));
+            .Times(2)
+            .WillOnce(Return(StoreStatus::KeyNotExist))
+            .WillOnce(Return(StoreStatus::KeyNotExist));
 
        EXPECT_CALL(*storage_, ReplaceFileAndRecycleOldFile(_, _, _, _))
-                .Times(1)
-                .WillOnce(Return(StoreStatus::OK));
+            .Times(1)
+            .WillOnce(Return(StoreStatus::OK));
 
        ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0),
                  StatusCode::kOK);
@@ -1859,18 +1837,18 @@
        fileInfo2.set_filetype(FileType::INODE_DIRECTORY);
        EXPECT_CALL(*storage_, GetFile(_, _, _))
            .Times(6)
-            // 查找/file1
+            // Find /file1
            .WillOnce(
                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
-            // check /file1是否有快照
+            // Check if /file1 has a snapshot
            .WillOnce(
                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
-            // 查找/trash/file2
+            // Find /trash/file2
            .WillOnce(
                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
            .WillOnce(
                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
-            // check /trash/file2是否有快照
+            // Check if /trash/file2 has a snapshot
            .WillOnce(
                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)))
            .WillOnce(
@@ -1908,26 +1886,25 @@ TEST_F(CurveFSTest, testExtendFile) {
        fileInfo2.set_filetype(FileType::INODE_PAGEFILE);
        fileInfo2.set_length(kMiniFileLength);
-
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)));
+            .Times(2)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)));
 
        ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 0),
                  StatusCode::kShrinkBiggerFile);
 
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)));
+            .Times(2)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)));
 
-        ASSERT_EQ(curvefs_->ExtendFile("/user1/file1",
-                                       kMiniFileLength), StatusCode::kOK);
+        ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", kMiniFileLength),
+                  StatusCode::kOK);
    }
 
     // test enlarge when size is not segment-aligned
@@ -1941,14 +1918,14 @@ TEST_F(CurveFSTest, testExtendFile) {
        fileInfo2.set_segmentsize(DefaultSegmentSize);
 
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
-                                Return(StoreStatus::OK)))
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo2),
-                                Return(StoreStatus::OK)));
+            .Times(2)
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)))
+            .WillOnce(
+                DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK)));
 
-        ASSERT_EQ(curvefs_->ExtendFile("/user1/file1",
-                                       1 + kMiniFileLength), StatusCode::kExtentUnitError);
+        ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 1 + kMiniFileLength),
+                  StatusCode::kExtentUnitError);
    }
 
     // test enlarge size ok
@@ -1962,11 +1939,11 @@ TEST_F(CurveFSTest, testExtendFile) {
        fileInfo2.set_segmentsize(DefaultSegmentSize);
 
        EXPECT_CALL(*storage_, GetFile(_, _, _))
-                .Times(2)
-                .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
- Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); FileInfo modifiedInfo; EXPECT_CALL(*storage_, PutFile(_)) @@ -1974,8 +1951,8 @@ TEST_F(CurveFSTest, testExtendFile) { .WillOnce( DoAll(SaveArg<0>(&modifiedInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMiniFileLength), StatusCode::kOK); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMiniFileLength), + StatusCode::kOK); // previous fileinfo doesn't has throttle params ASSERT_FALSE(modifiedInfo.has_throttleparams()); @@ -1999,11 +1976,11 @@ TEST_F(CurveFSTest, testExtendFile) { p2->set_limit(1); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); FileInfo modifiedInfo; EXPECT_CALL(*storage_, PutFile(_)) @@ -2011,16 +1988,14 @@ TEST_F(CurveFSTest, testExtendFile) { .WillOnce( DoAll(SaveArg<0>(&modifiedInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMiniFileLength), StatusCode::kOK); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMiniFileLength), + StatusCode::kOK); // previous fileinfo has throttle params and has been modified by user ASSERT_TRUE(modifiedInfo.has_throttleparams()); ASSERT_EQ(2, modifiedInfo.throttleparams().throttleparams_size()); - ASSERT_EQ( - 1, modifiedInfo.throttleparams().throttleparams()[0].limit()); - ASSERT_EQ( - 1, modifiedInfo.throttleparams().throttleparams()[1].limit()); + ASSERT_EQ(1, modifiedInfo.throttleparams().throttleparams()[0].limit()); + ASSERT_EQ(1, modifiedInfo.throttleparams().throttleparams()[1].limit()); } // test enlarge size ok, and update throttle params @@ -2041,11 +2016,11 @@ TEST_F(CurveFSTest, testExtendFile) { p2->set_limit(120 * kMB); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); FileInfo modifiedInfo; EXPECT_CALL(*storage_, PutFile(_)) @@ -2053,16 +2028,14 @@ TEST_F(CurveFSTest, testExtendFile) { .WillOnce( DoAll(SaveArg<0>(&modifiedInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMiniFileLength), StatusCode::kOK); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMiniFileLength), + StatusCode::kOK); // previous fileinfo has throttle params and has been modified by user ASSERT_TRUE(modifiedInfo.has_throttleparams()); ASSERT_EQ(2, modifiedInfo.throttleparams().throttleparams_size()); - ASSERT_NE( - 1, modifiedInfo.throttleparams().throttleparams()[0].limit()); - ASSERT_NE( - 1, modifiedInfo.throttleparams().throttleparams()[1].limit()); + ASSERT_NE(1, modifiedInfo.throttleparams().throttleparams()[0].limit()); + ASSERT_NE(1, modifiedInfo.throttleparams().throttleparams()[1].limit()); } // test size over maxsize @@ -2076,14 +2049,14 @@ TEST_F(CurveFSTest, testExtendFile) { 
fileInfo2.set_segmentsize(DefaultSegmentSize); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMaxFileLength), StatusCode::kFileLengthNotSupported); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMaxFileLength), + StatusCode::kFileLengthNotSupported); } // file not exist @@ -2097,14 +2070,13 @@ TEST_F(CurveFSTest, testExtendFile) { fileInfo2.set_segmentsize(DefaultSegmentSize); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::KeyNotExist)); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMiniFileLength), - StatusCode::kFileNotExists); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMiniFileLength), + StatusCode::kFileNotExists); } // extend directory @@ -2116,15 +2088,14 @@ TEST_F(CurveFSTest, testExtendFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMiniFileLength), - StatusCode::kNotSupported); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMiniFileLength), + StatusCode::kNotSupported); } } @@ -2135,20 +2106,19 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), - StatusCode::kOK); + ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kOK); } // file owner same with newowner @@ -2157,12 +2127,11 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(1) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner1"), - StatusCode::kOK); + ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner1"), StatusCode::kOK); } // file is under snapshot, can not changeOwner @@ -2171,16 +2140,16 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); 
fileInfo1.set_owner("owner1");
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(2)
- .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo1),
- Return(StoreStatus::OK)));
+ .Times(2)
+ .WillRepeatedly(
+ DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)));
std::vector<FileInfo> snapshotFileInfos;
snapshotFileInfos.push_back(fileInfo1);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos),
+ Return(StoreStatus::OK)));
ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"),
StatusCode::kFileUnderSnapShot);
@@ -2189,8 +2158,8 @@ TEST_F(CurveFSTest, testChangeOwner) {
// file not exist
{
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(Return(StoreStatus::KeyNotExist));
+ .Times(1)
+ .WillOnce(Return(StoreStatus::KeyNotExist));
ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"),
StatusCode::kFileNotExists);
@@ -2202,17 +2171,17 @@ TEST_F(CurveFSTest, testChangeOwner) {
fileInfo1.set_filetype(FileType::INODE_PAGEFILE);
fileInfo1.set_owner("owner1");
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(2)
- .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo1),
- Return(StoreStatus::OK)));
+ .Times(2)
+ .WillRepeatedly(
+ DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)));
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(Return(StoreStatus::KeyNotExist));
+ .Times(1)
+ .WillOnce(Return(StoreStatus::KeyNotExist));
EXPECT_CALL(*storage_, PutFile(_))
- .Times(1)
- .WillOnce(Return(StoreStatus::InternalError));
+ .Times(1)
+ .WillOnce(Return(StoreStatus::InternalError));
ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"),
StatusCode::kStorageError);
@@ -2224,20 +2193,19 @@ TEST_F(CurveFSTest, testChangeOwner) {
fileInfo1.set_filetype(FileType::INODE_DIRECTORY);
fileInfo1.set_owner("owner1");
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)));
EXPECT_CALL(*storage_, ListFile(_, _, _))
- .Times(1)
- .WillOnce(Return(StoreStatus::KeyNotExist));
+ .Times(1)
+ .WillOnce(Return(StoreStatus::KeyNotExist));
EXPECT_CALL(*storage_, PutFile(_))
- .Times(1)
- .WillOnce(Return(StoreStatus::OK));
+ .Times(1)
+ .WillOnce(Return(StoreStatus::OK));
- ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"),
- StatusCode::kOK);
+ ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kOK);
}
// changeOwner dir not empty
@@ -2246,16 +2214,16 @@ TEST_F(CurveFSTest, testChangeOwner) {
fileInfo1.set_filetype(FileType::INODE_DIRECTORY);
fileInfo1.set_owner("owner1");
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(fileInfo1),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK)));
std::vector<FileInfo> fileInfoList;
fileInfoList.push_back(fileInfo1);
EXPECT_CALL(*storage_, ListFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(fileInfoList),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK)));
ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"),
StatusCode::kDirNotEmpty);
@@ -2267,9 +2235,9 @@ TEST_F(CurveFSTest, testChangeOwner) {
fileInfo1.set_filetype(FileType::INODE_APPENDECFILE);
fileInfo1.set_owner("owner1");
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
-
.WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kNotSupported); @@ -2315,18 +2283,19 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 0, false, &segment), StatusCode::kOK); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 0, false, &segment), + StatusCode::kOK); } // test normal get & allocate not exist segment @@ -2343,29 +2312,28 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); - + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*mockChunkAllocator_, - AllocateChunkSegment(_, _, _, _, _, _)) - .Times(1) - .WillOnce(Return(true)); - + AllocateChunkSegment(_, _, _, _, _, _)) + .Times(1) + .WillOnce(Return(true)); EXPECT_CALL(*storage_, PutSegment(_, _, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 0, true, &segment), StatusCode::kOK); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 0, true, &segment), + StatusCode::kOK); } // file is a directory @@ -2379,14 +2347,15 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 0, false, &segment), StatusCode::kParaError); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 0, false, &segment), + StatusCode::kParaError); } // segment offset not align file segment size @@ -2403,14 +2372,15 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), 
Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 1, false, &segment), StatusCode::kParaError); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 1, false, &segment), + StatusCode::kParaError); } // file length < segment offset + segmentsize @@ -2427,14 +2397,15 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - kMiniFileLength, false, &segment), StatusCode::kParaError); + ASSERT_EQ(curvefs_->GetOrAllocateSegment( + "/user1/file2", kMiniFileLength, false, &segment), + StatusCode::kParaError); } // alloc chunk segment fail @@ -2451,24 +2422,24 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); - + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*mockChunkAllocator_, - AllocateChunkSegment(_, _, _, _, _, _)) - .Times(1) - .WillOnce(Return(false)); + AllocateChunkSegment(_, _, _, _, _, _)) + .Times(1) + .WillOnce(Return(false)); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 0, true, &segment), StatusCode::kSegmentAllocateError); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 0, true, &segment), + StatusCode::kSegmentAllocateError); } // put segment fail @@ -2485,29 +2456,28 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); - + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*mockChunkAllocator_, - AllocateChunkSegment(_, _, _, _, _, _)) - .Times(1) - .WillOnce(Return(true)); - + AllocateChunkSegment(_, _, _, _, _, _)) + .Times(1) + .WillOnce(Return(true)); EXPECT_CALL(*storage_, PutSegment(_, _, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 0, true, &segment), StatusCode::kStorageError); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 0, true, &segment), + StatusCode::kStorageError); } } @@ -2732,8 +2702,8 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { std::string fileName = "/snapshotFile1WithInvalidClientVersion"; EXPECT_CALL(*storage_, GetFile(_, _, _)) - 
.WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2753,10 +2723,10 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(Return(StoreStatus::OK)) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce(Return(StoreStatus::OK)) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2769,8 +2739,9 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { FileInfo snapShotFileInfoRet; ASSERT_EQ(curvefs_->CreateSnapShotFile( - "/snapshotFile1WithInvalidClientVersion2", - &snapShotFileInfoRet), StatusCode::kClientVersionNotMatch); + "/snapshotFile1WithInvalidClientVersion2", + &snapShotFileInfoRet), + StatusCode::kClientVersionNotMatch); } { // test client version empty invalid @@ -2780,10 +2751,10 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(Return(StoreStatus::OK)) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce(Return(StoreStatus::OK)) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2791,13 +2762,14 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { FileInfo info; ASSERT_EQ(StatusCode::kOK, - curvefs_->RefreshSession( - fileName, "", 0 , "", "127.0.0.1", 1234, "", &info)); + curvefs_->RefreshSession(fileName, "", 0, "", "127.0.0.1", + 1234, "", &info)); FileInfo snapShotFileInfoRet; ASSERT_EQ(curvefs_->CreateSnapShotFile( - "/snapshotFile1WithInvalidClientVersion2", - &snapShotFileInfoRet), StatusCode::kClientVersionNotMatch); + "/snapshotFile1WithInvalidClientVersion2", + &snapShotFileInfoRet), + StatusCode::kClientVersionNotMatch); } { // test under snapshot @@ -2806,9 +2778,9 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2819,24 +2791,22 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { snapShotFiles.push_back(fileInfo1); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); FileInfo snapShotFileInfoRet; ASSERT_EQ(curvefs_->CreateSnapShotFile("/snapshotFile1", - &snapShotFileInfoRet), StatusCode::kFileUnderSnapShot); + &snapShotFileInfoRet), + StatusCode::kFileUnderSnapShot); } { // test File is not PageFile - } - { + } { // test storage ListFile error - } - { + } { // test GenId error - } - { + } { // test create snapshot ok FileInfo originalFile; originalFile.set_id(1); @@ -2845,25 +2815,24 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - 
.WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<0>(2),
- Return(true)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<0>(2), Return(true)));
FileInfo snapShotFileInfo;
EXPECT_CALL(*storage_, SnapShotFile(_, _))
- .Times(1)
- .WillOnce(Return(StoreStatus::OK));
+ .Times(1)
+ .WillOnce(Return(StoreStatus::OK));
EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs())
.Times(1)
@@ -2871,15 +2840,16 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) {
// test client version valid
FileInfo snapShotFileInfoRet;
- ASSERT_EQ(curvefs_->CreateSnapShotFile("/originalFile",
- &snapShotFileInfoRet), StatusCode::kOK);
+ ASSERT_EQ(
+ curvefs_->CreateSnapShotFile("/originalFile", &snapShotFileInfoRet),
+ StatusCode::kOK);
ASSERT_EQ(snapShotFileInfoRet.parentid(), originalFile.id());
ASSERT_EQ(snapShotFileInfoRet.filename(),
- originalFile.filename() + "-" +
- std::to_string(originalFile.seqnum()) );
+ originalFile.filename() + "-" +
+ std::to_string(originalFile.seqnum()));
ASSERT_EQ(snapShotFileInfoRet.filestatus(), FileStatus::kFileCreated);
- ASSERT_EQ(
- snapShotFileInfoRet.filetype(), FileType::INODE_SNAPSHOT_PAGEFILE);
+ ASSERT_EQ(snapShotFileInfoRet.filetype(),
+ FileType::INODE_SNAPSHOT_PAGEFILE);
}
{
// test create snapshot ok
@@ -2890,25 +2860,24 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<0>(2),
- Return(true)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<0>(2), Return(true)));
FileInfo snapShotFileInfo;
EXPECT_CALL(*storage_, SnapShotFile(_, _))
- .Times(1)
- .WillOnce(Return(StoreStatus::OK));
+ .Times(1)
+ .WillOnce(Return(StoreStatus::OK));
EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs())
.Times(1)
@@ -2917,14 +2886,15 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) {
// test client version valid
FileInfo snapShotFileInfoRet;
ASSERT_EQ(curvefs_->CreateSnapShotFile("/originalFile2",
- &snapShotFileInfoRet), StatusCode::kOK);
+ &snapShotFileInfoRet),
+ StatusCode::kOK);
ASSERT_EQ(snapShotFileInfoRet.parentid(), originalFile.id());
ASSERT_EQ(snapShotFileInfoRet.filename(),
- originalFile.filename() + "-" +
- std::to_string(originalFile.seqnum()) );
+ originalFile.filename() + "-" +
+ std::to_string(originalFile.seqnum()));
ASSERT_EQ(snapShotFileInfoRet.filestatus(), FileStatus::kFileCreated);
- ASSERT_EQ(
- snapShotFileInfoRet.filetype(), FileType::INODE_SNAPSHOT_PAGEFILE);
+ ASSERT_EQ(snapShotFileInfoRet.filetype(),
+ FileType::INODE_SNAPSHOT_PAGEFILE);
}
{
// test storage snapshotFile Error
@@ -2934,22 +2904,21 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) {
TEST_F(CurveFSTest, testListSnapShotFile) {
{
// workPath error
- }
- {
+ } {
// dir not support
std::vector<FileInfo> snapFileInfos;
ASSERT_EQ(curvefs_->ListSnapShotFile("/", &snapFileInfos),
- StatusCode::kNotSupported);
+ StatusCode::kNotSupported);
}
{
// lookupFile error
std::vector<FileInfo> snapFileInfos;
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(Return(StoreStatus::KeyNotExist));
+ .Times(1)
+ .WillOnce(Return(StoreStatus::KeyNotExist));
ASSERT_EQ(curvefs_->ListSnapShotFile("/originalFile", &snapFileInfos),
- StatusCode::kFileNotExists);
+ StatusCode::kFileNotExists);
}
{
// check type not support
@@ -2960,13 +2929,13 @@ TEST_F(CurveFSTest, testListSnapShotFile) {
originalFile.set_filetype(FileType::INODE_DIRECTORY);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapFileInfos;
ASSERT_EQ(curvefs_->ListSnapShotFile("originalFile", &snapFileInfos),
- StatusCode::kNotSupported);
+ StatusCode::kNotSupported);
}
{
// ListFile error
@@ -2977,17 +2946,17 @@ TEST_F(CurveFSTest, testListSnapShotFile) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(Return(StoreStatus::InternalError));
+ .Times(1)
+ .WillOnce(Return(StoreStatus::InternalError));
std::vector<FileInfo> snapFileInfos;
ASSERT_EQ(curvefs_->ListSnapShotFile("originalFile", &snapFileInfos),
- StatusCode::kStorageError);
+ StatusCode::kStorageError);
}
{
// ListFile ok
@@ -2998,37 +2967,36 @@ TEST_F(CurveFSTest, testListSnapShotFile) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapFileInfos;
- FileInfo snapShotFile;
+ FileInfo snapShotFile;
snapShotFile.set_parentid(1);
snapFileInfos.push_back(snapShotFile);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapFileInfos),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapFileInfos),
+ Return(StoreStatus::OK)));
std::vector<FileInfo> snapFileInfosRet;
ASSERT_EQ(curvefs_->ListSnapShotFile("originalFile", &snapFileInfosRet),
- StatusCode::kOK);
+ StatusCode::kOK);
ASSERT_EQ(snapFileInfosRet.size(), 1);
ASSERT_EQ(snapFileInfosRet[0].SerializeAsString(),
- snapShotFile.SerializeAsString());
- }
+ snapShotFile.SerializeAsString());
+ }
}
-
TEST_F(CurveFSTest, testGetSnapShotFileInfo) {
{
// ListSnapShotFile error
FileInfo snapshotFileInfo;
ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/", 1, &snapshotFileInfo),
- StatusCode::kNotSupported);
+ StatusCode::kNotSupported);
}
{
// snapfile not exist(not under snapshot)
@@ -3039,19 +3007,20 @@ TEST_F(CurveFSTest, testGetSnapShotFileInfo) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile),
Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
FileInfo snapshotFileInfo;
- ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile",
- 1, &snapshotFileInfo), StatusCode::kSnapshotFileNotExists);
+ ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile", 1,
+ &snapshotFileInfo),
+ StatusCode::kSnapshotFileNotExists);
}
{
// under snapshot, butsnapfile not exist
@@ -3062,22 +3031,23 @@ TEST_F(CurveFSTest, testGetSnapShotFileInfo) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
snapInfo.set_seqnum(2);
snapShotFiles.push_back(snapInfo);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
FileInfo snapshotFileInfo;
- ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile",
- 1, &snapshotFileInfo), StatusCode::kSnapshotFileNotExists);
+ ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile", 1,
+ &snapshotFileInfo),
+ StatusCode::kSnapshotFileNotExists);
}
{
// test ok
@@ -3088,24 +3058,25 @@ TEST_F(CurveFSTest, testGetSnapShotFileInfo) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
snapInfo.set_seqnum(1);
snapShotFiles.push_back(snapInfo);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
FileInfo snapshotFileInfo;
- ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile",
- 1, &snapshotFileInfo), StatusCode::kOK);
+ ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile", 1,
+ &snapshotFileInfo),
+ StatusCode::kOK);
ASSERT_EQ(snapshotFileInfo.SerializeAsString(),
- snapInfo.SerializeAsString());
+ snapInfo.SerializeAsString());
}
}
@@ -3114,7 +3085,7 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) {
// GetSnapShotFileInfo error
PageFileSegment segment;
ASSERT_EQ(curvefs_->GetSnapShotFileSegment("/", 1, 0, &segment),
- StatusCode::kNotSupported);
+ StatusCode::kNotSupported);
}
{
// offset not align
@@ -3125,9 +3096,9 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
@@ -3135,13 +3106,14 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) {
snapInfo.set_segmentsize(DefaultSegmentSize);
snapShotFiles.push_back(snapInfo);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
-
Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
PageFileSegment segment;
- ASSERT_EQ(curvefs_->GetSnapShotFileSegment("/originalFile",
- 1, 1, &segment), StatusCode::kParaError);
+ ASSERT_EQ(
+ curvefs_->GetSnapShotFileSegment("/originalFile", 1, 1, &segment),
+ StatusCode::kParaError);
}
{
// storage->GetSegment return error
@@ -3154,11 +3126,11 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(2)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)))
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(2)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)))
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
@@ -3167,17 +3139,18 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) {
snapInfo.set_length(DefaultSegmentSize);
snapShotFiles.push_back(snapInfo);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
EXPECT_CALL(*storage_, GetSegment(_, _, _))
- .Times(1)
- .WillOnce(Return(StoreStatus::KeyNotExist));
+ .Times(1)
+ .WillOnce(Return(StoreStatus::KeyNotExist));
PageFileSegment segment;
- ASSERT_EQ(curvefs_->GetSnapShotFileSegment("/originalFile",
- 1, 0, &segment), StatusCode::kSegmentNotAllocated);
+ ASSERT_EQ(
+ curvefs_->GetSnapShotFileSegment("/originalFile", 1, 0, &segment),
+ StatusCode::kSegmentNotAllocated);
}
{
// ok
@@ -3190,12 +3163,11 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(2)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)))
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
-
+ .Times(2)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)))
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
@@ -3204,9 +3176,9 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) {
snapInfo.set_length(DefaultSegmentSize);
snapShotFiles.push_back(snapInfo);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
PageFileSegment expectSegment;
expectSegment.set_logicalpoolid(1);
@@ -3214,20 +3186,21 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) {
expectSegment.set_chunksize(curvefs_->GetDefaultChunkSize());
expectSegment.set_startoffset(0);
- PageFileChunkInfo *chunkInfo = expectSegment.add_chunks();
+ PageFileChunkInfo* chunkInfo = expectSegment.add_chunks();
chunkInfo->set_chunkid(1);
chunkInfo->set_copysetid(1);
EXPECT_CALL(*storage_, GetSegment(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(expectSegment),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(expectSegment),
+ Return(StoreStatus::OK)));
PageFileSegment segment;
- ASSERT_EQ(curvefs_->GetSnapShotFileSegment("/originalFile",
- 1, 0, &segment), StatusCode::kOK);
+ ASSERT_EQ(
+ curvefs_->GetSnapShotFileSegment("/originalFile", 1, 0, &segment),
+
StatusCode::kOK);
ASSERT_EQ(expectSegment.SerializeAsString(),
- segment.SerializeAsString());
+ segment.SerializeAsString());
}
}
@@ -3236,7 +3209,7 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) {
// GetSnapShotFileInfo error
FileInfo snapshotFileInfo;
ASSERT_EQ(curvefs_->DeleteFileSnapShotFile("/", 1, nullptr),
- StatusCode::kNotSupported);
+ StatusCode::kNotSupported);
}
{
// under deleteing
@@ -3247,9 +3220,9 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
@@ -3258,12 +3231,12 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) {
snapShotFiles.push_back(snapInfo);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
EXPECT_EQ(curvefs_->DeleteFileSnapShotFile("/originalFile", 1, nullptr),
- StatusCode::kSnapshotDeleting);
+ StatusCode::kSnapshotDeleting);
}
{
// delete snapshot file filetype error (internal case)
@@ -3274,9 +3247,9 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
@@ -3286,12 +3259,12 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) {
snapShotFiles.push_back(snapInfo);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
EXPECT_EQ(curvefs_->DeleteFileSnapShotFile("/originalFile", 1, nullptr),
- StatusCode::KInternalError);
+ StatusCode::KInternalError);
}
{
// delete storage error
@@ -3302,9 +3275,9 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
@@ -3315,16 +3288,16 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) {
snapShotFiles.push_back(snapInfo);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
EXPECT_CALL(*storage_, PutFile(_))
- .Times(1)
- .WillOnce(Return(StoreStatus::InternalError));
+ .Times(1)
+ .WillOnce(Return(StoreStatus::InternalError));
EXPECT_EQ(curvefs_->DeleteFileSnapShotFile("/originalFile", 1, nullptr),
- StatusCode::KInternalError);
+ StatusCode::KInternalError);
}
{
// delete snapshot ok
@@ -3335,9 +3308,9 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+
DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
@@ -3348,21 +3321,20 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) {
snapShotFiles.push_back(snapInfo);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
EXPECT_CALL(*storage_, PutFile(_))
- .Times(1)
- .WillOnce(Return(StoreStatus::OK));
+ .Times(1)
+ .WillOnce(Return(StoreStatus::OK));
- EXPECT_CALL(*mockcleanManager_,
- SubmitDeleteSnapShotFileJob(_, _))
- .Times(1)
- .WillOnce(Return(true));
+ EXPECT_CALL(*mockcleanManager_, SubmitDeleteSnapShotFileJob(_, _))
+ .Times(1)
+ .WillOnce(Return(true));
EXPECT_EQ(curvefs_->DeleteFileSnapShotFile("/originalFile", 1, nullptr),
- StatusCode::kOK);
+ StatusCode::kOK);
}
{
// message the snapshot delete manager error, return error
@@ -3373,9 +3345,9 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
@@ -3386,21 +3358,20 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) {
snapShotFiles.push_back(snapInfo);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
EXPECT_CALL(*storage_, PutFile(_))
- .Times(1)
- .WillOnce(Return(StoreStatus::OK));
+ .Times(1)
+ .WillOnce(Return(StoreStatus::OK));
- EXPECT_CALL(*mockcleanManager_,
- SubmitDeleteSnapShotFileJob(_, _))
- .Times(1)
- .WillOnce(Return(false));
+ EXPECT_CALL(*mockcleanManager_, SubmitDeleteSnapShotFileJob(_, _))
+ .Times(1)
+ .WillOnce(Return(false));
EXPECT_EQ(curvefs_->DeleteFileSnapShotFile("/originalFile", 1, nullptr),
- StatusCode::KInternalError);
+ StatusCode::KInternalError);
}
}
@@ -3409,7 +3380,7 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) {
{
PageFileSegment segment;
ASSERT_EQ(curvefs_->GetSnapShotFileSegment("/", 1, 0, &segment),
- StatusCode::kNotSupported);
+ StatusCode::kNotSupported);
}
// snapshot file is not deleting
@@ -3421,9 +3392,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
@@ -3431,14 +3402,15 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) {
snapInfo.set_filestatus(FileStatus::kFileCreated);
snapShotFiles.push_back(snapInfo);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
FileStatus fileStatus;
uint32_t progress;
- ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile",
- 1, &fileStatus, &progress), StatusCode::kOK);
+ ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1,
+ &fileStatus, &progress),
+ StatusCode::kOK);
ASSERT_EQ(fileStatus,
FileStatus::kFileCreated);
ASSERT_EQ(progress, 0);
}
@@ -3452,9 +3424,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(2)
- .WillRepeatedly(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(2)
+ .WillRepeatedly(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
@@ -3464,21 +3436,21 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) {
std::vector<FileInfo> snapShotFiles2;
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(2)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)))
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles2),
- Return(StoreStatus::OK)));
-
- EXPECT_CALL(*mockcleanManager_,
- GetTask(_))
- .Times(1)
- .WillOnce(Return(nullptr));
+ .Times(2)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(snapShotFiles), Return(StoreStatus::OK)))
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles2),
+ Return(StoreStatus::OK)));
+
+ EXPECT_CALL(*mockcleanManager_, GetTask(_))
+ .Times(1)
+ .WillOnce(Return(nullptr));
FileStatus fileStatus;
uint32_t progress;
- ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile",
- 1, &fileStatus, &progress), StatusCode::kSnapshotFileNotExists);
+ ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1,
+ &fileStatus, &progress),
+ StatusCode::kSnapshotFileNotExists);
ASSERT_EQ(progress, 100);
}
@@ -3491,9 +3463,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(2)
- .WillRepeatedly(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(2)
+ .WillRepeatedly(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
@@ -3501,19 +3473,19 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) {
snapInfo.set_filestatus(FileStatus::kFileDeleting);
snapShotFiles.push_back(snapInfo);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(2)
- .WillRepeatedly(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(2)
+ .WillRepeatedly(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
- EXPECT_CALL(*mockcleanManager_,
- GetTask(_))
- .Times(1)
- .WillOnce(Return(nullptr));
+ EXPECT_CALL(*mockcleanManager_, GetTask(_))
+ .Times(1)
+ .WillOnce(Return(nullptr));
FileStatus fileStatus;
uint32_t progress;
- ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile",
- 1, &fileStatus, &progress), StatusCode::kSnapshotFileDeleteError);
+ ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1,
+ &fileStatus, &progress),
+ StatusCode::kSnapshotFileDeleteError);
ASSERT_EQ(fileStatus, FileStatus::kFileDeleting);
ASSERT_EQ(progress, 0);
}
@@ -3527,9 +3499,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
@@ -3537,24 +3509,25 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) {
snapInfo.set_filestatus(FileStatus::kFileDeleting);
snapShotFiles.push_back(snapInfo);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
-
.WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
auto task =
- std::make_shared<SnapShotCleanTask>(1, nullptr, originalFile);
+ std::make_shared<SnapShotCleanTask>(1, nullptr, originalFile);
TaskProgress taskProgress;
taskProgress.SetProgress(50);
taskProgress.SetStatus(TaskStatus::PROGRESSING);
task->SetTaskProgress(taskProgress);
EXPECT_CALL(*mockcleanManager_, GetTask(_))
- .Times(1)
- .WillOnce(Return(task));
+ .Times(1)
+ .WillOnce(Return(task));
FileStatus fileStatus;
uint32_t progress;
- ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile",
- 1, &fileStatus, &progress), StatusCode::kOK);
+ ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1,
+ &fileStatus, &progress),
+ StatusCode::kOK);
ASSERT_EQ(fileStatus, FileStatus::kFileDeleting);
ASSERT_EQ(progress, 50);
}
@@ -3568,9 +3541,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
@@ -3578,24 +3551,25 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) {
snapInfo.set_filestatus(FileStatus::kFileDeleting);
snapShotFiles.push_back(snapInfo);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
auto task =
- std::make_shared<SnapShotCleanTask>(1, nullptr, originalFile);
+ std::make_shared<SnapShotCleanTask>(1, nullptr, originalFile);
TaskProgress taskProgress;
taskProgress.SetProgress(50);
taskProgress.SetStatus(TaskStatus::FAILED);
task->SetTaskProgress(taskProgress);
EXPECT_CALL(*mockcleanManager_, GetTask(_))
- .Times(1)
- .WillOnce(Return(task));
+ .Times(1)
+ .WillOnce(Return(task));
FileStatus fileStatus;
uint32_t progress;
- ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile",
- 1, &fileStatus, &progress), StatusCode::kOK);
+ ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1,
+ &fileStatus, &progress),
+ StatusCode::kOK);
ASSERT_EQ(fileStatus, FileStatus::kFileDeleting);
ASSERT_EQ(progress, 50);
}
@@ -3609,9 +3583,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) {
originalFile.set_filetype(FileType::INODE_PAGEFILE);
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(originalFile),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK)));
std::vector<FileInfo> snapShotFiles;
FileInfo snapInfo;
@@ -3619,66 +3593,67 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) {
snapInfo.set_filestatus(FileStatus::kFileDeleting);
snapShotFiles.push_back(snapInfo);
EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles),
+ Return(StoreStatus::OK)));
auto task =
- std::make_shared<SnapShotCleanTask>(1, nullptr, originalFile);
+ std::make_shared<SnapShotCleanTask>(1, nullptr, originalFile);
TaskProgress taskProgress;
taskProgress.SetProgress(100);
taskProgress.SetStatus(TaskStatus::SUCCESS);
task->SetTaskProgress(taskProgress);
EXPECT_CALL(*mockcleanManager_, GetTask(_))
- .Times(1)
- .WillOnce(Return(task));
+ .Times(1)
+ .WillOnce(Return(task));
FileStatus fileStatus;
uint32_t progress;
- ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile",
- 1, &fileStatus,
&progress), StatusCode::kOK); + ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1, + &fileStatus, &progress), + StatusCode::kOK); ASSERT_EQ(fileStatus, FileStatus::kFileDeleting); ASSERT_EQ(progress, 100); } } TEST_F(CurveFSTest, testOpenFile) { - // 文件不存在 + // File does not exist { ProtoSession protoSession; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); - ASSERT_EQ(curvefs_->OpenFile("/file1", "127.0.0.1", - &protoSession, &fileInfo), - StatusCode::kFileNotExists); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); + ASSERT_EQ( + curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), + StatusCode::kFileNotExists); ASSERT_EQ(curvefs_->GetOpenFileNum(), 0); } - // open目录 + // Open directory { ProtoSession protoSession; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->OpenFile("/file1", "127.0.0.1", - &protoSession, &fileInfo), - StatusCode::kNotSupported); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); + ASSERT_EQ( + curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), + StatusCode::kNotSupported); ASSERT_EQ(curvefs_->GetOpenFileNum(), 0); } - // 执行成功 + // Execution successful { ProtoSession protoSession; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ( curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), @@ -3854,19 +3829,19 @@ TEST_F(CurveFSTest, testOpenFile) { TEST_F(CurveFSTest, testCloseFile) { ProtoSession protoSession; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); - // 先插入session + // Insert session first EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ( curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), StatusCode::kOK); - // 执行成功 + // Execution successful { EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(1) @@ -3880,39 +3855,41 @@ TEST_F(CurveFSTest, testCloseFile) { TEST_F(CurveFSTest, testRefreshSession) { ProtoSession protoSession; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); - // 先插入session + // Insert session first EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->OpenFile("/file1", "127.0.0.1", - &protoSession, &fileInfo), - StatusCode::kOK); + ASSERT_EQ( + curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), + StatusCode::kOK); - // 文件不存在 + // File does not exist { - FileInfo fileInfo1; + FileInfo fileInfo1; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->RefreshSession("/file1", "sessionidxxxxx", 12345, - "signaturexxxx", "127.0.0.1", 1234, "", &fileInfo1), + "signaturexxxx", "127.0.0.1", 1234, + "", &fileInfo1), StatusCode::kFileNotExists); } - // 执行成功 + // Execution successful { - FileInfo fileInfo1; + FileInfo fileInfo1; EXPECT_CALL(*storage_, GetFile(_, 
_, _))
- .Times(1)
- .WillOnce(Return(StoreStatus::OK));
+ .Times(1)
+ .WillOnce(Return(StoreStatus::OK));
uint64_t date = ::curve::common::TimeUtility::GetTimeofDayUs();
ASSERT_EQ(curvefs_->RefreshSession("/file1", protoSession.sessionid(),
- date, "signaturexxxx", "127.0.0.1", 1234, "", &fileInfo1),
+ date, "signaturexxxx", "127.0.0.1",
+ 1234, "", &fileInfo1),
StatusCode::kOK);
ASSERT_EQ(1, curvefs_->GetOpenFileNum());
}
@@ -3921,39 +3898,41 @@ TEST_F(CurveFSTest, testRefreshSession) {
TEST_F(CurveFSTest, testCheckRenameNewfilePathOwner) {
uint64_t date = TimeUtility::GetTimeofDayUs();
- // root用户,签名匹配,date超时
+ // Root user: matching signature succeeds, expired date fails
{
std::string filename = "/file1";
- std::string str2sig = Authenticator::GetString2Signature(date,
- authOptions_.rootOwner);
- std::string sig = Authenticator::CalcString2Signature(str2sig,
- authOptions_.rootPassword);
- ASSERT_EQ(curvefs_->CheckDestinationOwner(filename,
- authOptions_.rootOwner, sig, date),
+ std::string str2sig =
+ Authenticator::GetString2Signature(date, authOptions_.rootOwner);
+ std::string sig = Authenticator::CalcString2Signature(
+ str2sig, authOptions_.rootPassword);
+ ASSERT_EQ(curvefs_->CheckDestinationOwner(
+ filename, authOptions_.rootOwner, sig, date),
StatusCode::kOK);
- ASSERT_EQ(curvefs_->CheckDestinationOwner(filename,
- authOptions_.rootOwner, sig, date + 15 * 2000 * 2000),
- StatusCode::kOwnerAuthFail);
+ ASSERT_EQ(
+ curvefs_->CheckDestinationOwner(filename, authOptions_.rootOwner,
+ sig, date + 15 * 2000 * 2000),
+ StatusCode::kOwnerAuthFail);
}
- // root用户,签名不匹配
+ // Root user, signature mismatch
{
- ASSERT_EQ(curvefs_->CheckDestinationOwner("/file1",
- authOptions_.rootOwner, "wrongpass", date),
+ ASSERT_EQ(curvefs_->CheckDestinationOwner(
+ "/file1", authOptions_.rootOwner, "wrongpass", date),
StatusCode::kOwnerAuthFail);
}
- // 普通用户,根目录下的文件非root用户认证失败
+ // Normal user: auth fails for a root-owned file under the root
+ // directory
{
FileInfo fileInfo;
fileInfo.set_owner(authOptions_.rootOwner);
EXPECT_CALL(*storage_, GetFile(_, _, _))
.Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
- Return(StoreStatus::OK)));
- ASSERT_EQ(curvefs_->CheckDestinationOwner("/file1",
- "normaluser", "wrongpass", date),
+ .WillOnce(
+ DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
+ ASSERT_EQ(curvefs_->CheckDestinationOwner("/file1", "normaluser",
+ "wrongpass", date),
StatusCode::kOwnerAuthFail);
}
}
@@ -3961,16 +3940,16 @@ TEST_F(CurveFSTest, testCheckPathOwner) {
uint64_t date = TimeUtility::GetTimeofDayUs();
- // root用户,签名匹配, 并检测date过期
+ // Root user: matching signature succeeds, and an expired date is rejected
{
std::string filename = "/file1";
- std::string str2sig = Authenticator::GetString2Signature(date,
- authOptions_.rootOwner);
- std::string sig = Authenticator::CalcString2Signature(str2sig,
- authOptions_.rootPassword);
+ std::string str2sig =
+ Authenticator::GetString2Signature(date, authOptions_.rootOwner);
+ std::string sig = Authenticator::CalcString2Signature(
+ str2sig, authOptions_.rootPassword);
- ASSERT_EQ(curvefs_->CheckPathOwner(filename,
- authOptions_.rootOwner, sig, date),
+ ASSERT_EQ(curvefs_->CheckPathOwner(filename, authOptions_.rootOwner,
+ sig, date),
StatusCode::kOK);
ASSERT_EQ(curvefs_->CheckPathOwner(filename, authOptions_.rootOwner,
@@ -3978,168 +3957,176 @@ TEST_F(CurveFSTest, testCheckPathOwner) {
StatusCode::kOwnerAuthFail);
}
- // root用户,签名不匹配
+ // Root user, signature mismatch
{
ASSERT_EQ(curvefs_->CheckPathOwner("/file1", authOptions_.rootOwner,
- "wrongpass", date),
+ "wrongpass", date),
StatusCode::kOwnerAuthFail);
}
- // 普通用户,根目录下的文件非root用户认证成功, 并检测date超时
+ // Normal user: path check under the root directory succeeds without root
+ // credentials, while an expired date still fails
{
- ASSERT_EQ(curvefs_->CheckPathOwner("/file1", "normaluser",
- "wrongpass", date),
- StatusCode::kOK);
+ ASSERT_EQ(
+ curvefs_->CheckPathOwner("/file1", "normaluser", "wrongpass", date),
+ StatusCode::kOK);
ASSERT_EQ(curvefs_->CheckPathOwner("/file1", "normaluser", "wrongpass",
- date + 15 * 2000 * 2000),
+ date + 15 * 2000 * 2000),
StatusCode::kOwnerAuthFail);
}
}
TEST_F(CurveFSTest, testCheckFileOwner) {
uint64_t date = TimeUtility::GetTimeofDayUs();
- // root用户,签名匹配
+ // Root user, matching signature
{
std::string filename = "/file1";
- std::string str2sig = Authenticator::GetString2Signature(date,
- authOptions_.rootOwner);
- std::string sig = Authenticator::CalcString2Signature(str2sig,
- authOptions_.rootPassword);
+ std::string str2sig =
+ Authenticator::GetString2Signature(date, authOptions_.rootOwner);
+ std::string sig = Authenticator::CalcString2Signature(
+ str2sig, authOptions_.rootPassword);
- ASSERT_EQ(curvefs_->CheckFileOwner(filename,
- authOptions_.rootOwner, sig, date),
+ ASSERT_EQ(curvefs_->CheckFileOwner(filename, authOptions_.rootOwner,
+ sig, date),
StatusCode::kOK);
- ASSERT_EQ(curvefs_->CheckFileOwner(filename,
- authOptions_.rootOwner, sig, date + 15 * 2000 * 2000),
+ ASSERT_EQ(curvefs_->CheckFileOwner(filename, authOptions_.rootOwner,
+ sig, date + 15 * 2000 * 2000),
StatusCode::kOwnerAuthFail);
}
- // root用户,签名不匹配
+ // Root user, signature mismatch
{
- ASSERT_EQ(curvefs_->CheckFileOwner("/file1",
- authOptions_.rootOwner, "wrongpass", date),
+ ASSERT_EQ(curvefs_->CheckFileOwner("/file1", authOptions_.rootOwner,
+ "wrongpass", date),
StatusCode::kOwnerAuthFail);
}
- // 普通用户,根目录下的文件非root用户认证成功
+ // Normal user: owner check succeeds for a file under the root directory
+ // when the requester matches the file owner
{
FileInfo fileInfo;
fileInfo.set_owner("normaluser");
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
- ASSERT_EQ(curvefs_->CheckFileOwner("/file1",
- "normaluser", "", date), StatusCode::kOK);
+ ASSERT_EQ(curvefs_->CheckFileOwner("/file1", "normaluser", "", date),
+ StatusCode::kOK);
}
- // 普通用户,根目录下的文件非root用户认证失败
+ // Normal user: owner check fails for a file under the root directory
+ // when the requester does not match the file owner
{
FileInfo fileInfo;
fileInfo.set_owner("normaluser");
EXPECT_CALL(*storage_, GetFile(_, _, _))
- .Times(1)
- .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
- Return(StoreStatus::OK)));
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK)));
- ASSERT_EQ(curvefs_->CheckFileOwner("/file1",
- "normaluser1", "", date), StatusCode::kOwnerAuthFail);
+ ASSERT_EQ(curvefs_->CheckFileOwner("/file1", "normaluser1", "", date),
+ StatusCode::kOwnerAuthFail);
}
}
-
TEST_F(CurveFSTest, testCreateCloneFile) {
// test parm error
- ASSERT_EQ(curvefs_->CreateCloneFile("/file1", "owner1",
- FileType::INODE_DIRECTORY, kMiniFileLength, kStartSeqNum,
- curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr),
- StatusCode::kParaError);
+ ASSERT_EQ(curvefs_->CreateCloneFile(
+ "/file1", "owner1", FileType::INODE_DIRECTORY,
+ kMiniFileLength, kStartSeqNum,
+ curvefs_->GetDefaultChunkSize(), 0, 0,
"default", nullptr), + StatusCode::kParaError); - ASSERT_EQ(curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength - 1, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr), - StatusCode::kParaError); + ASSERT_EQ(curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength - 1, kStartSeqNum, + curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr), + StatusCode::kParaError); { // test file exist EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); - auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); + auto statusCode = curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, + kStartSeqNum, curvefs_->GetDefaultChunkSize(), 0, 0, "default", + nullptr); ASSERT_EQ(statusCode, StatusCode::kFileExists); } { // test get storage error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::InternalError)); - auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); + auto statusCode = curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, + kStartSeqNum, curvefs_->GetDefaultChunkSize(), 0, 0, "default", + nullptr); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { // test inode allocate error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(false)); + .Times(1) + .WillOnce(Return(false)); - auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); + auto statusCode = curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, + kStartSeqNum, curvefs_->GetDefaultChunkSize(), 0, 0, "default", + nullptr); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { // test put storage error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::InternalError)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(true)); + .Times(1) + .WillOnce(Return(true)); - auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); + auto statusCode = curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, + kStartSeqNum, curvefs_->GetDefaultChunkSize(), 0, 0, "default", + nullptr); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { // test ok EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + 
.Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(true)); + .Times(1) + .WillOnce(Return(true)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); FileInfo fileInfo; - auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, - "default", &fileInfo); + auto statusCode = curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, + kStartSeqNum, curvefs_->GetDefaultChunkSize(), 0, 0, "default", + &fileInfo); ASSERT_EQ(statusCode, StatusCode::kOK); ASSERT_EQ(fileInfo.filename(), "file1"); ASSERT_EQ(fileInfo.owner(), "owner1"); @@ -4156,54 +4143,58 @@ TEST_F(CurveFSTest, testCreateCloneFile) { TEST_F(CurveFSTest, testSetCloneFileStatus) { { // test path not exist - FileInfo fileInfo; + FileInfo fileInfo; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1/file2", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kFileNotExists); + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1/file2", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kFileNotExists); } { // test storage error FileInfo fileInfo; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1/file2", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kStorageError); + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1/file2", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kStorageError); } { // test WalkPath NOT DIRECTORY - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1/file2", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kFileNotExists); + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1/file2", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kFileNotExists); } { // test LookUpFile internal error - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1/file2", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kStorageError); + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1/file2", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kStorageError); } { // test inodeid not match @@ -4211,13 +4202,13 @@ TEST_F(CurveFSTest, testSetCloneFileStatus) { fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_id(100); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo),
- Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1", - 10, FileStatus::kFileCloned), - StatusCode::kFileIdNotMatch); + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1", 10, FileStatus::kFileCloned), + StatusCode::kFileIdNotMatch); } { // test filestatus not ok @@ -4226,43 +4217,41 @@ TEST_F(CurveFSTest, testSetCloneFileStatus) { FileStatus setStatus; StatusCode expectReturn; int putFileTime; - } testCases[] { + } testCases[]{ {FileStatus::kFileCloning, FileStatus::kFileCloneMetaInstalled, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCloning, FileStatus::kFileCloning, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCloneMetaInstalled, FileStatus::kFileCloned, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCloneMetaInstalled, - FileStatus::kFileCloneMetaInstalled, - StatusCode::kOK, 1}, - {FileStatus::kFileCloned, FileStatus::kFileCloned, - StatusCode::kOK, 1}, + FileStatus::kFileCloneMetaInstalled, StatusCode::kOK, 1}, + {FileStatus::kFileCloned, FileStatus::kFileCloned, StatusCode::kOK, + 1}, {FileStatus::kFileCreated, FileStatus::kFileBeingCloned, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileBeingCloned, FileStatus::kFileCreated, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileBeingCloned, FileStatus::kFileBeingCloned, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCloned, FileStatus::kFileBeingCloned, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileBeingCloned, FileStatus::kFileCloned, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCreated, FileStatus::kFileCreated, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCloning, FileStatus::kFileCloned, - StatusCode::kCloneStatusNotMatch, 0}, + StatusCode::kCloneStatusNotMatch, 0}, {FileStatus::kFileCloneMetaInstalled, FileStatus::kFileCloning, - StatusCode::kCloneStatusNotMatch, 0}, + StatusCode::kCloneStatusNotMatch, 0}, {FileStatus::kFileCreated, FileStatus::kFileCloned, - StatusCode::kCloneStatusNotMatch, 0}, + StatusCode::kCloneStatusNotMatch, 0}, {FileStatus::kFileDeleting, FileStatus::kFileBeingCloned, - StatusCode::kCloneStatusNotMatch, 0}, + StatusCode::kCloneStatusNotMatch, 0}, {FileStatus::kFileCloning, FileStatus::kFileBeingCloned, - StatusCode::kCloneStatusNotMatch, 0}, + StatusCode::kCloneStatusNotMatch, 0}, {FileStatus::kFileCloneMetaInstalled, FileStatus::kFileBeingCloned, - StatusCode::kCloneStatusNotMatch, 0} - }; + StatusCode::kCloneStatusNotMatch, 0}}; for (int i = 0; i < sizeof(testCases) / sizeof(testCases[0]); i++) { { @@ -4270,17 +4259,17 @@ TEST_F(CurveFSTest, testSetCloneFileStatus) { fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(testCases[i].originStatus); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(fileInfo), + Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(testCases[i].putFileTime)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(testCases[i].putFileTime)) + .WillOnce(Return(StoreStatus::OK)); - - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1", - kUnitializedFileID, testCases[i].setStatus), + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1", kUnitializedFileID, + testCases[i].setStatus), testCases[i].expectReturn); } } @@ -4291,17 +4280,17 @@ 
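A note on the table-driven loops just reformatted: writing for (int i = 0; i < sizeof(testCases) / sizeof(testCases[0]); i++) compares a signed index against an unsigned sizeof expression, which draws a -Wsign-compare warning. This patch does not change the behavior, but a range-for states the same intent without the arithmetic; a sketch against the same testCases[] table (field names as above):

    // Drop-in replacement for the sizeof()-based loop above; tc binds to one
    // row of testCases[] per iteration. C++17's std::size(testCases) is an
    // alternative when an index is still needed.
    for (const auto& tc : testCases) {
        // arm the GetFile/PutFile expectations from tc.originStatus and
        // tc.putFileTime, then assert tc.expectReturn exactly as the indexed
        // loop body does
    }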
TEST_F(CurveFSTest, testSetCloneFileStatus) { fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileCloneMetaInstalled); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/file1", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kStorageError); + ASSERT_EQ(curvefs_->SetCloneFileStatus("/file1", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kStorageError); } { // test put file ok @@ -4309,17 +4298,17 @@ TEST_F(CurveFSTest, testSetCloneFileStatus) { fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileCloneMetaInstalled); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/file1", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kOK); + ASSERT_EQ(curvefs_->SetCloneFileStatus("/file1", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kOK); } } @@ -4327,10 +4316,10 @@ TEST_F(CurveFSTest, Init) { // test getFile ok { FileInfo fileInfo1, fileInfo2, fileInfo3, fileInfo4, fileInfo5; - fileInfo1.set_parentid(ROOTINODEID+1); + fileInfo1.set_parentid(ROOTINODEID + 1); fileInfo2.set_parentid(ROOTINODEID); - fileInfo2.set_id(RECYCLEBININODEID+1); + fileInfo2.set_id(RECYCLEBININODEID + 1); fileInfo3.set_parentid(ROOTINODEID); fileInfo3.set_id(RECYCLEBININODEID); @@ -4355,30 +4344,23 @@ TEST_F(CurveFSTest, Init) { const struct { FileInfo info; - bool ret; + bool ret; } testCases[] = { - {fileInfo1, false}, - {fileInfo2, false}, - {fileInfo3, false}, - {fileInfo4, false}, - {fileInfo5, true}, + {fileInfo1, false}, {fileInfo2, false}, {fileInfo3, false}, + {fileInfo4, false}, {fileInfo5, true}, }; - for (int i = 0; i < sizeof(testCases)/ sizeof(testCases[0]); i++) { + for (int i = 0; i < sizeof(testCases) / sizeof(testCases[0]); i++) { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(testCases[i].info), - Return(StoreStatus::OK))); - - ASSERT_EQ(testCases[i].ret, kCurveFS.Init(storage_, - inodeIdGenerator_, - mockChunkAllocator_, - mockcleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions_, - topology_, - nullptr)); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(testCases[i].info), + Return(StoreStatus::OK))); + + ASSERT_EQ(testCases[i].ret, + kCurveFS.Init(storage_, inodeIdGenerator_, + mockChunkAllocator_, mockcleanManager_, + fileRecordManager_, allocStatistic_, + curveFSOptions_, topology_, nullptr)); } } @@ -4388,15 +4370,10 @@ TEST_F(CurveFSTest, Init) { .Times(1) .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(false, kCurveFS.Init(storage_, - inodeIdGenerator_, - mockChunkAllocator_, - mockcleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions_, - topology_, - nullptr)); + ASSERT_EQ(false, kCurveFS.Init(storage_, inodeIdGenerator_, + mockChunkAllocator_, 
mockcleanManager_, + fileRecordManager_, allocStatistic_, + curveFSOptions_, topology_, nullptr)); } // test getfile not exist @@ -4410,15 +4387,10 @@ TEST_F(CurveFSTest, Init) { .Times(1) .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(false, kCurveFS.Init(storage_, - inodeIdGenerator_, - mockChunkAllocator_, - mockcleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions_, - topology_, - nullptr)); + ASSERT_EQ(false, kCurveFS.Init(storage_, inodeIdGenerator_, + mockChunkAllocator_, mockcleanManager_, + fileRecordManager_, allocStatistic_, + curveFSOptions_, topology_, nullptr)); // putfile ok FileInfo fileInfo5; @@ -4436,15 +4408,10 @@ TEST_F(CurveFSTest, Init) { .Times(1) .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(true, kCurveFS.Init(storage_, - inodeIdGenerator_, - mockChunkAllocator_, - mockcleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions_, - topology_, - nullptr)); + ASSERT_EQ(true, kCurveFS.Init(storage_, inodeIdGenerator_, + mockChunkAllocator_, mockcleanManager_, + fileRecordManager_, allocStatistic_, + curveFSOptions_, topology_, nullptr)); } } @@ -4497,11 +4464,11 @@ TEST_F(CurveFSTest, ListAllVolumesOnCopyset) { { // normal test EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileVec1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileVec2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileVec1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileVec2), Return(StoreStatus::OK))); PageFileSegment segment2 = segment; PageFileSegment segment3 = segment; auto chunk = segment.add_chunks(); @@ -4515,41 +4482,39 @@ TEST_F(CurveFSTest, ListAllVolumesOnCopyset) { std::vector segVec2 = {segment2}; std::vector segVec3 = {segment3}; EXPECT_CALL(*storage_, ListSegment(_, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<1>(segVec1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<1>(segVec2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<1>(segVec3), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce(DoAll(SetArgPointee<1>(segVec1), Return(StoreStatus::OK))) + .WillOnce(DoAll(SetArgPointee<1>(segVec2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<1>(segVec3), Return(StoreStatus::OK))); ASSERT_EQ(StatusCode::kOK, - curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); + curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); ASSERT_EQ(1, fileNames.size()); ASSERT_EQ("file1", fileNames[0]); } // list file fail { EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileVec1), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileVec1), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(StatusCode::kStorageError, - curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); + curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); } // list segment fail { EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileVec1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileVec2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileVec1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileVec2), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSegment(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + 
.WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(StatusCode::kStorageError, - curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); + curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); } } @@ -4571,8 +4536,8 @@ TEST_F(CurveFSTest, TestUpdateFileThrottleParams) { FileInfo updatedFileInfo; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) .WillOnce( DoAll(SaveArg<0>(&updatedFileInfo), Return(StoreStatus::OK))); @@ -4635,8 +4600,8 @@ TEST(StripeParamTest, Test) { rc = CheckStripeParam(segmentSize, chunkSize, 4096, 128); EXPECT_EQ(StatusCode::kParaError, rc); - rc = CheckStripeParam(segmentSize, chunkSize, 4096, - segmentSize / chunkSize); + rc = + CheckStripeParam(segmentSize, chunkSize, 4096, segmentSize / chunkSize); EXPECT_EQ(StatusCode::kOK, rc); } diff --git a/test/mds/nameserver2/file_lock_test.cpp b/test/mds/nameserver2/file_lock_test.cpp index 25b524d195..6c5f14a943 100644 --- a/test/mds/nameserver2/file_lock_test.cpp +++ b/test/mds/nameserver2/file_lock_test.cpp @@ -19,26 +19,28 @@ * Created Date: 2019-04-03 * Author: hzchenwei7 */ +#include "src/mds/nameserver2/file_lock.h" + #include -#include #include +#include + #include // NOLINT -#include "src/mds/nameserver2/file_lock.h" -using ::testing::AtLeast; -using ::testing::StrEq; using ::testing::_; +using ::testing::AtLeast; +using ::testing::DoAll; using ::testing::Return; using ::testing::ReturnArg; -using ::testing::DoAll; using ::testing::SetArgPointee; +using ::testing::StrEq; namespace curve { namespace mds { FileLockManager flm(4); -class FileLockManagerTest: public ::testing::Test { +class FileLockManagerTest : public ::testing::Test { public: FileLockManagerTest() {} }; @@ -59,9 +61,7 @@ void ReadLock(const std::string& filePath, bool unlock = false) { } } -void Unlock(const std::string& filePath) { - flm.Unlock(filePath); -} +void Unlock(const std::string& filePath) { flm.Unlock(filePath); } TEST_F(FileLockManagerTest, Basic) { std::string filePath1 = "/home/dir1/file1"; @@ -115,62 +115,46 @@ TEST_F(FileLockManagerTest, UnlockInAnotherThread) { Unlock(filePath); } -class FileReadLockGuardTest: public ::testing::Test { +class FileReadLockGuardTest : public ::testing::Test { public: FileReadLockGuardTest() {} }; TEST_F(FileReadLockGuardTest, LockUnlockTest) { - { - FileReadLockGuard guard(&flm, "/"); - } + { FileReadLockGuard guard(&flm, "/"); } - { - FileReadLockGuard guard(&flm, "/a"); - } + { FileReadLockGuard guard(&flm, "/a"); } - { - FileReadLockGuard guard(&flm, "/a/b"); - } + { FileReadLockGuard guard(&flm, "/a/b"); } ASSERT_EQ(flm.GetLockEntryNum(), 0); } -class FileWriteLockGuardTest: public ::testing::Test { +class FileWriteLockGuardTest : public ::testing::Test { public: FileWriteLockGuardTest() {} }; TEST_F(FileWriteLockGuardTest, LockUnlockTest) { - { - FileWriteLockGuard guard(&flm, "/"); - } + { FileWriteLockGuard guard(&flm, "/"); } - { - FileWriteLockGuard guard(&flm, "/a"); - } + { FileWriteLockGuard guard(&flm, "/a"); } - { - FileWriteLockGuard guard(&flm, "/a/b"); - } + { FileWriteLockGuard guard(&flm, "/a/b"); } - { - FileWriteLockGuard guard(&flm, "/a", "/a"); - } + { FileWriteLockGuard guard(&flm, "/a", "/a"); } - { - FileWriteLockGuard guard(&flm, "/a", "/b"); - } + { FileWriteLockGuard guard(&flm, "/a", "/b"); } - { - FileWriteLockGuard guard(&flm, "/b", "/a"); - } + { FileWriteLockGuard guard(&flm, "/b", "/a"); 
} ASSERT_EQ(flm.GetLockEntryNum(), 0); } -// 以下这种情况,跑测试的时候会出现Segmentation fault,是锁的实现机制的问题 -// 要避免这样使用锁,已在代码里进行规避,以下注释的测试保留,提醒使用者注意 +// In the following scenario, a Segmentation fault may occur when running tests, +// due to issues with the locking mechanism. To avoid using locks in this way, +// precautions have been taken in the code. The commented-out test cases are +// retained to remind users to be cautious. /* TEST_F(FileWriteLockGuardTest, LockUnlockTest1) { { diff --git a/test/mds/nameserver2/file_record_test.cpp b/test/mds/nameserver2/file_record_test.cpp index 37a728b012..3369db4554 100644 --- a/test/mds/nameserver2/file_record_test.cpp +++ b/test/mds/nameserver2/file_record_test.cpp @@ -20,15 +20,16 @@ * Author : wuhanqing */ +#include "src/mds/nameserver2/file_record.h" + #include #include -#include //NOLINT -#include // NOLINT +#include //NOLINT +#include // NOLINT #include "src/common/timeutility.h" #include "src/mds/common/mds_define.h" -#include "src/mds/nameserver2/file_record.h" namespace curve { namespace mds { @@ -37,15 +38,15 @@ TEST(FileRecordTest, timeout_test) { butil::EndPoint ep; butil::str2endpoint("127.0.0.1:1111", &ep); - // 设置有效时间为1ms + // Set the effective time to 1ms FileRecord record(1 * 1000, "0.0.6", ep); - // 判断超时 + // Judgment timeout ASSERT_EQ(false, record.IsTimeout()); - // 判断版本号 + // Determine version number ASSERT_EQ("0.0.6", record.GetClientVersion()); - // 睡眠一段时间判断超时是否生效 + // Sleep for a period of time to determine if the timeout is effective std::this_thread::sleep_for(std::chrono::milliseconds(15)); ASSERT_EQ(true, record.IsTimeout()); @@ -89,9 +90,9 @@ TEST(FileRecordManagerTest, normal_test) { kInvalidPort); fileRecordManager.UpdateFileRecord("file4", "0.0.6", "127.0.0.1", 1235); - // 总共记录了4个文件 - // 其中一个port为Invalid - // 其中两个文件打开的client ip port相同 + // A total of 4 files were recorded + // One of the ports is Invalid + // Two of the files have the same client IP port opened ASSERT_EQ(2, fileRecordManager.ListAllClient().size()); // ClientIpPortType clientIpPort; @@ -110,8 +111,7 @@ TEST(FileRecordManagerTest, normal_test) { butil::endpoint2str(clients[0]).c_str()); clients.clear(); - ASSERT_FALSE( - fileRecordManager.FindFileMountPoint("file100", &clients)); + ASSERT_FALSE(fileRecordManager.FindFileMountPoint("file100", &clients)); fileRecordManager.Stop(); } @@ -127,7 +127,7 @@ TEST(FileRecordManagerTest, open_file_num_test) { ASSERT_EQ(0, fileRecordManager.GetOpenFileNum()); - // 插入两个记录 + // Insert two records fileRecordManager.UpdateFileRecord("file1", "", "127.0.0.1", 0); fileRecordManager.UpdateFileRecord("file2", "", "127.0.0.1", 0); @@ -138,18 +138,18 @@ TEST(FileRecordManagerTest, open_file_num_test) { } }; - // 只对 file1 定期续约 + // Regular renewal only for file1 std::thread th(task, "file1"); - // sleep 50ms后,file2 会超时 + // After 50ms of sleep, file2 will timeout std::this_thread::sleep_for(std::chrono::milliseconds(50)); ASSERT_EQ(1, fileRecordManager.GetOpenFileNum()); - // 停止 file1 的定期续约 + // Stop regular renewal of file1 running = false; th.join(); - // sleep 50ms后,file1 也会超时 + // After 50ms of sleep, file1 will also timeout std::this_thread::sleep_for(std::chrono::milliseconds(50)); ASSERT_EQ(0, fileRecordManager.GetOpenFileNum()); diff --git a/test/mds/nameserver2/namespace_service_test.cpp b/test/mds/nameserver2/namespace_service_test.cpp index c5247030f2..09fff706e2 100644 --- a/test/mds/nameserver2/namespace_service_test.cpp +++ b/test/mds/nameserver2/namespace_service_test.cpp @@ -19,40 +19,42 @@ * Created Date: Wednesday 
September 26th 2018 * Author: hzsunjianliang */ -#include -#include -#include +#include "src/mds/nameserver2/namespace_service.h" + #include #include -#include "src/mds/nameserver2/namespace_service.h" -#include "src/mds/nameserver2/curvefs.h" -#include "src/mds/nameserver2/chunk_allocator.h" -#include "src/common/timeutility.h" +#include +#include +#include + +#include "src/common/authenticator.h" #include "src/common/configuration.h" #include "src/common/string_util.h" -#include "test/mds/nameserver2/fakes.h" -#include "test/mds/nameserver2/mock/mock_clean_manager.h" -#include "test/mds/nameserver2/mock/mock_chunk_allocate.h" -#include "src/mds/nameserver2/clean_manager.h" +#include "src/common/timeutility.h" +#include "src/mds/chunkserverclient/copyset_client.h" +#include "src/mds/nameserver2/chunk_allocator.h" #include "src/mds/nameserver2/clean_core.h" +#include "src/mds/nameserver2/clean_manager.h" #include "src/mds/nameserver2/clean_task_manager.h" -#include "src/common/authenticator.h" -#include "test/mds/mock/mock_topology.h" -#include "test/mds/mock/mock_chunkserver.h" -#include "src/mds/chunkserverclient/copyset_client.h" +#include "src/mds/nameserver2/curvefs.h" #include "test/mds/mock/mock_alloc_statistic.h" +#include "test/mds/mock/mock_chunkserver.h" +#include "test/mds/mock/mock_topology.h" +#include "test/mds/nameserver2/fakes.h" +#include "test/mds/nameserver2/mock/mock_chunk_allocate.h" +#include "test/mds/nameserver2/mock/mock_clean_manager.h" -using curve::common::TimeUtility; using curve::common::Authenticator; -using curve::mds::topology::MockTopology; +using curve::common::TimeUtility; using ::curve::mds::chunkserverclient::ChunkServerClientOption; +using curve::mds::topology::MockTopology; using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; using ::testing::Invoke; using ::testing::Matcher; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace mds { @@ -74,34 +76,33 @@ class NameSpaceServiceTest : public ::testing::Test { protected: void SetUp() override { // init the kcurvefs, use the fake element - storage_ = std::make_shared(); + storage_ = std::make_shared(); inodeGenerator_ = std::make_shared(0); topology_ = std::make_shared(); ChunkServerClientOption option; auto channelPool = std::make_shared(); - auto client = std::make_shared(topology_, - option, channelPool); + auto client = + std::make_shared(topology_, option, channelPool); allocStatistic_ = std::make_shared(); - cleanCore_ = std::make_shared( - storage_, client, allocStatistic_); + cleanCore_ = + std::make_shared(storage_, client, allocStatistic_); // new taskmanger for 2 worker thread, and check thread period 2 second - cleanTaskManager_ = std::make_shared(channelPool, - 2, 2000); + cleanTaskManager_ = + std::make_shared(channelPool, 2, 2000); - cleanManager_ = std::make_shared(cleanCore_, - cleanTaskManager_, storage_); + cleanManager_ = std::make_shared( + cleanCore_, cleanTaskManager_, storage_); ASSERT_EQ(cleanManager_->Start(), true); std::shared_ptr topologyChunkAllocator = - std::make_shared(); + std::make_shared(); std::shared_ptr chunkIdGenerator = - std::make_shared(); - chunkSegmentAllocate_ = - std::make_shared( - topologyChunkAllocator, chunkIdGenerator); + std::make_shared(); + chunkSegmentAllocate_ = std::make_shared( + topologyChunkAllocator, chunkIdGenerator); fileRecordManager_ = std::make_shared(); fileRecordOptions.fileRecordExpiredTimeUs = 5 * 1000; @@ -118,16 +119,13 @@ 
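The open_file_num_test above exercises record expiry by racing a renewal thread against fileRecordExpiredTimeUs: a record counts as open only while UpdateFileRecord keeps refreshing it. Isolated, the driver has the shape below; the 10 ms refresh period is an assumption (the test's task lambda body is elided by the hunk), while running, UpdateFileRecord, and GetOpenFileNum are the names used in the test.

    #include <atomic>
    #include <chrono>
    #include <thread>

    // Sketch of the keepalive driver inside the test body: refresh one record
    // until told to stop, then join; once refreshing stops, the record expires.
    std::atomic<bool> running{true};
    std::thread renewer([&] {
        while (running) {
            fileRecordManager.UpdateFileRecord("file1", "", "127.0.0.1", 0);
            std::this_thread::sleep_for(std::chrono::milliseconds(10));  // assumed period
        }
    });
    // ... assert fileRecordManager.GetOpenFileNum() while renewal is active ...
    running = false;
    renewer.join();  // renewal stops; the record then times out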
class NameSpaceServiceTest : public ::testing::Test { curveFSOptions.authOptions = authOptions; kCurveFS.Init(storage_, inodeGenerator_, chunkSegmentAllocate_, - cleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions, topology_, - nullptr); + cleanManager_, fileRecordManager_, allocStatistic_, + curveFSOptions, topology_, nullptr); ASSERT_EQ(curveFSOptions.defaultChunkSize, - kCurveFS.GetDefaultChunkSize()); + kCurveFS.GetDefaultChunkSize()); ASSERT_EQ(curveFSOptions.defaultSegmentSize, - kCurveFS.GetDefaultSegmentSize()); + kCurveFS.GetDefaultSegmentSize()); ASSERT_EQ(curveFSOptions.minFileLength, kCurveFS.GetMinFileLength()); ASSERT_EQ(curveFSOptions.maxFileLength, kCurveFS.GetMaxFileLength()); DefaultSegmentSize = kCurveFS.GetDefaultSegmentSize(); @@ -150,7 +148,7 @@ class NameSpaceServiceTest : public ::testing::Test { } } - template + template void SetRequestAuth(T* request, RequestOption option) { uint64_t date = TimeUtility::GetTimeofDayUs(); request->set_date(date); @@ -173,18 +171,16 @@ class NameSpaceServiceTest : public ::testing::Test { uint64_t time; auto n = items.size(); - if (n <= 2 || !::curve::common::StringToUll(items[n - 1], &time) - || time < dtime || time - dtime > 1) { + if (n <= 2 || !::curve::common::StringToUll(items[n - 1], &time) || + time < dtime || time - dtime > 1) { LOG(INFO) << "unexpected filename: " << filename - << ", dtime: " << dtime - << ", time in file: " << time; + << ", dtime: " << dtime << ", time in file: " << time; return false; } return true; } - bool DeleteFile(const std::string& filename, - RequestOption option, + bool DeleteFile(const std::string& filename, RequestOption option, DeleteFileResponse* response) { brpc::Controller cntl; DeleteFileRequest request; @@ -201,8 +197,7 @@ class NameSpaceServiceTest : public ::testing::Test { return true; } - bool GetFileInfo(const std::string& filename, - RequestOption option, + bool GetFileInfo(const std::string& filename, RequestOption option, GetFileInfoResponse* response) { brpc::Controller cntl; GetFileInfoRequest request; @@ -218,8 +213,7 @@ class NameSpaceServiceTest : public ::testing::Test { return true; } - bool ListDir(const std::string& dirname, - RequestOption option, + bool ListDir(const std::string& dirname, RequestOption option, ListDirResponse* response) { brpc::Controller cntl; ListDirRequest request; @@ -260,8 +254,9 @@ TEST_F(NameSpaceServiceTest, test1) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -273,7 +268,6 @@ TEST_F(NameSpaceServiceTest, test1) { CurveFSService_Stub stub(&channel); - // test CreateFile // create /file1(owner1) , /file2(owner2), /dir/file3(owner3) std::vector logicalPools{1, 2, 3}; @@ -285,7 +279,7 @@ TEST_F(NameSpaceServiceTest, test1) { brpc::Controller cntl; uint64_t fileLength = kMiniFileLength; - // 创建file1,owner1 + // Create file1, owner1 request.set_filename("/file1"); request.set_owner("owner1"); request.set_date(TimeUtility::GetTimeofDayUs()); @@ -347,7 +341,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 在一个不存在的目录下创建文件,会失败 kFileNotExists + // Creating a file in a non-existent directory will fail kFileNotExists cntl.Reset(); request.set_filename("/dir4/file4"); request.set_owner("owner4"); @@ -363,7 +357,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); 
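The DeleteFile/GetFileInfo/ListDir helpers above all follow one brpc shape: fill a request, invoke the generated stub with done == NULL (which makes the call synchronous), and report cntl.Failed(). That shape generalizes to a single template; the sketch below is hypothetical, not part of the patch, and relies only on the standard generated protobuf stub signature.

    #include <brpc/controller.h>
    #include <google/protobuf/service.h>

    // Hypothetical generalization of the per-RPC helpers above. The method
    // argument may be any generated CurveFSService_Stub member; passing
    // done == nullptr keeps the call synchronous, as in every helper here.
    template <typename Req, typename Resp>
    bool CallRpc(CurveFSService_Stub* stub,
                 void (CurveFSService_Stub::*method)(
                     google::protobuf::RpcController*, const Req*, Resp*,
                     google::protobuf::Closure*),
                 const Req& request, Resp* response) {
        brpc::Controller cntl;
        (stub->*method)(&cntl, &request, response, nullptr);
        return !cntl.Failed();
    }

    // e.g. ASSERT_TRUE(CallRpc(&stub, &CurveFSService_Stub::GetFileInfo,
    //                          request1, &response1));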
} - // 在一个文件下创建文件,会失败 kNotDirectory + // Creating a file under an existing file fails with kNotDirectory cntl.Reset(); request.set_filename("/file2/file4"); request.set_owner("owner2"); @@ -379,7 +373,7 @@ FAIL(); } - // 如果创建一个已经存在的文件,会创建失败kFileExists + // Creating a file that already exists fails with kFileExists cntl.Reset(); request.set_filename("/file2"); request.set_poolset(""); @@ -396,7 +390,7 @@ FAIL(); } - // 如果创建一个已经存在的目录,会创建失败kFileExists + // Creating a directory that already exists fails with kFileExists cntl.Reset(); request.set_filename("/dir"); request.set_owner("owner3"); @@ -412,7 +406,7 @@ FAIL(); } - // 创建其他类型文件,返回kNotSupported + // Creating files of other types returns kNotSupported cntl.Reset(); request.set_filename("/file4"); request.set_owner("owner4"); @@ -457,7 +451,7 @@ FAIL(); } - // 创建文件名不规范的文件会失败 + // Creating a file with a malformed name fails cntl.Reset(); request.set_filename("/file4/"); request.set_owner("owner4"); @@ -515,10 +509,10 @@ cntl.Reset(); uint64_t date = TimeUtility::GetTimeofDayUs(); - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); + std::string sig = Authenticator::CalcString2Signature( + str2sig, authOptions.rootPassword); listRequest.set_signature(sig); listRequest.set_filename("/"); listRequest.set_owner(authOptions.rootOwner); @@ -527,7 +521,7 @@ if (!cntl.Failed()) { ASSERT_EQ(listResponse.statuscode(), StatusCode::kOK); ASSERT_EQ(listResponse.fileinfo_size(), 4); - } else { + } else { ASSERT_TRUE(false); } } @@ -559,7 +553,7 @@ ASSERT_EQ(response1.fileinfo().parentid(), 0); ASSERT_EQ(response1.fileinfo().filetype(), INODE_PAGEFILE); ASSERT_EQ(response1.fileinfo().chunksize(), - curveFSOptions.defaultChunkSize); + curveFSOptions.defaultChunkSize); ASSERT_EQ(response1.fileinfo().segmentsize(), DefaultSegmentSize); ASSERT_EQ(response1.fileinfo().length(), fileLength); } else { FAIL(); } // test GetOrAllocateSegment - // 为file1分配空间 + // Allocate space for file1 cntl.Reset(); GetOrAllocateSegmentRequest request2; GetOrAllocateSegmentResponse response2; @@ -606,13 +600,13 @@ if (!cntl.Failed()) { ASSERT_EQ(response2.statuscode(), StatusCode::kOK); ASSERT_EQ(response2.pagefilesegment().segmentsize(), - response1.fileinfo().segmentsize()); + response1.fileinfo().segmentsize()); ASSERT_EQ(response2.pagefilesegment().chunksize(), - response1.fileinfo().chunksize()); + response1.fileinfo().chunksize()); ASSERT_EQ(response2.pagefilesegment().startoffset(), request2.offset()); - int chunkNumber = response2.pagefilesegment().segmentsize()/ - response2.pagefilesegment().chunksize(); + int chunkNumber = response2.pagefilesegment().segmentsize() / + response2.pagefilesegment().chunksize(); ASSERT_EQ(response2.pagefilesegment().chunks().size(), chunkNumber); } else { @@ -631,7 +625,7 @@ if (!cntl.Failed()) { ASSERT_EQ(response3.statuscode(), StatusCode::kOK); ASSERT_EQ(response3.pagefilesegment().SerializeAsString(), -
response2.pagefilesegment().SerializeAsString()); + response2.pagefilesegment().SerializeAsString()); } else { ASSERT_TRUE(false); } @@ -682,8 +676,8 @@ TEST_F(NameSpaceServiceTest, test1) { // test change owner { - // 当前有文件 /file1(owner1) , /file2(owner2), /dir/file3(owner3) - // changeowner success + // There are currently /file1(owner1) , /file2(owner2), + // /dir/file3(owner3) changeowner success cntl.Reset(); ChangeOwnerRequest request; ChangeOwnerResponse response; @@ -694,10 +688,10 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_filename("/file1"); request.set_newowner("newowner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner(authOptions.rootOwner); request.set_signature(sig); request.set_date(date); @@ -713,10 +707,10 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_filename("/file1"); request.set_newowner("newowner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner(authOptions.rootOwner); request.set_signature(sig); request.set_date(date); @@ -732,10 +726,10 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_filename("/file1"); request.set_newowner("owner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner("newowner1"); request.set_signature(sig); request.set_date(date); @@ -766,10 +760,10 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_filename("/file1"); request.set_newowner("owner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner(authOptions.rootOwner); request.set_signature(sig); request.set_date(date + kStaledRequestTimeIntervalUs * 2); @@ -785,10 +779,10 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_filename("/file1"); request.set_newowner("owner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner(authOptions.rootOwner); request.set_signature(sig); request.set_date(date); @@ -799,15 +793,15 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // changeowner 文件名不规范,失败 + // changeowner file name is not standardized, failed cntl.Reset(); request.set_filename("/file1/"); request.set_newowner("owner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + 
Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner(authOptions.rootOwner); request.set_signature(sig); request.set_date(date); @@ -820,12 +814,12 @@ } // test RenameFile - // 重命名到根目录下,非root owner,失败 - // fileinfoid不匹配,失败 - // 重命名成功 /dir/file3 -> /dir/file4 - // 原文件不存在,重命名失败 - // 重命名到根目录下,root owner,成功 /dir/file4 -> /file4 - // 文件名不规范,失败 + // Renaming into the root directory as a non-root owner fails + // A mismatched fileinfoid fails + // Rename succeeds: /dir/file3 -> /dir/file4 + // Renaming fails when the source file does not exist + // Renaming into the root directory as the root owner succeeds: + // /dir/file4 -> /file4 + // Renaming with a malformed file name fails cntl.Reset(); RenameFileRequest request4; RenameFileResponse response4; @@ -858,10 +852,10 @@ std::string oldname = "/dir/file4"; uint64_t date = TimeUtility::GetTimeofDayUs(); - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); + std::string sig = + Authenticator::CalcString2Signature(str2sig, authOptions.rootPassword); request4.set_oldfilename(oldname); request4.set_newfilename("/file4"); @@ -951,8 +945,8 @@ } // test ExtendFile - // 扩容file2,第一次扩大,成功;第二次缩小,失败 - // 扩容的文件名不符合规范,失败 + // Extend file2: the first request grows the file and succeeds; the + // second shrinks it and fails + // Extending a file with a malformed name fails uint64_t newsize = kMiniFileLength * 2; cntl.Reset(); ExtendFileRequest request5; ExtendFileResponse response5; @@ -992,8 +986,9 @@ ASSERT_TRUE(false); } - // begin session test,开始测试时,有/file1,/file2和/file4 - // OpenFile case1. 文件不存在,返回kFileNotExists + // begin session test: at this point /file1, /file2 and /file4 exist + // OpenFile case1. The file does not exist; kFileNotExists is returned cntl.Reset(); OpenFileRequest request8; OpenFileResponse response8; @@ -1008,7 +1003,8 @@ ASSERT_TRUE(false); } - // OpenFile case2. 文件存在,没有open过,返回成功、session、fileInfo + // OpenFile case2. The file exists and has not been opened; success, + // the session, and the fileInfo are returned cntl.Reset(); OpenFileRequest request9; OpenFileResponse response9; @@ -1020,7 +1016,7 @@ if (!cntl.Failed()) { ASSERT_EQ(response9.statuscode(), StatusCode::kOK); ASSERT_EQ(response9.protosession().sessionstatus(), - SessionStatus::kSessionOK); + SessionStatus::kSessionOK); ASSERT_EQ(response9.fileinfo().filename(), "file2"); } else { ASSERT_TRUE(false); @@ -1037,13 +1033,13 @@ if (!cntl.Failed()) { ASSERT_EQ(response10.statuscode(), StatusCode::kOK); ASSERT_EQ(response10.protosession().sessionstatus(), - SessionStatus::kSessionOK); + SessionStatus::kSessionOK); ASSERT_EQ(response10.fileinfo().filename(), "file1"); } else { ASSERT_TRUE(false); } - // openFile case3, 文件名不符合规范 + // OpenFile case3. The file name is malformed OpenFileRequest request11; OpenFileResponse response11; cntl.Reset(); @@ -1058,7 +1054,7 @@ ASSERT_TRUE(false); } - // CloseFile case1. 
文件不存在,返回kFileNotExists + // CloseFile case1. The file does not exist; kFileNotExists is returned cntl.Reset(); CloseFileRequest request12; CloseFileResponse response12; @@ -1074,7 +1070,7 @@ ASSERT_TRUE(false); } - // CloseFile case2. 文件存在,session存在,返回成功 + // CloseFile case2. The file and its session exist; success is returned CloseFileRequest request13; CloseFileResponse response13; cntl.Reset(); @@ -1092,7 +1088,7 @@ ASSERT_TRUE(false); } - // CloseFile case3. 文件名不符合规范 + // CloseFile case3. The file name is malformed cntl.Reset(); request14.set_filename("/file2/"); request14.set_owner("owner2"); @@ -1106,7 +1102,7 @@ ASSERT_TRUE(false); } - // RefreshSession case1. 文件不存在,返回kFileNotExists + // RefreshSession case1. The file does not exist; kFileNotExists is returned cntl.Reset(); ReFreshSessionRequest request15; ReFreshSessionResponse response15; @@ -1124,7 +1120,7 @@ ASSERT_TRUE(false); } - // RefreshSession case2. 文件名不符合规范 + // RefreshSession case2. The file name is malformed ReFreshSessionRequest request18; ReFreshSessionResponse response18; cntl.Reset(); @@ -1155,8 +1151,9 @@ TEST_F(NameSpaceServiceTest, snapshottests) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -1168,7 +1165,6 @@ TEST_F(NameSpaceServiceTest, snapshottests) { CurveFSService_Stub stub(&channel); - // test create file std::vector logicalPools{1, 2, 3}; EXPECT_CALL(*topology_, GetLogicalPoolInCluster(_)) .Times(AtLeast(1)) .WillRepeatedly(Return(logicalPools)); @@ -1188,7 +1184,7 @@ request.set_filelength(fileLength); cntl.set_log_id(2); - stub.CreateFile(&cntl, &request, &response, NULL); + stub.CreateFile(&cntl, &request, &response, NULL); if (!cntl.Failed()) { ASSERT_EQ(response.statuscode(), StatusCode::kOK); } else { @@ -1204,7 +1200,7 @@ request1.set_date(TimeUtility::GetTimeofDayUs()); stub.GetFileInfo(&cntl, &request1, &response1, NULL); if (!cntl.Failed()) { - FileInfo file = response1.fileinfo(); + FileInfo file = response1.fileinfo(); ASSERT_EQ(response1.statuscode(), StatusCode::kOK); ASSERT_EQ(file.id(), 1); ASSERT_EQ(file.filename(), "file1"); @@ -1246,7 +1242,7 @@ snapshotRequest.set_date(TimeUtility::GetTimeofDayUs()); stub.CreateSnapShot(&cntl, &snapshotRequest, &snapshotResponses, NULL); if (!cntl.Failed()) { - ASSERT_EQ(snapshotResponses.statuscode(), StatusCode::kParaError); + ASSERT_EQ(snapshotResponses.statuscode(), StatusCode::kParaError); } else { ASSERT_TRUE(false); } @@ -1310,11 +1306,11 @@ getSegmentRequest.set_offset(DefaultSegmentSize); getSegmentRequest.set_allocateifnotexist(false); getSegmentRequest.set_seqnum(1); - stub.GetSnapShotFileSegment(&cntl, &getSegmentRequest, - &getSegmentResponse, NULL); + stub.GetSnapShotFileSegment(&cntl, &getSegmentRequest, &getSegmentResponse, + NULL); if (!cntl.Failed()) { ASSERT_EQ(getSegmentResponse.statuscode(), - StatusCode::kSegmentNotAllocated); + StatusCode::kSegmentNotAllocated); } else { ASSERT_TRUE(false); }
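A note on the kSegmentNotAllocated assertion just above: with allocateifnotexist set to false, GetSnapShotFileSegment reports kSegmentNotAllocated when the queried offset falls in a segment that was never allocated. The offset-to-segment mapping is plain integer division, so offset == DefaultSegmentSize is the first byte of segment 1, not part of segment 0; the concrete segment size below is only an illustration.

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint64_t kSegmentSize = 1ULL << 30;  // stand-in for DefaultSegmentSize
        const uint64_t offset = kSegmentSize;      // the offset the tests query
        // byte offset -> segment index
        assert(offset / kSegmentSize == 1);  // segment 1: never allocated here,
                                             // hence kSegmentNotAllocated
        return 0;
    }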
@@ -1326,8 +1322,8 @@ getSegmentRequest.set_offset(DefaultSegmentSize); getSegmentRequest.set_allocateifnotexist(false); getSegmentRequest.set_seqnum(1); - stub.GetSnapShotFileSegment(&cntl, &getSegmentRequest, - &getSegmentResponse, NULL); + stub.GetSnapShotFileSegment(&cntl, &getSegmentRequest, &getSegmentResponse, + NULL); if (!cntl.Failed()) { ASSERT_EQ(getSegmentResponse.statuscode(), StatusCode::kParaError); } else { ASSERT_TRUE(false); } @@ -1407,13 +1403,14 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); using ::curve::chunkserver::MockChunkService; - MockChunkService *chunkService = new MockChunkService(); - ASSERT_EQ(server.AddService(chunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + MockChunkService* chunkService = new MockChunkService(); + ASSERT_EQ(server.AddService(chunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -1426,7 +1423,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { CurveFSService_Stub stub(&channel); - // 先创建文件/file1,目录/dir1,文件/dir1/file2 + // First create file '/file1', directory '/dir1', file '/dir1/file2' std::vector logicalPools{1, 2, 3}; EXPECT_CALL(*topology_, GetLogicalPoolInCluster(_)) .Times(AtLeast(1)) @@ -1444,7 +1441,7 @@ request.set_filelength(fileLength); cntl.set_log_id(2); - stub.CreateFile(&cntl, &request, &response, NULL); + stub.CreateFile(&cntl, &request, &response, NULL); if (!cntl.Failed()) { ASSERT_EQ(response.statuscode(), StatusCode::kOK); } else { @@ -1480,7 +1477,8 @@ FAIL(); } - // 查看文件/file1,目录/dir1,文件/dir1/file2的状态 + // View the status of file '/file1', directory '/dir1', and file + // '/dir1/file2' cntl.Reset(); GetFileInfoRequest request1; GetFileInfoResponse response1; @@ -1489,7 +1487,7 @@ request1.set_date(TimeUtility::GetTimeofDayUs()); stub.GetFileInfo(&cntl, &request1, &response1, NULL); if (!cntl.Failed()) { - FileInfo file = response1.fileinfo(); + FileInfo file = response1.fileinfo(); ASSERT_EQ(response1.statuscode(), StatusCode::kOK); ASSERT_EQ(file.id(), 1); ASSERT_EQ(file.filename(), "file1"); @@ -1509,7 +1507,7 @@ request1.set_date(TimeUtility::GetTimeofDayUs()); stub.GetFileInfo(&cntl, &request1, &response1, NULL); if (!cntl.Failed()) { - FileInfo file = response1.fileinfo(); + FileInfo file = response1.fileinfo(); ASSERT_EQ(response1.statuscode(), StatusCode::kOK); ASSERT_EQ(file.id(), 2); ASSERT_EQ(file.filename(), "dir1"); @@ -1539,7 +1537,7 @@ ASSERT_TRUE(false); } - // 文件/dir1/file2申请segment + // Allocate segments for file '/dir1/file2' GetOrAllocateSegmentRequest allocRequest; GetOrAllocateSegmentResponse allocResponse; for (int i = 0; i < 10; i++) { @@ -1551,15 +1549,15 @@ allocRequest.set_allocateifnotexist(true); stub.GetOrAllocateSegment(&cntl, &allocRequest, &allocResponse, NULL); if (!cntl.Failed()) { - ASSERT_EQ(allocResponse.statuscode(), - StatusCode::kOK); + ASSERT_EQ(allocResponse.statuscode(), StatusCode::kOK); } else { ASSERT_TRUE(false); } } - // 开始测试删除文件逻辑 - // 1 如果文件有快照,那么删除文件返回kFileUnderSnapShot + // Start testing the delete-file logic 
+ // 1. If the file has a snapshot, deleting the file returns + // kFileUnderSnapShot cntl.Reset(); CreateSnapShotRequest snapshotRequest; CreateSnapShotResponse snapshotResponses; @@ -1623,7 +1621,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { stub.CheckSnapShotStatus(&cntl, &checkRequest, &checkResponse, NULL); if (!cntl.Failed()) { if (checkResponse.statuscode() == - StatusCode::kSnapshotFileNotExists) { + StatusCode::kSnapshotFileNotExists) { break; } else { ASSERT_EQ(checkResponse.statuscode(), StatusCode::kOK); @@ -1636,10 +1634,10 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { } } ASSERT_LE(attempts, 100) - << "max attempts for check snapshot status exhausted"; - + << "max attempts for check snapshot status exhausted"; - // 2 如果目录下有文件,那么删除目录返回kDirNotEmpty + // 2. If there are files in the directory, deleting the directory returns + // kDirNotEmpty cntl.Reset(); request3.set_filename("/dir1"); request3.set_owner("owner"); @@ -1653,7 +1651,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { ASSERT_TRUE(false); } - // 3 如果传入的fileid不匹配,删除文件失败 + // 3. If the passed in fileids do not match, deleting the file fails cntl.Reset(); DeleteFileRequest request5; DeleteFileResponse response5; @@ -1670,7 +1668,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { ASSERT_TRUE(false); } - // 4 删除文件/file1成功,查询文件已经删除 + // 4. Successfully deleted file '/file1', query file has been deleted cntl.Reset(); request3.set_filename("/file1"); request3.set_owner("owner"); @@ -1696,15 +1694,15 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { ASSERT_TRUE(false); } - // 查询垃圾箱 + // Query Trash Bin ListDirRequest listRequest; ListDirResponse listResponse; cntl.Reset(); uint64_t date = TimeUtility::GetTimeofDayUs(); - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); + std::string sig = + Authenticator::CalcString2Signature(str2sig, authOptions.rootPassword); listRequest.set_signature(sig); listRequest.set_filename(RECYCLEBINDIR); listRequest.set_owner(authOptions.rootOwner); @@ -1716,37 +1714,36 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { FileInfo file = listResponse.fileinfo(0); ASSERT_TRUE(CheckFilename(file.filename(), dtime)); // file1-1-${dtime} ASSERT_EQ(file.filestatus(), FileStatus::kFileCreated); - } else { + } else { ASSERT_TRUE(false); } - // 删除文件/dir1/file2成功,删除目录/dir1成功,查询目录和文件均已经删除 - using ::curve::mds::topology::ChunkServerStatus; - using ::curve::mds::topology::OnlineState; + // Successfully deleted file '/dir1/file2', deleted directory '/dir1', + // queried directory and files have been deleted + using ::curve::chunkserver::CHUNK_OP_STATUS; using ::curve::chunkserver::ChunkRequest; using ::curve::chunkserver::ChunkResponse; - using ::curve::chunkserver::CHUNK_OP_STATUS; + using ::curve::mds::topology::ChunkServerStatus; + using ::curve::mds::topology::OnlineState; CopySetInfo copyset(1, 1); copyset.SetLeader(1); EXPECT_CALL(*topology_, GetCopySet(_, _)) - .WillRepeatedly( - DoAll(SetArgPointee<1>(copyset), Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(copyset), Return(true))); ChunkServer chunkserver(1, "", "", 1, "127.0.0.1", listenAddr.port, "", - ChunkServerStatus::READWRITE, OnlineState::ONLINE); + ChunkServerStatus::READWRITE, OnlineState::ONLINE); EXPECT_CALL(*topology_, GetChunkServer(_, _)) - 
.WillRepeatedly(DoAll(SetArgPointee<1>(chunkserver), Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(chunkserver), Return(true))); ChunkResponse chunkResponse; chunkResponse.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); EXPECT_CALL(*chunkService, DeleteChunk(_, _, _, _)) - .WillRepeatedly(DoAll(SetArgPointee<2>(chunkResponse), - Invoke([](RpcController *controller, - const ChunkRequest *chunkRequest, - ChunkResponse *chunkResponse, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(chunkResponse), + Invoke([](RpcController* controller, + const ChunkRequest* chunkRequest, + ChunkResponse* chunkResponse, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); stub_ = std::make_shared(&channel); @@ -1858,8 +1855,9 @@ TEST_F(NameSpaceServiceTest, clonetest) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -1944,8 +1942,9 @@ TEST_F(NameSpaceServiceTest, listClientTest) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -1979,8 +1978,9 @@ TEST_F(NameSpaceServiceTest, listAllClientTest) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -2085,8 +2085,9 @@ TEST_F(NameSpaceServiceTest, ListVolumesOnCopysets) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -2116,8 +2117,9 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -2143,7 +2145,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { createRequest.set_date(TimeUtility::GetTimeofDayUs()); createRequest.set_filetype(INODE_PAGEFILE); createRequest.set_filelength(fileLength); - stub.CreateFile(&cntl, &createRequest, &createResponse, NULL); + stub.CreateFile(&cntl, &createRequest, &createResponse, NULL); if (!cntl.Failed()) { ASSERT_EQ(createResponse.statuscode(), StatusCode::kOK); } else { @@ -2187,7 +2189,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { getRequest.set_date(TimeUtility::GetTimeofDayUs()); stub.GetFileInfo(&cntl, &getRequest, &getResponse, NULL); if (!cntl.Failed()) { - FileInfo file = getResponse.fileinfo(); + FileInfo file = getResponse.fileinfo(); ASSERT_EQ(getResponse.statuscode(), StatusCode::kOK); ASSERT_EQ(file.id(), 1); ASSERT_EQ(file.filename(), "file1"); @@ -2207,7 +2209,7 @@ 
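One detail of the DeleteChunk stub above is worth a comment: a mocked asynchronous brpc method must run its done closure, or the calling side blocks forever, and brpc::ClosureGuard guarantees done->Run() fires at scope exit no matter how the action returns. Reduced to its essentials (type names as used in the test; treat this as a sketch):

    #include <brpc/closure_guard.h>

    // Minimal mock action for an async protobuf RPC: fill the response and
    // let ClosureGuard complete the call on scope exit.
    auto deleteChunkAction = [](RpcController* controller,
                                const ChunkRequest* request,
                                ChunkResponse* response, Closure* done) {
        brpc::ClosureGuard doneGuard(done);  // runs done->Run() even on early return
        response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
    };
    // Installed via Invoke(deleteChunkAction) inside the DoAll(...) above.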
TEST_F(NameSpaceServiceTest, testRecoverFile) { getRequest.set_date(TimeUtility::GetTimeofDayUs()); stub.GetFileInfo(&cntl, &getRequest, &getResponse, NULL); if (!cntl.Failed()) { - FileInfo file = getResponse.fileinfo(); + FileInfo file = getResponse.fileinfo(); ASSERT_EQ(getResponse.statuscode(), StatusCode::kOK); ASSERT_EQ(file.id(), 2); ASSERT_EQ(file.filename(), "dir1"); @@ -2249,8 +2251,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { allocRequest.set_allocateifnotexist(true); stub.GetOrAllocateSegment(&cntl, &allocRequest, &allocResponse, NULL); if (!cntl.Failed()) { - ASSERT_EQ(allocResponse.statuscode(), - StatusCode::kOK); + ASSERT_EQ(allocResponse.statuscode(), StatusCode::kOK); } else { ASSERT_TRUE(false); } @@ -2278,10 +2279,10 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { ListDirRequest listRequest; ListDirResponse listResponse; uint64_t date = TimeUtility::GetTimeofDayUs(); - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); + std::string sig = + Authenticator::CalcString2Signature(str2sig, authOptions.rootPassword); listRequest.set_signature(sig); listRequest.set_filename(RECYCLEBINDIR); listRequest.set_owner(authOptions.rootOwner); @@ -2295,7 +2296,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { ASSERT_EQ(file.originalfullpathname(), "/dir1/file2"); ASSERT_EQ(file.filestatus(), FileStatus::kFileCreated); ASSERT_EQ(listResponse.fileinfo_size(), 1); - } else { + } else { ASSERT_TRUE(false); } @@ -2327,7 +2328,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { ASSERT_EQ(file.filename(), "file2"); ASSERT_EQ(file.filestatus(), FileStatus::kFileCreated); ASSERT_EQ(listResponse.fileinfo_size(), 1); - } else { + } else { ASSERT_TRUE(false); } @@ -2400,14 +2401,14 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { ASSERT_TRUE(false); } - // 3. check the ctime of recovered file is greater than the other in recyclebin //NOLINT + // 3. check the ctime of recovered file is greater than the other in + // recyclebin //NOLINT FileInfo recycleFile; cntl.Reset(); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + str2sig = Authenticator::GetString2Signature(date, authOptions.rootOwner); + sig = + Authenticator::CalcString2Signature(str2sig, authOptions.rootPassword); listRequest.set_signature(sig); listRequest.set_filename(RECYCLEBINDIR); listRequest.set_owner(authOptions.rootOwner); @@ -2471,10 +2472,9 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { // 3. 
check the fileId of recovered file 3 and not recovered is 4 cntl.Reset(); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + str2sig = Authenticator::GetString2Signature(date, authOptions.rootOwner); + sig = + Authenticator::CalcString2Signature(str2sig, authOptions.rootPassword); listRequest.set_signature(sig); listRequest.set_filename(RECYCLEBINDIR); listRequest.set_owner(authOptions.rootOwner); @@ -2588,8 +2588,8 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { createCloneRequest.set_poolset(kDefaultPoolset); createCloneRequest.set_clonesource("/sourcefile1"); cntl.Reset(); - stub.CreateCloneFile(&cntl, &createCloneRequest, - &createCloneResponse, NULL); + stub.CreateCloneFile(&cntl, &createCloneRequest, &createCloneResponse, + NULL); if (!cntl.Failed()) { ASSERT_EQ(createCloneResponse.statuscode(), StatusCode::kOK); } else { @@ -2632,7 +2632,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { stub.RecoverFile(&cntl, &recoverRequest, &recoverRresponse, NULL); if (!cntl.Failed()) { ASSERT_EQ(recoverRresponse.statuscode(), - StatusCode::kRecoverFileCloneMetaInstalled); + StatusCode::kRecoverFileCloneMetaInstalled); } else { std::cout << cntl.ErrorText(); ASSERT_TRUE(false); @@ -2649,8 +2649,8 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { createCloneRequest.set_owner("owner"); createCloneRequest.set_clonesource("/sourcefile1"); cntl.Reset(); - stub.CreateCloneFile(&cntl, &createCloneRequest, - &createCloneResponse, NULL); + stub.CreateCloneFile(&cntl, &createCloneRequest, &createCloneResponse, + NULL); if (!cntl.Failed()) { ASSERT_EQ(createCloneResponse.statuscode(), StatusCode::kOK); } else { @@ -2690,8 +2690,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { recoverRequest.set_date(TimeUtility::GetTimeofDayUs()); stub.RecoverFile(&cntl, &recoverRequest, &recoverRresponse, NULL); if (!cntl.Failed()) { - ASSERT_EQ(recoverRresponse.statuscode(), - StatusCode::kRecoverFileError); + ASSERT_EQ(recoverRresponse.statuscode(), StatusCode::kRecoverFileError); } else { std::cout << cntl.ErrorText(); ASSERT_TRUE(false); @@ -2754,9 +2753,9 @@ TEST_F(NameSpaceServiceTest, TestDeAllocateSegment) { // create file and allocate segment { std::vector logicalPools{1, 2, 3}; - EXPECT_CALL(*topology_, GetLogicalPoolInCluster(_)) - .Times(AtLeast(1)) - .WillRepeatedly(Return(logicalPools)); + EXPECT_CALL(*topology_, GetLogicalPoolInCluster(_)) + .Times(AtLeast(1)) + .WillRepeatedly(Return(logicalPools)); CreateFileRequest createRequest; CreateFileResponse createResponse; createRequest.set_filename(filename); diff --git a/test/mds/schedule/coordinator_test.cpp b/test/mds/schedule/coordinator_test.cpp index b18aa07b31..90284dfeff 100644 --- a/test/mds/schedule/coordinator_test.cpp +++ b/test/mds/schedule/coordinator_test.cpp @@ -20,19 +20,21 @@ * Author: lixiaocui */ -#include #include "src/mds/schedule/coordinator.h" + +#include + #include "src/mds/common/mds_define.h" -#include "test/mds/schedule/mock_topoAdapter.h" #include "test/mds/mock/mock_topology.h" #include "test/mds/schedule/common.h" +#include "test/mds/schedule/mock_topoAdapter.h" -using ::curve::mds::topology::MockTopology; using ::curve::mds::schedule::ScheduleOption; +using ::curve::mds::topology::MockTopology; +using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; -using ::testing::_; using 
::curve::mds::topology::UNINTIALIZE_ID; @@ -85,29 +87,31 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { copySetKey.first = 1; copySetKey.second = 1; Operator testOperator(startEpoch, copySetKey, - OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared(4)); + OperatorPriority::NormalPriority, steady_clock::now(), + std::make_shared(4)); testOperator.timeLimit = std::chrono::seconds(100); auto info = GetCopySetInfoForTest(); PeerInfo peer(4, 1, 1, "127.0.0.1", 9000); ChunkServerInfo csInfo(peer, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, - 1, 10, 1, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 1, 10, 1, + ChunkServerStatisticInfo{}); ::curve::mds::heartbeat::CopySetConf res; { // 1. test copySet do not have operator EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { // 2. test copySet has operator and not execute EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) - .Times(2).WillOnce(DoAll(SetArgPointee<1>(info), Return(true))) - .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); + .Times(2) + .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))) + .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); EXPECT_CALL(*topoAdapter, GetChunkServerInfo(_, _)) .Times(3) .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(true))) @@ -115,21 +119,22 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(Return(false)); coordinator->GetOpController()->AddOperator(testOperator); Operator opRes; - ASSERT_TRUE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); - // 第一次下发配置 - ASSERT_EQ(4, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_TRUE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); + // First configuration distribution + ASSERT_EQ(4, coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ(ConfigChangeType::ADD_PEER, res.type()); - // 第二次获取chunkserver失败 - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + // Failed to obtain chunkserver for the second time + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + // 3. 
Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -139,21 +144,23 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { EXPECT_CALL(*topoAdapter, GetChunkServerInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); Operator opRes; - ASSERT_FALSE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); + ASSERT_FALSE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到chunkserver的信息 + // Unable to obtain chunkserver information ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter, GetChunkServerInfo(_, _)) .WillOnce(Return(false)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); - ASSERT_FALSE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); + ASSERT_FALSE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); } { @@ -167,8 +174,9 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { info.configChangeInfo.set_allocated_peer(replica); EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, info.configChangeInfo, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + info.configChangeInfo, &res)); } { @@ -179,8 +187,9 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); coordinator->GetOpController()->RemoveOperator(info.id); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { @@ -189,16 +198,18 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { info.peers.emplace_back(PeerInfo(4, 4, 4, "192.10.123.1", 9000)); EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { // 7. 
test transfer copysetInfo err EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(Return(false)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } } @@ -228,34 +239,36 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { CopySetKey copySetKey; copySetKey.first = 1; copySetKey.second = 1; - Operator testOperator( - startEpoch, copySetKey, OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared(1, 4)); + Operator testOperator(startEpoch, copySetKey, + OperatorPriority::NormalPriority, steady_clock::now(), + std::make_shared(1, 4)); testOperator.timeLimit = std::chrono::seconds(100); auto info = GetCopySetInfoForTest(); PeerInfo peer(4, 1, 1, "127.0.0.1", 9000); ChunkServerInfo csInfo(peer, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, - 1, 10, 1, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 1, 10, 1, + ChunkServerStatisticInfo{}); PeerInfo peer1(1, 1, 1, "127.0.0.1", 9001); ChunkServerInfo csInfo1(peer1, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, - 1, 10, 1, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 1, 10, 1, + ChunkServerStatisticInfo{}); ::curve::mds::heartbeat::CopySetConf res; { // 1. test copySet do not have operator EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { // 2. test copySet has operator and not execute EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) - .Times(2).WillOnce(DoAll(SetArgPointee<1>(info), Return(true))) - .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); + .Times(2) + .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))) + .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); EXPECT_CALL(*topoAdapter, GetChunkServerInfo(4, _)) .Times(3) .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(true))) @@ -265,22 +278,23 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); coordinator->GetOpController()->AddOperator(testOperator); Operator opRes; - ASSERT_TRUE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); - // 第一次下发配置 - ASSERT_EQ(4, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_TRUE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); + // First configuration distribution + ASSERT_EQ(4, coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ("127.0.0.1:9001:0", res.oldpeer().address()); ASSERT_EQ(ConfigChangeType::CHANGE_PEER, res.type()); - // 第二次获取chunkserver失败 - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + // Failed to obtain chunkserver for the second time + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + // 3. 
Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -290,21 +304,23 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { EXPECT_CALL(*topoAdapter, GetChunkServerInfo(4, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); Operator opRes; - ASSERT_FALSE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); + ASSERT_FALSE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到chunkserver的信息 + // Unable to obtain chunkserver information ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter, GetChunkServerInfo(_, _)) .WillOnce(Return(false)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); - ASSERT_FALSE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); + ASSERT_FALSE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); } { @@ -318,8 +334,9 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { info.configChangeInfo.set_allocated_peer(replica); EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, info.configChangeInfo, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + info.configChangeInfo, &res)); } { @@ -330,8 +347,9 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); coordinator->GetOpController()->RemoveOperator(info.id); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { @@ -340,16 +358,18 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { info.peers.emplace_back(PeerInfo(4, 4, 4, "192.10.123.1", 9000)); EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { // 7. test transfer copysetInfo err EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(Return(false)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } } @@ -359,70 +379,68 @@ TEST(CoordinatorTest, test_ChunkserverGoingToAdd) { auto coordinator = std::make_shared(topoAdapter); ScheduleOption scheduleOption; scheduleOption.operatorConcurrent = 4; - coordinator->InitScheduler( - scheduleOption, std::make_shared(topo)); + coordinator->InitScheduler(scheduleOption, + std::make_shared(topo)); { - // 1. 
copyset上没有要变更的operator + // 1. There are no operators to change on the copyset ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 1})); } { - // 2. copyset上有leader变更,并且目的leader为chunkserver-1 - Operator testOperator(1, CopySetKey{1, 1}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(2, 1)); + // 2. There is a leader change on the copyset and the target leader is + // chunkserver-1 + Operator testOperator( + 1, CopySetKey{1, 1}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(2, 1)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 1})); } { - // 3. copyset上有remove peer操作 - Operator testOperator(1, CopySetKey{1, 2}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(1)); + // 3. There is a remove peer operation on the copyset + Operator testOperator( + 1, CopySetKey{1, 2}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(1)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 2})); } { - // 4. copyset上有add peer操作, target不是1 - Operator testOperator(1, CopySetKey{1, 3}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(2)); + // 4. There is an add peer operation on the copyset, but the target is + // not 1 + Operator testOperator( + 1, CopySetKey{1, 3}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(2)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 3})); } { - // 5. copyset上有add peer操作, target是1 - Operator testOperator(1, CopySetKey{1, 4}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(1)); + // 5. There is an add peer operation on the copyset, with a target of 1 + Operator testOperator( + 1, CopySetKey{1, 4}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(1)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_TRUE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 4})); } { - // 6. copyset上有change peer操作,target不是1 - Operator testOperator(1, CopySetKey{1, 5}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(4, 2)); + // 6. There is a change peer operation on the copyset, but the target is + // not 1 + Operator testOperator( + 1, CopySetKey{1, 5}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(4, 2)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 5})); } { - // 7. copyset上有change peer操作,target是1 - Operator testOperator(1, CopySetKey{1, 6}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(4, 1)); + // 7. 
There is a change peer operation on the copyset, with a target of + // 1 + Operator testOperator( + 1, CopySetKey{1, 6}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(4, 1)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_TRUE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 6})); } @@ -479,15 +497,15 @@ TEST(CoordinatorTest, test_RapidLeaderSchedule) { EXPECT_CALL(*topoAdapter, GetLogicalpools()) .WillOnce(Return(std::vector{})); ASSERT_EQ(kScheduleErrCodeInvalidLogicalPool, - coordinator->RapidLeaderSchedule(2)); + coordinator->RapidLeaderSchedule(2)); } TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { /* - 场景: - chunkserver1: offline 有恢复op - chunkserver2: offline 没有恢复op,没有candidate,有其他op - chunkserver3: offline 有candidate + Scenario: + chunkserver1: offline has recovery op + chunkserver2: offline has no recovery op, no candidate, and other ops + chunkserver3: offline has candidate chunkserver4: online chunkserver4: online */ @@ -496,21 +514,18 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { auto topoAdapter = std::make_shared(); auto coordinator = std::make_shared(topoAdapter); - // 获取option + // Get option ScheduleOption scheduleOption = GetScheduleOption(); coordinator->InitScheduler(scheduleOption, metric); - // 构造chunkserver + // Construct chunkserver std::vector chunkserverInfos; std::vector peerInfos; for (int i = 1; i <= 6; i++) { PeerInfo peer(i, i % 3 + 1, i, "192.168.0." + std::to_string(i), 9000); - ChunkServerInfo csInfo( - peer, - OnlineState::ONLINE, - DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, - 1, 10, 1, ChunkServerStatisticInfo{}); + ChunkServerInfo csInfo(peer, OnlineState::ONLINE, DiskState::DISKNORMAL, + ChunkServerStatus::READWRITE, 1, 10, 1, + ChunkServerStatisticInfo{}); if (i <= 3) { csInfo.state = OnlineState::OFFLINE; } @@ -519,28 +534,21 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { peerInfos.emplace_back(peer); } - // 构造op - Operator opForCopySet1( - 1, CopySetKey{1, 1}, - OperatorPriority::HighPriority, - steady_clock::now(), - std::make_shared(1, 4)); + // Construct op + Operator opForCopySet1(1, CopySetKey{1, 1}, OperatorPriority::HighPriority, + steady_clock::now(), + std::make_shared(1, 4)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(opForCopySet1)); Operator opForCopySet2( - 2, CopySetKey{1, 2}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(2, 4)); + 2, CopySetKey{1, 2}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(2, 4)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(opForCopySet2)); - // 构造copyset + // Construct a copyset std::vector peersFor2({peerInfos[1], peerInfos[3], peerInfos[4]}); - CopySetInfo copyset2( - CopySetKey{1, 2}, 1, 4, - peersFor2, - ConfigChangeInfo{}, - CopysetStatistics{}); + CopySetInfo copyset2(CopySetKey{1, 2}, 1, 4, peersFor2, ConfigChangeInfo{}, + CopysetStatistics{}); std::vector peersFor3({peerInfos[2], peerInfos[3], peerInfos[4]}); ConfigChangeInfo configChangeInfoForCS3; @@ -550,13 +558,10 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { configChangeInfoForCS3.set_allocated_peer(replica); configChangeInfoForCS3.set_type(ConfigChangeType::CHANGE_PEER); configChangeInfoForCS3.set_finished(true); - CopySetInfo copyset3( - CopySetKey{1, 3}, 1, 4, - peersFor3, - configChangeInfoForCS3, - CopysetStatistics{}); + CopySetInfo copyset3(CopySetKey{1, 3}, 1, 4, peersFor3, + configChangeInfoForCS3, CopysetStatistics{}); - 
// 1. 查询所有chunkserver + // 1. Query all chunkservers { EXPECT_CALL(*topoAdapter, GetChunkServerInfos()) .WillOnce(Return(chunkserverInfos)); @@ -567,8 +572,8 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { std::map statusMap; ASSERT_EQ(kScheduleErrCodeSuccess, - coordinator->QueryChunkServerRecoverStatus( - std::vector{}, &statusMap)); + coordinator->QueryChunkServerRecoverStatus( + std::vector{}, &statusMap)); ASSERT_EQ(6, statusMap.size()); ASSERT_TRUE(statusMap[1]); ASSERT_FALSE(statusMap[2]); @@ -578,26 +583,26 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { ASSERT_FALSE(statusMap[6]); } - // 2. 查询指定chunkserver, 但chunkserver不存在 + // 2. Query for specified chunkserver, but chunkserver does not exist { EXPECT_CALL(*topoAdapter, GetChunkServerInfo(7, _)) .WillOnce(Return(false)); std::map statusMap; ASSERT_EQ(kScheduleErrInvalidQueryChunkserverID, - coordinator->QueryChunkServerRecoverStatus( - std::vector{7}, &statusMap)); + coordinator->QueryChunkServerRecoverStatus( + std::vector{7}, &statusMap)); } - // 3. 查询指定chunkserver, 不在恢复中 + // 3. Query the specified chunkserver, not in recovery { EXPECT_CALL(*topoAdapter, GetChunkServerInfo(6, _)) - .WillOnce(DoAll(SetArgPointee<1>(chunkserverInfos[5]), - Return(true))); + .WillOnce( + DoAll(SetArgPointee<1>(chunkserverInfos[5]), Return(true))); std::map statusMap; ASSERT_EQ(kScheduleErrCodeSuccess, - coordinator->QueryChunkServerRecoverStatus( - std::vector{6}, &statusMap)); + coordinator->QueryChunkServerRecoverStatus( + std::vector{6}, &statusMap)); ASSERT_EQ(1, statusMap.size()); ASSERT_FALSE(statusMap[6]); } @@ -606,4 +611,3 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { } // namespace schedule } // namespace mds } // namespace curve - diff --git a/test/mds/schedule/leaderScheduler_test.cpp b/test/mds/schedule/leaderScheduler_test.cpp index 3be00637b0..f1705f950a 100644 --- a/test/mds/schedule/leaderScheduler_test.cpp +++ b/test/mds/schedule/leaderScheduler_test.cpp @@ -21,20 +21,21 @@ */ #include -#include "src/mds/schedule/scheduler.h" + +#include "src/common/timeutility.h" #include "src/mds/schedule/scheduleMetrics.h" -#include "test/mds/schedule/mock_topoAdapter.h" +#include "src/mds/schedule/scheduler.h" #include "test/mds/mock/mock_topology.h" #include "test/mds/schedule/common.h" -#include "src/common/timeutility.h" +#include "test/mds/schedule/mock_topoAdapter.h" using ::curve::mds::topology::MockTopology; using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace mds { @@ -58,8 +59,8 @@ class TestLeaderSchedule : public ::testing::Test { opt.scatterWithRangePerent = 0.2; opt.leaderSchedulerIntervalSec = 1; opt.chunkserverCoolingTimeSec = 0; - leaderScheduler_ = std::make_shared( - opt, topoAdapter_, opController_); + leaderScheduler_ = + std::make_shared(opt, topoAdapter_, opController_); } void TearDown() override { @@ -91,15 +92,12 @@ TEST_F(TestLeaderSchedule, test_has_chunkServer_offline) { auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; - ChunkServerInfo csInfo1( - peer1, offlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - 
ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, offlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); std::vector csInfos({csInfo1, csInfo2, csInfo3}); PoolIdType poolId = 1; @@ -110,8 +108,8 @@ TEST_F(TestLeaderSchedule, test_has_chunkServer_offline) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); std::vector copySetInfos({copySet1}); EXPECT_CALL(*topoAdapter_, GetLogicalpools()) @@ -134,15 +132,12 @@ TEST_F(TestLeaderSchedule, test_copySet_has_candidate) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); std::vector csInfos({csInfo1, csInfo2, csInfo3}); PoolIdType poolId = 1; CopySetIdType copysetId = 1; @@ -152,8 +147,8 @@ TEST_F(TestLeaderSchedule, test_copySet_has_candidate) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); copySet1.candidatePeerInfo = PeerInfo(1, 1, 1, "192.168.10.1", 9000); std::vector copySetInfos({copySet1}); @@ -165,7 +160,8 @@ TEST_F(TestLeaderSchedule, test_copySet_has_candidate) { .WillRepeatedly(Return(copySetInfos)); leaderScheduler_->Schedule(); - ASSERT_EQ(0, opController_->GetOperators().size());} + ASSERT_EQ(0, opController_->GetOperators().size()); +} TEST_F(TestLeaderSchedule, test_cannot_get_chunkServerInfo) { PeerInfo peer1(1, 1, 1, "192.168.10.1", 9000); @@ -174,15 +170,12 @@ TEST_F(TestLeaderSchedule, test_cannot_get_chunkServerInfo) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, 
statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); std::vector csInfos({csInfo1, csInfo2, csInfo3}); PoolIdType poolId = 1; @@ -193,8 +186,8 @@ TEST_F(TestLeaderSchedule, test_cannot_get_chunkServerInfo) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); std::vector copySetInfos({copySet1}); EXPECT_CALL(*topoAdapter_, GetLogicalpools()) @@ -206,7 +199,6 @@ TEST_F(TestLeaderSchedule, test_cannot_get_chunkServerInfo) { EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(1, _)) .WillRepeatedly(Return(false)); - leaderScheduler_->Schedule(); ASSERT_EQ(0, opController_->GetOperators().size()); } @@ -218,15 +210,12 @@ TEST_F(TestLeaderSchedule, test_no_need_tranferLeaderOut) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 1, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 1, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); csInfo3.startUpTime = 3; std::vector csInfos({csInfo1, csInfo2, csInfo3}); @@ -238,8 +227,8 @@ TEST_F(TestLeaderSchedule, test_no_need_tranferLeaderOut) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); std::vector copySetInfos({copySet1}); EXPECT_CALL(*topoAdapter_, GetLogicalpools()) @@ -264,25 +253,19 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_normal) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 1, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - - ChunkServerInfo csInfo4( - peer4, onlineState, diskState, ChunkServerStatus::READWRITE, - 1, 100, 10, statInfo); - ChunkServerInfo csInfo5( - peer5, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo6( - peer6, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 1, 100, 10, statInfo); + ChunkServerInfo 
csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + + ChunkServerInfo csInfo4(peer4, onlineState, diskState, + ChunkServerStatus::READWRITE, 1, 100, 10, statInfo); + ChunkServerInfo csInfo5(peer5, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo6(peer6, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); struct timeval tm; gettimeofday(&tm, NULL); csInfo3.startUpTime = tm.tv_sec - 2; @@ -298,11 +281,11 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_normal) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); CopySetInfo copySet2(CopySetKey{2, 1}, epoch, 5, - std::vector({peer4, peer5, peer6}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer4, peer5, peer6}), + ConfigChangeInfo{}, CopysetStatistics{}); std::vector copySetInfos1({copySet1}); std::vector copySetInfos2({copySet2}); @@ -334,14 +317,14 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_normal) { ASSERT_TRUE(opController_->GetOperatorById(copySet1.id, &op)); ASSERT_EQ(OperatorPriority::NormalPriority, op.priority); ASSERT_EQ(std::chrono::seconds(10), op.timeLimit); - TransferLeader *res = dynamic_cast(op.step.get()); + TransferLeader* res = dynamic_cast(op.step.get()); ASSERT_TRUE(res != nullptr); ASSERT_EQ(csInfo3.info.id, res->GetTargetPeer()); ASSERT_TRUE(opController_->GetOperatorById(copySet2.id, &op)); ASSERT_EQ(OperatorPriority::NormalPriority, op.priority); ASSERT_EQ(std::chrono::seconds(10), op.timeLimit); - res = dynamic_cast(op.step.get()); + res = dynamic_cast(op.step.get()); ASSERT_TRUE(res != nullptr); ASSERT_EQ(csInfo6.info.id, res->GetTargetPeer()); } @@ -359,25 +342,19 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_pendding) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::PENDDING, - 0, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 5, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - - ChunkServerInfo csInfo4( - peer4, onlineState, diskState, ChunkServerStatus::READWRITE, - 4, 100, 10, statInfo); - ChunkServerInfo csInfo5( - peer5, onlineState, diskState, ChunkServerStatus::READWRITE, - 5, 100, 10, statInfo); - ChunkServerInfo csInfo6( - peer6, onlineState, diskState, ChunkServerStatus::PENDDING, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::PENDDING, 0, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 5, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + + ChunkServerInfo csInfo4(peer4, onlineState, diskState, + ChunkServerStatus::READWRITE, 4, 100, 10, statInfo); + ChunkServerInfo csInfo5(peer5, onlineState, diskState, + ChunkServerStatus::READWRITE, 5, 100, 10, 
statInfo); + ChunkServerInfo csInfo6(peer6, onlineState, diskState, + ChunkServerStatus::PENDDING, 0, 100, 10, statInfo); struct timeval tm; gettimeofday(&tm, NULL); csInfo3.startUpTime = tm.tv_sec - 2; @@ -393,11 +370,11 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_pendding) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); CopySetInfo copySet2(CopySetKey{2, 1}, epoch, 5, - std::vector({peer4, peer5, peer6}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer4, peer5, peer6}), + ConfigChangeInfo{}, CopysetStatistics{}); std::vector copySetInfos1({copySet1}); std::vector copySetInfos2({copySet2}); @@ -429,7 +406,7 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_pendding) { ASSERT_TRUE(opController_->GetOperatorById(copySet1.id, &op)); ASSERT_EQ(OperatorPriority::NormalPriority, op.priority); ASSERT_EQ(std::chrono::seconds(10), op.timeLimit); - TransferLeader *res = dynamic_cast(op.step.get()); + TransferLeader* res = dynamic_cast(op.step.get()); ASSERT_TRUE(res != nullptr); ASSERT_EQ(csInfo3.info.id, res->GetTargetPeer()); @@ -439,7 +416,7 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_pendding) { TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { // chunkserver1 chunkserver2 chunkserver3 chunkserver4 // leaderCount 0 3 2 1 - // copyset 1 1 1(有operator) + // copyset 1 1 1(with operator) // 2 2 2 // 3 3 3 PeerInfo peer1(1, 1, 1, "192.168.10.1", 9000); @@ -449,19 +426,15 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); csInfo1.startUpTime = ::curve::common::TimeUtility::GetTimeofDaySec() - 4; - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 3, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo4( - peer4, onlineState, diskState, ChunkServerStatus::READWRITE, - 1, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 3, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo4(peer4, onlineState, diskState, + ChunkServerStatus::READWRITE, 1, 100, 10, statInfo); std::vector csInfos({csInfo1, csInfo2, csInfo3, csInfo4}); PoolIdType poolId = 1; @@ -472,18 +445,18 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); copySetKey.second = 2; leader = 3; CopySetInfo copySet2(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, 
CopysetStatistics{}); copySetKey.second = 3; leader = 4; CopySetInfo copySet3(copySetKey, epoch, leader, - std::vector({peer2, peer3, peer4}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer2, peer3, peer4}), + ConfigChangeInfo{}, CopysetStatistics{}); copySetKey.second = 1; Operator testOperator(1, copySetKey, OperatorPriority::NormalPriority, @@ -498,7 +471,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { .Times(2) .WillOnce(Return(std::vector({copySet1}))) .WillOnce(Return(std::vector({copySet3, copySet2}))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(1, _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(1, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(3, _)) @@ -513,7 +486,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { ASSERT_TRUE(opController_->GetOperatorById(copySet2.id, &op)); ASSERT_EQ(OperatorPriority::NormalPriority, op.priority); ASSERT_EQ(std::chrono::seconds(10), op.timeLimit); - TransferLeader *res = dynamic_cast(op.step.get()); + TransferLeader* res = dynamic_cast(op.step.get()); ASSERT_TRUE(res != nullptr); ASSERT_EQ(1, res->GetTargetPeer()); } @@ -521,7 +494,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { // chunkserver1 chunkserver2 chunkserver3 chunkserver4 // leaderCount 0 3 2 1 - // copyset 1 1 1(有operator) + // copyset 1 1 1(with operator) // 2 2 2 // 3 3 3 PeerInfo peer1(1, 1, 1, "192.168.10.1", 9000); @@ -531,19 +504,15 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); csInfo1.startUpTime = ::curve::common::TimeUtility::GetTimeofDaySec() - 4; - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 3, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::PENDDING, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo4( - peer4, onlineState, diskState, ChunkServerStatus::READWRITE, - 1, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 3, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::PENDDING, 2, 100, 10, statInfo); + ChunkServerInfo csInfo4(peer4, onlineState, diskState, + ChunkServerStatus::READWRITE, 1, 100, 10, statInfo); std::vector csInfos({csInfo1, csInfo2, csInfo3, csInfo4}); PoolIdType poolId = 1; @@ -554,18 +523,18 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); copySetKey.second = 2; leader = 3; CopySetInfo copySet2(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); copySetKey.second = 3; leader = 4; CopySetInfo 
copySet3(copySetKey, epoch, leader,
-                         std::vector<PeerInfo>({peer2, peer3, peer4}),
-                         ConfigChangeInfo{}, CopysetStatistics{});
+                         std::vector<PeerInfo>({peer2, peer3, peer4}),
+                         ConfigChangeInfo{}, CopysetStatistics{});
     copySetKey.second = 1;
     Operator testOperator(1, copySetKey, OperatorPriority::NormalPriority,
@@ -580,7 +549,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) {
         .Times(2)
         .WillOnce(Return(std::vector<CopySetInfo>({copySet1})))
         .WillOnce(Return(std::vector<CopySetInfo>({copySet3, copySet2})));
-    EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(1, _))
+    EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(1, _))
         .Times(2)
         .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo1), Return(true)));
     EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(3, _))
@@ -595,7 +564,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) {
     ASSERT_TRUE(opController_->GetOperatorById(copySet2.id, &op));
     ASSERT_EQ(OperatorPriority::NormalPriority, op.priority);
     ASSERT_EQ(std::chrono::seconds(10), op.timeLimit);
-    TransferLeader *res = dynamic_cast<TransferLeader *>(op.step.get());
+    TransferLeader* res = dynamic_cast<TransferLeader*>(op.step.get());
     ASSERT_TRUE(res != nullptr);
     ASSERT_EQ(1, res->GetTargetPeer());
 }
@@ -603,7 +572,3 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) {
 }  // namespace schedule
 }  // namespace mds
 }  // namespace curve
-
-
-
-
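The leaderScheduler and coordinator tests above drive every mocked TopoAdapter lookup through the same gmock idiom, DoAll(SetArgPointee<N>(...), Return(...)). As a minimal reference for reviewers, a self-contained sketch of the pattern; MockAdapter and Lookup are hypothetical names used only for illustration, not part of this patch:

#include <gmock/gmock.h>
#include <gtest/gtest.h>

using ::testing::_;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;

// Hypothetical stand-in for a TopoAdapter-style lookup with an out-param.
struct MockAdapter {
    // A real implementation would fill *out on success and return true.
    MOCK_METHOD(bool, Lookup, (int id, int* out));
};

TEST(GMockIdiomSketch, StubOutParameter) {
    MockAdapter adapter;
    // SetArgPointee<1> writes 42 through the second argument (index 1);
    // Return(true) supplies the return value. Together they emulate a
    // successful lookup, like DoAll(SetArgPointee<1>(csInfo1), Return(true))
    // in the hunks above.
    EXPECT_CALL(adapter, Lookup(_, _))
        .WillOnce(DoAll(SetArgPointee<1>(42), Return(true)))
        .WillRepeatedly(Return(false));  // any further call misses

    int value = 0;
    EXPECT_TRUE(adapter.Lookup(7, &value));
    EXPECT_EQ(42, value);
    EXPECT_FALSE(adapter.Lookup(7, &value));  // falls through to the miss
}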
diff --git a/test/mds/schedule/operatorStep_test.cpp b/test/mds/schedule/operatorStep_test.cpp
index 3cab9d2911..0147579ce8 100644
--- a/test/mds/schedule/operatorStep_test.cpp
+++ b/test/mds/schedule/operatorStep_test.cpp
@@ -22,6 +22,7 @@
 #include
 #include
+
 #include "src/common/timeutility.h"
 #include "test/mds/schedule/common.h"
@@ -30,8 +31,8 @@
 namespace mds {
 namespace schedule {
 TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) {
     auto originCopySetInfo = GetCopySetInfoForTest();
-    std::shared_ptr<TransferLeader> transferLeader
-        = std::make_shared<TransferLeader>(1, 2);
+    std::shared_ptr<TransferLeader> transferLeader =
+        std::make_shared<TransferLeader>(1, 2);
     auto testCopySetInfo = originCopySetInfo;
     ApplyStatus applyStatus;
@@ -49,21 +50,21 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) {
         // 2. transfer leader complete
         testCopySetInfo.leader = 2;
         ASSERT_EQ(ApplyStatus::Finished,
-            transferLeader->Apply(testCopySetInfo, &copySetConf));
+                  transferLeader->Apply(testCopySetInfo, &copySetConf));
     }
     {
         // 3. report leader is not record old/target leader in operator
         testCopySetInfo.leader = 3;
         ASSERT_EQ(ApplyStatus::Failed,
-            transferLeader->Apply(testCopySetInfo, &copySetConf));
+                  transferLeader->Apply(testCopySetInfo, &copySetConf));
     }
     {
         // 4. transfer leader fail
         testCopySetInfo.leader = 1;
-        CandidateError *candidateError = new CandidateError();
-        std::string *errMsg = new std::string("transfer leader err");
+        CandidateError* candidateError = new CandidateError();
+        std::string* errMsg = new std::string("transfer leader err");
         candidateError->set_allocated_errmsg(errMsg);
         candidateError->set_errtype(1);
         testCopySetInfo.candidatePeerInfo = PeerInfo(2, 1, 1, "", 9000);
@@ -76,7 +77,7 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) {
         testCopySetInfo.configChangeInfo.set_allocated_peer(replica);
         testCopySetInfo.configChangeInfo.set_allocated_err(candidateError);
         ASSERT_EQ(ApplyStatus::Failed,
-            transferLeader->Apply(testCopySetInfo, &copySetConf));
+                  transferLeader->Apply(testCopySetInfo, &copySetConf));
     }
     {
@@ -90,14 +91,14 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) {
         testCopySetInfo.configChangeInfo.set_allocated_peer(replica);
         testCopySetInfo.configChangeInfo.release_err();
         ASSERT_EQ(ApplyStatus::OnGoing,
-            transferLeader->Apply(testCopySetInfo, &copySetConf));
+                  transferLeader->Apply(testCopySetInfo, &copySetConf));
     }
     {
         // 6. tarnfer leader type not complete
         testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER);
         ASSERT_EQ(ApplyStatus::Failed,
-            transferLeader->Apply(testCopySetInfo, &copySetConf));
+                  transferLeader->Apply(testCopySetInfo, &copySetConf));
     }
     {
@@ -110,7 +111,7 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) {
             ConfigChangeType::TRANSFER_LEADER);
         testCopySetInfo.configChangeInfo.set_allocated_peer(replica);
         ASSERT_EQ(ApplyStatus::Failed,
-            transferLeader->Apply(testCopySetInfo, &copySetConf));
+                  transferLeader->Apply(testCopySetInfo, &copySetConf));
     }
 }
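The numbered cases above exercise every ApplyStatus value an OperatorStep can produce. A condensed sketch of how the scheduling side consumes these results; the control flow is simplified and only illustrates the contract the assertions pin down, while the real dispatch lives in the operator controller and coordinator:

// Hypothetical consumer of the four outcomes asserted above.
ApplyStatus status = step->Apply(copySetInfo, &copySetConf);
switch (status) {
    case ApplyStatus::Finished:
        // The reported copyset already reflects the change; the
        // operator can be dropped.
        break;
    case ApplyStatus::Ordered:
        // copySetConf has been filled in; it is sent back to the
        // chunkserver in the heartbeat response to start the change.
        break;
    case ApplyStatus::OnGoing:
        // configChangeInfo says a change is still in flight; keep the
        // operator and wait for the next report.
        break;
    case ApplyStatus::Failed:
        // The candidate reported an error, or the report contradicts
        // the operator; the operator is discarded.
        break;
}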
@@ -127,8 +128,7 @@ TEST(OperatorSepTest, OperatorSepTest_AddPeer_Test) {
     // 2. add peer complete
     auto testCopySetInfo = originCopySetInfo;
-    testCopySetInfo.peers.emplace_back(
-        PeerInfo(4, 3, 4, "192.168.10.4", 9000));
+    testCopySetInfo.peers.emplace_back(PeerInfo(4, 3, 4, "192.168.10.4", 9000));
     ASSERT_EQ(ApplyStatus::Finished,
               addPeer->Apply(testCopySetInfo, &copySetConf));
@@ -141,8 +141,8 @@ TEST(OperatorSepTest, OperatorSepTest_AddPeer_Test) {
     testCopySetInfo.configChangeInfo.set_allocated_peer(replica);
     testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER);
     testCopySetInfo.configChangeInfo.set_finished(false);
-    std::string *errMsg = new std::string("add peer failed");
-    CandidateError *candidateError = new CandidateError();
+    std::string* errMsg = new std::string("add peer failed");
+    CandidateError* candidateError = new CandidateError();
     candidateError->set_errtype(2);
     candidateError->set_allocated_errmsg(errMsg);
     testCopySetInfo.configChangeInfo.set_allocated_err(candidateError);
@@ -158,7 +158,7 @@ TEST(OperatorSepTest, OperatorSepTest_AddPeer_Test) {
     // 5. add peer type not complete
     testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::REMOVE_PEER);
     ASSERT_EQ(ApplyStatus::Failed,
-        addPeer->Apply(testCopySetInfo, &copySetConf));
+              addPeer->Apply(testCopySetInfo, &copySetConf));
     // 6. config change item do not match
     testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER);
@@ -174,8 +174,7 @@ TEST(OperatorStepTest, OperatorStepTest_RemovePeer_Test) {
     auto originCopySetInfo = GetCopySetInfoForTest();
-    std::shared_ptr<RemovePeer>
-        removePeer = std::make_shared<RemovePeer>(3);
+    std::shared_ptr<RemovePeer> removePeer = std::make_shared<RemovePeer>(3);
     // 1. remove peer haven't started
     CopySetConf copySetConf;
@@ -199,13 +198,12 @@ TEST(OperatorStepTest, OperatorStepTest_RemovePeer_Test) {
     replica->set_address("192.10.12.4:9000:0");
     testCopySetInfo.configChangeInfo.set_allocated_peer(replica);
     testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::REMOVE_PEER);
-    std::string *errMsg = new std::string("remove peer err");
-    CandidateError *candidateError = new CandidateError();
+    std::string* errMsg = new std::string("remove peer err");
+    CandidateError* candidateError = new CandidateError();
     candidateError->set_errtype(2);
     candidateError->set_allocated_errmsg(errMsg);
     testCopySetInfo.configChangeInfo.set_finished(false);
-    testCopySetInfo.configChangeInfo.set_allocated_err(
-        candidateError);
+    testCopySetInfo.configChangeInfo.set_allocated_err(candidateError);
     ASSERT_EQ(ApplyStatus::Failed,
               removePeer->Apply(testCopySetInfo, &copySetConf));
@@ -218,7 +216,7 @@ TEST(OperatorStepTest, OperatorStepTest_RemovePeer_Test) {
     // 5. remove peer type not complete
     testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER);
     ASSERT_EQ(ApplyStatus::Failed,
-        removePeer->Apply(testCopySetInfo, &copySetConf));
+              removePeer->Apply(testCopySetInfo, &copySetConf));
     // 5. config change item do not match
     testCopySetInfo.candidatePeerInfo = PeerInfo(10, 1, 1, "", 9000);
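A note on the bare new expressions clang-format keeps re-wrapping in these hunks: they are intentional, because protobuf's generated set_allocated_* setters transfer ownership of the raw pointer into the message. A short illustration of the contract the tests lean on (variable names local to this sketch):

auto* errMsg = new std::string("remove peer err");
auto* candidateError = new CandidateError();
candidateError->set_allocated_errmsg(errMsg);  // message now owns errMsg
configChangeInfo.set_allocated_err(candidateError);  // ...and candidateError
// release_* does the reverse and hands ownership back to the caller;
// discarding the returned pointer, as the release_err() calls in these
// tests do, leaks the object, which is tolerated in test code only.
CandidateError* err = configChangeInfo.release_err();
delete err;  // a production caller would be responsible for freeing it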
@@ -234,31 +232,31 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) {
     auto originCopySetInfo = GetCopySetInfoForTest();
-    std::shared_ptr<ChangePeer>
-        changePeer = std::make_shared<ChangePeer>(3, 4);
+    std::shared_ptr<ChangePeer> changePeer =
+        std::make_shared<ChangePeer>(3, 4);
     CopySetConf copySetConf;
-    // 1. change peer还未开始
+    // 1. change peer has not yet started
     {
         ASSERT_EQ(ApplyStatus::Ordered,
-            changePeer->Apply(originCopySetInfo, &copySetConf));
+                  changePeer->Apply(originCopySetInfo, &copySetConf));
         ASSERT_EQ(4, copySetConf.configChangeItem);
         ASSERT_EQ(3, copySetConf.oldOne);
         ASSERT_EQ(ConfigChangeType::CHANGE_PEER, copySetConf.type);
     }
     auto testCopySetInfo = originCopySetInfo;
-    // 2. change peer完成
+    // 2. change peer completed
     {
         auto testCopySetInfo = originCopySetInfo;
         testCopySetInfo.peers.erase(testCopySetInfo.peers.begin() + 2);
         testCopySetInfo.peers.emplace_back(
-            PeerInfo(4, 3, 4, "192.168.10.4", 9000));
+            PeerInfo(4, 3, 4, "192.168.10.4", 9000));
         ASSERT_EQ(ApplyStatus::Finished,
-            changePeer->Apply(testCopySetInfo, &copySetConf));
+                  changePeer->Apply(testCopySetInfo, &copySetConf));
     }
-    // 3. change peer失败
+    // 3. change peer failed
     {
         testCopySetInfo = originCopySetInfo;
         testCopySetInfo.candidatePeerInfo = PeerInfo(4, 1, 1, "", 9000);
@@ -269,24 +267,24 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) {
         testCopySetInfo.configChangeInfo.set_type(
             ConfigChangeType::CHANGE_PEER);
         testCopySetInfo.configChangeInfo.set_finished(false);
-        std::string *errMsg = new std::string("add peer failed");
-        CandidateError *candidateError = new CandidateError();
+        std::string* errMsg = new std::string("add peer failed");
+        CandidateError* candidateError = new CandidateError();
         candidateError->set_errtype(2);
         candidateError->set_allocated_errmsg(errMsg);
         testCopySetInfo.configChangeInfo.set_allocated_err(candidateError);
         ASSERT_EQ(ApplyStatus::Failed,
-            changePeer->Apply(testCopySetInfo, &copySetConf));
+                  changePeer->Apply(testCopySetInfo, &copySetConf));
     }
-    // 4. 上报未完成
+    // 4. Reporting incomplete
     {
         testCopySetInfo.configChangeInfo.set_finished(false);
         testCopySetInfo.configChangeInfo.release_err();
         ASSERT_EQ(ApplyStatus::OnGoing,
-            changePeer->Apply(testCopySetInfo, &copySetConf));
+                  changePeer->Apply(testCopySetInfo, &copySetConf));
     }
-    // 5. 上报的变更类型和mds中的oprator不相符合
+    // 5. The reported change type does not match the operator in mds
     {
         testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER);
         testCopySetInfo.configChangeInfo.set_finished(true);
@@ -296,7 +294,7 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) {
         replica->set_address("192.10.12.5:9000:0");
         testCopySetInfo.configChangeInfo.set_allocated_peer(replica);
         ASSERT_EQ(ApplyStatus::Failed,
-            changePeer->Apply(testCopySetInfo, &copySetConf));
+                  changePeer->Apply(testCopySetInfo, &copySetConf));
     }
 }
@@ -362,9 +360,9 @@ TEST(OperatorStepTest, TestStartScanPeer) {
         auto ret = step->Apply(copysetInfo, &copysetConf);
         ASSERT_EQ(ret, ApplyStatus::Ordered);
-        ASSERT_EQ(copysetConf.id.first, 1);  // logical pool id
-        ASSERT_EQ(copysetConf.id.second, 1);  // copyset id
-        ASSERT_EQ(copysetConf.epoch, 1);  // epoch
+        ASSERT_EQ(copysetConf.id.first, 1);   // logical pool id
+        ASSERT_EQ(copysetConf.id.second, 1);  // copyset id
+        ASSERT_EQ(copysetConf.epoch, 1);      // epoch
         ASSERT_EQ(copysetConf.peers, copysetInfo.peers);  // peers
         ASSERT_EQ(copysetConf.type, ConfigChangeType::START_SCAN_PEER);  // type
         ASSERT_EQ(copysetConf.configChangeItem, 1);  // chunkserver id
@@ -377,9 +375,9 @@
     {
         auto ret = step->Apply(copysetInfo, &copysetConf);
         ASSERT_EQ(ret, ApplyStatus::Ordered);
-        ASSERT_EQ(copysetConf.id.first, 1);  // logical pool id
-        ASSERT_EQ(copysetConf.id.second, 1);  // copyset id
-        ASSERT_EQ(copysetConf.epoch, 1);  // epoch
+        ASSERT_EQ(copysetConf.id.first, 1);   // logical pool id
+        ASSERT_EQ(copysetConf.id.second, 1);  // copyset id
+        ASSERT_EQ(copysetConf.epoch, 1);      // epoch
         ASSERT_EQ(copysetConf.peers, copysetInfo.peers);  // peers
         ASSERT_EQ(copysetConf.type, ConfigChangeType::START_SCAN_PEER);  // type
         ASSERT_EQ(copysetConf.configChangeItem, 1);  // chunkserver id
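Between the two scan-peer tests, one orientation point: every OperatorStep in this file is exercised the same way in production, wrapped in an Operator and registered with the operator controller. The shape below mirrors the construction used by the coordinator tests earlier in this patch; the concrete values and the opController variable are illustrative only:

// Wrap a step in an Operator and hand it to the controller.
Operator testOperator(/*startEpoch=*/1, CopySetKey{/*pool=*/1, /*copyset=*/4},
                      OperatorPriority::NormalPriority, steady_clock::now(),
                      std::make_shared<AddPeer>(/*target chunkserver=*/1));
testOperator.timeLimit = std::chrono::seconds(100);
ASSERT_TRUE(opController->AddOperator(testOperator));
// On each heartbeat the controller feeds the reported state to the step:
//   ApplyStatus st = testOperator.step->Apply(reportedCopySetInfo, &conf);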
@@ -509,12 +507,13 @@ TEST(OperatorStepTest, TestCancelScanPeer) {
         auto ret = step->Apply(copysetInfo, &copysetConf);
         ASSERT_EQ(ret, ApplyStatus::Ordered);
-        ASSERT_EQ(copysetConf.id.first, 1);  // logical pool id
-        ASSERT_EQ(copysetConf.id.second, 1);  // copyset id
-        ASSERT_EQ(copysetConf.epoch, 1);  // epoch
+        ASSERT_EQ(copysetConf.id.first, 1);   // logical pool id
+        ASSERT_EQ(copysetConf.id.second, 1);  // copyset id
+        ASSERT_EQ(copysetConf.epoch, 1);      // epoch
         ASSERT_EQ(copysetConf.peers, copysetInfo.peers);  // peers
-        ASSERT_EQ(copysetConf.type, ConfigChangeType::CANCEL_SCAN_PEER);  // type // NOLINT
-        ASSERT_EQ(copysetConf.configChangeItem, 1);  // chunkserver id
+        ASSERT_EQ(copysetConf.type,
+                  ConfigChangeType::CANCEL_SCAN_PEER);  // type // NOLINT
+        ASSERT_EQ(copysetConf.configChangeItem, 1);  // chunkserver id
     }
     // CASE 3: copyset has no config change -> Ordered
     {
         auto ret = step->Apply(copysetInfo, &copysetConf);
         ASSERT_EQ(ret, ApplyStatus::Ordered);
-        ASSERT_EQ(copysetConf.id.first, 1);  // logical pool id
-        ASSERT_EQ(copysetConf.id.second, 1);  // copyset id
-        ASSERT_EQ(copysetConf.epoch, 1);  // epoch
+        ASSERT_EQ(copysetConf.id.first, 1);   // logical pool id
+        ASSERT_EQ(copysetConf.id.second, 1);  // copyset id
+        ASSERT_EQ(copysetConf.epoch, 1);      // epoch
         ASSERT_EQ(copysetConf.peers, copysetInfo.peers);  // peers
-        ASSERT_EQ(copysetConf.type, ConfigChangeType::CANCEL_SCAN_PEER);  // type // NOLINT
-        ASSERT_EQ(copysetConf.configChangeItem, 1);  // chunkserver id
+        ASSERT_EQ(copysetConf.type,
+                  ConfigChangeType::CANCEL_SCAN_PEER);  // type // NOLINT
+        ASSERT_EQ(copysetConf.configChangeItem, 1);  // chunkserver id
     }
     // CASE 4: copyset has config change but the change type
diff --git a/test/mds/schedule/rapidLeaderSheduler_test.cpp b/test/mds/schedule/rapidLeaderSheduler_test.cpp
index 3caecf7111..5d9389c6d9 100644
--- a/test/mds/schedule/rapidLeaderSheduler_test.cpp
+++ b/test/mds/schedule/rapidLeaderSheduler_test.cpp
@@ -20,20 +20,20 @@
  * Author: lixiaocui
  */
-#include "test/mds/schedule/mock_topoAdapter.h"
-#include "test/mds/mock/mock_topology.h"
-#include "test/mds/schedule/common.h"
+#include "src/mds/schedule/operatorFactory.h"
 #include "src/mds/schedule/scheduleMetrics.h"
 #include "src/mds/schedule/scheduler.h"
-#include "src/mds/schedule/operatorFactory.h"
+#include "test/mds/mock/mock_topology.h"
+#include "test/mds/schedule/common.h"
+#include "test/mds/schedule/mock_topoAdapter.h"
 using ::curve::mds::topology::MockTopology;
 using ::testing::_;
-using ::testing::Return;
 using ::testing::AtLeast;
-using ::testing::SetArgPointee;
 using ::testing::DoAll;
+using ::testing::Return;
+using ::testing::SetArgPointee;
 namespace curve {
 namespace mds {
 namespace schedule {
 class TestRapidLeaderSchedule : public ::testing::Test {
@@ -55,14 +55,17 @@ class TestRapidLeaderSchedule : public ::testing::Test {
         auto testCopySetInfo = GetCopySetInfoForTest();
         ChunkServerInfo csInfo1(testCopySetInfo.peers[0], OnlineState::ONLINE,
-                                DiskState::DISKNORMAL, ChunkServerStatus::READWRITE,
-                                1, 100, 100, ChunkServerStatisticInfo{});
+                                DiskState::DISKNORMAL,
+                                ChunkServerStatus::READWRITE, 1, 100, 100,
+                                ChunkServerStatisticInfo{});
         ChunkServerInfo csInfo2(testCopySetInfo.peers[1], OnlineState::ONLINE,
-                                DiskState::DISKNORMAL, ChunkServerStatus::READWRITE,
-                                0, 100, 100, ChunkServerStatisticInfo{});
+                                DiskState::DISKNORMAL,
+                                ChunkServerStatus::READWRITE, 0, 100, 100,
+                                ChunkServerStatisticInfo{});
         ChunkServerInfo csInfo3(testCopySetInfo.peers[2], OnlineState::ONLINE,
-                                DiskState::DISKNORMAL, ChunkServerStatus::READWRITE,
-                                0, 100, 100, ChunkServerStatisticInfo{});
+                                DiskState::DISKNORMAL,
+                                ChunkServerStatus::READWRITE, 0, 100, 100,
+                                ChunkServerStatisticInfo{});
         chunkServerInfos_.emplace_back(csInfo1);
         chunkServerInfos_.emplace_back(csInfo2);
         chunkServerInfos_.emplace_back(csInfo3);
@@ -77,14 +80,14 @@ class TestRapidLeaderSchedule : public ::testing::Test {
 TEST_F(TestRapidLeaderSchedule, test_logicalPool_not_exist) {
     std::shared_ptr<RapidLeaderScheduler> rapidLeaderScheduler;
-    // 1. mds没有任何logicalpool
+    // 1. Mds does not have any logicalpool
     {
         rapidLeaderScheduler = std::make_shared<RapidLeaderScheduler>(
             opt_, topoAdapter_, opController_, 2);
         EXPECT_CALL(*topoAdapter_, GetLogicalpools())
             .WillOnce(Return(std::vector<PoolIdType>{}));
         ASSERT_EQ(kScheduleErrCodeInvalidLogicalPool,
-            rapidLeaderScheduler->Schedule());
+                  rapidLeaderScheduler->Schedule());
         rapidLeaderScheduler = std::make_shared<RapidLeaderScheduler>(
             opt_, topoAdapter_, opController_, 0);
@@ -93,21 +96,21 @@ TEST_F(TestRapidLeaderSchedule, test_logicalPool_not_exist) {
         ASSERT_EQ(kScheduleErrCodeSuccess, rapidLeaderScheduler->Schedule());
     }
-    // 2. mds逻辑池列表中没有指定logicalpool
+    // 2.
No logicalpool specified in the mds logical pool list { rapidLeaderScheduler = std::make_shared( opt_, topoAdapter_, opController_, 2); EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); ASSERT_EQ(kScheduleErrCodeInvalidLogicalPool, - rapidLeaderScheduler->Schedule()); + rapidLeaderScheduler->Schedule()); } } TEST_F(TestRapidLeaderSchedule, test_initResource_no_need_schedule) { std::shared_ptr rapidLeaderScheduler; { - // 1. 指定logicalpool中没有chunkserver + // 1. There is no chunkserver in the specified logicalpool EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) @@ -121,7 +124,7 @@ TEST_F(TestRapidLeaderSchedule, test_initResource_no_need_schedule) { } { - // 2. 指定logicalpool中没有copyset + // 2. There is no copyset in the specified logicalpool EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) @@ -141,7 +144,8 @@ TEST_F(TestRapidLeaderSchedule, test_select_target_fail) { opt_, topoAdapter_, opController_, 1); { - // 1. copyset的副本数目为1, 不会产生迁移 + // 1. The number of copies for copyset is 1, and migration will not + // occur EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) @@ -158,16 +162,17 @@ TEST_F(TestRapidLeaderSchedule, test_select_target_fail) { } { - // 2. chunkserver上拥有的leader数目最多相差1, 不会产生迁移 + // 2. The maximum difference in the number of leaders owned on + // chunkserver is 1, and migration will not occur // chunkserver-1 chunkserver-2 chunkserver-3 // copyset-1(leader) copyset-1 copyset-1 EXPECT_CALL(*topoAdapter_, GetLogicalpools()) - .WillOnce(Return(std::vector{1})); + .WillOnce(Return(std::vector{1})); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) .WillOnce(Return(chunkServerInfos_)); EXPECT_CALL(*topoAdapter_, GetCopySetInfosInLogicalPool(1)) - .WillOnce(Return( - std::vector{GetCopySetInfoForTest()})); + .WillOnce( + Return(std::vector{GetCopySetInfoForTest()})); ASSERT_EQ(kScheduleErrCodeSuccess, rapidLeaderScheduler->Schedule()); ASSERT_EQ(0, opController_->GetOperators().size()); @@ -175,7 +180,7 @@ TEST_F(TestRapidLeaderSchedule, test_select_target_fail) { } TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_success) { - // 快速均衡成功 + // Fast balancing successful // chunkserver-1 chunkserver-2 chunkserver-3 // copyset-1(leader) copyset-1 copyset-1 // copyset-2(leader) copyset-2 copyset-2 @@ -189,7 +194,7 @@ TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_success) { auto chunkserverInfosBak = chunkServerInfos_; chunkserverInfosBak[0].leaderCount = 3; EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) - .WillOnce(Return(chunkserverInfosBak)); + .WillOnce(Return(chunkserverInfosBak)); auto copyset1 = GetCopySetInfoForTest(); auto copyset2 = GetCopySetInfoForTest(); @@ -197,8 +202,8 @@ TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_success) { auto copyset3 = GetCopySetInfoForTest(); copyset3.id = CopySetKey{1, 3}; EXPECT_CALL(*topoAdapter_, GetCopySetInfosInLogicalPool(1)) - .WillOnce(Return( - std::vector{copyset1, copyset2, copyset3})); + .WillOnce( + Return(std::vector{copyset1, copyset2, copyset3})); OperatorFactory factory; opController_->AddOperator(factory.CreateRemovePeerOperator( copyset2, 2, OperatorPriority::NormalPriority)); @@ -206,18 +211,18 @@ TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_success) { 
ASSERT_EQ(kScheduleErrCodeSuccess, rapidLeaderScheduler->Schedule()); auto operators = opController_->GetOperators(); ASSERT_EQ(3, operators.size()); - auto op1 = dynamic_cast(operators[0].step.get()); + auto op1 = dynamic_cast(operators[0].step.get()); ASSERT_TRUE(nullptr != op1); ASSERT_EQ(2, op1->GetTargetPeer()); ASSERT_EQ(1, operators[0].copysetID.second); - auto op2 = dynamic_cast(operators[2].step.get()); + auto op2 = dynamic_cast(operators[2].step.get()); ASSERT_TRUE(nullptr != op2); ASSERT_EQ(3, op2->GetTargetPeer()); ASSERT_EQ(3, operators[2].copysetID.second); } TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_pendding) { - // 快速均衡成功 + // Fast balancing successful // chunkserver-1 chunkserver-2 chunkserver-3 // copyset-1(leader) copyset-1 copyset-1 // copyset-2(leader) copyset-2 copyset-2 @@ -232,7 +237,7 @@ TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_pendding) { chunkserverInfosBak[0].leaderCount = 3; chunkserverInfosBak[0].status = ChunkServerStatus::PENDDING; EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) - .WillOnce(Return(chunkserverInfosBak)); + .WillOnce(Return(chunkserverInfosBak)); auto copyset1 = GetCopySetInfoForTest(); auto copyset2 = GetCopySetInfoForTest(); @@ -240,8 +245,8 @@ TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_pendding) { auto copyset3 = GetCopySetInfoForTest(); copyset3.id = CopySetKey{1, 3}; EXPECT_CALL(*topoAdapter_, GetCopySetInfosInLogicalPool(1)) - .WillOnce(Return( - std::vector{copyset1, copyset2, copyset3})); + .WillOnce( + Return(std::vector{copyset1, copyset2, copyset3})); ASSERT_EQ(kScheduleErrCodeSuccess, rapidLeaderScheduler->Schedule()); auto operators = opController_->GetOperators(); diff --git a/test/mds/schedule/recoverScheduler_test.cpp b/test/mds/schedule/recoverScheduler_test.cpp index c7c11b299e..8e26a2ff57 100644 --- a/test/mds/schedule/recoverScheduler_test.cpp +++ b/test/mds/schedule/recoverScheduler_test.cpp @@ -21,23 +21,24 @@ */ #include -#include "src/mds/schedule/scheduler.h" + +#include "src/mds/common/mds_define.h" #include "src/mds/schedule/operatorController.h" -#include "src/mds/topology/topology_id_generator.h" #include "src/mds/schedule/scheduleMetrics.h" -#include "src/mds/common/mds_define.h" -#include "test/mds/schedule/mock_topoAdapter.h" +#include "src/mds/schedule/scheduler.h" +#include "src/mds/topology/topology_id_generator.h" #include "test/mds/mock/mock_topology.h" #include "test/mds/schedule/common.h" +#include "test/mds/schedule/mock_topoAdapter.h" using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; -using ::curve::mds::topology::TopologyIdGenerator; using ::curve::mds::topology::MockTopology; +using ::curve::mds::topology::TopologyIdGenerator; namespace curve { namespace mds { @@ -62,7 +63,7 @@ class TestRecoverSheduler : public ::testing::Test { opt.scatterWithRangePerent = 0.2; opt.chunkserverFailureTolerance = 3; recoverScheduler_ = std::make_shared( - opt, topoAdapter_, opController_); + opt, topoAdapter_, opController_); } void TearDown() override { opController_ = nullptr; @@ -71,9 +72,9 @@ class TestRecoverSheduler : public ::testing::Test { } protected: - std::shared_ptr topoAdapter_; - std::shared_ptr opController_; - std::shared_ptr recoverScheduler_; + std::shared_ptr topoAdapter_; + std::shared_ptr opController_; + std::shared_ptr recoverScheduler_; }; TEST_F(TestRecoverSheduler, test_copySet_already_has_operator) { @@ -82,10 
+83,8 @@ TEST_F(TestRecoverSheduler, test_copySet_already_has_operator) { EXPECT_CALL(*topoAdapter_, GetChunkServerInfos()) .WillOnce(Return(std::vector{})); CopySetKey copySetKey; - copySetKey. - first = 1; - copySetKey. - second = 1; + copySetKey.first = 1; + copySetKey.second = 1; Operator testOperator(1, copySetKey, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(1)); ASSERT_TRUE(opController_->AddOperator(testOperator)); @@ -107,8 +106,8 @@ TEST_F(TestRecoverSheduler, test_copySet_has_configChangeInfo) { TEST_F(TestRecoverSheduler, test_chunkServer_cannot_get) { EXPECT_CALL(*topoAdapter_, GetChunkServerInfos()) .WillOnce(Return(std::vector{})); - EXPECT_CALL(*topoAdapter_, GetCopySetInfos()). - WillOnce(Return(std::vector({GetCopySetInfoForTest()}))); + EXPECT_CALL(*topoAdapter_, GetCopySetInfos()) + .WillOnce(Return(std::vector({GetCopySetInfoForTest()}))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(_, _)) .Times(3) .WillRepeatedly(Return(false)); @@ -132,27 +131,27 @@ TEST_F(TestRecoverSheduler, test_server_has_more_offline_chunkserver) { PeerInfo peer4(4, 1, 1, "192.168.10.1", 9001); PeerInfo peer5(5, 1, 1, "192.168.10.1", 9002); ChunkServerInfo csInfo4(peer4, OnlineState::OFFLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 2, 100, 100, - ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 2, 100, 100, + ChunkServerStatisticInfo{}); ChunkServerInfo csInfo5(peer5, OnlineState::UNSTABLE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 2, 100, 100, - ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 2, 100, 100, + ChunkServerStatisticInfo{}); EXPECT_CALL(*topoAdapter_, GetChunkServerInfos()) - .WillOnce(Return(std::vector{ - csInfo1, csInfo2, csInfo3, csInfo4, csInfo5})); + .WillOnce(Return(std::vector{csInfo1, csInfo2, csInfo3, + csInfo4, csInfo5})); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo1.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo1.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo2.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo2.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo2), Return(true))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo3.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo3.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo3), Return(true))); recoverScheduler_->Schedule(); ASSERT_EQ(0, opController_->GetOperators().size()); } TEST_F(TestRecoverSheduler, - test_server_has_more_offline_and_retired_chunkserver) { + test_server_has_more_offline_and_retired_chunkserver) { auto testCopySetInfo = GetCopySetInfoForTest(); EXPECT_CALL(*topoAdapter_, GetCopySetInfos()) .WillRepeatedly(Return(std::vector({testCopySetInfo}))); @@ -168,27 +167,27 @@ TEST_F(TestRecoverSheduler, PeerInfo peer4(4, 1, 1, "192.168.10.1", 9001); PeerInfo peer5(5, 1, 1, "192.168.10.1", 9002); ChunkServerInfo csInfo4(peer4, OnlineState::OFFLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 2, 100, 100, - ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 2, 100, 100, + ChunkServerStatisticInfo{}); ChunkServerInfo csInfo5(peer5, OnlineState::OFFLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 2, 100, 100, - ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 2, 100, 100, + ChunkServerStatisticInfo{}); EXPECT_CALL(*topoAdapter_, GetChunkServerInfos()) - .WillOnce(Return(std::vector{ - csInfo1, csInfo2, csInfo3, csInfo4, 
csInfo5})); + .WillOnce(Return(std::vector{csInfo1, csInfo2, csInfo3, + csInfo4, csInfo5})); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo1.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo1.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo2.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo2.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo2), Return(true))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo3.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo3.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo3), Return(true))); - EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) + EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) .WillOnce(Return(2)); recoverScheduler_->Schedule(); Operator op; ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } @@ -208,64 +207,61 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { DiskState::DISKNORMAL, ChunkServerStatus::READWRITE, 2, 100, 100, ChunkServerStatisticInfo{}); PeerInfo peer4(4, 4, 4, "192.168.10.4", 9000); - ChunkServerInfo csInfo4(peer4, OnlineState::ONLINE, - DiskState::DISKNORMAL, ChunkServerStatus::READWRITE, - 2, 100, 100, ChunkServerStatisticInfo{}); + ChunkServerInfo csInfo4(peer4, OnlineState::ONLINE, DiskState::DISKNORMAL, + ChunkServerStatus::READWRITE, 2, 100, 100, + ChunkServerStatisticInfo{}); ChunkServerIdType id1 = 1; ChunkServerIdType id2 = 2; ChunkServerIdType id3 = 3; Operator op; EXPECT_CALL(*topoAdapter_, GetAvgScatterWidthInLogicalPool(_)) - .WillRepeatedly(Return(90)); + .WillRepeatedly(Return(90)); { - // 1. 所有chunkserveronline + // 1. All chunkservers are online EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id1, _)) - .WillOnce(DoAll(SetArgPointee<1>(csInfo1), - Return(true))); + .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id2, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo2), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo2), Return(true))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id3, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo3), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo3), Return(true))); recoverScheduler_->Schedule(); ASSERT_EQ(0, opController_->GetOperators().size()); } { - // 2. 副本数量大于标准,leader挂掉 + // 2. The number of replicas exceeds the standard, and the leader + // goes offline csInfo1.state = OnlineState::OFFLINE; EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id1, _)) - .WillOnce(DoAll(SetArgPointee<1>(csInfo1), - Return(true))); + .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) - .Times(2).WillRepeatedly(Return(2)); + .Times(2) + .WillRepeatedly(Return(2)); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE( - dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } { - // 3. 副本数量大于标准,follower挂掉 + // 3.
The number of replicas exceeds the standard, and the follower + // goes offline opController_->RemoveOperator(op.copysetID); csInfo1.state = OnlineState::ONLINE; csInfo2.state = OnlineState::OFFLINE; EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id1, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo1), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id2, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo2), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo2), Return(true))); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } { - // 4. 副本数目等于标准, follower挂掉 + // 4. The number of replicas equals the standard, and the follower goes + // offline opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) .WillRepeatedly(Return(3)); @@ -290,14 +286,13 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { .WillOnce(Return(true)); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE( - dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(4, op.step.get()->GetTargetPeer()); ASSERT_EQ(std::chrono::seconds(1000), op.timeLimit); } { - // 5. 选不出替换chunkserver + // 5. Unable to select a replacement chunkserver opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(_)) .WillOnce(Return(std::vector{})); @@ -306,7 +301,7 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { } { - // 6. 在chunkserver上创建copyset失败 + // 6.
Failed to create copyset on chunkserver EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) .WillRepeatedly(Return(3)); std::vector chunkserverList( @@ -335,5 +330,3 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { } // namespace schedule } // namespace mds } // namespace curve - - diff --git a/test/mds/schedule/scheduleMetrics_test.cpp b/test/mds/schedule/scheduleMetrics_test.cpp index 66969a6845..3714260772 100644 --- a/test/mds/schedule/scheduleMetrics_test.cpp +++ b/test/mds/schedule/scheduleMetrics_test.cpp @@ -20,22 +20,24 @@ * Author: lixiaocui */ +#include "src/mds/schedule/scheduleMetrics.h" + #include #include #include -#include "src/mds/schedule/scheduleMetrics.h" -#include "src/mds/schedule/operatorController.h" + #include "src/mds/schedule/operator.h" +#include "src/mds/schedule/operatorController.h" #include "test/mds/mock/mock_topology.h" -using ::curve::mds::topology::MockTopology; using ::curve::mds::topology::CopySetKey; +using ::curve::mds::topology::MockTopology; using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace mds { @@ -47,24 +49,22 @@ class ScheduleMetricsTest : public testing::Test { scheduleMetrics = std::make_shared(topo); } - void TearDown() { - } + void TearDown() {} ::curve::mds::topology::ChunkServer GetChunkServer(int id) { - return ::curve::mds::topology::ChunkServer( - id, "", "", id, "", 9000, ""); + return ::curve::mds::topology::ChunkServer(id, "", "", id, "", 9000, + ""); } ::curve::mds::topology::Server GetServer(int id) { - std::string hostName = - "pubbeta2-curve" + std::to_string(id) + ".org"; - return ::curve::mds::topology::Server( - id, hostName, "", 0, "", 0, id, 1, ""); + std::string hostName = "pubbeta2-curve" + std::to_string(id) + ".org"; + return ::curve::mds::topology::Server(id, hostName, "", 0, "", 0, id, 1, + ""); } std::string GetChunkServerHostPort(int id) { return GetServer(id).GetHostName() + ":" + - std::to_string(GetChunkServer(id).GetPort()); + std::to_string(GetChunkServer(id).GetPort()); } public: @@ -74,24 +74,24 @@ class ScheduleMetricsTest : public testing::Test { TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { Operator addOp(1, CopySetKey{1, 1}, OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared(3)); + steady_clock::now(), std::make_shared(3)); ::curve::mds::topology::CopySetInfo addCsInfo(1, 1); addCsInfo.SetCopySetMembers(std::set{1, 2}); { - // 1. 增加normal级别/add类型的operator + // 1. 
Add operators of normal level/add type EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 1}, _)) .WillOnce(DoAll(SetArgPointee<1>(addCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) .WillOnce(Return(GetServer(1).GetHostName() + ":" + - std::to_string(GetChunkServer(1).GetPort()))); + std::to_string(GetChunkServer(1).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(2)) .WillOnce(Return(GetServer(2).GetHostName() + ":" + - std::to_string(GetChunkServer(2).GetPort()))); + std::to_string(GetChunkServer(2).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(3)) .WillOnce(Return(GetServer(3).GetHostName() + ":" + - std::to_string(GetChunkServer(3).GetPort()))); + std::to_string(GetChunkServer(3).GetPort()))); scheduleMetrics->UpdateAddMetric(addOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -100,34 +100,34 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { ASSERT_EQ(1, scheduleMetrics->operators.size()); ASSERT_EQ(std::to_string(addCsInfo.GetLogicalPoolId()), - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "logicalPoolId")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "logicalPoolId")); ASSERT_EQ(std::to_string(addCsInfo.GetId()), - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "copySetId")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "copySetId")); std::string copysetpeers = GetChunkServerHostPort(1) + "," + GetChunkServerHostPort(2); ASSERT_EQ(copysetpeers, - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "copySetPeers")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "copySetPeers")); ASSERT_EQ(std::to_string(addCsInfo.GetEpoch()), - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "copySetEpoch")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "copySetEpoch")); ASSERT_EQ("UNINTIALIZE_ID", - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "copySetLeader")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "copySetLeader")); ASSERT_EQ(std::to_string(addOp.startEpoch), - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "startEpoch")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "startEpoch")); ASSERT_EQ(NORMAL, - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "opPriority")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "opPriority")); ASSERT_EQ(ADDPEER, - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "opType")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "opType")); ASSERT_EQ(GetChunkServerHostPort(3), - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "opItem")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "opItem")); std::string res = std::string("{\"copySetEpoch\":\"0\",\"copySetId\":\"1\",") + std::string("\"copySetLeader\":\"UNINTIALIZE_ID\",\"") + @@ -143,7 +143,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { } { - // 2. 移除 1中的operator + // 2. 
Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(addOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->addOpNum.get_value()); @@ -154,25 +154,26 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { Operator rmOp(1, CopySetKey{1, 2}, OperatorPriority::HighPriority, - steady_clock::now(), std::make_shared(3)); + steady_clock::now(), std::make_shared(3)); ::curve::mds::topology::CopySetInfo rmCsInfo(1, 2); rmCsInfo.SetCopySetMembers(std::set{1, 2, 3}); rmCsInfo.SetLeader(1); { - // 1. 增加high级别/remove类型的operator + // 1. Add high level/remove type operators EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 2}, _)) .WillOnce(DoAll(SetArgPointee<1>(rmCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) .WillOnce(Return(GetServer(1).GetHostName() + ":" + - std::to_string(GetChunkServer(1).GetPort()))); + std::to_string(GetChunkServer(1).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(2)) .WillOnce(Return(GetServer(2).GetHostName() + ":" + - std::to_string(GetChunkServer(2).GetPort()))); + std::to_string(GetChunkServer(2).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(3)) .Times(2) - .WillRepeatedly(Return(GetServer(3).GetHostName() + ":" + - std::to_string(GetChunkServer(3).GetPort()))); + .WillRepeatedly( + Return(GetServer(3).GetHostName() + ":" + + std::to_string(GetChunkServer(3).GetPort()))); scheduleMetrics->UpdateAddMetric(rmOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -181,34 +182,35 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { ASSERT_EQ(1, scheduleMetrics->operators.size()); ASSERT_EQ(std::to_string(rmCsInfo.GetLogicalPoolId()), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "logicalPoolId")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "logicalPoolId")); ASSERT_EQ(std::to_string(rmCsInfo.GetId()), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "copySetId")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "copySetId")); std::string copysetpeers = GetChunkServerHostPort(1) + "," + - GetChunkServerHostPort(2) + "," + GetChunkServerHostPort(3); + GetChunkServerHostPort(2) + "," + + GetChunkServerHostPort(3); ASSERT_EQ(copysetpeers, - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "copySetPeers")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "copySetPeers")); ASSERT_EQ(std::to_string(rmCsInfo.GetEpoch()), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "copySetEpoch")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "copySetEpoch")); ASSERT_EQ(GetChunkServerHostPort(1), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "copySetLeader")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "copySetLeader")); ASSERT_EQ(std::to_string(rmOp.startEpoch), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "startEpoch")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "startEpoch")); ASSERT_EQ(HIGH, - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "opPriority")); - ASSERT_EQ(REMOVEPEER, - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "opType")); - ASSERT_EQ(GetChunkServerHostPort(3), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "opItem")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "opPriority")); + ASSERT_EQ( + REMOVEPEER, + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey("opType")); + ASSERT_EQ( + 
GetChunkServerHostPort(3), + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey("opItem")); std::string res = std::string("{\"copySetEpoch\":\"0\",\"copySetId\":\"2\",") + std::string("\"copySetLeader\":") + @@ -226,7 +228,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { } { - // 2. 移除 1中的operator + // 2. Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(rmOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->removeOpNum.get_value()); @@ -237,25 +239,27 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { Operator transferOp(1, CopySetKey{1, 3}, OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared(1, 3)); + steady_clock::now(), + std::make_shared(1, 3)); ::curve::mds::topology::CopySetInfo transCsInfo(1, 3); transCsInfo.SetCopySetMembers(std::set{1, 2, 3}); transCsInfo.SetLeader(1); { - // 1. 增加normal级别/transferleader类型的operator + // 1. Increase the operator of the normal level/transferleader type EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) .WillOnce(Return(GetServer(1).GetHostName() + ":" + - std::to_string(GetChunkServer(1).GetPort()))); + std::to_string(GetChunkServer(1).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(2)) .WillOnce(Return(GetServer(2).GetHostName() + ":" + - std::to_string(GetChunkServer(2).GetPort()))); + std::to_string(GetChunkServer(2).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(3)) .Times(2) - .WillRepeatedly(Return(GetServer(3).GetHostName() + ":" + - std::to_string(GetChunkServer(3).GetPort()))); + .WillRepeatedly( + Return(GetServer(3).GetHostName() + ":" + + std::to_string(GetChunkServer(3).GetPort()))); scheduleMetrics->UpdateAddMetric(transferOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -263,30 +267,35 @@ TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { ASSERT_EQ(1, scheduleMetrics->transferOpNum.get_value()); ASSERT_EQ(1, scheduleMetrics->operators.size()); - ASSERT_EQ(std::to_string(transCsInfo.GetLogicalPoolId()), + ASSERT_EQ( + std::to_string(transCsInfo.GetLogicalPoolId()), scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "logicalPoolId")); - ASSERT_EQ(std::to_string(transCsInfo.GetId()), + ASSERT_EQ( + std::to_string(transCsInfo.GetId()), scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "copySetId")); std::string copysetpeers = GetChunkServerHostPort(1) + "," + - GetChunkServerHostPort(2) + "," + GetChunkServerHostPort(3); - ASSERT_EQ(copysetpeers, - scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( - "copySetPeers")); - ASSERT_EQ(std::to_string(transCsInfo.GetEpoch()), + GetChunkServerHostPort(2) + "," + + GetChunkServerHostPort(3); + ASSERT_EQ(copysetpeers, scheduleMetrics->operators[transferOp.copysetID] + .GetValueByKey("copySetPeers")); + ASSERT_EQ( + std::to_string(transCsInfo.GetEpoch()), scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "copySetEpoch")); - ASSERT_EQ(std::to_string(transferOp.startEpoch), + ASSERT_EQ( + std::to_string(transferOp.startEpoch), scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "startEpoch")); - ASSERT_EQ(NORMAL, - scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( - "opPriority")); - ASSERT_EQ(TRANSFERLEADER, + ASSERT_EQ(NORMAL, scheduleMetrics->operators[transferOp.copysetID] + .GetValueByKey("opPriority")); + ASSERT_EQ( + TRANSFERLEADER, 
scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "opType")); - ASSERT_EQ(GetChunkServerHostPort(3), + ASSERT_EQ( + GetChunkServerHostPort(3), scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "opItem")); std::string res = @@ -301,47 +310,49 @@ TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { std::string("\"Normal\",\"opType\":\"TransferLeader\",") + std::string("\"startEpoch\":\"1\"}"); ASSERT_EQ(res, - scheduleMetrics->operators[transferOp.copysetID].JsonBody()); - LOG(INFO) << "format: " + scheduleMetrics->operators[transferOp.copysetID].JsonBody()); + LOG(INFO) + << "format: " << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); } { - // 2. 移除 1中的operator + // 2. Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(transferOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->transferOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // There should be no problem removing metrics that do not exist in the + // map scheduleMetrics->UpdateRemoveMetric(transferOp); } } TEST_F(ScheduleMetricsTest, test_add_rm_changeOp) { Operator changeOp(1, CopySetKey{1, 4}, OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared(1, 4)); + steady_clock::now(), std::make_shared(1, 4)); ::curve::mds::topology::CopySetInfo changeCsInfo(1, 4); changeCsInfo.SetCopySetMembers(std::set{1, 2, 3}); changeCsInfo.SetLeader(1); { - // 1. 增加normal级别/changePeer类型的operator + // 1. Increase operator of normal level/changePeer type EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 4}, _)) .WillOnce(DoAll(SetArgPointee<1>(changeCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) .WillOnce(Return(GetServer(1).GetHostName() + ":" + - std::to_string(GetChunkServer(1).GetPort()))); + std::to_string(GetChunkServer(1).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(2)) .WillOnce(Return(GetServer(2).GetHostName() + ":" + - std::to_string(GetChunkServer(2).GetPort()))); + std::to_string(GetChunkServer(2).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(3)) .WillOnce(Return(GetServer(3).GetHostName() + ":" + - std::to_string(GetChunkServer(3).GetPort()))); + std::to_string(GetChunkServer(3).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(4)) .WillOnce(Return(GetServer(4).GetHostName() + ":" + - std::to_string(GetChunkServer(4).GetPort()))); + std::to_string(GetChunkServer(4).GetPort()))); scheduleMetrics->UpdateAddMetric(changeOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -350,31 +361,32 @@ TEST_F(ScheduleMetricsTest, test_add_rm_changeOp) { ASSERT_EQ(1, scheduleMetrics->operators.size()); ASSERT_EQ(std::to_string(changeCsInfo.GetLogicalPoolId()), - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "logicalPoolId")); - ASSERT_EQ(std::to_string(changeCsInfo.GetId()), - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "copySetId")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "logicalPoolId")); + ASSERT_EQ(std::to_string(changeCsInfo.GetId()), + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "copySetId")); std::string copysetpeers = GetChunkServerHostPort(1) + "," + - GetChunkServerHostPort(2) + "," + GetChunkServerHostPort(3); + GetChunkServerHostPort(2) + "," + + GetChunkServerHostPort(3); ASSERT_EQ(copysetpeers, - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "copySetPeers")); + 
scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "copySetPeers")); ASSERT_EQ(std::to_string(changeCsInfo.GetEpoch()), - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "copySetEpoch")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "copySetEpoch")); ASSERT_EQ(std::to_string(changeOp.startEpoch), - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "startEpoch")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "startEpoch")); ASSERT_EQ(NORMAL, - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "opPriority")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "opPriority")); ASSERT_EQ(CHANGEPEER, - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "opType")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "opType")); ASSERT_EQ(GetChunkServerHostPort(4), - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "opItem")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "opItem")); std::string res = std::string("{\"copySetEpoch\":\"0\",\"copySetId\":\"4\",") + std::string("\"copySetLeader\":") + @@ -387,32 +399,34 @@ TEST_F(ScheduleMetricsTest, test_add_rm_changeOp) { std::string("\"Normal\",\"opType\":\"ChangePeer\",") + std::string("\"startEpoch\":\"1\"}"); ASSERT_EQ(res, - scheduleMetrics->operators[changeOp.copysetID].JsonBody()); + scheduleMetrics->operators[changeOp.copysetID].JsonBody()); LOG(INFO) << "format: " - << scheduleMetrics->operators[changeOp.copysetID].JsonBody(); + << scheduleMetrics->operators[changeOp.copysetID].JsonBody(); } { - // 2. 移除 1中的operator + // 2. Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(changeOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->changeOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // There should be no problem removing metrics that do not exist in the + // map scheduleMetrics->UpdateRemoveMetric(changeOp); } } TEST_F(ScheduleMetricsTest, test_abnormal) { Operator transferOp(1, CopySetKey{1, 3}, OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared(1, 3)); + steady_clock::now(), + std::make_shared(1, 3)); ::curve::mds::topology::CopySetInfo transCsInfo(1, 3); transCsInfo.SetCopySetMembers(std::set{1, 2, 3}); transCsInfo.SetLeader(1); - // 获取copyset失败 + // Failed to obtain copyset EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)).WillOnce(Return(false)); scheduleMetrics->UpdateAddMetric(transferOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -422,36 +436,32 @@ TEST_F(ScheduleMetricsTest, test_abnormal) { ASSERT_TRUE( scheduleMetrics->operators[transferOp.copysetID].JsonBody().empty()); LOG(INFO) << "format: " - << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); + << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); scheduleMetrics->UpdateRemoveMetric(transferOp); - - // 获取chunkserver 或者 server失败 + // Failed to obtain chunkserver or server EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) - .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); - EXPECT_CALL(*topo, GetHostNameAndPortById(1)) - .WillOnce(Return("")); - EXPECT_CALL(*topo, GetHostNameAndPortById(2)) - .WillOnce(Return("")); + .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); + EXPECT_CALL(*topo, GetHostNameAndPortById(1)).WillOnce(Return("")); + EXPECT_CALL(*topo, 
GetHostNameAndPortById(2)).WillOnce(Return("")); EXPECT_CALL(*topo, GetHostNameAndPortById(3)) .Times(2) .WillRepeatedly(Return(GetServer(3).GetHostName() + ":" + - std::to_string(GetChunkServer(3).GetPort()))); + std::to_string(GetChunkServer(3).GetPort()))); scheduleMetrics->UpdateAddMetric(transferOp); std::string res = - std::string("{\"copySetEpoch\":\"0\",\"copySetId\":\"3\",") + - std::string("\"copySetLeader\":\"UNINTIALIZE_ID\",\"") + - std::string("copySetPeers\":\",,pubbeta2-curve3.org:9000") + - std::string("\",\"logicalPoolId\":\"1\",\"opItem\":") + - std::string("\"pubbeta2-curve3.org:9000\",\"opPriority\":") + - std::string("\"Normal\",\"opType\":\"TransferLeader\",") + - std::string("\"startEpoch\":\"1\"}"); + std::string("{\"copySetEpoch\":\"0\",\"copySetId\":\"3\",") + + std::string("\"copySetLeader\":\"UNINTIALIZE_ID\",\"") + + std::string("copySetPeers\":\",,pubbeta2-curve3.org:9000") + + std::string("\",\"logicalPoolId\":\"1\",\"opItem\":") + + std::string("\"pubbeta2-curve3.org:9000\",\"opPriority\":") + + std::string("\"Normal\",\"opType\":\"TransferLeader\",") + + std::string("\"startEpoch\":\"1\"}"); ASSERT_EQ(res, scheduleMetrics->operators[transferOp.copysetID].JsonBody()); LOG(INFO) << "format: " - << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); + << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); } } // namespace schedule } // namespace mds } // namespace curve - diff --git a/test/mds/schedule/scheduleService/scheduleService_test.cpp b/test/mds/schedule/scheduleService/scheduleService_test.cpp index 9814f8ce0b..17ab08e546 100644 --- a/test/mds/schedule/scheduleService/scheduleService_test.cpp +++ b/test/mds/schedule/scheduleService/scheduleService_test.cpp @@ -20,24 +20,25 @@ * Author: lixiaocui */ -#include -#include -#include +#include "src/mds/schedule/scheduleService/scheduleService.h" + #include +#include #include +#include +#include -#include "src/mds/schedule/scheduleService/scheduleService.h" -#include "test/mds/mock/mock_coordinator.h" #include "proto/schedule.pb.h" +#include "test/mds/mock/mock_coordinator.h" namespace curve { namespace mds { namespace schedule { -using ::testing::Return; using ::testing::_; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; class TestScheduleService : public ::testing::Test { protected: @@ -45,10 +46,10 @@ class TestScheduleService : public ::testing::Test { server_ = new brpc::Server(); coordinator_ = std::make_shared(); - ScheduleServiceImpl *scheduleService = + ScheduleServiceImpl* scheduleService = new ScheduleServiceImpl(coordinator_); - ASSERT_EQ(0, - server_->AddService(scheduleService, brpc::SERVER_OWNS_SERVICE)); + ASSERT_EQ( + 0, server_->AddService(scheduleService, brpc::SERVER_OWNS_SERVICE)); ASSERT_EQ(0, server_->Start("127.0.0.1", {5900, 5999}, nullptr)); listenAddr_ = server_->listen_address(); } @@ -63,7 +64,7 @@ class TestScheduleService : public ::testing::Test { protected: std::shared_ptr coordinator_; butil::EndPoint listenAddr_; - brpc::Server *server_; + brpc::Server* server_; }; TEST_F(TestScheduleService, test_RapidLeaderSchedule) { @@ -75,7 +76,7 @@ TEST_F(TestScheduleService, test_RapidLeaderSchedule) { request.set_logicalpoolid(1); RapidLeaderScheduleResponse response; - // 1. 快速leader均衡返回成功 + // 1. 
Fast leader balance returned successfully { EXPECT_CALL(*coordinator_, RapidLeaderSchedule(1)) .WillOnce(Return(kScheduleErrCodeSuccess)); @@ -85,7 +86,7 @@ TEST_F(TestScheduleService, test_RapidLeaderSchedule) { ASSERT_EQ(kScheduleErrCodeSuccess, response.statuscode()); } - // 2. 传入的logicalpoolid不存在 + // 2. The logicalpoolid passed in does not exist { EXPECT_CALL(*coordinator_, RapidLeaderSchedule(1)) .WillOnce(Return(kScheduleErrCodeInvalidLogicalPool)); @@ -105,13 +106,13 @@ TEST_F(TestScheduleService, test_QueryChunkServerRecoverStatus) { request.add_chunkserverid(1); QueryChunkServerRecoverStatusResponse response; - // 1. 查询chunkserver恢复状态返回成功 + // 1. Querying the recovery status of chunkserver returned success { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryChunkServerRecoverStatus( - std::vector{1}, _)) + std::vector{1}, _)) .WillOnce(DoAll(SetArgPointee<1>(expectRes), - Return(kScheduleErrCodeSuccess))); + Return(kScheduleErrCodeSuccess))); brpc::Controller cntl; stub.QueryChunkServerRecoverStatus(&cntl, &request, &response, nullptr); @@ -121,11 +122,11 @@ TEST_F(TestScheduleService, test_QueryChunkServerRecoverStatus) { ASSERT_TRUE(response.recoverstatusmap().begin()->second); } - // 2. 传入的chunkserverid不合法 + // 2. The chunkserverid passed in is invalid { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryChunkServerRecoverStatus( - std::vector{1}, _)) + std::vector{1}, _)) .WillOnce(Return(kScheduleErrInvalidQueryChunkserverID)); brpc::Controller cntl; stub.QueryChunkServerRecoverStatus(&cntl, &request, &response, nullptr); diff --git a/test/mds/schedule/schedulerPOC/scheduler_poc.cpp b/test/mds/schedule/schedulerPOC/scheduler_poc.cpp index b8b3ddb148..b6919dee9b 100644 --- a/test/mds/schedule/schedulerPOC/scheduler_poc.cpp +++ b/test/mds/schedule/schedulerPOC/scheduler_poc.cpp @@ -22,25 +22,27 @@ #include #include -#include -#include -#include -#include + #include +#include +#include +#include #include -#include "src/mds/topology/topology_item.h" -#include "src/mds/topology/topology.h" -#include "src/mds/topology/topology_config.h" -#include "src/mds/topology/topology_service_manager.h" -#include "src/mds/schedule/topoAdapter.h" -#include "src/mds/schedule/scheduler.h" -#include "src/mds/schedule/operatorController.h" +#include + #include "src/mds/common/mds_define.h" -#include "src/mds/copyset/copyset_policy.h" #include "src/mds/copyset/copyset_manager.h" +#include "src/mds/copyset/copyset_policy.h" +#include "src/mds/schedule/operatorController.h" #include "src/mds/schedule/scheduleMetrics.h" -#include "test/mds/schedule/schedulerPOC/mock_topology.h" +#include "src/mds/schedule/scheduler.h" +#include "src/mds/schedule/topoAdapter.h" +#include "src/mds/topology/topology.h" +#include "src/mds/topology/topology_config.h" +#include "src/mds/topology/topology_item.h" +#include "src/mds/topology/topology_service_manager.h" #include "test/mds/mock/mock_topology.h" +#include "test/mds/schedule/schedulerPOC/mock_topology.h" using ::curve::mds::topology::MockTopology; @@ -141,10 +143,10 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { logicalPoolSet_.insert(0); } - std::vector - GetLogicalPoolInCluster(LogicalPoolFilter filter = [](const LogicalPool &) { - return true; - }) const override { + std::vector GetLogicalPoolInCluster(LogicalPoolFilter filter = + [](const LogicalPool&) { + return true; + }) const override { std::vector ret; for (auto lid : logicalPoolSet_) { ret.emplace_back(lid); @@ -152,10 +154,10 @@ class FakeTopo : public
::curve::mds::topology::TopologyImpl { return ret; } - std::vector - GetChunkServerInCluster(ChunkServerFilter filter = [](const ChunkServer &) { - return true; - }) const override { + std::vector GetChunkServerInCluster( + ChunkServerFilter filter = [](const ChunkServer&) { + return true; + }) const override { std::vector ret; for (auto it = chunkServerMap_.begin(); it != chunkServerMap_.end(); it++) { @@ -165,7 +167,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } std::list GetChunkServerInLogicalPool( - PoolIdType id, ChunkServerFilter filter = [](const ChunkServer &) { + PoolIdType id, ChunkServerFilter filter = [](const ChunkServer&) { return true; }) const override { std::list ret; @@ -177,7 +179,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } std::list GetChunkServerInServer( - ServerIdType id, ChunkServerFilter filter = [](const ChunkServer &) { + ServerIdType id, ChunkServerFilter filter = [](const ChunkServer&) { return true; }) const override { std::list res; @@ -190,7 +192,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } std::vector GetCopySetsInCluster( - CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo &) { + CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo&) { return true; }) const override { std::vector ret; @@ -202,7 +204,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { std::vector GetCopySetsInChunkServer( ChunkServerIdType csId, - CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo &) { + CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo&) { return true; }) const override { std::vector ret; @@ -217,7 +219,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { std::vector<::curve::mds::topology::CopySetInfo> GetCopySetInfosInLogicalPool( PoolIdType logicalPoolId, - CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo &) { + CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo&) { return true; }) const override { std::vector<::curve::mds::topology::CopySetInfo> ret; @@ -230,7 +232,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { return ret; } - bool GetServer(ServerIdType serverId, Server *out) const override { + bool GetServer(ServerIdType serverId, Server* out) const override { auto it = serverMap_.find(serverId); if (it != serverMap_.end()) { *out = it->second; @@ -240,7 +242,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } bool GetCopySet(::curve::mds::topology::CopySetKey key, - ::curve::mds::topology::CopySetInfo *out) const override { + ::curve::mds::topology::CopySetInfo* out) const override { auto it = copySetMap_.find(key); if (it != copySetMap_.end()) { *out = it->second; @@ -251,7 +253,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } bool GetChunkServer(ChunkServerIdType chunkserverId, - ChunkServer *out) const override { + ChunkServer* out) const override { auto it = chunkServerMap_.find(chunkserverId); if (it != chunkServerMap_.end()) { *out = it->second; @@ -260,7 +262,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { return false; } - bool GetLogicalPool(PoolIdType poolId, LogicalPool *out) const override { + bool GetLogicalPool(PoolIdType poolId, LogicalPool* out) const override { LogicalPool::RedundanceAndPlaceMentPolicy rap; rap.pageFileRAP.copysetNum = copySetMap_.size(); rap.pageFileRAP.replicaNum = 3; @@ -273,7 +275,7 @@ class FakeTopo : public 
::curve::mds::topology::TopologyImpl { return true; } - int UpdateChunkServerOnlineState(const OnlineState &onlineState, + int UpdateChunkServerOnlineState(const OnlineState& onlineState, ChunkServerIdType id) override { auto it = chunkServerMap_.find(id); if (it == chunkServerMap_.end()) { @@ -284,7 +286,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } } - int UpdateChunkServerRwState(const ChunkServerStatus &rwStatus, + int UpdateChunkServerRwState(const ChunkServerStatus& rwStatus, ChunkServerIdType id) { auto it = chunkServerMap_.find(id); if (it == chunkServerMap_.end()) { @@ -296,7 +298,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } int UpdateCopySetTopo( - const ::curve::mds::topology::CopySetInfo &data) override { + const ::curve::mds::topology::CopySetInfo& data) override { CopySetKey key(data.GetLogicalPoolId(), data.GetId()); auto it = copySetMap_.find(key); if (it != copySetMap_.end()) { @@ -330,19 +332,19 @@ class FakeTopologyServiceManager : public TopologyServiceManager { bool CreateCopysetNodeOnChunkServer( ChunkServerIdType csId, - const std::vector<::curve::mds::topology::CopySetInfo> &cs) override { + const std::vector<::curve::mds::topology::CopySetInfo>& cs) override { return true; } }; class FakeTopologyStat : public TopologyStat { public: - explicit FakeTopologyStat(const std::shared_ptr &topo) + explicit FakeTopologyStat(const std::shared_ptr& topo) : topo_(topo) {} void UpdateChunkServerStat(ChunkServerIdType csId, - const ChunkServerStat &stat) {} + const ChunkServerStat& stat) {} - bool GetChunkServerStat(ChunkServerIdType csId, ChunkServerStat *stat) { + bool GetChunkServerStat(ChunkServerIdType csId, ChunkServerStat* stat) { if (!leaderCountOn) { stat->leaderCount = 10; return true; @@ -360,7 +362,7 @@ class FakeTopologyStat : public TopologyStat { stat->leaderCount = leaderCount; return true; } - bool GetChunkPoolSize(PoolIdType pId, uint64_t *chunkPoolSize) { + bool GetChunkPoolSize(PoolIdType pId, uint64_t* chunkPoolSize) { return true; } @@ -401,7 +403,7 @@ class CopysetSchedulerPOC : public testing::Test { void TearDown() override {} void PrintScatterWithInOnlineChunkServer(PoolIdType lid = 0) { - // 打印初始每个chunkserver的scatter-with + // Print the initial scatter with for each chunkserver int sumFactor = 0; std::map factorMap; int max = -1; @@ -437,7 +439,7 @@ class CopysetSchedulerPOC : public testing::Test { << ", scatter-with:" << factor; } - // 打印scatter-with的方差 + // Print variance of scatter-with LOG(INFO) << "scatter-with (online chunkserver): " << factorMap.size(); float avg = static_cast(sumFactor) / factorMap.size(); float variance = 0; @@ -446,14 +448,14 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= factorMap.size(); LOG(INFO) << "###print scatter-with in online chunkserver###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId - << ")" - << ", 最小值:(" << min << "," << minId << ")"; + << "Mean: " << avg << ", Variance: " << variance + << ", Standard Deviation: " << std::sqrt(variance) + << ", Maximum Value: (" << max << "," << maxId << ")" + << ", Minimum Value: (" << min << "," << minId << ")"; } void PrintScatterWithInLogicalPool(PoolIdType lid = 0) { - // 打印初始每个chunkserver的scatter-with + // Print the initial scatter with for each chunkserver int sumFactor = 0; int max = -1; int maxId = -1; @@ -477,7 +479,7 @@ class CopysetSchedulerPOC : public testing::Test { << ", scatter-with:" << factor; } - // 打印scatter-with的方差 + 
// Print variance of scatter-with LOG(INFO) << "scatter-with (all chunkserver): " << factorMap.size(); float avg = static_cast(sumFactor) / factorMap.size(); float variance = 0; @@ -486,14 +488,14 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= factorMap.size(); LOG(INFO) << "###print scatter-with in cluster###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId - << ")" - << ", 最小值:(" << min << "," << minId << ")"; + << "Mean: " << avg << ", Variance: " << variance + << ", Standard Deviation: " << std::sqrt(variance) + << ", Maximum Value: (" << max << "," << maxId << ")" + << ", Minimum Value: (" << min << "," << minId << ")"; } void PrintCopySetNumInOnlineChunkServer(PoolIdType lid = 0) { - // 打印每个chunksever上copyset的数量 + // Print the number of copysets on each chunkserver std::map numberMap; int sumNumber = 0; int max = -1; @@ -526,7 +528,7 @@ class CopysetSchedulerPOC : public testing::Test { << ", copyset num:" << number; } - // 打印方差 + // Print variance float avg = static_cast(sumNumber) / static_cast(numberMap.size()); float variance = 0; @@ -535,14 +537,14 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= numberMap.size(); LOG(INFO) << "###print copyset-num in online chunkserver###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId - << ")" - << "), 最小值:(" << min << "," << minId << ")"; + << "Mean: " << avg << ", Variance: " << variance + << ", Standard Deviation: " << std::sqrt(variance) + << ", Maximum Value: (" << max << "," << maxId << ")" + << ", Minimum Value: (" << min << "," << minId << ")"; } void PrintCopySetNumInLogicalPool(PoolIdType lid = 0) { - // 打印每个chunksever上copyset的数量 + // Print the number of copysets on each chunkserver std::map numberMap; int sumNumber = 0; int max = -1; @@ -561,7 +563,7 @@ class CopysetSchedulerPOC : public testing::Test { } } - // 打印方差 + // Print variance float avg = static_cast(sumNumber) / static_cast(numberMap.size()); float variance = 0; @@ -570,13 +572,13 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= numberMap.size(); LOG(INFO) << "###print copyset-num in cluster###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值: " << max << ", 最小值:" - << min; + << "Mean: " << avg << ", Variance: " << variance + << ", Standard Deviation: " << std::sqrt(variance) + << ", Maximum Value: " << max << ", Minimum Value: " << min; } void PrintLeaderCountInChunkServer(PoolIdType lid = 0) { - // 打印每个chunkserver上leader的数量 + // Print the number of leaders on each chunkserver std::map leaderDistribute; int sumNumber = 0; int max = -1; @@ -612,10 +614,10 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= leaderDistribute.size(); LOG(INFO) << "###print leader-num in cluster###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId - << ")" - << "), 最小值:(" << min << "," << minId << ")"; + << "Mean: " << avg << ", Variance: " << variance + << ", Standard Deviation: " << std::sqrt(variance) + << ", Maximum Value: (" << max << "," << maxId << ")" + << ", Minimum Value: (" << min << "," << minId << ")"; } int GetLeaderCountRange(PoolIdType lid = 0) { @@ -637,16 +639,16 @@ class CopysetSchedulerPOC : public testing::Test { return max - min; } - // 计算每个chunkserver的scatter-with + // Calculate the scatter-with for each chunkserver int
GetChunkServerScatterwith(ChunkServerIdType csId) { - // 计算chunkserver上的scatter-with + // Calculate the scatter-with on this chunkserver std::map chunkServerCount; for (auto it : topo_->GetCopySetsInChunkServer(csId)) { // get copyset info ::curve::mds::topology::CopySetInfo info; topo_->GetCopySet(it, &info); - // 统计所分布的chunkserver + // Count the chunkservers the copysets are distributed across for (auto it : info.GetCopySetMembers()) { if (it == csId) { continue; } @@ -673,11 +675,11 @@ class CopysetSchedulerPOC : public testing::Test { ChunkServerIdType RandomOfflineOneChunkServer(PoolIdType lid = 0) { auto chunkServers = topo_->GetChunkServerInLogicalPool(lid); - // 选择[0, chunkServers.size())中的index + // Select an index in [0, chunkServers.size()) std::srand(std::time(nullptr)); int index = std::rand() % chunkServers.size(); - // 设置目标chunkserver的状态为offline + // Set the status of the target chunkserver to offline auto it = chunkServers.begin(); std::advance(it, index); topo_->UpdateChunkServerOnlineState(OnlineState::OFFLINE, *it); @@ -697,7 +699,7 @@ class CopysetSchedulerPOC : public testing::Test { topo_->UpdateChunkServerOnlineState(OnlineState::ONLINE, id); } - void SetChunkServerOnline(const std::set &list) { + void SetChunkServerOnline(const std::set& list) { for (auto id : list) { SetChunkServerOnline(id); } @@ -741,10 +743,10 @@ class CopysetSchedulerPOC : public testing::Test { opt, topoAdapter_, opController_); } - void ApplyOperatorsInOpController(const std::set &list) { + void ApplyOperatorsInOpController(const std::set& list) { std::vector keys; for (auto op : opController_->GetOperators()) { - auto type = dynamic_cast(op.step.get()); + auto type = dynamic_cast(op.step.get()); ASSERT_TRUE(type != nullptr); ASSERT_TRUE(list.end() != list.find(type->GetOldPeer())); @@ -771,7 +773,7 @@ class CopysetSchedulerPOC : public testing::Test { void ApplyTranferLeaderOperator() { for (auto op : opController_->GetOperators()) { - auto type = dynamic_cast(op.step.get()); + auto type = dynamic_cast(op.step.get()); ASSERT_TRUE(type != nullptr); ::curve::mds::topology::CopySetInfo info; @@ -781,9 +783,9 @@ class CopysetSchedulerPOC : public testing::Test { } } - // 有两个chunkserver offline的停止条件: - // 所有copyset均有两个及以上的副本offline - bool SatisfyStopCondition(const std::set &idList) { + // Stop condition when two chunkservers are offline: + // every copyset has two or more replicas offline + bool SatisfyStopCondition(const std::set& idList) { std::vector<::curve::mds::topology::CopySetKey> copysetList; for (auto id : idList) { auto list = topo_->GetCopySetsInChunkServer(id); @@ -831,58 +833,65 @@ class CopysetSchedulerPOC : public testing::Test { }; TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_1) { - // 测试一个chunkserver offline恢复后的情况 - // 1. 创建recoverScheduler + // Test recovery after a single chunkserver goes offline + // 1. Create recoverScheduler BuilRecoverScheduler(1); - // 2. 任意选择一个chunkserver处于offline状态 + // 2. Select any chunkserver to be offline ChunkServerIdType choose = RandomOfflineOneChunkServer(); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operators until there is no copyset left on choose do { recoverScheduler_->Schedule(); // update copyset to topology ApplyOperatorsInOpController(std::set{choose}); } while (topo_->GetCopySetsInChunkServer(choose).size() > 0); - // 4. 打印最终的scatter-with + // 4.
Print the final scatter with PrintScatterWithInOnlineChunkServer(); PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // =============================结果====================================== - // ===========================集群初始状态================================= + // =============================Result====================================== + // =============================Initial state of the + // cluster============================= // ###print scatter-with in cluster### - // 均值:97.9556, 方差:11.5314, 标准差: 3.39579, 最大值:106, 最小值:88 + // Mean: 97.9556, Variance: 11.5314, Standard Deviation: 3.39579, Max: 106, + // Min: 88 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 - // ==========================恢复之后的状态================================= + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 + // =============================Status after + // Recovery================================= // //NOLINT // ###print scatter-with in online chunkserver### - // 均值:均值:98.8156, 方差:10.3403, 标准差: 3.21564, 最大值:106, - // 最小值:95 //NOLINT + // Mean: 98.8156, variance: 10.3403, standard deviation: 3.21564, maximum + // value: 106, Minimum value: 95//NOLINT // ###print scatter-with in cluster### - // 均值:98.2667, 方差:64.2289, 标准差: 8.0143, 最大值:106, 最小值:0 + // Mean: 98.2667, Variance: 64.2289, Standard Deviation: 8.0143, Max: 106, + // Min: 0 // ###print copyset-num in online chunkserver### - // 均值:100.559, 方差:1.77729, 标准差: 1.33315, 最大值:109, 最小值:100 + // Mean value: 100.559, variance: 1.77729, standard deviation: 1.33315, + // maximum value: 109, minimum value: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:57.6333, 标准差: 7.59166, 最大值: 109, 最小值:0 + // Mean value: 100, variance: 57.6333, standard deviation: 7.59166, maximum + // value: 109, minimum value: 0 } TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_2) { - // 测试一个chunkserver offline恢复过程中,另一个chunkserver offline的情况 - // 1. 创建recoverScheduler + // Testing the situation of another chunkserver offline during the recovery + // process of one chunkserver offline + // 1. Create recoverScheduler BuilRecoverScheduler(1); - // 2. 任意选择两个chunkserver处于offline状态 + // 2. Choose any two chunkservers to be offline std::set idlist; ChunkServerIdType choose1 = 0; ChunkServerIdType choose2 = 0; choose1 = RandomOfflineOneChunkServer(); idlist.emplace(choose1); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operator until there is no copyset on choose do { recoverScheduler_->Schedule(); @@ -896,35 +905,43 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_2) { ApplyOperatorsInOpController(std::set{choose2}); } while (!SatisfyStopCondition(idlist)); - // 4. 打印最终的scatter-with + // 4. 
    PrintScatterWithInOnlineChunkServer();
    PrintScatterWithInLogicalPool();
    PrintCopySetNumInOnlineChunkServer();
    PrintCopySetNumInLogicalPool();

-    // ============================结果===================================
-    // =========================集群初始状态===============================
+    // ================================ Result ===============================
+    // ===================== Initial state of the cluster ====================
    // ###print scatter-with in cluster###
-    // 均值:97.3, 方差:9.89889, 标准差:3.14625, 最大值:106, 最小值:89
+    // Mean value: 97.3, variance: 9.89889, standard deviation: 3.14625,
+    // maximum value: 106, minimum value: 89
    // ###print copyset-num in cluster###
-    // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100
+    // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100
-    // =========================恢复之后的状态==============================
+    // ======================== Status after recovery ========================
    // ###print scatter-with in online chunkserver###
-    // 均值:100.348, 方差:7.47418, 标准差: 2.73389, 最大值:108, 最小值:101
+    // Mean value: 100.348, variance: 7.47418, standard deviation: 2.73389,
+    // maximum value: 108, minimum value: 101
    // ###print scatter-with in cluster###
-    // 均值:99.2333, 方差:118.034, 标准差: 10.8644, 最大值:108, 最小值:0
+    // Mean value: 99.2333, variance: 118.034, standard deviation: 10.8644,
+    // maximum value: 108, minimum value: 0
    // ###print copyset-num in online chunkserver###
-    // 均值:101.124, 方差:2.9735, 标准差: 1.72438, 最大值:112, 最小值:100
+    // Mean value: 101.124, variance: 2.9735, standard deviation: 1.72438,
+    // maximum value: 112, minimum value: 100
    // ###print copyset-num in cluster###
-    // 均值:100, 方差:115.3, 标准差: 10.7378, 最大值: 112, 最小值:0
+    // Mean value: 100, variance: 115.3, standard deviation: 10.7378, maximum
+    // value: 112, minimum value: 0
 }

 TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) {
-    // 测试一个chunkserver offline恢复过程中,接连有5个chunkserver offline
-    // 1. 创建recoverScheduler
+    // Test the case where 5 more chunkservers go offline in succession while
+    // the first offline chunkserver is recovering
+    // 1. Create recoverScheduler
    BuilRecoverScheduler(1);

-    // 2. 任意选择两个chunkserver处于offline状态
+    // 2. Select chunkservers to take offline
    std::set idlist;
    std::vector origin;
    for (int i = 0; i < 6; i++) {
@@ -934,7 +951,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) {
    origin[0] = RandomOfflineOneChunkServer();
    idlist.emplace(origin[0]);

-    // 3. 生成operator直到choose上没有copyset为止
+    // 3. Generate operators until no copyset is left on choose
    do {
        recoverScheduler_->Schedule();
@@ -950,35 +967,42 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) {
        ApplyOperatorsInOpController(idlist);
    } while (!SatisfyStopCondition(idlist));

-    // 4. 打印最终的scatter-with
+    // 4. Print the final scatter-width
    PrintScatterWithInOnlineChunkServer();
    PrintScatterWithInLogicalPool();
    PrintCopySetNumInOnlineChunkServer();
    PrintCopySetNumInLogicalPool();

-    // ============================结果====================================
-    // ========================集群初始状态=================================
+    // ================================ Result ===============================
+    // ===================== Initial state of the cluster ====================
    // ###print scatter-with in cluster###
-    // 均值:97.6, 方差:11.8067, 标准差: 3.43608, 最大值:105, 最小值:87
+    // Mean value: 97.6, variance: 11.8067, standard deviation: 3.43608,
+    // maximum value: 105, minimum value: 87
    // ###print copyset-num in cluster###
-    // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100
+    // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100
-    // ========================恢复之后的状态================================
+    // ======================== Status after recovery ========================
    // ###print scatter-with in online chunkserver###
-    // 均值:105.425, 方差:9.95706, 标准差: 3.15548, 最大值:116, 最小值:103
+    // Mean value: 105.425, variance: 9.95706, standard deviation: 3.15548,
+    // maximum value: 116, minimum value: 103
    // ###print scatter-with in cluster###
-    // 均值:101.933, 方差:363.262, 标准差: 19.0594, 最大值:116, 最小值:0
+    // Mean value: 101.933, variance: 363.262, standard deviation: 19.0594,
+    // maximum value: 116, minimum value: 0
    // ###print copyset-num in online chunkserver###
-    // 均值:103.425, 方差:13.164, 标准差: 3.62822, 最大值:121, 最小值:100
+    // Mean value: 103.425, variance: 13.164, standard deviation: 3.62822,
+    // maximum value: 121, minimum value: 100
    // ###print copyset-num in cluster###
-    // 均值:100, 方差:352.989, 标准差: 18.788, 最大值: 121, 最小值:0
+    // Mean value: 100, variance: 352.989, standard deviation: 18.788, maximum
+    // value: 121, minimum value: 0
 }

 TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) {
-    // 测试20个chunkserver 接连 offline
-    // 1. 创建recoverScheduler
+    // Test 20 chunkservers going offline one after another
+    // 1. Create recoverScheduler
    BuilRecoverScheduler(1);

-    // 2. 任意选择两个chunkserver处于offline状态
+    // 2. Select chunkservers to take offline
    std::set idlist;
    std::vector origin;
    for (int i = 0; i < 20; i++) {
@@ -988,7 +1012,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) {
    origin[0] = RandomOfflineOneChunkServer();
    idlist.emplace(origin[0]);

-    // 3. 生成operator直到choose上没有copyset为止
+    // 3. Generate operators until no copyset is left on choose
    do {
        recoverScheduler_->Schedule();
@@ -1004,7 +1028,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) {
        ApplyOperatorsInOpController(idlist);
    } while (!SatisfyStopCondition(idlist));

-    // 4. 打印最终的scatter-with
+    // 4. Print the final scatter-width
    PrintScatterWithInOnlineChunkServer();
    PrintScatterWithInLogicalPool();
    PrintCopySetNumInOnlineChunkServer();
@@ -1012,24 +1036,24 @@
 }

 TEST_F(CopysetSchedulerPOC, test_chunkserver_offline_over_concurrency) {
-    // 测试一个server有多个chunkserver offline, 有一个被设置为pending,
-    // 可以recover的情况
+    // Test a server with multiple chunkservers offline, where one of them is
+    // set to pending and can therefore be recovered
    offlineTolerent_ = 20;
    BuilRecoverScheduler(4);

-    // offline一个server上的chunkserver
+    // Take the chunkservers on one server offline
    auto chunkserverSet = OfflineChunkServerInServer1();

-    // 选择其中一个设置为pendding状态
+    // Pick one of them and set it to pending status
    ChunkServerIdType target = *chunkserverSet.begin();
    topo_->UpdateChunkServerRwState(ChunkServerStatus::PENDDING, target);

    int opNum = 0;
    int targetOpNum = topo_->GetCopySetsInChunkServer(target).size();

-    // 开始恢复
+    // Start recovery
    do {
        recoverScheduler_->Schedule();
        opNum += opController_->GetOperators().size();

-        // apply operator, 把copyset更新到topology
+        // Apply operators, update the copysets to the topology
        ApplyOperatorsInOpController(std::set{target});
    } while (topo_->GetCopySetsInChunkServer(target).size() > 0);

@@ -1038,14 +1062,14 @@
 TEST_F(CopysetSchedulerPOC,
        test_scatterwith_after_copysetRebalance_1) {  // NOLINT
-    // 测试一个chunkserver offline, 集群回迁的情况
+    // Test the case where a chunkserver goes offline and the cluster then
+    // migrates copysets back

-    // 1. 一个chunkserver offline后恢复
+    // 1. A chunkserver goes offline and recovers
    BuilRecoverScheduler(1);
    ChunkServerIdType choose = RandomOfflineOneChunkServer();
    do {
        recoverScheduler_->Schedule();
-        // apply operator, 把copyset更新到topology
+        // Apply operators, update the copysets to the topology
        ApplyOperatorsInOpController(std::set{choose});
    } while (topo_->GetCopySetsInChunkServer(choose).size() > 0);

@@ -1053,23 +1077,30 @@ TEST_F(CopysetSchedulerPOC,
    PrintScatterWithInLogicalPool();
    PrintCopySetNumInOnlineChunkServer();
    PrintCopySetNumInLogicalPool();
-    // ============================结果====================================
-    // ========================集群初始状态=================================
+    // ================================ Result ===============================
+    // ===================== Initial state of the cluster ====================
    // ###print scatter-with in cluster###
-    // 均值:97.6667, 方差:10.9444, 标准差: 3.30824, 最大值:107, 最小值:90
+    // Mean value: 97.6667, variance: 10.9444, standard deviation: 3.30824,
+    // maximum value: 107, minimum value: 90
    // ###print copyset-num in cluster###
-    // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100
+    // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100
-    // ========================恢复之后的状态================================
+    // ======================== Status after recovery ========================
    // ###print scatter-with in online chunkserver###
-    // 均值:99.1061, 方差:10.1172, 标准差: 3.18076, 最大值:108, 最小值:91
+    // Mean value: 99.1061, variance: 10.1172, standard deviation: 3.18076,
+    // maximum value: 108, minimum value: 91
    // ###print scatter-with in cluster###
-    // 均值:98.5556, 方差:64.3247, 标准差: 8.02027, 最大值:108, 最小值:0
+    // Mean value: 98.5556, variance: 64.3247, standard deviation: 8.02027,
+    // maximum value: 108, minimum value: 0
    // ###print copyset-num in online chunkserver###
-    // 均值:100.559, 方差:1.56499, 标准差: 1.251, 最大值:107, 最小值:100
+    // Mean value: 100.559, variance: 1.56499, standard deviation: 1.251,
+    // maximum value: 107, minimum value: 100
    // ###print copyset-num in cluster###
-    // 均值:100, 方差:57.4222, 标准差: 7.57774, 最大值: 107, 最小值:0
+    // Mean value: 100, variance: 57.4222, standard deviation: 7.57774, maximum
+    // value: 107, minimum value: 0

-    // 2. chunkserver-choose恢复成online状态
+    // 2. Restore chunkserver choose to the online state
    SetChunkServerOnline(choose);
    BuildCopySetScheduler(1);
    std::vector csList;
@@ -1087,20 +1118,23 @@ TEST_F(CopysetSchedulerPOC,
               minScatterwidth_ * (1 + scatterwidthPercent_));
    ASSERT_TRUE(GetChunkServerScatterwith(choose) >= minScatterwidth_);

-    // ============================结果====================================
-    // ========================迁移后的状态=================================
+    // ================================ Result ===============================
+    // ======================== Status after migration =======================
    // ###print scatter-with in cluster###
-    // 均值:99.2667, 方差:9.65111, 标准差: 3.10662, 最大值:109, 最小值:91
+    // Mean value: 99.2667, variance: 9.65111, standard deviation: 3.10662,
+    // maximum value: 109, minimum value: 91
    // ###print copyset-num in cluster###
-    // 均值:100, 方差:0.5, 标准差: 0.707107, 最大值: 101, 最小值:91
+    // Mean value: 100, variance: 0.5, standard deviation: 0.707107, maximum
+    // value: 101, minimum value: 91
 }

 TEST_F(CopysetSchedulerPOC,
        DISABLED_test_scatterwith_after_copysetRebalance_2) {  // NOLINT
-    // 测试一个chunkserver offline恢复过程中,另一个chunkserver offline
-    // 集群回迁的情况
+    // Test the case where a second chunkserver goes offline while the first
+    // offline chunkserver is recovering, and the cluster then migrates
+    // copysets back

-    // 1. chunkserver offline后恢复
+    // 1. The chunkservers go offline and recover
    BuilRecoverScheduler(1);
    std::set idlist;
    ChunkServerIdType choose1 = 0;
@@ -1124,23 +1158,30 @@ TEST_F(CopysetSchedulerPOC,
    PrintCopySetNumInOnlineChunkServer();
    PrintCopySetNumInLogicalPool();

-    // ============================结果===================================
-    // =========================集群初始状态===============================
+    // ================================ Result ===============================
+    // ===================== Initial state of the cluster ====================
    // ###print scatter-with in cluster###
-    // 均值:97.4889, 方差:9.96099, 标准差: 3.1561, 最大值:105, 最小值:89
+    // Mean value: 97.4889, variance: 9.96099, standard deviation: 3.1561,
+    // maximum value: 105, minimum value: 89
    // ###print copyset-num in cluster###
-    // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100
+    // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100
-    // =========================恢复之后的状态==============================
+    // ======================== Status after recovery ========================
    // ###print scatter-with in online chunkserver###
-    // 均值:100.472, 方差:7.37281, 标准差: 2.71529, 最大值:106, 最小值:91
+    // Mean value: 100.472, variance: 7.37281, standard deviation: 2.71529,
+    // maximum value: 106, minimum value: 91
    // ###print scatter-with in cluster###
-    // 均值:99.3556, 方差:118.207, 标准差: 10.8723, 最大值:106, 最小值:0
+    // Mean value: 99.3556, variance: 118.207, standard deviation: 10.8723,
+    // maximum value: 106, minimum value: 0
    // ###print copyset-num in online chunkserver###
-    // 均值:101.124, 方差:2.77125, 标准差: 1.66471, 最大值:111, 最小值:100
+    // Mean value: 101.124, variance: 2.77125, standard deviation: 1.66471,
+    // maximum value: 111, minimum value: 100
    // ###print copyset-num in cluster###
-    // 均值:100, 方差:115.1, 标准差: 10.7285, 最大值: 111, 最小值:0
+    // Mean value: 100, variance: 115.1, standard deviation: 10.7285, maximum
+    // value: 111, minimum value: 0

-    // 2. cchunkserver恢复成online状态
+    // 2. Restore the chunkservers to the online state
    SetChunkServerOnline(choose1);
    SetChunkServerOnline(choose2);
    BuildCopySetScheduler(1);
@@ -1152,20 +1193,22 @@ TEST_F(CopysetSchedulerPOC,
    } while (removeOne > 0);
    PrintScatterWithInLogicalPool();
    PrintCopySetNumInLogicalPool();
-    // ============================结果====================================
-    // ========================迁移后的状态=================================
+    // ================================ Result ===============================
+    // ======================== Status after migration =======================
    // ###print scatter-with in cluster###
-    // 均值:100.556, 方差:8.18025, 标准差: 2.86011, 最大值:107, 最小值:91
+    // Mean value: 100.556, variance: 8.18025, standard deviation: 2.86011,
+    // maximum value: 107, minimum value: 91
    // ###print copyset-num in cluster###
-    // 均值:100, 方差:1, 标准差: 1, 最大值: 101, 最小值:91
+    // Mean: 100, Variance: 1, Standard Deviation: 1, Maximum: 101, Minimum: 91
 }

 TEST_F(CopysetSchedulerPOC,
        test_scatterwith_after_copysetRebalance_3) {  // NOLINT
-    // 测试一个chunkserver offline恢复过程中,接连有5个chunkserver offline
-    // 回迁的情况
+    // Test the case where 5 more chunkservers go offline in succession while
+    // the first offline chunkserver is recovering, and copysets are then
+    // migrated back

-    // 1. chunkserver offline后恢复
+    // 1. The chunkservers go offline and recover
    BuilRecoverScheduler(1);
    std::set idlist;
    std::vector origin;
@@ -1176,7 +1219,7 @@ TEST_F(CopysetSchedulerPOC,
    origin[0] = RandomOfflineOneChunkServer();
    idlist.emplace(origin[0]);

-    // 3. 生成operator直到choose上没有copyset为止
+    // 3. Generate operators until no copyset is left on choose
    do {
        recoverScheduler_->Schedule();
@@ -1197,23 +1240,30 @@ TEST_F(CopysetSchedulerPOC,
    PrintCopySetNumInOnlineChunkServer();
    PrintCopySetNumInLogicalPool();

-    // ============================结果====================================
-    // ========================集群初始状态=================================
+    // ================================ Result ===============================
+    // ===================== Initial state of the cluster ====================
    // ###print scatter-with in cluster###
-    // 均值:97.6, 方差:11.8067, 标准差: 3.43608, 最大值:105, 最小值:87
+    // Mean value: 97.6, variance: 11.8067, standard deviation: 3.43608, maximum
+    // value: 105, minimum value: 87
    // ###print copyset-num in cluster###
-    // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100
+    // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100
-    // ========================恢复之后的状态================================
+    // ======================== Status after recovery ========================
    // ###print scatter-with in online chunkserver###
-    // 均值:105.425, 方差:9.95706, 标准差: 3.15548, 最大值:116, 最小值:103
+    // Mean value: 105.425, variance: 9.95706, standard deviation: 3.15548,
+    // maximum value: 116, minimum value: 103
    // ###print scatter-with in cluster###
-    // 均值:101.933, 方差:363.262, 标准差: 19.0594, 最大值:116, 最小值:0
+    // Mean value: 101.933, variance: 363.262, standard deviation: 19.0594,
+    // maximum value: 116, minimum value: 0
    // ###print copyset-num in online chunkserver###
-    // 均值:103.425, 方差:13.164, 标准差: 3.62822, 最大值:121, 最小值:100
+    // Mean value: 103.425, variance: 13.164, standard deviation: 3.62822,
+    // maximum value: 121, minimum value: 100
    // ###print copyset-num in cluster###
-    // 均值:100, 方差:352.989, 标准差: 18.788, 最大值: 121, 最小值:0
+    // Mean value: 100, variance: 
352.989, standard deviation: 18.788, maximum + // value: 121, minimum value: 0 - // 2. chunkserver恢复成online状态 + // 2. Chunkserver restored to online state SetChunkServerOnline(idlist); BuildCopySetScheduler(1); std::vector csList; @@ -1235,12 +1285,14 @@ TEST_F(CopysetSchedulerPOC, ASSERT_TRUE(GetChunkServerScatterwith(choose) >= minScatterwidth_); } - // ============================结果==================================== - // ========================迁移后的状态================================= + // ===================================Result==================================== + // ===================================Status after + // Migration================================= // ###print scatter-with in cluster### - // 均值:100.556, 方差:8.18025, 标准差: 2.86011, 最大值:107, 最小值:91 + // Mean value: 100.556, variance: 8.18025, standard deviation: 2.86011, + // maximum value: 107, minimum value: 91 // ###print copyset-num in cluster### - // 均值:100, 方差:1, 标准差: 1, 最大值: 101, 最小值:91 + // Mean: 100, Variance: 1, Standard Deviation: 1, Maximum: 101, Minimum: 91 } TEST_F(CopysetSchedulerPOC, diff --git a/test/mds/schedule/scheduler_helper_test.cpp b/test/mds/schedule/scheduler_helper_test.cpp index ff54d4c5bf..76668c415d 100644 --- a/test/mds/schedule/scheduler_helper_test.cpp +++ b/test/mds/schedule/scheduler_helper_test.cpp @@ -20,15 +20,17 @@ * Author: lixiaocui */ +#include "src/mds/schedule/scheduler_helper.h" + #include + #include "test/mds/schedule/common.h" #include "test/mds/schedule/mock_topoAdapter.h" -#include "src/mds/schedule/scheduler_helper.h" using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; namespace curve { namespace mds { @@ -42,9 +44,7 @@ class TestSchedulerHelper : public ::testing::Test { topoAdapter_ = std::make_shared(); } - void TearDown() override { - topoAdapter_ = nullptr; - } + void TearDown() override { topoAdapter_ = nullptr; } protected: std::shared_ptr topoAdapter_; @@ -56,67 +56,83 @@ TEST_F(TestSchedulerHelper, test_SatisfyScatterWidth_target) { int maxScatterWidth = minScatterWidth * (1 + scatterWidthRangePerent); bool target = true; { - // 1. 变更之后未达到最小值,但使得scatter-width增大 + // 1. After the change, the minimum value was not reached, but it + // increased the scatter-width int oldValue = 10; int newValue = 13; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 2. 变更之后未达到最小值,scattter-width不变 + // 2. After the change, the minimum value is not reached, and the + // scatter-width remains unchanged int oldValue = 10; int newValue = 10; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 3. 变更之后未达到最小值,scatter-width减小 + // 3. After the change, the minimum value was not reached and the + // scatter-width decreased int oldValue = 10; int newValue = 8; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 4. 变更之后等于最小值 + // 4. 
Equal to minimum value after change int oldValue = minScatterWidth + 2; int newValue = minScatterWidth; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 5. 变更之后大于最小值,小于最大值 + // 5. After the change, it is greater than the minimum value and less + // than the maximum value int oldValue = minScatterWidth; int newValue = minScatterWidth + 2; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 6. 变更之后等于最大值 + // 6. Equal to maximum value after change int oldValue = maxScatterWidth - 2; int newValue = maxScatterWidth; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 7. 变更之后大于最大值,scatter-width增大 + // 7. After the change, it is greater than the maximum value and the + // scatter-width increases int oldValue = maxScatterWidth + 1; int newValue = maxScatterWidth + 2; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 8. 变更之后大于最大值,scatter-width不变 + // 8. After the change, it is greater than the maximum value, and the + // scatter-width remains unchanged int oldValue = maxScatterWidth + 2; int newValue = maxScatterWidth + 2; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 9. 变更之后大于最大值,scatter-width减小 + // 9. After the change is greater than the maximum value, the + // scatter-width decreases int oldValue = maxScatterWidth + 3; int newValue = maxScatterWidth + 2; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } } @@ -126,67 +142,83 @@ TEST_F(TestSchedulerHelper, test_SatisfyScatterWidth_not_target) { int maxScatterWidth = minScatterWidth * (1 + scatterWidthRangePerent); bool target = false; { - // 1. 变更之后未达到最小值,但使得scatter-width增大 + // 1. After the change, the minimum value was not reached, but it + // increased the scatter-width int oldValue = 10; int newValue = 13; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 2. 变更之后未达到最小值,scattter-width不变 + // 2. 
After the change, the minimum value is not reached, and the + // scatter-width remains unchanged int oldValue = 10; int newValue = 10; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 3. 变更之后未达到最小值,scatter-width减小 + // 3. After the change, the minimum value was not reached and the + // scatter-width decreased int oldValue = 10; int newValue = 8; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 4. 变更之后等于最小值 + // 4. Equal to minimum value after change int oldValue = minScatterWidth + 2; int newValue = minScatterWidth; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 5. 变更之后大于最小值,小于最大值 + // 5. After the change, it is greater than the minimum value and less + // than the maximum value int oldValue = minScatterWidth; int newValue = minScatterWidth + 2; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 6. 变更之后等于最大值 + // 6. Equal to maximum value after change int oldValue = maxScatterWidth - 2; int newValue = maxScatterWidth; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 7. 变更之后大于最大值,scatter-width增大 + // 7. After the change, it is greater than the maximum value and the + // scatter-width increases int oldValue = maxScatterWidth + 1; int newValue = maxScatterWidth + 2; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 8. 变更之后大于最大值,scatter-width不变 + // 8. After the change, it is greater than the maximum value, and the + // scatter-width remains unchanged int oldValue = maxScatterWidth + 2; int newValue = maxScatterWidth + 2; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 9. 变更之后大于最大值,scatter-width减小 + // 9. 
After the change is greater than the maximum value, the + // scatter-width decreases int oldValue = maxScatterWidth + 3; int newValue = maxScatterWidth + 2; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } } @@ -195,7 +227,7 @@ TEST_F(TestSchedulerHelper, test_SatisfyZoneAndScatterWidthLimit) { ChunkServerIdType source = 1; ChunkServerIdType target = 4; { - // 1. 获取target的信息失败 + // 1. Failed to obtain information for target EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(4, _)) .WillOnce(Return(false)); ASSERT_FALSE(SchedulerHelper::SatisfyZoneAndScatterWidthLimit( @@ -204,9 +236,10 @@ TEST_F(TestSchedulerHelper, test_SatisfyZoneAndScatterWidthLimit) { PeerInfo peer4(4, 1, 1, "192.168.10.1", 9001); ChunkServerInfo info4(peer4, OnlineState::ONLINE, DiskState::DISKERROR, - ChunkServerStatus::READWRITE, 1, 1, 1, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 1, 1, 1, + ChunkServerStatisticInfo{}); { - // 2. 获取到的标准zoneNum = 0 + // 2. Obtained standard zoneNum=0 EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(4, _)) .WillOnce(DoAll(SetArgPointee<1>(info4), Return(true))); EXPECT_CALL(*topoAdapter_, GetStandardZoneNumInLogicalPool(1)) @@ -216,12 +249,12 @@ TEST_F(TestSchedulerHelper, test_SatisfyZoneAndScatterWidthLimit) { } { - // 3. 迁移之后不符合zone条件 + // 3. Does not meet zone conditions after migration EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(4, _)) .WillOnce(DoAll(SetArgPointee<1>(info4), Return(true))); EXPECT_CALL(*topoAdapter_, GetStandardZoneNumInLogicalPool(1)) .WillOnce(Return(4)); - ASSERT_FALSE(SchedulerHelper::SatisfyZoneAndScatterWidthLimit( + ASSERT_FALSE(SchedulerHelper::SatisfyZoneAndScatterWidthLimit( topoAdapter_, target, source, copyset, 1, 0.01)); } } @@ -283,18 +316,18 @@ TEST_F(TestSchedulerHelper, test_CalculateAffectOfMigration) { .WillOnce(SetArgPointee<1>(replica2Map)); EXPECT_CALL(*topoAdapter_, GetChunkServerScatterMap(3, _)) .WillOnce(SetArgPointee<1>(replica3Map)); - SchedulerHelper::CalculateAffectOfMigration( - copyset, source, target, topoAdapter_, &scatterWidth); - // 对于source, old=2, new=1 + SchedulerHelper::CalculateAffectOfMigration(copyset, source, target, + topoAdapter_, &scatterWidth); + // For source, old=2, new=1 ASSERT_EQ(2, scatterWidth[source].first); ASSERT_EQ(1, scatterWidth[source].second); - // 对于target, old=1, new=2 + // For target, old=1, new=2 ASSERT_EQ(1, scatterWidth[target].first); ASSERT_EQ(2, scatterWidth[target].second); - // 对于replica2, old=3, new=2 + // For replica2, old=3, new=2 ASSERT_EQ(3, scatterWidth[2].first); ASSERT_EQ(2, scatterWidth[2].second); - // 对于replica3, old=2, new=3 + // For replica3, old=2, new=3 ASSERT_EQ(2, scatterWidth[3].first); ASSERT_EQ(3, scatterWidth[3].second); } @@ -324,19 +357,19 @@ TEST_F(TestSchedulerHelper, test_CalculateAffectOfMigration_no_source) { .WillOnce(SetArgPointee<1>(replica2Map)); EXPECT_CALL(*topoAdapter_, GetChunkServerScatterMap(3, _)) .WillOnce(SetArgPointee<1>(replica3Map)); - SchedulerHelper::CalculateAffectOfMigration( - copyset, source, target, topoAdapter_, &scatterWidth); + SchedulerHelper::CalculateAffectOfMigration(copyset, source, target, + topoAdapter_, &scatterWidth); - // 对于target, old=1, new=3 + // For target, old=1, new=3 ASSERT_EQ(1, scatterWidth[target].first); ASSERT_EQ(3, scatterWidth[target].second); - // 对于replica1, old=2, new=3 + // For replica1, old=2, 
new=3 ASSERT_EQ(2, scatterWidth[1].first); ASSERT_EQ(3, scatterWidth[1].second); - // 对于replica2, old=3, new=3 + // For replica2, old=3, new=3 ASSERT_EQ(3, scatterWidth[2].first); ASSERT_EQ(3, scatterWidth[2].second); - // 对于replica3, old=2, new=3 + // For replica3, old=2, new=3 ASSERT_EQ(2, scatterWidth[3].first); ASSERT_EQ(3, scatterWidth[3].second); } @@ -362,22 +395,22 @@ TEST_F(TestSchedulerHelper, test_CalculateAffectOfMigration_no_target) { .WillOnce(SetArgPointee<1>(replica2Map)); EXPECT_CALL(*topoAdapter_, GetChunkServerScatterMap(3, _)) .WillOnce(SetArgPointee<1>(replica3Map)); - SchedulerHelper::CalculateAffectOfMigration( - copyset, source, target, topoAdapter_, &scatterWidth); + SchedulerHelper::CalculateAffectOfMigration(copyset, source, target, + topoAdapter_, &scatterWidth); - // 对于source, old=2, new=1 + // For source, old=2, new=1 ASSERT_EQ(2, scatterWidth[source].first); ASSERT_EQ(1, scatterWidth[source].second); - // 对于replica2, old=3, new=2 + // For replica2, old=3, new=2 ASSERT_EQ(3, scatterWidth[2].first); ASSERT_EQ(2, scatterWidth[2].second); - // 对于replica3, old=2, new=2 + // For replica3, old=2, new=2 ASSERT_EQ(2, scatterWidth[3].first); ASSERT_EQ(2, scatterWidth[3].second); } TEST_F(TestSchedulerHelper, - test_InvovledReplicasSatisfyScatterWidthAfterMigration_not_satisfy) { + test_InvovledReplicasSatisfyScatterWidthAfterMigration_not_satisfy) { CopySetInfo copyset = GetCopySetInfoForTest(); ChunkServerIdType source = 1; ChunkServerIdType target = 4; @@ -405,14 +438,14 @@ TEST_F(TestSchedulerHelper, int affected = 0; bool res = SchedulerHelper::InvovledReplicasSatisfyScatterWidthAfterMigration( - copyset, source, target, UNINTIALIZE_ID, topoAdapter_, - 10, 0.1, &affected); + copyset, source, target, UNINTIALIZE_ID, topoAdapter_, 10, 0.1, + &affected); ASSERT_FALSE(res); ASSERT_EQ(0, affected); } TEST_F(TestSchedulerHelper, - test_InvovledReplicasSatisfyScatterWidthAfterMigration_satisfy) { + test_InvovledReplicasSatisfyScatterWidthAfterMigration_satisfy) { CopySetInfo copyset = GetCopySetInfoForTest(); ChunkServerIdType source = 1; ChunkServerIdType target = 4; @@ -440,53 +473,55 @@ TEST_F(TestSchedulerHelper, int affected = 0; bool res = SchedulerHelper::InvovledReplicasSatisfyScatterWidthAfterMigration( - copyset, source, target, UNINTIALIZE_ID, topoAdapter_, 1, 2, &affected); + copyset, source, target, UNINTIALIZE_ID, topoAdapter_, 1, 2, + &affected); ASSERT_TRUE(res); ASSERT_EQ(0, affected); } - TEST_F(TestSchedulerHelper, test_SortChunkServerByCopySetNumAsc) { PeerInfo peer1(1, 1, 1, "192.168.10.1", 9000); PeerInfo peer2(2, 2, 2, "192.168.10.2", 9000); PeerInfo peer3(3, 3, 3, "192.168.10.3", 9000); PeerInfo peer4(4, 4, 4, "192.168.10.4", 9000); ChunkServerInfo info1(peer1, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 10, 10, 10, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 10, 10, 10, + ChunkServerStatisticInfo{}); ChunkServerInfo info2(peer2, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 10, 10, 10, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 10, 10, 10, + ChunkServerStatisticInfo{}); ChunkServerInfo info3(peer3, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 10, 10, 10, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 10, 10, 10, + ChunkServerStatisticInfo{}); std::vector chunkserverList{info1, info2, info3}; // {1,2,3} CopySetInfo copyset1(CopySetKey{1, 1}, 1, 1, - std::vector{peer1, peer2, peer3}, - ConfigChangeInfo{}, 
CopysetStatistics{}); + std::vector{peer1, peer2, peer3}, + ConfigChangeInfo{}, CopysetStatistics{}); // {1,3,4} CopySetInfo copyset2(CopySetKey{1, 2}, 1, 1, - std::vector{peer1, peer3, peer4}, - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector{peer1, peer3, peer4}, + ConfigChangeInfo{}, CopysetStatistics{}); // {1,2,3} CopySetInfo copyset3(CopySetKey{1, 3}, 1, 1, - std::vector{peer1, peer2, peer3}, - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector{peer1, peer2, peer3}, + ConfigChangeInfo{}, CopysetStatistics{}); // {1,2,4} CopySetInfo copyset4(CopySetKey{1, 4}, 1, 1, - std::vector{peer1, peer2, peer4}, - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector{peer1, peer2, peer4}, + ConfigChangeInfo{}, CopysetStatistics{}); // {1,3,4} CopySetInfo copyset5(CopySetKey{1, 5}, 1, 1, - std::vector{peer1, peer3, peer4}, - ConfigChangeInfo{}, CopysetStatistics{}); - std::vector copysetList{ - copyset1, copyset2, copyset3, copyset4, copyset5}; + std::vector{peer1, peer3, peer4}, + ConfigChangeInfo{}, CopysetStatistics{}); + std::vector copysetList{copyset1, copyset2, copyset3, copyset4, + copyset5}; // chunkserver-1: 5, chunkserver-2: 3 chunkserver-3: 4 - EXPECT_CALL(*topoAdapter_, GetCopySetInfos()) - .WillOnce(Return(copysetList)); - SchedulerHelper::SortChunkServerByCopySetNumAsc( - &chunkserverList, topoAdapter_); + EXPECT_CALL(*topoAdapter_, GetCopySetInfos()).WillOnce(Return(copysetList)); + SchedulerHelper::SortChunkServerByCopySetNumAsc(&chunkserverList, + topoAdapter_); ASSERT_EQ(info2.info.id, chunkserverList[0].info.id); ASSERT_EQ(info3.info.id, chunkserverList[1].info.id); @@ -496,4 +531,3 @@ TEST_F(TestSchedulerHelper, test_SortChunkServerByCopySetNumAsc) { } // namespace schedule } // namespace mds } // namespace curve - diff --git a/test/mds/server/mds_test.cpp b/test/mds/server/mds_test.cpp index 236e526371..1881504452 100644 --- a/test/mds/server/mds_test.cpp +++ b/test/mds/server/mds_test.cpp @@ -20,19 +20,20 @@ * Author: charisu */ +#include "src/mds/server/mds.h" + +#include #include -#include #include -#include +#include #include + #include #include -#include "src/mds/server/mds.h" #include "src/common/concurrent/concurrent.h" -#include "src/common/timeutility.h" #include "src/common/string_util.h" - +#include "src/common/timeutility.h" #include "test/mds/mock/mock_etcdclient.h" using ::curve::common::Thread; @@ -55,18 +56,19 @@ class MDSTest : public ::testing::Test { ASSERT_TRUE(false); } else if (0 == etcdPid) { /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, + * as it may cause deadlock!!! 
*/ - ASSERT_EQ(0, execlp("etcd", "etcd", "--listen-client-urls", - "http://localhost:10032", - "--advertise-client-urls", - "http://localhost:10032", "--listen-peer-urls", - "http://localhost:10033", "--name", "testMds", - nullptr)); + ASSERT_EQ( + 0, + execlp("etcd", "etcd", "--listen-client-urls", + "http://localhost:10032", "--advertise-client-urls", + "http://localhost:10032", "--listen-peer-urls", + "http://localhost:10033", "--name", "testMds", nullptr)); exit(0); } - // 一定时间内尝试init直到etcd完全起来 + // Try init for a certain period of time until etcd is fully recovered auto client = std::make_shared(); EtcdConf conf = {kEtcdAddr, static_cast(strlen(kEtcdAddr)), 1000}; uint64_t now = ::curve::common::TimeUtility::GetTimeofDaySec(); @@ -102,7 +104,7 @@ class MDSTest : public ::testing::Test { }; TEST_F(MDSTest, common) { - // 加载配置 + // Load Configuration std::string confPath = "./conf/mds.conf"; auto conf = std::make_shared(); conf->SetConfigPath(confPath); @@ -116,7 +118,7 @@ TEST_F(MDSTest, common) { mds.InitMdsOptions(conf); mds.StartDummy(); - // 从dummy server获取version和mds监听端口 + // Obtain version and mds listening ports from dummy server brpc::Channel httpChannel; brpc::Controller cntl; brpc::ChannelOptions options; @@ -124,12 +126,12 @@ TEST_F(MDSTest, common) { std::string dummyAddr = "127.0.0.1:" + std::to_string(kDummyPort); ASSERT_EQ(0, httpChannel.Init(dummyAddr.c_str(), &options)); - // 测试获取version + // Test to obtain version cntl.http_request().uri() = dummyAddr + "/vars/curve_version"; httpChannel.CallMethod(NULL, &cntl, NULL, NULL, NULL); ASSERT_FALSE(cntl.Failed()); - // 测试获取mds监听端口 + // Testing to obtain the mds listening port cntl.Reset(); cntl.http_request().uri() = dummyAddr + "/vars/mds_config_mds_listen_addr"; httpChannel.CallMethod(NULL, &cntl, NULL, NULL, NULL); @@ -140,13 +142,13 @@ TEST_F(MDSTest, common) { auto pos = attachment.find(":"); ASSERT_NE(std::string::npos, pos); std::string jsonString = attachment.substr(pos + 2); - // 去除两端引号 + // Remove double quotes jsonString = jsonString.substr(1, jsonString.size() - 2); reader.parse(jsonString, value); std::string mdsAddr = value["conf_value"].asString(); ASSERT_EQ(kMdsAddr, mdsAddr); - // 获取leader状态,此时mds_status应为follower + // Obtain the leader status, at which point mds_status should be follower cntl.Reset(); cntl.http_request().uri() = dummyAddr + "/vars/mds_status"; httpChannel.CallMethod(NULL, &cntl, NULL, NULL, NULL); @@ -156,7 +158,7 @@ TEST_F(MDSTest, common) { mds.StartCompaginLeader(); - // 此时isLeader应为true + // At this point, isLeader should be true cntl.Reset(); cntl.http_request().uri() = dummyAddr + "/vars/is_leader"; ASSERT_FALSE(cntl.Failed()); @@ -164,7 +166,7 @@ TEST_F(MDSTest, common) { cntl.response_attachment().to_string().find("leader")); mds.Init(); - // 启动mds + // Start mds Thread mdsThread(&MDS::Run, &mds); // sleep 5s sleep(5); @@ -172,7 +174,7 @@ TEST_F(MDSTest, common) { // 1、init channel ASSERT_EQ(0, channel_.Init(kMdsAddr.c_str(), nullptr)); - // 2、测试hearbeat接口 + // 2. 
Test the heartbeat interface cntl.Reset(); heartbeat::ChunkServerHeartbeatRequest request1; heartbeat::ChunkServerHeartbeatResponse response1; @@ -180,7 +182,7 @@ TEST_F(MDSTest, common) { request1.set_token("123"); request1.set_ip("127.0.0.1"); request1.set_port(8888); - heartbeat::DiskState *diskState = new heartbeat::DiskState(); + heartbeat::DiskState* diskState = new heartbeat::DiskState(); diskState->set_errtype(0); diskState->set_errmsg(""); request1.set_allocated_diskstate(diskState); @@ -193,7 +195,7 @@ TEST_F(MDSTest, common) { stub1.ChunkServerHeartbeat(&cntl, &request1, &response1, nullptr); ASSERT_FALSE(cntl.Failed()); - // 3、测试namespaceService接口 + // 3. Test the namespaceService interface cntl.Reset(); GetFileInfoRequest request2; GetFileInfoResponse response2; @@ -205,7 +207,7 @@ TEST_F(MDSTest, common) { stub2.GetFileInfo(&cntl, &request2, &response2, nullptr); ASSERT_FALSE(cntl.Failed()); - // 4、测试topology接口 + // 4. Testing the topology interface cntl.Reset(); topology::ListPhysicalPoolRequest request3; topology::ListPhysicalPoolResponse response3; @@ -213,7 +215,7 @@ TEST_F(MDSTest, common) { stub3.ListPhysicalPool(&cntl, &request3, &response3, nullptr); ASSERT_FALSE(cntl.Failed()); - // 5、停掉mds + // 5. Stop the MDS uint64_t startTime = curve::common::TimeUtility::GetTimeofDayMs(); mds.Stop(); mdsThread.join(); @@ -250,7 +252,7 @@ TEST(TestParsePoolsetRules, Test) { { // subdir rules ASSERT_TRUE(ParsePoolsetRules( - "/system/:system;/data/:data;/system/sub/:system-sub", &rules)); + "/system/:system;/data/:data;/system/sub/:system-sub", &rules)); ASSERT_EQ(3, rules.size()); ASSERT_EQ("system", rules["/system/"]); ASSERT_EQ("data", rules["/data/"]); @@ -260,8 +262,8 @@ TEST(TestParsePoolsetRules, Test) { TEST_F(MDSTest, TestBlockSize) { using ::testing::_; - using ::testing::Return; using ::testing::Invoke; + using ::testing::Return; auto client = std::make_shared(); @@ -269,8 +271,7 @@ TEST_F(MDSTest, TestBlockSize) { { EXPECT_CALL(*client, Get(_, _)) .WillOnce(Return(EtcdErrCode::EtcdKeyNotExist)); - EXPECT_CALL(*client, Put(_, _)) - .WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*client, Put(_, _)).WillOnce(Return(EtcdErrCode::EtcdOK)); ASSERT_TRUE(CheckOrInsertBlockSize(client.get())); } diff --git a/test/mds/topology/test_topology.cpp b/test/mds/topology/test_topology.cpp index c1e878deb4..59c394cda9 100644 --- a/test/mds/topology/test_topology.cpp +++ b/test/mds/topology/test_topology.cpp @@ -22,25 +22,25 @@ #include -#include "test/mds/topology/mock_topology.h" -#include "src/mds/topology/topology.h" -#include "src/mds/topology/topology_item.h" #include "src/common/configuration.h" #include "src/common/namespace_define.h" +#include "src/mds/topology/topology.h" +#include "src/mds/topology/topology_item.h" +#include "test/mds/topology/mock_topology.h" namespace curve { namespace mds { namespace topology { -using ::testing::Return; -using ::testing::_; -using ::testing::Contains; -using ::testing::SetArgPointee; -using ::testing::SaveArg; -using ::testing::DoAll; using ::curve::common::Configuration; using ::curve::common::kDefaultPoolsetId; using ::curve::common::kDefaultPoolsetName; +using ::testing::_; +using ::testing::Contains; +using ::testing::DoAll; +using ::testing::Return; +using ::testing::SaveArg; +using ::testing::SetArgPointee; class TestTopology : public ::testing::Test { protected: @@ -52,13 +52,11 @@ class TestTopology : public ::testing::Test { tokenGenerator_ = std::make_shared(); storage_ = std::make_shared(); topology_ = 
std::make_shared(idGenerator_, - tokenGenerator_, - storage_); + tokenGenerator_, storage_); const std::unordered_map poolsetMap{ {kDefaultPoolsetId, - {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}} - }; + {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}}}; ON_CALL(*storage_, LoadPoolset(_, _)) .WillByDefault(DoAll( @@ -80,128 +78,90 @@ class TestTopology : public ::testing::Test { const std::string& type = "SSD", const std::string& desc = "descPoolset") { Poolset poolset(id, name, type, desc); - EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePoolset(_)).WillOnce(Return(true)); int ret = topology_->AddPoolset(poolset); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - void PrepareAddLogicalPool(PoolIdType id = 0x01, - const std::string &name = "testLogicalPool", - PoolIdType phyPoolId = 0x11, - LogicalPoolType type = PAGEFILE, - const LogicalPool::RedundanceAndPlaceMentPolicy &rap = - LogicalPool::RedundanceAndPlaceMentPolicy(), - const LogicalPool::UserPolicy &policy = LogicalPool::UserPolicy(), - uint64_t createTime = 0x888 - ) { - LogicalPool pool(id, - name, - phyPoolId, - type, - rap, - policy, - createTime, - true, - true); - - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + void PrepareAddLogicalPool( + PoolIdType id = 0x01, const std::string& name = "testLogicalPool", + PoolIdType phyPoolId = 0x11, LogicalPoolType type = PAGEFILE, + const LogicalPool::RedundanceAndPlaceMentPolicy& rap = + LogicalPool::RedundanceAndPlaceMentPolicy(), + const LogicalPool::UserPolicy& policy = LogicalPool::UserPolicy(), + uint64_t createTime = 0x888) { + LogicalPool pool(id, name, phyPoolId, type, rap, policy, createTime, + true, true); + + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } - void PrepareAddPhysicalPool(PoolIdType id = 0x11, - const std::string &name = "testPhysicalPool", - PoolsetIdType pid = 0x61, - const std::string &desc = "descPhysicalPool", - uint64_t diskCapacity = 0) { - PhysicalPool pool(id, - name, - pid, - desc); + const std::string& name = "testPhysicalPool", + PoolsetIdType pid = 0x61, + const std::string& desc = "descPhysicalPool", + uint64_t diskCapacity = 0) { + PhysicalPool pool(id, name, pid, desc); pool.SetDiskCapacity(diskCapacity); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret); } void PrepareAddZone(ZoneIdType id = 0x21, - const std::string &name = "testZone", - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descZone") { + const std::string& name = "testZone", + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descZone") { Zone zone(id, name, physicalPoolId, desc); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } void PrepareAddServer(ServerIdType id = 0x31, - const std::string &hostName = "testServer", - const std::string &internalHostIp = "testInternalIp", - uint32_t internalPort = 0, - const std::string &externalHostIp = "testExternalIp", - uint32_t externalPort = 0, - ZoneIdType zoneId = 0x21, - PoolIdType physicalPoolId = 0x11, - const 
std::string &desc = "descServer") { - Server server(id, - hostName, - internalHostIp, - internalPort, - externalHostIp, - externalPort, - zoneId, - physicalPoolId, - desc); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + const std::string& hostName = "testServer", + const std::string& internalHostIp = "testInternalIp", + uint32_t internalPort = 0, + const std::string& externalHostIp = "testExternalIp", + uint32_t externalPort = 0, ZoneIdType zoneId = 0x21, + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descServer") { + Server server(id, hostName, internalHostIp, internalPort, + externalHostIp, externalPort, zoneId, physicalPoolId, + desc); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } - void PrepareAddChunkServer(ChunkServerIdType id = 0x41, - const std::string &token = "testToken", - const std::string &diskType = "nvme", - ServerIdType serverId = 0x31, - const std::string &hostIp = "testInternalIp", - uint32_t port = 0, - const std::string &diskPath = "/", - uint64_t diskUsed = 512, - uint64_t diskCapacity = 1024) { - ChunkServer cs(id, - token, - diskType, - serverId, - hostIp, - port, - diskPath); - ChunkServerState state; - state.SetDiskCapacity(diskCapacity); - state.SetDiskUsed(diskUsed); - cs.SetChunkServerState(state); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + void PrepareAddChunkServer( + ChunkServerIdType id = 0x41, const std::string& token = "testToken", + const std::string& diskType = "nvme", ServerIdType serverId = 0x31, + const std::string& hostIp = "testInternalIp", uint32_t port = 0, + const std::string& diskPath = "/", uint64_t diskUsed = 512, + uint64_t diskCapacity = 1024) { + ChunkServer cs(id, token, diskType, serverId, hostIp, port, diskPath); + ChunkServerState state; + state.SetDiskCapacity(diskCapacity); + state.SetDiskUsed(diskUsed); + cs.SetChunkServerState(state); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddServer()"; } - void PrepareAddCopySet(CopySetIdType copysetId, - PoolIdType logicalPoolId, - const std::set &members) { - CopySetInfo cs(logicalPoolId, - copysetId); + void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, + const std::set& members) { + CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; @@ -218,16 +178,12 @@ class TestTopology : public ::testing::Test { TEST_F(TestTopology, test_init_success) { std::vector infos; EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, StorageClusterInfo(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)).WillOnce(Return(true)); const std::unordered_map poolsetMap{ - {kDefaultPoolsetId, - {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}} - }; + {kDefaultPoolsetId, {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}}}; std::unordered_map logicalPoolMap_; std::unordered_map physicalPoolMap_; std::unordered_map zoneMap_; @@ -235,40 
+191,33 @@ TEST_F(TestTopology, test_init_success) { std::unordered_map chunkServerMap_; std::map copySetMap_; - logicalPoolMap_[0x01] = LogicalPool(0x01, "lpool1", 0x11, PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, false, true); + logicalPoolMap_[0x01] = + LogicalPool(0x01, "lpool1", 0x11, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, false, true); physicalPoolMap_[0x11] = PhysicalPool(0x11, "pPool1", 0X61, "des1"); zoneMap_[0x21] = Zone(0x21, "zone1", 0x11, "desc1"); - serverMap_[0x31] = Server(0x31, "server1", "127.0.0.1", 8200, - "127.0.0.1", 8200, 0x21, 0x11, "desc1"); - chunkServerMap_[0x41] = ChunkServer(0x41, "token", "ssd", - 0x31, "127.0.0.1", 8200, "/"); + serverMap_[0x31] = Server(0x31, "server1", "127.0.0.1", 8200, "127.0.0.1", + 8200, 0x21, 0x11, "desc1"); + chunkServerMap_[0x41] = + ChunkServer(0x41, "token", "ssd", 0x31, "127.0.0.1", 8200, "/"); copySetMap_[std::pair(0x01, 0x51)] = CopySetInfo(0x01, 0x51); EXPECT_CALL(*storage_, LoadPoolset(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(poolsetMap), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(poolsetMap), Return(true))); EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(logicalPoolMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(logicalPoolMap_), Return(true))); EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap_), Return(true))); EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(zoneMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(zoneMap_), Return(true))); EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(serverMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(serverMap_), Return(true))); EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(chunkServerMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(chunkServerMap_), Return(true))); EXPECT_CALL(*storage_, LoadCopySet(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(copySetMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(copySetMap_), Return(true))); EXPECT_CALL(*idGenerator_, initPoolsetIdGenerator(_)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); @@ -278,10 +227,8 @@ TEST_F(TestTopology, test_init_success) { EXPECT_CALL(*idGenerator_, initChunkServerIdGenerator(_)); EXPECT_CALL(*idGenerator_, initCopySetIdGenerator(_)); - EXPECT_CALL(*storage_, DeleteLogicalPool(_)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, DeleteCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteLogicalPool(_)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteCopySet(_)).WillOnce(Return(true)); TopologyOption option; int ret = topology_->Init(option); @@ -291,8 +238,7 @@ TEST_F(TestTopology, test_init_success) { TEST_F(TestTopology, test_init_loadClusterFail) { std::vector infos; EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(false))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(false))); TopologyOption option; int ret = topology_->Init(option); @@ -302,11 +248,9 @@ TEST_F(TestTopology, test_init_loadClusterFail) { TEST_F(TestTopology, test_init_StorageClusterInfoFail) { std::vector infos; EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), 
Return(true))); - EXPECT_CALL(*storage_, StorageClusterInfo(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)).WillOnce(Return(false)); TopologyOption option; int ret = topology_->Init(option); @@ -318,11 +262,9 @@ TEST_F(TestTopology, test_init_loadLogicalPoolFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(false)); TopologyOption option; int ret = topology_->Init(option); @@ -334,13 +276,10 @@ TEST_F(TestTopology, test_init_LoadPhysicalPoolFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); @@ -354,15 +293,11 @@ TEST_F(TestTopology, test_init_LoadZoneFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -377,16 +312,11 @@ TEST_F(TestTopology, test_init_LoadServerFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(false)); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -402,19 +332,13 @@ TEST_F(TestTopology, test_init_LoadChunkServerFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - 
.WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -431,21 +355,14 @@ TEST_F(TestTopology, test_init_LoadCopysetFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadCopySet(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadCopySet(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -462,18 +379,11 @@ TEST_F(TestTopology, test_AddLogicalPool_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - LogicalPool pool(0x01, - "test1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(0x01, "test1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); @@ -487,15 +397,9 @@ TEST_F(TestTopology, test_AddLogicalPool_IdDuplicated) { PoolIdType id = 0x01; PrepareAddLogicalPool(id, "test1", physicalPoolId); - LogicalPool pool(id, - "test2", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(id, "test2", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); int ret = topology_->AddLogicalPool(pool); @@ -506,18 +410,11 @@ TEST_F(TestTopology, test_AddLogicalPool_StorageFail) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - LogicalPool pool(0x01, - "test1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - 
true, - true); + LogicalPool pool(0x01, "test1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(false)); int ret = topology_->AddLogicalPool(pool); @@ -528,16 +425,9 @@ TEST_F(TestTopology, test_AddLogicalPool_PhysicalPoolNotFound) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - LogicalPool pool(0x01, - "test1", - ++physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); - + LogicalPool pool(0x01, "test1", ++physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); int ret = topology_->AddLogicalPool(pool); @@ -546,26 +436,18 @@ TEST_F(TestTopology, test_AddLogicalPool_PhysicalPoolNotFound) { TEST_F(TestTopology, test_AddPhysicalPool_success) { PrepareAddPoolset(); - PhysicalPool pool(0x11, - "test1", - 0X61, - "desc"); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + PhysicalPool pool(0x11, "test1", 0X61, "desc"); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - TEST_F(TestTopology, test_AddPhysicalPool_IdDuplicated) { PrepareAddPoolset(); PoolIdType id = 0x11; PoolsetIdType pid = 0x61; - PhysicalPool pool(id, - "test1", - pid, - "desc"); + PhysicalPool pool(id, "test1", pid, "desc"); PrepareAddPhysicalPool(id); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeIdDuplicated, ret); @@ -573,12 +455,8 @@ TEST_F(TestTopology, test_AddPhysicalPool_IdDuplicated) { TEST_F(TestTopology, test_AddPhysicalPool_StorageFail) { PrepareAddPoolset(); - PhysicalPool pool(0x11, - "test1", - 0X61, - "desc"); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(false)); + PhysicalPool pool(0x11, "test1", 0X61, "desc"); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(false)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -590,13 +468,9 @@ TEST_F(TestTopology, test_AddZone_success) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); - Zone zone(zoneId, - "testZone", - physicalPoolId, - "desc"); + Zone zone(zoneId, "testZone", physicalPoolId, "desc"); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); @@ -616,10 +490,7 @@ TEST_F(TestTopology, test_AddZone_IdDuplicated) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "test", physicalPoolId); - Zone zone(zoneId, - "testZone", - physicalPoolId, - "desc"); + Zone zone(zoneId, "testZone", physicalPoolId, "desc"); int ret = topology_->AddZone(zone); @@ -631,13 +502,9 @@ TEST_F(TestTopology, test_AddZone_StorageFail) { PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - Zone zone(0x21, - "testZone", - physicalPoolId, - "desc"); + Zone zone(0x21, "testZone", physicalPoolId, "desc"); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(false)); int ret = topology_->AddZone(zone); @@ -649,11 +516,7 @@ TEST_F(TestTopology, test_AddZone_PhysicalPoolNotFound) { PoolIdType physicalPoolId = 0x11; 
ZoneIdType zoneId = 0x21; - Zone zone(zoneId, - "testZone", - physicalPoolId, - "desc"); - + Zone zone(zoneId, "testZone", physicalPoolId, "desc"); int ret = topology_->AddZone(zone); @@ -668,18 +531,10 @@ TEST_F(TestTopology, test_AddServer_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "test", physicalPoolId); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -701,15 +556,8 @@ TEST_F(TestTopology, test_AddServer_IdDuplicated) { PrepareAddZone(zoneId, "test", physicalPoolId); PrepareAddServer(id); - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); @@ -724,46 +572,29 @@ TEST_F(TestTopology, test_AddServer_StorageFail) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "test", physicalPoolId); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(false)); - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } - TEST_F(TestTopology, test_AddServer_ZoneNotFound) { PrepareAddPoolset(); ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeZoneNotFound, ret); } - TEST_F(TestTopology, test_AddChunkServers_success) { PrepareAddPoolset(); ChunkServerIdType csId = 0x41; @@ -773,20 +604,13 @@ TEST_F(TestTopology, test_AddChunkServers_success) { PrepareAddZone(); PrepareAddServer(serverId); - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); ChunkServerState state; state.SetDiskCapacity(1024); state.SetDiskUsed(512); cs.SetChunkServerState(state); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); @@ -812,18 +636,9 @@ TEST_F(TestTopology, test_AddChunkServer_IdDuplicated) { PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token2", - "ssd", - serverId); - - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + PrepareAddChunkServer(csId, "token2", "ssd", serverId); + + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); int ret = topology_->AddChunkServer(cs); @@ -839,16 +654,9 @@ TEST_F(TestTopology, test_AddChunkServer_StorageFail) { PrepareAddZone(); PrepareAddServer(serverId); - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); - EXPECT_CALL(*storage_, 
StorageChunkServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(false)); int ret = topology_->AddChunkServer(cs); @@ -860,13 +668,7 @@ TEST_F(TestTopology, test_AddChunkServer_ServerNotFound) { ChunkServerIdType csId = 0x41; ServerIdType serverId = 0x31; - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); int ret = topology_->AddChunkServer(cs); @@ -880,8 +682,7 @@ TEST_F(TestTopology, test_RemoveLogicalPool_success) { PoolIdType id = 0x01; PrepareAddLogicalPool(id, "name", physicalPoolId); - EXPECT_CALL(*storage_, DeleteLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->RemoveLogicalPool(id); @@ -904,8 +705,7 @@ TEST_F(TestTopology, test_RemoveLogicalPool_StorageFail) { PoolIdType id = 0x01; PrepareAddLogicalPool(id, "name", physicalPoolId); - EXPECT_CALL(*storage_, DeleteLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteLogicalPool(_)).WillOnce(Return(false)); int ret = topology_->RemoveLogicalPool(id); @@ -917,8 +717,7 @@ TEST_F(TestTopology, test_RemovePhysicalPool_success) { PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); - EXPECT_CALL(*storage_, DeletePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeletePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->RemovePhysicalPool(poolId); @@ -939,8 +738,7 @@ TEST_F(TestTopology, test_RemovePhysicalPool_StorageFail) { PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); - EXPECT_CALL(*storage_, DeletePhysicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeletePhysicalPool(_)).WillOnce(Return(false)); int ret = topology_->RemovePhysicalPool(poolId); @@ -952,12 +750,9 @@ TEST_F(TestTopology, test_RemoveZone_success) { ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); - PrepareAddZone(zoneId, - "testZone", - poolId); + PrepareAddZone(zoneId, "testZone", poolId); - EXPECT_CALL(*storage_, DeleteZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteZone(_)).WillOnce(Return(true)); int ret = topology_->RemoveZone(zoneId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -982,8 +777,7 @@ TEST_F(TestTopology, test_RemoveZone_StorageFail) { PrepareAddPhysicalPool(); PrepareAddZone(zoneId); - EXPECT_CALL(*storage_, DeleteZone(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteZone(_)).WillOnce(Return(false)); int ret = topology_->RemoveZone(zoneId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -995,16 +789,9 @@ TEST_F(TestTopology, test_RemoveServer_success) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "testSever", - "ip1", - 0, - "ip2", - 0, - zoneId); + PrepareAddServer(serverId, "testSever", "ip1", 0, "ip2", 0, zoneId); - EXPECT_CALL(*storage_, DeleteServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteServer(_)).WillOnce(Return(true)); int ret = topology_->RemoveServer(serverId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1030,16 +817,9 @@ TEST_F(TestTopology, test_RemoveServer_StorageFail) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "testSever", - "ip1", - 0, - "ip2", - 0, - zoneId); + PrepareAddServer(serverId, "testSever", "ip1", 0, "ip2", 0, zoneId); - EXPECT_CALL(*storage_, DeleteServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, 
DeleteServer(_)).WillOnce(Return(false)); int ret = topology_->RemoveServer(serverId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -1052,18 +832,14 @@ TEST_F(TestTopology, test_RemoveChunkServer_success) { PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId); + PrepareAddChunkServer(csId, "token", "ssd", serverId); - int ret = topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId); + int ret = + topology_->UpdateChunkServerRwState(ChunkServerStatus::RETIRED, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - EXPECT_CALL(*storage_, DeleteChunkServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteChunkServer(_)).WillOnce(Return(true)); ret = topology_->RemoveChunkServer(csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1075,7 +851,6 @@ TEST_F(TestTopology, test_RemoveChunkServer_success) { ASSERT_TRUE(it == csList.end()); } - TEST_F(TestTopology, test_RemoveChunkServer_ChunkSeverNotFound) { ChunkServerIdType csId = 0x41; @@ -1090,19 +865,14 @@ TEST_F(TestTopology, test_RemoveChunkServer_StorageFail) { PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId); + PrepareAddChunkServer(csId, "token", "ssd", serverId); - int ret = topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId); + int ret = + topology_->UpdateChunkServerRwState(ChunkServerStatus::RETIRED, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - - EXPECT_CALL(*storage_, DeleteChunkServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteChunkServer(_)).WillOnce(Return(false)); ret = topology_->RemoveChunkServer(csId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -1113,26 +883,15 @@ TEST_F(TestTopology, UpdateLogicalPool_success) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->UpdateLogicalPool(pool); @@ -1146,15 +905,9 @@ TEST_F(TestTopology, UpdateLogicalPool_success) { TEST_F(TestTopology, UpdateLogicalPool_LogicalPoolNotFound) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); int ret = topology_->UpdateLogicalPool(pool); @@ -1166,26 +919,15 @@ TEST_F(TestTopology, UpdateLogicalPool_StorageFail) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - 
PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(false)); int ret = topology_->UpdateLogicalPool(pool); @@ -1197,24 +939,19 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); LogicalPool pool2; topology_->GetLogicalPool(logicalPoolId, &pool2); ASSERT_EQ(AllocateStatus::ALLOW, pool2.GetStatus()); // update to deny - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); - int ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::DENY, logicalPoolId); + int ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::DENY, + logicalPoolId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1223,11 +960,10 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { ASSERT_EQ(AllocateStatus::DENY, pool3.GetStatus()); // update to allow - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); - ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::ALLOW, logicalPoolId); + ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::ALLOW, + logicalPoolId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1239,18 +975,12 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_LogicalPoolNotFound) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - int ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::ALLOW, logicalPoolId); + int ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::ALLOW, + logicalPoolId); ASSERT_EQ(kTopoErrCodeLogicalPoolNotFound, ret); } @@ -1260,19 +990,14 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_StorageFail) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - 
LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(false)); - int ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::ALLOW, logicalPoolId); + int ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::ALLOW, + logicalPoolId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -1285,8 +1010,7 @@ TEST_F(TestTopology, TestUpdateLogicalPoolScanState) { PrepareAddLogicalPool(lpid, "name", ppid); auto set_state = [&](PoolIdType lpid, bool scanEnable) { - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); auto retCode = topology_->UpdateLogicalPoolScanState(lpid, scanEnable); ASSERT_EQ(retCode, kTopoErrCodeSuccess); }; @@ -1309,14 +1033,12 @@ TEST_F(TestTopology, TestUpdateLogicalPoolScanState) { check_state(lpid, true); // CASE 4: logical pool not found -> set scan state fail - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .Times(0); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).Times(0); auto retCode = topology_->UpdateLogicalPoolScanState(lpid + 1, true); ASSERT_EQ(retCode, kTopoErrCodeLogicalPoolNotFound); // CASE 5: update storage fail -> set scan state fail - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(false)); retCode = topology_->UpdateLogicalPoolScanState(lpid, true); ASSERT_EQ(retCode, kTopoErrCodeStorgeFail); } @@ -1325,18 +1047,11 @@ TEST_F(TestTopology, UpdatePhysicalPool_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PoolsetIdType poolsetId = 0x61; - PrepareAddPhysicalPool(physicalPoolId, - "name1", - poolsetId, - "desc1"); + PrepareAddPhysicalPool(physicalPoolId, "name1", poolsetId, "desc1"); - PhysicalPool newPool(physicalPoolId, - "name1", - poolsetId, - "desc2"); + PhysicalPool newPool(physicalPoolId, "name1", poolsetId, "desc2"); - EXPECT_CALL(*storage_, UpdatePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdatePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->UpdatePhysicalPool(newPool); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1349,69 +1064,45 @@ TEST_F(TestTopology, UpdatePhysicalPool_success) { TEST_F(TestTopology, UpdatePhysicalPool_PhysicalPoolNotFound) { PoolIdType physicalPoolId = 0x11; PoolIdType pid = 0x61; - PhysicalPool newPool(physicalPoolId, - "name1", - pid, - "desc2"); + PhysicalPool newPool(physicalPoolId, "name1", pid, "desc2"); int ret = topology_->UpdatePhysicalPool(newPool); ASSERT_EQ(kTopoErrCodePhysicalPoolNotFound, ret); } - TEST_F(TestTopology, UpdatePhysicalPool_StorageFail) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PoolsetIdType poolsetId = 0x61; - PrepareAddPhysicalPool(physicalPoolId, - "name1", - poolsetId, - "desc1"); + PrepareAddPhysicalPool(physicalPoolId, "name1", poolsetId, "desc1"); - PhysicalPool newPool(physicalPoolId, - "name1", - poolsetId, - "desc2"); + PhysicalPool newPool(physicalPoolId, "name1", poolsetId, "desc2"); - EXPECT_CALL(*storage_, UpdatePhysicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdatePhysicalPool(_)).WillOnce(Return(false)); int ret = topology_->UpdatePhysicalPool(newPool); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } - - TEST_F(TestTopology, 
UpdateZone_success) { PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddZone(zoneId, - "name1", - physicalPoolId, - "desc1"); - - Zone newZone(zoneId, - "name1", - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateZone(_)) - .WillOnce(Return(true)); + PrepareAddZone(zoneId, "name1", physicalPoolId, "desc1"); + + Zone newZone(zoneId, "name1", physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateZone(_)).WillOnce(Return(true)); int ret = topology_->UpdateZone(newZone); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - TEST_F(TestTopology, UpdateZone_ZoneNotFound) { ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; - Zone newZone(zoneId, - "name1", - physicalPoolId, - "desc2"); + Zone newZone(zoneId, "name1", physicalPoolId, "desc2"); int ret = topology_->UpdateZone(newZone); ASSERT_EQ(kTopoErrCodeZoneNotFound, ret); @@ -1422,18 +1113,11 @@ TEST_F(TestTopology, UpdateZone_StorageFail) { ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddZone(zoneId, - "name1", - physicalPoolId, - "desc1"); - - Zone newZone(zoneId, - "name1", - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateZone(_)) - .WillOnce(Return(false)); + PrepareAddZone(zoneId, "name1", physicalPoolId, "desc1"); + + Zone newZone(zoneId, "name1", physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateZone(_)).WillOnce(Return(false)); int ret = topology_->UpdateZone(newZone); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -1445,28 +1129,13 @@ TEST_F(TestTopology, UpdateServer_success) { ServerIdType serverId = 0x31; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc1"); - - Server newServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateServer(_)) - .WillOnce(Return(true)); + PrepareAddServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc1"); + + Server newServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateServer(_)).WillOnce(Return(true)); int ret = topology_->UpdateServer(newServer); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1477,15 +1146,8 @@ TEST_F(TestTopology, UpdateServer_ServerNotFound) { ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; - Server newServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc2"); + Server newServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc2"); int ret = topology_->UpdateServer(newServer); ASSERT_EQ(kTopoErrCodeServerNotFound, ret); @@ -1498,34 +1160,18 @@ TEST_F(TestTopology, UpdateServer_StorageFail) { ServerIdType serverId = 0x31; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc1"); - - Server newServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateServer(_)) - .WillOnce(Return(false)); + PrepareAddServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc1"); + + Server newServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateServer(_)).WillOnce(Return(false)); int ret = topology_->UpdateServer(newServer); 
ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } - TEST_F(TestTopology, UpdateChunkServerTopo_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; @@ -1535,24 +1181,11 @@ TEST_F(TestTopology, UpdateChunkServerTopo_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); - - ChunkServer newCs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/abc"); - - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "ip1", 100, "/"); + + ChunkServer newCs(csId, "token", "ssd", serverId, "ip1", 100, "/abc"); + + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeSuccess, ret); } @@ -1566,28 +1199,15 @@ TEST_F(TestTopology, UpdateChunkServerTopo_UpdateServerSuccess) { ChunkServerIdType csId = 0x41; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); - PrepareAddServer(serverId, "server1", - "ip1", 0, "ip2", 0, zoneId, physicalPoolId); - PrepareAddServer(serverId2, "server2", - "ip3", 0, "ip4", 0, zoneId, physicalPoolId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); - - ChunkServer newCs(csId, - "token", - "ssd", - serverId2, - "ip3", - 100, - "/abc"); - - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + PrepareAddServer(serverId, "server1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId); + PrepareAddServer(serverId2, "server2", "ip3", 0, "ip4", 0, zoneId, + physicalPoolId); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "ip1", 100, "/"); + + ChunkServer newCs(csId, "token", "ssd", serverId2, "ip3", 100, "/abc"); + + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeSuccess, ret); } @@ -1596,13 +1216,7 @@ TEST_F(TestTopology, UpdateChunkServerTopo_ChunkServerNotFound) { ServerIdType serverId = 0x31; ChunkServerIdType csId = 0x41; - ChunkServer newCs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/abc"); + ChunkServer newCs(csId, "token", "ssd", serverId, "ip1", 100, "/abc"); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); @@ -1617,24 +1231,11 @@ TEST_F(TestTopology, UpdateChunkServerTopo_StorageFail) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); - - ChunkServer newCs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/abc"); - - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(false)); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "ip1", 100, "/"); + + ChunkServer newCs(csId, "token", "ssd", serverId, "ip1", 100, "/abc"); + + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(false)); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -1648,11 +1249,7 @@ TEST_F(TestTopology, UpdateChunkServerDiskStatus_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); PhysicalPool pool; ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); @@ -1662,17 +1259,16 @@ TEST_F(TestTopology, 
UpdateChunkServerDiskStatus_success) { csState.SetDiskState(DISKERROR); csState.SetDiskCapacity(100); - int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); + int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(100, pool.GetDiskCapacity()); - // 只刷一次 - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + // Only flush once + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep to wait for the database flush sleep(5); topology_->Stop(); } @@ -1684,7 +1280,7 @@ TEST_F(TestTopology, UpdateChunkServerDiskStatus_ChunkServerNotFound) { csState.SetDiskState(DISKERROR); csState.SetDiskCapacity(100); - int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); + int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); } @@ -1697,22 +1293,17 @@ TEST_F(TestTopology, UpdateChunkServerRwStateToStorage_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); ChunkServerStatus rwState; rwState = ChunkServerStatus::PENDDING; - int ret = topology_->UpdateChunkServerRwState(rwState, csId); + int ret = topology_->UpdateChunkServerRwState(rwState, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - // 只刷一次 - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + // Only flush once + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep to wait for the database flush sleep(5); topology_->Stop(); } @@ -1726,60 +1317,50 @@ TEST_F(TestTopology, UpdateChunkServerRwStateTestPhysicalPoolCapacity_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); PhysicalPool pool; ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // READWRITE -> RETIRED - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::RETIRED, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(0, pool.GetDiskCapacity()); // RETIRED -> PENDDING - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::PENDDING, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::PENDDING, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // PENDDING -> RETIRED - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::RETIRED, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(0, pool.GetDiskCapacity()); // RETIRED -> READWRITE - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::READWRITE, csId));
ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // READWRITE -> PENDDING - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::PENDDING, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::PENDDING, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // PENDDING -> READWRITE - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::READWRITE, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::READWRITE, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); @@ -1790,7 +1371,7 @@ TEST_F(TestTopology, UpdateChunkServerRwState_ChunkServerNotFound) { ChunkServerStatus rwState; rwState = ChunkServerStatus::PENDDING; - int ret = topology_->UpdateChunkServerRwState(rwState, csId); + int ret = topology_->UpdateChunkServerRwState(rwState, csId); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); } @@ -1803,13 +1384,9 @@ TEST_F(TestTopology, UpdateChunkServerStartUpTime_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); uint64_t time = 0x1234567812345678; - int ret = topology_->UpdateChunkServerStartUpTime(time, csId); + int ret = topology_->UpdateChunkServerStartUpTime(time, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); ChunkServer cs; @@ -1819,7 +1396,7 @@ TEST_F(TestTopology, UpdateChunkServerStartUpTime_ChunkServerNotFound) { ChunkServerIdType csId = 0x41; - int ret = topology_->UpdateChunkServerStartUpTime(1000, csId); + int ret = topology_->UpdateChunkServerStartUpTime(1000, csId); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); } @@ -1831,19 +1408,18 @@ TEST_F(TestTopology, FindLogicalPool_success) { std::string physicalPoolName = "PhysiclPool1"; PrepareAddPhysicalPool(physicalPoolId, physicalPoolName); PrepareAddLogicalPool(logicalPoolId, logicalPoolName, physicalPoolId); - PoolIdType ret = topology_->FindLogicalPool(logicalPoolName, - physicalPoolName); + PoolIdType ret = + topology_->FindLogicalPool(logicalPoolName, physicalPoolName); ASSERT_EQ(logicalPoolId, ret); } TEST_F(TestTopology, FindLogicalPool_LogicalPoolNotFound) { std::string logicalPoolName = "logicalPool1"; std::string physicalPoolName = "PhysiclPool1"; PoolIdType ret = - topology_->FindLogicalPool(logicalPoolName, - physicalPoolName); + topology_->FindLogicalPool(logicalPoolName, physicalPoolName); - ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindPhysicalPool_success) { @@ -1858,11 +1434,9 @@ TEST_F(TestTopology, FindPhysicalPool_success) { TEST_F(TestTopology, FindPhysicalPool_PhysicalPoolNotFound) { std::string physicalPoolName = "physicalPoolName"; PoolIdType ret = topology_->FindPhysicalPool(physicalPoolName); - ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), ret); } - TEST_F(TestTopology, FindZone_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; @@ -1879,8 +1453,7 @@ TEST_F(TestTopology, FindZone_ZoneNotFound) { std::string physicalPoolName = "physicalPoolName"; std::string zoneName = "zoneName"; ZoneIdType ret =
topology_->FindZone(zoneName, physicalPoolName); - ASSERT_EQ(static_cast<ZoneIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<ZoneIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindZone_success2) { @@ -1900,8 +1473,7 @@ TEST_F(TestTopology, FindZone_ZoneNotFound2) { std::string physicalPoolName = "physicalPoolName"; std::string zoneName = "zoneName"; ZoneIdType ret = topology_->FindZone(zoneName, physicalPoolId); - ASSERT_EQ(static_cast<ZoneIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<ZoneIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindServerByHostName_success) { @@ -1910,8 +1482,7 @@ TEST_F(TestTopology, FindServerByHostName_success) { std::string hostName = "host1"; PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName); + PrepareAddServer(serverId, hostName); ServerIdType ret = topology_->FindServerByHostName(hostName); ASSERT_EQ(serverId, ret); } TEST_F(TestTopology, FindServerByHostName_ServerNotFound) { std::string hostName = "host1"; ServerIdType ret = topology_->FindServerByHostName(hostName); - ASSERT_EQ(static_cast<ServerIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<ServerIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindServerByHostIpPort_success) { @@ -1932,12 +1502,7 @@ TEST_F(TestTopology, FindServerByHostIpPort_success) { std::string externalHostIp = "ip2"; PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); ServerIdType ret = topology_->FindServerByHostIpPort(internalHostIp, 0); ASSERT_EQ(serverId, ret); @@ -1954,16 +1519,10 @@ TEST_F(TestTopology, FindSeverByHostIp_ServerNotFound) { std::string externalHostIp = "ip2"; PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); ServerIdType ret = topology_->FindServerByHostIpPort("ip3", 0); - ASSERT_EQ(static_cast<ServerIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<ServerIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindChunkServerNotRetired_success) { @@ -1977,21 +1536,11 @@ TEST_F(TestTopology, FindChunkServerNotRetired_success) { PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/", - port); - - ChunkServerIdType ret = topology_->FindChunkServerNotRetired( - internalHostIp, port); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/", port); + + ChunkServerIdType ret = + topology_->FindChunkServerNotRetired(internalHostIp, port); ASSERT_EQ(csId, ret); } @@ -2006,22 +1555,11 @@ TEST_F(TestTopology, FindChunkServerNotRetired_ChunkServerNotFound) { PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/", - port); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/", port); ChunkServerIdType ret = topology_->FindChunkServerNotRetired("ip3", port); - ASSERT_EQ(static_cast<ChunkServerIdType>( - UNINTIALIZE_ID), ret); + ASSERT_EQ(static_cast<ChunkServerIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, GetLogicalPool_success) { @@
-2089,7 +1627,6 @@ TEST_F(TestTopology, GetServer_success) { ASSERT_EQ(true, ret); } - TEST_F(TestTopology, GetServer_GetServerNotFound) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; @@ -2133,7 +1670,6 @@ TEST_F(TestTopology, GetChunkServer_ChunkServerNotFound) { ASSERT_EQ(false, ret); } - TEST_F(TestTopology, GetChunkServerInCluster_success) { PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; @@ -2371,8 +1907,8 @@ TEST_F(TestTopology, GetChunkServerInLogicalPool_success) { PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "name", physicalPoolId); - PrepareAddServer( - serverId, "name2", "ip1", 0, "ip2", 0, zoneId, physicalPoolId); + PrepareAddServer(serverId, "name2", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId); PrepareAddChunkServer(csId, "token", "ssd", serverId); PrepareAddChunkServer(csId2, "token", "ssd", serverId); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId); @@ -2452,12 +1988,12 @@ TEST_F(TestTopology, AddCopySet_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2470,8 +2006,7 @@ TEST_F(TestTopology, AddCopySet_success) { CopySetInfo csInfo(logicalPoolId, copysetId); csInfo.SetCopySetMembers(replicas); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(csInfo); ASSERT_EQ(kTopoErrCodeSuccess, ret); } @@ -2486,12 +2021,12 @@ TEST_F(TestTopology, AddCopySet_IdDuplicated) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2519,12 +2054,12 @@ TEST_F(TestTopology, AddCopySet_LogicalPoolNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 
0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2551,12 +2086,12 @@ TEST_F(TestTopology, AddCopySet_StorageFail) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2569,8 +2104,7 @@ TEST_F(TestTopology, AddCopySet_StorageFail) { CopySetInfo csInfo(logicalPoolId, copysetId); csInfo.SetCopySetMembers(replicas); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(false)); int ret = topology_->AddCopySet(csInfo); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -2585,12 +2119,12 @@ TEST_F(TestTopology, RemoveCopySet_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2601,8 +2135,7 @@ TEST_F(TestTopology, RemoveCopySet_success) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - EXPECT_CALL(*storage_, DeleteCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteCopySet(_)).WillOnce(Return(true)); int ret = topology_->RemoveCopySet( std::pair<PoolIdType, CopySetIdType>(logicalPoolId, copysetId)); @@ -2620,12 +2153,12 @@ TEST_F(TestTopology, RemoveCopySet_storageFail) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); -
PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2636,8 +2169,7 @@ TEST_F(TestTopology, RemoveCopySet_storageFail) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - EXPECT_CALL(*storage_, DeleteCopySet(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteCopySet(_)).WillOnce(Return(false)); int ret = topology_->RemoveCopySet( std::pair<PoolIdType, CopySetIdType>(logicalPoolId, copysetId)); @@ -2655,12 +2187,12 @@ TEST_F(TestTopology, RemoveCopySet_CopySetNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2687,12 +2219,12 @@ TEST_F(TestTopology, UpdateCopySetTopo_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2716,11 +2248,10 @@ TEST_F(TestTopology, UpdateCopySetTopo_success) { ASSERT_EQ(kTopoErrCodeSuccess, ret); - // 只刷一次 - EXPECT_CALL(*storage_, UpdateCopySet(_)) - .WillOnce(Return(true)); + // Only flush once + EXPECT_CALL(*storage_, UpdateCopySet(_)).WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep to wait for the database flush sleep(5); topology_->Stop(); } @@ -2735,12 +2266,12 @@ TEST_F(TestTopology, UpdateCopySetTopo_CopySetNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0,
"127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2775,12 +2306,12 @@ TEST_F(TestTopology, GetCopySet_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2809,12 +2340,12 @@ TEST_F(TestTopology, GetCopySet_CopysetNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2843,12 +2374,12 @@ TEST_F(TestTopology, GetCopySetsInLogicalPool_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2860,7 +2391,7 @@ TEST_F(TestTopology, GetCopySetsInLogicalPool_success) { PrepareAddCopySet(copysetId, logicalPoolId, replicas); std::vector csList = - 
topology_->GetCopySetsInLogicalPool(logicalPoolId); + topology_->GetCopySetsInLogicalPool(logicalPoolId); ASSERT_EQ(1, csList.size()); } @@ -2874,12 +2405,12 @@ TEST_F(TestTopology, GetCopySetsInCluster_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2890,8 +2421,7 @@ TEST_F(TestTopology, GetCopySetsInCluster_success) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - std::vector<CopySetKey> csList = - topology_->GetCopySetsInCluster(); + std::vector<CopySetKey> csList = topology_->GetCopySetsInCluster(); ASSERT_EQ(1, csList.size()); } @@ -2905,12 +2435,12 @@ TEST_F(TestTopology, GetCopySetsInChunkServer_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2921,44 +2451,33 @@ TEST_F(TestTopology, GetCopySetsInChunkServer_success) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - std::vector<CopySetKey> csList = - topology_->GetCopySetsInChunkServer(0x41); + std::vector<CopySetKey> csList = topology_->GetCopySetsInChunkServer(0x41); ASSERT_EQ(1, csList.size()); } TEST_F(TestTopology, test_create_default_poolset) { - EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, StorageClusterInfo(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadClusterInfo(_)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)).WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPoolset(_, _)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPoolset(_, _)).WillOnce(Return(true)); Poolset poolset; EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce( - DoAll(SaveArg<0>(&poolset), Return(true))); + .WillOnce(DoAll(SaveArg<0>(&poolset), Return(true))); std::unordered_map<PoolIdType, PhysicalPool> physicalPoolMap{ {1, {1, "pool1", UNINTIALIZE_ID, ""}}, {2, {2, "pool2", UNINTIALIZE_ID, ""}}, }; EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap), - SetArgPointee<1>(2), +
.WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap), SetArgPointee<1>(2), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadCopySet(_, _)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadCopySet(_, _)).WillOnce(Return(true)); int rc = topology_->Init({}); ASSERT_EQ(kTopoErrCodeSuccess, rc); diff --git a/test/mds/topology/test_topology_chunk_allocator.cpp b/test/mds/topology/test_topology_chunk_allocator.cpp index a1ea8aa942..2f3c59e089 100644 --- a/test/mds/topology/test_topology_chunk_allocator.cpp +++ b/test/mds/topology/test_topology_chunk_allocator.cpp @@ -20,30 +20,28 @@ * Author: xuchaojie */ -#include #include +#include #include - -#include "src/mds/topology/topology_chunk_allocator.h" -#include "src/mds/common/mds_define.h" -#include "test/mds/topology/mock_topology.h" -#include "test/mds/mock/mock_topology.h" #include "proto/nameserver2.pb.h" #include "src/common/timeutility.h" +#include "src/mds/common/mds_define.h" +#include "src/mds/topology/topology_chunk_allocator.h" +#include "test/mds/mock/mock_alloc_statistic.h" +#include "test/mds/mock/mock_topology.h" +#include "test/mds/topology/mock_topology.h" namespace curve { namespace mds { namespace topology { -using ::testing::Return; using ::testing::_; using ::testing::AnyOf; -using ::testing::SetArgPointee; using ::testing::Invoke; - +using ::testing::Return; +using ::testing::SetArgPointee; class TestTopologyChunkAllocator : public ::testing::Test { protected: @@ -54,21 +52,17 @@ class TestTopologyChunkAllocator : public ::testing::Test { tokenGenerator_ = std::make_shared<MockTokenGenerator>(); storage_ = std::make_shared<MockStorage>(); topology_ = std::make_shared<TopologyImpl>(idGenerator_, - tokenGenerator_, - storage_); + tokenGenerator_, storage_); TopologyOption option; topoStat_ = std::make_shared<TopologyStatImpl>(topology_); - chunkFilePoolAllocHelp_ = - std::make_shared<ChunkFilePoolAllocHelp>(); + chunkFilePoolAllocHelp_ = std::make_shared<ChunkFilePoolAllocHelp>(); chunkFilePoolAllocHelp_->UpdateChunkFilePoolAllocConfig(true, true, 15); option.PoolUsagePercentLimit = 85; option.enableLogicalPoolStatus = true; allocStatistic_ = std::make_shared<MockAllocStatistic>(); - testObj_ = std::make_shared<TopologyChunkAllocatorImpl>(topology_, - allocStatistic_, - topoStat_, - chunkFilePoolAllocHelp_, - option); + testObj_ = std::make_shared<TopologyChunkAllocatorImpl>( + topology_, allocStatistic_, topoStat_, chunkFilePoolAllocHelp_, + option); } virtual void TearDown() { @@ -85,53 +79,37 @@ class TestTopologyChunkAllocator : public ::testing::Test { const std::string& type = "SSD", const std::string& desc = "descPoolset") { Poolset poolset(pid, name, type, desc); - EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePoolset(_)).WillOnce(Return(true)); int ret = topology_->AddPoolset(poolset); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - void PrepareAddLogicalPool(PoolIdType id = 0x01, - const std::string &name = "testLogicalPool", - PoolIdType phyPoolId = 0x11, - LogicalPoolType type = PAGEFILE, - const LogicalPool::RedundanceAndPlaceMentPolicy &rap = - LogicalPool::RedundanceAndPlaceMentPolicy(), - const
LogicalPool::UserPolicy &policy = LogicalPool::UserPolicy(), - uint64_t createTime = 0x888 - ) { - LogicalPool pool(id, - name, - phyPoolId, - type, - rap, - policy, - createTime, - true, - true); - - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + void PrepareAddLogicalPool( + PoolIdType id = 0x01, const std::string& name = "testLogicalPool", + PoolIdType phyPoolId = 0x11, LogicalPoolType type = PAGEFILE, + const LogicalPool::RedundanceAndPlaceMentPolicy& rap = + LogicalPool::RedundanceAndPlaceMentPolicy(), + const LogicalPool::UserPolicy& policy = LogicalPool::UserPolicy(), + uint64_t createTime = 0x888) { + LogicalPool pool(id, name, phyPoolId, type, rap, policy, createTime, + true, true); + + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } - void PrepareAddPhysicalPool(PoolIdType id = 0x11, - const std::string &name = "testPhysicalPool", - PoolsetIdType pid = 0x61, - const std::string &desc = "descPhysicalPool", - uint64_t diskCapacity = 10240) { - PhysicalPool pool(id, - name, - pid, - desc); + const std::string& name = "testPhysicalPool", + PoolsetIdType pid = 0x61, + const std::string& desc = "descPhysicalPool", + uint64_t diskCapacity = 10240) { + PhysicalPool pool(id, name, pid, desc); pool.SetDiskCapacity(diskCapacity); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) @@ -139,78 +117,56 @@ class TestTopologyChunkAllocator : public ::testing::Test { } void PrepareAddZone(ZoneIdType id = 0x21, - const std::string &name = "testZone", - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descZone") { + const std::string& name = "testZone", + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descZone") { Zone zone(id, name, physicalPoolId, desc); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); - ASSERT_EQ(kTopoErrCodeSuccess, ret) << - "should have PrepareAddPhysicalPool()"; + ASSERT_EQ(kTopoErrCodeSuccess, ret) + << "should have PrepareAddPhysicalPool()"; } void PrepareAddServer(ServerIdType id = 0x31, - const std::string &hostName = "testServer", - const std::string &internalHostIp = "testInternalIp", - const std::string &externalHostIp = "testExternalIp", - ZoneIdType zoneId = 0x21, - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descServer") { - Server server(id, - hostName, - internalHostIp, - 0, - externalHostIp, - 0, - zoneId, - physicalPoolId, - desc); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + const std::string& hostName = "testServer", + const std::string& internalHostIp = "testInternalIp", + const std::string& externalHostIp = "testExternalIp", + ZoneIdType zoneId = 0x21, + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descServer") { + Server server(id, hostName, internalHostIp, 0, externalHostIp, 0, + zoneId, physicalPoolId, desc); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } - void PrepareAddChunkServer(ChunkServerIdType id = 0x41, - const std::string &token = "testToken", - const 
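// The Prepare* helpers above only work in dependency order; each
// "should have PrepareAdd...()" assertion message names the missing parent.
// A typical test body builds the hierarchy top-down (a sketch; the ids
// mirror the defaults used elsewhere in this file):
PrepareAddPoolset(0x61);
PrepareAddPhysicalPool(0x11, "testPhysicalPool", 0x61);
PrepareAddZone(0x21, "testZone", 0x11);
PrepareAddServer(0x31, "testServer", "127.0.0.1", "127.0.0.1", 0x21, 0x11);
PrepareAddChunkServer(0x41, "testToken", "nvme", 0x31, "127.0.0.1", 8200);
PrepareAddLogicalPool(0x01, "testLogicalPool", 0x11, PAGEFILE);
PrepareAddCopySet(0x51, 0x01, {0x41});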
std::string &diskType = "nvme", - ServerIdType serverId = 0x31, - const std::string &hostIp = "testInternalIp", - uint32_t port = 0, - const std::string &diskPath = "/", - uint64_t diskUsed = 512, - uint64_t diskCapacity = 1024) { - ChunkServer cs(id, - token, - diskType, - serverId, - hostIp, - port, - diskPath); - ChunkServerState state; - state.SetDiskCapacity(diskCapacity); - state.SetDiskUsed(diskUsed); - cs.SetChunkServerState(state); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + void PrepareAddChunkServer( + ChunkServerIdType id = 0x41, const std::string& token = "testToken", + const std::string& diskType = "nvme", ServerIdType serverId = 0x31, + const std::string& hostIp = "testInternalIp", uint32_t port = 0, + const std::string& diskPath = "/", uint64_t diskUsed = 512, + uint64_t diskCapacity = 1024) { + ChunkServer cs(id, token, diskType, serverId, hostIp, port, diskPath); + ChunkServerState state; + state.SetDiskCapacity(diskCapacity); + state.SetDiskUsed(diskUsed); + cs.SetChunkServerState(state); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); ChunkServerStat stat; - stat.chunkFilepoolSize = diskCapacity-diskUsed; + stat.chunkFilepoolSize = diskCapacity - diskUsed; topoStat_->UpdateChunkServerStat(id, stat); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddServer()"; } - void PrepareAddCopySet(CopySetIdType copysetId, - PoolIdType logicalPoolId, - const std::set &members, - bool availFlag = true) { - CopySetInfo cs(logicalPoolId, - copysetId); + void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, + const std::set& members, + bool availFlag = true) { + CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); cs.SetAvailableFlag(availFlag); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; @@ -228,7 +184,7 @@ class TestTopologyChunkAllocator : public ::testing::Test { }; TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRandomInSingleLogicalPool_success) { + Test_AllocateChunkRandomInSingleLogicalPool_success) { std::vector infos; PrepareAddPoolset(); @@ -247,7 +203,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -258,12 +214,8 @@ TEST_F(TestTopologyChunkAllocator, EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_TRUE(ret); @@ -275,20 +227,16 @@ TEST_F(TestTopologyChunkAllocator, } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRandomInSingleLogicalPool_logicalPoolNotFound) { + Test_AllocateChunkRandomInSingleLogicalPool_logicalPoolNotFound) { std::vector infos; - bool ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 1, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + 
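// Note how PrepareAddChunkServer above seeds topoStat_ with the server's
// free chunk-file-pool space (diskCapacity - diskUsed); the allocator's
// capacity checks read this statistic. The "shouldfail" test below turns
// the same knob to force failures, roughly:
ChunkServerStat stat;
stat.chunkFilepoolSize = 0;  // chunk file pool exhausted on this server
topoStat_->UpdateChunkServerStat(0x41, stat);
// after which AllocateChunk*InSingleLogicalPool() is expected to return false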
INODE_PAGEFILE, "testPoolset", 1, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRandomInSingleLogicalPool_shouldfail) { + Test_AllocateChunkRandomInSingleLogicalPool_shouldfail) { std::vector infos; PoolIdType logicalPoolId = 0x01; @@ -304,7 +252,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddServer(0x32, "server2", "127.0.0.1", "127.0.0.1", 0x22, 0x11); PrepareAddServer(0x33, "server3", "127.0.0.1", "127.0.0.1", 0x23, 0x11); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -315,12 +263,8 @@ TEST_F(TestTopologyChunkAllocator, EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_FALSE(ret); @@ -328,12 +272,8 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); - ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_TRUE(ret); @@ -343,18 +283,14 @@ TEST_F(TestTopologyChunkAllocator, topoStat_->UpdateChunkServerStat(0x42, stat); topoStat_->UpdateChunkServerStat(0x43, stat); - ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_GetRemainingSpaceInLogicalPool_UseChunkFilePool) { + Test_GetRemainingSpaceInLogicalPool_UseChunkFilePool) { std::vector infos; PoolIdType logicalPoolId = 0x01; @@ -370,7 +306,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddServer(0x32, "server2", "127.0.0.1", "127.0.0.1", 0x22, 0x11); PrepareAddServer(0x33, "server3", "127.0.0.1", "127.0.0.1", 0x23, 0x11); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -385,16 +321,16 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); std::map enoughsize; - std::vector pools ={0x01}; + std::vector pools = {0x01}; for (int i = 0; i < 10; i++) { - testObj_->GetRemainingSpaceInLogicalPool(pools, - &enoughsize, "testPoolset"); + testObj_->GetRemainingSpaceInLogicalPool(pools, &enoughsize, + "testPoolset"); ASSERT_EQ(enoughsize[logicalPoolId], 1109); } } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_success) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_success) { std::vector infos; PrepareAddPoolset(); @@ -412,7 +348,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -423,16 +359,11 @@ 
TEST_F(TestTopologyChunkAllocator, PrepareAddCopySet(0x54, logicalPoolId, replicas); PrepareAddCopySet(0x55, logicalPoolId, replicas); - EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 3, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 3, 1024, &infos); ASSERT_TRUE(ret); @@ -443,12 +374,8 @@ TEST_F(TestTopologyChunkAllocator, // second time std::vector infos2; - ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 3, - 1024, - &infos2); + ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 3, 1024, &infos2); ASSERT_TRUE(ret); @@ -493,20 +420,16 @@ TEST_F(TestTopologyChunkAllocator, } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolNotFound) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolNotFound) { std::vector infos; - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 1, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 1, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_copysetEmpty) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_copysetEmpty) { std::vector infos; PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; @@ -514,18 +437,14 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddPhysicalPool(physicalPoolId); PrepareAddLogicalPool(logicalPoolId); - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 1, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 1, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolIsDENY) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolIsDENY) { std::vector infos; PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; @@ -542,7 +461,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -553,27 +472,23 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddCopySet(0x54, logicalPoolId, replicas); PrepareAddCopySet(0x55, logicalPoolId, replicas); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); - topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::DENY, logicalPoolId); + topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::DENY, + logicalPoolId); EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 3, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 3, 1024, &infos); ASSERT_FALSE(ret); } TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { - // 2000个copyset分配100000次,每次分配64个chunk + // 
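// The DENY test above is the whole switch in two calls: persist the status
// change through the (mocked) storage, then flip it on the in-memory
// topology; every later allocation in that pool must fail.
EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true));
topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::DENY, logicalPoolId);
// AllocateChunkRoundRobinInSingleLogicalPool(...) now returns false.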
2000 copysets are allocated 100000 times, with 64 chunks allocated each + // time std::vector copySetIds; std::map copySetMap; for (int i = 0; i < 2000; i++) { @@ -584,12 +499,8 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { for (int i = 0; i < 100000; i++) { int chunkNumber = 64; std::vector infos; - bool ret = - AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( - copySetIds, - 1, - chunkNumber, - &infos); + bool ret = AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( + copySetIds, 1, chunkNumber, &infos); ASSERT_TRUE(ret); ASSERT_EQ(chunkNumber, infos.size()); for (int j = 0; j < chunkNumber; j++) { @@ -598,7 +509,7 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { } int minCount = copySetMap[0]; int maxCount = copySetMap[0]; - for (auto &pair : copySetMap) { + for (auto& pair : copySetMap) { if (pair.second > maxCount) { maxCount = pair.second; } @@ -610,10 +521,8 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { double minPercent = static_cast(avg - minCount) / avg; double maxPercent = static_cast(maxCount - avg) / avg; LOG(INFO) << "AllocateChunkRandomInSingleLogicalPool poc" - <<", minCount = " << minCount - <<", maxCount = " << maxCount - << ", avg = " << avg - << ", minPercent = " << minPercent + << ", minCount = " << minCount << ", maxCount = " << maxCount + << ", avg = " << avg << ", minPercent = " << minPercent << ", maxPercent = " << maxPercent; ASSERT_TRUE(minPercent < 0.1); @@ -621,7 +530,8 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { } TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolTps) { - // 2000个copyset分配100000次,每次分配64个chunk + // 2000 copysets are allocated 100000 times, with 64 chunks allocated each + // time std::vector copySetIds; for (int i = 0; i < 2000; i++) { copySetIds.push_back(i); @@ -632,23 +542,19 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolTps) { int chunkNumber = 64; std::vector infos; AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( - copySetIds, - 1, - chunkNumber, - &infos); + copySetIds, 1, chunkNumber, &infos); } uint64_t stoptime = curve::common::TimeUtility::GetTimeofDayUs(); double usetime = stoptime - startime; - double tps = 1000000.0 * 100000.0/usetime; + double tps = 1000000.0 * 100000.0 / usetime; - std::cout << "TestAllocateChunkRandomInSingleLogicalPool, TPS = " - << tps + std::cout << "TestAllocateChunkRandomInSingleLogicalPool, TPS = " << tps << " * 64 chunk per second."; } TEST(TestAllocateChunkPolicy, - TestAllocateChunkRoundRobinInSingleLogicalPoolSuccess) { + TestAllocateChunkRoundRobinInSingleLogicalPoolSuccess) { std::vector copySetIds; std::map copySetMap; for (int i = 0; i < 20; i++) { @@ -657,13 +563,8 @@ TEST(TestAllocateChunkPolicy, uint32_t nextIndex = 15; int chunkNumber = 10; std::vector infos; - bool ret = - AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( - copySetIds, - 1, - &nextIndex, - chunkNumber, - &infos); + bool ret = AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( + copySetIds, 1, &nextIndex, chunkNumber, &infos); ASSERT_TRUE(ret); ASSERT_EQ(5, nextIndex); ASSERT_EQ(chunkNumber, infos.size()); @@ -680,26 +581,20 @@ TEST(TestAllocateChunkPolicy, } TEST(TestAllocateChunkPolicy, - TestAllocateChunkRoundRobinInSingleLogicalPoolEmpty) { + TestAllocateChunkRoundRobinInSingleLogicalPoolEmpty) { std::vector copySetIds; std::map copySetMap; uint32_t nextIndex = 15; int 
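// Worked numbers behind the two benchmarks above:
//   Poc: 100000 rounds x 64 chunks = 6,400,000 placements over 2000
//        copysets, so the expected average is 3200 hits per copyset; the
//        final asserts require min/max deviation from that average < 10%.
//   Tps: usetime is in microseconds, so tps = 1e6 * 100000 / usetime is
//        allocation calls per second, each call placing 64 chunks.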
chunkNumber = 10; std::vector infos; - bool ret = - AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( - copySetIds, - 1, - &nextIndex, - chunkNumber, - &infos); + bool ret = AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( + copySetIds, 1, &nextIndex, chunkNumber, &infos); ASSERT_FALSE(ret); ASSERT_EQ(15, nextIndex); ASSERT_EQ(0, infos.size()); } -TEST(TestAllocateChunkPolicy, - TestChooseSingleLogicalPoolByWeightPoc) { +TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolByWeightPoc) { std::map poolWeightMap; std::map poolMap; for (int i = 0; i < 5; i++) { @@ -709,8 +604,8 @@ TEST(TestAllocateChunkPolicy, for (int i = 0; i < 100000; i++) { PoolIdType pid; - AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( - poolWeightMap, &pid); + AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight(poolWeightMap, + &pid); poolMap[pid]++; } @@ -719,7 +614,8 @@ TEST(TestAllocateChunkPolicy, ASSERT_TRUE(poolMap[1] < poolMap[2]); ASSERT_TRUE(poolMap[2] < poolMap[3]); ASSERT_TRUE(poolMap[3] < poolMap[4]); - // 5个池大概分布因该是0, 10000,20000,30000,40000 + // The approximate distribution of 5 pools should be 0, 10000, 20000, 30000, + // 40000 LOG(INFO) << "pool0 : " << poolMap[0] << std::endl << "pool1 : " << poolMap[1] << std::endl << "pool2 : " << poolMap[2] << std::endl @@ -727,8 +623,7 @@ TEST(TestAllocateChunkPolicy, << "pool4 : " << poolMap[4] << std::endl; } -TEST(TestAllocateChunkPolicy, - TestChooseSingleLogicalPoolByWeightPoc2) { +TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolByWeightPoc2) { std::map poolMap; poolMap[0] = 100000; poolMap[1] = 90000; @@ -738,12 +633,11 @@ TEST(TestAllocateChunkPolicy, for (int i = 0; i < 100000; i++) { PoolIdType pid; - AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( - poolMap, &pid); + AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight(poolMap, &pid); poolMap[pid] -= 1; } - // 测试是否能逐渐拉平pool之间差距 + // Test to see if it is possible to gradually equalize the gap between pools LOG(INFO) << "pool0 : " << poolMap[0] << std::endl << "pool1 : " << poolMap[1] << std::endl << "pool2 : " << poolMap[2] << std::endl @@ -751,9 +645,8 @@ TEST(TestAllocateChunkPolicy, << "pool4 : " << poolMap[4] << std::endl; } -// 测试能否随机到每个pool -TEST(TestAllocateChunkPolicy, - TestChooseSingleLogicalPoolRandom) { +// Test to see if random allocation to each pool is possible +TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolRandom) { std::vector pools = {1, 2, 3, 4, 5}; std::map allocMap; allocMap[1] = 0; diff --git a/test/mds/topology/test_topology_metric.cpp b/test/mds/topology/test_topology_metric.cpp index 2a38263784..fd1112a4ec 100644 --- a/test/mds/topology/test_topology_metric.cpp +++ b/test/mds/topology/test_topology_metric.cpp @@ -21,24 +21,24 @@ */ #include -#include #include +#include #include "src/mds/topology/topology_metric.h" -#include "test/mds/topology/mock_topology.h" #include "test/mds/mock/mock_alloc_statistic.h" #include "test/mds/mock/mock_topology.h" +#include "test/mds/topology/mock_topology.h" namespace curve { namespace mds { namespace topology { -using ::testing::Return; using ::testing::_; using ::testing::AnyOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; +using ::testing::SetArgPointee; class TestTopologyMetric : public ::testing::Test { public: @@ -48,10 +48,9 @@ class TestTopologyMetric : public ::testing::Test { idGenerator_ = std::make_shared(); tokenGenerator_ = std::make_shared(); storage_ = std::make_shared(); - // 
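// Two worked examples for the policy tests above, using only values that
// appear in the tests themselves:
//   Round robin: 20 copysets, nextIndex starting at 15, 10 chunks requested
//   consumes indices 15..19 and wraps to 0..4, leaving nextIndex == 5,
//   which is exactly what ASSERT_EQ(5, nextIndex) checks.
//   Weighted choice: weights 0,1,2,3,4 sum to 10, so across 100000 draws
//   pool i should win about i/10 * 100000 = i * 10000 times, matching the
//   "0, 10000, 20000, 30000, 40000" distribution noted in the comment.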
使用真实的topology + // Using real topology topology_ = std::make_shared(idGenerator_, - tokenGenerator_, - storage_); + tokenGenerator_, storage_); topologyStat_ = std::make_shared(); allocStatistic_ = std::make_shared(); @@ -76,122 +75,87 @@ class TestTopologyMetric : public ::testing::Test { const std::string& type = "SSD", const std::string& desc = "descPoolset") { Poolset poolset(pid, name, type, desc); - EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePoolset(_)).WillOnce(Return(true)); int ret = topology_->AddPoolset(poolset); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - void PrepareAddLogicalPool(PoolIdType id = 0x01, - const std::string &name = "testLogicalPool", - PoolIdType phyPoolId = 0x11, - LogicalPoolType type = PAGEFILE, - const LogicalPool::RedundanceAndPlaceMentPolicy &rap = - LogicalPool::RedundanceAndPlaceMentPolicy(), - const LogicalPool::UserPolicy &policy = LogicalPool::UserPolicy(), - uint64_t createTime = 0x888 - ) { - LogicalPool pool(id, - name, - phyPoolId, - type, - rap, - policy, - createTime, - true, - true); - - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + void PrepareAddLogicalPool( + PoolIdType id = 0x01, const std::string& name = "testLogicalPool", + PoolIdType phyPoolId = 0x11, LogicalPoolType type = PAGEFILE, + const LogicalPool::RedundanceAndPlaceMentPolicy& rap = + LogicalPool::RedundanceAndPlaceMentPolicy(), + const LogicalPool::UserPolicy& policy = LogicalPool::UserPolicy(), + uint64_t createTime = 0x888) { + LogicalPool pool(id, name, phyPoolId, type, rap, policy, createTime, + true, true); + + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } - void PrepareAddPhysicalPool(PoolIdType id = 0x11, - const std::string &name = "testPhysicalPool", - PoolsetIdType pid = 0x61, - const std::string &desc = "descPhysicalPool") { - PhysicalPool pool(id, - name, - pid, - desc); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + const std::string& name = "testPhysicalPool", + PoolsetIdType pid = 0x61, + const std::string& desc = "descPhysicalPool") { + PhysicalPool pool(id, name, pid, desc); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret); } void PrepareAddZone(ZoneIdType id = 0x21, - const std::string &name = "testZone", - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descZone") { + const std::string& name = "testZone", + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descZone") { Zone zone(id, name, physicalPoolId, desc); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); - ASSERT_EQ(kTopoErrCodeSuccess, ret) << - "should have PrepareAddPhysicalPool()"; + ASSERT_EQ(kTopoErrCodeSuccess, ret) + << "should have PrepareAddPhysicalPool()"; } void PrepareAddServer(ServerIdType id = 0x31, - const std::string &hostName = "testServer", - const std::string &internalHostIp = "testInternalIp", - const std::string &externalHostIp = "testExternalIp", - ZoneIdType zoneId = 0x21, - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descServer") { - Server server(id, - hostName, - internalHostIp, - 0, - externalHostIp, - 0, - zoneId, - physicalPoolId, - desc); - 
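// As the "Using real topology" comment says, this fixture runs a production
// topology over mocked persistence, so UpdateTopologyMetrics() later
// exercises real aggregation code with scripted inputs. Sketch of the wiring
// (the TopologyImpl template argument is an assumption; the original
// argument list was lost in formatting):
topology_ = std::make_shared<TopologyImpl>(idGenerator_, tokenGenerator_,
                                           storage_);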
EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + const std::string& hostName = "testServer", + const std::string& internalHostIp = "testInternalIp", + const std::string& externalHostIp = "testExternalIp", + ZoneIdType zoneId = 0x21, + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descServer") { + Server server(id, hostName, internalHostIp, 0, externalHostIp, 0, + zoneId, physicalPoolId, desc); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } void PrepareAddChunkServer(ChunkServerIdType id = 0x41, - const std::string &token = "testToken", - const std::string &diskType = "nvme", - ServerIdType serverId = 0x31, - const std::string &hostIp = "testInternalIp", - uint32_t port = 0, - const std::string &diskPath = "/") { - ChunkServer cs(id, - token, - diskType, - serverId, - hostIp, - port, - diskPath); - ChunkServerState st; - st.SetDiskCapacity(100 * 1024); - st.SetDiskUsed(10 * 1024); - cs.SetChunkServerState(st); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + const std::string& token = "testToken", + const std::string& diskType = "nvme", + ServerIdType serverId = 0x31, + const std::string& hostIp = "testInternalIp", + uint32_t port = 0, + const std::string& diskPath = "/") { + ChunkServer cs(id, token, diskType, serverId, hostIp, port, diskPath); + ChunkServerState st; + st.SetDiskCapacity(100 * 1024); + st.SetDiskUsed(10 * 1024); + cs.SetChunkServerState(st); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddServer()"; } - void PrepareAddCopySet(CopySetIdType copysetId, - PoolIdType logicalPoolId, - const std::set &members) { - CopySetInfo cs(logicalPoolId, - copysetId); + void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, + const std::set& members) { + CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; @@ -207,7 +171,7 @@ class TestTopologyMetric : public ::testing::Test { std::shared_ptr testObj_; }; -TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { +TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { PoolsetIdType poolsetId = 0x61; PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; @@ -229,14 +193,13 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { rap.pageFileRAP.replicaNum = 3; PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE, rap); + PAGEFILE, rap); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - ChunkServerStat stat1; CopysetStat cstat1; stat1.leaderCount = 1; @@ -258,12 +221,10 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { stat1.copysetStats.push_back(cstat1); EXPECT_CALL(*topologyStat_, GetChunkServerStat(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), Return(true))); EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(20 * 
1024), - Return(true))); + .WillOnce(DoAll(SetArgPointee<1>(20 * 1024), Return(true))); testObj_->UpdateTopologyMetrics(); @@ -283,9 +244,9 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1024, gChunkServerMetrics[0x41]->chunkSizeUsedBytes.get_value()); ASSERT_EQ(1024, gChunkServerMetrics[0x41]->chunkSizeLeftBytes.get_value()); ASSERT_EQ(1024, - gChunkServerMetrics[0x41]->chunkSizeTrashedBytes.get_value()); + gChunkServerMetrics[0x41]->chunkSizeTrashedBytes.get_value()); ASSERT_EQ(1024 * 3, - gChunkServerMetrics[0x41]->chunkSizeTotalBytes.get_value()); + gChunkServerMetrics[0x41]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(2, gChunkServerMetrics[0x42]->scatterWidth.get_value()); ASSERT_EQ(1, gChunkServerMetrics[0x42]->copysetNum.get_value()); @@ -301,9 +262,9 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1024, gChunkServerMetrics[0x42]->chunkSizeUsedBytes.get_value()); ASSERT_EQ(1024, gChunkServerMetrics[0x42]->chunkSizeLeftBytes.get_value()); ASSERT_EQ(1024, - gChunkServerMetrics[0x42]->chunkSizeTrashedBytes.get_value()); + gChunkServerMetrics[0x42]->chunkSizeTrashedBytes.get_value()); ASSERT_EQ(1024 * 3, - gChunkServerMetrics[0x42]->chunkSizeTotalBytes.get_value()); + gChunkServerMetrics[0x42]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(2, gChunkServerMetrics[0x43]->scatterWidth.get_value()); ASSERT_EQ(1, gChunkServerMetrics[0x43]->copysetNum.get_value()); @@ -319,43 +280,75 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1024, gChunkServerMetrics[0x43]->chunkSizeUsedBytes.get_value()); ASSERT_EQ(1024, gChunkServerMetrics[0x43]->chunkSizeLeftBytes.get_value()); ASSERT_EQ(1024, - gChunkServerMetrics[0x43]->chunkSizeTrashedBytes.get_value()); + gChunkServerMetrics[0x43]->chunkSizeTrashedBytes.get_value()); ASSERT_EQ(1024 * 3, - gChunkServerMetrics[0x43]->chunkSizeTotalBytes.get_value()); + gChunkServerMetrics[0x43]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(1, gLogicalPoolMetrics.size()); - ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId]->serverNum.get_value()); //NOLINT - ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId]->chunkServerNum.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNum.get_value()); //NOLINT - ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId]->scatterWidthAvg.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->scatterWidthVariance.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->scatterWidthStandardDeviation.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->scatterWidthRange.get_value()); //NOLINT - ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId]->scatterWidthMin.get_value()); //NOLINT - ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId]->scatterWidthMax.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNumAvg.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->copysetNumVariance.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->copysetNumStandardDeviation.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->copysetNumRange.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNumMin.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNumMax.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumAvg.get_value()); //NOLINT - ASSERT_EQ(0, 
gLogicalPoolMetrics[logicalPoolId]->leaderNumVariance.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumStandardDeviation.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumRange.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumMin.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumMax.get_value()); //NOLINT - ASSERT_EQ(100 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->diskCapacity.get_value()); //NOLINT - ASSERT_EQ(20 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->diskAlloc.get_value()); //NOLINT - ASSERT_EQ(10 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->diskUsed.get_value()); //NOLINT - - ASSERT_EQ(1024 * 3, + ASSERT_EQ( + 3, + gLogicalPoolMetrics[logicalPoolId]->serverNum.get_value()); // NOLINT + ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId] + ->chunkServerNum.get_value()); // NOLINT + ASSERT_EQ( + 1, + gLogicalPoolMetrics[logicalPoolId]->copysetNum.get_value()); // NOLINT + ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthAvg.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthVariance.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthStandardDeviation.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthRange.get_value()); // NOLINT + ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthMin.get_value()); // NOLINT + ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthMax.get_value()); // NOLINT + ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumAvg.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumVariance.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumStandardDeviation.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumRange.get_value()); // NOLINT + ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumMin.get_value()); // NOLINT + ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumMax.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumAvg.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumVariance.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumStandardDeviation.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumRange.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumMin.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumMax.get_value()); // NOLINT + ASSERT_EQ(100 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId] + ->diskCapacity.get_value()); // NOLINT + ASSERT_EQ( + 20 * 1024 * 3, + gLogicalPoolMetrics[logicalPoolId]->diskAlloc.get_value()); // NOLINT + ASSERT_EQ( + 10 * 1024 * 3, + gLogicalPoolMetrics[logicalPoolId]->diskUsed.get_value()); // NOLINT + + ASSERT_EQ( + 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->chunkSizeUsedBytes.get_value()); - ASSERT_EQ(1024 * 3, + ASSERT_EQ( + 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->chunkSizeLeftBytes.get_value()); - ASSERT_EQ(1024 * 3, + ASSERT_EQ( + 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->chunkSizeTrashedBytes.get_value()); - ASSERT_EQ(1024 * 9, + ASSERT_EQ( + 1024 * 9, gLogicalPoolMetrics[logicalPoolId]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(3, 
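// Where the pool-level numbers above come from, given the fixture inputs
// (three chunkservers, each 100 * 1024 capacity / 10 * 1024 used, and
// rap.pageFileRAP.replicaNum = 3):
//   diskCapacity = 100 * 1024 x 3 servers
//   diskUsed     = 10 * 1024 x 3 servers
//   diskAlloc    = 20 * 1024 (mocked per-pool allocation) x 3, presumably
//                  scaled by the replica number
//   chunkSize*Bytes: each server reports 1024 used / left / trashed, so the
//   pool sums to 1024 * 3 of each and 1024 * 9 in total.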
gLogicalPoolMetrics[logicalPoolId]->readIOPS.get_value()); @@ -372,7 +365,7 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1, gClusterMetrics->copysetNum.get_value()); } -TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { +TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; @@ -396,7 +389,6 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - ChunkServerStat stat1; CopysetStat cstat1; stat1.leaderCount = 1; @@ -414,8 +406,7 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { stat1.copysetStats.push_back(cstat1); EXPECT_CALL(*topologyStat_, GetChunkServerStat(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), Return(true))); testObj_->UpdateTopologyMetrics(); diff --git a/test/resources.list b/test/resources.list index 9be11dbb07..20b047da17 100644 --- a/test/resources.list +++ b/test/resources.list @@ -18,30 +18,30 @@ Used port list: # client - 9101: session service 测试 - 9102: applyindex service 测试 - 9103: snapshot service 测试 - 9104: client端其他测试 - 9105: client workflow测试mds占用 - 9106: client workflow测试chunkserver占用 - 9107: client workflow测试chunkserver占用 - 9108: client workflow测试chunkserver占用 - 9109: request scheduler测试占用 - 9110/9111/9112: TestLibcbdLibcurve测试占用 - 9115/9116/9117: TestLibcurveInterface测试占用 - - 9120: mds 接口测试 - 9121: mds 接口测试 - 9122: mds 接口测试 - 9123: mds 接口测试 - 9130: metric测试 - 9131: metric测试 - 9132: metric测试 - 9140: metric测试 - 9141: metric测试 - 9142: metric测试 - 9150/9151 ChunkserverUnstableTest - 19151/19110/19111/19112 curveClient测试 + 9101: session service testing + 9102: applyindex service testing + 9103: snapshot service testing + 9104: Other client testing + 9105: client workflow testing, MDS usage + 9106: client workflow testing, Chunkserver usage + 9107: client workflow testing, Chunkserver usage + 9108: client workflow testing, Chunkserver usage + 9109: request scheduler testing usage + 9110/9111/9112: TestLibcbdLibcurve testing usage + 9115/9116/9117: TestLibcurveInterface testing usage + + 9120: MDS interface testing + 9121: MDS interface testing + 9122: MDS interface testing + 9123: MDS interface testing + 9130: metric testing + 9131: metric testing + 9132: metric testing + 9140: metric testing + 9141: metric testing + 9142: metric testing + 9150/9151: ChunkserverUnstableTest + 19151/19110/19111/19112: curveClient testing client_test_unittest: 21000 diff --git a/test/snapshotcloneserver/test_clone_core.cpp b/test/snapshotcloneserver/test_clone_core.cpp index f57c2d15c0..882905855d 100644 --- a/test/snapshotcloneserver/test_clone_core.cpp +++ b/test/snapshotcloneserver/test_clone_core.cpp @@ -20,25 +20,24 @@ * Author: xuchaojie */ -#include #include +#include +#include "src/common/location_operator.h" +#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/clone/clone_core.h" #include "src/snapshotcloneserver/clone/clone_task.h" -#include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/common/location_operator.h" - #include "test/snapshotcloneserver/mock_snapshot_server.h" using ::curve::common::LocationOperator; -using ::testing::Return; using ::testing::_; -using ::testing::AnyOf; using ::testing::AllOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; +using 
::testing::AnyOf; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace snapshotcloneserver { @@ -51,10 +50,8 @@ class TestCloneCoreImpl : public ::testing::Test { virtual ~TestCloneCoreImpl() {} virtual void SetUp() { - snapshotRef_ = - std::make_shared(); - cloneRef_ = - std::make_shared(); + snapshotRef_ = std::make_shared(); + cloneRef_ = std::make_shared(); client_ = std::make_shared(); metaStore_ = std::make_shared(); dataStore_ = std::make_shared(); @@ -65,14 +62,9 @@ class TestCloneCoreImpl : public ::testing::Test { option.recoverChunkConcurrency = 2; option.clientAsyncMethodRetryTimeSec = 1; option.clientAsyncMethodRetryIntervalMs = 500; - core_ = std::make_shared(client_, - metaStore_, - dataStore_, - snapshotRef_, - cloneRef_, - option); - EXPECT_CALL(*client_, Mkdir(_, _)) - .WillOnce(Return(LIBCURVE_ERROR::OK)); + core_ = std::make_shared( + client_, metaStore_, dataStore_, snapshotRef_, cloneRef_, option); + EXPECT_CALL(*client_, Mkdir(_, _)).WillOnce(Return(LIBCURVE_ERROR::OK)); ASSERT_EQ(core_->Init(), 0); } @@ -86,66 +78,47 @@ class TestCloneCoreImpl : public ::testing::Test { } protected: - // 辅助mock函数 + // Auxiliary mock function void MockBuildFileInfoFromSnapshotSuccess( std::shared_ptr task); - void MockBuildFileInfoFromFileSuccess( - std::shared_ptr task); + void MockBuildFileInfoFromFileSuccess(std::shared_ptr task); - void MockCreateCloneFileSuccess( - std::shared_ptr task); + void MockCreateCloneFileSuccess(std::shared_ptr task); - void MockCloneMetaSuccess( - std::shared_ptr task); + void MockCloneMetaSuccess(std::shared_ptr task); - void MockCreateCloneChunkSuccess( - std::shared_ptr task); + void MockCreateCloneChunkSuccess(std::shared_ptr task); - void MockCompleteCloneMetaSuccess( - std::shared_ptr task); + void MockCompleteCloneMetaSuccess(std::shared_ptr task); - void MockRecoverChunkSuccess( - std::shared_ptr task); + void MockRecoverChunkSuccess(std::shared_ptr task); - void MockChangeOwnerSuccess( - std::shared_ptr task); + void MockChangeOwnerSuccess(std::shared_ptr task); - void MockRenameCloneFileSuccess( - std::shared_ptr task); + void MockRenameCloneFileSuccess(std::shared_ptr task); - void MockCompleteCloneFileSuccess( - std::shared_ptr task); + void MockCompleteCloneFileSuccess(std::shared_ptr task); - void MockBuildFileInfoFromSnapshotFail( - std::shared_ptr task); + void MockBuildFileInfoFromSnapshotFail(std::shared_ptr task); - void MockBuildFileInfoFromFileFail( - std::shared_ptr task); + void MockBuildFileInfoFromFileFail(std::shared_ptr task); - void MockCreateCloneFileFail( - std::shared_ptr task); + void MockCreateCloneFileFail(std::shared_ptr task); - void MockCloneMetaFail( - std::shared_ptr task); + void MockCloneMetaFail(std::shared_ptr task); - void MockCreateCloneChunkFail( - std::shared_ptr task); + void MockCreateCloneChunkFail(std::shared_ptr task); - void MockCompleteCloneMetaFail( - std::shared_ptr task); + void MockCompleteCloneMetaFail(std::shared_ptr task); - void MockRecoverChunkFail( - std::shared_ptr task); + void MockRecoverChunkFail(std::shared_ptr task); - void MockChangeOwnerFail( - std::shared_ptr task); + void MockChangeOwnerFail(std::shared_ptr task); - void MockRenameCloneFileFail( - std::shared_ptr task); + void MockRenameCloneFileFail(std::shared_ptr task); - void MockCompleteCloneFileFail( - std::shared_ptr task); + void MockCompleteCloneFileFail(std::shared_ptr task); protected: std::shared_ptr core_; @@ -157,9 +130,8 @@ class 
TestCloneCoreImpl : public ::testing::Test { SnapshotCloneServerOptions option; }; - TEST_F(TestCloneCoreImpl, TestClonePreForSnapSuccess) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -171,16 +143,13 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapSuccess) { SnapshotInfo snap("id1", "user1", "file1", "snap1"); snap.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); - EXPECT_CALL(*metaStore_, AddCloneInfo(_)) - .WillOnce(Return(kErrCodeSuccess)); + EXPECT_CALL(*metaStore_, AddCloneInfo(_)).WillOnce(Return(kErrCodeSuccess)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(1, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -188,35 +157,25 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapSuccess) { } TEST_F(TestCloneCoreImpl, TestClonePreForSnapTaskExist) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; CloneInfo cloneInfoOut; std::vector cloneInfoList; - CloneInfo info1("taskid1", - user, - CloneTaskType::kClone, - source, - destination, - kDefaultPoolset, - 100, - 101, - 0, - CloneFileType::kSnapshot, - lazyFlag, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo info1("taskid1", user, CloneTaskType::kClone, source, destination, + kDefaultPoolset, 100, 101, 0, CloneFileType::kSnapshot, + lazyFlag, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cloneInfoList.push_back(info1); EXPECT_CALL(*metaStore_, GetCloneInfoByFileName(destination, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cloneInfoList), - Return(kErrCodeSuccess))); + .WillOnce( + DoAll(SetArgPointee<1>(cloneInfoList), Return(kErrCodeSuccess))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeTaskExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -224,35 +183,25 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapTaskExist) { } TEST_F(TestCloneCoreImpl, TestClonePreForSnapFailOnFileExist) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; CloneInfo cloneInfoOut; std::vector cloneInfoList; - CloneInfo info1("taskid1", - user, - CloneTaskType::kRecover, - source, - destination, - "", - 100, - 101, - 0, - CloneFileType::kSnapshot, - lazyFlag, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo info1("taskid1", user, CloneTaskType::kRecover, source, + destination, "", 100, 101, 0, CloneFileType::kSnapshot, + lazyFlag, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cloneInfoList.push_back(info1); EXPECT_CALL(*metaStore_, GetCloneInfoByFileName(destination, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cloneInfoList), - Return(kErrCodeSuccess))); + .WillOnce( + 
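// Every Pre test in this file ends on the same two counters: cloning from a
// snapshot pins the snapshot, cloning from a live file pins the source file
// instead, and each failure path must leave both at zero, presumably so the
// source stays deletable. The snapshot success-path check, as used just
// above:
ASSERT_EQ(1, core_->GetSnapshotRef()->GetSnapshotRef(source));
ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source));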
DoAll(SetArgPointee<1>(cloneInfoList), Return(kErrCodeSuccess))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeFileExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -260,7 +209,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapFailOnFileExist) { } TEST_F(TestCloneCoreImpl, TestClonePreForFileSuccess) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -283,15 +232,13 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSuccess) { fInfo.filestatus = FileStatus::Created; fInfo.poolset = kDefaultPoolset; EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) - .WillOnce(DoAll(SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); - EXPECT_CALL(*metaStore_, AddCloneInfo(_)) - .WillOnce(Return(kErrCodeSuccess)); + EXPECT_CALL(*metaStore_, AddCloneInfo(_)).WillOnce(Return(kErrCodeSuccess)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -299,7 +246,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSuccess) { } TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidSnapshot) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -311,13 +258,11 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidSnapshot) { SnapshotInfo snap("id1", "user2", "file1", "snap1"); snap.SetStatus(Status::pending); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeInvalidSnapshot, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -325,7 +270,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidSnapshot) { } TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidUser) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -336,20 +281,18 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidUser) { SnapshotInfo snap("id1", "user2", "file1", "snap1"); snap.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, 
destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeInvalidUser, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreAddCloneInfoFail) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -360,23 +303,21 @@ TEST_F(TestCloneCoreImpl, TestClonePreAddCloneInfoFail) { SnapshotInfo snap("id1", "user1", "file1", "snap1"); snap.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); EXPECT_CALL(*metaStore_, AddCloneInfo(_)) .WillOnce(Return(kErrCodeInternalError)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeInternalError, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreForFileNotExist) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -390,16 +331,16 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileNotExist) { EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) .WillOnce(Return(-LIBCURVE_ERROR::NOTEXIST)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeFileNotExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreForFileFail) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -413,16 +354,16 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileFail) { EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) .WillOnce(Return(-LIBCURVE_ERROR::FAILED)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeInternalError, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreDestinationExist) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -431,16 +372,16 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationExist) { EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _)) .WillOnce(Return(LIBCURVE_ERROR::OK)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int 
ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeFileExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreDestinationAndTaskExist) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -448,42 +389,30 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationAndTaskExist) { uint64_t destId = 10086; std::vector cloneInfoList; - CloneInfo info1("taskid1", - user, - CloneTaskType::kClone, - source, - destination, - kDefaultPoolset, - 100, - destId, - 0, - CloneFileType::kFile, - lazyFlag, - CloneStep::kRecoverChunk, - CloneStatus::metaInstalled); + CloneInfo info1("taskid1", user, CloneTaskType::kClone, source, destination, + kDefaultPoolset, 100, destId, 0, CloneFileType::kFile, + lazyFlag, CloneStep::kRecoverChunk, + CloneStatus::metaInstalled); cloneInfoList.push_back(info1); EXPECT_CALL(*metaStore_, GetCloneInfoByFileName(destination, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cloneInfoList), - Return(kErrCodeSuccess))); + .WillOnce( + DoAll(SetArgPointee<1>(cloneInfoList), Return(kErrCodeSuccess))); FInfo fInfo; fInfo.id = destId; EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeTaskExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreDestinationExistButInodeidNotEqual) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -491,42 +420,30 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationExistButInodeidNotEqual) { uint64_t destId = 10086; std::vector cloneInfoList; - CloneInfo info1("taskid1", - user, - CloneTaskType::kClone, - source, - destination, - kDefaultPoolset, - 100, - destId, - 0, - CloneFileType::kFile, - lazyFlag, - CloneStep::kRecoverChunk, - CloneStatus::metaInstalled); + CloneInfo info1("taskid1", user, CloneTaskType::kClone, source, destination, + kDefaultPoolset, 100, destId, 0, CloneFileType::kFile, + lazyFlag, CloneStep::kRecoverChunk, + CloneStatus::metaInstalled); cloneInfoList.push_back(info1); EXPECT_CALL(*metaStore_, GetCloneInfoByFileName(destination, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cloneInfoList), - Return(kErrCodeSuccess))); + .WillOnce( + DoAll(SetArgPointee<1>(cloneInfoList), Return(kErrCodeSuccess))); FInfo fInfo; fInfo.id = destId + 1; EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, 
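// Error mapping exercised by the CloneOrRecoverPre tests in this file:
//   source GetFileInfo -> NOTEXIST             => kErrCodeFileNotExist
//   source GetFileInfo -> FAILED or UNKNOWN    => kErrCodeInternalError
//   source file status invalid                 => kErrCodeFileStatusInvalid
//   destination already exists (kClone)        => kErrCodeFileExist
//   destination exists, matching task found    => kErrCodeTaskExist
//   snapshot pending or wrong destination      => kErrCodeInvalidSnapshot
//   snapshot owned by another user             => kErrCodeInvalidUser
//   AddCloneInfo fails                         => kErrCodeInternalError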
destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeFileExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestRecoverPreDestinationNotExist) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -535,16 +452,16 @@ TEST_F(TestCloneCoreImpl, TestRecoverPreDestinationNotExist) { EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _)) .WillOnce(Return(-LIBCURVE_ERROR::NOTEXIST)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kRecover, "", &cloneInfoOut); + int ret = + core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kRecover, "", &cloneInfoOut); ASSERT_EQ(kErrCodeFileNotExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestRecoverPreForSnapSuccess) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -556,23 +473,20 @@ TEST_F(TestCloneCoreImpl, TestRecoverPreForSnapSuccess) { SnapshotInfo snap("id1", "user1", destination, "snap1"); snap.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); - EXPECT_CALL(*metaStore_, AddCloneInfo(_)) - .WillOnce(Return(kErrCodeSuccess)); + EXPECT_CALL(*metaStore_, AddCloneInfo(_)).WillOnce(Return(kErrCodeSuccess)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kRecover, "", &cloneInfoOut); + int ret = + core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kRecover, "", &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(1, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestRecoverPreForSnapDestNotMatch) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -584,20 +498,18 @@ TEST_F(TestCloneCoreImpl, TestRecoverPreForSnapDestNotMatch) { SnapshotInfo snap("id1", "user1", "file1", "snap1"); snap.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kRecover, "", &cloneInfoOut); + int ret = + core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kRecover, "", &cloneInfoOut); ASSERT_EQ(kErrCodeInvalidSnapshot, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreDestinationFileInternalError) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -606,16 +518,16 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationFileInternalError) { 
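// The recover tests above invert the destination check used for clone:
// kClone requires the destination not to exist (an existing file yields
// kErrCodeFileExist), while kRecover requires it to exist and to be the file
// the snapshot was taken from (otherwise kErrCodeInvalidSnapshot). Condensed
// from the test just above:
EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _))
    .WillOnce(Return(-LIBCURVE_ERROR::NOTEXIST));
int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag,
                                   CloneTaskType::kRecover, "", &cloneInfoOut);
ASSERT_EQ(kErrCodeFileNotExist, ret);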
EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _)) .WillOnce(Return(-LIBCURVE_ERROR::UNKNOWN)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeInternalError, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreForFileSourceFileStatusInvalid) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -631,16 +543,16 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSourceFileStatusInvalid) { EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeFileStatusInvalid, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreForFileSetCloneFileStatusReturnNotExist) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -662,29 +574,26 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSetCloneFileStatusReturnNotExist) { fInfo.filename = "file1"; fInfo.filestatus = FileStatus::Created; EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) - .WillOnce(DoAll(SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); - EXPECT_CALL(*metaStore_, AddCloneInfo(_)) - .WillOnce(Return(kErrCodeSuccess)); + EXPECT_CALL(*metaStore_, AddCloneInfo(_)).WillOnce(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, SetCloneFileStatus(source, - FileStatus::BeingCloned, - option.mdsRootUser)) + EXPECT_CALL(*client_, SetCloneFileStatus(source, FileStatus::BeingCloned, + option.mdsRootUser)) .WillOnce(Return(-LIBCURVE_ERROR::NOTEXIST)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(1, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStage1SuccessForCloneBySnapshot) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + HandleCloneOrRecoverTaskStage1SuccessForCloneBySnapshot) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -705,7 +614,7 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - 
HandleCloneOrRecoverTaskStage2SuccessForCloneBySnapshot) { + HandleCloneOrRecoverTaskStage2SuccessForCloneBySnapshot) { CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", kDefaultPoolset, 1, 2, 100, CloneFileType::kSnapshot, true, CloneStep::kRecoverChunk, CloneStatus::cloning); @@ -726,7 +635,7 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskSuccessForCloneBySnapshotNotLazy) { + HandleCloneOrRecoverTaskSuccessForCloneBySnapshotNotLazy) { CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::cloning); @@ -752,9 +661,9 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskFailOnBuildFileInfoFromSnapshot) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + HandleCloneOrRecoverTaskFailOnBuildFileInfoFromSnapshot) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -769,10 +678,9 @@ TEST_F(TestCloneCoreImpl, core_->HandleCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskFailOnGetSnapshotInfo) { - CloneInfo cinfo("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); +TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnGetSnapshotInfo) { + CloneInfo cinfo("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); cinfo.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -792,22 +700,20 @@ TEST_F(TestCloneCoreImpl, uint64_t filelength = 2 * segmentsize; uint64_t time = 100; Status status = Status::done; - SnapshotInfo info(uuid, user, fileName, desc, - seqnum, chunksize, segmentsize, filelength, 0, 0, "default", - time, status); + SnapshotInfo info(uuid, user, fileName, desc, seqnum, chunksize, + segmentsize, filelength, 0, 0, "default", time, status); EXPECT_CALL(*metaStore_, GetSnapshotInfo(_, _)) - .WillRepeatedly(DoAll( - SetArgPointee<1>(info), - Return(kErrCodeInternalError))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(info), Return(kErrCodeInternalError))); core_->HandleCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStage1SuccessForRecoverBySnapshot) { - CloneInfo info("id1", "user1", CloneTaskType::kRecover, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + HandleCloneOrRecoverTaskStage1SuccessForRecoverBySnapshot) { + CloneInfo info("id1", "user1", CloneTaskType::kRecover, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::recovering); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -827,7 +733,7 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStage2SuccessForRecoverBySnapshot) { + HandleCloneOrRecoverTaskStage2SuccessForRecoverBySnapshot) { CloneInfo info("id1", "user1", CloneTaskType::kRecover, "snapid1", "file1", kDefaultPoolset, 1, 2, 100, CloneFileType::kSnapshot, true, CloneStep::kRecoverChunk, CloneStatus::recovering); @@ -847,10 +753,9 @@ TEST_F(TestCloneCoreImpl, core_->HandleCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - 
HandleCloneOrRecoverTaskFailOnCreateCloneFile) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); +TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCreateCloneFile) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -867,8 +772,8 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCloneMeta) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -886,8 +791,8 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCloneMeta) { } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCreateCloneChunk) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -906,8 +811,8 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCreateCloneChunk) { } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCompleteCloneMeta) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -927,8 +832,8 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCompleteCloneMeta) { } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnChangeOwner) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -949,8 +854,8 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnChangeOwner) { } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFileOnRenameCloneFile) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1011,8 +916,7 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCompleteCloneFail) { core_->HandleCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStage1SuccessForCloneByFile) { +TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStage1SuccessForCloneByFile) { CloneInfo info("id1", "user1", CloneTaskType::kClone, 
"snapid1", "file1", kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); @@ -1034,11 +938,10 @@ TEST_F(TestCloneCoreImpl, core_->HandleCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStage2SuccessForCloneByFile) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, 1, 2, 100, CloneFileType::kFile, true, - CloneStep::kRecoverChunk, CloneStatus::cloning); +TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStage2SuccessForCloneByFile) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, 1, 2, 100, CloneFileType::kFile, true, + CloneStep::kRecoverChunk, CloneStatus::cloning); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1056,9 +959,9 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskForCloneByFileFailOnBuildFileInfoFromFile) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kFile, true); + HandleCloneOrRecoverTaskForCloneByFileFailOnBuildFileInfoFromFile) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1074,9 +977,9 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskForCloneByFileFailOnInvalidSegmentSize) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kFile, true); + HandleCloneOrRecoverTaskForCloneByFileFailOnInvalidSegmentSize) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1095,17 +998,16 @@ TEST_F(TestCloneCoreImpl, fInfo.owner = "user1"; fInfo.filename = "file1"; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); core_->HandleCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskForCloneByFileFailOnInvalidFileLen) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kFile, true); + HandleCloneOrRecoverTaskForCloneByFileFailOnInvalidFileLen) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1124,17 +1026,15 @@ TEST_F(TestCloneCoreImpl, fInfo.owner = "user1"; fInfo.filename = "file1"; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); core_->HandleCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStepUnknown) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); +TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStepUnknown) { + CloneInfo info("id1", "user1", 
CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); info.SetNextStep(static_cast(8)); auto cloneMetric = std::make_shared("id1"); @@ -1163,14 +1063,12 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotSuccess( uint64_t filelength = 1 * segmentsize; uint64_t time = 100; Status status = Status::done; - SnapshotInfo info(uuid, user, fileName, desc, - seqnum, chunksize, segmentsize, filelength, 0, 0, kDefaultPoolset, - time, status); + SnapshotInfo info(uuid, user, fileName, desc, seqnum, chunksize, + segmentsize, filelength, 0, 0, kDefaultPoolset, time, + status); EXPECT_CALL(*metaStore_, GetSnapshotInfo(_, _)) - .WillRepeatedly(DoAll( - SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); if (CloneTaskType::kRecover == task->GetCloneInfo().GetTaskType()) { FInfo fInfo; @@ -1178,9 +1076,8 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotSuccess( fInfo.seqnum = 100; fInfo.poolset = kDefaultPoolset; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); } ChunkIndexData snapMeta; @@ -1191,18 +1088,15 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotSuccess( snapMeta.PutChunkDataName(chunk2); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snapMeta), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapMeta), Return(kErrCodeSuccess))); FInfo fInfo; fInfo.id = 100; fInfo.seqnum = 100; fInfo.poolset = kDefaultPoolset; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); } void TestCloneCoreImpl::MockBuildFileInfoFromFileSuccess( @@ -1216,9 +1110,8 @@ void TestCloneCoreImpl::MockBuildFileInfoFromFileSuccess( fInfo.owner = "user1"; fInfo.filename = "file1"; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); } void TestCloneCoreImpl::MockCreateCloneFileSuccess( @@ -1226,8 +1119,8 @@ void TestCloneCoreImpl::MockCreateCloneFileSuccess( FInfo fInfoOut; fInfoOut.id = 100; EXPECT_CALL(*client_, CreateCloneFile(_, _, _, _, _, _, _, _, _, _)) - .WillOnce(DoAll(SetArgPointee<9>(fInfoOut), - Return(LIBCURVE_ERROR::OK))); + .WillOnce( + DoAll(SetArgPointee<9>(fInfoOut), Return(LIBCURVE_ERROR::OK))); } void TestCloneCoreImpl::MockCloneMetaSuccess( @@ -1238,33 +1131,25 @@ void TestCloneCoreImpl::MockCloneMetaSuccess( segInfoOut.segmentsize = segmentsize; segInfoOut.chunksize = chunksize; segInfoOut.startoffset = 0; - segInfoOut.chunkvec = {{1, 1, 1}, - {2, 2, 1}}; + segInfoOut.chunkvec = {{1, 1, 1}, {2, 2, 1}}; segInfoOut.lpcpIDInfo.lpid = 1; segInfoOut.lpcpIDInfo.cpidVec = {1, 2}; EXPECT_CALL(*client_, GetOrAllocateSegmentInfo(_, 0, _, _, _)) .WillRepeatedly( - DoAll(SetArgPointee<4>(segInfoOut), - Return(LIBCURVE_ERROR::OK))); + DoAll(SetArgPointee<4>(segInfoOut), Return(LIBCURVE_ERROR::OK))); } void TestCloneCoreImpl::MockCreateCloneChunkSuccess( std::shared_ptr task) { std::string location1, location2; if (CloneFileType::kSnapshot == task->GetCloneInfo().GetFileType()) { - location1 = 
LocationOperator::GenerateS3Location( - "file1-0-1"); - location2 = LocationOperator::GenerateS3Location( - "file1-1-1"); + location1 = LocationOperator::GenerateS3Location("file1-0-1"); + location2 = LocationOperator::GenerateS3Location("file1-1-1"); } else { - location1 = - LocationOperator::GenerateCurveLocation( - task->GetCloneInfo().GetSrc(), - std::stoull("0")); - location2 = - LocationOperator::GenerateCurveLocation( - task->GetCloneInfo().GetSrc(), - std::stoull("1048576")); + location1 = LocationOperator::GenerateCurveLocation( + task->GetCloneInfo().GetSrc(), std::stoull("0")); + location2 = LocationOperator::GenerateCurveLocation( + task->GetCloneInfo().GetSrc(), std::stoull("1048576")); } uint32_t correctSn = 0; @@ -1273,18 +1158,15 @@ void TestCloneCoreImpl::MockCreateCloneChunkSuccess( } else { correctSn = 100; } - EXPECT_CALL(*client_, CreateCloneChunk( - AnyOf(location1, location2), _, _, correctSn, _, _)) + EXPECT_CALL(*client_, CreateCloneChunk(AnyOf(location1, location2), _, _, + correctSn, _, _)) .WillRepeatedly(DoAll( - Invoke([](const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), + Invoke([](const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, uint64_t csn, + uint64_t chunkSize, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), Return(LIBCURVE_ERROR::OK))); } @@ -1297,15 +1179,12 @@ void TestCloneCoreImpl::MockCompleteCloneMetaSuccess( void TestCloneCoreImpl::MockRecoverChunkSuccess( std::shared_ptr task) { EXPECT_CALL(*client_, RecoverChunk(_, _, _, _)) - .WillRepeatedly(DoAll( - Invoke([](const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK), - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK), scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); } void TestCloneCoreImpl::MockChangeOwnerSuccess( @@ -1338,22 +1217,18 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotFail( uint64_t filelength = 2 * segmentsize; uint64_t time = 100; Status status = Status::done; - SnapshotInfo info(uuid, user, fileName, desc, - seqnum, chunksize, segmentsize, filelength, 0, 0, "default", - time, status); + SnapshotInfo info(uuid, user, fileName, desc, seqnum, chunksize, + segmentsize, filelength, 0, 0, "default", time, status); EXPECT_CALL(*metaStore_, GetSnapshotInfo(_, _)) - .WillRepeatedly(DoAll( - SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); if (CloneTaskType::kRecover == task->GetCloneInfo().GetTaskType()) { FInfo fInfo; fInfo.id = 100; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); } EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) @@ -1362,9 +1237,8 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotFail( FInfo fInfo; fInfo.id = 100; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(-LIBCURVE_ERROR::FAILED))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(-LIBCURVE_ERROR::FAILED))); } void 
TestCloneCoreImpl::MockBuildFileInfoFromFileFail( @@ -1378,9 +1252,8 @@ void TestCloneCoreImpl::MockBuildFileInfoFromFileFail( fInfo.owner = "user1"; fInfo.filename = "file1"; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(-LIBCURVE_ERROR::FAILED))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(-LIBCURVE_ERROR::FAILED))); } void TestCloneCoreImpl::MockCreateCloneFileFail( @@ -1388,12 +1261,11 @@ void TestCloneCoreImpl::MockCreateCloneFileFail( FInfo fInfoOut; fInfoOut.id = 100; EXPECT_CALL(*client_, CreateCloneFile(_, _, _, _, _, _, _, _, _, _)) - .WillOnce(DoAll(SetArgPointee<9>(fInfoOut), - Return(-LIBCURVE_ERROR::FAILED))); + .WillOnce( + DoAll(SetArgPointee<9>(fInfoOut), Return(-LIBCURVE_ERROR::FAILED))); } -void TestCloneCoreImpl::MockCloneMetaFail( - std::shared_ptr task) { +void TestCloneCoreImpl::MockCloneMetaFail(std::shared_ptr task) { EXPECT_CALL(*client_, GetOrAllocateSegmentInfo(_, _, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::FAILED)); } @@ -1402,15 +1274,12 @@ void TestCloneCoreImpl::MockCreateCloneChunkFail( std::shared_ptr task) { EXPECT_CALL(*client_, CreateCloneChunk(_, _, _, _, _, _)) .WillRepeatedly(DoAll( - Invoke([](const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), + Invoke([](const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, uint64_t csn, + uint64_t chunkSize, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), Return(-LIBCURVE_ERROR::FAILED))); } @@ -1424,13 +1293,9 @@ void TestCloneCoreImpl::MockRecoverChunkFail( std::shared_ptr task) { EXPECT_CALL(*client_, RecoverChunk(_, _, _, _)) .WillRepeatedly(DoAll( - Invoke([](const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure* scc){ - scc->Run(); - }), - Return(-LIBCURVE_ERROR::FAILED))); + Invoke([](const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc) { scc->Run(); }), + Return(-LIBCURVE_ERROR::FAILED))); } void TestCloneCoreImpl::MockChangeOwnerFail( @@ -1452,7 +1317,7 @@ void TestCloneCoreImpl::MockCompleteCloneFileFail( } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreSuccess) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1462,20 +1327,17 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreSuccess) { cinfo.SetStatus(CloneStatus::error); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(0))); EXPECT_CALL(*metaStore_, UpdateCloneInfo(_)) .WillOnce(Return(kErrCodeSuccess)); - int ret = core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreGetCloneInfoFail) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1485,17 +1347,14 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreGetCloneInfoFail) { cinfo.SetStatus(CloneStatus::error); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(-1))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(-1))); - int ret = 
core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(0, ret); } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreInvalidUser) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1505,17 +1364,14 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreInvalidUser) { cinfo.SetStatus(CloneStatus::error); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(0))); - int ret = core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(kErrCodeInvalidUser, ret); } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreCannotCleanUnFinished) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1525,17 +1381,14 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreCannotCleanUnFinished) { cinfo.SetStatus(CloneStatus::cloning); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(0))); - int ret = core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(kErrCodeCannotCleanCloneUnfinished, ret); } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreTaskExist) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1545,17 +1398,14 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreTaskExist) { cinfo.SetStatus(CloneStatus::errorCleaning); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(0))); - int ret = core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(kErrCodeTaskExist, ret); } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreUpdateCloneInfoFail) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1565,21 +1415,18 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreUpdateCloneInfoFail) { cinfo.SetStatus(CloneStatus::error); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(0))); EXPECT_CALL(*metaStore_, UpdateCloneInfo(_)) .WillOnce(Return(kErrCodeInternalError)); - int ret = core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(kErrCodeInternalError, ret); } TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskSuccess) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1589,14 +1436,13 @@ TEST_F(TestCloneCoreImpl, 
TestHandleCleanCloneOrRecoverTaskSuccess) { EXPECT_CALL(*client_, DeleteFile(_, _, _)) .WillOnce(Return(LIBCURVE_ERROR::OK)); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); core_->HandleCleanCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskSuccess2) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1606,14 +1452,13 @@ TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskSuccess2) { EXPECT_CALL(*client_, DeleteFile(_, _, _)) .WillOnce(Return(-LIBCURVE_ERROR::NOTEXIST)); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); core_->HandleCleanCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskLazySuccess) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1624,14 +1469,13 @@ TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskLazySuccess) { .Times(1) .WillOnce(Return(LIBCURVE_ERROR::OK)); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); core_->HandleCleanCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskFail1) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1642,43 +1486,36 @@ TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskFail1) { .Times(1) .WillOnce(Return(-LIBCURVE_ERROR::FAILED)); - EXPECT_CALL(*metaStore_, UpdateCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, UpdateCloneInfo(_)).WillOnce(Return(0)); core_->HandleCleanCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, - TestHandleCleanCloneOrRecoverTaskCleanNotErrorSuccess) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); + TestHandleCleanCloneOrRecoverTaskCleanNotErrorSuccess) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::cleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); std::shared_ptr task = std::make_shared(info, cloneMetric, cloneClosure); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); core_->HandleCleanCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - TestCheckFileExists) { 
+TEST_F(TestCloneCoreImpl, TestCheckFileExists) { FInfo fInfo; fInfo.id = 100; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(core_->CheckFileExists("filename", 100), kErrCodeFileExist); EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(core_->CheckFileExists("filename", 10), kErrCodeFileNotExist); @@ -1693,36 +1530,31 @@ TEST_F(TestCloneCoreImpl, ASSERT_EQ(core_->CheckFileExists("filename", 100), kErrCodeInternalError); } -TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoSnapDeleteCloneInfoFail) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(-1)); +TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoSnapDeleteCloneInfoFail) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(-1)); snapshotRef_->IncrementSnapshotRef("snapid1"); ASSERT_EQ(core_->HandleDeleteCloneInfo(info), kErrCodeInternalError); ASSERT_EQ(snapshotRef_->GetSnapshotRef("snapid1"), 1); } -TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoSnapSuccess) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); +TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoSnapSuccess) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::metaInstalled); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); snapshotRef_->IncrementSnapshotRef("snapid1"); ASSERT_EQ(core_->HandleDeleteCloneInfo(info), kErrCodeSuccess); ASSERT_EQ(snapshotRef_->GetSnapshotRef("snapid1"), 0); } TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoFileRefReturnMetainstalledNotTo0) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); + TestHandleDeleteCloneInfoFileRefReturnMetainstalledNotTo0) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "source1", "file1", + kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); cloneRef_->IncrementRef("source1"); cloneRef_->IncrementRef("source1"); ASSERT_EQ(cloneRef_->GetRef("source1"), 2); @@ -1730,26 +1562,22 @@ TEST_F(TestCloneCoreImpl, ASSERT_EQ(cloneRef_->GetRef("source1"), 1); } -TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoFileSetStatusFail) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); +TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoFileSetStatusFail) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "source1", "file1", + kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); cloneRef_->IncrementRef("source1"); ASSERT_EQ(cloneRef_->GetRef("source1"), 1); - 
EXPECT_CALL(*client_, SetCloneFileStatus(_, _, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*client_, SetCloneFileStatus(_, _, _)).WillOnce(Return(-1)); ASSERT_EQ(core_->HandleDeleteCloneInfo(info), kErrCodeInternalError); ASSERT_EQ(cloneRef_->GetRef("source1"), 1); } -TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoFileDeleteCloneInfoFail) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); +TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoFileDeleteCloneInfoFail) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "source1", "file1", + kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(-1)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(-1)); cloneRef_->IncrementRef("source1"); ASSERT_EQ(cloneRef_->GetRef("source1"), 1); EXPECT_CALL(*client_, SetCloneFileStatus(_, _, _)) @@ -1758,13 +1586,11 @@ TEST_F(TestCloneCoreImpl, ASSERT_EQ(cloneRef_->GetRef("source1"), 1); } -TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoFileSuccess) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); +TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoFileSuccess) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "source1", "file1", + kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); cloneRef_->IncrementRef("source1"); ASSERT_EQ(cloneRef_->GetRef("source1"), 1); EXPECT_CALL(*client_, SetCloneFileStatus(_, _, _)) diff --git a/test/snapshotcloneserver/test_curvefs_client.cpp b/test/snapshotcloneserver/test_curvefs_client.cpp index b4e79b17b0..ec27aa8fe7 100644 --- a/test/snapshotcloneserver/test_curvefs_client.cpp +++ b/test/snapshotcloneserver/test_curvefs_client.cpp @@ -20,9 +20,8 @@ * Author: xuchaojie */ - -#include #include +#include #include "src/snapshotcloneserver/common/curvefs_client.h" #include "test/util/config_generator.h" @@ -40,19 +39,14 @@ class TestCurveFsClientImpl : public ::testing::Test { static void SetUpTestCase() { ClientConfigGenerator gentor(kClientConfigPath); - // 把超时时间和重试次数改小,已使得测试尽快完成 + // Shrink the timeouts and retry counts so that the tests finish as + // quickly as possible std::vector options = { - {"mds.listen.addr=127.0.0.1:8888", - "mds.registerToMDS=false", - "mds.rpcTimeoutMS=1", - "mds.maxRPCTimeoutMS=1", - "mds.maxRetryMS=1", - "mds.rpcRetryIntervalUS=1", - "metacache.getLeaderTimeOutMS=1", - "metacache.getLeaderRetry=1", - "metacache.rpcRetryIntervalUS=1", - "chunkserver.opRetryIntervalUS=1", - "chunkserver.opMaxRetry=1", + {"mds.listen.addr=127.0.0.1:8888", "mds.registerToMDS=false", + "mds.rpcTimeoutMS=1", "mds.maxRPCTimeoutMS=1", "mds.maxRetryMS=1", + "mds.rpcRetryIntervalUS=1", "metacache.getLeaderTimeOutMS=1", + "metacache.getLeaderRetry=1", "metacache.rpcRetryIntervalUS=1", + "chunkserver.opRetryIntervalUS=1", "chunkserver.opMaxRetry=1", "chunkserver.rpcTimeoutMS=1", "chunkserver.maxRetrySleepIntervalUS=1", "chunkserver.maxRPCTimeoutMS=1"}, @@ -64,8 +58,7 @@ class TestCurveFsClientImpl : public ::testing::Test { virtual void SetUp() { std::shared_ptr snapClient = std::make_shared(); - std::shared_ptr fileClient = - std::make_shared(); + std::shared_ptr fileClient = std::make_shared();
client_ = std::make_shared(snapClient, fileClient); clientOption_.configPath = kClientConfigPath; clientOption_.mdsRootUser = "root"; @@ -75,9 +68,7 @@ class TestCurveFsClientImpl : public ::testing::Test { client_->Init(clientOption_); } - virtual void TearDown() { - client_->UnInit(); - } + virtual void TearDown() { client_->UnInit(); } protected: std::shared_ptr client_; @@ -85,9 +76,7 @@ class TestCurveFsClientImpl : public ::testing::Test { }; struct TestClosure : public SnapCloneClosure { - void Run() { - std::unique_ptr selfGuard(this); - } + void Run() { std::unique_ptr selfGuard(this); } }; TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { @@ -111,35 +100,35 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { SegmentInfo segInfo; ret = client_->GetSnapshotSegmentInfo("file1", "user1", 1, 0, &segInfo); ASSERT_LT(ret, 0); - ret = client_->GetSnapshotSegmentInfo( - "file1", clientOption_.mdsRootUser, 1, 0, &segInfo); + ret = client_->GetSnapshotSegmentInfo("file1", clientOption_.mdsRootUser, 1, + 0, &segInfo); ASSERT_LT(ret, 0); ChunkIDInfo cidinfo; FileStatus fstatus; ret = client_->CheckSnapShotStatus("file1", "user1", 1, &fstatus); ASSERT_LT(ret, 0); - ret = client_->CheckSnapShotStatus( - "file1", clientOption_.mdsRootUser, 1, &fstatus); + ret = client_->CheckSnapShotStatus("file1", clientOption_.mdsRootUser, 1, + &fstatus); ASSERT_LT(ret, 0); ChunkInfoDetail chunkInfo; ret = client_->GetChunkInfo(cidinfo, &chunkInfo); ASSERT_LT(ret, 0); - ret = client_->CreateCloneFile( - "source1", "file1", "user1", 1024, 1, 1024, 0, 0, "default", &fInfo); + ret = client_->CreateCloneFile("source1", "file1", "user1", 1024, 1, 1024, + 0, 0, "default", &fInfo); ASSERT_LT(ret, 0); - ret = client_->CreateCloneFile( - "source1", "file1", clientOption_.mdsRootUser, 1024, 1, 1024, - 0, 0, "default", &fInfo); + ret = + client_->CreateCloneFile("source1", "file1", clientOption_.mdsRootUser, + 1024, 1, 1024, 0, 0, "default", &fInfo); ASSERT_LT(ret, 0); - TestClosure *cb = new TestClosure(); + TestClosure* cb = new TestClosure(); ret = client_->CreateCloneChunk("", cidinfo, 1, 2, 1024, cb); ASSERT_EQ(ret, 0); - TestClosure *cb2 = new TestClosure(); + TestClosure* cb2 = new TestClosure(); ret = client_->RecoverChunk(cidinfo, 0, 1024, cb2); ASSERT_EQ(ret, 0); @@ -159,8 +148,9 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { ret = client_->GetFileInfo("file1", clientOption_.mdsRootUser, &fInfo); ASSERT_LT(ret, 0); - // client 对mds接口无限重试,这两个接口死循环,先注释掉 - // ret = client_->GetOrAllocateSegmentInfo( + // The client retries the mds interface infinitely, and these two interfaces + // loop endlessly. 
They are commented out for now. + // ret = client_->GetOrAllocateSegmentInfo( // true, 0, &fInfo, "user1", &segInfo); // ASSERT_LT(ret, 0); // ret = client_->GetOrAllocateSegmentInfo( @@ -169,8 +159,8 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { ret = client_->RenameCloneFile("user1", 1, 2, "file1", "file2"); ASSERT_LT(ret, 0); - ret = client_->RenameCloneFile( - clientOption_.mdsRootUser, 1, 2, "file1", "file2"); + ret = client_->RenameCloneFile(clientOption_.mdsRootUser, 1, 2, "file1", + "file2"); ASSERT_LT(ret, 0); ret = client_->DeleteFile("file1", "user1", 1); @@ -187,7 +177,5 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { ASSERT_LT(ret, 0); } - - } // namespace snapshotcloneserver } // namespace curve diff --git a/test/snapshotcloneserver/test_snapshot_core.cpp b/test/snapshotcloneserver/test_snapshot_core.cpp index d4c40963f1..02e363ee1a 100644 --- a/test/snapshotcloneserver/test_snapshot_core.cpp +++ b/test/snapshotcloneserver/test_snapshot_core.cpp @@ -20,26 +20,24 @@ * Author: xuchaojie */ -#include #include +#include -#include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include "src/snapshotcloneserver/snapshot/snapshot_task.h" - #include "test/snapshotcloneserver/mock_snapshot_server.h" - namespace curve { namespace snapshotcloneserver { -using ::testing::Return; using ::testing::_; -using ::testing::AnyOf; using ::testing::AllOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; +using ::testing::AnyOf; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; +using ::testing::SetArgPointee; class TestSnapshotCoreImpl : public ::testing::Test { public: @@ -47,8 +45,7 @@ class TestSnapshotCoreImpl : public ::testing::Test { virtual ~TestSnapshotCoreImpl() {} virtual void SetUp() { - snapshotRef_ = - std::make_shared(); + snapshotRef_ = std::make_shared(); client_ = std::make_shared(); metaStore_ = std::make_shared(); dataStore_ = std::make_shared(); @@ -60,11 +57,8 @@ class TestSnapshotCoreImpl : public ::testing::Test { option.snapshotCoreThreadNum = 1; option.clientAsyncMethodRetryTimeSec = 1; option.clientAsyncMethodRetryIntervalMs = 500; - core_ = std::make_shared(client_, - metaStore_, - dataStore_, - snapshotRef_, - option); + core_ = std::make_shared( + client_, metaStore_, dataStore_, snapshotRef_, option); ASSERT_EQ(core_->Init(), 0); } @@ -84,7 +78,6 @@ class TestSnapshotCoreImpl : public ::testing::Test { SnapshotCloneServerOptions option; }; - TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreSuccess) { const std::string file = "file"; const std::string user = "user"; @@ -96,18 +89,13 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreSuccess) { sinfo.SetStatus(Status::done); list.push_back(sinfo); EXPECT_CALL(*metaStore_, GetSnapshotList(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(list), - Return(kErrCodeSuccess))); FInfo fInfo; fInfo.filestatus = FileStatus::Created; fInfo.owner = user; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); - EXPECT_CALL(*metaStore_, AddSnapshot(_)) - .WillOnce(Return(kErrCodeSuccess)); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); + EXPECT_CALL(*metaStore_, AddSnapshot(_)).WillOnce(Return(kErrCodeSuccess)); int ret = core_->CreateSnapshotPre(file, user, desc, &info);
ASSERT_EQ(kErrCodeSuccess, ret); } @@ -119,16 +107,11 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreTaskExist) { SnapshotInfo info; std::vector list; - SnapshotInfo sinfo("snapid1", - user, - file, - desc); + SnapshotInfo sinfo("snapid1", user, file, desc); sinfo.SetStatus(Status::pending); list.push_back(sinfo); EXPECT_CALL(*metaStore_, GetSnapshotList(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(list), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(list), Return(kErrCodeSuccess))); int ret = core_->CreateSnapshotPre(file, user, desc, &info); ASSERT_EQ(kErrCodeTaskExist, ret); } @@ -144,9 +127,7 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreAddSnapshotFail) { fInfo.filestatus = FileStatus::Created; fInfo.owner = user; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, AddSnapshot(_)) .WillOnce(Return(kErrCodeInternalError)); int ret = core_->CreateSnapshotPre(file, user, desc, &info); @@ -163,9 +144,8 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreFileNotExist) { FInfo fInfo; fInfo.filestatus = FileStatus::Created; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(-LIBCURVE_ERROR::NOTEXIST))); + .WillOnce( + DoAll(SetArgPointee<2>(fInfo), Return(-LIBCURVE_ERROR::NOTEXIST))); int ret = core_->CreateSnapshotPre(file, user, desc, &info); ASSERT_EQ(kErrCodeFileNotExist, ret); } @@ -181,9 +161,8 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreInvalidUser) { fInfo.filestatus = FileStatus::Created; fInfo.owner = "user2"; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(-LIBCURVE_ERROR::AUTHFAIL))); + .WillOnce( + DoAll(SetArgPointee<2>(fInfo), Return(-LIBCURVE_ERROR::AUTHFAIL))); int ret = core_->CreateSnapshotPre(file, user, desc, &info); ASSERT_EQ(kErrCodeInvalidUser, ret); } @@ -198,9 +177,8 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreInternalError) { FInfo fInfo; fInfo.filestatus = FileStatus::Created; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(-LIBCURVE_ERROR::FAILED))); + .WillOnce( + DoAll(SetArgPointee<2>(fInfo), Return(-LIBCURVE_ERROR::FAILED))); int ret = core_->CreateSnapshotPre(file, user, desc, &info); ASSERT_EQ(kErrCodeInternalError, ret); } @@ -216,9 +194,7 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreFailStatusInvalid) { fInfo.filestatus = FileStatus::Cloning; fInfo.owner = user; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); int ret = core_->CreateSnapshotPre(file, user, desc, &info); ASSERT_EQ(kErrCodeFileStatusInvalid, ret); } @@ -232,8 +208,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPreSuccess) { SnapshotInfo info(uuid, user, fileName, desc); info.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillOnce(Return(kErrCodeSuccess)); @@ -252,8 +227,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_GetSnapshotInfoNotExist) { SnapshotInfo info(uuid, user, fileName, desc); info.SetStatus(Status::done); EXPECT_CALL(*metaStore_, 
GetSnapshotInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeInternalError))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeInternalError))); SnapshotInfo infoOut; int ret = core_->DeleteSnapshotPre(uuid, user, fileName, &infoOut); @@ -269,8 +243,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_UpdateSnapshotFail) { SnapshotInfo info(uuid, user, fileName, desc); info.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillOnce(Return(kErrCodeInternalError)); @@ -290,8 +263,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_InvalidUser) { SnapshotInfo info(uuid, user2, fileName, desc); info.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); SnapshotInfo infoOut; int ret = core_->DeleteSnapshotPre(uuid, user, fileName, &infoOut); @@ -307,8 +279,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_DeleteSnapshotUnfinished) { SnapshotInfo info(uuid, user, fileName, desc); info.SetStatus(Status::pending); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); SnapshotInfo infoOut; int ret = core_->DeleteSnapshotPre(uuid, user, fileName, &infoOut); @@ -325,8 +296,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_FileNameNotMatch) { SnapshotInfo info(uuid, user, fileName2, desc); info.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); SnapshotInfo infoOut; int ret = core_->DeleteSnapshotPre(uuid, user, fileName, &infoOut); @@ -342,16 +312,14 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_TaskExit) { SnapshotInfo info(uuid, user, fileName, desc); info.SetStatus(Status::deleting); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); SnapshotInfo infoOut; int ret = core_->DeleteSnapshotPre(uuid, user, fileName, &infoOut); ASSERT_EQ(kErrCodeTaskExist, ret); } -TEST_F(TestSnapshotCoreImpl, - TestGetFileSnapshotInfoSuccess) { +TEST_F(TestSnapshotCoreImpl, TestGetFileSnapshotInfoSuccess) { std::string file = "file1"; std::vector info; @@ -362,8 +330,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(kErrCodeSuccess, ret); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskSuccess) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskSuccess) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -378,9 +345,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -389,10 +354,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - 
.WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -407,10 +370,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -420,29 +381,21 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce(DoAll(SetArgPointee<4>(segInfo2), Return(kErrCodeSuccess))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -464,16 +417,13 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) @@ -481,28 +431,22 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .WillOnce(Return(LIBCURVE_ERROR::OK)); @@ -518,8 +462,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::done, 
task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_CreateSnapshotFail) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTask_CreateSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -533,10 +476,8 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(-LIBCURVE_ERROR::FAILED))); - + .WillOnce( + DoAll(SetArgPointee<2>(seqNum), Return(-LIBCURVE_ERROR::FAILED))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillOnce(Return(kErrCodeSuccess)); @@ -547,8 +488,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_GetSnapshotFail) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTask_GetSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -562,10 +502,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -574,9 +511,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(-LIBCURVE_ERROR::FAILED))); + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(-LIBCURVE_ERROR::FAILED))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillOnce(Return(kErrCodeSuccess)); @@ -587,8 +523,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_UpdateSnapshotFail) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTask_UpdateSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -601,10 +536,7 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -613,10 +545,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeInternalError)); @@ -630,7 +560,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_SecondTimeUpdateSnapshotFail) { + TestHandleCreateSnapshotTask_SecondTimeUpdateSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -643,10 +573,7 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce(DoAll(SetArgPointee<2>(seqNum), 
+                        Return(LIBCURVE_ERROR::OK)));
     FInfo snapInfo;
     snapInfo.seqnum = 100;
@@ -655,10 +582,8 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfo.length = 2 * snapInfo.segmentsize;
     snapInfo.ctime = 10;
     EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(snapInfo),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(
+            DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK)));
     EXPECT_CALL(*metaStore_, CASSnapshot(_, _))
         .WillOnce(Return(kErrCodeInternalError));
@@ -672,7 +597,7 @@
 }

 TEST_F(TestSnapshotCoreImpl,
-    TestHandleCreateSnapshotTask_GetSnapshotSegmentInfoFail) {
+       TestHandleCreateSnapshotTask_GetSnapshotSegmentInfoFail) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -685,12 +610,8 @@ TEST_F(TestSnapshotCoreImpl,
     std::shared_ptr<SnapshotTaskInfo> task =
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);
-
     EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _))
-        .WillOnce(DoAll(
-            SetArgPointee<2>(seqNum),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK)));
     FInfo snapInfo;
     snapInfo.seqnum = 100;
@@ -699,21 +620,15 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfo.length = 2 * snapInfo.segmentsize;
     snapInfo.ctime = 10;
     EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(snapInfo),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(
+            DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK)));
     EXPECT_CALL(*metaStore_, CASSnapshot(_, _))
         .WillOnce(Return(kErrCodeSuccess));

     EXPECT_CALL(*metaStore_, UpdateSnapshot(_))
         .WillOnce(Return(kErrCodeSuccess));

-    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName,
-        user,
-        seqNum,
-        _,
-        _))
+    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _))
         .WillRepeatedly(Return(-LIBCURVE_ERROR::FAILED));

     core_->HandleCreateSnapshotTask(task);
@@ -722,8 +637,7 @@ TEST_F(TestSnapshotCoreImpl,
     ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus());
 }

-TEST_F(TestSnapshotCoreImpl,
-    TestHandleCreateSnapshotTask_GetChunkInfoFail) {
+TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTask_GetChunkInfoFail) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -737,10 +651,7 @@ TEST_F(TestSnapshotCoreImpl,
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);

     EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _))
-        .WillOnce(DoAll(
-            SetArgPointee<2>(seqNum),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK)));
     FInfo snapInfo;
     snapInfo.seqnum = 100;
@@ -749,10 +660,8 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfo.length = 2 * snapInfo.segmentsize;
     snapInfo.ctime = 10;
     EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(snapInfo),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(
+            DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK)));
     EXPECT_CALL(*metaStore_, CASSnapshot(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -767,10 +676,8 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId2 = 2;

     SegmentInfo segInfo1;
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId1, lpid1, cpid1));
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId2, lpid2, cpid2));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2));

     LogicPoolID lpid3 = 3;
     CopysetID cpid3 = 3;
@@ -780,25 +687,19 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId4 = 4;

     SegmentInfo segInfo2;
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId3, lpid3, cpid3));
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId4, lpid4, cpid4));
-
-    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName,
-        user,
-        seqNum,
-        _,
-        _))
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo1),
-                        Return(LIBCURVE_ERROR::OK)));
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3));
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4));
+
+    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _))
+        .WillOnce(
+            DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK)));

     uint64_t chunkSn = 100;
     ChunkInfoDetail chunkInfo;
     chunkInfo.chunkSn.push_back(chunkSn);
     EXPECT_CALL(*client_, GetChunkInfo(_, _))
         .WillOnce(DoAll(SetArgPointee<1>(chunkInfo),
-                  Return(-LIBCURVE_ERROR::FAILED)));
+                        Return(-LIBCURVE_ERROR::FAILED)));

     core_->HandleCreateSnapshotTask(task);
@@ -807,7 +708,7 @@
 }

 TEST_F(TestSnapshotCoreImpl,
-    TestHandleCreateSnapshotTask_PutChunkIndexDataFail) {
+       TestHandleCreateSnapshotTask_PutChunkIndexDataFail) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -820,12 +721,8 @@ TEST_F(TestSnapshotCoreImpl,
     std::shared_ptr<SnapshotTaskInfo> task =
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);
-
     EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _))
-        .WillOnce(DoAll(
-            SetArgPointee<2>(seqNum),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK)));
     FInfo snapInfo;
     snapInfo.seqnum = 100;
@@ -834,10 +731,8 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfo.length = 2 * snapInfo.segmentsize;
     snapInfo.ctime = 10;
     EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(snapInfo),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(
+            DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK)));
     EXPECT_CALL(*metaStore_, CASSnapshot(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -852,10 +747,8 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId2 = 2;

     SegmentInfo segInfo1;
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId1, lpid1, cpid1));
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId2, lpid2, cpid2));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2));

     LogicPoolID lpid3 = 3;
     CopysetID cpid3 = 3;
@@ -865,29 +758,22 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId4 = 4;

     SegmentInfo segInfo2;
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId3, lpid3, cpid3));
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId4, lpid4, cpid4));
-
-    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName,
-        user,
-        seqNum,
-        _,
-        _))
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3));
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4));
+
+    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _))
         .Times(2)
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo1),
-                        Return(LIBCURVE_ERROR::OK)))
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo2),
-                        Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK)))
+        .WillOnce(
+            DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK)));

     uint64_t chunkSn = 100;
     ChunkInfoDetail chunkInfo;
     chunkInfo.chunkSn.push_back(chunkSn);
     EXPECT_CALL(*client_, GetChunkInfo(_, _))
         .Times(4)
-        .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo),
-                              Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK)));
     EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _))
         .WillOnce(Return(kErrCodeInternalError));

     core_->HandleCreateSnapshotTask(task);
@@ -899,7 +785,7 @@
 }

 TEST_F(TestSnapshotCoreImpl,
-    TestHandleCreateSnapshotTask_DataChunkTranferInitFail) {
+       TestHandleCreateSnapshotTask_DataChunkTranferInitFail) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -912,11 +798,8 @@ TEST_F(TestSnapshotCoreImpl,
     std::shared_ptr<SnapshotTaskInfo> task =
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);
-
     EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _))
-        .WillOnce(DoAll(
-            SetArgPointee<2>(seqNum),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK)));

     FInfo snapInfo;
     snapInfo.seqnum = 100;
@@ -925,10 +808,8 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfo.length = 2 * snapInfo.segmentsize;
     snapInfo.ctime = 10;
     EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(snapInfo),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(
+            DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK)));
     EXPECT_CALL(*metaStore_, CASSnapshot(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -943,10 +824,8 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId2 = 2;

     SegmentInfo segInfo1;
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId1, lpid1, cpid1));
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId2, lpid2, cpid2));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2));

     LogicPoolID lpid3 = 3;
     CopysetID cpid3 = 3;
@@ -956,29 +835,22 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId4 = 4;

     SegmentInfo segInfo2;
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId3, lpid3, cpid3));
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId4, lpid4, cpid4));
-
-    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName,
-        user,
-        seqNum,
-        _,
-        _))
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3));
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4));
+
+    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _))
        .Times(2)
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo1),
-                        Return(LIBCURVE_ERROR::OK)))
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo2),
-                        Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK)))
+        .WillOnce(
+            DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK)));

     uint64_t chunkSn = 100;
     ChunkInfoDetail chunkInfo;
     chunkInfo.chunkSn.push_back(chunkSn);
     EXPECT_CALL(*client_, GetChunkInfo(_, _))
         .Times(4)
-        .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo),
-                              Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -989,23 +861,20 @@ TEST_F(TestSnapshotCoreImpl,
     std::vector<SnapshotInfo> snapInfos;
     SnapshotInfo info2(uuid2, user, fileName, desc2);
     info.SetSeqNum(seqNum);
-    info2.SetSeqNum(seqNum - 1);  // 上一个快照
+    info2.SetSeqNum(seqNum - 1);  // Previous snapshot
     info2.SetStatus(Status::done);
     snapInfos.push_back(info);
     snapInfos.push_back(info2);

     EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
         .Times(2)
-        .WillRepeatedly(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     ChunkIndexData indexData;
     indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0));
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess)));

     EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _))
         .WillOnce(Return(kErrCodeInternalError));
@@ -1017,7 +886,7 @@
 }

 TEST_F(TestSnapshotCoreImpl,
-    TestHandleCreateSnapshotTask_ReadChunkSnapshotFail) {
+       TestHandleCreateSnapshotTask_ReadChunkSnapshotFail) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -1031,9 +900,7 @@ TEST_F(TestSnapshotCoreImpl,
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);

     EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _))
-        .WillOnce(DoAll(
-            SetArgPointee<2>(seqNum),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK)));

     FInfo snapInfo;
     snapInfo.seqnum = 100;
@@ -1042,10 +909,8 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfo.length = 2 * snapInfo.segmentsize;
     snapInfo.ctime = 10;
     EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(snapInfo),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(
+            DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK)));
     EXPECT_CALL(*metaStore_, CASSnapshot(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1060,10 +925,8 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId2 = 2;

     SegmentInfo segInfo1;
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId1, lpid1, cpid1));
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId2, lpid2, cpid2));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2));

     LogicPoolID lpid3 = 3;
     CopysetID cpid3 = 3;
@@ -1073,29 +936,22 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId4 = 4;

     SegmentInfo segInfo2;
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId3, lpid3, cpid3));
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId4, lpid4, cpid4));
-
-    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName,
-        user,
-        seqNum,
-        _,
-        _))
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3));
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4));
+
+    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _))
        .Times(2)
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo1),
-                        Return(LIBCURVE_ERROR::OK)))
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo2),
-                        Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK)))
+        .WillOnce(
+            DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK)));

     uint64_t chunkSn = 100;
     ChunkInfoDetail chunkInfo;
     chunkInfo.chunkSn.push_back(chunkSn);
     EXPECT_CALL(*client_, GetChunkInfo(_, _))
         .Times(4)
-        .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo),
-                              Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1111,35 +967,28 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfos.push_back(info);
     snapInfos.push_back(info2);

-    EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
+    EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
        .Times(2)
-        .WillRepeatedly(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     ChunkIndexData indexData;
     indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0));
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess)));

     EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _))
         .Times(1)
         .WillRepeatedly(Return(kErrCodeSuccess));

     EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _))
-        .WillOnce(DoAll(
-            Invoke([](ChunkIDInfo cidinfo,
-                      uint64_t seq,
-                      uint64_t offset,
-                      uint64_t len,
-                      char *buf,
-                      SnapCloneClosure* scc){
-                scc->SetRetCode(LIBCURVE_ERROR::OK);
-                scc->Run();
-            }),
-            Return(-LIBCURVE_ERROR::FAILED)));
+        .WillOnce(
+            DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset,
+                            uint64_t len, char* buf, SnapCloneClosure* scc) {
+                      scc->SetRetCode(LIBCURVE_ERROR::OK);
+                      scc->Run();
+                  }),
+                  Return(-LIBCURVE_ERROR::FAILED)));

     core_->HandleCreateSnapshotTask(task);
@@ -1148,7 +997,7 @@
 }

 TEST_F(TestSnapshotCoreImpl,
-    TestHandleCreateSnapshotTask_DataChunkTranferAddPartFail) {
+       TestHandleCreateSnapshotTask_DataChunkTranferAddPartFail) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -1161,11 +1010,8 @@ TEST_F(TestSnapshotCoreImpl,
     std::shared_ptr<SnapshotTaskInfo> task =
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);
-
     EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _))
-        .WillOnce(DoAll(
-            SetArgPointee<2>(seqNum),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK)));

     FInfo snapInfo;
     snapInfo.seqnum = 100;
@@ -1174,10 +1020,8 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfo.length = 2 * snapInfo.segmentsize;
     snapInfo.ctime = 10;
     EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(snapInfo),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(
+            DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK)));
     EXPECT_CALL(*metaStore_, CASSnapshot(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1192,10 +1036,8 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId2 = 2;

     SegmentInfo segInfo1;
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId1, lpid1, cpid1));
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId2, lpid2, cpid2));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2));

     LogicPoolID lpid3 = 3;
     CopysetID cpid3 = 3;
@@ -1205,29 +1047,22 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId4 = 4;

     SegmentInfo segInfo2;
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId3, lpid3, cpid3));
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId4, lpid4, cpid4));
-
-    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName,
-        user,
-        seqNum,
-        _,
-        _))
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3));
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4));
+
+    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _))
        .Times(2)
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo1),
-                        Return(LIBCURVE_ERROR::OK)))
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo2),
-                        Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK)))
+        .WillOnce(
+            DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK)));

     uint64_t chunkSn = 100;
     ChunkInfoDetail chunkInfo;
     chunkInfo.chunkSn.push_back(chunkSn);
     EXPECT_CALL(*client_, GetChunkInfo(_, _))
         .Times(4)
-        .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo),
-                              Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1245,34 +1080,26 @@
     EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
         .Times(2)
-        .WillRepeatedly(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     ChunkIndexData indexData;
     indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0));
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess)));

     EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _))
         .Times(1)
         .WillRepeatedly(Return(kErrCodeSuccess));
-
     EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _))
-        .WillRepeatedly(DoAll(
-            Invoke([](ChunkIDInfo cidinfo,
-                      uint64_t seq,
-                      uint64_t offset,
-                      uint64_t len,
-                      char *buf,
-                      SnapCloneClosure* scc){
-                scc->SetRetCode(LIBCURVE_ERROR::OK);
-                scc->Run();
-            }),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset,
+                            uint64_t len, char* buf, SnapCloneClosure* scc) {
+                      scc->SetRetCode(LIBCURVE_ERROR::OK);
+                      scc->Run();
+                  }),
+                  Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _))
         .WillRepeatedly(Return(kErrCodeInternalError));
@@ -1287,7 +1114,7 @@
 }

 TEST_F(TestSnapshotCoreImpl,
-    TestHandleCreateSnapshotTask_DataChunkTranferCompleteFail) {
+       TestHandleCreateSnapshotTask_DataChunkTranferCompleteFail) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -1300,11 +1127,8 @@ TEST_F(TestSnapshotCoreImpl,
     std::shared_ptr<SnapshotTaskInfo> task =
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);
-
     EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _))
-        .WillOnce(DoAll(
-            SetArgPointee<2>(seqNum),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK)));

     FInfo snapInfo;
     snapInfo.seqnum = 100;
@@ -1313,10 +1137,8 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfo.length = 2 * snapInfo.segmentsize;
     snapInfo.ctime = 10;
     EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(snapInfo),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(
+            DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK)));
     EXPECT_CALL(*metaStore_, CASSnapshot(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1331,10 +1153,8 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId2 = 2;

     SegmentInfo segInfo1;
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId1, lpid1, cpid1));
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId2, lpid2, cpid2));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2));

     LogicPoolID lpid3 = 3;
     CopysetID cpid3 = 3;
@@ -1344,29 +1164,22 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId4 = 4;

     SegmentInfo segInfo2;
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId3, lpid3, cpid3));
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId4, lpid4, cpid4));
-
-    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName,
-        user,
-        seqNum,
-        _,
-        _))
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3));
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4));
+
+    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _))
        .Times(2)
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo1),
-                        Return(LIBCURVE_ERROR::OK)))
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo2),
-                        Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK)))
+        .WillOnce(
+            DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK)));

     uint64_t chunkSn = 100;
     ChunkInfoDetail chunkInfo;
     chunkInfo.chunkSn.push_back(chunkSn);
     EXPECT_CALL(*client_, GetChunkInfo(_, _))
         .Times(4)
-        .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo),
-                              Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1384,35 +1197,27 @@ TEST_F(TestSnapshotCoreImpl,
     EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
         .Times(2)
-        .WillRepeatedly(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     ChunkIndexData indexData;
     indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0));
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess)));

     EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _))
         .Times(1)
         .WillRepeatedly(Return(kErrCodeSuccess));
-
     EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _))
         .Times(2)
-        .WillRepeatedly(DoAll(
-            Invoke([](ChunkIDInfo cidinfo,
-                      uint64_t seq,
-                      uint64_t offset,
-                      uint64_t len,
-                      char *buf,
-                      SnapCloneClosure* scc){
-                scc->SetRetCode(LIBCURVE_ERROR::OK);
-                scc->Run();
-            }),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset,
+                            uint64_t len, char* buf, SnapCloneClosure* scc) {
+                      scc->SetRetCode(LIBCURVE_ERROR::OK);
+                      scc->Run();
+                  }),
+                  Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _))
         .Times(2)
@@ -1430,8 +1235,7 @@ TEST_F(TestSnapshotCoreImpl,
     ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus());
 }

-TEST_F(TestSnapshotCoreImpl,
-    TestHandleCreateSnapshotTask_DeleteSnapshotFail) {
+TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTask_DeleteSnapshotFail) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -1444,11 +1248,8 @@ TEST_F(TestSnapshotCoreImpl,
     std::shared_ptr<SnapshotTaskInfo> task =
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);
-
     EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _))
-        .WillOnce(DoAll(
-            SetArgPointee<2>(seqNum),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK)));

     FInfo snapInfo;
     snapInfo.seqnum = 100;
@@ -1457,10 +1258,8 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfo.length = 2 * snapInfo.segmentsize;
     snapInfo.ctime = 10;
     EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(snapInfo),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(
+            DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK)));
     EXPECT_CALL(*metaStore_, CASSnapshot(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1475,10 +1274,8 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId2 = 2;

     SegmentInfo segInfo1;
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId1, lpid1, cpid1));
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId2, lpid2, cpid2));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2));

     LogicPoolID lpid3 = 3;
     CopysetID cpid3 = 3;
@@ -1488,29 +1285,22 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId4 = 4;

     SegmentInfo segInfo2;
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId3, lpid3, cpid3));
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId4, lpid4, cpid4));
-
-    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName,
-        user,
-        seqNum,
-        _,
-        _))
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3));
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4));
+
+    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _))
        .Times(2)
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo1),
-                        Return(LIBCURVE_ERROR::OK)))
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo2),
-                        Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK)))
+        .WillOnce(
+            DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK)));

     uint64_t chunkSn = 100;
     ChunkInfoDetail chunkInfo;
     chunkInfo.chunkSn.push_back(chunkSn);
     EXPECT_CALL(*client_, GetChunkInfo(_, _))
         .Times(4)
-        .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo),
-                              Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1528,41 +1318,32 @@ TEST_F(TestSnapshotCoreImpl,
     EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
         .Times(2)
-        .WillRepeatedly(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     ChunkIndexData indexData;
     indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0));
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess)));

     EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _))
         .Times(4)
         .WillRepeatedly(Return(kErrCodeSuccess));
-
     EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _))
         .Times(8)
-        .WillRepeatedly(DoAll(
-            Invoke([](ChunkIDInfo cidinfo,
-                      uint64_t seq,
-                      uint64_t offset,
-                      uint64_t len,
-                      char *buf,
-                      SnapCloneClosure* scc){
-                scc->SetRetCode(LIBCURVE_ERROR::OK);
-                scc->Run();
-            }),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset,
+                            uint64_t len, char* buf, SnapCloneClosure* scc) {
+                      scc->SetRetCode(LIBCURVE_ERROR::OK);
+                      scc->Run();
+                  }),
+                  Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _))
         .Times(8)
         .WillRepeatedly(Return(kErrCodeSuccess));
-
     EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _))
         .Times(4)
         .WillRepeatedly(Return(kErrCodeSuccess));
@@ -1577,7 +1358,7 @@
 }

 TEST_F(TestSnapshotCoreImpl,
-    TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnReturnFail) {
+       TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnReturnFail) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -1590,11 +1371,8 @@ TEST_F(TestSnapshotCoreImpl,
     std::shared_ptr<SnapshotTaskInfo> task =
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);
-
     EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _))
-        .WillOnce(DoAll(
-            SetArgPointee<2>(seqNum),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK)));

     FInfo snapInfo;
     snapInfo.seqnum = 100;
@@ -1603,10 +1381,8 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfo.length = 2 * snapInfo.segmentsize;
     snapInfo.ctime = 10;
     EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(snapInfo),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(
+            DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK)));
     EXPECT_CALL(*metaStore_, CASSnapshot(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1621,10 +1397,8 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId2 = 2;

     SegmentInfo segInfo1;
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId1, lpid1, cpid1));
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId2, lpid2, cpid2));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2));

     LogicPoolID lpid3 = 3;
     CopysetID cpid3 = 3;
@@ -1634,29 +1408,22 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId4 = 4;

     SegmentInfo segInfo2;
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId3, lpid3, cpid3));
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId4, lpid4, cpid4));
-
-    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName,
-        user,
-        seqNum,
-        _,
-        _))
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3));
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4));
+
+    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _))
        .Times(2)
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo1),
-                        Return(LIBCURVE_ERROR::OK)))
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo2),
-                        Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK)))
+        .WillOnce(
+            DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK)));

     uint64_t chunkSn = 100;
     ChunkInfoDetail chunkInfo;
     chunkInfo.chunkSn.push_back(chunkSn);
     EXPECT_CALL(*client_, GetChunkInfo(_, _))
         .Times(4)
-        .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo),
-                              Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1674,41 +1441,32 @@ TEST_F(TestSnapshotCoreImpl,
     EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
         .Times(2)
-        .WillRepeatedly(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     ChunkIndexData indexData;
     indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0));
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess)));

     EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _))
         .Times(4)
         .WillRepeatedly(Return(kErrCodeSuccess));
-
     EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _))
         .Times(8)
-        .WillRepeatedly(DoAll(
-            Invoke([](ChunkIDInfo cidinfo,
-                      uint64_t seq,
-                      uint64_t offset,
-                      uint64_t len,
-                      char *buf,
-                      SnapCloneClosure* scc){
-                scc->SetRetCode(LIBCURVE_ERROR::OK);
-                scc->Run();
-            }),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset,
+                            uint64_t len, char* buf, SnapCloneClosure* scc) {
+                      scc->SetRetCode(LIBCURVE_ERROR::OK);
+                      scc->Run();
+                  }),
+                  Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _))
         .Times(8)
         .WillRepeatedly(Return(kErrCodeSuccess));
-
     EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _))
         .Times(4)
         .WillRepeatedly(Return(kErrCodeSuccess));
@@ -1726,7 +1484,7 @@
 }

 TEST_F(TestSnapshotCoreImpl,
-    TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnDeleteError) {
+       TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnDeleteError) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -1739,11 +1497,8 @@ TEST_F(TestSnapshotCoreImpl,
     std::shared_ptr<SnapshotTaskInfo> task =
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);
-
     EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _))
-        .WillOnce(DoAll(
-            SetArgPointee<2>(seqNum),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK)));

     FInfo snapInfo;
     snapInfo.seqnum = 100;
@@ -1752,10 +1507,8 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfo.length = 2 * snapInfo.segmentsize;
     snapInfo.ctime = 10;
     EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(snapInfo),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(
+            DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK)));
     EXPECT_CALL(*metaStore_, CASSnapshot(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1770,10 +1523,8 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId2 = 2;

     SegmentInfo segInfo1;
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId1, lpid1, cpid1));
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId2, lpid2, cpid2));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2));

     LogicPoolID lpid3 = 3;
     CopysetID cpid3 = 3;
@@ -1783,29 +1534,22 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId4 = 4;

     SegmentInfo segInfo2;
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId3, lpid3, cpid3));
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId4, lpid4, cpid4));
-
-    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName,
-        user,
-        seqNum,
-        _,
-        _))
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3));
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4));
+
+    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _))
        .Times(2)
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo1),
-                        Return(LIBCURVE_ERROR::OK)))
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo2),
-                        Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK)))
+        .WillOnce(
+            DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK)));

     uint64_t chunkSn = 100;
     ChunkInfoDetail chunkInfo;
     chunkInfo.chunkSn.push_back(chunkSn);
     EXPECT_CALL(*client_, GetChunkInfo(_, _))
         .Times(4)
-        .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo),
-                              Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1823,41 +1567,32 @@ TEST_F(TestSnapshotCoreImpl,
     EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
         .Times(2)
-        .WillRepeatedly(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     ChunkIndexData indexData;
     indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0));
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess)));

     EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _))
         .Times(4)
         .WillRepeatedly(Return(kErrCodeSuccess));
-
     EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _))
         .Times(8)
-        .WillRepeatedly(DoAll(
-            Invoke([](ChunkIDInfo cidinfo,
-                      uint64_t seq,
-                      uint64_t offset,
-                      uint64_t len,
-                      char *buf,
-                      SnapCloneClosure* scc){
-                scc->SetRetCode(LIBCURVE_ERROR::OK);
-                scc->Run();
-            }),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset,
+                            uint64_t len, char* buf, SnapCloneClosure* scc) {
+                      scc->SetRetCode(LIBCURVE_ERROR::OK);
+                      scc->Run();
+                  }),
+                  Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _))
         .Times(8)
         .WillRepeatedly(Return(kErrCodeSuccess));
-
     EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _))
         .Times(4)
         .WillRepeatedly(Return(kErrCodeSuccess));
@@ -1875,7 +1610,7 @@
 }

 TEST_F(TestSnapshotCoreImpl,
-    TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnFileStatusError) {
+       TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnFileStatusError) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -1888,11 +1623,8 @@ TEST_F(TestSnapshotCoreImpl,
     std::shared_ptr<SnapshotTaskInfo> task =
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);
-
     EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _))
-        .WillOnce(DoAll(
-            SetArgPointee<2>(seqNum),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK)));

     FInfo snapInfo;
     snapInfo.seqnum = 100;
@@ -1901,10 +1633,8 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfo.length = 2 * snapInfo.segmentsize;
     snapInfo.ctime = 10;
     EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(snapInfo),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(
+            DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK)));
     EXPECT_CALL(*metaStore_, CASSnapshot(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1919,10 +1649,8 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId2 = 2;

     SegmentInfo segInfo1;
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId1, lpid1, cpid1));
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId2, lpid2, cpid2));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2));

     LogicPoolID lpid3 = 3;
     CopysetID cpid3 = 3;
@@ -1932,29 +1660,22 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId4 = 4;

     SegmentInfo segInfo2;
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId3, lpid3, cpid3));
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId4, lpid4, cpid4));
-
-    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName,
-        user,
-        seqNum,
-        _,
-        _))
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3));
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4));
+
+    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _))
        .Times(2)
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo1),
-                        Return(LIBCURVE_ERROR::OK)))
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo2),
-                        Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK)))
+        .WillOnce(
+            DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK)));

     uint64_t chunkSn = 100;
     ChunkInfoDetail chunkInfo;
     chunkInfo.chunkSn.push_back(chunkSn);
     EXPECT_CALL(*client_, GetChunkInfo(_, _))
         .Times(4)
-        .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo),
-                              Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1972,41 +1693,32 @@ TEST_F(TestSnapshotCoreImpl,
     EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
         .Times(2)
-        .WillRepeatedly(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     ChunkIndexData indexData;
     indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0));
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess)));

     EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _))
         .Times(4)
         .WillRepeatedly(Return(kErrCodeSuccess));
-
     EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _))
         .Times(8)
-        .WillRepeatedly(DoAll(
-            Invoke([](ChunkIDInfo cidinfo,
-                      uint64_t seq,
-                      uint64_t offset,
-                      uint64_t len,
-                      char *buf,
-                      SnapCloneClosure* scc){
-                scc->SetRetCode(LIBCURVE_ERROR::OK);
-                scc->Run();
-            }),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset,
+                            uint64_t len, char* buf, SnapCloneClosure* scc) {
+                      scc->SetRetCode(LIBCURVE_ERROR::OK);
+                      scc->Run();
+                  }),
+                  Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _))
         .Times(8)
         .WillRepeatedly(Return(kErrCodeSuccess));
-
     EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _))
         .Times(4)
         .WillRepeatedly(Return(kErrCodeSuccess));
@@ -2025,7 +1737,7 @@
 }

 TEST_F(TestSnapshotCoreImpl,
-    TestHandleCreateSnapshotTaskExistIndexDataSuccess) {
+       TestHandleCreateSnapshotTaskExistIndexDataSuccess) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -2043,8 +1755,7 @@ TEST_F(TestSnapshotCoreImpl,
     std::shared_ptr<SnapshotTaskInfo> task =
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);

-    EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_))
-        .WillOnce(Return(true));
+    EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)).WillOnce(Return(true));

     ChunkIndexData indexData;
     indexData.PutChunkDataName(ChunkDataName(fileName, seqNum, 0));
@@ -2057,12 +1768,8 @@ TEST_F(TestSnapshotCoreImpl,
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
         .Times(2)
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData),
-            Return(kErrCodeSuccess)))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData2),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess)))
+        .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess)));

     LogicPoolID lpid1 = 1;
     CopysetID cpid1 = 1;
@@ -2072,10 +1779,8 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId2 = 2;

     SegmentInfo segInfo1;
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId1, lpid1, cpid1));
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId2, lpid2, cpid2));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2));

     LogicPoolID lpid3 = 3;
     CopysetID cpid3 = 3;
@@ -2085,21 +1790,14 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId4 = 4;

     SegmentInfo segInfo2;
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId3, lpid3, cpid3));
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId4, lpid4, cpid4));
-
-    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName,
-        user,
-        seqNum,
-        _,
-        _))
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3));
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4));
+
+    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _))
        .Times(2)
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo1),
-                        Return(LIBCURVE_ERROR::OK)))
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo2),
-                        Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK)))
+        .WillOnce(
+            DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK)));

     UUID uuid2 = "uuid2";
     std::string desc2 = "desc2";
@@ -2114,29 +1812,22 @@ TEST_F(TestSnapshotCoreImpl,
     EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
         .Times(2)
-        .WillRepeatedly(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
-
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _))
         .Times(4)
         .WillRepeatedly(Return(kErrCodeSuccess));
-
     EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _))
         .Times(8)
-        .WillRepeatedly(DoAll(
-            Invoke([](ChunkIDInfo cidinfo,
-                      uint64_t seq,
-                      uint64_t offset,
-                      uint64_t len,
-                      char *buf,
-                      SnapCloneClosure* scc){
-                scc->SetRetCode(LIBCURVE_ERROR::OK);
-                scc->Run();
-            }),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset,
+                            uint64_t len, char* buf, SnapCloneClosure* scc) {
+                      scc->SetRetCode(LIBCURVE_ERROR::OK);
+                      scc->Run();
+                  }),
+                  Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _))
         .Times(8)
@@ -2163,7 +1854,7 @@
 }

 TEST_F(TestSnapshotCoreImpl,
-    TestHandleCreateSnapshotTaskChunkSizeNotAlignTokChunkSplitSize) {
+       TestHandleCreateSnapshotTaskChunkSizeNotAlignTokChunkSplitSize) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -2180,8 +1871,7 @@ TEST_F(TestSnapshotCoreImpl,
     std::shared_ptr<SnapshotTaskInfo> task =
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);

-    EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_))
-        .WillOnce(Return(true));
+    EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)).WillOnce(Return(true));

     ChunkIndexData indexData;
     indexData.PutChunkDataName(ChunkDataName(fileName, seqNum, 0));
@@ -2194,13 +1884,8 @@ TEST_F(TestSnapshotCoreImpl,
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
         .Times(2)
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData),
-            Return(kErrCodeSuccess)))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData2),
-            Return(kErrCodeSuccess)));
-
+        .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess)))
+        .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess)));

     LogicPoolID lpid1 = 1;
     CopysetID cpid1 = 1;
@@ -2210,10 +1895,8 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId2 = 2;

     SegmentInfo segInfo1;
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId1, lpid1, cpid1));
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId2, lpid2, cpid2));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2));

     LogicPoolID lpid3 = 3;
     CopysetID cpid3 = 3;
@@ -2223,21 +1906,14 @@ TEST_F(TestSnapshotCoreImpl,
     ChunkID chunkId4 = 4;

     SegmentInfo segInfo2;
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId3, lpid3, cpid3));
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId4, lpid4, cpid4));
-
-    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName,
-        user,
-        seqNum,
-        _,
-        _))
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3));
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4));
+
+    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _))
        .Times(2)
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo1),
-                        Return(LIBCURVE_ERROR::OK)))
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo2),
-                        Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK)))
+        .WillOnce(
+            DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK)));

     UUID uuid2 = "uuid2";
     std::string desc2 = "desc2";
@@ -2252,9 +1928,8 @@ TEST_F(TestSnapshotCoreImpl,
     EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
         .Times(2)
-        .WillRepeatedly(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     EXPECT_CALL(*metaStore_, UpdateSnapshot(_))
         .Times(1)
         .WillRepeatedly(Return(kErrCodeSuccess));
@@ -2266,8 +1941,7 @@ TEST_F(TestSnapshotCoreImpl,
     ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus());
 }

-TEST_F(TestSnapshotCoreImpl,
-    TestHandleCreateSnapshotTaskChunkVecInfoMiss) {
+TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskChunkVecInfoMiss) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -2284,8 +1958,7 @@ TEST_F(TestSnapshotCoreImpl,
     std::shared_ptr<SnapshotTaskInfo> task =
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);

-    EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_))
-        .WillOnce(Return(true));
+    EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)).WillOnce(Return(true));

     ChunkIndexData indexData;
     indexData.PutChunkDataName(ChunkDataName(fileName, seqNum, 0));
@@ -2298,28 +1971,18 @@ TEST_F(TestSnapshotCoreImpl,
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
         .Times(2)
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData),
-            Return(kErrCodeSuccess)))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData2),
-            Return(kErrCodeSuccess)));
-
+        .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess)))
+        .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess)));

     SegmentInfo segInfo1;

     SegmentInfo segInfo2;

-    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName,
-        user,
-        seqNum,
-        _,
-        _))
+    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _))
        .Times(2)
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo1),
-                        Return(LIBCURVE_ERROR::OK)))
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo2),
-                        Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK)))
+        .WillOnce(
+            DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK)));

     UUID uuid2 = "uuid2";
     std::string desc2 = "desc2";
@@ -2334,9 +1997,8 @@ TEST_F(TestSnapshotCoreImpl,
     EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
         .Times(2)
-        .WillRepeatedly(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     EXPECT_CALL(*metaStore_, UpdateSnapshot(_))
         .Times(1)
         .WillRepeatedly(Return(kErrCodeSuccess));
@@ -2348,8 +2010,7 @@ TEST_F(TestSnapshotCoreImpl,
     ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus());
 }

-TEST_F(TestSnapshotCoreImpl,
-    TestHandleDeleteSnapshotTaskSuccess) {
+TEST_F(TestSnapshotCoreImpl, TestHandleDeleteSnapshotTaskSuccess) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -2374,9 +2035,7 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfos.push_back(info2);

     EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     ChunkIndexData indexData1;
     indexData1.PutChunkDataName(ChunkDataName(fileName, seqNum, 0));
@@ -2384,15 +2043,10 @@ TEST_F(TestSnapshotCoreImpl,
     indexData2.PutChunkDataName(ChunkDataName(fileName, 1, 1));
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
         .Times(2)
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData1),
-            Return(kErrCodeSuccess)))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData2),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(indexData1), Return(kErrCodeSuccess)))
+        .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess)));

-    EXPECT_CALL(*dataStore_, ChunkDataExist(_))
-        .WillRepeatedly(Return(true));
+    EXPECT_CALL(*dataStore_, ChunkDataExist(_)).WillRepeatedly(Return(true));
     EXPECT_CALL(*dataStore_, DeleteChunkData(_))
         .WillRepeatedly(Return(kErrCodeSuccess));
@@ -2412,7 +2066,7 @@
 }

 TEST_F(TestSnapshotCoreImpl,
-    TestHandleDeleteSnapshotTask_GetChunkIndexDataSecondTimeFail) {
+       TestHandleDeleteSnapshotTask_GetChunkIndexDataSecondTimeFail) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -2437,9 +2091,7 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfos.push_back(info2);

     EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     ChunkIndexData indexData1;
     indexData1.PutChunkDataName(ChunkDataName(fileName, seqNum, 0));
@@ -2447,12 +2099,9 @@ TEST_F(TestSnapshotCoreImpl,
     indexData2.PutChunkDataName(ChunkDataName(fileName, 1, 1));
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
         .Times(2)
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData1),
-            Return(kErrCodeSuccess)))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData2),
-            Return(kErrCodeInternalError)));
+        .WillOnce(DoAll(SetArgPointee<1>(indexData1), Return(kErrCodeSuccess)))
+        .WillOnce(
+            DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeInternalError)));

     EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_))
         .WillRepeatedly(Return(true));
@@ -2466,7 +2115,7 @@
 }

 TEST_F(TestSnapshotCoreImpl,
-    TestHandleDeleteSnapshotTask_DeleteChunkIndexDataFail) {
+       TestHandleDeleteSnapshotTask_DeleteChunkIndexDataFail) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -2491,9 +2140,7 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfos.push_back(info2);

     EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     ChunkIndexData indexData1;
     indexData1.PutChunkDataName(ChunkDataName(fileName, seqNum, 0));
@@ -2501,15 +2148,10 @@ TEST_F(TestSnapshotCoreImpl,
     indexData2.PutChunkDataName(ChunkDataName(fileName, 1, 1));
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
         .Times(2)
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData1),
-            Return(kErrCodeSuccess)))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData2),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(indexData1), Return(kErrCodeSuccess)))
+        .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess)));

-    EXPECT_CALL(*dataStore_, ChunkDataExist(_))
-        .WillRepeatedly(Return(true));
+    EXPECT_CALL(*dataStore_, ChunkDataExist(_)).WillRepeatedly(Return(true));

     EXPECT_CALL(*dataStore_, DeleteChunkData(_))
         .WillRepeatedly(Return(kErrCodeSuccess));
@@ -2528,8 +2170,7 @@ TEST_F(TestSnapshotCoreImpl,
     ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus());
 }

-TEST_F(TestSnapshotCoreImpl,
-    TestHandleDeleteSnapshotTaskDeleteSnapshotFail) {
+TEST_F(TestSnapshotCoreImpl, TestHandleDeleteSnapshotTaskDeleteSnapshotFail) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -2554,9 +2195,7 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfos.push_back(info2);

     EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     ChunkIndexData indexData1;
     indexData1.PutChunkDataName(ChunkDataName(fileName, seqNum, 0));
@@ -2564,15 +2203,10 @@ TEST_F(TestSnapshotCoreImpl,
     indexData2.PutChunkDataName(ChunkDataName(fileName, 1, 1));
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
         .Times(2)
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData1),
-            Return(kErrCodeSuccess)))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData2),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(indexData1), Return(kErrCodeSuccess)))
+        .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess)));

-    EXPECT_CALL(*dataStore_, ChunkDataExist(_))
-        .WillRepeatedly(Return(true));
+    EXPECT_CALL(*dataStore_, ChunkDataExist(_)).WillRepeatedly(Return(true));

     EXPECT_CALL(*dataStore_, DeleteChunkData(_))
         .WillRepeatedly(Return(kErrCodeSuccess));
@@ -2580,7 +2214,6 @@ TEST_F(TestSnapshotCoreImpl,
     EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_))
         .WillRepeatedly(Return(true));
-
     EXPECT_CALL(*dataStore_, DeleteChunkIndexData(_))
         .WillOnce(Return(kErrCodeSuccess));
@@ -2609,9 +2242,7 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) {
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);

     EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _))
-        .WillOnce(DoAll(
-            SetArgPointee<2>(seqNum),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK)));

     FInfo snapInfo;
     snapInfo.seqnum = 100;
@@ -2620,10 +2251,8 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) {
     snapInfo.length = 2 * snapInfo.segmentsize;
     snapInfo.ctime = 10;
     EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(snapInfo),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(
+            DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK)));
     EXPECT_CALL(*metaStore_, CASSnapshot(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -2636,10 +2265,8 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) {
     ChunkID chunkId2 = 2;

     SegmentInfo segInfo1;
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId1, lpid1, cpid1));
-    segInfo1.chunkvec.push_back(
-        ChunkIDInfo(chunkId2, lpid2, cpid2));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1));
+    segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2));

     LogicPoolID lpid3 = 3;
     CopysetID cpid3 = 3;
@@ -2649,29 +2276,22 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) {
     ChunkID chunkId4 = 4;

     SegmentInfo segInfo2;
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId3, lpid3, cpid3));
-    segInfo2.chunkvec.push_back(
-        ChunkIDInfo(chunkId4, lpid4, cpid4));
-
-    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName,
-        user,
-        seqNum,
-        _,
-        _))
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3));
+    segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4));
+
+    EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _))
        .Times(2)
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo1),
-                        Return(LIBCURVE_ERROR::OK)))
-        .WillOnce(DoAll(SetArgPointee<4>(segInfo2),
-                        Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK)))
+        .WillOnce(
+            DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK)));

     uint64_t chunkSn = 100;
     ChunkInfoDetail chunkInfo;
     chunkInfo.chunkSn.push_back(chunkSn);
     EXPECT_CALL(*client_, GetChunkInfo(_, _))
         .Times(4)
-        .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo),
-                              Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _))
         .WillOnce(Return(kErrCodeSuccess));
@@ -2689,60 +2309,50 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) {
     EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _))
         .Times(2)
-        .WillRepeatedly(DoAll(
-            SetArgPointee<1>(snapInfos),
-            Return(kErrCodeSuccess)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess)));

     ChunkIndexData indexData;
     indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0));
     EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _))
-        .WillOnce(DoAll(
-            SetArgPointee<1>(indexData),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess)));

     EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _))
         .Times(4)
         .WillRepeatedly(Return(kErrCodeSuccess));
-
     EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _))
         .Times(8)
-        .WillRepeatedly(DoAll(
-            Invoke([](ChunkIDInfo cidinfo,
-                      uint64_t seq,
-                      uint64_t offset,
-                      uint64_t len,
-                      char *buf,
-                      SnapCloneClosure* scc){
-                scc->SetRetCode(LIBCURVE_ERROR::OK);
-                scc->Run();
-            }),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillRepeatedly(
+            DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset,
+                            uint64_t len, char* buf, SnapCloneClosure* scc) {
+                      scc->SetRetCode(LIBCURVE_ERROR::OK);
+                      scc->Run();
+                  }),
+                  Return(LIBCURVE_ERROR::OK)));

     EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _))
         .Times(8)
         .WillRepeatedly(Return(kErrCodeSuccess));
-
     EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _))
         .Times(4)
         .WillRepeatedly(Return(kErrCodeSuccess));

-    // 此处捕获task,设置cancel
+    // Capture task here and set cancel
     EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum))
         .Times(2)
-        .WillOnce(Invoke([task](const std::string &filename,
-                        const std::string &user,
-                        uint64_t seq) -> int {
-                    task->Cancel();
-                    return kErrCodeSuccess;
-                }))
+        .WillOnce(Invoke([task](const std::string& filename,
+                                const std::string& user, uint64_t seq) -> int {
+            task->Cancel();
+            return kErrCodeSuccess;
+        }))
        .WillOnce(Return(LIBCURVE_ERROR::OK));

     EXPECT_CALL(*client_, CheckSnapShotStatus(_, _, _, _))
         .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST));

-    // 进入cancel
+    // Enter cancel
     EXPECT_CALL(*dataStore_, ChunkDataExist(_))
         .Times(4)
         .WillRepeatedly(Return(true));
@@ -2764,7 +2374,7 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) {
 }

 TEST_F(TestSnapshotCoreImpl,
-    TestHandleCreateSnapshotTaskCancelAfterCreateSnapshotOnCurvefs) {
+       TestHandleCreateSnapshotTaskCancelAfterCreateSnapshotOnCurvefs) {
     UUID uuid = "uuid1";
     std::string user = "user1";
     std::string fileName = "file1";
@@ -2778,9 +2388,7 @@ TEST_F(TestSnapshotCoreImpl,
         std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric);

     EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _))
-        .WillOnce(DoAll(
-            SetArgPointee<2>(seqNum),
-            Return(LIBCURVE_ERROR::OK)));
+        .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK)));

     FInfo snapInfo;
     snapInfo.seqnum = 100;
@@ -2789,19 +2397,17 @@ TEST_F(TestSnapshotCoreImpl,
     snapInfo.length = 2 * snapInfo.segmentsize;
     snapInfo.ctime = 10;
     EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(snapInfo),
-            Return(LIBCURVE_ERROR::OK)));
-
+        .WillOnce(
+            DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK)));

-    // 此处捕获task,设置cancel
+    // Capture task here and set cancel
     EXPECT_CALL(*metaStore_, CASSnapshot(_, _))
         .WillOnce(Invoke([task](const UUID& uuid, CASFunc cas) {
             task->Cancel();
             return kErrCodeSuccess;
         }));

-    // 进入cancel
+    // Enter cancel
     EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum))
         .WillOnce(Return(LIBCURVE_ERROR::OK));
-2818,7 +2424,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskCancelAfterCreateChunkIndexData) { + TestHandleCreateSnapshotTaskCancelAfterCreateChunkIndexData) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2832,9 +2438,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -2843,10 +2447,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -2859,10 +2461,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -2872,40 +2472,32 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) - .WillOnce(Invoke([task](const ChunkIndexDataName &name, - const ChunkIndexData &meta) { - task->Cancel(); - return kErrCodeSuccess; - })); + .WillOnce(Invoke( + [task](const ChunkIndexDataName& name, const ChunkIndexData& meta) { + task->Cancel(); + return kErrCodeSuccess; + })); - - // 进入cancel + // Enter cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .WillOnce(Return(LIBCURVE_ERROR::OK)); @@ -2925,7 +2517,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskCancelFailOnDeleteChunkData) { + TestHandleCreateSnapshotTaskCancelFailOnDeleteChunkData) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2939,9 +2531,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, 
snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -2950,10 +2540,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -2968,10 +2556,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -2981,29 +2567,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -3021,60 +2600,49 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + 
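            // Note: the stub completes the SnapCloneClosure inline --
            // SetRetCode() records the rpc result and Run() hands control
            // back to the snapshot core, mirroring how the real asynchronous
            // ReadChunkSnapshot reports completion.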
}), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) - .WillOnce(Invoke([task](const std::string &filename, - const std::string &user, - uint64_t seq) -> int { - task->Cancel(); - return kErrCodeSuccess; - })); + .WillOnce(Invoke([task](const std::string& filename, + const std::string& user, uint64_t seq) -> int { + task->Cancel(); + return kErrCodeSuccess; + })); EXPECT_CALL(*client_, CheckSnapShotStatus(_, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST)); - // 进入cancel - EXPECT_CALL(*dataStore_, ChunkDataExist(_)) - .WillRepeatedly(Return(true)); + // Enter cancel + EXPECT_CALL(*dataStore_, ChunkDataExist(_)).WillRepeatedly(Return(true)); EXPECT_CALL(*dataStore_, DeleteChunkData(_)) .WillRepeatedly(Return(kErrCodeInternalError)); @@ -3086,7 +2654,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskCancelFailOnDeleteChunkIndexData) { + TestHandleCreateSnapshotTaskCancelFailOnDeleteChunkIndexData) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -3100,9 +2668,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -3111,10 +2677,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -3127,10 +2691,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -3140,29 +2702,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, 
GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -3180,58 +2735,48 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) - .WillOnce(Invoke([task](const std::string &filename, - const std::string &user, - uint64_t seq) -> int { - task->Cancel(); - return kErrCodeSuccess; - })); + .WillOnce(Invoke([task](const std::string& filename, + const std::string& user, uint64_t seq) -> int { + task->Cancel(); + return kErrCodeSuccess; + })); EXPECT_CALL(*client_, CheckSnapShotStatus(_, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST)); - // 进入cancel + // Enter cancel EXPECT_CALL(*dataStore_, ChunkDataExist(_)) .Times(4) .WillRepeatedly(Return(true)); @@ -3250,7 +2795,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskCancelFailOnDeleteSnapshot) { + TestHandleCreateSnapshotTaskCancelFailOnDeleteSnapshot) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -3264,9 +2809,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -3275,10 +2818,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -3291,10 +2832,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - 
segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -3304,29 +2843,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -3344,60 +2876,50 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .Times(2) - .WillOnce(Invoke([task](const std::string &filename, - const std::string &user, - uint64_t seq) -> int { - task->Cancel(); - return kErrCodeSuccess; - })) + .WillOnce(Invoke([task](const std::string& filename, + const std::string& user, uint64_t seq) -> int { + task->Cancel(); + return kErrCodeSuccess; + })) .WillOnce(Return(LIBCURVE_ERROR::OK)); EXPECT_CALL(*client_, 
CheckSnapShotStatus(_, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST)); - // 进入cancel + // Enter cancel EXPECT_CALL(*dataStore_, ChunkDataExist(_)) .Times(4) .WillRepeatedly(Return(true)); @@ -3420,4 +2942,3 @@ TEST_F(TestSnapshotCoreImpl, } // namespace snapshotcloneserver } // namespace curve - diff --git a/test/snapshotcloneserver/test_snapshot_service_manager.cpp b/test/snapshotcloneserver/test_snapshot_service_manager.cpp index ba51d90f98..0af03c9315 100644 --- a/test/snapshotcloneserver/test_snapshot_service_manager.cpp +++ b/test/snapshotcloneserver/test_snapshot_service_manager.cpp @@ -20,25 +20,24 @@ * Author: xuchaojie */ -#include #include +#include -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" - -#include "test/snapshotcloneserver/mock_snapshot_server.h" #include "src/common/concurrent/count_down_event.h" +#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/snapshotclone_metric.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" +#include "test/snapshotcloneserver/mock_snapshot_server.h" using curve::common::CountDownEvent; -using ::testing::Return; using ::testing::_; -using ::testing::AnyOf; using ::testing::AllOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; +using ::testing::AnyOf; using ::testing::DoAll; +using ::testing::Invoke; using ::testing::Property; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace snapshotcloneserver { @@ -51,21 +50,16 @@ class TestSnapshotServiceManager : public ::testing::Test { virtual void SetUp() { serverOption_.snapshotPoolThreadNum = 8; serverOption_.snapshotTaskManagerScanIntervalMs = 100; - core_ = - std::make_shared(); - auto metaStore_ = - std::shared_ptr(); + core_ = std::make_shared(); + auto metaStore_ = std::shared_ptr(); snapshotMetric_ = std::make_shared(metaStore_); - std::shared_ptr - taskMgr_ = + std::shared_ptr taskMgr_ = std::make_shared(core_, snapshotMetric_); manager_ = std::make_shared(taskMgr_, core_); - ASSERT_EQ(0, manager_->Init(serverOption_)) - << "manager init fail."; - ASSERT_EQ(0, manager_->Start()) - << "manager start fail."; + ASSERT_EQ(0, manager_->Init(serverOption_)) << "manager init fail."; + ASSERT_EQ(0, manager_->Start()) << "manager start fail."; } virtual void TearDown() { @@ -75,31 +69,22 @@ class TestSnapshotServiceManager : public ::testing::Test { snapshotMetric_ = nullptr; } - void PrepareCreateSnapshot( - const std::string &file, - const std::string &user, - const std::string &desc, - UUID uuid) { + void PrepareCreateSnapshot(const std::string& file, const std::string& user, + const std::string& desc, UUID uuid) { SnapshotInfo info(uuid, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { - task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce(Invoke([&cond1](std::shared_ptr task) { + task->GetSnapshotInfo().SetStatus(Status::done); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, 
ret); cond1.Wait(); @@ -112,8 +97,7 @@ class TestSnapshotServiceManager : public ::testing::Test { SnapshotCloneServerOptions serverOption_; }; -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotSuccess) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotSuccess) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -122,32 +106,25 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -155,8 +132,7 @@ TEST_F(TestSnapshotServiceManager, ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotPreFail) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotPreFail) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -165,21 +141,13 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeInternalError))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeInternalError))); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeInternalError, ret); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotSuccessByTaskExist) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotSuccessByTaskExist) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -188,20 +156,13 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeTaskExist))); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeTaskExist))); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotPushTaskFail) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotPushTaskFail) { const std::string file1 = "file1"; const std::string user1 = "user1"; const std::string desc1 = "snap1"; @@ -209,33 +170,21 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuid1, user1, file1, desc1); EXPECT_CALL(*core_, CreateSnapshotPre(file1, user1, desc1, 
_)) - .WillRepeatedly(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillRepeatedly(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([] (std::shared_ptr task) { - })); + .WillOnce(Invoke([](std::shared_ptr task) {})); UUID uuid; - int ret = manager_->CreateSnapshot( - file1, - user1, - desc1, - &uuid); + int ret = manager_->CreateSnapshot(file1, user1, desc1, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); UUID uuid2; - ret = manager_->CreateSnapshot( - file1, - user1, - desc1, - &uuid2); + ret = manager_->CreateSnapshot(file1, user1, desc1, &uuid2); ASSERT_EQ(kErrCodeInternalError, ret); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(1, snapshotMetric_->snapshotDoing.get_value()); @@ -243,8 +192,7 @@ TEST_F(TestSnapshotServiceManager, ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotMultiThreadSuccess) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotMultiThreadSuccess) { const std::string file1 = "file1"; const std::string file2 = "file2"; const std::string file3 = "file3"; @@ -264,15 +212,9 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, CreateSnapshotPre(_, _, _, _)) .Times(3) - .WillOnce(DoAll( - SetArgPointee<3>(info1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info2), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info3), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info1), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info2), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info3), Return(kErrCodeSuccess))); std::condition_variable cv; std::mutex m; @@ -281,43 +223,28 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) .Times(3) - .WillRepeatedly(Invoke([&cv, &m, &count] ( - std::shared_ptr task) { - task->GetSnapshotInfo().SetStatus(Status::done); - std::unique_lock lk(m); - count++; - task->Finish(); - cv.notify_all(); - })); - - - int ret = manager_->CreateSnapshot( - file1, - user, - desc1, - &uuid); + .WillRepeatedly( + Invoke([&cv, &m, &count](std::shared_ptr task) { + task->GetSnapshotInfo().SetStatus(Status::done); + std::unique_lock lk(m); + count++; + task->Finish(); + cv.notify_all(); + })); + + int ret = manager_->CreateSnapshot(file1, user, desc1, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file2, - user, - desc2, - &uuid); + ret = manager_->CreateSnapshot(file2, user, desc2, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file3, - user, - desc3, - &uuid); + ret = manager_->CreateSnapshot(file3, user, desc3, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - cv.wait(lk, [&count](){return count == 3;}); + cv.wait(lk, [&count]() { return count == 3; }); - - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); ASSERT_EQ(3, snapshotMetric_->snapshotSucceed.get_value()); @@ -325,7 +252,7 @@ 
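// The expectations reformatted throughout this patch lean on one gMock idiom:
// DoAll(SetArgPointee<N>(value), Return(status)) makes a mocked call both
// fill its N-th argument (an out-parameter) and return a status code. A
// minimal, self-contained sketch of the idiom -- KvStore/MockKvStore are
// illustrative stand-ins, not types from this repository:

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <string>

class KvStore {
 public:
    virtual ~KvStore() = default;
    // Writes the value through the out-parameter and returns 0 on success.
    virtual int Get(const std::string& key, std::string* value) = 0;
};

class MockKvStore : public KvStore {
 public:
    MOCK_METHOD2(Get, int(const std::string& key, std::string* value));
};

TEST(KvStoreTest, GetFillsOutParamAndReturnsStatus) {
    MockKvStore store;
    // SetArgPointee<1> targets the second argument (index 1); Return(0)
    // supplies the return value; DoAll composes the two actions.
    EXPECT_CALL(store, Get("k", ::testing::_))
        .WillOnce(::testing::DoAll(
            ::testing::SetArgPointee<1>(std::string("v")),
            ::testing::Return(0)));

    std::string value;
    ASSERT_EQ(0, store.Get("k", &value));
    ASSERT_EQ("v", value);
}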
TEST_F(TestSnapshotServiceManager, } TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotMultiThreadSameFileSuccess) { + TestCreateSnapshotMultiThreadSameFileSuccess) { const std::string file1 = "file1"; const std::string user = "user1"; const std::string desc1 = "snap1"; @@ -343,52 +270,32 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, CreateSnapshotPre(_, _, _, _)) .Times(3) - .WillOnce(DoAll( - SetArgPointee<3>(info1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info2), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info3), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info1), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info2), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info3), Return(kErrCodeSuccess))); CountDownEvent cond1(3); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) .Times(3) - .WillRepeatedly(Invoke([&cond1] ( - std::shared_ptr task) { - task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); - - - int ret = manager_->CreateSnapshot( - file1, - user, - desc1, - &uuid); + .WillRepeatedly( + Invoke([&cond1](std::shared_ptr task) { + task->GetSnapshotInfo().SetStatus(Status::done); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file1, user, desc1, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file1, - user, - desc2, - &uuid); + ret = manager_->CreateSnapshot(file1, user, desc2, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file1, - user, - desc3, - &uuid); + ret = manager_->CreateSnapshot(file1, user, desc3, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -408,19 +315,18 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotSuccess) { .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -428,7 +334,7 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotSuccess) { ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } -// 删除转cancel用例 +// Delete to cancel use case TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelSuccess) { const std::string file = "file1"; const std::string user = "user1"; @@ -438,30 +344,23 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelSuccess) { SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); 
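// Many of these tests synchronize on curve::common::CountDownEvent: the
// mocked task calls cond1.Signal() when it finishes, and the test body blocks
// in cond1.Wait() before asserting on metrics. A minimal sketch of the latch
// semantics the tests rely on, assuming the real class behaves like a
// standard count-down latch:

#include <condition_variable>
#include <mutex>

class CountDownLatch {
 public:
    explicit CountDownLatch(int count) : count_(count) {}

    // Called by the worker side (here: the mocked Handle*SnapshotTask).
    void Signal() {
        std::lock_guard<std::mutex> lk(mutex_);
        if (count_ > 0 && --count_ == 0) {
            cv_.notify_all();
        }
    }

    // Called by the test thread; returns once Signal() ran `count` times.
    void Wait() {
        std::unique_lock<std::mutex> lk(mutex_);
        cv_.wait(lk, [this] { return count_ == 0; });
    }

 private:
    std::mutex mutex_;
    std::condition_variable cv_;
    int count_;
};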
+ .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] ( - std::shared_ptr task) { - LOG(INFO) << "in HandleCreateSnapshotTask"; - while (1) { - if (task->IsCanceled()) { - break; - } - } - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce(Invoke([&cond1](std::shared_ptr task) { + LOG(INFO) << "in HandleCreateSnapshotTask"; + while (1) { + if (task->IsCanceled()) { + break; + } + } + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); @@ -496,19 +395,18 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelByDeleteSuccess) { .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -516,8 +414,6 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelByDeleteSuccess) { ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } - - TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotPreFail) { const std::string file = "file1"; const std::string user = "user1"; @@ -543,10 +439,10 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotPushTaskFail) { .WillRepeatedly(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - cond1.Signal(); - })); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); @@ -555,9 +451,8 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotPushTaskFail) { ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeInternalError, ret); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(1, snapshotMetric_->snapshotDoing.get_value()); @@ -579,19 +474,18 @@ TEST_F(TestSnapshotServiceManager, TestCreateAndDeleteSnapshotSuccess) { .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 
2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -599,7 +493,6 @@ TEST_F(TestSnapshotServiceManager, TestCreateAndDeleteSnapshotSuccess) { ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } - TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { const std::string file = "file1"; const std::string user = "user1"; @@ -610,29 +503,22 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke( - [&cond1, progress] (std::shared_ptr task) { - task->SetProgress(progress); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, progress](std::shared_ptr task) { + task->SetProgress(progress); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - const std::string file2 = "file2"; const std::string desc2 = "snap2"; UUID uuid2 = "uuid2"; @@ -659,8 +545,7 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { snapInfo.push_back(snap4); EXPECT_CALL(*core_, GetFileSnapshotInfo(file, _)) - .WillOnce(DoAll(SetArgPointee<1>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapInfo), Return(kErrCodeSuccess))); std::vector fileSnapInfo; ret = manager_->GetFileSnapshotInfo(file, user, &fileSnapInfo); @@ -688,8 +573,7 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { ASSERT_EQ(Status::error, s.GetStatus()); ASSERT_EQ(0, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } } @@ -702,8 +586,8 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoFail) { std::vector snapInfo; EXPECT_CALL(*core_, GetFileSnapshotInfo(file, _)) - .WillOnce(DoAll(SetArgPointee<1>(snapInfo), - Return(kErrCodeInternalError))); + .WillOnce( + DoAll(SetArgPointee<1>(snapInfo), Return(kErrCodeInternalError))); std::vector fileSnapInfo; int ret = manager_->GetFileSnapshotInfo(file, user, &fileSnapInfo); @@ -722,8 +606,8 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoFail2) { snapInfo.push_back(snap1); EXPECT_CALL(*core_, GetFileSnapshotInfo(file, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(snapInfo), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfo), Return(kErrCodeSuccess))); std::vector fileSnapInfo; int ret = manager_->GetFileSnapshotInfo(file, user, &fileSnapInfo); @@ -740,29 +624,22 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke( - [&cond1, progress] (std::shared_ptr task) { - task->SetProgress(progress); - cond1.Signal(); - })); - - 
int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, progress](std::shared_ptr task) { + task->SetProgress(progress); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - const std::string file2 = "file2"; const std::string desc2 = "snap2"; UUID uuid2 = "uuid2"; @@ -789,8 +666,7 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { snapInfo.push_back(snap4); EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // empty filter SnapshotFilterCondition filter; @@ -826,14 +702,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::error, s.GetStatus()); ASSERT_EQ(0, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter uuid SnapshotFilterCondition filter2; @@ -852,14 +726,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::pending, s.GetStatus()); ASSERT_EQ(progress, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter by filename SnapshotFilterCondition filter3; @@ -890,14 +762,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::error, s.GetStatus()); ASSERT_EQ(0, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter by status SnapshotFilterCondition filter4; @@ -923,14 +793,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::done, s.GetStatus()); ASSERT_EQ(100, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter by user SnapshotFilterCondition filter5; @@ -949,8 +817,7 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::done, s.GetStatus()); ASSERT_EQ(100, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } } @@ -964,8 +831,8 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterFail) { std::vector snapInfo; EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeInternalError))); + .WillOnce( + DoAll(SetArgPointee<0>(snapInfo), 
Return(kErrCodeInternalError))); SnapshotFilterCondition filter; std::vector fileSnapInfo; @@ -993,32 +860,30 @@ TEST_F(TestSnapshotServiceManager, TestRecoverSnapshotTaskSuccess) { list.push_back(snap2); list.push_back(snap3); EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(list), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(list), Return(kErrCodeSuccess))); CountDownEvent cond1(2); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->RecoverSnapshotTask(); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -1041,15 +906,13 @@ TEST_F(TestSnapshotServiceManager, TestRecoverSnapshotTaskFail) { list.push_back(snap1); list.push_back(snap2); EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(list), - Return(kErrCodeInternalError))); + .WillOnce(DoAll(SetArgPointee<0>(list), Return(kErrCodeInternalError))); int ret = manager_->RecoverSnapshotTask(); ASSERT_EQ(kErrCodeInternalError, ret); } -TEST_F(TestSnapshotServiceManager, - TestCancelSnapshotSuccess) { +TEST_F(TestSnapshotServiceManager, TestCancelSnapshotSuccess) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -1062,31 +925,27 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info2(uuidOut2, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info2), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info2), Return(kErrCodeSuccess))); CountDownEvent cond1(1); CountDownEvent cond2(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1, &cond2] ( - std::shared_ptr task) { - LOG(INFO) << "in mock HandleCreateSnapshotTask"; - while (1) { - if (task->IsCanceled()) { - cond1.Signal(); - break; - } - } - task->Finish(); - cond2.Signal(); - })); - - // 取消排队的快照会调一次 + .WillOnce( + Invoke([&cond1, &cond2](std::shared_ptr task) { + LOG(INFO) << "in mock HandleCreateSnapshotTask"; + while (1) { + if (task->IsCanceled()) { + cond1.Signal(); + break; + } + } + task->Finish(); + cond2.Signal(); + })); + + // Unqueued snapshots will be called once EXPECT_CALL(*core_, HandleCancelUnSchduledSnapshotTask(_)) .WillOnce(Return(kErrCodeSuccess)); @@ -1099,32 +958,20 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, HandleCancelScheduledSnapshotTask(_)) .WillOnce(Invoke(callback)); - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + int ret = manager_->CreateSnapshot(file, user, 
desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); - // 再打一个快照,覆盖排队的情况 - ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid2); + // Take another snapshot to cover the queuing situation + ret = manager_->CreateSnapshot(file, user, desc, &uuid2); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid2, uuidOut2); - // 先取消在排队的快照 - ret = manager_->CancelSnapshot(uuidOut2, - user, - file); + // Cancel queued snapshots first + ret = manager_->CancelSnapshot(uuidOut2, user, file); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CancelSnapshot(uuidOut, - user, - file); + ret = manager_->CancelSnapshot(uuidOut, user, file); ASSERT_EQ(kErrCodeSuccess, ret); @@ -1132,8 +979,7 @@ TEST_F(TestSnapshotServiceManager, cond2.Wait(); } -TEST_F(TestSnapshotServiceManager, - TestCancelSnapshotFailDiffUser) { +TEST_F(TestSnapshotServiceManager, TestCancelSnapshotFailDiffUser) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -1142,41 +988,32 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); CountDownEvent cond2(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1, &cond2] ( - std::shared_ptr task) { - cond2.Wait(); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, &cond2](std::shared_ptr task) { + cond2.Wait(); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); std::string user2 = "user2"; - ret = manager_->CancelSnapshot(uuidOut, - user2, - file); + ret = manager_->CancelSnapshot(uuidOut, user2, file); cond2.Signal(); ASSERT_EQ(kErrCodeInvalidUser, ret); cond1.Wait(); } -TEST_F(TestSnapshotServiceManager, - TestCancelSnapshotFailDiffFile) { +TEST_F(TestSnapshotServiceManager, TestCancelSnapshotFailDiffFile) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -1185,40 +1022,30 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); CountDownEvent cond2(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1, &cond2] ( - std::shared_ptr task) { - cond2.Wait(); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, &cond2](std::shared_ptr task) { + cond2.Wait(); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); std::string file2 = "file2"; - ret = manager_->CancelSnapshot(uuidOut, - user, - file2); + ret = manager_->CancelSnapshot(uuidOut, user, file2); cond2.Signal(); ASSERT_EQ(kErrCodeFileNameNotMatch, ret); cond1.Wait(); } - } // namespace snapshotcloneserver } // namespace curve - diff --git a/test/tools/chunkserver_client_test.cpp b/test/tools/chunkserver_client_test.cpp index 
9af94d01d3..b88d1fab08 100644 --- a/test/tools/chunkserver_client_test.cpp +++ b/test/tools/chunkserver_client_test.cpp @@ -20,14 +20,15 @@ * Author: charisu */ -#include #include "src/tools/chunkserver_client.h" -#include "test/client/fake/mockMDS.h" + +#include + #include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mockMDS.h" -using curve::chunkserver::GetChunkInfoResponse; using curve::chunkserver::CHUNK_OP_STATUS; - +using curve::chunkserver::GetChunkInfoResponse; DECLARE_string(chunkserver_list); namespace brpc { @@ -46,9 +47,7 @@ class ChunkServerClientTest : public ::testing::Test { fakemds.Initialize(); fakemds.CreateFakeChunkservers(false); } - void TearDown() { - fakemds.UnInitialize(); - } + void TearDown() { fakemds.UnInitialize(); } ChunkServerClient client; FakeMDS fakemds; }; @@ -59,37 +58,36 @@ TEST_F(ChunkServerClientTest, Init) { } TEST_F(ChunkServerClientTest, GetRaftStatus) { - std::vector statServices = - fakemds.GetRaftStateService(); - // 正常情况 + std::vector statServices = + fakemds.GetRaftStateService(); + // Normal situation butil::IOBuf iobuf; iobuf.append("test"); statServices[0]->SetBuf(iobuf); ASSERT_EQ(0, client.Init("127.0.0.1:9191")); ASSERT_EQ(0, client.GetRaftStatus(&iobuf)); - // 传入空指针 + // Incoming null pointer ASSERT_EQ(-1, client.GetRaftStatus(nullptr)); - // RPC失败的情况 + // The situation of RPC failure statServices[0]->SetFailed(true); ASSERT_EQ(-1, client.GetRaftStatus(&iobuf)); } TEST_F(ChunkServerClientTest, CheckChunkServerOnline) { - std::vector chunkServices = fakemds.GetChunkservice(); + std::vector chunkServices = fakemds.GetChunkservice(); brpc::Controller cntl; - std::unique_ptr response( - new GetChunkInfoResponse()); + std::unique_ptr response(new GetChunkInfoResponse()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); std::unique_ptr fakeret( new FakeReturn(&cntl, static_cast(response.get()))); chunkServices[0]->SetGetChunkInfo(fakeret.get()); - // 正常情况 + // Normal situation ASSERT_EQ(0, client.Init("127.0.0.1:9191")); ASSERT_EQ(true, client.CheckChunkServerOnline()); - // RPC失败的情况 + // The situation of RPC failure cntl.SetFailed("fail for test"); ASSERT_EQ(false, client.CheckChunkServerOnline()); } @@ -98,23 +96,23 @@ TEST_F(ChunkServerClientTest, GetCopysetStatus2) { auto copysetServices = fakemds.GetCreateCopysetService(); CopysetStatusRequest request; CopysetStatusResponse response; - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address("127.0.0.1:9191"); request.set_logicpoolid(1); request.set_copysetid(1001); request.set_allocated_peer(peer); request.set_queryhash(true); - // 正常情况 + // Normal situation ASSERT_EQ(0, client.Init("127.0.0.1:9191")); ASSERT_EQ(0, client.GetCopysetStatus(request, &response)); - // 返回码不ok的情况 + // The situation where the return code is not OK copysetServices[0]->SetStatus( COPYSET_OP_STATUS::COPYSET_OP_STATUS_COPYSET_NOTEXIST); ASSERT_EQ(-1, client.GetCopysetStatus(request, &response)); - // RPC失败的情况 + // The situation of RPC failure brpc::Controller cntl; std::unique_ptr fakeret(new FakeReturn(&cntl, nullptr)); copysetServices[0]->SetFakeReturn(fakeret.get()); @@ -122,27 +120,26 @@ TEST_F(ChunkServerClientTest, GetCopysetStatus2) { } TEST_F(ChunkServerClientTest, GetChunkHash) { - std::vector chunkServices = fakemds.GetChunkservice(); + std::vector chunkServices = fakemds.GetChunkservice(); brpc::Controller cntl; - std::unique_ptr response( - new GetChunkHashResponse()); + std::unique_ptr response(new 
GetChunkHashResponse()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response->set_hash("1234"); std::unique_ptr fakeret( new FakeReturn(&cntl, static_cast(response.get()))); chunkServices[0]->SetGetChunkHash(fakeret.get()); Chunk chunk(1, 100, 1001); - // 正常情况 + // Normal situation ASSERT_EQ(0, client.Init("127.0.0.1:9191")); std::string hash; ASSERT_EQ(0, client.GetChunkHash(chunk, &hash)); ASSERT_EQ("1234", hash); - // RPC失败的情况 + // The situation of RPC failure cntl.SetFailed("fail for test"); ASSERT_EQ(-1, client.GetChunkHash(chunk, &hash)); - // 返回码不为ok + // The return code is not OK response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); ASSERT_EQ(-1, client.GetChunkHash(chunk, &hash)); } diff --git a/test/tools/config/data_check.conf b/test/tools/config/data_check.conf index 7380f75bd5..0f93452c72 100644 --- a/test/tools/config/data_check.conf +++ b/test/tools/config/data_check.conf @@ -15,131 +15,131 @@ # # -# mds一侧配置信息 +# MDS side configuration information # -# mds的地址信息 +# Address information of mds mds.listen.addr=127.0.0.1:9160 -# 初始化阶段向mds注册开关,默认为开 +# Register switch with mds during initialization phase, default to on mds.registerToMDS=true -# 与mds通信的超时时间 +# Time out for communication with mds mds.rpcTimeoutMS=1000 -# 与mds通信最大的超时时间, 指数退避的超时间不能超过这个值 +# The maximum timeout time for communication with MDS, and the timeout for exponential backoff cannot exceed this value mds.maxRPCTimeoutMS=2000 -# 在当前mds上连续重试次数超过该限制就切换 +# Switch if the number of consecutive retries on the current mds exceeds this limit mds.maxFailedTimesBeforeChangeMDS=5 -# 与MDS一侧保持一个lease时间内多少次续约 +# How many renewals are there within a lease period with MDS mds.refreshTimesPerLease=4 -# mds RPC接口每次重试之前需要先睡眠一段时间 +# The mds RPC interface requires a period of sleep before each retry mds.rpcRetryIntervalUS=100000 # -################# metacache配置信息 ################ +################# Metacache Configuration Information ################ # -# 获取leader的rpc超时时间 +# Obtain the rpc timeout of the leader metacache.getLeaderTimeOutMS=1000 -# 获取leader的backup request超时时间 +# Obtain the backup request timeout for the leader metacache.getLeaderBackupRequestMS=100 -# 获取leader的重试次数 +# Retrieve the number of retries for the leader metacache.getLeaderRetry=3 -# getleader接口每次重试之前需要先睡眠一段时间 +# The getleader interface needs to sleep for a period of time before each retry metacache.rpcRetryIntervalUS=100000 # -############### 调度层的配置信息 ############# +###############Configuration information of the scheduling layer############# # -# 调度层队列大小,每个文件对应一个队列 -# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。 +# Scheduling layer queue size, with one queue for each file +# The depth of the scheduling queue can affect the overall throughput of the client, as it stores asynchronous IO tasks.. schedule.queueCapacity=4096 -# 队列的执行线程数量 -# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从 -# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候 -# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w -# 性能已经满足需求 +# Number of Execution Threads for the Queue +# The task of execution threads is to retrieve IO and then send it over the network before moving on to the next network task. +# The time it takes for a task to be retrieved from the queue and the RPC request to be sent typically ranges from 20 microseconds (20us) to 100 microseconds (100us). +# The lower end of this range, 20us, is under normal conditions when leader acquisition is not required during transmission. 
If leader acquisition is necessary during transmission, the time may extend to around 100us. +# The throughput of a single thread ranges from 100,000 (10w) to 500,000 (50w) tasks per second. This performance level meets the requirements. schedule.threadpoolSize=2 -# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程 -# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回, -# 这样libcurve不占用qemu的线程,不阻塞其异步调用 +# To isolate the task queue introduced by the QEMU side thread, as there is only one IO thread on the QEMU side +# When the QEMU side calls the AIO interface, it directly pushes the call to the task queue and returns, +# This way, libcurve does not occupy QEMU's threads and does not block its asynchronous calls isolation.taskQueueCapacity=500000 -# 任务队列线程池大小, 默认值为1个线程 +# Task queue thread pool size, default value is 1 thread isolation.taskThreadPoolSize=1 # -################ 与chunkserver通信相关配置 ############# +################ Configuration related to communication with chunkserver ############# # -# 读写接口失败的OP之间重试睡眠 +# Retrying sleep between OPs with failed read/write interfaces chunkserver.opRetryIntervalUS=50000 -# 失败的OP重试次数 +# Number of failed OP retries chunkserver.opMaxRetry=3 -# 与chunkserver通信的rpc超时时间 +# RPC timeout for communication with chunkserver chunkserver.rpcTimeoutMS=1000 -# 开启基于appliedindex的读,用于性能优化 +# Enable reading based on appliedindex for performance optimization chunkserver.enableAppliedIndexRead=1 -# 下发IO最大的分片KB +# Maximum sharding KB for issuing IO global.fileIOSplitMaxSizeKB=4 -# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立 +# libcurve allows for the maximum number of unreturned rpcs in the underlying rpc scheduling, with each file's inflight RPC being independent global.fileMaxInFlightRPCNum=2048 -# 重试请求之间睡眠最长时间 -# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间 -# 这个时间最大为maxRetrySleepIntervalUs +# Maximum sleep time between retry requests +# Because when the network is congested or the chunkserver is overloaded, it is necessary to increase sleep time +# The maximum time for this is maxRetrySleepIntervalUs chunkserver.maxRetrySleepIntervalUS=8000000 -# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略 -# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间 -# 这个时间最大为maxTimeoutMS +# The maximum timeout rpc time for retry requests, which follows an exponential backoff strategy +# Because timeout occurs when the network is congested, it is necessary to increase the RPC timeout time +# The maximum time for this is maxTimeoutMS chunkserver.maxRPCTimeoutMS=8000 -# 同一个chunkserver连续超时上限次数 -# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable +# Maximum number of consecutive timeouts for the same chunkserver +# If this value is exceeded, a health check will be conducted, and if the health check fails, it will be marked as unstable chunkserver.maxStableTimeoutTimes=64 -# chunkserver上rpc连续超时后,健康检查请求的超时间 +# The timeout of health check requests after consecutive RPC timeouts on chunkserver chunkserver.checkHealthTimeoutMs=100 -# 同一个server上unstable的chunkserver数量超过这个值之后 -# 所有的chunkserver都会标记为unstable +# After the number of unstable chunkservers on the same server exceeds this value +# All chunkservers will be marked as unstable chunkserver.serverStableThreshold=3 -# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候 -# 记为悬挂IO,metric会报警 +# When an RPC retry exceeds maxRetryTimesBeforeConsiderSuspend +# Record as suspended IO, metric will alarm chunkserver.maxRetryTimesBeforeConsiderSuspend=20 chunkserver.opRetryIntervalUS=100000 metacache.getLeaderBackupRequestMS=100 # -################# log相关配置 ############### +################# Log related configuration ############### # -# 
log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.logLevel=0 -# 设置log的路径 +# Set the path of the log global.logPath=./runlog/ -# 单元测试情况下 +# In the case of unit testing # logpath=./runlog/ # -############### metric 配置信息 ############# +############### metric configuration information ############# # global.metricDummyServerStartPort=9000 # -# session map文件,存储打开文件的filename到path的映射 +# session map file, storing the mapping from filename to path of the opened file # global.sessionMapPath=./session_map.json diff --git a/test/tools/copyset_check_core_test.cpp b/test/tools/copyset_check_core_test.cpp index 9ef6de55ce..ef085e2548 100644 --- a/test/tools/copyset_check_core_test.cpp +++ b/test/tools/copyset_check_core_test.cpp @@ -20,20 +20,22 @@ * Author: charisu */ -#include #include "src/tools/copyset_check_core.h" -#include "test/tools/mock/mock_mds_client.h" + +#include + #include "test/tools/mock/mock_chunkserver_client.h" +#include "test/tools/mock/mock_mds_client.h" -using ::testing::_; -using ::testing::Return; -using ::testing::DoAll; -using ::testing::SetArgPointee; -using ::testing::An; using curve::mds::topology::ChunkServerStatus; +using curve::mds::topology::CopySetServerInfo; using curve::mds::topology::DiskState; using curve::mds::topology::OnlineState; -using curve::mds::topology::CopySetServerInfo; +using ::testing::_; +using ::testing::An; +using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; DECLARE_uint64(operatorMaxPeriod); DECLARE_bool(checkOperator); @@ -69,9 +71,9 @@ class CopysetCheckCoreTest : public ::testing::Test { } } - void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo *csInfo, - uint64_t csId, bool offline = false, - bool retired = false) { + void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo* csInfo, + uint64_t csId, bool offline = false, + bool retired = false) { csInfo->set_chunkserverid(csId); csInfo->set_disktype("ssd"); csInfo->set_hostip("127.0.0.1"); @@ -93,7 +95,7 @@ class CopysetCheckCoreTest : public ::testing::Test { csInfo->set_diskused(512); } - void GetServerInfoForTest(curve::mds::topology::ServerInfo *serverInfo) { + void GetServerInfoForTest(curve::mds::topology::ServerInfo* serverInfo) { serverInfo->set_serverid(1); serverInfo->set_hostname("localhost"); serverInfo->set_internalip("127.0.0.1"); @@ -108,24 +110,24 @@ class CopysetCheckCoreTest : public ::testing::Test { } void GetIoBufForTest(butil::IOBuf* buf, const std::string& gId, - const std::string& state = "FOLLOWER", - bool noLeader = false, - bool installingSnapshot = false, - bool peersLess = false, - bool gapBig = false, - bool parseErr = false, - bool minOffline = false, - bool majOffline = false) { + const std::string& state = "FOLLOWER", + bool noLeader = false, bool installingSnapshot = false, + bool peersLess = false, bool gapBig = false, + bool parseErr = false, bool minOffline = false, + bool majOffline = false) { butil::IOBufBuilder os; - os << "[" << gId << "]\r\n"; + os << "[" << gId << "]\r\n"; if (peersLess) { os << "peers: \r\n"; } else if (minOffline) { - os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 127.0.0.1:9194:0\r\n"; // NOLINT + os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 " + "127.0.0.1:9194:0\r\n"; // NOLINT } else if (majOffline) { - os << "peers: 127.0.0.1:9191:0 127.0.0.1:9194:0 127.0.0.1:9195:0\r\n"; // NOLINT + os << "peers: 127.0.0.1:9191:0 127.0.0.1:9194:0 " + "127.0.0.1:9195:0\r\n"; // NOLINT } else { - os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 
127.0.0.1:9193:0\r\n";  // NOLINT
+            os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 "
+                  "127.0.0.1:9193:0\r\n";  // NOLINT
         }
         os << "storage: [2581, 2580]\n";
         if (parseErr) {
@@ -135,7 +137,9 @@ class CopysetCheckCoreTest : public ::testing::Test {
         }
         os << "state_machine: Idle\r\n";
         if (state == "LEADER") {
-            os << "state: " << "LEADER" << "\r\n";
+            os << "state: "
+               << "LEADER"
+               << "\r\n";
             os << "replicator_123: next_index=";
             if (gapBig) {
                 os << "1000";
@@ -150,11 +154,15 @@ class CopysetCheckCoreTest : public ::testing::Test {
             }
             os << "hc=4211759 ac=1089 ic=0\r\n";
         } else if (state == "FOLLOWER") {
-            os << "state: " << "FOLLOWER" << "\r\n";
+            os << "state: "
+               << "FOLLOWER"
+               << "\r\n";
             if (noLeader) {
-                os << "leader: " << "0.0.0.0:0:0\r\n";
+                os << "leader: "
+                   << "0.0.0.0:0:0\r\n";
             } else {
-                os << "leader: " << "127.0.0.1:9192:0\r\n";
+                os << "leader: "
+                   << "127.0.0.1:9192:0\r\n";
             }
         } else {
             os << "state: " << state << "\r\n";
@@ -176,7 +184,7 @@ TEST_F(CopysetCheckCoreTest, Init) {
     ASSERT_EQ(-1, copysetCheck.Init("127.0.0.1:6666"));
 }

-// CheckOneCopyset正常情况
+// CheckOneCopyset normal cases
 TEST_F(CopysetCheckCoreTest, CheckOneCopysetNormal) {
     std::vector csLocs;
     butil::IOBuf followerBuf;
@@ -191,17 +199,12 @@ TEST_F(CopysetCheckCoreTest, CheckOneCopysetNormal) {

     EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySet(_, _, _))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<2>(csLocs),
-                        Return(0)));
-    EXPECT_CALL(*csClient_, Init(_))
-        .Times(6)
-        .WillRepeatedly(Return(0));
+        .WillOnce(DoAll(SetArgPointee<2>(csLocs), Return(0)));
+    EXPECT_CALL(*csClient_, Init(_)).Times(6).WillRepeatedly(Return(0));
     EXPECT_CALL(*csClient_, GetRaftStatus(_))
         .Times(6)
-        .WillOnce(DoAll(SetArgPointee<0>(leaderBuf),
-                        Return(0)))
-        .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf),
-                              Return(0)));
+        .WillOnce(DoAll(SetArgPointee<0>(leaderBuf), Return(0)))
+        .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf), Return(0)));
     CopysetCheckCore copysetCheck(mdsClient_, csClient_);
     ASSERT_EQ(CheckResult::kHealthy, copysetCheck.CheckOneCopyset(1, 100));
     butil::IOBuf iobuf;
@@ -215,7 +218,7 @@ TEST_F(CopysetCheckCoreTest, CheckOneCopysetNormal) {
     ASSERT_EQ(iobuf.to_string(), copysetCheck.GetCopysetDetail());
 }

-// CheckOneCopyset异常情况
+// CheckOneCopyset error cases
 TEST_F(CopysetCheckCoreTest, CheckOneCopysetError) {
     std::vector csLocs;
     butil::IOBuf followerBuf;
@@ -231,52 +234,45 @@ TEST_F(CopysetCheckCoreTest, CheckOneCopysetError) {
     copyset.set_logicalpoolid(1);
     copyset.set_copysetid(100);

-    // 1、GetChunkServerListInCopySet失败
+    // 1. GetChunkServerListInCopySet fails
     EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySet(_, _, _))
         .Times(1)
         .WillOnce(Return(-1));
     CopysetCheckCore copysetCheck1(mdsClient_, csClient_);
     ASSERT_EQ(CheckResult::kOtherErr, copysetCheck1.CheckOneCopyset(1, 100));

-    // 2、copyset不健康
+    // 2. The copyset is unhealthy
     GetIoBufForTest(&followerBuf, "4294967396", "FOLLOWER", true);
     EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySet(_, _, _))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<2>(csLocs),
-                        Return(0)));
-    EXPECT_CALL(*csClient_, Init(_))
-        .Times(3)
-        .WillRepeatedly(Return(0));
+        .WillOnce(DoAll(SetArgPointee<2>(csLocs), Return(0)));
+    EXPECT_CALL(*csClient_, Init(_)).Times(3).WillRepeatedly(Return(0));
     EXPECT_CALL(*csClient_, GetRaftStatus(_))
         .Times(3)
-        .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf),
-                              Return(0)));
+        .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf), Return(0)));
     CopysetCheckCore copysetCheck2(mdsClient_, csClient_);
     ASSERT_EQ(CheckResult::kOtherErr, copysetCheck2.CheckOneCopyset(1, 100));

-    // 3、有peer不在线,一个是chunkserver不在线,一个是copyset不在线
+    // 3. Some peers are offline: one because its chunkserver is offline, the
+    // other because its copyset is offline
     GetIoBufForTest(&followerBuf, "4294967397");
     EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySet(_, _, _))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<2>(csLocs),
-                        Return(0)));
     EXPECT_CALL(*csClient_, Init(_))
         .Times(4)
         .WillOnce(Return(-1))
         .WillRepeatedly(Return(0));
     EXPECT_CALL(*csClient_, GetRaftStatus(_))
         .Times(3)
-        .WillOnce(DoAll(SetArgPointee<0>(leaderBuf),
-                        Return(0)))
-        .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf),
-                              Return(0)));
+        .WillOnce(DoAll(SetArgPointee<0>(leaderBuf), Return(0)))
+        .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf), Return(0)));
     CopysetCheckCore copysetCheck3(mdsClient_, csClient_);
     ASSERT_EQ(CheckResult::kMajorityPeerNotOnline,
-        copysetCheck3.CheckOneCopyset(1, 100));
+              copysetCheck3.CheckOneCopyset(1, 100));
 }
-
-// CheckCopysetsOnChunkserver正常情况
+// CheckCopysetsOnChunkserver normal cases
 TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerHealthy) {
     ChunkServerIdType csId = 1;
     std::string csAddr = "127.0.0.1:9191";
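The two tests above exercise the whole call pattern that the copyset checker
commands use. A minimal sketch of that pattern, assuming only the calls
visible in this file (types and error handling abridged, illustrative rather
than quoted from the repository):

    CopysetCheckCore check(mdsClient, csClient);
    if (check.Init("127.0.0.1:6666") != 0) {
        // MDS unreachable: nothing can be checked
        return -1;
    }
    CheckResult res = check.CheckOneCopyset(1, 100);  // logical pool 1, copyset 100
    if (res != CheckResult::kHealthy) {
        std::cout << check.GetCopysetDetail();  // raw raft status of the copyset
    }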
@@ -297,63 +293,52 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerHealthy) {
         csServerInfos.emplace_back(csServerInfo);
     }

-    // mds返回Chunkserver retired的情况,直接返回0
+    // When the MDS reports the chunkserver as retired, return 0 directly
     GetCsInfoForTest(&csInfo, csId, false, "LEADER");
     EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<1>(csInfo),
-                        Return(0)));
+        .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0)));
     CopysetCheckCore copysetCheck1(mdsClient_, csClient_);
     ASSERT_EQ(0, copysetCheck1.CheckCopysetsOnChunkServer(csId));
     ASSERT_DOUBLE_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio);
     ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes());
     expectedRes[kTotal].insert(gId);

-    // 通过id查询,有一个copyset配置组中没有当前chunkserver,应忽略
+    // When querying by ID, one copyset's configuration group does not contain
+    // the current chunkserver; it should be ignored
     GetCsInfoForTest(&csInfo, csId);
     EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<1>(csInfo),
-                        Return(0)));
-    EXPECT_CALL(*csClient_, Init(_))
-        .Times(4)
-        .WillRepeatedly(Return(0));
+        .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0)));
+    EXPECT_CALL(*csClient_, Init(_)).Times(4).WillRepeatedly(Return(0));
     EXPECT_CALL(*csClient_, GetRaftStatus(_))
         .Times(4)
-        .WillOnce(DoAll(SetArgPointee<0>(followerBuf1),
-                        Return(0)))
-        .WillOnce(DoAll(SetArgPointee<0>(leaderBuf),
-                        Return(0)))
-        .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf1),
-                              Return(0)));
+        .WillOnce(DoAll(SetArgPointee<0>(followerBuf1), Return(0)))
+        .WillOnce(DoAll(SetArgPointee<0>(leaderBuf), Return(0)))
+        .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf1), Return(0)));
     CopysetCheckCore copysetCheck2(mdsClient_, csClient_);
     ASSERT_EQ(0, copysetCheck2.CheckCopysetsOnChunkServer(csId));
     ASSERT_DOUBLE_EQ(0, copysetCheck2.GetCopysetStatistics().unhealthyRatio);
     ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes());

-    // 通过地址查询
+    // Query by address
     EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csAddr, _))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<1>(csInfo),
-                        Return(0)));
-    EXPECT_CALL(*csClient_, Init(_))
-        .Times(1)
-        .WillRepeatedly(Return(0));
+        .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0)));
+    EXPECT_CALL(*csClient_, Init(_)).Times(1).WillRepeatedly(Return(0));
     EXPECT_CALL(*csClient_, GetRaftStatus(_))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<0>(followerBuf2),
-                        Return(0)));
+        .WillOnce(DoAll(SetArgPointee<0>(followerBuf2), Return(0)));
     EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<2>(csServerInfos),
-                        Return(0)));
+        .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), Return(0)));
     CopysetCheckCore copysetCheck3(mdsClient_, csClient_);
     ASSERT_EQ(0, copysetCheck3.CheckCopysetsOnChunkServer(csAddr));
     ASSERT_DOUBLE_EQ(0, copysetCheck3.GetCopysetStatistics().unhealthyRatio);
     ASSERT_EQ(expectedRes, copysetCheck3.GetCopysetsRes());
 }

-// CheckCopysetsOnChunkserver异常情况
+// CheckCopysetsOnChunkserver error cases
 TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) {
     ChunkServerIdType csId = 1;
     std::string csAddr = "127.0.0.1:9191";
@@ -376,7 +361,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) {
     GetIoBufForTest(&followerBuf2, gId, "FOLLOWER", true);
     std::map> expectedRes;

-    // 1、GetChunkServerInfo失败的情况
+    // 1. The situation of GetChunkServerInfo failure
     CopysetCheckCore copysetCheck1(mdsClient_, csClient_);
     EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _))
         .Times(1)
@@ -385,7 +370,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) {
     ASSERT_DOUBLE_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio);
     ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes());

-    // 2、chunkserver发送RPC失败的情况
+    // 2. 
The situation where chunkserver fails to send RPC std::vector csServerInfos; for (int i = 1; i <= 3; ++i) { CopySetServerInfo csServerInfo; @@ -398,53 +383,43 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { GetCsInfoForTest(&csInfo, csId); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); EXPECT_CALL(*csClient_, Init(_)) .Times(10) .WillOnce(Return(-1)) .WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(9) - .WillOnce(DoAll(SetArgPointee<0>(followerBuf), - Return(0))) - .WillOnce(DoAll(SetArgPointee<0>(followerBuf), - Return(0))) + .WillOnce(DoAll(SetArgPointee<0>(followerBuf), Return(0))) + .WillOnce(DoAll(SetArgPointee<0>(followerBuf), Return(0))) .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<0>(followerBuf), - Return(0))) + .WillOnce(DoAll(SetArgPointee<0>(followerBuf), Return(0))) .WillRepeatedly(Return(-1)); EXPECT_CALL(*mdsClient_, GetCopySetsInChunkServer(csAddr, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(copysets), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(copysets), Return(0))); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), Return(0))); CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck2.CheckCopysetsOnChunkServer(csId)); ASSERT_DOUBLE_EQ(1, copysetCheck2.GetCopysetStatistics().unhealthyRatio); - std::set expectedExcepCs = {csAddr, "127.0.0.1:9493", - "127.0.0.1:9394", "127.0.0.1:9496", - "127.0.0.1:9293", "127.0.0.1:9396", - "127.0.0.1:9499"}; + std::set expectedExcepCs = { + csAddr, "127.0.0.1:9493", "127.0.0.1:9394", "127.0.0.1:9496", + "127.0.0.1:9293", "127.0.0.1:9396", "127.0.0.1:9499"}; ASSERT_EQ(expectedExcepCs, copysetCheck2.GetServiceExceptionChunkServer()); std::set expectedCopysetExcepCs = {"127.0.0.1:9292"}; ASSERT_EQ(expectedCopysetExcepCs, - copysetCheck2.GetCopysetLoadExceptionChunkServer()); + copysetCheck2.GetCopysetLoadExceptionChunkServer()); ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes()); expectedRes.clear(); - // 3、获取chunkserver上的copyset失败的情况 + // 3. Failure in obtaining copyset on chunkserver GetCsInfoForTest(&csInfo, csId); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(1).WillOnce(Return(-1)); EXPECT_CALL(*mdsClient_, GetCopySetsInChunkServer(csAddr, _)) .Times(1) .WillOnce(Return(-1)); @@ -455,22 +430,16 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { ASSERT_EQ(expectedExcepCs, copysetCheck3.GetServiceExceptionChunkServer()); ASSERT_EQ(expectedRes, copysetCheck3.GetCopysetsRes()); - // 4、获取copyset对应的chunkserver列表失败的情况 + // 4. 
Failed to get the chunkserver list for the copyset
    GetCsInfoForTest(&csInfo, csId);
    EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _))
        .Times(1)
-        .WillOnce(DoAll(SetArgPointee<1>(csInfo),
-                        Return(0)));
-    EXPECT_CALL(*csClient_, Init(_))
-        .Times(1)
-        .WillOnce(Return(0));
-    EXPECT_CALL(*csClient_, GetRaftStatus(_))
-        .Times(1)
-        .WillOnce(Return(-1));
+        .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0)));
+    EXPECT_CALL(*csClient_, Init(_)).Times(1).WillOnce(Return(0));
+    EXPECT_CALL(*csClient_, GetRaftStatus(_)).Times(1).WillOnce(Return(-1));
     EXPECT_CALL(*mdsClient_, GetCopySetsInChunkServer(csAddr, _))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<1>(copysets),
-                        Return(0)));
+        .WillOnce(DoAll(SetArgPointee<1>(copysets), Return(0)));
     EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _))
         .Times(1)
         .WillOnce(Return(-1));
@@ -480,18 +449,14 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) {
     ASSERT_EQ(expectedExcepCs, copysetCheck4.GetServiceExceptionChunkServer());
     ASSERT_EQ(expectedRes, copysetCheck4.GetCopysetsRes());

-    // 检查copyset是否在配置组中时出错
+    // Error while checking whether the copyset is in the configuration group
     EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csAddr, _))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<1>(csInfo),
-                        Return(0)));
-    EXPECT_CALL(*csClient_, Init(_))
-        .Times(1)
-        .WillRepeatedly(Return(0));
+        .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0)));
+    EXPECT_CALL(*csClient_, Init(_)).Times(1).WillRepeatedly(Return(0));
     EXPECT_CALL(*csClient_, GetRaftStatus(_))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<0>(followerBuf2),
-                        Return(0)));
+        .WillOnce(DoAll(SetArgPointee<0>(followerBuf2), Return(0)));
     EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _))
         .Times(1)
         .WillOnce(Return(-1));
@@ -499,10 +464,12 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) {
     ASSERT_EQ(-1, copysetCheck5.CheckCopysetsOnChunkServer(csAddr));
 }

-// chunkserver上copyset不健康的情况
-// 检查单个server和集群都是复用的CheckCopysetsOnChunkserver
-// 所以CheckCopysetsOnChunkserver要测每个不健康的情况,其他的只要测健康和不健康还有不在线的情况就好
-// 具体什么原因不健康不用关心
+// Unhealthy copysets on a chunkserver
+// Checking a single server and checking the whole cluster both reuse
+// CheckCopysetsOnChunkserver, so CheckCopysetsOnChunkserver must cover every
+// unhealthy case; the other checks only need to cover the healthy, unhealthy,
+// and offline cases. The specific reason a copyset is unhealthy does not
+// matter there.
 TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) {
     ChunkServerIdType csId = 1;
     std::string csAddr1 = "127.0.0.1:9194";
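For orientation before the long case list that follows: GetIoBufForTest
fabricates the text that a chunkserver's raft status interface returns, which
is what the checker parses. Reassembled from the helper above, a healthy
leader entry looks roughly like this (the elided middle of the replicator
line comes from branches not shown in this hunk):

    [4294967396]
    peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 127.0.0.1:9193:0
    storage: [2581, 2580]
    state_machine: Idle
    state: LEADER
    replicator_123: next_index=... hc=4211759 ac=1089 ic=0

Each numbered case below flips one flag of this format to synthesize one
specific unhealthy condition.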
@@ -516,110 +483,107 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) {
     uint64_t gId = 4294967396;
     std::string groupId;

-    // 1、首先加入9个健康的copyset
+    // 1. First, add 9 healthy copysets
     for (int i = 0; i < 9; ++i) {
         groupId = std::to_string(gId++);
-        GetIoBufForTest(&temp, groupId, "LEADER", false, false, false,
-                        false, false, false);
+        GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, false,
+                        false, false);
         expectedRes[kTotal].emplace(groupId);
         os << temp << "\r\n";
     }

-    // 2、加入没有leader的copyset
+    // 2. Add a copyset without a leader
     groupId = std::to_string(gId++);
-    GetIoBufForTest(&temp, groupId, "FOLLOWER", true, false, false,
-                    false, false, false);
+    GetIoBufForTest(&temp, groupId, "FOLLOWER", true, false, false, false,
+                    false, false);
     expectedRes[kTotal].emplace(groupId);
     expectedRes[kNoLeader].emplace(groupId);
     os << temp << "\r\n";

-    // 3、加入正在安装快照的copyset
+    // 3. Add a copyset that is installing a snapshot
     groupId = std::to_string(gId++);
-    GetIoBufForTest(&temp, groupId, "LEADER", false, true, false,
-                    false, false, false);
+    GetIoBufForTest(&temp, groupId, "LEADER", false, true, false, false, false,
+                    false);
     expectedRes[kTotal].emplace(groupId);
     expectedRes[kInstallingSnapshot].emplace(groupId);
     os << temp << "\r\n";

-    // 4、加入peer不足的copyset
+    // 4. Add a copyset with insufficient peers
     groupId = std::to_string(gId++);
-    GetIoBufForTest(&temp, groupId, "LEADER", false, false, true,
-                    false, false, false);
+    GetIoBufForTest(&temp, groupId, "LEADER", false, false, true, false, false,
+                    false);
     expectedRes[kTotal].emplace(groupId);
     expectedRes[kPeersNoSufficient].emplace(groupId);
     os << temp << "\r\n";

-    // 5、加入日志差距大的copset
+    // 5. Add a copyset with a large log gap
     groupId = std::to_string(gId++);
-    GetIoBufForTest(&temp, groupId, "LEADER", false, false, false,
-                    true, false, false);
+    GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, true, false,
+                    false);
     expectedRes[kTotal].emplace(groupId);
     expectedRes[kLogIndexGapTooBig].emplace(groupId);
     os << temp << "\r\n";

-    // 6、加入无法解析的copyset,这种情况不会发生,发生了表明程序有bug
-    // 打印错误信息,但不会加入到unhealthy
+    // 6. Add a copyset that cannot be parsed. This should never happen; if it
+    // does, it indicates a bug in the program.
+    // An error message is printed, but the copyset is not counted as unhealthy
     groupId = std::to_string(gId++);
-    GetIoBufForTest(&temp, groupId, "LEADER", false, false, false,
-                    false, true, false);
+    GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, false, true,
+                    false);
     expectedRes[kTotal].emplace(groupId);
     os << temp << "\r\n";

-    // 7.1、加入少数peer不在线的copyset
+    // 7.1. Add a copyset where a minority of peers are offline
     groupId = std::to_string(gId++);
-    GetIoBufForTest(&temp, groupId, "LEADER", false, false, false,
-                    false, false, true);
+    GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, false, false,
+                    true);
     expectedRes[kTotal].emplace(groupId);
     expectedRes[kMinorityPeerNotOnline].emplace(groupId);
     os << temp << "\r\n";

-    // 7.2、加入大多数peer不在线的copyset
+    // 7.2. Add a copyset where a majority of peers are offline
     groupId = std::to_string(gId++);
-    GetIoBufForTest(&temp, groupId, "FOLLOWER", true, false, false,
-                    false, false, false, true);
+    GetIoBufForTest(&temp, groupId, "FOLLOWER", true, false, false, false,
+                    false, false, true);
     expectedRes[kTotal].emplace(groupId);
     expectedRes[kMajorityPeerNotOnline].emplace(groupId);
     os << temp << "\r\n";

-    // 8、加入CANDIDATE状态的copyset
+    // 8. Add a copyset in the CANDIDATE state
     groupId = std::to_string(gId++);
     GetIoBufForTest(&temp, groupId, "CANDIDATE");
     expectedRes[kTotal].emplace(groupId);
     expectedRes[kNoLeader].emplace(groupId);
     os << temp << "\r\n";

-    // 9、加入TRANSFERRING状态的copyset
+    // 9. Add a copyset in the TRANSFERRING state
     groupId = std::to_string(gId++);
     GetIoBufForTest(&temp, groupId, "TRANSFERRING");
     expectedRes[kTotal].emplace(groupId);
     expectedRes[kNoLeader].emplace(groupId);
     os << temp << "\r\n";

-    // 10、加入ERROR状态的copyset
+    // 10. Add a copyset in the ERROR state
     groupId = std::to_string(gId++);
     GetIoBufForTest(&temp, groupId, "ERROR");
     expectedRes[kTotal].emplace(groupId);
     expectedRes["state ERROR"].emplace(groupId);
     os << temp << "\r\n";

-    // 11、加入SHUTDOWN状态的copyset
+    // 11. 
Add a copyset in SHUTDOWN state groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "SHUTDOWN"); expectedRes[kTotal].emplace(groupId); expectedRes["state SHUTDOWN"].emplace(groupId); os << temp; - // 设置mock对象的返回,8个正常iobuf里面,设置一个的peer不在线,因此unhealthy++ + // Set the return of mock objects. Among the 8 normal iobufs, one peer is + // set to be offline, resulting in unhealthy++ os.move_to(iobuf); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*csClient_, Init(csAddr1)) - .WillOnce(Return(-1)); - EXPECT_CALL(*csClient_, Init(csAddr2)) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).WillRepeatedly(Return(0)); + EXPECT_CALL(*csClient_, Init(csAddr1)).WillOnce(Return(-1)); + EXPECT_CALL(*csClient_, Init(csAddr2)).WillOnce(Return(-1)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); std::vector csServerInfos; CopySetServerInfo csServerInfo; GetCsServerInfoForTest(&csServerInfo, 1); @@ -629,10 +593,9 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) { csServerInfos.emplace_back(csServerInfo); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<2>(csServerInfos), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<2>(csServerInfos), Return(0))); - // 检查结果 + // Inspection results std::set expectedExcepCs = {csAddr1, csAddr2}; CopysetCheckCore copysetCheck(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck.CheckCopysetsOnChunkServer(csId)); @@ -641,7 +604,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) { ASSERT_EQ(expectedExcepCs, copysetCheck.GetServiceExceptionChunkServer()); } -// CheckCopysetsOnServer正常情况 +// CheckCopysetsOnServer normal condition TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { ServerIdType serverId = 1; std::string serverIp = "127.0.0.1"; @@ -656,21 +619,17 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { std::string groupId; groupId = std::to_string(gId++); expectedRes[kTotal].emplace(groupId); - GetIoBufForTest(&iobuf, groupId, "LEADER", false, false, false, - false, false, false); + GetIoBufForTest(&iobuf, groupId, "LEADER", false, false, false, false, + false, false); - // 通过id查询 + // Query by ID EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(serverId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(3) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(3).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); CopysetCheckCore copysetCheck1(mdsClient_, csClient_); ASSERT_EQ(0, copysetCheck1.CheckCopysetsOnServer(serverId, &unhealthyCs)); @@ -678,19 +637,15 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { ASSERT_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); - // 通过ip查询 + // Query through IP EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(serverIp, _)) .Times(1) - 
.WillOnce(DoAll(SetArgPointee<1>(chunkservers),
-                        Return(0)));
-    EXPECT_CALL(*csClient_, Init(_))
-        .Times(3)
-        .WillRepeatedly(Return(0));
+        .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0)));
+    EXPECT_CALL(*csClient_, Init(_)).Times(3).WillRepeatedly(Return(0));
     EXPECT_CALL(*csClient_, GetRaftStatus(_))
         .Times(3)
-        .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf),
-                              Return(0)));
-    // 通过ip查询
+        .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0)));
+    // Query through IP
     CopysetCheckCore copysetCheck2(mdsClient_, csClient_);
     ASSERT_EQ(0, copysetCheck2.CheckCopysetsOnServer(serverIp, &unhealthyCs));
     ASSERT_EQ(0, unhealthyCs.size());
@@ -698,7 +653,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) {
     ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes());
 }

-// CheckCopysetsOnServer异常情况
+// CheckCopysetsOnServer error cases
 TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) {
     ServerIdType serverId = 1;
     butil::IOBuf iobuf;
@@ -721,7 +676,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) {
         gIds.emplace(std::to_string(gId));
     }

-    // 1、ListChunkServersOnServer失败的情况
+    // 1. ListChunkServersOnServer failure
     EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(serverId, _))
         .Times(1)
         .WillOnce(Return(-1));
@@ -730,7 +685,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) {
     ASSERT_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio);
     ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes());

-    // 3、一个chunkserver访问失败,一个chunkserver不健康的情况
+    // 3. One chunkserver cannot be reached and another chunkserver is
+    // unhealthy
     GetIoBufForTest(&iobuf, groupId, "LEADER", false, true);
     expectedRes[kTotal] = gIds;
     expectedRes[kTotal].emplace(groupId);
@@ -738,21 +693,17 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) {
     expectedRes[kMinorityPeerNotOnline] = gIds;
     EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(serverId, _))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<1>(chunkservers),
-                        Return(0)));
+        .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0)));
     EXPECT_CALL(*csClient_, Init(_))
         .Times(3)
         .WillOnce(Return(-1))
         .WillRepeatedly(Return(0));
     EXPECT_CALL(*csClient_, GetRaftStatus(_))
         .Times(2)
-        .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf),
-                              Return(0)));
-    EXPECT_CALL(*mdsClient_,
-                GetCopySetsInChunkServer("127.0.0.1:9191", _))
+        .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0)));
+    EXPECT_CALL(*mdsClient_, GetCopySetsInChunkServer("127.0.0.1:9191", _))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<1>(copysets),
-                        Return(0)));
+        .WillOnce(DoAll(SetArgPointee<1>(copysets), Return(0)));
     EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _))
         .Times(1)
         .WillOnce(Return(-1));
@@ -760,15 +711,14 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) {
     CopysetCheckCore copysetCheck2(mdsClient_, csClient_);
     ASSERT_EQ(-1, copysetCheck2.CheckCopysetsOnServer(serverId, &unhealthyCs));
     ASSERT_EQ(1, copysetCheck2.GetCopysetStatistics().unhealthyRatio);
-    std::vector unhealthyCsExpected =
-        {"127.0.0.1:9191", "127.0.0.1:9192"};
+    std::vector unhealthyCsExpected = {"127.0.0.1:9191",
+                                       "127.0.0.1:9192"};
     ASSERT_EQ(unhealthyCsExpected, unhealthyCs);
-    std::set expectedExcepCs =
-        {"127.0.0.1:9191"};
+    std::set expectedExcepCs = {"127.0.0.1:9191"};
     ASSERT_EQ(expectedExcepCs, copysetCheck2.GetServiceExceptionChunkServer());
 }

-// CheckCopysetsInCluster正常情况
+// CheckCopysetsInCluster normal cases
 TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterNormal) {
     butil::IOBuf iobuf;
     GetIoBufForTest(&iobuf, "4294967396", "LEADER");
@@ -783,23
+733,17 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterNormal) { EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(servers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(servers), Return(0))); EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(1, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(3) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(3).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(0), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(0), Return(0))); std::vector copysetsInMds; CopysetInfo copyset; copyset.set_logicalpoolid(1); @@ -807,8 +751,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterNormal) { copysetsInMds.emplace_back(copyset); EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0))); CopysetCheckCore copysetCheck1(mdsClient_, csClient_); ASSERT_EQ(0, copysetCheck1.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); @@ -826,7 +769,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterError) { GetCsInfoForTest(&chunkserver, 1); std::vector chunkservers = {chunkserver}; - // 1、ListServersInCluster失败 + // 1. ListServersInCluster failed EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) .WillOnce(Return(-1)); @@ -835,89 +778,75 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterError) { ASSERT_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); - // 2、CheckCopysetsOnServer返回不为0 + // 2. CheckCopysetsOnServer returned a non zero value EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(servers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(servers), Return(0))); EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(1, _)) .Times(1) .WillOnce(Return(-1)); std::vector copysetsInMds; EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0))); CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck2.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck2.GetCopysetStatistics().unhealthyRatio); expectedRes[kTotal] = {}; ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes()); - // 3、GetMetric失败 + // 3. 
GetMetric failed expectedRes[kTotal] = {"4294967396"}; EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>(servers), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(servers), Return(0))); EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(1, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(6) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(chunkservers), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(6).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(2) .WillOnce(Return(-1)) - .WillRepeatedly(DoAll(SetArgPointee<1>(10), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(10), Return(0))); CopysetInfo copyset; copyset.set_logicalpoolid(1); copyset.set_copysetid(100); copysetsInMds.emplace_back(copyset); EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), - Return(0))); - // 获取operator失败 + .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0))); + // Failed to obtain operator CopysetCheckCore copysetCheck3(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck3.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck3.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck3.GetCopysetsRes()); - // operator数量大于0 + // The number of operators is greater than 0 CopysetCheckCore copysetCheck4(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck4.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck4.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck4.GetCopysetsRes()); - // 4、比较chunkserver跟mds的copyset失败 + // 4. 
Comparing copysets between the chunkserver and the MDS fails
    EXPECT_CALL(*mdsClient_, ListServersInCluster(_))
        .Times(3)
-        .WillRepeatedly(DoAll(SetArgPointee<0>(servers),
-                              Return(0)));
+        .WillRepeatedly(DoAll(SetArgPointee<0>(servers), Return(0)));
     EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(1, _))
         .Times(3)
-        .WillRepeatedly(DoAll(SetArgPointee<1>(chunkservers),
-                              Return(0)));
-    EXPECT_CALL(*csClient_, Init(_))
-        .Times(9)
-        .WillRepeatedly(Return(0));
+        .WillRepeatedly(DoAll(SetArgPointee<1>(chunkservers), Return(0)));
+    EXPECT_CALL(*csClient_, Init(_)).Times(9).WillRepeatedly(Return(0));
     EXPECT_CALL(*csClient_, GetRaftStatus(_))
         .Times(9)
-        .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf),
-                              Return(0)));
-    // 从获取copyset失败
+        .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0)));
+    // Failed to get the copysets from the MDS
     EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _))
         .Times(1)
         .WillRepeatedly(Return(-1));
     ASSERT_EQ(-1, copysetCheck4.CheckCopysetsInCluster());
     ASSERT_EQ(0, copysetCheck4.GetCopysetStatistics().unhealthyRatio);

-    // copyset数量不一致
+    // Inconsistent number of copysets
     copysetsInMds.clear();
     copyset.set_logicalpoolid(1);
     copyset.set_copysetid(101);
@@ -926,16 +855,14 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterError) {
     copysetsInMds.emplace_back(copyset);
     EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _))
         .Times(1)
-        .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds),
-                              Return(0)));
+        .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0)));
     ASSERT_EQ(-1, copysetCheck4.CheckCopysetsInCluster());
     ASSERT_EQ(0, copysetCheck4.GetCopysetStatistics().unhealthyRatio);

-    // copyset数量一致,但是内容不一致
+    // The number of copysets matches, but their contents differ
     copysetsInMds.pop_back();
     EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _))
         .Times(1)
-        .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds),
-                              Return(0)));
+        .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0)));
     ASSERT_EQ(-1, copysetCheck4.CheckCopysetsInCluster());
     ASSERT_EQ(0, copysetCheck4.GetCopysetStatistics().unhealthyRatio);
 }
@@ -944,21 +871,17 @@ TEST_F(CopysetCheckCoreTest, CheckOperator) {
     CopysetCheckCore copysetCheck(mdsClient_, csClient_);
     std::string opName = "change_peer";
     uint64_t checkTime = 3;
-    // 1、获取metric失败
-    EXPECT_CALL(*mdsClient_, GetMetric(_, _))
-        .Times(1)
-        .WillOnce(Return(-1));
+    // 1. Failed to get the metric
+    EXPECT_CALL(*mdsClient_, GetMetric(_, _)).Times(1).WillOnce(Return(-1));
     ASSERT_EQ(-1, copysetCheck.CheckOperator(opName, checkTime));
-    // 2、operator数量不为0
+    // 2. The number of operators is not 0
     EXPECT_CALL(*mdsClient_, GetMetric(_, _))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<1>(10),
-                        Return(0)));
+        .WillOnce(DoAll(SetArgPointee<1>(10), Return(0)));
     ASSERT_EQ(10, copysetCheck.CheckOperator(opName, checkTime));
-    // 3、operator数量为0
+    // 3. 
The number of operators is 0 EXPECT_CALL(*mdsClient_, GetMetric(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(0), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(0), Return(0))); ASSERT_EQ(0, copysetCheck.CheckOperator(opName, checkTime)); } @@ -969,11 +892,10 @@ TEST_F(CopysetCheckCoreTest, ListMayBrokenVolumes) { GetCsInfoForTest(&chunkserver, 1); chunkservers.emplace_back(chunkserver); } - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*csClient_, Init(_)) .Times(12) .WillOnce(Return(0)) @@ -988,11 +910,10 @@ TEST_F(CopysetCheckCoreTest, ListMayBrokenVolumes) { copyset.set_copysetid(100 + i); copysets.emplace_back(copyset); } - EXPECT_CALL(*mdsClient_, GetCopySetsInChunkServer( - An(), _)) + EXPECT_CALL(*mdsClient_, + GetCopySetsInChunkServer(An(), _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(copysets), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(copysets), Return(0))); std::vector csServerInfos; for (int i = 1; i <= 3; ++i) { CopySetServerInfo csServerInfo; @@ -1001,16 +922,14 @@ TEST_F(CopysetCheckCoreTest, ListMayBrokenVolumes) { } EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), Return(0))); std::vector fileNames = {"file1", "file2"}; std::vector fileNames2; CopysetCheckCore copysetCheck1(mdsClient_, csClient_); EXPECT_CALL(*mdsClient_, ListVolumesOnCopyset(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(fileNames), Return(0))); ASSERT_EQ(0, copysetCheck1.ListMayBrokenVolumes(&fileNames2)); ASSERT_EQ(fileNames, fileNames2); } diff --git a/test/tools/copyset_check_test.cpp b/test/tools/copyset_check_test.cpp index 01c7e3f4c2..2e034b6d27 100644 --- a/test/tools/copyset_check_test.cpp +++ b/test/tools/copyset_check_test.cpp @@ -20,15 +20,17 @@ * Author: charisu */ -#include #include "src/tools/copyset_check.h" + +#include + #include "src/tools/copyset_check_core.h" #include "test/tools/mock/mock_copyset_check_core.h" using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::ReturnRef; -using ::testing::DoAll; using ::testing::SetArgPointee; DECLARE_bool(detail); @@ -55,26 +57,23 @@ class CopysetCheckTest : public ::testing::Test { core_ = std::make_shared(); FLAGS_detail = true; } - void TearDown() { - core_ = nullptr; - } + void TearDown() { core_ = nullptr; } void GetIoBufForTest(butil::IOBuf* buf, const std::string& gId, - bool isLeader = false, - bool noLeader = false, - bool installingSnapshot = false, - bool peersLess = false, - bool gapBig = false, - bool parseErr = false, - bool peerOffline = false) { + bool isLeader = false, bool noLeader = false, + bool installingSnapshot = false, + bool peersLess = false, bool gapBig = false, + bool parseErr = false, bool peerOffline = false) { butil::IOBufBuilder os; - os << "[" << gId << "]\r\n"; + os << "[" << gId << "]\r\n"; if (peersLess) { os << "peers: \r\n"; } else if (peerOffline) { - os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 127.0.0.1:9194:0\r\n"; // NOLINT + os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 " + "127.0.0.1:9194:0\r\n"; // NOLINT } else { - os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 127.0.0.1:9193:0\r\n"; 
// NOLINT
+            os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 "
+                  "127.0.0.1:9193:0\r\n";  // NOLINT
         }
         os << "storage: [2581, 2580]\n";
         if (parseErr) {
@@ -84,7 +83,9 @@ class CopysetCheckTest : public ::testing::Test {
         }
         os << "state_machine: Idle\r\n";
         if (isLeader) {
-            os << "state: " << "LEADER" << "\r\n";
+            os << "state: "
+               << "LEADER"
+               << "\r\n";
             os << "replicator_123: next_index=";
             if (gapBig) {
                 os << "1000";
@@ -99,26 +100,31 @@ class CopysetCheckTest : public ::testing::Test {
             }
             os << "hc=4211759 ac=1089 ic=0\r\n";
         } else {
-            os << "state: " << "FOLLOWER" << "\r\n";
+            os << "state: "
+               << "FOLLOWER"
+               << "\r\n";
             if (noLeader) {
-                os << "leader: " << "0.0.0.0:0:0\r\n";
+                os << "leader: "
+                   << "0.0.0.0:0:0\r\n";
             } else {
-                os << "leader: " << "127.0.0.1:9192:0\r\n";
+                os << "leader: "
+                   << "127.0.0.1:9192:0\r\n";
             }
         }
         os.move_to(*buf);
     }

-    std::map> res1 =
-        {{"total", {"4294967396", "4294967397"}}};
-    std::map> res2 =
-        {{"total", {"4294967396", "4294967397", "4294967398",
-                    "4294967399", "4294967400", "4294967401"}},
-         {"installing snapshot", {"4294967397"}},
-         {"no leader", {"4294967398"}},
-         {"index gap too big", {"4294967399"}},
-         {"peers not sufficient", {"4294967400"}},
-         {"peer not online", {"4294967401"}}};
+    std::map> res1 = {
+        {"total", {"4294967396", "4294967397"}}};
+    std::map> res2 = {
+        {"total",
+         {"4294967396", "4294967397", "4294967398", "4294967399", "4294967400",
+          "4294967401"}},
+        {"installing snapshot", {"4294967397"}},
+        {"no leader", {"4294967398"}},
+        {"index gap too big", {"4294967399"}},
+        {"peers not sufficient", {"4294967400"}},
+        {"peer not online", {"4294967401"}}};
     std::set serviceExcepCs = {"127.0.0.1:9092"};
     std::set copysetExcepCs = {"127.0.0.1:9093"};
     std::set emptySet;
@@ -143,29 +149,25 @@ TEST_F(CopysetCheckTest, CheckOneCopyset) {
     copysetCheck.PrintHelp("check-copyset");
     butil::IOBuf iobuf;
     GetIoBufForTest(&iobuf, "4294967396", true);
-    std::vector peersInCopyset =
-        {"127.0.0.1:9091", "127.0.0.1:9092", "127.0.0.1:9093"};
+    std::vector peersInCopyset = {
+        "127.0.0.1:9091", "127.0.0.1:9092", "127.0.0.1:9093"};
     std::string copysetDetail = iobuf.to_string();

-    // Init失败的情况
-    EXPECT_CALL(*core_, Init(_))
-        .Times(1)
-        .WillOnce(Return(-1));
+    // The case where Init fails
+    EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(-1));
     ASSERT_EQ(-1, copysetCheck.RunCommand("check-copyset"));
-    EXPECT_CALL(*core_, Init(_))
-        .Times(1)
-        .WillOnce(Return(0));
-    // 不支持的命令
+    EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0));
+    // Unsupported command
     ASSERT_EQ(-1, copysetCheck.RunCommand("check-nothings"));
     copysetCheck.PrintHelp("check-nothins");
-    // 没有指定逻辑池和copyset的话返回失败
+    // If no logical pool and copyset are specified, a failure is returned
     ASSERT_EQ(-1, copysetCheck.RunCommand("check-copyset"));
     FLAGS_logicalPoolId = 1;
     FLAGS_copysetId = 100;
     copysetCheck.PrintHelp("check-copyset");
-    // 健康的情况
+    // Healthy case
     EXPECT_CALL(*core_, CheckOneCopyset(_, _))
         .Times(1)
         .WillOnce(Return(CheckResult::kHealthy));
@@ -180,7 +182,7 @@ TEST_F(CopysetCheckTest, CheckOneCopyset) {
         .WillOnce(ReturnRef(emptySet));
     ASSERT_EQ(0, copysetCheck.RunCommand("check-copyset"));

-    // copyset不健康的情况
+    // The copyset is unhealthy
     EXPECT_CALL(*core_, CheckOneCopyset(_, _))
         .Times(1)
         .WillOnce(Return(CheckResult::kLogIndexGapTooBig));
@@ -199,15 +201,13 @@ TEST_F(CopysetCheckTest, testCheckChunkServer) {
     CopysetCheck copysetCheck(core_);
     copysetCheck.PrintHelp("check-chunkserver");
-    EXPECT_CALL(*core_, 
Init(_)) - .Times(1) - .WillOnce(Return(0)); - // 没有指定chunkserver的话报错 + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); + // Error reported if chunkserver is not specified ASSERT_EQ(-1, copysetCheck.RunCommand("check-chunkserver")); copysetCheck.PrintHelp("check-chunkserver"); - // 健康的情况 - // 通过id查询 + // Healthy situation + // Query by ID FLAGS_chunkserverId = 1; EXPECT_CALL(*core_, CheckCopysetsOnChunkServer(FLAGS_chunkserverId)) .Times(1) @@ -225,11 +225,11 @@ TEST_F(CopysetCheckTest, testCheckChunkServer) { .Times(1) .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-chunkserver")); - // id和地址同时指定,报错 + // Error reported when both ID and address are specified simultaneously FLAGS_chunkserverAddr = "127.0.0.1:8200"; ASSERT_EQ(-1, copysetCheck.RunCommand("check-chunkserver")); FLAGS_chunkserverId = 0; - // 通过地址查询 + // Search through address EXPECT_CALL(*core_, CheckCopysetsOnChunkServer(FLAGS_chunkserverAddr)) .Times(1) .WillOnce(Return(0)); @@ -247,7 +247,7 @@ TEST_F(CopysetCheckTest, testCheckChunkServer) { .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-chunkserver")); - // 不健康的情况 + // Unhealthy situation EXPECT_CALL(*core_, CheckCopysetsOnChunkServer(FLAGS_chunkserverAddr)) .Times(1) .WillOnce(Return(-1)); @@ -269,23 +269,20 @@ TEST_F(CopysetCheckTest, testCheckChunkServer) { TEST_F(CopysetCheckTest, testCheckServer) { CopysetCheck copysetCheck(core_); copysetCheck.PrintHelp("check-server"); - std::vector chunkservers = - {"127.0.0.1:9091", "127.0.0.1:9092", "127.0.0.1:9093"}; - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + std::vector chunkservers = {"127.0.0.1:9091", "127.0.0.1:9092", + "127.0.0.1:9093"}; + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 没有指定server的话报错 + // If no server is specified, an error will be reported ASSERT_EQ(-1, copysetCheck.RunCommand("check-server")); copysetCheck.PrintHelp("check-server"); - // 健康的情况 - // 通过id查询 + // Healthy situation + // Query by ID FLAGS_serverId = 1; EXPECT_CALL(*core_, CheckCopysetsOnServer(FLAGS_serverId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); EXPECT_CALL(*core_, GetCopysetsRes()) .Times(1) .WillRepeatedly(ReturnRef(res1)); @@ -299,15 +296,14 @@ TEST_F(CopysetCheckTest, testCheckServer) { .Times(1) .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-server")); - // id和ip同时指定,报错 + // Error reported when both ID and IP are specified simultaneously FLAGS_serverIp = "127.0.0.1"; ASSERT_EQ(-1, copysetCheck.RunCommand("check-server")); FLAGS_serverId = 0; - // 通过ip查询 + // Query through IP EXPECT_CALL(*core_, CheckCopysetsOnServer(FLAGS_serverIp, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); EXPECT_CALL(*core_, GetCopysetsRes()) .Times(1) .WillRepeatedly(ReturnRef(res1)); @@ -322,7 +318,7 @@ TEST_F(CopysetCheckTest, testCheckServer) { .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-server")); - // 不健康的情况 + // Unhealthy situation EXPECT_CALL(*core_, CheckCopysetsOnServer(FLAGS_serverIp, _)) .Times(1) .WillOnce(Return(-1)); @@ -344,14 +340,10 @@ TEST_F(CopysetCheckTest, testCheckServer) { TEST_F(CopysetCheckTest, testCheckCluster) { CopysetCheck copysetCheck(core_); copysetCheck.PrintHelp("copysets-status"); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + 
EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 健康的情况 - EXPECT_CALL(*core_, CheckCopysetsInCluster()) - .Times(1) - .WillOnce(Return(0)); + // Healthy situation + EXPECT_CALL(*core_, CheckCopysetsInCluster()).Times(1).WillOnce(Return(0)); EXPECT_CALL(*core_, GetCopysetsRes()) .Times(1) .WillRepeatedly(ReturnRef(res1)); @@ -366,10 +358,8 @@ TEST_F(CopysetCheckTest, testCheckCluster) { .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand(kCopysetsStatusCmd)); - // 不健康的情况 - EXPECT_CALL(*core_, CheckCopysetsInCluster()) - .Times(1) - .WillOnce(Return(-1)); + // Unhealthy situation + EXPECT_CALL(*core_, CheckCopysetsInCluster()).Times(1).WillOnce(Return(-1)); EXPECT_CALL(*core_, GetCopysetsRes()) .Times(1) .WillRepeatedly(ReturnRef(res2)); @@ -388,14 +378,12 @@ TEST_F(CopysetCheckTest, testCheckCluster) { TEST_F(CopysetCheckTest, testCheckOperator) { CopysetCheck copysetCheck(core_); copysetCheck.PrintHelp("check-operator"); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、不支持的operator + // 1. Unsupported operator FLAGS_opName = "no_operator"; ASSERT_EQ(-1, copysetCheck.RunCommand(kCheckOperatorCmd)); - // 2、transfer leader的operator和total的 + // 2. The operator and total of the transfer leader EXPECT_CALL(*core_, CheckOperator(_, FLAGS_leaderOpInterval)) .Times(2) .WillOnce(Return(0)) @@ -404,7 +392,7 @@ TEST_F(CopysetCheckTest, testCheckOperator) { ASSERT_EQ(0, copysetCheck.RunCommand(kCheckOperatorCmd)); FLAGS_opName = kTotalOpName; ASSERT_EQ(-1, copysetCheck.RunCommand(kCheckOperatorCmd)); - // 2、其他operator + // 2. Other operators EXPECT_CALL(*core_, CheckOperator(_, FLAGS_opIntervalExceptLeader)) .Times(3) .WillOnce(Return(10)) @@ -420,15 +408,11 @@ TEST_F(CopysetCheckTest, testCheckOperator) { TEST_F(CopysetCheckTest, PrintMayBrokenVolumes) { CopysetCheck copysetCheck(core_); copysetCheck.PrintHelp(kListMayBrokenVolumes); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); // fail - EXPECT_CALL(*core_, ListMayBrokenVolumes(_)) - .WillOnce(Return(-1)); + EXPECT_CALL(*core_, ListMayBrokenVolumes(_)).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetCheck.RunCommand(kListMayBrokenVolumes)); - EXPECT_CALL(*core_, ListMayBrokenVolumes(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, ListMayBrokenVolumes(_)).WillOnce(Return(0)); ASSERT_EQ(0, copysetCheck.RunCommand(kListMayBrokenVolumes)); } diff --git a/test/tools/curve_cli_test.cpp b/test/tools/curve_cli_test.cpp index 133d9de42d..0ad6d9cae8 100644 --- a/test/tools/curve_cli_test.cpp +++ b/test/tools/curve_cli_test.cpp @@ -20,22 +20,25 @@ * Author: charisu */ -#include -#include +#include "src/tools/curve_cli.h" + #include +#include #include +#include + #include -#include "src/tools/curve_cli.h" + #include "test/tools/mock/mock_cli_service.h" #include "test/tools/mock/mock_copyset_service.h" #include "test/tools/mock/mock_mds_client.h" using ::testing::_; -using ::testing::Return; -using ::testing::Invoke; +using ::testing::An; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::An; DECLARE_int32(timeout_ms); DECLARE_int32(max_retry); @@ -50,10 +53,8 @@ DECLARE_bool(affirm); namespace curve { namespace tool { -template -void callback(RpcController* controller, - const Req* request, - Resp* response, +template +void callback(RpcController* controller, const Req* request, Resp* response, 
Closure* done) { brpc::ClosureGuard doneGuard(done); } @@ -67,7 +68,7 @@ class CurveCliTest : public ::testing::Test { mockCliService = new MockCliService(); mockCopysetService_ = std::make_shared(); ASSERT_EQ(0, server->AddService(mockCliService, - brpc::SERVER_DOESNT_OWN_SERVICE)); + brpc::SERVER_DOESNT_OWN_SERVICE)); ASSERT_EQ(0, server->AddService(mockCopysetService_.get(), brpc::SERVER_DOESNT_OWN_SERVICE)); ASSERT_EQ(0, server->Start("127.0.0.1:9192", nullptr)); @@ -82,8 +83,8 @@ class CurveCliTest : public ::testing::Test { mockCliService = nullptr; } - brpc::Server *server; - MockCliService *mockCliService; + brpc::Server* server; + MockCliService* mockCliService; std::shared_ptr mockCopysetService_; const std::string conf = "127.0.0.1:9192:0"; const std::string peer = "127.0.0.1:9192:0"; @@ -113,20 +114,20 @@ TEST_F(CurveCliTest, RemovePeer) { curveCli.PrintHelp("remove-peer"); curveCli.PrintHelp("test"); curveCli.RunCommand("test"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("remove-peer")); - // conf为空 + // conf is empty FLAGS_peer = peer; FLAGS_conf = ""; ASSERT_EQ(-1, curveCli.RunCommand("remove-peer")); - // 解析conf失败 + // Failed to parse conf FLAGS_conf = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("remove-peer")); - // 解析peer失败 + // Parsing peer failed FLAGS_conf = conf; FLAGS_peer = "1234"; - // 执行变更成功 + // Successfully executed changes FLAGS_peer = peer; curve::common::Peer* targetPeer = new curve::common::Peer; targetPeer->set_address(peer); @@ -134,32 +135,27 @@ TEST_F(CurveCliTest, RemovePeer) { response.set_allocated_leader(targetPeer); EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetLeaderRequest2 *request, - GetLeaderResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + Invoke([](RpcController* controller, + const GetLeaderRequest2* request, + GetLeaderResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + }))); EXPECT_CALL(*mockCliService, RemovePeer(_, _, _, _)) - .WillOnce(Invoke([](RpcController *controller, - const RemovePeerRequest2 *request, - RemovePeerResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - })); + .WillOnce(Invoke( + [](RpcController* controller, const RemovePeerRequest2* request, + RemovePeerResponse2* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); })); ASSERT_EQ(0, curveCli.RunCommand("remove-peer")); - // 执行变更失败 + // Failed to execute changes EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) - .WillOnce( - Invoke([](RpcController *controller, - const GetLeaderRequest2 *request, - GetLeaderResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillOnce(Invoke([](RpcController* controller, + const GetLeaderRequest2* request, + GetLeaderResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, curveCli.RunCommand("remove-peer")); // TEST CASES: remove broken copyset after remove peer @@ -181,8 +177,8 @@ TEST_F(CurveCliTest, RemovePeer) { EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(getLeaderResp), - Invoke(getLeaderFunc))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(getLeaderResp), Invoke(getLeaderFunc))); 
EXPECT_CALL(*mockCliService, RemovePeer(_, _, _, _)) .Times(3) .WillRepeatedly(Invoke(removePeerFunc)); @@ -210,21 +206,21 @@ TEST_F(CurveCliTest, RemovePeer) { TEST_F(CurveCliTest, TransferLeader) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("transfer-leader"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // conf为空 + // conf is empty FLAGS_peer = peer; FLAGS_conf = ""; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // 解析conf失败 + // Failed to parse conf FLAGS_conf = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // 解析peer失败 + // Parsing peer failed FLAGS_conf = conf; FLAGS_peer = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // 执行变更成功 + // Successfully executed changes FLAGS_peer = peer; curve::common::Peer* targetPeer = new curve::common::Peer; targetPeer->set_address(peer); @@ -232,147 +228,132 @@ TEST_F(CurveCliTest, TransferLeader) { response.set_allocated_leader(targetPeer); EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetLeaderRequest2 *request, - GetLeaderResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + Invoke([](RpcController* controller, + const GetLeaderRequest2* request, + GetLeaderResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + }))); ASSERT_EQ(0, curveCli.RunCommand("transfer-leader")); - // 执行变更失败 + // Failed to execute changes EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) - .WillOnce( - Invoke([](RpcController *controller, - const GetLeaderRequest2 *request, - GetLeaderResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillOnce(Invoke([](RpcController* controller, + const GetLeaderRequest2* request, + GetLeaderResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); } TEST_F(CurveCliTest, ResetPeer) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("reset-peer"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // newConf为空 + // newConf is empty FLAGS_peer = peer; FLAGS_new_conf = ""; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // 解析newConf失败 + // Failed to parse newConf FLAGS_new_conf = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // 解析peer失败 + // Parsing peer failed FLAGS_new_conf = conf; FLAGS_peer = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // newConf有三个副本 + // newConf has three copies FLAGS_peer = peer; FLAGS_new_conf = "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // newConf不包含peer + // newConf does not contain peer FLAGS_new_conf = "127.0.0.1:8201:0"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // 执行变更成功 + // Successfully executed changes FLAGS_new_conf = conf; EXPECT_CALL(*mockCliService, ResetPeer(_, _, _, _)) - .WillOnce(Invoke([](RpcController *controller, - const ResetPeerRequest2 *request, - ResetPeerResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - })); + .WillOnce(Invoke( + [](RpcController* controller, const ResetPeerRequest2* request, + ResetPeerResponse2* response, + Closure* done) { 
brpc::ClosureGuard doneGuard(done); })); ASSERT_EQ(0, curveCli.RunCommand("reset-peer")); - // 执行变更失败 - EXPECT_CALL(*mockCliService, ResetPeer(_, _, _, _)) - .WillOnce(Invoke([](RpcController *controller, - const ResetPeerRequest2 *request, - ResetPeerResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + // Failed to execute changes + EXPECT_CALL(*mockCliService, ResetPeer(_, _, _, _)) + .WillOnce(Invoke([](RpcController* controller, + const ResetPeerRequest2* request, + ResetPeerResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); } TEST_F(CurveCliTest, DoSnapshot) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("do-snapshot"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("do-snapshot")); - // 解析peer失败 + // Parsing peer failed FLAGS_peer = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("do-snapshot")); - // 执行变更成功 + // Successfully executed changes FLAGS_peer = peer; EXPECT_CALL(*mockCliService, Snapshot(_, _, _, _)) - .WillOnce(Invoke([](RpcController *controller, - const SnapshotRequest2 *request, - SnapshotResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - })); + .WillOnce(Invoke( + [](RpcController* controller, const SnapshotRequest2* request, + SnapshotResponse2* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); })); ASSERT_EQ(0, curveCli.RunCommand("do-snapshot")); - // 执行变更失败 - EXPECT_CALL(*mockCliService, Snapshot(_, _, _, _)) - .WillOnce(Invoke([](RpcController *controller, - const SnapshotRequest2 *request, - SnapshotResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + // Failed to execute changes + EXPECT_CALL(*mockCliService, Snapshot(_, _, _, _)) + .WillOnce(Invoke([](RpcController* controller, + const SnapshotRequest2* request, + SnapshotResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, curveCli.RunCommand("do-snapshot")); } TEST_F(CurveCliTest, DoSnapshotAll) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("do-snapshot-all"); - // 执行变更成功 + // Successfully executed changes std::vector chunkservers; ChunkServerInfo csInfo; csInfo.set_hostip("127.0.0.1"); csInfo.set_port(9192); chunkservers.emplace_back(csInfo); - EXPECT_CALL(*mdsClient_, Init(_)) + EXPECT_CALL(*mdsClient_, Init(_)).Times(2).WillRepeatedly(Return(0)); + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(2) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*mockCliService, SnapshotAll(_, _, _, _)) .Times(1) - .WillOnce(Invoke([](RpcController *controller, - const SnapshotAllRequest *request, - SnapshotAllResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - })); + .WillOnce(Invoke( + [](RpcController* controller, const SnapshotAllRequest* request, + SnapshotAllResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); })); 
ASSERT_EQ(0, curveCli.RunCommand("do-snapshot-all")); - // 执行变更失败 - EXPECT_CALL(*mockCliService, SnapshotAll(_, _, _, _)) + // Failed to execute changes + EXPECT_CALL(*mockCliService, SnapshotAll(_, _, _, _)) .Times(1) - .WillOnce(Invoke([](RpcController *controller, - const SnapshotAllRequest *request, - SnapshotAllResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillOnce(Invoke([](RpcController* controller, + const SnapshotAllRequest* request, + SnapshotAllResponse* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, curveCli.RunCommand("do-snapshot-all")); } diff --git a/test/tools/curve_meta_tool_test.cpp b/test/tools/curve_meta_tool_test.cpp index 1d493c56f8..a94d54dbb3 100644 --- a/test/tools/curve_meta_tool_test.cpp +++ b/test/tools/curve_meta_tool_test.cpp @@ -20,10 +20,13 @@ * Author: charisu */ +#include "src/tools/curve_meta_tool.h" + #include + #include #include -#include "src/tools/curve_meta_tool.h" + #include "test/fs/mock_local_filesystem.h" namespace curve { @@ -32,8 +35,8 @@ namespace tool { using curve::common::Bitmap; using curve::fs::MockLocalFileSystem; using ::testing::_; -using ::testing::Return; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; @@ -43,12 +46,8 @@ const char chunkFileName[] = "chunk_001"; class CurveMetaToolTest : public ::testing::Test { protected: - void SetUp() { - localFs_ = std::make_shared(); - } - void TearDown() { - localFs_ = nullptr; - } + void SetUp() { localFs_ = std::make_shared(); } + void TearDown() { localFs_ = nullptr; } std::shared_ptr localFs_; }; @@ -65,30 +64,28 @@ TEST_F(CurveMetaToolTest, SupportCommand) { TEST_F(CurveMetaToolTest, PrintChunkMeta) { CurveMetaTool curveMetaTool(localFs_); - // 1、文件不存在 + // 1. The file does not exist EXPECT_CALL(*localFs_, Open(_, _)) .Times(6) .WillOnce(Return(-1)) .WillRepeatedly(Return(1)); - EXPECT_CALL(*localFs_, Close(_)) - .Times(5) - .WillRepeatedly(Return(-1)); + EXPECT_CALL(*localFs_, Close(_)).Times(5).WillRepeatedly(Return(-1)); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); - // 2、读取meta page失败 + // 2. Failed to read meta page EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(2) .WillOnce(Return(-1)) .WillOnce(Return(10)); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); - // 3、解析失败 + // 3. Parsing failed char buf[PAGE_SIZE] = {0}; EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), - Return(PAGE_SIZE))); + Return(PAGE_SIZE))); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); - // 4、普通chunk + // 4. Ordinary chunk ChunkFileMetaPage metaPage; metaPage.version = 1; metaPage.sn = 1; @@ -97,9 +94,9 @@ TEST_F(CurveMetaToolTest, PrintChunkMeta) { EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), - Return(PAGE_SIZE))); + Return(PAGE_SIZE))); ASSERT_EQ(0, curveMetaTool.RunCommand("chunk-meta")); - // 5、克隆chunk + // 5. 
Clone chunk metaPage.location = "test@s3"; uint32_t size = CHUNK_SIZE / PAGE_SIZE; auto bitmap = std::make_shared(size); @@ -110,36 +107,34 @@ TEST_F(CurveMetaToolTest, PrintChunkMeta) { EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), - Return(PAGE_SIZE))); + Return(PAGE_SIZE))); ASSERT_EQ(0, curveMetaTool.RunCommand("chunk-meta")); } TEST_F(CurveMetaToolTest, PrintSnapshotMeta) { CurveMetaTool curveMetaTool(localFs_); - // 1、文件不存在 + // 1. The file does not exist EXPECT_CALL(*localFs_, Open(_, _)) .Times(5) .WillOnce(Return(-1)) .WillRepeatedly(Return(1)); - EXPECT_CALL(*localFs_, Close(_)) - .Times(4) - .WillRepeatedly(Return(-1)); + EXPECT_CALL(*localFs_, Close(_)).Times(4).WillRepeatedly(Return(-1)); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); - // 2、读取meta page失败 + // 2. Failed to read meta page EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(2) .WillOnce(Return(-1)) .WillOnce(Return(10)); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); - // 3、解析失败 + // 3. Parsing failed char buf[PAGE_SIZE] = {0}; EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), - Return(PAGE_SIZE))); + Return(PAGE_SIZE))); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); - // 4、成功chunk + // 4. Successfully parsed chunk SnapshotMetaPage metaPage; metaPage.version = 1; metaPage.sn = 1; @@ -153,9 +148,8 @@ TEST_F(CurveMetaToolTest, PrintSnapshotMeta) { EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), - Return(PAGE_SIZE))); + Return(PAGE_SIZE))); ASSERT_EQ(0, curveMetaTool.RunCommand("snapshot-meta")); } } // namespace tool } // namespace curve - diff --git a/test/tools/data_consistency_check_test.cpp b/test/tools/data_consistency_check_test.cpp index 15cd238004..c9641ee9b5 100644 --- a/test/tools/data_consistency_check_test.cpp +++ b/test/tools/data_consistency_check_test.cpp @@ -19,20 +19,20 @@ * File Created: Friday, 28th June 2019 2:29:14 pm * Author: tongguangxun */ +#include +#include #include #include -#include -#include #include "src/tools/consistency_check.h" -#include "test/tools/mock/mock_namespace_tool_core.h" #include "test/tools/mock/mock_chunkserver_client.h" +#include "test/tools/mock/mock_namespace_tool_core.h" DECLARE_bool(check_hash); using ::testing::_; -using ::testing::Return; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; extern uint32_t segment_size; @@ -41,8 +41,7 @@ extern uint32_t chunk_size; class ConsistencyCheckTest : public ::testing::Test { public: void SetUp() { - nameSpaceTool_ = - std::make_shared(); + nameSpaceTool_ = std::make_shared(); csClient_ = std::make_shared(); } @@ -70,8 +69,7 @@ class ConsistencyCheckTest : public ::testing::Test { } void GetCopysetStatusForTest(CopysetStatusResponse* response, - int64_t applyingIndex = 1111, - bool ok = true) { + int64_t applyingIndex = 1111, bool ok = true) { if (ok) { response->set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } else { @@ -110,51 +108,41 @@ TEST_F(ConsistencyCheckTest, Consistency) { CopysetStatusResponse response; GetCopysetStatusForTest(&response); - // 设置期望 - EXPECT_CALL(*nameSpaceTool_, Init(_)) - .Times(2) - .WillRepeatedly(Return(0)); + // Set expectations + EXPECT_CALL(*nameSpaceTool_, Init(_)).Times(2).WillRepeatedly(Return(0)); EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) 
.Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(segments), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(segments), Return(0))); EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(20) - .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(90) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(90).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(60) - .WillRepeatedly(DoAll(SetArgPointee<1>(response), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(response), Return(0))); EXPECT_CALL(*csClient_, GetChunkHash(_, _)) .Times(30) - .WillRepeatedly(DoAll(SetArgPointee<1>("1111"), - Return(0))); - // 1、检查hash + .WillRepeatedly(DoAll(SetArgPointee<1>("1111"), Return(0))); + // 1. Check hash FLAGS_check_hash = true; curve::tool::ConsistencyCheck cfc1(nameSpaceTool_, csClient_); cfc1.PrintHelp("check-consistency"); cfc1.PrintHelp("check-nothing"); ASSERT_EQ(0, cfc1.RunCommand("check-consistency")); - // 2、检查applyIndex + // 2. Check the applyIndex FLAGS_check_hash = false; curve::tool::ConsistencyCheck cfc2(nameSpaceTool_, csClient_); ASSERT_EQ(0, cfc2.RunCommand("check-consistency")); ASSERT_EQ(-1, cfc2.RunCommand("check-nothing")); - // mds返回副本为空的情况 + // The case where MDS returns an empty replica list EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(segments), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(segments), Return(0))); EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(20) - .WillRepeatedly(DoAll(SetArgPointee<2>( - std::vector()), - Return(0))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(std::vector()), Return(0))); ASSERT_EQ(0, cfc2.RunCommand("check-consistency")); FLAGS_check_hash = true; ASSERT_EQ(0, cfc2.RunCommand("check-consistency")); @@ -180,61 +168,45 @@ TEST_F(ConsistencyCheckTest, NotConsistency) { CopysetStatusResponse response3; GetCopysetStatusForTest(&response3, 2222); - // 设置期望 - EXPECT_CALL(*nameSpaceTool_, Init(_)) - .Times(3) - .WillRepeatedly(Return(0)); + // Set expectations + EXPECT_CALL(*nameSpaceTool_, Init(_)).Times(3).WillRepeatedly(Return(0)); EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(segments), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(segments), Return(0))); EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), Return(0))); - // 1、检查hash,apply index一致,hash不一致 + // 1. 
Check hash: the apply index is consistent but the hash is + // inconsistent FLAGS_check_hash = true; - EXPECT_CALL(*csClient_, Init(_)) - .Times(5) - .WillRepeatedly(Return(0)); + EXPECT_CALL(*csClient_, Init(_)).Times(5).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(response1), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(response1), Return(0))); EXPECT_CALL(*csClient_, GetChunkHash(_, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<1>("2222"), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>("1111"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>("2222"), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>("1111"), Return(0))); curve::tool::ConsistencyCheck cfc1(nameSpaceTool_, csClient_); ASSERT_EQ(-1, cfc1.RunCommand("check-consistency")); - // 2、检查hash的时候apply index不一致 - EXPECT_CALL(*csClient_, Init(_)) - .Times(2) - .WillRepeatedly(Return(0)); + // 2. When checking the hash, the apply index is inconsistent + EXPECT_CALL(*csClient_, Init(_)).Times(2).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<1>(response1), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(response3), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(response1), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(response3), Return(0))); curve::tool::ConsistencyCheck cfc2(nameSpaceTool_, csClient_); ASSERT_EQ(-1, cfc2.RunCommand("check-consistency")); - // 3、检查applyIndex + // 3. Check the applyIndex FLAGS_check_hash = false; - EXPECT_CALL(*csClient_, Init(_)) - .Times(2) - .WillRepeatedly(Return(0)); + EXPECT_CALL(*csClient_, Init(_)).Times(2).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<1>(response1), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(response3), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(response1), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(response3), Return(0))); curve::tool::ConsistencyCheck cfc3(nameSpaceTool_, csClient_); ASSERT_EQ(-1, cfc3.RunCommand("check-consistency")); } @@ -254,62 +226,47 @@ TEST_F(ConsistencyCheckTest, CheckError) { } FLAGS_check_hash = false; curve::tool::ConsistencyCheck cfc(nameSpaceTool_, csClient_); - // 0、Init失败 - EXPECT_CALL(*nameSpaceTool_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + // 0. Init failed + EXPECT_CALL(*nameSpaceTool_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 1、获取segment失败 - EXPECT_CALL(*nameSpaceTool_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + // 1. Failed to obtain segment + EXPECT_CALL(*nameSpaceTool_, Init(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 2、获取chunkserver list失败 + // 2. Failed to obtain chunkserver list EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(segments), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(segments), Return(0))); EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 3、init 向chunkserverclient init失败 + // 3. 
ChunkServerClient Init failed EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 4、从chunkserver获取copyset status失败 - EXPECT_CALL(*csClient_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + // 4. Failed to obtain copyset status from chunkserver + EXPECT_CALL(*csClient_, Init(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 5、从chunkserver获取chunk hash失败 + // 5. Failed to obtain chunk hash from chunkserver FLAGS_check_hash = true; CopysetStatusResponse response1; GetCopysetStatusForTest(&response1); - EXPECT_CALL(*csClient_, Init(_)) - .Times(4) - .WillRepeatedly(Return(0)); + EXPECT_CALL(*csClient_, Init(_)).Times(4).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(response1), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(response1), Return(0))); - EXPECT_CALL(*csClient_, GetChunkHash(_, _)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*csClient_, GetChunkHash(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); } diff --git a/test/tools/etcd_client_test.cpp b/test/tools/etcd_client_test.cpp index b6774425bd..0e7d8a9765 100644 --- a/test/tools/etcd_client_test.cpp +++ b/test/tools/etcd_client_test.cpp @@ -20,11 +20,14 @@ * Author: charisu */ +#include "src/tools/etcd_client.h" + #include -#include //NOLINT + #include //NOLINT #include -#include "src/tools/etcd_client.h" +#include //NOLINT + #include "src/common/timeutility.h" class EtcdClientTest : public ::testing::Test { @@ -36,21 +39,23 @@ class EtcdClientTest : public ::testing::Test { ASSERT_TRUE(false); } else if (0 == etcdPid) { /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, avoid calling LOG() in the child process, + * as it may cause a deadlock!!! 
*/ - ASSERT_EQ(0, - execlp("etcd", "etcd", "--listen-client-urls", - "http://127.0.0.1:2366", "--advertise-client-urls", - "http://127.0.0.1:2366", "--listen-peer-urls", - "http://127.0.0.1:2367", - "--initial-advertise-peer-urls", - "http://127.0.0.1:2367", "--initial-cluster", - "toolEtcdClientTest=http://127.0.0.1:2367", - "--name", "toolEtcdClientTest", nullptr)); + ASSERT_EQ( + 0, + execlp("etcd", "etcd", "--listen-client-urls", + "http://127.0.0.1:2366", "--advertise-client-urls", + "http://127.0.0.1:2366", "--listen-peer-urls", + "http://127.0.0.1:2367", "--initial-advertise-peer-urls", + "http://127.0.0.1:2367", "--initial-cluster", + "toolEtcdClientTest=http://127.0.0.1:2367", "--name", + "toolEtcdClientTest", nullptr)); exit(0); } - // 一定时间内尝试check直到etcd完全起来 + // Keep checking for a while until etcd is fully up curve::tool::EtcdClient client; ASSERT_EQ(0, client.Init("127.0.0.1:2366")); bool running; @@ -59,8 +64,8 @@ class EtcdClientTest : public ::testing::Test { 5) { std::vector leaderAddrVec; std::map onlineState; - ASSERT_EQ(0, - client.GetEtcdClusterStatus(&leaderAddrVec, &onlineState)); + ASSERT_EQ( + 0, client.GetEtcdClusterStatus(&leaderAddrVec, &onlineState)); if (onlineState["127.0.0.1:2366"]) { running = true; break; @@ -81,22 +86,22 @@ class EtcdClientTest : public ::testing::Test { TEST_F(EtcdClientTest, GetEtcdClusterStatus) { curve::tool::EtcdClient client; - // Init失败的情况 + // Init failure case ASSERT_EQ(-1, client.Init("")); - // Init成功 + // Init succeeded ASSERT_EQ(0, client.Init(etcdAddr)); std::vector leaderAddrVec; std::map onlineState; - // 正常情况 + // Normal situation ASSERT_EQ(0, client.GetEtcdClusterStatus(&leaderAddrVec, &onlineState)); - std::map expected = { { "127.0.0.1:2366", true }, - { "127.0.0.1:2368", false } }; + std::map expected = {{"127.0.0.1:2366", true}, + {"127.0.0.1:2368", false}}; ASSERT_EQ(expected, onlineState); ASSERT_EQ(1, leaderAddrVec.size()); ASSERT_EQ("127.0.0.1:2366", leaderAddrVec[0]); - // 空指针错误 + // Null pointer error ASSERT_EQ(-1, client.GetEtcdClusterStatus(nullptr, &onlineState)); ASSERT_EQ(-1, client.GetEtcdClusterStatus(&leaderAddrVec, nullptr)); } @@ -105,13 +110,13 @@ TEST_F(EtcdClientTest, GetAndCheckEtcdVersion) { curve::tool::EtcdClient client; ASSERT_EQ(0, client.Init("127.0.0.1:2366")); - // 正常情况 + // Normal situation std::string version; std::vector failedList; ASSERT_EQ(0, client.GetAndCheckEtcdVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 个别etcd获取version失败 + // Some etcd instances failed to return the version ASSERT_EQ(0, client.Init(etcdAddr)); ASSERT_EQ(0, client.GetAndCheckEtcdVersion(&version, &failedList)); ASSERT_EQ(1, failedList.size()); diff --git a/test/tools/mds_client_test.cpp b/test/tools/mds_client_test.cpp index e261d43895..c89d8d7066 100644 --- a/test/tools/mds_client_test.cpp +++ b/test/tools/mds_client_test.cpp @@ -20,13 +20,16 @@ * Author: charisu */ -#include +#include "src/tools/mds_client.h" + #include +#include + #include -#include "src/tools/mds_client.h" + #include "test/tools/mock/mock_namespace_service.h" -#include "test/tools/mock/mock_topology_service.h" #include "test/tools/mock/mock_schedule_service.h" +#include "test/tools/mock/mock_topology_service.h" using curve::mds::schedule::QueryChunkServerRecoverStatusRequest; using curve::mds::schedule::QueryChunkServerRecoverStatusResponse; @@ -68,8 +71,8 @@ namespace tool { const char mdsAddr[] = "127.0.0.1:9191,127.0.0.1:9192"; template -void callback(RpcController *controller, 
const Req *request, Resp *response, - Closure *done) { +void callback(RpcController* controller, const Req* request, Resp* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); } @@ -90,15 +93,15 @@ class ToolMDSClientTest : public ::testing::Test { ASSERT_EQ(0, server->Start("127.0.0.1:9192", nullptr)); brpc::StartDummyServerAt(9193); - // 初始化mds client + // Initialize mds client curve::mds::topology::ListPhysicalPoolResponse response; response.set_statuscode(kTopoErrCodeSuccess); EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const ListPhysicalPoolRequest* request, + ListPhysicalPoolResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.Init(mdsAddr, "9194,9193")); @@ -116,7 +119,7 @@ class ToolMDSClientTest : public ::testing::Test { scheduleService = nullptr; } - void GetFileInfoForTest(uint64_t id, FileInfo *fileInfo) { + void GetFileInfoForTest(uint64_t id, FileInfo* fileInfo) { fileInfo->set_id(id); fileInfo->set_filename("test"); fileInfo->set_parentid(0); @@ -127,11 +130,11 @@ class ToolMDSClientTest : public ::testing::Test { fileInfo->set_ctime(1573546993000000); } - void GetCopysetInfoForTest(CopySetServerInfo *info, int num, + void GetCopysetInfoForTest(CopySetServerInfo* info, int num, uint32_t copysetId = 1) { info->Clear(); for (int i = 0; i < num; ++i) { - curve::common::ChunkServerLocation *csLoc = info->add_cslocs(); + curve::common::ChunkServerLocation* csLoc = info->add_cslocs(); csLoc->set_chunkserverid(i); csLoc->set_hostip("127.0.0.1"); csLoc->set_port(9191 + i); @@ -139,14 +142,14 @@ class ToolMDSClientTest : public ::testing::Test { info->set_copysetid(copysetId); } - void GetSegmentForTest(PageFileSegment *segment) { + void GetSegmentForTest(PageFileSegment* segment) { segment->set_logicalpoolid(1); segment->set_segmentsize(DefaultSegmentSize); segment->set_chunksize(kChunkSize); segment->set_startoffset(0); } - void GetPhysicalPoolInfoForTest(PoolIdType id, PhysicalPoolInfo *pool) { + void GetPhysicalPoolInfoForTest(PoolIdType id, PhysicalPoolInfo* pool) { pool->set_physicalpoolid(id); pool->set_physicalpoolname("testPool"); pool->set_desc("physical pool for test"); @@ -155,7 +158,7 @@ class ToolMDSClientTest : public ::testing::Test { } void GetLogicalPoolForTest(PoolIdType id, - curve::mds::topology::LogicalPoolInfo *lpInfo) { + curve::mds::topology::LogicalPoolInfo* lpInfo) { lpInfo->set_logicalpoolid(id); lpInfo->set_logicalpoolname("defaultLogicalPool"); lpInfo->set_physicalpoolid(1); @@ -167,14 +170,14 @@ class ToolMDSClientTest : public ::testing::Test { lpInfo->set_allocatestatus(AllocateStatus::ALLOW); } - void GetZoneInfoForTest(ZoneIdType id, ZoneInfo *zoneInfo) { + void GetZoneInfoForTest(ZoneIdType id, ZoneInfo* zoneInfo) { zoneInfo->set_zoneid(1); zoneInfo->set_zonename("testZone"); zoneInfo->set_physicalpoolid(1); zoneInfo->set_physicalpoolname("testPool"); } - void GetServerInfoForTest(ServerIdType id, ServerInfo *serverInfo) { + void GetServerInfoForTest(ServerIdType id, ServerInfo* serverInfo) { serverInfo->set_serverid(id); serverInfo->set_hostname("localhost"); serverInfo->set_internalip("127.0.0.1"); @@ -189,7 +192,7 @@ class ToolMDSClientTest : public ::testing::Test { } void GetChunkServerInfoForTest(ChunkServerIdType id, - ChunkServerInfo *csInfo, + 
ChunkServerInfo* csInfo, bool retired = false) { csInfo->set_chunkserverid(id); csInfo->set_disktype("ssd"); @@ -206,10 +209,10 @@ class ToolMDSClientTest : public ::testing::Test { csInfo->set_diskcapacity(1024); csInfo->set_diskused(512); } - brpc::Server *server; - curve::mds::MockNameService *nameService; - curve::mds::topology::MockTopologyService *topoService; - curve::mds::schedule::MockScheduleService *scheduleService; + brpc::Server* server; + curve::mds::MockNameService* nameService; + curve::mds::topology::MockTopologyService* topoService; + curve::mds::schedule::MockScheduleService* scheduleService; MDSClient mdsClient; const uint64_t kChunkSize = 16777216; const uint64_t DefaultSegmentSize = 1024 * 1024 * 1024; @@ -220,9 +223,9 @@ TEST(MDSClientInitTest, Init) { ASSERT_EQ(-1, mdsClient.Init("")); ASSERT_EQ(-1, mdsClient.Init("127.0.0.1")); ASSERT_EQ(-1, mdsClient.Init("127.0.0.1:65536")); - // dummy server非法 + // dummy server is illegal ASSERT_EQ(-1, mdsClient.Init(mdsAddr, "")); - // dummy server与mds不匹配 + // dummy server and mds do not match ASSERT_EQ(-1, mdsClient.Init(mdsAddr, "9091,9092,9093")); } @@ -232,44 +235,44 @@ TEST_F(ToolMDSClientTest, GetFileInfo) { std::string filename = "/test"; curve::mds::FileInfo outFileInfo; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::GetFileInfoRequest *request, - curve::mds::GetFileInfoResponse *response, - Closure *done) { + .WillRepeatedly(Invoke([](RpcController* controller, + const curve::mds::GetFileInfoRequest* request, + curve::mds::GetFileInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetFileInfo(filename, &outFileInfo)); - // 返回码不为OK + // The return code is not OK curve::mds::GetFileInfoResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetFileInfoRequest *request, - curve::mds::GetFileInfoResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetFileInfoRequest* request, + curve::mds::GetFileInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetFileInfo(filename, &outFileInfo)); - // 正常情况 - curve::mds::FileInfo *info = new curve::mds::FileInfo; + // Normal situation + curve::mds::FileInfo* info = new curve::mds::FileInfo; GetFileInfoForTest(1, info); response.set_allocated_fileinfo(info); response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetFileInfoRequest *request, - curve::mds::GetFileInfoResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetFileInfoRequest* request, + curve::mds::GetFileInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetFileInfo(filename, &outFileInfo)); ASSERT_EQ(info->DebugString(), outFileInfo.DebugString()); } @@ -277,33 +280,33 @@ TEST_F(ToolMDSClientTest, 
GetFileInfo) { TEST_F(ToolMDSClientTest, GetAllocatedSize) { uint64_t allocSize; std::string filename = "/test"; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( - [](RpcController *controller, - const curve::mds::GetAllocatedSizeRequest *request, - curve::mds::GetAllocatedSizeResponse *response, Closure *done) { + [](RpcController* controller, + const curve::mds::GetAllocatedSizeRequest* request, + curve::mds::GetAllocatedSizeResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetAllocatedSize(filename, &allocSize)); - // 返回码不为OK + // The return code is not OK curve::mds::GetAllocatedSizeResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetAllocatedSizeRequest *request, - curve::mds::GetAllocatedSizeResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetAllocatedSizeRequest* request, + curve::mds::GetAllocatedSizeResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetAllocatedSize(filename, &allocSize)); - // 正常情况 + // Normal situation response.set_allocatedsize(DefaultSegmentSize * 3); for (int i = 1; i <= 3; ++i) { response.mutable_allocsizemap()->insert( @@ -313,10 +316,10 @@ TEST_F(ToolMDSClientTest, GetAllocatedSize) { EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetAllocatedSizeRequest *request, - curve::mds::GetAllocatedSizeResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetAllocatedSizeRequest* request, + curve::mds::GetAllocatedSizeResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); AllocMap allocMap; ASSERT_EQ(0, mdsClient.GetAllocatedSize(filename, &allocSize, &allocMap)); ASSERT_EQ(DefaultSegmentSize * 3, allocSize); @@ -330,32 +333,32 @@ TEST_F(ToolMDSClientTest, ListDir) { std::string fileName = "/test"; std::vector fileInfoVec; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, ListDir(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::ListDirRequest *request, - curve::mds::ListDirResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::ListDirRequest* request, + curve::mds::ListDirResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListDir(fileName, &fileInfoVec)); - // 返回码不为OK + // The return code is not OK curve::mds::ListDirResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ListDir(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListDirRequest *request, - curve::mds::ListDirResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + 
Invoke([](RpcController* controller, + const curve::mds::ListDirRequest* request, + curve::mds::ListDirResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListDir(fileName, &fileInfoVec)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); for (int i = 0; i < 5; i++) { auto fileInfo = response.add_fileinfo(); @@ -364,10 +367,10 @@ TEST_F(ToolMDSClientTest, ListDir) { EXPECT_CALL(*nameService, ListDir(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListDirRequest *request, - curve::mds::ListDirResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ListDirRequest* request, + curve::mds::ListDirResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListDir(fileName, &fileInfoVec)); for (int i = 0; i < 5; i++) { FileInfo expected; @@ -381,70 +384,70 @@ TEST_F(ToolMDSClientTest, GetSegmentInfo) { curve::mds::PageFileSegment outSegment; uint64_t offset = 0; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::GetOrAllocateSegmentRequest* request, + curve::mds::GetOrAllocateSegmentResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(GetSegmentRes::kOtherError, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // segment不存在 + // segment does not exist curve::mds::GetOrAllocateSegmentResponse response; response.set_statuscode(curve::mds::StatusCode::kSegmentNotAllocated); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetOrAllocateSegmentRequest* request, + curve::mds::GetOrAllocateSegmentResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kSegmentNotAllocated, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // 文件不存在 + // File does not exist response.set_statuscode(curve::mds::StatusCode::kFileNotExists); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetOrAllocateSegmentRequest* request, + curve::mds::GetOrAllocateSegmentResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kFileNotExists, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // 其他错误 + // Other errors response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, 
GetOrAllocateSegment(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetOrAllocateSegmentRequest* request, + curve::mds::GetOrAllocateSegmentResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kOtherError, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // 正常情况 - PageFileSegment *segment = new PageFileSegment(); + // Normal situation + PageFileSegment* segment = new PageFileSegment(); GetSegmentForTest(segment); response.set_statuscode(curve::mds::StatusCode::kOK); response.set_allocated_pagefilesegment(segment); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetOrAllocateSegmentRequest* request, + curve::mds::GetOrAllocateSegmentResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kOK, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); ASSERT_EQ(segment->DebugString(), outSegment.DebugString()); @@ -453,41 +456,41 @@ TEST_F(ToolMDSClientTest, GetSegmentInfo) { TEST_F(ToolMDSClientTest, DeleteFile) { std::string fileName = "/test"; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::DeleteFileRequest *request, - curve::mds::DeleteFileResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::DeleteFileRequest* request, + curve::mds::DeleteFileResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.DeleteFile(fileName)); - // 返回码不为OK + // The return code is not OK curve::mds::DeleteFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::DeleteFileRequest *request, - curve::mds::DeleteFileResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::DeleteFileRequest* request, + curve::mds::DeleteFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.DeleteFile(fileName)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::DeleteFileRequest *request, - curve::mds::DeleteFileResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::DeleteFileRequest* request, + curve::mds::DeleteFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); 
ASSERT_EQ(0, mdsClient.DeleteFile(fileName)); } @@ -505,43 +508,41 @@ TEST_F(ToolMDSClientTest, CreateFile) { context.stripeCount = stripeCount; context.poolset = ""; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::CreateFileRequest *request, - curve::mds::CreateFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController* controller, + const curve::mds::CreateFileRequest* request, + curve::mds::CreateFileResponse* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.CreateFile(context)); - // 返回码不为OK + // The return code is not OK curve::mds::CreateFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::CreateFileRequest *request, - curve::mds::CreateFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const curve::mds::CreateFileRequest* request, + curve::mds::CreateFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.CreateFile(context)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::CreateFileRequest *request, - curve::mds::CreateFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const curve::mds::CreateFileRequest* request, + curve::mds::CreateFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.CreateFile(context)); } @@ -553,10 +554,10 @@ TEST_F(ToolMDSClientTest, ExtendVolume_success) { EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ExtendFileRequest *request, - curve::mds::ExtendFileResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ExtendFileRequest* request, + curve::mds::ExtendFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ExtendVolume(fileName, length)); } @@ -564,32 +565,32 @@ TEST_F(ToolMDSClientTest, ExtendVolume_Fail) { std::string fileName = "/test"; uint64_t length = 10 * DefaultSegmentSize; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::ExtendFileRequest *request, - curve::mds::ExtendFileResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::ExtendFileRequest* request, + curve::mds::ExtendFileResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + 
brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ExtendVolume(fileName, length)); return; - // 返回码不为OK + // The return code is not OK curve::mds::ExtendFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ExtendFileRequest *request, - curve::mds::ExtendFileResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ExtendFileRequest* request, + curve::mds::ExtendFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ExtendVolume(fileName, length)); } @@ -598,35 +599,35 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { CopySetIdType copysetId = 100; std::vector csLocs; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( - [](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, Closure *done) { + [](RpcController* controller, + const GetChunkServerListInCopySetsRequest* request, + GetChunkServerListInCopySetsResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet(logicalPoolId, copysetId, &csLocs)); - // 返回码不为OK + // The return code is not OK GetChunkServerListInCopySetsResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetChunkServerListInCopySetsRequest* request, + GetChunkServerListInCopySetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet(logicalPoolId, copysetId, &csLocs)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); CopySetServerInfo csInfo; GetCopysetInfoForTest(&csInfo, 3, copysetId); @@ -635,10 +636,10 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetChunkServerListInCopySetsRequest* request, + GetChunkServerListInCopySetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetChunkServerListInCopySet(logicalPoolId, copysetId, &csLocs)); ASSERT_EQ(csInfo.cslocs_size(), csLocs.size()); @@ -646,7 +647,7 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { ASSERT_EQ(csInfo.cslocs(i).DebugString(), csLocs[i].DebugString()); } - // 测试获取多个copyset + // Test obtaining multiple copysets std::vector 
expected; response.Clear(); response.set_statuscode(kTopoErrCodeSuccess); @@ -662,10 +663,10 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetChunkServerListInCopySetsRequest* request, + GetChunkServerListInCopySetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetChunkServerListInCopySets(logicalPoolId, copysets, &csServerInfos)); ASSERT_EQ(expected.size(), csServerInfos.size()); @@ -677,47 +678,45 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { TEST_F(ToolMDSClientTest, ListPhysicalPoolsInCluster) { std::vector pools; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const ListPhysicalPoolRequest* request, + ListPhysicalPoolResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListPhysicalPoolsInCluster(&pools)); - // 返回码不为OK + // The return code is not OK ListPhysicalPoolResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const ListPhysicalPoolRequest* request, + ListPhysicalPoolResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListPhysicalPoolsInCluster(&pools)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto poolInfo = response.add_physicalpoolinfos(); GetPhysicalPoolInfoForTest(i, poolInfo); } EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const ListPhysicalPoolRequest* request, + ListPhysicalPoolResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListPhysicalPoolsInCluster(&pools)); ASSERT_EQ(3, pools.size()); for (int i = 0; i < 3; ++i) { @@ -731,46 +730,44 @@ TEST_F(ToolMDSClientTest, ListLogicalPoolsInPhysicalPool) { PoolIdType poolId = 1; std::vector pools; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( - [](RpcController *controller, const ListLogicalPoolRequest *request, - ListLogicalPoolResponse *response, Closure *done) { + 
[](RpcController* controller, const ListLogicalPoolRequest* request, + ListLogicalPoolResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); - // 返回码不为OK + // The return code is not OK ListLogicalPoolResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListLogicalPoolRequest *request, - ListLogicalPoolResponse *response, Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const ListLogicalPoolRequest* request, + ListLogicalPoolResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto poolInfo = response.add_logicalpoolinfos(); GetLogicalPoolForTest(i, poolInfo); } EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListLogicalPoolRequest *request, - ListLogicalPoolResponse *response, Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const ListLogicalPoolRequest* request, + ListLogicalPoolResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); ASSERT_EQ(3, pools.size()); for (int i = 0; i < 3; ++i) { @@ -783,33 +780,33 @@ TEST_F(ToolMDSClientTest, ListLogicalPoolsInPhysicalPool) { TEST_F(ToolMDSClientTest, ListZoneInPhysicalPool) { PoolIdType poolId = 1; std::vector zones; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::topology::ListPoolZoneRequest *request, - curve::mds::topology::ListPoolZoneResponse *response, - Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::topology::ListPoolZoneRequest* request, + curve::mds::topology::ListPoolZoneResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListZoneInPhysicalPool(poolId, &zones)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::ListPoolZoneResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListPoolZoneRequest *request, - curve::mds::topology::ListPoolZoneResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::topology::ListPoolZoneRequest* request, + curve::mds::topology::ListPoolZoneResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, 
mdsClient.ListZoneInPhysicalPool(poolId, &zones)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto zoneInfo = response.add_zones(); @@ -818,10 +815,10 @@ TEST_F(ToolMDSClientTest, ListZoneInPhysicalPool) { EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListPoolZoneRequest *request, - curve::mds::topology::ListPoolZoneResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::topology::ListPoolZoneRequest* request, + curve::mds::topology::ListPoolZoneResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListZoneInPhysicalPool(poolId, &zones)); ASSERT_EQ(3, zones.size()); for (int i = 0; i < 3; ++i) { @@ -835,35 +832,35 @@ TEST_F(ToolMDSClientTest, ListServersInZone) { ZoneIdType zoneId; std::vector servers; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListZoneServer(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( - [](RpcController *controller, - const curve::mds::topology::ListZoneServerRequest *request, - curve::mds::topology::ListZoneServerResponse *response, - Closure *done) { + [](RpcController* controller, + const curve::mds::topology::ListZoneServerRequest* request, + curve::mds::topology::ListZoneServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListServersInZone(zoneId, &servers)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::ListZoneServerResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListZoneServer(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::ListZoneServerRequest *request, - curve::mds::topology::ListZoneServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::ListZoneServerRequest* request, + curve::mds::topology::ListZoneServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListServersInZone(zoneId, &servers)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto serverInfo = response.add_serverinfo(); @@ -873,10 +870,10 @@ TEST_F(ToolMDSClientTest, ListServersInZone) { .WillOnce(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::ListZoneServerRequest *request, - curve::mds::topology::ListZoneServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::ListZoneServerRequest* request, + curve::mds::topology::ListZoneServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListServersInZone(zoneId, &servers)); ASSERT_EQ(3, servers.size()); for (int i = 0; i < 3; ++i) { @@ -890,35 +887,36 @@ TEST_F(ToolMDSClientTest, ListChunkServersOnServer) { ServerIdType serverId = 1; std::vector chunkservers; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListChunkServer(_, _, _, _)) .Times(6) 
.WillRepeatedly(Invoke( - [](RpcController *controller, - const curve::mds::topology::ListChunkServerRequest *request, - curve::mds::topology::ListChunkServerResponse *response, - Closure *done) { + [](RpcController* controller, + const curve::mds::topology::ListChunkServerRequest* request, + curve::mds::topology::ListChunkServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::ListChunkServerResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListChunkServer(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::ListChunkServerRequest *request, - curve::mds::topology::ListChunkServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::ListChunkServerRequest* request, + curve::mds::topology::ListChunkServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); - // 正常情况,两个chunkserver正常,一个chunkserver retired + // Normal case: two chunkservers are healthy and one + // chunkserver is retired response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto csInfo = response.add_chunkserverinfos(); @@ -928,10 +926,10 @@ TEST_F(ToolMDSClientTest, ListChunkServersOnServer) { .WillOnce(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::ListChunkServerRequest *request, - curve::mds::topology::ListChunkServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::ListChunkServerRequest* request, + curve::mds::topology::ListChunkServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); ASSERT_EQ(2, chunkservers.size()); for (int i = 0; i < 2; ++i) { @@ -946,23 +944,23 @@ TEST_F(ToolMDSClientTest, GetChunkServerInfo) { std::string csAddr = "127.0.0.1:8200"; ChunkServerInfo chunkserver; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) .Times(12) .WillRepeatedly(Invoke( - [](RpcController *controller, - const curve::mds::topology::GetChunkServerInfoRequest *request, - curve::mds::topology::GetChunkServerInfoResponse *response, - Closure *done) { + [](RpcController* controller, + const curve::mds::topology::GetChunkServerInfoRequest* request, + curve::mds::topology::GetChunkServerInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csId, &chunkserver)); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::GetChunkServerInfoResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) @@ -970,17 +968,17 @@
TEST_F(ToolMDSClientTest, GetChunkServerInfo) { .WillRepeatedly(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::GetChunkServerInfoRequest - *request, - curve::mds::topology::GetChunkServerInfoResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::GetChunkServerInfoRequest* + request, + curve::mds::topology::GetChunkServerInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csId, &chunkserver)); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); - ChunkServerInfo *csInfo = new ChunkServerInfo(); + ChunkServerInfo* csInfo = new ChunkServerInfo(); GetChunkServerInfoForTest(1, csInfo); response.set_allocated_chunkserverinfo(csInfo); EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) @@ -988,18 +986,18 @@ TEST_F(ToolMDSClientTest, GetChunkServerInfo) { .WillRepeatedly(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::GetChunkServerInfoRequest - *request, - curve::mds::topology::GetChunkServerInfoResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::GetChunkServerInfoRequest* + request, + curve::mds::topology::GetChunkServerInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetChunkServerInfo(csId, &chunkserver)); ASSERT_EQ(0, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); ChunkServerInfo expected; GetChunkServerInfoForTest(1, &expected); ASSERT_EQ(expected.DebugString(), chunkserver.DebugString()); - // chunkserver地址不合法的情况 + // Illegal chunkserver address csAddr = ""; ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); csAddr = "127.0.0.1"; @@ -1013,36 +1011,36 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { std::string csAddr = "127.0.0.1:8200"; std::vector copysets; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, GetCopySetsInChunkServer(_, _, _, _)) .Times(12) .WillRepeatedly(Invoke( - [](RpcController *controller, - const GetCopySetsInChunkServerRequest *request, - GetCopySetsInChunkServerResponse *response, Closure *done) { + [](RpcController* controller, + const GetCopySetsInChunkServerRequest* request, + GetCopySetsInChunkServerResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csId, &copysets)); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, &copysets)); - // 返回码不为OK + // The return code is not OK GetCopySetsInChunkServerResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetCopySetsInChunkServer(_, _, _, _)) .Times(2) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInChunkServerRequest *request, - GetCopySetsInChunkServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetCopySetsInChunkServerRequest* request, + GetCopySetsInChunkServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); })));
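// [Sketch, not part of the patch] Every test in this file stubs the mocked
// RPC three ways: transport failure, non-OK status code, and success. The
// failure branch follows one gMock + brpc pattern throughout; a minimal,
// self-contained form of it is shown below. The MockTopoService/Echo* names
// are hypothetical stand-ins, not identifiers from this repository.
//
//   EXPECT_CALL(*mockService, Echo(_, _, _, _))
//       .Times(6)  // the expected call count suggests the client retries
//       .WillRepeatedly(Invoke([](RpcController* controller,
//                                 const EchoRequest* request,
//                                 EchoResponse* response, Closure* done) {
//           // Always run 'done'; otherwise the calling thread blocks.
//           brpc::ClosureGuard doneGuard(done);
//           // Fail the RPC itself (a transport-level error), as opposed
//           // to returning a non-OK status code in the response body.
//           auto* cntl = dynamic_cast<brpc::Controller*>(controller);
//           cntl->SetFailed("test");
//       }));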
ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csId, &copysets)); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, &copysets)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 5; ++i) { auto copysetInfo = response.add_copysetinfos(); @@ -1053,10 +1051,10 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { .Times(2) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInChunkServerRequest *request, - GetCopySetsInChunkServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetCopySetsInChunkServerRequest* request, + GetCopySetsInChunkServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetCopySetsInChunkServer(csId, &copysets)); ASSERT_EQ(5, copysets.size()); copysets.clear(); @@ -1066,7 +1064,7 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { ASSERT_EQ(1, copysets[i].logicalpoolid()); ASSERT_EQ(1000 + i, copysets[i].copysetid()); } - // chunkserver地址不合法的情况 + // Illegal chunkserver address csAddr = ""; ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, &copysets)); csAddr = "127.0.0.1"; @@ -1078,34 +1076,34 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { std::vector copysets; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, GetCopySetsInCluster(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const GetCopySetsInClusterRequest *request, - GetCopySetsInClusterResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const GetCopySetsInClusterRequest* request, + GetCopySetsInClusterResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetCopySetsInCluster(&copysets)); - // 返回码不为OK + // The return code is not OK GetCopySetsInClusterResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetCopySetsInCluster(_, _, _, _)) .Times(1) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInClusterRequest *request, - GetCopySetsInClusterResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetCopySetsInClusterRequest* request, + GetCopySetsInClusterResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetCopySetsInCluster(&copysets)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 5; ++i) { auto copysetInfo = response.add_copysetinfos(); @@ -1116,10 +1114,10 @@ TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { .Times(1) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInClusterRequest *request, - GetCopySetsInClusterResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetCopySetsInClusterRequest* request, + GetCopySetsInClusterResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetCopySetsInCluster(&copysets)); ASSERT_EQ(5, copysets.size()); copysets.clear(); @@ -1131,11 +1129,11 @@
TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { TEST_F(ToolMDSClientTest, GetCopyset) { auto succCallback = callback; - auto failCallback = [](RpcController *controller, - const GetCopysetRequest *request, - GetCopysetResponse *response, Closure *done) { + auto failCallback = [](RpcController* controller, + const GetCopysetRequest* request, + GetCopysetResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("fail"); }; @@ -1184,42 +1182,42 @@ TEST_F(ToolMDSClientTest, GetCopyset) { } TEST_F(ToolMDSClientTest, RapidLeaderSchedule) { - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const RapidLeaderScheduleRequst *request, - RapidLeaderScheduleResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const RapidLeaderScheduleRequst* request, + RapidLeaderScheduleResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.RapidLeaderSchedule(1)); - // 返回码不为OK + // The return code is not OK RapidLeaderScheduleResponse response; response.set_statuscode( curve::mds::schedule::kScheduleErrCodeInvalidLogicalPool); EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const RapidLeaderScheduleRequst *request, - RapidLeaderScheduleResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const RapidLeaderScheduleRequst* request, + RapidLeaderScheduleResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.RapidLeaderSchedule(1)); - // 成功 + // Success response.set_statuscode(curve::mds::schedule::kScheduleErrCodeSuccess); EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const RapidLeaderScheduleRequst *request, - RapidLeaderScheduleResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const RapidLeaderScheduleRequst* request, + RapidLeaderScheduleResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.RapidLeaderSchedule(1)); } @@ -1234,13 +1232,13 @@ TEST_F(ToolMDSClientTest, SetLogicalPoolScanState) { // CASE 1: Send rpc failed { - auto failCallback = [](RpcController *controller, - const SetLogicalPoolScanStateRequest *request, - SetLogicalPoolScanStateResponse *response, - Closure *done) { + auto failCallback = [](RpcController* controller, + const SetLogicalPoolScanStateRequest* request, + SetLogicalPoolScanStateResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("fail"); }; EXPECT_CALL(*topoService, SetLogicalPoolScanState(_, _, _, _)) @@ -1267,43 +1265,43 @@ TEST_F(ToolMDSClientTest, SetLogicalPoolScanState) { TEST_F(ToolMDSClientTest, QueryChunkServerRecoverStatus) { std::map statusMap; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*scheduleService,
QueryChunkServerRecoverStatus(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( - [](RpcController *controller, - const QueryChunkServerRecoverStatusRequest *request, - QueryChunkServerRecoverStatusResponse *response, Closure *done) { + [](RpcController* controller, + const QueryChunkServerRecoverStatusRequest* request, + QueryChunkServerRecoverStatusResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.QueryChunkServerRecoverStatus( std::vector{}, &statusMap)); - // 1. QueryChunkServerRecoverStatus失败的情况 + // 1. The case where QueryChunkServerRecoverStatus fails QueryChunkServerRecoverStatusResponse response; response.set_statuscode( curve::mds::schedule::kScheduleErrInvalidQueryChunkserverID); EXPECT_CALL(*scheduleService, QueryChunkServerRecoverStatus(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const QueryChunkServerRecoverStatusRequest *request, - QueryChunkServerRecoverStatusResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const QueryChunkServerRecoverStatusRequest* request, + QueryChunkServerRecoverStatusResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.QueryChunkServerRecoverStatus( std::vector{}, &statusMap)); - // 2. QueryChunkServerRecoverStatus成功的情况 + // 2. The case where QueryChunkServerRecoverStatus succeeds response.set_statuscode(curve::mds::schedule::kScheduleErrCodeSuccess); EXPECT_CALL(*scheduleService, QueryChunkServerRecoverStatus(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const QueryChunkServerRecoverStatusRequest *request, - QueryChunkServerRecoverStatusResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const QueryChunkServerRecoverStatusRequest* request, + QueryChunkServerRecoverStatusResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.QueryChunkServerRecoverStatus( std::vector{}, &statusMap)); } @@ -1329,12 +1327,12 @@ TEST_F(ToolMDSClientTest, GetMetric) { TEST_F(ToolMDSClientTest, GetCurrentMds) { bvar::Status value; value.expose("mds_status"); - // 有leader + // With a leader value.set_value("leader"); std::vector curMds = mdsClient.GetCurrentMds(); ASSERT_EQ(1, curMds.size()); ASSERT_EQ("127.0.0.1:9192", curMds[0]); - // 没有leader + // No leader value.set_value("follower"); ASSERT_TRUE(mdsClient.GetCurrentMds().empty()); } @@ -1343,20 +1341,22 @@ TEST_F(ToolMDSClientTest, GetMdsOnlineStatus) { bvar::Status value; value.expose("mds_config_mds_listen_addr"); std::map onlineStatus; - // 9180在线,9999不在线 + // 9180 online, 9999 offline - value.set_value("{\"conf_name\":\"mds.listen.addr\"," "\"conf_value\":\"127.0.0.1:9192\"}"); + value.set_value( + "{\"conf_name\":\"mds.listen.addr\"," + "\"conf_value\":\"127.0.0.1:9192\"}"); mdsClient.GetMdsOnlineStatus(&onlineStatus); std::map expected = {{"127.0.0.1:9191", false}, {"127.0.0.1:9192", true}}; ASSERT_EQ(expected, onlineStatus); - // 9180的服务端口不一致 + // The service port of 9180 is inconsistent - value.set_value("{\"conf_name\":\"mds.listen.addr\"," "\"conf_value\":\"127.0.0.1:9188\"}"); + value.set_value( + "{\"conf_name\":\"mds.listen.addr\"," + "\"conf_value\":\"127.0.0.1:9188\"}");
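// [Sketch, not part of the patch] GetCurrentMds and GetMdsOnlineStatus fake
// the MDS-side metrics in-process: they expose a bvar under the exact name
// the tool queries, then flip its value per test case. The shape of that
// trick, assuming bvar::Status<std::string> (the flattened diff above drops
// template arguments, so the instantiation shown here is an assumption):
//
//   bvar::Status<std::string> status;
//   status.expose("mds_status");   // served via brpc's builtin /vars page
//   status.set_value("leader");    // tool should now report this MDS
//   // ... exercise the tool under test ...
//   status.set_value("follower");  // tool should now report no leader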
mdsClient.GetMdsOnlineStatus(&onlineStatus); expected = {{"127.0.0.1:9191", false}, {"127.0.0.1:9192", false}}; ASSERT_EQ(expected, onlineStatus); - // 非json格式 + // Non-JSON format value.set_value("127.0.0.1::9191"); mdsClient.GetMdsOnlineStatus(&onlineStatus); expected = {{"127.0.0.1:9191", false}, {"127.0.0.1:9192", false}}; @@ -1366,33 +1366,33 @@ TEST_F(ToolMDSClientTest, GetMdsOnlineStatus) { TEST_F(ToolMDSClientTest, ListClient) { std::vector clientAddrs; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*nameService, ListClient(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::ListClientRequest *request, - curve::mds::ListClientResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::ListClientRequest* request, + curve::mds::ListClientResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListClient(&clientAddrs)); - // 返回码不为OK + // The return code is not OK curve::mds::ListClientResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ListClient(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListClientRequest *request, - curve::mds::ListClientResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ListClientRequest* request, + curve::mds::ListClientResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListClient(&clientAddrs)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); for (int i = 0; i < 5; i++) { auto clientInfo = response.add_clientinfos(); @@ -1402,14 +1402,14 @@ TEST_F(ToolMDSClientTest, ListClient) { EXPECT_CALL(*nameService, ListClient(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListClientRequest *request, - curve::mds::ListClientResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ListClientRequest* request, + curve::mds::ListClientResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListClient(&clientAddrs)); ASSERT_EQ(response.clientinfos_size(), clientAddrs.size()); for (int i = 0; i < 5; i++) { - const auto &clientInfo = response.clientinfos(i); + const auto& clientInfo = response.clientinfos(i); std::string expected = clientInfo.ip() + ":" + std::to_string(clientInfo.port()); ASSERT_EQ(expected, clientAddrs[i]); @@ -1424,13 +1424,13 @@ TEST_F(ToolMDSClientTest, ListVolumesOnCopyset) { EXPECT_CALL(*nameService, ListVolumesOnCopysets(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::ListVolumesOnCopysetsRequest *request, - curve::mds::ListVolumesOnCopysetsResponse *response, - Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::ListVolumesOnCopysetsRequest* request, + curve::mds::ListVolumesOnCopysetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1,
mdsClient.ListVolumesOnCopyset(copysets, &fileNames)); @@ -1441,10 +1441,10 @@ TEST_F(ToolMDSClientTest, ListVolumesOnCopyset) { EXPECT_CALL(*nameService, ListVolumesOnCopysets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListVolumesOnCopysetsRequest *request, - curve::mds::ListVolumesOnCopysetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ListVolumesOnCopysetsRequest* request, + curve::mds::ListVolumesOnCopysetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListVolumesOnCopyset(copysets, &fileNames)); // normal @@ -1456,10 +1456,10 @@ TEST_F(ToolMDSClientTest, ListVolumesOnCopyset) { EXPECT_CALL(*nameService, ListVolumesOnCopysets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListVolumesOnCopysetsRequest *request, - curve::mds::ListVolumesOnCopysetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ListVolumesOnCopysetsRequest* request, + curve::mds::ListVolumesOnCopysetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListVolumesOnCopyset(copysets, &fileNames)); ASSERT_EQ(response.filenames_size(), fileNames.size()); for (int i = 0; i < 5; i++) { @@ -1478,12 +1478,12 @@ TEST_F(ToolMDSClientTest, SetCopysetsAvailFlag) { EXPECT_CALL(*topoService, SetCopysetsAvailFlag(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const SetCopysetsAvailFlagRequest *request, - SetCopysetsAvailFlagResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const SetCopysetsAvailFlagRequest* request, + SetCopysetsAvailFlagResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.SetCopysetsAvailFlag(copysets, false)); @@ -1494,10 +1494,10 @@ TEST_F(ToolMDSClientTest, SetCopysetsAvailFlag) { EXPECT_CALL(*topoService, SetCopysetsAvailFlag(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const SetCopysetsAvailFlagRequest *request, - SetCopysetsAvailFlagResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const SetCopysetsAvailFlagRequest* request, + SetCopysetsAvailFlagResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.SetCopysetsAvailFlag(copysets, false)); // normal @@ -1505,10 +1505,10 @@ TEST_F(ToolMDSClientTest, SetCopysetsAvailFlag) { EXPECT_CALL(*topoService, SetCopysetsAvailFlag(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const SetCopysetsAvailFlagRequest *request, - SetCopysetsAvailFlagResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const SetCopysetsAvailFlagRequest* request, + SetCopysetsAvailFlagResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.SetCopysetsAvailFlag(copysets, false)); } @@ -1518,12 +1518,12 @@ TEST_F(ToolMDSClientTest, ListUnAvailCopySets) { EXPECT_CALL(*topoService,
ListUnAvailCopySets(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const ListUnAvailCopySetsRequest *request, - ListUnAvailCopySetsResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const ListUnAvailCopySetsRequest* request, + ListUnAvailCopySetsResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); + brpc::Controller* cntl = + dynamic_cast<brpc::Controller*>(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListUnAvailCopySets(&copysets)); @@ -1534,10 +1534,10 @@ TEST_F(ToolMDSClientTest, ListUnAvailCopySets) { EXPECT_CALL(*topoService, ListUnAvailCopySets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListUnAvailCopySetsRequest *request, - ListUnAvailCopySetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const ListUnAvailCopySetsRequest* request, + ListUnAvailCopySetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListUnAvailCopySets(&copysets)); // normal @@ -1550,10 +1550,10 @@ TEST_F(ToolMDSClientTest, ListUnAvailCopySets) { EXPECT_CALL(*topoService, ListUnAvailCopySets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListUnAvailCopySetsRequest *request, - ListUnAvailCopySetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const ListUnAvailCopySetsRequest* request, + ListUnAvailCopySetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListUnAvailCopySets(&copysets)); } diff --git a/test/tools/metric_client_test.cpp b/test/tools/metric_client_test.cpp index 30f6c78802..7e41b910f5 100644 --- a/test/tools/metric_client_test.cpp +++ b/test/tools/metric_client_test.cpp @@ -20,10 +20,12 @@ * Author: charisu */ -#include +#include "src/tools/metric_client.h" + #include +#include + #include -#include "src/tools/metric_client.h" namespace curve { namespace tool { @@ -43,82 +45,71 @@ class MetricClientTest : public ::testing::Test { delete server; server = nullptr; } - brpc::Server *server; + brpc::Server* server; }; TEST_F(MetricClientTest, GetMetric) { MetricClient client; - // 正常情况 + // Normal situation std::string metricName = "string_metric"; bvar::Status metric(metricName, "value"); std::string value; - ASSERT_EQ(MetricRet::kOK, client.GetMetric(serverAddr, - metricName, - &value)); + ASSERT_EQ(MetricRet::kOK, client.GetMetric(serverAddr, metricName, &value)); ASSERT_EQ("value", value); - // bvar不存在 - ASSERT_EQ(MetricRet::kNotFound, client.GetMetric(serverAddr, - "not-exist-metric", - &value)); - // 其他错误 - ASSERT_EQ(MetricRet::kOtherErr, client.GetMetric("127.0.0.1:9191", - "not-exist-metric", - &value)); + // Bvar does not exist + ASSERT_EQ(MetricRet::kNotFound, + client.GetMetric(serverAddr, "not-exist-metric", &value)); + // Other errors + ASSERT_EQ(MetricRet::kOtherErr, + client.GetMetric("127.0.0.1:9191", "not-exist-metric", &value)); } TEST_F(MetricClientTest, GetMetricUint) { MetricClient client; - // 正常情况 + // Normal situation std::string metricName = "uint_metric"; bvar::Status metric(metricName, 10); uint64_t value; - ASSERT_EQ(MetricRet::kOK, client.GetMetricUint(serverAddr, - metricName, - &value)); + ASSERT_EQ(MetricRet::kOK, + client.GetMetricUint(serverAddr, metricName, &value));
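// [Sketch, not part of the patch] GetMetricUint has to turn the fetched
// metric string into an integer; the "string_metric" case below exercises
// exactly that conversion-failure path. A hypothetical sketch of such a
// conversion helper (the real code lives in src/tools/metric_client.cpp and
// may well differ):
//
//   #include <cstdint>
//   #include <string>
//
//   bool ParseUint64(const std::string& value, uint64_t* out) {
//       try {
//           size_t pos = 0;
//           uint64_t v = std::stoull(value, &pos);
//           if (pos != value.size()) return false;  // trailing junk
//           *out = v;
//           return true;   // maps to MetricRet::kOK
//       } catch (const std::exception&) {
//           return false;  // not a number, maps to MetricRet::kOtherErr
//       }
//   }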
ASSERT_EQ(10, value); - // bvar不存在 - ASSERT_EQ(MetricRet::kNotFound, client.GetMetricUint(serverAddr, - "not-exist-metric", - &value)); - // 其他错误 - ASSERT_EQ(MetricRet::kOtherErr, client.GetMetricUint("127.0.0.1:9191", - "not-exist-metric", - &value)); - // 解析失败 + // Bvar does not exist + ASSERT_EQ(MetricRet::kNotFound, + client.GetMetricUint(serverAddr, "not-exist-metric", &value)); + // Other errors + ASSERT_EQ( + MetricRet::kOtherErr, + client.GetMetricUint("127.0.0.1:9191", "not-exist-metric", &value)); + // Parsing failed bvar::Status metric2("string_metric", "value"); - ASSERT_EQ(MetricRet::kOtherErr, client.GetMetricUint(serverAddr, - "string_metric", - &value)); + ASSERT_EQ(MetricRet::kOtherErr, + client.GetMetricUint(serverAddr, "string_metric", &value)); } TEST_F(MetricClientTest, GetConfValue) { MetricClient client; - // 正常情况 + // Normal situation std::string metricName = "conf_metric"; bvar::Status conf_metric(metricName, ""); - conf_metric.set_value("{\"conf_name\":\"key\"," - "\"conf_value\":\"value\"}"); + conf_metric.set_value( + "{\"conf_name\":\"key\"," + "\"conf_value\":\"value\"}"); std::string value; - ASSERT_EQ(MetricRet::kOK, client.GetConfValueFromMetric(serverAddr, - metricName, - &value)); + ASSERT_EQ(MetricRet::kOK, + client.GetConfValueFromMetric(serverAddr, metricName, &value)); ASSERT_EQ("value", value); - // bvar不存在 - ASSERT_EQ(MetricRet::kNotFound, client.GetConfValueFromMetric( - serverAddr, - "not-exist-metric", - &value)); - // 其他错误 - ASSERT_EQ(MetricRet::kOtherErr, client.GetConfValueFromMetric( - "127.0.0.1:9191", - "not-exist-metric", - &value)); - // 解析失败 + // Bvar does not exist + ASSERT_EQ( + MetricRet::kNotFound, + client.GetConfValueFromMetric(serverAddr, "not-exist-metric", &value)); + // Other errors + ASSERT_EQ(MetricRet::kOtherErr, + client.GetConfValueFromMetric("127.0.0.1:9191", + "not-exist-metric", &value)); + // Parsing failed conf_metric.set_value("string"); - ASSERT_EQ(MetricRet::kOtherErr, client.GetConfValueFromMetric( - serverAddr, - metricName, - &value)); + ASSERT_EQ(MetricRet::kOtherErr, + client.GetConfValueFromMetric(serverAddr, metricName, &value)); } } // namespace tool diff --git a/test/tools/namespace_tool_core_test.cpp b/test/tools/namespace_tool_core_test.cpp index e1b365b28f..7affe3b1a6 100644 --- a/test/tools/namespace_tool_core_test.cpp +++ b/test/tools/namespace_tool_core_test.cpp @@ -20,18 +20,20 @@ * Author: charisu */ +#include "src/tools/namespace_tool_core.h" + #include + #include "src/common/timeutility.h" -#include "src/tools/namespace_tool_core.h" #include "test/tools/mock/mock_mds_client.h" +using curve::tool::CreateFileContext; +using curve::tool::GetSegmentRes; using ::testing::_; -using ::testing::Return; using ::testing::DoAll; -using ::testing::SetArgPointee; +using ::testing::Return; using ::testing::SaveArg; -using curve::tool::GetSegmentRes; -using curve::tool::CreateFileContext; +using ::testing::SetArgPointee; DECLARE_bool(isTest); DECLARE_string(fileName); @@ -39,12 +41,8 @@ DECLARE_uint64(offset); class NameSpaceToolCoreTest : public ::testing::Test { protected: - void SetUp() { - client_ = std::make_shared(); - } - void TearDown() { - client_ = nullptr; - } + void SetUp() { client_ = std::make_shared(); } + void TearDown() { client_ = nullptr; } void GetFileInfoForTest(FileInfo* fileInfo) { fileInfo->set_id(1); @@ -98,14 +96,11 @@ TEST_F(NameSpaceToolCoreTest, GetFileInfo) { EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(expected), - Return(0))); + 
.WillOnce(DoAll(SetArgPointee<1>(expected), Return(0))); ASSERT_EQ(0, namespaceTool.GetFileInfo(fileName, &fileInfo)); ASSERT_EQ(expected.DebugString(), fileInfo.DebugString()); - EXPECT_CALL(*client_, GetFileInfo(_, _)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*client_, GetFileInfo(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.GetFileInfo(fileName, &fileInfo)); } @@ -122,17 +117,14 @@ TEST_F(NameSpaceToolCoreTest, ListDir) { EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(expected), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(expected), Return(0))); ASSERT_EQ(0, namespaceTool.ListDir(fileName, &files)); ASSERT_EQ(expected.size(), files.size()); for (uint64_t i = 0; i < expected.size(); ++i) { ASSERT_EQ(expected[i].DebugString(), files[i].DebugString()); } - EXPECT_CALL(*client_, ListDir(_, _)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*client_, ListDir(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.ListDir(fileName, &files)); } @@ -140,14 +132,12 @@ TEST_F(NameSpaceToolCoreTest, CreateFile) { curve::tool::NameSpaceToolCore namespaceTool(client_); std::string fileName = "/test"; uint64_t length = 5 * segmentSize; - uint64_t stripeUnit = 32 * 1024 *1024; + uint64_t stripeUnit = 32 * 1024 * 1024; uint64_t stripeCount = 32; std::string pstName = ""; - // 1、正常情况 - EXPECT_CALL(*client_, CreateFile(_)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*client_, CreateFile(_)).Times(1).WillOnce(Return(0)); CreateFileContext context; context.type = curve::mds::FileType::INODE_PAGEFILE; @@ -159,10 +149,8 @@ ASSERT_EQ(0, namespaceTool.CreateFile(context)); - // 2、创建失败 - EXPECT_CALL(*client_, CreateFile(_)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Creation failed + EXPECT_CALL(*client_, CreateFile(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.CreateFile(context)); } @@ -170,16 +158,12 @@ TEST_F(NameSpaceToolCoreTest, ExtendVolume) { curve::tool::NameSpaceToolCore namespaceTool(client_); std::string fileName = "/test"; uint64_t length = 10 * segmentSize; - // 1、正常情况 - EXPECT_CALL(*client_, ExtendVolume(_, _)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*client_, ExtendVolume(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.ExtendVolume(fileName, length)); - // 2、创建失败 - EXPECT_CALL(*client_, ExtendVolume(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Extension failed + EXPECT_CALL(*client_, ExtendVolume(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.ExtendVolume(fileName, length)); } @@ -188,16 +172,12 @@ TEST_F(NameSpaceToolCoreTest, DeleteFile) { std::string fileName = "/test"; bool forceDelete = false; - // 1、正常情况 - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.DeleteFile(fileName, forceDelete)); - // 2、创建失败 - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Deletion failed + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.DeleteFile(fileName, forceDelete)); } @@ -213,23 +193,22 @@ TEST_F(NameSpaceToolCoreTest, GetChunkServerListInCopySet) { expected.emplace_back(csLoc); } - // 1、正常情况 + // 1. 
Normal situation EXPECT_CALL(*client_, GetChunkServerListInCopySet(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(expected), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(expected), Return(0))); ASSERT_EQ(0, namespaceTool.GetChunkServerListInCopySet(logicalPoolId, - copysetId, &csLocs)); + copysetId, &csLocs)); ASSERT_EQ(expected.size(), csLocs.size()); for (uint64_t i = 0; i < expected.size(); ++i) { ASSERT_EQ(expected[i].DebugString(), csLocs[i].DebugString()); } - // 2、失败 + // 2. Failure EXPECT_CALL(*client_, GetChunkServerListInCopySet(_, _, _)) .Times(1) .WillOnce(Return(-1)); - ASSERT_EQ(-1, namespaceTool.GetChunkServerListInCopySet(logicalPoolId, - copysetId, &csLocs)); + ASSERT_EQ(-1, namespaceTool.GetChunkServerListInCopySet( + logicalPoolId, copysetId, &csLocs)); } TEST_F(NameSpaceToolCoreTest, CleanRecycleBin) { @@ -274,18 +253,14 @@ TEST_F(NameSpaceToolCoreTest, CleanRecycleBin) { // CASE 1: clean recycle bin success EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(7) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(7).WillRepeatedly(Return(0)); ASSERT_EQ(0, namespaceTool.CleanRecycleBin("/", parseArg("0s"))); // CASE 2: clean recycle bin fail EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); EXPECT_CALL(*client_, DeleteFile(_, _)) .Times(7) .WillOnce(Return(-1)) @@ -293,47 +268,35 @@ TEST_F(NameSpaceToolCoreTest, CleanRecycleBin) { ASSERT_EQ(-1, namespaceTool.CleanRecycleBin("/", parseArg("0s"))); // CASE 3: list dir fail - EXPECT_CALL(*client_, ListDir(_, _)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*client_, ListDir(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.CleanRecycleBin("/", parseArg("0s"))); // CASE 4: clean recycle bin with expireTime is "3s" EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(6) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(6).WillRepeatedly(Return(0)); ASSERT_EQ(0, namespaceTool.CleanRecycleBin("/", parseArg("3s"))); // CASE 5: clean recycle bin with expireTime is "3m" EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(5) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(5).WillRepeatedly(Return(0)); ASSERT_EQ(0, namespaceTool.CleanRecycleBin("/", parseArg("3m"))); // CASE 6: clean recycle bin with expireTime is "3d" EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(3) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(3).WillRepeatedly(Return(0)); ASSERT_EQ(0, namespaceTool.CleanRecycleBin("/", parseArg("3d"))); // CASE 7: clean recycle bin with different dirname auto cleanByDir = [&](const std::string& dirname, int deleteTimes) { EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - 
.WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); EXPECT_CALL(*client_, DeleteFile(_, _)) .Times(deleteTimes) @@ -352,10 +315,9 @@ TEST_F(NameSpaceToolCoreTest, CleanRecycleBin) { cleanByDir("/", 7); } - TEST_F(NameSpaceToolCoreTest, GetAllocatedSize) { curve::tool::NameSpaceToolCore namespaceTool(client_); - // 1、正常情况 + // 1. Normal situation uint64_t allocSize; EXPECT_CALL(*client_, GetAllocatedSize(_, _, _)) .Times(1) @@ -374,38 +336,33 @@ TEST_F(NameSpaceToolCoreTest, QueryChunkCopyset) { uint64_t chunkId; std::pair copyset; - // 正常情况 + // Normal situation EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_CALL(*client_, GetSegmentInfo(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(segment), - Return(GetSegmentRes::kOK))); - ASSERT_EQ(0, namespaceTool.QueryChunkCopyset(fileName, offset, - &chunkId, &copyset)); + .WillOnce(DoAll(SetArgPointee<2>(segment), Return(GetSegmentRes::kOK))); + ASSERT_EQ(0, namespaceTool.QueryChunkCopyset(fileName, offset, &chunkId, + &copyset)); ASSERT_EQ(2001, chunkId); ASSERT_EQ(1, copyset.first); ASSERT_EQ(1001, copyset.second); - // GetFileInfo失败 - EXPECT_CALL(*client_, GetFileInfo(_, _)) - .Times(1) - .WillOnce(Return(-1)); - ASSERT_EQ(-1, namespaceTool.QueryChunkCopyset(fileName, offset, - &chunkId, &copyset)); + // GetFileInfo failed + EXPECT_CALL(*client_, GetFileInfo(_, _)).Times(1).WillOnce(Return(-1)); + ASSERT_EQ(-1, namespaceTool.QueryChunkCopyset(fileName, offset, &chunkId, + &copyset)); - // GetSegmentInfo失败 + // GetSegmentInfo failed EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_CALL(*client_, GetSegmentInfo(_, _, _)) .Times(1) .WillOnce(Return(GetSegmentRes::kOtherError)); - ASSERT_EQ(-1, namespaceTool.QueryChunkCopyset(fileName, offset, - &chunkId, &copyset)); + ASSERT_EQ(-1, namespaceTool.QueryChunkCopyset(fileName, offset, &chunkId, + &copyset)); } TEST_F(NameSpaceToolCoreTest, GetFileSegments) { @@ -417,33 +374,29 @@ PageFileSegment expected; GetSegmentForTest(&expected); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_CALL(*client_, GetSegmentInfo(_, _, _)) .Times(5) .WillOnce(Return(GetSegmentRes::kSegmentNotAllocated)) - .WillRepeatedly(DoAll(SetArgPointee<2>(expected), - Return(GetSegmentRes::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(expected), Return(GetSegmentRes::kOK))); ASSERT_EQ(0, namespaceTool.GetFileSegments(fileName, &segments)); ASSERT_EQ(4, segments.size()); for (uint64_t i = 0; i < segments.size(); ++i) { ASSERT_EQ(expected.DebugString(), segments[i].DebugString()); } - // 2、GetFileInfo失败的情况 - EXPECT_CALL(*client_, GetFileInfo(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. The case where GetFileInfo fails + EXPECT_CALL(*client_, GetFileInfo(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.GetFileSegments(fileName, &segments)); - // 3、获取segment失败 + // 3. 
Failed to obtain segment EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_CALL(*client_, GetSegmentInfo(_, _, _)) .Times(1) .WillOnce(Return(GetSegmentRes::kOtherError)); @@ -452,11 +405,9 @@ TEST_F(NameSpaceToolCoreTest, GetFileSize) { curve::tool::NameSpaceToolCore namespaceTool(client_); - // 1、正常情况 + // 1. Normal situation uint64_t size; - EXPECT_CALL(*client_, GetFileSize(_, _)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*client_, GetFileSize(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.GetFileSize("/test", &size)); } @@ -465,8 +416,7 @@ TEST_F(NameSpaceToolCoreTest, TestUpdateThrottle) { // 1. throttle type is invalid { - EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _)) - .Times(0); + EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _)).Times(0); ASSERT_EQ(-1, namespaceTool.UpdateFileThrottle("/test", "hello", 10000, 0, 0)); @@ -476,11 +426,10 @@ { curve::mds::ThrottleParams params; EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _)) - .WillOnce( - DoAll(SaveArg<1>(&params), Return(0))); + .WillOnce(DoAll(SaveArg<1>(&params), Return(0))); ASSERT_EQ(0, namespaceTool.UpdateFileThrottle("/test", "BPS_TOTAL", - 10000, -1, -1)); + 10000, -1, -1)); ASSERT_EQ(10000, params.limit()); ASSERT_FALSE(params.has_burst()); ASSERT_FALSE(params.has_burstlength()); @@ -489,8 +438,7 @@ // 3. burst lower than limit { curve::mds::ThrottleParams params; - EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _)) - .Times(0); + EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _)).Times(0); ASSERT_EQ(-1, namespaceTool.UpdateFileThrottle("/test", "BPS_TOTAL", 10000, 5000, -1)); @@ -504,7 +452,7 @@ .WillOnce(DoAll(SaveArg<1>(&params), Return(0))); ASSERT_EQ(0, namespaceTool.UpdateFileThrottle("/test", "BPS_TOTAL", - 10000, 50000, -1)); + 10000, 50000, -1)); ASSERT_EQ(10000, params.limit()); ASSERT_EQ(50000, params.burst()); ASSERT_EQ(1, params.burstlength()); @@ -518,7 +466,7 @@ .WillOnce(DoAll(SaveArg<1>(&params), Return(0))); ASSERT_EQ(0, namespaceTool.UpdateFileThrottle("/test", "BPS_TOTAL", - 10000, 50000, 10)); + 10000, 50000, 10)); ASSERT_EQ(10000, params.limit()); ASSERT_EQ(50000, params.burst()); ASSERT_EQ(10, params.burstlength()); diff --git a/test/tools/namespace_tool_test.cpp b/test/tools/namespace_tool_test.cpp index a8202bda39..526263446f 100644 --- a/test/tools/namespace_tool_test.cpp +++ b/test/tools/namespace_tool_test.cpp @@ -21,13 +21,15 @@ * Copyright (c) 2018 netease */ -#include #include "src/tools/namespace_tool.h" + +#include + #include "test/tools/mock/mock_namespace_tool_core.h" using ::testing::_; -using ::testing::Return; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; DECLARE_bool(isTest); @@ -39,9 +41,7 @@ DECLARE_bool(showAllocMap); class NameSpaceToolTest : public ::testing::Test { protected: - NameSpaceToolTest() { - FLAGS_isTest = true; - } + NameSpaceToolTest() { FLAGS_isTest = true; } void SetUp() { core_ = std::make_shared(); } @@ -106,80 +106,68 @@ TEST_F(NameSpaceToolTest, GetFile) { PageFileSegment segment; GetSegmentForTest(&segment); FLAGS_fileName = "/test/"; - // 0、Init失败 - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(-1));
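// [Sketch, not part of the patch] The TestUpdateThrottle cases above rely on
// gMock's SaveArg to capture the protobuf the tool builds, so the test can
// assert on individual fields after the call returns. The shape of that
// pattern, using names that appear in the test above:
//
//   curve::mds::ThrottleParams params;
//   EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _))
//       .WillOnce(DoAll(SaveArg<1>(&params), Return(0)));  // copy arg #1
//   ASSERT_EQ(0, namespaceTool.UpdateFileThrottle("/test", "BPS_TOTAL",
//                                                 10000, -1, -1));
//   ASSERT_EQ(10000, params.limit());  // inspect the captured message
//   ASSERT_FALSE(params.has_burst());  // a negative burst means "not set"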
+ // 0. Init failed + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("get")); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); ASSERT_EQ(-1, namespaceTool.RunCommand("abc")); - // 1、正常情况 + // 1. Normal situation FLAGS_showAllocMap = true; EXPECT_CALL(*core_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); curve::tool::AllocMap allocMap = {{1, segmentSize}, {2, 9 * segmentSize}}; EXPECT_CALL(*core_, GetAllocatedSize(_, _, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), - SetArgPointee<2>(allocMap), - Return(0))); + SetArgPointee<2>(allocMap), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("get")); - // 2、获取fileInfo失败 - EXPECT_CALL(*core_, GetFileInfo(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Failed to obtain fileInfo + EXPECT_CALL(*core_, GetFileInfo(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("get")); - // 3、计算大小失败 - EXPECT_CALL(*core_, GetFileInfo(_, _)) + // 3. Failed to calculate the size + EXPECT_CALL(*core_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_CALL(*core_, GetAllocatedSize(_, _, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("get")); - // 4、get的是目录的话还要计算file size + // 4. If the target is a directory, the file size should also be calculated FileInfo fileInfo2; GetFileInfoForTest(&fileInfo2); fileInfo2.set_filetype(curve::mds::FileType::INODE_DIRECTORY); EXPECT_CALL(*core_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo2), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo2), Return(0))); EXPECT_CALL(*core_, GetAllocatedSize(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), Return(0))); EXPECT_CALL(*core_, GetFileSize(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("get")); - // 5、指定了-showAllocSize=false的话不计算分配大小 + // 5. If -showAllocSize=false is specified, the allocation size will not be + // calculated FLAGS_showAllocSize = false; EXPECT_CALL(*core_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("get")); - // 6、对目录指定了-showFileSize=false的话不计算文件大小 + // 6. If -showFileSize=false is specified for a directory, the file size + // will not be calculated FLAGS_showFileSize = false; FLAGS_showAllocSize = false; EXPECT_CALL(*core_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo2), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo2), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("get")); } @@ -190,75 +178,66 @@ TEST_F(NameSpaceToolTest, ListDir) { GetFileInfoForTest(&fileInfo); PageFileSegment segment; GetSegmentForTest(&segment); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 + // 1. 
Normal situation std::vector files; for (uint64_t i = 0; i < 3; ++i) { files.emplace_back(fileInfo); } EXPECT_CALL(*core_, ListDir(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); EXPECT_CALL(*core_, GetAllocatedSize(_, _, _)) .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<1>(10 * segmentSize), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(10 * segmentSize), Return(0))); FLAGS_fileName = "/"; ASSERT_EQ(0, namespaceTool.RunCommand("list")); FLAGS_fileName = "/test/"; ASSERT_EQ(0, namespaceTool.RunCommand("list")); - // 2、listDir失败 - EXPECT_CALL(*core_, ListDir(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. ListDir failed + EXPECT_CALL(*core_, ListDir(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("list")); - // 3、计算大小失败,个别的文件计算大小失败会继续计算,但是返回-1 + // 3. Size calculation failed: if it fails for individual files, the rest + // are still calculated, but -1 is returned EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); EXPECT_CALL(*core_, GetAllocatedSize(_, _, _)) .Times(3) .WillOnce(Return(-1)) - .WillRepeatedly(DoAll(SetArgPointee<1>(10 * segmentSize), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(10 * segmentSize), Return(0))); ASSERT_EQ(-1, namespaceTool.RunCommand("list")); - // 4、指定了-showAllocSize=false的话不计算分配大小 + // 4. If -showAllocSize=false is specified, the allocation size will not be + // calculated FLAGS_showAllocSize = false; EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("list")); - // 4、list的时候有目录的话计算fileSize + // 4. If there is a directory in the list, calculate fileSize FileInfo fileInfo2; GetFileInfoForTest(&fileInfo2); fileInfo2.set_filetype(curve::mds::FileType::INODE_DIRECTORY); files.emplace_back(fileInfo2); EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); EXPECT_CALL(*core_, GetFileSize(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("list")); - // 5、指定了-showFileSize=false的话不计算文件大小 + // 5. If -showFileSize=false is specified, the file size will not be + // calculated FLAGS_showFileSize = false; EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("list")); } @@ -272,81 +251,58 @@ TEST_F(NameSpaceToolTest, SegInfo) { segments.emplace_back(segment); } FLAGS_fileName = "/test"; - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*core_, GetFileSegments(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(segments), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(segments), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("seginfo")); - // 2、GetFileSegment失败 - EXPECT_CALL(*core_, GetFileSegments(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. 
GetFileSegment failed + EXPECT_CALL(*core_, GetFileSegments(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("seginfo")); } TEST_F(NameSpaceToolTest, CreateFile) { curve::tool::NameSpaceTool namespaceTool(core_); namespaceTool.PrintHelp("create"); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 - EXPECT_CALL(*core_, CreateFile(_)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*core_, CreateFile(_)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.RunCommand("create")); - // 2、创建失败 - EXPECT_CALL(*core_, CreateFile(_)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Creation failed + EXPECT_CALL(*core_, CreateFile(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("create")); } TEST_F(NameSpaceToolTest, DeleteFile) { curve::tool::NameSpaceTool namespaceTool(core_); namespaceTool.PrintHelp("delete"); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 - EXPECT_CALL(*core_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*core_, DeleteFile(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.RunCommand("delete")); - // 2、创建失败 - EXPECT_CALL(*core_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Deletion failed + EXPECT_CALL(*core_, DeleteFile(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("delete")); } TEST_F(NameSpaceToolTest, CleanRecycle) { curve::tool::NameSpaceTool namespaceTool(core_); namespaceTool.PrintHelp("clean-recycle"); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 - EXPECT_CALL(*core_, CleanRecycleBin(_, _)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*core_, CleanRecycleBin(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.RunCommand("clean-recycle")); - // 2、失败 - EXPECT_CALL(*core_, CleanRecycleBin(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Failure + EXPECT_CALL(*core_, CleanRecycleBin(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("clean-recycle")); } @@ -361,33 +317,28 @@ TEST_F(NameSpaceToolTest, PrintChunkLocation) { } uint64_t chunkId = 2001; std::pair copyset = {1, 101}; - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*core_, QueryChunkCopyset(_, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(chunkId), - SetArgPointee<3>(copyset), + .WillOnce(DoAll(SetArgPointee<2>(chunkId), SetArgPointee<3>(copyset), Return(0))); EXPECT_CALL(*core_, GetChunkServerListInCopySet(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(csLocs), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(csLocs), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("chunk-location")); - // 2、QueryChunkCopyset失败 + // 2. QueryChunkCopyset failed EXPECT_CALL(*core_, QueryChunkCopyset(_, _, _, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("chunk-location")); - // 3、GetChunkServerListInCopySet失败 + // 3. 
GetChunkServerListInCopySet failed EXPECT_CALL(*core_, QueryChunkCopyset(_, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(chunkId), - SetArgPointee<3>(copyset), + .WillOnce(DoAll(SetArgPointee<2>(chunkId), SetArgPointee<3>(copyset), Return(0))); EXPECT_CALL(*core_, GetChunkServerListInCopySet(_, _, _)) .Times(1) diff --git a/test/tools/raft_log_tool_test.cpp b/test/tools/raft_log_tool_test.cpp index ff70a5ef8b..f026ac064c 100644 --- a/test/tools/raft_log_tool_test.cpp +++ b/test/tools/raft_log_tool_test.cpp @@ -20,16 +20,19 @@ * Author: charisu */ +#include "src/tools/raft_log_tool.h" + #include + #include #include -#include "src/tools/raft_log_tool.h" + #include "test/tools/mock/mock_segment_parser.h" DECLARE_string(fileName); using ::testing::_; -using ::testing::Return; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; namespace curve { @@ -37,12 +40,8 @@ namespace tool { class RaftLogToolTest : public ::testing::Test { protected: - void SetUp() { - parser_ = std::make_shared(); - } - void TearDown() { - parser_ = nullptr; - } + void SetUp() { parser_ = std::make_shared(); } + void TearDown() { parser_ = nullptr; } std::shared_ptr parser_; }; @@ -58,23 +57,19 @@ TEST_F(RaftLogToolTest, PrintHeaders) { raftLogTool.PrintHelp("chunk-meta"); ASSERT_EQ(-1, raftLogTool.RunCommand("chunk-meta")); - // 文件名格式不对 + // The file name format is incorrect FLAGS_fileName = "illegalfilename"; ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); FLAGS_fileName = "/tmp/illegalfilename"; ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); - // parser初始化失败 + // Parser initialization failed FLAGS_fileName = "/tmp/log_inprogress_002"; - EXPECT_CALL(*parser_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*parser_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); - // 解析失败 - EXPECT_CALL(*parser_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + // Parsing failed + EXPECT_CALL(*parser_, Init(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*parser_, GetNextEntryHeader(_)) .Times(1) .WillOnce(Return(false)); @@ -83,10 +78,8 @@ TEST_F(RaftLogToolTest, PrintHeaders) { .WillOnce(Return(false)); ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); - // 正常情况 - EXPECT_CALL(*parser_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + // Normal situation + EXPECT_CALL(*parser_, Init(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*parser_, GetNextEntryHeader(_)) .Times(3) .WillOnce(Return(true)) @@ -100,4 +93,3 @@ } // namespace tool } // namespace curve - diff --git a/test/tools/segment_parser_test.cpp b/test/tools/segment_parser_test.cpp index 3f9e1f465f..12e6614a9f 100644 --- a/test/tools/segment_parser_test.cpp +++ b/test/tools/segment_parser_test.cpp @@ -21,8 +21,10 @@ */ #include + #include #include + #include "src/tools/raft_log_tool.h" #include "test/fs/mock_local_filesystem.h" @@ -31,8 +33,8 @@ namespace tool { using curve::fs::MockLocalFileSystem; using ::testing::_; -using ::testing::Return; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; @@ -41,23 +43,19 @@ const uint32_t DATA_LEN = 20; class SetmentParserTest : public ::testing::Test { protected: - void SetUp() { - localFs_ = std::make_shared(); - } - void TearDown() { - localFs_ = nullptr; - } + void SetUp() { localFs_ = std::make_shared(); } + void TearDown() { localFs_ = nullptr; } void PackHeader(const EntryHeader& header, char* buf, bool checkFail
= false) { memset(buf, 0, ENTRY_HEADER_SIZE); - const uint32_t meta_field = (header.type << 24) | - (header.checksum_type << 16); + const uint32_t meta_field = + (header.type << 24) | (header.checksum_type << 16); butil::RawPacker packer(buf); packer.pack64(header.term) - .pack32(meta_field) - .pack32((uint32_t)header.data_len) - .pack32(header.data_checksum); + .pack32(meta_field) + .pack32((uint32_t)header.data_len) + .pack32(header.data_checksum); uint32_t checkSum = braft::murmurhash32(buf, ENTRY_HEADER_SIZE - 4); if (checkFail) { packer.pack32(checkSum + 1); @@ -71,29 +69,23 @@ class SetmentParserTest : public ::testing::Test { TEST_F(SetmentParserTest, Init) { SegmentParser parser(localFs_); - // 1、打开文件失败 + // 1. Failed to open file EXPECT_CALL(*localFs_, Open(_, _)) .Times(3) .WillOnce(Return(-1)) .WillRepeatedly(Return(1)); ASSERT_EQ(-1, parser.Init(fileName)); - // 2、获取文件大小失败 - EXPECT_CALL(*localFs_, Fstat(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Failed to obtain file size + EXPECT_CALL(*localFs_, Fstat(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, parser.Init(fileName)); - // 3、成功 - EXPECT_CALL(*localFs_, Fstat(_, _)) - .Times(1) - .WillOnce(Return(0)); + // 3. Success + EXPECT_CALL(*localFs_, Fstat(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, parser.Init(fileName)); - // 4、反初始化 - EXPECT_CALL(*localFs_, Close(_)) - .Times(1) - .WillOnce(Return(0)); + // 4. De-initialization + EXPECT_CALL(*localFs_, Close(_)).Times(1).WillOnce(Return(0)); parser.UnInit(); } @@ -102,13 +94,10 @@ TEST_F(SetmentParserTest, GetNextEntryHeader) { struct stat stBuf; stBuf.st_size = 88; - EXPECT_CALL(*localFs_, Open(_, _)) - .Times(1) - .WillOnce(Return(1)); + EXPECT_CALL(*localFs_, Open(_, _)).Times(1).WillOnce(Return(1)); EXPECT_CALL(*localFs_, Fstat(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(stBuf), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(stBuf), Return(0))); ASSERT_EQ(0, parser.Init(fileName)); EntryHeader header; @@ -120,30 +109,30 @@ TEST_F(SetmentParserTest, GetNextEntryHeader) { header.data_checksum = 73235795; char header_buf[ENTRY_HEADER_SIZE] = {0}; - // 读出来的数据大小不对 + // The size of the data read out is incorrect EXPECT_CALL(*localFs_, Read(_, _, _, ENTRY_HEADER_SIZE)) .Times(1) .WillOnce(Return(22)); ASSERT_FALSE(parser.GetNextEntryHeader(&header2)); ASSERT_FALSE(parser.SuccessfullyFinished()); - // 校验失败 + // Verification failed PackHeader(header, header_buf, true); EXPECT_CALL(*localFs_, Read(_, _, _, ENTRY_HEADER_SIZE)) .Times(1) - .WillOnce(DoAll(SetArrayArgument<1>(header_buf, - header_buf + ENTRY_HEADER_SIZE), - Return(24))); + .WillOnce(DoAll( + SetArrayArgument<1>(header_buf, header_buf + ENTRY_HEADER_SIZE), + Return(24))); ASSERT_FALSE(parser.GetNextEntryHeader(&header2)); ASSERT_FALSE(parser.SuccessfullyFinished()); - // 正常情况 + // Normal situation PackHeader(header, header_buf); EXPECT_CALL(*localFs_, Read(_, _, _, ENTRY_HEADER_SIZE)) .Times(2) - .WillRepeatedly(DoAll(SetArrayArgument<1>(header_buf, - header_buf + ENTRY_HEADER_SIZE), - Return(24))); + .WillRepeatedly(DoAll( + SetArrayArgument<1>(header_buf, header_buf + ENTRY_HEADER_SIZE), + Return(24))); ASSERT_TRUE(parser.GetNextEntryHeader(&header2)); ASSERT_EQ(header, header2); ASSERT_TRUE(parser.GetNextEntryHeader(&header2)); @@ -155,4 +144,3 @@ TEST_F(SetmentParserTest, GetNextEntryHeader) { } // namespace tool } // namespace curve - diff --git a/test/tools/snapshot_clone_client_test.cpp b/test/tools/snapshot_clone_client_test.cpp index 024a270a69..9a87583dd8 100644 --- 
a/test/tools/snapshot_clone_client_test.cpp +++ b/test/tools/snapshot_clone_client_test.cpp @@ -20,28 +20,27 @@ * Author: charisu */ +#include "src/tools/snapshot_clone_client.h" + #include + #include -#include "src/tools/snapshot_clone_client.h" + #include "test/tools/mock/mock_metric_client.h" using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; namespace curve { namespace tool { class SnapshotCloneClientTest : public ::testing::Test { protected: - void SetUp() { - metricClient_ = std::make_shared(); - } + void SetUp() { metricClient_ = std::make_shared(); } - void TearDown() { - metricClient_ = nullptr; - } + void TearDown() { metricClient_ = nullptr; } std::shared_ptr metricClient_; }; @@ -50,60 +49,57 @@ TEST_F(SnapshotCloneClientTest, Init) { // no snapshot clone server ASSERT_EQ(1, client.Init("", "")); ASSERT_EQ(-1, client.Init("127.0.0.1:5555", "")); - // dummy server与mds不匹配 + // Dummy server and mds do not match ASSERT_EQ(-1, client.Init("127.0.0.1:5555", "8081,8082,8083")); ASSERT_EQ(0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", - "9091,9092,9093")); - std::map expected = - {{"127.0.0.1:5555", "127.0.0.1:9091"}, - {"127.0.0.1:5556", "127.0.0.1:9092"}, - {"127.0.0.1:5557", "127.0.0.1:9093"}}; + "9091,9092,9093")); + std::map expected = { + {"127.0.0.1:5555", "127.0.0.1:9091"}, + {"127.0.0.1:5556", "127.0.0.1:9092"}, + {"127.0.0.1:5557", "127.0.0.1:9093"}}; ASSERT_EQ(expected, client.GetDummyServerMap()); } TEST_F(SnapshotCloneClientTest, GetActiveAddr) { - // 正常情况 + // Normal situation SnapshotCloneClient client(metricClient_); - ASSERT_EQ(0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", - "9091")); + ASSERT_EQ( + 0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", "9091")); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(4) - .WillOnce(DoAll(SetArgPointee<2>("active"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("active"), - Return(MetricRet::kOK))) - .WillRepeatedly(DoAll(SetArgPointee<2>("standby"), - Return(MetricRet::kOK))); + .WillOnce(DoAll(SetArgPointee<2>("active"), Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("active"), Return(MetricRet::kOK))) + .WillRepeatedly( + DoAll(SetArgPointee<2>("standby"), Return(MetricRet::kOK))); std::vector activeAddr = client.GetActiveAddrs(); ASSERT_EQ(1, activeAddr.size()); ASSERT_EQ("127.0.0.1:5555", activeAddr[0]); - // 有一个dummyserver显示active,服务端口访问失败 + // There is a dummyserver displaying active, and the service port access + // failed EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(4) - .WillOnce(DoAll(SetArgPointee<2>("active"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("active"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kOtherErr)) - .WillRepeatedly(DoAll(SetArgPointee<2>("standby"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("standby"), Return(MetricRet::kOK))); activeAddr = client.GetActiveAddrs(); ASSERT_TRUE(activeAddr.empty()); - // 有一个获取metric失败,其他返回standby + // One failed to obtain metric, while the others returned standby EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) .WillOnce(Return(MetricRet::kNotFound)) - .WillRepeatedly(DoAll(SetArgPointee<2>("standby"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("standby"), Return(MetricRet::kOK))); ASSERT_TRUE(client.GetActiveAddrs().empty()); - // 有两个active状态的 + // Having two active states EXPECT_CALL(*metricClient_, 
GetMetric(_, _, _)) .Times(5) - .WillOnce(DoAll(SetArgPointee<2>("standby"), - Return(MetricRet::kOK))) - .WillRepeatedly(DoAll(SetArgPointee<2>("active"), - Return(MetricRet::kOK))); + .WillOnce(DoAll(SetArgPointee<2>("standby"), Return(MetricRet::kOK))) + .WillRepeatedly( + DoAll(SetArgPointee<2>("active"), Return(MetricRet::kOK))); activeAddr = client.GetActiveAddrs(); ASSERT_EQ(2, activeAddr.size()); ASSERT_EQ("127.0.0.1:5556", activeAddr[0]); @@ -112,15 +108,16 @@ TEST_F(SnapshotCloneClientTest, GetActiveAddr) { TEST_F(SnapshotCloneClientTest, GetOnlineStatus) { SnapshotCloneClient client(metricClient_); - ASSERT_EQ(0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", - "9091")); - // 有一个在线,有一个获取metric失败,有一个listen addr不匹配 + ASSERT_EQ( + 0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", "9091")); + // One online, one failed to obtain metric, and one did not match the listen + // addr EXPECT_CALL(*metricClient_, GetConfValueFromMetric(_, _, _)) .Times(3) - .WillOnce(DoAll(SetArgPointee<2>("127.0.0.1:5555"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("127.0.0.1:5557"), - Return(MetricRet::kOK))) + .WillOnce( + DoAll(SetArgPointee<2>("127.0.0.1:5555"), Return(MetricRet::kOK))) + .WillOnce( + DoAll(SetArgPointee<2>("127.0.0.1:5557"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)); std::map onlineStatus; client.GetOnlineStatus(&onlineStatus); diff --git a/test/tools/status_tool_test.cpp b/test/tools/status_tool_test.cpp index 8b33183220..8dba1a8f94 100644 --- a/test/tools/status_tool_test.cpp +++ b/test/tools/status_tool_test.cpp @@ -19,25 +19,28 @@ * File Created: 2019-11-26 * Author: charisu */ +#include "src/tools/status_tool.h" + #include + #include -#include "src/tools/status_tool.h" -#include "test/tools/mock/mock_namespace_tool_core.h" -#include "test/tools/mock/mock_copyset_check_core.h" + #include "test/tools//mock/mock_mds_client.h" +#include "test/tools/mock/mock_copyset_check_core.h" #include "test/tools/mock/mock_etcd_client.h" -#include "test/tools/mock/mock_version_tool.h" #include "test/tools/mock/mock_metric_client.h" +#include "test/tools/mock/mock_namespace_tool_core.h" #include "test/tools/mock/mock_snapshot_clone_client.h" +#include "test/tools/mock/mock_version_tool.h" +using curve::mds::topology::AllocateStatus; +using curve::mds::topology::LogicalPoolType; using ::testing::_; +using ::testing::An; +using ::testing::DoAll; using ::testing::Return; using ::testing::ReturnRef; -using ::testing::DoAll; using ::testing::SetArgPointee; -using ::testing::An; -using curve::mds::topology::LogicalPoolType; -using curve::mds::topology::AllocateStatus; DECLARE_bool(offline); DECLARE_bool(unhealthy); @@ -76,7 +79,7 @@ class StatusToolTest : public ::testing::Test { pool->set_desc("physical pool for test"); } - void GetLogicalPoolForTest(PoolIdType id, LogicalPoolInfo *lpInfo, + void GetLogicalPoolForTest(PoolIdType id, LogicalPoolInfo* lpInfo, bool getSpace = true) { lpInfo->set_logicalpoolid(id); lpInfo->set_logicalpoolname("defaultLogicalPool"); @@ -89,9 +92,9 @@ class StatusToolTest : public ::testing::Test { lpInfo->set_allocatestatus(AllocateStatus::ALLOW); } - void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo *csInfo, - uint64_t csId, bool offline = false, - bool retired = false) { + void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo* csInfo, + uint64_t csId, bool offline = false, + bool retired = false) { csInfo->set_chunkserverid(csId); csInfo->set_disktype("ssd"); 
csInfo->set_hostip("127.0.0.1"); @@ -113,7 +116,7 @@ class StatusToolTest : public ::testing::Test { csInfo->set_diskused(512); } - void GetServerInfoForTest(curve::mds::topology::ServerInfo *server, + void GetServerInfoForTest(curve::mds::topology::ServerInfo* server, uint64_t id) { server->set_serverid(id); server->set_hostname("localhost"); @@ -137,8 +140,7 @@ class StatusToolTest : public ::testing::Test { }; TEST_F(StatusToolTest, InitAndSupportCommand) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); ASSERT_TRUE(statusTool.SupportCommand("status")); ASSERT_TRUE(statusTool.SupportCommand("space")); @@ -153,10 +155,9 @@ } TEST_F(StatusToolTest, InitFail) { - StatusTool statusTool1(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, - metricClient_, snapshotClient_); - // 1、status命令需要所有的init + StatusTool statusTool1(mdsClient_, etcdClient_, copysetCheck_, versionTool_, + metricClient_, snapshotClient_); + // 1. The status command requires all inits EXPECT_CALL(*mdsClient_, Init(_, _)) .Times(3) .WillOnce(Return(-1)) @@ -169,50 +170,38 @@ .Times(2) .WillOnce(Return(-1)) .WillOnce(Return(0)); - EXPECT_CALL(*snapshotClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*snapshotClient_, Init(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool1.RunCommand("status")); ASSERT_EQ(-1, statusTool1.RunCommand("status")); ASSERT_EQ(-1, statusTool1.RunCommand("status")); ASSERT_EQ(-1, statusTool1.RunCommand("status")); - // 2、etcd-status命令只需要初始化etcdClinet - StatusTool statusTool2(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, - metricClient_, snapshotClient_); - EXPECT_CALL(*etcdClient_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + // 2. The etcd-status command only needs to initialize etcdClient + StatusTool statusTool2(mdsClient_, etcdClient_, copysetCheck_, versionTool_, + metricClient_, snapshotClient_); + EXPECT_CALL(*etcdClient_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool2.RunCommand("etcd-status")); - // 3、space和其他命令不需要初始化etcdClient - StatusTool statusTool3(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, - metricClient_, snapshotClient_); + // 3. Space and other commands do not require initialization of etcdClient + StatusTool statusTool3(mdsClient_, etcdClient_, copysetCheck_, versionTool_, + metricClient_, snapshotClient_); EXPECT_CALL(*mdsClient_, Init(_, _)) .Times(2) .WillOnce(Return(-1)) .WillRepeatedly(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool3.RunCommand("space")); ASSERT_EQ(-1, statusTool3.RunCommand("chunkserver-list")); - // 4、snapshot-clone-status只需要snapshot clone - StatusTool statusTool4(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, - metricClient_, snapshotClient_); - EXPECT_CALL(*snapshotClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 4. 
snapshot-clone-status only requires snapshot clone + StatusTool statusTool4(mdsClient_, etcdClient_, copysetCheck_, versionTool_, + metricClient_, snapshotClient_); + EXPECT_CALL(*snapshotClient_, Init(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool4.RunCommand("snapshot-clone-status")); } TEST_F(StatusToolTest, SpaceCmd) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); statusTool.PrintHelp("space"); statusTool.PrintHelp("123"); @@ -221,92 +210,70 @@ TEST_F(StatusToolTest, SpaceCmd) { std::vector lgPools; lgPools.emplace_back(lgPool); - // 设置Init的期望 - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + // Set expectations for Init + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(lgPools), Return(0))); EXPECT_CALL(*mdsClient_, GetFileSize(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(4) - .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("space")); ASSERT_EQ(-1, statusTool.RunCommand("123")); - // 2、ListLogicalPoolsInPhysicalPool失败的情况 + // 2. The situation of ListLogicalPoolsInPhysicalPool failure EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("space")); - // 3、获取filesize失败 + // 3. Failed to obtain filesize EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(lgPools), - Return(0))); - EXPECT_CALL(*mdsClient_, GetFileSize(_, _)) - .Times(1) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<0>(lgPools), Return(0))); + EXPECT_CALL(*mdsClient_, GetFileSize(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("space")); - // 4、获取metric失败的情况 + // 4. 
Failure to obtain metric EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(lgPools), Return(0))); EXPECT_CALL(*mdsClient_, GetFileSize(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), - Return(0))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), - Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), Return(0))) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("space")); ASSERT_EQ(-1, statusTool.RunCommand("space")); - // 5、获取RecyleBin大小失败的情况 + // 5. Failure in obtaining the size of RecycleBin EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(lgPools), Return(0))); EXPECT_CALL(*mdsClient_, GetFileSize(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(4) - .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)) .Times(1) .WillOnce(Return(-1)); @@ -314,33 +281,28 @@ } TEST_F(StatusToolTest, ChunkServerCmd) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); statusTool.PrintHelp("chunkserver-list"); std::vector chunkservers; - // 加入5个chunkserver,2个offline + // Add 5 chunkservers, 2 of them offline ChunkServerInfo csInfo; for (uint64_t i = 1; i <= 5; ++i) { GetCsInfoForTest(&csInfo, i, i <= 2); chunkservers.emplace_back(csInfo); } - // 设置Init的期望 - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - - // 正常情况,有一个chunkserver的UnhealthyRatio大于0 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) - .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); - EXPECT_CALL(*copysetCheck_, CheckCopysetsOnChunkServer( - An())) + // Set expectations for Init + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); + + // Under normal circumstances, there is a chunkserver with an UnhealthyRatio + // greater than 0 + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) + .Times(1) + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); + EXPECT_CALL(*copysetCheck_, + CheckCopysetsOnChunkServer(An())) .Times(3) .WillRepeatedly(Return(0)); EXPECT_CALL(*copysetCheck_, GetCopysetStatistics()) @@ -349,23 +311,21 @@ 
TEST_F(StatusToolTest, ChunkServerCmd) { .WillRepeatedly(Return(statistics1)); ASSERT_EQ(0, statusTool.RunCommand("chunkserver-list")); - // 只显示offline的 + // Only display offline FLAGS_offline = true; - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("chunkserver-list")); - // 只显示unhealthy ratio大于0的 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // Show only those with unhealthy ratio greater than 0 + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); - EXPECT_CALL(*copysetCheck_, CheckCopysetsOnChunkServer( - An())) + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); + EXPECT_CALL(*copysetCheck_, + CheckCopysetsOnChunkServer(An())) .Times(3) .WillRepeatedly(Return(0)); EXPECT_CALL(*copysetCheck_, GetCopysetStatistics()) @@ -376,21 +336,21 @@ TEST_F(StatusToolTest, ChunkServerCmd) { FLAGS_unhealthy = true; ASSERT_EQ(0, statusTool.RunCommand("chunkserver-list")); - // list chunkserver失败 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // List chunkserver failed + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("chunkserver-list")); - // FLAGS_checkCSAlive为true的时候,会发送rpc检查chunkserver在线状态 + // when FLAGS_checkCSAlive is true, an rpc will be sent to check the online + // status of the chunkserver FLAGS_checkHealth = false; FLAGS_checkCSAlive = true; - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*copysetCheck_, CheckChunkServerOnline(_)) .Times(5) .WillOnce(Return(false)) @@ -399,8 +359,7 @@ TEST_F(StatusToolTest, ChunkServerCmd) { } TEST_F(StatusToolTest, StatusCmdCommon) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); statusTool.PrintHelp("status"); statusTool.PrintHelp("chunkserver-status"); @@ -422,10 +381,9 @@ TEST_F(StatusToolTest, StatusCmdCommon) { {"0.0.2", {"127.0.0.1:8002"}}, {"0.0.3", {"127.0.0.1:8003"}}}; ClientVersionMapType clientVersionMap = {{"nebd-server", versionMap}, - {"python", versionMap}, - {"qemu", versionMap}}; - std::vector offlineList = {"127.0.0.1:8004", - "127.0.0.1:8005"}; + {"python", versionMap}, + {"qemu", versionMap}}; + std::vector offlineList = {"127.0.0.1:8004", "127.0.0.1:8005"}; std::vector leaderAddr = {"127.0.0.1:2379"}; std::map onlineState = {{"127.0.0.1:2379", true}, {"127.0.0.1:2381", true}, @@ -440,22 +398,14 @@ TEST_F(StatusToolTest, StatusCmdCommon) { } chunkservers.emplace(1, chunkserverList); - // 设置Init的期望 - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*etcdClient_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*snapshotClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); + // Set expectations for Init + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + 
EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*etcdClient_, Init(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*snapshotClient_, Init(_, _)).Times(1).WillOnce(Return(0)); - // 正常情况 - // 1、设置cluster的输出 + // Normal situation + // 1. Set the output of the cluster EXPECT_CALL(*copysetCheck_, CheckCopysetsInCluster()) .Times(1) .WillOnce(Return(0)); @@ -464,41 +414,31 @@ TEST_F(StatusToolTest, StatusCmdCommon) { .WillOnce(Return(statistics1)); EXPECT_CALL(*mdsClient_, ListPhysicalPoolsInCluster(_)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(phyPools), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(phyPools), Return(0))); EXPECT_CALL(*mdsClient_, ListLogicalPoolsInPhysicalPool(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(lgPools), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(lgPools), Return(0))); EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(lgPools), Return(0))); EXPECT_CALL(*mdsClient_, GetFileSize(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(4) - .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); - // 设置client status的输出 + // Set the output of client status EXPECT_CALL(*versionTool_, GetClientVersion(_)) - .WillOnce(DoAll(SetArgPointee<0>(clientVersionMap), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(clientVersionMap), Return(0))); - // 2、设置MDS status的输出 + // 2. Set the output of MDS status EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(2) .WillRepeatedly(Return(mdsAddr)); @@ -506,25 +446,21 @@ TEST_F(StatusToolTest, StatusCmdCommon) { .Times(2) .WillRepeatedly(SetArgPointee<0>(mdsOnlineStatus)); EXPECT_CALL(*versionTool_, GetAndCheckMdsVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); - // 3、设置etcd status的输出 + // 3. 
Set the output of etcd status EXPECT_CALL(*etcdClient_, GetAndCheckEtcdVersion(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>("3.4.1"), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>("3.4.1"), Return(0))); EXPECT_CALL(*etcdClient_, GetEtcdClusterStatus(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<0>(leaderAddr), - SetArgPointee<1>(onlineState), - Return(0))); + SetArgPointee<1>(onlineState), Return(0))); - // 设置snapshot clone的输出 + // Set the output of snapshot clone std::vector activeAddr = {"127.0.0.1:5555"}; EXPECT_CALL(*versionTool_, GetAndCheckSnapshotCloneVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); EXPECT_CALL(*snapshotClient_, GetActiveAddrs()) .Times(2) .WillRepeatedly(Return(activeAddr)); @@ -532,39 +468,36 @@ .Times(2) .WillRepeatedly(SetArgPointee<0>(onlineState)); - // 4、设置chunkserver status的输出 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An>*>())) + // 4. Set the output of chunkserver status + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster( + An>*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*versionTool_, GetAndCheckChunkServerVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); EXPECT_CALL(*metricClient_, GetMetricUint(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(1000), - Return(MetricRet::kOK))); + .WillRepeatedly(DoAll(SetArgPointee<2>(1000), Return(MetricRet::kOK))); EXPECT_CALL(*copysetCheck_, CheckChunkServerOnline(_)) .Times(3) .WillRepeatedly(Return(true)); ASSERT_EQ(0, statusTool.RunCommand("status")); - // 5、设置chunkserver status的输出 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An>*>())) + // 5. Set the output of chunkserver status + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster( + An>*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*versionTool_, GetAndCheckChunkServerVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); EXPECT_CALL(*copysetCheck_, CheckChunkServerOnline(_)) .Times(3) .WillRepeatedly(Return(true)); ASSERT_EQ(0, statusTool.RunCommand("chunkserver-status")); - // 6、设置mds status的输出 + // 6. Set the output of mds status EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(mdsAddr)); @@ -572,37 +505,26 @@ .Times(1) .WillOnce(SetArgPointee<0>(mdsOnlineStatus)); EXPECT_CALL(*versionTool_, GetAndCheckMdsVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("mds-status")); - // 7、设置etcd status的输出 + // 7. 
Set the output of etcd status EXPECT_CALL(*etcdClient_, GetEtcdClusterStatus(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(leaderAddr), - SetArgPointee<1>(onlineState), - Return(0))); + SetArgPointee<1>(onlineState), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("etcd-status")); } TEST_F(StatusToolTest, StatusCmdError) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); - // 设置Init的期望 - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*etcdClient_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*snapshotClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); + // Set expectations for Init + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*etcdClient_, Init(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*snapshotClient_, Init(_, _)).Times(1).WillOnce(Return(0)); // 1、cluster unhealthy EXPECT_CALL(*copysetCheck_, CheckCopysetsInCluster()) @@ -611,24 +533,22 @@ TEST_F(StatusToolTest, StatusCmdError) { EXPECT_CALL(*copysetCheck_, GetCopysetStatistics()) .Times(1) .WillOnce(Return(statistics2)); - // 列出物理池失败 + // Failed to list physical pools EXPECT_CALL(*mdsClient_, ListPhysicalPoolsInCluster(_)) .Times(1) .WillRepeatedly(Return(-1)); - // 列出逻辑池失败 + // Failed to list logical pools EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillRepeatedly(Return(-1)); - // 获取client version失败 - EXPECT_CALL(*versionTool_, GetClientVersion(_)) - .WillOnce(Return(-1)); + // Failed to obtain client version + EXPECT_CALL(*versionTool_, GetClientVersion(_)).WillOnce(Return(-1)); - // 2、当前无mds可用 + // 2. Currently, no mds are available std::vector failedList = {"127.0.0.1:6666", "127.0.0.1:6667"}; EXPECT_CALL(*versionTool_, GetAndCheckMdsVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(failedList), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(failedList), Return(0))); std::map mdsOnlineStatus = {{"127.0.0.1:6666", false}, {"127.0.0.1:6667", false}}; EXPECT_CALL(*mdsClient_, GetCurrentMds()) @@ -638,7 +558,7 @@ TEST_F(StatusToolTest, StatusCmdError) { .Times(2) .WillRepeatedly(SetArgPointee<0>(mdsOnlineStatus)); - // 3、GetEtcdClusterStatus失败 + // 3. GetEtcdClusterStatus failed EXPECT_CALL(*etcdClient_, GetAndCheckEtcdVersion(_, _)) .Times(1) .WillOnce(Return(-1)); @@ -646,10 +566,9 @@ TEST_F(StatusToolTest, StatusCmdError) { .Times(2) .WillRepeatedly(Return(-1)); - // 当前无snapshot clone server可用 + // Currently, no snapshot clone server is available EXPECT_CALL(*versionTool_, GetAndCheckSnapshotCloneVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(failedList), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(failedList), Return(0))); std::map onlineStatus = {{"127.0.0.1:5555", false}, {"127.0.0.1:5556", false}}; EXPECT_CALL(*snapshotClient_, GetActiveAddrs()) @@ -659,42 +578,42 @@ TEST_F(StatusToolTest, StatusCmdError) { .Times(2) .WillRepeatedly(SetArgPointee<0>(onlineStatus)); - // 4、获取chunkserver version失败并ListChunkServersInCluster失败 + // 4. 
Failed to obtain chunkserver version and ListChunkServersInCluster EXPECT_CALL(*versionTool_, GetAndCheckChunkServerVersion(_, _)) .WillOnce(Return(-1)); - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An>*>())) + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster( + An>*>())) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("status")); - // 获取mds在线状态失败 + // Failed to obtain mds online status EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(std::vector())); EXPECT_CALL(*mdsClient_, GetMdsOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(mdsOnlineStatus)); - // 获取mdsversion失败 + // Failed to obtain mdsversion EXPECT_CALL(*versionTool_, GetAndCheckMdsVersion(_, _)) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("mds-status")); - // 个别chunkserver获取version失败 + // Individual chunkservers failed to obtain version EXPECT_CALL(*versionTool_, GetAndCheckChunkServerVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - SetArgPointee<1>(failedList), - Return(0))); - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An>*>())) + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), SetArgPointee<1>(failedList), + Return(0))); + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster( + An>*>())) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("chunkserver-status")); } TEST_F(StatusToolTest, IsClusterHeatlhy) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); std::map onlineStatus = {{"127.0.0.1:8001", true}, {"127.0.0.1:8002", true}, @@ -702,55 +621,54 @@ TEST_F(StatusToolTest, IsClusterHeatlhy) { std::map onlineStatus2 = {{"127.0.0.1:8001", true}, {"127.0.0.1:8002", false}, {"127.0.0.1:8003", true}}; - // 1、copysets不健康 + // 1. Copysets are unhealthy EXPECT_CALL(*copysetCheck_, CheckCopysetsInCluster()) .Times(1) .WillOnce(Return(-1)); - // 2、没有mds可用 + // 2. No mds available EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(std::vector())); - // 3、有mds不在线 + // 3. There are MDSs that are not online EXPECT_CALL(*mdsClient_, GetMdsOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus2)); - // 4、获取etcd集群状态失败 + // 4. Failed to obtain the ETCD cluster status EXPECT_CALL(*etcdClient_, GetEtcdClusterStatus(_, _)) .Times(1) .WillOnce(Return(-1)); - // 5、没有snapshot-clone-server可用 + // 5. No snapshot-clone-server available EXPECT_CALL(*snapshotClient_, GetActiveAddrs()) .Times(1) .WillOnce(Return(std::vector())); - // 6、有snapshot-clone-server不在线 + // 6. There is snapshot-clone-server that is not online EXPECT_CALL(*snapshotClient_, GetOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus2)); ASSERT_FALSE(statusTool.IsClusterHeatlhy()); - // 1、copyset健康 + // 1. Copyset Health EXPECT_CALL(*copysetCheck_, CheckCopysetsInCluster()) .Times(1) .WillOnce(Return(0)); - // 2、超过一个mds在服务 + // 2. More than one mds is in service EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(std::vector(2))); - // 3、mds都在线 + // 3. MDS is all online EXPECT_CALL(*mdsClient_, GetMdsOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus)); - // 4、etcd没有leader且有etcd不在线 + // 4. 
ETCD does not have a leader and there are ETCDs that are not online EXPECT_CALL(*etcdClient_, GetEtcdClusterStatus(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(std::vector()), - SetArgPointee<1>(onlineStatus2), - Return(0))); - // 5、有多个snapshot-clone-server可用 + SetArgPointee<1>(onlineStatus2), Return(0))); + // 5. Multiple snapshot-clone-server are available EXPECT_CALL(*snapshotClient_, GetActiveAddrs()) .Times(1) .WillOnce(Return(std::vector(2))); - // 9、snapshot-clone-server都在线 + // 9. snapshot-clone-server is all online EXPECT_CALL(*snapshotClient_, GetOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus)); @@ -758,43 +676,30 @@ TEST_F(StatusToolTest, IsClusterHeatlhy) { } TEST_F(StatusToolTest, ListClientCmd) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); std::vector clientAddrs; for (int i = 0; i < 10; ++i) { clientAddrs.emplace_back("127.0.0.1:900" + std::to_string(i)); } - // 成功 + // Success EXPECT_CALL(*mdsClient_, ListClient(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("client-list")); - // 失败 - EXPECT_CALL(*mdsClient_, ListClient(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // Failed + EXPECT_CALL(*mdsClient_, ListClient(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("client-list")); } TEST_F(StatusToolTest, ServerList) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); std::vector servers; for (int i = 0; i < 3; ++i) { @@ -802,13 +707,12 @@ TEST_F(StatusToolTest, ServerList) { GetServerInfoForTest(&server, i); servers.emplace_back(server); } - // 成功 + // Success EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(servers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(servers), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("server-list")); - // 失败 + // Failed EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) .WillOnce(Return(-1)); @@ -816,15 +720,10 @@ TEST_F(StatusToolTest, ServerList) { } TEST_F(StatusToolTest, LogicalPoolList) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); std::vector lgPools; for (int i = 1; i <= 3; ++i) { @@ -832,30 +731,25 @@ TEST_F(StatusToolTest, 
LogicalPoolList) { GetLogicalPoolForTest(i, &lgPool); lgPools.emplace_back(lgPool); } - // 成功 + // Success EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(lgPools), Return(0))); AllocMap allocMap = {{1, DefaultSegmentSize}, {2, DefaultSegmentSize * 20}}; EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(allocMap), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(allocMap), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("logical-pool-list")); - // 失败 + // Failed EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("logical-pool-list")); EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(lgPools), - Return(0))); - EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<0>(lgPools), Return(0))); + EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("logical-pool-list")); } } // namespace tool } // namespace curve - diff --git a/test/tools/version_tool_test.cpp b/test/tools/version_tool_test.cpp index 64581f73ac..db40892f40 100644 --- a/test/tools/version_tool_test.cpp +++ b/test/tools/version_tool_test.cpp @@ -21,21 +21,23 @@ * Copyright (c) 2018 netease */ -#include #include "src/tools/version_tool.h" + +#include + #include "test/tools/mock/mock_mds_client.h" #include "test/tools/mock/mock_metric_client.h" #include "test/tools/mock/mock_snapshot_clone_client.h" +using curve::mds::topology::ChunkServerStatus; +using curve::mds::topology::DiskState; +using curve::mds::topology::OnlineState; using ::testing::_; +using ::testing::An; +using ::testing::DoAll; using ::testing::Return; using ::testing::ReturnRef; -using ::testing::DoAll; using ::testing::SetArgPointee; -using ::testing::An; -using curve::mds::topology::ChunkServerStatus; -using curve::mds::topology::DiskState; -using curve::mds::topology::OnlineState; namespace curve { namespace tool { @@ -53,8 +55,8 @@ class VersionToolTest : public ::testing::Test { metricClient_ = nullptr; } - void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo *csInfo, - uint64_t csId) { + void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo* csInfo, + uint64_t csId) { csInfo->set_chunkserverid(csId); csInfo->set_disktype("ssd"); csInfo->set_hostip("127.0.0.1"); @@ -73,64 +75,61 @@ class VersionToolTest : public ::testing::Test { TEST_F(VersionToolTest, GetAndCheckMdsVersion) { VersionTool versionTool(mdsClient_, metricClient_, snapshotClient_); - std::map dummyServerMap = - {{"127.0.0.1:6666", "127.0.0.1:6667"}, - {"127.0.0.1:6668", "127.0.0.1:6669"}, - {"127.0.0.1:6670", "127.0.0.1:6671"}}; + std::map dummyServerMap = { + {"127.0.0.1:6666", "127.0.0.1:6667"}, + {"127.0.0.1:6668", "127.0.0.1:6669"}, + {"127.0.0.1:6670", "127.0.0.1:6671"}}; - // 1、正常情况 + // 1. 
Normal situation EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); std::string version; std::vector failedList; ASSERT_EQ(0, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); ASSERT_TRUE(failedList.empty()); - // 2、获取部分mds curve_version失败 + // 2. Obtain partial mds curve_version failed EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) .WillOnce(Return(MetricRet::kOtherErr)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); ASSERT_EQ(0, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); std::vector expectedList = {"127.0.0.1:6667"}; ASSERT_EQ(expectedList, failedList); - // 3、dummyServerMap为空 + // 3. dummyServerMap is empty std::map dummyServerMap2; EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap2)); - EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) - .Times(0); + EXPECT_CALL(*metricClient_, GetMetric(_, _, _)).Times(0); ASSERT_EQ(-1, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 4、version不一致 + // 4. version inconsistency EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)); ASSERT_EQ(-1, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 5、老版本mds + // 5. Old version of mds EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); @@ -151,123 +150,112 @@ TEST_F(VersionToolTest, GetChunkServerVersion) { chunkservers.emplace_back(csInfo); } - // 1、正常情况 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 1. Normal situation + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); std::string version; std::vector failedList; - ASSERT_EQ(0, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + ASSERT_EQ(0, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); ASSERT_TRUE(failedList.empty()); - // 2、ListChunkServersInCluster失败 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 2. 
ListChunkServersInCluster failed + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) .WillOnce(Return(-1)); - ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + ASSERT_EQ(-1, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); - // 3、获取metric失败 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 3. Failed to obtain metric + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) .WillOnce(Return(MetricRet::kOtherErr)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); - ASSERT_EQ(0, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); + ASSERT_EQ(0, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); std::vector expectList = {"127.0.0.1:9191"}; ASSERT_EQ(expectList, failedList); - // 4、chunkserverList为空 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 4. chunkserverList is empty + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(std::vector()), - Return(0))); - EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) - .Times(0); - ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + .WillOnce( + DoAll(SetArgPointee<0>(std::vector()), Return(0))); + EXPECT_CALL(*metricClient_, GetMetric(_, _, _)).Times(0); + ASSERT_EQ(-1, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 5、version不一致 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 5. version inconsistency + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) - .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); - ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); + ASSERT_EQ(-1, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 6、老版本 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 6. 
Old version + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) .WillRepeatedly(Return(MetricRet::kNotFound)); - ASSERT_EQ(0, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + ASSERT_EQ(0, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_EQ("before-0.0.5.2", version); ASSERT_TRUE(failedList.empty()); } TEST_F(VersionToolTest, GetClientVersion) { VersionTool versionTool(mdsClient_, metricClient_, snapshotClient_); - std::vector clientAddrs = - {"127.0.0.1:8000", "127.0.0.1:8001", "127.0.0.1:8002", - "127.0.0.1:8003", "127.0.0.1:8004", "127.0.0.1:8005"}; + std::vector clientAddrs = {"127.0.0.1:8000", "127.0.0.1:8001", + "127.0.0.1:8002", "127.0.0.1:8003", + "127.0.0.1:8004", "127.0.0.1:8005"}; - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*mdsClient_, ListClient(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, kProcessCmdLineMetricName, _)) .Times(6) .WillOnce(Return(MetricRet::kOtherErr)) - .WillOnce(DoAll(SetArgPointee<2>(kProcessQemu), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>(kProcessPython), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>(kProcessOther), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>(kProcessQemu), Return(MetricRet::kOK))) + .WillOnce( + DoAll(SetArgPointee<2>(kProcessPython), Return(MetricRet::kOK))) + .WillOnce( + DoAll(SetArgPointee<2>(kProcessOther), Return(MetricRet::kOK))) .WillRepeatedly(DoAll(SetArgPointee<2>(kProcessNebdServer), - Return(MetricRet::kOK))); + Return(MetricRet::kOK))); EXPECT_CALL(*metricClient_, GetMetric(_, kCurveVersionMetricName, _)) .Times(5) - .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("0.0.5.3"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.5.3"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)) .WillOnce(Return(MetricRet::kNotFound)) - .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), - Return(MetricRet::kOK))); + .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), Return(MetricRet::kOK))); ClientVersionMapType clientVersionMap; ClientVersionMapType expected; VersionMapType versionMap = {{"0.0.5.2", {"127.0.0.1:8004"}}, @@ -282,85 +270,80 @@ TEST_F(VersionToolTest, GetClientVersion) { ASSERT_EQ(0, versionTool.GetClientVersion(&clientVersionMap)); ASSERT_EQ(expected, clientVersionMap); - // 2、ListClient失败 - EXPECT_CALL(*mdsClient_, ListClient(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. ListClient failed + EXPECT_CALL(*mdsClient_, ListClient(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, versionTool.GetClientVersion(&clientVersionMap)); } TEST_F(VersionToolTest, GetAndCheckSnapshotCloneVersion) { VersionTool versionTool(mdsClient_, metricClient_, snapshotClient_); - std::map dummyServerMap = - {{"127.0.0.1:6666", "127.0.0.1:6667"}, - {"127.0.0.1:6668", "127.0.0.1:6669"}, - {"127.0.0.1:6670", "127.0.0.1:6671"}}; + std::map dummyServerMap = { + {"127.0.0.1:6666", "127.0.0.1:6667"}, + {"127.0.0.1:6668", "127.0.0.1:6669"}, + {"127.0.0.1:6670", "127.0.0.1:6671"}}; - // 1、正常情况 + // 1. 
Normal situation EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); std::string version; std::vector failedList; - ASSERT_EQ(0, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + ASSERT_EQ( + 0, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); ASSERT_TRUE(failedList.empty()); - // 2、获取部分curve_version失败 + // 2. Obtain partial curve_version failed EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) .WillOnce(Return(MetricRet::kOtherErr)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); - ASSERT_EQ(0, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); + ASSERT_EQ( + 0, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); std::vector expectedList = {"127.0.0.1:6667"}; ASSERT_EQ(expectedList, failedList); - // 3、dummyServerMap为空 + // 3. dummyServerMap is empty std::map dummyServerMap2; EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap2)); - EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) - .Times(0); - ASSERT_EQ(-1, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + EXPECT_CALL(*metricClient_, GetMetric(_, _, _)).Times(0); + ASSERT_EQ( + -1, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 4、version不一致 + // 4. version inconsistency EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)); - ASSERT_EQ(-1, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + ASSERT_EQ( + -1, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 5、老版本mds + // 5. 
Old version of mds EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) .WillRepeatedly(Return(MetricRet::kNotFound)); - ASSERT_EQ(0, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + ASSERT_EQ( + 0, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_EQ("before-0.0.5.2", version); ASSERT_TRUE(failedList.empty()); } diff --git a/test/util/config_generator.h b/test/util/config_generator.h index f0508e58ca..7ee14f23d0 100644 --- a/test/util/config_generator.h +++ b/test/util/config_generator.h @@ -32,14 +32,15 @@ namespace curve { using curve::common::Configuration; -// 各模块继承该接口,实现自己的初始化配置函数 +// Each module inherits this interface and implements its own initialization +// configuration function class ConfigGenerator { public: ConfigGenerator() = default; virtual ~ConfigGenerator() = default; - virtual bool LoadTemplete(const std::string &defaultConfigPath) { + virtual bool LoadTemplete(const std::string& defaultConfigPath) { config_.SetConfigPath(defaultConfigPath); if (!config_.LoadConfig()) { return false; } @@ -47,23 +48,22 @@ class ConfigGenerator { return true; } - virtual void SetConfigPath(const std::string &configPath) { + virtual void SetConfigPath(const std::string& configPath) { configPath_ = configPath; } - // 设置配置项 + // Set Configuration Items virtual void SetKV(const std::string& key, const std::string& value) { config_.SetValue(key, value); } /** - * @brief 批量设置配置项 + * @brief Batch Set Configuration Items * - * @param options 配置项表,形如 "Ip=127.0.0.1" + * @param options configuration item table, in the form of "Ip=127.0.0.1" */ - virtual void SetConfigOptions( - const std::vector &options) { - for (const std::string &op : options) { + virtual void SetConfigOptions(const std::vector& options) { + for (const std::string& op : options) { int delimiterPos = op.find("="); std::string key = op.substr(0, delimiterPos); std::string value = op.substr(delimiterPos + 1); @@ -71,7 +71,7 @@ } } - // 用于生成配置文件 + // Used to generate configuration files virtual bool Generate() { if (configPath_ != "") { config_.SetConfigPath(configPath_); } @@ -80,27 +80,25 @@ return false; } - virtual bool Generate(const std::string &newConfigPath) { + virtual bool Generate(const std::string& newConfigPath) { configPath_ = newConfigPath; return Generate(); } - // 删除配置文件 - virtual int Remove() { - return ::remove(configPath_.c_str()); - } + // Delete the configuration file + virtual int Remove() { return ::remove(configPath_.c_str()); } protected: - // 配置文件路径 + // Configuration file path std::string configPath_; - // 配置器 + // Configurator Configuration config_; }; #define DEFAULT_MDS_CONF "conf/mds.conf" struct MDSConfigGenerator : public ConfigGenerator { - explicit MDSConfigGenerator(const std::string &configPath) { + explicit MDSConfigGenerator(const std::string& configPath) { LoadTemplete(DEFAULT_MDS_CONF); SetConfigPath(configPath); } @@ -109,7 +107,7 @@ struct MDSConfigGenerator : public ConfigGenerator { #define DEFAULT_CHUNKSERVER_CONF "conf/chunkserver.conf.example" struct CSConfigGenerator : public ConfigGenerator { - explicit CSConfigGenerator(const std::string &configPath) { + explicit CSConfigGenerator(const std::string& configPath) { LoadTemplete(DEFAULT_CHUNKSERVER_CONF); SetConfigPath(configPath); } @@ -118,7 +116,7 @@ struct CSConfigGenerator : public ConfigGenerator { #define DEFAULT_CLIENT_CONF "conf/client.conf" 
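The ConfigGenerator helpers in this header all follow one pattern: load a template configuration with LoadTemplete(), override individual keys, and write the result to a new path with Generate(). A minimal usage sketch follows; the target path and key names below are illustrative assumptions, not taken from this patch, while the calls themselves (SetKV, SetConfigOptions, Generate, Remove) are the ones defined above:

    #include "test/util/config_generator.h"

    // Sketch: build a throwaway chunkserver config for a test, assuming
    // conf/chunkserver.conf.example is present in the working directory.
    // The path "/tmp/chunkserver_ut.conf" and the key names are hypothetical.
    void PrepareTestConf() {
        curve::CSConfigGenerator gen("/tmp/chunkserver_ut.conf");
        gen.SetKV("global.port", "8200");               // override a single key
        gen.SetConfigOptions({"global.ip=127.0.0.1"});  // batch form, "key=value"
        bool ok = gen.Generate();  // writes the file; returns false on failure
        (void)ok;
        // Tests typically call gen.Remove() in TearDown() to delete the file.
    }
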
struct ClientConfigGenerator : public ConfigGenerator { - explicit ClientConfigGenerator(const std::string &configPath) { + explicit ClientConfigGenerator(const std::string& configPath) { LoadTemplete(DEFAULT_CLIENT_CONF); SetConfigPath(configPath); } @@ -127,7 +125,7 @@ struct ClientConfigGenerator : public ConfigGenerator { #define DEFAULT_CS_CLIENT_CONF "conf/cs_client.conf" struct CSClientConfigGenerator : public ConfigGenerator { - explicit CSClientConfigGenerator(const std::string &configPath) { + explicit CSClientConfigGenerator(const std::string& configPath) { LoadTemplete(DEFAULT_CS_CLIENT_CONF); SetConfigPath(configPath); } @@ -136,7 +134,7 @@ struct CSClientConfigGenerator : public ConfigGenerator { #define DEFAULT_SNAP_CLIENT_CONF "conf/snap_client.conf" struct SnapClientConfigGenerator : public ConfigGenerator { - explicit SnapClientConfigGenerator(const std::string &configPath) { + explicit SnapClientConfigGenerator(const std::string& configPath) { LoadTemplete(DEFAULT_SNAP_CLIENT_CONF); SetConfigPath(configPath); } @@ -145,7 +143,7 @@ struct SnapClientConfigGenerator : public ConfigGenerator { #define DEFAULT_S3_CONF "conf/s3.conf" struct S3ConfigGenerator : public ConfigGenerator { - explicit S3ConfigGenerator(const std::string &configPath) { + explicit S3ConfigGenerator(const std::string& configPath) { LoadTemplete(DEFAULT_S3_CONF); SetConfigPath(configPath); SetKV("s3.endpoint", "127.0.0.1:9999"); @@ -155,7 +153,7 @@ struct S3ConfigGenerator : public ConfigGenerator { #define DEFAULT_SCS_CONF "conf/snapshot_clone_server.conf" struct SCSConfigGenerator : public ConfigGenerator { - explicit SCSConfigGenerator(const std::string &configPath) { + explicit SCSConfigGenerator(const std::string& configPath) { LoadTemplete(DEFAULT_SCS_CONF); SetConfigPath(configPath); } diff --git a/thirdparties/etcdclient/etcdclient.go b/thirdparties/etcdclient/etcdclient.go index 355e99b162..52e6fdebd4 100644 --- a/thirdparties/etcdclient/etcdclient.go +++ b/thirdparties/etcdclient/etcdclient.go @@ -21,7 +21,7 @@ package main enum EtcdErrCode { - // grpc errCode, 具体的含义见: + // grpc errCode, for specific meanings, refer to: // https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes#ErrGRPCNoSpace // https://godoc.org/google.golang.org/grpc/codes#Code EtcdOK = 0, @@ -42,7 +42,7 @@ enum EtcdErrCode EtcdDataLoss = 15, EtcdUnauthenticated = 16, - // 自定义错误码 + // Custom Error Codes EtcdTxnUnkownOp = 17, EtcdObjectNotExist = 18, EtcdErrObjectType = 19, @@ -87,6 +87,11 @@ import "C" import ( "context" "errors" + "log" + "strings" + "sync" + "time" + "go.etcd.io/etcd/clientv3" "go.etcd.io/etcd/clientv3/concurrency" "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" @@ -94,10 +99,6 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "log" - "strings" - "sync" - "time" ) const ( @@ -203,7 +204,7 @@ func GetErrCode(op string, err error) C.enum_EtcdErrCode { return C.EtcdUnknown } -// TODO(lixiaocui): 日志打印看是否需要glog +// TODO(lixiaocui): Log printing to see if glog is required // //export NewEtcdClientV3 func NewEtcdClientV3(conf C.struct_EtcdConf) C.enum_EtcdErrCode { @@ -282,9 +283,8 @@ func EtcdClientGet(timeout C.int, key *C.char, resp.Header.Revision } -// TODO(lixiaocui): list可能需要有长度限制 -// -//export EtcdClientList +// TODO(lixiaocui): list may require a length limit +// export EtcdClientList func EtcdClientList(timeout C.int, startKey, endKey *C.char, startLen, endLen C.int) (C.enum_EtcdErrCode, uint64, int64) { goStartKey := C.GoStringN(startKey, 
startLen) @@ -437,7 +437,7 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int, goPfx := C.GoStringN(pfx, pfxLen) goLeaderName := C.GoStringN(leaderName, nameLen) - // 创建带ttl的session + // Create a session with a TTL var sessionOpts concurrency.SessionOption = concurrency.WithTTL(int(sessionInterSec)) session, err := concurrency.NewSession(globalClient, sessionOpts) if err != nil { @@ -445,7 +445,7 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int, return C.EtcdCampaignInternalErr, 0 } - // 创建election和超时context + // Create an election and a timeout context var election *concurrency.Election = concurrency.NewElection(session, goPfx) var ctx context.Context var cancel context.CancelFunc @@ -460,7 +460,7 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int, wg.Add(2) defer wg.Wait() - // 监测当前的leader + // Observe the current leader obCtx, obCancel := context.WithCancel(context.Background()) observer := election.Observe(obCtx) defer obCancel() @@ -484,7 +484,7 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int, } }() - // 监测自己key的存活状态 + // Monitor the liveness of this instance's own key exitSignal := make(chan struct{}, 1) go func() { defer wg.Done() @@ -502,8 +502,8 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int, } }() - // 1. Campaign返回nil说明当前mds持有的key版本号最小 - // 2. Campaign返回时不检测自己持有key的状态,所以返回nil后需要监测session.Done() + // 1. Campaign returning nil means the key held by the current mds has the smallest version number + // 2. Campaign does not check the state of the key it holds when it returns, so after it returns nil we still need to watch session.Done() if err := election.Campaign(ctx, goLeaderName); err == nil { log.Printf("[%s/%x] campaign for leader success", goLeaderName, session.Lease()) diff --git a/tools/curvefsTool.cpp b/tools/curvefsTool.cpp index 2227257bf3..a104e4e21f 100644 --- a/tools/curvefsTool.cpp +++ b/tools/curvefsTool.cpp @@ -29,27 +29,25 @@ using ::curve::common::kDefaultPoolsetName; DEFINE_string(mds_addr, "127.0.0.1:6666", - "mds ip and port list, separated by \",\""); + "mds ip and port list, separated by \",\""); -DEFINE_string(op, - "", - "operation: create_logicalpool, " - "create_physicalpool, " - "set_chunkserver, " - "set_logicalpool"); +DEFINE_string(op, "", + "operation: create_logicalpool, " + "create_physicalpool, " + "set_chunkserver, " + "set_logicalpool"); DEFINE_string(cluster_map, "/etc/curve/topo.json", "cluster topology map."); DEFINE_int32(chunkserver_id, -1, "chunkserver id for set chunkserver status."); DEFINE_string(chunkserver_status, "readwrite", - "chunkserver status: readwrite, pendding."); + "chunkserver status: readwrite, pendding."); DEFINE_uint32(rpcTimeOutMs, 5000u, "rpc time out"); DEFINE_string(confPath, "/etc/curve/tools.conf", "config file path of tools"); DEFINE_uint32(logicalpool_id, -1, "logicalpool id for set logicalpool status."); -DEFINE_string(logicalpool_status, "allow", - "logicalpool status: allow, deny."); +DEFINE_string(logicalpool_status, "allow", "logicalpool status: allow, deny."); const int kRetCodeCommonErr = -1; const int kRetCodeRedirectMds = -2; @@ -73,7 +71,6 @@ const char kAllocStatusDeny[] = "deny"; const char kPoolsets[] = "poolsets"; const char kPoolsetName[] = "poolset"; - using ::curve::common::SplitString; namespace curve { namespace mds { namespace topology { const std::string CurvefsTools::clusterMapSeprator = " "; // NOLINT void UpdateFlagsFromConf(curve::common::Configuration* conf) { - // 如果配置文件不存在的话不报错,以命令行为准,这是为了不强依赖配置 - // 如果配置文件存在并且没有指定命令行的话,就以配置文件为准 + // If the configuration
file does not exist, no error is reported and the command line + // prevails; this avoids a hard dependency on the configuration file. If the + // configuration file exists and no command-line flag is specified, the + // configuration file prevails. if (conf->LoadConfig()) { google::CommandLineFlagInfo info; if (GetCommandLineFlagInfo("mds_addr", &info) && info.is_default) { conf->GetStringValue("mdsAddr", &FLAGS_mds_addr); LOG(INFO) << "conf: " << FLAGS_mds_addr; } } } @@ -122,20 +121,18 @@ int CurvefsTools::TryAnotherMdsAddress() { } mdsAddressIndex_ = (mdsAddressIndex_ + 1) % mdsAddressStr_.size(); std::string mdsAddress = mdsAddressStr_[mdsAddressIndex_]; - LOG(INFO) << "try mds address(" << mdsAddressIndex_ - << "): " << mdsAddress; + LOG(INFO) << "try mds address(" << mdsAddressIndex_ << "): " << mdsAddress; int ret = channel_.Init(mdsAddress.c_str(), NULL); if (ret != 0) { - LOG(ERROR) << "Fail to init channel to mdsAddress: " - << mdsAddress; + LOG(ERROR) << "Fail to init channel to mdsAddress: " << mdsAddress; } return ret; } int CurvefsTools::DealFailedRet(int ret, std::string operation) { if (kRetCodeRedirectMds == ret) { - LOG(WARNING) << operation << " fail on mds: " - << mdsAddressStr_[mdsAddressIndex_]; + LOG(WARNING) << operation + << " fail on mds: " << mdsAddressStr_[mdsAddressIndex_]; } else { LOG(ERROR) << operation << " fail."; } @@ -166,10 +163,9 @@ int CurvefsTools::HandleCreateLogicalPool() { std::string copysetNumStr = std::to_string(lgPool.copysetNum); std::string zoneNumStr = std::to_string(lgPool.zoneNum); - std::string rapString = "{\"replicaNum\":" + replicaNumStr - + ", \"copysetNum\":" + copysetNumStr - + ", \"zoneNum\":" + zoneNumStr - + "}"; + std::string rapString = "{\"replicaNum\":" + replicaNumStr + + ", \"copysetNum\":" + copysetNumStr + + ", \"zoneNum\":" + zoneNumStr + "}"; request.set_redundanceandplacementpolicy(rapString); request.set_userpolicy("{\"aaa\":1}"); @@ -189,7 +185,7 @@ int CurvefsTools::HandleCreateLogicalPool() { stub.CreateLogicalPool(&cntl, &request, &response, nullptr); if (cntl.Failed()) { LOG(WARNING) << "send rpc get cntl Failed, error context:" - << cntl.ErrorText(); + << cntl.ErrorText(); return kRetCodeRedirectMds; } if (response.statuscode() == kTopoErrCodeSuccess) { @@ -199,8 +195,7 @@ int CurvefsTools::HandleCreateLogicalPool() { LOG(INFO) << "Logical pool already exist"; } else { LOG(ERROR) << "CreateLogicalPool Rpc response fail.
" - << "Message is :" - << response.DebugString(); + << "Message is :" << response.DebugString(); return response.statuscode(); } } @@ -221,7 +216,7 @@ int CurvefsTools::ScanLogicalPool() { return ret; } for (auto it = logicalPoolInfos.begin(); - it != logicalPoolInfos.end();) { + it != logicalPoolInfos.end();) { auto ix = std::find_if(lgPoolDatas.begin(), lgPoolDatas.end(), [it](const CurveLogicalPoolData& data) { @@ -236,8 +231,9 @@ int CurvefsTools::ScanLogicalPool() { return 0; } -int CurvefsTools::ListLogicalPool(const std::string& phyPoolName, - std::list *logicalPoolInfos) { +int CurvefsTools::ListLogicalPool( + const std::string& phyPoolName, + std::list* logicalPoolInfos) { TopologyService_Stub stub(&channel_); ListLogicalPoolRequest request; ListLogicalPoolResponse response; @@ -246,15 +242,13 @@ int CurvefsTools::ListLogicalPool(const std::string& phyPoolName, cntl.set_log_id(1); request.set_physicalpoolname(phyPoolName); - LOG(INFO) << "ListLogicalPool send request: " - << request.DebugString(); + LOG(INFO) << "ListLogicalPool send request: " << request.DebugString(); stub.ListLogicalPool(&cntl, &request, &response, nullptr); if (cntl.Failed()) { return kRetCodeRedirectMds; } for (int i = 0; i < response.logicalpoolinfos_size(); i++) { - logicalPoolInfos->push_back( - response.logicalpoolinfos(i)); + logicalPoolInfos->push_back(response.logicalpoolinfos(i)); } return 0; } @@ -311,7 +305,6 @@ int CurvefsTools::HandleBuildCluster() { return ret; } - int CurvefsTools::ReadClusterMap() { std::ifstream fin(FLAGS_cluster_map); if (fin.is_open()) { @@ -325,8 +318,8 @@ int CurvefsTools::ReadClusterMap() { return -1; } } else { - LOG(ERROR) << "open cluster map file : " - << FLAGS_cluster_map << " fail."; + LOG(ERROR) << "open cluster map file : " << FLAGS_cluster_map + << " fail."; return -1; } return 0; @@ -339,7 +332,7 @@ int CurvefsTools::InitPoolsetData() { for (const auto& poolset : clusterMap_[kPoolsets]) { CurvePoolsetData poolsetData; if (!poolset[kName].isString()) { - LOG(ERROR) <<"poolset name must be string" << poolset[kName]; + LOG(ERROR) << "poolset name must be string" << poolset[kName]; return -1; } poolsetData.name = poolset[kName].asString(); @@ -364,7 +357,7 @@ int CurvefsTools::InitServerData() { LOG(ERROR) << "No servers in cluster map"; return -1; } - for (const auto &server : clusterMap_[kServers]) { + for (const auto& server : clusterMap_[kServers]) { CurveServerData serverData; if (!server[kName].isString()) { LOG(ERROR) << "server name must be string"; @@ -423,7 +416,7 @@ int CurvefsTools::InitLogicalPoolData() { LOG(ERROR) << "No servers in cluster map"; return -1; } - for (const auto &lgPool : clusterMap_[kLogicalPools]) { + for (const auto& lgPool : clusterMap_[kLogicalPools]) { CurveLogicalPoolData lgPoolData; if (!lgPool[kName].isString()) { LOG(ERROR) << "logicalpool name must be string"; @@ -496,8 +489,7 @@ int CurvefsTools::ListPoolset(std::list* poolsetInfos) { } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "ListPoolset Rpc response fail. 
" - << "Message is :" - << response.DebugString(); + << "Message is :" << response.DebugString(); return response.statuscode(); } else { LOG(INFO) << "Received ListPoolset Rpc response success, " @@ -511,7 +503,7 @@ int CurvefsTools::ListPoolset(std::list* poolsetInfos) { } int CurvefsTools::ListPhysicalPool( - std::list *physicalPoolInfos) { + std::list* physicalPoolInfos) { TopologyService_Stub stub(&channel_); ListPhysicalPoolRequest request; ListPhysicalPoolResponse response; @@ -519,38 +511,30 @@ int CurvefsTools::ListPhysicalPool( cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "ListPhysicalPool send request: " - << request.DebugString(); + LOG(INFO) << "ListPhysicalPool send request: " << request.DebugString(); - stub.ListPhysicalPool(&cntl, - &request, - &response, - nullptr); + stub.ListPhysicalPool(&cntl, &request, &response, nullptr); if (cntl.Failed()) { return kRetCodeRedirectMds; } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "ListPhysicalPool Rpc response fail. " - << "Message is :" - << response.DebugString(); + << "Message is :" << response.DebugString(); return response.statuscode(); } else { LOG(INFO) << "Received ListPhysicalPool Rpc response success, " << response.DebugString(); } - for (int i = 0; - i < response.physicalpoolinfos_size(); - i++) { - physicalPoolInfos->push_back( - response.physicalpoolinfos(i)); + for (int i = 0; i < response.physicalpoolinfos_size(); i++) { + physicalPoolInfos->push_back(response.physicalpoolinfos(i)); } return 0; } -int CurvefsTools::ListPhysicalPoolsInPoolset(PoolsetIdType poolsetid, - std::list *physicalPoolInfos) { +int CurvefsTools::ListPhysicalPoolsInPoolset( + PoolsetIdType poolsetid, std::list* physicalPoolInfos) { TopologyService_Stub stub(&channel_); ListPhysicalPoolsInPoolsetRequest request; ListPhysicalPoolResponse response; @@ -570,10 +554,8 @@ int CurvefsTools::ListPhysicalPoolsInPoolset(PoolsetIdType poolsetid, } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "ListPhysicalPoolsInPoolset Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , poolsetid = " - << poolsetid; + << "Message is :" << response.DebugString() + << " , poolsetid = " << poolsetid; return response.statuscode(); } else { LOG(INFO) << "Received ListPhyPoolsInPoolset Rpc resp success," @@ -587,7 +569,7 @@ int CurvefsTools::ListPhysicalPoolsInPoolset(PoolsetIdType poolsetid, } int CurvefsTools::AddListPoolZone(PoolIdType poolid, - std::list *zoneInfos) { + std::list* zoneInfos) { TopologyService_Stub stub(&channel_); ListPoolZoneRequest request; ListPoolZoneResponse response; @@ -597,8 +579,7 @@ int CurvefsTools::AddListPoolZone(PoolIdType poolid, cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "ListPoolZone, send request: " - << request.DebugString(); + LOG(INFO) << "ListPoolZone, send request: " << request.DebugString(); stub.ListPoolZone(&cntl, &request, &response, nullptr); @@ -607,10 +588,8 @@ int CurvefsTools::AddListPoolZone(PoolIdType poolid, } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "ListPoolZone Rpc response fail. 
" - << "Message is :" - << response.DebugString() - << " , physicalpoolid = " - << poolid; + << "Message is :" << response.DebugString() + << " , physicalpoolid = " << poolid; return response.statuscode(); } else { LOG(INFO) << "Received ListPoolZone Rpc response success, " @@ -624,7 +603,7 @@ int CurvefsTools::AddListPoolZone(PoolIdType poolid, } int CurvefsTools::AddListZoneServer(ZoneIdType zoneid, - std::list *serverInfos) { + std::list* serverInfos) { TopologyService_Stub stub(&channel_); ListZoneServerRequest request; ListZoneServerResponse response; @@ -633,8 +612,7 @@ int CurvefsTools::AddListZoneServer(ZoneIdType zoneid, cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "ListZoneServer, send request: " - << request.DebugString(); + LOG(INFO) << "ListZoneServer, send request: " << request.DebugString(); stub.ListZoneServer(&cntl, &request, &response, nullptr); @@ -643,14 +621,12 @@ int CurvefsTools::AddListZoneServer(ZoneIdType zoneid, } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "ListZoneServer Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , zoneid = " - << zoneid; + << "Message is :" << response.DebugString() + << " , zoneid = " << zoneid; return response.statuscode(); } else { LOG(INFO) << "ListZoneServer Rpc response success, " - << response.DebugString(); + << response.DebugString(); } for (int i = 0; i < response.serverinfo_size(); i++) { @@ -700,11 +676,11 @@ int CurvefsTools::ScanCluster() { // get all phsicalpool and compare // De-duplication for (auto server : serverDatas) { - if (std::find_if(physicalPoolToAdd.begin(), - physicalPoolToAdd.end(), - [server](CurvePhysicalPoolData& data) { - return data.physicalPoolName == server.physicalPoolName; - }) != physicalPoolToAdd.end()) { + if (std::find_if(physicalPoolToAdd.begin(), physicalPoolToAdd.end(), + [server](CurvePhysicalPoolData& data) { + return data.physicalPoolName == + server.physicalPoolName; + }) != physicalPoolToAdd.end()) { continue; } CurvePhysicalPoolData poolData; @@ -738,11 +714,11 @@ int CurvefsTools::ScanCluster() { for (auto it = physicalPoolInfos.begin(); it != physicalPoolInfos.end();) { auto ix = std::find_if( - physicalPoolToAdd.begin(), physicalPoolToAdd.end(), - [it](const CurvePhysicalPoolData& data) { - return (data.poolsetName == it->poolsetname()) && - (data.physicalPoolName == it->physicalpoolname()); - }); + physicalPoolToAdd.begin(), physicalPoolToAdd.end(), + [it](const CurvePhysicalPoolData& data) { + return (data.poolsetName == it->poolsetname()) && + (data.physicalPoolName == it->physicalpoolname()); + }); if (ix != physicalPoolToAdd.end()) { physicalPoolToAdd.erase(ix); it++; @@ -755,14 +731,12 @@ int CurvefsTools::ScanCluster() { // get zone and compare // De-duplication for (auto server : serverDatas) { - if (std::find_if(zoneToAdd.begin(), - zoneToAdd.end(), - [server](CurveZoneData& data) { - return (data.physicalPoolName == - server.physicalPoolName) && - (data.zoneName == - server.zoneName); - }) != zoneToAdd.end()) { + if (std::find_if(zoneToAdd.begin(), zoneToAdd.end(), + [server](CurveZoneData& data) { + return (data.physicalPoolName == + server.physicalPoolName) && + (data.zoneName == server.zoneName); + }) != zoneToAdd.end()) { continue; } CurveZoneData CurveZoneData; @@ -784,9 +758,8 @@ int CurvefsTools::ScanCluster() { } zoneInfos.clear(); - for (auto it = physicalPoolInfos.begin(); - it != physicalPoolInfos.end(); - it++) { + for (auto it = physicalPoolInfos.begin(); it != 
physicalPoolInfos.end(); + it++) { PoolIdType poolid = it->physicalpoolid(); ret = AddListPoolZone(poolid, &zoneInfos); if (ret < 0) { @@ -794,15 +767,12 @@ int CurvefsTools::ScanCluster() { } } - for (auto it = zoneInfos.begin(); - it != zoneInfos.end();) { + for (auto it = zoneInfos.begin(); it != zoneInfos.end();) { auto ix = std::find_if( zoneToAdd.begin(), zoneToAdd.end(), [it](const CurveZoneData& data) { - return (data.physicalPoolName == - it->physicalpoolname()) && - (data.zoneName == - it->zonename()); + return (data.physicalPoolName == it->physicalpoolname()) && + (data.zoneName == it->zonename()); }); if (ix != zoneToAdd.end()) { zoneToAdd.erase(ix); @@ -816,15 +786,12 @@ int CurvefsTools::ScanCluster() { // get server and compare // De-duplication for (auto server : serverDatas) { - if (std::find_if(serverToAdd.begin(), - serverToAdd.end(), - [server](CurveServerData& data) { - return data.serverName == - server.serverName; - }) != serverToAdd.end()) { + if (std::find_if(serverToAdd.begin(), serverToAdd.end(), + [server](CurveServerData& data) { + return data.serverName == server.serverName; + }) != serverToAdd.end()) { LOG(WARNING) << "WARING! Duplicated Server Name: " - << server.serverName - << " , ignored."; + << server.serverName << " , ignored."; continue; } serverToAdd.push_back(server); @@ -843,9 +810,7 @@ int CurvefsTools::ScanCluster() { } serverInfos.clear(); - for (auto it = zoneInfos.begin(); - it != zoneInfos.end(); - it++) { + for (auto it = zoneInfos.begin(); it != zoneInfos.end(); it++) { ZoneIdType zoneid = it->zoneid(); ret = AddListZoneServer(zoneid, &serverInfos); if (ret < 0) { @@ -853,17 +818,14 @@ int CurvefsTools::ScanCluster() { } } - for (auto it = serverInfos.begin(); - it != serverInfos.end(); - it++) { - auto ix = - std::find_if( - serverToAdd.begin(), serverToAdd.end(), - [it](const CurveServerData& data) { - return (data.serverName == it->hostname()) && - (data.zoneName == it->zonename()) && - (data.physicalPoolName == it->physicalpoolname()); - }); + for (auto it = serverInfos.begin(); it != serverInfos.end(); it++) { + auto ix = std::find_if( + serverToAdd.begin(), serverToAdd.end(), + [it](const CurveServerData& data) { + return (data.serverName == it->hostname()) && + (data.zoneName == it->zonename()) && + (data.physicalPoolName == it->physicalpoolname()); + }); if (ix != serverToAdd.end()) { serverToAdd.erase(ix); } else { @@ -893,22 +855,19 @@ int CurvefsTools::CreatePoolset() { cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "CreatePoolset, send request: " - << request.DebugString(); + LOG(INFO) << "CreatePoolset, send request: " << request.DebugString(); stub.CreatePoolset(&cntl, &request, &response, nullptr); if (cntl.Failed()) { LOG(WARNING) << "send rpc get cntl Failed, error context:" - << cntl.ErrorText(); + << cntl.ErrorText(); return kRetCodeRedirectMds; } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "CreatePoolset Rpc response fail. 
" - << "Message is :" - << response.DebugString() - << " , poolsetName =" - << it.name; + << "Message is :" << response.DebugString() + << " , poolsetName =" << it.name; return response.statuscode(); } else { LOG(INFO) << "Received CreatePoolset response success, " @@ -939,15 +898,13 @@ int CurvefsTools::CreatePhysicalPool() { if (cntl.Failed()) { LOG(WARNING) << "send rpc get cntl Failed, error context:" - << cntl.ErrorText(); + << cntl.ErrorText(); return kRetCodeRedirectMds; } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "CreatePhysicalPool Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , physicalPoolName =" - << it.physicalPoolName; + << "Message is :" << response.DebugString() + << " , physicalPoolName =" << it.physicalPoolName; return response.statuscode(); } else { LOG(INFO) << "Received CreatePhysicalPool response success, " @@ -971,8 +928,7 @@ int CurvefsTools::CreateZone() { cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "CreateZone, send request: " - << request.DebugString(); + LOG(INFO) << "CreateZone, send request: " << request.DebugString(); stub.CreateZone(&cntl, &request, &response, nullptr); @@ -980,20 +936,15 @@ int CurvefsTools::CreateZone() { cntl.ErrorCode() == brpc::ELOGOFF) { return kRetCodeRedirectMds; } else if (cntl.Failed()) { - LOG(ERROR) << "CreateZone, errcorde = " - << response.statuscode() - << ", error content:" - << cntl.ErrorText() - << " , zoneName = " - << it.zoneName; + LOG(ERROR) << "CreateZone, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , zoneName = " << it.zoneName; return kRetCodeCommonErr; } if (response.statuscode() != 0) { LOG(ERROR) << "CreateZone Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , zoneName = " - << it.zoneName; + << "Message is :" << response.DebugString() + << " , zoneName = " << it.zoneName; return response.statuscode(); } else { LOG(INFO) << "Received CreateZone Rpc success, " @@ -1023,8 +974,7 @@ int CurvefsTools::CreateServer() { cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "CreateServer, send request: " - << request.DebugString(); + LOG(INFO) << "CreateServer, send request: " << request.DebugString(); stub.RegistServer(&cntl, &request, &response, nullptr); @@ -1032,12 +982,9 @@ int CurvefsTools::CreateServer() { cntl.ErrorCode() == brpc::ELOGOFF) { return kRetCodeRedirectMds; } else if (cntl.Failed()) { - LOG(ERROR) << "RegistServer, errcorde = " - << response.statuscode() - << ", error content : " - << cntl.ErrorText() - << " , serverName = " - << it.serverName; + LOG(ERROR) << "RegistServer, errcorde = " << response.statuscode() + << ", error content : " << cntl.ErrorText() + << " , serverName = " << it.serverName; return kRetCodeCommonErr; } if (response.statuscode() == kTopoErrCodeSuccess) { @@ -1047,10 +994,8 @@ int CurvefsTools::CreateServer() { LOG(INFO) << "Server already exist"; } else { LOG(ERROR) << "RegistServer Rpc response fail. 
" - << "Message is :" - << response.DebugString() - << " , serverName = " - << it.serverName; + << "Message is :" << response.DebugString() + << " , serverName = " << it.serverName; return response.statuscode(); } } @@ -1080,18 +1025,14 @@ int CurvefsTools::ClearPhysicalPool() { } else if (cntl.Failed()) { LOG(ERROR) << "DeletePhysicalPool, errcorde = " << response.statuscode() - << ", error content:" - << cntl.ErrorText() - << " , physicalPoolId = " - << it; + << ", error content:" << cntl.ErrorText() + << " , physicalPoolId = " << it; return kRetCodeCommonErr; } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "DeletePhysicalPool Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , physicalPoolId = " - << it; + << "Message is :" << response.DebugString() + << " , physicalPoolId = " << it; return response.statuscode(); } else { LOG(INFO) << "Received DeletePhysicalPool Rpc response success, " @@ -1128,7 +1069,7 @@ int CurvefsTools::ClearPoolset() { return kRetCodeCommonErr; } else if (response.statuscode() != kTopoErrCodeSuccess && response.statuscode() != - kTopoErrCodeCannotDeleteDefaultPoolset) { + kTopoErrCodeCannotDeleteDefaultPoolset) { LOG(ERROR) << "DeletePoolset Rpc response fail. " << "Message is :" << response.DebugString() << " , PoolsetId = " << it; @@ -1153,8 +1094,7 @@ int CurvefsTools::ClearZone() { cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "DeleteZone, send request: " - << request.DebugString(); + LOG(INFO) << "DeleteZone, send request: " << request.DebugString(); stub.DeleteZone(&cntl, &request, &response, nullptr); @@ -1162,19 +1102,14 @@ int CurvefsTools::ClearZone() { cntl.ErrorCode() == brpc::ELOGOFF) { return kRetCodeRedirectMds; } else if (cntl.Failed()) { - LOG(ERROR) << "DeleteZone, errcorde = " - << response.statuscode() - << ", error content:" - << cntl.ErrorText() - << " , zoneId = " - << it; + LOG(ERROR) << "DeleteZone, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , zoneId = " << it; return kRetCodeCommonErr; } else if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "DeleteZone Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , zoneId = " - << it; + << "Message is :" << response.DebugString() + << " , zoneId = " << it; return response.statuscode(); } else { LOG(INFO) << "Received DeleteZone Rpc success, " @@ -1196,8 +1131,7 @@ int CurvefsTools::ClearServer() { cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "DeleteServer, send request: " - << request.DebugString(); + LOG(INFO) << "DeleteServer, send request: " << request.DebugString(); stub.DeleteServer(&cntl, &request, &response, nullptr); @@ -1205,20 +1139,15 @@ int CurvefsTools::ClearServer() { cntl.ErrorCode() == brpc::ELOGOFF) { return kRetCodeRedirectMds; } else if (cntl.Failed()) { - LOG(ERROR) << "DeleteServer, errcorde = " - << response.statuscode() - << ", error content:" - << cntl.ErrorText() - << " , serverId = " - << it; + LOG(ERROR) << "DeleteServer, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , serverId = " << it; return kRetCodeCommonErr; } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "DeleteServer Rpc response fail. 
" - << "Message is :" - << response.DebugString() - << " , serverId = " - << it; + << "Message is :" << response.DebugString() + << " , serverId = " << it; return response.statuscode(); } else { LOG(INFO) << "Received DeleteServer Rpc response success, " @@ -1254,25 +1183,21 @@ int CurvefsTools::SetChunkServer() { stub.SetChunkServer(&cntl, &request, &response, nullptr); - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { + if (cntl.ErrorCode() == EHOSTDOWN || cntl.ErrorCode() == brpc::ELOGOFF) { return kRetCodeRedirectMds; } else if (cntl.Failed()) { LOG(ERROR) << "SetChunkServerStatusRequest, errcorde = " << response.statuscode() - << ", error content:" - << cntl.ErrorText(); + << ", error content:" << cntl.ErrorText(); return kRetCodeCommonErr; } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "SetChunkServerStatusRequest Rpc response fail. " - << "Message is :" - << response.DebugString(); + << "Message is :" << response.DebugString(); return response.statuscode(); } else { LOG(INFO) << "Received SetChunkServerStatusRequest Rpc " - << "response success, " - << response.DebugString(); + << "response success, " << response.DebugString(); } return 0; } @@ -1327,30 +1252,24 @@ int CurvefsTools::SetLogicalPool() { cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); cntl.set_log_id(1); - LOG(INFO) << "SetLogicalPool, send request: " - << request.DebugString(); + LOG(INFO) << "SetLogicalPool, send request: " << request.DebugString(); stub.SetLogicalPool(&cntl, &request, &response, nullptr); - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { + if (cntl.ErrorCode() == EHOSTDOWN || cntl.ErrorCode() == brpc::ELOGOFF) { return kRetCodeRedirectMds; } else if (cntl.Failed()) { - LOG(ERROR) << "SetLogicalPool, errcorde = " - << response.statuscode() - << ", error content:" - << cntl.ErrorText(); + LOG(ERROR) << "SetLogicalPool, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText(); return kRetCodeCommonErr; } if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "SetLogicalPool Rpc response fail. 
" - << "Message is :" - << response.DebugString(); + << "Message is :" << response.DebugString(); return response.statuscode(); } else { LOG(INFO) << "Received SetLogicalPool Rpc " - << "response success, " - << response.DebugString(); + << "response success, " << response.DebugString(); } return 0; } @@ -1359,9 +1278,7 @@ int CurvefsTools::SetLogicalPool() { } // namespace mds } // namespace curve - - -int main(int argc, char **argv) { +int main(int argc, char** argv) { google::InitGoogleLogging(argv[0]); google::ParseCommandLineFlags(&argc, &argv, false); diff --git a/tools/snaptool/queryclone.py b/tools/snaptool/queryclone.py index a80d746f7a..cde76bc130 100644 --- a/tools/snaptool/queryclone.py +++ b/tools/snaptool/queryclone.py @@ -5,18 +5,21 @@ import common import time -status = ['done', 'cloning', 'recovering', 'cleaning', 'errorCleaning', 'error', 'retrying', 'metaInstalled'] +status = ['done', 'cloning', 'recovering', 'cleaning', + 'errorCleaning', 'error', 'retrying', 'metaInstalled'] filetype = ['file', 'snapshot'] clonestep = ['createCloneFile', 'createCloneMeta', 'createCloneChunk', 'completeCloneMeta', - 'recoverChunk', 'changeOwner', 'renameCloneFile', 'completeCloneFile', 'end'] + 'recoverChunk', 'changeOwner', 'renameCloneFile', 'completeCloneFile', 'end'] tasktype = ["clone", "recover"] islazy = ["notlazy", "lazy"] + def __get_status(args): if args.status: return status.index(args.status) return None + def __get_type(args): if args.clone: return tasktype.index("clone") @@ -24,12 +27,14 @@ def __get_type(args): return tasktype.index("recover") return None + def query_clone_recover(args): - totalCount, records = curltool.get_clone_list_all(args.user, args.src, args.dest, args.taskid, __get_type(args), __get_status(args)) + totalCount, records = curltool.get_clone_list_all( + args.user, args.src, args.dest, args.taskid, __get_type(args), __get_status(args)) if totalCount == 0: print "no record found" return - # 提高打印可读性 + # Improving Print Readability for record in records: code = record['TaskStatus'] record['TaskStatus'] = status[code] @@ -42,15 +47,18 @@ def query_clone_recover(args): code = record['IsLazy'] record['IsLazy'] = islazy[code] time_temp = record['Time'] - record['Time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_temp/1000000)) + record['Time'] = time.strftime( + "%Y-%m-%d %H:%M:%S", time.localtime(time_temp/1000000)) notes = {} heads = ['UUID', 'User', 'TaskType', 'Src', 'File', - 'Time', 'FileType', 'IsLazy', 'NextStep', 'TaskStatus', 'Progress'] + 'Time', 'FileType', 'IsLazy', 'NextStep', 'TaskStatus', 'Progress'] common.printTable(heads, records, notes) + def clone_recover_status(args): - totalCount, records = curltool.get_clone_list_all(args.user, args.src, args.dest, None, __get_type(args)) + totalCount, records = curltool.get_clone_list_all( + args.user, args.src, args.dest, None, __get_type(args)) if totalCount == 0: print "no record found" return @@ -64,17 +72,17 @@ def clone_recover_status(args): clone_statistics[status_name].append(record['UUID']) else: clone_statistics[status_name] = [record['UUID']] - else : + else: if recover_statistics.has_key(status_name): recover_statistics[status_name].append(record['UUID']) else: recover_statistics[status_name] = [record['UUID']] if clone_statistics: print "clone status:" - for k,v in clone_statistics.items(): + for k, v in clone_statistics.items(): print("%s : %d" % (k, len(v))) if recover_statistics: print "recover status:" - for k,v in recover_statistics.items(): - print("%s : %d" % (k, 
len(v))) \ No newline at end of file + for k, v in recover_statistics.items(): + print("%s : %d" % (k, len(v))) From c45d2898a6bfe706cb35b19d10f4cd043ce65459 Mon Sep 17 00:00:00 2001 From: koko2pp Date: Fri, 13 Oct 2023 22:56:07 +0800 Subject: [PATCH 4/8] style: Apply Clang format to modified files, restore patch files, and rebase all conflicts. --- tools/curvefsTool.cpp | 2668 +++++++++++++++++++++++------------------ 1 file changed, 1481 insertions(+), 1187 deletions(-) diff --git a/tools/curvefsTool.cpp b/tools/curvefsTool.cpp index a104e4e21f..3d1a726b47 100644 --- a/tools/curvefsTool.cpp +++ b/tools/curvefsTool.cpp @@ -73,1256 +73,1550 @@ const char kPoolsetName[] = "poolset"; using ::curve::common::SplitString; -namespace curve { -namespace mds { -namespace topology { - -const std::string CurvefsTools::clusterMapSeprator = " "; // NOLINT - -void UpdateFlagsFromConf(curve::common::Configuration* conf) { - // If the configuration file does not exist, no error will be reported, and - // the command line will prevail. This is to avoid strong dependence on the - // configuration If the configuration file exists and no command line is - // specified, the configuration file shall prevail - if (conf->LoadConfig()) { - google::CommandLineFlagInfo info; - if (GetCommandLineFlagInfo("mds_addr", &info) && info.is_default) { - conf->GetStringValue("mdsAddr", &FLAGS_mds_addr); - LOG(INFO) << "conf: " << FLAGS_mds_addr; - } - } -} - -int CurvefsTools::Init() { - curve::common::Configuration conf; - conf.SetConfigPath(FLAGS_confPath); - UpdateFlagsFromConf(&conf); - SplitString(FLAGS_mds_addr, ",", &mdsAddressStr_); - if (mdsAddressStr_.empty()) { - LOG(ERROR) << "no available mds address."; - return kRetCodeCommonErr; - } - - butil::EndPoint endpt; - for (const auto& addr : mdsAddressStr_) { - if (butil::str2endpoint(addr.c_str(), &endpt) < 0) { - LOG(ERROR) << "Invalid sub mds ip:port provided: " << addr; - return kRetCodeCommonErr; - } - } - mdsAddressIndex_ = -1; - return 0; -} - -int CurvefsTools::TryAnotherMdsAddress() { - if (mdsAddressStr_.size() == 0) { - LOG(ERROR) << "no available mds address."; - return kRetCodeCommonErr; - } - mdsAddressIndex_ = (mdsAddressIndex_ + 1) % mdsAddressStr_.size(); - std::string mdsAddress = mdsAddressStr_[mdsAddressIndex_]; - LOG(INFO) << "try mds address(" << mdsAddressIndex_ << "): " << mdsAddress; - int ret = channel_.Init(mdsAddress.c_str(), NULL); - if (ret != 0) { - LOG(ERROR) << "Fail to init channel to mdsAddress: " << mdsAddress; - } - return ret; -} - -int CurvefsTools::DealFailedRet(int ret, std::string operation) { - if (kRetCodeRedirectMds == ret) { - LOG(WARNING) << operation - << " fail on mds: " << mdsAddressStr_[mdsAddressIndex_]; - } else { - LOG(ERROR) << operation << " fail."; - } - return ret; -} - -int CurvefsTools::HandleCreateLogicalPool() { - int ret = ReadClusterMap(); - if (ret < 0) { - return DealFailedRet(ret, "read cluster map"); - } - ret = InitLogicalPoolData(); - if (ret < 0) { - return DealFailedRet(ret, "init logical pool data"); - } - ret = ScanLogicalPool(); - if (ret < 0) { - return DealFailedRet(ret, "scan logical pool"); - } - for (const auto& lgPool : lgPoolDatas) { - TopologyService_Stub stub(&channel_); - - CreateLogicalPoolRequest request; - request.set_logicalpoolname(lgPool.name); - request.set_physicalpoolname(lgPool.physicalPoolName); - request.set_type(lgPool.type); - std::string replicaNumStr = std::to_string(lgPool.replicasNum); - std::string copysetNumStr = std::to_string(lgPool.copysetNum); -
std::string zoneNumStr = std::to_string(lgPool.zoneNum); - - std::string rapString = "{\"replicaNum\":" + replicaNumStr + - ", \"copysetNum\":" + copysetNumStr + - ", \"zoneNum\":" + zoneNumStr + "}"; - - request.set_redundanceandplacementpolicy(rapString); - request.set_userpolicy("{\"aaa\":1}"); - request.set_scatterwidth(lgPool.scatterwidth); - request.set_status(lgPool.status); - - CreateLogicalPoolResponse response; - - brpc::Controller cntl; - cntl.set_max_retry(0); - cntl.set_timeout_ms(-1); - cntl.set_log_id(1); - - LOG(INFO) << "CreateLogicalPool, second request: " - << request.DebugString(); - - stub.CreateLogicalPool(&cntl, &request, &response, nullptr); - if (cntl.Failed()) { - LOG(WARNING) << "send rpc get cntl Failed, error context:" - << cntl.ErrorText(); - return kRetCodeRedirectMds; - } - if (response.statuscode() == kTopoErrCodeSuccess) { - LOG(INFO) << "Received CreateLogicalPool Rpc response success, " - << response.DebugString(); - } else if (response.statuscode() == kTopoErrCodeLogicalPoolExist) { - LOG(INFO) << "Logical pool already exist"; - } else { - LOG(ERROR) << "CreateLogicalPool Rpc response fail. " - << "Message is :" << response.DebugString(); - return response.statuscode(); - } - } - return 0; -} - -int CurvefsTools::ScanLogicalPool() { - // get all logicalpool and compare - // De-duplication - std::set phyPools; - for (const auto& lgPool : lgPoolDatas) { - phyPools.insert(lgPool.physicalPoolName); - } - for (const auto& phyPool : phyPools) { - std::list logicalPoolInfos; - int ret = ListLogicalPool(phyPool, &logicalPoolInfos); - if (ret < 0) { - return ret; - } - for (auto it = logicalPoolInfos.begin(); - it != logicalPoolInfos.end();) { - auto ix = - std::find_if(lgPoolDatas.begin(), lgPoolDatas.end(), - [it](const CurveLogicalPoolData& data) { - return data.name == it->logicalpoolname(); - }); - if (ix != lgPoolDatas.end()) { - lgPoolDatas.erase(ix); - it++; +namespace curve +{ + namespace mds + { + namespace topology + { + + const std::string CurvefsTools::clusterMapSeprator = " "; // NOLINT + + void UpdateFlagsFromConf(curve::common::Configuration *conf) + { + // If the configuration file does not exist, no error will be reported, and + // the command line will prevail. 
This is to avoid strong dependence on the + // configuration If the configuration file exists and no command line is + // specified, the configuration file shall prevail + if (conf->LoadConfig()) + { + google::CommandLineFlagInfo info; + if (GetCommandLineFlagInfo("mds_addr", &info) && info.is_default) + { + conf->GetStringValue("mdsAddr", &FLAGS_mds_addr); + LOG(INFO) << "conf: " << FLAGS_mds_addr; + } + } } - } - } - return 0; -} - -int CurvefsTools::ListLogicalPool( - const std::string& phyPoolName, - std::list* logicalPoolInfos) { - TopologyService_Stub stub(&channel_); - ListLogicalPoolRequest request; - ListLogicalPoolResponse response; - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - request.set_physicalpoolname(phyPoolName); - - LOG(INFO) << "ListLogicalPool send request: " << request.DebugString(); - stub.ListLogicalPool(&cntl, &request, &response, nullptr); - if (cntl.Failed()) { - return kRetCodeRedirectMds; - } - for (int i = 0; i < response.logicalpoolinfos_size(); i++) { - logicalPoolInfos->push_back(response.logicalpoolinfos(i)); - } - return 0; -} - -int CurvefsTools::HandleBuildCluster() { - int ret = ReadClusterMap(); - if (ret < 0) { - return DealFailedRet(ret, "read cluster map"); - } - ret = InitPoolsetData(); - if (ret < 0) { - return DealFailedRet(ret, "init poolset data"); - } - ret = InitServerData(); - if (ret < 0) { - return DealFailedRet(ret, "init server data"); - } - ret = ScanCluster(); - if (ret < 0) { - return DealFailedRet(ret, "scan cluster"); - } - ret = ClearServer(); - if (ret < 0) { - return DealFailedRet(ret, "clear server"); - } - ret = ClearZone(); - if (ret < 0) { - return DealFailedRet(ret, "clear zone"); - } - ret = ClearPhysicalPool(); - if (ret < 0) { - return DealFailedRet(ret, "clear physicalpool"); - } - ret = ClearPoolset(); - if (ret < 0) { - return DealFailedRet(ret, "clear poolset"); - } - ret = CreatePoolset(); - if (ret < 0) { - return DealFailedRet(ret, "create Poolset"); - } - ret = CreatePhysicalPool(); - if (ret < 0) { - return DealFailedRet(ret, "create physicalpool"); - } - ret = CreateZone(); - if (ret < 0) { - return DealFailedRet(ret, "create zone"); - } - ret = CreateServer(); - if (ret < 0) { - return DealFailedRet(ret, "create server"); - } - return ret; -} - -int CurvefsTools::ReadClusterMap() { - std::ifstream fin(FLAGS_cluster_map); - if (fin.is_open()) { - Json::CharReaderBuilder reader; - JSONCPP_STRING errs; - bool ok = Json::parseFromStream(reader, fin, &clusterMap_, &errs); - fin.close(); - if (!ok) { - LOG(ERROR) << "Parse cluster map file " << FLAGS_cluster_map - << " fail: " << errs; - return -1; - } - } else { - LOG(ERROR) << "open cluster map file : " << FLAGS_cluster_map - << " fail."; - return -1; - } - return 0; -} -int CurvefsTools::InitPoolsetData() { - if (clusterMap_[kPoolsets].isNull()) { - return 0; - } - - for (const auto& poolset : clusterMap_[kPoolsets]) { - CurvePoolsetData poolsetData; - if (!poolset[kName].isString()) { - LOG(ERROR) << "poolset name must be string" << poolset[kName]; - return -1; - } - poolsetData.name = poolset[kName].asString(); - - if (!poolset[kType].isString()) { - LOG(ERROR) << "poolset type must be string"; - return -1; - } - poolsetData.type = poolset[kType].asString(); - if (poolsetData.type.empty()) { - LOG(ERROR) << "poolset type must not empty"; - return -1; - } - - poolsetDatas.emplace_back(std::move(poolsetData)); - } - return 0; -} - -int CurvefsTools::InitServerData() { - if (clusterMap_[kServers].isNull()) { - 
LOG(ERROR) << "No servers in cluster map"; - return -1; - } - for (const auto& server : clusterMap_[kServers]) { - CurveServerData serverData; - if (!server[kName].isString()) { - LOG(ERROR) << "server name must be string"; - return -1; - } - serverData.serverName = server[kName].asString(); - if (!server[kInternalIp].isString()) { - LOG(ERROR) << "server internal ip must be string"; - return -1; - } - serverData.internalIp = server[kInternalIp].asString(); - if (!server[kInternalPort].isUInt()) { - LOG(ERROR) << "server internal port must be uint"; - return -1; - } - serverData.internalPort = server[kInternalPort].asUInt(); - if (!server[kExternalIp].isString()) { - LOG(ERROR) << "server internal port must be string"; - return -1; - } - serverData.externalIp = server[kExternalIp].asString(); - if (!server[kExternalPort].isUInt()) { - LOG(ERROR) << "server internal port must be string"; - return -1; - } - serverData.externalPort = server[kExternalPort].asUInt(); - if (!server[kZone].isString()) { - LOG(ERROR) << "server zone must be string"; - return -1; - } - serverData.zoneName = server[kZone].asString(); - - if (!server[kPhysicalPool].isString()) { - LOG(ERROR) << "server physicalpool must be string"; - return -1; - } - serverData.physicalPoolName = server[kPhysicalPool].asString(); - - if (!server.isMember(kPoolsetName)) { - serverData.poolsetName = kDefaultPoolsetName; - } else if (server[kPoolsetName].isString()) { - serverData.poolsetName = server[kPoolsetName].asString(); - } else { - LOG(ERROR) << "server poolsetName must be string, poolsetName is " - << server[kPoolsetName]; - return -1; - } - serverDatas.emplace_back(std::move(serverData)); - } - return 0; -} - -int CurvefsTools::InitLogicalPoolData() { - if (clusterMap_[kLogicalPools].isNull()) { - LOG(ERROR) << "No servers in cluster map"; - return -1; - } - for (const auto& lgPool : clusterMap_[kLogicalPools]) { - CurveLogicalPoolData lgPoolData; - if (!lgPool[kName].isString()) { - LOG(ERROR) << "logicalpool name must be string"; - return -1; - } - lgPoolData.name = lgPool[kName].asString(); - if (!lgPool[kPhysicalPool].isString()) { - LOG(ERROR) << "logicalpool physicalpool must be string"; - return -1; - } - lgPoolData.physicalPoolName = lgPool[kPhysicalPool].asString(); - if (!lgPool[kType].isInt()) { - LOG(ERROR) << "logicalpool type must be int"; - return -1; - } - lgPoolData.type = static_cast(lgPool[kType].asInt()); - if (!lgPool[kReplicasNum].isUInt()) { - LOG(ERROR) << "logicalpool replicasnum must be uint"; - return -1; - } - lgPoolData.replicasNum = lgPool[kReplicasNum].asUInt(); - if (!lgPool[kCopysetNum].isUInt64()) { - LOG(ERROR) << "logicalpool copysetnum must be uint64"; - return -1; - } - lgPoolData.copysetNum = lgPool[kCopysetNum].asUInt64(); - if (!lgPool[kZoneNum].isUInt64()) { - LOG(ERROR) << "logicalpool zonenum must be uint64"; - return -1; - } - lgPoolData.zoneNum = lgPool[kZoneNum].asUInt(); - if (!lgPool[kScatterWidth].isUInt()) { - LOG(ERROR) << "logicalpool scatterwidth must be uint"; - return -1; - } - lgPoolData.scatterwidth = lgPool[kScatterWidth].asUInt(); - if (lgPool[kAllocStatus].isString()) { - if (lgPool[kAllocStatus].asString() == kAllocStatusAllow) { - lgPoolData.status = AllocateStatus::ALLOW; - } else if (lgPool[kAllocStatus].asString() == kAllocStatusDeny) { - lgPoolData.status = AllocateStatus::DENY; - } else { - LOG(ERROR) << "logicalpool status string is invalid!, which is " - << lgPool[kAllocStatus].asString(); - return -1; + int CurvefsTools::Init() + { + 
curve::common::Configuration conf; + conf.SetConfigPath(FLAGS_confPath); + UpdateFlagsFromConf(&conf); + SplitString(FLAGS_mds_addr, ",", &mdsAddressStr_); + if (mdsAddressStr_.empty()) + { + LOG(ERROR) << "no available mds address."; + return kRetCodeCommonErr; + } + + butil::EndPoint endpt; + for (const auto &addr : mdsAddressStr_) + { + if (butil::str2endpoint(addr.c_str(), &endpt) < 0) + { + LOG(ERROR) << "Invalid sub mds ip:port provided: " << addr; + return kRetCodeCommonErr; + } + } + mdsAddressIndex_ = -1; + return 0; } - } else { - LOG(WARNING) << "logicalpool not set, use default allow"; - lgPoolData.status = AllocateStatus::ALLOW; - } - lgPoolDatas.emplace_back(lgPoolData); - } - return 0; -} - -int CurvefsTools::ListPoolset(std::list* poolsetInfos) { - TopologyService_Stub stub(&channel_); - ListPoolsetRequest request; - ListPoolsetResponse response; - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "ListPoolset send request: " << request.DebugString(); - - stub.ListPoolset(&cntl, &request, &response, nullptr); - - if (cntl.Failed()) { - return kRetCodeRedirectMds; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "ListPoolset Rpc response fail. " - << "Message is :" << response.DebugString(); - return response.statuscode(); - } else { - LOG(INFO) << "Received ListPoolset Rpc response success, " - << response.DebugString(); - } - - for (int i = 0; i < response.poolsetinfos_size(); i++) { - poolsetInfos->push_back(response.poolsetinfos(i)); - } - return 0; -} - -int CurvefsTools::ListPhysicalPool( - std::list* physicalPoolInfos) { - TopologyService_Stub stub(&channel_); - ListPhysicalPoolRequest request; - ListPhysicalPoolResponse response; - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "ListPhysicalPool send request: " << request.DebugString(); - - stub.ListPhysicalPool(&cntl, &request, &response, nullptr); - - if (cntl.Failed()) { - return kRetCodeRedirectMds; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "ListPhysicalPool Rpc response fail. " - << "Message is :" << response.DebugString(); - return response.statuscode(); - } else { - LOG(INFO) << "Received ListPhysicalPool Rpc response success, " - << response.DebugString(); - } - - for (int i = 0; i < response.physicalpoolinfos_size(); i++) { - physicalPoolInfos->push_back(response.physicalpoolinfos(i)); - } - return 0; -} - -int CurvefsTools::ListPhysicalPoolsInPoolset( - PoolsetIdType poolsetid, std::list* physicalPoolInfos) { - TopologyService_Stub stub(&channel_); - ListPhysicalPoolsInPoolsetRequest request; - ListPhysicalPoolResponse response; - request.add_poolsetid(poolsetid); - - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "ListPhysicalPoolsInPoolset, send request: " - << request.DebugString(); - - stub.ListPhysicalPoolsInPoolset(&cntl, &request, &response, nullptr); - - if (cntl.Failed()) { - return kRetCodeRedirectMds; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "ListPhysicalPoolsInPoolset Rpc response fail. 
" - << "Message is :" << response.DebugString() - << " , poolsetid = " << poolsetid; - return response.statuscode(); - } else { - LOG(INFO) << "Received ListPhyPoolsInPoolset Rpc resp success," - << response.DebugString(); - } - - for (int i = 0; i < response.physicalpoolinfos_size(); i++) { - physicalPoolInfos->push_back(response.physicalpoolinfos(i)); - } - return 0; -} - -int CurvefsTools::AddListPoolZone(PoolIdType poolid, - std::list* zoneInfos) { - TopologyService_Stub stub(&channel_); - ListPoolZoneRequest request; - ListPoolZoneResponse response; - request.set_physicalpoolid(poolid); - - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "ListPoolZone, send request: " << request.DebugString(); - - stub.ListPoolZone(&cntl, &request, &response, nullptr); - - if (cntl.Failed()) { - return kRetCodeRedirectMds; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "ListPoolZone Rpc response fail. " - << "Message is :" << response.DebugString() - << " , physicalpoolid = " << poolid; - return response.statuscode(); - } else { - LOG(INFO) << "Received ListPoolZone Rpc response success, " - << response.DebugString(); - } - - for (int i = 0; i < response.zones_size(); i++) { - zoneInfos->push_back(response.zones(i)); - } - return 0; -} - -int CurvefsTools::AddListZoneServer(ZoneIdType zoneid, - std::list* serverInfos) { - TopologyService_Stub stub(&channel_); - ListZoneServerRequest request; - ListZoneServerResponse response; - request.set_zoneid(zoneid); - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "ListZoneServer, send request: " << request.DebugString(); - - stub.ListZoneServer(&cntl, &request, &response, nullptr); - - if (cntl.Failed()) { - return kRetCodeRedirectMds; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "ListZoneServer Rpc response fail. 
" - << "Message is :" << response.DebugString() - << " , zoneid = " << zoneid; - return response.statuscode(); - } else { - LOG(INFO) << "ListZoneServer Rpc response success, " - << response.DebugString(); - } - - for (int i = 0; i < response.serverinfo_size(); i++) { - serverInfos->push_back(response.serverinfo(i)); - } - return 0; -} - -int CurvefsTools::ScanCluster() { - // get all poolsets and compare - // De-duplication - for (const auto& poolset : poolsetDatas) { - if (std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), - [poolset](const CurvePoolsetData& data) { - return data.name == poolset.name; - }) != poolsetToAdd.end()) { - continue; - } - poolsetToAdd.push_back(poolset); - } - - std::list poolsetInfos; - int ret = ListPoolset(&poolsetInfos); - if (ret < 0) { - return ret; - } - - for (auto it = poolsetInfos.begin(); it != poolsetInfos.end();) { - if (it->poolsetname() == kDefaultPoolsetName) { - ++it; - continue; - } - - auto ix = std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), - [it](const CurvePoolsetData& data) { - return data.name == it->poolsetname(); - }); - if (ix != poolsetToAdd.end()) { - poolsetToAdd.erase(ix); - it++; - } else { - poolsetToDel.push_back(it->poolsetid()); - it = poolsetInfos.erase(it); - } - } - - // get all phsicalpool and compare - // De-duplication - for (auto server : serverDatas) { - if (std::find_if(physicalPoolToAdd.begin(), physicalPoolToAdd.end(), - [server](CurvePhysicalPoolData& data) { - return data.physicalPoolName == - server.physicalPoolName; - }) != physicalPoolToAdd.end()) { - continue; - } - CurvePhysicalPoolData poolData; - poolData.physicalPoolName = server.physicalPoolName; - poolData.poolsetName = server.poolsetName.empty() ? kDefaultPoolsetName - : server.poolsetName; - physicalPoolToAdd.push_back(poolData); - } - - std::list physicalPoolInfos; - for (auto poolsetid : poolsetToDel) { - ret = ListPhysicalPoolsInPoolset(poolsetid, &physicalPoolInfos); - if (ret < 0) { - return ret; - } - } - for (auto phyPoolinfo : physicalPoolInfos) { - physicalPoolToDel.push_back(phyPoolinfo.physicalpoolid()); - } - - physicalPoolInfos.clear(); - - for (auto it = poolsetInfos.begin(); it != poolsetInfos.end(); it++) { - PoolsetIdType poolsetid = it->poolsetid(); - ret = ListPhysicalPoolsInPoolset(poolsetid, &physicalPoolInfos); - if (ret < 0) { - return ret; - } - } - - for (auto it = physicalPoolInfos.begin(); it != physicalPoolInfos.end();) { - auto ix = std::find_if( - physicalPoolToAdd.begin(), physicalPoolToAdd.end(), - [it](const CurvePhysicalPoolData& data) { - return (data.poolsetName == it->poolsetname()) && - (data.physicalPoolName == it->physicalpoolname()); - }); - if (ix != physicalPoolToAdd.end()) { - physicalPoolToAdd.erase(ix); - it++; - } else { - physicalPoolToDel.push_back(it->physicalpoolid()); - it = physicalPoolInfos.erase(it); - } - } - - // get zone and compare - // De-duplication - for (auto server : serverDatas) { - if (std::find_if(zoneToAdd.begin(), zoneToAdd.end(), - [server](CurveZoneData& data) { - return (data.physicalPoolName == - server.physicalPoolName) && - (data.zoneName == server.zoneName); - }) != zoneToAdd.end()) { - continue; - } - CurveZoneData CurveZoneData; - CurveZoneData.physicalPoolName = server.physicalPoolName; - CurveZoneData.zoneName = server.zoneName; - zoneToAdd.push_back(CurveZoneData); - } - - std::list zoneInfos; - for (auto poolid : physicalPoolToDel) { - ret = AddListPoolZone(poolid, &zoneInfos); - if (ret < 0) { - return ret; - } - } - - for (auto zinfo : zoneInfos) { - 
zoneToDel.push_back(zinfo.zoneid()); - } - - zoneInfos.clear(); - for (auto it = physicalPoolInfos.begin(); it != physicalPoolInfos.end(); - it++) { - PoolIdType poolid = it->physicalpoolid(); - ret = AddListPoolZone(poolid, &zoneInfos); - if (ret < 0) { - return ret; - } - } - - for (auto it = zoneInfos.begin(); it != zoneInfos.end();) { - auto ix = std::find_if( - zoneToAdd.begin(), zoneToAdd.end(), - [it](const CurveZoneData& data) { - return (data.physicalPoolName == it->physicalpoolname()) && - (data.zoneName == it->zonename()); - }); - if (ix != zoneToAdd.end()) { - zoneToAdd.erase(ix); - it++; - } else { - zoneToDel.push_back(it->zoneid()); - it = zoneInfos.erase(it); - } - } - - // get server and compare - // De-duplication - for (auto server : serverDatas) { - if (std::find_if(serverToAdd.begin(), serverToAdd.end(), - [server](CurveServerData& data) { - return data.serverName == server.serverName; - }) != serverToAdd.end()) { - LOG(WARNING) << "WARING! Duplicated Server Name: " - << server.serverName << " , ignored."; - continue; - } - serverToAdd.push_back(server); - } - - std::list serverInfos; - for (auto zoneid : zoneToDel) { - ret = AddListZoneServer(zoneid, &serverInfos); - if (ret < 0) { - return ret; - } - } - - for (auto sinfo : serverInfos) { - serverToDel.push_back(sinfo.serverid()); - } - - serverInfos.clear(); - for (auto it = zoneInfos.begin(); it != zoneInfos.end(); it++) { - ZoneIdType zoneid = it->zoneid(); - ret = AddListZoneServer(zoneid, &serverInfos); - if (ret < 0) { - return ret; - } - } - - for (auto it = serverInfos.begin(); it != serverInfos.end(); it++) { - auto ix = std::find_if( - serverToAdd.begin(), serverToAdd.end(), - [it](const CurveServerData& data) { - return (data.serverName == it->hostname()) && - (data.zoneName == it->zonename()) && - (data.physicalPoolName == it->physicalpoolname()); - }); - if (ix != serverToAdd.end()) { - serverToAdd.erase(ix); - } else { - serverToDel.push_back(it->serverid()); - } - } - - return 0; -} - -int CurvefsTools::CreatePoolset() { - TopologyService_Stub stub(&channel_); - for (const auto& it : poolsetToAdd) { - if (it.name == kDefaultPoolsetName) { - continue; - } - - PoolsetRequest request; - request.set_poolsetname(it.name); - request.set_type(it.type); - request.set_desc(""); - - PoolsetResponse response; - - brpc::Controller cntl; - cntl.set_max_retry(0); - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "CreatePoolset, send request: " << request.DebugString(); - - stub.CreatePoolset(&cntl, &request, &response, nullptr); - - if (cntl.Failed()) { - LOG(WARNING) << "send rpc get cntl Failed, error context:" - << cntl.ErrorText(); - return kRetCodeRedirectMds; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "CreatePoolset Rpc response fail. 
" - << "Message is :" << response.DebugString() - << " , poolsetName =" << it.name; - return response.statuscode(); - } else { - LOG(INFO) << "Received CreatePoolset response success, " - << response.DebugString(); - } - } - return 0; -} - -int CurvefsTools::CreatePhysicalPool() { - TopologyService_Stub stub(&channel_); - for (auto it : physicalPoolToAdd) { - PhysicalPoolRequest request; - request.set_physicalpoolname(it.physicalPoolName); - request.set_desc(""); - request.set_poolsetname(it.poolsetName); - - PhysicalPoolResponse response; - - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "CreatePhysicalPool, send request: " - << request.DebugString(); - - stub.CreatePhysicalPool(&cntl, &request, &response, nullptr); - - if (cntl.Failed()) { - LOG(WARNING) << "send rpc get cntl Failed, error context:" - << cntl.ErrorText(); - return kRetCodeRedirectMds; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "CreatePhysicalPool Rpc response fail. " - << "Message is :" << response.DebugString() - << " , physicalPoolName =" << it.physicalPoolName; - return response.statuscode(); - } else { - LOG(INFO) << "Received CreatePhysicalPool response success, " - << response.DebugString(); - } - } - return 0; -} - -int CurvefsTools::CreateZone() { - TopologyService_Stub stub(&channel_); - for (auto it : zoneToAdd) { - ZoneRequest request; - request.set_zonename(it.zoneName); - request.set_physicalpoolname(it.physicalPoolName); - request.set_desc(""); - - ZoneResponse response; - - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "CreateZone, send request: " << request.DebugString(); - - stub.CreateZone(&cntl, &request, &response, nullptr); - - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "CreateZone, errcorde = " << response.statuscode() - << ", error content:" << cntl.ErrorText() - << " , zoneName = " << it.zoneName; - return kRetCodeCommonErr; - } - if (response.statuscode() != 0) { - LOG(ERROR) << "CreateZone Rpc response fail. 
" - << "Message is :" << response.DebugString() - << " , zoneName = " << it.zoneName; - return response.statuscode(); - } else { - LOG(INFO) << "Received CreateZone Rpc success, " - << response.DebugString(); - } - } - return 0; -} - -int CurvefsTools::CreateServer() { - TopologyService_Stub stub(&channel_); - for (auto it : serverToAdd) { - ServerRegistRequest request; - request.set_hostname(it.serverName); - request.set_internalip(it.internalIp); - request.set_internalport(it.internalPort); - request.set_externalip(it.externalIp); - request.set_externalport(it.externalPort); - request.set_zonename(it.zoneName); - request.set_physicalpoolname(it.physicalPoolName); - request.set_poolsetname(it.poolsetName); - request.set_desc(""); - - ServerRegistResponse response; - - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "CreateServer, send request: " << request.DebugString(); - - stub.RegistServer(&cntl, &request, &response, nullptr); - - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "RegistServer, errcorde = " << response.statuscode() - << ", error content : " << cntl.ErrorText() - << " , serverName = " << it.serverName; - return kRetCodeCommonErr; - } - if (response.statuscode() == kTopoErrCodeSuccess) { - LOG(INFO) << "Received RegistServer Rpc response success, " - << response.DebugString(); - } else if (response.statuscode() == kTopoErrCodeIpPortDuplicated) { - LOG(INFO) << "Server already exist"; - } else { - LOG(ERROR) << "RegistServer Rpc response fail. " - << "Message is :" << response.DebugString() - << " , serverName = " << it.serverName; - return response.statuscode(); - } - } - return 0; -} - -int CurvefsTools::ClearPhysicalPool() { - TopologyService_Stub stub(&channel_); - for (auto it : physicalPoolToDel) { - PhysicalPoolRequest request; - request.set_physicalpoolid(it); - - PhysicalPoolResponse response; - - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "DeletePhysicalPool, send request: " - << request.DebugString(); - - stub.DeletePhysicalPool(&cntl, &request, &response, nullptr); - - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "DeletePhysicalPool, errcorde = " - << response.statuscode() - << ", error content:" << cntl.ErrorText() - << " , physicalPoolId = " << it; - return kRetCodeCommonErr; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "DeletePhysicalPool Rpc response fail. 
" - << "Message is :" << response.DebugString() - << " , physicalPoolId = " << it; - return response.statuscode(); - } else { - LOG(INFO) << "Received DeletePhysicalPool Rpc response success, " - << response.statuscode(); - } - } - return 0; -} - -int CurvefsTools::ClearPoolset() { - TopologyService_Stub stub(&channel_); - for (const auto& it : poolsetToDel) { - PoolsetRequest request; - request.set_poolsetid(it); - - PoolsetResponse response; - - brpc::Controller cntl; - cntl.set_max_retry(0); - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); + int CurvefsTools::TryAnotherMdsAddress() + { + if (mdsAddressStr_.size() == 0) + { + LOG(ERROR) << "no available mds address."; + return kRetCodeCommonErr; + } + mdsAddressIndex_ = (mdsAddressIndex_ + 1) % mdsAddressStr_.size(); + std::string mdsAddress = mdsAddressStr_[mdsAddressIndex_]; + LOG(INFO) << "try mds address(" << mdsAddressIndex_ << "): " << mdsAddress; + int ret = channel_.Init(mdsAddress.c_str(), NULL); + if (ret != 0) + { + LOG(ERROR) << "Fail to init channel to mdsAddress: " << mdsAddress; + } + return ret; + } - LOG(INFO) << "DeletePoolset, send request: " << request.DebugString(); + int CurvefsTools::DealFailedRet(int ret, std::string operation) + { + if (kRetCodeRedirectMds == ret) + { + LOG(WARNING) << operation + << " fail on mds: " << mdsAddressStr_[mdsAddressIndex_]; + } + else + { + LOG(ERROR) << operation << " fail."; + } + return ret; + } - stub.DeletePoolset(&cntl, &request, &response, nullptr); + int CurvefsTools::HandleCreateLogicalPool() + { + int ret = ReadClusterMap(); + if (ret < 0) + { + return DealFailedRet(ret, "read cluster map"); + } + ret = InitLogicalPoolData(); + if (ret < 0) + { + return DealFailedRet(ret, "init logical pool data"); + } + ret = ScanLogicalPool(); + if (ret < 0) + { + return DealFailedRet(ret, "scan logical pool"); + } + for (const auto &lgPool : lgPoolDatas) + { + TopologyService_Stub stub(&channel_); + + CreateLogicalPoolRequest request; + request.set_logicalpoolname(lgPool.name); + request.set_physicalpoolname(lgPool.physicalPoolName); + request.set_type(lgPool.type); + std::string replicaNumStr = std::to_string(lgPool.replicasNum); + std::string copysetNumStr = std::to_string(lgPool.copysetNum); + std::string zoneNumStr = std::to_string(lgPool.zoneNum); + + std::string rapString = "{\"replicaNum\":" + replicaNumStr + + ", \"copysetNum\":" + copysetNumStr + + ", \"zoneNum\":" + zoneNumStr + "}"; + + request.set_redundanceandplacementpolicy(rapString); + request.set_userpolicy("{\"aaa\":1}"); + request.set_scatterwidth(lgPool.scatterwidth); + request.set_status(lgPool.status); + + CreateLogicalPoolResponse response; + + brpc::Controller cntl; + cntl.set_max_retry(0); + cntl.set_timeout_ms(-1); + cntl.set_log_id(1); + + LOG(INFO) << "CreateLogicalPool, second request: " + << request.DebugString(); + + stub.CreateLogicalPool(&cntl, &request, &response, nullptr); + if (cntl.Failed()) + { + LOG(WARNING) << "send rpc get cntl Failed, error context:" + << cntl.ErrorText(); + return kRetCodeRedirectMds; + } + if (response.statuscode() == kTopoErrCodeSuccess) + { + LOG(INFO) << "Received CreateLogicalPool Rpc response success, " + << response.DebugString(); + } + else if (response.statuscode() == kTopoErrCodeLogicalPoolExist) + { + LOG(INFO) << "Logical pool already exist"; + } + else + { + LOG(ERROR) << "CreateLogicalPool Rpc response fail. 
" + << "Message is :" << response.DebugString(); + return response.statuscode(); + } + } + return 0; + } - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "DeletePoolset, errcode = " << response.statuscode() - << ", error content:" << cntl.ErrorText() - << " , PoolsetId = " << it; - return kRetCodeCommonErr; - } else if (response.statuscode() != kTopoErrCodeSuccess && - response.statuscode() != - kTopoErrCodeCannotDeleteDefaultPoolset) { - LOG(ERROR) << "DeletePoolset Rpc response fail. " - << "Message is :" << response.DebugString() - << " , PoolsetId = " << it; - return response.statuscode(); - } else { - LOG(INFO) << "Received DeletePoolset Rpc success, " - << response.DebugString(); - } - } - return 0; -} + int CurvefsTools::ScanLogicalPool() + { + // get all logicalpool and compare + // De-duplication + std::set phyPools; + for (const auto &lgPool : lgPoolDatas) + { + phyPools.insert(lgPool.physicalPoolName); + } + for (const auto &phyPool : phyPools) + { + std::list logicalPoolInfos; + int ret = ListLogicalPool(phyPool, &logicalPoolInfos); + if (ret < 0) + { + return ret; + } + for (auto it = logicalPoolInfos.begin(); + it != logicalPoolInfos.end();) + { + auto ix = + std::find_if(lgPoolDatas.begin(), lgPoolDatas.end(), + [it](const CurveLogicalPoolData &data) + { + return data.name == it->logicalpoolname(); + }); + if (ix != lgPoolDatas.end()) + { + lgPoolDatas.erase(ix); + it++; + } + } + } + return 0; + } -int CurvefsTools::ClearZone() { - TopologyService_Stub stub(&channel_); - for (auto it : zoneToDel) { - ZoneRequest request; - request.set_zoneid(it); + int CurvefsTools::ListLogicalPool( + const std::string &phyPoolName, + std::list *logicalPoolInfos) + { + TopologyService_Stub stub(&channel_); + ListLogicalPoolRequest request; + ListLogicalPoolResponse response; + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + request.set_physicalpoolname(phyPoolName); + + LOG(INFO) << "ListLogicalPool send request: " << request.DebugString(); + stub.ListLogicalPool(&cntl, &request, &response, nullptr); + if (cntl.Failed()) + { + return kRetCodeRedirectMds; + } + for (int i = 0; i < response.logicalpoolinfos_size(); i++) + { + logicalPoolInfos->push_back(response.logicalpoolinfos(i)); + } + return 0; + } - ZoneResponse response; + int CurvefsTools::HandleBuildCluster() + { + int ret = ReadClusterMap(); + if (ret < 0) + { + return DealFailedRet(ret, "read cluster map"); + } + ret = InitPoolsetData(); + if (ret < 0) + { + return DealFailedRet(ret, "init poolset data"); + } + ret = InitServerData(); + if (ret < 0) + { + return DealFailedRet(ret, "init server data"); + } + ret = ScanCluster(); + if (ret < 0) + { + return DealFailedRet(ret, "scan cluster"); + } + ret = ClearServer(); + if (ret < 0) + { + return DealFailedRet(ret, "clear server"); + } + ret = ClearZone(); + if (ret < 0) + { + return DealFailedRet(ret, "clear zone"); + } + ret = ClearPhysicalPool(); + if (ret < 0) + { + return DealFailedRet(ret, "clear physicalpool"); + } + ret = ClearPoolset(); + if (ret < 0) + { + return DealFailedRet(ret, "clear poolset"); + } + ret = CreatePoolset(); + if (ret < 0) + { + return DealFailedRet(ret, "create Poolset"); + } + ret = CreatePhysicalPool(); + if (ret < 0) + { + return DealFailedRet(ret, "create physicalpool"); + } + ret = CreateZone(); + if (ret < 0) + { + return DealFailedRet(ret, "create zone"); + } + ret = CreateServer(); + if (ret < 0) + { 
+ return DealFailedRet(ret, "create server"); + } + return ret; + } - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); + int CurvefsTools::ReadClusterMap() + { + std::ifstream fin(FLAGS_cluster_map); + if (fin.is_open()) + { + Json::CharReaderBuilder reader; + JSONCPP_STRING errs; + bool ok = Json::parseFromStream(reader, fin, &clusterMap_, &errs); + fin.close(); + if (!ok) + { + LOG(ERROR) << "Parse cluster map file " << FLAGS_cluster_map + << " fail: " << errs; + return -1; + } + } + else + { + LOG(ERROR) << "open cluster map file : " << FLAGS_cluster_map + << " fail."; + return -1; + } + return 0; + } + int CurvefsTools::InitPoolsetData() + { + if (clusterMap_[kPoolsets].isNull()) + { + return 0; + } + + for (const auto &poolset : clusterMap_[kPoolsets]) + { + CurvePoolsetData poolsetData; + if (!poolset[kName].isString()) + { + LOG(ERROR) << "poolset name must be string, got: " << poolset[kName]; + return -1; + } + poolsetData.name = poolset[kName].asString(); + + if (!poolset[kType].isString()) + { + LOG(ERROR) << "poolset type must be string"; + return -1; + } + poolsetData.type = poolset[kType].asString(); + if (poolsetData.type.empty()) + { + LOG(ERROR) << "poolset type must not be empty"; + return -1; + } + + poolsetDatas.emplace_back(std::move(poolsetData)); + } + return 0; + } - LOG(INFO) << "DeleteZone, send request: " << request.DebugString(); + int CurvefsTools::InitServerData() + { + if (clusterMap_[kServers].isNull()) + { + LOG(ERROR) << "No servers in cluster map"; + return -1; + } + for (const auto &server : clusterMap_[kServers]) + { + CurveServerData serverData; + if (!server[kName].isString()) + { + LOG(ERROR) << "server name must be string"; + return -1; + } + serverData.serverName = server[kName].asString(); + if (!server[kInternalIp].isString()) + { + LOG(ERROR) << "server internal ip must be string"; + return -1; + } + serverData.internalIp = server[kInternalIp].asString(); + if (!server[kInternalPort].isUInt()) + { + LOG(ERROR) << "server internal port must be uint"; + return -1; + } + serverData.internalPort = server[kInternalPort].asUInt(); + if (!server[kExternalIp].isString()) + { + LOG(ERROR) << "server external ip must be string"; + return -1; + } + serverData.externalIp = server[kExternalIp].asString(); + if (!server[kExternalPort].isUInt()) + { + LOG(ERROR) << "server external port must be uint"; + return -1; + } + serverData.externalPort = server[kExternalPort].asUInt(); + if (!server[kZone].isString()) + { + LOG(ERROR) << "server zone must be string"; + return -1; + } + serverData.zoneName = server[kZone].asString(); + + if (!server[kPhysicalPool].isString()) + { + LOG(ERROR) << "server physicalpool must be string"; + return -1; + } + serverData.physicalPoolName = server[kPhysicalPool].asString(); + + if (!server.isMember(kPoolsetName)) + { + serverData.poolsetName = kDefaultPoolsetName; + } + else if (server[kPoolsetName].isString()) + { + serverData.poolsetName = server[kPoolsetName].asString(); + } + else + { + LOG(ERROR) << "server poolsetName must be string, poolsetName is " + << server[kPoolsetName]; + return -1; + } + + serverDatas.emplace_back(std::move(serverData)); + } + return 0; + } - stub.DeleteZone(&cntl, &request, &response, nullptr); + int CurvefsTools::InitLogicalPoolData() + { + if (clusterMap_[kLogicalPools].isNull()) + { + LOG(ERROR) << "No logicalpools in cluster map"; + return -1; + } + for (const auto &lgPool : clusterMap_[kLogicalPools]) + { + CurveLogicalPoolData lgPoolData; + if
(!lgPool[kName].isString()) + { + LOG(ERROR) << "logicalpool name must be string"; + return -1; + } + lgPoolData.name = lgPool[kName].asString(); + if (!lgPool[kPhysicalPool].isString()) + { + LOG(ERROR) << "logicalpool physicalpool must be string"; + return -1; + } + lgPoolData.physicalPoolName = lgPool[kPhysicalPool].asString(); + if (!lgPool[kType].isInt()) + { + LOG(ERROR) << "logicalpool type must be int"; + return -1; + } + lgPoolData.type = static_cast<LogicalPoolType>(lgPool[kType].asInt()); + if (!lgPool[kReplicasNum].isUInt()) + { + LOG(ERROR) << "logicalpool replicasnum must be uint"; + return -1; + } + lgPoolData.replicasNum = lgPool[kReplicasNum].asUInt(); + if (!lgPool[kCopysetNum].isUInt64()) + { + LOG(ERROR) << "logicalpool copysetnum must be uint64"; + return -1; + } + lgPoolData.copysetNum = lgPool[kCopysetNum].asUInt64(); + if (!lgPool[kZoneNum].isUInt64()) + { + LOG(ERROR) << "logicalpool zonenum must be uint64"; + return -1; + } + lgPoolData.zoneNum = lgPool[kZoneNum].asUInt(); + if (!lgPool[kScatterWidth].isUInt()) + { + LOG(ERROR) << "logicalpool scatterwidth must be uint"; + return -1; + } + lgPoolData.scatterwidth = lgPool[kScatterWidth].asUInt(); + if (lgPool[kAllocStatus].isString()) + { + if (lgPool[kAllocStatus].asString() == kAllocStatusAllow) + { + lgPoolData.status = AllocateStatus::ALLOW; + } + else if (lgPool[kAllocStatus].asString() == kAllocStatusDeny) + { + lgPoolData.status = AllocateStatus::DENY; + } + else + { + LOG(ERROR) << "logicalpool status string is invalid, which is " + << lgPool[kAllocStatus].asString(); + return -1; + } + } + else + { + LOG(WARNING) << "logicalpool allocstatus not set, use default allow"; + lgPoolData.status = AllocateStatus::ALLOW; + } + lgPoolDatas.emplace_back(lgPoolData); + } + return 0; + } - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "DeleteZone, errcorde = " << response.statuscode() - << ", error content:" << cntl.ErrorText() - << " , zoneId = " << it; - return kRetCodeCommonErr; - } else if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "DeleteZone Rpc response fail. " - << "Message is :" << response.DebugString() - << " , zoneId = " << it; - return response.statuscode(); - } else { - LOG(INFO) << "Received DeleteZone Rpc success, " - << response.DebugString(); - } - } - return 0; -} + int CurvefsTools::ListPoolset(std::list *poolsetInfos) + { + TopologyService_Stub stub(&channel_); + ListPoolsetRequest request; + ListPoolsetResponse response; + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "ListPoolset send request: " << request.DebugString(); + + stub.ListPoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) + { + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "ListPoolset Rpc response fail.
" + << "Message is :" << response.DebugString(); + return response.statuscode(); + } + else + { + LOG(INFO) << "Received ListPoolset Rpc response success, " + << response.DebugString(); + } + + for (int i = 0; i < response.poolsetinfos_size(); i++) + { + poolsetInfos->push_back(response.poolsetinfos(i)); + } + return 0; + } -int CurvefsTools::ClearServer() { - TopologyService_Stub stub(&channel_); - for (auto it : serverToDel) { - DeleteServerRequest request; - request.set_serverid(it); + int CurvefsTools::ListPhysicalPool( + std::list *physicalPoolInfos) + { + TopologyService_Stub stub(&channel_); + ListPhysicalPoolRequest request; + ListPhysicalPoolResponse response; + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "ListPhysicalPool send request: " << request.DebugString(); + + stub.ListPhysicalPool(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) + { + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "ListPhysicalPool Rpc response fail. " + << "Message is :" << response.DebugString(); + return response.statuscode(); + } + else + { + LOG(INFO) << "Received ListPhysicalPool Rpc response success, " + << response.DebugString(); + } + + for (int i = 0; i < response.physicalpoolinfos_size(); i++) + { + physicalPoolInfos->push_back(response.physicalpoolinfos(i)); + } + return 0; + } - DeleteServerResponse response; + int CurvefsTools::ListPhysicalPoolsInPoolset( + PoolsetIdType poolsetid, std::list *physicalPoolInfos) + { + TopologyService_Stub stub(&channel_); + ListPhysicalPoolsInPoolsetRequest request; + ListPhysicalPoolResponse response; + request.add_poolsetid(poolsetid); + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "ListPhysicalPoolsInPoolset, send request: " + << request.DebugString(); + + stub.ListPhysicalPoolsInPoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) + { + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "ListPhysicalPoolsInPoolset Rpc response fail. " + << "Message is :" << response.DebugString() + << " , poolsetid = " << poolsetid; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received ListPhyPoolsInPoolset Rpc resp success," + << response.DebugString(); + } + + for (int i = 0; i < response.physicalpoolinfos_size(); i++) + { + physicalPoolInfos->push_back(response.physicalpoolinfos(i)); + } + return 0; + } - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); + int CurvefsTools::AddListPoolZone(PoolIdType poolid, + std::list *zoneInfos) + { + TopologyService_Stub stub(&channel_); + ListPoolZoneRequest request; + ListPoolZoneResponse response; + request.set_physicalpoolid(poolid); + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "ListPoolZone, send request: " << request.DebugString(); + + stub.ListPoolZone(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) + { + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "ListPoolZone Rpc response fail. 
" + << "Message is :" << response.DebugString() + << " , physicalpoolid = " << poolid; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received ListPoolZone Rpc response success, " + << response.DebugString(); + } + + for (int i = 0; i < response.zones_size(); i++) + { + zoneInfos->push_back(response.zones(i)); + } + return 0; + } - LOG(INFO) << "DeleteServer, send request: " << request.DebugString(); + int CurvefsTools::AddListZoneServer(ZoneIdType zoneid, + std::list *serverInfos) + { + TopologyService_Stub stub(&channel_); + ListZoneServerRequest request; + ListZoneServerResponse response; + request.set_zoneid(zoneid); + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "ListZoneServer, send request: " << request.DebugString(); + + stub.ListZoneServer(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) + { + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "ListZoneServer Rpc response fail. " + << "Message is :" << response.DebugString() + << " , zoneid = " << zoneid; + return response.statuscode(); + } + else + { + LOG(INFO) << "ListZoneServer Rpc response success, " + << response.DebugString(); + } + + for (int i = 0; i < response.serverinfo_size(); i++) + { + serverInfos->push_back(response.serverinfo(i)); + } + return 0; + } - stub.DeleteServer(&cntl, &request, &response, nullptr); + int CurvefsTools::ScanCluster() + { + // get all poolsets and compare + // De-duplication + for (const auto &poolset : poolsetDatas) + { + if (std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), + [poolset](const CurvePoolsetData &data) + { + return data.name == poolset.name; + }) != poolsetToAdd.end()) + { + continue; + } + poolsetToAdd.push_back(poolset); + } + + std::list poolsetInfos; + int ret = ListPoolset(&poolsetInfos); + if (ret < 0) + { + return ret; + } + + for (auto it = poolsetInfos.begin(); it != poolsetInfos.end();) + { + if (it->poolsetname() == kDefaultPoolsetName) + { + ++it; + continue; + } + + auto ix = std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), + [it](const CurvePoolsetData &data) + { + return data.name == it->poolsetname(); + }); + if (ix != poolsetToAdd.end()) + { + poolsetToAdd.erase(ix); + it++; + } + else + { + poolsetToDel.push_back(it->poolsetid()); + it = poolsetInfos.erase(it); + } + } + + // get all phsicalpool and compare + // De-duplication + for (auto server : serverDatas) + { + if (std::find_if(physicalPoolToAdd.begin(), physicalPoolToAdd.end(), + [server](CurvePhysicalPoolData &data) + { + return data.physicalPoolName == + server.physicalPoolName; + }) != physicalPoolToAdd.end()) + { + continue; + } + CurvePhysicalPoolData poolData; + poolData.physicalPoolName = server.physicalPoolName; + poolData.poolsetName = server.poolsetName.empty() ? 
kDefaultPoolsetName + : server.poolsetName; + physicalPoolToAdd.push_back(poolData); + } + + std::list<PhysicalPoolInfo> physicalPoolInfos; + for (auto poolsetid : poolsetToDel) + { + ret = ListPhysicalPoolsInPoolset(poolsetid, &physicalPoolInfos); + if (ret < 0) + { + return ret; + } + } + + for (auto phyPoolinfo : physicalPoolInfos) + { + physicalPoolToDel.push_back(phyPoolinfo.physicalpoolid()); + } + + physicalPoolInfos.clear(); + + for (auto it = poolsetInfos.begin(); it != poolsetInfos.end(); it++) + { + PoolsetIdType poolsetid = it->poolsetid(); + ret = ListPhysicalPoolsInPoolset(poolsetid, &physicalPoolInfos); + if (ret < 0) + { + return ret; + } + } + + for (auto it = physicalPoolInfos.begin(); it != physicalPoolInfos.end();) + { + auto ix = std::find_if( + physicalPoolToAdd.begin(), physicalPoolToAdd.end(), + [it](const CurvePhysicalPoolData &data) + { + return (data.poolsetName == it->poolsetname()) && + (data.physicalPoolName == it->physicalpoolname()); + }); + if (ix != physicalPoolToAdd.end()) + { + physicalPoolToAdd.erase(ix); + it++; + } + else + { + physicalPoolToDel.push_back(it->physicalpoolid()); + it = physicalPoolInfos.erase(it); + } + } + + // get zone and compare + // De-duplication + for (auto server : serverDatas) + { + if (std::find_if(zoneToAdd.begin(), zoneToAdd.end(), + [server](CurveZoneData &data) + { + return (data.physicalPoolName == + server.physicalPoolName) && + (data.zoneName == server.zoneName); + }) != zoneToAdd.end()) + { + continue; + } + CurveZoneData CurveZoneData; + CurveZoneData.physicalPoolName = server.physicalPoolName; + CurveZoneData.zoneName = server.zoneName; + zoneToAdd.push_back(CurveZoneData); + } + + std::list<ZoneInfo> zoneInfos; + for (auto poolid : physicalPoolToDel) + { + ret = AddListPoolZone(poolid, &zoneInfos); + if (ret < 0) + { + return ret; + } + } + + for (auto zinfo : zoneInfos) + { + zoneToDel.push_back(zinfo.zoneid()); + } + + zoneInfos.clear(); + for (auto it = physicalPoolInfos.begin(); it != physicalPoolInfos.end(); + it++) + { + PoolIdType poolid = it->physicalpoolid(); + ret = AddListPoolZone(poolid, &zoneInfos); + if (ret < 0) + { + return ret; + } + } + + for (auto it = zoneInfos.begin(); it != zoneInfos.end();) + { + auto ix = std::find_if( + zoneToAdd.begin(), zoneToAdd.end(), + [it](const CurveZoneData &data) + { + return (data.physicalPoolName == it->physicalpoolname()) && + (data.zoneName == it->zonename()); + }); + if (ix != zoneToAdd.end()) + { + zoneToAdd.erase(ix); + it++; + } + else + { + zoneToDel.push_back(it->zoneid()); + it = zoneInfos.erase(it); + } + } + + // get server and compare + // De-duplication + for (auto server : serverDatas) + { + if (std::find_if(serverToAdd.begin(), serverToAdd.end(), + [server](CurveServerData &data) + { + return data.serverName == server.serverName; + }) != serverToAdd.end()) + { + LOG(WARNING) << "WARNING!
Duplicated Server Name: " + << server.serverName << " , ignored."; + continue; + } + serverToAdd.push_back(server); + } + + std::list serverInfos; + for (auto zoneid : zoneToDel) + { + ret = AddListZoneServer(zoneid, &serverInfos); + if (ret < 0) + { + return ret; + } + } + + for (auto sinfo : serverInfos) + { + serverToDel.push_back(sinfo.serverid()); + } + + serverInfos.clear(); + for (auto it = zoneInfos.begin(); it != zoneInfos.end(); it++) + { + ZoneIdType zoneid = it->zoneid(); + ret = AddListZoneServer(zoneid, &serverInfos); + if (ret < 0) + { + return ret; + } + } + + for (auto it = serverInfos.begin(); it != serverInfos.end(); it++) + { + auto ix = std::find_if( + serverToAdd.begin(), serverToAdd.end(), + [it](const CurveServerData &data) + { + return (data.serverName == it->hostname()) && + (data.zoneName == it->zonename()) && + (data.physicalPoolName == it->physicalpoolname()); + }); + if (ix != serverToAdd.end()) + { + serverToAdd.erase(ix); + } + else + { + serverToDel.push_back(it->serverid()); + } + } + + return 0; + } - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "DeleteServer, errcorde = " << response.statuscode() - << ", error content:" << cntl.ErrorText() - << " , serverId = " << it; - return kRetCodeCommonErr; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "DeleteServer Rpc response fail. " - << "Message is :" << response.DebugString() - << " , serverId = " << it; - return response.statuscode(); - } else { - LOG(INFO) << "Received DeleteServer Rpc response success, " - << response.DebugString(); - } - } - return 0; -} + int CurvefsTools::CreatePoolset() + { + TopologyService_Stub stub(&channel_); + for (const auto &it : poolsetToAdd) + { + if (it.name == kDefaultPoolsetName) + { + continue; + } + + PoolsetRequest request; + request.set_poolsetname(it.name); + request.set_type(it.type); + request.set_desc(""); + + PoolsetResponse response; + + brpc::Controller cntl; + cntl.set_max_retry(0); + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "CreatePoolset, send request: " << request.DebugString(); + + stub.CreatePoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) + { + LOG(WARNING) << "send rpc get cntl Failed, error context:" + << cntl.ErrorText(); + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "CreatePoolset Rpc response fail. 
" + << "Message is :" << response.DebugString() + << " , poolsetName =" << it.name; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received CreatePoolset response success, " + << response.DebugString(); + } + } + return 0; + } -int CurvefsTools::SetChunkServer() { - SetChunkServerStatusRequest request; - request.set_chunkserverid(FLAGS_chunkserver_id); - if (FLAGS_chunkserver_status == "pendding") { - request.set_chunkserverstatus(ChunkServerStatus::PENDDING); - } else if (FLAGS_chunkserver_status == "readwrite") { - request.set_chunkserverstatus(ChunkServerStatus::READWRITE); - } else if (FLAGS_chunkserver_status == "retired") { - LOG(ERROR) << "SetChunkServer retired not unsupport!"; - return kRetCodeCommonErr; - } else { - LOG(ERROR) << "SetChunkServer param error, unknown chunkserver status"; - return kRetCodeCommonErr; - } + int CurvefsTools::CreatePhysicalPool() + { + TopologyService_Stub stub(&channel_); + for (auto it : physicalPoolToAdd) + { + PhysicalPoolRequest request; + request.set_physicalpoolname(it.physicalPoolName); + request.set_desc(""); + request.set_poolsetname(it.poolsetName); + + PhysicalPoolResponse response; + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "CreatePhysicalPool, send request: " + << request.DebugString(); + + stub.CreatePhysicalPool(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) + { + LOG(WARNING) << "send rpc get cntl Failed, error context:" + << cntl.ErrorText(); + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "CreatePhysicalPool Rpc response fail. " + << "Message is :" << response.DebugString() + << " , physicalPoolName =" << it.physicalPoolName; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received CreatePhysicalPool response success, " + << response.DebugString(); + } + } + return 0; + } - SetChunkServerStatusResponse response; - TopologyService_Stub stub(&channel_); - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); + int CurvefsTools::CreateZone() + { + TopologyService_Stub stub(&channel_); + for (auto it : zoneToAdd) + { + ZoneRequest request; + request.set_zonename(it.zoneName); + request.set_physicalpoolname(it.physicalPoolName); + request.set_desc(""); + + ZoneResponse response; + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "CreateZone, send request: " << request.DebugString(); + + stub.CreateZone(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || + cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "CreateZone, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , zoneName = " << it.zoneName; + return kRetCodeCommonErr; + } + if (response.statuscode() != 0) + { + LOG(ERROR) << "CreateZone Rpc response fail. 
" + << "Message is :" << response.DebugString() + << " , zoneName = " << it.zoneName; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received CreateZone Rpc success, " + << response.DebugString(); + } + } + return 0; + } - LOG(INFO) << "SetChunkServerStatusRequest, send request: " - << request.DebugString(); + int CurvefsTools::CreateServer() + { + TopologyService_Stub stub(&channel_); + for (auto it : serverToAdd) + { + ServerRegistRequest request; + request.set_hostname(it.serverName); + request.set_internalip(it.internalIp); + request.set_internalport(it.internalPort); + request.set_externalip(it.externalIp); + request.set_externalport(it.externalPort); + request.set_zonename(it.zoneName); + request.set_physicalpoolname(it.physicalPoolName); + request.set_poolsetname(it.poolsetName); + request.set_desc(""); + + ServerRegistResponse response; + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "CreateServer, send request: " << request.DebugString(); + + stub.RegistServer(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || + cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "RegistServer, errcorde = " << response.statuscode() + << ", error content : " << cntl.ErrorText() + << " , serverName = " << it.serverName; + return kRetCodeCommonErr; + } + if (response.statuscode() == kTopoErrCodeSuccess) + { + LOG(INFO) << "Received RegistServer Rpc response success, " + << response.DebugString(); + } + else if (response.statuscode() == kTopoErrCodeIpPortDuplicated) + { + LOG(INFO) << "Server already exist"; + } + else + { + LOG(ERROR) << "RegistServer Rpc response fail. " + << "Message is :" << response.DebugString() + << " , serverName = " << it.serverName; + return response.statuscode(); + } + } + return 0; + } - stub.SetChunkServer(&cntl, &request, &response, nullptr); + int CurvefsTools::ClearPhysicalPool() + { + TopologyService_Stub stub(&channel_); + for (auto it : physicalPoolToDel) + { + PhysicalPoolRequest request; + request.set_physicalpoolid(it); + + PhysicalPoolResponse response; + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "DeletePhysicalPool, send request: " + << request.DebugString(); + + stub.DeletePhysicalPool(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || + cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "DeletePhysicalPool, errcorde = " + << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , physicalPoolId = " << it; + return kRetCodeCommonErr; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "DeletePhysicalPool Rpc response fail. " + << "Message is :" << response.DebugString() + << " , physicalPoolId = " << it; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received DeletePhysicalPool Rpc response success, " + << response.statuscode(); + } + } + return 0; + } - if (cntl.ErrorCode() == EHOSTDOWN || cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "SetChunkServerStatusRequest, errcorde = " - << response.statuscode() - << ", error content:" << cntl.ErrorText(); - return kRetCodeCommonErr; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "SetChunkServerStatusRequest Rpc response fail. 
" - << "Message is :" << response.DebugString(); - return response.statuscode(); - } else { - LOG(INFO) << "Received SetChunkServerStatusRequest Rpc " - << "response success, " << response.DebugString(); - } - return 0; -} + int CurvefsTools::ClearPoolset() + { + TopologyService_Stub stub(&channel_); + for (const auto &it : poolsetToDel) + { + PoolsetRequest request; + request.set_poolsetid(it); + + PoolsetResponse response; + + brpc::Controller cntl; + cntl.set_max_retry(0); + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "DeletePoolset, send request: " << request.DebugString(); + + stub.DeletePoolset(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || + cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "DeletePoolset, errcode = " << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , PoolsetId = " << it; + return kRetCodeCommonErr; + } + else if (response.statuscode() != kTopoErrCodeSuccess && + response.statuscode() != + kTopoErrCodeCannotDeleteDefaultPoolset) + { + LOG(ERROR) << "DeletePoolset Rpc response fail. " + << "Message is :" << response.DebugString() + << " , PoolsetId = " << it; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received DeletePoolset Rpc success, " + << response.DebugString(); + } + } + return 0; + } -int CurvefsTools::ScanPoolset() { - for (const auto& poolset : poolsetDatas) { - if (std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), - [poolset](CurvePoolsetData& data) { - return data.name == poolset.name; - }) != poolsetToAdd.end()) { - continue; - } - // CurvePoolsetData poolsetData; - // poolsetData.name = poolset.; - poolsetToAdd.push_back(poolset); - } - std::list poolsetInfos; - int ret = ListPoolset(&poolsetInfos); - if (ret < 0) { - return ret; - } - for (auto it = poolsetInfos.begin(); it != poolsetInfos.end();) { - auto ix = std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), - [it](const CurvePoolsetData& data) { - return data.name == it->poolsetname(); - }); - if (ix != poolsetToAdd.end()) { - poolsetToAdd.erase(ix); - it++; - } else { - poolsetToDel.push_back(static_cast(it->poolsetid())); - it = poolsetInfos.erase(it); - } - } - return 0; -} + int CurvefsTools::ClearZone() + { + TopologyService_Stub stub(&channel_); + for (auto it : zoneToDel) + { + ZoneRequest request; + request.set_zoneid(it); + + ZoneResponse response; + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "DeleteZone, send request: " << request.DebugString(); + + stub.DeleteZone(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || + cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "DeleteZone, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , zoneId = " << it; + return kRetCodeCommonErr; + } + else if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "DeleteZone Rpc response fail. 
" + << "Message is :" << response.DebugString() + << " , zoneId = " << it; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received DeleteZone Rpc success, " + << response.DebugString(); + } + } + return 0; + } -int CurvefsTools::SetLogicalPool() { - SetLogicalPoolRequest request; - request.set_logicalpoolid(FLAGS_logicalpool_id); - if (FLAGS_logicalpool_status == "allow") { - request.set_status(AllocateStatus::ALLOW); - } else if (FLAGS_logicalpool_status == "deny") { - request.set_status(AllocateStatus::DENY); - } else { - LOG(ERROR) << "SetLogicalPool param error, unknown logicalpool status"; - return kRetCodeCommonErr; - } - SetLogicalPoolResponse response; - TopologyService_Stub stub(&channel_); - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); + int CurvefsTools::ClearServer() + { + TopologyService_Stub stub(&channel_); + for (auto it : serverToDel) + { + DeleteServerRequest request; + request.set_serverid(it); + + DeleteServerResponse response; + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "DeleteServer, send request: " << request.DebugString(); + + stub.DeleteServer(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || + cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "DeleteServer, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , serverId = " << it; + return kRetCodeCommonErr; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "DeleteServer Rpc response fail. " + << "Message is :" << response.DebugString() + << " , serverId = " << it; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received DeleteServer Rpc response success, " + << response.DebugString(); + } + } + return 0; + } - LOG(INFO) << "SetLogicalPool, send request: " << request.DebugString(); + int CurvefsTools::SetChunkServer() + { + SetChunkServerStatusRequest request; + request.set_chunkserverid(FLAGS_chunkserver_id); + if (FLAGS_chunkserver_status == "pendding") + { + request.set_chunkserverstatus(ChunkServerStatus::PENDDING); + } + else if (FLAGS_chunkserver_status == "readwrite") + { + request.set_chunkserverstatus(ChunkServerStatus::READWRITE); + } + else if (FLAGS_chunkserver_status == "retired") + { + LOG(ERROR) << "SetChunkServer retired not unsupport!"; + return kRetCodeCommonErr; + } + else + { + LOG(ERROR) << "SetChunkServer param error, unknown chunkserver status"; + return kRetCodeCommonErr; + } + + SetChunkServerStatusResponse response; + TopologyService_Stub stub(&channel_); + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "SetChunkServerStatusRequest, send request: " + << request.DebugString(); + + stub.SetChunkServer(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "SetChunkServerStatusRequest, errcorde = " + << response.statuscode() + << ", error content:" << cntl.ErrorText(); + return kRetCodeCommonErr; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "SetChunkServerStatusRequest Rpc response fail. 
" + << "Message is :" << response.DebugString(); + return response.statuscode(); + } + else + { + LOG(INFO) << "Received SetChunkServerStatusRequest Rpc " + << "response success, " << response.DebugString(); + } + return 0; + } - stub.SetLogicalPool(&cntl, &request, &response, nullptr); + int CurvefsTools::ScanPoolset() + { + for (const auto &poolset : poolsetDatas) + { + if (std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), + [poolset](CurvePoolsetData &data) + { + return data.name == poolset.name; + }) != poolsetToAdd.end()) + { + continue; + } + // CurvePoolsetData poolsetData; + // poolsetData.name = poolset.; + poolsetToAdd.push_back(poolset); + } + std::list poolsetInfos; + int ret = ListPoolset(&poolsetInfos); + if (ret < 0) + { + return ret; + } + for (auto it = poolsetInfos.begin(); it != poolsetInfos.end();) + { + auto ix = std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), + [it](const CurvePoolsetData &data) + { + return data.name == it->poolsetname(); + }); + if (ix != poolsetToAdd.end()) + { + poolsetToAdd.erase(ix); + it++; + } + else + { + poolsetToDel.push_back(static_cast(it->poolsetid())); + it = poolsetInfos.erase(it); + } + } + return 0; + } - if (cntl.ErrorCode() == EHOSTDOWN || cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "SetLogicalPool, errcorde = " << response.statuscode() - << ", error content:" << cntl.ErrorText(); - return kRetCodeCommonErr; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "SetLogicalPool Rpc response fail. " - << "Message is :" << response.DebugString(); - return response.statuscode(); - } else { - LOG(INFO) << "Received SetLogicalPool Rpc " - << "response success, " << response.DebugString(); - } - return 0; -} + int CurvefsTools::SetLogicalPool() + { + SetLogicalPoolRequest request; + request.set_logicalpoolid(FLAGS_logicalpool_id); + if (FLAGS_logicalpool_status == "allow") + { + request.set_status(AllocateStatus::ALLOW); + } + else if (FLAGS_logicalpool_status == "deny") + { + request.set_status(AllocateStatus::DENY); + } + else + { + LOG(ERROR) << "SetLogicalPool param error, unknown logicalpool status"; + return kRetCodeCommonErr; + } + SetLogicalPoolResponse response; + TopologyService_Stub stub(&channel_); + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "SetLogicalPool, send request: " << request.DebugString(); + + stub.SetLogicalPool(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "SetLogicalPool, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText(); + return kRetCodeCommonErr; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "SetLogicalPool Rpc response fail. 
" + << "Message is :" << response.DebugString(); + return response.statuscode(); + } + else + { + LOG(INFO) << "Received SetLogicalPool Rpc " + << "response success, " << response.DebugString(); + } + return 0; + } -} // namespace topology -} // namespace mds -} // namespace curve + } // namespace topology + } // namespace mds +} // namespace curve -int main(int argc, char** argv) { +int main(int argc, char **argv) +{ google::InitGoogleLogging(argv[0]); google::ParseCommandLineFlags(&argc, &argv, false); int ret = 0; curve::mds::topology::CurvefsTools tools; - if (tools.Init() < 0) { + if (tools.Init() < 0) + { LOG(ERROR) << "curvefsTool init error."; return kRetCodeCommonErr; } int maxTry = tools.GetMaxTry(); int retry = 0; - for (; retry < maxTry; retry++) { + for (; retry < maxTry; retry++) + { ret = tools.TryAnotherMdsAddress(); - if (ret < 0) { + if (ret < 0) + { return kRetCodeCommonErr; } std::string operation = FLAGS_op; - if (operation == "create_logicalpool") { + if (operation == "create_logicalpool") + { ret = tools.HandleCreateLogicalPool(); - } else if (operation == "create_physicalpool") { + } + else if (operation == "create_physicalpool") + { ret = tools.HandleBuildCluster(); - } else if (operation == "set_chunkserver") { + } + else if (operation == "set_chunkserver") + { ret = tools.SetChunkServer(); - } else if (operation == "set_logicalpool") { + } + else if (operation == "set_logicalpool") + { ret = tools.SetLogicalPool(); - } else { + } + else + { LOG(ERROR) << "undefined op."; ret = kRetCodeCommonErr; break; } - if (ret != kRetCodeRedirectMds) { + if (ret != kRetCodeRedirectMds) + { break; } } - if (retry >= maxTry) { + if (retry >= maxTry) + { LOG(ERROR) << "rpc retry times exceed."; return kRetCodeCommonErr; } - if (ret < 0) { + if (ret < 0) + { LOG(ERROR) << "exec fail, ret = " << ret; - } else { + } + else + { LOG(INFO) << "exec success, ret = " << ret; } From c36835aaa2acab5537493f504a729b7c3a399b27 Mon Sep 17 00:00:00 2001 From: koko2pp Date: Fri, 13 Oct 2023 22:56:07 +0800 Subject: [PATCH 5/8] style: Apply Clang format to modified files ,and restore patch files, and rebase all conficts. 
--- src/chunkserver/chunkserver.cpp | 6 ++---- src/chunkserver/clone_core.cpp | 10 +++++----- src/chunkserver/conf_epoch_file.h | 12 ++++++------ src/common/bitmap.h | 4 ++-- src/tools/chunkserver_client.h | 24 +++++++++++------------ test/integration/cluster_common/cluster.h | 2 +- 6 files changed, 28 insertions(+), 30 deletions(-) diff --git a/src/chunkserver/chunkserver.cpp b/src/chunkserver/chunkserver.cpp index ddc7875277..8101735949 100644 --- a/src/chunkserver/chunkserver.cpp +++ b/src/chunkserver/chunkserver.cpp @@ -95,8 +95,7 @@ namespace curve RegisterCurveSegmentLogStorageOrDie(); - // ==========================Load Configuration - // Items===============================// + // ==========================Load Configuration Items===============================// LOG(INFO) << "Loading Configuration."; common::Configuration conf; conf.SetConfigPath(FLAGS_conf.c_str()); @@ -116,8 +115,7 @@ namespace curve conf.PrintConfig(); curve::common::ExposeCurveVersion(); - // ============================nitialize each - // module==========================// + // ============================Initialize each module==========================// LOG(INFO) << "Initializing ChunkServer modules"; // Prioritize initializing the metric collection module diff --git a/src/chunkserver/clone_core.cpp b/src/chunkserver/clone_core.cpp index af05a01646..99eb260a95 100644 --- a/src/chunkserver/clone_core.cpp +++ b/src/chunkserver/clone_core.cpp @@ -223,11 +223,11 @@ int CloneCore::HandleReadRequest(std::shared_ptr readRequest, CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo); /* - *Chunk exists: Check and analyze Bitmap to determine if it can be read - *locally Chunk does not exist: if it contains clone information, it will be - *read from clonesource, otherwise an error will be returned Because the - *upper level ReadChunkRequest::OnApply has already processed NoExist And - *the situation where cloneinfo does not exist + * Chunk exists: Check and analyze Bitmap to determine if it can be read + * locally Chunk does not exist: if it contains clone information, it will be + * read from clonesource, otherwise an error will be returned Because the + * upper level ReadChunkRequest::OnApply has already processed NoExist And + * the situation where cloneinfo does not exist */ switch (errorCode) { case CSErrorCode::Success: diff --git a/src/chunkserver/conf_epoch_file.h b/src/chunkserver/conf_epoch_file.h index 4d2513fc2b..979dd90032 100644 --- a/src/chunkserver/conf_epoch_file.h +++ b/src/chunkserver/conf_epoch_file.h @@ -59,12 +59,12 @@ class ConfEpochFile { /** * Serialize configuration version information and save it to a snapshot - *file. The format is as follows: The 'head' indicates the length and is in - *binary format. The rest is in text format for easy viewing when necessary. - *'sync' ensures data persistence. | head | - *Configuration version information | | 8 bytes size_t | uint32_t | - *Variable length text | | length | crc32 | logic pool id - *| copyset id | epoch| The persistence above is separated by ':' + * file. The format is as follows: The 'head' indicates the length and is in + * binary format. The rest is in text format for easy viewing when necessary. + * 'sync' ensures data persistence. 
| head | + * Configuration version information | | 8 bytes size_t | uint32_t | + * Variable length text | | length | crc32 | logic pool id + * | copyset id | epoch| The persistence above is separated by ':' * @param path: File path * @param logicPoolID: Logical Pool ID * @param copysetID: Copy group ID diff --git a/src/common/bitmap.h b/src/common/bitmap.h index f4b6f76ce7..34cf72edbd 100644 --- a/src/common/bitmap.h +++ b/src/common/bitmap.h @@ -57,8 +57,8 @@ class Bitmap { explicit Bitmap(uint32_t bits); /** * Constructor when initializing from an existing snapshot file - *The constructor will create a new bitmap internally, and then use the - *bitmap memcpy in the parameters + * The constructor will create a new bitmap internally, and then use the + * bitmap memcpy in the parameters * @param bits: Bitmap bits * @param bitmap: An externally provided bitmap for initialization */ diff --git a/src/tools/chunkserver_client.h b/src/tools/chunkserver_client.h index 400755cb30..6c6e006e31 100644 --- a/src/tools/chunkserver_client.h +++ b/src/tools/chunkserver_client.h @@ -80,22 +80,22 @@ class ChunkServerClient { virtual bool CheckChunkServerOnline(); /** - * @brief calls the GetCopysetStatus interface of chunkserver - * @param request Query the request for the copyset - * @param response The response returned contains detailed information about - * the replication group, which is valid when the return value is 0 - * @return returns 0 for success, -1 for failure - */ + * @brief calls the GetCopysetStatus interface of chunkserver + * @param request Query the request for the copyset + * @param response The response returned contains detailed information + * about the replication group, which is valid when the return value is 0 + * @return returns 0 for success, -1 for failure + */ virtual int GetCopysetStatus(const CopysetStatusRequest& request, CopysetStatusResponse* response); /** - * @brief Get the hash value of chunks from chunkserver - * @param chunk The chunk to query - * @param[out] The hash value chunkHash chunk, valid when the return value - * is 0 - * @return returns 0 for success, -1 for failure - */ + * @brief Get the hash value of chunks from chunkserver + * @param chunk The chunk to query + * @param[out] The hash value chunkHash chunk, valid when the return value + * is 0 + * @return returns 0 for success, -1 for failure + */ virtual int GetChunkHash(const Chunk& chunk, std::string* chunkHash); private: diff --git a/test/integration/cluster_common/cluster.h b/test/integration/cluster_common/cluster.h index 71777d5241..f663594f4a 100644 --- a/test/integration/cluster_common/cluster.h +++ b/test/integration/cluster_common/cluster.h @@ -120,7 +120,7 @@ class CurveCluster { /** * StartSingleMDS starts an mds * If need chunkservers with different IPs, please set the ipPort to - 192.168.200.1:XXXX + * 192.168.200.1:XXXX * * @param[in] id mdsId * @param[in] ipPort specifies the ipPort of the mds From b0148e250affae2d37b91af4f3ee6bd503f5bdf9 Mon Sep 17 00:00:00 2001 From: koko2pp Date: Mon, 27 Nov 2023 11:58:26 +0800 Subject: [PATCH 6/8] Code formatting changes Signed-off-by: koko2pp --- curvefs_python/cbd_client.h | 111 +- curvefs_python/libcurvefs.h | 65 +- nebd/src/common/name_lock.h | 149 +- src/chunkserver/clone_core.cpp | 871 +- src/client/request_sender.h | 293 +- src/snapshotcloneserver/clone/clone_core.cpp | 3270 ++++--- test/chunkserver/copyset_node_test.cpp | 2111 ++--- test/client/copyset_client_test.cpp | 7994 +++++++++-------- test/client/mds_failover_test.cpp | 500 +- 
.../snapshotcloneserver_common_test.cpp | 1864 ++-- .../snapshotcloneserver_test.cpp | 246 +- .../alloc_statistic_helper_test.cpp | 292 +- .../allocstatistic/alloc_statistic_test.cpp | 395 +- 13 files changed, 9330 insertions(+), 8831 deletions(-) diff --git a/curvefs_python/cbd_client.h b/curvefs_python/cbd_client.h index a5415b26e3..c9c0133ed9 100644 --- a/curvefs_python/cbd_client.h +++ b/curvefs_python/cbd_client.h @@ -29,59 +29,62 @@ #include "curvefs_python/curve_type.h" -namespace curve { -namespace client { - -class FileClient; - -} // namespace client -} // namespace curve - -class CBDClient { - public: - CBDClient(); - ~CBDClient(); - - int Init(const char* configPath); - void UnInit(); - - int Open(const char* filename, UserInfo_t* userInfo); - int Close(int fd); - - int Create(const char* filename, UserInfo_t* userInfo, size_t size); - int Create2(const CreateContext* context); - int Unlink(const char* filename, UserInfo_t* info); - int DeleteForce(const char* filename, UserInfo_t* info); - int Recover(const char* filename, UserInfo_t* info, uint64_t fileId); - int Rename(UserInfo_t* info, const char* oldpath, const char* newpath); - int Extend(const char* filename, UserInfo_t* info, uint64_t size); - - // Synchronous read and write - int Read(int fd, char* buf, unsigned long offset, - unsigned long length); // NOLINT - int Write(int fd, const char* buf, unsigned long offset, - unsigned long length); // NOLINT - - // Asynchronous read and write - int AioRead(int fd, AioContext* aioctx); - int AioWrite(int fd, AioContext* aioctx); - - // Obtain basic information about the file - int StatFile(const char* filename, UserInfo_t* info, FileInfo_t* finfo); - int ChangeOwner(const char* filename, const char* owner, UserInfo_t* info); - - DirInfos_t* OpenDir(const char* dirpath, UserInfo_t* userinfo); - int Listdir(DirInfos_t* dirinfo); - void CloseDir(DirInfos_t* dirinfo); - int Mkdir(const char* dirpath, UserInfo_t* info); - int Rmdir(const char* dirpath, UserInfo_t* info); - - std::string GetClusterId(); - - std::vector ListPoolset(); - - private: - std::unique_ptr client_; +namespace curve +{ + namespace client + { + + class FileClient; + + } // namespace client +} // namespace curve + +class CBDClient +{ +public: + CBDClient(); + ~CBDClient(); + + int Init(const char *configPath); + void UnInit(); + + int Open(const char *filename, UserInfo_t *userInfo); + int Close(int fd); + + int Create(const char *filename, UserInfo_t *userInfo, size_t size); + int Create2(const CreateContext *context); + int Unlink(const char *filename, UserInfo_t *info); + int DeleteForce(const char *filename, UserInfo_t *info); + int Recover(const char *filename, UserInfo_t *info, uint64_t fileId); + int Rename(UserInfo_t *info, const char *oldpath, const char *newpath); + int Extend(const char *filename, UserInfo_t *info, uint64_t size); + + // Synchronous read and write + int Read(int fd, char *buf, unsigned long offset, + unsigned long length); // NOLINT + int Write(int fd, const char *buf, unsigned long offset, + unsigned long length); // NOLINT + + // Asynchronous read and write + int AioRead(int fd, AioContext *aioctx); + int AioWrite(int fd, AioContext *aioctx); + + // Obtain basic information about the file + int StatFile(const char *filename, UserInfo_t *info, FileInfo_t *finfo); + int ChangeOwner(const char *filename, const char *owner, UserInfo_t *info); + + DirInfos_t *OpenDir(const char *dirpath, UserInfo_t *userinfo); + int Listdir(DirInfos_t *dirinfo); + void CloseDir(DirInfos_t *dirinfo); + 
int Mkdir(const char *dirpath, UserInfo_t *info); + int Rmdir(const char *dirpath, UserInfo_t *info); + + std::string GetClusterId(); + + std::vector ListPoolset(); + +private: + std::unique_ptr client_; }; -#endif // CURVEFS_PYTHON_CBD_CLIENT_H_ +#endif // CURVEFS_PYTHON_CBD_CLIENT_H_ diff --git a/curvefs_python/libcurvefs.h b/curvefs_python/libcurvefs.h index 069c4542f4..b1bdb0275c 100644 --- a/curvefs_python/libcurvefs.h +++ b/curvefs_python/libcurvefs.h @@ -19,7 +19,7 @@ * File Created: Tuesday, 25th September 2018 2:07:05 pm * Author: */ -#ifndef CURVE_LIBCURVE_INTERFACE_H // NOLINT +#ifndef CURVE_LIBCURVE_INTERFACE_H // NOLINT #define CURVE_LIBCURVE_INTERFACE_H #include @@ -31,47 +31,48 @@ #include "curvefs_python/curve_type.h" #ifdef __cplusplus -extern "C" { +extern "C" +{ #endif -int Init(const char* path); -int Open4Qemu(const char* filename); -int Open(const char* filename, UserInfo_t* info); -int Create(const char* filename, UserInfo_t* info, size_t size); + int Init(const char *path); + int Open4Qemu(const char *filename); + int Open(const char *filename, UserInfo_t *info); + int Create(const char *filename, UserInfo_t *info, size_t size); -// Synchronous read and write -int Read(int fd, char* buf, unsigned long offset, - unsigned long length); // NOLINT -int Write(int fd, const char* buf, unsigned long offset, - unsigned long length); // NOLINT + // Synchronous read and write + int Read(int fd, char *buf, unsigned long offset, + unsigned long length); // NOLINT + int Write(int fd, const char *buf, unsigned long offset, + unsigned long length); // NOLINT -// Asynchronous read and write -int AioRead(int fd, AioContext* aioctx); -int AioWrite(int fd, AioContext* aioctx); + // Asynchronous read and write + int AioRead(int fd, AioContext *aioctx); + int AioWrite(int fd, AioContext *aioctx); -// Obtain basic information about the file -int StatFile4Qemu(const char* filename, FileInfo_t* finfo); -int StatFile(const char* filename, UserInfo_t* info, FileInfo_t* finfo); -int ChangeOwner(const char* filename, const char* owner, UserInfo_t* info); -int Close(int fd); + // Obtain basic information about the file + int StatFile4Qemu(const char *filename, FileInfo_t *finfo); + int StatFile(const char *filename, UserInfo_t *info, FileInfo_t *finfo); + int ChangeOwner(const char *filename, const char *owner, UserInfo_t *info); + int Close(int fd); -int Rename(UserInfo_t* info, const char* oldpath, const char* newpath); -int Extend(const char* filename, UserInfo_t* info, uint64_t size); -int Unlink(const char* filename, UserInfo_t* info); -int Recover(const char* filename, UserInfo_t* info, uint64_t fileId); -int DeleteForce(const char* filename, UserInfo_t* info); -DirInfos_t* OpenDir(const char* dirpath, UserInfo_t* userinfo); -void CloseDir(DirInfos_t* dirinfo); -int Listdir(DirInfos_t* dirinfo); -int Mkdir(const char* dirpath, UserInfo_t* info); -int Rmdir(const char* dirpath, UserInfo_t* info); + int Rename(UserInfo_t *info, const char *oldpath, const char *newpath); + int Extend(const char *filename, UserInfo_t *info, uint64_t size); + int Unlink(const char *filename, UserInfo_t *info); + int Recover(const char *filename, UserInfo_t *info, uint64_t fileId); + int DeleteForce(const char *filename, UserInfo_t *info); + DirInfos_t *OpenDir(const char *dirpath, UserInfo_t *userinfo); + void CloseDir(DirInfos_t *dirinfo); + int Listdir(DirInfos_t *dirinfo); + int Mkdir(const char *dirpath, UserInfo_t *info); + int Rmdir(const char *dirpath, UserInfo_t *info); -void UnInit(); + void UnInit(); 
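
The flat C functions above expose the same operations for embedders; a
hedged sketch of the QEMU-style entry point follows. The config path and
volume name are placeholders, and the exact user-in-filename convention
expected by Open4Qemu is an assumption to verify before use.

#include "curvefs_python/libcurvefs.h"

int demo_c_api() {
    if (Init("./client.conf") != 0) {   // hypothetical config path
        return -1;
    }
    // Open4Qemu takes no UserInfo_t: the owner is assumed to be encoded
    // in the filename string itself (exact convention not shown here).
    int fd = Open4Qemu("/vol_demo_qemu_");  // hypothetical name
    if (fd >= 0) {
        char buf[4096] = {};
        Read(fd, buf, 0, sizeof(buf));      // 4 KiB at offset 0
        Close(fd);
    }
    UnInit();
    return 0;
}
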
-int GetClusterId(char* buf = nullptr, int len = 0); + int GetClusterId(char *buf = nullptr, int len = 0); #ifdef __cplusplus } #endif -#endif // !CURVE_LIBCURVE_INTERFACE_H //NOLINT +#endif // !CURVE_LIBCURVE_INTERFACE_H //NOLINT diff --git a/nebd/src/common/name_lock.h b/nebd/src/common/name_lock.h index e179c4272d..eaebf6e806 100644 --- a/nebd/src/common/name_lock.h +++ b/nebd/src/common/name_lock.h @@ -25,80 +25,87 @@ #include #include -#include // NOLINT +#include // NOLINT #include #include #include #include "nebd/src/common/uncopyable.h" -namespace nebd { -namespace common { - -class NameLock : public Uncopyable { - public: - explicit NameLock(int bucketNum = 256); - - /** - * @brief locks the specified string - * - * @param lockStr locked string - */ - void Lock(const std::string& lockStr); - - /** - * @brief Attempt to specify sting lock - * - * @param lockStr locked string - * - * @retval succeeded - * @retval failed - */ - bool TryLock(const std::string& lockStr); - - /** - * @brief unlocks the specified string - * - * @param lockStr locked string - */ - void Unlock(const std::string& lockStr); - - private: - struct LockEntry { - std::atomic ref_; - std::mutex lock_; - }; - using LockEntryPtr = std::shared_ptr; - - struct LockBucket { - std::mutex mu; - std::unordered_map lockMap; - }; - using LockBucketPtr = std::shared_ptr; - - int GetBucketOffset(const std::string& lockStr); - - private: - std::vector locks_; -}; - -class NameLockGuard : public Uncopyable { - public: - NameLockGuard(NameLock& lock, const std::string& lockStr) - : // NOLINT - lock_(lock), - lockStr_(lockStr) { - lock_.Lock(lockStr_); - } - - ~NameLockGuard() { lock_.Unlock(lockStr_); } - - private: - NameLock& lock_; - std::string lockStr_; -}; - -} // namespace common -} // namespace nebd - -#endif // NEBD_SRC_COMMON_NAME_LOCK_H_ +namespace nebd +{ + namespace common + { + + class NameLock : public Uncopyable + { + public: + explicit NameLock(int bucketNum = 256); + + /** + * @brief locks the specified string + * + * @param lockStr locked string + */ + void Lock(const std::string &lockStr); + + /** + * @brief Attempt to specify sting lock + * + * @param lockStr locked string + * + * @retval succeeded + * @retval failed + */ + bool TryLock(const std::string &lockStr); + + /** + * @brief unlocks the specified string + * + * @param lockStr locked string + */ + void Unlock(const std::string &lockStr); + + private: + struct LockEntry + { + std::atomic ref_; + std::mutex lock_; + }; + using LockEntryPtr = std::shared_ptr; + + struct LockBucket + { + std::mutex mu; + std::unordered_map lockMap; + }; + using LockBucketPtr = std::shared_ptr; + + int GetBucketOffset(const std::string &lockStr); + + private: + std::vector locks_; + }; + + class NameLockGuard : public Uncopyable + { + public: + NameLockGuard(NameLock &lock, const std::string &lockStr) + : // NOLINT + lock_(lock), + lockStr_(lockStr) + { + lock_.Lock(lockStr_); + } + + ~NameLockGuard() { lock_.Unlock(lockStr_); } + + private: + NameLock &lock_; + std::string lockStr_; + }; + + } // namespace common +} // namespace nebd + +#endif // NEBD_SRC_COMMON_NAME_LOCK_H_ diff --git a/src/chunkserver/clone_core.cpp b/src/chunkserver/clone_core.cpp index 99eb260a95..422a5cce31 100644 --- a/src/chunkserver/clone_core.cpp +++ b/src/chunkserver/clone_core.cpp @@ -32,424 +32,467 @@ #include "src/common/bitmap.h" #include "src/common/timeutility.h" -namespace curve { -namespace chunkserver { - -using curve::common::Bitmap; -using curve::common::TimeUtility; - -static void 
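
The NameLock/NameLockGuard pair above is the per-name serialization
primitive used by the nebd server; a short usage sketch, illustrative and
built only from the API shown:

#include <string>

#include "nebd/src/common/name_lock.h"

void TouchFileState(nebd::common::NameLock& locks,
                    const std::string& filename) {
    // The guard calls locks.Lock(filename) here and Unlock(filename) on
    // scope exit, so concurrent calls with the same name are serialized
    // while different names proceed in parallel (modulo bucket collisions).
    nebd::common::NameLockGuard guard(locks, filename);
    // ... mutate per-file state safely ...
}
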
ReadBufferDeleter(void* ptr) { delete[] static_cast(ptr); } - -DownloadClosure::DownloadClosure(std::shared_ptr readRequest, - std::shared_ptr cloneCore, - AsyncDownloadContext* downloadCtx, - Closure* done) - : isFailed_(false), - beginTime_(TimeUtility::GetTimeofDayUs()), - downloadCtx_(downloadCtx), - cloneCore_(cloneCore), - readRequest_(readRequest), - done_(done) { - // Record initial metric - if (readRequest_ != nullptr) { - const ChunkRequest* request = readRequest_->GetChunkRequest(); - ChunkServerMetric* csMetric = ChunkServerMetric::GetInstance(); - csMetric->OnRequest(request->logicpoolid(), request->copysetid(), - CSIOMetricType::DOWNLOAD); - } -} - -void DownloadClosure::Run() { - std::unique_ptr selfGuard(this); - std::unique_ptr contextGuard(downloadCtx_); - brpc::ClosureGuard doneGuard(done_); - butil::IOBuf copyData; - copyData.append_user_data(downloadCtx_->buf, downloadCtx_->size, - ReadBufferDeleter); - - CHECK(readRequest_ != nullptr) << "read request is nullptr."; - // Record End Metric - const ChunkRequest* request = readRequest_->GetChunkRequest(); - ChunkServerMetric* csMetric = ChunkServerMetric::GetInstance(); - uint64_t latencyUs = TimeUtility::GetTimeofDayUs() - beginTime_; - csMetric->OnResponse(request->logicpoolid(), request->copysetid(), - CSIOMetricType::DOWNLOAD, downloadCtx_->size, - latencyUs, isFailed_); - - // Copying data from the source failed - if (isFailed_) { - LOG(ERROR) << "download origin data failed: " - << " logic pool id: " << request->logicpoolid() - << " copyset id: " << request->copysetid() - << " chunkid: " << request->chunkid() - << " AsyncDownloadContext: " << *downloadCtx_; - cloneCore_->SetResponse( - readRequest_, CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - return; - } - - if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) { - // Release doneGuard, hand over the closure to the pass request for - // processing - cloneCore_->PasteCloneData(readRequest_, ©Data, - downloadCtx_->offset, downloadCtx_->size, - doneGuard.release()); - } else if (CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype()) { - // Error or end of processing call closure returned to user - cloneCore_->SetReadChunkResponse(readRequest_, ©Data); - - // Paste clone data is an asynchronous operation that can be processed - // quickly - cloneCore_->PasteCloneData(readRequest_, ©Data, - downloadCtx_->offset, downloadCtx_->size, - nullptr); - } -} - -void CloneClosure::Run() { - // Release resources - std::unique_ptr selfGuard(this); - std::unique_ptr requestGuard(request_); - std::unique_ptr responseGuard(response_); - brpc::ClosureGuard doneGuard(done_); - // If userResponse is not empty, you need to set the response_ Assign the - // relevant content in to userResponse - if (userResponse_ != nullptr) { - if (response_->has_status()) { - userResponse_->set_status(response_->status()); +namespace curve +{ + namespace chunkserver + { + + using curve::common::Bitmap; + using curve::common::TimeUtility; + + static void ReadBufferDeleter(void *ptr) { delete[] static_cast(ptr); } + + DownloadClosure::DownloadClosure(std::shared_ptr readRequest, + std::shared_ptr cloneCore, + AsyncDownloadContext *downloadCtx, + Closure *done) + : isFailed_(false), + beginTime_(TimeUtility::GetTimeofDayUs()), + downloadCtx_(downloadCtx), + cloneCore_(cloneCore), + readRequest_(readRequest), + done_(done) + { + // Record initial metric + if (readRequest_ != nullptr) + { + const ChunkRequest *request = readRequest_->GetChunkRequest(); + ChunkServerMetric *csMetric = 
ChunkServerMetric::GetInstance(); + csMetric->OnRequest(request->logicpoolid(), request->copysetid(), + CSIOMetricType::DOWNLOAD); + } } - if (response_->has_redirect()) { - userResponse_->set_redirect(response_->redirect()); + + void DownloadClosure::Run() + { + std::unique_ptr selfGuard(this); + std::unique_ptr contextGuard(downloadCtx_); + brpc::ClosureGuard doneGuard(done_); + butil::IOBuf copyData; + copyData.append_user_data(downloadCtx_->buf, downloadCtx_->size, + ReadBufferDeleter); + + CHECK(readRequest_ != nullptr) << "read request is nullptr."; + // Record End Metric + const ChunkRequest *request = readRequest_->GetChunkRequest(); + ChunkServerMetric *csMetric = ChunkServerMetric::GetInstance(); + uint64_t latencyUs = TimeUtility::GetTimeofDayUs() - beginTime_; + csMetric->OnResponse(request->logicpoolid(), request->copysetid(), + CSIOMetricType::DOWNLOAD, downloadCtx_->size, + latencyUs, isFailed_); + + // Copying data from the source failed + if (isFailed_) + { + LOG(ERROR) << "download origin data failed: " + << " logic pool id: " << request->logicpoolid() + << " copyset id: " << request->copysetid() + << " chunkid: " << request->chunkid() + << " AsyncDownloadContext: " << *downloadCtx_; + cloneCore_->SetResponse( + readRequest_, CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + return; + } + + if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) + { + // Release doneGuard, hand over the closure to the pass request for + // processing + cloneCore_->PasteCloneData(readRequest_, ©Data, + downloadCtx_->offset, downloadCtx_->size, + doneGuard.release()); + } + else if (CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype()) + { + // Error or end of processing call closure returned to user + cloneCore_->SetReadChunkResponse(readRequest_, ©Data); + + // Paste clone data is an asynchronous operation that can be processed + // quickly + cloneCore_->PasteCloneData(readRequest_, ©Data, + downloadCtx_->offset, downloadCtx_->size, + nullptr); + } } - if (response_->has_appliedindex()) { - userResponse_->set_appliedindex(response_->appliedindex()); + + void CloneClosure::Run() + { + // Release resources + std::unique_ptr selfGuard(this); + std::unique_ptr requestGuard(request_); + std::unique_ptr responseGuard(response_); + brpc::ClosureGuard doneGuard(done_); + // If userResponse is not empty, you need to set the response_ Assign the + // relevant content in to userResponse + if (userResponse_ != nullptr) + { + if (response_->has_status()) + { + userResponse_->set_status(response_->status()); + } + if (response_->has_redirect()) + { + userResponse_->set_redirect(response_->redirect()); + } + if (response_->has_appliedindex()) + { + userResponse_->set_appliedindex(response_->appliedindex()); + } + } } - } -} - -int CloneCore::CloneReadByLocalInfo( - std::shared_ptr readRequest, const CSChunkInfo& chunkInfo, - Closure* done) { - brpc::ClosureGuard doneGuard(done); - const ChunkRequest* request = readRequest->request_; - off_t offset = request->offset(); - size_t length = request->size(); - const uint32_t blockSize = chunkInfo.blockSize; - - // offset and length must be aligned with blockSize - if (offset % blockSize != 0 || length % blockSize != 0) { - LOG(ERROR) << "Invalid offset or length: " - << " logic pool id: " << request->logicpoolid() - << " copyset id: " << request->copysetid() - << " chunkid: " << request->chunkid() - << " offset: " << offset << " length: " << length - << " block size: " << blockSize; - SetResponse(readRequest, - 
CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); - return -1; - } - - uint32_t beginIndex = offset / blockSize; - uint32_t endIndex = (offset + length - 1) / blockSize; - - // When submitting a request to CloneManager, the chunk must be a clone - // chunk However, due to other requests for the same chunk, it is possible - // that the chunk has already been overwritten at this time So here we need - // to first determine whether the chunk is a clone chunk, and then determine - // whether to copy the data if so - bool needClone = chunkInfo.isClone && - (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex) != - Bitmap::NO_POS); - if (needClone) { - // The TODO(yyk) block can be optimized, but the optimization method may - // determine complex conditions Currently, the decision to trigger - // copying is only based on whether there are unwritten pages If the - // data within the requested read range in the chunk has a page that has - // not been written, it is necessary to copy the data from the source - // side - AsyncDownloadContext* downloadCtx = - new (std::nothrow) AsyncDownloadContext; - downloadCtx->location = chunkInfo.location; - downloadCtx->offset = offset; - downloadCtx->size = length; - downloadCtx->buf = new (std::nothrow) char[length]; - DownloadClosure* downloadClosure = new (std::nothrow) DownloadClosure( - readRequest, shared_from_this(), downloadCtx, doneGuard.release()); - copyer_->DownloadAsync(downloadClosure); - return 0; - } - - // Performing this step indicates that there is no need to copy data. If it - // is a recover request, it can directly return success If it is a ReadChunk - // request, read the chunk directly and return - if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) { - SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - } else if (CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype()) { - // Error or end of processing call closure returned to user - return ReadChunk(readRequest); - } - return 0; -} - -void CloneCore::CloneReadByRequestInfo( - std::shared_ptr readRequest, Closure* done) { - brpc::ClosureGuard doneGuard(done); - const ChunkRequest* chunkRequest = readRequest->request_; - - auto func = ::curve::common::LocationOperator::GenerateCurveLocation; - std::string location = - func(chunkRequest->clonefilesource(), chunkRequest->clonefileoffset()); - - AsyncDownloadContext* downloadCtx = new (std::nothrow) AsyncDownloadContext; - downloadCtx->location = location; - downloadCtx->offset = chunkRequest->offset(); - downloadCtx->size = chunkRequest->size(); - downloadCtx->buf = new (std::nothrow) char[chunkRequest->size()]; - DownloadClosure* downloadClosure = new (std::nothrow) DownloadClosure( - readRequest, shared_from_this(), downloadCtx, doneGuard.release()); - copyer_->DownloadAsync(downloadClosure); - return; -} - -int CloneCore::HandleReadRequest(std::shared_ptr readRequest, - Closure* done) { - brpc::ClosureGuard doneGuard(done); - const ChunkRequest* request = readRequest->request_; - - // Obtain chunk information - CSChunkInfo chunkInfo; - ChunkID id = readRequest->ChunkId(); - std::shared_ptr dataStore = readRequest->datastore_; - CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo); - - /* - * Chunk exists: Check and analyze Bitmap to determine if it can be read - * locally Chunk does not exist: if it contains clone information, it will be - * read from clonesource, otherwise an error will be returned Because the - * upper level ReadChunkRequest::OnApply has already processed NoExist And - * the situation 
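
A self-contained restatement of the alignment and block-index arithmetic
in CloneReadByLocalInfo above (a sketch; plain integers stand in for the
request and for curve::common::Bitmap):

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t blockSize = 4096;               // chunkInfo.blockSize
    const uint64_t offset = 8192, length = 12288;  // request->offset()/size()
    if (offset % blockSize != 0 || length % blockSize != 0) {
        std::puts("rejected: CHUNK_OP_STATUS_INVALID_REQUEST");
        return 1;
    }
    const uint32_t beginIndex = offset / blockSize;               // 2
    const uint32_t endIndex = (offset + length - 1) / blockSize;  // 4
    // A clone chunk triggers a source download iff any bit in
    // [beginIndex, endIndex] is still clear, i.e.
    // bitmap->NextClearBit(beginIndex, endIndex) != Bitmap::NO_POS.
    std::printf("request covers blocks [%u, %u]\n", beginIndex, endIndex);
    return 0;
}
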
where cloneinfo does not exist - */ - switch (errorCode) { - case CSErrorCode::Success: - return CloneReadByLocalInfo(readRequest, chunkInfo, - doneGuard.release()); - case CSErrorCode::ChunkNotExistError: - if (existCloneInfo(request)) { - CloneReadByRequestInfo(readRequest, doneGuard.release()); + + int CloneCore::CloneReadByLocalInfo( + std::shared_ptr readRequest, const CSChunkInfo &chunkInfo, + Closure *done) + { + brpc::ClosureGuard doneGuard(done); + const ChunkRequest *request = readRequest->request_; + off_t offset = request->offset(); + size_t length = request->size(); + const uint32_t blockSize = chunkInfo.blockSize; + + // offset and length must be aligned with blockSize + if (offset % blockSize != 0 || length % blockSize != 0) + { + LOG(ERROR) << "Invalid offset or length: " + << " logic pool id: " << request->logicpoolid() + << " copyset id: " << request->copysetid() + << " chunkid: " << request->chunkid() + << " offset: " << offset << " length: " << length + << " block size: " << blockSize; + SetResponse(readRequest, + CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); + return -1; + } + + uint32_t beginIndex = offset / blockSize; + uint32_t endIndex = (offset + length - 1) / blockSize; + + // When submitting a request to CloneManager, the chunk must be a clone + // chunk However, due to other requests for the same chunk, it is possible + // that the chunk has already been overwritten at this time So here we need + // to first determine whether the chunk is a clone chunk, and then determine + // whether to copy the data if so + bool needClone = chunkInfo.isClone && + (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex) != + Bitmap::NO_POS); + if (needClone) + { + // The TODO(yyk) block can be optimized, but the optimization method may + // determine complex conditions Currently, the decision to trigger + // copying is only based on whether there are unwritten pages If the + // data within the requested read range in the chunk has a page that has + // not been written, it is necessary to copy the data from the source + // side + AsyncDownloadContext *downloadCtx = + new (std::nothrow) AsyncDownloadContext; + downloadCtx->location = chunkInfo.location; + downloadCtx->offset = offset; + downloadCtx->size = length; + downloadCtx->buf = new (std::nothrow) char[length]; + DownloadClosure *downloadClosure = new (std::nothrow) DownloadClosure( + readRequest, shared_from_this(), downloadCtx, doneGuard.release()); + copyer_->DownloadAsync(downloadClosure); return 0; } - // Otherwise, fallthrough will directly return an error - FALLTHROUGH_INTENDED; - default: - LOG(ERROR) << "get chunkinfo failed: " - << " logic pool id: " << request->logicpoolid() - << " copyset id: " << request->copysetid() - << " chunkid: " << request->chunkid() - << " error code: " << errorCode; - SetResponse(readRequest, - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - return -1; - } -} - -int CloneCore::ReadChunk(std::shared_ptr readRequest) { - const ChunkRequest* request = readRequest->request_; - off_t offset = request->offset(); - size_t length = request->size(); - std::unique_ptr chunkData(new char[length]); - std::shared_ptr dataStore = readRequest->datastore_; - CSErrorCode errorCode; - errorCode = dataStore->ReadChunk(request->chunkid(), request->sn(), - chunkData.get(), offset, length); - if (CSErrorCode::Success != errorCode) { - SetResponse(readRequest, - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - LOG(ERROR) << "read chunk failed: " - << " logic pool id: " << request->logicpoolid() - 
<< " copyset id: " << request->copysetid() - << " chunkid: " << request->chunkid() - << " read offset: " << offset << " read length: " << length - << " error code: " << errorCode; - return -1; - } - - // After successful reading, update the apply index - readRequest->node_->UpdateAppliedIndex(readRequest->applyIndex); - // After completing the data reading, Return can return the results to the - // user - readRequest->cntl_->response_attachment().append(chunkData.get(), length); - SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - return 0; -} - -int CloneCore::SetReadChunkResponse( - std::shared_ptr readRequest, - const butil::IOBuf* cloneData) { - const ChunkRequest* request = readRequest->request_; - CSChunkInfo chunkInfo; - ChunkID id = readRequest->ChunkId(); - std::shared_ptr dataStore = readRequest->datastore_; - CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo); - - // If the chunk does not exist, it is necessary to determine whether the - // request contains information about the source chunk If the source chunk - // information is provided, it indicates that the lazy allocation chunk - // mechanism is used, and clone data can be directly returned There is a - // situation where the requested chunk is lazily allocated and the requested - // chunk exists locally, And the requested read area has already been - // written, and when copying data from the source, the chunk has been - // deleted again In this case, it will be returned as a normal request, but - // the returned data does not meet expectations Due to the current delayed - // deletion of our curve files, it is ensured that there is no user IO when - // the files are truly deleted If some changes are added later that trigger - // this issue, it needs to be fixed - // TODO(yyk) fix it - bool expect = errorCode == CSErrorCode::Success || - (errorCode == CSErrorCode::ChunkNotExistError && - existCloneInfo(request)); - if (!expect) { - LOG(ERROR) << "get chunkinfo failed: " - << " logic pool id: " << request->logicpoolid() - << " copyset id: " << request->copysetid() - << " chunkid: " << request->chunkid() - << " error code: " << errorCode; - SetResponse(readRequest, - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - return -1; - } - - size_t length = request->size(); - butil::IOBuf responseData; - // If a chunk exists, read the regions that have already been written from - // the chunk and merge them back - if (errorCode == CSErrorCode::Success) { - char* chunkData = new (std::nothrow) char[length]; - int ret = ReadThenMerge(readRequest, chunkInfo, cloneData, chunkData); - responseData.append_user_data(chunkData, length, ReadBufferDeleter); - if (ret < 0) { - SetResponse(readRequest, - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - return ret; + + // Performing this step indicates that there is no need to copy data. 
If it + // is a recover request, it can directly return success If it is a ReadChunk + // request, read the chunk directly and return + if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) + { + SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + } + else if (CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype()) + { + // Error or end of processing call closure returned to user + return ReadChunk(readRequest); + } + return 0; + } + + void CloneCore::CloneReadByRequestInfo( + std::shared_ptr readRequest, Closure *done) + { + brpc::ClosureGuard doneGuard(done); + const ChunkRequest *chunkRequest = readRequest->request_; + + auto func = ::curve::common::LocationOperator::GenerateCurveLocation; + std::string location = + func(chunkRequest->clonefilesource(), chunkRequest->clonefileoffset()); + + AsyncDownloadContext *downloadCtx = new (std::nothrow) AsyncDownloadContext; + downloadCtx->location = location; + downloadCtx->offset = chunkRequest->offset(); + downloadCtx->size = chunkRequest->size(); + downloadCtx->buf = new (std::nothrow) char[chunkRequest->size()]; + DownloadClosure *downloadClosure = new (std::nothrow) DownloadClosure( + readRequest, shared_from_this(), downloadCtx, doneGuard.release()); + copyer_->DownloadAsync(downloadClosure); + return; } - } else { - responseData = *cloneData; - } - readRequest->cntl_->response_attachment().append(responseData); - - // After successful reading, update the apply index - readRequest->node_->UpdateAppliedIndex(readRequest->applyIndex); - SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - return 0; -} - -int CloneCore::ReadThenMerge(std::shared_ptr readRequest, - const CSChunkInfo& chunkInfo, - const butil::IOBuf* cloneData, char* chunkData) { - const ChunkRequest* request = readRequest->request_; - std::shared_ptr dataStore = readRequest->datastore_; - - off_t offset = request->offset(); - size_t length = request->size(); - uint32_t blockSize = chunkInfo.blockSize; - uint32_t beginIndex = offset / blockSize; - uint32_t endIndex = (offset + length - 1) / blockSize; - // Obtain the regions where the chunk file has been written and not written - std::vector copiedRanges; - std::vector uncopiedRanges; - if (chunkInfo.isClone) { - chunkInfo.bitmap->Divide(beginIndex, endIndex, &uncopiedRanges, - &copiedRanges); - } else { - BitRange range; - range.beginIndex = beginIndex; - range.endIndex = endIndex; - copiedRanges.push_back(range); - } - - // The offset of the starting position to be read in the chunk - off_t readOff; - // The relative offset of the read data to be copied into the buffer - off_t relativeOff; - // The length of data read from chunk each time - size_t readSize; - // 1. 
Read for regions that have already been written, read from the chunk - // file - CSErrorCode errorCode; - for (auto& range : copiedRanges) { - readOff = range.beginIndex * blockSize; - readSize = (range.endIndex - range.beginIndex + 1) * blockSize; - relativeOff = readOff - offset; - errorCode = - dataStore->ReadChunk(request->chunkid(), request->sn(), - chunkData + relativeOff, readOff, readSize); - if (CSErrorCode::Success != errorCode) { - LOG(ERROR) << "read chunk failed: " - << " logic pool id: " << request->logicpoolid() - << " copyset id: " << request->copysetid() - << " chunkid: " << request->chunkid() - << " read offset: " << readOff - << " read length: " << readSize - << " error code: " << errorCode; - return -1; + + int CloneCore::HandleReadRequest(std::shared_ptr readRequest, + Closure *done) + { + brpc::ClosureGuard doneGuard(done); + const ChunkRequest *request = readRequest->request_; + + // Obtain chunk information + CSChunkInfo chunkInfo; + ChunkID id = readRequest->ChunkId(); + std::shared_ptr dataStore = readRequest->datastore_; + CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo); + + /* + * Chunk exists: Check and analyze Bitmap to determine if it can be read + * locally Chunk does not exist: if it contains clone information, it will be + * read from clonesource, otherwise an error will be returned Because the + * upper level ReadChunkRequest::OnApply has already processed NoExist And + * the situation where cloneinfo does not exist + */ + switch (errorCode) + { + case CSErrorCode::Success: + return CloneReadByLocalInfo(readRequest, chunkInfo, + doneGuard.release()); + case CSErrorCode::ChunkNotExistError: + if (existCloneInfo(request)) + { + CloneReadByRequestInfo(readRequest, doneGuard.release()); + return 0; + } + // Otherwise, fallthrough will directly return an error + FALLTHROUGH_INTENDED; + default: + LOG(ERROR) << "get chunkinfo failed: " + << " logic pool id: " << request->logicpoolid() + << " copyset id: " << request->copysetid() + << " chunkid: " << request->chunkid() + << " error code: " << errorCode; + SetResponse(readRequest, + CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + return -1; + } } - } - - // 2. 
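
The two read steps of ReadThenMerge can be restated compactly; in this
sketch plain buffers stand in for the datastore and the downloaded
butil::IOBuf, and BitRange mirrors the struct used above:

#include <cstdint>
#include <cstring>
#include <vector>

struct BitRange { uint32_t beginIndex, endIndex; };

// `out` receives request->size() bytes starting at reqOffset: written
// blocks come from the local chunk, unwritten blocks from the download.
void MergeRanges(uint32_t blockSize, uint64_t reqOffset,
                 const std::vector<BitRange>& copied,
                 const std::vector<BitRange>& uncopied,
                 const char* localChunk,  // addressed by absolute offset
                 const char* cloneData,   // addressed relative to reqOffset
                 char* out) {
    for (const auto& r : copied) {
        const uint64_t off = uint64_t(r.beginIndex) * blockSize;
        const size_t len = size_t(r.endIndex - r.beginIndex + 1) * blockSize;
        std::memcpy(out + (off - reqOffset), localChunk + off, len);
    }
    for (const auto& r : uncopied) {
        const uint64_t off = uint64_t(r.beginIndex) * blockSize;
        const size_t len = size_t(r.endIndex - r.beginIndex + 1) * blockSize;
        std::memcpy(out + (off - reqOffset), cloneData + (off - reqOffset), len);
    }
}
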
Merge: For areas that have not been written before, copy them from the - // downloaded area on the source side for merging - for (auto& range : uncopiedRanges) { - readOff = range.beginIndex * blockSize; - readSize = (range.endIndex - range.beginIndex + 1) * blockSize; - relativeOff = readOff - offset; - cloneData->copy_to(chunkData + relativeOff, readSize, relativeOff); - } - return 0; -} - -void CloneCore::PasteCloneData(std::shared_ptr readRequest, - const butil::IOBuf* cloneData, off_t offset, - size_t cloneDataSize, Closure* done) { - const ChunkRequest* request = readRequest->request_; - bool dontPaste = - CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype() && !enablePaste_; - if (dontPaste) return; - - // After the data copy is completed, it is necessary to generate a - // PaseChunkRequest and paste the data to the chunk file - ChunkRequest* pasteRequest = new ChunkRequest(); - pasteRequest->set_optype(curve::chunkserver::CHUNK_OP_TYPE::CHUNK_OP_PASTE); - pasteRequest->set_logicpoolid(request->logicpoolid()); - pasteRequest->set_copysetid(request->copysetid()); - pasteRequest->set_chunkid(request->chunkid()); - pasteRequest->set_offset(offset); - pasteRequest->set_size(cloneDataSize); - std::shared_ptr req = nullptr; - - ChunkResponse* pasteResponse = new ChunkResponse(); - CloneClosure* closure = new CloneClosure(); - closure->SetRequest(pasteRequest); - closure->SetResponse(pasteResponse); - closure->SetClosure(done); - // If it is a request for a recover chunk, the result of the pass needs to - // be returned through rpc - if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) { - closure->SetUserResponse(readRequest->response_); - } - - ChunkServiceClosure* pasteClosure = new (std::nothrow) - ChunkServiceClosure(nullptr, pasteRequest, pasteResponse, closure); - - req = std::make_shared( - readRequest->node_, pasteRequest, pasteResponse, cloneData, - pasteClosure); - req->Process(); -} - -inline void CloneCore::SetResponse( - std::shared_ptr readRequest, CHUNK_OP_STATUS status) { - auto applyIndex = readRequest->node_->GetAppliedIndex(); - readRequest->response_->set_appliedindex(applyIndex); - readRequest->response_->set_status(status); -} - -} // namespace chunkserver -} // namespace curve + + int CloneCore::ReadChunk(std::shared_ptr readRequest) + { + const ChunkRequest *request = readRequest->request_; + off_t offset = request->offset(); + size_t length = request->size(); + std::unique_ptr chunkData(new char[length]); + std::shared_ptr dataStore = readRequest->datastore_; + CSErrorCode errorCode; + errorCode = dataStore->ReadChunk(request->chunkid(), request->sn(), + chunkData.get(), offset, length); + if (CSErrorCode::Success != errorCode) + { + SetResponse(readRequest, + CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + LOG(ERROR) << "read chunk failed: " + << " logic pool id: " << request->logicpoolid() + << " copyset id: " << request->copysetid() + << " chunkid: " << request->chunkid() + << " read offset: " << offset << " read length: " << length + << " error code: " << errorCode; + return -1; + } + + // After successful reading, update the apply index + readRequest->node_->UpdateAppliedIndex(readRequest->applyIndex); + // After completing the data reading, Return can return the results to the + // user + readRequest->cntl_->response_attachment().append(chunkData.get(), length); + SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + return 0; + } + + int CloneCore::SetReadChunkResponse( + std::shared_ptr readRequest, + const butil::IOBuf *cloneData) + 
{ + const ChunkRequest *request = readRequest->request_; + CSChunkInfo chunkInfo; + ChunkID id = readRequest->ChunkId(); + std::shared_ptr dataStore = readRequest->datastore_; + CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo); + + // If the chunk does not exist, it is necessary to determine whether the + // request contains information about the source chunk If the source chunk + // information is provided, it indicates that the lazy allocation chunk + // mechanism is used, and clone data can be directly returned There is a + // situation where the requested chunk is lazily allocated and the requested + // chunk exists locally, And the requested read area has already been + // written, and when copying data from the source, the chunk has been + // deleted again In this case, it will be returned as a normal request, but + // the returned data does not meet expectations Due to the current delayed + // deletion of our curve files, it is ensured that there is no user IO when + // the files are truly deleted If some changes are added later that trigger + // this issue, it needs to be fixed + // TODO(yyk) fix it + bool expect = errorCode == CSErrorCode::Success || + (errorCode == CSErrorCode::ChunkNotExistError && + existCloneInfo(request)); + if (!expect) + { + LOG(ERROR) << "get chunkinfo failed: " + << " logic pool id: " << request->logicpoolid() + << " copyset id: " << request->copysetid() + << " chunkid: " << request->chunkid() + << " error code: " << errorCode; + SetResponse(readRequest, + CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + return -1; + } + + size_t length = request->size(); + butil::IOBuf responseData; + // If a chunk exists, read the regions that have already been written from + // the chunk and merge them back + if (errorCode == CSErrorCode::Success) + { + char *chunkData = new (std::nothrow) char[length]; + int ret = ReadThenMerge(readRequest, chunkInfo, cloneData, chunkData); + responseData.append_user_data(chunkData, length, ReadBufferDeleter); + if (ret < 0) + { + SetResponse(readRequest, + CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + return ret; + } + } + else + { + responseData = *cloneData; + } + readRequest->cntl_->response_attachment().append(responseData); + + // After successful reading, update the apply index + readRequest->node_->UpdateAppliedIndex(readRequest->applyIndex); + SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + return 0; + } + + int CloneCore::ReadThenMerge(std::shared_ptr readRequest, + const CSChunkInfo &chunkInfo, + const butil::IOBuf *cloneData, char *chunkData) + { + const ChunkRequest *request = readRequest->request_; + std::shared_ptr dataStore = readRequest->datastore_; + + off_t offset = request->offset(); + size_t length = request->size(); + uint32_t blockSize = chunkInfo.blockSize; + uint32_t beginIndex = offset / blockSize; + uint32_t endIndex = (offset + length - 1) / blockSize; + // Obtain the regions where the chunk file has been written and not written + std::vector copiedRanges; + std::vector uncopiedRanges; + if (chunkInfo.isClone) + { + chunkInfo.bitmap->Divide(beginIndex, endIndex, &uncopiedRanges, + &copiedRanges); + } + else + { + BitRange range; + range.beginIndex = beginIndex; + range.endIndex = endIndex; + copiedRanges.push_back(range); + } + + // The offset of the starting position to be read in the chunk + off_t readOff; + // The relative offset of the read data to be copied into the buffer + off_t relativeOff; + // The length of data read from chunk each time + size_t 
readSize; + // 1. Read for regions that have already been written, read from the chunk + // file + CSErrorCode errorCode; + for (auto &range : copiedRanges) + { + readOff = range.beginIndex * blockSize; + readSize = (range.endIndex - range.beginIndex + 1) * blockSize; + relativeOff = readOff - offset; + errorCode = + dataStore->ReadChunk(request->chunkid(), request->sn(), + chunkData + relativeOff, readOff, readSize); + if (CSErrorCode::Success != errorCode) + { + LOG(ERROR) << "read chunk failed: " + << " logic pool id: " << request->logicpoolid() + << " copyset id: " << request->copysetid() + << " chunkid: " << request->chunkid() + << " read offset: " << readOff + << " read length: " << readSize + << " error code: " << errorCode; + return -1; + } + } + + // 2. Merge: For areas that have not been written before, copy them from the + // downloaded area on the source side for merging + for (auto &range : uncopiedRanges) + { + readOff = range.beginIndex * blockSize; + readSize = (range.endIndex - range.beginIndex + 1) * blockSize; + relativeOff = readOff - offset; + cloneData->copy_to(chunkData + relativeOff, readSize, relativeOff); + } + return 0; + } + + void CloneCore::PasteCloneData(std::shared_ptr readRequest, + const butil::IOBuf *cloneData, off_t offset, + size_t cloneDataSize, Closure *done) + { + const ChunkRequest *request = readRequest->request_; + bool dontPaste = + CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype() && !enablePaste_; + if (dontPaste) + return; + + // After the data copy is completed, it is necessary to generate a + // PaseChunkRequest and paste the data to the chunk file + ChunkRequest *pasteRequest = new ChunkRequest(); + pasteRequest->set_optype(curve::chunkserver::CHUNK_OP_TYPE::CHUNK_OP_PASTE); + pasteRequest->set_logicpoolid(request->logicpoolid()); + pasteRequest->set_copysetid(request->copysetid()); + pasteRequest->set_chunkid(request->chunkid()); + pasteRequest->set_offset(offset); + pasteRequest->set_size(cloneDataSize); + std::shared_ptr req = nullptr; + + ChunkResponse *pasteResponse = new ChunkResponse(); + CloneClosure *closure = new CloneClosure(); + closure->SetRequest(pasteRequest); + closure->SetResponse(pasteResponse); + closure->SetClosure(done); + // If it is a request for a recover chunk, the result of the pass needs to + // be returned through rpc + if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) + { + closure->SetUserResponse(readRequest->response_); + } + + ChunkServiceClosure *pasteClosure = new (std::nothrow) + ChunkServiceClosure(nullptr, pasteRequest, pasteResponse, closure); + + req = std::make_shared( + readRequest->node_, pasteRequest, pasteResponse, cloneData, + pasteClosure); + req->Process(); + } + + inline void CloneCore::SetResponse( + std::shared_ptr readRequest, CHUNK_OP_STATUS status) + { + auto applyIndex = readRequest->node_->GetAppliedIndex(); + readRequest->response_->set_appliedindex(applyIndex); + readRequest->response_->set_status(status); + } + + } // namespace chunkserver +} // namespace curve diff --git a/src/client/request_sender.h b/src/client/request_sender.h index 99bc94b2e3..a08be423d6 100644 --- a/src/client/request_sender.h +++ b/src/client/request_sender.h @@ -35,148 +35,151 @@ #include "src/client/client_config.h" #include "src/client/request_context.h" -namespace curve { -namespace client { - -/** - * A RequestSender is responsible for managing all aspects of a ChunkServer - * Connection, currently there is only one connection for a ChunkServer - */ -class RequestSender { - public: - 
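
Before the request_sender.h hunks: the clone paths above all manage the
RPC `done` callback through one ownership idiom. A minimal sketch of it,
using the real brpc::ClosureGuard (the surrounding logic is invented for
illustration):

#include <brpc/closure_guard.h>
#include <google/protobuf/stubs/callback.h>

void HandleStage(google::protobuf::Closure* done, bool handOffToAsync) {
    brpc::ClosureGuard doneGuard(done);  // fires done->Run() on scope exit
    if (!handOffToAsync) {
        return;  // every early-return path still answers the caller
    }
    // An async stage takes over: release() detaches the closure, and the
    // stage that finishes the request must call it exactly once.
    google::protobuf::Closure* handoff = doneGuard.release();
    (void)handoff;  // e.g. handed to a DownloadClosure or paste request
}
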
RequestSender(ChunkServerID chunkServerId, butil::EndPoint serverEndPoint) - : chunkServerId_(chunkServerId), - serverEndPoint_(serverEndPoint), - channel_() {} - virtual ~RequestSender() {} - - int Init(const IOSenderOption& ioSenderOpt); - - /** - * Reading Chunk - * @param IDInfo is the ID information related to chunk - * @param sn: File version number - * @param offset: Read offset - * @param length: Read length - * @param sourceInfo Data source information - * @param done: closure of asynchronous callback on the previous layer - */ - int ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset, - size_t length, const RequestSourceInfo& sourceInfo, - ClientClosure* done); - - /** - * Write Chunk - * @param IDInfo is the ID information related to chunk - * @param fileId: file id - * @param epoch: file epoch - * @param sn: File version number - * @param data The data to be written - * @param offset: write offset - * @param length: The length written - * @param sourceInfo Data source information - * @param done: closure of asynchronous callback on the previous layer - */ - int WriteChunk(const ChunkIDInfo& idinfo, uint64_t fileId, uint64_t epoch, - uint64_t sn, const butil::IOBuf& data, off_t offset, - size_t length, const RequestSourceInfo& sourceInfo, - ClientClosure* done); - - /** - * Reading Chunk snapshot files - * @param IDInfo is the ID information related to chunk - * @param sn: File version number - * @param offset: Read offset - * @param length: Read length - * @param done: closure of asynchronous callback on the previous layer - */ - int ReadChunkSnapshot(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset, - size_t length, ClientClosure* done); - - /** - * Delete snapshots generated during this dump or left over from history - * If no snapshot is generated during the dump process, modify the - * correctedSn of the chunk - * @param IDInfo is the ID information related to chunk - * @param correctedSn: Chunk The version number that needs to be corrected - * @param done: closure of asynchronous callback on the previous layer - */ - int DeleteChunkSnapshotOrCorrectSn(const ChunkIDInfo& idinfo, - uint64_t correctedSn, - ClientClosure* done); - - /** - * Obtain information about chunk files - * @param IDInfo is the ID information related to chunk - * @param done: closure of asynchronous callback on the previous layer - * @param retriedTimes: Number of retries - */ - int GetChunkInfo(const ChunkIDInfo& idinfo, ClientClosure* done); - - /** - * @brief lazy Create clone chunk - * @detail - * - The format definition of a location is A@B The form of. 
- * - If the source data is on s3, the location format is uri@s3 Uri is the - * address of the actual chunk object; - * - If the source data is on curves, the location format - * is/filename/chunkindex@cs - * - * @param IDInfo is the ID information related to chunk - * @param done: closure of asynchronous callback on the previous layer - * @param: location, URL of the data source - * @param: sn chunk's serial number - * @param: correntSn used to modify the chunk when creating CloneChunk - * @param: chunkSize Chunk size - * @param retriedTimes: Number of retries - * - * @return error code - */ - int CreateCloneChunk(const ChunkIDInfo& idinfo, ClientClosure* done, - const std::string& location, uint64_t sn, - uint64_t correntSn, uint64_t chunkSize); - - /** - * @brief Actual recovery chunk data - * @param IDInfo is the ID information related to chunk - * @param done: closure of asynchronous callback on the previous layer - * @param: offset: offset - * @param: len: length - * @param retriedTimes: Number of retries - * - * @return error code - */ - int RecoverChunk(const ChunkIDInfo& idinfo, ClientClosure* done, - uint64_t offset, uint64_t len); - /** - * Reset Link to Chunk Server - * @param chunkServerId: Chunk Server unique identifier - * @param serverEndPoint: Chunk Server - * @return 0 succeeded, -1 failed - */ - int ResetSender(ChunkServerID chunkServerId, - butil::EndPoint serverEndPoint); - - bool IsSocketHealth() { return channel_.CheckHealth() == 0; } - - private: - void UpdateRpcRPS(ClientClosure* done, OpType type) const; - - void SetRpcStuff(ClientClosure* done, brpc::Controller* cntl, - google::protobuf::Message* rpcResponse) const; - - private: - // Rpc stub configuration - IOSenderOption iosenderopt_; - // The unique identification ID of ChunkServer - ChunkServerID chunkServerId_; - // Address of ChunkServer - butil::EndPoint serverEndPoint_; - brpc::Channel channel_; /* TODO(wudemiao): Multiple channels will be - maintained in the later stage */ -}; - -} // namespace client -} // namespace curve - -#endif // SRC_CLIENT_REQUEST_SENDER_H_ +namespace curve +{ + namespace client + { + + /** + * A RequestSender is responsible for managing all aspects of a ChunkServer + * Connection, currently there is only one connection for a ChunkServer + */ + class RequestSender + { + public: + RequestSender(ChunkServerID chunkServerId, butil::EndPoint serverEndPoint) + : chunkServerId_(chunkServerId), + serverEndPoint_(serverEndPoint), + channel_() {} + virtual ~RequestSender() {} + + int Init(const IOSenderOption &ioSenderOpt); + + /** + * Reading Chunk + * @param IDInfo is the ID information related to chunk + * @param sn: File version number + * @param offset: Read offset + * @param length: Read length + * @param sourceInfo Data source information + * @param done: closure of asynchronous callback on the previous layer + */ + int ReadChunk(const ChunkIDInfo &idinfo, uint64_t sn, off_t offset, + size_t length, const RequestSourceInfo &sourceInfo, + ClientClosure *done); + + /** + * Write Chunk + * @param IDInfo is the ID information related to chunk + * @param fileId: file id + * @param epoch: file epoch + * @param sn: File version number + * @param data The data to be written + * @param offset: write offset + * @param length: The length written + * @param sourceInfo Data source information + * @param done: closure of asynchronous callback on the previous layer + */ + int WriteChunk(const ChunkIDInfo &idinfo, uint64_t fileId, uint64_t epoch, + uint64_t sn, const butil::IOBuf &data, off_t 
offset, + size_t length, const RequestSourceInfo &sourceInfo, + ClientClosure *done); + + /** + * Reading Chunk snapshot files + * @param IDInfo is the ID information related to chunk + * @param sn: File version number + * @param offset: Read offset + * @param length: Read length + * @param done: closure of asynchronous callback on the previous layer + */ + int ReadChunkSnapshot(const ChunkIDInfo &idinfo, uint64_t sn, off_t offset, + size_t length, ClientClosure *done); + + /** + * Delete snapshots generated during this dump or left over from history + * If no snapshot is generated during the dump process, modify the + * correctedSn of the chunk + * @param IDInfo is the ID information related to chunk + * @param correctedSn: Chunk The version number that needs to be corrected + * @param done: closure of asynchronous callback on the previous layer + */ + int DeleteChunkSnapshotOrCorrectSn(const ChunkIDInfo &idinfo, + uint64_t correctedSn, + ClientClosure *done); + + /** + * Obtain information about chunk files + * @param IDInfo is the ID information related to chunk + * @param done: closure of asynchronous callback on the previous layer + * @param retriedTimes: Number of retries + */ + int GetChunkInfo(const ChunkIDInfo &idinfo, ClientClosure *done); + + /** + * @brief lazy Create clone chunk + * @detail + * - The format definition of a location is A@B The form of. + * - If the source data is on s3, the location format is uri@s3 Uri is the + * address of the actual chunk object; + * - If the source data is on curves, the location format + * is/filename/chunkindex@cs + * + * @param IDInfo is the ID information related to chunk + * @param done: closure of asynchronous callback on the previous layer + * @param: location, URL of the data source + * @param: sn chunk's serial number + * @param: correntSn used to modify the chunk when creating CloneChunk + * @param: chunkSize Chunk size + * @param retriedTimes: Number of retries + * + * @return error code + */ + int CreateCloneChunk(const ChunkIDInfo &idinfo, ClientClosure *done, + const std::string &location, uint64_t sn, + uint64_t correntSn, uint64_t chunkSize); + + /** + * @brief Actual recovery chunk data + * @param IDInfo is the ID information related to chunk + * @param done: closure of asynchronous callback on the previous layer + * @param: offset: offset + * @param: len: length + * @param retriedTimes: Number of retries + * + * @return error code + */ + int RecoverChunk(const ChunkIDInfo &idinfo, ClientClosure *done, + uint64_t offset, uint64_t len); + /** + * Reset Link to Chunk Server + * @param chunkServerId: Chunk Server unique identifier + * @param serverEndPoint: Chunk Server + * @return 0 succeeded, -1 failed + */ + int ResetSender(ChunkServerID chunkServerId, + butil::EndPoint serverEndPoint); + + bool IsSocketHealth() { return channel_.CheckHealth() == 0; } + + private: + void UpdateRpcRPS(ClientClosure *done, OpType type) const; + + void SetRpcStuff(ClientClosure *done, brpc::Controller *cntl, + google::protobuf::Message *rpcResponse) const; + + private: + // Rpc stub configuration + IOSenderOption iosenderopt_; + // The unique identification ID of ChunkServer + ChunkServerID chunkServerId_; + // Address of ChunkServer + butil::EndPoint serverEndPoint_; + brpc::Channel channel_; /* TODO(wudemiao): Multiple channels will be + maintained in the later stage */ + }; + + } // namespace client +} // namespace curve + +#endif // SRC_CLIENT_REQUEST_SENDER_H_ diff --git a/src/snapshotcloneserver/clone/clone_core.cpp 
b/src/snapshotcloneserver/clone/clone_core.cpp index 021da6b359..5620a4ff63 100644 --- a/src/snapshotcloneserver/clone/clone_core.cpp +++ b/src/snapshotcloneserver/clone/clone_core.cpp @@ -37,1605 +37,1903 @@ using ::curve::common::NameLock; using ::curve::common::NameLockGuard; using ::curve::common::UUIDGenerator; -namespace curve { -namespace snapshotcloneserver { - -int CloneCoreImpl::Init() { - int ret = client_->Mkdir(cloneTempDir_, mdsRootUser_); - if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::EXISTS) { - LOG(ERROR) << "Mkdir fail, ret = " << ret - << ", dirpath = " << cloneTempDir_; - return kErrCodeServerInitFail; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::CloneOrRecoverPre(const UUID& source, - const std::string& user, - const std::string& destination, - bool lazyFlag, CloneTaskType taskType, - std::string poolset, - CloneInfo* cloneInfo) { - // Check if there are tasks executing in the database - std::vector cloneInfoList; - metaStore_->GetCloneInfoByFileName(destination, &cloneInfoList); - bool needJudgeFileExist = false; - std::vector existCloneInfos; - for (auto& info : cloneInfoList) { - LOG(INFO) << "CloneOrRecoverPre find same clone task" - << ", source = " << source << ", user = " << user - << ", destination = " << destination - << ", poolset = " << poolset - << ", Exist CloneInfo : " << info; - // is clone - if (taskType == CloneTaskType::kClone) { - if (info.GetStatus() == CloneStatus::cloning || - info.GetStatus() == CloneStatus::retrying) { - if ((info.GetUser() == user) && (info.GetSrc() == source) && - (info.GetIsLazy() == lazyFlag) && - (info.GetTaskType() == taskType)) { - // Treat as the same clone - *cloneInfo = info; - return kErrCodeTaskExist; - } else { - // Treat it as a different clone, then the file is actually - // occupied and the return file already exists - return kErrCodeFileExist; - } - } else if (info.GetStatus() == CloneStatus::done || - info.GetStatus() == CloneStatus::error || - info.GetStatus() == CloneStatus::metaInstalled) { - // It may have been deleted, and it is necessary to determine - // whether the file exists again, Allowing further cloning under - // deleted conditions - existCloneInfos.push_back(info); - needJudgeFileExist = true; - } else { - // At this point, the same clone task is being deleted and the - // return file is occupied - return kErrCodeFileExist; - } - } else { // is recover - if (info.GetStatus() == CloneStatus::recovering || - info.GetStatus() == CloneStatus::retrying) { - if ((info.GetUser() == user) && (info.GetSrc() == source) && - (info.GetIsLazy() == lazyFlag) && - (info.GetTaskType() == taskType)) { - // Treat as the same clone, return task already exists - *cloneInfo = info; - return kErrCodeTaskExist; - } else { - // Treat it as a different clone, then the file is actually - // occupied and the return file already exists - return kErrCodeFileExist; - } - } else if (info.GetStatus() == CloneStatus::done || - info.GetStatus() == CloneStatus::error || - info.GetStatus() == CloneStatus::metaInstalled) { - // nothing - } else { - // At this point, the same task is being deleted and the return - // file is occupied - return kErrCodeFileExist; +namespace curve +{ + namespace snapshotcloneserver + { + + int CloneCoreImpl::Init() + { + int ret = client_->Mkdir(cloneTempDir_, mdsRootUser_); + if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::EXISTS) + { + LOG(ERROR) << "Mkdir fail, ret = " << ret + << ", dirpath = " << cloneTempDir_; + return kErrCodeServerInitFail; } + return 
kErrCodeSuccess; } - } - - // The target file already exists and cannot be cloned or recovered if it - // does not exist - FInfo destFInfo; - int ret = client_->GetFileInfo(destination, mdsRootUser_, &destFInfo); - switch (ret) { - case LIBCURVE_ERROR::OK: - if (CloneTaskType::kClone == taskType) { - if (needJudgeFileExist) { - bool match = false; - // Find the cloneInfo that matches the inodeid - for (auto& existInfo : existCloneInfos) { - if (destFInfo.id == existInfo.GetDestId()) { - *cloneInfo = existInfo; - match = true; - break; + + int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, + const std::string &user, + const std::string &destination, + bool lazyFlag, CloneTaskType taskType, + std::string poolset, + CloneInfo *cloneInfo) + { + // Check if there are tasks executing in the database + std::vector cloneInfoList; + metaStore_->GetCloneInfoByFileName(destination, &cloneInfoList); + bool needJudgeFileExist = false; + std::vector existCloneInfos; + for (auto &info : cloneInfoList) + { + LOG(INFO) << "CloneOrRecoverPre find same clone task" + << ", source = " << source << ", user = " << user + << ", destination = " << destination + << ", poolset = " << poolset + << ", Exist CloneInfo : " << info; + // is clone + if (taskType == CloneTaskType::kClone) + { + if (info.GetStatus() == CloneStatus::cloning || + info.GetStatus() == CloneStatus::retrying) + { + if ((info.GetUser() == user) && (info.GetSrc() == source) && + (info.GetIsLazy() == lazyFlag) && + (info.GetTaskType() == taskType)) + { + // Treat as the same clone + *cloneInfo = info; + return kErrCodeTaskExist; + } + else + { + // Treat it as a different clone, then the file is actually + // occupied and the return file already exists + return kErrCodeFileExist; } } - if (match) { - return kErrCodeTaskExist; - } else { - // If not found, then none of the dest files were - // created by these clone tasks, It means the file has a - // duplicate name - LOG(ERROR) - << "Clone dest file exist, " - << "but task not match! 
" - << "source = " << source << ", user = " << user - << ", destination = " << destination - << ", poolset = " << poolset; + else if (info.GetStatus() == CloneStatus::done || + info.GetStatus() == CloneStatus::error || + info.GetStatus() == CloneStatus::metaInstalled) + { + // It may have been deleted, and it is necessary to determine + // whether the file exists again, Allowing further cloning under + // deleted conditions + existCloneInfos.push_back(info); + needJudgeFileExist = true; + } + else + { + // At this point, the same clone task is being deleted and the + // return file is occupied + return kErrCodeFileExist; + } + } + else + { // is recover + if (info.GetStatus() == CloneStatus::recovering || + info.GetStatus() == CloneStatus::retrying) + { + if ((info.GetUser() == user) && (info.GetSrc() == source) && + (info.GetIsLazy() == lazyFlag) && + (info.GetTaskType() == taskType)) + { + // Treat as the same clone, return task already exists + *cloneInfo = info; + return kErrCodeTaskExist; + } + else + { + // Treat it as a different clone, then the file is actually + // occupied and the return file already exists + return kErrCodeFileExist; + } + } + else if (info.GetStatus() == CloneStatus::done || + info.GetStatus() == CloneStatus::error || + info.GetStatus() == CloneStatus::metaInstalled) + { + // nothing + } + else + { + // At this point, the same task is being deleted and the return + // file is occupied return kErrCodeFileExist; } - } else { - // There is no corresponding cloneInfo, which means the file - // has a duplicate name - LOG(ERROR) << "Clone dest file must not exist" - << ", source = " << source << ", user = " << user - << ", destination = " << destination - << ", poolset = " << poolset; - return kErrCodeFileExist; } - } else if (CloneTaskType::kRecover == taskType) { - // The recover task keeps the poolset information of the volume - // unchanged - poolset = destFInfo.poolset; - } else { - assert(false); - } - break; - case -LIBCURVE_ERROR::NOTEXIST: - if (CloneTaskType::kRecover == taskType) { - LOG(ERROR) << "Recover dest file must exist" - << ", source = " << source << ", user = " << user - << ", destination = " << destination; - return kErrCodeFileNotExist; } - break; - default: - LOG(ERROR) << "GetFileInfo encounter an error" - << ", ret = " << ret << ", source = " << source - << ", user = " << user; - return kErrCodeInternalError; - } - // Is it a snapshot - SnapshotInfo snapInfo; - CloneFileType fileType; - - { - NameLockGuard lockSnapGuard(snapshotRef_->GetSnapshotLock(), source); - ret = metaStore_->GetSnapshotInfo(source, &snapInfo); - if (0 == ret) { - if (CloneTaskType::kRecover == taskType && - destination != snapInfo.GetFileName()) { - LOG(ERROR) << "Can not recover from the snapshot " - << "which is not belong to the destination volume."; - return kErrCodeInvalidSnapshot; - } - if (snapInfo.GetStatus() != Status::done) { - LOG(ERROR) << "Can not clone by snapshot has status:" - << static_cast(snapInfo.GetStatus()); - return kErrCodeInvalidSnapshot; - } - if (snapInfo.GetUser() != user) { - LOG(ERROR) << "Clone snapshot by invalid user" - << ", source = " << source << ", user = " << user - << ", destination = " << destination - << ", poolset = " << poolset - << ", snapshot.user = " << snapInfo.GetUser(); - return kErrCodeInvalidUser; - } - fileType = CloneFileType::kSnapshot; - snapshotRef_->IncrementSnapshotRef(source); - } - } - if (ret < 0) { - FInfo fInfo; - ret = client_->GetFileInfo(source, mdsRootUser_, &fInfo); - switch (ret) { + // The target 
file already exists and cannot be cloned or recovered if it + // does not exist + FInfo destFInfo; + int ret = client_->GetFileInfo(destination, mdsRootUser_, &destFInfo); + switch (ret) + { case LIBCURVE_ERROR::OK: - fileType = CloneFileType::kFile; + if (CloneTaskType::kClone == taskType) + { + if (needJudgeFileExist) + { + bool match = false; + // Find the cloneInfo that matches the inodeid + for (auto &existInfo : existCloneInfos) + { + if (destFInfo.id == existInfo.GetDestId()) + { + *cloneInfo = existInfo; + match = true; + break; + } + } + if (match) + { + return kErrCodeTaskExist; + } + else + { + // If not found, then none of the dest files were + // created by these clone tasks, It means the file has a + // duplicate name + LOG(ERROR) + << "Clone dest file exist, " + << "but task not match! " + << "source = " << source << ", user = " << user + << ", destination = " << destination + << ", poolset = " << poolset; + return kErrCodeFileExist; + } + } + else + { + // There is no corresponding cloneInfo, which means the file + // has a duplicate name + LOG(ERROR) << "Clone dest file must not exist" + << ", source = " << source << ", user = " << user + << ", destination = " << destination + << ", poolset = " << poolset; + return kErrCodeFileExist; + } + } + else if (CloneTaskType::kRecover == taskType) + { + // The recover task keeps the poolset information of the volume + // unchanged + poolset = destFInfo.poolset; + } + else + { + assert(false); + } break; case -LIBCURVE_ERROR::NOTEXIST: - case -LIBCURVE_ERROR::PARAM_ERROR: - LOG(ERROR) << "Clone source file not exist" - << ", source = " << source << ", user = " << user - << ", destination = " << destination - << ", poolset = " << poolset; - return kErrCodeFileNotExist; + if (CloneTaskType::kRecover == taskType) + { + LOG(ERROR) << "Recover dest file must exist" + << ", source = " << source << ", user = " << user + << ", destination = " << destination; + return kErrCodeFileNotExist; + } + break; default: LOG(ERROR) << "GetFileInfo encounter an error" << ", ret = " << ret << ", source = " << source << ", user = " << user; return kErrCodeInternalError; - } - if (fInfo.filestatus != FileStatus::Created && - fInfo.filestatus != FileStatus::Cloned && - fInfo.filestatus != FileStatus::BeingCloned) { - LOG(ERROR) << "Can not clone when file status = " - << static_cast(fInfo.filestatus); - return kErrCodeFileStatusInvalid; - } + } - // TODO (User authentication for mirror cloning to be improved) - } - - UUID uuid = UUIDGenerator().GenerateUUID(); - CloneInfo info(uuid, user, taskType, source, destination, poolset, fileType, - lazyFlag); - if (CloneTaskType::kClone == taskType) { - info.SetStatus(CloneStatus::cloning); - } else { - info.SetStatus(CloneStatus::recovering); - } - // Here, you must first AddCloneInfo because if you first set - // CloneFileStatus and then AddCloneInfo, If AddCloneInfo fails and - // unexpectedly restarts, no one will know that SetCloneFileStatus has been - // called, causing Mirror cannot be deleted - ret = metaStore_->AddCloneInfo(info); - if (ret < 0) { - LOG(ERROR) << "AddCloneInfo error" - << ", ret = " << ret << ", taskId = " << uuid - << ", user = " << user << ", source = " << source - << ", destination = " << destination - << ", poolset = " << poolset; - if (CloneFileType::kSnapshot == fileType) { - snapshotRef_->DecrementSnapshotRef(source); - } - return ret; - } - if (CloneFileType::kFile == fileType) { - NameLockGuard lockGuard(cloneRef_->GetLock(), source); - ret = client_->SetCloneFileStatus(source, 
FileStatus::BeingCloned, - mdsRootUser_); - if (ret < 0) { - // The SetCloneFileStatus error is not handled here, - // Because all results of SetCloneFileStatus failure are acceptable, - // Compared to handling SetCloneFileStatus failure, it is more - // direct: For example, calling DeleteCloneInfo to delete a task, - // Once DeleteCloneInfo fails and an error is returned to the user, - // Restarting the service will cause Clone to continue, - // Inconsistency with the results returned by the user, causing - // confusion for the user - LOG(WARNING) << "SetCloneFileStatus encounter an error" - << ", ret = " << ret << ", source = " << source - << ", user = " << user; - } - cloneRef_->IncrementRef(source); - } - - *cloneInfo = info; - return kErrCodeSuccess; -} - -int CloneCoreImpl::FlattenPre(const std::string& user, const TaskIdType& taskId, - CloneInfo* cloneInfo) { - (void)user; - int ret = metaStore_->GetCloneInfo(taskId, cloneInfo); - if (ret < 0) { - return kErrCodeFileNotExist; - } - switch (cloneInfo->GetStatus()) { - case CloneStatus::done: - case CloneStatus::cloning: - case CloneStatus::recovering: { - // A task exists is returned for completed or in progress, - // indicating that it does not need to be processed - return kErrCodeTaskExist; - } - case CloneStatus::metaInstalled: { - if (CloneTaskType::kClone == cloneInfo->GetTaskType()) { - cloneInfo->SetStatus(CloneStatus::cloning); - } else { - cloneInfo->SetStatus(CloneStatus::recovering); + // Is it a snapshot + SnapshotInfo snapInfo; + CloneFileType fileType; + + { + NameLockGuard lockSnapGuard(snapshotRef_->GetSnapshotLock(), source); + ret = metaStore_->GetSnapshotInfo(source, &snapInfo); + if (0 == ret) + { + if (CloneTaskType::kRecover == taskType && + destination != snapInfo.GetFileName()) + { + LOG(ERROR) << "Can not recover from the snapshot " + << "which is not belong to the destination volume."; + return kErrCodeInvalidSnapshot; + } + if (snapInfo.GetStatus() != Status::done) + { + LOG(ERROR) << "Can not clone by snapshot has status:" + << static_cast(snapInfo.GetStatus()); + return kErrCodeInvalidSnapshot; + } + if (snapInfo.GetUser() != user) + { + LOG(ERROR) << "Clone snapshot by invalid user" + << ", source = " << source << ", user = " << user + << ", destination = " << destination + << ", poolset = " << poolset + << ", snapshot.user = " << snapInfo.GetUser(); + return kErrCodeInvalidUser; + } + fileType = CloneFileType::kSnapshot; + snapshotRef_->IncrementSnapshotRef(source); + } } - break; - } - case CloneStatus::cleaning: - case CloneStatus::errorCleaning: - case CloneStatus::error: - default: { - LOG(ERROR) << "FlattenPre find clone task status Invalid" - << ", status = " - << static_cast(cloneInfo->GetStatus()); - return kErrCodeFileStatusInvalid; - } - } - ret = metaStore_->UpdateCloneInfo(*cloneInfo); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo fail" - << ", ret = " << ret - << ", taskId = " << cloneInfo->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -void CloneCoreImpl::HandleCloneOrRecoverTask( - std::shared_ptr task) { - brpc::ClosureGuard doneGuard(task->GetClosure().get()); - int ret = kErrCodeSuccess; - FInfo newFileInfo; - CloneSegmentMap segInfos; - if (IsSnapshot(task)) { - ret = BuildFileInfoFromSnapshot(task, &newFileInfo, &segInfos); - if (ret < 0) { - HandleCloneError(task, ret); - return; - } - } else { - ret = BuildFileInfoFromFile(task, &newFileInfo, &segInfos); - if (ret < 0) { - HandleCloneError(task, ret); - return; - } - } - - // In the steps after 
kCreateCloneMeta, it is necessary to update the - // chunkIdInfo in the CloneChunkInfo information - if (NeedUpdateCloneMeta(task)) { - ret = CreateOrUpdateCloneMeta(task, &newFileInfo, &segInfos); - if (ret < 0) { - HandleCloneError(task, ret); - return; - } - } - - CloneStep step = task->GetCloneInfo().GetNextStep(); - while (step != CloneStep::kEnd) { - switch (step) { - case CloneStep::kCreateCloneFile: - ret = CreateCloneFile(task, newFileInfo); - if (ret < 0) { - HandleCloneError(task, ret); - return; + if (ret < 0) + { + FInfo fInfo; + ret = client_->GetFileInfo(source, mdsRootUser_, &fInfo); + switch (ret) + { + case LIBCURVE_ERROR::OK: + fileType = CloneFileType::kFile; + break; + case -LIBCURVE_ERROR::NOTEXIST: + case -LIBCURVE_ERROR::PARAM_ERROR: + LOG(ERROR) << "Clone source file not exist" + << ", source = " << source << ", user = " << user + << ", destination = " << destination + << ", poolset = " << poolset; + return kErrCodeFileNotExist; + default: + LOG(ERROR) << "GetFileInfo encounter an error" + << ", ret = " << ret << ", source = " << source + << ", user = " << user; + return kErrCodeInternalError; } - task->SetProgress(kProgressCreateCloneFile); - break; - case CloneStep::kCreateCloneMeta: - ret = CreateCloneMeta(task, &newFileInfo, &segInfos); - if (ret < 0) { - HandleCloneError(task, ret); - return; + if (fInfo.filestatus != FileStatus::Created && + fInfo.filestatus != FileStatus::Cloned && + fInfo.filestatus != FileStatus::BeingCloned) + { + LOG(ERROR) << "Can not clone when file status = " + << static_cast(fInfo.filestatus); + return kErrCodeFileStatusInvalid; } - task->SetProgress(kProgressCreateCloneMeta); - break; - case CloneStep::kCreateCloneChunk: - ret = CreateCloneChunk(task, newFileInfo, &segInfos); - if (ret < 0) { - HandleCloneError(task, ret); - return; + + // TODO (User authentication for mirror cloning to be improved) + } + + UUID uuid = UUIDGenerator().GenerateUUID(); + CloneInfo info(uuid, user, taskType, source, destination, poolset, fileType, + lazyFlag); + if (CloneTaskType::kClone == taskType) + { + info.SetStatus(CloneStatus::cloning); + } + else + { + info.SetStatus(CloneStatus::recovering); + } + // Here, you must first AddCloneInfo because if you first set + // CloneFileStatus and then AddCloneInfo, If AddCloneInfo fails and + // unexpectedly restarts, no one will know that SetCloneFileStatus has been + // called, causing Mirror cannot be deleted + ret = metaStore_->AddCloneInfo(info); + if (ret < 0) + { + LOG(ERROR) << "AddCloneInfo error" + << ", ret = " << ret << ", taskId = " << uuid + << ", user = " << user << ", source = " << source + << ", destination = " << destination + << ", poolset = " << poolset; + if (CloneFileType::kSnapshot == fileType) + { + snapshotRef_->DecrementSnapshotRef(source); } - break; - case CloneStep::kCompleteCloneMeta: - ret = CompleteCloneMeta(task, newFileInfo, segInfos); - if (ret < 0) { - HandleCloneError(task, ret); - return; + return ret; + } + if (CloneFileType::kFile == fileType) + { + NameLockGuard lockGuard(cloneRef_->GetLock(), source); + ret = client_->SetCloneFileStatus(source, FileStatus::BeingCloned, + mdsRootUser_); + if (ret < 0) + { + // The SetCloneFileStatus error is not handled here, + // Because all results of SetCloneFileStatus failure are acceptable, + // Compared to handling SetCloneFileStatus failure, it is more + // direct: For example, calling DeleteCloneInfo to delete a task, + // Once DeleteCloneInfo fails and an error is returned to the user, + // Restarting the service will cause 
Clone to continue, + // Inconsistency with the results returned by the user, causing + // confusion for the user + LOG(WARNING) << "SetCloneFileStatus encounter an error" + << ", ret = " << ret << ", source = " << source + << ", user = " << user; } - task->SetProgress(kProgressMetaInstalled); - break; - case CloneStep::kRecoverChunk: - ret = RecoverChunk(task, newFileInfo, segInfos); - if (ret < 0) { - HandleCloneError(task, ret); - return; + cloneRef_->IncrementRef(source); + } + + *cloneInfo = info; + return kErrCodeSuccess; + } + + int CloneCoreImpl::FlattenPre(const std::string &user, const TaskIdType &taskId, + CloneInfo *cloneInfo) + { + (void)user; + int ret = metaStore_->GetCloneInfo(taskId, cloneInfo); + if (ret < 0) + { + return kErrCodeFileNotExist; + } + switch (cloneInfo->GetStatus()) + { + case CloneStatus::done: + case CloneStatus::cloning: + case CloneStatus::recovering: + { + // A task exists is returned for completed or in progress, + // indicating that it does not need to be processed + return kErrCodeTaskExist; + } + case CloneStatus::metaInstalled: + { + if (CloneTaskType::kClone == cloneInfo->GetTaskType()) + { + cloneInfo->SetStatus(CloneStatus::cloning); + } + else + { + cloneInfo->SetStatus(CloneStatus::recovering); } break; - case CloneStep::kChangeOwner: - ret = ChangeOwner(task, newFileInfo); - if (ret < 0) { + } + case CloneStatus::cleaning: + case CloneStatus::errorCleaning: + case CloneStatus::error: + default: + { + LOG(ERROR) << "FlattenPre find clone task status Invalid" + << ", status = " + << static_cast(cloneInfo->GetStatus()); + return kErrCodeFileStatusInvalid; + } + } + ret = metaStore_->UpdateCloneInfo(*cloneInfo); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo fail" + << ", ret = " << ret + << ", taskId = " << cloneInfo->GetTaskId(); + return ret; + } + return kErrCodeSuccess; + } + + void CloneCoreImpl::HandleCloneOrRecoverTask( + std::shared_ptr task) + { + brpc::ClosureGuard doneGuard(task->GetClosure().get()); + int ret = kErrCodeSuccess; + FInfo newFileInfo; + CloneSegmentMap segInfos; + if (IsSnapshot(task)) + { + ret = BuildFileInfoFromSnapshot(task, &newFileInfo, &segInfos); + if (ret < 0) + { HandleCloneError(task, ret); return; } - break; - case CloneStep::kRenameCloneFile: - ret = RenameCloneFile(task, newFileInfo); - if (ret < 0) { + } + else + { + ret = BuildFileInfoFromFile(task, &newFileInfo, &segInfos); + if (ret < 0) + { HandleCloneError(task, ret); return; } - if (IsLazy(task)) { - HandleLazyCloneStage1Finish(task); - doneGuard.release(); + } + + // In the steps after kCreateCloneMeta, it is necessary to update the + // chunkIdInfo in the CloneChunkInfo information + if (NeedUpdateCloneMeta(task)) + { + ret = CreateOrUpdateCloneMeta(task, &newFileInfo, &segInfos); + if (ret < 0) + { + HandleCloneError(task, ret); return; } - break; - case CloneStep::kCompleteCloneFile: - ret = CompleteCloneFile(task, newFileInfo, segInfos); - if (ret < 0) { + } + + CloneStep step = task->GetCloneInfo().GetNextStep(); + while (step != CloneStep::kEnd) + { + switch (step) + { + case CloneStep::kCreateCloneFile: + ret = CreateCloneFile(task, newFileInfo); + if (ret < 0) + { + HandleCloneError(task, ret); + return; + } + task->SetProgress(kProgressCreateCloneFile); + break; + case CloneStep::kCreateCloneMeta: + ret = CreateCloneMeta(task, &newFileInfo, &segInfos); + if (ret < 0) + { + HandleCloneError(task, ret); + return; + } + task->SetProgress(kProgressCreateCloneMeta); + break; + case CloneStep::kCreateCloneChunk: + ret = 
CreateCloneChunk(task, newFileInfo, &segInfos); + if (ret < 0) + { + HandleCloneError(task, ret); + return; + } + break; + case CloneStep::kCompleteCloneMeta: + ret = CompleteCloneMeta(task, newFileInfo, segInfos); + if (ret < 0) + { + HandleCloneError(task, ret); + return; + } + task->SetProgress(kProgressMetaInstalled); + break; + case CloneStep::kRecoverChunk: + ret = RecoverChunk(task, newFileInfo, segInfos); + if (ret < 0) + { + HandleCloneError(task, ret); + return; + } + break; + case CloneStep::kChangeOwner: + ret = ChangeOwner(task, newFileInfo); + if (ret < 0) + { + HandleCloneError(task, ret); + return; + } + break; + case CloneStep::kRenameCloneFile: + ret = RenameCloneFile(task, newFileInfo); + if (ret < 0) + { + HandleCloneError(task, ret); + return; + } + if (IsLazy(task)) + { + HandleLazyCloneStage1Finish(task); + doneGuard.release(); + return; + } + break; + case CloneStep::kCompleteCloneFile: + ret = CompleteCloneFile(task, newFileInfo, segInfos); + if (ret < 0) + { + HandleCloneError(task, ret); + return; + } + break; + default: + LOG(ERROR) << "can not reach here" + << ", taskid = " << task->GetTaskId(); HandleCloneError(task, ret); return; } - break; - default: - LOG(ERROR) << "can not reach here" - << ", taskid = " << task->GetTaskId(); - HandleCloneError(task, ret); - return; + task->UpdateMetric(); + step = task->GetCloneInfo().GetNextStep(); + } + HandleCloneSuccess(task); } - task->UpdateMetric(); - step = task->GetCloneInfo().GetNextStep(); - } - HandleCloneSuccess(task); -} - -int CloneCoreImpl::BuildFileInfoFromSnapshot( - std::shared_ptr task, FInfo* newFileInfo, - CloneSegmentMap* segInfos) { - segInfos->clear(); - UUID source = task->GetCloneInfo().GetSrc(); - - SnapshotInfo snapInfo; - int ret = metaStore_->GetSnapshotInfo(source, &snapInfo); - if (ret < 0) { - LOG(ERROR) << "GetSnapshotInfo error" - << ", source = " << source - << ", taskid = " << task->GetTaskId(); - return kErrCodeFileNotExist; - } - newFileInfo->chunksize = snapInfo.GetChunkSize(); - newFileInfo->segmentsize = snapInfo.GetSegmentSize(); - newFileInfo->length = snapInfo.GetFileLength(); - newFileInfo->stripeUnit = snapInfo.GetStripeUnit(); - newFileInfo->stripeCount = snapInfo.GetStripeCount(); - - if (task->GetCloneInfo().GetTaskType() == CloneTaskType::kRecover && - task->GetCloneInfo().GetPoolset().empty()) { - LOG(ERROR) << "Recover task's poolset should not be empty"; - return kErrCodeInternalError; - } - newFileInfo->poolset = !task->GetCloneInfo().GetPoolset().empty() - ? 
task->GetCloneInfo().GetPoolset() - : snapInfo.GetPoolset(); - - if (IsRecover(task)) { - FInfo fInfo; - std::string destination = task->GetCloneInfo().GetDest(); - std::string user = task->GetCloneInfo().GetUser(); - ret = client_->GetFileInfo(destination, mdsRootUser_, &fInfo); - switch (ret) { - case LIBCURVE_ERROR::OK: - break; - case -LIBCURVE_ERROR::NOTEXIST: - LOG(ERROR) << "BuildFileInfoFromSnapshot " - << "find dest file not exist, maybe deleted" - << ", ret = " << ret - << ", destination = " << destination - << ", user = " << user + + int CloneCoreImpl::BuildFileInfoFromSnapshot( + std::shared_ptr task, FInfo *newFileInfo, + CloneSegmentMap *segInfos) + { + segInfos->clear(); + UUID source = task->GetCloneInfo().GetSrc(); + + SnapshotInfo snapInfo; + int ret = metaStore_->GetSnapshotInfo(source, &snapInfo); + if (ret < 0) + { + LOG(ERROR) << "GetSnapshotInfo error" + << ", source = " << source << ", taskid = " << task->GetTaskId(); return kErrCodeFileNotExist; - default: - LOG(ERROR) << "GetFileInfo fail" - << ", ret = " << ret - << ", destination = " << destination - << ", user = " << user - << ", taskid = " << task->GetTaskId(); + } + newFileInfo->chunksize = snapInfo.GetChunkSize(); + newFileInfo->segmentsize = snapInfo.GetSegmentSize(); + newFileInfo->length = snapInfo.GetFileLength(); + newFileInfo->stripeUnit = snapInfo.GetStripeUnit(); + newFileInfo->stripeCount = snapInfo.GetStripeCount(); + + if (task->GetCloneInfo().GetTaskType() == CloneTaskType::kRecover && + task->GetCloneInfo().GetPoolset().empty()) + { + LOG(ERROR) << "Recover task's poolset should not be empty"; return kErrCodeInternalError; - } - // The destinationId recovered from the snapshot is the ID of the target - // file - task->GetCloneInfo().SetDestId(fInfo.id); - // Restore seqnum+1 from snapshot - newFileInfo->seqnum = fInfo.seqnum + 1; - } else { - newFileInfo->seqnum = kInitializeSeqNum; - } - newFileInfo->owner = task->GetCloneInfo().GetUser(); - - ChunkIndexDataName indexName(snapInfo.GetFileName(), snapInfo.GetSeqNum()); - ChunkIndexData snapMeta; - ret = dataStore_->GetChunkIndexData(indexName, &snapMeta); - if (ret < 0) { - LOG(ERROR) << "GetChunkIndexData error" - << ", fileName = " << snapInfo.GetFileName() - << ", seqNum = " << snapInfo.GetSeqNum() - << ", taskid = " << task->GetTaskId(); - return ret; - } - - uint64_t segmentSize = snapInfo.GetSegmentSize(); - uint64_t chunkSize = snapInfo.GetChunkSize(); - uint64_t chunkPerSegment = segmentSize / chunkSize; - - std::vector chunkIndexs = snapMeta.GetAllChunkIndex(); - for (auto& chunkIndex : chunkIndexs) { - ChunkDataName chunkDataName; - snapMeta.GetChunkDataName(chunkIndex, &chunkDataName); - uint64_t segmentIndex = chunkIndex / chunkPerSegment; - CloneChunkInfo info; - info.location = chunkDataName.ToDataChunkKey(); - info.needRecover = true; - if (IsRecover(task)) { - info.seqNum = chunkDataName.chunkSeqNum_; - } else { - info.seqNum = kInitializeSeqNum; - } + } + newFileInfo->poolset = !task->GetCloneInfo().GetPoolset().empty() + ? 
task->GetCloneInfo().GetPoolset() + : snapInfo.GetPoolset(); + + if (IsRecover(task)) + { + FInfo fInfo; + std::string destination = task->GetCloneInfo().GetDest(); + std::string user = task->GetCloneInfo().GetUser(); + ret = client_->GetFileInfo(destination, mdsRootUser_, &fInfo); + switch (ret) + { + case LIBCURVE_ERROR::OK: + break; + case -LIBCURVE_ERROR::NOTEXIST: + LOG(ERROR) << "BuildFileInfoFromSnapshot " + << "find dest file not exist, maybe deleted" + << ", ret = " << ret + << ", destination = " << destination + << ", user = " << user + << ", taskid = " << task->GetTaskId(); + return kErrCodeFileNotExist; + default: + LOG(ERROR) << "GetFileInfo fail" + << ", ret = " << ret + << ", destination = " << destination + << ", user = " << user + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + // The destinationId recovered from the snapshot is the ID of the target + // file + task->GetCloneInfo().SetDestId(fInfo.id); + // Restore seqnum+1 from snapshot + newFileInfo->seqnum = fInfo.seqnum + 1; + } + else + { + newFileInfo->seqnum = kInitializeSeqNum; + } + newFileInfo->owner = task->GetCloneInfo().GetUser(); + + ChunkIndexDataName indexName(snapInfo.GetFileName(), snapInfo.GetSeqNum()); + ChunkIndexData snapMeta; + ret = dataStore_->GetChunkIndexData(indexName, &snapMeta); + if (ret < 0) + { + LOG(ERROR) << "GetChunkIndexData error" + << ", fileName = " << snapInfo.GetFileName() + << ", seqNum = " << snapInfo.GetSeqNum() + << ", taskid = " << task->GetTaskId(); + return ret; + } - auto it = segInfos->find(segmentIndex); - if (it == segInfos->end()) { - CloneSegmentInfo segInfo; - segInfo.emplace(chunkIndex % chunkPerSegment, info); - segInfos->emplace(segmentIndex, segInfo); - } else { - it->second.emplace(chunkIndex % chunkPerSegment, info); - } - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::BuildFileInfoFromFile(std::shared_ptr task, - FInfo* newFileInfo, - CloneSegmentMap* segInfos) { - segInfos->clear(); - UUID source = task->GetCloneInfo().GetSrc(); - std::string user = task->GetCloneInfo().GetUser(); - - FInfo fInfo; - int ret = client_->GetFileInfo(source, mdsRootUser_, &fInfo); - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "GetFileInfo fail" - << ", ret = " << ret << ", source = " << source - << ", user = " << user << ", taskid = " << task->GetTaskId(); - return kErrCodeFileNotExist; - } - // GetOrAllocateSegment depends on fullPathName - fInfo.fullPathName = source; - - newFileInfo->chunksize = fInfo.chunksize; - newFileInfo->segmentsize = fInfo.segmentsize; - newFileInfo->length = fInfo.length; - newFileInfo->seqnum = kInitializeSeqNum; - newFileInfo->owner = task->GetCloneInfo().GetUser(); - newFileInfo->stripeUnit = fInfo.stripeUnit; - newFileInfo->stripeCount = fInfo.stripeCount; - - if (task->GetCloneInfo().GetTaskType() == CloneTaskType::kRecover && - task->GetCloneInfo().GetPoolset().empty()) { - LOG(ERROR) << "Recover task's poolset should not be empty"; - return kErrCodeInternalError; - } - newFileInfo->poolset = !task->GetCloneInfo().GetPoolset().empty() - ? 
task->GetCloneInfo().GetPoolset() - : fInfo.poolset; - - uint64_t fileLength = fInfo.length; - uint64_t segmentSize = fInfo.segmentsize; - uint64_t chunkSize = fInfo.chunksize; - - if (0 == segmentSize) { - LOG(ERROR) << "GetFileInfo return invalid fileInfo, segmentSize == 0" - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - if (fileLength % segmentSize != 0) { - LOG(ERROR) << "GetFileInfo return invalid fileInfo, " - << "fileLength is not align to SegmentSize" - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - - for (uint64_t i = 0; i < fileLength / segmentSize; i++) { - uint64_t offset = i * segmentSize; - SegmentInfo segInfoOut; - ret = client_->GetOrAllocateSegmentInfo(false, offset, &fInfo, - mdsRootUser_, &segInfoOut); - if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOT_ALLOCATE) { - LOG(ERROR) << "GetOrAllocateSegmentInfo fail" - << ", ret = " << ret << ", filename = " << source - << ", user = " << user << ", offset = " << offset - << ", allocateIfNotExist = " - << "false" - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - if (segInfoOut.chunkvec.size() != 0) { - CloneSegmentInfo segInfo; - for (std::vector::size_type j = 0; - j < segInfoOut.chunkvec.size(); j++) { + uint64_t segmentSize = snapInfo.GetSegmentSize(); + uint64_t chunkSize = snapInfo.GetChunkSize(); + uint64_t chunkPerSegment = segmentSize / chunkSize; + + std::vector chunkIndexs = snapMeta.GetAllChunkIndex(); + for (auto &chunkIndex : chunkIndexs) + { + ChunkDataName chunkDataName; + snapMeta.GetChunkDataName(chunkIndex, &chunkDataName); + uint64_t segmentIndex = chunkIndex / chunkPerSegment; CloneChunkInfo info; - info.location = std::to_string(offset + j * chunkSize); - info.seqNum = kInitializeSeqNum; + info.location = chunkDataName.ToDataChunkKey(); info.needRecover = true; - segInfo.emplace(j, info); + if (IsRecover(task)) + { + info.seqNum = chunkDataName.chunkSeqNum_; + } + else + { + info.seqNum = kInitializeSeqNum; + } + + auto it = segInfos->find(segmentIndex); + if (it == segInfos->end()) + { + CloneSegmentInfo segInfo; + segInfo.emplace(chunkIndex % chunkPerSegment, info); + segInfos->emplace(segmentIndex, segInfo); + } + else + { + it->second.emplace(chunkIndex % chunkPerSegment, info); + } } - segInfos->emplace(i, segInfo); - } - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::CreateCloneFile(std::shared_ptr task, - const FInfo& fInfo) { - std::string fileName = - cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); - std::string user = fInfo.owner; - uint64_t fileLength = fInfo.length; - uint64_t seqNum = fInfo.seqnum; - uint32_t chunkSize = fInfo.chunksize; - uint64_t stripeUnit = fInfo.stripeUnit; - uint64_t stripeCount = fInfo.stripeCount; - const auto& poolset = fInfo.poolset; - - std::string source = ""; - // Clone source is only available when cloning from a file - if (CloneFileType::kFile == task->GetCloneInfo().GetFileType()) { - source = task->GetCloneInfo().GetSrc(); - } - - FInfo fInfoOut; - int ret = client_->CreateCloneFile( - source, fileName, mdsRootUser_, fileLength, seqNum, chunkSize, - stripeUnit, stripeCount, poolset, &fInfoOut); - if (ret == LIBCURVE_ERROR::OK) { - // nothing - } else if (ret == -LIBCURVE_ERROR::EXISTS) { - ret = client_->GetFileInfo(fileName, mdsRootUser_, &fInfoOut); - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "GetFileInfo fail" - << ", ret = " << ret << ", fileName = " << fileName - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; + 
return kErrCodeSuccess; } - } else { - LOG(ERROR) << "CreateCloneFile file" - << ", ret = " << ret << ", destination = " << fileName - << ", user = " << user << ", fileLength = " << fileLength - << ", seqNum = " << seqNum << ", chunkSize = " << chunkSize - << ", return fileId = " << fInfoOut.id - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - task->GetCloneInfo().SetOriginId(fInfoOut.id); - if (IsClone(task)) { - // In the case of cloning, destinationId = originId; - task->GetCloneInfo().SetDestId(fInfoOut.id); - } - task->GetCloneInfo().SetTime(fInfoOut.ctime); - // If it is a lazy&non snapshot, do not createCloneMeta or createCloneChunk - // yet Wait until stage 2 recoveryChunk, go to createCloneMeta, - // createCloneChunk - if (IsLazy(task) && IsFile(task)) { - task->GetCloneInfo().SetNextStep(CloneStep::kCompleteCloneMeta); - } else { - task->GetCloneInfo().SetNextStep(CloneStep::kCreateCloneMeta); - } - - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after CreateCloneFile error." - << " ret = " << ret << ", taskid = " << task->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::CreateCloneMeta(std::shared_ptr task, - FInfo* fInfo, CloneSegmentMap* segInfos) { - int ret = CreateOrUpdateCloneMeta(task, fInfo, segInfos); - if (ret < 0) { - return ret; - } - - task->GetCloneInfo().SetNextStep(CloneStep::kCreateCloneChunk); - - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after CreateCloneMeta error." - << " ret = " << ret << ", taskid = " << task->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::CreateCloneChunk(std::shared_ptr task, - const FInfo& fInfo, - CloneSegmentMap* segInfos) { - int ret = kErrCodeSuccess; - uint32_t chunkSize = fInfo.chunksize; - uint32_t correctSn = 0; - // When cloning, correctSn is 0, and when restoring, it is the newly - // generated file version - if (IsClone(task)) { - correctSn = 0; - } else { - correctSn = fInfo.seqnum; - } - auto tracker = std::make_shared(); - for (auto& cloneSegmentInfo : *segInfos) { - for (auto& cloneChunkInfo : cloneSegmentInfo.second) { - std::string location; - if (IsSnapshot(task)) { - location = LocationOperator::GenerateS3Location( - cloneChunkInfo.second.location); - } else { - location = LocationOperator::GenerateCurveLocation( - task->GetCloneInfo().GetSrc(), - std::stoull(cloneChunkInfo.second.location)); - } - ChunkIDInfo cidInfo = cloneChunkInfo.second.chunkIdInfo; - - auto context = std::make_shared(); - context->location = location; - context->cidInfo = cidInfo; - context->cloneChunkInfo = &cloneChunkInfo.second; - context->sn = cloneChunkInfo.second.seqNum; - context->csn = correctSn; - context->chunkSize = chunkSize; - context->taskid = task->GetTaskId(); - context->startTime = TimeUtility::GetTimeofDaySec(); - context->clientAsyncMethodRetryTimeSec = - clientAsyncMethodRetryTimeSec_; - - ret = StartAsyncCreateCloneChunk(task, tracker, context); - if (ret < 0) { + + int CloneCoreImpl::BuildFileInfoFromFile(std::shared_ptr task, + FInfo *newFileInfo, + CloneSegmentMap *segInfos) + { + segInfos->clear(); + UUID source = task->GetCloneInfo().GetSrc(); + std::string user = task->GetCloneInfo().GetUser(); + + FInfo fInfo; + int ret = client_->GetFileInfo(source, mdsRootUser_, &fInfo); + if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "GetFileInfo fail" + << ", ret = " << ret << ", source = " << source + << 
", user = " << user << ", taskid = " << task->GetTaskId(); + return kErrCodeFileNotExist; + } + // GetOrAllocateSegment depends on fullPathName + fInfo.fullPathName = source; + + newFileInfo->chunksize = fInfo.chunksize; + newFileInfo->segmentsize = fInfo.segmentsize; + newFileInfo->length = fInfo.length; + newFileInfo->seqnum = kInitializeSeqNum; + newFileInfo->owner = task->GetCloneInfo().GetUser(); + newFileInfo->stripeUnit = fInfo.stripeUnit; + newFileInfo->stripeCount = fInfo.stripeCount; + + if (task->GetCloneInfo().GetTaskType() == CloneTaskType::kRecover && + task->GetCloneInfo().GetPoolset().empty()) + { + LOG(ERROR) << "Recover task's poolset should not be empty"; return kErrCodeInternalError; } + newFileInfo->poolset = !task->GetCloneInfo().GetPoolset().empty() + ? task->GetCloneInfo().GetPoolset() + : fInfo.poolset; - if (tracker->GetTaskNum() >= createCloneChunkConcurrency_) { - tracker->WaitSome(1); + uint64_t fileLength = fInfo.length; + uint64_t segmentSize = fInfo.segmentsize; + uint64_t chunkSize = fInfo.chunksize; + + if (0 == segmentSize) + { + LOG(ERROR) << "GetFileInfo return invalid fileInfo, segmentSize == 0" + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; } - std::list results = - tracker->PopResultContexts(); - ret = HandleCreateCloneChunkResultsAndRetry(task, tracker, results); - if (ret < 0) { + if (fileLength % segmentSize != 0) + { + LOG(ERROR) << "GetFileInfo return invalid fileInfo, " + << "fileLength is not align to SegmentSize" + << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } + + for (uint64_t i = 0; i < fileLength / segmentSize; i++) + { + uint64_t offset = i * segmentSize; + SegmentInfo segInfoOut; + ret = client_->GetOrAllocateSegmentInfo(false, offset, &fInfo, + mdsRootUser_, &segInfoOut); + if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOT_ALLOCATE) + { + LOG(ERROR) << "GetOrAllocateSegmentInfo fail" + << ", ret = " << ret << ", filename = " << source + << ", user = " << user << ", offset = " << offset + << ", allocateIfNotExist = " + << "false" + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + if (segInfoOut.chunkvec.size() != 0) + { + CloneSegmentInfo segInfo; + for (std::vector::size_type j = 0; + j < segInfoOut.chunkvec.size(); j++) + { + CloneChunkInfo info; + info.location = std::to_string(offset + j * chunkSize); + info.seqNum = kInitializeSeqNum; + info.needRecover = true; + segInfo.emplace(j, info); + } + segInfos->emplace(i, segInfo); + } + } + return kErrCodeSuccess; } - } - // Tasks with insufficient remaining quantity in the end - do { - tracker->WaitSome(1); - std::list results = - tracker->PopResultContexts(); - if (0 == results.size()) { - // Completed, no new results - break; + + int CloneCoreImpl::CreateCloneFile(std::shared_ptr task, + const FInfo &fInfo) + { + std::string fileName = + cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + std::string user = fInfo.owner; + uint64_t fileLength = fInfo.length; + uint64_t seqNum = fInfo.seqnum; + uint32_t chunkSize = fInfo.chunksize; + uint64_t stripeUnit = fInfo.stripeUnit; + uint64_t stripeCount = fInfo.stripeCount; + const auto &poolset = fInfo.poolset; + + std::string source = ""; + // Clone source is only available when cloning from a file + if (CloneFileType::kFile == task->GetCloneInfo().GetFileType()) + { + source = task->GetCloneInfo().GetSrc(); + } + + FInfo fInfoOut; + int ret = client_->CreateCloneFile( + source, fileName, mdsRootUser_, fileLength, seqNum, chunkSize, + stripeUnit, 
stripeCount, poolset, &fInfoOut); + if (ret == LIBCURVE_ERROR::OK) + { + // nothing + } + else if (ret == -LIBCURVE_ERROR::EXISTS) + { + ret = client_->GetFileInfo(fileName, mdsRootUser_, &fInfoOut); + if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "GetFileInfo fail" + << ", ret = " << ret << ", fileName = " << fileName + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + } + else + { + LOG(ERROR) << "CreateCloneFile file" + << ", ret = " << ret << ", destination = " << fileName + << ", user = " << user << ", fileLength = " << fileLength + << ", seqNum = " << seqNum << ", chunkSize = " << chunkSize + << ", return fileId = " << fInfoOut.id + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + task->GetCloneInfo().SetOriginId(fInfoOut.id); + if (IsClone(task)) + { + // In the case of cloning, destinationId = originId; + task->GetCloneInfo().SetDestId(fInfoOut.id); + } + task->GetCloneInfo().SetTime(fInfoOut.ctime); + // If it is a lazy&non snapshot, do not createCloneMeta or createCloneChunk + // yet Wait until stage 2 recoveryChunk, go to createCloneMeta, + // createCloneChunk + if (IsLazy(task) && IsFile(task)) + { + task->GetCloneInfo().SetNextStep(CloneStep::kCompleteCloneMeta); + } + else + { + task->GetCloneInfo().SetNextStep(CloneStep::kCreateCloneMeta); + } + + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo after CreateCloneFile error." + << " ret = " << ret << ", taskid = " << task->GetTaskId(); + return ret; + } + return kErrCodeSuccess; } - ret = HandleCreateCloneChunkResultsAndRetry(task, tracker, results); - if (ret < 0) { - return kErrCodeInternalError; + + int CloneCoreImpl::CreateCloneMeta(std::shared_ptr task, + FInfo *fInfo, CloneSegmentMap *segInfos) + { + int ret = CreateOrUpdateCloneMeta(task, fInfo, segInfos); + if (ret < 0) + { + return ret; + } + + task->GetCloneInfo().SetNextStep(CloneStep::kCreateCloneChunk); + + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo after CreateCloneMeta error." + << " ret = " << ret << ", taskid = " << task->GetTaskId(); + return ret; + } + return kErrCodeSuccess; } - } while (true); - - if (IsLazy(task) && IsFile(task)) { - task->GetCloneInfo().SetNextStep(CloneStep::kRecoverChunk); - } else { - task->GetCloneInfo().SetNextStep(CloneStep::kCompleteCloneMeta); - } - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after CreateCloneChunk error." 
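// ---------------------------------------------------------------------------
// A minimal, self-contained sketch of the throttling pattern CreateCloneChunk
// uses above: submit CreateCloneChunk RPCs asynchronously, but once the number
// in flight reaches createCloneChunkConcurrency_, wait for at least one
// completion before submitting more, and drain the stragglers at the end. The
// AddOneTrace/WaitSome/GetTaskNum interface mirrors the tracker calls in the
// surrounding code; SimpleTracker itself is a hypothetical stand-in, not the
// project's tracker implementation.
#include <condition_variable>
#include <cstdint>
#include <mutex>

class SimpleTracker {
 public:
    void AddOneTrace() {  // called once per submitted async request
        std::lock_guard<std::mutex> lk(mu_);
        ++inflight_;
    }
    void OnComplete() {  // called from the async completion callback
        std::lock_guard<std::mutex> lk(mu_);
        --inflight_;
        cv_.notify_all();
    }
    void WaitSome(uint64_t n) {  // block until at least n requests finish
        std::unique_lock<std::mutex> lk(mu_);
        uint64_t target = (inflight_ >= n) ? inflight_ - n : 0;
        cv_.wait(lk, [&] { return inflight_ <= target; });
    }
    uint64_t GetTaskNum() {
        std::lock_guard<std::mutex> lk(mu_);
        return inflight_;
    }
 private:
    std::mutex mu_;
    std::condition_variable cv_;
    uint64_t inflight_ = 0;
};
// Usage mirrors the loop above: submit, then
//   if (tracker.GetTaskNum() >= createCloneChunkConcurrency_)
//       tracker.WaitSome(1);
// and after the loop: while (tracker.GetTaskNum() > 0) tracker.WaitSome(1);
// ---------------------------------------------------------------------------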
- << " ret = " << ret << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::StartAsyncCreateCloneChunk( - std::shared_ptr task, - std::shared_ptr tracker, - std::shared_ptr context) { - CreateCloneChunkClosure* cb = new CreateCloneChunkClosure(tracker, context); - tracker->AddOneTrace(); - LOG(INFO) << "Doing CreateCloneChunk" - << ", location = " << context->location - << ", logicalPoolId = " << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", chunkId = " << context->cidInfo.cid_ - << ", seqNum = " << context->sn << ", csn = " << context->csn - << ", taskid = " << task->GetTaskId(); - int ret = client_->CreateCloneChunk(context->location, context->cidInfo, - context->sn, context->csn, - context->chunkSize, cb); - - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "CreateCloneChunk fail" - << ", ret = " << ret << ", location = " << context->location - << ", logicalPoolId = " << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", chunkId = " << context->cidInfo.cid_ - << ", seqNum = " << context->sn << ", csn = " << context->csn - << ", taskid = " << task->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::HandleCreateCloneChunkResultsAndRetry( - std::shared_ptr task, - std::shared_ptr tracker, - const std::list& results) { - int ret = kErrCodeSuccess; - for (auto context : results) { - if (context->retCode == -LIBCURVE_ERROR::EXISTS) { - LOG(INFO) << "CreateCloneChunk chunk exist" + + int CloneCoreImpl::CreateCloneChunk(std::shared_ptr task, + const FInfo &fInfo, + CloneSegmentMap *segInfos) + { + int ret = kErrCodeSuccess; + uint32_t chunkSize = fInfo.chunksize; + uint32_t correctSn = 0; + // When cloning, correctSn is 0, and when restoring, it is the newly + // generated file version + if (IsClone(task)) + { + correctSn = 0; + } + else + { + correctSn = fInfo.seqnum; + } + auto tracker = std::make_shared(); + for (auto &cloneSegmentInfo : *segInfos) + { + for (auto &cloneChunkInfo : cloneSegmentInfo.second) + { + std::string location; + if (IsSnapshot(task)) + { + location = LocationOperator::GenerateS3Location( + cloneChunkInfo.second.location); + } + else + { + location = LocationOperator::GenerateCurveLocation( + task->GetCloneInfo().GetSrc(), + std::stoull(cloneChunkInfo.second.location)); + } + ChunkIDInfo cidInfo = cloneChunkInfo.second.chunkIdInfo; + + auto context = std::make_shared(); + context->location = location; + context->cidInfo = cidInfo; + context->cloneChunkInfo = &cloneChunkInfo.second; + context->sn = cloneChunkInfo.second.seqNum; + context->csn = correctSn; + context->chunkSize = chunkSize; + context->taskid = task->GetTaskId(); + context->startTime = TimeUtility::GetTimeofDaySec(); + context->clientAsyncMethodRetryTimeSec = + clientAsyncMethodRetryTimeSec_; + + ret = StartAsyncCreateCloneChunk(task, tracker, context); + if (ret < 0) + { + return kErrCodeInternalError; + } + + if (tracker->GetTaskNum() >= createCloneChunkConcurrency_) + { + tracker->WaitSome(1); + } + std::list results = + tracker->PopResultContexts(); + ret = HandleCreateCloneChunkResultsAndRetry(task, tracker, results); + if (ret < 0) + { + return kErrCodeInternalError; + } + } + } + // Tasks with insufficient remaining quantity in the end + do + { + tracker->WaitSome(1); + std::list results = + tracker->PopResultContexts(); + if (0 == results.size()) + { + // Completed, no new results + break; + } + ret = 
HandleCreateCloneChunkResultsAndRetry(task, tracker, results); + if (ret < 0) + { + return kErrCodeInternalError; + } + } while (true); + + if (IsLazy(task) && IsFile(task)) + { + task->GetCloneInfo().SetNextStep(CloneStep::kRecoverChunk); + } + else + { + task->GetCloneInfo().SetNextStep(CloneStep::kCompleteCloneMeta); + } + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo after CreateCloneChunk error." + << " ret = " << ret << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + return kErrCodeSuccess; + } + + int CloneCoreImpl::StartAsyncCreateCloneChunk( + std::shared_ptr task, + std::shared_ptr tracker, + std::shared_ptr context) + { + CreateCloneChunkClosure *cb = new CreateCloneChunkClosure(tracker, context); + tracker->AddOneTrace(); + LOG(INFO) << "Doing CreateCloneChunk" << ", location = " << context->location << ", logicalPoolId = " << context->cidInfo.lpid_ << ", copysetId = " << context->cidInfo.cpid_ << ", chunkId = " << context->cidInfo.cid_ - << ", seqNum = " << context->sn - << ", csn = " << context->csn + << ", seqNum = " << context->sn << ", csn = " << context->csn << ", taskid = " << task->GetTaskId(); - context->cloneChunkInfo->needRecover = false; - } else if (context->retCode != LIBCURVE_ERROR::OK) { - uint64_t nowTime = TimeUtility::GetTimeofDaySec(); - if (nowTime - context->startTime < - context->clientAsyncMethodRetryTimeSec) { - // retry - std::this_thread::sleep_for(std::chrono::milliseconds( - clientAsyncMethodRetryIntervalMs_)); - ret = StartAsyncCreateCloneChunk(task, tracker, context); - if (ret < 0) { - return kErrCodeInternalError; - } - } else { - LOG(ERROR) << "CreateCloneChunk tracker GetResult fail" - << ", ret = " << ret + int ret = client_->CreateCloneChunk(context->location, context->cidInfo, + context->sn, context->csn, + context->chunkSize, cb); + + if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "CreateCloneChunk fail" + << ", ret = " << ret << ", location = " << context->location + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", seqNum = " << context->sn << ", csn = " << context->csn << ", taskid = " << task->GetTaskId(); + return ret; + } + return kErrCodeSuccess; + } + + int CloneCoreImpl::HandleCreateCloneChunkResultsAndRetry( + std::shared_ptr task, + std::shared_ptr tracker, + const std::list &results) + { + int ret = kErrCodeSuccess; + for (auto context : results) + { + if (context->retCode == -LIBCURVE_ERROR::EXISTS) + { + LOG(INFO) << "CreateCloneChunk chunk exist" + << ", location = " << context->location + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", seqNum = " << context->sn + << ", csn = " << context->csn + << ", taskid = " << task->GetTaskId(); + context->cloneChunkInfo->needRecover = false; + } + else if (context->retCode != LIBCURVE_ERROR::OK) + { + uint64_t nowTime = TimeUtility::GetTimeofDaySec(); + if (nowTime - context->startTime < + context->clientAsyncMethodRetryTimeSec) + { + // retry + std::this_thread::sleep_for(std::chrono::milliseconds( + clientAsyncMethodRetryIntervalMs_)); + ret = StartAsyncCreateCloneChunk(task, tracker, context); + if (ret < 0) + { + return kErrCodeInternalError; + } + } + else + { + LOG(ERROR) << "CreateCloneChunk tracker GetResult fail" + << ", ret = " << ret + << ", taskid = " << task->GetTaskId(); + return 
kErrCodeInternalError; + } + } + } + return ret; + } + + int CloneCoreImpl::CompleteCloneMeta(std::shared_ptr task, + const FInfo &fInfo, + const CloneSegmentMap &segInfos) + { + (void)fInfo; + (void)segInfos; + std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + std::string user = task->GetCloneInfo().GetUser(); + int ret = client_->CompleteCloneMeta(origin, mdsRootUser_); + if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "CompleteCloneMeta fail" + << ", ret = " << ret << ", filename = " << origin + << ", user = " << user << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } + if (IsLazy(task)) + { + task->GetCloneInfo().SetNextStep(CloneStep::kChangeOwner); + } + else + { + task->GetCloneInfo().SetNextStep(CloneStep::kRecoverChunk); + } + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo after CompleteCloneMeta error." + << " ret = " << ret << ", taskid = " << task->GetTaskId(); + return ret; + } + return kErrCodeSuccess; } - } - return ret; -} - -int CloneCoreImpl::CompleteCloneMeta(std::shared_ptr task, - const FInfo& fInfo, - const CloneSegmentMap& segInfos) { - (void)fInfo; - (void)segInfos; - std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); - std::string user = task->GetCloneInfo().GetUser(); - int ret = client_->CompleteCloneMeta(origin, mdsRootUser_); - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "CompleteCloneMeta fail" - << ", ret = " << ret << ", filename = " << origin - << ", user = " << user << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - if (IsLazy(task)) { - task->GetCloneInfo().SetNextStep(CloneStep::kChangeOwner); - } else { - task->GetCloneInfo().SetNextStep(CloneStep::kRecoverChunk); - } - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after CompleteCloneMeta error." 
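// ---------------------------------------------------------------------------
// HandleCreateCloneChunkResultsAndRetry above resubmits a failed chunk until a
// per-context deadline expires: each context records its first-attempt time in
// startTime and the retry window in clientAsyncMethodRetryTimeSec, and the
// code sleeps clientAsyncMethodRetryIntervalMs_ between attempts. The decision
// reduces to the sketch below (DecideRetry is a hypothetical helper; the field
// names come from the surrounding code).
#include <cstdint>

enum class RetryDecision { kRetryAfterSleep, kGiveUp };

inline RetryDecision DecideRetry(uint64_t nowSec, uint64_t startTimeSec,
                                 uint64_t retryWindowSec) {
    // Retry while the elapsed time since the first attempt is still inside
    // the window; afterwards the clone step fails with kErrCodeInternalError.
    return (nowSec - startTimeSec < retryWindowSec)
               ? RetryDecision::kRetryAfterSleep
               : RetryDecision::kGiveUp;
}
// ---------------------------------------------------------------------------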
- << " ret = " << ret << ", taskid = " << task->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::RecoverChunk(std::shared_ptr task, - const FInfo& fInfo, - const CloneSegmentMap& segInfos) { - int ret = kErrCodeSuccess; - uint32_t chunkSize = fInfo.chunksize; - - uint32_t totalProgress = - kProgressRecoverChunkEnd - kProgressRecoverChunkBegin; - uint32_t segNum = segInfos.size(); - double progressPerData = static_cast(totalProgress) / segNum; - uint32_t index = 0; - - if (0 == cloneChunkSplitSize_ || chunkSize % cloneChunkSplitSize_ != 0) { - LOG(ERROR) << "chunk is not align to cloneChunkSplitSize" - << ", taskid = " << task->GetTaskId(); - return kErrCodeChunkSizeNotAligned; - } - - auto tracker = std::make_shared(); - uint64_t workingChunkNum = 0; - // To avoid collisions with the same chunk, asynchronous requests for - // different chunks - for (auto& cloneSegmentInfo : segInfos) { - for (auto& cloneChunkInfo : cloneSegmentInfo.second) { - if (!cloneChunkInfo.second.needRecover) { - continue; - } - // When the current number of chunks for concurrent work exceeds the - // required number of concurrent tasks, digest a portion first - while (workingChunkNum >= recoverChunkConcurrency_) { + + int CloneCoreImpl::RecoverChunk(std::shared_ptr task, + const FInfo &fInfo, + const CloneSegmentMap &segInfos) + { + int ret = kErrCodeSuccess; + uint32_t chunkSize = fInfo.chunksize; + + uint32_t totalProgress = + kProgressRecoverChunkEnd - kProgressRecoverChunkBegin; + uint32_t segNum = segInfos.size(); + double progressPerData = static_cast(totalProgress) / segNum; + uint32_t index = 0; + + if (0 == cloneChunkSplitSize_ || chunkSize % cloneChunkSplitSize_ != 0) + { + LOG(ERROR) << "chunk is not align to cloneChunkSplitSize" + << ", taskid = " << task->GetTaskId(); + return kErrCodeChunkSizeNotAligned; + } + + auto tracker = std::make_shared(); + uint64_t workingChunkNum = 0; + // To avoid collisions with the same chunk, asynchronous requests for + // different chunks + for (auto &cloneSegmentInfo : segInfos) + { + for (auto &cloneChunkInfo : cloneSegmentInfo.second) + { + if (!cloneChunkInfo.second.needRecover) + { + continue; + } + // When the current number of chunks for concurrent work exceeds the + // required number of concurrent tasks, digest a portion first + while (workingChunkNum >= recoverChunkConcurrency_) + { + uint64_t completeChunkNum = 0; + ret = ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( + task, tracker, &completeChunkNum); + if (ret < 0) + { + return kErrCodeInternalError; + } + workingChunkNum -= completeChunkNum; + } + // Chunk joining a new job + workingChunkNum++; + auto context = std::make_shared(); + context->cidInfo = cloneChunkInfo.second.chunkIdInfo; + context->totalPartNum = chunkSize / cloneChunkSplitSize_; + context->partIndex = 0; + context->partSize = cloneChunkSplitSize_; + context->taskid = task->GetTaskId(); + context->startTime = TimeUtility::GetTimeofDaySec(); + context->clientAsyncMethodRetryTimeSec = + clientAsyncMethodRetryTimeSec_; + + LOG(INFO) << "RecoverChunk start" + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", len = " << context->partSize + << ", taskid = " << task->GetTaskId(); + + ret = StartAsyncRecoverChunkPart(task, tracker, context); + if (ret < 0) + { + return kErrCodeInternalError; + } + } + task->SetProgress(static_cast(kProgressRecoverChunkBegin + + index * progressPerData)); + task->UpdateMetric(); + 
index++; + } + + while (workingChunkNum > 0) + { uint64_t completeChunkNum = 0; ret = ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( task, tracker, &completeChunkNum); - if (ret < 0) { + if (ret < 0) + { return kErrCodeInternalError; } workingChunkNum -= completeChunkNum; } - // Chunk joining a new job - workingChunkNum++; - auto context = std::make_shared(); - context->cidInfo = cloneChunkInfo.second.chunkIdInfo; - context->totalPartNum = chunkSize / cloneChunkSplitSize_; - context->partIndex = 0; - context->partSize = cloneChunkSplitSize_; - context->taskid = task->GetTaskId(); - context->startTime = TimeUtility::GetTimeofDaySec(); - context->clientAsyncMethodRetryTimeSec = - clientAsyncMethodRetryTimeSec_; - - LOG(INFO) << "RecoverChunk start" - << ", logicalPoolId = " << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", chunkId = " << context->cidInfo.cid_ - << ", len = " << context->partSize - << ", taskid = " << task->GetTaskId(); - ret = StartAsyncRecoverChunkPart(task, tracker, context); - if (ret < 0) { + task->GetCloneInfo().SetNextStep(CloneStep::kCompleteCloneFile); + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo after RecoverChunk error." + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } + return kErrCodeSuccess; } - task->SetProgress(static_cast(kProgressRecoverChunkBegin + - index * progressPerData)); - task->UpdateMetric(); - index++; - } - - while (workingChunkNum > 0) { - uint64_t completeChunkNum = 0; - ret = ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( - task, tracker, &completeChunkNum); - if (ret < 0) { - return kErrCodeInternalError; - } - workingChunkNum -= completeChunkNum; - } - - task->GetCloneInfo().SetNextStep(CloneStep::kCompleteCloneFile); - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after RecoverChunk error." 
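// ---------------------------------------------------------------------------
// Two bits of arithmetic in RecoverChunk above, restated. Each chunk is split
// into totalPartNum = chunkSize / cloneChunkSplitSize_ parts (hence the
// alignment check), and per-segment progress is mapped linearly onto the range
// [kProgressRecoverChunkBegin, kProgressRecoverChunkEnd). The numbers in the
// example below are assumed for illustration only.
#include <cstdint>

inline uint32_t RecoverProgress(uint32_t begin, uint32_t end,
                                uint32_t segNum, uint32_t index) {
    double perSeg = static_cast<double>(end - begin) / segNum;
    return static_cast<uint32_t>(begin + index * perSeg);
}
// E.g. with an assumed 16 MiB chunk and 1 MiB cloneChunkSplitSize_, each chunk
// becomes 16 RecoverChunk parts; with an assumed range 20..95 and 20 segments,
// RecoverProgress(20, 95, 20, 10) == 57 after half the segments are done.
// ---------------------------------------------------------------------------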
- << " ret = " << ret << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::StartAsyncRecoverChunkPart( - std::shared_ptr task, - std::shared_ptr tracker, - std::shared_ptr context) { - RecoverChunkClosure* cb = new RecoverChunkClosure(tracker, context); - tracker->AddOneTrace(); - uint64_t offset = context->partIndex * context->partSize; - LOG_EVERY_SECOND(INFO) << "Doing RecoverChunk" + + int CloneCoreImpl::StartAsyncRecoverChunkPart( + std::shared_ptr task, + std::shared_ptr tracker, + std::shared_ptr context) + { + RecoverChunkClosure *cb = new RecoverChunkClosure(tracker, context); + tracker->AddOneTrace(); + uint64_t offset = context->partIndex * context->partSize; + LOG_EVERY_SECOND(INFO) << "Doing RecoverChunk" + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", offset = " << offset + << ", len = " << context->partSize + << ", taskid = " << task->GetTaskId(); + int ret = + client_->RecoverChunk(context->cidInfo, offset, context->partSize, cb); + if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "RecoverChunk fail" + << ", ret = " << ret << ", logicalPoolId = " << context->cidInfo.lpid_ << ", copysetId = " << context->cidInfo.cpid_ << ", chunkId = " << context->cidInfo.cid_ - << ", offset = " << offset - << ", len = " << context->partSize + << ", offset = " << offset << ", len = " << context->partSize << ", taskid = " << task->GetTaskId(); - int ret = - client_->RecoverChunk(context->cidInfo, offset, context->partSize, cb); - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "RecoverChunk fail" - << ", ret = " << ret - << ", logicalPoolId = " << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", chunkId = " << context->cidInfo.cid_ - << ", offset = " << offset << ", len = " << context->partSize - << ", taskid = " << task->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( - std::shared_ptr task, - std::shared_ptr tracker, - uint64_t* completeChunkNum) { - *completeChunkNum = 0; - tracker->WaitSome(1); - std::list results = tracker->PopResultContexts(); - for (auto context : results) { - if (context->retCode != LIBCURVE_ERROR::OK) { - uint64_t nowTime = TimeUtility::GetTimeofDaySec(); - if (nowTime - context->startTime < - context->clientAsyncMethodRetryTimeSec) { - // retry - std::this_thread::sleep_for(std::chrono::milliseconds( - clientAsyncMethodRetryIntervalMs_)); - int ret = StartAsyncRecoverChunkPart(task, tracker, context); - if (ret < 0) { - return ret; + return ret; + } + return kErrCodeSuccess; + } + + int CloneCoreImpl::ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( + std::shared_ptr task, + std::shared_ptr tracker, + uint64_t *completeChunkNum) + { + *completeChunkNum = 0; + tracker->WaitSome(1); + std::list results = tracker->PopResultContexts(); + for (auto context : results) + { + if (context->retCode != LIBCURVE_ERROR::OK) + { + uint64_t nowTime = TimeUtility::GetTimeofDaySec(); + if (nowTime - context->startTime < + context->clientAsyncMethodRetryTimeSec) + { + // retry + std::this_thread::sleep_for(std::chrono::milliseconds( + clientAsyncMethodRetryIntervalMs_)); + int ret = StartAsyncRecoverChunkPart(task, tracker, context); + if (ret < 0) + { + return ret; + } + } + else + { + LOG(ERROR) << "RecoverChunk tracker GetResult fail" + << ", ret = " << context->retCode + << ", taskid = " 
<< task->GetTaskId(); + return context->retCode; + } } - } else { - LOG(ERROR) << "RecoverChunk tracker GetResult fail" - << ", ret = " << context->retCode - << ", taskid = " << task->GetTaskId(); - return context->retCode; - } - } else { - // Start a new shard, index++, and reset the start time - context->partIndex++; - context->startTime = TimeUtility::GetTimeofDaySec(); - if (context->partIndex < context->totalPartNum) { - int ret = StartAsyncRecoverChunkPart(task, tracker, context); - if (ret < 0) { - return ret; + else + { + // Start a new shard, index++, and reset the start time + context->partIndex++; + context->startTime = TimeUtility::GetTimeofDaySec(); + if (context->partIndex < context->totalPartNum) + { + int ret = StartAsyncRecoverChunkPart(task, tracker, context); + if (ret < 0) + { + return ret; + } + } + else + { + LOG(INFO) << "RecoverChunk Complete" + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", len = " << context->partSize + << ", taskid = " << task->GetTaskId(); + (*completeChunkNum)++; + } } - } else { - LOG(INFO) << "RecoverChunk Complete" - << ", logicalPoolId = " << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", chunkId = " << context->cidInfo.cid_ - << ", len = " << context->partSize - << ", taskid = " << task->GetTaskId(); - (*completeChunkNum)++; } + return kErrCodeSuccess; } - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::ChangeOwner(std::shared_ptr task, - const FInfo& fInfo) { - (void)fInfo; - std::string user = task->GetCloneInfo().GetUser(); - std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); - - int ret = client_->ChangeOwner(origin, user); - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "ChangeOwner fail, ret = " << ret - << ", fileName = " << origin << ", newOwner = " << user - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - - task->GetCloneInfo().SetNextStep(CloneStep::kRenameCloneFile); - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after ChangeOwner error." 
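// ---------------------------------------------------------------------------
// ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd above advances one chunk's
// parts strictly in order (partIndex++ on every successful part) so that no
// two writes hit the same chunk concurrently, while up to
// recoverChunkConcurrency_ different chunks stay in flight at once. The
// per-chunk state reduces to the sketch below (PartCursor and AdvancePart are
// hypothetical names; partIndex/totalPartNum mirror the context fields above).
#include <cstdint>

struct PartCursor {
    uint64_t partIndex = 0;     // next part to recover within this chunk
    uint64_t totalPartNum = 0;  // chunkSize / cloneChunkSplitSize_
};

// Returns true when the whole chunk is finished, i.e. the caller may count it
// toward completeChunkNum and admit a new chunk into the working set.
inline bool AdvancePart(PartCursor *cursor) {
    cursor->partIndex++;
    return cursor->partIndex >= cursor->totalPartNum;
}
// ---------------------------------------------------------------------------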
- << " ret = " << ret << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::RenameCloneFile(std::shared_ptr task, - const FInfo& fInfo) { - std::string user = fInfo.owner; - uint64_t originId = task->GetCloneInfo().GetOriginId(); - uint64_t destinationId = task->GetCloneInfo().GetDestId(); - std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); - std::string destination = task->GetCloneInfo().GetDest(); - - // Rename first - int ret = client_->RenameCloneFile(mdsRootUser_, originId, destinationId, - origin, destination); - if (-LIBCURVE_ERROR::NOTEXIST == ret) { - // It is possible that it has already been renamed - FInfo destFInfo; - ret = client_->GetFileInfo(destination, mdsRootUser_, &destFInfo); - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "RenameCloneFile return NOTEXIST," - << "And get dest fileInfo fail, ret = " << ret - << ", destination filename = " << destination - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; + + int CloneCoreImpl::ChangeOwner(std::shared_ptr task, + const FInfo &fInfo) + { + (void)fInfo; + std::string user = task->GetCloneInfo().GetUser(); + std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + + int ret = client_->ChangeOwner(origin, user); + if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "ChangeOwner fail, ret = " << ret + << ", fileName = " << origin << ", newOwner = " << user + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + + task->GetCloneInfo().SetNextStep(CloneStep::kRenameCloneFile); + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo after ChangeOwner error." + << " ret = " << ret << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + return kErrCodeSuccess; } - if (destFInfo.id != originId) { - LOG(ERROR) << "RenameCloneFile return NOTEXIST," - << "And get dest file id not equal, ret = " << ret - << "originId = " << originId - << "destFInfo.id = " << destFInfo.id - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; + + int CloneCoreImpl::RenameCloneFile(std::shared_ptr task, + const FInfo &fInfo) + { + std::string user = fInfo.owner; + uint64_t originId = task->GetCloneInfo().GetOriginId(); + uint64_t destinationId = task->GetCloneInfo().GetDestId(); + std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + std::string destination = task->GetCloneInfo().GetDest(); + + // Rename first + int ret = client_->RenameCloneFile(mdsRootUser_, originId, destinationId, + origin, destination); + if (-LIBCURVE_ERROR::NOTEXIST == ret) + { + // It is possible that it has already been renamed + FInfo destFInfo; + ret = client_->GetFileInfo(destination, mdsRootUser_, &destFInfo); + if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "RenameCloneFile return NOTEXIST," + << "And get dest fileInfo fail, ret = " << ret + << ", destination filename = " << destination + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + if (destFInfo.id != originId) + { + LOG(ERROR) << "RenameCloneFile return NOTEXIST," + << "And get dest file id not equal, ret = " << ret + << "originId = " << originId + << "destFInfo.id = " << destFInfo.id + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + } + else if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "RenameCloneFile fail" + << ", ret = " << ret << ", user = " << user + << ", originId 
= " << originId << ", origin = " << origin + << ", destination = " << destination + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + + if (IsLazy(task)) + { + if (IsFile(task)) + { + task->GetCloneInfo().SetNextStep(CloneStep::kCreateCloneMeta); + } + else + { + task->GetCloneInfo().SetNextStep(CloneStep::kRecoverChunk); + } + task->GetCloneInfo().SetStatus(CloneStatus::metaInstalled); + } + else + { + task->GetCloneInfo().SetNextStep(CloneStep::kEnd); + } + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo after RenameCloneFile error." + << " ret = " << ret << ", taskid = " << task->GetTaskId(); + return ret; + } + return kErrCodeSuccess; + } + + int CloneCoreImpl::CompleteCloneFile(std::shared_ptr task, + const FInfo &fInfo, + const CloneSegmentMap &segInfos) + { + (void)fInfo; + (void)segInfos; + std::string fileName; + if (IsLazy(task)) + { + fileName = task->GetCloneInfo().GetDest(); + } + else + { + fileName = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + } + std::string user = task->GetCloneInfo().GetUser(); + int ret = client_->CompleteCloneFile(fileName, mdsRootUser_); + switch (ret) + { + case LIBCURVE_ERROR::OK: + break; + case -LIBCURVE_ERROR::NOTEXIST: + LOG(ERROR) << "CompleteCloneFile " + << "find dest file not exist, maybe deleted" + << ", ret = " << ret << ", destination = " << fileName + << ", user = " << user + << ", taskid = " << task->GetTaskId(); + return kErrCodeFileNotExist; + default: + LOG(ERROR) << "CompleteCloneFile fail" + << ", ret = " << ret << ", fileName = " << fileName + << ", user = " << user + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + if (IsLazy(task)) + { + task->GetCloneInfo().SetNextStep(CloneStep::kEnd); + } + else + { + task->GetCloneInfo().SetNextStep(CloneStep::kChangeOwner); + } + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo after CompleteCloneFile error." + << " ret = " << ret << ", taskid = " << task->GetTaskId(); + return ret; + } + return kErrCodeSuccess; } - } else if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "RenameCloneFile fail" - << ", ret = " << ret << ", user = " << user - << ", originId = " << originId << ", origin = " << origin - << ", destination = " << destination - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - - if (IsLazy(task)) { - if (IsFile(task)) { - task->GetCloneInfo().SetNextStep(CloneStep::kCreateCloneMeta); - } else { - task->GetCloneInfo().SetNextStep(CloneStep::kRecoverChunk); + + void CloneCoreImpl::HandleLazyCloneStage1Finish( + std::shared_ptr task) + { + LOG(INFO) << "Task Lazy Stage1 Success" + << ", TaskInfo : " << *task; + task->GetClosure()->SetErrCode(kErrCodeSuccess); + task->Finish(); + task->GetClosure()->Run(); + return; } - task->GetCloneInfo().SetStatus(CloneStatus::metaInstalled); - } else { - task->GetCloneInfo().SetNextStep(CloneStep::kEnd); - } - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after RenameCloneFile error." 
- << " ret = " << ret << ", taskid = " << task->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::CompleteCloneFile(std::shared_ptr task, - const FInfo& fInfo, - const CloneSegmentMap& segInfos) { - (void)fInfo; - (void)segInfos; - std::string fileName; - if (IsLazy(task)) { - fileName = task->GetCloneInfo().GetDest(); - } else { - fileName = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); - } - std::string user = task->GetCloneInfo().GetUser(); - int ret = client_->CompleteCloneFile(fileName, mdsRootUser_); - switch (ret) { - case LIBCURVE_ERROR::OK: - break; - case -LIBCURVE_ERROR::NOTEXIST: - LOG(ERROR) << "CompleteCloneFile " - << "find dest file not exist, maybe deleted" - << ", ret = " << ret << ", destination = " << fileName - << ", user = " << user - << ", taskid = " << task->GetTaskId(); - return kErrCodeFileNotExist; - default: - LOG(ERROR) << "CompleteCloneFile fail" - << ", ret = " << ret << ", fileName = " << fileName - << ", user = " << user - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - if (IsLazy(task)) { - task->GetCloneInfo().SetNextStep(CloneStep::kEnd); - } else { - task->GetCloneInfo().SetNextStep(CloneStep::kChangeOwner); - } - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after CompleteCloneFile error." - << " ret = " << ret << ", taskid = " << task->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -void CloneCoreImpl::HandleLazyCloneStage1Finish( - std::shared_ptr task) { - LOG(INFO) << "Task Lazy Stage1 Success" - << ", TaskInfo : " << *task; - task->GetClosure()->SetErrCode(kErrCodeSuccess); - task->Finish(); - task->GetClosure()->Run(); - return; -} - -void CloneCoreImpl::HandleCloneSuccess(std::shared_ptr task) { - int ret = kErrCodeSuccess; - if (IsSnapshot(task)) { - snapshotRef_->DecrementSnapshotRef(task->GetCloneInfo().GetSrc()); - } else { - std::string source = task->GetCloneInfo().GetSrc(); - cloneRef_->DecrementRef(source); - NameLockGuard lockGuard(cloneRef_->GetLock(), source); - if (cloneRef_->GetRef(source) == 0) { - int ret = client_->SetCloneFileStatus(source, FileStatus::Created, - mdsRootUser_); - if (ret < 0) { - task->GetCloneInfo().SetStatus(CloneStatus::error); - int ret2 = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret2 < 0) { - LOG(ERROR) << "UpdateCloneInfo Task error Fail!" - << " ret = " << ret2 - << ", uuid = " << task->GetTaskId(); + + void CloneCoreImpl::HandleCloneSuccess(std::shared_ptr task) + { + int ret = kErrCodeSuccess; + if (IsSnapshot(task)) + { + snapshotRef_->DecrementSnapshotRef(task->GetCloneInfo().GetSrc()); + } + else + { + std::string source = task->GetCloneInfo().GetSrc(); + cloneRef_->DecrementRef(source); + NameLockGuard lockGuard(cloneRef_->GetLock(), source); + if (cloneRef_->GetRef(source) == 0) + { + int ret = client_->SetCloneFileStatus(source, FileStatus::Created, + mdsRootUser_); + if (ret < 0) + { + task->GetCloneInfo().SetStatus(CloneStatus::error); + int ret2 = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret2 < 0) + { + LOG(ERROR) << "UpdateCloneInfo Task error Fail!" 
+ << " ret = " << ret2 + << ", uuid = " << task->GetTaskId(); + } + LOG(ERROR) << "Task Fail cause by SetCloneFileStatus fail" + << ", ret = " << ret << ", TaskInfo : " << *task; + task->Finish(); + return; + } } - LOG(ERROR) << "Task Fail cause by SetCloneFileStatus fail" - << ", ret = " << ret << ", TaskInfo : " << *task; - task->Finish(); + } + task->GetCloneInfo().SetStatus(CloneStatus::done); + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo Task Success Fail!" + << " ret = " << ret << ", uuid = " << task->GetTaskId(); + } + task->SetProgress(kProgressCloneComplete); + + LOG(INFO) << "Task Success" + << ", TaskInfo : " << *task; + task->Finish(); + return; + } + + void CloneCoreImpl::HandleCloneError(std::shared_ptr task, + int retCode) + { + int ret = kErrCodeSuccess; + if (NeedRetry(task, retCode)) + { + HandleCloneToRetry(task); return; } + + if (IsLazy(task)) + { + task->GetClosure()->SetErrCode(retCode); + } + if (IsSnapshot(task)) + { + snapshotRef_->DecrementSnapshotRef(task->GetCloneInfo().GetSrc()); + } + else + { + std::string source = task->GetCloneInfo().GetSrc(); + cloneRef_->DecrementRef(source); + NameLockGuard lockGuard(cloneRef_->GetLock(), source); + if (cloneRef_->GetRef(source) == 0) + { + ret = client_->SetCloneFileStatus(source, FileStatus::Created, + mdsRootUser_); + if (ret < 0) + { + LOG(ERROR) << "SetCloneFileStatus fail, ret = " << ret + << ", taskid = " << task->GetTaskId(); + } + } + } + task->GetCloneInfo().SetStatus(CloneStatus::error); + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo Task error Fail!" + << " ret = " << ret << ", uuid = " << task->GetTaskId(); + } + LOG(ERROR) << "Task Fail" + << ", TaskInfo : " << *task; + task->Finish(); + return; } - } - task->GetCloneInfo().SetStatus(CloneStatus::done); - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo Task Success Fail!" - << " ret = " << ret << ", uuid = " << task->GetTaskId(); - } - task->SetProgress(kProgressCloneComplete); - - LOG(INFO) << "Task Success" - << ", TaskInfo : " << *task; - task->Finish(); - return; -} - -void CloneCoreImpl::HandleCloneError(std::shared_ptr task, - int retCode) { - int ret = kErrCodeSuccess; - if (NeedRetry(task, retCode)) { - HandleCloneToRetry(task); - return; - } - - if (IsLazy(task)) { - task->GetClosure()->SetErrCode(retCode); - } - if (IsSnapshot(task)) { - snapshotRef_->DecrementSnapshotRef(task->GetCloneInfo().GetSrc()); - } else { - std::string source = task->GetCloneInfo().GetSrc(); - cloneRef_->DecrementRef(source); - NameLockGuard lockGuard(cloneRef_->GetLock(), source); - if (cloneRef_->GetRef(source) == 0) { - ret = client_->SetCloneFileStatus(source, FileStatus::Created, - mdsRootUser_); - if (ret < 0) { - LOG(ERROR) << "SetCloneFileStatus fail, ret = " << ret - << ", taskid = " << task->GetTaskId(); + + void CloneCoreImpl::HandleCloneToRetry(std::shared_ptr task) + { + task->GetCloneInfo().SetStatus(CloneStatus::retrying); + int ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo Task retrying Fail!" 
+ << " ret = " << ret << ", uuid = " << task->GetTaskId(); + } + LOG(WARNING) << "Task Fail, Retrying" + << ", TaskInfo : " << *task; + task->Finish(); + return; + } + + void CloneCoreImpl::HandleCleanSuccess(std::shared_ptr task) + { + TaskIdType taskId = task->GetCloneInfo().GetTaskId(); + int ret = metaStore_->DeleteCloneInfo(taskId); + if (ret < 0) + { + LOG(ERROR) << "DeleteCloneInfo failed" + << ", ret = " << ret << ", taskId = " << taskId; + } + else + { + LOG(INFO) << "Clean Task Success" + << ", TaskInfo : " << *task; } + task->SetProgress(kProgressCloneComplete); + task->GetCloneInfo().SetStatus(CloneStatus::done); + + task->Finish(); + return; } - } - task->GetCloneInfo().SetStatus(CloneStatus::error); - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo Task error Fail!" - << " ret = " << ret << ", uuid = " << task->GetTaskId(); - } - LOG(ERROR) << "Task Fail" - << ", TaskInfo : " << *task; - task->Finish(); - return; -} - -void CloneCoreImpl::HandleCloneToRetry(std::shared_ptr task) { - task->GetCloneInfo().SetStatus(CloneStatus::retrying); - int ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo Task retrying Fail!" - << " ret = " << ret << ", uuid = " << task->GetTaskId(); - } - LOG(WARNING) << "Task Fail, Retrying" - << ", TaskInfo : " << *task; - task->Finish(); - return; -} - -void CloneCoreImpl::HandleCleanSuccess(std::shared_ptr task) { - TaskIdType taskId = task->GetCloneInfo().GetTaskId(); - int ret = metaStore_->DeleteCloneInfo(taskId); - if (ret < 0) { - LOG(ERROR) << "DeleteCloneInfo failed" - << ", ret = " << ret << ", taskId = " << taskId; - } else { - LOG(INFO) << "Clean Task Success" - << ", TaskInfo : " << *task; - } - task->SetProgress(kProgressCloneComplete); - task->GetCloneInfo().SetStatus(CloneStatus::done); - - task->Finish(); - return; -} - -void CloneCoreImpl::HandleCleanError(std::shared_ptr task) { - task->GetCloneInfo().SetStatus(CloneStatus::error); - int ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo Task error Fail!" 
- << " ret = " << ret << ", uuid = " << task->GetTaskId(); - } - LOG(ERROR) << "Clean Task Fail" - << ", TaskInfo : " << *task; - task->Finish(); - return; -} - -int CloneCoreImpl::GetCloneInfoList(std::vector* taskList) { - metaStore_->GetCloneInfoList(taskList); - return kErrCodeSuccess; -} - -int CloneCoreImpl::GetCloneInfo(TaskIdType taskId, CloneInfo* cloneInfo) { - return metaStore_->GetCloneInfo(taskId, cloneInfo); -} - -int CloneCoreImpl::GetCloneInfoByFileName(const std::string& fileName, - std::vector* list) { - return metaStore_->GetCloneInfoByFileName(fileName, list); -} - -inline bool CloneCoreImpl::IsLazy(std::shared_ptr task) { - return task->GetCloneInfo().GetIsLazy(); -} - -inline bool CloneCoreImpl::IsSnapshot(std::shared_ptr task) { - return CloneFileType::kSnapshot == task->GetCloneInfo().GetFileType(); -} - -inline bool CloneCoreImpl::IsFile(std::shared_ptr task) { - return CloneFileType::kFile == task->GetCloneInfo().GetFileType(); -} - -inline bool CloneCoreImpl::IsRecover(std::shared_ptr task) { - return CloneTaskType::kRecover == task->GetCloneInfo().GetTaskType(); -} - -inline bool CloneCoreImpl::IsClone(std::shared_ptr task) { - return CloneTaskType::kClone == task->GetCloneInfo().GetTaskType(); -} - -bool CloneCoreImpl::NeedUpdateCloneMeta(std::shared_ptr task) { - bool ret = true; - CloneStep step = task->GetCloneInfo().GetNextStep(); - if (CloneStep::kCreateCloneFile == step || - CloneStep::kCreateCloneMeta == step || CloneStep::kEnd == step) { - ret = false; - } - return ret; -} - -bool CloneCoreImpl::NeedRetry(std::shared_ptr task, - int retCode) { - if (IsLazy(task)) { - CloneStep step = task->GetCloneInfo().GetNextStep(); - if (CloneStep::kRecoverChunk == step || - CloneStep::kCompleteCloneFile == step || CloneStep::kEnd == step) { - // In scenarios where the file does not exist, there is no need to - // retry as it may have been deleted - if (retCode != kErrCodeFileNotExist) { - return true; + + void CloneCoreImpl::HandleCleanError(std::shared_ptr task) + { + task->GetCloneInfo().SetStatus(CloneStatus::error); + int ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo Task error Fail!" 
+ << " ret = " << ret << ", uuid = " << task->GetTaskId(); } + LOG(ERROR) << "Clean Task Fail" + << ", TaskInfo : " << *task; + task->Finish(); + return; } - } - return false; -} - -int CloneCoreImpl::CreateOrUpdateCloneMeta(std::shared_ptr task, - FInfo* fInfo, - CloneSegmentMap* segInfos) { - std::string newFileName = - cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); - std::string user = fInfo->owner; - FInfo fInfoOut; - int ret = client_->GetFileInfo(newFileName, mdsRootUser_, &fInfoOut); - if (LIBCURVE_ERROR::OK == ret) { - // nothing - } else if (-LIBCURVE_ERROR::NOTEXIST == ret) { - // Perhaps it has already been renamed - newFileName = task->GetCloneInfo().GetDest(); - ret = client_->GetFileInfo(newFileName, mdsRootUser_, &fInfoOut); - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "File is missing, " - << "when CreateOrUpdateCloneMeta, " - << "GetFileInfo fail, ret = " << ret - << ", filename = " << newFileName - << ", taskid = " << task->GetTaskId(); - return kErrCodeFileNotExist; + + int CloneCoreImpl::GetCloneInfoList(std::vector *taskList) + { + metaStore_->GetCloneInfoList(taskList); + return kErrCodeSuccess; } - // If it has already been renamed, then the id should be consistent - uint64_t originId = task->GetCloneInfo().GetOriginId(); - if (fInfoOut.id != originId) { - LOG(ERROR) << "File is missing, fileId not equal, " - << "when CreateOrUpdateCloneMeta" - << ", fileId = " << fInfoOut.id - << ", originId = " << originId - << ", filename = " << newFileName - << ", taskid = " << task->GetTaskId(); - return kErrCodeFileNotExist; + + int CloneCoreImpl::GetCloneInfo(TaskIdType taskId, CloneInfo *cloneInfo) + { + return metaStore_->GetCloneInfo(taskId, cloneInfo); } - } else { - LOG(ERROR) << "GetFileInfo fail" - << ", ret = " << ret << ", filename = " << newFileName - << ", user = " << user << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - // Update fInfo - *fInfo = fInfoOut; - // GetOrAllocateSegment depends on fullPathName and needs to be updated here - fInfo->fullPathName = newFileName; - - uint32_t segmentSize = fInfo->segmentsize; - for (auto& segInfo : *segInfos) { - SegmentInfo segInfoOut; - uint64_t offset = segInfo.first * segmentSize; - ret = client_->GetOrAllocateSegmentInfo(true, offset, fInfo, - mdsRootUser_, &segInfoOut); - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "GetOrAllocateSegmentInfo fail" - << ", newFileName = " << newFileName - << ", user = " << user << ", offset = " << offset - << ", allocateIfNotExist = " - << "true" - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; + + int CloneCoreImpl::GetCloneInfoByFileName(const std::string &fileName, + std::vector *list) + { + return metaStore_->GetCloneInfoByFileName(fileName, list); } - for (auto& cloneChunkInfo : segInfo.second) { - if (cloneChunkInfo.first > segInfoOut.chunkvec.size()) { - LOG(ERROR) << "can not find chunkIndexInSeg = " - << cloneChunkInfo.first - << ", segmentIndex = " << segInfo.first - << ", logicalPoolId = " - << cloneChunkInfo.second.chunkIdInfo.lpid_ - << ", copysetId = " - << cloneChunkInfo.second.chunkIdInfo.cpid_ - << ", chunkId = " - << cloneChunkInfo.second.chunkIdInfo.cid_ - << ", taskid = " << task->GetTaskId(); + inline bool CloneCoreImpl::IsLazy(std::shared_ptr task) + { + return task->GetCloneInfo().GetIsLazy(); + } + + inline bool CloneCoreImpl::IsSnapshot(std::shared_ptr task) + { + return CloneFileType::kSnapshot == task->GetCloneInfo().GetFileType(); + } + + inline bool CloneCoreImpl::IsFile(std::shared_ptr 
-int CloneCoreImpl::CreateOrUpdateCloneMeta(std::shared_ptr<CloneTaskInfo> task,
-                                           FInfo* fInfo,
-                                           CloneSegmentMap* segInfos) {
-    std::string newFileName =
-        cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId();
-    std::string user = fInfo->owner;
-    FInfo fInfoOut;
-    int ret = client_->GetFileInfo(newFileName, mdsRootUser_, &fInfoOut);
-    if (LIBCURVE_ERROR::OK == ret) {
-        // nothing
-    } else if (-LIBCURVE_ERROR::NOTEXIST == ret) {
-        // Perhaps it has already been renamed
-        newFileName = task->GetCloneInfo().GetDest();
-        ret = client_->GetFileInfo(newFileName, mdsRootUser_, &fInfoOut);
-        if (ret != LIBCURVE_ERROR::OK) {
-            LOG(ERROR) << "File is missing, "
-                       << "when CreateOrUpdateCloneMeta, "
-                       << "GetFileInfo fail, ret = " << ret
-                       << ", filename = " << newFileName
-                       << ", taskid = " << task->GetTaskId();
-            return kErrCodeFileNotExist;
-        }
-        // If it has already been renamed, then the id should be consistent
-        uint64_t originId = task->GetCloneInfo().GetOriginId();
-        if (fInfoOut.id != originId) {
-            LOG(ERROR) << "File is missing, fileId not equal, "
-                       << "when CreateOrUpdateCloneMeta"
-                       << ", fileId = " << fInfoOut.id
-                       << ", originId = " << originId
-                       << ", filename = " << newFileName
-                       << ", taskid = " << task->GetTaskId();
-            return kErrCodeFileNotExist;
-        }
-    } else {
-        LOG(ERROR) << "GetFileInfo fail"
-                   << ", ret = " << ret << ", filename = " << newFileName
-                   << ", user = " << user << ", taskid = " << task->GetTaskId();
-        return kErrCodeInternalError;
-    }
-    // Update fInfo
-    *fInfo = fInfoOut;
-    // GetOrAllocateSegment depends on fullPathName and needs to be updated here
-    fInfo->fullPathName = newFileName;
-
-    uint32_t segmentSize = fInfo->segmentsize;
-    for (auto& segInfo : *segInfos) {
-        SegmentInfo segInfoOut;
-        uint64_t offset = segInfo.first * segmentSize;
-        ret = client_->GetOrAllocateSegmentInfo(true, offset, fInfo,
-                                                mdsRootUser_, &segInfoOut);
-        if (ret != LIBCURVE_ERROR::OK) {
-            LOG(ERROR) << "GetOrAllocateSegmentInfo fail"
-                       << ", newFileName = " << newFileName
-                       << ", user = " << user << ", offset = " << offset
-                       << ", allocateIfNotExist = "
-                       << "true"
-                       << ", taskid = " << task->GetTaskId();
-            return kErrCodeInternalError;
-        }
-        for (auto& cloneChunkInfo : segInfo.second) {
-            if (cloneChunkInfo.first > segInfoOut.chunkvec.size()) {
-                LOG(ERROR) << "can not find chunkIndexInSeg = "
-                           << cloneChunkInfo.first
-                           << ", segmentIndex = " << segInfo.first
-                           << ", logicalPoolId = "
-                           << cloneChunkInfo.second.chunkIdInfo.lpid_
-                           << ", copysetId = "
-                           << cloneChunkInfo.second.chunkIdInfo.cpid_
-                           << ", chunkId = "
-                           << cloneChunkInfo.second.chunkIdInfo.cid_
-                           << ", taskid = " << task->GetTaskId();
-                return kErrCodeInternalError;
-            }
-            cloneChunkInfo.second.chunkIdInfo =
-                segInfoOut.chunkvec[cloneChunkInfo.first];
-        }
-    }
-    return kErrCodeSuccess;
-}
-
+
+        int CloneCoreImpl::CreateOrUpdateCloneMeta(std::shared_ptr<CloneTaskInfo> task,
+                                                   FInfo *fInfo,
+                                                   CloneSegmentMap *segInfos)
+        {
+            std::string newFileName =
+                cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId();
+            std::string user = fInfo->owner;
+            FInfo fInfoOut;
+            int ret = client_->GetFileInfo(newFileName, mdsRootUser_, &fInfoOut);
+            if (LIBCURVE_ERROR::OK == ret)
+            {
+                // nothing
+            }
+            else if (-LIBCURVE_ERROR::NOTEXIST == ret)
+            {
+                // Perhaps it has already been renamed
+                newFileName = task->GetCloneInfo().GetDest();
+                ret = client_->GetFileInfo(newFileName, mdsRootUser_, &fInfoOut);
+                if (ret != LIBCURVE_ERROR::OK)
+                {
+                    LOG(ERROR) << "File is missing, "
+                               << "when CreateOrUpdateCloneMeta, "
+                               << "GetFileInfo fail, ret = " << ret
+                               << ", filename = " << newFileName
+                               << ", taskid = " << task->GetTaskId();
+                    return kErrCodeFileNotExist;
+                }
+                // If it has already been renamed, then the id should be consistent
+                uint64_t originId = task->GetCloneInfo().GetOriginId();
+                if (fInfoOut.id != originId)
+                {
+                    LOG(ERROR) << "File is missing, fileId not equal, "
+                               << "when CreateOrUpdateCloneMeta"
+                               << ", fileId = " << fInfoOut.id
+                               << ", originId = " << originId
+                               << ", filename = " << newFileName
+                               << ", taskid = " << task->GetTaskId();
+                    return kErrCodeFileNotExist;
+                }
+            }
+            else
+            {
+                LOG(ERROR) << "GetFileInfo fail"
+                           << ", ret = " << ret << ", filename = " << newFileName
+                           << ", user = " << user << ", taskid = " << task->GetTaskId();
+                return kErrCodeInternalError;
+            }
+            // Update fInfo
+            *fInfo = fInfoOut;
+            // GetOrAllocateSegment depends on fullPathName and needs to be updated here
+            fInfo->fullPathName = newFileName;
+
+            uint32_t segmentSize = fInfo->segmentsize;
+            for (auto &segInfo : *segInfos)
+            {
+                SegmentInfo segInfoOut;
+                uint64_t offset = segInfo.first * segmentSize;
+                ret = client_->GetOrAllocateSegmentInfo(true, offset, fInfo,
+                                                        mdsRootUser_, &segInfoOut);
+                if (ret != LIBCURVE_ERROR::OK)
+                {
+                    LOG(ERROR) << "GetOrAllocateSegmentInfo fail"
+                               << ", newFileName = " << newFileName
+                               << ", user = " << user << ", offset = " << offset
+                               << ", allocateIfNotExist = "
+                               << "true"
+                               << ", taskid = " << task->GetTaskId();
+                    return kErrCodeInternalError;
+                }
+
+                for (auto &cloneChunkInfo : segInfo.second)
+                {
+                    if (cloneChunkInfo.first > segInfoOut.chunkvec.size())
+                    {
+                        LOG(ERROR) << "can not find chunkIndexInSeg = "
+                                   << cloneChunkInfo.first
+                                   << ", segmentIndex = " << segInfo.first
+                                   << ", logicalPoolId = "
+                                   << cloneChunkInfo.second.chunkIdInfo.lpid_
+                                   << ", copysetId = "
+                                   << cloneChunkInfo.second.chunkIdInfo.cpid_
+                                   << ", chunkId = "
+                                   << cloneChunkInfo.second.chunkIdInfo.cid_
+                                   << ", taskid = " << task->GetTaskId();
+                        return kErrCodeInternalError;
+                    }
+                    cloneChunkInfo.second.chunkIdInfo =
+                        segInfoOut.chunkvec[cloneChunkInfo.first];
+                }
+            }
+            return kErrCodeSuccess;
+        }
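Note: CreateOrUpdateCloneMeta recomputes each segment's byte offset as segmentIndex * segmentsize and rebinds every recorded clone chunk to the freshly allocated entry in segInfoOut.chunkvec. The arithmetic worked through in isolation (ChunkId is a stand-in for ChunkIDInfo; this sketch uses >= for the bounds check):

    #include <cstdint>
    #include <vector>

    using ChunkId = uint64_t;  // stand-in for ChunkIDInfo

    // E.g. with a 1 GiB segment size, segment 3 starts at byte offset 3 GiB.
    uint64_t SegmentOffset(uint64_t segmentIndex, uint64_t segmentSize) {
        return segmentIndex * segmentSize;
    }

    // Rebind a chunk recorded by its index in the segment to the newly
    // allocated chunk id.
    bool RebindChunk(const std::vector<ChunkId>& chunkvec, uint32_t indexInSeg,
                     ChunkId* out) {
        if (indexInSeg >= chunkvec.size()) return false;  // bounds check first
        *out = chunkvec[indexInSeg];
        return true;
    }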
-int CloneCoreImpl::CleanCloneOrRecoverTaskPre(const std::string& user,
-                                              const TaskIdType& taskId,
-                                              CloneInfo* cloneInfo) {
-    int ret = metaStore_->GetCloneInfo(taskId, cloneInfo);
-    if (ret < 0) {
-        // Directly returns success when it does not exist, making the interface
-        // idempotent
-        return kErrCodeSuccess;
-    }
-    if (cloneInfo->GetUser() != user) {
-        LOG(ERROR) << "CleanCloneOrRecoverTaskPre by Invalid user";
-        return kErrCodeInvalidUser;
-    }
-    switch (cloneInfo->GetStatus()) {
-        case CloneStatus::done:
-            cloneInfo->SetStatus(CloneStatus::cleaning);
-            break;
-        case CloneStatus::error:
-            cloneInfo->SetStatus(CloneStatus::errorCleaning);
-            break;
-        case CloneStatus::cleaning:
-        case CloneStatus::errorCleaning:
-            return kErrCodeTaskExist;
-            break;
-        default:
-            LOG(ERROR) << "Can not clean clone/recover task unfinished.";
-            return kErrCodeCannotCleanCloneUnfinished;
-            break;
-    }
-
-    ret = metaStore_->UpdateCloneInfo(*cloneInfo);
-    if (ret < 0) {
-        LOG(ERROR) << "UpdateCloneInfo fail"
-                   << ", ret = " << ret << ", taskId = " << taskId;
-        return ret;
-    }
-    return kErrCodeSuccess;
-}
-
+
+        int CloneCoreImpl::CleanCloneOrRecoverTaskPre(const std::string &user,
+                                                      const TaskIdType &taskId,
+                                                      CloneInfo *cloneInfo)
+        {
+            int ret = metaStore_->GetCloneInfo(taskId, cloneInfo);
+            if (ret < 0)
+            {
+                // Directly returns success when it does not exist, making the interface
+                // idempotent
+                return kErrCodeSuccess;
+            }
+            if (cloneInfo->GetUser() != user)
+            {
+                LOG(ERROR) << "CleanCloneOrRecoverTaskPre by Invalid user";
+                return kErrCodeInvalidUser;
+            }
+            switch (cloneInfo->GetStatus())
+            {
+            case CloneStatus::done:
+                cloneInfo->SetStatus(CloneStatus::cleaning);
+                break;
+            case CloneStatus::error:
+                cloneInfo->SetStatus(CloneStatus::errorCleaning);
+                break;
+            case CloneStatus::cleaning:
+            case CloneStatus::errorCleaning:
+                return kErrCodeTaskExist;
+                break;
+            default:
+                LOG(ERROR) << "Can not clean clone/recover task unfinished.";
+                return kErrCodeCannotCleanCloneUnfinished;
+                break;
+            }
+
+            ret = metaStore_->UpdateCloneInfo(*cloneInfo);
+            if (ret < 0)
+            {
+                LOG(ERROR) << "UpdateCloneInfo fail"
+                           << ", ret = " << ret << ", taskId = " << taskId;
+                return ret;
+            }
+            return kErrCodeSuccess;
+        }
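Note: CleanCloneOrRecoverTaskPre only admits tasks that have stopped moving — done becomes cleaning, error becomes errorCleaning, a task already being cleaned reports kErrCodeTaskExist (keeping the call idempotent), and anything still running is refused. The transition table as a sketch (return values are local stand-ins for the error codes above):

    enum class CloneStatus { done, error, cleaning, errorCleaning, other };

    int PreCheck(CloneStatus s, CloneStatus* next) {
        switch (s) {
        case CloneStatus::done:
            *next = CloneStatus::cleaning;       // finished task: drop record only
            return 0;                            // kErrCodeSuccess
        case CloneStatus::error:
            *next = CloneStatus::errorCleaning;  // failed task: also delete temp file
            return 0;
        case CloneStatus::cleaning:
        case CloneStatus::errorCleaning:
            return 1;                            // kErrCodeTaskExist
        default:
            return 2;                            // kErrCodeCannotCleanCloneUnfinished
        }
    }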
-void CloneCoreImpl::HandleCleanCloneOrRecoverTask(
-    std::shared_ptr<CloneTaskInfo> task) {
-    // Only the wrong clone/recover task cleans up temporary files
-    if (CloneStatus::errorCleaning == task->GetCloneInfo().GetStatus()) {
-        // In the event of an error, the mirror being cloned flag may not be
-        // cleared
-        if (IsFile(task)) {
-            // Resend
-            std::string source = task->GetCloneInfo().GetSrc();
-            NameLockGuard lockGuard(cloneRef_->GetLock(), source);
-            if (cloneRef_->GetRef(source) == 0) {
-                int ret = client_->SetCloneFileStatus(
-                    source, FileStatus::Created, mdsRootUser_);
-                if (ret != LIBCURVE_ERROR::OK &&
-                    ret != -LIBCURVE_ERROR::NOTEXIST) {
-                    LOG(ERROR) << "SetCloneFileStatus fail, ret = " << ret
-                               << ", taskid = " << task->GetTaskId();
-                    HandleCleanError(task);
-                    return;
-                }
-            }
-        }
-        std::string tempFileName =
-            cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId();
-        uint64_t fileId = task->GetCloneInfo().GetOriginId();
-        std::string user = task->GetCloneInfo().GetUser();
-        int ret = client_->DeleteFile(tempFileName, mdsRootUser_, fileId);
-        if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOTEXIST) {
-            LOG(ERROR) << "DeleteFile failed"
-                       << ", ret = " << ret << ", fileName = " << tempFileName
-                       << ", user = " << user << ", fileId = " << fileId
-                       << ", taskid = " << task->GetTaskId();
-            HandleCleanError(task);
-            return;
-        }
-    }
-    HandleCleanSuccess(task);
-    return;
-}
-
+
+        void CloneCoreImpl::HandleCleanCloneOrRecoverTask(
+            std::shared_ptr<CloneTaskInfo> task)
+        {
+            // Only the wrong clone/recover task cleans up temporary files
+            if (CloneStatus::errorCleaning == task->GetCloneInfo().GetStatus())
+            {
+                // In the event of an error, the mirror being cloned flag may not be
+                // cleared
+                if (IsFile(task))
+                {
+                    // Resend
+                    std::string source = task->GetCloneInfo().GetSrc();
+                    NameLockGuard lockGuard(cloneRef_->GetLock(), source);
+                    if (cloneRef_->GetRef(source) == 0)
+                    {
+                        int ret = client_->SetCloneFileStatus(
+                            source, FileStatus::Created, mdsRootUser_);
+                        if (ret != LIBCURVE_ERROR::OK &&
+                            ret != -LIBCURVE_ERROR::NOTEXIST)
+                        {
+                            LOG(ERROR) << "SetCloneFileStatus fail, ret = " << ret
+                                       << ", taskid = " << task->GetTaskId();
+                            HandleCleanError(task);
+                            return;
+                        }
+                    }
+                }
+                std::string tempFileName =
+                    cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId();
+                uint64_t fileId = task->GetCloneInfo().GetOriginId();
+                std::string user = task->GetCloneInfo().GetUser();
+                int ret = client_->DeleteFile(tempFileName, mdsRootUser_, fileId);
+                if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOTEXIST)
+                {
+                    LOG(ERROR) << "DeleteFile failed"
+                               << ", ret = " << ret << ", fileName = " << tempFileName
+                               << ", user = " << user << ", fileId = " << fileId
+                               << ", taskid = " << task->GetTaskId();
+                    HandleCleanError(task);
+                    return;
+                }
+            }
+            HandleCleanSuccess(task);
+            return;
+        }
ret << ", TaskInfo : " << *task; + return kErrCodeInternalError; + } + } + } + + return kErrCodeSuccess; } - } - - return kErrCodeSuccess; -} - -int CloneCoreImpl::CheckFileExists(const std::string& filename, - uint64_t inodeId) { - FInfo destFInfo; - int ret = client_->GetFileInfo(filename, mdsRootUser_, &destFInfo); - if (ret == LIBCURVE_ERROR::OK) { - if (destFInfo.id == inodeId) { - return kErrCodeFileExist; - } else { - return kErrCodeFileNotExist; + + int CloneCoreImpl::CheckFileExists(const std::string &filename, + uint64_t inodeId) + { + FInfo destFInfo; + int ret = client_->GetFileInfo(filename, mdsRootUser_, &destFInfo); + if (ret == LIBCURVE_ERROR::OK) + { + if (destFInfo.id == inodeId) + { + return kErrCodeFileExist; + } + else + { + return kErrCodeFileNotExist; + } + } + + if (ret == -LIBCURVE_ERROR::NOTEXIST) + { + return kErrCodeFileNotExist; + } + + return kErrCodeInternalError; } - } - - if (ret == -LIBCURVE_ERROR::NOTEXIST) { - return kErrCodeFileNotExist; - } - - return kErrCodeInternalError; -} - -// When adding or subtracting reference counts, the interface will lock the -// reference count map; When adding a reference count and reducing the reference -// count to 0, an additional lock needs to be added to the modified record. -int CloneCoreImpl::HandleDeleteCloneInfo(const CloneInfo& cloneInfo) { - // First, reduce the reference count. If you are cloning from a mirror and - // the reference count is reduced to 0, you need to modify the status of the - // source mirror to 'created' - std::string source = cloneInfo.GetSrc(); - if (cloneInfo.GetFileType() == CloneFileType::kSnapshot) { - snapshotRef_->DecrementSnapshotRef(source); - } else { - cloneRef_->DecrementRef(source); - NameLockGuard lockGuard(cloneRef_->GetLock(), source); - if (cloneRef_->GetRef(source) == 0) { - int ret = client_->SetCloneFileStatus(source, FileStatus::Created, - mdsRootUser_); - if (ret == -LIBCURVE_ERROR::NOTEXIST) { - LOG(WARNING) << "SetCloneFileStatus, file not exist, filename: " - << source; - } else if (ret != LIBCURVE_ERROR::OK) { - cloneRef_->IncrementRef(source); - LOG(ERROR) << "SetCloneFileStatus fail" - << ", ret = " << ret - << ", cloneInfo : " << cloneInfo; + + // When adding or subtracting reference counts, the interface will lock the + // reference count map; When adding a reference count and reducing the reference + // count to 0, an additional lock needs to be added to the modified record. + int CloneCoreImpl::HandleDeleteCloneInfo(const CloneInfo &cloneInfo) + { + // First, reduce the reference count. If you are cloning from a mirror and + // the reference count is reduced to 0, you need to modify the status of the + // source mirror to 'created' + std::string source = cloneInfo.GetSrc(); + if (cloneInfo.GetFileType() == CloneFileType::kSnapshot) + { + snapshotRef_->DecrementSnapshotRef(source); + } + else + { + cloneRef_->DecrementRef(source); + NameLockGuard lockGuard(cloneRef_->GetLock(), source); + if (cloneRef_->GetRef(source) == 0) + { + int ret = client_->SetCloneFileStatus(source, FileStatus::Created, + mdsRootUser_); + if (ret == -LIBCURVE_ERROR::NOTEXIST) + { + LOG(WARNING) << "SetCloneFileStatus, file not exist, filename: " + << source; + } + else if (ret != LIBCURVE_ERROR::OK) + { + cloneRef_->IncrementRef(source); + LOG(ERROR) << "SetCloneFileStatus fail" + << ", ret = " << ret + << ", cloneInfo : " << cloneInfo; + return kErrCodeInternalError; + } + } + } + + // Delete this record. 
-}  // namespace snapshotcloneserver
-}  // namespace curve
+    }  // namespace snapshotcloneserver
+}  // namespace curve
diff --git a/test/chunkserver/copyset_node_test.cpp b/test/chunkserver/copyset_node_test.cpp
index c81a4b9358..4a30fae926 100644
--- a/test/chunkserver/copyset_node_test.cpp
+++ b/test/chunkserver/copyset_node_test.cpp
@@ -42,1059 +42,1074 @@
 #include "test/chunkserver/mock_node.h"
 #include "test/fs/mock_local_filesystem.h"
 
-namespace curve {
-namespace chunkserver {
-
-using ::testing::_;
-using ::testing::AnyNumber;
-using ::testing::AtLeast;
-using ::testing::DoAll;
-using ::testing::InSequence;
-using ::testing::Invoke;
-using ::testing::Matcher;
-using ::testing::Return;
-using ::testing::SaveArgPointee;
-using ::testing::SetArgPointee;
-using ::testing::SetArgReferee;
-
-using curve::chunkserver::concurrent::ConcurrentApplyOption;
-using curve::fs::FileSystemType;
-using curve::fs::MockLocalFileSystem;
-
-const char copysetUri[] = "local://./copyset_node_test";
-const int port = 9044;
-
-class FakeSnapshotReader : public braft::SnapshotReader {
- public:
-    std::string get_path() {
-        /*Returns a non-existent path*/
-        return std::string("/1002093939/temp/238408034");
-    }
-    void list_files(std::vector<std::string>* files) { return; }
-    int load_meta(braft::SnapshotMeta* meta) { return 1; }
-    std::string generate_uri_for_copy() { return std::string(""); }
-};
-
-class FakeSnapshotWriter : public braft::SnapshotWriter {
- public:
-    std::string get_path() {
-        /*Returns a non-existent path*/
-        return std::string(".");
-    }
-    void list_files(std::vector<std::string>* files) { return; }
-    virtual int save_meta(const braft::SnapshotMeta& meta) { return 0; }
-
-    virtual int add_file(const std::string& filename) { return 0; }
-
-    virtual int add_file(const std::string& filename,
-                         const ::google::protobuf::Message* file_meta) {
-        return 0;
-    }
-
-    virtual int remove_file(const std::string& filename) { return 0; }
-};
-
-class FakeClosure : public braft::Closure {
- public:
-    void Run() { std::cerr << "FakeClosure run" << std::endl; }
-};
-
-class CopysetNodeTest : public ::testing::Test {
- protected:
-    void SetUp() {
-        defaultOptions_.ip = "127.0.0.1";
-        defaultOptions_.port = port;
-        defaultOptions_.electionTimeoutMs = 1000;
-        defaultOptions_.snapshotIntervalS = 30;
-        defaultOptions_.catchupMargin = 50;
-        defaultOptions_.chunkDataUri = copysetUri;
-        defaultOptions_.chunkSnapshotUri = copysetUri;
-        defaultOptions_.logUri = copysetUri;
-        defaultOptions_.raftMetaUri = copysetUri;
-        defaultOptions_.raftSnapshotUri = copysetUri;
-        defaultOptions_.loadConcurrency = 5;
-        defaultOptions_.syncConcurrency = 20;
-        defaultOptions_.checkRetryTimes = 3;
-        defaultOptions_.finishLoadMargin = 1000;
-
-        defaultOptions_.concurrentapply = &concurrentModule_;
-        ConcurrentApplyOption opt{2, 1, 2, 1};
-        defaultOptions_.concurrentapply->Init(opt);
-        std::shared_ptr<LocalFileSystem> fs =
-            LocalFsFactory::CreateFs(FileSystemType::EXT4, "");
-        ASSERT_TRUE(nullptr != fs);
-        defaultOptions_.localFileSystem = fs;
-        defaultOptions_.chunkFilePool = std::make_shared<FilePool>(fs);
-        defaultOptions_.trash = std::make_shared<Trash>();
-        defaultOptions_.enableOdsyncWhenOpenChunkFile = true;
-    }
-
-    void TearDown() { ::system("rm -rf copyset_node_test"); }
-
- protected:
-    CopysetNodeOptions defaultOptions_;
-    ConcurrentApplyModule concurrentModule_;
-};
-
-TEST_F(CopysetNodeTest, error_test) {
-    std::shared_ptr<LocalFileSystem> fs(
-        LocalFsFactory::CreateFs(FileSystemType::EXT4, ""));  // NOLINT
-    std::string rmCmd("rm -f ");
-    rmCmd += kCurveConfEpochFilename;
-
-    // on_snapshot_save: List failed
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        Configuration conf;
-        std::vector<std::string> files;
-        files.push_back("test-1.txt");
-        files.push_back("test-2.txt");
-
-        const char* json =
-            "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":"
-            "774340440}";  // NOLINT
-        std::string jsonStr(json);
-
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        FakeClosure closure;
-        FakeSnapshotWriter writer;
-        std::shared_ptr<MockLocalFileSystem> mockfs =
-            std::make_shared<MockLocalFileSystem>();
-        std::unique_ptr<ConfEpochFile> epochFile(new ConfEpochFile(mockfs));
-
-        copysetNode.SetLocalFileSystem(mockfs);
-        copysetNode.SetConfEpochFile(std::move(epochFile));
-        EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10));
-        EXPECT_CALL(*mockfs, Write(_, Matcher<const char*>(_), _, _))
-            .Times(1)
-            .WillOnce(Return(jsonStr.size()));
-        EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0));
-        EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0));
-        EXPECT_CALL(*mockfs, List(_, _)).Times(1).WillOnce(Return(-1));
-
-        copysetNode.on_snapshot_save(&writer, &closure);
-        copysetNode.WaitSnapshotDone();
-        LOG(INFO) << closure.status().error_cstr();
-    }
-
-    // on_snapshot_save: save conf open failed
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        Configuration conf;
-        std::vector<std::string> files;
-        files.push_back("test-1.txt");
-        files.push_back("test-2.txt");
-
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        FakeClosure closure;
-        FakeSnapshotWriter writer;
-        std::shared_ptr<MockLocalFileSystem> mockfs =
-            std::make_shared<MockLocalFileSystem>();
-        std::unique_ptr<ConfEpochFile> epochFile(new ConfEpochFile(mockfs));
-        ;
-
-        copysetNode.SetLocalFileSystem(mockfs);
-        copysetNode.SetConfEpochFile(std::move(epochFile));
-        EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1));
-
-        copysetNode.on_snapshot_save(&writer, &closure);
-        copysetNode.WaitSnapshotDone();
-        LOG(INFO) << closure.status().error_cstr();
-    }
-    // on_snapshot_save: success
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        Configuration conf;
-        std::vector<std::string> files;
-        files.push_back("test-1.txt");
-        files.push_back("test-2.txt");
-
-        const char* json =
-            "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":"
-            "774340440}";  // NOLINT
-        std::string jsonStr(json);
-
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        FakeClosure closure;
-        FakeSnapshotWriter writer;
-        std::shared_ptr<MockLocalFileSystem> mockfs =
-            std::make_shared<MockLocalFileSystem>();
-        std::unique_ptr<ConfEpochFile> epochFile(new ConfEpochFile(mockfs));
-        ;
-
-        copysetNode.SetLocalFileSystem(mockfs);
-        copysetNode.SetConfEpochFile(std::move(epochFile));
-        EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10));
-        EXPECT_CALL(*mockfs, Write(_, Matcher<const char*>(_), _, _))
-            .Times(1)
-            .WillOnce(Return(jsonStr.size()));
-        EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0));
-        EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0));
-        EXPECT_CALL(*mockfs, List(_, _))
-            .Times(1)
-            .WillOnce(DoAll(SetArgPointee<1>(files), Return(0)));
-
-        copysetNode.on_snapshot_save(&writer, &closure);
-        copysetNode.WaitSnapshotDone();
-    }
-
-    // on_snapshot_save: success, enableOdsyncWhenOpenChunkFile_ = false
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        Configuration conf;
-        std::vector<std::string> files;
-        files.push_back("test-1.txt");
-        files.push_back("test-2.txt");
-
-        const char* json =
-            "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":"
-            "774340440}";  // NOLINT
-        std::string jsonStr(json);
-
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        defaultOptions_.enableOdsyncWhenOpenChunkFile = false;
-        defaultOptions_.syncConcurrency = 20;
-        defaultOptions_.syncChunkLimit = 2 * 1024 * 1024;
-        defaultOptions_.syncThreshold = 65536;
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        FakeClosure closure;
-        FakeSnapshotWriter writer;
-        std::shared_ptr<MockLocalFileSystem> mockfs =
-            std::make_shared<MockLocalFileSystem>();
-        std::unique_ptr<ConfEpochFile> epochFile(new ConfEpochFile(mockfs));
-        ;
-
-        copysetNode.SetLocalFileSystem(mockfs);
-        copysetNode.SetConfEpochFile(std::move(epochFile));
-        EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10));
-        EXPECT_CALL(*mockfs, Write(_, Matcher<const char*>(_), _, _))
-            .Times(1)
-            .WillOnce(Return(jsonStr.size()));
-        EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0));
-        EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0));
-        EXPECT_CALL(*mockfs, List(_, _))
-            .Times(1)
-            .WillOnce(DoAll(SetArgPointee<1>(files), Return(0)));
-
-        copysetNode.on_snapshot_save(&writer, &closure);
-        copysetNode.WaitSnapshotDone();
-    }
-    // ShipToSync & handle sync time out
-    {
-        CopysetNode::copysetSyncPool_ =
-            std::make_shared<TaskThreadPool<>>();
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        Configuration conf;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-
-        defaultOptions_.enableOdsyncWhenOpenChunkFile = false;
-        defaultOptions_.syncConcurrency = 20;
-        defaultOptions_.syncChunkLimit = 2 * 1024 * 1024;
-        defaultOptions_.syncThreshold = 65536;
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-
-        ChunkID id1 = 100;
-        ChunkID id2 = 200;
-        ChunkID id3 = 100;
-        copysetNode.ShipToSync(id1);
-        copysetNode.ShipToSync(id2);
-        copysetNode.ShipToSync(id3);
-        copysetNode.HandleSyncTimerOut();
-    }
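Note: the cases above all drive CopysetNode through a MockLocalFileSystem, with the recurring shape EXPECT_CALL(...).Times(1).WillOnce(...), and DoAll(SetArgPointee<1>(...), Return(0)) when a call must both fill an out-parameter and return a code. A self-contained example of that stubbing style (MockFs and its List method are hypothetical, not types from this test):

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>
    #include <string>
    #include <vector>

    using ::testing::_;
    using ::testing::DoAll;
    using ::testing::Return;
    using ::testing::SetArgPointee;

    class MockFs {
     public:
        MOCK_METHOD(int, List, (const std::string& dir,
                                std::vector<std::string>* names));
    };

    TEST(MockFsExample, FillsOutParamAndReturns) {
        MockFs fs;
        std::vector<std::string> files{"test-1.txt"};
        EXPECT_CALL(fs, List(_, _))
            .Times(1)
            .WillOnce(DoAll(SetArgPointee<1>(files), Return(0)));

        std::vector<std::string> out;
        ASSERT_EQ(0, fs.List("dir", &out));
        ASSERT_EQ(1u, out.size());
    }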
-
-    // on_snapshot_load: Dir not exist, File not exist, data init success
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        Configuration conf;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        FakeClosure closure;
-        FakeSnapshotReader reader;
-        std::shared_ptr<MockLocalFileSystem> mockfs =
-            std::make_shared<MockLocalFileSystem>();
-        std::unique_ptr<ConfEpochFile> epochFile(new ConfEpochFile(mockfs));
-        ;
-        copysetNode.SetLocalFileSystem(mockfs);
-        copysetNode.SetConfEpochFile(std::move(epochFile));
-        DataStoreOptions options;
-        options.baseDir = "./test-temp";
-        options.chunkSize = 16 * 1024 * 1024;
-        options.metaPageSize = 4 * 1024;
-        options.blockSize = 4 * 1024;
-        std::shared_ptr<FakeCSDataStore> dataStore =
-            std::make_shared<FakeCSDataStore>(options, fs);
-        copysetNode.SetCSDateStore(dataStore);
-
-        EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(false));
-        EXPECT_CALL(*mockfs, FileExists(_)).Times(1).WillOnce(Return(false));
-
-        ASSERT_EQ(0, copysetNode.on_snapshot_load(&reader));
-        LOG(INFO) << "OK";
-    }
-    // on_snapshot_load: Dir not exist, File not exist, data init failed
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        Configuration conf;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        std::shared_ptr<MockLocalFileSystem> mockfs =
-            std::make_shared<MockLocalFileSystem>();
-        std::unique_ptr<ConfEpochFile> epochFile(new ConfEpochFile(mockfs));
-        ;
-        FakeClosure closure;
-        FakeSnapshotReader reader;
-        copysetNode.SetLocalFileSystem(mockfs);
-        copysetNode.SetConfEpochFile(std::move(epochFile));
-        DataStoreOptions options;
-        options.baseDir = "./test-temp";
-        options.chunkSize = 16 * 1024 * 1024;
-        options.metaPageSize = 4 * 1024;
-        options.blockSize = 4 * 1024;
-        std::shared_ptr<FakeCSDataStore> dataStore =
-            std::make_shared<FakeCSDataStore>(options, fs);
-        copysetNode.SetCSDateStore(dataStore);
-        dataStore->InjectError();
-
-        EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(false));
-        EXPECT_CALL(*mockfs, FileExists(_)).Times(1).WillOnce(Return(false));
-
-        ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader));
-        LOG(INFO) << "OK";
-    }
-    // on_snapshot_load: Dir not exist, File exist, load conf.epoch failed
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        Configuration conf;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        FakeClosure closure;
-        FakeSnapshotReader reader;
-        std::shared_ptr<MockLocalFileSystem> mockfs =
-            std::make_shared<MockLocalFileSystem>();
-        std::unique_ptr<ConfEpochFile> epochFile(new ConfEpochFile(mockfs));
-        ;
-        copysetNode.SetLocalFileSystem(mockfs);
-        copysetNode.SetConfEpochFile(std::move(epochFile));
-
-        EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(false));
-        EXPECT_CALL(*mockfs, FileExists(_)).Times(1).WillOnce(Return(true));
-        EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1));
-
-        ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader));
-    }
-
-    // on_snapshot_load: Dir exist, delete failed
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        Configuration conf;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        FakeClosure closure;
-        FakeSnapshotReader reader;
-        std::shared_ptr<MockLocalFileSystem> mockfs =
-            std::make_shared<MockLocalFileSystem>();
-        std::unique_ptr<ConfEpochFile> epochFile(new ConfEpochFile(mockfs));
-        ;
-        MockCurveFilesystemAdaptor* cfa = new MockCurveFilesystemAdaptor();
-        auto sfs = new scoped_refptr<CurveFilesystemAdaptor>(cfa);
-        copysetNode.SetSnapshotFileSystem(sfs);
-        copysetNode.SetLocalFileSystem(mockfs);
-        copysetNode.SetConfEpochFile(std::move(epochFile));
-        EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true));
-        EXPECT_CALL(*cfa, delete_file(_, _)).Times(1).WillOnce(Return(false));
-
-        ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader));
-    }
-
-    // on_snapshot_load: Dir exist, delete success, rename failed
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        Configuration conf;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        FakeClosure closure;
-        FakeSnapshotReader reader;
-        std::shared_ptr<MockLocalFileSystem> mockfs =
-            std::make_shared<MockLocalFileSystem>();
-        std::unique_ptr<ConfEpochFile> epochFile(new ConfEpochFile(mockfs));
-        ;
-        defaultOptions_.localFileSystem = mockfs;
-        MockCurveFilesystemAdaptor* cfa = new MockCurveFilesystemAdaptor();
-        auto sfs = new scoped_refptr<CurveFilesystemAdaptor>(cfa);
-        copysetNode.SetSnapshotFileSystem(sfs);
-        copysetNode.SetLocalFileSystem(mockfs);
-        copysetNode.SetConfEpochFile(std::move(epochFile));
-        EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true));
-        EXPECT_CALL(*cfa, delete_file(_, _)).Times(1).WillOnce(Return(true));
-        EXPECT_CALL(*cfa, rename(_, _)).Times(1).WillOnce(Return(false));
-
-        ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader));
-    }
-
-    // on_snapshot_load: Dir exist, rename success
-    // file exist, open failed
-    {
-        LogicPoolID logicPoolID = 1;
-        CopysetID copysetID = 1;
-        Configuration conf;
-        std::vector<std::string> files;
-        files.push_back("test-1.txt");
-
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        FakeClosure closure;
-        FakeSnapshotReader reader;
-        std::shared_ptr<MockLocalFileSystem> mockfs =
-            std::make_shared<MockLocalFileSystem>();
-        std::unique_ptr<ConfEpochFile> epochFile(new ConfEpochFile(mockfs));
-        ;
-        defaultOptions_.localFileSystem = mockfs;
-        MockCurveFilesystemAdaptor* cfa = new MockCurveFilesystemAdaptor();
-        auto sfs = new scoped_refptr<CurveFilesystemAdaptor>(cfa);
-        copysetNode.SetSnapshotFileSystem(sfs);
-        copysetNode.SetLocalFileSystem(mockfs);
-        copysetNode.SetConfEpochFile(std::move(epochFile));
-        EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true));
-        EXPECT_CALL(*cfa, delete_file(_, _)).Times(1).WillOnce(Return(true));
-        EXPECT_CALL(*cfa, rename(_, _)).Times(1).WillOnce(Return(true));
-        EXPECT_CALL(*mockfs, FileExists(_)).Times(1).WillOnce(Return(true));
-        EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1));
-
-        ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader));
-        LOG(INFO) << "OK";
-    }
-    /* on_error */
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        Configuration conf;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        braft::Error error;
-        ASSERT_DEATH(copysetNode.on_error(error), ".*raft error.*");
-    }
-    /* Fini, raftNode is null */
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        Configuration conf;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        copysetNode.Fini();
-    }
-    /* Fini, raftNode is not null */
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        Configuration conf;
-        std::vector<std::string> files;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        defaultOptions_.localFileSystem = fs;
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        copysetNode.Fini();
-    }
-    /* Load/SaveConfEpoch */
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        Configuration conf;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        defaultOptions_.localFileSystem = fs;
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        ASSERT_EQ(0, copysetNode.SaveConfEpoch(kCurveConfEpochFilename));
-        ASSERT_EQ(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename));
-        ASSERT_EQ(0, copysetNode.GetConfEpoch());
-        copysetNode.Fini();
-        ::system(rmCmd.c_str());
-    }
-    /* load: ConfEpochFile load failed*/
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        Configuration conf;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        defaultOptions_.localFileSystem = fs;
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename));
-        copysetNode.Fini();
-        ::system(rmCmd.c_str());
-    }
-    /* Load: logic pool id error */
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        uint64_t epoch = 12;
-        Configuration conf;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        auto fs = LocalFsFactory::CreateFs(FileSystemType::EXT4, "");
-        ConfEpochFile confEpochFile(fs);
-        ASSERT_EQ(0, confEpochFile.Save(kCurveConfEpochFilename,
-                                        logicPoolID + 1, copysetID, epoch));
-        defaultOptions_.localFileSystem = fs;
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename));
-        copysetNode.Fini();
-        ::system(rmCmd.c_str());
-    }
-    /* Load: copyset id error */
-    {
-        LogicPoolID logicPoolID = 123;
-        CopysetID copysetID = 1345;
-        uint64_t epoch = 12;
-        Configuration conf;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        auto fs = LocalFsFactory::CreateFs(FileSystemType::EXT4, "");
-        ConfEpochFile confEpochFile(fs);
-        ASSERT_EQ(0, confEpochFile.Save(kCurveConfEpochFilename, logicPoolID,
-                                        copysetID + 1, epoch));
-        defaultOptions_.localFileSystem = fs;
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename));
-        copysetNode.Fini();
-        ::system(rmCmd.c_str());
-    }
-}
-
-TEST_F(CopysetNodeTest, get_conf_change) {
-    std::shared_ptr<LocalFileSystem> fs(
-        LocalFsFactory::CreateFs(FileSystemType::EXT4, ""));  // NOLINT
-    std::string rmCmd("rm -f ");
-    rmCmd += kCurveConfEpochFilename;
-
-    LogicPoolID logicPoolID = 1;
-    CopysetID copysetID = 1;
-    Configuration conf;
-    Configuration conf1;
-    Configuration conf2;
-    PeerId peer("127.0.0.1:3200:0");
-    PeerId peer1("127.0.0.1:3201:0");
-    PeerId emptyPeer;
-    conf.add_peer(peer);
-    conf1.add_peer(peer);
-    conf1.add_peer(peer1);
-    conf2.add_peer(peer1);
-
-    // There are currently no configuration changes in progress
-    {
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        std::shared_ptr<MockNode> mockNode =
-            std::make_shared<MockNode>(logicPoolID, copysetID);
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        copysetNode.SetCopysetNode(mockNode);
-
-        ConfigChangeType type;
-        Configuration oldConf;
-        Peer alterPeer;
-
-        copysetNode.on_leader_start(8);
-        NodeStatus status;
-        status.state = braft::State::STATE_LEADER;
-        EXPECT_CALL(*mockNode, get_status(_))
-            .WillOnce(SetArgPointee<0>(status));
-        EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer));
-        EXPECT_EQ(ConfigChangeType::NONE, type);
-    }
-    // Currently adding Peer
-    {
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        std::shared_ptr<MockNode> mockNode =
-            std::make_shared<MockNode>(logicPoolID, copysetID);
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        copysetNode.SetCopysetNode(mockNode);
-
-        ConfigChangeType type;
-        Configuration oldConf;
-        Peer alterPeer;
-
-        copysetNode.on_leader_start(8);
-
-        EXPECT_CALL(*mockNode, add_peer(_, _)).Times(1);
-        EXPECT_CALL(*mockNode, remove_peer(_, _))
-            .WillOnce(Invoke([](const PeerId& peer, braft::Closure* done) {
-                done->status().set_error(-1,
-                                         "another config change is ongoing");
-            }));
-        Peer addPeer;
-        addPeer.set_address("127.0.0.1:3202:0");
-        Peer removePeer;
-        removePeer.set_address("127.0.0.1:3200:0");
-        copysetNode.AddPeer(addPeer);
-        copysetNode.RemovePeer(removePeer);
-
-        NodeStatus status;
-        status.state = braft::State::STATE_LEADER;
-        EXPECT_CALL(*mockNode, get_status(_))
-            .WillOnce(SetArgPointee<0>(status));
-        EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer));
-        EXPECT_EQ(ConfigChangeType::ADD_PEER, type);
-        EXPECT_EQ(addPeer.address(), alterPeer.address());
-    }
-    // Currently removing Peer
-    {
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        std::shared_ptr<MockNode> mockNode =
-            std::make_shared<MockNode>(logicPoolID, copysetID);
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        copysetNode.SetCopysetNode(mockNode);
-
-        ConfigChangeType type;
-        Configuration oldConf;
-        Peer alterPeer;
-
-        copysetNode.on_leader_start(8);
-
-        EXPECT_CALL(*mockNode, remove_peer(_, _)).Times(1);
-        EXPECT_CALL(*mockNode, add_peer(_, _))
-            .WillOnce(
-                Invoke([](const braft::PeerId& peer, braft::Closure* done) {
-                    done->status().set_error(
-                        -1, "another config change is ongoing");
-                }));
-        Peer addPeer1;
-        addPeer1.set_address("127.0.0.1:3202:0");
-        Peer removePeer;
-        removePeer.set_address("127.0.0.1:3200:0");
-        copysetNode.RemovePeer(removePeer);
-        copysetNode.AddPeer(addPeer1);
-
-        NodeStatus status;
-        status.state = braft::State::STATE_LEADER;
-        EXPECT_CALL(*mockNode, get_status(_))
-            .WillOnce(SetArgPointee<0>(status));
-        EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer));
-        EXPECT_EQ(ConfigChangeType::REMOVE_PEER, type);
-        EXPECT_EQ(removePeer.address(), alterPeer.address());
-    }
-    // Currently transferring leader
-    {
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        std::shared_ptr<MockNode> mockNode =
-            std::make_shared<MockNode>(logicPoolID, copysetID);
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        copysetNode.SetCopysetNode(mockNode);
-
-        ConfigChangeType type;
-        Configuration oldConf;
-        Peer alterPeer;
-
-        copysetNode.on_leader_start(8);
-
-        Peer transferee1;
-        transferee1.set_address("127.0.0.1:3201:0");
-        Peer transferee2;
-        transferee2.set_address("127.0.0.1:3200:0");
-        EXPECT_CALL(*mockNode, transfer_leadership_to(_))
-            .WillOnce(Return(0))
-            .WillOnce(Return(-1));
-        EXPECT_CALL(*mockNode, leader_id())
-            .WillOnce(Return(peer))
-            .WillOnce(Return(peer1))
-            .WillOnce(Return(peer));
-        copysetNode.TransferLeader(transferee1);
-        copysetNode.TransferLeader(transferee2);
-        copysetNode.TransferLeader(transferee2);
-
-        NodeStatus status;
-        status.state = braft::State::STATE_TRANSFERRING;
-        EXPECT_CALL(*mockNode, get_status(_))
-            .WillOnce(SetArgPointee<0>(status));
-        EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer));
-        EXPECT_EQ(ConfigChangeType::TRANSFER_LEADER, type);
-        EXPECT_EQ(transferee1.address(), alterPeer.address());
-    }
-    // Currently changing Peer
-    {
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        std::shared_ptr<MockNode> mockNode =
-            std::make_shared<MockNode>(logicPoolID, copysetID);
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        copysetNode.SetCopysetNode(mockNode);
-
-        ConfigChangeType type;
-        Configuration oldConf;
-        Peer alterPeer;
-
-        copysetNode.on_leader_start(8);
-
-        EXPECT_CALL(*mockNode, change_peers(_, _)).Times(1);
-
-        Peer addPeer1;
-        addPeer1.set_address("127.0.0.1:3201:0");
-        std::vector<Peer> peers;
-        peers.emplace_back(addPeer1);
-        copysetNode.ChangePeer(peers);
-        Peer addPeer2;
-        addPeer2.set_address("127.0.0.1:3202:0");
-        peers.emplace_back(addPeer2);
-        copysetNode.ChangePeer(peers);
-
-        NodeStatus status;
-        status.state = braft::State::STATE_LEADER;
-        EXPECT_CALL(*mockNode, get_status(_))
-            .WillOnce(SetArgPointee<0>(status));
-        EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer));
-        EXPECT_EQ(ConfigChangeType::CHANGE_PEER, type);
-        EXPECT_EQ(addPeer1.address(), alterPeer.address());
-    }
-    // leader term is less than 0
-    {
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-        ConfigChangeType type;
-        Configuration oldConf;
-        Peer alterPeer;
-        copysetNode.on_leader_start(-1);
-        EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer));
-        EXPECT_EQ(ConfigChangeType::NONE, type);
-    }
-}
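Note: taken together, the five blocks above pin down the contract of GetConfChange() — whichever raft configuration change is in flight determines the reported type and the peer being altered. Summarized as data (a sketch; enumerator names mirror the ConfigChangeType values used above):

    enum class ConfigChangeType { NONE, ADD_PEER, REMOVE_PEER, TRANSFER_LEADER, CHANGE_PEER };

    struct Case {
        const char* inFlight;        // operation started on the mock node
        ConfigChangeType reported;   // what GetConfChange() must return
    };

    constexpr Case kConfChangeCases[] = {
        {"nothing", ConfigChangeType::NONE},
        {"AddPeer(127.0.0.1:3202:0)", ConfigChangeType::ADD_PEER},
        {"RemovePeer(127.0.0.1:3200:0)", ConfigChangeType::REMOVE_PEER},
        {"TransferLeader(127.0.0.1:3201:0)", ConfigChangeType::TRANSFER_LEADER},
        {"ChangePeer({127.0.0.1:3201:0})", ConfigChangeType::CHANGE_PEER},
    };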
-
-TEST_F(CopysetNodeTest, get_hash) {
-    std::shared_ptr<LocalFileSystem> fs(
-        LocalFsFactory::CreateFs(FileSystemType::EXT4, ""));  // NOLINT
-    std::string rmCmd("rm -f ");
-    rmCmd += kCurveConfEpochFilename;
-
-    LogicPoolID logicPoolID = 1 + 1;
-    CopysetID copysetID = 1 + 1;
-    Configuration conf;
-    Configuration conf1;
-    PeerId peer("127.0.0.1:3200:0");
-    PeerId peer1("127.0.0.1:3201:0");
-    PeerId emptyPeer;
-    conf.add_peer(peer);
-    conf1.add_peer(peer);
-    conf1.add_peer(peer1);
-
-    std::string hashValue = std::to_string(1355371765);
-    // get hash
-    {
-        std::string hash;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-
-        // Generate multiple files with data
-        ::system(
-            "echo \"abcddddddddd333\" >"
-            "copyset_node_test/8589934594/data/test-2.txt");
-        ::system(
-            "echo \"mmmmmmmm\" >"
-            "copyset_node_test/8589934594/data/test-4.txt");
-        ::system(
-            "dd if=/dev/zero of="
-            "copyset_node_test/8589934594/data/test-3.txt bs=512 count=15");  // NOLINT
-        ::system(
-            "echo \"eeeeeeeeeee\" > "
-            "copyset_node_test/8589934594/data/test-5.txt");
-
-        ::system("touch copyset_node_test/8589934594/data/test-1.txt");
-        ::system(
-            "echo \"wwwww\" > "
-            "copyset_node_test/8589934594/data/test-1.txt");
-
-        // Get hash
-        ASSERT_EQ(0, copysetNode.GetHash(&hash));
-        ASSERT_STREQ(hashValue.c_str(), hash.c_str());
-        ::system("rm -fr copyset_node_test/8589934594");
-    }
-
-    {
-        std::string hash;
-        // Using different copyset IDs to make the directory different
-        CopysetNode copysetNode(logicPoolID, copysetID + 1, conf);
-
-        ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
-
-        // Generate multiple files with data and exchange the order of generated
-        // files
-        ::system("touch copyset_node_test/8589934595/data/test-1.txt");
-        ::system(
-            "echo \"wwwww\" > "
-            "copyset_node_test/8589934595/data/test-1.txt");
-
-        ::system(
-            "echo \"mmmmmmmm\" > "
-            "copyset_node_test/8589934595/data/test-4.txt");
-        ::system(
-            "echo \"eeeeeeeeeee\" > "
-            "copyset_node_test/8589934595/data/test-5.txt");
-        ::system(
-            "dd if=/dev/zero of="
-            "copyset_node_test/8589934595/data/test-3.txt bs=512 count=15");  // NOLINT
-        ::system(
-            "echo \"abcddddddddd333\" > "
-            "copyset_node_test/8589934595/data/test-2.txt");
-
-        // Get hash
-        ASSERT_EQ(0, copysetNode.GetHash(&hash));
-        ASSERT_STREQ(hashValue.c_str(), hash.c_str());
-        ::system("rm -fr copyset_node_test/8589934595");
-    }
-
-    // List failed
-    {
-        std::string hash;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-
-        std::shared_ptr<MockLocalFileSystem> mockfs =
-            std::make_shared<MockLocalFileSystem>();
-        copysetNode.SetLocalFileSystem(mockfs);
-        std::vector<std::string> files;
-        files.push_back("test-1.txt");
-
-        EXPECT_CALL(*mockfs, List(_, _))
-            .Times(1)
-            .WillOnce(DoAll(SetArgPointee<1>(files), Return(0)));
-        EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1));
-
-        ASSERT_EQ(-1, copysetNode.GetHash(&hash));
-    }
-
-    // List success
-    {
-        std::string hash;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        std::shared_ptr<MockLocalFileSystem> mockfs =
-            std::make_shared<MockLocalFileSystem>();
-        copysetNode.SetLocalFileSystem(mockfs);
-
-        std::vector<std::string> files;
-
-        EXPECT_CALL(*mockfs, List(_, _))
-            .Times(1)
-            .WillOnce(DoAll(SetArgPointee<1>(files), Return(0)));
-
-        ASSERT_EQ(0, copysetNode.GetHash(&hash));
-        ASSERT_EQ(hash, "0");
-    }
- - std::vector files; - files.push_back("test-1.txt"); - - EXPECT_CALL(*mockfs, List(_, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1)); - - ASSERT_EQ(-1, copysetNode.GetHash(&hash)); - } - - // List success, open success,fstat failed - { - std::string hash; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockfs = - std::make_shared(); - copysetNode.SetLocalFileSystem(mockfs); - - std::vector files; - files.push_back("test-1.txt"); + using ::testing::_; + using ::testing::AnyNumber; + using ::testing::AtLeast; + using ::testing::DoAll; + using ::testing::InSequence; + using ::testing::Invoke; + using ::testing::Matcher; + using ::testing::Return; + using ::testing::SaveArgPointee; + using ::testing::SetArgPointee; + using ::testing::SetArgReferee; + + using curve::chunkserver::concurrent::ConcurrentApplyOption; + using curve::fs::FileSystemType; + using curve::fs::MockLocalFileSystem; + + const char copysetUri[] = "local://./copyset_node_test"; + const int port = 9044; + + class FakeSnapshotReader : public braft::SnapshotReader + { + public: + std::string get_path() + { + /*Returns a non-existent path*/ + return std::string("/1002093939/temp/238408034"); + } + void list_files(std::vector *files) { return; } + int load_meta(braft::SnapshotMeta *meta) { return 1; } + std::string generate_uri_for_copy() { return std::string(""); } + }; + + class FakeSnapshotWriter : public braft::SnapshotWriter + { + public: + std::string get_path() + { + /*Returns a non-existent path*/ + return std::string("."); + } + void list_files(std::vector *files) { return; } + virtual int save_meta(const braft::SnapshotMeta &meta) { return 0; } + + virtual int add_file(const std::string &filename) { return 0; } + + virtual int add_file(const std::string &filename, + const ::google::protobuf::Message *file_meta) + { + return 0; + } + + virtual int remove_file(const std::string &filename) { return 0; } + }; + + class FakeClosure : public braft::Closure + { + public: + void Run() { std::cerr << "FakeClosure run" << std::endl; } + }; + + class CopysetNodeTest : public ::testing::Test + { + protected: + void SetUp() + { + defaultOptions_.ip = "127.0.0.1"; + defaultOptions_.port = port; + defaultOptions_.electionTimeoutMs = 1000; + defaultOptions_.snapshotIntervalS = 30; + defaultOptions_.catchupMargin = 50; + defaultOptions_.chunkDataUri = copysetUri; + defaultOptions_.chunkSnapshotUri = copysetUri; + defaultOptions_.logUri = copysetUri; + defaultOptions_.raftMetaUri = copysetUri; + defaultOptions_.raftSnapshotUri = copysetUri; + defaultOptions_.loadConcurrency = 5; + defaultOptions_.syncConcurrency = 20; + defaultOptions_.checkRetryTimes = 3; + defaultOptions_.finishLoadMargin = 1000; + + defaultOptions_.concurrentapply = &concurrentModule_; + ConcurrentApplyOption opt{2, 1, 2, 1}; + defaultOptions_.concurrentapply->Init(opt); + std::shared_ptr fs = + LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); + ASSERT_TRUE(nullptr != fs); + defaultOptions_.localFileSystem = fs; + defaultOptions_.chunkFilePool = std::make_shared(fs); + defaultOptions_.trash = std::make_shared(); + defaultOptions_.enableOdsyncWhenOpenChunkFile = true; + } + + void TearDown() { ::system("rm -rf copyset_node_test"); } + + protected: + CopysetNodeOptions defaultOptions_; + ConcurrentApplyModule concurrentModule_; + }; + + TEST_F(CopysetNodeTest, error_test) + { + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, 
"")); // NOLINT + std::string rmCmd("rm -f "); + rmCmd += kCurveConfEpochFilename; + + // on_snapshot_save: List failed + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + std::vector files; + files.push_back("test-1.txt"); + files.push_back("test-2.txt"); + + const char *json = + "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":" + "774340440}"; // NOLINT + std::string jsonStr(json); + + CopysetNode copysetNode(logicPoolID, copysetID, conf); + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + FakeClosure closure; + FakeSnapshotWriter writer; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + + copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10)); + EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)) + .Times(1) + .WillOnce(Return(jsonStr.size())); + EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*mockfs, List(_, _)).Times(1).WillOnce(Return(-1)); + + copysetNode.on_snapshot_save(&writer, &closure); + copysetNode.WaitSnapshotDone(); + LOG(INFO) << closure.status().error_cstr(); + } + + // on_snapshot_save: save conf open failed + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + std::vector files; + files.push_back("test-1.txt"); + files.push_back("test-2.txt"); + + CopysetNode copysetNode(logicPoolID, copysetID, conf); + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + FakeClosure closure; + FakeSnapshotWriter writer; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + + copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1)); + + copysetNode.on_snapshot_save(&writer, &closure); + copysetNode.WaitSnapshotDone(); + LOG(INFO) << closure.status().error_cstr(); + } + // on_snapshot_save: success + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + std::vector files; + files.push_back("test-1.txt"); + files.push_back("test-2.txt"); + + const char *json = + "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":" + "774340440}"; // NOLINT + std::string jsonStr(json); + + CopysetNode copysetNode(logicPoolID, copysetID, conf); + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + FakeClosure closure; + FakeSnapshotWriter writer; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + + copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10)); + EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)) + .Times(1) + .WillOnce(Return(jsonStr.size())); + EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); + + copysetNode.on_snapshot_save(&writer, &closure); + copysetNode.WaitSnapshotDone(); + } + + // on_snapshot_save: success, enableOdsyncWhenOpenChunkFile_ = false + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + std::vector files; + files.push_back("test-1.txt"); + 
files.push_back("test-2.txt"); + + const char *json = + "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":" + "774340440}"; // NOLINT + std::string jsonStr(json); + + CopysetNode copysetNode(logicPoolID, copysetID, conf); + defaultOptions_.enableOdsyncWhenOpenChunkFile = false; + defaultOptions_.syncConcurrency = 20; + defaultOptions_.syncChunkLimit = 2 * 1024 * 1024; + defaultOptions_.syncThreshold = 65536; + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + FakeClosure closure; + FakeSnapshotWriter writer; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + + copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10)); + EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)) + .Times(1) + .WillOnce(Return(jsonStr.size())); + EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); + + copysetNode.on_snapshot_save(&writer, &closure); + copysetNode.WaitSnapshotDone(); + } + // ShipToSync & handle sync time out + { + CopysetNode::copysetSyncPool_ = + std::make_shared>(); + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + + defaultOptions_.enableOdsyncWhenOpenChunkFile = false; + defaultOptions_.syncConcurrency = 20; + defaultOptions_.syncChunkLimit = 2 * 1024 * 1024; + defaultOptions_.syncThreshold = 65536; + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + + ChunkID id1 = 100; + ChunkID id2 = 200; + ChunkID id3 = 100; + copysetNode.ShipToSync(id1); + copysetNode.ShipToSync(id2); + copysetNode.ShipToSync(id3); + copysetNode.HandleSyncTimerOut(); + } + + // on_snapshot_load: Dir not exist, File not exist, data init success + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + FakeClosure closure; + FakeSnapshotReader reader; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + DataStoreOptions options; + options.baseDir = "./test-temp"; + options.chunkSize = 16 * 1024 * 1024; + options.metaPageSize = 4 * 1024; + options.blockSize = 4 * 1024; + std::shared_ptr dataStore = + std::make_shared(options, fs); + copysetNode.SetCSDateStore(dataStore); + + EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(false)); + EXPECT_CALL(*mockfs, FileExists(_)).Times(1).WillOnce(Return(false)); + + ASSERT_EQ(0, copysetNode.on_snapshot_load(&reader)); + LOG(INFO) << "OK"; + } + // on_snapshot_load: Dir not exist, File not exist, data init failed + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + FakeClosure closure; + FakeSnapshotReader reader; + copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + DataStoreOptions options; + options.baseDir = "./test-temp"; + options.chunkSize = 16 * 1024 * 1024; + options.metaPageSize = 4 * 1024; + options.blockSize = 4 * 1024; + std::shared_ptr 
+        // on_snapshot_load: Dir not exist, File not exist, data init failed
+        {
+            LogicPoolID logicPoolID = 123;
+            CopysetID copysetID = 1345;
+            Configuration conf;
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            std::shared_ptr<MockLocalFileSystem> mockfs =
+                std::make_shared<MockLocalFileSystem>();
+            std::unique_ptr<ConfEpochFile> epochFile(new ConfEpochFile(mockfs));
+            FakeClosure closure;
+            FakeSnapshotReader reader;
+            copysetNode.SetLocalFileSystem(mockfs);
+            copysetNode.SetConfEpochFile(std::move(epochFile));
+            DataStoreOptions options;
+            options.baseDir = "./test-temp";
+            options.chunkSize = 16 * 1024 * 1024;
+            options.metaPageSize = 4 * 1024;
+            options.blockSize = 4 * 1024;
+            std::shared_ptr<FakeCSDataStore> dataStore =
+                std::make_shared<FakeCSDataStore>(options, fs);
+            copysetNode.SetCSDateStore(dataStore);
+            dataStore->InjectError();
+
+            EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(false));
+            EXPECT_CALL(*mockfs, FileExists(_)).Times(1).WillOnce(Return(false));
+
+            ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader));
+            LOG(INFO) << "OK";
+        }
+        // on_snapshot_load: Dir not exist, File exist, load conf.epoch failed
+        {
+            LogicPoolID logicPoolID = 123;
+            CopysetID copysetID = 1345;
+            Configuration conf;
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            FakeClosure closure;
+            FakeSnapshotReader reader;
+            std::shared_ptr<MockLocalFileSystem> mockfs =
+                std::make_shared<MockLocalFileSystem>();
+            std::unique_ptr<ConfEpochFile> epochFile(new ConfEpochFile(mockfs));
+            copysetNode.SetLocalFileSystem(mockfs);
+            copysetNode.SetConfEpochFile(std::move(epochFile));
+
+            EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(false));
+            EXPECT_CALL(*mockfs, FileExists(_)).Times(1).WillOnce(Return(true));
+            EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1));
+
+            ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader));
+        }
+
+        // on_snapshot_load: Dir exist, delete failed
+        {
+            LogicPoolID logicPoolID = 123;
+            CopysetID copysetID = 1345;
+            Configuration conf;
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            FakeClosure closure;
+            FakeSnapshotReader reader;
+            std::shared_ptr<MockLocalFileSystem> mockfs =
+                std::make_shared<MockLocalFileSystem>();
+            std::unique_ptr<ConfEpochFile> epochFile(new ConfEpochFile(mockfs));
+            MockCurveFilesystemAdaptor *cfa = new MockCurveFilesystemAdaptor();
+            auto sfs = new scoped_refptr(cfa);
+            copysetNode.SetSnapshotFileSystem(sfs);
+            copysetNode.SetLocalFileSystem(mockfs);
+            copysetNode.SetConfEpochFile(std::move(epochFile));
+            EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true));
+            EXPECT_CALL(*cfa, delete_file(_, _)).Times(1).WillOnce(Return(false));
+
+            ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader));
+        }
+
+        // on_snapshot_load: Dir exist, delete success, rename failed
+        {
+            LogicPoolID logicPoolID = 123;
+            CopysetID copysetID = 1345;
+            Configuration conf;
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            FakeClosure closure;
+            FakeSnapshotReader reader;
+            std::shared_ptr<MockLocalFileSystem> mockfs =
+                std::make_shared<MockLocalFileSystem>();
+            std::unique_ptr<ConfEpochFile> epochFile(new ConfEpochFile(mockfs));
+            defaultOptions_.localFileSystem = mockfs;
+            MockCurveFilesystemAdaptor *cfa = new MockCurveFilesystemAdaptor();
+            auto sfs = new scoped_refptr(cfa);
+            copysetNode.SetSnapshotFileSystem(sfs);
+            copysetNode.SetLocalFileSystem(mockfs);
+            copysetNode.SetConfEpochFile(std::move(epochFile));
+            EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true));
+            EXPECT_CALL(*cfa, delete_file(_, _)).Times(1).WillOnce(Return(true));
+            EXPECT_CALL(*cfa, rename(_, _)).Times(1).WillOnce(Return(false));
+
+            ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader));
+        }
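+
+        // In the next case deleting the stale data dir and renaming the
+        // snapshot dir both succeed; the failure is pushed one step further,
+        // to opening the conf.epoch file recovered from the snapshot.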
+        // on_snapshot_load: Dir exist, rename success
+        // file exist, open failed
+        {
+            LogicPoolID logicPoolID = 1;
+            CopysetID copysetID = 1;
+            Configuration conf;
+            std::vector<std::string> files;
+            files.push_back("test-1.txt");
+
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            FakeClosure closure;
+            FakeSnapshotReader reader;
+            std::shared_ptr<MockLocalFileSystem> mockfs =
+                std::make_shared<MockLocalFileSystem>();
+            std::unique_ptr<ConfEpochFile> epochFile(new ConfEpochFile(mockfs));
+            defaultOptions_.localFileSystem = mockfs;
+            MockCurveFilesystemAdaptor *cfa = new MockCurveFilesystemAdaptor();
+            auto sfs = new scoped_refptr(cfa);
+            copysetNode.SetSnapshotFileSystem(sfs);
+            copysetNode.SetLocalFileSystem(mockfs);
+            copysetNode.SetConfEpochFile(std::move(epochFile));
+            EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true));
+            EXPECT_CALL(*cfa, delete_file(_, _)).Times(1).WillOnce(Return(true));
+            EXPECT_CALL(*cfa, rename(_, _)).Times(1).WillOnce(Return(true));
+            EXPECT_CALL(*mockfs, FileExists(_)).Times(1).WillOnce(Return(true));
+            EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1));
+
+            ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader));
+            LOG(INFO) << "OK";
+        }
+        /* on_error */
+        {
+            LogicPoolID logicPoolID = 123;
+            CopysetID copysetID = 1345;
+            Configuration conf;
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            braft::Error error;
+            ASSERT_DEATH(copysetNode.on_error(error), ".*raft error.*");
+        }
+        /* Fini, raftNode is null */
+        {
+            LogicPoolID logicPoolID = 123;
+            CopysetID copysetID = 1345;
+            Configuration conf;
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            copysetNode.Fini();
+        }
+        /* Fini, raftNode is not null */
+        {
+            LogicPoolID logicPoolID = 123;
+            CopysetID copysetID = 1345;
+            Configuration conf;
+            std::vector<std::string> files;
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            defaultOptions_.localFileSystem = fs;
+            ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
+            copysetNode.Fini();
+        }
+        /* Load/SaveConfEpoch */
+        {
+            LogicPoolID logicPoolID = 123;
+            CopysetID copysetID = 1345;
+            Configuration conf;
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            defaultOptions_.localFileSystem = fs;
+            ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
+            ASSERT_EQ(0, copysetNode.SaveConfEpoch(kCurveConfEpochFilename));
+            ASSERT_EQ(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename));
+            ASSERT_EQ(0, copysetNode.GetConfEpoch());
+            copysetNode.Fini();
+            ::system(rmCmd.c_str());
+        }
+        /* Load: ConfEpochFile load failed */
+        {
+            LogicPoolID logicPoolID = 123;
+            CopysetID copysetID = 1345;
+            Configuration conf;
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            defaultOptions_.localFileSystem = fs;
+            ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
+            ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename));
+            copysetNode.Fini();
+            ::system(rmCmd.c_str());
+        }
+        /* Load: logic pool id error */
+        {
+            LogicPoolID logicPoolID = 123;
+            CopysetID copysetID = 1345;
+            uint64_t epoch = 12;
+            Configuration conf;
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            auto fs = LocalFsFactory::CreateFs(FileSystemType::EXT4, "");
+            ConfEpochFile confEpochFile(fs);
+            ASSERT_EQ(0, confEpochFile.Save(kCurveConfEpochFilename,
+                                            logicPoolID + 1, copysetID, epoch));
+            defaultOptions_.localFileSystem = fs;
+            ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
+            ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename));
+            copysetNode.Fini();
+            ::system(rmCmd.c_str());
+        }
+        /* Load: copyset id error */
+        {
+            LogicPoolID logicPoolID = 123;
+            CopysetID copysetID = 1345;
+            uint64_t epoch = 12;
+            Configuration conf;
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
+            auto fs = LocalFsFactory::CreateFs(FileSystemType::EXT4, "");
+            ConfEpochFile confEpochFile(fs);
+            ASSERT_EQ(0, confEpochFile.Save(kCurveConfEpochFilename, logicPoolID,
+                                            copysetID + 1, epoch));
+            defaultOptions_.localFileSystem = fs;
+            ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
+            ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename));
+            copysetNode.Fini();
+            ::system(rmCmd.c_str());
+        }
+    }
-        EXPECT_CALL(*mockfs, List(_, _))
-            .Times(1)
-            .WillOnce(DoAll(SetArgPointee<1>(files), Return(0)));
-        EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(3));
-        EXPECT_CALL(*mockfs, Fstat(_, _)).Times(1).WillOnce(Return(-1));
+    TEST_F(CopysetNodeTest, get_conf_change)
+    {
+        std::shared_ptr<LocalFileSystem> fs(
+            LocalFsFactory::CreateFs(FileSystemType::EXT4, ""));  // NOLINT
+        std::string rmCmd("rm -f ");
+        rmCmd += kCurveConfEpochFilename;
+
+        LogicPoolID logicPoolID = 1;
+        CopysetID copysetID = 1;
+        Configuration conf;
+        Configuration conf1;
+        Configuration conf2;
+        PeerId peer("127.0.0.1:3200:0");
+        PeerId peer1("127.0.0.1:3201:0");
+        PeerId emptyPeer;
+        conf.add_peer(peer);
+        conf1.add_peer(peer);
+        conf1.add_peer(peer1);
+        conf2.add_peer(peer1);
+
+        // There are currently no configuration changes in progress
+        {
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            std::shared_ptr<MockNode> mockNode =
+                std::make_shared<MockNode>(logicPoolID, copysetID);
+            ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
+            copysetNode.SetCopysetNode(mockNode);
+
+            ConfigChangeType type;
+            Configuration oldConf;
+            Peer alterPeer;
+
+            copysetNode.on_leader_start(8);
+            NodeStatus status;
+            status.state = braft::State::STATE_LEADER;
+            EXPECT_CALL(*mockNode, get_status(_))
+                .WillOnce(SetArgPointee<0>(status));
+            EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer));
+            EXPECT_EQ(ConfigChangeType::NONE, type);
+        }
+        // Currently adding Peer
+        {
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            std::shared_ptr<MockNode> mockNode =
+                std::make_shared<MockNode>(logicPoolID, copysetID);
+            ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
+            copysetNode.SetCopysetNode(mockNode);
+
+            ConfigChangeType type;
+            Configuration oldConf;
+            Peer alterPeer;
+
+            copysetNode.on_leader_start(8);
+
+            EXPECT_CALL(*mockNode, add_peer(_, _)).Times(1);
+            EXPECT_CALL(*mockNode, remove_peer(_, _))
+                .WillOnce(Invoke([](const PeerId &peer, braft::Closure *done)
+                                 { done->status().set_error(
+                                       -1, "another config change is ongoing"); }));
+            Peer addPeer;
+            addPeer.set_address("127.0.0.1:3202:0");
+            Peer removePeer;
+            removePeer.set_address("127.0.0.1:3200:0");
+            copysetNode.AddPeer(addPeer);
+            copysetNode.RemovePeer(removePeer);
+
+            NodeStatus status;
+            status.state = braft::State::STATE_LEADER;
+            EXPECT_CALL(*mockNode, get_status(_))
+                .WillOnce(SetArgPointee<0>(status));
+            EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer));
+            EXPECT_EQ(ConfigChangeType::ADD_PEER, type);
+            EXPECT_EQ(addPeer.address(), alterPeer.address());
+        }
+        // Currently removing Peer
+        {
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            std::shared_ptr<MockNode> mockNode =
+                std::make_shared<MockNode>(logicPoolID, copysetID);
+            ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
+            copysetNode.SetCopysetNode(mockNode);
+
+            ConfigChangeType type;
+            Configuration oldConf;
+            Peer alterPeer;
+
+            copysetNode.on_leader_start(8);
+
+            EXPECT_CALL(*mockNode, remove_peer(_, _)).Times(1);
+            EXPECT_CALL(*mockNode, add_peer(_, _))
+                .WillOnce(
+                    Invoke([](const braft::PeerId &peer, braft::Closure *done)
+                           { done->status().set_error(
+                                 -1, "another config change is ongoing"); }));
+            Peer addPeer1;
+            addPeer1.set_address("127.0.0.1:3202:0");
+            Peer removePeer;
+            removePeer.set_address("127.0.0.1:3200:0");
+            copysetNode.RemovePeer(removePeer);
+            copysetNode.AddPeer(addPeer1);
+
+            NodeStatus status;
+            status.state = braft::State::STATE_LEADER;
+            EXPECT_CALL(*mockNode, get_status(_))
+                .WillOnce(SetArgPointee<0>(status));
+            EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer));
+            EXPECT_EQ(ConfigChangeType::REMOVE_PEER, type);
+            EXPECT_EQ(removePeer.address(), alterPeer.address());
+        }
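+        // TransferLeader is invoked three times below, but
+        // transfer_leadership_to is expected only twice: the third call should
+        // short-circuit because leader_id() already reports the transfer
+        // target as the current leader.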
+        // Currently transferring leader
+        {
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            std::shared_ptr<MockNode> mockNode =
+                std::make_shared<MockNode>(logicPoolID, copysetID);
+            ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
+            copysetNode.SetCopysetNode(mockNode);
+
+            ConfigChangeType type;
+            Configuration oldConf;
+            Peer alterPeer;
+
+            copysetNode.on_leader_start(8);
+
+            Peer transferee1;
+            transferee1.set_address("127.0.0.1:3201:0");
+            Peer transferee2;
+            transferee2.set_address("127.0.0.1:3200:0");
+            EXPECT_CALL(*mockNode, transfer_leadership_to(_))
+                .WillOnce(Return(0))
+                .WillOnce(Return(-1));
+            EXPECT_CALL(*mockNode, leader_id())
+                .WillOnce(Return(peer))
+                .WillOnce(Return(peer1))
+                .WillOnce(Return(peer));
+            copysetNode.TransferLeader(transferee1);
+            copysetNode.TransferLeader(transferee2);
+            copysetNode.TransferLeader(transferee2);
+
+            NodeStatus status;
+            status.state = braft::State::STATE_TRANSFERRING;
+            EXPECT_CALL(*mockNode, get_status(_))
+                .WillOnce(SetArgPointee<0>(status));
+            EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer));
+            EXPECT_EQ(ConfigChangeType::TRANSFER_LEADER, type);
+            EXPECT_EQ(transferee1.address(), alterPeer.address());
+        }
+        // Currently changing Peer
+        {
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            std::shared_ptr<MockNode> mockNode =
+                std::make_shared<MockNode>(logicPoolID, copysetID);
+            ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
+            copysetNode.SetCopysetNode(mockNode);
+
+            ConfigChangeType type;
+            Configuration oldConf;
+            Peer alterPeer;
+
+            copysetNode.on_leader_start(8);
+
+            EXPECT_CALL(*mockNode, change_peers(_, _)).Times(1);
+
+            Peer addPeer1;
+            addPeer1.set_address("127.0.0.1:3201:0");
+            std::vector<Peer> peers;
+            peers.emplace_back(addPeer1);
+            copysetNode.ChangePeer(peers);
+            Peer addPeer2;
+            addPeer2.set_address("127.0.0.1:3202:0");
+            peers.emplace_back(addPeer2);
+            copysetNode.ChangePeer(peers);
+
+            NodeStatus status;
+            status.state = braft::State::STATE_LEADER;
+            EXPECT_CALL(*mockNode, get_status(_))
+                .WillOnce(SetArgPointee<0>(status));
+            EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer));
+            EXPECT_EQ(ConfigChangeType::CHANGE_PEER, type);
+            EXPECT_EQ(addPeer1.address(), alterPeer.address());
+        }
+        // leader term is less than 0
+        {
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+            ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
+            ConfigChangeType type;
+            Configuration oldConf;
+            Peer alterPeer;
+            copysetNode.on_leader_start(-1);
+            EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer));
+            EXPECT_EQ(ConfigChangeType::NONE, type);
+        }
+    }
-        ASSERT_EQ(-1, copysetNode.GetHash(&hash));
-    }
+    TEST_F(CopysetNodeTest, get_hash)
+    {
+        std::shared_ptr<LocalFileSystem> fs(
+            LocalFsFactory::CreateFs(FileSystemType::EXT4, ""));  // NOLINT
+        std::string rmCmd("rm -f ");
+        rmCmd += kCurveConfEpochFilename;
+
+        LogicPoolID logicPoolID = 1 + 1;
+        CopysetID copysetID = 1 + 1;
+        Configuration conf;
+        Configuration conf1;
+        PeerId peer("127.0.0.1:3200:0");
+        PeerId peer1("127.0.0.1:3201:0");
+        PeerId emptyPeer;
+        conf.add_peer(peer);
+        conf1.add_peer(peer);
+        conf1.add_peer(peer1);
+
+        std::string hashValue = std::to_string(1355371765);
+        // get hash
+        {
+            std::string hash;
+            CopysetNode copysetNode(logicPoolID, copysetID, conf);
+
+            ASSERT_EQ(0, copysetNode.Init(defaultOptions_));
+
+            // Generate multiple files with data
+            ::system(
+                "echo \"abcddddddddd333\" >"
+                "copyset_node_test/8589934594/data/test-2.txt");
+            ::system(
+                "echo \"mmmmmmmm\" >"
+                "copyset_node_test/8589934594/data/test-4.txt");
+            ::system(
+                "dd if=/dev/zero of="
+                "copyset_node_test/8589934594/data/test-3.txt bs=512 count=15");  // NOLINT
+            ::system(
+                "echo \"eeeeeeeeeee\" > "
+                "copyset_node_test/8589934594/data/test-5.txt");
+
+            ::system("touch copyset_node_test/8589934594/data/test-1.txt");
+            ::system(
+                "echo \"wwwww\" > "
+                "copyset_node_test/8589934594/data/test-1.txt");
+
+            // Get hash
+            ASSERT_EQ(0, copysetNode.GetHash(&hash));
+            ASSERT_STREQ(hashValue.c_str(), hash.c_str());
+            ::system("rm -fr copyset_node_test/8589934594");
+        }
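+
+        // GetHash must not depend on the order in which the data files were
+        // created, so the next block writes the same files in a different
+        // order and expects the identical hash value.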
"copyset_node_test/8589934594/data/test-3.txt bs=512 count=15"); // NOLINT + ::system( + "echo \"eeeeeeeeeee\" > " + "copyset_node_test/8589934594/data/test-5.txt"); + + ::system("touch copyset_node_test/8589934594/data/test-1.txt"); + ::system( + "echo \"wwwww\" > " + "copyset_node_test/8589934594/data/test-1.txt"); + + // Get hash + ASSERT_EQ(0, copysetNode.GetHash(&hash)); + ASSERT_STREQ(hashValue.c_str(), hash.c_str()); + ::system("rm -fr copyset_node_test/8589934594"); + } + + { + std::string hash; + // Using different copyset IDs to make the directory different + CopysetNode copysetNode(logicPoolID, copysetID + 1, conf); + + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + + // Generate multiple files with data and exchange the order of generated + // files + ::system("touch copyset_node_test/8589934595/data/test-1.txt"); + ::system( + "echo \"wwwww\" > " + "copyset_node_test/8589934595/data/test-1.txt"); + + ::system( + "echo \"mmmmmmmm\" > " + "copyset_node_test/8589934595/data/test-4.txt"); + ::system( + "echo \"eeeeeeeeeee\" > " + "copyset_node_test/8589934595/data/test-5.txt"); + ::system( + "dd if=/dev/zero of=" + "copyset_node_test/8589934595/data/test-3.txt bs=512 count=15"); // NOLINT + ::system( + "echo \"abcddddddddd333\" > " + "copyset_node_test/8589934595/data/test-2.txt"); + + // Get hash + ASSERT_EQ(0, copysetNode.GetHash(&hash)); + ASSERT_STREQ(hashValue.c_str(), hash.c_str()); + ::system("rm -fr copyset_node_test/8589934595"); + } + + // List failed + { + std::string hash; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + + std::shared_ptr mockfs = + std::make_shared(); + copysetNode.SetLocalFileSystem(mockfs); + + std::vector files; + files.push_back("test-1.txt"); + + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1)); + + ASSERT_EQ(-1, copysetNode.GetHash(&hash)); + } + + // List success + { + std::string hash; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + std::shared_ptr mockfs = + std::make_shared(); + copysetNode.SetLocalFileSystem(mockfs); + + std::vector files; + + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); + + ASSERT_EQ(0, copysetNode.GetHash(&hash)); + ASSERT_EQ(hash, "0"); + } + + // List success, open failed + { + std::string hash; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + std::shared_ptr mockfs = + std::make_shared(); + copysetNode.SetLocalFileSystem(mockfs); + + std::vector files; + files.push_back("test-1.txt"); + + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1)); + + ASSERT_EQ(-1, copysetNode.GetHash(&hash)); + } + + // List success, open success,fstat failed + { + std::string hash; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + std::shared_ptr mockfs = + std::make_shared(); + copysetNode.SetLocalFileSystem(mockfs); + + std::vector files; + files.push_back("test-1.txt"); + + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(3)); + EXPECT_CALL(*mockfs, Fstat(_, _)).Times(1).WillOnce(Return(-1)); + + ASSERT_EQ(-1, copysetNode.GetHash(&hash)); + } + + // List success, open success, fstat success, read failed + { + std::string hash; + struct stat fileInfo; + fileInfo.st_size = 1024; + CopysetNode 
copysetNode(logicPoolID, copysetID, conf); + std::shared_ptr mockfs = + std::make_shared(); + copysetNode.SetLocalFileSystem(mockfs); + + std::vector files; + files.push_back("test-1.txt"); + + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(3)); + EXPECT_CALL(*mockfs, Fstat(_, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_CALL(*mockfs, Read(_, _, _, _)).Times(1).WillOnce(Return(-1)); + + ASSERT_EQ(-1, copysetNode.GetHash(&hash)); + } + + // List success, open success, fstat success, read success + { + char *buff = new (std::nothrow) char[1024]; + ::memset(buff, 'a', 1024); + std::string hash; + struct stat fileInfo; + fileInfo.st_size = 1024; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + std::shared_ptr mockfs = + std::make_shared(); + copysetNode.SetLocalFileSystem(mockfs); + + std::vector files; + files.push_back("test-1.txt"); + + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(3)); + EXPECT_CALL(*mockfs, Fstat(_, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_CALL(*mockfs, Read(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<1>(*buff), Return(1024))); + + ASSERT_EQ(0, copysetNode.GetHash(&hash)); + } + } - // List success, open success, fstat success, read failed - { - std::string hash; - struct stat fileInfo; - fileInfo.st_size = 1024; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockfs = - std::make_shared(); - copysetNode.SetLocalFileSystem(mockfs); - - std::vector files; - files.push_back("test-1.txt"); - - EXPECT_CALL(*mockfs, List(_, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(3)); - EXPECT_CALL(*mockfs, Fstat(_, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); - EXPECT_CALL(*mockfs, Read(_, _, _, _)).Times(1).WillOnce(Return(-1)); - - ASSERT_EQ(-1, copysetNode.GetHash(&hash)); - } - - // List success, open success, fstat success, read success - { - char* buff = new (std::nothrow) char[1024]; - ::memset(buff, 'a', 1024); - std::string hash; - struct stat fileInfo; - fileInfo.st_size = 1024; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockfs = - std::make_shared(); - copysetNode.SetLocalFileSystem(mockfs); - - std::vector files; - files.push_back("test-1.txt"); - - EXPECT_CALL(*mockfs, List(_, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(3)); - EXPECT_CALL(*mockfs, Fstat(_, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); - EXPECT_CALL(*mockfs, Read(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(*buff), Return(1024))); - - ASSERT_EQ(0, copysetNode.GetHash(&hash)); - } -} - -TEST_F(CopysetNodeTest, get_leader_status) { - LogicPoolID logicPoolID = 1; - CopysetID copysetID = 1; - Configuration conf; - std::shared_ptr mockNode = - std::make_shared(logicPoolID, copysetID); - CopysetNode copysetNode(logicPoolID, copysetID, conf); - copysetNode.SetCopysetNode(mockNode); - - // The current peer is not a leader, and there is currently no leader - { - NodeStatus status; - EXPECT_CALL(*mockNode, get_status(_)) - .WillOnce(SetArgPointee<0>(status)); - NodeStatus leaderStatus; - 
ASSERT_FALSE(copysetNode.GetLeaderStatus(&leaderStatus)); - } - - // The current peer is the leader - { - NodeStatus status; - status.leader_id.parse("127.0.0.1:3200:0"); - status.peer_id = status.leader_id; - status.committed_index = 6666; - EXPECT_CALL(*mockNode, get_status(_)) - .WillOnce(SetArgPointee<0>(status)); - NodeStatus leaderStatus; - ASSERT_TRUE(copysetNode.GetLeaderStatus(&leaderStatus)); - ASSERT_EQ(status.committed_index, leaderStatus.committed_index); - } - - // There is a leader, but it is not the current peer - { - // Simulate starting chunkserver - CopysetNodeManager* copysetNodeManager = - &CopysetNodeManager::GetInstance(); - ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); - ASSERT_EQ(0, copysetNodeManager->Run()); - PeerId leader_peer("127.0.0.1:9044:0"); - brpc::Server server; - ASSERT_EQ(0, copysetNodeManager->AddService(&server, leader_peer.addr)); - if (server.Start(port, NULL) != 0) { - LOG(FATAL) << "Fail to start Server"; + TEST_F(CopysetNodeTest, get_leader_status) + { + LogicPoolID logicPoolID = 1; + CopysetID copysetID = 1; + Configuration conf; + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); + CopysetNode copysetNode(logicPoolID, copysetID, conf); + copysetNode.SetCopysetNode(mockNode); + + // The current peer is not a leader, and there is currently no leader + { + NodeStatus status; + EXPECT_CALL(*mockNode, get_status(_)) + .WillOnce(SetArgPointee<0>(status)); + NodeStatus leaderStatus; + ASSERT_FALSE(copysetNode.GetLeaderStatus(&leaderStatus)); + } + + // The current peer is the leader + { + NodeStatus status; + status.leader_id.parse("127.0.0.1:3200:0"); + status.peer_id = status.leader_id; + status.committed_index = 6666; + EXPECT_CALL(*mockNode, get_status(_)) + .WillOnce(SetArgPointee<0>(status)); + NodeStatus leaderStatus; + ASSERT_TRUE(copysetNode.GetLeaderStatus(&leaderStatus)); + ASSERT_EQ(status.committed_index, leaderStatus.committed_index); + } + + // There is a leader, but it is not the current peer + { + // Simulate starting chunkserver + CopysetNodeManager *copysetNodeManager = + &CopysetNodeManager::GetInstance(); + ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); + ASSERT_EQ(0, copysetNodeManager->Run()); + PeerId leader_peer("127.0.0.1:9044:0"); + brpc::Server server; + ASSERT_EQ(0, copysetNodeManager->AddService(&server, leader_peer.addr)); + if (server.Start(port, NULL) != 0) + { + LOG(FATAL) << "Fail to start Server"; + } + // Construct a leader copyset + ASSERT_TRUE(copysetNodeManager->CreateCopysetNode(logicPoolID, + copysetID, conf)); + auto leaderNode = + copysetNodeManager->GetCopysetNode(logicPoolID, copysetID); + ASSERT_TRUE(nullptr != leaderNode); + // Set expected values + std::shared_ptr mockLeader = + std::make_shared(logicPoolID, copysetID); + leaderNode->SetCopysetNode(mockLeader); + NodeStatus mockLeaderStatus; + mockLeaderStatus.leader_id = leader_peer; + mockLeaderStatus.peer_id = leader_peer; + mockLeaderStatus.committed_index = 10000; + mockLeaderStatus.known_applied_index = 6789; + EXPECT_CALL(*mockLeader, get_status(_)) + .WillRepeatedly(SetArgPointee<0>(mockLeaderStatus)); + + // Test obtaining the committed index of the leader through the node of + // the follower + NodeStatus followerStatus; + followerStatus.leader_id = leader_peer; + followerStatus.peer_id.parse("127.0.0.1:3201:0"); + followerStatus.committed_index = 3456; + followerStatus.known_applied_index = 3456; + EXPECT_CALL(*mockNode, get_status(_)) + .WillOnce(SetArgPointee<0>(followerStatus)); + + NodeStatus 
leaderStatus; + ASSERT_TRUE(copysetNode.GetLeaderStatus(&leaderStatus)); + ASSERT_EQ(mockLeaderStatus.committed_index, + leaderStatus.committed_index); + ASSERT_EQ(mockLeaderStatus.known_applied_index, + leaderStatus.known_applied_index); + } } - // Construct a leader copyset - ASSERT_TRUE(copysetNodeManager->CreateCopysetNode(logicPoolID, - copysetID, conf)); - auto leaderNode = - copysetNodeManager->GetCopysetNode(logicPoolID, copysetID); - ASSERT_TRUE(nullptr != leaderNode); - // Set expected values - std::shared_ptr mockLeader = - std::make_shared(logicPoolID, copysetID); - leaderNode->SetCopysetNode(mockLeader); - NodeStatus mockLeaderStatus; - mockLeaderStatus.leader_id = leader_peer; - mockLeaderStatus.peer_id = leader_peer; - mockLeaderStatus.committed_index = 10000; - mockLeaderStatus.known_applied_index = 6789; - EXPECT_CALL(*mockLeader, get_status(_)) - .WillRepeatedly(SetArgPointee<0>(mockLeaderStatus)); - - // Test obtaining the committed index of the leader through the node of - // the follower - NodeStatus followerStatus; - followerStatus.leader_id = leader_peer; - followerStatus.peer_id.parse("127.0.0.1:3201:0"); - followerStatus.committed_index = 3456; - followerStatus.known_applied_index = 3456; - EXPECT_CALL(*mockNode, get_status(_)) - .WillOnce(SetArgPointee<0>(followerStatus)); - - NodeStatus leaderStatus; - ASSERT_TRUE(copysetNode.GetLeaderStatus(&leaderStatus)); - ASSERT_EQ(mockLeaderStatus.committed_index, - leaderStatus.committed_index); - ASSERT_EQ(mockLeaderStatus.known_applied_index, - leaderStatus.known_applied_index); - } -} - -TEST_F(CopysetNodeTest, is_lease_leader) { - LogicPoolID logicPoolID = 1; - CopysetID copysetID = 1; - Configuration conf; - std::shared_ptr mockNode = - std::make_shared(logicPoolID, copysetID); - CopysetNode copysetNode(logicPoolID, copysetID, conf); - copysetNode.Init(defaultOptions_); - copysetNode.SetCopysetNode(mockNode); - - EXPECT_FALSE(copysetNode.IsLeaderTerm()); - EXPECT_EQ(-1, copysetNode.LeaderTerm()); - - // not leader now - { - std::vector states = { - braft::LEASE_DISABLED, braft::LEASE_VALID, braft::LEASE_NOT_READY, - braft::LEASE_EXPIRED}; - braft::LeaderLeaseStatus status; - for (auto& state : states) { - status.state = state; - ASSERT_FALSE(copysetNode.IsLeaseLeader(status)); + + TEST_F(CopysetNodeTest, is_lease_leader) + { + LogicPoolID logicPoolID = 1; + CopysetID copysetID = 1; + Configuration conf; + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); + CopysetNode copysetNode(logicPoolID, copysetID, conf); + copysetNode.Init(defaultOptions_); + copysetNode.SetCopysetNode(mockNode); + + EXPECT_FALSE(copysetNode.IsLeaderTerm()); + EXPECT_EQ(-1, copysetNode.LeaderTerm()); + + // not leader now + { + std::vector states = { + braft::LEASE_DISABLED, braft::LEASE_VALID, braft::LEASE_NOT_READY, + braft::LEASE_EXPIRED}; + braft::LeaderLeaseStatus status; + for (auto &state : states) + { + status.state = state; + ASSERT_FALSE(copysetNode.IsLeaseLeader(status)); + } + } + + // ABA problem, current node is term 8(on leader start), + // but leader lease term is 10 + { + copysetNode.on_leader_start(8); + braft::LeaderLeaseStatus status; + status.term = 10; + status.state = braft::LEASE_NOT_READY; + ASSERT_FALSE(copysetNode.IsLeaseLeader(status)); + } + + // normal condition + { + copysetNode.on_leader_start(10); + braft::LeaderLeaseStatus status; + status.term = 10; + status.state = braft::LEASE_VALID; + ASSERT_TRUE(copysetNode.IsLeaseLeader(status)); + } } - } - // ABA problem, current node is term 8(on 
leader start), - // but leader lease term is 10 - { - copysetNode.on_leader_start(8); - braft::LeaderLeaseStatus status; - status.term = 10; - status.state = braft::LEASE_NOT_READY; - ASSERT_FALSE(copysetNode.IsLeaseLeader(status)); - } - - // normal condition - { - copysetNode.on_leader_start(10); - braft::LeaderLeaseStatus status; - status.term = 10; - status.state = braft::LEASE_VALID; - ASSERT_TRUE(copysetNode.IsLeaseLeader(status)); - } -} - -} // namespace chunkserver -} // namespace curve + } // namespace chunkserver +} // namespace curve diff --git a/test/client/copyset_client_test.cpp b/test/client/copyset_client_test.cpp index 548db4f6d0..759cb6fc3c 100644 --- a/test/client/copyset_client_test.cpp +++ b/test/client/copyset_client_test.cpp @@ -28,8 +28,8 @@ #include #include -#include // NOLINT -#include //NOLINT +#include // NOLINT +#include //NOLINT #include "src/client/chunk_closure.h" #include "src/client/metacache.h" @@ -42,3990 +42,4030 @@ #include "test/client/mock/mock_request_context.h" #include "test/client/mock/mock_request_scheduler.h" -namespace curve { -namespace client { - -using curve::chunkserver::CHUNK_OP_STATUS; -using curve::chunkserver::ChunkRequest; - -using curve::client::MetaCache; -using curve::common::TimeUtility; -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::DoAll; -using ::testing::InSequence; -using ::testing::Invoke; -using ::testing::Return; -using ::testing::SaveArgPointee; -using ::testing::SetArgPointee; -using ::testing::SetArgReferee; - -class CopysetClientTest : public testing::Test { - protected: - virtual void SetUp() { - listenAddr_ = "127.0.0.1:9109"; - server_ = new brpc::Server(); - } - - virtual void TearDown() { - server_->Stop(0); - server_->Join(); - delete server_; - server_ = nullptr; - } - - public: - std::string listenAddr_; - brpc::Server* server_; -}; - -/* TODO(wudemiao) current controller error cannot be returned through mock */ -int gWriteCntlFailedCode = 0; -int gReadCntlFailedCode = 0; - -static void WriteChunkFunc(::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - google::protobuf::Closure* done) { - /* return response */ - brpc::ClosureGuard doneGuard(done); - if (0 != gWriteCntlFailedCode) { - if (gWriteCntlFailedCode == brpc::ERPCTIMEDOUT) { - std::this_thread::sleep_for(std::chrono::milliseconds(3500)); +namespace curve +{ + namespace client + { + + using curve::chunkserver::CHUNK_OP_STATUS; + using curve::chunkserver::ChunkRequest; + + using curve::client::MetaCache; + using curve::common::TimeUtility; + using ::testing::_; + using ::testing::AnyNumber; + using ::testing::AtLeast; + using ::testing::DoAll; + using ::testing::InSequence; + using ::testing::Invoke; + using ::testing::Return; + using ::testing::SaveArgPointee; + using ::testing::SetArgPointee; + using ::testing::SetArgReferee; + + class CopysetClientTest : public testing::Test + { + protected: + virtual void SetUp() + { + listenAddr_ = "127.0.0.1:9109"; + server_ = new brpc::Server(); + } + + virtual void TearDown() + { + server_->Stop(0); + server_->Join(); + delete server_; + server_ = nullptr; + } + + public: + std::string listenAddr_; + brpc::Server *server_; + }; + + /* TODO(wudemiao) current controller error cannot be returned through mock */ + int gWriteCntlFailedCode = 0; + int gReadCntlFailedCode = 0; + + static void WriteChunkFunc(::google::protobuf::RpcController *controller, + const 
::curve::chunkserver::ChunkRequest *request, + ::curve::chunkserver::ChunkResponse *response, + google::protobuf::Closure *done) + { + /* return response */ + brpc::ClosureGuard doneGuard(done); + if (0 != gWriteCntlFailedCode) + { + if (gWriteCntlFailedCode == brpc::ERPCTIMEDOUT) + { + std::this_thread::sleep_for(std::chrono::milliseconds(3500)); + } + brpc::Controller *cntl = dynamic_cast(controller); + cntl->SetFailed(gWriteCntlFailedCode, "write controller error"); + } } - brpc::Controller* cntl = dynamic_cast(controller); - cntl->SetFailed(gWriteCntlFailedCode, "write controller error"); - } -} - -static void ReadChunkFunc(::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - google::protobuf::Closure* done) { - brpc::ClosureGuard doneGuard(done); - if (0 != gReadCntlFailedCode) { - if (gReadCntlFailedCode == brpc::ERPCTIMEDOUT) { - std::this_thread::sleep_for(std::chrono::milliseconds(4000)); + + static void ReadChunkFunc(::google::protobuf::RpcController *controller, + const ::curve::chunkserver::ChunkRequest *request, + ::curve::chunkserver::ChunkResponse *response, + google::protobuf::Closure *done) + { + brpc::ClosureGuard doneGuard(done); + if (0 != gReadCntlFailedCode) + { + if (gReadCntlFailedCode == brpc::ERPCTIMEDOUT) + { + std::this_thread::sleep_for(std::chrono::milliseconds(4000)); + } + brpc::Controller *cntl = dynamic_cast(controller); + cntl->SetFailed(gReadCntlFailedCode, "read controller error"); + } } - brpc::Controller* cntl = dynamic_cast(controller); - cntl->SetFailed(gReadCntlFailedCode, "read controller error"); - } -} - -static void ReadChunkSnapshotFunc( - ::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, // NOLINT - ::curve::chunkserver::ChunkResponse* response, // NOLINT - google::protobuf::Closure* done) { - brpc::ClosureGuard doneGuard(done); - if (0 != gReadCntlFailedCode) { - brpc::Controller* cntl = dynamic_cast(controller); - cntl->SetFailed(-1, "read snapshot controller error"); - } -} - -static void DeleteChunkSnapshotFunc( - ::google::protobuf::RpcController* controller, // NOLINT - const ::curve::chunkserver::ChunkRequest* request, // NOLINT - ::curve::chunkserver::ChunkResponse* response, - google::protobuf::Closure* done) { - brpc::ClosureGuard doneGuard(done); - if (0 != gReadCntlFailedCode) { - brpc::Controller* cntl = dynamic_cast(controller); - cntl->SetFailed(-1, "delete snapshot controller error"); - } -} - -static void CreateCloneChunkFunc( - ::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - google::protobuf::Closure* done) { - brpc::ClosureGuard doneGuard(done); - if (0 != gReadCntlFailedCode) { - brpc::Controller* cntl = dynamic_cast(controller); - cntl->SetFailed(-1, "create clone chunk controller error"); - } -} - -static void RecoverChunkFunc( - ::google::protobuf::RpcController* controller, // NOLINT - const ::curve::chunkserver::ChunkRequest* request, // NOLINT - ::curve::chunkserver::ChunkResponse* response, - google::protobuf::Closure* done) { - brpc::ClosureGuard doneGuard(done); - if (0 != gReadCntlFailedCode) { - brpc::Controller* cntl = dynamic_cast(controller); - cntl->SetFailed(-1, "recover chunk controller error"); - } -} - -static void GetChunkInfoFunc( - ::google::protobuf::RpcController* controller, - const ::curve::chunkserver::GetChunkInfoRequest* request, // NOLINT - 
::curve::chunkserver::GetChunkInfoResponse* response, // NOLINT - google::protobuf::Closure* done) { - brpc::ClosureGuard doneGuard(done); - if (0 != gReadCntlFailedCode) { - brpc::Controller* cntl = dynamic_cast(controller); - cntl->SetFailed(-1, "get chunk info controller error"); - } -} - -TEST_F(CopysetClientTest, normal_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ( - server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), - 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - - RequestScheduler scheduler; - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - uint64_t fileId = 1; - uint64_t epoch = 1; - uint64_t sn = 1; - size_t len = 8; - char buff1[8 + 1]; - char buff2[8 + 1]; - memset(buff1, 'a', 8); - memset(buff2, 'a', 8); - buff1[8] = '\0'; - buff2[8] = '\0'; - off_t offset = 0; - - butil::IOBuf iobuf; - iobuf.append(buff1, sizeof(buff1) - 1); - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); - iot.PrepareReadIOBuffers(1); - - // write success - for (int i = 0; i < 10; ++i) { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - - reqCtx->offset_ = i * 8; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(1) - .WillOnce( - DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = offset; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(1) - .WillOnce( - 
DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = offset; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(-1)) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(1) - .WillOnce( - DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - // read success - for (int i = 0; i < 10; ++i) { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->seq_ = sn; - reqCtx->offset_ = i * 8; - reqCtx->rawlength_ = len; - reqCtx->subIoIndex_ = 0; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = offset; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - { - RequestContext* reqCtx = new FakeRequestContext(); - 
reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = offset; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(-1)) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } -} - -/** - * write error testing - */ -TEST_F(CopysetClientTest, write_error_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ( - server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), - 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 1000; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 5000; - ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 3500; - ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 3500000; - - RequestScheduleOption reqopt; - reqopt.ioSenderOpt = ioSenderOpt; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - - RequestScheduler scheduler; - scheduler.Init(reqopt, &mockMetaCache); - scheduler.Run(); - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - uint64_t fileId = 1; - uint64_t epoch = 1; - size_t len = 8; - char buff1[8 + 1]; - char buff2[8 + 1]; - memset(buff1, 'a', 8); - memset(buff2, 'a', 8); - buff1[8] = '\0'; - buff2[8] = '\0'; - off_t offset = 0; - - butil::IOBuf iobuf; - iobuf.append(buff1, sizeof(buff1) - 1); - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); - - /* Illegal parameter */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(1) - .WillOnce( - DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, 
{}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, - reqDone->GetErrorCode()); - } - /* controller error */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - // The retry sleep time set in the configuration file is 5000, as there - // is no triggering of underlying index backoff, so there will be no - // sleep between retries - uint64_t start = TimeUtility::GetTimeofDayUs(); - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - gWriteCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(3) - .WillRepeatedly(Invoke(WriteChunkFunc)); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayUs(); - ASSERT_GT(end - start, 10000); - gWriteCntlFailedCode = 0; - } - /* controller set timeout */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - // The retry timeout set by the configuration file is 5000 because the - // chunkserver setting returns timeout Causing the triggering of an - // exponential backoff of the underlying timeout time, increasing the - // interval between each retry. 
Three plain retries - // would need only about 3 * 1000 ms, but with the backoff the timeouts - // grow to 1000+2000+2000=5000 ms; allowing for the random factor, the - // test asserts that the total stays between 3000 and 6000 ms - uint64_t start = TimeUtility::GetTimeofDayMs(); - - reqCtx->done_ = reqDone; - gWriteCntlFailedCode = brpc::ERPCTIMEDOUT; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(3)) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(3) - .WillRepeatedly(Invoke(WriteChunkFunc)); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayMs(); - ASSERT_GT(end - start, 3000); - ASSERT_LT(end - start, 6000); - std::this_thread::sleep_for(std::chrono::seconds(8)); - - gWriteCntlFailedCode = 0; - } - - /* Set overload */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - // The retry sleep interval is configured as 5000 us; because the - // chunkserver keeps returning overload, the underlying exponential - // backoff of the sleep interval kicks in, lengthening each retry. - // Three plain retries would only sleep about 3 * 5000 us, but with the - // backoff the sleeps grow to 10000+20000=30000 us; allowing for the - // random factor, the test asserts that the total stays between 28000 - // and 2 * 50000 us - uint64_t start = TimeUtility::GetTimeofDayUs(); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(3) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, - reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayUs(); - ASSERT_GT(end - start, 28000); - ASSERT_LT(end - start, 2 * 50000); - gWriteCntlFailedCode = 0; - } - - /* Other errors */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); -
EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(3) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, - reqDone->GetErrorCode()); - } - /* Not a leader, returning the correct leader */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response1.set_redirect(leaderStr); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - - ASSERT_EQ(1, fm.writeRPC.redirectQps.count.get_value()); - } - /* Not a leader, did not return a leader, refreshing the meta cache - * succeeded */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - // response1.set_redirect(leaderStr2); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); - - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, did not return a leader, refreshing the meta cache failed - */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new 
FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - // response1.set_redirect(leaderStr2); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but returned an incorrect leader */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - FileMetric fm("test"); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(3) - .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(3) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); - auto startTimeUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - auto elapsed = - curve::common::TimeUtility::GetTimeofDayUs() - startTimeUs; - // chunkserverOPRetryIntervalUS = 5000: on a redirect the client sleeps - // 500us before each retry and retries twice in all - // (chunkserverOPMaxRetry=3, and the request returns once the retry - // count reaches that limit), so the total time spent is at least - // 1000us - ASSERT_GE(elapsed, 1000); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, - reqDone->GetErrorCode()); - ASSERT_EQ(3, fm.writeRPC.redirectQps.count.get_value()); - } - /* copyset does not exist, updating leader still failed */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) -
.WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(3) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - reqDone->GetErrorCode()); - } - /* copyset does not exist, updating leader succeeded */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response1.set_redirect(leaderStr); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - // epoch too old - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(1) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD, - reqDone->GetErrorCode()); - } - - scheduler.Fini(); -} - -/** - * write failed testing - */ -TEST_F(CopysetClientTest, write_failed_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ( - server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), - 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 500; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 50; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 5000; - ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 1000; - ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 100000; - - 
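// ---------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch): the five
// failRequestOpt knobs above drive every timing assertion in this test.
// A minimal sketch of the double-and-cap rule that the comments below
// describe, with an assumed helper name (the real policy lives in the
// client's retry path):
//
//     #include <cstdint>
//
//     // RPC timeout for the given attempt (0-based): the base value
//     // doubles per attempt, but only while the doubled value still
//     // fits under the configured cap.
//     uint64_t BackoffTimeoutMs(uint64_t baseMs, uint64_t maxMs,
//                               int attempt) {
//         uint64_t timeoutMs = baseMs;
//         for (int i = 0; i < attempt; ++i) {
//             if (timeoutMs * 2 > maxMs) break;  // cap reached
//             timeoutMs *= 2;
//         }
//         return timeoutMs;
//     }
//
// With base = 500 ms and cap = 1000 ms (the values above) the per-attempt
// timeouts are 500, 1000, 1000, ..., so 50 timed-out attempts cost about
// 500 + 49 * 1000 ms, which is the window the 25000..55000 ms assertions
// below allow for; with base = 1000 ms and cap = 3500 ms (write_error_test)
// the same rule yields the 1000 + 2000 + 2000 = 5000 ms quoted there.
// ---------------------------------------------------------------------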
RequestScheduleOption reqopt; - reqopt.ioSenderOpt = ioSenderOpt; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - - RequestScheduler scheduler; - scheduler.Init(reqopt, &mockMetaCache); - scheduler.Run(); - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - uint64_t fileId = 1; - uint64_t epoch = 1; - size_t len = 8; - char buff1[8 + 1]; - char buff2[8 + 1]; - memset(buff1, 'a', 8); - memset(buff2, 'a', 8); - buff1[8] = '\0'; - buff2[8] = '\0'; - off_t offset = 0; - butil::IOBuf iobuf; - iobuf.append(buff1, sizeof(buff1) - 1); - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); - - /* controller set timeout */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - // The RPC timeout is configured as 500 ms; because the chunkserver - // keeps returning timeouts, the underlying exponential backoff of the - // timeout kicks in, lengthening each retry. 50 plain retries would - // need only about 49 * 500 ms of timeout, but with the backoff capped - // at 1000 ms the total grows to about 49 * 1000 = 49000 ms - uint64_t start = TimeUtility::GetTimeofDayMs(); - - reqCtx->done_ = reqDone; - gWriteCntlFailedCode = brpc::ERPCTIMEDOUT; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(50)) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(50) - .WillRepeatedly(Invoke(WriteChunkFunc)); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayMs(); - ASSERT_GT(end - start, 25000); - ASSERT_LT(end - start, 55000); - std::this_thread::sleep_for(std::chrono::seconds(8)); - - gWriteCntlFailedCode = 0; - } - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - // The retry sleep interval is configured as 5000 us; because the - // chunkserver keeps returning overload, the underlying exponential - // backoff of the sleep interval kicks in, lengthening each retry. - // 50 plain retries would need only about 49 * 5000 us of sleep, but - // with the backoff the sleeps grow to 10000 + 20000 + 40000...
~= 4650000 us in total - uint64_t start = TimeUtility::GetTimeofDayUs(); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(50) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(50) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, - reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayUs(); - ASSERT_GT(end - start, 250000); - ASSERT_LT(end - start, 4650000); - gWriteCntlFailedCode = 0; - } - scheduler.Fini(); -} - -/** - * read failed testing - */ -TEST_F(CopysetClientTest, read_failed_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ( - server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), - 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 500; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 50; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 5000; - ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 1000; - ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 100000; - - RequestScheduleOption reqopt; - reqopt.ioSenderOpt = ioSenderOpt; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - - RequestScheduler scheduler; - scheduler.Init(reqopt, &mockMetaCache); - scheduler.Run(); - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - uint64_t sn = 1; - size_t len = 8; - char buff1[8 + 1]; - char buff2[8 + 1]; - memset(buff1, 'a', 8); - memset(buff2, 'a', 8); - buff1[8] = '\0'; - buff2[8] = '\0'; - off_t offset = 0; - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); - iot.PrepareReadIOBuffers(1); - - /* controller set timeout */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - // The RPC timeout is configured as 500 ms; because the chunkserver - // keeps returning timeouts, the underlying exponential backoff of the - // timeout kicks in, lengthening each retry.
50 plain retries would need - // only about 50 * 500 ms, but with the backoff the cumulative timeout - // grows to 500+1000+2000...~=60000 ms - uint64_t start = TimeUtility::GetTimeofDayMs(); - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - gReadCntlFailedCode = brpc::ERPCTIMEDOUT; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(50)) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(50) - .WillRepeatedly(Invoke(ReadChunkFunc)); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayMs(); - ASSERT_GT(end - start, 25000); - ASSERT_LT(end - start, 60000); - - std::this_thread::sleep_for(std::chrono::seconds(8)); - - gReadCntlFailedCode = 0; - } - - /* Set overload */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - // The retry sleep interval is configured as 5000 us; because the - // chunkserver keeps returning overload, the underlying exponential - // backoff of the sleep interval kicks in, lengthening each retry. - // 50 plain retries would need only about 49 * 5000 us of sleep, but - // with the backoff the sleeps grow to 10000 + 20000 + 40000 ...
= 4650000 us in total. Allowing - // for the random factor, the test asserts that the elapsed time stays - // between 250000 and 4650000 us - uint64_t start = TimeUtility::GetTimeofDayUs(); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(50) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(50) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, - reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayUs(); - ASSERT_GT(end - start, 250000); - ASSERT_LT(end - start, 4650000); - } - scheduler.Fini(); -} - -/** - * read error testing - */ -TEST_F(CopysetClientTest, read_error_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ( - server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), - 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 1000; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; - ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 3500; - ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 3500000; - - RequestScheduleOption reqopt; - reqopt.ioSenderOpt = ioSenderOpt; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - - RequestScheduler scheduler; - scheduler.Init(reqopt, &mockMetaCache); - scheduler.Run(); - - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - uint64_t sn = 1; - size_t len = 8; - char buff1[8 + 1]; - char buff2[8 + 1]; - memset(buff1, 'a', 8); - memset(buff2, 'a', 8); - buff1[8] = '\0'; - buff2[8] = '\0'; - off_t offset = 0; - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); - iot.PrepareReadIOBuffers(1); - - /* Illegal parameter */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, - reqDone->GetErrorCode()); - } - /* chunk not exist */ - { - RequestContext* reqCtx = new
FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(0, reqDone->GetErrorCode()); - } - /* controller error */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - // The retry interval is configured as 500 us and a controller error - // does not trigger the underlying exponential backoff, so each of the - // two retries sleeps the flat 500 us (hence end - start > 1000 below) - uint64_t start = TimeUtility::GetTimeofDayUs(); - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(3)) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(3) - .WillRepeatedly(Invoke(ReadChunkFunc)); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayUs(); - ASSERT_GT(end - start, 1000); - gReadCntlFailedCode = 0; - } - - /* controller set timeout */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - // The timeout configured in the settings file is 1000 ms, but because - // the chunkserver keeps timing out, exponential backoff kicks in and - // lengthens each retry. In normal conditions, three retries would only - // need about 3 * 1000 ms. However, with the added exponential backoff, - // the timeout intervals increase to 1000 + 2000 + 2000 = 5000 ms. - // Considering the random factor, the total elapsed time for the three - // retries should be greater than 3000 and less than 6000 ms, as the - // assertions below check.
- uint64_t start = TimeUtility::GetTimeofDayMs(); - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - gReadCntlFailedCode = brpc::ERPCTIMEDOUT; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(3)) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(3) - .WillRepeatedly(Invoke(ReadChunkFunc)); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayMs(); - ASSERT_GT(end - start, 3000); - ASSERT_LT(end - start, 6000); - - std::this_thread::sleep_for(std::chrono::seconds(8)); - - gReadCntlFailedCode = 0; - } - - /* Set overload */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - // The retry sleep time set in the configuration file is 500, but due to - // chunkserver timeouts, it triggers exponential backoff, increasing the - // interval between retries. In normal conditions, three retries would - // only require a sleep time of 3 * 500. However, with the added - // exponential backoff, the sleep intervals will increase to 1000 + 2000 - // = 3000. Considering the random factor, the total time for three - // retries should be greater than 2900 and less than 5000. 
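// ---------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch): the sleep
// arithmetic the comment above describes, assuming the sleep interval
// doubles per retry from chunkserverOPRetryIntervalUS and is clamped to
// chunkserverMaxRetrySleepIntervalUS. The helper name is an assumption:
//
//     #include <algorithm>
//     #include <cstdint>
//
//     // Sleep inserted before the given retry (1-based).
//     uint64_t RetrySleepUs(uint64_t baseUs, uint64_t maxUs, int retry) {
//         uint64_t sleepUs = baseUs << retry;  // 500 -> 1000 -> 2000 ...
//         return std::min(sleepUs, maxUs);     // never exceed the cap
//     }
//
// Only two sleeps separate the three OVERLOAD attempts:
//     RetrySleepUs(500, 3500000, 1) + RetrySleepUs(500, 3500000, 2)
//         = 1000 + 2000 = 3000 us,
// which is why the assertion below only demands end - start > 2900.
// ---------------------------------------------------------------------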
- uint64_t start = TimeUtility::GetTimeofDayUs(); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(3) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, - reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayUs(); - ASSERT_GT(end - start, 2900); - ASSERT_LT(end - start, 3 * 5000); - } - - /* Other errors */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(3) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, - reqDone->GetErrorCode()); - } - /* Not a leader, returning the correct leader */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response1.set_redirect(leaderStr); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but did not return a leader, refreshing the meta cache - * succeeded */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - 
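// ---------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch): this case
// and the two that follow pin down the redirect contract. When a
// REDIRECTED response carries a leader hint, the client adopts it via
// UpdateLeader(); when the hint is absent (note the commented-out
// set_redirect() call below), it falls back to re-resolving the leader
// through the metacache, and a failed refresh merely consumes one retry.
// A sketch of that branch with assumed names and a simplified signature,
// not the real API:
//
//     #include <string>
//
//     template <typename MetaCache>
//     int OnRedirected(MetaCache* cache, LogicPoolID lpid, CopysetID cpid,
//                      const std::string* leaderHint) {
//         if (leaderHint != nullptr) {
//             // believe the leader named by the chunkserver
//             return cache->UpdateLeader(lpid, cpid, *leaderHint);
//         }
//         // no hint: look the leader up again before the next retry
//         return cache->RefreshLeader(lpid, cpid);
//     }
// ---------------------------------------------------------------------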
curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - // response1.set_redirect(leaderStr2); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but did not return a leader, refreshing the meta cache - * failed */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - // response1.set_redirect(leaderStr2); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(-1))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but returned an incorrect leader */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(3) - .WillRepeatedly(Return(0)); - 
EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(3) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, - reqDone->GetErrorCode()); - } - /* copyset does not exist, updating leader still failed */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(3) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - reqDone->GetErrorCode()); - } - /* copyset does not exist, updating leader succeeded */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response1.set_redirect(leaderStr); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - scheduler.Fini(); -} - -/** - * read snapshot error testing - */ -TEST_F(CopysetClientTest, read_snapshot_error_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ( - server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), - 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - 
mockMetaCache.DelegateToFake(); - RequestScheduler scheduler; - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - size_t len = 8; - int sn = 1; - char buff1[8 + 1]; - char buff2[8 + 1]; - memset(buff1, 'a', 8); - memset(buff2, 'a', 8); - buff1[8] = '\0'; - buff2[8] = '\0'; - off_t offset = 0; - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); - - /* Illegal parameter */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->seq_ = sn; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, - reqDone->GetErrorCode()); - } - /* chunk snapshot not exist */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->seq_ = sn; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, - reqDone->GetErrorCode()); - } - /* controller error */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->seq_ = sn; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) - .Times(3) - 
.WillRepeatedly(Invoke(ReadChunkSnapshotFunc)); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, - reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - gReadCntlFailedCode = 0; - } - /* Other errors */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->seq_ = sn; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, - reqDone->GetErrorCode()); - } - /* Not a leader, returning the correct leader */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->seq_ = sn; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response1.set_redirect(leaderStr); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkSnapshotFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but did not return a leader, refreshing the meta cache - * succeeded */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->seq_ = sn; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), 
Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkSnapshotFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but did not return a leader, refreshing the meta cache - * failed */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->seq_ = sn; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkSnapshotFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but returned an incorrect leader */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->seq_ = sn; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(3) - .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, - reqDone->GetErrorCode()); - } - /* copyset does not exist, updating leader still failed */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->seq_ = sn; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - 
reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - reqDone->GetErrorCode()); - } - /* copyset does not exist, updating leader succeeded */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->seq_ = sn; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response1.set_redirect(leaderStr); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkSnapshotFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->seq_ = sn; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(-1)) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } -} - -/** - * delete snapshot error testing - */ -TEST_F(CopysetClientTest, delete_snapshot_error_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ( - server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), - 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; - 
ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - RequestScheduler scheduler; - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - uint64_t sn = 1; - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); - - /* Illegal parameter */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, - DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, - reqDone->GetErrorCode()); - } - /* controller error */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, - DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT - .Times(3) - .WillRepeatedly(Invoke(DeleteChunkSnapshotFunc)); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, - reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - gReadCntlFailedCode = 0; - } - /* Other errors */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, - DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(DeleteChunkSnapshotFunc))); - 
copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, - reqDone->GetErrorCode()); - } - /* Not a leader, returning the correct leader */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response1.set_redirect(leaderStr); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, - DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkSnapshotFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but did not return a leader, refreshing the meta cache - * succeeded */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, - DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(DeleteChunkSnapshotFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but did not return a leader, refreshing the meta cache - * failed */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - ChunkResponse response2; - 
response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(-1))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, - DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(DeleteChunkSnapshotFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but returned an incorrect leader */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response.set_redirect(leaderStr); - ; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(3) - .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, - DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, - reqDone->GetErrorCode()); - } - /* copyset does not exist, updating leader still failed */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, - DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - reqDone->GetErrorCode()); - } - /* copyset does not exist, updating leader succeeded */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new 
FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response1.set_redirect(leaderStr); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, - DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(DeleteChunkSnapshotFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(-1))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(-1))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, - DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(DeleteChunkSnapshotFunc))); - copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, - reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } -} - -/** - * create clone chunk error testing - */ -TEST_F(CopysetClientTest, create_s3_clone_error_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ( - server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), - 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - RequestScheduler scheduler; - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - uint64_t sn = 1; - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); - - /* Illegal parameter */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::CREATE_CLONE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->seq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* 
reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) - .Times(1) // NOLINT - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, - 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, - reqDone->GetErrorCode()); - } - /* controller error */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) - .Times(3) // NOLINT - .WillRepeatedly(Invoke(CreateCloneChunkFunc)); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, - 1024, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - gReadCntlFailedCode = 0; - } - // /* Other errors */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) - .Times(3) // NOLINT - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, - 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, - reqDone->GetErrorCode()); - } - /* op success */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, 
CreateCloneChunk(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, - 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but did not return a leader, refreshing the meta cache - * succeeded */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) - .Times(2) // NOLINT - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(CreateCloneChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, - 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but did not return a leader, refreshing the meta cache - * failed */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) - .Times(2) // NOLINT - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(CreateCloneChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, - 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but returned an incorrect leader */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - 
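// Editor's note (standalone gMock illustration, not part of the patch): the
// "fail once, then succeed" cases here rely on gMock consuming chained
// WillOnce actions in order. A self-contained toy version, using a
// hypothetical Echo interface rather than Curve's mocks:
#include <gmock/gmock.h>
#include <gtest/gtest.h>

class Echo {
 public:
    virtual ~Echo() = default;
    virtual int Call() = 0;
};

class MockEcho : public Echo {
 public:
    MOCK_METHOD(int, Call, (), (override));
};

TEST(WillOnceSequencing, FailThenSucceed) {
    MockEcho mock;
    EXPECT_CALL(mock, Call())
        .Times(2)
        .WillOnce(testing::Return(-1))   // first attempt: error
        .WillOnce(testing::Return(0));   // retry: success
    EXPECT_EQ(-1, mock.Call());
    EXPECT_EQ(0, mock.Call());
}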
response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(3) - .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) - .Times(3) // NOLINT - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, - 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, - reqDone->GetErrorCode()); - } - /* copyset does not exist, updating leader still failed */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) - .Times(3) // NOLINT - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, - 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - reqDone->GetErrorCode()); - } - /* copyset does not exist, updating leader succeeded */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response1.set_redirect(leaderStr); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) - .Times(2) // NOLINT - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(CreateCloneChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, - 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* 
reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(-1)) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) - .Times(1) // NOLINT - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, - 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } -} - -/** - * recover chunk error testing - */ -TEST_F(CopysetClientTest, recover_chunk_error_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ( - server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), - 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - RequestScheduler scheduler; - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - uint64_t sn = 1; - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); - - /* Illegal parameter */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) - .Times(1) - .WillOnce( - DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); - copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, - reqDone->GetErrorCode()); - } - /* controller error */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) - 
.Times(3) - .WillRepeatedly(Invoke(RecoverChunkFunc)); - copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - gReadCntlFailedCode = 0; - } - /* Other errors */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) - .Times(3) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); - copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, - reqDone->GetErrorCode()); - } - /* Not a leader, returning the correct leader */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response1.set_redirect(leaderStr); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) - .Times(1) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); - copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but did not return a leader, refreshing the meta cache - * succeeded */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(RecoverChunkFunc))) - .WillOnce( - 
DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); - copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but did not return a leader, refreshing the meta cache - * failed */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(RecoverChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); - copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but returned an incorrect leader */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(3) - .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) - .Times(3) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); - copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, - reqDone->GetErrorCode()); - } - /* copyset does not exist, updating leader still failed */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) - 
.Times(3) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); - copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - reqDone->GetErrorCode()); - } - /* copyset does not exist, updating leader succeeded */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response1.set_redirect(leaderStr); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(RecoverChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); - copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(-1)) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) - .Times(1) - .WillOnce( - DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); - copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } -} - -/** - * get chunk info error testing - */ -TEST_F(CopysetClientTest, get_chunk_info_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ( - server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), - 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - RequestScheduler scheduler; - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - 
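// Editor's note (sketch, not part of the patch): every case in these tests
// blocks on cond.Wait() until the fake closure signals completion. Assuming
// curve::common::CountDownEvent behaves like a simple latch, this minimal
// stand-in shows the synchronization pattern being used:
#include <condition_variable>
#include <mutex>

class MiniCountDownEvent {
 public:
    explicit MiniCountDownEvent(int count) : count_(count) {}

    void Signal() {                        // called from the RPC callback
        std::lock_guard<std::mutex> lk(mtx_);
        if (--count_ <= 0) cv_.notify_all();
    }

    void Wait() {                          // called from the test thread
        std::unique_lock<std::mutex> lk(mtx_);
        cv_.wait(lk, [this] { return count_ <= 0; });
    }

 private:
    std::mutex mtx_;
    std::condition_variable cv_;
    int count_;
};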
IOTracker iot(nullptr, nullptr, nullptr, &fm); - - /* Illegal parameter */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - GetChunkInfoResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) - .Times(1) - .WillOnce( - DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, - reqDone->GetErrorCode()); - } - /* controller error */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) - .Times(3) - .WillRepeatedly(Invoke(GetChunkInfoFunc)); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - gReadCntlFailedCode = 0; - } - /* Other errors */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - GetChunkInfoResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) - .Times(3) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, - reqDone->GetErrorCode()); - } - /* Not a leader, returning the correct leader */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - GetChunkInfoResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response1.set_redirect(leaderStr); - GetChunkInfoResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, 
_, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but did not return a leader, refreshing the meta cache - * succeeded */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - GetChunkInfoResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - GetChunkInfoResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but did not return a leader, refreshing the meta cache - * failed */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - GetChunkInfoResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - GetChunkInfoResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* Not a leader, but returned an incorrect leader */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - 
GetChunkInfoResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(3) - .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) - .Times(3) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, - reqDone->GetErrorCode()); - } - /* copyset does not exist, updating leader still failed */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - GetChunkInfoResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) - .Times(3) - .WillRepeatedly( - DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - reqDone->GetErrorCode()); - } - /* copyset does not exist, updating leader succeeded */ - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - GetChunkInfoResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response1.set_redirect(leaderStr); - GetChunkInfoResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - { - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = 
reqDone; - GetChunkInfoResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(-1))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(-1))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) - .Times(1) - .WillOnce( - DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } -} - -namespace { - -bool gWriteSuccessFlag = false; - -void WriteCallBack(CurveAioContext* aioctx) { - gWriteSuccessFlag = true; - delete aioctx; -} - -void PrepareOpenFile(FakeCurveFSService* service, OpenFileResponse* openresp, - FakeReturn* fakeReturn) { - openresp->set_statuscode(curve::mds::StatusCode::kOK); - auto* session = openresp->mutable_protosession(); - session->set_sessionid("xxx"); - session->set_leasetime(10000); - session->set_createtime(10000); - session->set_sessionstatus(curve::mds::SessionStatus::kSessionOK); - auto* fileinfo = openresp->mutable_fileinfo(); - fileinfo->set_id(1); - fileinfo->set_filename("filename"); - fileinfo->set_parentid(0); - fileinfo->set_length(10ULL * 1024 * 1024 * 1024); - fileinfo->set_blocksize(4096); - - *fakeReturn = FakeReturn(nullptr, static_cast<void*>(openresp)); - - service->SetOpenFile(fakeReturn); -} - -} // namespace - -TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) { - const std::string endpoint = "127.0.0.1:9102"; - - ClientConfig cc; - const std::string& configPath = "./conf/client.conf"; - cc.Init(configPath.c_str()); - FileInstance fileinstance; - UserInfo userinfo; - userinfo.owner = "userinfo"; - - std::shared_ptr<MDSClient> mdsclient = std::make_shared<MDSClient>(); - - // set mds addr - auto mdsopts = cc.GetFileServiceOption().metaServerOpt; - mdsopts.rpcRetryOpt.addrs.clear(); - mdsopts.rpcRetryOpt.addrs.push_back(endpoint); - - ASSERT_EQ(LIBCURVE_ERROR::OK, mdsclient->Initialize(mdsopts)); - ASSERT_TRUE(fileinstance.Initialize( - "/test", mdsclient, userinfo, OpenFlags{}, cc.GetFileServiceOption())); - - // create fake chunkserver service - FakeChunkServerService fakechunkservice; - // Set up cli service - CliServiceFake fakeCliservice; - - FakeCurveFSService curvefsService; - OpenFileResponse openresp; - FakeReturn fakeReturn; - - PrepareOpenFile(&curvefsService, &openresp, &fakeReturn); - - brpc::Server server; - ASSERT_EQ(0, server.AddService(&fakechunkservice, - brpc::SERVER_DOESNT_OWN_SERVICE)) - << "Fail to add fakechunkservice"; - ASSERT_EQ( - 0, server.AddService(&fakeCliservice, brpc::SERVER_DOESNT_OWN_SERVICE)) - << "Fail to add fakecliservice"; - ASSERT_EQ( - 0, server.AddService(&curvefsService, brpc::SERVER_DOESNT_OWN_SERVICE)) - << "Fail to add curvefsService"; - - ASSERT_EQ(0, server.Start(endpoint.c_str(), nullptr)) - << "Fail to start server at " << endpoint; - - // fill metacache - curve::client::MetaCache* mc = - fileinstance.GetIOManager4File()->GetMetaCache(); - curve::client::ChunkIDInfo_t chunkinfo(1, 2, 3); - mc->UpdateChunkInfoByIndex(0, chunkinfo); - curve::client::CopysetInfo<ChunkServerID> cpinfo; - curve::client::EndPoint ep; - butil::str2endpoint("127.0.0.1", 9102, &ep); - - braft::PeerId pd(ep); - curve::client::PeerAddr addr = curve::client::PeerAddr(ep); - 
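// Editor's note (sketch, not part of the patch): ChunkServerBackwardTest
// completes async writes through WriteCallBack, which flips
// gWriteSuccessFlag, and startWriteAndCheckResult sleeps a fixed number of
// seconds before reading the flag. A timed condition-variable wait is one
// alternative shape for the same check; the names below (OnWriteDone,
// WaitWriteDone) are hypothetical, not the test's code.
#include <chrono>
#include <condition_variable>
#include <mutex>

std::mutex gMtx;
std::condition_variable gCv;
bool gDone = false;

void OnWriteDone() {                   // would run in the IO callback
    std::lock_guard<std::mutex> lk(gMtx);
    gDone = true;
    gCv.notify_one();
}

bool WaitWriteDone(int sec) {          // true iff the callback fired in time
    std::unique_lock<std::mutex> lk(gMtx);
    return gCv.wait_for(lk, std::chrono::seconds(sec),
                        [] { return gDone; });
}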
curve::client::CopysetPeerInfo<ChunkServerID> peer(1, addr, addr); - cpinfo.csinfos_.push_back(peer); - mc->UpdateCopysetInfo(2, 3, cpinfo); - - fakeCliservice.SetPeerID(pd); - - curve::chunkserver::ChunkResponse response; - response.set_status( - curve::chunkserver::CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - response.set_appliedindex(0); - FakeReturn writeFakeRet(nullptr, static_cast<void*>(&response)); - fakechunkservice.SetFakeWriteReturn(&writeFakeRet); - - const int kNewFileSn = 100; - const int kOldFileSn = 30; - - ASSERT_EQ(LIBCURVE_ERROR::OK, fileinstance.Open()); - - // Set file version number - fileinstance.GetIOManager4File()->SetLatestFileSn(kNewFileSn); - - // Send a write request and wait a few seconds to check if the IO returns - auto startWriteAndCheckResult = - [&fileinstance](int sec) -> bool { // NOLINT - CurveAioContext* aioctx = new CurveAioContext(); - char buffer[4096]; - - aioctx->buf = buffer; - aioctx->offset = 0; - aioctx->length = sizeof(buffer); - aioctx->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; - aioctx->cb = WriteCallBack; - - // Send write request - fileinstance.AioWrite(aioctx, UserDataType::RawBuffer); - - std::this_thread::sleep_for(std::chrono::seconds(sec)); - return gWriteSuccessFlag; - }; - - // The first write succeeds and updates the file version - // number on the chunkserver side - ASSERT_TRUE(startWriteAndCheckResult(3)); - - // Set an old version number and write - fileinstance.GetIOManager4File()->SetLatestFileSn(kOldFileSn); - gWriteSuccessFlag = false; - - // The chunkserver reports that the version is backward; the version number - // fetched again is still the old one, so the IO hangs - ASSERT_FALSE(startWriteAndCheckResult(3)); - - // Update version number to normal state - fileinstance.GetIOManager4File()->SetLatestFileSn(kNewFileSn); - std::this_thread::sleep_for(std::chrono::seconds(1)); - - // The last write request finally succeeds - ASSERT_EQ(true, gWriteSuccessFlag); - - server.Stop(0); - server.Join(); -} - -TEST_F(CopysetClientTest, retry_rpc_sleep_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ( - server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), - 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - const uint64_t sleepUsBeforeRetry = 5 * 1000 * 1000; - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 1000; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = - sleepUsBeforeRetry; - ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 3500; - ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 3500000; - - RequestScheduleOption reqopt; - reqopt.ioSenderOpt = ioSenderOpt; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - - RequestScheduler scheduler; - scheduler.Init(reqopt, &mockMetaCache); - scheduler.Run(); - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - uint64_t fileId = 1; - uint64_t epoch = 1; - size_t len = 8; - char buff1[8] = {0}; - butil::IOBuf iobuf; - iobuf.append(buff1, sizeof(len)); - off_t offset = 0; - - ChunkServerID leaderId = 10000; - ChunkServerID leaderId2 = 10001; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); + static void ReadChunkSnapshotFunc( + 
::google::protobuf::RpcController *controller, + const ::curve::chunkserver::ChunkRequest *request, // NOLINT + ::curve::chunkserver::ChunkResponse *response, // NOLINT + google::protobuf::Closure *done) + { + brpc::ClosureGuard doneGuard(done); + if (0 != gReadCntlFailedCode) + { + brpc::Controller *cntl = dynamic_cast<brpc::Controller *>(controller); + cntl->SetFailed(-1, "read snapshot controller error"); + } + } - - { - // In the redirect case, chunkserver returns a new leader - // Will not sleep before retrying - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - // reqCtx->writeBuffer_ = buff1; - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response1.set_redirect(leaderStr); - - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId2), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); - - auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); - - // Returns a new leader ID, so there will be no sleep before retrying - ASSERT_LE(endUs - startUs, sleepUsBeforeRetry / 10); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } + static void DeleteChunkSnapshotFunc( + ::google::protobuf::RpcController *controller, // NOLINT + const ::curve::chunkserver::ChunkRequest *request, // NOLINT + ::curve::chunkserver::ChunkResponse *response, + google::protobuf::Closure *done) + { + brpc::ClosureGuard doneGuard(done); + if (0 != gReadCntlFailedCode) + { + brpc::Controller *cntl = dynamic_cast<brpc::Controller *>(controller); + cntl->SetFailed(-1, "delete snapshot controller error"); + } + } - - { - // In the redirect case, chunkserver returns the old leader - // Sleep before retrying - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - // reqCtx->writeBuffer_ = buff1; - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response1.set_redirect(leaderStr); - - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - 
.WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); - auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); - - // Return the same leader ID and sleep before retrying - ASSERT_GE(endUs - startUs, sleepUsBeforeRetry / 10); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } + static void CreateCloneChunkFunc( + ::google::protobuf::RpcController *controller, + const ::curve::chunkserver::ChunkRequest *request, + ::curve::chunkserver::ChunkResponse *response, + google::protobuf::Closure *done) + { + brpc::ClosureGuard doneGuard(done); + if (0 != gReadCntlFailedCode) + { + brpc::Controller *cntl = dynamic_cast<brpc::Controller *>(controller); + cntl->SetFailed(-1, "create clone chunk controller error"); + } + } - - { - // In the redirect case, chunkserver did not return a leader - // Actively refresh to obtain a new leader - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - // reqCtx->writeBuffer_ = buff1; - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId2), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(0); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); - auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); - - // Returns a new leader id, so there will be no sleep before retrying - ASSERT_LE(endUs - startUs, sleepUsBeforeRetry / 10); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } + static void RecoverChunkFunc( + ::google::protobuf::RpcController *controller, // NOLINT + const ::curve::chunkserver::ChunkRequest *request, // NOLINT + ::curve::chunkserver::ChunkResponse *response, + google::protobuf::Closure *done) + { + brpc::ClosureGuard doneGuard(done); + if (0 != gReadCntlFailedCode) + { + brpc::Controller *cntl = dynamic_cast<brpc::Controller *>(controller); + cntl->SetFailed(-1, "recover chunk controller error"); + } + } - - { - // In the redirect case, chunkserver did not return a leader - // Actively refresh to 
obtain old leader - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - // reqCtx->writeBuffer_ = buff1; - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(0); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); - auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, - offset, len, {}, reqDone); - cond.Wait(); - auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); - - ASSERT_GE(endUs - startUs, sleepUsBeforeRetry / 10); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - scheduler.Fini(); -} - -class TestRunnedRequestClosure : public RequestClosure { - public: - TestRunnedRequestClosure() : RequestClosure(nullptr) {} - - void Run() override { runned_ = true; } - - bool IsRunned() const { return runned_; } - - private: - bool runned_ = false; -}; - -// After the test session fails, the retry request will be placed back in the -// request queue -TEST(CopysetClientBasicTest, TestReScheduleWhenSessionNotValid) { - MockRequestScheduler requestScheduler; - CopysetClient copysetClient; - IOSenderOption ioSenderOption; - MetaCache metaCache; - - ASSERT_EQ(0, copysetClient.Init(&metaCache, ioSenderOption, - &requestScheduler, nullptr)); - - // Set session not valid - copysetClient.StartRecycleRetryRPC(); + static void GetChunkInfoFunc( + ::google::protobuf::RpcController *controller, + const ::curve::chunkserver::GetChunkInfoRequest *request, // NOLINT + ::curve::chunkserver::GetChunkInfoResponse *response, // NOLINT + google::protobuf::Closure *done) + { + brpc::ClosureGuard doneGuard(done); + if (0 != gReadCntlFailedCode) + { + brpc::Controller *cntl = dynamic_cast(controller); + cntl->SetFailed(-1, "get chunk info controller error"); + } + } - { - EXPECT_CALL(requestScheduler, ReSchedule(_)).Times(1); + TEST_F(CopysetClientTest, normal_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + mockMetaCache.DelegateToFake(); + + RequestScheduler scheduler; + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + uint64_t fileId = 1; + uint64_t epoch = 1; + 
uint64_t sn = 1; + size_t len = 8; + char buff1[8 + 1]; + char buff2[8 + 1]; + memset(buff1, 'a', 8); + memset(buff2, 'a', 8); + buff1[8] = '\0'; + buff2[8] = '\0'; + off_t offset = 0; + + butil::IOBuf iobuf; + iobuf.append(buff1, sizeof(buff1) - 1); + + ChunkServerID leaderId = 10000; + butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + iot.PrepareReadIOBuffers(1); + + // write success + for (int i = 0; i < 10; ++i) + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + + reqCtx->offset_ = i * 8; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = offset; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = offset; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(Return(-1)) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillOnce( + 
DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + // read success + for (int i = 0; i < 10; ++i) + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = i * 8; + reqCtx->rawlength_ = len; + reqCtx->subIoIndex_ = 0; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = offset; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = offset; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(Return(-1)) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + } - TestRunnedRequestClosure closure; - copysetClient.ReadChunk({}, 0, 0, 0, {}, &closure); - 
ASSERT_FALSE(closure.IsRunned());
-    }
+    /**
+     * write error testing
+     */
+    TEST_F(CopysetClientTest, write_error_test)
+    {
+        MockChunkServiceImpl mockChunkService;
+        ASSERT_EQ(
+            server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE),
+            0);
+        ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0);
+
+        IOSenderOption ioSenderOpt;
+        ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 1000;
+        ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3;
+        ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 5000;
+        ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 3500;
+        ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 3500000;
+
+        RequestScheduleOption reqopt;
+        reqopt.ioSenderOpt = ioSenderOpt;
+
+        CopysetClient copysetClient;
+        MockMetaCache mockMetaCache;
+        mockMetaCache.DelegateToFake();
+
+        RequestScheduler scheduler;
+        scheduler.Init(reqopt, &mockMetaCache);
+        scheduler.Run();
+        copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr);
+
+        LogicPoolID logicPoolId = 1;
+        CopysetID copysetId = 100001;
+        ChunkID chunkId = 1;
+        uint64_t fileId = 1;
+        uint64_t epoch = 1;
+        size_t len = 8;
+        char buff1[8 + 1];
+        char buff2[8 + 1];
+        memset(buff1, 'a', 8);
+        memset(buff2, 'a', 8);
+        buff1[8] = '\0';
+        buff2[8] = '\0';
+        off_t offset = 0;
+
+        butil::IOBuf iobuf;
+        iobuf.append(buff1, sizeof(buff1) - 1);
+
+        ChunkServerID leaderId = 10000;
+        butil::EndPoint leaderAddr;
+        std::string leaderStr = "127.0.0.1:9109";
+        butil::str2endpoint(leaderStr.c_str(), &leaderAddr);
+
+        FileMetric fm("test");
+        IOTracker iot(nullptr, nullptr, nullptr, &fm);
+
+        /* Illegal parameter */
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::WRITE;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->writeData_ = iobuf;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            reqCtx->done_ = reqDone;
+            ChunkResponse response;
+            response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST);
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(AtLeast(1))
+                .WillOnce(DoAll(SetArgPointee<2>(leaderId),
+                                SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+                .Times(1)
+                .WillOnce(
+                    DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc)));
+            copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                     offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST,
+                      reqDone->GetErrorCode());
+        }
+        /* controller error */
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::WRITE;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->writeData_ = iobuf;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            // The retry sleep interval in the configuration is 5000 us. A
+            // controller error does not trigger the exponential backoff, so
+            // every retry only sleeps the base interval.
+            uint64_t start = TimeUtility::GetTimeofDayUs();
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            reqCtx->done_ = reqDone;
+            gWriteCntlFailedCode = -1;
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(6)
+                .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                      SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+                .Times(3)
+                .WillRepeatedly(Invoke(WriteChunkFunc));
+            copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                     offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_NE(0, reqDone->GetErrorCode());
+
+            uint64_t end = TimeUtility::GetTimeofDayUs();
+            ASSERT_GT(end - start, 10000);
+            gWriteCntlFailedCode = 0;
+        }
+        /* controller set timeout */
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::WRITE;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->writeData_ = iobuf;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            // The configured RPC timeout is 1000 ms. Because the chunkserver
+            // keeps returning timeout, exponential backoff of the RPC timeout
+            // kicks in and lengthens every retry. Without backoff, three
+            // attempts would need about 3 * 1000 ms; with it, the timeouts
+            // grow to roughly 1000 + 2000 + 2000 = 5000 ms. Allowing for the
+            // random factor, the total elapsed time should be greater than
+            // 3000 ms and less than 6000 ms.
+            uint64_t start = TimeUtility::GetTimeofDayMs();
+
+            reqCtx->done_ = reqDone;
+            gWriteCntlFailedCode = brpc::ERPCTIMEDOUT;
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(AtLeast(3))
+                .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                      SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+                .Times(3)
+                .WillRepeatedly(Invoke(WriteChunkFunc));
+            copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                     offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_NE(0, reqDone->GetErrorCode());
+
+            uint64_t end = TimeUtility::GetTimeofDayMs();
+            ASSERT_GT(end - start, 3000);
+            ASSERT_LT(end - start, 6000);
+            std::this_thread::sleep_for(std::chrono::seconds(8));
+
+            gWriteCntlFailedCode = 0;
+        }
+
+        /* Set overload */
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::WRITE;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->writeData_ = iobuf;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            // The retry sleep interval in the configuration is 5000 us.
+            // Because the chunkserver returns OVERLOAD, exponential backoff
+            // of the sleep interval kicks in. Without backoff, three attempts
+            // would sleep about 3 * 5000 us in total; with it, the two sleeps
+            // grow to 10000 + 20000 = 30000 us. Allowing for the random
+            // factor, the elapsed time should be greater than 28000 us and
+            // less than 100000 us.
+            uint64_t start = TimeUtility::GetTimeofDayUs();
+
+            reqCtx->done_ = reqDone;
+            ChunkResponse response;
+            response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD);
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(3)
+                .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                      SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+                .Times(3)
+                .WillRepeatedly(
+                    DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc)));
+            copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                     offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD,
+                      reqDone->GetErrorCode());
+
+            uint64_t end = TimeUtility::GetTimeofDayUs();
+            ASSERT_GT(end - start, 28000);
+            ASSERT_LT(end - start, 2 * 50000);
+            gWriteCntlFailedCode = 0;
+        }
+
+        /* Other errors */
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::WRITE;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->writeData_ = iobuf;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            reqCtx->done_ = reqDone;
+            ChunkResponse response;
+            response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(3)
+                .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                      SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+                .Times(3)
+                .WillRepeatedly(
+                    DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc)));
+            copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                     offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN,
+                      reqDone->GetErrorCode());
+        }
+        /* Not a leader, returning the correct leader */
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::WRITE;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->writeData_ = iobuf;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            reqCtx->done_ = reqDone;
+            ChunkResponse response1;
+            response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
+            response1.set_redirect(leaderStr);
+            ChunkResponse response2;
+            response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(3)
+                .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                      SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _))
+                .Times(1)
+                .WillOnce(Return(0));
+            EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+                .Times(2)
+                .WillOnce(
+                    DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc)))
+                .WillOnce(
+                    DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc)));
+            copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                     offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
reqDone->GetErrorCode()); + + ASSERT_EQ(1, fm.writeRPC.redirectQps.count.get_value()); + } + /* Not a leader, did not return a leader, refreshing the meta cache + * succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + // response1.set_redirect(leaderStr2); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, did not return a leader, refreshing the meta cache failed + */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + // response1.set_redirect(leaderStr2); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but returned an incorrect leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + FileMetric fm("test"); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response.set_redirect(leaderStr); + 
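A note on the redirect cases above: a REDIRECTED reply that carries a redirect hint is fed straight into UpdateLeader, while a reply without a hint forces another GetLeader lookup before the retry. The self-contained model below restates that policy; every type and name in it is illustrative only and not Curve's actual API.

#include <optional>
#include <string>

// Illustrative model of the redirect policy the expectations in these
// cases encode; all names here are hypothetical stand-ins.
struct Reply {
    bool redirected = false;
    std::optional<std::string> redirectHint;  // leader "ip:port" if known
};

struct LeaderCache {
    std::string leader;
    void Update(const std::string& addr) { leader = addr; }  // ~UpdateLeader
    std::string Refresh() { return leader; }                 // ~GetLeader
};

// One retry step: with a hint the cache is updated directly (hence the
// UpdateLeader expectations); without one the client falls back to
// another GetLeader()-style lookup (hence the extra GetLeader calls).
inline std::string NextTarget(const Reply& r, LeaderCache* cache) {
    if (r.redirected && r.redirectHint) {
        cache->Update(*r.redirectHint);
        return cache->leader;
    }
    return cache->Refresh();
}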
EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(6)
+                .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                      SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _))
+                .Times(3)
+                .WillRepeatedly(Return(0));
+            EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+                .Times(3)
+                .WillRepeatedly(
+                    DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc)));
+            auto startTimeUs = curve::common::TimeUtility::GetTimeofDayUs();
+            copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                     offset, len, {}, reqDone);
+            cond.Wait();
+            auto elapsed =
+                curve::common::TimeUtility::GetTimeofDayUs() - startTimeUs;
+            // chunkserverOPRetryIntervalUS = 5000. A redirected request
+            // sleeps 500 us before each retry, and two retries are made in
+            // total (chunkserverOPMaxRetry = 3; the request returns once the
+            // retry count reaches the maximum), so the total time spent is
+            // greater than 1000 us.
+            ASSERT_GE(elapsed, 1000);
+            ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED,
+                      reqDone->GetErrorCode());
+            ASSERT_EQ(3, fm.writeRPC.redirectQps.count.get_value());
+        }
+        /* copyset does not exist, updating leader still failed */
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::WRITE;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->writeData_ = iobuf;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            reqCtx->done_ = reqDone;
+            ChunkResponse response;
+            response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST);
+            response.set_redirect(leaderStr);
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(6)
+                .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                      SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+                .Times(3)
+                .WillRepeatedly(
+                    DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc)));
+            copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                     offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST,
+                      reqDone->GetErrorCode());
+        }
+        /* copyset does not exist, updating leader succeeded */
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::WRITE;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->writeData_ = iobuf;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            reqCtx->done_ = reqDone;
+            ChunkResponse response1;
+            response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST);
+            response1.set_redirect(leaderStr);
+            ChunkResponse response2;
+            response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
+            response2.set_redirect(leaderStr);
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(3)
+                .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                      SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+                .Times(2)
+                .WillOnce(
+                    DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc)))
+                .WillOnce(
+                    DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc)));
+            copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                     offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
+                      reqDone->GetErrorCode());
+        }
+        // epoch too old
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::WRITE;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->writeData_ = iobuf;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            reqCtx->done_ = reqDone;
+            ChunkResponse response;
+            response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD);
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(AtLeast(1))
+                .WillOnce(DoAll(SetArgPointee<2>(leaderId),
+                                SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+                .Times(1)
+                .WillRepeatedly(
+                    DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc)));
+            copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                     offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD,
+                      reqDone->GetErrorCode());
+        }
+
+        scheduler.Fini();
+    }

-    {
-        EXPECT_CALL(requestScheduler, ReSchedule(_)).Times(1);
+    /**
+     * write failed testing
+     */
+    TEST_F(CopysetClientTest, write_failed_test)
+    {
+        MockChunkServiceImpl mockChunkService;
+        ASSERT_EQ(
+            server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE),
+            0);
+        ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0);
+
+        IOSenderOption ioSenderOpt;
+        ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 500;
+        ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 50;
+        ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 5000;
+        ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 1000;
+        ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 100000;
+
+        RequestScheduleOption reqopt;
+        reqopt.ioSenderOpt = ioSenderOpt;
+
+        CopysetClient copysetClient;
+        MockMetaCache mockMetaCache;
+        mockMetaCache.DelegateToFake();
+
+        RequestScheduler scheduler;
+        scheduler.Init(reqopt, &mockMetaCache);
+        scheduler.Run();
+        copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr);
+
+        LogicPoolID logicPoolId = 1;
+        CopysetID copysetId = 100001;
+        ChunkID chunkId = 1;
+        uint64_t fileId = 1;
+        uint64_t epoch = 1;
+        size_t len = 8;
+        char buff1[8 + 1];
+        char buff2[8 + 1];
+        memset(buff1, 'a', 8);
+        memset(buff2, 'a', 8);
+        buff1[8] = '\0';
+        buff2[8] = '\0';
+        off_t offset = 0;
+        butil::IOBuf iobuf;
+        iobuf.append(buff1, sizeof(buff1) - 1);
+
+        ChunkServerID leaderId = 10000;
+        butil::EndPoint leaderAddr;
+        std::string leaderStr = "127.0.0.1:9109";
+        butil::str2endpoint(leaderStr.c_str(), &leaderAddr);
+
+        FileMetric fm("test");
+        IOTracker iot(nullptr, nullptr, nullptr, &fm);
+
+        /* controller set timeout */
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::WRITE;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->writeData_ = iobuf;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            // The configured RPC timeout is 500 ms. Because the chunkserver
+            // keeps returning timeout, exponential backoff of the RPC timeout
+            // kicks in and lengthens every retry. Without backoff, 50
+            // attempts would need about 49 * 500 ms; with the timeout capped
+            // at chunkserverMaxRPCTimeoutMS = 1000 ms, the total grows to
+            // roughly 49 * 1000 = 49000 ms.
+            uint64_t start = TimeUtility::GetTimeofDayMs();
+
+            reqCtx->done_ = reqDone;
+            gWriteCntlFailedCode = brpc::ERPCTIMEDOUT;
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(AtLeast(50))
+                .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                      SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+                .Times(50)
+                .WillRepeatedly(Invoke(WriteChunkFunc));
+            copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                     offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_NE(0, reqDone->GetErrorCode());
+
+            uint64_t end = TimeUtility::GetTimeofDayMs();
+            ASSERT_GT(end - start, 25000);
+            ASSERT_LT(end - start, 55000);
+            std::this_thread::sleep_for(std::chrono::seconds(8));
+
+            gWriteCntlFailedCode = 0;
+        }
+
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::WRITE;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->writeData_ = iobuf;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            // The retry sleep interval in the configuration is 5000 us.
+            // Because the chunkserver returns OVERLOAD, exponential backoff
+            // of the sleep interval kicks in. Without backoff, 50 attempts
+            // would sleep about 49 * 5000 us in total; with the interval
+            // doubling up to the 100000 us cap, the total sleep grows to
+            // roughly 10000 + 20000 + 40000 + ... ~= 4650000 us.
+            uint64_t start = TimeUtility::GetTimeofDayUs();
+
+            reqCtx->done_ = reqDone;
+            ChunkResponse response;
+            response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD);
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(50)
+                .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                      SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _))
+                .Times(50)
+                .WillRepeatedly(
+                    DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc)));
+            copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf,
+                                     offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD,
+                      reqDone->GetErrorCode());
+
+            uint64_t end = TimeUtility::GetTimeofDayUs();
+            ASSERT_GT(end - start, 250000);
+            ASSERT_LT(end - start, 4650000);
+            gWriteCntlFailedCode = 0;
+        }
+        scheduler.Fini();
+    }
+
+    /**
+     * read failed testing
+     */
+    TEST_F(CopysetClientTest, read_failed_test)
+    {
+        MockChunkServiceImpl mockChunkService;
+        ASSERT_EQ(
+            server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE),
+            0);
+        ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0);
+
+        IOSenderOption ioSenderOpt;
+        ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 500;
+        ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 50;
+        ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 5000;
+        ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 1000;
+        ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 100000;
+
+        RequestScheduleOption reqopt;
+        reqopt.ioSenderOpt = ioSenderOpt;
+
+        CopysetClient copysetClient;
+        MockMetaCache mockMetaCache;
+        mockMetaCache.DelegateToFake();
+
+        RequestScheduler scheduler;
+        scheduler.Init(reqopt, &mockMetaCache);
+        scheduler.Run();
+        copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr);
+
+        LogicPoolID logicPoolId = 1;
+        CopysetID copysetId = 100001;
+        ChunkID chunkId = 1;
+        uint64_t sn = 1;
+        size_t len = 8;
+        char buff1[8 + 1];
+        char buff2[8 + 1];
+        memset(buff1, 'a', 8);
+        memset(buff2, 'a', 8);
+        buff1[8] = '\0';
+        buff2[8] = '\0';
+        off_t offset = 0;
+
+        ChunkServerID leaderId = 10000;
+        butil::EndPoint leaderAddr;
+        std::string leaderStr = "127.0.0.1:9109";
+        butil::str2endpoint(leaderStr.c_str(), &leaderAddr);
+
+        FileMetric fm("test");
+        IOTracker iot(nullptr, nullptr, nullptr, &fm);
+        iot.PrepareReadIOBuffers(1);
+
+        /* controller set timeout */
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::READ;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->subIoIndex_ = 0;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            // The configured RPC timeout is 500 ms. Because the chunkserver
+            // keeps returning timeout, exponential backoff of the RPC timeout
+            // kicks in and lengthens every retry. Without backoff, 50
+            // attempts would need about 50 * 500 ms; with backoff the total
+            // grows to roughly 500 + 1000 + 2000 + ... ~= 60000 ms.
+            uint64_t start = TimeUtility::GetTimeofDayMs();
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            reqCtx->done_ = reqDone;
+            gReadCntlFailedCode = brpc::ERPCTIMEDOUT;
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(AtLeast(50))
+                .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                      SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _))
+                .Times(50)
+                .WillRepeatedly(Invoke(ReadChunkFunc));
+            copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_NE(0, reqDone->GetErrorCode());
+
+            uint64_t end = TimeUtility::GetTimeofDayMs();
+            ASSERT_GT(end - start, 25000);
+            ASSERT_LT(end - start, 60000);
+
+            std::this_thread::sleep_for(std::chrono::seconds(8));
+
+            gReadCntlFailedCode = 0;
+        }
+
+        /* Set overload */
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::READ;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->subIoIndex_ = 0;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            // The retry sleep interval in the configuration is 5000 us.
+            // Because the chunkserver returns OVERLOAD, exponential backoff
+            // of the sleep interval kicks in. Without backoff, 50 attempts
+            // would sleep about 49 * 5000 us in total; with the interval
+            // doubling up to the 100000 us cap, the total sleep grows to
+            // roughly 10000 + 20000 + 40000 + ... ~= 4650000 us. Allowing for
+            // the random factor, the elapsed time should be greater than
+            // 250000 us and less than 4650000 us.
+            uint64_t start = TimeUtility::GetTimeofDayUs();
+
+            reqCtx->done_ = reqDone;
+            ChunkResponse response;
+            response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD);
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(50)
+                .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                      SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _))
+                .Times(50)
+                .WillRepeatedly(
+                    DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc)));
+            copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD,
+                      reqDone->GetErrorCode());
+
+            uint64_t end = TimeUtility::GetTimeofDayUs();
+            ASSERT_GT(end - start, 250000);
+            ASSERT_LT(end - start, 4650000);
+        }
+        scheduler.Fini();
+    }
+
+    /**
+     * read error testing
+     */
+    TEST_F(CopysetClientTest, read_error_test)
+    {
+        MockChunkServiceImpl mockChunkService;
+        ASSERT_EQ(
+            server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE),
+            0);
+        ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0);
+
+        IOSenderOption ioSenderOpt;
+        ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 1000;
+        ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3;
+        ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500;
+        ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 3500;
+        ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 3500000;
+
+        RequestScheduleOption reqopt;
+        reqopt.ioSenderOpt = ioSenderOpt;
+
+        CopysetClient copysetClient;
+        MockMetaCache mockMetaCache;
+        mockMetaCache.DelegateToFake();
+
+        RequestScheduler scheduler;
+        scheduler.Init(reqopt, &mockMetaCache);
+        scheduler.Run();
+
+        copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler);
+
+        LogicPoolID logicPoolId = 1;
+        CopysetID copysetId = 100001;
+        ChunkID chunkId = 1;
+        uint64_t sn = 1;
+        size_t len = 8;
+        char buff1[8 + 1];
+        char buff2[8 + 1];
+        memset(buff1, 'a', 8);
+        memset(buff2, 'a', 8);
+        buff1[8] = '\0';
+        buff2[8] = '\0';
+        off_t offset = 0;
+
+        ChunkServerID leaderId = 10000;
+        butil::EndPoint leaderAddr;
+        std::string leaderStr = "127.0.0.1:9109";
+        butil::str2endpoint(leaderStr.c_str(), &leaderAddr);
+
+        FileMetric fm("test");
+        IOTracker iot(nullptr, nullptr, nullptr, &fm);
+        iot.PrepareReadIOBuffers(1);
+
+        /* Illegal parameter */
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::READ;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->subIoIndex_ = 0;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            reqCtx->done_ = reqDone;
+            ChunkResponse response;
+            response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST);
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(AtLeast(1))
+                .WillOnce(DoAll(SetArgPointee<2>(leaderId),
+                                SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _))
+                .Times(1)
+                .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc)));
+            copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST,
+                      reqDone->GetErrorCode());
+        }
+        /* chunk not exist */
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::READ;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->subIoIndex_ = 0;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            reqCtx->done_ = reqDone;
+            ChunkResponse response;
+            response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST);
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(AtLeast(1))
+                .WillOnce(DoAll(SetArgPointee<2>(leaderId),
+                                SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _))
+                .Times(1)
+                .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc)));
+            copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_EQ(0, reqDone->GetErrorCode());
+        }
+        /* controller error */
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::READ;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->subIoIndex_ = 0;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            // The retry sleep interval in the configuration is 500 us. A
+            // controller error does not trigger the exponential backoff, so
+            // every retry only sleeps the base interval.
+            uint64_t start = TimeUtility::GetTimeofDayUs();
+
+            curve::common::CountDownEvent cond(1);
+            RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+            reqDone->SetFileMetric(&fm);
+            reqDone->SetIOTracker(&iot);
+
+            reqCtx->done_ = reqDone;
+            gReadCntlFailedCode = -1;
+            EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
+                .Times(AtLeast(3))
+                .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
+                                      SetArgPointee<3>(leaderAddr), Return(0)));
+            EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _))
+                .Times(3)
+                .WillRepeatedly(Invoke(ReadChunkFunc));
+            copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone);
+            cond.Wait();
+            ASSERT_NE(0, reqDone->GetErrorCode());
+
+            uint64_t end = TimeUtility::GetTimeofDayUs();
+            ASSERT_GT(end - start, 1000);
+            gReadCntlFailedCode = 0;
+        }
+
+        /* controller set timeout */
+        {
+            RequestContext *reqCtx = new FakeRequestContext();
+            reqCtx->optype_ = OpType::READ;
+            reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
+
+            reqCtx->subIoIndex_ = 0;
+            reqCtx->offset_ = 0;
+            reqCtx->rawlength_ = len;
+
+            // The configured RPC timeout is 1000 ms, but because the
+            // chunkserver keeps timing out, exponential backoff kicks in and
+            // lengthens every retry. Without backoff, three attempts would
+            // need about 3 * 1000 ms; with it, the timeouts grow to
+            // 1000 + 2000 + 2000 = 5000 ms. Considering the random factor,
+            // the total time for the three attempts should be greater than
+            // 3000 ms and less than 6000 ms.
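To make the backoff arithmetic in the comment above concrete, here is a small compile-checkable sketch. It is only a model: the helper name and the effective 2000 ms cap are assumptions chosen to reproduce the 1000 + 2000 + 2000 = 5000 figure (the configured chunkserverMaxRPCTimeoutMS in this test is 3500 ms, so the client's real capping rule may differ).

#include <algorithm>
#include <cstdint>
#include <iostream>

// Hypothetical model of per-retry RPC timeout growth: double the base
// timeout on each failed attempt and clamp it at a maximum. Not Curve's
// actual backoff implementation.
uint64_t TimeoutForAttemptMs(uint64_t baseMs, uint64_t capMs, int attempt) {
    uint64_t t = baseMs;
    for (int i = 0; i < attempt; ++i) {
        t = std::min(t * 2, capMs);
    }
    return t;
}

int main() {
    uint64_t total = 0;
    for (int attempt = 0; attempt < 3; ++attempt) {
        total += TimeoutForAttemptMs(1000, 2000, attempt);
    }
    std::cout << total << "\n";  // 1000 + 2000 + 2000 = 5000
}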
+ uint64_t start = TimeUtility::GetTimeofDayMs(); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + gReadCntlFailedCode = brpc::ERPCTIMEDOUT; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(3)) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly(Invoke(ReadChunkFunc)); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + + uint64_t end = TimeUtility::GetTimeofDayMs(); + ASSERT_GT(end - start, 3000); + ASSERT_LT(end - start, 6000); + + std::this_thread::sleep_for(std::chrono::seconds(8)); + + gReadCntlFailedCode = 0; + } + + /* Set overload */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + // The retry sleep time set in the configuration file is 500, but due to + // chunkserver timeouts, it triggers exponential backoff, increasing the + // interval between retries. In normal conditions, three retries would + // only require a sleep time of 3 * 500. However, with the added + // exponential backoff, the sleep intervals will increase to 1000 + 2000 + // = 3000. Considering the random factor, the total time for three + // retries should be greater than 2900 and less than 5000. 
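The sleep-interval backoff described in the comment above can be modelled the same way. The doubling rule and the helper name below are assumptions chosen to reproduce the 1000 + 2000 = 3000 us figure; the real client also adds a random factor, which this sketch omits.

#include <algorithm>
#include <cstdint>

// Hypothetical model: each retry doubles the sleep interval starting from
// twice the 500 us base, clamped at chunkserverMaxRetrySleepIntervalUS.
constexpr uint64_t SleepBeforeRetryUs(uint64_t baseUs, uint64_t capUs,
                                      int retry) {
    // retry = 1 for the first retry, 2 for the second, ...
    return std::min(baseUs << retry, capUs);
}

static_assert(SleepBeforeRetryUs(500, 3500000, 1) == 1000, "first retry");
static_assert(SleepBeforeRetryUs(500, 3500000, 2) == 2000, "second retry");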
+ uint64_t start = TimeUtility::GetTimeofDayUs(); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, + reqDone->GetErrorCode()); + + uint64_t end = TimeUtility::GetTimeofDayUs(); + ASSERT_GT(end - start, 2900); + ASSERT_LT(end - start, 3 * 5000); + } + + /* Other errors */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, + reqDone->GetErrorCode()); + } + /* Not a leader, returning the correct leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) + .WillOnce(Return(0)); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + 
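Every case in these tests blocks the test thread on a curve::common::CountDownEvent until the fake closure runs, as in the statements that follow. Below is a minimal stand-in with the same Wait/Signal shape, sketched for readers unfamiliar with the primitive; Curve's real class may differ in detail.

#include <condition_variable>
#include <mutex>

// Minimal model of the CountDownEvent pattern used throughout: the RPC
// closure signals once on completion, and the test thread waits before
// inspecting reqDone->GetErrorCode(). A sketch, not Curve's implementation.
class CountDownEvent {
 public:
    explicit CountDownEvent(int count) : count_(count) {}

    void Signal() {
        std::lock_guard<std::mutex> lk(mu_);
        if (count_ > 0 && --count_ == 0) cv_.notify_all();
    }

    void Wait() {
        std::unique_lock<std::mutex> lk(mu_);
        cv_.wait(lk, [this] { return count_ == 0; });
    }

 private:
    std::mutex mu_;
    std::condition_variable cv_;
    int count_;
};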
curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + // response1.set_redirect(leaderStr2); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + // response1.set_redirect(leaderStr2); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(-1))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but returned an incorrect leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) + .WillRepeatedly(Return(0)); + 
EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader still failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response2.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + scheduler.Fini(); + } + + /** + * read snapshot error testing + */ + TEST_F(CopysetClientTest, read_snapshot_error_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + 
mockMetaCache.DelegateToFake(); + RequestScheduler scheduler; + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + size_t len = 8; + int sn = 1; + char buff1[8 + 1]; + char buff2[8 + 1]; + memset(buff1, 'a', 8); + memset(buff2, 'a', 8); + buff1[8] = '\0'; + buff2[8] = '\0'; + off_t offset = 0; + + ChunkServerID leaderId = 10000; + butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + + /* Illegal parameter */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, + reqDone->GetErrorCode()); + } + /* chunk snapshot not exist */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, + reqDone->GetErrorCode()); + } + /* controller error */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + gReadCntlFailedCode = -1; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) + 
.WillRepeatedly(Invoke(ReadChunkSnapshotFunc)); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + gReadCntlFailedCode = 0; + } + /* Other errors */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, + reqDone->GetErrorCode()); + } + /* Not a leader, returning the correct leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) + .WillOnce(Return(0)); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(ReadChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), 
Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(ReadChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(ReadChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but returned an incorrect leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) + .WillRepeatedly(Return(0)); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader still failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + 
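+        // A reading of this case, not part of the patch: COPYSET_NOTEXIST
+        // forces a leader refresh before every retry; since GetLeader below
+        // keeps handing back the same stale leader, all three attempts fail
+        // and the error reaches the closure unchanged.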
reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response2.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(ReadChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(Return(-1)) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + } + + /** + * delete snapshot error testing + */ + TEST_F(CopysetClientTest, delete_snapshot_error_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; + 
ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + mockMetaCache.DelegateToFake(); + RequestScheduler scheduler; + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + uint64_t sn = 1; + + ChunkServerID leaderId = 10000; + butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + + /* Illegal parameter */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, + reqDone->GetErrorCode()); + } + /* controller error */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + gReadCntlFailedCode = -1; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(3) + .WillRepeatedly(Invoke(DeleteChunkSnapshotFunc)); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + gReadCntlFailedCode = 0; + } + /* Other errors */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(DeleteChunkSnapshotFunc))); + 
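+        // Not part of the patch: DeleteChunkSnapshotOrCorrectSn is the
+        // combined delete-snapshot / correct-sequence-number RPC; an UNKNOWN
+        // failure is retried up to chunkserverOPMaxRetry times (hence the
+        // .Times(3) above) and then reported to the closure as-is.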
copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, + reqDone->GetErrorCode()); + } + /* Not a leader, returning the correct leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) + .WillOnce(Return(0)); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(ReadChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(DeleteChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + 
response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(-1))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(DeleteChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but returned an incorrect leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response.set_redirect(leaderStr); + ; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) + .WillRepeatedly(Return(0)); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader still failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new 
FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response2.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(DeleteChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(-1))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(-1))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + } + + /** + * create clone chunk error testing + */ + TEST_F(CopysetClientTest, create_s3_clone_error_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + mockMetaCache.DelegateToFake(); + RequestScheduler scheduler; + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + uint64_t sn = 1; + + ChunkServerID leaderId = 10000; + butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + + /* Illegal parameter */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::CREATE_CLONE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure 
*reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(1) // NOLINT + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, + reqDone->GetErrorCode()); + } + /* controller error */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + gReadCntlFailedCode = -1; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT + .WillRepeatedly(Invoke(CreateCloneChunkFunc)); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + gReadCntlFailedCode = 0; + } + // /* Other errors */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, + reqDone->GetErrorCode()); + } + /* op success */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, 
CreateCloneChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(2) // NOLINT + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(CreateCloneChunkFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(2) // NOLINT + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(CreateCloneChunkFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but returned an incorrect leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + 
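+        // Not part of the patch: the redirect below names the leader the
+        // cache already points at, so every retry lands on the same server
+        // and the call ultimately fails with REDIRECTED.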
response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) + .WillRepeatedly(Return(0)); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader still failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response2.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(2) // NOLINT + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(CreateCloneChunkFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure 
*reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(Return(-1)) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(1) // NOLINT + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + } + + /** + * recover chunk error testing + */ + TEST_F(CopysetClientTest, recover_chunk_error_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + mockMetaCache.DelegateToFake(); + RequestScheduler scheduler; + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + uint64_t sn = 1; + + ChunkServerID leaderId = 10000; + butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + + /* Illegal parameter */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, + reqDone->GetErrorCode()); + } + /* controller error */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + gReadCntlFailedCode = -1; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + 
.Times(3) + .WillRepeatedly(Invoke(RecoverChunkFunc)); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + gReadCntlFailedCode = 0; + } + /* Other errors */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, + reqDone->GetErrorCode()); + } + /* Not a leader, returning the correct leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(RecoverChunkFunc))) + .WillOnce( + 
DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(RecoverChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but returned an incorrect leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) + .WillRepeatedly(Return(0)); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader still failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + 
.Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response2.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(RecoverChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(Return(-1)) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + } - TestRunnedRequestClosure closure; - copysetClient.WriteChunk({}, 1, 1, 0, {}, 0, 0, {}, &closure); - ASSERT_FALSE(closure.IsRunned()); - } -} + /** + * get chunk info error testing + */ + TEST_F(CopysetClientTest, get_chunk_info_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + mockMetaCache.DelegateToFake(); + RequestScheduler scheduler; + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + + ChunkServerID leaderId = 10000; + 
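+    // Not part of the patch: as in the preceding tests, a single mock leader
+    // at 127.0.0.1:9109 serves every case; GetChunkInfo carries no
+    // offset/length, only the chunk id info and the closure.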
butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + + /* Illegal parameter */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, + reqDone->GetErrorCode()); + } + /* controller error */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + gReadCntlFailedCode = -1; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) + .WillRepeatedly(Invoke(GetChunkInfoFunc)); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + gReadCntlFailedCode = 0; + } + /* Other errors */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, + reqDone->GetErrorCode()); + } + /* Not a leader, returning the correct leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + 
response1.set_redirect(leaderStr); + GetChunkInfoResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) + .WillOnce(Return(0)); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + GetChunkInfoResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + GetChunkInfoResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but returned an incorrect leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent 
cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) + .WillRepeatedly(Return(0)); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader still failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response1.set_redirect(leaderStr); + GetChunkInfoResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response2.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + 
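+        // Not part of the patch: in this last case the leader lookup itself
+        // fails twice before succeeding; the RPC is only issued after a
+        // successful lookup, so the service mock expects exactly one call.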
curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(-1))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(-1))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + } + + namespace + { + + bool gWriteSuccessFlag = false; + + void WriteCallBack(CurveAioContext *aioctx) + { + gWriteSuccessFlag = true; + delete aioctx; + } + + void PrepareOpenFile(FakeCurveFSService *service, OpenFileResponse *openresp, + FakeReturn *fakeReturn) + { + openresp->set_statuscode(curve::mds::StatusCode::kOK); + auto *session = openresp->mutable_protosession(); + session->set_sessionid("xxx"); + session->set_leasetime(10000); + session->set_createtime(10000); + session->set_sessionstatus(curve::mds::SessionStatus::kSessionOK); + auto *fileinfo = openresp->mutable_fileinfo(); + fileinfo->set_id(1); + fileinfo->set_filename("filename"); + fileinfo->set_parentid(0); + fileinfo->set_length(10ULL * 1024 * 1024 * 1024); + fileinfo->set_blocksize(4096); + + *fakeReturn = FakeReturn(nullptr, static_cast<void *>(openresp)); + + service->SetOpenFile(fakeReturn); + } + + } // namespace + + TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) + { + const std::string endpoint = "127.0.0.1:9102"; + + ClientConfig cc; + const std::string &configPath = "./conf/client.conf"; + cc.Init(configPath.c_str()); + FileInstance fileinstance; + UserInfo userinfo; + userinfo.owner = "userinfo"; + + std::shared_ptr<MDSClient> mdsclient = std::make_shared<MDSClient>(); + + // set mds addr + auto mdsopts = cc.GetFileServiceOption().metaServerOpt; + mdsopts.rpcRetryOpt.addrs.clear(); + mdsopts.rpcRetryOpt.addrs.push_back(endpoint); + + ASSERT_EQ(LIBCURVE_ERROR::OK, mdsclient->Initialize(mdsopts)); + ASSERT_TRUE(fileinstance.Initialize( + "/test", mdsclient, userinfo, OpenFlags{}, cc.GetFileServiceOption())); + + // create fake chunkserver service + FakeChunkServerService fakechunkservice; + // Set up cli service + CliServiceFake fakeCliservice; + + FakeCurveFSService curvefsService; + OpenFileResponse openresp; + FakeReturn fakeReturn; + + PrepareOpenFile(&curvefsService, &openresp, &fakeReturn); + + brpc::Server server; + ASSERT_EQ(0, server.AddService(&fakechunkservice, + brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Fail to add fakechunkservice"; + ASSERT_EQ( + 0, server.AddService(&fakeCliservice, brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Fail to add fakecliservice"; + ASSERT_EQ( + 0, server.AddService(&curvefsService, brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Fail to add curvefsService"; + + ASSERT_EQ(0, server.Start(endpoint.c_str(), nullptr)) + << "Fail to start server at " << endpoint; + + // fill metacache + curve::client::MetaCache *mc = + fileinstance.GetIOManager4File()->GetMetaCache(); + curve::client::ChunkIDInfo_t chunkinfo(1, 2, 3); + mc->UpdateChunkInfoByIndex(0, chunkinfo); + 
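+ // The metacache now maps chunk index 0 to copyset (logical pool 2, copyset 3); the single peer registered below is the fake chunkserver at 127.0.0.1:9102, so all chunk IO in this test is routed to FakeChunkServerService.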
curve::client::CopysetInfo cpinfo; + curve::client::EndPoint ep; + butil::str2endpoint("127.0.0.1", 9102, &ep); + + braft::PeerId pd(ep); + curve::client::PeerAddr addr = curve::client::PeerAddr(ep); + curve::client::CopysetPeerInfo peer(1, addr, addr); + cpinfo.csinfos_.push_back(peer); + mc->UpdateCopysetInfo(2, 3, cpinfo); + + fakeCliservice.SetPeerID(pd); + + curve::chunkserver::ChunkResponse response; + response.set_status( + curve::chunkserver::CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response.set_appliedindex(0); + FakeReturn writeFakeRet(nullptr, static_cast<void *>(&response)); + fakechunkservice.SetFakeWriteReturn(&writeFakeRet); + + const int kNewFileSn = 100; + const int kOldFileSn = 30; + + ASSERT_EQ(LIBCURVE_ERROR::OK, fileinstance.Open()); + + // Set the file sequence number + fileinstance.GetIOManager4File()->SetLatestFileSn(kNewFileSn); + + // Send a write request, then wait the given number of seconds and check + // whether the IO completed + auto startWriteAndCheckResult = + [&fileinstance](int sec) -> bool { // NOLINT + CurveAioContext *aioctx = new CurveAioContext(); + char buffer[4096]; + + aioctx->buf = buffer; + aioctx->offset = 0; + aioctx->length = sizeof(buffer); + aioctx->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; + aioctx->cb = WriteCallBack; + + // Send write request + fileinstance.AioWrite(aioctx, UserDataType::RawBuffer); + + std::this_thread::sleep_for(std::chrono::seconds(sec)); + return gWriteSuccessFlag; + }; + + // The first write succeeds and records the file sequence number on the + // chunkserver side + ASSERT_TRUE(startWriteAndCheckResult(3)); + + // Write again with an old sequence number + fileinstance.GetIOManager4File()->SetLatestFileSn(kOldFileSn); + gWriteSuccessFlag = false; + + // The chunkserver reports a backward version; the sequence number fetched + // again is still the old one, so the IO hangs + ASSERT_FALSE(startWriteAndCheckResult(3)); + + // Restore the sequence number to its normal state + fileinstance.GetIOManager4File()->SetLatestFileSn(kNewFileSn); + std::this_thread::sleep_for(std::chrono::seconds(1)); + + // The hung write now completes successfully + ASSERT_EQ(true, gWriteSuccessFlag); + + server.Stop(0); + server.Join(); + } + + TEST_F(CopysetClientTest, retry_rpc_sleep_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + const uint64_t sleepUsBeforeRetry = 5 * 1000 * 1000; + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 1000; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = + sleepUsBeforeRetry; + ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 3500; + ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 3500000; + + RequestScheduleOption reqopt; + reqopt.ioSenderOpt = ioSenderOpt; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + mockMetaCache.DelegateToFake(); + + RequestScheduler scheduler; + scheduler.Init(reqopt, &mockMetaCache); + scheduler.Run(); + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + uint64_t fileId = 1; + uint64_t epoch = 1; + size_t len = 8; + char buff1[8] = {0}; + butil::IOBuf iobuf; + iobuf.append(buff1, sizeof(len)); + off_t offset = 0; + + ChunkServerID leaderId = 10000; + ChunkServerID leaderId2 = 10001; + butil::EndPoint leaderAddr; + std::string 
leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + + { + // In the redirect case, chunkserver returns a new leader + // Will not sleep until retry + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + // reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response1.set_redirect(leaderStr); + + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId2), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) + .WillOnce(Return(0)); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + + auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); + + // Returns a new leader ID, so there will be no sleep before retrying + ASSERT_LE(endUs - startUs, sleepUsBeforeRetry / 10); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + + { + // In the redirect case, chunkserver returns the old leader + // Sleep before retrying + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + // reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response1.set_redirect(leaderStr); + + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) + .WillOnce(Return(0)); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); + + // Return the same leader ID and sleep before retrying + 
ASSERT_GE(endUs - startUs, sleepUsBeforeRetry / 10); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + + { + // In the redirect case, chunkserver did not return a leader + // Actively refresh to obtain a new leader + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + // reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId2), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(0); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); + + // Returns a new leader id, so there will be no sleep before retrying + ASSERT_LE(endUs - startUs, sleepUsBeforeRetry / 10); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + + { + // In the redirect case, chunkserver did not return a leader + // Actively refresh to obtain old leader + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + // reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(0); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); + + ASSERT_GE(endUs - startUs, sleepUsBeforeRetry / 10); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + scheduler.Fini(); + } + + class 
TestRunnedRequestClosure : public RequestClosure + { + public: + TestRunnedRequestClosure() : RequestClosure(nullptr) {} + + void Run() override { runned_ = true; } + + bool IsRunned() const { return runned_; } + + private: + bool runned_ = false; + }; + + // After the test session fails, the retry request will be placed back in the + // request queue + TEST(CopysetClientBasicTest, TestReScheduleWhenSessionNotValid) + { + MockRequestScheduler requestScheduler; + CopysetClient copysetClient; + IOSenderOption ioSenderOption; + MetaCache metaCache; + + ASSERT_EQ(0, copysetClient.Init(&metaCache, ioSenderOption, + &requestScheduler, nullptr)); + + // Set session not valid + copysetClient.StartRecycleRetryRPC(); + + { + EXPECT_CALL(requestScheduler, ReSchedule(_)).Times(1); + + TestRunnedRequestClosure closure; + copysetClient.ReadChunk({}, 0, 0, 0, {}, &closure); + ASSERT_FALSE(closure.IsRunned()); + } + + { + EXPECT_CALL(requestScheduler, ReSchedule(_)).Times(1); + + TestRunnedRequestClosure closure; + copysetClient.WriteChunk({}, 1, 1, 0, {}, 0, 0, {}, &closure); + ASSERT_FALSE(closure.IsRunned()); + } + } -} // namespace client -} // namespace curve + } // namespace client +} // namespace curve diff --git a/test/client/mds_failover_test.cpp b/test/client/mds_failover_test.cpp index c466457d99..df487eca62 100644 --- a/test/client/mds_failover_test.cpp +++ b/test/client/mds_failover_test.cpp @@ -25,9 +25,9 @@ #include #include -#include //NOLINT +#include //NOLINT #include -#include //NOLINT +#include //NOLINT #include #include "include/client/libcurve.h" @@ -47,239 +47,262 @@ #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" -namespace curve { -namespace client { - -// Testing mds failover switching state machine -TEST(MDSChangeTest, MDSFailoverTest) { - RPCExcutorRetryPolicy rpcexcutor; - - MetaServerOption metaopt; - metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9903"); - metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9904"); - metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9905"); - - metaopt.rpcRetryOpt.rpcTimeoutMs = 1000; - metaopt.rpcRetryOpt.rpcRetryIntervalUS = 10000; // 10ms - metaopt.rpcRetryOpt.maxFailedTimesBeforeChangeAddr = 2; - metaopt.rpcRetryOpt.rpcTimeoutMs = 1500; - - rpcexcutor.SetOption(metaopt.rpcRetryOpt); - - int mds0RetryTimes = 0; - int mds1RetryTimes = 0; - int mds2RetryTimes = 0; - - // Scenario 1: mds0, 1, 2, currentworkindex=0, mds0, mds1, and mds2 are all - // down, - // All RPCs sent to them are returned as EHOSTDOWN, resulting in - // upper level clients constantly switching to mds and retrying - // Continue according to 0-->1-->2 - // Every time rpc returns -EHOSTDOWN, it will directly trigger RPC - // switching. 
The final currentworkindex did not switch - auto task1 = [&](int mdsindex, uint64_t rpctimeoutMS, - brpc::Channel* channel, brpc::Controller* cntl) -> int { - if (mdsindex == 0) { - mds0RetryTimes++; +namespace curve +{ + namespace client + { + + // Testing the mds failover switching state machine + TEST(MDSChangeTest, MDSFailoverTest) + { + RPCExcutorRetryPolicy rpcexcutor; + + MetaServerOption metaopt; + metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9903"); + metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9904"); + metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9905"); + + metaopt.rpcRetryOpt.rpcTimeoutMs = 1000; + metaopt.rpcRetryOpt.rpcRetryIntervalUS = 10000; // 10ms + metaopt.rpcRetryOpt.maxFailedTimesBeforeChangeAddr = 2; + metaopt.rpcRetryOpt.rpcTimeoutMs = 1500; + + rpcexcutor.SetOption(metaopt.rpcRetryOpt); + + int mds0RetryTimes = 0; + int mds1RetryTimes = 0; + int mds2RetryTimes = 0; + + // Scenario 1: mds0, 1, 2, currentworkindex = 0; mds0, mds1, and mds2 are + // all down. + // Every RPC sent to them returns EHOSTDOWN, so the upper-layer + // client keeps switching mds and retrying, in the order + // 0-->1-->2. + // Each -EHOSTDOWN return triggers an immediate switch, and the + // final currentworkindex does not change. + auto task1 = [&](int mdsindex, uint64_t rpctimeoutMS, + brpc::Channel *channel, brpc::Controller *cntl) -> int + { + if (mdsindex == 0) + { + mds0RetryTimes++; + } + + if (mdsindex == 1) + { + mds1RetryTimes++; + } + + if (mdsindex == 2) + { + mds2RetryTimes++; + } + return -EHOSTDOWN; + }; + + uint64_t startMS = TimeUtility::GetTimeofDayMs(); + // Control-plane interface call; 1000 ms is the total retry time of this RPC + rpcexcutor.DoRPCTask(task1, 1000); + uint64_t endMS = TimeUtility::GetTimeofDayMs(); + ASSERT_GT(endMS - startMS, 1000 - 1); + + // Retries poll across the mds nodes, so the per-mds retry counts should + // be close to one another, differing by less than the number of mds + ASSERT_LT(abs(mds0RetryTimes - mds1RetryTimes), 3); + ASSERT_LT(abs(mds2RetryTimes - mds1RetryTimes), 3); + + startMS = TimeUtility::GetTimeofDayMs(); + rpcexcutor.DoRPCTask(task1, 3000); + endMS = TimeUtility::GetTimeofDayMs(); + ASSERT_GT(endMS - startMS, 3000 - 1); + ASSERT_EQ(0, rpcexcutor.GetCurrentWorkIndex()); + + // Scenario 2: mds0, 1, 2, currentworkindex = 0; mds0 goes down, and the + // working mds index is switched to index 2. The client is + // expected to switch directly to index 2 after retrying on + // index 0; at that point mds2 returns OK and the rpc stops + // retrying. 
+ // The client should therefore send two RPCs in total: one to mds0 and + // one to mds2, skipping mds1 in the middle. + mds0RetryTimes = 0; + mds1RetryTimes = 0; + mds2RetryTimes = 0; + auto task2 = [&](int mdsindex, uint64_t rpctimeoutMS, + brpc::Channel *channel, brpc::Controller *cntl) -> int + { + if (mdsindex == 0) + { + mds0RetryTimes++; + rpcexcutor.SetCurrentWorkIndex(2); + return -ECONNRESET; + } + + if (mdsindex == 1) + { + mds1RetryTimes++; + return -ECONNRESET; + } + + if (mdsindex == 2) + { + mds2RetryTimes++; + // OK is returned this time, so the RPC succeeds and there is + // no further retry + return LIBCURVE_ERROR::OK; + } + + return 0; + }; + startMS = TimeUtility::GetTimeofDayMs(); + rpcexcutor.DoRPCTask(task2, 1000); + endMS = TimeUtility::GetTimeofDayMs(); + ASSERT_LT(endMS - startMS, 1000); + ASSERT_EQ(2, rpcexcutor.GetCurrentWorkIndex()); + ASSERT_EQ(mds0RetryTimes, 1); + ASSERT_EQ(mds1RetryTimes, 0); + ASSERT_EQ(mds2RetryTimes, 1); + + // Scenario 3: mds0, 1, 2, currentworkindex = 1, and mds1 is down; the + // client switches between mds0 and mds2. + // After switching to 2, mds1 recovers; the client then switches + // back to mds1 and the rpc succeeds. The switching order is + // therefore 1->2->0, 1->2->0, 1. + mds0RetryTimes = 0; + mds1RetryTimes = 0; + mds2RetryTimes = 0; + rpcexcutor.SetCurrentWorkIndex(1); + auto task3 = [&](int mdsindex, uint64_t rpctimeoutMS, + brpc::Channel *channel, brpc::Controller *cntl) -> int + { + if (mdsindex == 0) + { + mds0RetryTimes++; + return -ECONNRESET; + } + + if (mdsindex == 1) + { + mds1RetryTimes++; + // On the third retry against mds1, success is returned upwards + // and the retrying stops + if (mds1RetryTimes == 3) + { + return LIBCURVE_ERROR::OK; + } + return -ECONNREFUSED; + } + + if (mdsindex == 2) + { + mds2RetryTimes++; + return -brpc::ELOGOFF; + } + + return 0; + }; + + startMS = TimeUtility::GetTimeofDayMs(); + rpcexcutor.DoRPCTask(task3, 1000); + endMS = TimeUtility::GetTimeofDayMs(); + ASSERT_LT(endMS - startMS, 1000); + ASSERT_EQ(mds0RetryTimes, 2); + ASSERT_EQ(mds1RetryTimes, 3); + ASSERT_EQ(mds2RetryTimes, 2); + + ASSERT_EQ(1, rpcexcutor.GetCurrentWorkIndex()); + + // Scenario 4: mds0, 1, 2, currentWorkindex = 0, but rpc requests to mds0 + // consistently time out; the rpc ultimately returns a timeout. + // A timed-out mds node is retried maxFailedTimesBeforeChangeMDS + // times before the client switches; currently + // mds.maxFailedTimesBeforeChangeMDS = 2, + // so the retry order should be: 0->0->1->2, 0->0->1->2, + // 0->0->1->2, ... + LOG(INFO) << "case 4"; + mds0RetryTimes = 0; + mds1RetryTimes = 0; + mds2RetryTimes = 0; + rpcexcutor.SetCurrentWorkIndex(0); + auto task4 = [&](int mdsindex, uint64_t rpctimeoutMS, + brpc::Channel *channel, brpc::Controller *cntl) -> int + { + if (mdsindex == 0) + { + mds0RetryTimes++; + return mds0RetryTimes % 2 == 0 ? 
-brpc::ERPCTIMEDOUT : -ETIMEDOUT; + } + + if (mdsindex == 1) + { + mds1RetryTimes++; + return -ECONNREFUSED; + } + + if (mdsindex == 2) + { + mds2RetryTimes++; + return -brpc::ELOGOFF; + } + + return 0; + }; + + startMS = TimeUtility::GetTimeofDayMs(); + rpcexcutor.DoRPCTask(task4, 3000); + endMS = TimeUtility::GetTimeofDayMs(); + ASSERT_GT(endMS - startMS, 3000 - 1); + ASSERT_EQ(0, rpcexcutor.GetCurrentWorkIndex()); + // mds0 is retried twice in every 0->0->1->2 round, so it should accumulate + // more retries than the other two mds combined + ASSERT_GT(mds0RetryTimes, mds1RetryTimes + mds2RetryTimes); + + // Scenario 5: mds0, 1, 2, currentWorkIndex = 0, + // but the first 10 rpc requests all return EHOSTDOWN. + // Each EHOSTDOWN retry sleeps for 10 ms, so those retries take + // about 100 ms in total + rpcexcutor.SetCurrentWorkIndex(0); + int hostDownTimes = 10; + auto task5 = [&](int mdsindex, uint64_t rpctimeoutMs, + brpc::Channel *channel, brpc::Controller *cntl) + { + static int count = 0; + if (++count <= hostDownTimes) + { + return -EHOSTDOWN; + } + + return 0; + }; + startMS = TimeUtility::GetTimeofDayMs(); + rpcexcutor.DoRPCTask(task5, 10000); // Total retry time 10s + endMS = TimeUtility::GetTimeofDayMs(); + ASSERT_GE(endMS - startMS, 100); + + // Scenario 6: mds keeps returning EHOSTDOWN during the retry process, for + // a total retry time of 5 s + rpcexcutor.SetCurrentWorkIndex(0); + int calledTimes = 0; + auto task6 = [&](int mdsindex, uint64_t rpctimeoutMs, + brpc::Channel *channel, brpc::Controller *cntl) + { + ++calledTimes; + return -EHOSTDOWN; + }; + + startMS = TimeUtility::GetTimeofDayMs(); + rpcexcutor.DoRPCTask(task6, 5 * 1000); // Total retry time 5s + endMS = TimeUtility::GetTimeofDayMs(); + ASSERT_GE(endMS - startMS, 5 * 1000 - 1); + + // Each hostdown response is followed by a 10 ms sleep and the total retry + // time is 5 s, so there should be at most about 500 retries; 10 extra + // attempts are allowed to reduce false positives + LOG(INFO) << "called times " << calledTimes; + ASSERT_LE(calledTimes, 510); } - if (mdsindex == 1) { - mds1RetryTimes++; - } - - if (mdsindex == 2) { - mds2RetryTimes++; - } - return -EHOSTDOWN; - }; - - uint64_t startMS = TimeUtility::GetTimeofDayMs(); - // Control surface interface call, 1000 is the total retry time of this RPC - rpcexcutor.DoRPCTask(task1, 1000); - uint64_t endMS = TimeUtility::GetTimeofDayMs(); - ASSERT_GT(endMS - startMS, 1000 - 1); - - // This retry is a polling retry, and the number of retries per mds should - // be close to and not exceed the total number of mds - ASSERT_LT(abs(mds0RetryTimes - mds1RetryTimes), 3); - ASSERT_LT(abs(mds2RetryTimes - mds1RetryTimes), 3); - - startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task1, 3000); - endMS = TimeUtility::GetTimeofDayMs(); - ASSERT_GT(endMS - startMS, 3000 - 1); - ASSERT_EQ(0, rpcexcutor.GetCurrentWorkIndex()); - - // Scenario 2: mds0, 1, 2, currentworkindex = 0, mds0 goes down, and it will - // be working at this time - // Mds index switches to index2, and it is expected that the client - // will directly switch to index2 after retrying with index = 0 At - // this point, mds2 directly returns OK and rpc stops trying again. 
- // Expected client to send a total of two RPCs, one to mds0 and the - // other to mds2, skipping the middle mds1。 - mds0RetryTimes = 0; - mds1RetryTimes = 0; - mds2RetryTimes = 0; - auto task2 = [&](int mdsindex, uint64_t rpctimeoutMS, - brpc::Channel* channel, brpc::Controller* cntl) -> int { - if (mdsindex == 0) { - mds0RetryTimes++; - rpcexcutor.SetCurrentWorkIndex(2); - return -ECONNRESET; - } - - if (mdsindex == 1) { - mds1RetryTimes++; - return -ECONNRESET; - } - - if (mdsindex == 2) { - mds2RetryTimes++; - // If OK is returned this time, then RPC should have succeeded and - // will not try again - return LIBCURVE_ERROR::OK; - } - - return 0; - }; - startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task2, 1000); - endMS = TimeUtility::GetTimeofDayMs(); - ASSERT_LT(endMS - startMS, 1000); - ASSERT_EQ(2, rpcexcutor.GetCurrentWorkIndex()); - ASSERT_EQ(mds0RetryTimes, 1); - ASSERT_EQ(mds1RetryTimes, 0); - ASSERT_EQ(mds2RetryTimes, 1); - - // Scenario 3: mds0, 1, 2, currentworkindex = 1, and mds1 is down, - // At this point, it will switch to mds0 and mds2 - // After switching to 2, mds1 resumed, and then switched to mds1, and - // the rpc was successfully sent. At this point, the switching order is - // 1->2->0, 1->2->0, 1. - mds0RetryTimes = 0; - mds1RetryTimes = 0; - mds2RetryTimes = 0; - rpcexcutor.SetCurrentWorkIndex(1); - auto task3 = [&](int mdsindex, uint64_t rpctimeoutMS, - brpc::Channel* channel, brpc::Controller* cntl) -> int { - if (mdsindex == 0) { - mds0RetryTimes++; - return -ECONNRESET; - } - - if (mdsindex == 1) { - mds1RetryTimes++; - // When retrying on mds1 for the third time, success is returned - // upwards and the retry is stopped - if (mds1RetryTimes == 3) { - return LIBCURVE_ERROR::OK; - } - return -ECONNREFUSED; - } - - if (mdsindex == 2) { - mds2RetryTimes++; - return -brpc::ELOGOFF; - } - - return 0; - }; - - startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task3, 1000); - endMS = TimeUtility::GetTimeofDayMs(); - ASSERT_LT(endMS - startMS, 1000); - ASSERT_EQ(mds0RetryTimes, 2); - ASSERT_EQ(mds1RetryTimes, 3); - ASSERT_EQ(mds2RetryTimes, 2); - - ASSERT_EQ(1, rpcexcutor.GetCurrentWorkIndex()); - - // Scenario 4: mds0, 1, 2, currentWorkindex = 0, but the rpc request to mds1 - // consistently times out - // The final result returned by rpc is timeout - // For timeout mds nodes, they will continuously retry - // mds.maxFailedTimesBeforeChangeMDS and switch Current - // mds.maxFailedTimesBeforeChangeMDS=2. - // So the retry logic should be: 0->0->1->2, 0->0->1->2, 0->0->1->2, - // ... - LOG(INFO) << "case 4"; - mds0RetryTimes = 0; - mds1RetryTimes = 0; - mds2RetryTimes = 0; - rpcexcutor.SetCurrentWorkIndex(0); - auto task4 = [&](int mdsindex, uint64_t rpctimeoutMS, - brpc::Channel* channel, brpc::Controller* cntl) -> int { - if (mdsindex == 0) { - mds0RetryTimes++; - return mds0RetryTimes % 2 == 0 ? 
-brpc::ERPCTIMEDOUT : -ETIMEDOUT; - } - - if (mdsindex == 1) { - mds1RetryTimes++; - return -ECONNREFUSED; - } - - if (mdsindex == 2) { - mds2RetryTimes++; - return -brpc::ELOGOFF; - } - - return 0; - }; - - startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task4, 3000); - endMS = TimeUtility::GetTimeofDayMs(); - ASSERT_GT(endMS - startMS, 3000 - 1); - ASSERT_EQ(0, rpcexcutor.GetCurrentWorkIndex()); - // This retry is a polling retry, and the number of retries per mds should - // be close to and not exceed the total number of mds - ASSERT_GT(mds0RetryTimes, mds1RetryTimes + mds2RetryTimes); - - // Scenario 5: mds0, 1, 2, currentWorkIndex = 0 - // But the first 10 requests from rpc all returned EHOSTDOWN - // Mds retries sleep for 10ms, so it takes a total of 100ms - rpcexcutor.SetCurrentWorkIndex(0); - int hostDownTimes = 10; - auto task5 = [&](int mdsindex, uint64_t rpctimeoutMs, - brpc::Channel* channel, brpc::Controller* cntl) { - static int count = 0; - if (++count <= hostDownTimes) { - return -EHOSTDOWN; - } - - return 0; - }; - startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task5, 10000); // Total retry time 10s - endMS = TimeUtility::GetTimeofDayMs(); - ASSERT_GE(endMS - startMS, 100); - - // Scenario 6: mds keeps returning EHOSTDOWN during the retry process, with - // a total of 5 retries - rpcexcutor.SetCurrentWorkIndex(0); - int calledTimes = 0; - auto task6 = [&](int mdsindex, uint64_t rpctimeoutMs, - brpc::Channel* channel, brpc::Controller* cntl) { - ++calledTimes; - return -EHOSTDOWN; - }; - - startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task6, 5 * 1000); // Total retry time 5s - endMS = TimeUtility::GetTimeofDayMs(); - ASSERT_GE(endMS - startMS, 5 * 1000 - 1); - - // In each hostdown situation, sleep for 10ms and the total retry time is - // 5s, so the total number of retries is less than or equal to 500 times In - // order to minimize false positives, 10 redundant attempts were added - LOG(INFO) << "called times " << calledTimes; - ASSERT_LE(calledTimes, 510); -} - -} // namespace client -} // namespace curve + } // namespace client +} // namespace curve const std::vector registConfOff{ std::string("mds.listen.addr=127.0.0.1:9903,127.0.0.1:9904,127.0.0.1:9905"), @@ -311,11 +334,12 @@ const std::vector registConfON{ std::string("mds.registerToMDS=true")}; std::string mdsMetaServerAddr = - "127.0.0.1:9903,127.0.0.1:9904,127.0.0.1:9905"; // NOLINT -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string configpath = "./test/client/configs/mds_failover.conf"; // NOLINT -int main(int argc, char** argv) { + "127.0.0.1:9903,127.0.0.1:9904,127.0.0.1:9905"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string configpath = "./test/client/configs/mds_failover.conf"; // NOLINT +int main(int argc, char **argv) +{ ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); google::ParseCommandLineFlags(&argc, &argv, false); diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp index 4bcc65f8e7..1ff6116653 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp @@ -39,52 +39,52 @@ using curve::client::FileClient; using curve::client::SourceReader; using 
curve::client::UserInfo_t; -const std::string kTestPrefix = "SCSTest"; // NOLINT +const std::string kTestPrefix = "SCSTest"; // NOLINT const uint64_t chunkSize = 16ULL * 1024 * 1024; const uint64_t segmentSize = 32ULL * 1024 * 1024; const uint64_t chunkGap = 1; -const char* kEtcdClientIpPort = "127.0.0.1:10001"; -const char* kEtcdPeerIpPort = "127.0.0.1:10002"; -const char* kMdsIpPort = "127.0.0.1:10003"; -const char* kChunkServerIpPort1 = "127.0.0.1:10004"; -const char* kChunkServerIpPort2 = "127.0.0.1:10005"; -const char* kChunkServerIpPort3 = "127.0.0.1:10006"; -const char* kSnapshotCloneServerIpPort = "127.0.0.1:10007"; +const char *kEtcdClientIpPort = "127.0.0.1:10001"; +const char *kEtcdPeerIpPort = "127.0.0.1:10002"; +const char *kMdsIpPort = "127.0.0.1:10003"; +const char *kChunkServerIpPort1 = "127.0.0.1:10004"; +const char *kChunkServerIpPort2 = "127.0.0.1:10005"; +const char *kChunkServerIpPort3 = "127.0.0.1:10006"; +const char *kSnapshotCloneServerIpPort = "127.0.0.1:10007"; const int kMdsDummyPort = 10008; -const char* kSnapshotCloneServerDummyServerPort = "12000"; -const char* kLeaderCampaginPrefix = "snapshotcloneserverleaderlock3"; +const char *kSnapshotCloneServerDummyServerPort = "12000"; +const char *kLeaderCampaginPrefix = "snapshotcloneserverleaderlock3"; -static const char* kDefaultPoolset = "default"; +static const char *kDefaultPoolset = "default"; -const std::string kLogPath = "./runlog/" + kTestPrefix + "Log"; // NOLINT -const std::string kMdsDbName = kTestPrefix + "DB"; // NOLINT -const std::string kMdsConfigPath = // NOLINT +const std::string kLogPath = "./runlog/" + kTestPrefix + "Log"; // NOLINT +const std::string kMdsDbName = kTestPrefix + "DB"; // NOLINT +const std::string kMdsConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_mds.conf"; -const std::string kCSConfigPath = // NOLINT +const std::string kCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_chunkserver.conf"; -const std::string kCsClientConfigPath = // NOLINT +const std::string kCsClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_cs_client.conf"; -const std::string kSnapClientConfigPath = // NOLINT +const std::string kSnapClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_snap_client.conf"; -const std::string kS3ConfigPath = // NOLINT +const std::string kS3ConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_s3.conf"; -const std::string kSCSConfigPath = // NOLINT +const std::string kSCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_scs.conf"; -const std::string kClientConfigPath = // NOLINT +const std::string kClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_client.conf"; @@ -130,13 +130,13 @@ const std::vector chunkserverConf1{ {"--graceful_quit_on_sigterm"}, {"-chunkServerStoreUri=local://./" + kTestPrefix + "1/"}, {"-chunkServerMetaUri=local://./" + kTestPrefix + - "1/chunkserver.dat"}, // NOLINT + "1/chunkserver.dat"}, // NOLINT {"-copySetUri=local://./" + kTestPrefix + "1/copysets"}, {"-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets"}, {"-recycleUri=local://./" + kTestPrefix + "1/recycler"}, {"-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/"}, {"-chunkFilePoolMetaPath=./" + kTestPrefix + - "1/chunkfilepool.meta"}, // NOLINT + "1/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + 
kCSConfigPath, {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, @@ -150,13 +150,13 @@ const std::vector chunkserverConf2{ {"--graceful_quit_on_sigterm"}, {"-chunkServerStoreUri=local://./" + kTestPrefix + "2/"}, {"-chunkServerMetaUri=local://./" + kTestPrefix + - "2/chunkserver.dat"}, // NOLINT + "2/chunkserver.dat"}, // NOLINT {"-copySetUri=local://./" + kTestPrefix + "2/copysets"}, {"-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets"}, {"-recycleUri=local://./" + kTestPrefix + "2/recycler"}, {"-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/"}, {"-chunkFilePoolMetaPath=./" + kTestPrefix + - "2/chunkfilepool.meta"}, // NOLINT + "2/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, @@ -170,13 +170,13 @@ const std::vector chunkserverConf3{ {"--graceful_quit_on_sigterm"}, {"-chunkServerStoreUri=local://./" + kTestPrefix + "3/"}, {"-chunkServerMetaUri=local://./" + kTestPrefix + - "3/chunkserver.dat"}, // NOLINT + "3/chunkserver.dat"}, // NOLINT {"-copySetUri=local://./" + kTestPrefix + "3/copysets"}, {"-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets"}, {"-recycleUri=local://./" + kTestPrefix + "3/recycler"}, {"-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/"}, {"-chunkFilePoolMetaPath=./" + kTestPrefix + - "3/chunkfilepool.meta"}, // NOLINT + "3/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, @@ -217,908 +217,954 @@ const std::vector clientConfigOptions{ std::string("mds.rpcTimeoutMS=4000"), }; -const char* testFile1_ = "/ItUser1/file1"; -const char* testFile2_ = "/ItUser1/file2"; -const char* testFile3_ = "/ItUser2/file3"; -const char* testFile4_ = "/ItUser1/file3"; -const char* testFile5_ = "/ItUser1/file4"; -const char* testUser1_ = "ItUser1"; -const char* testUser2_ = "ItUser2"; - -namespace curve { -namespace snapshotcloneserver { - -class SnapshotCloneServerTest : public ::testing::Test { - public: - static void SetUpTestCase() { - std::string mkLogDirCmd = std::string("mkdir -p ") + kLogPath; - system(mkLogDirCmd.c_str()); - - cluster_ = new CurveCluster(); - ASSERT_NE(nullptr, cluster_); - - // Initialize db - system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); - system(std::string("rm -rf " + kTestPrefix + "1").c_str()); - system(std::string("rm -rf " + kTestPrefix + "2").c_str()); - system(std::string("rm -rf " + kTestPrefix + "3").c_str()); - - // Start etcd - pid_t pid = cluster_->StartSingleEtcd( - 1, kEtcdClientIpPort, kEtcdPeerIpPort, - std::vector{"--name=" + kTestPrefix}); - LOG(INFO) << "etcd 1 started on " << kEtcdPeerIpPort - << ", pid = " << pid; - ASSERT_GT(pid, 0); - - cluster_->InitSnapshotCloneMetaStoreEtcd(kEtcdClientIpPort); - - cluster_->PrepareConfig(kMdsConfigPath, - mdsConfigOptions); - - // Start an mds - pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, - true); - LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; - ASSERT_GT(pid, 0); - - // Creating a physical pool - ASSERT_EQ(0, cluster_->PreparePhysicalPool( - 1, - "./test/integration/snapshotcloneserver/" - "config/topo.json")); // NOLINT - - // format chunkfilepool and walfilepool - std::vector threadpool(3); - - threadpool[0] = std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "1/chunkfilepool/", - "./" + kTestPrefix + "1/chunkfilepool.meta", - "./" + kTestPrefix + "1/chunkfilepool/", 1); - 
threadpool[1] = std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "2/chunkfilepool/", - "./" + kTestPrefix + "2/chunkfilepool.meta", - "./" + kTestPrefix + "2/chunkfilepool/", 1); - threadpool[2] = std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "3/chunkfilepool/", - "./" + kTestPrefix + "3/chunkfilepool.meta", - "./" + kTestPrefix + "3/chunkfilepool/", 1); - for (int i = 0; i < 3; i++) { - threadpool[i].join(); +const char *testFile1_ = "/ItUser1/file1"; +const char *testFile2_ = "/ItUser1/file2"; +const char *testFile3_ = "/ItUser2/file3"; +const char *testFile4_ = "/ItUser1/file3"; +const char *testFile5_ = "/ItUser1/file4"; +const char *testUser1_ = "ItUser1"; +const char *testUser2_ = "ItUser2"; + +namespace curve +{ + namespace snapshotcloneserver + { + + class SnapshotCloneServerTest : public ::testing::Test + { + public: + static void SetUpTestCase() + { + std::string mkLogDirCmd = std::string("mkdir -p ") + kLogPath; + system(mkLogDirCmd.c_str()); + + cluster_ = new CurveCluster(); + ASSERT_NE(nullptr, cluster_); + + // Initialize db + system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); + system(std::string("rm -rf " + kTestPrefix + "1").c_str()); + system(std::string("rm -rf " + kTestPrefix + "2").c_str()); + system(std::string("rm -rf " + kTestPrefix + "3").c_str()); + + // Start etcd + pid_t pid = cluster_->StartSingleEtcd( + 1, kEtcdClientIpPort, kEtcdPeerIpPort, + std::vector<std::string>{"--name=" + kTestPrefix}); + LOG(INFO) << "etcd 1 started on " << kEtcdPeerIpPort + << ", pid = " << pid; + ASSERT_GT(pid, 0); + + cluster_->InitSnapshotCloneMetaStoreEtcd(kEtcdClientIpPort); + + cluster_->PrepareConfig(kMdsConfigPath, + mdsConfigOptions); + + // Start an mds + pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, + true); + LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; + ASSERT_GT(pid, 0); + + // Creating a physical pool + ASSERT_EQ(0, cluster_->PreparePhysicalPool( + 1, + "./test/integration/snapshotcloneserver/" + "config/topo.json")); // NOLINT + + // format chunkfilepool and walfilepool + std::vector<std::thread> threadpool(3); + + threadpool[0] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "1/chunkfilepool/", + "./" + kTestPrefix + "1/chunkfilepool.meta", + "./" + kTestPrefix + "1/chunkfilepool/", 1); + threadpool[1] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "2/chunkfilepool/", + "./" + kTestPrefix + "2/chunkfilepool.meta", + "./" + kTestPrefix + "2/chunkfilepool/", 1); + threadpool[2] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "3/chunkfilepool/", + "./" + kTestPrefix + "3/chunkfilepool.meta", + "./" + kTestPrefix + "3/chunkfilepool/", 1); + for (int i = 0; i < 3; i++) + { + threadpool[i].join(); + } + + cluster_->PrepareConfig(kCsClientConfigPath, + csClientConfigOptions); + + cluster_->PrepareConfig(kS3ConfigPath, + s3ConfigOptions); + + cluster_->PrepareConfig(kCSConfigPath, + chunkserverConfigOptions); + + // Create chunkserver + pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1, + chunkserverConf1); + LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1 + << ", pid = " << pid; + ASSERT_GT(pid, 0); + pid = cluster_->StartSingleChunkServer(2, kChunkServerIpPort2, + chunkserverConf2); + LOG(INFO) << "chunkserver 2 started on " << kChunkServerIpPort2 + << ", pid = " << pid; + ASSERT_GT(pid, 0); + pid = cluster_->StartSingleChunkServer(3, kChunkServerIpPort3, + 
chunkserverConf3); + LOG(INFO) << "chunkserver 3 started on " << kChunkServerIpPort3 + << ", pid = " << pid; + ASSERT_GT(pid, 0); + + std::this_thread::sleep_for(std::chrono::seconds(5)); + + // Create a logical pool, then sleep for a while so that the + // underlying copysets can elect their leaders first + ASSERT_EQ(0, cluster_->PrepareLogicalPool( + 1, + "./test/integration/snapshotcloneserver/config/" + "topo.json")); + + cluster_->PrepareConfig( + kSnapClientConfigPath, snapClientConfigOptions); + + cluster_->PrepareConfig( + kSCSConfigPath, snapshotcloneserverConfigOptions); + + pid = cluster_->StartSnapshotCloneServer(1, kSnapshotCloneServerIpPort, + snapshotcloneConf); + LOG(INFO) << "SnapshotCloneServer 1 started on " + << kSnapshotCloneServerIpPort << ", pid = " << pid; + ASSERT_GT(pid, 0); + + cluster_->PrepareConfig(kClientConfigPath, + clientConfigOptions); + + fileClient_ = new FileClient(); + fileClient_->Init(kClientConfigPath); + + UserInfo_t userinfo; + userinfo.owner = "ItUser1"; + + ASSERT_EQ(0, fileClient_->Mkdir("/ItUser1", userinfo)); + + std::string fakeData(4096, 'x'); + ASSERT_TRUE(CreateAndWriteFile(testFile1_, testUser1_, fakeData)); + LOG(INFO) << "Write testFile1_ success."; + + ASSERT_TRUE(CreateAndWriteFile(testFile2_, testUser1_, fakeData)); + LOG(INFO) << "Write testFile2_ success."; + + UserInfo_t userinfo2; + userinfo2.owner = "ItUser2"; + ASSERT_EQ(0, fileClient_->Mkdir("/ItUser2", userinfo2)); + + ASSERT_TRUE(CreateAndWriteFile(testFile3_, testUser2_, fakeData)); + LOG(INFO) << "Write testFile3_ success."; + + ASSERT_EQ(0, fileClient_->Create(testFile4_, userinfo, + 10ULL * 1024 * 1024 * 1024)); + + ASSERT_EQ(0, fileClient_->Create(testFile5_, userinfo, + 10ULL * 1024 * 1024 * 1024)); + } + + static bool CreateAndWriteFile(const std::string &fileName, + const std::string &user, + const std::string &dataSample) + { + UserInfo_t userinfo; + userinfo.owner = user; + int ret = + fileClient_->Create(fileName, userinfo, 10ULL * 1024 * 1024 * 1024); + if (ret < 0) + { + LOG(ERROR) << "Create fail, ret = " << ret; + return false; + } + return WriteFile(fileName, user, dataSample); + } + + static bool WriteFile(const std::string &fileName, const std::string &user, + const std::string &dataSample) + { + int ret = 0; + UserInfo_t userinfo; + userinfo.owner = user; + int testfd1_ = fileClient_->Open(fileName, userinfo); + if (testfd1_ < 0) + { + LOG(ERROR) << "Open fail, ret = " << testfd1_; + return false; + } + // Write 4 KB at the head of every chunkGap-th chunk; the writes + // cover two segments in total + uint64_t totalChunk = 2ULL * segmentSize / chunkSize; + for (uint64_t i = 0; i < totalChunk / chunkGap; i++) + { + ret = + fileClient_->Write(testfd1_, dataSample.c_str(), + i * chunkSize * chunkGap, dataSample.size()); + if (ret < 0) + { + LOG(ERROR) << "Write Fail, ret = " << ret; + return false; + } + } + ret = fileClient_->Close(testfd1_); + if (ret < 0) + { + LOG(ERROR) << "Close fail, ret = " << ret; + return false; + } + return true; + } + + static bool CheckFileData(const std::string &fileName, + const std::string &user, + const std::string &dataSample) + { + UserInfo_t userinfo; + userinfo.owner = user; + int dstFd = fileClient_->Open(fileName, userinfo); + if (dstFd < 0) + { + LOG(ERROR) << "Open fail, ret = " << dstFd; + return false; + } + + int ret = 0; + uint64_t totalChunk = 2ULL * segmentSize / chunkSize; + for (uint64_t i = 0; i < totalChunk / chunkGap; i++) + { + char buf[4096]; + ret = fileClient_->Read(dstFd, buf, i * chunkSize * chunkGap, 4096); + if (ret < 0) + { + 
LOG(ERROR) << "Read fail, ret = " << ret; + return false; + } + std::string data(buf, 4096); + if (data != dataSample) + { + LOG(ERROR) << "CheckFileData not Equal, data = [" << data + << "] , expect data = [" << dataSample << "]."; + return false; + } + } + ret = fileClient_->Close(dstFd); + if (ret < 0) + { + LOG(ERROR) << "Close fail, ret = " << ret; + return false; + } + return true; + } + + static void TearDownTestCase() + { + fileClient_->UnInit(); + delete fileClient_; + fileClient_ = nullptr; + ASSERT_EQ(0, cluster_->StopCluster()); + delete cluster_; + cluster_ = nullptr; + system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); + system(std::string("rm -rf " + kTestPrefix + "1").c_str()); + system(std::string("rm -rf " + kTestPrefix + "2").c_str()); + system(std::string("rm -rf " + kTestPrefix + "3").c_str()); + } + + void SetUp() {} + + void TearDown() {} + + void PrepareSnapshotForTestFile1(std::string *uuid1) + { + if (!hasSnapshotForTestFile1_) + { + int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", uuid1); + ASSERT_EQ(0, ret); + bool success1 = + CheckSnapshotSuccess(testUser1_, testFile1_, *uuid1); + ASSERT_TRUE(success1); + hasSnapshotForTestFile1_ = true; + snapIdForTestFile1_ = *uuid1; + } + } + + void WaitDeleteSnapshotForTestFile1() + { + if (hasSnapshotForTestFile1_) + { + ASSERT_EQ(0, DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, + snapIdForTestFile1_)); + } + } + + static CurveCluster *cluster_; + static FileClient *fileClient_; + + bool hasSnapshotForTestFile1_ = false; + std::string snapIdForTestFile1_; + }; + + CurveCluster *SnapshotCloneServerTest::cluster_ = nullptr; + FileClient *SnapshotCloneServerTest::fileClient_ = nullptr; + + // Regular test cases + // Scenario 1: Adding, deleting, and searching snapshots + TEST_F(SnapshotCloneServerTest, TestSnapshotAddDeleteGet) + { + std::string uuid1; + int ret = 0; + // Step1: User testUser1_ Take a snapshot of non-existent files + // Expected 1: Return file does not exist + ret = MakeSnapshot(testUser1_, "/ItUser1/notExistFile", "snap1", &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Step2: User testUser2_ For testFile1_ Take a snapshot + // Expected 2: Failed to return user authentication + ret = MakeSnapshot(testUser2_, testFile1_, "snap1", &uuid1); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step3: User testUser1_ For testFile1_ Take a snapshot snap1. 
+ // Step3: user testUser1_ takes snapshot snap1 of testFile1_ + // Expected 3: the snapshot succeeds + ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1); + ASSERT_EQ(0, ret); + + std::string fakeData(4096, 'y'); + ASSERT_TRUE(WriteFile(testFile1_, testUser1_, fakeData)); + ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); + + // Step4: get snapshot info, user=testUser1_, filename=testFile1_ + // Expected 4: returns the information of snapshot snap1 + bool success1 = CheckSnapshotSuccess(testUser1_, testFile1_, uuid1); + ASSERT_TRUE(success1); + + // Step5: get snapshot info, user=testUser2_, filename=testFile1_ + // Expected 5: returns user-authentication failure + FileSnapshotInfo info1; + ret = GetSnapshotInfo(testUser2_, testFile1_, uuid1, &info1); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step6: list snapshot info, user=testUser2_, filename=testFile2_ + // Expected 6: returns an empty list + std::vector<FileSnapshotInfo> infoVec; + ret = ListFileSnapshotInfo(testUser2_, testFile2_, 10, 0, &infoVec); + ASSERT_EQ(0, ret); + ASSERT_EQ(0, infoVec.size()); + + // Step7: testUser2_ deletes snapshot snap1 + // Expected 7: returns user-authentication failure + ret = DeleteSnapshot(testUser2_, testFile1_, uuid1); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step8: testUser1_ deletes snapshot snap1 under testFile2_ + // Expected 8: returns file-name mismatch + ret = DeleteSnapshot(testUser1_, testFile2_, uuid1); + ASSERT_EQ(kErrCodeFileNameNotMatch, ret); + + // Step9: testUser1_ deletes snapshot snap1 + // Expected 9: the deletion succeeds + ret = DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, uuid1); + ASSERT_EQ(0, ret); + + // Step10: list snapshot info, user=testUser1_, filename=testFile1_ + // Expected 10: returns an empty list + ret = ListFileSnapshotInfo(testUser1_, testFile1_, 10, 0, &infoVec); + ASSERT_EQ(0, ret); + ASSERT_EQ(0, infoVec.size()); + + // Step11: testUser1_ deletes snapshot snap1 again (duplicate deletion) + // Expected 11: the deletion succeeds + ret = DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, uuid1); + ASSERT_EQ(0, ret); + + // Restore testFile1_ + std::string fakeData2(4096, 'x'); + ASSERT_TRUE(WriteFile(testFile1_, testUser1_, fakeData2)); } - cluster_->PrepareConfig(kCsClientConfigPath, - csClientConfigOptions); - - cluster_->PrepareConfig(kS3ConfigPath, - s3ConfigOptions); - - cluster_->PrepareConfig(kCSConfigPath, - chunkserverConfigOptions); - - // Create chunkserver - pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1, - chunkserverConf1); - LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1 - << ", pid = " << pid; - ASSERT_GT(pid, 0); - pid = cluster_->StartSingleChunkServer(2, kChunkServerIpPort2, - chunkserverConf2); - LOG(INFO) << "chunkserver 2 started on " << kChunkServerIpPort2 - << ", pid = " << pid; - ASSERT_GT(pid, 0); - pid = cluster_->StartSingleChunkServer(3, kChunkServerIpPort3, - chunkserverConf3); - LOG(INFO) << "chunkserver 3 started on " << kChunkServerIpPort3 - << ", pid = " << pid; - ASSERT_GT(pid, 0); - - std::this_thread::sleep_for(std::chrono::seconds(5)); - - // Create a logical pool and sleep for a period of time to let the - // underlying copyset select the primary first - ASSERT_EQ(0, cluster_->PrepareLogicalPool( - 1, - "./test/integration/snapshotcloneserver/config/" - "topo.json")); - - cluster_->PrepareConfig( - kSnapClientConfigPath, snapClientConfigOptions); - - cluster_->PrepareConfig( - kSCSConfigPath, snapshotcloneserverConfigOptions); - - pid = cluster_->StartSnapshotCloneServer(1, kSnapshotCloneServerIpPort, - snapshotcloneConf); - LOG(INFO) << "SnapshotCloneServer 1 started on " - << kSnapshotCloneServerIpPort << ", pid = " << pid; - ASSERT_GT(pid, 0); - - cluster_->PrepareConfig(kClientConfigPath, - clientConfigOptions); - - fileClient_ = new FileClient(); - fileClient_->Init(kClientConfigPath); - - UserInfo_t userinfo; - userinfo.owner = "ItUser1"; - - ASSERT_EQ(0, fileClient_->Mkdir("/ItUser1", userinfo)); - - std::string fakeData(4096, 'x'); - ASSERT_TRUE(CreateAndWriteFile(testFile1_, testUser1_, fakeData)); - LOG(INFO) << "Write testFile1_ success."; - - ASSERT_TRUE(CreateAndWriteFile(testFile2_, testUser1_, fakeData)); - LOG(INFO) << "Write testFile2_ success."; - - UserInfo_t userinfo2; - userinfo2.owner = "ItUser2"; - ASSERT_EQ(0, fileClient_->Mkdir("/ItUser2", userinfo2)); - - ASSERT_TRUE(CreateAndWriteFile(testFile3_, testUser2_, fakeData)); - LOG(INFO) << "Write testFile3_ success."; - - ASSERT_EQ(0, fileClient_->Create(testFile4_, userinfo, - 10ULL * 1024 * 1024 * 1024)); - - ASSERT_EQ(0, fileClient_->Create(testFile5_, userinfo, - 10ULL * 1024 * 1024 * 1024)); - } - - static bool CreateAndWriteFile(const std::string& fileName, - const std::string& user, - const std::string& dataSample) { - UserInfo_t userinfo; - userinfo.owner = user; - int ret = - fileClient_->Create(fileName, userinfo, 10ULL * 1024 * 1024 * 1024); - if (ret < 0) { - LOG(ERROR) << "Create fail, ret = " << ret; - return false;
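+ // Note: TestCancelSnapshot below polls GetSnapshotInfo for up to 600 rounds, 3 s apart; a cancel is only attempted while the snapshot is still pending or canceling.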
+ // Scenario 2: cancel a snapshot + TEST_F(SnapshotCloneServerTest, TestCancelSnapshot) + { + std::string uuid1; + int ret = MakeSnapshot(testUser1_, testFile1_, "snapToCancle", &uuid1); + ASSERT_EQ(0, ret); + + bool success1 = false; + bool isCancel = false; + for (int i = 0; i < 600; i++) + { + FileSnapshotInfo info1; + int retCode = GetSnapshotInfo(testUser1_, testFile1_, uuid1, &info1); + if (retCode == 0) + { + if (info1.GetSnapshotInfo().GetStatus() == Status::pending || + info1.GetSnapshotInfo().GetStatus() == Status::canceling) + { + if (!isCancel) + { + // Step1: testUser1_ takes snapshot snap1 of testFile1_; + // before the snapshot completes, testUser2_ cancels + // snapshot snap1 of testFile1_ + // Expected 1: the cancel fails with a user-authentication + // error + int retCode = CancelSnapshot(testUser2_, testFile1_, uuid1); + ASSERT_EQ(kErrCodeInvalidUser, retCode); + + // Step2: testUser1_ takes snapshot snap1 of testFile1_; + // before the snapshot completes, testUser1_ cancels a + // snapshot of testFile1_ that does not exist + // Expected 2: returns kErrCodeCannotCancelFinished + retCode = + CancelSnapshot(testUser1_, testFile1_, "notExistUUId"); + ASSERT_EQ(kErrCodeCannotCancelFinished, retCode); + + // Step3: testUser1_ takes snapshot snap1 of testFile1_; + // before the snapshot completes, testUser1_ cancels + // snapshot snap1 of testFile2_ + // Expected 3: returns file-name mismatch + retCode = CancelSnapshot(testUser1_, testFile2_, uuid1); + ASSERT_EQ(kErrCodeFileNameNotMatch, retCode); + + // Step4: testUser1_ takes a snapshot of testFile1_; before it + // completes, testUser1_ cancels snapshot snap1 + // Expected 4: the snapshot is cancelled successfully + retCode = CancelSnapshot(testUser1_, testFile1_, uuid1); + ASSERT_EQ(0, retCode); + isCancel = true; + } + std::this_thread::sleep_for(std::chrono::milliseconds(3000)); + continue; + } + else if (info1.GetSnapshotInfo().GetStatus() == Status::done) + { + success1 = false; + break; + } + else + { + FAIL() << "Snapshot Fail On status = " + << 
static_cast<int>(info1.GetSnapshotInfo().GetStatus()); + } + } + else if (retCode == -8) + { + // Step5: get snapshot info, user=testUser1_, filename=testFile1_ + // Expected 5: returns empty, since the snapshot is gone + success1 = true; + break; + } + } + ASSERT_TRUE(success1); + + // Step6: after the snapshot has completed, testUser1_ cancels snapshot + // snap1 of testFile1_ + // Expected 6: returns cannot-cancel, because the pending snapshot does + // not exist or has already finished + ret = CancelSnapshot(testUser1_, testFile1_, uuid1); + ASSERT_EQ(kErrCodeCannotCancelFinished, ret); } - return WriteFile(fileName, user, dataSample); - } - - static bool WriteFile(const std::string& fileName, const std::string& user, - const std::string& dataSample) { - int ret = 0; - UserInfo_t userinfo; - userinfo.owner = user; - int testfd1_ = fileClient_->Open(fileName, userinfo); - if (testfd1_ < 0) { - LOG(ERROR) << "Open fail, ret = " << testfd1_; - return false; + // Scenario 3: lazy snapshot clone + TEST_F(SnapshotCloneServerTest, TestSnapLazyClone) + { + std::string snapId; + PrepareSnapshotForTestFile1(&snapId); + + // Step1: testUser1_ clones a snapshot that does not exist, + // fileName=SnapLazyClone1 + // Expected 1: returns snapshot-not-exist + std::string uuid1, uuid2, uuid3, uuid4, uuid5; + int ret; + ret = CloneOrRecover("Clone", testUser1_, "UnExistSnapId1", + "/ItUser1/SnapLazyClone1", true, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Step2: testUser2_ clones snapshot snap1, fileName=SnapLazyClone1 + // Expected 2: returns user-authentication failure + ret = CloneOrRecover("Clone", testUser2_, snapId, "/ItUser2/SnapLazyClone1", + true, &uuid2); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step3: testUser1_ clones snapshot snap1, fileName=SnapLazyClone1 + // Expected 3: the clone succeeds + std::string dstFile = "/ItUser1/SnapLazyClone1"; + ret = CloneOrRecover("Clone", testUser1_, snapId, dstFile, true, &uuid3); + ASSERT_EQ(0, ret); + + // Step4: testUser1_ clones snapshot snap1 again, fileName=SnapLazyClone1 + // (duplicate clone) + // Expected 4: the clone succeeds (idempotent) + ret = CloneOrRecover("Clone", testUser1_, snapId, "/ItUser1/SnapLazyClone1", + true, &uuid4); + ASSERT_EQ(0, ret); + + // Flatten + ret = Flatten(testUser1_, uuid3); + ASSERT_EQ(0, ret); + + // Step5: testUser1_ GetCloneTask + // Expected 5: returns the clone task of SnapLazyClone1 + bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid3, true); + ASSERT_TRUE(success1); + + // Step6: testUser2_ GetCloneTask + // Expected 6: returns an empty list + std::vector<TaskCloneInfo> infoVec; + ret = ListCloneTaskInfo(testUser2_, 10, 0, &infoVec); + ASSERT_EQ(0, ret); + ASSERT_EQ(0, infoVec.size()); + + // Step7: testUser2_ calls CleanCloneTask with the UUID of SnapLazyClone1 + // Expected 7: returns user-authentication failure + ret = CleanCloneTask(testUser2_, uuid3); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step8: testUser1_ calls CleanCloneTask with the UUID of SnapLazyClone1 + // Expected 8: the call succeeds + ret = CleanCloneTask(testUser1_, uuid3); + ASSERT_EQ(0, ret); + + // Wait for the cleanup to complete + std::this_thread::sleep_for(std::chrono::seconds(3)); + + // Step9: testUser1_ calls CleanCloneTask with the UUID of SnapLazyClone1 + // again (repeated execution) + // Expected 9: the call succeeds + ret = CleanCloneTask(testUser1_, uuid3); + ASSERT_EQ(0, ret); + + // Step10: testUser1_ GetCloneTask + // Expected 10: returns empty + TaskCloneInfo info; + ret = GetCloneTaskInfo(testUser1_, uuid3, &info); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Verify data correctness + std::string fakeData(4096, 'x'); + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); }
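+ // A lazy clone returns before the data copy finishes, which is why the test above issues an explicit Flatten before checking the task; the non-lazy variant below copies the data up front and needs no Flatten call.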
ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Verify data correctness + std::string fakeData(4096, 'x'); + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } - // Write the first 4k data and two segments for each chunk - uint64_t totalChunk = 2ULL * segmentSize / chunkSize; - for (uint64_t i = 0; i < totalChunk / chunkGap; i++) { - ret = - fileClient_->Write(testfd1_, dataSample.c_str(), - i * chunkSize * chunkGap, dataSample.size()); - if (ret < 0) { - LOG(ERROR) << "Write Fail, ret = " << ret; - return false; - } + + // Scenario 4: Non-lazy snapshot clone scenario + TEST_F(SnapshotCloneServerTest, TestSnapNotLazyClone) + { + std::string snapId; + PrepareSnapshotForTestFile1(&snapId); + + // Step1: testUser1_ clones a snapshot that does not exist, + // fileName=SnapNotLazyClone1. Expected 1: return 'snapshot does not exist' + std::string uuid1; + int ret; + ret = CloneOrRecover("Clone", testUser1_, "UnExistSnapId2", + "/ItUser1/SnapNotLazyClone1", false, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Step2: testUser2_ clones snapshot snap1, fileName=SnapNotLazyClone1 + // Expected 2: user-authentication failure returned + ret = CloneOrRecover("Clone", testUser2_, snapId, + "/ItUser2/SnapNotLazyClone1", false, &uuid1); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step3: testUser1_ clones snapshot snap1, fileName=SnapNotLazyClone1 + // Expected 3: clone succeeds + std::string dstFile = "/ItUser1/SnapNotLazyClone1"; + ret = CloneOrRecover("Clone", testUser1_, snapId, dstFile, false, &uuid1); + ASSERT_EQ(0, ret); + + bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true); + ASSERT_TRUE(success1); + + // Step4: testUser1_ clones snapshot snap1 again, + // fileName=SnapNotLazyClone1 (duplicate clone) + // Expected 4: clone succeeds (idempotent) + ret = CloneOrRecover("Clone", testUser1_, snapId, + "/ItUser1/SnapNotLazyClone1", false, &uuid1); + ASSERT_EQ(0, ret); + + // Verify data correctness + std::string fakeData(4096, 'x'); + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } - ret = fileClient_->Close(testfd1_); - if (ret < 0) { - LOG(ERROR) << "Close fail, ret = " << ret; - return false; + + // Scenario 5: Lazy snapshot recovery scenario + TEST_F(SnapshotCloneServerTest, TestSnapLazyRecover) + { + std::string snapId; + PrepareSnapshotForTestFile1(&snapId); + + // Step1: testUser1_ recovers from a snapshot that does not exist, + // fileName=testFile1_. Expected 1: return 'snapshot does not exist' + std::string uuid1; + int ret; + ret = CloneOrRecover("Recover", testUser1_, "UnExistSnapId3", testFile1_, + true, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Step2: testUser2_ recovers from snapshot snap1, fileName=testFile1_ + // Expected 2: user-authentication failure returned + ret = + CloneOrRecover("Recover", testUser2_, snapId, testFile1_, true, &uuid1); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step3: testUser1_ recovers from snapshot snap1, fileName=testFile1_ + // Expected 3: recovery succeeds + ret = + CloneOrRecover("Recover", testUser1_, snapId, testFile1_, true, &uuid1); + ASSERT_EQ(0, ret); + + // Flatten + ret = Flatten(testUser1_, uuid1); + ASSERT_EQ(0, ret); + + bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false); + ASSERT_TRUE(success1); + + // Verify data correctness + std::string fakeData(4096, 'x'); + ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); + + // Step4: testUser1_ recovers from snapshot snap1, but the target file + // does not exist. Expected 4: return 'target file does not exist' +
ret = CloneOrRecover("Recover", testUser1_, snapId, "/ItUser1/notExistFile", + true, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); } - return true; - } - - static bool CheckFileData(const std::string& fileName, - const std::string& user, - const std::string& dataSample) { - UserInfo_t userinfo; - userinfo.owner = user; - int dstFd = fileClient_->Open(fileName, userinfo); - if (dstFd < 0) { - LOG(ERROR) << "Open fail, ret = " << dstFd; - return false; + + // Scenario 6: Non-lazy snapshot recovery scenario + TEST_F(SnapshotCloneServerTest, TestSnapNotLazyRecover) + { + std::string snapId; + PrepareSnapshotForTestFile1(&snapId); + + // Step1: testUser1_ recovers from a snapshot that does not exist, + // fileName=testFile1_. Expected 1: return 'snapshot does not exist' + std::string uuid1; + int ret; + ret = CloneOrRecover("Recover", testUser1_, "UnExistSnapId4", testFile1_, + false, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Step2: testUser2_ recovers from snapshot snap1, fileName=testFile1_ + // Expected 2: user-authentication failure returned + ret = CloneOrRecover("Recover", testUser2_, snapId, testFile1_, false, + &uuid1); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step3: testUser1_ recovers from snapshot snap1, fileName=testFile1_ + // Expected 3: recovery succeeds + ret = CloneOrRecover("Recover", testUser1_, snapId, testFile1_, false, + &uuid1); + ASSERT_EQ(0, ret); + + bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false); + ASSERT_TRUE(success1); + + // Verify data correctness + std::string fakeData(4096, 'x'); + ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); + + // Step4: testUser1_ recovers from snapshot snap1, but the target file + // does not exist. Expected 4: return 'target file does not exist' + ret = CloneOrRecover("Recover", testUser1_, snapId, "/ItUser1/notExistFile", + false, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); } - int ret = 0; - uint64_t totalChunk = 2ULL * segmentSize / chunkSize; - for (uint64_t i = 0; i < totalChunk / chunkGap; i++) { - char buf[4096]; - ret = fileClient_->Read(dstFd, buf, i * chunkSize * chunkGap, 4096); - if (ret < 0) { - LOG(ERROR) << "Read fail, ret = " << ret; - return false; - } - std::string data(buf, 4096); - if (data != dataSample) { - LOG(ERROR) << "CheckFileData not Equal, data = [" << data - << "] , expect data = [" << dataSample << "]."; - return false; - } + // Scenario 7: Lazy image clone scenario + TEST_F(SnapshotCloneServerTest, TestImageLazyClone) + { + // Step1: testUser1_ clones an image that does not exist, + // fileName=ImageLazyClone1. Expected 1: return 'file does not exist' + std::string uuid1, uuid2, uuid3, uuid4; + int ret; + ret = CloneOrRecover("Clone", testUser1_, "/UnExistFile", + "/ItUser1/ImageLazyClone1", true, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Step2: testUser1_ clones image testFile1_, fileName=ImageLazyClone1 + // Expected 2: clone succeeds + std::string dstFile = "/ItUser1/ImageLazyClone1"; + ret = + CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid2); + ASSERT_EQ(0, ret); + + // Step3: testUser1_ clones image testFile1_, + // fileName=ImageLazyClone1 (duplicate clone) + // Expected 3: clone succeeds (idempotent) + ret = CloneOrRecover("Clone", testUser1_, testFile1_, + "/ItUser1/ImageLazyClone1", true, &uuid3); + ASSERT_EQ(0, ret); + + // Step4: take snapshot snap1 of file ImageLazyClone1, whose lazy clone + // has not finished. Expected 4: return 'file status abnormal' + ret = MakeSnapshot(testUser1_, testFile1_, 
"snap1", &uuid4); + ASSERT_EQ(kErrCodeFileStatusInvalid, ret); + FileSnapshotInfo info2; + int retCode = GetSnapshotInfo(testUser1_, testFile1_, uuid4, &info2); + ASSERT_EQ(kErrCodeFileNotExist, retCode); + + ASSERT_TRUE(WaitMetaInstalledSuccess(testUser1_, uuid2, true)); + + // Verify data correctness before Flatten + std::string fakeData1(4096, 'x'); + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData1)); + + // Flatten + ret = Flatten(testUser1_, uuid2); + ASSERT_EQ(0, ret); + + bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid2, true); + ASSERT_TRUE(success1); + + // Verify data correctness after Flatten + std::string fakeData2(4096, 'x'); + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData2)); } - ret = fileClient_->Close(dstFd); - if (ret < 0) { - LOG(ERROR) << "Close fail, ret = " << ret; - return false; + + // Scenario 8: Non Lazy Mirror Clone Scene + TEST_F(SnapshotCloneServerTest, TestImageNotLazyClone) + { + // Step1: testUser1_ Clone does not exist in an image, + // fileName=ImageNotLazyClone1 Expected 1: Return snapshot does not exist + std::string uuid1; + int ret; + ret = CloneOrRecover("Clone", testUser1_, "/UnExistFile", + "/ItUser1/ImageNotLazyClone1", false, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Step2: testUser1_ Clone image testFile1_, fileName=ImageNotLazyClone1 + // Expected 2 to return successful cloning + std::string dstFile = "/ItUser1/ImageNotLazyClone1"; + ret = + CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, false, &uuid1); + ASSERT_EQ(0, ret); + + bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true); + ASSERT_TRUE(success1); + + // Step3: testUser1_ Clone image testFile1_, + // FileName=ImageNotLazyClone1 (duplicate clone) + // Expected 3: Returns successful cloning (idempotent) + ret = CloneOrRecover("Clone", testUser1_, testFile1_, + "/ItUser1/ImageNotLazyClone1", false, &uuid1); + ASSERT_EQ(0, ret); + + // Verify data correctness + std::string fakeData(4096, 'x'); + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } - return true; - } - - static void TearDownTestCase() { - fileClient_->UnInit(); - delete fileClient_; - fileClient_ = nullptr; - ASSERT_EQ(0, cluster_->StopCluster()); - delete cluster_; - cluster_ = nullptr; - system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); - system(std::string("rm -rf " + kTestPrefix + "1").c_str()); - system(std::string("rm -rf " + kTestPrefix + "2").c_str()); - system(std::string("rm -rf " + kTestPrefix + "3").c_str()); - } - - void SetUp() {} - - void TearDown() {} - - void PrepareSnapshotForTestFile1(std::string* uuid1) { - if (!hasSnapshotForTestFile1_) { - int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", uuid1); + + // Scenario 9: The snapshot has a failure scenario + TEST_F(SnapshotCloneServerTest, TestSnapAndCloneWhenSnapHasError) + { + std::string snapId = "errorSnapUuid"; + SnapshotInfo snapInfo(snapId, testUser1_, testFile4_, "snapxxx", 0, 0, 0, 0, + 0, 0, kDefaultPoolset, 0, Status::error); + + cluster_->metaStore_->AddSnapshot(snapInfo); + + pid_t pid = cluster_->RestartSnapshotCloneServer(1); + LOG(INFO) << "SnapshotCloneServer 1 restarted, pid = " << pid; + ASSERT_GT(pid, 0); + std::string uuid1, uuid2; + + // Step1: lazy clone snapshot snap1 + // Expected 1: Exception in returning snapshot + int ret = CloneOrRecover("Clone", testUser1_, snapId, + "/ItUser2/SnapLazyClone1", true, &uuid2); + ASSERT_EQ(kErrCodeInvalidSnapshot, ret); + + // Step2: Non lazy clone snapshot snap1 + // Expected 2: Exception in returning 
snapshot + ret = CloneOrRecover("Clone", testUser1_, snapId, + "/ItUser2/SnapNotLazyClone1", false, &uuid2); + ASSERT_EQ(kErrCodeInvalidSnapshot, ret); + + // Step3: lazy snap1 recover from snapshot + // Expected 3: Exception in returning snapshot + ret = + CloneOrRecover("Recover", testUser1_, snapId, testFile4_, true, &uuid2); + ASSERT_EQ(kErrCodeInvalidSnapshot, ret); + + // Step4: Snap1 recover from snapshot without lazy + // Expected 4: Exception in returning snapshot + ret = CloneOrRecover("Recover", testUser1_, snapId, testFile4_, false, + &uuid2); + ASSERT_EQ(kErrCodeInvalidSnapshot, ret); + + // Step5: User testUser1_ For testFile4_ Take a snapshot snap1 + // Expectation 5: Clean failed snapshot and take snapshot successfully + ret = MakeSnapshot(testUser1_, testFile4_, "snap1", &uuid1); ASSERT_EQ(0, ret); - bool success1 = - CheckSnapshotSuccess(testUser1_, testFile1_, *uuid1); + + // Successfully verified snapshot + bool success1 = CheckSnapshotSuccess(testUser1_, testFile4_, uuid1); ASSERT_TRUE(success1); - hasSnapshotForTestFile1_ = true; - snapIdForTestFile1_ = *uuid1; + + // Verification cleaning failed, snapshot succeeded + FileSnapshotInfo info1; + int retCode = GetSnapshotInfo(testUser1_, testFile4_, snapId, &info1); + ASSERT_EQ(kErrCodeFileNotExist, retCode); } - } - void WaitDeleteSnapshotForTestFile1() { - if (hasSnapshotForTestFile1_) { - ASSERT_EQ(0, DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, - snapIdForTestFile1_)); + //[Online issue repair] Clone failed, rollback delete clone volume, and create + // the same uuid volume again scenario + TEST_F(SnapshotCloneServerTest, TestCloneHasSameDest) + { + std::string uuid1, uuid2, uuid3, uuid4, uuid5, uuid6, uuid7; + // Step1: testUser1_ Clone image testFile1_, fileName=CloneHasSameDestUUID + // Expected 1 to return successful cloning + std::string dstFile = "/ItUser1/CloneHasSameDest"; + int ret = + CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid1); + ASSERT_EQ(0, ret); + + // Delete Clone Volume + UserInfo_t userinfo; + userinfo.owner = testUser1_; + int ret2 = fileClient_->Unlink(dstFile, userinfo, false); + ASSERT_EQ(0, ret2); + + // Step2: testUser1_ Clone image testFile1_ again, + // fileName=CloneHasSameDestUUID + // Expected 2 to return successful cloning + ret = + CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid2); + ASSERT_EQ(0, ret); + + // Verify data correctness + std::string fakeData(4096, 'x'); + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); + + // Step3: testUser1_ Clone image testFile1_, fileName=CloneHasSameDest2 + // Expected 3 to return successful cloning + dstFile = "/ItUser1/CloneHasSameDest2"; + ret = + CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid3); + ASSERT_EQ(0, ret); + + // Delete Clone Volume + UserInfo_t userinfo2; + userinfo2.owner = testUser1_; + ret2 = fileClient_->Unlink(dstFile, userinfo2, false); + ASSERT_EQ(0, ret2); + + // Step4: testUser1_ Clone the image testFile2_ again, + // fileName=CloneHasSameDest2 + // Expected 4 to return successful cloning + ret = + CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, true, &uuid4); + ASSERT_EQ(0, ret); + + // Verify data correctness + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); + + // Verify different situations when cloning lazyflag again + // Step5: testUser1_ Clone image testFile1_, fileName=CloneHasSameDest3 + // Expected 5 to return successful cloning + dstFile = "/ItUser1/CloneHasSameDest3"; + ret = + CloneOrRecover("Clone", 
testUser1_, testFile1_, dstFile, true, &uuid5); + ASSERT_EQ(0, ret); + + // Delete Clone Volume + UserInfo_t userinfo3; + userinfo2.owner = testUser1_; + ret2 = fileClient_->Unlink(dstFile, userinfo2, false); + ASSERT_EQ(0, ret2); + + // Step6: testUser1_ Non lazy clone image testFile2_ again, + // fileName=CloneHasSameDest3 + // Expected 6 to return successful cloning + ret = + CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, false, &uuid6); + ASSERT_EQ(0, ret); + + bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid6, true); + ASSERT_TRUE(success1); + + // Verify data correctness + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); + + // Delete Clone Volume + UserInfo_t userinfo4; + userinfo2.owner = testUser1_; + ret2 = fileClient_->Unlink(dstFile, userinfo2, false); + ASSERT_EQ(0, ret2); + + // Step7: testUser1_ Non lazy clone image testFile2_ again, + // fileName=CloneHasSameDest3 + // Expected 7 to return successful cloning + ret = + CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, true, &uuid7); + ASSERT_EQ(0, ret); + + // Verify data correctness + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } - } - static CurveCluster* cluster_; - static FileClient* fileClient_; + // Lazy clone volume, delete clone volume, and then delete source volume. The + // source volume can be deleted if needed + TEST_F(SnapshotCloneServerTest, TestDeleteLazyCloneDestThenDeleteSrc) + { + // Step1: testUser1_ Clone image testFile5_, Lazy clone two volumes dstFile1 + // and dstFile2 Expected 1 to return successful cloning + std::string uuid1; + std::string uuid2; + std::string dstFile1 = "/dest1"; + std::string dstFile2 = "/dest2"; + UserInfo_t userinfo; + userinfo.owner = testUser1_; + int ret = + CloneOrRecover("Clone", testUser1_, testFile5_, dstFile1, true, &uuid1); + ASSERT_EQ(0, ret); + + ret = + CloneOrRecover("Clone", testUser1_, testFile5_, dstFile2, true, &uuid2); + ASSERT_EQ(0, ret); + + // Delete source volume, deletion failed, volume occupied - bool hasSnapshotForTestFile1_ = false; - std::string snapIdForTestFile1_; -}; + ret = fileClient_->Unlink(testFile5_, userinfo, false); + ASSERT_EQ(-27, ret); -CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr; -FileClient* SnapshotCloneServerTest::fileClient_ = nullptr; - -// Regular test cases -// Scenario 1: Adding, deleting, and searching snapshots -TEST_F(SnapshotCloneServerTest, TestSnapshotAddDeleteGet) { - std::string uuid1; - int ret = 0; - // Step1: User testUser1_ Take a snapshot of non-existent files - // Expected 1: Return file does not exist - ret = MakeSnapshot(testUser1_, "/ItUser1/notExistFile", "snap1", &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // Step2: User testUser2_ For testFile1_ Take a snapshot - // Expected 2: Failed to return user authentication - ret = MakeSnapshot(testUser2_, testFile1_, "snap1", &uuid1); - ASSERT_EQ(kErrCodeInvalidUser, ret); - - // Step3: User testUser1_ For testFile1_ Take a snapshot snap1. 
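
A note on the TestCloneHasSameDest body above (the slip is present on both sides of this format-only diff, so it is not introduced by this patch): Steps 5 through 7 declare userinfo3 and userinfo4 but keep re-assigning and passing userinfo2. The test still passes because every owner is testUser1_, yet the likely intent was:

// Sketch of what Steps 5-7 presumably meant, using the test's own names:
UserInfo_t userinfo3;
userinfo3.owner = testUser1_;  // rather than re-assigning userinfo2
ret2 = fileClient_->Unlink(dstFile, userinfo3, false);
ASSERT_EQ(0, ret2);
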
- // Expected 3: Successful snapshot taking - ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1); - ASSERT_EQ(0, ret); - - std::string fakeData(4096, 'y'); - ASSERT_TRUE(WriteFile(testFile1_, testUser1_, fakeData)); - ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); - - // Step4: Obtain snapshot information, user=testUser1_, filename=testFile1_ - // Expected 4: Return information for snapshot snap1 - bool success1 = CheckSnapshotSuccess(testUser1_, testFile1_, uuid1); - ASSERT_TRUE(success1); - - // Step5: Obtain snapshot information, user=testUser2_, filename=testFile1_ - // Expected 5: User authentication failure returned - FileSnapshotInfo info1; - ret = GetSnapshotInfo(testUser2_, testFile1_, uuid1, &info1); - ASSERT_EQ(kErrCodeInvalidUser, ret); - - // Step6: Obtain snapshot information, user=testUser2_, filename=testFile2_ - // Expected 6: Return null - std::vector infoVec; - ret = ListFileSnapshotInfo(testUser2_, testFile2_, 10, 0, &infoVec); - ASSERT_EQ(0, ret); - ASSERT_EQ(0, infoVec.size()); - - // Step7: testUser2_ Delete snapshot snap1 - // Expected 7: User authentication failure returned - ret = DeleteSnapshot(testUser2_, testFile1_, uuid1); - ASSERT_EQ(kErrCodeInvalidUser, ret); - - // Step8: testUser1_ Delete testFile2_ Snapshot with ID snap1 for - // Expected 8: Return file name mismatch - ret = DeleteSnapshot(testUser1_, testFile2_, uuid1); - ASSERT_EQ(kErrCodeFileNameNotMatch, ret); - - // Step9: testUser1_ Delete snapshot snap1 - // Expected 9: Successful deletion returned - ret = DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, uuid1); - ASSERT_EQ(0, ret); - - // Step10: Obtain snapshot information, user=testUser1_, filename=testFile1_ - // Expected 10: Return empty - ret = ListFileSnapshotInfo(testUser1_, testFile1_, 10, 0, &infoVec); - ASSERT_EQ(0, ret); - ASSERT_EQ(0, infoVec.size()); - - // Step11: testUser1_ Delete snapshot snap1 (duplicate deletion) - // Expected 11: Successful deletion returned - ret = DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, uuid1); - ASSERT_EQ(0, ret); - - // Restore testFile1_ - std::string fakeData2(4096, 'x'); - ASSERT_TRUE(WriteFile(testFile1_, testUser1_, fakeData2)); -} - -// Scenario 2: Cancel Snapshot -TEST_F(SnapshotCloneServerTest, TestCancelSnapshot) { - std::string uuid1; - int ret = MakeSnapshot(testUser1_, testFile1_, "snapToCancle", &uuid1); - ASSERT_EQ(0, ret); - - bool success1 = false; - bool isCancel = false; - for (int i = 0; i < 600; i++) { - FileSnapshotInfo info1; - int retCode = GetSnapshotInfo(testUser1_, testFile1_, uuid1, &info1); - if (retCode == 0) { - if (info1.GetSnapshotInfo().GetStatus() == Status::pending || - info1.GetSnapshotInfo().GetStatus() == Status::canceling) { - if (!isCancel) { - // Step1: User testUser1_ For testFile1_ Take a snapshot - // snap1, - // testUser2_ before the snapshot is completed_ - // Cancel testFile1_ Snap1 of snapshot - // Expected 1: Failed to cancel user authentication - int retCode = CancelSnapshot(testUser2_, testFile1_, uuid1); - ASSERT_EQ(kErrCodeInvalidUser, retCode); - - // Step2: User testUser1_ For testFile1_ Take a snapshot - // snap1, - // testUser1_ before the snapshot is completed_ - // Cancel testFile1_ A non-existent snapshot of - // Expected 2: Return kErrCodeCannotCancelFinished - retCode = - CancelSnapshot(testUser1_, testFile1_, "notExistUUId"); - ASSERT_EQ(kErrCodeCannotCancelFinished, retCode); - - // Step3: User testUser1_ For testFile1_ Take a snapshot - // snap1, - // testUser1_ before the snapshot is completed_ - // 
Cancel testFile2_ Snap1 of snapshot - // Expected 3: Return file name mismatch - retCode = CancelSnapshot(testUser1_, testFile2_, uuid1); - ASSERT_EQ(kErrCodeFileNameNotMatch, retCode); - - // Step4: User testUser1_ For testFile1_ Take a snapshot, - // testUser1_ before the snapshot is completed_ - // Cancel snapshot snap1 - // Expected 4: Successfully cancelled snapshot - retCode = CancelSnapshot(testUser1_, testFile1_, uuid1); - ASSERT_EQ(0, retCode); - isCancel = true; + // Step2: Successfully delete the destination volume dstFile1, delete the + // source volume again Expected 2 deletion failed, volume occupied + ret = fileClient_->Unlink(dstFile1, userinfo, false); + ASSERT_EQ(0, ret); + + ret = fileClient_->Unlink(testFile5_, userinfo, false); + ASSERT_EQ(-27, ret); + + // Step3: Successfully delete the destination volume dstFile2, delete the + // source volume again Expected 3 deletion successful + ret = fileClient_->Unlink(dstFile2, userinfo, false); + ASSERT_EQ(0, ret); + + ret = fileClient_->Unlink(testFile5_, userinfo, false); + ASSERT_EQ(0, ret); + + // Step4: Wait for a period of time to see if the garbage record can be + // deleted in the background + bool noRecord = false; + for (int i = 0; i < 100; i++) + { + TaskCloneInfo info; + int ret1 = GetCloneTaskInfo(testUser1_, uuid1, &info); + int ret2 = GetCloneTaskInfo(testUser1_, uuid2, &info); + if (ret1 == kErrCodeFileNotExist && ret2 == kErrCodeFileNotExist) + { + noRecord = true; + break; } + std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - continue; - } else if (info1.GetSnapshotInfo().GetStatus() == Status::done) { - success1 = false; - break; - } else { - FAIL() << "Snapshot Fail On status = " - << static_cast(info1.GetSnapshotInfo().GetStatus()); } - } else if (retCode == -8) { - // Step5: Obtain snapshot information, user=testUser1_, - // filename=testFile1_ Expected 5: Return empty - success1 = true; - break; - } - } - ASSERT_TRUE(success1); - - // Step6: After the snapshot is completed, testUser1_ Cancel testFile1_ - // Snap1 of snapshot Expected 6: Returning a pending snapshot that does not - // exist or has been completed - ret = CancelSnapshot(testUser1_, testFile1_, uuid1); - ASSERT_EQ(kErrCodeCannotCancelFinished, ret); -} - -// Scenario 3: Lazy snapshot clone scene -TEST_F(SnapshotCloneServerTest, TestSnapLazyClone) { - std::string snapId; - PrepareSnapshotForTestFile1(&snapId); - - // Step1: testUser1_ A snapshot with a clone that does not exist, - // fileName=SnapLazyClone1 Expected 1: Return snapshot does not exist - std::string uuid1, uuid2, uuid3, uuid4, uuid5; - int ret; - ret = CloneOrRecover("Clone", testUser1_, "UnExistSnapId1", - "/ItUser1/SnapLazyClone1", true, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // Step2: testUser2_ Clone snapshot snap1, fileName=SnapLazyClone1 - // Expected 2: User authentication failure returned - ret = CloneOrRecover("Clone", testUser2_, snapId, "/ItUser2/SnapLazyClone1", - true, &uuid2); - ASSERT_EQ(kErrCodeInvalidUser, ret); - - // Step3: testUser1_ Clone snapshot snap1, fileName=SnapLazyClone1 - // Expected 3 to return successful cloning - std::string dstFile = "/ItUser1/SnapLazyClone1"; - ret = CloneOrRecover("Clone", testUser1_, snapId, dstFile, true, &uuid3); - ASSERT_EQ(0, ret); - - // Step4: testUser1_ Clone block photo snap1, fileName=SnapLazyClone1 - // (duplicate clone) Expected 4: Returns successful cloning (idempotent) - ret = CloneOrRecover("Clone", testUser1_, snapId, "/ItUser1/SnapLazyClone1", - true, &uuid4); - ASSERT_EQ(0, ret); - 
- // Flatten - ret = Flatten(testUser1_, uuid3); - ASSERT_EQ(0, ret); - - // Step5: testUser1_ GetCloneTask - // Expected 5: Return clone task for SnapLazyClone1 - bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid3, true); - ASSERT_TRUE(success1); - - // Step6: testUser2_ GetCloneTask - // Expected 6: Return null - std::vector infoVec; - ret = ListCloneTaskInfo(testUser2_, 10, 0, &infoVec); - ASSERT_EQ(0, ret); - ASSERT_EQ(0, infoVec.size()); - - // Step7: testUser2_ CleanCloneTask UUID is the UUID of SnapLazyClone1 - // Expected 7: User authentication failure returned - ret = CleanCloneTask(testUser2_, uuid3); - ASSERT_EQ(kErrCodeInvalidUser, ret); - - // Step8: testUser1_ CleanCloneTask UUID is the UUID of SnapLazyClone1 - // Expected 8: Return execution successful - ret = CleanCloneTask(testUser1_, uuid3); - ASSERT_EQ(0, ret); - - // Waiting for cleaning to complete - std::this_thread::sleep_for(std::chrono::seconds(3)); - - // Step9: testUser1_ CleanCloneTask UUID is the UUID of SnapLazyClone1 - // (repeated execution) Expected 9: Return execution successful - ret = CleanCloneTask(testUser1_, uuid3); - ASSERT_EQ(0, ret); - - // Step10: testUser1_ GetCloneTask - // Expected 10: Return empty - TaskCloneInfo info; - ret = GetCloneTaskInfo(testUser1_, uuid3, &info); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // Verify data correctness - std::string fakeData(4096, 'x'); - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); -} - -// Scenario 4: Non lazy snapshot clone scenario -TEST_F(SnapshotCloneServerTest, TestSnapNotLazyClone) { - std::string snapId; - PrepareSnapshotForTestFile1(&snapId); - - // Step1: testUser1_ A snapshot with a clone that does not exist, - // fileName=SnapNotLazyClone1 Expected 1: Return snapshot does not exist - std::string uuid1; - int ret; - ret = CloneOrRecover("Clone", testUser1_, "UnExistSnapId2", - "/ItUser1/SnapNotLazyClone1", false, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // Step2: testUser2_ Clone snapshot snap1, fileName=SnapNotLazyClone1 - // Expected 2: User authentication failure returned - ret = CloneOrRecover("Clone", testUser2_, snapId, - "/ItUser2/SnapNotLazyClone1", false, &uuid1); - ASSERT_EQ(kErrCodeInvalidUser, ret); - - // Step3: testUser1_ Clone snapshot snap1, fileName=SnapNotLazyClone1 - // Expected 3 to return successful cloning - std::string dstFile = "/ItUser1/SnapNotLazyClone1"; - ret = CloneOrRecover("Clone", testUser1_, snapId, dstFile, false, &uuid1); - ASSERT_EQ(0, ret); - - bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true); - ASSERT_TRUE(success1); - - // Step4: testUser1_ Clone block photo snap1, - // fileName=SnapNotLazyClone1 (duplicate clone) - // Expected 4: Returns successful cloning (idempotent) - ret = CloneOrRecover("Clone", testUser1_, snapId, - "/ItUser1/SnapNotLazyClone1", false, &uuid1); - ASSERT_EQ(0, ret); - - // Verify data correctness - std::string fakeData(4096, 'x'); - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); -} - -// Scenario 5: Lazy snapshot recovery scenario -TEST_F(SnapshotCloneServerTest, TestSnapLazyRecover) { - std::string snapId; - PrepareSnapshotForTestFile1(&snapId); - - // Step1: testUser1_ Recover snapshot that does not exist, - // fileName=testFile1_ Expected 1: Return snapshot does not exist - std::string uuid1; - int ret; - ret = CloneOrRecover("Recover", testUser1_, "UnExistSnapId3", testFile1_, - true, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // Step2: testUser2_ Recover snapshot snap1, fileName=testFile1_ - // Expected 
2: User authentication failure returned - ret = - CloneOrRecover("Recover", testUser2_, snapId, testFile1_, true, &uuid1); - ASSERT_EQ(kErrCodeInvalidUser, ret); - - // Step3: testUser1_ Recover snapshot snap1, fileName=testFile1_ - // Expected 3 return recovery success - ret = - CloneOrRecover("Recover", testUser1_, snapId, testFile1_, true, &uuid1); - ASSERT_EQ(0, ret); - - // Flatten - ret = Flatten(testUser1_, uuid1); - ASSERT_EQ(0, ret); - - bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false); - ASSERT_TRUE(success1); - - // Verify data correctness - std::string fakeData(4096, 'x'); - ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); - - // Step4: testUser1_ Recover snapshot snap1, target file is a non-existent - // file Expected 4: Return target file does not exist - ret = CloneOrRecover("Recover", testUser1_, snapId, "/ItUser1/notExistFile", - true, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); -} - -// Scenario 6: Non lazy snapshot recovery scenario -TEST_F(SnapshotCloneServerTest, TestSnapNotLazyRecover) { - std::string snapId; - PrepareSnapshotForTestFile1(&snapId); - - // Step1: testUser1_ Recover snapshot that does not exist, - // fileName=testFile1_ Expected 1: Return snapshot does not exist - std::string uuid1; - int ret; - ret = CloneOrRecover("Recover", testUser1_, "UnExistSnapId4", testFile1_, - false, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // Step2: testUser2_ Recover snapshot snap1, fileName=testFile1_ - // Expected 2: User authentication failure returned - ret = CloneOrRecover("Recover", testUser2_, snapId, testFile1_, false, - &uuid1); - ASSERT_EQ(kErrCodeInvalidUser, ret); - - // Step3: testUser1_ Recover snapshot snap1, fileName=testFile1_ - // Expected 3 return recovery success - ret = CloneOrRecover("Recover", testUser1_, snapId, testFile1_, false, - &uuid1); - ASSERT_EQ(0, ret); - - bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false); - ASSERT_TRUE(success1); - - // Verify data correctness - std::string fakeData(4096, 'x'); - ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); - - // Step4: testUser1_ Recover snapshot snap1, target file is a non-existent - // file Expected 4: Return target file does not exist - ret = CloneOrRecover("Recover", testUser1_, snapId, "/ItUser1/notExistFile", - false, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); -} - -// Scenario 7: Lazy Mirror Clone Scene -TEST_F(SnapshotCloneServerTest, TestImageLazyClone) { - // Step1: testUser1_ Clone does not exist in an image, - // fileName=ImageLazyClone1 Expected 1: Return file does not exist - std::string uuid1, uuid2, uuid3, uuid4; - int ret; - ret = CloneOrRecover("Clone", testUser1_, "/UnExistFile", - "/ItUser1/ImageLazyClone1", true, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // Step2: testUser1_ Clone image testFile1_, fileName=ImageLazyClone1 - // Expected 2 to return successful cloning - std::string dstFile = "/ItUser1/ImageLazyClone1"; - ret = - CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid2); - ASSERT_EQ(0, ret); - - // Step3: testUser1_ Clone image testFile1_, - // FileName=ImageLazyClone1 (duplicate clone) - // Expected 3: Returns successful cloning (idempotent) - ret = CloneOrRecover("Clone", testUser1_, testFile1_, - "/ItUser1/ImageLazyClone1", true, &uuid3); - ASSERT_EQ(0, ret); - - // Step4: Take a snapshot snap1 of the file ImageLazyClone1 that has not - // completed the lazy clone Expected 4: Abnormal file status returned - ret = MakeSnapshot(testUser1_, testFile1_, 
"snap1", &uuid4); - ASSERT_EQ(kErrCodeFileStatusInvalid, ret); - FileSnapshotInfo info2; - int retCode = GetSnapshotInfo(testUser1_, testFile1_, uuid4, &info2); - ASSERT_EQ(kErrCodeFileNotExist, retCode); - - ASSERT_TRUE(WaitMetaInstalledSuccess(testUser1_, uuid2, true)); - - // Verify data correctness before Flatten - std::string fakeData1(4096, 'x'); - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData1)); - - // Flatten - ret = Flatten(testUser1_, uuid2); - ASSERT_EQ(0, ret); - - bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid2, true); - ASSERT_TRUE(success1); - - // Verify data correctness after Flatten - std::string fakeData2(4096, 'x'); - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData2)); -} - -// Scenario 8: Non Lazy Mirror Clone Scene -TEST_F(SnapshotCloneServerTest, TestImageNotLazyClone) { - // Step1: testUser1_ Clone does not exist in an image, - // fileName=ImageNotLazyClone1 Expected 1: Return snapshot does not exist - std::string uuid1; - int ret; - ret = CloneOrRecover("Clone", testUser1_, "/UnExistFile", - "/ItUser1/ImageNotLazyClone1", false, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // Step2: testUser1_ Clone image testFile1_, fileName=ImageNotLazyClone1 - // Expected 2 to return successful cloning - std::string dstFile = "/ItUser1/ImageNotLazyClone1"; - ret = - CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, false, &uuid1); - ASSERT_EQ(0, ret); - - bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true); - ASSERT_TRUE(success1); - - // Step3: testUser1_ Clone image testFile1_, - // FileName=ImageNotLazyClone1 (duplicate clone) - // Expected 3: Returns successful cloning (idempotent) - ret = CloneOrRecover("Clone", testUser1_, testFile1_, - "/ItUser1/ImageNotLazyClone1", false, &uuid1); - ASSERT_EQ(0, ret); - - // Verify data correctness - std::string fakeData(4096, 'x'); - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); -} - -// Scenario 9: The snapshot has a failure scenario -TEST_F(SnapshotCloneServerTest, TestSnapAndCloneWhenSnapHasError) { - std::string snapId = "errorSnapUuid"; - SnapshotInfo snapInfo(snapId, testUser1_, testFile4_, "snapxxx", 0, 0, 0, 0, - 0, 0, kDefaultPoolset, 0, Status::error); - - cluster_->metaStore_->AddSnapshot(snapInfo); - - pid_t pid = cluster_->RestartSnapshotCloneServer(1); - LOG(INFO) << "SnapshotCloneServer 1 restarted, pid = " << pid; - ASSERT_GT(pid, 0); - std::string uuid1, uuid2; - - // Step1: lazy clone snapshot snap1 - // Expected 1: Exception in returning snapshot - int ret = CloneOrRecover("Clone", testUser1_, snapId, - "/ItUser2/SnapLazyClone1", true, &uuid2); - ASSERT_EQ(kErrCodeInvalidSnapshot, ret); - - // Step2: Non lazy clone snapshot snap1 - // Expected 2: Exception in returning snapshot - ret = CloneOrRecover("Clone", testUser1_, snapId, - "/ItUser2/SnapNotLazyClone1", false, &uuid2); - ASSERT_EQ(kErrCodeInvalidSnapshot, ret); - - // Step3: lazy snap1 recover from snapshot - // Expected 3: Exception in returning snapshot - ret = - CloneOrRecover("Recover", testUser1_, snapId, testFile4_, true, &uuid2); - ASSERT_EQ(kErrCodeInvalidSnapshot, ret); - - // Step4: Snap1 recover from snapshot without lazy - // Expected 4: Exception in returning snapshot - ret = CloneOrRecover("Recover", testUser1_, snapId, testFile4_, false, - &uuid2); - ASSERT_EQ(kErrCodeInvalidSnapshot, ret); - - // Step5: User testUser1_ For testFile4_ Take a snapshot snap1 - // Expectation 5: Clean failed snapshot and take snapshot successfully - ret = MakeSnapshot(testUser1_, testFile4_, 
"snap1", &uuid1); - ASSERT_EQ(0, ret); - - // Successfully verified snapshot - bool success1 = CheckSnapshotSuccess(testUser1_, testFile4_, uuid1); - ASSERT_TRUE(success1); - - // Verification cleaning failed, snapshot succeeded - FileSnapshotInfo info1; - int retCode = GetSnapshotInfo(testUser1_, testFile4_, snapId, &info1); - ASSERT_EQ(kErrCodeFileNotExist, retCode); -} - -//[Online issue repair] Clone failed, rollback delete clone volume, and create -//the same uuid volume again scenario -TEST_F(SnapshotCloneServerTest, TestCloneHasSameDest) { - std::string uuid1, uuid2, uuid3, uuid4, uuid5, uuid6, uuid7; - // Step1: testUser1_ Clone image testFile1_, fileName=CloneHasSameDestUUID - // Expected 1 to return successful cloning - std::string dstFile = "/ItUser1/CloneHasSameDest"; - int ret = - CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid1); - ASSERT_EQ(0, ret); - - // Delete Clone Volume - UserInfo_t userinfo; - userinfo.owner = testUser1_; - int ret2 = fileClient_->Unlink(dstFile, userinfo, false); - ASSERT_EQ(0, ret2); - - // Step2: testUser1_ Clone image testFile1_ again, - // fileName=CloneHasSameDestUUID - // Expected 2 to return successful cloning - ret = - CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid2); - ASSERT_EQ(0, ret); - - // Verify data correctness - std::string fakeData(4096, 'x'); - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); - - // Step3: testUser1_ Clone image testFile1_, fileName=CloneHasSameDest2 - // Expected 3 to return successful cloning - dstFile = "/ItUser1/CloneHasSameDest2"; - ret = - CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid3); - ASSERT_EQ(0, ret); - - // Delete Clone Volume - UserInfo_t userinfo2; - userinfo2.owner = testUser1_; - ret2 = fileClient_->Unlink(dstFile, userinfo2, false); - ASSERT_EQ(0, ret2); - - // Step4: testUser1_ Clone the image testFile2_ again, - // fileName=CloneHasSameDest2 - // Expected 4 to return successful cloning - ret = - CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, true, &uuid4); - ASSERT_EQ(0, ret); - - // Verify data correctness - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); - - // Verify different situations when cloning lazyflag again - // Step5: testUser1_ Clone image testFile1_, fileName=CloneHasSameDest3 - // Expected 5 to return successful cloning - dstFile = "/ItUser1/CloneHasSameDest3"; - ret = - CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid5); - ASSERT_EQ(0, ret); - - // Delete Clone Volume - UserInfo_t userinfo3; - userinfo2.owner = testUser1_; - ret2 = fileClient_->Unlink(dstFile, userinfo2, false); - ASSERT_EQ(0, ret2); - - // Step6: testUser1_ Non lazy clone image testFile2_ again, - // fileName=CloneHasSameDest3 - // Expected 6 to return successful cloning - ret = - CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, false, &uuid6); - ASSERT_EQ(0, ret); - - bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid6, true); - ASSERT_TRUE(success1); - - // Verify data correctness - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); - - // Delete Clone Volume - UserInfo_t userinfo4; - userinfo2.owner = testUser1_; - ret2 = fileClient_->Unlink(dstFile, userinfo2, false); - ASSERT_EQ(0, ret2); - - // Step7: testUser1_ Non lazy clone image testFile2_ again, - // fileName=CloneHasSameDest3 - // Expected 7 to return successful cloning - ret = - CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, true, &uuid7); - ASSERT_EQ(0, ret); - - // Verify data correctness - 
ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); -} - -// Lazy clone volume, delete clone volume, and then delete source volume. The -// source volume can be deleted if needed -TEST_F(SnapshotCloneServerTest, TestDeleteLazyCloneDestThenDeleteSrc) { - // Step1: testUser1_ Clone image testFile5_, Lazy clone two volumes dstFile1 - // and dstFile2 Expected 1 to return successful cloning - std::string uuid1; - std::string uuid2; - std::string dstFile1 = "/dest1"; - std::string dstFile2 = "/dest2"; - UserInfo_t userinfo; - userinfo.owner = testUser1_; - int ret = - CloneOrRecover("Clone", testUser1_, testFile5_, dstFile1, true, &uuid1); - ASSERT_EQ(0, ret); - - ret = - CloneOrRecover("Clone", testUser1_, testFile5_, dstFile2, true, &uuid2); - ASSERT_EQ(0, ret); - - // Delete source volume, deletion failed, volume occupied - - ret = fileClient_->Unlink(testFile5_, userinfo, false); - ASSERT_EQ(-27, ret); - - // Step2: Successfully delete the destination volume dstFile1, delete the - // source volume again Expected 2 deletion failed, volume occupied - ret = fileClient_->Unlink(dstFile1, userinfo, false); - ASSERT_EQ(0, ret); - - ret = fileClient_->Unlink(testFile5_, userinfo, false); - ASSERT_EQ(-27, ret); - - // Step3: Successfully delete the destination volume dstFile2, delete the - // source volume again Expected 3 deletion successful - ret = fileClient_->Unlink(dstFile2, userinfo, false); - ASSERT_EQ(0, ret); - - ret = fileClient_->Unlink(testFile5_, userinfo, false); - ASSERT_EQ(0, ret); - - // Step4: Wait for a period of time to see if the garbage record can be - // deleted in the background - bool noRecord = false; - for (int i = 0; i < 100; i++) { - TaskCloneInfo info; - int ret1 = GetCloneTaskInfo(testUser1_, uuid1, &info); - int ret2 = GetCloneTaskInfo(testUser1_, uuid2, &info); - if (ret1 == kErrCodeFileNotExist && ret2 == kErrCodeFileNotExist) { - noRecord = true; - break; - } - std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - } - - ASSERT_TRUE(noRecord); -} -} // namespace snapshotcloneserver -} // namespace curve + ASSERT_TRUE(noRecord); + } + } // namespace snapshotcloneserver +} // namespace curve diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp index 94d648ab86..9dd30a65b9 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp @@ -25,8 +25,8 @@ #include #include -#include // NOLINT -#include // NOLINT +#include // NOLINT +#include // NOLINT #include "src/client/libcurve_file.h" #include "src/snapshotcloneserver/snapshotclone_server.h" @@ -34,35 +34,35 @@ #include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" #include "test/util/config_generator.h" -const std::string kTestPrefix = "MainSCSTest"; // NOLINT +const std::string kTestPrefix = "MainSCSTest"; // NOLINT // Some constant definitions -const char* cloneTempDir_ = "/clone"; -const char* mdsRootUser_ = "root"; -const char* mdsRootPassword_ = "root_password"; +const char *cloneTempDir_ = "/clone"; +const char *mdsRootUser_ = "root"; +const char *mdsRootPassword_ = "root_password"; const uint64_t segmentSize = 32ULL * 1024 * 1024; -const char* kEtcdClientIpPort = "127.0.0.1:10041"; -const char* kEtcdPeerIpPort = "127.0.0.1:10042"; -const char* kMdsIpPort = "127.0.0.1:10043"; -const char* kSnapshotCloneServerIpPort = "127.0.0.1:10047"; +const char *kEtcdClientIpPort = "127.0.0.1:10041"; 
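
On TestDeleteLazyCloneDestThenDeleteSrc above: the raw -27 compared against Unlink's result is the client's "volume occupied" style error (the named constant is not shown in this patch), and the rule being exercised is that a source volume cannot be unlinked while any lazy clone still references it. A self-contained sketch of that reference-counting rule, with the -27 value taken from the test and everything else hypothetical:

#include <atomic>

// Hypothetical guard mirroring the dependency rule the test exercises:
// unlinking the source volume fails with -27 ("volume occupied") until the
// last lazy clone that depends on it has been deleted.
class CloneSourceGuard {
 public:
    void OnCloneCreated() { cloneRefs_.fetch_add(1); }
    void OnCloneDeleted() { cloneRefs_.fetch_sub(1); }
    int TryUnlinkSource() const {
        return cloneRefs_.load() == 0 ? 0 : -27;
    }

 private:
    std::atomic<int> cloneRefs_{0};
};
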
+const char *kEtcdPeerIpPort = "127.0.0.1:10042"; +const char *kMdsIpPort = "127.0.0.1:10043"; +const char *kSnapshotCloneServerIpPort = "127.0.0.1:10047"; const int kMdsDummyPort = 10048; -const char* kSnapshotCloneServerDummyServerPort = "12004"; -const char* kLeaderCampaginPrefix = "snapshotcloneserverleaderlock4"; +const char *kSnapshotCloneServerDummyServerPort = "12004"; +const char *kLeaderCampaginPrefix = "snapshotcloneserverleaderlock4"; -const std::string kLogPath = "./runlog/" + kTestPrefix + "Log"; // NOLINT -const std::string kMdsDbName = kTestPrefix + "DB"; // NOLINT -const std::string kEtcdName = kTestPrefix; // NOLINT -const std::string kMdsConfigPath = // NOLINT +const std::string kLogPath = "./runlog/" + kTestPrefix + "Log"; // NOLINT +const std::string kMdsDbName = kTestPrefix + "DB"; // NOLINT +const std::string kEtcdName = kTestPrefix; // NOLINT +const std::string kMdsConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_mds.conf"; -const std::string kSnapClientConfigPath = // NOLINT +const std::string kSnapClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_snap_client.conf"; -const std::string kS3ConfigPath = // NOLINT +const std::string kS3ConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_s3.conf"; -const std::string kSCSConfigPath = // NOLINT +const std::string kSCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_scs.conf"; @@ -122,104 +122,110 @@ const std::vector snapshotcloneConf{ {"--stderrthreshold=3"}, }; -namespace curve { -namespace snapshotcloneserver { - -class SnapshotCloneServerMainTest : public ::testing::Test { - public: - void SetUp() { - std::string mkLogDirCmd = std::string("mkdir -p ") + kLogPath; - system(mkLogDirCmd.c_str()); - system("mkdir -p /data/log/curve ./fakes3"); - - cluster_ = new CurveCluster(); - ASSERT_NE(nullptr, cluster_); - - // Initialize db - std::string rmcmd = "rm -rf " + std::string(kEtcdName) + ".etcd"; - system(rmcmd.c_str()); - - // Start etcd - pid_t pid = cluster_->StartSingleEtcd( - 1, kEtcdClientIpPort, kEtcdPeerIpPort, - std::vector{"--name=" + std::string(kEtcdName)}); - LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort - << "::" << kEtcdPeerIpPort << ", pid = " << pid; - ASSERT_GT(pid, 0); - - cluster_->PrepareConfig(kMdsConfigPath, - mdsConfigOptions); - - // Start an mds - pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, - true); - LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; - ASSERT_GT(pid, 0); - - cluster_->PrepareConfig(kS3ConfigPath, - s3ConfigOptions); - - cluster_->PrepareConfig( - kSnapClientConfigPath, snapClientConfigOptions); - - cluster_->PrepareConfig( - kSCSConfigPath, snapshotcloneserverConfigOptions); - } - - void TearDown() { - ASSERT_EQ(0, cluster_->StopCluster()); - delete cluster_; - cluster_ = nullptr; - - std::string rmcmd = "rm -rf " + std::string(kEtcdName) + ".etcd"; - system(rmcmd.c_str()); - } - - public: - CurveCluster* cluster_; -}; - -TEST_F(SnapshotCloneServerMainTest, testmain) { - std::shared_ptr conf = std::make_shared(); - conf->SetConfigPath(kSCSConfigPath); - - ASSERT_TRUE(conf->LoadConfig()); - LOG(INFO) << kSCSConfigPath; - conf->PrintConfig(); - - SnapShotCloneServer* snapshotCloneServer = new SnapShotCloneServer(conf); - - snapshotCloneServer->InitAllSnapshotCloneOptions(); - - snapshotCloneServer->StartDummy(); - - snapshotCloneServer->StartCompaginLeader(); - - 
ASSERT_TRUE(snapshotCloneServer->Init()); - - ASSERT_TRUE(snapshotCloneServer->Start()); - - std::this_thread::sleep_for(std::chrono::seconds(2)); - - // Test and verify if the status is active - // "curl "127.0.0.1:port/vars/snapshotcloneserver_status""; - std::string cmd = - "curl \"127.0.0.1:" + std::string(kSnapshotCloneServerDummyServerPort) + - "/vars/" + std::string(statusMetricName) + "\""; - // snapshotcloneserver_status : "active\r\n" - std::string expectResult = std::string(statusMetricName) + " : \"" + - std::string(ACTIVE) + "\"\r\n"; - - FILE* fp = popen(cmd.c_str(), "r"); - ASSERT_TRUE(fp != nullptr); - char buf[1024]; - fread(buf, sizeof(char), sizeof(buf), fp); - pclose(fp); - std::string result(buf); - ASSERT_EQ(result, expectResult); - - snapshotCloneServer->Stop(); - LOG(INFO) << "snapshotCloneServer Stopped"; -} -} // namespace snapshotcloneserver -} // namespace curve +namespace curve +{ + namespace snapshotcloneserver + { + + class SnapshotCloneServerMainTest : public ::testing::Test + { + public: + void SetUp() + { + std::string mkLogDirCmd = std::string("mkdir -p ") + kLogPath; + system(mkLogDirCmd.c_str()); + system("mkdir -p /data/log/curve ./fakes3"); + + cluster_ = new CurveCluster(); + ASSERT_NE(nullptr, cluster_); + + // Initialize db + std::string rmcmd = "rm -rf " + std::string(kEtcdName) + ".etcd"; + system(rmcmd.c_str()); + + // Start etcd + pid_t pid = cluster_->StartSingleEtcd( + 1, kEtcdClientIpPort, kEtcdPeerIpPort, + std::vector{"--name=" + std::string(kEtcdName)}); + LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort + << "::" << kEtcdPeerIpPort << ", pid = " << pid; + ASSERT_GT(pid, 0); + + cluster_->PrepareConfig(kMdsConfigPath, + mdsConfigOptions); + + // Start an mds + pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, + true); + LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; + ASSERT_GT(pid, 0); + + cluster_->PrepareConfig(kS3ConfigPath, + s3ConfigOptions); + + cluster_->PrepareConfig( + kSnapClientConfigPath, snapClientConfigOptions); + + cluster_->PrepareConfig( + kSCSConfigPath, snapshotcloneserverConfigOptions); + } + + void TearDown() + { + ASSERT_EQ(0, cluster_->StopCluster()); + delete cluster_; + cluster_ = nullptr; + + std::string rmcmd = "rm -rf " + std::string(kEtcdName) + ".etcd"; + system(rmcmd.c_str()); + } + + public: + CurveCluster *cluster_; + }; + + TEST_F(SnapshotCloneServerMainTest, testmain) + { + std::shared_ptr conf = std::make_shared(); + conf->SetConfigPath(kSCSConfigPath); + + ASSERT_TRUE(conf->LoadConfig()); + LOG(INFO) << kSCSConfigPath; + conf->PrintConfig(); + + SnapShotCloneServer *snapshotCloneServer = new SnapShotCloneServer(conf); + + snapshotCloneServer->InitAllSnapshotCloneOptions(); + + snapshotCloneServer->StartDummy(); + + snapshotCloneServer->StartCompaginLeader(); + + ASSERT_TRUE(snapshotCloneServer->Init()); + + ASSERT_TRUE(snapshotCloneServer->Start()); + + std::this_thread::sleep_for(std::chrono::seconds(2)); + + // Test and verify if the status is active + // "curl "127.0.0.1:port/vars/snapshotcloneserver_status""; + std::string cmd = + "curl \"127.0.0.1:" + std::string(kSnapshotCloneServerDummyServerPort) + + "/vars/" + std::string(statusMetricName) + "\""; + // snapshotcloneserver_status : "active\r\n" + std::string expectResult = std::string(statusMetricName) + " : \"" + + std::string(ACTIVE) + "\"\r\n"; + + FILE *fp = popen(cmd.c_str(), "r"); + ASSERT_TRUE(fp != nullptr); + char buf[1024]; + fread(buf, sizeof(char), sizeof(buf), fp); + pclose(fp); + 
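// NOTE (applies to both sides of this format-only hunk): fread()'s return
// value is discarded above and buf is never NUL-terminated, so the
// std::string constructed on the next line can read past the valid bytes.
// A safer sketch of the same read:
//     size_t n = fread(buf, 1, sizeof(buf) - 1, fp);
//     buf[n] = '\0';              // or construct with std::string(buf, n)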
std::string result(buf); + ASSERT_EQ(result, expectResult); + + snapshotCloneServer->Stop(); + LOG(INFO) << "snapshotCloneServer Stopped"; + } + } // namespace snapshotcloneserver +} // namespace curve diff --git a/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp b/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp index 2a388c8944..d7a49f2c8b 100644 --- a/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp +++ b/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp @@ -41,152 +41,158 @@ using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTINFOKEYEND; using ::curve::common::SEGMENTINFOKEYPREFIX; -namespace curve { -namespace mds { -TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) { - auto mockEtcdClient = std::make_shared(); - +namespace curve +{ + namespace mds { - // 1. list failed - EXPECT_CALL(*mockEtcdClient, - List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, - Matcher*>(_))) - .WillOnce(Return(EtcdErrCode::EtcdCanceled)); - std::map out; - ASSERT_EQ(-1, AllocStatisticHelper::GetExistSegmentAllocValues( - &out, mockEtcdClient)); - } + TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) + { + auto mockEtcdClient = std::make_shared(); - { - // 2. list successful, parsing failed - std::vector values{"hello"}; - EXPECT_CALL(*mockEtcdClient, - List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, - Matcher*>(_))) - .WillOnce( - DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); - std::map out; - ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues( - &out, mockEtcdClient)); - } - { - // 3. Successfully obtained the existing segment alloc value - std::vector values{ - NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; - EXPECT_CALL(*mockEtcdClient, - List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, - Matcher*>(_))) - .WillOnce( - DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); - std::map out; - ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues( - &out, mockEtcdClient)); - ASSERT_EQ(1, out.size()); - ASSERT_EQ(1024, out[1]); - } -} + { + // 1. list failed + EXPECT_CALL(*mockEtcdClient, + List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, + Matcher *>(_))) + .WillOnce(Return(EtcdErrCode::EtcdCanceled)); + std::map out; + ASSERT_EQ(-1, AllocStatisticHelper::GetExistSegmentAllocValues( + &out, mockEtcdClient)); + } -TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { - auto mockEtcdClient = std::make_shared(); - { - // 1. CalculateSegmentAlloc ok - LOG(INFO) << "start test1......"; - EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, - SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) - .WillOnce(Return(EtcdErrCode::EtcdUnknown)); - std::map out; - ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); - } - { - // 2. ListWithLimitAndRevision succeeded, but parsing failed - LOG(INFO) << "start test2......"; - std::vector values{"hello"}; - std::string lastKey = "021"; - EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, - SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) - .WillOnce( - DoAll(SetArgPointee<4>(values), Return(EtcdErrCode::EtcdOK))); - std::map out; - ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); - } - { - // 3. 
ListWithLimitAndRevision successful, parsing successful, - // bundle=1000, number obtained is 1 - LOG(INFO) << "start test3......"; - PageFileSegment segment; - segment.set_segmentsize(1 << 30); - segment.set_logicalpoolid(1); - segment.set_chunksize(16 * 1024 * 1024); - segment.set_startoffset(0); - std::string encodeSegment; - ASSERT_TRUE( - NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); - std::vector values{encodeSegment}; - std::string lastKey = - NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 0); - EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, - SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) - .WillOnce(DoAll(SetArgPointee<4>(values), SetArgPointee<5>(lastKey), - Return(EtcdErrCode::EtcdOK))); - std::map out; - ASSERT_EQ(0, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); - ASSERT_EQ(1, out.size()); - ASSERT_EQ(1 << 30, out[1]); - } - { - // 4. ListWithLimitAndRevision successful, parsing successful - // bundle=1000, get a number of 1001 - LOG(INFO) << "start test4......"; - PageFileSegment segment; - segment.set_segmentsize(1 << 30); - segment.set_logicalpoolid(1); - segment.set_chunksize(16 * 1024 * 1024); - segment.set_startoffset(0); - std::string encodeSegment; - std::vector values; - ASSERT_TRUE( - NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); - for (int i = 1; i <= 500; i++) { - values.emplace_back(encodeSegment); + { + // 2. list successful, parsing failed + std::vector values{"hello"}; + EXPECT_CALL(*mockEtcdClient, + List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, + Matcher *>(_))) + .WillOnce( + DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); + std::map out; + ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues( + &out, mockEtcdClient)); + } + { + // 3. Successfully obtained the existing segment alloc value + std::vector values{ + NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; + EXPECT_CALL(*mockEtcdClient, + List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, + Matcher *>(_))) + .WillOnce( + DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); + std::map out; + ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues( + &out, mockEtcdClient)); + ASSERT_EQ(1, out.size()); + ASSERT_EQ(1024, out[1]); + } } - segment.set_logicalpoolid(2); - ASSERT_TRUE( - NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); - for (int i = 501; i <= 1000; i++) { - values.emplace_back(encodeSegment); - } - std::string lastKey1 = - NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 500); - std::string lastKey2 = - NameSpaceStorageCodec::EncodeSegmentStoreKey(501, 1000); - EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, - SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) - .WillOnce(DoAll(SetArgPointee<4>(values), - SetArgPointee<5>(lastKey1), - Return(EtcdErrCode::EtcdOK))); - EXPECT_CALL(*mockEtcdClient, - ListWithLimitAndRevision(lastKey1, SEGMENTINFOKEYEND, - GETBUNDLE, 2, _, _)) - .WillOnce(DoAll(SetArgPointee<4>(std::vector{ - encodeSegment, encodeSegment}), - SetArgPointee<5>(lastKey2), - Return(EtcdErrCode::EtcdOK))); + TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) + { + auto mockEtcdClient = std::make_shared(); + { + // 1. 
CalculateSegmentAlloc ok + LOG(INFO) << "start test1......"; + EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + .WillOnce(Return(EtcdErrCode::EtcdUnknown)); + std::map out; + ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc( + 2, mockEtcdClient, &out)); + } + { + // 2. ListWithLimitAndRevision succeeded, but parsing failed + LOG(INFO) << "start test2......"; + std::vector values{"hello"}; + std::string lastKey = "021"; + EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + .WillOnce( + DoAll(SetArgPointee<4>(values), Return(EtcdErrCode::EtcdOK))); + std::map out; + ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc( + 2, mockEtcdClient, &out)); + } + { + // 3. ListWithLimitAndRevision successful, parsing successful, + // bundle=1000, number obtained is 1 + LOG(INFO) << "start test3......"; + PageFileSegment segment; + segment.set_segmentsize(1 << 30); + segment.set_logicalpoolid(1); + segment.set_chunksize(16 * 1024 * 1024); + segment.set_startoffset(0); + std::string encodeSegment; + ASSERT_TRUE( + NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); + std::vector values{encodeSegment}; + std::string lastKey = + NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 0); + EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + .WillOnce(DoAll(SetArgPointee<4>(values), SetArgPointee<5>(lastKey), + Return(EtcdErrCode::EtcdOK))); + std::map out; + ASSERT_EQ(0, AllocStatisticHelper::CalculateSegmentAlloc( + 2, mockEtcdClient, &out)); + ASSERT_EQ(1, out.size()); + ASSERT_EQ(1 << 30, out[1]); + } + { + // 4. ListWithLimitAndRevision successful, parsing successful + // bundle=1000, get a number of 1001 + LOG(INFO) << "start test4......"; + PageFileSegment segment; + segment.set_segmentsize(1 << 30); + segment.set_logicalpoolid(1); + segment.set_chunksize(16 * 1024 * 1024); + segment.set_startoffset(0); + std::string encodeSegment; + std::vector values; + ASSERT_TRUE( + NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); + for (int i = 1; i <= 500; i++) + { + values.emplace_back(encodeSegment); + } - std::map out; - ASSERT_EQ(0, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); - ASSERT_EQ(2, out.size()); - ASSERT_EQ(500L * (1 << 30), out[1]); - ASSERT_EQ(501L * (1 << 30), out[2]); - } -} -} // namespace mds -} // namespace curve + segment.set_logicalpoolid(2); + ASSERT_TRUE( + NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); + for (int i = 501; i <= 1000; i++) + { + values.emplace_back(encodeSegment); + } + std::string lastKey1 = + NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 500); + std::string lastKey2 = + NameSpaceStorageCodec::EncodeSegmentStoreKey(501, 1000); + EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + .WillOnce(DoAll(SetArgPointee<4>(values), + SetArgPointee<5>(lastKey1), + Return(EtcdErrCode::EtcdOK))); + EXPECT_CALL(*mockEtcdClient, + ListWithLimitAndRevision(lastKey1, SEGMENTINFOKEYEND, + GETBUNDLE, 2, _, _)) + .WillOnce(DoAll(SetArgPointee<4>(std::vector{ + encodeSegment, encodeSegment}), + SetArgPointee<5>(lastKey2), + Return(EtcdErrCode::EtcdOK))); + + std::map out; + ASSERT_EQ(0, AllocStatisticHelper::CalculateSegmentAlloc( + 2, mockEtcdClient, &out)); + ASSERT_EQ(2, out.size()); + ASSERT_EQ(500L * (1 << 30), out[1]); + 
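
For reference, case 4 above drives two rounds of ListWithLimitAndRevision: a full bundle of 1000 encoded segments (500 for pool 1, 500 for pool 2) followed by a short page of 2 resumed from lastKey1. The paginated-scan shape being tested, reduced to a self-contained sketch (assumed shapes, not the project API):

#include <cstddef>
#include <cstdint>
#include <functional>
#include <string>
#include <vector>

// One page fetch: fills `page` and `lastKey`, returns false on etcd error.
using PageLister = std::function<bool(const std::string& startKey,
                                      std::vector<std::string>* page,
                                      std::string* lastKey)>;

// Scans all segments page by page; returns the number seen, or -1 on error.
int64_t ScanAllSegments(const PageLister& list, size_t bundle,
                        std::string startKey) {
    int64_t count = 0;
    for (;;) {
        std::vector<std::string> page;
        std::string lastKey;
        if (!list(startKey, &page, &lastKey)) {
            return -1;  // propagate the etcd error, as the tests expect
        }
        count += static_cast<int64_t>(page.size());  // decode/accumulate here
        if (page.size() < bundle) {
            break;      // short page: the key space is exhausted
        }
        startKey = lastKey;  // resume; real code must avoid double-counting
    }                        // the boundary key it re-lists
    return count;
}
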
ASSERT_EQ(501L * (1 << 30), out[2]); + } + } + } // namespace mds +} // namespace curve diff --git a/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp b/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp index f250e7e401..3a4b579852 100644 --- a/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp +++ b/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp @@ -28,205 +28,212 @@ #include "src/common/namespace_define.h" using ::testing::_; -using ::testing::Return; -using ::testing::SetArgPointee; using ::testing::DoAll; using ::testing::Matcher; +using ::testing::Return; +using ::testing::SetArgPointee; -using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTALLOCSIZEKEY; +using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTINFOKEYEND; using ::curve::common::SEGMENTINFOKEYPREFIX; -namespace curve { -namespace mds { - -class AllocStatisticTest : public ::testing::Test { - protected: - void SetUp() override { - periodicPersistInterMs_ = 2; - retryInterMs_ = 2; - mockEtcdClient_ = std::make_shared<MockEtcdClient>(); - allocStatistic_ = std::make_shared<AllocStatistic>( - periodicPersistInterMs_, retryInterMs_, mockEtcdClient_); - } - - protected: - int64_t periodicPersistInterMs_; - int64_t retryInterMs_; - std::shared_ptr<AllocStatistic> allocStatistic_; - std::shared_ptr<MockEtcdClient> mockEtcdClient_; -}; - -TEST_F(AllocStatisticTest, test_Init) { - { - // 1. Failed to obtain the current revision from ETCD - LOG(INFO) << "test1......"; - EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)). - WillOnce(Return(EtcdErrCode::EtcdCanceled)); - ASSERT_EQ(-1, allocStatistic_->Init()); - } +namespace curve +{ + namespace mds { - // 2. Failed to obtain the alloc size corresponding to the existing logicalPool - LOG(INFO) << "test2......"; - EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)). - WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, - List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, - Matcher<std::vector<std::string>*>(_))) - .WillOnce(Return(EtcdErrCode::EtcdCanceled)); - ASSERT_EQ(-1, allocStatistic_->Init()); - int64_t alloc; - ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); - } - { - // 3. init successful - LOG(INFO) << "test3......"; - std::vector<std::string> values{ - NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; - EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)).
- WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); - EXPECT_CALL(*mockEtcdClient_, - List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, - Matcher<std::vector<std::string>*>(_))) - .WillOnce( - DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); - ASSERT_EQ(0, allocStatistic_->Init()); - int64_t alloc; - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); - ASSERT_EQ(1024, alloc); - } -} - -TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { - // Initialize allocStatistics - // Old value: logicalPoolId(1):1024 - std::vector<std::string> values{ - NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; - EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) - .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); - EXPECT_CALL(*mockEtcdClient_, - List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, - Matcher<std::vector<std::string>*>(_))) - .WillOnce(DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); - ASSERT_EQ(0, allocStatistic_->Init()); - - PageFileSegment segment; - segment.set_segmentsize(1 << 30); - segment.set_logicalpoolid(1); - segment.set_chunksize(16*1024*1024); - segment.set_startoffset(0); - std::string encodeSegment; - values.clear(); - ASSERT_TRUE( - NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); - for (int i = 1; i <= 500; i++) { - values.emplace_back(encodeSegment); - } - - // 1 Before the periodic persistence and statistics threads start, only the old values are available - int64_t alloc; - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); - ASSERT_EQ(1024, alloc); - ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); - - // 2 Update the value of segment - allocStatistic_->DeAllocSpace(1, 64, 1); - allocStatistic_->AllocSpace(1, 32, 1); - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); - ASSERT_EQ(1024 - 32, alloc); - - // Set the segment values in the mock etcd - // logicalPoolId(1):500 * (1<<30) - // logicalPoolId(2):501 * (1<<30) - segment.set_logicalpoolid(2); - ASSERT_TRUE( - NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); - for (int i = 501; i <= 1000; i++) { - values.emplace_back(encodeSegment); - } - std::string lastKey1 = - NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 500); - std::string lastKey2 = - NameSpaceStorageCodec::EncodeSegmentStoreKey(501, 1000); - EXPECT_CALL(*mockEtcdClient_, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) - .Times(2) - .WillOnce(Return(EtcdErrCode::EtcdCanceled)) - .WillOnce(DoAll(SetArgPointee<4>(values), - SetArgPointee<5>(lastKey1), - Return(EtcdErrCode::EtcdOK))); - EXPECT_CALL(*mockEtcdClient_, ListWithLimitAndRevision( - lastKey1, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) - .WillOnce(DoAll(SetArgPointee<4>( - std::vector<std::string>{encodeSegment, encodeSegment}), - SetArgPointee<5>(lastKey2), - Return(EtcdErrCode::EtcdOK))); - EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) - .Times(2) - .WillOnce(Return(EtcdErrCode::EtcdCanceled)) - .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); - - // Set the Put result of the mock - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(1), - NameSpaceStorageCodec::EncodeSegmentAllocValue( - 1, 1024 - 32 + (1L << 30)))) - .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put(
NameSpaceStorageCodec::EncodeSegmentAllocKey(1), - NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 501L *(1 << 30)))) - .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(2), - NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 502L *(1 << 30)))) - .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(1), - NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 500L *(1 << 30)))) - .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(2), - NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 501L *(1 << 30)))) - .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(3), - NameSpaceStorageCodec::EncodeSegmentAllocValue(3, 1L << 30))) - .WillOnce(Return(EtcdErrCode::EtcdOK)); - - // 2 Start the periodic persistence and statistics threads - for (int i = 1; i <= 2; i++) { - allocStatistic_->AllocSpace(i, 1L << 30, i + 3); - } - allocStatistic_->Run(); - std::this_thread::sleep_for(std::chrono::seconds(6)); - - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); - ASSERT_EQ(501L *(1 << 30), alloc); - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); - ASSERT_EQ(502L *(1 << 30), alloc); - std::this_thread::sleep_for(std::chrono::milliseconds(30)); - - // Update through alloc again - for (int i = 1; i <= 2; i++) { - allocStatistic_->DeAllocSpace(i, 1L << 30, i + 4); - } - allocStatistic_->AllocSpace(3, 1L << 30, 10); - - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); - ASSERT_EQ(500L *(1 << 30), alloc); - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); - ASSERT_EQ(501L *(1 << 30), alloc); - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(3, &alloc)); - ASSERT_EQ(1L << 30, alloc); - std::this_thread::sleep_for(std::chrono::milliseconds(30)); - - allocStatistic_->Stop(); -} - -} // namespace mds -} // namespace curve + + class AllocStatisticTest : public ::testing::Test + { + protected: + void SetUp() override + { + periodicPersistInterMs_ = 2; + retryInterMs_ = 2; + mockEtcdClient_ = std::make_shared<MockEtcdClient>(); + allocStatistic_ = std::make_shared<AllocStatistic>( + periodicPersistInterMs_, retryInterMs_, mockEtcdClient_); + } + + protected: + int64_t periodicPersistInterMs_; + int64_t retryInterMs_; + std::shared_ptr<AllocStatistic> allocStatistic_; + std::shared_ptr<MockEtcdClient> mockEtcdClient_; + }; + + TEST_F(AllocStatisticTest, test_Init) + { + { + // 1. Failed to obtain the current revision from ETCD + LOG(INFO) << "test1......"; + EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)).WillOnce(Return(EtcdErrCode::EtcdCanceled)); + ASSERT_EQ(-1, allocStatistic_->Init()); + } + { + // 2. Failed to obtain the alloc size corresponding to the existing logicalPool + LOG(INFO) << "test2......"; + EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)).WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*mockEtcdClient_, + List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, + Matcher<std::vector<std::string> *>(_))) + .WillOnce(Return(EtcdErrCode::EtcdCanceled)); + ASSERT_EQ(-1, allocStatistic_->Init()); + int64_t alloc; + ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); + } + { + // 3.
init successful + LOG(INFO) << "test3......"; + std::vector<std::string> values{ + NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; + EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)).WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); + EXPECT_CALL(*mockEtcdClient_, + List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, + Matcher<std::vector<std::string> *>(_))) + .WillOnce( + DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); + ASSERT_EQ(0, allocStatistic_->Init()); + int64_t alloc; + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); + ASSERT_EQ(1024, alloc); + } + } + + TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) + { + // Initialize allocStatistics + // Old value: logicalPoolId(1):1024 + std::vector<std::string> values{ + NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; + EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) + .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); + EXPECT_CALL(*mockEtcdClient_, + List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, + Matcher<std::vector<std::string> *>(_))) + .WillOnce(DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); + ASSERT_EQ(0, allocStatistic_->Init()); + + PageFileSegment segment; + segment.set_segmentsize(1 << 30); + segment.set_logicalpoolid(1); + segment.set_chunksize(16 * 1024 * 1024); + segment.set_startoffset(0); + std::string encodeSegment; + values.clear(); + ASSERT_TRUE( + NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); + for (int i = 1; i <= 500; i++) + { + values.emplace_back(encodeSegment); + } + + // 1 Before the periodic persistence and statistics threads start, only the old values are available + int64_t alloc; + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); + ASSERT_EQ(1024, alloc); + ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); + + // 2 Update the value of segment + allocStatistic_->DeAllocSpace(1, 64, 1); + allocStatistic_->AllocSpace(1, 32, 1); + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); + ASSERT_EQ(1024 - 32, alloc); + + // Set the segment values in the mock etcd + // logicalPoolId(1):500 * (1<<30) + // logicalPoolId(2):501 * (1<<30) + segment.set_logicalpoolid(2); + ASSERT_TRUE( + NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); + for (int i = 501; i <= 1000; i++) + { + values.emplace_back(encodeSegment); + } + std::string lastKey1 = + NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 500); + std::string lastKey2 = + NameSpaceStorageCodec::EncodeSegmentStoreKey(501, 1000); + EXPECT_CALL(*mockEtcdClient_, ListWithLimitAndRevision( + SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + .Times(2) + .WillOnce(Return(EtcdErrCode::EtcdCanceled)) + .WillOnce(DoAll(SetArgPointee<4>(values), + SetArgPointee<5>(lastKey1), + Return(EtcdErrCode::EtcdOK))); + EXPECT_CALL(*mockEtcdClient_, ListWithLimitAndRevision( + lastKey1, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + .WillOnce(DoAll(SetArgPointee<4>( + std::vector<std::string>{encodeSegment, encodeSegment}), + SetArgPointee<5>(lastKey2), + Return(EtcdErrCode::EtcdOK))); + EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) + .Times(2) + .WillOnce(Return(EtcdErrCode::EtcdCanceled)) + .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); + + // Set the Put result of the mock + EXPECT_CALL(*mockEtcdClient_, Put( + NameSpaceStorageCodec::EncodeSegmentAllocKey(1), + NameSpaceStorageCodec::EncodeSegmentAllocValue( + 1, 1024 - 32 + (1L << 30)))) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*mockEtcdClient_, Put(
NameSpaceStorageCodec::EncodeSegmentAllocKey(2), + NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 1L << 30))) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*mockEtcdClient_, Put( + NameSpaceStorageCodec::EncodeSegmentAllocKey(1), + NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 501L * (1 << 30)))) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*mockEtcdClient_, Put( + NameSpaceStorageCodec::EncodeSegmentAllocKey(2), + NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 502L * (1 << 30)))) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*mockEtcdClient_, Put( + NameSpaceStorageCodec::EncodeSegmentAllocKey(1), + NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 500L * (1 << 30)))) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*mockEtcdClient_, Put( + NameSpaceStorageCodec::EncodeSegmentAllocKey(2), + NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 501L * (1 << 30)))) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*mockEtcdClient_, Put( + NameSpaceStorageCodec::EncodeSegmentAllocKey(3), + NameSpaceStorageCodec::EncodeSegmentAllocValue(3, 1L << 30))) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + + // 2 Start the periodic persistence and statistics threads + for (int i = 1; i <= 2; i++) + { + allocStatistic_->AllocSpace(i, 1L << 30, i + 3); + } + allocStatistic_->Run(); + std::this_thread::sleep_for(std::chrono::seconds(6)); + + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); + ASSERT_EQ(501L * (1 << 30), alloc); + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); + ASSERT_EQ(502L * (1 << 30), alloc); + std::this_thread::sleep_for(std::chrono::milliseconds(30)); + + // Update through alloc again + for (int i = 1; i <= 2; i++) + { + allocStatistic_->DeAllocSpace(i, 1L << 30, i + 4); + } + allocStatistic_->AllocSpace(3, 1L << 30, 10); + + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); + ASSERT_EQ(500L * (1 << 30), alloc); + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); + ASSERT_EQ(501L * (1 << 30), alloc); + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(3, &alloc)); + ASSERT_EQ(1L << 30, alloc); + std::this_thread::sleep_for(std::chrono::milliseconds(30)); + + allocStatistic_->Stop(); + } + + } // namespace mds +} // namespace curve From 50c0779a26522460d6bdf0ff2f6847081933825c Mon Sep 17 00:00:00 2001 From: YunhuiChen <18868877340@163.com> Date: Mon, 20 Nov 2023 19:16:36 +0800 Subject: [PATCH 7/8] fix ut test bug; change docs website Signed-off-by: Wangpan chore: prepare for rebase style: Apply Clang format to modified files, and restore patch files, and rebase all conflicts. style: Apply Clang format to modified files, and restore patch files, and rebase all conflicts. style: Apply Clang format to modified files and restore patch files. style: Apply Clang format to modified files, and restore patch files, and rebase all conflicts. doc: add changelog for v2.7.
Signed-off-by: Wine93 Update CHANGELOG-2.7.md Signed-off-by: Wangpan update curve-arch.png Signed-off-by: Wangpan Code formatting changes Signed-off-by: koko2pp --- CHANGELOG-2.7.md | 5 + README.md | 2 +- README_cn.md | 2 +- WORKSPACE | 4 +- build.sh | 10 +- conf/chunkserver.conf | 144 +- conf/chunkserver.conf.example | 145 +- conf/client.conf | 122 +- conf/cs_client.conf | 116 +- conf/mds.conf | 170 +- conf/py_client.conf | 112 +- conf/snap_client.conf | 116 +- conf/snapshot_clone_server.conf | 80 +- conf/tools.conf | 10 +- curve-ansible/client.ini | 2 +- .../wait_copysets_status_healthy.yml | 2 +- curve-ansible/group_vars/mds.yml | 2 +- .../roles/generate_config/defaults/main.yml | 20 +- .../templates/chunkserver.conf.j2 | 138 +- .../generate_config/templates/client.conf.j2 | 116 +- .../generate_config/templates/mds.conf.j2 | 166 +- .../templates/nebd-client.conf.j2 | 22 +- .../templates/nebd-server.conf.j2 | 10 +- .../templates/snapshot_clone_server.conf.j2 | 80 +- .../generate_config/templates/tools.conf.j2 | 10 +- .../install_package/files/disk_uuid_repair.py | 103 +- .../templates/chunkserver_ctl.sh.j2 | 24 +- .../templates/chunkserver_deploy.sh.j2 | 32 +- .../templates/etcd-daemon.sh.j2 | 44 +- .../templates/mds-daemon.sh.j2 | 52 +- .../install_package/templates/nebd-daemon.j2 | 8 +- .../templates/snapshot-daemon.sh.j2 | 52 +- .../roles/install_package/vars/main.yml | 2 +- .../roles/restart_service/defaults/main.yml | 2 +- .../tasks/include/restart_mds.yml | 2 +- .../tasks/include/restart_snapshotclone.yml | 2 +- .../roles/restart_service/tasks/main.yml | 2 +- .../roles/restart_service/vars/main.yml | 2 +- .../vars/main.yml | 2 +- .../tasks/include/start_chunkserver.yml | 2 +- .../roles/start_service/tasks/main.yml | 2 +- .../roles/stop_service/tasks/main.yml | 2 +- curve-ansible/rolling_update_curve.yml | 14 +- curve-ansible/server.ini | 14 +- curvefs/conf/curvebs_client.conf | 120 +- curvefs/monitor/grafana-report.py | 48 +- .../grafana/provisioning/dashboards/mds.json | 8 +- .../metaserverclient/metaserver_client.cpp | 81 +- .../src/metaserver/copyset/conf_epoch_file.h | 36 +- curvefs/src/metaserver/inflight_throttle.h | 14 +- .../test/mds/schedule/coordinator_test.cpp | 112 +- .../test/mds/schedule/operatorStep_test.cpp | 72 +- .../mds/schedule/recoverScheduler_test.cpp | 36 +- .../mds/schedule/scheduleMetrics_test.cpp | 40 +- .../scheduleService/scheduleService_test.cpp | 15 +- curvefs/test/volume/bitmap_allocator_test.cpp | 7 +- curvefs_python/cbd_client.h | 109 +- curvefs_python/curve_type.h | 109 +- curvefs_python/curvefs_tool.py | 85 +- curvefs_python/libcurvefs.h | 68 +- curvefs_python/test.py | 7 +- curvesnapshot_python/libcurveSnapshot.cpp | 190 +- curvesnapshot_python/libcurveSnapshot.h | 246 +- .../local/chunkserver/conf/chunkserver.conf.0 | 4 +- .../local/chunkserver/conf/chunkserver.conf.1 | 4 +- .../local/chunkserver/conf/chunkserver.conf.2 | 4 +- docs/images/Curve-arch.odg | Bin 404344 -> 404487 bytes docs/images/Curve-arch.png | Bin 281309 -> 283471 bytes include/chunkserver/chunkserver_common.h | 74 +- include/client/libcurve.h | 408 +- include/etcdclient/etcdclient.h | 133 +- .../nebd-package/etc/nebd/nebd-client.conf | 22 +- .../nebd-package/etc/nebd/nebd-server.conf | 10 +- monitor/grafana-report.py | 48 +- monitor/grafana/dashboards/chunkserver.json | 104 +- monitor/grafana/dashboards/client.json | 34 +- monitor/grafana/dashboards/etcd.json | 2 +- monitor/grafana/dashboards/mds.json | 80 +- monitor/grafana/dashboards/report.json | 50 +- 
.../dashboards/snapshotcloneserver.json | 26 +- .../provisioning/dashboards/chunkserver.json | 104 +- .../provisioning/dashboards/client.json | 34 +- .../grafana/provisioning/dashboards/etcd.json | 2 +- .../grafana/provisioning/dashboards/mds.json | 80 +- .../provisioning/dashboards/report.json | 50 +- .../dashboards/snapshotcloneserver.json | 26 +- nebd/etc/nebd/nebd-client.conf | 22 +- nebd/etc/nebd/nebd-server.conf | 10 +- nebd/nebd-package/usr/bin/nebd-daemon | 8 +- nebd/src/common/configuration.cpp | 68 +- nebd/src/common/configuration.h | 127 +- nebd/src/common/crc32.h | 32 +- nebd/src/common/file_lock.h | 15 +- nebd/src/common/name_lock.h | 157 +- nebd/src/common/stringstatus.h | 34 +- nebd/src/common/timeutility.h | 12 +- nebd/src/part1/async_request_closure.cpp | 22 +- nebd/src/part1/async_request_closure.h | 81 +- nebd/src/part1/heartbeat_manager.h | 30 +- nebd/src/part1/libnebd.cpp | 36 +- nebd/src/part1/libnebd.h | 150 +- nebd/src/part1/libnebd_file.h | 88 +- nebd/src/part1/nebd_client.cpp | 141 +- nebd/src/part1/nebd_client.h | 122 +- nebd/src/part1/nebd_common.h | 34 +- nebd/src/part1/nebd_metacache.h | 33 +- nebd/src/part2/define.h | 48 +- nebd/src/part2/file_entity.cpp | 87 +- nebd/src/part2/file_entity.h | 162 +- nebd/src/part2/file_manager.cpp | 12 +- nebd/src/part2/file_manager.h | 134 +- nebd/src/part2/heartbeat_manager.cpp | 23 +- nebd/src/part2/heartbeat_manager.h | 77 +- nebd/src/part2/main.cpp | 11 +- nebd/src/part2/metafile_manager.cpp | 70 +- nebd/src/part2/metafile_manager.h | 50 +- nebd/src/part2/nebd_server.cpp | 47 +- nebd/src/part2/nebd_server.h | 60 +- nebd/src/part2/request_executor.h | 16 +- nebd/src/part2/request_executor_curve.h | 70 +- nebd/src/part2/util.h | 6 +- nebd/test/common/configuration_test.cpp | 27 +- nebd/test/common/test_name_lock.cpp | 36 +- .../test/part1/heartbeat_manager_unittest.cpp | 32 +- nebd/test/part1/nebd_client_unittest.cpp | 144 +- nebd/test/part2/file_manager_unittest.cpp | 239 +- .../test/part2/heartbeat_manager_unittest.cpp | 57 +- nebd/test/part2/heartbeat_service_test.cpp | 20 +- nebd/test/part2/metafile_manager_test.cpp | 101 +- nebd/test/part2/test_nebd_server.cpp | 34 +- .../part2/test_request_executor_curve.cpp | 141 +- proto/chunk.proto | 70 +- proto/cli.proto | 6 +- proto/cli2.proto | 16 +- proto/common.proto | 12 +- proto/copyset.proto | 42 +- proto/heartbeat.proto | 72 +- proto/nameserver2.proto | 114 +- proto/schedule.proto | 2 +- proto/topology.proto | 2 +- robot/Resources/keywords/deploy.py | 501 +- robot/Resources/keywords/fault_inject.py | 1518 ++-- robot/Resources/keywords/snapshot_operate.py | 76 +- robot/curve_choas.txt | 10 +- robot/curve_robot.txt | 38 +- src/chunkserver/chunk_closure.cpp | 28 +- src/chunkserver/chunk_closure.h | 55 +- src/chunkserver/chunk_service.cpp | 303 +- src/chunkserver/chunk_service.h | 103 +- src/chunkserver/chunk_service_closure.cpp | 103 +- src/chunkserver/chunk_service_closure.h | 65 +- src/chunkserver/chunkserver.cpp | 2031 +++-- src/chunkserver/chunkserver.h | 97 +- src/chunkserver/chunkserver_helper.cpp | 20 +- src/chunkserver/chunkserver_main.cpp | 2 +- src/chunkserver/chunkserver_metrics.cpp | 121 +- src/chunkserver/chunkserver_metrics.h | 355 +- src/chunkserver/cli.h | 54 +- src/chunkserver/cli2.cpp | 132 +- src/chunkserver/cli2.h | 83 +- src/chunkserver/clone_copyer.h | 85 +- src/chunkserver/clone_core.cpp | 898 +- src/chunkserver/clone_core.h | 155 +- src/chunkserver/clone_manager.cpp | 24 +- src/chunkserver/clone_manager.h | 59 +- src/chunkserver/clone_task.h | 28 +- 
src/chunkserver/conf_epoch_file.cpp | 38 +- src/chunkserver/conf_epoch_file.h | 63 +- src/chunkserver/config_info.h | 96 +- src/chunkserver/copyset_node.cpp | 503 +- src/chunkserver/copyset_node.h | 337 +- src/chunkserver/copyset_node_manager.cpp | 195 +- src/chunkserver/copyset_node_manager.h | 200 +- src/chunkserver/copyset_service.cpp | 92 +- src/chunkserver/copyset_service.h | 39 +- src/chunkserver/heartbeat.cpp | 1194 +-- src/chunkserver/heartbeat.h | 110 +- src/chunkserver/heartbeat_helper.cpp | 68 +- src/chunkserver/heartbeat_helper.h | 71 +- src/chunkserver/inflight_throttle.h | 17 +- src/chunkserver/op_request.cpp | 494 +- src/chunkserver/op_request.h | 341 +- src/chunkserver/passive_getfn.h | 112 +- .../raftsnapshot/curve_file_adaptor.h | 7 +- .../raftsnapshot/curve_file_service.cpp | 75 +- .../raftsnapshot/curve_filesystem_adaptor.cpp | 86 +- .../raftsnapshot/curve_filesystem_adaptor.h | 145 +- .../curve_snapshot_attachment.cpp | 21 +- .../raftsnapshot/curve_snapshot_attachment.h | 57 +- .../raftsnapshot/curve_snapshot_copier.cpp | 126 +- .../raftsnapshot/curve_snapshot_copier.h | 8 +- .../raftsnapshot/curve_snapshot_file_reader.h | 41 +- src/chunkserver/raftsnapshot/define.h | 9 +- src/chunkserver/register.cpp | 28 +- src/chunkserver/register.h | 22 +- src/chunkserver/trash.cpp | 119 +- src/chunkserver/trash.h | 328 +- src/client/chunk_closure.cpp | 488 +- src/client/chunk_closure.h | 150 +- src/client/client_common.h | 126 +- src/client/client_metric.h | 181 +- src/client/config_info.h | 204 +- src/client/copyset_client.cpp | 198 +- src/client/copyset_client.h | 246 +- src/client/file_instance.cpp | 82 +- src/client/file_instance.h | 122 +- src/client/inflight_controller.h | 57 +- src/client/io_condition_varaiable.h | 35 +- src/client/io_tracker.cpp | 119 +- src/client/io_tracker.h | 253 +- src/client/iomanager.h | 37 +- src/client/iomanager4chunk.h | 168 +- src/client/iomanager4file.cpp | 44 +- src/client/iomanager4file.h | 187 +- src/client/lease_executor.cpp | 15 +- src/client/lease_executor.h | 138 +- src/client/libcurve_file.cpp | 310 +- src/client/libcurve_file.h | 257 +- src/client/libcurve_snapshot.h | 547 +- src/client/mds_client.cpp | 553 +- src/client/mds_client.h | 584 +- src/client/mds_client_base.h | 591 +- src/client/metacache.cpp | 105 +- src/client/metacache.h | 238 +- src/client/metacache_struct.h | 119 +- src/client/request_closure.h | 81 +- src/client/request_context.h | 53 +- src/client/request_scheduler.cpp | 58 +- src/client/request_scheduler.h | 122 +- src/client/request_sender.h | 313 +- src/client/request_sender_manager.cpp | 9 +- src/client/request_sender_manager.h | 25 +- src/client/service_helper.cpp | 80 +- src/client/service_helper.h | 81 +- src/client/splitor.h | 140 +- src/client/unstable_helper.cpp | 8 +- src/client/unstable_helper.h | 39 +- src/common/authenticator.h | 29 +- src/common/bitmap.cpp | 139 +- src/common/bitmap.h | 396 +- src/common/channel_pool.h | 22 +- .../concurrent/bounded_blocking_queue.h | 38 +- src/common/concurrent/concurrent.h | 51 +- src/common/concurrent/count_down_event.h | 50 +- src/common/concurrent/task_thread_pool.h | 67 +- src/common/configuration.cpp | 125 +- src/common/configuration.h | 175 +- src/common/crc32.h | 32 +- src/common/curve_define.h | 39 +- src/common/define.h | 69 +- src/common/fs_util.h | 10 +- src/common/interruptible_sleeper.h | 22 +- src/common/location_operator.cpp | 32 +- src/common/location_operator.h | 46 +- src/common/net_common.h | 20 +- src/common/s3_adapter.cpp | 1384 +-- 
src/common/s3_adapter.h | 201 +- .../snapshotclone/snapshotclone_define.cpp | 12 +- .../snapshotclone/snapshotclone_define.h | 74 +- src/common/stringstatus.h | 35 +- src/common/timeutility.h | 22 +- src/common/uuid.h | 39 +- src/common/wait_interval.h | 19 +- src/fs/ext4_filesystem_impl.cpp | 117 +- src/fs/local_filesystem.h | 209 +- src/kvstorageclient/etcd_client.h | 100 +- src/leader_election/leader_election.cpp | 48 +- src/leader_election/leader_election.h | 42 +- src/mds/nameserver2/clean_core.cpp | 98 +- src/mds/nameserver2/clean_core.h | 39 +- src/mds/nameserver2/clean_manager.h | 33 +- src/mds/nameserver2/clean_task.h | 90 +- src/mds/nameserver2/clean_task_manager.cpp | 38 +- src/mds/nameserver2/clean_task_manager.h | 54 +- src/snapshotcloneserver/clone/clone_core.cpp | 3364 +++---- src/snapshotcloneserver/clone/clone_core.h | 560 +- .../clone/clone_service_manager.cpp | 341 +- .../clone/clone_service_manager.h | 411 +- src/snapshotcloneserver/clone/clone_task.h | 127 +- .../clone/clone_task_manager.cpp | 97 +- .../clone/clone_task_manager.h | 115 +- src/snapshotcloneserver/common/config.h | 49 +- .../common/curvefs_client.h | 595 +- .../common/snapshotclone_info.h | 413 +- .../common/snapshotclone_meta_store.h | 122 +- .../common/snapshotclone_meta_store_etcd.h | 59 +- .../common/snapshotclone_metric.h | 95 +- src/snapshotcloneserver/common/task.h | 32 +- src/snapshotcloneserver/common/task_info.h | 88 +- src/snapshotcloneserver/common/thread_pool.h | 36 +- src/snapshotcloneserver/main.cpp | 19 +- .../snapshot/snapshot_core.cpp | 450 +- .../snapshot/snapshot_core.h | 391 +- .../snapshot/snapshot_data_store.cpp | 42 +- .../snapshot/snapshot_data_store.h | 282 +- .../snapshot/snapshot_data_store_s3.h | 81 +- .../snapshot/snapshot_service_manager.cpp | 192 +- .../snapshot/snapshot_service_manager.h | 264 +- .../snapshot/snapshot_task.cpp | 95 +- .../snapshot/snapshot_task.h | 208 +- .../snapshot/snapshot_task_manager.cpp | 37 +- .../snapshot/snapshot_task_manager.h | 96 +- .../snapshotclone_server.cpp | 726 +- .../snapshotclone_server.h | 258 +- .../snapshotclone_service.cpp | 486 +- .../snapshotclone_service.h | 73 +- src/tools/chunkserver_client.cpp | 30 +- src/tools/chunkserver_client.h | 60 +- src/tools/chunkserver_tool_factory.h | 15 +- src/tools/common.cpp | 6 +- src/tools/common.h | 9 +- src/tools/consistency_check.cpp | 78 +- src/tools/consistency_check.h | 131 +- src/tools/copyset_check.cpp | 129 +- src/tools/copyset_check.h | 99 +- src/tools/copyset_check_core.cpp | 325 +- src/tools/copyset_check_core.h | 486 +- src/tools/curve_cli.cpp | 154 +- src/tools/curve_cli.h | 88 +- src/tools/curve_format_main.cpp | 112 +- src/tools/curve_meta_tool.cpp | 37 +- src/tools/curve_meta_tool.h | 44 +- src/tools/curve_tool_define.h | 21 +- src/tools/curve_tool_factory.h | 32 +- src/tools/curve_tool_main.cpp | 53 +- src/tools/etcd_client.h | 29 +- src/tools/mds_client.cpp | 312 +- src/tools/mds_client.h | 1024 ++- src/tools/metric_client.cpp | 25 +- src/tools/metric_client.h | 51 +- src/tools/metric_name.h | 250 +- src/tools/namespace_tool.cpp | 143 +- src/tools/namespace_tool.h | 71 +- src/tools/namespace_tool_core.cpp | 59 +- src/tools/namespace_tool_core.h | 158 +- src/tools/raft_log_tool.cpp | 90 +- src/tools/raft_log_tool.h | 86 +- src/tools/schedule_tool.cpp | 54 +- src/tools/schedule_tool.h | 30 +- src/tools/snapshot_check.h | 42 +- src/tools/snapshot_clone_client.cpp | 41 +- src/tools/snapshot_clone_client.h | 61 +- src/tools/status_tool.cpp | 2388 ++--- src/tools/status_tool.h | 
145 +- src/tools/version_tool.cpp | 20 +- src/tools/version_tool.h | 107 +- test/chunkserver/braft_cli_service2_test.cpp | 195 +- test/chunkserver/braft_cli_service_test.cpp | 80 +- test/chunkserver/chunk_service_test.cpp | 78 +- test/chunkserver/chunk_service_test2.cpp | 145 +- test/chunkserver/chunkserver_helper_test.cpp | 10 +- test/chunkserver/chunkserver_service_test.cpp | 30 +- .../chunkserver/chunkserver_snapshot_test.cpp | 936 +- test/chunkserver/chunkserver_test_util.cpp | 208 +- test/chunkserver/chunkserver_test_util.h | 192 +- test/chunkserver/cli2_test.cpp | 352 +- test/chunkserver/cli_test.cpp | 238 +- test/chunkserver/client.cpp | 59 +- test/chunkserver/clone/clone_copyer_test.cpp | 142 +- test/chunkserver/clone/clone_core_test.cpp | 389 +- test/chunkserver/clone/clone_manager_test.cpp | 81 +- test/chunkserver/clone/op_request_test.cpp | 743 +- test/chunkserver/copyset_epoch_test.cpp | 101 +- .../chunkserver/copyset_node_manager_test.cpp | 122 +- test/chunkserver/copyset_node_test.cpp | 2162 +++-- test/chunkserver/copyset_service_test.cpp | 114 +- .../datastore/datastore_mock_unittest.cpp | 2865 +++--- .../datastore/file_helper_unittest.cpp | 35 +- .../datastore/filepool_mock_unittest.cpp | 1719 ++-- .../datastore/filepool_unittest.cpp | 25 +- test/chunkserver/fake_datastore.h | 73 +- test/chunkserver/heartbeat_helper_test.cpp | 75 +- test/chunkserver/heartbeat_test.cpp | 104 +- test/chunkserver/heartbeat_test_common.cpp | 127 +- test/chunkserver/heartbeat_test_common.h | 149 +- test/chunkserver/heartbeat_test_main.cpp | 27 +- test/chunkserver/inflight_throttle_test.cpp | 9 +- test/chunkserver/metrics_test.cpp | 175 +- ...curve_filesystem_adaptor_mock_unittest.cpp | 86 +- .../curve_filesystem_adaptor_unittest.cpp | 62 +- .../curve_snapshot_attachment_test.cpp | 40 +- ...raftsnapshot_chunkfilepool_integration.cpp | 295 +- test/chunkserver/server.cpp | 29 +- test/chunkserver/trash_test.cpp | 98 +- test/client/client_common_unittest.cpp | 19 +- .../client_mdsclient_metacache_unittest.cpp | 587 +- test/client/client_metric_test.cpp | 69 +- test/client/client_session_unittest.cpp | 65 +- test/client/client_unstable_helper_test.cpp | 62 +- test/client/client_userinfo_unittest.cpp | 228 +- test/client/copyset_client_test.cpp | 7997 +++++++++-------- test/client/fake/client_workflow_test.cpp | 73 +- .../client/fake/client_workflow_test4snap.cpp | 52 +- test/client/fake/fakeChunkserver.h | 174 +- test/client/fake/fakeMDS.h | 649 +- test/client/inflight_rpc_control_test.cpp | 30 +- test/client/iotracker_splitor_unittest.cpp | 268 +- test/client/lease_executor_test.cpp | 11 +- test/client/libcbd_libcurve_test.cpp | 45 +- test/client/libcurve_interface_unittest.cpp | 197 +- test/client/mds_failover_test.cpp | 521 +- test/client/mock/mock_chunkservice.h | 163 +- test/client/request_scheduler_test.cpp | 153 +- test/client/request_sender_test.cpp | 17 +- test/common/bitmap_test.cpp | 30 +- test/common/channel_pool_test.cpp | 12 +- test/common/configuration_test.cpp | 40 +- test/common/count_down_event_test.cpp | 35 +- test/common/lru_cache_test.cpp | 66 +- test/common/task_thread_pool_test.cpp | 50 +- test/common/test_name_lock.cpp | 36 +- test/failpoint/failpoint_test.cpp | 51 +- test/fs/ext4_filesystem_test.cpp | 238 +- .../chunkserver/chunkserver_basic_test.cpp | 276 +- .../chunkserver/chunkserver_clone_recover.cpp | 391 +- .../chunkserver_concurrent_test.cpp | 868 +- .../datastore/datastore_basic_test.cpp | 108 +- .../datastore/datastore_clone_case_test.cpp | 194 +- 
.../datastore/datastore_concurrency_test.cpp | 17 +- .../datastore/datastore_exception_test.cpp | 697 +- .../datastore/datastore_integration_base.h | 32 +- .../datastore/datastore_integration_test.cpp | 252 +- .../datastore/datastore_restart_test.cpp | 397 +- .../datastore_snapshot_case_test.cpp | 211 +- .../datastore/datastore_stress_test.cpp | 24 +- .../client/chunkserver_exception_test.cpp | 333 +- .../client/common/file_operation.cpp | 18 +- .../client/common/file_operation.h | 13 +- .../integration/client/mds_exception_test.cpp | 684 +- .../unstable_chunkserver_exception_test.cpp | 203 +- test/integration/cluster_common/cluster.cpp | 171 +- test/integration/cluster_common/cluster.h | 854 +- .../cluster_common/cluster_basic_test.cpp | 190 +- .../integration/cluster_common/mds.basic.conf | 158 +- test/integration/common/chunkservice_op.cpp | 152 +- test/integration/common/chunkservice_op.h | 223 +- test/integration/common/config_generator.h | 2 +- test/integration/common/peer_cluster.cpp | 432 +- test/integration/common/peer_cluster.h | 391 +- test/integration/heartbeat/common.cpp | 50 +- test/integration/heartbeat/common.h | 307 +- .../heartbeat/heartbeat_basic_test.cpp | 4360 +++++---- .../heartbeat/heartbeat_exception_test.cpp | 111 +- .../raft/raft_config_change_test.cpp | 2308 ++--- .../raft/raft_log_replication_test.cpp | 1202 +-- test/integration/raft/raft_snapshot_test.cpp | 565 +- test/integration/raft/raft_vote_test.cpp | 1394 +-- .../fake_curvefs_client.cpp | 213 +- .../snapshotcloneserver/fake_curvefs_client.h | 186 +- .../snapshotcloneserver_common_test.cpp | 1965 ++-- .../snapshotcloneserver_concurrent_test.cpp | 183 +- .../snapshotcloneserver_exception_test.cpp | 999 +- .../snapshotcloneserver_module.cpp | 48 +- .../snapshotcloneserver_recover_test.cpp | 758 +- .../snapshotcloneserver_test.cpp | 266 +- test/kvstorageclient/etcdclient_test.cpp | 123 +- .../chunkserver_healthy_checker_test.cpp | 85 +- test/mds/heartbeat/heartbeat_manager_test.cpp | 90 +- .../alloc_statistic_helper_test.cpp | 301 +- .../allocstatistic/alloc_statistic_test.cpp | 395 +- test/mds/nameserver2/clean_core_test.cpp | 120 +- test/mds/nameserver2/curvefs_test.cpp | 2879 +++--- test/mds/nameserver2/file_lock_test.cpp | 64 +- test/mds/nameserver2/file_record_test.cpp | 34 +- .../nameserver2/namespace_service_test.cpp | 419 +- test/mds/schedule/coordinator_test.cpp | 314 +- test/mds/schedule/leaderScheduler_test.cpp | 261 +- test/mds/schedule/operatorStep_test.cpp | 106 +- .../mds/schedule/rapidLeaderSheduler_test.cpp | 71 +- test/mds/schedule/recoverScheduler_test.cpp | 129 +- test/mds/schedule/scheduleMetrics_test.cpp | 284 +- .../scheduleService/scheduleService_test.cpp | 37 +- .../schedule/schedulerPOC/scheduler_poc.cpp | 420 +- test/mds/schedule/scheduler_helper_test.cpp | 248 +- test/mds/server/mds_test.cpp | 63 +- test/mds/topology/test_topology.cpp | 1323 +-- .../test_topology_chunk_allocator.cpp | 367 +- test/mds/topology/test_topology_metric.cpp | 265 +- test/resources.list | 48 +- test/snapshotcloneserver/test_clone_core.cpp | 802 +- .../test_curvefs_client.cpp | 66 +- .../test_snapshot_core.cpp | 1519 ++-- .../test_snapshot_service_manager.cpp | 593 +- test/tools/chunkserver_client_test.cpp | 51 +- test/tools/config/data_check.conf | 102 +- test/tools/copyset_check_core_test.cpp | 489 +- test/tools/copyset_check_test.cpp | 164 +- test/tools/curve_cli_test.cpp | 249 +- test/tools/curve_meta_tool_test.cpp | 52 +- test/tools/data_consistency_check_test.cpp | 147 +- 
test/tools/etcd_client_test.cpp | 53 +- test/tools/mds_client_test.cpp | 872 +- test/tools/metric_client_test.cpp | 95 +- test/tools/namespace_tool_core_test.cpp | 198 +- test/tools/namespace_tool_test.cpp | 193 +- test/tools/raft_log_tool_test.cpp | 36 +- test/tools/segment_parser_test.cpp | 68 +- test/tools/snapshot_clone_client_test.cpp | 83 +- test/tools/status_tool_test.cpp | 510 +- test/tools/version_tool_test.cpp | 241 +- test/util/config_generator.h | 46 +- thirdparties/etcdclient/etcdclient.go | 35 +- tools/curvefsTool.cpp | 2767 +++--- tools/snaptool/queryclone.py | 30 +- ut.sh | 2 +- util/ut_in_image.sh | 4 +- 506 files changed, 53872 insertions(+), 58952 deletions(-) create mode 100644 CHANGELOG-2.7.md diff --git a/CHANGELOG-2.7.md b/CHANGELOG-2.7.md new file mode 100644 index 0000000000..5f799aff4e --- /dev/null +++ b/CHANGELOG-2.7.md @@ -0,0 +1,5 @@ +# CHANGELOG of v2.7 + +[CHANGELOG-2.7](https://docs.opencurve.io/Release/release-notes-v2.7) + +> NOTE: All release notes will be published on our documents site from now on. diff --git a/README.md b/README.md index 1405e0be98..1a988a16ab 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ **A sandbox project hosted by the CNCF Foundation** #### English | [简体中文](README_cn.md) -### 📄 [Documents](https://github.com/opencurve/curve/tree/master/docs) || 🌐 [Official Website](https://www.opencurve.io/Curve/HOME) || 🏠 [Forum](https://ask.opencurve.io/t/topic/7) +### 📄 [Documents](https://docs.opencurve.io/) || 🌐 [Official Website](https://www.opencurve.io/Curve/HOME) || 🏠 [Forum](https://ask.opencurve.io/t/topic/7)
diff --git a/README_cn.md b/README_cn.md index 12d4140388..b722921fe3 100644 --- a/README_cn.md +++ b/README_cn.md @@ -9,7 +9,7 @@ **CNCF基金会的沙箱托管项目** #### [English](README.md) | 简体中文 -### 📄 [文档](https://github.com/opencurve/curve/tree/master/docs) || 🌐 [官网](https://www.opencurve.io/Curve/HOME) || 🏠 [论坛](https://ask.opencurve.io/t/topic/7) +### 📄 [文档](https://docs.opencurve.io/) || 🌐 [官网](https://www.opencurve.io/Curve/HOME) || 🏠 [论坛](https://ask.opencurve.io/t/topic/7)
diff --git a/WORKSPACE b/WORKSPACE index a423f1c46a..ff394ed660 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -96,8 +96,8 @@ bind( actual = "@com_google_googletest//:gtest", ) -#Import the glog files. -# brpc内BUILD文件在依赖glog时, 直接指定的依赖是"@com_github_google_glog//:glog" +# Import the glog files. +# When brpc's BUILD file depends on glog, the dependency it specifies directly is "@com_github_google_glog//:glog" git_repository( name = "com_github_google_glog", remote = "https://github.com/google/glog", diff --git a/build.sh b/build.sh index 9d714c28d6..f9e880d131 100644 --- a/build.sh +++ b/build.sh @@ -17,7 +17,7 @@ # dir=`pwd` -#step1 清除生成的目录和文件 +# step1 Clean up generated directories and files bazel clean rm -rf curvefs_python/BUILD rm -rf curvefs_python/tmplib/ @@ -29,8 +29,8 @@ then exit fi -#step2 获取tag版本和git提交版本信息 -#获取tag版本 +# step2 Obtain the tag version and git commit information +# Get the tag version tag_version=`git status | grep -w "HEAD detached at" | awk '{print $NF}' | awk -F"v" '{print $2}'` if [ -z ${tag_version} ] then @@ -38,7 +38,7 @@ then tag_version=9.9.9 fi -#获取git提交版本信息 +# Obtain the git commit information commit_id=`git show --abbrev-commit HEAD|head -n 1|awk '{print $2}'` if [ "$1" = "debug" ] then @@ -50,7 +50,7 @@ fi curve_version=${tag_version}+${commit_id}${debug} -#step3 执行编译 +# step3 Run the build # check bazel verion, bazel vesion must = 4.2.2 bazel_version=`bazel version | grep "Build label" | awk '{print $3}'` if [ -z ${bazel_version} ] diff --git a/conf/chunkserver.conf b/conf/chunkserver.conf index 19457b3c18..ebfcddb584 100644 --- a/conf/chunkserver.conf +++ b/conf/chunkserver.conf @@ -1,17 +1,17 @@ # # Global settings # -# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level: INFO=0/WARNING=1/ERROR=2/FATAL=3 global.ip=127.0.0.1 # __CURVEADM_TEMPLATE__ ${service_addr} __CURVEADM_TEMPLATE__ global.port=8200 # __CURVEADM_TEMPLATE__ ${service_port} __CURVEADM_TEMPLATE__ global.subnet=127.0.0.0/24 global.enable_external_server=true global.external_ip=127.0.0.1 # __CURVEADM_TEMPLATE__ ${service_external_addr} __CURVEADM_TEMPLATE__ global.external_subnet=127.0.0.0/24 -# chunk大小,一般16MB +# Chunk size, usually 16MB # it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true global.chunk_size=16777216 -# chunk 元数据页大小,一般4KB +# Chunk metadata page size, usually 4KB # it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true global.meta_page_size=4096 # chunk's block size, IO requests must align with it, supported value is |512| and |4096| @@ -21,40 +21,40 @@ global.meta_page_size=4096 # it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true global.block_size=4096 -# clone chunk允许的最长location长度 +# The maximum allowed location length for clone chunks global.location_limit=3000 # # MDS settings # -#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777 +# Multiple MDS addresses are supported, separated by commas: 127.0.0.1:6666,127.0.0.1:7777 mds.listen.addr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__ -# 向mds注册的最大重试次数 +# Maximum number of retries when registering with MDS mds.register_retries=100 -# 向mds注册的rpc超时时间,一般1000ms +# RPC timeout for registering with MDS, usually 1000ms mds.register_timeout=1000 -# 向mds发送心跳的间隔,一般10s +# Interval between heartbeats sent to MDS, usually 10s mds.heartbeat_interval=10 -# 向mds发送心跳的rpc超时间,一般1000ms +# RPC timeout for heartbeats sent to MDS, usually 1000ms
mds.heartbeat_timeout=5000 # # Chunkserver settings # -# chunkserver主目录 +# Chunkserver home directory chunkserver.stor_uri=local://./0/ # __CURVEADM_TEMPLATE__ local://${prefix}/data __CURVEADM_TEMPLATE__ -# chunkserver元数据文件 +# Chunkserver metadata file chunkserver.meta_uri=local://./0/chunkserver.dat # __CURVEADM_TEMPLATE__ local://${prefix}/data/chunkserver.dat __CURVEADM_TEMPLATE__ -# disk类型 +# Disk type chunkserver.disk_type=nvme -# raft内部install snapshot带宽上限,一般20MB +# Bandwidth limit for raft's internal install snapshot, usually 20MB chunkserver.snapshot_throttle_throughput_bytes=20971520 -# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB, -# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个 -# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而 -# 不是20MB的带宽 +# Check cycles allow finer-grained bandwidth control. With snapshot_throttle_throughput_bytes=100MB +# and check cycles=10, for example, the bandwidth budget is 10MB per 1/10 second and unused budget +# does not accumulate: the first 1/10 second gets 10MB which then expires, so the second 1/10 second +# can still use only 10MB of bandwidth, not 20MB chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit on the number of inflight IO requests, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -70,43 +70,43 @@ test.testcopyset_conf=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0 # # lease read switch, default is true(open lease read) # if false, all requests will propose to raft(log read) -# 启用lease read,一般开启,否则将退化为log read形式 +# Enable lease read, usually enabled; otherwise reads degrade to log read copyset.enable_lease_read=true -# 是否检查任期,一般检查 +# Whether to check the raft term, usually enabled copyset.check_term=true -# 是否关闭raft配置变更的服务,一般不关闭 +# Whether to disable the raft configuration change service, usually not disabled copyset.disable_cli=false copyset.log_applied_task=false -# raft选举超时时间,一般是5000ms +# Raft election timeout, usually 5000ms copyset.election_timeout_ms=1000 -# raft打快照间隔,一般是1800s,也就是30分钟 +# Raft snapshot interval, usually 1800s, i.e. 30 minutes copyset.snapshot_interval_s=1800 -# add一个节点,add的节点首先以类似learner的角色拷贝数据 -# 在跟leader差距catchup_margin个entry的时候,leader -# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定 -# 会commit&apply,catchup_margin较小可以大概率确保learner -# 后续很快可以加入复制组 +# When adding a node, the added node first copies data in a learner-like role. +# Once its gap to the leader is within catchup_margin entries, the leader +# attempts to commit the configuration change entry (a committed entry will +# generally be applied as well). A small catchup_margin makes it very likely +# that the learner can join the replication group quickly.
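+# For example, with catchup_margin=1000 as configured below, the leader proposes the +# configuration change once the new replica trails it by fewer than 1000 log entries.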
copyset.catchup_margin=1000 -# copyset chunk数据目录 +# Copyset chunk data directory copyset.chunk_data_uri=local://./0/copysets # __CURVEADM_TEMPLATE__ local://${prefix}/data/copysets __CURVEADM_TEMPLATE__ -# raft wal log目录 +# Raft WAL log directory copyset.raft_log_uri=curve://./0/copysets # __CURVEADM_TEMPLATE__ curve://${prefix}/data/copysets __CURVEADM_TEMPLATE__ -# raft元数据目录 +# Raft metadata directory copyset.raft_meta_uri=local://./0/copysets # __CURVEADM_TEMPLATE__ local://${prefix}/data/copysets __CURVEADM_TEMPLATE__ -# raft snapshot目录 +# Raft snapshot directory copyset.raft_snapshot_uri=curve://./0/copysets # __CURVEADM_TEMPLATE__ curve://${prefix}/data/copysets __CURVEADM_TEMPLATE__ -# copyset回收目录 +# Copyset recycle directory copyset.recycler_uri=local://./0/recycler # __CURVEADM_TEMPLATE__ local://${prefix}/data/recycler __CURVEADM_TEMPLATE__ -# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制 +# Threshold for concurrent copyset loading at chunkserver startup; 0 means no limit copyset.load_concurrency=10 # chunkserver use how many threads to use copyset complete sync. copyset.sync_concurrency=20 -# 检查copyset是否加载完成出现异常时的最大重试次数 +# Maximum number of retries when an exception occurs while checking whether the copyset has finished loading copyset.check_retrytimes=3 -# 当前peer的applied_index与leader上的committed_index差距小于该值 -# 则判定copyset已经加载完成 +# If the gap between the current peer's applied_index and the leader's committed_index is +# smaller than this value, the copyset is considered fully loaded copyset.finishload_margin=2000 -# 循环判定copyset是否加载完成的内部睡眠时间 +# Internal sleep interval of the loop that checks whether the copyset has finished loading copyset.check_loadmargin_interval_ms=1000 # scan copyset interval copyset.scan_interval_sec=5 @@ -132,26 +132,26 @@ copyset.check_syncing_interval_ms=500 # # Clone settings # -# 禁止使用curveclient +# Whether to disable curveclient clone.disable_curve_client=false -# 禁止使用s3adapter +# Whether to disable s3adapter clone.disable_s3_adapter=false -# 克隆的分片大小,一般1MB +# Clone slice size, usually 1MB clone.slice_size=1048576 -# 读clone chunk时是否需要paste到本地 -# 该配置对recover chunk请求类型无效 +# Whether data read from a clone chunk should be pasted to the local chunk +# This option has no effect on recover chunk requests clone.enable_paste=false -# 克隆的线程数量 +# Number of clone threads clone.thread_num=10 -# 克隆的队列深度 +# Clone queue depth clone.queue_depth=6000 -# curve用户名 +# Curve username curve.root_username=root -# curve密码 +# Curve password curve.root_password=root_password -# client配置文件 +# Client configuration file curve.config_path=conf/cs_client.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/cs_client.conf __CURVEADM_TEMPLATE__ -# s3配置文件 +# S3 configuration file s3.config_path=conf/s3.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/s3.conf __CURVEADM_TEMPLATE__ # Curve File time to live curve.curve_file_timeout_s=30 @@ -159,7 +159,7 @@ curve.curve_file_timeout_s=30 # # Local FileSystem settings # -# 是否开启使用renameat2,ext4内核3.15以后开始支持 +# Whether to use renameat2; supported by ext4 since kernel 3.15 fs.enable_renameat2=true # @@ -179,27 +179,27 @@ storeng.sync_write=false # # Concurrent apply module -# 并发模块写线程的并发度,一般是10 +# Concurrency of the write threads in the concurrent apply module, usually 10 wconcurrentapply.size=10 -# 并发模块写线程的队列深度 +# Queue depth of the write threads in the concurrent apply module wconcurrentapply.queuedepth=1 -# 并发模块读线程的并发度,一般是5 +# Concurrency of the read threads in the concurrent apply module, usually 5 rconcurrentapply.size=5 -# 并发模块读线程的队列深度 +# Queue depth of the read threads in the concurrent apply module rconcurrentapply.queuedepth=1 # # Chunkfile pool # -# 是否开启从chunkfilepool获取chunk,一般是true +# Whether to obtain chunks from chunkfilepool, usually true chunkfilepool.enable_get_chunk_from_pool=true -# chunkfilepool目录 +# chunkfilepool directory chunkfilepool.chunk_file_pool_dir=./0/chunks # __CURVEADM_TEMPLATE__ ${prefix}/data __CURVEADM_TEMPLATE__ -# chunkfilepool meta文件路径 +# chunkfilepool meta file path chunkfilepool.meta_path=./chunkfilepool.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/chunkfilepool.meta __CURVEADM_TEMPLATE__ -# chunkfilepool meta文件大小 +# chunkfilepool meta file size chunkfilepool.cpmeta_file_size=4096 -# chunkfilepool get chunk最大重试次数 +# Maximum number of retries for chunkfilepool get chunk chunkfilepool.retry_times=5 # Enable clean chunk chunkfilepool.clean.enable=true @@ -219,23 +219,23 @@ chunkfilepool.thread_num=1 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,从第三条开始配置无效 +# Whether walpool shares chunkfilepool; if true, the settings from the third entry onward are ignored walfilepool.use_chunk_file_pool=true -# WALpool和ChunkFilePool共用时启用,在容量分配时会预留walpool的空间 +# Takes effect when WALpool and ChunkFilePool are shared; space for the WALpool is reserved during capacity allocation walfilepool.use_chunk_file_pool_reserve=15 -# 是否开启从walfilepool获取chunk,一般是true +# Whether to obtain chunks from walfilepool, usually true walfilepool.enable_get_segment_from_pool=true -# walpool目录 +# Walpool directory walfilepool.file_pool_dir=./0/ # __CURVEADM_TEMPLATE__ ${prefix}/data/walfilepool.meta __CURVEADM_TEMPLATE__ -# walpool meta文件路径 +# Walpool meta file path walfilepool.meta_path=./walfilepool.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/walfilepool.meta __CURVEADM_TEMPLATE__ -# walpool meta文件大小 +# Walpool meta file size walfilepool.segment_size=8388608 -# WAL metapage大小 +# WAL metapage size walfilepool.metapage_size=4096 -# WAL filepool 元数据文件大小 +# WAL filepool metadata file size walfilepool.meta_file_size=4096 -# WAL filepool get chunk最大重试次数 +# Maximum number of retries for WAL filepool get chunk walfilepool.retry_times=5 # Whether allocate filePool by percent of disk size.
walfilepool.allocated_by_percent=true @@ -249,14 +249,14 @@ walfilepool.thread_num=1 # # trash settings # -# chunkserver回收数据彻底删除的过期时间 +# Time after which recycled data on the chunkserver is permanently deleted trash.expire_afterSec=300 -# chunkserver检查回收数据过期时间的周期 +# Interval at which the chunkserver checks recycled data for expiration trash.scan_periodSec=120 # common option # -# chunkserver 日志存放文件夹 +# Chunkserver log directory chunkserver.common.logDir=./ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__ -# 单元测试情况下 +# For unit tests # chunkserver.common.logDir=./runlog/ diff --git a/conf/chunkserver.conf.example b/conf/chunkserver.conf.example index 443412215b..f7ab284dd9 100644 --- a/conf/chunkserver.conf.example +++ b/conf/chunkserver.conf.example @@ -1,18 +1,18 @@ # # Global settings # -# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log levels: INFO=0/WARNING=1/ERROR=2/FATAL=3 global.ip=127.0.0.1 global.port=8200 global.subnet=127.0.0.0/24 global.enable_external_server=false global.external_ip=127.0.0.1 global.external_subnet=127.0.0.0/24 -# chunk大小,一般16MB +# Chunk size, typically 16MB global.chunk_size=16777216 -# chunk 元数据页大小,一般4KB +# Chunk metadata page size, typically 4KB global.meta_page_size=4096 -# clone chunk允许的最长location长度 +# Maximum length allowed for the location of a clone chunk # chunk's block size, IO requests must align with it, supported value is |512| and |4096| # it should consist with `block_size` in chunkfilepool.meta_path and `mds.volume.blockSize` in MDS's configurations # for clone chunk and snapshot chunk, it's also the minimum granularity that each bit represents @@ -23,34 +23,35 @@ global.location_limit=3000 # # MDS settings # -#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777 +# Support for multiple addresses for MDS, separated by commas: 127.0.0.1:6666,127.0.0.1:7777 mds.listen.addr=127.0.0.1:6666 -# 向mds注册的最大重试次数 +# Maximum retry count for registering with MDS mds.register_retries=100 -# 向mds注册的rpc超时时间,一般1000ms +# RPC timeout for MDS registration, typically 1000ms mds.register_timeout=1000 -# 向mds发送心跳的间隔,一般10s +# Interval for sending heartbeats to MDS, usually 10s mds.heartbeat_interval=10 -# 向mds发送心跳的rpc超时间,一般1000ms +# RPC timeout for sending heartbeats to MDS, typically 1000ms mds.heartbeat_timeout=5000 # # Chunkserver settings # -# chunkserver主目录 +# Main directory for chunkserver chunkserver.stor_uri=local://./0/ -# chunkserver元数据文件 +# Metadata file for chunkserver chunkserver.meta_uri=local://./0/chunkserver.dat -# disk类型 +# Disk type chunkserver.disk_type=nvme -# raft内部install snapshot带宽上限,一般20MB +# Raft internal install snapshot bandwidth limit, usually 20MB chunkserver.snapshot_throttle_throughput_bytes=20971520 -# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB, -# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个 -# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而 -# 不是20MB的带宽 +# Throttle check cycles are for finer-grained bandwidth control. For example, +# with snapshotThroughputBytes=100MB and check cycles=10, it ensures that +# the bandwidth is 10MB every 1/10 second, without accumulation. For instance, +# the bandwidth is 10MB for the first 1/10 second, but it expires after that. +# In the second 1/10 second, the bandwidth remains 10MB, not 20MB.
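+# With the values used here (snapshot_throttle_throughput_bytes=20971520 and check cycles=4), +# each 1/4-second window gets a budget of 20971520 / 4 = 5242880 bytes (5MB), and an idle +# window's unused budget is discarded rather than carried forward.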
chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit for the number of inflight IO requests, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -64,41 +65,41 @@ test.testcopyset_conf=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0 # # Copyset settings # -# 是否检查任期,一般检查 +# Whether to check the term, usually checked copyset.check_term=true -# 是否关闭raft配置变更的服务,一般不关闭 +# Whether to disable the service for raft configuration changes, generally not disabled copyset.disable_cli=false copyset.log_applied_task=false -# raft选举超时时间,一般是5000ms +# Raft election timeout, usually 5000ms copyset.election_timeout_ms=1000 -# raft打快照间隔,一般是1800s,也就是30分钟 +# Raft snapshot interval, usually 1800s, i.e., 30 minutes copyset.snapshot_interval_s=1800 -# add一个节点,add的节点首先以类似learner的角色拷贝数据 -# 在跟leader差距catchup_margin个entry的时候,leader -# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定 -# 会commit&apply,catchup_margin较小可以大概率确保learner -# 后续很快可以加入复制组 +# When adding a node, the added node first copies data in a role similar to a learner. +# When there is a difference of catchup_margin entries from the leader, the leader +# will attempt to commit and apply the configuration change entry (usually the committed +# entry will certainly be committed and applied). A smaller catchup_margin can ensure +# that the learner can quickly join the replication group. copyset.catchup_margin=1000 -# copyset chunk数据目录 +# Copyset chunk data directory copyset.chunk_data_uri=local://./0/copysets -# raft wal log目录 +# Raft WAL log directory copyset.raft_log_uri=curve://./0/copysets -# raft元数据目录 +# Raft metadata directory copyset.raft_meta_uri=local://./0/copysets -# raft snapshot目录 +# Raft snapshot directory copyset.raft_snapshot_uri=curve://./0/copysets -# copyset回收目录 +# Copyset recycling directory copyset.recycler_uri=local://./0/recycler -# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制 +# When the chunk server starts, the threshold for concurrent loading of copysets, set to 0 to indicate no limitation. copyset.load_concurrency=10 -# chunkserver use how many threads to use copyset complete sync. +# Number of threads used by chunk server for copyset complete synchronization. copyset.sync_concurrency=20 -# 检查copyset是否加载完成出现异常时的最大重试次数 +# Maximum retry times when checking for exceptions during copyset loading. copyset.check_retrytimes=3 -# 当前peer的applied_index与leader上的committed_index差距小于该值 -# 则判定copyset已经加载完成 +# If the difference between the applied_index of the current peer and the committed_index +# on the leader is less than this value, the copyset is considered loaded. copyset.finishload_margin=2000 -# 循环判定copyset是否加载完成的内部睡眠时间 +# Internal sleep time for cyclically determining if the copyset is loaded. 
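+# For example, the value of 1000 below makes the loader sleep one second between two +# consecutive finishload_margin checks.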
-# 循环判定copyset是否加载完成的内部睡眠时间
+# Sleep interval inside the loop that checks whether copysets have finished loading
copyset.check_loadmargin_interval_ms=1000
# scan copyset interval
copyset.scan_interval_sec=5
@@ -124,26 +125,26 @@ copyset.check_syncing_interval_ms=500
#
# Clone settings
#
-# 禁止使用curveclient
+# Disable the use of curveclient
clone.disable_curve_client=false
-# 禁止使用s3adapter
+# Disable the use of s3adapter
clone.disable_s3_adapter=false
-# 克隆的分片大小,一般1MB
+# Clone slice size, usually 1MB
clone.slice_size=1048576
-# 读clone chunk时是否需要paste到本地
-# 该配置对recover chunk请求类型无效
+# Whether data read from a clone chunk should be pasted to the local chunk
+# This option has no effect on recover chunk requests
clone.enable_paste=false
-# 克隆的线程数量
+# Number of clone worker threads
clone.thread_num=10
-# 克隆的队列深度
+# Clone queue depth
clone.queue_depth=6000
-# curve用户名
+# Curve username
curve.root_username=root
-# curve密码
+# Curve password
curve.root_password=root_password
-# client配置文件
+# Client configuration file
curve.config_path=conf/cs_client.conf
-# s3配置文件
+# S3 configuration file
s3.config_path=conf/s3.conf
# Curve File time to live
curve.curve_file_timeout_s=30
@@ -151,7 +152,7 @@ curve.curve_file_timeout_s=30
#
# Local FileSystem settings
#
-# 是否开启使用renameat2,ext4内核3.15以后开始支持
+# Whether to enable renameat2; ext4 supports it since kernel 3.15
fs.enable_renameat2=true
#
@@ -171,27 +172,27 @@ storeng.sync_write=false
#
# Concurrent apply module
-# 并发模块写线程的并发度,一般是10
+# Concurrency of the write threads in the concurrent-apply module, usually 10
wconcurrentapply.size=10
-# 并发模块写线程的队列深度
+# Queue depth of the write threads in the concurrent-apply module
wconcurrentapply.queuedepth=1
-# 并发模块读线程的并发度,一般是5
+# Concurrency of the read threads in the concurrent-apply module, usually 5
rconcurrentapply.size=5
-# 并发模块读线程的队列深度
+# Queue depth of the read threads in the concurrent-apply module
rconcurrentapply.queuedepth=1
#
# Chunkfile pool
#
-# 是否开启从chunkfilepool获取chunk,一般是true
+# Whether to obtain chunks from the chunkfilepool, usually true
chunkfilepool.enable_get_chunk_from_pool=true
-# chunkfilepool目录
+# chunkfilepool directory
chunkfilepool.chunk_file_pool_dir=./0/chunks
-# chunkfilepool meta文件路径
+# chunkfilepool meta file path
#chunkfilepool.meta_path=./chunkfilepool.meta
-# chunkfilepool meta文件大小
+# chunkfilepool meta file size
chunkfilepool.cpmeta_file_size=4096
-# chunkfilepool get chunk最大重试次数
+# chunkfilepool get chunk maximum retry count
chunkfilepool.retry_times=5
# Enable clean chunk
chunkfilepool.clean.enable=true
@@ -211,23 +212,23 @@ chunkfilepool.thread_num=1
#
# WAL file pool
#
-# walpool是否共用chunkfilepool,如果为true,从第三条开始配置无效
+# Whether the walpool shares the chunkfilepool; if true, the settings from the third entry onward are ignored
walfilepool.use_chunk_file_pool=true
-# WALpool和ChunkFilePool共用时启用,在容量分配时会预留walpool的空间
+# Takes effect when the WALpool and ChunkFilePool are shared; space for the walpool is reserved during capacity allocation
walfilepool.use_chunk_file_pool_reserve=15
-# 是否开启从walfilepool获取chunk,一般是true
+# Whether to obtain chunks from the walfilepool, usually true
walfilepool.enable_get_segment_from_pool=true
-# walpool目录
+# Walpool directory
walfilepool.file_pool_dir=./0/
-# walpool meta文件路径
+# Walpool meta file path
walfilepool.meta_path=./walfilepool.meta
-# walpool meta文件大小
+# Walpool meta file size
walfilepool.segment_size=8388608
-# WAL metapage大小
+# WAL metapage size
walfilepool.metapage_size=4096
-# WAL filepool 元数据文件大小
+# WAL filepool metadata file size
walfilepool.meta_file_size=4096
-# WAL filepool get chunk最大重试次数
+# WAL filepool get chunk maximum retry count
walfilepool.retry_times=5
# Whether allocate filePool by percent of disk size.
walfilepool.allocated_by_percent=true
@@ -241,14 +242,14 @@ walfilepool.thread_num=1
#
# trash settings
#
-# chunkserver回收数据彻底删除的过期时间
+# Expiration time after which chunkserver permanently deletes recycled data
trash.expire_afterSec=300
-# chunkserver检查回收数据过期时间的周期
+# Interval at which chunkserver checks recycled data for expiration
trash.scan_periodSec=120
# common option
#
-# chunkserver 日志存放文件夹
+# Chunkserver log storage folder
chunkserver.common.logDir=./
-# 单元测试情况下
+# For unit tests
# chunkserver.common.logDir=./runlog/
diff --git a/conf/client.conf b/conf/client.conf
index bac0dc1108..22345400d5 100644
--- a/conf/client.conf
+++ b/conf/client.conf
@@ -1,29 +1,29 @@
#
-################### mds一侧配置信息 ##################
+################### MDS-side configuration ##################
#
-# mds的地址信息,对于mds集群,地址以逗号隔开
+# MDS address; for an MDS cluster, addresses are separated by commas
mds.listen.addr=127.0.0.1:6666
-# 初始化阶段向mds注册开关,默认为开
+# Whether to register with MDS during initialization, on by default
mds.registerToMDS=true
-# 与mds通信的rpc超时时间
+# RPC timeout for communication with MDS
mds.rpcTimeoutMS=500
-# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值
+# Maximum RPC timeout for communication with MDS; the exponential-backoff timeout cannot exceed this value
mds.maxRPCTimeoutMS=2000
-# 与mds通信重试总时间
+# Total retry time for communication with MDS
mds.maxRetryMS=8000
-# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数
+# Switch MDS once consecutive retries on the current one exceed this limit; the count includes timeout retries
mds.maxFailedTimesBeforeChangeMDS=2
-# 与MDS一侧保持一个lease时间内多少次续约
+# Number of lease renewals with MDS within one lease period
mds.refreshTimesPerLease=4
-# mds RPC接口每次重试之前需要先睡眠一段时间
+# The MDS RPC interface sleeps for a while before each retry
mds.rpcRetryIntervalUS=100000
# The normal retry times for trigger wait strategy
@@ -36,84 +36,84 @@ mds.maxRetryMsInIOPath=86400000
mds.waitSleepMs=10000
#
-################# metacache配置信息 ################
+################# Metacache configuration ################
#
-# 获取leader的rpc超时时间
+# RPC timeout for getting the leader
metacache.getLeaderTimeOutMS=500
-# 获取leader的重试次数
+# Number of retries for getting the leader
metacache.getLeaderRetry=5
-# 获取leader接口每次重试之前需要先睡眠一段时间
+# The get-leader interface sleeps for a while before each retry
metacache.rpcRetryIntervalUS=100000
#
-############### 调度层的配置信息 #############
+############### Scheduling layer configuration #############
#
-# 调度层队列大小,每个文件对应一个队列
-# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。
+# Scheduling layer queue size, one queue per file
+# The queue holds asynchronous IO tasks, so its depth affects the client's overall throughput.
schedule.queueCapacity=1000000
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of execution threads for the queue
+# An execution thread simply dequeues an IO task, sends it to the network, and
+# returns for the next task. From dequeue to finishing the RPC send takes about
+# 20us-100us: ~20us normally, ~100us when the leader must be fetched during the
+# send. A single thread sustains roughly 100k-500k requests/s, which is enough.
schedule.threadpoolSize=2
-# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程
-# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回,
-# 这样libcurve不占用qemu的线程,不阻塞其异步调用
+# Task queue introduced to isolate the QEMU-side thread, since QEMU has only one IO thread
+# When QEMU calls the AIO interface, the call is simply pushed onto the task queue and returns,
+# so libcurve does not occupy QEMU's thread or block its asynchronous calls
isolation.taskQueueCapacity=1000000
-# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程
+# Thread pool size of the task queue isolating QEMU threads; defaults to 1 thread
isolation.taskThreadPoolSize=1
#
-################ 与chunkserver通信相关配置 #############
+################ Configuration for communication with chunkserver #############
#
-# 读写接口失败的OP之间重试睡眠
+# Sleep between retries of failed read/write OPs
chunkserver.opRetryIntervalUS=100000
-# 失败的OP重试次数
+# Number of retries for failed OPs
chunkserver.opMaxRetry=2500000
-# 与chunkserver通信的rpc超时时间
+# RPC timeout for communication with chunkserver
chunkserver.rpcTimeoutMS=1000
-# 重试请求之间睡眠最长时间
-# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间
-# 这个时间最大为maxRetrySleepIntervalUs
+# Maximum sleep time between retried requests
+# The sleep time is increased when the network is congested or the chunkserver is overloaded,
+# up to at most maxRetrySleepIntervalUs
chunkserver.maxRetrySleepIntervalUS=8000000
-# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略
-# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间
-# 这个时间最大为maxTimeoutMS
+# Maximum RPC timeout for retried requests; the timeout follows an exponential-backoff policy
+# The RPC timeout is increased when timeouts are caused by network congestion,
+# up to at most maxTimeoutMS
chunkserver.maxRPCTimeoutMS=8000
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# If this value is exceeded, a health check is performed; if it fails, the chunkserver is marked unstable
chunkserver.maxStableTimeoutTimes=10
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# Timeout of the health-check request after consecutive RPC timeouts on a chunkserver
chunkserver.checkHealthTimeoutMs=100
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on the same server exceeds this value,
+# all chunkservers on it are marked unstable
chunkserver.serverStableThreshold=3
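
The retry policy bounded by the two maxima above follows exponential backoff. A simplified sketch (illustrative only; the defaults mirror this file, and the real client applies extra conditions such as minRetryTimesForceTimeoutBackoff below):

    def next_retry_params(retry: int, base_timeout_ms: int = 1000,
                          max_timeout_ms: int = 8000,
                          base_sleep_us: int = 100000,
                          max_sleep_us: int = 8000000):
        # Both the RPC timeout and the inter-retry sleep grow exponentially
        # and are clamped at their configured maxima.
        timeout = min(base_timeout_ms * (2 ** retry), max_timeout_ms)
        sleep = min(base_sleep_us * (2 ** retry), max_sleep_us)
        return timeout, sleep
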
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang
-# 真正宕机的情况下,请求重试一定次数后会处理完成
-# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避
+# Unstable may also be triggered when the underlying chunkserver is under heavy pressure
+# Because the copyset leader may change, the request timeout is reset to the default value, which can cause IO to hang
+# If the machine is really down, the request completes after a certain number of retries
+# If retries keep going, it is not a downtime case, and the timeout should still enter the exponential-backoff logic
+# Once a request has been retried more than this many times, its timeout always enters exponential backoff
chunkserver.minRetryTimesForceTimeoutBackoff=5
-# ** 已废弃,不再使用,请使用 `chunkserver.slowRequestThresholdMS` **
-# ** dreprecated, use `chunkserver.slowRequestThresholdMS` instead **
-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# ** Deprecated, no longer in use, please use `chunkserver.slowRequestThresholdMS` **
+# ** Deprecated, use `chunkserver.slowRequestThresholdMS` instead **
+# When an RPC has been retried more than maxRetryTimesBeforeConsiderSuspend times,
+# it is recorded as a suspended IO and the metric raises an alert.
chunkserver.maxRetryTimesBeforeConsiderSuspend=20
# 请求重试时间超过该阈值后,会标记为slow request
@@ -122,41 +122,41 @@ chunkserver.maxRetryTimesBeforeConsiderSuspend=20
chunkserver.slowRequestThresholdMS=45000
#
-################# 文件级别配置项 #############
+################# File-level configuration #############
#
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; inflight RPCs are tracked per file
global.fileMaxInFlightRPCNum=128
-# 文件IO下发到底层chunkserver最大的分片KB
+# Maximum split size, in KB, of file IO dispatched to the underlying chunkserver
global.fileIOSplitMaxSizeKB=64
#
-################# log相关配置 ###############
+################# Log configuration ###############
#
# enable logging or not
global.logging.enable=True
#
-# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
global.logLevel=0
-# 设置log的路径
+# Log path
global.logPath=/data/log/curve/  # __CURVEADM_TEMPLATE__ /curvebs/client/logs __CURVEADM_TEMPLATE__
-# 单元测试情况下
+# For unit tests
# logpath=./runlog/
#
-################# 读源卷相关配置 ###############
+################# Source-volume read configuration ###############
#
-# 读取源卷时打开的fd超时关闭时间300s
+# An fd opened for reading the source volume is closed after a 300s timeout
closefd.timeout=300
-# 读取源卷时打开的fd后台线程每600s扫描一遍fdMap,关闭超时fd
+# A background thread scans the fdMap every 600s and closes timed-out fds opened for reading the source volume
closefd.timeInterval=600
#
-############### metric 配置信息 #############
+############### Metric configuration #############
#
global.metricDummyServerStartPort=9000
-# 是否关闭健康检查: true/关闭 false/不关闭
+# Whether to disable the health check: true = disable, false = keep enabled
global.turnOffHealthCheck=true
# minimal open file limit
diff --git a/conf/cs_client.conf b/conf/cs_client.conf
index 09d567d8f7..5bd674e417 100644
--- a/conf/cs_client.conf
+++ b/conf/cs_client.conf
@@ -1,29 +1,29 @@
#
-################### mds一侧配置信息 ##################
+################### MDS-side configuration ##################
#
-# mds的地址信息,对于mds集群,地址以逗号隔开
+# MDS address; for an MDS cluster, addresses are separated by commas
mds.listen.addr=127.0.0.1:6666  # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__
-# 初始化阶段向mds注册开关,默认为开
+# Whether to register with MDS during initialization, on by default
mds.registerToMDS=false
-# 与mds通信的rpc超时时间
+# RPC timeout for communication with MDS
mds.rpcTimeoutMS=500
-# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值
+# Maximum RPC timeout for communication with MDS; the exponential-backoff timeout cannot exceed this value
mds.maxRPCTimeoutMS=2000
-# 与mds通信重试总时间
+# Total retry time for communication with MDS
mds.maxRetryMS=8000
-# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数
+# Switch MDS once consecutive retries on the current one exceed this limit; the count includes timeout retries
mds.maxFailedTimesBeforeChangeMDS=2
-# 与MDS一侧保持一个lease时间内多少次续约
+# Number of lease renewals with MDS within one lease period
mds.refreshTimesPerLease=4
-# mds RPC接口每次重试之前需要先睡眠一段时间
+# The MDS RPC interface sleeps for a while before each retry
mds.rpcRetryIntervalUS=100000
# The normal retry times for trigger wait strategy
@@ -36,111 +36,111 @@ mds.maxRetryMsInIOPath=86400000
mds.waitSleepMs=10000
#
-################# metacache配置信息 ################
+################# Metacache configuration ################
#
-# 获取leader的rpc超时时间
+# RPC timeout for getting the leader
metacache.getLeaderTimeOutMS=500
-# 获取leader的backup request超时时间
+# Backup-request timeout for getting the leader
metacache.getLeaderBackupRequestMS=100
-# getleaer backup request使用的load balancer方法
+# Load balancer method used by the getleader backup request
metacache.getLeaderBackupRequestLbName=rr
-# 获取leader的重试次数
+# Number of retries for getting the leader
metacache.getLeaderRetry=5
-# 获取leader接口每次重试之前需要先睡眠一段时间
+# The get-leader interface sleeps for a while before each retry
metacache.rpcRetryIntervalUS=100000
#
-############### 调度层的配置信息 #############
+############### Scheduling layer configuration #############
#
-# 调度层队列大小,每个文件对应一个队列
-# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。
+# Scheduling layer queue size, one queue per file
+# The queue holds asynchronous IO tasks, so its depth affects the client's overall throughput.
schedule.queueCapacity=1000000
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of execution threads for the queue
+# An execution thread simply dequeues an IO task, sends it to the network, and
+# returns for the next task. From dequeue to finishing the RPC send takes about
+# 20us-100us: ~20us normally, ~100us when the leader must be fetched during the
+# send. A single thread sustains roughly 100k-500k requests/s, which is enough.
schedule.threadpoolSize=1
-# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程
-# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回,
-# 这样libcurve不占用qemu的线程,不阻塞其异步调用
+# Task queue introduced to isolate the QEMU-side thread, since QEMU has only one IO thread
+# When QEMU calls the AIO interface, the call is simply pushed onto the task queue and returns,
+# so libcurve does not occupy QEMU's thread or block its asynchronous calls
isolation.taskQueueCapacity=1000000
-# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程
+# Thread pool size of the task queue isolating QEMU threads; defaults to 1 thread
isolation.taskThreadPoolSize=1
#
-################ 与chunkserver通信相关配置 #############
+################ Configuration for communication with chunkserver #############
#
-# 读写接口失败的OP之间重试睡眠
+# Sleep between retries of failed read/write OPs
chunkserver.opRetryIntervalUS=100000
-# 失败的OP重试次数
+# Number of retries for failed OPs
chunkserver.opMaxRetry=3
-# 与chunkserver通信的rpc超时时间
+# RPC timeout for communication with chunkserver
chunkserver.rpcTimeoutMS=1000
-# 重试请求之间睡眠最长时间
-# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间
-# 这个时间最大为maxRetrySleepIntervalUs
+# Maximum sleep time between retried requests
+# The sleep time is increased when the network is congested or the chunkserver is overloaded,
+# up to at most maxRetrySleepIntervalUs
chunkserver.maxRetrySleepIntervalUS=8000000
-# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略
-# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间
-# 这个时间最大为maxTimeoutMS
+# Maximum RPC timeout for retried requests; the timeout follows an exponential-backoff policy
+# The RPC timeout is increased when timeouts are caused by network congestion,
+# up to at most maxTimeoutMS
chunkserver.maxRPCTimeoutMS=8000
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# If this value is exceeded, a health check is performed; if it fails, the chunkserver is marked unstable
chunkserver.maxStableTimeoutTimes=64
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# Timeout of the health-check request after consecutive RPC timeouts on a chunkserver
chunkserver.checkHealthTimeoutMs=100
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on the same server exceeds this value,
+# all chunkservers on it are marked unstable
chunkserver.serverStableThreshold=3
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang
-# 真正宕机的情况下,请求重试一定次数后会处理完成
-# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避
+# Unstable may also be triggered when the underlying chunkserver is under heavy pressure
+# Because the copyset leader may change, the request timeout is reset to the default value, which can cause IO to hang
+# If the machine is really down, the request completes after a certain number of retries
+# If retries keep going, it is not a downtime case, and the timeout should still enter the exponential-backoff logic
+# Once a request has been retried more than this many times, its timeout always enters exponential backoff
chunkserver.minRetryTimesForceTimeoutBackoff=5
-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# When an RPC has been retried more than maxRetryTimesBeforeConsiderSuspend times,
+# it is recorded as a suspended IO and the metric raises an alert
chunkserver.maxRetryTimesBeforeConsiderSuspend=20
#
-################# 文件级别配置项 #############
+################# File-level configuration #############
#
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; inflight RPCs are tracked per file
global.fileMaxInFlightRPCNum=64
-# 文件IO下发到底层chunkserver最大的分片KB
+# Maximum split size, in KB, of file IO dispatched to the underlying chunkserver
global.fileIOSplitMaxSizeKB=64
#
-################# log相关配置 ###############
+################# Log configuration ###############
#
-# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
global.logLevel=0
-# 设置log的路径
+# Log path
global.logPath=/data/log/curve/  # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
-# 单元测试情况下
+# For unit tests
# logpath=./runlog/
#
-############### metric 配置信息 #############
+############### Metric configuration #############
#
global.metricDummyServerStartPort=9000
@@ -149,7 +149,7 @@ global.metricDummyServerStartPort=9000
global.minOpenFileLimit=0
#
-# session map文件,存储打开文件的filename到path的映射
+# Session map file, storing the filename-to-path mapping of opened files
#
global.sessionMapPath=./session_map.json
diff --git a/conf/mds.conf b/conf/mds.conf
index cc8c661e0d..ef61689b97 100644
--- a/conf/mds.conf
+++ b/conf/mds.conf
@@ -1,5 +1,5 @@
#
-# mds服务端口
+# MDS service port
#
mds.listen.addr=127.0.0.1:6666 #__CURVEADM_TEMPLATE__ ${service_addr}:${service_port} __CURVEADM_TEMPLATE__
mds.dummy.listen.port=6667 # __CURVEADM_TEMPLATE__ ${service_dummy_port} __CURVEADM_TEMPLATE__
@@ -7,15 +7,15 @@ global.subnet=127.0.0.0/24
global.port=6666 # __CURVEADM_TEMPLATE__ ${service_port} __CURVEADM_TEMPLATE__
#
-# etcd相关配置
+# etcd-related configuration
#
-# etcd地址
+# etcd address
mds.etcd.endpoint=127.0.0.1:2379 # __CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEADM_TEMPLATE__
-# client建立连接的超时时间
+# Timeout for the etcd client to establish a connection
mds.etcd.dailtimeoutMs=5000
-# client进行put/get/txn等操作的超时时间
+# Timeout for client put/get/txn and other operations
mds.etcd.operation.timeoutMs=5000
-# client操作失败可以重试的次数
+# Number of times a failed client operation can be retried
mds.etcd.retry.times=3
# wait dlock timeout
mds.etcd.dlock.timeoutMs=10000
@@ -27,68 +27,68 @@ etcd.auth.username=
etcd.auth.password=
#
-# segment分配量统计相关配置
+# Configuration for segment allocation statistics
#
-# 将内存中的数据持久化到etcd的间隔, 单位ms
+# Interval for persisting in-memory data to etcd, in ms
mds.segment.alloc.periodic.persistInterMs=10000
-# 出错情况下的重试间隔,单位ms
+# Retry interval on error, in ms
mds.segment.alloc.retryInterMs=1000
mds.segment.discard.scanIntevalMs=5000
-# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s)
-# 该值和etcd集群election timeout相关.
-# etcd的server端限制了该值最小为1.5 * election timeout
-# 建议设置etcd集群election timeout为1s
+# A session is created during leader election; the unit is seconds (the Go-side
+# interface also takes this value in seconds). It is related to the etcd
+# cluster's election timeout: the etcd server requires it to be at least
+# 1.5 * election timeout, so setting the etcd election timeout to 1s is recommended
mds.leader.sessionInterSec=5
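
A small sanity-check sketch of the etcd constraint described above (hypothetical helper; the actual enforcement happens on the etcd server side):

    def validate_session_interval(session_inter_sec: float,
                                  etcd_election_timeout_s: float) -> None:
        # etcd rejects session TTLs below 1.5x its election timeout.
        if session_inter_sec < 1.5 * etcd_election_timeout_s:
            raise ValueError("sessionInterSec must be >= 1.5 * election timeout")

With the recommended 1s election timeout, the default sessionInterSec=5 leaves ample margin.
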
-# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间
-# 内未当选leader会返回错误
+# Timeout for the leader election: 0 blocks until the election succeeds, while a
+# value greater than 0 returns an error if no leader is elected within electionTimeoutMs
mds.leader.electionTimeoutMs=0
#
-# scheduler相关配置
+# Scheduler-related configuration
#
-# copysetScheduler开关
+# copysetScheduler switch
mds.enable.copyset.scheduler=true
-# leaderScheduler开关
+# leaderScheduler switch
mds.enable.leader.scheduler=true
-# recoverScheduler开关
+# recoverScheduler switch
mds.enable.recover.scheduler=true
-# replicaScheduler开关
+# replicaScheduler switch
mds.enable.replica.scheduler=true
# Scan scheduler switch
mds.enable.scan.scheduler=true
-# copysetScheduler 轮次间隔,单位是s
+# copysetScheduler round interval, in seconds
mds.copyset.scheduler.intervalSec=5
-# replicaScheduler 轮次间隔,单位是s
+# replicaScheduler round interval, in seconds
mds.replica.scheduler.intervalSec=5
-# leaderScheduler 轮次间隔,单位是s
+# leaderScheduler round interval, in seconds
mds.leader.scheduler.intervalSec=30
-# recoverScheduler 轮次间隔,单位是s
+# recoverScheduler round interval, in seconds
mds.recover.scheduler.intervalSec=5
# Scan scheduler run interval (seconds)
mds.scan.scheduler.intervalSec=60
-# 每块磁盘上operator的并发度
+# Concurrency of operators on each disk
mds.schduler.operator.concurrent=1
-# leader变更超时时间, 超时后mds从内存移除该operator
+# Timeout for a leader transfer; after it expires, MDS removes the operator from memory
mds.schduler.transfer.limitSec=60
-# 减一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for removing a replica; after it expires, MDS removes the operator from memory
mds.scheduler.remove.limitSec=300
-# 增加一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for adding a replica; after it expires, MDS removes the operator from memory
mds.scheduler.add.limitSec=1800
-# change一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for changing a replica; after it expires, MDS removes the operator from memory
mds.scheduler.change.limitSec=1800
# Scan operator timeout (seconds)
mds.scheduler.scan.limitSec=180
-# copyset数量极差不能超过均值的百分比
+# The range (max - min) of copyset counts must not exceed this percentage of the mean
mds.scheduler.copysetNumRangePercent=0.05
-# chunkserver上copyset的scatte-rwidth不能超过最小值的百分比
+# The scatter-width of copysets on a chunkserver must not exceed this percentage of the minimum
mds.schduler.scatterWidthRangePerent=0.2
-# 一个server上超过一定数量的chunkserver offline, 不做恢复
+# If more than this number of chunkservers on one server are offline, no recovery is performed
mds.chunkserver.failure.tolerance=3
-# chunkserver启动coolingTimeSec_后才可以作为target leader, 单位是s
-# TODO(lixiaocui): 续得一定程度上与快照的时间间隔方面做到相关
+# A chunkserver may become a target leader only coolingTimeSec_ after it starts, in seconds
+# TODO(lixiaocui): this should be correlated to some extent with the snapshot interval
mds.scheduler.chunkserver.cooling.timeSec=1800
# ScanScheduler: scan start hour in one day ([0-23])
mds.scheduler.scan.startHour=0
@@ -102,104 +102,104 @@ mds.scheduler.scan.concurrent.per.pool=10
mds.scheduler.scan.concurrent.per.chunkserver=1
#
-# 心跳相关配置,单位为ms
+# Heartbeat configuration, in ms
#
-# chunkserver和mds的心跳间隔
+# Heartbeat interval between chunkserver and MDS
mds.heartbeat.intervalMs=10000
-# chunkserver和mds间心跳miss的时间
+# Time after which a heartbeat between chunkserver and MDS counts as missed
mds.heartbeat.misstimeoutMs=30000
-# mds在心跳miss后offlinetimeout被标记为offline
+# MDS marks a chunkserver offline offlinetimeout after its heartbeats go missing
mds.heartbeat.offlinetimeoutMs=1800000
-# mds启动后延迟一定时间开始指导chunkserver删除物理数据
-# 需要延迟删除的原因在代码中备注
+# After MDS starts, it waits for a while before directing chunkservers to delete physical data
+# The reason for the delayed deletion is noted in the code
mds.heartbeat.clean_follower_afterMs=1200000
#
-# namespace cache相关
+# Namespace cache
#
-# namestorage的缓存大小,为0表示不缓存
-# 按照每个文件最小10GB的空间预算。算上超售(2倍)
-# 文件数量 = 5PB/10GB ~= 524288 个文件
-# sizeof(namespace对象) * 524288 ~= 89Byte *524288 ~= 44MB 空间
-# 16MB chunk大小, 1个segment 1GB
-# sizeof(segment 对象) * 2621440 ~=(32 + (1024/16)*12)* 2621440 ~= 1.95 GB
-# 数据量:3GB左右
-# 记录数量:524288+2621440 ~= 300w左右
+# Cache size of namestorage; 0 means no caching
+# Budgeting a minimum of 10GB of space per file, and accounting for overselling (2x):
+# Number of files = 5PB / 10GB ~= 524288 files
+# sizeof(namespace object) * 524288 ~= 89 bytes * 524288 ~= 44MB of space
+# With a 16MB chunk size and 1GB per segment:
+# sizeof(segment object) * 2621440 ~= (32 + (1024/16)*12) * 2621440 ~= 1.95GB
+# Data volume: about 3GB
+# Record count: 524288 + 2621440 ~= about 3 million
mds.cache.count=100000
#
# mds file record settings
#
-# mds file记录过期时间,单位us
+# Expiration time of MDS file records, in us
mds.file.expiredTimeUs=5000000
-# mds后台扫描线程扫描file记录间隔时间,单位us
+# Interval at which the MDS background thread scans file records, in us
mds.file.scanIntevalTimeUs=500000
#
# auth settings
#
-# root用户密码
+# Root user password
mds.auth.rootUserName=root
mds.auth.rootPassword=root_password
#
# file lock setting
#
-# mds的文件锁桶大小
+# File lock bucket size for MDS
mds.filelock.bucketNum=8
#
# topology config
#
-# Toplogy 定期刷新入数据库的时间间隔
+# Interval at which topology is periodically flushed to the database
mds.topology.TopologyUpdateToRepoSec=60
-# 请求chunkserver上创建全部copyset的超时时间
+# Timeout for requesting creation of all copysets on a chunkserver
mds.topology.CreateCopysetRpcTimeoutMs=10000
-# 请求chunkserver上创建copyset重试次数
+# Retry count for requesting copyset creation on a chunkserver
mds.topology.CreateCopysetRpcRetryTimes=20
-# 请求chunkserver上创建copyset重试间隔
+# Retry interval for requesting copyset creation on a chunkserver
mds.topology.CreateCopysetRpcRetrySleepTimeMs=1000
-# Topology模块刷新metric时间间隔
+# Interval at which the topology module refreshes metrics
mds.topology.UpdateMetricIntervalSec=10
-#和mds.chunkserver.failure.tolerance设置有关,一个zone 标准配置20台节点,如果允许3台节点failover,
-#那么剩余17台机器需要承载原先20台机器的空间,17/20=0.85,即使用量超过这个值即不再往这个池分配,
-#具体分为来两种情况, 当不使用chunkfilepool,物理池限制使用百分比,当使用 chunkfilepool 进行chunkfilepool分配时需预留failover空间,
+# Related to the mds.chunkserver.failure.tolerance setting. A zone is typically provisioned with 20 nodes; if 3 nodes are allowed to fail over,
+# the remaining 17 machines must carry the space of the original 20, and 17/20 = 0.85. That is, once usage exceeds this value, no new space is allocated from this pool.
+# There are two cases: without a chunkfilepool, the physical pool limits the usage percentage; with a chunkfilepool, failover space must be reserved when allocating from it.
mds.topology.PoolUsagePercentLimit=85
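
The 85% default follows directly from the failover arithmetic above; as a sketch (illustrative only):

    def pool_usage_limit_percent(nodes_per_zone: int = 20,
                                 failover_nodes: int = 3) -> int:
        # Survivors must absorb the whole pool, so usable capacity is capped
        # at survivors/total of the raw space: 17/20 -> 85%.
        survivors = nodes_per_zone - failover_nodes
        return survivors * 100 // nodes_per_zone
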
-# 多pool选pool策略 0:Random, 1:Weight
+# Pool selection strategy with multiple pools: 0:Random, 1:Weight
mds.topology.choosePoolPolicy=0
# enable LogicalPool ALLOW/DENY status
mds.topology.enableLogicalPoolStatus=false
#
# copyset config
-# 默认值,为0时不启用
+# Default values; a value of 0 disables the option
#
-# 生成copyset重试次数
+# Retry count for generating copysets
mds.copyset.copysetRetryTimes=10
-# 所有chunkserver的scatterWidth需满足的最大方差
+# Maximum variance that the scatterWidth of all chunkservers must satisfy
mds.copyset.scatterWidthVariance=0
-# 所有chunkserver的scatterWidth需满足的最大标准差
+# Maximum standard deviation that the scatterWidth of all chunkservers must satisfy
mds.copyset.scatterWidthStandardDevation=0
-# 所有chunkserver的scatterWidth需满足的最大极差
+# Maximum range that the scatterWidth of all chunkservers must satisfy
mds.copyset.scatterWidthRange=0
-# 所有chunkserver的scatterWidth偏离均值的百分比
-# scatterwidth偏移百分比设置过大,导致部分机器scatterwidth过小,影响机器恢复时间,恢复
-# 时间会导致集群的可靠性降低;其次,会导致部分机器scatterwith过大,某些chunkserver上的
-# copyset散落在各机器上,其他机器一旦写入,这些scatter-with较大的机器成为热点
-# scatterwidth偏移百分比设置过小,导致scatterwidth平均程度要求更大,copyset算法要求越高,
-# 导致算法可能算不出理想结果,建议设置值为20
+# Percentage by which the scatterWidth of any chunkserver may deviate from the mean.
+# If set too large, some machines end up with a scatterWidth that is too small, lengthening
+# machine recovery time and reducing cluster reliability; others end up with a scatterWidth
+# that is too large, their copysets scattered across many machines, which become hotspots
+# once writes arrive. If set too small, scatterWidth must be more uniform and the copyset
+# algorithm may fail to find an acceptable solution. A value of 20 is recommended.
mds.copyset.scatterWidthFloatingPercentage=20
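
A sketch of the band that this percentage defines around the mean scatter-width (illustrative only):

    def scatter_width_band(mean: float, floating_percentage: float = 20.0):
        # Each chunkserver's scatter-width must stay within +/- pct of the mean.
        delta = mean * floating_percentage / 100.0
        return mean - delta, mean + delta
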
#
# curvefs config
#
-# curvefs的默认chunk size大小,16MB = 16*1024*1024 = 16777216
+# Default chunk size for curvefs: 16MB = 16*1024*1024 = 16777216
mds.curvefs.defaultChunkSize=16777216
-# curvefs的默认segment size大小,1GB = 1*1024*1024*1024 = 1073741824
+# Default segment size for curvefs: 1GB = 1*1024*1024*1024 = 1073741824
mds.curvefs.defaultSegmentSize=1073741824
-# curvefs的默认最小文件大小,10GB = 10*1024*1024*1024 = 10737418240
+# Default minimum file size for curvefs: 10GB = 10*1024*1024*1024 = 10737418240
mds.curvefs.minFileLength=10737418240
-# curvefs的默认最大文件大小,20TB = 20*1024*1024*1024*1024 = 21990232555520
+# Default maximum file size for curvefs: 20TB = 20*1024*1024*1024*1024 = 21990232555520
mds.curvefs.maxFileLength=21990232555520
# smallest read/write unit for volume, support |512| and |4096|
mds.curvefs.blockSize=4096
@@ -207,29 +207,29 @@ mds.curvefs.blockSize=4096
#
# chunkseverclient config
#
-# rpc 超时时间
+# RPC timeout
mds.chunkserverclient.rpcTimeoutMs=500
-# rpc 重试次数
+# RPC retry count
mds.chunkserverclient.rpcRetryTimes=5
-# rpc 重试时间间隔
+# RPC retry interval
mds.chunkserverclient.rpcRetryIntervalMs=500
-# 从copyset的每个chunkserver getleader的重试的最大轮次
+# Maximum number of getleader retry rounds across the chunkservers of a copyset
mds.chunkserverclient.updateLeaderRetryTimes=5
-# 从copyset的每个chunkserver getleader的每一轮的间隔,需大于raft选主的时间
+# Interval between getleader rounds across the chunkservers of a copyset; must be longer than a raft leader election takes
mds.chunkserverclient.updateLeaderRetryIntervalMs=5000
#
# snapshotclone config
#
-# snapshot clone server 地址
+# Snapshot clone server address
mds.snapshotcloneclient.addr=127.0.0.1:5555 # __CURVEADM_TEMPLATE__ ${cluster_snapshotclone_proxy_addr} __CURVEADM_TEMPLATE__
#
# common options
#
-# 日志存放文件夹
+# Log storage folder
mds.common.logDir=./  # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
-# 单元测试情况下
+# For unit tests
# mds.common.logDir=./runlog/
#
diff --git a/conf/py_client.conf b/conf/py_client.conf
index cb7999c5e4..5460949092 100644
--- a/conf/py_client.conf
+++ b/conf/py_client.conf
@@ -1,29 +1,29 @@
#
-################### mds一侧配置信息 ##################
+################### MDS-side configuration ##################
#
-# mds的地址信息,对于mds集群,地址以逗号隔开
+# MDS address; for an MDS cluster, addresses are separated by commas
mds.listen.addr=127.0.0.1:6666
-# 初始化阶段向mds注册开关,默认为开
+# Whether to register with MDS during initialization, on by default
mds.registerToMDS=false
-# 与mds通信的rpc超时时间
+# RPC timeout for communication with MDS
mds.rpcTimeoutMS=500
-# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值
+# Maximum RPC timeout for communication with MDS; the exponential-backoff timeout cannot exceed this value
mds.maxRPCTimeoutMS=2000
-# 与mds通信重试总时间
+# Total retry time for communication with MDS
mds.maxRetryMS=8000
-# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数
+# Switch MDS once consecutive retries on the current one exceed this limit; the count includes timeout retries
mds.maxFailedTimesBeforeChangeMDS=2
-# 与MDS一侧保持一个lease时间内多少次续约
+# Number of lease renewals with MDS within one lease period
mds.refreshTimesPerLease=4
-# mds RPC接口每次重试之前需要先睡眠一段时间
+# The MDS RPC interface sleeps for a while before each retry
mds.rpcRetryIntervalUS=100000
# The normal retry times for trigger wait strategy
@@ -36,91 +36,91 @@ mds.maxRetryMsInIOPath=86400000
mds.waitSleepMs=10000
#
-################# metacache配置信息 ################
+################# Metacache configuration ################
#
-# 获取leader的rpc超时时间
+# RPC timeout for getting the leader
metacache.getLeaderTimeOutMS=500
-# 获取leader的重试次数
+# Number of retries for getting the leader
metacache.getLeaderRetry=5
-# 获取leader接口每次重试之前需要先睡眠一段时间
+# The get-leader interface sleeps for a while before each retry
metacache.rpcRetryIntervalUS=100000
#
-############### 调度层的配置信息 #############
+############### Scheduling layer configuration #############
#
-# 调度层队列大小,每个文件对应一个队列
-# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。
+# Scheduling layer queue size, one queue per file
+# The queue holds asynchronous IO tasks, so its depth affects the client's overall throughput.
schedule.queueCapacity=1000000
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of execution threads for the queue
+# An execution thread simply dequeues an IO task, sends it to the network, and
+# returns for the next task. From dequeue to finishing the RPC send takes about
+# 20us-100us: ~20us normally, ~100us when the leader must be fetched during the
+# send. A single thread sustains roughly 100k-500k requests/s, which is enough.
schedule.threadpoolSize=1
-# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程
-# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回,
-# 这样libcurve不占用qemu的线程,不阻塞其异步调用
+# Task queue introduced to isolate the QEMU-side thread, since QEMU has only one IO thread
+# When QEMU calls the AIO interface, the call is simply pushed onto the task queue and returns,
+# so libcurve does not occupy QEMU's thread or block its asynchronous calls
isolation.taskQueueCapacity=1000000
-# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程
+# Thread pool size of the task queue isolating QEMU threads; defaults to 1 thread
isolation.taskThreadPoolSize=1
#
-################ 与chunkserver通信相关配置 #############
+################ Configuration for communication with chunkserver #############
#
-# 读写接口失败的OP之间重试睡眠
+# Sleep between retries of failed read/write OPs
chunkserver.opRetryIntervalUS=100000
-# 失败的OP重试次数
+# Number of retries for failed OPs
chunkserver.opMaxRetry=2500000
-# 与chunkserver通信的rpc超时时间
+# RPC timeout for communication with chunkserver
chunkserver.rpcTimeoutMS=1000
-# 重试请求之间睡眠最长时间
-# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间
-# 这个时间最大为maxRetrySleepIntervalUs
+# Maximum sleep time between retried requests
+# The sleep time is increased when the network is congested or the chunkserver is overloaded,
+# up to at most maxRetrySleepIntervalUs
chunkserver.maxRetrySleepIntervalUS=8000000
-# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略
-# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间
-# 这个时间最大为maxTimeoutMS
+# Maximum RPC timeout for retried requests; the timeout follows an exponential-backoff policy
+# The RPC timeout is increased when timeouts are caused by network congestion,
+# up to at most maxTimeoutMS
chunkserver.maxRPCTimeoutMS=8000
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# If this value is exceeded, a health check is performed; if it fails, the chunkserver is marked unstable
chunkserver.maxStableTimeoutTimes=10
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# Timeout of the health-check request after consecutive RPC timeouts on a chunkserver
chunkserver.checkHealthTimeoutMs=100
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on the same server exceeds this value,
+# all chunkservers on it are marked unstable
chunkserver.serverStableThreshold=3
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang
-# 真正宕机的情况下,请求重试一定次数后会处理完成
-# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避
+# Unstable may also be triggered when the underlying chunkserver is under heavy pressure
+# Because the copyset leader may change, the request timeout is reset to the default value, which can cause IO to hang
+# If the machine is really down, the request completes after a certain number of retries
+# If retries keep going, it is not a downtime case, and the timeout should still enter the exponential-backoff logic
+# Once a request has been retried more than this many times, its timeout always enters exponential backoff
chunkserver.minRetryTimesForceTimeoutBackoff=5
-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# When an RPC has been retried more than maxRetryTimesBeforeConsiderSuspend times,
+# it is recorded as a suspended IO and the metric raises an alert
chunkserver.maxRetryTimesBeforeConsiderSuspend=20
#
-################# 文件级别配置项 #############
+################# File-level configuration #############
#
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; inflight RPCs are tracked per file
global.fileMaxInFlightRPCNum=64
-# 文件IO下发到底层chunkserver最大的分片KB
+# Maximum split size, in KB, of file IO dispatched to the underlying chunkserver
global.fileIOSplitMaxSizeKB=64
# minimal open file limit
@@ -128,22 +128,22 @@ global.fileIOSplitMaxSizeKB=64
global.minOpenFileLimit=0
#
-################# log相关配置 ###############
+################# Log configuration ###############
#
-# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
global.logLevel=0
-# 设置log的路径
+# Log path
global.logPath=/data/log/curve/
-# 单元测试情况下
+# For unit tests
# logpath=./runlog/
#
-############### metric 配置信息 #############
+############### Metric configuration #############
#
global.metricDummyServerStartPort=10000
#
-# session map文件,存储打开文件的filename到path的映射
+# Session map file, storing the filename-to-path mapping of opened files
#
global.sessionMapPath=./session_map.json
diff --git a/conf/snap_client.conf b/conf/snap_client.conf
index a643e44461..427f521663 100644
--- a/conf/snap_client.conf
+++ b/conf/snap_client.conf
@@ -1,29 +1,29 @@
#
-################### mds一侧配置信息 ##################
+################### MDS-side configuration ##################
#
-# mds的地址信息,对于mds集群,地址以逗号隔开
+# MDS address; for an MDS cluster, addresses are separated by commas
mds.listen.addr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__
-# 初始化阶段向mds注册开关,默认为开
+# Whether to register with MDS during initialization, on by default
mds.registerToMDS=false
-# 与mds通信的rpc超时时间
+# RPC timeout for communication with MDS
mds.rpcTimeoutMS=500
-# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值
+# Maximum RPC timeout for communication with MDS; the exponential-backoff timeout cannot exceed this value
mds.maxRPCTimeoutMS=2000
-# 与mds通信重试总时间
+# Total retry time for communication with MDS
mds.maxRetryMS=8000
-# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数
+# Switch MDS once consecutive retries on the current one exceed this limit; the count includes timeout retries
mds.maxFailedTimesBeforeChangeMDS=2
-# 与MDS一侧保持一个lease时间内多少次续约
+# Number of lease renewals with MDS within one lease period
mds.refreshTimesPerLease=4
-# mds RPC接口每次重试之前需要先睡眠一段时间
+# The MDS RPC interface sleeps for a while before each retry
mds.rpcRetryIntervalUS=100000
# The normal retry times for trigger wait strategy
@@ -36,111 +36,111 @@ mds.maxRetryMsInIOPath=86400000
mds.waitSleepMs=10000
#
-################# metacache配置信息 ################
+################# Metacache configuration ################
#
-# 获取leader的rpc超时时间
+# RPC timeout for getting the leader
metacache.getLeaderTimeOutMS=500
-# 获取leader的backup request超时时间
+# Backup-request timeout for getting the leader
metacache.getLeaderBackupRequestMS=100
-# getleaer backup request使用的load balancer方法
+# Load balancer method used by the getleader backup request
metacache.getLeaderBackupRequestLbName=rr
-# 获取leader的重试次数
+# Number of retries for getting the leader
metacache.getLeaderRetry=5
-# 获取leader接口每次重试之前需要先睡眠一段时间
+# The get-leader interface sleeps for a while before each retry
metacache.rpcRetryIntervalUS=100000
#
-############### 调度层的配置信息 #############
+############### Scheduling layer configuration #############
#
-# 调度层队列大小,每个文件对应一个队列
-# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。
+# Scheduling layer queue size, one queue per file
+# The queue holds asynchronous IO tasks, so its depth affects the client's overall throughput.
schedule.queueCapacity=1000000
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of execution threads for the queue
+# An execution thread simply dequeues an IO task, sends it to the network, and
+# returns for the next task. From dequeue to finishing the RPC send takes about
+# 20us-100us: ~20us normally, ~100us when the leader must be fetched during the
+# send. A single thread sustains roughly 100k-500k requests/s, which is enough.
schedule.threadpoolSize=1
-# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程
-# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回,
-# 这样libcurve不占用qemu的线程,不阻塞其异步调用
+# Task queue introduced to isolate the QEMU-side thread, since QEMU has only one IO thread
+# When QEMU calls the AIO interface, the call is simply pushed onto the task queue and returns,
+# so libcurve does not occupy QEMU's thread or block its asynchronous calls
isolation.taskQueueCapacity=1000000
-# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程
+# Thread pool size of the task queue isolating QEMU threads; defaults to 1 thread
isolation.taskThreadPoolSize=1
#
-################ 与chunkserver通信相关配置 #############
+################ Configuration for communication with chunkserver #############
#
-# 读写接口失败的OP之间重试睡眠
+# Sleep between retries of failed read/write OPs
chunkserver.opRetryIntervalUS=100000
-# 失败的OP重试次数
+# Number of retries for failed OPs
chunkserver.opMaxRetry=50
-# 与chunkserver通信的rpc超时时间
+# RPC timeout for communication with chunkserver
chunkserver.rpcTimeoutMS=1000
-# 重试请求之间睡眠最长时间
-# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间
-# 这个时间最大为maxRetrySleepIntervalUs
+# Maximum sleep time between retried requests
+# The sleep time is increased when the network is congested or the chunkserver is overloaded,
+# up to at most maxRetrySleepIntervalUs
chunkserver.maxRetrySleepIntervalUS=8000000
-# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略
-# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间
-# 这个时间最大为maxTimeoutMS
+# Maximum RPC timeout for retried requests; the timeout follows an exponential-backoff policy
+# The RPC timeout is increased when timeouts are caused by network congestion,
+# up to at most maxTimeoutMS
chunkserver.maxRPCTimeoutMS=16000
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# If this value is exceeded, a health check is performed; if it fails, the chunkserver is marked unstable
chunkserver.maxStableTimeoutTimes=64
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# Timeout of the health-check request after consecutive RPC timeouts on a chunkserver
chunkserver.checkHealthTimeoutMs=100
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on the same server exceeds this value,
+# all chunkservers on it are marked unstable
chunkserver.serverStableThreshold=3
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang
-# 真正宕机的情况下,请求重试一定次数后会处理完成
-# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避
+# Unstable may also be triggered when the underlying chunkserver is under heavy pressure
+# Because the copyset leader may change, the request timeout is reset to the default value, which can cause IO to hang
+# If the machine is really down, the request completes after a certain number of retries
+# If retries keep going, it is not a downtime case, and the timeout should still enter the exponential-backoff logic
+# Once a request has been retried more than this many times, its timeout always enters exponential backoff
chunkserver.minRetryTimesForceTimeoutBackoff=5
-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# When an RPC has been retried more than maxRetryTimesBeforeConsiderSuspend times,
+# it is recorded as a suspended IO and the metric raises an alert
chunkserver.maxRetryTimesBeforeConsiderSuspend=20
#
-################# 文件级别配置项 #############
+################# File-level configuration #############
#
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; inflight RPCs are tracked per file
global.fileMaxInFlightRPCNum=64
-# 文件IO下发到底层chunkserver最大的分片KB
+# Maximum split size, in KB, of file IO dispatched to the underlying chunkserver
global.fileIOSplitMaxSizeKB=64
#
-################# log相关配置 ###############
+################# Log configuration ###############
#
-# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
global.logLevel=0
-# 设置log的路径
+# Log path
global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
-# 单元测试情况下
+# For unit tests
# logpath=./runlog/
#
-############### metric 配置信息 #############
+############### Metric configuration #############
#
global.metricDummyServerStartPort=9000
@@ -149,7 +149,7 @@ global.metricDummyServerStartPort=9000
global.minOpenFileLimit=0
#
-# session map文件,存储打开文件的filename到path的映射
+# Session map file, storing the filename-to-path mapping of opened files
#
global.sessionMapPath=./session_map.json
diff --git a/conf/snapshot_clone_server.conf b/conf/snapshot_clone_server.conf
index 1c043686cd..70a3deb864 100644
--- a/conf/snapshot_clone_server.conf
+++ b/conf/snapshot_clone_server.conf
@@ -1,18 +1,18 @@
#
# curvefs client options
#
-# client配置文件位置
+# Client configuration file location
client.config_path=conf/cs_client.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/snap_client.conf __CURVEADM_TEMPLATE__
-# mds root 用户名
+# MDS root username
mds.rootUser=root
-# mds root 密码
+# MDS root password
mds.rootPassword=root_password
-# 调用client方法的重试总时间
+# Total retry time for calling client methods
client.methodRetryTimeSec=300
-# 调用client方法重试间隔时间
+# Retry interval for calling client methods
client.methodRetryIntervalMs=5000
-# 日志文件位置
+# Log file location
log.dir=./ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
#
@@ -26,61 +26,61 @@ s3.config_path=./conf/s3.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/s3.conf __
server.address=127.0.0.1:5556 # __CURVEADM_TEMPLATE__ ${service_addr}:${service_port} __CURVEADM_TEMPLATE__
server.subnet=127.0.0.0/24
server.port=5556 # __CURVEADM_TEMPLATE__ ${service_port} __CURVEADM_TEMPLATE__
-# 调用client异步方法重试总时间
+# Total retry time for calling asynchronous client methods
server.clientAsyncMethodRetryTimeSec=300
-# 调用client异步方法重试时间间隔
+# Retry interval for calling asynchronous client methods
server.clientAsyncMethodRetryIntervalMs=5000
-# 快照工作线程数
+# Number of snapshot worker threads
server.snapshotPoolThreadNum=256
-# 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms)
+# Interval at which the snapshot background thread scans the waiting and working queues (unit: ms)
server.snapshotTaskManagerScanIntervalMs=1000
-# 转储chunk分片大小
+# Split size of dumped chunks
# for nos, pls set to 1048576
server.chunkSplitSize=8388608
-# CheckSnapShotStatus调用间隔
+# CheckSnapShotStatus call interval
server.checkSnapshotStatusIntervalMs=1000
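
How the split size translates into per-chunk shard counts, as a rough sketch (illustrative only; assumes the 16MB chunk size configured elsewhere in this patch):

    def shards_per_chunk(chunk_size: int = 16 * 1024 * 1024,
                         split_size: int = 8388608) -> int:
        # Each chunk is dumped in split_size pieces: 16MB / 8MB = 2 shards,
        # or 16 shards with the 1MB value suggested for nos.
        return chunk_size // split_size
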
-# 最大快照数
+# Maximum number of snapshots
server.maxSnapshotLimit=1024
-# 同时执行转储的线程数
+# Number of threads executing dumps concurrently
server.snapshotCoreThreadNum=64
-# mds session 时间
+# MDS session duration
server.mdsSessionTimeUs=5000000
-# 每个线程同时进行ReadChunkSnapshot和转储的快照分片数量
+# Number of snapshot shards per thread on which ReadChunkSnapshot and dumping proceed concurrently
server.readChunkSnapshotConcurrency=16
# for clone
-# 用于Lazy克隆元数据部分的线程池线程数
+# Number of thread-pool threads for the metadata part of lazy clones
server.stage1PoolThreadNum=256
-# 用于Lazy克隆数据部分的线程池线程数
+# Number of thread-pool threads for the data part of lazy clones
server.stage2PoolThreadNum=256
-# 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池线程数
+# Number of thread-pool threads for other control-plane requests such as non-lazy clones and clone deletion
server.commonPoolThreadNum=256
-# CloneTaskManager 后台线程扫描间隔
+# Scan interval of the CloneTaskManager background thread
server.cloneTaskManagerScanIntervalMs=1000
-# clone chunk分片大小
+# Clone chunk split size
# for nos, pls set to 65536
server.cloneChunkSplitSize=1048576
-# 克隆临时目录
+# Temporary directory for clones
server.cloneTempDir=/clone
-# CreateCloneChunk同时进行的异步请求数量
+# Number of concurrent asynchronous CreateCloneChunk requests
server.createCloneChunkConcurrency=64
-# RecoverChunk同时进行的异步请求数量
+# Number of concurrent asynchronous RecoverChunk requests
server.recoverChunkConcurrency=64
-# CloneServiceManager引用计数后台扫描每条记录间隔
+# Interval between records in the CloneServiceManager reference-count background scan
server.backEndReferenceRecordScanIntervalMs=500
-# CloneServiceManager引用计数后台扫描每轮记录间隔
+# Interval between rounds of the CloneServiceManager reference-count background scan
server.backEndReferenceFuncScanIntervalMs=3600000
#
-# etcd相关配置
+# etcd-related configuration
#
-# etcd地址
+# etcd address
etcd.endpoint=127.0.0.1:2379 # __CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEADM_TEMPLATE__
-# client建立连接的超时时间
+# Timeout for the etcd client to establish a connection
etcd.dailtimeoutMs=5000
-# client进行put/get/txn等操作的超时时间
+# Timeout for client put/get/txn and other operations
etcd.operation.timeoutMs=5000
-# client操作失败可以重试的次数
+# Number of times a failed client operation can be retried
etcd.retry.times=3
# wait dlock timeout
etcd.dlock.timeoutMs=10000
@@ -93,20 +93,20 @@ etcd.auth.password=
#
-# leader选举相关参数
+# Leader-election parameters
#
-# leader lock名称
+# Leader lock name
leader.campagin.prefix=snapshotcloneserverleaderlock
-# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s)
-# 该值和etcd集群election timeout相关.
-# etcd的server端限制了该值最小为1.5 * election timeout
-# 建议设置etcd集群election timeout为1s
+# A session is created during leader election; the unit is seconds (the Go-side
+# interface also takes this value in seconds). It is related to the etcd
+# cluster's election timeout: the etcd server requires it to be at least
+# 1.5 * election timeout, so setting the etcd election timeout to 1s is recommended
leader.session.intersec=5
-# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间
-# 内未当选leader会返回错误
+# Timeout for the leader election: 0 blocks until the election succeeds, while a
+# value greater than 0 returns an error if no leader is elected within electionTimeoutMs
leader.election.timeoutms=0
#
-# dummyserver相关配置
+# Dummy server configuration
#
server.dummy.listen.port=8081 # __CURVEADM_TEMPLATE__ ${service_dummy_port} __CURVEADM_TEMPLATE__
diff --git a/conf/tools.conf b/conf/tools.conf
index 545297d92c..42be38e27c 100644
--- a/conf/tools.conf
+++ b/conf/tools.conf
@@ -1,16 +1,16 @@
-# mds地址
+# MDS address
mdsAddr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__
# mds dummy port
mdsDummyPort=6700 # __CURVEADM_TEMPLATE__ ${cluster_mds_dummy_port} __CURVEADM_TEMPLATE__
-# 发送rpc的超时时间
+# Timeout for sending RPCs
rpcTimeout=500
-# rpc重试次数
+# RPC retry count
rpcRetryTimes=5
# the rpc concurrency to chunkserver
rpcConcurrentNum=10
-# etcd地址
+# etcd address
etcdAddr=127.0.0.1:2379 # __CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEADM_TEMPLATE__
-# snapshot clone server 地址
+# Snapshot clone server address
snapshotCloneAddr= # __CURVEADM_TEMPLATE__ ${cluster_snapshotclone_addr} __CURVEADM_TEMPLATE__
# snapshot clone server dummy port
snapshotCloneDummyPort= # __CURVEADM_TEMPLATE__ ${cluster_snapshotclone_dummy_port} __CURVEADM_TEMPLATE__
diff --git a/curve-ansible/client.ini b/curve-ansible/client.ini
index 8eacc6270c..ecf308581d 100644
--- a/curve-ansible/client.ini
+++ b/curve-ansible/client.ini
@@ -1,7 +1,7 @@
[client]
localhost ansible_ssh_host=127.0.0.1
-# 仅用于生成配置中的mds地址
+# Only used to generate the MDS addresses in the configuration
[mds]
localhost ansible_ssh_host=127.0.0.1
diff --git a/curve-ansible/common_tasks/wait_copysets_status_healthy.yml b/curve-ansible/common_tasks/wait_copysets_status_healthy.yml
index 7121b28042..8200229894 100644
--- a/curve-ansible/common_tasks/wait_copysets_status_healthy.yml
+++ b/curve-ansible/common_tasks/wait_copysets_status_healthy.yml
@@ -15,7 +15,7 @@
# limitations under the License.
#
-# 等待copyset健康,每个一段时间检查一次,一共检查若干次,成功则break,如果一直不健康则报错
+# Wait for copysets to become healthy: check periodically, up to a fixed number of times; break on success, and raise an error if they stay unhealthy
- name: check copysets status until healthy
  shell: curve_ops_tool copysets-status --confPath={{ curve_ops_tool_config }} | grep "{{ defined_copysets_status }}"
  register: result
diff --git a/curve-ansible/group_vars/mds.yml b/curve-ansible/group_vars/mds.yml
index f575cb79d5..689b1414eb 100644
--- a/curve-ansible/group_vars/mds.yml
+++ b/curve-ansible/group_vars/mds.yml
@@ -15,7 +15,7 @@
# limitations under the License.
#
-# 集群拓扑信息
+# Cluster topology information
cluster_map:
  servers:
    - name: server1
diff --git a/curve-ansible/roles/generate_config/defaults/main.yml b/curve-ansible/roles/generate_config/defaults/main.yml
index 4d7dfe5514..36d14e676b 100644
--- a/curve-ansible/roles/generate_config/defaults/main.yml
+++ b/curve-ansible/roles/generate_config/defaults/main.yml
@@ -15,7 +15,7 @@
# limitations under the License.
#
-# 通用配置
+# General configuration
curve_root_username: root
curve_root_password: root_password
curve_file_timeout_s: 30
@@ -25,7 +25,7 @@ min_file_length: 10737418240
max_file_length: 21990232555520
file_expired_time_us: 5000000
-# mds配置默认值
+# Default values for MDS configuration
mds_etcd_dailtimeout_ms: 5000
mds_etcd_operation_timeout_ms: 5000
mds_etcd_retry_times: 3
@@ -94,7 +94,7 @@ throttle_bps_min_in_MB: 120
throttle_bps_max_in_MB: 260
throttle_bps_per_GB_in_MB: 0.3
-# chunkserver配置默认值
+# Default values for chunkserver configuration
chunkserver_enable_external_server: true
chunkserver_meta_page_size: 4096
chunkserver_location_limit: 3000
@@ -165,7 +165,7 @@ chunkserver_trash_expire_after_sec: 300
chunkserver_trash_scan_period_sec: 120
chunkserver_common_log_dir: ./runlog/
-# 快照克隆配置默认值
+# Default values for snapshot clone configuration
snap_client_config_path: /etc/curve/snap_client.conf
snap_client_method_retry_time_sec: 120
snap_client_method_retry_interval_ms: 5000
@@ -201,7 +201,7 @@ snap_leader_session_inter_sec: 5
snap_leader_election_timeout_ms: 0
snap_nginx_addr: 127.0.0.1:5555
-# client配置默认值
+# Default values for client configuration
client_register_to_mds: true
client_mds_rpc_timeout_ms: 500
client_mds_max_rpc_timeout_ms: 2000
@@ -244,7 +244,7 @@ client_discard_enable: true
client_discard_granularity: 4096
client_discard_task_delay_ms: 60000
-# nebd默认配置
+# Default values for nebd configuration
client_config_path: /etc/curve/client.conf
nebd_client_sync_rpc_retry_times: 50
nebd_client_rpc_retry_inverval_us: 100000
@@ -259,7 +259,7 @@ nebd_server_heartbeat_timeout_s: 30
nebd_server_heartbeat_check_interval_ms: 3000
nebd_server_response_return_rpc_when_io_error: false
-# s3配置默认值
+# Default values for s3 configuration
s3_http_scheme: 0
s3_verify_ssl: false
s3_user_agent_conf: S3 Browser
@@ -276,15 +276,15 @@ s3_throttle_bpsTotalLimit: 1280
s3_throttle_bpsReadLimit: 1280
s3_throttle_bpsWriteLimit: 1280
-# 运维工具默认值
+# Default values for operation and maintenance tools
tool_rpc_timeout: 500
tool_rpc_retry_times: 5
tool_rpc_concurrent_num: 10
-# snapshotclone_nginx配置
+# snapshotclone_nginx configuration
nginx_docker_internal_port: 80
-# etcd默认配置
+# Default values for etcd configuration
etcd_snapshot_count: 10000
etcd_heartbeat_interval: 100
etcd_election_timeout: 1000
diff --git a/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2 b/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2
index 0e7e65e9cc..ae43478df7 100644
--- a/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2
@@ -1,24 +1,24 @@
#
# Global settings
#
-# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
global.ip={{ ansible_ssh_host }}
global.port={{ chunkserver_base_port }}
global.subnet={{ chunkserver_subnet }}
global.enable_external_server={{ chunkserver_enable_external_server }}
global.external_ip={{ ansible_ssh_host }}
global.external_subnet={{ chunkserver_external_subnet }}
-# chunk大小,一般16MB
+# Chunk size, typically 16MB
global.chunk_size={{ chunk_size }}
-# chunk 元数据页大小,一般4KB
+# Chunk metadata page size, typically 4KB
global.meta_page_size={{ chunkserver_meta_page_size }}
-# clone chunk允许的最长location长度
+# Maximum location length allowed for a clone chunk
global.location_limit={{ chunkserver_location_limit }}
#
# MDS settings
#
-#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777
+# Multiple MDS addresses are supported, separated by commas: 127.0.0.1:6666,127.0.0.1:7777
{% set mds_address=[] -%}
{% for host in groups.mds -%}
{% set mds_ip = hostvars[host].ansible_ssh_host -%}
@@ -26,30 +26,30 @@ global.location_limit={{ chunkserver_location_limit }}
{% set _ = mds_address.append("%s:%s" % (mds_ip, mds_port)) -%}
{% endfor -%}
mds.listen.addr={{ mds_address | join(',') }}
-# 向mds注册的最大重试次数
+# Maximum number of retries when registering with MDS
mds.register_retries={{ chunkserver_register_retries }}
-# 向mds注册的rpc超时时间,一般1000ms
+# RPC timeout for registering with MDS, typically 1000ms
mds.register_timeout={{ chunkserver_register_timeout }}
-# 向mds发送心跳的间隔,一般10s
+# Interval between heartbeats sent to MDS, typically 10s
mds.heartbeat_interval={{ chunkserver_heartbeat_interval }}
-# 向mds发送心跳的rpc超时间,一般1000ms
+# RPC timeout for heartbeats sent to MDS, typically 1000ms
mds.heartbeat_timeout={{ chunkserver_heartbeat_timeout }}
#
# Chunkserver settings
#
-# chunkserver主目录
+# Chunkserver home directory
chunkserver.stor_uri={{ chunkserver_stor_uri }}
-# chunkserver元数据文件
+# Chunkserver metadata file
chunkserver.meta_uri={{ chunkserver_meta_uri }}
-# disk类型
+# Disk type
chunkserver.disk_type={{ chunkserver_disk_type }}
-# raft内部install snapshot带宽上限,一般20MB
+# Bandwidth limit for raft's internal install snapshot, typically 20MB
chunkserver.snapshot_throttle_throughput_bytes={{ chunkserver_snapshot_throttle_throughput_bytes }}
-# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB,
-# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个
-# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而
-# 不是20MB的带宽
+# Check cycles enable finer-grained bandwidth control. With snapshotThroughputBytes=100MB
+# and check cycles=10, for example, the bandwidth is held to 10MB per 1/10 second and does
+# not accumulate: the first 1/10 second's 10MB expires with that window, so the second
+# 1/10 second may still use only 10MB of bandwidth, not 20MB
chunkserver.snapshot_throttle_check_cycles={{ chunkserver_snapshot_throttle_check_cycles }}
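To make the check-cycle semantics above concrete, here is a minimal Python sketch of a non-accumulating per-cycle budget (illustrative only; the names are invented and this is not the chunkserver implementation):

    # Each 1/10 s window gets throughput/cycles bytes; unused budget expires with the window.
    def cycle_budget_bytes(throughput_bytes_per_sec, check_cycles):
        return throughput_bytes_per_sec // check_cycles

    budget = cycle_budget_bytes(100 * 1024 * 1024, 10)   # 10MB per 1/10 s
    sent_in_window = 0                                   # reset at the start of every window

    def try_send(nbytes):
        global sent_in_window
        if sent_in_window + nbytes > budget:
            return False          # over this window's budget; wait for the next window
        sent_in_window += nbytes
        return True

    assert try_send(10 * 1024 * 1024)   # the window's full 10MB is allowed
    assert not try_send(1)              # anything beyond it must wait, even if the last window was idle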
chunkserver.max_inflight_requests={{ chunkserver_max_inflight_requests }}
@@ -64,39 +64,39 @@ test.testcopyset_conf={{ chunkserver_test_testcopyset_conf }}
#
# Copyset settings
#
-# 是否检查任期,一般检查
+# Whether to check the raft term, normally enabled
copyset.check_term={{ chunkserver_copyset_check_term }}
-# 是否关闭raft配置变更的服务,一般不关闭
+# Whether to disable the service for raft configuration changes; normally not disabled
copyset.disable_cli={{ chunkserver_copyset_disable_cli }}
copyset.log_applied_task={{ chunkserver_copyset_log_applied_task }}
-# raft选举超时时间,一般是5000ms
+# Raft election timeout, typically 5000ms
copyset.election_timeout_ms={{ chunkserver_copyset_election_timeout_ms }}
-# raft打快照间隔,一般是1800s,也就是30分钟
+# Raft snapshot interval, typically 1800s, i.e. 30 minutes
copyset.snapshot_interval_s={{ chunkserver_copyset_snapshot_interval_s }}
-# add一个节点,add的节点首先以类似learner的角色拷贝数据
-# 在跟leader差距catchup_margin个entry的时候,leader
-# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定
-# 会commit&apply,catchup_margin较小可以大概率确保learner
-# 后续很快可以加入复制组
+# When a node is added, it first copies data in a learner-like role.
+# Once it is within catchup_margin entries of the leader, the leader
+# attempts to commit the configuration-change entry (in general a
+# proposed entry will definitely be committed & applied). A small catchup_margin
+# makes it very likely that the learner can join the replication group soon afterwards
copyset.catchup_margin={{ chunkserver_copyset_catchup_margin }}
-# copyset chunk数据目录
+# Copyset chunk data directory
copyset.chunk_data_uri={{ chunkserver_copyset_chunk_data_uri }}
-# raft wal log目录
+# Raft WAL log directory
copyset.raft_log_uri={{ chunkserver_copyset_raft_log_uri }}
-# raft元数据目录
+# Raft metadata directory
copyset.raft_meta_uri={{ chunkserver_copyset_raft_meta_uri }}
-# raft snapshot目录
+# Raft snapshot directory
copyset.raft_snapshot_uri={{ chunkserver_copyset_raft_snapshot_uri }}
-# copyset回收目录
+# Copyset recycle directory
copyset.recycler_uri={{ chunkserver_copyset_recycler_uri }}
-# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制
+# Concurrency threshold for loading copysets at chunkserver startup; 0 means no limit
copyset.load_concurrency={{ chunkserver_copyset_load_concurrency }}
-# 检查copyset是否加载完成出现异常时的最大重试次数
+# Maximum number of retries when checking whether copyset loading has finished raises an exception
copyset.check_retrytimes={{ chunkserver_copyset_check_retrytimes }}
-# 当前peer的applied_index与leader上的committed_index差距小于该值
-# 则判定copyset已经加载完成
+# A copyset is considered fully loaded once the gap between the applied_index
+# of the current peer and the committed_index on the leader is smaller than this value
copyset.finishload_margin={{ chunkserver_copyset_finishload_margin }}
-# 循环判定copyset是否加载完成的内部睡眠时间
+# Internal sleep time of the loop that checks whether copysets have finished loading
copyset.check_loadmargin_interval_ms={{ chunkserver_copyset_check_loadmargin_interval_ms }}
# scan copyset interval
copyset.scan_interval_sec={{ chunkserver_copyset_scan_interval_sec }}
@@ -115,26 +115,26 @@ copyset.check_syncing_interval_ms={{ chunkserver_copyset_check_syncing_interval_
#
# Clone settings
#
-# 禁止使用curveclient
+# Disable the use of curveclient
clone.disable_curve_client={{ disable_snapshot_clone }}
-# 禁止使用s3adapter
+# Disable the use of s3adapter
clone.disable_s3_adapter={{ disable_snapshot_clone }}
-# 克隆的分片大小,一般1MB
+# Clone slice size, typically 1MB
clone.slice_size={{ chunkserver_clone_slice_size }}
-# 读clone chunk时是否需要paste到本地
-# 该配置对recover chunk请求类型无效
+# Whether reading a clone chunk should also paste it locally
+# This option has no effect on recover chunk requests
clone.enable_paste={{ chunkserver_clone_enable_paste }}
-# 克隆的线程数量
+# Number of clone threads
clone.thread_num={{ chunkserver_clone_thread_num }}
-# 克隆的队列深度
+# Clone queue depth
clone.queue_depth={{ chunkserver_clone_queue_depth }}
-# curve用户名
+# Curve user name
curve.root_username={{ curve_root_username }}
-# curve密码
+# Curve password
curve.root_password={{ curve_root_password }}
-# client配置文件
+# Client configuration file
curve.config_path={{ chunkserver_client_config_path }}
-# s3配置文件
+# S3 configuration file
s3.config_path={{ chunkserver_s3_config_path }}
# Curve File time to live
curve.curve_file_timeout_s={{ curve_file_timeout_s }}
@@ -142,7 +142,7 @@ curve.curve_file_timeout_s={{ curve_file_timeout_s }}
#
# Local FileSystem settings
#
-# 是否开启使用renameat2,ext4内核3.15以后开始支持
+# Whether to enable renameat2; supported by ext4 since kernel 3.15
fs.enable_renameat2={{ chunkserver_fs_enable_renameat2 }}
#
@@ -163,27 +163,27 @@ storeng.sync_write={{ chunkserver_storeng_sync_write }}
#
# Concurrent apply module
#
-# 并发模块的并发度,一般是10
+# Concurrency of the concurrent apply module, typically 10
wconcurrentapply.size={{ chunkserver_wconcurrentapply_size }}
-# 并发模块线程的队列深度
+# Queue depth of the concurrent apply module threads
wconcurrentapply.queuedepth={{ chunkserver_wconcurrentapply_queuedepth }}
-# 并发模块读线程的并发度,一般是5
+# Concurrency of the concurrent apply read threads, typically 5
rconcurrentapply.size={{ chunkserver_rconcurrentapply_size }}
-# 并发模块读线程的队列深度
+# Queue depth of the concurrent apply read threads
rconcurrentapply.queuedepth={{ chunkserver_rconcurrentapply_queuedepth }}
#
# Chunkfile pool
#
-# 是否开启从chunkfilepool获取chunk,一般是true
+# Whether to obtain chunks from the chunkfilepool, normally true
chunkfilepool.enable_get_chunk_from_pool={{ chunkserver_format_disk }}
-# chunkfilepool目录
+# chunkfilepool directory
chunkfilepool.chunk_file_pool_dir={{ chunkserver_chunkfilepool_chunk_file_pool_dir }}
-# chunkfilepool meta文件路径
+# chunkfilepool meta file path
#chunkfilepool.meta_path=./chunkfilepool.meta
-# chunkfilepool meta文件大小
+# chunkfilepool meta file size
chunkfilepool.cpmeta_file_size={{ chunkserver_chunkfilepool_cpmeta_file_size }}
-# chunkfilepool get chunk最大重试次数
+# Maximum number of retries for chunkfilepool get chunk
chunkfilepool.retry_times=5
# Enable clean chunk
chunkfilepool.clean.enable={{ chunkserver_chunkfilepool_clean_enable }}
@@ -195,34 +195,34 @@ chunkfilepool.clean.throttle_iops={{ chunkserver_chunkfilepool_clean_throttle_io
#
# WAL file pool
#
-# walpool是否共用chunkfilepool,如果为true,则以下配置无效
+# Whether the walpool shares the chunkfilepool; if true, the settings below have no effect
walfilepool.use_chunk_file_pool={{ walfilepool_use_chunk_file_pool }}
-# 是否开启从walfilepool获取chunk,一般是true
+# Whether to obtain chunks from the walfilepool, normally true
walfilepool.enable_get_segment_from_pool={{ chunkserver_format_disk }}
-# walpool目录
+# Walpool directory
walfilepool.file_pool_dir={{ chunkserver_walfilepool_file_pool_dir }}
-# walpool meta文件路径
+# Walpool meta file path
walfilepool.meta_path={{ chunkserver_walfilepool_meta_path }}
-# walpool meta文件大小
+# Walpool meta file size
walfilepool.segment_size={{ chunkserver_walfilepool_segment_size }}
-# WAL metapage大小
+# WAL metapage size
walfilepool.metapage_size={{ chunkserver_walfilepool_metapage_size }}
-# WAL filepool 元数据文件大小
+# WAL filepool metadata file size
walfilepool.meta_file_size={{ chunkserver_walfilepool_meta_file_size }}
-# WAL filepool get chunk最大重试次数
+# Maximum number of retries for WAL filepool get chunk
walfilepool.retry_times={{ chunkserver_walfilepool_retry_times }}
#
# trash settings
#
-# chunkserver回收数据彻底删除的过期时间
+# Expiration time after which recycled chunkserver data is permanently deleted
trash.expire_afterSec={{ chunkserver_trash_expire_after_sec }}
-# chunkserver检查回收数据过期时间的周期
+# Period at which the chunkserver checks recycled data for expiration
trash.scan_periodSec={{ chunkserver_trash_scan_period_sec }}
# common option
#
-# chunkserver 日志存放文件夹
+# Chunkserver log directory
chunkserver.common.logDir={{ chunkserver_common_log_dir }}
-# 单元测试情况下
+# For unit tests
# chunkserver.common.logDir=./runlog/
diff --git a/curve-ansible/roles/generate_config/templates/client.conf.j2 b/curve-ansible/roles/generate_config/templates/client.conf.j2
index 08d4413780..492ac270bf 100644
--- a/curve-ansible/roles/generate_config/templates/client.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/client.conf.j2
@@ -1,8 +1,8 @@
#
-################### mds一侧配置信息 ##################
+################### MDS-side configuration ##################
#
-# mds的地址信息,对于mds集群,地址以逗号隔开
+# MDS address info; for an MDS cluster, addresses are separated by commas
{% set mds_address=[] -%}
{% for host in groups.mds -%}
{% set mds_ip = hostvars[host].ansible_ssh_host -%}
@@ -11,25 +11,25 @@
{% endfor -%}
mds.listen.addr={{ mds_address | join(',') }}
-# 初始化阶段向mds注册开关,默认为开
+# Switch for registering with MDS during initialization, on by default
mds.registerToMDS={{ client_register_to_mds }}
-# 与mds通信的rpc超时时间
+# RPC timeout for communicating with MDS
mds.rpcTimeoutMS={{ client_mds_rpc_timeout_ms }}
-# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值
+# Maximum RPC timeout for communicating with MDS; the exponential-backoff timeout cannot exceed this value
mds.maxRPCTimeoutMS={{ client_mds_max_rpc_timeout_ms }}
-# 与mds通信重试总时间
+# Total retry time for communicating with MDS
mds.maxRetryMS={{ client_mds_max_retry_ms }}
-# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数
+# Switch MDS once consecutive retries on the current one exceed this limit; the count includes timeout retries
mds.maxFailedTimesBeforeChangeMDS={{ client_mds_max_failed_times_before_change_mds }}
-# 与MDS一侧保持一个lease时间内多少次续约
+# Number of lease renewals within one lease period with MDS
mds.refreshTimesPerLease={{ client_mds_refresh_times_per_lease }}
-# mds RPC接口每次重试之前需要先睡眠一段时间
+# The MDS RPC interface sleeps for a while before each retry
mds.rpcRetryIntervalUS={{ client_mds_rpc_retry_interval_us }}
# The normal retry times for trigger wait strategy
@@ -42,104 +42,104 @@ mds.maxRetryMsInIOPath={{ client_mds_max_retry_ms_in_io_path }}
mds.waitSleepMs={{ client_mds_wait_sleep_ms }}
#
-################# metacache配置信息 ################
+################# Metacache configuration ################
#
-# 获取leader的rpc超时时间
+# RPC timeout for getting the leader
metacache.getLeaderTimeOutMS={{ client_metacache_get_leader_timeout_ms }}
-# 获取leader的重试次数
+# Number of retries for getting the leader
metacache.getLeaderRetry={{ client_metacache_get_leader_retry }}
-# 获取leader接口每次重试之前需要先睡眠一段时间
+# The get-leader interface sleeps for a while before each retry
metacache.rpcRetryIntervalUS={{ client_metacache_rpc_retry_interval_us }}
#
-############### 调度层的配置信息 #############
+############### Scheduling layer configuration #############
#
-# 调度层队列大小,每个文件对应一个队列
-# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。
+# Scheduling-layer queue size, one queue per file
+# The queue depth affects the client's overall throughput; the queue holds asynchronous IO tasks
schedule.queueCapacity={{ client_schedule_queue_capacity }}
-# 队列的执行线程数量
-# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从
-# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候
-# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w
-# 性能已经满足需求
+# Number of executor threads for the queue
+# An executor thread dequeues an IO task, sends it over the network, and picks up the next one.
A task starts from +# The RPC request is approximately (20us-100us) from the time the queue is retrieved to the time it is sent, and 20us is the normal time when it is not necessary to obtain a leader +# If a leader needs to be obtained during sending, the time will be around 100us, and the throughput of one thread will be between 10w-50w +# The performance has met the requirements schedule.threadpoolSize={{ client_schedule_threadpool_size }} -# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程 -# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回, -# 这样libcurve不占用qemu的线程,不阻塞其异步调用 +# To isolate the task queue introduced by the QEMU side thread, as there is only one IO thread on the QEMU side +# When the QEMU side calls the AIO interface, it directly pushes the call to the task queue and returns, +# This way, libcurve does not occupy QEMU's threads and does not block its asynchronous calls isolation.taskQueueCapacity={{ client_isolation_task_queue_capacity }} -# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程 +# The size of the task queue thread pool for isolating QEMU threads, with a default value of 1 thread isolation.taskThreadPoolSize={{ client_isolation_task_thread_pool_size }} # -################ 与chunkserver通信相关配置 ############# +################Configuration related to communication with chunkserver############# # -# 读写接口失败的OP之间重试睡眠 +# Retrying sleep between OPs with failed read/write interfaces chunkserver.opRetryIntervalUS={{ client_chunkserver_op_retry_interval_us }} -# 失败的OP重试次数 +# Number of failed OP retries chunkserver.opMaxRetry={{ client_chunkserver_op_max_retry }} -# 与chunkserver通信的rpc超时时间 +# RPC timeout for communication with chunkserver chunkserver.rpcTimeoutMS={{ client_chunkserver_rpc_timeout_ms }} -# 开启基于appliedindex的读,用于性能优化 +# Enable reading based on appliedindex for performance optimization chunkserver.enableAppliedIndexRead={{ client_chunkserver_enable_applied_index_read }} -# 重试请求之间睡眠最长时间 -# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间 -# 这个时间最大为maxRetrySleepIntervalUs +# Maximum sleep time between retry requests +# Because when the network is congested or the chunkserver is overloaded, it is necessary to increase sleep time +# The maximum time for this is maxRetrySleepIntervalUs chunkserver.maxRetrySleepIntervalUS={{ client_chunkserver_max_retry_sleep_interval_us }} -# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略 -# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间 -# 这个时间最大为maxTimeoutMS +# The maximum timeout rpc time for retry requests, which follows an exponential backoff strategy +# Because timeout occurs when the network is congested, it is necessary to increase the RPC timeout time +# The maximum time for this is maxTimeoutMS chunkserver.maxRPCTimeoutMS={{ client_chunkserver_max_rpc_timeout_ms }} -# 同一个chunkserver连续超时上限次数 -# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable +# Maximum number of consecutive timeouts for the same chunkserver +# If this value is exceeded, a health check will be conducted, and if the health check fails, it will be marked as unstable chunkserver.maxStableTimeoutTimes={{ client_chunkserver_max_stable_timeout_times }} -# chunkserver上rpc连续超时后,健康检查请求的超时间 +# The timeout of health check requests after consecutive RPC timeouts on chunkserver chunkserver.checkHealthTimeoutMs={{ client_chunkserver_check_health_timeout_ms }} -# 同一个server上unstable的chunkserver数量超过这个值之后 -# 所有的chunkserver都会标记为unstable +# After the number of unstable chunkservers on the same server exceeds this value +# All chunkservers will be marked as unstable chunkserver.serverStableThreshold={{ client_chunkserver_server_stable_threshold }} -# 当底层chunkserver压力大时,可能也会触发unstable 
-# 同一个chunkserver连续超时上限次数
-# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable
+# Maximum number of consecutive timeouts for the same chunkserver
+# Beyond this value a health check is run; if the health check fails, the chunkserver is marked unstable
chunkserver.maxStableTimeoutTimes={{ client_chunkserver_max_stable_timeout_times }}
-# chunkserver上rpc连续超时后,健康检查请求的超时间
+# Timeout of the health-check request issued after consecutive RPC timeouts on a chunkserver
chunkserver.checkHealthTimeoutMs={{ client_chunkserver_check_health_timeout_ms }}
-# 同一个server上unstable的chunkserver数量超过这个值之后
-# 所有的chunkserver都会标记为unstable
+# Once the number of unstable chunkservers on one server exceeds this value,
+# all chunkservers on that server are marked unstable
chunkserver.serverStableThreshold={{ client_chunkserver_server_stable_threshold }}
-# 当底层chunkserver压力大时,可能也会触发unstable
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang
-# 真正宕机的情况下,请求重试一定次数后会处理完成
-# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑
-# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避
+# Heavy load on the underlying chunkserver may also trigger unstable
+# Since the copyset leader may change, the request timeout is reset to the default, which can cause IO hangs
+# On a real outage, the request completes after a bounded number of retries
+# If retries never stop, it is not an outage, and the timeout must still enter the exponential-backoff logic
+# Once a request has been retried more than this many times, its timeout always enters exponential backoff
chunkserver.minRetryTimesForceTimeoutBackoff={{ client_chunkserver_min_retry_times_force_timeout_backoff }}
-# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候
-# 记为悬挂IO,metric会报警
+# When an RPC has been retried more than maxRetryTimesBeforeConsiderSuspend times,
+# it is recorded as suspended IO and the metric raises an alarm
chunkserver.maxRetryTimesBeforeConsiderSuspend={{ client_chunkserver_max_retry_times_before_consider_suspend }}
#
-################# 文件级别配置项 #############
+################# File-level configuration items #############
#
-# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立
+# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; each file's inflight RPCs are independent
global.fileMaxInFlightRPCNum={{ client_file_max_inflight_rpc_num }}
-# 文件IO下发到底层chunkserver最大的分片KB
+# Maximum split size, in KB, when file IO is issued to the underlying chunkservers
global.fileIOSplitMaxSizeKB={{ client_file_io_split_max_size_kb }}
#
-################# log相关配置 ###############
+################# Log related configuration ###############
#
-# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
+# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
global.logLevel={{ client_log_level }}
-# 设置log的路径
+# Log path
global.logPath={{ client_log_path }}
-# 单元测试情况下
+# For unit tests
# logpath=./runlog/
#
@@ -151,15 +151,15 @@ closefd.timeout={{ client_closefd_timeout_sec }}
closefd.timeInterval={{ client_closefd_time_interval_sec }}
#
-############### metric 配置信息 #############
+############### Metric configuration #############
#
global.metricDummyServerStartPort={{ client_metric_dummy_server_start_port }}
-# 是否关闭健康检查: true/关闭 false/不关闭
+# Whether to turn off the health check: true = off, false = on
global.turnOffHealthCheck={{ client_turn_off_health_check }}
#
-# session map文件,存储打开文件的filename到path的映射
+# Session map file, storing the filename-to-path mapping of open files
#
global.sessionMapPath={{ client_session_map_path }}
diff --git a/curve-ansible/roles/generate_config/templates/mds.conf.j2 b/curve-ansible/roles/generate_config/templates/mds.conf.j2
index 13040fa9ea..7e9b8f39b1 100644
--- a/curve-ansible/roles/generate_config/templates/mds.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/mds.conf.j2
@@ -1,5 +1,5 @@
#
-# mds服务端口
+# MDS service port
#
mds.listen.addr={{ ansible_ssh_host }}:{{ mds_port }}
@@ -8,9 +8,9 @@ global.subnet={{ mds_subnet }}
global.port={{ mds_port }}
#
-# etcd相关配置
+# etcd related configuration
#
-# etcd地址
+# etcd address
{% set etcd_address=[] -%}
{% for host in groups.etcd -%}
{% set etcd_ip = hostvars[host].ansible_ssh_host -%}
@@ -19,11 +19,11 @@ global.port={{ mds_port }}
{% endfor -%}
mds.etcd.endpoint={{ etcd_address | join(',') }}
-# client建立连接的超时时间
+# Timeout for the etcd client to establish a connection
mds.etcd.dailtimeoutMs={{ mds_etcd_dailtimeout_ms }}
-# client进行put/get/txn等操作的超时时间
+# Timeout for etcd client put/get/txn and other operations
mds.etcd.operation.timeoutMs={{ mds_etcd_operation_timeout_ms }}
-# client操作失败可以重试的次数
+# Number of times a failed etcd client operation may be retried
mds.etcd.retry.times={{ mds_etcd_retry_times }}
# wait dlock timeout
mds.etcd.dlock.timeoutMs={{ mds_etcd_dlock_timeout_ms }}
@@ -31,68 +31,68 @@ mds.etcd.dlock.timeoutMs={{ mds_etcd_dlock_timeout_ms }}
mds.etcd.dlock.ttlSec={{ mds_etcd_dlock_ttl_sec }}
#
-# segment分配量统计相关配置
+# Configuration related to segment allocation statistics
#
-# 将内存中的数据持久化到etcd的间隔, 单位ms
+# Interval for persisting in-memory data to etcd, in ms
mds.segment.alloc.periodic.persistInterMs={{ mds_segment_alloc_periodic_persist_inter_ms }}
-# 出错情况下的重试间隔,单位ms
+# Retry interval on error, in ms
mds.segment.alloc.retryInterMs={{ mds_segment_alloc_retry_inter_ms }}
mds.segment.discard.scanIntevalMs={{ mds_segment_discard_scan_interval_ms }}
-# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s)
-# 该值和etcd集群election timeout相关.
-# etcd的server端限制了该值最小为1.5 * election timeout
-# 建议设置etcd集群election timeout为1s
+# A session is created during the leader campaign; the unit is seconds (the Go-side interface also takes seconds)
+# This value is related to the etcd cluster election timeout
+# The etcd server requires it to be at least 1.5 * election timeout,
+# so setting the etcd cluster election timeout to 1s is recommended
mds.leader.sessionInterSec={{ mds_leader_session_inter_sec }}
-# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间
-# 内未当选leader会返回错误
+# Timeout for the leader campaign: if 0, an unsuccessful campaign blocks forever;
+# if greater than 0, an error is returned when no leader is elected within electionTimeoutMs
mds.leader.electionTimeoutMs={{ mds_leader_election_timeout_ms }}
#
-# scheduler相关配置
+# Scheduler related configuration
#
-# copysetScheduler开关
+# copysetScheduler switch
mds.enable.copyset.scheduler={{ mds_enable_copyset_scheduler }}
-# leaderScheduler开关
+# leaderScheduler switch
mds.enable.leader.scheduler={{ mds_enable_leader_scheduler }}
-# recoverScheduler开关
+# recoverScheduler switch
mds.enable.recover.scheduler={{ mds_enable_recover_scheduler }}
-# replicaScheduler开关
+# replicaScheduler switch
mds.enable.replica.scheduler={{ mds_enable_replica_scheduler }}
# Scan scheduler switch
mds.enable.scan.scheduler={{ mds_enable_scan_scheduler }}
-# copysetScheduler 轮次间隔,单位是s
+# copysetScheduler round interval, in seconds
mds.copyset.scheduler.intervalSec={{ mds_copyset_scheduler_interval_sec }}
-# replicaScheduler 轮次间隔,单位是s
+# replicaScheduler round interval, in seconds
mds.replica.scheduler.intervalSec={{ mds_replica_scheduler_interval_sec }}
-# leaderScheduler 轮次间隔,单位是s
+# leaderScheduler round interval, in seconds
mds.leader.scheduler.intervalSec={{ mds_leader_scheduler_interval_sec }}
-# recoverScheduler 轮次间隔,单位是s
+# recoverScheduler round interval, in seconds
mds.recover.scheduler.intervalSec={{ mds_recover_scheduler_interval_sec }}
# Scan scheduler run interval (seconds)
mds.scan.scheduler.intervalSec={{ mds_scan_scheduler_interval_sec }}
-# 每块磁盘上operator的并发度
+# Operator concurrency on each disk
mds.schduler.operator.concurrent={{ mds_schduler_operator_concurrent }}
-# leader变更超时时间, 超时后mds从内存移除该operator
+# Timeout for a leader change; after it expires, MDS removes the operator from memory
mds.schduler.transfer.limitSec={{ mds_schduler_transfer_limit_sec }}
-# 减一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for removing a replica; after it expires, MDS removes the operator from memory
mds.scheduler.remove.limitSec={{ mds_scheduler_remove_limit_sec }}
-# 增加一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for adding a replica; after it expires, MDS removes the operator from memory
mds.scheduler.add.limitSec={{ mds_scheduler_add_limit_sec }}
-# change一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for changing a replica; after it expires, MDS removes the operator from memory
mds.scheduler.change.limitSec={{ mds_scheduler_change_limit_sec }}
# Scan operator timeout (seconds)
mds.scheduler.scan.limitSec={{ mds_scheduler_scan_limit_sec }}
-# copyset数量极差不能超过均值的百分比
+# The range (max-min) of copyset counts must not exceed this percentage of the mean
mds.scheduler.copysetNumRangePercent={{ mds_scheduler_copyset_mum_range_percent }}
-# chunkserver上copyset的scatte-rwidth不能超过最小值的百分比
+# The scatter-width of copysets on a chunkserver must not exceed this percentage of the minimum
mds.schduler.scatterWidthRangePerent={{ mds_schduler_scatterwidth_range_percent }}
-# 一个server上超过一定数量的chunkserver offline, 不做恢复
+# If more than a certain number of chunkservers are offline on one server, no recovery is performed
mds.chunkserver.failure.tolerance={{ mds_chunkserver_failure_tolerance }}
-# chunkserver启动coolingTimeSec_后才可以作为target leader, 单位是s
-# TODO(lixiaocui): 续得一定程度上与快照的时间间隔方面做到相关
+# A chunkserver may become a target leader only coolingTimeSec_ after it starts, in seconds
+# TODO(lixiaocui): make this somewhat related to the snapshot interval
mds.scheduler.chunkserver.cooling.timeSec={{ mds_scheduler_chunkserver_cooling_time_sec }}
# ScanScheduler: scan start hour in one day ([0-23])
mds.scheduler.scan.startHour={{ mds_scheduler_scan_start_hour }}
@@ -106,129 +106,129 @@ mds.scheduler.scan.concurrent.per.pool={{ mds_scheduler_scan_concurrent_per_pool
mds.scheduler.scan.concurrent.per.chunkserver={{ mds_scheduler_scan_concurrent_per_chunkserver }}
#
-# 心跳相关配置,单位为ms
+# Heartbeat related configuration, in ms
#
-# chunkserver和mds的心跳间隔
+# Heartbeat interval between chunkserver and MDS
mds.heartbeat.intervalMs={{ mds_heartbeat_interval_ms }}
-# chunkserver和mds间心跳miss的时间
+# Time after which a chunkserver-MDS heartbeat counts as missed
mds.heartbeat.misstimeoutMs={{ mds_heartbeat_misstimeout_ms }}
-# mds在心跳miss后offlinetimeout被标记为offline
+# MDS marks a chunkserver offline once heartbeats have been missed for offlinetimeout
mds.heartbeat.offlinetimeoutMs={{ mds_heartbeat_offlinet_imeout_ms }}
-# mds启动后延迟一定时间开始指导chunkserver删除物理数据
-# 需要延迟删除的原因在代码中备注
+# After MDS starts, it waits for a while before instructing chunkservers to delete physical data
+# The reason for the delayed deletion is noted in the code
mds.heartbeat.clean_follower_afterMs={{ mds_heartbeat_clean_follower_after_ms }}
#
-# namespace cache相关
+# Namespace cache related
#
-# namestorage的缓存大小,为0表示不缓存
-# 按照每个文件最小10GB的空间预算。算上超售(2倍)
-# 文件数量 = 5PB/10GB ~= 524288 个文件
-# sizeof(namespace对象) * 524288 ~= 89Byte *524288 ~= 44MB 空间
-# 16MB chunk大小, 1个segment 1GB
-# sizeof(segment 对象) * 2621440 ~=(32 + (1024/16)*12)* 2621440 ~= 1.95 GB
-# 数据量:3GB左右
-# 记录数量:524288+2621440 ~= 300w左右
+# Cache size of namestorage; 0 means no caching
+# Budgeting a minimum of 10GB per file and accounting for 2x overselling:
+# number of files = 5PB/10GB ~= 524288 files
+# sizeof(namespace object) * 524288 ~= 89Byte * 524288 ~= 44MB of space
+# with 16MB chunks and 1GB segments:
+# sizeof(segment object) * 2621440 ~= (32 + (1024/16)*12) * 2621440 ~= 1.95 GB
+# Data volume: about 3GB
+# Record count: 524288 + 2621440 ~= about 3 million
mds.cache.count={{ mds_cache_count }}
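The sizing estimate in the comment above can be reproduced directly; a quick Python sanity check of those numbers (illustrative, not code from this patch):

    GB, MB, PB = 1024**3, 1024**2, 1024**5

    files = (5 * PB) // (10 * GB)             # 524288 files at a 10GB minimum per file
    namespace_mb = 89 * files / MB            # ~44.5 MB of namespace objects
    segment_obj = 32 + (1024 // 16) * 12      # 800 bytes per segment object
    segments = 2621440                        # segment count used in the comment
    segment_gb = segment_obj * segments / GB  # ~1.95 GB
    records = files + segments                # 3145728 ~= 3 million records

    print(files, round(namespace_mb, 1), round(segment_gb, 2), records)
    # 524288 44.5 1.95 3145728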
#
# mds file record settings
#
-# mds file记录过期时间,单位us
+# Expiration time of MDS file records, in us
mds.file.expiredTimeUs={{ file_expired_time_us }}
-# mds后台扫描线程扫描file记录间隔时间,单位us
+# Interval at which the MDS background thread scans file records, in us
mds.file.scanIntevalTimeUs={{ mds_file_scan_inteval_time_us }}
#
# auth settings
#
-# root用户密码
+# Root user password
mds.auth.rootUserName={{ curve_root_username }}
mds.auth.rootPassword={{ curve_root_password }}
#
# file lock setting
#
-# mds的文件锁桶大小
+# File lock bucket count for MDS
mds.filelock.bucketNum={{ mds_filelock_bucket_num }}
#
# topology config
#
-# Toplogy 定期刷新入数据库的时间间隔
+# Interval at which Topology is periodically flushed to the database
mds.topology.TopologyUpdateToRepoSec={{ mds_topology_topology_update_to_repo_sec }}
-# 请求chunkserver上创建全部copyset的超时时间
+# Timeout of the request to create all copysets on a chunkserver
mds.topology.CreateCopysetRpcTimeoutMs={{ mds_topology_create_copyset_rpc_timeout_ms }}
-# 请求chunkserver上创建copyset重试次数
+# Number of retries of the create-copyset request to a chunkserver
mds.topology.CreateCopysetRpcRetryTimes={{ mds_topology_create_copyset_rpc_retry_times }}
-# 请求chunkserver上创建copyset重试间隔
+# Retry interval of the create-copyset request to a chunkserver
mds.topology.CreateCopysetRpcRetrySleepTimeMs={{ mds_topology_create_copyset_rpc_retry_sleep_time_ms }}
-# Topology模块刷新metric时间间隔
+# Interval at which the Topology module refreshes metrics
mds.topology.UpdateMetricIntervalSec={{ mds_topology_update_metric_interval_sec }}
-# 物理池使用百分比,即使用量超过这个值即不再往这个池分配
+# Physical pool usage percentage; once usage exceeds this value, the pool no longer receives allocations
mds.topology.PoolUsagePercentLimit={{ mds_topology_pool_usage_percent_limit }}
-# 多pool选pool策略 0:Random, 1:Weight
+# Pool selection policy for multiple pools 0:Random, 1:Weight
mds.topology.choosePoolPolicy={{ mds_topology_choose_pool_policy }}
# enable LogicalPool ALLOW/DENY status
mds.topology.enableLogicalPoolStatus={{ mds_topology_enable_logicalpool_status}}
#
# copyset config
-# 默认值,为0时不启用
+# Default values; 0 disables the option
#
-# 生成copyset重试次数
+# Number of retries when generating copysets
mds.copyset.copysetRetryTimes={{ mds_copyset_copyset_retry_times }}
-# 所有chunkserver的scatterWidth需满足的最大方差
+# Maximum variance that the scatterWidth of all chunkservers must satisfy
mds.copyset.scatterWidthVariance={{ mds_copyset_scatterwidth_variance }}
-# 所有chunkserver的scatterWidth需满足的最大标准差
+# Maximum standard deviation that the scatterWidth of all chunkservers must satisfy
mds.copyset.scatterWidthStandardDevation={{ mds_copyset_scatterwidth_standard_devation }}
-# 所有chunkserver的scatterWidth需满足的最大极差
+# Maximum range that the scatterWidth of all chunkservers must satisfy
mds.copyset.scatterWidthRange={{ mds_copyset_scatterwidth_range }}
-# 所有chunkserver的scatterWidth偏离均值的百分比
-# scatterwidth偏移百分比设置过大,导致部分机器scatterwidth过小,影响机器恢复时间,恢复
-# 时间会导致集群的可靠性降低;其次,会导致部分机器scatterwith过大,某些chunkserver上的
-# copyset散落在各机器上,其他机器一旦写入,这些scatter-with较大的机器成为热点
-# scatterwidth偏移百分比设置过小,导致scatterwidth平均程度要求更大,copyset算法要求越高,
-# 导致算法可能算不出理想结果,建议设置值为20
+# Percentage by which the scatterWidth of chunkservers may deviate from the mean.
+# If set too high, some machines end up with a scatterWidth that is too small, lengthening
+# machine recovery time and thus reducing cluster reliability; others end up with a
+# scatterWidth that is too large, so the copysets of certain chunkservers are scattered
+# across many machines, which become hotspots as soon as the others write. If set too low,
+# the scatterWidth must stay closer to the mean, which demands more of the copyset
+# algorithm and may leave it unable to find an ideal result. The recommended value is 20.
mds.copyset.scatterWidthFloatingPercentage={{ mds_copyset_scatterwidth_floating_percentage }}
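As a worked example of the floating-percentage constraint (a sketch under assumed semantics; the real validation lives in the MDS copyset code):

    def within_floating_percent(scatter_widths, percent):
        # Every chunkserver's scatter-width must stay within `percent` of the mean.
        mean = sum(scatter_widths) / len(scatter_widths)
        bound = mean * percent / 100.0
        return all(abs(w - mean) <= bound for w in scatter_widths)

    # With the recommended 20%: widths 90-110 around a mean of 100 pass,
    assert within_floating_percent([90, 100, 110], 20)
    # but a chunkserver at 60 deviates by 40% of the mean and fails.
    assert not within_floating_percent([60, 100, 140], 20)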
#
# curvefs config
#
-# curvefs的默认chunk size大小,16MB = 16*1024*1024 = 16777216
+# Default chunk size of curvefs, 16MB = 16*1024*1024 = 16777216
mds.curvefs.defaultChunkSize={{ chunk_size }}
-# curvefs的默认segment size大小,1GB = 1*1024*1024*1024 = 1073741824
+# Default segment size of curvefs, 1GB = 1*1024*1024*1024 = 1073741824
mds.curvefs.defaultSegmentSize={{ segment_size }}
-# curvefs的默认最小文件大小,10GB = 10*1024*1024*1024 = 10737418240
+# Default minimum file size of curvefs, 10GB = 10*1024*1024*1024 = 10737418240
mds.curvefs.minFileLength={{ min_file_length }}
-# curvefs的默认最大文件大小,20TB = 20*1024*1024*1024*1024 = 21990232555520
+# Default maximum file size of curvefs, 20TB = 20*1024*1024*1024*1024 = 21990232555520
mds.curvefs.maxFileLength={{ max_file_length }}
#
# chunkseverclient config
#
-# rpc 超时时间
+# RPC timeout
mds.chunkserverclient.rpcTimeoutMs={{ mds_chunkserverclient_rpc_timeout_ms }}
-# rpc 重试次数
+# Number of RPC retries
mds.chunkserverclient.rpcRetryTimes={{ mds_chunkserverclient_rpc_retry_times }}
-# rpc 重试时间间隔
+# RPC retry interval
mds.chunkserverclient.rpcRetryIntervalMs={{ mds_chunkserverclient_rpc_retry_interval_ms }}
-# 从copyset的每个chunkserver getleader的重试的最大轮次
+# Maximum number of get-leader retry rounds across the chunkservers of a copyset
mds.chunkserverclient.updateLeaderRetryTimes={{ mds_chunkserverclient_update_leader_retry_times }}
-# 从copyset的每个chunkserver getleader的每一轮的间隔,需大于raft选主的时间
+# Interval between get-leader rounds across the chunkservers of a copyset; must exceed the raft leader-election time
mds.chunkserverclient.updateLeaderRetryIntervalMs={{ mds_chunkserverclient_update_leader_retry_interval_ms }}
# snapshotclone config
#
-# snapshot clone server 地址
+# Snapshot clone server address
mds.snapshotcloneclient.addr={{ snapshot_nginx_vip }}:{{ nginx_docker_external_port }}
#
# common options
#
-# 日志存放文件夹
+# Log directory
mds.common.logDir={{ mds_common_log_dir }}
-# 单元测试情况下
+# For unit tests
# mds.common.logDir=./runlog/
#
diff --git a/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2 b/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2
index d7121c6dad..eadcb92bd7 100644
--- a/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2
@@ -1,28 +1,28 @@
# part2 socket file address
nebdserver.serverAddress={{ nebd_data_dir }}/nebd.sock
-# 文件锁路径
+# File lock path
metacache.fileLockPath={{ nebd_data_dir }}/lock
-# 同步rpc的最大重试次数
+# Maximum number of retries for synchronous RPCs
request.syncRpcMaxRetryTimes={{ nebd_client_sync_rpc_retry_times }}
-# rpc请求的重试间隔
+# Retry interval of RPC requests
request.rpcRetryIntervalUs={{ nebd_client_rpc_retry_inverval_us }}
-# rpc请求的最大重试间隔
+# Maximum retry interval of RPC requests
request.rpcRetryMaxIntervalUs={{ nebd_client_rpc_retry_max_inverval_us }}
-# rpc hostdown情况下的重试时间
+# Retry interval when the RPC host is down
request.rpcHostDownRetryIntervalUs={{ nebd_client_rpc_hostdown_retry_inverval_us }}
-# brpc的健康检查周期时间,单位s
+# brpc health-check period, in seconds
request.rpcHealthCheckIntervalS={{ nebd_client_health_check_internal_s }}
-# brpc从rpc失败到进行健康检查的最大时间间隔,单位ms
+# Maximum interval from an RPC failure until brpc runs a health check, in ms
request.rpcMaxDelayHealthCheckIntervalMs={{ nebd_client_delay_health_check_internal_ms }}
-# rpc发送执行队列个数
+# Number of RPC send execution queues
request.rpcSendExecQueueNum={{ nebd_client_rpc_send_exec_queue_num }}
-# heartbeat间隔
+# Heartbeat interval
heartbeat.intervalS={{ nebd_client_heartbeat_inverval_s }}
-# heartbeat rpc超时时间
+# Heartbeat RPC timeout
heartbeat.rpcTimeoutMs={{ nebd_client_heartbeat_rpc_timeout_ms }}
-# 日志路径
+# Log path
log.path={{ nebd_log_dir }}/client
diff --git a/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2 b/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2
index 5262d0af37..7cd700b2db 100644
--- a/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2
@@ -1,16 +1,16 @@
-# curve-client配置文件地址
+# curve-client configuration file path
curveclient.confPath={{ client_config_path }}
-#brpc server监听端口
+# brpc server listening port
listen.address={{ nebd_data_dir }}/nebd.sock
-#元数据文件地址,包含文件名
+# Metadata file path, including the file name
meta.file.path={{ nebd_data_dir }}/nebdserver.meta
-#心跳超时时间
+# Heartbeat timeout
heartbeat.timeout.sec={{ nebd_server_heartbeat_timeout_s }}
-#文件超时检测时间间隔
+# File timeout detection interval
heartbeat.check.interval.ms={{ nebd_server_heartbeat_check_interval_ms }}
# return rpc when io error
diff --git a/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2 b/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2
index ca52b19925..00c20160a0 100644
--- a/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2
@@ -1,18 +1,18 @@
#
# curvefs client options
#
-# client配置文件位置
+# Client configuration file location
client.config_path={{ snap_client_config_path }}
-# mds root 用户名
+# MDS root user name
mds.rootUser={{ curve_root_username }}
-# mds root 密码
+# MDS root password
mds.rootPassword={{ curve_root_password }}
-# 调用client方法的重试总时间
+# Total retry time for calling client methods
client.methodRetryTimeSec={{ snap_client_method_retry_time_sec }}
-# 调用client方法重试间隔时间
+# Retry interval for calling client methods
client.methodRetryIntervalMs={{ snap_client_method_retry_interval_ms }}
-# 日志文件位置
+# Log file location
log.dir={{ snap_log_dir }}
#
@@ -26,53 +26,53 @@ s3.config_path={{ snap_s3_config_path }}
server.address={{ ansible_ssh_host }}:{{ snapshot_port }}
server.subnet={{ snapshot_subnet }}
server.port={{ snapshot_port }}
-# 调用client异步方法重试总时间
+# Total retry time for calling asynchronous client methods
server.clientAsyncMethodRetryTimeSec={{ snap_client_async_method_retry_time_sec }}
-# 调用client异步方法重试时间间隔
+# Retry interval for calling asynchronous client methods
server.clientAsyncMethodRetryIntervalMs={{ snap_client_async_method_retry_interval_ms }}
-# 快照工作线程数
+# Number of snapshot worker threads
server.snapshotPoolThreadNum={{ snap_snapshot_pool_thread_num }}
-# 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms)
+# Period at which the snapshot background thread scans the waiting and working queues (unit: ms)
server.snapshotTaskManagerScanIntervalMs={{ snap_snapshot_task_manager_scan_interval_ms }}
-# 转储chunk分片大小
+# Dump chunk split size
server.chunkSplitSize={{ snap_chunk_split_size }}
-# CheckSnapShotStatus调用间隔
+# CheckSnapShotStatus call interval
server.checkSnapshotStatusIntervalMs={{ snap_check_snapshot_status_interval_ms }}
-# 最大快照数
+# Maximum number of snapshots
server.maxSnapshotLimit={{ snap_max_snapshot_limit }}
-# 同时执行转储的线程数
+# Number of threads that execute snapshot dumps concurrently
server.snapshotCoreThreadNum={{ snap_snapshot_core_thread_num }}
-# mds session 时间
+# MDS session time
server.mdsSessionTimeUs={{ file_expired_time_us }}
-# 每个线程同时进行ReadChunkSnapshot和转储的快照分片数量
+# Number of snapshot slices per thread on which ReadChunkSnapshot and dump run concurrently
server.readChunkSnapshotConcurrency={{ snap_read_chunk_snapshot_concurrency }}
# for clone
-# 用于Lazy克隆元数据部分的线程池线程数
+# Number of thread pool threads used for the metadata stage of lazy clone
server.stage1PoolThreadNum={{ snap_stage1_pool_thread_num }}
-# 用于Lazy克隆数据部分的线程池线程数
+# Number of thread pool threads used for the data stage of lazy clone
server.stage2PoolThreadNum={{ snap_stage2_pool_thread_num }}
-# 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池线程数
+# Number of thread pool threads used for non-lazy clone, clone deletion, and other control-plane requests
server.commonPoolThreadNum={{ snap_common_pool_thread_num }}
-# CloneTaskManager 后台线程扫描间隔
+# CloneTaskManager background thread scan interval
server.cloneTaskManagerScanIntervalMs={{ snap_clone_task_manager_scan_interval_ms }}
-# clone chunk分片大小
+# Clone chunk split size
server.cloneChunkSplitSize={{ snap_clone_chunk_split_size }}
-# 克隆临时目录
+# Clone temporary directory
server.cloneTempDir={{ snap_clone_temp_dir }}
-# CreateCloneChunk同时进行的异步请求数量
+# Number of concurrent asynchronous CreateCloneChunk requests
server.createCloneChunkConcurrency={{ snap_create_clone_chunk_concurrency }}
-# RecoverChunk同时进行的异步请求数量
+# Number of concurrent asynchronous RecoverChunk requests
server.recoverChunkConcurrency={{ snap_recover_chunk_concurrency }}
-# CloneServiceManager引用计数后台扫描每条记录间隔
+# Interval between records within one round of the CloneServiceManager reference-count background scan
server.backEndReferenceRecordScanIntervalMs={{ snap_clone_backend_ref_record_scan_interval_ms }}
-# CloneServiceManager引用计数后台扫描每轮记录间隔
+# Interval between rounds of the CloneServiceManager reference-count background scan
server.backEndReferenceFuncScanIntervalMs={{ snap_clone_backend_ref_func_scan_interval_ms }}
#
-# etcd相关配置
+# etcd related configuration
#
-# etcd地址
+# etcd address
{% set etcd_address=[] -%}
{% for host in groups.etcd -%}
{% set etcd_ip = hostvars[host].ansible_ssh_host -%}
@@ -81,11 +81,11 @@ server.backEndReferenceFuncScanIntervalMs={{ snap_clone_backend_ref_func_scan_in
{% endfor -%}
etcd.endpoint={{ etcd_address | join(',') }}
-# client建立连接的超时时间
+# Timeout for the etcd client to establish a connection
etcd.dailtimeoutMs={{ snap_etcd_dailtimeout_ms }}
-# client进行put/get/txn等操作的超时时间
+# Timeout for etcd client put/get/txn and other operations
etcd.operation.timeoutMs={{ snap_etcd_operation_timeout_ms }}
-# client操作失败可以重试的次数
+# Number of times a failed etcd client operation may be retried
etcd.retry.times={{ snap_etcd_retry_times }}
# wait dlock timeout
etcd.dlock.timeoutMs={{ snap_etcd_dlock_timeout_ms }}
@@ -93,20 +93,20 @@ etcd.dlock.timeoutMs={{ snap_etcd_dlock_timeout_ms }}
etcd.dlock.ttlSec={{ snap_etcd_dlock_ttl_sec }}
#
-# leader选举相关参数
+# Leader election related parameters
#
-# leader lock名称
+# Leader lock name
leader.campagin.prefix={{ snap_leader_campagin_prefix }}
-# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s)
-# 该值和etcd集群election timeout相关.
-# etcd的server端限制了该值最小为1.5 * election timeout
-# 建议设置etcd集群election timeout为1s
+# A session is created during the leader campaign; the unit is seconds (the Go-side interface also takes seconds)
+# This value is related to the etcd cluster election timeout
+# The etcd server requires it to be at least 1.5 * election timeout,
+# so setting the etcd cluster election timeout to 1s is recommended
leader.session.intersec={{ snap_leader_session_inter_sec }}
-# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间
-# 内未当选leader会返回错误
+# Timeout for the leader campaign: if 0, an unsuccessful campaign blocks forever; if greater
+# than 0, an error is returned when no leader is elected within electionTimeoutMs
leader.election.timeoutms={{ snap_leader_election_timeout_ms }}
#
-# dummyserver相关配置
+# Dummy server related configuration
#
server.dummy.listen.port={{ snapshot_dummy_port }}
diff --git a/curve-ansible/roles/generate_config/templates/tools.conf.j2 b/curve-ansible/roles/generate_config/templates/tools.conf.j2
index 6207e8a4ef..b630b3dfe3 100644
--- a/curve-ansible/roles/generate_config/templates/tools.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/tools.conf.j2
@@ -1,4 +1,4 @@
-# mds地址
+# MDS address
{% set mds_address=[] -%}
{% for host in groups.mds -%}
{% set mds_ip = hostvars[host].ansible_ssh_host -%}
@@ -8,13 +8,13 @@ mdsAddr={{ mds_address | join(',') }}
# mds dummy port
mdsDummyPort={{ hostvars[groups.mds[0]].mds_dummy_port }}
-# 发送rpc的超时时间
+# Timeout for sending an RPC
rpcTimeout={{ tool_rpc_timeout }}
-# rpc重试次数
+# Number of RPC retries
rpcRetryTimes={{ tool_rpc_retry_times }}
# the rpc concurrency to chunkserver
rpcConcurrentNum={{ tool_rpc_concurrent_num }}
-# etcd地址
+# etcd address
{% set etcd_address=[] -%}
{% for host in groups.etcd -%}
{% set etcd_ip = hostvars[host].ansible_ssh_host -%}
@@ -23,7 +23,7 @@ rpcConcurrentNum={{ tool_rpc_concurrent_num }}
{% endfor -%}
etcdAddr={{ etcd_address | join(',') }}
{% if groups.snapshotclone is defined and groups.snapshotclone[0] is defined %}
-# snapshot clone server 地址
+# Snapshot clone server address
{% set snap_address=[] -%}
{% for host in groups.snapshotclone -%}
{% set snap_ip = hostvars[host].ansible_ssh_host -%}
diff --git a/curve-ansible/roles/install_package/files/disk_uuid_repair.py b/curve-ansible/roles/install_package/files/disk_uuid_repair.py
index eb48728e2e..cfa5a32ac3 100644
--- a/curve-ansible/roles/install_package/files/disk_uuid_repair.py
+++ b/curve-ansible/roles/install_package/files/disk_uuid_repair.py
@@ -17,30 +17,34 @@
# limitations under the License.
#
-# 检测磁盘上disk.meta中记录的uuid与当前磁盘的实际uuid是否相符合
-# 如果不符合, 更新为当前的uuid
+# Check whether the uuid recorded in disk.meta on a disk matches the disk's actual uuid
+# If not, update it to the current uuid

import os
import hashlib
import sys
import subprocess

+
def __get_umount_disk_list():
-    # 获取需要挂载的设备
+    # Get the devices that need to be mounted
    cmd = "lsblk -O|grep ATA|awk '{print $1}'"
-    out_msg = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+    out_msg = subprocess.check_output(
+        cmd, shell=True, stderr=subprocess.STDOUT)
    devlist = out_msg.splitlines()

-    # 查看当前设备的挂载状况
+    # Check the mount status of each device
    umount = []
    for dev in devlist:
        cmd = "lsblk|grep " + dev + "|awk '{print $7}'"
-        out_msg = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+        out_msg = subprocess.check_output(
+            cmd, shell=True, stderr=subprocess.STDOUT)
        if len(out_msg.replace('\n', '')) == 0:
            umount.append(dev)
    return umount

+
def __uninit():
    try:
        cmd = "grep curvefs /etc/fstab"
@@ -49,6 +53,7 @@ def __uninit():
    except subprocess.CalledProcessError:
        return True

+
def __analyse_uuid(kv):
    uuid = ""
    uuidkv = kv[0].split("=")
@@ -64,25 +69,27 @@ def __analyse_uuid(kv):
        return ""
    else:
        uuidmd5 = uuidmd5kv[1].replace("\n", "")
-    # 校验
+    # Verify the checksum
    if (hashlib.md5(uuid).hexdigest() != uuidmd5):
        print("uuid[%s] not match uuidmd5[%s]" % (uuid, uuidmd5))
        return ""
    return uuid

+
def __get_recorduuid(disk):
    uuid = ""
-    # 将磁盘挂载到临时目录
+    # Mount the disk at a temporary directory
    cmd = "mkdir -p /data/tmp; mount " + disk + " /data/tmp"
    retCode = subprocess.call(cmd, shell=True)
    if retCode != 0:
        print("Get record uuid in %s fail." % disk)
        return False, uuid

-    # 挂载成功,获取记录的uuid
+    # Mounted successfully; read the recorded uuid
    try:
        cmd = "cat /data/tmp/disk.meta"
-        out_msg = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+        out_msg = subprocess.check_output(
+            cmd, shell=True, stderr=subprocess.STDOUT)
        kv = out_msg.splitlines()
        if len(kv) != 2:
@@ -94,7 +101,7 @@ def __get_recorduuid(disk):
    except subprocess.CalledProcessError as e:
        print("Get file disk.meta from %s fail, reason: %s." % (disk, e))

-    # 卸载磁盘
+    # Unmount the disk
    cmd = "umount " + disk + "; rm -fr /data/tmp"
    retCode = subprocess.call(cmd, shell=True)
    if retCode != 0:
@@ -103,75 +110,81 @@
    return True, uuid

+
def __get_actualuuid(disk):
    uuid = ""
    try:
        cmd = "ls -l /dev/disk/by-uuid/|grep " + disk + "|awk '{print $9}'"
-        uuid = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+        uuid = subprocess.check_output(
+            cmd, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print("Get actual uuid of %s fail, reason: %s." % (disk, e))
    return uuid

+
def __cmp_recorduuid_with_actual(umountDisk):
    recordList = {}
    actualList = {}
    for disk in umountDisk:
-        # 获取当前disk上记录的uuid
-        diskFullName = "/dev/" + disk
-        opRes, recorduuid = __get_recorduuid(diskFullName)
-        if opRes != True or len(recorduuid) == 0:
-            return False, recordList, actualList
-
-        # 获取disk的实际uuid
-        actualuuid = __get_actualuuid(disk).replace("\n", "")
-
-        # 比较记录的和实际的是否相同
-        if actualuuid != recorduuid:
-            recordList[disk] = recorduuid
-            actualList[disk] = actualuuid
-        else:
+        # Get the uuid recorded on the current disk
+        diskFullName = "/dev/" + disk
+        opRes, recorduuid = __get_recorduuid(diskFullName)
+        if opRes != True or len(recorduuid) == 0:
+            return False, recordList, actualList
+
+        # Get the actual uuid of the disk
+        actualuuid = __get_actualuuid(disk).replace("\n", "")
+
+        # Compare the recorded uuid with the actual one
+        if actualuuid != recorduuid:
+            recordList[disk] = recorduuid
+            actualList[disk] = actualuuid
+        else:
            return False, recordList, actualList
    return True, recordList, actualList

+
def __mount_with_atual_uuid(diskPath, record, actual):
    print("%s uuid change from [%s] to [%s]." % (diskPath, record, actual))

-    # 从/etc/fstab中获取对应的挂载目录
+    # Get the corresponding mount directory from /etc/fstab
    mntdir = ""
    try:
        cmd = "grep " + record + " /etc/fstab | awk -F \" \" '{print $2}'"
-        mntdir = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).replace("\n", "")
+        mntdir = subprocess.check_output(
+            cmd, shell=True, stderr=subprocess.STDOUT).replace("\n", "")
    except subprocess.CalledProcessError as e:
        print("Get mount dir for %s fail. error: %s." % (diskPath, e))
        return False

-    # 将actual挂载到相应的目录下
+    # Mount the disk at that directory using the actual uuid
    cmd = "mount " + diskPath + " " + mntdir
    retCode = subprocess.call(cmd, shell=True)
-    if retCode !=0:
+    if retCode != 0:
        print("exec [%s] fail." % cmd)
        return False
    print("mount %s to %s success." % (diskPath, mntdir))

    replaceCmd = "sed -i \"s/" + record + "/" + actual + "/g\""
-    # 将新的uuid写入到fstab
+    # Write the new uuid to fstab
    cmd = "cp /etc/fstab /etc/fstab.bak;" + replaceCmd + " /etc/fstab > /dev/null"
    retCode = subprocess.call(cmd, shell=True)
-    if retCode !=0:
+    if retCode != 0:
        print("exec [%s] fail." % cmd)
        return False
    print("modify actual uuid to /etc/fstab for disk %s success." % diskPath)

-    # 将新的uuid写入到diskmeta
+    # Write the new uuid to disk.meta
    fileFullName = mntdir + "/disk.meta"
    filebakName = fileFullName + ".bak"
    cpcmd = "cp " + fileFullName + " " + filebakName
    uuidcmd = "echo uuid=" + actual + " > " + fileFullName
-    uuidmd5cmd = "echo uuidmd5=" + hashlib.md5(actual).hexdigest() + " >> " + fileFullName
+    uuidmd5cmd = "echo uuidmd5=" + \
+        hashlib.md5(actual).hexdigest() + " >> " + fileFullName
    cmd = cpcmd + ";" + uuidcmd + ";" + uuidmd5cmd
    retCode = subprocess.call(cmd, shell=True)
-    if retCode !=0:
+    if retCode != 0:
        print("exec [%s] fail." % cmd)
        return False
    print("modify actual uuid to %s success." % fileFullName)
@@ -182,29 +195,32 @@ def __mount_with_atual_uuid(diskPath, record, actual):
def __handle_inconsistent(umountDisk, record, actual):
    for disk in umountDisk:
        if disk not in record:
-            print("record uuid and actual uuid of %s is same, please check other reason" % disk)
+            print(
+                "record uuid and actual uuid of %s is same, please check other reason" % disk)
            continue
-        # 按照actual uuid做挂载
-        res = __mount_with_atual_uuid("/dev/" + disk, record[disk], actual[disk])
+        # Mount using the actual uuid
+        res = __mount_with_atual_uuid(
+            "/dev/" + disk, record[disk], actual[disk])
        if res:
            continue
        else:
            return False
    return True

+
if __name__ == "__main__":
-    # 查看未挂载成功的磁盘设备列表
+    # Get the list of disk devices that failed to mount
    umountDisk = __get_umount_disk_list()
    if len(umountDisk) == 0:
        print("All disk mount success.")
        exit(0)

-    # 查看是否之前已经挂载过
+    # Check whether the environment was initialized before
    if __uninit():
        print("Please init env with chunkserver_ctl.sh first.")
        exit(0)

-    # 查看当前未挂载成功的磁盘设备记录的uuid和实际uuid
+    # Compare the recorded uuid and the actual uuid of the disks that failed to mount
    cmpRes, record, actual = __cmp_recorduuid_with_actual(umountDisk)
    if cmpRes == False:
        print("Compare record uuid with actual uuid fail.")
@@ -213,13 +229,10 @@ def __handle_inconsistent(umountDisk, record, actual):
        print("Record uuid with actual uuid all consistent.")
        exit(0)

-    # 将不一致的磁盘按照当前的uuid重新挂载
+    # Remount the inconsistent disks using their current uuid
    if __handle_inconsistent(umountDisk, record, actual):
        print("fix uuid-changed disk[%s] success." % umountDisk)
        exit(0)
    else:
        print("fix uuid-changed disk[%s] fail." % umountDisk)
        exit(-1)
-
-
-
diff --git a/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2 b/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2
index cba41adfcd..d44a03c682 100644
--- a/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2
+++ b/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2
@@ -6,7 +6,7 @@ dataDir={{ chunkserver_data_dir }}
raftLogProtocol={{ chunkserver_raft_log_procotol }}
source ./chunkserver_deploy.sh
-# 使用方式
+# Usage
function help() {
    echo "COMMANDS:"
    echo "    start   : start chunkserver"
@@ -50,18 +50,18 @@ function ip_value() {
    }'
}
-# 从subnet获取ip
+# Get the IP from the subnet
function get_ip_from_subnet() {
    subnet=$1
    prefix=$(ip_value $subnet)
    mod=`echo $subnet|awk -F/ '{print $2}'`
    mask=$((2**32-2**(32-$mod)))
-    # 对prefix再取一次模,为了支持10.182.26.50/22这种格式
+    # Mask the prefix once more, to support formats like 10.182.26.50/22
    prefix=$(($prefix&$mask))
    ip=
    for i in `/sbin/ifconfig -a|grep inet|grep -v inet6|awk '{print $2}'|tr -d "addr:"`
    do
-        # 把ip转换成整数
+        # Convert the IP to an integer
        ip_int=$(ip_value $i)
        if [ $(($ip_int&$mask)) -eq $prefix ]
        then
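The prefix/mask arithmetic that get_ip_from_subnet performs in shell can be mirrored in a few lines of Python (a hypothetical helper for illustration; the script itself only uses the awk/ifconfig logic above):

    import ipaddress

    def ip_in_subnet(ip, subnet):
        # strict=False re-masks the prefix, which is what prefix=$(($prefix&$mask))
        # achieves above and why a form like 10.182.26.50/22 (host bits set) still works.
        return ipaddress.ip_address(ip) in ipaddress.ip_network(subnet, strict=False)

    assert ip_in_subnet("10.182.25.1", "10.182.26.50/22")      # 10.182.24.0/22 covers .24-.27
    assert not ip_in_subnet("10.182.28.1", "10.182.26.50/22")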
@@ -104,7 +104,7 @@ function start() { get_ip_from_subnet $external_subnet external_ip=$ip enableExternalServer=true - # external ip和internal ip一致或external ip为127.0.0.1时不启动external server + # Do not start the external server when the external IP equals the internal IP or the external IP is 127.0.0.1 if [ $internal_ip = $external_ip -o $external_ip = "127.0.0.1" ] then enableExternalServer=false @@ -148,7 +148,7 @@ function start_one() { fi jemallocpath={{ jemalloc_path }} - # 检查jemalloc库文件 + # Check the jemalloc library file if [ ! -f ${jemallocpath} ] then echo "Not found jemalloc library, Path is ${jemallocpath}" @@ -230,7 +230,7 @@ function restart() { } function wait_stop() { - # wait 3秒钟让它退出 + # Wait 3 seconds for it to exit retry_times=0 while [ $retry_times -le 3 ] do @@ -244,7 +244,7 @@ function wait_stop() { break fi done - # 如果进程还在,就kill -9 + # If the process is still alive, kill -9 ps -efl|grep -E "curve-chunkserver .*${dataDir}/chunkserver$1 "|grep -v grep > /dev/null 2>&1 if [ $? -eq 0 ] then @@ -325,12 +325,12 @@ function deploy() { } function format() { - # 格式化chunkfile pool + # Format the chunkfile pool curve-format $* } function recordmeta() { - # 将当前的磁盘的uuid及其md5备份到磁盘的disk.meta文件中 + # Back up the current disk's uuid and its md5 to the disk's disk.meta file meta_record; } diff --git a/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2 b/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2 index db8566728a..7f84ccd28f 100644 --- a/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2 +++ b/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2 @@ -1,5 +1,5 @@ #!/bin/bash -#confirm提示,防止误操作 +# confirm prompt to prevent misoperation dataDir={{ chunkserver_data_dir }} function do_confirm { echo "This deployment script will format the disk and delete all the data." @@ -24,14 +24,14 @@ diskList="{{ dlist | join('\n') }}" {% endif %} function deploy_prep { -#清理/etc/fstab残留信息 +# Clean up residual /etc/fstab entries grep curvefs /etc/fstab if [ $? -eq 0 ] then sed -i '/curvefs/d' /etc/fstab sed -i '/chunkserver/d' /etc/fstab fi -#将数据盘挂载的目录都卸载掉,为下一步格式化磁盘做准备 +# Unmount all directories mounted from the data disks, preparing for the next step of formatting them for i in `{{ get_disk_list_cmd }}` do mntdir=`lsblk|grep $i|awk '{print $7}'` @@ -49,7 +49,7 @@ function deploy_prep { fi done } -#记录磁盘的盘符信息和磁盘的wwn信息,将信息持久化到diskinfo文件 +# Record each disk's device name and wwn, and persist the information to the diskinfo file declare -A disk_map diskinfo=./diskinfo function record_diskinfo { @@ -69,7 +69,7 @@ function record_diskinfo { done } -#根据磁盘数量创建数据目录和日志目录,目前的数据目录格式统一是$dataDir/chunkserver+num,日志目录在$dataDir/log/chunkserver+num +# Create data directories and log directories based on the number of disks.
The current data directory format is $dataDir/chunkserver+num, and the log directory is $dataDir/log/chunkserver+num function chunk_dir_prep { if [ -d ${dataDir} ] then @@ -90,7 +90,7 @@ function chunk_dir_prep { mkdir -p ${dataDir}/log/chunkserver$i done } -#格式化磁盘文件系统 +# Format the disk file systems function disk_format { for disk in ${!disk_map[@]} do @@ -99,7 +99,7 @@ function disk_format { done } -#将创建好的数据目录按照顺序挂载到格式化好的磁盘上,并记录挂载信息到mount.info +# Mount the created data directories onto the formatted disks in order, and record the mounting information to mount.info function mount_dir { while [ 1 ] do @@ -128,7 +128,7 @@ function mount_dir { lsblk > ./mount.info } -#持久化挂载信息到fstab文件,防止系统重启后丢失 +# Persist the mounting information to the fstab file to prevent loss after a system restart function fstab_record { grep curvefs /etc/fstab if [ $? -ne 0 ] @@ -141,7 +141,7 @@ function fstab_record { fi } -#将当前的uuid持久化到磁盘上做备份,防止系统重启后uuid发生变化 +# Persist the current uuid to the disk as a backup, in case the uuid changes after a system restart function meta_record { grep curvefs /etc/fstab if [ $? -eq 0 ] @@ -158,7 +158,7 @@ function meta_record { fi } -#初始化chunkfile pool +# Initialize the chunkfile pool function chunkfile_pool_prep { ret=`lsblk|grep chunkserver|wc -l` for i in `seq 0 $((${ret}-1))` @@ -224,20 +224,20 @@ function deploy_all { function deploy_one { local diskname=$1 local dirname=$2 - #目录不存在 + # The directory does not exist if [ ! -d $dirname ] then echo "$dirname is not exist!" exit 1 fi - #磁盘正在挂载使用 + # The disk is currently mounted and in use mount | grep -w $diskname if [ $? -eq 0 ] then echo "$diskname is being used" exit 1 fi - #目录正在挂载使用 + # The directory is currently mounted and in use mount | grep -w $dirname if [ $? -eq 0 ] then @@ -265,7 +265,7 @@ function deploy_one { done mount $diskname $dirname lsblk > ./mount.info - #更新fstab + # Update fstab short_diskname=`echo $diskname|awk -F"/" '{print $3}'` ls -l /dev/disk/by-uuid|grep -w $short_diskname if [ $? -ne 0 ] @@ -275,12 +275,12 @@ function deploy_one { fi uuid=`ls -l /dev/disk/by-uuid/|grep -w ${short_diskname}|awk '{print $9}'` echo "UUID=$uuid $dirname ext4 rw,errors=remount-ro 0 0" >> /etc/fstab - # 将uuid及其md5写到diskmeta中 + # Write the uuid and its md5 to disk.meta uuidmd5=`echo -n $uuid | md5sum | cut -d ' ' -f1` touch $dirname/disk.meta echo "uuid=$uuid" > $dirname/disk.meta echo "uuidmd5=$uuidmd5" >> $dirname/disk.meta - #格式化chunkfile pool + # Format the chunkfile pool curve-format -allocatePercent={{ chunk_alloc_percent }} \ -filePoolDir=$dirname/chunkfilepool \
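The disk.meta file written here (uuid=... plus its md5) is exactly what disk_uuid_repair.py verifies earlier in this patch: the recorded uuid is compared with the uuid the device actually reports, and a mismatch triggers the remount-and-rewrite path. A rough Python sketch of that comparison, with hypothetical paths (the real scripts shell out to ls /dev/disk/by-uuid and md5sum):

    import hashlib

    def read_recorded_uuid(mountpoint):
        # disk.meta holds two lines: uuid=... and uuidmd5=...
        meta = {}
        with open(mountpoint + "/disk.meta") as f:
            for line in f:
                key, _, value = line.strip().partition("=")
                meta[key] = value
        # The md5 line guards against a corrupted or hand-edited uuid entry.
        if hashlib.md5(meta["uuid"].encode()).hexdigest() != meta["uuidmd5"]:
            raise ValueError("disk.meta uuid/uuidmd5 mismatch")
        return meta["uuid"]

    def uuid_changed(mountpoint, actual_uuid):
        # actual_uuid comes from the device, e.g. via /dev/disk/by-uuid.
        return read_recorded_uuid(mountpoint) != actual_uuid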
diff --git a/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2 b/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2 index 6c0b36c932..9aadcb311f 100644 --- a/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2 +++ b/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2 @@ -1,12 +1,12 @@ #!/bin/bash -# 默认配置文件 +# Default configuration file confPath={{ etcd_config_path }} -# 日志文件目录 +# Log file directory logDir={{ etcd_log_dir }} -# 日志文件路径 +# Log file path logPath=${logDir}/etcd.log # pidfile @@ -15,9 +15,9 @@ pidFile=${HOME}/etcd.pid # daemon log daemonLog=${logDir}/daemon-etcd.log -# 启动etcd +# Start etcd function start_etcd() { - # 创建logDir + # Create logDir mkdir -p ${logDir} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -25,14 +25,14 @@ function start_etcd() { exit 1 fi - # 检查logPath是否有写权限 + # Check if logPath has write permission if [ ! -w ${logDir} ] then echo "Write permission denied: ${logDir}" exit 1 fi - # 检查logPath是否可写或者是否能够创建 + # Check if logPath is writable or can be created touch ${logPath} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -40,7 +40,7 @@ function start_etcd() { exit fi - # 检查daemonLog是否可写或者是否能够创建 + # Check if the daemonLog is writable or can be created touch ${daemonLog} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -48,28 +48,28 @@ function start_etcd() { exit fi - # 检查daemon + # Check the daemon if ! type daemon &> /dev/null then echo "No daemon installed" exit fi - # 检查是否安装etcd + # Check if etcd is installed if [ -z `command -v etcd` ] then echo "No etcd installed" exit fi - # 检查配置文件 + # Check configuration file if [ ! -f ${confPath} ] then echo "Not found confFile, Path is ${confPath}" exit fi - # 判断是否已经通过daemon启动了etcd + # Determine if etcd has been started through daemon daemon --name etcd --pidfile ${pidFile} --running if [ $? -eq 0 ] then @@ -77,8 +77,8 @@ function start_etcd() { exit fi - # pidfile不存在 或 daemon进程不存在 - # 启动daemon,切换路径,并启动etcd + # The pidfile does not exist or the daemon process does not exist + # Start the daemon, switch paths, and start etcd daemon --name etcd --core \ @@ -90,9 +90,9 @@ function start_etcd() { -- {{ install_etcd_dir }}/etcd --config-file ${confPath} } -# 停止daemon进程和etcd +# Stop the daemon process and etcd function stop_etcd() { - # 判断是否已经通过daemon启动了etcd + # Determine if etcd has been started through daemon daemon --name etcd --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -112,7 +112,7 @@ function stop_etcd() { # restart function restart_etcd() { - # 判断是否已经通过daemon启动了etcd + # Determine if etcd has been started through daemon daemon --name etcd --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -127,7 +127,7 @@ function restart_etcd() { fi } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " etcd-daemon start -- start deamon process and watch on etcd process" @@ -139,7 +139,7 @@ function usage() { echo " etcd-daemon start -c /etcd/etcd.conf.yml -l ${HOME}/etcd.log" } -# 检查参数启动参数,最少1个 +# Check the startup parameters; at least 1 is required if [ $# -lt 1 ] then usage @@ -150,7 +150,7 @@ case $1 in "start") shift # pass first argument - # 解析参数 + # Parse the parameters while [[ $# -gt 1 ]] do key=$1 @@ -176,11 +176,11 @@ case $1 in start_etcd ;; "stop") - # 停止daemon和etcd进程 + # Stop the daemon and etcd processes stop_etcd ;; "restart") - # 重启etcd + # Restart etcd restart_etcd ;; *) diff --git a/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2 b/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2 index 6d69e6d47d..81f55b7ed7 100644 --- a/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2 +++ b/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2 @@ -1,12 +1,12 @@ #!/bin/bash -# curve-mds路径 +# curve-mds path curveBin={{ curve_bin_dir }}/curve-mds -# 默认配置文件 +# Default configuration file confPath={{ mds_config_path }} -# 日志文件路径 +# Log file path logPath={{ mds_log_dir }} # mdsAddr @@ -28,30 +28,30 @@ function ip_value() { }' } -# 启动mds +# Start mds function start_mds() { - # 检查daemon + # Check the daemon if ! type daemon &> /dev/null then echo "No daemon installed" exit 1 fi - # 检查curve-mds + # Check curve-mds if [ ! -f ${curveBin} ] then echo "No curve-mds installed" exit 1 fi - # 检查配置文件 + # Check configuration file if [ !
-f ${confPath} ] then echo "Not found mds.conf, Path is ${confPath}" exit 1 fi - # 判断是否已经通过daemon启动了curve-mds + # Determine if curve-mds has been started through daemon daemon --name curve-mds --pidfile ${pidFile} --running if [ $? -eq 0 ] then @@ -59,7 +59,7 @@ function start_mds() { exit fi - # 创建logPath + # Create logPath mkdir -p ${logPath} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -67,14 +67,14 @@ function start_mds() { exit 1 fi - # 检查logPath是否有写权限 + # Check if logPath has write permission if [ ! -w ${logPath} ] then echo "Write permission denied: ${logPath}" exit 1 fi - # 检查consoleLog是否可写或者是否能够创建 + # Check if consoleLog is writable or can be created touch ${consoleLog} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -82,7 +82,7 @@ function start_mds() { exit 1 fi - # 检查daemonLog是否可写或者是否能够创建 + # Check if the daemonLog is writable or can be created touch ${daemonLog} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -90,7 +90,7 @@ function start_mds() { exit 1 fi - # 未指定mdsAddr, 从配置文件中解析出网段 + # No mdsAddr specified; parse the network segment from the configuration file if [ -z ${mdsAddr} ] then subnet=`cat $confPath|grep global.subnet|awk -F"=" '{print $2}'` @@ -101,11 +101,11 @@ function start_mds() { ip= echo "subnet: $subnet" echo "port: $port" - # 对prefix再取一次模,为了支持10.182.26.50/22这种格式 + # Mask the prefix once more, to support formats like 10.182.26.50/22 prefix=$(($prefix&$mask)) for i in `/sbin/ifconfig -a|grep inet|grep -v inet6|awk '{print $2}'|tr -d "addr:"` do - # 把ip转换成整数 + # Convert IP to an integer ip_int=$(ip_value $i) if [ $(($ip_int&$mask)) -eq $prefix ] then @@ -132,9 +132,9 @@ function start_mds() { show_status } -# 停止daemon进程,且停止curve-mds +# Stop the daemon process and curve-mds function stop_mds() { - # 判断是否已经通过daemon启动了curve-mds + # Determine if curve-mds has been started through daemon daemon --name curve-mds --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -154,7 +154,7 @@ function stop_mds() { # restart function restart_mds() { - # 判断是否已经通过daemon启动了curve-mds + # Determine if curve-mds has been started through daemon daemon --name curve-mds --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -171,7 +171,7 @@ function restart_mds() { # show status function show_status() { - # 判断是否已经通过daemon启动了curve-mds + # Determine if curve-mds has been started through daemon daemon --name curve-mds --pidfile ${pidFile} --running if [ $?
-ne 0 ] then @@ -179,11 +179,11 @@ function show_status() { exit 1 fi - # 查询leader的IP + # Query the IP of the leader leaderAddr=`tac ${consoleLog}|grep -a -m 1 -B 1000000 "Logging before InitGoogleLogging()"|grep "leader"|grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}"|head -n1` - # 如果load mds configuration之后的日志,没有leader相关日志 - # 那么leaderAddr为空, mds应该没有起来 + # If there are no leader-related logs after 'load mds configuration' + # then leaderAddr is empty and mds is probably not up if [ -z ${leaderAddr} ] then echo "MDS may not start successfully, check log" @@ -194,7 +194,7 @@ function show_status() { then echo "Current MDS is LEADER" else - # 查询是否和自身ip相等 + # Check whether the leader address equals one of our own IPs for ip in `(hostname -I)` do if [ ${leaderAddr} = ${ip} ] @@ -208,7 +208,7 @@ function show_status() { fi } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " ./mds-daemon.sh start -- start deamon process and watch on curve-mds process" @@ -222,7 +222,7 @@ function usage() { echo " ./mds-daemon.sh start -c /etc/curve/mds.conf -l ${HOME}/ -a 127.0.0.1:6666" } -# 检查参数启动参数,最少1个 +# Check the startup parameters; at least 1 is required if [ $# -lt 1 ] then usage @@ -233,7 +233,7 @@ case $1 in "start") shift # pass first argument - # 解析参数 + # Parse the parameters while [[ $# -gt 1 ]] do key=$1 diff --git a/curve-ansible/roles/install_package/templates/nebd-daemon.j2 b/curve-ansible/roles/install_package/templates/nebd-daemon.j2 index 50bdc2a07e..d170963075 100644 --- a/curve-ansible/roles/install_package/templates/nebd-daemon.j2 +++ b/curve-ansible/roles/install_package/templates/nebd-daemon.j2 @@ -133,7 +133,7 @@ function stop_one() { return fi - # 判断是否已经通过daemon启动了nebd-server + # Determine if nebd-server has been started through daemon daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running if [ $? -ne 0 ]; then echo "$1: didn't start nebd-server by daemon" @@ -174,7 +174,7 @@ function restart_one() { return fi - # 判断是否已经通过daemon启动了nebd-server + # Determine if nebd-server has been started through daemon daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running if [ $? -ne 0 ]; then echo "$1: didn't start nebd-server by daemon" @@ -262,7 +262,7 @@ function status() { done } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " nebd-daemon start -- start deamon process and watch on nebd-server process for all instance" @@ -278,7 +278,7 @@ function usage() { echo " nebd-daemon status-one -- show if the nebd-server is running by daemon for current user's instance" } -# 检查参数启动参数,最少1个 +# Check the startup parameters; at least 1 is required if [ $# -lt 1 ]; then usage exit diff --git a/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2 b/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2 index 4d7edae130..169ff2b84d 100644 --- a/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2 +++ b/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2 @@ -1,12 +1,12 @@ #!/bin/bash -# curve-snapshotcloneserver路径 +# curve-snapshotcloneserver path curveBin={{ curve_bin_dir }}/curve-snapshotcloneserver -# 默认配置文件 +# Default configuration file confPath={{ snapshot_config_path }} -# 日志文件路径 +# Log file path logPath={{ snapshot_clone_server_log_dir }} # serverAddr @@ -28,30 +28,30 @@ function ip_value() { }' } -# 启动snapshotcloneserver +# Start snapshotcloneserver function start_server() { - # 检查daemon + # Check the daemon if !
type daemon &> /dev/null then echo "No daemon installed" exit fi - # 检查curve-snapshotcloneserver + # Check curve-snapshotcloneserver if [ ! -f ${curveBin} ] then echo "No curve-snapshotcloneserver installed, Path is ${curveBin}" exit fi - # 检查配置文件 + # Check configuration file if [ ! -f ${confPath} ] then echo "Not found snapshot_clone_server.conf, Path is ${confPath}" exit fi - # 判断是否已经通过daemon启动了curve-snapshotcloneserver + # Determine if curve-snapshotcloneserver has been started through daemon daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running if [ $? -eq 0 ] then @@ -59,7 +59,7 @@ function start_server() { exit fi - # 创建logPath + # Create logPath mkdir -p ${logPath} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -67,14 +67,14 @@ function start_server() { exit fi - # 检查logPath是否有写权限 + # Check if logPath has write permission if [ ! -w ${logPath} ] then echo "Write permission denied: ${logPath}" exit 1 fi - # 检查consoleLog是否可写或者能否创建,初始化glog之前的日志存放在这里 + # Check whether consoleLog is writable or can be created; logs written before glog is initialized are stored here touch ${consoleLog} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -82,7 +82,7 @@ function start_server() { exit fi - # 检查daemonLog是否可写或者是否能够创建 + # Check if the daemonLog is writable or can be created touch ${daemonLog} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -90,7 +90,7 @@ function start_server() { exit fi - # 未指定serverAddr, 从配置文件中解析出网段 + # No serverAddr specified; parse the network segment from the configuration file if [ -z ${serverAddr} ] then subnet=`cat $confPath|grep server.subnet|awk -F"=" '{print $2}'` @@ -101,11 +101,11 @@ function start_server() { ip= echo "subnet: $subnet" echo "port: $port" - # 对prefix再取一次模,为了支持10.182.26.50/22这种格式 + # Mask the prefix once more, to support formats like 10.182.26.50/22 prefix=$(($prefix&$mask)) for i in `/sbin/ifconfig -a|grep inet|grep -v inet6|awk '{print $2}'|tr -d "addr:"` do - # 把ip转换成整数 + # Convert IP to an integer ip_int=$(ip_value $i) if [ $(($ip_int&$mask)) -eq $prefix ] then @@ -132,9 +132,9 @@ function start_server() { show_status } -# 停止daemon进程和curve-snapshotcloneserver +# Stop the daemon process and curve-snapshotcloneserver function stop_server() { - # 判断是否已经通过daemon启动了curve-snapshotcloneserver + # Determine if curve-snapshotcloneserver has been started through daemon daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -154,7 +154,7 @@ function stop_server() { # restart function restart_server() { - # 判断是否已经通过daemon启动了curve-snapshotcloneserver + # Determine if curve-snapshotcloneserver has been started through daemon daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -171,7 +171,7 @@ function restart_server() { # show status function show_status() { - # 判断是否已经通过daemon启动了curve-snapshotcloneserver + # Determine if curve-snapshotcloneserver has been started through daemon daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running if [ $?
-ne 0 ] then @@ -179,11 +179,11 @@ function show_status() { exit 1 fi - # 查询leader的IP + # Query the IP of the leader leaderAddr=`tac ${consoleLog}|grep -a -m 1 -B 1000000 "Logging before InitGoogleLogging()"|grep "leader"|grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}"|head -n1` - # 如果load configuration之后的日志,没有leader相关日志 - # 那么leaderAddr为空, snapshotcloneserver应该没有起来 + # If there are no leader-related logs after 'load configuration' + # then leaderAddr is empty and snapshotcloneserver is probably not up if [ -z ${leaderAddr} ] then echo "SnapshotClone may not start successfully, check log" @@ -194,7 +194,7 @@ function show_status() { then echo "Current SnapshotClone is LEADER" else - # 查询是否和自身ip相等 + # Check whether the leader address equals one of our own IPs for ip in `(hostname -I)` do if [ ${leaderAddr} = ${ip} ] @@ -208,7 +208,7 @@ function show_status() { fi } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " snapshot-daemon start -- start deamon process and watch on curve-snapshotcloneserver process" @@ -222,7 +222,7 @@ function usage() { echo " snapshot-daemon start -c /etc/curve/snapshot_clone_server.conf -l ${HOME}/ -a 127.0.0.1:5555" } -# 检查参数启动参数,最少1个 +# Check the startup parameters; at least 1 is required if [ $# -lt 1 ] then usage @@ -233,7 +233,7 @@ case $1 in "start") shift # pass first argument - # 解析参数 + # Parse the parameters while [[ $# -gt 1 ]] do key=$1 diff --git a/curve-ansible/roles/install_package/vars/main.yml b/curve-ansible/roles/install_package/vars/main.yml index ee545c1d7b..8967883b7c 100644 --- a/curve-ansible/roles/install_package/vars/main.yml +++ b/curve-ansible/roles/install_package/vars/main.yml @@ -15,7 +15,7 @@ # limitations under the License. # -# 包的名称 +# The name of the package package_name: package_version: lib_installed: false diff --git a/curve-ansible/roles/restart_service/defaults/main.yml b/curve-ansible/roles/restart_service/defaults/main.yml index 061c32a4ec..0051d42ecc 100644 --- a/curve-ansible/roles/restart_service/defaults/main.yml +++ b/curve-ansible/roles/restart_service/defaults/main.yml @@ -16,7 +16,7 @@ # check_health: False -# 启动一个chunkserver需要的最大时间 +# The maximum time required to start a chunkserver restart_chunkserver_async: 100 restart_chunkserver_check_delay: 5 restart_chunkserver_check_times: 20 diff --git a/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml b/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml index d74b05abc7..6b3050bb01 100644 --- a/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml +++ b/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml @@ -17,7 +17,7 @@ - name: Determine need restart or not block: - # 获取mds的版本 + # Obtain the version of mds - name: get curve version vars: metric_port: "{{ mds_dummy_port }}" diff --git a/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml b/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml index 73f6bcf636..966d9b95d6 100644 --- a/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml +++ b/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml @@ -17,7 +17,7 @@ - name: Determine need restart or not block: - # 获取snapshotclone的版本 + # Obtain the version of snapshotclone - name: get snapshotclone version vars: metric_port: "{{ snapshot_dummy_port }}" diff --git a/curve-ansible/roles/restart_service/tasks/main.yml b/curve-ansible/roles/restart_service/tasks/main.yml index befb68b5b3..a8b077a3a4 100644 ---
a/curve-ansible/roles/restart_service/tasks/main.yml +++ b/curve-ansible/roles/restart_service/tasks/main.yml @@ -15,6 +15,6 @@ # limitations under the License. # -# 重启对应的服务 +# Restart the corresponding service - name: restart_service include_tasks: "include/restart_{{ service_name }}.yml" diff --git a/curve-ansible/roles/restart_service/vars/main.yml b/curve-ansible/roles/restart_service/vars/main.yml index 94f0bad0c6..44f7d6797e 100644 --- a/curve-ansible/roles/restart_service/vars/main.yml +++ b/curve-ansible/roles/restart_service/vars/main.yml @@ -15,7 +15,7 @@ # limitations under the License. # -# 服务的名称 +# Name of the service service_name: need_restart: true sudo: "" diff --git a/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml b/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml index 82478df03e..f2a67fdba1 100644 --- a/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml +++ b/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml @@ -1,4 +1,4 @@ -# 服务的名称 +# Name of the service service_name: leader_ip: all_ip: diff --git a/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml b/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml index 25fecb2337..32602a56cd 100644 --- a/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml +++ b/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml @@ -27,7 +27,7 @@ poll: "{{ service_poll }}" failed_when: start_chunkserver_res.rc != 0 or "down" in start_chunkserver_res.stdout -# 打印控制台输出 +# Print the console output - name: print console output debug: var: start_chunkserver_res.stdout_lines diff --git a/curve-ansible/roles/start_service/tasks/main.yml b/curve-ansible/roles/start_service/tasks/main.yml index 483dfd5d9a..be93405394 100644 --- a/curve-ansible/roles/start_service/tasks/main.yml +++ b/curve-ansible/roles/start_service/tasks/main.yml @@ -15,6 +15,6 @@ # limitations under the License. # -# 启动对应的服务 +# Start the corresponding service - name: start_service include_tasks: "include/start_{{ service_name }}.yml" diff --git a/curve-ansible/roles/stop_service/tasks/main.yml b/curve-ansible/roles/stop_service/tasks/main.yml index 0b2bbb486e..d3b8cbd018 100644 --- a/curve-ansible/roles/stop_service/tasks/main.yml +++ b/curve-ansible/roles/stop_service/tasks/main.yml @@ -15,6 +15,6 @@ # limitations under the License. # -# 启动对应的服务 +# Stop the corresponding service - name: stop_service include_tasks: "include/stop_{{ service_name }}.yml" diff --git a/curve-ansible/rolling_update_curve.yml b/curve-ansible/rolling_update_curve.yml index fddd6832bf..61949f9f8f 100644 --- a/curve-ansible/rolling_update_curve.yml +++ b/curve-ansible/rolling_update_curve.yml @@ -83,7 +83,7 @@ - { role: generate_config, template_name: topo.json, conf_path: "{{ topo_file_path }}", tags: ["generate_config", "generage_topo_json"] } -# 获取leader节点和follower节点 +# Obtain the leader and follower nodes - name: set mds leader and follower list hosts: mds gather_facts: no @@ -95,7 +95,7 @@ roles: - { role: set_leader_and_follower_list, service_name: mds } -# 按顺序先升级follower节点,再升级leader节点 +# Upgrade the follower nodes first, then the leader node - name: update follower and leader server in sequence hosts: mds_servers_followers, mds_servers_leader any_errors_fatal: true @@ -110,14 +110,14 @@ - pause: prompt: "Confirm restart mds in {{ inventory_hostname }}. 
ENTER to continue or CTRL-C A to quit" when: need_confirm | bool - # 重启mds + # Restart mds roles: - { role: restart_service, service_name: mds, expected_curve_version: "{{ mds_package_version }}", command_need_sudo: "{{ mds_need_sudo | bool }}"} ############################## rolling update chunkserver ############################## -# 1. 更新各节点上的配置 +# 1. Update the configuration on each node - name: prepare chunkserver hosts: chunkservers any_errors_fatal: true @@ -136,8 +136,8 @@ - { role: generate_config, template_name: s3.conf, conf_path: "{{ chunkserver_s3_config_path }}", tags: ["generate_config", "generage_cs_s3_conf"] } -# 逐个重启chunkserver,每重启完一个需要等待copyset健康 -# 继续操作下一个的的时候还需要一个命令行交互确认 +# Restart the chunkservers one by one, waiting for the copysets to become healthy after each restart +# A command-line confirmation is also required before moving on to the next one - name: restart chunkserver and wait healthy hosts: chunkservers any_errors_fatal: true @@ -203,7 +203,7 @@ - pause: prompt: "Confirm restart snapshotclone in {{ inventory_hostname }}. ENTER to continue or CTRL-C A to quit" when: need_confirm | bool - # 重启snapshot clone + # Restart snapshot clone roles: - { role: restart_service, service_name: snapshotclone, expected_curve_version: "{{ snapshot_package_version }}", command_need_sudo: "{{ snapshot_need_sudo | bool }}" } diff --git a/curve-ansible/server.ini b/curve-ansible/server.ini index eaca5a4515..7e06fbe105 100644 --- a/curve-ansible/server.ini +++ b/curve-ansible/server.ini @@ -14,8 +14,8 @@ localhost ansible_ssh_host=127.0.0.1 [zone1] localhost ansible_ssh_host=127.0.0.1 -# 请确保zone内机器数量一致,如果有多个zone,则在上面根据zone1格式增加zone2,zone3...即可。 -# 如果zone下面有多个机器,则换行一起列出来即可。比如: +# Please ensure every zone has the same number of machines. If there are multiple zones, add zone2, zone3... above, following the zone1 format. +# If there are multiple machines in a zone, list them one per line. 
For example: # [zone1] # localhost ansible_ssh_host=127.0.0.1 # localhost2 ansible_ssh_host=127.0.0.2 @@ -32,7 +32,7 @@ mds_subnet=127.0.0.1/22 defined_healthy_status="cluster is healthy" mds_package_version="0.0.6.1+160be351" tool_package_version="0.0.6.1+160be351" -# 启动命令是否用sudo +# Whether to use sudo for the startup command mds_need_sudo=True mds_config_path=/etc/curve/mds.conf mds_log_dir=/data/log/curve/mds @@ -90,7 +90,7 @@ chunkserver_subnet=127.0.0.1/22 global_enable_external_server=True chunkserver_external_subnet=127.0.0.1/22 chunkserver_s3_config_path=/etc/curve/cs_s3.conf -# chunkserver使用的client相关的配置 +# Client-related configuration used by the chunkserver chunkserver_client_config_path=/etc/curve/cs_client.conf client_register_to_mds=False client_chunkserver_op_max_retry=3 @@ -149,10 +149,10 @@ sudo_or_not=True ansible_become_user=curve ansible_become_flags=-iu curve update_config_with_puppet=False -# 启动服务要用到ansible的异步操作,否则ansible退出后chunkserver也会退出 -# 异步等待结果的总时间 +# Starting the service relies on ansible's asynchronous operation, otherwise the chunkserver would exit when ansible exits +# Total time to wait for the asynchronous result service_async=5 -# 异步查询结果的间隔 +# Polling interval for the asynchronous result service_poll=1 install_with_deb=False restart_directly=False diff --git a/curvefs/conf/curvebs_client.conf b/curvefs/conf/curvebs_client.conf index e0eb4d70f2..23fc37b087 100644 --- a/curvefs/conf/curvebs_client.conf +++ b/curvefs/conf/curvebs_client.conf @@ -1,29 +1,29 @@ # -################### mds一侧配置信息 ################## +################### MDS-side configuration ################## # -# mds的地址信息,对于mds集群,地址以逗号隔开 +# Address of the mds; for an mds cluster, the addresses are separated by commas mds.listen.addr=127.0.0.1:6666 -# 初始化阶段向mds注册开关,默认为开 +# Whether to register with the mds during initialization; on by default mds.registerToMDS=true -# 与mds通信的rpc超时时间 +# RPC timeout for communication with mds mds.rpcTimeoutMS=500 -# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值 +# Maximum rpc timeout for communication with mds; the exponential-backoff timeout cannot exceed this value mds.maxRPCTimeoutMS=2000 -# 与mds通信重试总时间 +# Total retry time for communication with mds mds.maxRetryMS=8000 -# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数 +# Switch mds when the number of consecutive retries on the current one exceeds this limit; the count includes timeout retries mds.maxFailedTimesBeforeChangeMDS=2 -# 与MDS一侧保持一个lease时间内多少次续约 +# Number of lease renewals within one lease period with the MDS mds.refreshTimesPerLease=4 -# mds RPC接口每次重试之前需要先睡眠一段时间 +# Sleep for a while before each retry of an mds RPC mds.rpcRetryIntervalUS=100000 # The normal retry times for trigger wait strategy @@ -36,123 +36,123 @@ mds.maxRetryMsInIOPath=86400000 mds.waitSleepMs=10000 # -################# metacache配置信息 ################ +################# metacache configuration ################ # -# 获取leader的rpc超时时间 +# RPC timeout for getting the leader metacache.getLeaderTimeOutMS=500 -# 获取leader的重试次数 +# Number of retries for getting the leader metacache.getLeaderRetry=5 -# 获取leader接口每次重试之前需要先睡眠一段时间 +# Sleep for a while before each retry of the get-leader call metacache.rpcRetryIntervalUS=100000 # -############### 调度层的配置信息 ############# +############### Scheduling-layer configuration ############# # -# 调度层队列大小,每个文件对应一个队列 -# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。 +# Scheduling-layer queue size, one queue per file +# The queue depth
affects the client's overall throughput; this queue holds asynchronous IO tasks. schedule.queueCapacity=1000000 -# 队列的执行线程数量 -# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从 -# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候 -# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w -# 性能已经满足需求 +# Number of worker threads for the queue +# A worker thread simply takes an IO task off the queue, sends it to the network, and returns to take the next task. From dequeue to +# finishing the rpc send, a task takes roughly 20us-100us: 20us in the normal case where no leader lookup is needed, +# and around 100us when the leader has to be fetched during the send. One thread therefore sustains roughly 100k-500k requests, +# which already meets the performance requirements schedule.threadpoolSize=2 -# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程 -# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回, -# 这样libcurve不占用qemu的线程,不阻塞其异步调用 +# Task queue introduced to isolate the qemu-side thread, since qemu has only one IO thread +# When qemu calls the aio interface, the call is simply pushed onto the task queue and returns immediately, +# so libcurve does not occupy qemu's thread and does not block its asynchronous calls isolation.taskQueueCapacity=1000000 -# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程 +# Thread-pool size of the task queue isolating qemu threads; defaults to 1 thread isolation.taskThreadPoolSize=1 # -################ 与chunkserver通信相关配置 ############# +################ Configuration related to communication with chunkserver ############# # -# 读写接口失败的OP之间重试睡眠 +# Sleep between retries of failed read/write OPs chunkserver.opRetryIntervalUS=100000 -# 失败的OP重试次数 +# Maximum number of retries for a failed OP chunkserver.opMaxRetry=2500000 -# 与chunkserver通信的rpc超时时间 +# RPC timeout for communication with chunkserver chunkserver.rpcTimeoutMS=1000 -# 开启基于appliedindex的读,用于性能优化 +# Enable reads based on appliedindex for performance optimization chunkserver.enableAppliedIndexRead=1 -# 重试请求之间睡眠最长时间 -# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间 -# 这个时间最大为maxRetrySleepIntervalUs +# Maximum sleep time between retried requests +# When the network is congested or the chunkserver is overloaded, the sleep time needs to grow +# It is capped at maxRetrySleepIntervalUs chunkserver.maxRetrySleepIntervalUS=8000000 -# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略 -# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间 -# 这个时间最大为maxTimeoutMS +# Maximum rpc timeout for retried requests; the timeout follows an exponential backoff strategy +# Timeouts occur when the network is congested, so the RPC timeout needs to grow +# It is capped at maxTimeoutMS chunkserver.maxRPCTimeoutMS=8000 -# 同一个chunkserver连续超时上限次数 -# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable +# Maximum number of consecutive timeouts for the same chunkserver +# If this value is exceeded, a health check is performed, and if the health check fails, the chunkserver is marked as unstable chunkserver.maxStableTimeoutTimes=10 -# chunkserver上rpc连续超时后,健康检查请求的超时间 +# Timeout of the health-check request issued after consecutive rpc timeouts on a chunkserver chunkserver.checkHealthTimeoutMs=100 -# 同一个server上unstable的chunkserver数量超过这个值之后 -# 所有的chunkserver都会标记为unstable +# After the number of unstable chunkservers on the same server exceeds this value +# all chunkservers on that server are marked as unstable chunkserver.serverStableThreshold=3
-# 当底层chunkserver压力大时,可能也会触发unstable -# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang -# 真正宕机的情况下,请求重试一定次数后会处理完成 -# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑 -# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避 +# When the underlying chunkserver is under heavy pressure, unstable may also be triggered +# Since the copyset leader may change, the request timeout can be reset to its default value, causing IO to hang +# In a real outage, a request completes after a bounded number of retries +# If retries keep going on, it is not an outage, and the timeout should still enter the exponential backoff logic +# Once a request has been retried more times than this value, its timeout always enters exponential backoff chunkserver.minRetryTimesForceTimeoutBackoff=5 -# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候 -# 记为悬挂IO,metric会报警 +# When an rpc has been retried more than maxRetryTimesBeforeConsiderSuspend times +# it is recorded as suspended IO and the metric raises an alarm chunkserver.maxRetryTimesBeforeConsiderSuspend=20 # -################# 文件级别配置项 ############# +################# File-level configuration items ############# # -# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立 +# Maximum number of outstanding rpcs allowed by libcurve's underlying rpc scheduling; each file's inflight RPCs are counted independently global.fileMaxInFlightRPCNum=128 -# 文件IO下发到底层chunkserver最大的分片KB +# Maximum split size (KB) when file IO is dispatched to the underlying chunkserver global.fileIOSplitMaxSizeKB=64 # -################# log相关配置 ############### +################# Log-related configuration ############### # # enable logging or not global.logging.enable=True # -# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.logLevel=0 -# 设置log的路径 +# Set the log path global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ /curvebs/client/logs __CURVEADM_TEMPLATE__ -# 单元测试情况下 +# For unit tests # logpath=./runlog/ # -################# 读源卷相关配置 ############### +################# Configuration related to reading source volumes ############### # -# 读取源卷时打开的fd超时关闭时间300s +# fds opened while reading a source volume are closed after a 300s timeout closefd.timeout=300 -# 读取源卷时打开的fd后台线程每600s扫描一遍fdMap,关闭超时fd +# A background thread scans the fdMap every 600s and closes the timed-out fds opened for reading source volumes closefd.timeInterval=600 # -############### metric 配置信息 ############# +############### metric configuration ############# # global.metricDummyServerStartPort=9000 -# 是否关闭健康检查: true/关闭 false/不关闭 +# Whether to disable the health check: true = disable, false = do not disable global.turnOffHealthCheck=true #
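Taken together, the retry knobs above describe a capped exponential backoff: the per-RPC timeout only starts doubling once a request has been retried minRetryTimesForceTimeoutBackoff times (so a genuinely crashed chunkserver is not mistaken for congestion), and both the timeout and the sleep between retries are clamped by their maximum values. A simplified Python sketch of that policy, not the client's actual code, using the defaults above:

    def next_retry_params(retry,
                          rpc_timeout_ms=1000, max_rpc_timeout_ms=8000,
                          retry_interval_us=100000, max_sleep_us=8000000,
                          min_retries_for_backoff=5):
        # The timeout backs off exponentially only after enough retries to
        # rule out a plain crash (chunkserver.minRetryTimesForceTimeoutBackoff).
        timeout_ms = rpc_timeout_ms
        if retry >= min_retries_for_backoff:
            timeout_ms = min(rpc_timeout_ms << (retry - min_retries_for_backoff + 1),
                             max_rpc_timeout_ms)
        # The sleep between retries backs off too, capped separately.
        sleep_us = min(retry_interval_us << retry, max_sleep_us)
        return timeout_ms, sleep_us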
diff --git a/curvefs/monitor/grafana-report.py b/curvefs/monitor/grafana-report.py index 016473a509..16f8ce65cd 100644 --- a/curvefs/monitor/grafana-report.py +++ b/curvefs/monitor/grafana-report.py @@ -13,17 +13,18 @@ sender = 'Grafana' to_address = ['xxxxxxxxx@163.com'] username = 'xxxxxxxxx@163.com' -password = 'xxxxxxxxx' # SMTP授权码 +password = 'xxxxxxxxx' # SMTP authorization code smtpserver = 'xxxx.163.com:1234' -sourcefile= '/etc/curvefs/monitor/grafana/report/report.tex' -imagedir= '/etc/curvefs/monitor/grafana/report/images/' -pdfpath= '/etc/curvefs/monitor/grafana/report/report.pdf' +sourcefile = '/etc/curvefs/monitor/grafana/report/report.tex' +imagedir = '/etc/curvefs/monitor/grafana/report/images/' +pdfpath = '/etc/curvefs/monitor/grafana/report/report.pdf' clustername = '【CURVE】xxxxxxxxx' grafanauri = '127.0.0.1:3000' reporteruri = '127.0.0.1:8686' dashboardid = 'xxxxxxxxx' apitoken = 'xxxxxxxxx' + def get_images(): image_name_list = [] file = open(sourcefile, 'r') @@ -32,16 +33,17 @@ def get_images(): # print (line) prefix_image_name = re.findall(r'image\d+', line) if prefix_image_name: - print (prefix_image_name) + print(prefix_image_name) image_name_list.append(prefix_image_name[0]) line = file.readline() file.close() return image_name_list + def getMsgImage(image_name): file_name = imagedir+image_name+'.png' - print (file_name) + print(file_name) fp = open(file_name, 'rb') msgImage = MIMEImage(fp.read()) fp.close() @@ -49,6 +51,7 @@ msgImage.add_header("Content-Disposition", "inline", filename=file_name) return msgImage + def attach_body(msgRoot): image_list = get_images()
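getMsgImage and attach_body follow the standard multipart/related pattern: each PNG is attached with a Content-ID, and the HTML body references it as an inline cid image. A self-contained sketch of that idea (the file name is hypothetical, not from this repo):

    from email.mime.image import MIMEImage
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    def build_inline_image_mail(png_path="chart.png"):
        root = MIMEMultipart('related')
        # The HTML part refers to the attached image through its Content-ID.
        root.attach(MIMEText(
            '<html><body><img src="cid:chart0"></body></html>',
            'html', 'utf-8'))
        with open(png_path, 'rb') as fp:
            img = MIMEImage(fp.read())
        img.add_header('Content-ID', '<chart0>')
        img.add_header('Content-Disposition', 'inline', filename=png_path)
        root.attach(img)
        return root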

@@ -57,36 +60,41 @@
         image_body += ('<h2>%s</h2><img src="cid:%s">' % (image, image))
         msgRoot.attach(getMsgImage(image))
 
-    html_str = '<html><body>%s</body></html>' % (image_body)
+    html_str = '<html><body>%s</body></html>' % (
+        image_body)
 
     mailMsg = """
-        <p>可点击如下链接在grafana面板中查看(若显示混乱,请在附件pdf中查看)</p>
-        <p><a href="http://%s">grafana链接</a></p>
+        <p>You can click the following link to view the dashboard in Grafana (if displayed incorrectly, please check the attached PDF).</p>
+        <p><a href="http://%s">grafana link</a></p>
""" % (grafanauri) mailMsg += html_str print(mailMsg) - content = MIMEText(mailMsg,'html','utf-8') + content = MIMEText(mailMsg, 'html', 'utf-8') msgRoot.attach(content) -# 发送dashboard日报邮件 +# Sending dashboard daily report email + + def send_mail(): time_now = int(Time.time()) time_local = Time.localtime(time_now) - dt = Time.strftime("%Y%m%d",time_local) + dt = Time.strftime("%Y%m%d", time_local) msgRoot = MIMEMultipart('related') - msgRoot['Subject'] = '%s集群监控日报-%s' % (clustername, dt) + msgRoot['Subject'] = '%sCluster Monitoring Daily Report-%s' % ( + clustername, dt) msgRoot['From'] = sender - msgRoot['To'] = ",".join( to_address ) # 发给多人 + msgRoot['To'] = ",".join(to_address) # Send to multiple recipients - # 添加pdf附件 + # Add PDF attachment pdf_attach = MIMEText(open(pdfpath, 'rb').read(), 'base64', 'utf-8') pdf_attach["Content-Type"] = 'application/octet-stream' - # 这里的filename可以任意写,写什么名字,邮件中显示什么名字 - pdf_attach["Content-Disposition"] = 'attachment; filename="reporter-{}.pdf"'.format(dt) + # The filename here can be anything, whatever name is written will be displayed in the email + pdf_attach["Content-Disposition"] = 'attachment; filename="reporter-{}.pdf"'.format( + dt) msgRoot.attach(pdf_attach) - # 添加正文 + # Add the body attach_body(msgRoot) smtp = smtplib.SMTP_SSL(smtpserver) @@ -94,11 +102,13 @@ def send_mail(): smtp.sendmail(sender, to_address, msgRoot.as_string()) smtp.quit() + def clear(): shutil.rmtree(imagedir) os.mkdir(imagedir) os.chmod(imagedir, 0777) + def generate_report(): downloadcmd = ( "wget -O %s " @@ -108,10 +118,12 @@ def generate_report(): print(downloadcmd) os.system(downloadcmd) + def main(): generate_report() send_mail() clear() + if __name__ == '__main__': main() diff --git a/curvefs/monitor/grafana/provisioning/dashboards/mds.json b/curvefs/monitor/grafana/provisioning/dashboards/mds.json index 09de6b31f7..a90a8f13c0 100644 --- a/curvefs/monitor/grafana/provisioning/dashboards/mds.json +++ b/curvefs/monitor/grafana/provisioning/dashboards/mds.json @@ -290,7 +290,7 @@ { "columns": [], "datasource": null, - "description": "mds的配置", + "description": "Configuration of MDS", "fieldConfig": { "defaults": { "custom": { @@ -336,7 +336,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -352,7 +352,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -368,7 +368,7 @@ "unit": "short" }, { - "alias": "配置值", + "alias": "Configuration value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", diff --git a/curvefs/src/mds/metaserverclient/metaserver_client.cpp b/curvefs/src/mds/metaserverclient/metaserver_client.cpp index 739704f62a..f9b1278562 100644 --- a/curvefs/src/mds/metaserverclient/metaserver_client.cpp +++ b/curvefs/src/mds/metaserverclient/metaserver_client.cpp @@ -21,6 +21,7 @@ */ #include "curvefs/src/mds/metaserverclient/metaserver_client.h" + #include #include @@ -28,30 +29,30 @@ namespace curvefs { namespace mds { -using curvefs::metaserver::Time; -using curvefs::metaserver::CreateRootInodeRequest; -using curvefs::metaserver::CreateRootInodeResponse; -using curvefs::metaserver::CreateManageInodeRequest; -using curvefs::metaserver::CreateManageInodeResponse; +using curvefs::mds::topology::BuildPeerIdWithAddr; +using curvefs::mds::topology::SplitPeerId; using curvefs::metaserver::CreateDentryRequest; using curvefs::metaserver::CreateDentryResponse; +using curvefs::metaserver::CreateManageInodeRequest; 
+using curvefs::metaserver::CreateManageInodeResponse; +using curvefs::metaserver::CreateRootInodeRequest; +using curvefs::metaserver::CreateRootInodeResponse; using curvefs::metaserver::DeleteDentryRequest; using curvefs::metaserver::DeleteDentryResponse; using curvefs::metaserver::DeleteInodeRequest; using curvefs::metaserver::DeleteInodeResponse; +using curvefs::metaserver::Dentry; using curvefs::metaserver::MetaServerService_Stub; using curvefs::metaserver::MetaStatusCode; -using curvefs::metaserver::Dentry; +using curvefs::metaserver::Time; using curvefs::metaserver::copyset::COPYSET_OP_STATUS; using curvefs::metaserver::copyset::CopysetService_Stub; -using curvefs::mds::topology::SplitPeerId; -using curvefs::mds::topology::BuildPeerIdWithAddr; template FSStatusCode MetaserverClient::SendRpc2MetaServer( - Request *request, Response *response, const LeaderCtx &ctx, - void (T::*func)(google::protobuf::RpcController *, const Request *, - Response *, google::protobuf::Closure *)) { + Request* request, Response* response, const LeaderCtx& ctx, + void (T::*func)(google::protobuf::RpcController*, const Request*, Response*, + google::protobuf::Closure*)) { bool refreshLeader = true; uint32_t maxRetry = options_.rpcRetryTimes; @@ -110,14 +111,14 @@ FSStatusCode MetaserverClient::SendRpc2MetaServer( } } -FSStatusCode MetaserverClient::GetLeader(const LeaderCtx &ctx, - std::string *leader) { +FSStatusCode MetaserverClient::GetLeader(const LeaderCtx& ctx, + std::string* leader) { GetLeaderRequest2 request; GetLeaderResponse2 response; request.set_poolid(ctx.poolId); request.set_copysetid(ctx.copysetId); - for (const std::string &item : ctx.addrs) { + for (const std::string& item : ctx.addrs) { LOG(INFO) << "GetLeader from " << item; if (channel_.Init(item.c_str(), nullptr) != 0) { LOG(ERROR) << "Init channel to metaserver: " << item << " failed!"; @@ -162,7 +163,7 @@ FSStatusCode MetaserverClient::GetLeader(const LeaderCtx &ctx, FSStatusCode MetaserverClient::CreateRootInode( uint32_t fsId, uint32_t poolId, uint32_t copysetId, uint32_t partitionId, uint32_t uid, uint32_t gid, uint32_t mode, - const std::set &addrs) { + const std::set& addrs) { CreateRootInodeRequest request; CreateRootInodeResponse response; request.set_poolid(poolId); @@ -213,7 +214,7 @@ FSStatusCode MetaserverClient::CreateRootInode( FSStatusCode MetaserverClient::CreateManageInode( uint32_t fsId, uint32_t poolId, uint32_t copysetId, uint32_t partitionId, uint32_t uid, uint32_t gid, uint32_t mode, ManageInodeType manageType, - const std::set &addrs) { + const std::set& addrs) { CreateManageInodeRequest request; CreateManageInodeResponse response; request.set_poolid(poolId); @@ -259,14 +260,14 @@ FSStatusCode MetaserverClient::CreateManageInode( FSStatusCode MetaserverClient::CreateDentry( uint32_t fsId, uint32_t poolId, uint32_t copysetId, uint32_t partitionId, - uint64_t parentInodeId, const std::string &name, uint64_t inodeId, - const std::set &addrs) { + uint64_t parentInodeId, const std::string& name, uint64_t inodeId, + const std::set& addrs) { CreateDentryRequest request; CreateDentryResponse response; request.set_poolid(poolId); request.set_copysetid(copysetId); request.set_partitionid(partitionId); - Dentry *d = new Dentry; + Dentry* d = new Dentry; d->set_fsid(fsId); d->set_inodeid(inodeId); d->set_parentinodeid(parentInodeId); @@ -276,7 +277,7 @@ FSStatusCode MetaserverClient::CreateDentry( request.set_allocated_dentry(d); struct timespec now; clock_gettime(CLOCK_REALTIME, &now); - Time *tm = new Time(); + Time* tm = 
new Time(); tm->set_sec(now.tv_sec); tm->set_nsec(now.tv_nsec); request.set_allocated_create(tm); @@ -309,11 +310,10 @@ FSStatusCode MetaserverClient::CreateDentry( } } -FSStatusCode -MetaserverClient::DeleteDentry(uint32_t poolId, uint32_t copysetId, - uint32_t partitionId, uint32_t fsId, - uint64_t parentInodeId, const std::string &name, - const std::set &addrs) { +FSStatusCode MetaserverClient::DeleteDentry( + uint32_t poolId, uint32_t copysetId, uint32_t partitionId, uint32_t fsId, + uint64_t parentInodeId, const std::string& name, + const std::set& addrs) { DeleteDentryRequest request; DeleteDentryResponse response; request.set_poolid(poolId); @@ -342,13 +342,14 @@ MetaserverClient::DeleteDentry(uint32_t poolId, uint32_t copysetId, return ret; } else { switch (response.statuscode()) { - case MetaStatusCode::OK: - return FSStatusCode::OK; - default: - LOG(ERROR) << "DeleteDentry failed, request = " - << request.ShortDebugString() - << ", response statuscode = " << response.statuscode(); - return FSStatusCode::DELETE_DENTRY_FAIL; + case MetaStatusCode::OK: + return FSStatusCode::OK; + default: + LOG(ERROR) << "DeleteDentry failed, request = " + << request.ShortDebugString() + << ", response statuscode = " + << response.statuscode(); + return FSStatusCode::DELETE_DENTRY_FAIL; } } } @@ -372,7 +373,7 @@ FSStatusCode MetaserverClient::DeleteInode(uint32_t fsId, uint64_t inodeId) { request.set_partitionid(0); request.set_fsid(fsId); request.set_inodeid(inodeId); - // TODO(@威姐): 适配新的proto + // TODO(@ Wei Jie): Adapt to the new proto request.set_copysetid(1); request.set_poolid(1); request.set_partitionid(1); @@ -398,10 +399,10 @@ FSStatusCode MetaserverClient::DeleteInode(uint32_t fsId, uint64_t inodeId) { FSStatusCode MetaserverClient::CreatePartition( uint32_t fsId, uint32_t poolId, uint32_t copysetId, uint32_t partitionId, - uint64_t idStart, uint64_t idEnd, const std::set &addrs) { + uint64_t idStart, uint64_t idEnd, const std::set& addrs) { curvefs::metaserver::CreatePartitionRequest request; curvefs::metaserver::CreatePartitionResponse response; - PartitionInfo *partition = request.mutable_partition(); + PartitionInfo* partition = request.mutable_partition(); partition->set_fsid(fsId); partition->set_poolid(poolId); partition->set_copysetid(copysetId); @@ -448,7 +449,7 @@ FSStatusCode MetaserverClient::CreatePartition( FSStatusCode MetaserverClient::DeletePartition( uint32_t poolId, uint32_t copysetId, uint32_t partitionId, - const std::set &addrs) { + const std::set& addrs) { curvefs::metaserver::DeletePartitionRequest request; curvefs::metaserver::DeletePartitionResponse response; request.set_poolid(poolId); @@ -489,8 +490,8 @@ FSStatusCode MetaserverClient::DeletePartition( } } -FSStatusCode MetaserverClient::CreateCopySet(uint32_t poolId, - uint32_t copysetId, const std::set &addrs) { +FSStatusCode MetaserverClient::CreateCopySet( + uint32_t poolId, uint32_t copysetId, const std::set& addrs) { CreateCopysetRequest request; CreateCopysetResponse response; auto copyset = request.add_copysets(); @@ -500,7 +501,7 @@ FSStatusCode MetaserverClient::CreateCopySet(uint32_t poolId, copyset->add_peers()->set_address(BuildPeerIdWithAddr(item)); } - for (const std::string &item : addrs) { + for (const std::string& item : addrs) { if (channel_.Init(item.c_str(), nullptr) != 0) { LOG(ERROR) << "Init channel to metaserver: " << item << " failed!"; return FSStatusCode::RPC_ERROR; @@ -544,7 +545,7 @@ FSStatusCode MetaserverClient::CreateCopySet(uint32_t poolId, } FSStatusCode 
MetaserverClient::CreateCopySetOnOneMetaserver( - uint32_t poolId, uint32_t copysetId, const std::string &addr) { + uint32_t poolId, uint32_t copysetId, const std::string& addr) { CreateCopysetRequest request; CreateCopysetResponse response; diff --git a/curvefs/src/metaserver/copyset/conf_epoch_file.h b/curvefs/src/metaserver/copyset/conf_epoch_file.h index abe14f2f8b..ff3953b080 100644 --- a/curvefs/src/metaserver/copyset/conf_epoch_file.h +++ b/curvefs/src/metaserver/copyset/conf_epoch_file.h @@ -41,28 +41,30 @@ class ConfEpochFile { explicit ConfEpochFile(curve::fs::LocalFileSystem* fs) : fs_(fs) {} /** - * 加载快照文件中的配置版本 - * @param path:文件路径 - * @param logicPoolID:逻辑池id - * @param copysetID:复制组id - * @param epoch:配置版本,出参,返回读取的epoch值 - * @return 0,成功; -1失败 + * Load the configuration version from the snapshot file + * @param path: File path + * @param poolId: Logical pool ID + * @param copysetId: Copyset ID + * @param epoch: Configuration version (output parameter); receives the + * epoch value read + * @return 0 on success; -1 on failure */ int Load(const std::string& path, PoolId* poolId, CopysetId* copysetId, uint64_t* epoch); /** - * 保存配置版本信息到快照文件中序列化的格式如下,处理head表示长度,使用二 - * 进制,其它都是文本格式,便于必要的时候能够直接用查看,sync保证数据落盘 - * | head | 配置版本信息 | - * | 8 bytes size_t | uint32_t | 变 长文本 | - * | length | crc32 | logic pool id | copyset id | epoch | - * 上面的持久化使用 ‘:’ 分隔 - * @param path:文件路径 - * @param logicPoolID:逻辑池id - * @param copysetID:复制组id - * @param epoch:配置版本 - * @return 0成功; -1失败 + * Serialize the configuration version information and save it to the + * snapshot file. The format is as follows: the head carries the length and + * is binary, everything else is text so the file can be inspected directly + * when necessary; sync guarantees the data reaches the disk. + * | head                     | Configuration version information | + * | 8 bytes size_t | uint32_t | variable-length text             | + * | length         | crc32    | logic pool id | copyset id | epoch | + * The persisted fields above are separated by ':' + * @param path: File path + * @param poolId: Logical pool ID + * @param copysetId: Copyset ID + * @param epoch: Configuration version + * @return 0 on success; -1 on failure */ int Save(const std::string& path, const PoolId poolId, const CopysetId copysetId, const uint64_t epoch); diff --git a/curvefs/src/metaserver/inflight_throttle.h b/curvefs/src/metaserver/inflight_throttle.h index fb670b6161..dfbe50bebf 100644 --- a/curvefs/src/metaserver/inflight_throttle.h +++ b/curvefs/src/metaserver/inflight_throttle.h @@ -30,7 +30,7 @@ namespace curvefs { namespace metaserver { /** - * 负责控制最大inflight request数量 + * Responsible for limiting the maximum number of inflight requests */ class InflightThrottle { public: @@ -40,8 +40,8 @@ class InflightThrottle { ~InflightThrottle() = default; /** - * @brief: 判断是否过载 - * @return true,过载,false没有过载 + * @brief: Determine whether the server is overloaded + * @return true if overloaded, false otherwise */ bool IsOverLoad() { if (maxInflightRequest_ >= @@ -53,23 +53,23 @@ class InflightThrottle { } /** - * @brief: inflight request计数加1 + * @brief: Increase the inflight request count by 1 */ void Increment() { inflightRequestCount_.fetch_add(1, std::memory_order_relaxed); } /** - * @brief: inflight request计数减1 + * @brief: Decrease the inflight request count by 1 */ void Decrement() { inflightRequestCount_.fetch_sub(1, std::memory_order_relaxed); } private: - // 当前inflight request数量 + // Current number of inflight requests std::atomic inflightRequestCount_; - // 最大的inflight request数量 + // Maximum number of inflight requests const uint64_t maxInflightRequest_; };
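InflightThrottle above is just an atomic counter compared against a ceiling: the service increments it when a request arrives, rejects new work while IsOverLoad() reports true, and decrements it on completion. The same flow in a small Python sketch (a lock stands in for the C++ relaxed atomic; this is an illustration, not the metaserver's code):

    import threading

    class InflightThrottleSketch:
        def __init__(self, max_inflight):
            self._max = max_inflight
            self._count = 0
            self._lock = threading.Lock()

        def try_enter(self):
            # Mirrors IsOverLoad() followed by Increment(): admit a request
            # only while the inflight count is below the configured maximum.
            with self._lock:
                if self._count >= self._max:
                    return False
                self._count += 1
                return True

        def leave(self):
            # Mirrors Decrement() when the request completes.
            with self._lock:
                self._count -= 1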
diff --git a/curvefs/test/mds/schedule/coordinator_test.cpp b/curvefs/test/mds/schedule/coordinator_test.cpp index e759da89ed..a5dd3736de 100644 --- a/curvefs/test/mds/schedule/coordinator_test.cpp +++ b/curvefs/test/mds/schedule/coordinator_test.cpp @@ -21,22 +21,24 @@ */ #include "curvefs/src/mds/schedule/coordinator.h" + #include + #include "curvefs/src/mds/common/mds_define.h" +#include "curvefs/test/mds/mock/mock_topoAdapter.h" #include "curvefs/test/mds/mock/mock_topology.h" #include "curvefs/test/mds/schedule/common.h" -#include "curvefs/test/mds/mock/mock_topoAdapter.h" -using ::curvefs::mds::topology::MockTopology; using ::curvefs::mds::schedule::ScheduleOption; +using ::curvefs::mds::topology::MockTopology; using ::curvefs::mds::topology::TopologyIdGenerator; -using ::curvefs::mds::topology::TopologyTokenGenerator; using ::curvefs::mds::topology::TopologyStorage; +using ::curvefs::mds::topology::TopologyTokenGenerator; using ::std::chrono::steady_clock; +using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; -using ::testing::_; using ::curvefs::mds::topology::UNINITIALIZE_ID; @@ -51,7 +53,7 @@ class CoordinatorTest : public ::testing::Test { void SetUp() override { topo_ = std::make_shared(idGenerator_, tokenGenerator_, - storage_); + storage_); metric_ = std::make_shared(topo_); topoAdapter_ = std::make_shared(); coordinator_ = std::make_shared(topoAdapter_); @@ -132,7 +134,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { // 2. test copySet has operator and not execute @@ -149,20 +151,20 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { Operator opRes; ASSERT_TRUE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); - // 第一次下发配置 + // First configuration distribution ASSERT_EQ(4, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ(ConfigChangeType::ADD_PEER, res.type()); - // 第二次获取metaserver失败 + // Failed to obtain metaserver for the second time ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + // 3. 
Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter_, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -174,19 +176,19 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); Operator opRes; ASSERT_FALSE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到metaserver的信息 + // Unable to obtain information on metaserver ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(_, _)) .WillOnce(Return(false)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); ASSERT_FALSE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); } @@ -204,7 +206,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - info.configChangeInfo, &res)); + info.configChangeInfo, &res)); } { @@ -217,7 +219,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(testOperator)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { @@ -228,7 +230,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { @@ -237,7 +239,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(Return(false)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } } @@ -270,7 +272,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { // 2. test copySet has operator and not execute @@ -289,21 +291,21 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { Operator opRes; ASSERT_TRUE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); - // 第一次下发配置 + // First configuration distribution ASSERT_EQ(4, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ("127.0.0.1:9001:0", res.oldpeer().address()); ASSERT_EQ(ConfigChangeType::CHANGE_PEER, res.type()); - // 第二次获取metaserver失败 + // Failed to obtain metaserver for the second time ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + // 3. 
Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter_, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -315,19 +317,19 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); Operator opRes; ASSERT_FALSE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到metaserver的信息 + // Unable to obtain information on metaserver ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(_, _)) .WillOnce(Return(false)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); ASSERT_FALSE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); } @@ -345,7 +347,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - info.configChangeInfo, &res)); + info.configChangeInfo, &res)); } { @@ -358,7 +360,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(testOperator)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { @@ -369,7 +371,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } { @@ -378,7 +380,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(Return(false)); ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, - ConfigChangeInfo{}, &res)); + ConfigChangeInfo{}, &res)); } } @@ -386,15 +388,16 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { ScheduleOption scheduleOption; scheduleOption.operatorConcurrent = 4; coordinator_->InitScheduler(scheduleOption, - std::make_shared(topo_)); + std::make_shared(topo_)); { - // 1. copyset上没有要变更的operator + // 1. There are no operators to change on the copyset ASSERT_FALSE(coordinator_->MetaserverGoingToAdd(1, CopySetKey{1, 1})); } { - // 2. copyset上有leader变更,并且目的leader为metaserver-1 + // 2. There is a leader change on the copyset and the target leader is + // metaserver-1 Operator testOperator( 1, CopySetKey{1, 1}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(2, 1)); @@ -403,7 +406,7 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 3. copyset上有remove peer操作 + // 3. There is a remove peer operation on the copyset Operator testOperator( 1, CopySetKey{1, 2}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(1)); @@ -412,7 +415,8 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 4. copyset上有add peer操作, target不是1 + // 4. There is an add peer operation on the copyset, but the target is + // not 1 Operator testOperator( 1, CopySetKey{1, 3}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(2)); @@ -421,7 +425,7 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 5. copyset上有add peer操作, target是1 + // 5. 
There is an add peer operation on the copyset, with a target of 1 Operator testOperator( 1, CopySetKey{1, 4}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(1)); @@ -430,7 +434,8 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 6. copyset上有change peer操作,target不是1 + // 6. There is a change peer operation on the copyset, but the target is + // not 1 Operator testOperator( 1, CopySetKey{1, 5}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(4, 2)); @@ -439,7 +444,8 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 7. copyset上有change peer操作,target是1 + // 7. There is a change peer operation on the copyset, with a target of + // 1 Operator testOperator( 1, CopySetKey{1, 6}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(4, 1)); @@ -449,7 +455,7 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } TEST_F(CoordinatorTest, test_SchedulerSwitch) { - ScheduleOption scheduleOption = GetTrueScheduleOption(); + ScheduleOption scheduleOption = GetTrueScheduleOption(); scheduleOption.copysetSchedulerIntervalSec = 0; scheduleOption.leaderSchedulerIntervalSec = 0; scheduleOption.recoverSchedulerIntervalSec = 0; @@ -459,7 +465,7 @@ TEST_F(CoordinatorTest, test_SchedulerSwitch) { EXPECT_CALL(*topoAdapter_, Getpools()).Times(0); EXPECT_CALL(*topoAdapter_, GetMetaServerInfos()).Times(0); - // 设置flag都为false + // Set flags to false gflags::SetCommandLineOption("enableCopySetScheduler", "false"); gflags::SetCommandLineOption("enableRecoverScheduler", "false"); gflags::SetCommandLineOption("enableLeaderScheduler", "false"); @@ -471,18 +477,18 @@ TEST_F(CoordinatorTest, test_SchedulerSwitch) { TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { /* - 场景: - metaserver1: offline 有恢复op - metaserver2: offline 没有恢复op,没有candidate,有其他op - metaserver3: offline 有candidate + Scenario: + metaserver1: offline has recovery op + metaserver2: offline has no recovery op, no candidate, and other op + metaserver3: offline has a candidate metaserver4: online metaserver4: online */ - // 获取option + // Get option ScheduleOption scheduleOption = GetFalseScheduleOption(); coordinator_->InitScheduler(scheduleOption, metric_); - // 构造metaserver + // Construct metaserver std::vector metaserverInfos; std::vector peerInfos; for (int i = 1; i <= 6; i++) { @@ -497,7 +503,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { peerInfos.emplace_back(peer); } - // 构造op + // Construct op Operator opForCopySet1(1, CopySetKey{1, 1}, OperatorPriority::HighPriority, steady_clock::now(), std::make_shared(1, 4)); @@ -508,7 +514,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { steady_clock::now(), std::make_shared(2, 4)); ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(opForCopySet2)); - // 构造copyset + // Construct a copyset std::vector peersFor2({peerInfos[1], peerInfos[3], peerInfos[4]}); CopySetInfo copyset2(CopySetKey{1, 2}, 1, 4, peersFor2, ConfigChangeInfo{}); @@ -523,7 +529,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { CopySetInfo copyset3(CopySetKey{1, 3}, 1, 4, peersFor3, configChangeInfoForCS3); - // 1. 查询所有metaserver + // 1. Query all metaservers { EXPECT_CALL(*topoAdapter_, GetMetaServerInfos()) .WillOnce(Return(metaserverInfos)); @@ -545,7 +551,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { ASSERT_FALSE(statusMap[6]); } - // 2. 查询指定metaserver, 但metaserver不存在 + // 2. 
Query specified metaserver, but metaserver does not exist { EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(7, _)) .WillOnce(Return(false)); std::map statusMap; ASSERT_EQ(-1, coordinator_->QueryMetaServerRecoverStatus( std::vector{7}, &statusMap)); } - // 3. 查询指定metaserver, 不在恢复中 + // 3. Query specified metaserver, not in recovery { EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(6, _)) .WillOnce( diff --git a/curvefs/test/mds/schedule/operatorStep_test.cpp b/curvefs/test/mds/schedule/operatorStep_test.cpp index d6378bb927..821d97fac7 100644 --- a/curvefs/test/mds/schedule/operatorStep_test.cpp +++ b/curvefs/test/mds/schedule/operatorStep_test.cpp @@ -22,6 +22,7 @@ #include #include + #include "curvefs/test/mds/schedule/common.h" namespace curvefs { @@ -29,8 +30,8 @@ namespace mds { namespace schedule { TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { auto originCopySetInfo = GetCopySetInfoForTest(); - std::shared_ptr transferLeader - = std::make_shared(1, 2); + std::shared_ptr transferLeader = + std::make_shared(1, 2); auto testCopySetInfo = originCopySetInfo; ApplyStatus applyStatus; @@ -48,21 +49,21 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { // 2. transfer leader complete testCopySetInfo.leader = 2; ASSERT_EQ(ApplyStatus::Finished, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { // 3. report leader is not record old/target leader in operator testCopySetInfo.leader = 3; ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { // 4. transfer leader fail testCopySetInfo.leader = 1; - CandidateError *candidateError = new CandidateError(); - std::string *errMsg = new std::string("transfer leader err"); + CandidateError* candidateError = new CandidateError(); + std::string* errMsg = new std::string("transfer leader err"); candidateError->set_allocated_errmsg(errMsg); candidateError->set_errtype(1); testCopySetInfo.candidatePeerInfo = PeerInfo(2, 1, 1, "", 9000); @@ -75,7 +76,7 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { @@ -89,14 +90,14 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.release_err(); ASSERT_EQ(ApplyStatus::OnGoing, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { // 6. tarnfer leader type not complete testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { @@ -109,7 +110,7 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { ConfigChangeType::TRANSFER_LEADER); testCopySetInfo.configChangeInfo.set_allocated_peer(replica); ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } } @@ -126,8 +127,7 @@ TEST(OperatorSepTest, OperatorSepTest_AddPeer_Test) { // 2. 
add peer complete auto testCopySetInfo = originCopySetInfo; - testCopySetInfo.peers.emplace_back( - PeerInfo(4, 3, 4, "192.168.10.4", 9000)); + testCopySetInfo.peers.emplace_back(PeerInfo(4, 3, 4, "192.168.10.4", 9000)); ASSERT_EQ(ApplyStatus::Finished, addPeer->Apply(testCopySetInfo, &copySetConf)); @@ -140,8 +140,8 @@ testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); testCopySetInfo.configChangeInfo.set_finished(false); - std::string *errMsg = new std::string("add peer failed"); - CandidateError *candidateError = new CandidateError(); + std::string* errMsg = new std::string("add peer failed"); + CandidateError* candidateError = new CandidateError(); candidateError->set_errtype(2); candidateError->set_allocated_errmsg(errMsg); testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); @@ -157,7 +157,7 @@ // 5. add peer type not complete testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::REMOVE_PEER); ASSERT_EQ(ApplyStatus::Failed, - addPeer->Apply(testCopySetInfo, &copySetConf)); + addPeer->Apply(testCopySetInfo, &copySetConf)); // 6. config change item do not match testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); @@ -173,8 +173,7 @@ TEST(OperatorStepTest, OperatorStepTest_RemovePeer_Test) { auto originCopySetInfo = GetCopySetInfoForTest(); - std::shared_ptr - removePeer = std::make_shared(3); + std::shared_ptr removePeer = std::make_shared(3); // 1. remove peer haven't started CopySetConf copySetConf; @@ -198,13 +197,12 @@ replica->set_address("192.10.12.4:9000:0"); testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::REMOVE_PEER); - std::string *errMsg = new std::string("remove peer err"); - CandidateError *candidateError = new CandidateError(); + std::string* errMsg = new std::string("remove peer err"); + CandidateError* candidateError = new CandidateError(); candidateError->set_errtype(2); candidateError->set_allocated_errmsg(errMsg); testCopySetInfo.configChangeInfo.set_finished(false); - testCopySetInfo.configChangeInfo.set_allocated_err( - candidateError); + testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); ASSERT_EQ(ApplyStatus::Failed, removePeer->Apply(testCopySetInfo, &copySetConf)); @@ -217,7 +215,7 @@ // 5. remove peer type not complete testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); ASSERT_EQ(ApplyStatus::Failed, - removePeer->Apply(testCopySetInfo, &copySetConf)); + removePeer->Apply(testCopySetInfo, &copySetConf)); // 5. config change item do not match testCopySetInfo.candidatePeerInfo = PeerInfo(10, 1, 1, "", 9000); @@ -233,31 +231,31 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { auto originCopySetInfo = GetCopySetInfoForTest(); - std::shared_ptr - changePeer = std::make_shared(3, 4); + std::shared_ptr changePeer = + std::make_shared(3, 4); CopySetConf copySetConf; - // 1. change peer还未开始 + // 1. 
The change peer has not yet started { ASSERT_EQ(ApplyStatus::Ordered, - changePeer->Apply(originCopySetInfo, &copySetConf)); + changePeer->Apply(originCopySetInfo, &copySetConf)); ASSERT_EQ(4, copySetConf.configChangeItem); ASSERT_EQ(3, copySetConf.oldOne); ASSERT_EQ(ConfigChangeType::CHANGE_PEER, copySetConf.type); } auto testCopySetInfo = originCopySetInfo; - // 2. change peer完成 + // 2. change peer completed { auto testCopySetInfo = originCopySetInfo; testCopySetInfo.peers.erase(testCopySetInfo.peers.begin() + 2); testCopySetInfo.peers.emplace_back( - PeerInfo(4, 3, 4, "192.168.10.4", 9000)); + PeerInfo(4, 3, 4, "192.168.10.4", 9000)); ASSERT_EQ(ApplyStatus::Finished, - changePeer->Apply(testCopySetInfo, &copySetConf)); + changePeer->Apply(testCopySetInfo, &copySetConf)); } - // 3. change peer失败 + // 3. change peer failed { testCopySetInfo = originCopySetInfo; testCopySetInfo.candidatePeerInfo = PeerInfo(4, 1, 1, "", 9000); @@ -268,24 +266,24 @@ testCopySetInfo.configChangeInfo.set_type( ConfigChangeType::CHANGE_PEER); testCopySetInfo.configChangeInfo.set_finished(false); - std::string *errMsg = new std::string("add peer failed"); - CandidateError *candidateError = new CandidateError(); + std::string* errMsg = new std::string("add peer failed"); + CandidateError* candidateError = new CandidateError(); candidateError->set_errtype(2); candidateError->set_allocated_errmsg(errMsg); testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); ASSERT_EQ(ApplyStatus::Failed, - changePeer->Apply(testCopySetInfo, &copySetConf)); + changePeer->Apply(testCopySetInfo, &copySetConf)); } - // 4. 上报未完成 + // 4. The reported change is not finished yet { testCopySetInfo.configChangeInfo.set_finished(false); testCopySetInfo.configChangeInfo.release_err(); ASSERT_EQ(ApplyStatus::OnGoing, - changePeer->Apply(testCopySetInfo, &copySetConf)); + changePeer->Apply(testCopySetInfo, &copySetConf)); } - // 5. 上报的变更类型和mds中的oprator不相符合 + // 5. 
The reported change type does not match the operator in mds { testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); testCopySetInfo.configChangeInfo.set_finished(true); @@ -295,7 +293,7 @@ replica->set_address("192.10.12.5:9000:0"); testCopySetInfo.configChangeInfo.set_allocated_peer(replica); ASSERT_EQ(ApplyStatus::Failed, - changePeer->Apply(testCopySetInfo, &copySetConf)); + changePeer->Apply(testCopySetInfo, &copySetConf)); } } } // namespace schedule
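Every case in the two operatorStep test files above goes through the same OperatorStep::Apply() contract, returning one of the four ApplyStatus values the assertions exercise. A minimal sketch of a caller reacting to those values; Dispatch() and DropOperator() are hypothetical stand-ins, not real MDS APIs:

    // Hedged sketch: consuming OperatorStep::Apply(), based only on the
    // ApplyStatus values asserted in the tests above.
    void HandleApply(OperatorStep* step, const CopySetInfo& copySetInfo) {
        CopySetConf conf;
        switch (step->Apply(copySetInfo, &conf)) {
            case ApplyStatus::Ordered:   // change not started: hand conf out
                Dispatch(conf);          // hypothetical helper
                break;
            case ApplyStatus::OnGoing:   // reported but unfinished: keep waiting
                break;
            case ApplyStatus::Finished:  // new configuration already visible
            case ApplyStatus::Failed:    // candidate error or mismatched report
                DropOperator(copySetInfo.id);  // hypothetical helper
                break;
        }
    }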
diff --git a/curvefs/test/mds/schedule/recoverScheduler_test.cpp b/curvefs/test/mds/schedule/recoverScheduler_test.cpp index d48c6a9ee1..32c6e88d18 100644 --- a/curvefs/test/mds/schedule/recoverScheduler_test.cpp +++ b/curvefs/test/mds/schedule/recoverScheduler_test.cpp @@ -21,26 +21,27 @@ */ #include + #include "curvefs/src/mds/common/mds_define.h" #include "curvefs/src/mds/schedule/operatorController.h" #include "curvefs/src/mds/schedule/scheduleMetrics.h" #include "curvefs/src/mds/schedule/scheduler.h" #include "curvefs/src/mds/topology/topology_id_generator.h" +#include "curvefs/test/mds/mock/mock_topoAdapter.h" #include "curvefs/test/mds/mock/mock_topology.h" #include "curvefs/test/mds/schedule/common.h" -#include "curvefs/test/mds/mock/mock_topoAdapter.h" using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; -using ::curvefs::mds::topology::TopologyIdGenerator; -using ::curvefs::mds::topology::MockTopology; using ::curvefs::mds::topology::MockIdGenerator; -using ::curvefs::mds::topology::MockTokenGenerator; using ::curvefs::mds::topology::MockStorage; +using ::curvefs::mds::topology::MockTokenGenerator; +using ::curvefs::mds::topology::MockTopology; +using ::curvefs::mds::topology::TopologyIdGenerator; using ::std::chrono::steady_clock; namespace curvefs { namespace mds { @@ -172,7 +173,7 @@ TEST_F(TestRecoverSheduler, recoverScheduler_->Schedule(); Operator op; ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } @@ -196,7 +197,7 @@ TEST_F(TestRecoverSheduler, test_all_metaServer_online_offline) { EXPECT_CALL(*topoAdapter_, GetAvgScatterWidthInPool(_)) .WillRepeatedly(Return(90)); { - // 1. 所有metaserveronline + // 1. All metaservers are online EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id1, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id2, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo2), Return(true))); @@ -208,7 +209,8 @@ } { - // 2. 副本数量大于标准,leader挂掉 + // 2. The number of replicas exceeds the standard, and the leader is + // down csInfo1.state = OnlineState::OFFLINE; EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id1, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); @@ -217,12 +219,13 @@ .WillRepeatedly(Return(2)); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } { - // 3. 副本数量大于标准,follower挂掉 + // 3. The number of replicas exceeds the standard, and the follower is + // down opController_->RemoveOperator(op.copysetID); csInfo1.state = OnlineState::ONLINE; csInfo2.state = OnlineState::OFFLINE; EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id1, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id2, _)) .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo2), Return(true))); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } { - // 4. 副本数目等于标准, follower挂掉 + // 4. The number of replicas equals the standard, and the follower is + // down opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInPool(_)) .WillRepeatedly(Return(3)); @@ ... .WillOnce(Return(true)); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(1000), op.timeLimit); } { - // 5. 选不出替换metaserver + // 5. Unable to select a replacement metaserver opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetMetaServersInPool(_)) .WillOnce(Return(std::vector{})); recoverScheduler_->Schedule(); ASSERT_EQ(0, opController_->GetOperators().size()); } { - // 6. 在metaserver上创建copyset失败 + // 6. Failed to create copyset on metaserver EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInPool(_)) .WillRepeatedly(Return(3)); std::vector metaserverList(
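Cases 1 through 6 above pin down how the recover scheduler treats an offline replica, but the concrete step types sit behind the elided dynamic_cast targets, so the sketch below only names the decisions, not the real classes. A rough reconstruction of the implied flow, with illustrative helper names:

    // Assumed shape of the recovery decision exercised by cases 1-6 above;
    // every helper here is hypothetical, inferred from the assertions only.
    void ScheduleOneCopySet(const CopySetInfo& copyset) {
        if (NoOfflinePeer(copyset)) return;           // case 1: no operator
        if (ReplicaNum(copyset) > StandardReplicaNum()) {
            AddOperatorRemovingOfflinePeer(copyset);  // cases 2 and 3
        } else if (HasReplacementMetaServer(copyset)) {
            AddOperatorReplacingOfflinePeer(copyset); // case 4
        }
        // cases 5 and 6: no replacement selectable, or creating the copyset
        // on the chosen metaserver fails, so no operator is produced
    }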
diff --git a/curvefs/test/mds/schedule/scheduleMetrics_test.cpp b/curvefs/test/mds/schedule/scheduleMetrics_test.cpp index 1041519eb6..0a7036ce15 100644 --- a/curvefs/test/mds/schedule/scheduleMetrics_test.cpp +++ b/curvefs/test/mds/schedule/scheduleMetrics_test.cpp @@ -21,23 +21,25 @@ */ #include "curvefs/src/mds/schedule/scheduleMetrics.h" + #include #include #include + #include "curvefs/src/mds/schedule/operatorController.h" #include "curvefs/test/mds/mock/mock_topology.h" -using ::curvefs::mds::topology::MockTopology; using ::curvefs::mds::topology::CopySetKey; +using ::curvefs::mds::topology::MockTopology; using ::curvefs::mds::topology::TopologyIdGenerator; -using ::curvefs::mds::topology::TopologyTokenGenerator; using ::curvefs::mds::topology::TopologyStorage; +using ::curvefs::mds::topology::TopologyTokenGenerator; using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curvefs { namespace mds { @@ -82,7 +84,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { ::curvefs::mds::topology::CopySetInfo addCsInfo(1, 1); addCsInfo.SetCopySetMembers(std::set{1, 2}); { - // 1. 增加normal级别/add类型的operator + // 1. Add a normal-level/add-type operator EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 1}, _)) .WillOnce(DoAll(SetArgPointee<1>(addCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)) @@ -150,7 +152,7 @@ } { - // 2. 移除 1中的operator + // 2. Remove the operator added in step 1 scheduleMetrics->UpdateRemoveMetric(addOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->addOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); } } @@ -167,11 +169,10 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { rmCsInfo.SetLeader(1); { - // 1. 增加high级别/remove类型的operator + // 1. Add a high-priority/remove-type operator EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 2}, _)) .WillOnce(DoAll(SetArgPointee<1>(rmCsInfo), Return(true))); - EXPECT_CALL(*topo, GetHostNameAndPortById(_)) .WillOnce(Return("haha")); + EXPECT_CALL(*topo, GetHostNameAndPortById(_)).WillOnce(Return("haha")); EXPECT_CALL(*topo, GetMetaServer(1, _)) .WillOnce(DoAll(SetArgPointee<1>(GetMetaServer(1)), Return(true))); @@ -245,7 +246,7 @@ } { - // 2. 移除 1中的operator + // 2. Remove the operator added in step 1 scheduleMetrics->UpdateRemoveMetric(rmOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->removeOpNum.get_value()); @@ -263,7 +264,7 @@ transCsInfo.SetLeader(1); { - // 1. 增加normal级别/transferleader类型的operator + // 1. Add a normal-level/transferleader-type operator EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)) @@ -329,7 +330,6 @@ std::string("Normal\",\"opType\":\"TransferLeader\",\"poolId") + std::string("\":\"1\",\"startEpoch\":\"1\"}"); - ASSERT_EQ(res, scheduleMetrics->operators[transferOp.copysetID].JsonBody()); LOG(INFO) @@ -338,14 +338,15 @@ } { - // 2. 移除 1中的operator + // 2. Remove the operator added in step 1 scheduleMetrics->UpdateRemoveMetric(transferOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->transferOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // There should be no problem removing metrics that do not exist in the + // map scheduleMetrics->UpdateRemoveMetric(transferOp); } } @@ -358,7 +359,7 @@ changeCsInfo.SetLeader(1); { - // 1. 增加normal级别/changePeer类型的operator + // 1. Add a normal-level/changePeer-type operator EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 4}, _)) .WillOnce(DoAll(SetArgPointee<1>(changeCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)) @@ -426,14 +427,15 @@ } { - // 2. 移除 1中的operator + // 2. Remove the operator added in step 1 scheduleMetrics->UpdateRemoveMetric(changeOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->changeOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // There should be no problem removing metrics that do not exist in the + // map scheduleMetrics->UpdateRemoveMetric(changeOp); } } @@ -446,7 +448,7 @@ TEST_F(ScheduleMetricsTest, test_abnormal) { transCsInfo.SetCopySetMembers(std::set{1, 2, 3}); transCsInfo.SetLeader(1); - // 获取copyset失败 + // Failed to obtain copyset EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)).WillOnce(Return(false)); scheduleMetrics->UpdateAddMetric(transferOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -459,7 +461,7 @@ << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); scheduleMetrics->UpdateRemoveMetric(transferOp); - // 获取metaserver 或者 server失败 + // Failed to obtain metaserver or server EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)).WillOnce(Return(false));
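Each add/remove pair in the metrics tests above drives the same counters up and back to zero. The get_value() calls suggest plain bvar adders underneath; a self-contained sketch of that counting pattern, assuming bvar::Adder is indeed what ScheduleMetrics wraps:

    #include <bvar/bvar.h>
    #include <cassert>

    int main() {
        bvar::Adder<int64_t> operatorNum;  // stand-in for scheduleMetrics->operatorNum
        operatorNum << 1;                  // the bump an UpdateAddMetric() implies
        assert(operatorNum.get_value() == 1);
        operatorNum << -1;                 // the decrement UpdateRemoveMetric() implies
        assert(operatorNum.get_value() == 0);
        return 0;
    }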
diff --git a/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp b/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp index 04241d0209..a8c91d7617 100644 --- a/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp +++ b/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp @@ -20,6 +20,8 @@ * @Author: chenwei */ +#include "curvefs/src/mds/schedule/scheduleService/scheduleService.h" + #include #include #include @@ -27,17 +29,16 @@ #include #include "curvefs/proto/schedule.pb.h" -#include "curvefs/src/mds/schedule/scheduleService/scheduleService.h" #include "curvefs/test/mds/mock/mock_coordinator.h" namespace curvefs { namespace mds { namespace schedule { -using ::testing::Return; using ::testing::_; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; class TestScheduleService : public ::testing::Test { protected: void SetUp() override { server_ = new brpc::Server(); coordinator_ = std::make_shared(); - ScheduleServiceImpl *scheduleService = + ScheduleServiceImpl* scheduleService = new ScheduleServiceImpl(coordinator_); ASSERT_EQ( 0, server_->AddService(scheduleService, brpc::SERVER_OWNS_SERVICE)); @@ -63,7 +64,7 @@ class TestScheduleService : public ::testing::Test { protected: std::shared_ptr coordinator_; butil::EndPoint listenAddr_; - brpc::Server *server_; + brpc::Server* server_; }; TEST_F(TestScheduleService, test_QueryMetaServerRecoverStatus) { @@ -75,7 +76,7 @@ request.add_metaserverid(1); QueryMetaServerRecoverStatusResponse response; - // 1. 查询metaserver恢复状态返回成功 + // 1. Querying the metaserver recovery status succeeds { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryMetaServerRecoverStatus( @@ -91,7 +92,7 @@ ASSERT_TRUE(response.recoverstatusmap().begin()->second); } - // 2. 传入的metaserverid不合法 + // 2. 
The metaserverid passed in is illegal { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryMetaServerRecoverStatus( diff --git a/curvefs/test/volume/bitmap_allocator_test.cpp b/curvefs/test/volume/bitmap_allocator_test.cpp index 3eca470fec..88c324e9e4 100644 --- a/curvefs/test/volume/bitmap_allocator_test.cpp +++ b/curvefs/test/volume/bitmap_allocator_test.cpp @@ -18,9 +18,8 @@ #include -#include "curvefs/test/volume/common.h" - #include "absl/memory/memory.h" +#include "curvefs/test/volume/common.h" namespace curvefs { namespace volume { @@ -100,7 +99,7 @@ TEST_F(BitmapAllocatorTest, AllocFromBitmap) { Extents expected = { Extent(opt_.startOffset + opt_.length * opt_.smallAllocProportion, - allocSize)}; + allocSize)}; ASSERT_EQ(expected, exts); @@ -225,7 +224,7 @@ TEST_F(BitmapAllocatorTest, TestMarkUsedRandom) { uint64_t off = opt_.startOffset; uint64_t usedSize = 0; - // 对于每一个 size per bit,随机其中一部分设置 + // For each size per bit, randomly set a portion of it auto select = [this, &usedSize](uint64_t startOffset) { auto off = rand_r(&seed) * 4096 % opt_.sizePerBit; auto len = rand_r(&seed) * 4096 % opt_.sizePerBit; diff --git a/curvefs_python/cbd_client.h b/curvefs_python/cbd_client.h index 64109ef8e5..c9c0133ed9 100644 --- a/curvefs_python/cbd_client.h +++ b/curvefs_python/cbd_client.h @@ -29,57 +29,62 @@ #include "curvefs_python/curve_type.h" -namespace curve { -namespace client { - -class FileClient; - -} // namespace client -} // namespace curve - -class CBDClient { - public: - CBDClient(); - ~CBDClient(); - - int Init(const char* configPath); - void UnInit(); - - int Open(const char* filename, UserInfo_t* userInfo); - int Close(int fd); - - int Create(const char* filename, UserInfo_t* userInfo, size_t size); - int Create2(const CreateContext* context); - int Unlink(const char* filename, UserInfo_t* info); - int DeleteForce(const char* filename, UserInfo_t* info); - int Recover(const char* filename, UserInfo_t* info, uint64_t fileId); - int Rename(UserInfo_t* info, const char* oldpath, const char* newpath); - int Extend(const char* filename, UserInfo_t* info, uint64_t size); - - // 同步读写 - int Read(int fd, char* buf, unsigned long offset, unsigned long length); // NOLINT - int Write(int fd, const char* buf, unsigned long offset, unsigned long length); // NOLINT - - // 异步读写 - int AioRead(int fd, AioContext* aioctx); - int AioWrite(int fd, AioContext* aioctx); - - // 获取文件的基本信息 - int StatFile(const char* filename, UserInfo_t* info, FileInfo_t* finfo); - int ChangeOwner(const char* filename, const char* owner, UserInfo_t* info); - - DirInfos_t* OpenDir(const char* dirpath, UserInfo_t* userinfo); - int Listdir(DirInfos_t* dirinfo); - void CloseDir(DirInfos_t* dirinfo); - int Mkdir(const char* dirpath, UserInfo_t* info); - int Rmdir(const char* dirpath, UserInfo_t* info); - - std::string GetClusterId(); - - std::vector ListPoolset(); - - private: - std::unique_ptr client_; +namespace curve +{ + namespace client + { + + class FileClient; + + } // namespace client +} // namespace curve + +class CBDClient +{ +public: + CBDClient(); + ~CBDClient(); + + int Init(const char *configPath); + void UnInit(); + + int Open(const char *filename, UserInfo_t *userInfo); + int Close(int fd); + + int Create(const char *filename, UserInfo_t *userInfo, size_t size); + int Create2(const CreateContext *context); + int Unlink(const char *filename, UserInfo_t *info); + int DeleteForce(const char *filename, UserInfo_t *info); + int Recover(const char *filename, UserInfo_t *info, uint64_t fileId); + int 
Rename(UserInfo_t *info, const char *oldpath, const char *newpath); + int Extend(const char *filename, UserInfo_t *info, uint64_t size); + + // Synchronous read and write + int Read(int fd, char *buf, unsigned long offset, + unsigned long length); // NOLINT + int Write(int fd, const char *buf, unsigned long offset, + unsigned long length); // NOLINT + + // Asynchronous read and write + int AioRead(int fd, AioContext *aioctx); + int AioWrite(int fd, AioContext *aioctx); + + // Obtain basic information about the file + int StatFile(const char *filename, UserInfo_t *info, FileInfo_t *finfo); + int ChangeOwner(const char *filename, const char *owner, UserInfo_t *info); + + DirInfos_t *OpenDir(const char *dirpath, UserInfo_t *userinfo); + int Listdir(DirInfos_t *dirinfo); + void CloseDir(DirInfos_t *dirinfo); + int Mkdir(const char *dirpath, UserInfo_t *info); + int Rmdir(const char *dirpath, UserInfo_t *info); + + std::string GetClusterId(); + + std::vector ListPoolset(); + +private: + std::unique_ptr client_; }; -#endif // CURVEFS_PYTHON_CBD_CLIENT_H_ +#endif // CURVEFS_PYTHON_CBD_CLIENT_H_ diff --git a/curvefs_python/curve_type.h b/curvefs_python/curve_type.h index d6603e238d..5382401d72 100644 --- a/curvefs_python/curve_type.h +++ b/curvefs_python/curve_type.h @@ -34,65 +34,65 @@ #define CURVEINODE_APPENDFILE 2 #define CURVE_INODE_APPENDECFILE 3 -#define CURVE_ERROR_OK 0 -// 文件或者目录已存在 +#define CURVE_ERROR_OK 0 +// The file or directory already exists #define CURVE_ERROR_EXISTS 1 -// 操作失败 +// Operation failed #define CURVE_ERROR_FAILED 2 -// 禁止IO +// Prohibit IO #define CURVE_ERROR_DISABLEIO 3 -// 认证失败 +// Authentication failed #define CURVE_ERROR_AUTHFAIL 4 -// 正在删除 +// Removing #define CURVE_ERROR_DELETING 5 -// 文件不存在 +// File does not exist #define CURVE_ERROR_NOTEXIST 6 -// 快照中 +// In the snapshot #define CURVE_ERROR_UNDER_SNAPSHOT 7 -// 非快照期间 +// During non snapshot periods #define CURVE_ERROR_NOT_UNDERSNAPSHOT 8 -// 删除错误 +// Delete Error #define CURVE_ERROR_DELETE_ERROR 9 -// segment未分配 +// Segment not allocated #define CURVE_ERROR_NOT_ALLOCATE 10 -// 操作不支持 +// Operation not supported #define CURVE_ERROR_NOT_SUPPORT 11 -// 目录非空 +// Directory is not empty #define CURVE_ERROR_NOT_EMPTY 12 -// 禁止缩容 +// Prohibit shrinkage #define CURVE_ERROR_NO_SHRINK_BIGGER_FILE 13 -// session不存在 +// Session does not exist #define CURVE_ERROR_SESSION_NOTEXISTS 14 -// 文件被占用 +// File occupied #define CURVE_ERROR_FILE_OCCUPIED 15 -// 参数错误 +// Parameter error #define CURVE_ERROR_PARAM_ERROR 16 -// MDS一侧存储错误 +// MDS side storage error #define CURVE_ERROR_INTERNAL_ERROR 17 -// crc检查错误 +// CRC check error #define CURVE_ERROR_CRC_ERROR 18 -// request参数存在问题 +// There is an issue with the request parameter #define CURVE_ERROR_INVALID_REQUEST 19 -// 磁盘存在问题 +// There is a problem with the disk #define CURVE_ERROR_DISK_FAIL 20 -// 空间不足 +// Insufficient space #define CURVE_ERROR_NO_SPACE 21 -// IO未对齐 +// IO misalignment #define CURVE_ERROR_NOT_ALIGNED 22 -// 文件被关闭,fd不可用 +// File closed, fd not available #define CURVE_ERROR_BAD_FD 23 -// 文件长度不支持 +// File length not supported #define CURVE_ERROR_LENGTH_NOT_SUPPORT 24 -// 文件状态 -#define CURVE_FILE_CREATED 0 -#define CURVE_FILE_DELETING 1 -#define CURVE_FILE_CLONING 2 +// File Status +#define CURVE_FILE_CREATED 0 +#define CURVE_FILE_DELETING 1 +#define CURVE_FILE_CLONING 2 #define CURVE_FILE_CLONEMETAINSTALLED 3 -#define CURVE_FILE_CLONED 4 -#define CURVE_FILE_BEINGCLONED 5 +#define CURVE_FILE_CLONED 4 +#define CURVE_FILE_BEINGCLONED 5 -// 未知错误 +// Unknown error #define 
CURVE_ERROR_UNKNOWN 100 #define CURVE_OP_READ 0 @@ -100,11 +100,10 @@ #define CLUSTERIDMAX 256 - typedef void (*AioCallBack)(struct AioContext* context); typedef struct AioContext { - unsigned long offset; //NOLINT - unsigned long length; //NOLINT + unsigned long offset; // NOLINT + unsigned long length; // NOLINT int ret; int op; AioCallBack cb; @@ -117,32 +116,32 @@ typedef struct UserInfo { } UserInfo_t; typedef struct FileInfo { - uint64_t id; - uint64_t parentid; - int filetype; - uint64_t length; - uint64_t ctime; - char filename[256]; - char owner[256]; - int fileStatus; - uint64_t stripeUnit; - uint64_t stripeCount; + uint64_t id; + uint64_t parentid; + int filetype; + uint64_t length; + uint64_t ctime; + char filename[256]; + char owner[256]; + int fileStatus; + uint64_t stripeUnit; + uint64_t stripeCount; } FileInfo_t; typedef struct DirInfos { - char* dirpath; - UserInfo_t* userinfo; - uint64_t dirsize; - FileInfo_t* fileinfo; + char* dirpath; + UserInfo_t* userinfo; + uint64_t dirsize; + FileInfo_t* fileinfo; } DirInfos_t; struct CreateContext { - std::string name; - size_t length; - UserInfo user; - std::string poolset; - uint64_t stripeUnit; - uint64_t stripeCount; + std::string name; + size_t length; + UserInfo user; + std::string poolset; + uint64_t stripeUnit; + uint64_t stripeCount; }; #endif // CURVEFS_PYTHON_CURVE_TYPE_H_ diff --git a/curvefs_python/curvefs_tool.py b/curvefs_python/curvefs_tool.py index f2fb582214..7a0cf73e92 100644 --- a/curvefs_python/curvefs_tool.py +++ b/curvefs_python/curvefs_tool.py @@ -21,61 +21,65 @@ import parser import time -fileType = ["INODE_DIRECTORY", "INODE_PAGEFILE", "INODE_APPENDFILE", "INODE_APPENDECFILE", "INODE_SNAPSHOT_PAGEFILE"] -fileStatus = ["Created", "Deleting", "Cloning", "CloneMetaInstalled", "Cloned", "BeingCloned"] +fileType = ["INODE_DIRECTORY", "INODE_PAGEFILE", "INODE_APPENDFILE", + "INODE_APPENDECFILE", "INODE_SNAPSHOT_PAGEFILE"] +fileStatus = ["Created", "Deleting", "Cloning", + "CloneMetaInstalled", "Cloned", "BeingCloned"] kGB = 1024 * 1024 * 1024 kUnitializedFileID = 0 -# 参照curve/include/client/libcurve.h -retCode = { 0 : "OK", - 1 : "EXISTS", - 2 : "FAILED", - 3 : "DISABLEIO", - 4 : "AUTHFAIL", - 5 : "DELETING", - 6 : "NOTEXIST", - 7 : "UNDER_SNAPSHOT", - 8 : "NOT_UNDERSNAPSHOT", - 9 : "DELETE_ERROR", - 10 : "NOT_ALLOCATE", - 11 : "NOT_SUPPORT", - 12 : "NOT_EMPTY", - 13 : "NO_SHRINK_BIGGER_FILE", - 14 : "SESSION_NOTEXISTS", - 15 : "FILE_OCCUPIED", - 16 : "PARAM_ERROR", - 17 : "INTERNAL_ERROR", - 18 : "CRC_ERROR", - 19 : "INVALID_REQUEST", - 20 : "DISK_FAIL", - 21 : "NO_SPACE", - 22 : "NOT_ALIGNED", - 23 : "BAD_FD", - 24 : "LENGTH_NOT_SUPPORT", - 25 : "SESSION_NOT_EXIST", - 26 : "STATUS_NOT_MATCH", - 27 : "DELETE_BEING_CLONED", - 28 : "CLIENT_NOT_SUPPORT_SNAPSHOT", - 29 : "SNAPSTHO_FROZEN", - 100 : "UNKNOWN"} +# Refer to curve/include/client/libcurve.h +retCode = {0: "OK", + 1: "EXISTS", + 2: "FAILED", + 3: "DISABLEIO", + 4: "AUTHFAIL", + 5: "DELETING", + 6: "NOTEXIST", + 7: "UNDER_SNAPSHOT", + 8: "NOT_UNDERSNAPSHOT", + 9: "DELETE_ERROR", + 10: "NOT_ALLOCATE", + 11: "NOT_SUPPORT", + 12: "NOT_EMPTY", + 13: "NO_SHRINK_BIGGER_FILE", + 14: "SESSION_NOTEXISTS", + 15: "FILE_OCCUPIED", + 16: "PARAM_ERROR", + 17: "INTERNAL_ERROR", + 18: "CRC_ERROR", + 19: "INVALID_REQUEST", + 20: "DISK_FAIL", + 21: "NO_SPACE", + 22: "NOT_ALIGNED", + 23: "BAD_FD", + 24: "LENGTH_NOT_SUPPORT", + 25: "SESSION_NOT_EXIST", + 26: "STATUS_NOT_MATCH", + 27: "DELETE_BEING_CLONED", + 28: "CLIENT_NOT_SUPPORT_SNAPSHOT", + 29: "SNAPSTHO_FROZEN", + 100: 
"UNKNOWN"} + def getRetCodeMsg(ret): - if retCode.has_key(-ret) : + if retCode.has_key(-ret): return retCode[-ret] return "Unknown Error Code" + if __name__ == '__main__': - # 参数解析 + # Parameter parsing args = parser.get_parser().parse_args() - # 初始化client + # Initialize client cbd = curvefs.CBDClient() ret = cbd.Init(args.confpath) if ret != 0: print "init fail" exit(1) - # 获取文件user信息 + # Obtain file user information user = curvefs.UserInfo_t() user.owner = args.user if args.password: @@ -85,7 +89,8 @@ def getRetCodeMsg(ret): if args.optype == "create": if args.stripeUnit or args.stripeCount: - ret = cbd.Create2(args.filename, user, args.length * kGB, args.stripeUnit, args.stripeCount) + ret = cbd.Create2(args.filename, user, args.length * + kGB, args.stripeUnit, args.stripeCount) else: ret = cbd.Create(args.filename, user, args.length * kGB) elif args.optype == "delete": @@ -116,7 +121,7 @@ def getRetCodeMsg(ret): ret = cbd.Mkdir(args.dirname, user) elif args.optype == "rmdir": ret = cbd.Rmdir(args.dirname, user) - elif args.optype == "list" : + elif args.optype == "list": dir = cbd.Listdir(args.dirname, user) for i in dir: print i diff --git a/curvefs_python/libcurvefs.h b/curvefs_python/libcurvefs.h index 55c6bf55fe..b1bdb0275c 100644 --- a/curvefs_python/libcurvefs.h +++ b/curvefs_python/libcurvefs.h @@ -19,56 +19,60 @@ * File Created: Tuesday, 25th September 2018 2:07:05 pm * Author: */ -#ifndef CURVE_LIBCURVE_INTERFACE_H //NOLINT +#ifndef CURVE_LIBCURVE_INTERFACE_H // NOLINT #define CURVE_LIBCURVE_INTERFACE_H -#include #include -#include +#include + #include +#include #include "curvefs_python/curve_type.h" #ifdef __cplusplus -extern "C" { +extern "C" +{ #endif -int Init(const char* path); -int Open4Qemu(const char* filename); -int Open(const char* filename, UserInfo_t* info); -int Create(const char* filename, UserInfo_t* info, size_t size); + int Init(const char *path); + int Open4Qemu(const char *filename); + int Open(const char *filename, UserInfo_t *info); + int Create(const char *filename, UserInfo_t *info, size_t size); -// 同步读写 -int Read(int fd, char* buf, unsigned long offset, unsigned long length); //NOLINT -int Write(int fd, const char* buf, unsigned long offset, unsigned long length); //NOLINT + // Synchronous read and write + int Read(int fd, char *buf, unsigned long offset, + unsigned long length); // NOLINT + int Write(int fd, const char *buf, unsigned long offset, + unsigned long length); // NOLINT -// 异步读写 -int AioRead(int fd, AioContext* aioctx); -int AioWrite(int fd, AioContext* aioctx); + // Asynchronous read and write + int AioRead(int fd, AioContext *aioctx); + int AioWrite(int fd, AioContext *aioctx); -// 获取文件的基本信息 -int StatFile4Qemu(const char* filename, FileInfo_t* finfo); -int StatFile(const char* filename, UserInfo_t* info, FileInfo_t* finfo); -int ChangeOwner(const char* filename, const char* owner, UserInfo_t* info); -int Close(int fd); + // Obtain basic information about the file + int StatFile4Qemu(const char *filename, FileInfo_t *finfo); + int StatFile(const char *filename, UserInfo_t *info, FileInfo_t *finfo); + int ChangeOwner(const char *filename, const char *owner, UserInfo_t *info); + int Close(int fd); -int Rename(UserInfo_t* info, const char* oldpath, const char* newpath); -int Extend(const char* filename, UserInfo_t* info, uint64_t size); -int Unlink(const char* filename, UserInfo_t* info); -int Recover(const char* filename, UserInfo_t* info, uint64_t fileId); -int DeleteForce(const char* filename, UserInfo_t* info); -DirInfos_t* OpenDir(const 
char* dirpath, UserInfo_t* userinfo); -void CloseDir(DirInfos_t* dirinfo); -int Listdir(DirInfos_t *dirinfo); -int Mkdir(const char* dirpath, UserInfo_t* info); -int Rmdir(const char* dirpath, UserInfo_t* info); + int Rename(UserInfo_t *info, const char *oldpath, const char *newpath); + int Extend(const char *filename, UserInfo_t *info, uint64_t size); + int Unlink(const char *filename, UserInfo_t *info); + int Recover(const char *filename, UserInfo_t *info, uint64_t fileId); + int DeleteForce(const char *filename, UserInfo_t *info); + DirInfos_t *OpenDir(const char *dirpath, UserInfo_t *userinfo); + void CloseDir(DirInfos_t *dirinfo); + int Listdir(DirInfos_t *dirinfo); + int Mkdir(const char *dirpath, UserInfo_t *info); + int Rmdir(const char *dirpath, UserInfo_t *info); -void UnInit(); + void UnInit(); -int GetClusterId(char* buf = nullptr, int len = 0); + int GetClusterId(char *buf = nullptr, int len = 0); #ifdef __cplusplus } #endif -#endif // !CURVE_LIBCURVE_INTERFACE_H //NOLINT +#endif // !CURVE_LIBCURVE_INTERFACE_H //NOLINT diff --git a/curvefs_python/test.py b/curvefs_python/test.py index 0f0045fa62..eb77fd7f9e 100644 --- a/curvefs_python/test.py +++ b/curvefs_python/test.py @@ -19,11 +19,12 @@ import os + def exec_cmd(cmd): ret = os.system(cmd) if ret == 0: print cmd + " exec success" - else : + else: print cmd + " exec fail, ret = " + str(ret) @@ -37,10 +38,10 @@ def exec_cmd(cmd): exec_cmd(cmd) cmd = "curve list --user k8s --dirname /k8s" exec_cmd(cmd) -# 不是root,失败 +# Not root, failed cmd = "curve list --user k8s --dirname /" exec_cmd(cmd) -# root没有传入密码,失败 +# Root did not pass in password, failed cmd = "curve list --user root --dirname /" exec_cmd(cmd) cmd = "curve list --user root --dirname / --password root_password" diff --git a/curvesnapshot_python/libcurveSnapshot.cpp b/curvesnapshot_python/libcurveSnapshot.cpp index 5cdce45219..97588ba58c 100644 --- a/curvesnapshot_python/libcurveSnapshot.cpp +++ b/curvesnapshot_python/libcurveSnapshot.cpp @@ -20,60 +20,57 @@ * Author: tongguangxun */ +#include "curvesnapshot_python/libcurveSnapshot.h" + #include -#include #include +#include -#include "curvesnapshot_python/libcurveSnapshot.h" -#include "src/client/libcurve_snapshot.h" -#include "src/client/client_config.h" #include "include/client/libcurve.h" #include "src/client/client_common.h" +#include "src/client/client_config.h" +#include "src/client/libcurve_snapshot.h" #include "src/common/concurrent/concurrent.h" -using curve::client::UserInfo; using curve::client::ClientConfig; -using curve::client::SnapshotClient; -using curve::client::SnapCloneClosure; -using curve::client::FileServiceOption; using curve::client::ClientConfigOption; -using curve::common::Mutex; +using curve::client::FileServiceOption; +using curve::client::SnapCloneClosure; +using curve::client::SnapshotClient; +using curve::client::UserInfo; using curve::common::ConditionVariable; +using curve::common::Mutex; class TaskTracker { public: - TaskTracker() - : concurrent_(0), - lastErr_(0) {} + TaskTracker() : concurrent_(0), lastErr_(0) {} /** - * @brief 增加一个追踪任务 + * @brief Add a tracking task */ - void AddOneTrace() { - concurrent_.fetch_add(1, std::memory_order_acq_rel); - } + void AddOneTrace() { concurrent_.fetch_add(1, std::memory_order_acq_rel); } /** - * @brief 获取任务数量 + * @brief Get the number of tasks * - * @return 任务数量 + * @return Number of tasks */ - uint32_t GetTaskNum() const { - return concurrent_; - } + uint32_t GetTaskNum() const { return concurrent_; } /** - * @brief 处理任务返回值 + * @brief processing 
the task return value * - * @param retCode 返回值 + * @param retCode return value */ void HandleResponse(int retCode) { if (retCode < 0) { lastErr_ = retCode; } if (1 == concurrent_.fetch_sub(1, std::memory_order_acq_rel)) { - // 最后一次需拿锁再发信号,防止先发信号后等待导致死锁 + // For the last task, acquire the lock before signaling, to + // prevent a deadlock caused by the signal being sent before the + // waiter starts waiting std::unique_lock lk(cv_m); cv_.notify_all(); } else { @@ -82,30 +79,29 @@ class TaskTracker { } /** - * @brief 等待追踪的所有任务完成 + * @brief Wait for all tracked tasks to complete */ void Wait() { std::unique_lock lk(cv_m); - cv_.wait(lk, [this](){ - return concurrent_.load(std::memory_order_acquire) == 0;}); + cv_.wait(lk, [this]() { + return concurrent_.load(std::memory_order_acquire) == 0; + }); } /** - * @brief 获取最后一个错误 + * @brief Get the last error * - * @return 错误码 + * @return error code */ int GetResult() { return lastErr_; } private: - // 等待的条件变量 + // Condition variable used for waiting ConditionVariable cv_; Mutex cv_m; - // 并发数量 + // Number of in-flight tasks std::atomic concurrent_; - // 错误码 + // Error code int lastErr_; };
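TaskTracker is the synchronization glue for the asynchronous calls in this file: the issuer counts each task with AddOneTrace() before sending it, the completion closure reports through HandleResponse(), and the issuing thread blocks in Wait(). A usage sketch of that pattern, the same one ReadChunkSnapshot and CreateCloneChunk follow below; DoAsyncWork() is hypothetical:

    // Usage sketch of TaskTracker. DoAsyncWork() stands in for an async RPC
    // whose completion callback calls tracker->HandleResponse(retCode).
    int RunTasks(int n) {
        auto tracker = std::make_shared<TaskTracker>();
        for (int i = 0; i < n; ++i) {
            tracker->AddOneTrace();    // count the task before issuing it
            DoAsyncWork(tracker);      // async; reports via HandleResponse()
        }
        tracker->Wait();               // blocks until all n tasks reported
        return tracker->GetResult();   // last failing return code, 0 if none
    }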
@@ -162,32 +158,26 @@ void LocalInfo2ChunkIDInfo(const CChunkIDInfo& localinfo, idinfo->lpid_ = localinfo.lpid_.value; } -int CreateSnapShot(const char* filename, - const CUserInfo_t userinfo, +int CreateSnapShot(const char* filename, const CUserInfo_t userinfo, type_uInt64_t* seq) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; } int ret = globalSnapshotclient->CreateSnapShot( - filename, - UserInfo(userinfo.owner, userinfo.password), - &seq->value); - LOG(INFO) << "create snapshot ret = " << ret - << ", seq = " << seq->value; + filename, UserInfo(userinfo.owner, userinfo.password), &seq->value); + LOG(INFO) << "create snapshot ret = " << ret << ", seq = " << seq->value; return ret; } -int DeleteSnapShot(const char* filename, - const CUserInfo_t userinfo, +int DeleteSnapShot(const char* filename, const CUserInfo_t userinfo, type_uInt64_t seq) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; } - return globalSnapshotclient->DeleteSnapShot(filename, - UserInfo(userinfo.owner, userinfo.password), - seq.value); + return globalSnapshotclient->DeleteSnapShot( + filename, UserInfo(userinfo.owner, userinfo.password), seq.value); } int GetSnapShot(const char* filename, const CUserInfo_t userinfo, @@ -198,10 +188,9 @@ curve::client::FInfo_t fileinfo; - int ret = globalSnapshotclient->GetSnapShot(filename, - UserInfo(userinfo.owner, userinfo.password), - seq.value, - &fileinfo); + int ret = globalSnapshotclient->GetSnapShot( + filename, UserInfo(userinfo.owner, userinfo.password), seq.value, + &fileinfo); if (ret == LIBCURVE_ERROR::OK) { snapinfo->id.value = fileinfo.id; snapinfo->parentid.value = fileinfo.parentid; @@ -224,22 +213,18 @@ return ret; } -int GetSnapshotSegmentInfo(const char* filename, - const CUserInfo_t userinfo, - type_uInt64_t seq, - type_uInt64_t offset, - CSegmentInfo *segInfo) { +int GetSnapshotSegmentInfo(const char* filename, const CUserInfo_t userinfo, + type_uInt64_t seq, type_uInt64_t offset, + CSegmentInfo* segInfo) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; } curve::client::SegmentInfo seg; - int ret = globalSnapshotclient->GetSnapshotSegmentInfo(filename, - UserInfo(userinfo.owner, userinfo.password), - seq.value, - offset.value, - &seg); + int ret = globalSnapshotclient->GetSnapshotSegmentInfo( + filename, UserInfo(userinfo.owner, userinfo.password), seq.value, + offset.value, &seg); if (ret == LIBCURVE_ERROR::OK) { segInfo->segmentsize.value = seg.segmentsize; segInfo->chunksize.value = seg.chunksize; @@ -259,12 +244,10 @@ return ret; } -int GetOrAllocateSegmentInfo(const char* filename, - type_uInt64_t offset, - type_uInt64_t segmentsize, - type_uInt64_t chunksize, - const CUserInfo_t userinfo, - CSegmentInfo *segInfo) { +int GetOrAllocateSegmentInfo(const char* filename, type_uInt64_t offset, + type_uInt64_t segmentsize, type_uInt64_t chunksize, + const CUserInfo_t userinfo, + CSegmentInfo* segInfo) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; @@ -274,14 +257,12 @@ fileinfo.segmentsize = segmentsize.value; fileinfo.chunksize = chunksize.value; fileinfo.fullPathName = std::string(filename); - fileinfo.filename = std::string(filename); + fileinfo.filename = std::string(filename); fileinfo.userinfo = UserInfo(userinfo.owner, userinfo.password); curve::client::SegmentInfo seg; - int ret = globalSnapshotclient->GetOrAllocateSegmentInfo(false, - offset.value, - &fileinfo, - &seg); + int ret = globalSnapshotclient->GetOrAllocateSegmentInfo( + false, offset.value, &fileinfo, &seg); segInfo->segmentsize.value = seg.segmentsize; segInfo->chunksize.value = seg.chunksize; segInfo->startoffset.value = seg.startoffset; @@ -300,11 +281,8 @@ return ret; } -int ReadChunkSnapshot(CChunkIDInfo cidinfo, - type_uInt64_t seq, - type_uInt64_t offset, - type_uInt64_t len, - char *buf) { +int ReadChunkSnapshot(CChunkIDInfo cidinfo, type_uInt64_t seq, + type_uInt64_t offset, type_uInt64_t len, char* buf) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; } curve::client::ChunkIDInfo idinfo; LocalInfo2ChunkIDInfo(cidinfo, &idinfo); auto tracker = std::make_shared(); - SnapCloneTestClosure *cb = new SnapCloneTestClosure(tracker); + SnapCloneTestClosure* cb = new SnapCloneTestClosure(tracker); tracker->AddOneTrace(); - int ret = globalSnapshotclient->ReadChunkSnapshot(idinfo, seq.value, - offset.value, len.value, - buf, cb); + int ret = globalSnapshotclient->ReadChunkSnapshot( + idinfo, seq.value, offset.value, len.value, buf, cb); tracker->Wait(); if (ret < 0) { return ret; @@ -340,13 +317,12 @@ curve::client::ChunkIDInfo idinfo; LocalInfo2ChunkIDInfo(cidinfo, &idinfo); - int ret = globalSnapshotclient->DeleteChunkSnapshotOrCorrectSn(idinfo, - correctedSeq.value); + int ret = globalSnapshotclient->DeleteChunkSnapshotOrCorrectSn( + idinfo, correctedSeq.value); return ret; } - -int GetChunkInfo(CChunkIDInfo cidinfo, CChunkInfoDetail *chunkInfo) { +int GetChunkInfo(CChunkIDInfo cidinfo, CChunkInfoDetail* chunkInfo) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; @@ -364,31 +340,23 @@ return ret; } - -int CheckSnapShotStatus(const char* filename, - const CUserInfo_t userinfo, - type_uInt64_t seq, - type_uInt32_t* filestatus)
{ +int CheckSnapShotStatus(const char* filename, const CUserInfo_t userinfo, + type_uInt64_t seq, type_uInt32_t* filestatus) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; } curve::client::FileStatus fs; - int ret = globalSnapshotclient->CheckSnapShotStatus(filename, - UserInfo(userinfo.owner, userinfo.password), - seq.value, - &fs); + int ret = globalSnapshotclient->CheckSnapShotStatus( + filename, UserInfo(userinfo.owner, userinfo.password), seq.value, &fs); filestatus->value = static_cast(fs); return ret; } - -int CreateCloneChunk(const char* location, - const CChunkIDInfo chunkidinfo, - type_uInt64_t sn, - type_uInt64_t correntSn, - type_uInt64_t chunkSize) { +int CreateCloneChunk(const char* location, const CChunkIDInfo chunkidinfo, + type_uInt64_t sn, type_uInt64_t correntSn, + type_uInt64_t chunkSize) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; @@ -397,13 +365,11 @@ int CreateCloneChunk(const char* location, curve::client::ChunkIDInfo idinfo; LocalInfo2ChunkIDInfo(chunkidinfo, &idinfo); auto tracker = std::make_shared(); - SnapCloneTestClosure *cb = new SnapCloneTestClosure(tracker); + SnapCloneTestClosure* cb = new SnapCloneTestClosure(tracker); tracker->AddOneTrace(); - int ret = globalSnapshotclient->CreateCloneChunk(location, idinfo, - sn.value, correntSn.value, - chunkSize.value, - cb); + int ret = globalSnapshotclient->CreateCloneChunk( + location, idinfo, sn.value, correntSn.value, chunkSize.value, cb); tracker->Wait(); if (ret < 0) { return ret; @@ -412,10 +378,8 @@ int CreateCloneChunk(const char* location, } } - -int RecoverChunk(const CChunkIDInfo chunkidinfo, - type_uInt64_t offset, - type_uInt64_t len) { +int RecoverChunk(const CChunkIDInfo chunkidinfo, type_uInt64_t offset, + type_uInt64_t len) { if (globalSnapshotclient == nullptr) { LOG(ERROR) << "not init!"; return -LIBCURVE_ERROR::FAILED; @@ -423,13 +387,11 @@ int RecoverChunk(const CChunkIDInfo chunkidinfo, curve::client::ChunkIDInfo idinfo; LocalInfo2ChunkIDInfo(chunkidinfo, &idinfo); auto tracker = std::make_shared(); - SnapCloneTestClosure *cb = new SnapCloneTestClosure(tracker); + SnapCloneTestClosure* cb = new SnapCloneTestClosure(tracker); tracker->AddOneTrace(); - int ret = globalSnapshotclient->RecoverChunk(idinfo, - offset.value, - len.value, - cb); + int ret = + globalSnapshotclient->RecoverChunk(idinfo, offset.value, len.value, cb); tracker->Wait(); if (ret < 0) { return ret; diff --git a/curvesnapshot_python/libcurveSnapshot.h b/curvesnapshot_python/libcurveSnapshot.h index bb45a02f57..7db41cf7c3 100644 --- a/curvesnapshot_python/libcurveSnapshot.h +++ b/curvesnapshot_python/libcurveSnapshot.h @@ -24,6 +24,7 @@ #define CURVESNAPSHOT_PYTHON_LIBCURVESNAPSHOT_H_ #include + #include #ifdef __cplusplus @@ -52,42 +53,36 @@ enum CFileType { }; typedef struct FileInfo { - type_uInt64_t id; - type_uInt64_t parentid; - int filetype; - type_uInt64_t length; - type_uInt64_t ctime; + type_uInt64_t id; + type_uInt64_t parentid; + int filetype; + type_uInt64_t length; + type_uInt64_t ctime; } FileInfo_t; -enum CFileStatus { - Created = 0, - Deleting, - Cloning, - CloneMetaInstalled, - Cloned -}; +enum CFileStatus { Created = 0, Deleting, Cloning, CloneMetaInstalled, Cloned }; typedef struct CChunkIDInfo { - type_uInt64_t cid_; - type_uInt32_t cpid_; - type_uInt32_t lpid_; + type_uInt64_t cid_; + type_uInt32_t cpid_; + type_uInt32_t lpid_; } CChunkIDInfo_t; -// 保存每个chunk对应的版本信息 +// Save the version information 
corresponding to each chunk typedef struct CChunkInfoDetail { type_uInt64_t snSize; std::vector chunkSn; } CChunkInfoDetail_t; - -// 保存logicalpool中segment对应的copysetid信息 +// Save the copysetid information corresponding to the segment in the +// logicalpool typedef struct CLogicalPoolCopysetIDInfo { type_uInt32_t lpid; type_uInt32_t cpidVecSize; std::vector cpidVec; } LogicalPoolCopysetIDInfo_t; -// 保存每个segment的基本信息 +// Save basic information for each segment typedef struct CSegmentInfo { type_uInt32_t segmentsize; type_uInt32_t chunksize; @@ -98,154 +93,153 @@ typedef struct CSegmentInfo { } CSegmentInfo_t; typedef struct CFInfo { - type_uInt64_t id; - type_uInt64_t parentid; - CFileType filetype; - type_uInt32_t chunksize; - type_uInt32_t segmentsize; - type_uInt64_t length; - type_uInt64_t ctime; - type_uInt64_t seqnum; - char owner[256]; - char filename[256]; - CFileStatus filestatus; + type_uInt64_t id; + type_uInt64_t parentid; + CFileType filetype; + type_uInt32_t chunksize; + type_uInt32_t segmentsize; + type_uInt64_t length; + type_uInt64_t ctime; + type_uInt64_t seqnum; + char owner[256]; + char filename[256]; + CFileStatus filestatus; } CFInfo_t; int Init(const char* path); /** - * 创建快照 - * @param: userinfo是用户信息 - * @param: filename为要创建快照的文件名 - * @param: seq是出参,获取该文件的版本信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Create a snapshot + * @param: userinfo is the user information + * @param: filename is the file name to create the snapshot + * @param: seq is the output parameter to obtain the version information of the + * file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED */ -int CreateSnapShot(const char* filename, - const CUserInfo_t userinfo, +int CreateSnapShot(const char* filename, const CUserInfo_t userinfo, type_uInt64_t* seq); /** - * 删除快照 - * @param: userinfo是用户信息 - * @param: filename为要删除的文件名 - * @param: seq该文件的版本信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Delete snapshot + * @param: userinfo is the user information + * @param: filename is the file name to be deleted + * @param: seq The version information of this file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED */ -int DeleteSnapShot(const char* filename, - const CUserInfo_t userinfo, +int DeleteSnapShot(const char* filename, const CUserInfo_t userinfo, type_uInt64_t seq); /** - * 获取快照对应的文件信息 - * @param: userinfo是用户信息 - * @param: filename为对应的文件名 - * @param: seq为该文件打快照时对应的版本信息 - * @param: snapinfo是出参,保存当前文件的基础信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Obtain file information corresponding to the snapshot + * @param: userinfo is the user information + * @param: filename is the corresponding file name + * @param: seq corresponds to the version information when taking a snapshot of + * the file + * @param: snapinfo is a parameter that saves the basic information of the + * current file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED */ int GetSnapShot(const char* fname, const CUserInfo_t userinfo, type_uInt64_t seq, CFInfo_t* snapinfo); /** - * 获取快照数据segment信息 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seq是文件版本号信息 - * @param: offset是文件的偏移 - * @param:segInfo是出参,保存当前文件的快照segment信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Obtain snapshot data segment information + * @param: userinfo is the user information + * @param: filenam file name + * @param: seq is the file version number 
 /**
- * 获取快照数据segment信息
- * @param: userinfo是用户信息
- * @param: filenam文件名
- * @param: seq是文件版本号信息
- * @param: offset是文件的偏移
- * @param:segInfo是出参,保存当前文件的快照segment信息
- * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED
+ * Obtain snapshot data segment information
+ * @param: userinfo is the user information
+ * @param: filename is the file name
+ * @param: seq is the file version number information
+ * @param: offset is the offset within the file
+ * @param: segInfo is an output parameter that stores the snapshot segment
+ * information of the current file
+ * @return: LIBCURVE_ERROR::OK on success, otherwise LIBCURVE_ERROR::FAILED
 */
-int GetSnapshotSegmentInfo(const char* filename,
-                           const CUserInfo_t userinfo,
-                           type_uInt64_t seq,
-                           type_uInt64_t offset,
-                           CSegmentInfo *segInfo);
+int GetSnapshotSegmentInfo(const char* filename, const CUserInfo_t userinfo,
+                           type_uInt64_t seq, type_uInt64_t offset,
+                           CSegmentInfo* segInfo);
 
 /**
- * 读取seq版本号的快照数据
- * @param: cidinfo是当前chunk对应的id信息
- * @param: seq是快照版本号
- * @param: offset是快照内的offset
- * @param: len是要读取的长度
- * @param: buf是读取缓冲区
- * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED
+ * Read the snapshot data of the seq version number
+ * @param: cidinfo is the ID information corresponding to the current chunk
+ * @param: seq is the snapshot version number
+ * @param: offset is the offset within the snapshot
+ * @param: len is the length to be read
+ * @param: buf is the read buffer
+ * @return: LIBCURVE_ERROR::OK on success, otherwise LIBCURVE_ERROR::FAILED
 */
-int ReadChunkSnapshot(CChunkIDInfo cidinfo,
-                      type_uInt64_t seq,
-                      type_uInt64_t offset,
-                      type_uInt64_t len,
-                      char *buf);
+int ReadChunkSnapshot(CChunkIDInfo cidinfo, type_uInt64_t seq,
+                      type_uInt64_t offset, type_uInt64_t len, char* buf);
 
 /**
- * 删除此次转储时产生的或者历史遗留的快照
- * 如果转储过程中没有产生快照,则修改chunk的correctedSn
- * @param: cidinfo是当前chunk对应的id信息
- * @param: correctedSeq是chunk需要修正的版本
+ * Delete the snapshot generated during this dump, or one left over from
+ * history. If no snapshot was generated during the dump, modify the
+ * correctedSn of the chunk instead
+ * @param: cidinfo is the ID information corresponding to the current chunk
+ * @param: correctedSeq is the version of the chunk that needs to be corrected
 */
 int DeleteChunkSnapshotOrCorrectSn(CChunkIDInfo cidinfo,
                                    type_uInt64_t correctedSeq);
 
 /**
- * 获取chunk的版本信息,chunkInfo是出参
- * @param: cidinfo是当前chunk对应的id信息
- * @param: chunkInfo是快照的详细信息
+ * Obtain the version information of the chunk; chunkInfo is the output
+ * parameter
+ * @param: cidinfo is the ID information corresponding to the current chunk
+ * @param: chunkInfo is the detailed information of the snapshot
 */
-int GetChunkInfo(CChunkIDInfo cidinfo, CChunkInfoDetail *chunkInfo);
+int GetChunkInfo(CChunkIDInfo cidinfo, CChunkInfoDetail* chunkInfo);
 
 /**
- * 获取快照状态
- * @param: userinfo是用户信息
- * @param: filenam文件名
- * @param: seq是文件版本号信息
+ * Get the snapshot status
+ * @param: userinfo is the user information
+ * @param: filename is the file name
+ * @param: seq is the file version number information
 */
-int CheckSnapShotStatus(const char* filename,
-                        const CUserInfo_t userinfo,
-                        type_uInt64_t seq,
-                        type_uInt32_t* filestatus);
+int CheckSnapShotStatus(const char* filename, const CUserInfo_t userinfo,
+                        type_uInt64_t seq, type_uInt32_t* filestatus);
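One plausible way to drive CheckSnapShotStatus after a DeleteSnapShot call, sketched under stated assumptions: LIBCURVE_ERROR::OK == 0, the CFileStatus values declared earlier in this header, and an arbitrary retry cap and interval.

#include <unistd.h>

#include "libcurveSnapshot.h"

// Poll until the snapshot leaves the Deleting state or disappears.
bool WaitSnapshotDeleted(const char* file, CUserInfo_t user, type_uInt64_t seq) {
    for (int i = 0; i < 60; ++i) {  // arbitrary retry cap
        type_uInt32_t status;
        if (CheckSnapShotStatus(file, user, seq, &status) != 0) {
            // Assumption: a failed lookup after deletion means the snapshot
            // record is already gone.
            return true;
        }
        if (status.value != Deleting) {
            return false;  // snapshot still exists in some other state
        }
        sleep(1);  // arbitrary retry interval
    }
    return false;
}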
 /**
- * 获取快照分配信息
- * @param: filename是当前文件名
- * @param: offset是当前的文件偏移
- * @param: segmentsize为segment大小
+ * Obtain snapshot allocation information
+ * @param: filename is the current file name
+ * @param: offset is the current file offset
+ * @param: segmentsize is the segment size
 * @param: chunksize
- * @param: userinfo是用户信息
- * @param[out]: segInfo是出参
+ * @param: userinfo is the user information
+ * @param[out]: segInfo is the output parameter
 */
-int GetOrAllocateSegmentInfo(const char* filename,
-                             type_uInt64_t offset,
-                             type_uInt64_t segmentsize,
-                             type_uInt64_t chunksize,
-                             const CUserInfo_t userinfo,
-                             CSegmentInfo *segInfo);
+int GetOrAllocateSegmentInfo(const char* filename, type_uInt64_t offset,
+                             type_uInt64_t segmentsize, type_uInt64_t chunksize,
+                             const CUserInfo_t userinfo, CSegmentInfo* segInfo);
 
 /**
- * @brief lazy 创建clone chunk
+ * @brief Lazily create a clone chunk
  * @detail
- *  - location的格式定义为 A@B的形式。
- *  - 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址;
- *  - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs
+ *  - The format of 'location' is defined as A@B.
+ *  - If the source data is on S3, the 'location' format is uri@s3, where 'uri'
+ * is the actual address of the chunk object.
+ *  - If the source data is on CurveFS, the 'location' format is
+ * /filename/chunkindex@cs.
  *
- * @param:location 数据源的url
- * @param:chunkidinfo 目标chunk
- * @param:sn chunk的序列号
- * @param:chunkSize chunk的大小
- * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn
+ * @param: location The URL of the data source
+ * @param: chunkidinfo The target chunk
+ * @param: sn The sequence number of the chunk
+ * @param: chunkSize The size of the chunk
+ * @param: correntSn Used for modifying the 'correctedSn' of the chunk when
+ * creating the clone chunk
  *
- * @return 错误码
+ * @return error code
 */
-int CreateCloneChunk(const char* location,
-                     const CChunkIDInfo chunkidinfo,
-                     type_uInt64_t sn,
-                     type_uInt64_t correntSn,
-                     type_uInt64_t chunkSize);
+int CreateCloneChunk(const char* location, const CChunkIDInfo chunkidinfo,
+                     type_uInt64_t sn, type_uInt64_t correntSn,
+                     type_uInt64_t chunkSize);
 
 /**
- * @brief 实际恢复chunk数据
+ * @brief Actually recover the chunk data
  *
- * @param:chunkidinfo chunkidinfo
- * @param:offset 偏移
- * @param:len 长度
+ * @param: chunkidinfo the ID information of the chunk
+ * @param: offset the offset
+ * @param: len the length
  *
- * @return 错误码
+ * @return error code
 */
-int RecoverChunk(const CChunkIDInfo chunkidinfo,
-                 type_uInt64_t offset,
-                 type_uInt64_t len);
+int RecoverChunk(const CChunkIDInfo chunkidinfo, type_uInt64_t offset,
+                 type_uInt64_t len);
 
 /**
- * 析构,回收资源
+ * Tear down and reclaim resources
 */
 void UnInit();
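A hedged sketch of the clone path these two declarations form (illustrative, not the patch's code): it assumes LIBCURVE_ERROR::OK == 0 and uses hypothetical IDs, a hypothetical chunk size, and an S3 object URI in the uri@s3 form described above.

#include "libcurveSnapshot.h"

int CloneOneChunk() {
    CChunkIDInfo_t id;
    id.cid_.value = 1;   // hypothetical chunk id
    id.cpid_.value = 1;  // hypothetical copyset id
    id.lpid_.value = 1;  // hypothetical logical pool id

    type_uInt64_t sn, correctedSn, chunkSize, offset, len;
    sn.value = 1;
    correctedSn.value = 0;
    chunkSize.value = 16ULL * 1024 * 1024;  // hypothetical 16 MiB chunk

    // The source object lives on S3, so 'location' takes the uri@s3 form.
    int ret = CreateCloneChunk("chunk-obj-uri@s3", id, sn, correctedSn, chunkSize);
    if (ret != 0) {
        return ret;
    }

    // Trigger the actual data recovery for the whole chunk.
    offset.value = 0;
    len.value = chunkSize.value;
    return RecoverChunk(id, offset, len);
}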
diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.0 b/deploy/local/chunkserver/conf/chunkserver.conf.0
index 1525855ebe..b6b0010c83 100644
--- a/deploy/local/chunkserver/conf/chunkserver.conf.0
+++ b/deploy/local/chunkserver/conf/chunkserver.conf.0
@@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./0/chunkserver.dat
 chunkserver.disk_type=nvme
 chunkserver.snapshot_throttle_throughput_bytes=41943040
 chunkserver.snapshot_throttle_check_cycles=4
-# 限制inflight io数量,一般是5000
+# Limit the number of inflight IO requests, typically 5000
 chunkserver.max_inflight_requests=5000
 
 #
@@ -145,7 +145,7 @@ chunkfilepool.retry_times=5
 #
 # WAL file pool
 #
-# walpool是否共用chunkfilepool,如果为true,则以下配置无效
+# Whether the walpool shares the chunkfilepool; if true, the configuration below takes no effect
 walfilepool.use_chunk_file_pool=true
 walfilepool.enable_get_segment_from_pool=false
 walfilepool.file_pool_dir=./0/walfilepool/
diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.1 b/deploy/local/chunkserver/conf/chunkserver.conf.1
index d14fa15bb6..62719e0c30 100644
--- a/deploy/local/chunkserver/conf/chunkserver.conf.1
+++ b/deploy/local/chunkserver/conf/chunkserver.conf.1
@@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./1/chunkserver.dat
 chunkserver.disk_type=nvme
 chunkserver.snapshot_throttle_throughput_bytes=41943040
 chunkserver.snapshot_throttle_check_cycles=4
-# 限制inflight io数量,一般是5000
+# Limit the number of inflight IO requests, typically 5000
 chunkserver.max_inflight_requests=5000
 
 #
@@ -143,7 +143,7 @@ chunkfilepool.retry_times=5
 #
 # WAL file pool
 #
-# walpool是否共用chunkfilepool,如果为true,则以下配置无效
+# Whether the walpool shares the chunkfilepool; if true, the configuration below takes no effect
 walfilepool.use_chunk_file_pool=true
 walfilepool.enable_get_segment_from_pool=false
 walfilepool.file_pool_dir=./1/walfilepool/
diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.2 b/deploy/local/chunkserver/conf/chunkserver.conf.2
index 2604423d6f..edc5750db7 100644
--- a/deploy/local/chunkserver/conf/chunkserver.conf.2
+++ b/deploy/local/chunkserver/conf/chunkserver.conf.2
@@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./2/chunkserver.dat
 chunkserver.disk_type=nvme
 chunkserver.snapshot_throttle_throughput_bytes=41943040
 chunkserver.snapshot_throttle_check_cycles=4
-# 限制inflight io数量,一般是5000
+# Limit the number of inflight IO requests, typically 5000
 chunkserver.max_inflight_requests=5000
 
 #
@@ -142,7 +142,7 @@ chunkfilepool.retry_times=5
 #
 # WAL file pool
 #
-# walpool是否共用chunkfilepool,如果为true,则以下配置无效
+# Whether the walpool shares the chunkfilepool; if true, the configuration below takes no effect
 walfilepool.use_chunk_file_pool=true
 walfilepool.enable_get_segment_from_pool=false
 walfilepool.file_pool_dir=./2/walfilepool/
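chunkserver.max_inflight_requests above caps how many requests a chunkserver admits concurrently. A minimal sketch of such an inflight limiter, assuming a plain atomic counter; all names here are illustrative, not the project's implementation.

#include <atomic>
#include <cstdint>

class InflightLimiter {
 public:
    explicit InflightLimiter(uint64_t max) : max_(max), inflight_(0) {}

    // Try to admit one more request; returns false when the limit is reached.
    bool TryAcquire() {
        uint64_t cur = inflight_.fetch_add(1, std::memory_order_relaxed);
        if (cur >= max_) {
            inflight_.fetch_sub(1, std::memory_order_relaxed);
            return false;  // caller should reject or retry the request
        }
        return true;
    }

    // Call when an admitted request completes.
    void Release() { inflight_.fetch_sub(1, std::memory_order_relaxed); }

 private:
    const uint64_t max_;  // e.g. 5000, matching the config default above
    std::atomic<uint64_t> inflight_;
};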
zZO4r|WTUm4!^9>}EKf0+29mL8jV#`+=zd;PMW7^FN`S$txkaFpL_LyjcT?d6g69Pu zQ5fbUAV~APbHQBVdmqtp*fTmwWTTzzi|-)tL9Vof@TMXEyD1Z4APeWif;+Lc`?!*& zd%T^edn#JEyI;dF0oyd-lC1`H`<Tv_BiIe4Awr=|F~CBsP59W z2ZX81>I7e^X?Ks{>w!k{+~2WU#y@7JmoqBzlaAaSUP+WKr#ZhqkX@|rnE*G32NT9` zuQO>TL7uNorD;r+%zv3@n^NDtam}9ELyFVt?1w#dUn`6uI*Cm;gN34wXU}o+yb8`Y z0)lOPy;iS!orVS#O714qqCGBm^^^HNzzr>l{_;NKqr>(j=FQQKz|`3jv7~ULn)KKu z$HBgH5v`DB6)NIO`Ol|LRH7dOhV9<{DM(Mt5z65azRm3-?9#aN z`!Rax(DJO|S|(&hdEyW~ND<+xiLUr%ONonF6x`0laHhrcr{_I3A?TA`F7mR>wrs4q zgiatj*3(CF*-OmN{B*LZtdhm&J;3F##zQU37h_|ke+z3pA#$wM4tu%XiLONv>vxl} z?BbZw=8hp4!1{gdN`Sk;_puG{wVPXTT6ZWY|?1rp-(>0GXjJRQh4D(v;=oMVl zm&aeIxVzt8$G?78juR$cW*A`b;j6qmK;Bm6;&C4144X2dxg&7ctRVR1!T=~qn{?PBXaUr zknO4+VAk8FT-nY%MEb+GEdeVG-gnwcByaf1gMsH?hR9i(Sky_6VryXw?XMn>r)b>AaKhk;p?g@O5h6Iy)>rlS6T zGYXPTKf`~~WPCK&|DaIg&Pd}<*e2snxF^N`8EG;?g%9ujFWeo4Z@2g_Ea66AWc)9j z(nT06!1_;;{CdzPmYZeBuh8|V+STbR~Qw@55aZqW2%)puXig{b3op&sYvAQu4v_) zu27k6VSFj9jl6A-9@e(yN5c%F3oSj zw8k`Dq3z)8=*&kctp{;J6lOE}L>F(UKS~~Ye*Z4T;R9}8|2cc?1a%rb?Y;ZP+PH7b zj>cth>R!RITxZT(7y#1iJmkXYFD=CLI$ z%j{?*3j#50c-EfjBv{*erah(%z;XTikd<})mv=i_o7}HV3uyhNh`Ij7rX@P%m(SIW z{ej_gJMyzAm?I$Ml!gdn>y9Op_9Vh_0!kcVboX9`Bb_hIP&-3bO16o_tu5e30=!R_ zCI;+~HWkb+7iLh*2e@zUKP$`lc+nl!N1})-xx(_V zE+tHY*cmvj#sQw9OPXOTZfWv!ENYI3l`fh^E1O@#9y@7B@FTSWv*#BlicYYqv7$1; z!P3!9&_8~cAzb)I*(TFJ_}LHa*4!;?WuIR5zf{HgN8ShbW>sY3hd8>=srmS4#hNo2 z<-zFHUw^NUI32(bPxyv3i!*Jqtj84a{s@2O4qPIr#tU?P4J4*c++tmSOpDEmggVM* z@ZE%YiAU)AQSY;y$G~~}TA&3UTIEM`I8VN3X^Dr{H~vksY2CM(RFTF5&3+1I`=;n^ zd%t7bPu%hqn}*kxxD)vk>T2<<(kPNlHTOp2>)Fiq50cU$)F#bhM^7~3NcrN0hNA9M zk$cyrnQuUoMXQ(A`KG7RHXf-ue3jczh5e^5Z2BZP$wtP!>x!gxjqyD3KAHFtSHs+I zf`mL1Ry0PG#lF!cidq8hKVuc46in9`bwh~7YZOsYwP|0MsN-XCC$(MAo#2bBvuQUq_CqVjslZ6+rs-9= zY{^m2!|=-T*%@t$EQE;Fs9%&VmA-bDKjEAQccc0)9Ifo#SyOfYa%RJZ%NXIxF!7-V z8lj0bHdMoWmP5gvq{z8xDOYi9j`%RKzrX#N`T34kcd(U*r$Ug(V)Wz-J7zo`mA0Pe zo+YB3Ykq1*G|z4sU|I$b9x=1njqB$FJdN5@sfv%Z9Hjq_hA2d*HHt6>Nrho1@WY?C^^p@hvIR8QBzhl(Si>`SiEB zxgx^}FMoZ*(Vn_?Dvm!fH=98l6ER6d@iy&88w26R;hu(M8DrgY!;z)~4oa`ufnv>B zjU(Nguj=+3dw+7>d0X@BymZNcF>8)o{EM-kOMCd@bjvDJ+eaHdeyY)V4-5X%D}Pn0 z%(0*GV)7r zE#@2&3`{oD|DTb26JscGtzlqrt<$>m5T$@?4HYd}JUl!?a(YraZhA68FeM!~BLxE^ zGd(Lm3qLC>j}Si_BM%3k5HFjMpb!O@3YV}vy@VN`v@n@nfRL=FfU2#W2yXy7yqtuj zhOCgerl_2}ysV0jvW|kHrk=KntcIF~-bbZRpLCV=Y_zOC>R7p1>ngii=~!D^TR32P z*}GZWdHA?HSbBTdrX}PflEK;erd8%6GNA@U=y*Xq?c40rR{ss7dPXEV#s9Er4^Kn* zhR6aJ;g-hr4N)390b&S+c%_#)7eRfhzWb(yd_xrAiOH-;txJIve2*z^jjw6UEG|nc zZp^GJP5;^YJu|wdG`aNq_mYa*?+s<;KO25ll{D3-9ezUu!zYZzr(qW$vVoS`M>pI5 z4!7*>jDC&otxX+FQz<}XW}fcPni_7H-e|w+k3XHO+x#2zbeM6l-8wQd(mk@!H#0Oe zyEHr2w=kcUTYyN1usF84pEgv0C=8&SJygQLm@g^HNNV}~Iq3)tAhQmlVj60+&?{hK z?@9#AYQn=JD4-cE7yLDHp0*N9T~PJkq&C}k1MkVb>-&r2KCcu$9ubNb!Vd$`z9U#L*z~#a4RmJ^TV-H7;GXdO*^3uS6hQWCM zl{}g{+nKE|OG86KA_FkNxY%Le8U0as`NL48 zGpZV+82xLON5^%msBwAK7mtR-ZJS!sGvCwMRTXwKW+j!u%53hw!}ZN=wu6DLBBD^} zF-|pB)zZ~$L1Y{a9^Pb&zo;$Qm;rTn0eWcT=8YMsrfpqf^1`u^YuaEr31J`20k!Y110<4;?s_+nHp+pX-7pnC^Sc` z)?H;-6xqMaXXHIsIa|)GKMuV+q;uQ?DZWJlkNpy_clK`wcWd{IZ=0;#Qw1s&2jKY1 zFP*RJZ`bRUT5@3v$JasNi|f}1N2tC?yn^f<7e>(QKSv)A)qjQW0#63(`k$%H<+wuK z8SY$wEAodv;4R^;&*XKd(`O5N?Y{xYw(}z#=v^mz*VtXRcjr~kSx8>z z2AzM@I{i5?JepB??y_RnV5W!? 
zGO#%$J_|ZGdE0w?{sE#BRZP@782Bw&BvYUCgk^>~<~1oc5sc9L^PaoQLi3=b8|rzD z5~IL#bwm!9c)J}ziJveDdSmRo+=%Rad~NEEfZ%#j@Z@Z2??ofZL~0>_oK9xVO4#IISJ$7a65g)W-){0=^FE~e zvcrkq`p}^!Lg^e$_v!Ey(fLKL@Z)Y05nxDJ973o&LK{ z>^?%xGYiI?euhNmkXc-e-;Ci)sO+kx^?uuDFRPtTXF<2X^AR9&F6jIvd0uJs&q35F zWS;bAxtN81R#Fw4$#f)a4}9=iXw*ENVq?i2NWkpoFC|nz>BxP10dVvLHkyRvpKp;n zFIn*Q)*n*RIx&SWD>@$!BRQ>|q-1!c;-6u-;c*eis9D(LQE*Owe+F}lOw)oI7^mY* zbJ@V1H{Nfnz!h2N)y4Wl7I5VqoRhBQf6P}FZ!@Iu8*LEMs8CNum+)D4k7bf?vdW&J zPS{Nbvtbx225xX5aAXItpw2H0^~0{)ZGY#|Ij=*+re$kL5r&0Lo6!|C!=SH3p!-(psV;6`D`w+K&?+omnJ?9AJR~=YPRwMo< zh1l5QenoYJ-q^1%sD8F%-ET1aGhaG1FBGwa-AI`%V<&8{p8$jj%j8Gi1F)&I>n}K5 zKcuuTPn9nZS-p%6|{&9tm6uid1+*!?TTC=3ib) zZs<>b^Een0+X;itH*BeG@7IMl`j2rFPOFjh*pw;~UtUoHz~PjeV}}8^dAr*L+e7R3 zuV)P9(FN}lpI`3nO5Ww=IH~exI7ac!2hY|i;>r%8YytWQd9Uy~DbyOTQ+}pK@5p3_ z0$PF$V-}8Ae}k+FUD*v@muRaRR}T|hG@teyid;=;Yu6h(?~QC=fj6pWK6ck4=m@bb zzoh`_zv7N|;6a<9#r_5Au)RMKbYjI%G}70gq4Zq1{-QpI0E9#P(V`LxCXaSlSsS!k zz1m=p04-g0bIj(|weW^=krcf0aWDH%jMY6{24f0UL(%hRkIeObkI*rfB3!WLYfOW0 z!ID5_4L{WJmauR&h*a>W9+}+sEr$`Z__Wnwm;EqRt=_k<4F$;t=P6ZJ$6ez+?#EqK z-4B7yh@}tbfj`m5G&bbGLkc{U>hhW{^oXDS0(Q7#BbW|>>|zpB-&o_W!spNW?L8y0 zy~Rt(PO}_Hbr!X2uC@C?;T;(oY8duvWSEh2c6BA!5X(HIwX0td5ux|LKgyHlCZ%Ws z?Zaq!)9k+@+MfrF*ZN;&TDae?elqT3`z>~J^7ss|>ef$*&)ZBcW(`?gGRYLd(@cN1 z1sDwCDCC+o7Gh`3UZ6?Cv;t2q%guq?!8goFt3mibmj3lg9FDE&|Gv>FOsg#_#KETi z>bC1U`7KmN!A1v!3RVaul=E{?|m)5EK)w^b4bF^B(lllzv{2& zX$K~+L!G1eG(4SYFZHX3X`dFEfD%;rVV%=PcsG%JiFRoH?7Y~ zj9Y7uo}qQ*u7-voZ>QzzDt53>$!m7A(uvvLB&ZV+qQBpsBRk(DUVWi{=PD4}Ti~XU z%8JFHGp{alD{I~2SA3VxnXHcAs?R}(_r~enuZvB8&K==8|7!)kc&>`?m?F)A+HBEq zYz#d`;T%K0(?HS+gwhld0-500O?WKoRPn)paM zv)r?Pi1!KMEA_KY5Zh-?R7dzI5S^$u1d&_V^&L4p0yu+SI@10g7e{h-yp9odzIv+F z0%#eR=Qqwgg1;W(h&nIE7O3XI@}oyeRSUle;gwDN;{@G;o<9LMBUJwa7WBMD1;BNc zhk~H`D@j$65d34%9Ibo$SXgb`apk|9!!8Qz!CP6S9f(iQ+93XN-LK5aEhye`aquVZ z2Y>`LiSOio_cjTjpoa_|tp}VHlY{Ns_8k$^e|a6?HW$79akM!xMteI?GN}#*Qeate zGTmxTsvRIfVi|01zhpQ0^zm79!|5f^xxbV`Ef8dEnw9Y8Ed*8nbXyqP74M=y8b(iE z)MdVGEUa%)zBva--+R+2xLAm{bq742Z}6kT;Lb_K21D6ca;IQM5}tc__&wnQS2 z-@Zbu+LUR364s2*DB*=Xn3+Mde8~j(@O{1B&<6$9NV%KdFEUYYIq(%_0JJ-8p9Z9M zdObBP&)UDLi$843=RKeQ2)f^*dYMa*_!2OLuTW*-`qYx_p9wePB#6gB+h8-QRKA7E z9-P0(025_=L38qzjkvY$U8u1R{-VABb`+T{?ZHpESPLa<_dhm;Q+Bt>_s3;K>sx`h z&M3O!+r(Ojg3Z@!agl8t)qQri2{6Exk)>1UvhkUH6AjqpOUgelnk1}F11}d$c0iJv zBfOIC;(nsbD+_%v!-$rly&vLf)2*?@%QJmAXXymq=QoikJRuBz_Z5S?0NHq<1dsf* zA|DvTiQ=avZW2~ZP)*~2>gRQggMxjVsC^Rs?w72wse*35O4dhWXOLdYrvo>|Isyp5 z!E~RpiXLlO!(@7p%#6g`>yx$CA2uk1cnxpQH%Be$inWQ2@+&F+;NC$-GR~m-&_>vt{k4Y-TDdoh z<`$#&d+XwmVYaIu@49OWYwuPsNLnbh>9D`0Q89@=&Yf|IUwuPeHag;&yOBur>7H&m zpOU>UDbOt-(Wszm2)L{Y&MDJ>kAN`Tkt%4)+I|w1c*&w4ukJ#Ybi9m+w<2Vl6^1C% z@-RPzcYCZolE)qt+xM?(kBRQIAX_J;p1$d7KgLZl)NB-3g{D?)wJcc>G?}_bH2z_4 z4C_cPm71aa^x@LSkN^oA5N)%V_^TtzAt@Kse?q9(0w2z}3*4o>I=Bu~jN}cgFXa(E z7jLre28rXZ`U&p%e`kE#+0VTv^G5+im(yx~WG0N0J%Y??qOq02sZc=m3lvxgW9C3* zH0*WRO6(s!W$YAVey45KBL745+}tcTTsX@A)XcR6i|B9W*h~5sKL2Lb2bH}8^V2tI z{<8y$fW{&nqlDQ^s~<*%i=YtzS`Vw(1v_Ud5`x2Y4x@B7Al6ZxxB3{gUlquL^02Jka$b?_CuSP31 zHYfQto=%fmvs9thYyIH0fo}e&x(u`E!xYtBdQ$-EAKrHjOJqytkbPvq)RHj9iP|J{ z;4@xYI%{?b5;C%&Z;HOOg_C`fdm$qCU))!gtVco5sGiwQQyHYXX@92y5L7dv8A&kL zP&T!%tDkd&jZiPES?-mEzZaw|Z68{zqKKyr=4prualtq4sf|)rnKI}^YDassxwnUd z`uc6wL}HooMVaR2mk{xqiv84&f&R<{O6DS7d^9i@1fvz!8KT`k7VCMC6=q2n=?L{G zSD8{|stZh~HV%d6a>&+nzr}L<{072u#mF6sL}ERXDp+J5bE%Fc`lI4{H>bD4L#jS} zkQvVrG4TVpeRNS>3Ko*D!d#F>!9X77EhX`%?%pXYbb^`mct!&be5Wh3vO^wuO7v6$qg( zdz_R(Uq-|yCpe~1Z3wDN!b*Dq&zD;LEtG`8oZeSq()yp0nFussA(?FwY^g9T$`A;j zux(oxW%+OD))TU>A314QpX=wpgI!%;zP!A+wEMK5-REyU#0(%~-y9_viHc7OYCHzl zpL71rHzU 
z$hn`ujCE(S!bv3*aOwg)areRc2((oT&95^!i{$R10AAm99I8yeFDX9q%_~X4P=ncX zt!uFQvdb~;F({|>^lQftuOXN3vqWPCyFN?5{gPYy@(LfChSgnK`|`E&L(oNYQ0MXe zx1c5Ncv|T6v}ka?o#@*&&_ylU@WUB+tCfUo$u7}AHm^d7CK@P?lCTevNrh5Vs1tVv zXW|C1Zm@52M4m&i#L)(x@CRs&urUlpu@-g)HVAI^wvnTScubUEA&i33%nE`JOBC*u zsifUFMpcxy0kB_csR;M}nxgdQ2c5&bMf&bYz%z&q6nOf!h?`bugrA=Mc57L~-x4+$5**jI&NfhvjO=wDTRwL&0N=6TWe5X7XOD-K-1ur&mw6M+jPJdCkp zZiLum5R240j{|yxj}bxfqkA|9XP+oP4&FrOMXGoAnZplXp$&qX`>T7GU|(8Z8cO9w zg^Tr#(0ZA{0+Lt+S==jb`Cacf1(1|t$7?i@uUWSzJ;VVQH7tT!De-*3RAXJx$CyJx zP>AU)vN5#aO>{K4F$^;5p1&(p@KstGj;GdS4WJZb;l#L=+=4SIPw1F_5Px`Z58SKs z?n&$MP?i0_*23PSg#<;5%~+0&6e8d_N+4gXk-%LGiPZh~DcwAn*U6B#+zsYNCOgi8 z6ks^5ZUBTKezlKv*~@nO!JSZr6Ca4T1)o^a2NFhZjM6Mpbt>vAVyosxVbxxra- z7S-DVLH6ztl?6x@|9r%$oTZc76tZ6YDCMPeCi%@MzfV6tB1D0+p~oOqA9o+xJ!m+l z$PR(He#f*w;O0wo{PwL8i(>Wtj65QF1`%nw83Y3#v%ST@^!sOfM_oHtt>xeRLCQ?x z_CS?R+RdbOeMS8aTIV8#DZ2Tvtn?2$nWqhf-j=+VHe;ulT`%i|i;C(p;aY{qBV3zX zW}){kzmSoU+cLMXI=REK0AUlfxU_iJ6;Iia6p-rYwCv1LQP;YH9e+=m7tg!+T zh7 zjz%*%CB`deN8~e@N0DPvGChX2hX=+OGl&l_=`heTRE%E3UdQx5SIIGyI=UX$l|6d# zwBMEl{sepbM)1?I1z{7;qJlE)!|MT#EAk9!iy9sNCqiMft|iE;Ouc*;RPEjO{Y%|* zZvM3$XZk8WIRW+k2uGwX!&=HOnSm;bGdk!&`pbviKQX;j;u0F53UWk*6kK3?n3)ne@o2Xs5I-WK3bv6kM3MFA2r0@`|So7;5qh`G(50 zBq}T^_(@t*S!qnoz~LOkDCQ2#w_BOqWw+PEPoxKrm(Pb(NJ-1luVN@#M-tbP3yY~4 zJVs2J$C11nNWoL`Q#oKOE1x)oF73^hew%%_T0zqL&N1DHraezsi~e1uhIzOjouLRh zs)}RsGL^xsP^y@s0h=^Qk1wO9>}KQprD9K)o+KbT;i@PS8n zwfu#}9lIv533hWQjJt*y>Upy!|IFf(-MXYQe4ydm3wyXh?4{1eJY1TLrsVJc9LXGH zIh@g!q~TBsg8hI>4n4G>G-@djYUlWF;6uTtb&(l@?i>Zi?Wh2rx<2NU^~jEq?Ya+} zJDKm+ATY}~8?fMY7QV-%h9lR0{sKAwZ}9C^`gRhq=Asha_~=2Jw-}R5G5Nw4nA+A7xlbhi#Mh6RSBvK_4Bg;8RolQ` z=oHIQ|FVCXdr#$$1Y)QW6tAlB#_`2bmo@)Kx1+%~X9=@f(fyj(A8(x17jR@(YJFHJ zZ~oEmjO2pUE$qVokZ!uh)UDdcf4o|`ao&xE@CU7?GC&^1xge~$r$7OXCif*asHW94 zCog+CZs>#h)scFxuwZuzzcjq?ZB zBmp`clOWx4=3do2Z2~o{#b3Qtj`hKD-WqtCH2V?(J%7qc0 z@GYI-H4gACIa+;-=3V})))qKzd}J%-vi@z1CK0kNS*3tef5QDHoTUr{3B02-AWZDC z5)PSEs4P_g?^4{`&paFXI*@hOhH!c4Qw;=WJ!oP&R?NETdH=O_zr+|9!xTsYmTd5; z81KnbOm;lQ;+GpQx7T7ReKq*O<2ipjGtke%+##`#-+6|{_FOOC9LCk)xw>J#-Uj;wR-LfFO8;J0) z>}VbRDA(*ojB!-iLHY^`$X;0k>Z%BSaGhX)1+buB;}&SG1dGLH=gYW;nC%SZKy|mf z;)i98jRF)hMZvU;-*n=v^_OA0^t)@|MH8=p``Sf=@iFBpo0JG0Sa40iaC{5f?qy+> z8=;V{BjUf)(Rw4(Co{K_;x!OH%`q&haCNF(I+6h>DkW4{yHp_6mI@76>7xOUEq>VB zO44tvQm8`KmXC6Sq*D;!8Khf9wn@3YXLlg0O70c2!&9pIn(|gEAn2)M%15dQfmA@j z*v!|n*B_PQtbV4`!f@5cWi;v;ok^$CW-xFJ;}hsQ>SR@|2yt_SQ=&Sk+Eqy>lY_DD zMUjTF#N7oFemo1IDK7vgg#pzcprK3;0oez?-NOi#*fZn`o1|nM;<)k@SNGpp@Wo5z z@bnQO-3`3!87=Jh5D}_EzyUEPi9cFGDQiieZqlyPI1V^>2wObU8DwrUhnNyoc8XFF z=A98$6J;&TqB;3g3o9q&`Gzh0qO;s>lV!tLm&Rl8i@j`?3jY8-qEDHlqKnRJOUi#+ zHy-n6q2(21Xhno7$bkwzH^Z(s!sSG%iHZ!{C{1e)$sZ|zGcRTrPpWXq-wM9LtImzC zko4_F>35Nw`O~y&s#FKOk4zJ$SkNfA`17ynD^f(;I!#s8tJd$4aK7xk3xp7;bf6h7?4F*KG99@_w|<~&AYEk%jNBR(ZZ92M zKm^qex|5p%#e`bO90eUyQ)SlBOa=nXG(%Bv<&ssFnu{OZ(`R)_0%=!xbm^d8vCJ`Z zegjp3j~Ok0zx;?Dq=$B?Vu2Xq$j2?JzlGE?)eF-=DVZzeaD8R!aLa6mSF0m*kj>=rL++;0 zFxAKn=w%?;U*8NTJMuKWw^MbUMrA>ATmSqi3rXa(_fK-w37aQevN?DJRQI7wbTj{* z)yvzsxrd{As#wbk~r*rs(jYi$4;g|L{+SUj+jB(eJRFccZ3sdt=@zA zsE}yoC_x>%-#>uG5DeyI!zFA04pelLZC*&lEA(>m{sC` z*w@rH%bVw6>fJVx`}%DBX(2pCa2>QaC?hceg$ndwzi(|4(~hFO7;`U563GjLad9A- z4pY7kR7)<@3>8T68RYCD*umFk{w6ArD_4x{i@d_X&`<~dcwF?sU%OF9KxXt13c3Vl zk2`ibPji}=o83M&UN)Yp&L}N-5<<(A{ODBg?A|kQJK)P(^tAH>7OyU0Kg(6|)RRD= z{iGzc4HFGB7Muu!1>CYB`d~w1oIJO9HQ`0|me_Zb!TrpN{BaF$FDV8b*Z^<0XXO#yyd=HG3i{?3WL<@%_wR$Jp2G{xRBTMP`E+*&Q z^S5KR>nX+0Q`vLxB)?8xoKTkM=;0&M@GRiic3UTm@`pvbw)P5F=4)H&qg&&h{IqNDHva$tWJrA_ zjfWjTQBHeTf1)|Lov0>GiPVara`yAFo1!uW6My#knr;y@uhmKoDqJ#2S7j;9c1h@A 
za#YaUVXhPiqrRs`gX8eXThRVT&gD`A-eQ%UA9Kz->H7N02!3I%@E@B!>OUhD`vN?i%N5h2uo{=mKU( z*rO0tk-K1w=6uOF-r`TBVLrjne4BY%J)wo&E6FvNkiOUX9e)a{gluv52d4=%QqC2T zRnJ|@iX~-G?b65tE&co%p)e0X2S-)R(8@i%8AXSS1LfEoZ28UA!@yGluvPc_x6idk zZQ4991#&Te_#ERmz8YKOa&&<%kEQeaQ%DkdaTYgWqR8q@{R z$iJfoZ!kQ^WBgg_MFA;i>CA~Q=R1~&_;I!4fDA6ES8CFf*s~_}*~QxSedx>hVQ&j>a3?{E zryDv^r~;RqKd$?~E>I6BGu~(4S3Rszx|kB9mTk!XHoDtqgQ_-e=I(60bZQW@C2l31 zYO(ijK=ua3`Q^{zB4>rXN@UzUC=jv((Z^b#(2H+&gTTzuM92c$YFUhnQUq?l8!6>~ z^R;!D{7f}fk}$=ql3uBlNDMBBCRmVaqWj?RvKx5YzDoAK+H8u+$1t0fOnn>`s=t~& z84v(8E2T;`UsOoJ&0Kij@8HzeOOCGN>NVkjl0NkSn1{P#_hVl(mZ0LN+W z31z5!TWgzw(?D}GCFnxl2iTSK5;2hI(IR>>VkAAA`&3TL77MSETp=BBu8T9&*AT0A z1ud%&RJ$ZDo)?ZoE=3R*F;VS$X-UOnWo4AGdnB(86!!ppTz5`CPW~o;BBtI{xsN9;!I5nt%H z?l@HAj59;v4W+k=m5+ZZTiR7pS{o!|%5OnYoWmi_3c_>#lzk{Ir8)@ECVw3*s?I8d z2fgKCjRn9`sKb44R2My@2(jnq<%-$D%}Tv6NA;3pvAA0G_`0zV&Nm(!o7#7#O?CEO z%QA|g8e}Fh_6?{>bB%L^$>Rf4#ue1l#OEKz;_7IXOq>e~2*zEb>n?9-cQs`Q)q{L4 z_gLjW~+YDAL*PcMNTvBI&IViBKkB)!J3z?5s1}o^PWY9KP18M)bHqZ zwXX@2Dld!_u}3qPww-b*>NZ&zH@ejDiLBOqxk7%kSn~*pv7sNk&3}Rf)vH?Un5jaZ z@Y5O)aCnuHC*}=H)4%s#yZv*jv_*2$JLf3mend#R;6_3^qwJp-ITcw? z-W|2866OeosJg#oG2KGLrcP$V{lAyyDl=Si51g>@OMafd({Yghf~ zK%7|of9krgh9kfMn~b33q{gA65O1J};RJ6B&j$P5IF=VkomC!M39c1(!cY4FhX$UO zB#~z>oGL3UA+S10#=C1#Y{ESuTji0b29sJXD#WJ$v|ah!2fN=;@;=k!3m+B~B@-fY zpB_bxx7Z%X=L6wG?b4FL6|1ZET3_*3YrGj7zi`u0JEb2Ai)o8Be|J=3{SEk_nQWyb zxsS?QU2}t2`TYe^?Y2zb72~u&C0;5cg49S~kg1`+vM7pomHIBMU7ju+%<=WC1+3gNOM&~zVRdJTWODW+VvHuS$YMl$O<9hrKT-6X_QKCd+H1oUfb_X8eNG|T+~By z9C)rZsru%d)OARj8Uu9czbo+^;%HSAw4?|!dl6N&aX05l79zsQ@Cps~(`Y`FrC91D z;LDRBioh2zEL61z?h^vW%NzKe$frPlACib}eJGp!=L>enl1NxPbTMW+1Py#bL)~dC z*h}v{oMb_R_PO*n*%}q}otlv&{nD5Oyk9CIJgG;!F2c;)g_@Bs()ov)z#ZpX9g*}{r|q(DVSssQK?UI z&QxDP8x}cncW124nt^+jdjA|W@GNk%F6Gz=u(PmPzKPkKdD!WfAEl56z{P1})<+lH~&q)aWO53&_;}FjnPK%Og^aXMk^%sSwA#up&Ut=fq zAM%Q^0|1`uL3l#wWc3D0{T|44Zm{~Vf(QN~=*QNrv3ywc`}g3s%_BJ{E%Tk#D{@7| zd^>EeHW-|*lA1>&9nLRcbNe(I(zN!qV#Y42sJ@v*Fz%Hwd9@>Hr?+hu$4Ql;^-VfR zkq4K>tAFF>Y!o35n&fvmAzf%&p@jY3tmLF&N5of~7#L3i#Pjo)! 
zwmku%d@i}C`Ci)U@6y~p>Aqt%Mm-&3nj_pKv1+ItS|N|jjiEedRRL#JxcnRX>GALA z`9CMt-UVItFo05{G zFRXz^MV#eeJt$;$+FFsTbS?~5c>V;T+ljmgQ*2-RQIGyzl0s6)2rVhkYSHIL`Gl`1 z;UmqccX-x*?GJAO^^YkT)*L-m=$|kyu{+q)))K}1mAqyE$qVZ$Mzz1MP7i=QlL=nY z5XQLCTHBHtxKW|IgWZdji5aojZB$tjZuMH6pNw1D^LU*xc{<3d%*c%M36 zQ)5_)?{Z!QCx)Hldqs(IbrOMvuu>srnz6N-ki;3oL57*tfv@DhR6sD&f3T?A zz#St{%{PKXBk1@NG|wtPf>uRA8fO1Q!D^rc%*(6|Ifkce*Jq#EcNNaxxyExuhS>6o zeC8Sarg_|ZR?cQY{J6gFTCHey7Ll|{i-m$ZkZuQEJ(!b2WQ+u=m`Y1DPa}>nU7F75 zY^N6K3{H&b%w{J zxjH^HTSeK}&z~i?i6a*j-gnyHIjHeo?zuAa&Xo%@c+XX+gK_ev_k!*wf3J!C{)#Iz z+J?M?+kO~o_6wY-x_s*3VM!cNPRZ{{pPknuVtH*?5p~vvIC$NXEOjefF7s`=C*J;@a81 z8N)dP68bX@*IKUnpSk+1f7jpO&0AF7GAo-6t!x|@32m)fj%!_pw&^Ln^;#G1Cl!Ws zA41V`^xDyD{-!I>UYps@#u46}sa@^#gv7NjG~ySYz1Grm`7n9n(p%?dT6?+=Ug=U^ z*mw1m=0bbTwvO1tdIUNOxYz#<4##(9-Z4R?{?0pB-odXlU7I0sf3JvMEw}Qx62}(r z-sOmBGMs2Hq{q$mo%hF?KJxBiJyOQ$^UnCo@gt{_H(xh+Q)tUp+w8%c=cBh?&n{~6 zc(*QnvKCu)Gp|5M?qNN$fhSs=7v-pui`i~RrDUBW*@E?!;@~ac9>u#Jz?*g3_oS`0 zUVVmj>;0~<`zX-xqM## z794)B-|MUNYUpm}2yBL3+kwij$=Ba!ueG$hIU=~Q)K?5YDTr(n zM;wxOVKN$*_Ik{d#gTyIU1mfpL}xem%sf#XK`&BE+<~=A;{MK3?k9>PsILL9q}N6L zNl2kseu;b6iaMSoj!OBuO&wT?}ige5A`FlA&oj|9mFY)szeu9C1iWy6CRu$gq)~XkHURFyv{5EE&;Q z4La6VEssbW?Kw%E45d#7x*TA{3=w+NKGp}ubf6`$B7;J0SpQ2I2R} zM9QvZ;q~0Rmm@|x0vt-bwRQ{IzSM_HgF{B z^Qn@BIv+XGhwZ7B>Kt+KG ze+HKMi20Em{DsnD1V7b-lV7IWAV5R2HV_yO*lQx0bP=c_{8iP@oEsbolAYo#-&Uwpn1V{6)y455YiGn*W zU{NdMK{E(m1H)gg^{ipq32Y9B+19!+eS-R4fP{Y>8E>)O%8^YvbZ%*H6c(YB z{oA|!Rda4{#D1mr5GB_m*J>A$?eW>A=ZIfYJ(6-%Y{ppI9LW$QL6X_6)~cmeS&l?# zfzv+nS@j%|P>--t8P{&*$lUB9Ivjy+I&{?MWr=yTE4IE+D2vD#`HY5)^BY}(f9esD zP-=zQpiE}B>X8g|(N2B|nwgVI&olUNUq(fq)on$yLyrbJA0v)C?V2u)TEWa>YE2c0 zNGP?~&Z4XQz)TJ55f+hU(88RIW{!YN19TuV&FktBZ$(5b2odP0cR7J7&=P~Q$11hB z*A-&5hlra>CzBy+d3{7$5Jyx?f1m*(GOqxjf!Tts#c{;oxPv39^`;X#`@^pFHM@jT zi)-n3yY>)azna8^*I_Rrgvu2>$f%Z?XQOmjG>*s9dRfy#*xF@cj&NRX3Dk5y`M zh)}hMNVK{yi;>7>caA|XJ!2XpmXZHn8N^IwsWrR|(wNrZ3Oa#-RO29_f9_f@CNHhj zO1ai+e!ZiJq$U5d7+0I&mawCBT61OsyI?{*`iPs_`f6?tw?5ueyaVyXw&ktIT`xQGseBG*gQ8MmpIu6h#2*AgF_c&|e^;Tek90V;5k#ok z(<7-6BXy}!=|FmWAW1V-WQkP?iH~g??`0(Ahy+qAQ1!!CJx7>OiWee}85ZtPn5kCm z`z3vC%O{D{ipJ9=vUsagC>4_QdZN%)$XBK99ryaBZTdjXNFlXCYYa~i>7M9VEVM0J zdp((zd$t}_M-qp>fBJ{|v2lcnuIA(m{KIKR66G5TRVVW|F08nFEOV#1f03*%+Hzas zx85izGs!3i zNUd}k6`5oNg_d29IV9$@&RdPfEskM;@ zHRA9~lpLJMe@{zY*Z6>YqF6*yZXYFCidLydQWDu(f|p^{6!&p$lwGgox-D zp`Puz(%m?7#edT@bM?xV8UH)i{8z5{XAZn`<>;lEgI6w}x+p5PSf6EGsbx8WomK)> zC-z(je>}oEwXqE#vX0aWY-Fs8BVMUIL{#ucB9Sh0P&40b>M)Nf$hL^=QDlV-v>spW$_3ApsASZ}a1)#4#3?T3sv!uFL!u`SnJ>;oSAc>j!%-?7zP6lnlai8EcSTD~m{a z-KvbYjSR@37eNvY48$eT%xp|*kq2ZlncHnf#d=GF<}Q~O=L*?J;SS%8TkI-^O5fZl ze`{3u&;hiUQtK&-JN$TUEiQ!85N94W1~mp_a#nE?8uf{%Fk?Y@0>LB75kotiT^W;k zFUzl=@Xs_{&|QD_3VCJbkl#sSQdk z@*G7Ga$BEE(WWr&>m!s3Tz^*HF85(>f1kp6Q10t#m%E*visnnO)Q^l(ly=J4*`}w6 zHo04^Xjk}LiuS&$Z7nGyjYsW%gCeL2%IGemL1S;7mnAwvtN`FG;AA;Ha`UM2N|)@< z4qZ{TUzd6Prw{+ce-yv0m^pCe^69?o2gxg#vJ6MiMZ*#|=?WLwTBHIN=RvC+e=d#g zBR)b-oyE0ss;j@r?Q8ILjFNJgMXjH1(g&#VW2|~19j?RUjXqM}+efudsr1vQryjE& zVT+q22r@b6N(KpAa?MJF)28H{N$9XAMf3>QVfYMw2AWf(<}!{xqB9t~dnB~5`Ik=#R!79#>Vf8sf0l6w>lO5YKm_9CV2@5L#iugO6j@pVoShub^P zwMR%2^<;5eJjlUqT%%W8I}Kx!!{pH^X&J6#l}hluE?vpQ`b)q5TyfAX~N^b|bq za-6MKQaa*h({L zN|K-!|25Cp>e^}rTPraiOQ+Haf@X#zAVI@DbZX?wxiE-iQfZo~mLtafvSzv4Dz`Sa z%iOIB;yk6*_sZHET|}eef3UBo(cRf58#NtiZ$OgC+1%LJ=f>KU&3d)0d8%6vsbgwx zYZD-nX)(@OWPq6s!CUmi(;d!j@Y&M^@$=$1moN+-68V75X1^!|5n(DxEsJZ|?z%IL z1xb7)rkjWXi^n)(H;#D7gt3=aR*T3+({wIuBJQ&XQP3cn=`arCe~C`3%HQOa-e>1P z2tt!4oVJ#$lPlFDGh3tyL7<0gnxBK3yA7$ zt#y6&fG^-H??4LIctl26|7cbT(y~OWDFr%~rT5vn(r<_)gGR&Q@dQE8Npj2;)Z+2A 
zNv+M4p3s`InkBSFf2qY<0{r(wcw4Ki)EdFH3GJ{)O^g$k_%IO#L7W_!BeXHC#Ym84 z!&`iFzzuLDP%gG8%8|Q>opZB&Al(eoW6e}YYP)PLm1aDZ4l3d*C*2Tl!6RCFL>FnG zhvh-V{E)GXt+go)lKmVTA%p^I^fCOn{09*re#_h4S(s64#nP~h= zMY18D>wkDgr1?tv3G z2;y429$AhRrKRE>G5+CWN{?_SFrpqJls*o(1CDMtKVz4U_t?3c1LhXxV|Szd!c6=r zDA@Hl{Shv%by`iR@cF~8VGW^3w!|JakiCs_O?a35e^E1mjc4OpuK$>??LTw zt#tq3C4|)_krU8cw?ALYM;fVcY~2*5P?hd7dNG!4sVwPYw67Xs*27qn7(5T$^ahe)~6AwCC6 z5q`PI&SQ>SNKCn7@k+@HN8@7QJX>sw@m!Io!sr;ce``@NuEHrK$`PMv8RsB^S$sEcxpRA0 zi}JB|B9K~%R;;s!try4@y%pFQY4u39X=sUKm}b@r16xRsh?e}dGTgbkJ|gT*6g|hn z$2bSdA}Vr-$ZEMKnG&_Ne5+i!#>c#?V=iHtvs`nqQp=J1p?R%#e9;Cs2>v{j-Iv$w ze~%OZ7>tv4b1;^%5*}b33z3hW!$}4N!&3R|Ou6?*7|~kYfx&ZwgXjC;)c?ZZ;PW{| z*ph!uetz(x{Dr}D@^b?`B1meT9~eA8D3`wmk3Y|GjJ1Aqkw%C}utz)1mQoOp=Zm6H zGI5AlwZ?={bj?!t+3X9f)Cx02ww5njfBafqTxGNpAuDMj>G%SR(jwvrw6feeoaXR_ zvtFB(@N4he*d6H7E@*=Z5E;=AX(GeEVPm2r+GI?2eAy8jae~v!lGU?U@ybj(Uq3Q< z>Cp3+0+^ zL2H{Say?v5YEf}c&c%nd_j|8k*EL-W*esqS7U#0Kc?q?;FsF;(G4^zO{JmOu$#^0a zFXG74Sf_ez*bRoLmStctx1>Nbf76kAq(~^G^VtQ_k~|docAHGTQ3|I=*7CfzKF4`1W;hy_ZR1=4T{i*?&gnC03eJU(_+e;Zp}phSIk zAN%-2RuL2JlUK0u@t%0k9G>F@C$*qvU(9kOKJ4nDEx1Y@X7@oj69|!|JVyW+rr>yT z2@JbBz63205uif7t4V8XWrM#(RqF+N;4?5YcnQKGhp-Ib&&%bCi^uyf<}^Ee{AK<9 zgMBYRI9QHcy~Ro`1;-Kne{puD0FfL=uK45Xaa-_;#ifpS8*Uo{Jqud(ef6CyV^@f< z0Y@abjS!i{Y?cIQ$lqZlMd0i*RI_=Lo`MHE#JiG3&Gt<5w2QM^|&Y ziStN!%?>lptF>1aOeArQjF360S)@nUKGuh*I|@r%s)a$F-s2#6e}IwP)?zBW=f_Je zo+E?GwlgkDg<+9fVnjHcmzjOc|Yd;Q^Ocs$(Ps4nJT^(h z7R%dO3nJFpV)j%A8!U0h77Bkp!LKc3P2_cIb;mHojcJLce^#OT5Peg;fhaDE}-L_yMa6!iO2HhVzK5X6K7}1UaddDw10690IGH+p?N3{6sv$TfZ^R z2hCn;HRtZIyWNkuO`{MFu~!K4WJ#6tJIZmVlF!ZvJVb%ZTu^8W1)SkIvbvCyIg5f! zy!BfWMRB-Ye{Q$i<@36|ZnT0%Zia$cd}Ps)-|1pkVO}459vmFiA|lDVy3wTY{lQ8v z__3Vsg75BS)B$=I~5eKQZm_e|lMyiUT4$vO(VO4GxI7>?|Y$ zgN4Vw=l4>pjG!$_j0%LPf`J`xqrLnQxWY=J5c-5V+QIEdiQ*13f|l4^qod`A%gf8D z<>fd#WS8;fRBi^-Lw013a$~^~&QcFizQ_3fNC6z>Uwq;lMuZ&WelMTk$_zXAvx7Hx zK#=@wf0h|O?=>{@&bS?6R*O{8f^IAIBac0K{Sr&9d_bk}{k{U(74Prw*Y~rDU+_yk zzmna{t`GG0!;ziPOwX>IAA|&16+X-owxDBn5D{t{Y%O8K6rsyr8u40PgT9sPtDEwoxfsG6hEVfA*xEL1bIvTFkmV#4rV|hE^HTj7)*U z$qs9|tzt)%Ve#c#>7k8uc9w6vm*z4txkIbm$RzW(AUI{`I3kJEN{YWE&Imk|@*Q-P z?G2-3V^vUi>5{j)cwE-IU-41D;_TpOX&hnG0m(?oV

H2xs3P*%6LN&{K^_2R)^4Ar=-NdLT>0Rkl!& z0LneRoOmfIz-%6-@|(guThhf?mNKj;X1xvlKUb?qB5l zf9r&o{>i+2Op6=YT&~reJQ71If3cG8e~wo3&(Q<0GaQkk4iTiHWum@CrBYB~Qp%}x z;d>3Gk8+wRsA?(5Q`iAWSy7o5s`1iB%6a*i+8H$>Q6F^RL8?$G2>rooj4)wwrjsI- zUlq=0O{{W#&!mgaV+6%6Vi~OFoAw6)*fBjKMGwqWP9c_gwu_m=l#}R;coS|$f7D#y zq!&}(7ZUB@Y_*&{&Wk*@c`~y@h7)BFeJ+ra)DG@%?b2$vO=$L=^|-gSYcU8 zBPLm3YcnOB$?wT9IiV>~Tj^vfuv36Y*e7-@5HIS9Nvw>8dR&wP#q8iHkY+a6M{K#q zF-%$KNU)I#MZu0GDZh3Ri-sPAe;-6cQ8;IpaE;#(?0yi13%C`I2EuF}w3*A2JBQ!k zkMM{2tD-~MwNP#k%pA&IJNJX^`P`Y=%W%iI5e)%X@ugzS&H+-p-&S~xMc}BZ+ws=w z_eUOEVjTWJO+6q_JV$&p(tc^Imh{N3OmUaQ-w&w`pM4i67uPFPZE@sDe@0}-YIKqAN zXQ}29X;}teHG&B($pDMie*`87k_m7A+jAXTw`_vB40FS2xizE>`PiAjmgQ-hif$+U{RlKtYupZETIk+s&tQbm?Kal=)efcti@F>COv|WI5KU+6x;`x zJENdWXRv4}j98MLyX!q7=~5^BeY0}MBUbSI&5buFix;qf6?R}bd7(?-j{*uiB;2Lkn) z^tMdfrC`2lC5Ibl8$BH)(GaR-TMJyP{?l=yh%9Z}cRdTbe=*~FGEB!n1SBKy*^eZ_ z8UxZO90fPyAcIU{xS2?y9Z8r0ZpYc)$k!qfbTAnQaVDN-BHURuYij}Yp@YB{5Q%LY zBGJcGOf_a}p(mtDngyPP96W^1AED^^?0yg`nnBJuTy zc!f{B6=?P^f8i{ZX%H#8liA9IJNsd#7G3rKJ?N@Mj>NrHfAF#*M@l}{=Dqxr%iLIT zIUZYG6aU&?pV(FGhBY5N*8O^i>`@lr( zDI%a5e>~HpRR`tB9##Z#_S-urmk;)8)zCV*W9$lJvpS}O81DOvK5z$r)Jb-jJC;CK z;G{yUp6*B`6kx<`Xw&wZhuxs-fx%gewia+z8TqZm5vJ&K+e~uxTD5>&jwc20s|l_m zf=H$jas&k7;*k@mI+yRla*sleEQ27DTE^K{f8(s~kpheegqQ1+!e~SK#l-< zwd2bbh=_9x2Z`A5G(-gYm|)opedHN8RYQpQsx^MgawH|zQuq3XI3o5^PDF^vNet_p z>&qy>Jf_Bh1;e7?jskk=n3CyQo@Go&jrLnfcVh@s!I<*$XY^Sz^N?_d@ zf2NE1N(siCYrSeLZlItR3bw|w^%1ceLWHWCvK915x}+`jO<|$`(J6NER<{@;(c)fI z(o8khFEi|_>aJ&XZQt+lz zt{wb(9-_rq-Cd^%t!o{zE$IjEC5Zl3f88)f4gXPVx6^UN#Ho_Xf2 zS5BOK=DWq3UF55_k4fDwxvI7{A6N8ui-_=gq^M(>ydHrBdF#Y0XFhoI)GKG+I`zRT z-ACX5;F&y2eiAA3Lg(W`1o*0L`25&njoZPI)tNjZrywF{F2LohXWl&X{wrrrf4ucU z%XiCLzuEDzA(E;sia?TVl_R2WEWeZ2BG116%855my?>_rl{2rNfgEXh^{uBa3-m}T z@2Ly*QKaO?7zU07Q{h5v$|g{L0u_($#DvDJa-`__W`vskl~b>DzyHd~?|ksqg|{G8 zUVZD#86iiaRYe+^Sad129E+ndf6h%0*(?jbrjbj#+WA+k#+uh1&%N!x+L{Zm52mZ#e4q|a3l(z+!U3<`sF)LKs_x} zet*dCzc2a?s%dmA6fKl^f|uBH*6$MY1S3+BB$HH)og`?p}I9fQeyCczXERrhhe@R==tK;@at!?56 z_1SiDWaD}yUZ!tEf8>jm#g#A602hn=qGa?($_TCWc1f+k9_NTi6ovbqakW28r^@uh zv`tbg_wChybc=uhk)78g@wLLzyu)PdHEzc}stOoSu%-nM)!_@}BxZtuujvwTYOQn-yT*rDw@wx z#zrM3^P#!i=aRb`l!``&sZnvnLCIYE6w20f6x_lNJ4iW2f9;#1$aC4#**Tg-ICA&I zWlPibl@(az_f|f>w{r4h7N2{EC&z}AD=R}s@2{*pXB|7!y>jP-z;oZPe_yeA%x?lQKAKzSOIud`1e+DWi|np>j97Av4tdcDd&mMNL2L zJK~t8%#P_pe?Aig@U%Y1j$6gnZ=NG3;RAo~i@T=`r@nae)Y<0GTe^qZ;O?Kn*}W4h z_fFmEM$46zmJgdCRQDQIjy)jZX8f2ol==wm-gxsI)M^a~aj@`IC? 
zhpw!=dAa4|m7`E_%^vLTI(2#FZDOTmk=@ZX_AJWy+!uGxv~+(imDDQnH0~pQE`Gdn z4>6UUeSEKF@#A}6ApWi({@z1$=A@Qr|GwK|Yf(j?xYzcl&Qx zj`*;0bY%sKEjYjT%)J&^Mt5Z^P~YEcmC8d*7CI9gN8Y@GWa*oi?^+Cp;cwq-fX~6( ze-~CxetvhvaAKwD^A-5iaGG6M*Tr{Vx>_u?GA*QiSOHW^Q01rLh+Z;`B*7TYY=|RN zp$0Iv%G#zVXIryO=X18n+H@pNxiML@!smip=5y3EM93xcxz&v_g)2Zc>wUJT-BgKN zBW7#e?R^D;GSYo{<;@GjqzV)AhS25Gk8JKDF{TzH)N5Y3A;U75K;?L>423y(;HAH7}N07Sy&^ zlTn1WD(E}hCXq=;`=k7OAl7NDZ2%1-1ol{KLA>Bg;f;#x!3A9oa`e^f8u z=MZ_5xYrRp@$uV-zi64cbMWI=c zudj@qogBG-_Z89r348bY%E%XYf6h)Cf-P@#&xl#Se0VtTkMQ?2qKC6Cmud+N`Ba}h zXSG`A+Tort^+{GCpmkq%Rj6uiWjzl8 zPQE}9jeOe{``x6iRr+bYf2w1@_r;pvtuYnjS`m8l9-_FG_VJFzEwlTUP_VJ39x&?z z#3diQ*QO8n(BRVvaSGH_9in&@?dtSmRlhqQF83AQZHa3M#1txXv#w8z>!(Sy(DV^H zrABAe;5v<7gHtX|pZXG|#PvE8p-}32aGjnadPu!Sqx9)|h`vBpf48$yfX`}4kEDvV zI`wjeSNR-a}4ldvLr@hvUAi#a3wj){3o_ZwQpQ=Eo7^u=;43yewWn#ZO z%#m2`YewPh_lWpUB1OaKJ4m7Y$BG0WKSJM5D*EgZ+Q)(wTPx&hz%eugiszX8#iUMF8L*} zwWMq=9W5udO4F=zqB*q@f5aD$ug1A4o`M5EhhvOa`@U?^tg^e=oIZ{u?xp-8L5ewU z6~?s|!v$Fu3UZ?(Y)c(KT#X11Eab9=7q?!|e=ad1OttmwbM_5<%!P470^|#3p`gST z_z+8gEwj~vGUDmu5P-4D&sO2!D6+M_2Xfpjdje=7nLcDfcXdzu3BFe-EQBmM|=UdQHsml}HIiaqdJ}BlXU55hB~v zUdxZg?$2~MN$t3m?1>l%<*XL}LTj=R?MrNkg>rMkBtEN)cdNszoILS!Dh8sUR<4qS zU3$bo0vKl@7mRK^e!E zIL+?6%|6GAv%2}7F(Y4QybI}$e_5{fhP!U77sf4D#*AuFY58?Bu+P^ zTQkoY6U`w5>b>u7qi4t%BGEq5>IE#q>k-6}L<(}mpwudOK~H5l zqSAKZ-AiZUGZx34QT4pKXV`uJ=LJ$L%1Nz-o=DGqzdEcoT*h_t*L4$kcce$Xs5acz zLXmcxx3ze+k2pe_7RaoQf7_B~&-Mx!<=ZAesnwj4Wtzd95;}cm{UZw+Jnn) zI>Cq|)5@HN*3oP$8A~68d*@{!)5dgs+2xTzaW{-6ibQayPk&GL3>Lu1kL>yu5pIM`JA#MnBqMqWd-}_Lr^boO0e=UB;HF=eYpLN~u z?g_;m0pG)7j@TBgguk1-HsSG%L8)aTt=G*ngzI|D8snuFCmzw|C$!ot3#8VSwcr+F z^IMO`hj@;tmO)FrH6sI`C?Fz_tTZIWtWp(XT9${W!3YF})UbsUY>qS{ld27Xt_cko zf{0||!{#-H;Wc}Be|r$&?4H$lkrlj}wY8`KXBR;qF`psiV`C)_ZZm%}Z!HO>7H?}I zWw072vWD$K-mqJQCGO|-$ZA$n#kuuGloM`7elCrNh&a*#5lOXXT0y44+KbwX1P@zP zjxLgHat^z}h-Ue#r%@{(i*BNIc?h>C5hB5ZZqOC#!iSBse{I&T2iqvM96qYh!CMu$ zZ;k-xvM=%(7V~9}^oZ{k@(@=AF5hC-fOZtsAufc4&V|qt&e~F0jubd6x$DJ$KEHKa z3!RASE9MwLGc@|rSK6LJm9KYoG!51~kp!JcfsCEWbmly}4DDeuD32YTm!W9}B%tQs zR$GhW{`dm#e*`S#pWKB)e`J9#MTFU^my0ku@<(zmAM1}4=@Hf+S$X8QL3oz z8@>hyvevrBk@5B;KpxA`tGve{@f`(i~Be?;0S%Hw%|q)e?+zDL%>=kOQ4zVz7fWr))GHI@wHdO?kRp zQ>+bsHEB5op41fGM2~0O`|Khm&s5uiO_}CGv9&7iYY(S*<>mwH^g*|sUlvA>HVJPZ@^5Y_uYJ=IpKBT`4C zm!+cxJb&tl)DfwtIwDVI`u_o|QPN-9CHVIM0000)WY>ksmkDD+MhVT6qh~2C4SC)W9SgD7qhpk$v zsnlCUY%l8;Y-F1y{r3eE((ERx!U=}=nen`yDSsXhZ4y*)(7$+!>q~=B_U2oza z+fN{iAIlGgd45)0GGTb%O6@@_eHD$UcC53~jiJhi487INR8Bdjt5!PZIWwKq3&<`R z7K+aCxr{Mp6Tf)hF$gDkjwZ&|kRX)9`Fmk%D@1eB55tfQbkR~+Ls9t(yec`|EkvbN zdw=UmC+)>Cr|xe}FloYerT*R9@6aWYa2msJKc2~DY2nHkq1kgKn zSC?-J1~eO2Tk)>6(X1RLFD?w&g<2!v$-#&c) zxLOYGvEv$+`D`d~{BVFx-I^KZ;@R-C*ViL;`26%wKD6d@L&qmGOJA?B>5X*D^vLIc zq&D4?Ak?y~XTv+svrooj%f@DKm9w1181ejc9DE;!X*APVJsYka^Tg6z!#y#z6?RWN z{e&iso^!H^t9c}UhbMjpgNyFUSk}D(jZ5;pvdLX;mpFMF+rci0=-SU6oYiAkp13Z;Lg7anI|eO}wmh*cl3--b<|BiDRHL#aglWEDjrk4m-8td9pj|=NP zEphGQ(#WZQM?I82>cKHc!-=A=u&3317LCiav87?YDQ@31bkn};KIG9|j^xG^!MiA1V2=mL?E?_%V$W#a~)mWIC+!#2uLGw^oG+ zr4YvVlyY$kH}G;+kKop5<)-mXQ?vY@Rr5Q3i8b%8;5}5wFG-;P>*b52wyE1X(u&2^ z9mB2-!XP$NFnU9`ploOy1P=$H{1ROqad;Z_Yl6GFu%I+6XoMiSHJ zJ1&3r3xmAZetR6;Xr`N!i$B$D%l(T~yx?MK-W+SCnQdi_+;UzkZ!}^TnfuuCta6FP zF*?o8;NmZTJh-{2&Q%z(M&@>JWUsY5i^3s)lczXfpHKh#4_|%z``>)`Z(n@(yI z*b^Pu+9FZlY|@33NxE<}`_9m#%eLlFThRY93KG%Y|4egc!|1+Yl95eR6LvfUy91hk z0ivIl)FNA2q*L-e?Eegd5ZcuxH`YdXmSgoI9Wx_cXr{R&!DR z>Z~1kmQ4#IAUPFqYk3|uJoCbqc<%WkNC!B<^TUw1mq4mm^Q$mffk6=U0@0K!YewpE z>5RPFCXGwkPve5Na)fO~?F!(xi2ge?z-2WL;3}tzlWyca-J`q@BvpZ%OtV52IbP~U zqym48GF8+ZlYsQxB8y<}Qr)M2#o9Ni@?uv~-KSxDAgS`)M9wiMcy8Jm0Xj2#`=G@T 
zfk2Mgt8j|g(CQUBGkg2sF5o!{<;aaRUq$Vxopxp}_gLOShw|R1na#R&FrMH8yC;Zt z%jIxGChW<2WLy)bWn#wp5Xq7Ee-dV46t~zTl#xP5mXokg)2S0db!B6J3i~vAx{y>i zVnX}0V|wReX@^#*vvIXsNmpj?yhcU)v|lP6*Dow{K^@oBfxVUQS}1o&Zi3&osy`x+ zBznh;Ig~s0D{#}MW51{~Yp&NXL~yx_hPqE{u6hv9@^TT#@p2Uf)p81|`4pT_=F^#U z=t@`fD_t(6U}prDSrFqL}*fF4Z2;JFE(V>Jkz5MMqP z0CeGA=+P9BezdMqq#RQxd!H_u-c7J<$JZcs=AY@Y_ns$qNk{%Lai6}OP8_UV=?bw6 zugqR0B_XjRQElN}_~zID{`Hst`SllH{PBPOiTwTTAAbAImw*3%fB*V-U;pN}-~RWP z4F#AlrIMFi$ELp|w81;>i#wE1T+cz!a!mwJR~3_2`3G@y({nV}OI%uLtCdD7hL5i} zc$N%zFg6q4Z4x_dJt+|6%5WUo zTzvU7soQ-zK-zJCPUgHmqBM~;sr?R>HlOiR`Or#4RdSRF;>2U~T8h1N26fZq*o^hj zY4ol%!d;w3VU(revJqYgZ=Z7*acc1zw7u8DKHWm;StklUIO20>{|THZcQpG?>}(Eu zkLBIrY)i2--$9S%9eEr(khj#C-=PoM8gV&`38c2m3vBKLjfcntzg`8I3Q5Ap<8nWkb zRq&1(pt6uZGjh_=JP@h7Mbtf*xQ#-~ez(C?X-`IEmAA8ZnBcjb9*V??(~cUiyie=$ zEu2(V>MQPlqCR#pKzGoe+3|#a`UiSYLwhn=8=X^p(sY8|h?`7*E6*?~C#|N`jF0-6%>Zf3%!c#M_M6$vTf1du!=3c@o7s&2X3NZm z@9usxn{j?5Gh6;#f}sU@ox|5JdclK>(W@6;&BUQ*FTM9DVIb+Xa-%&$W z&S>qRL(-Ps+yR9Ra@&^T44ndpYv-j(wMh>ku$>QAkhWCm4qcgc3R0>|_fU|&_KmU| z&};>NwLOENE%h8uXFgc7%kIw7RN7UOWDi%gQeEEK%Mo4;`~vK?*d-8NUK%FejAioS zx`IyMuIx@Hd)8pbE6LU@{$RB6aPLXi-hkt65^s~MA_(*oW%~5~sWRahOZxOE@+3dl zHGYBVCeyYTkI!I_(njS#l;1y>ApLdH2m7=sA5${Frxo zmY+Bd_melMzSi6_?XwZ;d{1@Wr3))x{3+q0RPhBNjCEXHw}|-l+BDm(CRCEMB3qXrW@U)-A&5cU2fneo0CS_%=CDhWV5W@^%T{^SPoLLVeKW5lL z30^%`sFM5xUoK@OZ6F7k(UQbKZT z5<;AGloFa{T?aUu+y9Rsh&^H?Y7-K%mD)6I)uzO3N{zJCtePeE>Ov7CX3f}?TdfvV z*B<4nReQ$hptoAI{?XpvU+?oj@AG}$^Bw1$@B4k{c|PYo=dpzFN6FPkkKy^EltD(> zF~y>*pB|rMI0Y{wZ+B#%#o8_xn9#(B?bIvh!_+=o%Ty(h%ju{npE6GE)yknsZD#9G zc^*^kPtR{OH@8Q&QDQ&#Uaz%hSqh6%Xp9ZYiZiS>fyy%-z6(6w-LXEFXL?^=i@%(D zFD0~WN;Nb;R#Xh*EXQbpJtu6q!@$X&!s>C#YmM8}9wOaXXkBDJ<3mS%JnihimVSKC zk}lg1{1w`a4CQxY&k6LO@1D9w8FBlW2-p7pJW_B7<;NP{&YCQ@*g0?B@~xVe^N!I{ ztZ?tbwd)M94b3UlNsxsi&qsrYs(4uQdB%M|9l;}!+e!~(JJucw9_=hRw`I8$Uv31B zut0=*rl85HDHVY93M#4cd<-d~FL7nEBQG3h4+{3(pbl;ld&vnC{=lkkb2Fbgf4Hk- zuLb?Rf8{u)8sXtNbe;^hw^elZa*P+_R%z+8T;6NZYdAhbnf{_C67Xl;WAE-m6Vu?_V~J$R!F ze+B#z&NA00)r5CIs3HY#(Tfsh_YYQ3u8l~qD5agMGLm$eHWA8l(Hnh=>rWQzm=~S# zV-AsRTTSL?$uisT5PB~-E2c8y*DDv_Y8v!ORV3++O*Pq)80E7gPtQ6xml??CwGGg# zXNxI^#*8@%%EZN7WHFGYvoZlO0YOyJ5gjj_%n+{%Y!Y?Tw!`t=yha7`GooC@3pN5e z!Ub$*^pe}0;8i^?gLFwQOOt{6d$D5R@%BfsRI` z@TmenxM&ifiBICwmL22s02Ux!WORE+hN>!RrNE+BSDr39;Z`X4_Df|0 zRVCL85xa*EwXyS8yI%m<+RSLKiL7t%=TcV|&2{4(0Q2W&-lrtD>*VhLxUq65qNR4n^11p7vUbAaGD9bL@ z3!pb}1QqZi1&t@qytik9C3cQI!0&Q&mTSpDGQmb+bXlaj=!0sZzt`3{62NHMBB1%a zZV{=1me9fnQc&7*aGQ9xSJ7o$F=evy?6EWJC1@V@?QI%OBLFf2(mrin$6cFOONFtd zt|I561L%;-vnYZf6m$x2QaMsYI~*kq0C}bjFE_JA-0*zh_9psbk=mQH+|CW?cgVyp z=|_Rw^chF)2{-j>X-)5Fb*ZA6md`({1Z5hBg9j5av&o85F-cr};a~Y`!d(?Mz++aL zve7bEjK;|d+eLMyAs+l~Q06ZXmkd&C+!yf8nanH2w*}3*tl@v)`R<-;vDzBjIMhS7 z_a^RH`aUQVjl}rDQW!pfgJ@fUSjevXJ2u;uC{hrPr7%Qsq1AQ`=<%%Yt6{+D;npSr z(STg64Kj3Rv7@qN?e*q#=A&iHsE?&K>F1=e+t^157DEMU{Yz<{G8eveABlvchu4T6 zYF=eGx)^Od(wJBAm@B<{;qudCT5UH<1n7c;LOyR=y=ZjeVc4FGsGOEH@RqrRh}VR3#L*R6!ebV(pFdB=cnSM67TO2J%)T7TDk)8?Bz`f$7sdi?Bcye>g(|gs3}&de z1-PP?1c9Idpc_?Vgsr_o<;u05%{?A>BO!BJxAAG={pCHwOlDxCu7MCkqpss={fS0h zvtHqv>H>=eTW=>62C9-{+o%58kI)zp`B=wJ2$kH78hzK`Wzqw)8_0`&XsbSvN6mJP zUWo<@WS+-!b87nb8BgvI6Vks8G1k3~Om8=EbZxV-Z5Vk1LRK>QCmQ|@9e1bxcF>rE z6cSJ7&}S&#=9DtTu=I}?50I@&OW%Vpd1Y@}7rEQLC|nV)$vlxebzp5RtL>3fRVT+@&SVKf z1nR85yeHC?s;oK=JBK*qtSlLMHQjdq9wLAgR?<@A`=A^5^nw+N}GG}voAv+YOsR3IWN9CO(DDBb@iprz*VoL^>WIDkQqSU`kLC0Y5w@%B( z_tm+4z7scl)55G$bKV>B%PJA-3JMl3Wg0N0#+CCI+IW9TZ9e`R-qFky|q)U+=$=R3T+d#XRBUCGgR=G5~ zh&$xC^knDcC&7; zD4Zw8_&u#!in02_iTTss-;UOw{m8d8DZ%>UL^@4=WwGX!Q_mC5iO2sbb>UH!{@uF| 
zKYM!eRTkc@9hRvr-r6G5W@Ga%v*Vd3VSLtVw+u05*4{VLqFvmGS6l|JrLd-Bv(y9K z_||~u+uDaok@=9LDoyA&1eSQ8bfFCzWl`SvW>nbVYRMGkb``JVu$j6eUy*xWTB7BJ zSwMRfCnu4slt}vP6rtrb|AfaLedD*`cpVM6YiZ7ohVV=6bk?=(NDvh>5DzV`eZKRp z5#n1O*8JzQe1HvA6#xV9c;9Z+P-3YiXK-JimG#Gp;SnJ|kc28;Iui|ur>LDGzZ7}`wDm*m!YpFWKo!v@GoHVUvKFt4Z=!tBm z9~0#%0mM*M(sh$8mxgP#rgevG$?F{9zwuPG;Bf_=ujC3r`g9^wGHGYNfU$J>OwS ztR32#5=cM7&pP=$J9yXhXLHUh=tc>`Uh39&?(FSWKInA`p6ruOzY^dw^HZq5^2X+i z&CMm(-liK8O=(p?c+-P7uk1J@knNBKu+I+C!7xL?Iv<8yuO4;gRul~o%yTDj<#Hb$ z4EabJ@Qe|64%C=wxM)Ym!aq;-akB8he15h z>x;%XQ)}&IUgOSOH_i3@k}`#+>eA28+)H449JK|9z*(M+7drbTwX=k3J%QI0y<}3PB8jVeR+K38=2CHRpITYEbj;f=xszD??ot&Uh+W8^KSup{by7+(>Sa7BLQfqXGL$#va?nhJX(rfqx!1_mE zr>&oG%NiuIX1rynp|3ab*P9?|4sK0y$tG@Bb=*_^Qnc zfcOZP;lI4N0|!BYjrUK)yo5G52>H{IcPC=qwx43AL2CMkciVW_{$F&=B|%VX9R_Mb z{UHqxAy)DqFJqgba8DNsj7XvXI?ziPmIR@NPrM|j4xOQ_xd}L-{~9IA)BLP(K%PdB z;4TGX`sq}N6s5JtHvcGVq<%R&dm@iL{2@P0kVAl&e#GiqXFc;c2ml~T0su_^iIpqC z9sz>=FzQUbBfxiyp*x3m@`oGi)0{sh?{))~oTy5UUZ+(@5 zxH(daK>k;Rl4~NcBSB(kPpba?;-j4E-_k4?1aS8D@^|&}KY?P+3~4FH0s!z)wo*C( Jz)c$< delta 37199 zcmZ6xWmFx(5;ckj2?P!nTn_GT!S&!CJP_R7f==)Q1b3IUI2^L&~yW7jX_gl~U z$Mo*qH8V9mvsUe@>Ci>|`+58iD)R6MI505CFfeIi&zi9x6cPU08*==go;;HyF z|AXqF2<8@&1{x^VyC6AAX#rfZ$43_`@VUgkf4@(Zm^Ix`37mg4SXwDV= z9ZZEyTSy_v<8ohIJxneO-P}P4R^UfP2EWfT!7<}Qk|&NsEan>j`cbVybK-+ng{8n~ zHRngpnHEag1sP`^?6hKa=HLSg_U|Rk{AEfX;Cy5!hfKuoyy=teK*xwvl3bgTJP2Fg z##8a{er_i^kvPRk%KlbDm*ye127Dr0vU;XSmC>ap&5)bUswR-l3X!vu>=Sl__$3Y! z_}%1^Xvbwmn%4~$OEEtV$hgPQ`A`&#_x0TF<(g2xWB(8-v)I^cr6is@O`cg1!I8K{ z(h!~!3nQC=q!V0aGo4~Si5%-tm7r&Hd`ed!`I1m;@x^S*;s8roDpA~k zip6_sceS0xQ*U2o0g%v0pDssk;ODHg6vR$4Eu}LxF%C($xk|rql`&?Cnv3EyqOJXr zub{5#q~jsA5Tgihx>$BPgXZh6DU}N8Hpez|wYF9@d>>92Vmw`R`3eYys&em_Yw+wB zYw}u@sPGkOmFahbUyue5aOffWVK^a_0fW>^5akh{${%8bz$+L-`=*lHjpfrL0+Nr? zy@n|w-I)e%yTS`@&2vFfo!VY;Ral8voW{=@pDN+E_|}w%z3eW~oA2w5R#n#P@y(ft z@58RvKH%*Y){_R8CGKnUK!z%D(ix)dW!P4zAG+Ih7;H)HsdpwaGJ89b4R8fsYdW~p z$#dH7W#5bf4GpDScZfZdXba-+(Zid><8m7!{qiQIKdh>B88~3SbVh>`Opdtbn9ky& zUZp<;Q2tI+nW~a&drNn`u1*V=ylFwIC_ap*h+#wg=!qqbD>=y)xP9rrN0gFBV=*Uhh7u2e~G@6PmUuP~z z2Pm><&(eo3|5u{N@NY{?oOJ##*yF#6iqLmMU|z{}i47ts%D?IINVp(!vnk@}&ljA- zR7@m|I^ZXVQ|<{(7D88y@M#-jaQG?_;(MNYB8)GUsDCn}U4I2R73_IdD@q#l@!bl{ zH*Hjl0R5^|*+`laYNjn%8mQ5%RcMK1WmtcA3wCbKfZO(7rG6rE<<~aR-`&Mp*)8Y` zNi70{j=dH^hb0c!>gihD0_-`0cN0^^3Nie6dIdwwu~QgV z>EuRh=VxePt#c`XHFnOn0gzs=Y(Kp<1j;Kk;6)sN71!CYo2FBD^Sy=m0~M(PT4@52 zc5S__my05<=+Sk3w$)zMHLvGJSbTN8VSn3%t=#?u6NSFvC%Y&-LB*<(52#h(!AcNk z7A(e*R=-EG-%f$Z<_+JoEc6#U0%bpffVkhMZA3f3dD15NJi(A?v~5G_;mf&NU8~V4 zkg&#FCpZJ$3Pttz_v zBIDY*tUGo+KUoH9t>;CF)$t>(`HX+4))MHS`47S)k9yIm)YpD@>XviJdOLky%Kn&J z?>H6wq}xU6#Pmm3Bt}1AhWRa>nfL9ziaZi>f>66=PetLa<**2d$t}=WLPX*VLRi=x?#MQ-&j1Km0+hb0G48y09MAT-zxW8Zll$ZCVz5T`)W~xy)TIHH$ z!S{!wbG;+Nf7EUt7it3Rlmzf>uU;a<5?pRRdVgaNd|Vhen18#J`6aqER5V@}N>$2m-Zu)jWGVY}srIO|klWYz8g{XR%uC z|DO9xxfAgKu-rQr=rR<9bcI;*tke4)Y~|?-)fOWGZg>YKH=bv`EdNzd(yo(P^t-CI1535~LAcH&;VvOyb=Dx7bQ`ghGZqB>oD;Y`P83f3sEMGEPB(0>1Hjd@)j7vB_G z&QIz52(WiJrGY|@dtqXU4Ik$92zZ+)|737j9ip791ZaDa|7ARnhx7Pmi|R2qDT+(7 zmKUqxj7@nq9U?z{JH3{Z8bU*3IRTq07GH^dF*~b`{}6LW!FgbON0347nykhs@PXVt z5+(Wuh@m6P9HLQ3z$Pt}TS>!V`*0^`I2RYv29Rw$nJI_b4+Ys(Z4}7K?Z6W%!&lVS zYyWjp#;Zg_9#N`FC8J{2F&yL{v`@ncxf)@A;N{^MHm4*ZrS%FgRMZfzYl@MLC8N7W zyBwx3=oVT6nJH7$DG3tL=2U?%OmH+b!|mm=TF%z@i$2$-zY;wKHd|vX(PiX z{rL_g5Ouu~$ij*^AkaBd(8BKln{$dZi3&>iF$BamGom-|KR=Bnfit;MBh>fC+)t^o zOA+q{Ia8m5IJ%KAjVStS`?9X-oJ6gW0G4aFD3}_-5OJd^cxtO~LcM7^lOD%Zp=q&W z92WL>XL27eS?P|l`t0|EyqDI&+;lFnXA+lQ>qQPJ?q;6YC&h0(ydWysOzHglyPQv& zJzPX=o=CPuUPN=0UhmQQBqZH1uqo@q07u_tm`BhXF1&Eu*VKnjo!a=8cRiyM0L5hd 
z-jAl3*hQOL%B`@!U(vHG7fP3H(dEc?%{gc-Vb0hiLI(As0;9od@k3P1={&AK?8#$Q zYda{KqdKa$hIV8jW4Tk_Izs+X0^1;?Mz2~`noC9uCK4y}Q<@TOoO>Ds;Y50|cWs@4 z-yvw9cD^*Z#IMsHf9Vgw8`cjQ2A;t%0z~vE5x1QR>gH%cE{v!FU4CFo7jIWx2&gqC>|{pU}aTUdtqHZs34^Ws#^R z<(I(tpi~qeUV|a-`S{}<9?rD;u5&GCfGzE_a&%4>(gJ{6zqW#2s_GF`0aTjQhsw-) zxfd-qAoaIdK(s$}PQOtCfm@Cnf}s_|#g{vI#(#5+d%0-x7MwMOxX-{ld=yjqLIlr3P0wK=!zgtQxpO4b{@iFXa3l8CagiQLjRdl*&IV+qPvk@?7 z7X_*)4Dd=ij~8hfO*5v8Z(iGFu5*8ku$MQiqF)zO8bn90um7x!@zKC4?B(mwZoA&u z2wI_5FBbmIIU__6AXJi5x>3SFQu<@tLfw$vf_LVG+4$lVs61Hb0_bI3xlT1Z$^QIH zB-G;WD2$K6?Jy2(=O`7@k56bVw1g3nT1oH%91yfh3O#>d>?n-dn2!M zmmfwy3I3*=_2WC)wW=j{Cl$5?*(p0Myt}v|&5seWB{t*>{~f2U%ezLJ_h$xNjK|)% zHLn=|iRZzYn-)*+U|`1R{{QiO&}5~-+#CkR+&uY5HljH2sG^`Dg^i7kM@&sf#ZFCx zM@2@>PD?^VOHakf&A>^^$ic_WM8m0()9$s2ROi7B@R83GC0+CeEl-HA#Rnz&bAf=+Ds-rHarKK&eW2I)Pu4(RUp{?L( zsbOJZVQP!+ZsTHQ?fT8t*6f?Bb+T>_A`zU8SF%SAA|0Dou$IdYN81)#k6w3S{}}e@ zDw_5h!s0cSstx|`Chf3Hz2F-6kXZZZYRmRu@BHt{V>yTna6wMV_c@3X7?DuDm~gkG zQs?~GZzUC8$#S`fJRIR^rSY{%;rWH(1uaq4jVXo2$pwvRwM8lQt;K2KHAM+U#l?kX zHN_1jrS%Q<6@^Xp$+fwNVEDx0nB;|AL?+N;``A|d#z@Ql?pRECZ%y)GGEN>MJ^kEZ z`qXga+-CdDK-BT?>aC54r^D2Po#xTe(eBZOzS-g7xuv=BzJ=e(=6Q%zNb6%eI};nn z$x(TT`~cHL&o3Al6IxkGF^zA_Cmp^%L>7KzmcxyvI(c-=LzjwFT$nPVR5dl4IPFJ@ z%jFBYZC1*}lE#+2$tRi$tMv!Fl^w39Xwx2^Eysl(9UmkKVnjs|#CpHwx`s&_!Wsp2 zAir?g#kI}5W>|lXiK&TM$mgrCw!tFAoD{heR{o!?O zFLmGZ-JH5zfyLoxmE?R=ysT6Dv3X1|lUc2{o${S&Q(8uit{9bh?b6Y( zuvJs*VwxzGb;(Z(sNWU4>imP;n}uIpK!ooKw0XqR?MQ!suXSGPvx`F$U;k%+hPlmnk?3 z)8-A6Id-B{)3h`@9U{{zirdiAT=0gyDn?DU2IW2o{a9m&T7-`#0L^P_TDvrDM*|Ok z)?aT~wP58qkYeX1=Sy4QbX||Mq=UL3Y|0X~I1nBb==eeB9BTI41I6h+&8_+dL(!ayDq#?pAU|}LYL8NeoyOf#};p! zC%~m1CJ;;Z@2(;eH%i`2#~IUb7@T+!h>*tGtdnO*S$L^Q4YJt3{X35y5)V(Wo|2!EswvLzBn z6d&WYyyGn7zdU<=t@3>y1fHwj?%#g&^w@wlYB*y*R}<8sv&6?!VD_OsE8{*PxZ+8`?q~BvEu;B2#RionVFSqt%us?7X`S_>JZGTK%j-`bZ{t~&{#6wLaMD6JJQTv?kF6cp#6!`;25b74PSOAr|87w>( z!dKb8*p@zq5;>H{`DDx(!-vGTTi(~xb+hm5wQ1K1w^gw=-w)o$fB#`~S#J1L#{j3U zBbtpkTQBk+)Xnzfaxy6kZperRh(PjEdJ!grZ-{(R?SAn2ok*$=6CRyN!oW+f7MXvr zmV*y#V(Y6=Ea?LYLhoHJ>SoC$&lYkVM+T`*!EWhx2I=%y!XG&HhsgI`6y#l(7=CAW zq+*Sre6uJTnI1W;B<>z#9d7I-{p>IO2EQ?-F^%@M<%7cV!DyUksem|;uPG&Txr(?G zx1xP75OAY6Hja%(m7cSN7$Qm?pt5PlTu;t?P8y9by9~#OwJ7_fJ3#gTV1m0Yx?WjJ ztbN=^IWK<;dB-W|iom^fElRYj9+(P3UYRJX{#8Wg?*LAN&p3#2J7j`h38fPj7)|B1 zCCp)5!hR)Yr8o;-31L8(ZLsShyAp)Z9#y?HT`E9~56bl8HrnI-i{#J+-^{pnv4?qU;8_ z9mTMOB;4+Sf;))j{hEw4jBF?p8ij9E13Og{j5BHd`dT5_#X>uo>+tsaQ{B0U5UlFOK5!lT2OAS%O(u$|ZU~ z<}U_74IRO@6d0mWu40nq=zG#8Wx1nY?a4n|*`1Z62+b6(I5b4NDl9@g^s>2^Pms>j z?0(dC5BTBjm|#iId{~{>M!-SyDno3-cCSM<*E!A)DW(VdY6oW&bRnP0-JP>f_ z>$gE=0JNe8o^4%94tc{t$v2i3pJ?~XC3jDWh?@gD58d}p%<@;SAxU2bVx{Bi7wsnF z@~7i|;$~H9ynlsyvX;N{+{hq#NKF!EpN14R$1)l zXyU#?8;V0D2!YN=Yzh(b69b8sk5oorWVPc4;3KEPd$w%WUxZ1zCJT$EYej!0wWbXk zv};ewn>9bv$B6||xqZsS^(#a|VzrXIi2WLCICR$yyzWc2yR|-+RZH`zy|=*@psvJS zf`QDRtFva}&t-l;m?=pvlmS5)C@-(pUndP;_TeN2kD9~LDSGDP=4v&=2R_Qm8#G!5 z0nyRKTgVaEv<3lJb7AT`UVLz{u!g(5N)3Mk^~OQ-jS_rna@MLCm3;Qeak^K?pussD zeI?Z**CaPb)03t*5uEDh)$Dy~-vx>Wstu?OtuunY5!tGHLX0kxxar~CTnTm&In%qz zPkfjAKwrh{HrcMTX>H2f<6)In=@xQ2Q0S7UKi68AKmU#o&LQf^G@f%N5z{YqVM-f868zs2~-Y&r4lq-V*PnPmmbL zkv_fNk;c2&5_P=l4<$Dc;4>O%GQVpB&hLJb0^IfHP_{8NYPb*bd^=ij51*?A>_x3% zSs#hcaPJ_N@RB=0@Yc83sz2}Py`@TSuGaSw9=jf|!W-u&)q(Tg^|vaIQJ0;%I~~Up z+H>*sE)3!AbM2@3rp^mR$2X#|yRL_uyRO$uUSaGX&;}9{o5X8KyM41G)5k9 z@_86fUPm}iU%wxI3QL}?H=?Dr9+FtWgb-VhlE>Xg$TEM;p3y=3WNNEO1ii6zhj~5p z0G@#7D~-;ZBUWd`e}0Q7BsMh&Z*vVcb@n@d)!!~yXh;_MMrID2j|0i;0D-gV;F2yi z|0UG-`yu7CW(^%Gn;tSetl0wjfsS4@>86HLw}Yo`zioxcb1zv^O~WF0(#syNPKtrw zRbbV6=+RwQH_81T_w6o*U#O0BHL|1%|-{d5_Yv2b6R(e|}*i6Mo;-unoOp)n!xU 
zd1s$-1A|N4Z3XNaIetY%9`+prE3`<^@Q$9H6OUaOv3uMCjD{Hb>yWVt+Ee0Bt1n&v28ju{mO~N?t z&5zK&-GpX83nvhWND|ZA+Z32wCJ9ojDp*rU9#hFmAb`L>tcCN!7uhG39IpHS^awp^i5g^QS+}@K(~cWOhm4 zb>9A6A==05a({~a)3kmLTn^Lzdq@o7$IWQJ`vs9lgiGo~$_CA=KaPi*z?O`K@A#n$4#(DX6jAdc2#%i>#NGb52#ck4?7SM z<1;^JkKHojlHHo=_UMWH^rufcqAgh8-T3635U$23g8dDC)@sADM6cUd= zFV~T@3>Qw1Njp8(DF}BTJVci_9^>0lhOFSU^l+2Fr3%0 z%fIO6Urlu>iPL0=(-oU+pEfQ!;hDo+HLmdvKnWEH|K-Rpr`_dB=73a8p1|`S=PMJr z(5);C*Ljorp-+@3EUu9#7z=i@0Rb9_pkHMcJRf4x1uo%!NddvK{0TCh#1!gACh-Rj zf63`-F36iQDIZ zAqk)74*nXYu&lcMMcwm4d2=#lD(!cG*LcBCI@=&R^@A+gNlh23BMM#m^JWk%djypO z1 z7P4a7W&Gea1E*)uHmMVTa;k|LQi5xdL* zq<#W(55~66mC{Ql9W`oJSlzagmT;H2b+I&9E7UFUtJx46!Ik_o3YJ;5sNx>))L)=% z7>HHLq-^MqZJji5Augd*vzcf849RcgQ;W9wO$a%3s?e3Q4UnTMe!AH|=8iY^U*y1! z2tI_)#a%p^HbVhrrSySz;1$)!~Md3k}Gcb&g7oQP`0d3sQmQ*zR}#hLvk4i z7CmN(I+bCY$mweX)%;OqiCexrzepBM*1-&Z)8q9HMXR?r7yGQ8c6&cqDB~$K0_`DgahH1TCCD#@nz! z%!5Pb0aT05q2*uvwH74=Q7J^rL}=93X~kHvCFmMKly^82=ow(cYknTMq4NYs<$D{r zSkGmkaZ&<_J*|&TS!FepfStKw<||c2o>ijXM^cUMlN&Hcwg2IYirwCqfr3a1u#b1E zbpAXPkC~Rp$|ck{x(EWYJr{JSKf60CvbaMeI$(icJuXF!^-m5#!niaxF?2d`0$ZG^ zu}g+1WeI+!J48|`{F_mmyG1O!jt{RWH30@bk#&7aS<5bCF5%|*6~=;y0n=C1e+5?L z{X_KZN;&pTm1W;-*(VhvT4r`*E!U`j%5nC*Guf0jB-B)~C?wrfLXnmTDO8#C6Mz1q zo7{ry-?{3%68fS~`*l#XQxg0bZgdQE3iL=k35hA_9|6bGi@skJ-|&A^>vMfT$JlcO zB%5Qnz->yI%N_x&QJokIf8SeT+hzM3hW?R2y5dt~gsv>@7g?~oAmz)dNJnqDmhF2A zynuIP99hRK-DhqO^XGr%evu#+y8Y05xF}Q)SX$mqRf_Rt^COh0U(BRR^m-$?c~24( z65@}vboUa3%|h)@RwX96-G~X3xVgE319x8TPOkR%pMm}E+yR;;`Cf13Q<7!gxob7{ zlEfsRw$M-krGu}--JES{A-7q!g1a#Db`g2d5PN)50_jH5Od3bOYfCvcf@qG$a8$BHF&RbT2-qyfO zZ?j0ndx}+-rQD9d+CbV+RBi#Sbnj_@-$2ae)xb8Sckbw<#36iHR)TaygqDQDF16b^ zwN~1^bHD1!VPerBA4pfcQRCd#QBHD=c5`9-b0@HSy+bC5)rbi#tphBGHMmITc3pxh zNr1k1o%4h50G`>{g-$o-mkLtAO3?q;B4Qb~CS$uQ()6kqCOFRrWdvOtyDjz^pYT*L zwgmc6jSq^V^bXx34=wSBu`|{_?s-hwiVb z2P%H*ixo>jDZ9ihm+U{~Y(p|DxG^mzPtCrmnb_A>!SA^eFWIfSwC z4E>Fgsy*q~OTFo0`SZ^pBFTKBGoI9nEOTWH{x#XIzo@50*}P*y4jg1??nDeWx8sfo zzs%Ovif@&Ri9&zpG?j+e84JMSbcBA(i2>&bS*@VL69>rAX!vMC+x zQz!#qa4=K7Yz`}$LP&ZYx~=f{!u}%FHS346DHq`CHowcG^PI%&S9)qaF+}=8wYQI% zU2%Rqp&t(m_-!*ojIgX{P8`o&_O|H~bOR=%Ne5tJ>op&TFN2sSJsqCx)irU7vbtJ7*wd>pm{OY*EW|LzR-(3<8agaMTglzT67>WB zt7o0QO;Ou3LCNntHMbur*L>zp?%q2Cpp5z76igP|jZ+$n4suU^(XQ*Oj zoVPUa>8q5;Sv~}3Th~hy_DGFat8d38$XvjxBlpiwRe}J9M5OoDE|_8%AH6cHPCwaj z5VD5N%@8$1J1hDF`L+~XsEYqOMQ(31=u0)lOSh)m)>uI@iXAB+1BqOA?Ay+YZAEQO z-BBgyPO1Lh&D3{OJpITVPrBY#3RWRd*VaBjjWMGk#|W{j*gEBJWAQ3M|V}d2*XUn^^CYuTIHn^DXL^`ostYWR5Fji3@*6MrD%;wBS5dM{*80 z@|F~|Ju0|Xi`nlp8qlqhUVo!GXkk$vKZs>mw)xE5#OEr@*xoOTVK)p`o5*tC8*Fgp z`CD#1ru*YG5L*9Zi&UW8ODWn_amt)}5TwAGt+Jvz#bYK6zB1CCI?jS;XvzJON+Yb}TFef=|$$1@o zJt533U8WJ@1V0!n?_T~z@rWI*&e|N;g)r?GTXdPs`_|1k^Is}q@DqgAVvw-xPP;}$ z6X8w1^xx`HCRxu_DX!E+IK8dJs^3s;D@yXFGfJ?e?PZU6J6cjVm#D?4sB+DLA|a)s z*-bWn1toi)ZlNNS(=_6q9WE3|@E42RDJI=SpWbO>@Eq4Jef=1SDrzhzK?&o{$qAh% zG-Ju1<$=;s2yHUXe*d4|GWwFpeftS@CqFXxWG8NzUg^`01_n`vl-h-hLibiHy*(iw zO?}`52R8z(aD-lWN6E2{c+DUXi43hQn%QUc3OiEZz6)8{Ywd~O`?BhNWJgN2mxJOU z<~AmB>tb8$ARe}6;h{kv({qKJUTh1_t#kOWd;Si=#=v=D#B#&dm>w@;b)V)UL!jSS z1T?=AFsa|#Rga?=Fly;a?@pFU<0TRd1K96k(~t1XWbUh&x6%3Uanb>rk_qY%F$FGvDqoBrEj$))i$`fIMk`_B4J*Cbt% zN_09lGj7Xq8P=+`=U1B|lwaH1o+K7@Gk0WPrd``kA(pcSEGT!jmSn-Mw0OB7MweyOr1?(< zT;QxnV857ZCsv&(rOm^yWE;K8KMhV%D`kYB{q2peu!XoyF0dx=ZlBZ2MysT%J-j4r zC{Z))pd!vLrYO$qQby*xUjuM4*WW$cm{$#*Ez?i)$7L%@$9!hDMf-Zv1B~R~fwBux z7J#h*ns8?xH3q~eZbgxz-_}LsU_prfGa1oA!?l(AB;m%qFk4>3bxE+EqxWfkGnZNf z{gtO6RNGTh+)85fZ{Z_SF^m)sVsV&sDI(lv#@haS6^#%|IItZoOp1m#EsVARzSH+m z#U8RX0^*W^H!KRcqoSP*dSeD5z0r^U@j!oCpv+=gqHdaWgPP4Izp=H02A|;MkSTNP 
z_g|p&??XOK`q=VrSzULa48Ilbg4$8;dL6%Y^x^2BxO@LBPDD>Gkvj1i;J87GAqWayg{|f*` z{MNN@w2N9+lC`Zsl^iCA8z-UTl~WW}Z~akq?GrYOoPfdbwf8{xqq?>d0@w0J`DYNqT@`77DetIrsJ?qDJ@#jQ23 zu%O?PF7Ah3-V&pA92dBeK6hp=vKD|Po2H9Dpz|9pqW=KuG~czLliQtggmC9^(45+b z;@UWtnKHxUNR1GFB1vfcQs~(BiO*DlpIG)iJCrg^35(6$wb(ZSHVXjAB{}Ql2mw^uNk&q(5P@ynV=XL9~|>nPU9j$_EXeK zghrW+b3UGew@RX#uVV8d)o*}TMs+}<7z{70{U?RlTqYx64xkeS11Y89+W4cH1qnKGmha5qnFi;C463L zy@X~kf*UxX)v`N|mFCc7)-9@8O6uP-f)U6##KOhhCFaG-Xzy@MA)`23$oa5?APdbW z^+@4`S9c_1$kh`R30T!t+A`xaw&m!FV_?obSG8}Lg&{!?wlU0|L$wpjm=54`wrIr1 z_Ye+=0DJYNNlQjsEtiBkf0pNpgwBzI)$@lTubQ2694A^sL?kbf*5%U+wCW>f_E7@- zX&v2=*h#cI*Wz6c!)S%2wVhG?Qep^`qUley>5cPYvype1K%H2w{hVi?SH7tAXESd+ zTO36w<(WQ92y(K(XI<+8U2RN^@zCz#Tw+KHI)z#eWNT#*i$#ZNTU?xgq%7xL-iL_w)m*H z5ax882U0Qt96M89k#z#JtlWadDZanmmqjIKgl3UVwQPnA(d_-w!WglsDyeSL`?xBs5ep8i})z);!mTUxG>|Et|Yt$ z6L?kNnmRLVQ5#-fC|Uy!a4{Nh@?%|NN*ySzZ73!NqJG3dL^}RdUpn#YbPD<>jm zBg7fNh%ejWcPSUj$OC~tURQg;vwyEnV<`tG*4iJIV<{m@z0}K{DG@Rv*VD3W%Vh^H z{yEZ=&6G=Ys1`4sS3dhA>&K@D{I{{jyOg^lncH>{@2D@kd};&pa$7Kh36X8TNbFFA zYGucTyF_NfXP@>7dnk2VNBQ$|+7#>Gu?z?R2d820qIFcMW`;hOuIj33>OJ(jn_?DR z(V)K9sT-sTn$>Y3)#-Q1qZ^3e^^TGtkNU#r^CEtAk&ZNB(ENI81F7s?6_iw)WQ4t* z*Mq-67We~-@L=7QBBb?GIH8$6E{wDamDSUxZ_^+Y-GKd0_;Hn(Oj!Z zh|{Hh0oi1chB-u0M#fgEAW+O4`Y)RReV7MMQ4BdTL5q?DiP2XNGMzUz?S5Ac7GFQP zi@jt2w&`_+7}U=>DJtte%SRDqQ}D?m(MnSEiZcXGlED)i;`R-SNbF>*8(l9RH8tK` z@nx2Di_iFU%GKi?5f^iApRcUECsY=i!A+_8D%d20jx(GT3-lQrpZ0A$T~ z^DYe_5x-!`e@`VZ49Q7wu+UQ_q0v$&uL%0t7Ce9ng0SrOsxf*ci-)>SwP8xFQ!p z44TiE-xoPFf}^g9g`bw+t3+!9SD3J?NS;wE2j`1z#Xcq6&3@UdJlfavy$KUEU-B;u zRI}NHa3k}6tTLq!NZz#6zV0yHp@DR|K9VDUoDeU~@O#-CpIw+%Hu!dOfAfPBl#f>p zkJ;piu?(BSOGNq5IKT6_txNY6EuJ3Wz(am_kazL9ufaAz9tM5#r-j2Ikc;c3s(mTz z>WTdf%qolHKGu*1(^huM_xygqsDq7o?Yu}#^ziobk`Xq^D<`GjjMndZ@J`k1)~FCs z6DBCZA*Soefot6pFg1T;na(xPRKHWL$1C-p=^ z^|HI_Jj`}~T0}CEMgCj_rbpT?m!iF+s;==xUhgA}q1u-|?o=GCy(rZ7;*awYE6GN9JuaJ5UgSCmB>GTXv+AcwH5Z;y-}(}m{251dS`EZ{!C2h zX(IMWXlmYbUj`PU4+lQG6_=FeX3Tai&p7LBzMQCu@QC_iCl!+e^smB42x)QN?Mbo= zfx~USWTuG18VYpn@X{P>xj5DTkz_ZRZX8ehLz(~tj8e;+cUSS=W%y#p4!G4X$i1=p zQ&xmxbU7ZIPriqI=-n+-Ng-pIqJ)FAYAy@gK7A`N+HHtn+^-_lu_EhOte9!q^spE9 zxp@B}DGMIbicqo(u+lX2v~Z`+Beb+7nvL8xXmHyxLT1#C;PR0}rLKV^aX~ zeAzn_^5{5O6qvd^=D+O7`WWQ1EeQ*bU_29i+D=q9m;*}!!pk@P0!ac8kwbs4f3GDZ zV%yH7093Q{a6DB#EGvBx<)3~Pfs^mXfUNZ+tb#YSQ7W2`Q~B?G8h}KY@sUNe_v$)0 z{i9b5jVxuHrzkVIGg2@kUM^uM`0!`6*1+`E&pOVPnzp_7*{IP_IVY;DL|q=F-K!RQ z`z@`N(|QbG+(!@39`VY@QM|>+X--(|K5MdCSdxdMuigrqbgLtLMJDv`EOc?;l6f-j z6>0_lI+VC|;`h)fen4yVxiHVk&pLB62S!FLJ0x@enUDf+(%BQHyp|didrAOSKUYv) z9PTr$Fc}y%Nj4IXjvU~l#39TvaST~9F)>!Ob{hx2nF|`6jUnh;h_KW*x`dk3JY#d- z64Z|?wn!_rQtR%y)7gd_W7)N#36BIXo4X8eutd$a@^eJNbCMeA;7K+^jtC>nKmQEP zD$b%3#7qojY5oeEp|*kr-U@S2v+j41RehEk8|&cacfuviO$r-EG4)eYaC~?0Szskp z9i$G#aE8awjXF4()G3?uw$N97=Vt0dpV@PufZ!Czwg+ z-B%aD_!1y}YfX!YJ*WA60Vx>-OGPW-;M4=UMVxj6`t#uNh8PVswS>dCfT>MQVoodi zBbc$iVc0|zEdm^bN>|%BN**XQm@6*GQ)e=UFbDQ)q-KAd82mk|a-_|mtl88m1WPQA zytfZ3q0C}t*p#SNM~S>;wTwLJMadFBRDk8MTcB{Xq(~jFzQQ}irw^w?k_%j` z$~DQIIBqU0&BE-esj*nlR%)o&7bq|BiZ=VEZ$WWoD;B4r{^Zz?pEIhl$)&WVdkE`L zeijICemrtp-37VPbY*>7}TC<(s7ZX23v0S(tdJ)uTb2M-3@o{oBUom}u`>I2@!us>|(n4A0Nbk!={8Rm9iLTu>e$~5>ibP-GD zZq=;g)ec5&k`+|d|L!(jBgB`j>^KMKfDHq;=utEm7I9FuYe9JBPQKHUn%%m_k!GT% z=u|mTVO8vPG}Zi3)w%VirKjUBho8coaIDI84Vd;$`s*Hk+^1}(eIKYeQ~=6)%54&8 zeZ8&?iKE8MFG>_D@D$_q`P5a$h&;UsRm>{ZFMrTSB7c$Et@;@nI)=+v>xf{FTXhGA z+4nd1#Oh)+k;S3F&x>bfH}exA6)s(?es(dh&)o5D+8cn3It`hxqYo$_Jzeb5Z{!aF9aHI$$}0?Xie+-{}?4;8wn zOBv~CoDEgDOspHF!NT|9{bg8TiCEl1Ct1>AmpKLAX;-5~4_02wgCy8IEhOQLQF0IM zY}w9=%Ar#Fx4o`rWnSAkJj-2pxGatzQhi3)j!Gv~jJ>`PhdC(ZeGUd}^-X(z_6Pjk 
zs5gOw+gi+f?H{@I)a=A))wWtIBv9Wo{8%z^@Y|}yMtDL715X(Xayuur4rTV+4Q9>X z)#1qXPCvkniCp_hms??bn(@VyW8>e3h6HX!v(BCheVY|0F6GdB=2y&5Dc(HSU*w** z#k0=U55{1qA78Eu&#{+)ur3w`Z}KlPLmPg=`;1!TkQr)u#RGsbgTtjE6do4c0hagWE6&LSM>`QkV_1XGiqDToVB(>?a+2cJJnL-opxY zXD`?AH9Y=P3N1EhesjH~gqTVIA2ibC5?}VOR;dB4 z*7{evUi*tZ6r19KM7dRD{Vq1&v{7@e>*wr0?pM^iJ}n92Fnbh;UpsCD^P~T=xa7Zt zb1g*RVbCBKjF`EbXsW8GW@B*>OOX4)g6f^uUf2#a2LiC%W}Ws=TFdT6d;B=zzA`LB`B}MSBO2Y#14A8QAj`l?rpeQ;1=+dyXXKj+At&|Pbwc#iuVHR@ zFrjj{xBVvJHBNf1WjNot>$StZn;6DuK|v#wd4EuYK7WyoRdj&`#yBY1F4}x_Jxft3 zA_#7%0nq9BX2Ojw53ZGJ3rg?MNl&}knAp?(aM*}e8HC^pk_@ZnvIQC2#PE3vS0{xR zmb^_pF8Flb*8Vll*I7rmW?6=KuURM;fI{m2-3k2zXU@qgwtVahC}3{t1h4F%`FZ;f;()vo3Rjt(Xp#@ zVuSIN&I1XDqn7uzy%1Bsw}T0ylTeK9*M+oZ@VLbWTykI5l^l6PnhvH0LSnhjT`W`5 zG1P;~9Cuh~bLVxj*to%MiP--eciR^8#fN!8p)Hx*tVI!3Nidpa*m2l6!_xl+lR#|0 zNoK^g^?&|I)@!)oUym5M`V7CRmwEjM#`L?!W_Dfix1Q-%dN1#~(ck3t&UpR$=agq% zwWHae$1v5>s7%$Z{fhzcl>*-WsfMHszZ)}<)0QDu6VrR8rrSoAY1S1Knx&z!HLHB$kh&O_+FG0(!0>s1JG1TuDnB$~Y zYg_^MlINWx?yZe$ZOUVN23gIQujrw$f*gUkoRyt!^1pyzxq4-$zlo#XWsXblWnwE{ zj(=j@*7`SQHm^qzB1uexg@RfT?*x6CZWE6P;z+DpL$pkHL5{FJ$K~N!xd!QsPZa2z zEfA48Oe0@1%a^xCkIWqJJc9c#Zc4x(+dJmH_$+>LxA)>R=k$&6q@0@5W{k;YTh@p?{w4_!(`%%p^#mU6uup*d&+36-n~X__*(Y z1l$seZ;2i`t?*7989OjAbFp{Ke|ZeQaklq8|IBHeyx~2qy-`_uaP_Tec%gC3>pcQ} zo3V=sf*vTkW{&KGn*B5{sxG&`uunE7p(1&5<67k5u`7P>fiY4xc0{Y+ixX;cpMUq5 zlH7Hv9VZ)w+AkwU>OQ;FdSvFAXQ1uceC4%)8NKqX3}qd@()ynN`LTU7{(Yhlco{LW z3DT9OoHN}2?joZ=haCzc~= zf8;WOd;M2&|JAcb@74EaCh*Ib$1a~WG+*(OXT4Rq4-%o{T~BPw`@}zI&%JZW#-km1 z&Rp^8`SHwF$g?Wat*7fb`;6Z^Q4Fg>G2-=03yZf9o5GFyb9R4)$oR&j7k{#y#dUn6 z(r-QSwpL(^ElG z##{JXnef?g zF#uHN#;uL__gm9?1U-I<-lovxzwM)L`)9x}`+v)_|7k~a zS&!tLSn4x|pB6;6h$9Z4)MtE8;@(@>+U3YrbgvbdaV<%E#;A*u_Jr=IiX*J_UHG~D zj+B(=OE_wmBU^w7>T5vSeq|8GlAdtM@icKH!NpS~?J+LomzZ_Y8s>=fdW3Qzx7Z=& z5CO>bbV%8~7B#^>MSmROf{v2j(R^H4O5N`BF4ZhYDj^aiNiyqQjHoxO*;6!4XYbFr zsv0Ws@eqfkqzms@j*J*+TO$i15db3bNHYbJutFnACXx(;IIPZim<7qiJOm9DVdoN{ z{O~Pv4J46$32D?&m5f0_Os|iXlEqZba-@ndD#sD@YSE}+4pV8tc@@zmi}k6n&^V?ZSN16M zas%jA$zu;Y`rD~EhG~Hr!_b>9s2dz9g;v7l_w87YoL3Tkim&$BAyM`N(Dh{x1uL{I z3fh*EGGI0&Eq}!RnBD}=Pq#qojH+#VZTT+6KWUG@VNQ>flv=Y5BpCwDy?=xz(sK_Yat8;=#&I1OH*lwn zw-`rsKUaWRbx#0*hr0=a5LCOA90_}?Ha%)kk8tnDHHYWOlnfza)+W0GZOh>3a#9Yf zvBx#LhZ=Q)^VCt-q7jz1RYEv8L{c5&IOxG2Dj_0J z#f&5x*niZ-7H|amuvZeb%8^QXWRlauags;?5&$y`#t9D?{}PO|DI=H-#R==IAsB_l zl6q*91qo9HTk9dt+gLp~&_}_z3z$_Z=8hBGae$F8*LssGjUAiUf`_`bKFk1u$5b8x z5h**wM#kIHPUXl3{Sj&VuSSSS*_*oCUp42Rh<~^iEBQbOQrlW6&J(C}WHT4)msF1= z9M#9kYEF-&2$CSl^j>>)94V(qBDBb9H!oGs5efAO7nO1CRF2He?xn*K=xabneO{KN zQC6kMM>#~s$>)2C^aH`}X!VFlD7C`t2+Gu;9!WtLt?f(D!nP?r&lCLvppER;ZboWB z7=NItJL%43EJIJEEegH%RKvq2kZReu)XE|< zt_D3$&_i{_KzCqUr4}E#d{BFcxT)moYKU5{3XvAJdl@0oFsFbEcEeIDgNW&l4nVJc z#M#UWZfJSUE}_)oTl(FuJw&*{_bQGDj(=e2cFsP>b}xZvr-L1-mT*w9S*eACZpR(y zo|`~h6cVIuXk$d1 zR)S4PGzXxuMX}PIFFj{c$ujE{m46cZ>{4=sxfck@i9$$@B;th_I_trod_6qmPUuL1(jbC zz3Mc{ZHeD{qomO6u|&xmZGXcvyXs(AuD&*ZF7ceM?(Amq0#y&REcKhAlEi+a`S+hB zH?|{@WVgoFs*=X{s;xpv-!zrP){17}5biW-6v>u3fMt=%lBLQ!f5|Ip4+B1lIKqnm zExT#^C`&lWWmYl|n*0*%I;9rW?F^_jJrZS0nk-xm>_#b4F;$C* zw9nPN(nzgjDFw-jDlNSob4b!7+!a^apom7JOVxeQN?40Fb3SlM@wXKJ@OLRH-NaVO z5ecN$dT!K+L#i$;9lQ`qO8u-c1U>pRM-pxyRX+KsN+mKWTBROINMvi33L@pnEw^+X zeTR9u6_yy0s7PupyMHOklCK&>kk2lW^~)ADF>@q6l3UVIgeOTA0^h74jbx%*YsPT6 zZRu051CfM;h*ZrHDG=cppx0=Oi8}*Hmh>h~&77iyOo>0aw zdFWAIpp`0CwSTA4P|#Pd%KR4?)H`ezzXYp?DdBgba;BAC#aEu^d{B9TX&z^_olK@6tpQ4Bj40udFa zsucCOmuG8{3W_3{tOG7Zr^2vnfKZ+!bp4&33b(>dbvpMn`P6XR*`a7bEP8Q@qBIVJ zyhBG39i46!{Epj&bq=gYk4P42JmB%@6~TdE2Y=mX(5r*8`F5r?lpjJWp+}TsJu-;G 
zQ3VuT{-gV@_z&P06*IfXF3Ocx_ntK#QJ9Wy1pg~(=|J;?e`s>GB!cbFdfyb3s4gWIp&ZK*FIOHkJR-HQ0-IQI#_C(^$1tr zqkkHLT%EHfgM@W;bsVQxjcaqJe8ZS3K0e)PWKNOC$M6}rzNkgkSMQY>l}g3gS}NS1 zH!6onk4Wwz=1Hvp;>h$KOOpp{RQmS&G+Iid)eKUEvc-K1A=1CMv*TpzC8`_N`ouoY z^H6mtc0-^jB(=Jynman$b=%94bZimcS%1kjmfK6L6*M(KussTu8kc)r-Rbk7TzOEH zt>uA&EAv+LEGx>c&O<~T(I|{fP8Z|_MC1&m8SGP1{im88?&o~+x5$%}$8mJZ$4RXQ zUBIU@?4_K$tc{e(01=T-$#nszTZBl;qA`lDIli?%dqF@Y#pXOyr6e55&if$AmVdLg z;`6a&A{i%WmWY5jl0tN1^vk(0h@=upnkX4XVRLOQa(8=!!fk1?Grl4nRI-*!97n9}9UUS>QmuwLa|fHEX?Teq zCfViO0-wFoBaugZs3_mo`tfE)RDbbSlv-xjh~0H(8VlltIZW7Mz-%{?lZH_b88`IN z%4($+7rYV&vIM)A+q_jStmR7=gHynlMkI)W2CFH#aRiUcE#3ZRr}REM|MV5vG~uMB z2p}9DJZjX;CPY@g_U~KoJsD;0fXgmEj2J!fCxC*+QEK=CqL!XPaxQY^|i7Xd!16 z@rKpaHmWU}>{nrmppu>I9Fbvir69LL5{`3zh2g8N6e7uunZ@}YB7axA*KXxtajgh6 znVnljGzxOsGq#qEw4$!sEU1TEE0_}-UQ*gxap;A_8)p^C24=M@)Eb@bOx#h}%8qDgp1R%kaM;)vVr zaJWU&2WTnmxP2&Wg?~WcveLQyoZa$K+aA|SuC^(Oqxi#pt`Rk%SnY~EY$W>{>1IOJ zIp0Dab&B-JdqczdY3MC9Y*S(W@HvE4k^G^VtRCSik_tayVh@H6O zF$xvE7fRVI}~!9|~Y31y*Nei218 zDuXg81j#n~>@3fKIeMO5r6X{iT%{AM^EBMC(?P{r#1lz3T05Bpa7A`YJYl*-jcXho_{E~D1DgRlWD?*n$s4)<++-Ft3=JeRT2>t>rZljG@7oa zECAUJAuqu!KsPZF=JALW7GMI!JVeTu5F-&^6rFnket2`tUF0XZ)dII6((Lhe4D02d z3uLO^0_?1`dZc6{!L@v(pwB*2lp|M4aOb_U9LYRqkALRe!8yQW;E6NLJ%mWMO5=-( z_@bz6sHoKPrLi7&E!td*xfRYZ0!JRC_rK5(&IK#nAQrPYiekSH6{v^xNxLZ+OPL7| zu#AVuhcDtJ3xZ*(eD-y;MF~OqKlIkg;o(yQ@H2Q~c=)9ZA{-aqXgW1~rs>4+N%#>E zLHTc>xqqqY4S4*eAs!L0SdTzNp#S6N>LiF~YAwmtNs|A-j4;O$8(~}!gEbqXdB~`I ze}t1-zBGg*TBh4zAww3kkqBo{=M7|c_H1~hf7)Up{F-@F=5FxjA~I?iQAb96dIQrH zZ8ofSec8q69iV$zvghnYo3XRS8gKG?WbaGontz&xhGl1V59xLdy>W7gL*yK+=rc{j z14D;~&kV_i&m7{UnU_OE0ZR`b8XkD5W9VGtkVuat`Di?pT4qN?HA{GA$sXLJcTJKu zBWZR`68>3|43Cpxl6=64r9^mE8!-pWWY|o)Xu`BW8iPi;F)|rmOcdt`l?IQ{%pNyj zSAUhNuxb|1RJMc4aD*#1r(L{b=sj+yRlVU1>SB)U!Z1~0#0}01|!gG10$}kFF`wdGy%Ft9De{z zOK59laLH@cm>Y*g962`(;b92YV5SfM?xuY<5u&pfW^d4t}yCrNL#0Pl!SD4sRFVV0Wjtn;qE1?8} z!s&$aRMU_Q(gN99sGfC)pg8Mjk_|OB$xey%2n3?(+}@@`L!C`cC&kvU;C~@Psb%vr zNJGpzC#iAVXbD@w|kHkfqe1R*&?<8`iR*Gw$6lc+#457YQ;hcTP3VtD7D_>&!1!cF6McOd3#~OA6dxOsZfTy!9&G8#DC0j@u?C5FcBUR zM=X0fq3D9>%3Q4!6tacDr40a&sTIXzdVUjG0_+cKKZVF8@u3Z+qJ`X6n4aFJ%7bLM>p>{@Xrtvx5bsem=o`aXEr0KQmDm(cngySEKiR(d_|Mn z%`JUy%AL(ZyFTcgqa{j|u)(j~zHN?c<-HY^=appNoJu;p-nWKxXaaZ6aSgQdAQ|En zyWjg>F^YI8;T2&%(ijVbxJ02f#cziKa0_PK4)FQ>UAj!Ai+`e7SfG-jKuYukjt?m> zFW+BYUQR4C%L#7grv!s?7;YaHxX+b>d5Kse#+Tw3vCOwn+Q#=s3g7Hw%S2YFD=zPh zH=QORInbJW79~zs{OLkm{*~NgUPn0HbA#0FwS*2?9?$UbTN&44^Ijdv7GVYl2X%vd z(xvA?QKqmngnyQ3F;sBu)NocpZe@F|ZBJYFj#ZM)v$gVAgSJwukJ9gALR&1wwN`nQ zOiygu#d*YX1zFUfC37rMs@njG(xbJFs1NC=$t-IvTc(x{nTWA+DD>(46=vV9nO&)M}Lql-upw9Z}^M78BxWu#ZY~; zR;<4wwrJa=!mmFP3DZYx;r$Zcxq5_Q3VM4eZVeY^l@Ay{N|t&5nY*_i724bRrIYzo zVROJ18tvygw2u%c9}`DrBNady+Yru_fv*T_mT5a=FGA&PHy;-+q+G z3eh)2rGN6YT0W_m`;Uzyo9|yMl$BRGJ;3Dt-!ukEQJ)<--CpGFWU&+X#lV*7=_E%J zM> zQ5)AnuPOg7N=FzHk4I=pV`~9$FOk#b2wPf*Cx5H!`-&~u?M)Ff6@e7p@Q>I+>K4V4 zE$nv-y0PH#aYTx~OsN9$=Q$*~y~E^2hdJMy6&+xUR&%+&esXI&AKT)3Di74$qapS& zAtG5%{(N`8*@`U?Hf;9avpMd9Sm3cmsFXIX*2-0QQY?Si%H7bh;$KY`pqmIhEBt+XBg$(-)vbGR&eHIag#Z{PK2W1u_dpBV=FMxd!eX! 
zx);6|y_en&p*=niuEO`iaG5z8;3Ssb*>`mERXu3x zPV(7zaNXiO38h$%R*YJZM2u1JvSbuD)kUjpFIZ~(rNff050z<(zLz^v+P ztZ9xcA+c4Zd-PZlL3MUV3r5K`HreK8lcV^kBPBl{xPt?hy>kil;3U}|?*K{Kd@0oq zx=90QU;%1ZjW}Y~&MCm1fxlEH8`)$bB90nD1PxbS?I=t|5n)5oElN30@mi4+`%p&o zci0t#oR$-qYt#Wo8bMM{sDE5N%Unmk=UqL$a_UY8EdzIUErU@df%-HXZ`Y_g*{|Bk zk;d6ZPZvovglgDBd$4~wSyR7&*rnlGR8p#F2-}R14^erdlJ7 z;Gt_2x*fp*V2iaaw?>ob_&BZrBW5kIGk27;9l)$hdg3F_o`307bKty-J@0FcK5S%1 z9pgA_#z&A-uXoj=tp&~B+dv9EB7WU4$tEYNMdW5I@3o)wb|eRp1VRL~fam)(s$dhc zhZRBHpBXr*^?a{J6@wfx^wHR?mhB=&2L56I+`%8p$u3jZ66m?trtCXD-Ia(dz^F;z zq3JV?xIs@;?|-aCJp#?&D3C&rzzPnskRwjeEoXvC zFd2p%xetO!Y8n2b-7u?tr~u=NSuI3l6cWMILjlAQFvoU%`IQ2gP2)I-#}Fd-ppOX{ zG~&J2TT4wJeAODi>$A14u8SiTrB<662NAgh z+LveX6s9qDjcY;BFfZ4b6|-_pYaouPdf6T}sRm}(TAvDI^dYE)g6%QhMfQ7_Y6uak zYRZ<=Bl#_SAI*p>{oP~S>cZ(FfLx6g9jdjW^_v=TRcTNqyUMjAhTbK zmf;9n(0>~GvZ&Ipzk3)iZgQXNH*X4y@*K(2BZbLqsP3wh`d(M1s6|at1TdRLq`0jl z((KLMM-IPxxck7_caFXD!qEd~Uu!t}{MjsAvB&JQFM0jGS^@;HQgEbDYTd|+tiv}i z9DeQC%?n2$BFApNcC7Wn%>&m(Qme!(Y}@6t1AlK#IfyX1H-`vQtTVeC_y=(-)q-kmbmOoL9>;bCF?QSQgGzee(B8*;=QM zK!2I_+L2?|FPw(f)`jm~ICiY{*K;_MtSq%y)C)TkTV|^Hz$wPfOpLkDcdEp%b905_ zu_cD7_RQPy;V&^7UD~8(&-3!ic!*GVy&&3+CRyGuQTJxVwcQr|ImkW79E2~GIW{SZ3aeot$ z#sbfs&Fe#q<)l+G-Q!y__n8Wt*OOmVdPG>aZ;O&U(*}$QOO-1B`*rD&B3)5tJJMjY zJ!e0v+!kd?N<^rwNxqD~6kCxfZ5)wY4U!{xij1CA(A`rqrwf&={7Y^PM*>@fN#?eR zN{qcnwt^hh8x z%4$HmRiO4bQm8@bYJd0(aZhO(riq96j!G@Hu4RV64R8Zlz_r_f#t`vw%YR<9MuS(O zBe=uiFmdCHpgcFsaiAmlgBklO>yf*Ya528}?gjJA%F0Lj3*^m}yXFhCTc}5JBfy_l zi^xKxSnrAK7TQ|3gsMbC7gNecMaxOb(5S>@K0||2-sE#Nxf+y;Mu)Lcxqpi4aP3kk z+ffNcXQP8`qNr!4DDq_b>wnx5eth>Ua^!9Y{2X36U>WZ|zS2fs9iP41aCN-(!!5M6 za?*5yKTD;KmrN@URWpL4=>(}}ygRk!t1()Q21=hRCZE{jQI#-8hFGWr7 z@f~nXQzoZtuaAWA9M$EIGM%-4OLF9I5F+g>2jFJwhi#B2E1w+xVt*_22vfW}(qbGr zTI`1xV?{cBO>UR1bsT~~jSM(jWCIS$+0vnOP^u}9T&6HkaJSJ-p7K108+Nvph#c+FU#m!~?r|;hKZf2oX|qh$04Bd|d_^l^LWxjZ0-c@`)g|AOvk6 zwq5=B3xvhpcW~)BvQ|`TA&$JG?_AM81Nqtf!s5!^=g+o$+<*B_$K3-LVa7iAd3V=` zY0b`Vlh5u*!x0el0I7frOh<7`B#PuZ8F!c%m_|oi+)!#wKj)*KgJ4d`U``{2^`6W~ ztXpXXU)k3BM8+Y~05_lixDEaFyYQC0b9`&{NLE85j=V#{yJ6)W<42?VhPy2*cN^|D ztc;#sY5N>z?0;Kn{&=MoQe`nxbRdR8TPxK{+D8Qv4I?kmFQeKpCe|}wzu2YrY2;dZFyWlC_xW#)OB9LtXq$y8M2uqKEbtHRoGhF z=8rJNyFBJNVqJOX?8CO117Ezm?~CS{J9|Dpvhpsua`fYdkKf(5*nIWl1B;y0N*3)J zKHt_lkAE7LjT*WGpgYjE+&tlFrez=*tG2aimLq|ziL~)_OSE;XDqCy&eD>lUzax&^ z`Ecd#X#drfJFi_Gz4|x(7xa*{A6;D;{o>vJnbDiAqp!_mJj6nwRbXq4>foZs)x8AH z``VUYv{-Dj=sl8g!@Becx}UT4y55LNpm1MM?SC~H`xW(7&2yxDYW+gwvlmLO0*)jb z?96xv+h#~MFLy8XIFkh13>mT!JyKL;1-Q?((l`W)=Lc#o_FKG@{mMT39l;-o6dmw@ zEwr`bWS?OTnC+Sg(7l`_h-olCLbA#J=(-TO#f=l4Mdn(zT6y}Q`<`E9>;0O@%TF87 zTz}~dwzU-3O6Clxn+g4{FRoQM>Q_0RJ$~TJjat8)ft7pD2PhF7w&mPFuxludN zi{lij9XO7w)wouv)*1$It=gc(DVJKQHc)CD*BWqzQripHKD?LIsntri7N#6reKn>> zaz+79*Kar!4&4q1;3TZXQz*T~B=g6pPPbJlh-<~o#t8BaH|4WaX|cq~9>US9c7H3i z_@!R8QHc`{HK}xHv>o^*Sm8z;0UdfJrB;(TPCy)dZc6Pl;#y0iTdnpCRP_uom047s z(qGw}9?5Hml4RF6j%Ut@MndEIR6a)@$~^X#*vJ)-iX=cP5dkb*L)RVkArxWP~N~zUpH247Ik3p-W5Jw%cK(K>ocF%Q^^2B=dNWSq) za)YOf4XjMUr^|RQ*KTLxTG1M_waSkJDh!kOh!u^Rnk+I>Scxb~`kKs`oPPx819!CG zVdq2W98QwmhMo{~8qbsV^Hb}vwQ_E5u1b-XUVkfsJ3vu-H)ousP?WRS z@?*bdWhCD1I`4egv1&$rWZu!nyR~{<`~YRvCPtU5MC6v+;f8Z&`heRPJ9r=C^YS`zE=M|=#kh-zRl!Bxj1 z|3RxT-*m`Fr7LnVU2Ku@@f2|^2{{SzL56Vza((v2@S=bPHw!qKXAcTB z>Bwt~@hB%Jga)_}KRkSVy=@H-haRMRW=Md)F7lszhy*N6eVVDv02Dd&2i=yu^ zvH49uA7P`YizN&Tpk5PMqwqvSQ4k_5iP4?qZQ+Q|uYXveJ*`esGciu~M)agINZ^rv zqeXi(>fj^50aRzng;_1rr;6opgmNSzDiuQ#lNy7$w>M0Z3wI_6B8)f^%jF1jJkoWg ze}W)qwKkkE8y1*x9RAeUY{ylCWEQzNEH#NQ+QTH_*Gv)$hM6R=PYFCI|CAt@g`aM^ zO(Wt6dw<@=f;JBsPFYe`TVkFJb|Mh`<0g=@kufk!TYHqY7)YYwL2P6gq%6k7+Jt2? 
z2I8LJa?)a9!5rx?Kx9c$sWq-rf7-WTCe^4SL^kCa#XF zW=MHh;~_hjcs+94>|hpW@p)~ZW$a&)YZC%cz*1sGvszyr`Mg>XjM=+Fay0g4zhj zj%rl6>Vay(Z+6@UCA-ouDp^oGIx3SR{!GK93CbVuopsY6XsLNt;G@ zh|nCT^hOn_)ZaFH`-xt&R%65&y+#>xaQ?h6AaKN>fm$Oha72(=^Ek7hAs&ot@P9k5 z$t%QSpX<|pO<>WH7yE@AS-=;@r)Nm>W!J3tYWOn4*l^<{N%~0(Nruw4mW41rFq5~9 zvp5NVkRWNoc$JtTELRpj-~4m-dufg=gI1<+=)k!gqs7$wzQ zyP9aV9SdS76n&FB(*mD{z#f52$4j4sskijjD1M6 z2lrXKubjut`&}b$Fse?(pY?H#uWaqh=kamnSFp@S&$&TQpoi=?%yn3L?tg6|M;u5d zW!*7`s{tnC=ZQIzW0OLTpvQ@h+$zkt7)3aeF`PN$dLiSBEM~1=io2AI@hiqhm46ZAo7edQX)imukg51-{;7&PmWCmo^&;S=TjU`MdL&DZ z#R7Z<57EDv@1!oKPaoO5t;LqCwqS0z;d5B!4llP*kN6yk?46h!zHkcg!NUd55Eq2rF47a7S<;l>9}=2Q zeahw0=_7t&7fxSnJ{q6gAyE`xpj=Z$ag(@AE1^Kn^Fv?W^Tfbbx7XTmq%kBlB3t&3 zRl0dx_URN=@P1DA`F~X)zwvdTc%#OxdRLc?ZPaI%nj`F%y@v?RCRF{mGC+u-5w}0R z#g#35h?{){#mAxM+e(hGiH+)!NGuwSR5gcVm5Ri6a&Jy7*8}8oLYTQp*vw`w3lrSR zWODN|%2?*kz*he6<)ZQHXbMzqi=k|bA7&XmttqY?gkt5#|%^%fC(mbwKS*=CczeX)TwyNRlBg5m%E@r-B6 z$5Yn^FQAQ3avLU590nkn;zBmt@cZr~%EQO%U8$a*1p~?z8#<5{bztjYM5Up<1~Z0g z>m3Fysj7`IRMs6BY8h2M$OWX=41e`Wr}&gcDYXrMTH8~M9Xv%Nr5jA*xWLo*!Ze!m z_N+f1kI^w*v?*>duWgN91RUQlL@h;k?@68Z>X@$jdkviSk~N)Z?`549G1c|s2@Wk> z+HiI*xgT`v3+4<%3ywE9i4weMPpy!@Z*?dsiURw;=L`66>sN1DKYiU>&3`;YzKHq# zV(`+iTBgjktSWuum=Jh>p0rvS?r$@-yllP=z^vw-ppu1 z=$b_9(Jn344qaqZR^Y;p3xN2by;t>hmwrmJ&xCxNvs@`&gX|A$!Bj>3Mn1K8DJ)A-@TAX<*doaCS10tA{t{8pFi3I;SBmmOR2thCXrY$Y!)4AA7jO9Bi4&c)A`U-FPj zmpTguB!3IKPak~U=b$eiK7U*;2KU%;P1|}l6gYl3z?Nu{gV&v z+04}OiDB#OCAPegZd)Gt9FWwOdlH1&w*72)=XtA>@z`ErE4a$p&U}n`emV}m55qK? zWiFo$*N$~!Yp&^@SlSZ1C!T&nlScPB*~Ha6Qh&k|zktC-cWEx_WdZS>L#ag zlWEDbrk4m-8td9Zk1OjvEpYAX(#WY>J%5zm>cKII;zZGx*wgCXi^gSiY++h&iuId@ zZuGnEO&;CFXl55QVL7v;6tjYFb#EL#bBw}>Mty}54MtnE+xm^@M^2`;*2`P$)V?<% zPgW0R^(#)&q4+^1nz+$)XC8MVGka|r0Yh`RM3bE`Ib@_!)#3P&k`HTX0)ZdXIDb+6 zg}p_g!zNGUz%;+TEJP@UFutdhi#6QD3!@&v?a|UrRZLTMi(vZ;{Tcr62z}+X; z!%HU0%Jg(1`+JQfrs_K`zxTCCo@-wp2REAK=9J=3wUzDuMJir!v9xTCz0|C>ibk@W z)hZi}*hS$!wmrLCVsVU4i!-?R%O4MJE~-lvMyye|-IeUMc4t#KWXco=?0@s_U;p8& zZ-4)r@BZzJ?|%0WIb}!%@3SP|{PTZ({ng+7@%R6jQ;cNTu4Nb(Q7F8wtv#C>MEc0j zC&rWin<=re65^YM1r0}{qm?#KR5+V-?qrhA9c^`I>e1zu=1^PE|1t^^(b2!5ImR%` zH%u~Gk**0lo{8N7Edc4C7Jt+tTi9e!@;&VT41*9l)Fl~fqdVI%e__)uZlo>DIbrtC z*RE&Ip3)KTX~y~{+R`M3Rg@i-)oAY652HJ5&hJP^Gt(9>37y#|OVf(4%K9?C_Fx*c z|C^9Yv^5eEDiaram&7G<6A@)aCPQxnywt0 z7CfKRdJ?C|(dCt9P=9Cb$g@|pG6IrQ0k^j2QNuGYY=LKfi8a3o zlMNUIQ7;fpxwH+^jtghx-8NA!Wk1RVx^jfNqIL!FTSWgI8sM^;2XK{B#Ys2vp6*fJ z2a>A5O{Q6)iX1O>BT|7sMwu#Vj!8g9ZjnW>cd72vV(pt$d4I7hsqWLTJ&;s+ZX)NH z6FfKVi~yaPy?yA#5P?9B*{g7h*wE?~Ix~Cw;4a`f3FXL*sIQ`Sq^F&k%RQF2(4oBd zX=bx-9gHXV!0rjsyXA7YArtmwJucHO0 zcP*4VBsaltThkwrM-shb#vICh_bYJIrtf}HXVzS=ABf;`7mB)1Yp(hwp6%r#kmKbl z3aaH4RP!k~o6M$$bm&G`^BY|*q+kgI3mK>sLs038f`12nL(F!xSzXX(20xV#tqTVS zKo_QRj{wkvDHuFA;d878ffM4(#{z&Z+zUOLBGM1-Dn-gM4YK#?lIh(9%XWMXVrTxD z9((V3VwZH}4-@z4+v&u?+Erg6cHx!TtE40(b|k7TybIs_`rp6)@;|@+;)_52&p(mB zzx~5+zkm7iSO52~zx?_)zy0>VzicSLd?}SYI;)}jt3;xe$2pnv_=wU()};0aRQ<5gR3)bq zL7aGOUQ4l;&Y*5ojxAUpoks7967J$O3ZpDcmyK{OynW7L#Hqz=(DvR6`*aJXXPqec z;E2zi{U>mu+|lemv9mesJ(hQevn|EWdI89C`_9*ET4BI+JY+(w~gzuU`HX-`IEmAA8Z znBcjb5sJi#(~gu^-lui>7EUTF^%Zx~9)G(SpgZ)R+4l+k^bho)h4y5!HbPe)2AR3& z5<4@lZf3!iMwC_FLG@ zQ@dqh!=3c@TiA^MX3N5c&+dK;n{j?53tN6)f}uHioWsX2dccE=(W4h$%_(Ah*?+VQ zawVF0(86PwaVBgj|Il#SM*gkTBV&=*Z<>7-DPy#5E81Wc-N8hgF(kXMM}ux9V{LVh z>UR3awmQgg=Zrmzea9ZMaz<+h1CqA-%^gtKptNmuoS}>6aC%_L8nTY-RWe{J=pO|vNel8c-wfm=cH?I!11cY+vKVU0{ujp zF};7PO!$r^eR>qRlOODwzrb{p>6I71pTQm}%jgYuEdOj!&spPPI>(Xbntz&AawZSW zH;RuQHbR^4sm;4|VdaZIC0vxMJ|mmlz)jsz zv%0tDE8CYf+c$x=<&>*CGzLH%91z*<5&CAA_v;~fow2%T$%bvI7DD?}EtHl-Gu1-P 
zB$YA6N+re0A^i*_Ymplk7k{#m`oGQ?IStAvh-FHb8@N$Au~9KICQOrJN@c~&4crt{ zYE;aOebAtoh%YH-e&9wi`9{UejDuT!gpa6qc%2zNHqpCDncn3FZi+c+RLsmkuSqe> z^e#VeTWMy7S51o9dJjkMmzZHEkdVw1XVn6+Ptk7SC)4M?yG035l&qNC@WJ9i4=v7A2$} zrG(_xB!oEWC?zy$O@Bh-QOZYYPd-P=X|**8Nk_`*X^Rp%PEONFb1jKTe!D=O2oA^E zV|z9;b?i^rUlP+%4+7lN55s_4+jxqaLK?DIo zKhV$j=>M5>&pgkq=XdAaIcLt?Gjq4V40H)JK^z}#&Tow~mWXGgbFj(tZ6QCVY<)+Q zemsySPxGccgszXdu%D+h^Yd-EYAfBbPao8u#PwsQon}R*D}rmAgkR@i+YjM^A#ZD+ zit$%cmy$CzXaxo8iL!6rDKgph57RJ@OV$+q>aF4t%rTQDjA^sp?V|bauw7m%!0JqJ z1FEi)P(zDaw6`YBZ&vkA6UVaF3&;ZXjyF zabfOIhxdp5XH1(+a#Ky5L4E6!8k>Hd&G`0(0z|4qu(FZX&K1EQnFEO7tS>&74_to) z>KD6yuB9E@-oUvIi_{`Rrw?y!{|w`-dO?(dlJz~9KS@WcYTEcN zKlnT6b2ma>z+u|eYjSpK@5z%+zf|*>$iiNalK&9UmfJzM=G3>8)~_N=LEwzoo?Ckm zfSrGPj++spdjFZK9Cc0RO|6s6xi&8>Hf_@{#kx{Dea zpP8(g$|#M=O7;PK_c`OffZgzy{0Mtpt!Dp(h-^m;7?n)>bVZg8@kE^y>xjJOR25JT zRs?a9JPPXwnJYK#w5?9;sPhQrzbzBa$Zk$g?KAx}4qzueM@$|TA?D?2+NqkK7;*C) zh;Cz_@!+oOV^62&MLh@aA5`(h? zXLXxhc7)0&CROo+Ib;LjZ~%ntUMUwRq2UOL`EU(HXTQ?S??3rSj8wf=?k1@N3>GodTGT7W5+dE8GavXj7r8#PiY8S{# zF0Ds-KP|Q6dBw8joM}~S_$WeAQ7LFoWOlMFmh?7aPUJ*mtw|IK3HaFT7>ft>%T7>s~SJ5Iy z|8Xtmo zdBjF%$Sn!5-_~vK_F5sESZ%px4J)g#DuJ6gnMWieLG|tVnh&m3huztm?1ON_PPYZ; zZ88X7N1f{#%j;!D78HDZ85 z?l89p&-RCd^JAlv+A@wDGmKM97|4fZ44n^|{DDY;i-KSlkHSAu0 z#PKYs%7DC-(Et2>MBuUq#<6FdA-b1cf^lG{yL35nD=-56P`h5~Q*pY;c?mqzGs0L5 z`;~Nujm|h>#+-su|ond4J7YF6%Q>t>WdK3|Dw14l~>8^2{?5 z1}^ejm}fv%gWH~$97^``uKHT|)a8V@d-gR|a%E8Od_K=M1bgsK4YRAIGhJVEiH}nX zz9ZDxDlwkofb+cfKo<5&XK0YT_mZ^ol=jWPZhGjVyE-iJRlVBR*lT^!?}Q_kIM-BHI$< zbKfqg-=Kyh@^R_X4(+?Z8)?M{?2*b1^rbKQ3mqHzIhx{kH8DdA{Fh=-hEk}s9+sVX z0_5oQFWs?MLCk%0mn4=n6R)0W z#olyZ19tDNaA97FR$N?Cji);+K0B_oU))LyjUItWHz@|G2DZ#}z8oepuSjePY%(~P zm?}@iYwH~wjG0(VuDe4$UA4SjKowj8J+NyPoX(ZCe*>8`*n3(LmBqMT(&MWoSU!sS zd4JLyPsV;lD>S`N`EYb&63d25nah)0Lgar`z=@7{HOV|unEvq&-0oxO;4;;pBzVkl zY;6hs!aW$FLNiPl+W$ToBR8sSwwP`RlmI9|G!#?qXDv2kMd^M?jafb{;NRBz3nKSp z?#{0X%E~UXc^$9OVCq}#ox|f>7rj~VuZ^am&)FNLEjG`&IhL8uitB`ANo-ap=&fC@ z6|0~2r#W<+b-k;uSFY$-$HujY?Q$qPq%u~V(O8M`TUQ+urHO2h-EB+{Ttz^agFO>7URaXdv z{!)Jwf}=#BW~>K>Z>iq+E?{?3ybKR3Q9*Hw##a;U#7u-xA)x!Xpm2psT^;KuYkaBr ziK&dTy*B|$xjFQ03XP|^)-=Ac?l2ijhp;Eje>kg_l>SvM7ZqPK>^l+!Rl!(Mhg|wH zHi-UOT`F$rY~Dj@c)d%+)9{_tCIX>Nv{C%(z-AE+85!85}kbp|Y|*7$(g3G`Gea zAY`W~jkP5xA!|P)clGEpt?2uVD*PTT{5oFG5II)>F8U~Q;bArl?tonI`&b0pH;NN? zfYyDAlY4)9ZsZHz;Vv7e;|}P6mi=RJp^t^|i~1~)OH#@gvXdoN8_+`Bri1VsOnX-;%@1<|3dR`aHfz5Fw}UV<-q+b<7+ zE$}tMrM5_%K>s=sF(beT02IK_dIr{vA0+jwoRPo}Qu}WP#qRKfg#K8{!hSDf1VB)12n;0h zONxhqG=I&03TOm=3zP&v+W!gE|5JuyHw8d&>NM43)MJmLBS8oN>|$YpAW`l!7K!!# z^pObwoY5uV&w;1LW`84PNgAZ)g8y$6_qSY`{R9BujQ+MdaQtP(;lzcd6arEGf&mo* zL5bTQ{uE(#g+QXe*db?Z{a3%(C%q&TNg=P8v4j7a&7I0g^uQ$8A!AY&ER!%ugy#%% zWDQi%-3|cG=Abl3^1v$yl*hL_t)@GlzfH$4(RJkD&nq0q*v`Hsb#7ZolLG zT|1jadao>Y3|5ll3$;{unltAi&n+maqLS-(TLzPD26! 
P*3+Ya8~_^m&X)fNlZr7z diff --git a/docs/images/Curve-arch.png b/docs/images/Curve-arch.png index a1e04569f25c347f28605f60a18c910fc7dfdff0..c7bf79fc4b8f49e5c2fc29a735b9c4c34c0d4ca6 100644 GIT binary patch literal 283471 zcmZ6SV{|3qwzXrYlXPr#$F@4QZQDDxZQDk7&@uOp&5mu`#+UQmaev(Nv#LhDW2{kc z&AHaBry>>QC6E#D5Wv8|kfkI=mBGNEM8UwIo#7zBz`%Kr3`D@7RIsE(g;YKC&vIdP zXJv81841r~NeajixCC|SgrIupgDP4nFJiae*1V&=5iful7lFgy9RLHNQ4sa+R@s*` zDar+Q+Ej|nQz%no`@qJDZ2t>}HX#uujD+DIY-Es0Ji&LdXTmSyZlu2C9SNXBJ;}{s zh7y1&HubCdVq3W5WKgxFZ8ocfVWW%K3 zon>XYFMT^Gx@ul%w$s5G5+TuNFU0zKzJg1$iNAI z1A;2r!oE+K;TYpPc3=M>`(sX0y!%+{Uyj~{)>OQ~RR&GGnLCDCQ$@EqCm_=lq zPz;;z?uI`ZQD8b*XVwiOLdvuBx>p#3@cccG$0%YnEKF_>`j9JU4@c=X4vF5Tq0@HY z5%uk}*c)#v7yZeJ?u|d%%w4R@$kfy&3oTfD3!5!!qo=TtP$1kZ!o~R6B23{ddkz1- z0!D*l6lCl*MMH_|HVi`o&%?w>1Q+KR2i_=zy|1*-qlhODJ!5<;=?FQX$8us?LsY$# zz!I`7%1$?Wt_~p1y8^cw+VZhvn<6aqjgf##DOB2X7M={9e?~v})g+*yf({5nvDs-1 zMy}`eL9kQFVpGo(gOZ=%w9rKwb0(1G{3Q91TtPt=rwDzr#L0z3p&~z>oU{a~vQVXk z{-EMMexzhEC9T*kz4jaX7-u+UPo51;IX5;5FxPGK|*f22_$zt|A(Pdf$l+{{gA^Tg!jV33vjXKDE@T_DWc z*;<=(cK~=;SlDctoKCZCW?bAix;p!fPHvYYMH`#b*49>pZyk7UfPg=USUO?3XN;SE z95ySGTZxP?`^Xoy5ahEKr{2x1tbi5~Ty9(5G^$*c;7;{lRyO^4Y*COU;OGbnc5 zm-07d@0i*i1yuMvFl7;~Grw}_J+4=;iM=(m5!izB0^*q2+25b8O`M!q2-Tt3*yiTu zVq#)Ie(wm#SsZrl7E?rdK(mg8br{*}JeH#5)KGiKYWGM3%1JGHiE_QJzb(Cq`+KdM z$UI@`a9z%R;P{1LKcPntOOg)4Tq--tAAHEK&d9HZ;Fn!b7wBnHS2{g!%_p)jF)`&f z@XqEy%ZbxZ^cH01ZAa?O2Av+vNM9;|A_55s$ycY4&);f7vI+9tt@Xj@Sm^S;Cjm4l zv`k*1z!muun*|>Krcr(4VYSyLuSG9C$XmI0 zaJ5g5z;XQ5+~E@QNYD4#NnM9T{cNO)G{aWOWlq<)ytJD$y+`*6HwuWfUa+7;lnk1wb8T-Dk8vW=rS8S`Fq!}M^C1VmAz`s)Tn&zw+)FlJusOlnWGzHAeg#R zT6kf;M)*PA2T={-vwLW*lg|x%d=ZD?f_-^AS|!#IWh3(MII_3ke7Xmt_=q|V zZH{-s=h+>Go?A2K@)WCrsx>A(E!cDTGr1xdg& z@J1%o0}4zGHBWFNeD3nVuevI{*J@^}=bYBhO~HvF~vKAHkEnwnUF zNPxcIYuCs`2ym4QyB##-(;n`kLLzqaqzB&#B$TcDIp-}hlkc-&uNU0bcTFy?zMCvO zWMF1wCM1CE<__jY+z@A`ByEm^<@u=v!cxC^j9Be8KRw}0V(}YB%fN$;&`qZxoSlge zt!0~^Hg3;}su{ZLZ#g;yS*#L!h2|_>5FU3GAHP(*1sSIyq-s#v%NtHM6e)@Br4!)Z zBfB6WJrhFK>3H!PQF9V`+ae)l<5sARYKuQ6>g*IxK`0sm*2#H3z~AaP50-=L;feh8a2MmO1wdB(o?O*}4jR^66 zf!^JJ*Ge8Gj|)evjNDpa6nK)dbR0p3@6}v5es8IWf^8C|-V|BFuV)Zt;o-0W{#X3R z-@`9iZ07=IAGSAUcgWj-(hUeo8^5mlh^1YLDx`)uX(_m*AGONOoYkH*xRna(L?bNg zvQH2_!Y1ml9nwMwB}D4ZCb>H`U;{af$p|+CtOcA?vF;BB@Dhpuk|pxMn~B^ZsX-WP zrZanL;7sjBg$5{)A@H&BfAmFVisb*cujyX<*;3g6dLlc>zCJ{+@5>+1bF`a+>D@do z?+!>6Uz>MH6{E9y<1B>JGZlK#r#G8Z2r=ZFDARsP^Z;T8K zqac5d$}U@RQH&V+WbJ}-(E#JYPRCq5T=F@DC|(4pJ0XVicXk785*NcrCaF!XJzRNNVc?jrv+RxtK7#GIJ6E-SZYTPDy#X#+^@qX5N4%!6F zSPlF8T0f|hnr)iyGt-o8{R(|u&4e2Gq26fz>C-rPKe08WaUh4^>e`i#+Y&x-N#OP^ zxq21{=?ci@?9pMrpljCZ+Kd$G#dj$?vw&8#0TGhsH$CY{!PNNgyY+}GQi&>C0?~mP zFfW)>3v8Qf(x-W?+SuZ|5vT0wNI5NjwD>4!yZp0YUnpHeVGYaM)}Auz*aaLbMZ9B| zdL0JxxysvNY+~|9Nz!M#cFPiO>OJqr!k=m9_J`deu8b4;F4`@{(uisF%P90h<(k6K68wu=ziXi%mn2}R@eJunb zq{tBtj{dK0+UXC~BXk@uF%yPs8rPUYtkl_HB;Eb-*Zp-ZNEafA1ElS<9MUT;KG8LL z4VeBNjhdr&!BE7v9v^#J(mJEWbLjTYU6z`v3 zB7GojyeXD^KV5eyaaiof(@rnUjFA8=k~`&(GCY|z(eJ~6DoF`Z-8Q}wGxCIM$eb7o zXvWJwpy7Wd`+<*F>dRA;Y-|U`V#Q7xRAMGbyr-i6Vm~TSe_hNsU-n@0AmC9hvFjoivfR=bnAo~goRjf4#l3f64wK3hX_S} zWF6o*ybP!vOjmaIp*^S0{-n1Bs{{P_@#L5m56{VOc6l4jx>DK@$N_!V03gJBS2s-K ztwiZKU7?{y8S$Ny0lGVF#IrLgsg&E8Yjd66ga3Y>6UAbpJ`ZnOT-@usHGqf@b|!z5 zk|*uJq$3TZ13LTyZ@=#4Wb$}AP8J4cTTzGa{5Q*&lXJJmtnmL4s|;eU55VQ4RiQi6=YQ5uJy#!K_YNv zP9%u?$*y>GRenV6g0vP|VdxI?Qz|Ry*F>lvD#}L#V;1hDGSLtX+E*Vl+(5ROw=~7H zP`MPEw~K>{r|0N{9uK-H0ASnpt1@CH8QK{AEdK8-o)N5$9t58LTbJ4LF}96(bG5BXU=6}tjX)jy z$P*zX7jfsJmVNE5paa&QR0lw-wVBQwe0XK9i#8uC}#M2FlFL6jfLIK`VmC z2ykig{I?CfU(oYuGIhX5JDBH|;iwLs%oK7^u-WMJRM?b}35$L)Pb#W}2bF91G?LTf 
zol0)wYJ}Eu4wj0^`D`COa(~8tZ;yc|xkK)z*njxJl88iwM`=^Y_-YzE{j`Ml(kw4J z28sR}R#H=ezv=UyYf?g)3#0w3tl4*W$9MZ?lqkJ#bApiKL|q2LdXqEOwXXil%{CKll`gZ$IjN7@Ak(eDTI3>sM2IS3n6#zul_RyqpiLqpUKnc ziM$UL`G4#yOl^u;#Otohfmd>iffN7i*;evw;&?axoVQt}Al4;unDeJ*Izv0RAmWTP zh@S?k#$zDqeOFa+^jbA|RpqWO?`Iq5tlFAiy`auy-R#?|EdalBL%S-H;7?@KsX7_D z(vuhy4etRZK3h?MCuLW7zmYSRoO}Hy8WBBJ%iY)VE;=HZ+^h33OM2}t$Q-QE*|PtL zSX&8;s!+;0Aq3-UE+(OFs5&Cj?TUV`_k#?KT`IbRrJ*@>hDcw9C3MtJwfg#=+=wGIT9&7-f zE7FfWrTzlaY;glkxZR(RX)}5Yt;&MOFOh{bPo0$3mLQ!z2v&<}#@7J+{aJ>4XEca`i^;3Upg^7{h?m^T> z^~cJ2(RJ#ce`T&jz!gz^Si2|lhW7ybwU5XGtBjkExIgwoH5zWB*i}!;mVOI>GC{13 zwZ4#Kv?EKN+$&27GgIx_dXu!%-W#d-I+48PfiHX6z8mi)A0RNEuwhYS^Ka}VEZbxq zphB_~pOjelW)qNZi^E;J4)Wsk%F3C}#(+E~257Bn-QEaInqDZA#BmX0@mXR6JDLlR zKdnzI_J%~^-pbyMcTzK?C|U#+%8Knym}h{1-ZNBZg<9!*)+D262mGM3bE=veG!jAu zq3I$gT}rM7p2X2jn^;xBsq^;Y0E3y^4r+WdlODK{qj(tU#d;{{*B)$-m-;~yE#Z$K z2Z~+yL`dxD(ZMkz1$IqYp*9=*<$L6i#mM{>>y1M9+fg?~Sz&%4-6~t=KFA#lRSMMt zY!aB`1j(@nYZT1GfJDQY>t!3bB}AyaecysXmds*0&v`Z#5EPrEELh>NySKVlIvKn1 znPhlPfVQ{Ks+s~HX0(866E6{eMt(uS)^whIcZY-kJu*H%H-}+^#){BuUc4nM(eyae znsc4~;{qq-H>Vt<-KIjJisvHK4lm3c2#`kT-D%%BPxcuSeCriaU+fT$kM$Qi4iQ!G zb0D1p*>cVGA=F`;-tHq&p6axPsUd67Xs~S<*cVY{z0Uc1@DNbM&7@Ic@>_J;OF=Un zYa2%kj3ad`9b<^O;K4UoMWi9;{z9|%CF=WY8O7k}TtD-`p1Zh^jS2ig%Wn}lAC+** zy0Gb4jyUrz+tot?UfLXC*W4m-3@hr@+CcBe zn&8mm*>vL9JEu0k+dNC}kMC@B&9*!lC%Y?B^=26G|51;8gkx|OuNq#*hOOmqr4RgX zuSC92$RT`+T^{etTm0unHjFM|&RxLA$(kIIC3+5eMu|pnl&_wM-#2c;o(T{{nfSaJ z(h8Ci5t6a%v%iT!?3&ruYSt`%IA&4GQ?AMTcJKsq+`{S8T@gTA-WoR|9>@c+!6Ibi z*cIwPrTK5kE}}qdrGJYS|EO#AoSW7sq1?G%JAFCJJG{2d9W9$RrWC9qxxGb_-cZGM zL4Uo+j`gKlZ>z(lAsjf8vJv}AhqBpZx_9PUF6J+aiTYS|L_$Iy%sGN`b9@7(w6M?Z zY|YK+cR%N$ubVpyQJrRj^B9mLv%hzx6{hD-sV0vdh6c?iKXb(J=(NUm7B$uZ!*EHE zHv)5lLE|#D%}YFJ8!;>=H5kk7{9p$YfcvyH4){Hxz)`V<10__J``QYqfCot}X6xSQ z)Z>C2`gxdZAo==g^vUXftH~Y?xpA+slRU3P9gdf3@w=3-gWb;E#qxA4%B{H7wDl`g z(R;^-`FrS=GP=>=J3Sh*Cg(g&C0ayIEr;jww8wUP;y)d}+@SdC?prh(KOgjNq%QgA zL^m3>oX_GLcrQ7A5(3aOSo#icHh@iw$f-j^^ts(wRb%%ww)K_k9z{80{1wytb%|sil*eA$(ww`XCnPlXzPaq$ zeNclc>KNzF?R`ab(C_xVLp2=cn|8=iwe{&3mp1uOQ@_f%X{c^h{X4V!CIyhK?9wsy zXlX46L`Ok^1sEqoK|UVVT^K5JtRU-S{O#lMwft=mQA0f62LFB6IqTu`88xRm3=d4$ zG&3fO27=3YdB3O$dR4%A)A9a9C>G*ndWc@Dudu(~(7sZqUckN8=je8)uFdXW&QI7E zJO!ZmT1ufQETpSD&hw>PbTN5I7A_|XUSag!A$IcuDo-t%3li1&fgt-1h_RdlXY$2a zT{CS|a)OgE`*hHEg5!q9?qxesc}URmcPuQ?V4C71w<3o(AuLaqv;*j~X4(qo)UW5) z>)&b+YId_{GOKU0vjN!4%_YQ~6LNPc2bsC*g&C*J1JStuR z=^i7DH|0LN&Wipw#x}F7%=m;|@P^-nwOtA6wBbbmB&ISBQkC%inMfdsGZ9>Hs9T+- zuqlOa$pOBkMRHoMoS@hX=l1d;q8 z`NT572N}T%2L(%|x9IahUu_ZAOaJWPM{LYFeJHht4W1IHj(yhujiRy*gkgS@Ozme{ z-|u~{HH5l0OE>XXO8{4v!xk*6`HfalrI#r312Yah1k{sQth8sze)FwSeA$7bw^voZ zot(`Yz0T)0-Rb9BM`$H81~6wFi6^J!(@leR9vYU6LxJAggC^$XEIZof1tjgo$Qu{< zpSj@JvaUpT2-Fik03~P3jIwM#fb@{RAX@5;!%K$TaF^Y!S%dP!ejOn-!(kvO8WZgH8KHgO$^+Fw^97w- z2JN;i77VpzD%)@~MBsg4vO?RrFCw68lZc-V#>|O#_Kt`h{R7-~C)H_c_KG`Jvof6a zvi^Xr+wxq!a9mCB0C#;>_$@HrwF>_&PuvtbkF}?dD2nwm`aQ116E_GLgcqaF^s*HLLvBxe-LW0DDo?^~+qU1oipDQJWN^>q{zCC^gctU)1_ zB8$<#Jr{i&=uz(4t^aSxG#%*mc!lG0tLsNRB}Elgodvu1D_r+(5Baj2@b+wsSNmBE z)(rf!8-tlQO8NlbZ%dCjPZJqxPj~&b+uREqQdC6xP-vV7FeS9BdBlYVLzH{wHSwo` zsG+yDReLSVbl#VB%`4G=ZSdEvdrV#V>Jd3K)zZO52StB8m4TR_#U6?l+V4_zauhYp zeJdgvsHtnsmK?ncgsyDM?BA~k(r@B8lWQ!nVIHY92yx&`1&I3*>mrd=A7VdlW80IA z&BLOitl<+$kF58EOd*t65nb3%4H-O_xA!JEi@o7}bx1P-Bt+)}K)vr|dP9;eyW9eA zxzmWawO`ra)K0L~s)Z}&SEdfVd8h3nm-AEl)F(vlmmtd~Mp_Er(t6%70%aRj*QB`5 zHj=Zw@5RSz?kyj%MZdSN@PS^7&4GI7y@2Q;9U$@8!1|B98JWwq-TzQs2!$97VPOJm zU$$B}BFQIPs=Vf$U|errUZ+sb>mM!7u`OrG*zb4z4m0rI%rWunyPp>O-Ayfim>69r 
zEB}7J!S|qI`2Dy=)|s~Sms^a^Gzz9IUu3T0oMwJM)AsUolNmSTTv!a|9ti{Wuow;a-Y?*9}8wMMh>VBC+;=;va4h!9LP{vJKJ-#{)fcoZr=5!Dx zUR|vt2<)9q9(QW<;8BCFrNw<+Q16!Cu!mtmw zb|ZSX1u3R~AsU%)3pzk!pm>fj=~-N5RZt{N7l|97M*sE}m1 zL*aL(XaA)nPe#h=05x7#4q0-bKxOjEY@mScbO6L4U8jfX>{caYPTUz0(C}=Q-rce`M@_+@3xxj3T{>5}mXe$_tK`i%se1UjK07&4c_zPxSkMGLqFaks;% z9v$@mcSxqrMxxpyg}v6G`g?mqq#xhG0|EUxUBa><6E{NJF$=m$-A|WC#Zo4L?M2^Z z8e(%-msYQ15@qCb^eLHC#Qe-m-A}0;2(*^BZ~{f_u7IHf18MZ^A?N#)FPPPBfffAT zUfN4jZ@Hv*bK5W>l2zGR0Ruar(X}lz#J;nldJEz5#aOhQ^6$L_ke)Bs?yxF_-i}Z_ zFi9y%I;3KaCM9rnfH#6YUve=y6&hYTO?T6kqDWvivR}SSB7FDar4|KDSLWQP$^4RC zHj3wU+YFpK+kDCHL4Wxvc)}^yp?#mY7b&n03`c{SS6rrCjx(J9PT;LQQO{uI(FVOX z2f{@s@UWrHuQ=Y_r}Ds=_VPP&9l(Q%L{jN^H_IEz7C{*SqCw>fJ2BvyK)rhFQ!6K( zM;hbD`-1Ag#VaLe36XSVY1=yoV-FtsG;_dhpKO+;VX-6>jxrP%OwbVp|9;$(SA8-5 zq5o}Bcu;tHr~hBnfKZ!$9JbHrMFleS9@N(`$}BR_p(R6(JfCf275T+`k=ta>wTMSY z7N>z>HTS`NI^C@~9)$b4caE%w0%G*@h4o^IQ@fQ(>}X^YWF4TtKhqyH&<-ZF7m6H_ z%UeZGgvM;#oaqM{=$$Bidljjb6m3NT0kJuE^k(HwMCsbX_M`qgcBKL(IjR`9GZ}i_ z(j+cqR`L|;zj(z8z)h63^%hvIH9X-g;NSdoETpK`qJjBkXV=)OR!E@Xrpx)Xs$u)w zvjrg}+BuG(hXQ$}$o~>3Go1B^|MjtPTG7!9B597N#L^UFhM5K77za1hB_>9gM_ing zki0k{E>WrE4CgB@*1 zVBOiC8fP+C)ylMV!iXM~jo%Z1{-*qgC7D;_bWNR>73H7)@nS78k~?M2eca|y-h*Pu zPkBpAs%@=5NPa}{tm<6rVq#(l2<+@sTarg(k?(X7RZ!%FL#V>bn`SCQj!eeR){Q`+ z4nl_82q!~6=X&dH|F_a;XIXoDO0iP;5ln1E9H!~n&tXz}disz4BmirL))4jiO#n_v zc=+SlGPwp;{o`QfKDOsrP$x!91ivi#vAc~+)3T>I{rWoop^V=`S{TneTA*}Zn z(_I+bz3%pVd~AB%g5p`QsZ#fg)2W_p!3D>G0ZNv1l;@{y5ho|7+7002#fDxl6+9wh z_Bm_#r_TM!JYswS6%8Zf=gIsZsbmU5Zl{vT{PTr86ENxZ4J)kV+hD{p<~@{uYH<|= z?mU^gcfk-;swrs#m73dsb+1h?rUUw+oeed|&BvT;%pma|{|MJ2;qtuoJ~vEDd5rE+ ztJLamKWKma8m|~w-+~rbKwQkm!ouSBy0d&;up3gykBW;@SJANf;u@YWq3qY&=Zj&R zP1%EchDSyUXOCe~h}MAKoE0|KBh1h$eW~AEfC(6gV9CGeVal!u0#vwSl}PKpG=>*X znlTD@Axab1@Dou77SbK;4uMfWb7>OTgz@(yJ}N2-ol-8ypULlqz`gk|f>}&W4TB&0 zE~Axo%^X_3wo?p*FlPBuZ2$nvy$!mq>^nX5V^oIA(RAQ&3Codx54Fnc347`O6dsfD zqnfRKQ2;ejF$#X!)(J5lD zHkQ&tlxo{=8b9%CdfLC_H4eeN{QZKS8x9w$p*=mEG7`=0()qmuYh@jAuWYwtp!>+ zJx_~_`6 z*_*Eq=ePdj_N&d+-Q;k=yO;&IFHyq@I7=jXJe23%-*R`pbj@t$6iP(q^|F>2|KqUu z+bHp_-j*hzKKCx=+jx7a)m2%GLF}W9vRaLxvT#~&Xjr_Gva%X{ zI(t?Me*TS>EM7vpWC}Tu|EImR^-7ss_8*Pf-iUmI4bi zZ(D^X+I`ic9V&;;<~lYXTAY)0Y?BMT_3?Og3;Grh4!qg)|Fm2B3oZbMB;dC8Rw5-0u%|7MYYm6c@63 zvVs$0gG}UP!U$RGoXS;u;*8bc;S)x|thh>NBA}E$m+&AKdOR*WZxSP? 
zvheLBl%MkXiz3BNFPZ5(Ah7<(Snn2_AnHU+=A<`2W{0y2EXVS8#;PB;2;Dq=XPt>V z>m8*2S)%%ZI7e3baG%U&fB5~2O7sI=>aCyyFxo97JrG2V&CIeBuBZ%MC~JW-ymE4R zjpjR0CRMO$xtI@O>=dMdM_^`8a#xlt4)2B<+cx-Eo6tAw9y;wXUOD@_G){)?=X1XC zG(Yk-Sxk<6z)v&Y#!>4Fk`gtFN|o2=lS__qjxdMv zh|pS$AECO6D)uTJy~vhUSH#vIF4t{>RBC(9asVhMqx42Ug9fW(-OdN~xhC`GVVO+8 zt7j+gDYq#`jVH7?;&NY%hN5CY;4LyNoB!BpYkLB=56#ZK*@z6zrvRPRSU}UY9!e(P zEm6Rv-`vz6lhtR(^@*cO*#g=d*_cfi%ogIqPc{a_wxU@lN)fm> zPKZE#@Y385AIMP`kwx2{XX5K$qk6mwjT!+(w7IkX#4VS<9N7UVmw!7&q!6um1{XI* z8F_j(MrrW6Y?uw=90@*~L!5Ya5(6O=D+4Dr?s2Sm%2?PYzem!SFzN2)i)6(GIt3cB z)h3i{L5Y6H$bi5#QmRvjw2??fe;yeR_a2)sK!rjAs4$OBSl}M3=1yG-(BHHD{%f;( zZc75k15IfA70NQ4(Vlnaqhv`jmB`_IDfpZewV?uvhr1_5i@mv-*}O_@`@=@@<4Btp zIHWJpBNEZImizSDeqz{d741g?tWs@qG4cKMq%E)IH^w>*?`l@Y9^-P9fNFb-RYnSa z1d1Ae)%vBh3sV~ZSNIB~hE}HMvvFsO{8yME;J7hkW&_vLlDHov@9E@|qr=TDB6!7R zoK=PBwkNb<6Mqes4t_;YX^V->tOjq;nlL!_<=Tiz?RaB&oa5W&Met`7TGPx=P^MTw zFCnHIK+1h<^4vBp)xf@!L7-(u(Lhr`*I`9(pIz`4T)9r;VBQ%VGAwpzcfOVua@l2k zcdBi3CQ7qZhL;^sv80R(HH3}H3LS482$MFUM*HJ8m9c=`7E#rzd4E_v7D`Cb#dc#oKU_wjAv72bD|m6 zmWDXQk#C*d0|xF#hv>}4Mmq++BoCZm6E*WqNA=c4a)P-Y>;tAD69f~uz9|TFk`+Q@ z-5Hp&dS{P7`h+-1H90~5FD3d<%Uh`tCxqH2lKr}dsz zt^~$HM01!K=#zZO{<$}_13$*2+*Y6HHSi|&pT`1;66=t(?@98gA}Z72)-IunC=fRx z&g3L5C5AwS-u7&Bt!Kyk)=Dox)SXNAk*yVusV!8@<~KzGdIN^o3ag z51eT}#7Qygz=x$#!pPcDzuiNTfMk@c6Uk8zt=W*GONhHQ^p^Y@WxDx5ksZkuQj`g~ z?P7e%R6q?Rxnyw7#J!rJHp87XboT8oacfdOcC%kH2-;y}e2^>v`X)o#Hkp3tu( z<#$`2)ekW0f5<$&O&pq>rMBe6Wed!}RNP@_JnpYD9w2vz1N^nw?Ec<@3=3@q@DJDZ zy7xS@0a$c+W@_Aou#dRgNL{In=OgIpmazs+|A=@sX7O9h9vXE{4IIh35oG5j5Ewjy zNwMA6%m_}jQti!+sl)Ei^$;J`^7#j^l+%_nYNF8{7DNsHPSY~wtg48G;NBthi&CfN zrqPUx7ie%5yr1bDbJ#A??OH5D_dL0La<4A8?--Ivs71Zx-6hm5_5W2J2&?MFAOVxb z4v(wawYz-17EZ_nO9|5rF`Dm$fFlf6pp&%wbvQf3{dNoigor@!ufROQip{6pgZXi@y}F8=;uN~BtzU;UGKioQ+RspTpM_>7>6XiU)UWR-cOA-v{ll`m#XRC zTcd3)!I|8kvFeVvozJ3fnTm>~U9Cq5ACVY?Xy!(LxCSs;_cJGiGL#WmpX&#`Q2c5G)ei!WW5ArC0QShebbQp|pLE=)1L;qQ{GT zB>;CifZsnu&>;I0MbP0Q(HQ7LV6xO{HgotGP;qtW3C%L99M{^78d*oZSW9`Zg5XWD zx82mWzPL_XTLXz_Pbawhu3a&p%Ax81)+f(Mox*TZ9IWz7{*zN+_;Q_{B>X;9K6azX z<`Pl?;9_c;7Pov9%lvEs>#FX+Jm2cPMawZWTSwmGO@7QJu9i6XDOvZ>Bk*o#J)nOW zC{Yg1gdBhSsao+b41;8vHWr_ggVUya^@qOk_Q>j4@&#DyD>9ADY||mX;$Qb}3I{oX z6NES{_;Gq9rMO6+_o1H7GulypEnHktihj*Z%bOj*OdU-}W^;{$^IvN0D% z)t{rxCTq5aI2a$wB3eP-45SABhq%Y^HW8}PgFH#GaHx{n4AGp5pW>$C0NCowW8tt& zjo@^tHdffktgEyq^@XlK(20>yO9}#~^W{y+P1A*gEMiN$#7tF@fKyR6MCLk^5}73) zc%t*SvAJdw$^MY9d_s9Oj@#|FuT@{!l4tH4mDtMwDC6tm(pi*Tto07p=~@u8_!sZe zUm1*rB(m!XSyBogRbquVQWe;~EjQ&Zbcn}&QLe@dvcPK43QYl>-#sLs5&`bhxVW^m>IQYfdLLM>Nsih7vZUz2G-M)D; zc3#E*Pf+AS`z#%ln}vouT4pj==a{IPtaagS(K9SB0lEmg$rEAlx@opNh-T4>rrF01 zeIv1L6;t(VlMr<-jyk-t+Ze0tft8&4-u2lOx6Q|{O5^||zE>N|&aN2ynZ#`7eAEN| z9&rOdFeS9XS&tJ>d&i^d;#_a}npJEkQn48txZSKibh{p*G9hr$96?J*mBqKhIw5?Af@^!jD1W_UE%1A*+} zL>+gx|8_|~E$fz`G$e_T+n_vA6)5^E$Sj};!j23*#%l+M-3OM$Hb~22EEDqZ=kD}nEmC+KDi<9TG zep~~#(EL`me~yl)8nXN|KO8jU)X3xKpPHXeUarMqD~r>ot1VK?l{FvRda8h{B2qye zJ%5(KGvhK|89tSDEMo-SHA%77Jh^Q{ymvH{f}K2&H?0#?2jl95-j|qVQuKXOW`H)p znQD!5H0p*GrWxC)(r!*#0^C$Z6-WEMp(q_PR=x^J2{^{@!^r8yLHCf%G)A+vGLkVY z3K2nATxvteZm|4w0g*4~k(Rys^NwEPVRBKpKjRhxjC1vZH2wl<3yFiv+qGJ_*xrB1 z@HpkCfWgm;%nGL43NOh$H%Id+)WLqlT8`yutU=}$H)Yd0C8)%*&J!%-Awlh_E@YV8 zuSu;6MdNLmnGL`eK%i%kNcTpE1;@W7R4mL3}tK_}LiK6Qu&$ckFC0`zw)pX+)k8 z3JKU9pbmlhC-(|~Q3$nam)8tUY!J_qwmyK--VYnzfALbL{DEIlufUuiS;qT*p*DXs#=zr*Rxm6`GG&L9wM|a%1R!T zwU=ERx|1!1R!2adu!$SAX<_@Q$6Dc=vYmn7My0sVm-2Gm(Or@GP!pNf5dXszgWLkE zbz%*-sHg}DAX%CzZEXDAQCCx(^I>Wy@wp^NV-~gZRvF zbehBZ(~@$N_d7kJ6=|_4Vx zc_0d~H%7a+`MBdU_&o7fi~jlLs^jFkNOp%-q>>SLwnu|&a#_u3ZH~1y_+Tf7Iabo0 
zSruZBGal5}{}uJm7W-&_r;f=BnvgNQIY^FqVaof+ib`Auy*{RxVVxgL{^$!VE7=#C8MP7*;AI24 z{DZec0m8r?KMdZcq03*}3bJD~2Lpw=hMJ0gb3S|`JH6u8asSc)8QCAoYC7Fa#wd|rvQci7(Cf2rDwn!zd^w|eMx~Oc2&(ljAy=LC)_^6f+>CsxW?Lm z40B~s>o}`akQ5?%z^v!~QWAX|J|u)LED zmJ8uonDW*=iqSZuy7k z{I-U88Ym6MKPoE;r^$R-uu5my;LiIg(&N3&Y?sG-R!y$oX?!!H;W4zW7^#{vKTd!x zX}DQ^+3~urTx9^0ghmBG=LV9#R%vPpgm6sYLe_e5bN( zn9|y}XPQ-N(*K)E`XB>V$|6#>l?(S9CChe_+Z>_d92%4m_vNc9RSF+RivlzwT7f^9 zrNhBast`t1L%;|_YAwoJC0Kiu6A$eBpfEBu7zQjU0#Q^>xDoVB-gh-^TfPUeagcQq zcNs;%BZG_ZvN=(7hdPb~Lx%Ac>~V%J3h|U6SB~6|-5dQZJvEB5qXTE9rolWq4?1`+b$LUeBIgd+9O7OH`ZaR;UL{cuJqIqbC& zrRXjQe5-plb`2y!3p0&VQk0Kdn*9PtimCF4iw3FJ&i(BcLWJ^wgbeEu{UVA>@9=hin?`OG-#0E?iQTJ-JRg>1b25QKyV07V678-YV zcL*-0zr9b@*>&sQQ}=hT>NVG@vF03e%=dkU$!TKLRA86fh$OK)qZmmx6nkBmAc1v@ zoNRyrnNzJdP@Jf#Bx0tVaq(!l#m@5DQ1;_*SWHLJTWdw(v{N7Gl|F0Gj#BP0tp^^H zRHXia=XD9XDuXxiG%2Dv;eLW&A$bO`+WP|TYPVQ^W^K#AK-sx~nc$BY! ze|*=1cX6JAmvOpzS@IvTFr9pqyFRm#2G&udMBHib5smzYI?1=XbDz@P*B0mv8F0L| z*(lwkn?-@5g?*-7(F0@WZ@Zf7nC;0?TZ=w`4r&soS=>X|V-4|ex{$?Ep}V7#i9PU& zCEL8)Wr?QL`M4Z|xA#I{YAk?ae0<1Hab4bWm+K@%j>74l35C%@)OOBlyD{d*Nr6Lh zwSsWQN)f9G-oxg)ikwGP<9HdgrM;I=NM`~15Ep*`{6r2rG4X*_>tj8USfAQdrh}FD zPIhw+1K07_-_=&}+80`xrb-Ec%m5#W=k0r~BuR!l{Xv>hF;X z9AzXbw??vJ1=xPgk4R$tt6X;1=W99R+A!oM@JNE5p`wsg$n>k%!%grLANN1o`MY-U z?|J~@S&0K=Qz?~RaO)P2IvC8u_BAo0=yjhW4fx?h7_fT{hoJ$M7W{iV08T*SJEZPbBcrhfx+@RVn z{z2#H&Je>Bc03IYJ^-xjatso8!(3t?Rwz{&2rs!)-? zzyDp_596G z)poVH3xW*kseC>s>KanGYu7v*)!aUQMIxbFEKUw%zLV6K`K1*bH0rjR$@h6Pagv(C zYyHJSLgn%)Fj|e(MsbfkbZ0hMCJWT#0h3K3WspJ|d_;0%g zzs6Fe24eEq^%x8HIetRmibchX&;YhZ7@Tso-Z7?l5FAm(8r29NFJ7zaaPw5Q-NGuS zp}oNdEYQ?;Xkm3i4Z0#LGKKT>3tRkT-oGhBx@i-F%m^W{?Lae3iLuum^eWOqGC&WX zE-lPn#0np^ES2R@3eEsAV|+P_n# zDzK{FjJFidxA=#J?}wNIkCQAspa5!llcxR~Y|u~ZMv_F#giC&=eo|izW(gQR>Kf?@ znc|9IcKYpqIDN5J`OHnk=#=UOk!Hv;3S$3581nx-&eQNZhC)DyfFA|oAw-zQXONL4 zAgcf<WCE9Iw6<4^sj)8`-*$?&9By^eL=eRaVC7 zg4vc=`kcHpJm-%{Oymd_gFzurNMWC~Y6VyI2wNMNm04t_wDOr6L!l1^&PWxsJR4s1 zpy_EP-1fFMbUazeO|SUz5q0j0w-M7iEd?Ow`^P&U`A%DTY)N5%gVc^fp_6lYpsiK$ z3npt+63Rb_Hyns@lOI0PBG!f5TCJzdp;3jT_G({9DupU@82?|3<7)HUA&e(Uh3OYjL7 zKs->NRc#V=wFs>=aQhqF%>PLW-U7vOG3T*H(xfESrkz5LhbF?x>jZQrh8~ts>hs3r z2ZOr*5EB+#+n`e5F>ejwThd@+pRBqQIzzwWLx1t!ZIA^5fj#~)3WT2j#*5zG-joYQ zcEd9O77vGvx*HoWb{mJ`S5?>A&s6d+cRB1Cc>E~y0LP4;>Av^= zjaBYmJ4^nHBjo$k>N91)%*@>Se71CT0x;iQ4U9BNNn%N#n9RBW`~I9>C$q*$Onsk6 z4B*BCuIg6yTDVTPy># z$PXdo`kF%pjf`6~a6B_ncz!oVI`w01?yvMc8I)<|)M;ed=(2&E`lFe)onWWSHQ@Mi zUv`bEw)@CZfb9`-(<%peIsAZZupKb&6kQ2+dFO22zih;tGsgY6HZ}`EY_yz#iX@|> zlQA?T*_m#2`k7XppWlCWot=^rCN;`*&HYankVB(4LgC(yoxM|m-rHYDS>DBwn)t@kxS(Q?t;oR~gDexi7}$VL*rz-59tq1ZGnV%u-WPg^o72-(PJB z(U$J|elnxRwxoR{oi;Ep6Usn1sOT6t^{U+=$9*Qudcu=-8KZ`riqdK35{Ll(!uI*D0;z!qvew<5w z-FQUDNa1#cO?_bY7pQMjFB!;>6uih05Y{^3P}*L3dI-?Tfl|C6fpDo@BcoDe;YX}> zUYZ40TYA-VnFx7=FQdign_uLEf`bxbs=eOquD5#!Al_p}WcuRgvFc0H7x&t@$b>oi zthM|4%wEw1zKUoECK-IHOKzV@AR$GqoV{UT9b1l9C?fma9e(xD`>m2x0&<@TEy_RM zLnBWn2@c3qLs5r(8^zirU82nsSDdiH)1H@Sj8E4TE+X~gB>Jt#BQ>kJT{|P+lHDnE zonVFpy*I27Ol}Ax)n(Q&V+VNsv4y>Vw8X*BpLvX{PT62c04C*l^7}+5zC^&~U&vSo z2dDuuzRSW6MVG1z2HwG|ldpiWB9{*{*-Zk4aJ}~gHG0VYolL@pMlij+z%fzMdZwT8Q13^ z#zapb12*DgnBL#d+Za4w?VgA}Lr0MhSi?HRO4LrQ+kLl`PwFa}sH9;KJ$Ex!KzJ1& z4OW;HA3=(5R~5$&58$7~W|tRjWY=wYYQa-0oOY`Rzoz((kof#{ybohKW=1AW6g9rIfq_0lVN6o=-%WZuk?gnDOvVj3;?Ol*9<|brM)yUSqUgF$)O; z?Qngq;|rc03krTNRqHEe^LyRwN*Ee$vHl#+79zyq#TNUi^;LkA3sC&DtCkKUZhKoVY)kd%Rd1Sp)O4P(0kumv81u6qR%W@3gGI z3b|`~x8q)YTUl2c93nf=jF{d|4oisO1~1YY6|x=+f2kU&JWKqKl;uCn4~u3|NbW9} z?bqq>oQf`#Y-Q-Casd5IAHmLmsL^u!+S=alMoKI!gE`S6eZ6RiU&1^;F{j(~D%>i&-SunV0 z!nYd3!27_sXIE|Y5=t38%^@v`_u4+3>fj6ihPTL2UzGlzb;&U(d^!(@@{{fB{4sDO 
zZ5t=TiXMr>j3=LvBN*ArJnOS+^P(xDQd*itu-nJV3VdjxPB9v9Zra8cmcM;YPY1dq z|AUUrF!M#Ieg$2|b%s+V=o%; z+;DDeK#KM*q>AbDmD-g}O19p6;l6{4wxl1=b%%jY_rBf;qN@_eW$3;xP&g|lM=q4U zJB(>Sid;xG-E~fmU?xKwnym?={z5|F@^2iZ#f|~IYuT1kIWIUyqB}43!BK{|-Tk>s zJfyXP(#K522~lT3t*k3wSKqAoJAm^3SbfML7%qQ()dHs(t+#q($ztjhTI_zzp|}pMZzQD!1knt49XI7xndO-97Z(I%{fX@(`IOW$MbJDfDMGRQ&CUb_)i6*okXfK;{OjLLob_pk%U@n8Bu3eeG081W0q>Id zBn#Hb@BN>}gnNIRv>_GWhdpk-_NFcxy($o0b_8slTmV%JLSMfO5@k;->nr+=w!iYV z=x1|CLM}KBU42z>gC9PoIM7Ih&SgQSHE}_P0e@rumOnRd_PBVpm?GC%Hx0z{Jm0!U zIy!k*V|4DntG5&^o(&6_Pk&3<+iEU)!^*XHh$nbS*kDSZwG)q`N6hlLCCMJbK5LwZ zs;|K7x%T8|d-cU67GZxKU^$#-!9}rxk3#i2X3t1qU6=QCg>3OsrD=?xVZXV~Tg8r`7N98|CK< z_>FcC50PSyCz+jTA=IwJIy2rR+uhzNn~6`p9iC@XcnCe@1cKU>ZR>nT3FA5(>gsq~ zxN44@B#zhdP5!vKyn$$Y`TV0o6EJ%-c1fzD<&Z_HI=*^wTd8ymgfoYhd~y><2Iz|U z5y&RW_+te7y=%UDX@_8grAI|53HqR?Ro!O^8WxotnB>N-%`NJ7HDokk_StSVs%TkN zE>8Xav4$Q3AeWo3@Bo!kba@*L1c z+?;3r>x`H4Y2bPO9wB;wE6>QNH6c;CJ=luYaNnM+{HE(@7_~}LDMQM)BDN!xjQ8o#F_oJ98Ve3m4mx(E-Mf}{D zZ;rdC(2O7JI<;rxxdIMMQ|!Q%jvKtm!rG~9KU@=g!iMJXjoCFY8|!gX=SRCh*N;jz zLAoW2f~o2f^bt9F({piQ*1m$h3v~!|^qCq7p_$$AM{cc5ixF=0t6TphX%;Is6&04s z0{+TCbUe{z0tZukPHR#b?8TR9AxQ;-Z|Mufh31soYY|R;;cMCi3HUGW>?N{v(-baZqi30w4@<7XUu<{AO(JZL!U-&faFMJB?{n+H$? zdAVREt0x*sT8~f=lg@2l>fH!;gTtUAzvUkDt={XMr;XGsTe)kz{;d7Byy${@aK^g4 zUvliJ0@%`GUQMa>6YBv1gU!GCV&dH1Ov4vtL*|MfOCeD_pBIu0Cvr{#0o4MISv)DL zCgc5+iwfYWSl-YO9)$wE? z$|5Xn+;3ZEXTK~AT?nkkxQJYkw`@yu!=;dEA+XurQt-T%556Gb;r7clbJ3vJlnxnW zk39!4*tgY35gvaVScyT1puoK*YwI1?!h)xvqC*I4H6n$6G%62FPF-%bc{4|M!YZK{ zf^E2v=_`=CwwYjPnS$13w|633{NT*;rQWeYkACBd-`@D!P8M^WkCk(>X!K+ZUAps| zE!jSDm-Ar5Lqbz)w7h@6mDLxOXN}ie&0nNQ+RUbGem06u)KnVqm*%m*16--8CL%yf zccd4hU>L08KoD;zfi#M>T z)Iyo4?_cLc=idf64>G1vnuxg z+9UmS<(O=}larM9%&jl!gWM#c(BY;B|9_<=|D%vaQ4p;~ z)NKE)`Own=SWIKzjW<`JSHt;-_ZOIo>1^J1cHFe2p;#N|0~7`4Jf+pl)Jb0s*4ND`>KCm(D4w9PBYr@p4aCg&VF^c z;(Fm!(v>D0d8D)8ETYb4SDAb?gL|4NVyKQ3hu@GLjZ%w{fM53a? z=7?M-_V$xG{)Z1homqK>+LQA=f4k3QRzTO7PURptas{ZxsSLeJUV;G4vO4fG4W#DF}j#wst=X2 z>qMoitj9iVTU!oNo5#aU`9m{(Hj&@JEnC4u0~b<^f5^zjx%_tCQ!Gq(cYsx?gkQ3( zgN=RV6dsLXzo`^clhbNi?;Fvxd|Ly@pV}Lfmw=xgtvqdE{KzEy?n6U+73&uY!37vc zSu(4P263dG|B0iD#V<3QDZ3UcfWmJdfbLT;ahWG$6JBa zjCNTmDNh+4Tt;SkQBmXknRkYbkLQL=Uk99*+F5dqhCMjbXB%wCQ8Yv9(XW+Q>V|R? 
zN5dB4VN+{fj2l!&InF&Decm#nD8x)^JN!on9C1badTT?Su`GB7ZZ5+(bf1ypjV7fa zBC8Q#w@KjII{o0^c;E2o=Wj&8QOJR&#adBsL@IlmOL5!al~&+f*;hNr>VN#p&R-#2^{8L5A_H- z+}vQ$Do#jw;{v%2?FDh04<#cnH(V^&O(Tmz4W9q9g)y=0!UGZn3PTlKs8hqWGB*Eanoj_ z+m*NwG`xJ6kG3J=9s&8XLeW0Xzsj1>u)D;Fx#4mTgyUV$gNt`1C5Eh}`o}AA6Qp^s-b>>oLHb{%2}##)SB z3uA8kIfnQ)IQp0ShyRX11A~g99Af0i?Ek*cV>)2qAnyl#vKm@AkfpcQ0;2MaqN(*8 z9BE6aXmN4LbuOpUa=TiWU^VRbag68Mx%Q8dzi2Gy#*x4$eCW{al3uo4<2+~Q`sR%J zyRaV+=Nzs>sm$9`lb^_eIfvL^Kwrm+tt(o# zGTv>8A&@aaQ;->%UgtrZfxVeFa!8SeeNRgIQFmp`^dv`T?{appRXrbq#V$X+sJ!go ziKl`S?>nx+;J#!)Btri)wI><0_NFG${lN!eC+k3k%q=``D1xM_j2cIDmK#)8i1U^8$z&oSN3@tRpoUx}fW{=9( z`voHImvePqQ7p9$#h}gXwbR24KO<~Zbp8m$%v)z?K=^{l`0d4QR1W=_pvq!e{O8Z} z=PSP4mE6RYRH+L3vLCdg|H6S4e2uyv_g+7`X`aZM z=0u@8^`?D@R%pe+zz`1}uE|7^z9%B~<9$k8<)H`8kaP%3r-Qe$Tdt*M%^HJA{|6#- z)b4t@+1JZ!`-9CJMUv&Z{wW+d)S7p&up41LRIyTUy3e}b5^jl(@5!TX5SY-DGK1el zwKxApWfN|GiJGrM7e0p>RnH9N{jI9t)X%&Z- z;bW=nb!(Ze!9U;i=5(gArD$7+@Z*l=Dw1SVMm>V5kByyPe?nu^C&(O@0v{TW^>b{+ zj+Rbc!i70t3E_gR;B&6=i25az^0^fc1-HBD;+mB{3-`D8&)|XqZ1}`KBc;m8#@*&* zjNz7L+5GEEa1gvP`f5)EEW{=t-tQjc>qYuH{&TJoLL3GICe9D2wq1?+6*i4sQ$?Si z?*fLK5^`l#)XlY`#y?hLwGfZNNTS^f3&-m@yxalK7iy zzOcclDFeX!7$RMSOernIR-Ll}A#!?zr<6YX@5u*1n^3Was+-X@wcU;{2BVdo;aIic z{o1eKAhH_rCO1(&@Va*W?WoQ8Dylj=OY_;XpM{mJmcvGg=l!<8p-Q3lwJW{Z{-A3Y z;|l}yu1>rLeo$_pg)0RZ10+f=#A^w4CB}T}EBO2Z zryuK+$K|l8*I0zCa^RrH2ztX}N!N;N#6ZgWjVvQQnEzo@Y{O^886z!y+Gb8H__?Vy`$+pG-bD5BwY4xq4XKLg}3UaPNCtn7}h$cDbaIL6Vs@ru<|tj+qBHMG&=? zG#~BQISdAolP;O@yg2@gz{*nBYx#CSRt7B%8by1FA?T|$^8l8HXWBhzCYk(#aAb_x z&^)R+Vb;7#C^iR+VYybOeRZeb1(SfI^vpSV<(p7u{#Eg%`3!L7h#%Vh51S@|+3G^> zSx(2}RQ%sUrwYYcd0WO`7kw6I06yaTx zbo+B3Y=vcbgz_xWYr@rV-E>#mnuI4&L%1!0dWuh<%hS6q1dsjVUM2cNsODGCli4eU zxUi976wLlY0?Y1mukeWnw;_`_hQ!~mI3cI{19}k)pC7?E$aeq%`-uIZt$^3+)$aGn z70r>-`$LHuM<%J!Yb?HSB-R>b{Fs{&ibXkQ_0=(cXPh%jey3MJjNF`C6((k1NbZrY#BxKqT;wn%?H(Tm-{~1pfZzb*YB&|AZ+2f4Xoh%PD-$Ke*!>a%SsE|;yso2QM=%hjO zRBMVf&(W2;6OEKA#x*tD7INgI9IC178$oXFr#(-ng^>HCIje`ye5PZ^|wJ;CAh(ORam z{>b!vG7bveT4l%m7YgXfTa~`Lns14bT#9a#<7!-)2ytqv#csA$$NyWsaOgHLLSmIr z(;c5tMp3T_`A_*eAkrJv{ow+UFFbRqokdMdm*Y+Z0Y>l*>5k`Fsa4Wg@O^h*0 z5IX$4JG_uJib!E?)gpqC)kJC;Ag)Q`x8=|vdkPKYUVHa`NL$Gj9M7Uvs=|&3g84sq z<8N$BzncB$NbTW@M2^B~$#D>sVU%fq8@7{;2Rd2342Mg2me92Q0Lpzs$pr^2^(N#_ zYP1mh?Ri%!gpoB*S7l1EPBoMY4l?+d1M;-M4%ki>L?$RM|sr?YFd{BZ|1lLo35|Yt>v_}q7B=TeR ztc)1otv@=Qtu^PkX<09zii0VpUMtit7`J8^EQqo{g2p3d!;y!8BuI7~N1*-0$w@x>zYvP$pjJK`+%-}2?6 z0!tGCaQ!&QcHtvEAMza5YE@;eUbc%o)@RT7jGV*tPqf0k_|}!)VC=A7o`hJk2*H*8 zH95B^NNmRDpgKkU>GIKzXDz}}H*cvIFEO0h$A9ziFmMb|Oy7Dz!`p8dm!h+RkyL>m z0ZphNjvJ-uw*>b2V~4z|9o=s*%MGh30x&NNO0~o7QzzM`%3cc0DBJ}L_tht9&~=$N z>=rP67%@Ihov^6C?qRawdR?{tz1?O^?5|bOi=yibhcT-R=xqoW3S#&^{owj1)=0Q( zDLSKG&%De$CAv&jDq__l+oDC}Z;ku|%jH5HQ39h35h)Ra|5 zth&ktNv>MFi?lu&>t!|VP5R5n*uqP{U5s0$uH2^#J-8i2;sZ^VdCq%8(wsiYjcu~PJa2389~8hYzbscf=~R*W#e-9^BDUl^8%XL zlZ4Abqe^<7jOPKeEV<(u&mE+&A$R<&F{9#a)lvuNS#yb_<=tda87rQG)lo%*fZ#>; zsm-HF$EICH9+%Ix=%9*bz@mud?T3w1iv#hqLZFrxI@he(2M?G0^4(_S_8Wv68Xzpn{`$^5q zETd0G1TxCCoF4O20zf3j`%4bj?7Xa61PtcRW9t>M1ia<5FVg6k3EpU;puLXuSBjB% z7$W!WLED~J4-akc+egrW6*^FXsp~*#%w=RrrUS~fX^=7(gdLW0Ov(s;GxFH**>P5(4au@D!5gbQc*sO?nXF&G$ zmjj5$MxQp7N_Hv?PXZBBVlQt*+k8fYU;`Jma&BI@Oqhn|8hz?d-pDQ){A_x%{#LK^ zbC^1AQ0N#pK-pTh&k%09L!L_0k~4S%T5C2hK3iT`cxX!nK+VVSQw zB~z{v8wT1*+kNFPGM}MlfXO4KhV%6DbLx*Q!LgUtRO{0V)(>H$-g!0~bB5d%Ovp$m zqXD$PO1F-$lIP7us{wwj&{@Xn#jz11@$scCp8TIbx2IX}w*O({JK;`uY)5!{a6Q#k z98TC&iM9Dq2(q6F#(NbOq(?_18xYo{6%`h31J8aqhg%q90le=Z9c%a-zLW^JAcWqWf7dw=^rRU6YIxm5k} zQENYlI#OI~zCxU~WiT23a*vV&4pBUOdZ^raE5OT<)m?vWi;&B2K@k&hBJzbjA9)*- 
zt>njJI2k4~vQwWUBFT?uv4NLcM_FB30Sq@iYe)RECdQvP1JqmtV25^dq3r&UKnbl+3XJ1v3%OAxa& z(J9T!k}sM+l8QE#dETXdxkC?VWDF^jsN(xvoG#AnERJ7R3-H-fofG^av+ z6{n_W?br2+Xm{2~X5eZR*>l`p?*Gh&MP|ySHm}1Yv-An0x!J=&{1m%136rQp>J^&k zf<5%)P>vNvc(nVVBWK;?=fo}km4%AY;%7o3tDc2YAS*2e_@0I1{4u2cofbQj5Q5K@)eUMK5!e8 zgM&(h@2^raJ>j%ET$}^RRD)-<Oif1lzgL)H zd?goy1bZq$1omS&&TlXD9h~YX=>3v2<}a(au8+B*pqqjAJ4h^~a7#PPTa*N1 zuuEZckHQJgR5a5(N2s|~uStrKza5u}?^~0(i3PbBlm6;SFL;O8*E@yio3@^h(8AB} z@(L=pT4+D$dDhY5sk^0w8P#OcC1Ixa`nq~?K(s0>i&L}Y1;tA}+c)n{r6i%AUA%g8To|pX zRX4>Hl^i8<=`cWNjgAOa4`C;X+avV|f<3lEFRj8Q+R&>ls4YD3RSwA&N+5EfoZpp&sf0x{iUYw7xn7#Wv^WIsL%U{Prc(A_o zzrW+{D3*}Gj1uWGY`X5QpWj;u_Yi*`?*r}&M*xz8+w=2FzV`$DAamMmkG-KI}``kCd`CZT-sWj>sl0m^j zi;)EfO`qHiv{8nix%yYwhW~X;WdPf9yohxc4SDeWnC5csk(h7GZ!JP9E_V70 z1tm6eZ)_N-r;akrN5@Fa^dH(7%z)4sq~P~vTS^{_2Uy=grJ1IiC&yxb4Mc)6lQB9J zwi3KGY;Wa=6>$xyX*Dbl9SYuL2~!nxJGV;EPrG&L?akqK>mjPyfc&B=jWSMY9GyK; zeDB-cZy-we3Nf;ooQO)8p9?B+nyeww$HwjtlaGb-<(ZLhqKCEijK3=fq03f6G0lDe z;wP!K8ulgCY_BHGK1c@#WS||Ef1_@elTivl(@kn#T9Gw6ndMeN*}(KaXr$BIykexo zn-2@`O5z86V$waRC+y~#Vr$PMPiEB3I;hK~qE9M#L93Z!7-xZ`%f+xb?slX!qK5Is zH7M(cc3?mCNcJ;wBQxrnEU~elCpSC4EAGmi!YC*pd~c^DFU>b6twyOb;`ZMI*{Ob1 zkx2#M;1K%*cQ>;~BgLW0tMM`eCAxmgOI!cyT~3P!gIi!>T&NVCXrgfFRP{me^9Xr2 zb?o2SdbMALmqW^`e~2Ga&ilYDkAJ$b}T==NXdrT`Bm6I+-;DAE)UN5 zK5}jeJ~h987~bfb#gM|D|9!(Jm?M1CjSxA$OBfY^eirAieZCMqDr-VHE&nvNGqOVkqIp9f31hY zT0Lz8fuRSYa+@v`Lt4z-n^GaSTfYi<#jglrHK%^)zp5T!S3_RiT)gR=HN;{~zK$y;C2YdEhKIk|@BW`;L({U7(@pE#Ywp^8n?%)29v@ zgpm;X+D?}(CnbgJKB(H>PEsG0?kyCqkXPEUvhtKCimPL*(O!JCvhgvn#3{U)oQv7k z9BQFc;+ffVP%U_)^KeS@e0So5#fZ(GPcEcQizR|f;&CfjJ@6LpQTgC|>;oRg=>am> zS`k6h(Vfr!ptN)EUqZ@0+(1=(D0vT*9>3YP3R8t+gypd^IGW|xfUQSDETYF9{e!1| za?5}Kj{)&`nOSEubh1p1_`u^=$hqA6;^HoDg-A>Ey#(UclPAw;UcGe1t5S7k8ybo8 zU`2Y2Ah40MFlYwD8`^#MEtXm=TG8O6k+i2$R_nn@1lVqwTDUwGl9!2SU~EsO(DKvS zYCsy&n9OciD^U#pBpbCOivLaw=ZuzB8ZNqrB} z{U(c6M(jh)-jkg3ZX95I4oE&Y586GiIW%}jk*=Kw58??9Q8veWCpp+j8ICEC5y!qdCJIXKr*IwSE;f2h4eBXK}J zaHtq?Bd+mB`7DFZ3+cG(=C~X8(3NjB5alk8nM;L$g(CDTz6wNV!=`WFt+gT%8{=)i zKimx5tA(=apNf9xAD`4nQqncgQ+bOV16ar!qr!c?%L^sC*8*R8phJ;qNli~RYL=hH z4iRue==6#CUw7uaSrf2i{3xCkvqeJ6<2=emK&A9_-_qd^Wf?f5Vk{I;48GHve;=EZ z?t25ONwMTgazHz=5&ke$;S%mc1Gkfd59@R_jmk(f@4-gnE^LdxX?Hf+sDCnAebCmA zsDv-o2GP>eV5Ccb0e-~|9Sqr!k+zP-pO^P%6%z@5RvZjgDZq$)qk57sede|99qk?a zNY#WWoPoh*Hi8<$3^34(_>y9v;MMFWVmaspp#`C|RZ^?NS?C?ThU00=+ibH|IxTJc z{esRXhveE53g62q`wgd$U?ZN&7-z9fFxJq{`;2K&z5AMM7BQdBNaL2{;ImQlmz zs7;0y^)@Qo`a!#^I+e3(YW{Pz>1=zIhWbs@|Ao$yRjUg$WIJIX)2%OZ#Enz`XM8l+a4%BL2R^G?8Gk<4+|zPQSIeX2&ZX$w ze~tD!V)be?A!~z+zss*C_qZ%W6u7zQ-LgY>5BQnx=yvw^2z;FiwwI)}U@tnn(< zNQ}yx)gn-xOiVQBQT^$5>x>9F9f%(JXZ#8i%Wo1!rGQZ3;c{gxO0iB|tK)s)>l}0@ z;e3nybrWtQOyKj)lR}Q4qh^oYax9p|19#;o-q2m&ni|~CN6o!?cd0VMV?!b6Ezc+T z895IF-6psMljEhW-?w|YUhHrc9&BQpQM&uPWUj_gea4UywKwapZX?!e($q+6(i?-b z{nb4A`9(^k?^>tY<%Cj-`>A`xIOZBo`ElGkU zZqm$MLSX+XgtK20>Fx12tf zql~E_w5mBYUA_HE!5O(xNbn0Khvjy^u9r02{PU#6MdrEMt#^KMp~*sPnK6Iw&u^q7 zs3)6J5fJTPjENRz6#Rn6TZhdki{+)>3@?`&-%MTnrHA&EqoEis;oc6YO)cEhnxn^S zEv+>6*D+bhB-r?RWtX;ep7M-u(W^$E+<$8Le+m8Vvwc|H0L)Hm4xX{w7M>H`P|au zMp*O{%f~X_NKFn<&kxFkY3L_k2UgC;N3opK!FdlGn+Uts9yKp}(>O1?;0iYda^21<|u^dTwXYqV9X`e+w!@xy-zNU`qzA48QtO&wW0*RYWoKqJk{sd%ZO!^=ff!K zT~fY*H9e=v{Tsi0?+zC(3JObx{vBRLe>ZUMq)^MTwmKyF7lcmTS(qA!d_Xzk0%FIU@X53gbadwVx->u=s34l=9r$PJk&q*xxp^a~RE z|Ehp_L|8zmTa|gmsmTLuDWm#jVak&>`Jj`(yzDN9^^2=6l+08IPt#!q9#E0l!anGF zzSq+bkXb|>ty9a9n_F17)Dg5&P4U6z|Ah#zGZujFBGS3l%Ud#bv~hkYA_E^rgnMzg z3S`vx>0I`B+H-tKXb{Ciz-$s(QbayEV8s-{jAZ&3o-%Pn56*&S(7t1sox~tL`%Qes zSL_&A-qJ@oWXJ9I+m;Jxiv(CI$HuU^O6r3I1g-^~4bp_20@T*U}H0GDy 
z3E+8pL<#7CzXz6Ub3CF_8?KDxT6>YM0Z0=)py{po{>Q_wo^*S3yGo)Q8CwR&Cei$v znpDDl6DVWdM1(T{=flaC^-KP_5X7HXJ-Y3uyB)eN?KNJ$7G-q8^O3m0RLTi|JcV1r zNZth+3Xgsz_UTXdHEz4faTTt%iF+2%Lx=dV6Dh5@K61<_kZ1FHr(5XzrR>NfCOHr55FhqRR&;amaKGR2)YP=A1to2#RK)cPdD^Vo& zJ|BW*{+aP}32PkQ1DPo14gv@9i%2Q$coz)H!{WPr1rg@Ih{s-!KdcWXFo=>tqI4z= zSU;e{FC5z90$T=;J&v<(&#?*K&o~M|ZKXR_gGlL$GI^c))5au;JfZS&HE(n!4gqDJ zNzO)seNn{y3I30oT78bGKwifX52o|OP#m_`7y96j4T!*i?MXJdgppDhB7#9Ep;e{C zk4wbn)>-_VS^YUNBbKvQRA2h-Ih^b+?%<#_$0!rTX5`>!%9-H>sZM69Z=LawMSm4o zYbCS5d!Bpo-Lw_fVl&cTJIETWvrZb*TkZ%eSu|C;_T&`2a^ zP4Rc+H{m`aIwb^~OBaqdrPZ*UFN@a0;vnOf?&3Y4EQ@N!+ev)4XtbxYkIJu2i?G1y zQk!et^uAX76(s)4u9=h#J*s7kO^0iNy#AYT6FWs;1KrBcr}EkcdU$I18o!4Z@R97Z zjZzYThONx5d1_bEdlH;Q086BG^Q3drtBD4^-Je@}F(&pITkLesnmDTa)a7JO38|yr z_r+v&GNY@_Nk7aLZ)@(}{rVptNlELsPmAjp%FkfZ=O%9TQ=R(a@dB@=gtAbJ%*Z*?1CLWEW>t?b$so>Pt5{h~16@x&8N zG_mcDoe3tkZQD*Jwr$&**iI(4ZCl&_-P%vPwY6Wmx~pz?-G1MD?|bgK&+|KJkKY9g z%Q9afdE|Awyz*u~$J#;=76vtk8=yhTU{-6n4F(8p+uf}%NeO5H4#{}pZx4Qk3|TJv z+3l2mEh8nY?nd+9jTtknH|~u;iJx!qbUWfL2`}&D-hcDjU0*kr8PH`+$)ty~u7nWI zz7yTv-d3noIk>pUNJ@G>oMOar!iqXGD5eE?^nm61aefea*FgOCU-~hYibLeu6V^N( z$IBZ}*#hac)qHr=jE9%Q*Yc8?`ETg?Y}Qx~tR^DjBLhq7-+X>4<%-v{j1$5&BXfCf zu3xWSC~!Q@Cpjy-rr8ZCvLLNQV2u_sE_z81)(hlz1%rUj*q7EbNmlTqNiTy6zfy5;DiT^aWS>8R-S=xyOQ$P!(kCh5FoSis8Drwb~ZFH@OZl9b-UJcbad3z)U3~k zKg}{}MQ6FgR=ga;bE1+IqWlX7{ThPUCG)J*GqUxjk8BB-5cY#1LT#p9ev5zb+YJsKp)f1lz)6QFveBTv(QH0{z%}TqyMvB;YQK^k@nE1~ z9~L^;4o&p@{Cs_5V>k+rcf|3VO}|M&t-SqBUmM8hZ1;(V8|4(|TybHM z*q$t1Ah(*HhKr4@jR+fT!fv}B8WqjwbN-y}Y$+`zO+tb{rOilCJe>)wa!jG8E)U2N z&E|B3UL2mEpP!wZb2m7iErPGC733r%C$Cm-FqueYvfb*^Zne{&&gO-}-zO;au@F5F zCs>>ht-U;ZMl1VfY(E@Cv6ThPWe+fpyF~H+(O}g4a(~7z+>YBy>r+cvYb*X``JtZ# z^$W!hfEJb97yZRkQ`;HPff|IWC>PrIccEX5Q9j|fzlFQ5)=zD@Nl>^4jc2x#1%%Fg00?vI-H%eMoJM)`wPUg>Bd-qrOn1|ghl(~s9M=W}~N0l8v zG%UyvhK7*|z)D)b!NSRii(m7*INgWF@%wz$UkrS_gZ%FhJJXY*kywIxa(Ya^dQa=t zAC*D=!5hO}VC}c?=x8KF#D%%Jz=N2cy?4ub*-3%YH$3$@{A-}R- z&&a>XbcYN_e1AOr3=va@G@~x-okS&f+|Kn^^N^QUEGS;;??bv=+X^d49z~7) z)%uxrj>omOx&kIBP^Q)8h23n51GM@^2Bvs>cUSf^J|Q6kqzS52Z@^^G>2bo^{Ywn? 
zAk9Ll2SLgNQIXys06s=;KJm*7*{Y(0-~(SYp5ilDe~DZh(g8^2js5(_@Wvemk!ws?78s4*WG- zu3TZW)s>Z$1f__M-ui00-pYjUpCsz<{$+f+fUPg%Yv%3X7U5_J@aY!)-j5b2#&+S* z=L{hz1A8+`?dD;(^br&R5Tk}eAG<<5Odzg=Z+5kSR+a%#-BGs-*RF-xpWac6nen(P z_~Y7)1UQ@i8}aQfBEh7_^E8t^tNnX3rt-jyuB|v}K}uSe(Z~@TkNG+MF{@`75+9u3 zj7tBMo<91XoFrH6AW4k3)BKZ|)U-*N6QRwTk9>DhfGYFvo2C=k7e!xl7Kz>GXW8 zPT&m0;$&-J!Fvg%ta@X?LoRmGMU9O;7KhP&5`T#ke*&%UR`(MG*Bt%VufDGZSOmN&!>X+cz$dD zX8T%td~B8v5gH(ush+KZlY%O;2B4DACcGX=5*u8kYn^yBI+gR3>pWx_iB`o|w2K&*p6*})F=*g`8 z+2UO{458Pjr)8fntVpM9m!3|?vs$c5+>F3qa}(=Aycg6{ri4f)93_{djQuuyXoo(m~isPkoRhLYRAdd{*PjW@)%HuQi0{@r({j44v4 zMKtvLg=#_pJQ@))rzXHAqSu@SfXpw$m0;f3^znIEM`YVj2x+}krZuKJ$zP5g+R#RY z|I%UDW;PCIweh#dN!t;y9(E^oggt2u9SQLB z5%#&*Tm*;;F`Nox_elCKZPIE1EK@8W?!~(>%m91vCg$(rCO*>CX^bq^E2!3~`IQ0t zM|snR8SgDzd#g=eGdaZY@9pVnSBrRdL)wC6`}i9U#Je-c^gx5L!}FB}b#?U`-ENaQ zwFZdnUUbOlp4dR|SK)G5G<@+UZ-FSYrSDE`(T9uF-m6Y`MZ1m6bq{c#y9 z*%qTxGRN^kM4G1U!@N#J{bk?hD`%P}F{G;A01b_K_Q}lX$Q-w4t(_ZdIfUPPhrQoy zRd>$r+FDF|L!(`PuTBN~DH6q6o>npe+D$1wKS_%9>{wY@xm+(1NX&W^mlspdO{5cr zX~Dt4n{Bsrl$7po_QZ7Z>FB2La9 zqb4LPX;L+H7Bb$xeJrJwBJNpwW;RJqR~{rKd};+1?GXys?w4~YrTISIN$RxszcPtR zM4`DF;~E^^by8zwQsZw)%WaB>5<_2$({j>QD2k&30+>>}8eP{^o;$wFA;|GdMDJL5 zBN5-?F#g)O_|sv3b#K_*5m^z^3ddb+)*Ms!&uracA-rgG+GuxV^r5I*b8SyH4DmTXBM=dA7|=%c_d#{|ON8=tt>&qcOZQwd~>NiR%?Gcgii1 zjB1@J4dCryu-2%Gs(`W^o(uRBN^S+rm23T_;_eA+W7H(61 zGsTG<*2!r|TmEL`BB6hZgcn{;RQlMMNKd~^wj4*2+D~MkMVFqA&BpWnxillGPJ-m3 zyzQsTi%d2TC#d(O*eom!peSPdBsw}81qFq1w#)l%2V@}Lp8)$#K=Ad!?F!ddrP=ZW z6JGmZtUU1L6%Wi_R;T^RML|VnUP%?P;|IRm1>gFDoESG5op=ANy16fNy@Nx7&$~u9 zuivD}R;P#D@{F6vFlJgtN(OstXO>@y(KlcoGMDeK*LwGK&bIS=+Q;df3BHY_#TVnL zoU*w41c^N2(e6}jt|VM+`oO#0u~-x4+vUII3!MQ+PiN5m?}nQ8$oeU?;k!p!nul@+ zdig?<$j>Rw6>+c6kUllv+6NRB6!bbvL5wM4aHW+b$vtm(5J4@7YETi-l@)1%vFK?(|6%P>8#YV z0vYA`LgvOoAxEAkT9c8qE)B)Y@Oa7xw-1-E~fWE?e_BGn91MG zQE()pKlk@eT8y9oFIYNarNOlSQA$Z^9&}@NiV!Tt42PnNh`&V)+LVrZ$Hk||>9S2; zn=O=M`1T)y#-{01ruls7Ob9Z8o3bDp8k(9gF3>79)-Z7oP8VXjj0G4zUe$uuwK9jD zFbF#+=!!sSZG_gn-ptB0YzyyD{G($GnbW`n_SWza@frxVBS70Ku27HV-)Z_o2!EJ+D&&=?3TKiaI8 z@IJx|>O)vzEe?s{?_f1)J4W>qpaKC)j<)tEse{=~^st!?2EOqa>!0jPg@uJN4y>`f ztDmx$?4@~oqG2x}fXoeWP%mktARyUgRxz*_>OYzC&h5dnrJOX98`nI-b}7_WM>D8K z+xjLSn36LsfL6ec_uX>)!D0qjlJbCIE$0YAL=Y+tyD44VmYZ8#Xx{k#d%Re)yl^DT zpW;5^cB&d5Eijg3@nhNFyDk&pbsqhVyVB8on)&Lgp@*_R+nEwCq&)>qwKB zj^MO$&3AUjmuqFwnf@Z?X7tz6LS?SCq~UgiAYQ`(u{R*%N)#EY;%<2=d`UWwbld8T z3F8*{d-aF|HH1akguzEgs8+9Np0ju*@IZoimzQhag1qF}2|U-Qi4##HNd?c?^f>w1 zdU%lS+~`tH5=7F_2m(bew-2f+5YCs9d2OBqRv9~_tJ#H%|1mw(T4uj)Y2Za=HasN% zKN%kXpD7|yQTO+qmSk!m{8?#sRm!VaRo1`%LJ&GZ3sF(?>YB79)ld*jX$1X0j)=7K z%Rmk`;ylI^Oy@FB(~xR25efka3nDpAzIwt5Rj6l#_#Y%By?%XQvW33#RLn?Rp<*P{ zVY-pS z;@4NE5g$!yze(vd=vg5XkK>d$!w0~NQ?pvEe+S58!oWjm6v3}AQff%2hkugCYJ5kC z$zoUR@MI&cFDC<~@#Ywe*JJv0q)9QsUf0GiH(?BkZ4QE8HhtvL^Zs=+TC6R5FG=H1 zbbQ5muuSvleI$Yl$b+AQ_c%4y%ZiJ0G#b245|pQWu%mcSZ4K0goP?+E(wl1L{9|Gy zoVTkYsz}$TxO&{LHZ#rznD{tHhhZ5RbCY@m-t>IE8(dVmh)}4huWV_Wb35HU<^{Pv zJB-OULIWOCBq|=Ip|=gFJ!`A)H=}+8gZ)|>Z=i#@@R&#m|B)LSzkIf8`=qni;D{ZU zq?bBTpQLX1@k;9T3%iY~ODrdRBw&wyQ)Nq_-)=u>swxSJtCMm}Gw*I{>|_^$KVM&Q zFR|vzJLWd$r~S%KrwP)Q`AXYG{nNT2@%hz zh9?QKKRexv6cn7MtKxL$a%di}uUn`Ob;JVqV$~dX`o5}I8=PGbKJO%Z+^0<^^A(bT zVpr(#x3uIkt`X{6XLmLHZRzwuTUW0?y%k=q_f`gL^hCRvbgej)oJxe^*T&BY8?D*oZk8$=keK95iKD+{1CZj9!BCqyX54P#X$>*ctT2>Ot z#8jrfc?`pGy9#XgRz!ZxhIoPyGAKRmO@`DLdc-jOH3Zm{0ov|!;Mqb=*Y%WTmS83U z;8kcHl}>iBch*9^Jj?9$bB%LKb&}KwF7fn(p}3}UMHMJNAwz5xqBN^3{{A!zd0X*R72*f#mxlIC8-6FB?Suc6y&F($dS))R?J zk5d$HoN(7BX;1fWs`QLouU1*TfEL{xPEJa6TmkfhFd>Xm;-OQp5B5(Fjz`N{t)BpP zzVAPg`bTM=WU9Ty%^4xuQ@Ee;IWh1(M4@JC!M6{B>G>O$g@Izu>9bT5zRtUE0cpZx 
zM|sWxOHtY6&I8S6tQ6mi=+<=pp0MXh-W}pNpu$26F|9H=#!qMWUt6?jynin=KsD~_ z6t!$<9f$NZo|?%{!oT~_8BS&)J{PqPdpP~pLz>!miLuQc=s|v-p-$o5>h9ZBloirn z=|^$Y$A{opwYb-dKmM-nH9U+)WhQYIR?*#j0kL=BLphoJWSt{W#7IJM8NuMwSv@On z46nY3z+~~!43cGF{8D}Q-SQL1a=9>t?D>Xuf>5J@blPmGWj#KjKNyMm3eB^7c%bw& z4*VbppzcLM5b2#kM=vs9(!j0c&DHWy_F%wA><6J?z!;-54k%l&?3sJt`T7>V`N?6~ zZ}wUHgES*upZ?3f=!Vo^H`;aJ?np2ZpMwqy204Gqpgidqyu3_`gDbimosr%Gw*}ItacVJkQBYOQ!F9tS$MRVW( z3}eA-{}(YzeMQQ9)O5o;DSfk&%!Ct4*&`q2tm=64EE{}l z+Q|BwiQ)Af6}1dB=0L~Yi>aI}8wh{~Ml1l$GXoJlWH3$p37FpldE^a2SWrlCubsFs zoR!4ymRNbi!utmFRkX;cfQ0GVY4m+}Qp?VMP7}yR7McKK{tz_6$iO{dNh#wS`|H@H$M64#v1K z6%Bh#VbBLQ0wd6*zV=9WlA~`8o&6qBcK#E+R+oY?rz_VvmF8Lwysp_Lm8Wme_|*aj zkM*DeZDq|VCYt8VyQGXbMTKi(VAWp9YoH7t2yEFikZdQY<3{BC?y3KJJ&HDk&-KT3 zYb8-`R-}vi>EY{=3D`Q)sTZJy-st14e|@{%-dpm@u>!*z8qEN9^tyW9g-1b3gZsrt zfP`epbnr7hC!BKJiC;;91+~`gC_%CdPgaUR>Orlg)HH%DdXeUX=SCaO;h?&KZU)wk zGtgkeKFXv51&k+8 znF=5P>sPi$5SelEhjLjcsCG_A?p~w+Z%k$yozEKnd38us0cBCi-E-?Tz!fHETdWRL z**cRIIB8YflJW6_fI@9VOOpP?vM1Ys%J(8EHU?8d{;(!7F|$vrIiCC%ncXRy^idZY z5DIcjTB-K<%C$fUOwGSqyS9Y6`UERP8axRD5a_SH`3pL0ve zHEsbG(7058>;dhIf?kk|f)V*4oq6GV?euvZXwSDoy;?a}Ki@;Yd}1UlO~0wmdLuGQ zSlXrk9M$atzzY+`w_J6%*sT0%aW{}xw@sOfTbISY+C+>A{fGFok|o;fpIm-)={>8%g@{8nIbdq&5aFs&)JU2A zUumeZy&~8(v+_6Y1ke*S7HYOnz`VBVM;r#G zEvECb%-KrgNlqpzv1TMjVAS>1TtitNhk7DLsV4>8F%VQBaIoIN!Wy)lT;b|(3%MGM zBFO-qJNv0bC5B2t=9Z|F$hzT{hCMilY8Wx25B*I?Q(qtiZ2?EDyL%a(9=o`j%W@rTr6$vdA!2$*(jfS0y zgF;fgs9qCyCYiJew6Lj@Qjr}g zIz-ojJ!;{K-Zv_tA4tfK!WB{&q4Wn^TwjgX&|+QLNW6Q*6#|L=Cp!E8bfv$*1Wn^g;I zvbK`N5y6I4<`z-2y9$i2M{Ye47$ck*|1zRarM5V{INFk91c}T^nZuLHRQm2*RW1fJ z-m1WrJ1!3d!Qwy^aw&-MOe#%Vf=r5=fe7W9iIkiZ5pvRnT~iJGIM^`AV@n!EN^q51 z9S*t52OerUj7h0gkEef?)QeER`b`?H2D@feYsNgHec`ue)xGi7;MSe*S7|beXSEBn zAW3L2;9It)_?d@;M?m}+v*JIaPpTqkuakGB7%sqZQV0!=8~Q<_6&<&)cPNH zHZ&>6M88A+l20ua61-ie6q0W|H+y`(VL_+U^sy5`ZitP2EdYtjuQ;nRlq$Zm5zK={ z2>W5~m5yj-n{Dp|e$SOLq5>_b(H+iErw@BxG?l1{^G`eWVA01B zd7{mtVJ9IzlzJ{1QtGVB^@aNOB>NkKj;fIhkC;QHzCEk5-Vtfj*I<#=qe_Vd)+Z4V z_~cQJs}>CwiRd@ZlF(~X&pB>5Gq9Yat!yD(m$YPg8!j*=zybf{o>z7dOtT@Zv8+() z72sRko?ybbgXlagtuL@{;Uludd+B8w2Y+C=FA(wD-s72<2Spm|zJ+5+wVfj~2C$l4 z6e=d2;ZvM{Ch%FKhgMQMxC=?jPzfOO9ZlwR*`$u%wU?yOYGdQv$=k^YmU3Q=wDE_G zPJK?c`6s?FOS18ePEoUP{6qu0>i=_QV}e=lx>+^`VZV0h&^bZI2g@8JWmyakaUtD^ zfS};eC}aZ_jenUZF)FQyZTGk>g{4X-A)#8!?`x6K6Uje2##XbTMJs&C8yvT=R39gu zJdRK51E;-QxmD~eL(celId!Ra!L-L!dUuYoZ{}tVv%}W5EG3YKh~K`ud!u~$NZvaW zX5&KM%M|v>a!zp`g`{hA(8%Qf!hM_OaJ27kf)CSH6;b(K&SU#?|EsGJfyyf@CY^=^ z1}(MP>=8NHquqvNiS#G&h%P=Q*3_3p?S!FemLHSLj))hg#x4F^jEM|os;sY?qG&qO zEnz)cz}|%E_{M;RJY%Za9WaY4^%ND`p$_ERo*+>T=@w=2rH|=C%^F#AQg^}Dk+5;0 z14|7X)<1iq!Nm$|u%la8ne0*)zi+Qc2L`6pC_kxda%DtDP`_1<8HGrzxWZHZMu5^& zW_+x|Ix;}b%O<~!3iiZ3Hs6f-`Zu}S3+*R>B8Gz8^*bfMjrmcRu|&^&!d^Qh2zhKx zsW8bz`8Ndq5=pkQ3=duczp>GQk$s&x-KEAgZViZ#_+6DT_F0%jSV$suFv6Gnpb9rfc;UsU~WXxaO1EEav!nc`4*E0(gdHhrmJaB;0B z?OLx$cgSGM`kcQ}ukTUSrmq3*_If3T{sI?GU3uSaWN#)5-96)4a-q+d6s6OuNx*{~ zlugiJUxbdb+T!~QEl`)Ax%#$lAncycGkaJwBUOVZ>k>pUeQEK%Zg$1~z?(Kqh+v^! 
ziglQx5%=s%%j`3pP=e1y&kUifcf!Nk%HTB#?aR&d=-*yVkSVQU-+Sk$rb*!sp|*cw zD;bc3r1;sZ=C}UKoKwX)3*Z zn`l0*^Dgd*VkS(wIIGTbmOjfl2r$PZSj8Sr(cV zo5gpsIh4F{_1`Ck=pGh?_=|R1b3JSLYCYQC$R1^p=Znp4f1TBuhk-NDAl38T1SyBYZccO{!#3Q1?qX2$h{VPe%J5Ku6Q@$ znH;$R5Enk5?<^DfURRuS`08BDm%2b2(toAwa0_exmC)i*C*!r-Y6O&gD@eye!VX3IbDwE zcFVcm>396)@|_&3;Qayi-sPcf1{oDnH!2QcjQ*{gI=(7A1H;Q$E-DT=s4$LV{kCiU z?qb+zEdvGh0?NTbX%o(2JRH;GER)d}KJ8uM#svIj?Q@GW*4rwp@=P zdYXvgx{c|O2rl>?dDn_1#OkOLvqmKsflDgJtWAgB9bnf8oa8JVMBQhVW2MUs>Y@03 z7Oq??(PN0p#xB84K`Kr?R_q+A5MSfHXJF~zN_CF){$M2cCs)Cbe}t%@0|~L&bK%!b zJ~1Mv;QgT;PaN|m9)<=MiL5<;(wZ=WDw;REVz+zm2$(U}ma^#IJUU<_sPltdS~zcP z=pG350ZB-dBM8g%tDm>0$Su~5MxS-e!ncC0^bBCdsgqUEfncb0*eC(rDK8TbbO;(5 zta@(HT~qviWij@;`4&K-lM2`QA~fOjQmu|JRwsqCLEd(aDc=3{Rw=w?=j1&d5zE^$ zeW)#>>1JDU)qi~SFeq%LIY-bIF|BZ80u__eOlkR94g1S1KZ5PcJ5=bXsLcnwQ9ega zM#iKJ_4A}skX*xb=^qVks-89uZU(ArnH}%0MfKHiYhAG2?>94Mxdg;N|7BQ|(a4!6 ziAx-m;v;z&^pj0}aHD2_?(S$T9jT2mq^}$_Xu=0`tK(p$H^%s+9BVe)BoL+epg^$7 zVj}G6v$i48HN9T?XeiK1^d5d2||QCkgvofZM?WDMU-`R@TBy1uCExE<~Xva5Si>XbA zPt6t2v+-(mWAUa*^j_I#qxCdvvt1PFXvz~?b35LB5~{s2WJqe4gBBb@h9x$dHzxGi zttkrOU>(c&6EKx#Gj`4+<@;bkz<0!^L+C6|+Km?dObMOJOZ64xyj;pT;NH^^tpxgF zM9&wV_k}t3o9Z35nMSE|brZSo%v43U0(ENJ4~eJ&kr4NlyS>d23~c>>+ zq#0@Ky%^i?vSH0g?j)F5hX>p?QiGT8FrT4^2i17S##`RgB#`NwlvQ9$Xgux3g;{~s zd#<-cSY|%U-~9@!R4)4XyLuA5cOZm^P7m$l(1Fh6m--ot(SL5kS%yQF&^(jj{PcGT zR&Hlkwzo3eBfujB*gj@z7}-WmnhD5OnOgYCO|A*OCGV@N0X*a}w|lb4wtVFSBSpO$ zrG}BPwu3#GG?VC1C}~0^M5ZhD3mNmR3%{vZj)f!#=5Is#c+z~)e0W(mDMh*|N=Mqfb*`W^P(gR)KX z4@#}7<&hkmALFeZ4vfYh#u3dJl--#5hs#nr*2^YzOyUzY0#!>ji8zcHMi2nPbj57g zFGE#PI9(;B6L@w{el%!dAmEQT0xjy(&Nxh4yd1VXNv*YF&hH>_s`@lMSC+#c*}J*% z_3UBnV5)!^7dM>O9;Gs(p<=m?9|v*pnZMq;J%nPw{+4PEUu?d4F)MsM}zak5533`af{jXw`h}z9mn1R;Rb{Q*>(=oi=+%?6?|yJ1}u6ms2L zz=_hhI=65r7_kf=s#S77SqX5Ds;U(sPOEN@NHq}qb5msS!g*Jz`#(AW>;TXl6D;YC zxgX<`+J`m>TL1hJayAF`ZMSiU@i)E-_j`Jla?EIKGyRnL0UT~)(o#*nv;@bJ+f?xg z?2U(1G3Sm-9P8yz9{j)hi$qC8`QFlJV~f55(Ijyq13vsGJQW3`uo6jvg!KcxF|&2O zNMF!sEx$8>+k7P{%NA>^B6m|%Eiqg?7`I>u+x{^{t4Yf0^hJOYkY@W*?t@cA$9`3bh8M2WhXf{M*l{NO&70q;+VJ*iF`dP?f0B|@bWw8;-0~LXtz6Ce)Fei$= zMH52)7tV!|87XJ|2*CHbWNSlCy0e^?=ikNxS;~z%XH(hv(4|MH&e*S%89}bX{NrM98Y+74 znd)nqNgH7RpN>ML?x#;&p8Ll#=L~MB@A$04llg-?5Agl#RZQm_U`xT_&sR+RlIdy! 
z)NZK%^kJ`+f`69sD$l4|&>>7k=wvDN80kl`LOi+7a8a{0Iao;~W#1XU@5CCV?VSg| zuRxTR!okSL6)yRyf;mPJF%Uw&VvF)cmh3MM>@5fN$$8*%K|}Z=L1E@-vRc<_K|*QXB+-bu{>+hu`t|p3 z42N5t$_#A`#IZ!e$;SPiklz(toXq{STyd1P=k4g-wicWwe`BF9JEYQ>b0LzF-Kfo2 zVu#Xy3n-fB97UX;TQ&UnN}p50mke$?o7cEo7>1<&B?7ere{CD~#l>m75S!i4@9|b} zwCbaHPpyzZRMt8fE9Gb|R-`aVN`aQtM{k0mJ_9Gu<%2Tt4#P}qg7~-7+xoXz6D`Y^ z1ESB zcWC+FjE)cnHk$m$9SEkKQM2sC!pkOsckFDQm!JQ%6}bi77eD3tO>iQ_#P!_WASDRq zWY~#e8&Iy%d6%5?6>50rO)|<#h+RF>NH~!Ia7w44xBmLusZTXGQ;4tXKQtOU7Zo$C zi$!B&GhJA2e|am9LA?~Bm@$HC;lB9GQU5C2o9+C=A~}8oEiV{5sYC(e-S336%{H$m zj({WiuzePfWl#fkX6QXhtxzZeRLl|x%u^6lBBqg9*X3@8Nm7vqeF-=2YOhS`ds4)& zjBwh$AgkilR_xcZKz&5|qy_cZNy6_35Gx4a@_ezA;_xzu$%Wgv_?f|mh9){s>vP@n zSfQt2E0Oa8w!63v5|m*$bC7|R)-lkK|lpB0(jt#M&&k$OkMX^I5lsDTYe;#)uU#MuqB0&DBIXm5BgJS&|Ah=?Y zKaEJyW99Q1^4C8)If&=k$3%#vr68>?Cb+$-4gFsH2X&p4H-zI7zBg<4*|BwuSPgiZ z?BbrD`XGf^JTs_E#7^mE%#2*fM@uD!Hz_dVt?}*@dU~w*^#2MCxBmE_TMBPChtRt4 z!wj3@!V2`4mR>_3ll{oL8eLYOn(u>Tignr#%|`}~W0%Gi^jWCK5k ztcS@%IP+`opk#nOjji61w}u&_H<3vI2+)FsqvjMfX0n*UtGWIXUtvFI`V@B32Pn$5 z#mzg2z^5xxP;ALVlGPb7wEA;jetYb;+K?YJF)JQoaxXc zMGd`_HlG!LqUvMXk)vz7CdD{Q9ubuDk5vupTjby$VS zm}n{{8f|Ysr#5mBzDAhWjP+@MQmBY5)7?v;ZBy9MqjTiJ?<_85MJiDC1{{sGfoA+O zm;^DyzRFaRW+BAYQ!U{>5js?Wl|fsCPu6)lrQ_o!B39$R6-+-8is!JV2CmGi&-shO zk!rmt7(0PBEWCaFaW&mE-@ADJFAgav9E5jSMWzlnP=TQVsYAaVTrbFdekKINyk3Fa z-X1!0#{4k^BT1dJpqu%E+#f_t8G`R5UHVTBKE7DHtYh4?kR#aOHmS0dbvj4U0Q##C zy;0GAMcc?~KCip8iKSFAr>%d8F*a&Kc*X;E9o)-H|TQ7FCAdT>zoxrgeY3L`yLOy z64?kq6n)kOTlJgHB9l`fIt8RM!MovMLl@l)4a77VIw>-yQ-(2x{^~up=<^VwE2o_Y z#7@o&K$d+}WzmPI3hPpU6K0gf{ZpzAc~+z%qJ}pDJpo<>fQr@}>P^qtsA5Gyv0VSH zbQc-y&-f^ykEJ50pDOzs4n`~sqa`)8o#bqWWMdkmZo}cU!Vy-*Txln(JC;JL4b!$* zPV-t?MP88wllV%#VWDx8N-WZii5X_<3g$$lQP@dBql6i8q7&p<5ga@LzbjtV?P<*} z+$^c9R-x;$&5agBy!`Gg7-*JonvghjxF%(!20b0Ojn;YEu>`L)D$aT99bhh5HTzHzcTXD~@Ad1IUJ`soO1(Fg z&i>L9S@mOO$sppVhRuN1Q!ous1*74AW(A0Z`UYZP|)!3%Lij(%<;aszHNK4Ies! z@A6vd=kg$qqi(aRa|+5l-kNq){Frdn${b?;VW)F~$oqBDL6=fYQt(eF_J%RBz@#WO zW4n`TvXtIGy~5@X43^8|URt$0Yt|<%p0>`rseq!tzhSi(=jH`Tdv;blW9*Ouy4`FN zH?4H%+>Z+WO!6&>Sd71?P_`$-D6k(48y6+aJ0_>=p0M;WnOwHtFv-Lrw%5b?_Hi10 z73+&u^au&|5=xcEVCvJD`@a^;6idKZT8t}V<)?)B^L8s;% zOqfDy1G;SP=kFM@J>D>`S2+YoX4@L-MtVe?nXG^x-ww?0; zR((LrO{ZQW-s4mcUx!|R*iTaR`7)kIU1ZzYZyftcL}XB)9t+$4#7S?lBke?)icmds zjlW=5aq?1aQQ>Z@!&?geRl`b!&rO?!!d%*oOUbIfQ(gVa-Bts>0&=TIh|yw_#6b1d zr^z|FY<``-x+M`HES@yEhZtuLYkhbW&fPf%W@5O8h1t}z)=C!iq&+G73Hl`VQfyAY z?by_v*HH>GF%`wLxbo5CmXzd?@5qoEu^2L=Nun{VC421F!qxM7Zr8BxJf00Q-sN4lQiw{adru z`dep&9X`SF*!JUo4>_Pm)`swHRH`*=6HDj&4xVl$3slcN4O8`>W0;97Tr(?406wNv zZSlVisawQJzzRbC55pCeq~Arvgc(#M;0Of?sL`0_@qK7^Qj&e21Vj?3bMZVh`&NTu4i9S#-hM`uxtsoStoRT-$uAdStWMz9bT<;N{`!5Y_ug1x`89}>3sv> z@@f=m{>O1kZm>$rh%qv1&o0A-%{Tp}viL(xIXF1n)^XR}g`_9r7t#JR6T#rSk0(x5vS9S&5q7?V#(!W`rBZJ35YCa zn@}>yHbY%!y=yaw$iwb7^f7*;EUg-ByD1v1K?SZ*UaWPhlh-7ZeZR)YP&4s{N81L+ z5mwe^+7ter$2o3EaCGIZ0hEbJR8Asm^OLo1#tws^VJVF5pTwggv0^J8EqC#QhA4tw zyi6OtB>TkxuWhPrW|=E8HYHi(VN3ZGt6$H|GMr;qJ7+>q48b-8$`qeTpk6Bsh*lae zYl^qhC_>g_pX}~T<+0}k$JHmvC(I_H6xEtU22S3?M~I#Ywbi7NrmR#P02_vBEQb7m zawCV|=1UWcz;F=TF|*BVt9x@7x>eS_&{ z_EzOZP7_xcN!5*vZMti|9U9JLX<*tKpyvs7A1LLfprIn;lKeAbQ$q~Qr*ysoZD~s3 zHQw$ZE>o&QA87aR0fQe{>V$HTy9?ZFz8tzVSNW$`ijJ}{x}eN!!1ra9S0P;#N=bv! 
zm{z(BX~w9eq0pQ5Mj-0M9{btWM;-H8UeEVw`?k_ttUwF1FPR?ePLjbf(d}ZIVR`b} z6b82{Me=}iw>#5+#P36DLWe_q$Xok2P*C4F1X>^xN<7)C?d z&t|0wOnK2{1Qt&BBZwrG3kqf~odDoo`%I7%o77+0Bpd4jVo+B<7k4;Oan-d~8bO8K zxH|AaElb2;V7v9P@5m~CDZbe9pJ@d(69exk$TEKa`YV@cs8B{YCjI1My_>)QrN3I;p z+E$?GR@N$K5j3rT5Jv&;o;Ng4n6N^N>MZ^ZKZznbDPzDl{g}e1y>O>psZ!5u)h)B# z{S4<-x*#oS5L?AnFjuBfFlA9V(1*J{Tr#zUkcbwwPwwAjCquPj+rZWRciJvLY$}@r zXQy?2SDbwD`|-%Mj7MWg$aaWpO^j6En?0KlQX0eo(n_VZoO?$y{I+ zyv0O``h`@)O*XyFaJVYuwcOaObF}qh-n%Ivcxq5nb$j!Y6AdEYN4M=r8lwu;{pk{q zih}_V7Z(SR-RT2Q0vU}Zf{Kwu#4+M)u>tXa9{=Tj)wv3XZWtGMZbYlPziy)fw5+Sh z`OzGfCTGm_(w}MS6P*av(i&9WkJzMHRIk39@^AtGXFmlJpuk)4Y-}bC3^ev+u5fDD zd)CKjcQ>iZ$-^#?$NWM&tl>9zupRPeQOhZ&~Vxbnk3Ntgb5m zp}ySxE;A&`6Jh4*h`(G+`a2oA65FfZJ-*$S^A`_*O3&Uk{UuT=rX^6MfI~|DNzIma zyVGu=+TAaL%vx*$0XK0Ht{)H~NYu}*9&kgn!Gq7C?sSI;uVrf4^nUo8!qK2 zVORMsB7!-u;W7?9Ogx9nhKV=mcWl9OMz5ap2pxQ3_~AngUms>08HIE2)I{w{gn3+` zl(^zkrzajd1pWg;@}nqv9cV6#^DF9VfAsUg02~Me$g} zJuRUnu)^~8mQ;-o;mB}#7@3Wq5fQv;;&41&gqE#w0KYtL+SbR>6ZS0WFsqg~^6@>r zY=u6q*jfU}cjgx~Al|%hXF_V0He^F~q(iJi{NXXDd~jd-pLT+7E?IoLeU5S8RVE%J z2;%Fok4=rx=Y1*2igd7`k0(_lUEE^lIR>7N^9yTMHCByQtp1g?;G907C49<}si-hI z4@auD4E?p`dfk`seulU|VS4Y0YL2?2239^o3^80IW?bSvn#DFf3}t{pI{`ir6mvOL-#Hb8DV7QX?sX;56hS*_c~Ge#OR$HnpT*?rf}4hM7TQ zv8cu9e7WyZ5p{|7R_l#;eC!9l4U)P?6UXo&x}E+H+P*2uk|t`lZQHh|ZJX1!ZQJ&A zPus?{ZQHhO+uiqkFZaIR_5W{m)~QvMRhgBUJ7VvM-v(G26fD1pLBrGg7OQH$f`SYq zFoCq2Jp}C=M(SK~E57p-V?9UYQP>N4IjJp~s1x9UF66f~Q@vxPaNyQ}h=hWP6*=4< z5gf^md^xTYgGS+e21b$Alkm^CKuZOi<{f<>7fi#HamT{sd?7;j!=p}n zj)R*`EzbU7IkqbUM+AX965o>P?SSXqmMe6&Yy-*mCbiGaN2;Ov>2czpOlsC0rJMHH z{Ah|k$`~H#L^wdknRgKWq^&jO|M+j&q*ige-RPRVN@K~RBFzZuTU#qObOkFnU?{f$ z+07-_irSo)wh+gpe)ABF46(;L>={!qbk@g*Oc9Mkod}^UCy(T<3W;ZXR(T|7``mN$rJ_kz z;LS#`?=C|yhg2TEf}<#}yl)4Z(iCWbUkDs3-XIx$T89&43&oZG#&VZx^a`S5#d@82 z?vXg`4tm3BeL_C}I65{W>iHaAgxp7NMhS}xrVipB7QP3(;! zu6#6n?TvLO8jmsT-4efSAPcCOe`}v~3 zIy9#kuDd)&{m^vH55Mvb zL;gV?Tr41N>B}dFg!~PeqT&_`Uh8z2tfoBIk8?F_)XdYXQJvqRtrvg7s`L}shUo3w zKLRyeAwF^3kK>r#`0f+>fX*2RYaiXtMi9)d!&aM0qmy1If`jN^yeb1x>WXBC*k0*6 z%s2Kg?|YN(HR~KcMc{n%oJ&dESY!i;& zHyJqw8voZ6K3&5J=`Xz7HA16Q3wp(K1X{kHQ0^^A7upnpzGBERwPVvXv>N1FI7yc} zVuIz~7v?JMBvU7B3o@?Torkzfc{5W^a3J>_XZ-cll-#bEFXkmNX8Vno7E5N&kFMZ{ z%lxd2^m@yBYSowmpYJD;79mZ2#s}!n+PN;$WHn3Ck)s}2)3p+U&!hq!nA_+bn2#+L znMmon_rC#CNzr${JicnRJ|33lXI49?%xt(x)d|rV88~zgik9dk;iU1VbNs#&YAbXL z9o=*o4T1;dSV`z)YnO&=;?dG#MkXdQbJpi&={9Nk7>N34(;GI^@%e_A{Zl)kNDjcU zo~fW-YTu&hssaS~?EKzrCWo%r5Nz%xEq2z+CxU{%No+$GwRi@=(xFLSPxH>W%8 z9ZJidBhevbhgz9<$?Y`V?Q&M=vDZ7th2H7aK-j7$%W-O8qv`F;K5)QjR=F@X?%8kA>jV-y;<%*Wo-)dN5^`OQbP1r&?) z{e_cmyX>b(B_yZ3Bw68n9#11(ugwd9M6-`5{hS1#1&cQ^Uq(Y=C7W9k~GoL`BSsah6={?i4LQz)Vo7-vA68Y+tHB5m;?LfP-6^!au#! z#*mL`RtzVvg$KFE8 zjR3Nb8(D9|Qd;8*FH;w$;!EsMq=uMMboVv583V7&%R>Y`?jfe${Z+GN!`ql$B8j1k z-s!Pg%0*u8Uqz)rh(ssru<`=}M|Bloi;IhF5QDY+biTNsW(b9GUsRZR<GI9QGmSeam99y0_9xF9xe73%@Vi865+xd-#3{L3it5WPXzbz97g?(R42N)~Ka zj#7$)AlY!#r?^-e9^tHv5I+T;%~W-`v-8~VI{p#jAusHCiYPzB18tK9riB*JAr?{C>? 
zr)l%yjBSNX6Lw7TK@g-mWUv+CeebZpmMNUtegO?y<(u?P>Sm4#TY4NZ>`}O~4;(?K z#oTYswTH}9Vvysn3*?c|cRs0nTY}%-_&Wn2EFnN)F-SU?1hHB%4aIzM_9pm>%qOp=R^B%I9>P@ z;bN6%U;JsS&CYIABdQ=b>xzwvKJp@1`6(9_52X2q9)cfRb4`i*Z-9)3)7khev1ZeB z)f`h>Ppj*Z zuNnOKRpOe*@e~S2;rvtd!=-Nt!;-BM{F-Jq@_XmnilKfrI@84YZ&wv?1XP!Cw*zD ziSl-2La2zXOIk!;h-4QP#XnMNoPYZyv;3Q|k;m`n`498i9Vz#>$|K)TbJSr`8n8uG z`@yLlI?fNfmW*kbyCLov0T8=q%-SK#>>n&GE3=}cfxWT%**J~jybaw4j|b}P#&qq# zvx+#KC;$*nBd%*QT+PFkwMh+pOxW6D8#Rc>T%Vo+$%@hgWkiFbiGhzTEA=Tm9-zvv zbZtd5Mx6&>8m%aCq~Dx*4N&kKluB*IH)b3E;@xP%smFhpps}rOk2u@?p(XJ1E^wr!$~3T zU2!w0@<=J(@ElDw5C{y|`cP5sL(Uttab8tK2C`tW;gdOGv`;+gK&s#Y0!Rtd-7gMC zH2`(K&w!?j@JU3(5%7K0+eUM`^y&J zV*DLHHR>TUw7Wj6@&ytcOj7*ZwN*Ep^Q@voHg(%hFfNVxrQg0l>DRO3aw|i9Dax&& z@tXqlpiAcAIRZgfTy(4|Qla7C=e1{PiE-31xSPrY6?#R5oOU z^Mf-C<f^`tP%sfBL2RBwtoxcFU#3h zh)K$ZOku+m2IG$>(>6*ff(oWdH5qUl)@c9b&e^6!F8f_NBnRCszW{eF(iwdX3xPsYf4?fgO#g?)`V}#mi(|q zgOdU#s~m?+z=FvU7}wfF?p;uOJwY+F1uJ?Wk*fXao5G208*8>w53n%YlWK(BRISBa z0dPV-iY>+I11Y@J%o>x^LU2R$)xSm>gz0F~_ZZ6zW3=xqIqf9k09O~_Huixu`KwW+ zQ*)q=j{@zVXdv0vDgJ#Ty!g1XhV`oBo;zF`nM?e1mIV)a#?s~hOL1pTJ zJAx$rflpC>C(z9yXZ)QH+&7X&IRbIfaSEJV2VPTug;CJ|) z)s^1{kkg^s2zB*bbuslK{p#YRWwhd7*WvolZd7SD zr|gaL-afZGyvi#Fa(dpSTQ?Oq0ONk-D;L88RYY|oMyBH|hw<47*rUNSbgrODGfrX5 z9iPv`3kt*KdH%sScwJ8Ww8)~_?ZNKjjOFI4Si16>d3bx7kz~=_*q>&oUB1vOA@`xF z+!mq~c?US>X)N2MP)u;UeHKrc6)ptQtiRIva1aHPQaHYn@4c)BJ0mbM$hW~;3DD&7 zX6zUFPg_`oS|+Ba{1Mid1Azg35B?q9--heiVzijqH}e^)kyXZ_YmRSa?!6LhN78n> z@vQ4lXzz>r-JDXymWE#R3JhnmeIXw`C$0s> z)@(!Lge#oj`RuQ7u?7Vb-UBD~O-FUmP6N@J4FPi+w|po(gi868pj*3pDPW#+yZFaC zK74rfqCkHIj^v74I<(@;@l-mGB674I<5F)GL!K=C_| z>zRF1Ltm)UkBwwxTpzL{&J;W~tvZ^$uq;I&8nIw{URFC)T8uZ@nkA=i4zlauU*q`r zoF}^+D>+viE{Gy9&77GRL;G1z%a+53n@W@^ApHWs@tRE( zr;eIUSLW{G-+rH80U3pVc;{uuKHWAsQy?;sf%ZZg+bL-6DIR>6G*70PsE z+}BPG8Cr7+d?^THd^Bl<&Vb};@5-6zCicM}^*xx+X6?E5OlZqy5tuhIinxsN%WCt@9&g}W|qNSuCF{TL7A_y?x5fH!3T^? zOm>*AZ+c5uT6&=KtV_yoxj%wq%QrXaY zV$b$}4>@n6#wcAx4em;GDj<+`;d~&w8yk^SG9R3Sy_h@&`k5llQR%Cw{5P%556-X% z>T;DgYyDej=n}}IDXx)9{#Ff0_Jx0>!3iPR0WdMY+fp1&vZ}V=Jm?8Fp-vbl2(L8d z%yNEy#yzz#_o6>`-E2`)b=>?M^B8?LT`4Q*bX_=d3=7sA_~3&2QMP112XG$39m_d@ zbN(n7RDl9{9nAEBNxU?KZ7A~tLxbE34$EC!qxlTKuc-wCdc-NPbd|b1`WY<6*Ym!S zeZ0#LZ~o`jfo~TfAaE;gGhGmr@HMZk_Qp%XJP{vAcCFF+1Fyr8`|8g~&wlkaB!+k- z%3e1t8s7k2G%!e-&|!#~kAF?oO-4>)4TWZTznR;r_GBw&361%jRyO|IS7?S!rr#gi z7Eir@aX^*lUI($4=%XV{T>{_QcZnzO!HLzpcPV4A* zNq12~Sv{cE*PjbT`MbUxNjCks&tZnU_>WLon;L+DTwZ>8yX)%CJw~r5e9LgW$xNFu zxvH9$PO|Fve)$Ph?AdEZ4@i8>=*&R4N3Pu06MT$qWMZTbnAq*goSH8JxZGS+*9O$P z6b}uve-;+dIcV6oCpzZ^dYrPjTHQ)N3Yv;q&*muCZN(=c6uW{Ibjk%ux#hL#+bsiW zS$UI}OjHW3ed*gf9vv$cGaCmn?k^oLPfIf*s0Od2ABd@rHz4UbD`CjSy&1I$-B2g` zLu>8I<{e`bTJq!2`J8fMcX(@~N0;tY7ISrA{0tWkoF~&oN{I>20$DqU*Ke;^ zOB;hk+xd6jo^7RC1+nG0%V>r@Rmk|sD;4R=5fA!cm(64 za#No|`m=01cbzo*zHEDV39~GZO@nLD)c2!f|&O<%vwPiUc?sC4@vavq>ImZd(+|`GEW)RxG8e z1ULwl*r`#2kb}?Q;CHO+VKTy_WnJR-nC&! z1H8Zvg=MbDxs7BFd@q;+GEmT278b>qkhL1TktNLW$!=G3KA;m-gb6hW%D5gv-jzEu zsJ@7uiJxSUN2ag)A_JHw<1S0=D-qnXis>eIdW3&{4u>Z`L8GP*{WF`|xaQuK5yc>1 zVCW(DeeDng(F$p7#M3;P&QaasT>k>=(nTgh(9MDaVW9MeQSG6+e7j!_ zgt>W6YF$`*{KKyp?+}BCP`Iv|U5sKIHZY(W<4>VO0cJ%r4q`Cd5}t=V=IG!y8ML{$h1@N_|+Pf@Gz+ z2o3slSOO~LA7&t$oybRkciTIpenh~_MO-tb8`C{O<(!!N9^xVaYYPcqw6S$%l@3=S zt@edQfH;QzfSnSx8Vhu4m?b<{$KdoGB$rum? 
z>Zru4h2#~L{BGLu+FEXs@*&`pz4|NNo*gRAd%RmTBJ{xx3i_XM0}sBOmKM$x90VAl z6TcG|Rkt!-$#u6D>u`W%#sB;ns!=)TGWt%yt?N=|MAwg?Zb)!x?FqD5MF>s{@^@IY z8=a&1Vi#;7@Wwq4MM!o^qH$~aqe%9AWSN$}s%bs><`bdB_~{6br((Ttxp|YY7`9Bd zw$=K_J;id4FHnDOSC7^Xx%x{JW~UVx9i7d~^=5fdD#WiGmW+^JA6PFn3ub=ga3g^1 zq6$O`PTeNZb2=|R-=~XJCMKqtdAl}kij8jX^}aqLN=gYC8JUGbvgVXP?2WJBBqugt ziy&AV7G=??LTFAW&B-4#zi9wAL{N3)`0el;jfOYN&?WlM49TUJ^x-*d_i2mq^{ zo@;-^2Jp%QN`TFW_m4jD4uYo@z5zT92I-Co24Thy`zH(tAeB@qm1GJG0+Qs;o-bzh zgkuLBACKXV88vMZ214CEqRU71(zwTq+<1S)=dNlk0%=H(2b%*n| zZ9%1rP#>T*IvvfX)A$swmusx#(N4^EseSr(9JrWZLEVM9mgyY!f5pUx zGgvKw+By7QFY-5WFb6)!zej-?NB({`4{&d^fKpBsTuKf)5 zMhK!`|5A|LM!Mt7fUAcbKtVxSTwENZ67AVSYPsHie}AS&NKH*lRU5rTm6nh#o;?LB zlrt2tvoU!*lC^x1Bg2eV@^f?J==PMCQ&I5u^#KLhe}p_@W+1o1AM*Qt3G^t|&LXrE zxPjgf=j|&fDh9BR14|v=&`NC$;@2}&@R(w11(g!%6I>+C%utoIOa`sW@P;TI4!jzNp6 zm9y2Gux8UWlbbn%T(XAlE3B+(lAaTalQPBy&H(AWoPO?1+h?{*?@X=CEi7I(s88fX zUQ%w#^O_~|s!xLFQwA-uXwN`{Lr=Gp)m3x{hna?68orDH*hxqHxYAWeEn01adu{b( z_xLQ5@p1#3Wm#1*!L*`bNB5do0&`|J<=Z3)Q{>wVtOrKE)&4-NGsHX`|Wf9#9Pa`I;Z3UvnyaUmO$->Df2FCxA+dVMKj^@UI>`=i$F% z^m=|WSmn1>9aV&PsH_pgQac+n3=BgZiU|O->E+uaPz;~py7_eNQ9{7xvr@Oqqi|+* zf#x^QM5!}?UEz8tyRjoWT2DX#9&a;Nk1w@iNhQu?GNX@H4~FvN>vMfb(vz~WJJ=wT)DtN)S#s)c@kG!xfhS~3vGdpI(4O8}ZFVTU zzzrWxzO3~g|9gY?fsB$S^-v=<>>6EzI@F{#t^#(a*6(uGd>w130@y@Y!+|$NQ~*lm zv2zA9R&-FhGWx99uupp8dclKvZH-<}lf~lP`pEOa zn9@GEG?&W*1H$y^D9x3jKIPZ(bk0Z&zJeP0T4b?w7N1uTj+TC50i?D3aCFA-~(Aa{I^bDKbV&wzd2wdsxc6y zi)qGM-^*bI3voj#Dj%=$jpL%T8cu<5au?$y3JsYqMsjI$bs}YQHc1yzQ+jGAYlN{aXcF*VkjRRxVaMZiJvS^>Kz)mZThGQcmBM<{Rugf!Id8jm&i;3x#z1=57Y%*5-&1y!5rIRfg z-$n|rW4hP?3x?Dl?{D2cWc6gqR(NlO4Y6(yQn*_bRX@IAAJA_QG6)YGj-(SHx?!}k|%>VPLJ zH)q%pv#7wOp_qA1ZA=ZboLIf9HV=Uc8<8VHWt}^u167HWeBQioO3Y+C5+`gDwefkH z!XNV^cK|_#s0(B&@o#=3<(Yr~yxSo|MZ$!znNGO+@8~OAJEshMcfm-pO^C?hQKr#h zbbk_c(hH>ury9oqrFy1oz2jLTm)SH=&h@$+V8aZzHdb#))-H{g6H!IN&CJX)Ilb%E zc`UY`?0C>{8u*TA+u$A7wT_w}jg3t+-c#}Gu>CP$b!aLFKn>uD9#BRl2&rHXObSuc zx@yjuP<-?!_f1WNA`^uj%{ql>l|`}%XG}`F34nPik&bOb1Z2T%$i~>T3$w3x4_B_- zOem%Tj8o_-nD&B)Nr$^3^s|PjQoO1d`*I)w747NX&F+8^^YZDuC;<5!rshZVJv%>7 z74J9{HHMRK5uSRES>+0$Wjg_(7#h6T8(r5OuU+D4kh_T?gZc-J<8oMI-c0S-$;@*8 z=HPRUPf$n`7Ecgmjj|+7P@$j7a5xINY#IaTLUsJeY)J{elJ3{x;KKaYaFOyVUA)GPvA&yDr!{sONq8ayi^M>GyMh>cIL7C-ZJ3AceHQ)9XXo98UjY!_ z@0+QYB_dgN+xSd>Z7ReIo`1XRJOzmp^F+w75&dCs+4Np?-U2)RiZb9ku4(5{zdS?X zUr~7K{}@!;cmIi{PVm50c2HHZS{fysE^`~KA~ zUP@N_rh&jW=w@dZ8XQk>Ntd^>XvaqFS2dk3-}0$)C@=vW)W*)FmOI1?*zO6s$- zA`^DcG7Mn^FqhK!>DATv5|YqTf|b-IJ89>TecX*=x$ zQsK#ZKNz!}3fmgy-$r;-oT)kS{~OF5I2im2- zCe3gl<{;WWf#121zHto;&UHa>z943dr8So9B>o`ZI5$WkQLDE%Z`g~7IBXYdO_5aY zr=-TYl-Y?@E+oG8SzJ=iv;w+Duz|!Kzw%NLk&`(_Cl`qH(fXWLn@^vX46Pl>`-d}kGWw;N0?O94c9QR8Le zEQm6#k*20_bBZUL(Peu?zombc5Ze<3=0dckb3sguG_fk@#exbMzQ3`z?a$HrB3b8R zYGlIy&`sD~UWyKTPh|V(iS&qO<@=Ui zV_Eg_Gx&0fW`+EUU1=PPi8zm8T2GGzW8{Qv&jjOVb&V^e0-lS1nbX= z-s64i$!_6YQ9BeYRb$?XFgU*r=`0e5`Fuv`sm#nhu`8kLJNM;s<5ZhUr@GX##>LWZ z?{{Q!r`;95)Y-m|!!ls^Ul(H;+wq^jwG{QApN!7047vWLmj7Kp&xj1vOP>p4x3+m<(bab2uYed&$p6Y7e`#e8W+ z(n26HIr;JSwyoP>!>`D6=PGL^)(5{pE|Y`N^XWp3u_s&Ph(2o%l7NyTW;8O9ciJRzYAan)Q{?qPCr08RrcF4Swmb3uq+xz(AO>pA2tRSFDN5{Hz2#MKe`@PY(%!b zbb*s2hpHFw*?4FKfB61f%}xwycyG&rKB>^q9LHEH;5wZVpeUHdn*_Wgk8|hE#R(wY#XH$-)<)Z#_!8r_ zcsjXEc{Uu}y#im)`7hMzOxlKh4S~i#1_lNLYX?!7jQJW3CXHPv0GC>+u~FI2*E0RS z?bo2^=C}~)Nd#1whQsP@K8nwQ1aa>A_yo%2s zWGIm^W^-G2o&!)1+`7>sp;I~%dOeh{Uoyl8Gi485?~1v#PNa2dfJ)r*vOgdPCKez@ z_GN!II}6#;>ls>XU0;X}iBHq$w$a;dvIo5p!qbr{?1C#$J6uSPeBoeW`|LGlNu})> zTPk~e&KAl@$(0gw*=;G=P3V$SFdrTs{{Fqg*e`QZ-SGoJS}iG^N@O>r&a+#|(*453 z+JJbABK992XE`zCa(Sgg_KPE5D`wl~ybyxBAqt3rKppT_6;dQ7Q1#CJU}_!95#f^# 
z8X!Xt@kUuv)(EB}34CFO`>{{!yLMiavPOh(@fGF0#CI@2S zFZ}afE3<%x`BwgbGr4bBC@~vN5vm+S1lMy5D5@Om^=8hAwRf(N(k2+YzVi~-V(Ivd zYIu+s0{-mmY(7iXD=^C$86hmyQOP-WcJ}AHJBtO1o?2}~ebwnbHxg*-z8{3yT>550 zt?2cX(pHwq);R>6t1mJ&A>$D&3gDWjwVZAOgmtRA_C2D+JK$^phD;sqGw<)d?Pgxi zgLoIu$ZK3A3&cbO;QBK0dJ7Z4g-b zeLhNSYI@vm_tfQfhi%*+iCb&%h=f4w=GE0QvB!er;*P!|-;1L7+32Ios3dTkf2 zQQ61t7TnS)le^>p#_rsB{6RvSRGog(>J)akArD?}6#biw2cDmZ-$v%<6i92=pFO`7D4FsHo zm{@;^6zKhCvhgp4*7Na9Yqj)2&-lamwGkqIHRoNn8hu(l&R_56tc?u@U%w&UeNGvmCfgZl^dNXvv~yYusK zFv&(L9e|tY%G8Tq3%)cnD{-4}(e)$>fbm?p(&{_8M~TB5BkY=-40N!bo_f5O;<4m3 zH@%f11la{$Luhv+XLeoPPL01nlUnF-jz_7G|J9KCslz3HDIxD7@>^U2biPt<*=Eq~ zHN|it;*~podr=#GEx<-;I3}>H$|+se8x9)EjWgE7a9gn_^IqN->$|T#lO~>`fgn=UfjRK(PO<=r;5`z zg1m@N{R6@>qao6zfqCK4Um^Tbs0ieKqs#M|&3q05&c?>3_J-RQNQkOcTdg&k_xge2 z-ws{yAxK@vzEF?kWN4Z2Sse5h}Q?^fvSd?u0{BqX&kSwMin8tS>TDO-qp+&WXzP zYU6aXgS`#j8}_4nXi-{PdMc9>+*a*Y_-6iT9GQY)a|NQzK)E0dSMe?O!7m30r^mY0u9PDXoBi6!6C#X@L{VNj4h52mk}7N=gVh`+r~vu2|Byt2m6$kCK^_zg z2x>?!jr@U&SBo1RG&Yvb>m;$){Zi4nDwQizsSvOc(qU938R1qZjz9$ zYL!5F+!@D})zx^SkdP2N_zI4(u`y=TsUT-Cb^FuVKy_}NLSa=r^yu23MJPT_Toe-pMCzKSir8&^DEpykp{r+i0O9qO-+&nf`bSqF{=`V@Z;DoQ z^%0uL$#oY-TsWmH?$LjQ03kv+e`zp(3TRoKhxTb7U8Amai5TnMr?Gzoqa%dKLxQUQ z0!gvr_<c#gj7_J@Qmp{8u;*NcgjmH5ES|Did~Z(#h(I*ULl7?2zX=| z!e90m{0ez8lPQW3SkU>OYXkGZV(-fYwc1>$C_;F~GCG+A6|?V+uARQvV(x9+*vl=% zbGu#2g6s@%{OZB8UR!gqk}OiU1j0HZQ1s8*2OCzK47@<*6*%4apG$}!U*s$_yAlZ9 zc4?H!9<)GmnbFAPaN{x7k1s^>Cy;<1hX{;$Gz)*sbUf^v0&1`{Ap%)d29M84QaYeC z$bW!=mlb~5`_5`hcGj6HIJN`xU%Qi1!J>fNoq2+-q1>R>ZF6a;EJ-!ZTm$t$4+q1l zb9Q-Sg;Uj_QH~h7mz*5fQ`VDgicv-@gazES3Pnm)WL)TDV0_9?YHCzxIb%(f$86?- zh)TY)lxjFz>U6K+K#_Y4G{n{XAAVP$#6Xz}=2m3r-b>Gf&88ITI=|WmMMtL`W^Sb5 z8pI3Q7iC^M8Y_CyxOAb66DK2^GEPA;30y!{ucxe=PX&UK1k3Nf26lJ#zVx!-zGUT9 zyqMtK%v=lsqJ+}I$x1H}-bI%n?6Tz=cHT-Q`Jj}y$mc&ru{9{*O&wl0^`2T??Xn;8 zYc!)G%L98{&c`nYH0PN6Qwd`;%8-C|LX%pdY!L=nXXwp9VVDq1Q~9KDnj)r&)OwRC z{FbIEJWrO5f()L|930T;BN--9W z?ms^m&b^r~wwS28EW}h)f+dSWW@{x#sFLSeAE`F;U5tyzB_kSQy4IQV(31a})9`KJdv$A)mrF`VdE$$pmj8$__SDOjyzYjV$+ zZcQ5=$B$HXT6iy9+}yBT-U;b8+ToWRKn(ddZu>|;Y%EQ)R z&b?V=R&e<0Kp2cWu>>gPKd*KouC9)zvf;neh~61DC?8zUI%wEmg}*knIOF4KKU@@^ zCh_KDCwN_SS_5;^Hzb^cQVrGWlEJ-}S7K0Nd*Z%J6dfdPcucG_yDTA-Gce5%&^y_v zs(!tGrB*5DNR}1P{?SR138@vWMX?_+qP{?Y}R+d;`<&;T?)~h&_FiiRIAv1?wF@c481Ts^C=AKqe7{Y;sP#Dv}9gjBB;a55Lw6bvO|73D&4`~vdcf(Bou;^XC+;MW`y!?DlR0KDh zviEmfTqrd)xDUJ^zg|msH^9xLUPyu1=B_U17T(0_X#!Tfi+xt|)&-^O&{*+60Y$&t zaMiw3oX6pnslXby*L@b58|d5aREUt%*oBEx&J+*2A-`1)ZL(ppVwp2o5B~LOBA?3= zU+P@Jj<&z#2%J>6&Z3`_B?U0hV414_p=MQ1MCwl^O5^pX7+K@7_;VPC*q4)~>*HDS zg0h^Rzjc#SL$tSLI%ZK(1k*O%Dr(l=lmuN`GF7!~M@IL9vm}W377UACvoGk&UDzy? 
zS;KuuUf6IoZNvzUmC>5047shhn(nPLQ+lL>2KMi-+RW)&YXKd#94QQJ`loBs_nx>T z*N*$&^Xp7|D}OtoQl7x%%f2-q zKjS2l(?^M)u8q^}E$W>ZfiOx;D5~1{AA5h@6^`fe*Id&0y-Ev3+u4t0p%Fp`9#9fJ zZZP+VPowQ#lr}!)KC=Q&Jo5MC;%o$u2v?e%s$P=zr;Q_cgNU$~9d>51&Ze<0c^=%Z z5{&$B=F#HBc~IKx3XJT=S$T(C|D>IXbq_h?>~I`QxRGWDpKP#}iui>8U3Vt!*+0KL zBI4P^U#Z@Tga5EKrgtW?Lj)(HbE*>DPip^uc*(In$Nun>R=BU;t2 z62Jc3HON0&doW*ycd zj)?|!1e4Y|uuKj2Y|$l^$qmX1aP#ouN$%jVfwKlTx)^b!D5Cvn`~W%^L0d+?GLSfx zuG^+5V0z|KuwiGWAV^c35v9G|KKyz#)^K^0Gh)M**u2I+PQ%%@*%$9BY_JsZ?rpHg ztW+{R6$NHtD|Qg-oH*Gi#|7K`V|&5HQlUvHx)0c^u2aX8U|M`mv=&1`@=v)qNl9a8zCOTGG>b zA7;yMHD2;f=_ohT6f-x*;tV&aiU_v%+k<#k*q}$=B9eUOZu5O{3hW1Ol}((zamY6iQh}`{Bh2z3vVb zPn03yL`ABL=bdxq$O(*^vf7Pc-?xn>Oj&*mdS7baHw!bjipy2`h3tj<>}`A>cf5+& zxM?>XSaSWIFqOs7K1@kC?$+&31#vM(3o$aur+cD+0V+*7{1#|Pm1e`>`(XpWpc%PM zH6oJ$R(QNY#xGfa*ff(uE=q0RxdHzj$@6%u;M=!?3*f&FRV*7W+am3aP;DSZK<#!u z*VYY%z{k(&t1lZ1&EpQ-%r^bp5CLR(Vc>U}!@LT!5S+#%=F+4GFQ#0_>p_euZPaF% z8+$S&wE!!05B}QudWm?)crCUb{*Cnx+tDihG8r$j&LQ*og3rwir(}Z`qo;@ zI#76e+1Mv!Ugt+bkW~r_9Rs=w8Z=-VNdkUk@*Tb@MXkS|EEEf2P2WyT3*cN|9-0Lw z5qW&=U|NHKRA}bi@ zKea1)_@K;g#X#4XY$yoto|T`;5btohd-er|x*Cx4(r5 zz>N{Cde02Up@(BOYM(f@;OBzhQFFbfJAQ9MCmoiStwr^gG1|o;bTZvOW)aJ<0%x`H-U3>*VdI3ABQ9wHg8a6);t_O zX0+u%{LaPcDu(3uSsu5P5()|KH#mgUIA{Gm0!LAjWpzNKLld0`z5oVTMtl#gPQXou zW9LDHpJW}~RVy?7PSSx_u2h~Bv0Y#9M34z13cD2lOxWsYX~{{A{c!Je;Z2F%hwl3w z@~dr|NF&CN`iFwQLn9`aq&qC8sK%=hU}?P8o6|hoE6RLh!Xq}scDc8Z_@^LK`D4Mo z-dgiw{#xowDHtI5h%i6cRUAx~VDFhu0r6JyHnY2NrS36}bd_IpV zH^5SBS9^bTnQ|x=V-VNU)`5g=1?V7YqigWWVI*&t?rjpOV6#*ccdd-LZ5M@kvUw{& zTK~<3a8{5NC~7L4%5gS(8Y1lb-K4*onBVlXD&()7-)Y9mF%}I^;^)~-VZSQNX>PsZ)lqjKhytY`L-4`8Q62 zj&$tL2c_HahAPJ-z$Gto58e|DTv%vEsWy!2?CDBFgysm~U;*EICctZb*qwl=oS+M7 zK(yOy_)ikvi8cZv)J7S-e0r*n?HwVV;Jwa{SVhcCgZ6KineznVqm?}`ZY|G`^NaYs z9XAdj+m$dW6Rlx;+p{#qX`*;`DsL*9?Ov#K_!J`7{nhDAcUqBPQCCNsb#OoSu_#*} zi2)wYEBT1yaSiK9pEU{S%6*UAZoUPOSh-XsrfqxcT3PK|@9;6ln(zKxdZgh{f;yig2bhp{ zbU{(1(u_;DoxRz=uP}q|?n}i*P!RyD>*PMaxIQf8I&X$;kD{NQlvv14Ue*Hq%VU#6 z+Eb>+UsD|xr#KU@-_l6FY)6tM8C>m@50-M_6bAn+@2uPPQqWX18>3D)7dX7{Du(gM zfd1%g4hSd?))Yrs1$A!xeh?E9j~cWvuY*8-NwUo0K+8aXN32g9&^nQ;@0 zHsejF^Puj(S%y%)zdQDNP7#1eD_5c(Z=`(Ld*e3zc0UFXqYU6T6F9^xNc->)tStzV z3Wq`}t+$2nL88`k=V5c{n#);4Yh=XKwqsc@dbDkmB#s2Qc^-ymVQLk{XD5^Q)2P7i z+HsQ;?I;r6)8_M_Gx2xLp zab85oAACveQK@)`;#VmazPk|VZXvs)5GK4W*b{2Wd|-Gt&7>*S>-U+m(NzDNb2J>; zJ(#u*E@R59IgVAsJ`|f;f|}wR%?z%V7rYB0DH$!W@?dOIHmrZ|k+|xPk7t_H&1+H?k6a!pOR#bed)$>8oO#9*j(%OsKLLVQb;LyhpjB$KP^GyDt*w zL_%9FGw)gE3;Y?J7|t4#OUMS*_cun`DNcX?*Ck#rjb0+1Yq@`PY>Wls;-?G!OpX|h z-k0 ~JGk*mecNkh4YHiieRnK&P)THSK6e5&-AzO*+pw)?cIY6M{IpU<{Mkw7w! 
zy&DS}X*y1>uF%d-T_G2&Y2+f@A6!gP5>Q*LVcz6mHqQYsCvN9Dk5N-7v+ejPCpkMl1>5_K>~M-F zb~4KpK`*{k)pK$1IwO0Rnj2>^r!Fg5y)}uA696JFJzWr~sfLbWjDwI7snnC1KU>GE zXH4#e>B9Y4S+?EZVcBdmMzoc(He3Z9q|wyd3wCYB0ODc$0&QAxtEnw*ih`UlA{y~k zaRCn&mBiYtdFFz{_7!58cPuTLm>`-!L6U$_(39@qg0ghYh9E|%Gl<^@8lkQ<^N3h8 zhiKf6E;GZxX0g<6{xC|Ljt8HFa;(*?Qo=v-1#zTsDeIE|Z&PI4W3~LMY)4 zLH`h2fzKjSzt4n?AK52fL^GmxfaYUX9bP3-Vd67}|A5IId8sEL;F@9ia(NM(((%@< zE8}vdAVmjJ!0_J}sQFTwem-aF>NTIV0h&IDR4x{=YYO- zM!!Q7C!1rrm1SzVOA6ld6il>$vi|=sjF$f=3>Pjo$V$F|w|Nd-b{gT-X6a(QHmQ(v zt15S;IH?UrBpVWlCKpucW7{`dcnn?#Sy)vE?n3+M+PPk9Dgjp=``@aO@=9^6*U#n( zsTw&DN8-D68C&I>srfO%6jT4Pd8jylcC_}~?D&}Vhj!L>uW)as0H0sk{J+(z1RL$sy+fEhi|pGWiw6ZqeQtjQpHjKh()k@RKr{x>knodtlqRCMVW?kYBL>k&j$MHLZxA;4%-a z#fqswE2e-RL05v+xdO>4Y<hR{c;Ig1Jn zjN;=8Csy`wW?o^;m0yFK;wt=&USrc-&)(AqvEp$xWHQIw@%278$n9AcJ9K-Zn>V$p_AHrn_27aw^^w6 zlGbWH>9ww#_7)n$dDPh!hlu9%2Yg-0u$uwpCa6sNtGA8K?~G!FI9*v>bdPl1F^oAk z!hfPDkZ%QTRkL46`x()upPOHL-uAuK`E^iUD_m>VfvZ+4tK}PbmMIy^b=&MvAaN0_8Pd}!Mt%?~$ugKXBdQ9&+U`NN`fdB4P(PG7oA?Xrw&N?q$w8Xg;#HlGp?~g8ca0p4owL9 zROF9_4u)OMmk>YWywCz&va+0NMTmmpeVZ2G_*54`9a%A;)}1MhtciZA7Sq@u4nq>} zEF#N_Id)PEPDRbih29N6vZG_wI_|>XxReV2dZ1mXXgrlW`khdsPJX;*xauijn4@E% znlc8Js0dADHr4i3RI*oL=WLS zp@4)MIBW9Av&y*L(w$T1LBWV`G*;en&SAcF7a~QTT6M~1y|^;Pe64r7Fz!>Z^_?kd zJ6C1f!HAI8{Sq7E(2djWQo#Ah%+a+lCA-v)Prr!zE@RbfNw)WPr~HRpmOj3HRNYVoqU*fO2s@E*ZT5^SJM1kc(@^ zjY41I;yzYxLUn27y87Yx`iOPM)8TOAh&mvJI~bFWZ)BB_uU$m@&LqsNfa0DmdFgK# zYs%fB#{%;4=JXmC)9avtwJeXR2TMKy!U0zJ@RzBfu(LqDvR~KBgV#wEVqC2YAF<5XBEm-sV}&E<2=?Pd|eqF_L7D@ z1=oDZ6_7DH>EErkmGSGS6QBM3v20YJd=IUDLge)-8Km|df2SPzjavo)>!~M2i{#8f zPDD50ZS_HzLxc>J?9gTXqyYJ0{CC`1OP`gn7C5qO<56=onp}yWQnU1%o>@wA;n#cX zQ;8BP%-x%*8sE!zHSwB9?Cl-q!G3ldwHK&b8bVzqp6^nQ*;)v@M4h>s$f-fcqI4g`b196lV>-^<0Hs- z*i*88fX}xLr&^Uk9Gz_&A<(7Q)E;a60y6GOeY5GdTau%)`9IWhzmTJ8-4E8D)s}<7 zhEy&&ud_niWWbeAXLJ5`-y{eYgg`J@WPX(t@08t_ zBGS`}F zAqfPL4(gj)*RCC~Ww!N*5vITCA7~wLz)we3z}U685u~*}pM~}e&W-qv4`Onp7D>jR z$lmnK0e%}T>sG!UnIS@2(O#|31gh+*daGI+8QFLN1D$=$o8J6%^A_HFcF6XihVK z45$dj?CS|#w4cl9(@6DHHiD)U+(x#ccsu7OCwM!L2=uj7cY<0vpLpx<*QA7~ns3k; z$EdUNVbDJB)o0I}1BAy-RtBWJa{K2mmI?L6aizG{R{nDAo+@mvVKe}RIUe?jG-Pe6 zeR-tEX3#J7e)&dOdVGPGwJlAjQ5X8t`@;*+dSPssg(tjZ$`oCbKkslBnyEX94i~~| ztfj@pxL)@pHdFQujUFov%o!WhY$>bf;E|WR)B&$@v=Z>FP3{?^wh^|S;bbuX{krqh z`$g=csDv&O1Nke_;*I1u#&p$^3nyJ$G=v4Sdpy;VJbzP#tp9Cq6DSNwUuci-9db?>azHDn@*C+>n%LoPe5b=2TiRXd@Bnt}+g*dGL3|WE zlRx@?VGhx$+#DZlm`l zLxK;%9uEy23l8Ep6i()kQY-|$M_o#k8{k_aHJ6tUob=wuxGP+_sjQJ0&k8%FKpB38=}_p9`psT8#15>4-dy0%km9bV=-Jld=iiI=TVthh*-uZ zHme{hF9Ly|7`m z&3_5lG}F#bcD)8alBSwY!8oAeMP)?pKa%nODWa7}3SOegp_W*?)9ScAV~Qq~>OCmM{CKwqn`3FZl^1j=oHNp?D{TPxK=ZB5SqT zlb}zE%i4K+)i1NllfKWXe1;;^P9SvVdiO4|2o7slu4&F#_@`qIfx}IK8(XhCM@2lL zd8wM@8_ieXrACxFtkmc*jY+5wqaaud+PvNSOa_CxYNcXviPZHU`p@C;UWX=v}o|<3f|@J-Wr8#yn6lTy_P`@r+p<7z(Y%EpjZjIqOWm zmeT+V=;5=C-H!Kb)nacd30N0Pfn0n%6?9vAWv8Tcug}QqA#gqI9?l#oB{&Fh8G(aC|QMNS4`Vk$#iHiUZm%!k74J%)5b71NUo z_Dh=*IxOrqt86sgVATG{i*sW%{B6e>P6L*MdBhDE*@}lWZ^|Zby^^1&E*`vsQ;B0olF3$F;F`rbTFeD%Rd^n0!^;qCY^W5C^}4T?XR)lm6^GNdjg!H>#)%=@yYp&##YXOm z!+d-H*Hh-`+E70alnx4euoOQFf_`~EY>(opWH{;(I^2~`T2D{1Y6(L&%PaP8X0?I- z2huhvc$h~k#pb+FAW`q|wwU9Ux zoP9L5ohh2Zl4I5$8x%<2aqlAHZ4&z9!Pd>fsWZm6^9;MnAO;8_W7y>+nv)KUS9|Y`*eeH?>-!|^7JYTbi8w~wzS($meR%vk4&JjGeOCiei`0%J%x<@~XRr}Qsb~j6H9^+nxY{d|+{cbhQl{V6|lrj#Z zoZ-h$y?RjQ>1BS|IPMPi?Crij`S4#xO*^W=2$XtXCpor}n}YsQf=BpBVjr3CP`jb( zo*Zau4IwLg7?(K_he^{EGc~u(lZmTR*~8%O*b1+zLN!+3*3s?t$Nq$e`(Tu>F*E-l z<8@=Bk>RFo%x%Lql#s1;@0ED4v!xdID^V~`E$wgHf5ib8G0+w3p)-k)KNXK1R7gp^ z!45bRZ=0|h7hB{T1|qhG*bCJrYI~fX z2Guxh(m`_U`>6GWM^V)JfG~2_&TzAY$8xOY;6CNH>$OMmV!Z|jrP)(;?|J4E%_VY; 
z8P6cm8>7kR`J)GSM{xd^9J%*ZDGRz)joMmF6X^h;~6;Fdkg)fIy-oBw$r zZB?)Lfuxz>adBT6UAO0BM!g_$D2mML&x-Q0{DJJB!50>*I%&S8js1pS;1up1c5^M# z24t=R*em5*gULY*0-jdfr_X(g*#KT#nI|7{%z&vlOB*kiihO&@+kM&?%ppj2P{6r! z>L<>ljXXYp>&SAZ+-`ZXG>5+Xn`Mb4vLKJqzD2Js>+}@PlbZZ2>GftGDyC$}H$-J6t-C*cCVyvr;J1c7#R2yAPebzvuv2(NT zmkGiw$Gv5X`*^#_rN$p#13%>Szo@UL9hPU8*oTO_Mg7WbCz%r z%#RczU;Jy4Sp#E|=`Qp!{UEC>q33>d-WVJ!;;z2;Mmd_tz9Cc`>7TTQioWQ`v4;jh zc`x*A3t4E1PTLsJ@k%#V*f-(N>NAq%uSQ^_~q5@9bG9VPo?DPS)yKCMtB);>XZ zmB4;ztL3>BESh}Lx|KBV#(L^8Nj?1Xme9?{drc$FAnTOxMUVetXt-_rxjcNE6?WOh z$M3s&8pgD>uM>3c=FTZo+t6(h^POtGcX215m9d1mFAM|56vhb*VSFmPn$eG3Q0&ub zPE_dF1SyvaXfb6x!NDG}ahuSe1NTg)!7DfAqjbE5BYuuJ{hsQoq@eQd;IaS}Q<&<7 z{M_%GgV80(8|6-kh1KFV%Y?H#O$M0qGvP;EI!R&0@XCFCKROS!sR_{IbHU&_5?6HX zF4AxHzBzwB*e;;0=O}Rh;v*|;Ou4G7#WNYwp{KVF?FdZq`k&dD-YdWKv zX@8)3Kr%_6vB_&QG=AILeXx&^HSUXA+nP4yMW0aP>%6m{p7^}{tkb;ox=WJ;mvxoL zwrBT#j(rP~oxt_UWw8;vi&gw7F1xbc{Nv#($2+{e+h%d8FiovAW_a3S9BZ!YL4Uc) zcL?XPGn=cvb4M~=O)E8KY}NVB7jHQE+XM63+FB^oV_8$=x5W5QpxnRo3z<~TF)3C} znrDgNJ7Blr26D9|H582LK^zI)&3g)!hRJCQ6`UoP9$a6;7KZXOn5y_sR+cub)d-Rn z0tojum(GC^vF1<4x8uj>6SDASwPI({R2JknCYvk|GkS}%|N#9TI@<& z8m%Tl*^RY}0cFe-?a5sZWO1sfd$%!+U3r-_xPM-@d;zui@0@wh=ofOb8i}nq1yR~^ zqLeh4N_J)dzV^znNBQy-Y#FzDpK$}^c&OWo=j|%Q8?J_l$O3x`&SP@L)!irLhIJK+T3l!E^6_QVu z!ctUoee>R6ktDma<47hI2D8RZ>2tcWHX9lFT*n$WuVAnq_f!sr7Kd6=)5>w5nkdOJ( zZpiq9c;E&%HF{#-F3slP5qYW~#6fMjQQK}W#DB z#kd>M;ZC%#K#r=LE7Hk9^orMFfh(DQL zjX{LTYQ+lKwooLfDFm?Uhbg@HS02vZr(^#V+6IA{)_y*gd$HNA0$bO=i7YH2@zL7d z`)^I7_vQyf8Sw(vCQ;_bONk`yGtr}E}AITfh6X$r)E z2^C^Vk!_mV8jaa?FOkKv*o7iQf#CpsSuEX&suG>ZBqZ>H zqJ3Enr*bZje-3dseS@qhM@4gwZbEA;BLX8K=extc0PJ}gKDpsg)axTbUc z|7+Tz*n7Nh>Kc&P`3`@MDg7!hD4sx>nY@AUpQc>m^Tx-KE{-iDW|8sCJhGh6lF6{d z$f$3!{9y~FSbE!r31QCtx;C&;Se;>1ol$7wh0>hzYkRBvRV-Rz)9G}$`x3l-fgDIC z2uuG+;aV|B@VpOLQ2Xy$ZIwDJK1^jTAO&XtI$dcqWH6%?~~>2z;i7|2{z<;gdu>CI{c<4%~m)bGJ6}-Oe zS)54%{~zH(xSP?5_?RX0<@J1YLU&uDJ!1)W{jz@2rIC<7Xb6-WT^+j}J|e-AEt^4l zf90uFzBas&>?$g9594QO<0Uy+X^-B*C`MXLvlg#cGxFx&T5=r+{!{yHzdlvmOxxaKXHf&ieac|!Z;BrhjQQA6^<&;o z&+g&;&5KeaiWr1m&xg#Z_k}t{NVd2vo0jG+4{!oHr!K|5jYlm0(i2F|%+LEdDaNC| ziFi?vFM4S^b7R3W$o=!d%!FI-77C~aM&t^&{O+-70PSY<(X@8M8M|8m$^NDRWnsCm zoE!&dw>Or6q{gyp@iPYIi!KmcLG@w$oy8}FR9M}j*hOyS@O8CWL(@b=PvD71lf&)f zA!$Ffvr-)d@M%oe$P#eqwi;*Q(jMr|TUqC9bE{XFV5LiY-ge70)7$&8TkLE}_#N!Y_H(Yd!ml*QXJrmT=l}{5~bxgB&7<=C2+s-5NzaOa|_j{$& zXE71$+^SjD>GVFSf1H4$Wm)p39nVsv1a!2@c-A>Su=J9b^3iKmZf5nGpJsN)B4rHY!f*jXe)kznM zBkr1+XmEFi=;H8RpWYzSQ5?hP-rtkw4-ax&-fTBnpe?ZF5JcS1hyMY8E47#xf2rY7 z`jO}pGbF9#x8I99_IG`1fZnsSm>Q7|Kg__AOFh^?vr%WFRlD;}#@%(8o=6z0xnS0! 
z%Y8LB-^^fSC6qB~=vsYed&*u4VoH9p*WBS(+h&CVN$U*j3Vovha)ha3pLgh6H*!9o=v}t zx!*h|^_tJW5v@Cp=&@@jF^LQW+vfGu>#4e^k-kultk~AnfT~rZoT$!|$^NMXjS>$= zn$e9Q6wqMkSOY3kvT`n3b>%GN4Ti)9EBoHZ__tY?Wk<|F76Fa&TXf~H!u=?!c8F`h zIkP!e>@i^;#uC}o-8mK#ZlC@Wnv5^RR2;o;NFI}6&o5BpVAR{9Rtl}0Tqo2T%>MS6 z(W=2!k-yy6Jiss)CQuj^IIJ=pLDCAJ4$5nJa*|leU~7n~CXyy)YeR8`1@d^G1gDwy zz6cJkQw{wFWs})({1L*`fJr3{6f8UalTbalxS&DN6W|pIJuC(V7Q`@V!j@G+7f58F zo>HiRJorbSRs}ba1^5qkH;bB7#L?C!UaKb(rjsSv_l%&Y(P!q}?^0LVJG3&C^fs`B zWP=~-$ns<0#;4S*i~Tx`+8esL*;CA$KplHAOQ`?*;&9`4OG_8Na;@t3ewlR}98W&N zA6bq!E?@;;f_TWBFD4R?|Q!HFP4+rd=A&+>J`CBkO}|Q^|v!8GaxV3 zsiD2WvAHg{WH=U@|Jko}P19$3!fymItg6S|0gZ$me?!P56;0z3C|9$wh3GyME*rLJ zv9)uggU^eMV?Q8RyCu|1_-b#RT9eww6(fcQ10gk3I7O6A%Fz*ibjdT-(D<5_;>QeP zRQ&+UBq|{G8ZY*51cfY3Dt8s>m|&8QDJ5f*sZ}vC^aWz=kNszyya$aoC2J;YzR*~o z((A|+X5)L{Iy61dnsFb`8}t=>Yx~5X6KvhKdUw`~wHmy`qsB$?#BS+cYDHN+e`p^3 z4WMpM!KRL}|JQjcR6zBcTBo>ki}n{=TRWRu_w4zlpSB_6LdIJN(#vn1Uvzlebp?tW z3{V{yuzd&ee28F!OgbLRZYH<-Bx&z8(9ONlRj&U3#*~r>X{Vn?rZ*v`Ox4zdf-)n( z+NL4d*blo>h-ncu^&wjMo5%}4jYkp@m8WRv$Tp+9!02ho;Ozx>n?j8X=}VH9Q>2Ot zYmzdNTp7M&6QeZ$lhh3-y*YlU`%DTr#l<(XY1}JnE;(kK_ArV`P45GaBWePb@@GeqBep z+ALiBfDyam&5>y=w2~p1tge3Jen;vAN<0mozvABh3jVQR{BJla^2hdzg0@w(wVC2u zT+6r`QNx$@E*MMjs!#SWfVQYhr;C7VFq4G<^T3igm#^jAx=iZF@{qF%4sMgrMBh?H zpH#G3ZE{cdh$k9ubW|Bof*1I3A;a$11{Ua#Um#LyqJAvU{5#SAstFe`6v$K^1rLRj zo$*koYtvxB0`G1C1IzlN_joZhR7a#rB#eknNveg@|GpK=vN?J*k`2Od7^w!F_}cr5 zZZv5CO-}ijKq!hQq?%0O&iL%N*HIv@R!BA9bo)RE(_x2R_F%iYWm*X`EB{u5` zDrcM64LYjZUp_6tRdof99B;O6r(B{}^U@${_}tG`^r)PLf5T~9b^Ef0_P{Bh8ZCa^ zLxHi-zaIqo$xp#*N7G;tga^oQd)#^o0BZIo^&Ji~$|9)b?JSA+11zEolL=K3g56mf zxU?99YjKs$4>>Ks|4ogt4cR`qT$E(JTx^^0Des1n6WvfggnU5Xh1V5yh^Tv~^g!{;NY1(i!xNn=D zk8O7cI!?;sU;b7ZeUDU!OrBCtVYXtJ34SIV+wRPb@Uuuyv^RN^e?)JcFB9YXvb0d8 zp|CppEP*?mTcbuzf67vp{9l5u0rTUuFBxvI4R2v-8wfxGW`=UU1#^anU;52?b!S{_ND*ue&UggVSON z{xAEtpYxSDnG3aO0(nh~DXGdk?bY-BfbYLOU`?A~$@+*7g%xgsF0G3&v>srEdNY4w zusZTYOX+Wv?@ORcD9!?sz&VsBl1I6 zHFoE)dXxX@17-vp_W7FGR6Ky#Yms`-KM`zFAcIp^l=`#;H!H*G^93k&}F znn_F?G^g=rT5uu$Bv*W6gFnUFTEfDyK_V%U3=as@n1R*s9bs=AG%;E~x&fU!=p~!w z=6cWkSIO~4j)8v8H=K^T;;cYzOMCzOxAAnV-EJZ(v-TK*nbfYF?1Cr^w-9B#!Wt~t z`(3R-6JAxNmeY73CKLjUDP%rCK1h!IjT<4y`WCnfHBk2 z?U`>@SyMqdd7@drAdLn`n9Ok3--##@FY123DU$>IeHoI{K#eheR!9^9TqAuIr0lPb zt{g>NzQP=a6ljyHk171kYVgQ(B`(bE)OTBSye+b9OrUY(#_p~^^@SWwnWVBJT6sme zKBQ4QXVLt$$@TTt4dqu>~Q;Iq12o<{t(XngMxIm zhagu4tDAOzJEQa*I{5h2<0%PEgUqB{O>K_ugZ+}S#R|#(a@1a=bRV}77;@8$Cc6B* zz%AEAvkP}k9Z!Di0AzaQ@WYztZh|MRRqZe`k42J#VqWXeZL~*CidPqzrn7@j7Rih; z$dmYO#~}9u?P7fpAuEsBZF;E{c#xX~HY10$mX3DZ7S!+j=q6g z*1KQAv)t`0PNlyjd<$`eW@$wv@S&rG!6$k?<*0?~f&GYo^|U3nSC_C$6Fg#h>v@&h zZ^vd}Y%Xv_@g6#{dFJZ0o+NLh6MLii(>%s<({6k7F~HP0DWau!2RK0>-=wGrE@$v~ zwskJ|IUOy<$IEe>Xy+|QClI0EdGAgGMql{QvdWn*aHFgaiGA;XyJ|(?iq4YFJB67S zVwmGb&lNqPjDG`u=qX_B>Gg_FX}IowuQxyQrirN|a*zmA?{LyntrU$rOP&tos5uo- z-~HxbwyY$tRB6ugB)p(5yfQ0#^qL0^G^2qJf@Hx?3Ez)wXCWBd36ut}oeKQpgya;9 zzZm?NNZpbidhJxV%hB!H>uO;sdf5wqZPJ&=bZ+FPQoQ$cLLPBl;m!h;Jt)WYY!JLv zFA|t4kQgYju98EJjWTIOH>iHEKt=%HYCVo{)eEsE1xubhIx|qNJ!BfLPJ0ZW{6@G& z_^A(F3B8i8BP`{DOA$5t(s%s+9ZKqZE?D41Xx4!_RqnCCknx%PWV<;Z@3-Sp)C%Y) z`T3H#I~|ubMX9jwBx8N&)4dZhDp6s8dUF*%1zaFN1%>v!7ge0U#1*Cn2@aV>*XHw< zxSk(k3VT}tYE#K@fl&(T-;C398Cd$qSJ)w24yn_3Gaa%)_cHo#BGecSu8l#9O?L=h zKZL%L*1DWkf0TH+mp&W9J1U5%dN9iSPNcBO-6j<0I~~YaeTRS$75W3x`D5_U zz||G-mBN%8P)1Dbs-GVinV0mi*wemK76f@P^5xZKHgCqvJ%xG7=%5nyFkDLA=+v1yi- z%Og>_)0QrAb6(b#`vBV=BdrMsGKzCJ@U}O({-z({_L-hmNrH3bO0Pq2Wypvt!P=ZA zzO3d(CWw2AMUd-~?1tDaRmt&cr~FlqXWOw#jZxKwm)^Y!J+uS1SeVId7k+LMEljCL zAma@2$c|E!zGgFddVGxOLaXj{{K6Pl14TX<1h>LjIb1XTKh&L7TU<@Ftvew|umpE^ 
zcMlLkaCf)h?rs4B1h?Ss?oQ(xq#?L>;|`5mBb)D>Xa9tAajxrP)ml%js#$ZqV@{>! zOiuVDU$z%y{ngSr8UWyd95m-Z<gY?gx8RFsrO_c-js zTQ51f&9*rpFjk(@5YI>l(s|ti1|l3Jlo)?T5*+2d?rL>7h{LPrfP3I{_{%~b-KQnu zjJ7bruM=n1;ArN~m9$u41}X{A9u_L-LGBcdQ}rw&IAJ%V7Z2Y&!9` z;Oj$`z_VYjW9mP?kGKE+l#l$`CD>yjh@E9yTR9QK)e}~x)NwiplknXQP7eMfzB_jS z6AcbjDYmw_UO6Tj>6=GLu64Bq|HV3<{BnoKM-M2|nq+mU%k_AvF$!$xJ>pm&xP3Xc zMD@wH6;U)$5^2V7L3P)hFHSF9G!lzVh*wdp-b!j19&ncK^>mhLZ5wo}MnEOV=?t~?zwR=j}Qu7*S)Fu)YIX>hXfxy4VC}r)*<{6Xjq@8LKQ#MBu7~4^4 z3;=NQ(1MU~yvNanTzd`R?WQB#7Hoi^WM|pdGpcdm+)$mRKQ>D^v z`mxyil0od7z}^88EUG>ubBb(XF<#LRm)qfqF|%2$+hG203R7k@QO)`%#IWq$YFj4J@!JYBiusej>`9sMv8V7~ z{A)naA{U=v@wWGqAO+uh>b<37x0kM%`raZwSr=8?c5T~ZFiJ|*7Yw7VQ$mJp@CKvlN6_S3Zg`58fyHFdz?Uc$kMt8x~@WF~a{ z%mO!Nf)2VQ@=Ln?Y~fhm_&^1GqJ#*Gj{4ZvXw#itJeK9!n3dszDgM~O`WCGl&`+tB z*|F}eiHB~Vr+PC$y_X;a(dm7s60Q9u3TOa zak8Qg7NO1X7wD0g=JM*|6T6yq>1+Bq1HNMLaDtSRbNxDt7UVshX824L&YCUls)f3| zO~0$o+0k)R(cbTqo_X(ol!K*YBUGGp9w=^6UT1pdUd0kt2>f|-i}}@b(kkp0aviMg zToRuT)rabZp9fU(v$%r=p8qa_a&(`=h&!+^kW9lm$0+M)p2dI3;I3Yz@w6KRnKK~! zI@-H7db({ClckSuniL3#p4pR#?yn8~mb$5%S_^u2`c!;H=PZ!wsY0Z(nP#~!L@$Av zb;o98HsjvdfKV{zm`ZniyGj9R(~uIL$J>mXweGM^cc&Pss+)Iwv!`hhRTy6$$&YuD zRlkMjsV(xiXj*ZIL0Vc7j4lm2Gv0;YE%k|no>HF8MH{+Oe=spbwBcCXwr~@-@!3fG zZn7wdx)JJhm+FIo>P{!(8(J@MHcjcZi849^3Gt?t>8=gC*DSdBi)2 zX9th@b7u>T=~T;+0o9eavg!dCbeOuC>_LxF>$xN$pI^wi!MOvS1}$c8Ft zY|{RAnl-(`fqTdXN@>`F04XXs-)t0w}mz8;yC*q?wsh-zf zMLR1L^J3{P*Ev#Bm{NE)uJcJb@g>WfDyAP!k#{)C|)@yNYn6UEx)RY{qs&kXz!)-}YDk}0w3YC`5UmE-Q& zIaMqy<&n3QJ#~`LVimmTN_{m6mW$g7CTa$1KNj|HOX^`#;HvhxxzLvVm zf~f9QbehP)cn&7(8RrH6kP~Dg zEb;F4Dt38ldx{Sq3tvQ1-H{_euh$Fp=a*$i?fVl8^^<0&j@Nvv>$EG_>Bk;;t+Os6 z7=6cAl+T6DB-(yBP3Qyg@C>GSl(woDKfh_mwPU-(Z=PWX98J?*GqH0I`L#P4Gp^=H zmLmHvbBvQ}0-tV8l00zAs2bYZm|rYMvks-mZoVl5&hZa%u&$W*>TK#qR-E&UuRsMT zC$!N)@R=gW@FScbPi|xQk7@d;mM(7tUYslwWi?D^PyrfY-iu!117-026dQgS(vGhQ z;w=6cRR^lYNI6y2dw8okela}#>o{j@MH~(eN2zQO^Nc$*RM>CE*~`5ELEgV_*MVbF zVV8i?Ep;>k;zBC|j8W5+$$O3|3He3*;PL#wv_97tP6uoE8Lu~osmdt@ULJplc%7)k&{$&&o z5T{$covnpE#tnNLWU?$l@&QDiAR`7HUTr3=v9~Xf1Nt+04G&oXhj;JqfWr0OacVKk z44-}-CO*duTY1o=9~bDiP~8#Wl|S+c^xP&4&1=S6d?gv`tQ1vxoTVLKBcDoEKz}-5 z4ct&dhc}h=ydD3@AtST3Zp_VRd@ox)5N!PC8>t>NovqngeE%urcNI)6a*iD;pGMbZ zpM)bk61b$hPE~*VQ27kpy80GMax64EJ3GTv-jg@T((x)|*OhpjKVH)r{PLSChzfZs zspTUwjGW%^P?y`VZ+C+R&t&&kd;hoGb+*TI6jr!l9HR(}J2bI%zVoQcP}g#wAd?5S zs(Wuz{qBL#e_OR#E+;BMWBL6J9=Tm&{>VcHjCFQ~=^b?Ug?Sb>-bx)97wew>rkL=G z2(xZWPjg;@vW+4Ttz!MHb*cT@SEuB9ZpHpho|5P#9s(fnVlrHE9mO6ppLqFTE^}N% zWFwnFk*HMJd@HAjil~4MypN;OnmFu>U_RWoN?QKt-}91_`{?1*oAuE$HbAtSoh`?q(XTRW;GdqHy**xW zu_&W%(xA}QNS#q*VST0J+}X>U-c_8%uVLJb3eyQ-36HtR<)`}~s&`rW-W?6BjJux& z6GzpUQsUN$=rJSW{^HGB^Xj}wLV%t9rM<^{*E>P}I_+V5`3IyvldV&Flil{<&V2*bfp5p& zxz|$3fn)Wd9V^L3qxQCABLK#v+xu<%4Bqy~q|mX#nv$yui>%Sj+yNNY-3MWWmkrR$ zO$gtZ8TRW6#U2@0o7wu%dt}dfH<%|lZS9`ufP?NRb&2CI zK$M836s%6cx2YxQYkH7NfL7=KaarMTrrHisxAH~MZ*%&X*)TXlcXh~Zv+_k8X8s~Q zY>kHF6ur_PocXU(tG4!?mSgg^vcaiJNK0*q=NIBmaqgjM_4&5!G&}MNW05W8x%o>E zyfs4QnqNCB1zmEcCP^<(+^O@(tMJy6D({q(D=?yCi?!|ZEU z&mW9r5lykaoUTU2U9j3ho26=N+8OTddV}Chm9o3AyQj4k3YVRf;==di_H-S+=EWKL z*aN-8->uWeWs7L-o-I=?o;kL7iv$ijXKqMjC(gOcOWmG1UA{9&)4}(cx^>sku$^=K z`?IzW&uG@?#mD386BA}QdEI(yAW_o{B?@!+QY{CSFHT4MWlG}($?9PWw$VFBuwNE; z-alKzz%L6;YU7DpOdF%Kca34k@e;`;uxk{UgIs)kpGyS*^LmP8rAdcTMrJ9c2)XnqN8@(tP(13*r;t=WSaty~nY_ZXNbw&D#zk4EVu`8J+s#rISKC3VrhAopG@4mf6*q>v^K!#djwN}kQ4tc*GXq>X z!(g{{0&;n73m%jw!6VJIe4>Lg8zX#3oZ9R(|6uH+2 zPByDhJ2%UMWVkRfZuGq5;C(QmL+3KYzvuM0<0c$CoD|rg#fB(I4YV zee=oSp+ZT>1Jv%h20U~FCxJ+Ni}hk2a@YkuCvm_sNCS+EC#u=L>zta~g z67Uo=yih@}9gU>zr!#N#@F+|}Vgh}NEaC=C&9V%&ZKy#_9=4XrRpR5o&qy1`mU!{2 
zD?R=xV8m4OI=c#KNJQx3e4l#s#~ zFSw!a!c&K>JlONjJ*S<1dsW*fpWz$U)3>@wPKQaofC5lLuE_**rZ1_?ESinR1DS zZ%Ed1Yuj)0<0PS!L3 zFVF%j$_IKyN4YvMJv{@p(H-Q4%0j4?r|~1mMJQ+C1+|+a$;r&vH+Rystj$%8^+%Sc z4_7zliEfH1yENBpzneAx&E}3V$i4a@X`NSinP+8a-&pyte&aOauRv3S)-OxEw(E0f zBEaWN0mh5iXcAKUGxzvKN7$qr77(N~*6>=(b;&y{TaPTdf?UbqJ=;Q7qj1%`VDTX{TjoWet$lySelsIPN;k{eEf;;01G_%I`F`oIV}}e zX?>2c$(!q*^KGEU7+ZpPV0Y(6_qb42r;5-4L!l^??NRU=PBei%*{;Oqs>WkQbj^MC zc)N_<#3hyFPEE^0gVmknHTK8Bml!#DEQMoS@SrLKerhHk0@DiF*y{UPH57K{>p^7Wm?+m);A^tQ z#(Spbdw8x634pu%p29X_+({vj*|Y(@xsVj!c61u5KA(cosbX79oU)Mp4g#xDC!4Z^ zf6};qs3r)C$+@oPGi)#GOl~*~p*7#R3=cHq!(a7=b}2kspN?Ab#WQ3`6yuIkQGg$ezuilEKg#dZ1(+=?KL8oDLGOOTr;EA*%6RWz(HS z2md9MukXM;jjx-@<7}1bf+-QuI=~V9zQ*i@BD(deerp+TdJP2ic7-b`2oDMM7^xUt zLn_%X>|)`ud@^QKTJ}pY?xJV7d@N4)x#aY}>lEOUojUA|v%mb4;@Qwl4JJ%NlIEf| zBWUV*DBZu}x$4NoGZAnP1m}o=t!7g6BusaF6<4Q-46BB`z>2NG#~gjyrXs#mCg!tc zQy=A=a_cc0cO)K5=pG>e)k$p&zElk|h#7suCz_^Hu zw}z!LCb?d_!l}F_@RUL`*zty)A10}r*C|go-a@nzK4MiwJvGV z84L=N4z|-}oB8_?Wzcm0)XU~6&Gr3dk|^yO(ho8Rw&#}G+|!s_jdWA~wZI3^cqEOc2tZ8S&g=2x zJ7}@&lk2BTyS?Ro<+k=kbt@ldP$OAGOFHpW|Jcr+d3UDJlP?o}$*u zHQ(!()<*S6l@1-h<7!LX8lo=A;i?UYWl*$Q#Se`&ni#}*r&}~t?mhF#TOW=QFVRw; z7%X}`WM50&&YXW5SJ$jCo-(cncf*#N?O)uv&J2>3APUxH3?)?7p+o+#nda(q_LYTM z=Y2d+CgE-*u8`rR))zj1V>FPw?=u9wZamO_wRB~l-L;5dcS;!N@>J_=MkVp+RM@la zaJ6>XOa)U#H5Nnr#N5z;t!i&4t>fK{d^$6+6$VMF&zV?LNP1)vht1UfAk9wQ(yLFe zm_d3Cz0~na%fC=#Y~o|vh!!IO(ZKz^rz-+c9Qce#$}Zzr417Dbx3#rMW}$i3V;E3H zEv2SbVroyM;^eoQ-lld%QiZ!~^h4ESwbWz8D|4>rBSR=OMa@5P*0pMdM>F1hZF#20 z*gt%aFu1=lF2ZVGYpozzTrmTzsin|Lp`*XdZCmy$;BNt-%q-qwtfD#Pa*HBcB1vF{ z7*{HI6|YDftKoN`CW8_Mn*tt*ajUj+NhQ{c1;;Gs5af%xju`Uu?`#d0wht;ZauU~s za(Ol7qUekOQVE}`qn5$ktnRAtmm|-ktbQ{89`PfHvEC`-Jh=-YH!oig6S1;4g&UO*y` zi@#HEhKQL8Ks}*Cj_dCY+L>Y_Yjy@CnfWt7S!`et9)x*f!`Nlcg7f<-OCF4kz=OE)Fz=n``B=BjpL`#`>h7%*S5G9xdB^@v|U)<6$9dJilwwM4b2m9yV6u2R5d%4ukD z(R&^i@i+O3tt>S6n;Y({@U-vCQ2KzI3PmQ&bvu19Jw*O*3LtMGWf2L>_py*qIc0f3q{d}zhvFhEJ}hVmxFr$X>PhmDh!4&aXZ-=JcKVyf*0Ii}g^65{L8F_4tSBtu8qT z0QhR}@@mZ~%k{$&M5>R4ND#~o2LLpWdbPIT)9!^i5~N>?d4M7pwS=U(ZqywKI&b_J z1i3cxjD6%D*Yj1TQr+nN%VpU0wBO3D8&r_03r zGW%9}+3K3z=H1Hbk6Gt(>fkF2O_W7Prq?W@-@$6Es19`nILAXX(M$tee!=hpVK~kI z4)rY^A{lt^1Qi!4mLSTz;znIB<*QqtfO(hqEg&?ufb&Uz2Cwp-@dy1aFl(-z&|Yfw zoFwfBu8Mo$o?ie1GP6i)kvh`X`8JHlk=bVFgs~I_+XME`|GmU6!YH7_{#?B z;6D#+tzHgRyn#Il&grZPvMF5Hn@3_(0y$i#Q?eX?HuRC-&Tl_Z5L~ zgORbspVM|(SKE0XYoNprh`Wfh>t=vQo=-W$ICgoyGJRt>eiT}b2c`^H;7r34nIC1; z`(yVI)o zKQyBjl*KtQC*S5@IGrBi| zhl4|G3U+#WCAz#6glD}>vZGSN^ior6=Ysvkpm(0=$KDw@`kk_Wimyk+(^H2r=*pAX z($q)hVqT8Yde40V8lb6brjj>f_r>S{MGN<3`3KZO_FcnnTUu=mx1;$3#P2lQe0s9(Vmi>7)7i`nM%;;Ej3bXo_6d$ zqqH@CABkoaF4LhwRF2|_KA<9@q~a+giorl|kBr9mo-_Z?{Si&VfB)Fm0#acmmNW|s z)T(wkJh6=O(#UJ>0pu)Qsa^B4VJ0{*O??BloS}Go5wW|y+ZfOO-qOs#ze?o6(>9&E z?>vK0tUm|?aiy38e`IygfBIN{%Wc6=p`kB*E^g;nccNZkRF0aJM`!XqSrGalR>p`U z(xD3H_bPGfFyq$gD%H?SNS_)khk}lpAIEbuucMb50aZIp;xhoVOL}$rJPio9kh)@) zDa5VC9IR5Nf15YOWbWfT9M@dsO88+Z;7#Zg3Q;>)KS`&rjEsb6m z1|SP!LH$+_dvmv>tz*e5-ZrJEOzySm&8O#9ki3C7ss8uz5>nUMPdRlxqnjqjH3!Sy z-EGZrl&sNoca>cU=Gws%%fI=y^~QeHBauX=dD4Zbsi{&%>#e@z9lw=#dhvzQ?+eha z+!$QnpA;k-e$l1ofpr-OMePR`Ex-$B{-~yZ0x%qX*yls|X@+_VHi6u13?FsE#0}fY zMCXF+f{xVmW~0XoY#TdgB|nizRVqTyM!I27Ewg1g)qg2%u^CvoP*VxM*Y(tJnEz6I zyxCeMQ{qJ zba2;#O<*kZh(+q~BItDXmn!>rZ?m7SC@VDr@?`d7Ws=c9-6re9yx(CrZ@)^^M8HP( z_IQgpv_GgxD$=r?h(SE}Ikk*+EQQUO`S!vlnMoygJNwO|&epX_9MUY+O}Ch7Xi$lzh^oMNBJ(e|x27daj&$blpWSF1bE5R* zul;HI2Dsr53(-$Zj+)Kx6vd<%xjX)7c(k8P^r)AwS;C)YM5%#@s`+ubc`~boQSLU3 zynyETgm>#qgLU@;+oJLM9>D5)h})f**}qH6K8_R?&6IseWfF=0D-q-^hqNmB142?D zKbjfCgknRF9X)f&a=$I28`mvznC2;}-T~>(B1_ zGaE#K$_NdNNZB)U?)T0E=h 
zeQm__^L%}f?94U7@EIFf_Qmn4etCk*Ol*n-9u8g##%nZeza}ANjLswJXPG5xz$6Cj zOjd0HFK!w&b!n`Ek;f+u^Hmdj+Oltm!~YWc{-+yH5?Z9+(=ArBMXWfyO*c-Voj61^ z&MW)aOuvwAw?fN04F8|=Ot+;_@{qy6^WEhnho@lnstJXKp#gP`xT|5QnU7`gvxai< zTENDYBV5!VPeaZ*he@!NOBQgoemqDmL~U_p2VxAAjvo8lo0SkuX+D~uCl_n{W4DJ< zM@ljA{BlC=u-`)*n4dWl=79O9)X;}7+32r3-dpRufRsbSA4~HO14h)!h@b2rQhH-% z8g0hXnOjg+xjFoVOLCXcR8MsN+-y%b&55Wf#(x2$OloJtML$9fbauh+E6W7p?%#Rb z7-RaU(}ekZ5PrI-X#D9WOTMJweSJL0C_gJr4|m12NAL5!w!!dw)-og+A79cM`&SoY zadvt<&GfCMRbyi63q1~6qq;m7v@gIzl>OV8>H%tW=7~wC^wdJu?L~UB)M{azUhb+b z%WeBnX%`;cZQ_u+P^vJr&G)`|yHw^I&0Y@VYifJyFz{*^$wb+F)#i|AI-X7s>F3O^ z9vMBZg)rudjQJbxfk^N*+5YA1d>Ow3MJ-PVGVsJ7HcXS>6%OoT#|S*hRJlQs z%<4ONy1-b~(ldXeb(v4eQ>2f-4(I;`g?z29vn~*Ri=)kXnge4*9@vx@XHjYW4(;5DRtIa`;P z9HrK*nG-}FZI{W+{JjO4O9B3i*S|H#Z_`+!^JSUP&{e&^XMOJhWra>(Y7gGu;{mpcgeMx9QyW#B53nwe|9cM zcmSBm2_Q-o44XnZH!DM@2}g49Qh(JP3cRY9H+0W$^4vK|9#g$W`CL+ZheeEOXogPO^(yqjj>CW$3a+T?H zg?2Yytpg%l^ILOTtAN*Lngyy8x|$lx_q8BgqIi;StKPNJeW+pbu)visp&2UD5^sh3 zi|RhG?RpL4`dqXRWjq!rhDO@2e zS@t9(6Xm4Cu$52}?K|n1xTLjn-@1D?Ll4Pn_2--EYU?J^H&L|n`AAaBGCUyepO1jyF;&f%&V7sRliPEI4B}%v zckXLcO(~Ldr;gsw3d#S(#abj*!o4pA;Tkz7BeLTvZ@_#zzpK=RBAbqWj~1q~k)~g? zB0v;%IFB?lD75hv=}<7KZtlOYb9pdm!II$LD=R*+nbiY-MV<{Y_|jY_mEB66S2$nA z8X)^l(dXyjHf~#oC!>@XGt>VX=U%Rxb8nlnYdy|ve~{ai>k@XqxA52q?APy)|KX(S zQrGWSbyNBw(58B>RH;ya=*(PD*p<@|KT=KILi>3QBWIOn(9nN_q2AN6f@oO3&9I1v ziuYu4C1i$o=uFD6x9&ML{1i{YIk`yi%U{3sdkmssEoGkm$C zk*i_(MbGmJ=jkm?m&ZNq>>EEvntOM0D3~v#lgGhpRlcb9p>rHr;|%hQq*jkr zWhr-3iGk)s`BIg2Rx*)=)RfT`E7@mCnoUaXE3^?-Z>xj@u8%d-Idmw4&-U>9vHiwP z2ys@O$BcD#TfST{U1%)Wjc2>1to_#bXT_KV6S{D$InIE+bM?Nfmlil$Irw8c395(x z@zFZ#GFz@J)g5b~fG_LsV{;sHm15$X``}MzX!p&%L8Fx57XH=P*b8FKhyjv+>|QZ%OJ;Z{GKd$EiWPb5il-sVOkh2C(G+48a(49xtM;BMp?Cwky=IWsY_J8jr zK)j9Yp+aE68Vr9ZY4AJxP`i52WTS~4x4x}+G3dbxVKPmeeZUKA?G80Lg>?O9GOMS3 zn`FGS*=i(hT{k6RwY*k*dq*7an<%Gi=G8)cr9^6eCcjOogz$a&h$33HPz4hmmFZb0 zj_o2dP~#ry6P`yoOP=xdI3PWvLl?RmGPYDJCdMI{0ZubJL^RgnPZ4qBOz2&Krzi5OvLu>sGL-=b{V<8G@D=}Cb? 
zeBW>B^wRLvB@rj}QsOxe{>9&|`p8W{Iy>UXQs_%xq1i$eC^U?nnmU8U9sBiE^U$I zTvtf``=j!m*oLcT;N8je0cfE2dsPCOX*OWI08{=U{hcdkC~#d$K)U^tm|0 znR*T|KI>vV7M@9k%{MfsmdAW_N^je&8tBvZ zZT%GrIrVT$+^lvAZ9ML`O_jXWSs)iSAFvjh7P_1lWYwL@oe$xC@-zunfUQ$F%0BUb z_1DmwMQw1RheGM-l9+2Un2B^U5x}C;O6&W_KmU{h%kWd4Pg8;&;+SOg@o@YalhuK~ z?XeG~S7P29ow#Iu=3MydS>TWUkt;C`A5q6`5mnm`P$stS?26p~h|2_1hLT2CuNFkI z-s(oH``dyfIh`NNpK|a6F}dc-@x=v&GLp!2>Qv(`=%1QhKN7~ObhP8)>*kKrrlqSB z;%Hfa#v2(OYl}8fQ|8YOo@Rj8EzwtWK+o)t-`dGm*miPlKY(|6fZ@VJRDy2myldk( zzQW=CR*RZNE&Hy}JkR+wmDP62)u1OtKqBP+HXO(Wo-@|!b(2D_FZCEbuKn=%8~trJ+`L6>34!D#g89 z$9(^`tC%Pb>WNaO%)jdCBk28u!-0L;GI-{6MfW*)Bo>zcGUbcLdl3H0jO7hSZLd0| z=X4mOcW|NHtJ`PxBE;B~BX?i|@wXZSaJ=PuNrigu^m{r#QVx96I9xmj8nx^~M(1a3 zrssg|!J43wzpI%9=u0VfO}58@{$b=_C2%27A&Ed$Hk}UE;jL3A7^=XRee|T(5T@cg z^HTo-tj@ZG;aMbJije)&J4MbBT598)nPH(&K?7;*;zN9*27CZ@kX<0`7eDS=4Zzq>!5{HcX-BGHZ9mKh?CbW>w!Q$1M?`uM;Wj&*XU zMOT+@4PvygyF)1M)Vu^)u3p3lqK{YBK997~gY4G!R+ifDG9X$Ud##$?pEyjx$+l93 zEje~wtl7^bT`@l+NF;~nV^MS&_;DXZL~gYNvl$a$Vk-35XgC713+6}jmlUjrbAeglZ)yYwINa%cTDK;NUMUNtrJ(Bp`X>_hyVw^f>VjFhZFS1jy5>?a9 zf|9Eu5C&E?vc=dg^!m(9eV+qIUPoCNHXt$_PTQKCtSJ?5n^B3+Otem>WRCD^qC`*u zE7+GRpBpPVUGxN3p?O^?#fdYK;fs%?y>QZa1_M1b3ikzM7Eu$Hf8g34+~Xv>v9A>@ zh%;g?Zt#cG+lN zn3({t8FD1Ul_z4%eZ=1VTCIzxy?s+*&@9qKMrg|LE%mbM*6?#bMkUmRi4| zVI9k7wUsFH5&PoPYmr-8#!wJns_v~-T>=!gxH_7`7?q`|N%RB*zw+LCe3FpA-XaS` zCoAf!Jq8LCwp>oC5y`6iwbFpwW88S+g~(r9iOJf`t-plkidV{bL2y- zy;yCQMJ_iQLBl=8`2HTBQB3#)6Sc6$h_~fGz8!;J{=UIDW?3wqr^gDF**k<)g|$ZC zj;vMIn4je=d}vUgdm8r`SKaaCgz9^$*_3O}7R9-3|2r!d)2%lZU9pAvMnb*uNS;gA zu(YttD-^V#s`Us#PhXK&&K(EpNtQ&rpxotDD`lhU-i7g0#yClDM9)4G8lFw~d(Dh` zz77$@9B|vC+)Ys*u61PKL}8y^oQ(wqHe^-`2S>rI&qv$>AA(x`3(MGG*G6DyBqRZ?Zg+A z2DSDaIK75NkyL6UlrlFa_t2U<+W{m3OKGkc0;72*ZL=IkcOxYc={a+p-^BGLQ|0Uq zXf!VN@9Q{wJz&?vg+pQCpXvOx?8(O(TUH-@o(A4SmNM#EtFZ*yO zsO~qE!>Zgt7skF&S?qbx+E1kugmo9V>18YTs<_b&dt9!vUVTcF?e}hUKG=jVnUJWr z7@c{0)^jXlQfD<~G%lrG(W0&vJ&TB>#G)8=de63OLaqckT-njXojI@3=zNaNUQZ0V z{^iy`Hm2w6K^iiv?2lhhg^ypdUZN&doS!r{Btz_~JrTW4B^m{W&W6-XT!c^GyQOU} za0_8H*GE9pq3%J#sdb*uzktb0*})+ao$YP-NC7eYD@?h~WQdmUYxMlO0j^T*%D zM{<8495jtcB26(6W>kBVz~E1vTRL7a$J+w=>&Gh{-<7UsqP2MiUR^733H?K^c4~D! 
zWQ&ycU7lDGbc|g2j%x}|yUvHcc%qb5#kW`Hoa0&==1S)V!kXXl*wvI7`9!dV2s|9W zR~tV6=_|o#d)aKy;|3>bHnvxE>Com@_xd$dCDT$kho-EQ>tKS7cuxsDgt$$pGV+wl z^yW<8KXVW&h^qPZEgw^$dW%$Ee3wD(`$A~#M`mS zSGAwkeUVuU!wv zo!K$Ltw|*e0CzvVz`ik=3AM7hQ$<#ycZzV?H|V7jO<}G1J@Y;Ya`~KDHIG$lOu;4_ zW92rP%}_9ZkODOB~-rlNT#X22-cUp^rO3lJ>w(Z`V>|^SOEJ;;vX54e?o2 zNrnsHPq~l28iFef3L>tat#Htm2EC^MMa*e@H&z>d+|}Ld(I34pLQdsw}OtEwqi8RH**G^kT9iZuV${ZKr9wJZkN-_1Y3^L z{Fc}UQbzim*k@LHZnC4Y*dKD`vFLDFLpy{U7P$HR<67{`uRGXUt5$*gzozWM z?ttB7iwnufj%#_ZkZF4GAGRKCtI!>K*;P1F?5czIjdK`$ze}$=c>9U#(;l8-*<5N+ z*Cs>(C~Y32K{YdR9o`N^{`7E+;hXmKK|_1Pw32zhRK7)!9L-WLH6LG?hZmR}YRhKhx8OKca1loMYMAa#44-l$TYjY-{ zl!r_|E;j|ab1QCDnI5O4CcoDyFx;N0saH-+f_=_hPzBC}MHzUXBtv_juHFruwq2X( zJNjEA;8fb#q4T<04r;gFngvn&=rP^s-CHfXF*SUZ5i^SVm`Q4=t8x9U_l)*3B<0tm zLUlpXRq8jeeOj3uu7>cDONI&oK^hlkC4R;zr^Oy;L)psC?*3b)8p0%rcN}2?gx>?l z-}JZ-XzJ26WMqd*V|1sya?V-}|pH^|rRFX?rO9_Xm5t@q|j zf6*W&y_tJe&I}j>1O&%8Ocx-6?5hdPpOMg%2a2NTh^i&7C6DeGzub}3+@nGf;f2_b@M2#%dx+7VxypB9# z@y}@^TfI?C`lCPI0oE$tP~O0>B6K^5wcJp#2RxGWW!kkY{4bK*~o3Syujx-71kl~((x*eaXeM4^t z!c4RmQ5-mk7!FYOoj>dpc!@oO`nltovR-4+<^XuIwe<~GwGbX29w)1c0yh1sc#`7< z^7h2BrT_eaf_~%X@|RdMzTG#4$IR*ydU0Is(ytj9aL*6k_>2Fb@5G!QGU1n4e+CA3 z@8Gc+67xH4w7c#<-W(%B$|}SPjZtnzSGWp^bd0m#TT4dw!fLs3Z1w3pn+&|JpW6*K z%mgJLzHI%V5^!JQ{)xQp#QzMtb6l*+4i87n1eK`TuHkjpM7|~|;;JsbJ(cQy`L5>C zOiY6*Bso9upK!w8Z!GEh(N;cHx%qxce@#ZLu?3e40Kc7&7g*rnLf+<+H-h+ybZL1x zf#n@zxU;K7L;QcA1OH?B8#_QARe}5gN=`6BWu^Gljz3j5?=;wyjWbXhddS&x`p=v< z$tn8Ih$Z{7Ov4gMveqr=C~?+YU}H8nj01BN#}HgZ!U zjjTos*uOL0Vo-Xj24kDm73u8oV3q2OW#1g9BCVDdue8?B*Wu<^?SH8I%7!?aU~3Wr z1SbU7Bm{SN3ju-!clX8JSqKo^9Ts7PrQ7CS$1Z+dwQzQsdFmw z@r2c40j)%NF<^M)VSg*${v<(fYI`*>!=t|V+SYyR7;{kpeHO&1z5qIAL$tc|Jqxtu zB0_j7@4&=-eu3=^zoF2~E^S=V8mqX$`u#17Bb{P7;Xmtyq@>*Ze58nYr~Qeq{QSh^ zgi5ckFCJTc;pEpCbq2T%`6Z3BjoiFjx3(pIGv{Ki1@ZQ&%GlhNIT@K!Lu!Hm( zXHc*;q319RPnd(o{e8WNm!F)Vp3)1$h&cn2N9F4Agm*F?WA@h83A;(35D_A>9v6&h zC(+xY13C|otLc~H>-#R3o1F;Vf0~&&~ zQ}mg;cZo!)8a2+$Aifz_vD8L18rvd^Re8I?<(it-aT0>k!SupdK0>|PNJ2$r?r6TELZ=OB z6+!3#@cj7Mxc`ofnCfPIm(rNE8dwBo$HWnBCa z>MKv(c&@{x#eiEEP>2Ps|I=-3==PM(6@YB8fY)iTH#CtePm9Djp$YjS$0poOr4<%R zKZ8EV$cTcna%YHyghYIFwAfE}2n3QN z<`GQ6P4rvRyckk!zPP4w8jtJyUNkhIjhZJh!L0+NJo={{+c(E(XlMxdzx|i&ukRiv{FaL%MUVD4*7zdf=b)bp z7ejxFzamx2Hx9ZmNCs;Jd5h|jLic65I8)!6vm>85)~Kh;{DWZNKkI$`n0^zcLX_I; z!c6)8tw?wtEeCVxXWE*|Vq4!$;@#9BGg^}<0wr6jB zo5rH~o|WwgON%3J&z|V5e#uDT69A}CDO0OMil+=C22@iF1Sc7<_LC_$*VjLTd;0rL z=gYNG0IYD?gBCE^k%G2E$qekt(z`)1RW=Dwyq?IZ| z1oKDBmBuRN5|u*#8DN{oXEmt>O=jO;`92WgG;wjBo<(Lgr}uY|$a8(k5TJ25JQ<|)A}TPLIc?VN zZU3WSgs~E233YBI_bdOMgJ>U?s;}wCstS@UE+i_S7Q&F|wu>`^8tobFo={Kkghsq{ z8&pl?eCfYFm|kwvUE3hzlV4o+kVXcBI{=y>C&zR&y+!|JX;iab!1Wwvw7RzT8Q4fI zElWY<+}w-4T>;@K@)5$k_p3xi47yF1{(B|m)~uyUOQs!6h)_ZJuhUdIMdY_RAehO+ z-k4?CM5iOAN)}#T!Zz1V=8_RZZhYwg7Bc5;dpdLkdd2CFSDk&%%Z zd~TPGc3aXiGDS+oiMSOn^VP;uVQ&>o#zR;XvD6^MgM)(;;G@s?7vut*h2BFiaB#El ztt?wIZrj<7I{eCaaudpV1wQDh1tauK1~g^d{u7l)_TW5Pm0T5)8{EJ!ZBISSwOq+7 zqF;)@_~25taXHb0V9C+a^uwW`udVTU4BmHLlbQ3@bAhMs1*LC-cJN&X zY#jPvHR8;_?A)*ishqg8UHrDkCb36dsqJ&LAh2HbL?ny$-N@4kX{kj0`5 zKj;@?+*~gX#g~MXe#j%e-k+&4eS1Ca4nP&AE?BQ(uM&72#o5LS^ZszTBx^n%aCEMF zW&2737XIAV=ml zcQB*fi`(H)Nl#Bt(B}0v&?)+TH zLH zwdlS?e#PGTl#Q$QHFEXoHq03rCUQhKKQcngA`kkuJw-;-Ny)a1K|AfDKcpvQz|;W( z0eN;@HV(=Ag9=S6{fpfD_{);aSVc3n9_U+1rH$|V``L<=_~BvOh|z?98XLRdG%OV< zQXtodb!>WP^?wx!z(#a^-6VHEo*DdA`;Pt8=r4AS^JsBaw^%B-2hL08Ahd0jPdtK( zL8)W5d9}Bz$N5Uc%ey9NoY{0NL)@QMwfrN2 z(J%3w2;Uz6mSK2g1MP1#^Le6Tpx3}?YOoLUbS8Yn(X1~C~ZV2=YCo)#q z$Hc~_@Hic2i3G~A!tKD%xIzTLJmyNoT(&}vlDad$f6%wc=ZIz9Wmo84HybMj>lCbC zhh-+u0tgVwpZCBkoPRp0aD?zVXgpm*2THqYDaa8FLs(}*-~ 
zh-o^+F`-<$1*R0TU0)!tEr#R`caaKmFtdk;?tlmSWz!(+;Kwq}Hwk9EPqH~7f;ZzC zg6xEBR64q1RGeCkww;562Mf$lv#+eItTS<+azjc>OIc0FK2{s%h=t2-28V_HjE<#{ zaesY&IGimBEk$ClA>gzg*=`VcdwmIoumDjM&F$Yz9X~e3VMm^`G=bX$HW3Q5sW!|X zA3M>*f4rR}=X(95_m2%TFT%34F9}}GZb)>Xh}wjA5bVs4!XSF?44(Ja z&1{?MVGib68mfUqm<;m&ip=4y7P+;uMy4pPLr4&3EAkNK*_T8ozgAP7Dap#kJ(6KL zChN>$o|TCsZ9xsyZ}`sf#IM+>$mqWnFr~ou%5jPui$6Zr@q2E;qHtk01jtC>5D)}C zZY>8l{e2+=l=6LPJo5K1UEa_4p9l#H%F6tQq!Q^F+wXVjHPz{MQx|J2t*qo&&i4h+ zmu?923h=+@d)E-5%`@GbM;{p!h>7zj?;bioLo4FwY9C(NbsI^p&4veu+FYNS2i(z4 z*WPXr&I(t#Y;1%VCPE_*+|B@$vD-Mn-awPomkWlcnl4si~=MSG!p%Xh>AmG|I}#PEI!|oHp5f z-;+vICRO1MX@0S?Ye%zUYSEOtX7W08!4?;G*l(Q5Tw=k)?VTlUCi%v7(eEusa5gKr z@{ZnXEQc@WxD+49Zt0!}PcwNMh?BC31eXt-#rE|gUK76YF$MJ5NXRk8i4|6}?am!2v6CWS%8rQL6JnxnY2+Qd`^o>n^XR#RPu5E9deEtyoC4FZ*Tauojj5^Jl zI^~VuC`)~jdoi2&(Cq~d&m7eMHX>>{>~&GHo5EwP@axydrLEKDaS>r*;Yw{1HqgYs zcO=6J4C>X6i!)~Jd2}QM1R>SP8Y)U#C;ZKEuJ3Bw1PF08J&)m#B8KI15_pJCCNewZ zr=#JvH1TT*ctEySyY3PagS>agI5V6!SsjiSo4JA#E^jSXmJ4?K=lX@}Ik}~d8!Hc- zXE}Mfxz^A|W}Dk%b%#Bq&9?sV+37hz=%*_t*5{gmD4NlZGC_+k=*1_=O4Yyaxv!3- z^KLpz1mW6k>cH_DiwZI87HNolY;3IXPrJ_I;(kVu5Gf1f=HK|%PhDj1*y=jOkKln}G6Ubzk^elha3p2imk^m|ODjt)XMCAsqQw~35mK;+H`Dxr4QnPNiQwiV`DqX>5wZbMMH5abg<2Fv|4Sg52~>lby|f|1Z!7`fS9x9bH%VzAliw` z$r7{Gl4EBTF)S?X4A%NNFIlF&ni?LB7MHj4T088#&C0*~lyz!Fie<;DzM;&}W&#|= z+8D`o8*g0+!#w_byMKjdSq^-A3_5gNZs}|E_rGnKD8`mY zDEGXcO9#|ofzV4VvM{;YWR!rBk#UAf*rrvp!P>V;Nk9q1(O>A`r!DLEPNPn7HmNAp zrTVfazoS3qAI$Q_ev~2Uc=$CauiZXgvTX@FoLP@%geqckt6N2&HbQNr{uMl_L4!e^ zj8HE#A8!??^LyMK*w;eSUgtJDq=-FTOh^XbeNk{%1x~ppUSv*!TvG>kwgVHYnUXdS zF}s0%qn482Z8&RF53<*mSSEA$Tv#XX1=%6pm$`F&P(jCjbZwp#PB5zVOy$NRYeC5T zcsjd?5(%#}$|E(2V)HIi2$#<%e?jGW`0$QraL5*lgUlUfLWxP?OO@SLU+`zja^Nm| zL$HCA00=V0Q4< zOz)6qikkEk*fgqQpZx+JOKoQfy7Tp};@(F9U&$Hv{tRf~CF(YM-OLCu>lG9Tz~*J1 zw%-5JpB67TC2j1sx4!hMgF#!7wxA(q21YPMDQSXz`vT-Htjx@d_2DkF=C-!Bcc-iJ zsEa>fls*7xx}>NExm{Npf}QM%ABEnBHze$;mZT42U)3_ibY>DPq>C1Lv)uVTWMxN( zBwoQ|c~2P)qyPk%y5?&pnRCF5=l;y>$mK6PqKCDzGtrp~d`ru;UokMxyV(YfR@1+a?IH`HtCd%VfA-gtFl-b@ z7H8cqxpI8CbX-+XDawkOzg=_F@GZ7~HJZd!x*s#iA(h;aN1f4Vz_UH64=WD1ubcU$ zgruWskJ3jP95`q+1!6<;S9ms0di0qtxGSW66(8*S*Wt^in1k31W95=m(+h35`Fe+E z!Vud;d`(N6+3dtQn)L}E$iLw* z)P*&49Ov3^6=voLZ;tteZP(HT@)vT06RhC+W!~-hPoWGk_*y&-8d!X~owMEbkw`N+ zad&t9W<%7Z>LK2CdV1$Po!c;2dysOg#x6E0W3bmH(XdwFMo&9Y77&TMV)Zgzr)aoH z(1LEw!)NLVA~Kyk^&?hxC7tvRoG1B+q&mf{(WYO;6TjMWHd5pq2Tm0>%}?RyYclKK z2}D{Fv*4stDdDAVnG%8&@!OW>h3K62Of=VNuM1lx){F1~PLn$&hJXPKuZ2;W=WI*L z_cJ^(>DE6`!ZhKI#-m;Tw0@>7Tn%qV zr?FPBkK-2|jo$E-&394ZucoGL+)THEJ8-6qW4&A*?sr$0cY=Q$I@s|%9>P#=54N{| zwZ)q z-6?^zn=69*ccc9WWcekTzK%y(Tq=%K;bY~kw`^^}IHOVCs2F+7osvH~#&zG=cT>?S z0+uQf$*KKaOycGuyepOz6WjUqQE|(t(*9ws+tdQ)Ana0dav18rtT6HLe&HyD&Yrla z9$`o{c-dDagI7Pq-WMBxW-RRtSzSrR&>SH{>g!$oto;Y$w(nEKLE!DPiq7R3Ov+SD zr>P>zqO5!5ijQ5&z3u?1*y~WLS0EI-`5+;+efVJ1P$~mXD87jD;QU{!_6vv3wI%h+ zqwLZ0lN*DI=NCH!!$sCk31s4#sf=UV3n*qUinCSqSyy5>tP`{IifneYf)5c}ZN+Wz zi9Q0Dp6S~#cs(_AuRgIA;*K;W&8?-~G`F%h@kHM(U|X30_xpM>oI*!yPK3*+-5d;m z;(5wlMV51|4)aG+KyX!<(twCZ$@KlqF`5S6m;T5r+@?fQjKxB@n!6Dlwr>wHbv*smkPB4Rcfz? 
z8QX`1iO;N|VU_U+iFGlZ8KPc2JUE1FMRCBHy?siaaNFODE!lR#iCiz|)m8es_rB3} z2wiBm%qQyFUoo=1%2`yj`RYmLwBvs0_S_12mpF&C>wE>7AC< zPO4(oNm$tjHexCrE$lr27kpx+m*R3xA%w_WS6V8Msl&j?aSB7IOUO05#g7|DXyqXU zQdMYehT$9-^%Lsj&dp6HSfVubx98SUWNC0PFgNckFNY|eBv1l_k|*|l>4?sRjONIW zeu^_YX^R|h5QjsO(GV-1m{rdkc@OjF5eD!V0LENp(F^*HbgLEV7ACS&uu<)^SHDPm|W93yu3nxZk#dexo=4q_pg1*B%5w8AWB$79h)!g!-A?bnIW`^)Gbn#zJLL zJ#chaQBgUXE1RC0(rI&NHyKH?TCP`qg0YT|;R;?M(ki~6-QQ8kVR2s+K1BeG^pu&{ z*jC!XPmmLFYf-Pb_-sulP=4vRoZ{l<(|_?%xVmi0*a6(mp4W%V&7bcs^6#-KaE8TQ zETFA!AP@+^8V2XIbE>jQY;$yfp{4;A5_e6FjZywHl1`RHKP3LJ!G1rTE7RKBH_+P071$$$x4UH?$K1iCNB2*3g00mbJEuwDu2*gG1F_TT5 zI1Q7tX0Owgrrg}`K){}DQHrW2Pt`A-$uS#G!sfGHsrZCPeTtaNVI~Gx2{@tVCc7Jz zy?I9p^rWV3lG7^Z8M@@><)yuz8FxZZ1%88fi5a2N!t7{@=+|{t9^`-LgC-{?GAGS< zRF#qpYaOv&;4&prC1W#M5O)B}#a?RLH#518nV}ASIc8oP{-D(=%g2l1dEM3%vjvms zN`V=g#cc^GsR9!t+*19CLi)Pd6JB;f_ZKrX);AqQD+oK@k%`IVZ^Ar$-!D4to{McB zCoNX2oI9L?{rl9Xm$=GPWkgNX5@y|2@ewOog%%0bT>se-B)fk%If`L_eY3)!v|*jZ zm5EsLsLuI4P0epb4!-pJT|juq+5GuNT`7l(^5x2fN0P?FFhBSgDc9YR+e8bl0< zV%QP42}v>B?k9Oig#7Yuj*PLi9_GfnYQD*|oNKbjbPw?e)28U+ZyzvU{tcg8$sAkC zN{Gr+;zbcem6s^eLnX9gx;Hgw?nV#w{D%!mn>Kl)vQL;71;p} z5GlQOO$!$ykiGx~1K9l0Cx;^ym6ExzVM+@Z%KK)7De0FVd2)O%8LrJH^&p$(B1j0E zUwTY(-Zx6k$7*a~wbki$zDFx~C;1%|BZVr8gkvICeW=Cf^$7mQF$4dJJI8057- zdWrXTcsG&5eJ?ma1trI;8^9DYM423>75x?!A5$01bxBb#S+mmm!qKjf%HvcVd3O6W z-l6v_7A7Cs=li&0*qpN)N_8wbkdl%jE>9JAMH~2+e+sjs91{MGGO|91JCJj5OiR_% z`m_m**O@mklPla(;NA0{uj*dBYw$wrSdwx>tc}7AzdSx#l!`iT!E9%NWHcA693q+Z zXT&nwVw@DtqRTG@$+@bu#ha5k`c^Z?dgB~C%2MZ~sjyTVvs9}wI>2Jx%fc?|26ex7 zItVB1df|P=23CCknMP%S)t)aqsKsXSN8<2~!uPq0 z4i}YiZJH9uDt@TFQ-dTl9O>a3F^?2+q6#bx2;Sc(yGqfPCbw#kMp@Ed&oh^JScyxz&8YrOIZAa(h0lSB)SdFcL-%AoIu@1I zo`2n20yGBIoJ}!-_jr+%+&o^+`kF*^nih2*y0yXywaIBVRE1`(8k(}o{Rs^{XF98@ zIk2-kh6{}?|6oG0I-R**oQroq)kITxen|W|58oL3aHdH@%eT>kvc%1SC#`Ro+oK-z zSCn~=emhMm)HT!TL`4M`*-Qr2oN!AGMF}3*XnuQNMd*4_%G(jWRLNhCO=LQU1*a!G zCHU=^Qr)el~fA_4Wqp1U(k zcvwylb!!mUz4>RfzIABlwVd#HTs4~k3#cwILgJcy!_r>xO@KeP)Y4=B6EnL zW&yZSxXhs)l(CsRA9gym&7HIhaC$eEg?B$@tBiH7GpX|!2GvPWTNEb2A>#nZ&HByJ zJV36V>46qX{chGqR<>A=GhXy?P^6?}+8bQ@w0#FWIk^}Z%5&>xJwp(zF}2XH`nbpL zY}kmw?sr~wyN_??RzqRwNkJqHRy1r~81W$%M^o)!{hT;3<@XdRy?hFVnXa^yy-}rP zwv)TN3?lxt?3B7NI%^e*yXV0G+4Mc@mDEk|@@So!XTK_EelL*4w7tD+Ke{~2XC;W* z)ve97qdu}a;<`wwiySh=%Q?JKBZq0%QDeqQEdg$-Meg+VYm00N!VdLMw ze*tD6m5}2ZD3O>cQlzG)mdy}2;dIWTg4t;b3JS7WYo9G0mZU){kjqO&h!vf6@$E}r zkM1VdX&n%a=STpWsH}bZN}EFX?Now zn5wtIYE(#YTz9v*x&Lf!Ix}8CTR6fOB10Lu>+&Ze*qD1^kZS=Ey`S9@X77#mU2ZZY zV*L#jd+|7=jE=F^bvZvAY0ijMomKX6O`Ge0bY2Uyc4>DsCC4`=X6i5EXF2_mBxZY- z2iQ`(rXGNeIywxxFm?ep@XX9Fagl@XX@loQpk%bjIa0-n9ugCGjcI0<(OyHIWX|+0OAmgQbaS(Jg`N{ZBC%{ zN{s=Xzm)xUwuC$W6{&tqaE5T36wAW!OPzaFNC?a9N`#BtQO~>4$yl!vi}Q}>=iM+9 z*h5QQY(+9=Rci*#VczJi-S2^y;p@T9HaJAl-PBmnSOJgRUX#xz^=s&-W=tcj`lX4R zgAND8sCh2drST$u^FlzT$}{s`jdjBKH2I&QK-VY9(gf>+Dao^qKt@Y6Ra^a>!i%q< z9ow4M^;DvpB1AYsa&H@1aqoz1NruFf+BP{wF`?g*GP%(t*Qb6e@RJI_~oKMQj?zp;cHg}1v5wGTEJ@0eHno;eD6mxhtt zw?Qu1UUFZm=1|Dds3V@h1Ved>{#_GVDj2GC*szR@jjEF|TgNSgoTh!tj(I@*@uJ+w~5w5@<>7 z%HnwZwXFmcbxgmkFtJtrPz*;q87RF?tVqZMR8}0xZ7>Xz!-b~umkw5tg*Xy^et~NZ zpBg(t32KX&>2vd@@~7z(1O|+DT{Gb3hTqPuI5zDeTP8dX`)5v#ZKJ_sx&jC#ya=(B zRR2Su1QToQZC$iED_+4&kGDmw_C2sQhhWpTd%u+zaP0z*zmb#@>$NXHSXWnH0^=bT zK}21s1>AWcI%x<(OJgIjoeMEpTx|B5n3#O}^r`T9t3MLRojN=pFw0PPl6+thvitJB zNVBz%0pARCPWIdU{P}ZPX{mtMvzq5i-qG@~*I0F#1+J{{c+5RzlTR~E=N%<=oe#W? zV){9+grc8I-#5xcAeKboNgFjWHQ8x>Iu|XG5Q&Yjnka1=ZpZ=*OC99$6oLmdCl@=bu$U~)wy}yl@$D_P{o^=! 
z#whYU)C>3+_R_{h!6_tiD~;AFRHHvVUvUysaT>tMg*uIiDkzVijoGkI|K(2`HF+Br z4Y}jJMRGjE!E&;TS^bcHl`OvDK}7PeHX4lCy!mvtc`ER*A}6E&SCO*whClkOT^HG7 ztu*@bJiQfyo3V*{(@S(0OC%^LSgW5meL%%{cyxq3C9S2E3RG2H%;}q$>;kJulru!P z-SYupn&roYf)UC-07XA$gcPw4Kn;j|LUmmoHWonjw6?a&z3Vk>y;>kD&2~qHv|5cG zcS5fsj@9Ztw_bg~L^*<9O`bQKk{rDMf|f;^EE?p*h@df}$Cx7Yj!V7LKSiC_%xgp&&COVMx=;ky8wf+qpgkxbELV1+O|)kWvDb0 z5N8QYM^ubbpY@^S@og4XY|BK|^%dz3ua^lD=A|F%YHD<9au>o=^=aPQv%@_jLdb2? z<*|1NXLuIeUM}a(Hi~}vg(gD@f3Tqu!{}>GjplfZ5E|jD-O-0YLOT%=5n&JHJTW)7 zhp;e|YcBWe{a8v_99<-G>^K^vIq4*(4dAWX9f&SY8eM21WMq_^M?Uz8o|~JyywSu0 zKdW!35uXcw6cQK#@$Ofb7|3C z%cHp?Gw}DIBT(MjCJRZ@xTtwQXa~ND@rfsk-42D12=MRxAhXYBls2CZ=-kmm;URh8 zyX!4k5#Qj($yZPfxv{r(7R!3`!~ddQ|RHa-S(_Q!1;X^lRk# zUn%`_RvRxHqP&s8)QA2@&f1~ zagRG7?7q9aZ1%WgFvrhaCznbnCpc=feJj)altDCl4MtE~178jf4QH)C9@Lsc$=O(0 zkx@}aZ5#mP%mB98>4?K>sn(YrC<(mN`989+a1Y>mNk()jtR(!<=1SVz{)!z3%1kRe zo7el5t9&;JsqiE;6@+?vE}H`9E>aiH*S3G6N;uLX=8F7 zoeo!5GRYr+sy5*|7ki@HCzsJFR;<>m{MxrEH*Qx-IJ=@gox?`6fGQG-Up}j*R+9Tq z(9Cb?y49$gmUaNy&BYN*h&v0dG@X}IQFAk#eGIW z>R^|s)%p4PQ?OT*_mn5DI~8q>(w*x0l*V)F|WJSG@`P>-J!!?7y0va|%i%BwZ+ zH=rs|Nmtjq)AzvvmtK>-9^p|q)JEV+NJc$kboXP##>d?Jy+T^$E89na<^`BY9FI%V z=$IILTU#b}_BMyTh}zm(AaPA(&<@cR^V4A6|1UDQ1*(yNE)l?E0Mry!EnutsanuR@ z*@9T{#JH4>1Ya7vtGk#J%L(&0+xI;7CqCR(-ud&WZzg=5<|uUP*XpftS)l*>SR%RQg%7EG)mBry>H)SbD;%=PsAv=tr!wk2;M zrUgGahjo~Val>03|4!%Qw%hEPuh2DYwMsY#A(~HTicY}KTg)scM|r2{aq(Ud|| z4foxyp)B7@#*4w>en&y)t)e7VxkZ(7E%m9=hpWAuIQ2^UAn1Iq#6vKt@!bACXy&KL zj!X(io#mo}oS?(*i0{#2qaE2JmB}c?U&F(&?;Mh(p`nr84igj^DNBBPP|Q8F@U6&D z?AB81|LV%m>`(}&q~S8@^9%XBp6`LN%}#p(LenhpExl&_M}~L8nmC;Y$%Tc5K$Irn zaa-!)0*%K9I8p5?1JM@elScp;oDR0twT%%j*sWgZD}$&xft)=jVE{)lLaO1H`>XjC zq2WSfLs9O7(`hs$j!TfAlLPDELwX24m#v;e&4i|qO|BlQBs`TqR;d|K1`6CKGGo6K zp{Ti|lhdRrIb>$*vkY)@oRZL?c*BtG?^V>r@0rOz{f%dCJgzQ0%t-_6F=zjqla|`% zvsnvHEaT#b4?<;Ihld)r)$1Hf0MMr-#(NLU3Xmth05voKpH5Zyvj`~P6!m3+>t`%u zmo4rk!UX)&IS(ew0qo@zgLy<4WS+uu0iUF`-!2_w_eRla%qvtsxFrX zYbM5L>{cX_#XUVRChd#F>-Ef~{>zLCbH83rOgxfA8;E4h8-egO5orDvJ(WU?Z5^lN zDqK~)AI?z5^ewd5570jBN38t|wqUrXdx$ZV6AUNft_EsL(s-OOm$eDw(m-T7o)!!| zCd!oxj#woFzM?$(IEb{bbZ{Bvzwn1GIF&awhjL4zS?c(9#Xb;LP&b9K`P$ zJ#XsEc|0YJ|BvPxj=PKiz;DGVx-SVJp}&u2Z)Cf9@djdTCFpvv8U#S&KJC)7@>RE) zED3C|oRf_d>i2i;lyx7&#g>U<2rKi^0TF|;sOI?6aG@wHu?*#zlPW*&-WnJg24={Y ze)aso#rYorG9b7>nL^=K7Ci_pS(#4@`z0Y+J4L_tP8~+*Uk~321t)25zOHKgq*7px#~IJHHpYD_hdxS^P8?jH?02qeB-Va}xxoi6U}(d|{C3H1uJ?#6#~Zjl zAv!>e6bD;D&DsE@iF(x@{f3-t5&M(*_jE9a-2WXD;Z!^+(btWDx_VfvGs^EYhBvI1 zLMRLN5~{zz+~EB8o%*j#4%b0b(w#$9)&V5-69xD%7ylnhopvgpK}n@blO2!~+;nib ztZ`MaTlYv01Dp^Tpn3k-fAqS2Sx-GZt*1v)AxS=?N>c9n0VtFHn})zp+7*LMMJ4Rq z&>7N??rlTM8ppsCIsA7F0UNGJJ(MI9UMBicWvSR;>mAJh&fIXxiU3$vUU|f3;uz7j z&RQ!`0q0$&z<>774^)E^qf2RPnVrm~e(CN9p)hh8|1GAK^3uZ>0R{zh+yI(E`wh)hdS8xu5cch1-Va&sy7j@3677yuM;G+0lnS- z`*TQjnA;P{@BL{&-S_7a4Ol$bj$p7Le3~R+stpF{z?BGxEL@Tv9%m(s$zNjf@B+** zLS_qE-G+&>a7$g2AfVd9&rQaBc+_@bl^6X#)mD#ULQVyK@m}2&;J>3Y73SXDR#sq> zGL!%LD=gg%J{ywy7eFD`MPDb?aBn>%ioUZNnQAF*x7rU$#^fB?FT7%VasK zI1`9{PK!C!(62+d{%8FR#FrOVX5xOk>IDLEodjsN+-$iFHwyyXUX1??ATtXwa5&t$ z%fu{0l-3*g#W|(KdRi!kqq$YdWVzhNX6?Mhwnt%9wF5K{(~1={FD@qNqagt5eInjG ztY>N>EdmcDrfF-=j4hE|w&EF0(lpqnt(K${>6-H&2RrAq-c}XjkF0g_KQ|wS+?+lO z0;^o9yeLpGIi(hOK}^of#xqHqEp31VD9w+s()MmDr5Yu%jQm`9W(< zQ$}BJuyT__?(yy1pv~a3_JRX^4My#b{5t#9rB;<+>sO8^&9&Yk`a{K%oEdMYlU1Ai z?{8Ov*pHMD#i8aA@B}nQU$-4=&DRi%tL2ncn_k&EixkryX$ygUdC2OCDaEh~($Ap7 zF%k9pz?P}&az%1v%}|rLesCI_LkBrSJ(8vjEm4sb1Z>cXq;Zso`9glbB(jlFDk26Z znPZVF*Ntf9rz-W{l^DEMvJ)!(C@)U7=93LzQzXhwzTD``5PT$w4B}(lcbIOB)m)(8 zA=6)DLdnCw-CZvpYiMt zK$t9@B*_UNkvEx)-gR_+Bpf#-(G7cXP{)sq^n 
zkToqOWZxVuj%%IPrp#c$oWD<|+iT}F>R4*jjlR~&8W6s&O#5MOSiJAr(GH@V2A4u@*@0Pp^e)7b9Z9{k@mR%P-oI;XL=l%?vCXZ=%Y{jsD59`czRjbbVq$oYLUxxCxOck zO}DvC32)(COLjkRURYgE6<53ys88nU)?OL$zIn<0Y`X50h6J8A#(SIb$1Rv?w-Ej% z@wRNL&`>lmdu)g{oBPAt^Jd|WbWujS$4PU$uhn=e(%Ir~2$nIc1<&0&Z$#uffOWEr z`Q)xteIRMu;S{yg>&%)3*DBSmafL=)s&hI}k_}oI#^{II^7WMRtWCms&4pSc@<18+ zst0QOkwy$%ilk;f9yi9eaKg#(U};^5(2HWwp)ek;U4@Dxp1{YzDS!P&Z_ska#UZAu zp!)Q*hbwHuqc58|BDhEP?IvsOdT@#W9)h~<-45=GG23g|!c#e=ld`-y+y?I_9}9M* zBZ+uWcCevrw$~rKj@KqZU_S ze=^WMw=RBKN6?M=Q0OlU-16@1V0Qd5t~){`ev7lKQ?+?P!m0_%-d1v9gXHub5q0~= zyTSBEqW3M0;by!=q9WP%NU~3kLld>;b3bpb_UUWJ_B!h4)}@~_K0dN-5z4;avdNd4sn^pjg$Xc#KI8PiyxS8^($WiwLYXZ7m7*zD$G^ThxtpB4o5uj#TX)E*i+y-am(&Q z%&*#M>gje5It0bNH?kfIOnQhMwYMA+CJ7V}9MNK~{78h%ojr0iXgI*f54i6(7~~H} z6y$mwrS7jEAi&CXUfc{>(eC1%F5!9C+thCmkYX z;{RSJv(~VXV=8zZZwssWNOzK&BntR8TI~tIon-W7IMMRBn2p~P)g4IH=Ri(%roAay znWd}Ec4gN;v|dmFA}Ru|QzZe5>+e1m*}*w!JI*pYd2|%ptpXQ^9;|CtEs{fZy_d9o znT9}vRs;90{Q6j3EO+UMf${3NBk&99UM(|gWa!6iXDG|iFb>>zI9Xoz=iv5eiIM52 zd>3T&4-q^u#msL4Bu^` ziwTkCcLB)^dO5IJbc@j<@A2=A<8#KpJzU$b*_H6!t@Tsc6X&WApFV3GPDG`-3WkZU ztz=6B0@;~ZP8YP^YC%4|&0$SNq}x?61}&#?azrHO)wQ z0s_pnC{M4)rmWGvZ}C=jW~AAh(y4Xw>Rw&ot3Ip|w_huuoPFsZ)W1B->0@*!aVFe` zdx-(+JEKoYb0PUI88i{#(iTyy?v?OjzQ47#=+`hv!ftg%ML9xC#10!l3}$+;#j5xk zc%<~kAPCfgG_ou?lSmpFJLv_{BSS)J~`?MsNf;#^O3_iLJXxTN2y zwB)>u_GcMC8_AX(u+=7N(`EHyTr^-n{7RAv}u>7 zj|g@wS*^Dz(imzH2^3(g297my7T_)(mLwlMmt2Myb@N*vCovg3o*L%;+>G-j#q-zo zGrQ54Dhof?`!(LWbv5(8Df5>Gm07gHE>8urOJAd|h-_*SXrywH+q@BuLIR6M%r?|X z?AwIJ=X6z0(04jE#VpU4f$W4RuF!2&XatA0%T!hY=7gRiLMQg`Oz$2VblFY_sgU>|vT*afOumpgs*-L6Vu-Q)?kZp zF+u8|Kx@QYg24>Vj+IZ9eZ}MWfW0u;V5?561TxJ7Z_M3TTirLvntDH^EvktG=aN}U zgkq47=7A$a^6?0dIBJNPp`{PIh#pLc1<47!C#0_~1P&0z%1bArW!Df}CBknNnB-*F z!trrjQr6D$wWIKv88d`r5ta+AR=pqe*`sS6E;j7~v$tKPcCBx#$27x}&>Fv9{0=+w znxx*+EK#y3U`d5efI!|)pDFL4smF%jPH%EBHYgQ0NyL17GD5@NX+QoRB!;c0+Os1Qj-iUfSgAwaChzfe}DjND{+Rp3;`iGq$~y8etx+nH5Yvm#*l zGvzj1XXUM84<1VD)cuOP-Isi-ZLiqCd~>rE`1>dW6vZF9AWA@^;ENMB=c2JvbFY<0 zvB68P!*(|HQ}I@Dp)4WCZy}{gkd`;;2{#m@asrY_gS19i+&hssG)!tmy zE7}z!o;F8u(;cSB{0(OophshyTVrQD)@t4>-VeOU!jW&ep@%>CN36E}Wi4}&{&bpd zfR=uAiIe4ig3YkW^;?a7NA8uUH@=r{vyohK=8={|b&1l3oWHNMx7c^3_C&m1=JfKT zVIjyom*V+L?lr|p$9KpmpUK|Z0QFsWowP9m*bIf%u)G($Uz)&GFYJ>@Q*uDS=2?=e zw0W$A$D_f6x^pzq#mmB+v3b*Y4NqzQ7Vi59t3!T+zMyCh5Al>EC$^`@=d8ulrw3Z4 zO5)?}I>WJIvm+cm<^q}u-+s@THPQ-L%UbELn`H2lm=XoJA7{KG4HtL zT<1;bY162h=vQFUZtf9c885^Kq}NLH+}*y2)gNp8(I5CBKMC)ucQr(}xTn!^d2lGT zB;$-vkoe4G_0>nmY?`&R?u!C}a=9B`{xVXP8>F+eAh|wb%&W%&NKkQ)r<_u(Q#MTE z^Q@$dAd$}0H#ld>IhnnT)I%OiX7}rzGx$0|y4$*Um_Uj_y#)zgH4u)b$N}x}m~S2H zgKEw>2O9Rm8M1hPy)ra)bZa4u=sBso{SS3-*$_w5h3f*rHMmQF;BJEich}%9gS)#+ zaEHO&CAdR?;O-8=-QD**?>YO+`3L7~Pu0wHb#-^GyRLOF>o(uxg#R#Pp$uBViiaeJ zZDp{A<(;__Cb7Bo^97@zj`>k8Yte7>yeZZJ(*i=s7ZM!0B5V22O#LM z6dDsWHLds?cG1BgY1z=@z0Ni!$(bUxF0Vwrt-76#Toy$(@kXJ~?8h6ne<0tX!+rPTg{-kj?xNIW zF*TT+G?$k9VZYfAI3AkM&NsuIi^`)25AW+k%GhBR@pDU%k4X_A!ZYeDFXDiA`Ny%` z#4h!?PZ8{ck3cq|Mnuop!Gdn(TMUB_1@P<8=+SI??(HbO+62;SQllz=y z9Ypdv_b|@4;)}xdP84gx-i(Ynko@BS$cQ%zw%&mTuNg=Iy>oMz?FY!Szw zuDL_P>^eQcO^BHoL_$e2?q5-Jm=r%ur>z$8s9wU2D;eP}~Fxp{Rp7+`~ywpYVr z?1)fsbp#6iCnmo5%nVQTq6MME13vSl&f6r1!<~glCLNJylL0RGo32YrB0=Y?Fr?t; z(6>6~jT`&^D&X?vY_&+5^FdsLG}{3X1sfZ?%OjvB(Pp4q?Wtu8U#I4SgPqA`jN4Ae zhmR2y+y2wHSXlzq8LW-e(ri*BE`~)c!)byRFxK;%C$D>UPqdiONDG6yMb_6TNJ0Fjxf&0Cq`AUDm zY72lVRC$(W0vRziF^hp&_2u1nH1gNXu?nS$(g&Xw!S(ZQ8ophMXV}epVJ=98Kn3Qa z;@5+}fNun7rGqVZzi;lAT-u;M4qpKnLA`r?(fwyUTP2Jf(b-h#IbR! 
[GIT binary patch payload: base85-encoded binary data omitted — machine-readable only, not recoverable as text]
zz9ipPbylmBuwH90ln9$fd}bBnfv}VRU{FC=8WIhKC~ll?qWB&vh9^}v`qb_i zp`-H?n8c{J(}Jw=O&omdKIS!%vvNNTtch?P=VoPEXC}-zrsPC1sgrwY^lEBAX<>Y~ z*hwOA#xt`8JaTa0YM)!k6tc-Bn&S;|>chy78pA_y8o#2~4j?lWO5d>{Xk%qa5vE1 zKG;oRPdy+u>g7-m@7jq0Tq6AHeBvIp{`w)i;?xhJIUaahMN;ru$Dd!3{ z8;5ZJ(*O)l)`TTJC&8!7+HcBJQm%~*-mfkQNOE)IzYP83dfg*tQ}bZ~XL^hUZZqK##$TVVNW2Ywx~M)tCAkvp{hv=v*{v z=~*u~g;aH+j-~{#+)v{}OuLRNMSX&%MXO&HZ3iI?1-3dOeh4LOu}Rbi$_!BUbQt==n{cc1V+xE<}UksF_Y8R(p*EW5Hm?h-%kz>#xqXvmT z29E0-8v{>zPj0Dhan^&{3s1kokir}e)ufXy_zA7bb9v2a#;AAYCVKm1tWT!4E<9TO z-dNd39#4{Qk>6;0ry@D=(o$piM41pwUX;Aj)NnrUlvf#V9jxtta_NB-l*fF3UBc|C zx*|m_^$h?{80rwME>O~7$Y#S$8(~5bmOm4)Kfh0GQLHOp7UPnVRMFBLYwha$JB`ZU zL8JZ{X-0U;c;QhDBNTkrEV`CiFbD=+&KZ4j>0~h3g=yNHE*c~4r%9#{Ex`IsLsDHd z;X~_|OHJBAz38<|Q%FbZeQ3l3k=WS$lWFSD)lRl*Xq+uc&x*gOXTDwM#Sr+(?3%tq zj%iTzNy!-ekUgO(5C!nJLg00HACn8fVR?4ttoA==uP}Z~^z0muOX@X{}sbO1vImCn)Wq)LzA(vzA?cYPOF(q=@+FewP zXXBjElwO}ZO~%^KRs}z>KyoSZCV6RwwaNurF1 zeCbqF>1G>ScJkfReb?z?zyjV%MqD4w=@SX1WA9`?&PswX(`ii08%tP{58ueR{=urGs0lF1^3_zu8jp{=&S+weaA_5e&K6f{Gs*(; zcpM3FIjNcYXL<6>$`-|5)-r926+CN&0V#UK2lkeDp}`Zny2G`r>NN6{5vO+!y8<+F zeQ-M>A3v$nrTLD^PwpN~zFUu8z*$f6vrYt&S#g^qJSaNg^IlG$o^?W)$2nnwgCEe_DNw0MIdifY_i_KbQ{WKbq`CVSA(g z?nb}vp3ObcH&-0_L!}gFcEW5rxNZ4$!t!VnW1#snrZaPV4tIZm+mF-aM4*Px+r(py zAMZxwRxg^2GufZA#s>r7jE;^;<$K&^eWltf&f; zPAb`vNxLt+4o3x<y0m5aDUQG2Pk+~qA6s<}z3EeXVjG}u-wEo}ewB}eyYrlY zI`1un^c8*UB!^3NfO;UwevZ@;dv2F0Eyu0&a27?8jG>KVCmJC|t)hAT(j6&?8bCTOGY?H5*)N^s8A)7KbldV@;%M6qgP5hMM8`NFT4w2xjCOseLt}h=maTWW1Uv>B|5a9Q(F{3TR z-Xy@7F>Io~UYpO&edYu5)d+Sl-A6G|ExGmKGUd%2hOWkbTD-{0Sj^uARO0#JW7Ov9 z=`Mt4Ym4ITJ3`f#e4JRMN}>aMb8~k#bZ5^lE%T^P9bIWy*;+%dzmNmPis#;2o(^gA z&_H!&i_siqu2ZIb`j)8ni3VLq#{~k0N4X?25kVO-D$7ksS$&Wsmk~g^U)wN{@T#YT zTw<|mPxLf{yVl}*T9!&$#JCa)G8(f3V?aP`_tEA{v}&;=yA9cz5kZ~YDq@b0OVq=CUw@3+_xGyg8bh!{;tI%KI77A)oEAjXCYp zkn-zSL+Ke|p8C5@CM{%@=Jyw?*Qd3boPQ;w3e7WaP$~3+cBP0q1#w~mxIlqGHlXQ z{TC-8$S=sSX;1YHMeU^W1{#4d(@>C=go2o~HcSi&B~^8r`S{m`FWGf{AjS9cFEspT zN5@F(8(>&|x!n8hO@XRwepV`JBDl!03fv{<2fnN0{DvHu&F_!7Z4bfd&uu@V~8A-$A^70vnAD5^;XYSH#PCw33l8CM8TGnBG&EEuZ$pRiA&2XcLQPLqoyi$*-ypJ+yiHXw{iihr(~d!eW*z8BIS~y1}4z>r5LR zH_6^$4&%Y&>6FI+d2=#@#B}wKy1b$WY}0?k#Z((k(YFQl+Jffog^Y@OYYjNOcZ9Di&8rSAghG=&-r#zv0$`2ypl<$vT;3V z-H&DMK8u1ZYm0s>Z*Y~R71lg#s$I)!%Ja0WRN)k&!f`uEWtSK5pFlJse+vp?8fwHQ zXmbBb@d_GXb0)?+>wCL4u>%Xyh00IsLv>SCGt_`?7TPi@!315A7Z%l{j0!~Cc=v@5 zBXZ(RXP%a#riTLVnVcqh+`3}NrA@vVYLnmuyPG6eE=J&9_!{HTCbGTv{OR>4e8lxn ziA8SaWa@;;ch%+qNK+_(ps~H^8t)ECN4KvptfklVp$ed%Hd+bDZ!%*+&o{hRPWALn z^QJnud+tObgU+JDVe6i*U{oOAP5=_Y%+m}C{jHIb6|J;?a8 zIA^J9jVW zu4v6KNlIgCDt-$(`8{G>QgX-_F1c@SUn|G9{gs_}Izc=*%=*+}$PBU0zGb#@0bTpF z{}+?|I{d2BO=c#!Y{wOje92*|&aP6j-K*@FdqOJB^1NS|-Smizufqt&#+Z1v{;x<1 z8Xd=j9UPtR;kTM;hBzHoZ#!!~6{UBs>n{he_?&Oz`Imcpui0~6E#;KG{^KRiD z8qJJ$NE2HQ7ZhCUaIpsuB1<5;UhCIcYOKdbaTWe)Ey9tf;4Cv-3>wzDhpSy>r1R-v zkPeUVet+IwE#+La{w_1TzeS~KGu(+kW-Tp_O!f=F@g1S^t&e7LS9b-ElpN)uN0%NZ zA%N*g3=YPET6D8n@0%cE#7Z^>y`?A?#S+M ziMpg+;7}VBA8~oeV-Gwf_^1rr|5Ll{V|HPG5&D!$u%QypEUhx zysnM+@X6_241Bu?#=5d)QGI;g(x_bx+AvC~c2qcQFevQhSZ}BLoE_oi#z?m6p%9%s z54+?bkwV{<5E;2Np0=pxpTU&JqWe*p9X>>U^I-SRqqBvW6I*6m{ebhnC6-Xh;d9|n zsrmh0_#9MMst@SAE>9pG_J_tsY4%vDJl4)C|did1TnZ z`kzV)4SiZ_9+`h_)UDz^a|bi>axKf%S)TMncwud|UAf}bFgcHG7auq`zFmBTdVIL7 zDpY103?V{zi(ll^lGC4QLg@#tkzL;Qy*5ec8Z~A<(3vF5&hMh+I84t@`MB66w(eda zwn^nlz7->{e3IWgneF?T1U*j)3TEYh@G|Ow9)U-KD%K#z zY{KQ*NS3GQlX*pP+}W$?GPn{8(-^9QIAK}BDbOfKJ{gG!!(KcRPk(H?d2`MXeWzi-iApDxsUQT3J4z+N4Wyw5wy6dig`XMiqXy~5n%T#Y&NDTz-f+WElvmTkrOQ-8LH-|- z@L-@DfFO#AWkVm}!hdvGHQ^w2{6*%Ym<+^UaRz=Kiei8daKjapk|RKJ+nhXzqyco$e%Rh*4)$xp!S4p%IaNj3Bl~{SqS_ 
z*=9oVey6v%p`D9Hg(&D~e+bp4uWrb_OlWHd$LGgQw&Uffo9=Y;*;uK&TtbUxMzkPpk+4(jX!5)noMjZ~KX5cp z$WYUj0Lx}hwT%{QcH4dSMRnkfzpPvA@9A!c3VNgE9tl!3ajxcBUU5A(-K$p z+}ylR+xNw%K)%<6gjlIo>(-W@@?tmSO$E^)<;xbg<^=L+X!w1B$AS5d72VCpXQPI6 z7u-u^x#^gRH#W1Sf0^vVd4F&1VX21|4btkBt0cbW8UPbu ztZ1QUYBNRAKSJ{DaPXeq%JRfZIW(AWv5%w)r>pC;w8*xcP|E0SO)$1MQ5pUzvFQ$F zaeBt#G&=|>70lk{G8v+*0M#pxG<=ti1)Lr2`mEOas+1jdyIu6XRut#PqWb|`Qd^$M zq#q^@YopYAkyJE-LP)Zzw8uII5cu8q7X~+7jIzXq6y*-Wj}N|#+2XHU{u-$*&)O5I zd8K@4{bTeT&uKL-dFM{5@IlpYujeuz`ltS%n;ii@Ka}Uk?N@%(6pta}1_u>MyR~#^ z-fY@U_{j2TfjeKe(D=+}e!jhN(|7ijz^g;U;u2`O*M~!JbEYMEGkmZ*E3-nWMj*zaZhjAQb;W*Z9JNKjWvW8i zcgIwG2^DXDL04T_F-qUMQb|SQ&&A15%!+XxPQ}@Z(qjwS!j(3%2>#t^3RW`#uugy+q;-hH$##pI!&(SiXVCpOSw#zVmJKx{c z-;-t5_&XL~@ZADP>%EQ^EjE{iHNwq?qVSw3RM)LuQ!(%NIS1>EI6P~-(j?Bi9>((x z_OmCElFwQMwNfhDww|j7z&iIa%KZwq!hs}Q8y@RocDoUapM8^w%{}pX=hXM*`QZ|A z_m&+FK)epXjkmot3j-IwLXPH!O6ZTe%X7X+56s7lUmWfi@0!0hxFH@9!!n<)0xkDBD;Et&0<=ncFV%u4F8y>E}SLk;4)XBbW2@v7ch zWO1^6%-FmXqq;xM1z{^VZf7UnZD0Qm#0=;o@hv)FQphMuXl~2y0Dg`Y9HSeM_V6$T zzQH}{`$>%(#uceV^Rz`}v8~k2ED?DNZUAP7;?k5g>@OWP#03fpe-tWeKT%U+K#xq) zzNIKt(Ft9ikjd1n!eZtdvf>A`!tK<2k&qy7xQ=i~%-P788ZOJC>Jd$6E9cD}^_2HP ziP^o_==8b^jL#XOHg3LG4J1nFO{>A1*2=ctruwUH0|%=}9X_f)f2~(lFZdEb&bxI< z!bqyu@$nvLbbdPN*H@KNtF?7)MN~9HGT=v|fF=^Il984bl@Z_E*5$HTQ~WTas@8;t zSY)dQ&Dp}Z`k1zy8%VI(g?h!!LeAe1_ZxM%w^xZqx*dP~jNcl5-<^=tr0DWUCdcU! z;?Q+MC3P8DA#`8&cAcXhp^(rs4rW|%Ik-Bd?%tt3^{i^W$nU2mC7-$`nI|1n)J34w zyFz)t$%setG<^8&)RNm79U(ix&?_ZeFf*z(2*UpwlXJAJ`c!+3Thk}>w^^y zZD^F0wx^Lfv#p5W%aR&QGyu&J7d~cRDZac5+t4 z3nu|@b0Kluc>A2cy|xJ}jfx@OCM7mZ(jp3sh9-DS-N(5ATe~;3FK%_!U6aOsVQi)b zJN?8!Z$W5SUn4^fT}<`=cR`#bZ;fM*<2zjglB_x6^uw=!L5yNe`- z^dK|9+7|z)gK}L7@Yu>}V#MXe{XMA+iKBitSH$+THvFx9yL*9|){00-1dOE&#=42L z@=`!!RwZuQQ{4VBOQJI2cNq}H7z@daQSb16OCqY_@;9ZNl+HpGXsS*-@F@qM#cAMR z>tjU@sNa7iv2lzMnFWQEfu6tK{oO&ddH1EEuUc!y#W{+B;l)G*^9d7Ie?6>(gsy#WoPa9YGJLQIz) z&YKdsY2KDgadn^CbzL=(96iEZkDrMwagLGWm;!cdIxxF3uOsL0qE~u<*CQx|_n0a3 z>SS$mc$Nrwe2qa7Lu+hQH8~P>^<4qhPJiQ;!9$;MUcbJ+M1w;> z1Wvdv_hp9H>J2#2&AAhNSk`}#(SGXmUo!&j%c8drkPJWQc=)P{A1=G40(rOXjf3z) z49Q8ow~H&q1WR$uky^{g z$D{2cM($5z=>NR>c;KKnWVl?tR&Byn+zeGde}i~**ypiHt?T)iKAZ%ah1%gaKJ+d< z*-=jD9x7kt$6U6i3dLSK)Nc&i1a?TOS7;}o-$H}5rYYGm6Ojs=8c8XArjBc0fz58| z26@*nd#k8{Cp@kg`dMkHyfvjxR0vkJc7zoQ7Uja0F_E@Ia#E7S!&DL1yk$*4(puf6 zQg6!E;*@*32&PEp6|(8dImZWqZX+<$VMeQ1-ud# z&&?fkO=^K{G;?k@73aBKil;&5^V)X~iVK`w=yxaUkRs&Rl=Ek*Lt)%eCq%zYV!(`^(715HDct&k!;pX5T}xd< zV)S`wT8>1(#<%|bHMsv`S@iq;nGI6C%`Td{tU~29;WXCMmk$aG$-FFWWKVTnAg1fx zW^+GI2(^9DE>8EeY8^iF(NzL!t{d-U+k9n5WMz6=dH~lxP?)J*=4WU0cSbrYjEgQ# ziuSICr$=HMyIg3%+OJhAPIUvVU3Iy&-PZkcGL!Q4z*1R4(1EcPl>|MYO+;9+uK7CP z1A0T|JH>=eVB+Dm#z1fe9J;uH=AmNTP$;!AxmqrZYDBm<`PvjxJ}_91v+Jhw!j;%f z1I61p=%toX>i=PBy8{cbCsA!oI?QF6M2c&{&QSd+YdyS|0a`);{Hzy(xtbOz6sCK7 zEx>ZE)cbv^PaH*y0f&PFCOeYHSe>7vnQ>o*by9*K zx#fRBw51z0XJ0W*d)NKzW@G$_A2fWNQng=eypdGcRA9`to=;12BY9HpHAY4d1o_4u zWO5p@LX3;aO6-wjKMHxRwPU;KE(+?N9Q@?2pVVz2Ljihp81Q!a+C(Qm4m9f!*8-g0lQC*UjBuxJ zPg+Sok;AO)x?K#v**%pP>%y!5EOX9mvr7zx6`DA8e+)}F@4c_aYdr0Xom(^fsH-Ns zxw3J1JN72N*Z`T*tzC5>7z8HlYLVyob*v&Upp*PO=)I(8~Ox+Cq>|ce--O% z5HJ|?yXDFH@Gkd8qUig;`=$D8Jvg$JyMBQ#rdsKsH!|~_@JYOX^tqMEUa9e7qf{vX zZlu+6_|Q|pt9pQ6&J*>7RnW?tXj?M2+Vn+i*Lq;LFA859r1dro$EkqZh)y4)gwc&D_8jv)&8V#F7e{+LlV&-eP>TmpK< zW=du@6NoM6i#}`I-(94iH>#8!+B-YMZ^>1sM?$rMBqGw)-?U_XD?6q~DtPMjQk+kb zoCN`X*}Z?6r@-~N=mhCnpeQa_J_?@~TAGW!KqSap0p8Ock*YwCx(t-aA3w_W)HdXQ z+6rJ`i$pvLofwf7QEmoFc&UWpxYPHkWJ1)50g|G{pRaP()M)549WGQCDuLW`A!zu^ z%`Jz0SX znQHRqMnqSKUJWb)I^VOHJ?^?lWL)M-zTlq%x`i^?3n^cXK3YK>icjUcRN{*>kB2pz z5=j%aZdGH7Zac&5%JT_PZX%b_NLGL{!|Z&-~y;9RqGEHtpJ=_Q;sKN?bV 
z&DTWm-o4LcZ0wv~i0ljoDNW`hef!ah7OC&=JnTl1ARgS*HLh-FeVMbv9Rknq)j9!6 z$+itbiX`zA#N~!%M%)&IMLCu`ehwO>*thGKRgAnhhqFI{B5F3=xAgR+9cev-R3y&S zs>QCLiBeqxyNb2AoQ2E}^Nz=rqPT<&Z(u*ixUaU3XqDu(*QtM$^s@!+AD7U;>B!9A zusy`+0^(Y$B4VVVy&g#8Q}JoOTP=)}`P~iUqw1eB@l-D^=6+f8D|U}+n%lvmLHUj@ z)+(>$xa6RD^*}IGr+x2knf|3UhR(s=i`awtgGQIlg#Ovxhq6+Z^S37uR8^Y7C(fB7Z&y_pGRU7=pyy ziAOaMa>jfvBdw4q66gQi4JMYrD=LRLKbVw6cZVr^N%?M9k5&$^CH9ohIFE};#noU9 zN1(JK`YPs$uj$Lf0YANekc0$+3MAzUVLQrG9`rnjQq5>N6qYvT;|C0RAwJi4S)_TdOu^szW&MGW9KFwj278Pwb`! zoZhN0hupmSYHWzi?pXiEOc z!}GZE^@sO0o<>D5z=Tpzo7HvtnKxW)4L~Md@KZ|qTj3Kn_Hi5hIWZKv(m4q8k5x(X zZSJg1ac)~0S$+YTvdFnqGx%a-fqpor^geTuyNR2{4fS>6+-XfYLF*Ak!%y+Jyt^(c7%t-VzTw&G>#cmaY&w( ztIdz0KQVZF2-=vh?FO5$386BkN$CRo=1@$ zEji{stECG7j?=x2zY~Ijq?@O}qIz5v(&FWa~?Ht#MoK9MQ_8s1!7HWH%Qz za_K+l;(Z|B@bYASEUPFc$T!`nwO{+YD!57PwZpS^V$Ddx(w>Vzi^*g_y zQk?Z9#$sG+0I6<<3uQu2vU_!H)L$p#9l!yk;BuLQ@l$nfILp%Nw+7Hst8bJhFm0|h zR>(>6+y-v`RLPI6a|XwEoUNU^Z}OGRTqE5-G(_t9)QF-h&hxl7v!)2o8WOB~peO!R zz;4|ejDO4UcKwwlU+LW`SMuvwfS9o~{RW{V{rUXOuxYY)`;zz}oX0M9a>`cBZv{hK z4?c6Mni<%`DR|xzFO28%6g>Z1mlyWoH&HMQZw^@gRGgg15htuVgbMICq)(wJmbod` zVfhITPv|;=t16`Vz$#p!J`HKb{rfEx(k}?+LMRl|+skd`{c+x&Lvhx3Mh4DN0;0lu<5mx|;0?}-Y2J(;WlJZ={c?lPN~;_v!go4AQoPs1rqQgM&d6>* zHD@qQWoTeweN8Ar7t?j?8OY-9E#UM*+;ZT-?AbO94d2_dI(6-4OWH&OczEXo6NT!e zjp?>l-kVnIz7Whk*!Mcl(=X`3F;doS@WH4??`c$25?}l~TeXY2gzd$FiKq9-X)Gq= z>+SwJ5&{&_-&QapvL)?`#V05lf8(z(2pgoqg=8lD60yjc&6#BpD!fv`;ci!%Y8%#3 zeKez^5#Rm%%;cv`AL9C&15<3y3Um&&$*&B>ne|xC*u&>|fFa1M(VaJa=C(u3w7ko~ zHiB*jjU*R=00r=Oq1jEX&pEjKU5WGER~6)nS^Z9ZT{R`h-(T(M9LqRJ#Hf)Ei%Wer z@7Fq=74n|C-sf7mMJ5Nta=et1DDjr2WA$9f;0JH|hIW>yq*z#ubAW}?4P4waDr~Q_ z$wrKW)#oZ>a#50W>ER7eoA@I=equ;4=yJ~W-FA)a>%+V`b2c{MU=H6fnKdFc?` z-Dior_+D;oH*a;2_si2rY0SBvizE{hiK>#;P)$dS?xJnwD`qYazn1{$ujg2V5A0mj z)w!f38yvaI(Ue#l!UcKWQHoXXg;uQnP0o=qQZr~768K6x9o1P}m+x_U_-5hkZFME| zDI;Hhq5bO`ZQcWKqgAsWJ-fG3X!7bSA>yq*+w$i9jAIk6o>8D9JjLhgh8>qo{t$=M zpoo~5q( z?ylRCTyVup@K*{9@3*3N?*xDO|G}JoM1F->ON`Tufi{(muGP6M;J;~Z$TxC`Ej7E< zPiIMCmw0$W!NsB{js{F);q6^&w#~^^tC=t~Dl4z;Aj%Gz z&ZPGs*qCeznRC8MM(yLQZh%bz`)|h{1gnx9wD&cBAQq!n812vKCp;G`I5+K8_$oc1 zqkN{qp3sFI=fl_EiE~z?gfMEf4566}j`%H-BIfB?;V$Z|9$F43Lqi6&I}ws6avioV zWqnBbw*;AXreMH2i$TwI4Sq_J^Mnj?Vq$#$u7`yMwdVh!?5zT#`l3gF6p)ezX^`&j zZjkO4knZkAkS^&E7`j8cK}x#2yJP4Y2JYbZ{vQ7EaPN6!4twvjbL|zM)la!*A7$wU zlfA??E>eHwqMm_Pqw_Q=@tTK2{ahXJv_gz!w#)Wo(6RZQFBY4&c!gE#)$dY{I5DcK zN(%q!F6J(g$x(Mh&pWPd>aG$l7;T+xruvhHXy`4ej&aM&Sj(rEAmL3eS3GJ-u$ztocEFKFaegs6-$dFJbslPhQ zN!B1gK}hEK<-Y%y!{2DnTAwk9MliLKMy)qXz5$~+F@WmzY>>s3G|xthNTcN`b{Kj& z^V*|FP7xB(c+5Z!V5HYf#f+%Ldkag4Ln>6j5%!pVT9KQ`qfm7x)>Zy7U1E3Xn2alz zn8&?@jLU=ZSSO-ZvkwvzLAvGq0g=0|6Rb9L%kU%dw%4nAtA*2 z7=Y3ru7a=>Y8}HjUJv!O5T@}tydGQfHc65=fVO__vHX-oGX9nD=oboEM=O>tE-U29 zMaHmw)@JNv^c?|rJ9|b`y`YE@nxO9R-?C6%k>QtG>nSsSZf~$UdfM?Y@RJ!fitkIlvGtC-(_^ct zWB3ck<=v8gKClzGesG~v(;n(G+RbY~%AB)VHgRHG&;Ize>vcQ0X+!mjhr&lNMUp}Y_)%c4yO@%U9Nx>+!E7kJ+D2-S%33FpgI`yJvZ>GbsmWTPqe zsknsqoa7-D5x1EmD~C$*P`?`N4tBaXCFh@R_PIq$?fP3#dmX9w7>WIvSF*XCrifJG zv{0#P>pY+8hAU!tcQd%PTp}aAp4r9nmsqedD%ZP&;lhje)ru!v+0{Dr6+zoh zen}6ojs6fBtyNcGb$2%yIFMb4;-CbIT8WTzjB+_~$De-Q76A{1qy4+H+>)aWg=J&8 zbE6a6Gfyx#a^XhWXM8&f;1J+UHG*ESz=xxLoVMI>L;p~x$h63Nq7)W6N-~gw%K9*G zHBi-m_xkii1l3?n8?#a8s8K8$^*|1gx%x!21?$ z@|$EDjv2G-J)UpiTHgUD;-;!xqYG9Tyt3;GZg>w4n5gsdBE;Q+8S^s4a$c=w(+_pJ zKEhc9RQj2ZmwRChvF3{2-YLz^JTX6g0$SaG;(0qB_Sa_=*{%*}5*tyn1cv$~)INR% z%RiImHteJ0CF4(Li1FDC;nlvz1+1?o|M*R=>Gqpv|KXx65`b6j5icXSUB*vJ{ds#G zt04jNv*-L#cZNx=rsO(dwff}`_{cmNFBoqwi`X{e_mIHETeexnjsWT85b2&d%6Q_l z!GLr~lvT!ism~t*L^0W;Qho5aWv`*_h3ZyEZuU~6|C|^K@*~R3{JL?Ls_UsUtU4#T 
z_|osUE&y^T^CD3?*LUG525u>I&Z`UDpGhqgl0sDE3THXW&wI~TYaS8;sKh7LnS>nq z_v;)7v@RXDg}*EA!#pZla#>#IeWgn?GzGK$%~$?fMDA2Gi6*rf@qkg{i{`*Pc?uDdkA%d*($s*k&E zJNcfERfG*aF$)q0U*5|qdwEdwhlwN1nyD1D7W+f8qSup0DImQ^hgzr}zexYu0+7T) zM|ETtpQGPo=#d~!sK#3l9QzH1`GPlEp(DUW=fLo&{dl>p0LCkh_C%^vSCtdJn}d2| zBt6>k%D=tY$K^U2?mjqJT)1#swqrqG@W4iM8BD`|cG5Q3msG+m<3-s1nY|od)G|4> zM@CL&4^2H8`{nDJ%<@@@(3bs@<5=2sK^yBzTY2>2@3ZUnKalLF5lj6Q3FCxQ)mkL% zp~wZ1GVqDYNpy6Sl!{xOhJ!)z{J5CG;?Q<-LS6HReoB(gdxCymYQ$G3zS)K1)vdyv3F(3Jjn{6_H@#=ozyZf&sn5U6nIDeYiF z!Qh4()wIkaF|*ks7gF<;8^!3WG)%{@aEst1(pBm9wxn=Uq$IE2e<~e0LlOR}|@;6*4^^<@X z-bmvNykXa+K>kD{yHF9OBvz(YW$|8|_O{2&owCEx-*s6L`cSKf>}9M1?&mHcoZWpG zWvzBVsob~DYFd2C1iDkQG6&1K@zCwBt82J%bp@HveZVC${7BaZl&YPUxDZOrbkfdc z!7K|rfxA)fq_UG^TjN=sb@-!smp@-tSY^Q-ns=TA-&CwfBqvGudc6Cek-u*RgB4`` zGfSQy_$7Db%xk);nmCu$YbQZ!w?435tFO;>9R0=}I;QrR;TJ*9!nCGi&@IiOJbMEwa{YDK?AX|=>{bQYJHk@4yBiDRaZ9);_GGM$|^_}fKa z7c1&0(Woq99lMO1>Fb`bcXPn=1r@9anx_?iD!s&ccMc3}CaYoXQB$=q%Dwhtres{X zaa{KZaMFbw)vs6;i4&ea7O1Y=NRzm;=G?V5 z=r&N}gqjT@c?HwpniesxOF5t|YPTDiT)|Q%u1S$7K*q*@kZ{L3nOorGGExY9zq=Tn z#hVLgNTDcYd2|`Pj3HolT#bFYRUTjR{$q7W7==fFaI;Wc*u~d43WOgz>r=nBzSRr5 z8yyh7@$+%dtI|5>w-1uW(YF#acrK>qRwV3rl63yI2}8g}I?@+d3%t9e2h6V2Z4l|V z-<4O?qLClpmKS7eI`@$Ed^un7@i3r2DNna_L+0VQ)CADJ4gtUJS<#L?xI2tQ<5}!- zcXsxyK_|8`-Zf2S%x@X_$Hn;Ev=|P)bkWwM5O@5ToT8?-F@}*H1ClX|Q5}x8bOJ-{ zEkcZED49`LxR#LlBJf~kr(GfOqq&e^*{|Mo#^^OI<{8`1??uV~C>NB+e|!Ud%*u$e z{*wAGKv?fe@&0&TCmlrNhnK2}h6eyK(h5_S^K_*pYYlFzTXlT90a`HoEph` zxqZZ3I4?2db!i>xHAC{1_{8*k;nT3Mhv)qL?6SRv9kl7sUY<(8k@Z6iAuOw#&1xlbC!P(-%M+d-=tBF_nN!PhmcBo=e>= z>*;KE9VxAf)L3y_QX|hUGu|Iy`diH8MIVHisr9mKJ|uflfjJ$f|1Vu+GO-h@jl`os z=(k0yZQ7bri>6O!NHY*>og2V)vg5f@7Xz8s=$AW2Kgmo&0ZE5I+! z(J)6gwp+5yCOS^I1xOG_Ap5qbWJ4@VzRc?H?nT$@!|sF~LJU(1`9&#ZF`EXSx&eBX z^UjhToK)-`{MLqk7}qy0-qM1!{xwkbpV6JXU_OE$$O%N>SPX$hR}^%Qf5reiaCAsW z=#GR-?&QNFGQMB9HT?IWELGF)qQf59A0)vIr((OybpTNtxGN+q+)9@8EySA!AU7~C zA?atCeDXR2lx2&7gt@#cD~~rf9eL{1Z&DOUxcF>P@XpADKN>dGGxyg@SHws%aqppk z82Yq&@Z9oLk!$X}4)gmQEP{c3BHsodO{Iz{p9m|V%4=ykQgo(D*S&rAy0v91`o@Hn zUm_==d6tL8SV{x#QwTcDd2E>suhzB@LJX9K1eJL;@AcA(j*f1-H8ioE*|G#`Za?+< zeT}*+OX7}B``pg&CBvP%Pe7zC9Qcf-v(!uEHm&q-(uebpE0=)aR$qmt8-#-S^if9y zm2i*ug8S!)UU$5H5~!RIAPYwwa|C2>=t&?;vM-7ZbC48cgVv3yXy3NUopQ_~ytuuR zlY$eJ@mE2v7o=gMZ1bEC$8Q6XqfORS$vTj^SOArDkL;#tOgIoeQPLhCc@aHIe&0V> z%O|y)+UWlT_!!&S=nd5}cYD8LotUm_L*)v>2XOlLW|cB6OIcSI%zT)n6MW5F@7Mkg z>JyWu+I;wx`96iFGWk_Nl%U2YlCld8Dnr@X{rkpN9JKeKdsF--PE=YG=F|+@-P-N& zfDFWaCqMv-(U2JWR~_*7z0xmsxva;2#DaN4y=hI1uDYq6ou->1e&S{~)ng;Nk!l#I zvClVZ$NNLeg%kV)!$MK=9vlw&3PcbMTa5(uJ|oXpw%VO|8-zl&Ri8mT>r2Z3gRjvQ zLKxtKy^K0Q76Z1_t$H;vLSW8Adbf_-@fwFm)UOl0iL;lvh8NSKq})*TNxmks4>q8X zCJ#gKMXgrTRJm<6Y0vSu`l!@4`HN)Kg3@?N=vCq`76F7ZwgfD z`!-h#12MgOyLT@;5z#5CXrbNmKgnb&mX<T$as*ag+RsiW)7X<+q#`3;HlyYoqdFKjGlICt9EaKOsQ?E*VQ5 zTEeZkT4&))veJB;izh*3R57}40uWZr+czV=y`nSN_Jh(u!+}>)r64N8_~MuLL2QPA z+*_wWK|#jCQZZh51_3vZZX_Qt$s~2vWC;H8A4MpK3ZiJA%slUWUhgkyC#j39a4Xa# z4AAfUrZRNF{||b#`HoHoilZ3LVdnsub%z(Jcan)J5_P?gw?XURKhn_TUx)@*q%I?ye2fDGT!h2z^%;;98K=e+KA!vNV8f6Sj6r8TUFRH5bJTuBd-U~8( z{h#mPdndp7>pPs^X)O89_K+Cr-X`@tIH`Mn$j&VuR>#)MQ1c>?Jm8*dI?(v6Avs87Y{ zG2h_`Og8F)qvJ>2jDH2FLmc6u#x}cGcz_12&w0bjLohCO>67;e`tlH0iuZ42)RRJb z`=YwgeE(H`h<*O&E{=9pRLyh8f-vbDvkF3if`?T0(qL#e`fzWesUi)XUWu~-^6`+! 
z&4n~#c4+`;1%bvOAQ_ordE!ISf=NOVYQ(0K>!d-UL}f>-{WztZ|^)@NU)Ad?YomSB#w* zq$u3e=>z8kF)cu!2pc5J!!4Nvseu9Ort&1HccIJsR;2{|ba$`4J>W1Tdh)^8k%$oL z-5YM{HHjw+ztQ3WkP52m@Oh#2as|2z1WQ+TyizX?L$#WJfC~I@b#8aMD&upJP)>~E zJY1V}{LYljZJ>o@Mv$TC+ZJLt(t>%Dw|93^_ayl4`D=aa+7k|6t zkA!O6y4;=?d|*mccMnpewpIFQ^Xzt7vluPwaUjz&vAcI6_@Tl6t#lEB5c0u2Y7&pH z4<%I6ZkEh*Q|f7;P00Uh8W*tN6u;NSs|_VH)_-BUzgQdK1{6fiSiLi8qxsUXyvF~w zz$W({9ew96f5JJa3V>*yP&D^&Ao<`$E@-}}N!`-REGvZywB51zL8ylF-nRt%il6@t zj@?LN+{VGV_wiW;haG$YBr(F|(@`1xEBDuj@e@?~SRvt#zVzwVg{+^!*pZ1~ z!~TiO*+3NzNY^Q_PUW7IOlO({gatNaG!&BzoBu$JR!~sOk!sz*^Ui33HKjLV3Isd! zB|J#sPWkR5d~5r#M&`K5A4*Y}H+EG$LjTA8`D=hLT=^vuWH+0%RvAjuL#2F$R^kG= zAK=jA{*!dLwr+ysmgzW*NDnO_Yhal}!ECz_4xv;hgl${w*%%h;UmsBQQ5_7*Bs@w& zmXRgc87bWekoe>}xrsvq^E3dWh}zzmN`~kS9WnklV<3O}zh|a#j zdfh({fEWRX>A#N-wcY;F^k(7yZOsoxuL8oucLG2!io~UZ_X4Jmt606hZU^4);V2__ zg_gQbtqOHKV`=clB3itF!S}doJamu_E`j5*g;cp2mUG+~2NYy)5NC}B#g&vUJj-wu zl(DB?K-eV^C>ZRJJdx>t$d{5U2`uudK2!CZ$}_m9hge{~oI_ug_aZ{m`y!@Al4n*{ z)I@w|j^mPFG+kslgcDs_X6mWE5nAni=mKs5B*``0@#~9fUYKeXT!4Bx;r>P7QJ0~sTGol`})bq99$z7&zylV6{iipf~O7H;V#Zk#u zaX2qVB6i#K>nj^xPT!75mJswdj|Y5ksV%Gj`g)rq_-yd2qCyC(k=p_9^#KnZ;$bUf z+Y2@=8V_%E5fiSeDXsq-xh>|dKp?2>@JX%DXyun)B1cVz>;m*jLx9o&Ho9wEqD(QI zxJMCk{|Qh7nU4Kf_Rf(@>@&9*Q6y$vP1MWjVZ5mDJx`A)d|(-TtD55Jy#7{~Z-`bH zCkEfAzh}EN(KjRxkjMchG8#Vl6{E^=2lSl6odSUyf;v*3nBaV|Wxry^Ec0r$TX_-# zwCiuz30bMJm4u#|B|?@<-By2l0=i!9s7%XcaGGvXN_P$p`baL*Yr&XEg#=21$HHR2 zR0YYvLsb#i_6C;#$LrZ}a?l4(*~0gM^C$`FU1A#NT;OMm8QKh#72wztu*6zO%0_R|yP@Ar9lOWCLe+^g7$)#*4x3 zCRss;SX^=MmCrEI%{{zti8P>!)R7gF_{^~D=%z`pb0RE{r-s@j6z^1O9V{FZhIwQ2Nz!$pvRFOfXY zcALd|DD+Wr7`qW!@u}S8`>)uqJ~vE!wB@+iDVpquivvFM?W{y*^!0p0Phh4?epmYn z?caC1;0wSKVMaxx0tuj>zG!h+eUY9WlLTnkc6o724KQYp9AeXjV4aJY9vnpYx317$ zPA-63rEnmPW;V4QL<(}x^J1i53&1V^v}%NOSRP3+m4!SI6d-+O?W3e}!9!$m#YxR{ zovV+J${ktS&Ed%^7Kp4nDwEAytCBC z=grtkk;(|5+k@g2O%QG-;9ww@=lOd?saKUCuT0aawid3--+=y<0osq}#>z<&RQd8B z5`$CQ$>lRD@OXs@6?&>ZH4&KK#( z=Ad|r#=obnJUU-%?Yo{sKCzFxTot8qhx}68Vxu$SJP*PqvisCwY~xMTb^p??dqm(g zeLm*yXvG~@%E`zh@HnZ7r%uC@+0wdatb9pk z2-k^zCGyAcx>=5Y_Y2&>xPZRF6Yb6&FG1{y^S6=FF!U3Cr!Lf!nG7b)Ye0U{4D(ua z^JlGuw9WK5us81!2&d^6`0y+PC5f-9S4wLJag^^YSLnvq31l`QhMM*w#Ihd(*Lurx zNwQZD9BG&W$C|U7Kk8{Sf@t=jwIFhio$^%h8uhx9+Pl2*2hMz(LDLtJ{uPLBuv#z_ zl`KSlvq<1{&@OSCZ$j{9RPky;MbzgqN%#CX zS+}3#wtX+=BG3~8#u?F*_3wpNQp*IchHqHgp4QR}e&pgt=E2AE+qLq|)Zeu-gJv>R z6W#wHzWE8Nlb2ui_q!OUDO6c#RDg%XHHT2v{55-_6K6^D^VGnpsl6&m8WG{Y zO24-4SdxCv7x|2mbOfBr^4;oL_^ox+EIH&8Z>t$#Ud5Ci21%iv$OiE!g~U%d5m`6w zPBlz$I}9*z%}}cQ;#A^U#3HMK>&%RXd!_DfI=qDLh7w6K6_-8xbgLyNB59byg1B#< z*KCQCf9pSzIqU(++zi6LFfMM~w;p93_>sk|8p&Ktp`^(g#7$iT!mktp&NSZb)S(Vfg zadcpQwNM(0e8TPDo(ENX$r-J;rv&?XUVjgvNyh$ceQ0?BCjZ?j;g~9;%s!b~Q9B|b z>FV0z_H>xW%*xPLYb(mhh>4kqR^1#xfhKE^yNzLtof>J>Wz5s%pvd5hH`(idS@*Tp zIdv=lgZsQ1R&jhY`Y2E*GhxYd&Igy3)@o?j$LRg#!W_R!B*4OnKyd33;GHkONpNNvXlVC-+ z9?)?KrAZ+Q=1FG-B+A8FzMG4A{2fWeG~pgdpj)7s<~1O61<>;4%p^_S6Rp*>O!4o ziRPcte+KN)Dz+2UnDMlkD9Im_eONBH{zq(|O=#+i!!Ze`4nN73Sm)l%D% zfOV{V`bmLg@)9KjnK{^v9N9?aqF_F#58lVX@^nN?e>djwHX26{A?wp>p zy6hFG#Jlh1UR3?{jpzk-JdP+fRP8*ml?l1EIVZRe*wmjMB1zfg962C#zpG8kmMP9$2ZL>mAzKn8Bk!*KwO+0`CjSU)eIRxCThc`r$)u(>RyA0CN4R&)8pYBx^ET(x&6VNPgzRNDwMOZ0k8AevJNtv9*%eo6F#2y}R+ zv=c4-# za5R}99)^hVP6nRN-`$R4-eo?>?SvACpVZwP3%CqAE#sE$MX!-U+cRg>Tz6ml^?IL* z%V3=GxgQ!AmeY~bMYGmobanaQ;}d=|WE(`JvSsmg+N7x4pbl5QB=#e8Z|{sx_jaEr z91jo@=PQ#-#Is^`s3s9`W{_$sE>s?k5<9=s22qy;neKNO_m#6s`x6=*;oxHqNtnSg z{!*_DV3$33{FRFR()6(2)Ily-rf5Omfj za-Q$)%KLKiv&s9pLphyQRMKSFaN$8(^hf>E=<<}sr)r&mZ?a4$LHhG^_|Oon{*J>2 z{Mg^2`TX}5Zk4`V=@paygYzTfrRixuIL@>-!Y05z)$nv@Utg}a$5UH`x3j-%`SPdi 
zb5uk(X)$Mh^u{AjOwb5d>oc9bQ6Tz&a&cIpJ->vN?P6Bs@y8|CpWrTTSh(GF030`j zmHG#KT_Kk;HuU0sJ)3-zOVZV6w$G+Q>V$_(PJggP436o8T>bGPsuj;6chCNX9}J%~ z6zjwt?h&$65WbuEGVi60FLt>6wDn@4ezmzsHqpv%kCm03FiGq;Yf!Mu3~OysH~3t_ zT-&;n5f!_7`wNrE{rYaE*g?oZ@ZvLhQ<_3Sr0j6n2|lVGOKDt3jaVVmU;o@Ch88FH z(Ox;YaUm3hubxh@VGK;gChJ3fSB=uT1x?>~f(@{?B@49}k7F%a?8gtO+3%!$pMIdt zcI+!Q?YcElQtjb7E;@Pn%YT_%$~oXSH_%N|`9p;mF+|tEe%ATT!BATKQmTY0o-@=z zn3ZsA*FV7JV7%_Y0YHe_l9ZBc*w@3F@Ux^KDWNqbW+C9(+npmhn3(llNt@Jz8$>l{ zS*g7D&cHqu2Y|A2w0dGb(GC9Es+VCEp7?6Y3DX*V13W0WlLHzM)_Alod?zsm(;K84 z1qgDi_)KWU8prLeG*-ql3tnvMFr&(6a~cuI`42YQv5yYI%Q9M7^bPs6x@*>gM#l~Y zUWLAoKQ)^SFXkgi^cwkiycTP5C12jAy)HUspK!pD@CiI?rACJ2(o>DLr^s;Pw}RCJ zx2o55aW7G|VsS4&R|W>74ebg8GBAD2a}SD8`>*srIV}Ayu*LFrAMQO%r1Gkw_%+Vk zI*ObT`NAl#UJlNQIHQLsE28qI-F^owbkA9&O)S1vIN!(b+Sic?r0WaT(glT1Ovwp7 z=gH>uWVoNZoUK0gJ8`?6^fy&;xc;G#msoSoiQ5zLcY?rL-ZC{oi7@_%e&zSO6xO^u z_}QifgsJ3fU?h#xaCau83@^!Oi^dfyifjBbmdehvG0wd58Y0SJ_}e1G@6m6eq;y?+~s%z60`Ta znXFjf<5og5ZBrEF5iSN^9S~@Ja9djqlSb8nsEc$7|Ec3HB(Y}dJpLn!?vR`!eAMRL z>7!c`U2z|0J6Pii#U=nH#(cXfJJuN35wW6^t+mwt>MJHdFh$=6%*ZJr(|Y`5E7jKx zn|LSn39WFd4TUAPYw7qlnSEd3W z+ya2J2w^;%bD-^k{zODHCiSK|gc5zz>q7dV~uPl(yaD>>EZ z9F{^#<1X|P>zsP`*1)GIk+iysUxnKM7)3(9XT7B9z$yct8po^U!z5cOQEf5Jfr^fW z@qE9J{njLYfhqlC9bs`}*8$*5myU;e2q;I7-z+b4{b zm!B{*?!lmI4b=~=ZdsEfXk~}!oV)*Jm4Fh0$MDu~hxn2@QL>aWscE{VtJ?92_RP9L zoK*6^;Hkz1UQa-Kj-ji%Tetn^8_dd zP9jsC>+s1EFf~JZ{^UQcvF5wv-`O6=&jS8NZz~b~|3on!$n~yYnJdZ=V#yEW3heKe z`Jp*Y4@rjuxzAFSj6?`~rVcS9LaLw-BO8n>5YXUN@&En?d57%om>)d27yU0g>MA)R z7G=l(zUk}(M{N@_+QWmASh_^I|E;)s1XB#7`Onwa-SOgtd;jcJmRW-^)7>Ym(qQjv zh&#hS^NHTpy1p=H@nH{m@%x(Df3MlmNLJ8MRc^ z!(~HbFatH;`@a>6Kl8UPH*TGBZ_mYj?6|a$Vw{;UFyP>*7})ts%#6}XORL3rat{`& z@-0UsS<@lRH`{fkhaO95;^x^}hvsA7S4M{oB1S zn$j5Veww9@-AatQO3*2iOv~->CsxbgJ!0xB*w7*U5k_;QkdXgjS)f0!P$}*+<(Dc% zU?W3d^<}{TEs*L;kTn(Lpl(}K6$#U&txWK5WjrTWJokvcXz8~OThhR508VCQ1a9=N zwxN|$V$Rk5n4n(&%oPKAgoToM*BM{J3Ea4f&Pfz006pS5uqodyE@hs3B%Q z42XtQ?gAPz*prUJYV7Ur?Y;Qv<)RX;Badw9o?dLePb{za9PrFzL5pBU$c^ZFJg?2> zT5A-|goHz%?)q}W^_nBRbof+v9hFS0V-G6zV7B`ZTp__V{3jYfcf4S5o1E;(oLl#Z zX`jXaUFP7cv<%$Xs~h|S?%iK+hOb=v~P zk#&GR6q><-Cv$T>5&cY7#M1CLQ8bYGUq5j4-QuACQVg5GEs~;`EsfGcmb4Dwub4PR zI!L+4#HPANg3Cc4U3Swl6i>5&9#kcgx3oC#6ZRyxv#3&4`v?^i)ot$glD$@4PI2Fo z9=n@(k04lCZomrzQ8v9Dx!xu5eHPWbBaA zRdSTFN`{&P>xT+bO#$+EmJ)CdowDPDB=b>93r)~IUl{qHXO+cer7|^rD4=4t&qpk= zbx1L4cCB+$Fc@zj$rS@&|JnC%r6|6^9o#=%UzuKj_>EXOVP`_V&<`v*zgE&hOmaBH ziH;9{Q}KEq7SJPLT$>Yn7TubdVViqIGB!!z{Ya`SRa1EB2gw!yeG9~>*8kcM*-4Yh zKA8NSmnP4`MsTx%rlpoV{zlX28E|-2199bff6=t=Kje5qs`r#BMfoLMnatnxuv})| z0C5(Ap@k9|N%5=bYClLeE^E)uod$4y<|${P-&(n^7ek4iMwTLJu2~U}QC?9!W&-s% z+QnfWg*V$T*#E87#6)E{B^|B)Xes_J7z-wmYwg^emXus2POmQ7?T5YKkdTk?aDE$8 zmo^kW6B}1y+E7X|!v+sBXTR9SfH>RCTkIti%sbJ!lgTWYz)D^g^t!s~I9EpVY?_SR2;e00g@&hoxxi5H(C6SEhL9;Ym81-5t? 
z7tB|$8LSU|XX|A`7tmx@M9{`4G%{ge-urD@<2+f)y+Km61)qIu8n@9-fU6R4(G$O~ zUT_GWe;50@Cc8h$$#;KhP9r@U>+gN=GQiOoe!EKFr0JR{Vxzr(rB-LBZ6QDb2N-x_ z&PeQht++u`@=9@u>l*!hsL0=z!RpXsk{{oDHX9M46blr1_J{9Z<+UYh-OYoqceBLJWnl)}$qsO!X=`1o9@1~CM3Imj~CZ|MghJy>ECKQE)e~f4K$jcV+F>fqrwZ)Ye?gEgc z-Yy-mt{$(PT_P4H)=YaKw1!CbQCdC|tC|dF9<7`<6botJY=~C>5^|>5x%}wYrXXzE zUcF1`TJ-uD-l9eAn&$8Sq+PgEOm*yZ+>9dX_FJ^)1A+)6uKfG~2`~7ppj^IbGTHlz zw7ax}uUae+_2Cv8vSj`xZO|oVXF*S7EzY(hsJK1(@rnH5&lWD1Y|3c6kc+x1s zH%YO=DxCsp;GAu5PlYx;q4o1nZLz*r7Fu_KX#oq&!LI4S7JP?kXOIqD3~G6bk^#r@ zZJaHYfmb}Q4lkiZuCX3ff@A0y=i zu-GdgCjSPsITVXGoZL#6DT>fy$`%Jir++6hvk71gkKkkzQE)ymL`kDp+*O&$9n>zJ>KY$6p{&p>el`k*$$23T zOZ6E38sGlWc}rC6_9%zx;XE7?Z|eB7Ee@~L<(tB6#2EGSS;ro|W_~2Oy?&&|;e5Zg zL3n^$4VkltgHjAMfw7{4F!tn(Tw^;+wv*F+DFuqCSDJ^CEF+R7&whEB(zR32-<+W; zY`R#_yR6rJeVRUWjEN^(yTP@N&{Cqs8}Mw3iJVWY=EJpqq&o*!b6qY9Ieq*?+iPDBx>TE@y6?{k%%`0>oVDUHKJo-dz#Oigxq{FuIz^Qy} zc+6deGYHt&vOj`^SP(vn@w3%OA;*dJ9~)$4q;9&eN5Qdey;u%j3DvRsK>IU_#Zq)s z2jR%^?j+Eeud8*C(Tq9~dhJZ)**SMu7Q!iAK=A|=KPSiDOuk;+^qA&)dlUD(<>;+e z`RbZK4_};);CxwZQ%r&_G3*KK88=W(h@I_$P z@k|t}X$CfPM!@_hUqy_uMjK901^0{WsweRl_FTRE`V8&41!6hi?=?I zu7fpN7s;#X!}K<%hJnE2HzBiLJ99C(7gXWZ%K+#ZjRvQv22SpBX!Oyac792@b|5Ay z?Xy{oZtY=5y;r;F4COh0adOEdytzj7+>~(m&6#$a)Xj`Sy7IvvCB5n$0R>c!{KGEkvV0^#dFZJ(7oyG9G3o&#*NzkoO09f;2UR&929&WyVWJ#=70EAVy4n?o$nUrB zA<|Zk+otb^?SlOi`RJ|Zalg?G;>=DXzokOUtA+Rf?{WG(YkRV>75}SErXqlOrZcJq zkrQ;i^Rp>JvZQ`zUV4H0DEXWvYTbco^I@aX6h%MZWx_txx$pF_*yB6^!o${{=y0}? zFfc6z=eJ??%1_*1w@z$OW4iJfMzV8_ zuwC|d4Q@i4Z$iAfhYa1kCm(#mP1NzS+Z$K!n@LxgXJH0pb(+VrAQGz7g@StR0WWO0 zX`I~Hs3%aa`(g-t1>$vyD5Jbh-4!viL}jdrE>D^h8Sn46fNN7Ta$Bs8F4ddQ@kVlB zOhZSXc6*?p+A?bVvy*)|4|$)~g#U?B{5xs2an#hRpN+f!j{|J}C=C`0M;0eHAD;Sp zzYnEGEf!*BVLWNZo_M__Avev4PXsR69Q4j($`!exaEaB3q;Y6G=g z>ve7-E05fs*Fo3Th548NABP5HATrbgH)N*k9?mU7=3#=drMJ6^wdVI|9>OojZ@$1H zdJ=5cH}#uN{Qy&Tf7RkE`Ov6&zwq+y&a=z=AYvR;Ctt@?R|bMa0*)#1)+<=tsr7sT zEXC6=&7M!|Rh>*MvQQ=TSmG*J-A=^{S{50a1bBqWH+;8_Zuc7mSajBP4^OcRh}o&k z57kI6dUkjX=CjkOlBNTKxp0ERz>4mIvjsR{v?{)v-O7(ZbfBhoPYrRPxtjLq1Agkx zxw2mIC^nu=R9L5y<5}JqGg{Jjh!^Oqx~=p0A4x6AAH!u+Eg_=_<4+BJ_i66?N(H&0 zv$>Co4KUEzj_ps)`|z^4%ATBX;t>V-?oqg!7;!IjeJ=5Zn5EEBo-)zmxS6@Du zf6!nf%m{5H3+;P;zDo>A^N$EOCWYaI_8X#GA)?F6FaJS0#cF7*4G+1!asc$0^XITF zVJfNyJ}^?|PNM!CorON#gnB4e-<7~Fu%wsPqnEkv+V=HP00>Vs=|9@$7%W`#*WyQ=)W&AMS1UnZK-rC`^pH8pXJN>07Z;3vy=lW{uRz zb5^8%v1iNEz9cj5ZaRcP5u2Qyqaf>Zm-3dNV7TlZ;8@knHP%O%he6%d8>GmUG_ObI;v0dyZ_ke_moXB+r#7697>W;YZP`W9WnJ~{&SUo9$ zv+v9!3!>xh?FFKT29iDy`}%qY!g~mn&O9lRxULcvd5$Fdx}AD^*id&X#&~*yR9sPt zRvixV)i6JjroN+O>5t9gh#-wejRnLK2`LtGco_|35$$!}u@h9DOr>Hr&Gbrw?;frG z7Fq}+_AIn0cB+Qcc~)nbb?b-zzm2+Og*vG0=U1j-GEKNxBWBWI`Qryos6eVqCO(rN ziSN5goJ}#x3Mro0*oJiDl(fckuugZN9u`h3XXgn~%eC09x1<{zYnypG#Pi_m5CKft#Rs9F~4gN&?HtZuTPf#SX z;JsblHiGa$JPvVyECYpXNjm>{JC@*zWZmgFQ_yMlrIM@(uk^f8l(7qFiN_xB4{>Y%Gk@e6PVJ{0d9;;4- zH%F^wi}gfR%e5Jw(wd8fb#iHITjFBdYkL`K+utOrHTrDzJL8)on|y8!BwFeJYrK0MC1Evx6{u%^ znxG%;F#*?{_JR6CoA&ckrZ(g)89`vb`l~biU2Bq)>fs7!mX-o*RF$pAwQi zT#?rl&g^~rlML%1)8fe|BV=_EC!V8yPC`5om<9491v~%O@}DUwE!*@IS(~%`3`v5t zO^%mgIm8I}<9L2t$r)6K%5!g{@GVyw@8-p`U=DfGi|*~U00W5ag78a_-!#I%0Y!qQ zZP8EPzLU4;B|99Kff<**BTRw%aa;%z7IDxPd=l3j>7pbGJxR~t+f1qxbVu(z=ZVsTPDS6adUkw**n>?C?>v9B?jLh(&gs7kd9bOccYs`d5#(}5@3IK*2_{JPQ$O<8*qc_P zNN1rv)SFo~_x`iueMA2@0XV3legkOuKN6NEWdN4Hf<13>%YNTxLkh_rpDgDZf|x#F zb1)#g*ulNsI#N{8?P2k^vA-zk^fYp5$=^|m5>MF{Vow@^uh^dH!G44h<^L^uisVZM zfDp2d=i!qeL5(~}Fso-RJHEPzRLx&g)JA@aGbKrpofG?|>nuHXY-l9~SD^1^i{r?d z5_^~12z(#RVzln5PW9EHu^orE9S350xxWY}4# ztU)ZAT7l=Z{{4i=-+7UsEomKxc1Zn}TT z#r%G`V$F}hUi>!|j1NK2ZGK%yOgDAJU!jY%fTePu&gOLT&p3_g35ouozuR$56%#O6-kf30y##jnN-l8Y-+!r7jdOC 
zu!HD!;zJQ#3~#?VwsJ)(*Tn_E@{eqD#-|p73k>+(mkvQD;9rZP6Zts9_b6D$0ihr} zT77e75XcW^KVZZ=JsfVhS8iq^2!zE|*%m*_JYQL1Jq%8N7QK47UnaAS z{Fh0AD5~DK)E|~E*V)1pZ(qMy3ERb@w;H^R_1}i?MYc+~ybXK-(NPdM2bFC_FX@-_ zMQ2{t1qK^dr1%mDLlv zp<{WNe=UY(V48pc%g=_$F)3mUJUK#=v~A~76@)<8;w)RuXyGhFR3EG?3Btp;8d-G2 z)Vx1XM*}0m0+~Yt?_4K-Z-ZD~U{=l`Lu*F9m$XcMt9*)n&~`D`s@0b4jhqNGE(cyt z8zJw)&hPpJOvp{;(I7@0vsDzsH(Y`ahBp9%Xc0TyANd8W8P; zUf)*MB7^Q5LL4jvntlWsxy)5Q<0RvE5I+b;RhlAm;jK_8RwM3W#jzg}UAwZ9FZGp$ z1ZrM2xv?!o;tUL?8}u?@L~{x+54)J4(4J5HZTg1w*UdsRapkahv*Jz?($vK;h-4}R z_Fn}FR0>m63F$Ob8*QO9WXc#@RwP1I3Y(Q4s!NBlfFZcu5ATC;2J`I(YjlG(w*R`vyqt@rxa1}4s$CThN%>>qdU zUn$Va3=a>)m$+N%$qUOI(4QigUssl2r@p0y5bE&bv){=f^vt!)u$F%P#)MJIG2n2% zc|hn}=bz2@D;qSZ@)xptBRT1Uj07 zo;&(0PCRP)O&3AOsDOQhrrf^~j(1q5cl?2e$d?9<6wc^F`aE=S;b=!C47{+TOa8pp zd9(}Rt3zm0#?aCB%D+J~9}1xcsZRczO%PEk1EAFdIt;Oawk#pi-#!oWavQs(`WnuPXm}_emYWten5UUEImxwJk+Ljb?WM z&(04e9(+Z0xBL^>R)x-1OIQ@9BJo!jxzWN~ zR@;!&%TsD$D!3Us@p;LKLy3y0;P|EfRh88erqt40Exfv5g%P5v<9bPI+)!p1X5=Gd z<;P9;|M_cOS2Q)%oS8Q@Dtuea^MgMlzzT3EmLtuxw_6D1%pCiRfP$QiR*nOg`|$X7 z2ddXmGC4tjoWhu;nQ>Gy0BNefjunK&*B^4*{OlBu(*CC2IY4pI`_GWcmRrlDJ~+g> zW;|<0N&_J2t{gLMUvTcz6Ri`b*eE@Yvx1ZPziv00>px!Q<==PB6PB_E91Zcu>0T?S zq^4c(D71#TED7pjD2pH~c>mik{Sv1Bh$s;>9z3E%)6kavxmE)bk7_>PQey_p zR1I5TerkDqQH`JxdSXKB*1u>u^y3S6;BmVtAf_Nu|BSgT4wt2{(>ir_O$H9A!dF*K z*Fk4%tA!|qW)e{j9Qyvf0dkNLrK0t~IT`wx-vo82K)2mvzCQHdUgAzdLxnJ_Zy`a% zvQ@vBj=G9)d^2$JSdVBnA1!p%MPQDN{LltY5lRdg?);w#SlpB7UwWb zAFatJleom>3b^DRRBqUJ%yVd?PHKn*+@0?i1ebZl?i>Q?qa)K(iK5zE@z^cb3KP>% zzBYG(KA8x_`Kw^fR#y6TNWl~J;_j7}Rpb9OBDMuNerV4yXZ_yllSiC;8*Rp`j1 zJ80gU5N5Pzsl!rYRao!nnCt+=6ax!^0W0VXhAE_Dp85e57k76g)MXU$JAW}QTJQhz zjKY!nyDZGSyj1(DJNwMncn@Oo4_#6sj`Ml#mlf8grIg3cYBl&LYw($#z1Z>a>C~9e z$Dz8~9F83*n_Y-oVsgtC!tg%N$b6a$-y&32OZ%$YOOsu^hFdx&!@;odA45XiE0-sA zsD(F0!F6mKNE`NeCC{(%6CbUPw-r)x<%|2;XN?vMZD-#kI`|0>M$lgX^&x8D-b zYWR$G{KnNZbUa;w<>;wh{E zI{wdI+p)K~lfj9PU!cLmm;AmpU@t$RVo>yTySyG>ao&*bC$qw7LqtV1XLlwlm-X`N z`&dT;`OTMwre1r4-yNJ{FfZ4+bqH!>f>uC@sR2xs11>`zi+Fho4!B!^0#=%%~;9NE0*m>il~N&CQY+Uf4rAZ=2xR`fvhboC2D|;kavqom^|3 zZlg(~UQ?6a{f z6(js4B56K5BkM`PE1vniM@z1CW7GaRuhHz&+kjUw@Lo~+%sv1B4uB=f? zqs{P5(D&(!;xZ;Q*xNSeRnOCQNJ1eBd59EAN-9N3v6WIqxfN1sN|K7Tl(?v>wW75$ zRpXQ2!N(3OAwm2H;(_1i&Eihiljlj-O)s$PWYg7DK1fFtH>y#f*Bkh86rECknj>g4 z1{0HXi4B}G`VODBPHvh7%BPG08pLxmQY z1P7JN3pVJ-*)FG3W+)*8Ca>|U-;=@Z7(z!5vuopx!R{tLZ4R^A( zS&VZWd90=-tyi+n`k2( z&Br)Ur{fdb3d^E@$kO&f9OIZ*dY(G#%pa*DZL-JsbjO6TrKKlwd29RZOy3Jw(BQRR z)R9ec-6T{vVj6Ox^y1V|Gs{gk8pR@8%e+Z>g{n728B9TG$|4Q*fl-lMo zW8meG_#s;-%e#rL^j*c<+@UQKyZFO)qNIH>-k^JyvDPlc$zk;egkPH*a3yh<*Hq$yNpPpN=(Gl7+1Bte(;) z3Z(SYwrt1o7VTAr96sP5@sZg^8czAIElBpOlgp;3>UQ*|cLLkNwnC_Y{xo|c zO+V?#UbhF2k@W1lbBYXrq>zlPC8##PqNArDm=yu?`XS5a`MkeMPpYIqNDs-bElJHf zv5K4#{7S_ZrFMUQ*t#hF)QhH;blW{ZgF3~ZEn<3y%==wR>5SqM@7vp+j6 ziz5IC!{_YTbu`6E+=U_k{Zi)#W~B8Y?trLi`*7< z4#B|u{nhZ62!Xx4v;p72qjB`qQmd`!W@{^|ghDTMJ^*Q+tR=tpb>YHazYxt- z(WW|C%DgA)E7YEB+X3w89A1M+(?`{BBxVJ5wnjs}_yS9=6e_PT_bjXQafz>T9(Zto zg|=E*C%4s*pf`StmWvV3*h16QAv>fbyEQnK`B;AeR?E;mNjgOvX+a!mL~-jxd)=<8wbcad$g(C~^-bB=Ta1*8m-YTb}NVQA&IT-aD*;(bUYX?7$#p}DU~ zq`d688$>GXWu*-t|IU7Pxan&Bu0FQDFc|gW#n1M9OjLPD|7GAJ9-|<=R)qUT|PsH#>#CDctao6ZgF18er$-M>ykL+AAk`?4ldoWy9kSNFW%FY0NT}>@jgd@$g_rt!c zF}RM|e$soVN;50zr<}gviTfzJ6iPvBHB(>p&yZhU^UObW)7K`H`hGLL{+=@%=H(R? 
z^x>a$XWKh(_D;&eu1-4ZQXQRcR?*` z$DkUo2zQXbVzWxK1%8%xxZ^~zZ)g1RB3(jl?gmys&+x}%upvgVA)Y(?!E-dRBTeN8 z2*G1LUoqk6@uz(OVGI&Jn>>dfVrM%tL7 zQ zuL}9j;$G^%i4N+hfFk?f{*F1=v)5b13)CuPkDih*MLrHI2u;*Doemss_Jl&UKHb|p zvxB4_`;F1i6mM|`vr^B7fk{LJ>kBfX2Ewq|B9YM?oO$8}ndu@Fp4aP;6}9E34Ocqw z&DG6lFEhT)GnKFXY5bh-M6mF1h52>Ekk2i1FbbK-L)r0pRPa~q)PpuYHFI4Z`?m+X zEy@#n1~&J36-uA`r`c<(^T)ZkB9u|3gm*g+1qQ!No^nEY*eYzqBSouuF40x6Ru~Mz)TxOF zba;GxW!Au-B(y?=Tr8k`F08Y3DRcoN9<$f0QK!Uqt<+zQ=wvWiGg_@oaq;-WP#j>8 z_7Dx@>1Q@gFy;M1C-u%bjeLq+@E5q%u9_I0_hI#eOLCSQ%bZkFFtuQN^}6{QQD@bA z^b2&$BMo%O@842|Decv8Ld1AM|xplevlJg&GNu4IHb{N>#V{E6@urli&Ta_oZ z#Iqtb9PHwZd8NPCQDPE4z0z0HfO!p21<(=vMy(;vbn5f z1smulO(08bKmdQ&%BMs>xyQz~Ht-UcbJ#~F_jFWRxmNMH=65`KyXp%sWNE za3l^0nK>&Xv*~GMKEH`3$N9M4iPnzCto1(H;d7b`YNnv0Cl(yXeJD3BuKX?a73+?y z-w2AD;D#MXq*WBHHYD__bj?* z@yt9yq1unf-OKCUl9!*~#A24J7vtj;UcujGlbhP&)gNbFdeVHy(D()%zyE9F0sFN( zpTZ|Obqs6-HZ^gCX8Rfl?JDPXTaEtw1^bn%{H!v3tQqMiHqx6}R^VlpO7~D{ul@&x zeW(bqm}$>;z-4Fmz}oPF3yt#iG78~x36bvhpaLv!6duv8xufDJZ-fz6c#>mi`&kPP z-b#yZy>T%x$Y_;C%+Le}q}17F;<}kLH6USO+ydLQ;-G6QPw)EB*lHDVf1vZH`zYMO za@mziA-iv_z_G3L%e%x&=59sC+zV6~vlj+~GORg&PKR@JEPJ~TJ<%_bnJkqnID!!a zE&)qn3k_v)dgPIYJL&NlrcCd@k%@4MvLIDsc29XL@u9fC!L2ru+`|$x@jB~^MeCg+ zI2RYlFcyI3r!kiPF6bVQP46__g&!7}sn|yykJ;(DR<>!O8U_JG)*>p0*F)Uv(32O& z8paig8XiE-tDT9<-g`Itf(F>18Ad#|jRPupjgweZco7-~vxFFjReMdrCsCHPiP#!=e2osl8I-oFw0%5y!$k zJeUzLzqGMUQ>9ifTD$k+b#{U0E6yx6RcU^aeFLq{6V=Bf3!BWs~&n*5#1_fd+bnw`LX0CK=qMFfOsZw{)3Tx83LiK zz4|(3*jX7W>HUJQ-mz;xp6KK_qN{)pF}N9FzFt@e-T$xZiLlA%HP zZU@LQmGZaG^YeN|gIk~P}Y=xZjtFS~;^IU>y{u{YEvq&_; zJR06Uj?2YKLX)K+@vV1y*E9#K-{^MRiSPGVi}RGIA#X8_2@kb!<6dbSNe$m~>*iHZ z1Y9X7S4vL`zj9l)HJE@cURMv08K3XNFs|5OCiKTI-wboOUv15Bg0q0R>WCZGpR*)L zdrf7BnjX*P;z$i|@RWn}}A% zgJ_UaFhFPXR$uL(vRN^vUeOOX@si;3RW{HMZyQj^?;1S&cC%RfcMGQus;);Lx?!Dw zYn42?;0sVUeAwNN=k}^}-p4jC*~}uZ`+=jN<%Zp5oc=RXBkBvp8hM4IM~r>#cvVDO zdM03w%SO~k=RTr!8`oz0vYvGX#)C8ZCHv6Wzp&BTyS{ZM48ed=iIr9A=MJ<_W`KZz zCmjd#!}vxuVTpPyOju-&rkITQ=-9-X=?E6`94%<4`1~%h_FJm@fs>HFqGou*?cJPW zUOc^DjYgHuOm_n$<$x#wwD11d=br~`tI5YZZaFIbVMlY&hnf7H>TLRovv{lIT~7V( zfg?QbSg5A^1+vP4EzR-b;nfj_gGWgbhH=3#0sT#~Z43N|dKQz0k?qOo_*XC{u>|4g<@E~aPpCvTJ)@14Hn5eLSZeeJ-&g@zlYBPm-lPs1)1WzgAtT6y_*1UPt(R~z$%?a%);zbo!;Q4V5%C+-IW={>h zhG{X6AM=$ma^bcf>*C-0iCB>k;T}{luwYafF>wI2d*2^m^2t#IMId9%!#c8{WdMnY zmGnf2{(!NEH>5IJO6ogz7L0N-cAPb(Y{ct!?ndA1kDee?31?B)JYOq@ql!rq4~Puu zxA>VqKYoH}GC2u-IlL}CF)@UF0s&b$MrAiJ$$ud}fj8X&Y5CrvzbhmI4_cn#7%VHZ zHna)aP>k)7zSoP_f&uD2S(6X#burEU9oGoUVSkxp~PSE zQ(4p7H)O0b=(F;F0}@WnSQ<*klqo1O__`kpnfXl=c^XF3WD#KyIE(j4D{S{NbiOrn z0@;tZ`RdJQ&z3Z~&E@F|aZvI6G+#3hq2lU!HEIc8*qn2{B>m|-?%qqrdPb88y5BX_ z#D{f-USJyT*<$OBk);htt~l)}Wpq97?j2uR;!9xnR*`&swSac? z!thjpsU4nCwJ$j$L@JclHY7$8+EhD+Hh6ehxf^383j*l!;xfD$I}a|20dJWlnFCJ( zsr_%~$M9|0f(|5dzOARsrPO7l5_9h|k;Vg^C8Wa+eT*dak+xaBSMO<#BqBM=@wU-P z4_&XqJWjdDpGl60*>EGk)~URL?oSoepTetZU|- zO9FZog10au0lIoAT|tGJC{R8+e;A4&n9)kW>*$-Omgws1K=sd2@T> z_E1o4UU3}>0IH|>7G~WK;7@kznb`ZU5#u0#$ixynBz?JPWJmi6Z6D4tkJ4?Q!uvN_ zB#u~%AR`VAjfveT-GJl(FMPgN7uk(>n-I(k#3>JRV~N`CG(7G?cdNnItwqW#>KdiT zK5t5)oR^Aay{CCs9__KhuKoSw==EtNVTU)5fQkYW6=8Plfqg*6`buVs|J;g|x=wKW z)i#jxX|kCuiS*b|n`2pJ98=I?;~fHN9hwUiGIsZl@IAKO*4Nybk{flGNZZQezTBMa z=9YzUMAO(4>CY zZ`kuFq(dC3@A;6*?=YbSqjD$M`+PTs-qww{c(pNDa(fTyl9H1QIr5@FIbHPzhPkQLUb81*S}>P~MONdnWDJrp4* ziuK#HKU?ovh}>4umhI0F>f@Ycsjhi&e;;w0kayC-I~7z-t~wbIJUd{xsbR!*M)jb$ z$yrx6jM~R1bfG4Z9p*jy)9Cd2YRks_J?aI|41#&h&j#6k7N~W#ODWzDN@qpz9~$ms z$ykEw9zp{7S(R!6kbCM5xkNr@YC;FO%-C6&Z$q`M>xNaqhQv? 
ziKM8ESND7p-j#_{@P)Ijk-Ngi`<{`Rk~h2&janPfI>W8A*YeO(&peI~9$_e;v;r-x zf-!!_5OgYJ-aAevGO!y*2pS+Tun*$yv5Yn06wIP_3oPPbZ{!rrU?vOg=%ZKqv?rWk zl$P~0o0ghYETdgeMu1pM!Fx5bRR*9EHPqa4z;7O`Zq&$PNDLVBq4;P|eB&7siofZT@^r<;9QdGI%esy*ER`2gpdVi?|gMPufrnziTO?!1STcu}28LPtm(#3t4vG%~0Cub+R*PrnLNN!smoT`urrZm(m> z6d5^JynB!BGQ=&j<8yZ>4+DhO6u07@neTUTAAupQu^Uymb@dB-G*f| zW&oNND`?&-r4S2=^7V7e6zq@WPqQ$nDT;}gGcT{9i&$R|g~ts%KFcJNQPIJOBfvPc z#`8`0Y6ObwdbeC#$34u!pCykp^$zYMn5CqS{3CyoRSI0l9bMz=>E!lBo$3~9%MbOy ztfLBfuIXQ6=9T<69x)-=Xiy9c-Y$Yg#@QOcc)pJPqkhQThGjoJs`BbTl3!qxL`~UF z7y)iWuj?h|4p0-L{5rXFn~mK@hP>byW`{KsQr*AI5Z2n^>U!D_~N!5_%|{o2iR0yZPO5JcB3ti zip=BDQruZ8t*j7|nyRW4a(7PtR_6nVK0HQ8M@yRq(9rx^)J4P%5-M|4KSo!h8H;pr zadNqHi919Of(n}*p4;Ly@9+-w82K8N;UK z-tq@N?+(Y4_+9e)Q`{2(G}Ri6d@DN<+M+A6)4qnM1!>&e1`ia)R1ZcHu4!jHGG@Vi z4o-^qk@XdyFHO(NT9#*?!YCQU41@=DLMi>Mwz1LcD_0roKdmzzBwcWMiY!) zZ{Ki_2$HtN{4hR9R>HOKJ)D3JPkigSaY>mfJfu5lD!_f5QQ6d0i{DRCBILB6uPkE3 zU|Q~p$`*9ofb|84iPsIT23zL7**vm%T%=LqmMjmuV>xK!H)&65l=0(VlyZIIKoqSj zE?5CN(p^rJd|5ZzM*L2)E(<3p(7N(tUaVMes`4zY?%o>spn`f2lNAh}Etq7mw`Sqn z0->RTH4@N6&BTgO1QoFdX(<&xy~oX$=W-XdJJ;c+f7bZR7pWB zYYJ3NgO1CJ?=d%OfoL?8iv*310`Nzp(4BXk(f{%GTmXC1bVZTd= zYi4z+Mt&KtFzIs1yOKGr>wG9yQB^Q0v(8=&ie(-P7wQIJ3SK*il)bOoXEv&ZaZ z=Dc@izPlGwwh(7_ZpMY&BQ?_O7u%#+dR2Uw)P8vglG57c1x9?|mZgL;oZR4Imf6Ml0zejiL=&@W>as7C<8s#K{FoQJzh%J_h>#{{lL&lu zswyb!nURkCCufJ3CXbPX$XKw0aeixNevp|V{|;CVoEXO7mm?TvOvKtRrYnzii?AiALD)JuevnK=>Rm zeBilBvgscd(iMF~kqhu6e@Zx05&$5Agd(t;jXuUV2r=y}(f(rs25Q(Bc6BQ0-!!mm}zZ9Iz5VR?}&h_sU56f31Fk3TdL zb@0@^vb*N*_W8gzm^n0d;C(2(Za$s>1)>N1x9d}8*iK>Xw?4m8>OCmi&^jdr4l%4N zRo09?cdtr^bH;@B83y)3K^~@NRo6dUz3hFHv zUy-cOh9knhBP?fUj@-B*+Y@4_H9K!nTJyUTZ$}cBy}Q$PY810|Z<*S5(v>7kQP?wC zcUKaXvRur^ZeWv>d2FRdQfG~{v5(#iBIrlYhTzF|0CZxBhA8K&z*U&5JFkBp`8o)t?em zzfEj#r*C}8#zd4Aq5|6=y=A6kb&d}M3-q?vr~BPPV0s@%t1fOwlLZbIEJWX+cd!U$_Rzm^Db11B)X8)*r zl9wHZT(ZRDMUn zJHtb1`0*ejq%^fvZG+JfIq4ZTtxLsn)ed{_za?PBnF6y?L{o(}YkE-N-v;HSE zwQD*+n{2Z3&=JU^)1S504srPmxJ_H)@O;4)>BP@Jy0zaD(4 zWh1N{mDc&j*@-JPA+y&)ow)~>{?L`=gif|gd1mdiL26EP0)FH>s#$O9d#e2x2Ltd zt13w(U=P-g-Dmrwj55b%|7XRp*JNrHeYs>vck5BCnMglB6h)U-@&XbFi#4Sp@>=5t97WIBwFoFpm*v10u8 z>&;2U0>F$lT)WhhBvjP|}sx>O6HO*p=Oz0c7KO!bLL&@aB_+*rbg4q0VHDaLS< z8jg%s?Tcah_JC1pPcJ(nJ^iiDfcMEHsI{sGr=|SSXYpvmM53Sxa1h@*vQ%cEyO&qg zJI!y#-s??$nEl=`8Qb2&pc`Ab6MD{EWVRy`9U~*Dc(&13 zHGeE3wdL46WZF2P3JyzXbOlO0zw4#A?49)=2RHA{(-bL7w8FM&R0_*^@!g)KR?or5 z^OfhZIUk?TJH+QOl+dYD#R8QK^n31RN=}ldN5O9OGtR_fCw=p-n1|f~Q7(R_Oj!FD zlFzNFS@k9=Z|d>uIUybs+N!T zpG)IK2KL?oLt)WNi>; zX@hTYLj>->0uaEdVB5wXJV$*T*&_Gh*Furn*(IUmr|fWL{Vl*M8^~xcWH+Gey5Cf2 z;>ucDkYXg-5FM`{zXe~r`61+uu5bwOI&mR#G7Mz;)F-WnHJ;Y>e!8KEe^I9GCkR;i{hevR`l?XA@?K<(5;I_x!LUuJ0 zM&zX38av{Ap@XCd`w@)(CLIkZHgmQ~l0_eO_VeyWBzvVtEOt8+trE$`>ocvFfDP<-u2fQ{%mcrZ zPZ%8^Rc7a;sVm#?>!@kzrQI;WwpdDY!KoVY^E?O%0sA~C;jy8pva=B7gB3S~iqDtZska`_K$%(j=7p1UQMve@q3?kf>WSKBA%6q+*fs>cSZ!aXr+hN= z)&DyZfMci_{v{_ZLKde_E&#MQr{{XN7P_k~0$&wv##=U~)jgXflOyJ)mu4rI52+mI zREi$D+OkQyz5d2faavYuZD7UhWAW#{KjrpPX4)*|z>!xL&n?&>TbP^voM(}#A;g>- znE1-R#7UwwJ*X7Dwo2Ab+UgUVc08bpidEcDP;do}PhVNEzM_O@abOgTdrox#!+0%d zpW?iVYNd|@EPsBLRQ}j;D>}JS`FO~RRUR-Au~L~ycO)x63Ba^n@cI3n&z$xUVYHe5 z9FxAXc0|5C{T5(Ww7E#e?_hMHes%qu(lIi}*7&Mp@lR*o(nHIa*{N^aZF~5|lGlci z@Z480vg|IcB-nPHXVeP5ZzlJecCK9q4L1CSTKQKn1R~eLro#Q2kSqFeiMtfSgBU7V zPcQRDC}<&UX`f!7yCzeBQT0lJle}j)2Wkyp-_Q=<%*XSDXH@idqi@E}0vKwjsy-R90I((o6Eedzu+TCqyY3U?~DI4YZ*_Cqw2s7*JUzfBQ;|LHX$OzD5x3497 z7E3Gq>2y|*YS-qa^K0v|*&5rP9`uj-H#;@p=8(k}PSlIDUVZG|w&?XklHMAmVa0;ja!PD!39UJYuSt9ZE$!Y+{QgWhu(obde3tfTO)3r0&8nBfT+E)U%L`;E1FMY5?{_cN^H zDz3Zr0xLWV=8rTgO4^sNLA;;NN9h?c7`YYp^8m_Ms(RCboxbEe 
zf%y%2m@cjDF~4pS&I&Hz_QR{TIu-K;_>EWux9Sr? z&c=E$-Zl}R`G%@`UjjFy@<6V%b5nT#mOl8nBi;O$UyMc=&Ri$tT`xl)Wn=>Q>&W%x zCv5Rkr-Q-?j}%C_jL&;ScbX0)nJ zOlHfhlyGO$i#XGhTbERHBc?M_EAP?K;gS*yDgtlSkD&6>mgic@AiL>dwGrlN^X@G@uF|ALVyWXWF)KtXz^ZB5N}PrTs8+$Kw=#q~#(rS!c3 zdY|aMZ_nW$_Z1x^!E_WY@ug8-$Ij1*g!RtGG8}M;`jG<50nO)GwRPQ}sPnmfi$DJMKz{6|0Uk5;`&``Qi4GGaPL&g_G)evW(_bz>5l=Ahc7a70xuo< z<|EJVKUFcy`etja27pr5SnECzg42oiXTRFY5CJ^ug#A>f(E^4iOs|_wgt2%os3*BT zEP-T_v&l-W?@5gBh-7|c+SJZfq2FM8U0c+|@gK{eVn^X-aY*0j8Mx%yv0P<4@m^~qO{H@rn~qH>oYnTf%sIcA-DO_ z`u>s)$ffK0{gFu|9L5`gZXmfOOi|z2S@@%;mvz&!tC;kB+FCtP#ay?ILWJnFfnIYg{ ze{hM^>pZwKpUKl@d<_H93L#}y0`Y$z{rV)Cn$}R4HA900>yy;S2W0i+(S!dWKCa{Q z0H3615LEFS7N10)OAvpiK~Gzjh55D^J$AG#pI!alC7czEZm4HaM|O+L8;>nFQHjHL z1c@b%$T(!#--#;1I+nfQ(<+Q4ixIQ`rDpO27Sw?EKXkowc$`t&FPb)N(Aaj;*tTuE zu^ZdT#I~I@Zk)!pZQC}^r0=`W{;qxY$v<<=WX+oOtY_)D?;k$G!T7KtVQ3Fb+X@iO zWQT!2PvG!2#V%RE;jme zkNR`JN$p0|le%PkQBwBut}^n2`8Q;HPmBg8B7o_@x%f|MeAKw}TuPmg>5jvv;+hI z2+VC`gQG;0;SN7S9h~s*TJ-6ElXZW_v=F{_Ir25lSI^S=LUb){E)b6Kz{BI=ls6@g zH$UhmlJIzvQ4Qz*7z-HqzPIlvBdjQ((NiWzS}u*D=74JA*)hULEuG@4(JaR_qKfY> zyJR>#y*j#>>q&=d9F3a9()z9#){|$NPFcf@NC`>CRqKq2+e6j!P!ZEpJ`2)xMy8iQak(zv_TRU5u97SUeX zZYU@Y3A8O7l}uSj1x`UZ$5N}N+;(u`bvg39*|GRphj|aoCt=o$t`ciG~?8JN1Gaj z)D%#!J8Kc)$R8H3z~ky+OTTWMr8$PECQ>E~KP}pnQxKBZ}3b5pfTT zMfrLC2aPxFQ9wi@F4K#k;hS)N>~dIkHnfGOspRzcfxJHhvA%tod-^deGVpVZHYoh()t=Bu6_k{)zY77^Zxn_JwaQ(66!c zqM*1}PQtwO1j+-MN!ObelP^w9CJ8eDu-B_WK_{vN?@09em|7IUl>H?t7@5o3f3xme z#P&NP2EfvC(j`0Do2FdDrmEV@k2#<69SV)xBzFu72&f{Mw4H~j0gky(aS@P1WZD`=~nc2qeHe(gEql zmn)wTH{aX|Ww8ig7U`dRZYKfge*?I{>L3bplScp08(?=5{|1)#%?-Mqxrdo>0}6Lw zTmzOTT9+&2dH00=z}+?WwyTiiF*!;EncK4#*xP=IppUsw(X8#N|WQ9t1O zp4k`~6N)H^H0w~0>+PHLy?d;PpTIr<(FZXn61Z_*)nZU`iNIY_l20Fa(#1UVB5HbH5}^G^KGU1>EM?kRCF8+zyP_ zabOCsummYEOqhW-+aMsX-jTw)t8W>-b=2ABn%+^(-oX70c%NwjYK$}fn9&*xO3BxG zuWyfDU9nUb*I}hHbs_^LFvH>YjA>Y(6?h}Y!nO@nHoZL$ye;raJCg8F&$bquE9gcP zeOi|uVn+?ZRw(@FPEdPUsRB>L+>qqkO=ugVr7S70s z3(>3p1BZfODE_cGIa?rG#`q=)?c)9i10UV_oEfpDJ~^tKJ|1QJ(A*waS|8!dC)62) z-qDka+G)jeZA~&E8F|5r8U}gU1JLV_%O=DPEBXW487N=_>R3@IBZ4EX(%avI2_k`<1n}UZ#?U?$Sn@UG%$F|UvaD|i5O8+6UpUYzLIhwiZq|7kDqTb&oC#sqg1#4ZJlJLkwx zO@9*9MmFl}7p=^Z292ISh3@|IcljbSAIlZ{8mpkJwsDOuRu)Dd_EqzrTKK;L-h7C> ztgL8O1{(1M@R{S=9@nfbM4cLb3MiBR)cyYzx)cNEYJA4aj>H%PHlZag=L$SpTObZU z#o+0!l>22-(-Ida-3TJOYkpmaQ=Ozq-)M4A2>Mk_cnQ4eieDkZTyYG=JjGt9aX-E< zN-AeMBxJ-AG+l4C$ghnT^Vy%jOAN1+scn;UL+RR8hAmK< z$RtZcSV+RB)RELn*a3)F`X1@SIoz`iix}JW;=kM9-&YU6Yg8`S!>20J^}HpX<&~{K z%2W<{+3zjhs{o{|OA+s5sc1K#a3|I}5|5jmKKl<8aEwGnJd*|m^?jw7>AZMVQwi5x zRDmDz-`!*gjeL-Kr0>=;*>2YwN$%s&bKJ7R6rlCOYcV{yDtM`?^;Tu>F87T4kx#Rl z1nC-ucGy%Gt=z%aLG?QM#6{P!o4i2iYJ&)V_N_tJyWP3;^p~Dt%y#}fE(?bE`WK}> zk;V{W)>hXB)d~liuSai13Tc+Q`sBEeDco`qY0j}!>sf24cp|B8_Y;y|k+Tz;a}(3E zJ6hWLizy|tU7jyq04(eH7DLPCQQ}Lf8cIaGU1=v1y%k%FA4=v!p{Wh-_%_sWhgq|T z(0DjY%{=?VJr8cik?h_}vBEm%I&~VKivj)>7kKOf;Rgocop|j&dVSp0a*4NAFmM;`!@c-C8D>NZvj#`e%LgVrO>!iwJ>%53*3D%M>;w5{H z;QxX&Y1GbmrxtwL#YSgB`1LieCj+)(i}|+ zQy=eTH1p&?(vu)9Ppk$v?<~q%_`{+nsZZdo7U+E4D&Pm)HZuR5%&%!Mz@de1M<3Na( z@&R#SUd;)GNTv=~88U|El0(z?BDbVPRON*{5^iP;PBWifXJ;yHLcoSrJtz zDs5qz3GRSxbWdEBzmrql5WdJFbzN3^fG5l4V8vdw1k`Y;6w$@*?}O3U3Z0z>0@ zzuWiOuzm+%n>>H|Xw#3zT>s>OJFjU-j4?m~g5~7|og6zOUyt$w&1fO!_*TrUkP>tw zk+DxsdB^xA0>i;FhK1kezF@^ z5^IyYGp__C^mdoiK$DbK-T%0cy~J(|UvJ_q@rb+k2kAP+(uAJX^K18-zkSO#2X5>D zPyswvS=P~sX);1xh=VgWy&qO<2_Dg3nm^rJV42M9l8McAI6-pqj{sFFzuY4S{nM9T zBF$yn3mR?b<69Es3}~HRRDvn18ountovcGBjTSaY^JIDC7FzfzzzV~cJ+;q-+oYa; zbB4*j|E2NtnUe+&-25v#5eK~zHs)8YEtGWU;%Fi4+h4hTipS-xAAJjB+y%myCp ztGnkRhpJh6xz!H_<9780&F=-wHq99$PsZ_}jvF>}0|FS}Dk8AA)E{RnKYQf^O<19- 
zb1VIz3{uonG0tXLJIq_WQLjeu$~4GF5M;}Awq`Aw-aY4a{(I?gxTJn`ZuM>COi-9w z3TwZqB|gv*4<+<`JidTn=+aL^Gh#ITIW#yQlUDZb`Z)%CTo{-)&JAK6uv3@PyFTz%7`K7Fi z4cgsCqeQ@=zfAU;5ynkuADEkO@E`+Ku%))+wA)`tz6n57waMqruG>+(=K}(u&P$c? zy&#LQ8P3CIz5JEA*R2fE9IHf~kIvW-0T@vI3|j_JC9n7OD1Lz+vO?Yi&Y`!Bi5fc- zrs(ii>TNWXWqA86>+i?Iijn*2cF*@3O7l_U^LK9lX3wa&lKsn4F(6I5t@o9JBw7{u zoD?L-a*~9}MR)%Z-*r=;q=y0l_>A<(wM6IuJ9=Ay0sc-y!O>ASJwzS=+G8Em?K7YI zw}4p&=Z!UE=-Rl_tKBkb1VF)*1%`Ujbeqm{uA%lI5-6+Sm6WQay#~f)7dmK;CHu`? zeYbo-j;BwY)(B^YDdN1pR)BI@fa|^sXMSM((Xsskm*yZ|vp!_`L@hAeU@@GIj|yuF zr`tZwup()L&KYwk+&^SO^=SZ&DHVUoXn2+vlf!gl7L$R`0CVGJYNn}~wgzsbpe^|; ze#Elta-XK$NmNyY1;2ZyURS{H3dBmRFBdn%>AzYTmyvPv7~@V8G-u%Iu7|_F8P_J^ z=u!{W_^T~@&g|*%jJ|%@pbPcNX^9y$HnjUGsFQv7mO#zCLc>1CYgj-nuwGs`nBcS$ zOslG@I->UGn&O=hABvX&DxiY z$OF-r|4~KA2>rCdu0uQ5LJyq>v;e}fTjIz&(Vvf%8aJiQ#U!w-_U!1pt~AJ`XA1Ls z;;CYBoyHAP)q2S@r&&{=k=H){-Zg;Ze-zbwXyq+;=pN21X|G_hM$~+jsb43wjwomr ztRDo2ewx$jp7vERRMT3jep!@hcfv}I?)gnX$m^rL#Ts|^;otJX#OZ zPk%N%x-ZvZU_Uve)$sq z?^82=aT!U>U0bsUqY%;0vJ#<#B-m<5j^L5~?m zgRg@RRCt~hUNCA|E=je~R6$KB{gGI>mH5G#Ytxn-}l!STZbZ-koWSq%Pil0IXr)yhdiE5DBApB0s-J`+u?ZTQL5h26y%kRB{X*6W)N4$5_q_Vm4vX^1|C_lu!f``WrI@T zIp&;qP~6j?4{^m!fTBvI8$xnz7G}a^5jHVMKsq5Iu8!yo$QZQv;Z zNvTwT*zWr`9Sf5X%wpEw_E+aBFu$0v;9ylqZ4rY6-1(+b^xg>u7rysuKM{*jBSScW zCXySQ;kI&%h~8VF#%*n5E?Jt$3;3~=*28P~Kpjb!TPQ?rxBdKJd`WQ}cfaJS;D|kn z$aLBZ`*uy6r#ITCuN)pP?1_8u)h5AwI#(zF%+cU@T^ZObOFgwX-U7cNlaI=uEYW8T zH~l^w77{C|d~6J%7SZ_mM7@F(L()=SX=>4l zH{8k2gOWLJIwup0H|!)iR)T`Z4?)S;EwcDDL)vdo{nY`&OL5){dZ;rGe-&aK zWI3^#GhbwXBF$wQ<&EpSTXLzatRMqWS%PIBWL0QyHi7BPKdg38v+bB%2P@hSx3oR_cRkP8i8Z$vm z`-A^KpgbTcvD1MaxI-m~4%+z)l_|xaJcYbOA*C$ zu%~4dvC@ZuAMWgvXi5JL$Z0;*6dDtOI^_YX*)9;}+kuA0fo(n-UkR$*OE?YT%&7fFXv z*2}88NZ;fVyWKqkmnEm;QDs(!nEZol zShDoin$$y7R9~R6X>rBAfkBpOz(>~6(y~p}$1?OR34T@fo87r!qm5%$$YrhX5F-gt z;WcPXJnMdsNJL>DNmt>1oU?)h96Hk9px|8-`p1n&yW`5RN7dz;CDDO{=~+#FZG0O= z_{q*H!SDN;mhsctJt7AsUOZ-FbNKS6(@G1a0Df1L)z*mi^Q8H?mM3W7%A-_Atc?DPo7RWBb6nVur$=10PvC&W>pR>KF=?de)JPj5atBsm6}s zY_^KGMa2l9vVwivbVg|)H>cLiIf3zVRhmpuC@%Fs;6qf-$4D=X_<_#Se5!GX&L=(S zNxV3XURg3tU7$X0!&NfI{>4H@?T>j{3amKtjdaYa>PmXgI%b1;_2+cR zzHVQ8ZqIH3FSF+K>mbJUb9*ZJhU&aak{{IzEqHZ(lSdG!ts)_jwz%m{`}Md-7P#7D zFp?b&i|P*N;9l>E^#F}Ev$nqg_$e>x2;)Mkcw>@0{G|6AxAsX4803d*FIXWIBYygA zzy#$b&>7goCn`XaIk(7qXWYZBYVVT9itFTa3XHsv_HoBLa5fH_gd4c$ytW_|JJBiR zHUqq_c58KZ*xfsL^m^NotD(iSyzPNI-*%1T{l<)Dx z9Sdj5@=(^IpHt*5WM_7r{7PD6!2^GoR}tXb7bS`%JXUpSD~2tC&C?L(bAf@KX%T}v zcPf1O3l%c;@7e?(IW!)dF8RW+M8Gv)m_2R^tr|+)FO2^@%4LH`LzCpP7h5WRSHc6T{_ zmcZ*ii;Z+irbs;SO zA%;kkMXlLUx`F|Td|OX#V)a1OO+OKK+A3&CYc=(zC(l;{!~%ys$%|)Rwv6^6_4&ga zj?jZ_0A|F|qpC6~(6wy_N}!@6wZo-x^6mgl;M{tHqg-zEU16+8f(O>(L_8AAUM}{@ zSj94b+Bw7PtAY}LQ%w=2^@lXhp#~U;Z%5(Zfc?@WBYG-tz<@tMNv73{+dGMhY z8CFHCdJk@@ZGwsPeHO95E=d8erdZhDZ6WvP14g8DS^nd@u^leSHP?X41Zz2!H^Kc@ z;cp&fePd`jlo$8LygJa$UV1~kej@uBQEgPV=Bwk?kaBhOq*mwiz7_d@b}xyJ2x8#$0n2hwKvu-vpZ*@ zeF_d@2V(GsQh1k2Zju$8%jysvx4+gGuls9)6Bl}5|5x}zJ)|#k8I84?9zA{!F=@B7 z53O~^SfJ*6XI?6itT#tX7qk(O*8M7jTu0K_TNpjFU-X*!5Kk`(=Q#Y@@fn>3z{w^{ z-O)z)6L9J<8QwK|6WN?GLlK{0+;lNO{DiGyyii={7FAHD_-2SqDy<3FE~SdyUvsB2 zWyeohxw8+TI*VzqGGe>EM=c%;C7KScjCEaUvar`4@JRS0FdxjLQd;)#2j-Gb}wx7rmo>#ojbeCaI#&Sx$Fn8ctSH1h56f23A-5j2BHRnnYb zJv12dSK!{9d|_b^2%(cH%gTn@Kx0;Yy<5oB3=EYLR{vwlB2c zCYh=B<|ITq18F&J|7P^=us3#Cm%G*RdZ>2csc%mPDl1V=Qn1G{BQihj*T5OF_*|j* za$4&0NbI@g27`>wFQdfHo8P@KL)rGw$3xPm-4>-qH{~>Si=5D)33DNB?pO9Es@`yZ zp;{)lzsM-Gxqdo~wL41J-M&>+#xpCoM|a@7P7qvA)8@%%AEml!l3lJ((e z&h+0(`9mKutIcsbvx!r)%ApSv4|9WL~}iBB@E&GCW}lNoGoBl=GF`gexL@|OOj zI}4cwU`F|xz^GQsLIfAHdy}6Bn5poYI~}s9SPWBg4O5)z^uni3Y#_nH(sAb5?o3H 
zyoSOTMRZwo!)}APbCqaZ!hN&yK{HO^(@TedW(rl~A63PW00n|h5OEU-rk@h;E{vp< zh)8FO6<=W_uXA%6i~#Bx$wB~m?(J!6ogd=*s$USlU<^|YeF*p;k}=U3WR#y~77OQ= zbcv4$(NdIM1u**XB8JkI`6VvIb&|9O0UPoT&Vt##Q&z-YDc5Z{Rc3k=y?|oK49xU( z>eaZNH^IfzZxtd zXyn$a{>mStRKf&xzD>IyhtrT_ePTl)?J8)f2!txjo`94$vS_I9zykL{`ulAI;xEf&8c3<=wPK5BNa6 za)ISqw1k{7(UW2(y7q5t?ZFO1c@o?-p)e(vV_j}nRZtj9L6!H=A3OJ+bxnoA%k?>I z=Gghld;4h;EcnWSHNn^qc+H^8DR8PzaDf+nET1V+KVWwNM~k<#D>D}i9`{@_@M#y; zf__E#!4Q5FUsO**yZ(H7f)j};xoI9MJZN1ZpLR^^S4VTGIREd5wwwZTKoPcDtGnx# zaGl&?bRIMkAW?^3Ky|_ENI{Bn{6m3i26n$D4V{bjVZV!NVIN#LnKkz>7|Hfj~`VnSJVglwX67~bHu1%Duqu8ByL859Df%tsioDyuhz3>KQn|N zo!Rr^1c}PCKBH74qm&-$I@g57(t0YcaZ$VKa0-_N}`b4(w_OHBLG>dy~86>Zn|c*Rj@Tb+w}MQQ~)(m zVBBns(TRQS_sa|AIiN}U!}fdn#l2`)6j|7Kd;%q%=%-;baN&dBtTt2dW*r_;*YbCM zOvXo5kg(PBp}fz?Ghx#;VCQ3p0b&?zeYf^Vip0doQSbz7!z_k|p!AP@c8yvoD}d6I zZEybjA^vHpK9!;3-+8Vc&lckJ8>c=Kfj-uSQ&Jc(~KD0ffU=n3U=kamvqP|m2^oH z?`m0fl@j-Ljtbbyz0U*~Z<04B7s;y^ejNmY zfme1P*>ZHG4Et=Jj8jH2i&r;m3PlJE_%x4X!VL%pePe+qrja#5fw}z#(f4{PlsKV` zC+gQTI|I+hh9kv?${iGmc`N>pE!XpvjqCkY^d$J=y124gm=g34Y=G`j@H^UfoZw`v ziB(632Az5kx=;n`PLS_?c84e{UoFjbzjNm=ZkcU7sRf}rCHGhXb2k4jY&Jpxnd9#~ z5Inm|ZLV6sR`u!u_9ilE%j2<+dHM^j^_9QZfG&U6eWpP&ce zsFeo{% zEEoY8v+A6*+tC>3wuA#BJuXii53-b#_jZiHzcJB+DM)%O@s*xmIO(G-s?Ga-^XE2G zp;QI-!6dmH-|SBY>4C3f7~syv+=|=twc0P=pd-FQlK!hQR!@MTr3li~48w z7!dU=`jrWfTjk%7@ck~$E^Rmyb0&=b|7MzV3RB#l4xgpko4K9sde~0D#8ouPlQ{v#$==%9uf4yb6oEohhm> z_mYulIC%eIPJjz6e4@lX(igQ5#gTLE<=;|Osca0yt_3p<@Mbr^xeiS1I?v*n>%=Z@#LcYPruou{p^GEXXWr7;TIF#MXOrI?j`AG-m}tK@B( zDxe|?=(Ej zoN0QU8+?12u3AfUm#WPApYAl>l{KfHvj96Bos|fe^sw&*j|l-43x$%ZW0)R!#1Ex4 zjbaN2yg<@<;?umoc<=q_4cz`}j>NP4f0(!WlY+x|m5s16lz^uQ^xmSB5qg*K-y@uD z6s{A}2LXBUjim0;TUL~_1c?Vir)s=}gNtH3b}C2DN+HTR(Z8;FTO1{ap39`j<;jn1 zJz=Jc2M?)8r2$*C%T3>N%0ha6jKIjEci?d(?baCl;FIyIJ{H)0my4+>Vd1^I&fTG; zwKNZp7hgIHP~ae`^L;BdCy2-NmbdD5zlZsGhE7n}cyL4;N{{Qq^f(&8|MZRry<4UB zU8js-N6fal&IBM|>=>l+Q+jhw3kF(J!SgH5=8>fkFg6UFQN99U2@bPPmOznWcY)q- zd>qdrglHNA`2)(Vbm|D1HPcEYwCl1NSdfMqDU>1^{dsb>?JOmOPUfLs9a3oJp6fE4 z#rbwh27@R)z8`dB*{mm>CitspDo@QCYeaNr5YO#R zK4i0?9-zk&uJ7}R(Q-4{?(BJrPa5x}IuRc>jtSUBL9hGJH5r1K6x2Q>Xq6%B;V!?g zQY%AlaHc%E%Z;vS7FFxw@uoCuN#WtG-q~z(-Ze0iZaq~lirpck2MuWR$U|d}>#u)% ziQ-ygi}b2BJ}7bX&~bhKbvn&IG^)<`kTKUTCRG5JcdFC6MwS{Ci>z;v{Vh#OJ*V-= zFZXMPs4tp$-#6PM4wu}}F8ugY@MjlZ;1=7>+UX$2nRfX=vVDUX3B?|xJ zlJ-ot@{HQt0%N|FI_Sl&%Hy8`| zqWd-57O0*xX zOOok8+Q2X%LtA+ozJDOYZeyQX;?0`yK${ms&wq3F0372FLmC(TBtnNZ8%}jwBNRYP zpB#e&Qv)_ZXz=4c6LxByU+5X}Oy3O=ekx#23)`zV6ds`7FG8XsiV&{1%8BT}VzCt} zvQhd?TnaW~D~>Vs46)9l6phVni?8=T;3bTLNi&NrNW%E^i~U2IxCmx+J9!FaF$~#x zqcPNY78+Y|IFW4LcCXm0V}_K-HUtUB`B4mF$^JsX24@bZi%TVWR@mJA&p&b3-vKlJDKYbGJc&-bp=qS zmB}_PGRc}LlxzLrajS6_agu@?u-yvpe{y+&ENyAY4Od8b)i3u$WC)2}xbqI6%8lYT$Z4LJdQ@C!no1=kfvw3NA zW~73;0vBWwb0)mGm`)doAVtScI9b)iOd;A;^kKBF`_fWQSJZ9lyU;PO-tq|x-J-tB zpWW`mWG+Xu3tUhD2JnFZLHwvX$o^(}arMDRU8BW${Jx2tPh*vc?-X;*(=S5(a`SbZ zCv+&feK%>IP+I$En1!hG_jZ{6k?P6O0PQQr^T+kY7IWJhXsR@QRhw^ni1DU>>0I!; zHukJ0A9tQB+qTH@+Q&;s{yPA946ECkPk2yQzO-BIMQXKpZS*bZ4U3QZ zPa76_*yx((2GV)`u|4Sj-7-;IHhAi9(cfU%;4Xtyr~$i^NO&OTUPA5G&FQgmFlNPY zLY!aK`lL7;vWKS1#Q30bVqPZRVhvnu)^ZB^Wv3&?JnuQS9z9-ibdQlkY$sM9r30&< zMUneFDERYJ!4kw$-%4-L$x4a46V9;RJ<%8`l(UiXyy zE^mO+!N?r_N6HX{Sk@YC6VlD1Onk#Q7%40H>wCt1V*xxOufuoG;#3rxj?S55>B+vA z>M6MhPmT#OQkDGrwpOw_H2kX_ZEnX48@bT%F$^9^4wpwwno-kcSA&a0^Uh!VqnsRT zZ;~9{CWzImH7_)pZ9*2c=NlcF@0ct$QNiAgp$2Gf>8NUJaBl6@S9UGrA)k!Y5`BwmS6d5C49 zjO$Z!-C~OEy@ADCj*(HG{TAizxE{h~cUn3hwC=~Q=#+YYJL?=QMeE*eW>?jVS?;uX zPX?XX;;Jkggjrkp{z$usXd;8vqeG;qU(!p{Mr)b$BD;CHBn;q%xKHda?P1lbMMP+i z6me5snTTKiv>*XeOC)%Hl?q-DLw@VhoGrYyHr7W|C2RWYwh{uUaEZp%{L^T>5;i_= 
diff --git a/include/chunkserver/chunkserver_common.h b/include/chunkserver/chunkserver_common.h
--- a/include/chunkserver/chunkserver_common.h
+++ b/include/chunkserver/chunkserver_common.h
+#include
 #include
 #include
-#include
 #include
 #include
@@ -35,16 +35,16 @@ namespace curve {
 namespace chunkserver {

 /* for IDs */
-using LogicPoolID = uint32_t;
-using CopysetID = uint32_t;
-using ChunkID = uint64_t;
-using SnapshotID = uint64_t;
-using SequenceNum = uint64_t;
+using LogicPoolID = uint32_t;
+using CopysetID = uint32_t;
+using ChunkID = uint64_t;
+using SnapshotID = uint64_t;
+using SequenceNum = uint64_t;

 using ChunkSizeType = uint32_t;
-using PageSizeType = uint32_t;
+using PageSizeType = uint32_t;

-using GroupNid = uint64_t;
+using GroupNid = uint64_t;
 using ChunkServerID = uint32_t;

 // braft
@@ -60,57 +60,55 @@ using PosixFileSystemAdaptor = braft::PosixFileSystemAdaptor;
 using SnapshotThrottle = braft::SnapshotThrottle;
 using ThroughputSnapshotThrottle = braft::ThroughputSnapshotThrottle;

-
-// TODO(lixiaocui): 考虑一下后续的单元测试或者校验要怎么做
+// TODO(lixiaocui): Consider how to handle subsequent unit testing and
+// validation
 /*
- * IO性能统计复合metric类型
+ * Composite metric type for IO performance statistics
 */
 struct IoPerfMetric {
-    uint64_t readCount;
-    uint64_t writeCount;
-    uint64_t readBytes;
-    uint64_t writeBytes;
-    uint64_t readIops;
-    uint64_t writeIops;
-    uint64_t readBps;
-    uint64_t writeBps;
+    uint64_t readCount;
+    uint64_t writeCount;
+    uint64_t readBytes;
+    uint64_t writeBytes;
+    uint64_t readIops;
+    uint64_t writeIops;
+    uint64_t readBps;
+    uint64_t writeBps;
 };

 /**
- * 将(LogicPoolID, CopysetID)二元组转换成数字格式的复制组ID,格式如下:
- * | group id |
- * | 32 | 32 |
+ * Convert the (LogicPoolID, CopysetID) tuple into a replication group ID in
+ * numeric format, laid out as follows:
+ * | group id |
+ * | 32 | 32 |
 * | logic pool id | copyset id |
 */
-inline GroupNid ToGroupNid(const LogicPoolID &logicPoolId,
-                           const CopysetID &copysetId) {
+inline GroupNid ToGroupNid(const LogicPoolID& logicPoolId,
+                           const CopysetID& copysetId) {
    return (static_cast<uint64_t>(logicPoolId) << 32) | copysetId;
}
 /**
- * 将(LogicPoolID, CopysetID)二元组转换成字符串格式的复制组ID
+ * Convert the (LogicPoolID, CopysetID) tuple into a replication group ID in
+ * string format
 */
-inline GroupId ToGroupId(const LogicPoolID &logicPoolId,
-                         const CopysetID &copysetId) {
+inline GroupId ToGroupId(const LogicPoolID& logicPoolId,
+                         const CopysetID& copysetId) {
    return std::to_string(ToGroupNid(logicPoolId, copysetId));
}
-#define ToBraftGroupId ToGroupId
+#define ToBraftGroupId ToGroupId

 /**
- * 从数字格式的复制组ID中解析LogicPoolID
+ * Parse the LogicPoolID from a replication group ID in numeric format
 */
-inline LogicPoolID GetPoolID(const GroupNid &groupId) {
-    return groupId >> 32;
-}
+inline LogicPoolID GetPoolID(const GroupNid& groupId) { return groupId >> 32; }
 /**
- * 从数字格式的复制组ID中解析CopysetID
+ * Parse the CopysetID from a replication group ID in numeric format
 */
-inline CopysetID GetCopysetID(const GroupNid &groupId) {
+inline CopysetID GetCopysetID(const GroupNid& groupId) {
    return groupId & (((uint64_t)1 << 32) - 1);
}

-/* 格式输出 group id 的 字符串 (logicPoolId, copysetId) */
-inline std::string ToGroupIdString(const LogicPoolID &logicPoolId,
-                                   const CopysetID &copysetId) {
+/* Format the group id as the string "(logicPoolId, copysetId)" */
+inline std::string ToGroupIdString(const LogicPoolID& logicPoolId,
+                                   const CopysetID& copysetId) {
    std::string groupIdString;
    groupIdString.append("(");
    groupIdString.append(std::to_string(logicPoolId));
@@ -121,7 +119,7 @@ inline std::string ToGroupIdString(const LogicPoolID &logicPoolId,
    groupIdString.append(")");
    return groupIdString;
}
-#define ToGroupIdStr ToGroupIdString
+#define ToGroupIdStr ToGroupIdString

 // Meta page is header of chunkfile, and is used to store meta data of
 // chunkfile.
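The inline helpers above pack the two 32-bit IDs into a single 64-bit replication group ID: the logic pool ID occupies the high 32 bits and the copyset ID the low 32 bits. A minimal self-contained sketch of the round trip (the type aliases are copied from the header; the main() driver is illustrative only):

    #include <cassert>
    #include <cstdint>
    #include <string>

    using LogicPoolID = uint32_t;
    using CopysetID = uint32_t;
    using GroupNid = uint64_t;

    // Pack: | logic pool id (high 32 bits) | copyset id (low 32 bits) |
    inline GroupNid ToGroupNid(const LogicPoolID& logicPoolId,
                               const CopysetID& copysetId) {
        return (static_cast<uint64_t>(logicPoolId) << 32) | copysetId;
    }

    inline LogicPoolID GetPoolID(const GroupNid& groupId) { return groupId >> 32; }

    inline CopysetID GetCopysetID(const GroupNid& groupId) {
        return groupId & (((uint64_t)1 << 32) - 1);
    }

    int main() {
        const GroupNid nid = ToGroupNid(1, 100001);
        assert(GetPoolID(nid) == 1);          // high half round-trips
        assert(GetCopysetID(nid) == 100001);  // low half round-trips
        // The string form used as the braft group id is just the decimal value:
        assert(std::to_string(nid) == "4295067297");  // (1 << 32) | 100001
        return 0;
    }

The cast to uint64_t before the shift matters: shifting a 32-bit value left by 32 would be undefined behavior, which is why ToGroupNid widens logicPoolId first.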
diff --git a/include/client/libcurve.h b/include/client/libcurve.h
index 58459c8bb2..92fa097295 100644
--- a/include/client/libcurve.h
+++ b/include/client/libcurve.h
@@ -23,17 +23,18 @@
 #ifndef INCLUDE_CLIENT_LIBCURVE_H_
 #define INCLUDE_CLIENT_LIBCURVE_H_

-#include
 #include
-#include
+#include
+
 #include
 #include
+#include

 #include "libcurve_define.h"  // NOLINT

 #define IO_ALIGNED_BLOCK_SIZE 4096
-#define PATH_MAX_SIZE 4096
-#define NAME_MAX_SIZE 256
+#define PATH_MAX_SIZE 4096
+#define NAME_MAX_SIZE 256

 enum FileType {
     INODE_DIRECTORY = 0,
@@ -44,38 +45,38 @@
 };

 typedef struct FileStatInfo {
-    uint64_t id;
-    uint64_t parentid;
-    FileType filetype;
-    uint64_t length;
-    uint64_t ctime;
-    char filename[NAME_MAX_SIZE];
-    char owner[NAME_MAX_SIZE];
-    int fileStatus;
-    uint64_t stripeUnit;
-    uint64_t stripeCount;
-    uint32_t blocksize;
+    uint64_t id;
+    uint64_t parentid;
+    FileType filetype;
+    uint64_t length;
+    uint64_t ctime;
+    char filename[NAME_MAX_SIZE];
+    char owner[NAME_MAX_SIZE];
+    int fileStatus;
+    uint64_t stripeUnit;
+    uint64_t stripeCount;
+    uint32_t blocksize;
 } FileStatInfo_t;

-// 存储用户信息
+// Stores user information
 typedef struct C_UserInfo {
-    // 当前执行的owner信息, owner信息需要以'\0'结尾
+    // Owner information for the current operation; it must be '\0'-terminated
     char owner[NAME_MAX_SIZE];
-    // 当owner="root"的时候,需要提供password作为计算signature的key
-    // password信息需要以'\0'结尾
+    // When owner="root", a password must be provided as the key for
+    // computing the signature; the password must also be '\0'-terminated
     char password[NAME_MAX_SIZE];
 } C_UserInfo_t;

 typedef struct DirInfo {
-    // 当前listdir的目录路径
-    char* dirpath;
-    // 当前listdir操作的用户信息
-    C_UserInfo_t* userinfo;
-    // 当前dir大小,也就是文件数量
-    uint64_t dirSize;
-    // 当前dir的内的文件信息内容,是一个数组
-    // fileStat是这个数组的头,数组大小为dirSize
-    FileStatInfo_t* fileStat;
+    // The directory path of the current listdir
+    char* dirpath;
+    // User information for the current listdir operation
+    C_UserInfo_t* userinfo;
+    // The current dir size, i.e. the number of files
+    uint64_t dirSize;
+    // The file information within the current dir, stored as an array;
+    // fileStat is the head of that array, whose size is dirSize
+    FileStatInfo_t* fileStat;
 } DirInfo_t;

 #ifdef __cplusplus
@@ -85,21 +86,20 @@ extern "C" {
 const char* LibCurveErrorName(LIBCURVE_ERROR err);

 /**
- * 初始化系统
- * @param: path为配置文件路径
- * @return: 成功返回0,否则返回-1.
+ * Initialize the system
+ * @param: path is the configuration file path
+ * @return: returns 0 on success, otherwise -1
 */
int Init(const char* path);

/**
- * 打开文件,qemu打开文件的方式
- * @param: filename文件名, filename中包含用户信息
- *         例如:/1.img_userinfo_
- * @return: 返回文件fd
+ * Open a file, the way qemu opens files
+ * @param: filename is the file name, which carries the user information,
+ *         for example: /1.img_userinfo_
+ * @return: returns the file fd
 */
int Open4Qemu(const char* filename);

-
 /**
 * increase epoch
 * @param: filename, filename include userinfo
 * @return: 0 for success, -1 for fail
 */
@@ -109,41 +109,43 @@ int Open4Qemu(const char* filename);
 int IncreaseEpoch(const char* filename);

/**
- * 打开文件,非qemu场景
- * @param: filename文件名
- * @param: userinfo为要打开的文件的用户信息
- * @return: 返回文件fd
+ * Open a file, for non-qemu scenarios
+ * @param: filename is the file name
+ * @param: userinfo is the user information of the file to be opened
+ * @return: returns the file fd
 */
int Open(const char* filename, const C_UserInfo_t* userinfo);

/**
- * 创建文件
- * @param: filename文件名
- * @param: userinfo是当前打开或创建时携带的user信息
- * @param: size文件长度,当create为true的时候以size长度创建文件
- * @return: 成功返回 0, 失败返回小于0,可能有多种可能,比如内部错误,或者文件已存在
+ * Create a file
+ * @param: filename is the file name
+ * @param: userinfo is the user information carried when opening or creating
+ * @param: size is the file length; when create is true, the file is created
+ *         with length size
+ * @return: returns 0 on success; on failure returns a value less than 0,
+ *          which may have several causes, such as an internal error or the
+ *          file already existing
 */
-int Create(const char* filename,
-           const C_UserInfo_t* userinfo,
-           size_t size);
+int Create(const char* filename, const C_UserInfo_t* userinfo, size_t size);

/**
- * 同步模式读
- * @param: fd为当前open返回的文件描述符
- * @param: buf为当前待读取的缓冲区
- * @param:offset文件内的偏移
- * @parma:length为待读取的长度
- * @return: 成功返回读取长度, 否则-LIBCURVE_ERROR::FAILED等
+ * Synchronous read
+ * @param: fd is the file descriptor returned by the current open
+ * @param: buf is the buffer to read into
+ * @param: offset is the offset within the file
+ * @param: length is the length to be read
+ * @return: returns the number of bytes read on success, otherwise
+ *          -LIBCURVE_ERROR::FAILED, etc.
 */
int Read(int fd, char* buf, off_t offset, size_t length);

/**
- * 同步模式写
- * @param: fd为当前open返回的文件描述符
- * @param: buf为当前待写入的缓冲区
- * @param:offset文件内的偏移
- * @parma:length为待读取的长度
- * @return: 成功返回 写入长度,否则-LIBCURVE_ERROR::FAILED等
+ * Synchronous write
+ * @param: fd is the file descriptor returned by the current open
+ * @param: buf is the buffer to write from
+ * @param: offset is the offset within the file
+ * @param: length is the length to be written
+ * @return: returns the number of bytes written on success, otherwise
+ *          -LIBCURVE_ERROR::FAILED, etc.
 */
int Write(int fd, const char* buf, off_t offset, size_t length);

@@ -158,18 +160,20 @@ int Write(int fd, const char* buf, off_t offset, size_t length);
 int Discard(int fd, off_t offset, size_t length);

/**
- * 异步模式读
- * @param: fd为当前open返回的文件描述符
- * @param: aioctx为异步读写的io上下文,保存基本的io信息
- * @return: 成功返回 0,否则-LIBCURVE_ERROR::FAILED
+ * Asynchronous read
+ * @param: fd is the file descriptor returned by the current open
+ * @param: aioctx is the asynchronous IO context, which stores the basic IO
+ *         information
+ * @return: returns 0 on success, otherwise -LIBCURVE_ERROR::FAILED
 */
int AioRead(int fd, CurveAioContext* aioctx);

/**
- * 异步模式写
- * @param: fd为当前open返回的文件描述符
- * @param: aioctx为异步读写的io上下文,保存基本的io信息
- * @return: 成功返回 0,否则-LIBCURVE_ERROR::FAILED
+ * Asynchronous write
+ * @param: fd is the file descriptor returned by the current open
+ * @param: aioctx is the asynchronous IO context, which stores the basic IO
+ *         information
+ * @return: returns 0 on success, otherwise -LIBCURVE_ERROR::FAILED
 */
int AioWrite(int fd, CurveAioContext* aioctx);
@@ -182,51 +186,58 @@ int AioWrite(int fd, CurveAioContext* aioctx);
 int AioDiscard(int fd, CurveAioContext* aioctx);

/**
- * 重命名文件
- * @param: userinfo是用户信息
- * @param: oldpath源路径
- * @param: newpath目标路径
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Rename a file
+ * @param: userinfo is the user information
+ * @param: oldpath is the source path
+ * @param: newpath is the target path
+ * @return: returns 0 on success, otherwise it may return
+ *          -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
-int Rename(const C_UserInfo_t* userinfo, const char* oldpath, const char* newpath);  // NOLINT
+int Rename(const C_UserInfo_t* userinfo, const char* oldpath,
+           const char* newpath);  // NOLINT

/**
- * 扩展文件
- * @param: userinfo是用户信息
- * @param: filename文件名
- * @param: newsize新的size
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Extend a file
+ * @param: userinfo is the user information
+ * @param: filename is the file name
+ * @param: newsize is the new size
+ * @return: returns 0 on success, otherwise it may return
+ *          -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
-int Extend(const char* filename, const C_UserInfo_t* userinfo, uint64_t newsize);  // NOLINT
+int Extend(const char* filename, const C_UserInfo_t* userinfo,
+           uint64_t newsize);  // NOLINT

/**
- * 扩展文件,Qemu场景在线扩容
- * @param: filename文件名
- * @param: newsize新的size
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Extend a file, for online expansion in the qemu scenario
+ * @param: filename is the file name
+ * @param: newsize is the new size
+ * @return: returns 0 on success, otherwise it may return
+ *          -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
-int Extend4Qemu(const char* filename, int64_t newsize);  // NOLINT
-
+int Extend4Qemu(const char* filename, int64_t newsize);  // NOLINT

/**
- * 删除文件
- * @param: userinfo是用户信息
- * @param: filename待删除的文件名
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Delete a file
+ * @param: userinfo is the user information
+ * @param: filename is the name of the file to delete
+ * @return: returns 0 on success, otherwise it may return
+ *          -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
int Unlink(const char* filename, const C_UserInfo_t* userinfo);

/**
- * 强制删除文件, unlink删除文件在mds一侧并不是真正的删除,
- * 而是放到了垃圾回收站,当使用DeleteForce接口删除的时候是直接删除
- * @param: userinfo是用户信息
- * @param: filename待删除的文件名
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Forcibly delete a file. unlink does not actually delete the file on the
+ * mds side; it moves the file to the recycle bin instead. Deleting through
+ * the DeleteForce interface deletes it directly.
+ * @param: userinfo is the user information
+ * @param: filename is the name of the file to delete
+ * @return: returns 0 on success, otherwise it may return
+ *          -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
int DeleteForce(const char* filename, const C_UserInfo_t* userinfo);

@@ -239,96 +250,107 @@ int DeleteForce(const char* filename, const C_UserInfo_t* userinfo);
 *          -LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED and so on
 */
 int Recover(const char* filename, const C_UserInfo_t* userinfo,
-            uint64_t fileId);
+            uint64_t fileId);

/**
- * 在获取目录内容之前先打开文件夹
- * @param: userinfo是用户信息
- * @param: dirpath是目录路径
- * @return: 成功返回一个非空的DirInfo_t指针,否则返回一个空指针
+ * Open the directory before fetching its contents
+ * @param: userinfo is the user information
+ * @param: dirpath is the directory path
+ * @return: returns a non-null DirInfo_t pointer on success, otherwise
+ *          returns a null pointer
 */
DirInfo_t* OpenDir(const char* dirpath, const C_UserInfo_t* userinfo);

/**
- * 枚举目录内容, 用户OpenDir成功之后才能list
- * @param[in][out]: dirinfo为OpenDir返回的指针, 内部会将mds返回的信息放入次结构中
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Enumerate the directory contents; listing is only possible after OpenDir
+ * succeeds
+ * @param[in][out]: dirinfo is the pointer returned by OpenDir; internally the
+ *                  information returned by mds is placed into this structure
+ * @return: returns 0 on success, otherwise it may return
+ *          -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
int Listdir(DirInfo_t* dirinfo);

/**
- * 关闭打开的文件夹
- * @param: dirinfo为opendir返回的dir信息
+ * Close an opened directory
+ * @param: dirinfo is the dir information returned by opendir
 */
void CloseDir(DirInfo_t* dirinfo);

/**
- * 创建目录
- * @param: userinfo是用户信息
- * @param: dirpath是目录路径
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Create a directory
+ * @param: userinfo is the user information
+ * @param: dirpath is the directory path
+ * @return: returns 0 on success, otherwise it may return
+ *          -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
int Mkdir(const char* dirpath, const C_UserInfo_t* userinfo);

/**
- * 删除目录
- * @param: userinfo是用户信息
- * @param: dirpath是目录路径
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Delete a directory
+ * @param: userinfo is the user information
+ * @param: dirpath is the directory path
+ * @return: returns 0 on success, otherwise it may return
+ *          -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
int Rmdir(const char* dirpath, const C_UserInfo_t* userinfo);

/**
- * 获取文件信息
- * @param: filename文件名
- * @param: userinfo是用户信息
- * @param: finfo是出参,携带当前文件的基础信息
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Obtain file information
+ * @param: filename is the file name
+ * @param: userinfo is the user information
+ * @param: finfo is an output parameter carrying the basic information of the
+ *         current file
+ * @return: returns 0 on success, otherwise it may return
+ *          -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
-int StatFile(const char* filename,
-             const C_UserInfo_t* userinfo,
+int StatFile(const char* filename, const C_UserInfo_t* userinfo,
             FileStatInfo* finfo);

/**
- * 获取文件信息
- * @param: filename文件名
- * @param: finfo是出参,携带当前文件的基础信息
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Obtain file information
+ * @param: filename is the file name
+ * @param: finfo is an output parameter carrying the basic information of the
+ *         current file
+ * @return: returns 0 on success, otherwise it may return
+ *          -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
int StatFile4Qemu(const char* filename, FileStatInfo* finfo);

/**
- * 变更owner
- * @param: filename待变更的文件名
- * @param: newOwner新的owner信息
- * @param: userinfo执行此操作的user信息,只有root用户才能执行变更
- * @return: 成功返回0,
- *          否则返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Change the owner
+ * @param: filename is the name of the file to change
+ * @param: newOwner is the new owner information
+ * @param: userinfo is the user information for performing this operation;
+ *         only the root user can perform the change
+ * @return: returns 0 on success, otherwise it may return
+ *          -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
-int ChangeOwner(const char* filename,
-                const char* newOwner,
+int ChangeOwner(const char* filename, const char* newOwner,
                const C_UserInfo_t* userinfo);

/**
- * close通过fd找到对应的instance进行删除
- * @param: fd为当前open返回的文件描述符
- * @return: 成功返回 0,
- *          否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+ * Close the file: the corresponding instance is located through fd and
+ * removed
+ * @param: fd is the file descriptor returned by the current open
+ * @return: returns 0 on success, otherwise it may return
+ *          -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc.
 */
int Close(int fd);

void UnInit();

/**
- * @brief: 获取集群id, id用UUID标识
- * @param: buf存放集群id
- * @param: buf的长度
- * @return: 成功返回0, 否则返回-LIBCURVE_ERROR::FAILED
+ * @brief: Obtain the cluster id, identified by a UUID
+ * @param: buf stores the cluster id
+ * @param: len is the length of buf
+ * @return: returns 0 on success, otherwise -LIBCURVE_ERROR::FAILED
 */
int GetClusterId(char* buf, int len);

@@ -343,24 +365,23 @@ class FileClient;

 enum class UserDataType {
     RawBuffer,  // char*
-    IOBuffer  // butil::IOBuf*
+    IOBuffer    // butil::IOBuf*
 };

-// 存储用户信息
+// Stores user information
 typedef struct UserInfo {
-    // 当前执行的owner信息
+    // Owner information for the current operation
     std::string owner;
-    // 当owner=root的时候,需要提供password作为计算signature的key
+    // When owner=root, a password must be provided as the key for computing
+    // the signature
    std::string password;

    UserInfo() = default;

    UserInfo(const std::string& own, const std::string& pwd = "")
-      : owner(own), password(pwd) {}
+        : owner(own), password(pwd) {}

-    bool Valid() const {
-        return !owner.empty();
-    }
+    bool Valid() const { return !owner.empty(); }
 } UserInfo_t;

 inline bool operator==(const UserInfo& lhs, const UserInfo& rhs) {
@@ -380,14 +401,14 @@ class CurveClient {
     virtual ~CurveClient();

    /**
-     * 初始化
-     * @param configPath 配置文件路径
-     * @return 返回错误码
+     * Initialize
+     * @param configPath the configuration file path
+     * @return returns an error code
     */
    virtual int Init(const std::string& configPath);

    /**
-     * 反初始化
+     * Deinitialize
     */
    virtual void UnInit();

@@ -400,62 +421,59 @@ class CurveClient {
    virtual int IncreaseEpoch(const std::string& filename);

    /**
-     * 打开文件
-     * @param filename 文件名,格式为:文件名_用户名_
+     * Open a file
+     * @param filename the file name, in the format: filename_username_
     * @param[out] sessionId session Id
-     * @return 成功返回fd,失败返回-1
+     * @return returns the fd on success, -1 on failure
     */
-    virtual int Open(const std::string& filename,
-                     const OpenFlags& openflags);
+    virtual int Open(const std::string& filename, const OpenFlags& openflags);

    /**
-     * 重新打开文件
-     * @param filename 文件名,格式为:文件名_用户名_
+     * Reopen a file
+     * @param filename the file name, in the format: filename_username_
     * @param sessionId session Id
-     * @param[out] newSessionId reOpen之后的新sessionId
-     * @return 成功返回fd,失败返回-1
+     * @param[out] newSessionId the new sessionId after reOpen
+     * @return returns the fd on success, -1 on failure
     */
-    virtual int ReOpen(const std::string& filename,
-                       const OpenFlags& openflags);
+    virtual int ReOpen(const std::string& filename,
+                       const OpenFlags& openflags);

    /**
-     * 关闭文件
-     * @param fd 文件fd
-     * @return 返回错误码
+     * Close a file
+     * @param fd the file fd
+     * @return returns an error code
     */
    virtual int Close(int fd);

    /**
-     * 扩展文件
-     * @param filename 文件名,格式为:文件名_用户名_
-     * @param newsize 扩展后的大小
-     * @return 返回错误码
+     * Extend a file
+     * @param filename the file name, in the format: filename_username_
+     * @param newsize the size after extension
+     * @return returns an error code
     */
-    virtual int Extend(const std::string& filename,
-                       int64_t newsize);
+    virtual int Extend(const std::string& filename, int64_t newsize);

    /**
-     * 获取文件大小
-     * @param fd 文件fd
-     * @return 返回错误码
+     * Get the file size
+     * @param fd the file fd
+     * @return returns an error code
     */
    virtual int64_t StatFile(int fd, FileStatInfo* fileStat);

    /**
-     * 异步读
-     * @param fd 文件fd
-     * @param aioctx 异步读写的io上下文
+     * Asynchronous read
+     * @param fd the file fd
+     * @param aioctx the asynchronous IO context
     * @param dataType type of user buffer
-     * @return 返回错误码
+     * @return returns an error code
     */
    virtual int AioRead(int fd, CurveAioContext* aioctx, UserDataType dataType);

    /**
-     * 异步写
-     * @param fd 文件fd
-     * @param aioctx 异步读写的io上下文
+     * Asynchronous write
+     * @param fd the file fd
+     * @param aioctx the asynchronous IO context
     * @param dataType type of user buffer
-     * @return 返回错误码
+     * @return returns an error code
     */
    virtual int AioWrite(int fd, CurveAioContext* aioctx, UserDataType dataType);

@@ -469,8 +487,8 @@ class CurveClient {
    virtual int AioDiscard(int fd, CurveAioContext* aioctx);

    /**
-     * 测试使用,设置fileclient
-     * @param client 需要设置的fileclient
+     * For tests: set the fileclient
+     * @param client the fileclient to set
     */
    void SetFileClient(FileClient* client);
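The C interface above follows an init/open/IO/close life cycle. A hedged usage sketch follows; the config path and volume name are illustrative assumptions rather than values mandated by this header, and error handling is abbreviated:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include "libcurve.h"  // the header shown above

    int main() {
        // Assumed config path; any valid curve client configuration works.
        if (Init("/etc/curve/client.conf") != 0) {
            fprintf(stderr, "Init failed\n");
            return -1;
        }

        C_UserInfo_t user;
        memset(&user, 0, sizeof(user));
        snprintf(user.owner, NAME_MAX_SIZE, "%s", "test");  // '\0'-terminated

        // Hypothetical volume, assumed to have been created via Create().
        int fd = Open("/vol1", &user);
        if (fd < 0) {
            fprintf(stderr, "Open failed: %d\n", fd);
            UnInit();
            return -1;
        }

        // Buffers and offsets should be aligned to IO_ALIGNED_BLOCK_SIZE (4096).
        char* buf = static_cast<char*>(
            std::aligned_alloc(IO_ALIGNED_BLOCK_SIZE, IO_ALIGNED_BLOCK_SIZE));
        memset(buf, 'x', IO_ALIGNED_BLOCK_SIZE);
        int written = Write(fd, buf, 0, IO_ALIGNED_BLOCK_SIZE);
        int nread = Read(fd, buf, 0, IO_ALIGNED_BLOCK_SIZE);
        printf("written=%d read=%d\n", written, nread);

        free(buf);
        Close(fd);
        UnInit();
        return 0;
    }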
diff --git a/include/etcdclient/etcdclient.h b/include/etcdclient/etcdclient.h
index 42f63a7436..b3ce392aba 100644
--- a/include/etcdclient/etcdclient.h
+++ b/include/etcdclient/etcdclient.h
@@ -18,7 +18,6 @@
 /* package command-line-arguments */

-
 #line 1 "cgo-builtin-export-prolog"

 #include <stddef.h> /* for ptrdiff_t below */

@@ -27,21 +26,22 @@
 #define GO_CGO_EXPORT_PROLOGUE_H

 #ifndef GO_CGO_GOSTRING_TYPEDEF
-typedef struct { const char *p; ptrdiff_t n; } _GoString_;
+typedef struct {
+    const char* p;
+    ptrdiff_t n;
+} _GoString_;
 #endif

 #endif

 /* Start of preamble from import "C" comments. */

-
 #line 19 "etcdclient.go"

 #include

-enum EtcdErrCode
-{
-    // grpc errCode, 具体的含义见:
+enum EtcdErrCode {
+    // For the specific meaning of the grpc errCode, see:
     // https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes#ErrGRPCNoSpace
     // https://godoc.org/google.golang.org/grpc/codes#Code
     EtcdOK = 0,
@@ -62,7 +62,7 @@ enum EtcdErrCode
     EtcdDataLoss = 15,
     EtcdUnauthenticated = 16,

-    // 自定义错误码
+    // Custom error codes
     EtcdTxnUnkownOp = 17,
     EtcdObjectNotExist = 18,
     EtcdErrObjectType = 19,
@@ -79,31 +79,26 @@ enum EtcdErrCode
     EtcdObjectLenNotEnough = 30,
 };

-enum OpType {
-    OpPut = 1,
-    OpDelete = 2
-};
+enum OpType { OpPut = 1, OpDelete = 2 };

 struct EtcdConf {
-    char *Endpoints;
+    char* Endpoints;
     int len;
     int DialTimeout;
 };

 struct Operation {
     enum OpType opType;
-    char *key;
-    char *value;
+    char* key;
+    char* value;
     int keyLen;
     int valueLen;
 };

 #line 1 "cgo-generated-wrapper"

-
 /* End of preamble from import "C" comments. */

-
 /* Start of boilerplate cgo prologue. */

 #line 1 "cgo-gcc-export-header-prolog"

@@ -130,15 +125,23 @@ typedef double _Complex GoComplex128;

 static assertion to make sure the file is being used on architecture
 at least with matching size of GoInt.
*/
-typedef char _check_for_64_bit_pointer_matching_GoInt[sizeof(void*)==64/8 ? 1:-1];
+typedef char
+    _check_for_64_bit_pointer_matching_GoInt[sizeof(void*) == 64 / 8 ? 1 : -1];

 #ifndef GO_CGO_GOSTRING_TYPEDEF
 typedef _GoString_ GoString;
 #endif
-typedef void *GoMap;
-typedef void *GoChan;
-typedef struct { void *t; void *v; } GoInterface;
-typedef struct { void *data; GoInt len; GoInt cap; } GoSlice;
+typedef void* GoMap;
+typedef void* GoChan;
+typedef struct {
+    void* t;
+    void* v;
+} GoInterface;
+typedef struct {
+    void* data;
+    GoInt len;
+    GoInt cap;
+} GoSlice;

 #endif

@@ -148,8 +151,7 @@ typedef struct { void *data; GoInt len; GoInt cap; } GoSlice;
 extern "C" {
 #endif

-
-// TODO(lixiaocui): 日志打印看是否需要glog
+// TODO(lixiaocui): Check whether glog is needed for log printing

extern GoUint32 NewEtcdClientV3(struct EtcdConf p0);

extern GoUint32 EtcdClientPut(int p0, char* p1, char* p2, int p3, int p4);

/* Return type for EtcdClientPutRewtihRevision */
struct EtcdClientPutRewtihRevision_return {
-    GoUint32 r0;
-    GoInt64 r1;
+    GoUint32 r0;
+    GoInt64 r1;
};

-extern struct EtcdClientPutRewtihRevision_return EtcdClientPutRewtihRevision(int p0, char* p1, char* p2, int p3, int p4);
+extern struct EtcdClientPutRewtihRevision_return EtcdClientPutRewtihRevision(
+    int p0, char* p1, char* p2, int p3, int p4);

/* Return type for EtcdClientGet */
struct EtcdClientGet_return {
-    GoUint32 r0;
-    char* r1;
-    GoInt r2;
-    GoInt64 r3;
+    GoUint32 r0;
+    char* r1;
+    GoInt r2;
+    GoInt64 r3;
};

extern struct EtcdClientGet_return EtcdClientGet(int p0, char* p1, int p2);

/* Return type for EtcdClientList */
struct EtcdClientList_return {
-    GoUint32 r0;
-    GoUint64 r1;
-    GoInt64 r2;
+    GoUint32 r0;
+    GoUint64 r1;
+    GoInt64 r2;
};

-// TODO(lixiaocui): list可能需要有长度限制
+// TODO(lixiaocui): list may need a length limit

-extern struct EtcdClientList_return EtcdClientList(int p0, char* p1, char* p2, int p3, int p4);
+extern struct EtcdClientList_return EtcdClientList(int p0, char* p1, char* p2,
+                                                   int p3, int p4);

/* Return type for EtcdClientListWithLimitAndRevision */
struct EtcdClientListWithLimitAndRevision_return {
-    GoUint32 r0;
-    GoUint64 r1;
-    GoInt r2;
-    GoInt64 r3;
+    GoUint32 r0;
+    GoUint64 r1;
+    GoInt r2;
+    GoInt64 r3;
};

-extern struct EtcdClientListWithLimitAndRevision_return EtcdClientListWithLimitAndRevision(unsigned int p0, char* p1, char* p2, int p3, int p4, GoInt64 p5, GoInt64 p6);
+extern struct EtcdClientListWithLimitAndRevision_return
+EtcdClientListWithLimitAndRevision(unsigned int p0, char* p1, char* p2, int p3,
+                                   int p4, GoInt64 p5, GoInt64 p6);

extern GoUint32 EtcdClientDelete(int p0, char* p1, int p2);

/* Return type for EtcdClientDeleteRewithRevision */
struct EtcdClientDeleteRewithRevision_return {
-    GoUint32 r0;
-    GoInt64 r1;
+    GoUint32 r0;
+    GoInt64 r1;
};

-extern struct EtcdClientDeleteRewithRevision_return EtcdClientDeleteRewithRevision(int p0, char* p1, int p2);
+extern struct EtcdClientDeleteRewithRevision_return
+EtcdClientDeleteRewithRevision(int p0, char* p1, int p2);

-extern GoUint32 EtcdClientTxn2(int p0, struct Operation p1, struct Operation p2);
+extern GoUint32 EtcdClientTxn2(int p0, struct Operation p1,
+                               struct Operation p2);

-extern GoUint32 EtcdClientTxn3(int p0, struct Operation p1, struct Operation p2, struct Operation p3);
+extern GoUint32 EtcdClientTxn3(int p0, struct Operation p1, struct Operation p2,
+                               struct Operation p3);

-extern GoUint32 EtcdClientCompareAndSwap(int p0, char* p1, char* p2, char* p3, int p4, int p5, int p6);
+extern GoUint32 EtcdClientCompareAndSwap(int p0, char* p1, char* p2, char* p3,
+                                         int p4, int p5, int p6);

/* Return type for EtcdElectionCampaign */
struct EtcdElectionCampaign_return {
-    GoUint32 r0;
-    GoUint64 r1;
+    GoUint32 r0;
+    GoUint64 r1;
};

-extern struct EtcdElectionCampaign_return EtcdElectionCampaign(char* p0, int p1, char* p2, int p3, GoUint32 p4, GoUint32 p5);
+extern struct EtcdElectionCampaign_return EtcdElectionCampaign(char* p0, int p1,
+                                                               char* p2, int p3,
+                                                               GoUint32 p4,
+                                                               GoUint32 p5);

extern GoUint32 EtcdLeaderObserve(GoUint64 p0, char* p1, int p2);

@@ -226,23 +239,25 @@
 extern GoUint32 EtcdLeaderResign(GoUint64 p0, GoUint64 p1);

/* Return type for EtcdClientGetSingleObject */
struct EtcdClientGetSingleObject_return {
-    GoUint32 r0;
-    char* r1;
-    GoInt r2;
+    GoUint32 r0;
+    char* r1;
+    GoInt r2;
};

-extern struct EtcdClientGetSingleObject_return EtcdClientGetSingleObject(GoUint64 p0);
+extern struct EtcdClientGetSingleObject_return EtcdClientGetSingleObject(
+    GoUint64 p0);

/* Return type for EtcdClientGetMultiObject */
struct EtcdClientGetMultiObject_return {
-    GoUint32 r0;
-    char* r1;
-    GoInt r2;
-    char* r3;
-    GoInt r4;
+    GoUint32 r0;
+    char* r1;
+    GoInt r2;
+    char* r3;
+    GoInt r4;
};

-extern struct EtcdClientGetMultiObject_return EtcdClientGetMultiObject(GoUint64 p0, GoInt p1);
+extern struct EtcdClientGetMultiObject_return EtcdClientGetMultiObject(
+    GoUint64 p0, GoInt p1);

extern void EtcdClientRemoveObject(GoUint64 p0);
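The cgo exports above return multiple values through the generated *_return structs instead of out-parameters. A hedged sketch of a C++ caller follows; the header leaves p0..pN undocumented, so the parameter meanings marked in the comments (per-call timeout, key/value lengths) are assumptions inferred from how curve's mds drives this wrapper, not guarantees of this header:

    #include <cstdio>
    #include <cstring>
    #include "etcdclient.h"  // the header shown above

    int main() {
        // Endpoints is a comma-separated list; len is its string length.
        char endpoints[] = "127.0.0.1:2379";
        struct EtcdConf conf;
        conf.Endpoints = endpoints;
        conf.len = static_cast<int>(strlen(endpoints));
        conf.DialTimeout = 5000;  // assumed to be milliseconds

        if (NewEtcdClientV3(conf) != EtcdOK) {
            fprintf(stderr, "failed to init etcd client\n");
            return -1;
        }

        char key[] = "/curve/test";
        char value[] = "hello";
        // Assumption: the first int is a per-call timeout, the trailing ints
        // are the key/value lengths.
        if (EtcdClientPut(5000, key, value, static_cast<int>(strlen(key)),
                          static_cast<int>(strlen(value))) != EtcdOK) {
            return -1;
        }

        // Multi-value results come back in the generated *_return struct:
        // r0 = error code, r1/r2 = value buffer and length, r3 = revision
        // (field meanings inferred, see above).
        struct EtcdClientGet_return got =
            EtcdClientGet(5000, key, static_cast<int>(strlen(key)));
        if (got.r0 == EtcdOK) {
            printf("value=%.*s revision=%lld\n", static_cast<int>(got.r2),
                   got.r1, static_cast<long long>(got.r3));
        }
        return 0;
    }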
diff --git a/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf b/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf
index 71ca380f13..8bc37cb542 100644
--- a/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf
+++ b/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf
@@ -1,28 +1,28 @@
 # part2 socket file address
 nebdserver.serverAddress=/var/lib/nebd/nebd.sock
-# 文件锁路径
+# File lock path
 metacache.fileLockPath=/var/lib/nebd/lock
-# 同步rpc的最大重试次数
+# Maximum number of retries for synchronous rpc
 request.syncRpcMaxRetryTimes=50
-# rpc请求的重试间隔
+# Retry interval of rpc requests
 request.rpcRetryIntervalUs=100000
-# rpc请求的最大重试间隔
+# Maximum retry interval of rpc requests
 request.rpcRetryMaxIntervalUs=64000000
-# rpc hostdown情况下的重试时间
+# Retry interval in the rpc hostdown case
 request.rpcHostDownRetryIntervalUs=10000
-# brpc的健康检查周期时间,单位s
+# Health check period of brpc, in seconds
 request.rpcHealthCheckIntervalS=1
-# brpc从rpc失败到进行健康检查的最大时间间隔,单位ms
+# Maximum interval from an rpc failure to the health check, in ms, for brpc
 request.rpcMaxDelayHealthCheckIntervalMs=100
-# rpc发送执行队列个数
+# Number of rpc send execution queues
 request.rpcSendExecQueueNum=2
-# heartbeat间隔
+# Heartbeat interval
 heartbeat.intervalS=5
-# heartbeat rpc超时时间
+# Heartbeat rpc timeout
 heartbeat.rpcTimeoutMs=500
-# 日志路径
+# Log path
 log.path=/var/log/nebd/client
diff --git a/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf b/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf
index b03e7a25c6..4dcb28c7e6 100644
--- a/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf
+++ b/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf
@@ -1,14 +1,14 @@
-# curve-client配置文件地址
+# curve-client configuration file path
 curveclient.confPath=/etc/curve/client.conf
-#brpc server监听端口
+# brpc server listening address
 listen.address=/var/lib/nebd/nebd.sock
-#元数据文件地址,包含文件名
+# Metadata file path, including the file name
 meta.file.path=/var/lib/nebd/nebdserver.meta
-#心跳超时时间
+# Heartbeat timeout
 heartbeat.timeout.sec=30
-#文件超时检测时间间隔
+# File timeout detection interval
 heartbeat.check.interval.ms=3000
\ No newline at end of file
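The nebd-client retry settings above interact: a synchronous rpc is retried up to request.syncRpcMaxRetryTimes times, waiting between attempts no less than request.rpcRetryIntervalUs and no more than request.rpcRetryMaxIntervalUs microseconds, while hostdown cases use the fixed request.rpcHostDownRetryIntervalUs. The sketch below prints one plausible backoff schedule under these values; the doubling policy is an illustrative assumption, since the authoritative logic lives in nebd part1:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint64_t kRetryIntervalUs = 100000;       // request.rpcRetryIntervalUs
        const uint64_t kRetryMaxIntervalUs = 64000000;  // request.rpcRetryMaxIntervalUs

        // Assumed policy: double the wait each retry, capped at the maximum.
        uint64_t interval = kRetryIntervalUs;
        for (int retry = 1; retry <= 12; ++retry) {
            printf("retry %d: wait %llu us\n", retry,
                   static_cast<unsigned long long>(interval));
            interval = std::min(interval * 2, kRetryMaxIntervalUs);
        }
        return 0;
    }

Under these settings the wait grows from 0.1 s and saturates at 64 s well before the 50-retry budget is exhausted.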
diff --git a/monitor/grafana-report.py b/monitor/grafana-report.py
index a400263e8c..0170470996 100644
--- a/monitor/grafana-report.py
+++ b/monitor/grafana-report.py
@@ -13,17 +13,18 @@
 sender = 'Grafana'
 to_address = ['xxxxxxxxx@163.com']
 username = 'xxxxxxxxx@163.com'
-password = 'xxxxxxxxx' # SMTP授权码
+password = 'xxxxxxxxx'  # SMTP authorization code
 smtpserver = 'xxxx.163.com:1234'
-sourcefile= '/etc/curve/monitor/grafana/report/report.tex'
-imagedir= '/etc/curve/monitor/grafana/report/images/'
-pdfpath= '/etc/curve/monitor/grafana/report/report.pdf'
+sourcefile = '/etc/curve/monitor/grafana/report/report.tex'
+imagedir = '/etc/curve/monitor/grafana/report/images/'
+pdfpath = '/etc/curve/monitor/grafana/report/report.pdf'
 clustername = '【CURVE】xxxxxxxxx'
 grafanauri = '127.0.0.1:3000'
 reporteruri = '127.0.0.1:8686'
 dashboardid = 'xxxxxxxxx'
 apitoken = 'xxxxxxxxx'

+
 def get_images():
     image_name_list = []
     file = open(sourcefile, 'r')
@@ -32,16 +33,17 @@ def get_images():
         # print (line)
         prefix_image_name = re.findall(r'image\d+', line)
         if prefix_image_name:
-            print (prefix_image_name)
+            print(prefix_image_name)
             image_name_list.append(prefix_image_name[0])
         line = file.readline()

     file.close()
     return image_name_list

+
 def getMsgImage(image_name):
     file_name = imagedir+image_name+'.png'
-    print (file_name)
+    print(file_name)
     fp = open(file_name, 'rb')
     msgImage = MIMEImage(fp.read())
     fp.close()
@@ -49,6 +51,7 @@ def getMsgImage(image_name):
     msgImage.add_header("Content-Disposition", "inline", filename=file_name)
     return msgImage

+
 def attach_body(msgRoot):
     image_list = get_images()

@@ -57,36 +60,41 @@ def attach_body(msgRoot):
         image_body += ('%s' % (image, image))
         msgRoot.attach(getMsgImage(image))

-    html_str = '%s' % (image_body)
+    html_str = '%s' % (
+        image_body)

     mailMsg = """
-    可点击如下链接在grafana面板中查看(若显示混乱,请在附件pdf中查看)<br>
-    <a href="http://%s">grafana链接</a>
+    You can click the link below to view it in the Grafana dashboard (if the display is garbled, please check the attached PDF)<br>
+    <a href="http://%s">grafana link</a>
""" % (grafanauri) mailMsg += html_str print(mailMsg) - content = MIMEText(mailMsg,'html','utf-8') + content = MIMEText(mailMsg, 'html', 'utf-8') msgRoot.attach(content) -# 发送dashboard日报邮件 +# Send dashboard daily email + + def send_mail(): time_now = int(Time.time()) time_local = Time.localtime(time_now) - dt = Time.strftime("%Y%m%d",time_local) + dt = Time.strftime("%Y%m%d", time_local) msgRoot = MIMEMultipart('related') - msgRoot['Subject'] = '%s集群监控日报-%s' % (clustername, dt) + msgRoot['Subject'] = '%sCluster Monitoring Daily Report-%s' % ( + clustername, dt) msgRoot['From'] = sender - msgRoot['To'] = ",".join( to_address ) # 发给多人 + msgRoot['To'] = ",".join(to_address) # Send to multiple people - # 添加pdf附件 + # Add PDF attachment pdf_attach = MIMEText(open(pdfpath, 'rb').read(), 'base64', 'utf-8') pdf_attach["Content-Type"] = 'application/octet-stream' - # 这里的filename可以任意写,写什么名字,邮件中显示什么名字 - pdf_attach["Content-Disposition"] = 'attachment; filename="reporter-{}.pdf"'.format(dt) + # The file name here can be written arbitrarily, including the name you want to write and the name displayed in the email + pdf_attach["Content-Disposition"] = 'attachment; filename="reporter-{}.pdf"'.format( + dt) msgRoot.attach(pdf_attach) - # 添加正文 + # Add Body attach_body(msgRoot) smtp = smtplib.SMTP_SSL(smtpserver) @@ -94,11 +102,13 @@ def send_mail(): smtp.sendmail(sender, to_address, msgRoot.as_string()) smtp.quit() + def clear(): shutil.rmtree(imagedir) os.mkdir(imagedir) os.chmod(imagedir, 0777) + def generate_report(): downloadcmd = ( "wget -O %s " @@ -108,10 +118,12 @@ def generate_report(): print(downloadcmd) os.system(downloadcmd) + def main(): generate_report() send_mail() clear() + if __name__ == '__main__': main() diff --git a/monitor/grafana/dashboards/chunkserver.json b/monitor/grafana/dashboards/chunkserver.json index 2770cd2802..e48e7a0721 100644 --- a/monitor/grafana/dashboards/chunkserver.json +++ b/monitor/grafana/dashboards/chunkserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process running time", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -218,7 +218,7 @@ } } ], - "title": "进程资源占用", + "title": "Process resource usage", "type": "row" }, { @@ -237,7 +237,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver上所有rpc的每秒处理成功的请求个数", + "description": "The number of successful requests processed per second for all RPCs on the chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -410,7 +410,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在rpc层面的错误个数", + "description": "The number of errors per second at the RPC level for the read_chunk operation", "fill": 1, "gridPos": { "h": 6, @@ -675,7 +675,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "write_chunk每秒在rpc层面的错误个数", + "description": "The number of errors per second at the RPC level for the write_chunk operation", "fill": 1, "gridPos": { "h": 6, @@ -1027,7 +1027,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "rpc层面read chunk延时的分位值", + "description": "Percentile values of RPC-level read chunk latency", "fill": 1, "gridPos": { "h": 7, @@ -1281,7 +1281,7 @@ } } ], - "title": "rpc层读写指标", + "title": "RPC layer read-write metrics", "type": "row" }, { @@ -1300,7 +1300,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在chunk service层面的错误个数", + "description": "Number 
- "description": "read_chunk每秒在chunk service层面的错误个数", + "description": "Number of errors per second for read_chunk at the chunk service layer", "fill": 1, "gridPos": { "h": 7,
@@ -1392,7 +1392,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "Number of read_chunk operations successfully processed per second at the chunk service layer", "fill": 1, "gridPos": { "h": 7,
@@ -1484,7 +1484,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的read_chunk请求个数", + "description": "Number of read_chunk requests received per second at the chunk service layer", "fill": 1, "gridPos": { "h": 7,
@@ -1576,7 +1576,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面write_chunk每秒返回错误的请求个数", + "description": "The number of write_chunk requests per second that return errors at the chunk service level", "fill": 1, "gridPos": { "h": 7,
@@ -1668,7 +1668,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7,
@@ -1762,7 +1762,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的write_chunk请求个数", + "description": "The number of write_chunk requests received by the chunk service layer per second", "fill": 1, "gridPos": { "h": 7,
@@ -1854,7 +1854,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk延时的分位值", + "description": "The percentile value of read chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7,
@@ -1965,7 +1965,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面write chunk延时的分位值", + "description": "The percentile value of write chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7,
@@ -2076,7 +2076,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面的read chunk的平均延时", + "description": "Average latency of read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7,
@@ -2166,7 +2166,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面的write chunk的平均延时", + "description": "Average latency of write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7,
@@ -2256,7 +2256,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes read successfully per second at the chunkserver service level", "fill": 1, "gridPos": { "h": 7,
@@ -2346,7 +2346,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes written successfully per second at the chunkserver service level", "fill": 1, "gridPos": { "h": 7,
@@ -2436,7 +2436,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk的io大小的分位值", + "description": "The quantile value of the IO size of the read chunk at the chunkserver service level", "fill": 1, "gridPos": { "h": 7,
@@ -2541,7 +2541,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus",
- "description": "chunkserver service层面write chunk的io大小的分位值", + "description": "The quantile value of IO size for write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7,
@@ -2642,7 +2642,7 @@
 } ], "repeat": null, - "title": "chunkserver层读写指标", + "title": "Chunkserver layer read and write metrics", "type": "row" }, {
@@ -2664,7 +2664,7 @@
 "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的数量", + "description": "Number of selected copysets", "format": "none", "gauge": { "maxValue": 100,
@@ -2747,7 +2747,7 @@
 "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的chunk数量的总和", + "description": "The total number of chunks in the selected copysets", "format": "none", "gauge": { "maxValue": 100,
@@ -2828,7 +2828,7 @@
 "dashLength": 10, "dashes": false, "decimals": 0, - "description": "各copyset上已分配的chunk的数量", + "description": "The number of allocated chunks on each copyset", "fill": 1, "gridPos": { "h": 6,
@@ -2920,7 +2920,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的read chunk请求个数", + "description": "The number of read chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7,
@@ -3012,7 +3012,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7,
@@ -3104,7 +3104,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的read chunk请求个数", + "description": "The number of read chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7,
@@ -3196,7 +3196,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的write chunk请求个数", + "description": "The number of write chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7,
@@ -3288,7 +3288,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7,
@@ -3380,7 +3380,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的write chunk请求个数", + "description": "The number of write chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7,
@@ -3472,7 +3472,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功read chunk的字节数", + "description": "The number of bytes read successfully per second at the copyset level", "fill": 1, "gridPos": { "h": 7,
@@ -3564,7 +3564,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功write chunk的字节数", + "description": "The number of bytes written successfully per second at the copyset level", "fill": 1, "gridPos": { "h": 7,
@@ -3656,7 +3656,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的read chunk的平均延时", + "description": "Average latency of read chunks at the copyset level", "fill": 1, "gridPos": { "h": 7,
@@ -3746,7 +3746,7 @@
 "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的write chunk的平均延时", + "description": "Average latency of write chunks at the copyset level", "fill": 1, "gridPos": { "h": 7,
@@ -3831,7 +3831,7 @@
 } } ], -
"title": "copyset指标", + "title": "Copyset metric", "type": "row" }, { @@ -3850,7 +3850,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -3942,7 +3942,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -4033,7 +4033,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上copyset的数量", + "description": "Number of copysets on chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -4119,7 +4119,7 @@ } } ], - "title": "chunkserver关键指标", + "title": "Chunkserver Key Metrics", "type": "row" }, { @@ -4773,7 +4773,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -4860,7 +4860,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, @@ -5121,7 +5121,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "follower正在下载快照的任务数量(一个copyset最多一个任务)", + "description": "Number of tasks being downloaded by the follower (one copyset can only have one task)", "fill": 1, "gridPos": { "h": 8, @@ -5204,7 +5204,7 @@ } } ], - "title": "Raft关键指标", + "title": "Raft Key Metrics", "type": "row" }, { @@ -5228,7 +5228,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "chunkserver上bthread worker的数量", + "description": "Number of bthread workers on chunkserver", "format": "none", "gauge": { "maxValue": 100, @@ -5311,7 +5311,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上正在被使用的工作线程个数", + "description": "The number of worker threads currently in use on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5399,7 +5399,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上bthread的数量", + "description": "Number of bthreads on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5487,7 +5487,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上execution queue的数量", + "description": "Number of execution queues on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5585,7 +5585,7 @@ } } ], - "title": "线程指标", + "title": "Thread metrics", "type": "row" } ], @@ -5605,7 +5605,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"([[ip:pipe]]):[[port:regex]]\"}", "hide": 2, "includeAll": false, - "label": "实例", + "label": "Instance", "multi": true, "name": "instance", "options": [], @@ -5630,7 +5630,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": true, "name": "ip", "options": [], @@ -5655,7 +5655,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"$ip.*\"}", "hide": 0, "includeAll": false, - "label": "端口号", + "label": "Port", "multi": true, "name": "port", "options": [], diff --git a/monitor/grafana/dashboards/client.json b/monitor/grafana/dashboards/client.json index a7274595c3..6efc67c597 100644 --- a/monitor/grafana/dashboards/client.json +++ b/monitor/grafana/dashboards/client.json @@ -101,7 +101,7 @@ "thresholds": "1,2", 
"timeFrom": null, "timeShift": null, - "title": "客户端运行时间", + "title": "Client runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -200,7 +200,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -215,7 +215,7 @@ "panels": [ { "columns": [], - "description": "客户端的配置情况", + "description": "Configuration of the client", "fontSize": "100%", "gridPos": { "h": 8, @@ -235,7 +235,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -251,7 +251,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -267,7 +267,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -309,12 +309,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "客户端配置", + "title": "Client Configuration", "transform": "table", "type": "table" } ], - "title": "客户端配置", + "title": "Client Configuration", "type": "row" }, { @@ -681,7 +681,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -769,7 +769,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒写入字节数", + "description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1129,7 +1129,7 @@ } } ], - "title": "用户接口层指标", + "title": "User Interface Layer Metrics", "type": "row" }, { @@ -1236,7 +1236,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1323,7 +1323,7 @@ } } ], - "title": "中间业务层指标", + "title": "Intermediate Business Layer Indicators", "type": "row" }, { @@ -1982,7 +1982,7 @@ } } ], - "title": "rpc层指标", + "title": "Rpc layer metrics", "type": "row" }, { @@ -2085,7 +2085,7 @@ } } ], - "title": "与MDS通信指标", + "title": "Communication metrics with MDS", "type": "row" } ], @@ -2108,7 +2108,7 @@ "definition": "label_values({__name__=~\"curve_client.*\", instance=~\".*:90.*\"}, instance)", "hide": 0, "includeAll": true, - "label": "客户端", + "label": "Client", "multi": true, "name": "client", "options": [], @@ -2136,7 +2136,7 @@ "definition": "{__name__=~\"curve_client.*write_qps\", instance=~\"$client\"}", "hide": 0, "includeAll": true, - "label": "文件", + "label": "File", "multi": true, "name": "file", "options": [], diff --git a/monitor/grafana/dashboards/etcd.json b/monitor/grafana/dashboards/etcd.json index 82869aa08a..d1a87934bc 100644 --- a/monitor/grafana/dashboards/etcd.json +++ b/monitor/grafana/dashboards/etcd.json @@ -2464,7 +2464,7 @@ "definition": "etcd_server_has_leader", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/dashboards/mds.json b/monitor/grafana/dashboards/mds.json index c226cf398d..9704ae6e32 100644 --- a/monitor/grafana/dashboards/mds.json +++ b/monitor/grafana/dashboards/mds.json @@ -115,7 +115,7 @@ "panels": [ { "columns": [], - "description": "mds的配置", + "description": "Configuration of mds", "fontSize": "100%", "gridPos": { "h": 11, @@ -135,7 +135,7 @@ }, "styles": [ { - "alias": "实例", + "alias": 
"Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -151,7 +151,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -167,7 +167,7 @@ "unit": "short" }, { - "alias": "配置值", + "alias": "Configuration Values", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -208,12 +208,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "mds配置", + "title": "Mds configuration", "transform": "table", "type": "table" } ], - "title": "mds配置", + "title": "Mds configuration", "type": "row" }, { @@ -228,7 +228,7 @@ "panels": [ { "cacheTimeout": null, - "description": "磁盘剩余容量", + "description": "Disk remaining capacity", "gridPos": { "h": 7, "w": 4, @@ -283,7 +283,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配的磁盘容量,物理空间", + "description": "Cluster allocated disk capacity, physical space", "gridPos": { "h": 7, "w": 4, @@ -338,7 +338,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配容量,逻辑空间", + "description": "Cluster allocated capacity, logical space", "gridPos": { "h": 7, "w": 4, @@ -393,7 +393,7 @@ }, { "cacheTimeout": null, - "description": "集群总容量", + "description": "Total Cluster Capacity", "gridPos": { "h": 7, "w": 4, @@ -510,7 +510,7 @@ "type": "gauge" } ], - "title": "集群信息", + "title": "Cluster Information", "type": "row" }, { @@ -523,7 +523,7 @@ }, "id": 22, "panels": [], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -598,7 +598,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -679,7 +679,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "进程cpu使用情况", + "title": "Process CPU Usage", "tooltip": { "shared": true, "sort": 0, @@ -763,7 +763,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "内存占用", + "title": "Memory usage", "tooltip": { "shared": true, "sort": 0, @@ -847,7 +847,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "mds元数据缓存大小", + "title": "Mds metadata cache size", "tooltip": { "shared": true, "sort": 0, @@ -896,7 +896,7 @@ "panels": [ { "columns": [], - "description": "逻辑池监控指标", + "description": "Logical Pool Monitoring Metrics", "fontSize": "100%", "gridPos": { "h": 8, @@ -916,7 +916,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -932,7 +932,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -978,7 +978,7 @@ "type": "table" } ], - "title": "逻辑池状态", + "title": "Logical Pool Status", "type": "row" }, { @@ -1082,7 +1082,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的leader count", + "description": "The current leader count of all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1173,7 +1173,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的copyset数量", + "description": "The current number of copysets for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1264,7 +1264,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的scatterwidth", + "description": "The current scatterwidth of all chunkservers", "fill": 1, "gridPos": { "h": 11, @@ -1355,7 +1355,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": 
"所有chunkserver当前的rpc层写请求速率", + "description": "Current RPC layer write request rate for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1446,7 +1446,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层写请求iops", + "description": "Current rpc layer write requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1537,7 +1537,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求速率", + "description": "Current RPC layer read request rate for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1628,7 +1628,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求iops", + "description": "Current rpc layer read requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1714,7 +1714,7 @@ } } ], - "title": "chunkserver状态", + "title": "Chunkserver Status", "type": "row" }, { @@ -2233,7 +2233,7 @@ } } ], - "title": "调度监控", + "title": "Scheduling Monitoring", "type": "row" }, { @@ -2251,7 +2251,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "所有rpc请求的qps", + "description": "QPS for all rpc requests", "fill": 1, "gridPos": { "h": 8, @@ -2338,7 +2338,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "当前MDS上所有inflight的rpc请求个数", + "description": "The number of rpc requests for all inflight on the current MDS", "fill": 1, "gridPos": { "h": 8, @@ -2431,7 +2431,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -2519,7 +2519,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -2604,7 +2604,7 @@ } } ], - "title": "RPC层指标", + "title": "RPC Layer Metrics", "type": "row" }, { @@ -2622,7 +2622,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "每秒成功处理的heartbeat个数", + "description": "The number of heartbeat successfully processed per second", "fill": 1, "gridPos": { "h": 8, @@ -2709,7 +2709,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat当前inflight的请求个数", + "description": "The current number of inflight requests for heartbeat", "fill": 1, "gridPos": { "h": 8, @@ -2803,7 +2803,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求平均延时", + "description": "Average latency of heartbeat requests", "fill": 1, "gridPos": { "h": 7, @@ -2891,7 +2891,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求延时分位图", + "description": "Heartbeat Request Delay Bitmap", "fill": 1, "gridPos": { "h": 7, @@ -2974,7 +2974,7 @@ } } ], - "title": "HeartBeat指标", + "title": "HeartBeat metric", "type": "row" } ], @@ -2994,7 +2994,7 @@ "definition": "rpc_server_6666_curve_mds_curve_fsservice_create_file", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/dashboards/report.json b/monitor/grafana/dashboards/report.json index 4e26169ddb..f6539e34c4 100644 --- a/monitor/grafana/dashboards/report.json +++ b/monitor/grafana/dashboards/report.json @@ -224,7 +224,7 @@ }, { "columns": [], - "description": "copyset数量监控指标", + "description": "Copyset quantity monitoring indicator", "fontSize": 
"100%", "gridPos": { "h": 8, @@ -244,7 +244,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "pattern": "__name__", @@ -252,7 +252,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -299,7 +299,7 @@ }, { "columns": [], - "description": "leader数量监控指标", + "description": "Leader quantity monitoring indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -319,7 +319,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -335,7 +335,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -382,7 +382,7 @@ }, { "columns": [], - "description": "scatterwidth指标", + "description": "Scatterwidth indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -402,7 +402,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, @@ -411,7 +411,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -470,7 +470,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -559,7 +559,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -834,7 +834,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1095,7 +1095,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒写入字节数", + "description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1268,7 +1268,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "gridPos": { "h": 8, @@ -1444,7 +1444,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "client单位大小的平均latency", + "description": "Average latency of client unit size", "fill": 1, "gridPos": { "h": 8, @@ -1747,7 +1747,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -1839,7 +1839,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -1938,7 +1938,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2032,7 +2032,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2209,7 +2209,7 @@ 
"dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2301,7 +2301,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2478,7 +2478,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver read chunk 单位大小内的平均延时", + "description": "Average latency per chunkserver read chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -2572,7 +2572,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver write chunk 单位大小内的平均延时", + "description": "Average latency in chunkserver write chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -3023,7 +3023,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -3203,7 +3203,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, diff --git a/monitor/grafana/dashboards/snapshotcloneserver.json b/monitor/grafana/dashboards/snapshotcloneserver.json index 3382ca3c84..7eaab10890 100644 --- a/monitor/grafana/dashboards/snapshotcloneserver.json +++ b/monitor/grafana/dashboards/snapshotcloneserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -161,7 +161,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "cpu使用率", + "title": "CPU usage rate", "tooltip": { "shared": true, "sort": 0, @@ -199,7 +199,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -212,12 +212,12 @@ }, "id": 12, "panels": [], - "title": "任务信息", + "title": "Task Information", "type": "row" }, { "columns": [], - "description": "当前快照任务的信息", + "description": "Information about the current snapshot task", "fontSize": "100%", "gridPos": { "h": 9, @@ -470,13 +470,13 @@ ], "timeFrom": null, "timeShift": null, - "title": "快照任务表", + "title": "Snapshot Task Table", "transform": "table", "type": "table" }, { "columns": [], - "description": "当前克隆任务的信息", + "description": "Information about the current cloning task", "fontSize": "100%", "gridPos": { "h": 9, @@ -800,7 +800,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "克隆任务表", + "title": "Clone Task Table", "transform": "table", "type": "table" }, @@ -810,7 +810,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -856,7 +856,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "快照数量统计", + "title": "Number of Snapshots Statistics", "tooltip": { "shared": true, "sort": 0, @@ -901,7 +901,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -947,7 +947,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "克隆数量统计", + "title": "Clone Count Statistics", "tooltip": 
{ "shared": true, "sort": 0, @@ -1002,7 +1002,7 @@ "definition": "{__name__=~\"snapshotcloneserver_.*\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": false, "name": "instance", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/chunkserver.json b/monitor/grafana/provisioning/dashboards/chunkserver.json index 2770cd2802..89ce686aa7 100644 --- a/monitor/grafana/provisioning/dashboards/chunkserver.json +++ b/monitor/grafana/provisioning/dashboards/chunkserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -218,7 +218,7 @@ } } ], - "title": "进程资源占用", + "title": "Process resource usage", "type": "row" }, { @@ -237,7 +237,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver上所有rpc的每秒处理成功的请求个数", + "description": "The number of successfully processed requests per second for all RPCs on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -410,7 +410,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在rpc层面的错误个数", + "description": "The number of errors per second at the rpc level in read_chunk", "fill": 1, "gridPos": { "h": 6, @@ -675,7 +675,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "write_chunk每秒在rpc层面的错误个数", + "description": "Write_chunk The number of errors per second at the rpc level", "fill": 1, "gridPos": { "h": 6, @@ -1027,7 +1027,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "rpc层面read chunk延时的分位值", + "description": "The quantile value of read chunk delay at the rpc level", "fill": 1, "gridPos": { "h": 7, @@ -1281,7 +1281,7 @@ } } ], - "title": "rpc层读写指标", + "title": "RPC layer read and write metrics", "type": "row" }, { @@ -1300,7 +1300,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在chunk service层面的错误个数", + "description": "The number of read_chunk errors per second at the chunk service level", "fill": 1, "gridPos": { "h": 7, @@ -1392,7 +1392,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -1484,7 +1484,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的read_chunk请求个数", + "description": "The number of read_chunk requests received by the chunk service layer per second", "fill": 1, "gridPos": { "h": 7, @@ -1576,7 +1576,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面write_chunk每秒返回错误的请求个数", + "description": "The number of requests per second that the chunk service level write_chunk returns errors", "fill": 1, "gridPos": { "h": 7, @@ -1668,7 +1668,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -1762,7 +1762,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的write_chunk请求个数", + "description": "The number of Write_chunk requests received by the chunk service layer per second", "fill": 1, "gridPos": { 
"h": 7, @@ -1854,7 +1854,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk延时的分位值", + "description": "The percentile value of read chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -1965,7 +1965,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面write chunk延时的分位值", + "description": "The percentile value of write chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2076,7 +2076,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面的read chunk的平均延时", + "description": "Average latency of read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2166,7 +2166,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面的write chunk的平均延时", + "description": "Average latency of write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2256,7 +2256,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2346,7 +2346,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2436,7 +2436,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk的io大小的分位值", + "description": "The quantile value of the IO size of the read chunk at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2541,7 +2541,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面write chunk的io大小的分位值", + "description": "The quantile value of IO size for write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2642,7 +2642,7 @@ } ], "repeat": null, - "title": "chunkserver层读写指标", + "title": "Chunkserver layer read and write metrics", "type": "row" }, { @@ -2664,7 +2664,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的数量", + "description": "Number of selected copysets", "format": "none", "gauge": { "maxValue": 100, @@ -2747,7 +2747,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的chunk数量的总和", + "description": "The total number of chunks in the selected copyset", "format": "none", "gauge": { "maxValue": 100, @@ -2828,7 +2828,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "各copyset上已分配的chunk的数量", + "description": "The number of allocated chunks on each copyset", "fill": 1, "gridPos": { "h": 6, @@ -2920,7 +2920,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的read chunk请求个数", + "description": "The number of read chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3012,7 +3012,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7, @@ -3104,7 +3104,7 @@ "dashLength": 
10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的read chunk请求个数", + "description": "The number of read chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7, @@ -3196,7 +3196,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的write chunk请求个数", + "description": "The number of write chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3288,7 +3288,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7, @@ -3380,7 +3380,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的write chunk请求个数", + "description": "The number of write chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7, @@ -3472,7 +3472,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3564,7 +3564,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3656,7 +3656,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的read chunk的平均延时", + "description": "Average latency of read chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3746,7 +3746,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的write chunk的平均延时", + "description": "Average latency of write chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3831,7 +3831,7 @@ } } ], - "title": "copyset指标", + "title": "Copyset metric", "type": "row" }, { @@ -3850,7 +3850,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -3942,7 +3942,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -4033,7 +4033,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上copyset的数量", + "description": "Number of copysets on chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -4119,7 +4119,7 @@ } } ], - "title": "chunkserver关键指标", + "title": "Chunkserver Key Metrics", "type": "row" }, { @@ -4773,7 +4773,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -4860,7 +4860,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, @@ -5121,7 +5121,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "follower正在下载快照的任务数量(一个copyset最多一个任务)", + "description": "Number of tasks being downloaded by the follower (one copyset can only have one task)", "fill": 1, "gridPos": { "h": 8, @@ -5204,7 +5204,7 @@ 
} } ], - "title": "Raft关键指标", + "title": "Raft Key Metrics", "type": "row" }, { @@ -5228,7 +5228,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "chunkserver上bthread worker的数量", + "description": "Number of bthread workers on chunkserver", "format": "none", "gauge": { "maxValue": 100, @@ -5311,7 +5311,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上正在被使用的工作线程个数", + "description": "The number of worker threads currently in use on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5399,7 +5399,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上bthread的数量", + "description": "Number of bthreads on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5487,7 +5487,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上execution queue的数量", + "description": "Number of execution queues on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5585,7 +5585,7 @@ } } ], - "title": "线程指标", + "title": "Thread metrics", "type": "row" } ], @@ -5605,7 +5605,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"([[ip:pipe]]):[[port:regex]]\"}", "hide": 2, "includeAll": false, - "label": "实例", + "label": "Instance", "multi": true, "name": "instance", "options": [], @@ -5630,7 +5630,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": true, "name": "ip", "options": [], @@ -5655,7 +5655,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"$ip.*\"}", "hide": 0, "includeAll": false, - "label": "端口号", + "label": "Port", "multi": true, "name": "port", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/client.json b/monitor/grafana/provisioning/dashboards/client.json index a7274595c3..6efc67c597 100644 --- a/monitor/grafana/provisioning/dashboards/client.json +++ b/monitor/grafana/provisioning/dashboards/client.json @@ -101,7 +101,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "客户端运行时间", + "title": "Client runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -200,7 +200,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -215,7 +215,7 @@ "panels": [ { "columns": [], - "description": "客户端的配置情况", + "description": "Configuration of the client", "fontSize": "100%", "gridPos": { "h": 8, @@ -235,7 +235,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -251,7 +251,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -267,7 +267,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -309,12 +309,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "客户端配置", + "title": "Client Configuration", "transform": "table", "type": "table" } ], - "title": "客户端配置", + "title": "Client Configuration", "type": "row" }, { @@ -681,7 +681,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -769,7 +769,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒写入字节数", + 
"description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1129,7 +1129,7 @@ } } ], - "title": "用户接口层指标", + "title": "User Interface Layer Metrics", "type": "row" }, { @@ -1236,7 +1236,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1323,7 +1323,7 @@ } } ], - "title": "中间业务层指标", + "title": "Intermediate Business Layer Indicators", "type": "row" }, { @@ -1982,7 +1982,7 @@ } } ], - "title": "rpc层指标", + "title": "Rpc layer metrics", "type": "row" }, { @@ -2085,7 +2085,7 @@ } } ], - "title": "与MDS通信指标", + "title": "Communication metrics with MDS", "type": "row" } ], @@ -2108,7 +2108,7 @@ "definition": "label_values({__name__=~\"curve_client.*\", instance=~\".*:90.*\"}, instance)", "hide": 0, "includeAll": true, - "label": "客户端", + "label": "Client", "multi": true, "name": "client", "options": [], @@ -2136,7 +2136,7 @@ "definition": "{__name__=~\"curve_client.*write_qps\", instance=~\"$client\"}", "hide": 0, "includeAll": true, - "label": "文件", + "label": "File", "multi": true, "name": "file", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/etcd.json b/monitor/grafana/provisioning/dashboards/etcd.json index 82869aa08a..d1a87934bc 100644 --- a/monitor/grafana/provisioning/dashboards/etcd.json +++ b/monitor/grafana/provisioning/dashboards/etcd.json @@ -2464,7 +2464,7 @@ "definition": "etcd_server_has_leader", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/mds.json b/monitor/grafana/provisioning/dashboards/mds.json index c226cf398d..9704ae6e32 100644 --- a/monitor/grafana/provisioning/dashboards/mds.json +++ b/monitor/grafana/provisioning/dashboards/mds.json @@ -115,7 +115,7 @@ "panels": [ { "columns": [], - "description": "mds的配置", + "description": "Configuration of mds", "fontSize": "100%", "gridPos": { "h": 11, @@ -135,7 +135,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -151,7 +151,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -167,7 +167,7 @@ "unit": "short" }, { - "alias": "配置值", + "alias": "Configuration Values", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -208,12 +208,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "mds配置", + "title": "Mds configuration", "transform": "table", "type": "table" } ], - "title": "mds配置", + "title": "Mds configuration", "type": "row" }, { @@ -228,7 +228,7 @@ "panels": [ { "cacheTimeout": null, - "description": "磁盘剩余容量", + "description": "Disk remaining capacity", "gridPos": { "h": 7, "w": 4, @@ -283,7 +283,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配的磁盘容量,物理空间", + "description": "Cluster allocated disk capacity, physical space", "gridPos": { "h": 7, "w": 4, @@ -338,7 +338,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配容量,逻辑空间", + "description": "Cluster allocated capacity, logical space", "gridPos": { "h": 7, "w": 4, @@ -393,7 +393,7 @@ }, { "cacheTimeout": null, - "description": "集群总容量", + "description": "Total Cluster Capacity", "gridPos": { "h": 7, "w": 4, @@ -510,7 +510,7 @@ "type": "gauge" } ], - "title": "集群信息", + "title": "Cluster 
Information", "type": "row" }, { @@ -523,7 +523,7 @@ }, "id": 22, "panels": [], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -598,7 +598,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -679,7 +679,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "进程cpu使用情况", + "title": "Process CPU Usage", "tooltip": { "shared": true, "sort": 0, @@ -763,7 +763,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "内存占用", + "title": "Memory usage", "tooltip": { "shared": true, "sort": 0, @@ -847,7 +847,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "mds元数据缓存大小", + "title": "Mds metadata cache size", "tooltip": { "shared": true, "sort": 0, @@ -896,7 +896,7 @@ "panels": [ { "columns": [], - "description": "逻辑池监控指标", + "description": "Logical Pool Monitoring Metrics", "fontSize": "100%", "gridPos": { "h": 8, @@ -916,7 +916,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -932,7 +932,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -978,7 +978,7 @@ "type": "table" } ], - "title": "逻辑池状态", + "title": "Logical Pool Status", "type": "row" }, { @@ -1082,7 +1082,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的leader count", + "description": "The current leader count of all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1173,7 +1173,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的copyset数量", + "description": "The current number of copysets for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1264,7 +1264,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的scatterwidth", + "description": "The current scatterwidth of all chunkservers", "fill": 1, "gridPos": { "h": 11, @@ -1355,7 +1355,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层写请求速率", + "description": "Current RPC layer write request rate for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1446,7 +1446,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层写请求iops", + "description": "Current rpc layer write requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1537,7 +1537,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求速率", + "description": "Current RPC layer read request rate for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1628,7 +1628,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求iops", + "description": "Current rpc layer read requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1714,7 +1714,7 @@ } } ], - "title": "chunkserver状态", + "title": "Chunkserver Status", "type": "row" }, { @@ -2233,7 +2233,7 @@ } } ], - "title": "调度监控", + "title": "Scheduling Monitoring", "type": "row" }, { @@ -2251,7 +2251,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "所有rpc请求的qps", + "description": "QPS for all rpc requests", "fill": 1, "gridPos": { "h": 8, @@ -2338,7 +2338,7 @@ "dashLength": 10, "dashes": false, "datasource": 
"Prometheus", - "description": "当前MDS上所有inflight的rpc请求个数", + "description": "The number of rpc requests for all inflight on the current MDS", "fill": 1, "gridPos": { "h": 8, @@ -2431,7 +2431,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -2519,7 +2519,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -2604,7 +2604,7 @@ } } ], - "title": "RPC层指标", + "title": "RPC Layer Metrics", "type": "row" }, { @@ -2622,7 +2622,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "每秒成功处理的heartbeat个数", + "description": "The number of heartbeat successfully processed per second", "fill": 1, "gridPos": { "h": 8, @@ -2709,7 +2709,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat当前inflight的请求个数", + "description": "The current number of inflight requests for heartbeat", "fill": 1, "gridPos": { "h": 8, @@ -2803,7 +2803,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求平均延时", + "description": "Average latency of heartbeat requests", "fill": 1, "gridPos": { "h": 7, @@ -2891,7 +2891,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求延时分位图", + "description": "Heartbeat Request Delay Bitmap", "fill": 1, "gridPos": { "h": 7, @@ -2974,7 +2974,7 @@ } } ], - "title": "HeartBeat指标", + "title": "HeartBeat metric", "type": "row" } ], @@ -2994,7 +2994,7 @@ "definition": "rpc_server_6666_curve_mds_curve_fsservice_create_file", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/report.json b/monitor/grafana/provisioning/dashboards/report.json index 4e26169ddb..f6539e34c4 100644 --- a/monitor/grafana/provisioning/dashboards/report.json +++ b/monitor/grafana/provisioning/dashboards/report.json @@ -224,7 +224,7 @@ }, { "columns": [], - "description": "copyset数量监控指标", + "description": "Copyset quantity monitoring indicator", "fontSize": "100%", "gridPos": { "h": 8, @@ -244,7 +244,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "pattern": "__name__", @@ -252,7 +252,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -299,7 +299,7 @@ }, { "columns": [], - "description": "leader数量监控指标", + "description": "Leader quantity monitoring indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -319,7 +319,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -335,7 +335,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -382,7 +382,7 @@ }, { "columns": [], - "description": "scatterwidth指标", + "description": "Scatterwidth indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -402,7 +402,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, @@ -411,7 +411,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -470,7 +470,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - 
"description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -559,7 +559,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -834,7 +834,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1095,7 +1095,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒写入字节数", + "description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1268,7 +1268,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "gridPos": { "h": 8, @@ -1444,7 +1444,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "client单位大小的平均latency", + "description": "Average latency of client unit size", "fill": 1, "gridPos": { "h": 8, @@ -1747,7 +1747,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -1839,7 +1839,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -1938,7 +1938,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2032,7 +2032,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2209,7 +2209,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2301,7 +2301,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2478,7 +2478,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver read chunk 单位大小内的平均延时", + "description": "Average latency per chunkserver read chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -2572,7 +2572,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver write chunk 单位大小内的平均延时", + "description": "Average latency in chunkserver write chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -3023,7 +3023,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -3203,7 +3203,7 @@ "bars": false, "dashLength": 10, 
"dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, diff --git a/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json b/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json index 3382ca3c84..7eaab10890 100644 --- a/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json +++ b/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -161,7 +161,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "cpu使用率", + "title": "CPU usage rate", "tooltip": { "shared": true, "sort": 0, @@ -199,7 +199,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -212,12 +212,12 @@ }, "id": 12, "panels": [], - "title": "任务信息", + "title": "Task Information", "type": "row" }, { "columns": [], - "description": "当前快照任务的信息", + "description": "Information about the current snapshot task", "fontSize": "100%", "gridPos": { "h": 9, @@ -470,13 +470,13 @@ ], "timeFrom": null, "timeShift": null, - "title": "快照任务表", + "title": "Snapshot Task Table", "transform": "table", "type": "table" }, { "columns": [], - "description": "当前克隆任务的信息", + "description": "Information about the current cloning task", "fontSize": "100%", "gridPos": { "h": 9, @@ -800,7 +800,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "克隆任务表", + "title": "Clone Task Table", "transform": "table", "type": "table" }, @@ -810,7 +810,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -856,7 +856,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "快照数量统计", + "title": "Number of Snapshots Statistics", "tooltip": { "shared": true, "sort": 0, @@ -901,7 +901,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -947,7 +947,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "克隆数量统计", + "title": "Clone Count Statistics", "tooltip": { "shared": true, "sort": 0, @@ -1002,7 +1002,7 @@ "definition": "{__name__=~\"snapshotcloneserver_.*\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": false, "name": "instance", "options": [], diff --git a/nebd/etc/nebd/nebd-client.conf b/nebd/etc/nebd/nebd-client.conf index 1207e5bbd0..6baa9c2a51 100644 --- a/nebd/etc/nebd/nebd-client.conf +++ b/nebd/etc/nebd/nebd-client.conf @@ -1,28 +1,28 @@ # part2 socket file address nebdserver.serverAddress=/data/nebd/nebd.sock # __CURVEADM_TEMPLATE__ ${prefix}/data/nebd.sock __CURVEADM_TEMPLATE__ -# 文件锁路径 +# File lock path metacache.fileLockPath=/data/nebd/lock # __CURVEADM_TEMPLATE__ ${prefix}/data/lock __CURVEADM_TEMPLATE__ -# 同步rpc的最大重试次数 +# Maximum number of retries for synchronous rpc request.syncRpcMaxRetryTimes=50 -# rpc请求的重试间隔 +# The retry interval for rpc requests request.rpcRetryIntervalUs=100000 -# rpc请求的最大重试间隔 +# Maximum retry interval for rpc requests request.rpcRetryMaxIntervalUs=64000000 -# rpc hostdown情况下的重试时间 +# The retry time in the case of rpc hostdown request.rpcHostDownRetryIntervalUs=10000 -# brpc的健康检查周期时间,单位s +# The health check cycle time of brpc, in seconds request.rpcHealthCheckIntervalS=1 -# 
brpc从rpc失败到进行健康检查的最大时间间隔,单位ms +# The maximum time interval from rpc failure to health check in ms for brpc request.rpcMaxDelayHealthCheckIntervalMs=100 -# rpc发送执行队列个数 +# Number of RPC send execution queues request.rpcSendExecQueueNum=2 -# heartbeat间隔 +# heartbeat interval heartbeat.intervalS=5 -# heartbeat rpc超时时间 +# heartbeat RPC timeout heartbeat.rpcTimeoutMs=500 -# 日志路径 +# Log Path log.path=/data/log/nebd/client # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__ diff --git a/nebd/etc/nebd/nebd-server.conf b/nebd/etc/nebd/nebd-server.conf index a6d2fbe534..1ef0966cc6 100644 --- a/nebd/etc/nebd/nebd-server.conf +++ b/nebd/etc/nebd/nebd-server.conf @@ -1,16 +1,16 @@ -# curve-client配置文件地址 +# curve-client configuration file address curveclient.confPath=/etc/curve/client.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/client.conf __CURVEADM_TEMPLATE__ -#brpc server监听端口 +# brpc server listening port listen.address=/data/nebd/nebd.sock # __CURVEADM_TEMPLATE__ ${prefix}/data/nebd.sock __CURVEADM_TEMPLATE__ -#元数据文件地址,包含文件名 +# Metadata file address, including file name meta.file.path=/data/nebd/nebdserver.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/nebdserver.meta __CURVEADM_TEMPLATE__ -#心跳超时时间 +# Heartbeat timeout heartbeat.timeout.sec=30 -#文件超时检测时间间隔 +# File timeout detection interval heartbeat.check.interval.ms=3000 # return rpc when io error diff --git a/nebd/nebd-package/usr/bin/nebd-daemon b/nebd/nebd-package/usr/bin/nebd-daemon index fb8242d1dc..3204bc8732 100755 --- a/nebd/nebd-package/usr/bin/nebd-daemon +++ b/nebd/nebd-package/usr/bin/nebd-daemon @@ -138,7 +138,7 @@ function stop_one() { return fi - # 判断是否已经通过daemon启动了nebd-server + # Determine if nebd-server has been started through daemon daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running if [ $? -ne 0 ]; then echo "$1: didn't start nebd-server by daemon" @@ -179,7 +179,7 @@ function restart_one() { return fi - # 判断是否已经通过daemon启动了nebd-server + # Determine if nebd-server has been started through daemon daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running if [ $? 
-ne 0 ]; then echo "$1: didn't start nebd-server by daemon" @@ -267,7 +267,7 @@ function status() { done } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " nebd-daemon start -- start deamon process and watch on nebd-server process for all instance" @@ -283,7 +283,7 @@ function usage() { echo " nebd-daemon status-one -- show if the nebd-server is running by daemon for current user's instance" } -# 检查参数启动参数,最少1个 +# Check parameter startup parameters, at least 1 if [ $# -lt 1 ]; then usage exit diff --git a/nebd/src/common/configuration.cpp b/nebd/src/common/configuration.cpp index 69a23ebe43..3c331c7cee 100644 --- a/nebd/src/common/configuration.cpp +++ b/nebd/src/common/configuration.cpp @@ -22,10 +22,10 @@ #include "nebd/src/common/configuration.h" -#include +#include #include +#include #include -#include namespace nebd { namespace common { @@ -54,8 +54,10 @@ bool Configuration::LoadConfig() { } bool Configuration::SaveConfig() { - // 当前先只保存配置,原文件的注释等内容先忽略 - // TODO(yyk): 后续考虑改成原文件格式不变,只修改配置值 + // Currently, only the configuration is saved, and the comments and other + // contents of the original file are ignored + // TODO(yyk): In the future, consider changing to the original file format + // without changing, only modifying the configuration values std::ofstream wStream(confFile_); if (wStream.is_open()) { for (auto& pair : config_) { @@ -73,38 +75,33 @@ std::string Configuration::DumpConfig() { return ""; } - std::map Configuration::ListConfig() const { return config_; } -void Configuration::SetConfigPath(const std::string &path) { - confFile_ = path; -} +void Configuration::SetConfigPath(const std::string& path) { confFile_ = path; } -std::string Configuration::GetConfigPath() { - return confFile_; -} +std::string Configuration::GetConfigPath() { return confFile_; } -std::string Configuration::GetStringValue(const std::string &key) { +std::string Configuration::GetStringValue(const std::string& key) { return GetValue(key); } -bool Configuration::GetStringValue(const std::string &key, std::string *out) { +bool Configuration::GetStringValue(const std::string& key, std::string* out) { return GetValue(key, out); } -void Configuration::SetStringValue(const std::string &key, - const std::string &value) { +void Configuration::SetStringValue(const std::string& key, + const std::string& value) { SetValue(key, value); } -int Configuration::GetIntValue(const std::string &key, uint64_t defaultvalue) { +int Configuration::GetIntValue(const std::string& key, uint64_t defaultvalue) { std::string value = GetValue(key); return (value == "") ? 
defaultvalue : std::stoi(value); } -bool Configuration::GetIntValue(const std::string &key, int *out) { +bool Configuration::GetIntValue(const std::string& key, int* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoi(res); @@ -113,7 +110,7 @@ bool Configuration::GetIntValue(const std::string &key, int *out) { return false; } -bool Configuration::GetUInt32Value(const std::string &key, uint32_t *out) { +bool Configuration::GetUInt32Value(const std::string& key, uint32_t* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoul(res); @@ -122,7 +119,7 @@ bool Configuration::GetUInt32Value(const std::string &key, uint32_t *out) { return false; } -bool Configuration::GetUInt64Value(const std::string &key, uint64_t *out) { +bool Configuration::GetUInt64Value(const std::string& key, uint64_t* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoull(res); @@ -141,18 +138,17 @@ bool Configuration::GetInt64Value(const std::string& key, int64_t* out) { return false; } -void Configuration::SetIntValue(const std::string &key, const int value) { +void Configuration::SetIntValue(const std::string& key, const int value) { SetValue(key, std::to_string(value)); } -double Configuration::GetDoubleValue( - const std::string &key, - double defaultvalue) { +double Configuration::GetDoubleValue(const std::string& key, + double defaultvalue) { std::string value = GetValue(key); return (value == "") ? defaultvalue : std::stod(value); } -bool Configuration::GetDoubleValue(const std::string &key, double *out) { +bool Configuration::GetDoubleValue(const std::string& key, double* out) { std::string res; if (GetValue(key, &res)) { *out = std::stod(res); @@ -161,18 +157,17 @@ bool Configuration::GetDoubleValue(const std::string &key, double *out) { return false; } -void Configuration::SetDoubleValue(const std::string &key, const double value) { +void Configuration::SetDoubleValue(const std::string& key, const double value) { SetValue(key, std::to_string(value)); } - -double Configuration::GetFloatValue( - const std::string &key, float defaultvalue) { +double Configuration::GetFloatValue(const std::string& key, + float defaultvalue) { std::string value = GetValue(key); return (value == "") ? 
defaultvalue : std::stof(value); } -bool Configuration::GetFloatValue(const std::string &key, float *out) { +bool Configuration::GetFloatValue(const std::string& key, float* out) { std::string res; if (GetValue(key, &res)) { *out = std::stof(res); @@ -181,11 +176,11 @@ bool Configuration::GetFloatValue(const std::string &key, float *out) { return false; } -void Configuration::SetFloatValue(const std::string &key, const float value) { +void Configuration::SetFloatValue(const std::string& key, const float value) { SetValue(key, std::to_string(value)); } -bool Configuration::GetBoolValue(const std::string &key, bool defaultvalue) { +bool Configuration::GetBoolValue(const std::string& key, bool defaultvalue) { std::string svalue = config_[key]; transform(svalue.begin(), svalue.end(), svalue.begin(), ::tolower); @@ -195,7 +190,7 @@ bool Configuration::GetBoolValue(const std::string &key, bool defaultvalue) { return ret; } -bool Configuration::GetBoolValue(const std::string &key, bool *out) { +bool Configuration::GetBoolValue(const std::string& key, bool* out) { std::string res; if (GetValue(key, &res)) { transform(res.begin(), res.end(), res.begin(), ::tolower); @@ -215,16 +210,15 @@ bool Configuration::GetBoolValue(const std::string &key, bool *out) { return false; } - -void Configuration::SetBoolValue(const std::string &key, const bool value) { +void Configuration::SetBoolValue(const std::string& key, const bool value) { SetValue(key, std::to_string(value)); } -std::string Configuration::GetValue(const std::string &key) { +std::string Configuration::GetValue(const std::string& key) { return config_[key]; } -bool Configuration::GetValue(const std::string &key, std::string *out) { +bool Configuration::GetValue(const std::string& key, std::string* out) { if (config_.find(key) != config_.end()) { *out = config_[key]; return true; @@ -233,7 +227,7 @@ bool Configuration::GetValue(const std::string &key, std::string *out) { return false; } -void Configuration::SetValue(const std::string &key, const std::string &value) { +void Configuration::SetValue(const std::string& key, const std::string& value) { config_[key] = value; } diff --git a/nebd/src/common/configuration.h b/nebd/src/common/configuration.h index 95df251e80..642d3be2ad 100644 --- a/nebd/src/common/configuration.h +++ b/nebd/src/common/configuration.h @@ -20,8 +20,8 @@ * Author: hzchenwei7 */ -#include #include +#include #ifndef NEBD_SRC_COMMON_CONFIGURATION_H_ #define NEBD_SRC_COMMON_CONFIGURATION_H_ @@ -39,79 +39,80 @@ class Configuration { std::string DumpConfig(); std::map ListConfig() const; - void SetConfigPath(const std::string &path); + void SetConfigPath(const std::string& path); std::string GetConfigPath(); - std::string GetStringValue(const std::string &key); + std::string GetStringValue(const std::string& key); /* - * @brief GetStringValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetStringValue(const std::string &key, std::string *out); - void SetStringValue(const std::string &key, const std::string &value); - - int GetIntValue(const std::string &key, uint64_t defaultvalue = 0); + * @brief GetStringValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetStringValue(const std::string& key, std::string* out); + void SetStringValue(const std::string& key, const std::string& value); + + int 
GetIntValue(const std::string& key, uint64_t defaultvalue = 0); /* - * @brief GetIntValue/GetUInt32Value/GetUInt64Value 获取指定配置项的值 //NOLINT - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetIntValue(const std::string &key, int *out); - bool GetUInt32Value(const std::string &key, uint32_t *out); - bool GetUInt64Value(const std::string &key, uint64_t *out); + * @brief GetIntValue/GetUInt32Value/GetUInt64Value Get the value of the + * specified configuration item//NOLINT + * + * @param[in] key configuration item name + * @param[out] outThe value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetIntValue(const std::string& key, int* out); + bool GetUInt32Value(const std::string& key, uint32_t* out); + bool GetUInt64Value(const std::string& key, uint64_t* out); bool GetInt64Value(const std::string& key, int64_t* out); - void SetIntValue(const std::string &key, const int value); + void SetIntValue(const std::string& key, const int value); - double GetDoubleValue(const std::string &key, double defaultvalue = 0.0); + double GetDoubleValue(const std::string& key, double defaultvalue = 0.0); /* - * @brief GetDoubleValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetDoubleValue(const std::string &key, double *out); - void SetDoubleValue(const std::string &key, const double value); - - double GetFloatValue(const std::string &key, float defaultvalue = 0.0); + * @brief GetDoubleValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetDoubleValue(const std::string& key, double* out); + void SetDoubleValue(const std::string& key, const double value); + + double GetFloatValue(const std::string& key, float defaultvalue = 0.0); /* - * @brief GetFloatValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetFloatValue(const std::string &key, float *out); - void SetFloatValue(const std::string &key, const float value); - - bool GetBoolValue(const std::string &key, bool defaultvalue = false); + * @brief GetFloatValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetFloatValue(const std::string& key, float* out); + void SetFloatValue(const std::string& key, const float value); + + bool GetBoolValue(const std::string& key, bool defaultvalue = false); /* - * @brief GetBoolValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetBoolValue(const std::string &key, bool *out); - void SetBoolValue(const std::string &key, const bool value); - - std::string GetValue(const std::string &key); - bool GetValue(const std::string &key, std::string *out); - void SetValue(const std::string &key, const std::string &value); + * @brief GetBoolValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetBoolValue(const std::string& key, bool* out); + void SetBoolValue(const std::string& key, const bool value); + + std::string GetValue(const std::string& key); + 
bool GetValue(const std::string& key, std::string* out); + void SetValue(const std::string& key, const std::string& value); private: - std::string confFile_; - std::map config_; + std::string confFile_; + std::map config_; }; } // namespace common diff --git a/nebd/src/common/crc32.h b/nebd/src/common/crc32.h index 627218fcbd..238b1ce4fc 100644 --- a/nebd/src/common/crc32.h +++ b/nebd/src/common/crc32.h @@ -23,34 +23,36 @@ #ifndef NEBD_SRC_COMMON_CRC32_H_ #define NEBD_SRC_COMMON_CRC32_H_ +#include #include #include -#include - namespace nebd { namespace common { /** - * 计算数据的CRC32校验码(CRC32C),基于brpc的crc32库进行封装 - * @param pData 待计算的数据 - * @param iLen 待计算的数据长度 - * @return 32位的数据CRC32校验码 + * Calculate the CRC32 checksum (CRC32C) of the data and encapsulate it based on + * the crc32 library of brpc + * @param pData The data to be calculated + * @param iLen The length of data to be calculated + * @return 32-bit data CRC32 checksum */ -inline uint32_t CRC32(const char *pData, size_t iLen) { +inline uint32_t CRC32(const char* pData, size_t iLen) { return butil::crc32c::Value(pData, iLen); } /** - * 计算数据的CRC32校验码(CRC32C),基于brpc的crc32库进行封装. 此函数支持继承式 - * 计算,以支持对SGL类型的数据计算单个CRC校验码。满足如下约束: - * CRC32("hello world", 11) == CRC32(CRC32("hello ", 6), "world", 5) - * @param crc 起始的crc校验码 - * @param pData 待计算的数据 - * @param iLen 待计算的数据长度 - * @return 32位的数据CRC32校验码 + * Calculate the CRC32 checksum (CRC32C) of the data and encapsulate it based on + * the crc32 library of brpc This function supports inheritance Calculate to + * support the calculation of a single CRC checksum for SGL type data. Meet the + * following constraints: CRC32("hello world", 11) == CRC32(CRC32("hello ", 6), + * "world", 5) + * @param crc starting crc checksum + * @param pData The data to be calculated + * @param iLen The length of data to be calculated + * @return 32-bit data CRC32 checksum */ -inline uint32_t CRC32(uint32_t crc, const char *pData, size_t iLen) { +inline uint32_t CRC32(uint32_t crc, const char* pData, size_t iLen) { return butil::crc32c::Extend(crc, pData, iLen); } diff --git a/nebd/src/common/file_lock.h b/nebd/src/common/file_lock.h index 277cfebcf7..dfd644b98b 100644 --- a/nebd/src/common/file_lock.h +++ b/nebd/src/common/file_lock.h @@ -28,31 +28,30 @@ namespace nebd { namespace common { -// 文件锁 +// File lock class FileLock { public: explicit FileLock(const std::string& fileName) - : fileName_(fileName), fd_(-1) {} + : fileName_(fileName), fd_(-1) {} FileLock() : fileName_(""), fd_(-1) {} ~FileLock() = default; /** - * @brief 获取文件锁 - * @return 成功返回0,失败返回-1 + * @brief Get file lock + * @return returns 0 for success, -1 for failure */ int AcquireFileLock(); - /** - * @brief 释放文件锁 + * @brief Release file lock */ void ReleaseFileLock(); private: - // 锁文件的文件名 + // Lock the file name of the file std::string fileName_; - // 锁文件的fd + // Lock file fd int fd_; }; diff --git a/nebd/src/common/name_lock.h b/nebd/src/common/name_lock.h index ae34c182a9..eaebf6e806 100644 --- a/nebd/src/common/name_lock.h +++ b/nebd/src/common/name_lock.h @@ -23,86 +23,89 @@ #ifndef NEBD_SRC_COMMON_NAME_LOCK_H_ #define NEBD_SRC_COMMON_NAME_LOCK_H_ +#include +#include +#include // NOLINT #include #include #include -#include -#include -#include // NOLINT #include "nebd/src/common/uncopyable.h" -namespace nebd { -namespace common { - -class NameLock : public Uncopyable { - public: - explicit NameLock(int bucketNum = 256); - - /** - * @brief 对指定string加锁 - * - * @param lockStr 被加锁的string - */ - void Lock(const std::string &lockStr); - - /** - * @brief 
diff --git a/nebd/src/common/file_lock.h b/nebd/src/common/file_lock.h
index 277cfebcf7..dfd644b98b 100644
--- a/nebd/src/common/file_lock.h
+++ b/nebd/src/common/file_lock.h
@@ -28,31 +28,30 @@
 namespace nebd {
 namespace common {
 
-// 文件锁
+// File lock
 class FileLock {
  public:
     explicit FileLock(const std::string& fileName)
-        : fileName_(fileName), fd_(-1) {}
+        : fileName_(fileName), fd_(-1) {}
 
     FileLock() : fileName_(""), fd_(-1) {}
 
     ~FileLock() = default;
 
     /**
-     * @brief 获取文件锁
-     * @return 成功返回0,失败返回-1
+     * @brief Acquire the file lock
+     * @return returns 0 on success, -1 on failure
      */
     int AcquireFileLock();
 
-
     /**
-     * @brief 释放文件锁
+     * @brief Release the file lock
      */
     void ReleaseFileLock();
 
  private:
-    // 锁文件的文件名
+    // Name of the lock file
     std::string fileName_;
-    // 锁文件的fd
+    // fd of the lock file
     int fd_;
 };
 
diff --git a/nebd/src/common/name_lock.h b/nebd/src/common/name_lock.h
index ae34c182a9..eaebf6e806 100644
--- a/nebd/src/common/name_lock.h
+++ b/nebd/src/common/name_lock.h
@@ -23,86 +23,89 @@
 #ifndef NEBD_SRC_COMMON_NAME_LOCK_H_
 #define NEBD_SRC_COMMON_NAME_LOCK_H_
 
+#include <atomic>
+#include <memory>
+#include <mutex>  // NOLINT
 #include <string>
 #include <unordered_map>
 #include <vector>
-#include <atomic>
-#include <memory>
-#include <mutex>  // NOLINT
 
 #include "nebd/src/common/uncopyable.h"
 
-namespace nebd {
-namespace common {
-
-class NameLock : public Uncopyable {
- public:
-    explicit NameLock(int bucketNum = 256);
-
-    /**
-     * @brief 对指定string加锁
-     *
-     * @param lockStr 被加锁的string
-     */
-    void Lock(const std::string &lockStr);
-
-    /**
-     * @brief 尝试指定sting加锁
-     *
-     * @param lockStr 被加锁的string
-     *
-     * @retval 成功
-     * @retval 失败
-     */
-    bool TryLock(const std::string &lockStr);
-
-    /**
-     * @brief 对指定string解锁
-     *
-     * @param lockStr 被加锁的string
-     */
-    void Unlock(const std::string &lockStr);
-
-
- private:
-    struct LockEntry {
-        std::atomic<uint32_t> ref_;
-        std::mutex lock_;
-    };
-    using LockEntryPtr = std::shared_ptr<LockEntry>;
-
-    struct LockBucket {
-        std::mutex mu;
-        std::unordered_map<std::string, LockEntryPtr> lockMap;
-    };
-    using LockBucketPtr = std::shared_ptr<LockBucket>;
-
-    int GetBucketOffset(const std::string &lockStr);
-
- private:
-    std::vector<LockBucketPtr> locks_;
-};
-
-class NameLockGuard : public Uncopyable {
- public:
-    NameLockGuard(NameLock &lock, const std::string &lockStr) :  //NOLINT
-        lock_(lock),
-        lockStr_(lockStr) {
-        lock_.Lock(lockStr_);
-    }
-
-    ~NameLockGuard() {
-        lock_.Unlock(lockStr_);
-    }
-
- private:
-    NameLock &lock_;
-    std::string lockStr_;
-};
-
-
-}  // namespace common
-}  // namespace nebd
-
-
-#endif  // NEBD_SRC_COMMON_NAME_LOCK_H_
+namespace nebd
+{
+    namespace common
+    {
+
+        class NameLock : public Uncopyable
+        {
+           public:
+            explicit NameLock(int bucketNum = 256);
+
+            /**
+             * @brief locks the specified string
+             *
+             * @param lockStr the string to lock
+             */
+            void Lock(const std::string &lockStr);
+
+            /**
+             * @brief Attempt to lock the specified string
+             *
+             * @param lockStr the string to lock
+             *
+             * @retval succeeded
+             * @retval failed
+             */
+            bool TryLock(const std::string &lockStr);
+
+            /**
+             * @brief unlocks the specified string
+             *
+             * @param lockStr the locked string
+             */
+            void Unlock(const std::string &lockStr);
+
+           private:
+            struct LockEntry
+            {
+                std::atomic<uint32_t> ref_;
+                std::mutex lock_;
+            };
+            using LockEntryPtr = std::shared_ptr<LockEntry>;
+
+            struct LockBucket
+            {
+                std::mutex mu;
+                std::unordered_map<std::string, LockEntryPtr> lockMap;
+            };
+            using LockBucketPtr = std::shared_ptr<LockBucket>;
+
+            int GetBucketOffset(const std::string &lockStr);
+
+           private:
+            std::vector<LockBucketPtr> locks_;
+        };
+
+        class NameLockGuard : public Uncopyable
+        {
+           public:
+            NameLockGuard(NameLock &lock, const std::string &lockStr)
+                :  // NOLINT
+                  lock_(lock),
+                  lockStr_(lockStr)
+            {
+                lock_.Lock(lockStr_);
+            }
+
+            ~NameLockGuard() { lock_.Unlock(lockStr_); }
+
+           private:
+            NameLock &lock_;
+            std::string lockStr_;
+        };
+
+    }  // namespace common
+}  // namespace nebd
+
+#endif  // NEBD_SRC_COMMON_NAME_LOCK_H_
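Editor's note: the guard class above is the intended entry point — it pins a per-name mutex for the scope of an operation. A minimal usage sketch (the volume name is illustrative):

```cpp
#include <iostream>
#include <string>

#include "nebd/src/common/name_lock.h"

int main() {
    nebd::common::NameLock nameLock;  // 256 buckets by default

    {
        // Serializes all work keyed by this name for the guard's lifetime.
        nebd::common::NameLockGuard guard(nameLock, "cbd:pool/volume1");
        std::cout << "holding lock for cbd:pool/volume1\n";
    }  // lock released when the guard leaves scope

    // TryLock allows a non-blocking attempt on the same name.
    if (nameLock.TryLock("cbd:pool/volume1")) {
        nameLock.Unlock("cbd:pool/volume1");
    }
    return 0;
}
```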
diff --git a/nebd/src/common/stringstatus.h b/nebd/src/common/stringstatus.h
index fc4c9a6364..db47e08933 100644
--- a/nebd/src/common/stringstatus.h
+++ b/nebd/src/common/stringstatus.h
@@ -20,28 +20,28 @@
  * Author: lixiaocui
  */
 
-
-#ifndef NEBD_SRC_COMMON_STRINGSTATUS_H_
-#define NEBD_SRC_COMMON_STRINGSTATUS_H_
+#ifndef NEBD_SRC_COMMON_STRINGSTATUS_H_
+#define NEBD_SRC_COMMON_STRINGSTATUS_H_
 
 #include <bvar/bvar.h>
-#include <string>
 
 #include <map>
+#include <string>
 
 namespace nebd {
 namespace common {
 
 class StringStatus {
  public:
     /**
-     * @brief ExposeAs 用于初始化bvar
+     * @brief ExposeAs is used to initialize the bvar
      *
-     * @param[in] prefix, 前缀
-     * @param[in] name, 名字
+     * @param[in] prefix, the prefix
+     * @param[in] name, the name
      */
-    void ExposeAs(const std::string &prefix, const std::string &name);
+    void ExposeAs(const std::string& prefix, const std::string& name);
 
     /**
-     * @brief Set 设置每项key-value信息
+     * @brief Set sets the key-value information for each item
      *
      * @param[in] key
     * @param[in] value
      */
     void Set(const std::string& key, const std::string& value);
 
     /**
-     * @brief Update 把当前key-value map中的键值对以json string的形式设置到status中  //NOLINT
+     * @brief Update publishes the key-value pairs in the current key-value
+     * map to the status as a JSON string  // NOLINT
      */
     void Update();
 
     /**
-     * @brief GetValueByKey 获取指定key对应的value
+     * @brief GetValueByKey Get the value corresponding to the specified key
      *
-     * @param[in] key 指定key
+     * @param[in] key The specified key
      */
-    std::string GetValueByKey(const std::string &key);
+    std::string GetValueByKey(const std::string& key);
 
     /**
-     * @brief JsonBody 获取当前key-value map对应的json形式字符串
+     * @brief JsonBody obtains the JSON string corresponding to the current
+     * key-value map
      */
     std::string JsonBody();
 
 private:
-    // 需要导出的结构体的key-value map
+    // The key-value map of the structure to be exported
     std::map<std::string, std::string> kvs_;
-    // 该导出项对应的status
+    // The bvar status corresponding to this exported item
     bvar::Status<std::string> status_;
 };
 }  // namespace common
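Editor's note: putting the interface above together, a typical export sequence looks like this — a sketch with made-up metric names, and assuming the bvar is exposed under prefix plus name:

```cpp
#include "nebd/src/common/stringstatus.h"

void ExportExample() {
    nebd::common::StringStatus status;
    // Register the underlying bvar (assumed to appear as <prefix><name>).
    status.ExposeAs("nebd_client_", "opened_file");
    // Stage key-value items, then publish them as one JSON string.
    status.Set("fd", "3");
    status.Set("filename", "cbd:pool/volume1");
    status.Update();
    // JsonBody() returns the same JSON, e.g. {"fd":"3","filename":"..."}.
}
```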
diff --git a/nebd/src/common/timeutility.h b/nebd/src/common/timeutility.h
index a80afb61b5..9e454f15a7 100644
--- a/nebd/src/common/timeutility.h
+++ b/nebd/src/common/timeutility.h
@@ -26,9 +26,10 @@
 #include 
 #include 
 #include 
+
+#include 
 #include 
 #include 
-#include 
 
 namespace nebd {
 namespace common {
@@ -53,7 +54,8 @@ class TimeUtility {
         return tm.tv_sec;
     }
 
-    // 时间戳转成标准时间输出在standard里面,时间戳单位为秒
+    // Convert a timestamp (in seconds) to standard time format and write it
+    // to standard
     static inline void TimeStampToStandard(time_t timeStamp,
                                            std::string* standard) {
         char now[64];
@@ -64,7 +66,7 @@ class TimeUtility {
     }
 };
 
-}   // namespace common
-}   // namespace nebd
+}  // namespace common
+}  // namespace nebd
 
-#endif   // NEBD_SRC_COMMON_TIMEUTILITY_H_
+#endif  // NEBD_SRC_COMMON_TIMEUTILITY_H_
diff --git a/nebd/src/part1/async_request_closure.cpp b/nebd/src/part1/async_request_closure.cpp
index 94d1a9f50f..c9ab8e873e 100644
--- a/nebd/src/part1/async_request_closure.cpp
+++ b/nebd/src/part1/async_request_closure.cpp
@@ -22,8 +22,8 @@
 
 #include "nebd/src/part1/async_request_closure.h"
 
-#include 
 #include 
+#include 
 
 #include 
 #include 
@@ -40,11 +40,10 @@ void AsyncRequestClosure::Run() {
             int64_t sleepUs = GetRpcRetryIntervalUs(aioCtx->retryCount);
             LOG_EVERY_SECOND(WARNING)
                 << OpTypeToString(aioCtx->op) << " rpc failed"
-                << ", error = " << cntl.ErrorText()
-                << ", fd = " << fd
+                << ", error = " << cntl.ErrorText() << ", fd = " << fd
                 << ", log id = " << cntl.log_id()
-                << ", retryCount = " << aioCtx->retryCount
-                << ", sleep " << (sleepUs / 1000) << " ms";
+                << ", retryCount = " << aioCtx->retryCount << ", sleep "
+                << (sleepUs / 1000) << " ms";
             bthread_usleep(sleepUs);
             Retry();
         } else {
@@ -52,7 +51,7 @@ void AsyncRequestClosure::Run() {
             if (nebd::client::RetCode::kOK == retCode) {
                 DVLOG(6) << OpTypeToString(aioCtx->op) << " success, fd = " << fd;
 
-                // 读请求复制数据
+                // For read requests, copy the data out of the response
                 if (aioCtx->op == LIBAIO_OP::LIBAIO_OP_READ) {
                     cntl.response_attachment().copy_to(
                         aioCtx->buf, cntl.response_attachment().size());
@@ -73,8 +72,8 @@ void AsyncRequestClosure::Run() {
 }
 
 int64_t AsyncRequestClosure::GetRpcRetryIntervalUs(int64_t retryCount) const {
-    // EHOSTDOWN: 找不到可用的server。
-    // server可能停止服务了,也可能正在退出中(返回了ELOGOFF)
+    // EHOSTDOWN: Unable to find an available server.
+    // The server may have stopped serving, or may be exiting (returned ELOGOFF)
     if (cntl.ErrorCode() == EHOSTDOWN) {
         return requestOption_.rpcHostDownRetryIntervalUs;
     }
@@ -83,10 +82,9 @@ int64_t AsyncRequestClosure::GetRpcRetryIntervalUs(int64_t retryCount) const {
         return requestOption_.rpcRetryIntervalUs;
     }
 
-    return std::max(
-        requestOption_.rpcRetryIntervalUs,
-        std::min(requestOption_.rpcRetryIntervalUs * retryCount,
-                 requestOption_.rpcRetryMaxIntervalUs));
+    return std::max(requestOption_.rpcRetryIntervalUs,
+                    std::min(requestOption_.rpcRetryIntervalUs * retryCount,
+                             requestOption_.rpcRetryMaxIntervalUs));
 }
 
 void AsyncRequestClosure::Retry() const {
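Editor's note: the clamped backoff above grows linearly with the retry count until it hits the configured cap. A standalone sanity-check sketch, with assumed sample values (the real values come from nebd-client.conf):

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
    // Assumed sample values, for illustration only.
    const int64_t retryIntervalUs = 100000;      // base interval: 100 ms
    const int64_t retryMaxIntervalUs = 1000000;  // cap: 1 s
    for (int64_t retryCount : {1, 5, 20}) {
        int64_t sleepUs =
            std::max(retryIntervalUs, std::min(retryIntervalUs * retryCount,
                                               retryMaxIntervalUs));
        std::cout << "retry " << retryCount << " -> " << sleepUs / 1000
                  << " ms\n";  // prints 100 ms, 500 ms, 1000 ms
    }
    return 0;
}
```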
diff --git a/nebd/src/part1/async_request_closure.h b/nebd/src/part1/async_request_closure.h
index 27ab7f613d..0df2f03172 100644
--- a/nebd/src/part1/async_request_closure.h
+++ b/nebd/src/part1/async_request_closure.h
@@ -32,12 +32,9 @@ namespace nebd {
 namespace client {
 
 struct AsyncRequestClosure : public google::protobuf::Closure {
-    AsyncRequestClosure(int fd,
-                        NebdClientAioContext* ctx,
+    AsyncRequestClosure(int fd, NebdClientAioContext* ctx,
                         const RequestOption& option)
-        : fd(fd),
-          aioCtx(ctx),
-          requestOption_(option) {}
+        : fd(fd), aioCtx(ctx), requestOption_(option) {}
 
     void Run() override;
 
@@ -47,94 +44,70 @@ struct AsyncRequestClosure : public google::protobuf::Closure {
 
     void Retry() const;
 
-    // 请求fd
+    // Request fd
     int fd;
-    // 请求上下文信息
+    // Request context information
     NebdClientAioContext* aioCtx;
-    // brpc请求的controller
+    // Controller of the brpc request
     brpc::Controller cntl;
 
     RequestOption requestOption_;
 };
 
 struct AioWriteClosure : public AsyncRequestClosure {
-    AioWriteClosure(int fd,
-                    NebdClientAioContext* ctx,
+    AioWriteClosure(int fd, NebdClientAioContext* ctx,
                     const RequestOption& option)
-        : AsyncRequestClosure(
-            fd,
-            ctx,
-            option) {}
+        : AsyncRequestClosure(fd, ctx, option) {}
 
     WriteResponse response;
 
-    RetCode GetResponseRetCode() const override {
-        return response.retcode();
-    }
+    RetCode GetResponseRetCode() const override { return response.retcode(); }
 };
 
 struct AioReadClosure : public AsyncRequestClosure {
-    AioReadClosure(int fd,
-                   NebdClientAioContext* ctx,
+    AioReadClosure(int fd, NebdClientAioContext* ctx,
                    const RequestOption& option)
-        : AsyncRequestClosure(
-            fd,
-            ctx,
-            option) {}
+        : AsyncRequestClosure(fd, ctx, option) {}
 
     ReadResponse response;
 
-    RetCode GetResponseRetCode() const override {
-        return response.retcode();
-    }
+    RetCode GetResponseRetCode() const override { return response.retcode(); }
 };
 
 struct AioDiscardClosure : public AsyncRequestClosure {
-    AioDiscardClosure(int fd,
-                      NebdClientAioContext* ctx,
+    AioDiscardClosure(int fd, NebdClientAioContext* ctx,
                       const RequestOption& option)
-        : AsyncRequestClosure(
-            fd,
-            ctx,
-            option) {}
+        : AsyncRequestClosure(fd, ctx, option) {}
 
     DiscardResponse response;
 
-    RetCode GetResponseRetCode() const override {
-        return response.retcode();
-    }
+    RetCode GetResponseRetCode() const override { return response.retcode(); }
 };
 
 struct AioFlushClosure : public AsyncRequestClosure {
-    AioFlushClosure(int fd,
-                    NebdClientAioContext* ctx,
+    AioFlushClosure(int fd, NebdClientAioContext* ctx,
                     const RequestOption& option)
-        : AsyncRequestClosure(
-            fd,
-            ctx,
-            option) {}
+        : AsyncRequestClosure(fd, ctx, option) {}
 
     FlushResponse response;
 
-    RetCode GetResponseRetCode() const override {
-        return response.retcode();
-    }
+    RetCode GetResponseRetCode() const override { return response.retcode(); }
 };
 
 inline const char* OpTypeToString(LIBAIO_OP opType) {
     switch (opType) {
-    case LIBAIO_OP::LIBAIO_OP_READ:
-        return "Read";
-    case LIBAIO_OP::LIBAIO_OP_WRITE:
-        return "Write";
-    case LIBAIO_OP::LIBAIO_OP_DISCARD:
-        return "Discard";
-    case LIBAIO_OP::LIBAIO_OP_FLUSH:
-        return "Flush";
-    default:
-        return "Unknown";
+        case LIBAIO_OP::LIBAIO_OP_READ:
+            return "Read";
+        case LIBAIO_OP::LIBAIO_OP_WRITE:
+            return "Write";
+        case LIBAIO_OP::LIBAIO_OP_DISCARD:
+            return "Discard";
+        case LIBAIO_OP::LIBAIO_OP_FLUSH:
+            return "Flush";
+        default:
+            return "Unknown";
     }
 }
diff --git a/nebd/src/part1/heartbeat_manager.h b/nebd/src/part1/heartbeat_manager.h
index 13289cb2d0..c9020e84cc 100644
--- a/nebd/src/part1/heartbeat_manager.h
+++ b/nebd/src/part1/heartbeat_manager.h
@@ -25,52 +25,52 @@
 
 #include <brpc/channel.h>
 
-#include <thread>  // NOLINT
 #include <memory>
 #include <string>
+#include <thread>  // NOLINT
 
+#include "nebd/src/common/interrupt_sleep.h"
 #include "nebd/src/part1/nebd_common.h"
 #include "nebd/src/part1/nebd_metacache.h"
-#include "nebd/src/common/interrupt_sleep.h"
 
 namespace nebd {
 namespace client {
 
-// Heartbeat 管理类
-// 定期向nebd-server发送已打开文件的心跳信息
+// Heartbeat management class
+// Periodically sends heartbeat information for opened files to nebd-server
 class HeartbeatManager {
  public:
     explicit HeartbeatManager(std::shared_ptr<NebdClientMetaCache> metaCache);
 
-    ~HeartbeatManager() {
-        Stop();
-    }
+    ~HeartbeatManager() { Stop(); }
 
     /**
-     * @brief: 启动心跳线程
+     * @brief: Start the heartbeat thread
      */
     void Run();
 
     /**
-     * @brief: 停止心跳线程
+     * @brief: Stop the heartbeat thread
      */
     void Stop();
 
     /**
-     * @brief 初始化
-     * @param heartbeatOption heartbeat 配置项
-     * @return 0 初始化成功 / -1 初始化失败
+     * @brief Initialization
+     * @param heartbeatOption heartbeat configuration items
+     * @return 0 if initialization succeeded / -1 if initialization failed
      */
     int Init(const HeartbeatOption& option);
 
 private:
     /**
-     * @brief: 心跳线程执行函数,定期发送心跳消息
+     * @brief: Heartbeat thread function; sends heartbeat messages
+     * periodically
      */
     void HeartBetaThreadFunc();
 
     /**
-     * @brief: 向part2发送心跳消息,包括当前已打开的卷信息
+     * @brief: Send a heartbeat message to part2, including information about
+     * the currently opened volumes
      */
     void SendHeartBeat();
 
@@ -79,7 +79,7 @@ class HeartbeatManager {
 
     HeartbeatOption heartbeatOption_;
 
-    std::shared_ptr<NebdClientMetaCache>  metaCache_;
+    std::shared_ptr<NebdClientMetaCache> metaCache_;
 
     std::thread heartbeatThread_;
     nebd::common::InterruptibleSleeper sleeper_;
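Editor's note: the heartbeat thread boils down to a periodic send loop that can be interrupted by Stop(). A generic, self-contained sketch of that pattern — using a plain condition variable instead of the InterruptibleSleeper this class actually uses:

```cpp
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

class PeriodicSender {
 public:
    void Run(int64_t intervalS) {
        thread_ = std::thread([this, intervalS] {
            std::unique_lock<std::mutex> lock(mtx_);
            // Wake up every intervalS seconds until Stop() is called.
            while (!cv_.wait_for(lock, std::chrono::seconds(intervalS),
                                 [this] { return stopped_; })) {
                SendHeartBeat();
            }
        });
    }
    void Stop() {
        {
            std::lock_guard<std::mutex> lock(mtx_);
            stopped_ = true;
        }
        cv_.notify_one();
        if (thread_.joinable()) thread_.join();
    }

 private:
    // Stand-in for assembling and sending the real heartbeat rpc.
    void SendHeartBeat() { std::cout << "heartbeat with opened fds\n"; }

    std::thread thread_;
    std::mutex mtx_;
    std::condition_variable cv_;
    bool stopped_ = false;
};
```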
diff --git a/nebd/src/part1/libnebd.cpp b/nebd/src/part1/libnebd.cpp
index ab6093e415..dc254c9286 100644
--- a/nebd/src/part1/libnebd.cpp
+++ b/nebd/src/part1/libnebd.cpp
@@ -21,12 +21,14 @@
  */
 
 #include "nebd/src/part1/libnebd.h"
+
 #include "nebd/src/part1/libnebd_file.h"
 
 extern "C" {
 bool g_inited = false;
 
-// Note: 配置文件路径是否有上层传下来比较合适,评估是否要修改
+// Note: it may be more appropriate for the configuration file path to be
+// passed down from the upper layer; evaluate whether this should change
 const char* confpath = "/etc/nebd/nebd-client.conf";
 int nebd_lib_init() {
     if (g_inited) {
@@ -67,17 +69,13 @@ int nebd_lib_uninit() {
     return 0;
 }
 
-int nebd_lib_open(const char* filename) {
-    return Open4Nebd(filename, nullptr);
-}
+int nebd_lib_open(const char* filename) { return Open4Nebd(filename, nullptr); }
 
 int nebd_lib_open_with_flags(const char* filename, const NebdOpenFlags* flags) {
     return Open4Nebd(filename, flags);
 }
 
-int nebd_lib_close(int fd) {
-    return Close4Nebd(fd);
-}
+int nebd_lib_close(int fd) { return Close4Nebd(fd); }
 
 int nebd_lib_pread(int fd, void* buf, off_t offset, size_t length) {
     (void)fd;
@@ -114,32 +112,20 @@ int nebd_lib_sync(int fd) {
     return 0;
 }
 
-int64_t nebd_lib_filesize(int fd) {
-    return GetFileSize4Nebd(fd);
-}
+int64_t nebd_lib_filesize(int fd) { return GetFileSize4Nebd(fd); }
 
-int64_t nebd_lib_blocksize(int fd) {
-    return GetBlockSize4Nebd(fd);
-}
+int64_t nebd_lib_blocksize(int fd) { return GetBlockSize4Nebd(fd); }
 
-int nebd_lib_resize(int fd, int64_t size) {
-    return Extend4Nebd(fd, size);
-}
+int nebd_lib_resize(int fd, int64_t size) { return Extend4Nebd(fd, size); }
 
 int nebd_lib_flush(int fd, NebdClientAioContext* context) {
     return Flush4Nebd(fd, context);
 }
 
-int64_t nebd_lib_getinfo(int fd) {
-    return GetInfo4Nebd(fd);
-}
+int64_t nebd_lib_getinfo(int fd) { return GetInfo4Nebd(fd); }
 
-int nebd_lib_invalidcache(int fd) {
-    return InvalidCache4Nebd(fd);
-}
+int nebd_lib_invalidcache(int fd) { return InvalidCache4Nebd(fd); }
 
-void nebd_lib_init_open_flags(NebdOpenFlags* flags) {
-    flags->exclusive = 1;
-}
+void nebd_lib_init_open_flags(NebdOpenFlags* flags) { flags->exclusive = 1; }
 
 }  // extern "C"
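Editor's note: taken together, the exported C API above supports a simple open/query/close lifecycle. A minimal sketch — assuming a running nebd-server, a valid /etc/nebd/nebd-client.conf, and an illustrative volume name; error handling is elided:

```cpp
#include <cstdio>

#include "nebd/src/part1/libnebd.h"

int main() {
    if (nebd_lib_init() != 0) {
        return 1;  // initialization failed
    }
    // The volume name format depends on the backing storage (assumption).
    int fd = nebd_lib_open("cbd:pool/volume");
    if (fd < 0) {
        nebd_lib_uninit();
        return 1;
    }
    int64_t size = nebd_lib_filesize(fd);
    printf("volume size: %lld\n", (long long)size);

    nebd_lib_close(fd);
    nebd_lib_uninit();
    return 0;
}
```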
diff --git a/nebd/src/part1/libnebd.h b/nebd/src/part1/libnebd.h
index 380776d71b..8a39ee3977 100644
--- a/nebd/src/part1/libnebd.h
+++ b/nebd/src/part1/libnebd.h
@@ -27,19 +27,19 @@
 extern "C" {
 #endif
 
+#include 
+#include 
+#include 
 #include 
 #include 
-#include 
-#include 
 #include 
-#include 
 #include 
-#include 
 
-// 文件路径最大的长度,单位字节
-#define NEBD_MAX_FILE_PATH_LEN 1024
+// The maximum length of the file path, in bytes
+#define NEBD_MAX_FILE_PATH_LEN 1024
 
-// nebd异步请求的类型
+// Types of nebd asynchronous requests
 typedef enum LIBAIO_OP {
     LIBAIO_OP_READ,
     LIBAIO_OP_WRITE,
@@ -55,139 +55,147 @@ void nebd_lib_init_open_flags(NebdOpenFlags* flags);
 
 struct NebdClientAioContext;
 
-// nebd回调函数的类型
+// The type of the nebd callback function
 typedef void (*LibAioCallBack)(struct NebdClientAioContext* context);
 
 struct NebdClientAioContext {
-    off_t offset;             // 请求的offset
-    size_t length;            // 请求的length
-    int ret;                  // 记录异步返回的返回值
-    LIBAIO_OP op;             // 异步请求的类型,详见定义
-    LibAioCallBack cb;        // 异步请求的回调函数
-    void* buf;                // 请求的buf
-    unsigned int retryCount;  // 记录异步请求的重试次数
+    off_t offset;       // Requested offset
+    size_t length;      // Requested length
+    int ret;            // Return value of the asynchronous operation
+    LIBAIO_OP
+    op;  // Type of the asynchronous request; see the LIBAIO_OP definition
+    LibAioCallBack cb;  // Callback function of the asynchronous request
+    void* buf;          // Request buffer
+    unsigned int
+        retryCount;  // Number of retries recorded for the asynchronous request
 };
 
 // int nebd_lib_fini(void);
 /**
- * @brief 初始化nebd,仅在第一次调用的时候真正执行初始化逻辑
- * @param none
- * @return 成功返回0,失败返回-1
+ * @brief Initialize nebd; the initialization logic only actually runs on the
+ * first call
+ * @param none
+ * @return returns 0 on success, -1 on failure
 */
 int nebd_lib_init(void);
 
 int nebd_lib_init_with_conf(const char* confPath);
 
 /**
- * @brief 反初始化nebd
- * @param none
- * @return 成功返回0,失败返回-1
+ * @brief Uninitialize nebd
+ * @param none
+ * @return returns 0 on success, -1 on failure
 */
 int nebd_lib_uninit(void);
 
 /**
- * @brief open文件
- * @param filename:文件名
- * @return 成功返回文件fd,失败返回错误码
+ * @brief open a file
+ * @param filename: file name
+ * @return the file fd on success, or an error code on failure
 */
 int nebd_lib_open(const char* filename);
 int nebd_lib_open_with_flags(const char* filename,
                              const NebdOpenFlags* openflags);
 
 /**
- * @brief close文件
- * @param fd:文件的fd
- * @return 成功返回0,失败返回错误码
+ * @brief close a file
+ * @param fd: fd of the file
+ * @return returns 0 on success, or an error code on failure
 */
 int nebd_lib_close(int fd);
 
 /**
- * @brief 同步读文件
- * @param fd:文件的fd
- *        buf:存放读取data的buf
- *        offset:读取的位置offset
- *        length:读取的长度
- * @return 成功返回0,失败返回错误码
+ * @brief Synchronously read a file
+ * @param fd: fd of the file
+ *        buf: buffer that receives the data read
+ *        offset: position (offset) to read from
+ *        length: number of bytes to read
+ * @return returns 0 on success, or an error code on failure
 */
 int nebd_lib_pread(int fd, void* buf, off_t offset, size_t length);
 
 /**
- * @brief 同步写文件
- * @param fd:文件的fd
- *        buf:存放写入data的buf
- *        offset:写入的位置offset
- *        length:写入的长度
- * @return 成功返回0,失败返回错误码
+ * @brief Synchronously write a file
+ * @param fd: fd of the file
+ *        buf: buffer holding the data to be written
+ *        offset: position (offset) to write at
+ *        length: number of bytes to write
+ * @return returns 0 on success, or an error code on failure
 */
 int nebd_lib_pwrite(int fd, const void* buf, off_t offset, size_t length);
 
 /**
- * @brief discard文件,异步函数
- * @param fd:文件的fd
- *        context:异步请求的上下文,包含请求所需的信息以及回调
- * @return 成功返回0,失败返回错误码
+ * @brief discard a file, asynchronous function
+ * @param fd: fd of the file
+ *        context: context of the asynchronous request, containing the
+ *        information required for the request as well as the callback
+ * @return returns 0 on success, or an error code on failure
 */
 int nebd_lib_discard(int fd, struct NebdClientAioContext* context);
 
 /**
- * @brief 读文件,异步函数
- * @param fd:文件的fd
- *        context:异步请求的上下文,包含请求所需的信息以及回调
- * @return 成功返回0,失败返回错误码
+ * @brief Read a file, asynchronous function
+ * @param fd: fd of the file
+ *        context: context of the asynchronous request, containing the
+ *        information required for the request as well as the callback
+ * @return returns 0 on success, or an error code on failure
 */
 int nebd_lib_aio_pread(int fd, struct NebdClientAioContext* context);
 
 /**
- * @brief 写文件,异步函数
- * @param fd:文件的fd
- *        context:异步请求的上下文,包含请求所需的信息以及回调
- * @return 成功返回0,失败返回错误码
+ * @brief Write a file, asynchronous function
+ * @param fd: fd of the file
+ *        context: context of the asynchronous request, containing the
+ *        information required for the request as well as the callback
+ * @return returns 0 on success, or an error code on failure
 */
 int nebd_lib_aio_pwrite(int fd, struct NebdClientAioContext* context);
 
 /**
- * @brief sync文件
- * @param fd:文件的fd
- * @return 成功返回0,失败返回错误码
+ * @brief sync a file
+ * @param fd: fd of the file
+ * @return returns 0 on success, or an error code on failure
 */
 int nebd_lib_sync(int fd);
 
 /**
- * @brief 获取文件size
- * @param fd:文件的fd
- * @return 成功返回文件size,失败返回错误码
+ * @brief Get the file size
+ * @param fd: fd of the file
+ * @return the file size on success, or an error code on failure
 */
 int64_t nebd_lib_filesize(int fd);
 
 int64_t nebd_lib_blocksize(int fd);
 
 /**
- * @brief resize文件
- * @param fd:文件的fd
- *        size:调整后的文件size
- * @return 成功返回0,失败返回错误码
+ * @brief resize a file
+ * @param fd: fd of the file
+ *        size: the adjusted file size
+ * @return returns 0 on success, or an error code on failure
 */
 int nebd_lib_resize(int fd, int64_t size);
 
 /**
- * @brief flush文件,异步函数
- * @param fd:文件的fd
- *        context:异步请求的上下文,包含请求所需的信息以及回调
- * @return 成功返回0,失败返回错误码
+ * @brief flush a file, asynchronous function
+ * @param fd: fd of the file
+ *        context: context of the asynchronous request, containing the
+ *        information required for the request as well as the callback
+ * @return returns 0 on success, or an error code on failure
 */
 int nebd_lib_flush(int fd, struct NebdClientAioContext* context);
 
 /**
- * @brief 获取文件info
- * @param fd:文件的fd
- * @return 成功返回文件对象size,失败返回错误码
+ * @brief Get file information
+ * @param fd: fd of the file
+ * @return the file object size on success, or an error code on
+ * failure
 */
 int64_t nebd_lib_getinfo(int fd);
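Editor's note: for the asynchronous entry points, the caller owns the context, the buffer, and the callback. A minimal async read sketch (polling a flag only for brevity; a real caller would signal a condition variable from the callback):

```cpp
#include <atomic>
#include <cstdlib>

#include "nebd/src/part1/libnebd.h"

static std::atomic<bool> g_done{false};

// Invoked by the nebd client when the request completes.
static void OnAioDone(struct NebdClientAioContext* ctx) {
    // ctx->ret carries the result of the asynchronous operation.
    g_done.store(true);
}

int nebd_aio_read_example(int fd) {
    void* buf = malloc(4096);
    struct NebdClientAioContext ctx = {};
    ctx.offset = 0;
    ctx.length = 4096;
    ctx.op = LIBAIO_OP_READ;
    ctx.cb = OnAioDone;
    ctx.buf = buf;  // must stay valid until the callback fires
    if (nebd_lib_aio_pread(fd, &ctx) != 0) {
        free(buf);
        return -1;
    }
    while (!g_done.load()) {
        // busy-wait only for illustration
    }
    free(buf);
    return ctx.ret;
}
```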
 /**
- * @brief 刷新cache,等所有异步请求返回
- * @param fd:文件的fd
- * @return 成功返回0,失败返回错误码
+ * @brief Flush the cache and wait for all asynchronous requests to return
+ * @param fd: fd of the file
+ * @return returns 0 on success, or an error code on failure
 */
 int nebd_lib_invalidcache(int fd);
 
diff --git a/nebd/src/part1/libnebd_file.h b/nebd/src/part1/libnebd_file.h
index 6361094ab2..33e39a58c2 100644
--- a/nebd/src/part1/libnebd_file.h
+++ b/nebd/src/part1/libnebd_file.h
@@ -26,83 +26,89 @@
 #include "nebd/src/part1/libnebd.h"
 
 /**
- * @brief 初始化nebd,仅在第一次调用的时候真正执行初始化逻辑
- * @param none
- * @return 成功返回0,失败返回-1
+ * @brief Initialize nebd; the initialization logic only actually runs on the
+ * first call
+ * @param none
+ * @return returns 0 on success, -1 on failure
 */
 int Init4Nebd(const char* confpath);
 /**
- * @brief 反初始化nebd
- * @param none
- * @return 成功返回0,失败返回-1
+ * @brief Uninitialize nebd
+ * @param none
+ * @return returns 0 on success, -1 on failure
 */
 void Uninit4Nebd();
 /**
- * @brief open文件
- * @param filename:文件名
- * @return 成功返回文件fd,失败返回错误码
+ * @brief open a file
+ * @param filename: file name
+ * @return the file fd on success, or an error code on failure
 */
 int Open4Nebd(const char* filename, const NebdOpenFlags* flags);
 /**
- * @brief close文件
- * @param fd:文件的fd
- * @return 成功返回0,失败返回错误码
+ * @brief close a file
+ * @param fd: fd of the file
+ * @return returns 0 on success, or an error code on failure
 */
 int Close4Nebd(int fd);
 /**
- * @brief resize文件
- * @param fd:文件的fd
- *        size:调整后的文件size
- * @return 成功返回0,失败返回错误码
+ * @brief resize a file
+ * @param fd: fd of the file
+ *        size: the adjusted file size
+ * @return returns 0 on success, or an error code on failure
 */
 int Extend4Nebd(int fd, int64_t newsize);
 /**
- * @brief 获取文件size
- * @param fd:文件的fd
- * @return 成功返回文件size,失败返回错误码
+ * @brief Get the file size
+ * @param fd: fd of the file
+ * @return the file size on success, or an error code on failure
 */
 int64_t GetFileSize4Nebd(int fd);
 
 int64_t GetBlockSize4Nebd(int fd);
 
 /**
- * @brief discard文件,异步函数
- * @param fd:文件的fd
- *        context:异步请求的上下文,包含请求所需的信息以及回调
- * @return 成功返回0,失败返回错误码
+ * @brief discard a file, asynchronous function
+ * @param fd: fd of the file
+ *        context: context of the asynchronous request, containing the
+ *        information required for the request as well as the callback
+ * @return returns 0 on success, or an error code on failure
 */
 int Discard4Nebd(int fd, NebdClientAioContext* aioctx);
 /**
- * @brief 读文件,异步函数
- * @param fd:文件的fd
- *        context:异步请求的上下文,包含请求所需的信息以及回调
- * @return 成功返回0,失败返回错误码
+ * @brief Read a file, asynchronous function
+ * @param fd: fd of the file
+ *        context: context of the asynchronous request, containing the
+ *        information required for the request as well as the callback
+ * @return returns 0 on success, or an error code on failure
 */
 int AioRead4Nebd(int fd, NebdClientAioContext* aioctx);
 /**
- * @brief 写文件,异步函数
- * @param fd:文件的fd
- *        context:异步请求的上下文,包含请求所需的信息以及回调
- * @return 成功返回0,失败返回错误码
+ * @brief Write a file, asynchronous function
+ * @param fd: fd of the file
+ *        context: context of the asynchronous request, containing the
+ *        information required for the request as well as the callback
+ * @return returns 0 on success, or an error code on failure
 */
 int AioWrite4Nebd(int fd, NebdClientAioContext* aioctx);
 /**
- * @brief flush文件,异步函数
- * @param fd:文件的fd
- *        context:异步请求的上下文,包含请求所需的信息以及回调
- * @return 成功返回0,失败返回错误码
+ * @brief flush a file, asynchronous function
+ * @param fd: fd of the file
+ *        context: context of the asynchronous request, containing the
+ *        information required for the request as well as the callback
+ * @return returns 0 on success, or an error code on failure
 */
 int Flush4Nebd(int fd, NebdClientAioContext* aioctx);
 /**
- * @brief 获取文件info
- * @param fd:文件的fd
- * @return 成功返回文件对象size,失败返回错误码
+ * @brief Get info of the file
+ * @param fd: fd of the file
+ * @return the file object size on success, or an error code on
+ * failure
 */
 int64_t GetInfo4Nebd(int fd);
 /**
- * @brief 刷新cache,等所有异步请求返回
- * @param fd:文件的fd
- * @return 成功返回0,失败返回错误码
+ * @brief Flush the cache and wait for all asynchronous requests to return
+ * @param fd: fd of the file
+ * @return returns 0 on success, or an error code on failure
 */
 int InvalidCache4Nebd(int fd);
 
diff --git a/nebd/src/part1/nebd_client.cpp b/nebd/src/part1/nebd_client.cpp
index bd1a2202ea..7f9ec811fd 100644
--- a/nebd/src/part1/nebd_client.cpp
+++ b/nebd/src/part1/nebd_client.cpp
@@ -22,30 +22,42 @@
 
 #include "nebd/src/part1/nebd_client.h"
 
-#include 
-#include 
-#include 
 #include 
-#include 
-#include 
+#include 
 #include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
 #include 
 
-#include "nebd/src/part1/async_request_closure.h"
 #include "nebd/src/common/configuration.h"
+#include "nebd/src/part1/async_request_closure.h"
 
-#define RETURN_IF_FALSE(val) if (val == false) { return -1; }
+#define RETURN_IF_FALSE(val) \
+    if (val == false) {      \
+        return -1;           \
+    }
 
-// 修改brpc的health_check_interval参数,这个参数用来控制健康检查的周期
-// ## 健康检查
-// 连接断开的server会被暂时隔离而不会被负载均衡算法选中,brpc会定期连接被隔离的server,以检查他们是否恢复正常,间隔由参数-health_check_interval控制:  // NOLINT
-// | Name                      | Value | Description                               | Defined At              |  // NOLINT
-// | ------------------------- | ----- | ----------------------------------------  | ----------------------- |  // NOLINT
-// | health_check_interval (R) | 3     | seconds between consecutive health-checkings | src/brpc/socket_map.cpp |  // NOLINT
-// 一旦server被连接上,它会恢复为可用状态。如果在隔离过程中,server从命名服务中删除了,brpc也会停止连接尝试。  // NOLINT
+// Modify brpc's health_check_interval parameter, which controls the period of
+// health checks.
+// ## Health Check
+// A server whose connection has broken is temporarily isolated and will not
+// be selected by the load balancing algorithm. brpc periodically reconnects
+// to isolated servers to check whether they have recovered; the interval is
+// controlled by the -health_check_interval parameter:  // NOLINT
+// | Name                      | Value | Description                                   | Defined At              |  // NOLINT
+// | ------------------------- | ----- | --------------------------------------------- | ----------------------- |  // NOLINT
+// | health_check_interval (R) | 3     | seconds between consecutive health-checkings | src/brpc/socket_map.cpp |  // NOLINT
+// Once a server is connected again, it becomes available. If the server is
+// removed from the naming service while it is isolated, brpc also stops its
+// connection attempts.  // NOLINT
 namespace brpc {
-    DECLARE_int32(health_check_interval);
-    DECLARE_int32(circuit_breaker_max_isolation_duration_ms);
+DECLARE_int32(health_check_interval);
+DECLARE_int32(circuit_breaker_max_isolation_duration_ms);
 }  // namespace brpc
 
 namespace nebd {
@@ -53,7 +65,7 @@ namespace client {
 
 using nebd::common::FileLock;
 
-NebdClient &nebdClient = NebdClient::GetInstance();
+NebdClient& nebdClient = NebdClient::GetInstance();
 
 constexpr int32_t kBufSize = 128;
 
@@ -98,8 +110,7 @@ int NebdClient::Init(const char* confpath) {
     }
 
     metaCache_ = std::make_shared<NebdClientMetaCache>();
-    heartbeatMgr_ = std::make_shared<HeartbeatManager>(
-        metaCache_);
+    heartbeatMgr_ = std::make_shared<HeartbeatManager>(metaCache_);
 
     ret = heartbeatMgr_->Init(heartbeatOption);
     if (ret != 0) {
@@ -139,7 +150,7 @@ void NebdClient::Uninit() {
 }
 
 int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) {
-    // 加文件锁
+    // Acquire the file lock
     std::string fileLockName =
         option_.fileLockPath + "/" + ReplaceSlash(filename);
     FileLock fileLock(fileLockName);
@@ -150,8 +161,7 @@ int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) {
         return -1;
     }
 
-    auto task = [&](brpc::Controller* cntl,
-                    brpc::Channel* channel,
+    auto task = [&](brpc::Controller* cntl, brpc::Channel* channel,
                     bool* rpcFailed) -> int64_t {
         NebdFileService_Stub stub(channel);
         OpenFileRequest request;
@@ -168,8 +178,7 @@ int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) {
 
         *rpcFailed = cntl->Failed();
         if (*rpcFailed) {
-            LOG(WARNING) << "OpenFile rpc failed, error = "
-                         << cntl->ErrorText()
+            LOG(WARNING) << "OpenFile rpc failed, error = " << cntl->ErrorText()
                          << ", filename = " << filename
                          << ", log id = " << cntl->log_id();
             return -1;
@@ -177,7 +186,7 @@ int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) {
         if (response.retcode() != RetCode::kOK) {
             LOG(ERROR) << "OpenFile failed, "
                        << "retcode = " << response.retcode()
-                       <<", retmsg = " << response.retmsg()
+                       << ", retmsg = " << response.retmsg()
                        << ", filename = " << filename
                        << ", log id = " << cntl->log_id();
             return -1;
@@ -199,8 +208,7 @@ int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) {
 }
 
 int NebdClient::Close(int fd) {
-    auto task = [&](brpc::Controller* cntl,
-                    brpc::Channel* channel,
+    auto task = [&](brpc::Controller* cntl, brpc::Channel* channel,
                     bool* rpcFailed) -> int64_t {
         NebdFileService_Stub stub(channel);
         CloseFileRequest request;
@@ -219,7 +227,7 @@ int NebdClient::Close(int fd) {
         if (response.retcode() != RetCode::kOK) {
             LOG(ERROR) << "CloseFile failed, "
                        << "retcode = " << response.retcode()
-                       <<", retmsg = " << response.retmsg()
+                       << ", retmsg = " << response.retmsg()
                        << ", fd = " << fd
                        << ", log id = " << cntl->log_id();
         }
@@ -240,8 +248,7 @@ int NebdClient::Close(int fd) {
 }
 
 int NebdClient::Extend(int fd, int64_t newsize) {
-    auto task = [&](brpc::Controller* cntl,
-                    brpc::Channel* channel,
+    auto task = [&](brpc::Controller* cntl, brpc::Channel* channel,
                     bool* rpcFailed) -> int64_t {
         (void)channel;
         nebd::client::NebdFileService_Stub stub(&channel_);
@@ -255,17 +262,15 @@ int NebdClient::Extend(int fd, int64_t newsize) {
 
         *rpcFailed = cntl->Failed();
         if (*rpcFailed) {
-            LOG(WARNING) << "Resize RPC failed, error = "
-                         << cntl->ErrorText()
+            LOG(WARNING) << "Resize RPC failed, error = " << cntl->ErrorText()
                          << ", log id = " << cntl->log_id();
             return -1;
         } else {
            if (response.retcode() != nebd::client::RetCode::kOK) {
                LOG(ERROR) << 
"ExtendFile failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() - << ", fd = " << fd - << ", newsize = " << newsize + << ", retmsg = " << response.retmsg() + << ", fd = " << fd << ", newsize = " << newsize << ", log id = " << cntl->log_id(); return -1; } else { @@ -276,15 +281,13 @@ int NebdClient::Extend(int fd, int64_t newsize) { int64_t ret = ExecuteSyncRpc(task); if (ret < 0) { - LOG(ERROR) << "Extend failed, fd = " << fd - << ", newsize = " << newsize; + LOG(ERROR) << "Extend failed, fd = " << fd << ", newsize = " << newsize; } return ret; } int64_t NebdClient::GetFileSize(int fd) { - auto task = [&](brpc::Controller* cntl, - brpc::Channel* channel, + auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { nebd::client::NebdFileService_Stub stub(channel); nebd::client::GetInfoRequest request; @@ -295,15 +298,14 @@ int64_t NebdClient::GetFileSize(int fd) { *rpcFailed = cntl->Failed(); if (*rpcFailed) { - LOG(WARNING) << "GetFileSize failed, error = " - << cntl->ErrorText() + LOG(WARNING) << "GetFileSize failed, error = " << cntl->ErrorText() << ", log id = " << cntl->log_id(); return -1; } else { if (response.retcode() != nebd::client::RetCode::kOK) { LOG(ERROR) << "GetFileSize failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() + << ", retmsg = " << response.retmsg() << ", fd = " << fd << ", log id = " << cntl->log_id(); return -1; @@ -366,8 +368,8 @@ int NebdClient::Discard(int fd, NebdClientAioContext* aioctx) { request.set_offset(aioctx->offset); request.set_size(aioctx->length); - AioDiscardClosure* done = new(std::nothrow) AioDiscardClosure( - fd, aioctx, option_.requestOption); + AioDiscardClosure* done = new (std::nothrow) + AioDiscardClosure(fd, aioctx, option_.requestOption); done->cntl.set_timeout_ms(-1); done->cntl.set_log_id(logId_.fetch_add(1, std::memory_order_relaxed)); stub.Discard(&done->cntl, &request, &done->response, done); @@ -386,8 +388,8 @@ int NebdClient::AioRead(int fd, NebdClientAioContext* aioctx) { request.set_offset(aioctx->offset); request.set_size(aioctx->length); - AioReadClosure* done = new(std::nothrow) AioReadClosure( - fd, aioctx, option_.requestOption); + AioReadClosure* done = new (std::nothrow) + AioReadClosure(fd, aioctx, option_.requestOption); done->cntl.set_timeout_ms(-1); done->cntl.set_log_id(logId_.fetch_add(1, std::memory_order_relaxed)); stub.Read(&done->cntl, &request, &done->response, done); @@ -398,9 +400,7 @@ int NebdClient::AioRead(int fd, NebdClientAioContext* aioctx) { return 0; } -static void EmptyDeleter(void* m) { - (void)m; -} +static void EmptyDeleter(void* m) { (void)m; } int NebdClient::AioWrite(int fd, NebdClientAioContext* aioctx) { auto task = [this, fd, aioctx]() { @@ -410,8 +410,8 @@ int NebdClient::AioWrite(int fd, NebdClientAioContext* aioctx) { request.set_offset(aioctx->offset); request.set_size(aioctx->length); - AioWriteClosure* done = new(std::nothrow) AioWriteClosure( - fd, aioctx, option_.requestOption); + AioWriteClosure* done = new (std::nothrow) + AioWriteClosure(fd, aioctx, option_.requestOption); done->cntl.set_timeout_ms(-1); done->cntl.set_log_id(logId_.fetch_add(1, std::memory_order_relaxed)); @@ -431,8 +431,8 @@ int NebdClient::Flush(int fd, NebdClientAioContext* aioctx) { nebd::client::FlushRequest request; request.set_fd(fd); - AioFlushClosure* done = new(std::nothrow) AioFlushClosure( - fd, aioctx, option_.requestOption); + AioFlushClosure* done = new (std::nothrow) + AioFlushClosure(fd, 
aioctx, option_.requestOption); done->cntl.set_timeout_ms(-1); done->cntl.set_log_id(logId_.fetch_add(1, std::memory_order_relaxed)); stub.Flush(&done->cntl, &request, &done->response, done); @@ -444,8 +444,7 @@ int NebdClient::Flush(int fd, NebdClientAioContext* aioctx) { } int64_t NebdClient::GetInfo(int fd) { - auto task = [&](brpc::Controller* cntl, - brpc::Channel* channel, + auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { nebd::client::NebdFileService_Stub stub(channel); nebd::client::GetInfoRequest request; @@ -456,15 +455,14 @@ int64_t NebdClient::GetInfo(int fd) { *rpcFailed = cntl->Failed(); if (*rpcFailed) { - LOG(WARNING) << "GetInfo rpc failed, error = " - << cntl->ErrorText() + LOG(WARNING) << "GetInfo rpc failed, error = " << cntl->ErrorText() << ", log id = " << cntl->log_id(); return -1; } else { if (response.retcode() != nebd::client::RetCode::kOK) { LOG(ERROR) << "GetInfo failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() + << ", retmsg = " << response.retmsg() << ", fd = " << fd << ", log id = " << cntl->log_id(); return -1; @@ -482,8 +480,7 @@ int64_t NebdClient::GetInfo(int fd) { } int NebdClient::InvalidCache(int fd) { - auto task = [&](brpc::Controller* cntl, - brpc::Channel* channel, + auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { nebd::client::NebdFileService_Stub stub(channel); nebd::client::InvalidateCacheRequest request; @@ -502,7 +499,7 @@ int NebdClient::InvalidCache(int fd) { if (response.retcode() != nebd::client::RetCode::kOK) { LOG(ERROR) << "InvalidCache failed, " << "retcode = " << response.retcode() - <<", retmsg = " << response.retmsg() + << ", retmsg = " << response.retmsg() << ", fd = " << fd << ", log id = " << cntl->log_id(); return -1; @@ -526,8 +523,7 @@ int NebdClient::InitNebdClientOption(Configuration* conf) { LOG_IF(ERROR, ret != true) << "Load nebdserver.serverAddress failed"; RETURN_IF_FALSE(ret); - ret = conf->GetStringValue("metacache.fileLockPath", - &option_.fileLockPath); + ret = conf->GetStringValue("metacache.fileLockPath", &option_.fileLockPath); LOG_IF(ERROR, ret != true) << "Load metacache.fileLockPath failed"; RETURN_IF_FALSE(ret); @@ -550,7 +546,8 @@ int NebdClient::InitNebdClientOption(Configuration* conf) { ret = conf->GetInt64Value("request.rpcHostDownRetryIntervalUs", &requestOption.rpcHostDownRetryIntervalUs); - LOG_IF(ERROR, ret != true) << "Load request.rpcHostDownRetryIntervalUs failed"; // NOLINT + LOG_IF(ERROR, ret != true) + << "Load request.rpcHostDownRetryIntervalUs failed"; // NOLINT RETURN_IF_FALSE(ret); ret = conf->GetInt64Value("request.rpcHealthCheckIntervalS", @@ -560,7 +557,8 @@ int NebdClient::InitNebdClientOption(Configuration* conf) { ret = conf->GetInt64Value("request.rpcMaxDelayHealthCheckIntervalMs", &requestOption.rpcMaxDelayHealthCheckIntervalMs); - LOG_IF(ERROR, ret != true) << "Load request.rpcMaxDelayHealthCheckIntervalMs failed"; // NOLINT + LOG_IF(ERROR, ret != true) + << "Load request.rpcMaxDelayHealthCheckIntervalMs failed"; // NOLINT RETURN_IF_FALSE(ret); ret = conf->GetUInt32Value("request.rpcSendExecQueueNum", @@ -581,8 +579,8 @@ int NebdClient::InitNebdClientOption(Configuration* conf) { int NebdClient::InitHeartBeatOption(Configuration* conf, HeartbeatOption* heartbeatOption) { - bool ret = conf->GetInt64Value("heartbeat.intervalS", - &heartbeatOption->intervalS); + bool ret = + conf->GetInt64Value("heartbeat.intervalS", &heartbeatOption->intervalS); 
LOG_IF(ERROR, ret != true) << "Load heartbeat.intervalS failed";
     RETURN_IF_FALSE(ret);
 
@@ -604,8 +602,7 @@ int NebdClient::InitChannel() {
         option_.requestOption.rpcHealthCheckIntervalS;
     brpc::FLAGS_circuit_breaker_max_isolation_duration_ms =
         option_.requestOption.rpcMaxDelayHealthCheckIntervalMs;
-    int ret = channel_.InitWithSockFile(
-        option_.serverAddress.c_str(), nullptr);
+    int ret = channel_.InitWithSockFile(option_.serverAddress.c_str(), nullptr);
     if (ret != 0) {
         LOG(ERROR) << "Init Channel failed, socket addr = "
                    << option_.serverAddress;
@@ -652,7 +649,6 @@ std::string NebdClient::ReplaceSlash(const std::string& str) {
     return ret;
 }
 
-
 void NebdClient::InitLogger(const LogOption& logOption) {
     static const char* kProcessName = "nebd-client";
 
@@ -661,8 +657,9 @@ void NebdClient::InitLogger(const LogOption& logOption) {
     google::InitGoogleLogging(kProcessName);
 }
 
-int NebdClient::ExecAsyncRpcTask(void* meta,
-                                 bthread::TaskIterator<AsyncRpcTask>& iter) {  // NOLINT
+int NebdClient::ExecAsyncRpcTask(
+    void* meta,
+    bthread::TaskIterator<AsyncRpcTask>& iter) {  // NOLINT
     (void)meta;
     if (iter.is_queue_stopped()) {
         return 0;
diff --git a/nebd/src/part1/nebd_client.h b/nebd/src/part1/nebd_client.h
index c814f9f711..815c4c7fe7 100644
--- a/nebd/src/part1/nebd_client.h
+++ b/nebd/src/part1/nebd_client.h
@@ -27,30 +27,28 @@
 
 #include 
 #include 
-#include 
 #include 
+#include 
 #include 
 
-#include "nebd/src/part1/nebd_common.h"
-#include "nebd/src/common/configuration.h"
+#include "include/curve_compiler_specific.h"
 #include "nebd/proto/client.pb.h"
-#include "nebd/src/part1/libnebd.h"
+#include "nebd/src/common/configuration.h"
 #include "nebd/src/part1/heartbeat_manager.h"
+#include "nebd/src/part1/libnebd.h"
+#include "nebd/src/part1/nebd_common.h"
 #include "nebd/src/part1/nebd_metacache.h"
 
-#include "include/curve_compiler_specific.h"
-
 namespace nebd {
 namespace client {
 
-using RpcTask = std::function<int64_t(brpc::Controller* cntl,
-    brpc::Channel* channel, bool* rpcFailed)>;
+using RpcTask = std::function<int64_t(brpc::Controller* cntl,
+                                      brpc::Channel* channel, bool* rpcFailed)>;
 using nebd::common::Configuration;
 
 class NebdClient {
  public:
-    static NebdClient &GetInstance() {
+    static NebdClient& GetInstance() {
         static NebdClient client;
         return client;
     }
 
@@ -58,93 +56,100 @@ class NebdClient {
     ~NebdClient() = default;
 
     /**
-     * @brief 初始化nebd,仅在第一次调用的时候真正执行初始化逻辑
-     * @param none
-     * @return 成功返回0,失败返回-1
+     * @brief Initialize nebd; the initialization logic only actually runs on
+     * the first call
+     * @param none
+     * @return returns 0 on success, -1 on failure
      */
     int Init(const char* confpath);
 
     /**
-     * @brief 反初始化nebd
-     * @param none
-     * @return 成功返回0,失败返回-1
+     * @brief Uninitialize nebd
+     * @param none
+     * @return returns 0 on success, -1 on failure
      */
     void Uninit();
 
     /**
-     * @brief open文件
-     * @param filename:文件名
-     * @return 成功返回文件fd,失败返回错误码
+     * @brief open a file
+     * @param filename: file name
+     * @return the file fd on success, or an error code on failure
      */
     int Open(const char* filename, const NebdOpenFlags* flags);
 
     /**
-     * @brief close文件
-     * @param fd:文件的fd
-     * @return 成功返回0,失败返回错误码
+     * @brief close a file
+     * @param fd: fd of the file
+     * @return returns 0 on success, or an error code on failure
      */
     int Close(int fd);
 
     /**
-     * @brief resize文件
-     * @param fd:文件的fd
-     *        size:调整后的文件size
-     * @return 成功返回0,失败返回错误码
+     * @brief resize a file
+     * @param fd: fd of the file
+     *        size: the adjusted file size
+     * @return returns 0 on success, or an error code on failure
      */
     int Extend(int fd, int64_t newsize);
 
     /**
-     * @brief 获取文件size
-     * @param fd:文件的fd
-     * @return 成功返回文件size,失败返回错误码
+     * @brief Get the file size
+     * @param fd: fd of the file
+     * @return the file size on success, or an error code on
+     * failure
      */
     int64_t GetFileSize(int fd);
 
     int64_t GetBlockSize(int fd);
 
     /**
-     * @brief discard文件,异步函数
-     * @param fd:文件的fd
-     *        context:异步请求的上下文,包含请求所需的信息以及回调
-     * @return 成功返回0,失败返回错误码
+     * @brief discard a file, asynchronous function
+     * @param fd: fd of the file
+     *        context: context of the asynchronous request, containing the
+     *        information required for the request as well as the callback
+     * @return returns 0 on success, or an error code on failure
      */
     int Discard(int fd, NebdClientAioContext* aioctx);
 
     /**
-     * @brief 读文件,异步函数
-     * @param fd:文件的fd
-     *        context:异步请求的上下文,包含请求所需的信息以及回调
-     * @return 成功返回0,失败返回错误码
+     * @brief Read a file, asynchronous function
+     * @param fd: fd of the file
+     *        context: context of the asynchronous request, containing the
+     *        information required for the request as well as the callback
+     * @return returns 0 on success, or an error code on failure
      */
     int AioRead(int fd, NebdClientAioContext* aioctx);
 
     /**
-     * @brief 写文件,异步函数
-     * @param fd:文件的fd
-     *        context:异步请求的上下文,包含请求所需的信息以及回调
-     * @return 成功返回0,失败返回错误码
+     * @brief Write a file, asynchronous function
+     * @param fd: fd of the file
+     *        context: context of the asynchronous request, containing the
+     *        information required for the request as well as the callback
+     * @return returns 0 on success, or an error code on failure
      */
     int AioWrite(int fd, NebdClientAioContext* aioctx);
 
     /**
-     * @brief flush文件,异步函数
-     * @param fd:文件的fd
-     *        context:异步请求的上下文,包含请求所需的信息以及回调
-     * @return 成功返回0,失败返回错误码
+     * @brief flush a file, asynchronous function
+     * @param fd: fd of the file
+     *        context: context of the asynchronous request, containing the
+     *        information required for the request as well as the callback
+     * @return returns 0 on success, or an error code on failure
      */
     int Flush(int fd, NebdClientAioContext* aioctx);
 
     /**
-     * @brief 获取文件info
-     * @param fd:文件的fd
-     * @return 成功返回文件对象size,失败返回错误码
+     * @brief Get file information
+     * @param fd: fd of the file
+     * @return the file object size on success, or an error code on
+     * failure
      */
     int64_t GetInfo(int fd);
 
     /**
-     * @brief 刷新cache,等所有异步请求返回
-     * @param fd:文件的fd
-     * @return 成功返回0,失败返回错误码
+     * @brief Flush the cache and wait for all asynchronous requests to return
+     * @param fd: fd of the file
+     * @return returns 0 on success, or an error code on failure
      */
     int InvalidCache(int fd);
 
@@ -159,17 +164,17 @@ class NebdClient {
     void InitLogger(const LogOption& logOption);
 
     /**
-     * @brief 替换字符串中的 '/' 为 '+'
+     * @brief Replace '/' with '+' in the string
      *
-     * @param str 需要替换的字符串
-     * @return 替换后的字符串
+     * @param str The string to be processed
+     * @return The replaced string
      */
     std::string ReplaceSlash(const std::string& str);
 
     int64_t ExecuteSyncRpc(RpcTask task);
-    // 心跳管理模块
+    // Heartbeat management module
     std::shared_ptr<HeartbeatManager> heartbeatMgr_;
-    // 缓存模块
+    // Metadata cache module
     std::shared_ptr<NebdClientMetaCache> metaCache_;
 
     NebdClientOption option_;
@@ -183,7 +188,8 @@ class NebdClient {
 
     std::vector<bthread::ExecutionQueueId<AsyncRpcTask>> rpcTaskQueues_;
 
-    static int ExecAsyncRpcTask(void* meta, bthread::TaskIterator<AsyncRpcTask>& iter);  // NOLINT
+    static int ExecAsyncRpcTask(
+        void* meta, bthread::TaskIterator<AsyncRpcTask>& iter);  // NOLINT
 
     void PushAsyncTask(const AsyncRpcTask& task) {
         static thread_local unsigned int seed = time(nullptr);
@@ -197,7 +203,7 @@ class NebdClient {
     }
 };
 
-extern NebdClient &nebdClient;
+extern NebdClient& nebdClient;
 
 }  // namespace client
 }  // namespace nebd
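Editor's note: ReplaceSlash exists so a volume name can be flattened into a single lock-file name under metacache.fileLockPath (see the Open path earlier in this patch). A standalone sketch of the documented behavior — the real method is a private member of NebdClient, and the paths here are illustrative:

```cpp
#include <algorithm>
#include <iostream>
#include <string>

// Sketch of the documented '/' -> '+' replacement.
std::string ReplaceSlashSketch(const std::string& str) {
    std::string ret = str;
    std::replace(ret.begin(), ret.end(), '/', '+');
    return ret;
}

int main() {
    // "/var/run/nebd" stands in for option_.fileLockPath here.
    std::string fileLockName =
        "/var/run/nebd" + std::string("/") + ReplaceSlashSketch("cbd:pool/vol");
    std::cout << fileLockName << "\n";  // /var/run/nebd/cbd:pool+vol
    return 0;
}
```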
diff --git a/nebd/src/part1/nebd_common.h b/nebd/src/part1/nebd_common.h
index 432f24534f..7c03839178 100644
--- a/nebd/src/part1/nebd_common.h
+++ b/nebd/src/part1/nebd_common.h
@@ -25,49 +25,49 @@
 
 #include <string>
 
-// rpc request配置项
+// rpc request configuration items
 struct RequestOption {
-    // 同步rpc的最大重试次数
+    // Maximum number of retries for a synchronous rpc
     int64_t syncRpcMaxRetryTimes;
-    // rpc请求的重试间隔
+    // Retry interval of rpc requests
    int64_t rpcRetryIntervalUs;
-    // rpc请求的最大重试间隔
+    // Maximum retry interval of rpc requests
     int64_t rpcRetryMaxIntervalUs;
-    // rpc hostdown情况下的重试时间
+    // Retry interval when the rpc host is down
     int64_t rpcHostDownRetryIntervalUs;
-    // brpc的健康检查周期时间
+    // Health check period of brpc
     int64_t rpcHealthCheckIntervalS;
-    // brpc从rpc失败到进行健康检查的最大时间间隔
+    // Maximum interval in brpc between an rpc failure and the health check
     int64_t rpcMaxDelayHealthCheckIntervalMs;
-    // rpc发送执行队列个数
+    // Number of rpc send execution queues
     uint32_t rpcSendExecQueueNum = 2;
 };
 
-// 日志配置项
+// Log configuration items
 struct LogOption {
-    // 日志存放目录
+    // Log storage directory
     std::string logPath;
 };
 
-// nebd client配置项
+// nebd client configuration items
 struct NebdClientOption {
     // part2 socket file address
     std::string serverAddress;
-    // 文件锁路径
+    // File lock path
     std::string fileLockPath;
-    // rpc request配置项
+    // rpc request configuration items
     RequestOption requestOption;
-    // 日志配置项
+    // Log configuration items
     LogOption logOption;
 };
 
-// heartbeat配置项
+// heartbeat configuration items
 struct HeartbeatOption {
     // part2 socket file address
     std::string serverAddress;
-    // heartbeat间隔
+    // heartbeat interval
     int64_t intervalS;
-    // heartbeat rpc超时时间
+    // heartbeat rpc timeout
     int64_t rpcTimeoutMs;
 };
diff --git a/nebd/src/part1/nebd_metacache.h b/nebd/src/part1/nebd_metacache.h
index 3b596bdf62..5435e3af5f 100644
--- a/nebd/src/part1/nebd_metacache.h
+++ b/nebd/src/part1/nebd_metacache.h
@@ -23,9 +23,9 @@
 #ifndef NEBD_SRC_PART1_NEBD_METACACHE_H_
 #define NEBD_SRC_PART1_NEBD_METACACHE_H_
 
+#include <memory>
 #include <string>
 #include <unordered_map>
-#include <memory>
 
 #include "nebd/src/common/file_lock.h"
 #include "nebd/src/common/rw_lock.h"
@@ -42,16 +42,13 @@ struct NebdClientFileInfo {
 
     NebdClientFileInfo() = default;
 
-    NebdClientFileInfo(
-        int fd, const std::string& fileName,
-        const FileLock& fileLock)
-        : fd(fd),
-          fileName(fileName),
-          fileLock(fileLock) {}
+    NebdClientFileInfo(int fd, const std::string& fileName,
+                       const FileLock& fileLock)
+        : fd(fd), fileName(fileName), fileLock(fileLock) {}
 };
 
 /**
- * @brief: 保存当前已打开文件信息
+ * @brief: Keeps information about the currently opened files
 */
 class NebdClientMetaCache {
  public:
@@ -59,33 +56,33 @@ class NebdClientMetaCache {
     ~NebdClientMetaCache() = default;
 
     /**
-     * @brief: 添加文件信息
-     * @param: fileInfo 文件信息
+     * @brief: Add file information
+     * @param: fileInfo: the file information
      */
     void AddFileInfo(const NebdClientFileInfo& fileInfo);
 
     /**
-     * @brief: 删除文件信息
-     * @param: fd 文件描述符
+     * @brief: Remove file information
+     * @param: fd: the file descriptor
      */
     void RemoveFileInfo(int fd);
 
     /**
-     * @brief: 获取对应fd的文件信息
-     * @param: fd 文件fd
+     * @brief: Obtain the file information for the given fd
+     * @param: fd: the file fd
      * @param[out]: fileInfo
-     * @return: 0 成功 / -1 返回
+     * @return: 0 on success / -1 on failure
      */
     int GetFileInfo(int fd, NebdClientFileInfo* fileInfo) const;
 
     /**
-     * @brief: 获取当前已打开文件信息
-     * @return: 当前已打开文件信息
+     * @brief: Get the information of all currently opened files
+     * @return: the currently opened file information
      */
     std::vector<NebdClientFileInfo> GetAllFileInfo() const;
 
 private:
-    // 当前已打开文件信息
+    // Information of the currently opened files
     std::unordered_map<int, NebdClientFileInfo> fileinfos_;
     mutable nebd::common::RWLock rwLock_;
 };
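Editor's note: a quick illustration of how this cache tracks open volumes — a hypothetical call sequence (fd, volume name, and lock path are made up), not taken from the server code:

```cpp
#include "nebd/src/common/file_lock.h"
#include "nebd/src/part1/nebd_metacache.h"

void MetaCacheExample() {
    nebd::client::NebdClientMetaCache metaCache;

    // Register fd 3 after a successful open; the lock name is illustrative.
    nebd::common::FileLock lock("/var/run/nebd/cbd:pool+vol");
    metaCache.AddFileInfo({3, "cbd:pool/vol", lock});

    // Look the entry up again, e.g. when a heartbeat is assembled.
    nebd::client::NebdClientFileInfo info;
    if (metaCache.GetFileInfo(3, &info) == 0) {
        // info.fileName == "cbd:pool/vol"
    }

    // Drop the entry when the file is closed.
    metaCache.RemoveFileInfo(3);
}
```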
diff --git a/nebd/src/part2/define.h b/nebd/src/part2/define.h
index 4c2fc54022..8a66854c59 100644
--- a/nebd/src/part2/define.h
+++ b/nebd/src/part2/define.h
@@ -25,24 +25,25 @@
 
 #include 
 #include 
-#include 
-#include 
+
 #include 
+#include 
+#include 
 
 #include "nebd/src/common/rw_lock.h"
 
 namespace nebd {
 namespace server {
 
-using nebd::common::RWLock;
-using ::google::protobuf::Message;
 using ::google::protobuf::Closure;
+using ::google::protobuf::Message;
 using ::google::protobuf::RpcController;
+using nebd::common::RWLock;
 
 const char CURVE_PREFIX[] = "cbd";
 const char TEST_PREFIX[] = "test";
 
-// nebd异步请求的类型
+// Types of nebd asynchronous requests
 enum class LIBAIO_OP {
     LIBAIO_OP_READ,
     LIBAIO_OP_WRITE,
@@ -70,54 +71,55 @@ using RWLockPtr = std::shared_ptr<RWLock>;
 
 struct NebdServerAioContext;
 
-// nebd回调函数的类型
+// The type of the nebd callback function
 typedef void (*NebdAioCallBack)(struct NebdServerAioContext* context);
 
-// nebd server端异步请求的上下文
-// 记录请求的类型、参数、返回信息、rpc信息
+// Context of an asynchronous request on the nebd server side
+// Records the type, parameters, return information, and rpc information of the
+// request
 struct NebdServerAioContext {
-    // 请求的offset
+    // Requested offset
     off_t offset = 0;
-    // 请求的size
+    // Requested size
     size_t size = 0;
-    // 记录异步返回的返回值
+    // Return value of the asynchronous operation
     int ret = -1;
-    // 异步请求的类型,详见定义
+    // Type of the asynchronous request; see the LIBAIO_OP definition
     LIBAIO_OP op = LIBAIO_OP::LIBAIO_OP_UNKNOWN;
-    // 异步请求结束时调用的回调函数
+    // Callback function invoked when the asynchronous request finishes
     NebdAioCallBack cb;
-    // 请求的buf
+    // Request buffer
     void* buf = nullptr;
-    // rpc请求的相应内容
+    // Response body of the rpc request
     Message* response = nullptr;
-    // rpc请求的回调函数
-    Closure *done = nullptr;
-    // rpc请求的controller
+    // Callback function of the rpc request
+    Closure* done = nullptr;
+    // Controller of the rpc request
     RpcController* cntl = nullptr;
     // return rpc when io error
     bool returnRpcWhenIoError = false;
 };
 
 struct NebdFileInfo {
-    // 文件大小
+    // File size
     uint64_t size;
-    // object/chunk大小
+    // object/chunk size
     uint64_t obj_size;
-    // object数量
+    // Number of objects
     uint64_t num_objs;
     // block size
     uint32_t block_size;
 };
 
 using ExtendAttribute = std::map<std::string, std::string>;
-// nebd server 端文件持久化的元数据信息
+// Persistent metadata of a file on the nebd server side
 struct NebdFileMeta {
     int fd;
     std::string fileName;
     ExtendAttribute xattr;
 };
 
-// part2配置项
+// part2 configuration items
 const char LISTENADDRESS[] = "listen.address";
 const char METAFILEPATH[] = "meta.file.path";
 const char HEARTBEATTIMEOUTSEC[] = "heartbeat.timeout.sec";
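Editor's note: a schematic of how the fields of NebdServerAioContext are meant to interact on completion — a hypothetical helper, not the server's actual completion path:

```cpp
#include "nebd/src/part2/define.h"

namespace nebd {
namespace server {

// Hypothetical completion hook, sketching the contract of the struct above.
// In the real server the registered NebdAioCallBack owns the rpc bookkeeping.
void OnAioComplete(NebdServerAioContext* ctx, int result) {
    // Record the asynchronous return value for the callback to inspect.
    ctx->ret = result;
    // The callback is expected to fill ctx->response based on ctx->ret
    // (honoring returnRpcWhenIoError) and then run ctx->done to return
    // the rpc exactly once.
    if (ctx->cb != nullptr) {
        ctx->cb(ctx);
    }
}

}  // namespace server
}  // namespace nebd
```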
" - << "fd: " << fd_ - << ", filename: " << fileName_; + << "fd: " << fd_ << ", filename: " << fileName_; return fd_; } int NebdFileEntity::Close(bool removeMeta) { CHECK(executor_ != nullptr) << "file entity is not inited. " << "filename: " << fileName_; - // 用于和其他用户请求互斥,避免文件被close后,请求发到后端导致返回失败 + // This is used to prevent conflicts with other user requests to ensure that + // a file is not closed, and requests sent to the backend after the file has + // been closed result in failures. WriteLockGuard writeLock(rwLock_); - // 这里的互斥锁是为了跟open请求互斥,以下情况可能导致close和open并发 - // part2重启,导致文件被reopen,然后由于超时,文件准备被close - // 此时用户发送了挂载卷请求对文件进行open + // The mutex lock here is to prevent conflicts with open requests. The + // following scenarios may lead to concurrent close and open operations: + // part2 restarts, causing the file to be reopened. Due to a timeout, the + // file is about to be closed. At this point, a user sends a request to + // mount a volume, which involves opening the file. std::unique_lock lock(fileStatusMtx_); if (status_ == NebdFileStatus::OPENED) { int ret = executor_->Close(fileInstance_.get()); if (ret < 0) { LOG(ERROR) << "Close file failed. " - << "fd: " << fd_ - << ", filename: " << fileName_; + << "fd: " << fd_ << ", filename: " << fileName_; return -1; } status_ = NebdFileStatus::CLOSED; @@ -186,15 +187,13 @@ int NebdFileEntity::Close(bool removeMeta) { int ret = metaFileManager_->RemoveFileMeta(fileName_); if (ret != 0) { LOG(ERROR) << "Remove file record failed. " - << "fd: " << fd_ - << ", filename: " << fileName_; + << "fd: " << fd_ << ", filename: " << fileName_; return -1; } status_ = NebdFileStatus::DESTROYED; } LOG(INFO) << "Close file success. " - << "fd: " << fd_ - << ", filename: " << fileName_ + << "fd: " << fd_ << ", filename: " << fileName_ << ", meta removed? " << (removeMeta ? "yes" : "no"); return 0; } @@ -204,8 +203,7 @@ int NebdFileEntity::Discard(NebdServerAioContext* aioctx) { int ret = executor_->Discard(fileInstance_.get(), aioctx); if (ret < 0) { LOG(ERROR) << "Discard file failed. " - << "fd: " << fd_ - << ", fileName: " << fileName_ + << "fd: " << fd_ << ", fileName: " << fileName_ << ", context: " << *aioctx; return -1; } @@ -219,8 +217,7 @@ int NebdFileEntity::AioRead(NebdServerAioContext* aioctx) { int ret = executor_->AioRead(fileInstance_.get(), aioctx); if (ret < 0) { LOG(ERROR) << "AioRead file failed. " - << "fd: " << fd_ - << ", fileName: " << fileName_ + << "fd: " << fd_ << ", fileName: " << fileName_ << ", context: " << *aioctx; return -1; } @@ -234,8 +231,7 @@ int NebdFileEntity::AioWrite(NebdServerAioContext* aioctx) { int ret = executor_->AioWrite(fileInstance_.get(), aioctx); if (ret < 0) { LOG(ERROR) << "AioWrite file failed. " - << "fd: " << fd_ - << ", fileName: " << fileName_ + << "fd: " << fd_ << ", fileName: " << fileName_ << ", context: " << *aioctx; return -1; } @@ -249,8 +245,7 @@ int NebdFileEntity::Flush(NebdServerAioContext* aioctx) { int ret = executor_->Flush(fileInstance_.get(), aioctx); if (ret < 0) { LOG(ERROR) << "Flush file failed. " - << "fd: " << fd_ - << ", fileName: " << fileName_ + << "fd: " << fd_ << ", fileName: " << fileName_ << ", context: " << *aioctx; return -1; } @@ -264,8 +259,7 @@ int NebdFileEntity::Extend(int64_t newsize) { int ret = executor_->Extend(fileInstance_.get(), newsize); if (ret < 0) { LOG(ERROR) << "Extend file failed. 
" - << "fd: " << fd_ - << ", newsize: " << newsize + << "fd: " << fd_ << ", newsize: " << newsize << ", fileName" << fileName_; return -1; } @@ -279,8 +273,7 @@ int NebdFileEntity::GetInfo(NebdFileInfo* fileInfo) { int ret = executor_->GetInfo(fileInstance_.get(), fileInfo); if (ret < 0) { LOG(ERROR) << "Get file info failed. " - << "fd: " << fd_ - << ", fileName" << fileName_; + << "fd: " << fd_ << ", fileName" << fileName_; return -1; } return 0; @@ -293,8 +286,7 @@ int NebdFileEntity::InvalidCache() { int ret = executor_->InvalidCache(fileInstance_.get()); if (ret < 0) { LOG(ERROR) << "Invalid cache failed. " - << "fd: " << fd_ - << ", fileName" << fileName_; + << "fd: " << fd_ << ", fileName" << fileName_; return -1; } return 0; @@ -318,8 +310,7 @@ int NebdFileEntity::ProcessSyncRequest(ProcessTask task) { int ret = task(); if (ret < 0) { LOG(ERROR) << "Process sync request failed. " - << "fd: " << fd_ - << ", fileName" << fileName_; + << "fd: " << fd_ << ", fileName" << fileName_; return -1; } return 0; @@ -340,18 +331,19 @@ int NebdFileEntity::ProcessAsyncRequest(ProcessTask task, return -1; } - // 对于异步请求,将此closure传给aiocontext,从而在请求返回时释放读锁 + // For asynchronous requests, pass this closure to aiocontext to release the + // read lock when the request returns done->SetClosure(aioctx->done); aioctx->done = doneGuard.release(); int ret = task(); if (ret < 0) { - // 如果请求失败,这里要主动释放锁,并将aiocontext还原回去 + // If the request fails, the lock should be actively released here and + // the aiocontext should be restored back brpc::ClosureGuard doneGuard(done); aioctx->done = done->GetClosure(); done->SetClosure(nullptr); LOG(ERROR) << "Process async request failed. " - << "fd: " << fd_ - << ", fileName" << fileName_; + << "fd: " << fd_ << ", fileName" << fileName_; return -1; } return 0; @@ -381,11 +373,11 @@ int NebdFileEntity::UpdateFileStatus(NebdFileInstancePtr fileInstance) { } bool NebdFileEntity::GuaranteeFileOpened() { - // 文件如果已经被用户close了,就不允许后面请求再自动打开进行操作了 + // If the file has already been closed by the user, subsequent requests for + // automatic opening for operation are not allowed if (status_ == NebdFileStatus::DESTROYED) { LOG(ERROR) << "File has been destroyed. " - << "filename: " << fileName_ - << ", fd: " << fd_; + << "filename: " << fileName_ << ", fd: " << fd_; return false; } @@ -393,8 +385,7 @@ bool NebdFileEntity::GuaranteeFileOpened() { int ret = Open(openFlags_.get()); if (ret != fd_) { LOG(ERROR) << "Get opened file failed. 
" - << "filename: " << fileName_ - << ", fd: " << fd_ + << "filename: " << fileName_ << ", fd: " << fd_ << ", ret: " << ret; return false; } @@ -404,8 +395,8 @@ bool NebdFileEntity::GuaranteeFileOpened() { std::ostream& operator<<(std::ostream& os, const NebdFileEntity& entity) { std::string standardTime; - TimeUtility::TimeStampToStandard( - entity.GetFileTimeStamp() / 1000, &standardTime); + TimeUtility::TimeStampToStandard(entity.GetFileTimeStamp() / 1000, + &standardTime); os << "[filename: " << entity.GetFileName() << ", fd: " << entity.GetFd() << ", status: " << NebdFileStatus2Str(entity.GetFileStatus()) << ", timestamp: " << standardTime << "]"; diff --git a/nebd/src/part2/file_entity.h b/nebd/src/part2/file_entity.h index fb1e1448d8..c57d90e2ad 100644 --- a/nebd/src/part2/file_entity.h +++ b/nebd/src/part2/file_entity.h @@ -25,42 +25,44 @@ #include #include -#include -#include + #include +#include +#include #include // NOLINT +#include #include -#include +#include "nebd/proto/client.pb.h" #include "nebd/src/common/rw_lock.h" #include "nebd/src/common/timeutility.h" #include "nebd/src/part2/define.h" -#include "nebd/src/part2/util.h" -#include "nebd/src/part2/request_executor.h" #include "nebd/src/part2/metafile_manager.h" -#include "nebd/proto/client.pb.h" +#include "nebd/src/part2/request_executor.h" +#include "nebd/src/part2/util.h" namespace nebd { namespace server { using nebd::common::BthreadRWLock; -using nebd::common::WriteLockGuard; using nebd::common::ReadLockGuard; using nebd::common::TimeUtility; +using nebd::common::WriteLockGuard; using OpenFlags = nebd::client::ProtoOpenFlags; class NebdFileInstance; class NebdRequestExecutor; using NebdFileInstancePtr = std::shared_ptr; -// 处理用户请求时需要加读写锁,避免close时仍有用户IO未处理完成 -// 对于异步IO来说,只有返回时才能释放读锁,所以封装成Closure -// 在发送异步请求前,将closure赋值给NebdServerAioContext +// When processing user requests, it is necessary to add a read write lock to +// avoid user IO still not being processed when closing For asynchronous IO, the +// read lock can only be released on return, so it is encapsulated as a Closure +// Assign the closure value to NebdServerAioContext before sending an +// asynchronous request class NebdRequestReadLockClosure : public Closure { public: explicit NebdRequestReadLockClosure(BthreadRWLock& rwLock) // NOLINT - : rwLock_(rwLock) - , done_(nullptr) { + : rwLock_(rwLock), done_(nullptr) { rwLock_.RDLock(); } ~NebdRequestReadLockClosure() {} @@ -71,13 +73,9 @@ class NebdRequestReadLockClosure : public Closure { rwLock_.Unlock(); } - void SetClosure(Closure* done) { - done_ = done; - } + void SetClosure(Closure* done) { done_ = done; } - Closure* GetClosure() { - return done_; - } + Closure* GetClosure() { return done_; } private: BthreadRWLock& rwLock_; @@ -96,134 +94,132 @@ class NebdFileEntity : public std::enable_shared_from_this { virtual ~NebdFileEntity(); /** - * 初始化文件实体 - * @param option: 初始化参数 - * @return 成功返回0, 失败返回-1 + * Initialize File Entity + * @param option: Initialize parameters + * @return returns 0 for success, -1 for failure */ virtual int Init(const NebdFileEntityOption& option); /** - * 打开文件 - * @return 成功返回fd,失败返回-1 + * Open File + * @return successfully returns fd, failure returns -1 */ virtual int Open(const OpenFlags* openflags); /** - * 重新open文件,如果之前的后端存储的连接还存在则复用之前的连接 - * 否则与后端存储建立新的连接 - * @param xattr: 文件reopen需要的信息 - * @return 成功返回fd,失败返回-1 + * Reopen the file and reuse the previous backend storage connection if it + * still exists Otherwise, establish a new connection with the backend + * storage + * 
@param xattr: information required to reopen the file + * @return the fd on success, -1 on failure */ virtual int Reopen(const ExtendAttribute& xattr); /** - * 关闭文件 - * @param removeMeta: 是否要移除文件元数据记录,true表示移除,false表示不移除 - * 如果是part1传过来的close请求,此参数为true - * 如果是heartbeat manager发起的close请求,此参数为false - * @return 成功返回0,失败返回-1 + * Close the file + * @param removeMeta: whether to remove the file metadata record (true + * removes it, false keeps it). The parameter is true for close requests + * passed in from part1 and false for close requests initiated by the + * heartbeat manager. + * @return 0 on success, -1 on failure */ virtual int Close(bool removeMeta); /** - * 给文件扩容 - * @param newsize: 新的文件大小 - * @return 成功返回0,失败返回-1 + * Extend the file + * @param newsize: the new file size + * @return 0 on success, -1 on failure */ virtual int Extend(int64_t newsize); /** - * 获取文件信息 - * @param fileInfo[out]: 文件信息 - * @return 成功返回0,失败返回-1 + * Obtain file information + * @param fileInfo[out]: the file information + * @return 0 on success, -1 on failure */ virtual int GetInfo(NebdFileInfo* fileInfo); /** - * 异步请求,回收指定区域空间 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to reclaim the specified range of space + * @param aioctx: the asynchronous request context + * @return 0 on success, -1 on failure */ virtual int Discard(NebdServerAioContext* aioctx); /** - * 异步请求,读取指定区域内容 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to read the specified range + * @param aioctx: the asynchronous request context + * @return 0 on success, -1 on failure */ virtual int AioRead(NebdServerAioContext* aioctx); /** - * 异步请求,写数据到指定区域 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to write data to the specified range + * @param aioctx: the asynchronous request context + * @return 0 on success, -1 on failure */ virtual int AioWrite(NebdServerAioContext* aioctx); /** - * 异步请求,flush文件缓存 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to flush the file cache + * @param aioctx: the asynchronous request context + * @return 0 on success, -1 on failure */ virtual int Flush(NebdServerAioContext* aioctx); /** - * 使指定文件缓存失效 - * @return 成功返回0,失败返回-1 + * Invalidate the cache of the specified file + * @return 0 on success, -1 on failure */ virtual int InvalidCache(); - virtual std::string GetFileName() const { - return fileName_; - } + virtual std::string GetFileName() const { return fileName_; } - virtual int GetFd() const { - return fd_; - } + virtual int GetFd() const { return fd_; } virtual void UpdateFileTimeStamp(uint64_t timestamp) { timeStamp_.store(timestamp); } - virtual uint64_t GetFileTimeStamp() const { - return timeStamp_.load(); - } + virtual uint64_t GetFileTimeStamp() const { return timeStamp_.load(); } - virtual NebdFileStatus GetFileStatus() const { - return status_.load(); - } + virtual NebdFileStatus GetFileStatus() const { return status_.load(); } private: /** - * 更新文件状态,包括元信息文件和内存状态 - * @param fileInstancea: open或reopen返回的文件上下文信息 - * @return: 成功返回0,失败返回-1 + * Update the file status, covering both the meta file and the in-memory + * state + * @param fileInstance: the file context information returned by open or + * reopen + * @return: 0 on success, -1 on failure */ int UpdateFileStatus(NebdFileInstancePtr fileInstance); /** - * 请求统一处理函数 - * @param task: 实际请求执行的函数体 - * @return: 成功返回0,失败返回-1 + * Unified request processing function + * @param task: the function body that actually executes the request + * @return: 0 on success, -1 on failure */ using ProcessTask = std::function; int ProcessSyncRequest(ProcessTask task); int ProcessAsyncRequest(ProcessTask task, NebdServerAioContext* aioctx); - // 确保文件处于opened状态,如果不是则尝试进行open - // 无法open或者open失败,则返回false, - // 如果文件处于open状态,则返回true + // Ensure that the file is open, and attempt to open it if it is not. + // Returns false if the file cannot be opened or the open fails, + // and true if the file is open bool GuaranteeFileOpened(); private: - // 文件读写锁,处理请求前加读锁,close文件的时候加写锁 - // 避免close时还有请求未处理完 + // File read/write lock: a read lock is taken before processing requests + // and a write lock is taken when closing the file, so that the close does + // not run while requests are still pending BthreadRWLock rwLock_; - // 互斥锁,用于open、close之间的互斥 + // Mutex used for mutual exclusion between open and close bthread::Mutex fileStatusMtx_; - // nebd server为该文件分配的唯一标识符 + // The unique identifier assigned to this file by the nebd server int fd_; - // 文件名称 + // File name std::string fileName_; std::unique_ptr openFlags_; - // 文件当前状态,opened表示文件已打开,closed表示文件已关闭 + // The current state of the file, where 'opened' indicates that the file is + // open and 'closed' indicates that the file is closed std::atomic status_; - // 该文件上一次收到心跳时的时间戳 + // The timestamp of the last heartbeat received for this file std::atomic timeStamp_; - // 文件在executor open时返回上下文信息,用于后续文件的请求处理 + // Context information returned when the executor opens the file, used for + // processing subsequent requests on it NebdFileInstancePtr fileInstance_; - // 文件对应的executor的指针 + // Pointer to the executor corresponding to the file NebdRequestExecutor* executor_; - // 元数据持久化管理 + // Metadata persistence management MetaFileManagerPtr metaFileManager_; }; using NebdFileEntityPtr = std::shared_ptr; diff --git a/nebd/src/part2/file_manager.cpp b/nebd/src/part2/file_manager.cpp index 5c1dc2a15c..d139829f4f 100644 --- a/nebd/src/part2/file_manager.cpp +++ b/nebd/src/part2/file_manager.cpp @@ -34,8 +34,7 @@ namespace nebd { namespace server { NebdFileManager::NebdFileManager(MetaFileManagerPtr metaFileManager) - : isRunning_(false) - , metaFileManager_(metaFileManager) {} + : isRunning_(false), metaFileManager_(metaFileManager) {} NebdFileManager::~NebdFileManager() {} @@ -62,14 +61,14 @@ int NebdFileManager::Fini() { } int NebdFileManager::Load() { - // 从元数据文件中读取持久化的文件信息 + // Read the persisted file information from the metadata file std::vector fileMetas; int ret = metaFileManager_->ListFileMeta(&fileMetas); if (ret < 0) { LOG(ERROR) << "Load file metas failed."; return ret; } - // 根据持久化的信息重新open文件 + // Reopen the files based on the persisted information int maxFd = 0; for (auto& fileMeta : fileMetas) { NebdFileEntityPtr entity = @@ -174,8 +173,7 @@ int NebdFileManager::InvalidCache(int fd) { return entity->InvalidCache(); } -NebdFileEntityPtr -NebdFileManager::GetFileEntity(int fd) { +NebdFileEntityPtr NebdFileManager::GetFileEntity(int fd) { ReadLockGuard readLock(rwLock_); auto iter = fileMap_.find(fd); if (iter == fileMap_.end()) { @@ -221,7 +219,7 @@ NebdFileEntityPtr NebdFileManager::GenerateFileEntity( } } - // 检测是否存在冲突的文件记录 + // Check for conflicting file records auto iter = fileMap_.find(fd); if (iter != fileMap_.end()) { LOG(ERROR) << "File entity conflict. "
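GetFileEntity and GenerateFileEntity in file_manager.cpp follow a map-under-rwlock pattern: lookups take the read lock, while creation takes the write lock and rejects an fd that is already mapped. A condensed, self-contained illustration of the same pattern; std::shared_mutex and the Entity type below stand in for the project's RWLock and NebdFileEntity:

    #include <map>
    #include <memory>
    #include <shared_mutex>
    #include <string>

    struct Entity { int fd; std::string name; };
    using EntityPtr = std::shared_ptr<Entity>;

    class FileManager {
     public:
        EntityPtr Get(int fd) {  // lookup: shared lock only
            std::shared_lock<std::shared_mutex> lk(mu_);
            auto it = map_.find(fd);
            return it == map_.end() ? nullptr : it->second;
        }

        EntityPtr Generate(int fd, const std::string& name) {  // create: exclusive
            std::unique_lock<std::shared_mutex> lk(mu_);
            auto it = map_.find(fd);
            if (it != map_.end()) return nullptr;  // conflicting record, reject
            auto e = std::make_shared<Entity>(Entity{fd, name});
            map_[fd] = e;
            return e;
        }

     private:
        std::shared_mutex mu_;
        std::map<int, EntityPtr> map_;
    };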
" diff --git a/nebd/src/part2/file_manager.h b/nebd/src/part2/file_manager.h index bac54fd1fa..f81a3d72d0 100644 --- a/nebd/src/part2/file_manager.h +++ b/nebd/src/part2/file_manager.h @@ -25,27 +25,28 @@ #include #include + #include +#include // NOLINT #include #include -#include // NOLINT #include -#include "nebd/src/common/rw_lock.h" +#include "nebd/proto/client.pb.h" #include "nebd/src/common/name_lock.h" +#include "nebd/src/common/rw_lock.h" #include "nebd/src/part2/define.h" -#include "nebd/src/part2/util.h" #include "nebd/src/part2/file_entity.h" #include "nebd/src/part2/metafile_manager.h" -#include "nebd/proto/client.pb.h" +#include "nebd/src/part2/util.h" namespace nebd { namespace server { using nebd::common::NameLock; using nebd::common::NameLockGuard; -using nebd::common::WriteLockGuard; using nebd::common::ReadLockGuard; +using nebd::common::WriteLockGuard; using OpenFlags = nebd::client::ProtoOpenFlags; using FileEntityMap = std::unordered_map; @@ -54,119 +55,124 @@ class NebdFileManager { explicit NebdFileManager(MetaFileManagerPtr metaFileManager); virtual ~NebdFileManager(); /** - * 停止FileManager并释放FileManager资源 - * @return 成功返回0,失败返回-1 + * Stop FileManager and release FileManager resources + * @return returns 0 for success, -1 for failure */ virtual int Fini(); /** - * 启动FileManager - * @return 成功返回0,失败返回-1 + * Start FileManager + * @return returns 0 for success, -1 for failure */ virtual int Run(); /** - * 打开文件 - * @param filename: 文件的filename - * @return 成功返回fd,失败返回-1 + * Open File + * @param filename: The filename of the file + * @return successfully returns fd, failure returns -1 */ virtual int Open(const std::string& filename, const OpenFlags* flags); /** - * 关闭文件 - * @param fd: 文件的fd - * @param removeRecord: 是否要移除文件记录,true表示移除,false表示不移除 - * 如果是part1传过来的close请求,此参数为true - * 如果是heartbeat manager发起的close请求,此参数为false - * @return 成功返回0,失败返回-1 + * Close File + * @param fd: fd of the file + * @param removeRecord: Do you want to remove the file record? 
True means + * remove, false means not remove If it is a close request passed from + * part1, this parameter is true If it is a close request initiated by the + * heartbeat manager, this parameter is false + * @return returns 0 for success, -1 for failure */ virtual int Close(int fd, bool removeRecord); /** - * 给文件扩容 - * @param fd: 文件的fd - * @param newsize: 新的文件大小 - * @return 成功返回0,失败返回-1 + * Expand file capacity + * @param fd: fd of the file + * @param newsize: New file size + * @return returns 0 for success, -1 for failure */ virtual int Extend(int fd, int64_t newsize); /** - * 获取文件信息 - * @param fd: 文件的fd - * @param fileInfo[out]: 文件信息 - * @return 成功返回0,失败返回-1 + * Obtain file information + * @param fd: fd of the file + * @param fileInfo[out]: File information + * @return returns 0 for success, -1 for failure */ virtual int GetInfo(int fd, NebdFileInfo* fileInfo); /** - * 异步请求,回收指定区域空间 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to reclaim the specified area space + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int Discard(int fd, NebdServerAioContext* aioctx); /** - * 异步请求,读取指定区域内容 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to read the content of the specified area + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int AioRead(int fd, NebdServerAioContext* aioctx); /** - * 异步请求,写数据到指定区域 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request, writing data to a specified area + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int AioWrite(int fd, NebdServerAioContext* aioctx); /** - * 异步请求,flush文件缓存 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous requests, flush file caching + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int Flush(int fd, NebdServerAioContext* aioctx); /** - * 使指定文件缓存失效 - * @param fd: 文件的fd - * @return 成功返回0,失败返回-1 + * Invalidate the specified file cache + * @param fd: fd of the file + * @return returns 0 for success, -1 for failure */ virtual int InvalidCache(int fd); - // 根据fd从map中获取指定的entity - // 如果entity已存在,返回entity指针,否则返回nullptr + // Obtain the specified entity from the map based on fd + // If entity already exists, return entity pointer; otherwise, return + // nullptr virtual NebdFileEntityPtr GetFileEntity(int fd); virtual FileEntityMap GetFileEntityMap(); - // 将所有文件状态输出到字符串 + // Output all file states to a string std::string DumpAllFileStatus(); // set public for test - // 启动时从metafile加载文件记录,并reopen文件 + // Load file records from metafile at startup and reopen the file int Load(); private: - // 分配新的可用的fd,fd不允许和已经存在的重复 - // 成功返回的可用fd,失败返回-1 + // Assign new available fds, fds are not allowed to duplicate existing ones + // Successfully returned available fd, failed returned -1 int GenerateValidFd(); - // 根据文件名获取file entity - // 如果entity存在,直接返回entity指针 - // 如果entity不存在,则创建新的entity,并插入map,然后返回 + // Obtain file entity based on file name + // If entity exists, directly return the entity pointer + // If the entity does not exist, create a new entity, insert a map, and then + // return NebdFileEntityPtr GetOrCreateFileEntity(const 
std::string& fileName); - // 根据fd和文件名生成file entity, - // 如果fd对于的entity已存在,直接返回entity指针 - // 如果entity不存在,则生成新的entity,并插入map,然后返回 + // Generate file entity based on fd and file name, + // If fd already exists for entity, directly return the entity pointer + // If the entity does not exist, generate a new entity, insert a map, and + // then return NebdFileEntityPtr GenerateFileEntity(int fd, const std::string& fileName); - // 删除指定fd对应的entity + // Delete the entity corresponding to the specified fd void RemoveEntity(int fd); private: - // 当前filemanager的运行状态,true表示正在运行,false标为未运行 + // The current running status of the filemanager, where true indicates + // running and false indicates not running std::atomic isRunning_; - // 文件名锁,对同名文件加锁 + // File name lock, lock files with the same name NameLock nameLock_; - // fd分配器 + // Fd distributor FdAllocator fdAlloc_; - // nebd server 文件记录管理 + // nebd server file record management MetaFileManagerPtr metaFileManager_; - // file map 读写保护锁 + // file map read write protection lock RWLock rwLock_; - // 文件fd和文件实体的映射 + // Mapping of file fd and file entities FileEntityMap fileMap_; }; using NebdFileManagerPtr = std::shared_ptr; diff --git a/nebd/src/part2/heartbeat_manager.cpp b/nebd/src/part2/heartbeat_manager.cpp index 4516874807..739bf586a7 100644 --- a/nebd/src/part2/heartbeat_manager.cpp +++ b/nebd/src/part2/heartbeat_manager.cpp @@ -20,11 +20,12 @@ * Author: yangyaokai */ -#include +#include "nebd/src/part2/heartbeat_manager.h" + #include +#include #include "nebd/src/common/timeutility.h" -#include "nebd/src/part2/heartbeat_manager.h" namespace nebd { namespace server { @@ -69,7 +70,7 @@ void HeartbeatManager::UpdateNebdClientInfo(int pid, const std::string& version, const auto& iter = nebdClients_.find(pid); if (iter == nebdClients_.end()) { nebdClients_[pid] = - std::make_shared(pid, version, timestamp); + std::make_shared(pid, version, timestamp); nebdClientNum_ << 1; } else { nebdClients_[pid]->timeStamp = timestamp; @@ -79,8 +80,8 @@ void HeartbeatManager::UpdateNebdClientInfo(int pid, const std::string& version, } void HeartbeatManager::CheckTimeoutFunc() { - while (sleeper_.wait_for( - std::chrono::milliseconds(checkTimeoutIntervalMs_))) { + while ( + sleeper_.wait_for(std::chrono::milliseconds(checkTimeoutIntervalMs_))) { LOG_EVERY_N(INFO, 60 * 1000 / checkTimeoutIntervalMs_) << "Checking timeout, file status: " << fileManager_->DumpAllFileStatus(); @@ -107,24 +108,24 @@ void HeartbeatManager::CheckTimeoutFunc() { bool HeartbeatManager::CheckNeedClosed(NebdFileEntityPtr entity) { uint64_t curTime = TimeUtility::GetTimeofDayMs(); uint64_t interval = curTime - entity->GetFileTimeStamp(); - // 文件如果是opened状态,并且已经超时,则需要调用close - bool needClose = entity->GetFileStatus() == NebdFileStatus::OPENED - && interval > (uint64_t)1000 * heartbeatTimeoutS_; + // If the file is in an open state and has timed out, you need to call close + bool needClose = entity->GetFileStatus() == NebdFileStatus::OPENED && + interval > (uint64_t)1000 * heartbeatTimeoutS_; return needClose; } std::ostream& operator<<(std::ostream& os, NebdClientInfo* info) { std::string standardTime; TimeUtility::TimeStampToStandard(info->timeStamp / 1000, &standardTime); - os << "pid: " << info->pid << ", version: " - << info->version.GetValueByKey(kVersion) + os << "pid: " << info->pid + << ", version: " << info->version.GetValueByKey(kVersion) << ", last time received heartbeat: " << standardTime; return os; } void HeartbeatManager::RemoveTimeoutNebdClient() { WriteLockGuard writeLock(rwLock_); 
- auto iter = nebdClients_.begin(); + auto iter = nebdClients_.begin(); while (iter != nebdClients_.end()) { uint64_t curTime = TimeUtility::GetTimeofDayMs(); uint64_t interval = curTime - iter->second->timeStamp; diff --git a/nebd/src/part2/heartbeat_manager.h b/nebd/src/part2/heartbeat_manager.h index 73943bc4bc..69b4c3eed2 100644 --- a/nebd/src/part2/heartbeat_manager.h +++ b/nebd/src/part2/heartbeat_manager.h @@ -24,32 +24,34 @@ #define NEBD_SRC_PART2_HEARTBEAT_MANAGER_H_ #include -#include // NOLINT + #include -#include #include +#include #include +#include // NOLINT #include "nebd/src/common/interrupt_sleep.h" #include "nebd/src/common/rw_lock.h" #include "nebd/src/common/stringstatus.h" -#include "nebd/src/part2/file_manager.h" #include "nebd/src/part2/define.h" +#include "nebd/src/part2/file_manager.h" namespace nebd { namespace server { using nebd::common::InterruptibleSleeper; +using nebd::common::ReadLockGuard; using nebd::common::RWLock; using nebd::common::WriteLockGuard; -using nebd::common::ReadLockGuard; struct HeartbeatManagerOption { - // 文件心跳超时时间(单位:秒) + // File heartbeat timeout (in seconds) uint32_t heartbeatTimeoutS; - // 心跳超时检测线程的检测间隔(时长:毫秒) + // Heartbeat timeout detection thread detection interval (duration: + // milliseconds) uint32_t checkTimeoutIntervalMs; - // filemanager 对象指针 + // filemanager object pointer NebdFileManagerPtr fileManager; }; @@ -57,42 +59,44 @@ const char kNebdClientMetricPrefix[] = "nebd_client_pid_"; const char kVersion[] = "version"; struct NebdClientInfo { - NebdClientInfo(int pid2, const std::string& version2, - uint64_t timeStamp2) : - pid(pid2), timeStamp(timeStamp2) { + NebdClientInfo(int pid2, const std::string& version2, uint64_t timeStamp2) + : pid(pid2), timeStamp(timeStamp2) { version.ExposeAs(kNebdClientMetricPrefix, - std::to_string(pid2) + "_version"); + std::to_string(pid2) + "_version"); version.Set(kVersion, version2); version.Update(); } - // nebd client的进程号 + // Process number of nebd client int pid; - // nebd version的metric + // The metric of nebd version nebd::common::StringStatus version; - // 上次心跳的时间戳 + // Time stamp of last heartbeat uint64_t timeStamp; }; -// 负责文件心跳超时管理 +// Responsible for managing file heartbeat timeout class HeartbeatManager { public: explicit HeartbeatManager(HeartbeatManagerOption option) - : isRunning_(false) - , heartbeatTimeoutS_(option.heartbeatTimeoutS) - , checkTimeoutIntervalMs_(option.checkTimeoutIntervalMs) - , fileManager_(option.fileManager) { + : isRunning_(false), + heartbeatTimeoutS_(option.heartbeatTimeoutS), + checkTimeoutIntervalMs_(option.checkTimeoutIntervalMs), + fileManager_(option.fileManager) { nebdClientNum_.expose("nebd_client_num"); } virtual ~HeartbeatManager() {} - // 启动心跳检测线程 + // Start Heartbeat Detection Thread virtual int Run(); - // 停止心跳检测线程 + // Stop Heartbeat Detection Thread virtual int Fini(); - // part2收到心跳后,会通过该接口更新心跳中包含的文件在内存中记录的时间戳 - // 心跳检测线程会根据该时间戳判断是否需要关闭文件 + // After receiving the heartbeat, part2 will update the timestamp of the + // files included in the heartbeat recorded in memory through this interface + // The heartbeat detection thread will determine whether the file needs to + // be closed based on this timestamp virtual bool UpdateFileTimestamp(int fd, uint64_t timestamp); - // part2收到心跳后,会通过该接口更新part1的时间戳 + // After receiving the heartbeat, part2 will update the timestamp of part1 + // through this interface virtual void UpdateNebdClientInfo(int pid, const std::string& version, uint64_t timestamp); std::map> GetNebdClients() { @@ -101,31 
+105,32 @@ class HeartbeatManager { } private: - // 心跳检测线程的函数执行体 + // Function execution body of heartbeat detection thread void CheckTimeoutFunc(); - // 判断文件是否需要close + // Determine if the file needs to be closed bool CheckNeedClosed(NebdFileEntityPtr entity); - // 从内存中删除已经超时的nebdClientInfo + // Delete nebdClientInfo that has timed out from memory void RemoveTimeoutNebdClient(); private: - // 当前heartbeatmanager的运行状态,true表示正在运行,false标为未运行 + // The current running status of heartbeatmanager, where true indicates + // running and false indicates not running std::atomic isRunning_; - // 文件心跳超时时长 + // File heartbeat timeout duration uint32_t heartbeatTimeoutS_; - // 心跳超时检测线程的检测时间间隔 + // Heartbeat timeout detection thread detection time interval uint32_t checkTimeoutIntervalMs_; - // 心跳检测线程 + // Heartbeat detection thread std::thread checkTimeoutThread_; - // 心跳检测线程的sleeper + // sleeper for Heartbeat Detection Thread InterruptibleSleeper sleeper_; - // filemanager 对象指针 + // filemanager object pointer NebdFileManagerPtr fileManager_; - // nebd client的信息 + // Information on nebd client std::map> nebdClients_; - // nebdClient的计数器 + // Counters for nebdClient bvar::Adder nebdClientNum_; - // file map 读写保护锁 + // file map read write protection lock RWLock rwLock_; }; diff --git a/nebd/src/part2/main.cpp b/nebd/src/part2/main.cpp index e72bb27cbf..0780796ae6 100644 --- a/nebd/src/part2/main.cpp +++ b/nebd/src/part2/main.cpp @@ -20,31 +20,32 @@ * Author: hzwuhongsong */ +#include #include #include -#include + #include "nebd/src/part2/nebd_server.h" #include "src/common/log_util.h" DEFINE_string(confPath, "/etc/nebd/nebd-server.conf", "nebd server conf path"); int main(int argc, char* argv[]) { - // 解析参数 + // Parsing parameters google::ParseCommandLineFlags(&argc, &argv, false); curve::common::DisableLoggingToStdErr(); google::InitGoogleLogging(argv[0]); std::string confPath = FLAGS_confPath.c_str(); - // 启动nebd server + // Start nebd server auto server = std::make_shared<::nebd::server::NebdServer>(); int initRes = server->Init(confPath); if (initRes < 0) { - LOG(ERROR) << "init nebd server fail"; + LOG(ERROR) << "init nebd server fail"; return -1; } server->RunUntilAskedToQuit(); - // 停止nebd server + // Stop nebd server server->Fini(); google::ShutdownGoogleLogging(); diff --git a/nebd/src/part2/metafile_manager.cpp b/nebd/src/part2/metafile_manager.cpp index 6fcdc5c94b..03c5f1d366 100644 --- a/nebd/src/part2/metafile_manager.cpp +++ b/nebd/src/part2/metafile_manager.cpp @@ -20,19 +20,18 @@ * Author: charisu */ +#include "nebd/src/part2/metafile_manager.h" + #include #include -#include "nebd/src/part2/metafile_manager.h" #include "nebd/src/part2/request_executor.h" namespace nebd { namespace server { NebdMetaFileManager::NebdMetaFileManager() - : metaFilePath_("") - , wrapper_(nullptr) - , parser_(nullptr) {} + : metaFilePath_(""), wrapper_(nullptr), parser_(nullptr) {} NebdMetaFileManager::~NebdMetaFileManager() {} @@ -52,9 +51,10 @@ int NebdMetaFileManager::Init(const NebdMetaFileManagerOption& option) { int NebdMetaFileManager::UpdateFileMeta(const std::string& fileName, const NebdFileMeta& fileMeta) { WriteLockGuard writeLock(rwLock_); - bool needUpdate = metaCache_.find(fileName) == metaCache_.end() - || fileMeta != metaCache_[fileName]; - // 如果元数据信息没发生变更,则不需要写文件 + bool needUpdate = metaCache_.find(fileName) == metaCache_.end() || + fileMeta != metaCache_[fileName]; + // If the metadata information has not changed, there is no need to write a + // file if (!needUpdate) { return 0; } @@ -105,29 
+105,29 @@ int NebdMetaFileManager::UpdateMetaFile(const FileMetaMap& fileMetas) { } int NebdMetaFileManager::AtomicWriteFile(const Json::Value& root) { - // 写入tmp文件 + // Write tmp file std::string tmpFilePath = metaFilePath_ + ".tmp"; - int fd = wrapper_->open(tmpFilePath.c_str(), O_CREAT|O_RDWR, 0644); - // open文件失败 + int fd = wrapper_->open(tmpFilePath.c_str(), O_CREAT | O_RDWR, 0644); + // Open file failed if (fd <= 0) { LOG(ERROR) << "Open tmp file " << tmpFilePath << " fail"; return -1; } - // 写入 + // Write std::string jsonString = root.toStyledString(); - int writeSize = wrapper_->pwrite(fd, jsonString.c_str(), - jsonString.size(), 0); + int writeSize = + wrapper_->pwrite(fd, jsonString.c_str(), jsonString.size(), 0); wrapper_->close(fd); if (writeSize != static_cast(jsonString.size())) { LOG(ERROR) << "Write tmp file " << tmpFilePath << " fail"; return -1; } - // 重命名 + // Rename int res = wrapper_->rename(tmpFilePath.c_str(), metaFilePath_.c_str()); if (res != 0) { - LOG(ERROR) << "rename file " << tmpFilePath << " to " - << metaFilePath_ << " fail"; + LOG(ERROR) << "rename file " << tmpFilePath << " to " << metaFilePath_ + << " fail"; return -1; } return 0; @@ -138,7 +138,8 @@ int NebdMetaFileManager::LoadFileMeta() { FileMetaMap tempMetas; std::ifstream in(metaFilePath_, std::ios::binary); if (!in) { - // 这里不应该返回错误,第一次初始化的时候文件可能还未创建 + // There should be no error returned here, the file may not have been + // created during the first initialization LOG(WARNING) << "File not exist: " << metaFilePath_; return 0; } @@ -149,8 +150,7 @@ int NebdMetaFileManager::LoadFileMeta() { bool ok = Json::parseFromStream(reader, in, &root, &errs); in.close(); if (!ok) { - LOG(ERROR) << "Parse meta file " << metaFilePath_ - << " fail: " << errs; + LOG(ERROR) << "Parse meta file " << metaFilePath_ << " fail: " << errs; return -1; } @@ -173,31 +173,28 @@ int NebdMetaFileManager::ListFileMeta(std::vector* fileMetas) { return 0; } -int NebdMetaFileParser::Parse(Json::Value root, - FileMetaMap* fileMetas) { +int NebdMetaFileParser::Parse(Json::Value root, FileMetaMap* fileMetas) { if (!fileMetas) { LOG(ERROR) << "the argument fileMetas is null pointer"; return -1; } fileMetas->clear(); - // 检验crc + // Check crc if (root[kCRC].isNull()) { - LOG(ERROR) << "Parse json: " << root - << " fail, no crc"; + LOG(ERROR) << "Parse json: " << root << " fail, no crc"; return -1; } uint32_t crcValue = root[kCRC].asUInt(); root.removeMember(kCRC); std::string jsonString = root.toStyledString(); - uint32_t crcCalc = nebd::common::CRC32(jsonString.c_str(), - jsonString.size()); + uint32_t crcCalc = + nebd::common::CRC32(jsonString.c_str(), jsonString.size()); if (crcValue != crcCalc) { - LOG(ERROR) << "Parse json: " << root - << " fail, crc not match"; + LOG(ERROR) << "Parse json: " << root << " fail, crc not match"; return -1; } - // 没有volume字段 + // No volume field const auto& volumes = root[kVolumes]; if (volumes.isNull()) { LOG(WARNING) << "No volumes in json: " << root; @@ -208,22 +205,21 @@ int NebdMetaFileParser::Parse(Json::Value root, NebdFileMeta meta; if (volume[kFileName].isNull()) { - LOG(ERROR) << "Parse json: " << root - << " fail, no filename"; + LOG(ERROR) << "Parse json: " << root << " fail, no filename"; return -1; } else { meta.fileName = volume[kFileName].asString(); } if (volume[kFd].isNull()) { - LOG(ERROR) << "Parse json: " << root - << " fail, no fd"; + LOG(ERROR) << "Parse json: " << root << " fail, no fd"; return -1; } else { meta.fd = volume[kFd].asInt(); } - // 除了filename和fd的部分统一放到xattr里面 + 
// Everything other than filename and fd is placed into xattr Json::Value::Members mem = volume.getMemberNames(); ExtendAttribute xattr; for (auto iter = mem.begin(); iter != mem.end(); iter++) { @@ -238,13 +234,13 @@ int NebdMetaFileParser::Parse(Json::Value root, } Json::Value NebdMetaFileParser::ConvertFileMetasToJson( - const FileMetaMap& fileMetas) { + const FileMetaMap& fileMetas) { Json::Value volumes; for (const auto& meta : fileMetas) { Json::Value volume; volume[kFileName] = meta.second.fileName; volume[kFd] = meta.second.fd; - for (const auto &item : meta.second.xattr) { + for (const auto& item : meta.second.xattr) { volume[item.first] = item.second; } volumes.append(volume); @@ -252,7 +248,7 @@ Json::Value NebdMetaFileParser::ConvertFileMetasToJson( Json::Value root; root[kVolumes] = volumes; - // 计算crc + // Calculate the crc std::string jsonString = root.toStyledString(); uint32_t crc = nebd::common::CRC32(jsonString.c_str(), jsonString.size()); root[kCRC] = crc;
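ConvertFileMetasToJson stamps the metadata JSON with a CRC computed over everything except the crc field itself, and Parse verifies it by removing the field and recomputing. A self-contained sketch of the verify side using jsoncpp; the crc32() below is a generic placeholder implementation, not the project's nebd::common::CRC32:

    #include <json/json.h>
    #include <cstddef>
    #include <cstdint>
    #include <string>

    // Generic reflected CRC-32 (polynomial 0xEDB88320), used here only as a
    // placeholder for the project's CRC routine.
    uint32_t crc32(const char* data, size_t len) {
        uint32_t crc = 0xFFFFFFFFu;
        for (size_t i = 0; i < len; ++i) {
            crc ^= static_cast<unsigned char>(data[i]);
            for (int k = 0; k < 8; ++k)
                crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
        }
        return ~crc;
    }

    // True if the "crc" field matches the checksum of the rest of the
    // document, mirroring the check in NebdMetaFileParser::Parse.
    bool VerifyMetaCrc(Json::Value root) {  // by value: we mutate a copy
        if (root["crc"].isNull()) return false;
        const uint32_t stored = root["crc"].asUInt();
        root.removeMember("crc");  // the checksum covers everything else
        const std::string text = root.toStyledString();
        return stored == crc32(text.c_str(), text.size());
    }

The write side pairs this with the tmp-file-plus-rename pattern in AtomicWriteFile, so a crash mid-write leaves either the old or the new metafile on disk, never a torn one.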
diff --git a/nebd/src/part2/metafile_manager.h b/nebd/src/part2/metafile_manager.h index a46255a467..35200fa9bc 100644 --- a/nebd/src/part2/metafile_manager.h +++ b/nebd/src/part2/metafile_manager.h @@ -24,16 +24,17 @@ #define NEBD_SRC_PART2_METAFILE_MANAGER_H_ #include -#include -#include -#include + #include +#include // NOLINT +#include #include // NOLINT -#include // NOLINT +#include +#include -#include "nebd/src/common/rw_lock.h" -#include "nebd/src/common/posix_wrapper.h" #include "nebd/src/common/crc32.h" +#include "nebd/src/common/posix_wrapper.h" +#include "nebd/src/common/rw_lock.h" #include "nebd/src/part2/define.h" #include "nebd/src/part2/util.h" @@ -41,9 +42,9 @@ namespace nebd { namespace server { using nebd::common::PosixWrapper; +using nebd::common::ReadLockGuard; using nebd::common::RWLock; using nebd::common::WriteLockGuard; -using nebd::common::ReadLockGuard; using FileMetaMap = std::unordered_map; const char kVolumes[] = "volumes"; @@ -53,17 +54,15 @@ const char kCRC[] = "crc"; class NebdMetaFileParser { public: - int Parse(Json::Value root, - FileMetaMap* fileMetas); + int Parse(Json::Value root, FileMetaMap* fileMetas); Json::Value ConvertFileMetasToJson(const FileMetaMap& fileMetas); }; struct NebdMetaFileManagerOption { std::string metaFilePath = ""; - std::shared_ptr wrapper - = std::make_shared(); - std::shared_ptr parser - = std::make_shared(); + std::shared_ptr wrapper = std::make_shared(); + std::shared_ptr parser = + std::make_shared(); }; class NebdMetaFileManager { @@ -71,37 +70,38 @@ class NebdMetaFileManager { NebdMetaFileManager(); virtual ~NebdMetaFileManager(); - // 初始化,主要从文件读取元数据信息并加载到内存 + // Initialize: read the metadata from the file and load it into memory virtual int Init(const NebdMetaFileManagerOption& option); - // 列出文件记录 + // List the file records virtual int ListFileMeta(std::vector* fileMetas); - // 更新文件元数据 + // Update file metadata virtual int UpdateFileMeta(const std::string& fileName, const NebdFileMeta& fileMeta); - // 删除文件元数据 + // Delete file metadata virtual int RemoveFileMeta(const std::string& fileName); private: - // 原子写文件 + // Atomically write the file int AtomicWriteFile(const Json::Value& root); - // 更新元数据文件并更新内存缓存 + // Update the metadata file and the in-memory cache int UpdateMetaFile(const FileMetaMap& fileMetas); - // 初始化从持久化文件读取到内存 + // Load the persisted file into memory during initialization int LoadFileMeta(); private: - // 元数据文件路径 + // Metadata file path std::string metaFilePath_; - // 文件系统操作封装 + // File system operation wrapper std::shared_ptr wrapper_; - // 用于解析Json格式的元数据 + // Parser for metadata in JSON format std::shared_ptr parser_; - // MetaFileManager 线程安全读写锁 + // Thread-safe read/write lock for the MetaFileManager RWLock rwLock_; - // meta文件内存缓存 + // In-memory cache of the meta file FileMetaMap metaCache_; }; using MetaFileManagerPtr = std::shared_ptr; diff --git a/nebd/src/part2/nebd_server.cpp b/nebd/src/part2/nebd_server.cpp index 74e5e2329d..89baaad537 100644 --- a/nebd/src/part2/nebd_server.cpp +++ b/nebd/src/part2/nebd_server.cpp @@ -20,19 +20,22 @@ * Author: lixiaocui */ +#include "nebd/src/part2/nebd_server.h" + #include + #include + #include "nebd/src/common/file_lock.h" -#include "nebd/src/part2/nebd_server.h" +#include "nebd/src/common/nebd_version.h" #include "nebd/src/part2/file_service.h" #include "nebd/src/part2/heartbeat_service.h" -#include "nebd/src/common/nebd_version.h" namespace nebd { namespace server { -int NebdServer::Init(const std::string &confPath, - std::shared_ptr curveClient) { +int NebdServer::Init(const std::string& confPath, + std::shared_ptr curveClient) { if (isRunning_) { LOG(WARNING) << "NebdServer is inited"; return -1; } @@ -75,7 +78,7 @@ int NebdServer::Init(const std::string &confPath, LOG(INFO) << "NebdServer init heartbeatManager ok"; LOG(INFO) << "NebdServer init ok"; - // 暴露版本信息 + // Expose version information LOG(INFO) << "nebd version: " << nebd::common::NebdVersion(); nebd::common::ExposeNebdVersion(); return 0; @@ -100,7 +103,7 @@ int NebdServer::Fini() { } if (curveClient_ != nullptr) { - curveClient_ ->UnInit(); + curveClient_->UnInit(); } if (heartbeatManager_ != nullptr) { @@ -110,7 +113,7 @@ int NebdServer::Fini() { return 0; } -bool NebdServer::LoadConfFromFile(const std::string &confPath) { +bool NebdServer::LoadConfFromFile(const std::string& confPath) { conf_.SetConfigPath(confPath); return conf_.LoadConfig(); } @@ -172,16 +175,16 @@ MetaFileManagerPtr NebdServer::InitMetaFileManager() { return metaFileManager; } -bool NebdServer::InitHeartbeatManagerOption(HeartbeatManagerOption *opt) { - bool getOk = conf_.GetUInt32Value( - HEARTBEATTIMEOUTSEC, &opt->heartbeatTimeoutS); +bool NebdServer::InitHeartbeatManagerOption(HeartbeatManagerOption* opt) { + bool getOk = + conf_.GetUInt32Value(HEARTBEATTIMEOUTSEC, &opt->heartbeatTimeoutS); if (false == getOk) { LOG(ERROR) << "NebdServer get heartbeat.timeout.sec fail"; return false; } - getOk = conf_.GetUInt32Value( - HEARTBEATCHECKINTERVALMS, &opt->checkTimeoutIntervalMs); + getOk = conf_.GetUInt32Value(HEARTBEATCHECKINTERVALMS, + &opt->checkTimeoutIntervalMs); if (false == getOk) { LOG(ERROR) << "NebdServer get heartbeat.check.interval.ms fail"; return false; } @@ -212,24 +215,24 @@ bool NebdServer::InitHeartbeatManager() { bool NebdServer::StartServer() { // add service bool returnRpcWhenIoError; - bool ret = conf_.GetBoolValue(RESPONSERETURNRPCWHENIOERROR, - &returnRpcWhenIoError); + bool ret = + conf_.GetBoolValue(RESPONSERETURNRPCWHENIOERROR, &returnRpcWhenIoError); if (false == ret) { LOG(ERROR) << "get " << RESPONSERETURNRPCWHENIOERROR << " fail"; return false; } NebdFileServiceImpl fileService(fileManager_, returnRpcWhenIoError); - int addFileServiceRes = server_.AddService( - &fileService, brpc::SERVER_DOESNT_OWN_SERVICE); + int addFileServiceRes = + server_.AddService(&fileService, brpc::SERVER_DOESNT_OWN_SERVICE); if (0 != addFileServiceRes) { LOG(ERROR) << "NebdServer add file service fail"; return false; } NebdHeartbeatServiceImpl heartbeatService(heartbeatManager_); - addFileServiceRes = 
server_.AddService( - &heartbeatService, brpc::SERVER_DOESNT_OWN_SERVICE); + addFileServiceRes = + server_.AddService(&heartbeatService, brpc::SERVER_DOESNT_OWN_SERVICE); if (0 != addFileServiceRes) { LOG(ERROR) << "NebdServer add heartbeat service fail"; return false; @@ -238,17 +241,17 @@ bool NebdServer::StartServer() { // start brcp server brpc::ServerOptions option; option.idle_timeout_sec = -1; - // 获取文件锁 + // Obtain file lock common::FileLock fileLock(listenAddress_ + ".lock"); if (fileLock.AcquireFileLock() != 0) { LOG(ERROR) << "Address already in use"; return -1; } - int startBrpcServerRes = server_.StartAtSockFile( - listenAddress_.c_str(), &option); + int startBrpcServerRes = + server_.StartAtSockFile(listenAddress_.c_str(), &option); if (0 != startBrpcServerRes) { LOG(ERROR) << "NebdServer start brpc server fail, res=" - << startBrpcServerRes; + << startBrpcServerRes; return false; } diff --git a/nebd/src/part2/nebd_server.h b/nebd/src/part2/nebd_server.h index c4ee40f23e..8a1275d23e 100644 --- a/nebd/src/part2/nebd_server.h +++ b/nebd/src/part2/nebd_server.h @@ -24,8 +24,10 @@ #define NEBD_SRC_PART2_NEBD_SERVER_H_ #include -#include + #include +#include + #include "nebd/src/common/configuration.h" #include "nebd/src/part2/file_manager.h" #include "nebd/src/part2/heartbeat_manager.h" @@ -34,17 +36,17 @@ namespace nebd { namespace server { -using ::nebd::common::Configuration; using ::curve::client::CurveClient; +using ::nebd::common::Configuration; class NebdServer { public: NebdServer() {} virtual ~NebdServer() {} - int Init(const std::string &confPath, - std::shared_ptr curveClient = - std::make_shared()); + int Init(const std::string& confPath, + std::shared_ptr curveClient = + std::make_shared()); int RunUntilAskedToQuit(); @@ -52,62 +54,64 @@ class NebdServer { private: /** - * @brief 从配置文件加载配置项 - * @param[in] confPath 配置文件路径 - * @return false-加载配置文件失败 true-加载配置文件成功 + * @brief Load configuration items from the configuration file + * @param[in] confPath Configuration file path + * @return false-Failed to load configuration file, true-Successfully loaded + * configuration file */ - bool LoadConfFromFile(const std::string &confPath); + bool LoadConfFromFile(const std::string& confPath); /** - * @brief 初始化NebdFileManager - * @return false-初始化失败 true-初始化成功 + * @brief Initialize NebdFileManager + * @return false-initialization failed, true-initialization successful */ bool InitFileManager(); /** - * @brief 初始化request_executor_curve - * @return false-初始化失败 true-初始化成功 + * @brief initialization request_executor_curve + * @return false-initialization failed, true-initialization successful */ bool InitCurveRequestExecutor(); /** - * @brief 初始化NebdMetaFileManager - * @return nullptr-初始化不成功 否则表示初始化成功 + * @brief Initialize NebdMetaFileManager + * @return nullptr - initialization failed; otherwise, it indicates + * successful initialization */ MetaFileManagerPtr InitMetaFileManager(); /** - * @brief 初始化HeartbeatManagerOption + * @brief Initialize HeartbeatManagerOption * @param[out] opt - * @return false-初始化失败 true-初始化成功 + * @return false-initialization failed, true-initialization successful */ - bool InitHeartbeatManagerOption(HeartbeatManagerOption *opt); + bool InitHeartbeatManagerOption(HeartbeatManagerOption* opt); /** - * @brief 初始化HeartbeatManager - * @return false-初始化失败 true-初始化成功 + * @brief Initialize HeartbeatManager + * @return false-initialization failed, true-initialization successful */ bool InitHeartbeatManager(); /** - * @brief 启动brpc service - * @return 
false-启动service失败 true-启动service成功 + * @brief Start the brpc service + * @return false-failed to start the service, true-service started + * successfully */ bool StartServer(); private: - // 配置项 + // Configuration items Configuration conf_; - // NebdServer监听地址 + // NebdServer listening address std::string listenAddress_; - // NebdServer是否处于running状态 - bool isRunning_ = false; + // Whether the NebdServer is running + bool isRunning_ = false; // brpc server brpc::Server server_; - // 用于接受和处理client端的各种请求 + // Used to accept and process the various requests from the client side std::shared_ptr fileManager_; - // 负责文件心跳超时处理 + // Responsible for handling file heartbeat timeouts std::shared_ptr heartbeatManager_; // curveclient std::shared_ptr curveClient_; diff --git a/nebd/src/part2/request_executor.h b/nebd/src/part2/request_executor.h index 0d69e3c9c8..2098ca87a4 100644 --- a/nebd/src/part2/request_executor.h +++ b/nebd/src/part2/request_executor.h @@ -24,8 +24,9 @@ #define NEBD_SRC_PART2_REQUEST_EXECUTOR_H_ #include -#include #include +#include + #include "nebd/src/part2/define.h" namespace nebd { @@ -41,14 +42,16 @@ class CurveRequestExecutor; using OpenFlags = nebd::client::ProtoOpenFlags; -// 具体RequestExecutor中会用到的文件实例上下文信息 -// RequestExecutor需要用到的文件上下文信息都记录到FileInstance内 +// File instance context used by a concrete RequestExecutor. +// All file context that a RequestExecutor needs is recorded in the +// FileInstance class NebdFileInstance { public: NebdFileInstance() {} virtual ~NebdFileInstance() {} - // 需要持久化到文件的内容,以kv形式返回,例如curve open时返回的sessionid - // 文件reopen的时候也会用到该内容 + // Content that needs to be persisted to the file, returned in kv form, + // e.g. the sessionid returned by curve open. It is also used when the + // file is reopened ExtendAttribute xattr; }; @@ -65,7 +68,8 @@ class NebdRequestExecutor { virtual int GetInfo(NebdFileInstance* fd, NebdFileInfo* fileInfo) = 0; virtual int Discard(NebdFileInstance* fd, NebdServerAioContext* aioctx) = 0; virtual int AioRead(NebdFileInstance* fd, NebdServerAioContext* aioctx) = 0; - virtual int AioWrite(NebdFileInstance* fd, NebdServerAioContext* aioctx) = 0; // NOLINT + virtual int AioWrite(NebdFileInstance* fd, + NebdServerAioContext* aioctx) = 0; // NOLINT virtual int Flush(NebdFileInstance* fd, NebdServerAioContext* aioctx) = 0; virtual int InvalidCache(NebdFileInstance* fd) = 0; }; diff --git a/nebd/src/part2/request_executor_curve.h b/nebd/src/part2/request_executor_curve.h index 11606d1bb1..a96409e5c4 100644 --- a/nebd/src/part2/request_executor_curve.h +++ b/nebd/src/part2/request_executor_curve.h @@ -23,12 +23,13 @@ #ifndef NEBD_SRC_PART2_REQUEST_EXECUTOR_CURVE_H_ #define NEBD_SRC_PART2_REQUEST_EXECUTOR_CURVE_H_ -#include #include +#include #include -#include "nebd/src/part2/request_executor.h" -#include "nebd/src/part2/define.h" + #include "include/client/libcurve.h" +#include "nebd/src/part2/define.h" +#include "nebd/src/part2/request_executor.h" namespace nebd { namespace server { @@ -54,17 +55,22 @@ void CurveAioCallback(struct CurveAioContext* curveCtx); class FileNameParser { public: /** - * @brief 解析fileName - * 一般格式: - * qemu "cbd:pool1//cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_:/etc/curve/client.conf" //NOLINT - * nbd "cbd:pool1//cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_" // NOLINT + * @brief Parse fileName + * General format: + * qemu + * "cbd:pool1//cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_:/etc/curve/client.conf" + * //NOLINT nbd + * 
"cbd:pool1//cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_" + * // NOLINT * @param[in] fileName - * @return 解析结果 - * qemu "/cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_", "/etc/curve/client.conf" //NOLINT - * nbd "/cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_", "" //NOLINT + * @return Parsing Result + * qemu "/cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_", + * "/etc/curve/client.conf" //NOLINT nbd + * "/cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_", "" + * //NOLINT */ - static std::pair - Parse(const std::string& fileName); + static std::pair Parse( + const std::string& fileName); }; class CurveRequestExecutor : public NebdRequestExecutor { @@ -74,7 +80,7 @@ class CurveRequestExecutor : public NebdRequestExecutor { return executor; } ~CurveRequestExecutor() {} - void Init(const std::shared_ptr &client); + void Init(const std::shared_ptr& client); std::shared_ptr Open(const std::string& filename, const OpenFlags* openflags) override; std::shared_ptr Reopen( @@ -90,40 +96,42 @@ class CurveRequestExecutor : public NebdRequestExecutor { private: /** - * @brief 构造函数 + * @brief constructor */ CurveRequestExecutor() {} /** - * @brief 从NebdFileInstance中解析出curve_client需要的fd - * @param[in] fd NebdFileInstance类型 - * @return 返回curve_client中文件的fd, 如果小于0,表示解析结果错误 + * @brief Parse the fd needed by curve_client from NebdFileInstance. + * @param[in] fd NebdFileInstance type. + * @return Returns the fd of the file in curve_client. If less than 0, it + * indicates an error in the parsing result. */ int GetCurveFdFromNebdFileInstance(NebdFileInstance* fd); /** - * @brief 从NebdFileInstance中解析出curbe_client需要的filename - * @param[in] fd NebdFileInstance类型 - * @return 返回curve_client中的filename, 如果为空,表示解析出错 + * @brief Parse the filename needed by curve_client from NebdFileInstance. + * @param[in] fd NebdFileInstance type. + * @return Returns the filename in curve_client. If empty, it indicates an + * error in the parsing. 
*/ std::string GetFileNameFromNebdFileInstance(NebdFileInstance* fd); /** - * @brief 将NebdServerAioContext类型转换为CurveAioContext类型 - * @param[in] nebdCtx NebdServerAioContext类型 - * @param[out] curveCtx CurveAioContext类型 - * @return -1转换失败,0转换成功 + * @brief Convert NebdServerAioContext type to CurveAioContext type + * @param[in] nebdCtx NebdServerAioContext type + * @param[out] curveCtx CurveAioContext type + * @return -1 conversion failed, 0 conversion succeeded */ - int FromNebdCtxToCurveCtx( - NebdServerAioContext *nebdCtx, CurveAioContext *curveCtx); + int FromNebdCtxToCurveCtx(NebdServerAioContext* nebdCtx, + CurveAioContext* curveCtx); /** - * @brief 将LIBAIO_OP类型转换为curve_client中LIBCURVE_OP类型 - * @param[in] op LIBAIO_OP类型 - * @param[out] out LIBCURVE_OP类型 - * @return -1转换失败,0转换成功 + * @brief Convert LIBAIO_OP types to LIBCURVE_OP types in the curve_client + * @param[in] op LIBAIO_OP type + * @param[out] out LIBCURVE_OP type + * @return -1 conversion failed, 0 conversion succeeded */ - int FromNebdOpToCurveOp(LIBAIO_OP op, LIBCURVE_OP *out); + int FromNebdOpToCurveOp(LIBAIO_OP op, LIBCURVE_OP* out); private: std::shared_ptr<::curve::client::CurveClient> client_; diff --git a/nebd/src/part2/util.h b/nebd/src/part2/util.h index f733a04577..0894d69ebe 100644 --- a/nebd/src/part2/util.h +++ b/nebd/src/part2/util.h @@ -23,9 +23,9 @@ #ifndef NEBD_SRC_PART2_UTIL_H_ #define NEBD_SRC_PART2_UTIL_H_ -#include #include // NOLINT #include +#include #include "nebd/src/part2/define.h" @@ -51,9 +51,9 @@ class FdAllocator { FdAllocator() : fd_(0) {} ~FdAllocator() {} - // fd的有效值范围为[1, INT_MAX] + // The valid range of values for fd is [1, INT_MAX] int GetNext(); - // 初始化fd的值 + // Initialize the value of fd void InitFd(int fd); private: diff --git a/nebd/test/common/configuration_test.cpp b/nebd/test/common/configuration_test.cpp index 4c9e7b7c21..ef24eeb42a 100644 --- a/nebd/test/common/configuration_test.cpp +++ b/nebd/test/common/configuration_test.cpp @@ -21,15 +21,15 @@ * 2018/11/23 Wenyu Zhou Initial version */ -#include +#include "nebd/src/common/configuration.h" + #include +#include -#include -#include #include +#include #include - -#include "nebd/src/common/configuration.h" +#include namespace nebd { namespace common { @@ -86,9 +86,7 @@ class ConfigurationTest : public ::testing::Test { cFile << confItem; } - void TearDown() { - ASSERT_EQ(0, unlink(confFile_.c_str())); - } + void TearDown() { ASSERT_EQ(0, unlink(confFile_.c_str())); } std::string confFile_; }; @@ -136,10 +134,10 @@ TEST_F(ConfigurationTest, ListConfig) { std::map configs; configs = conf.ListConfig(); ASSERT_NE(0, configs.size()); - // 抽几个key来校验以下 + // Pick a few keys for validation. 
ASSERT_EQ(configs["test.int1"], "12345"); ASSERT_EQ(configs["test.bool1"], "0"); - // 如果key不存在,返回为空 + // If the key does not exist, return empty ASSERT_EQ(configs["xxx"], ""); } @@ -148,18 +146,19 @@ TEST_F(ConfigurationTest, SaveConfig) { Configuration conf; conf.SetConfigPath(confFile_); - // 自定义配置项并保存 + // Customize configuration items and save them conf.SetStringValue("test.str1", "new"); ret = conf.SaveConfig(); ASSERT_EQ(ret, true); - // 重新加载配置项 + // Reload Configuration Items Configuration conf2; conf2.SetConfigPath(confFile_); ret = conf2.LoadConfig(); ASSERT_EQ(ret, true); - // 可以读取自定义配置项,原有配置项被覆盖,读取不到 + // Custom configuration items can be read, but the original configuration + // items are overwritten and cannot be read ASSERT_EQ(conf2.GetValue("test.str1"), "new"); ASSERT_EQ(conf2.GetValue("test.int1"), ""); } @@ -301,7 +300,7 @@ TEST_F(ConfigurationTest, GetSetDoubleAndFloatValue) { } // namespace common } // namespace nebd -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); int ret = RUN_ALL_TESTS(); diff --git a/nebd/test/common/test_name_lock.cpp b/nebd/test/common/test_name_lock.cpp index 1f79ec5800..574667ad8b 100644 --- a/nebd/test/common/test_name_lock.cpp +++ b/nebd/test/common/test_name_lock.cpp @@ -21,6 +21,7 @@ */ #include + #include #include // NOLINT @@ -32,29 +33,27 @@ namespace common { TEST(TestNameLock, TestNameLockBasic) { NameLock lock1, lock2, lock3; - // lock测试 + // Lock test lock1.Lock("str1"); - // 同锁不同str可lock不死锁 + // Same lock but different strs can lock without deadlock lock1.Lock("str2"); - // 不同锁同str可lock不死锁 + // Different locks with the same str can lock without deadlock lock2.Lock("str1"); - - - // 同锁同str TryLock失败 + // Same lock with str TryLock failed ASSERT_FALSE(lock1.TryLock("str1")); - // 同锁不同str TryLock成功 + // Same lock different str TryLock successful ASSERT_TRUE(lock1.TryLock("str3")); - // 不同锁同str TryLock成功 + // Different locks with str TryLock succeeded ASSERT_TRUE(lock3.TryLock("str1")); - // unlock测试 + // Unlock test lock1.Unlock("str1"); lock1.Unlock("str2"); lock1.Unlock("str3"); lock2.Unlock("str1"); lock3.Unlock("str1"); - // 未锁unlock ok + // Unlock OK lock2.Unlock("str2"); } @@ -64,12 +63,13 @@ TEST(TestNameLock, TestNameLockGuardBasic) { NameLockGuard guard1(lock1, "str1"); NameLockGuard guard2(lock1, "str2"); NameLockGuard guard3(lock2, "str1"); - // 作用域内加锁成功,不可再加锁 + // Successfully locked within the scope, unable to lock again ASSERT_FALSE(lock1.TryLock("str1")); ASSERT_FALSE(lock1.TryLock("str2")); ASSERT_FALSE(lock2.TryLock("str1")); } - // 作用域外自动解锁,可再加锁 + // Automatically unlocking outside the scope, with the option to add locks + // again ASSERT_TRUE(lock1.TryLock("str1")); ASSERT_TRUE(lock1.TryLock("str2")); ASSERT_TRUE(lock2.TryLock("str1")); @@ -80,14 +80,14 @@ TEST(TestNameLock, TestNameLockGuardBasic) { TEST(TestNameLock, TestNameLockConcurrent) { NameLock lock1; - auto worker = [&] (const std::string &str) { + auto worker = [&](const std::string& str) { for (int i = 0; i < 10000; i++) { NameLockGuard guard(lock1, str); } }; std::vector threadpool; - for (auto &t : threadpool) { + for (auto& t : threadpool) { std::string str1 = "aaaa"; std::string str2 = "bbbb"; std::srand(std::time(nullptr)); @@ -95,12 +95,10 @@ TEST(TestNameLock, TestNameLockConcurrent) { t = std::thread(worker, rstr); } - for (auto &t : threadpool) { + for (auto& t : threadpool) { t.join(); } } - - -} // namespace common -} // namespace nebd +} // 
namespace common +} // namespace nebd diff --git a/nebd/test/part1/heartbeat_manager_unittest.cpp b/nebd/test/part1/heartbeat_manager_unittest.cpp index 72de6802d4..3d95f9adf4 100644 --- a/nebd/test/part1/heartbeat_manager_unittest.cpp +++ b/nebd/test/part1/heartbeat_manager_unittest.cpp @@ -20,14 +20,15 @@ * Author: hzchenwei7 */ -#include -#include +#include "nebd/src/part1/heartbeat_manager.h" + #include +#include +#include #include #include // NOLINT -#include "nebd/src/part1/heartbeat_manager.h" #include "nebd/src/part1/nebd_metacache.h" #include "nebd/test/part1/fake_heartbeat_service.h" @@ -66,24 +67,20 @@ class HeartbeatManagerTest : public testing::Test { HeartbeatOption option; }; -TEST_F(HeartbeatManagerTest, InitTest) { - ASSERT_EQ(0, manager->Init( - option)); -} +TEST_F(HeartbeatManagerTest, InitTest) { ASSERT_EQ(0, manager->Init(option)); } TEST_F(HeartbeatManagerTest, InvokeTimesTest) { - ASSERT_EQ(0, manager->Init( - option)); + ASSERT_EQ(0, manager->Init(option)); manager->Run(); - // metaCache中数据为空,不发送心跳消息 + // The data in metaCache is empty and no heartbeat message will be sent for (int i = 0; i < 10; ++i) { ASSERT_EQ(0, fakeHeartBeatService.GetInvokeTimes()); std::this_thread::sleep_for(std::chrono::seconds(1)); } - // 添加数据 + // Add data NebdClientFileInfo fileInfo(1, "/test1", FileLock("/test1.lock")); metaCache->AddFileInfo(fileInfo); @@ -91,7 +88,7 @@ TEST_F(HeartbeatManagerTest, InvokeTimesTest) { int times = fakeHeartBeatService.GetInvokeTimes(); ASSERT_TRUE(times >= 9 && times <= 11); - // 清空metaCache数据 + // Clear MetaCache data metaCache->RemoveFileInfo(1); std::this_thread::sleep_for(std::chrono::seconds(2)); @@ -103,13 +100,12 @@ TEST_F(HeartbeatManagerTest, InvokeTimesTest) { } TEST_F(HeartbeatManagerTest, RequestValidTest) { - ASSERT_EQ(0, manager->Init( - option)); + ASSERT_EQ(0, manager->Init(option)); manager->Run(); std::vector currentFileInfos; - // 添加一个文件 + // Add a file NebdClientFileInfo fileInfo(1, "/test1", FileLock("/test1.lock")); metaCache->AddFileInfo(fileInfo); HeartbeatFileInfo info; @@ -126,7 +122,7 @@ TEST_F(HeartbeatManagerTest, RequestValidTest) { ASSERT_EQ(currentFileInfos[i].name(), latestFileInfos[i].name()); } - // 添加第二个文件 + // Add second file fileInfo = NebdClientFileInfo(2, "/test2", FileLock("/test2.lock")); metaCache->AddFileInfo(fileInfo); info.set_fd(2); @@ -147,7 +143,7 @@ TEST_F(HeartbeatManagerTest, RequestValidTest) { ASSERT_EQ(currentFileInfos[i].name(), latestFileInfos[i].name()); } - // 删除第一个文件 + // Delete the first file metaCache->RemoveFileInfo(1); currentFileInfos.erase(currentFileInfos.begin()); @@ -166,7 +162,7 @@ TEST_F(HeartbeatManagerTest, RequestValidTest) { } // namespace client } // namespace nebd -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/nebd/test/part1/nebd_client_unittest.cpp b/nebd/test/part1/nebd_client_unittest.cpp index 6822947653..2f3e18910f 100644 --- a/nebd/test/part1/nebd_client_unittest.cpp +++ b/nebd/test/part1/nebd_client_unittest.cpp @@ -20,18 +20,18 @@ * Author: wuhanqing */ -#include -#include +#include "nebd/src/part1/nebd_client.h" + #include +#include +#include -#include // NOLINT -#include // NOLINT #include +#include // NOLINT +#include // NOLINT -#include "nebd/src/part1/nebd_client.h" #include "nebd/src/part1/libnebd.h" #include "nebd/src/part1/libnebd_file.h" - #include "nebd/test/part1/fake_file_service.h" #include "nebd/test/part1/mock_file_service.h" #include 
"nebd/test/utils/config_generator.h" @@ -79,16 +79,14 @@ void AioRpcFailCallBack(NebdClientAioContext* ctx) { template void MockClientFunc(google::protobuf::RpcController* cntl_base, - const Request* request, - Response* response, + const Request* request, Response* response, google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); } template void MockClientRpcFailedFunc(google::protobuf::RpcController* cntl_base, - const Request* request, - Response* response, + const Request* request, Response* response, google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); static int invokeTimes = 0; @@ -110,20 +108,20 @@ class NebdFileClientTest : public ::testing::Test { void TearDown() override {} void AddFakeService() { - ASSERT_EQ(0, server.AddService( - &fakeService, - brpc::SERVER_DOESNT_OWN_SERVICE)) << "Add service failed"; + ASSERT_EQ( + 0, server.AddService(&fakeService, brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Add service failed"; } void AddMockService() { - ASSERT_EQ(0, server.AddService( - &mockService, - brpc::SERVER_DOESNT_OWN_SERVICE)) << "Add service failed"; + ASSERT_EQ( + 0, server.AddService(&mockService, brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Add service failed"; } void StartServer(const std::string& address = kNebdServerTestAddress) { - ASSERT_EQ(0, server.StartAtSockFile( - address.c_str(), nullptr)) << "Start server failed"; + ASSERT_EQ(0, server.StartAtSockFile(address.c_str(), nullptr)) + << "Start server failed"; } void StopServer() { @@ -137,15 +135,15 @@ class NebdFileClientTest : public ::testing::Test { }; using ::testing::_; -using ::testing::Invoke; -using ::testing::Return; using ::testing::AnyNumber; +using ::testing::AtLeast; using ::testing::DoAll; -using ::testing::SetArgPointee; -using ::testing::SetArgReferee; using ::testing::InSequence; -using ::testing::AtLeast; +using ::testing::Invoke; +using ::testing::Return; using ::testing::SaveArgPointee; +using ::testing::SetArgPointee; +using ::testing::SetArgReferee; TEST_F(NebdFileClientTest, AioRpcFailTest) { AddMockService(); @@ -167,7 +165,8 @@ TEST_F(NebdFileClientTest, AioRpcFailTest) { EXPECT_CALL(mockService, Write(_, _, _, _)) .Times(10) .WillRepeatedly( - Invoke(MockClientRpcFailedFunc)); // NOLINT + Invoke(MockClientRpcFailedFunc)); // NOLINT aioOpReturn = false; auto start = std::chrono::system_clock::now(); @@ -177,9 +176,11 @@ TEST_F(NebdFileClientTest, AioRpcFailTest) { cond.wait(ulk, []() { return aioOpReturn.load(); }); ASSERT_TRUE(aioOpReturn.load()); auto end = std::chrono::system_clock::now(); - auto elpased = std::chrono::duration_cast(end - start).count(); // NOLINT + auto elpased = + std::chrono::duration_cast(end - start) + .count(); // NOLINT - // 重试睡眠时间: 100ms + 200ms + ... + 900ms = 4500ms + // Retrying sleep time: 100ms + 200ms + ... 
+ 900ms = 4500ms ASSERT_TRUE(elpased >= 4000 && elpased <= 5000); } @@ -196,7 +197,8 @@ TEST_F(NebdFileClientTest, AioRpcFailTest) { EXPECT_CALL(mockService, Read(_, _, _, _)) .Times(10) .WillRepeatedly( - Invoke(MockClientRpcFailedFunc)); // NOLINT + Invoke(MockClientRpcFailedFunc)); // NOLINT aioOpReturn = false; ASSERT_EQ(0, AioRead4Nebd(1, ctx)); @@ -218,7 +220,8 @@ TEST_F(NebdFileClientTest, AioRpcFailTest) { EXPECT_CALL(mockService, Discard(_, _, _, _)) .Times(10) .WillRepeatedly( - Invoke(MockClientRpcFailedFunc)); // NOLINT + Invoke(MockClientRpcFailedFunc)); // NOLINT aioOpReturn = false; ASSERT_EQ(0, Discard4Nebd(1, ctx)); @@ -240,7 +243,8 @@ TEST_F(NebdFileClientTest, AioRpcFailTest) { EXPECT_CALL(mockService, Flush(_, _, _, _)) .Times(10) .WillRepeatedly( - Invoke(MockClientRpcFailedFunc)); // NOLINT + Invoke(MockClientRpcFailedFunc)); // NOLINT aioOpReturn = false; ASSERT_EQ(0, Flush4Nebd(1, ctx)); @@ -261,10 +265,12 @@ TEST_F(NebdFileClientTest, NoNebdServerTest) { auto start = std::chrono::system_clock::now(); ASSERT_EQ(-1, Open4Nebd(kFileName, nullptr)); auto end = std::chrono::system_clock::now(); - auto elapsed = std::chrono::duration_cast( - end - start).count(); + auto elapsed = + std::chrono::duration_cast(end - start) + .count(); - // rpc failed的清空下,睡眠100ms后继续重试,共重试10次 + // When an RPC fails, the client sleeps for 100ms and then retries, + // 10 retries in total ASSERT_TRUE(elapsed >= 900 && elapsed <= 1100); } ASSERT_EQ(-1, Extend4Nebd(1, kFileSize)); @@ -380,8 +386,8 @@ TEST_F(NebdFileClientTest, ReOpenTest) { int fd = Open4Nebd(kFileName, nullptr); ASSERT_GT(fd, 0); - // 文件已经被打开,并占用文件锁 - // 再次打开时,获取文件锁失败,直接返回 + // The file is already open and holds the file lock + // Opening it again fails to acquire the file lock and returns directly ASSERT_EQ(-1, Open4Nebd(kFileName, nullptr)); ASSERT_EQ(0, Close4Nebd(fd)); @@ -406,9 +412,10 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, OpenFile(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); // NOLINT ASSERT_EQ(-1, Open4Nebd(kFileName, nullptr)); } @@ -417,9 +424,10 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, CloseFile(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); // NOLINT ASSERT_EQ(0, Close4Nebd(0)); } @@ -428,9 +436,9 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, ResizeFile(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); ASSERT_EQ(-1, Extend4Nebd(1, kFileSize)); } @@ -439,9 +447,10 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, GetInfo(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); // NOLINT ASSERT_EQ(-1, GetFileSize4Nebd(1)); } @@ -450,9 +459,9 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, GetInfo(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - 
SetArgPointee<2>(response), - Invoke(MockClientFunc))); + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); ASSERT_EQ(-1, GetBlockSize4Nebd(1)); } @@ -461,9 +470,10 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, GetInfo(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); // NOLINT ASSERT_EQ(-1, GetInfo4Nebd(1)); } @@ -474,7 +484,8 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { .Times(1) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + Invoke(MockClientFunc))); // NOLINT ASSERT_EQ(-1, InvalidCache4Nebd(1)); } @@ -496,7 +507,8 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { .Times(1) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + Invoke( + MockClientFunc))); // NOLINT aioOpReturn = false; ASSERT_EQ(0, AioWrite4Nebd(1, ctx)); std::unique_lock ulk(mtx); @@ -518,9 +530,8 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, Read(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); aioOpReturn = false; ASSERT_EQ(0, AioRead4Nebd(1, ctx)); std::unique_lock ulk(mtx); @@ -542,9 +553,10 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, Discard(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); // NOLINT + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); // NOLINT aioOpReturn = false; ASSERT_EQ(0, Discard4Nebd(1, ctx)); std::unique_lock ulk(mtx); @@ -566,9 +578,9 @@ TEST_F(NebdFileClientTest, ResponseFailTest) { response.set_retcode(RetCode::kNoOK); EXPECT_CALL(mockService, Flush(_, _, _, _)) .Times(1) - .WillOnce(DoAll( - SetArgPointee<2>(response), - Invoke(MockClientFunc))); + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke(MockClientFunc))); aioOpReturn = false; ASSERT_EQ(0, Flush4Nebd(1, ctx)); std::unique_lock ulk(mtx); @@ -596,14 +608,12 @@ TEST_F(NebdFileClientTest, InitAndUninitTest) { } // namespace client } // namespace nebd - int main(int argc, char* argv[]) { - std::vector nebdConfig { + std::vector nebdConfig{ std::string("nebdserver.serverAddress=") + kNebdServerTestAddress, std::string("metacache.fileLockPath=/tmp"), std::string("request.syncRpcMaxRetryTimes=10"), - std::string("log.path=.") - }; + std::string("log.path=.")}; nebd::common::NebdClientConfigGenerator generator; generator.SetConfigPath(kNebdClientConf); diff --git a/nebd/test/part2/file_manager_unittest.cpp b/nebd/test/part2/file_manager_unittest.cpp index 0d13a7b18c..0b59f918aa 100644 --- a/nebd/test/part2/file_manager_unittest.cpp +++ b/nebd/test/part2/file_manager_unittest.cpp @@ -20,15 +20,17 @@ * Author: yangyaokai */ -#include +#include "nebd/src/part2/file_manager.h" + #include -#include +#include + #include +#include -#include "nebd/src/part2/file_manager.h" #include "nebd/src/part2/file_entity.h" -#include "nebd/test/part2/mock_request_executor.h" #include "nebd/test/part2/mock_metafile_manager.h" +#include "nebd/test/part2/mock_request_executor.h" namespace nebd { namespace server { @@ -38,11 +40,11 @@ const char testFile2[] = "test:/cinder/222"; const char unknownFile[] = "un:/cinder/666"; 
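// The fixtures below drive NebdFileEntity through its status transitions. A
// compact summary of the state machine that these assertions encode (inferred
// from the tests themselves, not from a separate design document):
//
//   OPENED    --Close(removeRecord=false)-->  CLOSED     (executor Close called)
//   CLOSED    --Open / next request-------->  OPENED     (the same fd is reused)
//   CLOSED    --Close(removeRecord=true)--->  DESTROYED  (file meta removed)
//   DESTROYED --any request---------------->  fails (-1); entity leaves the map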
using ::testing::_; -using ::testing::Return; -using ::testing::NotNull; using ::testing::DoAll; -using ::testing::ReturnArg; using ::testing::ElementsAre; +using ::testing::NotNull; +using ::testing::Return; +using ::testing::ReturnArg; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; @@ -66,12 +68,10 @@ class FileManagerTest : public ::testing::Test { metaFileManager_ = std::make_shared(); fileManager_ = std::make_shared(metaFileManager_); } - void TearDown() { - delete aioContext_; - } + void TearDown() { delete aioContext_; } using TestTask = std::function; - // 构造初始环境 + // Construct initial environment void InitEnv() { NebdFileMeta meta; meta.fd = 1; @@ -80,18 +80,14 @@ class FileManagerTest : public ::testing::Test { fileMetas.emplace_back(meta); EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) - .WillOnce(DoAll(SetArgPointee<0>(fileMetas), - Return(0))); - EXPECT_CALL(*executor_, Reopen(_, _)) - .WillOnce(Return(mockInstance_)); + .WillOnce(DoAll(SetArgPointee<0>(fileMetas), Return(0))); + EXPECT_CALL(*executor_, Reopen(_, _)).WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(_, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ASSERT_EQ(fileManager_->Run(), 0); } - void UnInitEnv() { - ASSERT_EQ(fileManager_->Fini(), 0); - } + void UnInitEnv() { ASSERT_EQ(fileManager_->Fini(), 0); } void ExpectCallRequest(RequestType type, int ret) { switch (type) { @@ -125,20 +121,19 @@ class FileManagerTest : public ::testing::Test { ASSERT_NE(nullptr, entity1); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // 文件状态为OPENED + // The file status is OPENED ExpectCallRequest(type, 0); ASSERT_EQ(0, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); ASSERT_EQ(entity1->Close(false), 0); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 文件状态为CLOSED + // The file status is CLOSED EXPECT_CALL(*executor_, Open(testFile1, _)) - .WillOnce(Return(mockInstance_)); + .WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ExpectCallRequest(type, 0); ASSERT_EQ(0, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); @@ -147,53 +142,47 @@ class FileManagerTest : public ::testing::Test { void RequestFailTest(RequestType type, TestTask task) { InitEnv(); - // 将文件close + // Close the file NebdFileEntityPtr entity1 = fileManager_->GetFileEntity(1); ASSERT_NE(nullptr, entity1); - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); ASSERT_EQ(entity1->Close(false), 0); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // open文件失败 - EXPECT_CALL(*executor_, Open(testFile1, _)) - .WillOnce(Return(nullptr)); - EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) - .Times(0); + // Open file failed + EXPECT_CALL(*executor_, Open(testFile1, _)).WillOnce(Return(nullptr)); + EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)).Times(0); ASSERT_EQ(-1, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 更新元数据文件失败 + // Failed to update metadata file EXPECT_CALL(*executor_, Open(testFile1, _)) - .WillOnce(Return(mockInstance_)); + .WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*executor_, 
Close(NotNull())) - .WillOnce(Return(0)); + .WillOnce(Return(-1)); + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); ASSERT_EQ(-1, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 执行处理函数失败 + // Failed to execute processing function EXPECT_CALL(*executor_, Open(testFile1, _)) - .WillOnce(Return(mockInstance_)); + .WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ExpectCallRequest(type, -1); ASSERT_EQ(-1, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // 将文件状态置为DESTROYED - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); + // Set the file status to DESTROYED + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ASSERT_EQ(entity1->Close(true), 0); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::DESTROYED); - EXPECT_CALL(*executor_, Open(testFile1, _)) - .Times(0); + EXPECT_CALL(*executor_, Open(testFile1, _)).Times(0); ASSERT_EQ(-1, task(1)); - // 直接将文件删除 + // Delete files directly ASSERT_EQ(0, fileManager_->Close(1, true)); ASSERT_EQ(nullptr, fileManager_->GetFileEntity(1)); ASSERT_EQ(-1, task(1)); @@ -216,17 +205,14 @@ TEST_F(FileManagerTest, RunTest) { fileMetas.emplace_back(meta); EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) - .WillOnce(DoAll(SetArgPointee<0>(fileMetas), - Return(0))); - EXPECT_CALL(*executor_, Reopen(_, _)) - .WillOnce(Return(mockInstance_)); - EXPECT_CALL(*metaFileManager_, UpdateFileMeta(_, _)) - .WillOnce(Return(0)); + .WillOnce(DoAll(SetArgPointee<0>(fileMetas), Return(0))); + EXPECT_CALL(*executor_, Reopen(_, _)).WillOnce(Return(mockInstance_)); + EXPECT_CALL(*metaFileManager_, UpdateFileMeta(_, _)).WillOnce(Return(0)); ASSERT_EQ(fileManager_->Run(), 0); - // 重复run返回失败 + // Repeated run returns failed ASSERT_EQ(fileManager_->Run(), -1); - // 校验结果 + // Verification results FileEntityMap entityMap = fileManager_->GetFileEntityMap(); ASSERT_EQ(1, entityMap.size()); ASSERT_NE(nullptr, entityMap[meta.fd]); @@ -239,44 +225,36 @@ TEST_F(FileManagerTest, RunFailTest) { std::vector fileMetas; fileMetas.emplace_back(meta); - // list file meta失败 - EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) - .WillOnce(Return(-1)); + // List file meta failed + EXPECT_CALL(*metaFileManager_, ListFileMeta(_)).WillOnce(Return(-1)); ASSERT_EQ(fileManager_->Run(), -1); - // reopen失败不影响Run成功 + // Reopen failure does not affect Run success EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) - .WillOnce(DoAll(SetArgPointee<0>(fileMetas), - Return(0))); - EXPECT_CALL(*executor_, Reopen(_, _)) - .WillOnce(Return(nullptr)); + .WillOnce(DoAll(SetArgPointee<0>(fileMetas), Return(0))); + EXPECT_CALL(*executor_, Reopen(_, _)).WillOnce(Return(nullptr)); ASSERT_EQ(fileManager_->Run(), 0); ASSERT_EQ(fileManager_->Fini(), 0); - // 更新metafile失败不影响Run成功 + // Failure to update metafile does not affect the success of Run EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) - .WillOnce(DoAll(SetArgPointee<0>(fileMetas), - Return(0))); - EXPECT_CALL(*executor_, Reopen(_, _)) - .WillOnce(Return(mockInstance_)); - EXPECT_CALL(*metaFileManager_, UpdateFileMeta(_, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*executor_, Close(NotNull())) - .Times(1); + .WillOnce(DoAll(SetArgPointee<0>(fileMetas), Return(0))); + EXPECT_CALL(*executor_, Reopen(_, _)).WillOnce(Return(mockInstance_)); + EXPECT_CALL(*metaFileManager_, UpdateFileMeta(_, 
_)).WillOnce(Return(-1)); + EXPECT_CALL(*executor_, Close(NotNull())).Times(1); ASSERT_EQ(fileManager_->Run(), 0); } TEST_F(FileManagerTest, OpenTest) { InitEnv(); - // open一个不存在的文件 - EXPECT_CALL(*executor_, Open(testFile2, _)) - .WillOnce(Return(mockInstance_)); + // Open a non-existent file + EXPECT_CALL(*executor_, Open(testFile2, _)).WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); int fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, 2); - // 重复open + // Repeat open fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, 2); @@ -288,15 +266,13 @@ TEST_F(FileManagerTest, OpenTest) { ASSERT_NE(entity2, nullptr); ASSERT_EQ(entity2->GetFileStatus(), NebdFileStatus::OPENED); - EXPECT_CALL(*executor_, Close(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*executor_, Close(_)).WillOnce(Return(0)); ASSERT_EQ(entity2->Close(false), 0); ASSERT_EQ(entity2->GetFileStatus(), NebdFileStatus::CLOSED); - // open 已经close的文件, fd不变 - EXPECT_CALL(*executor_, Open(testFile2, _)) - .WillOnce(Return(mockInstance_)); + // Open closed files, keep fd unchanged + EXPECT_CALL(*executor_, Open(testFile2, _)).WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, 2); ASSERT_EQ(entity2->GetFileStatus(), NebdFileStatus::OPENED); @@ -304,79 +280,67 @@ TEST_F(FileManagerTest, OpenTest) { TEST_F(FileManagerTest, OpenFailTest) { InitEnv(); - // 调用后端open接口时出错 - EXPECT_CALL(*executor_, Open(testFile2, _)) - .WillOnce(Return(nullptr)); - EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) - .Times(0); + // Error calling backend open interface + EXPECT_CALL(*executor_, Open(testFile2, _)).WillOnce(Return(nullptr)); + EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)).Times(0); int fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, -1); - // 持久化元数据信息失败 - EXPECT_CALL(*executor_, Open(testFile2, _)) - .WillOnce(Return(mockInstance_)); + // Persisting metadata information failed + EXPECT_CALL(*executor_, Open(testFile2, _)).WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*executor_, Close(_)) - .Times(1); + .WillOnce(Return(-1)); + EXPECT_CALL(*executor_, Close(_)).Times(1); fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, -1); - // Open一个非法的filename - EXPECT_CALL(*executor_, Open(_, _)) - .Times(0); + // Open an illegal filename + EXPECT_CALL(*executor_, Open(_, _)).Times(0); fd = fileManager_->Open(unknownFile, nullptr); ASSERT_EQ(fd, -1); } TEST_F(FileManagerTest, CloseTest) { InitEnv(); - // 指定的fd不存在,直接返回成功 + // The specified fd does not exist, return success directly ASSERT_EQ(nullptr, fileManager_->GetFileEntity(2)); ASSERT_EQ(0, fileManager_->Close(2, true)); NebdFileEntityPtr entity1 = fileManager_->GetFileEntity(1); ASSERT_NE(nullptr, entity1); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // 文件存在,且文件状态为OPENED,removeRecord为false - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .Times(0); + // The file exists and its status is OPENED, while removeRecord is false + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); + EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)).Times(0); ASSERT_EQ(0, fileManager_->Close(1, false)); 
ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 文件存在,文件状态为CLOSED,removeRecord为false - EXPECT_CALL(*executor_, Close(NotNull())) - .Times(0); - EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .Times(0); + // File exists, file status is CLOSED, removeRecord is false + EXPECT_CALL(*executor_, Close(NotNull())).Times(0); + EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)).Times(0); ASSERT_EQ(0, fileManager_->Close(1, false)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 文件存在,文件状态为CLOSED,removeRecord为true - EXPECT_CALL(*executor_, Close(NotNull())) - .Times(0); + // The file exists, the file status is CLOSED, and removeRecord is true + EXPECT_CALL(*executor_, Close(NotNull())).Times(0); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ASSERT_EQ(0, fileManager_->Close(1, true)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::DESTROYED); ASSERT_EQ(nullptr, fileManager_->GetFileEntity(1)); - EXPECT_CALL(*executor_, Open(testFile2, _)) - .WillOnce(Return(mockInstance_)); + EXPECT_CALL(*executor_, Open(testFile2, _)).WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); int fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, 2); NebdFileEntityPtr entity2 = fileManager_->GetFileEntity(2); ASSERT_NE(entity2, nullptr); ASSERT_EQ(entity2->GetFileStatus(), NebdFileStatus::OPENED); - // 文件存在,文件状态为OPENED,removeRecord为true - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); + // File exists, file status is OPENED, removeRecord is true + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile2)) - .WillOnce(Return(0)); + .WillOnce(Return(0)); ASSERT_EQ(0, fileManager_->Close(fd, true)); ASSERT_EQ(nullptr, fileManager_->GetFileEntity(1)); } @@ -387,36 +351,31 @@ TEST_F(FileManagerTest, CloseFailTest) { ASSERT_NE(nullptr, entity1); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // executor close 失败 - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(-1)); - EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .Times(0); + // Executor close failed + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(-1)); + EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)).Times(0); ASSERT_EQ(-1, fileManager_->Close(1, true)); ASSERT_NE(nullptr, fileManager_->GetFileEntity(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // remove file meta 失败 - EXPECT_CALL(*executor_, Close(NotNull())) - .WillOnce(Return(0)); + // Remove file meta failed + EXPECT_CALL(*executor_, Close(NotNull())).WillOnce(Return(0)); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) - .WillOnce(Return(-1)); + .WillOnce(Return(-1)); ASSERT_EQ(-1, fileManager_->Close(1, true)); ASSERT_NE(nullptr, fileManager_->GetFileEntity(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); } TEST_F(FileManagerTest, ExtendTest) { - auto task = [&](int fd)->int { - return fileManager_->Extend(fd, 4096); - }; + auto task = [&](int fd) -> int { return fileManager_->Extend(fd, 4096); }; RequestSuccssTest(RequestType::EXTEND, task); RequestFailTest(RequestType::EXTEND, task); } TEST_F(FileManagerTest, GetInfoTest) { NebdFileInfo fileInfo; - auto task = [&](int fd)->int { + auto task = [&](int fd) -> int { return fileManager_->GetInfo(fd, &fileInfo); }; RequestSuccssTest(RequestType::GETINFO, task); @@ 
-424,16 +383,14 @@ TEST_F(FileManagerTest, GetInfoTest) { } TEST_F(FileManagerTest, InvalidCacheTest) { - auto task = [&](int fd)->int { - return fileManager_->InvalidCache(fd); - }; + auto task = [&](int fd) -> int { return fileManager_->InvalidCache(fd); }; RequestSuccssTest(RequestType::INVALIDCACHE, task); RequestFailTest(RequestType::INVALIDCACHE, task); } TEST_F(FileManagerTest, AioReadTest) { NebdServerAioContext aioContext; - auto task = [&](int fd)->int { + auto task = [&](int fd) -> int { int ret = fileManager_->AioRead(fd, &aioContext); if (ret < 0) { if (aioContext.done != nullptr) { @@ -457,7 +414,7 @@ TEST_F(FileManagerTest, AioReadTest) { TEST_F(FileManagerTest, AioWriteTest) { NebdServerAioContext aioContext; - auto task = [&](int fd)->int { + auto task = [&](int fd) -> int { int ret = fileManager_->AioWrite(fd, &aioContext); if (ret < 0) { if (aioContext.done != nullptr) { @@ -481,7 +438,7 @@ TEST_F(FileManagerTest, AioWriteTest) { TEST_F(FileManagerTest, DiscardTest) { NebdServerAioContext aioContext; - auto task = [&](int fd)->int { + auto task = [&](int fd) -> int { int ret = fileManager_->Discard(fd, &aioContext); if (ret < 0) { if (aioContext.done != nullptr) { @@ -505,7 +462,7 @@ TEST_F(FileManagerTest, DiscardTest) { TEST_F(FileManagerTest, FlushTest) { NebdServerAioContext aioContext; - auto task = [&](int fd)->int { + auto task = [&](int fd) -> int { int ret = fileManager_->Flush(fd, &aioContext); if (ret < 0) { if (aioContext.done != nullptr) { @@ -544,7 +501,7 @@ TEST_F(FileManagerTest, UpdateTimestampTest) { } // namespace server } // namespace nebd -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); return RUN_ALL_TESTS(); diff --git a/nebd/test/part2/heartbeat_manager_unittest.cpp b/nebd/test/part2/heartbeat_manager_unittest.cpp index 2ae0e8d221..9d1e0eaabb 100644 --- a/nebd/test/part2/heartbeat_manager_unittest.cpp +++ b/nebd/test/part2/heartbeat_manager_unittest.cpp @@ -20,10 +20,12 @@ * Author: yangyaokai */ +#include "nebd/src/part2/heartbeat_manager.h" + #include + #include -#include "nebd/src/part2/heartbeat_manager.h" #include "nebd/test/part2/mock_file_entity.h" #include "nebd/test/part2/mock_file_manager.h" @@ -35,11 +37,11 @@ namespace server { using ::testing::_; using ::testing::AtLeast; -using ::testing::Return; -using ::testing::NotNull; using ::testing::DoAll; -using ::testing::ReturnArg; using ::testing::ElementsAre; +using ::testing::NotNull; +using ::testing::Return; +using ::testing::ReturnArg; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; @@ -53,16 +55,16 @@ class HeartbeatManagerTest : public ::testing::Test { option.fileManager = fileManager_; heartbeatManager_ = std::make_shared(option); } - std::shared_ptr fileManager_; + std::shared_ptr fileManager_; std::shared_ptr heartbeatManager_; }; TEST_F(HeartbeatManagerTest, CheckTimeoutTest) { ASSERT_EQ(heartbeatManager_->Run(), 0); - // 已经在run了不允许重复Run或者Init + // It is already running, and duplicate Run or Init is not allowed ASSERT_EQ(heartbeatManager_->Run(), -1); - // 构造file entity + // Construct file entity uint64_t curTime = TimeUtility::GetTimeofDayMs(); std::shared_ptr entity1 = std::make_shared(); @@ -71,51 +73,44 @@ TEST_F(HeartbeatManagerTest, CheckTimeoutTest) { std::shared_ptr entity3 = std::make_shared(); EXPECT_CALL(*entity1, GetFileTimeStamp()) - .WillRepeatedly(Return(curTime - 2 * 10 * 1000)); + .WillRepeatedly(Return(curTime - 2 * 10 * 1000)); 
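// Note: 2 * 10 * 1000 ms puts the timestamp 20s in the past, i.e. well beyond
// the heartbeat timeout (presumably 10s in this option set), so entity1 counts
// as timed out. Only a timed-out entity that is still OPENED is expected to be
// closed by the checker; entity2 (already CLOSED) and entity3 (fresh) are not.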
EXPECT_CALL(*entity1, GetFileStatus()) - .WillRepeatedly(Return(NebdFileStatus::OPENED)); + .WillRepeatedly(Return(NebdFileStatus::OPENED)); EXPECT_CALL(*entity2, GetFileTimeStamp()) - .WillRepeatedly(Return(curTime - 2 * 10 * 1000)); + .WillRepeatedly(Return(curTime - 2 * 10 * 1000)); EXPECT_CALL(*entity2, GetFileStatus()) - .WillRepeatedly(Return(NebdFileStatus::CLOSED)); - EXPECT_CALL(*entity3, GetFileTimeStamp()) - .WillRepeatedly(Return(curTime)); + .WillRepeatedly(Return(NebdFileStatus::CLOSED)); + EXPECT_CALL(*entity3, GetFileTimeStamp()).WillRepeatedly(Return(curTime)); EXPECT_CALL(*entity3, GetFileStatus()) - .WillRepeatedly(Return(NebdFileStatus::OPENED)); + .WillRepeatedly(Return(NebdFileStatus::OPENED)); - // 构造file map + // Construct a file map FileEntityMap entityMap; entityMap.emplace(1, entity1); entityMap.emplace(2, entity2); entityMap.emplace(3, entity3); EXPECT_CALL(*fileManager_, GetFileEntityMap()) - .WillRepeatedly(Return(entityMap)); + .WillRepeatedly(Return(entityMap)); - // 预期结果 - EXPECT_CALL(*entity1, Close(false)) - .Times(AtLeast(1)); - EXPECT_CALL(*entity2, Close(false)) - .Times(0); - EXPECT_CALL(*entity3, Close(false)) - .Times(0); + // Expected results + EXPECT_CALL(*entity1, Close(false)).Times(AtLeast(1)); + EXPECT_CALL(*entity2, Close(false)).Times(0); + EXPECT_CALL(*entity3, Close(false)).Times(0); ::sleep(2); ASSERT_EQ(heartbeatManager_->Fini(), 0); - // 重复Fini,也返回成功 + // Repeat Fini and return success ASSERT_EQ(heartbeatManager_->Fini(), 0); } TEST_F(HeartbeatManagerTest, UpdateTimeStampTest) { std::shared_ptr entity = std::make_shared(); - EXPECT_CALL(*fileManager_, GetFileEntity(1)) - .WillOnce(Return(entity)); - EXPECT_CALL(*entity, UpdateFileTimeStamp(100)) - .Times(1); + EXPECT_CALL(*fileManager_, GetFileEntity(1)).WillOnce(Return(entity)); + EXPECT_CALL(*entity, UpdateFileTimeStamp(100)).Times(1); ASSERT_TRUE(heartbeatManager_->UpdateFileTimestamp(1, 100)); - EXPECT_CALL(*fileManager_, GetFileEntity(1)) - .WillOnce(Return(nullptr)); + EXPECT_CALL(*fileManager_, GetFileEntity(1)).WillOnce(Return(nullptr)); ASSERT_FALSE(heartbeatManager_->UpdateFileTimestamp(1, 100)); } @@ -136,7 +131,7 @@ TEST_F(HeartbeatManagerTest, UpdateNebdClientInfo) { } // namespace server } // namespace nebd -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/nebd/test/part2/heartbeat_service_test.cpp b/nebd/test/part2/heartbeat_service_test.cpp index 7d60ce6981..7e29edd10c 100644 --- a/nebd/test/part2/heartbeat_service_test.cpp +++ b/nebd/test/part2/heartbeat_service_test.cpp @@ -20,13 +20,15 @@ * Author: charisu */ -#include +#include "nebd/src/part2/heartbeat_service.h" + #include #include +#include + #include #include "nebd/proto/heartbeat.pb.h" -#include "nebd/src/part2/heartbeat_service.h" #include "nebd/test/part2/mock_heartbeat_manager.h" using ::testing::_; using ::testing::Return; @@ -41,15 +43,15 @@ class HeartbeatServiceTest : public ::testing::Test { void SetUp() override { heartbeatManager_ = std::make_shared(); } - std::shared_ptr heartbeatManager_; + std::shared_ptr heartbeatManager_; }; TEST_F(HeartbeatServiceTest, KeepAlive) { - // 启动server + // Start server brpc::Server server; NebdHeartbeatServiceImpl heartbeatService(heartbeatManager_); ASSERT_EQ(0, server.AddService(&heartbeatService, - brpc::SERVER_DOESNT_OWN_SERVICE)); + brpc::SERVER_DOESNT_OWN_SERVICE)); brpc::ServerOptions option; option.idle_timeout_sec = -1; ASSERT_EQ(0, 
server.StartAtSockFile(kSockFile_.c_str(), &option)); @@ -68,7 +70,7 @@ TEST_F(HeartbeatServiceTest, KeepAlive) { nebd::client::NebdHeartbeatService_Stub stub(&channel); brpc::Controller cntl; - // 正常情况 + // Normal situation EXPECT_CALL(*heartbeatManager_, UpdateFileTimestamp(_, _)) .Times(3) .WillRepeatedly(Return(true)); @@ -76,7 +78,7 @@ TEST_F(HeartbeatServiceTest, KeepAlive) { ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(nebd::client::RetCode::kOK, response.retcode()); - // 有文件更新时间戳失败 + // Some file failed to update its timestamp EXPECT_CALL(*heartbeatManager_, UpdateFileTimestamp(_, _)) .Times(3) .WillOnce(Return(false)) @@ -86,14 +88,14 @@ TEST_F(HeartbeatServiceTest, KeepAlive) { ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(nebd::client::RetCode::kNoOK, response.retcode()); - // 停止server + // Stop server server.Stop(0); server.Join(); } } // namespace server } // namespace nebd -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/nebd/test/part2/metafile_manager_test.cpp b/nebd/test/part2/metafile_manager_test.cpp index 7027cb9da6..dbde2d4ee3 100644 --- a/nebd/test/part2/metafile_manager_test.cpp +++ b/nebd/test/part2/metafile_manager_test.cpp @@ -20,11 +20,13 @@ * Author: charisu */ +#include "nebd/src/part2/metafile_manager.h" + #include #include + #include -#include "nebd/src/part2/metafile_manager.h" #include "nebd/test/part2/mock_posix_wrapper.h" using ::testing::_; @@ -37,8 +39,7 @@ const char metaPath[] = "/tmp/nebd-test-metafilemanager.meta"; void FillCrc(Json::Value* root) { std::string jsonString = root->toStyledString(); - uint32_t crc = nebd::common::CRC32(jsonString.c_str(), - jsonString.size()); + uint32_t crc = nebd::common::CRC32(jsonString.c_str(), jsonString.size()); (*root)[kCRC] = crc; } @@ -61,19 +62,19 @@ TEST_F(MetaFileManagerTest, nomaltest) { NebdMetaFileManager metaFileManager; ASSERT_EQ(metaFileManager.Init(option), 0); std::vector fileMetas; - // 文件不存在 + // File does not exist ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_TRUE(fileMetas.empty()); - // 添加两条记录,curve和test各一 + // Add two records, one for curve and one for test NebdFileMeta fileMeta1; fileMeta1.fileName = "test:volume1"; fileMeta1.fd = 1; ASSERT_EQ(0, metaFileManager.UpdateFileMeta(fileMeta1.fileName, fileMeta1)); - // 使用相同的内容Update + // Update using the same content ASSERT_EQ(0, metaFileManager.UpdateFileMeta(fileMeta1.fileName, fileMeta1)); - // 插入不同的meta + // Insert different meta NebdFileMeta fileMeta2; fileMeta2.fileName = "cbd:volume2"; fileMeta2.fd = 2; @@ -89,9 +90,9 @@ TEST_F(MetaFileManagerTest, nomaltest) { // remove meta ASSERT_EQ(0, metaFileManager.RemoveFileMeta(fileMeta2.fileName)); - // remove 不存在的meta + // remove non-existent meta ASSERT_EQ(0, metaFileManager.RemoveFileMeta("unknown")); - // 校验结果 + // Verify the results fileMetas.clear(); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); @@ -111,35 +112,28 @@ TEST_F(MetaFileManagerTest, UpdateMetaFailTest) { fileMetaMap.emplace(fileMeta.fileName, fileMeta); std::vector fileMetas; - // open临时文件失败 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(-1)); + // Open temporary file failed + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, metaFileManager.UpdateFileMeta(fileMeta.fileName, fileMeta)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(0, fileMetas.size()); - // 写入临时文件失败 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(1)); - 
EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) - .WillOnce(Return(0)); - EXPECT_CALL(*wrapper_, close(_)) - .Times(1); + // Failed to write temporary file + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(1)); + EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)).WillOnce(Return(0)); + EXPECT_CALL(*wrapper_, close(_)).Times(1); ASSERT_EQ(-1, metaFileManager.UpdateFileMeta(fileMeta.fileName, fileMeta)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(0, fileMetas.size()); - // rename失败 + // Rename failed NebdMetaFileParser parser; Json::Value root = parser.ConvertFileMetasToJson(fileMetaMap); - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(1)); EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) .WillOnce(Return(root.toStyledString().size())); - EXPECT_CALL(*wrapper_, close(_)) - .Times(1); - EXPECT_CALL(*wrapper_, rename(_, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper_, close(_)).Times(1); + EXPECT_CALL(*wrapper_, rename(_, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, metaFileManager.UpdateFileMeta(fileMeta.fileName, fileMeta)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(0, fileMetas.size()); @@ -160,15 +154,12 @@ TEST_F(MetaFileManagerTest, RemoveMetaFailTest) { NebdMetaFileParser parser; Json::Value root = parser.ConvertFileMetasToJson(fileMetaMap); - // 先插入一条数据 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(1)); + // Insert a piece of data first + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(1)); EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) .WillOnce(Return(root.toStyledString().size())); - EXPECT_CALL(*wrapper_, close(_)) - .Times(1); - EXPECT_CALL(*wrapper_, rename(_, _)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper_, close(_)).Times(1); + EXPECT_CALL(*wrapper_, rename(_, _)).WillOnce(Return(0)); ASSERT_EQ(0, metaFileManager.UpdateFileMeta(fileMeta.fileName, fileMeta)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); @@ -176,33 +167,26 @@ TEST_F(MetaFileManagerTest, RemoveMetaFailTest) { fileMetaMap.erase(fileMeta.fileName); root = parser.ConvertFileMetasToJson(fileMetaMap); - // open临时文件失败 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(-1)); + // Open temporary file failed + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, metaFileManager.RemoveFileMeta(fileMeta.fileName)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); - // 写入临时文件失败 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) - .WillOnce(Return(0)); - EXPECT_CALL(*wrapper_, close(_)) - .Times(1); + // Failed to write temporary file + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(1)); + EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)).WillOnce(Return(0)); + EXPECT_CALL(*wrapper_, close(_)).Times(1); ASSERT_EQ(-1, metaFileManager.RemoveFileMeta(fileMeta.fileName)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); - // rename失败 - EXPECT_CALL(*wrapper_, open(_, _, _)) - .WillOnce(Return(1)); + // Rename failed + EXPECT_CALL(*wrapper_, open(_, _, _)).WillOnce(Return(1)); EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) .WillOnce(Return(root.toStyledString().size())); - EXPECT_CALL(*wrapper_, close(_)) - .Times(1); - EXPECT_CALL(*wrapper_, rename(_, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper_, close(_)).Times(1); + EXPECT_CALL(*wrapper_, rename(_, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, 
metaFileManager.RemoveFileMeta(fileMeta.fileName)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); @@ -215,7 +199,7 @@ TEST(MetaFileParserTest, Parse) { Json::Value volumes; FileMetaMap fileMetas; - // 正常情况 + // Normal situation volume[kFileName] = "cbd:volume1"; volume[kFd] = 1; volumes.append(volume); @@ -225,18 +209,19 @@ TEST(MetaFileParserTest, Parse) { FillCrc(&root); ASSERT_EQ(0, parser.Parse(root, &fileMetas)); - // 空指针 + // Null pointer ASSERT_EQ(-1, parser.Parse(root, nullptr)); - // crc校验不正确 + // Incorrect crc verification root[kCRC] = root[kCRC].asUInt() + 1; ASSERT_EQ(-1, parser.Parse(root, &fileMetas)); - // 没有crc字段 + // No crc field root.removeMember(kCRC); ASSERT_EQ(-1, parser.Parse(root, &fileMetas)); - // 没有volumes字段或volumes字段是null,不应该报错 + // There is no volumes field or the volumes field is null, and an error + // should not be reported root.clear(); root["key"] = "value"; FillCrc(&root); @@ -249,7 +234,7 @@ TEST(MetaFileParserTest, Parse) { ASSERT_EQ(0, parser.Parse(root, &fileMetas)); ASSERT_TRUE(fileMetas.empty()); - // 记录中没有filename + // There is no filename in the record volume.clear(); volumes.clear(); root.clear(); @@ -259,7 +244,7 @@ TEST(MetaFileParserTest, Parse) { FillCrc(&root); ASSERT_EQ(-1, parser.Parse(root, &fileMetas)); - // 记录中没有fd + // The record does not contain an 'fd'. volume.clear(); volumes.clear(); root.clear(); @@ -273,7 +258,7 @@ TEST(MetaFileParserTest, Parse) { } // namespace server } // namespace nebd -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/nebd/test/part2/test_nebd_server.cpp b/nebd/test/part2/test_nebd_server.cpp index 1f6f8ef112..effcdc05b3 100644 --- a/nebd/test/part2/test_nebd_server.cpp +++ b/nebd/test/part2/test_nebd_server.cpp @@ -21,27 +21,28 @@ */ #include + #include "nebd/src/part2/nebd_server.h" #include "nebd/test/part2/mock_curve_client.h" namespace nebd { namespace server { -using ::testing::Return; using ::testing::_; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; TEST(TestNebdServer, test_Init_Run_Fini) { NebdServer server; auto curveClient = std::make_shared(); std::string confPath; - // 1. 配置文件不存在, init失败 + // 1. Configuration file does not exist, init failed confPath = "./nebd.conf"; ASSERT_EQ(-1, server.Init(confPath)); - // 2. 配置文件存在, 监听端口未设置 + // 2. Configuration file exists, listening port not set confPath = "./nebd/test/part2/nebd-server-err.conf"; Configuration conf; conf.SetBoolValue("response.returnRpcWhenIoError", false); @@ -49,55 +50,54 @@ TEST(TestNebdServer, test_Init_Run_Fini) { conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath)); - // 3、配置文件中没有client配置 + // 3. There is no client configuration in the configuration file conf.SetStringValue("listen.address", "/tmp/nebd-server.sock"); conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath)); - // 4. curveclient init失败 + // 4. Curveclient init failed conf.SetStringValue("curveclient.confPath", "/etc/curve/client.conf"); conf.SaveConfig(); EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(-1)); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 5、初始化fileManager失败 + // 5. Failed to initialize fileManager EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 6、没有heartbeat.timeout字段 + // 6. 
There is no heartbeat.timeout field EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); conf.SetStringValue("meta.file.path", "./nebd-server-test.meta"); conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 7、没有heartbeat.check.interval.ms字段 + // 7. No heartbeat.check.interval.ms field EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); conf.SetIntValue("heartbeat.timeout.sec", 30); conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - - // 8. 初始化成功 + // 8. Initialized successfully EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); conf.SetIntValue("heartbeat.check.interval.ms", 3000); conf.SaveConfig(); ASSERT_EQ(0, server.Init(confPath, curveClient)); - // 9. run成功 + // 9. Run successful EXPECT_CALL(*curveClient, UnInit()).Times(2); std::thread nebdServerThread(&NebdServer::RunUntilAskedToQuit, &server); sleep(1); - // 10、再次Run会失败 + // 10. Running again will fail ASSERT_EQ(-1, server.RunUntilAskedToQuit()); - // 11、Run之后Init会失败 + // 11. Init will fail after Run ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 7. stop成功 + // 12. Stop successful ASSERT_EQ(0, server.Fini()); - // 8. 再次stop不会重复释放资源 + // 13. Stopping again will not release resources twice ASSERT_EQ(0, server.Fini()); nebdServerThread.join(); } @@ -105,7 +105,7 @@ TEST(TestNebdServer, test_Init_Run_Fini) { } // namespace server } // namespace nebd -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/nebd/test/part2/test_request_executor_curve.cpp b/nebd/test/part2/test_request_executor_curve.cpp index 2b749d0615..8d8c3811f2 100644 --- a/nebd/test/part2/test_request_executor_curve.cpp +++ b/nebd/test/part2/test_request_executor_curve.cpp @@ -21,36 +21,30 @@ */ #include -#include "nebd/src/part2/request_executor_curve.h" -#include "nebd/test/part2/mock_curve_client.h" #include "nebd/proto/client.pb.h" #include "nebd/proto/heartbeat.pb.h" #include "nebd/src/part2/file_service.h" +#include "nebd/src/part2/request_executor_curve.h" +#include "nebd/test/part2/mock_curve_client.h" namespace nebd { namespace server { -using ::testing::Return; using ::testing::_; -using ::testing::SetArgPointee; using ::testing::DoAll; -using ::testing::SaveArg; using ::testing::Invoke; +using ::testing::Return; +using ::testing::SaveArg; +using ::testing::SetArgPointee; class TestReuqestExecutorCurveClosure : public google::protobuf::Closure { public: TestReuqestExecutorCurveClosure() : runned_(false) {} ~TestReuqestExecutorCurveClosure() {} - void Run() { - runned_ = true; - } - bool IsRunned() { - return runned_; - } - void Reset() { - runned_ = false; - } + void Run() { runned_ = true; } + bool IsRunned() { return runned_; } + void Reset() { runned_ = false; } private: bool runned_; @@ -60,7 +54,7 @@ void NebdUnitTestCallback(NebdServerAioContext* context) { std::cout << "callback" << std::endl; } -class TestReuqestExecutorCurve : public ::testing::Test { +class TestReuqestExecutorCurve : public ::testing::Test { protected: void SetUp() { curveClient_ = std::make_shared(); @@ -77,7 +71,7 @@ TEST_F(TestReuqestExecutorCurve, test_Open) { std::string fileName("cbd:pool1//cinder/volume-1234_cinder_:/client.conf"); std::string curveFileName("/cinder/volume-1234_cinder_"); - // 1. 传入的fileName解析失败 + // 1. Failed to parse the passed-in fileName { std::string errFileName("cbd:pool1/:"); EXPECT_CALL(*curveClient_, Open(fileName, _)).Times(0); std::shared_ptr ret = executor.Open(errFileName, nullptr); ASSERT_TRUE(nullptr == ret); } - // 2. curveclient open失败 + // 2. Curveclient open failed { - EXPECT_CALL(*curveClient_, Open(curveFileName, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*curveClient_, Open(curveFileName, _)).WillOnce(Return(-1)); std::shared_ptr ret = executor.Open(fileName, nullptr); ASSERT_TRUE(nullptr == ret); } - // 3. open成功 + // 3. Open successful { - EXPECT_CALL(*curveClient_, Open(curveFileName, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*curveClient_, Open(curveFileName, _)).WillOnce(Return(1)); std::shared_ptr ret = executor.Open(fileName, nullptr); ASSERT_TRUE(nullptr != ret); - auto *curveIns = dynamic_cast(ret.get()); + auto* curveIns = dynamic_cast(ret.get()); ASSERT_TRUE(nullptr != curveIns); ASSERT_EQ(curveFileName, curveIns->fileName); ASSERT_EQ(1, curveIns->fd); @@ -117,16 +109,16 @@ TEST_F(TestReuqestExecutorCurve, test_ReOpen) { std::string fileName("cbd:pool1//cinder/volume-1234_cinder_:/client.conf"); std::string curveFileName("/cinder/volume-1234_cinder_"); - // 1. 传入的fileName解析失败 + // 1. Failed to parse the passed-in fileName { std::string errFileName("cbd:pool1/:"); EXPECT_CALL(*curveClient_, Open(_, _)).Times(0); - std::shared_ptr ret = executor.Reopen( - errFileName, xattr); + std::shared_ptr ret = + executor.Reopen(errFileName, xattr); ASSERT_TRUE(nullptr == ret); } - // 2. repoen失败 + // 2. reopen failed { EXPECT_CALL(*curveClient_, ReOpen(curveFileName, _)) .WillOnce(Return(-1)); @@ -135,14 +127,14 @@ TEST_F(TestReuqestExecutorCurve, test_ReOpen) { ASSERT_TRUE(nullptr == ret); } - // 3. reopen成功 + // 3. reopen successful { EXPECT_CALL(*curveClient_, ReOpen(curveFileName, _)) .WillOnce(Return(1)); - std::shared_ptr ret = + std::shared_ptr ret = executor.Reopen(fileName, xattr); ASSERT_TRUE(nullptr != ret); - auto *curveIns = dynamic_cast(ret.get()); + auto* curveIns = dynamic_cast(ret.get()); ASSERT_TRUE(nullptr != curveIns); ASSERT_EQ(curveFileName, curveIns->fileName); ASSERT_EQ(1, curveIns->fd); @@ -153,14 +145,14 @@ TEST_F(TestReuqestExecutorCurve, test_Close) { auto executor = CurveRequestExecutor::GetInstance(); - // 1. nebdFileIns不是CurveFileInstance类型, close失败 + // 1. nebdFileIns is not of type CurveFileInstance, close failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, Close(_)).Times(0); ASSERT_EQ(-1, executor.Close(nebdFileIns)); } - // 2. nebdFileIns中的fd<0, close失败 + // 2. fd<0 in nebdFileIns, close failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = -1; @@ -168,7 +160,7 @@ TEST_F(TestReuqestExecutorCurve, test_Close) { ASSERT_EQ(-1, executor.Close(curveFileIns)); } - // 3. 调用curveclient的close接口失败, close失败 + // 3. Calling the close interface of curveclient failed, close failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; @@ -177,7 +169,7 @@ TEST_F(TestReuqestExecutorCurve, test_Close) { ASSERT_EQ(-1, executor.Close(curveFileIns)); } - // 4. close成功 + // 4. close successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; @@ -191,21 +183,21 @@ TEST_F(TestReuqestExecutorCurve, test_Extend) { auto executor = CurveRequestExecutor::GetInstance(); std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1. 
nebdFileIns is not of type CurveFileInstance, extend failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, Extend(_, _)).Times(0); ASSERT_EQ(-1, executor.Extend(nebdFileIns, 1)); } - // 2. nebdFileIns中的fileName为空, extend失败 + // 2. FileName in nebdFileIns is empty, extend failed { auto curveFileIns = new CurveFileInstance(); EXPECT_CALL(*curveClient_, Extend(_, _)).Times(0); ASSERT_EQ(-1, executor.Extend(curveFileIns, 1)); } - // 3. 调用curveclient的extend接口失败, extend失败 + // 3. Calling the extend interface of curveclient failed, extend failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fileName = curveFilename; @@ -214,7 +206,7 @@ TEST_F(TestReuqestExecutorCurve, test_Extend) { ASSERT_EQ(-1, executor.Extend(curveFileIns, 1)); } - // 4. extend成功 + // 4. extend successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fileName = curveFilename; @@ -229,43 +221,40 @@ TEST_F(TestReuqestExecutorCurve, test_GetInfo) { NebdFileInfo fileInfo; int curveFd = 123; - // 1. nebdFileIns不是CurveFileInstance类型, stat失败 + // 1. nebdFileIns is not of type CurveFileInstance, stat failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, StatFile(curveFd, _)).Times(0); ASSERT_EQ(-1, executor.GetInfo(nebdFileIns, &fileInfo)); } - // 2. nebdFileIns中的fd为空, stat失败 + // 2. Fd in nebdFileIns is empty, stat failed { auto curveFileIns = new CurveFileInstance(); EXPECT_CALL(*curveClient_, StatFile(curveFd, _)).Times(0); ASSERT_EQ(-1, executor.GetInfo(curveFileIns, &fileInfo)); } - - // 3. 调用curveclient的stat接口失败, stat失败 + // 3. Calling the stat interface of curveclient failed, stat failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = curveFd; - EXPECT_CALL(*curveClient_, StatFile(curveFd, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*curveClient_, StatFile(curveFd, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, executor.GetInfo(curveFileIns, &fileInfo)); } - // 4. stat成功 + // 4. stat successful { const uint64_t size = 10ull * 1024 * 1024 * 1024; const uint32_t blocksize = 4096; auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = curveFd; EXPECT_CALL(*curveClient_, StatFile(curveFd, _)) - .WillOnce(Invoke( - [size, blocksize](int /*fd*/, FileStatInfo* info) { - info->length = size; - info->blocksize = blocksize; - return 0; - })); + .WillOnce(Invoke([size, blocksize](int /*fd*/, FileStatInfo* info) { + info->length = size; + info->blocksize = blocksize; + return 0; + })); ASSERT_EQ(0, executor.GetInfo(curveFileIns, &fileInfo)); ASSERT_EQ(size, fileInfo.size); ASSERT_EQ(blocksize, fileInfo.block_size); @@ -278,14 +267,14 @@ TEST_F(TestReuqestExecutorCurve, test_AioRead) { aiotcx.cb = NebdUnitTestCallback; std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1. nebdFileIns不是CurveFileInstance类型, 异步读失败 + // 1. nebdFileIns is not of type CurveFileInstance, asynchronous read failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, AioRead(_, _, _)).Times(0); ASSERT_EQ(-1, executor.AioRead(nebdFileIns, &aiotcx)); } - // 2. nebdFileIns中的fd<0, 异步读失败 + // 2. fd<0 in nebdFileIns, asynchronous read failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = -1; @@ -293,7 +282,8 @@ TEST_F(TestReuqestExecutorCurve, test_AioRead) { ASSERT_EQ(-1, executor.AioRead(curveFileIns, &aiotcx)); } - // 3. 调用curveclient的AioRead接口失败, 异步读失败 + // 3. 
Calling the AioRead interface of curveclient failed, asynchronous read + // failed { auto curveFileIns = new CurveFileInstance(); aiotcx.size = 1; @@ -307,15 +297,14 @@ TEST_F(TestReuqestExecutorCurve, test_AioRead) { ASSERT_EQ(-1, executor.AioRead(curveFileIns, &aiotcx)); } - // 4. 异步读取成功 + // 4. Asynchronous read successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; curveFileIns->fileName = curveFilename; CurveAioContext* curveCtx; EXPECT_CALL(*curveClient_, AioRead(1, _, _)) - .WillOnce(DoAll(SaveArg<1>(&curveCtx), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SaveArg<1>(&curveCtx), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(0, executor.AioRead(curveFileIns, &aiotcx)); curveCtx->cb(curveCtx); } @@ -327,14 +316,15 @@ TEST_F(TestReuqestExecutorCurve, test_AioWrite) { aiotcx.cb = NebdUnitTestCallback; std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1. nebdFileIns不是CurveFileInstance类型, 异步写失败 + // 1. nebdFileIns is not of type CurveFileInstance, asynchronous write + // failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, AioWrite(_, _, _)).Times(0); ASSERT_EQ(-1, executor.AioWrite(nebdFileIns, &aiotcx)); } - // 2. nebdFileIns中的fd<0, 异步写失败 + // 2. fd<0 in nebdFileIns, asynchronous write failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = -1; @@ -342,7 +332,8 @@ TEST_F(TestReuqestExecutorCurve, test_AioWrite) { ASSERT_EQ(-1, executor.AioWrite(curveFileIns, &aiotcx)); } - // 3. 调用curveclient的AioWrite接口失败, 异步写失败 + // 3. Calling the AioWrite interface of curveclient failed, asynchronous + // write failed { auto curveFileIns = new CurveFileInstance(); aiotcx.size = 1; @@ -356,15 +347,14 @@ TEST_F(TestReuqestExecutorCurve, test_AioWrite) { ASSERT_EQ(-1, executor.AioWrite(curveFileIns, &aiotcx)); } - // 4. 异步写入成功 + // 4. Asynchronous write successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; curveFileIns->fileName = curveFilename; CurveAioContext* curveCtx; EXPECT_CALL(*curveClient_, AioWrite(1, _, _)) - .WillOnce(DoAll(SaveArg<1>(&curveCtx), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SaveArg<1>(&curveCtx), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(0, executor.AioWrite(curveFileIns, &aiotcx)); curveCtx->cb(curveCtx); } @@ -379,8 +369,7 @@ TEST_F(TestReuqestExecutorCurve, test_Discard) { // 1. not an curve volume { std::unique_ptr nebdFileIns(new NebdFileInstance()); - EXPECT_CALL(*curveClient_, AioDiscard(_, _)) - .Times(0); + EXPECT_CALL(*curveClient_, AioDiscard(_, _)).Times(0); ASSERT_EQ(-1, executor.Discard(nebdFileIns.get(), &aioctx)); } @@ -389,8 +378,7 @@ TEST_F(TestReuqestExecutorCurve, test_Discard) { std::unique_ptr curveFileIns( new CurveFileInstance()); curveFileIns->fd = -1; - EXPECT_CALL(*curveClient_, AioDiscard(_, _)) - .Times(0); + EXPECT_CALL(*curveClient_, AioDiscard(_, _)).Times(0); ASSERT_EQ(-1, executor.Discard(curveFileIns.get(), &aioctx)); } @@ -419,8 +407,7 @@ TEST_F(TestReuqestExecutorCurve, test_Discard) { curveFileIns->fileName = curveFilename; CurveAioContext* curveCtx; EXPECT_CALL(*curveClient_, AioDiscard(_, _)) - .WillOnce(DoAll(SaveArg<1>(&curveCtx), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SaveArg<1>(&curveCtx), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(0, executor.Discard(curveFileIns.get(), &aioctx)); curveCtx->cb(curveCtx); } @@ -448,13 +435,13 @@ TEST_F(TestReuqestExecutorCurve, test_InvalidCache) { auto executor = CurveRequestExecutor::GetInstance(); std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1. 
nebdFileIns不是CurveFileInstance类型, 不合法 + // 1. nebdFileIns is not of type CurveFileInstance, illegal { auto nebdFileIns = new NebdFileInstance(); ASSERT_EQ(-1, executor.InvalidCache(nebdFileIns)); } - // 2. fd<0, 不合法 + // 2. fd<0, illegal { auto curveFileIns = new CurveFileInstance(); curveFileIns->fileName = curveFilename; @@ -462,14 +449,14 @@ TEST_F(TestReuqestExecutorCurve, test_InvalidCache) { ASSERT_EQ(-1, executor.InvalidCache(curveFileIns)); } - // 3. filename为空,不合法 + // 3. The filename is empty and illegal { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; ASSERT_EQ(-1, executor.InvalidCache(curveFileIns)); } - // 4. 合法 + // 4. legitimate { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; @@ -478,11 +465,10 @@ TEST_F(TestReuqestExecutorCurve, test_InvalidCache) { } } - TEST(TestFileNameParser, test_Parse) { std::string fileName("cbd:pool1//cinder/volume-1234_cinder_:/client.conf"); - std::pair res( - "/cinder/volume-1234_cinder_", "/client.conf"); + std::pair res("/cinder/volume-1234_cinder_", + "/client.conf"); ASSERT_EQ(res, FileNameParser::Parse(fileName)); fileName = "cbd:pool1//cinder/volume-1234_cinder_"; @@ -500,11 +486,10 @@ TEST(TestFileNameParser, test_Parse) { ASSERT_EQ(res, FileNameParser::Parse(fileName)); } - } // namespace server } // namespace nebd -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); return RUN_ALL_TESTS(); diff --git a/proto/chunk.proto b/proto/chunk.proto index af5cd3fb5a..c19303c854 100755 --- a/proto/chunk.proto +++ b/proto/chunk.proto @@ -20,7 +20,7 @@ package curve.chunkserver; option cc_generic_services = true; option go_package = "proto/chunk"; -// Qos 参数 +// Qos parameters message QosRequestParas { optional uint32 clientId = 1; optional int32 dmclockDelta = 2; @@ -28,38 +28,38 @@ message QosRequestParas { } message QosResponseParas { - optional int32 phase = 1; // 0: 代表 reservation 阶段; 1: 代表 priority 阶段 + optional int32 phase = 1; // 0: represents the reservation stage; 1: Representing the priority stage optional int32 cost = 2; // } // For chunk enum CHUNK_OP_TYPE { - CHUNK_OP_DELETE = 0; // 删除 chunk - CHUNK_OP_READ = 1; // 读 chunk - CHUNK_OP_WRITE = 2; // 写 chunk + CHUNK_OP_DELETE = 0; // Delete chunk + CHUNK_OP_READ = 1; // Read chunk + CHUNK_OP_WRITE = 2; // Write chunk CHUNK_OP_READ_SNAP = 3; // read chunk snapshot - // TODO(wudemiao): 后期替换成CHUNK_OP_DELETE_SNAP_OR_CORRECT_SN, - // 保证和chunkserver的接口一致 + // TODO(wudemiao): later replaced with CHUNK_OP_DELETE_SNAP_OR_CORRECT_SN, + // Ensure consistency with chunkserver interface CHUNK_OP_DELETE_SNAP = 4; // delete chunk snapshot - CHUNK_OP_CREATE_CLONE = 5; // 创建clone chunk - CHUNK_OP_RECOVER = 6; // 恢复clone chunk - CHUNK_OP_PASTE = 7; // paste chunk 内部请求 + CHUNK_OP_CREATE_CLONE = 5; // Create clone chunk + CHUNK_OP_RECOVER = 6; // Restore clone chunk + CHUNK_OP_PASTE = 7; // paste chunk internal request CHUNK_OP_UNKNOWN = 8; // unknown Op CHUNK_OP_SCAN = 9; // scan oprequest }; -// read/write 的实际数据在 rpc 的 attachment 中 +// The actual data of read/write is in the attachment of rpc message ChunkRequest { required CHUNK_OP_TYPE opType = 1; // for all - required uint32 logicPoolId = 2; // for all // logicPoolId 实际上 uint16,但是 proto 没有 uint16 + required uint32 logicPoolId = 2; // for all // logicPoolId is actually uint16, but proto does not have uint16 required uint32 copysetId = 3; // for all required uint64 chunkId = 4; // for all optional uint64 appliedIndex = 
5; // for read optional uint32 offset = 6; // for read/write - optional uint32 size = 7; // for read/write/clone 读取数据大小/写入数据大小/创建快照请求中表示请求创建的chunk大小 + optional uint32 size = 7; // for read/write/clone Read data size/Write data size/Create snapshot request represents the chunk size of the request creation optional QosRequestParas deltaRho = 8; // for read/write - optional uint64 sn = 9; // for write/read snapshot 写请求中表示文件当前版本号,读快照请求中表示请求的chunk的版本号 - optional uint64 correctedSn = 10; // for CreateCloneChunk/DeleteChunkSnapshotOrCorrectedSn 用于修改chunk的correctedSn + optional uint64 sn = 9; // for write/read snapshot, in the write request, represents the current version number of the file, and in the read snapshot request, represents the version number of the requested chunk + optional uint64 correctedSn = 10; // for CreateCloneChunk/DeleteChunkSnapshotOrCorrectedSn used to modify the correctedSn of a chunk optional string location = 11; // for CreateCloneChunk optional string cloneFileSource = 12; // for write/read optional uint64 cloneFileOffset = 13; // for write/read @@ -72,28 +72,28 @@ message ChunkRequest { }; enum CHUNK_OP_STATUS { - CHUNK_OP_STATUS_SUCCESS = 0; // 成功 - CHUNK_OP_STATUS_REDIRECTED = 1; // 不是 leader,重定向 - CHUNK_OP_STATUS_DISK_FAIL = 2; // 磁盘返回错误 - CHUNK_OP_STATUS_CRC_FAIL = 3; // CRC 校验失败 - CHUNK_OP_STATUS_INVALID_REQUEST = 4; // 请求参数不对 - CHUNK_OP_STATUS_NOSPACE = 5; // 空间不够 - CHUNK_OP_STATUS_COPYSET_NOTEXIST = 6; // copyset 不存在 - CHUNK_OP_STATUS_CHUNK_NOTEXIST = 7; // chunk或其快照文件不存在 - CHUNK_OP_STATUS_FAILURE_UNKNOWN = 8; // 其他错误 - CHUNK_OP_STATUS_OVERLOAD = 9; // 过载,表示服务端有过多请求未处理返回 - CHUNK_OP_STATUS_BACKWARD = 10; // 请求的版本落后当前chunk的版本 - CHUNK_OP_STATUS_CHUNK_EXIST = 11; // chunk已存在 + CHUNK_OP_STATUS_SUCCESS = 0; // Success + CHUNK_OP_STATUS_REDIRECTED = 1; // Not a leader, redirect + CHUNK_OP_STATUS_DISK_FAIL = 2; // Disk returned error + CHUNK_OP_STATUS_CRC_FAIL = 3; // CRC verification failed + CHUNK_OP_STATUS_INVALID_REQUEST = 4; // The request parameters are incorrect + CHUNK_OP_STATUS_NOSPACE = 5; // Insufficient space + CHUNK_OP_STATUS_COPYSET_NOTEXIST = 6; // Copyset does not exist + CHUNK_OP_STATUS_CHUNK_NOTEXIST = 7; // Chunk or its snapshot file does not exist + CHUNK_OP_STATUS_FAILURE_UNKNOWN = 8; // Other errors + CHUNK_OP_STATUS_OVERLOAD = 9; // Overload indicates that the server has too many requests that have not been processed and returned + CHUNK_OP_STATUS_BACKWARD = 10; // The requested version falls behind the current chunk version + CHUNK_OP_STATUS_CHUNK_EXIST = 11; // Chunk already exists CHUNK_OP_STATUS_EPOCH_TOO_OLD = 12; // request epoch too old }; message ChunkResponse { required CHUNK_OP_STATUS status = 1; - optional string redirect = 2; // 自己不是 leader,重定向给 leader - optional uint64 appliedIndex = 3; // 返回当前最新的 committedIndex, 注意 read 和 write 都要返回 + optional string redirect = 2; // Not the leader, redirect to the leader + optional uint64 appliedIndex = 3; // Return the latest committedIndex, note that both read and write must be returned optional QosResponseParas phaseCost = 4; // for read/write - optional uint64 chunkSn = 5; // for GetChunkInfo 表示chunk文件版本号,0表示不存在 - optional uint64 snapSn = 6; // for GetChunkInfo 表示chunk文件快照的版本号,0表示不存在 + optional uint64 chunkSn = 5; // for GetChunkInfo represents the version number of the chunk file, while 0 indicates that it does not exist + optional uint64 snapSn = 6; // for GetChunkInfo represents the version number of the Chunk file snapshot, while 0 indicates that it does not exist }; message 
message GetChunkInfoRequest {
@@ -104,8 +104,8 @@ message GetChunkInfoRequest {

message GetChunkInfoResponse {
    required CHUNK_OP_STATUS status = 1;
-    optional string redirect = 2;    // 自己不是 leader,重定向给 leader
-    repeated uint64 chunkSn = 3;    // chunk 版本号 和 snapshot 版本号
+    optional string redirect = 2;   // Not the leader; redirect to the leader
+    repeated uint64 chunkSn = 3;    // The chunk version and the snapshot version
};

message GetChunkHashRequest {
@@ -118,7 +118,7 @@ message GetChunkHashRequest {

message GetChunkHashResponse {
    required CHUNK_OP_STATUS status = 1;
-    optional string hash = 2;    // 能标志chunk数据状态的hash值,一般是crc32c
+    optional string hash = 2;    // A hash that identifies the state of the chunk data, usually crc32c
};

message CreateS3CloneChunkRequest {
@@ -131,7 +131,7 @@ message CreateS3CloneChunkRequest {

message CreateS3CloneChunkResponse {
    required CHUNK_OP_STATUS status = 1;
-    optional string redirect = 2;    // 自己不是 leader,重定向给 leader
+    optional string redirect = 2;    // Not the leader; redirect to the leader
};

message UpdateEpochRequest {
diff --git a/proto/cli.proto b/proto/cli.proto
index 46981c967d..5a0bdd89ff 100755
--- a/proto/cli.proto
+++ b/proto/cli.proto
@@ -20,12 +20,12 @@ package curve.chunkserver;
option cc_generic_services = true;
option go_package = "proto/cli";

-// 这里都用 logicPoolId, copysetId,进入 rpc service 之后,会转换成 string
-// 类型的 groupId,在传给 raft
+// logicPoolId and copysetId are used here; after entering the rpc service
+// they are converted to a string-typed groupId and passed to raft
// |            groupId               |
// |  logicPoolId  |    copysetId     |
message AddPeerRequest {
-    required uint32 logicPoolId = 1;    // logicPoolId 实际上 uint16,但是 proto 没有 uint16
+    required uint32 logicPoolId = 1;    // logicPoolId is actually uint16, but proto has no uint16
    required uint32 copysetId = 2;
    required string leader_id = 3;
    required string peer_id = 4;
diff --git a/proto/cli2.proto b/proto/cli2.proto
index 76416f7a9f..b41d00c322 100755
--- a/proto/cli2.proto
+++ b/proto/cli2.proto
@@ -23,17 +23,17 @@ package curve.chunkserver;
option cc_generic_services = true;
option go_package = "proto/cli2";

-// cli.proto 供老的使用保证
+// cli.proto is kept to guarantee compatibility for legacy users
message AddPeerRequest2 {
-    required uint32 logicPoolId = 1;    // 逻辑池id
-    required uint32 copysetId = 2;    // 复制组id
+    required uint32 logicPoolId = 1;    // Logical pool ID
+    required uint32 copysetId = 2;      // Copyset (replication group) ID
    required common.Peer leader = 3;    // leader
-    required common.Peer addPeer = 4;    // 新增peer
+    required common.Peer addPeer = 4;   // The peer to be added
}

message AddPeerResponse2 {
-    repeated common.Peer oldPeers = 1;    // 老配置
-    repeated common.Peer newPeers = 2;    // 新配置
+    repeated common.Peer oldPeers = 1;    // The old configuration
+    repeated common.Peer newPeers = 2;    // The new configuration
}

message RemovePeerRequest2 {
@@ -87,11 +87,11 @@ message SnapshotAllResponse {

message GetLeaderRequest2 {
    required uint32 logicPoolId = 1;
    required uint32 copysetId = 2;
-    optional common.Peer peer = 3;    // 可以不指定peer查leader
+    optional common.Peer peer = 3;    // The leader can be queried without specifying a peer
}

message GetLeaderResponse2 {
-    required common.Peer leader = 1;    // 通过peer判空来判断是否返回leader
+    required common.Peer leader = 1;    // Whether a leader is returned is determined by checking the peer for emptiness
}
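Because leader is a required field, the empty-peer convention above is worth spelling out; a hypothetical helper (assuming a cli2_pb2 module generated by protoc; this is a sketch of the convention, not code from the repository):

# Sketch: an "empty" Peer in GetLeaderResponse2 signals that no leader
# was found, since the leader field itself is always present.
def has_leader(response):
    return response.leader.HasField("address") and response.leader.address != ""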
message ResetPeerRequest2 {
diff --git a/proto/common.proto b/proto/common.proto
index 3cae9f9e65..0dc409b609 100644
--- a/proto/common.proto
+++ b/proto/common.proto
@@ -21,13 +21,13 @@ package curve.common;
option cc_generic_services = true;
option go_package = "proto/common";

-// 1. braft场景: id不使用,address为braft里面的PeerId,格式为{ip}:{port}:{index}
-// 2. curve-raft场景:id是peer id,address为{ip}:{port}
-// 当前chunkserver id就是peer id
+// 1. In the braft scenario: 'id' is not used, and 'address' is the PeerId within braft, in the format {ip}:{port}:{index}.
+// 2. In the curve-raft scenario: 'id' is the peer id, and 'address' is in the format {ip}:{port}.
+// The current chunkserver id is the peer id.
message Peer {
-    optional uint64 id = 1;    // peer id,全局唯一
-//    optional bool isLearner = 2;    // 是否是learner (暂时不支持)
-    optional string address = 3;    // peer的地址信息
+    optional uint64 id = 1;         // Peer ID, globally unique
+//    optional bool isLearner = 2;    // Whether it is a learner (not supported yet)
+    optional string address = 3;    // Address information of the peer
}

message CopysetInfo {
diff --git a/proto/copyset.proto b/proto/copyset.proto
index fe3d271d53..10aab0485c 100755
--- a/proto/copyset.proto
+++ b/proto/copyset.proto
@@ -23,7 +23,7 @@ package curve.chunkserver;
option cc_generic_services = true;
option go_package = "proto/copyset";

-// copyset epoch message,用于epoch序列化和反序列化
+// copyset epoch message, used for epoch serialization and deserialization
message ConfEpoch {
    required uint32 logicPoolId = 1;
    required uint32 copysetId = 2;
@@ -32,15 +32,15 @@ message ConfEpoch {
}

message CopysetRequest {
-    // logicPoolId 实际上 uint16,但是 proto 没有 uint16
+    // logicPoolId is actually uint16, but proto has no uint16
    required uint32 logicPoolId = 1;
    required uint32 copysetId = 2;
-    repeated string peerid = 3;    // 当前复制组配置,可以为空
+    repeated string peerid = 3;    // The current replication group configuration; may be empty
};

enum COPYSET_OP_STATUS {
    COPYSET_OP_STATUS_SUCCESS = 0;
-    COPYSET_OP_STATUS_EXIST = 1;    // copyset node 已经存在
+    COPYSET_OP_STATUS_EXIST = 1;    // The copyset node already exists
    COPYSET_OP_STATUS_COPYSET_NOTEXIST = 2;
    COPYSET_OP_STATUS_FAILURE_UNKNOWN = 3;
    COPYSET_OP_STATUS_COPYSET_IS_HEALTHY = 4;
@@ -48,7 +48,7 @@ enum COPYSET_OP_STATUS {

message CopysetResponse {
    optional COPYSET_OP_STATUS status = 1;
-    optional string redirect = 2;    // 自己不是 leader,重定向给 leader
+    optional string redirect = 2;    // If not the leader, redirect to the leader
};
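One plausible way for a caller to read these status codes when creating copysets is to treat COPYSET_OP_STATUS_EXIST as idempotent success; a hypothetical helper (assuming a copyset_pb2 module generated by protoc):

# Sketch: copyset creation can be retried safely if EXIST counts as success.
def create_succeeded(response):
    return response.status in (copyset_pb2.COPYSET_OP_STATUS_SUCCESS,
                               copyset_pb2.COPYSET_OP_STATUS_EXIST)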
message Copyset {
@@ -69,27 +69,27 @@ message CopysetStatusRequest {
    required uint32 logicPoolId = 1;
    required uint32 copysetId = 2;
    required common.Peer peer = 3;
-    required bool queryHash = 4;    // 考虑到计算copyset hash值是一个非常耗时的操作,所以设置一个bool变量可以选择不查
+    required bool queryHash = 4;    // Calculating the copyset hash is very time-consuming, so this bool lets the caller skip the hash query
}

-// 大部分字段只能是optional,因为copyset node可能不存在
+// Most fields can only be optional, since the copyset node may not exist
message CopysetStatusResponse {
-    required COPYSET_OP_STATUS status = 1;    // op状态
-    optional uint32 state = 2;    // copyset状态
+    required COPYSET_OP_STATUS status = 1;    // Op status
+    optional uint32 state = 2;                // Copyset state
    optional common.Peer peer = 3;    // peer
    optional common.Peer leader = 4;  // leader
-    optional bool readOnly = 5;    // 是否只读
-    optional int64 term = 6;    // 当前任期
-    optional int64 committedIndex = 7;    // 当前的committed index
-    optional int64 knownAppliedIndex = 8;    // 当前copyset已知的applied index,当前peer可能未apply
-    optional int64 pendingIndex = 9;    // 当前副本未决的op log index起始index
-    optional int64 pendingQueueSize = 10;    // 当前副本未决的op log queue的长度
-    optional int64 applyingIndex = 11;    // 当前副本正在apply的op log index
-    optional int64 firstIndex = 12;    // 当前副本第一条op log index(包括盘和memory)
-    optional int64 lastIndex = 13;    // 当前副本最后一条op log index(包括盘和memory)
-    optional int64 diskIndex = 14;    // 当前副本已经持久化的最大op log index(不包含memory)
-    optional uint64 epoch = 15;    // 当前copyset配置版本
-    optional string hash = 16;    // 当前copyset的数据hash值
+    optional bool readOnly = 5;              // Whether the copyset is read-only
+    optional int64 term = 6;                 // The current raft term
+    optional int64 committedIndex = 7;       // The current committed index
+    optional int64 knownAppliedIndex = 8;    // The applied index known to the copyset; the current peer may not have applied it yet
+    optional int64 pendingIndex = 9;         // The starting index of this replica's pending op log
+    optional int64 pendingQueueSize = 10;    // The length of this replica's pending op log queue
+    optional int64 applyingIndex = 11;       // The op log index this replica is currently applying
+    optional int64 firstIndex = 12;          // The first op log index of this replica (disk and memory)
+    optional int64 lastIndex = 13;           // The last op log index of this replica (disk and memory)
+    optional int64 diskIndex = 14;           // The largest op log index this replica has persisted (memory excluded)
+    optional uint64 epoch = 15;              // The current copyset configuration version
+    optional string hash = 16;               // The data hash of the current copyset
}
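Since the hash computation is expensive, a routine status probe would normally leave queryHash off. A minimal sketch of building the request (assuming a generated copyset_pb2 module and a common_pb2.Peer instance; the helper name is hypothetical):

# Sketch: query copyset status; only request the hash when explicitly
# needed, because computing it is very time-consuming.
def make_status_request(logic_pool_id, copyset_id, peer, want_hash=False):
    request = copyset_pb2.CopysetStatusRequest()
    request.logicPoolId = logic_pool_id
    request.copysetId = copyset_id
    request.peer.CopyFrom(peer)
    request.queryHash = want_hash
    return request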
service CopysetService {
diff --git a/proto/heartbeat.proto b/proto/heartbeat.proto
index 6b51d40277..dd600f7112 100644
--- a/proto/heartbeat.proto
+++ b/proto/heartbeat.proto
@@ -33,13 +33,13 @@ message CopySetInfo {
    required uint32 copysetId = 2;
    // copyset replicas, IP:PORT:ID, e.g. 127.0.0.1:8200:0
    repeated common.Peer peers = 3;
-    // epoch, 用来标记配置变更,每变更一次,epoch会增加
+    // epoch marks configuration changes; it increases with every change
    required uint64 epoch = 4;
-    // 该复制组的leader
+    // The leader of this replication group
    required common.Peer leaderPeer = 5;
-    // 配置变更相关信息
+    // Information about the ongoing configuration change
    optional ConfigChangeInfo configChangeInfo = 6;
-    // copyset的性能信息
+    // Performance statistics of the copyset
    optional CopysetStatistics stats = 7;
    // whether the current copyset is on scaning
    optional bool scaning = 8;
@@ -51,11 +51,11 @@ message CopySetInfo {

message ConfigChangeInfo {
    required common.Peer peer = 1;
-    // 配置变更的类型
+    // The type of the configuration change
    required ConfigChangeType type = 2;
-    // 配置变更是否成功
+    // Whether the configuration change succeeded
    required bool finished = 3;
-    // 变更的error信息
+    // Error information of the change
    optional CandidateError err = 4;
};

@@ -81,13 +81,13 @@ message ChunkServerStatisticInfo {
    required uint32 writeRate = 2;
    required uint32 readIOPS = 3;
    required uint32 writeIOPS = 4;
-    // 已使用的chunk占用的磁盘空间
+    // Disk space occupied by used chunks
    required uint64 chunkSizeUsedBytes = 5;
-    // chunkfilepool中未使用的chunk占用的磁盘空间
+    // Disk space occupied by unused chunks in the chunkfilepool
    required uint64 chunkSizeLeftBytes = 6;
-    // 回收站中chunk占用的磁盘空间
+    // Disk space occupied by chunks in the recycle bin
    required uint64 chunkSizeTrashedBytes = 7;
-    // chunkfilepool的大小
+    // The size of the chunkfilepool
    optional uint64 chunkFilepoolSize = 8;
    // percentage of chunkfilepool formatting
    optional uint32 chunkFilepoolFormatPercent = 9;
@@ -102,27 +102,27 @@ message ChunkServerHeartbeatRequest {
    required DiskState diskState = 6;
    required uint64 diskCapacity = 7;
    required uint64 diskUsed = 8;
-    // 返回该chunk上所有copyset的信息
+    // Information about all copysets on this chunkserver
    repeated CopySetInfo copysetInfos = 9;
-    // 时间窗口内该chunkserver上leader的个数
+    // The number of leaders on this chunkserver within the time window
    required uint32 leaderCount = 10;
-    // 时间窗口内该chunkserver上copyset的个数
+    // The number of copysets on this chunkserver within the time window
    required uint32 copysetCount = 11;
-    // chunkServer相关的统计信息
+    // Statistics related to the chunkserver
    optional ChunkServerStatisticInfo stats = 12;
    optional string version = 13;
};

enum ConfigChangeType {
-    // 配置变更命令: leader转换
+    // Configuration change command: transfer the leadership
    TRANSFER_LEADER = 1;
-    // 配置变更命令: 复制组增加一个成员
+    // Configuration change command: add a member to the replication group
    ADD_PEER = 2;
-    // 配置变更命令: 复制组删除一个成员
+    // Configuration change command: remove a member from the replication group
    REMOVE_PEER = 3;
-    // 配置变更命令: 没有配置变更
+    // Configuration change command: no configuration change
    NONE = 4;
-    // 配置变更命令:change复制组一个成员
+    // Configuration change command: change (replace) a member of the replication group
    CHANGE_PEER = 5;
    // start scan on the peer
    START_SCAN_PEER = 6;
@@ -136,40 +136,40 @@ message CopySetConf {
    repeated common.Peer peers = 3;
    required uint64 epoch = 4;
    optional ConfigChangeType type = 5;
-    // configchangeItem 是目标节点
-    // 对于TRANSFER_LEADER: 表示目标节点; 对于ADD_PEER: 表示待加入节点
-    // 对于REMOVE_PEER: 表示待删除节点; 对于CHANGE_PEER: 表示待加入节点
+    // configchangeItem is the target node:
+    // for TRANSFER_LEADER, the target node; for ADD_PEER, the node to be added;
+    // for REMOVE_PEER, the node to be removed; for CHANGE_PEER, the node to be added
    // SCAN_PEER: to scan the node
    optional common.Peer configchangeItem = 6;
-    // oldPeer, 这个只在ConfigChangeType=对于CHANGE_PEER的情况下会赋值,
-    // 表示待删除节点。
-    // chunkserver收到CHANGE_PEER,根据peers,configchangeItem,oldPeer拼出新的conf
+    // oldPeer is assigned only when ConfigChangeType is CHANGE_PEER and
+    // denotes the node to be removed.
+    // On receiving CHANGE_PEER, the chunkserver assembles the new conf from
+    // peers, configchangeItem, and oldPeer
    optional common.Peer oldPeer = 7;
};
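The CHANGE_PEER assembly rule in the comment above reads more clearly as code. A sketch (field names as defined above; using the address as the peer identity is an assumption):

# Sketch of the CHANGE_PEER rule: the new conf is the current peers with
# oldPeer removed and configchangeItem added.
def build_new_conf(conf):
    new_peers = [p for p in conf.peers if p.address != conf.oldPeer.address]
    new_peers.append(conf.configchangeItem)
    return new_peers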
enum HeartbeatStatusCode {
-    // 正常返回
+    // Normal return
    hbOK = 0;
-    // 必要的参数为初始化
+    // Required parameters are uninitialized
    hbParamUnInitialized = 1;
-    // chunkserver不在topology中
+    // The chunkserver is not in the topology
    hbChunkserverUnknown = 2;
-    // chunkserver状态为retired
+    // The chunkserver status is retired
    hbChunkserverRetired = 3;
-    // chunkserver的ip和port与topology中的不匹配
+    // The chunkserver's ip and port do not match those in the topology
    hbChunkserverIpPortNotMatch = 4;
-    // chunkserver的token不匹配
+    // The chunkserver's token does not match
    hbChunkserverTokenNotMatch = 5;
-    // 无copyset上报
+    // No copyset was reported
    hbRequestNoCopyset = 6;
-    // copyset转换为topology格式失败
+    // Failed to convert the copyset to the topology format
    hbAnalyseCopysetError = 7;
}

message ChunkServerHeartbeatResponse {
-    // 返回需要进行变更的copyset的信息
+    // Information about the copysets that need to be changed
    repeated CopySetConf needUpdateCopysets = 1;
-    // 错误码
+    // Error code
    optional HeartbeatStatusCode statusCode = 2;
};
diff --git a/proto/nameserver2.proto b/proto/nameserver2.proto
index 85947d96ad..57b8a80c3d 100644
--- a/proto/nameserver2.proto
+++ b/proto/nameserver2.proto
@@ -31,18 +31,18 @@ enum FileType {
};

enum FileStatus {
-    // 文件创建完成
+    // File creation completed
    kFileCreated = 0;
-    // 文件删除中
+    // The file is being deleted
    kFileDeleting = 1;
-    // 文件正在克隆
+    // The file is cloning
    kFileCloning = 2;
-    // 文件元数据安装完毕
+    // The file metadata has been installed
    kFileCloneMetaInstalled = 3;
-    // 文件克隆完成
+    // File cloning completed
    kFileCloned = 4;
-    // 文件正在被克隆
+    // The file is being cloned (it is the source of a clone)
    kFileBeingCloned = 5;
}

@@ -78,15 +78,15 @@ message FileInfo {
    optional uint64 ctime = 9;
    optional uint64 seqNum = 10;
    optional FileStatus fileStatus = 11;
-    //用于文件转移到回收站的情况下恢复场景下的使用,
-    //RecycleBin(回收站)目录下使用/其他场景下不使用
+    // Used for recovery when a file has been moved to the recycle bin;
+    // only used under the RecycleBin directory, not in other scenarios
    optional string originalFullPathName = 12;

-    // cloneSource 当前用于存放克隆源(当前主要用于curvefs)
-    // 后期可以考虑存放 s3相关信息
+    // cloneSource currently stores the clone source (mainly used by curvefs);
+    // storing s3-related information may be considered later
    optional string cloneSource = 13;

-    // cloneLength 克隆源文件的长度,用于clone过程中进行extent
+    // cloneLength is the length of the clone source file, used for extents during cloning
    optional uint64 cloneLength = 14;
    optional uint64 stripeUnit = 15;
    optional uint64 stripeCount = 16;
@@ -99,68 +99,68 @@ message FileInfo {

// status code
enum StatusCode {
-    // 执行成功
+    // Execution succeeded
    kOK = 0;
-    // 文件已存在
+    // The file already exists
    kFileExists = 101;
-    // 文件不存在
+    // The file does not exist
    kFileNotExists = 102;
-    // 非目录类型
+    // Not a directory
    kNotDirectory = 103;
-    // 传入参数错误
+    // Invalid input parameters
    kParaError = 104;
-    // 缩小文件,目前不支持缩小文件
+    // Shrinking a file is currently not supported
    kShrinkBiggerFile = 105;
-    // 扩容单位错误,非segment size整数倍
+    // Wrong expansion unit: not an integer multiple of the segment size
    kExtentUnitError = 106;
-    // segment未分配
+    // The segment is not allocated
    kSegmentNotAllocated = 107;
-    // segment分配失败
+    // Segment allocation failed
    kSegmentAllocateError = 108;
-    // 目录不存在
+    // The directory does not exist
    kDirNotExist = 109;
-    // 功能不支持
+    // The feature is not supported
    kNotSupported = 110;
-    // owner认证失败
+    // Owner authentication failed
    kOwnerAuthFail = 111;
-    // 目录非空
+    // The directory is not empty
    kDirNotEmpty = 112;
-    // 文件已处于快照中
+    // The file is already in a snapshot
    kFileUnderSnapShot = 120;
-    // 文件不在快照中
+    // The file is not in a snapshot
    kFileNotUnderSnapShot = 121;
-    // 快照删除中
+    // Snapshot deletion in progress
    kSnapshotDeleting = 122;
-    // 快照文件不存在
+    // The snapshot file does not exist
    kSnapshotFileNotExists = 123;
-    // 快照文件删除失败
+    // Snapshot file deletion failed
    kSnapshotFileDeleteError = 124;
-    // session不存在
+    // The session does not exist
    kSessionNotExist = 125;
-    // 文件已被占用
+    // The file is occupied
    kFileOccupied = 126;
    kCloneFileNameIllegal = 127;
    kCloneStatusNotMatch = 128;
-    // 文件删除失败
+    // File deletion failed
    kCommonFileDeleteError = 129;
-    // 文件id不匹配
+    // File ID mismatch
    kFileIdNotMatch = 130;
-    // 文件在删除中
+    // The file is being deleted
    kFileUnderDeleting = 131;
-    // 文件长度不符合要求
+    // The file length does not meet the requirements
    kFileLengthNotSupported = 132;
-    // 文件正在被克隆
+    // The file is being cloned
    kDeleteFileBeingCloned = 133;
-    // client版本不匹配
+    // Client version mismatch
    kClientVersionNotMatch = 134;
-    // snapshot功能禁用中
+    // The snapshot feature is disabled
    kSnapshotFrozen = 135;
-    // 快照克隆服务连不上
+    // The snapshot clone service cannot be reached
    kSnapshotCloneConnectFail = 136;
-    // 快照克隆服务未初始化
+    // The snapshot clone service is not initialized
    kSnapshotCloneServerNotInit = 137;
    // recover file status is CloneMetaInstalled
    kRecoverFileCloneMetaInstalled = 138;
@@ -170,9 +170,9 @@ enum StatusCode {
    kEpochTooOld = 140;
    // poolset doesn't exist
    kPoolsetNotExist = 141;
-    // 元数据存储错误
+    // Metadata storage error
    kStorageError = 501;
-    // 内部错误
+    // Internal error
    KInternalError = 502;
};

@@ -311,20 +311,20 @@ message ExtendFileResponse {
}

message ChangeOwnerRequest {
-    // 需要变更owner的文件的fileName
+    // The fileName of the file whose owner is to be changed
    required string fileName = 1;
-    // 希望文件owner变更后的新的owner
+    // The desired new owner after the change
    required string newOwner = 2;
-    // ChangerOwner接口只能通过root权限进行调用,需要传入root权限的owner
+    // The ChangeOwner interface can only be called with root permission; an owner with root permission must be passed in
    required string rootOwner = 3;
-    // 对root身份进行校验的的signature
+    // The signature used to verify the root identity
    required string signature = 4;
-    // 用来在mds端重新计算signature
+    // Used to recalculate the signature on the mds side
    required uint64 date = 5;
}
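For illustration, populating this request might look as follows; this is a sketch only (it assumes a generated nameserver2_pb2 module, and the signing scheme behind signature and date is not shown):

# Sketch: ChangeOwner may only be issued with root permission; signature
# authenticates the root identity, and date lets mds recompute it.
def make_change_owner_request(file_name, new_owner, signature, date_us):
    request = nameserver2_pb2.ChangeOwnerRequest()
    request.fileName = file_name
    request.newOwner = new_owner
    request.rootOwner = "root"
    request.signature = signature
    request.date = date_us
    return request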
-// 返回ChangeOwner的执行结果,成功返回statusCode::kOK
-// 失败可能返回kFileNotExists、kOwnerAuthFail、kFileOccupied、kStorageError等,可能返回的错误码将来继续补充
+// Returns the execution result of ChangeOwner; statusCode::kOK on success.
+// On failure it may return kFileNotExists, kOwnerAuthFail, kFileOccupied, kStorageError, etc.; more error codes may be added in the future
message ChangeOwnerResponse {
    required StatusCode statusCode = 1;
}

@@ -395,8 +395,8 @@ message CheckSnapShotStatusRequest {
    required uint64 date = 5;
}

-// statusCode为kOK时,fileStatus和progress才会赋值
-// 只有fileStatus是kFileDeleting时,progress表示快照文件删除进度,否则progress返回0
+// fileStatus and progress are only assigned when statusCode is kOK.
+// Only when fileStatus is kFileDeleting does progress represent the snapshot file deletion progress; otherwise progress returns 0
message CheckSnapShotStatusResponse {
    required StatusCode statusCode = 1;
    optional FileStatus fileStatus = 2;
@@ -431,7 +431,7 @@ message OpenFileRequest {
    optional string clientVersion = 5;
};

-// statusCode返回值,详见StatusCode定义:
+// For the statusCode return values, see the StatusCode definition:
// StatusCode::kOK
// StatusCode::kFileNotExists
// StatusCode::kStorageError
@@ -456,7 +456,7 @@ message CloseFileRequest {
    optional uint32 clientPort = 7;
};

-// statusCode返回值,详见StatusCode定义:
+// For the statusCode return values, see the StatusCode definition:
// StatusCode::kOK
// StatusCode::kFileNotExists
// StatusCode::kStorageError
@@ -478,7 +478,7 @@ message ReFreshSessionRequest {
    optional uint32 clientPort = 8;
}

-// statusCode返回值,详见StatusCode定义:
+// For the statusCode return values, see the StatusCode definition:
// StatusCode::kOK
// StatusCode::kFileNotExists
// StatusCode::kStorageError
@@ -531,9 +531,9 @@ message GetAllocatedSizeRequest {

message GetAllocatedSizeResponse {
    required StatusCode statusCode = 1;
-    // 文件或目录的分配大小
+    // The allocated size of the file or directory
    optional uint64 allocatedSize = 2;
-    // key是逻辑池id,value是分配大小
+    // The key is the logical pool id; the value is the allocated size
    map<uint32, uint64> allocSizeMap = 3;
}

@@ -543,7 +543,7 @@ message GetFileSizeRequest {

message GetFileSizeResponse {
    required StatusCode statusCode = 1;
-    // 文件或目录的file length
+    // The file length of the file or directory
    optional uint64 fileSize = 2;
}

diff --git a/proto/schedule.proto b/proto/schedule.proto
index 2dde693556..9c92bb4ef5 100644
--- a/proto/schedule.proto
+++ b/proto/schedule.proto
@@ -34,7 +34,7 @@ message RapidLeaderScheduleResponse {
    required sint32 statusCode = 1;
}

-// 如果chunkServerID为空,则返回所有chunkserver的恢复状态
+// If chunkServerID is empty, the recovery status of all chunkservers is returned
message QueryChunkServerRecoverStatusRequest {
    repeated uint32 chunkServerID = 1;
}

diff --git a/proto/topology.proto b/proto/topology.proto
index 6e88d4e102..f9864de5e9 100644
--- a/proto/topology.proto
+++ b/proto/topology.proto
@@ -415,7 +415,7 @@ message CreateLogicalPoolRequest {
    required LogicalPoolType type = 4;
    required bytes redundanceAndPlaceMentPolicy = 5;    //json body
    required bytes userPolicy = 6;                      //json body
-    optional uint32 scatterWidth = 7;    //生成copyset依据的scatterWidth平均值
+    optional uint32 scatterWidth = 7;    // The average scatterWidth on which copyset generation is based
    optional AllocateStatus status = 8;
}

diff --git a/robot/Resources/keywords/deploy.py b/robot/Resources/keywords/deploy.py
index 93d7926a45..0a556c7021 100644
--- a/robot/Resources/keywords/deploy.py
+++ b/robot/Resources/keywords/deploy.py
@@ -9,6 +9,7 @@ import random
import time

+
def add_config():
    etcd = []
    for host in config.etcd_list:
@@ -16,168 +17,183 @@ def add_config():
    etcd_addrs = ",".join(etcd)
    # add mds config
    for host in config.mds_list:
-        ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
+        ssh =
shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo rm *.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/mds.conf %s:~/"%\ - (config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/mds.conf %s:~/" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) - ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s:6666/g' mds.conf"%host + ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s:6666/g' mds.conf" % host rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host - #change offline time - ori_cmd = R"sed -i 's/mds.heartbeat.offlinetimeoutMs=.*/mds.heartbeat.offlinetimeoutMs=%d/g' mds.conf"%(config.offline_timeout*1000) + assert rs[3] == 0, "change host %s mds config fail" % host + # change offline time + ori_cmd = R"sed -i 's/mds.heartbeat.offlinetimeoutMs=.*/mds.heartbeat.offlinetimeoutMs=%d/g' mds.conf" % ( + config.offline_timeout*1000) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host - #change clean_follower_afterMs time - ori_cmd = R"sed -i 's/mds.heartbeat.clean_follower_afterMs=.*/mds.heartbeat.clean_follower_afterMs=%d/g' mds.conf"%(300000) + assert rs[3] == 0, "change host %s mds config fail" % host + # change clean_follower_afterMs time + ori_cmd = R"sed -i 's/mds.heartbeat.clean_follower_afterMs=.*/mds.heartbeat.clean_follower_afterMs=%d/g' mds.conf" % ( + 300000) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host - #change scheduler time + assert rs[3] == 0, "change host %s mds config fail" % host + # change scheduler time ori_cmd = R"sed -i 's/mds.copyset.scheduler.intervalSec=.*/mds.copyset.scheduler.intervalSec=0/g' mds.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host + assert rs[3] == 0, "change host %s mds config fail" % host ori_cmd = R"sed -i 's/mds.replica.scheduler.intervalSec=.*/mds.replica.scheduler.intervalSec=0/g' mds.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host + assert rs[3] == 0, "change host %s mds config fail" % host # ori_cmd = R"sed -i 's/mds.recover.scheduler.intervalSec=.*/mds.recover.scheduler.intervalSec=0/g' mds.conf" # rs = shell_operator.ssh_exec(ssh, ori_cmd) # assert rs[3] == 0,"change host %s mds config fail"%host ori_cmd = R"sed -i 's/mds.leader.scheduler.intervalSec=.*/mds.leader.scheduler.intervalSec=5/g' mds.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host + assert rs[3] == 0, "change host %s mds config fail" % host # change topology update time ori_cmd = R"sed -i 's/mds.topology.TopologyUpdateToRepoSec=.*/mds.topology.TopologyUpdateToRepoSec=1/g' mds.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host - #add mysql conf - ori_cmd = R"sed -i 's/mds.DbUrl=localhost/mds.DbUrl=%s/g' mds.conf"%(config.abnormal_db_host) + assert rs[3] == 0, "change host %s mds config fail" % host + # add mysql conf + ori_cmd = R"sed -i 's/mds.DbUrl=localhost/mds.DbUrl=%s/g' mds.conf" % ( + config.abnormal_db_host) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host - #add etcd conf - ori_cmd = R"sed -i 's/mds.etcd.endpoint=127.0.0.1:2379/mds.etcd.endpoint=%s/g' 
mds.conf"%(etcd_addrs) + assert rs[3] == 0, "change host %s mds config fail" % host + # add etcd conf + ori_cmd = R"sed -i 's/mds.etcd.endpoint=127.0.0.1:2379/mds.etcd.endpoint=%s/g' mds.conf" % ( + etcd_addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s mds config fail"%host + assert rs[3] == 0, "change host %s mds config fail" % host ori_cmd = "sudo mv mds.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s mds conf fail"%host + assert rs[3] == 0, "mv %s mds conf fail" % host # add client config mds_addrs = [] for host in config.mds_list: mds_addrs.append(host + ":6666") addrs = ",".join(mds_addrs) for host in config.client_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo rm *.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/client.conf %s:~/"%\ - (config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/client.conf %s:~/" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) - ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' client.conf"%(addrs) + ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' client.conf" % ( + addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s client config fail"%host -#将client.conf配置成py_client.conf(主机用),方便client复现死锁问题 + assert rs[3] == 0, "change host %s client config fail" % host + # Configure client.conf to py_client.conf(for the host) to facilitate client replication of deadlock issues ori_cmd = "sudo mv client.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "sudo cp /etc/curve/client.conf /etc/curve/py_client.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s client conf fail"%host + assert rs[3] == 0, "mv %s client conf fail" % host # add chunkserver config addrs = ",".join(mds_addrs) for host in config.chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo rm *.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/chunkserver.conf.example %s:~/chunkserver.conf"%\ - (config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/chunkserver.conf.example %s:~/chunkserver.conf" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) - #change global ip - ori_cmd = R"sed -i 's/global.ip=127.0.0.1/global.ip=%s/g' chunkserver.conf"%host + # change global ip + ori_cmd = R"sed -i 's/global.ip=127.0.0.1/global.ip=%s/g' chunkserver.conf" % host rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - #change global subnet - subnet=host+"/24" - ori_cmd = R"sed -i 's#global.subnet=127.0.0.0/24#global.subnet=%s#g' chunkserver.conf"%subnet + assert rs[3] == 0, "change host %s chunkserver config fail" % host + # change global subnet + subnet = host+"/24" + ori_cmd = R"sed -i 's#global.subnet=127.0.0.0/24#global.subnet=%s#g' chunkserver.conf" % subnet rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - #change mds ip - ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' 
chunkserver.conf"%(addrs) + assert rs[3] == 0, "change host %s chunkserver config fail" % host + # change mds ip + ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' chunkserver.conf" % ( + addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - + assert rs[3] == 0, "change host %s chunkserver config fail" % host + ori_cmd = R"sed -i 's/chunkserver.snapshot_throttle_throughput_bytes=.*/chunkserver.snapshot_throttle_throughput_bytes=104857600/g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - + assert rs[3] == 0, "change host %s chunkserver config fail" % host + ori_cmd = R"sed -i 's/trash.expire_afterSec=.*/trash.expire_afterSec=0/g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - + assert rs[3] == 0, "change host %s chunkserver config fail" % host + ori_cmd = R"sed -i 's/trash.scan_periodSec=.*/trash.scan_periodSec=10/g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host - #open use snapshot + assert rs[3] == 0, "change host %s chunkserver config fail" % host + # open use snapshot ori_cmd = R"sed -i 's/clone.disable_curve_client=true/clone.disable_curve_client=false/g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host + assert rs[3] == 0, "change host %s chunkserver config fail" % host ori_cmd = R"sed -i 's/clone.disable_s3_adapter=true/clone.disable_s3_adapter=false/g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host + assert rs[3] == 0, "change host %s chunkserver config fail" % host ori_cmd = R"sed -i 's#curve.config_path=conf/cs_client.conf#curve.config_path=/etc/curve/conf/cs_client.conf#g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host + assert rs[3] == 0, "change host %s chunkserver config fail" % host ori_cmd = R"sed -i 's#s3.config_path=conf/s3.conf#s3.config_path=/etc/curve/conf/s3.conf#g' chunkserver.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s chunkserver config fail"%host + assert rs[3] == 0, "change host %s chunkserver config fail" % host ori_cmd = "sudo mv chunkserver.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s chunkserver conf fail"%host + assert rs[3] == 0, "mv %s chunkserver conf fail" % host # add s3 and client conf\cs_client conf client_host = random.choice(config.client_list) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s:/etc/curve/client.conf ."%\ - (config.pravie_key_path,client_host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s:/etc/curve/client.conf ." 
%\ + (config.pravie_key_path, client_host) shell_operator.run_exec2(cmd) for host in config.chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 client.conf conf/cs_client.conf %s:~/"%\ - (config.pravie_key_path,host) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 client.conf conf/cs_client.conf %s:~/" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) - ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' cs_client.conf"%(addrs) + ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' cs_client.conf" % ( + addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s cs_client config fail"%host + assert rs[3] == 0, "change host %s cs_client config fail" % host ori_cmd = "sudo mv client.conf /etc/curve/conf && sudo mv cs_client.conf /etc/curve/conf/" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s client conf fail"%host + assert rs[3] == 0, "mv %s client conf fail" % host for host in config.snap_server_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 client.conf conf/snapshot_clone_server.conf conf/snap_client.conf %s:~/"%\ - (config.pravie_key_path,host) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 client.conf conf/snapshot_clone_server.conf conf/snap_client.conf %s:~/" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) ori_cmd = "sed -i \"s/client.config_path=\S*/client.config_path=\/etc\/curve\/snap_client.conf/\" snapshot_clone_server.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s snapshot config fail"%host - #修改snapshot_clone_server.conf etcd配置 - ori_cmd = "sed -i \"s/etcd.endpoint=\S*/etcd.endpoint=%s/g\" snapshot_clone_server.conf"%(etcd_addrs) + assert rs[3] == 0, "change host %s snapshot config fail" % host + # Modify snapshot_clone_server.conf etcd configuration + ori_cmd = "sed -i \"s/etcd.endpoint=\S*/etcd.endpoint=%s/g\" snapshot_clone_server.conf" % ( + etcd_addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s snapshot config fail"%host - #修改数据库配置项 - ori_cmd = R"sed -i 's/metastore.db_address=\S*/metastore.db_address=%s/g' snapshot_clone_server.conf"%(config.abnormal_db_host) + assert rs[3] == 0, "change host %s snapshot config fail" % host + # Modifying Database Configuration Items + ori_cmd = R"sed -i 's/metastore.db_address=\S*/metastore.db_address=%s/g' snapshot_clone_server.conf" % ( + config.abnormal_db_host) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s snapshot clone server config fail"%host + assert rs[3] == 0, "change host %s snapshot clone server config fail" % host ori_cmd = "sed -i \"s/s3.config_path=\S*/s3.config_path=\/etc\/curve\/s3.conf/\" snapshot_clone_server.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s snapshot config fail"%host - ori_cmd = "sed -i \"s/server.address=\S*/server.address=%s:5556/g\" snapshot_clone_server.conf"%host + assert rs[3] == 0, "change host %s snapshot config fail" % host + ori_cmd = "sed -i \"s/server.address=\S*/server.address=%s:5556/g\" snapshot_clone_server.conf" % host rs = shell_operator.ssh_exec(ssh, ori_cmd) - 
assert rs[3] == 0,"change host %s snapshot config fail"%host -#change snap_client.conf - ori_cmd = "sed -i \"s/mds.listen.addr=\S*/mds.listen.addr=%s/g\" snap_client.conf"%(addrs) + assert rs[3] == 0, "change host %s snapshot config fail" % host +# change snap_client.conf + ori_cmd = "sed -i \"s/mds.listen.addr=\S*/mds.listen.addr=%s/g\" snap_client.conf" % ( + addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s snapshot config fail"%host + assert rs[3] == 0, "change host %s snapshot config fail" % host ori_cmd = "sudo mv snapshot_clone_server.conf /etc/curve/ && sudo mv snap_client.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s snapshot_clone_server conf fail"%host + assert rs[3] == 0, "mv %s snapshot_clone_server conf fail" % host ori_cmd = "sudo mv client.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) @@ -187,29 +203,32 @@ def add_config(): snap_addrs_list.append(host + ":5556") snap_addrs = ",".join(snap_addrs_list) for host in config.mds_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, ori_cmd) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/tools.conf %s:~/"%\ - (config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 conf/tools.conf %s:~/" %\ + (config.pravie_key_path, host) shell_operator.run_exec2(cmd) - ori_cmd = R"sed -i 's/mdsAddr=127.0.0.1:6666/mdsAddr=%s/g' tools.conf"%addrs + ori_cmd = R"sed -i 's/mdsAddr=127.0.0.1:6666/mdsAddr=%s/g' tools.conf" % addrs rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s tools config fail"%host - ori_cmd = R"sed -i 's/etcdAddr=127.0.0.1:2379/etcdAddr=%s/g' tools.conf"%etcd_addrs + assert rs[3] == 0, "change host %s tools config fail" % host + ori_cmd = R"sed -i 's/etcdAddr=127.0.0.1:2379/etcdAddr=%s/g' tools.conf" % etcd_addrs rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s tools config fail"%host - ori_cmd = R"sed -i 's/snapshotCloneAddr=127.0.0.1:5555/snapshotCloneAddr=%s/g' tools.conf"%snap_addrs + assert rs[3] == 0, "change host %s tools config fail" % host + ori_cmd = R"sed -i 's/snapshotCloneAddr=127.0.0.1:5555/snapshotCloneAddr=%s/g' tools.conf" % snap_addrs rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"change host %s tools config fail"%host + assert rs[3] == 0, "change host %s tools config fail" % host ori_cmd = "sudo mv tools.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mv %s tools conf fail"%host + assert rs[3] == 0, "mv %s tools conf fail" % host + def clean_env(): - host_list = config.client_list + config.mds_list + config.chunkserver_list + host_list = config.client_list + config.mds_list + config.chunkserver_list host_list = list(set(host_list)) for host in host_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd1 = "sudo tc qdisc del dev bond0.106 root" shell_operator.ssh_exec(ssh, ori_cmd1) ori_cmd2 = "ps -ef|grep -v grep | grep memtester | awk '{print $2}'| sudo xargs kill -9" @@ -217,33 +236,42 @@ def clean_env(): ori_cmd3 = "ps -ef|grep -v grep | grep cpu_stress.py | awk '{print $2}'| sudo xargs kill -9" shell_operator.ssh_exec(ssh, ori_cmd3) + def destroy_mds(): for host in config.mds_list: - ssh = 
shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep -v sudo | grep curve-mds | awk '{print $2}' | sudo xargs kill -9" shell_operator.ssh_exec(ssh, ori_cmd) + def destroy_etcd(): for host in config.etcd_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep etcd | awk '{print $2}' | sudo xargs kill -9" shell_operator.ssh_exec(ssh, ori_cmd) + def destroy_snapshotclone_server(): for host in config.snap_server_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep |grep -v sudo | grep snapshotcloneserver | awk '{print $2}' | sudo xargs kill -9" shell_operator.ssh_exec(ssh, ori_cmd) + def stop_nebd(): for host in config.client_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep nebd | awk '{print $2}' | sudo xargs kill -9" rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[3] != 0: logger.debug("snapshotcloneserver not up") continue - + + def initial_chunkserver(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) try: @@ -257,22 +285,24 @@ def initial_chunkserver(host): assert rs[1] == [], "kill chunkserver fail" ori_cmd = "sudo find /data/ -name chunkserver.dat -exec rm -rf {} \;" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.debug("delete dat ,return is %s"%rs[1]) - assert rs[3] == 0,"rm %s dat fail"%host + logger.debug("delete dat ,return is %s" % rs[1]) + assert rs[3] == 0, "rm %s dat fail" % host ori_cmd = "sh recycle_chunks.sh -d /data -chunks chunkfilepool -wals chunkfilepool" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.debug("recycle chunk ,return is %s"%rs[1]) - assert rs[3] == 0,"recycle %s chunk fail"%host + logger.debug("recycle chunk ,return is %s" % rs[1]) + assert rs[3] == 0, "recycle %s chunk fail" % host ssh.close() except Exception as e: logger.error("%s" % e) raise return 0 + def recycle_chunk(): cmd = "ansible-playbook -i curve/curve-ansible/server.ini curve/curve-ansible/clean_curve.yml --tags chunkserver" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"ansible clean chunk fail" + assert ret == 0, "ansible clean chunk fail" + def drop_all_chunkserver_dat(): thread = [] @@ -286,34 +316,39 @@ def drop_all_chunkserver_dat(): logger.debug("drop cs dat get result is %d" % t.get_result()) assert t.get_result() == 0 + def destroy_test_env(): try: cmd = "cp robot/init_env.sh . 
&& bash init_env.sh" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"init env fail" + assert ret == 0, "init env fail" host = config.client_list[0] except Exception: logger.error("init env fail.") raise + def change_cfg(): try: - cmd = "bash %s/change_cfg.sh"%config.fs_cfg_path + cmd = "bash %s/change_cfg.sh" % config.fs_cfg_path ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"change fs cfg fail" + assert ret == 0, "change fs cfg fail" except Exception: logger.error("change fs cfg fail.") raise + def destroy_curvefs(): try: test_client = config.fs_test_client[0] - ssh = shell_operator.create_ssh_connect(test_client, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + test_client, 1046, config.abnormal_user) cmd = "/home/nbs/.curveadm/bin/curveadm cluster checkout citest" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"checkout fail" + assert ret == 0, "checkout fail" for mountpoint in config.fs_mount_dir: - cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s"%(config.fs_mount_path,mountpoint) + cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s" % ( + config.fs_mount_path, mountpoint) shell_operator.ssh_exec(ssh, cmd) cmd = "echo 'yes' | /home/nbs/.curveadm/bin/curveadm stop" ret = shell_operator.run_exec(cmd) @@ -323,186 +358,218 @@ def destroy_curvefs(): logger.error("destroy curvefs fail.") raise + def use_ansible_deploy(): try: cmd = "cp robot/ansible_deploy.sh . && bash ansible_deploy.sh" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"ansible deploy fail" + assert ret == 0, "ansible deploy fail" host = config.client_list[0] - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s:/etc/curve/client.conf ."%\ - (config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s:/etc/curve/client.conf ." 
%\ + (config.pravie_key_path, host) ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"cp client.conf fail" + assert ret == 0, "cp client.conf fail" except Exception: logger.error("deploy curve fail.") raise + def deploy_all_servers(): try: cmd = "/home/nbs/.curveadm/bin/curveadm cluster checkout citest" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"checkout fail" + assert ret == 0, "checkout fail" cmd = "/home/nbs/.curveadm/bin/curveadm deploy" ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"deploy mds\etcd\metaserver fail" + assert ret == 0, "deploy mds\etcd\metaserver fail" except Exception: logger.error("deploy curvefs fail.") raise -def remk_test_dir(): + +def remk_test_dir(): try: test_client = config.fs_test_client[0] - ssh = shell_operator.create_ssh_connect(test_client, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + test_client, 1046, config.abnormal_user) for test_dir in config.fs_mount_dir: - ori_cmd = "rm -rf %s/%s"%(config.fs_mount_path,test_dir) + ori_cmd = "rm -rf %s/%s" % (config.fs_mount_path, test_dir) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"rm test dir %s fail,error is %s"%(test_dir,rs[1]) - ori_cmd = "mkdir %s/%s"%(config.fs_mount_path,test_dir) + assert rs[3] == 0, "rm test dir %s fail,error is %s" % ( + test_dir, rs[1]) + ori_cmd = "mkdir %s/%s" % (config.fs_mount_path, test_dir) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mkdir %s fail,error is %s"%(test_dir,rs[1]) + assert rs[3] == 0, "mkdir %s fail,error is %s" % (test_dir, rs[1]) except Exception: logger.error(" remk test dir fail.") raise -def mount_test_dir(mountpoint="",mountfile=""): + +def mount_test_dir(mountpoint="", mountfile=""): try: test_client = config.fs_test_client[0] - ssh = shell_operator.create_ssh_connect(test_client, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + test_client, 1046, config.abnormal_user) if mountpoint == "": for mountpoint in config.fs_mount_dir: if config.fs_use_curvebs: cmd = "/home/nbs/.curveadm/bin/curveadm mount %s %s%s -c client-bs-%s.yaml \ - --fstype volume"%(mountpoint,config.fs_mount_path,mountpoint,mountpoint) - else: + --fstype volume" % (mountpoint, config.fs_mount_path, mountpoint, mountpoint) + else: cmd = "/home/nbs/.curveadm/bin/curveadm mount %s %s%s -c client-%s.yaml\ - "%(mountpoint,config.fs_mount_path,mountpoint,mountpoint) + " % (mountpoint, config.fs_mount_path, mountpoint, mountpoint) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"mount %s dir fail,error is %s"%(mountpoint,rs[2]) + assert rs[3] == 0, "mount %s dir fail,error is %s" % ( + mountpoint, rs[2]) else: if mountfile == "": mountfile = mountpoint if config.fs_use_curvebs: cmd = "/home/nbs/.curveadm/bin/curveadm mount %s %s%s -c client-bs-%s.yaml \ - --fstype volume"%(mountpoint,config.fs_mount_path,mountfile,mountfile) + --fstype volume" % (mountpoint, config.fs_mount_path, mountfile, mountfile) else: cmd = "/home/nbs/.curveadm/bin/curveadm mount %s %s%s -c client-%s.yaml\ - "%(mountpoint,config.fs_mount_path,mountfile,mountfile) + " % (mountpoint, config.fs_mount_path, mountfile, mountfile) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"mount %s dir fail,error is %s"%(mountpoint,rs[2]) + assert rs[3] == 0, "mount %s dir fail,error is %s" % ( + mountpoint, rs[2]) except Exception: logger.error("mount dir fail.") raise + def umount_test_dir(mountpoint=""): try: test_client = config.fs_test_client[0] - ssh = shell_operator.create_ssh_connect(test_client, 1046, 
config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + test_client, 1046, config.abnormal_user) if mountpoint == "": for mountpoint in config.fs_mount_dir: - cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s"%(config.fs_mount_path,mountpoint) + cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s" % ( + config.fs_mount_path, mountpoint) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"umount %s dir fail,error is %s"%(mountpoint,rs[2]) + assert rs[3] == 0, "umount %s dir fail,error is %s" % ( + mountpoint, rs[2]) else: - cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s"%(config.fs_mount_path,mountpoint) + cmd = "/home/nbs/.curveadm/bin/curveadm umount %s%s" % ( + config.fs_mount_path, mountpoint) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"umount %s dir fail,error is %s"%(mountpoint,rs[2]) + assert rs[3] == 0, "umount %s dir fail,error is %s" % ( + mountpoint, rs[2]) except Exception: logger.error("umount dir fail.") raise + def install_deb(): try: -# mkdeb_url = config.curve_workspace + "mk-deb.sh" -# exec_mkdeb = "bash %s"%mkdeb_url -# shell_operator.run_exec2(exec_mkdeb) - cmd = "ls %scurve-mds*.deb"%config.curve_workspace + # mkdeb_url = config.curve_workspace + "mk-deb.sh" + # exec_mkdeb = "bash %s"%mkdeb_url + # shell_operator.run_exec2(exec_mkdeb) + cmd = "ls %scurve-mds*.deb" % config.curve_workspace mds_deb = shell_operator.run_exec2(cmd) version = mds_deb.split('+')[1] for host in config.mds_list: - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s*.deb %s:~/"%\ - (config.pravie_key_path,config.curve_workspace,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s*.deb %s:~/" %\ + (config.pravie_key_path, config.curve_workspace, host) shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - ori_cmd = "sudo dpkg -i --force-overwrite *%s* aws-sdk_1.0_amd64.deb"%version + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + ori_cmd = "sudo dpkg -i --force-overwrite *%s* aws-sdk_1.0_amd64.deb" % version rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"mds install deb fail,error is %s %s"%(rs[1],rs[2]) - rm_deb = "rm *%s*"%version + assert rs[3] == 0, "mds install deb fail,error is %s %s" % ( + rs[1], rs[2]) + rm_deb = "rm *%s*" % version shell_operator.ssh_exec(ssh, rm_deb) - + for host in config.client_list: - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s*.deb %s:~/"%\ - (config.pravie_key_path,config.curve_workspace,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s*.deb %s:~/" %\ + (config.pravie_key_path, config.curve_workspace, host) shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - ori_cmd = "sudo dpkg -i --force-overwrite curve-sdk*%s*"%version + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + ori_cmd = "sudo dpkg -i --force-overwrite curve-sdk*%s*" % version rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"sdk install deb fail,error is %s %s"%(rs[1],rs[2]) - rm_deb = "rm *%s*"%version + assert rs[3] == 0, "sdk install deb fail,error is %s %s" % ( + rs[1], rs[2]) + rm_deb = "rm *%s*" % version shell_operator.ssh_exec(ssh, rm_deb) for host in config.chunkserver_list: cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %s*.deb %s:~/" %\ - (config.pravie_key_path,config.curve_workspace,host) + (config.pravie_key_path, config.curve_workspace, host) shell_operator.run_exec2(cmd) - ssh = 
shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - ori_cmd = "sudo dpkg -i --force-overwrite curve-chunkserver*%s* curve-tools*%s* aws-sdk_1.0_amd64.deb"%(version,version) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + ori_cmd = "sudo dpkg -i --force-overwrite curve-chunkserver*%s* curve-tools*%s* aws-sdk_1.0_amd64.deb" % ( + version, version) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0, "chunkserver install deb fail,error is %s %s"%(rs[1],rs[2]) - rm_deb = "rm *%s*"%version + assert rs[3] == 0, "chunkserver install deb fail,error is %s %s" % ( + rs[1], rs[2]) + rm_deb = "rm *%s*" % version shell_operator.ssh_exec(ssh, rm_deb) except Exception: logger.error("install deb fail.") raise + def start_nebd(): - cmd = "ls nebd/nebd*.deb" - nebd_deb = shell_operator.run_exec2(cmd) - version = nebd_deb.split('+')[1] - assert nebd_deb != "","can not get nebd deb" - for host in config.client_list: - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %snebd/*.deb %s:~/"%\ - (config.pravie_key_path,config.curve_workspace,host) - shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) - ori_cmd = "sudo dpkg -i --force-overwrite nebd_*%s"%version - rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"install nebd deb fail,error is %s"%rs - rm_deb = "rm nebd_*%s"%version - shell_operator.ssh_exec(ssh, rm_deb) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 nebd/etc/nebd/*.conf %s:~/"%\ - (config.pravie_key_path,host) - shell_operator.run_exec2(cmd) - ori_cmd = "sudo cp nebd-client.conf nebd-server.conf /etc/nebd/" - rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"cp %s nebd conf fail"%host - ori_cmd = "sudo nebd-daemon start" - rs = shell_operator.ssh_exec(ssh, ori_cmd) - if rs[3] != 0: - logger.debug("nebd start fail,error is %s"%rs[1]) - ori_cmd == "sudo nebd-daemon restart" - rs2 = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs2[3] == 0,"restart nebd fail, return is %s"%rs2[1] - time.sleep(5) - ori_cmd = "ps -ef|grep nebd-server | grep -v daemon |grep -v grep |awk '{print $2}'" - rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[1] != "","start nebd fail!" 
+ cmd = "ls nebd/nebd*.deb" + nebd_deb = shell_operator.run_exec2(cmd) + version = nebd_deb.split('+')[1] + assert nebd_deb != "", "can not get nebd deb" + for host in config.client_list: + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 %snebd/*.deb %s:~/" %\ + (config.pravie_key_path, config.curve_workspace, host) + shell_operator.run_exec2(cmd) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) + ori_cmd = "sudo dpkg -i --force-overwrite nebd_*%s" % version + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "install nebd deb fail,error is %s" % rs + rm_deb = "rm nebd_*%s" % version + shell_operator.ssh_exec(ssh, rm_deb) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 nebd/etc/nebd/*.conf %s:~/" %\ + (config.pravie_key_path, host) + shell_operator.run_exec2(cmd) + ori_cmd = "sudo cp nebd-client.conf nebd-server.conf /etc/nebd/" + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "cp %s nebd conf fail" % host + ori_cmd = "sudo nebd-daemon start" + rs = shell_operator.ssh_exec(ssh, ori_cmd) + if rs[3] != 0: + logger.debug("nebd start fail,error is %s" % rs[1]) + ori_cmd == "sudo nebd-daemon restart" + rs2 = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs2[3] == 0, "restart nebd fail, return is %s" % rs2[1] + time.sleep(5) + ori_cmd = "ps -ef|grep nebd-server | grep -v daemon |grep -v grep |awk '{print $2}'" + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[1] != "", "start nebd fail!" + def add_config_file(): for host in config.mds_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo cp -r /etc/curve-bak /etc/curve" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"add host %s config fail,error is %s"%(host,rs[2]) + assert rs[3] == 0, "add host %s config fail,error is %s" % ( + host, rs[2]) for host in config.chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo cp -r /etc/curve-bak /etc/curve" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"add host %s config fail,error is %s"%(host,rs[2]) + assert rs[3] == 0, "add host %s config fail,error is %s" % ( + host, rs[2]) + def start_abnormal_test_services(): try: for host in config.etcd_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo rm -rf /etcd/default.etcd" shell_operator.ssh_exec(ssh, ori_cmd) etcd_cmd = "cd etcdrun && sudo nohup ./run.sh new &" @@ -510,52 +577,59 @@ def start_abnormal_test_services(): ori_cmd = "ps -ef|grep -v grep | grep -w etcd | awk '{print $2}'" time.sleep(2) rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.debug("etcd pid is %s"%rs[1]) + logger.debug("etcd pid is %s" % rs[1]) assert rs[1] != [], "up etcd fail" for host in config.mds_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) mds_cmd = "sudo nohup /usr/bin/curve-mds --confPath=/etc/curve/mds.conf &" shell_operator.ssh_background_exec2(ssh, mds_cmd) time.sleep(1) ori_cmd = "ps -ef|grep -v grep | grep -v curve-mds.log | grep -v sudo | grep -w curve-mds | awk '{print $2}'" rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[1] != [], "up mds fail" - 
logger.debug("mds pid is %s"%rs[1]) + logger.debug("mds pid is %s" % rs[1]) for host in config.snap_server_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "cd snapshot/temp && sudo nohup curve-snapshotcloneserver -conf=/etc/curve/snapshot_clone_server.conf &" shell_operator.ssh_background_exec2(ssh, ori_cmd) except Exception: logger.error("up servers fail.") raise + def create_pool(): - ssh = shell_operator.create_ssh_connect(config.mds_list[0], 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + config.mds_list[0], 1046, config.abnormal_user) mds = [] mds_addrs = "" for mds_host in config.mds_list: mds.append(mds_host + ":6666") mds_addrs = ",".join(mds) physical_pool = "curve-tool -cluster_map=topo.json -mds_addr=%s\ - -op=create_physicalpool"%(mds_addrs) + -op=create_physicalpool" % (mds_addrs) rs = shell_operator.ssh_exec(ssh, physical_pool) if rs[3] == 0: logger.info("create physical pool sucess") else: - assert False,"create physical fail ,msg is %s"%rs[2] + assert False, "create physical fail ,msg is %s" % rs[2] for host in config.chunkserver_list: - ssh2 = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh2 = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo nohup ./chunkserver_ctl.sh start all &" shell_operator.ssh_background_exec2(ssh2, ori_cmd) time.sleep(60) logical_pool = "curve-tool -cluster_map=topo.json -mds_addr=%s\ - -op=create_logicalpool"%(mds_addrs) + -op=create_logicalpool" % (mds_addrs) rs = shell_operator.ssh_exec(ssh, logical_pool) time.sleep(180) + def restart_cinder_server(): for client_host in config.client_list: - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) ori_cmd = "sudo cp /usr/curvefs/curvefs.py /srv/stack/cinder/lib/python2.7/site-packages/" rs = shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "sudo cp /usr/curvefs/_curvefs.so /srv/stack/cinder/lib/python2.7/site-packages/" @@ -563,21 +637,22 @@ def restart_cinder_server(): time.sleep(2) ori_cmd = "sudo service cinder-volume restart" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[1] == [],"rs is %s"%rs + assert rs[1] == [], "rs is %s" % rs + def wait_cinder_server_up(): cinder_host = config.nova_host - ssh = shell_operator.create_ssh_connect(cinder_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + cinder_host, 1046, config.abnormal_user) ori_cmd = R"source OPENRC && cinder get-host-list --all-services | grep pool1 | grep curve2 | awk '{print $16}'" i = 0 while i < 360: - rs = shell_operator.ssh_exec(ssh, ori_cmd) - status = "".join(rs[1]).strip() - if status == "up": - break - i = i + 5 - time.sleep(5) - assert status == "up","up curve2 cinder service fail,please check" + rs = shell_operator.ssh_exec(ssh, ori_cmd) + status = "".join(rs[1]).strip() + if status == "up": + break + i = i + 5 + time.sleep(5) + assert status == "up", "up curve2 cinder service fail,please check" if status == "up": - time.sleep(10) - + time.sleep(10) diff --git a/robot/Resources/keywords/fault_inject.py b/robot/Resources/keywords/fault_inject.py index 48e95382c4..507b5af8cf 100644 --- a/robot/Resources/keywords/fault_inject.py +++ b/robot/Resources/keywords/fault_inject.py @@ -15,6 +15,7 @@ import string import types + def block_ip(chain): ori_cmd = "iptables -I %s 2>&1" % 
chain cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, @@ -30,6 +31,7 @@ def cancel_block_ip(chain): print cmd # rc = shell_operator.run_exec(cmd) + def net_work_delay(dev, time): ori_cmd = "tc qdisc add dev %s root netem delay %dms 2>&1" % (dev, time) cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, @@ -37,37 +39,45 @@ def net_work_delay(dev, time): print cmd # rc = shell_operator.run_exec(cmd) -def package_loss_all(ssh,dev, percent): - ori_cmd = "sudo tc qdisc add dev %s root netem loss %d%% 2>&1" % (dev, percent) + +def package_loss_all(ssh, dev, percent): + ori_cmd = "sudo tc qdisc add dev %s root netem loss %d%% 2>&1" % ( + dev, percent) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"error is %s"%rs[2] + assert rs[3] == 0, "error is %s" % rs[2] # rc = shell_operator.run_exec(cmd) -def package_delay_all(ssh, dev,ms): + +def package_delay_all(ssh, dev, ms): ori_cmd = "sudo tc qdisc add dev %s root netem delay %dms" % (dev, ms) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"error is %s"%rs[2] + assert rs[3] == 0, "error is %s" % rs[2] # rc = shell_operator.run_exec(cmd) -def cancel_tc_inject(ssh,dev): + +def cancel_tc_inject(ssh, dev): ori_cmd = "sudo tc qdisc del dev %s root" % dev rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"error is %s"%rs[2] + assert rs[3] == 0, "error is %s" % rs[2] # rc = shell_operator.run_exec(cmd) -def show_tc_inject(ssh,dev): + +def show_tc_inject(ssh, dev): ori_cmd = "sudo tc qdisc show dev %s " % dev rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"error is %s"%rs[2] + assert rs[3] == 0, "error is %s" % rs[2] # rc = shell_operator.run_exec(cmd) + def package_reorder_all(dev, ms, percent1, percent2): - ori_cmd = "tc qdisc change dev %s root netem delay %s reorder %d%% %d%%" % (dev, ms, percent1, percent2) + ori_cmd = "tc qdisc change dev %s root netem delay %s reorder %d%% %d%%" % ( + dev, ms, percent1, percent2) cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, sudo_flag=True, sudo_way="") print cmd # rc = shell_operator.run_exec(cmd) + def package_duplicate_all(dev, percent): ori_cmd = "tc qdisc add dev %s root netem duplicate %d%%" % (dev, percent) cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, @@ -77,7 +87,8 @@ def package_duplicate_all(dev, percent): def eth_down_for_a_monent(dev, time): - ori_cmd = "ip link set %s down 2>&1 && sleep %d 2>&1 && ip link set %s up 2>&1" % (dev, time) + ori_cmd = "ip link set %s down 2>&1 && sleep %d 2>&1 && ip link set %s up 2>&1" % ( + dev, time) cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, sudo_flag=True, sudo_way="") print cmd @@ -86,110 +97,125 @@ def eth_down_for_a_monent(dev, time): def add_rate_limit(dev, downlink, uplink): ori_cmd = "wget -N -P /tmp nos.netease.com/nfit-software/taaslimit.sh 2>&1 && chmod a+rx /tmp/taaslimit.sh 2>&1 " \ - "&& mv /tmp/taaslimit.sh /sbin/taaslimit 2>&1 && chown root:root /sbin/taaslimit && taaslimit %s %d %d 2>&1" % (dev, downlink, uplink) + "&& mv /tmp/taaslimit.sh /sbin/taaslimit 2>&1 && chown root:root /sbin/taaslimit && taaslimit %s %d %d 2>&1" % ( + dev, downlink, uplink) cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd, sudo_flag=True, sudo_way="") print cmd # rc = 
shell_operator.run_exec(cmd)
 
+
 def del_rate_limit(dev):
-    ori_cmd = "taaslimit clear %s 2>&1" %(dev)
+    ori_cmd = "taaslimit clear %s 2>&1" % (dev)
     cmd = shell_operator.gen_remote_cmd(config.ssh_user, config.ssh_hostname, 1046, config.ssh_key, ori_cmd,
                                         sudo_flag=True, sudo_way="")
     print cmd
     # rc = shell_operator.run_exec(cmd)
 
-def inject_cpu_stress(ssh,stress=50):
-    cmd = "sudo nohup python cpu_stress.py %d &"%stress
-    shell_operator.ssh_background_exec2(ssh,cmd)
+
+def inject_cpu_stress(ssh, stress=50):
+    cmd = "sudo nohup python cpu_stress.py %d &" % stress
+    shell_operator.ssh_background_exec2(ssh, cmd)
     time.sleep(5)
     cmd = "ps -ef|grep -v grep | grep cpu_stress.py | awk '{print $2}'"
-    rs = shell_operator.ssh_exec(ssh,cmd)
-    assert rs[1] != [],"up cpu stress fail"
+    rs = shell_operator.ssh_exec(ssh, cmd)
+    assert rs[1] != [], "up cpu stress fail"
 
+
 def del_cpu_stress(ssh):
     cmd = "ps -ef|grep -v grep | grep cpu_stress.py | awk '{print $2}'"
-    rs = shell_operator.ssh_exec(ssh,cmd)
+    rs = shell_operator.ssh_exec(ssh, cmd)
     if rs[1] == []:
         logger.info("no cpu stress running")
         return
     cmd = "ps -ef|grep -v grep | grep cpu_stress.py | awk '{print $2}'| sudo xargs kill -9"
-    rs = shell_operator.ssh_exec(ssh,cmd)
-    assert rs[3] == 0,"stop cpu stess fail"
+    rs = shell_operator.ssh_exec(ssh, cmd)
+    assert rs[3] == 0, "stop cpu stress fail"
 
-def inject_mem_stress(ssh,stress):
-    cmd = "sudo nohup /usr/local/stress/memtester/bin/memtester %dG > memtest.log &"%stress
-    shell_operator.ssh_background_exec2(ssh,cmd)
+
+def inject_mem_stress(ssh, stress):
+    cmd = "sudo nohup /usr/local/stress/memtester/bin/memtester %dG > memtest.log &" % stress
+    shell_operator.ssh_background_exec2(ssh, cmd)
     time.sleep(5)
     cmd = "ps -ef|grep -v grep | grep memtester | awk '{print $2}'"
-    rs = shell_operator.ssh_exec(ssh,cmd)
-    assert rs[1] != [],"up memster stress fail"
+    rs = shell_operator.ssh_exec(ssh, cmd)
+    assert rs[1] != [], "up memtester stress fail"
 
+
 def del_mem_stress(ssh):
     cmd = "ps -ef|grep -v grep | grep memtester | awk '{print $2}'"
-    rs = shell_operator.ssh_exec(ssh,cmd)
+    rs = shell_operator.ssh_exec(ssh, cmd)
     if rs[1] == []:
         logger.info("no memtester stress running")
         return
     cmd = "ps -ef|grep -v grep | grep memtester | awk '{print $2}'| sudo xargs kill -9"
-    rs = shell_operator.ssh_exec(ssh,cmd)
-    assert rs[3] == 0,"stop memtester stess fail"
+    rs = shell_operator.ssh_exec(ssh, cmd)
+    assert rs[3] == 0, "stop memtester stress fail"
 
-def inject_clock_offset(ssh,time):
+
+def inject_clock_offset(ssh, time):
     cmd = "sudo date -s `date -d \"+%d min\" | awk \'{print $4}\'`" % time
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"inject clock offet fail,return is %s"%rs[2]
+    assert rs[3] == 0, "inject clock offset fail, return is %s" % rs[2]
 
-def del_clock_offset(ssh,time):
+
+def del_clock_offset(ssh, time):
     cmd = "sudo date -s `date -d \"-%d min\" | awk \'{print $4}\'`" % time
     rs = shell_operator.ssh_exec(ssh, cmd)
     assert rs[3] == 0, "del clock offset fail, return is %s" % rs[2]
 
+
 def listen_network_stress(ip):
     ori_cmd = "iperf -s"
     ssh = shell_operator.create_ssh_connect(ip, 1046, config.abnormal_user)
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
     logger.info("exec cmd %s" % ori_cmd)
     # assert rs[3] == 0,"up iperf fail: %s"%rs[1]
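The inject_*/del_* pairs around this point share one lifecycle; condensed, with the command strings taken from the surrounding functions:

    # Inject: start the stressor detached, then verify a pid exists.
    shell_operator.ssh_background_exec2(ssh, "sudo nohup python cpu_stress.py 50 &")
    time.sleep(5)
    probe = "ps -ef|grep -v grep | grep cpu_stress.py | awk '{print $2}'"
    assert shell_operator.ssh_exec(ssh, probe)[1] != [], "up cpu stress fail"
    # Remove: the same probe piped into kill -9; no matching pid means
    # there is nothing to clean up and the keyword returns early.
    shell_operator.ssh_exec(ssh, probe + "| sudo xargs kill -9")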
+
 def inject_network_stress(ip):
-    ori_cmd = "iperf -c %s -b 20000M -t 10 -p 5001"%ip
+    ori_cmd = "iperf -c %s -b 20000M -t 10 -p 5001" % ip
     ssh = shell_operator.create_ssh_connect(ip, 1046, config.abnormal_user)
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
     logger.info("exec cmd %s" % ori_cmd)
-    assert rs[3] == 0,"inject iperf fail: %s"%rs[2]
+    assert rs[3] == 0, "inject iperf fail: %s" % rs[2]
 
+
 def stop_network_stress(ip):
     ori_cmd = "ps -ef|grep iperf |grep -v grep| awk '{print $2}' | sudo xargs kill -9"
     ssh = shell_operator.create_ssh_connect(ip, 1046, config.abnormal_user)
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[3] == 0,"stop iperf fail: %s"%rs[2]
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "stop iperf fail: %s" % rs[2]
     ori_cmd = "ps -ef|grep iperf |grep -v grep"
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[1] == [],"stop iperf fail,pid %s"%rs[1]
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[1] == [], "stop iperf fail, pid %s" % rs[1]
 
+
 def ipmitool_cycle_restart_host(ssh):
     ori_cmd = "sudo ipmitool chassis power cycle"
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[3] == 0,"cycle restart host fail,return is %s"%rs
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "cycle restart host fail, return is %s" % rs
 
+
 def ipmitool_reset_restart_host(ssh):
     ori_cmd = "sudo ipmitool chassis power reset"
-    rs = shell_operator.ssh_exec(ssh,ori_cmd)
-    assert rs[3] == 0,"reset restart host fail,return is %s"%rs
+    rs = shell_operator.ssh_exec(ssh, ori_cmd)
+    assert rs[3] == 0, "reset restart host fail, return is %s" % rs
 
-def get_hostip_dev(ssh,hostip):
-    ori_cmd = "ip a|grep %s | awk '{print $7}'"%hostip
+
+def get_hostip_dev(ssh, hostip):
+    ori_cmd = "ip a|grep %s | awk '{print $7}'" % hostip
     rs = shell_operator.ssh_exec(ssh, ori_cmd)
-    assert rs[3] == 0,"error is %s"%rs[2]
+    assert rs[3] == 0, "error is %s" % rs[2]
     return "".join(rs[1]).strip()
 
+
 def clear_RecycleBin():
     host = random.choice(config.mds_list)
     ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
     ori_cmd = "curve_ops_tool clean-recycle --isTest"
     rs = shell_operator.ssh_exec(ssh, ori_cmd)
-    assert rs[3] == 0,"clean RecyclenBin fail,msg is %s"%rs[1]
+    assert rs[3] == 0, "clean RecycleBin fail, msg is %s" % rs[1]
     starttime = time.time()
     ori_cmd = "curve_ops_tool list -fileName=/RecycleBin |grep Total"
     while time.time() - starttime < 180:
@@ -199,9 +225,10 @@ def clear_RecycleBin():
         else:
             logger.debug("deleting")
         if rs[3] != 0:
-            logger.debug("list /RecycleBin fail,error is %s"%rs[1])
-        time.sleep(3)
-    assert rs[3] == 0,"delete /RecycleBin fail,error is %s"%rs[1]
+            logger.debug("list /RecycleBin fail, error is %s" % rs[1])
+        time.sleep(3)
+    assert rs[3] == 0, "delete /RecycleBin fail, error is %s" % rs[1]
 
+
 def loop_map_unmap_file():
     thread = []
@@ -209,7 +236,7 @@ def loop_map_unmap_file():
         filename = "nbdthrash" + str(i)
         t = mythread.runThread(test_curve_stability_nbd.nbd_all, filename)
         thread.append(t)
-        logger.debug("thrash map unmap %s" %filename)
+        logger.debug("thrash map unmap %s" % filename)
     config.thrash_thread = thread
 
     for t in thread:
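stop_map_unmap() below consumes the threads that loop_map_unmap_file() above parks in config.thrash_thread; the contract in miniature (runThread/get_result semantics assumed from their use in this file):

    # Each thrash thread loops nbd map/unmap cycles and returns its cycle
    # count; exitcode 0 plus a positive count means the loop ran cleanly.
    t = mythread.runThread(test_curve_stability_nbd.nbd_all, "nbdthrash0")
    config.thrash_thread = [t]
    # ...later, when tearing down:
    assert t.exitcode == 0, "map/umap thread error"
    assert t.get_result() > 0, "map/umap thread error"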
0,"map/umap thread error" + logger.debug("thrash map/umap time is %d" % result) + assert result > 0, "map/umap thread error" time = time + result - logger.info("map/umap all time is %d"%time) + logger.info("map/umap all time is %d" % time) except: - raise + raise + def stop_rwio(): - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) ori_cmd = "sudo supervisorctl stop all" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"stop rwio fail,rs is %s"%rs[1] + assert rs[3] == 0, "stop rwio fail,rs is %s" % rs[1] ori_cmd = "ps -ef|grep -v grep | grep randrw | awk '{print $2}'| sudo xargs kill -9" rs = shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "ps -ef|grep -v grep | grep -w /home/nbs/vdbench50406/profile | awk '{print $2}'| sudo xargs kill -9" @@ -247,114 +277,133 @@ def stop_rwio(): time.sleep(3) ssh.close() + def run_rwio(): - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) - ori_cmd = "lsblk |grep nbd0 | awk '{print $1}'" + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) + ori_cmd = "lsblk |grep nbd0 | awk '{print $1}'" rs = shell_operator.ssh_exec(ssh, ori_cmd) output = "".join(rs[1]).strip() if output != "nbd0": logger.error("map is error") - assert False,"output is %s"%output - ori_cmd = "lsblk |grep nbd1 | awk '{print $1}'" + assert False, "output is %s" % output + ori_cmd = "lsblk |grep nbd1 | awk '{print $1}'" rs = shell_operator.ssh_exec(ssh, ori_cmd) output = "".join(rs[1]).strip() if output != "nbd1": logger.error("map is error") - assert False,"output is %s"%output + assert False, "output is %s" % output ori_cmd = "sudo supervisorctl stop all && sudo supervisorctl reload" rs = shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "sudo nohup /home/nbs/vdbench50406/vdbench -jn -f /home/nbs/vdbench50406/profile &" rs = shell_operator.ssh_background_exec2(ssh, ori_cmd) - #write 60s io + # write 60s io time.sleep(60) # assert rs[3] == 0,"start rwio fail" ssh.close() + def init_recover_disk(fio_size): - ori_cmd = "sudo fio -name=/dev/nbd2 -direct=1 -iodepth=32 -rw=write -ioengine=libaio -bs=1024k -size=%dG -numjobs=1 -time_based"%int(fio_size) - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) + ori_cmd = "sudo fio -name=/dev/nbd2 -direct=1 -iodepth=32 -rw=write -ioengine=libaio -bs=1024k -size=%dG -numjobs=1 -time_based" % int( + fio_size) + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"write fio fail" + assert rs[3] == 0, "write fio fail" cmd = "sudo curve-nbd unmap cbd:pool1//recover_test_ >/dev/null 2>&1" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"unmap recover fail:%s"%rs[2] + assert rs[3] == 0, "unmap recover fail:%s" % rs[2] md5 = test_curve_stability_nbd.get_vol_md5("recover") config.recover_vol_md5 = md5 cmd = "curve delete --filename /recover --user test" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"delete /recover fail:%s"%rs[2] + assert rs[3] == 0, "delete /recover fail:%s" % rs[2] + def recover_disk(): cmd = "sudo curve recover --user test --filename /recover" - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) rs = 
+
 def recover_disk():
     cmd = "sudo curve recover --user test --filename /recover"
-    ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user)
+    ssh = shell_operator.create_ssh_connect(
+        config.client_list[0], 1046, config.abnormal_user)
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"recover file fail:%s"%rs[2]
+    assert rs[3] == 0, "recover file fail: %s" % rs[2]
     md5 = test_curve_stability_nbd.get_vol_md5("recover")
-    assert md5 == config.recover_vol_md5,"Data is inconsistent after translation,md5 is %s,recover md5 is %s"%(config.recover_vol_md5,md5)
-
+    assert md5 == config.recover_vol_md5, "Data is inconsistent after recovery, md5 is %s, recover md5 is %s" % (
+        config.recover_vol_md5, md5)
+
+
 def get_chunkserver_list():
     client_host = config.client_list[0]
     logger.info("|------begin get chunkserver list------|")
     cmd = "curve_ops_tool chunkserver-list > cs_list"
-    ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user)
+    ssh = shell_operator.create_ssh_connect(
+        client_host, 1046, config.abnormal_user)
     rs = shell_operator.ssh_exec(ssh, cmd)
 
-def get_chunkserver_id(host,cs_id):
+
+def get_chunkserver_id(host, cs_id):
     client_host = config.client_list[0]
-    logger.info("|------begin get chunkserver %s id %d------|"%(host,cs_id))
-    cmd = "cat cs_list | grep %s |grep -w chunkserver%d"%(host,cs_id)
+    logger.info("|------begin get chunkserver %s id %d------|" % (host, cs_id))
+    cmd = "cat cs_list | grep %s |grep -w chunkserver%d" % (host, cs_id)
+    ssh = shell_operator.create_ssh_connect(
+        client_host, 1046, config.abnormal_user)
     rs = shell_operator.ssh_exec(ssh, cmd)
     chunkserver_info = "".join(rs[1]).strip().split(',')
-    chunkserver_id = re.findall(r"\d+",chunkserver_info[0])
+    chunkserver_id = re.findall(r"\d+", chunkserver_info[0])
     if chunkserver_id != []:
         return int(chunkserver_id[0])
     else:
         return -1
 
-def get_cs_copyset_num(host,cs_id):
+
+def get_cs_copyset_num(host, cs_id):
     client_host = config.client_list[0]
     cs_number = int(cs_id) + 8200
-    cmd = "curve_ops_tool check-chunkserver -chunkserverAddr=%s:%d |grep 'total copysets'"%(host,cs_number)
-    ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user)
+    cmd = "curve_ops_tool check-chunkserver -chunkserverAddr=%s:%d |grep 'total copysets'" % (
+        host, cs_number)
+    ssh = shell_operator.create_ssh_connect(
+        client_host, 1046, config.abnormal_user)
     rs = shell_operator.ssh_exec(ssh, cmd)
     chunkserver_info = "".join(rs[1]).strip().split(',')
-    chunkserver_id = re.findall(r"\d+",chunkserver_info[0])
+    chunkserver_id = re.findall(r"\d+", chunkserver_info[0])
     if chunkserver_id != []:
         return int(chunkserver_id[0])
     else:
-        return -1
+        return -1
 
-def stop_vm(ssh,uuid):
-    stop_cmd = "source OPENRC && nova stop %s"%uuid
+
+def stop_vm(ssh, uuid):
+    stop_cmd = "source OPENRC && nova stop %s" % uuid
     rs = shell_operator.ssh_exec(ssh, stop_cmd)
-    assert rs[3] == 0,"stop vm fail,error is %s"%rs[2]
+    assert rs[3] == 0, "stop vm fail, error is %s" % rs[2]
     time.sleep(5)
 
-def start_vm(ssh,uuid):
-    start_cmd = "source OPENRC && nova start %s"%uuid
+
+def start_vm(ssh, uuid):
+    start_cmd = "source OPENRC && nova start %s" % uuid
     rs = shell_operator.ssh_exec(ssh, start_cmd)
-    assert rs[3] == 0,"start vm fail,error is %s"%rs[2]
+    assert rs[3] == 0, "start vm fail, error is %s" % rs[2]
 
-def restart_vm(ssh,uuid):
-    restart_cmd = "source OPENRC && nova reboot %s"%uuid
+
+def restart_vm(ssh, uuid):
+    restart_cmd = "source OPENRC && nova reboot %s" % uuid
     rs = shell_operator.ssh_exec(ssh, restart_cmd)
-    assert rs[3] == 0,"reboot vm fail,error is %s"%rs[2]
+    assert rs[3] == 0, "reboot vm fail, error is %s" % rs[2]
 
-def check_vm_status(ssh,uuid):
-    ori_cmd = "source OPENRC && nova list|grep %s|awk '{print $6}'"%uuid
+
+def check_vm_status(ssh, uuid):
+    ori_cmd = "source OPENRC && nova list|grep %s|awk '{print $6}'" % uuid
     i = 0
     while i < 
180: - rs = shell_operator.ssh_exec(ssh, ori_cmd) - if "".join(rs[1]).strip() == "ACTIVE": - return True - elif "".join(rs[1]).strip() == "ERROR": - return False - else: - time.sleep(5) - i = i + 5 - assert False,"start vm fail" - -def check_vm_vd(ip,nova_ssh,uuid): + rs = shell_operator.ssh_exec(ssh, ori_cmd) + if "".join(rs[1]).strip() == "ACTIVE": + return True + elif "".join(rs[1]).strip() == "ERROR": + return False + else: + time.sleep(5) + i = i + 5 + assert False, "start vm fail" + + +def check_vm_vd(ip, nova_ssh, uuid): i = 0 while i < 300: try: @@ -363,19 +412,21 @@ def check_vm_vd(ip,nova_ssh,uuid): rs = shell_operator.ssh_exec(ssh, ori_cmd) output = "".join(rs[1]).strip() if output == "vdc": - ori_cmd = "source OPENRC && nova reboot %s --hard"%uuid - shell_operator.ssh_exec(nova_ssh,ori_cmd) + ori_cmd = "source OPENRC && nova reboot %s --hard" % uuid + shell_operator.ssh_exec(nova_ssh, ori_cmd) elif output == "": break except: i = i + 5 time.sleep(5) - assert rs[3] == 0,"start vm fail,ori_cmd is %s" % rs[1] + assert rs[3] == 0, "start vm fail,ori_cmd is %s" % rs[1] + def init_vm(): - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - ori_cmd = "source OPENRC && nova list|grep %s | awk '{print $2}'"%config.vm_host - ori_cmd2 = "source OPENRC && nova list|grep %s | awk '{print $2}'"%config.vm_stability_host + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + ori_cmd = "source OPENRC && nova list|grep %s | awk '{print $2}'" % config.vm_host + ori_cmd2 = "source OPENRC && nova list|grep %s | awk '{print $2}'" % config.vm_stability_host try: rs = shell_operator.ssh_exec(ssh, ori_cmd) rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) @@ -384,23 +435,23 @@ def init_vm(): uuid = "".join(rs[1]).strip() uuid2 = "".join(rs2[1]).strip() - for i in range(1,10): + for i in range(1, 10): ori_cmd = "bash curve_test.sh delete" shell_operator.ssh_exec(ssh, ori_cmd) - ori_cmd = "source OPENRC && nova reboot %s --hard"%uuid - ori_cmd2 = "source OPENRC && nova reboot %s --hard"%uuid2 - rs = shell_operator.ssh_exec(ssh,ori_cmd) - rs2 = shell_operator.ssh_exec(ssh,ori_cmd2) + ori_cmd = "source OPENRC && nova reboot %s --hard" % uuid + ori_cmd2 = "source OPENRC && nova reboot %s --hard" % uuid2 + rs = shell_operator.ssh_exec(ssh, ori_cmd) + rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) time.sleep(60) - rs1 = check_vm_status(ssh,uuid) - rs2 = check_vm_status(ssh,uuid2) + rs1 = check_vm_status(ssh, uuid) + rs2 = check_vm_status(ssh, uuid2) if rs1 == True and rs2 == True: break - assert rs1 == True,"hard reboot vm fail" - assert rs2 == True,"hard reboot vm fail" + assert rs1 == True, "hard reboot vm fail" + assert rs2 == True, "hard reboot vm fail" - check_vm_vd(config.vm_host,ssh,uuid) - check_vm_vd(config.vm_stability_host,ssh,uuid2) + check_vm_vd(config.vm_host, ssh, uuid) + check_vm_vd(config.vm_stability_host, ssh, uuid2) except: logger.error("init vm error") raise @@ -408,42 +459,49 @@ def init_vm(): def remove_vm_key(): - cmd = "ssh-keygen -f ~/.ssh/known_hosts -R %s"%config.vm_host + cmd = "ssh-keygen -f ~/.ssh/known_hosts -R %s" % config.vm_host shell_operator.run_exec(cmd) print cmd -def attach_new_vol(fio_size,vdbench_size): - ori_cmd = "bash curve_test.sh create %d %d"%(int(fio_size),int(vdbench_size)) - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + +def attach_new_vol(fio_size, vdbench_size): + ori_cmd = "bash curve_test.sh create %d %d" % ( + 
int(fio_size), int(vdbench_size)) + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.info("exec cmd %s" % ori_cmd) + assert rs[3] == 0, "attach vol fail,return is %s" % rs[2] logger.info("exec cmd %s" % ori_cmd) - assert rs[3] == 0,"attach vol fail,return is %s"%rs[2] - logger.info("exec cmd %s"%ori_cmd) get_vol_uuid() ssh.close() + def detach_vol(): stop_rwio() ori_cmd = "bash curve_test.sh delete" - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.info("exec cmd %s" % ori_cmd) + assert rs[3] == 0, "retcode is %d,error is %s" % (rs[3], rs[2]) logger.info("exec cmd %s" % ori_cmd) - assert rs[3] == 0,"retcode is %d,error is %s"%(rs[3],rs[2]) - logger.info("exec cmd %s"%ori_cmd) ssh.close() + def clean_nbd(): for client_ip in config.client_list: - logger.info("|------begin test clean client %s------|"%(client_ip)) + logger.info("|------begin test clean client %s------|" % (client_ip)) cmd = "sudo curve-nbd list-mapped |grep nbd" - ssh = shell_operator.create_ssh_connect(client_ip, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + client_ip, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) if rs[1] != []: for nbd_info in rs[1]: - nbd = re.findall("/dev/nbd\d+",nbd_info) + nbd = re.findall("/dev/nbd\d+", nbd_info) cmd = "sudo curve-nbd unmap " + nbd[0] rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"unmap %s fail,error is %s"%(nbd,rs[2]) + assert rs[3] == 0, "unmap %s fail,error is %s" % (nbd, rs[2]) cmd = "ps -ef|grep curve-nbd|grep -v grep | awk '{print $2}' | sudo xargs kill -9" rs = shell_operator.ssh_exec(ssh, cmd) return @@ -451,159 +509,174 @@ def clean_nbd(): def map_nbd(): client_host = config.client_list[0] - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) - stripeUnit = [524288,1048576,2097152,4194304] - stripeCount = [1,2,4,8,16] - cmd = "curve create --filename /fiofile --length 10 --user test --stripeUnit %d --stripeCount %d"%(random.choice(stripeUnit),random.choice(stripeCount)) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) + stripeUnit = [524288, 1048576, 2097152, 4194304] + stripeCount = [1, 2, 4, 8, 16] + cmd = "curve create --filename /fiofile --length 10 --user test --stripeUnit %d --stripeCount %d" % ( + random.choice(stripeUnit), random.choice(stripeCount)) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"create /fiofile fail:%s"%rs[2] - cmd = "curve create --filename /vdbenchfile --length 10 --user test --stripeUnit %d --stripeCount %d"%(random.choice(stripeUnit),random.choice(stripeCount)) + assert rs[3] == 0, "create /fiofile fail:%s" % rs[2] + cmd = "curve create --filename /vdbenchfile --length 10 --user test --stripeUnit %d --stripeCount %d" % ( + random.choice(stripeUnit), random.choice(stripeCount)) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"create /vdbenchfile fail:%s"%rs[2] - #test recover recyclebin file - cmd = "curve create --filename /recover --length 10 --user test --stripeUnit %d --stripeCount %d"%(random.choice(stripeUnit),random.choice(stripeCount)) + assert rs[3] == 0, "create /vdbenchfile fail:%s" % rs[2] + # test recover recyclebin file + cmd = "curve create --filename /recover --length 10 --user 
test --stripeUnit %d --stripeCount %d" % ( + random.choice(stripeUnit), random.choice(stripeCount)) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"create /recover fail:%s"%rs[2] + assert rs[3] == 0, "create /recover fail:%s" % rs[2] time.sleep(3) cmd = "sudo curve-nbd map cbd:pool1//fiofile_test_ >/dev/null 2>&1" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"map fiofile fail:%s"%rs[2] + assert rs[3] == 0, "map fiofile fail:%s" % rs[2] cmd = "sudo curve-nbd map cbd:pool1//vdbenchfile_test_ >/dev/null 2>&1" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"map vdbenchfile fail:%s"%rs[2] + assert rs[3] == 0, "map vdbenchfile fail:%s" % rs[2] cmd = "sudo curve-nbd map cbd:pool1//recover_test_ >/dev/null 2>&1" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"map recover fail:%s"%rs[2] + assert rs[3] == 0, "map recover fail:%s" % rs[2] + def delete_nbd(): client_host = config.client_list[0] - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) cmd = "curve delete --filename /fiofile --user test" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"delete /fiofile fail:%s"%rs[2] + assert rs[3] == 0, "delete /fiofile fail:%s" % rs[2] cmd = "curve delete --filename /vdbenchfile --user test" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"delete /vdbenchfile fail:%s"%rs[2] + assert rs[3] == 0, "delete /vdbenchfile fail:%s" % rs[2] cmd = "curve delete --filename /recover --user test" rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"delete /recover fail:%s"%rs[2] + assert rs[3] == 0, "delete /recover fail:%s" % rs[2] + def check_host_connect(ip): - cmd = "ping %s -w3"%ip + cmd = "ping %s -w3" % ip status = shell_operator.run_exec(cmd) if status == 0: return True else: return False + def get_chunkserver_status(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) grep_cmd = "bash /home/nbs/chunkserver_ctl.sh status all" - rs = shell_operator.ssh_exec(ssh,grep_cmd) + rs = shell_operator.ssh_exec(ssh, grep_cmd) chunkserver_lines = rs[1] - logger.debug("get lines is %s"%chunkserver_lines) - up_cs = [int(i.split()[0][11:]) for i in filter(lambda x: "active" in x, chunkserver_lines)] - down_cs = [int(i.split()[0][11:]) for i in filter(lambda x: "down" in x, chunkserver_lines)] - return {'up':up_cs, 'down':down_cs} + logger.debug("get lines is %s" % chunkserver_lines) + up_cs = [int(i.split()[0][11:]) + for i in filter(lambda x: "active" in x, chunkserver_lines)] + down_cs = [int(i.split()[0][11:]) + for i in filter(lambda x: "down" in x, chunkserver_lines)] + return {'up': up_cs, 'down': down_cs} ssh.close() -def kill_mult_cs_process(host,num): + +def kill_mult_cs_process(host, num): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) operate_cs = [] - for i in range(0,num): + for i in range(0, num): try: - cs_status = get_chunkserver_status(host) - up_cs = cs_status["up"] - if up_cs == []: - raise Exception("no chunkserver up") + cs_status = get_chunkserver_status(host) + up_cs = cs_status["up"] + if up_cs == []: + raise Exception("no chunkserver up") except Exception as e: - logger.debug("cs_status is %s"%cs_status) - logger.error("%s"%e) - raise AssertionError() - logger.debug("cs_status is %s"%cs_status) + logger.debug("cs_status is %s" % cs_status) + logger.error("%s" % e) + raise AssertionError() + logger.debug("cs_status is %s" % cs_status) cs = 
random.choice(up_cs) ori_cmd = "ps -ef|grep -v grep | grep -w chunkserver%d | awk '{print $2}' && \ - ps -ef|grep -v grep | grep -w /etc/curve/chunkserver.conf.%d |grep -v sudo | awk '{print $2}'"%(cs,cs) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - logger.debug("exec %s"%ori_cmd) + ps -ef|grep -v grep | grep -w /etc/curve/chunkserver.conf.%d |grep -v sudo | awk '{print $2}'" % (cs, cs) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.debug("exec %s" % ori_cmd) pid_chunkserver = "".join(rs[1]).strip() - logger.info("test kill host %s chunkserver %s"%(host,cs)) - kill_cmd = "sudo kill -9 %s"%pid_chunkserver - rs = shell_operator.ssh_exec(ssh,kill_cmd) - logger.debug("exec %s,stdout is %s"%(kill_cmd,"".join(rs[2]))) - assert rs[3] == 0,"kill chunkserver fail" + logger.info("test kill host %s chunkserver %s" % (host, cs)) + kill_cmd = "sudo kill -9 %s" % pid_chunkserver + rs = shell_operator.ssh_exec(ssh, kill_cmd) + logger.debug("exec %s,stdout is %s" % (kill_cmd, "".join(rs[2]))) + assert rs[3] == 0, "kill chunkserver fail" up_cs.remove(cs) operate_cs.append(cs) ssh.close() return operate_cs -def start_mult_cs_process(host,num): + +def start_mult_cs_process(host, num): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) operate_cs = [] - for i in range(0,num): + for i in range(0, num): try: - cs_status = get_chunkserver_status(host) - down_cs = cs_status["down"] - if down_cs == []: - raise Exception("no chunkserver down") + cs_status = get_chunkserver_status(host) + down_cs = cs_status["down"] + if down_cs == []: + raise Exception("no chunkserver down") except Exception as e: - logger.error("%s"%e) - assert False - #raise AssertionError() - logger.debug("cs_status is %s"%cs_status) + logger.error("%s" % e) + assert False + #raise AssertionError() + logger.debug("cs_status is %s" % cs_status) cs = random.choice(down_cs) - if get_cs_copyset_num(host,cs) == 0: - ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat"%(cs) + if get_cs_copyset_num(host, cs) == 0: + ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat" % (cs) rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0 - ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d"%cs - logger.debug("exec %s"%ori_cmd) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"start chunkserver fail,error is %s"%rs[1] + ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d" % cs + logger.debug("exec %s" % ori_cmd) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "start chunkserver fail,error is %s" % rs[1] time.sleep(2) ori_cmd = "ps -ef|grep -v grep | grep -w chunkserver%d | awk '{print $2}' && \ ps -ef|grep -v grep | grep -w /etc/curve/chunkserver.conf.%d |grep -v sudo | awk '{print $2}'" % (cs, cs) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] == []: - assert False,"up chunkserver fail" + assert False, "up chunkserver fail" down_cs.remove(cs) operate_cs.append(cs) ssh.close() return operate_cs + def up_all_cs(): operate_cs = [] for host in config.chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) try: - cs_status = get_chunkserver_status(host) - down_cs = cs_status["down"] - if down_cs == []: - continue + cs_status = get_chunkserver_status(host) + down_cs = cs_status["down"] + if down_cs == []: + continue except Exception as e: - logger.error("%s"%e) - assert False - #raise AssertionError() - 
logger.debug("cs_status is %s"%cs_status) + logger.error("%s" % e) + assert False + #raise AssertionError() + logger.debug("cs_status is %s" % cs_status) cs = random.choice(down_cs) for cs in down_cs: - if get_cs_copyset_num(host,cs) == 0: + if get_cs_copyset_num(host, cs) == 0: ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat;sudo rm -rf /data/chunkserver%d/copysets;\ - sudo rm -rf /data/chunkserver%d/recycler"%(cs,cs,cs) + sudo rm -rf /data/chunkserver%d/recycler" % (cs, cs, cs) rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0 - ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d"%cs - logger.debug("exec %s"%ori_cmd) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"start chunkserver fail" + ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d" % cs + logger.debug("exec %s" % ori_cmd) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "start chunkserver fail" time.sleep(2) ori_cmd = "ps -ef|grep -v grep | grep -w chunkserver%d | awk '{print $2}' && \ ps -ef|grep -v grep | grep -w /etc/curve/chunkserver.conf.%d |grep -v sudo | awk '{print $2}'" % (cs, cs) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] == []: - assert False,"up chunkserver fail" + assert False, "up chunkserver fail" ssh.close() + def stop_host_cs_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) try: @@ -612,17 +685,18 @@ def stop_host_cs_process(host): if up_cs == []: raise Exception("no chunkserver up") except Exception as e: - logger.error("%s"%e) + logger.error("%s" % e) raise AssertionError() - logger.debug("cs_status is %s"%cs_status) + logger.debug("cs_status is %s" % cs_status) ori_cmd = "ps -ef|grep -v grep | grep -w curve-chunkserver |grep -v sudo | awk '{print $2}' | sudo xargs kill -9" - rs = shell_operator.ssh_exec(ssh,ori_cmd) - logger.debug("exec %s"%ori_cmd) - print "test kill host %s chunkserver %s"%(host,up_cs) - assert rs[3] == 0,"kill chunkserver fail" + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.debug("exec %s" % ori_cmd) + print "test kill host %s chunkserver %s" % (host, up_cs) + assert rs[3] == 0, "kill chunkserver fail" ssh.close() -def start_host_cs_process(host,csid=-1): + +def start_host_cs_process(host, csid=-1): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) cs_status = get_chunkserver_status(host) down_cs = cs_status["down"] @@ -636,17 +710,19 @@ def start_host_cs_process(host,csid=-1): if csid == -1: ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start all" else: - if get_cs_copyset_num(host,csid) == 0: - ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat"%(csid) + if get_cs_copyset_num(host, csid) == 0: + ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat" % ( + csid) rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0 - ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d" %csid - print "test up host %s chunkserver %s"%(host, down_cs) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"start chunkserver fail,error is %s"%rs[1] + ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d" % csid + print "test up host %s chunkserver %s" % (host, down_cs) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "start chunkserver fail,error is %s" % rs[1] ssh.close() -def restart_mult_cs_process(host,num): + +def restart_mult_cs_process(host, num): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) for i in range(0, num): try: @@ -680,6 +756,7 @@ def 
restart_mult_cs_process(host,num): assert False, "up chunkserver fail" up_cs.remove(cs) + def kill_mds_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep -v sudo | grep curve-mds | awk '{print $2}'" @@ -689,10 +766,11 @@ def kill_mds_process(host): return for pid in pids[1]: pid = pid.strip() - kill_cmd = "sudo kill -9 %s"%pid - rs = shell_operator.ssh_exec(ssh,kill_cmd) - logger.debug("exec %s,stdout is %s"%(kill_cmd,"".join(rs[1]))) - assert rs[3] == 0,"kill mds fail,process is %s"%pid + kill_cmd = "sudo kill -9 %s" % pid + rs = shell_operator.ssh_exec(ssh, kill_cmd) + logger.debug("exec %s,stdout is %s" % (kill_cmd, "".join(rs[1]))) + assert rs[3] == 0, "kill mds fail,process is %s" % pid + def start_mds_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) @@ -703,12 +781,13 @@ def start_mds_process(host): return up_cmd = "sudo nohup /usr/bin/curve-mds --confPath=/etc/curve/mds.conf &" shell_operator.ssh_background_exec2(ssh, up_cmd) - logger.debug("exec %s"%(up_cmd)) + logger.debug("exec %s" % (up_cmd)) time.sleep(2) rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] == []: assert False, "mds up fail" + def kill_etcd_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep etcd | awk '{print $2}'" @@ -718,35 +797,37 @@ def kill_etcd_process(host): return for pid in pids[1]: pid = pid.strip() - kill_cmd = "sudo kill -9 %s"%pid - rs = shell_operator.ssh_exec(ssh,kill_cmd) - logger.debug("exec %s,stdout is %s"%(kill_cmd,"".join(rs[1]))) - assert rs[3] == 0,"kill etcd fail" + kill_cmd = "sudo kill -9 %s" % pid + rs = shell_operator.ssh_exec(ssh, kill_cmd) + logger.debug("exec %s,stdout is %s" % (kill_cmd, "".join(rs[1]))) + assert rs[3] == 0, "kill etcd fail" + def start_etcd_process(host): -# ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) -# ori_cmd = "ps -ef|grep -v grep | grep etcd | awk '{print $2}'" -# rs = shell_operator.ssh_exec(ssh, ori_cmd) -# if rs[1] != []: -# logger.debug("etcd already up") -# return -# mkdir_cmd = "sudo rm -rf /etcd/default.etcd" -# rs = shell_operator.ssh_exec(ssh, mkdir_cmd) -# up_cmd = " cd etcdrun && sudo nohup ./run.sh existing &" + # ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + # ori_cmd = "ps -ef|grep -v grep | grep etcd | awk '{print $2}'" + # rs = shell_operator.ssh_exec(ssh, ori_cmd) + # if rs[1] != []: + # logger.debug("etcd already up") + # return + # mkdir_cmd = "sudo rm -rf /etcd/default.etcd" + # rs = shell_operator.ssh_exec(ssh, mkdir_cmd) + # up_cmd = " cd etcdrun && sudo nohup ./run.sh existing &" # shell_operator.ssh_background_exec2(ssh, up_cmd) -# logger.debug("exec %s"%(up_cmd)) -# time.sleep(2) -# rs = shell_operator.ssh_exec(ssh, ori_cmd) -# if rs[1] == []: -# assert False, "etcd up fail" + # logger.debug("exec %s"%(up_cmd)) + # time.sleep(2) + # rs = shell_operator.ssh_exec(ssh, ori_cmd) + # if rs[1] == []: + # assert False, "etcd up fail" try: - cmd = "ansible-playbook -i curve/curve-ansible/server.ini curve/curve-ansible/start_curve.yml --tags etcd" - ret = shell_operator.run_exec(cmd) - assert ret == 0 ,"ansible start etcd fail" + cmd = "ansible-playbook -i curve/curve-ansible/server.ini curve/curve-ansible/start_curve.yml --tags etcd" + ret = shell_operator.run_exec(cmd) + assert ret == 0, "ansible start etcd fail" except Exception: - logger.error("ansible start etcd fail.") - raise - + 
logger.error("ansible start etcd fail.") + raise + + def stop_mysql_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) ori_cmd = "ps -ef|grep -v grep | grep mysql" @@ -756,8 +837,9 @@ def stop_mysql_process(host): return ori_cmd = "sudo killall mysqld" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.debug("exec %s,stdout is %s"%(ori_cmd,"".join(rs[1]))) - assert rs[3] == 0,"stop mysql fail" + logger.debug("exec %s,stdout is %s" % (ori_cmd, "".join(rs[1]))) + assert rs[3] == 0, "stop mysql fail" + def start_mysql_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) @@ -774,73 +856,82 @@ def start_mysql_process(host): if rs[1] == []: assert False, "mysql up fail" + def get_cluster_iops(): return 100 + def exec_deleteforce(): client_list = config.client_list host = random.choice(client_list) - cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 robot/Resources/keywords/deleteforce-test.py %s:~/"%(config.pravie_key_path,host) + cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 robot/Resources/keywords/deleteforce-test.py %s:~/" % ( + config.pravie_key_path, host) shell_operator.run_exec2(cmd) ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) ori_cmd = "sudo cp ~/deleteforce-test.py /usr/curvefs/" shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "sudo python /usr/curvefs/deleteforce-test.py" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.info("exec deleteforce return is %s"%rs[1]) - assert rs[3] == 0,"rc is %d"%rs[3] - + logger.info("exec deleteforce return is %s" % rs[1]) + assert rs[3] == 0, "rc is %d" % rs[3] + + def get_all_chunk_num(): chunkserver_list = config.chunkserver_list num = 0 for host in chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) cs_status = get_chunkserver_status(host) cs_list = cs_status["up"] + cs_status["down"] for cs in cs_list: - ori_cmd = "ls /data/chunkserver%d/chunkfilepool/ |wc -l"%cs + ori_cmd = "ls /data/chunkserver%d/chunkfilepool/ |wc -l" % cs rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0 num = num + int("".join(rs[1]).strip()) - logger.info("now num is %d"%(num)) + logger.info("now num is %d" % (num)) return num def check_nbd_iops(limit_iops=3000): - ssh = shell_operator.create_ssh_connect(config.client_list[0],1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) ori_cmd = "iostat -d nb0 3 2 |grep nb0 | awk 'END {print $6}'" rs = shell_operator.ssh_exec(ssh, ori_cmd) kb_wrtn = "".join(rs[1]).strip() iops = int(kb_wrtn) / int(config.fio_iosize) - logger.info("now nbd0 iops is %d with 4k randrw"%iops) - assert iops >= limit_iops,"vm iops not ok,is %d"%iops + logger.info("now nbd0 iops is %d with 4k randrw" % iops) + assert iops >= limit_iops, "vm iops not ok,is %d" % iops + def check_chunkserver_online(num=120): host = random.choice(config.mds_list) ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) ori_cmd = "curve_ops_tool chunkserver-status | grep chunkserver" - + starttime = time.time() i = 0 while time.time() - starttime < 300: rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[3] != 0: - logger.debug("get chunkserver status fail,rs is %s"%rs[1]) + logger.debug("get chunkserver status fail,rs is %s" % rs[1]) time.sleep(10) continue status = "".join(rs[1]).strip() - online_num = re.findall(r'(?<=online = )\d+',status) - 
logger.info("chunkserver online num is %s"%online_num) + online_num = re.findall(r'(?<=online = )\d+', status) + logger.info("chunkserver online num is %s" % online_num) if int(online_num[0]) != num: - logger.debug("chunkserver online num is %s"%online_num) + logger.debug("chunkserver online num is %s" % online_num) time.sleep(10) else: break if int(online_num[0]) != num: ori_cmd = "curve_ops_tool chunkserver-list -checkHealth=false -checkCSAlive | grep OFFLINE" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.error("chunkserver offline list is %s"%rs[1]) - assert int(online_num[0]) == num,"chunkserver online num is %s"%online_num + logger.error("chunkserver offline list is %s" % rs[1]) + assert int( + online_num[0]) == num, "chunkserver online num is %s" % online_num + def wait_health_ok(): host = random.choice(config.mds_list) @@ -858,9 +949,10 @@ def wait_health_ok(): ori_cmd2 = "curve_ops_tool copysets-status -detail | grep \"unhealthy copysets statistic\"" rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) health = rs2[1] - logger.debug("copysets status is %s"%health) + logger.debug("copysets status is %s" % health) time.sleep(10) - assert check == 1,"cluster is not healthy in %d s"%config.recover_time + assert check == 1, "cluster is not healthy in %d s" % config.recover_time + def rapid_leader_schedule(): host = random.choice(config.mds_list) @@ -877,12 +969,12 @@ def rapid_leader_schedule(): else: ori_cmd2 = "curve_ops_tool check-operator -opName=change_peer" rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) - logger.debug("operator status is %s"%rs2[1]) + logger.debug("operator status is %s" % rs2[1]) time.sleep(10) - assert check == 1,"change operator num is not 0 in %d s"%config.recover_time + assert check == 1, "change operator num is not 0 in %d s" % config.recover_time ori_cmd = "curve_ops_tool rapid-leader-schedule" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"rapid leader schedule not ok" + assert rs[3] == 0, "rapid leader schedule not ok" ori_cmd = "curve_ops_tool check-operator -opName=transfer_leader -leaderOpInterval=1| grep \"Operator num is\"" starttime = time.time() while time.time() - starttime < 60: @@ -893,6 +985,7 @@ def rapid_leader_schedule(): else: time.sleep(1) + def wait_cluster_healthy(limit_iops=8000): check_chunkserver_online() host = random.choice(config.mds_list) @@ -912,46 +1005,53 @@ def wait_cluster_healthy(limit_iops=8000): ori_cmd2 = "curve_ops_tool status" rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) cluster_status = "".join(rs2[1]).strip() - logger.debug("cluster status is %s"%cluster_status) + logger.debug("cluster status is %s" % cluster_status) ori_cmd2 = "curve_ops_tool copysets-status -detail" rs2 = shell_operator.ssh_exec(ssh, ori_cmd2) copysets_status = "".join(rs2[1]).strip() - logger.debug("copysets status is %s"%copysets_status) - assert check == 1,"cluster is not healthy in %d s,cluster status is:\n %s,copysets status is:\n %s"%(config.recover_time,cluster_status,copysets_status) - rapid_leader_schedule() - ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user) + logger.debug("copysets status is %s" % copysets_status) + assert check == 1, "cluster is not healthy in %d s,cluster status is:\n %s,copysets status is:\n %s" % ( + config.recover_time, cluster_status, copysets_status) + rapid_leader_schedule() + ssh = shell_operator.create_ssh_connect( + config.client_list[0], 1046, config.abnormal_user) i = 0 while i < 300: ori_cmd = "iostat -d nb0 1 2 |grep nb0 | awk 'END {print $6}'" rs = 
shell_operator.ssh_exec(ssh, ori_cmd) kb_wrtn = "".join(rs[1]).strip() iops = int(kb_wrtn) / int(config.fio_iosize) - logger.info("vm iops is %d"%iops) + logger.info("vm iops is %d" % iops) if iops >= limit_iops: break i = i + 2 time.sleep(2) - assert iops >= limit_iops,"vm iops not ok in 300s" + assert iops >= limit_iops, "vm iops not ok in 300s" + def clean_kernel_log(): for host in config.client_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo logrotate -vf /etc/logrotate.d/rsyslog" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0," rollback log fail, %s"%rs[1] + assert rs[3] == 0, " rollback log fail, %s" % rs[1] ssh.close() + def check_io_error(): for host in config.client_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) ori_cmd = "sudo grep \'I/O error\' /var/log/kern.log -R | grep -v nbd2" rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] != []: ori_cmd = "sudo logrotate -vf /etc/logrotate.d/rsyslog" shell_operator.ssh_exec(ssh, ori_cmd) - assert False," rwio error,log is %s"%rs[1] + assert False, " rwio error,log is %s" % rs[1] ssh.close() + def check_copies_consistency(): host = random.choice(config.mds_list) ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) @@ -966,15 +1066,16 @@ def check_copies_consistency(): rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[3] == 0: break - logger.info("check_hash false return is %s,return code is %d"%(rs[1],rs[3])) + logger.info( + "check_hash false return is %s,return code is %d" % (rs[1], rs[3])) time.sleep(3) i = i + 3 if rs[3] != 0: - assert False,"exec check_hash false fail,return is %s"%rs[1] + assert False, "exec check_hash false fail,return is %s" % rs[1] check_hash = "true" ori_cmd = ori_cmdpri + check_hash - rs = shell_operator.ssh_exec(ssh,ori_cmd) - logger.debug("exec %s,stdout is %s"%(ori_cmd,"".join(rs[1]))) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.debug("exec %s,stdout is %s" % (ori_cmd, "".join(rs[1]))) if rs[3] == 0: print "check consistency ok!" 
     else:
@@ -983,15 +1084,18 @@
         chunkID = message["chunkID"]
         hosts = message["hosts"]
         chunkservers = message["chunkservers"]
-        for i in range(0,3):
+        for i in range(0, 3):
             host = hosts[i]
             chunkserver = chunkservers[i]
-            ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
-            ori_cmd = "sudo cp /data/%s/copysets/%s/data/chunk_%s /data/log/%s"%(chunkserver,groupId,chunkID,chunkserver)
+            ssh = shell_operator.create_ssh_connect(
+                host, 1046, config.abnormal_user)
+            ori_cmd = "sudo cp /data/%s/copysets/%s/data/chunk_%s /data/log/%s" % (
+                chunkserver, groupId, chunkID, chunkserver)
             rs = shell_operator.ssh_exec(ssh, ori_cmd)
             if rs[3] != 0:
-                logger.error("cp chunk fail,is %s"%rs[1])
-                assert False,"checkconsistecny fail,error is %s"%("".join(rs[1]).strip())
+                logger.error("cp chunk fail, rs is %s" % rs[1])
+                assert False, "check consistency fail, error is %s" % (
+                    "".join(rs[1]).strip())
         # check_data_consistency()
     except:
         logger.error("check consistency error")
@@ -999,132 +1103,151 @@ def check_copies_consistency():
         raise
     # run_rwio()
 
+
 def check_data_consistency():
     try:
-        #wait run 60s io
-        #time.sleep(60)
-        ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user)
+        # wait run 60s io
+        # time.sleep(60)
+        ssh = shell_operator.create_ssh_connect(
+            config.client_list[0], 1046, config.abnormal_user)
         ori_cmd = "grep \"Data Validation error\" /home/nbs/output/ -R && \
grep \"Data Validation error\" /home/nbs/nohup.out"
         rs = shell_operator.ssh_exec(ssh, ori_cmd)
         if rs[1] != []:
             t = time.time()
-            ori_cmd = "mv /home/nbs/output /home/nbs/vdbench-output/output-%d && mv /home/nbs/nohup.out /home/nbs/nohup-%d"%(int(t),int(t))
+            ori_cmd = "mv /home/nbs/output /home/nbs/vdbench-output/output-%d && mv /home/nbs/nohup.out /home/nbs/nohup-%d" % (
+                int(t), int(t))
             rs = shell_operator.ssh_exec(ssh, ori_cmd)
             ori_cmd = "mkdir output && touch nohup.out"
             rs = shell_operator.ssh_exec(ssh, ori_cmd)
             # logger.error("find error in %s"%rs[1])
-            assert False,"find data consistency error,save log to vm /root/vdbench-output/output-%d"%int(t)
+            assert False, "find data consistency error, save log to vm /root/vdbench-output/output-%d" % int(
+                t)
     except Exception as e:
         ssh.close()
         raise
     ssh.close()
 
+
 def test_kill_chunkserver_num(num):
     start_iops = get_cluster_iops()
     chunkserver_host = random.choice(config.chunkserver_list)
-    logger.info("|------begin test kill chunkserver num %d,host %s------|"%(num,chunkserver_host))
+    logger.info("|------begin test kill chunkserver num %d,host %s------|" %
+                (num, chunkserver_host))
     try:
-#        check_chunkserver_status(chunkserver_host)
-        kill_mult_cs_process(chunkserver_host,num)
+        # check_chunkserver_status(chunkserver_host)
+        kill_mult_cs_process(chunkserver_host, num)
         end_iops = get_cluster_iops()
         if float(end_iops)/float(start_iops) < 0.9:
-            raise Exception("client io is slow, = %d more than 5s" % (end_iops))
+            raise Exception(
+                "client io is slow, = %d more than 5s" % (end_iops))
     except Exception as e:
-        logger.error("error:%s"%e)
-        start_mult_cs_process(chunkserver_host,num)
-        raise
+        logger.error("error:%s" % e)
+        start_mult_cs_process(chunkserver_host, num)
+        raise
     return chunkserver_host
 
-def test_start_chunkserver_num(num,host=None):
+
+def test_start_chunkserver_num(num, host=None):
     start_iops = get_cluster_iops()
     if host == None:
-        chunkserver_host = random.choice(config.chunkserver_list)
+        chunkserver_host = random.choice(config.chunkserver_list)
     else:
         chunkserver_host = host
-    
logger.info("|------begin test start chunkserver num %d,host %s------|"%(num,chunkserver_host)) + logger.info("|------begin test start chunkserver num %d,host %s------|" % + (num, chunkserver_host)) try: - start_mult_cs_process(chunkserver_host,num) + start_mult_cs_process(chunkserver_host, num) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - raise + raise + def test_outcs_recover_copyset(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test out one chunkserver,host %s------|"%(chunkserver_host)) + logger.info("|------begin test out one chunkserver,host %s------|" % + (chunkserver_host)) try: - cs_list = kill_mult_cs_process(chunkserver_host,1) - begin_num = get_cs_copyset_num(chunkserver_host,cs_list[0]) - #time.sleep(config.recover_time) + cs_list = kill_mult_cs_process(chunkserver_host, 1) + begin_num = get_cs_copyset_num(chunkserver_host, cs_list[0]) + # time.sleep(config.recover_time) i = 0 time.sleep(5) while i < config.recover_time: check_nbd_iops() i = i + 60 - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) + num = get_cs_copyset_num(chunkserver_host, cs_list[0]) time.sleep(60) if num == 0: break - logger.info("cs copyset num is %d"%num) + logger.info("cs copyset num is %d" % num) if num != 0: - # assert num != 0 - raise Exception("host %s chunkserver %d not recover to 0 in %d,now is %d"%(chunkserver_host,cs_list[0],config.recover_time,num)) + # assert num != 0 + raise Exception("host %s chunkserver %d not recover to 0 in %d,now is %d" % ( + chunkserver_host, cs_list[0], config.recover_time, num)) except Exception as e: -# raise AssertionError() - logger.error("error is %s"%e) - cs_list = start_host_cs_process(chunkserver_host,cs_list[0]) + # raise AssertionError() + logger.error("error is %s" % e) + cs_list = start_host_cs_process(chunkserver_host, cs_list[0]) raise - return chunkserver_host,begin_num + return chunkserver_host, begin_num + -def test_upcs_recover_copyset(host,copyset_num): +def test_upcs_recover_copyset(host, copyset_num): if host == None: chunkserver_host = random.choice(config.chunkserver_list) else: chunkserver_host = host - logger.info("|------begin test up one chunkserver,host %s------|"%(chunkserver_host)) + logger.info("|------begin test up one chunkserver,host %s------|" % + (chunkserver_host)) try: - cs_list = start_mult_cs_process(chunkserver_host,1) + cs_list = start_mult_cs_process(chunkserver_host, 1) time.sleep(10) - #time.sleep(config.recover_time) + # time.sleep(config.recover_time) i = 0 while i < config.recover_time: check_nbd_iops() i = i + 60 time.sleep(60) - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) - logger.info("cs copyset num is %d"%num) + num = get_cs_copyset_num(chunkserver_host, cs_list[0]) + logger.info("cs copyset num is %d" % num) if abs(num - copyset_num) <= 10: break if abs(num - copyset_num) > 10: - logger.error("get host %s chunkserver %d copyset num is %d"%(chunkserver_host,cs_list[0],num)) + logger.error("get host %s chunkserver %d copyset num is %d" % + (chunkserver_host, cs_list[0], num)) raise Exception( - "host %s chunkserver %d not recover to %d in %d,now is %d" % \ - (chunkserver_host, cs_list[0],copyset_num,config.recover_time,num)) + "host %s chunkserver %d not recover to %d in %d,now is %d" % + (chunkserver_host, cs_list[0], copyset_num, config.recover_time, num)) except 
Exception as e: - logger.error("error is :%s"%e) - raise + logger.error("error is :%s" % e) + raise return chunkserver_host + def stop_all_cs_not_recover(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test stop all chunkserver,host %s------|"%(chunkserver_host)) + logger.info("|------begin test stop all chunkserver,host %s------|" % + (chunkserver_host)) try: stop_host_cs_process(chunkserver_host) list = get_chunkserver_status(chunkserver_host) down_list = list["down"] dict = {} for cs in down_list: - num = get_cs_copyset_num(chunkserver_host,cs) + num = get_cs_copyset_num(chunkserver_host, cs) dict[cs] = num time.sleep(config.offline_timeout + 10) check_nbd_iops() for cs in dict: - num = get_cs_copyset_num(chunkserver_host,cs) + num = get_cs_copyset_num(chunkserver_host, cs) if num != dict[cs]: - # assert num != 0 - raise Exception("stop all chunkserver not recover fail,cs id %d,copysets num from %d to %d" % (cs,dict[cs],num)) + # assert num != 0 + raise Exception("stop all chunkserver not recover fail,cs id %d,copysets num from %d to %d" % ( + cs, dict[cs], num)) except Exception as e: # raise AssertionError() logger.error("error is %s" % e) @@ -1132,11 +1255,15 @@ def stop_all_cs_not_recover(): raise start_host_cs_process(chunkserver_host) + def pendding_all_cs_recover(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test pendding all chunkserver,host %s------|"%(chunkserver_host)) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - ssh_mds = shell_operator.create_ssh_connect(config.mds_list[0], 1046, config.abnormal_user) + logger.info( + "|------begin test pendding all chunkserver,host %s------|" % (chunkserver_host)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + ssh_mds = shell_operator.create_ssh_connect( + config.mds_list[0], 1046, config.abnormal_user) try: stop_host_cs_process(chunkserver_host) list = get_chunkserver_status(chunkserver_host) @@ -1149,13 +1276,14 @@ def pendding_all_cs_recover(): mds_addrs = ",".join(mds) get_chunkserver_list() for cs in down_list: - chunkserver_id = get_chunkserver_id(chunkserver_host,cs) + chunkserver_id = get_chunkserver_id(chunkserver_host, cs) assert chunkserver_id != -1 csid_list.append(chunkserver_id) pendding_cmd = "sudo curve-tool -mds_addr=%s -op=set_chunkserver \ - -chunkserver_id=%d -chunkserver_status=pendding"%(mds_addrs,chunkserver_id) - rs = shell_operator.ssh_exec(ssh_mds,pendding_cmd) - assert rs[3] == 0,"pendding chunkserver %d fail,rs is %s"%(cs,rs) + -chunkserver_id=%d -chunkserver_status=pendding" % (mds_addrs, chunkserver_id) + rs = shell_operator.ssh_exec(ssh_mds, pendding_cmd) + assert rs[3] == 0, "pendding chunkserver %d fail,rs is %s" % ( + cs, rs) time.sleep(180) test_kill_mds(2) i = 0 @@ -1164,13 +1292,14 @@ def pendding_all_cs_recover(): i = i + 60 time.sleep(60) for cs in down_list: - num = get_cs_copyset_num(chunkserver_host,cs) + num = get_cs_copyset_num(chunkserver_host, cs) if num != 0: break if num == 0: break if num != 0: - logger.error("exist chunkserver %d copyset %d"%(chunkserver_id,num)) + logger.error("exist chunkserver %d copyset %d" % + (chunkserver_id, num)) raise Exception("pendding chunkserver fail") except Exception as e: # raise AssertionError() @@ -1180,7 +1309,7 @@ def pendding_all_cs_recover(): raise test_start_mds() for cs in down_list: - start_host_cs_process(chunkserver_host,cs) + start_host_cs_process(chunkserver_host, cs) 
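The recovery hunks around this point all repeat one wait shape: poll get_cs_copyset_num() roughly once a minute, keep verifying client IO via check_nbd_iops(), and fail if the copyset count has not reached the expected value within config.recover_time. A minimal sketch of that loop, assuming this module's get_cs_copyset_num() and check_nbd_iops() helpers; the target/tol parameter names are illustrative, not part of the repo's API:

import time

def wait_copyset_count(host, cs_id, target, timeout_s, poll_s=60, tol=10):
    # Poll until the chunkserver's copyset count is within tol of target,
    # checking that client IO stays healthy between polls.
    num = get_cs_copyset_num(host, cs_id)  # repo helper, assumed in scope
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        check_nbd_iops()                   # repo helper, assumed in scope
        num = get_cs_copyset_num(host, cs_id)
        if abs(num - target) <= tol:
            return num
        time.sleep(poll_s)
    raise Exception("host %s chunkserver %d not recover to %d in %ds, now is %d"
                    % (host, cs_id, target, timeout_s, num))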
time.sleep(60) list = get_chunkserver_status(chunkserver_host) up_list = list["up"] @@ -1189,23 +1318,28 @@ def pendding_all_cs_recover(): while i < config.recover_time: i = i + 10 time.sleep(10) - num = get_cs_copyset_num(chunkserver_host,cs) - logger.info("cs copyset num is %d"%num) + num = get_cs_copyset_num(chunkserver_host, cs) + logger.info("cs copyset num is %d" % num) if num > 0: break if num == 0: - logger.error("get host %s chunkserver %d copyset num is %d"%(chunkserver_host,cs,num)) + logger.error("get host %s chunkserver %d copyset num is %d" % + (chunkserver_host, cs, num)) raise Exception( - "host %s chunkserver %d not recover to %d in %d,now is %d" % \ - (chunkserver_host, cs,1,config.recover_time,num)) + "host %s chunkserver %d not recover to %d in %d,now is %d" % + (chunkserver_host, cs, 1, config.recover_time, num)) + def pendding_all_cs_recover_online(): cs_host = list(config.chunkserver_list) chunkserver_host = random.choice(config.cs_list) cs_host.remove(chunkserver_host) - logger.info("|------begin test pendding all chunkserver online,host %s------|"%(chunkserver_host)) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - ssh_mds = shell_operator.create_ssh_connect(config.mds_list[0], 1046, config.abnormal_user) + logger.info( + "|------begin test pendding all chunkserver online,host %s------|" % (chunkserver_host)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + ssh_mds = shell_operator.create_ssh_connect( + config.mds_list[0], 1046, config.abnormal_user) try: list = get_chunkserver_status(chunkserver_host) up_list = list["up"] @@ -1216,13 +1350,14 @@ def pendding_all_cs_recover_online(): mds_addrs = ",".join(mds) get_chunkserver_list() for cs in up_list: - chunkserver_id = get_chunkserver_id(chunkserver_host,cs) + chunkserver_id = get_chunkserver_id(chunkserver_host, cs) assert chunkserver_id != -1 csid_list.append(chunkserver_id) pendding_cmd = "sudo curve-tool -mds_addr=%s -op=set_chunkserver \ - -chunkserver_id=%d -chunkserver_status=pendding"%(mds_addrs,chunkserver_id) - rs = shell_operator.ssh_exec(ssh_mds,pendding_cmd) - assert rs[3] == 0,"pendding chunkserver %d fail,rs is %s"%(cs,rs) + -chunkserver_id=%d -chunkserver_status=pendding" % (mds_addrs, chunkserver_id) + rs = shell_operator.ssh_exec(ssh_mds, pendding_cmd) + assert rs[3] == 0, "pendding chunkserver %d fail,rs is %s" % ( + cs, rs) time.sleep(180) test_kill_mds(2) chunkserver_host2 = random.choice(config.cs_list) @@ -1236,7 +1371,7 @@ def pendding_all_cs_recover_online(): i = i + 60 time.sleep(60) for cs in up_list: - num = get_cs_copyset_num(chunkserver_host,cs) + num = get_cs_copyset_num(chunkserver_host, cs) if num != 0: break if num == 0: @@ -1244,7 +1379,8 @@ def pendding_all_cs_recover_online(): stop_host_cs_process(chunkserver_host) wait_health_ok() if num != 0: - logger.error("exist chunkserver %d copyset %d"%(chunkserver_id,num)) + logger.error("exist chunkserver %d copyset %d" % + (chunkserver_id, num)) raise Exception("online pendding chunkserver fail") except Exception as e: # raise AssertionError() @@ -1254,7 +1390,7 @@ def pendding_all_cs_recover_online(): raise test_start_mds() for cs in up_list: - start_host_cs_process(chunkserver_host,cs) + start_host_cs_process(chunkserver_host, cs) time.sleep(60) list = get_chunkserver_status(chunkserver_host) up_list = list["up"] @@ -1263,146 +1399,162 @@ def pendding_all_cs_recover_online(): while i < config.recover_time: i = i + 10 time.sleep(10) - num = 
get_cs_copyset_num(chunkserver_host,cs) - logger.info("cs copyset num is %d"%num) + num = get_cs_copyset_num(chunkserver_host, cs) + logger.info("cs copyset num is %d" % num) if num > 0: break if num == 0: - logger.error("get host %s chunkserver %d copyset num is %d"%(chunkserver_host,cs,num)) + logger.error("get host %s chunkserver %d copyset num is %d" % + (chunkserver_host, cs, num)) raise Exception( - "host %s chunkserver %d not recover to %d in %d,now is %d" % \ - (chunkserver_host, cs,1,config.recover_time,num)) + "host %s chunkserver %d not recover to %d in %d,now is %d" % + (chunkserver_host, cs, 1, config.recover_time, num)) + def test_suspend_recover_copyset(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test suspend recover,host %s------|"%(chunkserver_host)) + logger.info("|------begin test suspend recover,host %s------|" % + (chunkserver_host)) try: - cs_list = kill_mult_cs_process(chunkserver_host,1) - begin_num = get_cs_copyset_num(chunkserver_host,cs_list[0]) - #time.sleep(config.recover_time) + cs_list = kill_mult_cs_process(chunkserver_host, 1) + begin_num = get_cs_copyset_num(chunkserver_host, cs_list[0]) + # time.sleep(config.recover_time) i = 0 time.sleep(config.offline_timeout - 5) while i < config.recover_time: check_nbd_iops() i = i + 1 - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) + num = get_cs_copyset_num(chunkserver_host, cs_list[0]) time.sleep(1) - logger.info("now cs copyset num is %d,begin_num is %d"%(num,begin_num)) - if num > 0 and abs(begin_num - num) > 10 : + logger.info("now cs copyset num is %d,begin_num is %d" % + (num, begin_num)) + if num > 0 and abs(begin_num - num) > 10: break elif num == 0: - cs_list = start_host_cs_process(chunkserver_host,cs_list[0]) - assert False,"copyset is 0" + cs_list = start_host_cs_process(chunkserver_host, cs_list[0]) + assert False, "copyset is 0" start_host_cs_process(chunkserver_host) i = 0 while i < config.recover_time: check_nbd_iops() i = i + 60 - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) + num = get_cs_copyset_num(chunkserver_host, cs_list[0]) time.sleep(60) - logger.info("cs copyset num is %d"%num) + logger.info("cs copyset num is %d" % num) if abs(num - begin_num) < 10: break if abs(num - begin_num) > 10: raise Exception( - "host %s chunkserver %d not recover to %d in %d,now is %d" % \ - (chunkserver_host, cs_list[0],begin_num,config.recover_time,num)) + "host %s chunkserver %d not recover to %d in %d,now is %d" % + (chunkserver_host, cs_list[0], begin_num, config.recover_time, num)) except Exception as e: -# raise AssertionError() - logger.error("error is %s"%e) + # raise AssertionError() + logger.error("error is %s" % e) cs_list = start_host_cs_process(chunkserver_host) raise + def test_suspend_delete_recover_copyset(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test suspend delete recover,host %s------|"%(chunkserver_host)) + logger.info( + "|------begin test suspend delete recover,host %s------|" % (chunkserver_host)) try: - cs_list = kill_mult_cs_process(chunkserver_host,1) - begin_num = get_cs_copyset_num(chunkserver_host,cs_list[0]) - #time.sleep(config.recover_time) + cs_list = kill_mult_cs_process(chunkserver_host, 1) + begin_num = get_cs_copyset_num(chunkserver_host, cs_list[0]) + # time.sleep(config.recover_time) i = 0 time.sleep(10) while i < config.recover_time: check_nbd_iops() i = i + 1 - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) + num = get_cs_copyset_num(chunkserver_host, 
cs_list[0]) time.sleep(1) - logger.info("now cs copyset num is %d,begin_num is %d"%(num,begin_num)) - if num > 0 and abs(begin_num - num) > 10 : + logger.info("now cs copyset num is %d,begin_num is %d" % + (num, begin_num)) + if num > 0 and abs(begin_num - num) > 10: break elif num == 0: - cs_list = start_host_cs_process(chunkserver_host,cs_list[0]) - assert False,"copyset is 0" - start_host_cs_process(chunkserver_host,cs_list[0]) + cs_list = start_host_cs_process(chunkserver_host, cs_list[0]) + assert False, "copyset is 0" + start_host_cs_process(chunkserver_host, cs_list[0]) time.sleep(300) i = 0 while i < config.recover_time: check_nbd_iops() i = i + 60 - num = get_cs_copyset_num(chunkserver_host,cs_list[0]) + num = get_cs_copyset_num(chunkserver_host, cs_list[0]) time.sleep(60) - logger.info("cs copyset num is %d"%num) + logger.info("cs copyset num is %d" % num) if abs(num - begin_num) < 10: break if abs(num - begin_num) > 10: raise Exception( - "host %s chunkserver %d not recover to %d in %d,now is %d" % \ - (chunkserver_host, cs_list[0],begin_num,config.recover_time,num)) + "host %s chunkserver %d not recover to %d in %d,now is %d" % + (chunkserver_host, cs_list[0], begin_num, config.recover_time, num)) except Exception as e: -# raise AssertionError() - logger.error("error is %s"%e) + # raise AssertionError() + logger.error("error is %s" % e) cs_list = start_host_cs_process(chunkserver_host) raise + def test_kill_mds(num=1): start_iops = get_cluster_iops() - logger.info("|------begin test kill mds num %d------|"%(num)) + logger.info("|------begin test kill mds num %d------|" % (num)) mds_ips = list(config.mds_list) try: - for i in range(0,num): + for i in range(0, num): mds_host = random.choice(mds_ips) - logger.info("mds ip is %s"%mds_host) + logger.info("mds ip is %s" % mds_host) kill_mds_process(mds_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) mds_ips.remove(mds_host) except Exception as e: - logger.error("kill mds %s fail"%mds_host) - raise + logger.error("kill mds %s fail" % mds_host) + raise return mds_host + def test_start_mds(): start_iops = get_cluster_iops() try: - logger.info("mds list is %s"%config.mds_list) + logger.info("mds list is %s" % config.mds_list) for mds_host in config.mds_list: start_mds_process(mds_host) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - raise + raise + def test_start_snap(): start_iops = get_cluster_iops() try: - logger.info("snap list is %s"%config.snap_server_list) + logger.info("snap list is %s" % config.snap_server_list) for snap_host in config.snap_server_list: start_snap_process(snap_host) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - raise + raise + def test_start_nginx(): client_host = config.client_list[0] - logger.info("|------begin start nginx,host %s------|"%(client_host)) + logger.info("|------begin start nginx,host %s------|" % (client_host)) cmd = "sudo docker start 5ac540f1608d" - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + 
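Almost every test in this file brackets its fault injection with two get_cluster_iops() samples and raises when IO falls below 90% of the pre-fault baseline. A sketch of that guard as a single helper; the 0.9 threshold comes from the hunks above, and the message wording is a paraphrase rather than the repo's original string:

def assert_io_not_degraded(start_iops, end_iops, ratio=0.9):
    # Fail when cluster IOPS after the fault drops below ratio * baseline.
    if float(end_iops) / float(start_iops) < ratio:
        raise Exception("client io degraded: %s -> %s, below %d%% of baseline"
                        % (start_iops, end_iops, int(ratio * 100)))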
ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"start nginx docker fail %s"%rs[1] + assert rs[3] == 0, "start nginx docker fail %s" % rs[1] + def start_snap_process(host): ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) @@ -1413,12 +1565,13 @@ def start_snap_process(host): return up_cmd = "cd snapshot/temp && sudo nohup curve-snapshotcloneserver -conf=/etc/curve/snapshot_clone_server.conf &" shell_operator.ssh_background_exec2(ssh, up_cmd) - logger.debug("exec %s"%(up_cmd)) + logger.debug("exec %s" % (up_cmd)) time.sleep(2) rs = shell_operator.ssh_exec(ssh, ori_cmd) if rs[1] == []: assert False, "snap up fail" + def test_round_restart_mds(): logger.info("|------begin test round restart mds------|") start_iops = get_cluster_iops() @@ -1430,29 +1583,33 @@ def test_round_restart_mds(): start_mds_process(mds_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - logger.error("round restart mds %s fail"%mds_host) + logger.error("round restart mds %s fail" % mds_host) raise + def test_kill_etcd(num=1): - logger.info("|------begin test kill etcd num %d------|"%(num)) + logger.info("|------begin test kill etcd num %d------|" % (num)) start_iops = get_cluster_iops() etcd_ips = list(config.etcd_list) try: - for i in range(0,num): + for i in range(0, num): etcd_host = random.choice(etcd_ips) - logger.info("etcd ip is %s"%etcd_host) + logger.info("etcd ip is %s" % etcd_host) kill_etcd_process(etcd_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) etcd_ips.remove(etcd_host) except Exception as e: - logger.error("kill etcd %s fail"%etcd_host) + logger.error("kill etcd %s fail" % etcd_host) raise return etcd_host + def test_start_etcd(): start_iops = get_cluster_iops() try: @@ -1460,9 +1617,11 @@ def test_start_etcd(): start_etcd_process(etcd_host) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - raise + raise + def test_round_restart_etcd(): logger.info("|------begin test round restart etcd------|") @@ -1475,11 +1634,13 @@ def test_round_restart_etcd(): start_etcd_process(etcd_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: - logger.error("round restart etcd %s fail"%etcd_host) + logger.error("round restart etcd %s fail" % etcd_host) raise + def test_kill_mysql(): logger.info("|------begin test kill mysql------|") start_iops = get_cluster_iops() @@ -1488,12 +1649,14 @@ def test_kill_mysql(): stop_mysql_process(mysql_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: start_mysql_process(mysql_host) raise return mysql_host + def 
test_start_mysql(host): start_iops = get_cluster_iops() mysql_host = host @@ -1501,69 +1664,84 @@ def test_start_mysql(host): start_mysql_process(mysql_host) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: raise + def test_stop_chunkserver_host(): start_iops = get_cluster_iops() chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test stop chunkserver host %s------|"%(chunkserver_host)) + logger.info("|------begin test stop chunkserver host %s------|" % + (chunkserver_host)) try: stop_host_cs_process(chunkserver_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: start_host_cs_process(chunkserver_host) raise e return chunkserver_host + def test_start_chunkserver_host(host=None): start_iops = get_cluster_iops() if host == None: - chunkserver_host = random.choice(config.chunkserver_list) + chunkserver_host = random.choice(config.chunkserver_list) else: chunkserver_host = host try: start_host_cs_process(chunkserver_host) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: raise e + def test_restart_chunkserver_num(num): start_iops = get_cluster_iops() chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test restart chunkserver num %d,host %s------|"%(num,chunkserver_host)) + logger.info("|------begin test restart chunkserver num %d,host %s------|" % + (num, chunkserver_host)) try: - restart_mult_cs_process(chunkserver_host,num) + restart_mult_cs_process(chunkserver_host, num) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: raise e + def stop_scheduler(): - ssh = shell_operator.create_ssh_connect(config.mds_list[0], 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + config.mds_list[0], 1046, config.abnormal_user) for mds_host in config.mds_list: - logger.info("|------begin stop copyset scheduler %s------|"%(mds_host)) - cmd = "curl -L %s:6666/flags/enableCopySetScheduler?setvalue=false"%mds_host - rs = shell_operator.ssh_exec(ssh,cmd) + logger.info("|------begin stop copyset scheduler %s------|" % + (mds_host)) + cmd = "curl -L %s:6666/flags/enableCopySetScheduler?setvalue=false" % mds_host + rs = shell_operator.ssh_exec(ssh, cmd) time.sleep(180) + def test_start_all_chunkserver(): start_iops = get_cluster_iops() try: for chunkserver_host in config.chunkserver_list: - start_host_cs_process(chunkserver_host) - end_iops = get_cluster_iops() - if float(end_iops) / float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + start_host_cs_process(chunkserver_host) + end_iops = get_cluster_iops() + if float(end_iops) / float(start_iops) < 0.9: + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: raise e + def test_stop_all_chunkserver(): start_iops = 
get_cluster_iops() logger.info("|------begin test stop all chunkserver------|") @@ -1572,18 +1750,21 @@ def test_stop_all_chunkserver(): stop_host_cs_process(chunkserver_host) end_iops = get_cluster_iops() if float(end_iops)/float(start_iops) < 0.9: - raise Exception("client io is slow, = %d more than 5s" % (end_iops)) + raise Exception( + "client io is slow, = %d more than 5s" % (end_iops)) except Exception as e: test_start_all_chunkserver() raise e + def test_kill_diff_host_chunkserver(): start_iops = get_cluster_iops() chunkserver_list = list(config.chunkserver_list) chunkserver_host1 = random.choice(chunkserver_list) chunkserver_list.remove(chunkserver_host1) chunkserver_host2 = random.choice(chunkserver_list) - logger.info("|------begin test kill diff host chunkserver,host1 %s,host2 %s------|"%(chunkserver_host1,chunkserver_host2)) + logger.info("|------begin test kill diff host chunkserver,host1 %s,host2 %s------|" % + (chunkserver_host1, chunkserver_host2)) try: kill_mult_cs_process(chunkserver_host1, 1) kill_mult_cs_process(chunkserver_host2, 1) @@ -1602,44 +1783,52 @@ def test_kill_diff_host_chunkserver(): start_mult_cs_process(chunkserver_host1, 1) start_mult_cs_process(chunkserver_host2, 1) + def test_reboot_nebd(): client_host = random.choice(config.client_list) - logger.info("|------begin test reboot nebd %s------|"%(client_host)) + logger.info("|------begin test reboot nebd %s------|" % (client_host)) cmd = "sudo nebd-daemon restart" - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) - assert rs[3] == 0,"reboot nebd daemon fail,return is %s"%rs[1] + assert rs[3] == 0, "reboot nebd daemon fail,return is %s" % rs[1] + def test_cs_loss_package(percent): start_iops = get_cluster_iops() chunkserver_list = config.chunkserver_list chunkserver_host = random.choice(chunkserver_list) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - dev = get_hostip_dev(ssh,chunkserver_host) - logger.info("|------begin test host %s dev %s loss package------|"%(chunkserver_host,dev)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + dev = get_hostip_dev(ssh, chunkserver_host) + logger.info("|------begin test host %s dev %s loss package------|" % + (chunkserver_host, dev)) try: package_loss_all(ssh, dev, percent) - show_tc_inject(ssh,dev) + show_tc_inject(ssh, dev) check_nbd_iops(1) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.1: raise Exception("client io slow op more than 5s") except Exception as e: - raise + raise finally: time.sleep(60) - cancel_tc_inject(ssh,dev) + cancel_tc_inject(ssh, dev) + def test_mds_loss_package(percent): start_iops = get_cluster_iops() mds_list = config.mds_list mds_host = random.choice(mds_list) - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) - dev = get_hostip_dev(ssh,mds_host) - logger.info("|------begin test host %s dev %s loss package------|"%(mds_host,dev)) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) + dev = get_hostip_dev(ssh, mds_host) + logger.info("|------begin test host %s dev %s loss package------|" % + (mds_host, dev)) try: package_loss_all(ssh, dev, percent) - show_tc_inject(ssh,dev) + show_tc_inject(ssh, dev) check_nbd_iops(1) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.1: @@ -1648,18 +1837,21 @@ 
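The loss/delay tests rely on package_loss_all(), package_delay_all(), show_tc_inject() and cancel_tc_inject(), none of which appear in this patch. One plausible implementation with standard tc/netem run on the target device (the repo's actual qdisc options may differ):

import subprocess

def inject_loss(dev, percent):
    # Drop `percent`% of egress packets on dev; the delay tests follow the
    # same pattern with e.g. "delay", "100ms" in place of "loss", "10%".
    subprocess.check_call(["tc", "qdisc", "add", "dev", dev, "root",
                           "netem", "loss", "%d%%" % percent])

def show_inject(dev):
    subprocess.check_call(["tc", "qdisc", "show", "dev", dev])

def cancel_inject(dev):
    # Delete the root qdisc to restore normal networking.
    subprocess.check_call(["tc", "qdisc", "del", "dev", dev, "root"])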
def test_mds_loss_package(percent): raise finally: time.sleep(60) - cancel_tc_inject(ssh,dev) + cancel_tc_inject(ssh, dev) + def test_cs_delay_package(ms): start_iops = get_cluster_iops() chunkserver_list = config.chunkserver_list chunkserver_host = random.choice(chunkserver_list) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - dev = get_hostip_dev(ssh,chunkserver_host) - logger.info("|------begin test host %s dev %s delay package------|"%(chunkserver_host,dev)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + dev = get_hostip_dev(ssh, chunkserver_host) + logger.info("|------begin test host %s dev %s delay package------|" % + (chunkserver_host, dev)) try: package_delay_all(ssh, dev, ms) - show_tc_inject(ssh,dev) + show_tc_inject(ssh, dev) check_nbd_iops(1) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.1: @@ -1668,18 +1860,21 @@ def test_cs_delay_package(ms): raise finally: time.sleep(60) - cancel_tc_inject(ssh,dev) + cancel_tc_inject(ssh, dev) + def test_mds_delay_package(ms): start_iops = get_cluster_iops() mds_list = config.mds_list mds_host = random.choice(mds_list) - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) - dev = get_hostip_dev(ssh,mds_host) - logger.info("|------begin test host %s dev %s delay package------|"%(mds_host,dev)) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) + dev = get_hostip_dev(ssh, mds_host) + logger.info("|------begin test host %s dev %s delay package------|" % + (mds_host, dev)) try: package_delay_all(ssh, dev, ms) - show_tc_inject(ssh,dev) + show_tc_inject(ssh, dev) # check_nbd_iops(1) end_iops = get_cluster_iops() if float(end_iops) / float(start_iops) < 0.1: @@ -1688,75 +1883,93 @@ def test_mds_delay_package(ms): raise finally: time.sleep(60) - cancel_tc_inject(ssh,dev) + cancel_tc_inject(ssh, dev) + def test_chunkserver_cpu_stress(stress=50): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test chunkserver cpu stress,host %s------|"%(chunkserver_host)) + logger.info( + "|------begin test chunkserver cpu stress,host %s------|" % (chunkserver_host)) cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 robot/Resources/keywords/cpu_stress.py \ - %s:~/"%(config.pravie_key_path,chunkserver_host) + %s:~/" % (config.pravie_key_path, chunkserver_host) shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - inject_cpu_stress(ssh,stress) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + inject_cpu_stress(ssh, stress) return ssh - + + def test_mds_cpu_stress(stress=50): mds_host = random.choice(config.mds_list) - logger.info("|------begin test mds cpu stress,host %s------|"%(mds_host)) + logger.info("|------begin test mds cpu stress,host %s------|" % (mds_host)) cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 robot/Resources/keywords/cpu_stress.py \ - %s:~/"%(config.pravie_key_path,mds_host) + %s:~/" % (config.pravie_key_path, mds_host) shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) - inject_cpu_stress(ssh,stress) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) + inject_cpu_stress(ssh, stress) return ssh + def test_client_cpu_stress(stress=50): -# client_host = random.choice(config.client_list) + # client_host = random.choice(config.client_list) 
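The CPU-stress tests above scp robot/Resources/keywords/cpu_stress.py to the target host and then call inject_cpu_stress(). The script itself is not part of this patch; a minimal sketch of what such a script typically does, sized to a percentage of the host's cores (the real script's interface may differ):

import multiprocessing
import time

def _burn(stop_at):
    # Busy-loop one core until the deadline.
    while time.time() < stop_at:
        pass

def cpu_stress(percent=50, duration_s=300):
    nworkers = max(1, multiprocessing.cpu_count() * percent // 100)
    stop_at = time.time() + duration_s
    workers = [multiprocessing.Process(target=_burn, args=(stop_at,))
               for _ in range(nworkers)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()

if __name__ == "__main__":
    cpu_stress()

The memory-stress tests below presumably work the same way, sizing the allocation from the `free -g` total they parse before calling inject_mem_stress().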
client_host = config.client_list[0] - logger.info("|------begin test client cpu stress,host %s------|"%(client_host)) + logger.info("|------begin test client cpu stress,host %s------|" % + (client_host)) cmd = "scp -i %s -o StrictHostKeyChecking=no -P 1046 robot/Resources/keywords/cpu_stress.py \ - %s:~/"%(config.pravie_key_path,client_host) + %s:~/" % (config.pravie_key_path, client_host) shell_operator.run_exec2(cmd) - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) - inject_cpu_stress(ssh,stress) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) + inject_cpu_stress(ssh, stress) return ssh + def test_chunkserver_mem_stress(stress=50): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test chunkserver mem stress,host %s------|"%(chunkserver_host)) + logger.info( + "|------begin test chunkserver mem stress,host %s------|" % (chunkserver_host)) cmd = "free -g |grep Mem|awk \'{print $2}\'" - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) all_mem = int("".join(rs[1]).strip()) stress = all_mem * stress / 100 - inject_mem_stress(ssh,stress) + inject_mem_stress(ssh, stress) return ssh + def test_mds_mem_stress(stress=50): mds_host = random.choice(config.mds_list) - logger.info("|------begin test mds mem stress,host %s------|"%(mds_host)) + logger.info("|------begin test mds mem stress,host %s------|" % (mds_host)) cmd = "free -g |grep Mem|awk \'{print $2}\'" - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) all_mem = int("".join(rs[1]).strip()) stress = all_mem * stress / 100 - inject_mem_stress(ssh,stress) + inject_mem_stress(ssh, stress) return ssh + def test_client_mem_stress(stress=50): client_host = config.client_list[0] - logger.info("|------begin test client mem stress,host %s------|"%(client_host)) + logger.info("|------begin test client mem stress,host %s------|" % + (client_host)) cmd = "free -g |grep Mem|awk \'{print $2}\'" - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) rs = shell_operator.ssh_exec(ssh, cmd) all_mem = int("".join(rs[1]).strip()) stress = all_mem * stress / 100 - inject_mem_stress(ssh,stress) + inject_mem_stress(ssh, stress) return ssh + def test_chunkserver_network_stress(): chunkserver_host = random.choice(config.chunkserver_list) - logger.info("|------begin test chunkserver network stress,host %s------|"%(chunkserver_host)) + logger.info( + "|------begin test chunkserver network stress,host %s------|" % (chunkserver_host)) t1 = mythread.runThread(listen_network_stress, chunkserver_host) t2 = mythread.runThread(inject_network_stress, chunkserver_host) t1.start() @@ -1764,9 +1977,11 @@ def test_chunkserver_network_stress(): t2.start() return chunkserver_host + def test_mds_network_stress(): mds_host = random.choice(config.mds_list) - logger.info("|------begin test mds network stress,host %s------|"%(mds_host)) + logger.info( + "|------begin test mds network stress,host %s------|" % (mds_host)) t1 = mythread.runThread(listen_network_stress, mds_host) t2 = mythread.runThread(inject_network_stress, mds_host) t1.start() @@ -1774,9 
+1989,11 @@ def test_mds_network_stress(): t2.start() return mds_host + def test_client_network_stress(): client_host = config.client_list[0] - logger.info("|------begin test client network stress,host %s------|"%(client_host)) + logger.info( + "|------begin test client network stress,host %s------|" % (client_host)) t1 = mythread.runThread(listen_network_stress, client_host) t2 = mythread.runThread(inject_network_stress, client_host) t1.start() @@ -1784,23 +2001,31 @@ def test_client_network_stress(): t2.start() return client_host + def test_chunkserver_clock_offset(offset): chunkserver_host = random.choice(config.chunkserver_list) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) - inject_clock_offset(ssh,offset) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) + inject_clock_offset(ssh, offset) return ssh + def test_mds_clock_offset(offset): mds_host = random.choice(config.mds_list) - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) - inject_clock_offset(ssh,offset) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) + inject_clock_offset(ssh, offset) return ssh -#使用cycle会从掉电到上电有1秒钟的间隔 +# There is a 1-second interval from power down to power up when using cycle + + def test_ipmitool_restart_chunkserver(): chunkserver_host = random.choice(config.chunkserver_reset_list) - logger.info("|------begin test chunkserver ipmitool cycle,host %s------|"%(chunkserver_host)) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) + logger.info( + "|------begin test chunkserver ipmitool cycle,host %s------|" % (chunkserver_host)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) ipmitool_cycle_restart_host(ssh) time.sleep(60) starttime = time.time() @@ -1812,13 +2037,16 @@ def test_ipmitool_restart_chunkserver(): else: logger.debug("wait host up") time.sleep(5) - assert status,"restart host %s fail"%chunkserver_host + assert status, "restart host %s fail" % chunkserver_host start_host_cs_process(chunkserver_host) + def test_ipmitool_restart_client(): client_host = config.client_list[0] - logger.info("|------begin test client ipmitool cycle,host %s------|"%(client_host)) - ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user) + logger.info( + "|------begin test client ipmitool cycle,host %s------|" % (client_host)) + ssh = shell_operator.create_ssh_connect( + client_host, 1046, config.abnormal_user) ipmitool_cycle_restart_host(ssh) time.sleep(60) starttime = time.time() @@ -1830,13 +2058,17 @@ def test_ipmitool_restart_client(): else: logger.debug("wait host up") time.sleep(5) - assert status,"restart host %s fail"%client_host + assert status, "restart host %s fail" % client_host + +# There is no interval between power-off and power-on when using reset + -#使用reset从掉电到上电没有间隔 def test_ipmitool_reset_chunkserver(): chunkserver_host = random.choice(config.chunkserver_reset_list) - logger.info("|------begin test chunkserver ipmitool reset,host %s------|"%(chunkserver_host)) - ssh = shell_operator.create_ssh_connect(chunkserver_host, 1046, config.abnormal_user) + logger.info( + "|------begin test chunkserver ipmitool reset,host %s------|" % (chunkserver_host)) + ssh = shell_operator.create_ssh_connect( + chunkserver_host, 1046, config.abnormal_user) ipmitool_reset_restart_host(ssh) time.sleep(60) starttime = time.time() @@ -1848,13 +2080,16 @@ def 
test_ipmitool_reset_chunkserver(): else: logger.debug("wait host up") time.sleep(5) - assert status,"restart host %s fail"%chunkserver_host + assert status, "restart host %s fail" % chunkserver_host start_host_cs_process(chunkserver_host) + def test_ipmitool_restart_mds(): mds_host = random.choice(config.mds_reset_list) - logger.info("|------begin test mds ipmitool cycle,host %s------|"%(mds_host)) - ssh = shell_operator.create_ssh_connect(mds_host, 1046, config.abnormal_user) + logger.info( + "|------begin test mds ipmitool cycle,host %s------|" % (mds_host)) + ssh = shell_operator.create_ssh_connect( + mds_host, 1046, config.abnormal_user) ipmitool_cycle_restart_host(ssh) time.sleep(60) starttime = time.time() @@ -1866,11 +2101,12 @@ def test_ipmitool_restart_mds(): else: logger.debug("wait host up") time.sleep(5) - assert status,"restart host %s fail"%mds_host + assert status, "restart host %s fail" % mds_host start_mds_process(mds_host) start_etcd_process(mds_host) start_host_cs_process(mds_host) + def clean_last_data(): ssh = shell_operator.create_ssh_connect(config.vm_host, 22, config.vm_user) ori_cmd = "rm /root/perf/test-ssd/fiodata/* && rm /root/perf/test-ssd/cfg/*" @@ -1879,19 +2115,20 @@ def clean_last_data(): ori_cmd = "rm /root/perf/fiodata -rf" rs = shell_operator.ssh_exec(ssh, ori_cmd) + def analysis_data(ssh): ori_cmd = "cd /root/perf/ && python gen_randrw_data.py" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"gen randrw data fail,error is %s"%rs[1] + assert rs[3] == 0, "gen randrw data fail,error is %s" % rs[1] ori_cmd = "cat /root/perf/test.csv" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"get data fail,error is %s"%rs[1] + assert rs[3] == 0, "get data fail,error is %s" % rs[1] for line in rs[1]: if 'randread,4k' in line: randr_4k_iops = line.split(',')[4] elif 'randwrite,4k' in line: randw_4k_iops = line.split(',')[8] - elif 'write,512k' in line: + elif 'write,512k' in line: write_512k_iops = line.split(',')[8] elif 'read,512k' in line: read_512k_iops = line.split(',')[4] @@ -1900,24 +2137,29 @@ def analysis_data(ssh): read_512k_BW = float(read_512k_iops)*1000/2 write_512k_BW = float(write_512k_iops)*1000/2 logger.info("get one volume Basic data:-------------------------------") - logger.info("4k rand read iops is %d/s"%int(randr_4k_iops)) - logger.info("4k rand write iops is %d/s"%int(randw_4k_iops)) - logger.info("512k read BW is %d MB/s"%int(read_512k_BW)) - logger.info("512k write BW is %d MB/s"%int(write_512k_BW)) + logger.info("4k rand read iops is %d/s" % int(randr_4k_iops)) + logger.info("4k rand write iops is %d/s" % int(randw_4k_iops)) + logger.info("512k read BW is %d MB/s" % int(read_512k_BW)) + logger.info("512k write BW is %d MB/s" % int(write_512k_BW)) filename = "onevolume_perf.txt" - with open(filename,'w') as f: - f.write("4k randwrite %d/s 56000\n"%int(randw_4k_iops)) - f.write("4k randread %d/s 75000\n"%int(randr_4k_iops)) - f.write("512k write %dMB/s 135\n"%int(write_512k_BW)) - f.write("512k read %dMB/s 450\n"%int(read_512k_BW)) + with open(filename, 'w') as f: + f.write("4k randwrite %d/s 56000\n" % int(randw_4k_iops)) + f.write("4k randread %d/s 75000\n" % int(randr_4k_iops)) + f.write("512k write %dMB/s 135\n" % int(write_512k_BW)) + f.write("512k read %dMB/s 450\n" % int(read_512k_BW)) if randr_4k_iops < 75000: - assert float(75000 - randr_4k_iops)/75000 < 0.02,"4k_randr_iops did not meet expectations,expect more than 75000" + assert float(75000 - randr_4k_iops) / \ + 75000 < 0.02, "4k_randr_iops did not meet 
expectations,expect more than 75000" if randw_4k_iops < 56000: - assert float(56000 - randw_4k_iops)/56000 < 0.02,"4k_randw_iops did not meet expectations,expect more than 56000" + assert float(56000 - randw_4k_iops) / \ + 56000 < 0.02, "4k_randw_iops did not meet expectations,expect more than 56000" if read_512k_BW < 450: - assert float(450 - read_512k_BW)/450 < 0.02,"512k_read_bw did not meet expectations,expect more than 450" + assert float(450 - read_512k_BW) / \ + 450 < 0.02, "512k_read_bw did not meet expectations,expect more than 450" if write_512k_BW < 135: - assert float(135 - write_512k_BW)/135 < 0.02,"512k_write_bw did not meet expectations,expect more than 135" + assert float(135 - write_512k_BW) / \ + 135 < 0.02, "512k_write_bw did not meet expectations,expect more than 135" + def perf_test(): ssh = shell_operator.create_ssh_connect(config.vm_host, 22, config.vm_user) @@ -1929,7 +2171,7 @@ def perf_test(): -bs=4k -size=200G -runtime=300 -numjobs=1 -time_based" shell_operator.ssh_exec(ssh, init_io) start_test = "cd /root/perf && nohup python /root/perf/io_test.py &" - shell_operator.ssh_background_exec2(ssh,start_test) + shell_operator.ssh_background_exec2(ssh, start_test) time.sleep(60) final = 0 starttime = time.time() @@ -1942,123 +2184,134 @@ def perf_test(): else: logger.debug("wait io test finally") time.sleep(60) - assert final == 1,"io test have not finall" + assert final == 1, "io test have not finall" ori_cmd = "cp -r /root/perf/test-ssd/fiodata /root/perf" rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"cp fiodata fail,error is %s"%rs[1] + assert rs[3] == 0, "cp fiodata fail,error is %s" % rs[1] analysis_data(ssh) + def add_data_disk(): ori_cmd = "bash attach_thrash.sh" - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"attach thrash vol fail,rs is %s"%rs[1] + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "attach thrash vol fail,rs is %s" % rs[1] ori_cmd = "cat thrash_vm" - rs = shell_operator.ssh_exec(ssh,ori_cmd) - logger.info("rs is %s"%rs[1]) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + logger.info("rs is %s" % rs[1]) vm_list = [] for i in rs[1]: - logger.info("uuid is %s"%i) - vm_list.append(i.strip()) + logger.info("uuid is %s" % i) + vm_list.append(i.strip()) vm_ip_list = [] for vm in vm_list: - ori_cmd = "source OPENRC && nova list|grep %s"%vm - rs = shell_operator.ssh_exec(ssh,ori_cmd) + ori_cmd = "source OPENRC && nova list|grep %s" % vm + rs = shell_operator.ssh_exec(ssh, ori_cmd) ret = "".join(rs[1]).strip() - ip = re.findall(r'\d+\.\d+\.\d+\.\d+',ret) - logger.info("get vm %s ip %s"%(vm,ip)) + ip = re.findall(r'\d+\.\d+\.\d+\.\d+', ret) + logger.info("get vm %s ip %s" % (vm, ip)) vm_ip_list.append(ip[0]) ssh.close() ssh = shell_operator.create_ssh_connect(config.vm_host, 22, config.vm_user) for ip in vm_ip_list: - ori_cmd = "ssh %s -o StrictHostKeyChecking=no "%ip + "\"" + " supervisorctl reload && supervisorctl start all " + "\"" + ori_cmd = "ssh %s -o StrictHostKeyChecking=no " % ip + "\"" + \ + " supervisorctl reload && supervisorctl start all " + "\"" logger.info("exec cmd %s" % ori_cmd) rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"start supervisor fail,rs is %s"%rs[1] + assert rs[3] == 0, "start supervisor fail,rs is %s" % rs[1] ssh.close() def create_vm_image(vm_name): - ssh = 
shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - ori_cmd = "source OPENRC && nova list |grep %s | awk '{print $2}'"%(vm_name) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + ori_cmd = "source OPENRC && nova list |grep %s | awk '{print $2}'" % ( + vm_name) + rs = shell_operator.ssh_exec(ssh, ori_cmd) logger.info("vm uuid is %s" % rs[1]) thrash_vm_uuid = "".join(rs[1]).strip() - ori_cmd = "source OPENRC && nova image-create %s image-%s"%(thrash_vm_uuid,vm_name) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"create vm %s image fail"%(thrash_vm_uuid) + ori_cmd = "source OPENRC && nova image-create %s image-%s" % ( + thrash_vm_uuid, vm_name) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "create vm %s image fail" % (thrash_vm_uuid) starttime = time.time() - ori_cmd = "source OPENRC && nova image-list|grep image-%s|awk '{print $6}'"%vm_name + ori_cmd = "source OPENRC && nova image-list|grep image-%s|awk '{print $6}'" % vm_name while time.time() - starttime < 600: rs = shell_operator.ssh_exec(ssh, ori_cmd) if "".join(rs[1]).strip() == "ACTIVE": break elif "".join(rs[1]).strip() == "ERROR": - assert False,"create vm image image-%s fail"%(vm_name) + assert False, "create vm image image-%s fail" % (vm_name) else: time.sleep(10) if "".join(rs[1]).strip() != "ACTIVE": - assert False,"wait image create image-%s fail"%(vm_name) - ori_cmd = "source OPENRC && nova image-list|grep image-%s|awk '{print $2}'"%vm_name + assert False, "wait image create image-%s fail" % (vm_name) + ori_cmd = "source OPENRC && nova image-list|grep image-%s|awk '{print $2}'" % vm_name rs = shell_operator.ssh_exec(ssh, ori_cmd) return "".join(rs[1]).strip() + def get_all_curvevm_active_num(num): - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) starttime = time.time() while time.time() - starttime < 600: - ori_cmd = "source OPENRC && nova list |grep %s | grep ACTIVE | wc -l"%config.vm_prefix - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"get vm status fail" + ori_cmd = "source OPENRC && nova list |grep %s | grep ACTIVE | wc -l" % config.vm_prefix + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "get vm status fail" if int("".join(rs[1]).strip()) == num: break else: time.sleep(10) active_num = "".join(rs[1]).strip() - ori_cmd = "source OPENRC && nova list |grep %s | awk '{print $2}'"%config.vm_prefix - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"get vm uuid fail" + ori_cmd = "source OPENRC && nova list |grep %s | awk '{print $2}'" % config.vm_prefix + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "get vm uuid fail" for uuid in rs[1]: uuid = uuid.strip() status = "up" cmd = "source OPENRC && nova show %s |grep os-server-status |awk \'{print $4}\'" % uuid st = shell_operator.ssh_exec(ssh, cmd) status = "".join(st[1]).strip() - assert status == "up","get vm status fail,not up.is %s,current vm id is %s"%(status,uuid) + assert status == "up", "get vm status fail,not up.is %s,current vm id is %s" % ( + status, uuid) return active_num + def init_create_curve_vm(num): image_id = config.image_id salt = ''.join(random.sample(string.ascii_letters + string.digits, 8)) - logger.info("vm name is thrash-%s"%salt) - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) + 
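create_vm_image() above and init_create_curve_vm() below both poll nova output until the resource turns ACTIVE, bail out early on ERROR, and give up after a fixed deadline. The same shape as a generic helper; fetch_status is a caller-supplied callable, not a repo API:

import time

def wait_for_status(fetch_status, want="ACTIVE", fail="ERROR",
                    timeout_s=600, poll_s=10):
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        status = fetch_status()
        if status == want:
            return
        if status == fail:
            # Fail fast instead of burning the whole deadline.
            raise Exception("resource entered %s state" % status)
        time.sleep(poll_s)
    raise Exception("status still not %s after %ds" % (want, timeout_s))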
logger.info("vm name is thrash-%s" % salt) + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) ori_cmd = "source OPENRC && nova boot --flavor 400 --image %s --vnc-password 000000 --availability-zone %s \ --key-name cyh --nic vpc-net=ff89c80a-585d-4b19-992a-462f4d2ddd27:77a410be-1cf4-4992-8894-0c0bc67f5e48 \ - --meta use-vpc=True --meta instance_image_type=curve thrash-%s"%(config.image_id,config.avail_zone,salt) - rs = shell_operator.ssh_exec(ssh,ori_cmd) + --meta use-vpc=True --meta instance_image_type=curve thrash-%s" % (config.image_id, config.avail_zone, salt) + rs = shell_operator.ssh_exec(ssh, ori_cmd) logger.info("exec cmd %s" % ori_cmd) - assert rs[3] == 0,"create vm fail,return is %s"%rs[1] - vm_name = "thrash-%s"%salt + assert rs[3] == 0, "create vm fail,return is %s" % rs[1] + vm_name = "thrash-%s" % salt starttime = time.time() - ori_cmd = "source OPENRC && nova list|grep %s|awk '{print $6}'"%vm_name + ori_cmd = "source OPENRC && nova list|grep %s|awk '{print $6}'" % vm_name while time.time() - starttime < 600: rs = shell_operator.ssh_exec(ssh, ori_cmd) if "".join(rs[1]).strip() == "ACTIVE": break elif "".join(rs[1]).strip() == "ERROR": - assert False,"create vm %s fail"%(vm_name) + assert False, "create vm %s fail" % (vm_name) else: time.sleep(10) if "".join(rs[1]).strip() != "ACTIVE": - assert False,"wait vm ok %s fail"%(vm_name) + assert False, "wait vm ok %s fail" % (vm_name) new_image_id = create_vm_image(vm_name) config.vm_prefix = vm_name - for i in range(1,num): + for i in range(1, num): ori_cmd = "source OPENRC && nova boot --flavor 400 --image %s --vnc-password 000000 --availability-zone %s \ --key-name cyh --nic vpc-net=ff89c80a-585d-4b19-992a-462f4d2ddd27:77a410be-1cf4-4992-8894-0c0bc67f5e48 \ - --meta use-vpc=True --meta instance_image_type=curve thrash-%s-%d"%(new_image_id,config.avail_zone,salt,i) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"create vm fail,return is %s"%rs[1] + --meta use-vpc=True --meta instance_image_type=curve thrash-%s-%d" % (new_image_id, config.avail_zone, salt, i) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "create vm fail,return is %s" % rs[1] starttime = time.time() while time.time() - starttime < 300: active_num = int(get_all_curvevm_active_num(num)) @@ -2067,28 +2320,32 @@ def init_create_curve_vm(num): break else: time.sleep(10) - assert active_num == num,"some vm are abnormal,%d is acitve"%active_num + assert active_num == num, "some vm are abnormal,%d is acitve" % active_num + def reboot_curve_vm(): - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - ori_cmd = "vm=`source OPENRC && nova list |grep %s |awk '{print $2}'`;source OPENRC;for i in $vm;do nova reboot $i;done "%config.vm_prefix - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"reboot curve vm fail" + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + ori_cmd = "vm=`source OPENRC && nova list |grep %s |awk '{print $2}'`;source OPENRC;for i in $vm;do nova reboot $i;done " % config.vm_prefix + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "reboot curve vm fail" + def clean_curve_data(): ori_cmd = "bash detach_thrash.sh" - ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"detach thrash vol fail,rs is %s"%rs[1] - ori_cmd = "vm=`source OPENRC && nova list|grep %s | awk '{print $2}'`;source OPENRC;for i in 
$vm;do nova delete $i;done"%config.vm_prefix - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"delete vm fail,rs is %s"%rs[1] - ori_cmd = "source OPENRC && nova image-list |grep image-%s | awk '{print $2}'"%config.vm_prefix - rs = shell_operator.ssh_exec(ssh,ori_cmd) + ssh = shell_operator.create_ssh_connect( + config.nova_host, 1046, config.nova_user) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "detach thrash vol fail,rs is %s" % rs[1] + ori_cmd = "vm=`source OPENRC && nova list|grep %s | awk '{print $2}'`;source OPENRC;for i in $vm;do nova delete $i;done" % config.vm_prefix + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "delete vm fail,rs is %s" % rs[1] + ori_cmd = "source OPENRC && nova image-list |grep image-%s | awk '{print $2}'" % config.vm_prefix + rs = shell_operator.ssh_exec(ssh, ori_cmd) image_id = "".join(rs[1]).strip() - ori_cmd = "source OPENRC && nova image-delete %s"%(image_id) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"delete image fail,rs is %s"%rs + ori_cmd = "source OPENRC && nova image-delete %s" % (image_id) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "delete image fail,rs is %s" % rs time.sleep(30) ori_cmd = "curve_ops_tool list -fileName=/nova |grep Total" rs = shell_operator.ssh_exec(ssh, ori_cmd) @@ -2097,46 +2354,53 @@ def clean_curve_data(): else: ori_cmd = "curve_ops_tool list -fileName=/nova" rs = shell_operator.ssh_exec(ssh, ori_cmd) - logger.error("No deleted files: %s"%rs[1]) - assert False,"vm or image not be deleted" + logger.error("No deleted files: %s" % rs[1]) + assert False, "vm or image not be deleted" + def do_thrasher(action): - #start level1 + # start level1 if type(action) is types.StringType: - logger.debug("开始启动故障XXXXXXXXXXXXXXXXXXX %s XXXXXXXXXXXXXXXXXXXXXXXXX"%action) + logger.debug( + "Start injecting fault XXXXXXXXXXXXXXXXXXX %s XXXXXXXXXXXXXXXXXXXXXXXXX" % action) globals()[action]() else: - logger.debug("开始启动故障XXXXXXXXXXXXXXXXXXX %s,%s XXXXXXXXXXXXXXXXXXXXXX"%(action[0],str(action[1]))) + logger.debug("Start injecting fault XXXXXXXXXXXXXXXXXXX %s,%s XXXXXXXXXXXXXXXXXXXXXX" % ( + action[0], str(action[1]))) globals()[action[0]](action[1]) + def start_retired_and_down_chunkservers(): for host in config.chunkserver_list: - ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user) + ssh = shell_operator.create_ssh_connect( + host, 1046, config.abnormal_user) try: - cs_status = get_chunkserver_status(host) - down_cs = cs_status["down"] - if down_cs == []: - continue - logger.debug("down_cs is %s"%down_cs) - for cs in down_cs: - if get_cs_copyset_num(host,cs) == 0: - ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat"%(cs) - rs = shell_operator.ssh_exec(ssh, ori_cmd) - assert rs[3] == 0,"rm chunkserver%d chunkserver.dat fail"%cs - ori_cmd = "sudo /home/nbs/chunkserver_ctl.sh start %d"%cs - logger.debug("exec %s"%ori_cmd) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - assert rs[3] == 0,"start chunkserver fail,error is %s"%rs[1] - time.sleep(2) - ori_cmd = "ps -ef|grep -v grep | grep -w chunkserver%d | awk '{print $2}' && \ + cs_status = get_chunkserver_status(host) + down_cs = cs_status["down"] + if down_cs == []: + continue + logger.debug("down_cs is %s" % down_cs) + for cs in down_cs: + if get_cs_copyset_num(host, cs) == 0: + ori_cmd = "sudo rm -rf /data/chunkserver%d/chunkserver.dat" % ( + cs) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "rm chunkserver%d chunkserver.dat fail" % cs + ori_cmd = "sudo
/home/nbs/chunkserver_ctl.sh start %d" % cs + logger.debug("exec %s" % ori_cmd) + rs = shell_operator.ssh_exec(ssh, ori_cmd) + assert rs[3] == 0, "start chunkserver fail,error is %s" % rs[1] + time.sleep(2) + ori_cmd = "ps -ef|grep -v grep | grep -w chunkserver%d | awk '{print $2}' && \ ps -ef|grep -v grep | grep -w /etc/curve/chunkserver.conf.%d |grep -v sudo | awk '{print $2}'" % (cs, cs) - rs = shell_operator.ssh_exec(ssh,ori_cmd) - if rs[1] == []: - assert False,"up chunkserver fail" + rs = shell_operator.ssh_exec(ssh, ori_cmd) + if rs[1] == []: + assert False, "up chunkserver fail" except: raise ssh.close() + def get_level_list(level): if level == "level1": return config.level1 diff --git a/robot/Resources/keywords/snapshot_operate.py b/robot/Resources/keywords/snapshot_operate.py index f21c2be296..d902cd0737 100644 --- a/robot/Resources/keywords/snapshot_operate.py +++ b/robot/Resources/keywords/snapshot_operate.py @@ -18,8 +18,9 @@ def create_curve_file_for_snapshot(file_name=config.snapshot_file_name, user_nam else: return rc + def create_curve_file_for_snapshot_delete(file_name="/lc-delete", user_name=config.user_name, size=config.size, - pass_word=config.pass_word): + pass_word=config.pass_word): curvefs = swig_operate.LibCurve() rc = curvefs.libcurve_create(file_name, user_name, size, pass_word) if rc != 0: @@ -28,9 +29,11 @@ def create_curve_file_for_snapshot_delete(file_name="/lc-delete", user_name=conf else: return rc + def delete_curve_file_for_shanpshot(): curvefs = swig_operate.LibCurve() - rc = curvefs.libcurve_delete(config.snapshot_file_name, config.user_name, config.pass_word) + rc = curvefs.libcurve_delete( + config.snapshot_file_name, config.user_name, config.pass_word) if rc != 0: logger.info("delete_curve_file_for_shanpshot file fail. rc = %s" % rc) return rc @@ -44,21 +47,25 @@ def write_curve_file_for_snapshot(file_name=config.snapshot_file_name, user_name buf=config.buf, offset=config.offset, length=config.length): curvefs = swig_operate.LibCurve() fd = curvefs.libcurve_open(file_name, user_name, pass_word) - logger.info("fd=%s, buf=%s, offset=%s, length=%s" % (fd, buf, offset, length)) + logger.info("fd=%s, buf=%s, offset=%s, length=%s" % + (fd, buf, offset, length)) rs = curvefs.libcurve_write(fd, buf, offset, length) if rs < 0: - logger.error("write_curve_file_for_snapshot libcurve_write file fail. rc = %s" % rs) + logger.error( + "write_curve_file_for_snapshot libcurve_write file fail. rc = %s" % rs) return rs raise AssertionError rc = curvefs.libcurve_close(fd) if rc != 0: - logger.info("write_curve_file_for_snapshot close libcurve file fail. rc = %s" % rc) + logger.info( + "write_curve_file_for_snapshot close libcurve file fail. 
rc = %s" % rc) return rc def read_4k_length_curve_file(): curvefs = swig_operate.LibCurve() - fd = curvefs.libcurve_open(config.snapshot_file_name, config.user_name, config.pass_word) + fd = curvefs.libcurve_open( + config.snapshot_file_name, config.user_name, config.pass_word) content = curvefs.libcurve_read(fd, "", 0, 4096) return content @@ -68,22 +75,26 @@ def modify_curve_file_for_snapshot(file_name=config.snapshot_file_name, user_nam curvefs = swig_operate.LibCurve() fd = curvefs.libcurve_open(file_name, user_name, pass_word) buf = "tttttttt" * 512 - logger.info("fd=%s, buf=%s, offset=%s, length=%s" % (fd, buf, offset, length)) + logger.info("fd=%s, buf=%s, offset=%s, length=%s" % + (fd, buf, offset, length)) rs = curvefs.libcurve_write(fd, buf, offset, length) if rs < 0: - logger.error("write_curve_file_for_snapshot libcurve_write file fail. rc = %s" % rs) + logger.error( + "write_curve_file_for_snapshot libcurve_write file fail. rc = %s" % rs) return rs raise AssertionError rc = curvefs.libcurve_close(fd) if rc != 0: - logger.info("write_curve_file_for_snapshot close libcurve file fail. rc = %s" % rc) + logger.info( + "write_curve_file_for_snapshot close libcurve file fail. rc = %s" % rc) return rc def snapshot_normal_create(file_name=config.snapshot_file_name, user_name=config.user_name, password=config.pass_word): client = snapshot_client.CurveSnapshot() seq = client.create_snapshot(file_name, user_name, password) - logger.info("create_curve_file_for_snapshot file and return seq.value = %s" % seq) + logger.info( + "create_curve_file_for_snapshot file and return seq.value = %s" % seq) return seq @@ -93,7 +104,8 @@ def snapshot_create_with_not_exist_file(file_name="/notexistfile", user_name=con rc = client.create_snapshot(file_name, user_name, password) logger.info("snapshot_create_with_not_exist_file , return rc = %s" % rc) if rc != 0: - logger.error("snapshot_create_with_not_exist_file file fail. rc = %s" % rc) + logger.error( + "snapshot_create_with_not_exist_file file fail. rc = %s" % rc) return rc @@ -103,25 +115,28 @@ def snapshot_create_with_empty_str_file(file_name=" ", user_name=config.user_nam rc = client.create_snapshot(file_name, user_name, password) logger.info("snapshot_create_with_empty_str_file , return rc = %s" % rc) if rc != 0: - logger.error("snapshot_create_with_empty_str_file file fail. rc = %s" % rc) + logger.error( + "snapshot_create_with_empty_str_file file fail. rc = %s" % rc) return rc -# "特殊字符`-=[];',./ ~!@#$%^&*()_+{}|:\"<>?" -# "特殊 字符`-=[]\\;',./ ~!@#$%^&*()_+{}|:\"<>?" +# "Special Characters`-=[];',./ ~!@#$%^&*()_+{}|:\"<>?" +# "Special Characters`-=[]\\;',./ ~!@#$%^&*()_+{}|:\"<>?" def snapshot_create_with_special_file_name(file_name="/特殊 字符`-=[]\\;',./ ~!@#$%^&*()_+{}|:\"<>?", user_name=config.user_name, password=config.pass_word): client = snapshot_client.CurveSnapshot() rc = client.create_snapshot(file_name, user_name, password) logger.info("snapshot_create_with_special_file_name , return rc = %s" % rc) if rc != 0: - logger.error("snapshot_create_with_special_file_name file fail. rc = %s" % rc) + logger.error( + "snapshot_create_with_special_file_name file fail. 
rc = %s" % rc) return rc def get_sanpshot_info(seq): client = snapshot_client.CurveSnapshot() - finfo = client.get_snapshot(config.snapshot_file_name, config.user_name, config.pass_word, seq) + finfo = client.get_snapshot( + config.snapshot_file_name, config.user_name, config.pass_word, seq) # logger.info("get_sanpshot_info , file snapshot info.status = %s, owner = %s, filename = %s, " # "length = %s, chunksize = %s, seqnum = %s, segmentsize = %s , parentid = %s, " # "filetype = %s, ctime = %s" % ( @@ -131,25 +146,28 @@ def get_sanpshot_info(seq): return finfo -# 创建并获取快照文件信息 +# Create and obtain snapshot file information def create_snapshot_and_get_snapshot_info(file_name=config.snapshot_file_name, user_name=config.user_name, password=config.pass_word): client = snapshot_client.CurveSnapshot() seq = client.create_snapshot(file_name, user_name, password) - logger.info("create_snapshot_and_get_snapshot_info create snapshot success. seq = %s" % seq.value) + logger.info( + "create_snapshot_and_get_snapshot_info create snapshot success. seq = %s" % seq.value) finfo = client.get_snapshot(file_name, user_name, password, seq) return finfo -# 正常获取快照文件分配信息 +# Obtain snapshot file allocation information normally def get_normal_snapshot_segment_info(file_name=config.snapshot_file_name, user_name=config.user_name, password=config.pass_word): seq = snapshot_normal_create(file_name, user_name, password) client = snapshot_client.CurveSnapshot() offset = curvesnapshot.type_uInt64_t() offset.value = 0 - seginfo = client.get_snapshot_SegmentInfo(file_name, user_name, password, seq, offset) - logger.info("get_normal_snapshot_segment_info seq = %s, seginfo = %s" % seq % seginfo) + seginfo = client.get_snapshot_SegmentInfo( + file_name, user_name, password, seq, offset) + logger.info( + "get_normal_snapshot_segment_info seq = %s, seginfo = %s" % seq % seginfo) return seginfo @@ -159,7 +177,7 @@ def get_normal_chunk_info(file_name=config.snapshot_file_name, user_name=config. 
client = snapshot_client.CurveSnapshot() chunkinfo = client.get_chunk_Info(seginfo.chunkvec[0]) logger.info("get_normal_chunkInfo chunkInfo info = %s" % chunkinfo) - return chunkinfo # 可以对chunInfo.chunkSn进行断言验证 + return chunkinfo # Can perform assertion validation on chunInfo.chunkSn def get_chunk_info_with_chunk_id_info(idinfo): @@ -175,7 +193,7 @@ def get_snapshot_first_segment_info(seq): offset = curvesnapshot.type_uInt64_t() offset.value = 0 seginfo = client.get_snapshot_SegmentInfo(config.snapshot_file_name, config.user_name, config.pass_word, seq, - offset) + offset) # logger.info( # "get_snapshot_first_segment_info seq = %s, segmsize = %s, chunksize = %s, startoffset = %s, chunkvecsize = %s, " # % ( @@ -220,7 +238,8 @@ def read_chunk_snapshot(idinfo, seq): buf = "tttttttt" * 512 rc = client.read_chunk_snapshot(idinfo, seq, offset, len, buf) if rc != len.value: - logger.info("read_chunk_snapshot fail , expect len = %s, real len = %s" % (len.value, rc)) + logger.info( + "read_chunk_snapshot fail , expect len = %s, real len = %s" % (len.value, rc)) return rc logger.info("read_chunk_snapshot ,return buf = %s" % buf) return buf @@ -228,14 +247,16 @@ def read_chunk_snapshot(idinfo, seq): def check_snapshot_status(seq): client = snapshot_client.CurveSnapshot() - status = client.check_snapshot_status(config.snapshot_file_name, config.user_name, config.pass_word, seq) + status = client.check_snapshot_status( + config.snapshot_file_name, config.user_name, config.pass_word, seq) logger.info("check_snapshot_status rc = %s " % status) return status def delete_file_snapshot(seq): client = snapshot_client.CurveSnapshot() - rc = client.delete_snapshot(config.snapshot_file_name, config.user_name, config.pass_word, seq) + rc = client.delete_snapshot( + config.snapshot_file_name, config.user_name, config.pass_word, seq) return rc @@ -253,7 +274,8 @@ def create_clone_chunk_with_s3_object(chunkidinfo): seq.value = 1 correctseq = curvesnapshot.type_uInt64_t() correctseq.value = 0 - rc = client.create_clone_chunk(config.snapshot_s3_object_location, idinfo, seq, correctseq, chunksize) + rc = client.create_clone_chunk( + config.snapshot_s3_object_location, idinfo, seq, correctseq, chunksize) return rc diff --git a/robot/curve_choas.txt b/robot/curve_choas.txt index ff39c335e5..0f9b389152 100644 --- a/robot/curve_choas.txt +++ b/robot/curve_choas.txt @@ -37,7 +37,7 @@ test one volume perf stop rwio perf test -#启动大压力情况下的混沌测试:分等级进行随机故障注入。每次注入完成后恢复集群所有业务,目前设置100次的全流程注入 +# Conduct chaos testing under high stress: Inject faults of various levels randomly. Restore all cluster operations after each injection. 
Currently set for 100 rounds of full injection inject cluster chaos test [Tags] P2 chaos longtime @@ -47,17 +47,17 @@ inject cluster chaos test ${num} evaluate int(10) init create curve vm ${num} :FOR ${i} IN RANGE 10 - log "启动第"${i}"轮故障" + log "Starting Round "${i}" of Fault Injection" ${choas1} evaluate random.choice($choas_level1) random - log "开始启动一级故障" + log "Starting Level 1 Fault" do thrasher ${choas1} sleep 30 ${choas2} evaluate random.choice($choas_level2) random - log "开始启动二级故障" + log "Starting Level 2 Fault" do thrasher ${choas2} sleep 30 ${choas3} evaluate random.choice($choas_level3) random - log "开始启动三级故障" + log "Starting Level 3 Fault" do thrasher ${choas3} sleep 30 clean env diff --git a/robot/curve_robot.txt b/robot/curve_robot.txt index 8709a96b6e..9f49ca2caa 100644 --- a/robot/curve_robot.txt +++ b/robot/curve_robot.txt @@ -1628,7 +1628,7 @@ test kill chunkserver one check loop read ${new_fd} [Teardown] file clean ${new_fd} -# create snapshot 相关用例 +# Create snapshot related use cases create snapshot with notexist file [Tags] P0 base first release test-snapshot @@ -1698,7 +1698,7 @@ create snapshot with nomal file and check first chunk snapshot [Teardown] delete curve file for shanpshot -# 创建文件->写文件->创建快照->修改文件->读快照验证(修改前数据)->删除重新快照->验证快照数据(修改后数据) +# Create file ->Write file ->Create snapshot ->Modify file ->Read snapshot verification (data before modification) ->Delete re snapshot ->Verify snapshot data (data after modification) create snapshot and check chunk snapshot after cow [Tags] P0 base first release test-snapshot ${rc} create curve file for snapshot @@ -1744,7 +1744,7 @@ create snapshot repeat should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# getsnapshot info 用例 +# Getsnapshot info use case get empty file snapshot info [Tags] P0 base first release test-snapshot @@ -1871,7 +1871,7 @@ delete snapshoting curve file should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# getsegmentinfo 相关用例 +# Use cases related to getsegmentinfo check snapshot segmentinfo after modify file [Tags] P0 base first release test-snapshot @@ -1981,7 +1981,7 @@ get empty file snapshot segmentinfo should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# snapshot chunkinfo 用例验证 +# Snapshot chunkinfo use case validation check empty file snapshot chunkinfo after modify file [Tags] P0 base first release test-snapshot @@ -2038,10 +2038,10 @@ get snapshot chunkinfo with notexist chunidinfo write curve file for snapshot ${seq} snapshot normal create ${seginfo} get snapshot first segment info ${seq} - # 修改chunkidinfo + # Modify chunkidinfo ${seginfo.chunkvec[0].cpid_.value} evaluate int(66) ${chunkinfo} get chunk info with chunk id info ${seginfo.chunkvec[0]} - #TODO: 此处需要判断错误,当前是死循环,不停轮询查询id信息 + # TODO: An error needs to be determined here. 
Currently, it is a dead loop and constantly polls for ID information ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} @@ -2063,7 +2063,7 @@ check snapshot chunkinfo after delete snapshot ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} - # 此处应该再重新获取下segmentinfo, chunkvec[0]应该不存在 + # We should retrieve segmentinfo again here, chunkvec [0] should not exist ${chunkinfo} get chunk info with chunk id info ${seginfo.chunkvec[0]} should be equal ${chunkinfo.snSize.value} ${expect_size} should be equal ${chunkinfo.chunkSn[0]} ${expect_first_sn} @@ -2071,7 +2071,7 @@ check snapshot chunkinfo after delete snapshot [Teardown] delete curve file for shanpshot -# read snapshot chunk 用例 CLDCFS-1249 +# Read snapshot chunk use case CLDCFS-1249 read snapshot chunk with notexist idinfo [Tags] P0 base first release no-need @@ -2081,10 +2081,10 @@ read snapshot chunk with notexist idinfo write curve file for snapshot ${seq} snapshot normal create ${seginfo} get snapshot first segment info ${seq} - # 修改chunkidinfo + # Modify chunkidinfo ${seginfo.chunkvec[0].cpid_.value} evaluate int(66) ${content} read chunk snapshot ${seginfo.chunkvec[0]} ${seq} - # TODO:当前客户端死循环打印错误,此处校验结果应该返回错误 + # TODO: The current client has a loop printing error, and the verification result should return an error here ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} @@ -2101,7 +2101,7 @@ read snapshot chunk with error seq ${seginfo} get snapshot first segment info ${seq} ${seq.value} evaluate int(8) ${content} read chunk snapshot ${seginfo.chunkvec[0]} ${seq} - # 此处校验结果应该返回错误 + # The verification result should return an error here ${expect_rst} evaluate int(-6) should be equal ${content} ${expect_rst} ${seq.value} evaluate int(1) @@ -2110,7 +2110,7 @@ read snapshot chunk with error seq should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# 查询快照状态用例 +# Query snapshot status use case check empty file snapshot status [Tags] P0 base first release test-snapshot ${rc} create curve file for snapshot @@ -2197,7 +2197,7 @@ check snapshot status use error seq [Teardown] delete curve file for shanpshot -# 删除快照相关用例 +# Delete snapshot related use cases repeat delete snapshot [Tags] P0 base first release test-snapshot ${rc} create curve file for snapshot @@ -2230,7 +2230,7 @@ delete snapshot use error seq should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# 删除chunk快照(当前无限重试,需要调用方设置重试次数) CLDCFS-1254 +# Delete chunk snapshot (currently infinite retries, caller needs to set retry count) CLDCFS-1254 delete chunk snapshot with snapshot seq [Tags] P0 base first release no-need ${rc} create curve file for snapshot @@ -2243,7 +2243,7 @@ delete chunk snapshot with snapshot seq ${rc} delete chunk snapshot with correct sn ${seginfo.chunkvec[0]} ${seq} should be equal ${rc} ${expect_rc} ${content} read chunk snapshot ${seginfo.chunkvec[0]} ${seq} - # 此处判断返回结果是否为错误 + # Determine whether the returned result is an error here ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} @@ -2291,8 +2291,8 @@ repeat delete chunk snapshot [Teardown] delete curve file for shanpshot -# 创建clone&recover -# 步骤:创建文件、写文件、创建快照记录seq,触发cow,获取快照信息(版本号),createclonechunk(指定s3上对象,correctedseq=快照seq),恢复快照,验证chunk数据是否为s3数据 +# Create clone&recover +# Steps: Create a file, write a file, create a snapshot record seq, trigger Cow, obtain snapshot information (version number), create clonechunk (specify an object on s3, 
correctedseq=snapshot seq), restore the snapshot, verify if the chunk data is s3 data
 create clone and recover chunk
     [Tags]    P0    base    first    release   test-snapshot
     ${rc}    create curve file for snapshot
@@ -2311,7 +2311,7 @@ create clone and recover chunk
     should be equal    ${rc}    ${expect_rc}
     ${rc}    recover chunk data    ${seginfo.chunkvec[0]}
     should be equal    ${rc}    ${expect_rc}
-    # check数据
+    # Check the data
     ${content}    read chunk snapshot    ${seginfo.chunkvec[0]}    ${seq}
     ${expect_content}   evaluate    str("aaaaaaaa")*512
     should be equal    ${content}    ${expect_content}
diff --git a/src/chunkserver/chunk_closure.cpp b/src/chunkserver/chunk_closure.cpp
index 09b259ae7e..ae00f97a66 100644
--- a/src/chunkserver/chunk_closure.cpp
+++ b/src/chunkserver/chunk_closure.cpp
@@ -21,6 +21,7 @@
  */

 #include "src/chunkserver/chunk_closure.h"
+
 #include

 namespace curve {
@@ -28,21 +29,22 @@ namespace chunkserver {

 void ChunkClosure::Run() {
     /**
-     * 在Run结束之后,自动析构自己,这样可以避免
-     * 析构函数漏调
+     * Destroys this closure automatically when Run() finishes, so the
+     * destructor can never be missed.
      */
     std::unique_ptr selfGuard(this);
     /**
-     * 确保done能够被调用,目的是保证rpc一定会返回
+     * Makes sure done is invoked, so the rpc is guaranteed to return.
      */
     brpc::ClosureGuard doneGuard(request_->Closure());
     /**
-     * 尽管在request propose给copyset的之前已经
-     * 对leader身份进行了确认,但是在copyset处理
-     * request的时候,当前copyset的身份还是有可能
-     * 变成非leader,所以需要判断ChunkClosure被调
-     * 用的时候,request的status,如果 ok,说明是
-     * 正常的apply处理,否则将请求转发
+     * Although the leader identity was already confirmed before the request
+     * was proposed to the copyset, the copyset may still become a non-leader
+     * while the request is being processed. So when ChunkClosure is invoked
+     * we check the request's status: if it is ok, this is a normal apply;
+     * otherwise the request is forwarded.
      */
     if (status().ok()) {
         return;
@@ -61,13 +63,13 @@ void ScanChunkClosure::Run() {
         case CHUNK_OP_STATUS_CHUNK_NOTEXIST:
             LOG(WARNING) << "scan chunk failed, read chunk not exist. "
                          << request_->ShortDebugString();
-        break;
+            break;
         case CHUNK_OP_STATUS_FAILURE_UNKNOWN:
             LOG(ERROR) << "scan chunk failed, read chunk unknown failure. "
                        << request_->ShortDebugString();
-        break;
-        default:
-        break;
+            break;
+        default:
+            break;
     }
 }

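The double-guard idiom in ChunkClosure::Run() above is easy to get wrong, so a minimal standalone sketch may help. This is illustrative only and not part of the patch; MyClosure and its members are hypothetical names:

#include <brpc/closure_guard.h>
#include <google/protobuf/stubs/callback.h>

#include <memory>

// Hypothetical closure showing the two guards used by ChunkClosure::Run().
class MyClosure : public google::protobuf::Closure {
 public:
    explicit MyClosure(google::protobuf::Closure* done) : done_(done) {}

    void Run() override {
        // Frees `this` on every return path, so the closure cannot leak.
        std::unique_ptr<MyClosure> selfGuard(this);
        // Invokes done_->Run() when the guard leaves scope, so the rpc
        // always gets a response.
        brpc::ClosureGuard doneGuard(done_);
        // ... inspect the status here; forward the request on failure ...
    }

 private:
    google::protobuf::Closure* done_;
};

The ordering matters: the self-guard is declared first so it is destroyed last, keeping the object alive while the done-guard fires.
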
diff --git a/src/chunkserver/chunk_closure.h b/src/chunkserver/chunk_closure.h
index e2d76b7174..6700527c26 100755
--- a/src/chunkserver/chunk_closure.h
+++ b/src/chunkserver/chunk_closure.h
@@ -24,20 +24,23 @@
 #define SRC_CHUNKSERVER_CHUNK_CLOSURE_H_

 #include
+
 #include

-#include "src/chunkserver/op_request.h"
 #include "proto/chunk.pb.h"
+#include "src/chunkserver/op_request.h"

 namespace curve {
 namespace chunkserver {

 /**
- * 携带op request的所有上下文的closure,通过braft::Task传递给raft处理,
- * 调用会有两个地方:
- * 1.op request正常的被raft处理,最后on apply的时候会调用返回
- * 2.op request被打包给raft处理之后,但是还没有来得及处理就出错了,例如leader
- * step down变为了非leader,那么会明确的提前向client返回错误
+ * A closure that carries the full context of an op request; it is handed to
+ * raft for processing via braft::Task. It is invoked in two places:
+ * 1. The op request is processed by raft normally, and the closure is called
+ * when the request is finally applied.
+ * 2. The op request was packaged for raft but an error occurred before it
+ * could be processed, e.g. the leader stepped down and became a non-leader;
+ * in that case an error is returned to the client explicitly and early.
 */
 class ChunkClosure : public braft::Closure {
  public:
@@ -49,37 +52,37 @@ class ChunkClosure : public braft::Closure {
     void Run() override;

  public:
-    // 包含了op request 的上下文信息
+    // Contains the context of the op request
    std::shared_ptr request_;
 };

 class ScanChunkClosure : public google::protobuf::Closure {
  public:
-    ScanChunkClosure(ChunkRequest *request, ChunkResponse *response) :
-        request_(request), response_(response) {}
+    ScanChunkClosure(ChunkRequest* request, ChunkResponse* response)
+        : request_(request), response_(response) {}

     ~ScanChunkClosure() = default;

     void Run() override;

  public:
-    ChunkRequest *request_;
-    ChunkResponse *response_;
+    ChunkRequest* request_;
+    ChunkResponse* response_;
 };

 class SendScanMapClosure : public google::protobuf::Closure {
  public:
-    SendScanMapClosure(FollowScanMapRequest * request,
-                       FollowScanMapResponse *response,
-                       uint64_t timeout,
-                       uint32_t retry,
-                       uint64_t retryIntervalUs,
-                       brpc::Controller* cntl,
-                       brpc::Channel *channel) :
-        request_(request), response_(response),
-        rpcTimeoutMs_(timeout), retry_(retry),
-        retryIntervalUs_(retryIntervalUs),
-        cntl_(cntl), channel_(channel) {}
+    SendScanMapClosure(FollowScanMapRequest* request,
+                       FollowScanMapResponse* response, uint64_t timeout,
+                       uint32_t retry, uint64_t retryIntervalUs,
+                       brpc::Controller* cntl, brpc::Channel* channel)
+        : request_(request),
+          response_(response),
+          rpcTimeoutMs_(timeout),
+          retry_(retry),
+          retryIntervalUs_(retryIntervalUs),
+          cntl_(cntl),
+          channel_(channel) {}

     ~SendScanMapClosure() = default;

@@ -89,13 +92,13 @@ class SendScanMapClosure : public google::protobuf::Closure {
     void Guard();

  public:
-    FollowScanMapRequest *request_;
-    FollowScanMapResponse *response_;
+    FollowScanMapRequest* request_;
+    FollowScanMapResponse* response_;
     uint64_t rpcTimeoutMs_;
     uint32_t retry_;
     uint64_t retryIntervalUs_;
-    brpc::Controller *cntl_;
-    brpc::Channel *channel_;
+    brpc::Controller* cntl_;
+    brpc::Channel* channel_;
 };

 }  // namespace chunkserver
diff --git a/src/chunkserver/chunk_service.cpp b/src/chunkserver/chunk_service.cpp
index d6e9034641..85d3d241a5 100755
--- a/src/chunkserver/chunk_service.cpp
+++ b/src/chunkserver/chunk_service.cpp
@@ -22,31 +22,30 @@

 #include "src/chunkserver/chunk_service.h"

-#include
 #include
 #include
+#include

-#include
 #include
+#include
 #include

+#include "include/curve_compiler_specific.h"
+#include "src/chunkserver/chunk_service_closure.h"
+#include "src/chunkserver/chunkserver_metrics.h"
 #include "src/chunkserver/copyset_node.h"
 #include "src/chunkserver/copyset_node_manager.h"
-#include "src/chunkserver/chunkserver_metrics.h"
 #include "src/chunkserver/op_request.h"
-#include "src/chunkserver/chunk_service_closure.h"
 #include "src/common/fast_align.h"

-#include "include/curve_compiler_specific.h"
-
 namespace curve {
 namespace chunkserver {

 using ::curve::common::is_aligned;

 ChunkServiceImpl::ChunkServiceImpl(
-    const ChunkServiceOptions& chunkServiceOptions,
-    const std::shared_ptr& epochMap)
+    const ChunkServiceOptions& chunkServiceOptions,
+    const std::shared_ptr& epochMap)
     : chunkServiceOptions_(chunkServiceOptions),
       copysetNodeManager_(chunkServiceOptions.copysetNodeManager),
       inflightThrottle_(chunkServiceOptions.inflightThrottle),
       epochMap_(epochMap) {
    maxChunkSize_ = 
copysetNodeManager_->GetCopysetNodeOptions().maxChunkSize; } -void ChunkServiceImpl::DeleteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::DeleteChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -76,7 +71,7 @@ void ChunkServiceImpl::DeleteChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -86,24 +81,17 @@ void ChunkServiceImpl::DeleteChunk(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::WriteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::WriteChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -116,11 +104,11 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); DVLOG(9) << "Get write I/O request, op: " << request->optype() - << " offset: " << request->offset() - << " size: " << request->size() << " buf header: " - << *(unsigned int *) cntl->request_attachment().to_string().c_str() + << " offset: " << request->offset() << " size: " << request->size() + << " buf header: " + << *(unsigned int*)cntl->request_attachment().to_string().c_str() << " attachement size " << cntl->request_attachment().size(); if (request->has_epoch()) { @@ -134,7 +122,7 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, } } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); DVLOG(9) << "I/O request, op: " << request->optype() @@ -144,7 +132,7 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -154,24 +142,18 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + response, doneGuard.release()); req->Process(); } -void 
ChunkServiceImpl::CreateCloneChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::CreateCloneChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, + Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -184,7 +166,8 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - // 请求创建的chunk大小和copyset配置的大小不一致 + // The chunk size requested for creation does not match the size configured + // for copyset if (request->size() != maxChunkSize_) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); DVLOG(9) << "Invalid chunk size: " << request->optype() @@ -193,7 +176,7 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -203,19 +186,15 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared( + nodePtr, controller, request, response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::CreateS3CloneChunk(RpcController* controller, - const CreateS3CloneChunkRequest* request, - CreateS3CloneChunkResponse* response, - Closure* done) { +void ChunkServiceImpl::CreateS3CloneChunk( + RpcController* controller, const CreateS3CloneChunkRequest* request, + CreateS3CloneChunkResponse* response, Closure* done) { (void)controller; (void)request; brpc::ClosureGuard doneGuard(done); @@ -223,15 +202,11 @@ void ChunkServiceImpl::CreateS3CloneChunk(RpcController* controller, LOG(INFO) << "Invalid request, serverSide Not implement yet"; } -void ChunkServiceImpl::ReadChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::ReadChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -244,7 +219,7 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "I/O request, op: " << request->optype() @@ -254,7 +229,7 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -264,25 +239,17 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, 
return; } - std::shared_ptr req = - std::make_shared(nodePtr, - chunkServiceOptions_.cloneManager, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = std::make_shared( + nodePtr, chunkServiceOptions_.cloneManager, controller, request, + response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::RecoverChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::RecoverChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -295,7 +262,7 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "I/O request, op: " << request->optype() @@ -305,7 +272,7 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -315,26 +282,19 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // RecoverChunk请求和ReadChunk请求共用ReadChunkRequest - std::shared_ptr req = - std::make_shared(nodePtr, - chunkServiceOptions_.cloneManager, - controller, - request, - response, - doneGuard.release()); + // RecoverChunk request and ReadChunk request share ReadChunkRequest + std::shared_ptr req = std::make_shared( + nodePtr, chunkServiceOptions_.cloneManager, controller, request, + response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::ReadChunkSnapshot(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, + Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -347,13 +307,13 @@ void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -363,25 +323,17 @@ void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + 
response, doneGuard.release());
     req->Process();
 }

 void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn(
-    RpcController *controller,
-    const ChunkRequest *request,
-    ChunkResponse *response,
-    Closure *done) {
-    ChunkServiceClosure* closure =
-        new (std::nothrow) ChunkServiceClosure(inflightThrottle_,
-                                               request,
-                                               response,
-                                               done);
+    RpcController* controller, const ChunkRequest* request,
+    ChunkResponse* response, Closure* done) {
+    ChunkServiceClosure* closure = new (std::nothrow)
+        ChunkServiceClosure(inflightThrottle_, request, response, done);
     CHECK(nullptr != closure) << "new chunk service closure failed";
     brpc::ClosureGuard doneGuard(closure);

@@ -401,7 +353,7 @@ void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn(
         return;
     }

-    // 判断copyset是否存在
+    // Determine if the copyset exists
     auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(),
                                                        request->copysetid());
     if (nullptr == nodePtr) {
@@ -412,31 +364,26 @@ void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn(
         return;
     }

-    std::shared_ptr
-        req = std::make_shared(nodePtr,
-                               controller,
-                               request,
-                               response,
-                               doneGuard.release());
+    std::shared_ptr req =
+        std::make_shared(nodePtr, controller, request,
+                         response, doneGuard.release());
     req->Process();
 }

 /**
- * 当前GetChunkInfo在rpc service层定义和Chunk Service分离的,
- * 且其并不经过QoS或者raft一致性协议,所以这里没有让其继承
- * OpRequest或者QoSRequest来重新封装,而是直接原地处理掉了
+ * GetChunkInfo is currently defined at the rpc service layer, separate from
+ * Chunk Service, and it goes through neither QoS nor the raft consensus
+ * protocol. So instead of wrapping it as an OpRequest or QoSRequest, it is
+ * handled directly in place.
 */
-void ChunkServiceImpl::GetChunkInfo(RpcController *controller,
-                                    const GetChunkInfoRequest *request,
-                                    GetChunkInfoResponse *response,
-                                    Closure *done) {
+void ChunkServiceImpl::GetChunkInfo(RpcController* controller,
+                                    const GetChunkInfoRequest* request,
+                                    GetChunkInfoResponse* response,
+                                    Closure* done) {
     (void)controller;
-    ChunkServiceClosure* closure =
-        new (std::nothrow) ChunkServiceClosure(inflightThrottle_,
-                                               nullptr,
-                                               nullptr,
-                                               done);
+    ChunkServiceClosure* closure = new (std::nothrow)
+        ChunkServiceClosure(inflightThrottle_, nullptr, nullptr, done);

     CHECK(nullptr != closure) << "new chunk service closure failed";
     brpc::ClosureGuard doneGuard(closure);
@@ -449,10 +396,9 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller,
         return;
     }

-    // 判断copyset是否存在
-    auto nodePtr =
-        copysetNodeManager_->GetCopysetNode(request->logicpoolid(),
-                                            request->copysetid());
+    // Determine if the copyset exists
+    auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(),
+                                                       request->copysetid());
     if (nullptr == nodePtr) {
         response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST);
         LOG(WARNING) << "GetChunkInfo failed, copyset node is not found: "
@@ -460,7 +406,7 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller,
         return;
     }

-    // 检查任期和自己是不是Leader
+    // Check the term and whether this node is the leader
     if (!nodePtr->IsLeaderTerm()) {
         PeerId leader = nodePtr->GetLeaderId();
         if (!leader.is_empty()) {
@@ -476,16 +422,15 @@
     ret = nodePtr->GetDataStore()->GetChunkInfo(request->chunkid(), &chunkInfo);
     if (CSErrorCode::Success == ret) {
-        // 1.成功,此时chunk文件肯定存在
+        // 1. 
Success, the chunk file must exist at this time response->add_chunksn(chunkInfo.curSn); - if (chunkInfo.snapSn > 0) - response->add_chunksn(chunkInfo.snapSn); + if (chunkInfo.snapSn > 0) response->add_chunksn(chunkInfo.snapSn); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - // 2.chunk文件不存在,返回的版本集合为空 + // 2. Chunk file does not exist, returned version set is empty response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else { - // 3.其他错误 + // 3. Other errors LOG(ERROR) << "get chunk info failed, " << " logic pool id: " << request->logicpoolid() << " copyset id: " << request->copysetid() @@ -497,14 +442,14 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, } } -void ChunkServiceImpl::GetChunkHash(RpcController *controller, - const GetChunkHashRequest *request, - GetChunkHashResponse *response, - Closure *done) { +void ChunkServiceImpl::GetChunkHash(RpcController* controller, + const GetChunkHashRequest* request, + GetChunkHashResponse* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->length())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "GetChunkHash illegal parameter:" @@ -517,10 +462,9 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, return; } - // 判断copyset是否存在 - auto nodePtr = - copysetNodeManager_->GetCopysetNode(request->logicpoolid(), - request->copysetid()); + // Determine if the copyset exists + auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), + request->copysetid()); if (nullptr == nodePtr) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); LOG(WARNING) << "GetChunkHash failed, copyset node is not found: " @@ -531,21 +475,19 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, CSErrorCode ret; std::string hash; - ret = nodePtr->GetDataStore()->GetChunkHash(request->chunkid(), - request->offset(), - request->length(), - &hash); + ret = nodePtr->GetDataStore()->GetChunkHash( + request->chunkid(), request->offset(), request->length(), &hash); if (CSErrorCode::Success == ret) { - // 1.成功 + // 1. Success response->set_hash(hash); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - // 2.chunk文件不存在,返回0的hash值 + // 2. Chunk file does not exist, return a hash value of 0 response->set_hash("0"); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else { - // 3.其他错误 + // 3. 
Other errors LOG(ERROR) << "get chunk hash failed, " << " logic pool id: " << request->logicpoolid() << " copyset id: " << request->copysetid() @@ -557,18 +499,17 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, } } -void ChunkServiceImpl::UpdateEpoch(RpcController *controller, - const UpdateEpochRequest *request, - UpdateEpochResponse *response, - Closure *done) { +void ChunkServiceImpl::UpdateEpoch(RpcController* controller, + const UpdateEpochRequest* request, + UpdateEpochResponse* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); bool success = epochMap_->UpdateEpoch(request->fileid(), request->epoch()); if (success) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); LOG(INFO) << "Update fileId: " << request->fileid() - << " to epoch: " << request->epoch() - << " success."; + << " to epoch: " << request->epoch() << " success."; } else { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD); LOG(WARNING) << "Update fileId: " << request->fileid() @@ -579,7 +520,7 @@ void ChunkServiceImpl::UpdateEpoch(RpcController *controller, bool ChunkServiceImpl::CheckRequestOffsetAndLength(uint32_t offset, uint32_t len) const { - // 检查offset+len是否越界 + // Check if offset+len is out of range if (CURVE_UNLIKELY(offset + len > maxChunkSize_)) { return false; } diff --git a/src/chunkserver/chunk_service.h b/src/chunkserver/chunk_service.h index e15aea389b..04e37feac9 100755 --- a/src/chunkserver/chunk_service.h +++ b/src/chunkserver/chunk_service.h @@ -23,9 +23,9 @@ #ifndef SRC_CHUNKSERVER_CHUNK_SERVICE_H_ #define SRC_CHUNKSERVER_CHUNK_SERVICE_H_ -#include #include #include +#include #include "proto/chunk.pb.h" #include "src/chunkserver/config_info.h" @@ -34,84 +34,71 @@ namespace curve { namespace chunkserver { -using ::google::protobuf::RpcController; using ::google::protobuf::Closure; +using ::google::protobuf::RpcController; class CopysetNodeManager; class ChunkServiceImpl : public ChunkService { public: explicit ChunkServiceImpl(const ChunkServiceOptions& chunkServiceOptions, - const std::shared_ptr &epochMap); + const std::shared_ptr& epochMap); ~ChunkServiceImpl() {} - void DeleteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void ReadChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void WriteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void ReadChunkSnapshot(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void DeleteChunkSnapshotOrCorrectSn(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void CreateCloneChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); + void DeleteChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void ReadChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void WriteChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void ReadChunkSnapshot(RpcController* controller, + const ChunkRequest* request, ChunkResponse* response, + Closure* done); + + void DeleteChunkSnapshotOrCorrectSn(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, 
Closure* done);
+
+    void CreateCloneChunk(RpcController* controller,
+                          const ChunkRequest* request, ChunkResponse* response,
+                          Closure* done);

     void CreateS3CloneChunk(RpcController* controller,
-                        const CreateS3CloneChunkRequest* request,
-                        CreateS3CloneChunkResponse* response,
-                        Closure* done);
-    void RecoverChunk(RpcController *controller,
-                      const ChunkRequest *request,
-                      ChunkResponse *response,
-                      Closure *done);
-
-    void GetChunkInfo(RpcController *controller,
-                      const GetChunkInfoRequest *request,
-                      GetChunkInfoResponse *response,
-                      Closure *done);
-
-    void GetChunkHash(RpcController *controller,
-                      const GetChunkHashRequest *request,
-                      GetChunkHashResponse *response,
-                      Closure *done);
-
-    void UpdateEpoch(RpcController *controller,
-                     const UpdateEpochRequest *request,
-                     UpdateEpochResponse *response,
-                     Closure *done);
+                            const CreateS3CloneChunkRequest* request,
+                            CreateS3CloneChunkResponse* response,
+                            Closure* done);
+    void RecoverChunk(RpcController* controller, const ChunkRequest* request,
+                      ChunkResponse* response, Closure* done);
+
+    void GetChunkInfo(RpcController* controller,
+                      const GetChunkInfoRequest* request,
+                      GetChunkInfoResponse* response, Closure* done);
+
+    void GetChunkHash(RpcController* controller,
+                      const GetChunkHashRequest* request,
+                      GetChunkHashResponse* response, Closure* done);
+
+    void UpdateEpoch(RpcController* controller,
+                     const UpdateEpochRequest* request,
+                     UpdateEpochResponse* response, Closure* done);

  private:
     /**
-     * 验证op request的offset和length是否越界和对齐
+     * Checks whether the offset and length of the op request are within
+     * bounds and properly aligned
      * @param offset[in]: op request' offset
      * @param len[in]: op request' length
-     * @return true,说明合法,否则返回false
+     * @return true if the request is valid, otherwise false
     */
     bool CheckRequestOffsetAndLength(uint32_t offset, uint32_t len) const;

  private:
     ChunkServiceOptions chunkServiceOptions_;
-    CopysetNodeManager *copysetNodeManager_;
+    CopysetNodeManager* copysetNodeManager_;
     std::shared_ptr inflightThrottle_;
-    uint32_t maxChunkSize_;
+    uint32_t maxChunkSize_;
     std::shared_ptr epochMap_;
     uint32_t blockSize_;
diff --git a/src/chunkserver/chunk_service_closure.cpp b/src/chunkserver/chunk_service_closure.cpp
index d680b37d93..fca11199f5 100644
--- a/src/chunkserver/chunk_service_closure.cpp
+++ b/src/chunkserver/chunk_service_closure.cpp
@@ -21,6 +21,7 @@
  */

 #include "src/chunkserver/chunk_service_closure.h"
+
 #include

 #include "src/chunkserver/chunkserver_metrics.h"
@@ -30,55 +31,52 @@ namespace chunkserver {

 void ChunkServiceClosure::Run() {
     /**
-     * 在Run结束之后,自动析构自己,这样可以避免
-     * 析构函数漏调
+     * Destroys this closure automatically when Run() finishes, so the
+     * destructor can never be missed.
      */
     std::unique_ptr selfGuard(this);

     {
-        // 所有brpcDone_调用之前要做的操作都放到这个生命周期内
+        // All work that must happen before brpcDone_ is invoked is scoped
+        // inside this block
         brpc::ClosureGuard doneGuard(brpcDone_);

-        // 记录请求处理结果,收集到metric中
+        // Record the result of this request and collect it into metrics
         OnResonse();
     }

-    // closure调用的时候减1,closure创建的什么加1
-    // 这一行必须放在brpcDone_调用之后,ut里需要测试inflightio超过限制时的表现
-    // 会在传进来的closure里面加一个sleep来控制inflightio个数
+    // The counter is incremented when the closure is created and decremented
+    // here when it is invoked. This line must come after the brpcDone_ call:
+    // the unit tests exercise the behavior of inflight I/O over the limit by
+    // adding a sleep in the passed-in closure to control the number of
+    // inflight requests.
     if (nullptr != inflightThrottle_) {
         inflightThrottle_->Decrement();
     }
 }

void ChunkServiceClosure::OnRequest() {
-    // 如果request或者response为空就不统计metric
-    if (request_ == nullptr || response_ == nullptr)
-        return;
+    // If request or response is empty, do not record metrics
+    if (request_ == nullptr || response_ == nullptr) return;

-    // 根据request类型统计请求数量
+    // Count the number of requests by request type
     ChunkServerMetric* metric = ChunkServerMetric::GetInstance();
     switch (request_->optype()) {
         case CHUNK_OP_TYPE::CHUNK_OP_READ: {
-            metric->OnRequest(request_->logicpoolid(),
-                              request_->copysetid(),
+            metric->OnRequest(request_->logicpoolid(), request_->copysetid(),
                               CSIOMetricType::READ_CHUNK);
             break;
         }
         case CHUNK_OP_TYPE::CHUNK_OP_WRITE: {
-            metric->OnRequest(request_->logicpoolid(),
-                              request_->copysetid(),
+            metric->OnRequest(request_->logicpoolid(), request_->copysetid(),
                               CSIOMetricType::WRITE_CHUNK);
             break;
         }
         case CHUNK_OP_TYPE::CHUNK_OP_RECOVER: {
-            metric->OnRequest(request_->logicpoolid(),
-                              request_->copysetid(),
+            metric->OnRequest(request_->logicpoolid(), request_->copysetid(),
                               CSIOMetricType::RECOVER_CHUNK);
             break;
         }
         case CHUNK_OP_TYPE::CHUNK_OP_PASTE: {
-            metric->OnRequest(request_->logicpoolid(),
-                              request_->copysetid(),
+            metric->OnRequest(request_->logicpoolid(), request_->copysetid(),
                               CSIOMetricType::PASTE_CHUNK);
             break;
         }
@@ -88,62 +86,51 @@ void ChunkServiceClosure::OnRequest() {
 }

 void ChunkServiceClosure::OnResonse() {
-    // 如果request或者response为空就不统计metric
-    if (request_ == nullptr || response_ == nullptr)
-        return;
+    // If request or response is empty, do not record metrics
+    if (request_ == nullptr || response_ == nullptr) return;

-    // 可以根据response中的返回值来统计此次请求的处理结果
+    // The result of this request can be derived from the return value in
+    // the response
     ChunkServerMetric* metric = ChunkServerMetric::GetInstance();
     bool hasError = false;
     uint64_t latencyUs =
         common::TimeUtility::GetTimeofDayUs() - receivedTimeUs_;
     switch (request_->optype()) {
         case CHUNK_OP_TYPE::CHUNK_OP_READ: {
-            // 如果是read请求,返回CHUNK_OP_STATUS_CHUNK_NOTEXIST也认为是正确的
-            hasError = (response_->status()
-                        != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) &&
-                       (response_->status()
-                        != CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST);
+            // For read requests, returning CHUNK_OP_STATUS_CHUNK_NOTEXIST is
+            // also considered correct
+            hasError = (response_->status() !=
+                        CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) &&
+                       (response_->status() !=
+                        CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST);

-            metric->OnResponse(request_->logicpoolid(),
-                               request_->copysetid(),
-                               CSIOMetricType::READ_CHUNK,
-                               request_->size(),
-                               latencyUs,
-                               hasError);
+            metric->OnResponse(request_->logicpoolid(), request_->copysetid(),
+                               CSIOMetricType::READ_CHUNK, request_->size(),
+                               latencyUs, hasError);
             break;
         }
         case CHUNK_OP_TYPE::CHUNK_OP_WRITE: {
-            hasError = response_->status()
-                       != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS;
-            metric->OnResponse(request_->logicpoolid(),
-                               request_->copysetid(),
-                               CSIOMetricType::WRITE_CHUNK,
-                               request_->size(),
-                               latencyUs,
-                               hasError);
+            hasError =
+                response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS;
+            metric->OnResponse(request_->logicpoolid(), request_->copysetid(),
+                               CSIOMetricType::WRITE_CHUNK, request_->size(),
+                               latencyUs, hasError);
             break;
         }
         case CHUNK_OP_TYPE::CHUNK_OP_RECOVER: {
-            hasError = response_->status()
-                       != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS;
-            metric->OnResponse(request_->logicpoolid(),
-                               request_->copysetid(),
-                               CSIOMetricType::RECOVER_CHUNK,
-                               request_->size(),
-                               latencyUs,
-                               hasError);
+            hasError =
+                response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS;
+            metric->OnResponse(request_->logicpoolid(), request_->copysetid(),
+                               CSIOMetricType::RECOVER_CHUNK, request_->size(),
+                               latencyUs, hasError);
             break;
         }
         case CHUNK_OP_TYPE::CHUNK_OP_PASTE: {
-            hasError = response_->status()
-                       != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS;
-            metric->OnResponse(request_->logicpoolid(),
-                               request_->copysetid(),
-                               CSIOMetricType::PASTE_CHUNK,
-                               request_->size(),
-                               latencyUs,
-                               hasError);
+            hasError =
+                response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS;
+            metric->OnResponse(request_->logicpoolid(), request_->copysetid(),
+                               CSIOMetricType::PASTE_CHUNK, request_->size(),
+                               latencyUs, hasError);
             break;
         }
         default:
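The Increment()/Decrement() pairing described in the comments above is what keeps the in-flight request count accurate: the ChunkServiceClosure constructor increments, and Run() decrements only after brpcDone_ has completed. A minimal sketch of such a counter follows; it is illustrative only, and Curve's actual class in src/chunkserver/inflight_throttle.h may differ:

#include <atomic>
#include <cstdint>

// Hypothetical stand-in for the inflight throttle used above.
class InflightThrottle {
 public:
    explicit InflightThrottle(uint64_t maxInflight)
        : inflightRequestCount_(0), maxInflightRequest_(maxInflight) {}

    // Called when a closure is created (a request enters the service).
    void Increment() { inflightRequestCount_.fetch_add(1); }

    // Called from Closure::Run(), after brpcDone_ has run.
    void Decrement() { inflightRequestCount_.fetch_sub(1); }

    // True once the number of in-flight requests exceeds the limit.
    bool IsOverLoad() const {
        return inflightRequestCount_.load() > maxInflightRequest_;
    }

 private:
    std::atomic<uint64_t> inflightRequestCount_;
    const uint64_t maxInflightRequest_;
};
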
diff --git a/src/chunkserver/chunk_service_closure.h b/src/chunkserver/chunk_service_closure.h
index b6dc7d4d65..48c418033c 100755
--- a/src/chunkserver/chunk_service_closure.h
+++ b/src/chunkserver/chunk_service_closure.h
@@ -24,66 +24,71 @@
 #define SRC_CHUNKSERVER_CHUNK_SERVICE_CLOSURE_H_

 #include
+
 #include

 #include "proto/chunk.pb.h"
-#include "src/chunkserver/op_request.h"
 #include "src/chunkserver/inflight_throttle.h"
+#include "src/chunkserver/op_request.h"
 #include "src/common/timeutility.h"

 namespace curve {
 namespace chunkserver {

-// chunk service层的闭包,对rpc的闭包再做一层封装,用于请求返回时统计metric信息
+// A closure of the chunk service layer that wraps the rpc closure one more
+// time; it is used to collect metric information when the request returns
 class ChunkServiceClosure : public braft::Closure {
 public:
    explicit ChunkServiceClosure(
-            std::shared_ptr inflightThrottle,
-            const ChunkRequest *request,
-            ChunkResponse *response,
-            google::protobuf::Closure *done)
-        : inflightThrottle_(inflightThrottle)
-        , request_(request)
-        , response_(response)
-        , brpcDone_(done)
-        , receivedTimeUs_(common::TimeUtility::GetTimeofDayUs()) {
-            // closure创建的什么加1,closure调用的时候减1
-            if (nullptr != inflightThrottle_) {
-                inflightThrottle_->Increment();
-            }
-            // 统计请求数量
-            OnRequest();
+        std::shared_ptr inflightThrottle,
+        const ChunkRequest* request, ChunkResponse* response,
+        google::protobuf::Closure* done)
+        : inflightThrottle_(inflightThrottle),
+          request_(request),
+          response_(response),
+          brpcDone_(done),
+          receivedTimeUs_(common::TimeUtility::GetTimeofDayUs()) {
+        // Incremented when the closure is created, decremented when it is
+        // invoked
+        if (nullptr != inflightThrottle_) {
+            inflightThrottle_->Increment();
         }
+        // Count the number of requests
+        OnRequest();
+    }

    ~ChunkServiceClosure() = default;

    /**
-     * 该闭包的guard生命周期结束时会调用该函数
-     * 该函数内目前主要是对读写请求返回结果的一些metric统计
-     * 后面如果有类似的场景(在service请求结束时做一些处理)可以在内部添加逻辑
+     * Called when the guard of this closure reaches the end of its lifetime.
+     * At the moment it mainly records metrics about the results of read and
+     * write requests. If similar needs arise later (doing some processing
+     * when a service request finishes), the logic can be added here.
     */
    void Run() override;

 private:
    /**
-     * 统计请求数量和速率
+     * Count the number and rate of requests
     */
    void OnRequest();
    /**
-     * 记录请求处理的结果,例如请求是否出错、请求的延时等
+     * Record the result of request processing, e.g. whether the request
+     * failed, its latency, etc.
     */
    void OnResonse();

 private:
-    // inflight流控
+    // inflight flow control
    std::shared_ptr inflightThrottle_;
-    // rpc请求的request
-    const ChunkRequest *request_;
-    // rpc请求的response
-    ChunkResponse *response_;
-    // rpc请求回调
-    google::protobuf::Closure *brpcDone_;
-    // 接受到请求的时间
+    // 
Request for rpc requests + const ChunkRequest* request_; + // Response to rpc requests + ChunkResponse* response_; + // Rpc request callback + google::protobuf::Closure* brpcDone_; + // Time of receiving the request uint64_t receivedTimeUs_; }; diff --git a/src/chunkserver/chunkserver.cpp b/src/chunkserver/chunkserver.cpp index 22f302c9da..8101735949 100644 --- a/src/chunkserver/chunkserver.cpp +++ b/src/chunkserver/chunkserver.cpp @@ -47,12 +47,12 @@ #include "src/common/uri_parser.h" #include "src/common/log_util.h" +using ::curve::chunkserver::concurrent::ConcurrentApplyModule; +using ::curve::common::UriParser; +using ::curve::fs::FileSystemType; using ::curve::fs::LocalFileSystem; using ::curve::fs::LocalFileSystemOption; using ::curve::fs::LocalFsFactory; -using ::curve::fs::FileSystemType; -using ::curve::chunkserver::concurrent::ConcurrentApplyModule; -using ::curve::common::UriParser; DEFINE_string(conf, "ChunkServer.conf", "Path of configuration file"); DEFINE_string(chunkServerIp, "127.0.0.1", "chunkserver ip"); @@ -60,19 +60,19 @@ DEFINE_bool(enableExternalServer, false, "start external server or not"); DEFINE_string(chunkServerExternalIp, "127.0.0.1", "chunkserver external ip"); DEFINE_int32(chunkServerPort, 8200, "chunkserver port"); DEFINE_string(chunkServerStoreUri, "local://./0/", "chunkserver store uri"); -DEFINE_string(chunkServerMetaUri, - "local://./0/chunkserver.dat", "chunkserver meta uri"); +DEFINE_string(chunkServerMetaUri, "local://./0/chunkserver.dat", + "chunkserver meta uri"); DEFINE_string(copySetUri, "local://./0/copysets", "copyset data uri"); DEFINE_string(raftSnapshotUri, "curve://./0/copysets", "raft snapshot uri"); DEFINE_string(raftLogUri, "curve://./0/copysets", "raft log uri"); -DEFINE_string(recycleUri, "local://./0/recycler" , "recycle uri"); +DEFINE_string(recycleUri, "local://./0/recycler", "recycle uri"); DEFINE_string(chunkFilePoolDir, "./0/", "chunk file pool location"); DEFINE_int32(chunkFilePoolAllocatedPercent, 80, "format percent for chunkfillpool."); DEFINE_uint32(chunkFormatThreadNum, 1, "number of threads while file pool formatting"); DEFINE_string(chunkFilePoolMetaPath, - "./chunkfilepool.meta", "chunk file pool meta path"); + "./chunkfilepool.meta", "chunk file pool meta path"); DEFINE_string(logPath, "./0/chunkserver.log-", "log file path"); DEFINE_string(mdsListenAddr, "127.0.0.1:6666", "mds listen addr"); DEFINE_bool(enableChunkfilepool, true, "enable chunkfilepool"); @@ -80,972 +80,1067 @@ DEFINE_uint32(copysetLoadConcurrency, 5, "copyset load concurrency"); DEFINE_bool(enableWalfilepool, true, "enable WAL filepool"); DEFINE_string(walFilePoolDir, "./0/", "WAL filepool location"); DEFINE_string(walFilePoolMetaPath, "./walfilepool.meta", - "WAL filepool meta path"); - - -const char* kProtocalCurve = "curve"; - -namespace curve { -namespace chunkserver { - -int ChunkServer::Run(int argc, char** argv) { - gflags::ParseCommandLineFlags(&argc, &argv, true); - - RegisterCurveSegmentLogStorageOrDie(); - - // ==========================加载配置项===============================// - LOG(INFO) << "Loading Configuration."; - common::Configuration conf; - conf.SetConfigPath(FLAGS_conf.c_str()); - - // 在从配置文件获取 - LOG_IF(FATAL, !conf.LoadConfig()) - << "load chunkserver configuration fail, conf path = " - << conf.GetConfigPath(); - // 命令行可以覆盖配置文件中的参数 - LoadConfigFromCmdline(&conf); - - // 初始化日志模块 - curve::common::DisableLoggingToStdErr(); - google::InitGoogleLogging(argv[0]); - - // 打印参数 - conf.PrintConfig(); - curve::common::ExposeCurveVersion(); - - 
// ============================初始化各模块==========================//
-    LOG(INFO) << "Initializing ChunkServer modules";
-
-    // 优先初始化 metric 收集模块
-    ChunkServerMetricOptions metricOptions;
-    InitMetricOptions(&conf, &metricOptions);
-    ChunkServerMetric* metric = ChunkServerMetric::GetInstance();
-    LOG_IF(FATAL, metric->Init(metricOptions) != 0)
-        << "Failed to init chunkserver metric.";
-
-    // 初始化并发持久模块
-    ConcurrentApplyModule concurrentapply;
-    ConcurrentApplyOption concurrentApplyOptions;
-    InitConcurrentApplyOptions(&conf, &concurrentApplyOptions);
-    LOG_IF(FATAL, false == concurrentapply.Init(concurrentApplyOptions))
-        << "Failed to initialize concurrentapply module!";
-
-    // 初始化本地文件系统
-    std::shared_ptr fs(
-        LocalFsFactory::CreateFs(FileSystemType::EXT4, ""));
-    LocalFileSystemOption lfsOption;
-    LOG_IF(FATAL, !conf.GetBoolValue(
-        "fs.enable_renameat2", &lfsOption.enableRenameat2));
-    LOG_IF(FATAL, 0 != fs->Init(lfsOption))
-        << "Failed to initialize local filesystem module!";
-
-    // 初始化chunk文件池
-    FilePoolOptions chunkFilePoolOptions;
-    InitChunkFilePoolOptions(&conf, &chunkFilePoolOptions);
-    std::shared_ptr chunkfilePool =
-            std::make_shared(fs);
-
-    LOG_IF(FATAL, false == chunkfilePool->Initialize(chunkFilePoolOptions))
-        << "Failed to init chunk file pool";
-
-    // Init Wal file pool
-    std::string raftLogUri;
-    LOG_IF(FATAL, !conf.GetStringValue("copyset.raft_log_uri", &raftLogUri));
-    std::string raftLogProtocol = UriParser::GetProtocolFromUri(raftLogUri);
-    std::shared_ptr walFilePool = nullptr;
-    bool useChunkFilePoolAsWalPool = true;
-    uint32_t useChunkFilePoolAsWalPoolReserve = 15;
-    if (raftLogProtocol == kProtocalCurve) {
-        LOG_IF(FATAL, !conf.GetBoolValue(
-            "walfilepool.use_chunk_file_pool",
-            &useChunkFilePoolAsWalPool));
-
-        if (!useChunkFilePoolAsWalPool) {
-            FilePoolOptions walFilePoolOptions;
-            InitWalFilePoolOptions(&conf, &walFilePoolOptions);
-            walFilePool = std::make_shared(fs);
-            LOG_IF(FATAL, false == walFilePool->Initialize(walFilePoolOptions))
-                << "Failed to init wal file pool";
-            LOG(INFO) << "initialize walpool success.";
-        } else {
-            walFilePool = chunkfilePool;
-            LOG_IF(FATAL, !conf.GetUInt32Value(
-                "walfilepool.use_chunk_file_pool_reserve",
-                &useChunkFilePoolAsWalPoolReserve));
-            LOG(INFO) << "initialize to use chunkfilePool as walpool success.";
+            "WAL filepool meta path");
+
+const char *kProtocalCurve = "curve";
+
+namespace curve
+{
+    namespace chunkserver
+    {
+
+        int ChunkServer::Run(int argc, char **argv)
+        {
+            gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+            RegisterCurveSegmentLogStorageOrDie();
+
+            // ==========================Load Configuration Items===============================//
+            LOG(INFO) << "Loading Configuration.";
+            common::Configuration conf;
+            conf.SetConfigPath(FLAGS_conf.c_str());
+
+            // Read options from the configuration file
+            LOG_IF(FATAL, !conf.LoadConfig())
+                << "load chunkserver configuration fail, conf path = "
+                << conf.GetConfigPath();
+            // The command line can override parameters in the configuration file
+            LoadConfigFromCmdline(&conf);
+
+            // Initialize the logging module
+            curve::common::DisableLoggingToStdErr();
+            google::InitGoogleLogging(argv[0]);
+
+            // Print parameters
+            conf.PrintConfig();
+            curve::common::ExposeCurveVersion();
+
+            // ============================Initialize each module==========================//
+            LOG(INFO) << "Initializing ChunkServer modules";
+
+            // Prioritize initializing the metric collection module
+            ChunkServerMetricOptions metricOptions;
+            InitMetricOptions(&conf, &metricOptions);
+            ChunkServerMetric *metric 
= ChunkServerMetric::GetInstance(); + LOG_IF(FATAL, metric->Init(metricOptions) != 0) + << "Failed to init chunkserver metric."; + + // Initialize concurrent persistence module + ConcurrentApplyModule concurrentapply; + ConcurrentApplyOption concurrentApplyOptions; + InitConcurrentApplyOptions(&conf, &concurrentApplyOptions); + LOG_IF(FATAL, false == concurrentapply.Init(concurrentApplyOptions)) + << "Failed to initialize concurrentapply module!"; + + // Initialize local file system + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + LocalFileSystemOption lfsOption; + LOG_IF(FATAL, !conf.GetBoolValue("fs.enable_renameat2", + &lfsOption.enableRenameat2)); + LOG_IF(FATAL, 0 != fs->Init(lfsOption)) + << "Failed to initialize local filesystem module!"; + + // Initialize chunk file pool + FilePoolOptions chunkFilePoolOptions; + InitChunkFilePoolOptions(&conf, &chunkFilePoolOptions); + std::shared_ptr chunkfilePool = std::make_shared(fs); + + LOG_IF(FATAL, false == chunkfilePool->Initialize(chunkFilePoolOptions)) + << "Failed to init chunk file pool"; + + // Init Wal file pool + std::string raftLogUri; + LOG_IF(FATAL, !conf.GetStringValue("copyset.raft_log_uri", &raftLogUri)); + std::string raftLogProtocol = UriParser::GetProtocolFromUri(raftLogUri); + std::shared_ptr walFilePool = nullptr; + bool useChunkFilePoolAsWalPool = true; + uint32_t useChunkFilePoolAsWalPoolReserve = 15; + if (raftLogProtocol == kProtocalCurve) + { + LOG_IF(FATAL, !conf.GetBoolValue("walfilepool.use_chunk_file_pool", + &useChunkFilePoolAsWalPool)); + + if (!useChunkFilePoolAsWalPool) + { + FilePoolOptions walFilePoolOptions; + InitWalFilePoolOptions(&conf, &walFilePoolOptions); + walFilePool = std::make_shared(fs); + LOG_IF(FATAL, false == walFilePool->Initialize(walFilePoolOptions)) + << "Failed to init wal file pool"; + LOG(INFO) << "initialize walpool success."; + } + else + { + walFilePool = chunkfilePool; + LOG_IF(FATAL, !conf.GetUInt32Value( + "walfilepool.use_chunk_file_pool_reserve", + &useChunkFilePoolAsWalPoolReserve)); + LOG(INFO) << "initialize to use chunkfilePool as walpool success."; + } + } + + // Remote Copy Management Module Options + CopyerOptions copyerOptions; + InitCopyerOptions(&conf, ©erOptions); + auto copyer = std::make_shared(); + LOG_IF(FATAL, copyer->Init(copyerOptions) != 0) + << "Failed to initialize clone copyer."; + + // Clone Management Module Initialization + CloneOptions cloneOptions; + InitCloneOptions(&conf, &cloneOptions); + uint32_t sliceSize; + LOG_IF(FATAL, !conf.GetUInt32Value("clone.slice_size", &sliceSize)); + bool enablePaste = false; + LOG_IF(FATAL, !conf.GetBoolValue("clone.enable_paste", &enablePaste)); + cloneOptions.core = + std::make_shared(sliceSize, enablePaste, copyer); + LOG_IF(FATAL, cloneManager_.Init(cloneOptions) != 0) + << "Failed to initialize clone manager."; + + // Initialize registration module + RegisterOptions registerOptions; + InitRegisterOptions(&conf, ®isterOptions); + registerOptions.useChunkFilePoolAsWalPoolReserve = + useChunkFilePoolAsWalPoolReserve; + registerOptions.useChunkFilePoolAsWalPool = useChunkFilePoolAsWalPool; + registerOptions.fs = fs; + registerOptions.chunkFilepool = chunkfilePool; + registerOptions.blockSize = chunkfilePool->GetFilePoolOpt().blockSize; + registerOptions.chunkSize = chunkfilePool->GetFilePoolOpt().fileSize; + Register registerMDS(registerOptions); + ChunkServerMetadata metadata; + ChunkServerMetadata localMetadata; + // Get Meta from Local + std::string metaPath = + 
+            // Load metadata from local storage
+            std::string metaPath =
+                UriParser::GetPathFromUri(registerOptions.chunkserverMetaUri);
+
+            auto epochMap = std::make_shared<EpochMap>();
+            if (fs->FileExists(metaPath))
+            {
+                LOG_IF(FATAL, GetChunkServerMetaFromLocal(
+                                  registerOptions.chunserverStoreUri,
+                                  registerOptions.chunkserverMetaUri,
+                                  registerOptions.fs, &localMetadata) != 0)
+                    << "Failed to GetChunkServerMetaFromLocal.";
+                LOG_IF(FATAL, registerMDS.RegisterToMDS(&localMetadata, &metadata,
+                                                        epochMap) != 0)
+                    << "Failed to register to MDS.";
+            }
+            else
+            {
+                // If it cannot be obtained locally, register with the MDS
+                LOG(INFO) << "meta file " << metaPath
+                          << " do not exist, register to mds";
+                LOG_IF(FATAL,
+                       registerMDS.RegisterToMDS(nullptr, &metadata, epochMap) != 0)
+                    << "Failed to register to MDS.";
+            }
+
+            // Initialize the trash module
+            TrashOptions trashOptions;
+            InitTrashOptions(&conf, &trashOptions);
+            trashOptions.localFileSystem = fs;
+            trashOptions.chunkFilePool = chunkfilePool;
+            trashOptions.walPool = walFilePool;
+            trash_ = std::make_shared<Trash>();
+            LOG_IF(FATAL, trash_->Init(trashOptions) != 0) << "Failed to init Trash";
+
+            // Initialize the replication group management module
+            CopysetNodeOptions copysetNodeOptions;
+            InitCopysetNodeOptions(&conf, &copysetNodeOptions);
+            copysetNodeOptions.concurrentapply = &concurrentapply;
+            copysetNodeOptions.chunkFilePool = chunkfilePool;
+            copysetNodeOptions.walFilePool = walFilePool;
+            copysetNodeOptions.localFileSystem = fs;
+            copysetNodeOptions.trash = trash_;
+            if (nullptr != walFilePool)
+            {
+                FilePoolOptions poolOpt = walFilePool->GetFilePoolOpt();
+                uint32_t maxWalSegmentSize = poolOpt.fileSize + poolOpt.metaPageSize;
+                copysetNodeOptions.maxWalSegmentSize = maxWalSegmentSize;
+
+                if (poolOpt.getFileFromPool)
+                {
+                    // overwrite from file pool
+                    copysetNodeOptions.maxChunkSize = poolOpt.fileSize;
+                    copysetNodeOptions.metaPageSize = poolOpt.metaPageSize;
+                    copysetNodeOptions.blockSize = poolOpt.blockSize;
+                }
+            }
+
+            // Bandwidth limit for install snapshot
+            int snapshotThroughputBytes;
+            LOG_IF(FATAL,
+                   !conf.GetIntValue("chunkserver.snapshot_throttle_throughput_bytes",
+                                     &snapshotThroughputBytes));
+            /**
+             * checkCycles enables finer-grained bandwidth control. With
+             * snapshotThroughputBytes = 100MB and checkCycles = 10, each 1/10
+             * second gets a 10MB budget that does not accumulate: if the first
+             * 1/10 second's 10MB expires unused, the second 1/10 second still
+             * has only 10MB available, not 20MB.
+             */
+            int checkCycles;
+            LOG_IF(FATAL,
+                   !conf.GetIntValue("chunkserver.snapshot_throttle_check_cycles",
+                                     &checkCycles));
+            scoped_refptr<SnapshotThrottle> snapshotThrottle =
+                new ThroughputSnapshotThrottle(snapshotThroughputBytes, checkCycles);
+            snapshotThrottle_ = snapshotThrottle;
+            copysetNodeOptions.snapshotThrottle = &snapshotThrottle_;
+
+            butil::ip_t ip;
+            if (butil::str2ip(copysetNodeOptions.ip.c_str(), &ip) < 0)
+            {
+                LOG(FATAL) << "Invalid server IP provided: " << copysetNodeOptions.ip;
+                return -1;
+            }
+            butil::EndPoint endPoint = butil::EndPoint(ip, copysetNodeOptions.port);
+            // Register curve snapshot storage
+            RegisterCurveSnapshotStorageOrDie();
+            CurveSnapshotStorage::set_server_addr(endPoint);
+            copysetNodeManager_ = &CopysetNodeManager::GetInstance();
+            LOG_IF(FATAL, copysetNodeManager_->Init(copysetNodeOptions) != 0)
+                << "Failed to initialize CopysetNodeManager.";
+
+            // Init the scan module
+            ScanManagerOptions scanOpts;
+            InitScanOptions(&conf, &scanOpts);
+            scanOpts.copysetNodeManager = copysetNodeManager_;
+            LOG_IF(FATAL, scanManager_.Init(scanOpts) != 0)
+                << "Failed to init scan manager.";
+
+            // Initialize the heartbeat module
+            HeartbeatOptions heartbeatOptions;
+            InitHeartbeatOptions(&conf, &heartbeatOptions);
+            heartbeatOptions.copysetNodeManager = copysetNodeManager_;
+            heartbeatOptions.fs = fs;
+            heartbeatOptions.chunkFilePool = chunkfilePool;
+            heartbeatOptions.chunkserverId = metadata.id();
+            heartbeatOptions.chunkserverToken = metadata.token();
+            heartbeatOptions.scanManager = &scanManager_;
+            LOG_IF(FATAL, heartbeat_.Init(heartbeatOptions) != 0)
+                << "Failed to init Heartbeat manager.";
+
+            // Monitor metrics of some modules
+            metric->MonitorTrash(trash_.get());
+            metric->MonitorChunkFilePool(chunkfilePool.get());
+            if (raftLogProtocol == kProtocalCurve && !useChunkFilePoolAsWalPool)
+            {
+                metric->MonitorWalFilePool(walFilePool.get());
+            }
+            metric->ExposeConfigMetric(&conf);
+
+            // ========================Add RPC services========================//
+            // TODO(lixiaocui): Add delay metric to each interface in rpc
+            brpc::Server server;
+            brpc::Server externalServer;
+            // We need to call braft::add_service to add endPoint to braft::NodeManager
+            braft::add_service(&server, endPoint);
+
+            // copyset service
+            CopysetServiceImpl copysetService(copysetNodeManager_);
+            int ret =
+                server.AddService(&copysetService, brpc::SERVER_DOESNT_OWN_SERVICE);
+            CHECK(0 == ret) << "Fail to add CopysetService";
+
+            // inflight throttle
+            int maxInflight;
+            LOG_IF(FATAL, !conf.GetIntValue("chunkserver.max_inflight_requests",
+                                            &maxInflight));
+            std::shared_ptr<InflightThrottle> inflightThrottle =
+                std::make_shared<InflightThrottle>(maxInflight);
+            CHECK(nullptr != inflightThrottle) << "new inflight throttle failed";
+
+            // chunk service
+            ChunkServiceOptions chunkServiceOptions;
+            chunkServiceOptions.copysetNodeManager = copysetNodeManager_;
+            chunkServiceOptions.cloneManager = &cloneManager_;
+            chunkServiceOptions.inflightThrottle = inflightThrottle;
+
+            ChunkServiceImpl chunkService(chunkServiceOptions, epochMap);
+            ret = server.AddService(&chunkService, brpc::SERVER_DOESNT_OWN_SERVICE);
+            CHECK(0 == ret) << "Fail to add ChunkService";
+
+            // We need to replace braft::CliService with our own implementation
+            auto service = server.FindServiceByName("CliService");
+            ret = server.RemoveService(service);
+            CHECK(0 == ret) << "Fail to remove braft::CliService";
+            BRaftCliServiceImpl braftCliService;
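+            // Replacing a braft built-in service follows a small find/remove/add
+            // pattern, sketched here with the names used in this function
+            // (illustration only, the real calls are above and below):
+            //
+            //   auto *svc = server.FindServiceByName("CliService");
+            //   CHECK(0 == server.RemoveService(svc));
+            //   CHECK(0 == server.AddService(&braftCliService,
+            //                                brpc::SERVER_DOESNT_OWN_SERVICE));
+            //
+            // The same dance is repeated for braft's FileService below.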
+            ret =
+                server.AddService(&braftCliService, brpc::SERVER_DOESNT_OWN_SERVICE);
+            CHECK(0 == ret) << "Fail to add BRaftCliService";
+
+            // braft client service
+            BRaftCliServiceImpl2 braftCliService2;
+            ret = server.AddService(&braftCliService2, brpc::SERVER_DOESNT_OWN_SERVICE);
+            CHECK(0 == ret) << "Fail to add BRaftCliService2";
+
+            // We need to replace braft::FileServiceImpl with our own implementation
+            service = server.FindServiceByName("FileService");
+            ret = server.RemoveService(service);
+            CHECK(0 == ret) << "Fail to remove braft::FileService";
+            kCurveFileService.set_snapshot_attachment(new CurveSnapshotAttachment(fs));
+            ret =
+                server.AddService(&kCurveFileService, brpc::SERVER_DOESNT_OWN_SERVICE);
+            CHECK(0 == ret) << "Fail to add CurveFileService";
+
+            // chunkserver service
+            ChunkServerServiceImpl chunkserverService(copysetNodeManager_);
+            ret =
+                server.AddService(&chunkserverService, brpc::SERVER_DOESNT_OWN_SERVICE);
+            CHECK(0 == ret) << "Fail to add ChunkServerService";
+
+            // scan copyset service
+            ScanServiceImpl scanCopysetService(&scanManager_);
+            ret =
+                server.AddService(&scanCopysetService, brpc::SERVER_DOESNT_OWN_SERVICE);
+            CHECK(0 == ret) << "Fail to add ScanCopysetService";
+
+            // Start the RPC services
+            LOG(INFO) << "Internal server is going to serve on: "
+                      << copysetNodeOptions.ip << ":" << copysetNodeOptions.port;
+            if (server.Start(endPoint, NULL) != 0)
+            {
+                LOG(ERROR) << "Fail to start Internal Server";
+                return -1;
+            }
+            /* Start the external server. It serves external clients and tools,
+               as opposed to the internal server used for communication between
+               the MDS and the chunkserver. */
+            if (registerOptions.enableExternalServer)
+            {
+                ret = externalServer.AddService(&copysetService,
+                                                brpc::SERVER_DOESNT_OWN_SERVICE);
+                CHECK(0 == ret) << "Fail to add CopysetService at external server";
+                ret = externalServer.AddService(&chunkService,
+                                                brpc::SERVER_DOESNT_OWN_SERVICE);
+                CHECK(0 == ret) << "Fail to add ChunkService at external server";
+                ret = externalServer.AddService(&braftCliService,
+                                                brpc::SERVER_DOESNT_OWN_SERVICE);
+                CHECK(0 == ret) << "Fail to add BRaftCliService at external server";
+                ret = externalServer.AddService(&braftCliService2,
+                                                brpc::SERVER_DOESNT_OWN_SERVICE);
+                CHECK(0 == ret) << "Fail to add BRaftCliService2 at external server";
+                braft::RaftStatImpl raftStatService;
+                ret = externalServer.AddService(&raftStatService,
+                                                brpc::SERVER_DOESNT_OWN_SERVICE);
+                CHECK(0 == ret) << "Fail to add RaftStatService at external server";
+                std::string externalAddr =
+                    registerOptions.chunkserverExternalIp + ":" +
+                    std::to_string(registerOptions.chunkserverPort);
+                LOG(INFO) << "External server is going to serve on: " << externalAddr;
+                if (externalServer.Start(externalAddr.c_str(), NULL) != 0)
+                {
+                    LOG(ERROR) << "Fail to start External Server";
+                    return -1;
+                }
+            }
+
+            // ======================Start each module======================//
+            LOG(INFO) << "ChunkServer starts.";
+            /**
+             * Module startup is placed after the RPC services are up mainly to
+             * bound memory growth: the number of copysets recovered concurrently
+             * is limited, and copyset recovery depends on the RPC services being
+             * started first.
+             */
+            LOG_IF(FATAL, trash_->Run() != 0) << "Failed to start trash.";
+            LOG_IF(FATAL, cloneManager_.Run() != 0) << "Failed to start clone manager.";
+            LOG_IF(FATAL, heartbeat_.Run() != 0)
+                << "Failed to start heartbeat manager.";
+            LOG_IF(FATAL, copysetNodeManager_->Run() != 0)
+                << "Failed to start CopysetNodeManager.";
+            LOG_IF(FATAL, scanManager_.Run() != 0) << "Failed to start scan manager.";
+            LOG_IF(FATAL, !chunkfilePool->StartCleaning())
+                << "Failed to start file pool clean worker.";
+
+            // ======================Wait for the process to exit======================//
+            while (!brpc::IsAskedToQuit())
+            {
+                bthread_usleep(1000000L);
+            }
+            // Stopping the scan manager may take a little while, so stop it
+            // before stopping the RPC services.
+            LOG(INFO) << "ChunkServer is going to quit.";
+            LOG_IF(ERROR, scanManager_.Fini() != 0)
+                << "Failed to shutdown scan manager.";
+
+            if (registerOptions.enableExternalServer)
+            {
+                externalServer.Stop(0);
+                externalServer.Join();
+            }
+
+            server.Stop(0);
+            server.Join();
+
+            LOG_IF(ERROR, heartbeat_.Fini() != 0)
+                << "Failed to shutdown heartbeat manager.";
+            LOG_IF(ERROR, copysetNodeManager_->Fini() != 0)
+                << "Failed to shutdown CopysetNodeManager.";
+            LOG_IF(ERROR, cloneManager_.Fini() != 0)
+                << "Failed to shutdown clone manager.";
+            LOG_IF(ERROR, copyer->Fini() != 0) << "Failed to shutdown clone copyer.";
+            LOG_IF(ERROR, trash_->Fini() != 0) << "Failed to shutdown trash.";
+            LOG_IF(ERROR, !chunkfilePool->StopCleaning())
+                << "Failed to shutdown file pool clean worker.";
+            concurrentapply.Stop();
+
+            google::ShutdownGoogleLogging();
+            return 0;
+        }
-    }
-
-    // 远端拷贝管理模块选项
-    CopyerOptions copyerOptions;
-    InitCopyerOptions(&conf, &copyerOptions);
-    auto copyer = std::make_shared<OriginCopyer>();
-    LOG_IF(FATAL, copyer->Init(copyerOptions) != 0)
-        << "Failed to initialize clone copyer.";
-
-    // 克隆管理模块初始化
-    CloneOptions cloneOptions;
-    InitCloneOptions(&conf, &cloneOptions);
-    uint32_t sliceSize;
-    LOG_IF(FATAL, !conf.GetUInt32Value("clone.slice_size", &sliceSize));
-    bool enablePaste = false;
-    LOG_IF(FATAL, !conf.GetBoolValue("clone.enable_paste", &enablePaste));
-    cloneOptions.core =
-        std::make_shared<CloneCore>(sliceSize, enablePaste, copyer);
-    LOG_IF(FATAL, cloneManager_.Init(cloneOptions) != 0)
-        << "Failed to initialize clone manager.";
-
-    // 初始化注册模块
-    RegisterOptions registerOptions;
-    InitRegisterOptions(&conf, &registerOptions);
-    registerOptions.useChunkFilePoolAsWalPoolReserve =
-        useChunkFilePoolAsWalPoolReserve;
-    registerOptions.useChunkFilePoolAsWalPool = useChunkFilePoolAsWalPool;
-    registerOptions.fs = fs;
-    registerOptions.chunkFilepool = chunkfilePool;
-    registerOptions.blockSize = chunkfilePool->GetFilePoolOpt().blockSize;
-    registerOptions.chunkSize = chunkfilePool->GetFilePoolOpt().fileSize;
-    Register registerMDS(registerOptions);
-    ChunkServerMetadata metadata;
-    ChunkServerMetadata localMetadata;
-    // 从本地获取meta
-    std::string metaPath = UriParser::GetPathFromUri(
-        registerOptions.chunkserverMetaUri);
-
-    auto epochMap = std::make_shared<EpochMap>();
-    if (fs->FileExists(metaPath)) {
-        LOG_IF(FATAL, GetChunkServerMetaFromLocal(
-                          registerOptions.chunserverStoreUri,
-                          registerOptions.chunkserverMetaUri,
-                          registerOptions.fs, &localMetadata) != 0)
-            << "Failed to GetChunkServerMetaFromLocal.";
-        LOG_IF(FATAL, registerMDS.RegisterToMDS(
-            &localMetadata, &metadata, epochMap) != 0)
-            << "Failed to register to MDS.";
-    } else {
-        // 如果本地获取不到,向mds注册
-        LOG(INFO) << "meta file "
-                  << metaPath << " do 
not exist, register to mds"; - LOG_IF(FATAL, registerMDS.RegisterToMDS( - nullptr, &metadata, epochMap) != 0) - << "Failed to register to MDS."; - } - - // trash模块初始化 - TrashOptions trashOptions; - InitTrashOptions(&conf, &trashOptions); - trashOptions.localFileSystem = fs; - trashOptions.chunkFilePool = chunkfilePool; - trashOptions.walPool = walFilePool; - trash_ = std::make_shared(); - LOG_IF(FATAL, trash_->Init(trashOptions) != 0) - << "Failed to init Trash"; - - // 初始化复制组管理模块 - CopysetNodeOptions copysetNodeOptions; - InitCopysetNodeOptions(&conf, ©setNodeOptions); - copysetNodeOptions.concurrentapply = &concurrentapply; - copysetNodeOptions.chunkFilePool = chunkfilePool; - copysetNodeOptions.walFilePool = walFilePool; - copysetNodeOptions.localFileSystem = fs; - copysetNodeOptions.trash = trash_; - if (nullptr != walFilePool) { - FilePoolOptions poolOpt = walFilePool->GetFilePoolOpt(); - uint32_t maxWalSegmentSize = poolOpt.fileSize + poolOpt.metaPageSize; - copysetNodeOptions.maxWalSegmentSize = maxWalSegmentSize; - - if (poolOpt.getFileFromPool) { - // overwrite from file pool - copysetNodeOptions.maxChunkSize = poolOpt.fileSize; - copysetNodeOptions.metaPageSize = poolOpt.metaPageSize; - copysetNodeOptions.blockSize = poolOpt.blockSize; + + void ChunkServer::Stop() + { + brpc::AskToQuit(); } - } - - // install snapshot的带宽限制 - int snapshotThroughputBytes; - LOG_IF(FATAL, - !conf.GetIntValue("chunkserver.snapshot_throttle_throughput_bytes", - &snapshotThroughputBytes)); - /** - * checkCycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB, - * checkCycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个 - * 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而 - * 不是20MB的带宽 - */ - int checkCycles; - LOG_IF(FATAL, - !conf.GetIntValue("chunkserver.snapshot_throttle_check_cycles", - &checkCycles)); - scoped_refptr snapshotThrottle - = new ThroughputSnapshotThrottle(snapshotThroughputBytes, checkCycles); - snapshotThrottle_ = snapshotThrottle; - copysetNodeOptions.snapshotThrottle = &snapshotThrottle_; - - butil::ip_t ip; - if (butil::str2ip(copysetNodeOptions.ip.c_str(), &ip) < 0) { - LOG(FATAL) << "Invalid server IP provided: " << copysetNodeOptions.ip; - return -1; - } - butil::EndPoint endPoint = butil::EndPoint(ip, copysetNodeOptions.port); - // 注册curve snapshot storage - RegisterCurveSnapshotStorageOrDie(); - CurveSnapshotStorage::set_server_addr(endPoint); - copysetNodeManager_ = &CopysetNodeManager::GetInstance(); - LOG_IF(FATAL, copysetNodeManager_->Init(copysetNodeOptions) != 0) - << "Failed to initialize CopysetNodeManager."; - - // init scan model - ScanManagerOptions scanOpts; - InitScanOptions(&conf, &scanOpts); - scanOpts.copysetNodeManager = copysetNodeManager_; - LOG_IF(FATAL, scanManager_.Init(scanOpts) != 0) - << "Failed to init scan manager."; - - // 心跳模块初始化 - HeartbeatOptions heartbeatOptions; - InitHeartbeatOptions(&conf, &heartbeatOptions); - heartbeatOptions.copysetNodeManager = copysetNodeManager_; - heartbeatOptions.fs = fs; - heartbeatOptions.chunkFilePool = chunkfilePool; - heartbeatOptions.chunkserverId = metadata.id(); - heartbeatOptions.chunkserverToken = metadata.token(); - heartbeatOptions.scanManager = &scanManager_; - LOG_IF(FATAL, heartbeat_.Init(heartbeatOptions) != 0) - << "Failed to init Heartbeat manager."; - - // 监控部分模块的metric指标 - metric->MonitorTrash(trash_.get()); - metric->MonitorChunkFilePool(chunkfilePool.get()); - if (raftLogProtocol == kProtocalCurve && !useChunkFilePoolAsWalPool) { - metric->MonitorWalFilePool(walFilePool.get()); - } - 
metric->ExposeConfigMetric(&conf); - - // ========================添加rpc服务===============================// - // TODO(lixiaocui): rpc中各接口添加上延迟metric - brpc::Server server; - brpc::Server externalServer; - // We need call braft::add_service to add endPoint to braft::NodeManager - braft::add_service(&server, endPoint); - - // copyset service - CopysetServiceImpl copysetService(copysetNodeManager_); - int ret = server.AddService(©setService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add CopysetService"; - - // inflight throttle - int maxInflight; - LOG_IF(FATAL, - !conf.GetIntValue("chunkserver.max_inflight_requests", - &maxInflight)); - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); - CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; - - // chunk service - ChunkServiceOptions chunkServiceOptions; - chunkServiceOptions.copysetNodeManager = copysetNodeManager_; - chunkServiceOptions.cloneManager = &cloneManager_; - chunkServiceOptions.inflightThrottle = inflightThrottle; - - ChunkServiceImpl chunkService(chunkServiceOptions, epochMap); - ret = server.AddService(&chunkService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add ChunkService"; - - // We need to replace braft::CliService with our own implementation - auto service = server.FindServiceByName("CliService"); - ret = server.RemoveService(service); - CHECK(0 == ret) << "Fail to remove braft::CliService"; - BRaftCliServiceImpl braftCliService; - ret = server.AddService(&braftCliService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add BRaftCliService"; - - // braftclient service - BRaftCliServiceImpl2 braftCliService2; - ret = server.AddService(&braftCliService2, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add BRaftCliService2"; - - // We need to replace braft::FileServiceImpl with our own implementation - service = server.FindServiceByName("FileService"); - ret = server.RemoveService(service); - CHECK(0 == ret) << "Fail to remove braft::FileService"; - kCurveFileService.set_snapshot_attachment(new CurveSnapshotAttachment(fs)); - ret = server.AddService(&kCurveFileService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add CurveFileService"; - - // chunkserver service - ChunkServerServiceImpl chunkserverService(copysetNodeManager_); - ret = server.AddService(&chunkserverService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add ChunkServerService"; - - // scan copyset service - ScanServiceImpl scanCopysetService(&scanManager_); - ret = server.AddService(&scanCopysetService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add ScanCopysetService"; - - // 启动rpc service - LOG(INFO) << "Internal server is going to serve on: " - << copysetNodeOptions.ip << ":" << copysetNodeOptions.port; - if (server.Start(endPoint, NULL) != 0) { - LOG(ERROR) << "Fail to start Internal Server"; - return -1; - } - /* 启动external server - external server用于向client和工具等外部提供服务 - 区别于mds和chunkserver之间的通信*/ - if (registerOptions.enableExternalServer) { - ret = externalServer.AddService(©setService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add CopysetService at external server"; - ret = externalServer.AddService(&chunkService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add ChunkService at external server"; - ret = externalServer.AddService(&braftCliService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add BRaftCliService at 
external server"; - ret = externalServer.AddService(&braftCliService2, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add BRaftCliService2 at external server"; - braft::RaftStatImpl raftStatService; - ret = externalServer.AddService(&raftStatService, - brpc::SERVER_DOESNT_OWN_SERVICE); - CHECK(0 == ret) << "Fail to add RaftStatService at external server"; - std::string externalAddr = registerOptions.chunkserverExternalIp + ":" + - std::to_string(registerOptions.chunkserverPort); - LOG(INFO) << "External server is going to serve on: " << externalAddr; - if (externalServer.Start(externalAddr.c_str(), NULL) != 0) { - LOG(ERROR) << "Fail to start External Server"; - return -1; + + void ChunkServer::InitChunkFilePoolOptions( + common::Configuration *conf, FilePoolOptions *chunkFilePoolOptions) + { + LOG_IF(FATAL, !conf->GetUInt32Value("global.chunk_size", + &chunkFilePoolOptions->fileSize)); + + LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", + &chunkFilePoolOptions->metaPageSize)) + << "Not found `global.meta_page_size` in config file"; + + LOG_IF(FATAL, !conf->GetUInt32Value("global.block_size", + &chunkFilePoolOptions->blockSize)) + << "Not found `global.block_size` in config file"; + + LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.cpmeta_file_size", + &chunkFilePoolOptions->metaFileSize)); + LOG_IF(FATAL, + !conf->GetBoolValue("chunkfilepool.enable_get_chunk_from_pool", + &chunkFilePoolOptions->getFileFromPool)); + + if (chunkFilePoolOptions->getFileFromPool == false) + { + std::string chunkFilePoolUri; + LOG_IF(FATAL, !conf->GetStringValue("chunkfilepool.chunk_file_pool_dir", + &chunkFilePoolUri)); + ::memcpy(chunkFilePoolOptions->filePoolDir, chunkFilePoolUri.c_str(), + chunkFilePoolUri.size()); + } + else + { + std::string metaUri; + LOG_IF(FATAL, !conf->GetStringValue( + "chunkfilepool.meta_path", &metaUri)); + ::memcpy( + chunkFilePoolOptions->metaPath, metaUri.c_str(), metaUri.size()); + + std::string chunkFilePoolUri; + LOG_IF(FATAL, !conf->GetStringValue("chunkfilepool.chunk_file_pool_dir", + &chunkFilePoolUri)); + + ::memcpy(chunkFilePoolOptions->filePoolDir, chunkFilePoolUri.c_str(), + chunkFilePoolUri.size()); + std::string pool_size; + LOG_IF(FATAL, !conf->GetStringValue( + "chunkfilepool.chunk_file_pool_size", &pool_size)); + LOG_IF(FATAL, !curve::common::ToNumbericByte( + pool_size, &chunkFilePoolOptions->filePoolSize)); + LOG_IF(FATAL, + !conf->GetBoolValue("chunkfilepool.allocated_by_percent", + &chunkFilePoolOptions->allocatedByPercent)); + LOG_IF(FATAL, + !conf->GetUInt32Value("chunkfilepool.allocate_percent", + &chunkFilePoolOptions->allocatedPercent)); + LOG_IF(FATAL, !conf->GetUInt32Value( + "chunkfilepool.chunk_file_pool_format_thread_num", + &chunkFilePoolOptions->formatThreadNum)); + LOG_IF(FATAL, !conf->GetBoolValue("chunkfilepool.clean.enable", + &chunkFilePoolOptions->needClean)); + LOG_IF(FATAL, + !conf->GetUInt32Value("chunkfilepool.clean.bytes_per_write", + &chunkFilePoolOptions->bytesPerWrite)); + LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.clean.throttle_iops", + &chunkFilePoolOptions->iops4clean)); + + std::string copysetUri; + LOG_IF(FATAL, + !conf->GetStringValue("copyset.raft_snapshot_uri", ©setUri)); + curve::common::UriParser::ParseUri(copysetUri, + &chunkFilePoolOptions->copysetDir); + + std::string recycleUri; + LOG_IF(FATAL, + !conf->GetStringValue("copyset.recycler_uri", &recycleUri)); + curve::common::UriParser::ParseUri(recycleUri, + &chunkFilePoolOptions->recycleDir); + + bool useChunkFilePoolAsWalPool; 
+ LOG_IF(FATAL, !conf->GetBoolValue("walfilepool.use_chunk_file_pool", + &useChunkFilePoolAsWalPool)); + + chunkFilePoolOptions->isAllocated = [=](const std::string &filename) + { + return Trash::IsChunkOrSnapShotFile(filename) || + (useChunkFilePoolAsWalPool && Trash::IsWALFile(filename)); + }; + + if (0 == chunkFilePoolOptions->bytesPerWrite || chunkFilePoolOptions->bytesPerWrite > 1 * 1024 * 1024 || 0 != chunkFilePoolOptions->bytesPerWrite % 4096) + { + LOG(FATAL) << "The bytesPerWrite must be in [1, 1048576] " + << "and should be aligned to 4K, " + << "but now is: " << chunkFilePoolOptions->bytesPerWrite; + } + } + } + + void ChunkServer::InitConcurrentApplyOptions( + common::Configuration *conf, + ConcurrentApplyOption *concurrentApplyOptions) + { + LOG_IF(FATAL, !conf->GetIntValue("rconcurrentapply.size", + &concurrentApplyOptions->rconcurrentsize)); + LOG_IF(FATAL, !conf->GetIntValue("wconcurrentapply.size", + &concurrentApplyOptions->wconcurrentsize)); + LOG_IF(FATAL, !conf->GetIntValue("rconcurrentapply.queuedepth", + &concurrentApplyOptions->rqueuedepth)); + LOG_IF(FATAL, !conf->GetIntValue("wconcurrentapply.queuedepth", + &concurrentApplyOptions->wqueuedepth)); + } + + void ChunkServer::InitWalFilePoolOptions(common::Configuration *conf, + FilePoolOptions *walPoolOptions) + { + LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.segment_size", + &walPoolOptions->fileSize)); + LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.metapage_size", + &walPoolOptions->metaPageSize)); + LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.meta_file_size", + &walPoolOptions->metaFileSize)); + LOG_IF(FATAL, + !conf->GetBoolValue("walfilepool.enable_get_segment_from_pool", + &walPoolOptions->getFileFromPool)); + + if (walPoolOptions->getFileFromPool == false) + { + std::string filePoolUri; + LOG_IF(FATAL, !conf->GetStringValue("walfilepool.file_pool_dir", + &filePoolUri)); + ::memcpy(walPoolOptions->filePoolDir, filePoolUri.c_str(), + filePoolUri.size()); + } + else + { + std::string metaUri; + LOG_IF(FATAL, !conf->GetStringValue( + "walfilepool.meta_path", &metaUri)); + + std::string pool_size; + LOG_IF(FATAL, !conf->GetStringValue("walfilepool.chunk_file_pool_size", + &pool_size)); + LOG_IF(FATAL, !curve::common::ToNumbericByte( + pool_size, &walPoolOptions->filePoolSize)); + LOG_IF(FATAL, !conf->GetUInt64Value("walfilepool.wal_file_pool_size", + &walPoolOptions->filePoolSize)); + LOG_IF(FATAL, !conf->GetBoolValue("walfilepool.allocated_by_percent", + &walPoolOptions->allocatedByPercent)); + LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.allocated_percent", + &walPoolOptions->allocatedPercent)); + LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.thread_num", + &walPoolOptions->formatThreadNum)); + + std::string copysetUri; + LOG_IF(FATAL, + !conf->GetStringValue("copyset.raft_log_uri", ©setUri)); + curve::common::UriParser::ParseUri(copysetUri, + &walPoolOptions->copysetDir); + + std::string recycleUri; + LOG_IF(FATAL, + !conf->GetStringValue("copyset.recycler_uri", &recycleUri)); + curve::common::UriParser::ParseUri(recycleUri, + &walPoolOptions->recycleDir); + + walPoolOptions->isAllocated = [](const string &filename) + { + return Trash::IsWALFile(filename); + }; + ::memcpy( + walPoolOptions->metaPath, metaUri.c_str(), metaUri.size()); + } + } + + void ChunkServer::InitCopysetNodeOptions( + common::Configuration *conf, CopysetNodeOptions *copysetNodeOptions) + { + LOG_IF(FATAL, !conf->GetStringValue("global.ip", ©setNodeOptions->ip)); + LOG_IF(FATAL, + !conf->GetUInt32Value("global.port", 
+                                         &copysetNodeOptions->port));
+            if (copysetNodeOptions->port <= 0 || copysetNodeOptions->port >= 65535)
+            {
+                LOG(FATAL) << "Invalid server port provided: "
+                           << copysetNodeOptions->port;
+            }
+
+            LOG_IF(FATAL, !conf->GetIntValue("copyset.election_timeout_ms",
+                                             &copysetNodeOptions->electionTimeoutMs));
+            LOG_IF(FATAL, !conf->GetIntValue("copyset.snapshot_interval_s",
+                                             &copysetNodeOptions->snapshotIntervalS));
+            bool ret = conf->GetBoolValue("copyset.enable_lease_read",
+                                          &copysetNodeOptions->enbaleLeaseRead);
+            LOG_IF(WARNING, ret == false)
+                << "config no copyset.enable_lease_read info, using default value "
+                << copysetNodeOptions->enbaleLeaseRead;
+            LOG_IF(FATAL, !conf->GetIntValue("copyset.catchup_margin",
+                                             &copysetNodeOptions->catchupMargin));
+            LOG_IF(FATAL, !conf->GetStringValue("copyset.chunk_data_uri",
+                                                &copysetNodeOptions->chunkDataUri));
+            LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_log_uri",
+                                                &copysetNodeOptions->logUri));
+            LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_meta_uri",
+                                                &copysetNodeOptions->raftMetaUri));
+            LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_snapshot_uri",
+                                                &copysetNodeOptions->raftSnapshotUri));
+            LOG_IF(FATAL, !conf->GetStringValue("copyset.recycler_uri",
+                                                &copysetNodeOptions->recyclerUri));
+            LOG_IF(FATAL, !conf->GetUInt32Value("global.chunk_size",
+                                                &copysetNodeOptions->maxChunkSize));
+            LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size",
+                                                &copysetNodeOptions->metaPageSize));
+            LOG_IF(FATAL, !conf->GetUInt32Value("global.block_size",
+                                                &copysetNodeOptions->blockSize));
+            LOG_IF(FATAL, !conf->GetUInt32Value("global.location_limit",
+                                                &copysetNodeOptions->locationLimit));
+            LOG_IF(FATAL, !conf->GetUInt32Value("copyset.load_concurrency",
+                                                &copysetNodeOptions->loadConcurrency));
+            LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_retrytimes",
+                                                &copysetNodeOptions->checkRetryTimes));
+            LOG_IF(FATAL, !conf->GetUInt32Value("copyset.finishload_margin",
+                                                &copysetNodeOptions->finishLoadMargin));
+            LOG_IF(FATAL, !conf->GetUInt32Value(
+                              "copyset.check_loadmargin_interval_ms",
+                              &copysetNodeOptions->checkLoadMarginIntervalMs));
+            LOG_IF(FATAL, !conf->GetUInt32Value("copyset.sync_concurrency",
+                                                &copysetNodeOptions->syncConcurrency));
+
+            LOG_IF(FATAL, !conf->GetBoolValue(
+                              "copyset.enable_odsync_when_open_chunkfile",
+                              &copysetNodeOptions->enableOdsyncWhenOpenChunkFile));
+            if (!copysetNodeOptions->enableOdsyncWhenOpenChunkFile)
+            {
+                LOG_IF(FATAL,
+                       !conf->GetUInt64Value("copyset.sync_chunk_limits",
+                                             &copysetNodeOptions->syncChunkLimit));
+                LOG_IF(FATAL,
+                       !conf->GetUInt64Value("copyset.sync_threshold",
+                                             &copysetNodeOptions->syncThreshold));
+                LOG_IF(FATAL, !conf->GetUInt32Value(
+                                  "copyset.check_syncing_interval_ms",
+                                  &copysetNodeOptions->checkSyncingIntervalMs));
+                LOG_IF(FATAL,
+                       !conf->GetUInt32Value("copyset.sync_trigger_seconds",
+                                             &copysetNodeOptions->syncTriggerSeconds));
+            }
+        }
+
+        void ChunkServer::InitCopyerOptions(common::Configuration *conf,
+                                            CopyerOptions *copyerOptions)
+        {
+            LOG_IF(FATAL, !conf->GetStringValue("curve.root_username",
+                                                &copyerOptions->curveUser.owner));
+            LOG_IF(FATAL, !conf->GetStringValue("curve.root_password",
+                                                &copyerOptions->curveUser.password));
+            LOG_IF(FATAL, !conf->GetStringValue("curve.config_path",
+                                                &copyerOptions->curveConf));
+            LOG_IF(FATAL,
+                   !conf->GetStringValue("s3.config_path", &copyerOptions->s3Conf));
+            bool disableCurveClient = false;
+            bool disableS3Adapter = false;
+            LOG_IF(FATAL, !conf->GetBoolValue("clone.disable_curve_client",
+                                              &disableCurveClient));
+            LOG_IF(FATAL,
+                   !conf->GetBoolValue("clone.disable_s3_adapter", &disableS3Adapter));
+            LOG_IF(FATAL, 
!conf->GetUInt64Value("curve.curve_file_timeout_s", + ©erOptions->curveFileTimeoutSec)); + + if (disableCurveClient) + { + copyerOptions->curveClient = nullptr; + } + else + { + copyerOptions->curveClient = std::make_shared(); + } + + if (disableS3Adapter) + { + copyerOptions->s3Client = nullptr; + } + else + { + copyerOptions->s3Client = std::make_shared(); + } + } + + void ChunkServer::InitCloneOptions(common::Configuration *conf, + CloneOptions *cloneOptions) + { + LOG_IF(FATAL, + !conf->GetUInt32Value("clone.thread_num", &cloneOptions->threadNum)); + LOG_IF(FATAL, !conf->GetUInt32Value("clone.queue_depth", + &cloneOptions->queueCapacity)); + } + + void ChunkServer::InitScanOptions(common::Configuration *conf, + ScanManagerOptions *scanOptions) + { + LOG_IF(FATAL, !conf->GetUInt32Value("copyset.scan_interval_sec", + &scanOptions->intervalSec)); + LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_size_byte", + &scanOptions->scanSize)); + LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", + &scanOptions->chunkMetaPageSize)); + LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_rpc_timeout_ms", + &scanOptions->timeoutMs)); + LOG_IF(FATAL, !conf->GetUInt32Value("copyset.scan_rpc_retry_times", + &scanOptions->retry)); + LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_rpc_retry_interval_us", + &scanOptions->retryIntervalUs)); + } + + void ChunkServer::InitHeartbeatOptions(common::Configuration *conf, + HeartbeatOptions *heartbeatOptions) + { + LOG_IF(FATAL, !conf->GetStringValue("chunkserver.stor_uri", + &heartbeatOptions->storeUri)); + LOG_IF(FATAL, !conf->GetStringValue("global.ip", &heartbeatOptions->ip)); + LOG_IF(FATAL, + !conf->GetUInt32Value("global.port", &heartbeatOptions->port)); + LOG_IF(FATAL, !conf->GetStringValue("mds.listen.addr", + &heartbeatOptions->mdsListenAddr)); + LOG_IF(FATAL, !conf->GetUInt32Value("mds.heartbeat_interval", + &heartbeatOptions->intervalSec)); + LOG_IF(FATAL, !conf->GetUInt32Value("mds.heartbeat_timeout", + &heartbeatOptions->timeout)); + } + + void ChunkServer::InitRegisterOptions(common::Configuration *conf, + RegisterOptions *registerOptions) + { + LOG_IF(FATAL, !conf->GetStringValue("mds.listen.addr", + ®isterOptions->mdsListenAddr)); + LOG_IF(FATAL, !conf->GetStringValue( + "global.ip", ®isterOptions->chunkserverInternalIp)); + LOG_IF(FATAL, !conf->GetBoolValue("global.enable_external_server", + ®isterOptions->enableExternalServer)); + LOG_IF(FATAL, + !conf->GetStringValue("global.external_ip", + ®isterOptions->chunkserverExternalIp)); + LOG_IF(FATAL, !conf->GetIntValue("global.port", + ®isterOptions->chunkserverPort)); + LOG_IF(FATAL, !conf->GetStringValue("chunkserver.stor_uri", + ®isterOptions->chunserverStoreUri)); + LOG_IF(FATAL, !conf->GetStringValue("chunkserver.meta_uri", + ®isterOptions->chunkserverMetaUri)); + LOG_IF(FATAL, !conf->GetStringValue("chunkserver.disk_type", + ®isterOptions->chunkserverDiskType)); + LOG_IF(FATAL, !conf->GetIntValue("mds.register_retries", + ®isterOptions->registerRetries)); + LOG_IF(FATAL, !conf->GetIntValue("mds.register_timeout", + ®isterOptions->registerTimeout)); + } + + void ChunkServer::InitTrashOptions(common::Configuration *conf, + TrashOptions *trashOptions) + { + LOG_IF(FATAL, !conf->GetStringValue("copyset.recycler_uri", + &trashOptions->trashPath)); + LOG_IF(FATAL, !conf->GetIntValue("trash.expire_afterSec", + &trashOptions->expiredAfterSec)); + LOG_IF(FATAL, !conf->GetIntValue("trash.scan_periodSec", + &trashOptions->scanPeriodSec)); } - } - - // 
=======================启动各模块==================================// - LOG(INFO) << "ChunkServer starts."; - /** - * 将模块启动放到rpc 服务启动后面,主要是为了解决内存增长的问题 - * 控制并发恢复的copyset数量,copyset恢复需要依赖rpc服务先启动 - */ - LOG_IF(FATAL, trash_->Run() != 0) - << "Failed to start trash."; - LOG_IF(FATAL, cloneManager_.Run() != 0) - << "Failed to start clone manager."; - LOG_IF(FATAL, heartbeat_.Run() != 0) - << "Failed to start heartbeat manager."; - LOG_IF(FATAL, copysetNodeManager_->Run() != 0) - << "Failed to start CopysetNodeManager."; - LOG_IF(FATAL, scanManager_.Run() != 0) - << "Failed to start scan manager."; - LOG_IF(FATAL, !chunkfilePool->StartCleaning()) - << "Failed to start file pool clean worker."; - - // =======================等待进程退出==================================// - while (!brpc::IsAskedToQuit()) { - bthread_usleep(1000000L); - } - // scanmanager stop maybe need a little while, so stop it first before stop service NOLINT - LOG(INFO) << "ChunkServer is going to quit."; - LOG_IF(ERROR, scanManager_.Fini() != 0) - << "Failed to shutdown scan manager."; - - if (registerOptions.enableExternalServer) { - externalServer.Stop(0); - externalServer.Join(); - } - - server.Stop(0); - server.Join(); - - LOG_IF(ERROR, heartbeat_.Fini() != 0) - << "Failed to shutdown heartbeat manager."; - LOG_IF(ERROR, copysetNodeManager_->Fini() != 0) - << "Failed to shutdown CopysetNodeManager."; - LOG_IF(ERROR, cloneManager_.Fini() != 0) - << "Failed to shutdown clone manager."; - LOG_IF(ERROR, copyer->Fini() != 0) - << "Failed to shutdown clone copyer."; - LOG_IF(ERROR, trash_->Fini() != 0) - << "Failed to shutdown trash."; - LOG_IF(ERROR, !chunkfilePool->StopCleaning()) - << "Failed to shutdown file pool clean worker."; - concurrentapply.Stop(); - - google::ShutdownGoogleLogging(); - return 0; -} - -void ChunkServer::Stop() { - brpc::AskToQuit(); -} - -void ChunkServer::InitChunkFilePoolOptions( - common::Configuration *conf, FilePoolOptions *chunkFilePoolOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value("global.chunk_size", - &chunkFilePoolOptions->fileSize)); - - LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", - &chunkFilePoolOptions->metaPageSize)) - << "Not found `global.meta_page_size` in config file"; - - LOG_IF(FATAL, !conf->GetUInt32Value("global.block_size", - &chunkFilePoolOptions->blockSize)) - << "Not found `global.block_size` in config file"; - - LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.cpmeta_file_size", - &chunkFilePoolOptions->metaFileSize)); - LOG_IF(FATAL, !conf->GetBoolValue( - "chunkfilepool.enable_get_chunk_from_pool", - &chunkFilePoolOptions->getFileFromPool)); - - if (chunkFilePoolOptions->getFileFromPool == false) { - std::string chunkFilePoolUri; - LOG_IF(FATAL, !conf->GetStringValue( - "chunkfilepool.chunk_file_pool_dir", &chunkFilePoolUri)); - ::memcpy(chunkFilePoolOptions->filePoolDir, - chunkFilePoolUri.c_str(), - chunkFilePoolUri.size()); - } else { - std::string metaUri; - LOG_IF(FATAL, !conf->GetStringValue( - "chunkfilepool.meta_path", &metaUri)); - ::memcpy( - chunkFilePoolOptions->metaPath, metaUri.c_str(), metaUri.size()); - - std::string chunkFilePoolUri; - LOG_IF(FATAL, !conf->GetStringValue("chunkfilepool.chunk_file_pool_dir", - &chunkFilePoolUri)); - - ::memcpy(chunkFilePoolOptions->filePoolDir, chunkFilePoolUri.c_str(), - chunkFilePoolUri.size()); - std::string pool_size; - LOG_IF(FATAL, !conf->GetStringValue( - "chunkfilepool.chunk_file_pool_size", &pool_size)); - LOG_IF(FATAL, !curve::common::ToNumbericByte( - pool_size, &chunkFilePoolOptions->filePoolSize)); 
- LOG_IF(FATAL, - !conf->GetBoolValue("chunkfilepool.allocated_by_percent", - &chunkFilePoolOptions->allocatedByPercent)); - LOG_IF(FATAL, - !conf->GetUInt32Value("chunkfilepool.allocate_percent", - &chunkFilePoolOptions->allocatedPercent)); - LOG_IF(FATAL, !conf->GetUInt32Value( - "chunkfilepool.chunk_file_pool_format_thread_num", - &chunkFilePoolOptions->formatThreadNum)); - LOG_IF(FATAL, !conf->GetBoolValue("chunkfilepool.clean.enable", - &chunkFilePoolOptions->needClean)); - LOG_IF(FATAL, - !conf->GetUInt32Value("chunkfilepool.clean.bytes_per_write", - &chunkFilePoolOptions->bytesPerWrite)); - LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.clean.throttle_iops", - &chunkFilePoolOptions->iops4clean)); - - std::string copysetUri; - LOG_IF(FATAL, - !conf->GetStringValue("copyset.raft_snapshot_uri", ©setUri)); - curve::common::UriParser::ParseUri(copysetUri, - &chunkFilePoolOptions->copysetDir); - - std::string recycleUri; - LOG_IF(FATAL, - !conf->GetStringValue("copyset.recycler_uri", &recycleUri)); - curve::common::UriParser::ParseUri(recycleUri, - &chunkFilePoolOptions->recycleDir); - - bool useChunkFilePoolAsWalPool; - LOG_IF(FATAL, !conf->GetBoolValue("walfilepool.use_chunk_file_pool", - &useChunkFilePoolAsWalPool)); - - chunkFilePoolOptions->isAllocated = [=](const std::string& filename) { - return Trash::IsChunkOrSnapShotFile(filename) || - (useChunkFilePoolAsWalPool && Trash::IsWALFile(filename)); - }; - - if (0 == chunkFilePoolOptions->bytesPerWrite - || chunkFilePoolOptions->bytesPerWrite > 1 * 1024 * 1024 - || 0 != chunkFilePoolOptions->bytesPerWrite % 4096) { - LOG(FATAL) << "The bytesPerWrite must be in [1, 1048576] " - << "and should be aligned to 4K, " - << "but now is: " << chunkFilePoolOptions->bytesPerWrite; + + void ChunkServer::InitMetricOptions(common::Configuration *conf, + ChunkServerMetricOptions *metricOptions) + { + LOG_IF(FATAL, !conf->GetUInt32Value("global.port", &metricOptions->port)); + LOG_IF(FATAL, !conf->GetStringValue("global.ip", &metricOptions->ip)); + LOG_IF(FATAL, + !conf->GetBoolValue("metric.onoff", &metricOptions->collectMetric)); } - } -} - -void ChunkServer::InitConcurrentApplyOptions(common::Configuration *conf, - ConcurrentApplyOption *concurrentApplyOptions) { - LOG_IF(FATAL, !conf->GetIntValue( - "rconcurrentapply.size", &concurrentApplyOptions->rconcurrentsize)); - LOG_IF(FATAL, !conf->GetIntValue( - "wconcurrentapply.size", &concurrentApplyOptions->wconcurrentsize)); - LOG_IF(FATAL, !conf->GetIntValue( - "rconcurrentapply.queuedepth", &concurrentApplyOptions->rqueuedepth)); - LOG_IF(FATAL, !conf->GetIntValue( - "wconcurrentapply.queuedepth", &concurrentApplyOptions->wqueuedepth)); -} - -void ChunkServer::InitWalFilePoolOptions( - common::Configuration *conf, FilePoolOptions *walPoolOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.segment_size", - &walPoolOptions->fileSize)); - LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.metapage_size", - &walPoolOptions->metaPageSize)); - LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.meta_file_size", - &walPoolOptions->metaFileSize)); - LOG_IF(FATAL, !conf->GetBoolValue( - "walfilepool.enable_get_segment_from_pool", - &walPoolOptions->getFileFromPool)); - - if (walPoolOptions->getFileFromPool == false) { - std::string filePoolUri; - LOG_IF(FATAL, !conf->GetStringValue( - "walfilepool.file_pool_dir", &filePoolUri)); - ::memcpy(walPoolOptions->filePoolDir, - filePoolUri.c_str(), - filePoolUri.size()); - } else { - std::string metaUri; - LOG_IF(FATAL, !conf->GetStringValue( - 
"walfilepool.meta_path", &metaUri)); - - std::string pool_size; - LOG_IF(FATAL, !conf->GetStringValue("walfilepool.chunk_file_pool_size", - &pool_size)); - LOG_IF(FATAL, !curve::common::ToNumbericByte( - pool_size, &walPoolOptions->filePoolSize)); - LOG_IF(FATAL, !conf->GetUInt64Value("walfilepool.wal_file_pool_size", - &walPoolOptions->filePoolSize)); - LOG_IF(FATAL, !conf->GetBoolValue("walfilepool.allocated_by_percent", - &walPoolOptions->allocatedByPercent)); - LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.allocated_percent", - &walPoolOptions->allocatedPercent)); - LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.thread_num", - &walPoolOptions->formatThreadNum)); - - std::string copysetUri; - LOG_IF(FATAL, - !conf->GetStringValue("copyset.raft_log_uri", ©setUri)); - curve::common::UriParser::ParseUri(copysetUri, - &walPoolOptions->copysetDir); - - std::string recycleUri; - LOG_IF(FATAL, - !conf->GetStringValue("copyset.recycler_uri", &recycleUri)); - curve::common::UriParser::ParseUri(recycleUri, - &walPoolOptions->recycleDir); - - walPoolOptions->isAllocated = [](const string& filename) { - return Trash::IsWALFile(filename); - }; - ::memcpy( - walPoolOptions->metaPath, metaUri.c_str(), metaUri.size()); - } -} - -void ChunkServer::InitCopysetNodeOptions( - common::Configuration *conf, CopysetNodeOptions *copysetNodeOptions) { - LOG_IF(FATAL, !conf->GetStringValue("global.ip", ©setNodeOptions->ip)); - LOG_IF(FATAL, !conf->GetUInt32Value( - "global.port", ©setNodeOptions->port)); - if (copysetNodeOptions->port <= 0 || copysetNodeOptions->port >= 65535) { - LOG(FATAL) << "Invalid server port provided: " - << copysetNodeOptions->port; - } - - LOG_IF(FATAL, !conf->GetIntValue("copyset.election_timeout_ms", - ©setNodeOptions->electionTimeoutMs)); - LOG_IF(FATAL, !conf->GetIntValue("copyset.snapshot_interval_s", - ©setNodeOptions->snapshotIntervalS)); - bool ret = conf->GetBoolValue("copyset.enable_lease_read", - ©setNodeOptions->enbaleLeaseRead); - LOG_IF(WARNING, ret == false) - << "config no copyset.enable_lease_read info, using default value " - << copysetNodeOptions->enbaleLeaseRead; - LOG_IF(FATAL, !conf->GetIntValue("copyset.catchup_margin", - ©setNodeOptions->catchupMargin)); - LOG_IF(FATAL, !conf->GetStringValue("copyset.chunk_data_uri", - ©setNodeOptions->chunkDataUri)); - LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_log_uri", - ©setNodeOptions->logUri)); - LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_meta_uri", - ©setNodeOptions->raftMetaUri)); - LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_snapshot_uri", - ©setNodeOptions->raftSnapshotUri)); - LOG_IF(FATAL, !conf->GetStringValue("copyset.recycler_uri", - ©setNodeOptions->recyclerUri)); - LOG_IF(FATAL, !conf->GetUInt32Value("global.chunk_size", - ©setNodeOptions->maxChunkSize)); - LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", - ©setNodeOptions->metaPageSize)); - LOG_IF(FATAL, !conf->GetUInt32Value("global.block_size", - ©setNodeOptions->blockSize)); - LOG_IF(FATAL, !conf->GetUInt32Value("global.location_limit", - ©setNodeOptions->locationLimit)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.load_concurrency", - ©setNodeOptions->loadConcurrency)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_retrytimes", - ©setNodeOptions->checkRetryTimes)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.finishload_margin", - ©setNodeOptions->finishLoadMargin)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_loadmargin_interval_ms", - ©setNodeOptions->checkLoadMarginIntervalMs)); - LOG_IF(FATAL, 
!conf->GetUInt32Value("copyset.sync_concurrency", - ©setNodeOptions->syncConcurrency)); - - LOG_IF(FATAL, !conf->GetBoolValue( - "copyset.enable_odsync_when_open_chunkfile", - ©setNodeOptions->enableOdsyncWhenOpenChunkFile)); - if (!copysetNodeOptions->enableOdsyncWhenOpenChunkFile) { - LOG_IF(FATAL, !conf->GetUInt64Value("copyset.sync_chunk_limits", - ©setNodeOptions->syncChunkLimit)); - LOG_IF(FATAL, !conf->GetUInt64Value("copyset.sync_threshold", - ©setNodeOptions->syncThreshold)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_syncing_interval_ms", - ©setNodeOptions->checkSyncingIntervalMs)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.sync_trigger_seconds", - ©setNodeOptions->syncTriggerSeconds)); - } -} - -void ChunkServer::InitCopyerOptions( - common::Configuration *conf, CopyerOptions *copyerOptions) { - LOG_IF(FATAL, !conf->GetStringValue("curve.root_username", - ©erOptions->curveUser.owner)); - LOG_IF(FATAL, !conf->GetStringValue("curve.root_password", - ©erOptions->curveUser.password)); - LOG_IF(FATAL, !conf->GetStringValue("curve.config_path", - ©erOptions->curveConf)); - LOG_IF(FATAL, - !conf->GetStringValue("s3.config_path", ©erOptions->s3Conf)); - bool disableCurveClient = false; - bool disableS3Adapter = false; - LOG_IF(FATAL, !conf->GetBoolValue("clone.disable_curve_client", - &disableCurveClient)); - LOG_IF(FATAL, !conf->GetBoolValue("clone.disable_s3_adapter", - &disableS3Adapter)); - LOG_IF(FATAL, !conf->GetUInt64Value("curve.curve_file_timeout_s", - ©erOptions->curveFileTimeoutSec)); - - if (disableCurveClient) { - copyerOptions->curveClient = nullptr; - } else { - copyerOptions->curveClient = std::make_shared(); - } - - if (disableS3Adapter) { - copyerOptions->s3Client = nullptr; - } else { - copyerOptions->s3Client = std::make_shared(); - } -} - -void ChunkServer::InitCloneOptions( - common::Configuration *conf, CloneOptions *cloneOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value("clone.thread_num", - &cloneOptions->threadNum)); - LOG_IF(FATAL, !conf->GetUInt32Value("clone.queue_depth", - &cloneOptions->queueCapacity)); -} - -void ChunkServer::InitScanOptions( - common::Configuration *conf, ScanManagerOptions *scanOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.scan_interval_sec", - &scanOptions->intervalSec)); - LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_size_byte", - &scanOptions->scanSize)); - LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", - &scanOptions->chunkMetaPageSize)); - LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_rpc_timeout_ms", - &scanOptions->timeoutMs)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.scan_rpc_retry_times", - &scanOptions->retry)); - LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_rpc_retry_interval_us", - &scanOptions->retryIntervalUs)); -} - -void ChunkServer::InitHeartbeatOptions( - common::Configuration *conf, HeartbeatOptions *heartbeatOptions) { - LOG_IF(FATAL, !conf->GetStringValue("chunkserver.stor_uri", - &heartbeatOptions->storeUri)); - LOG_IF(FATAL, !conf->GetStringValue("global.ip", &heartbeatOptions->ip)); - LOG_IF(FATAL, !conf->GetUInt32Value("global.port", - &heartbeatOptions->port)); - LOG_IF(FATAL, !conf->GetStringValue("mds.listen.addr", - &heartbeatOptions->mdsListenAddr)); - LOG_IF(FATAL, !conf->GetUInt32Value("mds.heartbeat_interval", - &heartbeatOptions->intervalSec)); - LOG_IF(FATAL, !conf->GetUInt32Value("mds.heartbeat_timeout", - &heartbeatOptions->timeout)); -} - -void ChunkServer::InitRegisterOptions( - common::Configuration *conf, RegisterOptions 
*registerOptions) { - LOG_IF(FATAL, !conf->GetStringValue("mds.listen.addr", - ®isterOptions->mdsListenAddr)); - LOG_IF(FATAL, !conf->GetStringValue("global.ip", - ®isterOptions->chunkserverInternalIp)); - LOG_IF(FATAL, !conf->GetBoolValue("global.enable_external_server", - ®isterOptions->enableExternalServer)); - LOG_IF(FATAL, !conf->GetStringValue("global.external_ip", - ®isterOptions->chunkserverExternalIp)); - LOG_IF(FATAL, !conf->GetIntValue("global.port", - ®isterOptions->chunkserverPort)); - LOG_IF(FATAL, !conf->GetStringValue("chunkserver.stor_uri", - ®isterOptions->chunserverStoreUri)); - LOG_IF(FATAL, !conf->GetStringValue("chunkserver.meta_uri", - ®isterOptions->chunkserverMetaUri)); - LOG_IF(FATAL, !conf->GetStringValue("chunkserver.disk_type", - ®isterOptions->chunkserverDiskType)); - LOG_IF(FATAL, !conf->GetIntValue("mds.register_retries", - ®isterOptions->registerRetries)); - LOG_IF(FATAL, !conf->GetIntValue("mds.register_timeout", - ®isterOptions->registerTimeout)); -} - -void ChunkServer::InitTrashOptions( - common::Configuration *conf, TrashOptions *trashOptions) { - LOG_IF(FATAL, !conf->GetStringValue( - "copyset.recycler_uri", &trashOptions->trashPath)); - LOG_IF(FATAL, !conf->GetIntValue( - "trash.expire_afterSec", &trashOptions->expiredAfterSec)); - LOG_IF(FATAL, !conf->GetIntValue( - "trash.scan_periodSec", &trashOptions->scanPeriodSec)); -} - -void ChunkServer::InitMetricOptions( - common::Configuration *conf, ChunkServerMetricOptions *metricOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value( - "global.port", &metricOptions->port)); - LOG_IF(FATAL, !conf->GetStringValue( - "global.ip", &metricOptions->ip)); - LOG_IF(FATAL, !conf->GetBoolValue( - "metric.onoff", &metricOptions->collectMetric)); -} - -void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { - // 如果命令行有设置, 命令行覆盖配置文件中的字段 - google::CommandLineFlagInfo info; - if (GetCommandLineFlagInfo("chunkServerIp", &info) && !info.is_default) { - conf->SetStringValue("global.ip", FLAGS_chunkServerIp); - } else { - LOG(FATAL) - << "chunkServerIp must be set when run chunkserver in command."; - } - if (GetCommandLineFlagInfo("enableExternalServer", &info) && - !info.is_default) { - conf->SetBoolValue( - "global.enable_external_server", FLAGS_enableExternalServer); - } - if (GetCommandLineFlagInfo("chunkServerExternalIp", &info) && - !info.is_default) { - conf->SetStringValue("global.external_ip", FLAGS_chunkServerExternalIp); - } - - if (GetCommandLineFlagInfo("chunkServerPort", &info) && !info.is_default) { - conf->SetIntValue("global.port", FLAGS_chunkServerPort); - } else { - LOG(FATAL) - << "chunkServerPort must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("chunkServerStoreUri", &info) && - !info.is_default) { - conf->SetStringValue("chunkserver.stor_uri", FLAGS_chunkServerStoreUri); - } else { - LOG(FATAL) - << "chunkServerStoreUri must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("chunkServerMetaUri", &info) && - !info.is_default) { - conf->SetStringValue("chunkserver.meta_uri", FLAGS_chunkServerMetaUri); - } else { - LOG(FATAL) - << "chunkServerMetaUri must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("copySetUri", &info) && !info.is_default) { - conf->SetStringValue("copyset.chunk_data_uri", FLAGS_copySetUri); - conf->SetStringValue("copyset.raft_log_uri", FLAGS_copySetUri); - conf->SetStringValue("copyset.raft_snapshot_uri", FLAGS_copySetUri); - conf->SetStringValue("copyset.raft_meta_uri", 
FLAGS_copySetUri); - } else { - LOG(FATAL) - << "copySetUri must be set when run chunkserver in command."; - } - if (GetCommandLineFlagInfo("raftSnapshotUri", &info) && !info.is_default) { - conf->SetStringValue( - "copyset.raft_snapshot_uri", FLAGS_raftSnapshotUri); - } else { - LOG(FATAL) - << "raftSnapshotUri must be set when run chunkserver in command."; - } - if (GetCommandLineFlagInfo("raftLogUri", &info) && !info.is_default) { - conf->SetStringValue( - "copyset.raft_log_uri", FLAGS_raftLogUri); - } else { - LOG(FATAL) - << "raftLogUri must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("recycleUri", &info) && - !info.is_default) { - conf->SetStringValue("copyset.recycler_uri", FLAGS_recycleUri); - } else { - LOG(FATAL) - << "recycleUri must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("chunkFilePoolDir", &info) && - !info.is_default) { - conf->SetStringValue( - "chunkfilepool.chunk_file_pool_dir", FLAGS_chunkFilePoolDir); - } else { - LOG(FATAL) - << "chunkFilePoolDir must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("chunkFilePoolAllocatedPercent", &info)) { - conf->SetUInt32Value("chunkfilepool.allocate_percent", - FLAGS_chunkFilePoolAllocatedPercent); - } - - if (GetCommandLineFlagInfo("chunkFormatThreadNum", &info)) { - conf->SetUInt64Value("chunkfilepool.chunk_file_pool_format_thread_num", - FLAGS_chunkFormatThreadNum); - } - - if (GetCommandLineFlagInfo("chunkFilePoolMetaPath", &info) && - !info.is_default) { - conf->SetStringValue( - "chunkfilepool.meta_path", FLAGS_chunkFilePoolMetaPath); - } else { - LOG(FATAL) - << "chunkFilePoolMetaPath must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("walFilePoolDir", &info) && - !info.is_default) { - conf->SetStringValue( - "walfilepool.file_pool_dir", FLAGS_walFilePoolDir); - } else { - LOG(FATAL) - << "walFilePoolDir must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("walFilePoolMetaPath", &info) && - !info.is_default) { - conf->SetStringValue( - "walfilepool.meta_path", FLAGS_walFilePoolMetaPath); - } else { - LOG(FATAL) - << "walFilePoolMetaPath must be set when run chunkserver in command."; - } - - if (GetCommandLineFlagInfo("mdsListenAddr", &info) && !info.is_default) { - conf->SetStringValue("mds.listen.addr", FLAGS_mdsListenAddr); - } - - // 设置日志存放文件夹 - if (FLAGS_log_dir.empty()) { - if (!conf->GetStringValue("chunkserver.common.logDir", &FLAGS_log_dir)) { // NOLINT - LOG(WARNING) << "no chunkserver.common.logDir in " << FLAGS_conf - << ", will log to /tmp"; + + void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) + { + // If there are settings on the command line, the command line overwrites + // the fields in the configuration file + google::CommandLineFlagInfo info; + if (GetCommandLineFlagInfo("chunkServerIp", &info) && !info.is_default) + { + conf->SetStringValue("global.ip", FLAGS_chunkServerIp); + } + else + { + LOG(FATAL) + << "chunkServerIp must be set when run chunkserver in command."; + } + if (GetCommandLineFlagInfo("enableExternalServer", &info) && + !info.is_default) + { + conf->SetBoolValue("global.enable_external_server", + FLAGS_enableExternalServer); + } + if (GetCommandLineFlagInfo("chunkServerExternalIp", &info) && + !info.is_default) + { + conf->SetStringValue("global.external_ip", FLAGS_chunkServerExternalIp); + } + + if (GetCommandLineFlagInfo("chunkServerPort", &info) && !info.is_default) + { + conf->SetIntValue("global.port", 
FLAGS_chunkServerPort); + } + else + { + LOG(FATAL) + << "chunkServerPort must be set when run chunkserver in command."; + } + + if (GetCommandLineFlagInfo("chunkServerStoreUri", &info) && + !info.is_default) + { + conf->SetStringValue("chunkserver.stor_uri", FLAGS_chunkServerStoreUri); + } + else + { + LOG(FATAL) << "chunkServerStoreUri must be set when run chunkserver in " + "command."; + } + + if (GetCommandLineFlagInfo("chunkServerMetaUri", &info) && + !info.is_default) + { + conf->SetStringValue("chunkserver.meta_uri", FLAGS_chunkServerMetaUri); + } + else + { + LOG(FATAL) << "chunkServerMetaUri must be set when run chunkserver in " + "command."; + } + + if (GetCommandLineFlagInfo("copySetUri", &info) && !info.is_default) + { + conf->SetStringValue("copyset.chunk_data_uri", FLAGS_copySetUri); + conf->SetStringValue("copyset.raft_log_uri", FLAGS_copySetUri); + conf->SetStringValue("copyset.raft_snapshot_uri", FLAGS_copySetUri); + conf->SetStringValue("copyset.raft_meta_uri", FLAGS_copySetUri); + } + else + { + LOG(FATAL) << "copySetUri must be set when run chunkserver in command."; + } + if (GetCommandLineFlagInfo("raftSnapshotUri", &info) && !info.is_default) + { + conf->SetStringValue("copyset.raft_snapshot_uri", + FLAGS_raftSnapshotUri); + } + else + { + LOG(FATAL) + << "raftSnapshotUri must be set when run chunkserver in command."; + } + if (GetCommandLineFlagInfo("raftLogUri", &info) && !info.is_default) + { + conf->SetStringValue("copyset.raft_log_uri", FLAGS_raftLogUri); + } + else + { + LOG(FATAL) << "raftLogUri must be set when run chunkserver in command."; + } + + if (GetCommandLineFlagInfo("recycleUri", &info) && !info.is_default) + { + conf->SetStringValue("copyset.recycler_uri", FLAGS_recycleUri); + } + else + { + LOG(FATAL) << "recycleUri must be set when run chunkserver in command."; + } + + if (GetCommandLineFlagInfo("chunkFilePoolDir", &info) && !info.is_default) + { + conf->SetStringValue("chunkfilepool.chunk_file_pool_dir", + FLAGS_chunkFilePoolDir); + } + else + { + LOG(FATAL) + << "chunkFilePoolDir must be set when run chunkserver in command."; + } + + if (GetCommandLineFlagInfo("chunkFilePoolAllocatedPercent", &info)) + { + conf->SetUInt32Value("chunkfilepool.allocate_percent", + FLAGS_chunkFilePoolAllocatedPercent); + } + + if (GetCommandLineFlagInfo("chunkFormatThreadNum", &info)) + { + conf->SetUInt64Value("chunkfilepool.chunk_file_pool_format_thread_num", + FLAGS_chunkFormatThreadNum); + } + + if (GetCommandLineFlagInfo("chunkFilePoolMetaPath", &info) && + !info.is_default) + { + conf->SetStringValue("chunkfilepool.meta_path", + FLAGS_chunkFilePoolMetaPath); + } + else + { + LOG(FATAL) << "chunkFilePoolMetaPath must be set when run chunkserver " + "in command."; + } + + if (GetCommandLineFlagInfo("walFilePoolDir", &info) && !info.is_default) + { + conf->SetStringValue("walfilepool.file_pool_dir", FLAGS_walFilePoolDir); + } + else + { + LOG(FATAL) + << "walFilePoolDir must be set when run chunkserver in command."; + } + + if (GetCommandLineFlagInfo("walFilePoolMetaPath", &info) && + !info.is_default) + { + conf->SetStringValue("walfilepool.meta_path", + FLAGS_walFilePoolMetaPath); + } + else + { + LOG(FATAL) << "walFilePoolMetaPath must be set when run chunkserver in " + "command."; + } + + if (GetCommandLineFlagInfo("mdsListenAddr", &info) && !info.is_default) + { + conf->SetStringValue("mds.listen.addr", FLAGS_mdsListenAddr); + } + + // Set log storage folder + if (FLAGS_log_dir.empty()) + { + if (!conf->GetStringValue("chunkserver.common.logDir", + 
&FLAGS_log_dir)) + { // NOLINT + LOG(WARNING) << "no chunkserver.common.logDir in " << FLAGS_conf + << ", will log to /tmp"; + } + } + + if (GetCommandLineFlagInfo("enableChunkfilepool", &info) && + !info.is_default) + { + conf->SetBoolValue("chunkfilepool.enable_get_chunk_from_pool", + FLAGS_enableChunkfilepool); + } + + if (GetCommandLineFlagInfo("enableWalfilepool", &info) && + !info.is_default) + { + conf->SetBoolValue("walfilepool.enable_get_segment_from_pool", + FLAGS_enableWalfilepool); + } + + if (GetCommandLineFlagInfo("copysetLoadConcurrency", &info) && + !info.is_default) + { + conf->SetIntValue("copyset.load_concurrency", + FLAGS_copysetLoadConcurrency); + } } - } - - if (GetCommandLineFlagInfo("enableChunkfilepool", &info) && - !info.is_default) { - conf->SetBoolValue("chunkfilepool.enable_get_chunk_from_pool", - FLAGS_enableChunkfilepool); - } - - if (GetCommandLineFlagInfo("enableWalfilepool", &info) && - !info.is_default) { - conf->SetBoolValue("walfilepool.enable_get_segment_from_pool", - FLAGS_enableWalfilepool); - } - - if (GetCommandLineFlagInfo("copysetLoadConcurrency", &info) && - !info.is_default) { - conf->SetIntValue("copyset.load_concurrency", - FLAGS_copysetLoadConcurrency); - } -} - -int ChunkServer::GetChunkServerMetaFromLocal( - const std::string &storeUri, - const std::string &metaUri, - const std::shared_ptr &fs, - ChunkServerMetadata *metadata) { - std::string proto = UriParser::GetProtocolFromUri(storeUri); - if (proto != "local") { - LOG(ERROR) << "Datastore protocal " << proto << " is not supported yet"; - return -1; - } - // 从配置文件中获取chunkserver元数据的文件路径 - proto = UriParser::GetProtocolFromUri(metaUri); - if (proto != "local") { - LOG(ERROR) << "Chunkserver meta protocal " - << proto << " is not supported yet"; - return -1; - } - // 元数据文件已经存在 - if (fs->FileExists(UriParser::GetPathFromUri(metaUri).c_str())) { - // 获取文件内容 - if (ReadChunkServerMeta(fs, metaUri, metadata) != 0) { - LOG(ERROR) << "Fail to read persisted chunkserver meta data"; + + int ChunkServer::GetChunkServerMetaFromLocal( + const std::string &storeUri, const std::string &metaUri, + const std::shared_ptr &fs, ChunkServerMetadata *metadata) + { + std::string proto = UriParser::GetProtocolFromUri(storeUri); + if (proto != "local") + { + LOG(ERROR) << "Datastore protocal " << proto << " is not supported yet"; + return -1; + } + // Obtain the file path for chunkserver metadata from the configuration file + proto = UriParser::GetProtocolFromUri(metaUri); + if (proto != "local") + { + LOG(ERROR) << "Chunkserver meta protocal " << proto + << " is not supported yet"; + return -1; + } + // The metadata file already exists + if (fs->FileExists(UriParser::GetPathFromUri(metaUri).c_str())) + { + // Get File Content + if (ReadChunkServerMeta(fs, metaUri, metadata) != 0) + { + LOG(ERROR) << "Fail to read persisted chunkserver meta data"; + return -1; + } + + LOG(INFO) << "Found persisted chunkserver data, skipping registration," + << " chunkserver id: " << metadata->id() + << ", token: " << metadata->token(); + return 0; + } return -1; } - LOG(INFO) << "Found persisted chunkserver data, skipping registration," - << " chunkserver id: " << metadata->id() - << ", token: " << metadata->token(); - return 0; - } - return -1; -} - -int ChunkServer::ReadChunkServerMeta(const std::shared_ptr &fs, - const std::string &metaUri, ChunkServerMetadata *metadata) { - int fd; - std::string metaFile = UriParser::GetPathFromUri(metaUri); - - fd = fs->Open(metaFile.c_str(), O_RDONLY); - if (fd < 0) { - LOG(ERROR) << 
"Failed to open Chunkserver metadata file " << metaFile; - return -1; - } - - #define METAFILE_MAX_SIZE 4096 - int size; - char json[METAFILE_MAX_SIZE] = {0}; - - size = fs->Read(fd, json, 0, METAFILE_MAX_SIZE); - if (size < 0) { - LOG(ERROR) << "Failed to read Chunkserver metadata file"; - return -1; - } else if (size >= METAFILE_MAX_SIZE) { - LOG(ERROR) << "Chunkserver metadata file is too large: " << size; - return -1; - } - if (fs->Close(fd)) { - LOG(ERROR) << "Failed to close chunkserver metadata file"; - return -1; - } - - if (!ChunkServerMetaHelper::DecodeChunkServerMeta(json, metadata)) { - LOG(ERROR) << "Failed to decode chunkserver meta: " << json; - return -1; - } - - return 0; -} - -} // namespace chunkserver -} // namespace curve + int ChunkServer::ReadChunkServerMeta(const std::shared_ptr &fs, + const std::string &metaUri, + ChunkServerMetadata *metadata) + { + int fd; + std::string metaFile = UriParser::GetPathFromUri(metaUri); + + fd = fs->Open(metaFile.c_str(), O_RDONLY); + if (fd < 0) + { + LOG(ERROR) << "Failed to open Chunkserver metadata file " << metaFile; + return -1; + } + +#define METAFILE_MAX_SIZE 4096 + int size; + char json[METAFILE_MAX_SIZE] = {0}; + + size = fs->Read(fd, json, 0, METAFILE_MAX_SIZE); + if (size < 0) + { + LOG(ERROR) << "Failed to read Chunkserver metadata file"; + return -1; + } + else if (size >= METAFILE_MAX_SIZE) + { + LOG(ERROR) << "Chunkserver metadata file is too large: " << size; + return -1; + } + if (fs->Close(fd)) + { + LOG(ERROR) << "Failed to close chunkserver metadata file"; + return -1; + } + + if (!ChunkServerMetaHelper::DecodeChunkServerMeta(json, metadata)) + { + LOG(ERROR) << "Failed to decode chunkserver meta: " << json; + return -1; + } + + return 0; + } + + } // namespace chunkserver +} // namespace curve diff --git a/src/chunkserver/chunkserver.h b/src/chunkserver/chunkserver.h index b9e9005545..6698281fec 100644 --- a/src/chunkserver/chunkserver.h +++ b/src/chunkserver/chunkserver.h @@ -23,18 +23,19 @@ #ifndef SRC_CHUNKSERVER_CHUNKSERVER_H_ #define SRC_CHUNKSERVER_CHUNKSERVER_H_ -#include #include -#include "src/common/configuration.h" +#include + +#include "src/chunkserver/chunkserver_metrics.h" +#include "src/chunkserver/clone_manager.h" +#include "src/chunkserver/concurrent_apply/concurrent_apply.h" #include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/heartbeat.h" -#include "src/chunkserver/scan_manager.h" -#include "src/chunkserver/clone_manager.h" #include "src/chunkserver/register.h" -#include "src/chunkserver/trash.h" -#include "src/chunkserver/chunkserver_metrics.h" -#include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "src/chunkserver/scan_manager.h" #include "src/chunkserver/scan_service.h" +#include "src/chunkserver/trash.h" +#include "src/common/configuration.h" using ::curve::chunkserver::concurrent::ConcurrentApplyOption; @@ -43,81 +44,84 @@ namespace chunkserver { class ChunkServer { public: /** - * @brief 初始化Chunkserve各子模块 + * @brief Initialize Chunkserve sub modules * - * @param[in] argc 命令行参数总数 - * @param[in] argv 命令行参数列表 + * @param[in] argc Total number of command line arguments + * @param[in] argv command line argument list * - * @return 0表示成功,非0失败 + * @return 0 indicates success, non 0 indicates failure */ int Run(int argc, char** argv); /** - * @brief 停止chunkserver,结束各子模块 + * @brief: Stop chunkserver and end each sub module */ void Stop(); private: - void InitChunkFilePoolOptions(common::Configuration *conf, - FilePoolOptions *chunkFilePoolOptions); + void 
InitChunkFilePoolOptions(common::Configuration* conf, + FilePoolOptions* chunkFilePoolOptions); - void InitWalFilePoolOptions(common::Configuration *conf, - FilePoolOptions *walPoolOption); + void InitWalFilePoolOptions(common::Configuration* conf, + FilePoolOptions* walPoolOption); - void InitConcurrentApplyOptions(common::Configuration *conf, - ConcurrentApplyOption *concurrentApplyOption); + void InitConcurrentApplyOptions( + common::Configuration* conf, + ConcurrentApplyOption* concurrentApplyOption); - void InitCopysetNodeOptions(common::Configuration *conf, - CopysetNodeOptions *copysetNodeOptions); + void InitCopysetNodeOptions(common::Configuration* conf, + CopysetNodeOptions* copysetNodeOptions); - void InitCopyerOptions(common::Configuration *conf, - CopyerOptions *copyerOptions); + void InitCopyerOptions(common::Configuration* conf, + CopyerOptions* copyerOptions); - void InitCloneOptions(common::Configuration *conf, - CloneOptions *cloneOptions); + void InitCloneOptions(common::Configuration* conf, + CloneOptions* cloneOptions); - void InitScanOptions(common::Configuration *conf, - ScanManagerOptions *scanOptions); + void InitScanOptions(common::Configuration* conf, + ScanManagerOptions* scanOptions); - void InitHeartbeatOptions(common::Configuration *conf, - HeartbeatOptions *heartbeatOptions); + void InitHeartbeatOptions(common::Configuration* conf, + HeartbeatOptions* heartbeatOptions); - void InitRegisterOptions(common::Configuration *conf, - RegisterOptions *registerOptions); + void InitRegisterOptions(common::Configuration* conf, + RegisterOptions* registerOptions); - void InitTrashOptions(common::Configuration *conf, - TrashOptions *trashOptions); + void InitTrashOptions(common::Configuration* conf, + TrashOptions* trashOptions); - void InitMetricOptions(common::Configuration *conf, - ChunkServerMetricOptions *metricOptions); + void InitMetricOptions(common::Configuration* conf, + ChunkServerMetricOptions* metricOptions); - void LoadConfigFromCmdline(common::Configuration *conf); + void LoadConfigFromCmdline(common::Configuration* conf); - int GetChunkServerMetaFromLocal(const std::string &storeUri, - const std::string &metaUri, - const std::shared_ptr &fs, - ChunkServerMetadata *metadata); + int GetChunkServerMetaFromLocal(const std::string& storeUri, + const std::string& metaUri, + const std::shared_ptr& fs, + ChunkServerMetadata* metadata); - int ReadChunkServerMeta(const std::shared_ptr &fs, - const std::string &metaUri, ChunkServerMetadata *metadata); + int ReadChunkServerMeta(const std::shared_ptr& fs, + const std::string& metaUri, + ChunkServerMetadata* metadata); private: - // copysetNodeManager_ 管理chunkserver上所有copysetNode + // copysetNodeManager_ Manage all copysetNodes on the chunkserver CopysetNodeManager* copysetNodeManager_; - // cloneManager_ 管理克隆任务 + // cloneManager_ Manage Clone Tasks CloneManager cloneManager_; // scan copyset manager ScanManager scanManager_; - // heartbeat_ 负责向mds定期发送心跳,并下发心跳中任务 + // heartbeat_ Responsible for regularly sending heartbeat to MDS and issuing + // tasks in the heartbeat Heartbeat heartbeat_; - // trash_ 定期回收垃圾站中的物理空间 + // trash_ Regularly recycle physical space in the garbage bin std::shared_ptr trash_; - // install snapshot流控 + // install snapshot flow control scoped_refptr snapshotThrottle_; }; @@ -125,4 +129,3 @@ class ChunkServer { } // namespace curve #endif // SRC_CHUNKSERVER_CHUNKSERVER_H_ - diff --git a/src/chunkserver/chunkserver_helper.cpp b/src/chunkserver/chunkserver_helper.cpp index cf12df7f67..96afcf39e8 
100644 --- a/src/chunkserver/chunkserver_helper.cpp +++ b/src/chunkserver/chunkserver_helper.cpp @@ -20,19 +20,20 @@ * Author: lixiaocui */ -#include -#include +#include "src/chunkserver/chunkserver_helper.h" + #include +#include +#include #include "src/common/crc32.h" -#include "src/chunkserver/chunkserver_helper.h" namespace curve { namespace chunkserver { const uint64_t DefaultMagic = 0x6225929368674118; bool ChunkServerMetaHelper::EncodeChunkServerMeta( - const ChunkServerMetadata &meta, std::string *out) { + const ChunkServerMetadata& meta, std::string* out) { if (!out->empty()) { LOG(ERROR) << "out string must empty!"; return false; @@ -50,8 +51,8 @@ bool ChunkServerMetaHelper::EncodeChunkServerMeta( return true; } -bool ChunkServerMetaHelper::DecodeChunkServerMeta( - const std::string &meta, ChunkServerMetadata *out) { +bool ChunkServerMetaHelper::DecodeChunkServerMeta(const std::string& meta, + ChunkServerMetadata* out) { std::string jsonStr(meta); std::string err; json2pb::Json2PbOptions opt; @@ -63,7 +64,7 @@ bool ChunkServerMetaHelper::DecodeChunkServerMeta( return false; } - // 验证meta是否正确 + // Verify if the meta is correct uint32_t crc = MetadataCrc(*out); if (crc != out->checksum()) { LOG(ERROR) << "ChunkServer persisted metadata CRC dismatch." @@ -75,8 +76,7 @@ bool ChunkServerMetaHelper::DecodeChunkServerMeta( return true; } -uint32_t ChunkServerMetaHelper::MetadataCrc( - const ChunkServerMetadata &meta) { +uint32_t ChunkServerMetaHelper::MetadataCrc(const ChunkServerMetadata& meta) { uint32_t crc = 0; uint32_t ver = meta.version(); uint32_t id = meta.id(); @@ -87,7 +87,7 @@ uint32_t ChunkServerMetaHelper::MetadataCrc( crc = curve::common::CRC32(crc, reinterpret_cast(&id), sizeof(id)); crc = curve::common::CRC32(crc, token, meta.token().size()); crc = curve::common::CRC32(crc, reinterpret_cast(&magic), - sizeof(magic)); + sizeof(magic)); return crc; } diff --git a/src/chunkserver/chunkserver_main.cpp b/src/chunkserver/chunkserver_main.cpp index 5bc4cb8736..4c13bf64d8 100644 --- a/src/chunkserver/chunkserver_main.cpp +++ b/src/chunkserver/chunkserver_main.cpp @@ -26,7 +26,7 @@ int main(int argc, char* argv[]) { butil::AtExitManager atExitManager; ::curve::chunkserver::ChunkServer chunkserver; LOG(INFO) << "ChunkServer starting."; - // 这里不能用fork创建守护进程,bvar会存在一些问题 + // You cannot use fork to create daemons here, as bvar may have some issues // https://github.com/apache/incubator-brpc/issues/697 // https://github.com/apache/incubator-brpc/issues/208 chunkserver.Run(argc, argv); diff --git a/src/chunkserver/chunkserver_metrics.cpp b/src/chunkserver/chunkserver_metrics.cpp index 339ecbbe66..f8a361d94e 100644 --- a/src/chunkserver/chunkserver_metrics.cpp +++ b/src/chunkserver/chunkserver_metrics.cpp @@ -21,8 +21,9 @@ */ #include "src/chunkserver/chunkserver_metrics.h" -#include + #include +#include #include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/passive_getfn.h" @@ -31,13 +32,15 @@ namespace curve { namespace chunkserver { IOMetric::IOMetric() - : rps_(&reqNum_, 1), iops_(&ioNum_, 1), eps_(&errorNum_, 1), + : rps_(&reqNum_, 1), + iops_(&ioNum_, 1), + eps_(&errorNum_, 1), bps_(&ioBytes_, 1) {} IOMetric::~IOMetric() {} -int IOMetric::Init(const std::string &prefix) { - // 暴露所有的metric +int IOMetric::Init(const std::string& prefix) { + // Expose all metrics if (reqNum_.expose_as(prefix, "request_num") != 0) { LOG(ERROR) << "expose request num failed."; return -1; @@ -94,9 +97,8 @@ void IOMetric::OnResponse(size_t size, int64_t latUs, bool hasError) { } } - 
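The IOMetric code above and the CSIOMetric hunks that follow all rely on the same bvar idiom: monotonic counters are bvar::Adder values exposed via expose_as(), and the rps/iops/eps/bps rates are bvar::PerSecond windows over those adders. A minimal self-contained sketch of that idiom follows; DemoIOMetric and the reduced member set are illustrative only, not part of this patch.

#include <bvar/bvar.h>
#include <string>

// Sketch of the IOMetric accounting pattern: raw counters plus derived
// one-second windows. Failed IOs are excluded from the success counters,
// matching the policy documented in IOMetric::OnResponse above.
struct DemoIOMetric {
    bvar::Adder<uint64_t> reqNum;
    bvar::Adder<uint64_t> ioNum;
    bvar::Adder<uint64_t> errorNum;
    bvar::Adder<uint64_t> ioBytes;
    // One-second sliding windows sampled from the adders above.
    bvar::PerSecond<bvar::Adder<uint64_t>> iops{&ioNum, 1};
    bvar::PerSecond<bvar::Adder<uint64_t>> bps{&ioBytes, 1};

    int Init(const std::string& prefix) {
        // expose_as() returns 0 on success, hence the checks in the patch.
        if (reqNum.expose_as(prefix, "request_num") != 0) return -1;
        if (errorNum.expose_as(prefix, "error_num") != 0) return -1;
        return 0;
    }
    void OnRequest() { reqNum << 1; }
    void OnResponse(size_t size, bool hasError) {
        if (hasError) {
            errorNum << 1;    // errors are counted separately
        } else {
            ioNum << 1;
            ioBytes << size;  // only successful IO feeds iops/bps
        }
    }
};

Deriving the rates with PerSecond keeps the hot path cheap: each IO only bumps an adder, while the window is maintained by bvar's background sampler rather than being recomputed on every query.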
-int CSIOMetric::Init(const std::string &prefix) { - // 初始化io统计项metric +int CSIOMetric::Init(const std::string& prefix) { + // Initialize IO statistics item metric std::string readPrefix = prefix + "_read"; std::string writePrefix = prefix + "_write"; std::string recoverPrefix = prefix + "_recover"; @@ -161,30 +163,30 @@ void CSIOMetric::OnResponse(CSIOMetricType type, size_t size, int64_t latUs, IOMetricPtr CSIOMetric::GetIOMetric(CSIOMetricType type) { IOMetricPtr result = nullptr; switch (type) { - case CSIOMetricType::READ_CHUNK: - result = readMetric_; - break; - case CSIOMetricType::WRITE_CHUNK: - result = writeMetric_; - break; - case CSIOMetricType::RECOVER_CHUNK: - result = recoverMetric_; - break; - case CSIOMetricType::PASTE_CHUNK: - result = pasteMetric_; - break; - case CSIOMetricType::DOWNLOAD: - result = downloadMetric_; - break; - default: - result = nullptr; - break; + case CSIOMetricType::READ_CHUNK: + result = readMetric_; + break; + case CSIOMetricType::WRITE_CHUNK: + result = writeMetric_; + break; + case CSIOMetricType::RECOVER_CHUNK: + result = recoverMetric_; + break; + case CSIOMetricType::PASTE_CHUNK: + result = pasteMetric_; + break; + case CSIOMetricType::DOWNLOAD: + result = downloadMetric_; + break; + default: + result = nullptr; + break; } return result; } -int CSCopysetMetric::Init(const LogicPoolID &logicPoolId, - const CopysetID ©setId) { +int CSCopysetMetric::Init(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { logicPoolId_ = logicPoolId; copysetId_ = copysetId; int ret = ioMetrics_.Init(Prefix()); @@ -196,7 +198,7 @@ int CSCopysetMetric::Init(const LogicPoolID &logicPoolId, return 0; } -void CSCopysetMetric::MonitorDataStore(CSDataStore *datastore) { +void CSCopysetMetric::MonitorDataStore(CSDataStore* datastore) { std::string chunkCountPrefix = Prefix() + "_chunk_count"; std::string snapshotCountPrefix = Prefix() + "snapshot_count"; std::string cloneChunkCountPrefix = Prefix() + "_clonechunk_count"; @@ -209,30 +211,36 @@ void CSCopysetMetric::MonitorDataStore(CSDataStore *datastore) { } void CSCopysetMetric::MonitorCurveSegmentLogStorage( - CurveSegmentLogStorage *logStorage) { + CurveSegmentLogStorage* logStorage) { std::string walSegmentCountPrefix = Prefix() + "_walsegment_count"; walSegmentCount_ = std::make_shared>( walSegmentCountPrefix, GetLogStorageWalSegmentCountFunc, logStorage); } ChunkServerMetric::ChunkServerMetric() - : hasInited_(false), leaderCount_(nullptr), chunkLeft_(nullptr), - walSegmentLeft_(nullptr), chunkTrashed_(nullptr), chunkCount_(nullptr), - walSegmentCount_(nullptr), snapshotCount_(nullptr), + : hasInited_(false), + leaderCount_(nullptr), + chunkLeft_(nullptr), + walSegmentLeft_(nullptr), + chunkTrashed_(nullptr), + chunkCount_(nullptr), + walSegmentCount_(nullptr), + snapshotCount_(nullptr), cloneChunkCount_(nullptr) {} -ChunkServerMetric *ChunkServerMetric::self_ = nullptr; +ChunkServerMetric* ChunkServerMetric::self_ = nullptr; -ChunkServerMetric *ChunkServerMetric::GetInstance() { - // chunkserver metric 在chunkserver启动时初始化创建 - // 因此创建的时候不会存在竞争,不需要锁保护 +ChunkServerMetric* ChunkServerMetric::GetInstance() { + // Chunkserver metric initializes creation when chunkserver starts + // Therefore, there will be no competition during creation and lock + // protection is not required if (self_ == nullptr) { self_ = new ChunkServerMetric; } return self_; } -int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { +int ChunkServerMetric::Init(const ChunkServerMetricOptions& option) { if (hasInited_) { 
LOG(WARNING) << "chunkserver metric has inited."; return 0; @@ -245,14 +253,14 @@ int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { return 0; } - // 初始化io统计项metric + // Initialize IO statistics item metric int ret = ioMetrics_.Init(Prefix()); if (ret < 0) { LOG(ERROR) << "Init chunkserver metric failed."; return -1; } - // 初始化资源统计 + // Initialize resource statistics std::string leaderCountPrefix = Prefix() + "_leader_count"; leaderCount_ = std::make_shared>(leaderCountPrefix); @@ -278,7 +286,7 @@ int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { } int ChunkServerMetric::Fini() { - // 释放资源,从而将暴露的metric从全局的map中移除 + // Release resources to remove exposed metrics from the global map ioMetrics_.Fini(); leaderCount_ = nullptr; chunkLeft_ = nullptr; @@ -293,8 +301,8 @@ int ChunkServerMetric::Fini() { return 0; } -int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID ©setId) { +int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { if (!option_.collectMetric) { return 0; } @@ -321,9 +329,8 @@ int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID &logicPoolId, return 0; } -CopysetMetricPtr -ChunkServerMetric::GetCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID ©setId) { +CopysetMetricPtr ChunkServerMetric::GetCopysetMetric( + const LogicPoolID& logicPoolId, const CopysetID& copysetId) { if (!option_.collectMetric) { return nullptr; } @@ -332,18 +339,18 @@ ChunkServerMetric::GetCopysetMetric(const LogicPoolID &logicPoolId, return copysetMetricMap_.Get(groupId); } -int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID ©setId) { +int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { GroupId groupId = ToGroupId(logicPoolId, copysetId); - // 这里先保存copyset metric,等remove后再去释放 - // 防止在读写锁里面去操作metric,导致死锁 + // Save the copyset metric here first, and then release it after removing it + // Prevent operating metrics within read write locks, resulting in deadlocks auto metric = copysetMetricMap_.Get(groupId); copysetMetricMap_.Remove(groupId); return 0; } -void ChunkServerMetric::OnRequest(const LogicPoolID &logicPoolId, - const CopysetID ©setId, +void ChunkServerMetric::OnRequest(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, CSIOMetricType type) { if (!option_.collectMetric) { return; @@ -356,8 +363,8 @@ void ChunkServerMetric::OnRequest(const LogicPoolID &logicPoolId, ioMetrics_.OnRequest(type); } -void ChunkServerMetric::OnResponse(const LogicPoolID &logicPoolId, - const CopysetID ©setId, +void ChunkServerMetric::OnResponse(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, CSIOMetricType type, size_t size, int64_t latUs, bool hasError) { if (!option_.collectMetric) { @@ -371,7 +378,7 @@ void ChunkServerMetric::OnResponse(const LogicPoolID &logicPoolId, ioMetrics_.OnResponse(type, size, latUs, hasError); } -void ChunkServerMetric::MonitorChunkFilePool(FilePool *chunkFilePool) { +void ChunkServerMetric::MonitorChunkFilePool(FilePool* chunkFilePool) { if (!option_.collectMetric) { return; } @@ -381,7 +388,7 @@ void ChunkServerMetric::MonitorChunkFilePool(FilePool *chunkFilePool) { chunkLeftPrefix, GetChunkLeftFunc, chunkFilePool); } -void ChunkServerMetric::MonitorWalFilePool(FilePool *walFilePool) { +void ChunkServerMetric::MonitorWalFilePool(FilePool* walFilePool) { if (!option_.collectMetric) { return; } @@ -391,7 +398,7 @@ void 
ChunkServerMetric::MonitorWalFilePool(FilePool *walFilePool) {
         walSegmentLeftPrefix, GetWalSegmentLeftFunc, walFilePool);
 }

-void ChunkServerMetric::MonitorTrash(Trash *trash) {
+void ChunkServerMetric::MonitorTrash(Trash* trash) {
     if (!option_.collectMetric) {
         return;
     }
@@ -417,7 +424,7 @@ void ChunkServerMetric::DecreaseLeaderCount() {
     *leaderCount_ << -1;
 }

-void ChunkServerMetric::ExposeConfigMetric(common::Configuration *conf) {
+void ChunkServerMetric::ExposeConfigMetric(common::Configuration* conf) {
     if (!option_.collectMetric) {
         return;
     }
diff --git a/src/chunkserver/chunkserver_metrics.h b/src/chunkserver/chunkserver_metrics.h
index d4354d196f..b91fbf0f6e 100644
--- a/src/chunkserver/chunkserver_metrics.h
+++ b/src/chunkserver/chunkserver_metrics.h
@@ -23,18 +23,19 @@
 #ifndef SRC_CHUNKSERVER_CHUNKSERVER_METRICS_H_
 #define SRC_CHUNKSERVER_CHUNKSERVER_METRICS_H_

-#include
 #include
+#include
+
+#include
 #include
 #include
-#include
 #include

 #include "include/chunkserver/chunkserver_common.h"
-#include "src/common/uncopyable.h"
+#include "src/chunkserver/datastore/file_pool.h"
 #include "src/common/concurrent/rw_lock.h"
 #include "src/common/configuration.h"
-#include "src/chunkserver/datastore/file_pool.h"
+#include "src/common/uncopyable.h"

 using curve::common::Configuration;
 using curve::common::ReadLockGuard;
@@ -54,57 +55,59 @@ class Trash;

 template <typename T>
 using PassiveStatusPtr = std::shared_ptr<bvar::PassiveStatus<T>>;

-template <typename T> using AdderPtr = std::shared_ptr<bvar::Adder<T>>;
+template <typename T>
+using AdderPtr = std::shared_ptr<bvar::Adder<T>>;

-// 使用LatencyRecorder的实现来统计读写请求的size情况
-// 可以统计分位值、最大值、中位数、平均值等情况
+// LatencyRecorder is reused here to record the size of read and write
+// requests; it can report quantiles, maximum, median, mean, and so on
 using IOSizeRecorder = bvar::LatencyRecorder;

-// io 相关的统计项
+// IO-related statistics
 class IOMetric {
  public:
     IOMetric();
     virtual ~IOMetric();
     /**
-     * 初始化 io metric
-     * 主要用于曝光各metric指标
-     * @param prefix: 用于bvar曝光时使用的前缀
-     * @return 成功返回0,失败返回-1
+     * Initialize the io metric
+     * Mainly used to expose the metric items
+     * @param prefix: the prefix used when exposing bvars
+     * @return 0 on success, -1 on failure
      */
-    int Init(const std::string &prefix);
+    int Init(const std::string& prefix);
     /**
-     * IO请求到来时统计requestNum
+     * Count requestNum when an IO request arrives
      */
     void OnRequest();
     /**
-     * IO 完成以后,记录该次IO的指标
-     * 错误的io不会计入iops和bps统计
-     * @param size: 此次io数据的大小
-     * @param latUS: 此次io的延时
-     * @param hasError: 此次io是否有错误产生
+     * After an IO completes, record its metrics
+     * Failed IOs are not counted in the iops and bps statistics
+     * @param size: size of this IO
+     * @param latUs: latency of this IO
+     * @param hasError: whether this IO failed
      */
     void OnResponse(size_t size, int64_t latUs, bool hasError);

  public:
-    // io请求的数量
+    // Number of IO requests
     bvar::Adder<uint64_t> reqNum_;
-    // 成功io的数量
+    // Number of successful IOs
     bvar::Adder<uint64_t> ioNum_;
-    // 失败的io个数
+    // Number of failed IOs
     bvar::Adder<uint64_t> errorNum_;
-    // 所有io的数据量
+    // Total bytes of all IOs
    bvar::Adder<uint64_t> ioBytes_;
-    // io的延时情况(分位值、最大值、中位数、平均值)
+    // IO latency (quantiles, maximum, median, mean)
     bvar::LatencyRecorder latencyRecorder_;
-    // io大小的情况(分位值、最大值、中位数、平均值)
+    // IO size (quantiles, maximum, median, mean)
     IOSizeRecorder sizeRecorder_;
-    // 最近1秒请求的IO数量
+    // Number of IO requests in the last second
     bvar::PerSecond<bvar::Adder<uint64_t>> rps_;
-    // 最近1秒的iops
+    // iops in the last second
     bvar::PerSecond<bvar::Adder<uint64_t>> iops_;
-    //
最近1秒的出错IO数量 + // Number of IO errors in the last 1 second bvar::PerSecond> eps_; - // 最近1秒的数据量 + // Data volume in the last 1 second bvar::PerSecond> bps_; }; using IOMetricPtr = std::shared_ptr; @@ -120,100 +123,109 @@ enum class CSIOMetricType { class CSIOMetric { public: CSIOMetric() - : readMetric_(nullptr), writeMetric_(nullptr), recoverMetric_(nullptr), - pasteMetric_(nullptr), downloadMetric_(nullptr) {} + : readMetric_(nullptr), + writeMetric_(nullptr), + recoverMetric_(nullptr), + pasteMetric_(nullptr), + downloadMetric_(nullptr) {} ~CSIOMetric() {} /** - * 执行请求前记录metric - * @param type: 请求对应的metric类型 + * Record metric before executing the request + * @param type: The corresponding metric type of the request */ void OnRequest(CSIOMetricType type); /** - * 执行请求后记录metric - * 错误的io不会计入iops和bps统计 - * @param type: 请求对应的metric类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record metric after executing the request + * Incorrect IO will not be included in iops and bps statistics + * @param type: The corresponding metric type of the request + * @param size: The size of the IO data for this time + * @param latUS: The delay of this IO + * @param hasError: Did any errors occur during this IO */ void OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError); /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return returns the IOMetric pointer corresponding to the specified type, + * or nullptr if the type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type); /** - * 初始化各项op的metric统计项 - * @return 成功返回0,失败返回-1 + * Initialize metric statistics for each op + * @return returns 0 for success, -1 for failure */ - int Init(const std::string &prefix); + int Init(const std::string& prefix); /** - * 释放各项op的metric资源 + * Release metric resources for various OPs */ void Fini(); protected: - // ReadChunk统计 + // ReadChunk statistics IOMetricPtr readMetric_; - // WriteChunk统计 + // WriteChunk statistics IOMetricPtr writeMetric_; - // RecoverChunk统计 + // RecoverChunk statistics IOMetricPtr recoverMetric_; - // PasteChunk信息 + // PasteChunk Information IOMetricPtr pasteMetric_; - // Download统计 + // Download statistics IOMetricPtr downloadMetric_; }; class CSCopysetMetric { public: CSCopysetMetric() - : logicPoolId_(0), copysetId_(0), chunkCount_(nullptr), - walSegmentCount_(nullptr), snapshotCount_(nullptr), + : logicPoolId_(0), + copysetId_(0), + chunkCount_(nullptr), + walSegmentCount_(nullptr), + snapshotCount_(nullptr), cloneChunkCount_(nullptr) {} ~CSCopysetMetric() {} /** - * 初始化copyset级别的metric统计项 - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1 + * Initialize metric statistics at the copyset level + * @param logicPoolId: The ID of the logical pool to which the copyset + * belongs + * @param copysetId: The ID of the copyset + * @return returns 0 for success, -1 for failure */ - int Init(const LogicPoolID &logicPoolId, const CopysetID ©setId); + int Init(const LogicPoolID& logicPoolId, const CopysetID& copysetId); /** - * 监控DataStore指标,主要包括chunk的数量、快照的数量等 - * @param datastore: 该copyset下的datastore指针 + * Monitor DataStore indicators, mainly including the number of chunks, + * number of snapshots, etc + * @param datastore: The datastore pointer under this copyset */ - void MonitorDataStore(CSDataStore *datastore); + void 
MonitorDataStore(CSDataStore* datastore); /** * @brief: Monitor log storage's metric, like the number of WAL segment file * @param logStorage: The pointer to CurveSegmentLogStorage */ - void MonitorCurveSegmentLogStorage(CurveSegmentLogStorage *logStorage); + void MonitorCurveSegmentLogStorage(CurveSegmentLogStorage* logStorage); /** - * 执行请求前记录metric - * @param type: 请求对应的metric类型 + * Record metric before executing the request + * @param type: The corresponding metric type of the request */ void OnRequest(CSIOMetricType type) { ioMetrics_.OnRequest(type); } /** - * 执行请求后记录metric - * 错误的io不会计入iops和bps统计 - * @param type: 请求对应的metric类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record metric after executing the request + * Incorrect IO will not be included in iops and bps statistics + * @param type: The corresponding metric type of the request + * @param size: The size of the IO data for this time + * @param latUS: The delay of this IO + * @param hasError: Did any errors occur during this IO */ void OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError) { @@ -221,9 +233,10 @@ class CSCopysetMetric { } /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return returns the IOMetric pointer corresponding to the specified type, + * or nullptr if the type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type) { return ioMetrics_.GetIOMetric(type); @@ -264,27 +277,27 @@ class CSCopysetMetric { } private: - // 逻辑池id + // Logical Pool ID LogicPoolID logicPoolId_; // copyset id CopysetID copysetId_; - // copyset上的 chunk 的数量 + // Number of chunks on copyset PassiveStatusPtr chunkCount_; // The total number of WAL segment in copyset PassiveStatusPtr walSegmentCount_; - // copyset上的 快照文件 的数量 + // Number of snapshot files on copyset PassiveStatusPtr snapshotCount_; - // copyset上的 clone chunk 的数量 + // The number of clone chunks on the copyset PassiveStatusPtr cloneChunkCount_; - // copyset上的IO类型的metric统计 + // Metric statistics of IO types on copyset CSIOMetric ioMetrics_; }; struct ChunkServerMetricOptions { bool collectMetric; - // chunkserver的ip + // Chunkserver IP std::string ip; - // chunkserver的端口号 + // The port number of chunkserver uint32_t port; ChunkServerMetricOptions() : collectMetric(false), ip("127.0.0.1"), port(8888) {} @@ -344,173 +357,175 @@ class CopysetMetricMap { } private: - // 保护复制组metric map的读写锁 + // Protect the read write lock of the replication group metric map RWLock rwLock_; - // 各复制组metric的映射表,用GroupId作为key + // Mapping table for each replication group metric, using GroupId as the key std::unordered_map map_; }; class ChunkServerMetric : public Uncopyable { public: - // 实现单例 - static ChunkServerMetric *GetInstance(); + // Implementation singleton + static ChunkServerMetric* GetInstance(); /** - * 初始化chunkserver统计项 - * @pa)ram option: 初始化配置项 - * @return 成功返回0,失败返回-1 + * Initialize chunkserver statistics + * @param option: Initialize configuration item + * @return returns 0 for success, -1 for failure */ - int Init(const ChunkServerMetricOptions &option); + int Init(const ChunkServerMetricOptions& option); /** - * 释放metric资源 - * @return 成功返回0,失败返回-1 + * Release metric resources + * @return returns 0 for success, -1 for failure */ int Fini(); /** - * 请求前记录metric - * @param logicPoolId: 此次io操作所在的逻辑池id - * @param copysetId: 此次io操作所在的copysetid - * 
@param type: 请求类型 + * Record metric before request + * @param logicPoolId: The logical pool ID where this io operation is + * located + * @param copysetId: The copysetID where this io operation is located + * @param type: Request type */ - void OnRequest(const LogicPoolID &logicPoolId, const CopysetID ©setId, + void OnRequest(const LogicPoolID& logicPoolId, const CopysetID& copysetId, CSIOMetricType type); /** - * 请求结束时记录该次IO指标 - * 错误的io不会计入iops和bps统计 - * @param logicPoolId: 此次io操作所在的逻辑池id - * @param copysetId: 此次io操作所在的copysetid - * @param type: 请求类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record the IO metric at the end of the request + * Incorrect IO will not be included in iops and bps statistics + * @param logicPoolId: The logical pool ID where this io operation is + * located + * @param copysetId: The copysetID where this io operation is located + * @param type: Request type + * @param size: The size of the IO data for this time + * @param latUS: The delay of this IO + * @param hasError: Did any errors occur during this IO */ - void OnResponse(const LogicPoolID &logicPoolId, const CopysetID ©setId, + void OnResponse(const LogicPoolID& logicPoolId, const CopysetID& copysetId, CSIOMetricType type, size_t size, int64_t latUs, bool hasError); /** - * 创建指定copyset的metric - * 如果collectMetric为false,返回0,但实际并不会创建 - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1,如果指定metric已存在返回失败 + *Create a metric for the specified copyset + * If collectMetric is false, it returns 0, but it is not actually created + * @param logicPoolId: The ID of the logical pool to which the copyset + *belongs + * @param copysetId: The ID of the copyset + * @return returns 0 for success, -1 for failure, or failure if the + *specified metric already exists */ - int CreateCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID ©setId); + int CreateCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 获取指定copyset的metric - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回指定的copyset metric,失败返回nullptr + * Obtain the metric of the specified copyset + * @param logicPoolId: The ID of the logical pool to which the copyset + * belongs + * @param copysetId: The ID of the copyset + * @return successfully returns the specified copyset metric, while failure + * returns nullptr */ - CopysetMetricPtr GetCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID ©setId); + CopysetMetricPtr GetCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 删除指定copyset的metric - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1 + *Delete the metric for the specified copyset + * @param logicPoolId: The ID of the logical pool to which the copyset + *belongs + * @param copysetId: The ID of the copyset + * @return returns 0 for success, -1 for failure */ - int RemoveCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID ©setId); + int RemoveCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 监视chunk分配池,主要监视池中chunk的数量 - * @param chunkFilePool: chunkfilePool的对象指针 + *Monitor the chunk allocation pool, mainly monitoring the number of chunks + *in the pool + * @param chunkFilePool: Object pointer to chunkfilePool */ - void MonitorChunkFilePool(FilePool *chunkFilePool); + void MonitorChunkFilePool(FilePool* chunkFilePool); /** - * 监视wal segment分配池,主要监视池中segment的数量 - * 
@param walFilePool: walfilePool的对象指针
+     * Monitor the wal segment allocation pool, mainly the number of
+     * segments left in the pool
+     * @param walFilePool: Object pointer to the walfilePool
      */
-    void MonitorWalFilePool(FilePool *walFilePool);
+    void MonitorWalFilePool(FilePool* walFilePool);

     /**
-     * 监视回收站
-     * @param trash: trash的对象指针
+     * Monitor the trash
+     * @param trash: Object pointer to the trash
      */
-    void MonitorTrash(Trash *trash);
+    void MonitorTrash(Trash* trash);

     /**
-     * 增加 leader count 计数
+     * Increase the leader count
      */
     void IncreaseLeaderCount();

     /**
-     * 减少 leader count 计数
+     * Decrease the leader count
      */
     void DecreaseLeaderCount();

     /**
-     * 更新配置项数据
-     * @param conf: 配置内容
+     * Update the configuration metric data
+     * @param conf: Configuration content
      */
-    void ExposeConfigMetric(common::Configuration *conf);
+    void ExposeConfigMetric(common::Configuration* conf);

     /**
-     * 获取指定类型的IOMetric
-     * @param type: 请求对应的metric类型
-     * @return 返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr
+     * Obtain the IOMetric of the specified type
+     * @param type: The metric type of the request
+     * @return the IOMetric pointer for the given type, or nullptr if the
+     * type does not exist
      */
     IOMetricPtr GetIOMetric(CSIOMetricType type) {
         return ioMetrics_.GetIOMetric(type);
     }

-    CopysetMetricMap *GetCopysetMetricMap() { return &copysetMetricMap_; }
+    CopysetMetricMap* GetCopysetMetricMap() { return &copysetMetricMap_; }

     uint32_t GetCopysetCount() { return copysetMetricMap_.Size(); }

     uint32_t GetLeaderCount() const {
-        if (leaderCount_ == nullptr)
-            return 0;
+        if (leaderCount_ == nullptr) return 0;
         return leaderCount_->get_value();
     }

     uint32_t GetTotalChunkCount() {
-        if (chunkCount_ == nullptr)
-            return 0;
+        if (chunkCount_ == nullptr) return 0;
         return chunkCount_->get_value();
     }

     uint32_t GetTotalSnapshotCount() {
-        if (snapshotCount_ == nullptr)
-            return 0;
+        if (snapshotCount_ == nullptr) return 0;
         return snapshotCount_->get_value();
     }

     uint32_t GetTotalCloneChunkCount() {
-        if (cloneChunkCount_ == nullptr)
-            return 0;
+        if (cloneChunkCount_ == nullptr) return 0;
         return cloneChunkCount_->get_value();
     }

     uint32_t GetTotalWalSegmentCount() {
-        if (nullptr == walSegmentCount_)
-            return 0;
+        if (nullptr == walSegmentCount_) return 0;
         return walSegmentCount_->get_value();
     }

     uint32_t GetChunkLeftCount() const {
-        if (chunkLeft_ == nullptr)
-            return 0;
+        if (chunkLeft_ == nullptr) return 0;
         return chunkLeft_->get_value();
     }

     uint32_t GetWalSegmentLeftCount() const {
-        if (nullptr == walSegmentLeft_)
-            return 0;
+        if (nullptr == walSegmentLeft_) return 0;
         return walSegmentLeft_->get_value();
     }

     uint32_t GetChunkTrashedCount() const {
-        if (chunkTrashed_ == nullptr)
-            return 0;
+        if (chunkTrashed_ == nullptr) return 0;
         return chunkTrashed_->get_value();
     }
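As the comments above describe, every chunk IO is meant to be bracketed by an OnRequest()/OnResponse() pair on the singleton, which feeds both the server-level and the per-copyset IOMetric. A hedged usage sketch follows; the pool and copyset ids are made up for the demo, and ChunkServerMetric::Init() is assumed to have already run at startup, as chunkserver.cpp does.

#include <cstdint>

#include "src/chunkserver/chunkserver_metrics.h"

using curve::chunkserver::ChunkServerMetric;
using curve::chunkserver::CSIOMetricType;

// Hypothetical call site accounting one read on copyset (1, 100001).
void DemoAccountOneRead(size_t bytes, int64_t latUs, bool failed) {
    auto* metric = ChunkServerMetric::GetInstance();
    metric->OnRequest(1, 100001, CSIOMetricType::READ_CHUNK);
    // ... the actual read happens here ...
    metric->OnResponse(1, 100001, CSIOMetricType::READ_CHUNK,
                       bytes, latUs, failed);
}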
@@ -522,32 +537,32 @@ class ChunkServerMetric : public Uncopyable {
     }

  private:
-    // 初始化标志
+    // Initialization flag
     bool hasInited_;
-    // 配置项
+    // Configuration items
     ChunkServerMetricOptions option_;
-    // leader 的数量
+    // Number of leaders
     AdderPtr<uint32_t> leaderCount_;
-    // chunkfilepool 中剩余的 chunk 的数量
+    // Number of chunks left in the chunkfilepool
     PassiveStatusPtr<uint32_t> chunkLeft_;
-    // walfilepool 中剩余的 wal segment 的数量
+    // Number of wal segments left in the walfilepool
     PassiveStatusPtr<uint32_t> walSegmentLeft_;
-    // trash 中的 chunk 的数量
+    // Number of chunks in the trash
     PassiveStatusPtr<uint32_t> chunkTrashed_;
-    // chunkserver上的 chunk 的数量
+    // Number of chunks on the chunkserver
     PassiveStatusPtr<uint32_t> chunkCount_;
     // The total number of WAL segment in chunkserver
     PassiveStatusPtr<uint32_t> walSegmentCount_;
-    // chunkserver上的 快照文件 的数量
+    // Number of snapshot files on the chunkserver
     PassiveStatusPtr<uint32_t> snapshotCount_;
-    // chunkserver上的 clone chunk 的数量
+    // Number of clone chunks on the chunkserver
     PassiveStatusPtr<uint32_t> cloneChunkCount_;
-    // 各复制组metric的映射表,用GroupId作为key
+    // Mapping table of per-copyset metrics, keyed by GroupId
     CopysetMetricMap copysetMetricMap_;
-    // chunkserver上的IO类型的metric统计
+    // Metric statistics of IO types on the chunkserver
     CSIOMetric ioMetrics_;
-    // 用于单例模式的自指指针
-    static ChunkServerMetric *self_;
+    // Self pointer for the singleton
+    static ChunkServerMetric* self_;
 };

 }  // namespace chunkserver
diff --git a/src/chunkserver/cli.h b/src/chunkserver/cli.h
index 3c8ecc6997..ed048dc460 100644
--- a/src/chunkserver/cli.h
+++ b/src/chunkserver/cli.h
@@ -33,41 +33,37 @@ namespace curve {
 namespace chunkserver {

 /**
- * Cli就是配置变更相关接口的封装,方便使用,避免直接操作RPC
+ * Cli encapsulates the configuration-change interfaces for convenient use,
+ * avoiding direct RPC operations
 */

-// 获取leader
-butil::Status GetLeader(const LogicPoolID &logicPoolId,
-                        const CopysetID &copysetId,
-                        const Configuration &conf,
-                        PeerId *leaderId);
+// Get the leader
+butil::Status GetLeader(const LogicPoolID& logicPoolId,
+                        const CopysetID& copysetId, const Configuration& conf,
+                        PeerId* leaderId);

-// 增加一个peer
-butil::Status AddPeer(const LogicPoolID &logicPoolId,
-                      const CopysetID &copysetId,
-                      const Configuration &conf,
-                      const PeerId &peer_id,
-                      const braft::cli::CliOptions &options);
+// Add a peer
+butil::Status AddPeer(const LogicPoolID& logicPoolId,
+                      const CopysetID& copysetId, const Configuration& conf,
+                      const PeerId& peer_id,
+                      const braft::cli::CliOptions& options);

-// 移除一个peer
-butil::Status RemovePeer(const LogicPoolID &logicPoolId,
-                         const CopysetID &copysetId,
-                         const Configuration &conf,
-                         const PeerId &peer_id,
-                         const braft::cli::CliOptions &options);
+// Remove a peer
+butil::Status RemovePeer(const LogicPoolID& logicPoolId,
+                         const CopysetID& copysetId, const Configuration& conf,
+                         const PeerId& peer_id,
+                         const braft::cli::CliOptions& options);

-// 转移leader
-butil::Status TransferLeader(const LogicPoolID &logicPoolId,
-                             const CopysetID &copysetId,
-                             const Configuration &conf,
-                             const PeerId &peer,
-                             const braft::cli::CliOptions &options);
+// Transfer leader
+butil::Status TransferLeader(const LogicPoolID& logicPoolId,
+                             const CopysetID& copysetId,
+                             const Configuration& conf, const PeerId& peer,
+                             const braft::cli::CliOptions& options);

-// 触发快照
-butil::Status Snapshot(const LogicPoolID &logicPoolId,
-                       const CopysetID &copysetId,
-                       const PeerId &peer,
-                       const braft::cli::CliOptions &options);
+// Trigger snapshot
+butil::Status Snapshot(const LogicPoolID& logicPoolId,
+                       const CopysetID& copysetId, const PeerId& peer,
+                       const braft::cli::CliOptions& options);

 }  // namespace chunkserver
 }  // namespace curve
diff --git a/src/chunkserver/cli2.cpp b/src/chunkserver/cli2.cpp
index 5328724316..ba779bb8d7 100644
--- a/src/chunkserver/cli2.cpp
+++ b/src/chunkserver/cli2.cpp
@@ -22,10 +22,10 @@

 #include "src/chunkserver/cli2.h"

-#include
-#include
 #include
 #include
+#include
+#include

 #include

@@ -34,16 +34,14 @@
 namespace curve {
 namespace chunkserver {

-butil::Status GetLeader(const LogicPoolID &logicPoolId,
-                        const CopysetID &copysetId,
-                        const Configuration &conf,
-                        Peer *leader) {
+butil::Status GetLeader(const LogicPoolID& logicPoolId,
+                        const CopysetID& copysetId, const Configuration& conf,
+                        Peer* leader) {
     if (conf.empty()) {
         return
butil::Status(EINVAL, "Empty group configuration"); } - butil::Status st(-1, - "Fail to get leader of copyset node %s", + butil::Status st(-1, "Fail to get leader of copyset node %s", ToGroupIdString(logicPoolId, copysetId).c_str()); PeerId leaderId; Configuration::const_iterator iter = conf.begin(); @@ -53,7 +51,7 @@ butil::Status GetLeader(const LogicPoolID &logicPoolId, return butil::Status(-1, "Fail to init channel to %s", iter->to_string().c_str()); } - Peer *peer = new Peer(); + Peer* peer = new Peer(); CliService2_Stub stub(&channel); GetLeaderRequest2 request; GetLeaderResponse2 response; @@ -84,11 +82,9 @@ butil::Status GetLeader(const LogicPoolID &logicPoolId, return butil::Status::OK(); } -butil::Status AddPeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options) { +butil::Status AddPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st); @@ -101,10 +97,10 @@ butil::Status AddPeer(const LogicPoolID &logicPoolId, AddPeerRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); request.set_allocated_leader(leaderPeer); *leaderPeer = leader; - Peer *addPeer = new Peer(); + Peer* addPeer = new Peer(); request.set_allocated_addpeer(addPeer); *addPeer = peer; AddPeerResponse2 response; @@ -128,17 +124,15 @@ butil::Status AddPeer(const LogicPoolID &logicPoolId, new_conf.add_peer(peer); } LOG(INFO) << "Configuration of replication group ` " - << ToGroupIdString(logicPoolId, copysetId) - << " ' changed from " << old_conf - << " to " << new_conf; + << ToGroupIdString(logicPoolId, copysetId) << " ' changed from " + << old_conf << " to " << new_conf; return butil::Status::OK(); } -butil::Status RemovePeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options) { +butil::Status RemovePeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, + const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st); @@ -151,10 +145,10 @@ butil::Status RemovePeer(const LogicPoolID &logicPoolId, RemovePeerRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); request.set_allocated_leader(leaderPeer); *leaderPeer = leader; - Peer *removePeer = new Peer(); + Peer* removePeer = new Peer(); request.set_allocated_removepeer(removePeer); *removePeer = peer; RemovePeerResponse2 response; @@ -179,17 +173,15 @@ butil::Status RemovePeer(const LogicPoolID &logicPoolId, new_conf.add_peer(peer); } LOG(INFO) << "Configuration of replication group ` " - << ToGroupIdString(logicPoolId, copysetId) - << " ' changed from " << old_conf - << " to " << new_conf; + << ToGroupIdString(logicPoolId, copysetId) << " ' changed from " + << old_conf << " to " << new_conf; return butil::Status::OK(); } -butil::Status ChangePeers(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Configuration &newPeers, - const braft::cli::CliOptions &options) { +butil::Status 
ChangePeers(const LogicPoolID& logicPoolId,
+                          const CopysetID& copysetId, const Configuration& conf,
+                          const Configuration& newPeers,
+                          const braft::cli::CliOptions& options) {
     Peer leader;
     butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader);
     BRAFT_RETURN_IF(!st.ok(), st);
@@ -203,11 +195,11 @@ butil::Status ChangePeers(const LogicPoolID &logicPoolId,
     ChangePeersRequest2 request;
     request.set_logicpoolid(logicPoolId);
     request.set_copysetid(copysetId);
-    Peer *leaderPeer = new Peer();
+    Peer* leaderPeer = new Peer();
     *leaderPeer = leader;
     request.set_allocated_leader(leaderPeer);
-    for (Configuration::const_iterator
-             iter = newPeers.begin(); iter != newPeers.end(); ++iter) {
+    for (Configuration::const_iterator iter = newPeers.begin();
+         iter != newPeers.end(); ++iter) {
         request.add_newpeers()->set_address(iter->to_string());
     }
     ChangePeersResponse2 response;
@@ -229,17 +221,15 @@ butil::Status ChangePeers(const LogicPoolID &logicPoolId,
         new_conf.add_peer(response.newpeers(i).address());
     }
     LOG(INFO) << "Configuration of replication group `"
-              << ToGroupIdString(logicPoolId, copysetId)
-              << "' changed from " << old_conf
-              << " to " << new_conf;
+              << ToGroupIdString(logicPoolId, copysetId) << "' changed from "
+              << old_conf << " to " << new_conf;
     return butil::Status::OK();
 }

-butil::Status TransferLeader(const LogicPoolID &logicPoolId,
-                             const CopysetID &copysetId,
-                             const Configuration &conf,
-                             const Peer &peer,
-                             const braft::cli::CliOptions &options) {
+butil::Status TransferLeader(const LogicPoolID& logicPoolId,
+                             const CopysetID& copysetId,
+                             const Configuration& conf, const Peer& peer,
+                             const braft::cli::CliOptions& options) {
     Peer leader;
     butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader);
     BRAFT_RETURN_IF(!st.ok(), st);
@@ -256,10 +246,10 @@ butil::Status TransferLeader(const LogicPoolID &logicPoolId,
     TransferLeaderRequest2 request;
     request.set_logicpoolid(logicPoolId);
     request.set_copysetid(copysetId);
-    Peer *leaderPeer = new Peer();
+    Peer* leaderPeer = new Peer();
     request.set_allocated_leader(leaderPeer);
     *leaderPeer = leader;
-    Peer *transfereePeer = new Peer();
+    Peer* transfereePeer = new Peer();
     request.set_allocated_transferee(transfereePeer);
     *transfereePeer = peer;
     TransferLeaderResponse2 response;
@@ -274,18 +264,23 @@ butil::Status TransferLeader(const LogicPoolID &logicPoolId,
     return butil::Status::OK();
 }

-// reset peer不走一致性协议,直接将peers重置,因此存在一定的风险
-// 应用场景:大多数节点挂掉的极端情况。在这种情况下,该copyset将无法写入,直
-// 到半小时后mds将挂掉的副本上的copyset迁移,因此有一段时间不可用,为了应对这种场景,引入了
-// reset peer工具,直接将复制组成员reset成只包含存活的副本。
-// 注意事项:
-// 1、reset peer之前,需要通过check-copyset工具确认复制组中的大多数副本确实挂掉
-// 2、reset peer的时候,要确保剩下的副本有最新的数据,不然存在丢数据的风险
-// 3、reset peer适用于其他两个副本不能恢复的情况,不然可能会扰乱集群
-butil::Status ResetPeer(const LogicPoolID &logicPoolId,
-                        const CopysetID &copysetId,
-                        const Configuration& newPeers,
-                        const Peer& requestPeer,
+// reset peer does not go through the consensus protocol; it resets the peers
+// directly, so it carries a certain risk.
+// Use case: the extreme situation where most replicas are down. In that case
+// the copyset cannot be written until, half an hour later, MDS migrates the
+// copysets on the dead replicas, so it stays unavailable for a while. To
+// handle this scenario, the reset peer tool was introduced; it resets the
+// replication group members so that they contain only the surviving replicas.
+// Precautions:
+// 1. Before resetting peers, confirm with the check-copyset tool that most
+// replicas of the replication group are indeed down
+// 2. When resetting peers, make sure the remaining replica holds the latest
+// data, otherwise there is a risk of data loss
+// 3. Resetting peers is only suitable when the other two replicas cannot be
+// recovered; otherwise it may disrupt the cluster
+butil::Status ResetPeer(const LogicPoolID& logicPoolId,
+                        const CopysetID& copysetId,
+                        const Configuration& newPeers, const Peer& requestPeer,
                         const braft::cli::CliOptions& options) {
     if (newPeers.empty()) {
         return butil::Status(EINVAL, "new_conf is empty");
     }
@@ -294,7 +289,7 @@ butil::Status ResetPeer(const LogicPoolID &logicPoolId,
     brpc::Channel channel;
     if (channel.Init(requestPeerId.addr, NULL) != 0) {
         return butil::Status(-1, "Fail to init channel to %s",
-                            requestPeerId.to_string().c_str());
+                             requestPeerId.to_string().c_str());
     }
     brpc::Controller cntl;
     cntl.set_timeout_ms(options.timeout_ms);
@@ -302,11 +297,11 @@ butil::Status ResetPeer(const LogicPoolID &logicPoolId,
     ResetPeerRequest2 request;
     request.set_logicpoolid(logicPoolId);
     request.set_copysetid(copysetId);
-    Peer *requestPeerPtr = new Peer();
+    Peer* requestPeerPtr = new Peer();
     *requestPeerPtr = requestPeer;
     request.set_allocated_requestpeer(requestPeerPtr);
-    for (Configuration::const_iterator
-             iter = newPeers.begin(); iter != newPeers.end(); ++iter) {
+    for (Configuration::const_iterator iter = newPeers.begin();
+         iter != newPeers.end(); ++iter) {
         request.add_newpeers()->set_address(iter->to_string());
     }
     ResetPeerResponse2 response;
@@ -318,15 +313,14 @@ butil::Status ResetPeer(const LogicPoolID &logicPoolId,
     return butil::Status::OK();
 }

-butil::Status Snapshot(const LogicPoolID &logicPoolId,
-                       const CopysetID &copysetId,
-                       const Peer& peer,
+butil::Status Snapshot(const LogicPoolID& logicPoolId,
+                       const CopysetID& copysetId, const Peer& peer,
                        const braft::cli::CliOptions& options) {
     brpc::Channel channel;
     PeerId peerId(peer.address());
     if (channel.Init(peerId.addr, NULL) != 0) {
         return butil::Status(-1, "Fail to init channel to %s",
-                            peerId.to_string().c_str());
+                             peerId.to_string().c_str());
     }
     brpc::Controller cntl;
     cntl.set_timeout_ms(options.timeout_ms);
@@ -334,7 +328,7 @@ butil::Status Snapshot(const LogicPoolID &logicPoolId,
     SnapshotRequest2 request;
     request.set_logicpoolid(logicPoolId);
     request.set_copysetid(copysetId);
-    Peer *peerPtr = new Peer(peer);
+    Peer* peerPtr = new Peer(peer);
     request.set_allocated_peer(peerPtr);
     SnapshotResponse2 response;
     CliService2_Stub stub(&channel);
@@ -351,7 +345,7 @@ butil::Status SnapshotAll(const Peer& peer,
     PeerId peerId(peer.address());
     if (channel.Init(peerId.addr, NULL) != 0) {
         return butil::Status(-1, "Fail to init channel to %s",
-                            peerId.to_string().c_str());
+                             peerId.to_string().c_str());
     }
     brpc::Controller cntl;
     cntl.set_timeout_ms(options.timeout_ms);
diff --git a/src/chunkserver/cli2.h b/src/chunkserver/cli2.h
index ba60e057e7..512850b747 100644
--- a/src/chunkserver/cli2.h
+++ b/src/chunkserver/cli2.h
@@ -33,57 +33,50 @@ namespace curve {
 namespace chunkserver {

 /**
- * Cli就是配置变更相关接口的封装,方便使用,避免直接操作RPC
+ * Cli encapsulates the configuration-change interfaces for convenient use,
+ * avoiding direct RPC operations
 */

-// 获取leader
-butil::Status GetLeader(const LogicPoolID &logicPoolId,
-                        const CopysetID &copysetId,
-                        const Configuration &conf,
-                        Peer *leader);
-
-// 增加一个peer
-butil::Status AddPeer(const LogicPoolID
&logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options); - -// 移除一个peer -butil::Status RemovePeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options); - -// 变更配置 -butil::Status ChangePeers(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Configuration &newPeers, - const braft::cli::CliOptions &options); - -// 转移leader -butil::Status TransferLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options); - -// 重置复制组 -butil::Status ResetPeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration& newPeers, - const Peer& requestPeer, +// Get the leader +butil::Status GetLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + Peer* leader); + +// Add a peer +butil::Status AddPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, const braft::cli::CliOptions& options); + +// Remove a peer +butil::Status RemovePeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, + const braft::cli::CliOptions& options); + +// Change configuration +butil::Status ChangePeers(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Configuration& newPeers, + const braft::cli::CliOptions& options); + +// Transfer leader +butil::Status TransferLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf, const Peer& peer, + const braft::cli::CliOptions& options); + +// Reset replication group +butil::Status ResetPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& newPeers, const Peer& requestPeer, const braft::cli::CliOptions& options); -// 触发快照 -butil::Status Snapshot(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Peer& peer, +// Trigger snapshot +butil::Status Snapshot(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Peer& peer, const braft::cli::CliOptions& options); -// 给chunkserver上全部copyset副本触发快照 +// Trigger a snapshot for all copyset replicas on the chunkserver butil::Status SnapshotAll(const Peer& peer, const braft::cli::CliOptions& options); diff --git a/src/chunkserver/clone_copyer.h b/src/chunkserver/clone_copyer.h index 6ccb7d7dc1..3c640f4693 100644 --- a/src/chunkserver/clone_copyer.h +++ b/src/chunkserver/clone_copyer.h @@ -24,56 +24,57 @@ #define SRC_CHUNKSERVER_CLONE_COPYER_H_ #include + +#include #include -#include #include -#include +#include #include "include/chunkserver/chunkserver_common.h" -#include "src/common/location_operator.h" +#include "include/client/libcurve.h" +#include "src/client/client_common.h" #include "src/client/config_info.h" #include "src/client/libcurve_file.h" -#include "src/client/client_common.h" -#include "include/client/libcurve.h" +#include "src/common/location_operator.h" #include "src/common/s3_adapter.h" namespace curve { namespace chunkserver { -using curve::common::S3Adapter; using curve::client::FileClient; using curve::client::UserInfo; -using curve::common::LocationOperator; -using curve::common::OriginType; using curve::common::GetObjectAsyncCallBack; using curve::common::GetObjectAsyncContext; +using 
curve::common::LocationOperator;
+using curve::common::OriginType;
+using curve::common::S3Adapter;
 using std::string;

 class DownloadClosure;

 struct CopyerOptions {
-    // curvefs上的root用户信息
+    // Root user information on curvefs
     UserInfo curveUser;
-    // curvefs 的配置文件路径
+    // Path of the curvefs configuration file
     std::string curveConf;
-    // s3adapter 的配置文件路径
+    // Path of the s3adapter configuration file
     std::string s3Conf;
-    // curve client的对象指针
+    // Object pointer to the curve client
     std::shared_ptr<FileClient> curveClient;
-    // s3 adapter的对象指针
+    // Object pointer to the s3 adapter
     std::shared_ptr<S3Adapter> s3Client;
     // curve file's time to live
     uint64_t curveFileTimeoutSec;
 };

 struct AsyncDownloadContext {
-    // 源chunk的位置信息
+    // Location information of the source chunk
     string location;
-    // 请求下载数据在对象中的相对偏移
+    // Relative offset of the requested data within the object
     off_t offset;
-    // 请求下载数据的的长度
+    // Length of the requested data
     size_t size;
-    // 存放下载数据的缓冲区
+    // Buffer for the downloaded data
     char* buf;
 };

@@ -85,9 +86,9 @@ struct CurveOpenTimestamp {
     // lastest use time, using seconds
     int64_t lastUsedSec;
     // Init functions
-    CurveOpenTimestamp(): fd(-1), fileName(""), lastUsedSec(0) {}
-    CurveOpenTimestamp(int _fd, string _file, uint64_t _lastUsedSec):
-        fd(_fd), fileName(_file), lastUsedSec(_lastUsedSec) {}
+    CurveOpenTimestamp() : fd(-1), fileName(""), lastUsedSec(0) {}
+    CurveOpenTimestamp(int _fd, string _file, uint64_t _lastUsedSec)
+        : fd(_fd), fileName(_file), lastUsedSec(_lastUsedSec) {}
 };

 std::ostream& operator<<(std::ostream& out, const AsyncDownloadContext& rhs);

@@ -98,40 +99,34 @@ class OriginCopyer {
     virtual ~OriginCopyer() = default;

     /**
-     * 初始化资源
-     * @param options: 配置信息
-     * @return: 成功返回0,失败返回-1
+     * Initialize resources
+     * @param options: Configuration information
+     * @return: 0 on success, -1 on failure
      */
     virtual int Init(const CopyerOptions& options);

     /**
-     * 释放资源
-     * @return: 成功返回0,失败返回-1
+     * Release resources
+     * @return: 0 on success, -1 on failure
      */
     virtual int Fini();

     /**
-     * 异步地从源端拷贝数据
-     * @param done:包含下载请求的上下文信息,
-     * 数据下载完成后执行该closure进行回调
+     * Asynchronously copy data from the source
+     * @param done: carries the context of the download request; the closure
+     * is invoked as the callback once the download completes
      */
     virtual void DownloadAsync(DownloadClosure* done);

  private:
-    void DownloadFromS3(const string& objectName,
-                        off_t off,
-                        size_t size,
-                        char* buf,
-                        DownloadClosure* done);
-    void DownloadFromCurve(const string& fileName,
-                           off_t off,
-                           size_t size,
-                           char* buf,
-                           DownloadClosure* done);
+    void DownloadFromS3(const string& objectName, off_t off, size_t size,
+                        char* buf, DownloadClosure* done);
+    void DownloadFromCurve(const string& fileName, off_t off, size_t size,
+                           char* buf, DownloadClosure* done);

     static void DeleteExpiredCurveCache(void* arg);

  private:
-    // curvefs上的root用户信息
+    // Root user information on curvefs
     UserInfo curveUser_;
     // mutex for protect curveOpenTime_
     std::mutex timeMtx_;
     std::list<CurveOpenTimestamp> curveOpenTime_;
     // curve file's time to live
     uint64_t curveFileTimeoutSec_;
-    // 负责跟curve交互
+    // Responsible for interacting with curve
     std::shared_ptr<FileClient> curveClient_;
-    // 负责跟s3交互
-    std::shared_ptr<S3Adapter> s3Client_;
-    // 保护fdMap_的互斥锁
-    std::mutex mtx_;
-    // 文件名->文件fd 的映射
+    // Responsible for interacting with s3
+    std::shared_ptr<S3Adapter> s3Client_;
+    // Mutex protecting fdMap_
+    std::mutex mtx_;
+    // Mapping from file name to file fd
     std::unordered_map<std::string, int> fdMap_;
     // Timer for clean expired curve file
     bthread::TimerThread timer_;
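The curveOpenTime_ list and curveFileTimeoutSec_ above drive the cleanup that DeleteExpiredCurveCache() performs on the timer. One plausible shape of such a TTL sweep, as a standalone sketch under assumed names (OpenStamp, CollectExpiredFds) rather than the actual implementation:

#include <cstdint>
#include <ctime>
#include <list>
#include <vector>

struct OpenStamp {
    int fd;
    int64_t lastUsedSec;  // last time the cached fd was used
};

// Collect fds whose idle time exceeds ttlSec; the caller would then
// Close() them and drop the matching fdMap_ entries under mtx_.
std::vector<int> CollectExpiredFds(const std::list<OpenStamp>& open,
                                   uint64_t ttlSec) {
    std::vector<int> expired;
    const int64_t nowSec = ::time(nullptr);
    for (const auto& entry : open) {
        if (nowSec - entry.lastUsedSec >= static_cast<int64_t>(ttlSec)) {
            expired.push_back(entry.fd);
        }
    }
    return expired;
}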
bthread::TimerThread timer_;
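
The DownloadClosure/AsyncDownloadContext pair declared above follows a common brpc-style ownership pattern: the closure is heap-allocated, handed to the copyer, and when its Run() fires it takes ownership of itself, its context, and the data buffer, freeing all of them exactly once before chaining to the next callback. A minimal, dependency-free sketch of that pattern (simplified stand-in types, not the classes used in this patch; the location string is a made-up example):

#include <sys/types.h>
#include <cstddef>
#include <functional>
#include <iostream>
#include <memory>
#include <string>

// Hypothetical stand-ins for AsyncDownloadContext/DownloadClosure.
struct DownloadContext {
    std::string location;  // source location of the data
    off_t offset = 0;      // offset of the requested data
    size_t size = 0;       // length of the requested data
    char* buf = nullptr;   // buffer owned by the closure once submitted
};

class DownloadDone {
 public:
    DownloadDone(DownloadContext* ctx, std::function<void(bool)> done)
        : ctx_(ctx), done_(std::move(done)) {}

    void SetFailed() { failed_ = true; }

    // Run() is invoked exactly once on completion; the guards below free the
    // closure, the context and the buffer, mirroring the unique_ptr guards
    // used by DownloadClosure::Run() in this patch.
    void Run() {
        std::unique_ptr<DownloadDone> selfGuard(this);
        std::unique_ptr<DownloadContext> ctxGuard(ctx_);
        std::unique_ptr<char[]> bufGuard(ctx_->buf);
        done_(failed_);
    }

 private:
    DownloadContext* ctx_;
    std::function<void(bool)> done_;
    bool failed_ = false;
};

int main() {
    auto* ctx = new DownloadContext{"file1:0@cs", 0, 4096, new char[4096]};
    auto* done = new DownloadDone(ctx, [](bool failed) {
        std::cout << (failed ? "download failed" : "download ok") << "\n";
    });
    // A real copyer would hand `done` to an asynchronous download;
    // here completion is simply simulated.
    done->Run();
    return 0;
}
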
diff --git a/src/chunkserver/clone_core.cpp b/src/chunkserver/clone_core.cpp index b3efe70f36..422a5cce31 100644 --- a/src/chunkserver/clone_core.cpp +++ b/src/chunkserver/clone_core.cpp @@ -20,451 +20,479 @@ * Author: yangyaokai */ -#include +#include "src/chunkserver/clone_core.h" + #include +#include -#include "src/common/bitmap.h" -#include "src/chunkserver/clone_core.h" -#include "src/chunkserver/op_request.h" -#include "src/chunkserver/copyset_node.h" #include "src/chunkserver/chunk_service_closure.h" +#include "src/chunkserver/copyset_node.h" #include "src/chunkserver/datastore/chunkserver_datastore.h" +#include "src/chunkserver/op_request.h" +#include "src/common/bitmap.h" #include "src/common/timeutility.h" -namespace curve { -namespace chunkserver { - -using curve::common::Bitmap; -using curve::common::TimeUtility; - -static void ReadBufferDeleter(void* ptr) { - delete[] static_cast(ptr); -} - -DownloadClosure::DownloadClosure(std::shared_ptr readRequest, - std::shared_ptr cloneCore, - AsyncDownloadContext* downloadCtx, - Closure* done) - : isFailed_(false) - , beginTime_(TimeUtility::GetTimeofDayUs()) - , downloadCtx_(downloadCtx) - , cloneCore_(cloneCore) - , readRequest_(readRequest) - , done_(done) { - // 记录初始metric - if (readRequest_ != nullptr) { - const ChunkRequest* request = readRequest_->GetChunkRequest(); - ChunkServerMetric* csMetric = ChunkServerMetric::GetInstance(); - csMetric->OnRequest(request->logicpoolid(), - request->copysetid(), - CSIOMetricType::DOWNLOAD); - } -} - -void DownloadClosure::Run() { - std::unique_ptr selfGuard(this); - std::unique_ptr contextGuard(downloadCtx_); - brpc::ClosureGuard doneGuard(done_); - butil::IOBuf copyData; - copyData.append_user_data( - downloadCtx_->buf, downloadCtx_->size, ReadBufferDeleter); - - CHECK(readRequest_ != nullptr) << "read request is nullptr."; - // 记录结束metric - const ChunkRequest* request = readRequest_->GetChunkRequest(); - ChunkServerMetric* csMetric = ChunkServerMetric::GetInstance(); - uint64_t latencyUs = TimeUtility::GetTimeofDayUs() - beginTime_; - csMetric->OnResponse(request->logicpoolid(), - request->copysetid(), - CSIOMetricType::DOWNLOAD, - downloadCtx_->size, - latencyUs, - isFailed_); - - // 从源端拷贝数据失败 - if (isFailed_) { - LOG(ERROR) << "download origin data failed: " - << " logic pool id: " << request->logicpoolid() - << " copyset id: " << request->copysetid() - << " chunkid: " << request->chunkid() - << " AsyncDownloadContext: " << *downloadCtx_; - cloneCore_->SetResponse( - readRequest_, CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - return; - } - - if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) { - // release doneGuard,将closure交给paste请求处理 - cloneCore_->PasteCloneData(readRequest_, - &copyData, - downloadCtx_->offset, - downloadCtx_->size, - doneGuard.release()); - } else if (CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype()) { - // 出错或处理结束调用closure返回给用户 - cloneCore_->SetReadChunkResponse(readRequest_, &copyData); - - // paste clone data是异步操作,很快就能处理完 - cloneCore_->PasteCloneData(readRequest_, - &copyData, - downloadCtx_->offset, - downloadCtx_->size, - nullptr); - } -} - -void CloneClosure::Run() { - // 释放资源 - std::unique_ptr selfGuard(this); - std::unique_ptr requestGuard(request_); - std::unique_ptr responseGuard(response_); - brpc::ClosureGuard doneGuard(done_); - // 如果userResponse不为空,需要将response_中的相关内容赋值给userResponse - if (userResponse_ != nullptr) { - if (response_->has_status()) { - userResponse_->set_status(response_->status()); +namespace curve +{ + namespace chunkserver + { + + using curve::common::Bitmap; + using curve::common::TimeUtility; + + static void ReadBufferDeleter(void *ptr) { delete[] static_cast(ptr); } + + DownloadClosure::DownloadClosure(std::shared_ptr readRequest, + std::shared_ptr cloneCore, + AsyncDownloadContext *downloadCtx, + Closure *done) + : isFailed_(false), + beginTime_(TimeUtility::GetTimeofDayUs()), + downloadCtx_(downloadCtx), + cloneCore_(cloneCore), + readRequest_(readRequest), + done_(done) + { + // Record initial metric + if (readRequest_ != nullptr) + { + const ChunkRequest *request = readRequest_->GetChunkRequest(); + ChunkServerMetric *csMetric = ChunkServerMetric::GetInstance(); + csMetric->OnRequest(request->logicpoolid(), request->copysetid(), + CSIOMetricType::DOWNLOAD); + } + } + + void DownloadClosure::Run() + { + std::unique_ptr selfGuard(this); + std::unique_ptr contextGuard(downloadCtx_); + brpc::ClosureGuard doneGuard(done_); + butil::IOBuf copyData; + copyData.append_user_data(downloadCtx_->buf, downloadCtx_->size, + ReadBufferDeleter); + + CHECK(readRequest_ != nullptr) << "read request is nullptr."; + // Record end metric + const ChunkRequest *request = readRequest_->GetChunkRequest(); + ChunkServerMetric *csMetric = ChunkServerMetric::GetInstance(); + uint64_t latencyUs = TimeUtility::GetTimeofDayUs() - beginTime_; + csMetric->OnResponse(request->logicpoolid(), request->copysetid(), + CSIOMetricType::DOWNLOAD, downloadCtx_->size, + latencyUs, isFailed_); + + // Copying data from the source failed + if (isFailed_) + { + LOG(ERROR) << "download origin data failed: " + << " logic pool id: " << request->logicpoolid() + << " copyset id: " << request->copysetid() + << " chunkid: " << request->chunkid() + << " AsyncDownloadContext: " << *downloadCtx_; + cloneCore_->SetResponse( + readRequest_, CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + return; + } + + if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) + { + // Release doneGuard and hand the closure over to the paste request + // for processing + cloneCore_->PasteCloneData(readRequest_, &copyData, + downloadCtx_->offset, downloadCtx_->size, + doneGuard.release()); + } + else if (CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype()) + { + // On error or completion, the closure is invoked to return to the user + cloneCore_->SetReadChunkResponse(readRequest_, &copyData); + + // Pasting clone data is an asynchronous operation and finishes + // quickly + cloneCore_->PasteCloneData(readRequest_, &copyData, + downloadCtx_->offset, downloadCtx_->size, + nullptr); + } + } + + void CloneClosure::Run() + { + // Release resources + std::unique_ptr selfGuard(this); + std::unique_ptr requestGuard(request_); + std::unique_ptr responseGuard(response_); + brpc::ClosureGuard doneGuard(done_); + // If userResponse is not null, assign the relevant fields of response_ + // to userResponse + if (userResponse_ != nullptr) + { + if (response_->has_status()) + { + userResponse_->set_status(response_->status()); + } + if (response_->has_redirect()) + { + userResponse_->set_redirect(response_->redirect()); + } + if (response_->has_appliedindex()) + { + userResponse_->set_appliedindex(response_->appliedindex()); + } + } } + + int CloneCore::CloneReadByLocalInfo( + std::shared_ptr readRequest, const CSChunkInfo &chunkInfo, + Closure *done) + { + brpc::ClosureGuard doneGuard(done); + const ChunkRequest *request = readRequest->request_; + off_t offset = request->offset(); + size_t length = request->size(); + const uint32_t blockSize = chunkInfo.blockSize; + + // offset and length must be aligned with blockSize + if (offset % blockSize != 0 || length % blockSize != 0) + { + LOG(ERROR) << "Invalid offset or length: " + << " logic pool id: " << request->logicpoolid() + << " copyset id: " << request->copysetid() + << " chunkid: " << request->chunkid() + << " offset: " << offset << " length: " << length + << " block size: " << blockSize; + SetResponse(readRequest, + CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); + return -1; + } + + uint32_t beginIndex = offset / blockSize; + uint32_t endIndex = (offset + length - 1) / blockSize; + + // When the request is submitted to CloneManager, the chunk must be a + // clone chunk. However, other requests to the same chunk may have fully + // overwritten it by now, so first check whether the chunk is still a + // clone chunk, and only then decide whether data needs to be copied + bool needClone = chunkInfo.isClone && + (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex) != + Bitmap::NO_POS); + if (needClone) + { + // TODO(yyk): this block can be optimized, but the conditions involved + // may be complex. Currently the decision to trigger copying is based + // only on whether unwritten pages exist: if any page within the + // requested read range has not been written, data must be copied + // from the source side + AsyncDownloadContext *downloadCtx = + new (std::nothrow) AsyncDownloadContext; + downloadCtx->location = chunkInfo.location; + downloadCtx->offset = offset; + downloadCtx->size = length; + downloadCtx->buf = new (std::nothrow) char[length]; + DownloadClosure *downloadClosure = new (std::nothrow) DownloadClosure( + readRequest, shared_from_this(), downloadCtx, doneGuard.release()); + copyer_->DownloadAsync(downloadClosure); + return 0; + } + + // Reaching this point means no data needs to be copied. A recover + // request can return success directly; a ReadChunk request reads the + // chunk directly and returns + if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) + { + SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + } + else if (CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype()) + { + // On error or completion, the closure is invoked to return to the user + return ReadChunk(readRequest); + } + return 0; + } + + void CloneCore::CloneReadByRequestInfo( + std::shared_ptr readRequest, Closure *done) + { + brpc::ClosureGuard doneGuard(done); + const ChunkRequest *chunkRequest = readRequest->request_; + + auto func = ::curve::common::LocationOperator::GenerateCurveLocation; + std::string location = + func(chunkRequest->clonefilesource(), chunkRequest->clonefileoffset()); + + AsyncDownloadContext *downloadCtx = new (std::nothrow) AsyncDownloadContext; + downloadCtx->location = location; + downloadCtx->offset = chunkRequest->offset(); + downloadCtx->size = chunkRequest->size(); + downloadCtx->buf = new (std::nothrow) char[chunkRequest->size()]; + DownloadClosure *downloadClosure = new (std::nothrow) DownloadClosure( + readRequest, shared_from_this(), downloadCtx, doneGuard.release()); + copyer_->DownloadAsync(downloadClosure); + return; } + + int CloneCore::HandleReadRequest(std::shared_ptr readRequest, + Closure *done) + { + brpc::ClosureGuard doneGuard(done); + const ChunkRequest *request = readRequest->request_; + + // Obtain chunk information + CSChunkInfo chunkInfo; + ChunkID id = readRequest->ChunkId(); + std::shared_ptr dataStore = readRequest->datastore_; + CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo); + + /* + * Chunk exists: check the bitmap to decide whether the read can be + * served locally. + * Chunk does not exist: if the request carries clone information, read + * from the clone source; otherwise return an error, because the upper + * layer ReadChunkRequest::OnApply has already handled the case of + * NoExist without clone info + */ + switch (errorCode) + { + case CSErrorCode::Success: + return CloneReadByLocalInfo(readRequest, chunkInfo, + doneGuard.release()); + case CSErrorCode::ChunkNotExistError: + if (existCloneInfo(request)) + { + CloneReadByRequestInfo(readRequest, doneGuard.release()); + return 0; + } + // Otherwise fall through and return an error directly + FALLTHROUGH_INTENDED; + default: + LOG(ERROR) << "get chunkinfo failed: " + << " logic pool id: " << request->logicpoolid() + << " copyset id: " << request->copysetid() + << " chunkid: " << request->chunkid() + << " error code: " << errorCode; + SetResponse(readRequest, + CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + return -1; + } } -int CloneCore::CloneReadByLocalInfo( - std::shared_ptr readRequest, - const CSChunkInfo &chunkInfo, Closure* done) { - brpc::ClosureGuard doneGuard(done); - const ChunkRequest* request = readRequest->request_; - off_t offset = request->offset(); - size_t length = request->size(); - const uint32_t blockSize = chunkInfo.blockSize; - - // offset and length must be aligned with blockSize - if (offset % blockSize != 0 || length % blockSize != 0) { - LOG(ERROR) << "Invalid offset or length: " - << " logic pool id: " << request->logicpoolid() - << " copyset id: " << request->copysetid() - << " chunkid: " << request->chunkid() - << " offset: " << offset - << " length: " << length - << " block size: " 
<< blockSize; - SetResponse(readRequest, - CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); - return -1; - } - - uint32_t beginIndex = offset / blockSize; - uint32_t endIndex = (offset + length - 1) / blockSize; - - // 请求提交到CloneManager的时候,chunk一定是clone chunk - // 但是由于有其他请求操作相同的chunk,此时chunk有可能已经被遍写过了 - // 所以此处要先判断chunk是否是clone chunk,如果是再判断是否要拷贝数据 - bool needClone = chunkInfo.isClone && - (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex) - != Bitmap::NO_POS); - if (needClone) { - // TODO(yyk) 这一块可以优化,但是优化方法判断条件可能比较复杂 - // 目前只根据是否存在未写过的page来决定是否要触发拷贝 - // chunk中请求读取范围内的数据存在page未被写过,则需要从源端拷贝数据 - AsyncDownloadContext* downloadCtx = - new (std::nothrow) AsyncDownloadContext; - downloadCtx->location = chunkInfo.location; - downloadCtx->offset = offset; - downloadCtx->size = length; - downloadCtx->buf = new (std::nothrow) char[length]; - DownloadClosure* downloadClosure = - new (std::nothrow) DownloadClosure(readRequest, - shared_from_this(), - downloadCtx, - doneGuard.release()); - copyer_->DownloadAsync(downloadClosure); - return 0; - } - - // 执行到这一步说明不需要拷贝数据,如果是recover请求可以直接返回成功 - // 如果是ReadChunk请求,则直接读chunk并返回 - if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) { - SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - } else if (CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype()) { - // 出错或处理结束调用closure返回给用户 - return ReadChunk(readRequest); - } - return 0; -} - -void CloneCore::CloneReadByRequestInfo(std::shared_ptr - readRequest, Closure* done) { - brpc::ClosureGuard doneGuard(done); - const ChunkRequest* chunkRequest = readRequest->request_; - - auto func = ::curve::common::LocationOperator::GenerateCurveLocation; - std::string location = func(chunkRequest->clonefilesource(), - chunkRequest->clonefileoffset()); - - AsyncDownloadContext* downloadCtx = - new (std::nothrow) AsyncDownloadContext; - downloadCtx->location = location; - downloadCtx->offset = chunkRequest->offset(); - downloadCtx->size = chunkRequest->size(); - downloadCtx->buf = new (std::nothrow) char[chunkRequest->size()]; - DownloadClosure* downloadClosure = - new (std::nothrow) DownloadClosure(readRequest, - shared_from_this(), - downloadCtx, - doneGuard.release()); - copyer_->DownloadAsync(downloadClosure); - return; -} - -int CloneCore::HandleReadRequest( - std::shared_ptr readRequest, - Closure* done) { - brpc::ClosureGuard doneGuard(done); - const ChunkRequest* request = readRequest->request_; - - // 获取chunk信息 - CSChunkInfo chunkInfo; - ChunkID id = readRequest->ChunkId(); - std::shared_ptr dataStore = readRequest->datastore_; - CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo); - - /* - * chunk存在:按照查看分析bitmap判断是否可以本地读 - * chunk不存在:如包含clone信息则从clonesource读,否则返回错误 - * 因为上层ReadChunkRequest::OnApply已经处理了NoExist - * 并且cloneinfo不存在的情况 - */ - switch (errorCode) { - case CSErrorCode::Success: - return CloneReadByLocalInfo(readRequest, chunkInfo, - doneGuard.release()); - case CSErrorCode::ChunkNotExistError: - if (existCloneInfo(request)) { - CloneReadByRequestInfo(readRequest, doneGuard.release()); + + int CloneCore::ReadChunk(std::shared_ptr readRequest) + { + const ChunkRequest *request = readRequest->request_; + off_t offset = request->offset(); + size_t length = request->size(); + std::unique_ptr chunkData(new char[length]); + std::shared_ptr dataStore = readRequest->datastore_; + CSErrorCode errorCode; + errorCode = dataStore->ReadChunk(request->chunkid(), request->sn(), + chunkData.get(), offset, length); + if (CSErrorCode::Success != errorCode) + { + SetResponse(readRequest, + 
CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + LOG(ERROR) << "read chunk failed: " + << " logic pool id: " << request->logicpoolid() + << " copyset id: " << request->copysetid() + << " chunkid: " << request->chunkid() + << " read offset: " << offset << " read length: " << length + << " error code: " << errorCode; + return -1; + } + + // After successful reading, update the apply index + readRequest->node_->UpdateAppliedIndex(readRequest->applyIndex); + // After completing the data reading, Return can return the results to the + // user + readRequest->cntl_->response_attachment().append(chunkData.get(), length); + SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); return 0; } - // 否则fallthrough直接返回错误 - FALLTHROUGH_INTENDED; - default: - LOG(ERROR) << "get chunkinfo failed: " - << " logic pool id: " << request->logicpoolid() - << " copyset id: " << request->copysetid() - << " chunkid: " << request->chunkid() - << " error code: " << errorCode; - SetResponse(readRequest, - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - return -1; - } -} - -int CloneCore::ReadChunk(std::shared_ptr readRequest) { - const ChunkRequest* request = readRequest->request_; - off_t offset = request->offset(); - size_t length = request->size(); - std::unique_ptr chunkData(new char[length]); - std::shared_ptr dataStore = readRequest->datastore_; - CSErrorCode errorCode; - errorCode = dataStore->ReadChunk(request->chunkid(), - request->sn(), - chunkData.get(), - offset, - length); - if (CSErrorCode::Success != errorCode) { - SetResponse(readRequest, - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - LOG(ERROR) << "read chunk failed: " - << " logic pool id: " << request->logicpoolid() - << " copyset id: " << request->copysetid() - << " chunkid: " << request->chunkid() - << " read offset: " << offset - << " read length: " << length - << " error code: " << errorCode; - return -1; - } - - // 读成功后需要更新 apply index - readRequest->node_->UpdateAppliedIndex(readRequest->applyIndex); - // Return 完成数据读取后可以将结果返回给用户 - readRequest->cntl_->response_attachment().append( - chunkData.get(), length); - SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - return 0; -} - -int CloneCore::SetReadChunkResponse( - std::shared_ptr readRequest, - const butil::IOBuf* cloneData) { - const ChunkRequest* request = readRequest->request_; - CSChunkInfo chunkInfo; - ChunkID id = readRequest->ChunkId(); - std::shared_ptr dataStore = readRequest->datastore_; - CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo); - - // 如果chunk不存在,需要判断请求是否带源chunk的信息 - // 如果带了源chunk信息,说明用了lazy分配chunk机制,可以直接返回clone data - // 有一种情况,当请求的chunk是lazy allocate的,请求时chunk在本地是存在的, - // 并且请求读取的部分区域已经被写过,在从源端拷贝数据的时候,chunk又被删除了 - // 这种情况下会被当成正常请求返回,但是返回的数据不符合预期 - // 由于当前我们的curve file都是延迟删除的,文件真正删除时能够确保没有用户IO - // 如果后续添加了一些改动触发到这个问题,则需要进行修复 - // TODO(yyk) fix it - bool expect = errorCode == CSErrorCode::Success || - (errorCode == CSErrorCode::ChunkNotExistError && - existCloneInfo(request)); - if (!expect) { - LOG(ERROR) << "get chunkinfo failed: " - << " logic pool id: " << request->logicpoolid() - << " copyset id: " << request->copysetid() - << " chunkid: " << request->chunkid() - << " error code: " << errorCode; - SetResponse(readRequest, - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - return -1; - } - - size_t length = request->size(); - butil::IOBuf responseData; - // 如果chunk存在,则要从chunk中读取已经写过的区域合并后返回 - if (errorCode == CSErrorCode::Success) { - char* chunkData = new (std::nothrow) char[length]; - int ret = 
ReadThenMerge( - readRequest, chunkInfo, cloneData, chunkData); - responseData.append_user_data(chunkData, length, ReadBufferDeleter); - if (ret < 0) { - SetResponse(readRequest, - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - return ret; + + int CloneCore::SetReadChunkResponse( + std::shared_ptr readRequest, + const butil::IOBuf *cloneData) + { + const ChunkRequest *request = readRequest->request_; + CSChunkInfo chunkInfo; + ChunkID id = readRequest->ChunkId(); + std::shared_ptr dataStore = readRequest->datastore_; + CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo); + + // If the chunk does not exist, it is necessary to determine whether the + // request contains information about the source chunk If the source chunk + // information is provided, it indicates that the lazy allocation chunk + // mechanism is used, and clone data can be directly returned There is a + // situation where the requested chunk is lazily allocated and the requested + // chunk exists locally, And the requested read area has already been + // written, and when copying data from the source, the chunk has been + // deleted again In this case, it will be returned as a normal request, but + // the returned data does not meet expectations Due to the current delayed + // deletion of our curve files, it is ensured that there is no user IO when + // the files are truly deleted If some changes are added later that trigger + // this issue, it needs to be fixed + // TODO(yyk) fix it + bool expect = errorCode == CSErrorCode::Success || + (errorCode == CSErrorCode::ChunkNotExistError && + existCloneInfo(request)); + if (!expect) + { + LOG(ERROR) << "get chunkinfo failed: " + << " logic pool id: " << request->logicpoolid() + << " copyset id: " << request->copysetid() + << " chunkid: " << request->chunkid() + << " error code: " << errorCode; + SetResponse(readRequest, + CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + return -1; + } + + size_t length = request->size(); + butil::IOBuf responseData; + // If a chunk exists, read the regions that have already been written from + // the chunk and merge them back + if (errorCode == CSErrorCode::Success) + { + char *chunkData = new (std::nothrow) char[length]; + int ret = ReadThenMerge(readRequest, chunkInfo, cloneData, chunkData); + responseData.append_user_data(chunkData, length, ReadBufferDeleter); + if (ret < 0) + { + SetResponse(readRequest, + CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + return ret; + } + } + else + { + responseData = *cloneData; + } + readRequest->cntl_->response_attachment().append(responseData); + + // After successful reading, update the apply index + readRequest->node_->UpdateAppliedIndex(readRequest->applyIndex); + SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + return 0; } - } else { - responseData = *cloneData; - } - readRequest->cntl_->response_attachment().append(responseData); - - // 读成功后需要更新 apply index - readRequest->node_->UpdateAppliedIndex(readRequest->applyIndex); - SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - return 0; -} - -int CloneCore::ReadThenMerge(std::shared_ptr readRequest, - const CSChunkInfo& chunkInfo, - const butil::IOBuf* cloneData, - char* chunkData) { - const ChunkRequest* request = readRequest->request_; - std::shared_ptr dataStore = readRequest->datastore_; - - off_t offset = request->offset(); - size_t length = request->size(); - uint32_t blockSize = chunkInfo.blockSize; - uint32_t beginIndex = offset / blockSize; - uint32_t endIndex = (offset + 
length - 1) / blockSize; - // 获取chunk文件已经写过和未被写过的区域 - std::vector copiedRanges; - std::vector uncopiedRanges; - if (chunkInfo.isClone) { - chunkInfo.bitmap->Divide(beginIndex, - endIndex, - &uncopiedRanges, - &copiedRanges); - } else { - BitRange range; - range.beginIndex = beginIndex; - range.endIndex = endIndex; - copiedRanges.push_back(range); - } - - // 需要读取的起始位置在chunk中的偏移 - off_t readOff; - // 读取到的数据要拷贝到缓冲区中的相对偏移 - off_t relativeOff; - // 每次从chunk读取的数据长度 - size_t readSize; - // 1.Read 对于已写过的区域,从chunk文件中读取 - CSErrorCode errorCode; - for (auto& range : copiedRanges) { - readOff = range.beginIndex * blockSize; - readSize = (range.endIndex - range.beginIndex + 1) * blockSize; - relativeOff = readOff - offset; - errorCode = dataStore->ReadChunk(request->chunkid(), - request->sn(), - chunkData + relativeOff, - readOff, - readSize); - if (CSErrorCode::Success != errorCode) { - LOG(ERROR) << "read chunk failed: " - << " logic pool id: " << request->logicpoolid() - << " copyset id: " << request->copysetid() - << " chunkid: " << request->chunkid() - << " read offset: " << readOff - << " read length: " << readSize - << " error code: " << errorCode; - return -1; + + int CloneCore::ReadThenMerge(std::shared_ptr readRequest, + const CSChunkInfo &chunkInfo, + const butil::IOBuf *cloneData, char *chunkData) + { + const ChunkRequest *request = readRequest->request_; + std::shared_ptr dataStore = readRequest->datastore_; + + off_t offset = request->offset(); + size_t length = request->size(); + uint32_t blockSize = chunkInfo.blockSize; + uint32_t beginIndex = offset / blockSize; + uint32_t endIndex = (offset + length - 1) / blockSize; + // Obtain the regions of the chunk file that have and have not been written + std::vector copiedRanges; + std::vector uncopiedRanges; + if (chunkInfo.isClone) + { + chunkInfo.bitmap->Divide(beginIndex, endIndex, &uncopiedRanges, + &copiedRanges); + } + else + { + BitRange range; + range.beginIndex = beginIndex; + range.endIndex = endIndex; + copiedRanges.push_back(range); + } + + // The offset within the chunk of the starting position to be read + off_t readOff; + // The relative offset in the buffer where the read data is copied + off_t relativeOff; + // The length of data read from the chunk each time + size_t readSize; + // 1. Read: for regions that have already been written, read from the + // chunk file + CSErrorCode errorCode; + for (auto &range : copiedRanges) + { + readOff = range.beginIndex * blockSize; + readSize = (range.endIndex - range.beginIndex + 1) * blockSize; + relativeOff = readOff - offset; + errorCode = + dataStore->ReadChunk(request->chunkid(), request->sn(), + chunkData + relativeOff, readOff, readSize); + if (CSErrorCode::Success != errorCode) + { + LOG(ERROR) << "read chunk failed: " + << " logic pool id: " << request->logicpoolid() + << " copyset id: " << request->copysetid() + << " chunkid: " << request->chunkid() + << " read offset: " << readOff + << " read length: " << readSize + << " error code: " << errorCode; + return -1; + } + } + + // 2. Merge: for regions that have not been written, copy the data out + // of the region downloaded from the source and merge it in + for (auto &range : uncopiedRanges) + { + readOff = range.beginIndex * blockSize; + readSize = (range.endIndex - range.beginIndex + 1) * blockSize; + relativeOff = readOff - offset; + cloneData->copy_to(chunkData + relativeOff, readSize, relativeOff); + } + return 0; + } + + void CloneCore::PasteCloneData(std::shared_ptr readRequest, + const butil::IOBuf *cloneData, off_t offset, + size_t cloneDataSize, Closure *done) + { + const ChunkRequest *request = readRequest->request_; + bool dontPaste = + CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype() && !enablePaste_; + if (dontPaste) + return; + + // After the data copy completes, a PasteChunkRequest must be generated + // to paste the data into the chunk file + ChunkRequest *pasteRequest = new ChunkRequest(); + pasteRequest->set_optype(curve::chunkserver::CHUNK_OP_TYPE::CHUNK_OP_PASTE); + pasteRequest->set_logicpoolid(request->logicpoolid()); + pasteRequest->set_copysetid(request->copysetid()); + pasteRequest->set_chunkid(request->chunkid()); + pasteRequest->set_offset(offset); + pasteRequest->set_size(cloneDataSize); + std::shared_ptr req = nullptr; + + ChunkResponse *pasteResponse = new ChunkResponse(); + CloneClosure *closure = new CloneClosure(); + closure->SetRequest(pasteRequest); + closure->SetResponse(pasteResponse); + closure->SetClosure(done); + // If it is a recover chunk request, the result of the paste needs to + // be returned through rpc + if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) + { + closure->SetUserResponse(readRequest->response_); + } + + ChunkServiceClosure *pasteClosure = new (std::nothrow) + ChunkServiceClosure(nullptr, pasteRequest, pasteResponse, closure); + + req = std::make_shared( + readRequest->node_, pasteRequest, pasteResponse, cloneData, + pasteClosure); + req->Process(); } - } - - // 2.Merge 对于未写过的区域,从源端下载的区域中拷贝出来进行merge - for (auto& range : uncopiedRanges) { - readOff = range.beginIndex * blockSize; - readSize = (range.endIndex - range.beginIndex + 1) * blockSize; - relativeOff = readOff - offset; - cloneData->copy_to(chunkData + relativeOff, readSize, relativeOff); - } - return 0; -} - -void CloneCore::PasteCloneData(std::shared_ptr readRequest, - const butil::IOBuf* cloneData, - off_t offset, - size_t cloneDataSize, - Closure* done) { - const ChunkRequest* request = readRequest->request_; - bool dontPaste = CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype() - && !enablePaste_; - if (dontPaste) return; - - // 数据拷贝完成以后,需要将产生PaseChunkRequest将数据Paste到chunk文件 - ChunkRequest* pasteRequest = new ChunkRequest(); - pasteRequest->set_optype(curve::chunkserver::CHUNK_OP_TYPE::CHUNK_OP_PASTE); - pasteRequest->set_logicpoolid(request->logicpoolid()); - pasteRequest->set_copysetid(request->copysetid()); - pasteRequest->set_chunkid(request->chunkid()); - pasteRequest->set_offset(offset); - pasteRequest->set_size(cloneDataSize); - std::shared_ptr req = nullptr; - - ChunkResponse* pasteResponse = new ChunkResponse(); - CloneClosure* closure = new CloneClosure(); - closure->SetRequest(pasteRequest); - closure->SetResponse(pasteResponse); - closure->SetClosure(done); - // 如果是recover chunk的请求,需要将paste的结果通过rpc返回 - if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) { - closure->SetUserResponse(readRequest->response_); - } - - ChunkServiceClosure* pasteClosure = - new (std::nothrow) ChunkServiceClosure(nullptr, - pasteRequest, - pasteResponse, - closure); - - req = std::make_shared(readRequest->node_, - pasteRequest, - pasteResponse, - cloneData, - pasteClosure); - req->Process(); -} - -inline void CloneCore::SetResponse( - std::shared_ptr readRequest, CHUNK_OP_STATUS status) { - auto applyIndex = readRequest->node_->GetAppliedIndex(); - readRequest->response_->set_appliedindex(applyIndex); - readRequest->response_->set_status(status); -} - -} // namespace chunkserver -} // namespace curve + + inline void CloneCore::SetResponse( + std::shared_ptr readRequest, CHUNK_OP_STATUS status) + { + auto applyIndex = readRequest->node_->GetAppliedIndex(); + readRequest->response_->set_appliedindex(applyIndex); + readRequest->response_->set_status(status); + } + + } // namespace chunkserver +} // namespace curve
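 
The read-then-merge logic above splits the requested block range into "already written" and "not yet written" runs using the chunk's bitmap, reads the former locally and fills the latter from the downloaded clone data. A dependency-free sketch of that split-and-merge idea (toy types; the real code uses Bitmap::Divide over BitRange):

#include <cstddef>
#include <cstring>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Simplified stand-in for bitmap Divide(): split [begin, end] into runs of
// written (true) and unwritten (false) blocks, inclusive block indices.
using Range = std::pair<size_t, size_t>;

static void Divide(const std::vector<bool>& written, size_t begin, size_t end,
                   std::vector<Range>* uncopied, std::vector<Range>* copied) {
    size_t runStart = begin;
    for (size_t i = begin; i <= end; ++i) {
        bool last = (i == end);
        bool nextDiffers = !last && written[i + 1] != written[i];
        if (last || nextDiffers) {
            (written[runStart] ? copied : uncopied)->push_back({runStart, i});
            runStart = i + 1;
        }
    }
}

int main() {
    const size_t kBlock = 4;                                 // toy block size
    std::vector<bool> written = {true, false, false, true};  // per-block state
    std::vector<Range> copied, uncopied;
    Divide(written, 0, 3, &uncopied, &copied);

    char local[16];      // pretend local chunk data
    char cloneData[16];  // pretend data downloaded from the source
    std::memset(local, 'L', sizeof(local));
    std::memset(cloneData, 'S', sizeof(cloneData));

    char merged[16];
    for (const auto& r : copied)    // 1. Read: written runs come from local
        std::memcpy(merged + r.first * kBlock, local + r.first * kBlock,
                    (r.second - r.first + 1) * kBlock);
    for (const auto& r : uncopied)  // 2. Merge: unwritten runs from the source
        std::memcpy(merged + r.first * kBlock, cloneData + r.first * kBlock,
                    (r.second - r.first + 1) * kBlock);

    std::cout << std::string(merged, sizeof(merged)) << "\n";  // LLLLSSSSSSSSLLLL
    return 0;
}
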
diff --git a/src/chunkserver/clone_core.h b/src/chunkserver/clone_core.h index c91183feb3..3f3eb2ef69 100644 --- a/src/chunkserver/clone_core.h +++ b/src/chunkserver/clone_core.h @@ -23,25 +23,26 @@ #ifndef SRC_CHUNKSERVER_CLONE_CORE_H_ #define SRC_CHUNKSERVER_CLONE_CORE_H_ +#include #include #include #include -#include + #include -#include "proto/chunk.pb.h" #include "include/chunkserver/chunkserver_common.h" -#include "src/common/timeutility.h" +#include "proto/chunk.pb.h" #include "src/chunkserver/clone_copyer.h" #include "src/chunkserver/datastore/define.h" +#include "src/common/timeutility.h" namespace curve { namespace chunkserver { +using common::TimeUtility; +using curve::chunkserver::CSChunkInfo; using ::google::protobuf::Closure; using ::google::protobuf::Message; -using curve::chunkserver::CSChunkInfo; -using common::TimeUtility; class ReadChunkRequest; class PasteChunkInternalRequest; @@ -51,151 +52,147 @@ class DownloadClosure : public Closure { public: DownloadClosure(std::shared_ptr readRequest, std::shared_ptr cloneCore, - AsyncDownloadContext* downloadCtx, - Closure *done); + AsyncDownloadContext* downloadCtx, Closure* done); void Run(); - void SetFailed() { - isFailed_ = true; - } + void SetFailed() { isFailed_ = true; } - AsyncDownloadContext* GetDownloadContext() { - return downloadCtx_; - } + AsyncDownloadContext* GetDownloadContext() { return downloadCtx_; } protected: - // 下载是否出错出错 + // Whether the download failed bool isFailed_; - // 请求开始的时间 + // Request start time uint64_t beginTime_; - // 下载请求上下文信息 + // Download request context information AsyncDownloadContext* downloadCtx_; - // clone core对象 + // Clone core object std::shared_ptr cloneCore_; - // read chunk请求对象 + // Read chunk request object std::shared_ptr readRequest_; - // DownloadClosure生命周期结束后需要执行的回调 + // Callback to be executed when the DownloadClosure's lifecycle ends Closure* done_; }; class CloneClosure : public Closure { public: - CloneClosure() : request_(nullptr) - , response_(nullptr) - , userResponse_(nullptr) - , done_(nullptr) {} + CloneClosure() + : request_(nullptr), + response_(nullptr), + userResponse_(nullptr), + done_(nullptr) {} void Run(); - void SetClosure(Closure *done) { - done_ = done; - } + void SetClosure(Closure* done) { done_ = done; } void SetRequest(Message* request) { - request_ = dynamic_cast(request); + request_ = dynamic_cast(request); } void SetResponse(Message* response) { - response_ = dynamic_cast(response); + response_ = dynamic_cast(response); } void SetUserResponse(Message* response) { - userResponse_ = dynamic_cast(response); + userResponse_ = dynamic_cast(response); } private: - // paste chunk的请求结构体 - ChunkRequest *request_; - // paste chunk的响应结构体 - ChunkResponse *response_; - // 真正要返回给用户的响应结构体 - ChunkResponse 
*userResponse_; - // CloneClosure生命周期结束后需要执行的回调 - Closure *done_; + // Request structure for paste chunk + ChunkRequest* request_; + // Response structure for paste chunk + ChunkResponse* response_; + // The response structure that is actually returned to the user + ChunkResponse* userResponse_; + // Callback to be executed when the CloneClosure's lifecycle ends + Closure* done_; }; class CloneCore : public std::enable_shared_from_this { friend class DownloadClosure; + public: CloneCore(uint32_t sliceSize, bool enablePaste, std::shared_ptr copyer) - : sliceSize_(sliceSize) - , enablePaste_(enablePaste) - , copyer_(copyer) {} + : sliceSize_(sliceSize), enablePaste_(enablePaste), copyer_(copyer) {} virtual ~CloneCore() {} /** - * 处理读请求的逻辑 - * @param readRequest[in]:读请求信息 - * @param done[in]:任务完成后要执行的closure - * @return: 成功返回0,失败返回-1 + * Logic for processing read requests + * @param readRequest[in]: Read request information + * @param done[in]: The closure to be executed after the task is completed + * @return: Success returns 0, failure returns -1 */ int HandleReadRequest(std::shared_ptr readRequest, Closure* done); protected: /** - * 本地chunk文件存在情况下,按照本地记录的clone和bitmap信息进行数据读取 - * 会涉及读取远程文件结合本地文件进行merge返回结果 - * @param[in/out] readRequest: 用户请求&响应上下文 - * @param[in] chunkInfo: 对应本地的chunkinfo - * @return 成功返回0,失败返回负数 + * When the local chunk file exists, read data according to the locally + * recorded clone and bitmap information; this may involve reading the + * remote file and merging it with the local file to produce the result + * @param[in/out] readRequest: user request & response context + * @param[in] chunkInfo: the corresponding local chunk info + * @return returns 0 on success, a negative number on failure */ int CloneReadByLocalInfo(std::shared_ptr readRequest, - const CSChunkInfo &chunkInfo, Closure* done); + const CSChunkInfo& chunkInfo, Closure* done); /** - * 本地chunk文件不存在情况下,按照用户请求上下文中带的clonesource信息进行数据读取 - * 不涉及merge本地结果 - * @param[in/out] readRequest: 用户请求&响应上下文 + * When the local chunk file does not exist, read the data according to the + * clonesource information carried in the user request context; no merging + * with local results is involved + * @param[in/out] readRequest: user request & response context */ void CloneReadByRequestInfo(std::shared_ptr readRequest, - Closure* done); + Closure* done); /** - * 从本地chunk中读取请求的区域,然后设置response - * @param readRequest: 用户的ReadRequest - * @return: 成功返回0,失败返回-1 + * Read the requested region from the local chunk, then set the response + * @param readRequest: User's ReadRequest + * @return: Success returns 0, failure returns -1 */ int ReadChunk(std::shared_ptr readRequest); /** - * 设置read chunk类型的response,包括返回的数据和其他返回参数 - * 从本地chunk中读取已被写过的区域,未写过的区域从克隆下来的数据中获取 - * 然后将数据在内存中merge - * @param readRequest: 用户的ReadRequest - * @param cloneData: 从源端拷贝下来的数据,数据起始偏移同请求中的偏移 - * @return: 成功返回0,失败返回-1 + * Set the response of the read chunk type, including the returned data and + * other return parameters. Read already-written regions from the local + * chunk, take unwritten regions from the cloned data, then merge the data + * in memory + * @param readRequest: User's ReadRequest + * @param cloneData: data copied from the source; its starting offset + * matches the offset in the request + * @return: Success returns 0, failure returns -1 */ int SetReadChunkResponse(std::shared_ptr readRequest, const butil::IOBuf* cloneData); - // 从本地chunk中读取已经写过的区域合并到clone data中 + // Read the previously written regions from the local chunk and merge them + // into the clone data int 
ReadThenMerge(std::shared_ptr readRequest, const CSChunkInfo& chunkInfo, - const butil::IOBuf* cloneData, - char* chunkData); + const butil::IOBuf* cloneData, char* chunkData); /** - * 将从源端下载下来的数据paste到本地chunk文件中 - * @param readRequest: 用户的ReadRequest - * @param cloneData: 从源端下载的数据 - * @param offset: 下载的数据在chunk文件中的偏移 - * @param cloneDataSize: 下载的数据长度 - * @param done:任务完成后要执行的closure + * Paste the data downloaded from the source into the local chunk file + * @param readRequest: User's ReadRequest + * @param cloneData: Data downloaded from the source + * @param offset: The offset of the downloaded data in the chunk file + * @param cloneDataSize: Length of the downloaded data + * @param done: The closure to be executed after the task is completed */ void PasteCloneData(std::shared_ptr readRequest, - const butil::IOBuf* cloneData, - off_t offset, - size_t cloneDataSize, - Closure* done); + const butil::IOBuf* cloneData, off_t offset, + size_t cloneDataSize, Closure* done); inline void SetResponse(std::shared_ptr readRequest, CHUNK_OP_STATUS status); private: - // 每次拷贝的slice的大小 + // The size of each copied slice uint32_t sliceSize_; - // 判断read chunk类型的请求是否需要paste, true需要paste,false表示不需要 + // Whether a read chunk request needs a paste: true means paste is + // required, false means it is not bool enablePaste_; - // 负责从源端下载数据 + // Responsible for downloading data from the source std::shared_ptr copyer_; }; diff --git a/src/chunkserver/clone_manager.cpp b/src/chunkserver/clone_manager.cpp index 6fc428bdba..c41d844500 100644 --- a/src/chunkserver/clone_manager.cpp +++ b/src/chunkserver/clone_manager.cpp @@ -28,8 +28,7 @@ namespace chunkserver { CloneManager::CloneManager() : isRunning_(false) {} CloneManager::~CloneManager() { - if (isRunning_.load(std::memory_order_acquire)) - Fini(); + if (isRunning_.load(std::memory_order_acquire)) Fini(); } int CloneManager::Init(const CloneOptions& options) { @@ -38,9 +37,8 @@ } int CloneManager::Run() { - if (isRunning_.load(std::memory_order_acquire)) - return 0; - // 启动线程池 + if (isRunning_.load(std::memory_order_acquire)) return 0; + // Start the thread pool LOG(INFO) << "Begin to run clone manager."; tp_ = std::make_shared>(); int ret = tp_->Start(options_.threadNum, options_.queueCapacity); @@ -56,8 +54,7 @@ } int CloneManager::Fini() { - if (!isRunning_.load(std::memory_order_acquire)) - return 0; + if (!isRunning_.load(std::memory_order_acquire)) return 0; LOG(INFO) << "Begin to stop clone manager."; isRunning_.store(false, std::memory_order_release); @@ -69,10 +66,9 @@ std::shared_ptr CloneManager::GenerateCloneTask( std::shared_ptr request, - ::google::protobuf::Closure *done) { - // 如果core是空的,任务无法被处理,所以返回空 - if (options_.core == nullptr) - return nullptr; + ::google::protobuf::Closure* done) { + // If core is null, the task cannot be processed, so return nullptr + if (options_.core == nullptr) return nullptr; std::shared_ptr cloneTask = std::make_shared(request, options_.core, done); @@ -80,11 +76,9 @@ } bool CloneManager::IssueCloneTask(std::shared_ptr cloneTask) { - if (!isRunning_.load(std::memory_order_acquire)) - return false; + if (!isRunning_.load(std::memory_order_acquire)) return false; - if (cloneTask == nullptr) - return false; + if (cloneTask == nullptr) return false; tp_->Enqueue(cloneTask->Closure());
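 
The GenerateCloneTask/IssueCloneTask pair above wraps a task in a closure and enqueues it into a thread pool; the closure holds a shared_ptr to the task so it stays alive until a pool thread runs it. A minimal stand-alone sketch of that handoff (toy queue and names, not the project's TaskThreadPool/CloneTask):

#include <functional>
#include <iostream>
#include <memory>
#include <queue>
#include <thread>

// Toy task mirroring the generate/issue flow.
class ToyTask : public std::enable_shared_from_this<ToyTask> {
 public:
    std::function<void()> Closure() {
        auto self = shared_from_this();  // keep the task alive in the pool
        return [self]() { self->Run(); };
    }
    void Run() { std::cout << "clone task executed\n"; }
};

int main() {
    std::queue<std::function<void()>> pool;  // stand-in for the pool's queue

    auto task = std::make_shared<ToyTask>();  // "GenerateCloneTask"
    pool.push(task->Closure());               // "IssueCloneTask" -> Enqueue

    std::thread worker([&pool]() {  // single pool thread drains the queue
        while (!pool.empty()) {
            pool.front()();
            pool.pop();
        }
    });
    worker.join();
    return 0;
}
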
diff --git a/src/chunkserver/clone_manager.h b/src/chunkserver/clone_manager.h index 01f7088218..96e489d5c1 100644 --- a/src/chunkserver/clone_manager.h +++ b/src/chunkserver/clone_manager.h @@ -25,16 +25,17 @@ #include #include -#include // NOLINT -#include // NOLINT + #include -#include +#include // NOLINT #include +#include // NOLINT +#include #include "include/chunkserver/chunkserver_common.h" -#include "src/common/concurrent/task_thread_pool.h" -#include "src/chunkserver/clone_task.h" #include "src/chunkserver/clone_core.h" +#include "src/chunkserver/clone_task.h" +#include "src/common/concurrent/task_thread_pool.h" namespace curve { namespace chunkserver { @@ -44,18 +45,16 @@ using curve::common::TaskThreadPool; class ReadChunkRequest; struct CloneOptions { - // 核心逻辑处理类 + // Core logic processing class std::shared_ptr core; - // 最大线程数 + // Maximum number of threads uint32_t threadNum; - // 最大队列深度 + // Maximum queue depth uint32_t queueCapacity; - // 任务状态检查的周期,单位ms + // Interval of the task status check, in ms uint32_t checkPeriod; - CloneOptions() : core(nullptr) - , threadNum(10) - , queueCapacity(100) - , checkPeriod(5000) {} + CloneOptions() + : core(nullptr), threadNum(10), queueCapacity(100), checkPeriod(5000) {} }; class CloneManager { @@ -64,49 +63,51 @@ class CloneManager { virtual ~CloneManager(); /** - * 初始化 + * Initialize * - * @param options[in]:初始化参数 - * @return 错误码 + * @param options[in]: initialization parameters + * @return error code */ virtual int Init(const CloneOptions& options); /** - * 启动所有线程 + * Start all threads * - * @return 成功返回0,失败返回-1 + * @return returns 0 for success, -1 for failure */ virtual int Run(); /** - * 停止所有线程 + * Stop all threads * - * @return 成功返回0,失败返回-1 + * @return returns 0 for success, -1 for failure */ virtual int Fini(); /** - * 生成克隆任务 - * @param request[in]:请求信息 - * @return:返回生成的克隆任务,如果生成失败,返回nullptr + * Generate a clone task + * @param request[in]: Request information + * @return: Returns the generated clone task. If the generation fails, + * returns nullptr */ virtual std::shared_ptr GenerateCloneTask( std::shared_ptr request, ::google::protobuf::Closure* done); /** - * 发布克隆任务,产生克隆任务放到线程池中处理 - * @param task[in]:克隆任务 - * @return 成功返回true,失败返回false + * Publish a clone task: the generated clone task is placed in the thread + * pool for processing + * @param task[in]: Clone task + * @return returns true for success, false for failure */ virtual bool IssueCloneTask(std::shared_ptr cloneTask); private: - // 克隆任务管理相关的选项,调Init的时候初始化 + // Options for clone task management, initialized when Init is called CloneOptions options_; - // 处理克隆任务的异步线程池 + // Asynchronous thread pool that processes clone tasks std::shared_ptr> tp_; - // 当前线程池是否处于工作状态 + // Whether the thread pool is currently running std::atomic isRunning_; }; diff --git a/src/chunkserver/clone_task.h b/src/chunkserver/clone_task.h index 48766bce9a..cd55f0b439 100644 --- a/src/chunkserver/clone_task.h +++ b/src/chunkserver/clone_task.h @@ -25,37 +25,33 @@ #include #include + #include #include #include "include/chunkserver/chunkserver_common.h" -#include "src/common/uncopyable.h" #include "src/chunkserver/clone_copyer.h" #include "src/chunkserver/clone_core.h" +#include "src/common/uncopyable.h" namespace curve { namespace chunkserver { using curve::common::Uncopyable; -class CloneTask : public Uncopyable - , public std::enable_shared_from_this{ +class CloneTask : public Uncopyable, + public std::enable_shared_from_this { public: CloneTask(std::shared_ptr request, std::shared_ptr core, ::google::protobuf::Closure* done) - : core_(core) - , readRequest_(request) - , done_(done) - , isComplete_(false) {} + : core_(core), readRequest_(request), done_(done), isComplete_(false) {} virtual ~CloneTask() {} virtual std::function Closure() { auto sharedThis = shared_from_this(); - return [sharedThis] () { - sharedThis->Run(); - }; + return [sharedThis]() { sharedThis->Run(); }; } virtual void Run() { @@ -65,18 +61,16 @@ class CloneTask : public Uncopyable isComplete_ = true; } - virtual bool IsComplete() { - return isComplete_; - } + virtual bool IsComplete() { return isComplete_; } protected: - // 克隆核心逻辑 + // Clone core logic std::shared_ptr core_; - // 此次任务相关信息 + // Information related to this task std::shared_ptr readRequest_; - // 任务结束后要执行的Closure + // Closure to be executed after the task is completed ::google::protobuf::Closure* done_; - // 任务是否结束 + // Whether the task has finished bool isComplete_; }; diff --git a/src/chunkserver/conf_epoch_file.cpp b/src/chunkserver/conf_epoch_file.cpp index 6a39c6ce3e..aa8fa0077c 100644 --- a/src/chunkserver/conf_epoch_file.cpp +++ b/src/chunkserver/conf_epoch_file.cpp @@ -22,20 +22,20 @@ #include "src/chunkserver/conf_epoch_file.h" -#include #include +#include #include "src/common/crc32.h" namespace curve { namespace chunkserver { -// conf.epoch文件最大长度 +// Maximum length of the conf.epoch file const uint32_t kConfEpochFileMaxSize = 4096; const uint64_t kConfEpochFileMagic = 0x6225929368674119; -int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, - CopysetID *copysetID, uint64_t *epoch) { +int ConfEpochFile::Load(const std::string& path, LogicPoolID* logicPoolID, + CopysetID* copysetID, uint64_t* epoch) { int fd = fs_->Open(path.c_str(), O_RDWR); if (0 > fd) { LOG(ERROR) << "LoadConfEpoch failed open file " << path << ", errno: " << errno << ", error message: " << strerror(errno); return -1; } char json[kConfEpochFileMaxSize] = {0}; int size = 0; - // 1. read数据 + // 1. 
Read data size = fs_->Read(fd, json, 0, kConfEpochFileMaxSize); if (size <= 0) { LOG(ERROR) << "LoadConfEpoch read failed: " << path @@ -58,7 +58,7 @@ int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, } fs_->Close(fd); - // 2.反序列化 + // 2. Deserialization ConfEpoch confEpoch; std::string jsonStr(json); std::string err; @@ -71,7 +71,7 @@ int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, return -1; } - // 3. 验证crc + // 3. Verify CRC uint32_t crc32c = ConfEpochCrc(confEpoch); if (crc32c != confEpoch.checksum()) { LOG(ERROR) << "conf epoch crc error: " << jsonStr; @@ -89,15 +89,15 @@ int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, return 0; } -int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, +int ConfEpochFile::Save(const std::string& path, const LogicPoolID logicPoolID, const CopysetID copysetID, const uint64_t epoch) { - // 1. 转换成conf message + // 1. Convert to conf message ConfEpoch confEpoch; confEpoch.set_logicpoolid(logicPoolID); confEpoch.set_copysetid(copysetID); confEpoch.set_epoch(epoch); - // 计算crc + // Calculate crc uint32_t crc32c = ConfEpochCrc(confEpoch); confEpoch.set_checksum(crc32c); @@ -113,7 +113,7 @@ int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, return -1; } - // 2. open文件 + // 2. Open file int fd = fs_->Open(path.c_str(), O_RDWR | O_CREAT); if (0 > fd) { LOG(ERROR) << "LoadConfEpoch failed open file " << path @@ -122,7 +122,7 @@ int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, return -1; } - // 3. write文件 + // 3. Write file if (static_cast(out.size()) != fs_->Write(fd, out.c_str(), 0, out.size())) { LOG(ERROR) << "SaveConfEpoch write failed, path: " << path @@ -132,7 +132,7 @@ int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, return -1; } - // 4. 落盘 + // 4. 
Flush to disk if (0 != fs_->Fsync(fd)) { LOG(ERROR) << "SaveConfEpoch sync failed, path: " << path << ", errno: " << errno << ", error message: " << strerror(errno); fs_->Close(fd); return -1; } fs_->Close(fd); return 0; } -uint32_t ConfEpochFile::ConfEpochCrc(const ConfEpoch &confEpoch) { +uint32_t ConfEpochFile::ConfEpochCrc(const ConfEpoch& confEpoch) { uint32_t crc32c = 0; uint32_t logicPoolId = confEpoch.logicpoolid(); uint32_t copysetId = confEpoch.copysetid(); uint64_t epoch = confEpoch.epoch(); uint64_t magic = kConfEpochFileMagic; - crc32c = curve::common::CRC32( - crc32c, reinterpret_cast(&logicPoolId), sizeof(logicPoolId)); - crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&copysetId), + crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&logicPoolId), + sizeof(logicPoolId)); + crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&copysetId), sizeof(copysetId)); - crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&epoch), + crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&epoch), sizeof(epoch)); - crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&magic), + crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&magic), sizeof(magic)); return crc32c;
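 
ConfEpochCrc() above chains a running checksum across each field plus a magic number. The same accumulation pattern can be shown with zlib's plain CRC-32 as a stand-in (the project's curve::common::CRC32 computes CRC32C, so the value differs; this only illustrates the field-by-field chaining; build with -lz):

#include <cstdint>
#include <cstdio>
#include <zlib.h>  // plain CRC-32, stand-in for curve::common::CRC32

// Accumulate a checksum over the epoch fields plus a magic number, field by
// field, the same way ConfEpochCrc() chains its CRC32 calls.
static uint32_t EpochCrc(uint32_t logicPoolId, uint32_t copysetId,
                         uint64_t epoch, uint64_t magic) {
    uLong crc = crc32(0L, Z_NULL, 0);
    crc = crc32(crc, reinterpret_cast<const Bytef*>(&logicPoolId),
                sizeof(logicPoolId));
    crc = crc32(crc, reinterpret_cast<const Bytef*>(&copysetId),
                sizeof(copysetId));
    crc = crc32(crc, reinterpret_cast<const Bytef*>(&epoch), sizeof(epoch));
    crc = crc32(crc, reinterpret_cast<const Bytef*>(&magic), sizeof(magic));
    return static_cast<uint32_t>(crc);
}

int main() {
    // Magic value copied from conf_epoch_file.cpp above; the pool, copyset
    // and epoch values are arbitrary examples.
    uint32_t crc = EpochCrc(1, 100001, 2, 0x6225929368674119ULL);
    std::printf("crc32 = 0x%08x\n", crc);
    return 0;
}
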
diff --git a/src/chunkserver/conf_epoch_file.h b/src/chunkserver/conf_epoch_file.h index 91ee27ec6b..979dd90032 100644 --- a/src/chunkserver/conf_epoch_file.h +++ b/src/chunkserver/conf_epoch_file.h @@ -23,13 +23,13 @@ #ifndef SRC_CHUNKSERVER_CONF_EPOCH_FILE_H_ #define SRC_CHUNKSERVER_CONF_EPOCH_FILE_H_ -#include #include +#include -#include "src/fs/local_filesystem.h" -#include "src/fs/fs_common.h" #include "include/chunkserver/chunkserver_common.h" #include "proto/copyset.pb.h" +#include "src/fs/fs_common.h" +#include "src/fs/local_filesystem.h" namespace curve { namespace chunkserver { @@ -38,47 +38,44 @@ using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; /** - * 配置版本序列化和反序列化的工具类 - * TODO(wudemiao): 后期替换采用json编码 + * Utility class for serializing and deserializing the configuration epoch + * TODO(wudemiao): switch to JSON encoding later */ class ConfEpochFile { public: - explicit ConfEpochFile(std::shared_ptr fs) - : fs_(fs) {} + explicit ConfEpochFile(std::shared_ptr fs) : fs_(fs) {} /** - * 加载快照文件中的配置版本 - * @param path:文件路径 - * @param logicPoolID:逻辑池id - * @param copysetID:复制组id - * @param epoch:配置版本,出参,返回读取的epoch值 - * @return 0,成功; -1失败 + * Load the configuration epoch from the snapshot file + * @param path: File path + * @param logicPoolID: Logical pool ID + * @param copysetID: Copyset ID + * @param epoch: Configuration epoch; output parameter returning the value + * read + * @return 0 on success, -1 on failure */ - int Load(const std::string &path, - LogicPoolID *logicPoolID, - CopysetID *copysetID, - uint64_t *epoch); + int Load(const std::string& path, LogicPoolID* logicPoolID, + CopysetID* copysetID, uint64_t* epoch); /** - * 保存配置版本信息到快照文件中序列化的格式如下,处理head表示长度,使用二 - * 进制,其它都是文本格式,便于必要的时候能够直接用查看,sync保证数据落盘 - * | head | 配置版本信息 | - * | 8 bytes size_t | uint32_t | 变 长文本 | - * | length | crc32 | logic pool id | copyset id | epoch | - * 上面的持久化使用 ':' 分隔 - * @param path:文件路径 - * @param logicPoolID:逻辑池id - * @param copysetID:复制组id - * @param epoch:配置版本 - * @return 0成功; -1失败 + * Serialize the configuration epoch and save it to the snapshot file. The + * format is as follows: the head holds the length and is binary; + * everything else is text, so the file can be inspected directly when + * necessary. sync guarantees the data reaches the disk. + * | head | configuration epoch information | + * | 8 bytes size_t | uint32_t | variable-length text | + * | length | crc32 | logic pool id | copyset id | epoch | + * The persisted fields above are separated by ':' + * @param path: File path + * @param logicPoolID: Logical pool ID + * @param copysetID: Copyset ID + * @param epoch: Configuration epoch + * @return 0 on success, -1 on failure */ - int Save(const std::string &path, - const LogicPoolID logicPoolID, - const CopysetID copysetID, - const uint64_t epoch); + int Save(const std::string& path, const LogicPoolID logicPoolID, + const CopysetID copysetID, const uint64_t epoch); private: - static uint32_t ConfEpochCrc(const ConfEpoch &confEpoch); + static uint32_t ConfEpochCrc(const ConfEpoch& confEpoch); std::shared_ptr fs_; }; diff --git a/src/chunkserver/config_info.h b/src/chunkserver/config_info.h index 67c3f57524..c00809413f 100644 --- a/src/chunkserver/config_info.h +++ b/src/chunkserver/config_info.h @@ -23,33 +23,34 @@ #ifndef SRC_CHUNKSERVER_CONFIG_INFO_H_ #define SRC_CHUNKSERVER_CONFIG_INFO_H_ -#include #include +#include -#include "src/fs/local_filesystem.h" -#include "src/chunkserver/trash.h" -#include "src/chunkserver/inflight_throttle.h" -#include "src/chunkserver/concurrent_apply/concurrent_apply.h" #include "include/chunkserver/chunkserver_common.h" +#include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "src/chunkserver/inflight_throttle.h" +#include "src/chunkserver/trash.h" +#include "src/fs/local_filesystem.h" namespace curve { namespace chunkserver { -using curve::fs::LocalFileSystem; using curve::chunkserver::concurrent::ConcurrentApplyModule; +using curve::fs::LocalFileSystem; class FilePool; class CopysetNodeManager; class CloneManager; /** - * copyset node的配置选项 + * Configuration options for copyset node */ struct CopysetNodeOptions { - // follower to candidate 超时时间,单位ms,默认是1000ms + // follower to candidate timeout, in ms, defaults to 1000ms int electionTimeoutMs; - // 定期打快照的时间间隔,默认3600s,也就是1小时 + // Interval between periodic snapshots, 3600s (1 hour) by default int snapshotIntervalS; // If true, read requests will be invoked in current lease leader node. 
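 
The two defaults documented above (election timeout 1000 ms, snapshot interval 3600 s) are the kind of values that end up on the underlying braft node. A hedged sketch of how they would be carried over to braft::NodeOptions, assuming braft is available as it is for this project (illustrative only; the real wiring happens in CopysetNode::Init/InitRaftNodeOptions):

#include <braft/raft.h>

#include <iostream>

int main() {
    braft::NodeOptions opts;
    opts.election_timeout_ms = 1000;  // follower -> candidate timeout
    opts.snapshot_interval_s = 3600;  // periodic snapshot every hour

    std::cout << "election_timeout_ms=" << opts.election_timeout_ms
              << " snapshot_interval_s=" << opts.snapshot_interval_s << "\n";
    return 0;
}
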
@@ -57,79 +58,86 @@ struct CopysetNodeOptions { // Default: true bool enbaleLeaseRead; - // 如果follower和leader日志相差超过catchupMargin, - // 就会执行install snapshot进行恢复,默认: 1000 + // If the difference between the follower and leader logs exceeds + // catchupMargin, Will execute install snapshot for recovery, default: 1000 int catchupMargin; - // 是否开启pthread执行用户代码,默认false + // Enable pthread to execute user code, default to false bool usercodeInPthread; - // 所有uri个格式: ${protocol}://${绝对或者相对路径} - // eg: - // posix: local - // bluestore: bluestore + // All uri formats: ${protocol}://${absolute or relative path} + // eg: + // posix: local + // bluestore: bluestore - // raft log uri, 默认raft_log + // Raft log uri, default raft_log std::string logUri; - // raft meta uri, 默认raft_meta + // Raft meta uri, default raft_meta std::string raftMetaUri; - // raft snapshot uri,默认raft_snpashot + // Raft snapshot uri, default raft_snpashot std::string raftSnapshotUri; - // chunk data uri,默认data + // Chunk data uri, default data std::string chunkDataUri; - // chunk snapshot uri,默认snapshot + // Chunk snapshot uri, default snapshot std::string chunkSnapshotUri; - // copyset data recycling uri,默认recycler + // Copyset data recycling uri, default recycler std::string recyclerUri; std::string ip; uint32_t port; - // chunk文件的大小 + // Chunk file size uint32_t maxChunkSize; // WAL segment file size uint32_t maxWalSegmentSize; - // chunk文件的page大小 + // The page size of the chunk file uint32_t metaPageSize; // alignment for I/O request uint32_t blockSize; - // clone chunk的location长度限制 + // Location length limit for clone chunks uint32_t locationLimit; - // 并发模块 - ConcurrentApplyModule *concurrentapply; - // Chunk file池子 + // Concurrent module + ConcurrentApplyModule* concurrentapply; + // Chunk file pool std::shared_ptr chunkFilePool; // WAL file pool std::shared_ptr walFilePool; - // 文件系统适配层 + // File System Adaptation Layer std::shared_ptr localFileSystem; - // 回收站, 心跳模块判断该chunkserver不在copyset配置组时, - // 通知copysetManager将copyset目录移动至回收站 - // 一段时间后实际回收物理空间 + // When the recycle bin and heartbeat module determine that the chunkserver + // is not in the copyset configuration group, Notify the copysetManager to + // move the copyset directory to the recycle bin Actual recovery of physical + // space after a period of time std::shared_ptr trash; - // snapshot流控 - scoped_refptr *snapshotThrottle; + // Snapshot flow control + scoped_refptr* snapshotThrottle; - // 限制chunkserver启动时copyset并发恢复加载的数量,为0表示不限制 + // Limit the number of copyset concurrent recovery loads during chunkserver + // startup, with a value of 0 indicating no limit uint32_t loadConcurrency = 0; // chunkserver sync_thread_pool number of threads. uint32_t syncConcurrency = 20; // copyset trigger sync timeout uint32_t syncTriggerSeconds = 25; - // 检查copyset是否加载完成出现异常时的最大重试次数 - // 可能的异常:1.当前大多数副本还没起来;2.网络问题等导致无法获取leader - // 3.其他的原因导致无法获取到leader的committed index + // Check if the copyset has completed loading and the maximum number of + // retries when an exception occurs Possible exceptions: 1. Currently, most + // replicas have not yet been restored; 2. Network issues and other issues + // preventing the acquisition of leaders + // 3. 
other reasons make it impossible to obtain the leader's + // committed index uint32_t checkRetryTimes = 3; - // 当前peer的applied_index与leader上的committed_index差距小于该值 - // 则判定copyset已经加载完成 + // If the difference between the current peer's applied_index and the + // leader's committed_index is less than this value, the copyset is + // considered to have finished loading uint32_t finishLoadMargin = 2000; - // 循环判定copyset是否加载完成的内部睡眠时间 + // Internal sleep interval of the loop that checks whether the copyset + // has finished loading uint32_t checkLoadMarginIntervalMs = 1000; // enable O_DSYNC when open chunkfile @@ -145,11 +153,11 @@ struct CopysetNodeOptions { }; /** - * ChunkServiceManager 的依赖项 + * Dependencies for ChunkServiceManager */ struct ChunkServiceOptions { - CopysetNodeManager *copysetNodeManager; - CloneManager *cloneManager; + CopysetNodeManager* copysetNodeManager; + CloneManager* cloneManager; std::shared_ptr inflightThrottle; }; diff --git a/src/chunkserver/copyset_node.cpp b/src/chunkserver/copyset_node.cpp index a00f7aaf9a..87e8d70135 100755 --- a/src/chunkserver/copyset_node.cpp +++ b/src/chunkserver/copyset_node.cpp @@ -22,33 +22,34 @@ #include "src/chunkserver/copyset_node.h" -#include -#include -#include #include -#include #include -#include -#include +#include +#include +#include +#include + #include #include -#include -#include -#include #include #include +#include +#include +#include +#include +#include -#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h" #include "src/chunkserver/chunk_closure.h" -#include "src/chunkserver/op_request.h" -#include "src/common/concurrent/task_thread_pool.h" -#include "src/fs/fs_common.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/datastore/define.h" #include "src/chunkserver/datastore/datastore_file_helper.h" -#include "src/common/uri_parser.h" +#include "src/chunkserver/datastore/define.h" +#include "src/chunkserver/op_request.h" +#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h" +#include "src/common/concurrent/task_thread_pool.h" #include "src/common/crc32.h" #include "src/common/fs_util.h" +#include "src/common/uri_parser.h" +#include "src/fs/fs_common.h" namespace braft { DECLARE_bool(raft_enable_leader_lease); @@ -59,37 +60,36 @@ namespace chunkserver { using curve::fs::FileSystemInfo; -const char *kCurveConfEpochFilename = "conf.epoch"; +const char* kCurveConfEpochFilename = "conf.epoch"; uint32_t CopysetNode::syncTriggerSeconds_ = 25; -std::shared_ptr> - CopysetNode::copysetSyncPool_ = nullptr; - -CopysetNode::CopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &initConf) : - logicPoolId_(logicPoolId), - copysetId_(copysetId), - conf_(initConf), - epoch_(0), - peerId_(), - nodeOptions_(), - raftNode_(nullptr), - chunkDataApath_(), - chunkDataRpath_(), - appliedIndex_(0), - leaderTerm_(-1), - configChange_(std::make_shared()), - lastSnapshotIndex_(0), - scaning_(false), - lastScanSec_(0), - enableOdsyncWhenOpenChunkFile_(false), - isSyncing_(false), - checkSyncingIntervalMs_(500) { -} +std::shared_ptr> CopysetNode::copysetSyncPool_ = + nullptr; + +CopysetNode::CopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& initConf) + : logicPoolId_(logicPoolId), + copysetId_(copysetId), + conf_(initConf), + epoch_(0), + peerId_(), + nodeOptions_(), + raftNode_(nullptr), + chunkDataApath_(), + chunkDataRpath_(), + appliedIndex_(0), + leaderTerm_(-1),
configChange_(std::make_shared()), + lastSnapshotIndex_(0), + scaning_(false), + lastScanSec_(0), + enableOdsyncWhenOpenChunkFile_(false), + isSyncing_(false), + checkSyncingIntervalMs_(500) {} CopysetNode::~CopysetNode() { - // 移除 copyset的metric + // Remove the copyset's metric ChunkServerMetric::GetInstance()->RemoveCopysetMetric(logicPoolId_, copysetId_); metric_ = nullptr; @@ -98,17 +98,16 @@ CopysetNode::~CopysetNode() { delete nodeOptions_.snapshot_file_system_adaptor; nodeOptions_.snapshot_file_system_adaptor = nullptr; } - LOG(INFO) << "release copyset node: " - << GroupIdString(); + LOG(INFO) << "release copyset node: " << GroupIdString(); } -int CopysetNode::Init(const CopysetNodeOptions &options) { +int CopysetNode::Init(const CopysetNodeOptions& options) { std::string groupId = GroupId(); std::string protocol = curve::common::UriParser::ParseUri( options.chunkDataUri, &copysetDirPath_); if (protocol.empty()) { - // TODO(wudemiao): 增加必要的错误码并返回 + // TODO(wudemiao): Add necessary error codes and return LOG(ERROR) << "not support chunk data uri's protocol" << " error chunkDataDir is: " << options.chunkDataUri << ". Copyset: " << GroupIdString(); @@ -135,12 +134,11 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { dsOptions.locationLimit = options.locationLimit; dsOptions.enableOdsyncWhenOpenChunkFile = options.enableOdsyncWhenOpenChunkFile; - dataStore_ = std::make_shared(options.localFileSystem, - options.chunkFilePool, - dsOptions); + dataStore_ = std::make_shared( + options.localFileSystem, options.chunkFilePool, dsOptions); CHECK(nullptr != dataStore_); if (false == dataStore_->Initialize()) { - // TODO(wudemiao): 增加必要的错误码并返回 + // TODO(wudemiao): Add necessary error codes and return LOG(ERROR) << "data store init failed. " << "Copyset: " << GroupIdString(); return -1; @@ -150,10 +148,10 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { syncThread_.Init(this); dataStore_->SetCacheCondPtr(syncThread_.cond_); dataStore_->SetCacheLimits(options.syncChunkLimit, - options.syncThreshold); + options.syncThreshold); LOG(INFO) << "init sync thread success limit = " - << options.syncChunkLimit << - "syncthreshold = " << options.syncThreshold; + << options.syncChunkLimit + << ", syncthreshold = " << options.syncThreshold; } recyclerUri_ = options.recyclerUri; @@ -166,21 +164,21 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { // initialize raft node options corresponding to the copy set node InitRaftNodeOptions(options); - /* 初始化 peer id */ + /* Initialize peer id */ butil::ip_t ip; butil::str2ip(options.ip.c_str(), &ip); butil::EndPoint addr(ip, options.port); /** - * idx默认是零,在chunkserver不允许一个进程有同一个copyset的多副本, - * 这一点注意和不让braft区别开来 + * The default idx is zero: chunkserver does not allow a single process to + * hold multiple replicas of the same copyset. Note that this is a + * deliberate difference from braft. */ peerId_ = PeerId(addr, 0); raftNode_ = std::make_shared(groupId, peerId_); concurrentapply_ = options.concurrentapply; - /* - * 初始化copyset性能metrics + * Initialize copyset performance metrics */ int ret = ChunkServerMetric::GetInstance()->CreateCopysetMetric( logicPoolId_, copysetId_); @@ -189,10 +187,11 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { << "Copyset: " << GroupIdString(); return -1; } - metric_ = ChunkServerMetric::GetInstance()->GetCopysetMetric( - logicPoolId_, copysetId_); + metric_ = ChunkServerMetric::GetInstance()->GetCopysetMetric(logicPoolId_, + copysetId_); if (metric_ != nullptr) { - // TODO(yyk)
后续考虑添加datastore层面的io metric + // TODO(yyk) consider adding datastore-level io metrics in the future metric_->MonitorDataStore(dataStore_.get()); } @@ -213,7 +212,7 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { } int CopysetNode::Run() { - // raft node的初始化实际上让起run起来 + // Initializing the raft node actually makes it start running if (0 != raftNode_->init(nodeOptions_)) { LOG(ERROR) << "Fail to init raft node. " << "Copyset: " << GroupIdString(); @@ -237,19 +236,20 @@ void CopysetNode::Fini() { WaitSnapshotDone(); if (nullptr != raftNode_) { - // 关闭所有关于此raft node的服务 + // Close all services related to this raft node raftNode_->shutdown(nullptr); - // 等待所有的正在处理的task结束 + // Wait for all in-flight tasks to finish raftNode_->join(); } if (nullptr != concurrentapply_) { - // 将未刷盘的数据落盘,如果不刷盘 - // 迁移copyset时,copyset移除后再去执行WriteChunk操作可能出错 + // Flush the data that has not been persisted to disk; otherwise, when + // a copyset is migrated, executing WriteChunk after the copyset has + // been removed may fail concurrentapply_->Flush(); } } -void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions &options) { +void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions& options) { auto groupId = GroupId(); nodeOptions_.initial_conf = conf_; nodeOptions_.election_timeout_ms = options.electionTimeoutMs; @@ -257,20 +257,19 @@ void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions &options) { nodeOptions_.node_owns_fsm = false; nodeOptions_.snapshot_interval_s = options.snapshotIntervalS; nodeOptions_.log_uri = options.logUri; - nodeOptions_.log_uri.append("/").append(groupId) .append("/").append(RAFT_LOG_DIR); + nodeOptions_.log_uri.append("/").append(groupId).append("/").append( + RAFT_LOG_DIR); nodeOptions_.raft_meta_uri = options.raftMetaUri; - nodeOptions_.raft_meta_uri.append("/").append(groupId) - .append("/").append(RAFT_META_DIR); + nodeOptions_.raft_meta_uri.append("/").append(groupId).append("/").append( + RAFT_META_DIR); nodeOptions_.snapshot_uri = options.raftSnapshotUri; - nodeOptions_.snapshot_uri.append("/").append(groupId) - .append("/").append(RAFT_SNAP_DIR); + nodeOptions_.snapshot_uri.append("/").append(groupId).append("/").append( + RAFT_SNAP_DIR); nodeOptions_.usercode_in_pthread = options.usercodeInPthread; nodeOptions_.snapshot_throttle = options.snapshotThrottle; - CurveFilesystemAdaptor* cfa = - new CurveFilesystemAdaptor(options.chunkFilePool, - options.localFileSystem); + CurveFilesystemAdaptor* cfa = new CurveFilesystemAdaptor( + options.chunkFilePool, options.localFileSystem); std::vector filterList; std::string snapshotMeta(BRAFT_SNAPSHOT_META_FILE); filterList.push_back(kCurveConfEpochFilename); @@ -282,47 +281,52 @@ void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions &options) { new scoped_refptr(cfa); } -void CopysetNode::on_apply(::braft::Iterator &iter) { +void CopysetNode::on_apply(::braft::Iterator& iter) { for (; iter.valid(); iter.next()) { - // 放在bthread中异步执行,避免阻塞当前状态机的执行 + // Executed asynchronously in a bthread to avoid blocking the execution + // of the current state machine braft::AsyncClosureGuard doneGuard(iter.done()); /** - * 获取向braft提交任务时候传递的ChunkClosure,里面包含了 - * Op的所有上下文 ChunkOpRequest + * Obtain the ChunkClosure passed when the task was submitted to braft; + * it carries the op's full context, the ChunkOpRequest */ - braft::Closure *closure = iter.done(); + braft::Closure* closure = iter.done(); if (nullptr != closure) { /** - * 1.closure不是null,那么说明当前节点正常,直接从内存中拿到Op - *
context进行apply + * 1. A non-null closure means the current node is healthy; the op + * context is taken directly from memory and applied */ - ChunkClosure - *chunkClosure = dynamic_cast(iter.done()); + ChunkClosure* chunkClosure = + dynamic_cast(iter.done()); CHECK(nullptr != chunkClosure) << "ChunkClosure dynamic cast failed"; std::shared_ptr& opRequest = chunkClosure->request_; - concurrentapply_->Push(opRequest->ChunkId(), ChunkOpRequest::Schedule(opRequest->OpType()), // NOLINT - &ChunkOpRequest::OnApply, opRequest, - iter.index(), doneGuard.release()); + concurrentapply_->Push( + opRequest->ChunkId(), + ChunkOpRequest::Schedule(opRequest->OpType()), // NOLINT + &ChunkOpRequest::OnApply, opRequest, iter.index(), + doneGuard.release()); } else { - // 获取log entry + // Obtain the log entry butil::IOBuf log = iter.data(); /** - * 2.closure是null,有两种情况: - * 2.1. 节点重启,回放apply,这里会将Op log entry进行反序列化, - * 然后获取Op信息进行apply - * 2.2. follower apply + * 2. A null closure covers two situations: + * 2.1. The node restarted and is replaying the log: the op log + * entry is deserialized and the op information is then applied + * 2.2. follower apply */ ChunkRequest request; butil::IOBuf data; auto opReq = ChunkOpRequest::Decode(log, &request, &data, iter.index(), GetLeaderId()); auto chunkId = request.chunkid(); - concurrentapply_->Push(chunkId, ChunkOpRequest::Schedule(request.optype()), // NOLINT - &ChunkOpRequest::OnApplyFromLog, opReq, - dataStore_, std::move(request), data); + concurrentapply_->Push( + chunkId, ChunkOpRequest::Schedule(request.optype()), // NOLINT + &ChunkOpRequest::OnApplyFromLog, opReq, dataStore_, + std::move(request), data); } } }
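Both Push calls above key the task by chunk id, which is what gives the apply path its ordering guarantee: ops on the same chunk are applied in log order, while ops on different chunks can proceed in parallel. A minimal sketch of that keyed-dispatch idea follows (illustrative only; the real module lives in src/chunkserver/concurrent_apply/ and its API differs):

    #include <cstdint>
    #include <functional>
    #include <mutex>
    #include <queue>
    #include <vector>

    // Toy keyed dispatcher: tasks with the same key always land in the same
    // queue, preserving their relative order; distinct keys may map to
    // different queues and can be drained concurrently.
    class KeyedDispatcher {
     public:
        explicit KeyedDispatcher(size_t n) : queues_(n), locks_(n) {}

        void Push(uint64_t key, std::function<void()> task) {
            size_t idx = key % queues_.size();
            std::lock_guard<std::mutex> lk(locks_[idx]);
            // In a full implementation, one worker thread drains each queue.
            queues_[idx].push(std::move(task));
        }

     private:
        std::vector<std::queue<std::function<void()>>> queues_;
        std::vector<std::mutex> locks_;
    };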
@@ -331,11 +335,11 @@ void CopysetNode::on_shutdown() { LOG(INFO) << GroupIdString() << " is shutdown"; } -void CopysetNode::on_snapshot_save(::braft::SnapshotWriter *writer, - ::braft::Closure *done) { +void CopysetNode::on_snapshot_save(::braft::SnapshotWriter* writer, + ::braft::Closure* done) { snapshotFuture_ = - std::async(std::launch::async, - &CopysetNode::save_snapshot_background, this, writer, done); + std::async(std::launch::async, &CopysetNode::save_snapshot_background, + this, writer, done); } void CopysetNode::WaitSnapshotDone() { @@ -345,12 +349,12 @@ void CopysetNode::WaitSnapshotDone() { } } -void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer, - ::braft::Closure *done) { +void CopysetNode::save_snapshot_background(::braft::SnapshotWriter* writer, + ::braft::Closure* done) { brpc::ClosureGuard doneGuard(done); /** - * 1.flush I/O to disk,确保数据都落盘 + * 1. Flush I/O to disk to ensure that all data is persisted */ concurrentapply_->Flush(); @@ -359,37 +363,41 @@ void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer, } /** - * 2.保存配置版本: conf.epoch,注意conf.epoch是存放在data目录下 + * 2. Save the configuration version: conf.epoch; note that conf.epoch is + * stored in the data directory */ - std::string - filePathTemp = writer->get_path() + "/" + kCurveConfEpochFilename; + std::string filePathTemp = + writer->get_path() + "/" + kCurveConfEpochFilename; if (0 != SaveConfEpoch(filePathTemp)) { done->status().set_error(errno, "invalid: %s", strerror(errno)); LOG(ERROR) << "SaveConfEpoch failed. " - << "Copyset: " << GroupIdString() - << ", errno: " << errno << ", " + << "Copyset: " << GroupIdString() << ", errno: " << errno << ", error message: " << strerror(errno); return; } /** - * 3.保存chunk文件名的列表到快照元数据文件中 + * 3. Save the list of chunk file names to the snapshot metadata file */ std::vector files; if (0 == fs_->List(chunkDataApath_, &files)) { for (const auto& fileName : files) { - // raft保存快照时,meta信息中不用保存快照文件列表 - // raft下载快照的时候,在下载完chunk以后,会单独获取snapshot列表 + // When raft saves a snapshot, the snapshot file list does not need + // to be stored in the meta information: when raft downloads a + // snapshot, it fetches the snapshot list separately after the + // chunks have been downloaded bool isSnapshot = DatastoreFileHelper::IsSnapshotFile(fileName); if (isSnapshot) { continue; } std::string chunkApath; - // 通过绝对路径,算出相对于快照目录的路径 + // Compute the path relative to the snapshot directory from the + // absolute path chunkApath.append(chunkDataApath_); chunkApath.append("/").append(fileName); - std::string filePath = curve::common::CalcRelativePath( - writer->get_path(), chunkApath); + std::string filePath = + curve::common::CalcRelativePath(writer->get_path(), chunkApath); writer->add_file(filePath); } } else { @@ -401,16 +409,16 @@ void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer, } /** - * 4. 保存conf.epoch文件到快照元数据文件中 + * 4. Save the conf.epoch file to the snapshot metadata file */ - writer->add_file(kCurveConfEpochFilename); + writer->add_file(kCurveConfEpochFilename); } -int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { +int CopysetNode::on_snapshot_load(::braft::SnapshotReader* reader) { /** - * 1. 加载快照数据 + * 1. Load the snapshot data */ - // 打开的 snapshot path: /mnt/sda/1-10001/raft_snapshot/snapshot_0043 + // Opened snapshot path: /mnt/sda/1-10001/raft_snapshot/snapshot_0043 std::string snapshotPath = reader->get_path(); // /mnt/sda/1-10001/raft_snapshot/snapshot_0043/data @@ -419,15 +427,19 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { snapshotChunkDataDir.append("/").append(chunkDataRpath_); LOG(INFO) << "load snapshot data path: " << snapshotChunkDataDir << ", Copyset: " << GroupIdString(); - // 如果数据目录不存在,那么说明 load snapshot 数据部分就不需要处理 + // If the data directory does not exist, the snapshot data part does not + // need to be handled during load if (fs_->DirExists(snapshotChunkDataDir)) { - // 加载快照数据前,要先清理copyset data目录下的文件 - // 否则可能导致快照加载以后存在一些残留的数据 - // 如果delete_file失败或者rename失败,当前node状态会置为ERROR - // 如果delete_file或者rename期间进程重启,copyset起来后会加载快照 - // 由于rename可以保证原子性,所以起来加载快照后,data目录一定能还原 + // Before loading snapshot data, clean the files under the copyset data + // directory first; otherwise some residual data may remain after the + // snapshot is loaded. If delete_file or rename fails, the node status + // is set to ERROR. If the process restarts during delete_file or + // rename, the snapshot is loaded again after the copyset comes up. + // Since rename is atomic, the data directory is guaranteed to be + // restored once the snapshot has been loaded. bool ret = + nodeOptions_.snapshot_file_system_adaptor->get()->delete_file( + chunkDataApath_, true); if (!ret) { LOG(ERROR) << "delete chunk data dir failed. " << "Copyset: " << GroupIdString() << ", path: " << chunkDataApath_; return false; } LOG(INFO) << "delete chunk data dir success.
" << "Copyset: " << GroupIdString() << ", path: " << chunkDataApath_; - ret = nodeOptions_.snapshot_file_system_adaptor->get()-> - rename(snapshotChunkDataDir, chunkDataApath_); + ret = nodeOptions_.snapshot_file_system_adaptor->get()->rename( + snapshotChunkDataDir, chunkDataApath_); if (!ret) { LOG(ERROR) << "rename snapshot data dir " << snapshotChunkDataDir << "to chunk data dir " << chunkDataApath_ << " failed. " @@ -449,13 +461,13 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { << "to chunk data dir " << chunkDataApath_ << " success. " << "Copyset: " << GroupIdString(); } else { - LOG(INFO) << "load snapshot data path: " - << snapshotChunkDataDir << " not exist. " + LOG(INFO) << "load snapshot data path: " << snapshotChunkDataDir + << " not exist. " << "Copyset: " << GroupIdString(); } /** - * 2. 加载配置版本文件 + * 2. Load Configuration Version File */ std::string filePath = reader->get_path() + "/" + kCurveConfEpochFilename; if (fs_->FileExists(filePath)) { @@ -468,20 +480,25 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { } /** - * 3.重新init data store,场景举例: + * 3. Reinitializing the data store, with examples: * - * (1) 例如一个add peer,之后立马read这个时候data store会返回chunk - * not exist,因为这个新增的peer在刚开始起来的时候,没有任何数据,这 - * 个时候data store init了,那么新增的peer在leader恢复了数据之后, - * data store并不感知; + * (1) For instance, when adding a new peer and immediately reading data, + * the data store may return "chunk not exist." This is because the newly + * added peer initially has no data, and when the data store is initialized, + * it is not aware of the data that the new peer receives after the leader + * recovers its data. * - * (2) peer通过install snapshot恢复了所有的数据是通过rename操作的, - * 如果某个file之前被data store打开了,那么rename能成功,但是老的 - * 文件只有等data store close老的文件才能删除,所以需要重新init data - * store,并且close的文件的fd,然后重新open新的文件,不然data store - * 会一直是操作的老的文件,而一旦data store close相应的fd一次之后, - * 后面的write的数据就会丢,除此之外,如果 datastore init没有重新open - * 文件,也将导致read不到恢复过来的数据,而是read到老的数据。 + * (2) When a peer recovers all of its data through an install snapshot + * operation, it is performed through a rename operation. If a file was + * previously open in the data store, the rename operation can succeed, but + * the old file can only be deleted after the data store closes it. + * Therefore, it is necessary to reinitialize the data store, close the + * file's file descriptor (fd), and then reopen the new file. Otherwise, the + * data store will continue to operate on the old file. Once the data store + * closes, the corresponding fd, any subsequent write operations will be + * lost. Additionally, if the datastore is not reinitialized and the new + * file is not reopened, it may result in reading the old data rather than + * the recovered data. */ if (!dataStore_->Initialize()) { LOG(ERROR) << "data store init failed in on snapshot load. " @@ -490,8 +507,9 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { } /** - * 4.如果snapshot中存 conf,那么加载初始化,保证不需要以来 - * on_configuration_committed。需要注意的是这里会忽略joint stage的日志。 + * 4. If conf is stored in the snapshot, load initialization to ensure that + * there is no need for on_configuration_committed. It should be noted that + * the log of the joint stage will be ignored here. */ braft::SnapshotMeta meta; reader->load_meta(&meta); @@ -510,7 +528,7 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { void CopysetNode::on_leader_start(int64_t term) { /* - * Invoke order in on_leader_start: + * Invoke order in on_leader_start: * 1. 
flush concurrent apply queue. * 2. set term in state machine. * @@ -536,7 +554,7 @@ void CopysetNode::on_leader_start(int64_t term) { << " become leader, term is: " << leaderTerm_; } -void CopysetNode::on_leader_stop(const butil::Status &status) { +void CopysetNode::on_leader_stop(const butil::Status& status) { (void)status; leaderTerm_.store(-1, std::memory_order_release); ChunkServerMetric::GetInstance()->DecreaseLeaderCount(); @@ -544,7 +562,7 @@ void CopysetNode::on_leader_start(int64_t term) { << ", peer id: " << peerId_.to_string() << " stepped down"; } -void CopysetNode::on_error(const ::braft::Error &e) { +void CopysetNode::on_error(const ::braft::Error& e) { LOG(FATAL) << "Copyset: " << GroupIdString() << ", peer id: " << peerId_.to_string() << " meet raft error: " << e; @@ -556,7 +574,7 @@ void CopysetNode::on_configuration_committed(const Configuration& conf, // Loading snapshot should not increase epoch. When loading // snapshot, the index is equal with lastSnapshotIndex_. LOG(INFO) << "index: " << index - << ", lastSnapshotIndex_: " << lastSnapshotIndex_; + << ", lastSnapshotIndex_: " << lastSnapshotIndex_; if (index != lastSnapshotIndex_) { std::unique_lock lock_guard(confLock_); conf_ = conf; @@ -569,63 +587,47 @@ void CopysetNode::on_configuration_committed(const Configuration& conf, << ", epoch: " << epoch_.load(std::memory_order_acquire); } -void CopysetNode::on_stop_following(const ::braft::LeaderChangeContext &ctx) { +void CopysetNode::on_stop_following(const ::braft::LeaderChangeContext& ctx) { LOG(INFO) << "Copyset: " << GroupIdString() - << ", peer id: " << peerId_.to_string() - << " stops following" << ctx; + << ", peer id: " << peerId_.to_string() << " stops following" + << ctx; } -void CopysetNode::on_start_following(const ::braft::LeaderChangeContext &ctx) { +void CopysetNode::on_start_following(const ::braft::LeaderChangeContext& ctx) { LOG(INFO) << "Copyset: " << GroupIdString() - << ", peer id: " << peerId_.to_string() - << "start following" << ctx; + << ", peer id: " << peerId_.to_string() << " start following" + << ctx; } -LogicPoolID CopysetNode::GetLogicPoolId() const { - return logicPoolId_; -} +LogicPoolID CopysetNode::GetLogicPoolId() const { return logicPoolId_; } -CopysetID CopysetNode::GetCopysetId() const { - return copysetId_; -} +CopysetID CopysetNode::GetCopysetId() const { return copysetId_; } -void CopysetNode::SetScan(bool scan) { - scaning_ = scan; -} +void CopysetNode::SetScan(bool scan) { scaning_ = scan; } -bool CopysetNode::GetScan() const { - return scaning_; -} +bool CopysetNode::GetScan() const { return scaning_; } -void CopysetNode::SetLastScan(uint64_t time) { - lastScanSec_ = time; -} +void CopysetNode::SetLastScan(uint64_t time) { lastScanSec_ = time; } -uint64_t CopysetNode::GetLastScan() const { - return lastScanSec_; -} +uint64_t CopysetNode::GetLastScan() const { return lastScanSec_; } std::vector& CopysetNode::GetFailedScanMap() { return failedScanMaps_; } -std::string CopysetNode::GetCopysetDir() const { - return copysetDirPath_; -} +std::string CopysetNode::GetCopysetDir() const { return copysetDirPath_; } uint64_t CopysetNode::GetConfEpoch() const { std::lock_guard lockguard(confLock_); return epoch_.load(std::memory_order_relaxed); } -int CopysetNode::LoadConfEpoch(const std::string &filePath) { +int CopysetNode::LoadConfEpoch(const std::string& filePath) { LogicPoolID loadLogicPoolID = 0; CopysetID loadCopysetID = 0; uint64_t loadEpoch = 0; - int ret = epochFile_->Load(filePath, - &loadLogicPoolID, -
&loadCopysetID, + int ret = epochFile_->Load(filePath, &loadLogicPoolID, &loadCopysetID, &loadEpoch); if (0 == ret) { if (logicPoolId_ != loadLogicPoolID || copysetId_ != loadCopysetID) { @@ -643,7 +645,7 @@ int CopysetNode::LoadConfEpoch(const std::string &filePath) { return ret; } -int CopysetNode::SaveConfEpoch(const std::string &filePath) { +int CopysetNode::SaveConfEpoch(const std::string& filePath) { return epochFile_->Save(filePath, logicPoolId_, copysetId_, epoch_); } @@ -678,17 +680,17 @@ void CopysetNode::SetCopysetNode(std::shared_ptr node) { raftNode_ = node; } -void CopysetNode::SetSnapshotFileSystem(scoped_refptr *fs) { +void CopysetNode::SetSnapshotFileSystem(scoped_refptr* fs) { nodeOptions_.snapshot_file_system_adaptor = fs; } bool CopysetNode::IsLeaderTerm() const { - if (0 < leaderTerm_.load(std::memory_order_acquire)) - return true; + if (0 < leaderTerm_.load(std::memory_order_acquire)) return true; return false; } -bool CopysetNode::IsLeaseLeader(const braft::LeaderLeaseStatus &lease_status) const { // NOLINT +bool CopysetNode::IsLeaseLeader( + const braft::LeaderLeaseStatus& lease_status) const { // NOLINT /* * Why not use lease_status.state==LEASE_VALID directly to judge? * @@ -707,13 +709,12 @@ bool CopysetNode::IsLeaseLeader(const braft::LeaderLeaseStatus &lease_status) co return term > 0 && term == lease_status.term; } -bool CopysetNode::IsLeaseExpired(const braft::LeaderLeaseStatus &lease_status) const { // NOLINT +bool CopysetNode::IsLeaseExpired( + const braft::LeaderLeaseStatus& lease_status) const { // NOLINT return lease_status.state == braft::LEASE_EXPIRED; } -PeerId CopysetNode::GetLeaderId() const { - return raftNode_->leader_id(); -} +PeerId CopysetNode::GetLeaderId() const { return raftNode_->leader_id(); } butil::Status CopysetNode::TransferLeader(const Peer& peer) { butil::Status status; @@ -722,15 +723,15 @@ butil::Status CopysetNode::TransferLeader(const Peer& peer) { if (raftNode_->leader_id() == peerId) { butil::Status status = butil::Status::OK(); DVLOG(6) << "Skipped transferring leader to leader itself. 
" - << "peerid: " << peerId - << ", Copyset: " << GroupIdString(); + << "peerid: " << peerId << ", Copyset: " << GroupIdString(); return status; } int rc = raftNode_->transfer_leadership_to(peerId); if (rc != 0) { - status = butil::Status(rc, "Failed to transfer leader of copyset " + status = butil::Status(rc, + "Failed to transfer leader of copyset " "%s to peer %s, error: %s", GroupIdString().c_str(), peerId.to_string().c_str(), berror(rc)); @@ -741,9 +742,8 @@ butil::Status CopysetNode::TransferLeader(const Peer& peer) { transferee_ = peer; status = butil::Status::OK(); - LOG(INFO) << "Transferred leader of copyset " - << GroupIdString() - << " to peer " << peerId; + LOG(INFO) << "Transferred leader of copyset " << GroupIdString() + << " to peer " << peerId; return status; } @@ -761,14 +761,13 @@ butil::Status CopysetNode::AddPeer(const Peer& peer) { if (peer == peerId) { butil::Status status = butil::Status::OK(); DVLOG(6) << peerId << " is already a member of copyset " - << GroupIdString() - << ", skip adding peer"; + << GroupIdString() << ", skip adding peer"; return status; } } ConfigurationChangeDone* addPeerDone = - new ConfigurationChangeDone(configChange_); + new ConfigurationChangeDone(configChange_); ConfigurationChange expectedCfgChange(ConfigChangeType::ADD_PEER, peer); addPeerDone->expectedCfgChange = expectedCfgChange; raftNode_->add_peer(peerId, addPeerDone); @@ -797,13 +796,13 @@ butil::Status CopysetNode::RemovePeer(const Peer& peer) { if (!peerValid) { butil::Status status = butil::Status::OK(); - DVLOG(6) << peerId << " is not a member of copyset " - << GroupIdString() << ", skip removing"; + DVLOG(6) << peerId << " is not a member of copyset " << GroupIdString() + << ", skip removing"; return status; } ConfigurationChangeDone* removePeerDone = - new ConfigurationChangeDone(configChange_); + new ConfigurationChangeDone(configChange_); ConfigurationChange expectedCfgChange(ConfigChangeType::REMOVE_PEER, peer); removePeerDone->expectedCfgChange = expectedCfgChange; raftNode_->remove_peer(peerId, removePeerDone); @@ -831,7 +830,7 @@ butil::Status CopysetNode::ChangePeer(const std::vector& newPeers) { return st; } ConfigurationChangeDone* changePeerDone = - new ConfigurationChangeDone(configChange_); + new ConfigurationChangeDone(configChange_); ConfigurationChange expectedCfgChange; expectedCfgChange.type = ConfigChangeType::CHANGE_PEER; expectedCfgChange.alterPeer.set_address(adding.begin()->to_string()); @@ -845,18 +844,22 @@ butil::Status CopysetNode::ChangePeer(const std::vector& newPeers) { void CopysetNode::UpdateAppliedIndex(uint64_t index) { uint64_t curIndex = appliedIndex_.load(std::memory_order_acquire); - // 只更新比自己大的 index + // Only update indexes larger than oneself if (index > curIndex) { /** - * compare_exchange_strong解释: - * 首先比较curIndex是不是等于appliedIndex,如果是,那么说明没有人 - * 修改appliedindex,那么用index去修改appliedIndex,更新成功,完成; - * 如果不等于,说明有人更新了appliedindex,那么通过curIndex返回当前 - * 的appliedindex,并且返回false。整个过程都是原子的 + * Explanation of compare_exchange_strong: + * First, it compares whether curIndex is equal to appliedIndex. If it + * is equal, it means that no one has modified appliedindex. In this + * case, it tries to update appliedIndex with the value of index, and if + * the update is successful, it's done. If curIndex is not equal to + * appliedindex, it indicates that someone else has updated appliedIndex + * in the meantime. In this case, it returns the current value of + * appliedindex through curIndex and returns false. This entire process + * is atomic. 
- while (!appliedIndex_.compare_exchange_strong(curIndex, - index, - std::memory_order_acq_rel)) { //NOLINT + while (!appliedIndex_.compare_exchange_strong( + curIndex, index, + std::memory_order_acq_rel)) { // NOLINT if (index <= curIndex) { break; } @@ -876,27 +879,29 @@ CurveSegmentLogStorage* CopysetNode::GetLogStorage() const { return logStorage_; } -ConcurrentApplyModule *CopysetNode::GetConcurrentApplyModule() const { +ConcurrentApplyModule* CopysetNode::GetConcurrentApplyModule() const { return concurrentapply_; } -void CopysetNode::Propose(const braft::Task &task) { - raftNode_->apply(task); -} +void CopysetNode::Propose(const braft::Task& task) { raftNode_->apply(task); } -int CopysetNode::GetConfChange(ConfigChangeType *type, - Configuration *oldConf, - Peer *alterPeer) { +int CopysetNode::GetConfChange(ConfigChangeType* type, Configuration* oldConf, + Peer* alterPeer) { /** - * 避免new leader当选leader之后,提交noop entry之前,epoch和 - * 配置可能不一致的情况。考虑如下情形: + * To avoid the epoch and the configuration being inconsistent in the + * window after a new leader is elected but before its noop entry is + * committed, consider the following scenario: * - * 三个成员的复制组{ABC},当前epoch=5,A是leader,收到配置配置+D, - * 假设B收到了{ABC+D}的配置变更日志,然后leader A挂了,B当选为了 - * new leader,在B提交noop entry之前,B上查询到的epoch值最大可能为5, - * 而查询到的配置确实{ABCD}了,所以这里在new leader B在提交noop entry - * 之前,也就是实现隐公提交配置变更日志{ABC+D}之前,不允许向用户返回 - * 配置和配置变更信息,避免epoch和配置信息不一致 + * In a replication group with three members {ABC}, the current epoch is 5, + * and A is the leader. A receives a configuration change that adds D, + * and assume that B also receives the configuration change log {ABC+D}. + * Then leader A crashes and B is elected as the new leader. Before B + * commits the noop entry, the maximum epoch value that can be queried on B + * is still 5, while the queried configuration is already {ABCD}. So here, + * before the new leader B commits the noop entry, which implicitly commits + * the configuration change log {ABC+D}, it is not allowed to return the + * configuration and configuration change information to the user, avoiding + * an inconsistency between the epoch and the configuration information.
*/ if (leaderTerm_.load(std::memory_order_acquire) <= 0) { *type = ConfigChangeType::NONE; return 0; } @@ -922,9 +927,9 @@ uint64_t CopysetNode::LeaderTerm() const { return leaderTerm_.load(std::memory_order_acquire); } -int CopysetNode::GetHash(std::string *hash) { +int CopysetNode::GetHash(std::string* hash) { int ret = 0; - int fd = 0; + int fd = 0; int len = 0; uint32_t crc32c = 0; std::vector files; @@ -934,7 +939,8 @@ return -1; } - // 计算所有chunk文件crc需要保证计算的顺序是一样的 + // When computing the crc of all chunk files, the order of calculation + // must be kept the same std::sort(files.begin(), files.end()); for (std::string file : files) { @@ -953,7 +959,7 @@ } len = fileInfo.st_size; - char *buff = new (std::nothrow) char[len]; + char* buff = new (std::nothrow) char[len]; if (nullptr == buff) { return -1; } @@ -974,15 +980,15 @@ return 0; } -void CopysetNode::GetStatus(NodeStatus *status) { +void CopysetNode::GetStatus(NodeStatus* status) { raftNode_->get_status(status); } -void CopysetNode::GetLeaderLeaseStatus(braft::LeaderLeaseStatus *status) { +void CopysetNode::GetLeaderLeaseStatus(braft::LeaderLeaseStatus* status) { raftNode_->get_leader_lease_status(status); } -bool CopysetNode::GetLeaderStatus(NodeStatus *leaderStaus) { +bool CopysetNode::GetLeaderStatus(NodeStatus* leaderStaus) { NodeStatus status; GetStatus(&status); if (status.leader_id.is_empty()) { @@ -997,16 +1003,15 @@ brpc::Controller cntl; cntl.set_timeout_ms(500); brpc::Channel channel; - if (channel.Init(status.leader_id.addr, nullptr) !=0) { - LOG(WARNING) << "can not create channel to " - << status.leader_id.addr + if (channel.Init(status.leader_id.addr, nullptr) != 0) { + LOG(WARNING) << "can not create channel to " << status.leader_id.addr << ", copyset " << GroupIdString(); return false; } CopysetStatusRequest request; CopysetStatusResponse response; - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address(status.leader_id.to_string()); request.set_logicpoolid(logicPoolId_); request.set_copysetid(copysetId_); @@ -1016,16 +1021,15 @@ CopysetService_Stub stub(&channel); stub.GetCopysetStatus(&cntl, &request, &response, nullptr); if (cntl.Failed()) { - LOG(WARNING) << "get leader status failed: " - << cntl.ErrorText() + LOG(WARNING) << "get leader status failed: " << cntl.ErrorText() << ", copyset " << GroupIdString(); return false; } if (response.status() != COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS) { LOG(WARNING) << "get leader status failed" - << ", status: " << response.status() - << ", copyset " << GroupIdString(); + << ", status: " << response.status() << ", copyset " + << GroupIdString(); return false; } @@ -1078,9 +1082,8 @@ void CopysetNode::SyncAllChunks() { CSErrorCode r = dataStore_->SyncChunk(chunk); if (r != CSErrorCode::Success) { LOG(FATAL) << "Sync Chunk failed in Copyset: " - << GroupIdString() - << ", chunkid: " << chunk - << " data store return: " << r; + << GroupIdString() << ", chunkid: " << chunk + << " data store return: " << r; } }); } @@ -1093,11 +1096,11 @@ void SyncChunkThread::Init(CopysetNode* node) { } void SyncChunkThread::Run() { - syncThread_ = std::thread([this](){ + syncThread_ = std::thread([this]() { while (running_) { std::unique_lock lock(mtx_); -
cond_->wait_for(lock, - std::chrono::seconds(CopysetNode::syncTriggerSeconds_)); + cond_->wait_for( + lock, std::chrono::seconds(CopysetNode::syncTriggerSeconds_)); node_->SyncAllChunks(); } }); @@ -1111,9 +1114,7 @@ void SyncChunkThread::Stop() { } } -SyncChunkThread::~SyncChunkThread() { - Stop(); -} +SyncChunkThread::~SyncChunkThread() { Stop(); } } // namespace chunkserver } // namespace curve diff --git a/src/chunkserver/copyset_node.h b/src/chunkserver/copyset_node.h index cf7a34aeec..74033cbc80 100755 --- a/src/chunkserver/copyset_node.h +++ b/src/chunkserver/copyset_node.h @@ -23,53 +23,53 @@ #ifndef SRC_CHUNKSERVER_COPYSET_NODE_H_ #define SRC_CHUNKSERVER_COPYSET_NODE_H_ -#include #include #include +#include +#include #include +#include +#include #include #include -#include -#include -#include +#include "proto/chunk.pb.h" +#include "proto/common.pb.h" +#include "proto/heartbeat.pb.h" +#include "proto/scan.pb.h" +#include "src/chunkserver/chunkserver_metrics.h" #include "src/chunkserver/concurrent_apply/concurrent_apply.h" -#include "src/chunkserver/datastore/chunkserver_datastore.h" #include "src/chunkserver/conf_epoch_file.h" #include "src/chunkserver/config_info.h" -#include "src/chunkserver/chunkserver_metrics.h" +#include "src/chunkserver/datastore/chunkserver_datastore.h" +#include "src/chunkserver/raft_node.h" #include "src/chunkserver/raftlog/curve_segment_log_storage.h" -#include "src/chunkserver/raftsnapshot/define.h" #include "src/chunkserver/raftsnapshot/curve_snapshot_writer.h" -#include "src/common/string_util.h" +#include "src/chunkserver/raftsnapshot/define.h" #include "src/common/concurrent/task_thread_pool.h" -#include "src/chunkserver/raft_node.h" -#include "proto/heartbeat.pb.h" -#include "proto/chunk.pb.h" -#include "proto/common.pb.h" -#include "proto/scan.pb.h" +#include "src/common/string_util.h" namespace curve { namespace chunkserver { -using ::google::protobuf::RpcController; -using ::google::protobuf::Closure; -using ::curve::mds::heartbeat::ConfigChangeType; using ::curve::common::Peer; using ::curve::common::TaskThreadPool; +using ::curve::mds::heartbeat::ConfigChangeType; +using ::google::protobuf::Closure; +using ::google::protobuf::RpcController; class CopysetNodeManager; -extern const char *kCurveConfEpochFilename; +extern const char* kCurveConfEpochFilename; struct ConfigurationChange { ConfigChangeType type; Peer alterPeer; ConfigurationChange() : type(ConfigChangeType::NONE) {} - ConfigurationChange(const ConfigChangeType& type2, const Peer& alterPeer2) : - type(type2), alterPeer(alterPeer2) {} + ConfigurationChange(const ConfigChangeType& type2, const Peer& alterPeer2) + : type(type2), alterPeer(alterPeer2) {} bool IsEmpty() { return type == ConfigChangeType::NONE && !alterPeer.has_address(); } @@ -79,7 +79,7 @@ struct ConfigurationChange { } bool operator==(const ConfigurationChange& rhs) { return type == rhs.type && - alterPeer.address() == rhs.alterPeer.address(); + alterPeer.address() == rhs.alterPeer.address(); } ConfigurationChange& operator=(const ConfigurationChange& rhs) { type = rhs.type; @@ -92,17 +92,18 @@ class ConfigurationChangeDone : public braft::Closure { public: void Run() { if (!expectedCfgChange.IsEmpty() && - *curCfgChange == expectedCfgChange) { + *curCfgChange == expectedCfgChange) { curCfgChange->Reset(); } delete this; } explicit ConfigurationChangeDone( - std::shared_ptr cfgChange) - : curCfgChange(cfgChange) {} - // copyset node中当前的配置变更信息 + std::shared_ptr cfgChange) + : curCfgChange(cfgChange) {} + // Current 
configuration change information in the copyset node std::shared_ptr curCfgChange; - // 这次配置变更对应的配置变更信息 + // The configuration change information corresponding to this configuration + // change ConfigurationChange expectedCfgChange; }; @@ -116,6 +117,7 @@ class SyncChunkThread : public curve::common::Uncopyable { void Run(); void Init(CopysetNode* node); void Stop(); + private: bool running_; std::mutex mtx_; @@ -125,7 +127,7 @@ class SyncChunkThread : public curve::common::Uncopyable { }; /** - * 一个Copyset Node就是一个复制组的副本 + * A Copyset Node is a replica of a replication group */ class CopysetNode : public braft::StateMachine, public std::enable_shared_from_this { @@ -133,38 +135,37 @@ class CopysetNode : public braft::StateMachine, // for ut mock CopysetNode() = default; - CopysetNode(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &initConf); + CopysetNode(const LogicPoolID& logicPoolId, const CopysetID& copysetId, + const Configuration& initConf); virtual ~CopysetNode(); /** - * 初始化copyset node配置 + * Initialize copyset node configuration * @param options - * @return 0,成功,-1失败 + * @return 0 on success, -1 on failure */ - virtual int Init(const CopysetNodeOptions &options); + virtual int Init(const CopysetNodeOptions& options); /** - * Raft Node init,使得Raft Node运行起来 + * Initialize the Raft Node and make it run * @return */ virtual int Run(); /** - * 关闭copyset node + * Close the copyset node */ virtual void Fini(); /** - * 返回复制组的逻辑池ID + * Returns the logical pool ID of the replication group * @return */ LogicPoolID GetLogicPoolId() const; /** - * 返回复制组的复制组ID + * Returns the copyset ID of the replication group * @return */ CopysetID GetCopysetId() const; @@ -180,13 +181,13 @@ class CopysetNode : public braft::StateMachine, virtual std::vector& GetFailedScanMap(); /** - * 返回复制组数据目录 + * Returns the data directory of the replication group * @return */ std::string GetCopysetDir() const; /** - * 返回当前副本是否在leader任期 + * Returns whether the current replica is within the leader term * @return */ virtual bool IsLeaderTerm() const; @@ -195,111 +196,115 @@ class CopysetNode : public braft::StateMachine, * check if current node is in lease leader * @return */ - virtual bool IsLeaseLeader(const braft::LeaderLeaseStatus &lease_status) const; // NOLINT + virtual bool IsLeaseLeader( + const braft::LeaderLeaseStatus& lease_status) const; // NOLINT /** * check if current node is expired * @return */ - virtual bool IsLeaseExpired(const braft::LeaderLeaseStatus &lease_status) const; // NOLINT + virtual bool IsLeaseExpired( + const braft::LeaderLeaseStatus& lease_status) const; // NOLINT /** - * 返回当前的任期 - * @return 当前的任期 + * Returns the current term + * @return the current term */ virtual uint64_t LeaderTerm() const; /** - * 返回leader id + * Return leader id * @return */ virtual PeerId GetLeaderId() const; /** - * @brief 切换复制组的Leader - * @param[in] peerId 目标Leader的成员ID - * @return 心跳任务的引用 + * @brief Switch the leader of the replication group + * @param[in] peerId The member ID of the target leader + * @return Reference to Heartbeat Task */ butil::Status TransferLeader(const Peer& peer); /** - * @brief 复制组添加新成员 - * @param[in] peerId 新成员的ID - * @return 心跳任务的引用 + * @brief Add a new member to the replication group + * @param[in] peerId The ID of the new member + * @return Reference to Heartbeat Task */ butil::Status AddPeer(const Peer& peer); /** - * @brief 复制组删除成员 - * @param[in] peerId 将要删除成员的ID - * @return 心跳任务的引用 + * @brief Remove a member from the replication group + * @param[in] peerId The ID of the member
to be deleted + * @return Reference to Heartbeat Task */ butil::Status RemovePeer(const Peer& peer); /** - * @brief 变更复制组成员 - * @param[in] newPeers 新的复制组成员 - * @return 心跳任务的引用 + * @brief Change replication group members + * @param[in] newPeers New replication group members + * @return Reference to Heartbeat Task */ butil::Status ChangePeer(const std::vector& newPeers); /** - * 返回copyset的配置版本 + * Returns the configuration version of the copyset * @return */ virtual uint64_t GetConfEpoch() const; /** - * 更新applied index,只有比它大的才更新 + * Update the applied index; only an index larger than the current one + * is applied * @param index */ virtual void UpdateAppliedIndex(uint64_t index); /** - * 返回当前最新的applied index + * Returns the current latest applied index * @return */ virtual uint64_t GetAppliedIndex() const; /** - * @brief: 查询配置变更的状态 - * @param type[out]: 配置变更类型 - * @param oldConf[out]: 老的配置 - * @param alterPeer[out]: 变更的peer - * @return 0查询成功,-1查询异常失败 + * @brief: Query the status of configuration changes + * @param type[out]: Configuration change type + * @param oldConf[out]: Old configuration + * @param alterPeer[out]: Changed peer + * @return 0 if the query succeeds, -1 if it fails */ - virtual int GetConfChange(ConfigChangeType *type, - Configuration *oldConf, - Peer *alterPeer); + virtual int GetConfChange(ConfigChangeType* type, Configuration* oldConf, + Peer* alterPeer); /** - * @brief: 获取copyset node的状态值,用于比较多个副本的数据一致性 - * @param hash[out]: copyset node状态值 - * @return 0成功,-1失败 + * @brief: Obtain the status value of the copyset node, used to compare + * data consistency across multiple replicas + * @param hash[out]: copyset node status value + * @return 0 on success, -1 on failure */ - virtual int GetHash(std::string *hash); + virtual int GetHash(std::string* hash); /** - * @brief: 获取copyset node的status,实际调用的raft node的get_status接口 + * @brief: Get the status of the copyset node; this actually calls the + * get_status interface of the Raft node * @param status[out]: copyset node status */ - virtual void GetStatus(NodeStatus *status); + virtual void GetStatus(NodeStatus* status); /** * @brief: get raft node leader lease status * @param status[out]: raft node leader lease status */ - virtual void GetLeaderLeaseStatus(braft::LeaderLeaseStatus *status); + virtual void GetLeaderLeaseStatus(braft::LeaderLeaseStatus* status); /** - * 获取此copyset的leader上的status + * Obtain the status on the leader of this copyset * @param leaderStaus[out]: leader copyset node status - * @return 获取成功返回true,获取失败返回false + * @return true on success, false on failure */ - virtual bool GetLeaderStatus(NodeStatus *leaderStaus); + virtual bool GetLeaderStatus(NodeStatus* leaderStaus); /** - * 返回data store指针 + * Returns the data store pointer * @return */ virtual std::shared_ptr GetDataStore() const; @@ -311,19 +316,19 @@ virtual CurveSegmentLogStorage* GetLogStorage() const; /** - * 返回ConcurrentApplyModule + * Returns the ConcurrentApplyModule */ virtual ConcurrentApplyModule* GetConcurrentApplyModule() const; /** - * 向copyset node propose一个op request + * Propose an op request to the copyset node * @param task */ - virtual void Propose(const braft::Task &task); + virtual void Propose(const braft::Task& task); /** - * 获取复制组成员 - * @param peers:返回的成员列表(输出参数) + * Get replication group members + * @param peers: List of returned members (output parameter) * @return */ virtual void ListPeers(std::vector* peers); @@ -333,87 +338,95 @@ class CopysetNode : public
 braft::StateMachine, * @param options * @return */ - void InitRaftNodeOptions(const CopysetNodeOptions &options); + void InitRaftNodeOptions(const CopysetNodeOptions& options); /** - * 下面的接口都是继承StateMachine实现的接口 + * The following interfaces override the StateMachine implementation */ public: /** - * op log apply的时候回调函数 + * Callback invoked when op log entries are applied + * @param iter: Allows batch access to already committed log entries. */ - void on_apply(::braft::Iterator &iter) override; + void on_apply(::braft::Iterator& iter) override; /** - * 复制关闭的时候调用此回调 + * Called when the replication group is shut down */ void on_shutdown() override; /** - * raft snapshot相关的接口,仅仅保存raft snapshot meta - * 和snapshot文件的list,这里并没有拷贝实际的数据,因为 - * 在块存储场景所有操作是幂等,所以,并不真实的拷贝数据 + * Interfaces related to raft snapshot, which only store the raft snapshot + * meta and a list of snapshot files. Actual data is not copied here + * because in the context of block storage all operations are idempotent, + * so there is no need to actually copy the data. */ - void on_snapshot_save(::braft::SnapshotWriter *writer, - ::braft::Closure *done) override; + void on_snapshot_save(::braft::SnapshotWriter* writer, + ::braft::Closure* done) override; /** - * load日志有两种情况: - * 1. Follower节点Install snapshot追赶leader,这个时候 - * snapshot目录下面有chunk数据和snapshot数据 - * 2. 节点重启,会执行snapshot load,然后回放日志,这个时 - * 候snapshot目录下面没有数据,什么都不用做 - * TODO(wudemiao): install snapshot的时候会存在空间 - * double的可能性,考虑如下场景,follower落后,然后通过从 - * leader install snapshot恢复数据,其首先会从leader将 - * 所有数据下载过来,然后在调用snapshot load加载快照,这个 - * 期间空间占用了就double了;后期需要通过控制单盘参与install - * snapshot的数量 + * There are two scenarios when loading a snapshot: + * 1. Follower nodes catch up with the leader by installing a snapshot. In + * this case, there are chunk data and snapshot data under the snapshot + * directory. + * 2. When a node restarts, it performs a snapshot load and then replays + * the logs. In this case, there is no data under the snapshot directory, + * so nothing needs to be done. + * TODO(wudemiao): When installing a snapshot, there is a possibility of + * doubling the space usage. Consider the following scenario: a follower + * lags behind and then recovers data by installing a snapshot from the + * leader. It will first download all the data from the leader and then + * call snapshot load to load the snapshot. During this period, the space + * usage doubles. Later, we need to control the number of disks + * participating in the installation of snapshots. */ - int on_snapshot_load(::braft::SnapshotReader *reader) override; + int on_snapshot_load(::braft::SnapshotReader* reader) override; /** - * new leader在apply noop之后会调用此接口,表示此 leader可 - * 以提供read/write服务了。 - * @param term:当前leader任期 + * The new leader will call this interface after applying noop, indicating + * that this leader can provide read/write services.
+ * @param term: Current leader term */ void on_leader_start(int64_t term) override; /** - * leader step down的时候调用 - * @param status:复制组的状态 + * Called when the leader steps down + * @param status: The status of the replication group */ - void on_leader_stop(const butil::Status &status) override; + void on_leader_stop(const butil::Status& status) override; /** - * 复制组发生错误的时候调用 - * @param e:具体的 error + * Called when an error occurs in the replication group + * @param e: Specific error */ - void on_error(const ::braft::Error &e) override; + void on_error(const ::braft::Error& e) override; /** - * 配置变更日志entry apply的时候会调用此函数,目前会利用此接口 - * 更新配置epoch值 - * @param conf:当前复制组最新的配置 + * Called when a configuration-change log entry is applied; currently this + * interface is used to update the configuration epoch value + * @param conf: The latest configuration of the current replication group * @param index log index */ - void on_configuration_committed(const Configuration& conf, int64_t index) override; //NOLINT + void on_configuration_committed(const Configuration& conf, + int64_t index) override; // NOLINT /** - * 当follower停止following主的时候调用 - * @param ctx:可以获取stop following的原因 + * Called when the follower stops following the leader + * @param ctx: The reason for stopping following can be obtained from it */ - void on_stop_following(const ::braft::LeaderChangeContext &ctx) override; + void on_stop_following(const ::braft::LeaderChangeContext& ctx) override; /** - * Follower或者Candidate发现新的leader后调用 - * @param ctx:leader变更上下,可以获取new leader和start following的原因 + * Called after a Follower or Candidate discovers a new leader + * @param ctx: leader change context, from which the new leader and the + * reason for starting to follow can be obtained */ - void on_start_following(const ::braft::LeaderChangeContext &ctx) override; + void on_start_following(const ::braft::LeaderChangeContext& ctx) override; /** - * 用于测试注入mock依赖 + * Used in tests to inject mock dependencies */ public: void SetCSDateStore(std::shared_ptr datastore); @@ -435,22 +448,22 @@ class CopysetNode : public braft::StateMachine, // shared to sync pool static std::shared_ptr> copysetSyncPool_; /** - * 从文件中解析copyset配置版本信息 - * @param filePath:文件路径 - * @return 0: successs, -1 failed + * Parse the copyset configuration version information from a file + * @param filePath: File path + * @return 0 on success, -1 on failure */ - int LoadConfEpoch(const std::string &filePath); + int LoadConfEpoch(const std::string& filePath); /** - * 保存copyset配置版本信息到文件中 - * @param filePath:文件路径 - * @return 0 成功,-1 failed + * Save the copyset configuration version information to a file + * @param filePath: File path + * @return 0 on success, -1 on failure */ - int SaveConfEpoch(const std::string &filePath); + int SaveConfEpoch(const std::string& filePath); public: - void save_snapshot_background(::braft::SnapshotWriter *writer, - ::braft::Closure *done); + void save_snapshot_background(::braft::SnapshotWriter* writer, + ::braft::Closure* done); void ShipToSync(ChunkID chunkId) { if (enableOdsyncWhenOpenChunkFile_) { @@ -470,58 +483,58 @@ class CopysetNode : public braft::StateMachine, void WaitSnapshotDone(); private: - inline std::string GroupId() { - return ToGroupId(logicPoolId_, copysetId_); - } + inline std::string GroupId() { return ToGroupId(logicPoolId_, copysetId_); } inline std::string GroupIdString() { return ToGroupIdString(logicPoolId_, copysetId_); } private: - // 逻辑池 id + // Logical pool ID LogicPoolID logicPoolId_; - // 复制组 id + // Copyset ID CopysetID copysetId_; -
// 复制组的配置 - Configuration conf_; - // 复制组的配置操作锁 - mutable std::mutex confLock_; - // 复制组的配置版本 + // Configuration of the replication group + Configuration conf_; + // Configuration operation lock of the replication group + mutable std::mutex confLock_; + // Configuration version (epoch) of the replication group std::atomic epoch_; - // 复制组副本的peer id + // Peer ID of the replication group replica PeerId peerId_; - // braft Node的配置参数 + // Configuration parameters for the braft Node NodeOptions nodeOptions_; - // CopysetNode对应的braft Node + // The braft Node corresponding to CopysetNode std::shared_ptr raftNode_; - // chunk file的绝对目录 + // Absolute directory for chunk files std::string chunkDataApath_; - // chunk file的相对目录 + // Relative directory for chunk files std::string chunkDataRpath_; - // copyset绝对路径 + // copyset absolute path std::string copysetDirPath_; - // 文件系统适配器 + // File system adapter std::shared_ptr fs_; - // Chunk持久化操作接口 + // Chunk persistence operation interface std::shared_ptr dataStore_; // The log storage for braft CurveSegmentLogStorage* logStorage_; - // 并发模块 - ConcurrentApplyModule *concurrentapply_ = nullptr; - // 配置版本持久化工具接口 + // Concurrency module + ConcurrentApplyModule* concurrentapply_ = nullptr; + // Interface for persisting the configuration version + std::unique_ptr epochFile_; - // 复制组的apply index + // Apply index of the replication group std::atomic appliedIndex_; - // 复制组当前任期,如果<=0表明不是leader + // Current term of the replica; if <= 0, this replica is not the leader std::atomic leaderTerm_; - // 复制组数据回收站目录 + // Recycle bin directory for copyset data std::string recyclerUri_; - // 复制组的metric信息 + // Metric information of the replication group CopysetMetricPtr metric_; - // 正在进行中的配置变更 + // Configuration change in progress std::shared_ptr configChange_; - // transfer leader的目标,状态为TRANSFERRING时有效 + // The target of transfer leader; valid when the status is + // TRANSFERRING Peer transferee_; int64_t lastSnapshotIndex_; // scan status diff --git a/src/chunkserver/copyset_node_manager.cpp b/src/chunkserver/copyset_node_manager.cpp index 78f4afec89..9c856ccb50 100755 --- a/src/chunkserver/copyset_node_manager.cpp +++ b/src/chunkserver/copyset_node_manager.cpp @@ -22,27 +22,26 @@ #include "src/chunkserver/copyset_node_manager.h" -#include #include #include +#include -#include #include #include +#include +#include "src/chunkserver/braft_cli_service.h" +#include "src/chunkserver/braft_cli_service2.h" +#include "src/chunkserver/chunk_service.h" #include "src/chunkserver/config_info.h" #include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/copyset_service.h" +#include "src/chunkserver/op_request.h" +#include "src/chunkserver/raftsnapshot/curve_file_service.h" #include "src/common/concurrent/task_thread_pool.h" #include "src/common/string_util.h" #include "src/common/timeutility.h" -#include "src/chunkserver/chunk_service.h" -#include "src/chunkserver/op_request.h" -#include "src/chunkserver/copyset_service.h" -#include "src/chunkserver/braft_cli_service.h" -#include "src/chunkserver/braft_cli_service2.h" #include "src/common/uri_parser.h" -#include "src/chunkserver/raftsnapshot/curve_file_service.h" - namespace curve { namespace chunkserver { using curve::common::TimeUtility; std::once_flag addServiceFlag; -int CopysetNodeManager::Init(const CopysetNodeOptions &copysetNodeOptions) { +int CopysetNodeManager::Init(const CopysetNodeOptions& copysetNodeOptions) { copysetNodeOptions_ = copysetNodeOptions; CopysetNode::syncTriggerSeconds_ =
copysetNodeOptions.syncTriggerSeconds; CopysetNode::copysetSyncPool_ = @@ -71,10 +70,9 @@ int CopysetNodeManager::Run() { CopysetNode::copysetSyncPool_->Start(copysetNodeOptions_.syncConcurrency); assert(copysetNodeOptions_.syncConcurrency > 0); int ret = 0; - // 启动线程池 + // Start Thread Pool if (copysetLoader_ != nullptr) { - ret = copysetLoader_->Start( - copysetNodeOptions_.loadConcurrency); + ret = copysetLoader_->Start(copysetNodeOptions_.loadConcurrency); if (ret < 0) { LOG(ERROR) << "CopysetLoadThrottle start error. ThreadNum: " << copysetNodeOptions_.loadConcurrency; @@ -82,7 +80,7 @@ int CopysetNodeManager::Run() { } } - // 启动加载已有的copyset + // Start loading existing copyset ret = ReloadCopysets(); if (ret == 0) { loadFinished_.exchange(true, std::memory_order_acq_rel); @@ -141,28 +139,26 @@ int CopysetNodeManager::ReloadCopysets() { } uint64_t poolId = GetPoolID(groupId); uint64_t copysetId = GetCopysetID(groupId); - LOG(INFO) << "Parsed groupid " << groupId - << " as " << ToGroupIdString(poolId, copysetId); + LOG(INFO) << "Parsed groupid " << groupId << " as " + << ToGroupIdString(poolId, copysetId); if (copysetLoader_ == nullptr) { LoadCopyset(poolId, copysetId, false); } else { - copysetLoader_->Enqueue( - std::bind(&CopysetNodeManager::LoadCopyset, - this, - poolId, - copysetId, - true)); + copysetLoader_->Enqueue(std::bind(&CopysetNodeManager::LoadCopyset, + this, poolId, copysetId, true)); } } - // 如果加载成功,则等待所有copyset加载完成,关闭线程池 + // If loading is successful, wait for all copysets to load and close the + // thread pool if (copysetLoader_ != nullptr) { while (copysetLoader_->QueueSize() != 0) { ::sleep(1); } - // queue size为0,但是线程池中的线程仍然可能还在执行 - // stop内部会去join thread,以此保证所有任务执行完以后再退出 + // Even when the queue size is 0, the threads in the thread pool may + // still be executing. The 'stop' function internally performs thread + // joining to ensure that all tasks are completed before exiting. copysetLoader_->Stop(); copysetLoader_ = nullptr; } @@ -174,8 +170,8 @@ bool CopysetNodeManager::LoadFinished() { return loadFinished_.load(std::memory_order_acquire); } -void CopysetNodeManager::LoadCopyset(const LogicPoolID &logicPoolId, - const CopysetID ©setId, +void CopysetNodeManager::LoadCopyset(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, bool needCheckLoadFinished) { LOG(INFO) << "Begin to load copyset " << ToGroupIdString(logicPoolId, copysetId) @@ -183,8 +179,9 @@ void CopysetNodeManager::LoadCopyset(const LogicPoolID &logicPoolId, << (needCheckLoadFinished ? "Yes." 
: "No."); uint64_t beginTime = TimeUtility::GetTimeofDayMs(); - // chunkserver启动加载copyset阶段,会拒绝外部的创建copyset请求 - // 因此不会有其他线程加载或者创建相同copyset,此时不需要加锁 + // chunkserver starts the loading copyset phase and will reject external + // requests to create copysets Therefore, no other threads will load or + // create the same copyset, and locking is not necessary at this time Configuration conf; std::shared_ptr copysetNode = CreateCopysetNodeUnlocked(logicPoolId, copysetId, conf); @@ -205,7 +202,7 @@ void CopysetNodeManager::LoadCopyset(const LogicPoolID &logicPoolId, } LOG(INFO) << "Load copyset " << ToGroupIdString(logicPoolId, copysetId) << " end, time used (ms): " - << TimeUtility::GetTimeofDayMs() - beginTime; + << TimeUtility::GetTimeofDayMs() - beginTime; } bool CopysetNodeManager::CheckCopysetUntilLoadFinished( @@ -224,9 +221,12 @@ bool CopysetNodeManager::CheckCopysetUntilLoadFinished( } NodeStatus leaderStaus; bool getSuccess = node->GetLeaderStatus(&leaderStaus); - // 获取leader状态失败一般是由于还没选出leader或者leader心跳还未发送到当前节点 - // 正常通过几次重试可以获取到leader信息,如果重试多次都未获取到 - // 则认为copyset当前可能无法选出leader,直接退出 + // Failure to obtain leader status is usually because a leader has not + // been elected yet, or the leader's heartbeat has not been received by + // the current node. Typically, leader information can be obtained + // through several retries. If multiple retries fail to obtain the + // information, it is assumed that the copyset may not be able to elect + // a leader at the moment, and the operation exits directly. if (!getSuccess) { ++retryTimes; ::usleep(1000 * copysetNodeOptions_.electionTimeoutMs); @@ -235,8 +235,10 @@ bool CopysetNodeManager::CheckCopysetUntilLoadFinished( NodeStatus status; node->GetStatus(&status); - // 当前副本的最后一个日志落后于leader上保存的第一个日志 - // 这种情况下此副本会通过安装快照恢复,可以忽略避免阻塞检查线程 + // When the last log of the current replica lags behind the first log + // saved on the leader, in this situation, the replica will recover by + // installing a snapshot, and it can be safely ignored to avoid blocking + // the checking thread. 
bool mayInstallSnapshot = leaderStaus.first_index > status.last_index; if (mayInstallSnapshot) { LOG(WARNING) << "Copyset " @@ -250,73 +252,73 @@ bool CopysetNodeManager::CheckCopysetUntilLoadFinished( return false; } - // 判断当前副本已经apply的日志是否接近已经committed的日志 - int64_t margin = leaderStaus.committed_index - - status.known_applied_index; - bool catchupLeader = margin - < (int64_t)copysetNodeOptions_.finishLoadMargin; + // Determine whether the logs that have been applied to the current + // replica are close to the logs that have been committed + int64_t margin = + leaderStaus.committed_index - status.known_applied_index; + bool catchupLeader = + margin < (int64_t)copysetNodeOptions_.finishLoadMargin; if (catchupLeader) { LOG(INFO) << "Load copyset " << ToGroupIdString(logicPoolId, copysetId) << " finished, " << "leader CommittedIndex: " << leaderStaus.committed_index - << ", node appliedIndex: " - << status.known_applied_index; + << ", node appliedIndex: " << status.known_applied_index; return true; } retryTimes = 0; ::usleep(1000 * copysetNodeOptions_.checkLoadMarginIntervalMs); } - LOG(WARNING) << "check copyset " - << ToGroupIdString(logicPoolId, copysetId) + LOG(WARNING) << "check copyset " << ToGroupIdString(logicPoolId, copysetId) << " failed."; return false; } std::shared_ptr CopysetNodeManager::GetCopysetNode( - const LogicPoolID &logicPoolId, const CopysetID ©setId) const { - /* 加读锁 */ + const LogicPoolID& logicPoolId, const CopysetID& copysetId) const { + /*Read lock*/ ReadLockGuard readLockGuard(rwLock_); GroupId groupId = ToGroupId(logicPoolId, copysetId); auto it = copysetNodeMap_.find(groupId); - if (copysetNodeMap_.end() != it) - return it->second; + if (copysetNodeMap_.end() != it) return it->second; return nullptr; } void CopysetNodeManager::GetAllCopysetNodes( - std::vector *nodes) const { - /* 加读锁 */ + std::vector* nodes) const { + /*Read lock*/ ReadLockGuard readLockGuard(rwLock_); for (auto it = copysetNodeMap_.begin(); it != copysetNodeMap_.end(); ++it) { nodes->push_back(it->second); } } -bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf) { +bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf) { GroupId groupId = ToGroupId(logicPoolId, copysetId); - // 如果本地copyset还未全部加载完成,不允许外部创建copyset + // If the local copyset has not been fully loaded yet, external copyset + // creation is not allowed if (!loadFinished_.load(std::memory_order_acquire)) { LOG(WARNING) << "Create copyset failed: load unfinished " << ToGroupIdString(logicPoolId, copysetId); return false; } - // copysetnode析构的时候会去调shutdown,可能导致协程切出 - // 所以创建copysetnode失败的时候,不能占着写锁,等写锁释放后再析构 + // When copysetnode is deconstructed, shutdown may be called, which may lead + // to coprocessor disconnection So when creating a copysetnode fails, it + // cannot occupy the write lock, wait for the write lock to be released + // before destructing std::shared_ptr copysetNode = nullptr; - /* 加写锁 */ + /*Write lock*/ WriteLockGuard writeLockGuard(rwLock_); if (copysetNodeMap_.end() == copysetNodeMap_.find(groupId)) { - copysetNode = std::make_shared(logicPoolId, - copysetId, - conf); + copysetNode = + std::make_shared(logicPoolId, copysetId, conf); if (0 != copysetNode->Init(copysetNodeOptions_)) { LOG(ERROR) << "Copyset " << ToGroupIdString(logicPoolId, copysetId) - << " init failed"; + << " init failed"; return false; } if (0 != copysetNode->Run()) { @@ -325,8 +327,7 @@ 
bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, return false; } copysetNodeMap_.insert(std::pair>( - groupId, - copysetNode)); + groupId, copysetNode)); LOG(INFO) << "Create copyset success " << ToGroupIdString(logicPoolId, copysetId); return true; @@ -336,8 +337,8 @@ bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, return false; } -bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID ©setId, +bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const std::vector peers) { Configuration conf; for (Peer peer : peers) { @@ -348,13 +349,10 @@ bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, } std::shared_ptr CopysetNodeManager::CreateCopysetNodeUnlocked( - const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf) { + const LogicPoolID& logicPoolId, const CopysetID& copysetId, + const Configuration& conf) { std::shared_ptr copysetNode = - std::make_shared(logicPoolId, - copysetId, - conf); + std::make_shared(logicPoolId, copysetId, conf); if (0 != copysetNode->Init(copysetNodeOptions_)) { LOG(ERROR) << "Copyset " << ToGroupIdString(logicPoolId, copysetId) << " init failed"; @@ -369,13 +367,13 @@ std::shared_ptr CopysetNodeManager::CreateCopysetNodeUnlocked( return copysetNode; } -int CopysetNodeManager::AddService(brpc::Server *server, - const butil::EndPoint &listenAddress) { +int CopysetNodeManager::AddService(brpc::Server* server, + const butil::EndPoint& listenAddress) { int ret = 0; uint64_t maxInflight = 100; - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); - CopysetNodeManager *copysetNodeManager = this; + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); + CopysetNodeManager* copysetNodeManager = this; ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = copysetNodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle; @@ -401,7 +399,7 @@ int CopysetNodeManager::AddService(brpc::Server *server, ret = server->RemoveService(service); CHECK(0 == ret) << "Fail to remove braft::FileService"; ret = server->AddService(&kCurveFileService, - brpc::SERVER_DOESNT_OWN_SERVICE); + brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add CurveFileService"; // add other services @@ -413,70 +411,71 @@ int CopysetNodeManager::AddService(brpc::Server *server, brpc::SERVER_OWNS_SERVICE); CHECK(0 == ret) << "Fail to add CopysetService"; auto epochMap = std::make_shared(); - ret = server->AddService(new ChunkServiceImpl( - chunkServiceOptions, epochMap), - brpc::SERVER_OWNS_SERVICE); + ret = server->AddService( + new ChunkServiceImpl(chunkServiceOptions, epochMap), + brpc::SERVER_OWNS_SERVICE); CHECK(0 == ret) << "Fail to add ChunkService"; } while (false); return ret; } -bool CopysetNodeManager::DeleteCopysetNode(const LogicPoolID &logicPoolId, - const CopysetID ©setId) { +bool CopysetNodeManager::DeleteCopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { bool ret = false; GroupId groupId = ToGroupId(logicPoolId, copysetId); { - // 加读锁 + // Read lock ReadLockGuard readLockGuard(rwLock_); auto it = copysetNodeMap_.find(groupId); if (copysetNodeMap_.end() != it) { - // TODO(yyk) 这部分可能存在死锁的风险,后续需要评估 + // TODO(yyk) There may be a risk of deadlock, which needs to be + // evaluated in the future it->second->Fini(); ret = true; } } { - // 加写锁 + // Write lock WriteLockGuard writeLockGuard(rwLock_); auto it = 
copysetNodeMap_.find(groupId);
         if (copysetNodeMap_.end() != it) {
             copysetNodeMap_.erase(it);
             ret = true;
             LOG(INFO) << "Delete copyset "
-                      << ToGroupIdString(logicPoolId, copysetId)
-                      <<" success.";
+                      << ToGroupIdString(logicPoolId, copysetId) << " success.";
         }
     }
 
     return ret;
 }
 
-bool CopysetNodeManager::PurgeCopysetNodeData(const LogicPoolID &logicPoolId,
-                                              const CopysetID &copysetId) {
+bool CopysetNodeManager::PurgeCopysetNodeData(const LogicPoolID& logicPoolId,
+                                              const CopysetID& copysetId) {
     bool ret = false;
     GroupId groupId = ToGroupId(logicPoolId, copysetId);
     {
-        // 加读锁
+        // Read lock
         ReadLockGuard readLockGuard(rwLock_);
         auto it = copysetNodeMap_.find(groupId);
         if (copysetNodeMap_.end() != it) {
-            // TODO(yyk) 这部分可能存在死锁的风险,后续需要评估
+            // TODO(yyk) There may be a risk of deadlock here; it needs to be
+            // evaluated later
             it->second->Fini();
             ret = true;
         }
     }
     {
-        // 加写锁
+        // Write lock
         WriteLockGuard writeLockGuard(rwLock_);
         auto it = copysetNodeMap_.find(groupId);
         if (copysetNodeMap_.end() != it) {
             if (0 != copysetNodeOptions_.trash->RecycleCopySet(
-                    it->second->GetCopysetDir())) {
+                         it->second->GetCopysetDir())) {
                 LOG(ERROR) << "Failed to remove copyset "
                            << ToGroupIdString(logicPoolId, copysetId)
                            << " persistently.";
@@ -519,18 +518,18 @@ bool CopysetNodeManager::DeleteBrokenCopyset(const LogicPoolID& poolId,
     return true;
 }
 
-bool CopysetNodeManager::IsExist(const LogicPoolID &logicPoolId,
-                                 const CopysetID &copysetId) {
-    /* 加读锁 */
+bool CopysetNodeManager::IsExist(const LogicPoolID& logicPoolId,
+                                 const CopysetID& copysetId) {
+    /* Read lock */
     ReadLockGuard readLockGuard(rwLock_);
     GroupId groupId = ToGroupId(logicPoolId, copysetId);
     return copysetNodeMap_.end() != copysetNodeMap_.find(groupId);
 }
 
 bool CopysetNodeManager::InsertCopysetNodeIfNotExist(
-    const LogicPoolID &logicPoolId, const CopysetID &copysetId,
+    const LogicPoolID& logicPoolId, const CopysetID& copysetId,
     std::shared_ptr node) {
-    /* 加写锁 */
+    /* Write lock */
     WriteLockGuard writeLockGuard(rwLock_);
     GroupId groupId = ToGroupId(logicPoolId, copysetId);
     auto it = copysetNodeMap_.find(groupId);
diff --git a/src/chunkserver/copyset_node_manager.h b/src/chunkserver/copyset_node_manager.h
index 8294b21e0f..5336025227 100755
--- a/src/chunkserver/copyset_node_manager.h
+++ b/src/chunkserver/copyset_node_manager.h
@@ -23,209 +23,215 @@
 #ifndef SRC_CHUNKSERVER_COPYSET_NODE_MANAGER_H_
 #define SRC_CHUNKSERVER_COPYSET_NODE_MANAGER_H_
 
-#include  //NOLINT
-#include
 #include
+#include  //NOLINT
 #include
+#include
 
 #include "src/chunkserver/copyset_node.h"
 #include "src/common/concurrent/rw_lock.h"
-#include "src/common/uncopyable.h"
 #include "src/common/concurrent/task_thread_pool.h"
+#include "src/common/uncopyable.h"
 
 namespace curve {
 namespace chunkserver {
 
 using curve::common::BthreadRWLock;
 using curve::common::ReadLockGuard;
-using curve::common::WriteLockGuard;
 using curve::common::TaskThreadPool;
+using curve::common::WriteLockGuard;
 
 class ChunkOpRequest;
 
 /**
- * Copyset Node的管理者
+ * Manager of Copyset Nodes
 */
 class CopysetNodeManager : public curve::common::Uncopyable {
  public:
     using CopysetNodePtr = std::shared_ptr;
 
-    // 单例,仅仅在 c++11或者更高版本下正确
-    static CopysetNodeManager &GetInstance() {
+    // Singleton; only correct in C++11 or later
+    static CopysetNodeManager& GetInstance() {
         static CopysetNodeManager instance;
         return instance;
     }
 
     virtual ~CopysetNodeManager() = default;
 
-    int Init(const CopysetNodeOptions &copysetNodeOptions);
+    int Init(const CopysetNodeOptions& copysetNodeOptions);
     int Run();
     int Fini();
 
     /**
-     * @brief 加载目录下的所有copyset
+     * @brief Load all copysets under the directory
      *
-     * @return 0表示加载成功,非0表示加载失败
+     * @return 0 means loading succeeded, non-zero means it failed
      */
     int ReloadCopysets();
 
     /**
-     * 创建copyset node,两种情况需要创建copyset node
-     * TODO(wudemiao): 后期替换之后删除掉
-     * 1.集群初始化,创建copyset
-     * 2.恢复的时候add peer
+     * Create a copyset node; it is needed in two cases:
+     * TODO(wudemiao): remove after the later replacement
+     * 1. Cluster initialization: create the copyset
+     * 2. Recovery: add peer
      */
-    bool CreateCopysetNode(const LogicPoolID &logicPoolId,
-                           const CopysetID &copysetId,
-                           const Configuration &conf);
+    bool CreateCopysetNode(const LogicPoolID& logicPoolId,
+                           const CopysetID& copysetId,
+                           const Configuration& conf);
 
     /**
-     * 都是创建copyset,目前两个同时存在,后期仅仅保留一个
+     * Also creates a copyset; the two overloads coexist for now, and only
+     * one will be kept later
     */
-    bool CreateCopysetNode(const LogicPoolID &logicPoolId,
-                           const CopysetID &copysetId,
+    bool CreateCopysetNode(const LogicPoolID& logicPoolId,
+                           const CopysetID& copysetId,
                            const std::vector peers);
 
     /**
-     * 删除copyset node内存实例(停止copyset, 销毁copyset内存实例并从copyset
-     * manager的copyset表中清除copyset表项,并不影响盘上的copyset持久化数据)
-     * @param logicPoolId:逻辑池id
-     * @param copysetId:复制组id
-     * @return true 成功,false失败
+     * Delete the in-memory instance of a copyset node (stop the copyset,
+     * destroy its in-memory instance, and remove its entry from the copyset
+     * manager's copyset table; the copyset's persistent data on disk is not
+     * affected)
+     * @param logicPoolId: logical pool ID
+     * @param copysetId: copyset (replication group) ID
+     * @return true on success, false on failure
     */
-    bool DeleteCopysetNode(const LogicPoolID &logicPoolId,
-                           const CopysetID &copysetId);
+    bool DeleteCopysetNode(const LogicPoolID& logicPoolId,
+                           const CopysetID& copysetId);
 
     /**
-     * 彻底删除copyset node内存数据(停止copyset, 销毁copyset内存实例并从
-     * copyset manager的copyset表中清除copyset表项,并将copyset持久化数据从盘
-     * 上彻底删除)
-     * @param logicPoolId:逻辑池id
-     * @param copysetId:复制组id
-     * @return true 成功,false失败
+     * Completely delete a copyset node's data (stop the copyset, destroy its
+     * in-memory instance, remove its entry from the copyset manager's
+     * copyset table, and permanently delete the copyset's persistent data
+     * from the disk)
+     * @param logicPoolId: logical pool ID
+     * @param copysetId: copyset (replication group) ID
+     * @return true on success, false on failure
     */
-    bool PurgeCopysetNodeData(const LogicPoolID &logicPoolId,
-                              const CopysetID &copysetId);
+    bool PurgeCopysetNodeData(const LogicPoolID& logicPoolId,
+                              const CopysetID& copysetId);
 
     /**
      * @brief Delete broken copyset
      * @param[in] poolId logical pool id
      * @param[in] copysetId copyset id
      * @return true if delete success, else return false
-     */
+     */
     bool DeleteBrokenCopyset(const LogicPoolID& poolId,
                              const CopysetID& copysetId);
 
     /**
-     * 判断指定的copyset是否存在
-     * @param logicPoolId:逻辑池子id
-     * @param copysetId:复制组id
-     * @return true存在,false不存在
+     * Check whether the specified copyset exists
+     * @param logicPoolId: logical pool ID
+     * @param copysetId: copyset (replication group) ID
+     * @return true if it exists, false otherwise
     */
-    bool IsExist(const LogicPoolID &logicPoolId, const CopysetID &copysetId);
+    bool IsExist(const LogicPoolID& logicPoolId, const CopysetID& copysetId);
 
     /**
-     * 获取指定的copyset
-     * @param logicPoolId:逻辑池子id
-     * @param copysetId:复制组id
-     * @return nullptr则为没查询到
+     * Get the specified copyset
+     * @param logicPoolId: logical pool ID
+     * @param copysetId: copyset (replication group) ID
+     * @return nullptr if the copyset is not found
     */
-    virtual CopysetNodePtr GetCopysetNode(const LogicPoolID &logicPoolId,
-                                          const CopysetID &copysetId) const;
+    virtual CopysetNodePtr GetCopysetNode(const LogicPoolID& logicPoolId,
+                                          const CopysetID& copysetId) const;
 
     /**
-     * 查询所有的copysets
-     * @param nodes:出参,返回所有的copyset
+     * Query all copysets
+     * @param nodes: output parameter that returns all copysets
     */
-    void GetAllCopysetNodes(std::vector *nodes) const;
+    void GetAllCopysetNodes(std::vector* nodes) const;
 
     /**
-     * 添加RPC service
-     * TODO(wudemiao): 目前仅仅用于测试,后期完善了会删除掉
-     * @param server:rpc Server
-     * @param listenAddress:监听的地址
-     * @return 0成功,-1失败
+     * Add an RPC service
+     * TODO(wudemiao): currently only used for testing; it will be removed
+     * once things are refined later
+     * @param server: the rpc Server
+     * @param listenAddress: the address to listen on
+     * @return 0 on success, -1 on failure
     */
-    int AddService(brpc::Server *server,
-                   const butil::EndPoint &listenAddress);
+    int AddService(brpc::Server* server, const butil::EndPoint& listenAddress);
 
-    virtual const CopysetNodeOptions &GetCopysetNodeOptions() const {
+    virtual const CopysetNodeOptions& GetCopysetNodeOptions() const {
         return copysetNodeOptions_;
     }
 
     /**
     * @brief: Only for test
     */
-    void SetCopysetNodeOptions(
-        const CopysetNodeOptions& copysetNodeOptions) {
+    void SetCopysetNodeOptions(const CopysetNodeOptions& copysetNodeOptions) {
         copysetNodeOptions_ = copysetNodeOptions;
     }
 
     /**
-     * 加载copyset,包括新建一个copyset或者重启一个copyset
-     * @param logicPoolId: 逻辑池id
+     * Load a copyset, which covers creating a new copyset or restarting an
+     * existing one
+     * @param logicPoolId: logical pool ID
     * @param copysetId: copyset id
-     * @param needCheckLoadFinished: 是否需要判断copyset加载完成
+     * @param needCheckLoadFinished: whether to check that the copyset has
+     * finished loading
     */
-    void LoadCopyset(const LogicPoolID &logicPoolId,
-                     const CopysetID &copysetId,
+    void LoadCopyset(const LogicPoolID& logicPoolId, const CopysetID& copysetId,
                      bool needCheckLoadFinished);
 
     /**
-     * 检测指定的copyset状态,直到copyset加载完成或出现异常
-     * @param node: 指定的copyset node
-     * @return true表示加载成功,false表示检测过程中出现异常
+     * Check the state of the specified copyset until it finishes loading or
+     * an exception occurs
+     * @param node: the specified copyset node
+     * @return true means loading succeeded; false means an exception
+     * occurred during the check
     */
     bool CheckCopysetUntilLoadFinished(std::shared_ptr node);
 
     /**
-     * 获取copysetNodeManager加载copyset的状态
-     * @return false-copyset未加载完成 true-copyset已加载完成
+     * Get the copyset-loading state of the copysetNodeManager
+     * @return false if copyset loading has not finished, true if it has
     */
     virtual bool LoadFinished();
 
 protected:
     CopysetNodeManager()
-        : copysetLoader_(nullptr)
-        , running_(false)
-        , loadFinished_(false) {}
+        : copysetLoader_(nullptr), running_(false), loadFinished_(false) {}
 
 private:
     /**
-     * 如果指定copyset不存在,则将copyset插入到map当中(线程安全)
-     * @param logicPoolId:逻辑池id
-     * @param copysetId:复制组id
-     * @param node:要插入的copysetnode
-     * @return copyset不存在,则插入到map并返回true;
-     *         copyset如果存在,则返回false
+     * If the specified copyset does not exist, insert it into the map
+     * (thread safe)
+     * @param logicPoolId: logical pool ID
+     * @param copysetId: copyset (replication group) ID
+     * @param node: the copysetnode to insert
+     * @return true if the copyset did not exist and was inserted into the
+     * map; false if the copyset already exists
     */
-    bool InsertCopysetNodeIfNotExist(const LogicPoolID &logicPoolId,
-                                     const CopysetID &copysetId,
+    bool InsertCopysetNodeIfNotExist(const LogicPoolID& logicPoolId,
+                                     const CopysetID& copysetId,
                                      std::shared_ptr node);
 
     /**
-     * 创建一个新的copyset或加载一个已存在的copyset(非线程安全)
-     * @param logicPoolId:逻辑池id
-     * @param copysetId:复制组id
-     * @param conf:此copyset的配置成员
-     * @return 创建或加载成功返回copysetnode,否则返回nullptr
+     * Create a new copyset or load an existing one (not thread safe)
+     * @param logicPoolId: logical pool ID
+     * @param copysetId: copyset (replication group) ID
+     * @param conf: the configuration members of this copyset
+     * @return the copysetnode on successful creation or loading, otherwise
+     * nullptr
     */
     std::shared_ptr CreateCopysetNodeUnlocked(
-        const LogicPoolID &logicPoolId,
-        const CopysetID &copysetId,
-        const Configuration &conf);
+        const LogicPoolID& logicPoolId, const CopysetID& copysetId,
+        const Configuration& conf);
 
 private:
-    using CopysetNodeMap = std::unordered_map>;
-    // 保护复制组 map的读写锁
+    using CopysetNodeMap =
+        std::unordered_map>;
+    // Read-write lock protecting the replication group map
     mutable BthreadRWLock rwLock_;
-    // 复制组map
+    // Map of replication groups
     CopysetNodeMap copysetNodeMap_;
-    // 复制组配置选项
+    // Replication group configuration options
     CopysetNodeOptions copysetNodeOptions_;
-    // 控制copyset并发启动的数量
+    // Controls how many copysets are started concurrently
     std::shared_ptr> copysetLoader_;
-    // 表示copyset node manager当前是否正在运行
+    // Indicates whether the copyset node manager is currently running
     Atomic running_;
-    // 表示copyset node manager当前是否已经完成加载
+    // Indicates whether the copyset node manager has finished loading
     Atomic loadFinished_;
 };
diff --git a/src/chunkserver/copyset_service.cpp b/src/chunkserver/copyset_service.cpp
index e09516c0ad..9082024b4c 100755
--- a/src/chunkserver/copyset_service.cpp
+++ b/src/chunkserver/copyset_service.cpp
@@ -20,36 +20,36 @@
 * Author: wudemiao
 */
 
+#include "src/chunkserver/copyset_service.h"
+
 #include
 #include
-#include
 #include
+#include
 
-#include "src/chunkserver/copyset_service.h"
 #include "src/chunkserver/copyset_node_manager.h"
 
 namespace curve {
 namespace chunkserver {
 
-void CopysetServiceImpl::CreateCopysetNode(RpcController *controller,
-                                           const CopysetRequest *request,
-                                           CopysetResponse *response,
-                                           Closure *done) {
+void CopysetServiceImpl::CreateCopysetNode(RpcController* controller,
+                                           const CopysetRequest* request,
+                                           CopysetResponse* response,
+                                           Closure* done) {
     brpc::ClosureGuard doneGuard(done);
-    brpc::Controller *cntl = dynamic_cast(controller);
+    brpc::Controller* cntl = dynamic_cast(controller);
 
     LOG(INFO) << "Received create copyset request: "
               << ToGroupIdString(request->logicpoolid(), request->copysetid());
 
-    // 解析request中的peers
+    // Parse the peers in the request
     Configuration conf;
     for (int i = 0; i < request->peerid_size(); ++i) {
         PeerId peer;
         int ret = peer.parse(request->peerid(i));
         if (ret != 0) {
-            cntl->SetFailed(EINVAL,
-                            "Fail to parse peer id %s",
+            cntl->SetFailed(EINVAL, "Fail to parse peer id %s",
                             request->peerid(i).c_str());
             return;
         }
@@ -59,12 +59,9 @@ void CopysetServiceImpl::CreateCopysetNode(RpcController *controller,
     LogicPoolID logicPoolID = request->logicpoolid();
     CopysetID copysetID = request->copysetid();
     GroupId groupId = ToGroupId(logicPoolID, copysetID);
-    if (false == copysetNodeManager_->IsExist(logicPoolID,
-                                              copysetID)) {
-        if (true ==
-            copysetNodeManager_->CreateCopysetNode(logicPoolID,
-                                                   copysetID,
-                                                   conf)) {
+    if (false == copysetNodeManager_->IsExist(logicPoolID, copysetID)) {
+        if (true == copysetNodeManager_->CreateCopysetNode(logicPoolID,
+                                                           copysetID, conf)) {
response->set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } else { response->set_status( @@ -80,10 +77,10 @@ void CopysetServiceImpl::CreateCopysetNode(RpcController *controller, << COPYSET_OP_STATUS_Name(response->status()); } -void CopysetServiceImpl::CreateCopysetNode2(RpcController *controller, - const CopysetRequest2 *request, - CopysetResponse2 *response, - Closure *done) { +void CopysetServiceImpl::CreateCopysetNode2(RpcController* controller, + const CopysetRequest2* request, + CopysetResponse2* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); @@ -103,31 +100,32 @@ void CopysetServiceImpl::CreateCopysetNode2(RpcController *controller, if (true == copysetNodeManager_->IsExist(copyset.logicpoolid(), copyset.copysetid())) { response->set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST); - LOG(WARNING) << "Create copyset " - << ToGroupIdString(copyset.logicpoolid(), - copyset.copysetid()) - << " failed, response code: " - << COPYSET_OP_STATUS_Name(COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST); //NOLINT + LOG(WARNING) + << "Create copyset " + << ToGroupIdString(copyset.logicpoolid(), + copyset.copysetid()) + << " failed, response code: " + << COPYSET_OP_STATUS_Name( + COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST); // NOLINT return; } - if (false == - copysetNodeManager_->CreateCopysetNode(copyset.logicpoolid(), - copyset.copysetid(), - peers)) { + if (false == copysetNodeManager_->CreateCopysetNode( + copyset.logicpoolid(), copyset.copysetid(), peers)) { response->set_status( COPYSET_OP_STATUS::COPYSET_OP_STATUS_FAILURE_UNKNOWN); LOG(ERROR) << "Create copyset " << ToGroupIdString(copyset.logicpoolid(), copyset.copysetid()) << " failed, response code: " - << COPYSET_OP_STATUS_Name(COPYSET_OP_STATUS::COPYSET_OP_STATUS_FAILURE_UNKNOWN); //NOLINT + << COPYSET_OP_STATUS_Name( + COPYSET_OP_STATUS:: + COPYSET_OP_STATUS_FAILURE_UNKNOWN); // NOLINT return; } LOG(INFO) << "Create copyset " - << ToGroupIdString(copyset.logicpoolid(), - copyset.copysetid()) + << ToGroupIdString(copyset.logicpoolid(), copyset.copysetid()) << " success."; } @@ -151,7 +149,7 @@ void CopysetServiceImpl::DeleteBrokenCopyset(RpcController* controller, // if copyset node exist in the manager means its data is complete if (copysetNodeManager_->IsExist(poolId, copysetId)) { response->set_status(COPYSET_OP_STATUS_COPYSET_IS_HEALTHY); - LOG(WARNING) << "Delete broken copyset, " << groupId << " is healthy"; + LOG(WARNING) << "Delete broken copyset, " << groupId << " is healthy"; } else if (!copysetNodeManager_->DeleteBrokenCopyset(poolId, copysetId)) { response->set_status(COPYSET_OP_STATUS_FAILURE_UNKNOWN); LOG(ERROR) << "Delete broken copyset " << groupId << " failed"; @@ -161,17 +159,17 @@ void CopysetServiceImpl::DeleteBrokenCopyset(RpcController* controller, } } -void CopysetServiceImpl::GetCopysetStatus(RpcController *controller, - const CopysetStatusRequest *request, - CopysetStatusResponse *response, - Closure *done) { +void CopysetServiceImpl::GetCopysetStatus(RpcController* controller, + const CopysetStatusRequest* request, + CopysetStatusResponse* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); LOG(INFO) << "Received GetCopysetStatus request: " << ToGroupIdString(request->logicpoolid(), request->copysetid()); - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -183,14 +181,14 @@ void 
CopysetServiceImpl::GetCopysetStatus(RpcController *controller,
-                                          const CopysetStatusRequest *request,
-                                          CopysetStatusResponse *response,
-                                          Closure *done) {
+void CopysetServiceImpl::GetCopysetStatus(RpcController* controller,
+                                          const CopysetStatusRequest* request,
+                                          CopysetStatusResponse* response,
+                                          Closure* done) {
     (void)controller;
     brpc::ClosureGuard doneGuard(done);
 
     LOG(INFO) << "Received GetCopysetStatus request: "
               << ToGroupIdString(request->logicpoolid(), request->copysetid());
 
-    // 判断copyset是否存在
+    // Check whether the copyset exists
     auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(),
                                                        request->copysetid());
     if (nullptr == nodePtr) {
@@ -183,14 +181,14 @@ void CopysetServiceImpl::GetCopysetStatus(RpcController *controller,
         return;
     }
 
-    // 获取raft node status
+    // Get the raft node status
     NodeStatus status;
     nodePtr->GetStatus(&status);
     response->set_state(status.state);
-    Peer *peer = new Peer();
+    Peer* peer = new Peer();
     response->set_allocated_peer(peer);
     peer->set_address(status.peer_id.to_string());
-    Peer *leader = new Peer();
+    Peer* leader = new Peer();
     response->set_allocated_leader(leader);
     leader->set_address(status.leader_id.to_string());
     response->set_readonly(status.readonly);
@@ -204,13 +202,14 @@ void CopysetServiceImpl::GetCopysetStatus(RpcController *controller,
     response->set_lastindex(status.last_index);
     response->set_diskindex(status.disk_index);
 
-    // 获取配置的版本
+    // Get the configuration epoch
     response->set_epoch(nodePtr->GetConfEpoch());
 
     /**
-     * 考虑到query hash需要读取copyset的所有chunk数据,然后计算hash值
-     * 是一个非常耗时的操作,所以在request会设置query hash字段,如果
-     * 为false,那么就不需要查询copyset的hash值
+     * Since answering a query hash requires reading all chunk data of the
+     * copyset and computing a hash over it, which is very time-consuming,
+     * the request carries a "query hash" field; when it is false, the
+     * copyset's hash value does not need to be computed.
     */
     if (request->queryhash()) {
         std::string hash;
@@ -228,8 +227,7 @@ void CopysetServiceImpl::GetCopysetStatus(RpcController *controller,
 
     response->set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS);
     LOG(INFO) << "GetCopysetStatus success: "
-              << ToGroupIdString(request->logicpoolid(),
-                                 request->copysetid());
+              << ToGroupIdString(request->logicpoolid(), request->copysetid());
 }
 
 }  // namespace chunkserver
diff --git a/src/chunkserver/copyset_service.h b/src/chunkserver/copyset_service.h
index fabf6df8fc..7025b6e9dd 100755
--- a/src/chunkserver/copyset_service.h
+++ b/src/chunkserver/copyset_service.h
@@ -28,51 +28,48 @@
 namespace curve {
 namespace chunkserver {
 
-using ::google::protobuf::RpcController;
 using ::google::protobuf::Closure;
+using ::google::protobuf::RpcController;
 
 class CopysetNodeManager;
 
 /**
- * 复制组管理的Rpc服务,目前仅有创建复制组
+ * The RPC service for replication group management; currently it only
+ * supports creating replication groups
 */
 class CopysetServiceImpl : public CopysetService {
  public:
-    explicit CopysetServiceImpl(CopysetNodeManager* copysetNodeManager) :
-        copysetNodeManager_(copysetNodeManager) {}
+    explicit CopysetServiceImpl(CopysetNodeManager* copysetNodeManager)
+        : copysetNodeManager_(copysetNodeManager) {}
     ~CopysetServiceImpl() {}
 
     /**
-     * 创建复制组,一次只能创建一个
+     * Create a replication group; only one can be created per request
     */
-    void CreateCopysetNode(RpcController *controller,
-                           const CopysetRequest *request,
-                           CopysetResponse *response,
-                           Closure *done);
+    void CreateCopysetNode(RpcController* controller,
+                           const CopysetRequest* request,
+                           CopysetResponse* response, Closure* done);
 
     /*
-     * 创建复制组,一次可以创建多个
+     * Create replication groups; multiple can be created in one request
     */
-    void CreateCopysetNode2(RpcController *controller,
-                            const CopysetRequest2 *request,
-                            CopysetResponse2 *response,
-                            Closure *done);
+    void CreateCopysetNode2(RpcController* controller,
                            const CopysetRequest2* request,
+                            CopysetResponse2* response, Closure* done);
 
     /**
     * @brief Delete broken copyset
     */
     void DeleteBrokenCopyset(RpcController* controller,
                             const CopysetRequest* request,
-                             CopysetResponse* response,
-                             Closure* done);
+                             CopysetResponse* response, Closure* done);
 
-    void GetCopysetStatus(RpcController *controller,
-                          const CopysetStatusRequest *request,
-                          CopysetStatusResponse *response,
-                          Closure *done);
+    void GetCopysetStatus(RpcController* controller,
+                          const CopysetStatusRequest* request,
+                          CopysetStatusResponse* response, Closure* done);
 
 private:
-    // 复制组管理者
+    // Copyset node manager of the replication groups
     CopysetNodeManager* copysetNodeManager_;
 };
diff --git a/src/chunkserver/heartbeat.cpp b/src/chunkserver/heartbeat.cpp
index b81fe6bdb3..5ce5a575cd 100644
--- a/src/chunkserver/heartbeat.cpp
+++ b/src/chunkserver/heartbeat.cpp
@@ -21,606 +21,694 @@
 * 2018/12/20 Wenyu Zhou Initial version
 */
 
-#include
-#include
+#include "src/chunkserver/heartbeat.h"
+
+#include
 #include
 #include
-#include
+#include
+#include
 
-#include
 #include
+#include
 
-#include "src/fs/fs_common.h"
-#include "src/common/timeutility.h"
-#include "src/chunkserver/heartbeat.h"
-#include "src/common/uri_parser.h"
 #include "src/chunkserver/heartbeat_helper.h"
 #include "src/common/curve_version.h"
+#include "src/common/timeutility.h"
+#include "src/common/uri_parser.h"
+#include "src/fs/fs_common.h"
 
 using curve::fs::FileSystemInfo;
 
-namespace curve {
-namespace chunkserver {
-TaskStatus Heartbeat::PurgeCopyset(LogicPoolID poolId, CopysetID copysetId) {
-    if (!copysetMan_->PurgeCopysetNodeData(poolId, copysetId)) {
-        LOG(ERROR) << "Failed to clean copyset "
-                   << ToGroupIdStr(poolId, copysetId) << " and its data.";
-
-        return TaskStatus(-1, "Failed to clean copyset");
-    }
-
-    LOG(INFO) << "Successfully cleaned copyset "
-              << ToGroupIdStr(poolId, copysetId) << " and its data.";
-
-    return TaskStatus::OK();
-}
-
-int Heartbeat::Init(const HeartbeatOptions &options) {
-    toStop_.store(false, std::memory_order_release);
-    options_ = options;
-
-    butil::ip_t csIp;
-    storePath_ = curve::common::UriParser::GetPathFromUri(options_.storeUri);
-    if (butil::str2ip(options_.ip.c_str(), &csIp) < 0) {
-        LOG(ERROR) << "Invalid Chunkserver IP provided: " << options_.ip;
-        return -1;
-    }
-    csEp_ = butil::EndPoint(csIp, options_.port);
-    LOG(INFO) << "Chunkserver address: " << options_.ip << ":" << options_.port;
-
-    // mdsEps不能为空
-    ::curve::common::SplitString(options_.mdsListenAddr, ",", &mdsEps_);
-    if (mdsEps_.empty()) {
-        LOG(ERROR) << "Invalid mds ip provided: " << options_.mdsListenAddr;
-        return -1;
-    }
-    // 检查每个地址的合法性
-    for (auto addr : mdsEps_) {
-        butil::EndPoint endpt;
-        if (butil::str2endpoint(addr.c_str(), &endpt) < 0) {
-            LOG(ERROR) << "Invalid sub mds ip:port provided: " << addr;
-            return -1;
-        }
-    }
-
-    inServiceIndex_ = 0;
-    LOG(INFO) << "MDS address: " << options_.mdsListenAddr;
-
-    copysetMan_ = options.copysetNodeManager;
-
-    // 初始化timer
-    waitInterval_.Init(options_.intervalSec * 1000);
-
-    // 获取当前unix时间戳
-    startUpTime_ = ::curve::common::TimeUtility::GetTimeofDaySec();
-
-    // init scanManager
-    scanMan_ = options.scanManager;
-    return 0;
-}
-
-int Heartbeat::Run() {
-    // start scan thread
-    hbThread_ = Thread(&Heartbeat::HeartbeatWorker, this);
-    return 0;
-}
-
-int Heartbeat::Stop() {
-    LOG(INFO) << "Stopping Heartbeat manager.";
-
-    waitInterval_.StopWait();
-    toStop_.store(true, std::memory_order_release);
-    hbThread_.join();
-
-    LOG(INFO) << "Stopped Heartbeat manager.";
-    return 0;
-}
-
-int Heartbeat::Fini() {
-    Stop();
-    // stop scan thread
-    LOG(INFO) << "Heartbeat manager cleaned up.";
-    return 0;
-}
-
-int Heartbeat::GetFileSystemSpaces(size_t* capacity, size_t* avail) {
-    int ret;
-    struct FileSystemInfo info;
-
-    ret = options_.fs->Statfs(storePath_, &info);
-    if (ret != 0) {
-        LOG(ERROR) << "Failed to get file system space information, "
-                   << " error message: " << strerror(errno);
-        return -1;
-    }
-
*capacity = info.total; - *avail = info.available; - - return 0; -} - -int Heartbeat::BuildCopysetInfo(curve::mds::heartbeat::CopySetInfo* info, - CopysetNodePtr copyset) { - int ret; - LogicPoolID poolId = copyset->GetLogicPoolId(); - CopysetID copysetId = copyset->GetCopysetId(); - - info->set_logicalpoolid(poolId); - info->set_copysetid(copysetId); - info->set_epoch(copyset->GetConfEpoch()); - - // for scan - info->set_scaning(copyset->GetScan()); - if (copyset->GetLastScan() > 0) { - info->set_lastscansec(copyset->GetLastScan()); - } - auto failedScanMaps = copyset->GetFailedScanMap(); - if (!failedScanMaps.empty()) { - for (auto &map : failedScanMaps) { - info->add_scanmap()->CopyFrom(map); - } - } - - std::vector peers; - copyset->ListPeers(&peers); - for (Peer peer : peers) { - auto replica = info->add_peers(); - replica->set_address(peer.address().c_str()); - } - - PeerId leader = copyset->GetLeaderId(); - auto replica = new ::curve::common::Peer(); - replica->set_address(leader.to_string()); - info->set_allocated_leaderpeer(replica); - - curve::mds::heartbeat::CopysetStatistics* stats = - new curve::mds::heartbeat::CopysetStatistics(); - CopysetMetricPtr copysetMetric = - ChunkServerMetric::GetInstance()->GetCopysetMetric(poolId, copysetId); - if (copysetMetric != nullptr) { - IOMetricPtr readMetric = - copysetMetric->GetIOMetric(CSIOMetricType::READ_CHUNK); - IOMetricPtr writeMetric = - copysetMetric->GetIOMetric(CSIOMetricType::WRITE_CHUNK); - if (readMetric != nullptr && writeMetric != nullptr) { - stats->set_readrate(readMetric->bps_.get_value(1)); - stats->set_writerate(writeMetric->bps_.get_value(1)); - stats->set_readiops(readMetric->iops_.get_value(1)); - stats->set_writeiops(writeMetric->iops_.get_value(1)); - info->set_allocated_stats(stats); - } else { - LOG(ERROR) << "Failed to get copyset io metric." 
- << "logic pool id: " << poolId - << ", copyset id: " << copysetId; +namespace curve +{ + namespace chunkserver + { + TaskStatus Heartbeat::PurgeCopyset(LogicPoolID poolId, CopysetID copysetId) + { + if (!copysetMan_->PurgeCopysetNodeData(poolId, copysetId)) + { + LOG(ERROR) << "Failed to clean copyset " + << ToGroupIdStr(poolId, copysetId) << " and its data."; + + return TaskStatus(-1, "Failed to clean copyset"); + } + + LOG(INFO) << "Successfully cleaned copyset " + << ToGroupIdStr(poolId, copysetId) << " and its data."; + + return TaskStatus::OK(); } - } - - ConfigChangeType type; - Configuration conf; - Peer peer; - - if ((ret = copyset->GetConfChange(&type, &conf, &peer)) != 0) { - LOG(ERROR) << "Failed to get config change state of copyset " - << ToGroupIdStr(poolId, copysetId); - return ret; - } else if (type == curve::mds::heartbeat::NONE) { - return 0; - } - - ConfigChangeInfo* confChxInfo = new ConfigChangeInfo(); - replica = new(std::nothrow) ::curve::common::Peer(); - if (replica == nullptr) { - LOG(ERROR) << "apply memory error"; - return -1; - } - replica->set_address(peer.address()); - confChxInfo->set_allocated_peer(replica); - confChxInfo->set_type(type); - confChxInfo->set_finished(false); - info->set_allocated_configchangeinfo(confChxInfo); - - return 0; -} - -int Heartbeat::BuildRequest(HeartbeatRequest* req) { - int ret; - - req->set_chunkserverid(options_.chunkserverId); - req->set_token(options_.chunkserverToken); - req->set_starttime(startUpTime_); - req->set_ip(options_.ip); - req->set_port(options_.port); - - /* - * TODO(wenyu): DiskState field is not valid yet until disk health feature - * is ready - */ - curve::mds::heartbeat::DiskState* diskState = - new curve::mds::heartbeat::DiskState(); - diskState->set_errtype(0); - diskState->set_errmsg(""); - req->set_allocated_diskstate(diskState); - - ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); - curve::mds::heartbeat::ChunkServerStatisticInfo* stats = - new curve::mds::heartbeat::ChunkServerStatisticInfo(); - IOMetricPtr readMetric = metric->GetIOMetric(CSIOMetricType::READ_CHUNK); - IOMetricPtr writeMetric = metric->GetIOMetric(CSIOMetricType::WRITE_CHUNK); - if (readMetric != nullptr && writeMetric != nullptr) { - stats->set_readrate(readMetric->bps_.get_value(1)); - stats->set_writerate(writeMetric->bps_.get_value(1)); - stats->set_readiops(readMetric->iops_.get_value(1)); - stats->set_writeiops(writeMetric->iops_.get_value(1)); - } - CopysetNodeOptions opt = copysetMan_->GetCopysetNodeOptions(); - uint64_t chunkFileSize = opt.maxChunkSize; - uint64_t walSegmentFileSize = opt.maxWalSegmentSize; - uint64_t usedChunkSize = metric->GetTotalSnapshotCount() * chunkFileSize - + metric->GetTotalChunkCount() * chunkFileSize; - uint64_t usedWalSegmentSize = metric->GetTotalWalSegmentCount() - * walSegmentFileSize; - uint64_t trashedChunkSize = metric->GetChunkTrashedCount() * chunkFileSize; - uint64_t leftChunkSize = metric->GetChunkLeftCount() * chunkFileSize; - - // leftWalSegmentSize will be 0 when CHUNK and WAL share file pool - uint64_t leftWalSegmentSize = metric->GetWalSegmentLeftCount() - * walSegmentFileSize; - uint64_t chunkPoolSize = options_.chunkFilePool->Size() * - options_.chunkFilePool->GetFilePoolOpt().fileSize; - - // compute format progress rate. 
- const ChunkFormatStat& formatStat = - options_.chunkFilePool->GetChunkFormatStat(); // NOLINT - - stats->set_chunkfilepoolsize(chunkPoolSize); - stats->set_chunksizeusedbytes(usedChunkSize+usedWalSegmentSize); - stats->set_chunksizeleftbytes(leftChunkSize+leftWalSegmentSize); - stats->set_chunksizetrashedbytes(trashedChunkSize); - if (formatStat.preAllocateNum != 0) { - stats->set_chunkfilepoolformatpercent( - 100 * formatStat.allocateChunkNum / formatStat.preAllocateNum); - } else { - stats->set_chunkfilepoolformatpercent(100); - } - req->set_allocated_stats(stats); - - size_t cap, avail; - ret = GetFileSystemSpaces(&cap, &avail); - if (ret != 0) { - LOG(ERROR) << "Failed to get file system space information for path " - << storePath_; - return -1; - } - req->set_diskcapacity(cap); - req->set_diskused(cap - avail); - - std::vector copysets; - copysetMan_->GetAllCopysetNodes(©sets); - - req->set_copysetcount(copysets.size()); - int leaders = 0; - - for (CopysetNodePtr copyset : copysets) { - curve::mds::heartbeat::CopySetInfo* info = req->add_copysetinfos(); - - ret = BuildCopysetInfo(info, copyset); - if (ret != 0) { - LOG(ERROR) << "Failed to build heartbeat information of copyset " - << ToGroupIdStr(copyset->GetLogicPoolId(), - copyset->GetCopysetId()); - continue; + + int Heartbeat::Init(const HeartbeatOptions &options) + { + toStop_.store(false, std::memory_order_release); + options_ = options; + + butil::ip_t csIp; + storePath_ = curve::common::UriParser::GetPathFromUri(options_.storeUri); + if (butil::str2ip(options_.ip.c_str(), &csIp) < 0) + { + LOG(ERROR) << "Invalid Chunkserver IP provided: " << options_.ip; + return -1; + } + csEp_ = butil::EndPoint(csIp, options_.port); + LOG(INFO) << "Chunkserver address: " << options_.ip << ":" << options_.port; + + // mdsEps cannot be empty + ::curve::common::SplitString(options_.mdsListenAddr, ",", &mdsEps_); + if (mdsEps_.empty()) + { + LOG(ERROR) << "Invalid mds ip provided: " << options_.mdsListenAddr; + return -1; + } + // Check the legality of each address + for (auto addr : mdsEps_) + { + butil::EndPoint endpt; + if (butil::str2endpoint(addr.c_str(), &endpt) < 0) + { + LOG(ERROR) << "Invalid sub mds ip:port provided: " << addr; + return -1; + } + } + + inServiceIndex_ = 0; + LOG(INFO) << "MDS address: " << options_.mdsListenAddr; + + copysetMan_ = options.copysetNodeManager; + + // Initialize timer + waitInterval_.Init(options_.intervalSec * 1000); + + // Obtain the current Unix timestamp + startUpTime_ = ::curve::common::TimeUtility::GetTimeofDaySec(); + + // init scanManager + scanMan_ = options.scanManager; + return 0; } - if (copyset->IsLeaderTerm()) { - ++leaders; + + int Heartbeat::Run() + { + // start scan thread + hbThread_ = Thread(&Heartbeat::HeartbeatWorker, this); + return 0; } - } - req->set_leadercount(leaders); - req->set_version(curve::common::CurveVersion()); - - return 0; -} - -void Heartbeat::DumpHeartbeatRequest(const HeartbeatRequest& request) { - DVLOG(6) << "Heartbeat request: Chunkserver ID: " - << request.chunkserverid() - << ", IP: " << request.ip() << ", port: " << request.port() - << ", copyset count: " << request.copysetcount() - << ", leader count: " << request.leadercount(); - for (int i = 0; i < request.copysetinfos_size(); i ++) { - const curve::mds::heartbeat::CopySetInfo& info = - request.copysetinfos(i); - - std::string peersStr = ""; - for (int j = 0; j < info.peers_size(); j ++) { - peersStr += info.peers(j).address() + ","; + + int Heartbeat::Stop() + { + LOG(INFO) << "Stopping Heartbeat 
manager."; + + waitInterval_.StopWait(); + toStop_.store(true, std::memory_order_release); + hbThread_.join(); + + LOG(INFO) << "Stopped Heartbeat manager."; + return 0; } - DVLOG(6) << "Copyset " << i << " " - << ToGroupIdStr(info.logicalpoolid(), info.copysetid()) - << ", epoch: " << info.epoch() - << ", leader: " << info.leaderpeer().address() - << ", peers: " << peersStr; - - if (info.has_configchangeinfo()) { - const ConfigChangeInfo& cxInfo = info.configchangeinfo(); - DVLOG(6) << "Config change info: peer: " << cxInfo.peer().address() - << ", finished: " << cxInfo.finished() - << ", errno: " << cxInfo.err().errtype() - << ", errmsg: " << cxInfo.err().errmsg(); + int Heartbeat::Fini() + { + Stop(); + // stop scan thread + LOG(INFO) << "Heartbeat manager cleaned up."; + return 0; } - } -} - -void Heartbeat::DumpHeartbeatResponse(const HeartbeatResponse& response) { - int count = response.needupdatecopysets_size(); - if (count > 0) { - LOG(INFO) << "Received " << count << " config change commands:"; - for (int i = 0; i < count; i ++) { - CopySetConf conf = response.needupdatecopysets(i); - - int type = (conf.has_type()) ? conf.type() : 0; - std::string item = (conf.has_configchangeitem()) ? - conf.configchangeitem().address() : ""; - - std::string peersStr = ""; - for (int j = 0; j < conf.peers_size(); j ++) { - peersStr += conf.peers(j).address(); + + int Heartbeat::GetFileSystemSpaces(size_t *capacity, size_t *avail) + { + int ret; + struct FileSystemInfo info; + + ret = options_.fs->Statfs(storePath_, &info); + if (ret != 0) + { + LOG(ERROR) << "Failed to get file system space information, " + << " error message: " << strerror(errno); + return -1; } - LOG(INFO) << "Config change " << i << ": " - << "Copyset < " << conf.logicalpoolid() - << ", " << conf.copysetid() << ">, epoch: " - << conf.epoch() << ", Peers: " << peersStr - << ", type: " << type << ", item: " << item; - } - } else { - LOG(INFO) << "Received no config change command."; - } -} - -int Heartbeat::SendHeartbeat(const HeartbeatRequest& request, - HeartbeatResponse* response) { - brpc::Channel channel; - if (channel.Init(mdsEps_[inServiceIndex_].c_str(), NULL) != 0) { - LOG(ERROR) << csEp_.ip << ":" << csEp_.port - << " Fail to init channel to MDS " - << mdsEps_[inServiceIndex_]; - return -1; - } - - curve::mds::heartbeat::HeartbeatService_Stub stub(&channel); - brpc::Controller cntl; - cntl.set_timeout_ms(options_.timeout); - - DumpHeartbeatRequest(request); - - stub.ChunkServerHeartbeat(&cntl, &request, response, nullptr); - if (cntl.Failed()) { - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == ETIMEDOUT || - cntl.ErrorCode() == brpc::ELOGOFF || - cntl.ErrorCode() == brpc::ERPCTIMEDOUT) { - LOG(WARNING) << "current mds: " << mdsEps_[inServiceIndex_] - << " is shutdown or going to quit," - << cntl.ErrorText(); - inServiceIndex_ = (inServiceIndex_ + 1) % mdsEps_.size(); - LOG(INFO) << "next heartbeat switch to " - << mdsEps_[inServiceIndex_]; - } else { - LOG(ERROR) << csEp_.ip << ":" << csEp_.port - << " Fail to send heartbeat to MDS " - << mdsEps_[inServiceIndex_] << "," - << " cntl errorCode: " << cntl.ErrorCode() - << " cntl error: " << cntl.ErrorText(); + *capacity = info.total; + *avail = info.available; + + return 0; } - return -1; - } else { - DumpHeartbeatResponse(*response); - } - - return 0; -} - -int Heartbeat::ExecTask(const HeartbeatResponse& response) { - int count = response.needupdatecopysets_size(); - for (int i = 0; i < count; i ++) { - CopySetConf conf = response.needupdatecopysets(i); - 
CopysetNodePtr copyset = copysetMan_->GetCopysetNode( - conf.logicalpoolid(), conf.copysetid()); - - // 判断copyconf是否合法 - if (!HeartbeatHelper::CopySetConfValid(conf, copyset)) { - continue; + + int Heartbeat::BuildCopysetInfo(curve::mds::heartbeat::CopySetInfo *info, + CopysetNodePtr copyset) + { + int ret; + LogicPoolID poolId = copyset->GetLogicPoolId(); + CopysetID copysetId = copyset->GetCopysetId(); + + info->set_logicalpoolid(poolId); + info->set_copysetid(copysetId); + info->set_epoch(copyset->GetConfEpoch()); + + // for scan + info->set_scaning(copyset->GetScan()); + if (copyset->GetLastScan() > 0) + { + info->set_lastscansec(copyset->GetLastScan()); + } + auto failedScanMaps = copyset->GetFailedScanMap(); + if (!failedScanMaps.empty()) + { + for (auto &map : failedScanMaps) + { + info->add_scanmap()->CopyFrom(map); + } + } + + std::vector peers; + copyset->ListPeers(&peers); + for (Peer peer : peers) + { + auto replica = info->add_peers(); + replica->set_address(peer.address().c_str()); + } + + PeerId leader = copyset->GetLeaderId(); + auto replica = new ::curve::common::Peer(); + replica->set_address(leader.to_string()); + info->set_allocated_leaderpeer(replica); + + curve::mds::heartbeat::CopysetStatistics *stats = + new curve::mds::heartbeat::CopysetStatistics(); + CopysetMetricPtr copysetMetric = + ChunkServerMetric::GetInstance()->GetCopysetMetric(poolId, copysetId); + if (copysetMetric != nullptr) + { + IOMetricPtr readMetric = + copysetMetric->GetIOMetric(CSIOMetricType::READ_CHUNK); + IOMetricPtr writeMetric = + copysetMetric->GetIOMetric(CSIOMetricType::WRITE_CHUNK); + if (readMetric != nullptr && writeMetric != nullptr) + { + stats->set_readrate(readMetric->bps_.get_value(1)); + stats->set_writerate(writeMetric->bps_.get_value(1)); + stats->set_readiops(readMetric->iops_.get_value(1)); + stats->set_writeiops(writeMetric->iops_.get_value(1)); + info->set_allocated_stats(stats); + } + else + { + LOG(ERROR) << "Failed to get copyset io metric." 
+ << "logic pool id: " << poolId + << ", copyset id: " << copysetId; + } + } + + ConfigChangeType type; + Configuration conf; + Peer peer; + + if ((ret = copyset->GetConfChange(&type, &conf, &peer)) != 0) + { + LOG(ERROR) << "Failed to get config change state of copyset " + << ToGroupIdStr(poolId, copysetId); + return ret; + } + else if (type == curve::mds::heartbeat::NONE) + { + return 0; + } + + ConfigChangeInfo *confChxInfo = new ConfigChangeInfo(); + replica = new (std::nothrow)::curve::common::Peer(); + if (replica == nullptr) + { + LOG(ERROR) << "apply memory error"; + return -1; + } + replica->set_address(peer.address()); + confChxInfo->set_allocated_peer(replica); + confChxInfo->set_type(type); + confChxInfo->set_finished(false); + info->set_allocated_configchangeinfo(confChxInfo); + + return 0; } - // 解析该chunkserver上的copyset是否需要删除 - // 需要删除则清理copyset - if (HeartbeatHelper::NeedPurge(csEp_, conf, copyset)) { - LOG(INFO) << "Clean peer " << csEp_ << " of copyset(" - << conf.logicalpoolid() << "," << conf.copysetid() - << "), groupId: " - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); - PurgeCopyset(conf.logicalpoolid(), conf.copysetid()); - continue; + int Heartbeat::BuildRequest(HeartbeatRequest *req) + { + int ret; + + req->set_chunkserverid(options_.chunkserverId); + req->set_token(options_.chunkserverToken); + req->set_starttime(startUpTime_); + req->set_ip(options_.ip); + req->set_port(options_.port); + + /* + * TODO(wenyu): DiskState field is not valid yet until disk health feature + * is ready + */ + curve::mds::heartbeat::DiskState *diskState = + new curve::mds::heartbeat::DiskState(); + diskState->set_errtype(0); + diskState->set_errmsg(""); + req->set_allocated_diskstate(diskState); + + ChunkServerMetric *metric = ChunkServerMetric::GetInstance(); + curve::mds::heartbeat::ChunkServerStatisticInfo *stats = + new curve::mds::heartbeat::ChunkServerStatisticInfo(); + IOMetricPtr readMetric = metric->GetIOMetric(CSIOMetricType::READ_CHUNK); + IOMetricPtr writeMetric = metric->GetIOMetric(CSIOMetricType::WRITE_CHUNK); + if (readMetric != nullptr && writeMetric != nullptr) + { + stats->set_readrate(readMetric->bps_.get_value(1)); + stats->set_writerate(writeMetric->bps_.get_value(1)); + stats->set_readiops(readMetric->iops_.get_value(1)); + stats->set_writeiops(writeMetric->iops_.get_value(1)); + } + CopysetNodeOptions opt = copysetMan_->GetCopysetNodeOptions(); + uint64_t chunkFileSize = opt.maxChunkSize; + uint64_t walSegmentFileSize = opt.maxWalSegmentSize; + uint64_t usedChunkSize = metric->GetTotalSnapshotCount() * chunkFileSize + + metric->GetTotalChunkCount() * chunkFileSize; + uint64_t usedWalSegmentSize = + metric->GetTotalWalSegmentCount() * walSegmentFileSize; + uint64_t trashedChunkSize = metric->GetChunkTrashedCount() * chunkFileSize; + uint64_t leftChunkSize = metric->GetChunkLeftCount() * chunkFileSize; + + // leftWalSegmentSize will be 0 when CHUNK and WAL share file pool + uint64_t leftWalSegmentSize = + metric->GetWalSegmentLeftCount() * walSegmentFileSize; + uint64_t chunkPoolSize = options_.chunkFilePool->Size() * + options_.chunkFilePool->GetFilePoolOpt().fileSize; + + // compute format progress rate. 
+ const ChunkFormatStat &formatStat = + options_.chunkFilePool->GetChunkFormatStat(); // NOLINT + + stats->set_chunkfilepoolsize(chunkPoolSize); + stats->set_chunksizeusedbytes(usedChunkSize + usedWalSegmentSize); + stats->set_chunksizeleftbytes(leftChunkSize + leftWalSegmentSize); + stats->set_chunksizetrashedbytes(trashedChunkSize); + if (formatStat.preAllocateNum != 0) + { + stats->set_chunkfilepoolformatpercent( + 100 * formatStat.allocateChunkNum / formatStat.preAllocateNum); + } + else + { + stats->set_chunkfilepoolformatpercent(100); + } + req->set_allocated_stats(stats); + + size_t cap, avail; + ret = GetFileSystemSpaces(&cap, &avail); + if (ret != 0) + { + LOG(ERROR) << "Failed to get file system space information for path " + << storePath_; + return -1; + } + req->set_diskcapacity(cap); + req->set_diskused(cap - avail); + + std::vector copysets; + copysetMan_->GetAllCopysetNodes(©sets); + + req->set_copysetcount(copysets.size()); + int leaders = 0; + + for (CopysetNodePtr copyset : copysets) + { + curve::mds::heartbeat::CopySetInfo *info = req->add_copysetinfos(); + + ret = BuildCopysetInfo(info, copyset); + if (ret != 0) + { + LOG(ERROR) << "Failed to build heartbeat information of copyset " + << ToGroupIdStr(copyset->GetLogicPoolId(), + copyset->GetCopysetId()); + continue; + } + if (copyset->IsLeaderTerm()) + { + ++leaders; + } + } + req->set_leadercount(leaders); + req->set_version(curve::common::CurveVersion()); + + return 0; } - // 解析是否有配置变更需要执行 - if (!conf.has_type()) { - LOG(INFO) << "Failed to parse task for copyset(" - << conf.logicalpoolid() << "," << conf.copysetid() - << "), groupId: " - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); - continue; + void Heartbeat::DumpHeartbeatRequest(const HeartbeatRequest &request) + { + DVLOG(6) << "Heartbeat request: Chunkserver ID: " << request.chunkserverid() + << ", IP: " << request.ip() << ", port: " << request.port() + << ", copyset count: " << request.copysetcount() + << ", leader count: " << request.leadercount(); + for (int i = 0; i < request.copysetinfos_size(); i++) + { + const curve::mds::heartbeat::CopySetInfo &info = + request.copysetinfos(i); + + std::string peersStr = ""; + for (int j = 0; j < info.peers_size(); j++) + { + peersStr += info.peers(j).address() + ","; + } + + DVLOG(6) << "Copyset " << i << " " + << ToGroupIdStr(info.logicalpoolid(), info.copysetid()) + << ", epoch: " << info.epoch() + << ", leader: " << info.leaderpeer().address() + << ", peers: " << peersStr; + + if (info.has_configchangeinfo()) + { + const ConfigChangeInfo &cxInfo = info.configchangeinfo(); + DVLOG(6) << "Config change info: peer: " << cxInfo.peer().address() + << ", finished: " << cxInfo.finished() + << ", errno: " << cxInfo.err().errtype() + << ", errmsg: " << cxInfo.err().errmsg(); + } + } } - // 如果有配置变更需要执行,下发变更到copyset - if (!HeartbeatHelper::PeerVaild(conf.configchangeitem().address())) { - continue; + void Heartbeat::DumpHeartbeatResponse(const HeartbeatResponse &response) + { + int count = response.needupdatecopysets_size(); + if (count > 0) + { + LOG(INFO) << "Received " << count << " config change commands:"; + for (int i = 0; i < count; i++) + { + CopySetConf conf = response.needupdatecopysets(i); + + int type = (conf.has_type()) ? conf.type() : 0; + std::string item = (conf.has_configchangeitem()) + ? 
conf.configchangeitem().address() + : ""; + + std::string peersStr = ""; + for (int j = 0; j < conf.peers_size(); j++) + { + peersStr += conf.peers(j).address(); + } + + LOG(INFO) << "Config change " << i << ": " + << "Copyset < " << conf.logicalpoolid() << ", " + << conf.copysetid() << ">, epoch: " << conf.epoch() + << ", Peers: " << peersStr << ", type: " << type + << ", item: " << item; + } + } + else + { + LOG(INFO) << "Received no config change command."; + } } - if (conf.epoch() != copyset->GetConfEpoch()) { - LOG(WARNING) << "Config change epoch:" << conf.epoch() - << " is not same as current:" << copyset->GetConfEpoch() - << " on copyset(" - << conf.logicalpoolid() << "," << conf.copysetid() - << "), groupId: " - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) - << ", refuse change"; - continue; + int Heartbeat::SendHeartbeat(const HeartbeatRequest &request, + HeartbeatResponse *response) + { + brpc::Channel channel; + if (channel.Init(mdsEps_[inServiceIndex_].c_str(), NULL) != 0) + { + LOG(ERROR) << csEp_.ip << ":" << csEp_.port + << " Fail to init channel to MDS " + << mdsEps_[inServiceIndex_]; + return -1; + } + + curve::mds::heartbeat::HeartbeatService_Stub stub(&channel); + brpc::Controller cntl; + cntl.set_timeout_ms(options_.timeout); + + DumpHeartbeatRequest(request); + + stub.ChunkServerHeartbeat(&cntl, &request, response, nullptr); + if (cntl.Failed()) + { + if (cntl.ErrorCode() == EHOSTDOWN || cntl.ErrorCode() == ETIMEDOUT || + cntl.ErrorCode() == brpc::ELOGOFF || + cntl.ErrorCode() == brpc::ERPCTIMEDOUT) + { + LOG(WARNING) << "current mds: " << mdsEps_[inServiceIndex_] + << " is shutdown or going to quit," + << cntl.ErrorText(); + inServiceIndex_ = (inServiceIndex_ + 1) % mdsEps_.size(); + LOG(INFO) << "next heartbeat switch to " + << mdsEps_[inServiceIndex_]; + } + else + { + LOG(ERROR) << csEp_.ip << ":" << csEp_.port + << " Fail to send heartbeat to MDS " + << mdsEps_[inServiceIndex_] << "," + << " cntl errorCode: " << cntl.ErrorCode() + << " cntl error: " << cntl.ErrorText(); + } + return -1; + } + else + { + DumpHeartbeatResponse(*response); + } + + return 0; } - // 根据不同的变更类型下发配置 - switch (conf.type()) { - case curve::mds::heartbeat::TRANSFER_LEADER: + int Heartbeat::ExecTask(const HeartbeatResponse &response) + { + int count = response.needupdatecopysets_size(); + for (int i = 0; i < count; i++) { - if (!HeartbeatHelper::ChunkServerLoadCopySetFin( - conf.configchangeitem().address())) { + CopySetConf conf = response.needupdatecopysets(i); + CopysetNodePtr copyset = + copysetMan_->GetCopysetNode(conf.logicalpoolid(), conf.copysetid()); + + // Determine whether copyconf is legal + if (!HeartbeatHelper::CopySetConfValid(conf, copyset)) + { + continue; + } + + // Resolve whether the copyset on the chunkserver needs to be deleted + // If deletion is required, clean the copyset + if (HeartbeatHelper::NeedPurge(csEp_, conf, copyset)) + { + LOG(INFO) << "Clean peer " << csEp_ << " of copyset(" + << conf.logicalpoolid() << "," << conf.copysetid() + << "), groupId: " + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); + PurgeCopyset(conf.logicalpoolid(), conf.copysetid()); + continue; + } + + // Resolve if there are any configuration changes that need to be + // executed + if (!conf.has_type()) + { + LOG(INFO) << "Failed to parse task for copyset(" + << conf.logicalpoolid() << "," << conf.copysetid() + << "), groupId: " + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); + continue; + } + + // If there are configuration changes that need to be 
executed, issue + // the changes to the copyset + if (!HeartbeatHelper::PeerVaild(conf.configchangeitem().address())) + { + continue; + } + + if (conf.epoch() != copyset->GetConfEpoch()) + { + LOG(WARNING) << "Config change epoch:" << conf.epoch() + << " is not same as current:" + << copyset->GetConfEpoch() << " on copyset(" + << conf.logicalpoolid() << "," << conf.copysetid() + << "), groupId: " + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) + << ", refuse change"; + continue; + } + + // Distribute configurations based on different change types + switch (conf.type()) + { + case curve::mds::heartbeat::TRANSFER_LEADER: + { + if (!HeartbeatHelper::ChunkServerLoadCopySetFin( + conf.configchangeitem().address())) + { + LOG(INFO) + << "Transfer leader to " + << conf.configchangeitem().address() << " on copyset" + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) + << " reject. target chunkserver is loading copyset"; + break; + } + LOG(INFO) << "Transfer leader to " - << conf.configchangeitem().address() << " on copyset" - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) - << " reject. target chunkserver is loading copyset"; + << conf.configchangeitem().address() << " on copyset" + << ToGroupIdStr(conf.logicalpoolid(), + conf.copysetid()); + copyset->TransferLeader(conf.configchangeitem()); break; } - LOG(INFO) << "Transfer leader to " - << conf.configchangeitem().address() << " on copyset" - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); - copyset->TransferLeader(conf.configchangeitem()); + case curve::mds::heartbeat::ADD_PEER: + LOG(INFO) << "Adding peer " << conf.configchangeitem().address() + << " to copyset" + << ToGroupIdStr(conf.logicalpoolid(), + conf.copysetid()); + copyset->AddPeer(conf.configchangeitem()); + break; + + case curve::mds::heartbeat::REMOVE_PEER: + LOG(INFO) << "Removing peer " + << conf.configchangeitem().address() + << " from copyset" + << ToGroupIdStr(conf.logicalpoolid(), + conf.copysetid()); + copyset->RemovePeer(conf.configchangeitem()); + break; + + case curve::mds::heartbeat::CHANGE_PEER: + { + std::vector newPeers; + if (HeartbeatHelper::BuildNewPeers(conf, &newPeers)) + { + LOG(INFO) + << "Change peer from " << conf.oldpeer().address() + << " to " << conf.configchangeitem().address() + << " on copyset" + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); + copyset->ChangePeer(newPeers); + } + else + { + LOG(ERROR) + << "Build new peer for copyset" + << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) + << " failed"; + } + } break; - } - case curve::mds::heartbeat::ADD_PEER: - LOG(INFO) << "Adding peer " << conf.configchangeitem().address() - << " to copyset" - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); - copyset->AddPeer(conf.configchangeitem()); - break; - - case curve::mds::heartbeat::REMOVE_PEER: - LOG(INFO) << "Removing peer " << conf.configchangeitem().address() - << " from copyset" - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); - copyset->RemovePeer(conf.configchangeitem()); - break; - - case curve::mds::heartbeat::CHANGE_PEER: - { - std::vector newPeers; - if (HeartbeatHelper::BuildNewPeers(conf, &newPeers)) { - LOG(INFO) << "Change peer from " - << conf.oldpeer().address() << " to " - << conf.configchangeitem().address() << " on copyset" - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()); - copyset->ChangePeer(newPeers); - } else { - LOG(ERROR) << "Build new peer for copyset" - << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid()) - << " failed"; + case 
curve::mds::heartbeat::START_SCAN_PEER: + { + ConfigChangeType type; + Configuration tmpConf; + Peer peer; + LogicPoolID poolId = conf.logicalpoolid(); + CopysetID copysetId = conf.copysetid(); + int ret = copyset->GetConfChange(&type, &tmpConf, &peer); + // if copyset happen conf change, can't scan and wait retry + if (0 != ret) + { + LOG(ERROR) << "Failed to get config change state of copyset" + << ToGroupIdStr(poolId, copysetId); + return ret; + } + else if (type != curve::mds::heartbeat::NONE) + { + LOG(INFO) << "drop scan peer request to copyset: " + << ToGroupIdStr(poolId, copysetId) + << " because exist config" + << " ConfigChangeType: " << type; + } + else + { + LOG(INFO) + << "Scan peer " << conf.configchangeitem().address() + << "to copyset " << ToGroupIdStr(poolId, copysetId); + scanMan_->Enqueue(poolId, copysetId); + } } - } - break; + break; - case curve::mds::heartbeat::START_SCAN_PEER: - { - ConfigChangeType type; - Configuration tmpConf; - Peer peer; - LogicPoolID poolId = conf.logicalpoolid(); - CopysetID copysetId = conf.copysetid(); - int ret = copyset->GetConfChange(&type, &tmpConf, &peer); - // if copyset happen conf change, can't scan and wait retry - if (0 != ret) { - LOG(ERROR) << "Failed to get config change state of copyset" - << ToGroupIdStr(poolId, copysetId); + case curve::mds::heartbeat::CANCEL_SCAN_PEER: + { + // todo Abnormal scenario + LogicPoolID poolId = conf.logicalpoolid(); + CopysetID copysetId = conf.copysetid(); + int ret = scanMan_->CancelScanJob(poolId, copysetId); + if (ret < 0) + { + LOG(ERROR) + << "cancel scan peer failed, " + << "peer address: " << conf.configchangeitem().address() + << "copyset groupId: " + << ToGroupIdStr(poolId, copysetId); + } return ret; - } else if (type != curve::mds::heartbeat::NONE) { - LOG(INFO) << "drop scan peer request to copyset: " - << ToGroupIdStr(poolId, copysetId) - << " because exist config" - << " ConfigChangeType: " << type; - } else { - LOG(INFO) << "Scan peer " - << conf.configchangeitem().address() - << "to copyset " - << ToGroupIdStr(poolId, copysetId); - scanMan_->Enqueue(poolId, copysetId); } - } - break; + break; - case curve::mds::heartbeat::CANCEL_SCAN_PEER: - { - // todo Abnormal scenario - LogicPoolID poolId = conf.logicalpoolid(); - CopysetID copysetId = conf.copysetid(); - int ret = scanMan_->CancelScanJob(poolId, copysetId); - if (ret < 0) { - LOG(ERROR) << "cancel scan peer failed, " - << "peer address: " - << conf.configchangeitem().address() - << "copyset groupId: " - << ToGroupIdStr(poolId, copysetId); + default: + LOG(ERROR) << "Invalid configchange type: " << conf.type(); + break; } - return ret; } - break; - default: - LOG(ERROR) << "Invalid configchange type: " << conf.type(); - break; - } - } - - return 0; -} - -void Heartbeat::HeartbeatWorker() { - int ret; - int errorIntervalSec = 2; - - LOG(INFO) << "Starting Heartbeat worker thread."; - - // 处理配置等于0等异常情况 - if (options_.intervalSec <= 4) { - errorIntervalSec = 2; - } else { - errorIntervalSec = options_.intervalSec / 2; - } - - while (!toStop_.load(std::memory_order_acquire)) { - HeartbeatRequest req; - HeartbeatResponse resp; - - LOG(INFO) << "building heartbeat info"; - ret = BuildRequest(&req); - if (ret != 0) { - LOG(ERROR) << "Failed to build heartbeat request"; - ::sleep(errorIntervalSec); - continue; + return 0; } - LOG(INFO) << "sending heartbeat info"; - ret = SendHeartbeat(req, &resp); - if (ret != 0) { - LOG(WARNING) << "Failed to send heartbeat to MDS"; - ::sleep(errorIntervalSec); - continue; - } + void 
Heartbeat::HeartbeatWorker() + { + int ret; + int errorIntervalSec = 2; - LOG(INFO) << "executing heartbeat info"; - ret = ExecTask(resp); - if (ret != 0) { - LOG(ERROR) << "Failed to execute heartbeat tasks"; - ::sleep(errorIntervalSec); - continue; - } + LOG(INFO) << "Starting Heartbeat worker thread."; + + // Handling abnormal situations such as configuration equal to 0 + if (options_.intervalSec <= 4) + { + errorIntervalSec = 2; + } + else + { + errorIntervalSec = options_.intervalSec / 2; + } + + while (!toStop_.load(std::memory_order_acquire)) + { + HeartbeatRequest req; + HeartbeatResponse resp; + + LOG(INFO) << "building heartbeat info"; + ret = BuildRequest(&req); + if (ret != 0) + { + LOG(ERROR) << "Failed to build heartbeat request"; + ::sleep(errorIntervalSec); + continue; + } + + LOG(INFO) << "sending heartbeat info"; + ret = SendHeartbeat(req, &resp); + if (ret != 0) + { + LOG(WARNING) << "Failed to send heartbeat to MDS"; + ::sleep(errorIntervalSec); + continue; + } - waitInterval_.WaitForNextExcution(); - } + LOG(INFO) << "executing heartbeat info"; + ret = ExecTask(resp); + if (ret != 0) + { + LOG(ERROR) << "Failed to execute heartbeat tasks"; + ::sleep(errorIntervalSec); + continue; + } - LOG(INFO) << "Heartbeat worker thread stopped."; -} + waitInterval_.WaitForNextExcution(); + } + + LOG(INFO) << "Heartbeat worker thread stopped."; + } -} // namespace chunkserver -} // namespace curve + } // namespace chunkserver +} // namespace curve diff --git a/src/chunkserver/heartbeat.h b/src/chunkserver/heartbeat.h index df86d8e88a..16d5c1a1fa 100644 --- a/src/chunkserver/heartbeat.h +++ b/src/chunkserver/heartbeat.h @@ -24,58 +24,58 @@ #ifndef SRC_CHUNKSERVER_HEARTBEAT_H_ #define SRC_CHUNKSERVER_HEARTBEAT_H_ +#include // NodeImpl #include -#include // NodeImpl -#include -#include -#include #include +#include #include +#include #include //NOLINT +#include #include "include/chunkserver/chunkserver_common.h" -#include "src/chunkserver/copyset_node_manager.h" -#include "src/common/wait_interval.h" -#include "src/common/concurrent/concurrent.h" -#include "src/chunkserver/scan_manager.h" #include "proto/heartbeat.pb.h" #include "proto/scan.pb.h" +#include "src/chunkserver/copyset_node_manager.h" +#include "src/chunkserver/scan_manager.h" +#include "src/common/concurrent/concurrent.h" +#include "src/common/wait_interval.h" using ::curve::common::Thread; namespace curve { namespace chunkserver { -using HeartbeatRequest = curve::mds::heartbeat::ChunkServerHeartbeatRequest; +using HeartbeatRequest = curve::mds::heartbeat::ChunkServerHeartbeatRequest; using HeartbeatResponse = curve::mds::heartbeat::ChunkServerHeartbeatResponse; -using ConfigChangeInfo = curve::mds::heartbeat::ConfigChangeInfo; -using CopySetConf = curve::mds::heartbeat::CopySetConf; -using CandidateError = curve::mds::heartbeat::CandidateError; -using TaskStatus = butil::Status; -using CopysetNodePtr = std::shared_ptr; +using ConfigChangeInfo = curve::mds::heartbeat::ConfigChangeInfo; +using CopySetConf = curve::mds::heartbeat::CopySetConf; +using CandidateError = curve::mds::heartbeat::CandidateError; +using TaskStatus = butil::Status; +using CopysetNodePtr = std::shared_ptr; /** - * 心跳子系统选项 + * Heartbeat subsystem options */ struct HeartbeatOptions { - ChunkServerID chunkserverId; - std::string chunkserverToken; - std::string storeUri; - std::string mdsListenAddr; - std::string ip; - uint32_t port; - uint32_t intervalSec; - uint32_t timeout; - CopysetNodeManager* copysetNodeManager; - ScanManager* scanManager; + 
ChunkServerID chunkserverId; + std::string chunkserverToken; + std::string storeUri; + std::string mdsListenAddr; + std::string ip; + uint32_t port; + uint32_t intervalSec; + uint32_t timeout; + CopysetNodeManager* copysetNodeManager; + ScanManager* scanManager; std::shared_ptr fs; std::shared_ptr chunkFilePool; }; /** - * 心跳子系统处理模块 + * Heartbeat subsystem processing module */ class Heartbeat { public: @@ -83,110 +83,110 @@ class Heartbeat { ~Heartbeat() {} /** - * @brief 初始化心跳子系统 - * @param[in] options 心跳子系统选项 - * @return 0:成功,非0失败 + * @brief Initialize heartbeat subsystem + * @param[in] options Heartbeat subsystem options + * @return 0: Success, non 0 failure */ int Init(const HeartbeatOptions& options); /** - * @brief 清理心跳子系统 - * @return 0:成功,非0失败 + * @brief Clean heartbeat subsystem + * @return 0: Success, non 0 failure */ int Fini(); /** - * @brief 启动心跳子系统 - * @return 0:成功,非0失败 + * @brief: Start the heartbeat subsystem + * @return 0: Success, non 0 failure */ int Run(); private: /** - * @brief 停止心跳子系统 - * @return 0:成功,非0失败 + * @brief Stop heartbeat subsystem + * @return 0: Success, non 0 failure */ int Stop(); /* - * 心跳工作线程 + * Heartbeat Worker Thread */ void HeartbeatWorker(); /* - * 获取Chunkserver存储空间信息 + * Obtain Chunkserver storage space information */ int GetFileSystemSpaces(size_t* capacity, size_t* free); /* - * 构建心跳消息的Copyset信息项 + * Building a Copyset information item for heartbeat messages */ int BuildCopysetInfo(curve::mds::heartbeat::CopySetInfo* info, CopysetNodePtr copyset); /* - * 构建心跳请求 + * Build Heartbeat Request */ int BuildRequest(HeartbeatRequest* request); /* - * 发送心跳消息 + * Send heartbeat message */ int SendHeartbeat(const HeartbeatRequest& request, HeartbeatResponse* response); /* - * 执行心跳任务 + * Perform Heartbeat Tasks */ int ExecTask(const HeartbeatResponse& response); /* - * 输出心跳请求信息 + * Output heartbeat request information */ void DumpHeartbeatRequest(const HeartbeatRequest& request); /* - * 输出心跳回应信息 + * Output heartbeat response information */ void DumpHeartbeatResponse(const HeartbeatResponse& response); /* - * 清理复制组实例及持久化数据 + * Clean up replication group instances and persist data */ TaskStatus PurgeCopyset(LogicPoolID poolId, CopysetID copysetId); private: - // 心跳线程 + // Heartbeat Thread Thread hbThread_; - // 控制心跳模块运行或停止 + // Control the heartbeat module to run or stop std::atomic toStop_; - // 使用定时器 + // Using a timer ::curve::common::WaitInterval waitInterval_; - // Copyset管理模块 + // Copyset Management Module CopysetNodeManager* copysetMan_; - // ChunkServer目录 + // ChunkServer directory std::string storePath_; - // 心跳选项 + // Heartbeat Options HeartbeatOptions options_; - // MDS的地址 + // MDS address std::vector mdsEps_; - // 当前供服务的mds + // Current mds for service int inServiceIndex_; - // ChunkServer本身的地址 + // ChunkServer's own address butil::EndPoint csEp_; - // 模块初始化时间, unix时间 + // Module initialization time, unix time uint64_t startUpTime_; - ScanManager *scanMan_; + ScanManager* scanMan_; }; } // namespace chunkserver diff --git a/src/chunkserver/heartbeat_helper.cpp b/src/chunkserver/heartbeat_helper.cpp index 02a2fc65c9..bc9bbd3708 100644 --- a/src/chunkserver/heartbeat_helper.cpp +++ b/src/chunkserver/heartbeat_helper.cpp @@ -20,34 +20,37 @@ * Author: lixiaocui */ -#include +#include "src/chunkserver/heartbeat_helper.h" + #include #include +#include + #include -#include "src/chunkserver/heartbeat_helper.h" + #include "include/chunkserver/chunkserver_common.h" #include "proto/chunkserver.pb.h" namespace curve { namespace chunkserver { -bool 
HeartbeatHelper::BuildNewPeers(
-    const CopySetConf &conf, std::vector<Peer> *newPeers) {
-    // 检验目标节点和待删除节点是否有效
+bool HeartbeatHelper::BuildNewPeers(const CopySetConf& conf,
+                                    std::vector<Peer>* newPeers) {
+    // Verify that the target node and the node to be removed are valid
     std::string target(conf.configchangeitem().address());
     std::string old(conf.oldpeer().address());
     if (!PeerVaild(target) || !PeerVaild(old)) {
         return false;
     }

-    // 生成newPeers
+    // Generate newPeers
     for (int i = 0; i < conf.peers_size(); i++) {
         std::string peer = conf.peers(i).address();
-        // 检验conf中的peer是否有效
+        // Verify that the peer in conf is valid
         if (!PeerVaild(peer)) {
             return false;
         }

-        // newPeers中不包含old副本
+        // newPeers must not contain the old replica
         if (conf.peers(i).address() != old) {
             newPeers->emplace_back(conf.peers(i));
         }
@@ -57,49 +60,51 @@ bool HeartbeatHelper::BuildNewPeers(
     return true;
 }

-bool HeartbeatHelper::PeerVaild(const std::string &peer) {
+bool HeartbeatHelper::PeerVaild(const std::string& peer) {
     PeerId peerId;
     return 0 == peerId.parse(peer);
 }

-bool HeartbeatHelper::CopySetConfValid(
-    const CopySetConf &conf, const CopysetNodePtr &copyset) {
-    // chunkserver中不存在需要变更的copyset, 报警
+bool HeartbeatHelper::CopySetConfValid(const CopySetConf& conf,
+                                       const CopysetNodePtr& copyset) {
+    // The copyset to be changed does not exist on the chunkserver; raise an
+    // alarm
     if (copyset == nullptr) {
-        LOG(ERROR) << "Failed to find copyset(" << conf.logicalpoolid()
-            << "," << conf.copysetid() << "), groupId: "
-            << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid());
+        LOG(ERROR) << "Failed to find copyset(" << conf.logicalpoolid() << ","
+                   << conf.copysetid() << "), groupId: "
+                   << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid());
         return false;
     }

-    // 下发的变更epoch < copyset实际的epoch,报错
+    // The issued change epoch is smaller than the copyset's actual epoch;
+    // report an error
     if (conf.epoch() < copyset->GetConfEpoch()) {
         LOG(WARNING) << "Config change epoch:" << conf.epoch()
-            << " is smaller than current:" << copyset->GetConfEpoch()
-            << " on copyset("
-            << conf.logicalpoolid() << "," << conf.copysetid()
-            << "), groupId: "
-            << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid())
-            << ", refuse change";
+                     << " is smaller than current:" << copyset->GetConfEpoch()
+                     << " on copyset(" << conf.logicalpoolid() << ","
+                     << conf.copysetid() << "), groupId: "
+                     << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid())
+                     << ", refuse change";
         return false;
     }

     return true;
 }

-bool HeartbeatHelper::NeedPurge(const butil::EndPoint &csEp,
-    const CopySetConf &conf, const CopysetNodePtr &copyset) {
+bool HeartbeatHelper::NeedPurge(const butil::EndPoint& csEp,
+                                const CopySetConf& conf,
+                                const CopysetNodePtr& copyset) {
     (void)copyset;
-    // CLDCFS-1004 bug-fix: mds下发epoch为0, 配置为空的copyset
+    // CLDCFS-1004 bug-fix: mds issued a copyset with epoch 0 and an empty
+    // configuration
     if (0 == conf.epoch() && conf.peers().empty()) {
         LOG(INFO) << "Clean copyset "
-            << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid())
-            << "in peer " << csEp
-            << ", witch is not exist in mds record";
+                  << ToGroupIdStr(conf.logicalpoolid(), conf.copysetid())
+                  << " in peer " << csEp
+                  << ", which does not exist in the mds record";
         return true;
     }

-    // 该chunkserrver不在copyset的配置中,需要清理
+    // This chunkserver is not in the copyset's configuration and needs to be
+    // cleaned up
     std::string chunkserverEp = std::string(butil::endpoint2str(csEp).c_str());
     for (int i = 0; i < conf.peers_size(); i++) {
         if (conf.peers(i).address().find(chunkserverEp) != std::string::npos) {
@@ -117,7
+122,7 @@ bool HeartbeatHelper::ChunkServerLoadCopySetFin(const std::string peerId) { PeerId peer; peer.parse(peerId); - const char *ip = butil::ip2str(peer.addr.ip).c_str(); + const char* ip = butil::ip2str(peer.addr.ip).c_str(); int port = peer.addr.port; brpc::Channel channel; if (channel.Init(ip, port, NULL) != 0) { @@ -133,7 +138,7 @@ bool HeartbeatHelper::ChunkServerLoadCopySetFin(const std::string peerId) { stub.ChunkServerStatus(&cntl, &req, &rep, nullptr); if (cntl.Failed()) { LOG(WARNING) << "Send ChunkServerStatusRequest failed, cntl.errorText =" - << cntl.ErrorText(); + << cntl.ErrorText(); return false; } @@ -142,4 +147,3 @@ bool HeartbeatHelper::ChunkServerLoadCopySetFin(const std::string peerId) { } // namespace chunkserver } // namespace curve - diff --git a/src/chunkserver/heartbeat_helper.h b/src/chunkserver/heartbeat_helper.h index 43ada5f6ea..c06fedb61b 100644 --- a/src/chunkserver/heartbeat_helper.h +++ b/src/chunkserver/heartbeat_helper.h @@ -24,74 +24,83 @@ #define SRC_CHUNKSERVER_HEARTBEAT_HELPER_H_ #include -#include + #include #include +#include + #include "proto/heartbeat.pb.h" #include "src/chunkserver/copyset_node.h" namespace curve { namespace chunkserver { -using ::curve::mds::heartbeat::CopySetConf; using ::curve::common::Peer; +using ::curve::mds::heartbeat::CopySetConf; using CopysetNodePtr = std::shared_ptr; class HeartbeatHelper { public: /** - * 根据mds下发的conf构建出指定复制组的新配置,给ChangePeer使用 + * Build a new configuration for the specified replication group based on + * the conf issued by mds, and use it for ChangePeer * - * @param[in] conf mds下发的变更命令needupdatecopyset[i] - * @param[out] newPeers 指定复制组的目标配置 + * @param[in] conf mds issued the change command needupdatecopyset[i] + * @param[out] newPeers specifies the target configuration for the + * replication group * - * @return false-生成newpeers失败 true-生成newpeers成功 + * @return false - Failed to generate newpeers, true - Successfully + * generated newpeers */ - static bool BuildNewPeers( - const CopySetConf &conf, std::vector *newPeers); + static bool BuildNewPeers(const CopySetConf& conf, + std::vector* newPeers); /** - * 判断字符串peer(正确的形式为: ip:port:0)是否有效 + * Determine whether the string peer (correct form: ip:port:0) is valid * - * @param[in] peer 指定字符串 + * @param[in] peer specifies the string * - * @return false-无效 true-有效 + * @return false - invalid, true - valid */ - static bool PeerVaild(const std::string &peer); + static bool PeerVaild(const std::string& peer); /** - * 判断mds下发过来的copysetConf是否合法,以下两种情况不合法: - * 1. chunkserver中不存在该copyset - * 2. mds下发的copyset中记录的epoch小于chunkserver上copyset此时的epoch + * Determine whether the copysetConf sent by mds is legal, and the following + * two situations are illegal: + * 1. The copyset does not exist in chunkserver + * 2. 
The epoch recorded in the copyset issued by mds is smaller than the + * epoch recorded in the copyset on chunkserver at this time * - * @param[in] conf mds下发的变更命令needupdatecopyset[i] - * @param[in] copyset chunkserver上对应的copyset + * @param[in] conf mds issued the change command needupdatecopyset[i] + * @param[in] copyset The corresponding copyset on chunkserver * - * @return false-copysetConf不合法,true-copysetConf合法 + * @return false-copysetConf is illegal, true-copysetConf is legal */ - static bool CopySetConfValid( - const CopySetConf &conf, const CopysetNodePtr ©set); + static bool CopySetConfValid(const CopySetConf& conf, + const CopysetNodePtr& copyset); /** - * 判断chunkserver(csEp)中指定copyset是否需要删除 + * Determine whether the specified copyset in chunkserver(csEp) needs to be + * deleted * - * @param[in] csEp 该chunkserver的ip:port - * @param[in] conf mds下发的变更命令needupdatecopyset[i] - * @param[in] copyset chunkserver上对应的copyset + * @param[in] csEp The ip:port of this chunkserver + * @param[in] conf mds issued the change command needupdatecopyset[i] + * @param[in] copyset The corresponding copyset on chunkserver * - * @return false-该chunkserver上的copyset无需清理; - * true-该chunkserver上的copyset需要清理 + * @return false-The copyset on the chunkserver does not need to be cleaned; + * true-The copyset on this chunkserver needs to be cleaned up */ - static bool NeedPurge(const butil::EndPoint &csEp, const CopySetConf &conf, - const CopysetNodePtr ©set); + static bool NeedPurge(const butil::EndPoint& csEp, const CopySetConf& conf, + const CopysetNodePtr& copyset); /** - * 判断指定chunkserver copyset是否已经加载完毕 + * Determine whether the specified chunkserver copyset has been loaded + * completely * - * @return false-copyset加载完毕 true-copyset未加载完成 + * @return false-copyset loading completed, true-copyset not loaded + * completed */ static bool ChunkServerLoadCopySetFin(const std::string ipPort); }; } // namespace chunkserver } // namespace curve #endif // SRC_CHUNKSERVER_HEARTBEAT_HELPER_H_ - diff --git a/src/chunkserver/inflight_throttle.h b/src/chunkserver/inflight_throttle.h index 86af93daf7..71462b5e97 100644 --- a/src/chunkserver/inflight_throttle.h +++ b/src/chunkserver/inflight_throttle.h @@ -30,18 +30,17 @@ namespace curve { namespace chunkserver { /** - * 负责控制最大inflight request数量 + * Responsible for controlling the maximum number of inflight requests */ class InflightThrottle { public: explicit InflightThrottle(uint64_t maxInflight) - : inflightRequestCount_(0), - kMaxInflightRequest_(maxInflight) { } + : inflightRequestCount_(0), kMaxInflightRequest_(maxInflight) {} virtual ~InflightThrottle() = default; /** - * @brief: 判断是否过载 - * @return true,过载,false没有过载 + * @brief: Determine if there is an overload + * @return true, overload, false No overload */ inline bool IsOverLoad() { if (kMaxInflightRequest_ >= @@ -53,23 +52,23 @@ class InflightThrottle { } /** - * @brief: inflight request计数加1 + * @brief: inflight request count plus 1 */ inline void Increment() { inflightRequestCount_.fetch_add(1, std::memory_order_relaxed); } /** - * @brief: inflight request计数减1 + * @brief: inflight request count minus 1 */ inline void Decrement() { inflightRequestCount_.fetch_sub(1, std::memory_order_relaxed); } private: - // 当前inflight request数量 + // Current number of inflight requests std::atomic inflightRequestCount_; - // 最大的inflight request数量 + // Maximum number of inflight requests const uint64_t kMaxInflightRequest_; }; diff --git a/src/chunkserver/op_request.cpp b/src/chunkserver/op_request.cpp index 
817e65c79f..e03c079341 100755 --- a/src/chunkserver/op_request.cpp +++ b/src/chunkserver/op_request.cpp @@ -22,44 +22,41 @@ #include "src/chunkserver/op_request.h" -#include +#include #include #include -#include +#include #include #include #include -#include "src/chunkserver/copyset_node.h" #include "src/chunkserver/chunk_closure.h" #include "src/chunkserver/clone_manager.h" #include "src/chunkserver/clone_task.h" +#include "src/chunkserver/copyset_node.h" namespace curve { namespace chunkserver { -ChunkOpRequest::ChunkOpRequest() : - datastore_(nullptr), - node_(nullptr), - cntl_(nullptr), - request_(nullptr), - response_(nullptr), - done_(nullptr) { -} +ChunkOpRequest::ChunkOpRequest() + : datastore_(nullptr), + node_(nullptr), + cntl_(nullptr), + request_(nullptr), + response_(nullptr), + done_(nullptr) {} ChunkOpRequest::ChunkOpRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - datastore_(nodePtr->GetDataStore()), - node_(nodePtr), - cntl_(dynamic_cast(cntl)), - request_(request), - response_(response), - done_(done) { -} + RpcController* cntl, const ChunkRequest* request, + ChunkResponse* response, + ::google::protobuf::Closure* done) + : datastore_(nodePtr->GetDataStore()), + node_(nodePtr), + cntl_(dynamic_cast(cntl)), + request_(request), + response_(response), + done_(done) {} void ChunkOpRequest::Process() { brpc::ClosureGuard doneGuard(done_); @@ -71,18 +68,19 @@ void ChunkOpRequest::Process() { } /** - * 如果propose成功,说明request成功交给了raft处理, - * 那么done_就不能被调用,只有propose失败了才需要提前返回 + * If the proposal is successful, it indicates that the request has been + * successfully handed over to the raft for processing, So, done_ cannot be + * called, only if the proposal fails, it needs to be returned in advance */ - if (0 == Propose(request_, cntl_ ? &cntl_->request_attachment() : - nullptr)) { + if (0 == + Propose(request_, cntl_ ? 
&cntl_->request_attachment() : nullptr)) {
         doneGuard.release();
     }
 }

-int ChunkOpRequest::Propose(const ChunkRequest *request,
-                            const butil::IOBuf *data) {
-    // 打包op request为task
+int ChunkOpRequest::Propose(const ChunkRequest* request,
+                            const butil::IOBuf* data) {
+    // Pack the op request into a braft task
     braft::Task task;
     butil::IOBuf log;
     if (0 != Encode(request, data, &log)) {
@@ -93,10 +91,13 @@ int ChunkOpRequest::Propose(const ChunkRequest *request,
     task.data = &log;
     task.done = new ChunkClosure(shared_from_this());
     /**
-     * 由于apply是异步的,有可能某个节点在term1是leader,apply了一条log,
-     * 但是中间发生了主从切换,在很短的时间内这个节点又变为term3的leader,
-     * 之前apply的日志才开始进行处理,这种情况下要实现严格意义上的复制状态
-     * 机,需要解决这种ABA问题,可以在apply的时候设置leader当时的term
+     * Since apply is asynchronous, a node may be the leader in term1 and
+     * apply a log entry, lose leadership in a switchover, and then become
+     * the leader again in term3 a short time later, at which point the log
+     * applied earlier only starts to be processed. To implement a strict
+     * replicated state machine, this ABA problem must be solved; it can be
+     * handled by recording the leader's term at the time of apply.
      */
     task.expected_term = node_->LeaderTerm();

@@ -106,8 +107,8 @@ int ChunkOpRequest::Propose(const ChunkRequest *request,
 }

 void ChunkOpRequest::RedirectChunkRequest() {
-    // 编译时加上 --copt -DUSE_BTHREAD_MUTEX
-    // 否则可能发生死锁: CLDCFS-1120
+    // Compile with --copt -DUSE_BTHREAD_MUTEX
+    // Otherwise, a deadlock may occur: CLDCFS-1120
     // PeerId leader = node_->GetLeaderId();
     // if (!leader.is_empty()) {
     //     response_->set_redirect(leader.to_string());
@@ -115,9 +116,8 @@ void ChunkOpRequest::RedirectChunkRequest() {
     response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
 }

-int ChunkOpRequest::Encode(const ChunkRequest *request,
-                           const butil::IOBuf *data,
-                           butil::IOBuf *log) {
+int ChunkOpRequest::Encode(const ChunkRequest* request,
+                           const butil::IOBuf* data, butil::IOBuf* log) {
     // 1.append request length
     const uint32_t metaSize = butil::HostToNet32(request->ByteSize());
     log->append(&metaSize, sizeof(uint32_t));
@@ -135,8 +135,8 @@ int ChunkOpRequest::Encode(const ChunkRequest *request,
 }

 std::shared_ptr<ChunkOpRequest> ChunkOpRequest::Decode(butil::IOBuf log,
-                                                       ChunkRequest *request,
-                                                       butil::IOBuf *data,
+                                                       ChunkRequest* request,
+                                                       butil::IOBuf* data,
                                                        uint64_t index,
                                                        PeerId leaderId) {
     uint32_t metaSize = 0;
@@ -171,35 +171,35 @@ std::shared_ptr<ChunkOpRequest> ChunkOpRequest::Decode(butil::IOBuf log,
             return std::make_shared();
         case CHUNK_OP_TYPE::CHUNK_OP_SCAN:
             return std::make_shared(index, leaderId);
-        default:LOG(ERROR) << "Unknown chunk op";
+        default:
+            LOG(ERROR) << "Unknown chunk op";
             return nullptr;
     }
 }

 ApplyTaskType ChunkOpRequest::Schedule(CHUNK_OP_TYPE opType) {
     switch (opType) {
-    case CHUNK_OP_READ:
-    case CHUNK_OP_RECOVER:
-        return ApplyTaskType::READ;
-    default:
-        return ApplyTaskType::WRITE;
+        case CHUNK_OP_READ:
+        case CHUNK_OP_RECOVER:
+            return ApplyTaskType::READ;
+        default:
+            return ApplyTaskType::WRITE;
     }
 }

 namespace {

 uint64_t MaxAppliedIndex(
-    const std::shared_ptr<CopysetNode>& node,
-    uint64_t current) {
+    const std::shared_ptr<CopysetNode>& node,
+    uint64_t current) {
     return std::max(current, node->GetAppliedIndex());
 }

 }  // namespace

 void DeleteChunkRequest::OnApply(uint64_t index,
-                                 ::google::protobuf::Closure *done) {
+                                 ::google::protobuf::Closure* done) {
     brpc::ClosureGuard doneGuard(done);

-    auto ret = datastore_->DeleteChunk(request_->chunkid(),
-                                       request_->sn());
+    auto ret = datastore_->DeleteChunk(request_->chunkid(), request_->sn());
     if
(CSErrorCode::Success == ret) { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); node_->UpdateAppliedIndex(index); @@ -211,21 +211,19 @@ void DeleteChunkRequest::OnApply(uint64_t index, LOG(ERROR) << "delete chunk failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } response_->set_appliedindex(MaxAppliedIndex(node_, index)); } void DeleteChunkRequest::OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) { + const ChunkRequest& request, + const butil::IOBuf& data) { (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - auto ret = datastore->DeleteChunk(request.chunkid(), - request.sn()); - if (CSErrorCode::Success == ret) - return; + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing + auto ret = datastore->DeleteChunk(request.chunkid(), request.sn()); + if (CSErrorCode::Success == ret) return; if (CSErrorCode::InternalError == ret) { LOG(FATAL) << "delete failed: " @@ -239,16 +237,14 @@ void DeleteChunkRequest::OnApplyFromLog(std::shared_ptr datastore, } ReadChunkRequest::ReadChunkRequest(std::shared_ptr nodePtr, - CloneManager* cloneMgr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, cntl, request, response, done), - cloneMgr_(cloneMgr), - concurrentApplyModule_(nodePtr->GetConcurrentApplyModule()), - applyIndex(0) { -} + CloneManager* cloneMgr, RpcController* cntl, + const ChunkRequest* request, + ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, cntl, request, response, done), + cloneMgr_(cloneMgr), + concurrentApplyModule_(nodePtr->GetConcurrentApplyModule()), + applyIndex(0) {} void ReadChunkRequest::Process() { brpc::ClosureGuard doneGuard(done_); @@ -267,21 +263,20 @@ void ReadChunkRequest::Process() { * extend from std::enable_shared_from_this, * use shared_from_this() to return a shared_ptr */ - auto thisPtr - = std::dynamic_pointer_cast(shared_from_this()); + auto thisPtr = + std::dynamic_pointer_cast(shared_from_this()); /* * why push read requests to concurrent layer: * 1. all I/O operators including read and write requests are executed * in concurrent layer, we can separate disk I/O from other logic. * 2. ensure linear consistency of read semantics. 
*/ - auto task = std::bind(&ReadChunkRequest::OnApply, - thisPtr, - node_->GetAppliedIndex(), - doneGuard.release()); - concurrentApplyModule_->Push(request_->chunkid(), - ChunkOpRequest::Schedule(request_->optype()), // NOLINT - task); + auto task = std::bind(&ReadChunkRequest::OnApply, thisPtr, + node_->GetAppliedIndex(), doneGuard.release()); + concurrentApplyModule_->Push( + request_->chunkid(), + ChunkOpRequest::Schedule(request_->optype()), // NOLINT + task); return; } @@ -298,16 +293,19 @@ void ReadChunkRequest::Process() { } void ReadChunkRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { - // 先清除response中的status,以保证CheckForward后的判断的正确性 + ::google::protobuf::Closure* done) { + // Clear the status in the response first to ensure the correctness of the + // judgment after CheckForward response_->clear_status(); CSChunkInfo chunkInfo; - CSErrorCode errorCode = datastore_->GetChunkInfo(request_->chunkid(), - &chunkInfo); + CSErrorCode errorCode = + datastore_->GetChunkInfo(request_->chunkid(), &chunkInfo); do { bool needLazyClone = false; - // 如果需要Read的chunk不存在,但是请求包含Clone源信息,则尝试从Clone源读取数据 + // If the chunk that needs to be read does not exist, but the request + // contains Clone source information, try reading data from the Clone + // source if (CSErrorCode::ChunkNotExistError == errorCode) { if (existCloneInfo(request_)) { needLazyClone = true; @@ -324,14 +322,15 @@ void ReadChunkRequest::OnApply(uint64_t index, CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); break; } - // 如果需要从源端拷贝数据,需要将请求转发给clone manager处理 - if ( needLazyClone || NeedClone(chunkInfo) ) { + // If you need to copy data from the source, you need to forward the + // request to the clone manager for processing + if (needLazyClone || NeedClone(chunkInfo)) { applyIndex = index; - std::shared_ptr cloneTask = - cloneMgr_->GenerateCloneTask( + std::shared_ptr cloneTask = cloneMgr_->GenerateCloneTask( std::dynamic_pointer_cast(shared_from_this()), done); - // TODO(yyk) 尽量不能阻塞队列,后面要具体考虑 + // TODO(yyk) should try not to block the queue, and specific + // considerations should be taken later bool result = cloneMgr_->IssueCloneTask(cloneTask); if (!result) { LOG(ERROR) << "issue clone task failed: " @@ -340,14 +339,16 @@ void ReadChunkRequest::OnApply(uint64_t index, CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); break; } - // 如果请求成功转发给了clone manager就可以直接返回了 + // If the request is successfully forwarded to the clone manager, it + // can be returned directly return; } - // 如果是ReadChunk请求还需要从本地读取数据 + // If it is a ReadChunk request, data needs to be read locally if (request_->optype() == CHUNK_OP_TYPE::CHUNK_OP_READ) { ReadChunk(); } - // 如果是recover请求,说明请求区域已经被写过了,可以直接返回成功 + // If it is a recover request, it indicates that the request area has + // been written and can directly return success if (request_->optype() == CHUNK_OP_TYPE::CHUNK_OP_RECOVER) { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } @@ -362,57 +363,51 @@ void ReadChunkRequest::OnApply(uint64_t index, } void ReadChunkRequest::OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) { + const ChunkRequest& request, + const butil::IOBuf& data) { (void)datastore; (void)request; (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - // read什么都不用做 + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing Read doesn't need to do anything } bool ReadChunkRequest::NeedClone(const CSChunkInfo& chunkInfo) { - // 如果不是 clone chunk,就不需要拷贝 + // If 
it's not a clone chunk, there's no need to copy it if (chunkInfo.isClone) { off_t offset = request_->offset(); size_t length = request_->size(); uint32_t blockSize = chunkInfo.blockSize; uint32_t beginIndex = offset / blockSize; uint32_t endIndex = (offset + length - 1) / blockSize; - // 如果是clone chunk,且存在未被写过的page,就需要拷贝 - if (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex) - != Bitmap::NO_POS) { + // If it is a clone chunk and there are unwritten pages, it needs to be + // copied + if (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex) != + Bitmap::NO_POS) { return true; } } return false; } -static void ReadBufferDeleter(void* ptr) { - delete[] static_cast(ptr); -} +static void ReadBufferDeleter(void* ptr) { delete[] static_cast(ptr); } void ReadChunkRequest::ReadChunk() { - char *readBuffer = nullptr; + char* readBuffer = nullptr; size_t size = request_->size(); - readBuffer = new(std::nothrow)char[size]; - CHECK(nullptr != readBuffer) - << "new readBuffer failed " << strerror(errno); + readBuffer = new (std::nothrow) char[size]; + CHECK(nullptr != readBuffer) << "new readBuffer failed " << strerror(errno); - auto ret = datastore_->ReadChunk(request_->chunkid(), - request_->sn(), - readBuffer, - request_->offset(), - size); + auto ret = datastore_->ReadChunk(request_->chunkid(), request_->sn(), + readBuffer, request_->offset(), size); butil::IOBuf wrapper; wrapper.append_user_data(readBuffer, size, ReadBufferDeleter); if (CSErrorCode::Success == ret) { cntl_->response_attachment().append(wrapper); response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); } else if (CSErrorCode::InternalError == ret) { LOG(FATAL) << "read failed: " << " data store return: " << ret @@ -421,50 +416,47 @@ void ReadChunkRequest::ReadChunk() { LOG(ERROR) << "read failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } } void WriteChunkRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); uint32_t cost; - std::string cloneSourceLocation; + std::string cloneSourceLocation; if (existCloneInfo(request_)) { auto func = ::curve::common::LocationOperator::GenerateCurveLocation; - cloneSourceLocation = func(request_->clonefilesource(), - request_->clonefileoffset()); + cloneSourceLocation = + func(request_->clonefilesource(), request_->clonefileoffset()); } - auto ret = datastore_->WriteChunk(request_->chunkid(), - request_->sn(), - cntl_->request_attachment(), - request_->offset(), - request_->size(), - &cost, - cloneSourceLocation); + auto ret = datastore_->WriteChunk( + request_->chunkid(), request_->sn(), cntl_->request_attachment(), + request_->offset(), request_->size(), &cost, cloneSourceLocation); if (CSErrorCode::Success == ret) { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); node_->UpdateAppliedIndex(index); } else if (CSErrorCode::BackwardRequestError == ret) { - // 打快照那一刻是有可能出现旧版本的请求 - // 返回错误给客户端,让客户端带新版本来重试 + // At the moment of taking a snapshot, there may be requests for older + // versions Return an error to the client and ask them to try again with + // the new version of the original 
LOG(WARNING) << "write failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD); } else if (CSErrorCode::InternalError == ret || CSErrorCode::CrcCheckError == ret || CSErrorCode::FileFormatError == ret) { /** - * internalerror一般是磁盘错误,为了防止副本不一致,让进程退出 - * TODO(yyk): 当前遇到write错误直接fatal退出整个 - * ChunkServer后期考虑仅仅标坏这个copyset,保证较好的可用性 - */ + * An internal error is usually a disk error. To prevent inconsistent + * replicas, the process is forced to exit + * TODO(yyk): Currently encountering a write error, directly fatally + * exit the entire process ChunkServer will consider only flagging this + * copyset in the later stage to ensure good availability + */ LOG(FATAL) << "write failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); @@ -472,8 +464,7 @@ void WriteChunkRequest::OnApply(uint64_t index, LOG(ERROR) << "write failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } response_->set_appliedindex(MaxAppliedIndex(node_, index)); @@ -481,27 +472,24 @@ void WriteChunkRequest::OnApply(uint64_t index, } void WriteChunkRequest::OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) { - // NOTE: 处理过程中优先使用参数传入的datastore/request + const ChunkRequest& request, + const butil::IOBuf& data) { + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing uint32_t cost; - std::string cloneSourceLocation; + std::string cloneSourceLocation; if (existCloneInfo(&request)) { auto func = ::curve::common::LocationOperator::GenerateCurveLocation; - cloneSourceLocation = func(request.clonefilesource(), - request.clonefileoffset()); + cloneSourceLocation = + func(request.clonefilesource(), request.clonefileoffset()); } - auto ret = datastore->WriteChunk(request.chunkid(), - request.sn(), - data, - request.offset(), - request.size(), - &cost, + auto ret = datastore->WriteChunk(request.chunkid(), request.sn(), data, + request.offset(), request.size(), &cost, cloneSourceLocation); - if (CSErrorCode::Success == ret) { - return; - } else if (CSErrorCode::BackwardRequestError == ret) { + if (CSErrorCode::Success == ret) { + return; + } else if (CSErrorCode::BackwardRequestError == ret) { LOG(WARNING) << "write failed: " << " data store return: " << ret << ", request: " << request.ShortDebugString(); @@ -519,24 +507,22 @@ void WriteChunkRequest::OnApplyFromLog(std::shared_ptr datastore, } void ReadSnapshotRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - char *readBuffer = nullptr; + char* readBuffer = nullptr; uint32_t size = request_->size(); - readBuffer = new(std::nothrow)char[size]; - CHECK(nullptr != readBuffer) << "new readBuffer failed, " - << errno << ":" << strerror(errno); - auto ret = datastore_->ReadSnapshotChunk(request_->chunkid(), - request_->sn(), - readBuffer, - request_->offset(), - request_->size()); + readBuffer = new (std::nothrow) char[size]; + CHECK(nullptr != readBuffer) + << "new readBuffer failed, " << errno << ":" << strerror(errno); + auto ret = datastore_->ReadSnapshotChunk( + request_->chunkid(), request_->sn(), 
readBuffer, request_->offset(), + request_->size()); butil::IOBuf wrapper; wrapper.append_user_data(readBuffer, size, ReadBufferDeleter); do { /** - * 1.成功 + * 1. Success */ if (CSErrorCode::Success == ret) { cntl_->response_attachment().append(wrapper); @@ -548,7 +534,8 @@ void ReadSnapshotRequest::OnApply(uint64_t index, * 2.chunk not exist */ if (CSErrorCode::ChunkNotExistError == ret) { - response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); //NOLINT + response_->set_status( + CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); // NOLINT break; } /** @@ -560,30 +547,29 @@ void ReadSnapshotRequest::OnApply(uint64_t index, << ", request: " << request_->ShortDebugString(); } /** - * 4.其他错误 + * 4. Other errors */ LOG(ERROR) << "read snapshot failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } while (0); response_->set_appliedindex(MaxAppliedIndex(node_, index)); } void ReadSnapshotRequest::OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) { + const ChunkRequest& request, + const butil::IOBuf& data) { (void)datastore; (void)request; (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - // read什么都不用做 + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing Read doesn't need to do anything } void DeleteSnapshotRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); CSErrorCode ret = datastore_->DeleteSnapshotChunkOrCorrectSn( request_->chunkid(), request_->correctedsn()); @@ -594,8 +580,7 @@ void DeleteSnapshotRequest::OnApply(uint64_t index, LOG(WARNING) << "delete snapshot or correct sn failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD); } else if (CSErrorCode::InternalError == ret) { LOG(FATAL) << "delete snapshot or correct sn failed: " << " data store return: " << ret @@ -604,20 +589,20 @@ void DeleteSnapshotRequest::OnApply(uint64_t index, LOG(ERROR) << "delete snapshot or correct sn failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } response_->set_appliedindex(MaxAppliedIndex(node_, index)); } -void DeleteSnapshotRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT - const ChunkRequest &request, - const butil::IOBuf &data) { +void DeleteSnapshotRequest::OnApplyFromLog( + std::shared_ptr datastore, // NOLINT + const ChunkRequest& request, const butil::IOBuf& data) { (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - auto ret = datastore->DeleteSnapshotChunkOrCorrectSn( - request.chunkid(), request.correctedsn()); + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing + auto ret = datastore->DeleteSnapshotChunkOrCorrectSn(request.chunkid(), + request.correctedsn()); if (CSErrorCode::Success == ret) { return; } else if (CSErrorCode::BackwardRequestError == ret) { @@ -636,14 +621,12 @@ void DeleteSnapshotRequest::OnApplyFromLog(std::shared_ptr datastor } void 
CreateCloneChunkRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - auto ret = datastore_->CreateCloneChunk(request_->chunkid(), - request_->sn(), - request_->correctedsn(), - request_->size(), - request_->location()); + auto ret = datastore_->CreateCloneChunk( + request_->chunkid(), request_->sn(), request_->correctedsn(), + request_->size(), request_->location()); if (CSErrorCode::Success == ret) { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); @@ -652,44 +635,41 @@ void CreateCloneChunkRequest::OnApply(uint64_t index, CSErrorCode::CrcCheckError == ret || CSErrorCode::FileFormatError == ret) { /** - * TODO(yyk): 当前遇到createclonechunk错误直接fatal退出整个 - * ChunkServer后期考虑仅仅标坏这个copyset,保证较好的可用性 + * TODO(yyk): Currently encountering the createclonechunk error, + * directly fatally exit the entire process ChunkServer will consider + * only flagging this copyset in the later stage to ensure good + * availability */ LOG(FATAL) << "create clone failed: " << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } else if (CSErrorCode::ChunkConflictError == ret) { LOG(WARNING) << "create clone chunk exist: " - << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST); + << ", request: " << request_->ShortDebugString(); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST); } else { LOG(ERROR) << "create clone failed: " << ", request: " << request_->ShortDebugString(); - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } response_->set_appliedindex(MaxAppliedIndex(node_, index)); } -void CreateCloneChunkRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT - const ChunkRequest &request, - const butil::IOBuf &data) { +void CreateCloneChunkRequest::OnApplyFromLog( + std::shared_ptr datastore, // NOLINT + const ChunkRequest& request, const butil::IOBuf& data) { (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - auto ret = datastore->CreateCloneChunk(request.chunkid(), - request.sn(), + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing + auto ret = datastore->CreateCloneChunk(request.chunkid(), request.sn(), request.correctedsn(), - request.size(), - request.location()); - if (CSErrorCode::Success == ret) - return; + request.size(), request.location()); + if (CSErrorCode::Success == ret) return; if (CSErrorCode::ChunkConflictError == ret) { LOG(WARNING) << "create clone chunk exist: " - << ", request: " << request.ShortDebugString(); + << ", request: " << request.ShortDebugString(); return; } @@ -714,8 +694,9 @@ void PasteChunkInternalRequest::Process() { } /** - * 如果propose成功,说明request成功交给了raft处理, - * 那么done_就不能被调用,只有propose失败了才需要提前返回 + * If the proposal is successful, it indicates that the request has been + * successfully handed over to the raft for processing, So, done_ cannot be + * called, only if the proposal fails, it needs to be returned in advance */ if (0 == Propose(request_, &data_)) { doneGuard.release(); @@ -723,13 +704,12 @@ void PasteChunkInternalRequest::Process() { } void PasteChunkInternalRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) 
{ brpc::ClosureGuard doneGuard(done); auto ret = datastore_->PasteChunk(request_->chunkid(), - data_.to_string().c_str(), //NOLINT - request_->offset(), - request_->size()); + data_.to_string().c_str(), // NOLINT + request_->offset(), request_->size()); if (CSErrorCode::Success == ret) { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); @@ -746,16 +726,15 @@ void PasteChunkInternalRequest::OnApply(uint64_t index, response_->set_appliedindex(MaxAppliedIndex(node_, index)); } -void PasteChunkInternalRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT - const ChunkRequest &request, - const butil::IOBuf &data) { - // NOTE: 处理过程中优先使用参数传入的datastore/request - auto ret = datastore->PasteChunk(request.chunkid(), - data.to_string().c_str(), - request.offset(), - request.size()); - if (CSErrorCode::Success == ret) - return; +void PasteChunkInternalRequest::OnApplyFromLog( + std::shared_ptr datastore, // NOLINT + const ChunkRequest& request, const butil::IOBuf& data) { + // NOTE: Prioritize the use of datastore/request passed in as parameters + // during processing + auto ret = + datastore->PasteChunk(request.chunkid(), data.to_string().c_str(), + request.offset(), request.size()); + if (CSErrorCode::Success == ret) return; if (CSErrorCode::InternalError == ret) { LOG(FATAL) << "paste chunk failed: " @@ -767,27 +746,22 @@ void PasteChunkInternalRequest::OnApplyFromLog(std::shared_ptr data } void ScanChunkRequest::OnApply(uint64_t index, - ::google::protobuf::Closure *done) { + ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); // read and calculate crc, build scanmap uint32_t crc = 0; size_t size = request_->size(); - std::unique_ptr readBuffer(new(std::nothrow)char[size]); - CHECK(nullptr != readBuffer) - << "new readBuffer failed " << strerror(errno); + std::unique_ptr readBuffer(new (std::nothrow) char[size]); + CHECK(nullptr != readBuffer) << "new readBuffer failed " << strerror(errno); // scan chunk metapage or user data auto ret = 0; if (request_->has_readmetapage() && request_->readmetapage()) { - ret = datastore_->ReadChunkMetaPage(request_->chunkid(), - request_->sn(), + ret = datastore_->ReadChunkMetaPage(request_->chunkid(), request_->sn(), readBuffer.get()); } else { - ret = datastore_->ReadChunk(request_->chunkid(), - request_->sn(), - readBuffer.get(), - request_->offset(), - size); + ret = datastore_->ReadChunk(request_->chunkid(), request_->sn(), + readBuffer.get(), request_->offset(), size); } if (CSErrorCode::Success == ret) { @@ -808,39 +782,32 @@ void ScanChunkRequest::OnApply(uint64_t index, scanManager_->GenScanJobs(jobKey); response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); } else if (CSErrorCode::InternalError == ret) { LOG(FATAL) << "scan chunk failed, read chunk internal error" << ", request: " << request_->ShortDebugString(); } else { - response_->set_status( - CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); } } -void ScanChunkRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT - const ChunkRequest &request, - const butil::IOBuf &data) { +void ScanChunkRequest::OnApplyFromLog( + std::shared_ptr datastore, // NOLINT + const ChunkRequest& request, const butil::IOBuf& data) { (void)data; uint32_t crc = 0; size_t size = request.size(); 
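The scan path above fingerprints a chunk range with a CRC and ships it to the leader inside a ScanMap, so that replicas can be compared without moving the data itself. As a rough illustration of that comparison, here is a minimal sketch; ScanMapLite and SameData are hypothetical stand-ins for the generated proto/scan.pb.h types and are not part of this patch:

#include <cstdint>

// Hypothetical, simplified stand-in for the generated ScanMap message; the
// real fields are set via the scanMap->set_*() calls later in this diff.
struct ScanMapLite {
    uint64_t chunkid;
    uint64_t offset;
    uint64_t len;
    uint32_t crc;
};

// Two replicas hold identical data for a range iff they scanned the same
// range of the same chunk and produced the same CRC.
bool SameData(const ScanMapLite& leader, const ScanMapLite& follower) {
    return leader.chunkid == follower.chunkid &&
           leader.offset == follower.offset && leader.len == follower.len &&
           leader.crc == follower.crc;
}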
- std::unique_ptr readBuffer(new(std::nothrow)char[size]); - CHECK(nullptr != readBuffer) - << "new readBuffer failed " << strerror(errno); + std::unique_ptr readBuffer(new (std::nothrow) char[size]); + CHECK(nullptr != readBuffer) << "new readBuffer failed " << strerror(errno); // scan chunk metapage or user data auto ret = 0; if (request.has_readmetapage() && request.readmetapage()) { - ret = datastore->ReadChunkMetaPage(request.chunkid(), - request.sn(), - readBuffer.get()); + ret = datastore->ReadChunkMetaPage(request.chunkid(), request.sn(), + readBuffer.get()); } else { - ret = datastore->ReadChunk(request.chunkid(), - request.sn(), - readBuffer.get(), - request.offset(), - size); + ret = datastore->ReadChunk(request.chunkid(), request.sn(), + readBuffer.get(), request.offset(), size); } if (CSErrorCode::Success == ret) { @@ -861,10 +828,10 @@ void ScanChunkRequest::OnApplyFromLog(std::shared_ptr datastore, / } } -void ScanChunkRequest::BuildAndSendScanMap(const ChunkRequest &request, +void ScanChunkRequest::BuildAndSendScanMap(const ChunkRequest& request, uint64_t index, uint32_t crc) { // send rpc to leader - brpc::Channel *channel = new brpc::Channel(); + brpc::Channel* channel = new brpc::Channel(); if (channel->Init(peer_.addr, NULL) != 0) { LOG(ERROR) << "Fail to init channel to chunkserver for send scanmap: " << peer_; @@ -873,7 +840,7 @@ void ScanChunkRequest::BuildAndSendScanMap(const ChunkRequest &request, } // build scanmap - ScanMap *scanMap = new ScanMap(); + ScanMap* scanMap = new ScanMap(); scanMap->set_logicalpoolid(request.logicpoolid()); scanMap->set_copysetid(request.copysetid()); scanMap->set_chunkid(request.chunkid()); @@ -882,20 +849,17 @@ void ScanChunkRequest::BuildAndSendScanMap(const ChunkRequest &request, scanMap->set_offset(request.offset()); scanMap->set_len(request.size()); - FollowScanMapRequest *scanMapRequest = new FollowScanMapRequest(); + FollowScanMapRequest* scanMapRequest = new FollowScanMapRequest(); scanMapRequest->set_allocated_scanmap(scanMap); ScanService_Stub stub(channel); brpc::Controller* cntl = new brpc::Controller(); cntl->set_timeout_ms(request.sendscanmaptimeoutms()); - FollowScanMapResponse *scanMapResponse = new FollowScanMapResponse(); - SendScanMapClosure *done = new SendScanMapClosure( - scanMapRequest, - scanMapResponse, - request.sendscanmaptimeoutms(), - request.sendscanmapretrytimes(), - request.sendscanmapretryintervalus(), - cntl, channel); + FollowScanMapResponse* scanMapResponse = new FollowScanMapResponse(); + SendScanMapClosure* done = new SendScanMapClosure( + scanMapRequest, scanMapResponse, request.sendscanmaptimeoutms(), + request.sendscanmapretrytimes(), request.sendscanmapretryintervalus(), + cntl, channel); LOG(INFO) << "logid = " << cntl->log_id() << " Sending scanmap: " << scanMap->ShortDebugString() << " to leader: " << peer_.addr; diff --git a/src/chunkserver/op_request.h b/src/chunkserver/op_request.h index c29484f79b..d83a7ab827 100755 --- a/src/chunkserver/op_request.h +++ b/src/chunkserver/op_request.h @@ -23,21 +23,21 @@ #ifndef SRC_CHUNKSERVER_OP_REQUEST_H_ #define SRC_CHUNKSERVER_OP_REQUEST_H_ -#include -#include #include +#include +#include #include -#include "proto/chunk.pb.h" #include "include/chunkserver/chunkserver_common.h" +#include "proto/chunk.pb.h" #include "src/chunkserver/concurrent_apply/concurrent_apply.h" #include "src/chunkserver/datastore/define.h" #include "src/chunkserver/scan_manager.h" -using ::google::protobuf::RpcController; -using 
::curve::chunkserver::concurrent::ConcurrentApplyModule;
 using ::curve::chunkserver::concurrent::ApplyTaskType;
+using ::curve::chunkserver::concurrent::ConcurrentApplyModule;
+using ::google::protobuf::RpcController;
 
 namespace curve {
 namespace chunkserver {
@@ -49,12 +49,10 @@ class CloneCore;
 class CloneTask;
 class ScanManager;
 
-
-inline bool existCloneInfo(const ChunkRequest *request) {
+inline bool existCloneInfo(const ChunkRequest* request) {
     if (request != nullptr) {
-        if (request->has_clonefilesource() &&
-            request->has_clonefileoffset()) {
-            return true;
+        if (request->has_clonefilesource() && request->has_clonefileoffset()) {
+            return true;
         }
     }
     return false;
@@ -63,97 +61,104 @@ inline bool existCloneInfo(const ChunkRequest *request) {
 
 class ChunkOpRequest : public std::enable_shared_from_this<ChunkOpRequest> {
  public:
     ChunkOpRequest();
-    ChunkOpRequest(std::shared_ptr<CopysetNode> nodePtr,
-                   RpcController *cntl,
-                   const ChunkRequest *request,
-                   ChunkResponse *response,
-                   ::google::protobuf::Closure *done);
+    ChunkOpRequest(std::shared_ptr<CopysetNode> nodePtr, RpcController* cntl,
+                   const ChunkRequest* request, ChunkResponse* response,
+                   ::google::protobuf::Closure* done);
 
     virtual ~ChunkOpRequest() = default;
 
     /**
-     * 处理request,实际上是Propose给相应的copyset
+     * Process the request; in effect this proposes it to the corresponding
+     * copyset
     */
     virtual void Process();
 
     /**
-     * request正常情况从内存中获取上下文on apply逻辑
-     * @param index:此op log entry的index
-     * @param done:对应的ChunkClosure
+     * On-apply logic for the normal path, where the request context is taken
+     * from memory
+     * @param index: the index of this op log entry
+     * @param done: the corresponding ChunkClosure
     */
-    virtual void OnApply(uint64_t index,
-                         ::google::protobuf::Closure *done) = 0;
+    virtual void OnApply(uint64_t index, ::google::protobuf::Closure* done) = 0;
 
     /**
-     * NOTE: 子类实现过程中优先使用参数传入的datastore/request
-     * 从log entry反序列之后得到request详细信息进行处理,request
-     * 相关的上下文和依赖的data store都是从参数传递进去的
-     * 1.重启回放日志,从磁盘读取op log entry然后执行on apply逻辑
-     * 2. follower执行on apply的逻辑
-     * @param datastore:chunk数据持久化层
-     * @param request:反序列化后得到的request 细信息
-     * @param data:反序列化后得到的request要处理的数据
+     * NOTE: Subclass implementations must prefer the datastore/request passed
+     * in as parameters. The detailed request is obtained by deserializing the
+     * log entry, and both the request context and the datastore it depends on
+     * are handed in through the parameters. This path is taken when:
+     * 1. the log is replayed after a restart: the op log entry is read from
+     * disk and the on-apply logic is executed
+     * 2. a follower executes the on-apply logic
+     * @param datastore: chunk data persistence layer
+     * @param request: the request details obtained from deserialization
+     * @param data: the data to be processed by the request, obtained from
+     * deserialization
     */
     virtual void OnApplyFromLog(std::shared_ptr<CSDataStore> datastore,
-                                const ChunkRequest &request,
-                                const butil::IOBuf &data) = 0;
 
     /**
-     * 返回request的done成员
+     * Return the done member of the request
     */
-    ::google::protobuf::Closure *Closure() { return done_; }
+    ::google::protobuf::Closure* Closure() { return done_; }
 
     /**
-     * 返回chunk id
+     * Return the chunk id
     */
     ChunkID ChunkId() { return request_->chunkid(); }
 
     /**
-     * 返回请求类型
+     * Return the request type
     */
     CHUNK_OP_TYPE OpType() { return request_->optype(); }
 
     /**
-     * 返回请求大小
+     * Return the request size
    */
     uint32_t RequestSize() { return request_->size(); }
 
     /**
-     * 转发request给leader
+     * Forward the request to the leader
     */
     virtual void RedirectChunkRequest();
 
  public:
     /**
-     * Op序列化工具函数
-     * | data |
-     * | op meta | op data |
-     * | op request length | op request |
-     * |      32 bit       |    ....    |
-     * 各个字段解释如下:
-     * data: encode之后的数据,实际上就是一条op log entry的data
-     * op meta: 就是op的元数据,这里是op request部分的长度
-     * op data: 就是request通过protobuf序列化后的数据
-     * @param request:Chunk Request
-     * @param data:请求中包含的数据内容
-     * @param log:出参,存放序列化好的数据,用户自己保证data!=nullptr
-     * @return 0成功,-1失败
+     * Op serialization utility
+     * | data |
+     * | op meta | op data |
+     * | op request length | op request |
+     * |      32 bit       |    ....    |
+     * The fields are as follows:
+     * data: the encoded buffer, i.e. the data of one op log entry
+     * op meta: the metadata of the op, here the length of the op request
+     * section
+     * op data: the request serialized through protobuf
+     * @param request: Chunk Request
+     * @param data: the data content carried by the request
+     * @param log: output parameter holding the serialized data; the caller
+     * must guarantee data != nullptr
+     * @return 0 on success, -1 on failure
     */
-    static int Encode(const ChunkRequest *request,
-                      const butil::IOBuf *data,
-                      butil::IOBuf *log);
+    static int Encode(const ChunkRequest* request, const butil::IOBuf* data,
+                      butil::IOBuf* log);
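The comment above pins down the on-disk layout of an op log entry: a 32-bit length for the protobuf-encoded ChunkRequest, followed by the request itself, followed by the raw payload. A hedged sketch of that framing with butil::IOBuf — EncodeSketch is illustrative only, the Encode/Decode declared here are the authoritative implementation, and the byte order of the length prefix is an assumption:

    #include <butil/iobuf.h>
    #include <butil/sys_byteorder.h>

    // | op request length | op request | op data |
    static int EncodeSketch(const ChunkRequest* request,
                            const butil::IOBuf* data, butil::IOBuf* log) {
        const uint32_t metaSize = butil::HostToNet32(request->ByteSize());
        log->append(&metaSize, sizeof(metaSize));             // op meta
        butil::IOBufAsZeroCopyOutputStream wrapper(log);
        if (!request->SerializeToZeroCopyStream(&wrapper)) {  // op request
            return -1;
        }
        log->append(*data);  // op data; doc above requires data != nullptr
        return 0;
    }

Decode would reverse the steps: cut the 4-byte prefix, parse that many bytes back into a ChunkRequest, and hand the remainder out as data.
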
 
     /**
-     * 反序列化,从log entry得到ChunkOpRequest,当前反序列出的ChunkRequest和data
-     * 都会从出参传出去,而不会放在ChunkOpRequest的成员变量里面
-     * @param log:op log entry
-     * @param request: 出参,存放反序列上下文
-     * @param data:出参,op操作的数据
-     * @return nullptr,失败,否则返回相应的ChunkOpRequest
+     * Deserialize: rebuild a ChunkOpRequest from a log entry. The
+     * deserialized ChunkRequest and data are handed back through the output
+     * parameters instead of being stored in ChunkOpRequest member variables
+     * @param log: op log entry
+     * @param request: output parameter holding the deserialized request
+     * @param data: output parameter holding the data the op operates on
+     * @return the corresponding ChunkOpRequest, or nullptr on failure
     */
     static std::shared_ptr<ChunkOpRequest> Decode(butil::IOBuf log,
-                                  ChunkRequest *request,
-                                  butil::IOBuf *data,
+                                                  ChunkRequest* request,
+                                                  butil::IOBuf* data,
                                                   uint64_t index,
                                                   PeerId leaderId);
 
@@ -161,49 +166,43 @@ class ChunkOpRequest : public std::enable_shared_from_this<ChunkOpRequest> {
 
  protected:
     /**
-     * 打包request为braft::task,propose给相应的复制组
-     * @param request:Chunk Request
-     * @param data:请求中包含的数据内容
-     * @return 0成功,-1失败
+     * Package the request as a braft::Task and propose it to the
+     * corresponding replication group
+     * @param request: Chunk Request
+     * @param data: the data content carried by the request
+     * @return 0 on success, -1 on failure
     */
-    int Propose(const ChunkRequest *request,
-                const butil::IOBuf *data);
+    int Propose(const ChunkRequest* request, const butil::IOBuf* data);
 
  protected:
-    // chunk持久化接口
+    // chunk persistence layer
     std::shared_ptr<CSDataStore> datastore_;
-    // 复制组
+    // replication group (copyset node)
     std::shared_ptr<CopysetNode> node_;
     // rpc controller
-    brpc::Controller *cntl_;
-    // rpc 请求
-    const ChunkRequest *request_;
-    // rpc 返回
-    ChunkResponse *response_;
+    brpc::Controller* cntl_;
+    // rpc request
+    const ChunkRequest* request_;
+    // rpc response
+    ChunkResponse* response_;
     // rpc done closure
-    ::google::protobuf::Closure *done_;
+    ::google::protobuf::Closure* done_;
 };
 
 class DeleteChunkRequest : public ChunkOpRequest {
  public:
-    DeleteChunkRequest() :
-        ChunkOpRequest() {}
+    DeleteChunkRequest() : ChunkOpRequest() {}
     DeleteChunkRequest(std::shared_ptr<CopysetNode> nodePtr,
-                       RpcController *cntl,
-                       const ChunkRequest *request,
-                       ChunkResponse *response,
-                       ::google::protobuf::Closure *done) :
-        ChunkOpRequest(nodePtr,
-                       cntl,
-                       request,
-                       response,
-                       done) {}
+                       RpcController* cntl, const ChunkRequest* request,
+                       ChunkResponse* response,
+                       ::google::protobuf::Closure* done)
+        : ChunkOpRequest(nodePtr, cntl, request, response, done) {}
     virtual ~DeleteChunkRequest() = default;
 
-    void OnApply(uint64_t index, ::google::protobuf::Closure *done) override;
+    void OnApply(uint64_t index, ::google::protobuf::Closure* done) override;
     void OnApplyFromLog(std::shared_ptr<CSDataStore> datastore,
-                        const ChunkRequest &request,
-                        const butil::IOBuf &data) override;
+                        const ChunkRequest& request,
+                        const butil::IOBuf& data) override;
 };
 
 class ReadChunkRequest : public ChunkOpRequest {
@@ -211,154 +210,118 @@ class ReadChunkRequest : public ChunkOpRequest {
     friend class PasteChunkInternalRequest;
 
  public:
-    ReadChunkRequest() :
-        ChunkOpRequest() {}
+    ReadChunkRequest() : ChunkOpRequest() {}
     ReadChunkRequest(std::shared_ptr<CopysetNode> nodePtr,
-                     CloneManager* cloneMgr,
-                     RpcController *cntl,
-                     const ChunkRequest *request,
-                     ChunkResponse *response,
-
::google::protobuf::Closure *done); + CloneManager* cloneMgr, RpcController* cntl, + const ChunkRequest* request, ChunkResponse* response, + ::google::protobuf::Closure* done); virtual ~ReadChunkRequest() = default; void Process() override; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; - const ChunkRequest* GetChunkRequest() { - return request_; - } + const ChunkRequest* GetChunkRequest() { return request_; } private: - // 根据chunk信息判断是否需要拷贝数据 + // Determine whether to copy data based on chunk information bool NeedClone(const CSChunkInfo& chunkInfo); - // 从chunk文件中读数据 + // Reading data from chunk file void ReadChunk(); private: CloneManager* cloneMgr_; - // 并发模块 + // Concurrent module ConcurrentApplyModule* concurrentApplyModule_; - // 保存 apply index + // Save the apply index uint64_t applyIndex; }; class WriteChunkRequest : public ChunkOpRequest { public: - WriteChunkRequest() : - ChunkOpRequest() {} - WriteChunkRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - cntl, - request, - response, - done) {} + WriteChunkRequest() : ChunkOpRequest() {} + WriteChunkRequest(std::shared_ptr nodePtr, RpcController* cntl, + const ChunkRequest* request, ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, cntl, request, response, done) {} virtual ~WriteChunkRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done); + void OnApply(uint64_t index, ::google::protobuf::Closure* done); void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; }; class ReadSnapshotRequest : public ChunkOpRequest { public: - ReadSnapshotRequest() : - ChunkOpRequest() {} + ReadSnapshotRequest() : ChunkOpRequest() {} ReadSnapshotRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - cntl, - request, - response, - done) {} + RpcController* cntl, const ChunkRequest* request, + ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, cntl, request, response, done) {} virtual ~ReadSnapshotRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; }; class DeleteSnapshotRequest : public ChunkOpRequest { public: - DeleteSnapshotRequest() : - ChunkOpRequest() {} + DeleteSnapshotRequest() : ChunkOpRequest() {} DeleteSnapshotRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - cntl, - request, - response, - done) {} + RpcController* cntl, const ChunkRequest* request, + ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, 
cntl, request, response, done) {} virtual ~DeleteSnapshotRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; }; class CreateCloneChunkRequest : public ChunkOpRequest { public: - CreateCloneChunkRequest() : - ChunkOpRequest() {} + CreateCloneChunkRequest() : ChunkOpRequest() {} CreateCloneChunkRequest(std::shared_ptr nodePtr, - RpcController *cntl, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - cntl, - request, - response, - done) {} + RpcController* cntl, const ChunkRequest* request, + ChunkResponse* response, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, cntl, request, response, done) {} virtual ~CreateCloneChunkRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; }; class PasteChunkInternalRequest : public ChunkOpRequest { public: - PasteChunkInternalRequest() : - ChunkOpRequest() {} + PasteChunkInternalRequest() : ChunkOpRequest() {} PasteChunkInternalRequest(std::shared_ptr nodePtr, - const ChunkRequest *request, - ChunkResponse *response, - const butil::IOBuf* data, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - nullptr, - request, - response, - done) { - if (data != nullptr) { - data_ = *data; - } + const ChunkRequest* request, + ChunkResponse* response, const butil::IOBuf* data, + ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, nullptr, request, response, done) { + if (data != nullptr) { + data_ = *data; } + } virtual ~PasteChunkInternalRequest() = default; void Process() override; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; private: butil::IOBuf data_; @@ -366,28 +329,22 @@ class PasteChunkInternalRequest : public ChunkOpRequest { class ScanChunkRequest : public ChunkOpRequest { public: - ScanChunkRequest(uint64_t index, PeerId peer) : - ChunkOpRequest(), index_(index), peer_(peer) {} + ScanChunkRequest(uint64_t index, PeerId peer) + : ChunkOpRequest(), index_(index), peer_(peer) {} ScanChunkRequest(std::shared_ptr nodePtr, - ScanManager* scanManager, - const ChunkRequest *request, - ChunkResponse *response, - ::google::protobuf::Closure *done) : - ChunkOpRequest(nodePtr, - nullptr, - request, - response, - done), - scanManager_(scanManager) {} + ScanManager* scanManager, const ChunkRequest* request, + ChunkResponse* response, ::google::protobuf::Closure* done) + : ChunkOpRequest(nodePtr, nullptr, request, response, done), + scanManager_(scanManager) {} virtual ~ScanChunkRequest() = default; - void OnApply(uint64_t index, ::google::protobuf::Closure *done) override; + void OnApply(uint64_t index, ::google::protobuf::Closure* done) override; void 
OnApplyFromLog(std::shared_ptr datastore, - const ChunkRequest &request, - const butil::IOBuf &data) override; + const ChunkRequest& request, + const butil::IOBuf& data) override; private: - void BuildAndSendScanMap(const ChunkRequest &request, uint64_t index, + void BuildAndSendScanMap(const ChunkRequest& request, uint64_t index, uint32_t crc); ScanManager* scanManager_; uint64_t index_; diff --git a/src/chunkserver/passive_getfn.h b/src/chunkserver/passive_getfn.h index ac6655d1b2..56b6cd01eb 100644 --- a/src/chunkserver/passive_getfn.h +++ b/src/chunkserver/passive_getfn.h @@ -23,70 +23,70 @@ #ifndef SRC_CHUNKSERVER_PASSIVE_GETFN_H_ #define SRC_CHUNKSERVER_PASSIVE_GETFN_H_ -#include "src/chunkserver/trash.h" #include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/datastore/file_pool.h" #include "src/chunkserver/raftlog/curve_segment_log_storage.h" +#include "src/chunkserver/trash.h" namespace curve { namespace chunkserver { - /** - * 获取datastore中chunk文件的数量 - * @param arg: datastore的对象指针 - */ - uint32_t GetDatastoreChunkCountFunc(void* arg); - /** - * @brief: Get the number of WAL segment in CurveSegmentLogStorage - * @param arg: The pointer to CurveSegmentLogStorage - */ - uint32_t GetLogStorageWalSegmentCountFunc(void* arg); - /** - * 获取datastore中快照chunk的数量 - * @param arg: datastore的对象指针 - */ - uint32_t GetDatastoreSnapshotCountFunc(void* arg); - /** - * 获取datastore中clone chunk的数量 - * @param arg: datastore的对象指针 - */ - uint32_t GetDatastoreCloneChunkCountFunc(void* arg); - /** - * 获取chunkserver上chunk文件的数量 - * @param arg: nullptr - */ - uint32_t GetTotalChunkCountFunc(void* arg); - /** - * @brief: Get the total number of WAL segment in chunkserver - * @param arg: The pointer to ChunkServerMetric - */ - uint32_t GetTotalWalSegmentCountFunc(void* arg); +/** + * Obtain the number of chunk files in the datastore + * @param arg: Object pointer to datastore + */ +uint32_t GetDatastoreChunkCountFunc(void* arg); +/** + * @brief: Get the number of WAL segment in CurveSegmentLogStorage + * @param arg: The pointer to CurveSegmentLogStorage + */ +uint32_t GetLogStorageWalSegmentCountFunc(void* arg); +/** + * Obtain the number of snapshot chunks in the datastore + * @param arg: Object pointer to datastore + */ +uint32_t GetDatastoreSnapshotCountFunc(void* arg); +/** + * Obtain the number of clone chunks in the datastore + * @param arg: Object pointer to datastore + */ +uint32_t GetDatastoreCloneChunkCountFunc(void* arg); +/** + * Obtain the number of chunk files on the chunkserver + * @param arg: nullptr + */ +uint32_t GetTotalChunkCountFunc(void* arg); +/** + * @brief: Get the total number of WAL segment in chunkserver + * @param arg: The pointer to ChunkServerMetric + */ +uint32_t GetTotalWalSegmentCountFunc(void* arg); - /** - * 获取chunkserver上快照chunk的数量 - * @param arg: nullptr - */ - uint32_t GetTotalSnapshotCountFunc(void* arg); - /** - * 获取chunkserver上clone chunk的数量 - * @param arg: nullptr - */ - uint32_t GetTotalCloneChunkCountFunc(void* arg); - /** - * 获取chunkfilepool中剩余chunk的数量 - * @param arg: chunkfilepool的对象指针 - */ - uint32_t GetChunkLeftFunc(void* arg); - /** - * 获取walfilepool中剩余chunk的数量 - * @param arg: walfilepool的对象指针 - */ - uint32_t GetWalSegmentLeftFunc(void* arg); - /** - * 获取trash中chunk的数量 - * @param arg: trash的对象指针 - */ - uint32_t GetChunkTrashedFunc(void* arg); +/** + * Obtain the number of snapshot chunks on the chunkserver + * @param arg: nullptr + */ +uint32_t GetTotalSnapshotCountFunc(void* arg); +/** + * Obtain the number of clone chunks on the chunkserver 
+ * @param arg: nullptr
+ */
+uint32_t GetTotalCloneChunkCountFunc(void* arg);
+/**
+ * Obtain the number of remaining chunks in the chunkfilepool
+ * @param arg: object pointer to the chunkfilepool
+ */
+uint32_t GetChunkLeftFunc(void* arg);
+/**
+ * Obtain the number of remaining chunks in the walfilepool
+ * @param arg: object pointer to the walfilepool
+ */
+uint32_t GetWalSegmentLeftFunc(void* arg);
+/**
+ * Obtain the number of chunks in the trash
+ * @param arg: object pointer to the trash
+ */
+uint32_t GetChunkTrashedFunc(void* arg);
 
 }  // namespace chunkserver
 }  // namespace curve
 
diff --git a/src/chunkserver/raftsnapshot/curve_file_adaptor.h b/src/chunkserver/raftsnapshot/curve_file_adaptor.h
index 2f6b23ec0b..b4467bb268 100644
--- a/src/chunkserver/raftsnapshot/curve_file_adaptor.h
+++ b/src/chunkserver/raftsnapshot/curve_file_adaptor.h
@@ -31,10 +31,9 @@ namespace chunkserver {
 class CurveFileAdaptor : public braft::PosixFileAdaptor {
  public:
     explicit CurveFileAdaptor(int fd) : PosixFileAdaptor(fd) {}
-    // close之前必须先sync,保证数据落盘,其他逻辑不变
-    bool close() override {
-        return sync() && braft::PosixFileAdaptor::close();
-    }
+    // Must sync before close to guarantee the data is flushed to disk;
+    // all other logic is unchanged
+    bool close() override { return sync() && braft::PosixFileAdaptor::close(); }
 };
 
 }  // namespace chunkserver
 
diff --git a/src/chunkserver/raftsnapshot/curve_file_service.cpp b/src/chunkserver/raftsnapshot/curve_file_service.cpp
index f1d5d931e0..4395234d6f 100644
--- a/src/chunkserver/raftsnapshot/curve_file_service.cpp
+++ b/src/chunkserver/raftsnapshot/curve_file_service.cpp
@@ -36,15 +36,17 @@
 
 // Authors: Zhangyi Chen(chenzhangyi01@baidu.com)
 
-#include
-#include
-#include
-#include
+#include "src/chunkserver/raftsnapshot/curve_file_service.h"
+
+#include
 #include
 #include
-#include
+#include
+#include
+#include
+#include
+
 #include
-#include "src/chunkserver/raftsnapshot/curve_file_service.h"
 
 namespace curve {
 namespace chunkserver {
@@ -52,9 +54,9 @@ namespace chunkserver {
 CurveFileService& kCurveFileService = CurveFileService::GetInstance();
 
 void CurveFileService::get_file(::google::protobuf::RpcController* controller,
-                            const ::braft::GetFileRequest* request,
-                            ::braft::GetFileResponse* response,
-                            ::google::protobuf::Closure* done) {
+                                const ::braft::GetFileRequest* request,
+                                ::braft::GetFileResponse* response,
+                                ::google::protobuf::Closure* done) {
     scoped_refptr<braft::FileReader> reader;
     brpc::ClosureGuard done_gurad(done);
     brpc::Controller* cntl = (brpc::Controller*)controller;
@@ -63,21 +65,23 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller,
     if (iter == _reader_map.end()) {
         lck.unlock();
         /**
-         * 为了和文件不存在的错误区分开来,且考虑到install snapshot
-         * 的uri format为:remote://ip:port/reader_id,所以使用ENXIO
-         * 代表reader id不存在的错误
+         * To distinguish this from the file-not-found error, and given that
+         * the install snapshot uri format is remote://ip:port/reader_id,
+         * ENXIO is used to represent a non-existent reader id.
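Stepping back to the void* getters declared in passive_getfn.h above: they follow the passive-status convention of brpc's bvar, where the value is pulled when the metric is sampled rather than pushed on every change. A hypothetical registration — the actual wiring lives in the chunkserver metric code, and the metric name here is made up:

    #include <bvar/passive_status.h>
    #include "src/chunkserver/passive_getfn.h"

    // Illustration only: sample the datastore chunk count on demand.
    // 'datastore' stands in for whatever object the real code passes as arg.
    void ExposeChunkCount(void* datastore) {
        static bvar::PassiveStatus<uint32_t> chunkCount(
            "chunkserver_datastore_chunk_count",  // hypothetical name
            GetDatastoreChunkCountFunc, datastore);
    }
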
*/ cntl->SetFailed(ENXIO, "Fail to find reader=%" PRId64, - request->reader_id()); + request->reader_id()); return; } // Don't touch iter ever after reader = iter->second; lck.unlock(); - LOG(INFO) << "get_file for " << cntl->remote_side() << " path=" - << reader->path() << " filename=" << request->filename() - << " offset=" << request->offset() << " count=" - << request->count(); + LOG(INFO) << "get_file for " << cntl->remote_side() + << " path=" << reader->path() + << " filename=" << request->filename() + << " offset=" << request->offset() + << " count=" << request->count(); if (request->count() <= 0 || request->offset() < 0) { cntl->SetFailed(brpc::EREQUEST, "Invalid request=%s", @@ -88,10 +92,11 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, butil::IOBuf buf; bool is_eof = false; size_t read_count = 0; - // 1. 如果是read attch meta file + // 1. If it is a read attach meta file if (request->filename() == BRAFT_SNAPSHOT_ATTACH_META_FILE) { - // 如果没有设置snapshot attachment,那么read文件的长度为零 - // 表示没有 snapshot attachment文件列表 + // If no snapshot attachment is set, then the length of the read file is + // zero, indicating that there are no snapshot attachment files in the + // list. bool snapshotAttachmentExist = false; { std::unique_lock lck(_mutex); @@ -104,7 +109,7 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, } } if (snapshotAttachmentExist) { - // 否则获取snapshot attachment file list + // Otherwise, obtain the snapshot attachment file list std::vector files; _snapshot_attachment->list_attach_files(&files, reader->path()); CurveSnapshotAttachMetaTable attachMetaTable; @@ -121,7 +126,7 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, request->reader_id()); return; } - CurveSnapshotFileReader *reader = + CurveSnapshotFileReader* reader = dynamic_cast(it->second.get()); if (reader != nullptr) { reader->set_attach_meta_table(attachMetaTable); @@ -135,11 +140,11 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, } if (0 != attachMetaTable.save_to_iobuf_as_remote(&buf)) { - // 内部错误: EINTERNAL + // Internal error: EINTERNAL LOG(ERROR) << "Fail to serialize " - "LocalSnapshotAttachMetaTable as iobuf"; + "LocalSnapshotAttachMetaTable as iobuf"; cntl->SetFailed(brpc::EINTERNAL, - "serialize snapshot attach meta table fail"); + "serialize snapshot attach meta table fail"); return; } else { LOG(INFO) << "LocalSnapshotAttachMetaTable encode buf length = " @@ -149,17 +154,15 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, read_count = buf.size(); } } else { - // 2. 否则其它文件下载继续走raft原先的文件下载流程 + // 2. 
Otherwise, the download of other files will continue to follow the + // original file download process of Raft const int rc = reader->read_file( - &buf, request->filename(), - request->offset(), request->count(), - request->read_partly(), - &read_count, - &is_eof); + &buf, request->filename(), request->offset(), request->count(), + request->read_partly(), &read_count, &is_eof); if (rc != 0) { cntl->SetFailed(rc, "Fail to read from path=%s filename=%s : %s", - reader->path().c_str(), - request->filename().c_str(), berror(rc)); + reader->path().c_str(), request->filename().c_str(), + berror(rc)); return; } } @@ -177,13 +180,13 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, } void CurveFileService::set_snapshot_attachment( - SnapshotAttachment *snapshot_attachment) { + SnapshotAttachment* snapshot_attachment) { _snapshot_attachment = snapshot_attachment; } CurveFileService::CurveFileService() { - _next_id = ((int64_t)getpid() << 45) | - (butil::gettimeofday_us() << 17 >> 17); + _next_id = + ((int64_t)getpid() << 45) | (butil::gettimeofday_us() << 17 >> 17); } int CurveFileService::add_reader(braft::FileReader* reader, diff --git a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp index 18479b26a6..d46a7f18b9 100644 --- a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp +++ b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp @@ -20,16 +20,17 @@ * Author: tongguangxun */ +#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h" + #include -#include -#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h" +#include namespace curve { namespace chunkserver { CurveFilesystemAdaptor::CurveFilesystemAdaptor( - std::shared_ptr chunkFilePool, - std::shared_ptr lfs) { + std::shared_ptr chunkFilePool, + std::shared_ptr lfs) { lfs_ = lfs; chunkFilePool_ = chunkFilePool; uint64_t metapageSize = chunkFilePool->GetFilePoolOpt().metaPageSize; @@ -39,8 +40,7 @@ CurveFilesystemAdaptor::CurveFilesystemAdaptor( } CurveFilesystemAdaptor::CurveFilesystemAdaptor() - : tempMetaPageContent(nullptr) { -} + : tempMetaPageContent(nullptr) {} CurveFilesystemAdaptor::~CurveFilesystemAdaptor() { if (tempMetaPageContent != nullptr) { @@ -50,14 +50,14 @@ CurveFilesystemAdaptor::~CurveFilesystemAdaptor() { LOG(INFO) << "release raftsnapshot filesystem adaptor!"; } -braft::FileAdaptor* CurveFilesystemAdaptor::open(const std::string& path, - int oflag, const ::google::protobuf::Message* file_meta, - butil::File::Error* e) { - (void) file_meta; +braft::FileAdaptor* CurveFilesystemAdaptor::open( + const std::string& path, int oflag, + const ::google::protobuf::Message* file_meta, butil::File::Error* e) { + (void)file_meta; static std::once_flag local_s_check_cloexec_once; static bool local_s_support_cloexec_on_open = false; - std::call_once(local_s_check_cloexec_once, [&](){ + std::call_once(local_s_check_cloexec_once, [&]() { int fd = lfs_->Open("/dev/zero", O_RDONLY | O_CLOEXEC); local_s_support_cloexec_on_open = (fd != -1); if (fd != -1) { @@ -69,18 +69,21 @@ braft::FileAdaptor* CurveFilesystemAdaptor::open(const std::string& path, if (cloexec && !local_s_support_cloexec_on_open) { oflag &= (~O_CLOEXEC); } - // Open就使用sync标志是为了避免集中在close一次性sync,对于16MB的chunk文件可能会造成抖动 + // The use of the sync flag in Open is to avoid focusing on the close + // one-time sync, which may cause jitter for 16MB chunk files oflag |= O_SYNC; - // 先判断当前文件是否需要过滤,如果需要过滤,就直接走下面逻辑,不走chunkfilepool - // 
如果open操作携带create标志,则从chunkfilepool取,否则保持原来语意 - // 如果待打开的文件已经存在,则直接使用原有语意 - if (!NeedFilter(path) && - (oflag & O_CREAT) && + // First, determine whether the current file needs to be filtered. If it + // needs to be filtered, simply follow the following logic instead of + // chunkfilepool If the open operation carries the create flag, it will be + // taken from chunkfilepool, otherwise it will maintain its original meaning + // If the file to be opened already exists, use the original meaning + // directly + if (!NeedFilter(path) && (oflag & O_CREAT) && false == lfs_->FileExists(path)) { - // 从chunkfile pool中取出chunk返回 + // Removing a chunk from the chunkfile pool returns int rc = chunkFilePool_->GetFile(path, tempMetaPageContent); - // 如果从FilePool中取失败,返回错误。 + // If retrieving from FilePool fails, an error is returned. if (rc != 0) { LOG(ERROR) << "get chunk from chunkfile pool failed!"; return NULL; @@ -93,17 +96,17 @@ braft::FileAdaptor* CurveFilesystemAdaptor::open(const std::string& path, int fd = lfs_->Open(path.c_str(), oflag); if (e) { *e = (fd < 0) ? butil::File::OSErrorToFileError(errno) - : butil::File::FILE_OK; + : butil::File::FILE_OK; } if (fd < 0) { if (oflag & O_CREAT) { LOG(ERROR) << "snapshot create chunkfile failed, filename = " - << path.c_str() << ", errno = " << errno; + << path.c_str() << ", errno = " << errno; } else { LOG(WARNING) << "snapshot open chunkfile failed," - << "may be deleted by user, filename = " - << path.c_str() << ",errno = " << errno; + << "may be deleted by user, filename = " + << path.c_str() << ",errno = " << errno; } return NULL; } @@ -115,10 +118,12 @@ braft::FileAdaptor* CurveFilesystemAdaptor::open(const std::string& path, } bool CurveFilesystemAdaptor::delete_file(const std::string& path, - bool recursive) { - // 1. 如果是目录且recursive=true,那么遍历目录内容回收 - // 2. 如果是目录且recursive=false,那么判断目录内容是否为空,不为空返回false - // 3. 如果是文件直接回收 + bool recursive) { + // 1. If it is a directory and recursive=true, then traverse the directory + // content to recycle + // 2. If it is a directory and recursive=false, then determine whether the + // directory content is empty, and return false if it is not empty + // 3. 
If it is a file, recycle it directly
     if (lfs_->DirExists(path)) {
         std::vector<std::string> dircontent;
         lfs_->List(path, &dircontent);
@@ -130,20 +135,21 @@ bool CurveFilesystemAdaptor::delete_file(const std::string& path,
         }
     } else {
         if (lfs_->FileExists(path)) {
-            // 如果在过滤名单里,就直接删除
-            if (NeedFilter(path)) {
-                return lfs_->Delete(path) == 0;
-            } else {
-                // chunkfilepool内部会检查path对应文件合法性,如果不符合就直接删除
+            // If it is on the filter list, delete it directly
+            if (NeedFilter(path)) {
+                return lfs_->Delete(path) == 0;
+            } else {
+                // FilePool internally checks whether the file at this path
+                // is a valid chunk; if it is not, the file is deleted
+                // directly
                 return chunkFilePool_->RecycleFile(path) == 0;
-            }
+            }
         }
     }
     return true;
 }
 
-bool CurveFilesystemAdaptor::RecycleDirRecursive(
-    const std::string& path) {
+bool CurveFilesystemAdaptor::RecycleDirRecursive(const std::string& path) {
     std::vector<std::string> dircontent;
     lfs_->List(path, &dircontent);
     bool rc = true;
@@ -152,7 +158,7 @@ bool CurveFilesystemAdaptor::RecycleDirRecursive(
         if (lfs_->DirExists(todeletePath)) {
             RecycleDirRecursive(todeletePath);
         } else {
-            // 如果在过滤名单里,就直接删除
+            // If it is on the filter list, delete it directly
             if (NeedFilter(todeletePath)) {
                 if (lfs_->Delete(todeletePath) != 0) {
                     LOG(ERROR) << "delete " << todeletePath << ", failed!";
@@ -173,16 +179,18 @@ bool CurveFilesystemAdaptor::RecycleDirRecursive(
 }
 
 bool CurveFilesystemAdaptor::rename(const std::string& old_path,
-    const std::string& new_path) {
+                                    const std::string& new_path) {
     if (!NeedFilter(new_path) && lfs_->FileExists(new_path)) {
-        // chunkfilepool内部会检查path对应文件合法性,如果不符合就直接删除
+        // FilePool internally checks whether the file at this path is a
+        // valid chunk; if it is not, the file is deleted directly
         chunkFilePool_->RecycleFile(new_path);
     }
     return lfs_->Rename(old_path, new_path) == 0;
 }
 
 void CurveFilesystemAdaptor::SetFilterList(
-        const std::vector<std::string>& filter) {
+    const std::vector<std::string>& filter) {
     filterList_.assign(filter.begin(), filter.end());
 }
 
diff --git a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h
index 4e6737b8d4..b29a0948a8 100644
--- a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h
+++ b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h
@@ -33,53 +33,59 @@
 #include "src/chunkserver/raftsnapshot/curve_file_adaptor.h"
 
 /**
- * RaftSnapshotFilesystemAdaptor目的是为了接管braft
- * 内部snapshot创建chunk文件的逻辑,目前curve内部
- * 会从chunkfilepool中直接取出已经格式化好的chunk文件
- * 但是braft内部由于install snapshot也会创建chunk文件
- * 这个创建文件不感知chunkfilepool,因此我们希望install
- * snapshot也能从chunkfilepool中直接取出chunk文件,因此
- * 我们对install snapshot流程中的文件系统做了一层hook,在
- * 创建及删除文件操作上直接使用curve提供的文件系统接口即可。
+ * The purpose of RaftSnapshotFilesystemAdaptor is to take over braft's logic
+ * for creating chunk files during snapshots. Curve normally takes
+ * pre-formatted chunk files directly from the chunk file pool, but braft also
+ * creates chunk files during install snapshot, and that code path knows
+ * nothing about the pool. We therefore want install snapshot to take its
+ * chunk files from the chunk file pool as well, so we hook the file system
+ * used in the install snapshot flow: file creation and deletion simply go
+ * through the file system interface provided by Curve.
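The adaptor described above only takes effect if braft's snapshot storage is actually pointed at it, and braft's own meta files must be exempted from the pool. A hypothetical wiring sketch — MakeSnapshotFsAdaptor is invented for illustration; the real hookup happens when the copyset node's snapshot storage is initialized:

    // Snapshot data files come from / go back to the shared FilePool;
    // braft's meta files bypass it entirely via the filter list.
    scoped_refptr<braft::FileSystemAdaptor> MakeSnapshotFsAdaptor(
        std::shared_ptr<FilePool> chunkFilePool,
        std::shared_ptr<LocalFileSystem> lfs) {
        auto* adaptor = new CurveFilesystemAdaptor(chunkFilePool, lfs);
        adaptor->SetFilterList({BRAFT_SNAPSHOT_META_FILE,
                                BRAFT_SNAPSHOT_ATTACH_META_FILE});
        return scoped_refptr<braft::FileSystemAdaptor>(adaptor);
    }
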
*/ -using curve::fs::LocalFileSystem; using curve::chunkserver::FilePool; +using curve::fs::LocalFileSystem; namespace curve { namespace chunkserver { /** - * CurveFilesystemAdaptor继承raft的PosixFileSystemAdaptor类,在raft - * 内部其快照使用PosixFileSystemAdaptor类进行文件操作,因为我们只希望在其创建文件 - * 或者删除文件的时候使用chunkfilepool提供的getchunk和recyclechunk接口,所以这里 - * 我们只实现了open和delete_file两个接口。其他接口在调用的时候仍然使用原来raft的内部 - * 的接口。 + * CurveFilesystemAdaptor inherits from Raft's PosixFileSystemAdaptor class. + * Within the Raft framework, it uses the PosixFileSystemAdaptor class for file + * operations during snapshots. However, we only want to use the `getchunk` and + * `recyclechunk` interfaces provided by the chunkfilepool when creating or + * deleting files. Therefore, in this context, we have only implemented the + * `open` and `delete_file` interfaces. Other interfaces are still used with the + * original internal Raft interfaces when called. */ class CurveFilesystemAdaptor : public braft::PosixFileSystemAdaptor { public: /** - * 构造函数 - * @param: chunkfilepool用于获取和回收chunk文件 - * @param: lfs用于进行一些文件操作,比如打开或者删除目录 + * Constructor + * @param: chunkfilepool is used to retrieve and recycle chunk files + * @param: lfs is used for some file operations, such as opening or deleting + * directories */ CurveFilesystemAdaptor(std::shared_ptr filePool, - std::shared_ptr lfs); + std::shared_ptr lfs); CurveFilesystemAdaptor(); virtual ~CurveFilesystemAdaptor(); /** - * 打开文件,在raft内部使用open来创建一个文件,并返回FileAdaptor结构 - * @param: path是当前待打开的路径 - * @param: oflag为打开文件参数 - * @param: file_meta是当前文件的meta信息,这个参数内部未使用 - * @param: e为打开文件是的错误码 - * @return: FileAdaptor是raft内部封装fd的一个类,fd是open打开path的返回值 - * 后续所有对于该文件的读写都是通过该FileAdaptor指针进行的,其内部封装了 - * 读写操作,其内部定义如下。 - * class PosixFileAdaptor : public FileAdaptor { - * friend class PosixFileSystemAdaptor; - * public: - * PosixFileAdaptor(int fd) : _fd(fd) {} + * Open the file, use open inside the raft to create a file, and return the + * FileAdaptor structure + * @param: path is the current path to be opened + * @param: oflag is the parameter for opening a file + * @param: file_meta is the meta information of the current file, which is + * not used internally + * @param: e is the error code for opening the file + * @return: FileAdaptor is a class within Raft that encapsulates a file + * descriptor (fd). After opening a path with the `open` call, all + * subsequent read and write operations on that file are performed through a + * pointer to this FileAdaptor class. 
It internally defines the following + * operations: class PosixFileAdaptor : public FileAdaptor { friend class + * PosixFileSystemAdaptor; public: PosixFileAdaptor(int fd) : _fd(fd) {} * virtual ~PosixFileAdaptor(); * * virtual ssize_t write(const butil::IOBuf& data, @@ -94,61 +100,70 @@ class CurveFilesystemAdaptor : public braft::PosixFileSystemAdaptor { * int _fd; * }; */ - virtual braft::FileAdaptor* open(const std::string& path, int oflag, - const ::google::protobuf::Message* file_meta, - butil::File::Error* e); + virtual braft::FileAdaptor* open( + const std::string& path, int oflag, + const ::google::protobuf::Message* file_meta, butil::File::Error* e); /** - * 删除path对应的文件或目录 - * @param: path是待删除的文件路径 - * @param: recursive是否递归删除 - * @return: 成功返回true,否则返回false + * Delete the file or directory corresponding to the path + * @param: path is the file path to be deleted + * @param: Recursive whether to recursively delete + * @return: Successfully returns true, otherwise returns false */ virtual bool delete_file(const std::string& path, bool recursive); /** - * rename到新路径 - * 为什么要重载rename? - * 由于raft内部使用的是本地文件系统的rename,如果目标new path - * 已经存在文件,那么就会覆盖该文件。这样raft内部会创建temp_snapshot_meta - * 文件,这个是为了保证原子修改snapshot_meta文件而设置的,然后通过rename保证 - * 修改snapshot_meta文件修改的原子性。如果这个temp_snapshot_meta是从chunkfilpool - * 取的,那么如果直接rename,这个temp_snapshot_meta文件所占用的chunk文件 - * 就永远收不回来了,这种情况下会消耗大量的预分配chunk,所以这里重载rename,先 - * 回收new path,然后再rename, - * @param: old_path旧文件路径 - * @param: new_path新文件路径 + * Rename to a new path. + * Why override the rename function? + * Raft internally uses the rename function of the local file system. If the + * target new path already exists as a file, it will overwrite that file. + * This behavior leads to the creation of a 'temp_snapshot_meta' file, which + * is set up to ensure the atomic modification of the 'snapshot_meta' file. + * Using rename helps ensure the atomicity of modifying the 'snapshot_meta' + * file. However, if the 'temp_snapshot_meta' file is allocated from the + * chunk file pool and renamed directly, the chunk file used by the + * 'temp_snapshot_meta' file will never be released. In this situation, a + * significant number of pre-allocated chunks can be consumed. Therefore, + * the rename function is overridden here to first release the resources + * associated with the new path, and then perform the rename operation. 
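A concrete reading of that rationale, kept as an illustrative comment (paths are hypothetical):

     * Example: braft writes the updated meta to temp_snapshot_meta, a file
     * taken from the FilePool, then renames it over snapshot_meta. With a
     * plain rename the chunk backing the overwritten file would never return
     * to the pool; recycling new_path first and then renaming keeps
     * pre-allocated chunks from leaking.
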
+ * @param: old_path - The old file path + * @param: new_path - The new file path */ virtual bool rename(const std::string& old_path, - const std::string& new_path); + const std::string& new_path); - // 设置过滤哪些文件,这些文件不从chunkfilepool取 - // 回收的时候也直接删除这些文件,不进入chunkfilepool + // Set which files to filter and do not retrieve them from chunkfilepool + // Delete these files directly during recycling without entering the + // chunkfilepool void SetFilterList(const std::vector& filter); private: - /** - * 递归回收目录内容 - * @param: path为待回收的目录路径 - * @return: 成功返回true,否则返回false - */ + /** + * Recursive recycling of directory content + * @param: path is the directory path to be recycled + * @return: Successfully returns true, otherwise returns false + */ bool RecycleDirRecursive(const std::string& path); /** - * 查看文件是否需要过滤 + * Check if the file needs to be filtered */ bool NeedFilter(const std::string& filename); private: - // 由于chunkfile pool获取新的chunk时需要传入metapage信息 - // 这里创建一个临时的metapage,其内容无关紧要,因为快照会覆盖这部分内容 - char* tempMetaPageContent; - // 我们自己的文件系统,这里文件系统会做一些打开及删除目录操作 + // Due to the need to pass in metapage information when obtaining new chunks + // in the chunkfile pool Create a temporary metapage here, whose content is + // irrelevant as the snapshot will overwrite this part of the content + char* tempMetaPageContent; + // Our own file system, where the file system performs some opening and + // deleting directory operations std::shared_ptr lfs_; - // 操作chunkfilepool的指针,这个FilePool_与copysetnode的 - // chunkfilepool_应该是全局唯一的,保证操作chunkfilepool的原子性 + // Pointer to operate chunkfilepool, this FilePool_ Related to copysetnode + // Chunkfilepool_ It should be globally unique, ensuring the atomicity of + // the chunkfilepool operation std::shared_ptr chunkFilePool_; - // 过滤名单,在当前vector中的文件名,都不从chunkfilepool中取文件 - // 回收的时候也直接删除这些文件,不进入chunkfilepool + // Filter the list and do not retrieve file names from chunkfilepool in the + // current vector Delete these files directly during recycling without + // entering the chunkfilepool std::vector filterList_; }; } // namespace chunkserver diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp index 93d4a7c324..cbd77403da 100644 --- a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp +++ b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp @@ -21,6 +21,7 @@ */ #include "src/chunkserver/raftsnapshot/curve_snapshot_attachment.h" + #include "src/common/fs_util.h" namespace curve { @@ -31,11 +32,11 @@ CurveSnapshotAttachment::CurveSnapshotAttachment( : fileHelper_(fs) {} void CurveSnapshotAttachment::list_attach_files( - std::vector *files, const std::string& raftSnapshotPath) { + std::vector* files, const std::string& raftSnapshotPath) { std::string raftBaseDir = - getCurveRaftBaseDir(raftSnapshotPath, RAFT_SNAP_DIR); + getCurveRaftBaseDir(raftSnapshotPath, RAFT_SNAP_DIR); std::string dataDir; - if (raftBaseDir[raftBaseDir.length()-1] != '/') { + if (raftBaseDir[raftBaseDir.length() - 1] != '/') { dataDir = raftBaseDir + "/" + RAFT_DATA_DIR; } else { dataDir = raftBaseDir + RAFT_DATA_DIR; @@ -43,23 +44,23 @@ void CurveSnapshotAttachment::list_attach_files( std::vector snapFiles; int rc = fileHelper_.ListFiles(dataDir, nullptr, &snapFiles); - // list出错一般认为就是磁盘出现问题了,这种情况直接让进程挂掉 - // Attention: 这里还需要更仔细考虑 + // An error in the list is generally believed to be due to a disk issue, + // which directly causes the process to crash Attention: More careful + // consideration is needed 
here.
    CHECK(rc == 0) << "List dir failed.";
 
     files->clear();
-    // 文件路径格式与snapshot_meta中的格式要相同
+    // The file path format must match the format used in snapshot_meta
     for (const auto& snapFile : snapFiles) {
         std::string snapApath;
-        // 添加绝对路径
+        // Prepend the absolute path
         snapApath.append(dataDir);
         snapApath.append("/").append(snapFile);
-        std::string filePath = curve::common::CalcRelativePath(
-            raftSnapshotPath, snapApath);
+        std::string filePath =
+            curve::common::CalcRelativePath(raftSnapshotPath, snapApath);
         files->emplace_back(filePath);
     }
 }
-
 }  // namespace chunkserver
 }  // namespace curve
 
diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h
index 10e2172673..94b6009714 100644
--- a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h
+++ b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h
@@ -23,62 +23,71 @@
 #define SRC_CHUNKSERVER_RAFTSNAPSHOT_CURVE_SNAPSHOT_ATTACHMENT_H_
 
 #include
+
+#include
 #include
 #include
-#include
 
-#include "src/chunkserver/raftsnapshot/define.h"
 #include "src/chunkserver/datastore/datastore_file_helper.h"
+#include "src/chunkserver/raftsnapshot/define.h"
 
 namespace curve {
 namespace chunkserver {
 
 /**
- * 用于获取snapshot attachment files的接口,一般用于一些下载
- * 快照获取需要额外下载的文件list
+ * Interface for obtaining snapshot attachment files; typically used when
+ * downloading a snapshot to list the extra files that also need to be
+ * downloaded
 */
-class SnapshotAttachment :
-    public butil::RefCountedThreadSafe {
+class SnapshotAttachment
+    : public butil::RefCountedThreadSafe<SnapshotAttachment> {
  public:
     SnapshotAttachment() = default;
     virtual ~SnapshotAttachment() = default;
 
     /**
-     * 获取snapshot attachment文件列表
-     * @param files[out]: attachment文件列表
-     * @param snapshotPath[in]: braft快照的路径
+     * Obtain the list of snapshot attachment files
+     * @param files[out]: attachment file list
+     * @param snapshotPath[in]: path of the braft snapshot
     */
-    virtual void list_attach_files(std::vector *files,
-        const std::string& raftSnapshotPath) = 0;
+    virtual void list_attach_files(std::vector<std::string>* files,
+                                   const std::string& raftSnapshotPath) = 0;
 };
 
-// SnapshotAttachment接口的实现,用于raft加载快照时,获取chunk快照文件列表
+// Implementation of the SnapshotAttachment interface, used to obtain the list
+// of chunk snapshot files when raft loads a snapshot
 class CurveSnapshotAttachment : public SnapshotAttachment {
  public:
     explicit CurveSnapshotAttachment(std::shared_ptr<LocalFileSystem> fs);
     virtual ~CurveSnapshotAttachment() = default;
     /**
-     * 获取raft snapshot的attachment,这里就是获取chunk的快照文件列表
-     * @param files[out]: data目录下的chunk快照文件列表
-     * @param raftSnapshotPath: braft快照的路径
-     * 返回的文件路径使用 绝对路径:相对路径 的格式,相对路径包含data目录
+     * Obtain the attachment of the raft snapshot, i.e. the list of chunk
+     * snapshot files
+     * @param files[out]: list of chunk snapshot files under the data
+     * directory
+     * @param raftSnapshotPath: path of the braft snapshot
+     * Returned file paths use the "absolute path:relative path" format,
+     * where the relative path includes the data directory
     */
-    void list_attach_files(std::vector<std::string> *files,
+    void list_attach_files(std::vector<std::string>* files,
                            const std::string& raftSnapshotPath) override;
+
  private:
     DatastoreFileHelper fileHelper_;
 };
 
 /*
-* @brif 通过具体的某个raft的snapshot实例地址获取raft实例基础地址
-* @param[in] specificSnapshotDir 某个具体snapshot的目录
-       比如/data/chunkserver1/copysets/4294967812/raft_snapshot/snapshot_805455/
-* @param[in] raftSnapshotRelativeDir 上层业务指的所有snapshot的相对基地址
-       比如raft_snapshot
-* @return 返回raft实例的绝对基地址,/data/chunkserver1/copysets/4294967812/
+* @brief
obtains the base address of a raft instance through the snapshot +instance address of a specific raft +* @param[in] specificSnapshotDir The directory of a specific snapshot + For +example,/data/chunkserver1/copysets/4294967812/raft_snapshot/snapshot_805455/ +* @param[in] raftSnapshotRelativeDir The relative base addresses of all +snapshots referred to by the upper level business For example, raft_ Snapshot +* @return returns the absolute base address of the raft +instance,/data/chunkserver1/copysets/4294967812/ */ inline std::string getCurveRaftBaseDir(std::string specificSnapshotDir, - std::string raftSnapshotRelativeDir) { + std::string raftSnapshotRelativeDir) { std::string::size_type m = specificSnapshotDir.find(raftSnapshotRelativeDir); if (m == std::string::npos) { diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp b/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp index 6a996695bd..5cceb37171 100644 --- a/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp +++ b/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp @@ -48,22 +48,19 @@ CurveSnapshotCopier::CurveSnapshotCopier(CurveSnapshotStorage* storage, bool filter_before_copy_remote, braft::FileSystemAdaptor* fs, braft::SnapshotThrottle* throttle) - : _tid(INVALID_BTHREAD) - , _cancelled(false) - , _filter_before_copy_remote(filter_before_copy_remote) - , _fs(fs) - , _throttle(throttle) - , _writer(NULL) - , _storage(storage) - , _reader(NULL) - , _cur_session(NULL) -{} - -CurveSnapshotCopier::~CurveSnapshotCopier() { - CHECK(!_writer); -} - -void *CurveSnapshotCopier::start_copy(void* arg) { + : _tid(INVALID_BTHREAD), + _cancelled(false), + _filter_before_copy_remote(filter_before_copy_remote), + _fs(fs), + _throttle(throttle), + _writer(NULL), + _storage(storage), + _reader(NULL), + _cur_session(NULL) {} + +CurveSnapshotCopier::~CurveSnapshotCopier() { CHECK(!_writer); } + +void* CurveSnapshotCopier::start_copy(void* arg) { CurveSnapshotCopier* c = reinterpret_cast(arg); c->copy(); return NULL; @@ -71,7 +68,7 @@ void *CurveSnapshotCopier::start_copy(void* arg) { void CurveSnapshotCopier::copy() { do { - // 下载snapshot meta中记录的文件 + // Download the files recorded in the snapshot meta load_meta_table(); if (!ok()) { break; @@ -86,7 +83,7 @@ void CurveSnapshotCopier::copy() { copy_file(files[i]); } - // 下载snapshot attachment文件 + // Download snapshot attachment file load_attach_meta_table(); if (!ok()) { break; @@ -99,8 +96,8 @@ void CurveSnapshotCopier::copy() { } while (0); if (!ok() && _writer && _writer->ok()) { LOG(WARNING) << "Fail to copy, error_code " << error_code() - << " error_msg " << error_cstr() - << " writer path " << _writer->get_path(); + << " error_msg " << error_cstr() << " writer path " + << _writer->get_path(); _writer->set_error(error_code(), error_cstr()); } if (_writer) { @@ -123,9 +120,9 @@ void CurveSnapshotCopier::load_meta_table() { set_error(ECANCELED, "%s", berror(ECANCELED)); return; } - scoped_refptr session - = _copier.start_to_copy_to_iobuf(BRAFT_SNAPSHOT_META_FILE, - &meta_buf, NULL); + scoped_refptr session = + _copier.start_to_copy_to_iobuf(BRAFT_SNAPSHOT_META_FILE, &meta_buf, + NULL); _cur_session = session.get(); lck.unlock(); session->join(); @@ -153,9 +150,9 @@ void CurveSnapshotCopier::load_attach_meta_table() { set_error(ECANCELED, "%s", berror(ECANCELED)); return; } - scoped_refptr session - = _copier.start_to_copy_to_iobuf(BRAFT_SNAPSHOT_ATTACH_META_FILE, - &meta_buf, NULL); + scoped_refptr session = + 
_copier.start_to_copy_to_iobuf(BRAFT_SNAPSHOT_ATTACH_META_FILE, + &meta_buf, NULL); _cur_session = session.get(); lck.unlock(); session->join(); @@ -169,21 +166,22 @@ void CurveSnapshotCopier::load_attach_meta_table() { return; } - // 如果attach meta table为空,那么说明没有snapshot attachment files + // If the attach_meta_table is empty, then there are no snapshot attachment + // files if (0 == meta_buf.size()) { return; } - if (_remote_snapshot._attach_meta_table.load_from_iobuf_as_remote(meta_buf) - != 0) { + if (_remote_snapshot._attach_meta_table.load_from_iobuf_as_remote( + meta_buf) != 0) { LOG(WARNING) << "Bad attach_meta_table format"; set_error(-1, "Bad attach_meta_table format"); return; } } -int CurveSnapshotCopier::filter_before_copy(CurveSnapshotWriter* writer, - braft::SnapshotReader* last_snapshot) { +int CurveSnapshotCopier::filter_before_copy( + CurveSnapshotWriter* writer, braft::SnapshotReader* last_snapshot) { std::vector existing_files; writer->list_files(&existing_files); std::vector to_remove; @@ -200,8 +198,7 @@ int CurveSnapshotCopier::filter_before_copy(CurveSnapshotWriter* writer, for (size_t i = 0; i < remote_files.size(); ++i) { const std::string& filename = remote_files[i]; braft::LocalFileMeta remote_meta; - CHECK_EQ(0, _remote_snapshot.get_file_meta( - filename, &remote_meta)); + CHECK_EQ(0, _remote_snapshot.get_file_meta(filename, &remote_meta)); if (!remote_meta.has_checksum()) { // Redownload file if this file doen't have checksum writer->remove_file(filename); @@ -214,8 +211,8 @@ int CurveSnapshotCopier::filter_before_copy(CurveSnapshotWriter* writer, if (local_meta.has_checksum() && local_meta.checksum() == remote_meta.checksum()) { LOG(INFO) << "Keep file=" << filename - << " checksum=" << remote_meta.checksum() - << " in " << writer->get_path(); + << " checksum=" << remote_meta.checksum() << " in " + << writer->get_path(); continue; } // Remove files from writer so that the file is to be copied from @@ -232,21 +229,20 @@ int CurveSnapshotCopier::filter_before_copy(CurveSnapshotWriter* writer, continue; } if (!local_meta.has_checksum() || - local_meta.checksum() != remote_meta.checksum()) { + local_meta.checksum() != remote_meta.checksum()) { continue; } LOG(INFO) << "Found the same file=" << filename << " checksum=" << remote_meta.checksum() << " in last_snapshot=" << last_snapshot->get_path(); if (local_meta.source() == braft::FILE_SOURCE_LOCAL) { - std::string source_path = last_snapshot->get_path() + '/' - + filename; - std::string dest_path = writer->get_path() + '/' - + filename; + std::string source_path = + last_snapshot->get_path() + '/' + filename; + std::string dest_path = writer->get_path() + '/' + filename; _fs->delete_file(dest_path, false); if (!_fs->link(source_path, dest_path)) { - PLOG(ERROR) << "Fail to link " << source_path - << " to " << dest_path; + PLOG(ERROR) + << "Fail to link " << source_path << " to " << dest_path; continue; } // Don't delete linked file @@ -272,8 +268,8 @@ int CurveSnapshotCopier::filter_before_copy(CurveSnapshotWriter* writer, } void CurveSnapshotCopier::filter() { - _writer = reinterpret_cast(_storage->create( - !_filter_before_copy_remote)); + _writer = reinterpret_cast( + _storage->create(!_filter_before_copy_remote)); if (_writer == NULL) { set_error(EIO, "Fail to create snapshot writer"); return; @@ -283,12 +279,13 @@ void CurveSnapshotCopier::filter() { braft::SnapshotReader* reader = _storage->open(); if (filter_before_copy(_writer, reader) != 0) { LOG(WARNING) << "Fail to filter writer before copying" - ", 
path: " << _writer->get_path() + ", path: " + << _writer->get_path() << ", destroy and create a new writer"; _writer->set_error(-1, "Fail to filter"); _storage->close(_writer, false); - _writer = reinterpret_cast( - _storage->create(true)); + _writer = + reinterpret_cast(_storage->create(true)); } if (reader) { _storage->close(reader); @@ -319,16 +316,16 @@ void CurveSnapshotCopier::copy_file(const std::string& filename, bool attch) { butil::File::Error e; bool rc = false; if (braft::FLAGS_raft_create_parent_directories) { - butil::FilePath sub_dir = butil::FilePath( - _writer->get_path()).Append(sub_path.DirName()); + butil::FilePath sub_dir = + butil::FilePath(_writer->get_path()).Append(sub_path.DirName()); rc = _fs->create_directory(sub_dir.value(), &e, true); } else { - rc = create_sub_directory( - _writer->get_path(), sub_path.DirName().value(), _fs, &e); + rc = create_sub_directory(_writer->get_path(), + sub_path.DirName().value(), _fs, &e); } if (!rc) { - LOG(ERROR) << "Fail to create directory for " << file_path - << " : " << butil::File::ErrorToString(e); + LOG(ERROR) << "Fail to create directory for " << file_path << " : " + << butil::File::ErrorToString(e); set_error(braft::file_error_to_os_error(e), "Fail to create directory"); } @@ -340,8 +337,8 @@ void CurveSnapshotCopier::copy_file(const std::string& filename, bool attch) { set_error(ECANCELED, "%s", berror(ECANCELED)); return; } - scoped_refptr session - = _copier.start_to_copy_to_file(filename, file_path, NULL); + scoped_refptr session = + _copier.start_to_copy_to_file(filename, file_path, NULL); if (session == NULL) { LOG(WARNING) << "Fail to copy " << filename << " path: " << _writer->get_path(); @@ -355,14 +352,13 @@ void CurveSnapshotCopier::copy_file(const std::string& filename, bool attch) { _cur_session = NULL; lck.unlock(); if (!session->status().ok()) { - // 如果是文件不存在,那么删除刚开始open的文件 + // If the file does not exist, delete the file that was just opened if (session->status().error_code() == ENOENT) { bool rc = _fs->delete_file(file_path, false); if (!rc) { - LOG(ERROR) << "Fail to delete file" << file_path - << " : " << ::berror(errno); - set_error(errno, - "Fail to create delete file " + file_path); + LOG(ERROR) << "Fail to delete file" << file_path << " : " + << ::berror(errno); + set_error(errno, "Fail to create delete file " + file_path); } return; } @@ -371,7 +367,8 @@ void CurveSnapshotCopier::copy_file(const std::string& filename, bool attch) { session->status().error_cstr()); return; } - // 如果是attach file,那么不需要持久化file meta信息 + // If it is an attach file, then there is no need to persist the file meta + // information if (!attch && _writer->add_file(filename, &meta) != 0) { set_error(EIO, "Fail to add file to writer"); return; @@ -394,16 +391,13 @@ std::string CurveSnapshotCopier::get_rfilename(const std::string& filename) { } void CurveSnapshotCopier::start() { - if (bthread_start_background( - &_tid, NULL, start_copy, this) != 0) { + if (bthread_start_background(&_tid, NULL, start_copy, this) != 0) { PLOG(ERROR) << "Fail to start bthread"; copy(); } } -void CurveSnapshotCopier::join() { - bthread_join(_tid, NULL); -} +void CurveSnapshotCopier::join() { bthread_join(_tid, NULL); } void CurveSnapshotCopier::cancel() { BAIDU_SCOPED_LOCK(_mutex); diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_copier.h b/src/chunkserver/raftsnapshot/curve_snapshot_copier.h index 1c991720b0..fdc1ef960a 100644 --- a/src/chunkserver/raftsnapshot/curve_snapshot_copier.h +++ 
b/src/chunkserver/raftsnapshot/curve_snapshot_copier.h
@@ -43,8 +43,10 @@
 #define SRC_CHUNKSERVER_RAFTSNAPSHOT_CURVE_SNAPSHOT_COPIER_H_
 
 #include
-#include
+
 #include
+#include
+
 #include "src/chunkserver/raftsnapshot/curve_snapshot.h"
 #include "src/chunkserver/raftsnapshot/curve_snapshot_storage.h"
 
@@ -75,7 +77,9 @@ class CurveSnapshotCopier : public braft::SnapshotCopier {
                             braft::SnapshotReader* last_snapshot);
     void filter();
     void copy_file(const std::string& filename, bool attach = false);
-    // 这里的filename是相对于快照目录的路径,为了先把文件下载到临时目录,需要把前面的..去掉
+    // The filename here is a path relative to the snapshot directory. To
+    // download the file into the temporary directory first, the leading ".."
+    // has to be stripped
     std::string get_rfilename(const std::string& filename);
 
     braft::raft_mutex_t _mutex;
 
diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h b/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h
index 97c553661c..5221a0df8a 100644
--- a/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h
+++ b/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h
@@ -44,10 +44,12 @@
 
 #include
 #include
+
+#include
+#include
 #include
 #include
-#include
-#include
+
 #include "proto/curve_storage.pb.h"
 #include "src/chunkserver/raftsnapshot/define.h"
 
@@ -55,9 +57,10 @@ namespace curve {
 namespace chunkserver {
 
 /**
- * snapshot attachment文件元数据表,同上面的
- * CurveSnapshotAttachMetaTable接口,主要提供attach文件元数据信息
- * 的查询、序列化和反序列等接口
+ * Metadata table for snapshot attachment files, analogous to the snapshot
+ * meta table above. It mainly provides interfaces for querying, serializing
+ * and deserializing attachment file metadata
 */
 class CurveSnapshotAttachMetaTable {
  public:
@@ -70,7 +73,7 @@ class CurveSnapshotAttachMetaTable {
     int get_attach_file_meta(const std::string& filename,
                              braft::LocalFileMeta* file_meta) const;
     // list files in the attach meta table
-    void list_files(std::vector *files) const;
+    void list_files(std::vector<std::string>* files) const;
     // deserialize
     int load_from_iobuf_as_remote(const butil::IOBuf& buf);
     // serialize
@@ -79,39 +82,31 @@ class CurveSnapshotAttachMetaTable {
  private:
     typedef std::map<std::string, braft::LocalFileMeta> Map;  // file -> file meta
-    Map    _file_map;
+    Map _file_map;
 };
 
 class CurveSnapshotFileReader : public braft::LocalDirReader {
  public:
     CurveSnapshotFileReader(braft::FileSystemAdaptor* fs,
-                        const std::string& path,
-                        braft::SnapshotThrottle* snapshot_throttle)
-        : LocalDirReader(fs, path),
-        _snapshot_throttle(snapshot_throttle)
-    {}
+                            const std::string& path,
+                            braft::SnapshotThrottle* snapshot_throttle)
+        : LocalDirReader(fs, path), _snapshot_throttle(snapshot_throttle) {}
     virtual ~CurveSnapshotFileReader() = default;
-    void set_meta_table(const braft::LocalSnapshotMetaTable &meta_table) {
+    void set_meta_table(const braft::LocalSnapshotMetaTable& meta_table) {
         _meta_table = meta_table;
     }
     void set_attach_meta_table(
-        const CurveSnapshotAttachMetaTable &attach_meta_table) {
+        const CurveSnapshotAttachMetaTable& attach_meta_table) {
         _attach_meta_table = attach_meta_table;
     }
-    int read_file(butil::IOBuf* out,
-                  const std::string &filename,
-                  off_t offset,
-                  size_t max_count,
-                  bool read_partly,
-                  size_t* read_count,
+    int read_file(butil::IOBuf* out, const std::string& filename, off_t offset,
+                  size_t max_count, bool read_partly, size_t* read_count,
                   bool* is_eof) const override;
-    braft::LocalSnapshotMetaTable get_meta_table() {
-        return _meta_table;
-    }
+    braft::LocalSnapshotMetaTable get_meta_table() { return _meta_table; }
 
  private:
braft::LocalSnapshotMetaTable _meta_table; diff --git a/src/chunkserver/raftsnapshot/define.h b/src/chunkserver/raftsnapshot/define.h index 012da7f1ba..79b1dcf355 100644 --- a/src/chunkserver/raftsnapshot/define.h +++ b/src/chunkserver/raftsnapshot/define.h @@ -29,12 +29,13 @@ namespace chunkserver { const char RAFT_DATA_DIR[] = "data"; const char RAFT_META_DIR[] = "raft_meta"; -// TODO(all:fix it): RAFT_SNAP_DIR注意当前这个目录地址不能修改 -// 与当前外部依赖curve-braft代码强耦合(两边硬编码耦合) +// TODO(all:fix it): Note that the RAFT_SNAP_DIR directory address should not be +// modified at this time. This is tightly coupled with the current external +// dependency on curve-braft code (hardcoded coupling on both sides). const char RAFT_SNAP_DIR[] = "raft_snapshot"; -const char RAFT_LOG_DIR[] = "log"; +const char RAFT_LOG_DIR[] = "log"; #define BRAFT_SNAPSHOT_PATTERN "snapshot_%020" PRId64 -#define BRAFT_SNAPSHOT_META_FILE "__raft_snapshot_meta" +#define BRAFT_SNAPSHOT_META_FILE "__raft_snapshot_meta" #define BRAFT_SNAPSHOT_ATTACH_META_FILE "__raft_snapshot_attach_meta" #define BRAFT_PROTOBUF_FILE_TEMP ".tmp" diff --git a/src/chunkserver/register.cpp b/src/chunkserver/register.cpp index 1616800c55..edbf2a27f7 100644 --- a/src/chunkserver/register.cpp +++ b/src/chunkserver/register.cpp @@ -20,29 +20,30 @@ * Author: lixiaocui */ -#include -#include +#include "src/chunkserver/register.h" + #include #include +#include +#include #include #include +#include "proto/topology.pb.h" +#include "src/chunkserver/chunkserver_helper.h" #include "src/common/crc32.h" #include "src/common/string_util.h" -#include "src/chunkserver/register.h" #include "src/common/uri_parser.h" -#include "src/chunkserver/chunkserver_helper.h" -#include "proto/topology.pb.h" namespace curve { namespace chunkserver { -Register::Register(const RegisterOptions &ops) { +Register::Register(const RegisterOptions& ops) { this->ops_ = ops; - // 解析mds的多个地址 + // Parsing multiple addresses of mds ::curve::common::SplitString(ops.mdsListenAddr, ",", &mdsEps_); - // 检验每个地址的合法性 + // Verify the legality of each address for (auto addr : mdsEps_) { butil::EndPoint endpt; if (butil::str2endpoint(addr.c_str(), &endpt) < 0) { @@ -52,9 +53,9 @@ Register::Register(const RegisterOptions &ops) { inServiceIndex_ = 0; } -int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, - ChunkServerMetadata *metadata, - const std::shared_ptr &epochMap) { +int Register::RegisterToMDS(const ChunkServerMetadata* localMetadata, + ChunkServerMetadata* metadata, + const std::shared_ptr& epochMap) { ::curve::mds::topology::ChunkServerRegistRequest req; ::curve::mds::topology::ChunkServerRegistResponse resp; req.set_disktype(ops_.chunkserverDiskType); @@ -105,7 +106,8 @@ int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, curve::mds::topology::TopologyService_Stub stub(&channel); stub.RegistChunkServer(&cntl, &req, &resp, nullptr); - // TODO(lixiaocui): 后续错误码和mds共享后改成枚举类型 + // TODO(lixiaocui): Change to enumeration type after sharing error codes + // and mds in the future if (!cntl.Failed() && resp.statuscode() == 0) { break; } else { @@ -158,7 +160,7 @@ int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, return 0; } -int Register::PersistChunkServerMeta(const ChunkServerMetadata &metadata) { +int Register::PersistChunkServerMeta(const ChunkServerMetadata& metadata) { int fd; std::string metaFile = curve::common::UriParser::GetPathFromUri(ops_.chunkserverMetaUri); diff --git a/src/chunkserver/register.h b/src/chunkserver/register.h index 
f89683087d..d45a15fdf5 100644 --- a/src/chunkserver/register.h +++ b/src/chunkserver/register.h @@ -23,13 +23,14 @@ #ifndef SRC_CHUNKSERVER_REGISTER_H_ #define SRC_CHUNKSERVER_REGISTER_H_ -#include #include +#include #include -#include "src/fs/local_filesystem.h" + #include "proto/chunkserver.pb.h" -#include "src/chunkserver/epoch_map.h" #include "src/chunkserver/datastore/file_pool.h" +#include "src/chunkserver/epoch_map.h" +#include "src/fs/local_filesystem.h" using ::curve::fs::LocalFileSystem; @@ -37,7 +38,7 @@ namespace curve { namespace chunkserver { const uint32_t CURRENT_METADATA_VERSION = 0x01; -// register配置选项 +// Register Configuration Options struct RegisterOptions { std::string mdsListenAddr; std::string chunkserverInternalIp; @@ -61,7 +62,7 @@ struct RegisterOptions { class Register { public: - explicit Register(const RegisterOptions &ops); + explicit Register(const RegisterOptions& ops); ~Register() {} /** @@ -71,16 +72,16 @@ class Register { * @param[out] metadata chunkserver meta * @param[in,out] epochMap epochMap to update */ - int RegisterToMDS(const ChunkServerMetadata *localMetadata, - ChunkServerMetadata *metadata, - const std::shared_ptr &epochMap); + int RegisterToMDS(const ChunkServerMetadata* localMetadata, + ChunkServerMetadata* metadata, + const std::shared_ptr& epochMap); /** - * @brief 持久化ChunkServer元数据 + * @brief Persisting ChunkServer metadata * * @param[in] metadata */ - int PersistChunkServerMeta(const ChunkServerMetadata &metadata); + int PersistChunkServerMeta(const ChunkServerMetadata& metadata); private: RegisterOptions ops_; @@ -92,4 +93,3 @@ class Register { } // namespace curve #endif // SRC_CHUNKSERVER_REGISTER_H_ - diff --git a/src/chunkserver/trash.cpp b/src/chunkserver/trash.cpp index 2941261240..511ad103f0 100644 --- a/src/chunkserver/trash.cpp +++ b/src/chunkserver/trash.cpp @@ -20,21 +20,24 @@ * Author: lixiaocui */ -#include +#include "src/chunkserver/trash.h" + #include +#include + #include -#include "src/chunkserver/trash.h" -#include "src/common/string_util.h" -#include "src/chunkserver/datastore/filename_operator.h" -#include "src/chunkserver/copyset_node.h" + #include "include/chunkserver/chunkserver_common.h" -#include "src/common/uri_parser.h" +#include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/datastore/filename_operator.h" #include "src/chunkserver/raftlog/define.h" +#include "src/common/string_util.h" +#include "src/common/uri_parser.h" using ::curve::chunkserver::RAFT_DATA_DIR; +using ::curve::chunkserver::RAFT_LOG_DIR; using ::curve::chunkserver::RAFT_META_DIR; using ::curve::chunkserver::RAFT_SNAP_DIR; -using ::curve::chunkserver::RAFT_LOG_DIR; namespace curve { namespace chunkserver { @@ -60,13 +63,13 @@ int Trash::Init(TrashOptions options) { walPool_ = options.walPool; chunkNum_.store(0); - // 读取trash目录下的所有目录 + // Read all directories under the trash directory std::vector files; localFileSystem_->List(trashPath_, &files); - // 遍历trash下的文件 - for (auto &file : files) { - // 如果不是copyset目录,跳过 + // Traverse through files under trash + for (auto& file : files) { + // If it is not a copyset directory, skip if (!IsCopysetInTrash(file)) { continue; } @@ -100,8 +103,8 @@ int Trash::Fini() { return 0; } -int Trash::RecycleCopySet(const std::string &dirPath) { - // 回收站目录不存在,需要创建 +int Trash::RecycleCopySet(const std::string& dirPath) { + // The recycle bin directory does not exist and needs to be created if (!localFileSystem_->DirExists(trashPath_)) { LOG(INFO) << "Copyset recyler directory " << trashPath_ << " does not 
exist, creating it"; @@ -113,10 +116,11 @@ int Trash::RecycleCopySet(const std::string &dirPath) { } } - // 如果回收站已存在该目录,本次删除失败 - std::string dst = trashPath_ + "/" + - dirPath.substr(dirPath.find_last_of('/', dirPath.length()) + 1) + - '.' + std::to_string(std::time(nullptr)); + // If the directory already exists in the recycle bin, this deletion failed + std::string dst = + trashPath_ + "/" + + dirPath.substr(dirPath.find_last_of('/', dirPath.length()) + 1) + '.' + + std::to_string(std::time(nullptr)); if (localFileSystem_->DirExists(dst)) { LOG(WARNING) << "recycle error: " << dst << " already exist in " << trashPath_; @@ -137,28 +141,28 @@ int Trash::RecycleCopySet(const std::string &dirPath) { } void Trash::DeleteEligibleFileInTrashInterval() { - while (sleeper_.wait_for(std::chrono::seconds(scanPeriodSec_))) { - // 扫描回收站 - DeleteEligibleFileInTrash(); - } + while (sleeper_.wait_for(std::chrono::seconds(scanPeriodSec_))) { + // Scan Recycle Bin + DeleteEligibleFileInTrash(); + } } void Trash::DeleteEligibleFileInTrash() { - // trash目录暂不存在 + // The trash directory does not currently exist if (!localFileSystem_->DirExists(trashPath_)) { return; } - // 读取trash目录下的所有目录 + // Read all directories under the trash directory std::vector files; if (0 != localFileSystem_->List(trashPath_, &files)) { LOG(ERROR) << "Trash failed list files in " << trashPath_; return; } - // 遍历trash下的文件 - for (auto &file : files) { - // 如果不是copyset目录,跳过 + // Traverse through files under trash + for (auto& file : files) { + // If it is not a copyset directory, skip if (!IsCopysetInTrash(file)) { continue; } @@ -172,7 +176,7 @@ void Trash::DeleteEligibleFileInTrash() { continue; } - // 删除copyset目录 + // Delete copyset directory if (0 != localFileSystem_->Delete(copysetDir)) { LOG(ERROR) << "Trash fail to delete " << copysetDir; return; @@ -180,10 +184,10 @@ void Trash::DeleteEligibleFileInTrash() { } } -bool Trash::IsCopysetInTrash(const std::string &dirName) { - // 合法的copyset目录: 高32位PoolId(>0)组成, 低32位由copysetId(>0)组成 - // 目录是十进制形式 - // 例如:2860448220024 (poolId: 666, copysetId: 888) +bool Trash::IsCopysetInTrash(const std::string& dirName) { + // Legal copyset directory: composed of high 32-bit PoolId(>0), and low + // 32-bit composed of copysetId(>0) The directory is in decimal form For + // example: 2860448220024 (poolId: 666, copysetId: 888) uint64_t groupId; auto n = dirName.find("."); if (n == std::string::npos) { @@ -196,7 +200,7 @@ bool Trash::IsCopysetInTrash(const std::string &dirName) { return GetPoolID(groupId) >= 1 && GetCopysetID(groupId) >= 1; } -bool Trash::NeedDelete(const std::string ©setDir) { +bool Trash::NeedDelete(const std::string& copysetDir) { int fd = localFileSystem_->Open(copysetDir, O_RDONLY); if (0 > fd) { LOG(ERROR) << "Trash fail open " << copysetDir; @@ -219,15 +223,15 @@ bool Trash::NeedDelete(const std::string ©setDir) { return true; } -bool Trash::IsChunkOrSnapShotFile(const std::string &chunkName) { +bool Trash::IsChunkOrSnapShotFile(const std::string& chunkName) { return FileNameOperator::FileType::UNKNOWN != - FileNameOperator::ParseFileName(chunkName).type; + FileNameOperator::ParseFileName(chunkName).type; } -bool Trash::RecycleChunksAndWALInDir( - const std::string ©setPath, const std::string &filename) { +bool Trash::RecycleChunksAndWALInDir(const std::string& copysetPath, + const std::string& filename) { bool isDir = localFileSystem_->DirExists(copysetPath); - // 是文件看是否需要回收 + // Is it a file to see if it needs to be recycled if (!isDir) { if (IsChunkOrSnapShotFile(filename)) { 
return RecycleChunkfile(copysetPath, filename); @@ -238,18 +242,18 @@ bool Trash::RecycleChunksAndWALInDir( } } - // 是目录,继续list + // It's a directory, continue with the list std::vector files; if (0 != localFileSystem_->List(copysetPath, &files)) { LOG(ERROR) << "Trash failed to list files in " << copysetPath; return false; } - // 遍历子文件 + // Traverse sub files bool ret = true; - for (auto &file : files) { + for (auto& file : files) { std::string filePath = copysetPath + "/" + file; - // recycle 失败不应该中断其他文件的recycle + // recycle failure should not interrupt the recycle of other files if (!RecycleChunksAndWALInDir(filePath, file)) { ret = false; } @@ -257,13 +261,13 @@ bool Trash::RecycleChunksAndWALInDir( return ret; } -bool Trash::RecycleChunkfile( - const std::string &filepath, const std::string &filename) { +bool Trash::RecycleChunkfile(const std::string& filepath, + const std::string& filename) { (void)filename; LockGuard lg(mtx_); if (0 != chunkFilePool_->RecycleFile(filepath)) { LOG(ERROR) << "Trash failed recycle chunk " << filepath - << " to FilePool"; + << " to FilePool"; return false; } @@ -271,13 +275,12 @@ bool Trash::RecycleChunkfile( return true; } -bool Trash::RecycleWAL( - const std::string &filepath, const std::string &filename) { +bool Trash::RecycleWAL(const std::string& filepath, + const std::string& filename) { (void)filename; LockGuard lg(mtx_); if (walPool_ != nullptr && 0 != walPool_->RecycleFile(filepath)) { - LOG(ERROR) << "Trash failed recycle WAL " << filepath - << " to WALPool"; + LOG(ERROR) << "Trash failed recycle WAL " << filepath << " to WALPool"; return false; } @@ -285,12 +288,12 @@ bool Trash::RecycleWAL( return true; } -bool Trash::IsWALFile(const std::string &fileName) { +bool Trash::IsWALFile(const std::string& fileName) { int match = 0; int64_t first_index = 0; int64_t last_index = 0; - match = sscanf(fileName.c_str(), CURVE_SEGMENT_CLOSED_PATTERN, - &first_index, &last_index); + match = sscanf(fileName.c_str(), CURVE_SEGMENT_CLOSED_PATTERN, &first_index, + &last_index); if (match == 2) { LOG(INFO) << "recycle closed segment wal file, path: " << fileName << " first_index: " << first_index @@ -298,8 +301,7 @@ bool Trash::IsWALFile(const std::string &fileName) { return true; } - match = sscanf(fileName.c_str(), CURVE_SEGMENT_OPEN_PATTERN, - &first_index); + match = sscanf(fileName.c_str(), CURVE_SEGMENT_OPEN_PATTERN, &first_index); if (match == 1) { LOG(INFO) << "recycle open segment wal file, path: " << fileName << " first_index: " << first_index; @@ -308,7 +310,7 @@ bool Trash::IsWALFile(const std::string &fileName) { return false; } -uint32_t Trash::CountChunkNumInCopyset(const std::string ©setPath) { +uint32_t Trash::CountChunkNumInCopyset(const std::string& copysetPath) { std::vector files; if (0 != localFileSystem_->List(copysetPath, &files)) { LOG(ERROR) << "Trash failed to list files in " << copysetPath; @@ -317,15 +319,14 @@ uint32_t Trash::CountChunkNumInCopyset(const std::string ©setPath) { // Traverse subdirectories uint32_t chunkNum = 0; - for (auto &file : files) { + for (auto& file : files) { std::string filePath = copysetPath + "/" + file; bool isDir = localFileSystem_->DirExists(filePath); if (!isDir) { // valid: chunkfile, snapshotfile, walfile - if (!(IsChunkOrSnapShotFile(file) || - IsWALFile(file))) { - LOG(WARNING) << "Trash find a illegal file:" - << file << " in " << copysetPath; + if (!(IsChunkOrSnapShotFile(file) || IsWALFile(file))) { + LOG(WARNING) << "Trash find a illegal file:" << file << " in " + << copysetPath; continue; } 
++chunkNum; diff --git a/src/chunkserver/trash.h b/src/chunkserver/trash.h index a3a3c89d53..b35f4aef71 100644 --- a/src/chunkserver/trash.h +++ b/src/chunkserver/trash.h @@ -25,169 +25,179 @@ #include #include -#include "src/fs/local_filesystem.h" + #include "src/chunkserver/datastore/file_pool.h" #include "src/common/concurrent/concurrent.h" #include "src/common/interruptible_sleeper.h" +#include "src/fs/local_filesystem.h" -using ::curve::common::Thread; using ::curve::common::Atomic; -using ::curve::common::Mutex; -using ::curve::common::LockGuard; using ::curve::common::InterruptibleSleeper; +using ::curve::common::LockGuard; +using ::curve::common::Mutex; +using ::curve::common::Thread; -namespace curve { -namespace chunkserver { -struct TrashOptions{ - // copyset的trash路径 - std::string trashPath; - // 文件在放入trash中expiredAfteSec秒后,可以被物理回收 - int expiredAfterSec; - // 扫描trash目录的时间间隔 - int scanPeriodSec; - - std::shared_ptr localFileSystem; - std::shared_ptr chunkFilePool; - std::shared_ptr walPool; -}; - -class Trash { - public: - int Init(TrashOptions options); - - int Run(); - - int Fini(); - - /* - * @brief DeleteEligibleFileInTrash 回收trash目录下的物理空间 - */ - void DeleteEligibleFileInTrash(); - - int RecycleCopySet(const std::string &dirPath); - - /* - * @brief 获取回收站中chunk的个数 - * - * @return chunk个数 - */ - uint32_t GetChunkNum() {return chunkNum_.load();} - - /** - * @brief is WAL or not ? - * - * @param fileName file name - * - * @retval true yes - * @retval false no - */ - static bool IsWALFile(const std::string& fileName); - - /* - * @brief IsChunkOrSnapShotFile 是否为chunk或snapshot文件 - * - * @param[in] chunkName 文件名 - * - * @return true-符合chunk或snapshot文件命名规则 - */ - static bool IsChunkOrSnapShotFile(const std::string& chunkName); - - private: - /* - * @brief DeleteEligibleFileInTrashInterval 每隔一段时间进行trash物理空间回收 - */ - void DeleteEligibleFileInTrashInterval(); - - /* - * @brief NeedDelete 文件是否需要删除,放入trash的时间大于 - * trash中expiredAfterSec可以删除 - * - * @param[in] copysetDir copyset的目录路径 - * - * @return true-可以被删除 - */ - bool NeedDelete(const std::string ©setDir); - - /* - * @brief IsCopysetInTrash 是否为回收站中的copyset的目录 - * - * @param[in] dirName 文目录路径 - * - * @return true-符合copyset目录命名规则 - */ - bool IsCopysetInTrash(const std::string &dirName); - - /* - * @brief Recycle Chunkfile and wal file in Copyset - * - * @param[in] copysetDir copyset dir - * @param[in] filename filename - */ - bool RecycleChunksAndWALInDir( - const std::string ©setDir, const std::string &filename); - - /* - * @brief Recycle Chunkfile - * - * @param[in] filepath 文件路径 - * @param[in] filename 文件名 - */ - bool RecycleChunkfile( - const std::string &filepath, const std::string &filename); - - /** - * @brief Recycle WAL - * - * @param copysetPath copyset dir - * @param filename file name - * - * @retval true success - * @retval false failure - */ - bool RecycleWAL(const std::string& filepath, const std::string& filename); - - /* - * @brief 统计copyset目录中的chunk个数 - * - * @param[in] copysetPath chunk所在目录 - * @return 返回chunk个数 - */ - uint32_t CountChunkNumInCopyset(const std::string ©setPath); - - private: - // 文件在放入trash中expiredAfteSec秒后,可以被物理回收 - int expiredAfterSec_; - - // 扫描trash目录的时间间隔 - int scanPeriodSec_; - - // 回收站中chunk的个数 - Atomic chunkNum_; - - Mutex mtx_; - - // 本地文件系统 - std::shared_ptr localFileSystem_; - - // chunk池子 - std::shared_ptr chunkFilePool_; - - // wal pool - std::shared_ptr walPool_; - - // 回收站全路径 - std::string trashPath_; - - // 后台清理回收站的线程 - Thread recycleThread_; - - // false-开始后台任务,true-停止后台任务 - Atomic 
isStop_; - - InterruptibleSleeper sleeper_; -}; -} // namespace chunkserver -} // namespace curve - -#endif // SRC_CHUNKSERVER_TRASH_H_ - +namespace curve +{ + namespace chunkserver + { + struct TrashOptions + { + // The trash path of copyset + std::string trashPath; + // The file can be physically recycled after being placed in trash for + // expiredAfterSec seconds + int expiredAfterSec; + // Time interval for scanning the trash directory + int scanPeriodSec; + + std::shared_ptr localFileSystem; + std::shared_ptr chunkFilePool; + std::shared_ptr walPool; + }; + + class Trash + { + public: + int Init(TrashOptions options); + + int Run(); + + int Fini(); + + /* + * @brief DeleteEligibleFileInTrash recycles the physical space in the trash + * directory + */ + void DeleteEligibleFileInTrash(); + + int RecycleCopySet(const std::string &dirPath); + + /* + * @brief Get the number of chunks in the recycle bin + * + * @return Number of chunks + */ + uint32_t GetChunkNum() { return chunkNum_.load(); } + + /** + * @brief is WAL or not ? + * + * @param fileName file name + * + * @retval true yes + * @retval false no + */ + static bool IsWALFile(const std::string &fileName); + + /* + * @brief IsChunkOrSnapShotFile Whether the file is a chunk or snapshot file + * + * @param[in] chunkName file name + * + * @return true - matches the chunk or snapshot file naming rules + */ + static bool IsChunkOrSnapShotFile(const std::string &chunkName); + + private: + /* + * @brief DeleteEligibleFileInTrashInterval Periodically recycles the + * physical space under the trash directory + */ + void DeleteEligibleFileInTrashInterval(); + + /* + * @brief NeedDelete Whether the file needs to be deleted: it can be + * deleted once it has been in the trash for longer than + * expiredAfterSec + * + * @param[in] copysetDir copyset directory path + * + * @return true - can be deleted + */ + bool NeedDelete(const std::string &copysetDir); + + /* + * @brief IsCopysetInTrash Whether the directory is a copyset directory in + * the recycle bin + * + * @param[in] dirName directory path + * + * @return true - Complies with copyset directory naming rules + */ + bool IsCopysetInTrash(const std::string &dirName); + + /* + * @brief Recycle Chunkfile and wal file in Copyset + * + * @param[in] copysetDir copyset dir + * @param[in] filename filename + */ + bool RecycleChunksAndWALInDir( + const std::string &copysetDir, const std::string &filename); + + /* + * @brief Recycle Chunkfile + * + * @param[in] filepath file path + * @param[in] filename file name + */ + bool RecycleChunkfile(const std::string &filepath, + const std::string &filename); + + /** + * @brief Recycle WAL + * + * @param copysetPath copyset dir + * @param filename file name + * + * @retval true success + * @retval false failure + */ + bool RecycleWAL(const std::string &filepath, const std::string &filename); + + /* + * @brief counts the number of chunks in the copyset directory + * + * @param[in] copysetPath chunk directory + * @return returns the number of chunks + */ + uint32_t CountChunkNumInCopyset(const std::string &copysetPath); + + private: + // The file can be physically recycled after being placed in trash for + // expiredAfterSec seconds + int expiredAfterSec_; + + // Time interval for scanning the trash directory + int scanPeriodSec_; + + // Number of chunks in the recycle bin + Atomic chunkNum_; + + Mutex mtx_; + + // Local file system + std::shared_ptr localFileSystem_; + + // Chunk pool + std::shared_ptr chunkFilePool_; + + // wal pool + std::shared_ptr walPool_; + + // Full path of the recycle bin + std::string trashPath_; + + // Thread for background cleaning of the recycle bin + Thread recycleThread_; + + // false - start the background task, true - stop the background task + Atomic isStop_; + + InterruptibleSleeper sleeper_; + }; + } // namespace chunkserver +} // namespace curve + +#endif // SRC_CHUNKSERVER_TRASH_H_ diff --git a/src/client/chunk_closure.cpp b/src/client/chunk_closure.cpp index 592e9d2a06..d2345e85fc 100644 --- a/src/client/chunk_closure.cpp +++ b/src/client/chunk_closure.cpp @@ -22,48 +22,57 @@ #include "src/client/chunk_closure.h" -#include -#include #include +#include +#include #include "src/client/client_common.h" #include "src/client/copyset_client.h" +#include "src/client/io_tracker.h" #include "src/client/metacache.h" #include "src/client/request_closure.h" #include "src/client/request_context.h" #include "src/client/service_helper.h" -#include "src/client/io_tracker.h" -// TODO(tongguangxun) :优化重试逻辑,将重试逻辑与RPC返回逻辑拆开 +// TODO(tongguangxun): Optimize retry logic by separating the retry logic from +// the RPC return logic namespace curve { namespace client { -ClientClosure::BackoffParam ClientClosure::backoffParam_; -FailureRequestOption ClientClosure::failReqOpt_; +ClientClosure::BackoffParam ClientClosure::backoffParam_; +FailureRequestOption ClientClosure::failReqOpt_; void ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) { RequestClosure* reqDone = static_cast(done_); - // 如果对应的cooysetId leader可能发生变更 - // 那么设置这次重试请求超时时间为默认值 - // 这是为了尽快重试这次请求 - // 从copysetleader迁移到client GetLeader获取到新的leader会有1~2s的延迟 - // 对于一个请求来说,GetLeader仍然可能返回旧的Leader - // rpc timeout时间可能会被设置成2s/4s,等到超时后再去获取leader信息 - // 为了尽快在新的Leader上重试请求,将rpc timeout时间设置为默认值 + // If the leader of the corresponding copysetId may have changed, + // set the timeout for this retry request to the default value. + // This is done so that the request is retried as soon as possible. + // There is a 1~2s delay between the copyset leader migrating and the + // client obtaining the new leader via GetLeader; for a given request, + // GetLeader may still return the old leader. The rpc timeout may have + // been backed off to 2s/4s, and the leader information is only fetched + // after that timeout. To retry the request on the new leader as soon + // as possible, reset the rpc timeout to the default value. if (cntlstatus == brpc::ERPCTIMEDOUT || cntlstatus == ETIMEDOUT) { uint64_t nextTimeout = 0; uint64_t retriedTimes = reqDone->GetRetriedTimes(); bool leaderMayChange = metaCache_->IsLeaderMayChange( chunkIdInfo_.lpid_, chunkIdInfo_.cpid_); - // 当某一个IO重试超过一定次数后,超时时间一定进行指数退避 - // 当底层chunkserver压力大时,可能也会触发unstable - // 由于copyset leader may change,会导致请求超时时间设置为默认值 - // 而chunkserver在这个时间内处理不了,导致IO hang - // 真正宕机的情况下,请求重试一定次数后会处理完成 - // 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑 - if (retriedTimes < failReqOpt_.chunkserverMinRetryTimesForceTimeoutBackoff && // NOLINT + // Once an IO has been retried more than a certain number of times, + // the timeout must be backed off exponentially. When the underlying + // chunkserver is under heavy pressure, unstable may also be + // triggered. Because the copyset leader may change, the request + // timeout is set to the default value, but the chunkserver cannot + // process the request within that time, causing the IO to hang. If + // the server is really down, the request completes after a certain + // number of retries; if it keeps being retried, it is not a downtime + // situation, and the timeout should then fall back into the + // exponential backoff logic. + if (retriedTimes < + failReqOpt_ + .chunkserverMinRetryTimesForceTimeoutBackoff && // 
NOLINT leaderMayChange) { nextTimeout = failReqOpt_.chunkserverRPCTimeoutMS; } else { @@ -71,25 +80,23 @@ void ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) { } reqDone->SetNextTimeOutMS(nextTimeout); - LOG(WARNING) << "rpc timeout, next timeout = " << nextTimeout - << ", " << *reqCtx_ - << ", retried times = " << reqDone->GetRetriedTimes() - << ", IO id = " << reqDone->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << "rpc timeout, next timeout = " << nextTimeout << ", " + << *reqCtx_ + << ", retried times = " << reqDone->GetRetriedTimes() + << ", IO id = " << reqDone->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); return; } if (rpcstatus == CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD) { uint64_t nextsleeptime = OverLoadBackOff(reqDone->GetRetriedTimes()); LOG(WARNING) << "chunkserver overload, sleep(us) = " << nextsleeptime - << ", " << *reqCtx_ - << ", retried times = " << reqDone->GetRetriedTimes() - << ", IO id = " << reqDone->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << ", " << *reqCtx_ + << ", retried times = " << reqDone->GetRetriedTimes() + << ", IO id = " << reqDone->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); bthread_usleep(nextsleeptime); return; } @@ -103,19 +110,19 @@ void ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) { } } - LOG(WARNING) - << "Rpc failed " - << (retryDirectly_ ? "retry directly, " - : "sleep " + std::to_string(nextSleepUS) + " us, ") - << *reqCtx_ << ", cntl status = " << cntlstatus - << ", response status = " - << curve::chunkserver::CHUNK_OP_STATUS_Name( - static_cast(rpcstatus)) - << ", retried times = " << reqDone->GetRetriedTimes() - << ", IO id = " << reqDone->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << "Rpc failed " + << (retryDirectly_ + ? 
"retry directly, " + : "sleep " + std::to_string(nextSleepUS) + " us, ") + << *reqCtx_ << ", cntl status = " << cntlstatus + << ", response status = " + << curve::chunkserver::CHUNK_OP_STATUS_Name( + static_cast( + rpcstatus)) + << ", retried times = " << reqDone->GetRetriedTimes() + << ", IO id = " << reqDone->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); if (nextSleepUS != 0) { bthread_usleep(nextSleepUS); @@ -134,8 +141,11 @@ uint64_t ClientClosure::OverLoadBackOff(uint64_t currentRetryTimes) { random_time -= nextsleeptime / 10; nextsleeptime += random_time; - nextsleeptime = std::min(nextsleeptime, failReqOpt_.chunkserverMaxRetrySleepIntervalUS); // NOLINT - nextsleeptime = std::max(nextsleeptime, failReqOpt_.chunkserverOPRetryIntervalUS); // NOLINT + nextsleeptime = + std::min(nextsleeptime, + failReqOpt_.chunkserverMaxRetrySleepIntervalUS); // NOLINT + nextsleeptime = std::max( + nextsleeptime, failReqOpt_.chunkserverOPRetryIntervalUS); // NOLINT return nextsleeptime; } @@ -153,10 +163,11 @@ uint64_t ClientClosure::TimeoutBackOff(uint64_t currentRetryTimes) { return nextTimeout; } -// 统一请求回调函数入口 -// 整体处理逻辑与之前相同 -// 针对不同的请求类型和返回状态码,进行相应的处理 -// 各子类需要实现SendRetryRequest,进行重试请求 +// Unified Request Callback Function Entry +// The overall processing logic is the same as before +// Perform corresponding processing for different request types and return +// status codes Each subclass needs to implement SendRetryRequest for retry +// requests void ClientClosure::Run() { std::unique_ptr selfGuard(this); std::unique_ptr cntlGuard(cntl_); @@ -176,80 +187,81 @@ void ClientClosure::Run() { needRetry = true; OnRpcFailed(); } else { - // 只要rpc正常返回,就清空超时计数器 - metaCache_->GetUnstableHelper().ClearTimeout( - chunkserverID_, chunkserverEndPoint_); + // As long as RPC returns normally, clear the timeout counter + metaCache_->GetUnstableHelper().ClearTimeout(chunkserverID_, + chunkserverEndPoint_); status_ = GetResponseStatus(); switch (status_) { - // 1. 请求成功 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS: - OnSuccess(); - break; - - // 2.1 不是leader - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED: - MetricHelper::IncremRedirectRPCCount(fileMetric_, reqCtx_->optype_); - needRetry = true; - OnRedirected(); - break; - - // 2.2 Copyset不存在,大概率都是配置变更了 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST: - needRetry = true; - OnCopysetNotExist(); - break; - - // 2.3 chunk not exist,直接返回,不用重试 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST: - OnChunkNotExist(); - break; - - // 2.4 非法参数,直接返回,不用重试 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST: - OnInvalidRequest(); - break; + // 1. 
Request successful + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS: + OnSuccess(); + break; + + // 2.1 is not a leader + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED: + MetricHelper::IncremRedirectRPCCount(fileMetric_, + reqCtx_->optype_); + needRetry = true; + OnRedirected(); + break; - // 2.5 返回backward - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD: - if (reqCtx_->optype_ == OpType::WRITE) { + // 2.2 Copyset does not exist, most likely due to configuration + // changes + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST: needRetry = true; - OnBackward(); - } else { - LOG(ERROR) << OpTypeToString(reqCtx_->optype_) - << " return backward, " - << *reqCtx_ - << ", status=" << status_ + OnCopysetNotExist(); + break; + + // 2.3 Chunk not exist, return directly without retry + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST: + OnChunkNotExist(); + break; + + // 2.4 Illegal parameter, returned directly without retry + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST: + OnInvalidRequest(); + break; + + // 2.5 Return to feedback + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD: + if (reqCtx_->optype_ == OpType::WRITE) { + needRetry = true; + OnBackward(); + } else { + LOG(ERROR) + << OpTypeToString(reqCtx_->optype_) + << " return backward, " << *reqCtx_ + << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ + << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); + } + break; + + // 2.6 Return Chunk Exist, directly return without retrying + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST: + OnChunkExist(); + break; + + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD: + OnEpochTooOld(); + break; + + default: + needRetry = true; + LOG(WARNING) + << OpTypeToString(reqCtx_->optype_) + << " failed for UNKNOWN reason, " << *reqCtx_ << ", status=" + << curve::chunkserver::CHUNK_OP_STATUS_Name( + static_cast(status_)) << ", retried times = " << reqDone_->GetRetriedTimes() << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " + << ", request id = " << reqCtx_->id_ << ", remote side = " << butil::endpoint2str(cntl_->remote_side()).c_str(); - } - break; - - // 2.6 返回chunk exist,直接返回,不用重试 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST: - OnChunkExist(); - break; - - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD: - OnEpochTooOld(); - break; - - default: - needRetry = true; - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " failed for UNKNOWN reason, " << *reqCtx_ - << ", status=" - << curve::chunkserver::CHUNK_OP_STATUS_Name( - static_cast(status_)) - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); } } @@ -264,22 +276,22 @@ void ClientClosure::OnRpcFailed() { status_ = cntl_->ErrorCode(); - // 如果连接失败,再等一定时间再重试 + // If the connection fails, wait for a certain amount of time before trying + // again if (cntlstatus_ == brpc::ERPCTIMEDOUT) { - // 如果RPC超时, 对应的chunkserver超时请求次数+1 + // If RPC times out, the corresponding number of chunkserver timeout + // requests+1 metaCache_->GetUnstableHelper().IncreTimeout(chunkserverID_); MetricHelper::IncremTimeOutRPCCount(fileMetric_, reqCtx_->optype_); } - LOG_EVERY_SECOND(WARNING) << OpTypeToString(reqCtx_->optype_) - << " failed, error code: " - 
<< cntl_->ErrorCode() - << ", error: " << cntl_->ErrorText() - << ", " << *reqCtx_ + LOG_EVERY_SECOND(WARNING) + << OpTypeToString(reqCtx_->optype_) + << " failed, error code: " << cntl_->ErrorCode() + << ", error: " << cntl_->ErrorText() << ", " << *reqCtx_ << ", retried times = " << reqDone_->GetRetriedTimes() << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " + << ", request id = " << reqCtx_->id_ << ", remote side = " << butil::endpoint2str(cntl_->remote_side()).c_str(); ProcessUnstableState(); @@ -291,26 +303,27 @@ void ClientClosure::ProcessUnstableState() { chunkserverID_, chunkserverEndPoint_); switch (state) { - case UnstableState::ServerUnstable: { - std::string ip = butil::ip2str(chunkserverEndPoint_.ip).c_str(); - int ret = metaCache_->SetServerUnstable(ip); - if (ret != 0) { - LOG(WARNING) << "Set server(" << ip << ") unstable failed, " - << "now set chunkserver(" << chunkserverID_ << ") unstable"; + case UnstableState::ServerUnstable: { + std::string ip = butil::ip2str(chunkserverEndPoint_.ip).c_str(); + int ret = metaCache_->SetServerUnstable(ip); + if (ret != 0) { + LOG(WARNING) + << "Set server(" << ip << ") unstable failed, " + << "now set chunkserver(" << chunkserverID_ << ") unstable"; + metaCache_->SetChunkserverUnstable(chunkserverID_); + } + break; + } + case UnstableState::ChunkServerUnstable: { metaCache_->SetChunkserverUnstable(chunkserverID_); + break; } - break; - } - case UnstableState::ChunkServerUnstable: { - metaCache_->SetChunkserverUnstable(chunkserverID_); - break; - } - case UnstableState::NoUnstable: { - RefreshLeader(); - break; - } - default: - break; + case UnstableState::NoUnstable: { + RefreshLeader(); + break; + } + default: + break; } } @@ -319,64 +332,58 @@ void ClientClosure::OnSuccess() { auto duration = cntl_->latency_us(); MetricHelper::LatencyRecord(fileMetric_, duration, reqCtx_->optype_); - MetricHelper::IncremRPCQPSCount( - fileMetric_, reqCtx_->rawlength_, reqCtx_->optype_); + MetricHelper::IncremRPCQPSCount(fileMetric_, reqCtx_->rawlength_, + reqCtx_->optype_); } void ClientClosure::OnChunkNotExist() { reqDone_->SetFailed(status_); - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " not exists, " << *reqCtx_ - << ", status=" << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " not exists, " + << *reqCtx_ << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); auto duration = cntl_->latency_us(); MetricHelper::LatencyRecord(fileMetric_, duration, reqCtx_->optype_); - MetricHelper::IncremRPCQPSCount( - fileMetric_, reqCtx_->rawlength_, reqCtx_->optype_); + MetricHelper::IncremRPCQPSCount(fileMetric_, reqCtx_->rawlength_, + reqCtx_->optype_); } void ClientClosure::OnChunkExist() { reqDone_->SetFailed(status_); - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " exists, " << *reqCtx_ - << ", status=" << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << 
butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " exists, " << *reqCtx_ + << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); } void ClientClosure::OnEpochTooOld() { reqDone_->SetFailed(status_); LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " epoch too old, reqCtx: " << *reqCtx_ - << ", status: " << status_ - << ", retried times: " << reqDone_->GetRetriedTimes() - << ", IO id: " << reqDone_->GetIOTracker()->GetID() - << ", request id: " << reqCtx_->id_ - << ", remote side: " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << " epoch too old, reqCtx: " << *reqCtx_ + << ", status: " << status_ + << ", retried times: " << reqDone_->GetRetriedTimes() + << ", IO id: " << reqDone_->GetIOTracker()->GetID() + << ", request id: " << reqCtx_->id_ << ", remote side: " + << butil::endpoint2str(cntl_->remote_side()).c_str(); } void ClientClosure::OnRedirected() { LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " redirected, " - << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", redirect leader is " - << (response_->has_redirect() ? response_->redirect() : "empty") - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", redirect leader is " + << (response_->has_redirect() ? 
response_->redirect() + : "empty") + << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); if (response_->has_redirect()) { int ret = UpdateLeaderWithRedirectInfo(response_->redirect()); @@ -390,13 +397,11 @@ void ClientClosure::OnRedirected() { void ClientClosure::OnCopysetNotExist() { LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " copyset not exists, " - << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); RefreshLeader(); } @@ -443,23 +448,20 @@ void ClientClosure::RefreshLeader() { << ", IO id = " << reqDone_->GetIOTracker()->GetID() << ", request id = " << reqCtx_->id_; } else { - // 如果refresh leader获取到了新的leader信息 - // 则重试之前不进行睡眠 + // If the refresh leader obtains new leader information + // Do not sleep before retrying retryDirectly_ = (leaderId != chunkserverID_); } } void ClientClosure::OnBackward() { const auto latestSn = metaCache_->GetLatestFileSn(); - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " return BACKWARD, " - << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " return BACKWARD, " + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); reqCtx_->seq_ = latestSn; } @@ -467,38 +469,26 @@ void ClientClosure::OnBackward() { void ClientClosure::OnInvalidRequest() { reqDone_->SetFailed(status_); LOG(ERROR) << OpTypeToString(reqCtx_->optype_) - << " failed for invalid format, " << *reqCtx_ - << ", status=" << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << " failed for invalid format, " << *reqCtx_ + << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); MetricHelper::IncremFailRPCCount(fileMetric_, reqCtx_->optype_); } void WriteChunkClosure::SendRetryRequest() { - client_->WriteChunk(reqCtx_->idinfo_, - reqCtx_->fileId_, - reqCtx_->epoch_, - reqCtx_->seq_, - reqCtx_->writeData_, - reqCtx_->offset_, - reqCtx_->rawlength_, - reqCtx_->sourceInfo_, - done_); + client_->WriteChunk(reqCtx_->idinfo_, reqCtx_->fileId_, reqCtx_->epoch_, + reqCtx_->seq_, reqCtx_->writeData_, reqCtx_->offset_, + reqCtx_->rawlength_, reqCtx_->sourceInfo_, done_); } -void WriteChunkClosure::OnSuccess() { - ClientClosure::OnSuccess(); -} +void WriteChunkClosure::OnSuccess() { ClientClosure::OnSuccess(); } void 
ReadChunkClosure::SendRetryRequest() { - client_->ReadChunk(reqCtx_->idinfo_, reqCtx_->seq_, - reqCtx_->offset_, - reqCtx_->rawlength_, - reqCtx_->sourceInfo_, - done_); + client_->ReadChunk(reqCtx_->idinfo_, reqCtx_->seq_, reqCtx_->offset_, + reqCtx_->rawlength_, reqCtx_->sourceInfo_, done_); } void ReadChunkClosure::OnSuccess() { @@ -516,9 +506,7 @@ void ReadChunkClosure::OnChunkNotExist() { void ReadChunkSnapClosure::SendRetryRequest() { client_->ReadChunkSnapshot(reqCtx_->idinfo_, reqCtx_->seq_, - reqCtx_->offset_, - reqCtx_->rawlength_, - done_); + reqCtx_->offset_, reqCtx_->rawlength_, done_); } void ReadChunkSnapClosure::OnSuccess() { @@ -528,10 +516,8 @@ void ReadChunkSnapClosure::OnSuccess() { } void DeleteChunkSnapClosure::SendRetryRequest() { - client_->DeleteChunkSnapshotOrCorrectSn( - reqCtx_->idinfo_, - reqCtx_->correctedSeq_, - done_); + client_->DeleteChunkSnapshotOrCorrectSn(reqCtx_->idinfo_, + reqCtx_->correctedSeq_, done_); } void GetChunkInfoClosure::SendRetryRequest() { @@ -548,17 +534,16 @@ void GetChunkInfoClosure::OnSuccess() { } void GetChunkInfoClosure::OnRedirected() { - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " redirected, " << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", redirect leader is " - << (chunkinforesponse_->has_redirect() ? chunkinforesponse_->redirect() - : "empty") - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " redirected, " + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", redirect leader is " + << (chunkinforesponse_->has_redirect() + ? 
chunkinforesponse_->redirect() + : "empty") + << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); if (chunkinforesponse_->has_redirect()) { int ret = UpdateLeaderWithRedirectInfo(chunkinforesponse_->redirect()); @@ -571,19 +556,14 @@ } void CreateCloneChunkClosure::SendRetryRequest() { - client_->CreateCloneChunk(reqCtx_->idinfo_, - reqCtx_->location_, - reqCtx_->seq_, - reqCtx_->correctedSeq_, - reqCtx_->chunksize_, - done_); + client_->CreateCloneChunk(reqCtx_->idinfo_, reqCtx_->location_, + reqCtx_->seq_, reqCtx_->correctedSeq_, + reqCtx_->chunksize_, done_); } void RecoverChunkClosure::SendRetryRequest() { - client_->RecoverChunk(reqCtx_->idinfo_, - reqCtx_->offset_, - reqCtx_->rawlength_, - done_); + client_->RecoverChunk(reqCtx_->idinfo_, reqCtx_->offset_, + reqCtx_->rawlength_, done_); } int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { @@ -601,7 +581,7 @@ int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { ret = metaCache_->UpdateLeader(lpId, cpId, leaderAddr.addr_); if (ret != 0) { LOG(WARNING) << "Update leader of copyset (" << lpId << ", " << cpId - << ") in metaCache fail"; + << ") in metaCache fail"; return -1; } @@ -609,7 +589,7 @@ int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { ret = metaCache_->GetLeader(lpId, cpId, &leaderId, &leaderEp); if (ret != 0) { LOG(INFO) << "Get leader of copyset (" << lpId << ", " << cpId - << ") from metaCache fail"; + << ") from metaCache fail"; return -1; } @@ -617,5 +597,5 @@ return 0; } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/chunk_closure.h b/src/client/chunk_closure.h index f5d9acd220..3ca5a609df 100644 --- a/src/client/chunk_closure.h +++ b/src/client/chunk_closure.h @@ -23,15 +23,16 @@ #ifndef SRC_CLIENT_CHUNK_CLOSURE_H_ #define SRC_CLIENT_CHUNK_CLOSURE_H_ -#include #include #include +#include + #include #include #include "proto/chunk.pb.h" -#include "src/client/client_config.h" #include "src/client/client_common.h" +#include "src/client/client_config.h" #include "src/client/client_metric.h" #include "src/client/request_closure.h" #include "src/common/math_util.h" @@ -42,15 +43,15 @@ namespace client { using curve::chunkserver::CHUNK_OP_STATUS; using curve::chunkserver::ChunkResponse; using curve::chunkserver::GetChunkInfoResponse; -using ::google::protobuf::Message; using ::google::protobuf::Closure; +using ::google::protobuf::Message; class MetaCache; class CopysetClient; /** - * ClientClosure,负责保存Rpc上下文, - * 包含cntl和response已经重试次数 + * ClientClosure, responsible for holding the Rpc context, + * including the cntl, the response, and the retry count */ class ClientClosure : public Closure { public: @@ -59,67 +60,59 @@ class ClientClosure : public Closure { virtual ~ClientClosure() = default; - void SetCntl(brpc::Controller* cntl) { - cntl_ = cntl; - } + void SetCntl(brpc::Controller* cntl) { cntl_ = cntl; } virtual void SetResponse(Message* response) { response_.reset(static_cast(response)); } - void SetChunkServerID(ChunkServerID csid) { - chunkserverID_ = csid; - } + void SetChunkServerID(ChunkServerID csid) { chunkserverID_ = csid; } - ChunkServerID GetChunkServerID() const { - return chunkserverID_; - } + ChunkServerID GetChunkServerID() const { return chunkserverID_; } void SetChunkServerEndPoint(const butil::EndPoint& endPoint) { chunkserverEndPoint_ = endPoint; } - EndPoint GetChunkServerEndPoint() const { - return chunkserverEndPoint_; - } + EndPoint GetChunkServerEndPoint() const { return chunkserverEndPoint_; } - // 统一Run函数入口 + // Unified entry point of Run void Run() override; - // 重试请求 + // Retry the request void OnRetry(); - // Rpc Failed 处理函数 + // Handler for RPC failures void OnRpcFailed(); - // 返回成功 处理函数 + // Handler for successful responses virtual void OnSuccess(); - // 返回重定向 处理函数 + // Handler for redirect responses virtual void OnRedirected(); - // copyset不存在 + // copyset does not exist void OnCopysetNotExist(); - // 返回backward + // Handler for BACKWARD responses void OnBackward(); - // 返回chunk不存在 处理函数 + // Handler for chunk-not-exist responses virtual void OnChunkNotExist(); - // 返回chunk存在 处理函数 + // Handler for chunk-exist responses void OnChunkExist(); // handle epoch too old void OnEpochTooOld(); - // 非法参数 + // Illegal parameter void OnInvalidRequest(); - // 发送重试请求 + // Send retry request virtual void SendRetryRequest() = 0; - // 获取response返回的状态码 + // Obtain the status code returned by the response virtual CHUNK_OP_STATUS GetResponseStatus() const { return response_->status(); } @@ -132,45 +125,43 @@ SetBackoffParam(); DVLOG(9) << "Client clousre conf info: " - << "chunkserverOPRetryIntervalUS = " - << failReqOpt_.chunkserverOPRetryIntervalUS - << ", chunkserverOPMaxRetry = " - << failReqOpt_.chunkserverOPMaxRetry; + << "chunkserverOPRetryIntervalUS = " + << failReqOpt_.chunkserverOPRetryIntervalUS + << ", chunkserverOPMaxRetry = " + << failReqOpt_.chunkserverOPMaxRetry; } - Closure* GetClosure() const { - return done_; - } + Closure* GetClosure() const { return done_; } - // 测试使用,设置closure - void SetClosure(Closure* done) { - done_ = done; - } + // For testing: set the closure + void SetClosure(Closure* done) { done_ = done; } - static FailureRequestOption GetFailOpt() { - return failReqOpt_; - } + static FailureRequestOption GetFailOpt() { return failReqOpt_; } /** - * 在重试之前根据返回值进行预处理 - * 场景1: rpc timeout,那么这时候会指数增加当前rpc的超时时间,然后直接进行重试 - * 场景2:底层OVERLOAD,那么需要在重试之前睡眠一段时间,睡眠时间根据重试次数指数增长 - * @param: rpcstatue为rpc返回值 - * @param: cntlstatus为本次rpc controller返回值 + * Preprocess based on the return value before retrying. + * Scenario 1: rpc timeout. Exponentially increase the current rpc + * timeout, then retry directly. + * Scenario 2: the underlying chunkserver is overloaded. Sleep for a + * while before retrying; the sleep time grows exponentially with the + * number of retries. + * @param rpcstatue: the rpc response status code + * @param cntlstatus: the status code of this rpc controller */ void PreProcessBeforeRetry(int rpcstatue, int cntlstatus); /** - * 底层chunkserver overload之后需要根据重试次数进行退避 - * @param: currentRetryTimes为当前已重试的次数 - * @return: 返回当前的需要睡眠的时间 + * After the underlying chunkserver reports overload, back off based on + * the number of retries + * @param currentRetryTimes: the number of retries so far + * @return: the time to sleep before the next retry */ static uint64_t OverLoadBackOff(uint64_t currentRetryTimes); /** - * rpc timeout之后需要根据重试次数进行退避 - * @param: currentRetryTimes为当前已重试的次数 - * @return: 返回下一次RPC 超时时间 + * After an rpc timeout, back off based on the number of + * retries + * @param currentRetryTimes: the number of retries so far + * @return: the next RPC timeout value */ static uint64_t TimeoutBackOff(uint64_t currentRetryTimes); @@ -207,32 +198,33 @@ class ClientClosure : public Closure { void RefreshLeader(); - static FailureRequestOption failReqOpt_; - - brpc::Controller* cntl_; - std::unique_ptr response_; - CopysetClient* client_; - Closure* done_; - // 这里保存chunkserverID,是为了区别当前这个rpc是发给哪个chunkserver的 - // 这样方便在rpc closure里直接找到,当前是哪个chunkserver返回的失败 - ChunkServerID chunkserverID_; - butil::EndPoint chunkserverEndPoint_; - - // 记录当前请求的相关信息 - MetaCache* metaCache_; - RequestClosure* reqDone_; - FileMetric* fileMetric_; - RequestContext* reqCtx_; - ChunkIDInfo chunkIdInfo_; - - // 发送重试请求前是否睡眠 + static FailureRequestOption failReqOpt_; + + brpc::Controller* cntl_; + std::unique_ptr response_; + CopysetClient* client_; + Closure* done_; + // The chunkserverID is saved here to record which chunkserver the + // current rpc was sent to, making it easy to tell directly in the rpc + // closure which chunkserver returned the failure + ChunkServerID chunkserverID_; + butil::EndPoint chunkserverEndPoint_; + + // Record relevant information for the current request + MetaCache* metaCache_; + RequestClosure* reqDone_; + FileMetric* fileMetric_; + RequestContext* reqCtx_; + ChunkIDInfo chunkIdInfo_; + + // Whether to sleep before sending a retry request bool retryDirectly_ = false; - // response 状态码 - int status_; + // response status code + int status_; - // rpc 状态码 - int cntlstatus_; + // rpc status code + int cntlstatus_; }; class WriteChunkClosure : public ClientClosure { @@ -308,7 +300,7 @@ class RecoverChunkClosure : public ClientClosure { void SendRetryRequest() override; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_CHUNK_CLOSURE_H_ diff --git a/src/client/client_common.h b/src/client/client_common.h index 8620f050d1..97598a7038 100644 --- a/src/client/client_common.h +++ b/src/client/client_common.h @@ -28,8 +28,8 @@ #include #include -#include #include +#include #include "include/client/libcurve.h" #include "src/common/throttle.h" @@ -53,7 +53,7 @@ constexpr uint64_t KiB = 1024; constexpr uint64_t MiB = 1024 * KiB; constexpr uint64_t GiB = 1024 * MiB; -// 操作类型 +// Operation type enum class OpType { READ = 0, WRITE, @@ -67,7 +67,7 @@ enum class OpType { }; /** - * 与nameserver.proto中的FileStatus一一对应 + * Corresponds one-to-one with FileStatus in nameserver.proto */ enum class FileStatus { Created = 0, @@ -90,12 +90,10 @@ typedef struct ChunkIDInfo { ChunkIDInfo(ChunkID cid, LogicPoolID lpid, CopysetID cpid) : cid_(cid), cpid_(cpid), lpid_(lpid) {} - bool Valid() const { - return lpid_ > 0 && cpid_ > 0; - } + bool Valid() const { return lpid_ > 0 && cpid_ > 0; } } ChunkIDInfo_t; -// 保存每个chunk对应的版本信息 +// Save the version information corresponding to each chunk typedef struct ChunkInfoDetail { std::vector chunkSn; } ChunkInfoDetail_t; @@ -106,7 +104,8 @@ typedef struct LeaseSession { uint64_t createTime; } LeaseSession_t; -// 保存logicalpool中segment对应的copysetid信息 +// Save the copysetid information corresponding to the segment in the +// logicalpool typedef struct LogicalPoolCopysetIDInfo { LogicPoolID lpid; std::vector cpidVec; @@ -117,7 +116,7 @@ typedef struct LogicalPoolCopysetIDInfo { } } LogicalPoolCopysetIDInfo_t; -// 保存每个segment的基本信息 +// Save basic information for each segment typedef struct SegmentInfo { uint32_t segmentsize; uint32_t chunksize; @@ -147,9 +146,9 @@ typedef struct FInfo { uint64_t length; uint64_t ctime; uint64_t seqnum; - // userinfo是当前操作这个文件的用户信息 + // userinfo is the user information currently operating on this file 
UserInfo_t userinfo; - // owner是当前文件所属信息 + // owner is the information to which the current file belongs std::string owner; std::string filename; std::string fullPathName; @@ -162,7 +161,7 @@ typedef struct FInfo { uint64_t stripeCount; std::string poolset; - OpenFlags openflags; + OpenFlags openflags; common::ReadWriteThrottleParams throttleParams; FInfo() { @@ -187,10 +186,10 @@ typedef struct FileEpoch { } } FileEpoch_t; -// PeerAddr 代表一个copyset group里的一个chunkserver节点 -// 与braft中的PeerID对应 +// PeerAddr represents a chunkserver node in a copyset group +// Corresponds to PeerID in braft struct PeerAddr { - // 节点的地址信息 + // Address information of nodes EndPoint addr_; PeerAddr() = default; @@ -198,17 +197,17 @@ struct PeerAddr { bool IsEmpty() const { return (addr_.ip == butil::IP_ANY && addr_.port == 0) && - addr_.socket_file.empty(); + addr_.socket_file.empty(); } - // 重置当前地址信息 + // Reset current address information void Reset() { addr_.ip = butil::IP_ANY; addr_.port = 0; } - // 从字符串中将地址信息解析出来 - int Parse(const std::string &str) { + // Parse address information from a string + int Parse(const std::string& str) { int idx; char ip_str[64]; if (2 > sscanf(str.c_str(), "%[^:]%*[:]%d%*[:]%d", ip_str, &addr_.port, @@ -224,8 +223,9 @@ struct PeerAddr { return 0; } - // 将该节点地址信息转化为字符串形式 - // 在get leader调用中可以将该值直接传入request + // Convert the node address information into a string format + // In the get leader call, this value can be directly passed into the + // request std::string ToString() const { char str[128]; snprintf(str, sizeof(str), "%s:%d", butil::endpoint2str(addr_).c_str(), @@ -233,32 +233,32 @@ struct PeerAddr { return std::string(str); } - bool operator==(const PeerAddr &other) const { + bool operator==(const PeerAddr& other) const { return addr_ == other.addr_; } }; -inline const char *OpTypeToString(OpType optype) { +inline const char* OpTypeToString(OpType optype) { switch (optype) { - case OpType::READ: - return "Read"; - case OpType::WRITE: - return "Write"; - case OpType::READ_SNAP: - return "ReadSnapshot"; - case OpType::DELETE_SNAP: - return "DeleteSnapshot"; - case OpType::CREATE_CLONE: - return "CreateCloneChunk"; - case OpType::RECOVER_CHUNK: - return "RecoverChunk"; - case OpType::GET_CHUNK_INFO: - return "GetChunkInfo"; - case OpType::DISCARD: - return "Discard"; - case OpType::UNKNOWN: - default: - return "Unknown"; + case OpType::READ: + return "Read"; + case OpType::WRITE: + return "Write"; + case OpType::READ_SNAP: + return "ReadSnapshot"; + case OpType::DELETE_SNAP: + return "DeleteSnapshot"; + case OpType::CREATE_CLONE: + return "CreateCloneChunk"; + case OpType::RECOVER_CHUNK: + return "RecoverChunk"; + case OpType::GET_CHUNK_INFO: + return "GetChunkInfo"; + case OpType::DISCARD: + return "Discard"; + case OpType::UNKNOWN: + default: + return "Unknown"; } } @@ -279,16 +279,14 @@ class SnapCloneClosure : public google::protobuf::Closure { class ClientDummyServerInfo { public: - static ClientDummyServerInfo &GetInstance() { + static ClientDummyServerInfo& GetInstance() { static ClientDummyServerInfo clientInfo; return clientInfo; } - void SetIP(const std::string &ip) { localIP_ = ip; } + void SetIP(const std::string& ip) { localIP_ = ip; } - std::string GetIP() const { - return localIP_; - } + std::string GetIP() const { return localIP_; } void SetPort(uint32_t port) { localPort_ = port; } @@ -309,22 +307,22 @@ class ClientDummyServerInfo { inline void TrivialDeleter(void*) {} -inline const char *FileStatusToName(FileStatus status) { +inline const char* 
FileStatusToName(FileStatus status) {
     switch (status) {
-    case FileStatus::Created:
-        return "Created";
-    case FileStatus::Deleting:
-        return "Deleting";
-    case FileStatus::Cloning:
-        return "Cloning";
-    case FileStatus::CloneMetaInstalled:
-        return "CloneMetaInstalled";
-    case FileStatus::Cloned:
-        return "Cloned";
-    case FileStatus::BeingCloned:
-        return "BeingCloned";
-    default:
-        return "Unknown";
+        case FileStatus::Created:
+            return "Created";
+        case FileStatus::Deleting:
+            return "Deleting";
+        case FileStatus::Cloning:
+            return "Cloning";
+        case FileStatus::CloneMetaInstalled:
+            return "CloneMetaInstalled";
+        case FileStatus::Cloned:
+            return "Cloned";
+        case FileStatus::BeingCloned:
+            return "BeingCloned";
+        default:
+            return "Unknown";
     }
 }

@@ -359,7 +357,7 @@ struct CreateFileContext {
     std::string poolset;
 };

-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve

 #endif  // SRC_CLIENT_CLIENT_COMMON_H_
diff --git a/src/client/client_metric.h b/src/client/client_metric.h
index 826b8b9b2d..603a0176b1 100644
--- a/src/client/client_metric.h
+++ b/src/client/client_metric.h
@@ -28,9 +28,9 @@
 #include
 #include

-#include "src/common/timeutility.h"
 #include "src/client/client_common.h"
 #include "src/common/string_util.h"
+#include "src/common/timeutility.h"

 using curve::common::TimeUtility;

@@ -48,11 +48,11 @@ struct SlowRequestMetric {
         : count(prefix, name + "_total") {}
 };

-// 秒级信息统计
+// Per-second statistics
 struct PerSecondMetric {
-    // 当前persecond计数总数
+    // Running total that backs the per-second value
     bvar::Adder count;
-    // persecond真实数据,这个数据依赖于count
+    // The actual per-second value, derived from count
     bvar::PerSecond> value;

     PerSecondMetric(const std::string& prefix, const std::string& name)
@@ -60,21 +60,21 @@
           value(prefix, name, &count, 1) {}
 };

-// 接口统计信息metric信息统计
+// Per-interface metric statistics
 struct InterfaceMetric {
-    // 接口统计信息调用qps
+    // QPS of calls to this interface
     PerSecondMetric qps;
     // error request persecond
     PerSecondMetric eps;
     // receive request persecond
     PerSecondMetric rps;
-    // 调用吞吐
+    // Call throughput
     PerSecondMetric bps;
-    // 调用超时次数qps
+    // QPS of call timeouts
     PerSecondMetric timeoutQps;
-    // 调用redirect次数qps
+    // QPS of redirected calls
     PerSecondMetric redirectQps;
-    // 调用latency
+    // Call latency
     bvar::LatencyRecorder latency;

     InterfaceMetric(const std::string& prefix, const std::string& name)
@@ -102,33 +102,36 @@ struct DiscardMetric {
     bvar::Adder pending;
 };

-// 文件级别metric信息统计
+// File-level metric statistics
 struct FileMetric {
     const std::string prefix = "curve_client";

-    // 当前metric归属于哪个文件
+    // The file this metric belongs to
     std::string filename;

-    // 当前文件inflight io数量
+    // Number of inflight IOs of the current file
     bvar::Adder inflightRPCNum;

-    // 当前文件请求的最大请求字节数,这种统计方式可以很方便的看到最大值,分位值
+    // Request sizes of the current file; recording them this way makes it
+    // easy to read the maximum and the quantile values
     bvar::LatencyRecorder readSizeRecorder;
     bvar::LatencyRecorder writeSizeRecorder;
     bvar::LatencyRecorder discardSizeRecorder;

-    // libcurve最底层read rpc接口统计信息metric统计
+    // Metric statistics of libcurve's lowest-level read RPC interface
     InterfaceMetric readRPC;
-    // libcurve最底层write rpc接口统计信息metric统计
+    // Metric statistics of libcurve's lowest-level write RPC interface
     InterfaceMetric writeRPC;
-    // 用户读请求qps、eps、rps
+    // User read request QPS, EPS, RPS
     InterfaceMetric userRead;
-    // 用户写请求qps、eps、rps
+    // User write request QPS, EPS, RPS
     InterfaceMetric userWrite;

     // user's discard request
     InterfaceMetric userDiscard;

-    // get leader失败重试qps
+    // QPS of getleader retries after failure
     PerSecondMetric getLeaderRetryQPS;

     // Number of slow requests
@@ -153,52 +156,52 @@
           discardMetric(prefix + filename) {}
 };

-// 用于全局mds接口统计信息调用信息统计
+// Global statistics of calls to the MDS interfaces
 struct MDSClientMetric {
     std::string prefix;

-    // mds的地址信息
+    // Address information of the MDS
     std::string metaserverAddr;
     bvar::PassiveStatus metaserverAddress;

-    // openfile接口统计信息
+    // OpenFile interface statistics
     InterfaceMetric openFile;
-    // createFile接口统计信息
+    // CreateFile interface statistics
     InterfaceMetric createFile;
-    // closeFile接口统计信息
+    // CloseFile interface statistics
     InterfaceMetric closeFile;
-    // getFileInfo接口统计信息
+    // GetFileInfo interface statistics
     InterfaceMetric getFile;
-    // RefreshSession接口统计信息
+    // RefreshSession interface statistics
     InterfaceMetric refreshSession;
-    // GetServerList接口统计信息
+    // GetServerList interface statistics
     InterfaceMetric getServerList;
-    // GetOrAllocateSegment接口统计信息
+    // GetOrAllocateSegment interface statistics
     InterfaceMetric getOrAllocateSegment;
-    // DeAllocateSegment接口统计信息
+    // DeAllocateSegment interface statistics
     InterfaceMetric deAllocateSegment;
-    // RenameFile接口统计信息
+    // RenameFile interface statistics
     InterfaceMetric renameFile;
-    // Extend接口统计信息
+    // Extend interface statistics
     InterfaceMetric extendFile;
-    // DeleteFile接口统计信息
+    // DeleteFile interface statistics
     InterfaceMetric deleteFile;
     // RecoverFile interface metric
     InterfaceMetric recoverFile;
-    // changeowner接口统计信息
+    // ChangeOwner interface statistics
     InterfaceMetric changeOwner;
-    // listdir接口统计信息
+    // ListDir interface statistics
     InterfaceMetric listDir;
-    // register接口统计信息
+    // Register interface statistics
     InterfaceMetric registerClient;
-    // GetChunkServerID接口统计
+    // GetChunkServerID interface statistics
     InterfaceMetric getChunkServerId;
-    // ListChunkServerInServer接口统计
+    // ListChunkServerInServer interface statistics
     InterfaceMetric listChunkserverInServer;
     // IncreaseEpoch
     InterfaceMetric increaseEpoch;

-    // 切换mds server总次数
+    // Total number of MDS server switches
    bvar::Adder mdsServerChangeTimes;

    explicit MDSClientMetric(const std::string& prefix_ = "")
@@ -245,8 +248,8 @@ struct LatencyGuard {
 class MetricHelper {
  public:
     /**
-     * 统计getleader重试次数
-     * @param: fm为当前文件的metric指针
+     * Count the number of getleader retries
+     * @param: fm is the metric pointer of the current file
      */
     static void IncremGetLeaderRetryTime(FileMetric* fm) {
         if (fm != nullptr) {
@@ -255,13 +258,14 @@
     }

     /**
-     * 统计用户当前读写请求次数,用于qps计算
-     * @param: fm为当前文件的metric指针
-     * @param: length为当前请求大小
-     * @param: read为当前操作是读操作还是写操作
+     * Count the user's current number of read/write requests, used for QPS
+     * calculation
+     * @param: fm is the metric pointer of the current file
+     * @param: length is the current request size
+     * @param: type indicates whether the current operation is a read or a
+     * write
      */
-    static void IncremUserQPSCount(FileMetric* fm,
-                                   uint64_t length,
+    static void IncremUserQPSCount(FileMetric* fm, uint64_t length,
                                    OpType type) {
         if (fm != nullptr) {
             switch (type) {
@@ -286,9 +290,11 @@
     }

     /**
-     * 统计用户当前读写请求失败次数,用于eps计算
-     * @param: fm为当前文件的metric指针
-     * @param: read为当前操作是读操作还是写操作
+     * Count the user's current number of failed read/write requests, used
+     * for EPS calculation.
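+     * For instance (an illustrative note, not part of the original patch):
+     * one failed user write does
+     *   fm->userWrite.eps.count << 1;
+     * and the bvar::PerSecond window inside PerSecondMetric turns that
+     * running count into a per-second error rate.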
+     * @param: fm is the metric pointer of the current file
+     * @param: type indicates whether the current operation is a read or a
+     * write
      */
     static void IncremUserEPSCount(FileMetric* fm, OpType type) {
         if (fm != nullptr) {
@@ -308,13 +314,18 @@
     }

     /**
-     * 统计用户当前接收到的读写请求次数,用于rps计算
-     * rps: receive request persecond, 就是当前接口每秒接收到的请求数量
-     * qps: query request persecond, 就是当前接口每秒处理的请求数量
-     * eps: error request persecond, 就是当前接口每秒出错的请求数量
-     * rps减去qps就是当前client端每秒钟等待的请求数量,这部分请求会持久占用当前一秒内的内存
-     * @param: fm为当前文件的metric指针
-     * @param: read为当前操作是读操作还是写操作
+     * Count the number of read/write requests currently received from the
+     * user, used for RPS calculation.
+     * rps: received requests per second, the number of requests the current
+     * interface receives per second
+     * qps: queried requests per second, the number of requests the current
+     * interface processes per second
+     * eps: error requests per second, the number of requests that fail per
+     * second on the current interface
+     * rps minus qps is the number of requests the client is still waiting on
+     * in each second; those requests keep occupying memory for that second
+     * @param: fm is the metric pointer of the current file
+     * @param: type indicates whether the current operation is a read or a
+     * write
      */
     static void IncremUserRPSCount(FileMetric* fm, OpType type) {
         if (fm != nullptr) {
@@ -334,9 +345,10 @@
     }

     /**
-     * 统计当前rpc失败次数,用于eps计算
-     * @param: fm为当前文件的metric指针
-     * @param: read为当前操作是读操作还是写操作
+     * Count the current number of RPC failures, used for EPS calculation
+     * @param: fm is the metric pointer of the current file
+     * @param: type indicates whether the current operation is a read or a
+     * write
      */
     static void IncremFailRPCCount(FileMetric* fm, OpType type) {
         if (fm != nullptr) {
@@ -354,9 +366,11 @@
     }

     /**
-     * 统计用户当前读写请求超时次数,用于timeoutQps计算
-     * @param: fm为当前文件的metric指针
-     * @param: read为当前操作是读操作还是写操作
+     * Count the number of times the user's read/write requests have timed
+     * out, used for timeoutQps calculation
+     * @param: fm is the metric pointer of the current file
+     * @param: type indicates whether the current operation is a read or a
+     * write
      */
     static void IncremTimeOutRPCCount(FileMetric* fm, OpType type) {
         if (fm != nullptr) {
@@ -374,9 +388,9 @@
     }

     /**
-     * 统计请求被redirect的次数
-     * @param fileMetric 当前文件的metric指针
-     * @param opType 请求类型
+     * Count the number of times requests have been redirected
+     * @param fileMetric The metric pointer of the current file
+     * @param opType The request type
      */
     static void IncremRedirectRPCCount(FileMetric* fileMetric, OpType opType) {
         if (fileMetric) {
@@ -394,13 +408,14 @@
     }

     /**
-     * 统计读写RPC接口统计信息请求次数及带宽统计,用于qps及bps计算
-     * @param: fm为当前文件的metric指针
-     * @param: length为当前请求大小
-     * @param: read为当前操作是读操作还是写操作
+     * Count request counts and bandwidth of the read/write RPC interfaces,
+     * used for QPS and BPS calculation
+     * @param: fm is the metric pointer of the current file
+     * @param: length is the current request size
+     * @param: type indicates whether the current operation is a read or a
+     * write
      */
-    static void IncremRPCQPSCount(FileMetric* fm,
-                                  uint64_t length,
+    static void IncremRPCQPSCount(FileMetric* fm, uint64_t length,
                                   OpType type) {
         if (fm != nullptr) {
             switch (type) {
@@ -419,13 +434,14 @@
     }

     /**
-     * 统计读写RPC接口统计信息请求次数及带宽统计,用于rps计算
-     * @param: fm为当前文件的metric指针
-     * @param: length为当前请求大小
-     * @param: read为当前操作是读操作还是写操作
+     * Count request counts and bandwidth of the read/write RPC interfaces,
+     * used for RPS calculation
+     * @param: fm is the metric pointer of the current file
+     * @param: length is the current request size
+     * @param: type indicates whether the current operation is a read or a
+     * write
      */
-    static void IncremRPCRPSCount(FileMetric* fm,
-                                  OpType type) {
+    static void IncremRPCRPSCount(FileMetric* fm, OpType type) {
         if (fm != nullptr) {
             switch (type) {
                 case OpType::READ:
@@ -440,9 +456,7 @@
         }
     }

-    static void LatencyRecord(FileMetric* fm,
-                              uint64_t duration,
-                              OpType type) {
+    static void LatencyRecord(FileMetric* fm, uint64_t duration, OpType type) {
         if (fm != nullptr) {
             switch (type) {
                 case OpType::READ:
@@ -457,8 +471,7 @@
         }
     }

-    static void UserLatencyRecord(FileMetric* fm,
-                                  uint64_t duration,
+    static void UserLatencyRecord(FileMetric* fm, uint64_t duration,
                                   OpType type) {
         if (fm != nullptr) {
             switch (type) {
@@ -502,7 +515,7 @@
         }
     }
 };
-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve

 #endif  // SRC_CLIENT_CLIENT_METRIC_H_
diff --git a/src/client/config_info.h b/src/client/config_info.h
index 620d464eae..e324a6e8ba 100644
--- a/src/client/config_info.h
+++ b/src/client/config_info.h
@@ -24,6 +24,7 @@
 #define SRC_CLIENT_CONFIG_INFO_H_

 #include
+
 #include
 #include

@@ -31,9 +32,9 @@ namespace curve {
 namespace client {

 /**
- * log的基本配置信息
- * @logLevel: 是log打印等级
- * @logPath: log打印位置
+ * Basic log configuration
+ * @logLevel: the log level
+ * @logPath: the log output path
 */
 struct LogInfo {
     int logLevel = 2;
@@ -41,8 +42,9 @@
 };

 /**
- * in flight IO控制信息
- * @fileMaxInFlightRPCNum: 为一个文件中最大允许的inflight IO数量
+ * Inflight IO control information
+ * @fileMaxInFlightRPCNum: the maximum number of inflight IOs allowed in one
+ * file
 */
 struct InFlightIOCntlInfo {
     uint64_t fileMaxInFlightRPCNum = 2048;
@@ -78,27 +80,29 @@ struct MetaServerOption {
 };

 /**
- * 租约基本配置
- * @mdsRefreshTimesPerLease: 一个租约内续约次数,client与mds之间通过租约保持心跳
- *                           如果双方约定租约有效期为10s,那么client会在这10s内
- *                           发送mdsRefreshTimesPerLease次心跳,如果连续失败,
- *                           那么client认为当前mds存在异常,会阻塞后续的IO,直到
- *                           续约成功。
+ * Basic lease configuration
+ * @mdsRefreshTimesPerLease: the number of renewals within one lease period.
+ * The heartbeat between the client and the MDS is maintained through the
+ * lease: if both sides agree on a lease term of 10s, the client sends
+ * mdsRefreshTimesPerLease heartbeats within those 10s. If they all fail, the
+ * client assumes the current MDS is abnormal and blocks subsequent IO until
+ * a renewal succeeds.
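+ *
+ * A worked example of the cadence described above (a sketch; the variable
+ * names are illustrative): with a 10s lease and the default
+ * mdsRefreshTimesPerLease = 5,
+ *   refreshIntervalUs = leaseTimeUs / mdsRefreshTimesPerLease
+ *                     = 10 * 1000 * 1000 / 5
+ * i.e. the client sends one renewal heartbeat roughly every 2 seconds.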
 */
 struct LeaseOption {
     uint32_t mdsRefreshTimesPerLease = 5;
 };

 /**
- * rpc超时,判断是否unstable的参数
+ * Parameters for RPC timeouts and for deciding whether a chunkserver is
+ * unstable
  * @maxStableChunkServerTimeoutTimes:
- *   一个chunkserver连续超时请求的阈值, 超过之后会检查健康状态,
- *   如果不健康,则标记为unstable
+ *   the threshold of consecutive timed-out requests for one chunkserver;
+ *   once exceeded, its health is checked, and it is marked unstable if it
+ *   is not healthy
  * @checkHealthTimeoutMS:
- *   检查chunkserver是否健康的http请求超时时间
+ *   the timeout of the HTTP request that checks whether a chunkserver is
+ *   healthy
  * @serverUnstableThreashold:
- *   一个server上超过serverUnstableThreashold个chunkserver都标记为unstable,
- *   整个server上的所有chunkserver都标记为unstable
+ *   once more than serverUnstableThreashold chunkservers on one server are
+ *   marked unstable, all chunkservers on that server are marked unstable
 */
 struct ChunkServerUnstableOption {
     uint32_t maxStableChunkServerTimeoutTimes = 64;
@@ -107,37 +111,40 @@
 };

 /**
- * 发送失败的chunk request处理
+ * Handling of failed chunk requests:
  * @chunkserverOPMaxRetry:
- *   最大重试次数,一个RPC下发到底层chunkserver,最大允许的失败
- *   次数,超限之后会向上返回用户。
+ *   Maximum retry count allowed for an RPC sent to the underlying
+ *   chunkserver. If exceeded, the failure is returned to the user.
  * @chunkserverOPRetryIntervalUS:
- *   相隔多久再重试,一个rpc失败之后client会根据其返回
- *   状态决定是否需要睡眠一段时间再重试,目前除了
- *   TIMEOUT、REDIRECTED,这两种返回值,其他返回值都是需要
- *   先睡眠一段时间再重试。
+ *   Time interval between retries. After a failed RPC, the client will sleep
+ *   for a period determined by the RPC response status before retrying.
+ *   Currently, except for TIMEOUT and REDIRECTED, all other response values
+ *   require sleeping for some time before retrying.
- * @chunkserverRPCTimeoutMS: 为每个rpc发送时,其rpc controller配置的超时时间
+ * @chunkserverRPCTimeoutMS: Timeout configured for the RPC controller of
+ *   each RPC sent.
  * @chunkserverMaxRPCTimeoutMS:
- *   在底层chunkserver返回TIMEOUT时,说明当前请求在底层
- *   无法及时得到处理,原因可能是底层的排队任务太多,这时候如果
- *   以相同的rpc 超时时间再去发送请求,很有可能最后还是超时,
- *   所以为了避免底层处理请求时,rpc在client一侧已经超时的这种
- *   状况,为rpc超时时间增加了指数退避逻辑,超时时间会逐渐增加,
- *   最大不能超过该值。
+ *   When the underlying chunkserver returns TIMEOUT, it means the current
+ *   request cannot be processed promptly, possibly due to a large number of
+ *   queued tasks. In such cases, sending requests with the same RPC timeout
+ *   again may still result in timeouts. To avoid this, exponential backoff
+ *   logic is applied to increase the timeout gradually, but it cannot exceed
+ *   this maximum value.
  * @chunkserverMaxRetrySleepIntervalUS:
- *   在底层返回OVERLOAD时,表明当前chunkserver
- *   压力过大,这时候睡眠时间会进行指数退避,睡眠时间会加长,这样
- *   能够保证client的请求不会将底层chunkserver打满,但是睡眠时间
- *   最长不能超过该值。
+ *   When the underlying chunkserver returns OVERLOAD, indicating excessive
+ *   pressure, the sleep interval is exponentially extended to ensure that
+ *   client requests do not overwhelm the underlying chunkserver. However,
+ *   the maximum sleep time cannot exceed this value.
- * @chunkserverMaxStableTimeoutTimes: 一个chunkserver连续超时请求的阈值,
- *   超过之后 会标记为unstable。因为一个chunkserver所在的server如果宕机
- *   那么发向该chunkserver的请求都会超时,如果同一个chunkserver
- *   的rpc连续超时超过该阈值,那么client就认为这个chunkserver
- *   所在的server可能宕机了,就将该server上的所有leader
- *   copyset 标记为unstable,促使其下次发送rpc前,先去getleader。
+ * @chunkserverMaxStableTimeoutTimes:
+ *   Threshold for consecutive timeouts of RPCs to one chunkserver. If
+ *   exceeded, the chunkserver is marked as unstable. This is because if the
+ *   server where a chunkserver resides crashes, requests sent to
+ *   that chunk server will all time out.
If the same chunk server's RPCs + * consecutively timeout beyond this threshold, the client assumes that the + * server where it resides may have crashed and marks all leader copysets on + * that server as unstable, prompting a leader retrieval before sending any + * RPCs. * @chunkserverMinRetryTimesForceTimeoutBackoff: - * 当一个请求重试次数超过阈值时,还在重试 使其超时时间进行指数退避 + * When a request exceeds the retry count threshold, it continues to retry with + * exponential backoff for its timeout duration. */ struct FailureRequestOption { uint32_t chunkserverOPMaxRetry = 3; @@ -154,9 +161,11 @@ struct FailureRequestOption { }; /** - * 发送rpc给chunkserver的配置 - * @inflightOpt: 一个文件向chunkserver发送请求时的inflight 请求控制配置 - * @failRequestOpt: rpc发送失败之后,需要进行rpc重试的相关配置 + * Configuration for sending rpc to chunkserver + * @inflightOpt: Configuration of inflight request control when a file sends a + * request to chunkserver + * @failRequestOpt: After rpc sending fails, relevant configuration for rpc + * retry needs to be carried out */ struct IOSenderOption { InFlightIOCntlInfo inflightOpt; @@ -164,10 +173,12 @@ struct IOSenderOption { }; /** - * scheduler模块基本配置信息,schedule模块是用于分发用户请求,每个文件有自己的schedule - * 线程池,线程池中的线程各自配置一个队列 - * @scheduleQueueCapacity: schedule模块配置的队列深度 - * @scheduleThreadpoolSize: schedule模块线程池大小 + Basic Configuration Information for the Scheduler Module + * The scheduler module is used for distributing user requests. Each file has + its own scheduler thread pool, and each thread in the pool is configured with + its own queue. + * @scheduleQueueCapacity: The queue depth configured by the schedule module + * @scheduleThreadpoolSize: schedule module thread pool size */ struct RequestScheduleOption { uint32_t scheduleQueueCapacity = 1024; @@ -176,26 +187,29 @@ struct RequestScheduleOption { }; /** - * metaccache模块配置信息 + * MetaCache Module Configuration * @metacacheGetLeaderRetry: - * 获取leader重试次数,一个rpc发送到chunkserver之前需要先 - * 获取当前copyset的leader,如果metacache中没有这个信息, - * 就向copyset的peer发送getleader请求,如果getleader失败, - * 需要重试,最大重试次数为该值。 + * Number of retries to get the leader. Before an RPC is sent to the + * chunkserver, it needs to first obtain the leader for the current copyset. If + * this information is not available in the metacache, a getleader request is + * sent to a copyset's peers. If getleader fails, it needs to be retried, with a + * maximum retry count defined by this value. * @metacacheRPCRetryIntervalUS: - * 如上所述,如果getleader请求失败,会发起重试,但是并 - * 不会立即进行重试,而是选择先睡眠一段时间在重试。该值代表 - * 睡眠长度。 - * @metacacheGetLeaderRPCTimeOutMS: 发送getleader rpc请求的rpc - * controller最大超时时间 + * As mentioned above, if a getleader request fails, it will be retried, but not + * immediately. Instead, there will be a delay before the retry. This value + * represents the length of that delay. + * @metacacheGetLeaderRPCTimeOutMS: The maximum timeout duration for the RPC + * controller when sending a 'getleader' RPC request * @metacacheGetLeaderBackupRequestMS: - * 因为一个copyset有三个或者更多的peer,getleader - * 会以backuprequest的方式向这些peer发送rpc,在brpc内部 - * 会串行发送,如果第一个请求超过一定时间还没返回,就直接向 - * 下一个peer发送请求,而不用等待上一次请求返回或超时,这个触发 - * backup request的时间就为该值。 - * @metacacheGetLeaderBackupRequestLbName: 为getleader backup rpc - * 选择底层服务节点的策略 + * Since a copyset has three or more peers, getleader requests are + * sent to these peers in a backuprequest manner. + * Internally, in brpc, these requests are sent + * serially. 
If the first request takes too long to return, the next request is + * sent to the next peer without waiting for the previous one to return or time + * out. The time at which backup requests are triggered is determined by this + * value. + * @metacacheGetLeaderBackupRequestLbName: Strategy for selecting the underlying + * service nodes for getleader backup RPCs. */ struct MetaCacheOption { uint32_t metacacheGetLeaderRetry = 3; @@ -208,21 +222,23 @@ struct MetaCacheOption { }; /** - * IO 拆分模块配置信息 + * IO Split Module Configuration * @fileIOSplitMaxSizeKB: - * 用户下发IO大小client没有限制,但是client会将用户的IO进行拆分, - * 发向同一个chunkserver的请求锁携带的数据大小不能超过该值。 + * The size of user-issued IOs is not restricted by the client. However, the + * client will split the user's IOs, and the data size carried by requests sent + * to the same chunkserver cannot exceed this value. */ struct IOSplitOption { uint64_t fileIOSplitMaxSizeKB = 64; }; /** - * 线程隔离任务队列配置信息 - * 线程隔离主要是为了上层做异步接口调用时,直接将其调用任务推到线程池中而不是让其阻塞到放入 - * 分发队列线程池。 - * @isolationTaskQueueCapacity: 隔离线程池的队列深度 - * @isolationTaskThreadPoolSize: 隔离线程池容量 + * Configuration information for thread-isolated task queues. + * Thread isolation is primarily used to push asynchronous interface calls + * directly into the thread pool instead of blocking them until they are placed + * in the dispatch queue thread pool. + * @isolationTaskQueueCapacity: The queue depth of the isolation thread pool. + * @isolationTaskThreadPoolSize: The capacity of the isolation thread pool. */ struct TaskThreadOption { uint64_t isolationTaskQueueCapacity = 500000; @@ -250,7 +266,8 @@ struct ThrottleOption { }; /** - * IOOption存储了当前io 操作所需要的所有配置信息 + * IOOption stores all the configuration information required for the current IO + * operation */ struct IOOption { IOSplitOption ioSplitOpt; @@ -264,11 +281,12 @@ struct IOOption { }; /** - * client一侧常规的共同的配置信息 - * @mdsRegisterToMDS: 是否向mds注册client信息,因为client需要通过dummy - * server导出 metric信息,为了配合普罗米修斯的自动服务发现机制,会将其监听的 - * ip和端口信息发送给mds。 - * @turnOffHealthCheck: 是否关闭健康检查 + * Common client-side configuration options: + * @mdsRegisterToMDS: Whether to register client information with the MDS. Since + * the client needs to export metric information through a dummy server to + * support Prometheus's automatic service discovery mechanism, it sends its + * listening IP and port information to the MDS. + * @turnOffHealthCheck: Whether to disable health checks. 
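+ *
+ * A minimal usage sketch (illustrative only, not code from this patch):
+ *   CommonConfigOpt common;
+ *   common.mdsRegisterToMDS = true;    // export metrics for Prometheus
+ *   common.turnOffHealthCheck = false; // keep chunkserver health checks on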
*/ struct CommonConfigOpt { bool mdsRegisterToMDS = false; @@ -284,7 +302,8 @@ struct CommonConfigOpt { }; /** - * ClientConfigOption是外围快照系统需要设置的配置信息 + * ClientConfigOption is the configuration information that needs to be set for + * the peripheral snapshot system */ struct ClientConfigOption { LogInfo loginfo; @@ -296,25 +315,24 @@ struct ClientConfigOption { struct ChunkServerBroadCasterOption { uint32_t broadCastMaxNum; - ChunkServerBroadCasterOption() - : broadCastMaxNum(200) {} + ChunkServerBroadCasterOption() : broadCastMaxNum(200) {} }; struct ChunkServerClientRetryOptions { - uint32_t rpcTimeoutMs; - uint32_t rpcMaxTry; - uint32_t rpcIntervalUs; - uint32_t rpcMaxTimeoutMs; + uint32_t rpcTimeoutMs; + uint32_t rpcMaxTry; + uint32_t rpcIntervalUs; + uint32_t rpcMaxTimeoutMs; ChunkServerClientRetryOptions() - : rpcTimeoutMs(500), - rpcMaxTry(3), - rpcIntervalUs(100000), - rpcMaxTimeoutMs(8000) {} + : rpcTimeoutMs(500), + rpcMaxTry(3), + rpcIntervalUs(100000), + rpcMaxTimeoutMs(8000) {} }; /** - * FileServiceOption是QEMU侧总体配置信息 + * FileServiceOption is the overall configuration information on the QEMU side */ struct FileServiceOption { LogInfo loginfo; diff --git a/src/client/copyset_client.cpp b/src/client/copyset_client.cpp index 964929d18f..9211070715 100644 --- a/src/client/copyset_client.cpp +++ b/src/client/copyset_client.cpp @@ -24,21 +24,21 @@ #include #include + #include #include -#include "src/client/request_sender.h" -#include "src/client/metacache.h" #include "src/client/client_config.h" -#include "src/client/request_scheduler.h" +#include "src/client/metacache.h" #include "src/client/request_closure.h" +#include "src/client/request_scheduler.h" +#include "src/client/request_sender.h" namespace curve { namespace client { -int CopysetClient::Init(MetaCache *metaCache, - const IOSenderOption& ioSenderOpt, RequestScheduler* scheduler, - FileMetric* fileMetric) { +int CopysetClient::Init(MetaCache* metaCache, const IOSenderOption& ioSenderOpt, + RequestScheduler* scheduler, FileMetric* fileMetric) { if (nullptr == metaCache || scheduler == nullptr) { LOG(ERROR) << "metacache or scheduler is null!"; return -1; @@ -47,7 +47,7 @@ int CopysetClient::Init(MetaCache *metaCache, metaCache_ = metaCache; scheduler_ = scheduler; fileMetric_ = fileMetric; - senderManager_ = new(std::nothrow) RequestSenderManager(); + senderManager_ = new (std::nothrow) RequestSenderManager(); if (nullptr == senderManager_) { return -1; } @@ -63,30 +63,33 @@ int CopysetClient::Init(MetaCache *metaCache, return 0; } bool CopysetClient::FetchLeader(LogicPoolID lpid, CopysetID cpid, - ChunkServerID* leaderid, butil::EndPoint* leaderaddr) { - // 1. 先去当前metacache中拉取leader信息 - if (0 == metaCache_->GetLeader(lpid, cpid, leaderid, - leaderaddr, false, fileMetric_)) { + ChunkServerID* leaderid, + butil::EndPoint* leaderaddr) { + // 1. First, pull the leader information from the current metacache + if (0 == metaCache_->GetLeader(lpid, cpid, leaderid, leaderaddr, false, + fileMetric_)) { return true; } - // 2. 如果metacache中leader信息拉取失败,就发送RPC请求获取新leader信息 - if (-1 == metaCache_->GetLeader(lpid, cpid, leaderid, - leaderaddr, true, fileMetric_)) { + // 2. If the pull of leader information in the metacache fails, send an RPC + // request to obtain new leader information + if (-1 == metaCache_->GetLeader(lpid, cpid, leaderid, leaderaddr, true, + fileMetric_)) { LOG(WARNING) << "Get leader address form cache failed, but " - << "also refresh leader address failed from mds." 
- << "(<" << lpid << ", " << cpid << ">)"; + << "also refresh leader address failed from mds." + << "(<" << lpid << ", " << cpid << ">)"; return false; } return true; } -// 因为这里的CopysetClient::ReadChunk(会在两个逻辑里调用 -// 1. 从request scheduler下发的新的请求 -// 2. clientclosure再重试逻辑里调用copyset client重试 -// 这两种状况都会调用该接口,因为对于重试的RPC有可能需要重新push到队列中 -// 非重试的RPC如果重新push到队列中会导致死锁。 +// Because the CopysetClient::ReadChunk (will be called in two logics) here +// 1. New requests issued from the request scheduler +// 2. Calling copyset client to retry in the clientclosure retry logic +// Both of these situations will call the interface, as retrying RPCs may +// require re pushing to the queue If non retrying RPC is pushed back into the +// queue, it will cause a deadlock. int CopysetClient::ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset, size_t length, const RequestSourceInfo& sourceInfo, @@ -94,26 +97,31 @@ int CopysetClient::ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, RequestClosure* reqclosure = static_cast(done); brpc::ClosureGuard doneGuard(done); - // session过期情况下重试有两种场景: - // 1. 正常重试过程,非文件关闭状态,这时候RPC直接重新push到scheduler队列头部 - // 重试调用是在brpc的线程里,所以这里不会卡住重试的RPC,这样 - // 不会阻塞brpc线程,因为brpc线程是所有文件公用的。避免影响其他文件 - // 因为session续约失败可能只是网络问题,等待续约成功之后IO其实还可以 - // 正常下发,所以不能直接向上返回失败,在底层hang住,等续约成功之后继续发送 - // 2. 在关闭文件过程中exitFlag_=true,重试rpc会直接向上通过closure返回给用户 - // return调用之后doneguard会调用closure的run,会释放inflight rpc计数, - // 然后closure向上返回给用户。 + // There are two scenarios for retrying when a session expires: + // 1. During the normal retry process, if the file is not in a closed state, + // RPC will directly re push to the scheduler queue header + // The retry call is in the brpc thread, so there will be no blocking of + // the retry RPC here Will not block the brpc thread as it is common to + // all files. Avoid affecting other files Because the session renewal + // failure may only be a network issue, IO is actually still possible + // after the renewal is successful Normal distribution, so failure cannot + // be directly returned upwards. Hang on at the bottom and continue + // sending after the renewal is successful + // 2. exitFlag_=true during file closing, retrying rpc will directly return + // to the user through closure + // After the return call, doneguard will call the run of the closure, + // releasing the inflight rpc count, Then the closure is returned to the + // user upwards. if (sessionNotValid_ == true) { if (exitFlag_) { LOG(WARNING) << " return directly for session not valid at exit!" 
- << ", copyset id = " << idinfo.cpid_ - << ", logical pool id = " << idinfo.lpid_ - << ", chunk id = " << idinfo.cid_ - << ", offset = " << offset - << ", len = " << length; + << ", copyset id = " << idinfo.cpid_ + << ", logical pool id = " << idinfo.lpid_ + << ", chunk id = " << idinfo.cid_ + << ", offset = " << offset << ", len = " << length; return 0; } else { - // session过期之后需要重新push到队列 + // After the session expires, it needs to be re pushed to the queue LOG(WARNING) << "session not valid, read rpc ReSchedule!"; doneGuard.release(); reqclosure->ReleaseInflightRPCToken(); @@ -123,20 +131,17 @@ int CopysetClient::ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, } auto task = [&](Closure* done, std::shared_ptr senderPtr) { - ReadChunkClosure *readDone = new ReadChunkClosure(this, done); - senderPtr->ReadChunk(idinfo, sn, offset, - length, sourceInfo, readDone); + ReadChunkClosure* readDone = new ReadChunkClosure(this, done); + senderPtr->ReadChunk(idinfo, sn, offset, length, sourceInfo, readDone); }; return DoRPCTask(idinfo, task, doneGuard.release()); } -int CopysetClient::WriteChunk(const ChunkIDInfo& idinfo, - uint64_t fileId, - uint64_t epoch, - uint64_t sn, - const butil::IOBuf& data, - off_t offset, size_t length, +int CopysetClient::WriteChunk(const ChunkIDInfo& idinfo, uint64_t fileId, + uint64_t epoch, uint64_t sn, + const butil::IOBuf& data, off_t offset, + size_t length, const RequestSourceInfo& sourceInfo, google::protobuf::Closure* done) { std::shared_ptr senderPtr = nullptr; @@ -146,23 +151,28 @@ int CopysetClient::WriteChunk(const ChunkIDInfo& idinfo, brpc::ClosureGuard doneGuard(done); - // session过期情况下重试有两种场景: - // 1. 正常重试过程,非文件关闭状态,这时候RPC直接重新push到scheduler队列头部 - // 重试调用是在brpc的线程里,所以这里不会卡住重试的RPC,这样 - // 不会阻塞brpc线程,因为brpc线程是所有文件公用的。避免影响其他文件 - // 因为session续约失败可能只是网络问题,等待续约成功之后IO其实还可以 - // 正常下发,所以不能直接向上返回失败,在底层hang住,等续约成功之后继续发送 - // 2. 在关闭文件过程中exitFlag_=true,重试rpc会直接向上通过closure返回给用户 - // return调用之后doneguard会调用closure的run,会释放inflight rpc计数, - // 然后closure向上返回给用户。 + // There are two scenarios for retrying when a session expires: + // 1. During the normal retry process, if the file is not in a closed state, + // RPC will directly re push to the scheduler queue header + // The retry call is in the brpc thread, so there will be no blocking of + // the retry RPC here Will not block the brpc thread as it is common to + // all files. Avoid affecting other files Because the session renewal + // failure may only be a network issue, IO is actually still possible + // after the renewal is successful Normal distribution, so failure cannot + // be directly returned upwards. Hang on at the bottom and continue + // sending after the renewal is successful + // 2. exitFlag_=true during file closing, retrying rpc will directly return + // to the user through closure + // After the return call, doneguard will call the run of the closure, + // releasing the inflight rpc count, Then the closure is returned to the + // user upwards. if (sessionNotValid_ == true) { if (exitFlag_) { LOG(WARNING) << " return directly for session not valid at exit!" 
- << ", copyset id = " << idinfo.cpid_ - << ", logical pool id = " << idinfo.lpid_ - << ", chunk id = " << idinfo.cid_ - << ", offset = " << offset - << ", len = " << length; + << ", copyset id = " << idinfo.cpid_ + << ", logical pool id = " << idinfo.lpid_ + << ", chunk id = " << idinfo.cid_ + << ", offset = " << offset << ", len = " << length; return 0; } else { LOG(WARNING) << "session not valid, write rpc ReSchedule!"; @@ -175,19 +185,18 @@ int CopysetClient::WriteChunk(const ChunkIDInfo& idinfo, auto task = [&](Closure* done, std::shared_ptr senderPtr) { WriteChunkClosure* writeDone = new WriteChunkClosure(this, done); - senderPtr->WriteChunk(idinfo, fileId, epoch, sn, - data, offset, length, sourceInfo, - writeDone); + senderPtr->WriteChunk(idinfo, fileId, epoch, sn, data, offset, length, + sourceInfo, writeDone); }; return DoRPCTask(idinfo, task, doneGuard.release()); } -int CopysetClient::ReadChunkSnapshot(const ChunkIDInfo& idinfo, - uint64_t sn, off_t offset, size_t length, Closure *done) { - +int CopysetClient::ReadChunkSnapshot(const ChunkIDInfo& idinfo, uint64_t sn, + off_t offset, size_t length, + Closure* done) { auto task = [&](Closure* done, std::shared_ptr senderPtr) { - ReadChunkSnapClosure *readDone = new ReadChunkSnapClosure(this, done); + ReadChunkSnapClosure* readDone = new ReadChunkSnapClosure(this, done); senderPtr->ReadChunkSnapshot(idinfo, sn, offset, length, readDone); }; @@ -195,21 +204,22 @@ int CopysetClient::ReadChunkSnapshot(const ChunkIDInfo& idinfo, } int CopysetClient::DeleteChunkSnapshotOrCorrectSn(const ChunkIDInfo& idinfo, - uint64_t correctedSn, Closure *done) { - + uint64_t correctedSn, + Closure* done) { auto task = [&](Closure* done, std::shared_ptr senderPtr) { - DeleteChunkSnapClosure *deleteDone = new DeleteChunkSnapClosure( - this, done); - senderPtr->DeleteChunkSnapshotOrCorrectSn(idinfo, - correctedSn, deleteDone); + DeleteChunkSnapClosure* deleteDone = + new DeleteChunkSnapClosure(this, done); + senderPtr->DeleteChunkSnapshotOrCorrectSn(idinfo, correctedSn, + deleteDone); }; return DoRPCTask(idinfo, task, done); } -int CopysetClient::GetChunkInfo(const ChunkIDInfo& idinfo, Closure *done) { +int CopysetClient::GetChunkInfo(const ChunkIDInfo& idinfo, Closure* done) { auto task = [&](Closure* done, std::shared_ptr senderPtr) { - GetChunkInfoClosure *chunkInfoDone = new GetChunkInfoClosure(this, done); // NOLINT + GetChunkInfoClosure* chunkInfoDone = + new GetChunkInfoClosure(this, done); // NOLINT senderPtr->GetChunkInfo(idinfo, chunkInfoDone); }; @@ -217,9 +227,9 @@ int CopysetClient::GetChunkInfo(const ChunkIDInfo& idinfo, Closure *done) { } int CopysetClient::CreateCloneChunk(const ChunkIDInfo& idinfo, - const std::string& location, uint64_t sn, - uint64_t correntSn, uint64_t chunkSize, - Closure* done) { + const std::string& location, uint64_t sn, + uint64_t correntSn, uint64_t chunkSize, + Closure* done) { auto task = [&](Closure* done, std::shared_ptr senderPtr) { CreateCloneChunkClosure* createCloneDone = new CreateCloneChunkClosure(this, done); @@ -230,22 +240,22 @@ int CopysetClient::CreateCloneChunk(const ChunkIDInfo& idinfo, return DoRPCTask(idinfo, task, done); } -int CopysetClient::RecoverChunk(const ChunkIDInfo& idinfo, - uint64_t offset, +int CopysetClient::RecoverChunk(const ChunkIDInfo& idinfo, uint64_t offset, uint64_t len, Closure* done) { auto task = [&](Closure* done, std::shared_ptr senderPtr) { RecoverChunkClosure* recoverChunkDone = new RecoverChunkClosure(this, done); - senderPtr->RecoverChunk(idinfo, recoverChunkDone, 
offset, - len); + senderPtr->RecoverChunk(idinfo, recoverChunkDone, offset, len); }; return DoRPCTask(idinfo, task, done); } -int CopysetClient::DoRPCTask(const ChunkIDInfo& idinfo, - std::function senderptr)> task, Closure *done) { +int CopysetClient::DoRPCTask( + const ChunkIDInfo& idinfo, + std::function senderptr)> + task, + Closure* done) { RequestClosure* reqclosure = static_cast(done); ChunkServerID leaderId; @@ -253,30 +263,30 @@ int CopysetClient::DoRPCTask(const ChunkIDInfo& idinfo, brpc::ClosureGuard doneGuard(done); while (reqclosure->GetRetriedTimes() < - iosenderopt_.failRequestOpt.chunkserverOPMaxRetry) { + iosenderopt_.failRequestOpt.chunkserverOPMaxRetry) { reqclosure->IncremRetriedTimes(); - if (false == FetchLeader(idinfo.lpid_, idinfo.cpid_, - &leaderId, &leaderAddr)) { + if (false == + FetchLeader(idinfo.lpid_, idinfo.cpid_, &leaderId, &leaderAddr)) { bthread_usleep( - iosenderopt_.failRequestOpt.chunkserverOPRetryIntervalUS); + iosenderopt_.failRequestOpt.chunkserverOPRetryIntervalUS); continue; } - auto senderPtr = senderManager_->GetOrCreateSender(leaderId, - leaderAddr, iosenderopt_); + auto senderPtr = senderManager_->GetOrCreateSender(leaderId, leaderAddr, + iosenderopt_); if (nullptr != senderPtr) { task(doneGuard.release(), senderPtr); break; } else { LOG(WARNING) << "create or reset sender failed, " - << ", leaderId = " << leaderId; + << ", leaderId = " << leaderId; bthread_usleep( - iosenderopt_.failRequestOpt.chunkserverOPRetryIntervalUS); + iosenderopt_.failRequestOpt.chunkserverOPRetryIntervalUS); continue; } } return 0; } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/copyset_client.h b/src/client/copyset_client.h index 3dc1fc66f7..0881a7ffac 100644 --- a/src/client/copyset_client.h +++ b/src/client/copyset_client.h @@ -23,11 +23,11 @@ #ifndef SRC_CLIENT_COPYSET_CLIENT_H_ #define SRC_CLIENT_COPYSET_CLIENT_H_ -#include #include +#include -#include #include +#include #include "include/curve_compiler_specific.h" #include "src/client/client_common.h" @@ -43,12 +43,14 @@ namespace client { using curve::common::Uncopyable; using ::google::protobuf::Closure; -// TODO(tongguangxun) :后续除了read、write的接口也需要调整重试逻辑 +// TODO(tongguangxun): In addition to the read and write interfaces, the retry +// logic needs to be adjusted in the future class MetaCache; class RequestScheduler; /** - * 负责管理 ChunkServer 的链接,向上层提供访问 - * 指定 copyset 的 chunk 的 read/write 等接口 + * Responsible for managing connections to ChunkServers and providing + * upper-layer access to read/write interfaces for specific chunks within a + * copyset. 
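+ *
+ * A rough usage sketch (illustrative only; the variable names are assumed):
+ *   ChunkIDInfo id(cid, lpid, cpid);
+ *   copysetClient.ReadChunk(id, sn, offset, length, sourceInfo, done);
+ * where `done` is the upper layer's asynchronous closure and the leader of
+ * the copyset is resolved internally before the RPC is sent.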
 */
 class CopysetClient {
  public:
@@ -68,120 +70,101 @@
         senderManager_ = nullptr;
     }

-    int Init(MetaCache *metaCache,
-             const IOSenderOption& ioSenderOpt,
+    int Init(MetaCache* metaCache, const IOSenderOption& ioSenderOpt,
              RequestScheduler* scheduler = nullptr,
              FileMetric* fileMetic = nullptr);
     /**
-     * 返回依赖的Meta Cache
+     * Return the MetaCache this client depends on
      */
-    MetaCache* GetMetaCache() {
-        return metaCache_;
-    }
+    MetaCache* GetMetaCache() { return metaCache_; }

     /**
-     * 读Chunk
-     * @param idinfo为chunk相关的id信息
-     * @param sn:文件版本号
-     * @param offset:读的偏移
-     * @param length:读的长度
-     * @param souceInfo chunk克隆源信息
-     * @param done:上一层异步回调的closure
+     * Read a chunk
+     * @param idinfo is the ID information of the chunk
+     * @param sn: the file sequence (version) number
+     * @param offset: the read offset
+     * @param length: the read length
+     * @param sourceInfo: the chunk clone source information
+     * @param done: the closure for the upper layer's asynchronous callback
      */
-    int ReadChunk(const ChunkIDInfo& idinfo,
-                  uint64_t sn,
-                  off_t offset,
-                  size_t length,
-                  const RequestSourceInfo& sourceInfo,
-                  google::protobuf::Closure *done);
+    int ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset,
+                  size_t length, const RequestSourceInfo& sourceInfo,
+                  google::protobuf::Closure* done);

     /**
-     * 写Chunk
-     * @param idinfo为chunk相关的id信息
-     * @param fileId: file id
-     * @param epoch: file epoch
-     * @param sn:文件版本号
-     * @param writeData:要写入的数据
-     * @param offset:写的偏移
-     * @param length:写的长度
-     * @param sourceInfo chunk克隆源信息
-     * @param done:上一层异步回调的closure
-     */
-    int WriteChunk(const ChunkIDInfo& idinfo,
-                   uint64_t fileId,
-                   uint64_t epoch,
-                   uint64_t sn,
-                   const butil::IOBuf& writeData,
-                   off_t offset,
-                   size_t length,
-                   const RequestSourceInfo& sourceInfo,
-                   Closure *done);
+     * Write a chunk
+     * @param idinfo is the ID information of the chunk
+     * @param fileId: file id
+     * @param epoch: file epoch
+     * @param sn: the file sequence (version) number
+     * @param writeData: the data to be written
+     * @param offset: the write offset
+     * @param length: the write length
+     * @param sourceInfo: the chunk clone source information
+     * @param done: the closure for the upper layer's asynchronous callback
+     */
+    int WriteChunk(const ChunkIDInfo& idinfo, uint64_t fileId, uint64_t epoch,
+                   uint64_t sn, const butil::IOBuf& writeData, off_t offset,
+                   size_t length, const RequestSourceInfo& sourceInfo,
+                   Closure* done);

     /**
-     * 读Chunk快照文件
-     * @param idinfo为chunk相关的id信息
-     * @param sn:文件版本号
-     * @param offset:读的偏移
-     * @param length:读的长度
-     * @param done:上一层异步回调的closure
+     * Read a chunk snapshot file
+     * @param idinfo: the ID information of the chunk
+     * @param sn: the file sequence (version) number
+     * @param offset: the read offset
+     * @param length: the read length
+     * @param done: the closure for the upper layer's asynchronous callback
      */
-    int ReadChunkSnapshot(const ChunkIDInfo& idinfo,
-                          uint64_t sn,
-                          off_t offset,
-                          size_t length,
-                          Closure *done);
+    int ReadChunkSnapshot(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset,
+                          size_t length, Closure* done);

     /**
-     * 删除此次转储时产生的或者历史遗留的快照
-     * 如果转储过程中没有产生快照,则修改chunk的correctedSn
-     * @param idinfo为chunk相关的id信息
-     * @param correctedSn:需要修正的版本号
-     * @param done:上一层异步回调的closure
+     * Delete the snapshots generated during this dump or left over from
+     * history. If no snapshot was generated during the dump, modify the
+     * chunk's correctedSn.
+     * @param idinfo is the ID information of the chunk
+     * @param correctedSn: the sequence number to be corrected
+     * @param done: the closure for the upper layer's asynchronous callback
      */
    int
DeleteChunkSnapshotOrCorrectSn(const ChunkIDInfo& idinfo,
-                                   uint64_t correctedSn,
-                                   Closure *done);
+                                   uint64_t correctedSn, Closure* done);

     /**
-     * 获取chunk文件的信息
-     * @param idinfo为chunk相关的id信息
-     * @param done:上一层异步回调的closure
+     * Get the information of a chunk file
+     * @param idinfo: the ID information of the chunk
+     * @param done: the closure for the upper layer's asynchronous callback
      */
-    int GetChunkInfo(const ChunkIDInfo& idinfo,
-                     Closure *done);
+    int GetChunkInfo(const ChunkIDInfo& idinfo, Closure* done);

     /**
-     * @brief lazy 创建clone chunk
-     * @param idinfo为chunk相关的id信息
-     * @param:location 数据源的url
-     * @param:sn chunk的序列号
-     * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn
-     * @param:chunkSize chunk的大小
-     * @param done:上一层异步回调的closure
-     * @return 错误码
-     */
-    int CreateCloneChunk(const ChunkIDInfo& idinfo,
-                         const std::string &location,
-                         uint64_t sn,
-                         uint64_t correntSn,
-                         uint64_t chunkSize,
-                         Closure *done);
-
-    /**
-     * @brief 实际恢复chunk数据
-     * @param idinfo为chunk相关的id信息
-     * @param:offset 偏移
-     * @param:len 长度
-     * @param done:上一层异步回调的closure
-     * @return 错误码
-     */
-    int RecoverChunk(const ChunkIDInfo& idinfo,
-                     uint64_t offset,
-                     uint64_t len,
-                     Closure *done);
+     * @brief Lazily create a clone chunk
+     * @param idinfo: the ID information of the chunk
+     * @param location: the URL of the data source
+     * @param sn: the sequence number of the chunk
+     * @param correntSn: used to modify the chunk's correctedSn when creating
+     * the clone chunk
+     * @param chunkSize: the chunk size
+     * @param done: the closure for the upper layer's asynchronous callback
+     * @return error code
+     */
+    int CreateCloneChunk(const ChunkIDInfo& idinfo, const std::string& location,
+                         uint64_t sn, uint64_t correntSn, uint64_t chunkSize,
+                         Closure* done);
+
+    /**
+     * @brief Actually recover chunk data
+     * @param idinfo is the ID information of the chunk
+     * @param offset: the offset
+     * @param len: the length
+     * @param done: the closure for the upper layer's asynchronous callback
+     * @return error code
+     */
+    int RecoverChunk(const ChunkIDInfo& idinfo, uint64_t offset, uint64_t len,
+                     Closure* done);

     /**
-     * @brief 如果csId对应的RequestSender不健康,就进行重置
+     * @brief If the RequestSender corresponding to csId is not healthy,
+     * reset it
     * @param csId chunkserver id
     */
    void ResetSenderIfNotHealth(const ChunkServerID& csId) {
@@ -189,24 +172,21 @@
    }

    /**
-     * session过期,需要将重试RPC停住
+     * The session has expired; retried RPCs need to be parked
     */
-    void StartRecycleRetryRPC() {
-        sessionNotValid_ = true;
-    }
+    void StartRecycleRetryRPC() { sessionNotValid_ = true; }

    /**
-     * session恢复通知不再回收重试的RPC
+     * The session has recovered; stop recycling retried RPCs
     */
-    void ResumeRPCRetry() {
-        sessionNotValid_ = false;
-    }
+    void ResumeRPCRetry() { sessionNotValid_ = false; }

    /**
-     * 在文件关闭的时候接收上层关闭通知, 根据session有效状态
-     * 置位exitFlag, 如果sessio无效状态下再有rpc超时返回,这
-     * 些RPC会直接错误返回,如果session正常,则将继续正常下发
-     * RPC,直到重试次数结束或者成功返回
+     * Receive the upper layer's close notification when the file is closed,
+     * and set the exitFlag based on the session's validity. If RPCs time out
+     * while the session is invalid, those RPCs return errors directly. If
+     * the session is valid, RPCs will continue to be issued until the retry
+     * limit is reached or they return successfully.
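+     *
+     * A sketch of the close sequence implied above (illustrative only):
+     *   client.StartRecycleRetryRPC();  // session expired: park retried RPCs
+     *   client.ResetExitFlag();         // file closing; exitFlag_ is set
+     *                                   // only while the session is invalid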
*/ void ResetExitFlag() { if (sessionNotValid_) { @@ -218,47 +198,49 @@ class CopysetClient { friend class WriteChunkClosure; friend class ReadChunkClosure; - // 拉取新的leader信息 - bool FetchLeader(LogicPoolID lpid, - CopysetID cpid, - ChunkServerID* leaderid, + // Pull new leader information + bool FetchLeader(LogicPoolID lpid, CopysetID cpid, ChunkServerID* leaderid, butil::EndPoint* leaderaddr); /** - * 执行发送rpc task,并进行错误重试 - * @param[in]: idinfo为当前rpc task的id信息 - * @param[in]: task为本次要执行的rpc task - * @param[in]: done是本次rpc 任务的异步回调 - * @return: 成功返回0, 否则-1 + * Execute the send rpc task and retry with an error + * @param[in]: idinfo is the ID information of the current rpc task + * @param[in]: task is the rpc task executed this time + * @param[in]: done is the asynchronous callback for this RPC task + * @return: Successfully returns 0, otherwise -1 */ - int DoRPCTask(const ChunkIDInfo& idinfo, + int DoRPCTask( + const ChunkIDInfo& idinfo, std::function)> task, - Closure *done); + Closure* done); private: - // 元数据缓存 - MetaCache *metaCache_; - // 所有ChunkServer的链接管理者 - RequestSenderManager *senderManager_; - // 配置 + // Metadata cache + MetaCache* metaCache_; + // Link managers for all ChunkServers + RequestSenderManager* senderManager_; + // Configuration IOSenderOption iosenderopt_; - // session是否有效,如果session无效那么需要将重试的RPC停住 - // RPC停住通过将这个rpc重新push到request scheduler队列,这样不会 - // 阻塞brpc内部的线程,防止一个文件的操作影响到其他文件 + // Check if the session is valid. If the session is invalid, it's necessary + // to pause the retry RPCs by re-pushing this RPC into the request scheduler + // queue. This ensures that it doesn't block the internal threads of BRPC + // and prevents operations on one file from affecting other files. bool sessionNotValid_; - // request 调度器,在session过期的时候重新将RPC push到调度队列 + // request scheduler to push RPC back to the scheduling queue when the + // session expires RequestScheduler* scheduler_; - // 当前copyset client对应的文件metric + // The file metric corresponding to the current copyset client FileMetric* fileMetric_; - // 是否在停止状态中,如果是在关闭过程中且session失效,需要将rpc直接返回不下发 + // Is it in a stopped state? 
If it is during the shutdown process and the + // session fails, it is necessary to directly return rpc without issuing it bool exitFlag_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_COPYSET_CLIENT_H_ diff --git a/src/client/file_instance.cpp b/src/client/file_instance.cpp index 63836653de..343b6cd5f8 100644 --- a/src/client/file_instance.cpp +++ b/src/client/file_instance.cpp @@ -24,21 +24,22 @@ #include #include + #include #include "src/client/iomanager4file.h" #include "src/client/mds_client.h" -#include "src/common/timeutility.h" #include "src/common/curve_define.h" #include "src/common/fast_align.h" +#include "src/common/timeutility.h" namespace curve { namespace client { using curve::client::ClientConfig; +using curve::common::is_aligned; using curve::common::TimeUtility; using curve::mds::SessionStatus; -using curve::common::is_aligned; bool CheckAlign(off_t off, size_t length, size_t blocksize) { return is_aligned(off, blocksize) && is_aligned(length, blocksize); @@ -105,18 +106,16 @@ void FileInstance::UnInitialize() { int FileInstance::Read(char* buf, off_t offset, size_t length) { if (CURVE_UNLIKELY(!CheckAlign(offset, length, blocksize_))) { LOG(ERROR) << "IO not aligned, off: " << offset - << ", length: " << length - << ", block size: " << blocksize_; + << ", length: " << length << ", block size: " << blocksize_; return -LIBCURVE_ERROR::NOT_ALIGNED; } - DLOG_EVERY_SECOND(INFO) << "begin Read "<< finfo_.fullPathName - << ", offset = " << offset - << ", len = " << length; + DLOG_EVERY_SECOND(INFO) << "begin Read " << finfo_.fullPathName + << ", offset = " << offset << ", len = " << length; return iomanager4file_.Read(buf, offset, length, mdsclient_.get()); } -int FileInstance::Write(const char *buf, off_t offset, size_t len) { +int FileInstance::Write(const char* buf, off_t offset, size_t len) { if (CURVE_UNLIKELY(readonly_)) { DVLOG(9) << "open with read only, do not support write!"; return -1; @@ -135,7 +134,7 @@ int FileInstance::Write(const char *buf, off_t offset, size_t len) { int FileInstance::AioRead(CurveAioContext* aioctx, UserDataType dataType) { if (CURVE_UNLIKELY( - !CheckAlign(aioctx->offset, aioctx->length, blocksize_))) { + !CheckAlign(aioctx->offset, aioctx->length, blocksize_))) { LOG(ERROR) << "IO not aligned, off: " << aioctx->offset << ", length: " << aioctx->length << ", block size: " << blocksize_; @@ -144,20 +143,20 @@ int FileInstance::AioRead(CurveAioContext* aioctx, UserDataType dataType) { return -LIBCURVE_ERROR::NOT_ALIGNED; } - DLOG_EVERY_SECOND(INFO) << "begin AioRead " << finfo_.fullPathName - << ", offset = " << aioctx->offset - << ", len = " << aioctx->length; + DLOG_EVERY_SECOND(INFO) + << "begin AioRead " << finfo_.fullPathName + << ", offset = " << aioctx->offset << ", len = " << aioctx->length; return iomanager4file_.AioRead(aioctx, mdsclient_.get(), dataType); } -int FileInstance::AioWrite(CurveAioContext *aioctx, UserDataType dataType) { +int FileInstance::AioWrite(CurveAioContext* aioctx, UserDataType dataType) { if (CURVE_UNLIKELY(readonly_)) { DVLOG(9) << "open with read only, do not support write!"; return -1; } if (CURVE_UNLIKELY( - !CheckAlign(aioctx->offset, aioctx->length, blocksize_))) { + !CheckAlign(aioctx->offset, aioctx->length, blocksize_))) { LOG(ERROR) << "IO not aligned, off: " << aioctx->offset << ", length: " << aioctx->length << ", block size: " << blocksize_; @@ -166,9 +165,9 @@ int FileInstance::AioWrite(CurveAioContext *aioctx, UserDataType 
dataType) { return -LIBCURVE_ERROR::NOT_ALIGNED; } - DLOG_EVERY_SECOND(INFO) << "begin AioWrite " << finfo_.fullPathName - << ", offset = " << aioctx->offset - << ", len = " << aioctx->length; + DLOG_EVERY_SECOND(INFO) + << "begin AioWrite " << finfo_.fullPathName + << ", offset = " << aioctx->offset << ", len = " << aioctx->length; return iomanager4file_.AioWrite(aioctx, mdsclient_.get(), dataType); } @@ -181,7 +180,7 @@ int FileInstance::Discard(off_t offset, size_t length) { return -1; } -int FileInstance::AioDiscard(CurveAioContext *aioctx) { +int FileInstance::AioDiscard(CurveAioContext* aioctx) { if (CURVE_LIKELY(!readonly_)) { return iomanager4file_.AioDiscard(aioctx, mdsclient_.get()); } @@ -190,16 +189,23 @@ int FileInstance::AioDiscard(CurveAioContext *aioctx) { return -1; } -// 两种场景会造成在Open的时候返回LIBCURVE_ERROR::FILE_OCCUPIED -// 1. 强制重启qemu不会调用close逻辑,然后启动的时候原来的文件sessio还没过期. -// 导致再次去发起open的时候,返回被占用,这种情况可以通过load sessionmap -// 拿到已有的session,再去执行refresh。 -// 2. 由于网络原因,导致open rpc超时,然后再去重试的时候就会返回FILE_OCCUPIED -// 这时候当前还没有成功打开,所以还没有存储该session信息,所以无法通过refresh -// 再去打开,所以这时候需要获取mds一侧session lease时长,然后在client这一侧 -// 等待一段时间再去Open,如果依然失败,就向上层返回失败。 +// Two scenarios can lead to returning LIBCURVE_ERROR::FILE_OCCUPIED when +// opening: +// 1. Forcibly restarting QEMU does not trigger the close logic, and when +// starting, the original session file has not expired yet. +// This causes a return of "occupied" +// when attempting to open it again. This situation can be resolved by +// loading the session map, obtaining the existing session, and then +// performing a refresh. +// 2. Due to network issues, the open RPC times out, and when retrying, it +// returns FILE_OCCUPIED. +// At this point, the file hasn't been successfully opened yet, so the +// session information isn't stored, and it's impossible to open it through +// refresh. In this case, you need to obtain the session lease duration on +// the MDS side, then wait for a period on the client side before attempting +// to Open again. If it still fails, return a failure to the upper layer. 
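+//
+// A sketch of the retry in case 2 (illustrative; `mdsSessionLeaseSec` is an
+// assumed name for the lease duration obtained from the MDS):
+//   sleep(mdsSessionLeaseSec);
+//   int ret = instance->Open(&sessionId);
+//   // if Open() still reports FILE_OCCUPIED, return the failure upward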
int FileInstance::Open(std::string* sessionId) { - LeaseSession_t lease; + LeaseSession_t lease; int ret = LIBCURVE_ERROR::FAILED; FileEpoch fEpoch; @@ -218,8 +224,8 @@ int FileInstance::Open(std::string* sessionId) { return -ret; } -int FileInstance::GetFileInfo(const std::string &filename, FInfo_t *fi, - FileEpoch_t *fEpoch) { +int FileInstance::GetFileInfo(const std::string& filename, FInfo_t* fi, + FileEpoch_t* fEpoch) { LIBCURVE_ERROR ret = mdsclient_->GetFileInfo(filename, finfo_.userinfo, fi, fEpoch); return -ret; @@ -240,12 +246,12 @@ int FileInstance::Close() { FileInstance* FileInstance::NewInitedFileInstance( const FileServiceOption& fileServiceOption, - const std::shared_ptr& mdsClient, - const std::string& filename, + const std::shared_ptr& mdsClient, const std::string& filename, const UserInfo& userInfo, - const OpenFlags& openflags, // TODO(all): maybe we can put userinfo and readonly into openflags // NOLINT + const OpenFlags& openflags, // TODO(all): maybe we can put userinfo and + // readonly into openflags // NOLINT bool readonly) { - FileInstance *instance = new (std::nothrow) FileInstance(); + FileInstance* instance = new (std::nothrow) FileInstance(); if (instance == nullptr) { LOG(ERROR) << "Create FileInstance failed, filename: " << filename; return nullptr; @@ -266,10 +272,8 @@ FileInstance* FileInstance::NewInitedFileInstance( } FileInstance* FileInstance::Open4Readonly( - const FileServiceOption& opt, - const std::shared_ptr& mdsclient, - const std::string& filename, - const UserInfo& userInfo, + const FileServiceOption& opt, const std::shared_ptr& mdsclient, + const std::string& filename, const UserInfo& userInfo, const OpenFlags& openflags) { FileInstance* instance = FileInstance::NewInitedFileInstance( opt, mdsclient, filename, userInfo, openflags, true); @@ -279,8 +283,8 @@ FileInstance* FileInstance::Open4Readonly( } FileEpoch_t fEpoch; - int ret = mdsclient->GetFileInfo(filename, userInfo, &instance->finfo_, - &fEpoch); + int ret = + mdsclient->GetFileInfo(filename, userInfo, &instance->finfo_, &fEpoch); if (ret != 0) { LOG(ERROR) << "Get file info failed!"; instance->UnInitialize(); diff --git a/src/client/file_instance.h b/src/client/file_instance.h index 432a3402e4..952fc7e3d4 100644 --- a/src/client/file_instance.h +++ b/src/client/file_instance.h @@ -25,13 +25,13 @@ #include #include -#include "src/client/mds_client.h" #include "include/client/libcurve.h" #include "include/curve_compiler_specific.h" #include "src/client/client_common.h" -#include "src/client/service_helper.h" #include "src/client/iomanager4file.h" #include "src/client/lease_executor.h" +#include "src/client/mds_client.h" +#include "src/client/service_helper.h" namespace curve { namespace client { @@ -42,55 +42,61 @@ class CURVE_CACHELINE_ALIGNMENT FileInstance { ~FileInstance() = default; /** - * 初始化 - * @param: filename文件名用于初始化iomanager的metric信息 - * @param: mdsclient为全局的mds client - * @param: userinfo为user信息 - * @param: fileservicopt fileclient的配置选项 - * @param: clientMetric为client端要统计的metric信息 - * @param: readonly是否以只读方式打开 - * @return: 成功返回true、否则返回false + * Initialize + * @param: filename The filename used to initialize the iomanager's metric + * information. + * @param: mdsclient The global mds client. + * @param: userinfo User information. + * @param: fileservicopt The configuration options for the fileclient. + * @param: clientMetric Metric information to be collected on the client + * side. + * @param: readonly Whether to open in read-only mode. 
+     * @return: Returns true on success, otherwise returns false.
      */
     bool Initialize(const std::string& filename,
                     const std::shared_ptr& mdsclient,
-                    const UserInfo& userinfo,
-                    const OpenFlags& openflags,
+                    const UserInfo& userinfo, const OpenFlags& openflags,
                     const FileServiceOption& fileservicopt,
                     bool readonly = false);
     /**
-     * 打开文件
-     * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED
+     * Open the file
+     * @return: LIBCURVE_ERROR::OK on success, otherwise
+     * LIBCURVE_ERROR::FAILED
      */
     int Open(std::string* sessionId = nullptr);

     /**
-     * 同步模式读
-     * @param: buf为当前待读取的缓冲区
-     * @param:offset文件内的便宜
-     * @parma:length为待读取的长度
-     * @return: 成功返回读取真实长度,-1为失败
+     * Synchronous mode read
+     * @param: buf the buffer to read into
+     * @param: offset the offset within the file
+     * @param: length the length to be read
+     * @return: the actual length read on success, -1 on failure
      */
     int Read(char* buf, off_t offset, size_t length);

     /**
-     * 同步模式写
-     * @param: buf为当前待写入的缓冲区
-     * @param:offset文件内的便宜
-     * @parma:length为待读取的长度
-     * @return: 成功返回写入真实长度,-1为失败
+     * Synchronous mode write
+     * @param: buf the buffer to be written
+     * @param: offset the offset within the file
+     * @param: length the length to be written
+     * @return: the actual length written on success, -1 on failure
      */
     int Write(const char* buf, off_t offset, size_t length);

     /**
-     * 异步模式读
-     * @param: aioctx为异步读写的io上下文,保存基本的io信息
+     * Asynchronous mode read
+     * @param: aioctx the I/O context for asynchronous read/write, which
+     * holds the basic I/O information
      * @param: dataType type of user buffer
-     * @return: 0为成功,小于0为失败
+     * @return: 0 on success, less than 0 on failure
      */
     int AioRead(CurveAioContext* aioctx, UserDataType dataType);

     /**
-     * 异步模式写
-     * @param: aioctx为异步读写的io上下文,保存基本的io信息
+     * Asynchronous mode write
+     * @param: aioctx An asynchronous read/write IO context that stores basic
+     * IO information
     * @param: dataType type of user buffer
-     * @return: 0为成功,小于0为失败
+     * @return: 0 indicates success, less than 0 indicates failure
     */
    int AioWrite(CurveAioContext* aioctx, UserDataType dataType);
@@ -113,69 +119,61 @@ class CURVE_CACHELINE_ALIGNMENT FileInstance {
    void UnInitialize();

-    IOManager4File* GetIOManager4File() {
-        return &iomanager4file_;
-    }
+    IOManager4File* GetIOManager4File() { return &iomanager4file_; }

    /**
-     * 获取lease, 测试代码使用
+     * Get the lease executor, for use by test code
     */
-    LeaseExecutor* GetLeaseExecutor() const {
-        return leaseExecutor_.get();
-    }
+    LeaseExecutor* GetLeaseExecutor() const { return leaseExecutor_.get(); }

-    int GetFileInfo(const std::string& filename,
-                    FInfo_t* fi, FileEpoch_t *fEpoch);
+    int GetFileInfo(const std::string& filename, FInfo_t* fi,
+                    FileEpoch_t* fEpoch);

-    void UpdateFileEpoch(const FileEpoch_t &fEpoch) {
+    void UpdateFileEpoch(const FileEpoch_t& fEpoch) {
        iomanager4file_.UpdateFileEpoch(fEpoch);
    }

    /**
-     * @brief 获取当前instance对应的文件信息
+     * @brief Get the file information corresponding to the current instance
     *
-     * @return 当前instance对应文件的信息
+     * @return The information of the file corresponding to the current instance
     */
-    FInfo GetCurrentFileInfo() const {
-        return finfo_;
-    }
+    FInfo GetCurrentFileInfo() const { return finfo_; }

    static FileInstance* NewInitedFileInstance(
        const FileServiceOption& fileServiceOption,
        const std::shared_ptr& mdsclient,
-        const std::string& filename,
-        const UserInfo& userInfo,
-        const OpenFlags& openflags,
-        bool readonly);
+        const std::string& filename, const UserInfo& userInfo,
+        const OpenFlags& openflags, bool readonly);

    static FileInstance* Open4Readonly(
        const FileServiceOption& opt,
        const std::shared_ptr& mdsclient,
-        const std::string& filename,
-        const UserInfo& userInfo,
+        const std::string& filename, const UserInfo& userInfo,
        const OpenFlags& openflags = DefaultReadonlyOpenFlags());

 private:
    void StopLease();

 private:
-    // 保存当前file的文件信息
+    // Save file information for the current file
    FInfo finfo_;

-    // 当前FileInstance的初始化配置信息
-    FileServiceOption fileopt_;
+    // The initialization configuration information of the current FileInstance
+    FileServiceOption fileopt_;

-    // MDSClient是FileInstance与mds通信的唯一出口
+    // MDSClient is the only channel through which FileInstance communicates
+    // with the mds
    std::shared_ptr mdsclient_;

-    // 每个文件都持有与MDS通信的lease,LeaseExecutor是续约执行者
+    // Each file holds a lease for communication with MDS, and the LeaseExecutor
+    // is the renewal executor
    std::unique_ptr leaseExecutor_;

-    // IOManager4File用于管理所有向chunkserver端发送的IO
-    IOManager4File iomanager4file_;
+    // IOManager4File is used to manage all IO sent to the chunkserver end
+    IOManager4File iomanager4file_;

-    // 是否为只读方式
-    bool readonly_ = false;
+    // Whether to open in read-only mode
+    bool readonly_ = false;

    // offset and length must align with `blocksize_`
    // 4096 for backward compatibility
@@ -184,7 +182,7 @@ class CURVE_CACHELINE_ALIGNMENT FileInstance {
    bool CheckAlign(off_t off, size_t length, size_t blocksize);

-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve

#endif  // SRC_CLIENT_FILE_INSTANCE_H_
diff --git a/src/client/inflight_controller.h b/src/client/inflight_controller.h
index 5c59f4edce..ddef520d0d 100644
--- a/src/client/inflight_controller.h
+++ b/src/client/inflight_controller.h
@@ -28,8 +28,8 @@
 namespace curve {
 namespace client {

-using curve::common::Mutex;
 using curve::common::ConditionVariable;
+using
curve::common::Mutex;

 class InflightControl {
  public:
    InflightControl() = default;
@@ -40,8 +40,8 @@ class InflightControl {
    }

    /**
-     * @brief 调用该接口等待inflight全部回来,这段期间是hang的,
-     * 在close文件时调用
+     * @brief Wait until every inflight IO has come back; the caller hangs
+     * during this period. Called when closing the file
     */
    void WaitInflightAllComeBack() {
        LOG(INFO) << "wait inflight to complete, count = " << curInflightIONum_;
@@ -53,7 +53,8 @@
    }

    /**
-     * @brief 调用该接口等待inflight回来,这段期间是hang的
+     * @brief Wait until the inflight count drops below the limit; the caller
+     * hangs during this period
     */
    void WaitInflightComeBack() {
        if (curInflightIONum_.load(std::memory_order_acquire) >=
@@ -67,14 +68,14 @@
    }

    /**
-     * @brief 递增inflight num
+     * @brief Increment inflight num
     */
    void IncremInflightNum() {
        curInflightIONum_.fetch_add(1, std::memory_order_release);
    }

    /**
-     * @brief 递减inflight num
+     * @brief Decrement inflight num
     */
    void DecremInflightNum() {
        std::lock_guard lk(inflightComeBackmtx_);
@@ -90,24 +91,30 @@
    }

    /**
-     * WaitInflightComeBack会检查当前未返回的io数量是否超过我们限制的最大未返回inflight数量
-     * 但是真正的inflight数量与上层并发调用的线程数有关。
-     * 假设我们设置的maxinflight=100,上层有三个线程在同时调用GetInflightToken,
-     * 如果这个时候inflight数量为99,那么并发状况下这3个线程在WaitInflightComeBack
-     * 都会通过然后向下并发执行IncremInflightNum,这个时候真正的inflight为102,
-     * 下一个下发的时候需要等到inflight数量小于100才能继续,也就是等至少3个IO回来才能继续
-     * 下发。这个误差是可以接受的,他与scheduler一侧并发度有关,误差有上限。
-     * 如果想要精确控制inflight数量,就需要在接口处加锁,让原本可以并发的逻辑变成了
-     * 串行,这样得不偿失。因此我们这里选择容忍一定误差范围。
+     * WaitInflightComeBack checks if the current number of pending IOs exceeds
+     * our maximum allowed inflight limit. However, the actual inflight count is
+     * influenced by concurrent calls from upper-layer threads. Suppose we set
+     * maxinflight to 100, and there are three upper-layer threads
+     * simultaneously calling GetInflightToken. If, at this moment, the inflight
+     * count is 99, then in a concurrent scenario, all three threads in
+     * WaitInflightComeBack will pass and proceed to concurrently execute
+     * IncremInflightNum. Consequently, the actual inflight count becomes 102.
+     * The next dispatch operation will need to wait until the inflight count is
+     * less than 100 to proceed, which means it needs at least 3 IOs to return
+     * before proceeding. This margin of error is acceptable and is related to
+     * the concurrency level on the scheduler side, with a defined upper limit.
+     * If precise control over the inflight count is required, it would
+     * necessitate adding locks at the interface level, converting originally
+     * concurrent logic into serial, which would not be a cost-effective
+     * solution. Therefore, we choose to tolerate a certain margin of error in
+     * this scenario.
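// A minimal stand-alone sketch of the check-then-increment token scheme
// described above (illustrative names and simplified types, not the real
// InflightControl class). The wait and the increment are intentionally not
// atomic with each other, so with T concurrent callers the counter may
// overshoot the limit by at most T - 1 — exactly the tolerated margin.
#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>

class MiniInflightControl {
 public:
    explicit MiniInflightControl(uint64_t max) : max_(max) {}

    void GetToken() {
        {
            std::unique_lock<std::mutex> lk(mtx_);
            // Several threads can pass this wait at the same time...
            cv_.wait(lk, [this] { return cur_.load() < max_; });
        }
        // ...and each of them increments, producing the bounded overshoot.
        cur_.fetch_add(1);
    }

    void ReleaseToken() {
        std::lock_guard<std::mutex> lk(mtx_);
        cur_.fetch_sub(1);
        cv_.notify_one();
    }

 private:
    const uint64_t max_;
    std::atomic<uint64_t> cur_{0};
    std::mutex mtx_;
    std::condition_variable cv_;
};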
*/ void GetInflightToken() { WaitInflightComeBack(); IncremInflightNum(); } - void ReleaseInflightToken() { - DecremInflightNum(); - } + void ReleaseInflightToken() { DecremInflightNum(); } /** * @brief Get current inflight io num, only use in test code @@ -117,16 +124,16 @@ class InflightControl { } private: - uint64_t maxInflightNum_ = 0; + uint64_t maxInflightNum_ = 0; std::atomic curInflightIONum_{0}; - Mutex inflightComeBackmtx_; - ConditionVariable inflightComeBackcv_; - Mutex inflightAllComeBackmtx_; - ConditionVariable inflightAllComeBackcv_; + Mutex inflightComeBackmtx_; + ConditionVariable inflightComeBackcv_; + Mutex inflightAllComeBackmtx_; + ConditionVariable inflightAllComeBackcv_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_INFLIGHT_CONTROLLER_H_ diff --git a/src/client/io_condition_varaiable.h b/src/client/io_condition_varaiable.h index a220168db3..9b721bd60f 100644 --- a/src/client/io_condition_varaiable.h +++ b/src/client/io_condition_varaiable.h @@ -23,12 +23,13 @@ #ifndef SRC_CLIENT_IO_CONDITION_VARAIABLE_H_ #define SRC_CLIENT_IO_CONDITION_VARAIABLE_H_ -#include //NOLINT -#include //NOLINT +#include //NOLINT +#include //NOLINT namespace curve { namespace client { -// IOConditionVariable是用户同步IO场景下IO等待条件变量 +// IOConditionVariable is the IO waiting condition variable in the user +// synchronous IO scenario class IOConditionVariable { public: IOConditionVariable() : retCode_(-1), done_(false), mtx_(), cv_() {} @@ -36,9 +37,10 @@ class IOConditionVariable { ~IOConditionVariable() = default; /** - * 条件变量唤醒函数,因为底层的RPC request是异步的,所以用户下发同步IO的时候需要 - * 在发送读写请求的时候暂停等待IO返回。 - * @param: retcode是当前IO的返回值 + * Condition variable wakeup function. Since the underlying RPC requests are + * asynchronous, when users initiate synchronous IO, they need to pause and + * wait for the IO to return while sending read/write requests. + * @param: retcode is the return value of the current IO. 
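// A minimal, self-contained demo of the Complete()/Wait() handshake that
// IOConditionVariable implements: the submitting thread blocks in Wait()
// until the asynchronous completion path calls Complete(retcode). Names are
// illustrative; this is a sketch, not the client's actual class.
#include <condition_variable>
#include <mutex>
#include <thread>

class MiniIoCv {
 public:
    void Complete(int retcode) {
        std::unique_lock<std::mutex> lk(mtx_);
        ret_ = retcode;
        done_ = true;
        cv_.notify_one();
    }

    int Wait() {
        std::unique_lock<std::mutex> lk(mtx_);
        cv_.wait(lk, [this] { return done_; });
        done_ = false;  // ready for the next IO
        return ret_;
    }

 private:
    int ret_ = -1;
    bool done_ = false;
    std::mutex mtx_;
    std::condition_variable cv_;
};

int main() {
    MiniIoCv iocv;
    // Stand-in for the asynchronous RPC completion path.
    std::thread rpc([&iocv] { iocv.Complete(4096); });
    int bytesRead = iocv.Wait();  // blocks until Complete() fires
    rpc.join();
    return bytesRead == 4096 ? 0 : 1;
}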
*/ void Complete(int retcode) { std::unique_lock lk(mtx_); @@ -48,7 +50,8 @@ class IOConditionVariable { } /** - * 是用户IO需要等待时候调用的函数,这个函数会在Complete被调用的时候返回 + * This is a function called when user IO needs to wait, and this function + * will return when Complete is called */ int Wait() { std::unique_lock lk(mtx_); @@ -58,20 +61,20 @@ class IOConditionVariable { } private: - // 当前IO的返回值 - int retCode_; + // The return value of the current IO + int retCode_; - // 当前IO是否完成 - bool done_; + // Is the current IO completed + bool done_; - // 条件变量使用的锁 - std::mutex mtx_; + // Locks used by conditional variables + std::mutex mtx_; - // 条件变量用于等待 + // Condition variable used for waiting std::condition_variable cv_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_IO_CONDITION_VARAIABLE_H_ diff --git a/src/client/io_tracker.cpp b/src/client/io_tracker.cpp index 85d6dae911..b835ebf503 100644 --- a/src/client/io_tracker.cpp +++ b/src/client/io_tracker.cpp @@ -20,21 +20,22 @@ * Author: tongguangxun */ +#include "src/client/io_tracker.h" + #include #include #include #include -#include "src/client/splitor.h" +#include "src/client/discard_task.h" #include "src/client/iomanager.h" -#include "src/client/io_tracker.h" -#include "src/client/request_scheduler.h" +#include "src/client/metacache_struct.h" #include "src/client/request_closure.h" -#include "src/common/timeutility.h" +#include "src/client/request_scheduler.h" #include "src/client/source_reader.h" -#include "src/client/metacache_struct.h" -#include "src/client/discard_task.h" +#include "src/client/splitor.h" +#include "src/common/timeutility.h" namespace curve { namespace client { @@ -44,24 +45,22 @@ using curve::chunkserver::CHUNK_OP_STATUS; std::atomic IOTracker::tracekerID_(1); DiscardOption IOTracker::discardOption_; -IOTracker::IOTracker(IOManager* iomanager, - MetaCache* mc, - RequestScheduler* scheduler, - FileMetric* clientMetric, +IOTracker::IOTracker(IOManager* iomanager, MetaCache* mc, + RequestScheduler* scheduler, FileMetric* clientMetric, bool disableStripe) : mc_(mc), scheduler_(scheduler), iomanager_(iomanager), fileMetric_(clientMetric), disableStripe_(disableStripe) { - id_ = tracekerID_.fetch_add(1, std::memory_order_relaxed); - scc_ = nullptr; - aioctx_ = nullptr; - data_ = nullptr; - type_ = OpType::UNKNOWN; - errcode_ = LIBCURVE_ERROR::OK; - offset_ = 0; - length_ = 0; + id_ = tracekerID_.fetch_add(1, std::memory_order_relaxed); + scc_ = nullptr; + aioctx_ = nullptr; + data_ = nullptr; + type_ = OpType::UNKNOWN; + errcode_ = LIBCURVE_ERROR::OK; + offset_ = 0; + length_ = 0; reqlist_.clear(); reqcount_.store(0, std::memory_order_release); opStartTimePoint_ = curve::common::TimeUtility::GetTimeofDayUs(); @@ -162,8 +161,7 @@ int IOTracker::ReadFromSource(const std::vector& reqCtxVec, void IOTracker::StartWrite(const void* buf, off_t offset, size_t length, MDSClient* mdsclient, const FInfo_t* fileInfo, - const FileEpoch* fEpoch, - Throttle* throttle) { + const FileEpoch* fEpoch, Throttle* throttle) { data_ = const_cast(buf); offset_ = offset; length_ = length; @@ -190,8 +188,7 @@ void IOTracker::StartAioWrite(CurveAioContext* ctx, MDSClient* mdsclient, } void IOTracker::DoWrite(MDSClient* mdsclient, const FInfo_t* fileInfo, - const FileEpoch* fEpoch, - Throttle* throttle) { + const FileEpoch* fEpoch, Throttle* throttle) { if (nullptr == data_) { ReturnOnFail(); return; @@ -199,8 +196,7 @@ void IOTracker::DoWrite(MDSClient* mdsclient, const FInfo_t* fileInfo, switch 
(userDataType_) { case UserDataType::RawBuffer: - writeData_.append_user_data(data_, length_, - TrivialDeleter); + writeData_.append_user_data(data_, length_, TrivialDeleter); break; case UserDataType::IOBuffer: writeData_ = *reinterpret_cast(data_); @@ -211,9 +207,9 @@ void IOTracker::DoWrite(MDSClient* mdsclient, const FInfo_t* fileInfo, throttle->Add(false, length_); } - int ret = Splitor::IO2ChunkRequests(this, mc_, &reqlist_, &writeData_, - offset_, length_, - mdsclient, fileInfo, fEpoch); + int ret = + Splitor::IO2ChunkRequests(this, mc_, &reqlist_, &writeData_, offset_, + length_, mdsclient, fileInfo, fEpoch); if (ret == 0) { uint32_t subIoIndex = 0; @@ -284,14 +280,14 @@ void IOTracker::DoDiscard(MDSClient* mdsClient, const FInfo* fileInfo, Done(); } -void IOTracker::ReadSnapChunk(const ChunkIDInfo &cinfo, - uint64_t seq, uint64_t offset, uint64_t len, - char *buf, SnapCloneClosure* scc) { - scc_ = scc; - data_ = buf; +void IOTracker::ReadSnapChunk(const ChunkIDInfo& cinfo, uint64_t seq, + uint64_t offset, uint64_t len, char* buf, + SnapCloneClosure* scc) { + scc_ = scc; + data_ = buf; offset_ = offset; length_ = len; - type_ = OpType::READ_SNAP; + type_ = OpType::READ_SNAP; int ret = -1; do { @@ -316,8 +312,8 @@ void IOTracker::ReadSnapChunk(const ChunkIDInfo &cinfo, } } -void IOTracker::DeleteSnapChunkOrCorrectSn(const ChunkIDInfo &cinfo, - uint64_t correctedSeq) { +void IOTracker::DeleteSnapChunkOrCorrectSn(const ChunkIDInfo& cinfo, + uint64_t correctedSeq) { type_ = OpType::DELETE_SNAP; int ret = -1; @@ -343,8 +339,8 @@ void IOTracker::DeleteSnapChunkOrCorrectSn(const ChunkIDInfo &cinfo, } } -void IOTracker::GetChunkInfo(const ChunkIDInfo &cinfo, - ChunkInfoDetail *chunkInfo) { +void IOTracker::GetChunkInfo(const ChunkIDInfo& cinfo, + ChunkInfoDetail* chunkInfo) { type_ = OpType::GET_CHUNK_INFO; int ret = -1; @@ -384,10 +380,10 @@ void IOTracker::CreateCloneChunk(const std::string& location, break; } - newreqNode->seq_ = sn; - newreqNode->chunksize_ = chunkSize; - newreqNode->location_ = location; - newreqNode->correctedSeq_ = correntSn; + newreqNode->seq_ = sn; + newreqNode->chunksize_ = chunkSize; + newreqNode->location_ = location; + newreqNode->correctedSeq_ = correntSn; FillCommonFields(cinfo, newreqNode); reqlist_.push_back(newreqNode); @@ -415,8 +411,8 @@ void IOTracker::RecoverChunk(const ChunkIDInfo& cinfo, uint64_t offset, break; } - newreqNode->rawlength_ = len; - newreqNode->offset_ = offset; + newreqNode->rawlength_ = len; + newreqNode->offset_ = offset; FillCommonFields(cinfo, newreqNode); reqlist_.push_back(newreqNode); @@ -433,8 +429,8 @@ void IOTracker::RecoverChunk(const ChunkIDInfo& cinfo, uint64_t offset, } void IOTracker::FillCommonFields(ChunkIDInfo idinfo, RequestContext* req) { - req->optype_ = type_; - req->idinfo_ = idinfo; + req->optype_ = type_; + req->idinfo_ = idinfo; req->done_->SetIOTracker(this); } @@ -459,9 +455,7 @@ void IOTracker::InitDiscardOption(const DiscardOption& opt) { discardOption_ = opt; } -int IOTracker::Wait() { - return iocv_.Wait(); -} +int IOTracker::Wait() { return iocv_.Wait(); } void IOTracker::Done() { if (type_ == OpType::READ || type_ == OpType::WRITE) { @@ -510,15 +504,15 @@ void IOTracker::Done() { MetricHelper::IncremUserEPSCount(fileMetric_, type_); if (type_ == OpType::READ || type_ == OpType::WRITE) { if (LIBCURVE_ERROR::EPOCH_TOO_OLD == errcode_) { - LOG(WARNING) << "file [" << fileMetric_->filename << "]" - << ", epoch too old, OpType = " << OpTypeToString(type_) - << ", offset = " << offset_ - << ", length = " << 
length_;
+            LOG(WARNING)
+                << "file [" << fileMetric_->filename << "]"
+                << ", epoch too old, OpType = " << OpTypeToString(type_)
+                << ", offset = " << offset_ << ", length = " << length_;
        } else {
            LOG(ERROR) << "file [" << fileMetric_->filename << "]"
-                << ", IO Error, OpType = " << OpTypeToString(type_)
-                << ", offset = " << offset_
-                << ", length = " << length_;
+                       << ", IO Error, OpType = " << OpTypeToString(type_)
+                       << ", offset = " << offset_
+                       << ", length = " << length_;
        }
    } else {
        if (OpType::CREATE_CLONE == type_ &&
@@ -533,13 +527,13 @@
    DestoryRequestList();

-    // scc_和aioctx都为空的时候肯定是个同步调用
+    // When both scc_ and aioctx are empty, it is definitely a synchronous call.
    if (scc_ == nullptr && aioctx_ == nullptr) {
        iocv_.Complete(ToReturnCode());
        return;
    }

-    // 异步函数调用,在此处发起回调
+    // Asynchronous call: initiate the callback here
    if (aioctx_ != nullptr) {
        aioctx_->ret = ToReturnCode();
        aioctx_->cb(aioctx_);
@@ -548,7 +542,7 @@
        scc_->Run();
    }

-    // 回收当前io tracker
+    // Recycle the current io tracker
    iomanager_->HandleAsyncIOResponse(this);
}
@@ -565,12 +559,13 @@ void IOTracker::ReturnOnFail() {
}

void IOTracker::ChunkServerErr2LibcurveErr(CHUNK_OP_STATUS errcode,
-        LIBCURVE_ERROR* errout) {
+                                           LIBCURVE_ERROR* errout) {
    switch (errcode) {
        case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS:
            *errout = LIBCURVE_ERROR::OK;
            break;
-        // chunk或者copyset对于用户来说是透明的,所以直接返回错误
+        // Chunks and copysets are transparent to users, so the error is
+        // returned directly
        case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST:
        case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST:
            *errout = LIBCURVE_ERROR::NOTEXIST;
@@ -599,5 +594,5 @@
    }
}

-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve
diff --git a/src/client/io_tracker.h b/src/client/io_tracker.h
index 6369410ae3..e87ffcc23b 100644
--- a/src/client/io_tracker.h
+++ b/src/client/io_tracker.h
@@ -49,44 +49,45 @@ class IOManager;
 class FileSegment;
 class DiscardTaskManager;

-// IOTracker用于跟踪一个用户IO,因为一个用户IO可能会跨chunkserver,
-// 因此在真正下发的时候会被拆分成多个小IO并发的向下发送,因此我们需要
-// 跟踪发送的request的执行情况。
+// IOTracker tracks a single user IO. Because a user IO may cross
+// chunkservers, it is split into multiple small IOs that are dispatched
+// concurrently when actually issued, so we need to track the execution
+// status of every request that was sent.
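// A simplified illustration of why tracking is needed: an IO that crosses
// chunk boundaries is split into per-chunk sub-requests dispatched
// concurrently, and the tracker completes only after all of them come back.
// The chunk size below is a made-up constant; the real Splitor also consults
// the metacache and handles striped files.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

struct SubIO {
    uint64_t chunkIndex;
    uint64_t offsetInChunk;
    uint64_t length;
};

std::vector<SubIO> SplitByChunk(uint64_t offset, uint64_t length,
                                uint64_t chunkSize) {
    std::vector<SubIO> subs;
    while (length > 0) {
        uint64_t offInChunk = offset % chunkSize;
        uint64_t len = std::min(length, chunkSize - offInChunk);
        subs.push_back({offset / chunkSize, offInChunk, len});
        offset += len;
        length -= len;
    }
    return subs;
}

int main() {
    // A 12 MiB IO starting at 10 MiB with 16 MiB chunks spans two chunks,
    // so a tracker would wait for two sub-requests to return.
    for (const auto& s : SplitByChunk(10ULL << 20, 12ULL << 20, 16ULL << 20)) {
        std::printf("chunk %llu off %llu len %llu\n",
                    (unsigned long long)s.chunkIndex,
                    (unsigned long long)s.offsetInChunk,
                    (unsigned long long)s.length);
    }
    return 0;
}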
class CURVE_CACHELINE_ALIGNMENT IOTracker { friend class Splitor; public: - IOTracker(IOManager* iomanager, - MetaCache* mc, - RequestScheduler* scheduler, - FileMetric* clientMetric = nullptr, - bool disableStripe = false); + IOTracker(IOManager* iomanager, MetaCache* mc, RequestScheduler* scheduler, + FileMetric* clientMetric = nullptr, bool disableStripe = false); ~IOTracker() = default; /** - * @brief StartRead同步读 - * @param buf 读缓冲区 - * @param offset 读偏移 - * @param length 读长度 - * @param mdsclient 透传给splitor,与mds通信 - * @param fileInfo 当前io对应文件的基本信息 + * @brief StartRead Sync Read + * @param buf read buffer + * @param offset read offset + * @param length Read length + * @param mdsclient transparently transmits to the splitter for + * communication with mds + * @param fileInfo Basic information of the file corresponding to the + * current io */ void StartRead(void* buf, off_t offset, size_t length, MDSClient* mdsclient, const FInfo_t* fileInfo, Throttle* throttle = nullptr); /** - * @brief StartWrite同步写 - * @param buf 写缓冲区 - * @param offset 写偏移 - * @param length 写长度 - * @param mdsclient 透传给splitor,与mds通信 - * @param fileInfo 当前io对应文件的基本信息 + * @brief StartWrite Sync Write + * @param buf write buffer + * @param offset write offset + * @param length Write length + * @param mdsclient transparently transmits to the splitter for + * communication with mds + * @param fileInfo Basic information of the file corresponding to the + * current io */ void StartWrite(const void* buf, off_t offset, size_t length, MDSClient* mdsclient, const FInfo_t* fileInfo, - const FileEpoch* fEpoch, - Throttle* throttle = nullptr); + const FileEpoch* fEpoch, Throttle* throttle = nullptr); /** * @brief start an async read operation @@ -105,8 +106,8 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { * @param fEpoch file epoch info */ void StartAioWrite(CurveAioContext* ctx, MDSClient* mdsclient, - const FInfo_t* fileInfo, - const FileEpoch* fEpoch, Throttle* throttle = nullptr); + const FInfo_t* fileInfo, const FileEpoch* fEpoch, + Throttle* throttle = nullptr); void StartDiscard(off_t offset, size_t length, MDSClient* mdsclient, const FInfo_t* fileInfo, DiscardTaskManager* taskManager); @@ -116,46 +117,44 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { DiscardTaskManager* taskManager); /** - * chunk相关接口是提供给snapshot使用的,上层的snapshot和file - * 接口是分开的,在IOTracker这里会将其统一,这样对下层来说不用 - * 感知上层的接口类别。 - * @param:chunkidinfo 目标chunk - * @param: seq是快照版本号 - * @param: offset是快照内的offset - * @param: len是要读取的长度 - * @param: buf是读取缓冲区 - * @param: scc是异步回调 + * The chunk-related interfaces are intended for use by snapshots. The + * upper-level snapshot and file interfaces are separate. However, in the + * IOTracker, they are unified so that the lower levels do not need to be + * aware of the upper-level interface category. 
+ * @param: chunkidinfo The target chunk + * @param: seq is the snapshot version number + * @param: offset is the offset within the snapshot + * @param: len is the length to be read + * @param: buf is the read buffer + * @param: scc is the asynchronous callback */ - void ReadSnapChunk(const ChunkIDInfo &cinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc); + void ReadSnapChunk(const ChunkIDInfo& cinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc); /** - * 删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param:chunkidinfo 目标chunk - * @param: seq是需要修正的版本号 + * Delete snapshots generated during this dump or left over from history + * If no snapshot is generated during the dump process, modify the + * correctedSn of the chunk + * @param: chunkidinfo is the target chunk + * @param: seq is the version number that needs to be corrected */ - void DeleteSnapChunkOrCorrectSn(const ChunkIDInfo &cinfo, - uint64_t correctedSeq); + void DeleteSnapChunkOrCorrectSn(const ChunkIDInfo& cinfo, + uint64_t correctedSeq); /** - * 获取chunk的版本信息,chunkInfo是出参 - * @param:chunkidinfo 目标chunk - * @param: chunkInfo是快照的详细信息 + * Obtain the version information of the chunk, where chunkInfo is the + * output parameter + * @param: chunkidinfo target chunk + * @param: chunkInfo is the detailed information of the snapshot */ - void GetChunkInfo(const ChunkIDInfo &cinfo, - ChunkInfoDetail *chunkInfo); + void GetChunkInfo(const ChunkIDInfo& cinfo, ChunkInfoDetail* chunkInfo); /** - * @brief lazy 创建clone chunk - * @param:location 数据源的url - * @param:chunkidinfo 目标chunk - * @param:sn chunk的序列号 - * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn - * @param:chunkSize chunk的大小 - * @param: scc是异步回调 + * @brief lazy Create clone chunk + * @param: location is the URL of the data source + * @param: chunkidinfo target chunk + * @param: sn chunk's serial number + * @param: correntSn used to modify the chunk when CreateCloneChunk + * @param: chunkSize chunk size + * @param: scc is an asynchronous callback */ void CreateCloneChunk(const std::string& location, const ChunkIDInfo& chunkidinfo, uint64_t sn, @@ -163,47 +162,51 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { SnapCloneClosure* scc); /** - * @brief 实际恢复chunk数据 - * @param:chunkidinfo chunkidinfo - * @param:offset 偏移 - * @param:len 长度 - * @param:chunkSize chunk的大小 - * @param: scc是异步回调 + * @brief Actual recovery chunk data + * @param: chunkidinfo chunkidinfo + * @param: offset offset + * @param: len length + * @param: chunkSize Chunk size + * @param: scc is an asynchronous callback */ void RecoverChunk(const ChunkIDInfo& chunkIdInfo, uint64_t offset, uint64_t len, SnapCloneClosure* scc); /** - * Wait用于同步接口等待,因为用户下来的IO被client内部线程接管之后 - * 调用就可以向上返回了,但是用户的同步IO语意是要等到结果返回才能向上 - * 返回的,因此这里的Wait会让用户线程等待。 - * @return: 返回读写信息,异步IO的时候返回0或-1.0代表成功,-1代表失败 - * 同步IO返回length或-1,length代表真实读写长度,-1代表读写失败 + * Wait is used for synchronous interface waiting. When the user's IO is + * taken over by client internal threads, the call can return to the upper + * layer. However, the user's synchronous IO semantics require waiting for + * the result to return before returning to the upper layer, so Wait here + * will make the user thread wait. + * @return: Returns read/write information. For asynchronous IO, it returns + * 0 or -1. 0 means success, -1 means failure. For synchronous IO, it + * returns the length or -1. 'length' represents the actual read/write + * length, and -1 represents read/write failure. 
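// The sync/async contract described above, seen from the caller's side.
// Function and field names follow the C wrappers declared in
// include/client/libcurve.h as assumed here; treat this as an illustrative
// usage sketch (no error handling, fd assumed already opened).
#include <semaphore.h>

#include <cstdio>

#include "include/client/libcurve.h"

static sem_t g_done;

static void OnAioDone(CurveAioContext* ctx) {
    // The asynchronous result arrives here: ctx->ret is the byte count or a
    // negative error code.
    std::printf("aio finished, ret = %d\n", ctx->ret);
    sem_post(&g_done);
}

void DemoCallingModes(int fd, char* buf, size_t len) {
    // Synchronous: returns the number of bytes actually read, or -1.
    int nread = Read(fd, buf, /*offset=*/0, len);
    std::printf("sync read returned %d\n", nread);

    // Asynchronous: the submission itself returns 0 or -1; the real result
    // is delivered later through the callback.
    sem_init(&g_done, 0, 0);
    CurveAioContext ctx;
    ctx.op = LIBCURVE_OP_READ;
    ctx.offset = 0;
    ctx.length = len;
    ctx.buf = buf;
    ctx.cb = OnAioDone;
    if (AioRead(fd, &ctx) == 0) {
        sem_wait(&g_done);  // only for the demo; real callers just return
    }
}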
*/ int Wait(); /** - * 每个request都要有自己的OP类型,这里提供接口可以在io拆分的时候获取类型 + * Each request must have its own OP type, and an interface is provided here + * to obtain the type during IO splitting */ - OpType Optype() {return type_;} + OpType Optype() { return type_; } - // 设置操作类型,测试使用 + // Set operation type, test usage void SetOpType(OpType type) { type_ = type; } /** - * 因为client的IO都是异步发送的,且一个IO被拆分成多个Request,因此在异步 - * IO返回后就应该告诉IOTracker当前request已经返回,这样tracker可以处理 - * 返回的request。 - * @param: 待处理的异步request + * Because client IOs are all sent asynchronously, and a single IO is split + * into multiple Requests, after asynchronous IO returns, it should inform + * the IOTracker that the current request has returned. This way, the + * tracker can handle the returned request. + * @param: The asynchronous request to be processed. */ void HandleResponse(RequestContext* reqctx); /** - * 获取当前tracker id信息 + * Obtain the current tracker ID information */ - uint64_t GetID() const { - return id_; - } + uint64_t GetID() const { return id_; } // set user data type void SetUserDataType(const UserDataType dataType) { @@ -222,9 +225,7 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { readDatas_[subIoIndex] = data; } - bool IsStripeDisabled() const { - return disableStripe_; - } + bool IsStripeDisabled() const { return disableStripe_; } static void InitDiscardOption(const DiscardOption& opt); @@ -232,38 +233,40 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { void ReleaseAllSegmentLocks(); /** - * 当IO返回的时候调用done,由done负责向上返回 + * When IO returns, call done, which is responsible for returning upwards */ void Done(); /** - * 在io拆分或者,io分发失败的时候需要调用,设置返回状态,并向上返回 + * When IO splitting or IO distribution fails, it needs to be called, set + * the return status, and return upwards */ void ReturnOnFail(); /** - * 用户下来的大IO会被拆分成多个子IO,这里在返回之前将子IO资源回收 + * The user's incoming large IO will be split into multiple sub IOs, and the + * sub IO resources will be reclaimed before returning here */ void DestoryRequestList(); /** - * 填充request context common字段 - * @param: idinfo为chunk的id信息 - * @param: req为待填充的request context + * Fill in the request context common field + * @param: IDInfo is the ID information of the chunk + * @param: req is the request context to be filled in */ void FillCommonFields(ChunkIDInfo idinfo, RequestContext* req); /** - * chunkserver errcode转化为libcurve client的errode - * @param: errcode为chunkserver侧的errode - * @param[out]: errout为libcurve自己的errode + * Convert chunkserver errcode to libcurve client errode + * @param: errcode is the error code on the chunkserver side + * @param[out]: errout is libcurve's own errode */ void ChunkServerErr2LibcurveErr(curve::chunkserver::CHUNK_OP_STATUS errcode, LIBCURVE_ERROR* errout); /** - * 获取一个初始化后的RequestContext - * return: 如果分配失败或者初始化失败,返回nullptr - * 反之,返回一个指针 + * Obtain an initialized RequestContext + * @return: If allocation or initialization fails, return nullptr + * On the contrary, return a pointer */ RequestContext* GetInitedRequestContext() const; @@ -283,8 +286,7 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { // perform write operation void DoWrite(MDSClient* mdsclient, const FInfo_t* fileInfo, - const FileEpoch* fEpoch, - Throttle* throttle); + const FileEpoch* fEpoch, Throttle* throttle); void DoDiscard(MDSClient* mdsclient, const FInfo_t* fileInfo, DiscardTaskManager* taskManager); @@ -296,12 +298,13 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { } private: - // io 类型 - OpType type_; + // IO type + OpType type_; - // 当前IO的数据内容,data是读写数据的buffer - off_t offset_; - uint64_t 
length_;
+    // The current IO data content, where data is the buffer for reading and
+    // writing data
+    off_t offset_;
+    uint64_t length_;

    // user data pointer
    void* data_;
@@ -315,48 +318,52 @@
    // save read data
    std::vector readDatas_;

-    // 当用户下发的是同步IO的时候,其需要在上层进行等待,因为client的
-    // IO发送流程全部是异步的,因此这里需要用条件变量等待,待异步IO返回
-    // 之后才将这个等待的条件变量唤醒,然后向上返回。
-    IOConditionVariable iocv_;
+    // When the user issues synchronous IO, the caller must wait in the upper
+    // layer, because the client's IO sending flow is entirely asynchronous.
+    // A condition variable is used to wait here; once the asynchronous IO
+    // returns, the waiting condition variable is woken up and the call
+    // returns upwards.
+    IOConditionVariable iocv_;

-    // 异步IO的context,在异步IO返回时,通过调用aioctx
-    // 的异步回调进行返回。
+    // Context of an asynchronous IO; when the asynchronous IO returns, the
+    // result is delivered by invoking the aioctx asynchronous callback.
    CurveAioContext* aioctx_;

-    // 当前IO的errorcode
+    // The errorcode of the current IO
    LIBCURVE_ERROR errcode_;

-    // 当前IO被拆分成reqcount_个小IO
+    // The current IO is split into reqcount_ small IOs
    std::atomic reqcount_;

-    // 大IO被拆分成多个request,这些request放在reqlist中国保存
+    // A large IO is split into multiple requests, and these requests are
+    // kept in the reqlist
+    std::vector reqlist_;

    // store segment indices that can be discarded
    std::unordered_set discardSegments_;

-    // metacache为当前fileinstance的元数据信息
+    // metacache is the metadata information of the current fileinstance
    MetaCache* mc_;

-    // scheduler用来将用户线程与client自己的线程切分
-    // 大IO被切分之后,将切分的reqlist传给scheduler向下发送
+    // The scheduler is used to separate user threads from the client's own
+    // threads. After a large IO is split, the resulting reqlist is handed to
+    // the scheduler to be sent downwards
    RequestScheduler* scheduler_;

-    // 对于异步IO,Tracker需要向上层通知当前IO已经处理结束
-    // iomanager可以将该tracker释放
+    // For asynchronous IO, the Tracker needs to notify the upper layer that
+    // the current IO has finished, so the iomanager can release this tracker
    IOManager* iomanager_;

-    // 发起时间
+    // Initiation time
    uint64_t opStartTimePoint_;

-    // client端的metric统计信息
+    // Metric statistics on the client side
    FileMetric* fileMetric_;

-    // 当前tracker的id
+    // The ID of the current tracker
    uint64_t id_;

-    // 快照克隆系统异步调用回调指针
+    // Asynchronous call callback pointer for the snapshot clone system
    SnapCloneClosure* scc_;

    bool disableStripe_;
@@ -365,11 +372,11 @@
    // so store corresponding segment lock and release after operations finished
    std::vector segmentLocks_;

-    // id生成器
+    // ID generator
    static std::atomic tracekerID_;

    static DiscardOption discardOption_;
};

-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve

#endif  // SRC_CLIENT_IO_TRACKER_H_
diff --git a/src/client/iomanager.h b/src/client/iomanager.h
index e985b1527f..04664fe870 100644
--- a/src/client/iomanager.h
+++ b/src/client/iomanager.h
@@ -23,8 +23,8 @@
 #ifndef SRC_CLIENT_IOMANAGER_H_
 #define SRC_CLIENT_IOMANAGER_H_

-#include "src/client/io_tracker.h"
 #include "src/client/client_common.h"
+#include "src/client/io_tracker.h"
 #include "src/common/concurrent/concurrent.h"

 namespace curve {
@@ -34,48 +34,41 @@ using curve::common::Atomic;

 class IOManager {
  public:
-    IOManager() {
-        id_ = idRecorder_.fetch_add(1, std::memory_order_relaxed);
-    }
+    IOManager() { id_ = idRecorder_.fetch_add(1, std::memory_order_relaxed); }
    virtual ~IOManager() = default;

    /**
-     * @brief 获取当前iomanager的ID信息
+     *
@brief Get the ID information of the current iomanager */ - virtual IOManagerID ID() const { - return id_; - } + virtual IOManagerID ID() const { return id_; } /** - * @brief 获取rpc发送令牌 + * @brief Get rpc send token */ - virtual void GetInflightRpcToken() { - return; - } + virtual void GetInflightRpcToken() { return; } /** - * @brief 释放rpc发送令牌 + * @brief Release RPC Send Token */ - virtual void ReleaseInflightRpcToken() { - return; - } + virtual void ReleaseInflightRpcToken() { return; } /** - * @brief 处理异步返回的response - * @param: iotracker是当前reponse的归属 + * @brief handles response returned asynchronously + * @param: iotracker The ownership of the current reponse */ virtual void HandleAsyncIOResponse(IOTracker* iotracker) = 0; protected: - // iomanager id目的是为了让底层RPC知道自己归属于哪个iomanager + // The purpose of the iomanager id is to let the underlying RPC know which + // iomanager it belongs to IOManagerID id_; private: // global id recorder - static Atomic idRecorder_; + static Atomic idRecorder_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_IOMANAGER_H_ diff --git a/src/client/iomanager4chunk.h b/src/client/iomanager4chunk.h index f9cedeca02..209829f3ef 100644 --- a/src/client/iomanager4chunk.h +++ b/src/client/iomanager4chunk.h @@ -24,15 +24,15 @@ #define SRC_CLIENT_IOMANAGER4CHUNK_H_ #include -#include // NOLINT +#include // NOLINT +#include // NOLINT #include -#include // NOLINT -#include "src/client/metacache.h" -#include "src/client/iomanager.h" +#include "include/curve_compiler_specific.h" #include "src/client/client_common.h" +#include "src/client/iomanager.h" +#include "src/client/metacache.h" #include "src/client/request_scheduler.h" -#include "include/curve_compiler_specific.h" namespace curve { namespace client { @@ -41,107 +41,109 @@ class IOManager4Chunk : public IOManager { public: IOManager4Chunk(); ~IOManager4Chunk() = default; - bool Initialize(IOOption ioOpt, MDSClient* mdsclient); + bool Initialize(IOOption ioOpt, MDSClient* mdsclient); - /** - * 读取seq版本号的快照数据 - * @param:chunkidinfo 目标chunk - * @param: seq是快照版本号 - * @param: offset是快照内的offset - * @param: len是要读取的长度 - * @param: buf是读取缓冲区 - * @param: scc是异步回调 - * @return:成功返回真实读取长度,失败为-1 - */ - int ReadSnapChunk(const ChunkIDInfo &chunkidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc); - /** - * 删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param:chunkidinfo 目标chunk - * @param: correctedSeq是需要修正的版本号 - */ - int DeleteSnapChunkOrCorrectSn(const ChunkIDInfo &chunkidinfo, + /** + * Read snapshot data of seq version number + * @param: chunkidinfo target chunk + * @param: seq is the snapshot version number + * @param: offset is the offset within the snapshot + * @param: len is the length to be read + * @param: buf is a read buffer + * @param: scc is an asynchronous callback + * @return: Successfully returned the true read length, failed with -1 + */ + int ReadSnapChunk(const ChunkIDInfo& chunkidinfo, uint64_t seq, + uint64_t offset, uint64_t len, char* buf, + SnapCloneClosure* scc); + /** + * Delete snapshots generated during this dump or left over from history + * If no snapshot is generated during the dump process, modify the + * correctedSn of the chunk + * @param: chunkidinfo target chunk + * @param: correctedSeq is the version number that needs to be corrected + */ + int DeleteSnapChunkOrCorrectSn(const ChunkIDInfo& chunkidinfo, uint64_t correctedSeq); - /** - * 获取chunk的版本信息,chunkInfo是出参 - * 
@param:chunkidinfo 目标chunk
-     * @param: chunkInfo是快照的详细信息
-     */
-    int GetChunkInfo(const ChunkIDInfo &chunkidinfo,
-                     ChunkInfoDetail *chunkInfo);
+    /**
+     * Obtain the version information of the chunk, where chunkInfo is the
+     * output parameter
+     * @param: chunkidinfo target chunk
+     * @param: chunkInfo is the detailed information of the snapshot
+     */
+    int GetChunkInfo(const ChunkIDInfo& chunkidinfo,
+                     ChunkInfoDetail* chunkInfo);

-    /**
-     * @brief lazy 创建clone chunk
-     * @detail
-     *  - location的格式定义为 A@B的形式。
-     *  - 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址;
-     *  - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs
-     *
-     * @param:location 数据源的url
-     * @param:chunkidinfo 目标chunk
-     * @param:sn chunk的序列号
-     * @param:chunkSize chunk的大小
-     * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn
-     * @param: scc是异步回调
-     * @return 成功返回0, 否则-1
-     */
-    int CreateCloneChunk(const std::string &location,
-                         const ChunkIDInfo &chunkidinfo,
-                         uint64_t sn,
-                         uint64_t correntSn,
-                         uint64_t chunkSize,
-                         SnapCloneClosure* scc);
+    /**
+     * @brief lazy Create clone chunk
+     * @detail
+     *  - The format of the location is defined as A@B.
+     *  - If the source data is on S3, the location format is uri@s3, where uri
+     * is the actual address of the chunk object.
+     *  - If the source data is on CurveFS, the location format is
+     * /filename/chunkindex@cs.
+     *
+     * @param: location URL of the data source
+     * @param: chunkidinfo target chunk
+     * @param: sn chunk's serial number
+     * @param: chunkSize Chunk size
+     * @param: correntSn used to modify the chunk when creating CloneChunk
+     * @param: scc is an asynchronous callback
+     * @return successfully returns 0, otherwise -1
+     */
+    int CreateCloneChunk(const std::string& location,
+                         const ChunkIDInfo& chunkidinfo, uint64_t sn,
+                         uint64_t correntSn, uint64_t chunkSize,
+                         SnapCloneClosure* scc);

    /**
-     * @brief 实际恢复chunk数据
+     * @brief Actually recover chunk data
     * @param chunkidinfo chunkidinfo
-     * @param offset 偏移
-     * @param len 长度
-     * @param scc 异步回调
-     * @return 成功返回0, 否则-1
+     * @param offset offset
+     * @param len length
+     * @param scc asynchronous callback
+     * @return successfully returns 0, otherwise -1
     */
    int RecoverChunk(const ChunkIDInfo& chunkIdInfo, uint64_t offset,
                     uint64_t len, SnapCloneClosure* scc);

    /**
-     * 因为curve client底层都是异步IO,每个IO会分配一个IOtracker跟踪IO
-     * 当这个IO做完之后,底层需要告知当前io manager来释放这个IOTracker,
-     * HandleAsyncIOResponse负责释放IOTracker
-     * @param: 是异步返回的io
+     * Because the bottom layer of the curve client is all asynchronous IO,
+     * each IO is assigned an IOTracker. After the IO finishes, the lower
+     * layer must inform the current IO manager to release that tracker;
+     * HandleAsyncIOResponse is responsible for releasing the IOTracker
+     * @param: the io returned asynchronously
     */
    void HandleAsyncIOResponse(IOTracker* iotracker) override;

-    /**
-     * 析构,回收资源
-     */
+    /**
+     * Destruct and reclaim resources
+     */
    void UnInitialize();

-    /**
-     * 获取metacache,测试代码使用
-     */
-    MetaCache* GetMetaCache() {return &mc_;}
-    /**
-     * 设置scahuler,测试代码使用
-     */
+    /**
+     * Get the metacache, for use by test code
+     */
+    MetaCache* GetMetaCache() { return &mc_; }
+    /**
+     * Set the scheduler, for use by test code
+     */
    void SetRequestScheduler(RequestScheduler* scheduler) {
-       scheduler_ = scheduler;
+        scheduler_ = scheduler;
    }

 private:
-    // 每个IOManager都有其IO配置,保存在iooption里
+    // Each IOManager has its IO configuration, which is saved in the iooption
    IOOption ioopt_;

-    // metacache存储当前snapshot client元数据信息
+    // metacache stores the current snapshot client metadata
information + MetaCache mc_; - // IO最后由schedule模块向chunkserver端分发,scheduler由IOManager创建和释放 + // The IO is finally distributed by the schedule module to the chunkserver + // end, and the scheduler is created and released by the IOManager RequestScheduler* scheduler_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_IOMANAGER4CHUNK_H_ diff --git a/src/client/iomanager4file.cpp b/src/client/iomanager4file.cpp index b6f1b09527..992554264d 100644 --- a/src/client/iomanager4file.cpp +++ b/src/client/iomanager4file.cpp @@ -20,14 +20,15 @@ * Author: tongguangxun */ +#include "src/client/iomanager4file.h" + #include -#include // NOLINT +#include // NOLINT -#include "src/client/metacache.h" -#include "src/client/iomanager4file.h" #include "src/client/file_instance.h" #include "src/client/io_tracker.h" +#include "src/client/metacache.h" #include "src/client/splitor.h" namespace curve { @@ -36,8 +37,7 @@ Atomic IOManager::idRecorder_(1); IOManager4File::IOManager4File() : scheduler_(nullptr), exit_(false) {} bool IOManager4File::Initialize(const std::string& filename, - const IOOption& ioOpt, - MDSClient* mdsclient) { + const IOOption& ioOpt, MDSClient* mdsclient) { ioopt_ = ioOpt; disableStripe_ = false; @@ -55,8 +55,9 @@ bool IOManager4File::Initialize(const std::string& filename, return false; } - // IO Manager中不控制inflight IO数量,所以传入UINT64_MAX - // 但是IO Manager需要控制所有inflight IO在关闭的时候都被回收掉 + // The IO Manager does not control the number of inflight IOs, so UINT64_MAX + // is passed. However, the IO Manager needs to ensure that all inflight IOs + // are reclaimed upon shutdown. inflightCntl_.SetMaxInflightNum(UINT64_MAX); scheduler_ = new (std::nothrow) RequestScheduler(); @@ -114,7 +115,7 @@ void IOManager4File::UnInitialize() { { std::unique_lock lk(exitMtx); - exitCv.wait(lk, [&](){ return exitFlag; }); + exitCv.wait(lk, [&]() { return exitFlag; }); } taskPool_.Stop(); @@ -128,8 +129,9 @@ void IOManager4File::UnInitialize() { discardTaskManager_->Stop(); { - // 这个锁保证设置exit_和delete scheduler_是原子的 - // 这样保证在scheduler_被析构的时候lease线程不会使用scheduler_ + // This lock ensures that setting exit_ and deleting scheduler_ are + // atomic. This ensures that the lease thread won't use scheduler_ when + // it is being destructed. 
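// The teardown ordering implied by the two comments above, reduced to its
// core with illustrative names (not the real members): drain all inflight IO
// first, then flip the exit flag and free the scheduler under the same mutex
// the lease-notification path takes before touching it.
#include <mutex>

struct MiniTeardown {
    std::mutex exitMtx;
    bool exited = false;
    int* scheduler = nullptr;  // stand-in for RequestScheduler*

    void UnInit() {
        // ... wait here until every inflight IO has come back ...
        std::lock_guard<std::mutex> lk(exitMtx);
        exited = true;      // made atomic with the delete by exitMtx
        delete scheduler;
        scheduler = nullptr;
    }

    void OnLeaseEvent() {   // called from the lease renewal thread
        std::lock_guard<std::mutex> lk(exitMtx);
        if (exited || scheduler == nullptr) {
            return;         // iomanager already torn down; nothing to schedule
        }
        // safe to use *scheduler here
    }
};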
std::unique_lock lk(exitMtx_); exit_ = true; @@ -140,8 +142,8 @@ void IOManager4File::UnInitialize() { } } -int IOManager4File::Read(char* buf, off_t offset, - size_t length, MDSClient* mdsclient) { +int IOManager4File::Read(char* buf, off_t offset, size_t length, + MDSClient* mdsclient) { MetricHelper::IncremUserRPSCount(fileMetric_, OpType::READ); FlightIOGuard guard(this); @@ -162,9 +164,7 @@ int IOManager4File::Read(char* buf, off_t offset, } } -int IOManager4File::Write(const char* buf, - off_t offset, - size_t length, +int IOManager4File::Write(const char* buf, off_t offset, size_t length, MDSClient* mdsclient) { MetricHelper::IncremUserRPSCount(fileMetric_, OpType::WRITE); FlightIOGuard guard(this); @@ -175,8 +175,7 @@ int IOManager4File::Write(const char* buf, IOTracker temp(this, &mc_, scheduler_, fileMetric_, disableStripe_); temp.SetUserDataType(UserDataType::IOBuffer); temp.StartWrite(&data, offset, length, mdsclient, this->GetFileInfo(), - this->GetFileEpoch(), - throttle_.get()); + this->GetFileEpoch(), throttle_.get()); int rc = temp.Wait(); return rc; @@ -223,8 +222,7 @@ int IOManager4File::AioWrite(CurveAioContext* ctx, MDSClient* mdsclient, inflightCntl_.IncremInflightNum(); auto task = [this, ctx, mdsclient, temp]() { temp->StartAioWrite(ctx, mdsclient, this->GetFileInfo(), - this->GetFileEpoch(), - throttle_.get()); + this->GetFileEpoch(), throttle_.get()); }; taskPool_.Enqueue(task); @@ -286,9 +284,7 @@ void IOManager4File::UpdateFileThrottleParams( } } -void IOManager4File::SetDisableStripe() { - disableStripe_ = true; -} +void IOManager4File::SetDisableStripe() { disableStripe_ = true; } void IOManager4File::HandleAsyncIOResponse(IOTracker* iotracker) { inflightCntl_.DecremInflightNum(); @@ -330,5 +326,5 @@ void IOManager4File::GetInflightRpcToken() { inflightRpcCntl_.GetInflightToken(); } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/iomanager4file.h b/src/client/iomanager4file.h index eaecc8497f..9571a3845d 100644 --- a/src/client/iomanager4file.h +++ b/src/client/iomanager4file.h @@ -28,12 +28,13 @@ #include #include // NOLINT -#include // NOLINT -#include #include +#include // NOLINT +#include #include "include/curve_compiler_specific.h" #include "src/client/client_common.h" +#include "src/client/discard_task.h" #include "src/client/inflight_controller.h" #include "src/client/iomanager.h" #include "src/client/mds_client.h" @@ -42,7 +43,6 @@ #include "src/common/concurrent/concurrent.h" #include "src/common/concurrent/task_thread_pool.h" #include "src/common/throttle.h" -#include "src/client/discard_task.h" namespace curve { namespace client { @@ -57,14 +57,13 @@ class IOManager4File : public IOManager { ~IOManager4File() = default; /** - * 初始化函数 - * @param: filename为当前iomanager服务的文件名 - * @param: ioopt为当前iomanager的配置信息 - * @param: mdsclient向下透传给metacache - * @return: 成功true,失败false + * Initialization function + * @param: filename is the file name of the current iomanager service + * @param: ioopt is the configuration information of the current iomanager + * @param: mdsclient penetrates downwards to Metacache + * @return: Success true, failure false */ - bool Initialize(const std::string& filename, - const IOOption& ioOpt, + bool Initialize(const std::string& filename, const IOOption& ioOpt, MDSClient* mdsclient); /** @@ -73,39 +72,47 @@ class IOManager4File : public IOManager { void UnInitialize(); /** - * 同步模式读 - * @param: buf为当前待读取的缓冲区 - * @param:offset文件内的便宜 - * @parma:length为待读取的长度 - * @param: 
mdsclient透传给底层,在必要的时候与mds通信
-     * @return: 成功返回读取真实长度,-1为失败
+     * Synchronous mode read
+     * @param: buf is the current buffer to be read
+     * @param: offset is the offset in the file
+     * @param: length is the length to be read
+     * @param: mdsclient is passed through to the underlying layer and
+     * communicates with the mds when necessary
+     * @return: On success returns the number of bytes actually read, -1
+     * indicates failure
     */
    int Read(char* buf, off_t offset, size_t length, MDSClient* mdsclient);

    /**
-     * 同步模式写
-     * @param: mdsclient透传给底层,在必要的时候与mds通信
-     * @param: buf为当前待写入的缓冲区
-     * @param:offset文件内的便宜
-     * @param:length为待读取的长度
-     * @return: 成功返回写入真实长度,-1为失败
+     * Synchronous mode write
+     * @param: mdsclient is passed through to the underlying layer and
+     * communicates with the mds when necessary
+     * @param: buf is the current buffer to be written
+     * @param: offset is the offset within the file
+     * @param: length is the length to be written
+     * @return: On success returns the number of bytes actually written, -1
+     * indicates failure
     */
    int Write(const char* buf, off_t offset, size_t length,
              MDSClient* mdsclient);

    /**
-     * 异步模式读
-     * @param: mdsclient透传给底层,在必要的时候与mds通信
-     * @param: aioctx为异步读写的io上下文,保存基本的io信息
+     * Asynchronous mode read
+     * @param: mdsclient is passed through to the underlying layer and
+     * communicates with the mds when necessary
+     * @param: aioctx is an asynchronous read/write IO context that stores
+     * basic IO information
     * @param dataType type of aioctx->buf
-     * @return: 0为成功,小于0为失败
+     * @return: 0 indicates success, less than 0 indicates failure
     */
    int AioRead(CurveAioContext* aioctx, MDSClient* mdsclient,
                UserDataType dataType);

    /**
-     * 异步模式写
-     * @param: mdsclient透传给底层,在必要的时候与mds通信
-     * @param: aioctx为异步读写的io上下文,保存基本的io信息
+     * Asynchronous mode write
+     * @param: mdsclient is passed through to the underlying layer and
+     * communicates with the mds when necessary
+     * @param: aioctx is an asynchronous read/write IO context that stores
+     * basic IO information
     * @param dataType type of aioctx->buf
-     * @return: 0为成功,小于0为失败
+     * @return: 0 indicates success, less than 0 indicates failure
     */
    int AioWrite(CurveAioContext* aioctx, MDSClient* mdsclient,
                 UserDataType dataType);
@@ -128,88 +135,71 @@
    int AioDiscard(CurveAioContext* aioctx, MDSClient* mdsclient);

    /**
-     * @brief 获取rpc发送令牌
+     * @brief Get the rpc send token
     */
    void GetInflightRpcToken() override;

    /**
-     * @brief 释放rpc发送令牌
+     * @brief Release the rpc send token
     */
    void ReleaseInflightRpcToken() override;

    /**
-     * 获取metacache,测试代码使用
+     * Get the metacache, for use by test code
     */
-    MetaCache* GetMetaCache() {
-        return &mc_;
-    }
+    MetaCache* GetMetaCache() { return &mc_; }

    /**
-     * 设置scheduler,测试代码使用
+     * Set the scheduler, for use by test code
     */
    void SetRequestScheduler(RequestScheduler* scheduler) {
        scheduler_ = scheduler;
    }

    /**
-     * 获取metric信息,测试代码使用
+     * Get metric information, for use by test code
     */
-    FileMetric* GetMetric() {
-        return fileMetric_;
-    }
+    FileMetric* GetMetric() { return fileMetric_; }

    /**
-     * 重新设置io配置信息,测试使用
+     * Reset the IO configuration information, for testing
     */
-    void SetIOOpt(const IOOption& opt) {
-        ioopt_ = opt;
-    }
+    void SetIOOpt(const IOOption& opt) { ioopt_ = opt; }

    /**
-     * 测试使用,获取request scheduler
+     * For test use: get the request scheduler
     */
-    RequestScheduler* GetScheduler() {
-        return scheduler_;
-    }
+    RequestScheduler* GetScheduler() { return scheduler_; }

    /**
-     * lease excutor在检查到版本更新的时候,需要通知iomanager更新文件版本信息
-     * @param: fi为当前需要更新的文件信息
+     * When the lease executor detects a version
update, it needs to notify the
+     * iomanager to update the file version information
+     * @param: fi is the current file information that needs to be updated
     */
    void UpdateFileInfo(const FInfo_t& fi);

-    const FInfo* GetFileInfo() const {
-        return mc_.GetFileInfo();
-    }
+    const FInfo* GetFileInfo() const { return mc_.GetFileInfo(); }

    void UpdateFileEpoch(const FileEpoch& fEpoch) {
        mc_.UpdateFileEpoch(fEpoch);
    }

-    const FileEpoch* GetFileEpoch() const {
-        return mc_.GetFileEpoch();
-    }
+    const FileEpoch* GetFileEpoch() const { return mc_.GetFileEpoch(); }

    /**
-     * 返回文件最新版本号
+     * Return the latest version number of the file
     */
-    uint64_t GetLatestFileSn() const {
-        return mc_.GetLatestFileSn();
-    }
+    uint64_t GetLatestFileSn() const { return mc_.GetLatestFileSn(); }

    /**
-     * 更新文件最新版本号
+     * Update the latest version number of the file
     */
-    void SetLatestFileSn(uint64_t newSn) {
-        mc_.SetLatestFileSn(newSn);
-    }
+    void SetLatestFileSn(uint64_t newSn) { mc_.SetLatestFileSn(newSn); }

    /**
     * @brief get current file inodeid
     * @return file inodeid
     */
-    uint64_t InodeId() const {
-        return mc_.InodeId();
-    }
+    uint64_t InodeId() const { return mc_.InodeId(); }

    void UpdateFileThrottleParams(
        const common::ReadWriteThrottleParams& params);
@@ -220,26 +210,30 @@
    friend class LeaseExecutor;
    friend class FlightIOGuard;
    /**
-     * lease相关接口,当LeaseExecutor续约失败的时候,调用LeaseTimeoutDisableIO
-     * 将新下发的IO全部失败返回
+     * Lease-related interface: called when the LeaseExecutor fails to renew
+     * the lease, so that all newly issued IOs fail immediately
     */
    void LeaseTimeoutBlockIO();

    /**
-     * 当lease又续约成功的时候,LeaseExecutor调用该接口恢复IO
+     * When the lease is successfully renewed again, the LeaseExecutor calls
+     * this interface to resume IO
     */
    void ResumeIO();

    /**
-     * 当lesaeexcutor发现版本变更,调用该接口开始等待inflight回来,这段期间IO是hang的
+     * When the lease executor detects a version change, it calls this
+     * interface and waits for inflight IOs to come back. During this period,
+     * IO hangs
     */
    void BlockIO();

    /**
-     * 因为curve client底层都是异步IO,每个IO会分配一个IOtracker跟踪IO
-     * 当这个IO做完之后,底层需要告知当前io manager来释放这个IOTracker,
-     * HandleAsyncIOResponse负责释放IOTracker
-     * @param: iotracker是返回的异步io
+     * Because the bottom layer of the curve client is all asynchronous IO,
+     * each IO is assigned an IOTracker. After the IO finishes, the lower
+     * layer must inform the current IO manager to release that tracker;
+     * HandleAsyncIOResponse is responsible for releasing the IOTracker
+     * @param: iotracker is the returned asynchronous io
     */
    void HandleAsyncIOResponse(IOTracker* iotracker) override;
@@ -250,9 +244,7 @@
            iomanager->inflightCntl_.IncremInflightNum();
        }

-        ~FlightIOGuard() {
-            iomanager->inflightCntl_.DecremInflightNum();
-        }
+        ~FlightIOGuard() { iomanager->inflightCntl_.DecremInflightNum(); }

    private:
        IOManager4File* iomanager;
@@ -261,42 +253,45 @@
    bool IsNeedDiscard(size_t len) const;

 private:
-    // 每个IOManager都有其IO配置,保存在iooption里
+    // Each IOManager has its IO configuration, which is saved in the iooption
    IOOption ioopt_;

-    // metacache存储当前文件的所有元数据信息
+    // metacache stores all metadata information for the current file
    MetaCache mc_;

-    // IO最后由schedule模块向chunkserver端分发,scheduler由IOManager创建和释放
+    // The IO is finally distributed by the schedule module to the chunkserver
+    // end, and the scheduler is created and released by the IOManager
    RequestScheduler* scheduler_;

-    // client端metric统计信息
+    // Metric statistics on the client side
    FileMetric* fileMetric_;

-    // task thread pool为了将qemu线程与curve线程隔离
+    // The task thread pool is used to isolate the QEMU thread from the curve
+    // thread
    curve::common::TaskThreadPool taskPool_;

-    // inflight IO控制
+    // inflight IO control
    InflightControl inflightCntl_;

-    // inflight rpc控制
+    // inflight rpc control
    InflightControl inflightRpcCntl_;

    std::unique_ptr throttle_;

-    // 是否退出
+    // Exit or not
    bool exit_;

-    // lease续约线程与qemu一侧线程调用是并发的
-    // qemu在调用close的时候会关闭iomanager及其对应
-    // 资源。lease续约线程在续约成功或失败的时候会通知iomanager的
-    // scheduler线程现在需要block IO或者resume IO,所以
-    // 如果在lease续约线程需要通知iomanager的时候,这时候
-    // 如果iomanager的资源scheduler已经被释放了,就会
-    // 导致crash,所以需要对这个资源加一把锁,在退出的时候
-    // 不会有并发的情况,保证在资源被析构的时候lease续约
-    // 线程不会再用到这些资源.
+    // The lease renewal thread and the QEMU-side thread are concurrent.
+    // When QEMU calls close, it closes the iomanager and its corresponding
+    // resources. The lease renewal thread notifies the iomanager's scheduler
+    // thread when renewal succeeds or fails, indicating whether it needs to
+    // block or resume IO. Therefore, if the lease renewal thread needs to
+    // notify the iomanager at this point, and if the iomanager's scheduler
+    // resources have already been released, it may lead to a crash. So, it's
+    // necessary to add a lock to protect this resource, ensuring that there is
+    // no concurrency when exiting. This ensures that the lease renewal thread
+    // won't use these resources when they are being destructed.
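// The renewal-failure policy that drives the block/resume calls above,
// condensed into a few lines with illustrative names (a sketch, not the real
// LeaseExecutor): the lease is refreshed several times per term, and IO is
// blocked only after every refresh slot in one term has failed in a row.
#include <atomic>
#include <cstdint>

class MiniLeaseWatchdog {
 public:
    explicit MiniLeaseWatchdog(uint32_t refreshTimesPerLease)
        : limit_(refreshTimesPerLease) {}

    // Called by the periodic refresh task with each attempt's outcome.
    void OnRefreshResult(bool ok) {
        if (ok) {
            failed_.store(0);
            valid_.store(true);    // lease healthy again: resume IO
            return;
        }
        if (failed_.fetch_add(1) + 1 >= limit_) {
            valid_.store(false);   // whole term failed: block new IO
        }
    }

    bool LeaseValid() const { return valid_.load(); }

 private:
    const uint32_t limit_;
    std::atomic<uint32_t> failed_{0};
    std::atomic<bool> valid_{true};
};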
std::mutex exitMtx_;

    // enable/disable stripe for read/write of stripe file
diff --git a/src/client/lease_executor.cpp b/src/client/lease_executor.cpp
index c8db8ddd30..797c0f0075 100644
--- a/src/client/lease_executor.cpp
+++ b/src/client/lease_executor.cpp
@@ -19,11 +19,12 @@
 * File Created: Saturday, 23rd February 2019 1:41:31 pm
 * Author: tongguangxun
 */
+#include "src/client/lease_executor.h"
+
 #include

-#include "src/common/timeutility.h"
-#include "src/client/lease_executor.h"
 #include "src/client/service_helper.h"
+#include "src/common/timeutility.h"

 using curve::common::TimeUtility;
@@ -145,9 +146,7 @@ void LeaseExecutor::Stop() {
    }
}

-bool LeaseExecutor::LeaseValid() {
-    return isleaseAvaliable_.load();
-}
+bool LeaseExecutor::LeaseValid() { return isleaseAvaliable_.load(); }

void LeaseExecutor::IncremRefreshFailed() {
    failedrefreshcount_.fetch_add(1);
@@ -190,7 +189,7 @@ void LeaseExecutor::ResetRefreshSessionTask() {
        return;
    }

-    // 等待前一个任务退出
+    // Wait for the previous task to exit
    task_->Stop();
    task_->WaitTaskExit();
@@ -203,5 +202,5 @@
    isleaseAvaliable_.store(true);
}

-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve
diff --git a/src/client/lease_executor.h b/src/client/lease_executor.h
index 2236dc9982..829d264adc 100644
--- a/src/client/lease_executor.h
+++ b/src/client/lease_executor.h
@@ -41,16 +41,13 @@ namespace client {
 class RefreshSessionTask;

 /**
- * lease refresh结果,session如果不存在就不需要再续约
- * 如果session存在但是lease续约失败,继续续约
- * 续约成功了FInfo_t中才会有对应的文件信息
+ * Result of a lease refresh. If the session does not exist, there is no need
+ * to renew it. If the session exists but the lease renewal failed, keep
+ * renewing. Only when the renewal succeeds does FInfo_t carry the
+ * corresponding file information
 */
struct LeaseRefreshResult {
-    enum class Status {
-        OK,
-        FAILED,
-        NOT_EXIST
-    };
+    enum class Status { OK, FAILED, NOT_EXIST };
    Status status;
    FInfo_t finfo;
};
@@ -62,19 +59,22 @@ class LeaseExecutorBase {
};

/**
- * 每个vdisk对应的fileinstance都会与mds保持心跳
- * 心跳通过LeaseExecutor实现,LeaseExecutor定期
- * 去mds续约,同时将mds端当前file最新的版本信息带回来
- * 然后检查版本信息是否变更,如果变更就需要通知iomanager
- * 更新版本。如果续约失败,就需要将用户新发下来的io直接错误返回
+ * The fileinstance corresponding to each vdisk keeps a heartbeat with the
+ * mds. The heartbeat is implemented by the LeaseExecutor, which periodically
+ * renews the lease with the mds and brings back the latest version
+ * information of the current file on the mds side. It then checks whether
+ * the version information has changed; if so, the iomanager must be notified
+ * to update the version. If the renewal fails, the user's newly issued IO
+ * must be returned with an error directly
 */
class LeaseExecutor : public LeaseExecutorBase {
 public:
    /**
-     * 构造函数
-     * @param: leaseopt为当前lease续约的option配置
-     * @param: mdsclient是与mds续约的client
-     * @param: iomanager会在续约失败或者版本变更的时候进行io调度
+     * Constructor
+     * @param: leaseopt is the option configuration for the current lease
+     * renewal
+     * @param: mdsclient is the client used to renew the lease with the mds
+     * @param: iomanager adjusts IO scheduling when renewal fails or the
+     * version changes
     */
    LeaseExecutor(const LeaseOption& leaseOpt, const UserInfo& userinfo,
                  MDSClient* mdscllent, IOManager4File* iomanager);
@@ -82,26 +82,27 @@
    ~LeaseExecutor();

    /**
-     * LeaseExecutor需要finfo保存filename
-     * LeaseSession_t是当前leaeexcutor的执行配置
-     * @param: fi为当前需要续约的文件版本信息
-     * @param: lease为续约的lease信息
-     * @return: 成功返回true,否则返回false
+     * LeaseExecutor needs finfo to save the filename;
+     * LeaseSession_t is the execution configuration of the current lease
+     * executor
+     * @param: fi is the version information of the file to be renewed
+     * @param: lease is the lease information for renewal
+     * @return: Returns true on success, otherwise returns false
     */
-    bool Start(const FInfo_t& fi, const LeaseSession_t& lease);
+    bool Start(const FInfo_t& fi, const LeaseSession_t& lease);

    /**
-     * 停止续约
+     * Stop renewal
     */
    void Stop();

    /**
-     * 当前lease如果续约失败则通知iomanagerdisable io
+     * If renewal of the current lease fails, notify the iomanager to disable
+     * IO
     */
    bool LeaseValid();

    /**
-     * 测试使用,主动失效增加刷新失败
+     * For test use: actively invalidate the lease by bumping the refresh
+     * failure count
     */
    void InvalidLease() {
        for (uint32_t i = 0; i <= leaseoption_.mdsRefreshTimesPerLease; i++) {
            IncremRefreshFailed();
        }
    }

    /**
-     * @brief 续约任务执行者
-     * @return 是否继续执行refresh session任务
+     * @brief Renewal task executor
+     * @return whether to continue running the refresh session task
     */
    bool RefreshLease() override;

    /**
-     * @brief 测试使用,重置refresh session task
+     * @brief For test use: reset the refresh session task
     */
    void ResetRefreshSessionTask();

 private:
    /**
-     * 一个lease期间会续约rfreshTimesPerLease次,每次续约失败就递增
-     * 当连续续约rfreshTimesPerLease次失败的时候,则disable IO
+     * During one lease term the lease is refreshed refreshTimesPerLease
+     * times, and the failure counter is incremented on every failed renewal.
+     * When refreshTimesPerLease consecutive renewals fail, IO is disabled
     */
    void IncremRefreshFailed();
@@ -135,44 +137,46 @@
    void CheckNeedUpdateFileInfo(const FInfo& fileInfo);

 private:
-    // 与mds进行lease续约的文件名
-    std::string fullFileName_;
+    // File name for lease renewal with the mds
+    std::string fullFileName_;

-    // 用于续约的client
-    MDSClient* mdsclient_;
+    // Client used for renewal
+    MDSClient* mdsclient_;

-    // 用于发起refression的user信息
-    UserInfo_t userinfo_;
+    // User information used to initiate the refresh session
+    UserInfo_t userinfo_;

-    // IO管理者,当文件需要更新版本信息或者disable io的时候调用其接口
-    IOManager4File* iomanager_;
+    // IO manager; its interface is called when the file needs to update
+    // version information or disable IO
+    IOManager4File* iomanager_;

-    // 当前lease执行的配置信息
-    LeaseOption leaseoption_;
+    // Configuration information for the current lease execution
+    LeaseOption leaseoption_;

-    // mds端传过来的lease信息,包含当前文件的lease时长,及sessionid
-    LeaseSession_t leasesession_;
+    // The lease information sent from the mds side, including the lease
+    // duration of the current file and the sessionid
+    LeaseSession_t leasesession_;

-    // 记录当前lease是否可用
-    std::atomic isleaseAvaliable_;
+    //
diff --git a/src/client/libcurve_file.cpp b/src/client/libcurve_file.cpp
index 06273c5d0b..4c4d3fb632 100644
--- a/src/client/libcurve_file.cpp
+++ b/src/client/libcurve_file.cpp
@@ -49,7 +49,7 @@
 #include "src/common/uuid.h"

 bool globalclientinited_ = false;
-curve::client::FileClient *globalclient = nullptr;
+curve::client::FileClient* globalclient = nullptr;

 using curve::client::UserInfo;

@@ -71,9 +71,9 @@ char g_processname[kProcessNameMax];

 class LoggerGuard {
  private:
-    friend void InitLogging(const std::string &confPath);
+    friend void InitLogging(const
std::string& confPath); - explicit LoggerGuard(const std::string &confpath) { + explicit LoggerGuard(const std::string& confpath) { InitInternal(confpath); } @@ -83,13 +83,13 @@ class LoggerGuard { } } - void InitInternal(const std::string &confpath); + void InitInternal(const std::string& confpath); private: bool needShutdown_ = false; }; -void LoggerGuard::InitInternal(const std::string &confPath) { +void LoggerGuard::InitInternal(const std::string& confPath) { curve::common::Configuration conf; conf.SetConfigPath(confPath); @@ -127,14 +127,18 @@ void LoggerGuard::InitInternal(const std::string &confPath) { needShutdown_ = true; } -void InitLogging(const std::string &confPath) { +void InitLogging(const std::string& confPath) { static LoggerGuard guard(confPath); } } // namespace FileClient::FileClient() - : rwlock_(), fdcount_(0), fileserviceMap_(), clientconfig_(), mdsClient_(), + : rwlock_(), + fdcount_(0), + fileserviceMap_(), + clientconfig_(), + mdsClient_(), csClient_(std::make_shared()), csBroadCaster_(std::make_shared(csClient_)), inited_(false), @@ -214,8 +218,8 @@ void FileClient::UnInit() { inited_ = false; } -int FileClient::Open(const std::string &filename, const UserInfo_t &userinfo, - const OpenFlags &openflags) { +int FileClient::Open(const std::string& filename, const UserInfo_t& userinfo, + const OpenFlags& openflags) { LOG(INFO) << "Opening filename: " << filename << ", flags: " << openflags; ClientConfig clientConfig; if (openflags.confPath.empty()) { @@ -235,7 +239,7 @@ int FileClient::Open(const std::string &filename, const UserInfo_t &userinfo, return -LIBCURVE_ERROR::FAILED; } - FileInstance *fileserv = FileInstance::NewInitedFileInstance( + FileInstance* fileserv = FileInstance::NewInitedFileInstance( clientConfig.GetFileServiceOption(), mdsClient, filename, userinfo, openflags, false); if (fileserv == nullptr) { @@ -266,9 +270,9 @@ int FileClient::Open(const std::string &filename, const UserInfo_t &userinfo, return fd; } -int FileClient::Open4ReadOnly(const std::string &filename, - const UserInfo_t &userinfo, bool disableStripe) { - FileInstance *instance = FileInstance::Open4Readonly( +int FileClient::Open4ReadOnly(const std::string& filename, + const UserInfo_t& userinfo, bool disableStripe) { + FileInstance* instance = FileInstance::Open4Readonly( clientconfig_.GetFileServiceOption(), mdsClient_, filename, userinfo); if (instance == nullptr) { @@ -293,8 +297,8 @@ int FileClient::Open4ReadOnly(const std::string &filename, return fd; } -int FileClient::IncreaseEpoch(const std::string &filename, - const UserInfo_t &userinfo) { +int FileClient::IncreaseEpoch(const std::string& filename, + const UserInfo_t& userinfo) { LOG(INFO) << "IncreaseEpoch, filename: " << filename; FInfo_t fi; FileEpoch_t fEpoch; @@ -324,8 +328,7 @@ int FileClient::IncreaseEpoch(const std::string &filename, return ret2; } -int FileClient::Create(const std::string& filename, - const UserInfo& userinfo, +int FileClient::Create(const std::string& filename, const UserInfo& userinfo, size_t size) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { @@ -350,8 +353,8 @@ int FileClient::Create2(const CreateFileContext& context) { if (mdsClient_ != nullptr) { ret = mdsClient_->CreateFile(context); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) - << "Create file failed, filename: " << context.name - << ", ret: " << ret; + << "Create file failed, filename: " << context.name + << ", ret: " << ret; } else { LOG(ERROR) << "global mds client not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -359,8 +362,8 @@ int 
FileClient::Create2(const CreateFileContext& context) { return -ret; } -int FileClient::Read(int fd, char *buf, off_t offset, size_t len) { - // 长度为0,直接返回,不做任何操作 +int FileClient::Read(int fd, char* buf, off_t offset, size_t len) { + // Length is 0, returns directly without any operation if (len == 0) { return -LIBCURVE_ERROR::OK; } @@ -374,8 +377,8 @@ int FileClient::Read(int fd, char *buf, off_t offset, size_t len) { return fileserviceMap_[fd]->Read(buf, offset, len); } -int FileClient::Write(int fd, const char *buf, off_t offset, size_t len) { - // 长度为0,直接返回,不做任何操作 +int FileClient::Write(int fd, const char* buf, off_t offset, size_t len) { + // Length is 0, returns directly without any operation if (len == 0) { return -LIBCURVE_ERROR::OK; } @@ -400,9 +403,9 @@ int FileClient::Discard(int fd, off_t offset, size_t length) { return iter->second->Discard(offset, length); } -int FileClient::AioRead(int fd, CurveAioContext *aioctx, +int FileClient::AioRead(int fd, CurveAioContext* aioctx, UserDataType dataType) { - // 长度为0,直接返回,不做任何操作 + // Length is 0, returns directly without any operation if (aioctx->length == 0) { return -LIBCURVE_ERROR::OK; } @@ -420,9 +423,9 @@ int FileClient::AioRead(int fd, CurveAioContext *aioctx, return ret; } -int FileClient::AioWrite(int fd, CurveAioContext *aioctx, +int FileClient::AioWrite(int fd, CurveAioContext* aioctx, UserDataType dataType) { - // 长度为0,直接返回,不做任何操作 + // Length is 0, returns directly without any operation if (aioctx->length == 0) { return -LIBCURVE_ERROR::OK; } @@ -440,7 +443,7 @@ int FileClient::AioWrite(int fd, CurveAioContext *aioctx, return ret; } -int FileClient::AioDiscard(int fd, CurveAioContext *aioctx) { +int FileClient::AioDiscard(int fd, CurveAioContext* aioctx) { ReadLockGuard lk(rwlock_); auto iter = fileserviceMap_.find(fd); if (CURVE_UNLIKELY(iter == fileserviceMap_.end())) { @@ -451,8 +454,8 @@ int FileClient::AioDiscard(int fd, CurveAioContext *aioctx) { } } -int FileClient::Rename(const UserInfo_t &userinfo, const std::string &oldpath, - const std::string &newpath) { +int FileClient::Rename(const UserInfo_t& userinfo, const std::string& oldpath, + const std::string& newpath) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->RenameFile(userinfo, oldpath, newpath); @@ -466,7 +469,7 @@ int FileClient::Rename(const UserInfo_t &userinfo, const std::string &oldpath, return -ret; } -int FileClient::Extend(const std::string &filename, const UserInfo_t &userinfo, +int FileClient::Extend(const std::string& filename, const UserInfo_t& userinfo, uint64_t newsize) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { @@ -481,7 +484,7 @@ int FileClient::Extend(const std::string &filename, const UserInfo_t &userinfo, return -ret; } -int FileClient::Unlink(const std::string &filename, const UserInfo_t &userinfo, +int FileClient::Unlink(const std::string& filename, const UserInfo_t& userinfo, bool deleteforce) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { @@ -496,7 +499,7 @@ int FileClient::Unlink(const std::string &filename, const UserInfo_t &userinfo, return -ret; } -int FileClient::Recover(const std::string &filename, const UserInfo_t &userinfo, +int FileClient::Recover(const std::string& filename, const UserInfo_t& userinfo, uint64_t fileId) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { @@ -510,7 +513,7 @@ int FileClient::Recover(const std::string &filename, const UserInfo_t &userinfo, return -ret; } -int FileClient::StatFile(int fd, FileStatInfo *finfo) { +int FileClient::StatFile(int fd, FileStatInfo* finfo) { 
FInfo_t fi; { ReadLockGuard lk(rwlock_); @@ -519,7 +522,7 @@ int FileClient::StatFile(int fd, FileStatInfo *finfo) { LOG(ERROR) << "StatFile failed not found fd = " << fd; return -LIBCURVE_ERROR::FAILED; } - FileInstance *instance = fileserviceMap_[fd]; + FileInstance* instance = fileserviceMap_[fd]; fi = instance->GetCurrentFileInfo(); } BuildFileStatInfo(fi, finfo); @@ -527,8 +530,8 @@ int FileClient::StatFile(int fd, FileStatInfo *finfo) { return LIBCURVE_ERROR::OK; } -int FileClient::StatFile(const std::string &filename, - const UserInfo_t &userinfo, FileStatInfo *finfo) { +int FileClient::StatFile(const std::string& filename, + const UserInfo_t& userinfo, FileStatInfo* finfo) { FInfo_t fi; FileEpoch_t fEpoch; int ret; @@ -548,8 +551,8 @@ int FileClient::StatFile(const std::string &filename, return -ret; } -int FileClient::Listdir(const std::string &dirpath, const UserInfo_t &userinfo, - std::vector *filestatVec) { +int FileClient::Listdir(const std::string& dirpath, const UserInfo_t& userinfo, + std::vector* filestatVec) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->Listdir(dirpath, userinfo, filestatVec); @@ -563,7 +566,7 @@ int FileClient::Listdir(const std::string &dirpath, const UserInfo_t &userinfo, return -ret; } -int FileClient::Mkdir(const std::string &dirpath, const UserInfo_t &userinfo) { +int FileClient::Mkdir(const std::string& dirpath, const UserInfo_t& userinfo) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { CreateFileContext context; @@ -588,7 +591,7 @@ int FileClient::Mkdir(const std::string &dirpath, const UserInfo_t &userinfo) { return -ret; } -int FileClient::Rmdir(const std::string &dirpath, const UserInfo_t &userinfo) { +int FileClient::Rmdir(const std::string& dirpath, const UserInfo_t& userinfo) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->DeleteFile(dirpath, userinfo); @@ -601,9 +604,9 @@ int FileClient::Rmdir(const std::string &dirpath, const UserInfo_t &userinfo) { return -ret; } -int FileClient::ChangeOwner(const std::string &filename, - const std::string &newOwner, - const UserInfo_t &userinfo) { +int FileClient::ChangeOwner(const std::string& filename, + const std::string& newOwner, + const UserInfo_t& userinfo) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->ChangeOwner(filename, newOwner, userinfo); @@ -651,7 +654,7 @@ int FileClient::Close(int fd) { return -LIBCURVE_ERROR::FAILED; } -int FileClient::GetClusterId(char *buf, int len) { +int FileClient::GetClusterId(char* buf, int len) { std::string result = GetClusterId(); if (result.empty()) { @@ -685,7 +688,7 @@ std::string FileClient::GetClusterId() { return {}; } -int FileClient::GetFileInfo(int fd, FInfo *finfo) { +int FileClient::GetFileInfo(int fd, FInfo* finfo) { int ret = -LIBCURVE_ERROR::FAILED; ReadLockGuard lk(rwlock_); @@ -707,11 +710,11 @@ std::vector FileClient::ListPoolset() { const auto ret = mdsClient_->ListPoolset(&out); LOG_IF(WARNING, ret != LIBCURVE_ERROR::OK) - << "Failed to list poolset, error: " << ret; + << "Failed to list poolset, error: " << ret; return out; } -void FileClient::BuildFileStatInfo(const FInfo_t &fi, FileStatInfo *finfo) { +void FileClient::BuildFileStatInfo(const FInfo_t& fi, FileStatInfo* finfo) { finfo->id = fi.id; finfo->parentid = fi.parentid; finfo->ctime = fi.ctime; @@ -722,9 +725,9 @@ void FileClient::BuildFileStatInfo(const FInfo_t &fi, FileStatInfo *finfo) { finfo->stripeCount = fi.stripeCount; memcpy(finfo->filename, fi.filename.c_str(), - std::min(sizeof(finfo->filename), 
fi.filename.size() + 1)); + std::min(sizeof(finfo->filename), fi.filename.size() + 1)); memcpy(finfo->owner, fi.owner.c_str(), - std::min(sizeof(finfo->owner), fi.owner.size() + 1)); + std::min(sizeof(finfo->owner), fi.owner.size() + 1)); finfo->fileStatus = static_cast(fi.filestatus); } @@ -758,7 +761,7 @@ bool FileClient::StartDummyServer() { return false; } - // 获取本地ip + // Obtain local IP std::string ip; if (!common::NetCommon::GetLocalIP(&ip)) { LOG(ERROR) << "Get local ip failed!"; @@ -775,14 +778,13 @@ bool FileClient::StartDummyServer() { } // namespace client } // namespace curve - -// 全局初始化与反初始化 -int GlobalInit(const char *configpath); +// Global initialization and deinitialization +int GlobalInit(const char* configpath); void GlobalUnInit(); -int Init(const char *path) { return GlobalInit(path); } +int Init(const char* path) { return GlobalInit(path); } -int Open4Qemu(const char *filename) { +int Open4Qemu(const char* filename) { curve::client::UserInfo_t userinfo; std::string realname; bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( @@ -800,7 +802,7 @@ int Open4Qemu(const char *filename) { return globalclient->Open(realname, userinfo); } -int IncreaseEpoch(const char *filename) { +int IncreaseEpoch(const char* filename) { curve::client::UserInfo_t userinfo; std::string realname; bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( @@ -818,7 +820,7 @@ int IncreaseEpoch(const char *filename) { return globalclient->IncreaseEpoch(realname, userinfo); } -int Extend4Qemu(const char *filename, int64_t newsize) { +int Extend4Qemu(const char* filename, int64_t newsize) { curve::client::UserInfo_t userinfo; std::string realname; bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( @@ -840,7 +842,7 @@ int Extend4Qemu(const char *filename, int64_t newsize) { static_cast(newsize)); } -int Open(const char *filename, const C_UserInfo_t *userinfo) { +int Open(const char* filename, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -850,7 +852,7 @@ int Open(const char *filename, const C_UserInfo_t *userinfo) { UserInfo(userinfo->owner, userinfo->password)); } -int Read(int fd, char *buf, off_t offset, size_t length) { +int Read(int fd, char* buf, off_t offset, size_t length) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -859,7 +861,7 @@ int Read(int fd, char *buf, off_t offset, size_t length) { return globalclient->Read(fd, buf, offset, length); } -int Write(int fd, const char *buf, off_t offset, size_t length) { +int Write(int fd, const char* buf, off_t offset, size_t length) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -877,7 +879,7 @@ int Discard(int fd, off_t offset, size_t length) { return globalclient->Discard(fd, offset, length); } -int AioRead(int fd, CurveAioContext *aioctx) { +int AioRead(int fd, CurveAioContext* aioctx) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -888,7 +890,7 @@ int AioRead(int fd, CurveAioContext *aioctx) { return globalclient->AioRead(fd, aioctx); } -int AioWrite(int fd, CurveAioContext *aioctx) { +int AioWrite(int fd, CurveAioContext* aioctx) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -896,11 +898,11 @@ int AioWrite(int fd, CurveAioContext *aioctx) { DVLOG(9) << "offset: " << aioctx->offset << " length: " << aioctx->length << " op: " << 
aioctx->op - << " buf: " << *(unsigned int *)aioctx->buf; + << " buf: " << *(unsigned int*)aioctx->buf; return globalclient->AioWrite(fd, aioctx); } -int AioDiscard(int fd, CurveAioContext *aioctx) { +int AioDiscard(int fd, CurveAioContext* aioctx) { if (globalclient == nullptr) { LOG(ERROR) << "Not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -909,7 +911,7 @@ int AioDiscard(int fd, CurveAioContext *aioctx) { return globalclient->AioDiscard(fd, aioctx); } -int Create(const char *filename, const C_UserInfo_t *userinfo, size_t size) { +int Create(const char* filename, const C_UserInfo_t* userinfo, size_t size) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -919,8 +921,8 @@ int Create(const char *filename, const C_UserInfo_t *userinfo, size_t size) { filename, UserInfo(userinfo->owner, userinfo->password), size); } -int Rename(const C_UserInfo_t* userinfo, - const char* oldpath, const char* newpath) { +int Rename(const C_UserInfo_t* userinfo, const char* oldpath, + const char* newpath) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -930,7 +932,7 @@ int Rename(const C_UserInfo_t* userinfo, oldpath, newpath); } -int Extend(const char *filename, const C_UserInfo_t *userinfo, +int Extend(const char* filename, const C_UserInfo_t* userinfo, uint64_t newsize) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; @@ -941,7 +943,7 @@ int Extend(const char *filename, const C_UserInfo_t *userinfo, filename, UserInfo(userinfo->owner, userinfo->password), newsize); } -int Unlink(const char *filename, const C_UserInfo_t *userinfo) { +int Unlink(const char* filename, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -951,7 +953,7 @@ int Unlink(const char *filename, const C_UserInfo_t *userinfo) { UserInfo(userinfo->owner, userinfo->password)); } -int DeleteForce(const char *filename, const C_UserInfo_t *userinfo) { +int DeleteForce(const char* filename, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -961,7 +963,7 @@ int DeleteForce(const char *filename, const C_UserInfo_t *userinfo) { filename, UserInfo(userinfo->owner, userinfo->password), true); } -int Recover(const char *filename, const C_UserInfo_t *userinfo, +int Recover(const char* filename, const C_UserInfo_t* userinfo, uint64_t fileId) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; @@ -972,21 +974,21 @@ int Recover(const char *filename, const C_UserInfo_t *userinfo, filename, UserInfo(userinfo->owner, userinfo->password), fileId); } -DirInfo_t *OpenDir(const char *dirpath, const C_UserInfo_t *userinfo) { +DirInfo_t* OpenDir(const char* dirpath, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return nullptr; } - DirInfo_t *dirinfo = new (std::nothrow) DirInfo_t; - dirinfo->dirpath = const_cast(dirpath); - dirinfo->userinfo = const_cast(userinfo); + DirInfo_t* dirinfo = new (std::nothrow) DirInfo_t; + dirinfo->dirpath = const_cast(dirpath); + dirinfo->userinfo = const_cast(userinfo); dirinfo->fileStat = nullptr; return dirinfo; } -int Listdir(DirInfo_t *dirinfo) { +int Listdir(DirInfo_t* dirinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1027,7 +1029,7 @@ int Listdir(DirInfo_t *dirinfo) { return ret; } -void CloseDir(DirInfo_t *dirinfo) { +void CloseDir(DirInfo_t* dirinfo) { if 
(dirinfo != nullptr) { if (dirinfo->fileStat != nullptr) { delete[] dirinfo->fileStat; @@ -1037,7 +1039,7 @@ void CloseDir(DirInfo_t *dirinfo) { } } -int Mkdir(const char *dirpath, const C_UserInfo_t *userinfo) { +int Mkdir(const char* dirpath, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1047,7 +1049,7 @@ int Mkdir(const char *dirpath, const C_UserInfo_t *userinfo) { UserInfo(userinfo->owner, userinfo->password)); } -int Rmdir(const char *dirpath, const C_UserInfo_t *userinfo) { +int Rmdir(const char* dirpath, const C_UserInfo_t* userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1066,7 +1068,7 @@ int Close(int fd) { return globalclient->Close(fd); } -int StatFile4Qemu(const char *filename, FileStatInfo *finfo) { +int StatFile4Qemu(const char* filename, FileStatInfo* finfo) { curve::client::UserInfo_t userinfo; std::string realname; bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( @@ -1084,8 +1086,8 @@ int StatFile4Qemu(const char *filename, FileStatInfo *finfo) { return globalclient->StatFile(realname, userinfo, finfo); } -int StatFile(const char *filename, const C_UserInfo_t *cuserinfo, - FileStatInfo *finfo) { +int StatFile(const char* filename, const C_UserInfo_t* cuserinfo, + FileStatInfo* finfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1095,8 +1097,8 @@ int StatFile(const char *filename, const C_UserInfo_t *cuserinfo, return globalclient->StatFile(filename, userinfo, finfo); } -int ChangeOwner(const char *filename, const char *newOwner, - const C_UserInfo_t *cuserinfo) { +int ChangeOwner(const char* filename, const char* newOwner, + const C_UserInfo_t* cuserinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1108,7 +1110,7 @@ int ChangeOwner(const char *filename, const char *newOwner, void UnInit() { GlobalUnInit(); } -int GetClusterId(char *buf, int len) { +int GetClusterId(char* buf, int len) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1117,7 +1119,7 @@ int GetClusterId(char *buf, int len) { return globalclient->GetClusterId(buf, len); } -int GlobalInit(const char *path) { +int GlobalInit(const char* path) { int ret = 0; if (globalclientinited_) { LOG(INFO) << "global cient already inited!"; @@ -1154,74 +1156,74 @@ void GlobalUnInit() { } } -const char *LibCurveErrorName(LIBCURVE_ERROR err) { +const char* LibCurveErrorName(LIBCURVE_ERROR err) { switch (err) { - case LIBCURVE_ERROR::OK: - return "OK"; - case LIBCURVE_ERROR::EXISTS: - return "EXISTS"; - case LIBCURVE_ERROR::FAILED: - return "FAILED"; - case LIBCURVE_ERROR::DISABLEIO: - return "DISABLEIO"; - case LIBCURVE_ERROR::AUTHFAIL: - return "AUTHFAIL"; - case LIBCURVE_ERROR::DELETING: - return "DELETING"; - case LIBCURVE_ERROR::NOTEXIST: - return "NOTEXIST"; - case LIBCURVE_ERROR::UNDER_SNAPSHOT: - return "UNDER_SNAPSHOT"; - case LIBCURVE_ERROR::NOT_UNDERSNAPSHOT: - return "NOT_UNDERSNAPSHOT"; - case LIBCURVE_ERROR::DELETE_ERROR: - return "DELETE_ERROR"; - case LIBCURVE_ERROR::NOT_ALLOCATE: - return "NOT_ALLOCATE"; - case LIBCURVE_ERROR::NOT_SUPPORT: - return "NOT_SUPPORT"; - case LIBCURVE_ERROR::NOT_EMPTY: - return "NOT_EMPTY"; - case LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE: - return "NO_SHRINK_BIGGER_FILE"; - case LIBCURVE_ERROR::SESSION_NOTEXISTS: - return "SESSION_NOTEXISTS"; - case 
LIBCURVE_ERROR::FILE_OCCUPIED: - return "FILE_OCCUPIED"; - case LIBCURVE_ERROR::PARAM_ERROR: - return "PARAM_ERROR"; - case LIBCURVE_ERROR::INTERNAL_ERROR: - return "INTERNAL_ERROR"; - case LIBCURVE_ERROR::CRC_ERROR: - return "CRC_ERROR"; - case LIBCURVE_ERROR::INVALID_REQUEST: - return "INVALID_REQUEST"; - case LIBCURVE_ERROR::DISK_FAIL: - return "DISK_FAIL"; - case LIBCURVE_ERROR::NO_SPACE: - return "NO_SPACE"; - case LIBCURVE_ERROR::NOT_ALIGNED: - return "NOT_ALIGNED"; - case LIBCURVE_ERROR::BAD_FD: - return "BAD_FD"; - case LIBCURVE_ERROR::LENGTH_NOT_SUPPORT: - return "LENGTH_NOT_SUPPORT"; - case LIBCURVE_ERROR::SESSION_NOT_EXIST: - return "SESSION_NOT_EXIST"; - case LIBCURVE_ERROR::STATUS_NOT_MATCH: - return "STATUS_NOT_MATCH"; - case LIBCURVE_ERROR::DELETE_BEING_CLONED: - return "DELETE_BEING_CLONED"; - case LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT: - return "CLIENT_NOT_SUPPORT_SNAPSHOT"; - case LIBCURVE_ERROR::SNAPSTHO_FROZEN: - return "SNAPSTHO_FROZEN"; - case LIBCURVE_ERROR::RETRY_UNTIL_SUCCESS: - return "RETRY_UNTIL_SUCCESS"; - case LIBCURVE_ERROR::EPOCH_TOO_OLD: - return "EPOCH_TOO_OLD"; - case LIBCURVE_ERROR::UNKNOWN: - break; + case LIBCURVE_ERROR::OK: + return "OK"; + case LIBCURVE_ERROR::EXISTS: + return "EXISTS"; + case LIBCURVE_ERROR::FAILED: + return "FAILED"; + case LIBCURVE_ERROR::DISABLEIO: + return "DISABLEIO"; + case LIBCURVE_ERROR::AUTHFAIL: + return "AUTHFAIL"; + case LIBCURVE_ERROR::DELETING: + return "DELETING"; + case LIBCURVE_ERROR::NOTEXIST: + return "NOTEXIST"; + case LIBCURVE_ERROR::UNDER_SNAPSHOT: + return "UNDER_SNAPSHOT"; + case LIBCURVE_ERROR::NOT_UNDERSNAPSHOT: + return "NOT_UNDERSNAPSHOT"; + case LIBCURVE_ERROR::DELETE_ERROR: + return "DELETE_ERROR"; + case LIBCURVE_ERROR::NOT_ALLOCATE: + return "NOT_ALLOCATE"; + case LIBCURVE_ERROR::NOT_SUPPORT: + return "NOT_SUPPORT"; + case LIBCURVE_ERROR::NOT_EMPTY: + return "NOT_EMPTY"; + case LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE: + return "NO_SHRINK_BIGGER_FILE"; + case LIBCURVE_ERROR::SESSION_NOTEXISTS: + return "SESSION_NOTEXISTS"; + case LIBCURVE_ERROR::FILE_OCCUPIED: + return "FILE_OCCUPIED"; + case LIBCURVE_ERROR::PARAM_ERROR: + return "PARAM_ERROR"; + case LIBCURVE_ERROR::INTERNAL_ERROR: + return "INTERNAL_ERROR"; + case LIBCURVE_ERROR::CRC_ERROR: + return "CRC_ERROR"; + case LIBCURVE_ERROR::INVALID_REQUEST: + return "INVALID_REQUEST"; + case LIBCURVE_ERROR::DISK_FAIL: + return "DISK_FAIL"; + case LIBCURVE_ERROR::NO_SPACE: + return "NO_SPACE"; + case LIBCURVE_ERROR::NOT_ALIGNED: + return "NOT_ALIGNED"; + case LIBCURVE_ERROR::BAD_FD: + return "BAD_FD"; + case LIBCURVE_ERROR::LENGTH_NOT_SUPPORT: + return "LENGTH_NOT_SUPPORT"; + case LIBCURVE_ERROR::SESSION_NOT_EXIST: + return "SESSION_NOT_EXIST"; + case LIBCURVE_ERROR::STATUS_NOT_MATCH: + return "STATUS_NOT_MATCH"; + case LIBCURVE_ERROR::DELETE_BEING_CLONED: + return "DELETE_BEING_CLONED"; + case LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT: + return "CLIENT_NOT_SUPPORT_SNAPSHOT"; + case LIBCURVE_ERROR::SNAPSTHO_FROZEN: + return "SNAPSTHO_FROZEN"; + case LIBCURVE_ERROR::RETRY_UNTIL_SUCCESS: + return "RETRY_UNTIL_SUCCESS"; + case LIBCURVE_ERROR::EPOCH_TOO_OLD: + return "EPOCH_TOO_OLD"; + case LIBCURVE_ERROR::UNKNOWN: + break; } static thread_local char message[64]; diff --git a/src/client/libcurve_file.h b/src/client/libcurve_file.h index 1f1202bbbb..cd24b8afc6 100644 --- a/src/client/libcurve_file.h +++ b/src/client/libcurve_file.h @@ -24,19 +24,20 @@ #define SRC_CLIENT_LIBCURVE_FILE_H_ #include + #include +#include #include #include #include -#include 
#include "include/client/libcurve.h" +#include "src/client/chunkserver_broadcaster.h" #include "src/client/client_common.h" #include "src/client/file_instance.h" #include "src/common/concurrent/rw_lock.h" -#include "src/client/chunkserver_broadcaster.h" -// TODO(tongguangxun) :添加关键函数trace功能 +// TODO(tongguangxun): Add key function trace function namespace curve { namespace client { @@ -48,28 +49,28 @@ class FileClient { virtual ~FileClient() = default; /** - * file对象初始化函数 - * @param: 配置文件路径 + * file object initialization function + * @param: Configuration file path */ virtual int Init(const std::string& configpath); /** - * 打开或创建文件 - * @param: filename文件名 - * @param: userinfo是操作文件的用户信息 - * @return: 返回文件fd + * Open or create a file + * @param: filename File name + * @param: userinfo is the user information for operating the file + * @return: Return the file fd */ - virtual int Open(const std::string& filename, - const UserInfo_t& userinfo, + virtual int Open(const std::string& filename, const UserInfo_t& userinfo, const OpenFlags& openflags = {}); /** - * 打开文件,这个打开只是创建了一个fd,并不与mds交互,没有session续约 - * 这个Open接口主要是提供给快照克隆镜像系统做数据拷贝使用 - * @param: filename文件名 - * @param: userinfo当前用户信息 + * Open the file. This only creates an fd and does not interact with mds. + * There is no session renewal This Open interface is mainly provided for + * data copying in snapshot clone image systems + * @param: filename File name + * @param: userinfo Current user information * @param disableStripe enable/disable stripe feature for a stripe file - * @return: 返回文件fd + * @return: Return the file fd */ virtual int Open4ReadOnly(const std::string& filename, const UserInfo_t& userinfo, @@ -83,19 +84,19 @@ class FileClient { * * @return 0 for success, -1 for fail */ - int IncreaseEpoch(const std::string& filename, - const UserInfo_t& userinfo); + int IncreaseEpoch(const std::string& filename, const UserInfo_t& userinfo); /** - * 创建文件 - * @param: filename文件名 - * @param: userinfo是当前打开或创建时携带的user信息 - * @param: size文件长度,当create为true的时候以size长度创建文件 - * @return: 成功返回0, 失败可能有多种可能 - * 比如内部错误,或者文件已存在 + * Create File + * @param: filename File name + * @param: userinfo is the user information that is currently carried when + * opening or creating + * @param: size file length. 
When create is true, create a file with size + * length + * @return: Success returns 0, failure may have multiple possibilities + * For example, internal errors or files that already exist */ - virtual int Create(const std::string& filename, - const UserInfo_t& userinfo, + virtual int Create(const std::string& filename, const UserInfo_t& userinfo, size_t size); /** @@ -105,22 +106,24 @@ class FileClient { virtual int Create2(const CreateFileContext& context); /** - * 同步模式读 - * @param: fd为当前open返回的文件描述符 - * @param: buf为当前待读取的缓冲区 - * @param:offset文件内的便宜 - * @parma:length为待读取的长度 - * @return: 成功返回读取字节数,否则返回小于0的错误码 + * Synchronous mode reading + * @param: fd is the file descriptor returned by the current open + * @param: buf is the current buffer to be read + * @param: offset within the file + * @parma: length is the length to be read + * @return: Successfully returned the number of bytes read, otherwise an + * error code less than 0 will be returned */ virtual int Read(int fd, char* buf, off_t offset, size_t length); /** - * 同步模式写 - * @param: fd为当前open返回的文件描述符 - * @param: buf为当前待写入的缓冲区 - * @param:offset文件内的便宜 - * @parma:length为待读取的长度 - * @return: 成功返回写入字节数,否则返回小于0的错误码 + * Synchronous mode write + * @param: fd is the file descriptor returned by the current open + * @param: buf is the current buffer to be written + * @param: offset within the file + * @parma: length is the length to be read + * @return: Successfully returns the number of bytes written, otherwise + * returns an error code less than 0 */ virtual int Write(int fd, const char* buf, off_t offset, size_t length); @@ -135,21 +138,25 @@ class FileClient { virtual int Discard(int fd, off_t offset, size_t length); /** - * 异步模式读 - * @param: fd为当前open返回的文件描述符 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * Asynchronous mode read + * @param: fd is the file descriptor returned by the current open + * @param: aioctx is an asynchronous read/write IO context that stores basic + * IO information * @param dataType type of aioctx->buf, default is `UserDataType::RawBuffer` - * @return: 成功返回读取字节数,否则返回小于0的错误码 + * @return: Successfully returned the number of bytes read, otherwise an + * error code less than 0 will be returned */ virtual int AioRead(int fd, CurveAioContext* aioctx, UserDataType dataType = UserDataType::RawBuffer); /** - * 异步模式写 - * @param: fd为当前open返回的文件描述符 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * Asynchronous mode write + * @param: fd is the file descriptor returned by the current open + * @param: aioctx is an asynchronous read/write IO context that stores basic + * IO information * @param dataType type of aioctx->buf, default is `UserDataType::RawBuffer` - * @return: 成功返回写入字节数,否则返回小于0的错误码 + * @return: Successfully returns the number of bytes written, otherwise + * returns an error code less than 0 */ virtual int AioWrite(int fd, CurveAioContext* aioctx, UserDataType dataType = UserDataType::RawBuffer); @@ -163,33 +170,31 @@ class FileClient { virtual int AioDiscard(int fd, CurveAioContext* aioctx); /** - * 重命名文件 - * @param: userinfo是用户信息 - * @param: oldpath源路劲 - * @param: newpath目标路径 + * Rename File + * @param: userinfo is the user information + * @param: oldpath Yuanlujin + * @param: newpath Target Path */ - virtual int Rename(const UserInfo_t& userinfo, - const std::string& oldpath, + virtual int Rename(const UserInfo_t& userinfo, const std::string& oldpath, const std::string& newpath); /** - * 扩展文件 - * @param: userinfo是用户信息 - * @param: filename文件名 - * @param: newsize新的size + * Extension file + * @param: userinfo is the user 

     /**
-     * 删除文件
-     * @param: userinfo是用户信息
-     * @param: filename待删除的文件名
-     * @param: deleteforce=true只能用于从回收站删除,false为放入垃圾箱
+     * Delete a file
+     * @param: userinfo is the user information
+     * @param: filename the file name to be deleted
+     * @param: deleteforce=true can only be used to delete permanently from
+     * the recycle bin; false moves the file into the recycle bin
      */
-    virtual int Unlink(const std::string& filename,
-                       const UserInfo_t& userinfo,
+    virtual int Unlink(const std::string& filename, const UserInfo_t& userinfo,
                        bool deleteforce = false);

     /**
@@ -198,96 +203,98 @@ class FileClient {
      * @param: filename
      * @param: fileId
      */
-    virtual int Recover(const std::string& filename,
-                        const UserInfo_t& userinfo,
+    virtual int Recover(const std::string& filename, const UserInfo_t& userinfo,
                         uint64_t fileId);

     /**
-     * 枚举目录内容
-     * @param: userinfo是用户信息
-     * @param: dirpath是目录路径
-     * @param[out]: filestatVec当前文件夹内的文件信息
+     * Enumerate directory contents
+     * @param: userinfo is the user information
+     * @param: dirpath is the directory path
+     * @param[out]: filestatVec file information of the files in the
+     * directory
      */
-    virtual int Listdir(const std::string& dirpath,
-                        const UserInfo_t& userinfo,
+    virtual int Listdir(const std::string& dirpath, const UserInfo_t& userinfo,
                         std::vector* filestatVec);

     /**
-     * 创建目录
-     * @param: userinfo是用户信息
-     * @param: dirpath是目录路径
+     * Create a directory
+     * @param: userinfo is the user information
+     * @param: dirpath is the directory path
      */
     virtual int Mkdir(const std::string& dirpath, const UserInfo_t& userinfo);

     /**
-     * 删除目录
-     * @param: userinfo是用户信息
-     * @param: dirpath是目录路径
+     * Delete a directory
+     * @param: userinfo is the user information
+     * @param: dirpath is the directory path
      */
     virtual int Rmdir(const std::string& dirpath, const UserInfo_t& userinfo);

     /**
-     * 获取文件信息
-     * @param: filename文件名
-     * @param: userinfo是用户信息
-     * @param: finfo是出参,携带当前文件的基础信息
-     * @return: 成功返回int::OK,否则返回小于0的错误码
+     * Get file information
+     * @param: filename file name
+     * @param: userinfo is the user information
+     * @param: finfo is an output parameter carrying the basic information
+     * of the current file
+     * @return: returns int::OK on success, otherwise a negative error code
      */
     virtual int StatFile(const std::string& filename,
-                         const UserInfo_t& userinfo,
-                         FileStatInfo* finfo);
-
-    /**
-     * stat file
-     * @param: fd is file descriptor.
-     * @param: finfo is an output para, carry the base info of current file.
-     * @return: returns int::ok if success,
-     *          otherwise returns an error code less than 0
-     */
+                         const UserInfo_t& userinfo, FileStatInfo* finfo);
+
+    /**
+     * stat file
+     * @param: fd is the file descriptor.
+     * @param: finfo is an output parameter carrying the basic info of the
+     * current file.
+     * @return: returns int::OK on success,
+     *          otherwise returns an error code less than 0
+     */
     virtual int StatFile(int fd, FileStatInfo* finfo);

     /**
-     * 变更owner
-     * @param: filename待变更的文件名
-     * @param: newOwner新的owner信息
-     * @param: userinfo执行此操作的user信息,只有root用户才能执行变更
-     * @return: 成功返回0,
-     *          否则返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等
+     * Change the owner
+     * @param: filename the file name to be changed
+     * @param: newOwner the new owner information
+     * @param: userinfo the user information for performing this operation;
+     * only the root user can perform the change
+     * @return: returns 0 on success,
+     * otherwise -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc.
      */
     virtual int ChangeOwner(const std::string& filename,
                             const std::string& newOwner,
                             const UserInfo_t& userinfo);

     /**
-     * close通过fd找到对应的instance进行删除
-     * @param: fd为当前open返回的文件描述符
-     * @return: 成功返回int::OK,否则返回小于0的错误码
+     * close finds the corresponding instance through fd and removes it
+     * @param: fd is the file descriptor returned by the current open
+     * @return: returns int::OK on success, otherwise a negative error code
      */
     virtual int Close(int fd);

     /**
-     * 析构,回收资源
+     * Tear down and release resources
      */
     virtual void UnInit();

     /**
-     * @brief: 获取集群id
-     * @param: buf存放集群id
-     * @param: buf的长度
-     * @return: 成功返回0, 失败返回-LIBCURVE_ERROR::FAILED
+     * @brief: Get the cluster id
+     * @param: buf buffer to store the cluster id
+     * @param: len length of buf
+     * @return: returns 0 on success, -LIBCURVE_ERROR::FAILED on failure
      */
     int GetClusterId(char* buf, int len);

     /**
-     * @brief 获取集群id
-     * @return 成功返回集群id,失败返回空
+     * @brief Get the cluster id
+     * @return the cluster id on success, an empty string on failure
      */
     std::string GetClusterId();

     /**
-     * @brief 获取文件信息,测试使用
-     * @param fd 文件句柄
-     * @param[out] finfo 文件信息
-     * @return 成功返回0,失败返回-LIBCURVE_ERROR::FAILED
+     * @brief Get file information, for testing
+     * @param fd file handle
+     * @param[out] finfo file information
+     * @return returns 0 on success, -LIBCURVE_ERROR::FAILED on failure
      */
     int GetFileInfo(int fd, FInfo* finfo);

@@ -295,33 +302,33 @@ class FileClient {
     std::vector ListPoolset();

     /**
-     * 测试使用,获取当前挂载文件数量
-     * @return 返回当前挂载文件数量
+     * For testing: get the current number of mounted files
+     * @return the current number of mounted files
      */
-    uint64_t GetOpenedFileNum() const {
-        return openedFileNum_.get_value();
-    }
+    uint64_t GetOpenedFileNum() const { return openedFileNum_.get_value(); }

  private:
-    static void BuildFileStatInfo(const FInfo_t &fi, FileStatInfo *finfo);
+    static void BuildFileStatInfo(const FInfo_t& fi, FileStatInfo* finfo);

     bool StartDummyServer();

 private:
     BthreadRWLock rwlock_;

-    // 向上返回的文件描述符,对于QEMU来说,一个vdisk对应一个文件描述符
+    // The file descriptor returned upward; for QEMU, one vdisk corresponds
+    // to one file descriptor
     std::atomic fdcount_;

-    // 每个vdisk都有一个FileInstance,通过返回的fd映射到对应的instance
+    // Each vdisk has a FileInstance, mapped to the corresponding instance
+    // through the returned fd
     std::unordered_map fileserviceMap_;

     // std::unordered_map fileserviceFileNameMap_;

-    // FileClient配置
+    // FileClient configuration
     ClientConfig clientconfig_;

-    // fileclient对应的全局mdsclient
+    // The global mdsclient corresponding to the fileclient
     std::shared_ptr mdsClient_;

     // chunkserver client
@@ -329,10 +336,10 @@ class FileClient {
     // chunkserver broadCaster
     std::shared_ptr csBroadCaster_;

-    // 是否初始化成功
+    // Whether initialization succeeded
     bool inited_;

-    // 挂载文件数量
+    // Number of mounted files
     bvar::Adder openedFileNum_;
 };
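As a quick usage reference for the FileClient interface documented above, a minimal synchronous IO sketch follows. It is illustrative only and not part of this patch; the config path, owner, and volume name are placeholders.

// Illustrative sketch, not part of this patch. The config path, owner
// and volume name below are placeholders.
#include "src/client/libcurve_file.h"

int ExampleIO() {
    curve::client::FileClient client;
    if (client.Init("./client.conf") != 0) {
        return -1;
    }
    curve::client::UserInfo_t user;
    user.owner = "example_owner";
    int fd = client.Open("/example_volume", user);
    if (fd < 0) {
        client.UnInit();
        return fd;
    }
    char buf[4096] = {0};
    // Per the contracts documented above: byte count on success,
    // negative error code on failure.
    client.Write(fd, buf, 0, sizeof(buf));
    client.Read(fd, buf, 0, sizeof(buf));
    client.Close(fd);
    client.UnInit();
    return 0;
}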
diff --git a/src/client/libcurve_snapshot.h b/src/client/libcurve_snapshot.h
index d8b2ce841a..24f9d2f163 100644
--- a/src/client/libcurve_snapshot.h
+++ b/src/client/libcurve_snapshot.h
@@ -27,305 +27,304 @@
 #include
 #include

-#include "src/client/mds_client.h"
-#include "src/client/config_info.h"
 #include "src/client/client_common.h"
+#include "src/client/config_info.h"
 #include "src/client/iomanager4chunk.h"
+#include "src/client/mds_client.h"

 namespace curve {
 namespace client {

-// SnapshotClient为外围快照系统与MDS和Chunkserver通信的出口
+// SnapshotClient is the gateway through which the external snapshot system
+// communicates with the MDS and chunkservers
 class SnapshotClient {
  public:
-    SnapshotClient();
-    ~SnapshotClient() = default;
-    /**
-     * 初始化函数,外围系统直接传入配置选项
-     * @param: opt为外围配置选项
-     * @return:0为成功,-1为失败
-     */
-    int Init(const ClientConfigOption& opt);
+    SnapshotClient();
+    ~SnapshotClient() = default;
+    /**
+     * Initialization function; the external system passes in the
+     * configuration options directly
+     * @param: opt is the external configuration option
+     * @return: 0 on success, -1 on failure
+     */
+    int Init(const ClientConfigOption& opt);

-    /**
-     * file对象初始化函数
-     * @param: 配置文件路径
-     */
-    int Init(const std::string& configpath);
+    /**
+     * file object initialization function
+     * @param: configuration file path
+     */
+    int Init(const std::string& configpath);

-    /**
-     * 创建快照
-     * @param: userinfo是用户信息
-     * @param: filename为要创建快照的文件名
-     * @param: seq是出参,获取该文件的版本信息
-     * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED
-     */
-    int CreateSnapShot(const std::string& filename,
-                       const UserInfo_t& userinfo,
-                       uint64_t* seq);
-    /**
-     * 删除快照
-     * @param: userinfo是用户信息
-     * @param: filename为要删除的文件名
-     * @param: seq该文件的版本信息
-     * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED
-     */
-    int DeleteSnapShot(const std::string& filename,
-                       const UserInfo_t& userinfo,
-                       uint64_t seq);
-    /**
-     * 获取快照对应的文件信息
-     * @param: userinfo是用户信息
-     * @param: filename为对应的文件名
-     * @param: seq为该文件打快照时对应的版本信息
-     * @param: snapinfo是出参,保存当前文件的基础信息
-     * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED
-     */
-    int GetSnapShot(const std::string& fname,
-                    const UserInfo_t& userinfo,
-                    uint64_t seq,
-                    FInfo* snapinfo);
-    /**
-     * 列出当前文件对应版本列表的文件信息
-     * @param: userinfo是用户信息
-     * @param: filenam文件名
-     * @param: seqvec是当前文件的版本列表
-     * @param: snapif是出参,获取多个seq号的文件信息
-     * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED
-     */
-    int ListSnapShot(const std::string& filename,
-                     const UserInfo_t& userinfo,
-                     const std::vector* seqvec,
-                     std::map* snapif);
-    /**
-     * 获取快照数据segment信息
-     * @param: userinfo是用户信息
-     * @param: filenam文件名
-     * @param: seq是文件版本号信息
-     * @param: offset是文件的偏移
-     * @param:segInfo是出参,保存当前文件的快照segment信息
-     * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED
-     */
-    int GetSnapshotSegmentInfo(const std::string& filename,
-                               const UserInfo_t& userinfo,
-                               uint64_t seq,
-                               uint64_t offset,
-                               SegmentInfo *segInfo);

+    /**
+     * Create a snapshot
+     * @param: userinfo is the user information
+     * @param: filename is the name of the file to snapshot
+     * @param: seq is an output parameter carrying the version information
+     * of the file
+     * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise
+     * LIBCURVE_ERROR::FAILED
+     */
+    int CreateSnapShot(const std::string& filename, const UserInfo_t& userinfo,
+                       uint64_t* seq);
+    /**
+     * Delete a snapshot
+     * @param: userinfo is the user information
+     * @param: filename is the file name whose snapshot is to be deleted
+     * @param: seq the version information of this file
+     * @return: Successfully returned
LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED + */ + int DeleteSnapShot(const std::string& filename, const UserInfo_t& userinfo, + uint64_t seq); + /** + * Obtain file information corresponding to the snapshot + * @param: userinfo is the user information + * @param: filename is the corresponding file name + * @param: seq corresponds to the version information when taking a snapshot + * of the file + * @param: snapinfo is a parameter that saves the basic information of the + * current file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED + */ + int GetSnapShot(const std::string& fname, const UserInfo_t& userinfo, + uint64_t seq, FInfo* snapinfo); + /** + * List the file information corresponding to the version list of the + * current file + * @param: userinfo is the user information + * @param: filenam file name + * @param: seqvec is the version list of the current file + * @param: snapif is a parameter that obtains file information for multiple + * seq numbers + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED + */ + int ListSnapShot(const std::string& filename, const UserInfo_t& userinfo, + const std::vector* seqvec, + std::map* snapif); + /** + * Obtain snapshot data segment information + * @param: userinfo is the user information + * @param: filenam file name + * @param: seq is the file version number information + * @param: offset is the offset of the file + * @param: segInfo is a parameter that saves the snapshot segment + * information of the current file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED + */ + int GetSnapshotSegmentInfo(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, + uint64_t offset, SegmentInfo* segInfo); - /** - * 读取seq版本号的快照数据 - * @param: cidinfo是当前chunk对应的id信息 - * @param: seq是快照版本号 - * @param: offset是快照内的offset - * @param: len是要读取的长度 - * @param: buf是读取缓冲区 - * @param: scc是异步回调 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, - uint64_t len, char *buf, SnapCloneClosure* scc); - /** - * 删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param: cidinfo是当前chunk对应的id信息 - * @param: correctedSeq是chunk需要修正的版本 - */ - int DeleteChunkSnapshotOrCorrectSn(ChunkIDInfo cidinfo, - uint64_t correctedSeq); - /** - * 获取chunk的版本信息,chunkInfo是出参 - * @param: cidinfo是当前chunk对应的id信息 - * @param: chunkInfo是快照的详细信息 - */ - int GetChunkInfo(ChunkIDInfo cidinfo, ChunkInfoDetail *chunkInfo); - /** - * 获取快照状态 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seq是文件版本号信息 - */ - int CheckSnapShotStatus(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t seq, - FileStatus* filestatus); - /** - * @brief 创建clone文件 - * @detail - * - 若是clone,sn重置为初始值 - * - 若是recover,sn不变 - * - * @param source clone源文件名 - * @param: destination clone目标文件名 - * @param: userinfo 用户信息 - * @param: size 文件大小 - * @param: sn 版本号 - * @param: chunksize是要创建文件的chunk大小 - * @param stripeUnit stripe size - * @param stripeCount stripe count - * @param poolset poolset of destination file - * @param[out] fileinfo 创建的目标文件的文件信息 - * - * @return 错误码 - */ - int CreateCloneFile(const std::string& source, - const std::string& destination, - const UserInfo_t& userinfo, - uint64_t size, - uint64_t sn, - uint32_t chunksize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - FInfo* fileinfo); + /** + * Read snapshot data of seq 
version number + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: seq is the snapshot version number + * @param: offset is the offset within the snapshot + * @param: len is the length to be read + * @param: buf is a read buffer + * @param: scc is an asynchronous callback + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED + */ + int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc); + /** + * Delete snapshots generated during this dump or left over from history + * If no snapshot is generated during the dump process, modify the correntSn + * of the chunk + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: correctedSeq is the version of chunk that needs to be corrected + */ + int DeleteChunkSnapshotOrCorrectSn(ChunkIDInfo cidinfo, + uint64_t correctedSeq); + /** + * Obtain the version information of the chunk, where chunkInfo is the + * output parameter + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: chunkInfo is the detailed information of the snapshot + */ + int GetChunkInfo(ChunkIDInfo cidinfo, ChunkInfoDetail* chunkInfo); + /** + * Get snapshot status + * @param: userinfo is the user information + * @param: filenam file name + * @param: seq is the file version number information + */ + int CheckSnapShotStatus(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, + FileStatus* filestatus); + /** + * @brief Create clone file + * @detail + * - If clone, reset sn to initial value + * - If recover, sn remains unchanged + * + * @param source clone Source file name + * @param: destination clone Destination file name + * @param: userinfo User information + * @param: size File size + * @param: sn version number + * @param: chunksize is the chunk size of the file to be created + * @param stripeUnit stripe size + * @param stripeCount stripe count + * @param poolset poolset of destination file + * @param[out] fileinfo The file information of the target file created + * + * @return error code + */ + int CreateCloneFile(const std::string& source, + const std::string& destination, + const UserInfo_t& userinfo, uint64_t size, uint64_t sn, + uint32_t chunksize, uint64_t stripeUnit, + uint64_t stripeCount, const std::string& poolset, + FInfo* fileinfo); - /** - * @brief lazy 创建clone chunk - * @param:location 数据源的url - * @param:chunkidinfo 目标chunk - * @param:sn chunk的序列号 - * @param:chunkSize chunk的大小 - * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn - * @param: scc是异步回调 - * - * @return 错误码 - */ - int CreateCloneChunk(const std::string &location, - const ChunkIDInfo &chunkidinfo, uint64_t sn, - uint64_t correntSn, uint64_t chunkSize, - SnapCloneClosure* scc); + /** + * @brief lazy Create clone chunk + * @param: location URL of the data source + * @param: chunkidinfo target chunk + * @param: sn chunk's serial number + * @param: chunkSize Chunk size + * @param: correntSn used to modify the chunk when creating CloneChunk + * @param: scc is an asynchronous callback + * + * @return error code + */ + int CreateCloneChunk(const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, + uint64_t correntSn, uint64_t chunkSize, + SnapCloneClosure* scc); - /** - * @brief 实际恢复chunk数据 - * - * @param:chunkidinfo chunkidinfo - * @param:offset 偏移 - * @param:len 长度 - * @param: scc是异步回调 - * - * @return 错误码 - */ - int RecoverChunk(const ChunkIDInfo 
&chunkidinfo, - uint64_t offset, uint64_t len, - SnapCloneClosure* scc); + /** + * @brief Actual recovery chunk data + * + * @param: chunkidinfo chunkidinfo + * @param: offset offset + * @param: len length + * @param: scc is an asynchronous callback + * + * @return error code + */ + int RecoverChunk(const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc); - /** - * @brief 通知mds完成Clone Meta - * - * @param:destination 目标文件 - * @param:userinfo用户信息 - * - * @return 错误码 - */ - int CompleteCloneMeta(const std::string &destination, - const UserInfo_t& userinfo); + /** + * @brief Notify mds to complete Clone Meta + * + * @param: destination target file + * @param: userinfo User Information + * + * @return error code + */ + int CompleteCloneMeta(const std::string& destination, + const UserInfo_t& userinfo); - /** - * @brief 通知mds完成Clone Chunk - * - * @param:destination 目标文件 - * @param:userinfo用户信息 - * - * @return 错误码 - */ - int CompleteCloneFile(const std::string &destination, - const UserInfo_t& userinfo); + /** + * @brief Notify mds to complete Clone Chunk + * + * @param: destination target file + * @param: userinfo User Information + * + * @return error code + */ + int CompleteCloneFile(const std::string& destination, + const UserInfo_t& userinfo); - /** - * 设置clone文件状态 - * @param: filename 目标文件 - * @param: filestatus为要设置的目标状态 - * @param: userinfo用户信息 - * @param: fileId为文件ID信息,非必填 - * - * @return 错误码 - */ - int SetCloneFileStatus(const std::string &filename, - const FileStatus& filestatus, - const UserInfo_t& userinfo, - uint64_t fileID = 0); + /** + * Set clone file status + * @param: filename Target file + * @param: filestatus is the target state to be set + * @param: userinfo User information + * @param: fileId is the file ID information, not required + * + * @return error code + */ + int SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const UserInfo_t& userinfo, uint64_t fileID = 0); - /** - * @brief 获取文件信息 - * - * @param:filename 文件名 - * @param:userinfo 用户信息 - * @param[out] fileInfo 文件信息 - * - * @return 错误码 - */ - int GetFileInfo(const std::string &filename, - const UserInfo_t& userinfo, - FInfo* fileInfo); + /** + * @brief Get file information + * + * @param: filename File name + * @param: userinfo User Information + * @param[out] fileInfo file information + * + * @return error code + */ + int GetFileInfo(const std::string& filename, const UserInfo_t& userinfo, + FInfo* fileInfo); - /** - * @brief 查询或分配文件segment信息 - * - * @param:userinfo 用户信息 - * @param:offset 偏移值 - * @param:segInfo segment信息 - * - * @return 错误码 - */ - int GetOrAllocateSegmentInfo(bool allocate, - uint64_t offset, - const FInfo_t* fi, - SegmentInfo *segInfo); + /** + * @brief Query or allocate file segment information + * + * @param: userinfo User Information + * @param: offset offset value + * @param: segInfo segment information + * + * @return error code + */ + int GetOrAllocateSegmentInfo(bool allocate, uint64_t offset, + const FInfo_t* fi, SegmentInfo* segInfo); - /** - * @brief 为recover rename复制的文件 - * - * @param:userinfo 用户信息 - * @param:originId 被恢复的原始文件Id - * @param:destinationId 克隆出的目标文件Id - * @param:origin 被恢复的原始文件名 - * @param:destination 克隆出的目标文件 - * - * @return 错误码 - */ - int RenameCloneFile(const UserInfo_t& userinfo, - uint64_t originId, - uint64_t destinationId, - const std::string &origin, - const std::string &destination); + /** + * @brief is the file copied for recover rename + * + * @param: userinfo User Information + * @param: originId The 
original file ID that was restored + * @param: destinationId The cloned target file ID + * @param: origin The original file name of the recovered file + * @param: destination The cloned target file + * + * @return error code + */ + int RenameCloneFile(const UserInfo_t& userinfo, uint64_t originId, + uint64_t destinationId, const std::string& origin, + const std::string& destination); - /** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @param: id为文件id,默认值为0,如果用户不指定该值,不会传id到mds - */ - int DeleteFile(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t id = 0); + /** + * Delete files + * @param: userinfo is the user information + * @param: filename The file name to be deleted + * @param: id is the file id, with a default value of 0. If the user does + * not specify this value, the id will not be passed to mds + */ + int DeleteFile(const std::string& filename, const UserInfo_t& userinfo, + uint64_t id = 0); - /** - * 析构,回收资源 - */ - void UnInit(); - /** - * 获取iomanager信息,测试代码使用 - */ - IOManager4Chunk* GetIOManager4Chunk() {return &iomanager4chunk_;} + /** + * Deconstruct and recycle resources + */ + void UnInit(); + /** + * Obtain iomanager information and test code usage + */ + IOManager4Chunk* GetIOManager4Chunk() { return &iomanager4chunk_; } private: - /** - * 获取logicalpool中copyset的serverlist - * @param: lpid是逻辑池id - * @param: csid是逻辑池中的copysetid数据集 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED - */ - int GetServerList(const LogicPoolID& lpid, - const std::vector& csid); + /** + * Obtain the serverlist of copyset in the logicalpool + * @param: lpid is the logical pool id + * @param: csid is the copysetid dataset in the logical pool + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise + * LIBCURVE_ERROR::FAILED + */ + int GetServerList(const LogicPoolID& lpid, + const std::vector& csid); private: - // MDSClient负责与Metaserver通信,所有通信都走这个接口 - MDSClient mdsclient_; + // MDSClient is responsible for communicating with Metaserver, and all + // communication goes through this interface + MDSClient mdsclient_; - // IOManager4Chunk用于管理发向chunkserver端的IO - IOManager4Chunk iomanager4chunk_; + // IOManager4Chunk is used to manage IO sent to the chunkserver end + IOManager4Chunk iomanager4chunk_; - // 用于client 配置读取 - ClientConfig clientconfig_; + // Used for client configuration reading + ClientConfig clientconfig_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_LIBCURVE_SNAPSHOT_H_ diff --git a/src/client/mds_client.cpp b/src/client/mds_client.cpp index 9ace95e823..e8d8a35f6d 100644 --- a/src/client/mds_client.cpp +++ b/src/client/mds_client.cpp @@ -21,11 +21,11 @@ */ #include "src/client/mds_client.h" -#include #include +#include -#include #include +#include #include "src/client/lease_executor.h" #include "src/common/net_common.h" @@ -35,6 +35,7 @@ namespace curve { namespace client { +using curve::common::ChunkServerLocation; using curve::common::NetCommon; using curve::common::TimeUtility; using curve::mds::FileInfo; @@ -42,24 +43,23 @@ using curve::mds::PageFileChunkInfo; using curve::mds::PageFileSegment; using curve::mds::ProtoSession; using curve::mds::StatusCode; -using curve::common::ChunkServerLocation; using curve::mds::topology::CopySetServerInfo; -// rpc发送和mds地址切换状态机 +// Rpc sending and mds address switching state machine int RPCExcutorRetryPolicy::DoRPCTask(RPCFunc rpctask, uint64_t maxRetryTimeMS) { - // 记录上一次正在服务的mds index + // Record the last serving mds 
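The retry policy changed in the next file doubles the RPC timeout after every timeout error and clamps it between the configured base and maximum, i.e. exponential backoff. A self-contained restatement of that rule (illustrative only, not part of this patch; the function name is made up):

// Illustrative sketch, not part of this patch. Mirrors the timeout
// branch of RPCExcutorRetryPolicy::PreProcessBeforeRetry() below.
#include <algorithm>
#include <cstdint>

uint64_t NextRpcTimeoutMs(uint64_t current, uint64_t base, uint64_t max) {
    uint64_t next = current * 2;    // exponential growth on timeout
    next = std::min(next, max);     // clamp to maxRPCTimeoutMS
    return std::max(next, base);    // never below the base rpcTimeoutMs
}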
index int lastWorkingMDSIndex = currentWorkingMDSAddrIndex_; - // 记录当前正在使用的mds index + // Record the currently used mds index int curRetryMDSIndex = currentWorkingMDSAddrIndex_; - // 记录当前mds重试的次数 + // Record the number of current mds retries uint64_t currentMDSRetryCount = 0; - // 执行起始时间点 + // Execution start time point uint64_t startTime = TimeUtility::GetTimeofDayMs(); - // rpc超时时间 + // RPC timeout uint64_t rpcTimeOutMS = retryOpt_.rpcTimeoutMs; // The count of normal retry @@ -68,16 +68,18 @@ int RPCExcutorRetryPolicy::DoRPCTask(RPCFunc rpctask, uint64_t maxRetryTimeMS) { int retcode = -1; bool retryUnlimit = (maxRetryTimeMS == 0); while (GoOnRetry(startTime, maxRetryTimeMS)) { - // 1. 创建当前rpc需要使用的channel和controller,执行rpc任务 + // 1. Create the channels and controllers required for the current RPC + // and execute the RPC task retcode = ExcuteTask(curRetryMDSIndex, rpcTimeOutMS, rpctask); - // 2. 根据rpc返回值进行预处理 + // 2. Preprocessing based on rpc return value if (retcode < 0) { curRetryMDSIndex = PreProcessBeforeRetry( retcode, retryUnlimit, &normalRetryCount, ¤tMDSRetryCount, curRetryMDSIndex, &lastWorkingMDSIndex, &rpcTimeOutMS); continue; - // 3. 此时rpc是正常返回的,更新当前正在服务的mds地址index + // 3. At this point, rpc returns normally and updates the index of + // the currently serving mds address } else { currentWorkingMDSAddrIndex_.store(curRetryMDSIndex); break; @@ -98,11 +100,11 @@ bool RPCExcutorRetryPolicy::GoOnRetry(uint64_t startTimeMS, } int RPCExcutorRetryPolicy::PreProcessBeforeRetry(int status, bool retryUnlimit, - uint64_t *normalRetryCount, - uint64_t *curMDSRetryCount, + uint64_t* normalRetryCount, + uint64_t* curMDSRetryCount, int curRetryMDSIndex, - int *lastWorkingMDSIndex, - uint64_t *timeOutMS) { + int* lastWorkingMDSIndex, + uint64_t* timeOutMS) { int nextMDSIndex = 0; bool rpcTimeout = false; bool needChangeMDS = false; @@ -115,44 +117,48 @@ int RPCExcutorRetryPolicy::PreProcessBeforeRetry(int status, bool retryUnlimit, bthread_usleep(retryOpt_.waitSleepMs * 1000); } - // 1. 访问存在的IP地址,但无人监听:ECONNREFUSED - // 2. 正常发送RPC情况下,对端进程挂掉了:EHOSTDOWN - // 3. 对端server调用了Stop:ELOGOFF - // 4. 对端链接已关闭:ECONNRESET - // 5. 在一个mds节点上rpc失败超过限定次数 - // 在这几种场景下,主动切换mds。 + // 1. Access to an existing IP address, but no one is listening: + // ECONNREFUSED + // 2. In the normal RPC scenario, the remote process has crashed: + // EHOSTDOWN + // 3. The remote server called Stop: ELOGOFF + // 4. The remote connection has been closed: ECONNRESET + // 5. RPC failures on a single MDS node exceed the specified limit. + // In these scenarios, actively switch the MDS. } else if (status == -EHOSTDOWN || status == -ECONNRESET || status == -ECONNREFUSED || status == -brpc::ELOGOFF || *curMDSRetryCount >= retryOpt_.maxFailedTimesBeforeChangeAddr) { needChangeMDS = true; - // 在开启健康检查的情况下,在底层tcp连接失败时 - // rpc请求会本地直接返回 EHOSTDOWN - // 这种情况下,增加一些睡眠时间,避免大量的重试请求占满bthread - // TODO(wuhanqing): 关闭健康检查 + // When health checks are enabled, in the event of a failure in the + // underlying TCP connection, RPC requests will directly return + // EHOSTDOWN locally. In this situation, add some sleep time to avoid a + // large number of retry requests overwhelming bthread. + // TODO(wuhanqing): Disable health checks. 
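[Editor's note] The pre-processing described above reduces to two rules: connection-level errors switch to the next MDS immediately, while timeouts keep the current MDS and double the RPC timeout up to a configured cap. A condensed, standalone sketch of that policy follows; `RetryContext` and its field names are invented for illustration and are not part of this patch:

#include <algorithm>
#include <cerrno>
#include <cstdint>

// Hypothetical illustration of the retry pre-processing policy.
struct RetryContext {
    uint64_t timeoutMs;                // current RPC timeout
    uint64_t baseTimeoutMs;            // configured lower bound
    uint64_t maxTimeoutMs;             // configured upper bound
    uint64_t failuresOnCurrentMds;     // consecutive failures on this MDS
    uint64_t maxFailuresBeforeSwitch;  // threshold for switching MDS
};

// Returns true when the caller should move on to the next MDS address.
bool ShouldSwitchMds(int status, RetryContext* ctx) {
    switch (status) {
        case -ECONNREFUSED:  // address reachable but nobody listening
        case -EHOSTDOWN:     // peer process crashed
        case -ECONNRESET:    // peer closed the connection
            return true;     // switch at once (brpc::ELOGOFF behaves alike)
        case -ETIMEDOUT:     // timeout: back off exponentially, stay put
            ctx->timeoutMs = std::min(ctx->timeoutMs * 2, ctx->maxTimeoutMs);
            ctx->timeoutMs = std::max(ctx->timeoutMs, ctx->baseTimeoutMs);
            return false;
        default:             // other errors: switch after repeated failures
            return ++ctx->failuresOnCurrentMds >= ctx->maxFailuresBeforeSwitch;
    }
}
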
if (status == -EHOSTDOWN) { bthread_usleep(retryOpt_.rpcRetryIntervalUS); } } else if (status == -brpc::ERPCTIMEDOUT || status == -ETIMEDOUT) { rpcTimeout = true; needChangeMDS = false; - // 触发超时指数退避 + // Trigger exponential backoff for timeouts *timeOutMS *= 2; *timeOutMS = std::min(*timeOutMS, retryOpt_.maxRPCTimeoutMS); *timeOutMS = std::max(*timeOutMS, retryOpt_.rpcTimeoutMs); } - // 获取下一次需要重试的mds索引 + // Obtain the mds index that needs to be retried next time nextMDSIndex = GetNextMDSIndex(needChangeMDS, curRetryMDSIndex, lastWorkingMDSIndex); // NOLINT - // 更新curMDSRetryCount和rpctimeout + // Update curMDSRetryCount and rpctimeout if (nextMDSIndex != curRetryMDSIndex) { *curMDSRetryCount = 0; *timeOutMS = retryOpt_.rpcTimeoutMs; } else { ++(*curMDSRetryCount); - // 还是在当前mds上重试,且rpc不是超时错误,就进行睡眠,然后再重试 + // Still retrying on the current mds; if the rpc error is not a timeout, + // sleep before retrying if (!rpcTimeout) { bthread_usleep(retryOpt_.rpcRetryIntervalUS); } @@ -161,20 +167,21 @@ int RPCExcutorRetryPolicy::PreProcessBeforeRetry(int status, bool retryUnlimit, return nextMDSIndex; } /** - * 根据输入状态获取下一次需要重试的mds索引,mds切换逻辑: - * 记录三个状态:curRetryMDSIndex、lastWorkingMDSIndex、 - * currentWorkingMDSIndex - * 1. 开始的时候curRetryMDSIndex = currentWorkingMDSIndex + * Obtain the next MDS index to retry based on the input state. The MDS + * switching logic is as follows: Record three states: curRetryMDSIndex, + * lastWorkingMDSIndex, currentWorkingMDSIndex + * 1. At the beginning, curRetryMDSIndex = currentWorkingMDSIndex * lastWorkingMDSIndex = currentWorkingMDSIndex - * 2. 如果rpc失败,会触发切换curRetryMDSIndex,如果这时候lastWorkingMDSIndex - * 与currentWorkingMDSIndex相等,这时候会顺序切换到下一个mds索引, - * 如果lastWorkingMDSIndex与currentWorkingMDSIndex不相等,那么 - * 说明有其他接口更新了currentWorkingMDSAddrIndex_,那么本次切换 - * 直接切换到currentWorkingMDSAddrIndex_ + * 2. If an RPC fails, it triggers a switch to curRetryMDSIndex. If at this + * point, lastWorkingMDSIndex is equal to currentWorkingMDSIndex, then + * sequentially switch to the next MDS index. If lastWorkingMDSIndex is not + * equal to currentWorkingMDSIndex, it means that another interface has updated + * currentWorkingMDSAddrIndex_, so this time, switch directly to + * currentWorkingMDSAddrIndex_. */ int RPCExcutorRetryPolicy::GetNextMDSIndex(bool needChangeMDS, int currentRetryIndex, - int *lastWorkingindex) { + int* lastWorkingindex) { int nextMDSIndex = 0; if (std::atomic_compare_exchange_strong( &currentWorkingMDSAddrIndex_, lastWorkingindex, @@ -194,13 +201,14 @@ int RPCExcutorRetryPolicy::ExcuteTask(int mdsindex, uint64_t rpcTimeOutMS, assert(mdsindex >= 0 && mdsindex < static_cast(retryOpt_.addrs.size())); - const std::string &mdsaddr = retryOpt_.addrs[mdsindex]; + const std::string& mdsaddr = retryOpt_.addrs[mdsindex]; brpc::Channel channel; int ret = channel.Init(mdsaddr.c_str(), nullptr); if (ret != 0) { LOG(WARNING) << "Init channel failed!
addr = " << mdsaddr; - // 返回EHOSTDOWN给上层调用者,促使其切换mds + // Return EHOSTDOWN to the upper level caller, prompting them to switch + // mds return -EHOSTDOWN; } @@ -211,14 +219,15 @@ int RPCExcutorRetryPolicy::ExcuteTask(int mdsindex, uint64_t rpcTimeOutMS, return task(mdsindex, rpcTimeOutMS, &channel, &cntl); } - -MDSClient::MDSClient(const std::string &metricPrefix) - : inited_(false), metaServerOpt_(), mdsClientMetric_(metricPrefix), +MDSClient::MDSClient(const std::string& metricPrefix) + : inited_(false), + metaServerOpt_(), + mdsClientMetric_(metricPrefix), rpcExcutor_() {} MDSClient::~MDSClient() { UnInitialize(); } -LIBCURVE_ERROR MDSClient::Initialize(const MetaServerOption &metaServerOpt) { +LIBCURVE_ERROR MDSClient::Initialize(const MetaServerOption& metaServerOpt) { if (inited_) { LOG(INFO) << "MDSClient already started!"; return LIBCURVE_ERROR::OK; @@ -229,7 +238,7 @@ LIBCURVE_ERROR MDSClient::Initialize(const MetaServerOption &metaServerOpt) { rpcExcutor_.SetOption(metaServerOpt.rpcRetryOpt); std::ostringstream oss; - for (const auto &addr : metaServerOpt_.rpcRetryOpt.addrs) { + for (const auto& addr : metaServerOpt_.rpcRetryOpt.addrs) { oss << " " << addr; } @@ -238,19 +247,15 @@ LIBCURVE_ERROR MDSClient::Initialize(const MetaServerOption &metaServerOpt) { return LIBCURVE_ERROR::OK; } - -void MDSClient::UnInitialize() { - inited_ = false; -} +void MDSClient::UnInitialize() { inited_ = false; } #define RPCTaskDefine \ [&](CURVE_UNUSED int addrindex, CURVE_UNUSED uint64_t rpctimeoutMS, \ brpc::Channel* channel, brpc::Controller* cntl) -> int -LIBCURVE_ERROR MDSClient::OpenFile(const std::string &filename, - const UserInfo_t &userinfo, FInfo_t *fi, - FileEpoch_t *fEpoch, - LeaseSession *lease) { +LIBCURVE_ERROR MDSClient::OpenFile(const std::string& filename, + const UserInfo_t& userinfo, FInfo_t* fi, + FileEpoch_t* fEpoch, LeaseSession* lease) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -278,12 +283,12 @@ LIBCURVE_ERROR MDSClient::OpenFile(const std::string &filename, bool flag = response.has_protosession() && response.has_fileinfo(); if (flag) { - const ProtoSession &leasesession = response.protosession(); + const ProtoSession& leasesession = response.protosession(); lease->sessionID = leasesession.sessionid(); lease->leaseTime = leasesession.leasetime(); lease->createTime = leasesession.createtime(); - const curve::mds::FileInfo &protoFileInfo = response.fileinfo(); + const curve::mds::FileInfo& protoFileInfo = response.fileinfo(); LOG(INFO) << "OpenFile succeeded, filename: " << filename << ", file info " << protoFileInfo.DebugString(); ServiceHelper::ProtoFileInfo2Local(protoFileInfo, fi, fEpoch); @@ -349,9 +354,9 @@ LIBCURVE_ERROR MDSClient::CreateFile(const CreateFileContext& context) { rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::CloseFile(const std::string &filename, - const UserInfo_t &userinfo, - const std::string &sessionid) { +LIBCURVE_ERROR MDSClient::CloseFile(const std::string& filename, + const UserInfo_t& userinfo, + const std::string& sessionid) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -385,9 +390,9 @@ LIBCURVE_ERROR MDSClient::CloseFile(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::GetFileInfo(const std::string &filename, - const UserInfo_t &uinfo, FInfo_t *fi, - FileEpoch_t *fEpoch) { +LIBCURVE_ERROR MDSClient::GetFileInfo(const std::string& filename, + const UserInfo_t& uinfo, FInfo_t* fi, + 
FileEpoch_t* fEpoch) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -421,19 +426,17 @@ LIBCURVE_ERROR MDSClient::GetFileInfo(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::IncreaseEpoch(const std::string& filename, - const UserInfo_t& userinfo, - FInfo_t* fi, - FileEpoch_t *fEpoch, - std::list> *csLocs) { +LIBCURVE_ERROR MDSClient::IncreaseEpoch( + const std::string& filename, const UserInfo_t& userinfo, FInfo_t* fi, + FileEpoch_t* fEpoch, std::list>* csLocs) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; IncreaseFileEpochResponse response; mdsClientMetric_.increaseEpoch.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.increaseEpoch.latency); - MDSClientBase::IncreaseEpoch( - filename, userinfo, &response, cntl, channel); + MDSClientBase::IncreaseEpoch(filename, userinfo, &response, cntl, + channel); if (cntl->Failed()) { mdsClientMetric_.increaseEpoch.eps.count << 1; @@ -445,10 +448,10 @@ LIBCURVE_ERROR MDSClient::IncreaseEpoch(const std::string& filename, LIBCURVE_ERROR retcode; MDSStatusCode2LibcurveError(stcode, &retcode); LOG(ERROR) << "IncreaseEpoch: filename = " << filename - << ", owner = " << userinfo.owner - << ", errocde = " << retcode - << ", error msg = " << StatusCode_Name(stcode) - << ", log id = " << cntl->log_id(); + << ", owner = " << userinfo.owner + << ", errocde = " << retcode + << ", error msg = " << StatusCode_Name(stcode) + << ", log id = " << cntl->log_id(); return retcode; } @@ -466,12 +469,12 @@ LIBCURVE_ERROR MDSClient::IncreaseEpoch(const std::string& filename, csinfo.peerID = response.cslocs(i).chunkserverid(); EndPoint internal; butil::str2endpoint(response.cslocs(i).hostip().c_str(), - response.cslocs(i).port(), &internal); + response.cslocs(i).port(), &internal); EndPoint external; const bool hasExternalIp = response.cslocs(i).has_externalip(); if (hasExternalIp) { butil::str2endpoint(response.cslocs(i).externalip().c_str(), - response.cslocs(i).port(), &external); + response.cslocs(i).port(), &external); } csinfo.internalAddr = PeerAddr(internal); csinfo.externalAddr = PeerAddr(external); @@ -508,10 +511,10 @@ LIBCURVE_ERROR MDSClient::CreateSnapShot(const std::string& filename, if ((stcode == StatusCode::kOK || stcode == StatusCode::kFileUnderSnapShot) && hasinfo) { - FInfo_t *fi = new (std::nothrow) FInfo_t; + FInfo_t* fi = new (std::nothrow) FInfo_t; FileEpoch_t fEpoch; - ServiceHelper::ProtoFileInfo2Local(response.snapshotfileinfo(), - fi, &fEpoch); + ServiceHelper::ProtoFileInfo2Local(response.snapshotfileinfo(), fi, + &fEpoch); *seq = fi->seqnum; delete fi; if (stcode == StatusCode::kOK) { @@ -527,8 +530,8 @@ LIBCURVE_ERROR MDSClient::CreateSnapShot(const std::string& filename, if (hasinfo) { FInfo_t fi; FileEpoch_t fEpoch; - ServiceHelper::ProtoFileInfo2Local(response.snapshotfileinfo(), - &fi, &fEpoch); // NOLINT + ServiceHelper::ProtoFileInfo2Local(response.snapshotfileinfo(), &fi, + &fEpoch); // NOLINT *seq = fi.seqnum; } @@ -545,8 +548,8 @@ LIBCURVE_ERROR MDSClient::CreateSnapShot(const std::string& filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::DeleteSnapShot(const std::string &filename, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::DeleteSnapShot(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq) { auto task = RPCTaskDefine { (void)addrindex; @@ -578,10 +581,10 @@ LIBCURVE_ERROR MDSClient::DeleteSnapShot(const std::string &filename, 
rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string &filename, - const UserInfo_t &userinfo, - const std::vector *seq, - std::map *snapif) { +LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string& filename, + const UserInfo_t& userinfo, + const std::vector* seq, + std::map* snapif) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -612,8 +615,8 @@ LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string &filename, for (int i = 0; i < response.fileinfo_size(); i++) { FInfo_t tempInfo; FileEpoch_t fEpoch; - ServiceHelper::ProtoFileInfo2Local(response.fileinfo(i), - &tempInfo, &fEpoch); + ServiceHelper::ProtoFileInfo2Local(response.fileinfo(i), &tempInfo, + &fEpoch); snapif->insert(std::make_pair(tempInfo.seqnum, tempInfo)); } @@ -628,10 +631,10 @@ LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::GetSnapshotSegmentInfo(const std::string &filename, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::GetSnapshotSegmentInfo(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, uint64_t offset, - SegmentInfo *segInfo) { + SegmentInfo* segInfo) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -692,11 +695,11 @@ LIBCURVE_ERROR MDSClient::GetSnapshotSegmentInfo(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::RefreshSession(const std::string &filename, - const UserInfo_t &userinfo, - const std::string &sessionid, - LeaseRefreshResult *resp, - LeaseSession *lease) { +LIBCURVE_ERROR MDSClient::RefreshSession(const std::string& filename, + const UserInfo_t& userinfo, + const std::string& sessionid, + LeaseRefreshResult* resp, + LeaseSession* lease) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -728,40 +731,39 @@ LIBCURVE_ERROR MDSClient::RefreshSession(const std::string &filename, } switch (stcode) { - case StatusCode::kSessionNotExist: - case StatusCode::kFileNotExists: - resp->status = LeaseRefreshResult::Status::NOT_EXIST; - break; - case StatusCode::kOwnerAuthFail: - resp->status = LeaseRefreshResult::Status::FAILED; - return LIBCURVE_ERROR::AUTHFAIL; - break; - case StatusCode::kOK: - if (response.has_fileinfo()) { - FileEpoch_t fEpoch; - ServiceHelper::ProtoFileInfo2Local(response.fileinfo(), - &resp->finfo, - &fEpoch); - resp->status = LeaseRefreshResult::Status::OK; - } else { - LOG(WARNING) << "session response has no fileinfo!"; - return LIBCURVE_ERROR::FAILED; - } - if (nullptr != lease) { - if (!response.has_protosession()) { - LOG(WARNING) << "session response has no protosession"; + case StatusCode::kSessionNotExist: + case StatusCode::kFileNotExists: + resp->status = LeaseRefreshResult::Status::NOT_EXIST; + break; + case StatusCode::kOwnerAuthFail: + resp->status = LeaseRefreshResult::Status::FAILED; + return LIBCURVE_ERROR::AUTHFAIL; + break; + case StatusCode::kOK: + if (response.has_fileinfo()) { + FileEpoch_t fEpoch; + ServiceHelper::ProtoFileInfo2Local(response.fileinfo(), + &resp->finfo, &fEpoch); + resp->status = LeaseRefreshResult::Status::OK; + } else { + LOG(WARNING) << "session response has no fileinfo!"; return LIBCURVE_ERROR::FAILED; } - ProtoSession leasesession = response.protosession(); - lease->sessionID = leasesession.sessionid(); - lease->leaseTime = leasesession.leasetime(); - lease->createTime = leasesession.createtime(); - } - break; - 
default: - resp->status = LeaseRefreshResult::Status::FAILED; - return LIBCURVE_ERROR::FAILED; - break; + if (nullptr != lease) { + if (!response.has_protosession()) { + LOG(WARNING) << "session response has no protosession"; + return LIBCURVE_ERROR::FAILED; + } + ProtoSession leasesession = response.protosession(); + lease->sessionID = leasesession.sessionid(); + lease->leaseTime = leasesession.leasetime(); + lease->createTime = leasesession.createtime(); + } + break; + default: + resp->status = LeaseRefreshResult::Status::FAILED; + return LIBCURVE_ERROR::FAILED; + break; } return LIBCURVE_ERROR::OK; }; @@ -769,10 +771,10 @@ LIBCURVE_ERROR MDSClient::RefreshSession(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::CheckSnapShotStatus(const std::string &filename, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::CheckSnapShotStatus(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, - FileStatus *filestatus) { + FileStatus* filestatus) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -807,9 +809,9 @@ LIBCURVE_ERROR MDSClient::CheckSnapShotStatus(const std::string &filename, } LIBCURVE_ERROR -MDSClient::GetServerList(const LogicPoolID &logicalpooid, - const std::vector ©setidvec, - std::vector> *cpinfoVec) { +MDSClient::GetServerList(const LogicPoolID& logicalpooid, + const std::vector& copysetidvec, + std::vector>* cpinfoVec) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -875,7 +877,7 @@ MDSClient::GetServerList(const LogicPoolID &logicalpooid, return ReturnError(rpcExcutor_.DoRPCTask(task, 0)); } -LIBCURVE_ERROR MDSClient::GetClusterInfo(ClusterContext *clsctx) { +LIBCURVE_ERROR MDSClient::GetClusterInfo(ClusterContext* clsctx) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -927,19 +929,14 @@ LIBCURVE_ERROR MDSClient::ListPoolset(std::vector* out) { }; return ReturnError( - rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); + rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::CreateCloneFile(const std::string& source, - const std::string& destination, - const UserInfo_t& userinfo, - uint64_t size, - uint64_t sn, - uint32_t chunksize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - FInfo* fileinfo) { +LIBCURVE_ERROR MDSClient::CreateCloneFile( + const std::string& source, const std::string& destination, + const UserInfo_t& userinfo, uint64_t size, uint64_t sn, uint32_t chunksize, + uint64_t stripeUnit, uint64_t stripeCount, const std::string& poolset, + FInfo* fileinfo) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -970,8 +967,8 @@ LIBCURVE_ERROR MDSClient::CreateCloneFile(const std::string& source, if (stcode == StatusCode::kOK) { FileEpoch_t fEpoch; - ServiceHelper::ProtoFileInfo2Local(response.fileinfo(), - fileinfo, &fEpoch); + ServiceHelper::ProtoFileInfo2Local(response.fileinfo(), fileinfo, + &fEpoch); fileinfo->sourceInfo.name = response.fileinfo().clonesource(); fileinfo->sourceInfo.length = response.fileinfo().clonelength(); } @@ -982,20 +979,20 @@ LIBCURVE_ERROR MDSClient::CreateCloneFile(const std::string& source, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::CompleteCloneMeta(const std::string &destination, - const UserInfo_t &userinfo) { +LIBCURVE_ERROR MDSClient::CompleteCloneMeta(const std::string& destination, + const UserInfo_t& userinfo) { return 
SetCloneFileStatus(destination, FileStatus::CloneMetaInstalled, userinfo); } -LIBCURVE_ERROR MDSClient::CompleteCloneFile(const std::string &destination, - const UserInfo_t &userinfo) { +LIBCURVE_ERROR MDSClient::CompleteCloneFile(const std::string& destination, + const UserInfo_t& userinfo) { return SetCloneFileStatus(destination, FileStatus::Cloned, userinfo); } -LIBCURVE_ERROR MDSClient::SetCloneFileStatus(const std::string &filename, - const FileStatus &filestatus, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const UserInfo_t& userinfo, uint64_t fileID) { auto task = RPCTaskDefine { (void)addrindex; @@ -1028,9 +1025,9 @@ LIBCURVE_ERROR MDSClient::SetCloneFileStatus(const std::string &filename, } LIBCURVE_ERROR MDSClient::GetOrAllocateSegment(bool allocate, uint64_t offset, - const FInfo_t *fi, - const FileEpoch_t *fEpoch, - SegmentInfo *segInfo) { + const FInfo_t* fi, + const FileEpoch_t* fEpoch, + SegmentInfo* segInfo) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -1050,23 +1047,23 @@ LIBCURVE_ERROR MDSClient::GetOrAllocateSegment(bool allocate, uint64_t offset, auto statuscode = response.statuscode(); switch (statuscode) { - case StatusCode::kParaError: - LOG(WARNING) << "GetOrAllocateSegment: error param!"; - return LIBCURVE_ERROR::FAILED; - case StatusCode::kOwnerAuthFail: - LOG(WARNING) << "GetOrAllocateSegment: auth failed!"; - return LIBCURVE_ERROR::AUTHFAIL; - case StatusCode::kFileNotExists: - LOG(WARNING) << "GetOrAllocateSegment: file not exists!"; - return LIBCURVE_ERROR::FAILED; - case StatusCode::kSegmentNotAllocated: - LOG(WARNING) << "GetOrAllocateSegment: segment not allocated!"; - return LIBCURVE_ERROR::NOT_ALLOCATE; - case StatusCode::kEpochTooOld: - LOG(WARNING) << "GetOrAllocateSegment return epoch too old!"; - return LIBCURVE_ERROR::EPOCH_TOO_OLD; - default: - break; + case StatusCode::kParaError: + LOG(WARNING) << "GetOrAllocateSegment: error param!"; + return LIBCURVE_ERROR::FAILED; + case StatusCode::kOwnerAuthFail: + LOG(WARNING) << "GetOrAllocateSegment: auth failed!"; + return LIBCURVE_ERROR::AUTHFAIL; + case StatusCode::kFileNotExists: + LOG(WARNING) << "GetOrAllocateSegment: file not exists!"; + return LIBCURVE_ERROR::FAILED; + case StatusCode::kSegmentNotAllocated: + LOG(WARNING) << "GetOrAllocateSegment: segment not allocated!"; + return LIBCURVE_ERROR::NOT_ALLOCATE; + case StatusCode::kEpochTooOld: + LOG(WARNING) << "GetOrAllocateSegment return epoch too old!"; + return LIBCURVE_ERROR::EPOCH_TOO_OLD; + default: + break; } PageFileSegment pfs = response.pagefilesegment(); @@ -1094,7 +1091,7 @@ LIBCURVE_ERROR MDSClient::GetOrAllocateSegment(bool allocate, uint64_t offset, return ReturnError(rpcExcutor_.DoRPCTask(task, 0)); } -LIBCURVE_ERROR MDSClient::DeAllocateSegment(const FInfo *fileInfo, +LIBCURVE_ERROR MDSClient::DeAllocateSegment(const FInfo* fileInfo, uint64_t offset) { auto task = RPCTaskDefine { (void)addrindex; @@ -1133,9 +1130,9 @@ LIBCURVE_ERROR MDSClient::DeAllocateSegment(const FInfo *fileInfo, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::RenameFile(const UserInfo_t &userinfo, - const std::string &origin, - const std::string &destination, +LIBCURVE_ERROR MDSClient::RenameFile(const UserInfo_t& userinfo, + const std::string& origin, + const std::string& destination, uint64_t originId, uint64_t destinationId) { auto task = RPCTaskDefine { @@ -1177,8 +1174,8 @@ LIBCURVE_ERROR 
MDSClient::RenameFile(const UserInfo_t &userinfo, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::Extend(const std::string &filename, - const UserInfo_t &userinfo, uint64_t newsize) { +LIBCURVE_ERROR MDSClient::Extend(const std::string& filename, + const UserInfo_t& userinfo, uint64_t newsize) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -1211,8 +1208,8 @@ LIBCURVE_ERROR MDSClient::Extend(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::DeleteFile(const std::string &filename, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::DeleteFile(const std::string& filename, + const UserInfo_t& userinfo, bool deleteforce, uint64_t fileid) { auto task = RPCTaskDefine { (void)addrindex; @@ -1251,8 +1248,8 @@ LIBCURVE_ERROR MDSClient::DeleteFile(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::RecoverFile(const std::string &filename, - const UserInfo_t &userinfo, +LIBCURVE_ERROR MDSClient::RecoverFile(const std::string& filename, + const UserInfo_t& userinfo, uint64_t fileid) { auto task = RPCTaskDefine { (void)addrindex; @@ -1285,9 +1282,9 @@ LIBCURVE_ERROR MDSClient::RecoverFile(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::ChangeOwner(const std::string &filename, - const std::string &newOwner, - const UserInfo_t &userinfo) { +LIBCURVE_ERROR MDSClient::ChangeOwner(const std::string& filename, + const std::string& newOwner, + const UserInfo_t& userinfo) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -1326,9 +1323,9 @@ LIBCURVE_ERROR MDSClient::ChangeOwner(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::Listdir(const std::string &dirpath, - const UserInfo_t &userinfo, - std::vector *filestatVec) { +LIBCURVE_ERROR MDSClient::Listdir(const std::string& dirpath, + const UserInfo_t& userinfo, + std::vector* filestatVec) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -1379,8 +1376,8 @@ LIBCURVE_ERROR MDSClient::Listdir(const std::string &dirpath, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, - CopysetPeerInfo *chunkserverInfo) { +LIBCURVE_ERROR MDSClient::GetChunkServerInfo( + const PeerAddr& csAddr, CopysetPeerInfo* chunkserverInfo) { if (!chunkserverInfo) { LOG(ERROR) << "chunkserverInfo pointer is null!"; return LIBCURVE_ERROR::FAILED; @@ -1403,7 +1400,7 @@ LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, std::vector strs; curve::common::SplitString(csAddr.ToString(), ":", &strs); - const std::string &ip = strs[0]; + const std::string& ip = strs[0]; uint64_t port; bool succ = curve::common::StringToUll(strs[1], &port); @@ -1428,7 +1425,7 @@ LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, << ", log id = " << cntl->log_id(); if (statusCode == 0) { - const auto &csInfo = response.chunkserverinfo(); + const auto& csInfo = response.chunkserverinfo(); ChunkServerID csId = csInfo.chunkserverid(); std::string internalIp = csInfo.hostip(); std::string externalIp = internalIp; @@ -1440,9 +1437,8 @@ LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, butil::str2endpoint(internalIp.c_str(), port, &internal); EndPoint external; butil::str2endpoint(externalIp.c_str(), port, &external); 
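[Editor's note] The address handling just above follows the usual split-and-parse pattern: break the textual "ip:port" apart, then build internal and external endpoints. A minimal standalone equivalent of the SplitString + StringToUll pair used here, as a hypothetical helper with the standard library only and no error handling for malformed ports:

#include <cstdint>
#include <string>
#include <utility>

// Hypothetical helper: split "ip:port" into an address/port pair.
std::pair<std::string, uint16_t> SplitAddr(const std::string& addr) {
    const std::size_t pos = addr.rfind(':');
    if (pos == std::string::npos) {
        return {addr, 0};  // no port component present
    }
    return {addr.substr(0, pos),
            static_cast<uint16_t>(std::stoul(addr.substr(pos + 1)))};
}

// e.g. SplitAddr("127.0.0.1:6666") yields {"127.0.0.1", 6666}
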
- *chunkserverInfo = - CopysetPeerInfo(csId, PeerAddr(internal), - PeerAddr(external)); + *chunkserverInfo = CopysetPeerInfo( + csId, PeerAddr(internal), PeerAddr(external)); return LIBCURVE_ERROR::OK; } else { return LIBCURVE_ERROR::FAILED; @@ -1453,8 +1449,8 @@ LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, } LIBCURVE_ERROR -MDSClient::ListChunkServerInServer(const std::string &serverIp, - std::vector *csIds) { +MDSClient::ListChunkServerInServer(const std::string& serverIp, + std::vector* csIds) { auto task = RPCTaskDefine { (void)addrindex; (void)rpctimeoutMS; @@ -1497,82 +1493,81 @@ MDSClient::ListChunkServerInServer(const std::string &serverIp, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -void MDSClient::MDSStatusCode2LibcurveError(const StatusCode &status, - LIBCURVE_ERROR *errcode) { +void MDSClient::MDSStatusCode2LibcurveError(const StatusCode& status, + LIBCURVE_ERROR* errcode) { switch (status) { - case StatusCode::kOK: - *errcode = LIBCURVE_ERROR::OK; - break; - case StatusCode::kFileExists: - *errcode = LIBCURVE_ERROR::EXISTS; - break; - case StatusCode::kSnapshotFileNotExists: - case StatusCode::kFileNotExists: - case StatusCode::kDirNotExist: - case StatusCode::kPoolsetNotExist: - *errcode = LIBCURVE_ERROR::NOTEXIST; - break; - case StatusCode::kSegmentNotAllocated: - *errcode = LIBCURVE_ERROR::NOT_ALLOCATE; - break; - case StatusCode::kShrinkBiggerFile: - *errcode = LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE; - break; - case StatusCode::kNotSupported: - *errcode = LIBCURVE_ERROR::NOT_SUPPORT; - break; - case StatusCode::kOwnerAuthFail: - *errcode = LIBCURVE_ERROR::AUTHFAIL; - break; - case StatusCode::kSnapshotFileDeleteError: - *errcode = LIBCURVE_ERROR::DELETE_ERROR; - break; - case StatusCode::kFileUnderSnapShot: - *errcode = LIBCURVE_ERROR::UNDER_SNAPSHOT; - break; - case StatusCode::kFileNotUnderSnapShot: - *errcode = LIBCURVE_ERROR::NOT_UNDERSNAPSHOT; - break; - case StatusCode::kSnapshotDeleting: - *errcode = LIBCURVE_ERROR::DELETING; - break; - case StatusCode::kDirNotEmpty: - *errcode = LIBCURVE_ERROR::NOT_EMPTY; - break; - case StatusCode::kFileOccupied: - *errcode = LIBCURVE_ERROR::FILE_OCCUPIED; - break; - case StatusCode::kSessionNotExist: - *errcode = LIBCURVE_ERROR::SESSION_NOT_EXIST; - break; - case StatusCode::kParaError: - *errcode = LIBCURVE_ERROR::PARAM_ERROR; - break; - case StatusCode::kStorageError: - *errcode = LIBCURVE_ERROR::INTERNAL_ERROR; - break; - case StatusCode::kFileLengthNotSupported: - *errcode = LIBCURVE_ERROR::LENGTH_NOT_SUPPORT; - break; - case ::curve::mds::StatusCode::kCloneStatusNotMatch: - *errcode = LIBCURVE_ERROR::STATUS_NOT_MATCH; - break; - case ::curve::mds::StatusCode::kDeleteFileBeingCloned: - *errcode = LIBCURVE_ERROR::DELETE_BEING_CLONED; - break; - case ::curve::mds::StatusCode::kClientVersionNotMatch: - *errcode = LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT; - break; - case ::curve::mds::StatusCode::kSnapshotFrozen: - *errcode = LIBCURVE_ERROR::SNAPSTHO_FROZEN; - break; - default: - *errcode = LIBCURVE_ERROR::UNKNOWN; - break; + case StatusCode::kOK: + *errcode = LIBCURVE_ERROR::OK; + break; + case StatusCode::kFileExists: + *errcode = LIBCURVE_ERROR::EXISTS; + break; + case StatusCode::kSnapshotFileNotExists: + case StatusCode::kFileNotExists: + case StatusCode::kDirNotExist: + case StatusCode::kPoolsetNotExist: + *errcode = LIBCURVE_ERROR::NOTEXIST; + break; + case StatusCode::kSegmentNotAllocated: + *errcode = LIBCURVE_ERROR::NOT_ALLOCATE; + break; + case 
StatusCode::kShrinkBiggerFile: + *errcode = LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE; + break; + case StatusCode::kNotSupported: + *errcode = LIBCURVE_ERROR::NOT_SUPPORT; + break; + case StatusCode::kOwnerAuthFail: + *errcode = LIBCURVE_ERROR::AUTHFAIL; + break; + case StatusCode::kSnapshotFileDeleteError: + *errcode = LIBCURVE_ERROR::DELETE_ERROR; + break; + case StatusCode::kFileUnderSnapShot: + *errcode = LIBCURVE_ERROR::UNDER_SNAPSHOT; + break; + case StatusCode::kFileNotUnderSnapShot: + *errcode = LIBCURVE_ERROR::NOT_UNDERSNAPSHOT; + break; + case StatusCode::kSnapshotDeleting: + *errcode = LIBCURVE_ERROR::DELETING; + break; + case StatusCode::kDirNotEmpty: + *errcode = LIBCURVE_ERROR::NOT_EMPTY; + break; + case StatusCode::kFileOccupied: + *errcode = LIBCURVE_ERROR::FILE_OCCUPIED; + break; + case StatusCode::kSessionNotExist: + *errcode = LIBCURVE_ERROR::SESSION_NOT_EXIST; + break; + case StatusCode::kParaError: + *errcode = LIBCURVE_ERROR::PARAM_ERROR; + break; + case StatusCode::kStorageError: + *errcode = LIBCURVE_ERROR::INTERNAL_ERROR; + break; + case StatusCode::kFileLengthNotSupported: + *errcode = LIBCURVE_ERROR::LENGTH_NOT_SUPPORT; + break; + case ::curve::mds::StatusCode::kCloneStatusNotMatch: + *errcode = LIBCURVE_ERROR::STATUS_NOT_MATCH; + break; + case ::curve::mds::StatusCode::kDeleteFileBeingCloned: + *errcode = LIBCURVE_ERROR::DELETE_BEING_CLONED; + break; + case ::curve::mds::StatusCode::kClientVersionNotMatch: + *errcode = LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT; + break; + case ::curve::mds::StatusCode::kSnapshotFrozen: + *errcode = LIBCURVE_ERROR::SNAPSTHO_FROZEN; + break; + default: + *errcode = LIBCURVE_ERROR::UNKNOWN; + break; } } - LIBCURVE_ERROR MDSClient::ReturnError(int retcode) { // logic error if (retcode >= 0) { @@ -1581,12 +1576,12 @@ LIBCURVE_ERROR MDSClient::ReturnError(int retcode) { // rpc error or special defined error switch (retcode) { - case -LIBCURVE_ERROR::NOT_SUPPORT: - return LIBCURVE_ERROR::NOT_SUPPORT; - case -LIBCURVE_ERROR::FILE_OCCUPIED: - return LIBCURVE_ERROR::FILE_OCCUPIED; - default: - return LIBCURVE_ERROR::FAILED; + case -LIBCURVE_ERROR::NOT_SUPPORT: + return LIBCURVE_ERROR::NOT_SUPPORT; + case -LIBCURVE_ERROR::FILE_OCCUPIED: + return LIBCURVE_ERROR::FILE_OCCUPIED; + default: + return LIBCURVE_ERROR::FAILED; } } diff --git a/src/client/mds_client.h b/src/client/mds_client.h index 36822fa31c..10c4a682cd 100644 --- a/src/client/mds_client.h +++ b/src/client/mds_client.h @@ -26,10 +26,10 @@ #include #include +#include #include #include #include -#include #include "include/client/libcurve.h" #include "proto/nameserver2.pb.h" @@ -48,28 +48,28 @@ class RPCExcutorRetryPolicy { RPCExcutorRetryPolicy() : retryOpt_(), currentWorkingMDSAddrIndex_(0), cntlID_(1) {} - void SetOption(const MetaServerOption::RpcRetryOption &option) { + void SetOption(const MetaServerOption::RpcRetryOption& option) { retryOpt_ = option; } using RPCFunc = std::function; + brpc::Channel*, brpc::Controller*)>; /** - * 将client与mds的重试相关逻辑抽离 - * @param: task为当前要进行的具体rpc任务 - * @param: maxRetryTimeMS是当前执行最大的重试时间 - * @return: 返回当前RPC的结果 + * Detach the retry related logic between client and mds + * @param: task is the specific rpc task to be carried out currently + * @param: maxRetryTimeMS is the maximum retry time currently executed + * @return: Returns the result of the current RPC */ int DoRPCTask(RPCFunc task, uint64_t maxRetryTimeMS); /** - * 测试使用: 设置当前正在服务的mdsindex + * Test usage: Set the currently serving mdsindex */ void SetCurrentWorkIndex(int index) { 
currentWorkingMDSAddrIndex_.store(index); } /** - * 测试使用:获取当前正在服务的mdsindex + * Test usage: Obtain the currently serving mdsindex */ int GetCurrentWorkIndex() const { return currentWorkingMDSAddrIndex_.load(); @@ -77,105 +77,117 @@ class RPCExcutorRetryPolicy { private: /** - * rpc失败需要重试,根据cntl返回的不同的状态,确定应该做什么样的预处理。 - * 主要做了以下几件事: - * 1. 如果上一次的RPC是超时返回,那么执行rpc 超时指数退避逻辑 - * 2. 如果上一次rpc返回not connect等返回值,会主动触发切换mds地址重试 - * 3. 更新重试信息,比如在当前mds上连续重试的次数 - * @param[in]: status为当前rpc的失败返回的状态 - * @param normalRetryCount The total count of normal retry - * @param[in][out]: curMDSRetryCount当前mds节点上的重试次数,如果切换mds - * 该值会被重置为1. - * @param[in]: curRetryMDSIndex代表当前正在重试的mds索引 - * @param[out]: lastWorkingMDSIndex上一次正在提供服务的mds索引 - * @param[out]: timeOutMS根据status对rpctimeout进行调整 + * When an RPC fails, it needs to be retried, and based on different + * statuses returned by `cntl`, determine what kind of preprocessing should + * be done. The main tasks performed are as follows: + * 1. If the last RPC timed out, execute RPC timeout exponential backoff + * logic. + * 2. If the last RPC returned values like "not connect," it will actively + * trigger MDS address switching and retry. + * 3. Update retry information, such as the number of consecutive retries on + * the current MDS. + * @param[in]: status is the status of the current RPC failure. + * @param[in]: normalRetryCount is the total count of normal retries. + * @param[in][out]: curMDSRetryCount is the number of retries on the current + * MDS node. If MDS switching occurs, this value will be reset to 1. + * @param[in]: curRetryMDSIndex represents the current MDS index being + * retried. + * @param[out]: lastWorkingMDSIndex is the index of the MDS that was + * providing service in the last attempt. + * @param[out]: timeOutMS is adjusted based on the status to control the RPC + * timeout. * - * @return: 返回下一次重试的mds索引 + * @return: Returns the next MDS index for the next retry. */ int PreProcessBeforeRetry(int status, bool retryUnlimit, - uint64_t *normalRetryCount, - uint64_t *curMDSRetryCount, int curRetryMDSIndex, - int *lastWorkingMDSIndex, uint64_t *timeOutMS); + uint64_t* normalRetryCount, + uint64_t* curMDSRetryCount, int curRetryMDSIndex, + int* lastWorkingMDSIndex, uint64_t* timeOutMS); /** - * 执行rpc发送任务 - * @param[in]: mdsindex为mds对应的地址索引 - * @param[in]: rpcTimeOutMS是rpc超时时间 - * @param[in]: task为待执行的任务 - * @return: channel获取成功则返回0,否则-1 + * Execute rpc send task + * @param[in]: mdsindex is the address index corresponding to mds + * @param[in]: rpcTimeOutMS is the rpc timeout time + * @param[in]: task is the task to be executed + * @return: If the channel is successfully obtained, 0 will be returned. + * Otherwise, -1 */ int ExcuteTask(int mdsindex, uint64_t rpcTimeOutMS, RPCExcutorRetryPolicy::RPCFunc task); /** - * 根据输入状态获取下一次需要重试的mds索引,mds切换逻辑: - * 记录三个状态:curRetryMDSIndex、lastWorkingMDSIndex、 - * currentWorkingMDSIndex - * 1. 开始的时候curRetryMDSIndex = currentWorkingMDSIndex - * lastWorkingMDSIndex = currentWorkingMDSIndex - * 2. 
- * 如果rpc失败,会触发切换curRetryMDSIndex,如果这时候lastWorkingMDSIndex - * 与currentWorkingMDSIndex相等,这时候会顺序切换到下一个mds索引, - * 如果lastWorkingMDSIndex与currentWorkingMDSIndex不相等,那么 - * 说明有其他接口更新了currentWorkingMDSAddrIndex_,那么本次切换 - * 直接切换到currentWorkingMDSAddrIndex_ - * @param[in]: needChangeMDS表示当前外围需不需要切换mds,这个值由 - * PreProcessBeforeRetry函数确定 - * @param[in]: currentRetryIndex为当前正在重试的mds索引 - * @param[in][out]: - * lastWorkingindex为上一次正在服务的mds索引,正在重试的mds - * 与正在服务的mds索引可能是不同的mds。 - * @return: 返回下一次要重试的mds索引 + * Get the next MDS index to retry based on the input state. MDS switching + * logic: Record three states: curRetryMDSIndex, lastWorkingMDSIndex, + * currentWorkingMDSIndex. + * 1. At the beginning, curRetryMDSIndex = currentWorkingMDSIndex, + * lastWorkingMDSIndex = currentWorkingMDSIndex. + * 2. If an RPC fails, it will trigger a switch of curRetryMDSIndex. If at + * this point, lastWorkingMDSIndex is equal to currentWorkingMDSIndex, it + * will sequentially switch to the next MDS index. If lastWorkingMDSIndex is + * not equal to currentWorkingMDSIndex, it means that another interface has + * updated currentWorkingMDSAddrIndex_. In this case, the switch will + * directly go to currentWorkingMDSAddrIndex_. + * @param[in]: needChangeMDS indicates whether the caller needs to switch + * MDS. This value is determined by the PreProcessBeforeRetry function. + * @param[in]: currentRetryIndex is the current MDS index being retried. + * @param[in][out]: lastWorkingIndex is the index of the MDS that was + * serving in the last retry. The MDS being retried and the MDS being served + * may be different. + * @return: Returns the next MDS index to retry. */ int GetNextMDSIndex(bool needChangeMDS, int currentRetryIndex, - int *lastWorkingindex); - /** - * 根据输入参数,决定是否继续重试,重试退出条件是重试时间超出最大允许时间 - * IO路径上和非IO路径上的重试时间不一样,非IO路径的重试时间由配置文件的 - * mdsMaxRetryMS参数指定,IO路径为无限循环重试。 + int* lastWorkingindex); + /** + * Based on the input parameters, decide whether to continue retrying. Retry + * stops once the elapsed retry time exceeds the maximum allowed time. The + * retry time differs between IO and non-IO paths: for non-IO paths it is + * specified by the mdsMaxRetryMS parameter in the configuration file, while + * the IO path retries in an infinite loop.
* @param[in]: startTimeMS - * @param[in]: maxRetryTimeMS为最大重试时间 - * @return:需要继续重试返回true, 否则返回false + * @param[in]: maxRetryTimeMS is the maximum retry time + * @return: Returns true if retrying should continue, otherwise returns + * false */ bool GoOnRetry(uint64_t startTimeMS, uint64_t maxRetryTimeMS); /** - * 递增controller id并返回id + * Increment the controller id and return it */ uint64_t GetLogId() { return cntlID_.fetch_add(1, std::memory_order_relaxed); } private: - // 执行rpc时必要的配置信息 + // Necessary configuration information for executing rpc MetaServerOption::RpcRetryOption retryOpt_; - // 记录上一次重试过的leader信息 + // Record the leader information from the last retry std::atomic currentWorkingMDSAddrIndex_; - // controller id,用于trace整个rpc IO链路 - // 这里直接用uint64即可,在可预测的范围内,不会溢出 + // controller ID, used to trace the entire RPC IO link + // A plain uint64 is enough here; it will not overflow in any predictable + // range std::atomic cntlID_; }; - struct LeaseRefreshResult; -// MDSClient是client与MDS通信的唯一窗口 +// MDSClient is the only window where the client communicates with MDS class MDSClient : public MDSClientBase, public std::enable_shared_from_this { public: - explicit MDSClient(const std::string &metricPrefix = ""); + explicit MDSClient(const std::string& metricPrefix = ""); virtual ~MDSClient(); - LIBCURVE_ERROR Initialize(const MetaServerOption &metaopt); + LIBCURVE_ERROR Initialize(const MetaServerOption& metaopt); /** - * 创建文件 - * @param: context创建文件信息 - * @return: 成功返回LIBCURVE_ERROR::OK - * 文件已存在返回LIBCURVE_ERROR::EXIST - * 否则返回LIBCURVE_ERROR::FAILED - * 如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, + * Create file + * @param: context file creation information + * @return: Successfully returned LIBCURVE_ERROR::OK + * If the file already exists, return LIBCURVE_ERROR::EXISTS + * Otherwise, return LIBCURVE_ERROR::FAILED + * If authentication fails, return LIBCURVE_ERROR::AUTHFAIL */ LIBCURVE_ERROR CreateFile(const CreateFileContext& context); /** @@ -190,29 +202,31 @@ class MDSClient : public MDSClientBase, * return LIBCURVE_ERROR::AUTHFAIL for auth fail, * otherwise return LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR OpenFile(const std::string &filename, - const UserInfo_t &userinfo, FInfo_t *fi, - FileEpoch_t *fEpoch, - LeaseSession *lease); + LIBCURVE_ERROR OpenFile(const std::string& filename, + const UserInfo_t& userinfo, FInfo_t* fi, + FileEpoch_t* fEpoch, LeaseSession* lease); /** - * 获取copysetid对应的serverlist信息并更新到metacache - * @param: logicPoolId逻辑池信息 - * @param: csid为要获取的copyset列表 - * @param: cpinfoVec保存获取到的server信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则返回LIBCURVE_ERROR::FAILED + * Obtain the serverlist information corresponding to the copysetid and + * update it to the metacache + * @param: logicPoolId logical pool information + * @param: csid is the list of copysets to obtain + * @param: cpinfoVec saves the obtained server information + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise returns + * LIBCURVE_ERROR::FAILED */ LIBCURVE_ERROR - GetServerList(const LogicPoolID &logicPoolId, - const std::vector &csid, - std::vector> *cpinfoVec); + GetServerList(const LogicPoolID& logicPoolId, + const std::vector& csid, + std::vector>* cpinfoVec); /** - * 获取当前mds所属的集群信息 - * @param[out]: clsctx 为要获取的集群信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则返回LIBCURVE_ERROR::FAILED + * Obtain the cluster information to which the current mds belongs + * @param[out]: clsctx is the cluster information to be obtained + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise returns + * LIBCURVE_ERROR::FAILED */ -
LIBCURVE_ERROR GetClusterInfo(ClusterContext *clsctx); + LIBCURVE_ERROR GetClusterInfo(ClusterContext* clsctx); LIBCURVE_ERROR ListPoolset(std::vector* out); @@ -229,9 +243,9 @@ class MDSClient : public MDSClientBase, * otherwise return LIBCURVE_ERROR::FAILED */ LIBCURVE_ERROR GetOrAllocateSegment(bool allocate, uint64_t offset, - const FInfo_t *fi, - const FileEpoch_t *fEpoch, - SegmentInfo *segInfo); + const FInfo_t* fi, + const FileEpoch_t* fEpoch, + SegmentInfo* segInfo); /** * @brief Send DeAllocateSegment request to current working MDS * @param fileInfo current file info * @param offset segment start offset * @return LIBCURVE_ERROR::OK means success, other value means fail */ - virtual LIBCURVE_ERROR DeAllocateSegment(const FInfo *fileInfo, + virtual LIBCURVE_ERROR DeAllocateSegment(const FInfo* fileInfo, uint64_t offset); /** @@ -253,10 +267,9 @@ class MDSClient : public MDSClientBase, * return LIBCURVE_ERROR::AUTHFAIL for auth fail, * otherwise return LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR GetFileInfo(const std::string &filename, - const UserInfo_t &userinfo, - FInfo_t *fi, - FileEpoch_t *fEpoch); + LIBCURVE_ERROR GetFileInfo(const std::string& filename, + const UserInfo_t& userinfo, FInfo_t* fi, + FileEpoch_t* fEpoch); /** * @brief Increase epoch and return chunkserver locations * * @return LIBCURVE_ERROR::OK for success, LIBCURVE_ERROR::FAILED for fail. */ - LIBCURVE_ERROR IncreaseEpoch(const std::string& filename, - const UserInfo_t& userinfo, - FInfo_t* fi, - FileEpoch_t *fEpoch, - std::list> *csLocs); + LIBCURVE_ERROR IncreaseEpoch( + const std::string& filename, const UserInfo_t& userinfo, FInfo_t* fi, + FileEpoch_t* fEpoch, std::list>* csLocs); /** - * 扩展文件 - * @param: userinfo是用户信息 - * @param: filename文件名 - * @param: newsize新的size + * Extend the file + * @param: userinfo is the user information + * @param: filename file name + * @param: newsize new size */ - LIBCURVE_ERROR Extend(const std::string &filename, - const UserInfo_t &userinfo, uint64_t newsize); + LIBCURVE_ERROR Extend(const std::string& filename, + const UserInfo_t& userinfo, uint64_t newsize); /** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @param: deleteforce是否强制删除而不放入垃圾回收站 - * @param: id为文件id,默认值为0,如果用户不指定该值,不会传id到mds + * Delete file + * @param: userinfo is the user information + * @param: filename The file name to be deleted + * @param: deleteforce whether to force delete instead of moving to the + * recycle bin + * @param: id is the file id, with a default value of 0.
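[Editor's note] For orientation, a hedged usage sketch of the interface declared here; the volume name and owner are placeholders and error handling is elided:

#include <string>

#include "src/client/mds_client.h"

// Hypothetical usage example only; not part of this patch.
int DeleteVolumeExample(curve::client::MDSClient* mds) {
    curve::client::UserInfo_t user;
    user.owner = "test_user";  // placeholder credentials

    // id keeps its default of 0, so no file id is forwarded to the MDS,
    // matching the documented behaviour of DeleteFile.
    auto ret = mds->DeleteFile("/volume1", user, /*deleteforce=*/false);
    return ret == curve::client::LIBCURVE_ERROR::OK ? 0 : -1;
}
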
If the user does + * not specify this value, the id will not be passed to mds */ - LIBCURVE_ERROR DeleteFile(const std::string &filename, - const UserInfo_t &userinfo, + LIBCURVE_ERROR DeleteFile(const std::string& filename, + const UserInfo_t& userinfo, bool deleteforce = false, uint64_t id = 0); /** @@ -300,253 +313,266 @@ class MDSClient : public MDSClientBase, * @param: filename * @param: fileId is inodeid,default 0 */ - LIBCURVE_ERROR RecoverFile(const std::string &filename, - const UserInfo_t &userinfo, uint64_t fileId); + LIBCURVE_ERROR RecoverFile(const std::string& filename, + const UserInfo_t& userinfo, uint64_t fileId); /** - * 创建版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要创建快照的文件名 - * @param: seq是出参,返回创建快照时文件的版本信息 + * Create a snapshot with version number seq + * @param: userinfo is the user information + * @param: filename is the file name to create the snapshot + * @param: seq is an output parameter that returns the version information + * of the file when creating the snapshot * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * Successfully returned LIBCURVE_ERROR::OK, if authentication fails, return + * LIBCURVE_ERROR::AUTHFAIL, Otherwise, return LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR CreateSnapShot(const std::string &filename, - const UserInfo_t &userinfo, uint64_t *seq); + LIBCURVE_ERROR CreateSnapShot(const std::string& filename, + const UserInfo_t& userinfo, uint64_t* seq); /** - * 删除版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要快照的文件名 - * @param: seq是创建快照时文件的版本信息 + * Delete snapshot with version number seq + * @param: userinfo is the user information + * @param: filename is the file name to be snapshot + * @param: seq is the version information of the file when creating the + * snapshot * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * Successfully returned LIBCURVE_ERROR::OK, if authentication fails, return + * LIBCURVE_ERROR::AUTHFAIL, Otherwise, return LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR DeleteSnapShot(const std::string &filename, - const UserInfo_t &userinfo, uint64_t seq); + LIBCURVE_ERROR DeleteSnapShot(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq); /** - * 以列表的形式获取版本号为seq的snapshot文件信息,snapif是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param: snapif是出参,保存文件的基本信息 + * Obtain snapshot file information with version number seq in the form of a + * list, where snapif is the output parameter + * @param: filename is the file name to be snapshot + * @param: userinfo is the user information + * @param: seq is the version information of the file when creating the + * snapshot + * @param: snapif is a parameter that saves the basic information of the + * file * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED - */ - LIBCURVE_ERROR ListSnapShot(const std::string &filename, - const UserInfo_t &userinfo, - const std::vector *seq, - std::map *snapif); - /** - * 获取快照的chunk信息并更新到metacache,segInfo是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param: offset是文件内的偏移 - * @param: segInfo是出参,保存chunk信息 + * Successfully returned LIBCURVE_ERROR::OK, if authentication fails, return + * LIBCURVE_ERROR::AUTHFAIL, Otherwise, return LIBCURVE_ERROR::FAILED + */ + LIBCURVE_ERROR ListSnapShot(const std::string& filename, + const UserInfo_t& userinfo, + const std::vector* seq, + 
std::map* snapif); + /** + * Obtain the chunk information of the snapshot and update it to the + * metacache, where segInfo is the output parameter + * @param: filename is the file name to be snapshot + * @param: userinfo is the user information + * @param: seq is the version information of the file when creating the + * snapshot + * @param: offset is the offset within the file + * @param: segInfo is the output parameter, saving chunk information * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * Successfully returned LIBCURVE_ERROR::OK; if authentication fails, return + * LIBCURVE_ERROR::AUTHFAIL; otherwise, return LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR GetSnapshotSegmentInfo(const std::string &filename, - const UserInfo_t &userinfo, + LIBCURVE_ERROR GetSnapshotSegmentInfo(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, uint64_t offset, - SegmentInfo *segInfo); - /** - * 获取快照状态 - * @param: filenam文件名 - * @param: userinfo是用户信息 - * @param: seq是文件版本号信息 - * @param[out]: filestatus为快照状态 - */ - LIBCURVE_ERROR CheckSnapShotStatus(const std::string &filename, - const UserInfo_t &userinfo, uint64_t seq, - FileStatus *filestatus); - - /** - * 文件接口在打开文件的时候需要与mds保持心跳,refresh用来续约 - * 续约结果将会通过LeaseRefreshResult* resp返回给调用层 - * @param: filename是要续约的文件名 - * @param: sessionid是文件的session信息 - * @param: resp是mds端传递过来的lease信息 - * @param[out]: lease当前文件的session信息 + SegmentInfo* segInfo); + /** + * Get snapshot status + * @param: filename file name + * @param: userinfo is the user information + * @param: seq is the file version number information + * @param[out]: filestatus is the snapshot status + */ + LIBCURVE_ERROR CheckSnapShotStatus(const std::string& filename, + const UserInfo_t& userinfo, uint64_t seq, + FileStatus* filestatus); + + /** + * The file interface needs to maintain a heartbeat with MDS when opening + * files, and refresh is used to renew the lease. The renewal result will + * be returned to the calling layer through LeaseRefreshResult* resp + * @param: filename is the file name to be renewed + * @param: sessionid is the session information of the file + * @param: resp is the lease information passed back from the mds end + * @param[out]: lease the session information of the current file * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED - */ - LIBCURVE_ERROR RefreshSession(const std::string &filename, - const UserInfo_t &userinfo, - const std::string &sessionid, - LeaseRefreshResult *resp, - LeaseSession *lease = nullptr); - /** - * 关闭文件,需要携带sessionid,这样mds端会在数据库删除该session信息 - * @param: filename是要续约的文件名 - * @param: sessionid是文件的session信息 + * Successfully returned LIBCURVE_ERROR::OK; if authentication fails, return + * LIBCURVE_ERROR::AUTHFAIL; otherwise, return LIBCURVE_ERROR::FAILED + */ + LIBCURVE_ERROR RefreshSession(const std::string& filename, + const UserInfo_t& userinfo, + const std::string& sessionid, + LeaseRefreshResult* resp, + LeaseSession* lease = nullptr); + /** + * To close the file, it is necessary to carry the session ID, so that the + * mds side will delete the session information in the database + * @param: filename is the file name to be closed + * @param: sessionid is the session information of the file * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * Successfully returned LIBCURVE_ERROR::OK; if authentication fails, return + * LIBCURVE_ERROR::AUTHFAIL; otherwise, return
LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR CloseFile(const std::string &filename, - const UserInfo_t &userinfo, - const std::string &sessionid); + LIBCURVE_ERROR CloseFile(const std::string& filename, + const UserInfo_t& userinfo, + const std::string& sessionid); /** - * @brief 创建clone文件 + * @brief Create clone file * @detail - * - 若是clone,sn重置为初始值 - * - 若是recover,sn不变 + * - If clone, reset sn to initial value + * - If recover, sn remains unchanged * - * @param source 克隆源文件名 - * @param:destination clone目标文件名 - * @param:userinfo 用户信息 - * @param:size 文件大小 - * @param:sn 版本号 - * @param:chunksize是创建文件的chunk大小 + * @param source Clone source file name + * @param: destination clone destination file name + * @param: userinfo User Information + * @param: size File size + * @param: sn version number + * @param: chunksize is the chunk size of the created file * @param stripeUnit stripe size * @param stripeCount stripe count - * @param[out] destFileId 创建的目标文件的Id + * @param[out] destFileId The ID of the target file created * - * @return 错误码 + * @return error code */ - LIBCURVE_ERROR CreateCloneFile(const std::string &source, - const std::string &destination, - const UserInfo_t &userinfo, uint64_t size, + LIBCURVE_ERROR CreateCloneFile(const std::string& source, + const std::string& destination, + const UserInfo_t& userinfo, uint64_t size, uint64_t sn, uint32_t chunksize, uint64_t stripeUnit, uint64_t stripeCount, - const std::string& poolset, - FInfo *fileinfo); + const std::string& poolset, FInfo* fileinfo); /** - * @brief 通知mds完成Clone Meta + * @brief Notify mds to complete Clone Meta * - * @param:destination 目标文件 - * @param:userinfo用户信息 + * @param: destination target file + * @param: userinfo User Information * - * @return 错误码 + * @return error code */ - LIBCURVE_ERROR CompleteCloneMeta(const std::string &destination, - const UserInfo_t &userinfo); + LIBCURVE_ERROR CompleteCloneMeta(const std::string& destination, + const UserInfo_t& userinfo); /** - * @brief 通知mds完成Clone Chunk + * @brief Notify mds to complete Clone Chunk * - * @param:destination 目标文件 - * @param:userinfo用户信息 + * @param: destination target file + * @param: userinfo User Information * - * @return 错误码 + * @return error code */ - LIBCURVE_ERROR CompleteCloneFile(const std::string &destination, - const UserInfo_t &userinfo); + LIBCURVE_ERROR CompleteCloneFile(const std::string& destination, + const UserInfo_t& userinfo); /** - * @brief 通知mds完成Clone Meta + * @brief Set the clone file status * - * @param: filename 目标文件 - * @param: filestatus为要设置的目标状态 - * @param: userinfo用户信息 - * @param: fileId为文件ID信息,非必填 + * @param: filename Target file + * @param: filestatus is the target state to be set + * @param: userinfo User information + * @param: fileId is the file ID information, not required * - * @return 错误码 + * @return error code */ - LIBCURVE_ERROR SetCloneFileStatus(const std::string &filename, - const FileStatus &filestatus, - const UserInfo_t &userinfo, + LIBCURVE_ERROR SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const UserInfo_t& userinfo, uint64_t fileID = 0); /** - * @brief 重名文件 + * @brief Rename file * - * @param:userinfo 用户信息 - * @param:originId 被恢复的原始文件Id - * @param:destinationId 克隆出的目标文件Id - * @param:origin 被恢复的原始文件名 - * @param:destination 克隆出的目标文件 + * @param: userinfo User Information + * @param: originId The original file ID that was restored + * @param: destinationId The cloned target file ID + * @param: origin The original file name of the recovered file + * @param: destination The
cloned target file * - * @return 错误码 + * @return error code */ - LIBCURVE_ERROR RenameFile(const UserInfo_t &userinfo, - const std::string &origin, - const std::string &destination, + LIBCURVE_ERROR RenameFile(const UserInfo_t& userinfo, + const std::string& origin, + const std::string& destination, uint64_t originId = 0, uint64_t destinationId = 0); /** - * 变更owner - * @param: filename待变更的文件名 - * @param: newOwner新的owner信息 - * @param: userinfo执行此操作的user信息,只有root用户才能执行变更 - * @return: 成功返回0, - * 否则返回LIBCURVE_ERROR::FAILED,LIBCURVE_ERROR::AUTHFAILED等 + * Change owner + * @param: filename The file name to be changed + * @param: newOwner New owner information + * @param: userinfo The user information for performing this operation, only + * the root user can perform changes + * @return: Successfully returned 0, + * Otherwise, return LIBCURVE_ERROR::FAILED, + * LIBCURVE_ERROR::AUTHFAILED, etc */ - LIBCURVE_ERROR ChangeOwner(const std::string &filename, - const std::string &newOwner, - const UserInfo_t &userinfo); + LIBCURVE_ERROR ChangeOwner(const std::string& filename, + const std::string& newOwner, + const UserInfo_t& userinfo); /** - * 枚举目录内容 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @param[out]: filestatVec当前文件夹内的文件信息 + * Enumerate directory contents + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @param[out]: filestatVec File information in the current folder */ - LIBCURVE_ERROR Listdir(const std::string &dirpath, - const UserInfo_t &userinfo, - std::vector *filestatVec); + LIBCURVE_ERROR Listdir(const std::string& dirpath, + const UserInfo_t& userinfo, + std::vector* filestatVec); /** - * 向mds注册client metric监听的地址和端口 - * @param: ip客户端ip - * @param: dummyServerPort为监听端口 - * @return: 成功返回0, - * 否则返回LIBCURVE_ERROR::FAILED,LIBCURVE_ERROR::AUTHFAILED等 + * Register the address and port for client metric listening with mds + * @param: ip is the client IP + * @param: dummyServerPort is the listening port + * @return: Successfully returned 0, + * Otherwise, return LIBCURVE_ERROR::FAILED, + * LIBCURVE_ERROR::AUTHFAILED, etc */ - LIBCURVE_ERROR Register(const std::string &ip, uint16_t port); + LIBCURVE_ERROR Register(const std::string& ip, uint16_t port); /** - * 获取chunkserver信息 - * @param[in] addr chunkserver地址信息 - * @param[out] chunkserverInfo 待获取的信息 - * @return:成功返回ok + * Obtain chunkserver information + * @param[in] addr chunkserver address information + * @param[out] chunkserverInfo Information to be obtained + * @return: Successfully returned OK */ LIBCURVE_ERROR - GetChunkServerInfo(const PeerAddr &addr, - CopysetPeerInfo *chunkserverInfo); + GetChunkServerInfo(const PeerAddr& addr, + CopysetPeerInfo* chunkserverInfo); /** - * 获取server上所有chunkserver的id - * @param[in]: ip为server的ip地址 - * @param[out]: csIds用于保存chunkserver的id - * @return: 成功返回LIBCURVE_ERROR::OK,失败返回LIBCURVE_ERROR::FAILED + * Obtain the IDs of all chunkservers on the server + * @param[in]: ip is the IP address of the server + * @param[out]: csIds is used to save the id of the chunkserver + * @return: Successfully returned LIBCURVE_ERROR::OK, failure returns + * LIBCURVE_ERROR::FAILED */ - LIBCURVE_ERROR ListChunkServerInServer(const std::string &ip, - std::vector *csIds); + LIBCURVE_ERROR ListChunkServerInServer(const std::string& ip, + std::vector* csIds); /** - * 析构,回收资源 + * Tear down and reclaim resources */ void UnInitialize(); /** - * 将mds侧错误码对应到libcurve错误码 - * @param: statecode为mds一侧错误码 - * @param[out]: 出参errcode为libcurve一侧的错误码 + * Map the mds side error code to the libcurve
    /**
-     * 将mds侧错误码对应到libcurve错误码
-     * @param: statecode为mds一侧错误码
-     * @param[out]: 出参errcode为libcurve一侧的错误码
+     * Map an mds-side error code to a libcurve error code
+     * @param: statecode is the error code on the mds side
+     * @param[out]: errcode is the corresponding error code on the libcurve
+     *              side
      */
-    void MDSStatusCode2LibcurveError(const ::curve::mds::StatusCode &statcode,
-                                     LIBCURVE_ERROR *errcode);
+    void MDSStatusCode2LibcurveError(const ::curve::mds::StatusCode& statcode,
+                                     LIBCURVE_ERROR* errcode);

     LIBCURVE_ERROR ReturnError(int retcode);

  private:
-    // 初始化标志,放置重复初始化
+    // Initialization flag, to prevent duplicate initialization
     bool inited_ = false;

-    // 当前模块的初始化option配置
+    // Initialization option configuration of the current module
     MetaServerOption metaServerOpt_;

-    // client与mds通信的metric统计
+    // Metric statistics for communication between client and mds
     MDSClientMetric mdsClientMetric_;

     RPCExcutorRetryPolicy rpcExcutor_;
diff --git a/src/client/mds_client_base.h b/src/client/mds_client_base.h
index 64178e43e9..6cb3340231 100644
--- a/src/client/mds_client_base.h
+++ b/src/client/mds_client_base.h
@@ -38,120 +38,120 @@
 namespace curve {
 namespace client {

-using curve::mds::OpenFileRequest;
-using curve::mds::OpenFileResponse;
-using curve::mds::CreateFileRequest;
-using curve::mds::CreateFileResponse;
+using curve::mds::ChangeOwnerRequest;
+using curve::mds::ChangeOwnerResponse;
+using curve::mds::CheckSnapShotStatusRequest;
+using curve::mds::CheckSnapShotStatusResponse;
 using curve::mds::CloseFileRequest;
 using curve::mds::CloseFileResponse;
-using curve::mds::RenameFileRequest;
-using curve::mds::RenameFileResponse;
-using curve::mds::ExtendFileRequest;
-using curve::mds::ExtendFileResponse;
+using curve::mds::CreateCloneFileRequest;
+using curve::mds::CreateCloneFileResponse;
+using curve::mds::CreateFileRequest;
+using curve::mds::CreateFileResponse;
+using curve::mds::CreateSnapShotRequest;
+using curve::mds::CreateSnapShotResponse;
+using curve::mds::DeAllocateSegmentRequest;
+using curve::mds::DeAllocateSegmentResponse;
 using curve::mds::DeleteFileRequest;
 using curve::mds::DeleteFileResponse;
-using curve::mds::RecoverFileRequest;
-using curve::mds::RecoverFileResponse;
+using curve::mds::DeleteSnapShotRequest;
+using curve::mds::DeleteSnapShotResponse;
+using curve::mds::ExtendFileRequest;
+using curve::mds::ExtendFileResponse;
 using curve::mds::GetFileInfoRequest;
 using curve::mds::GetFileInfoResponse;
+using curve::mds::GetOrAllocateSegmentRequest;
+using curve::mds::GetOrAllocateSegmentResponse;
+using curve::mds::IncreaseFileEpochRequest;
 using curve::mds::IncreaseFileEpochResponse;
-using curve::mds::DeleteSnapShotRequest;
-using curve::mds::DeleteSnapShotResponse;
-using curve::mds::ReFreshSessionRequest;
-using curve::mds::ReFreshSessionResponse;
 using curve::mds::ListDirRequest;
 using curve::mds::ListDirResponse;
-using curve::mds::ChangeOwnerRequest;
-using curve::mds::ChangeOwnerResponse;
-using curve::mds::CreateSnapShotRequest;
-using curve::mds::CreateSnapShotResponse;
-using curve::mds::CreateCloneFileRequest;
-using curve::mds::CreateCloneFileResponse;
-using curve::mds::SetCloneFileStatusRequest;
-using curve::mds::SetCloneFileStatusResponse;
-using curve::mds::GetOrAllocateSegmentRequest;
-using curve::mds::GetOrAllocateSegmentResponse;
-using curve::mds::DeAllocateSegmentRequest;
-using curve::mds::DeAllocateSegmentResponse;
-using curve::mds::CheckSnapShotStatusRequest;
-using curve::mds::CheckSnapShotStatusResponse;
 using curve::mds::ListSnapShotFileInfoRequest;
 using curve::mds::ListSnapShotFileInfoResponse;
-using curve::mds::GetOrAllocateSegmentRequest;
-using 
curve::mds::GetOrAllocateSegmentResponse; +using curve::mds::OpenFileRequest; +using curve::mds::OpenFileResponse; +using curve::mds::RecoverFileRequest; +using curve::mds::RecoverFileResponse; +using curve::mds::ReFreshSessionRequest; +using curve::mds::ReFreshSessionResponse; +using curve::mds::RenameFileRequest; +using curve::mds::RenameFileResponse; +using curve::mds::SetCloneFileStatusRequest; +using curve::mds::SetCloneFileStatusResponse; +using curve::mds::topology::GetChunkServerInfoResponse; using curve::mds::topology::GetChunkServerListInCopySetsRequest; using curve::mds::topology::GetChunkServerListInCopySetsResponse; using curve::mds::topology::GetClusterInfoRequest; using curve::mds::topology::GetClusterInfoResponse; -using curve::mds::topology::GetChunkServerInfoResponse; using curve::mds::topology::ListChunkServerResponse; -using curve::mds::IncreaseFileEpochRequest; -using curve::mds::IncreaseFileEpochResponse; using curve::mds::topology::ListPoolsetRequest; using curve::mds::topology::ListPoolsetResponse; extern const char* kRootUserName; -// MDSClientBase将所有与mds的RPC接口抽离,与业务逻辑解耦 -// 这里只负责rpc的发送,具体的业务处理逻辑通过reponse和controller向上 -// 返回给调用者,有调用者处理 +// MDSClientBase abstracts all RPC interfaces with the MDS, decoupling them from +// business logic. Here, it is responsible only for sending RPC requests, while +// the specific business logic processing is returned to the caller through +// responses and controllers, which are handled by the caller. class MDSClientBase { public: /** - * 打开文件 - * @param: filename是文件名 - * @param: userinfo为user信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Open File + * @param: filename is the file name + * @param: userinfo is the user information + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void OpenFile(const std::string& filename, - const UserInfo_t& userinfo, - OpenFileResponse* response, - brpc::Controller* cntl, + void OpenFile(const std::string& filename, const UserInfo_t& userinfo, + OpenFileResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** - * 创建文件 - * @param: filename创建文件的文件名 - * @param: userinfo为user信息 - * @param: size文件长度 - * @param: normalFile表示创建的是普通文件还是目录文件,如果是目录则忽略size - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Create File + * @param: filename The file name used to create the file + * @param: userinfo is the user information + * @param: size File length + * @param: normalFile indicates whether the created file is a regular file + * or a directory file. 
If it is a directory, size is ignored + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void CreateFile(const CreateFileContext& context, - CreateFileResponse* response, - brpc::Controller* cntl, + CreateFileResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** - * 关闭文件,需要携带sessionid,这样mds端会在数据库删除该session信息 - * @param: filename是要续约的文件名 - * @param: userinfo为user信息 - * @param: sessionid是文件的session信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * To close the file, it is necessary to carry the sessionid, so that the + * mds side will delete the session information in the database + * @param: filename is the file name to be renewed + * @param: userinfo is the user information + * @param: sessionid is the session information of the file + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void CloseFile(const std::string& filename, - const UserInfo_t& userinfo, - const std::string& sessionid, - CloseFileResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void CloseFile(const std::string& filename, const UserInfo_t& userinfo, + const std::string& sessionid, CloseFileResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取文件信息,fi是出参 - * @param: filename是文件名 - * @param: userinfo为user信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain file information, where fi is the output parameter + * @param: filename is the file name + * @param: userinfo is the user information + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void GetFileInfo(const std::string& filename, - const UserInfo_t& userinfo, - GetFileInfoResponse* response, - brpc::Controller* cntl, + void GetFileInfo(const std::string& filename, const UserInfo_t& userinfo, + GetFileInfoResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** @@ -164,175 +164,177 @@ class MDSClientBase { * @param[in] channel rpc channel * */ - void IncreaseEpoch(const std::string& filename, - const UserInfo_t& userinfo, - IncreaseFileEpochResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void IncreaseEpoch(const std::string& filename, const UserInfo_t& userinfo, + IncreaseFileEpochResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 创建版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要创建快照的文件名 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Create a snapshot with version number seq + * @param: userinfo is the user information + * @param: filename is the file name to create the snapshot + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is 
the current channel established with MDS */ - void CreateSnapShot(const std::string& filename, - const UserInfo_t& userinfo, + void CreateSnapShot(const std::string& filename, const UserInfo_t& userinfo, CreateSnapShotResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 删除版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要快照的文件名 - * @param: seq是创建快照时文件的版本信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Delete snapshot with version number seq + * @param: userinfo is the user information + * @param: filename is the file name to be snapshot + * @param: seq is the version information of the file when creating the + * snapshot + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void DeleteSnapShot(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t seq, - DeleteSnapShotResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void DeleteSnapShot(const std::string& filename, const UserInfo_t& userinfo, + uint64_t seq, DeleteSnapShotResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 以列表的形式获取版本号为seq的snapshot文件信息,snapif是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain snapshot file information with version number seq in the form of a + * list, where snapif is the output parameter + * @param: filename is the file name to be snapshot + * @param: userinfo is the user information + * @param: seq is the version information of the file when creating the + * snapshot + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void ListSnapShot(const std::string& filename, - const UserInfo_t& userinfo, + void ListSnapShot(const std::string& filename, const UserInfo_t& userinfo, const std::vector* seq, ListSnapShotFileInfoResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取快照的chunk信息并更新到metacache,segInfo是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param: offset是文件内的偏移 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain the chunk information of the snapshot and update it to the + * metacache, where segInfo is the output parameter + * @param: filename is the file name to be snapshot + * @param: userinfo is the user information + * @param: seq is the version information of the file when creating the + * snapshot + * @param: offset is the offset within the file + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void GetSnapshotSegmentInfo(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t seq, + const 
UserInfo_t& userinfo, uint64_t seq,
                                 uint64_t offset,
                                 GetOrAllocateSegmentResponse* response,
-                                brpc::Controller* cntl,
-                                brpc::Channel* channel);
+                                brpc::Controller* cntl, brpc::Channel* channel);

     /**
-     * 文件接口在打开文件的时候需要与mds保持心跳,refresh用来续约
-     * 续约结果将会通过LeaseRefreshResult* resp返回给调用层
-     * @param: filename是要续约的文件名
-     * @param: sessionid是文件的session信息
-     * @param[out]: response为该rpc的response,提供给外部处理
-     * @param[in|out]: cntl既是入参,也是出参,返回RPC状态
-     * @param[in]:channel是当前与mds建立的通道
+     * The file interface needs to maintain a heartbeat with MDS while a file
+     * is open, and refresh is used to renew the lease. The renewal result is
+     * returned to the calling layer through LeaseRefreshResult* resp
+     * @param: filename is the name of the file whose lease is being renewed
+     * @param: sessionid is the session information of the file
+     * @param[out]: response is the response of the rpc, provided for external
+     * processing
+     * @param[in|out]: cntl is both an input and output parameter, returning RPC
+     * status
+     * @param[in]: channel is the current channel established with MDS
      */
-    void RefreshSession(const std::string& filename,
-                        const UserInfo_t& userinfo,
+    void RefreshSession(const std::string& filename, const UserInfo_t& userinfo,
                         const std::string& sessionid,
                         ReFreshSessionResponse* response,
-                        brpc::Controller* cntl,
-                        brpc::Channel* channel);
+                        brpc::Controller* cntl, brpc::Channel* channel);

     /**
-     * 获取快照状态
-     * @param: filenam文件名
-     * @param: userinfo是用户信息
-     * @param: seq是文件版本号信息
-     * @param[out]: response为该rpc的response,提供给外部处理
-     * @param[in|out]: cntl既是入参,也是出参,返回RPC状态
-     * @param[in]:channel是当前与mds建立的通道
+     * Get snapshot status
+     * @param: filename File name
+     * @param: userinfo is the user information
+     * @param: seq is the file version number information
+     * @param[out]: response is the response of the rpc, provided for external
+     * processing
+     * @param[in|out]: cntl is both an input and output parameter, returning RPC
+     * status
+     * @param[in]: channel is the current channel established with MDS
      */
     void CheckSnapShotStatus(const std::string& filename,
-                             const UserInfo_t& userinfo,
-                             uint64_t seq,
+                             const UserInfo_t& userinfo, uint64_t seq,
                              CheckSnapShotStatusResponse* response,
-                             brpc::Controller* cntl,
-                             brpc::Channel* channel);
+                             brpc::Controller* cntl, brpc::Channel* channel);

     /**
-     * 获取copysetid对应的serverlist信息并更新到metacache
-     * @param: logicPoolId逻辑池信息
-     * @param: copysetidvec为要获取的copyset列表
-     * @param[out]: response为该rpc的response,提供给外部处理
-     * @param[in|out]: cntl既是入参,也是出参,返回RPC状态
-     * @param[in]:channel是当前与mds建立的通道
+     * Obtain the serverlist information corresponding to the copysetid and
+     * update it to the metacache
+     * @param: logicPoolId Logical pool information
+     * @param: copysetidvec is the list of copysets to obtain
+     * @param[out]: response is the response of the rpc, provided for external
+     * processing
+     * @param[in|out]: cntl is both an input and output parameter, returning RPC
+     * status
+     * @param[in]: channel is the current channel established with MDS
      */
     void GetServerList(const LogicPoolID& logicalpooid,
                        const std::vector& copysetidvec,
                        GetChunkServerListInCopySetsResponse* response,
-                       brpc::Controller* cntl,
-                       brpc::Channel* channel);
+                       brpc::Controller* cntl, brpc::Channel* channel);

     /**
-     * 获取mds对应的cluster id
-     * @param[out]: response为该rpc的respoonse,提供给外部处理
-     * @param[in|out]: cntl既是入参,也是出参,返回RPC状态
-     * @param[in]: channel是当前与mds建立的通道
+     * Obtain the cluster ID corresponding to the mds
+     * @param[out]: response is the response of the rpc, provided for external
+     * processing
+     * @param[in|out]: cntl 
is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void GetClusterInfo(GetClusterInfoResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); - void ListPoolset(ListPoolsetResponse* response, - brpc::Controller* cntl, + void ListPoolset(ListPoolsetResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** - * 创建clone文件 - * @param source 克隆源文件名 - * @param:destination clone目标文件名 - * @param:userinfo 用户信息 - * @param:size 文件大小 - * @param:sn 版本号 - * @param:chunksize是创建文件的chunk大小 + * Create clone file + * @param source Clone source file name + * @param: destination clone Destination file name + * @param: userinfo User Information + * @param: size File size + * @param: sn version number + * @param: chunksize is the chunk size of the created file * @param stripeUnit stripe size * @param stripeCount stripe count - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void CreateCloneFile(const std::string& source, const std::string& destination, - const UserInfo_t& userinfo, - uint64_t size, - uint64_t sn, - uint32_t chunksize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, + const UserInfo_t& userinfo, uint64_t size, uint64_t sn, + uint32_t chunksize, uint64_t stripeUnit, + uint64_t stripeCount, const std::string& poolset, CreateCloneFileResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * @brief 通知mds完成Clone Meta - * @param: filename 目标文件 - * @param: filestatus为要设置的目标状态 - * @param: userinfo用户信息 - * @param: fileId为文件ID信息,非必填 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * @brief Notify mds to complete Clone Meta + * @param: filename Target file + * @param: filestatus is the target state to be set + * @param: userinfo User information + * @param: fileId is the file ID information, not required + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ void SetCloneFileStatus(const std::string& filename, const FileStatus& filestatus, - const UserInfo_t& userinfo, - uint64_t fileID, + const UserInfo_t& userinfo, uint64_t fileID, SetCloneFileStatusResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** * Get or Alloc SegmentInfo,and update to Metacache @@ -344,68 +346,63 @@ class MDSClientBase { * @param[in|out]: cntl rpc controller * @param[in]:channel rpc channel */ - void GetOrAllocateSegment(bool allocate, - uint64_t offset, - const FInfo_t* fi, - const FileEpoch_t *fEpoch, + void GetOrAllocateSegment(bool allocate, uint64_t offset, const FInfo_t* fi, + const FileEpoch_t* fEpoch, GetOrAllocateSegmentResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); void DeAllocateSegment(const FInfo* fileInfo, uint64_t 
segmentOffset,
                            DeAllocateSegmentResponse* response,
                            brpc::Controller* cntl, brpc::Channel* channel);

     /**
-     * @brief 重名文件
-     * @param:userinfo 用户信息
-     * @param:originId 被恢复的原始文件Id
-     * @param:destinationId 克隆出的目标文件Id
-     * @param:origin 被恢复的原始文件名
-     * @param:destination 克隆出的目标文件
-     * @param[out]: response为该rpc的response,提供给外部处理
-     * @param[in|out]: cntl既是入参,也是出参,返回RPC状态
-     * @param[in]:channel是当前与mds建立的通道
+     * @brief Rename a file
+     * @param: userinfo User information
+     * @param: originId ID of the original file being recovered
+     * @param: destinationId ID of the cloned target file
+     * @param: origin Name of the original file being recovered
+     * @param: destination The cloned target file
+     * @param[out]: response is the response of the rpc, provided for external
+     * processing
+     * @param[in|out]: cntl is both an input and output parameter, returning RPC
+     * status
+     * @param[in]: channel is the current channel established with MDS
      */
-    void RenameFile(const UserInfo_t& userinfo,
-                    const std::string &origin,
-                    const std::string &destination,
-                    uint64_t originId,
-                    uint64_t destinationId,
-                    RenameFileResponse* response,
-                    brpc::Controller* cntl,
-                    brpc::Channel* channel);
+    void RenameFile(const UserInfo_t& userinfo, const std::string& origin,
+                    const std::string& destination, uint64_t originId,
+                    uint64_t destinationId, RenameFileResponse* response,
+                    brpc::Controller* cntl, brpc::Channel* channel);

     /**
-     * 扩展文件
-     * @param: userinfo是用户信息
-     * @param: filename文件名
-     * @param: newsize新的size
-     * @param[out]: response为该rpc的response,提供给外部处理
-     * @param[in|out]: cntl既是入参,也是出参,返回RPC状态
-     * @param[in]:channel是当前与mds建立的通道
+     * Extend a file
+     * @param: userinfo is the user information
+     * @param: filename File name
+     * @param: newsize New size
+     * @param[out]: response is the response of the rpc, provided for external
+     * processing
+     * @param[in|out]: cntl is both an input and output parameter, returning RPC
+     * status
+     * @param[in]: channel is the current channel established with MDS
      */
-    void Extend(const std::string& filename,
-                const UserInfo_t& userinfo,
-                uint64_t newsize,
-                ExtendFileResponse* response,
-                brpc::Controller* cntl,
-                brpc::Channel* channel);
+    void Extend(const std::string& filename, const UserInfo_t& userinfo,
+                uint64_t newsize, ExtendFileResponse* response,
+                brpc::Controller* cntl, brpc::Channel* channel);

     /**
-     * 删除文件
-     * @param: userinfo是用户信息
-     * @param: filename待删除的文件名
-     * @param: deleteforce是否强制删除而不放入垃圾回收站
-     * @param: id为文件id,默认值为0,如果用户不指定该值,不会传id到mds
-     * @param[out]: response为该rpc的response,提供给外部处理
-     * @param[in|out]: cntl既是入参,也是出参,返回RPC状态
-     * @param[in]:channel是当前与mds建立的通道
+     * Delete a file
+     * @param: userinfo is the user information
+     * @param: filename The file name to be deleted
+     * @param: deleteforce Whether to force delete the file without moving it
+     * to the recycle bin
+     * @param: id is the file id, with a default value of 0. 
If the user does + * not specify this value, the id will not be passed to mds + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void DeleteFile(const std::string& filename, - const UserInfo_t& userinfo, - bool deleteforce, - uint64_t fileid, - DeleteFileResponse* response, - brpc::Controller* cntl, + void DeleteFile(const std::string& filename, const UserInfo_t& userinfo, + bool deleteforce, uint64_t fileid, + DeleteFileResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** @@ -417,61 +414,59 @@ class MDSClientBase { * @param[in|out]: cntl, return RPC status * @param[in]:channel */ - void RecoverFile(const std::string& filename, - const UserInfo_t& userinfo, - uint64_t fileid, - RecoverFileResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void RecoverFile(const std::string& filename, const UserInfo_t& userinfo, + uint64_t fileid, RecoverFileResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 变更owner - * @param: filename待变更的文件名 - * @param: newOwner新的owner信息 - * @param: userinfo执行此操作的user信息,只有root用户才能执行变更 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Change owner + * @param: filename The file name to be changed + * @param: newOwner New owner information + * @param: userinfo The user information for performing this operation, only + * the root user can perform changes + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void ChangeOwner(const std::string& filename, - const std::string& newOwner, - const UserInfo_t& userinfo, - ChangeOwnerResponse* response, - brpc::Controller* cntl, - brpc::Channel* channel); + void ChangeOwner(const std::string& filename, const std::string& newOwner, + const UserInfo_t& userinfo, ChangeOwnerResponse* response, + brpc::Controller* cntl, brpc::Channel* channel); /** - * 枚举目录内容 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 - */ - void Listdir(const std::string& dirpath, - const UserInfo_t& userinfo, - ListDirResponse* response, - brpc::Controller* cntl, + * Enumerate directory contents + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @param[out]: response is the response of the rpc, provided for external + * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS + */ + void Listdir(const std::string& dirpath, const UserInfo_t& userinfo, + ListDirResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取chunkserverID信息 - * @param[in]: ip为当前client的监听地址 - * @param[in]: port为监听端口 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain chunkserverID information + * @param[in]: IP is the listening address of the current client + * @param[in]: port is the listening port + * @param[out]: response is the response of the rpc, provided for external 
+ * processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC + * status + * @param[in]: channel is the current channel established with MDS */ - void GetChunkServerInfo(const std::string& ip, - uint16_t port, + void GetChunkServerInfo(const std::string& ip, uint16_t port, GetChunkServerInfoResponse* reponse, - brpc::Controller* cntl, - brpc::Channel* channel); + brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取server上的所有chunkserver的id - * @param[in]: ip为当前server的地址 - * @param[out]: response是当前rpc调用的response,返回给外部处理 - * @param[in|out]: cntl既是入参也是出参 - * @param[in]: channel是当前与mds建立的通道 + * Obtain the IDs of all chunkservers on the server + * @param[in]: IP is the address of the current server + * @param[out]: response is the response of the current rpc call, returned + * to external processing + * @param[in|out]: cntl is both an input and output parameter + * @param[in]: channel is the current channel established with MDS */ void ListChunkServerInServer(const std::string& ip, ListChunkServerResponse* response, @@ -480,8 +475,8 @@ class MDSClientBase { private: /** - * 为不同的request填充user信息 - * @param: request是待填充的变量指针 + * Fill in user information for different requests + * @param: request is the pointer to the variable to be filled in */ template void FillUserInfo(T* request, const UserInfo_t& userinfo) { @@ -499,7 +494,7 @@ class MDSClientBase { std::string CalcSignature(const UserInfo& userinfo, uint64_t date) const; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_MDS_CLIENT_BASE_H_ diff --git a/src/client/metacache.cpp b/src/client/metacache.cpp index 7c0a25a262..2265f6b6dd 100644 --- a/src/client/metacache.cpp +++ b/src/client/metacache.cpp @@ -19,27 +19,26 @@ * File Created: Tuesday, 25th September 2018 2:06:35 pm * Author: tongguangxun */ -#include +#include "src/client/metacache.h" #include +#include +#include #include #include -#include #include "proto/cli.pb.h" - -#include "src/client/metacache.h" -#include "src/client/mds_client.h" #include "src/client/client_common.h" +#include "src/client/mds_client.h" #include "src/common/concurrent/concurrent.h" namespace curve { namespace client { -using curve::common::WriteLockGuard; -using curve::common::ReadLockGuard; using curve::client::ClientConfig; +using curve::common::ReadLockGuard; +using curve::common::WriteLockGuard; void MetaCache::Init(const MetaCacheOption& metaCacheOpt, MDSClient* mdsclient) { @@ -87,12 +86,9 @@ bool MetaCache::IsLeaderMayChange(LogicPoolID logicPoolId, return flag; } -int MetaCache::GetLeader(LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkServerID* serverId, - EndPoint* serverAddr, - bool refresh, - FileMetric* fm) { +int MetaCache::GetLeader(LogicPoolID logicPoolId, CopysetID copysetId, + ChunkServerID* serverId, EndPoint* serverAddr, + bool refresh, FileMetric* fm) { const auto key = CalcLogicPoolCopysetID(logicPoolId, copysetId); CopysetInfo targetInfo; @@ -123,7 +119,8 @@ int MetaCache::GetLeader(LogicPoolID logicPoolId, << "logicpool id = " << logicPoolId << ", copyset id = " << copysetId; - // 重试失败,这时候需要向mds重新拉取最新的copyset信息了 + // The retry failed. At this point, it is necessary to retrieve the + // latest copyset information from mds again ret = UpdateCopysetInfoFromMDS(logicPoolId, copysetId); if (ret == 0) { continue; @@ -135,8 +132,8 @@ int MetaCache::GetLeader(LogicPoolID logicPoolId, if (ret == -1) { LOG(WARNING) << "get leader failed after retry!" 
- << ", copyset id = " << copysetId - << ", logicpool id = " << logicPoolId; + << ", copyset id = " << copysetId + << ", logicpool id = " << logicPoolId; return -1; } @@ -148,25 +145,24 @@ int MetaCache::UpdateLeaderInternal(LogicPoolID logicPoolId, CopysetInfo* toupdateCopyset, FileMetric* fm) { ChunkServerID csid = 0; - PeerAddr leaderaddr; + PeerAddr leaderaddr; GetLeaderRpcOption rpcOption(metacacheopt_.metacacheGetLeaderRPCTimeOutMS); - GetLeaderInfo getLeaderInfo(logicPoolId, - copysetId, toupdateCopyset->csinfos_, - toupdateCopyset->GetCurrentLeaderIndex(), - rpcOption); - int ret = ServiceHelper::GetLeader( - getLeaderInfo, &leaderaddr, &csid, fm); + GetLeaderInfo getLeaderInfo( + logicPoolId, copysetId, toupdateCopyset->csinfos_, + toupdateCopyset->GetCurrentLeaderIndex(), rpcOption); + int ret = ServiceHelper::GetLeader(getLeaderInfo, &leaderaddr, &csid, fm); if (ret == -1) { LOG(WARNING) << "get leader failed!" - << ", copyset id = " << copysetId - << ", logicpool id = " << logicPoolId; + << ", copyset id = " << copysetId + << ", logicpool id = " << logicPoolId; return -1; } ret = toupdateCopyset->UpdateLeaderInfo(leaderaddr); - // 如果更新失败,说明leader地址不在当前配置组中,从mds获取chunkserver的信息 + // If the update fails, it indicates that the leader address is not in the + // current configuration group. Obtain chunkserver information from MDS if (ret == -1 && !leaderaddr.IsEmpty()) { CopysetPeerInfo csInfo; ret = mdsclient_->GetChunkServerInfo(leaderaddr, &csInfo); @@ -177,8 +173,8 @@ int MetaCache::UpdateLeaderInternal(LogicPoolID logicPoolId, return -1; } - UpdateCopysetInfoIfMatchCurrentLeader( - logicPoolId, copysetId, leaderaddr); + UpdateCopysetInfoIfMatchCurrentLeader(logicPoolId, copysetId, + leaderaddr); *toupdateCopyset = GetCopysetinfo(logicPoolId, copysetId); ret = toupdateCopyset->UpdateLeaderInfo(leaderaddr, csInfo); } @@ -201,18 +197,16 @@ int MetaCache::UpdateCopysetInfoFromMDS(LogicPoolID logicPoolId, return -1; } - // 更新chunkserverid到copyset映射关系 + // Update chunkserverid to copyset mapping relationship UpdateChunkserverCopysetInfo(logicPoolId, copysetInfos[0]); - // 更新logicpool和copysetid到copysetinfo的映射 + // Update the mapping of logicpool and copysetid to copysetinfo UpdateCopysetInfo(logicPoolId, copysetId, copysetInfos[0]); return 0; } void MetaCache::UpdateCopysetInfoIfMatchCurrentLeader( - LogicPoolID logicPoolId, - CopysetID copysetId, - const PeerAddr& leaderAddr) { + LogicPoolID logicPoolId, CopysetID copysetId, const PeerAddr& leaderAddr) { std::vector> copysetInfos; (void)mdsclient_->GetServerList(logicPoolId, {copysetId}, ©setInfos); @@ -224,15 +218,15 @@ void MetaCache::UpdateCopysetInfoIfMatchCurrentLeader( << ", copyset id = " << copysetId << ", current leader = " << leaderAddr.ToString(); - // 更新chunkserverid到copyset的映射关系 + // Update the mapping relationship between chunkserverid and copyset UpdateChunkserverCopysetInfo(logicPoolId, copysetInfos[0]); - // 更新logicpool和copysetid到copysetinfo的映射 + // Update the mapping of logicpool and copysetid to copysetinfo UpdateCopysetInfo(logicPoolId, copysetId, copysetInfos[0]); } } CopysetInfo MetaCache::GetServerList(LogicPoolID logicPoolId, - CopysetID copysetId) { + CopysetID copysetId) { const auto key = CalcLogicPoolCopysetID(logicPoolId, copysetId); CopysetInfo ret; @@ -250,8 +244,7 @@ CopysetInfo MetaCache::GetServerList(LogicPoolID logicPoolId, * the copyset client will call UpdateLeader. 
* return the ChunkServerID to invoker
 */
-int MetaCache::UpdateLeader(LogicPoolID logicPoolId,
-                            CopysetID copysetId,
+int MetaCache::UpdateLeader(LogicPoolID logicPoolId, CopysetID copysetId,
                             const EndPoint& leaderAddr) {
     const auto key = CalcLogicPoolCopysetID(logicPoolId, copysetId);
@@ -329,11 +322,13 @@ void MetaCache::SetChunkserverUnstable(ChunkServerID csid) {
             ChunkServerID leaderid;
             if (cpinfo->second.GetCurrentLeaderID(&leaderid)) {
                 if (leaderid == csid) {
-                    // 只设置leaderid为当前serverid的Lcopyset
+                    // Only set the flag for the copyset whose leader id is
+                    // the current server id
                     cpinfo->second.SetLeaderUnstableFlag();
                 }
             } else {
-                // 当前copyset集群信息未知,直接设置LeaderUnStable
+                // The current copyset cluster information is unknown, set
+                // LeaderUnStable directly
                 cpinfo->second.SetLeaderUnstableFlag();
             }
         }
@@ -346,24 +341,24 @@ void MetaCache::AddCopysetIDInfo(ChunkServerID csid,
     chunkserverCopysetIDMap_[csid].emplace(cpidinfo);
 }

-void MetaCache::UpdateChunkserverCopysetInfo(LogicPoolID lpid,
-                                             const CopysetInfo& cpinfo) {
+void MetaCache::UpdateChunkserverCopysetInfo(
+    LogicPoolID lpid, const CopysetInfo& cpinfo) {
     ReadLockGuard rdlk(rwlock4CopysetInfo_);

     const auto key = CalcLogicPoolCopysetID(lpid, cpinfo.cpid_);
-    // 先获取原来的chunkserver到copyset映射
+    // First, obtain the original chunkserver to copyset mapping
     auto previouscpinfo = lpcsid2CopsetInfoMap_.find(key);
     if (previouscpinfo != lpcsid2CopsetInfoMap_.end()) {
         std::vector newID;
         std::vector changedID;

-        // 先判断当前copyset有没有变更chunkserverid
+        // First check whether the chunkservers of the current copyset have
+        // changed
         for (auto iter : previouscpinfo->second.csinfos_) {
             changedID.push_back(iter.peerID);
         }

         for (auto iter : cpinfo.csinfos_) {
-            auto it = std::find(changedID.begin(), changedID.end(),
-                                iter.peerID);
+            auto it =
+                std::find(changedID.begin(), changedID.end(), iter.peerID);
             if (it != changedID.end()) {
                 changedID.erase(it);
             } else {
@@ -371,7 +366,7 @@ void MetaCache::UpdateChunkserverCopysetInfo(LogicPoolID lpid,
             }
         }

-        // 删除变更的copyset信息
+        // Delete changed copyset information
         for (auto chunkserverid : changedID) {
             {
                 WriteLockGuard wrlk(rwlock4CSCopysetIDMap_);
@@ -382,7 +377,7 @@ void MetaCache::UpdateChunkserverCopysetInfo(LogicPoolID lpid,
             }
         }

-        // 更新新的copyset信息到chunkserver
+        // Update new copyset information to chunkserver
         for (auto chunkserverid : newID) {
             WriteLockGuard wrlk(rwlock4CSCopysetIDMap_);
             chunkserverCopysetIDMap_[chunkserverid].emplace(lpid, cpinfo.cpid_);
         }
     }
 }

-CopysetInfo MetaCache::GetCopysetinfo(
-    LogicPoolID lpid, CopysetID csid) {
+CopysetInfo MetaCache::GetCopysetinfo(LogicPoolID lpid,
+                                      CopysetID csid) {
     ReadLockGuard rdlk(rwlock4CopysetInfo_);
     const auto key = CalcLogicPoolCopysetID(lpid, csid);
     auto cpinfo = lpcsid2CopsetInfoMap_.find(key);
@@ -412,10 +407,8 @@ FileSegment* MetaCache::GetFileSegment(SegmentIndex segmentIndex) {
         WriteLockGuard lk(rwlock4Segments_);
         auto ret = segments_.emplace(
-            std::piecewise_construct,
-            std::forward_as_tuple(segmentIndex),
-            std::forward_as_tuple(segmentIndex,
-                                  fileInfo_.segmentsize,
+            std::piecewise_construct, std::forward_as_tuple(segmentIndex),
+            std::forward_as_tuple(segmentIndex, fileInfo_.segmentsize,
                                   metacacheopt_.discardGranularity));

     return &(ret.first->second);
@@ -435,5 +428,5 @@ void MetaCache::CleanChunksInSegment(SegmentIndex segmentIndex) {
     }
 }

-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve
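[Editor's note] To make the control flow of this file concrete, here is a schematic caller-side view of how GetLeader, leader refresh, and the MDS fallback interact (hypothetical snippet; `cache` is a MetaCache, `lpid`/`cpid` are assumed identifiers, and failure handling is reduced to a single retry):

    ChunkServerID leaderId;
    butil::EndPoint leaderAddr;
    // Normally served straight from the cache, without any RPC.
    if (cache.GetLeader(lpid, cpid, &leaderId, &leaderAddr) != 0) {
        // Cache miss or stale leader: refresh pulls the current leader from
        // the copyset peers and, if the reported leader is not in the cached
        // configuration, falls back to MDS (UpdateCopysetInfoFromMDS).
        cache.GetLeader(lpid, cpid, &leaderId, &leaderAddr, /*refresh=*/true);
    }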
diff --git a/src/client/metacache.h b/src/client/metacache.h
index a9a6e1fab7..1340a3eb25 100644
--- a/src/client/metacache.h
+++ b/src/client/metacache.h
@@ -61,69 +61,78 @@ class MetaCache {
     virtual ~MetaCache() = default;

     /**
-     * 初始化函数
-     * @param: metacacheopt为当前metacache的配置option信息
-     * @param: mdsclient为与mds通信的指针。
-     * 为什么这里需要把mdsclient传进来?
-     * 因为首先metacache充当的角色就是对于MDS一侧的信息缓存
-     * 所以对于底层想使用metacache的copyset client或者chunk closure
-     * 来说,他只需要知道metacache就可以了,不需要再去向mds查询信息,
-     * 在copyset client或者chunk closure发送IO失败之后会重新获取leader
-     * 然后再重试,如果leader获取不成功,需要向mds一侧查询当前copyset的最新信息,
-     * 这里将查询mds封装在内部了,这样copyset client和chunk closure就不感知mds了
+     * Initialization function
+     * @param: metacacheopt is the configuration option information for the
+     * current metacache
+     * @param: mdsclient is the pointer used to communicate with mds.
+     * Why does mdsclient need to be passed in here?
+     * Because the metacache acts as a cache of MDS-side information: lower
+     * layers such as the copyset client or chunk closure only need to know
+     * about the metacache, and never query MDS themselves. When the copyset
+     * client or chunk closure fails to send IO, it fetches the leader again
+     * and retries; if the leader cannot be obtained, the latest information
+     * of the current copyset has to be queried from the MDS side. That MDS
+     * query is encapsulated inside the metacache, so the copyset client and
+     * chunk closure stay unaware of MDS
      */
-    void Init(const MetaCacheOption &metaCacheOpt, MDSClient *mdsclient);
+    void Init(const MetaCacheOption& metaCacheOpt, MDSClient* mdsclient);

     /**
-     * 通过chunk index获取chunkid信息
-     * @param: chunkidx以index查询chunk对应的id信息
-     * @param: chunkinfo是出参,存储chunk的版本信息
-     * @param: 成功返回OK, 否则返回UNKNOWN_ERROR
+     * Obtain chunk information through the chunk index
+     * @param: chunkidx is the index used to look up the corresponding chunk
+     * ID information
+     * @param: chunkinfo is an output parameter that stores the version
+     * information of the chunk
+     * @param: Returns OK on success, otherwise returns UNKNOWN_ERROR
      */
     virtual MetaCacheErrorType GetChunkInfoByIndex(ChunkIndex chunkidx,
-                                                   ChunkIDInfo_t *chunkinfo);
+                                                   ChunkIDInfo_t* chunkinfo);

     /**
      * @brief Update cached chunk info by chunk index
      */
     virtual void UpdateChunkInfoByIndex(ChunkIndex cindex,
-                                        const ChunkIDInfo &chunkinfo);
+                                        const ChunkIDInfo& chunkinfo);

     /**
-     * sender发送数据的时候需要知道对应的leader然后发送给对应的chunkserver
-     * 如果get不到的时候,外围设置refresh为true,然后向chunkserver端拉取最新的
-     * server信息,然后更新metacache。
-     * 如果当前copyset的leaderMayChange置位的时候,即使refresh为false,也需要
-     * 先去拉取新的leader信息,才能继续下发IO.
-     * @param: lpid逻辑池id
-     * @param: cpid是copysetid
-     * @param: serverId对应chunkserver的id信息,是出参
-     * @param: serverAddr为serverid对应的ip信息
-     * @param: refresh,如果get不到的时候,外围设置refresh为true,
-     * 然后向chunkserver端拉取最新的
-     * @param: fm用于统计metric
-     * @param: 成功返回0, 否则返回-1
+     * When the sender sends data, it needs to know the corresponding leader
+     * and send it to the corresponding chunkserver. If it cannot retrieve the
+     * leader, and the external setting has "refresh" set to true, it will
+     * then fetch the latest server information from the chunkserver side and
+     * update the metacache. If the "leaderMayChange" flag of the current
+     * copyset is set, even if "refresh" is set to false, it is still
+     * necessary to fetch the new leader information before continuing with
+     * IO operations.
+     * @param: lpid Logical Pool ID
+     * @param: cpid is copysetid
+     * @param: serverId is the output parameter holding the ID of the
+     * corresponding chunkserver
+     * @param: serverAddr is the IP information corresponding to serverId
+     * @param: refresh If the leader cannot be obtained, the caller sets
+     * refresh to true to pull the latest information from the chunkserver
+     * side
+     * @param: fm is used for metric statistics
+     * @param: Returns 0 on success, otherwise returns -1
      */
     virtual int GetLeader(LogicPoolID logicPoolId, CopysetID copysetId,
-                          ChunkServerID *serverId, butil::EndPoint *serverAddr,
-                          bool refresh = false, FileMetric *fm = nullptr);
+                          ChunkServerID* serverId, butil::EndPoint* serverAddr,
+                          bool refresh = false, FileMetric* fm = nullptr);

     /**
-     * 更新某个copyset的leader信息
-     * @param logicPoolId 逻辑池id
-     * @param copysetId 复制组id
-     * @param leaderAddr leader地址
-     * @return: 成功返回0, 否则返回-1
+     * Update the leader information of a copyset
+     * @param logicPoolId Logical Pool ID
+     * @param copysetId Replication group ID
+     * @param leaderAddr leader address
+     * @return: Returns 0 on success, otherwise returns -1
      */
     virtual int UpdateLeader(LogicPoolID logicPoolId, CopysetID copysetId,
-                             const butil::EndPoint &leaderAddr);
+                             const butil::EndPoint& leaderAddr);

     /**
-     * 更新copyset数据信息,包含serverlist
-     * @param: lpid逻辑池id
-     * @param: cpid是copysetid
-     * @param: csinfo是要更新的copyset info
+     * Update copyset data information, including the serverlist
+     * @param: lpid Logical Pool ID
+     * @param: cpid is copysetid
+     * @param: csinfo is the copyset info to be updated
      */
     virtual void UpdateCopysetInfo(LogicPoolID logicPoolId, CopysetID copysetId,
-                                   const CopysetInfo &csinfo);
+                                   const CopysetInfo& csinfo);

     // Add copysets info to cache, and skip already copyset
     void AddCopysetsInfo(
@@ -131,26 +140,26 @@ class MetaCache {
         std::vector>&& copysetsInfo);

     /**
-     * 通过chunk id更新chunkid信息
-     * @param: cid为chunkid
-     * @param: cidinfo为当前chunk对应的id信息
+     * Update chunk information through the chunk id
+     * @param: cid is the chunkid
+     * @param: cidinfo is the ID information corresponding to the current
+     * chunk
      */
-    virtual void UpdateChunkInfoByID(ChunkID cid, const ChunkIDInfo &cidinfo);
+    virtual void UpdateChunkInfoByID(ChunkID cid, const ChunkIDInfo& cidinfo);

     /**
-     * 获取当前copyset的server list信息
-     * @param: lpid逻辑池id
-     * @param: cpid是copysetid
-     * @return: 当前copyset的copysetinfo信息
+     * Obtain the server list information of the current copyset
+     * @param: lpid Logical Pool ID
+     * @param: cpid is copysetid
+     * @return: The copysetinfo information of the current copyset
      */
     virtual CopysetInfo GetServerList(LogicPoolID logicPoolId,
                                       CopysetID copysetId);

     /**
-     * 将ID转化为cache的key
-     * @param: lpid逻辑池id
-     * @param: cpid是copysetid
-     * @return: 为当前的key
+     * Convert the IDs into a cache key
+     * @param: lpid Logical Pool ID
+     * @param: cpid is copysetid
+     * @return: the corresponding cache key
      */
     static LogicPoolCopysetID CalcLogicPoolCopysetID(LogicPoolID logicPoolId,
                                                      CopysetID copysetId) {
@@ -159,45 +168,45 @@
     }

     /**
-     * @brief: 标记整个server上的所有chunkserver为unstable状态
+     * @brief: Mark all chunkservers on the entire server as unstable
      *
-     * @param: serverIp server的ip地址
-     * @return: 0 设置成功 / -1 设置失败
+     * @param: serverIp The IP address of the server
+     * @return: 0 on success / -1 on failure
      */
-    virtual int SetServerUnstable(const std::string &endPoint);
+    virtual int SetServerUnstable(const std::string& endPoint);

     /**
-     * 如果leader所在的chunkserver出现问题了,导致RPC失败。这时候这个
-     * chunkserver上的其他leader copyset也会存在同样的问题,所以需要
-     * 通知当前chunkserver上的leader copyset. 
主要是通过设置这个copyset - * 的leaderMayChange标志,当该copyset的再次下发IO的时候会查看这个 - * 状态,当这个标志位置位的时候,IO下发需要先进行leader refresh, - * 如果leaderrefresh成功,leaderMayChange会被reset。 - * SetChunkserverUnstable就会遍历当前chunkserver上的所有copyset - * 并设置这个chunkserver的leader copyset的leaderMayChange标志。 - * @param: csid是当前不稳定的chunkserver ID + * If the chunkserver where the leader is located encounters a problem, + * leading to RPC failures, then other leader copysets on this chunkserver + * will also face the same issue. Therefore, it is necessary to notify the + * leader copysets on the current chunkserver. This is primarily done by + * setting the "leaderMayChange" flag for these copysets. When IO is issued + * again for a copyset with this flag set, the system will check this + * status. When this flag is set, IO issuance will first perform a leader + * refresh. If the leader refresh is successful, the "leaderMayChange" flag + * will be reset. The "SetChunkserverUnstable" operation will iterate + * through all the copysets on the current chunkserver and set the + * "leaderMayChange" flag for the leader copysets of that chunkserver. + * @param: csid is the currently unstable chunkserver ID */ virtual void SetChunkserverUnstable(ChunkServerID csid); /** - * 向map中添加对应chunkserver的copyset信息 - * @param: csid为当前chunkserverid - * @param: cpid为当前copyset的id信息 + * Add copyset information for the corresponding chunkserver to the map + * @param: csid is the current chunkserverid + * @param: cpid is the ID information of the current copyset */ virtual void AddCopysetIDInfo(ChunkServerID csid, - const CopysetIDInfo &cpid); + const CopysetIDInfo& cpid); - virtual void - UpdateChunkserverCopysetInfo(LogicPoolID lpid, - const CopysetInfo &cpinfo); + virtual void UpdateChunkserverCopysetInfo( + LogicPoolID lpid, const CopysetInfo& cpinfo); - void UpdateFileInfo(const FInfo &fileInfo) { fileInfo_ = fileInfo; } + void UpdateFileInfo(const FInfo& fileInfo) { fileInfo_ = fileInfo; } - const FInfo *GetFileInfo() const { return &fileInfo_; } + const FInfo* GetFileInfo() const { return &fileInfo_; } - void UpdateFileEpoch(const FileEpoch& fEpoch) { - fEpoch_ = fEpoch; - } + void UpdateFileEpoch(const FileEpoch& fEpoch) { fEpoch_ = fEpoch; } const FileEpoch* GetFileEpoch() const { return &fEpoch_; } @@ -212,26 +221,26 @@ class MetaCache { } /** - * 获取对应的copyset的LeaderMayChange标志 + * Get the LeaderMayChange flag of the corresponding copyset */ virtual bool IsLeaderMayChange(LogicPoolID logicpoolId, CopysetID copysetId); /** - * 测试使用 - * 获取copysetinfo信息 + * Test Usage + * Obtain copysetinfo information */ virtual CopysetInfo GetCopysetinfo(LogicPoolID lpid, CopysetID csid); - UnstableHelper &GetUnstableHelper() { return unstableHelper_; } + UnstableHelper& GetUnstableHelper() { return unstableHelper_; } uint64_t InodeId() const { return fileInfo_.id; } /** * @brief Get file segment info about the segmentIndex */ - FileSegment *GetFileSegment(SegmentIndex segmentIndex); + FileSegment* GetFileSegment(SegmentIndex segmentIndex); /** * @brief Clean chunks of this segment @@ -240,68 +249,71 @@ class MetaCache { private: /** - * @brief 从mds更新copyset复制组信息 - * @param logicPoolId 逻辑池id - * @param copysetId 复制组id - * @return 0 成功 / -1 失败 + * @brief Update copyset replication group information from mds + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @return 0 successful/-1 failed */ int UpdateCopysetInfoFromMDS(LogicPoolID logicPoolId, CopysetID copysetId); /** - * 更新copyset的leader信息 - * @param[in]: logicPoolId逻辑池信息 - * @param[in]: 
copysetId复制组信息
-     * @param[out]: toupdateCopyset为metacache中待更新的copyset信息指针
+     * Update the leader information of the copyset
+     * @param[in]: logicPoolId Logical pool information
+     * @param[in]: copysetId Replication group information
+     * @param[out]: toupdateCopyset is the pointer to the copyset information
+     * to be updated in the metacache
      */
     int UpdateLeaderInternal(LogicPoolID logicPoolId, CopysetID copysetId,
-                             CopysetInfo *toupdateCopyset,
-                             FileMetric *fm = nullptr);
+                             CopysetInfo* toupdateCopyset,
+                             FileMetric* fm = nullptr);

     /**
-     * 从mds拉去复制组信息,如果当前leader在复制组中
-     * 则更新本地缓存,反之则不更新
-     * @param: logicPoolId 逻辑池id
-     * @param: copysetId 复制组id
-     * @param: leaderAddr 当前的leader address
+     * Pull the replication group information from MDS; if the current leader
+     * is in the replication group, update the local cache, otherwise do not
+     * update
+     * @param: logicPoolId Logical Pool ID
+     * @param: copysetId Replication group ID
+     * @param: leaderAddr The current leader address
      */
     void UpdateCopysetInfoIfMatchCurrentLeader(LogicPoolID logicPoolId,
                                                CopysetID copysetId,
-                                               const PeerAddr &leaderAddr);
+                                               const PeerAddr& leaderAddr);

  private:
-    MDSClient *mdsclient_;
+    MDSClient* mdsclient_;
     MetaCacheOption metacacheopt_;

-    // chunkindex到chunkidinfo的映射表
+    // Mapping table from chunkindex to chunkidinfo
     CURVE_CACHELINE_ALIGNMENT ChunkIndexInfoMap chunkindex2idMap_;

     CURVE_CACHELINE_ALIGNMENT RWLock rwlock4Segments_;
     CURVE_CACHELINE_ALIGNMENT std::unordered_map segments_;  // NOLINT

-    // logicalpoolid和copysetid到copysetinfo的映射表
+    // Mapping table from logicalpoolid and copysetid to copysetinfo
     CURVE_CACHELINE_ALIGNMENT CopysetInfoMap lpcsid2CopsetInfoMap_;

-    // chunkid到chunkidinfo的映射表
+    // Mapping table from chunkid to chunkidinfo
     CURVE_CACHELINE_ALIGNMENT ChunkInfoMap chunkid2chunkInfoMap_;

-    // 三个读写锁分别保护上述三个映射表
+    // Three read-write locks, each protecting one of the three mapping
+    // tables above
     CURVE_CACHELINE_ALIGNMENT RWLock rwlock4chunkInfoMap_;
     CURVE_CACHELINE_ALIGNMENT RWLock rwlock4ChunkInfo_;
     CURVE_CACHELINE_ALIGNMENT RWLock rwlock4CopysetInfo_;

-    // chunkserverCopysetIDMap_存放当前chunkserver到copyset的映射
-    // 当rpc closure设置SetChunkserverUnstable时,会设置该chunkserver
-    // 的所有copyset处于leaderMayChange状态,后续copyset需要判断该值来看
-    // 是否需要刷新leader
+    // chunkserverCopysetIDMap_ stores the mapping of the current chunkserver
+    // to copysets. When an RPC closure sets SetChunkserverUnstable, it sets
+    // all the copysets of that chunkserver to the leaderMayChange state.
+    // Subsequent copyset operations will check this value to determine
+    // whether a leader refresh is needed.
- // chunkserverid到copyset的映射 + // Mapping chunkserverid to copyset std::unordered_map> chunkserverCopysetIDMap_; // NOLINT - // 读写锁保护unStableCSMap + // Read write lock protection unstableCSMap CURVE_CACHELINE_ALIGNMENT RWLock rwlock4CSCopysetIDMap_; - // 当前文件信息 + // Current file information FInfo fileInfo_; // epoch info diff --git a/src/client/metacache_struct.h b/src/client/metacache_struct.h index f283687f3c..4b17893a51 100644 --- a/src/client/metacache_struct.h +++ b/src/client/metacache_struct.h @@ -43,24 +43,25 @@ using curve::common::ReadLockGuard; using curve::common::SpinLock; using curve::common::WriteLockGuard; -// copyset内的chunkserver节点的基本信息 -// 包含当前chunkserver的id信息,以及chunkserver的地址信息 +// Basic information of chunkserver nodes in the copyset +// Contains the ID information of the current chunkserver and the address +// information of the chunkserver template struct CopysetPeerInfo { - // 当前chunkserver节点的ID + // The ID of the current chunkserver node T peerID = 0; - // 当前chunkserver节点的内部地址 + // The internal address of the current chunkserver node PeerAddr internalAddr; - // 当前chunkserver节点的外部地址 + // The external address of the current chunkserver node PeerAddr externalAddr; CopysetPeerInfo() = default; - CopysetPeerInfo(const T &cid, const PeerAddr &internal, - const PeerAddr &external) + CopysetPeerInfo(const T& cid, const PeerAddr& internal, + const PeerAddr& external) : peerID(cid), internalAddr(internal), externalAddr(external) {} - bool operator==(const CopysetPeerInfo &other) const { + bool operator==(const CopysetPeerInfo& other) const { return this->internalAddr == other.internalAddr && this->externalAddr == other.externalAddr; } @@ -72,7 +73,7 @@ struct CopysetPeerInfo { }; template -inline std::ostream &operator<<(std::ostream &os, const CopysetPeerInfo &c) { +inline std::ostream& operator<<(std::ostream& os, const CopysetPeerInfo& c) { os << "peer id : " << c.peerID << ", internal address : " << c.internalAddr.ToString() << ", external address : " << c.externalAddr.ToString(); @@ -81,23 +82,25 @@ inline std::ostream &operator<<(std::ostream &os, const CopysetPeerInfo &c) { } // copyset's informations including peer and leader information -template struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { - // leader存在变更可能标志位 +template +struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { + // Possible flag bits for leader changes bool leaderMayChange_ = false; - // 当前copyset的节点信息 + // Node information of the current copyset std::vector> csinfos_; - // leader在本copyset信息中的索引,用于后面避免重复尝试同一个leader + // The index of the leader in this copyset information is used to avoid + // repeated attempts at the same leader in the future int16_t leaderindex_ = -1; - // 当前copyset的id信息 + // The ID information of the current copyset CopysetID cpid_ = 0; LogicPoolID lpid_ = 0; - // 用于保护对copyset信息的修改 + // Used to protect modifications to copyset information SpinLock spinlock_; CopysetInfo() = default; ~CopysetInfo() = default; - CopysetInfo &operator=(const CopysetInfo &other) { + CopysetInfo& operator=(const CopysetInfo& other) { this->cpid_ = other.cpid_; this->lpid_ = other.lpid_; this->csinfos_ = other.csinfos_; @@ -106,9 +109,11 @@ template struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { return *this; } - CopysetInfo(const CopysetInfo &other) - : leaderMayChange_(other.leaderMayChange_), csinfos_(other.csinfos_), - leaderindex_(other.leaderindex_), cpid_(other.cpid_), + CopysetInfo(const CopysetInfo& other) + : leaderMayChange_(other.leaderMayChange_), + csinfos_(other.csinfos_), + 
leaderindex_(other.leaderindex_),
+          cpid_(other.cpid_),
           lpid_(other.lpid_) {}

     CopysetInfo(CopysetInfo&& other) noexcept
@@ -142,11 +147,11 @@
     }

     /**
-     * 获取当前leader的索引
+     * Get the index of the current leader
      */
     int16_t GetCurrentLeaderIndex() const { return leaderindex_; }

-    bool GetCurrentLeaderID(T *id) const {
+    bool GetCurrentLeaderID(T* id) const {
         if (leaderindex_ >= 0) {
             if (static_cast(csinfos_.size()) < leaderindex_) {
                 return false;
@@ -160,10 +165,11 @@
     }

     /**
-     * 更新leaderindex,如果leader不在当前配置组中,则返回-1
-     * @param: addr为新的leader的地址信息
+     * Update the leader index; if the leader is not in the current
+     * configuration group, return -1
+     * @param: addr is the address information of the new leader
      */
-    int UpdateLeaderInfo(const PeerAddr &addr,
+    int UpdateLeaderInfo(const PeerAddr& addr,
                          CopysetPeerInfo csInfo = CopysetPeerInfo()) {
         VLOG(3) << "update leader info, pool " << lpid_ << ", copyset " << cpid_
                 << ", current leader " << addr.ToString();
@@ -179,7 +185,8 @@
             tempindex++;
         }

-        // 新的addr不在当前copyset内,如果csInfo不为空,那么将其插入copyset
+        // The new addr is not within the current copyset. If csInfo is not
+        // empty, insert it into the copyset
         if (!exists && !csInfo.IsEmpty()) {
             csinfos_.push_back(csInfo);
         } else if (exists == false) {
@@ -198,8 +205,10 @@
      * @param[out]: peer id
      * @param[out]: ep
      */
-    int GetLeaderInfo(T *peerid, EndPoint *ep) {
-        // 第一次获取leader,如果当前leader信息没有确定,返回-1,由外部主动发起更新leader
+    int GetLeaderInfo(T* peerid, EndPoint* ep) {
+        // On the first leader fetch, if the current leader information is not
+        // yet determined, return -1 and let the caller actively initiate a
+        // leader update
         if (leaderindex_ < 0 ||
             leaderindex_ >= static_cast(csinfos_.size())) {
             LOG(INFO) << "GetLeaderInfo pool " << lpid_ << ", copyset " << cpid_
@@ -219,32 +228,32 @@
     }

     /**
-     * 添加copyset的peerinfo
-     * @param: csinfo为待添加的peer信息
+     * Add peer info to the copyset
+     * @param: csinfo is the peer information to be added
      */
-    void AddCopysetPeerInfo(const CopysetPeerInfo &csinfo) {
+    void AddCopysetPeerInfo(const CopysetPeerInfo& csinfo) {
         spinlock_.Lock();
         csinfos_.push_back(csinfo);
         spinlock_.UnLock();
     }

     /**
-     * 当前CopysetInfo是否合法
+     * Whether the current CopysetInfo is valid
      */
     bool IsValid() const { return !csinfos_.empty(); }

     /**
-     * 更新leaderindex
+     * Update the leader index
      */
     void UpdateLeaderIndex(int index) { leaderindex_ = index; }

     /**
-     * 当前copyset是否存在对应的chunkserver address
-     * @param: addr需要检测的chunkserver
-     * @return: true存在;false不存在
+     * Whether the current copyset contains the given chunkserver address
+     * @param: addr The chunkserver to check
+     * @return: true if present; false otherwise
      */
-    bool HasPeerInCopyset(const PeerAddr &addr) const {
-        for (const auto &peer : csinfos_) {
+    bool HasPeerInCopyset(const PeerAddr& addr) const {
+        for (const auto& peer : csinfos_) {
             if (peer.internalAddr == addr || peer.externalAddr == addr) {
                 return true;
             }
@@ -255,13 +264,13 @@
 };
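[Editor's note] A small usage illustration of the leader bookkeeping in CopysetInfo above (hypothetical snippet; the peers and the new leader address are assumed to have been filled in from MDS / GetLeader responses, and template arguments are omitted here as elsewhere in this excerpt):

    CopysetInfo info;
    info.AddCopysetPeerInfo(peer1);   // peers previously built from MDS data
    info.AddCopysetPeerInfo(peer2);

    PeerAddr newLeader;               // assume filled in from a GetLeader RPC
    if (info.UpdateLeaderInfo(newLeader) == 0) {
        // The leader index now points at newLeader; GetLeaderInfo succeeds.
    } else {
        // newLeader was not in the configuration and no csInfo was supplied,
        // so the caller should refetch the copyset configuration from MDS.
    }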
<< ", leader may change : " << copyset.leaderMayChange_ << ", peers : "; - for (auto &p : copyset.csinfos_) { + for (auto& p : copyset.csinfos_) { os << p << " "; } @@ -276,13 +285,13 @@ struct CopysetIDInfo { : lpid(logicpoolid), cpid(copysetid) {} }; -inline bool operator<(const CopysetIDInfo &cpidinfo1, - const CopysetIDInfo &cpidinfo2) { +inline bool operator<(const CopysetIDInfo& cpidinfo1, + const CopysetIDInfo& cpidinfo2) { return cpidinfo1.lpid <= cpidinfo2.lpid && cpidinfo1.cpid < cpidinfo2.cpid; } -inline bool operator==(const CopysetIDInfo &cpidinfo1, - const CopysetIDInfo &cpidinfo2) { +inline bool operator==(const CopysetIDInfo& cpidinfo1, + const CopysetIDInfo& cpidinfo2) { return cpidinfo1.cpid == cpidinfo2.cpid && cpidinfo1.lpid == cpidinfo2.lpid; } @@ -290,9 +299,12 @@ class FileSegment { public: FileSegment(SegmentIndex segmentIndex, uint32_t segmentSize, uint32_t discardGranularity) - : segmentIndex_(segmentIndex), segmentSize_(segmentSize), - discardGranularity_(discardGranularity), rwlock_(), - discardBitmap_(segmentSize_ / discardGranularity_), chunks_() {} + : segmentIndex_(segmentIndex), + segmentSize_(segmentSize), + discardGranularity_(discardGranularity), + rwlock_(), + discardBitmap_(segmentSize_ / discardGranularity_), + chunks_() {} /** * @brief Confirm if all bit was discarded @@ -312,7 +324,7 @@ class FileSegment { * @brief Get internal bitmap for unit-test * @return Internal bitmap */ - Bitmap &GetBitmap() { return discardBitmap_; } + Bitmap& GetBitmap() { return discardBitmap_; } void SetBitmap(const uint64_t offset, const uint64_t length); void ClearBitmap(const uint64_t offset, const uint64_t length); @@ -370,14 +382,15 @@ inline void FileSegment::ClearBitmap(const uint64_t offset, enum class FileSegmentLockType { Read, Write }; -template class FileSegmentLockGuard { +template +class FileSegmentLockGuard { public: - explicit FileSegmentLockGuard(FileSegment *segment) : segment_(segment) { + explicit FileSegmentLockGuard(FileSegment* segment) : segment_(segment) { Lock(); } - FileSegmentLockGuard(const FileSegmentLockGuard &) = delete; - FileSegmentLockGuard &operator=(const FileSegmentLockGuard &) = delete; + FileSegmentLockGuard(const FileSegmentLockGuard&) = delete; + FileSegmentLockGuard& operator=(const FileSegmentLockGuard&) = delete; ~FileSegmentLockGuard() { UnLock(); } @@ -392,7 +405,7 @@ template class FileSegmentLockGuard { void UnLock() { segment_->ReleaseLock(); } private: - FileSegment *segment_; + FileSegment* segment_; }; using FileSegmentReadLockGuard = diff --git a/src/client/request_closure.h b/src/client/request_closure.h index 326f76e10b..753f16aea4 100644 --- a/src/client/request_closure.h +++ b/src/client/request_closure.h @@ -63,83 +63,60 @@ class CURVE_CACHELINE_ALIGNMENT RequestClosure /** * @brief Get error code */ - virtual int GetErrorCode() { - return errcode_; - } + virtual int GetErrorCode() { return errcode_; } /** * @brief Set error code, 0 means success */ - virtual void SetFailed(int errorCode) { - errcode_ = errorCode; - } + virtual void SetFailed(int errorCode) { errcode_ = errorCode; } /** - * @brief 获取当前closure属于哪个request + * @brief to obtain which request the current closure belongs to */ - virtual RequestContext* GetReqCtx() { - return reqCtx_; - } + virtual RequestContext* GetReqCtx() { return reqCtx_; } /** - * @brief 获取当前request属于哪个iotracker + * @brief: Obtain which iotracker the current request belongs to */ - virtual IOTracker* GetIOTracker() { - return tracker_; - } + virtual IOTracker* GetIOTracker() { 
return tracker_; } /** - * @brief 设置当前属于哪一个iotracker + * @brief Set which iotracker currently belongs to */ - void SetIOTracker(IOTracker* ioTracker) { - tracker_ = ioTracker; - } + void SetIOTracker(IOTracker* ioTracker) { tracker_ = ioTracker; } /** - * @brief 设置所属的iomanager + * @brief Set the iomanager to which it belongs */ - void SetIOManager(IOManager* ioManager) { - ioManager_ = ioManager; - } + void SetIOManager(IOManager* ioManager) { ioManager_ = ioManager; } /** - * @brief 设置当前closure重试次数 + * @brief Set the current closure retry count */ - void IncremRetriedTimes() { - retryTimes_++; - } + void IncremRetriedTimes() { retryTimes_++; } - uint64_t GetRetriedTimes() const { - return retryTimes_; - } + uint64_t GetRetriedTimes() const { return retryTimes_; } /** - * 设置metric + * Set metric */ - void SetFileMetric(FileMetric* fm) { - metric_ = fm; - } + void SetFileMetric(FileMetric* fm) { metric_ = fm; } /** - * 获取metric指针 + * Get metric pointer */ - FileMetric* GetMetric() const { - return metric_; - } + FileMetric* GetMetric() const { return metric_; } /** - * 获取下一次rpc超时时间, rpc超时时间实现了指数退避的策略 + * Obtain the next RPC timeout, which implements an exponential backoff + * strategy */ - uint64_t GetNextTimeoutMS() const { - return nextTimeoutMS_; - } + uint64_t GetNextTimeoutMS() const { return nextTimeoutMS_; } /** - * 设置下次重试超时时间 + * Set the next retry timeout time */ - void SetNextTimeOutMS(uint64_t timeout) { - nextTimeoutMS_ = timeout; - } + void SetNextTimeOutMS(uint64_t timeout) { nextTimeoutMS_ = timeout; } bool IsSlowRequest() const { return slowRequest_; } @@ -153,25 +130,25 @@ class CURVE_CACHELINE_ALIGNMENT RequestClosure // whether own inflight count bool ownInflight_ = false; - // 当前request的错误码 + // The error code of the current request int errcode_ = -1; - // 当前request的tracker信息 + // Tracker information for the current request IOTracker* tracker_ = nullptr; - // closure的request信息 + // Request information for closures RequestContext* reqCtx_ = nullptr; - // metric信息 + // metric Information FileMetric* metric_ = nullptr; - // 重试次数 + // Number of retries uint64_t retryTimes_ = 0; - // 当前closure属于的iomanager + // The iomanager to which the current closure belongs IOManager* ioManager_ = nullptr; - // 下一次rpc超时时间 + // Next RPC timeout uint64_t nextTimeoutMS_ = 0; // create time of this closure(in millisecond) diff --git a/src/client/request_context.h b/src/client/request_context.h index 0b7c9db649..76d2acf4c9 100644 --- a/src/client/request_context.h +++ b/src/client/request_context.h @@ -28,9 +28,9 @@ #include #include +#include "include/curve_compiler_specific.h" #include "src/client/client_common.h" #include "src/client/request_closure.h" -#include "include/curve_compiler_specific.h" namespace curve { namespace client { @@ -73,13 +73,14 @@ struct CURVE_CACHELINE_ALIGNMENT RequestContext { done_ = nullptr; } - // chunk的ID信息,sender在发送rpc的时候需要附带其ID信息 - ChunkIDInfo idinfo_; + // The ID information of the chunk, which the sender needs to include when + // sending rpc + ChunkIDInfo idinfo_; - // 用户IO被拆分之后,其小IO有自己的offset和length - off_t offset_ = 0; - OpType optype_ = OpType::UNKNOWN; - size_t rawlength_ = 0; + // After user IO is split, its small IO has its own offset and length + off_t offset_ = 0; + OpType optype_ = OpType::UNKNOWN; + size_t rawlength_ = 0; // user's single io request will split into several requests // subIoIndex_ is an index of serveral requests @@ -91,29 +92,31 @@ struct CURVE_CACHELINE_ALIGNMENT RequestContext { // write data of current request butil::IOBuf 
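GetNextTimeoutMS() and SetNextTimeOutMS() above only store the timeout value; the exponential backoff itself is computed by the retry logic elsewhere in the client. A minimal sketch of such a policy, with illustrative constants rather than Curve's configured defaults:

    #include <algorithm>
    #include <cstdint>

    // Hypothetical backoff: double the RPC timeout on each retry, capped.
    // kMaxBackoffShift and kMaxTimeoutMS are made-up values.
    uint64_t NextTimeoutMS(uint64_t baseTimeoutMS, uint64_t retriedTimes) {
        const uint64_t kMaxBackoffShift = 6;
        const uint64_t kMaxTimeoutMS = 64ULL * 1000;
        uint64_t shift = std::min(retriedTimes, kMaxBackoffShift);
        return std::min(baseTimeoutMS << shift, kMaxTimeoutMS);
    }
    // e.g. done->SetNextTimeOutMS(NextTimeoutMS(base, done->GetRetriedTimes()));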
diff --git a/src/client/request_context.h b/src/client/request_context.h
index 0b7c9db649..76d2acf4c9 100644
--- a/src/client/request_context.h
+++ b/src/client/request_context.h
@@ -28,9 +28,9 @@
 #include
 #include
+#include "include/curve_compiler_specific.h"
 #include "src/client/client_common.h"
 #include "src/client/request_closure.h"
-#include "include/curve_compiler_specific.h"
 namespace curve {
 namespace client {
@@ -73,13 +73,14 @@ struct CURVE_CACHELINE_ALIGNMENT RequestContext {
        done_ = nullptr;
    }
-    // chunk的ID信息,sender在发送rpc的时候需要附带其ID信息
-    ChunkIDInfo idinfo_;
+    // The ID information of the chunk, which the sender needs to include
+    // when sending the rpc
+    ChunkIDInfo idinfo_;
-    // 用户IO被拆分之后,其小IO有自己的offset和length
-    off_t offset_ = 0;
-    OpType optype_ = OpType::UNKNOWN;
-    size_t rawlength_ = 0;
+    // After the user IO is split, each small IO has its own offset and length
+    off_t offset_ = 0;
+    OpType optype_ = OpType::UNKNOWN;
+    size_t rawlength_ = 0;
    // user's single io request will split into several requests
    // subIoIndex_ is an index of serveral requests
@@ -91,29 +92,31 @@ struct CURVE_CACHELINE_ALIGNMENT RequestContext {
    // write data of current request
    butil::IOBuf writeData_;
-    // 因为RPC都是异步发送,因此在一个Request结束时,RPC回调调用当前的done
-    // 来告知当前的request结束了
-    RequestClosure* done_ = nullptr;
+    // Because RPCs are sent asynchronously, when a request finishes, the RPC
+    // callback invokes this done to signal that the request is complete
+    RequestClosure* done_ = nullptr;
    // file id
    uint64_t fileId_;
    // file epoch
    uint64_t epoch_;
-    // request的版本信息
-    uint64_t seq_ = 0;
+    // Version information of the request
+    uint64_t seq_ = 0;
-    // 这个对应的GetChunkInfo的出参
-    ChunkInfoDetail* chunkinfodetail_ = nullptr;
+    // The output parameter of the corresponding GetChunkInfo call
+    ChunkInfoDetail* chunkinfodetail_ = nullptr;
-    // clone chunk请求需要携带源chunk的location及所需要创建的chunk的大小
-    uint32_t chunksize_ = 0;
-    std::string location_;
-    RequestSourceInfo sourceInfo_;
-    // create clone chunk时候用于修改chunk的correctedSn
-    uint64_t correctedSeq_ = 0;
+    // A clone chunk request needs to carry the location of the source chunk
+    // and the size of the chunk to be created
+    uint32_t chunksize_ = 0;
+    std::string location_;
+    RequestSourceInfo sourceInfo_;
+    // The correctedSn used to modify a chunk when creating a clone chunk
+    uint64_t correctedSeq_ = 0;
-    // 当前request context id
-    uint64_t id_ = 0;
+    // Current request context id
+    uint64_t id_ = 0;
    static RequestContext* NewInitedRequestContext() {
        RequestContext* ctx = new (std::nothrow) RequestContext();
@@ -139,10 +142,8 @@ inline std::ostream& operator<<(std::ostream& os,
    os << "logicpool id = " << reqCtx.idinfo_.lpid_
       << ", copyset id = " << reqCtx.idinfo_.cpid_
       << ", chunk id = " << reqCtx.idinfo_.cid_
-       << ", offset = " << reqCtx.offset_
-       << ", length = " << reqCtx.rawlength_
-       << ", sub-io index = " << reqCtx.subIoIndex_
-       << ", sn = " << reqCtx.seq_
+       << ", offset = " << reqCtx.offset_ << ", length = " << reqCtx.rawlength_
+       << ", sub-io index = " << reqCtx.subIoIndex_ << ", sn = " << reqCtx.seq_
       << ", source info = " << reqCtx.sourceInfo_;
    return os;
diff --git a/src/client/request_scheduler.cpp b/src/client/request_scheduler.cpp
index e723126235..939115e210 100644
--- a/src/client/request_scheduler.cpp
+++ b/src/client/request_scheduler.cpp
@@ -25,9 +25,9 @@
 #include
 #include
-#include "src/client/request_context.h"
-#include "src/client/request_closure.h"
 #include "src/client/chunk_closure.h"
+#include "src/client/request_closure.h"
+#include "src/client/request_context.h"
 namespace curve {
 namespace client {
@@ -35,8 +35,7 @@ namespace client {
 RequestScheduler::~RequestScheduler() {}
 int RequestScheduler::Init(const RequestScheduleOption& reqSchdulerOpt,
-                           MetaCache* metaCache,
-                           FileMetric* fm) {
+                           MetaCache* metaCache, FileMetric* fm) {
    blockIO_.store(false);
    reqschopt_ = reqSchdulerOpt;
@@ -58,8 +57,7 @@ int RequestScheduler::Init(const RequestScheduleOption& reqSchdulerOpt,
    }
    LOG(INFO) << "RequestScheduler conf info: "
-              << "scheduleQueueCapacity = "
-              << reqschopt_.scheduleQueueCapacity
+              << "scheduleQueueCapacity = " << reqschopt_.scheduleQueueCapacity
              << ", scheduleThreadpoolSize = "
              << reqschopt_.scheduleThreadpoolSize;
    return 0;
@@ -77,7 +75,7 @@ int RequestScheduler::Fini() {
    if (running_.exchange(false, std::memory_order_acq_rel)) {
        for (int i = 0; i < threadPool_.NumOfThreads(); ++i) {
            // notify the wait thread
-            BBQItem stopReq(nullptr, true);
+            BBQItem stopReq(nullptr, true);
            queue_.PutBack(stopReq);
        }
        threadPool_.Stop();
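The Fini() hunk above uses a classic poison-pill shutdown: one sentinel BBQItem per worker thread is pushed into the blocking queue so that every consumer wakes up and exits exactly once. A self-contained sketch of the same pattern, with std::queue and std::condition_variable standing in for Curve's BoundedBlockingDeque:

    #include <condition_variable>
    #include <mutex>
    #include <queue>
    #include <thread>
    #include <vector>

    struct Item { void* payload; bool stop; };  // stop == true is the poison pill

    std::queue<Item> q;
    std::mutex m;
    std::condition_variable cv;

    void Worker() {
        for (;;) {
            std::unique_lock<std::mutex> lk(m);
            cv.wait(lk, [] { return !q.empty(); });
            Item it = q.front();
            q.pop();
            if (it.stop) return;  // each pill stops exactly one worker
            // ... process it.payload ...
        }
    }

    void Shutdown(std::vector<std::thread>& workers) {
        {
            std::lock_guard<std::mutex> lk(m);
            for (std::size_t i = 0; i < workers.size(); ++i) q.push({nullptr, true});
        }
        cv.notify_all();
        for (auto& t : workers) t.join();
    }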
@@ -89,7 +87,7 @@ int RequestScheduler::Fini() {
 int RequestScheduler::ScheduleRequest(
     const std::vector& requests) {
     if (running_.load(std::memory_order_acquire)) {
-        /* TODO(wudemiao): 后期考虑 qos */
+        /* TODO(wudemiao): consider QoS later on */
         for (auto it : requests) {
             // skip the fake request
             if (!it->idinfo_.chunkExist) {
@@ -99,7 +97,7 @@ int RequestScheduler::ScheduleRequest(
                 continue;
             }
-            BBQItem req(it);
+            BBQItem req(it);
             queue_.PutBack(req);
         }
         return 0;
@@ -107,18 +105,18 @@ int RequestScheduler::ScheduleRequest(
     return -1;
 }
-int RequestScheduler::ScheduleRequest(RequestContext *request) {
+int RequestScheduler::ScheduleRequest(RequestContext* request) {
     if (running_.load(std::memory_order_acquire)) {
-        BBQItem req(request);
+        BBQItem req(request);
         queue_.PutBack(req);
         return 0;
     }
     return -1;
 }
-int RequestScheduler::ReSchedule(RequestContext *request) {
+int RequestScheduler::ReSchedule(RequestContext* request) {
     if (running_.load(std::memory_order_acquire)) {
-        BBQItem req(request);
+        BBQItem req(request);
         queue_.PutFront(req);
         return 0;
     }
@@ -126,14 +124,17 @@ int RequestScheduler::ReSchedule(RequestContext *request) {
 }
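A note on the queueing semantics just shown: ScheduleRequest() appends to the tail of the deque while ReSchedule() pushes to the head, so retried RPCs overtake newly issued IO. Reduced to plain std::deque operations (a sketch, not the BoundedBlockingDeque API):

    #include <deque>

    struct RequestContext;
    std::deque<RequestContext*> queue;  // stand-in for queue_

    void Schedule(RequestContext* req) { queue.push_back(req); }     // new IO waits its turn
    void ReSchedule(RequestContext* req) { queue.push_front(req); }  // retries served first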
 void RequestScheduler::WakeupBlockQueueAtExit() {
-    // 在scheduler退出的时候要把队列的内容清空, 通知copyset client
-    // 当前操作是退出状态,copyset client会针对inflight RPC做响应处理
-    // 正常情况下队列内容一定会在Fini调用结束之后全部清空
-    // 但是在session刷新失败的时候,scheduler无法继续下发
-    // RPC请求,所以需要设置blockingQueue_标志,告知scheduler
-    // 把队列里内容统统扔到copyset client,因为在session
-    // 续约失败后copyset client会将IO全部失败返回,scheduler
-    // 模块不需要处理具体RPC请求,由copyset client负责。
+    // When the scheduler exits, the queue must be drained and the copyset
+    // client notified that we are in the exit state, so that it can handle
+    // the inflight RPCs. Normally the queue is fully drained once Fini()
+    // returns. But when a session refresh has failed, the scheduler cannot
+    // keep issuing RPC requests, so the blockingQueue_ flag is set to tell
+    // the scheduler to hand everything in the queue over to the copyset
+    // client: after a failed lease renewal the copyset client fails all IO
+    // back to the caller, so the scheduler module does not need to process
+    // the individual RPC requests itself.
    client_.ResetExitFlag();
    blockingQueue_ = false;
    std::atomic_thread_fence(std::memory_order_acquire);
    leaseRefreshcv_.notify_all();
@@ -151,8 +152,8 @@ void RequestScheduler::Process() {
            ProcessOne(req);
        } else {
            /**
-             * 一旦遇到stop item,所有线程都可以退出,因为此时
-             * queue里面所有的request都被处理完了
+             * Once a stop item is encountered, all threads can exit, because
+             * at that point all requests in the queue have been processed
             */
            stop_.store(true, std::memory_order_release);
        }
@@ -172,8 +173,8 @@ void RequestScheduler::ProcessOne(RequestContext* ctx) {
        case OpType::WRITE:
            ctx->done_->GetInflightRPCToken();
            client_.WriteChunk(ctx->idinfo_, ctx->fileId_, ctx->epoch_,
-                               ctx->seq_, ctx->writeData_,
-                               ctx->offset_, ctx->rawlength_, ctx->sourceInfo_,
+                               ctx->seq_, ctx->writeData_, ctx->offset_,
+                               ctx->rawlength_, ctx->sourceInfo_,
                               guard.release());
            break;
        case OpType::READ_SNAP:
@@ -197,11 +198,12 @@ void RequestScheduler::ProcessOne(RequestContext* ctx) {
                                guard.release());
            break;
        default:
-            /* TODO(wudemiao) 后期整个链路错误发统一了在处理 */
+            /* TODO(wudemiao): unify the error handling along the whole IO
+             * path later */
            ctx->done_->SetFailed(-1);
            LOG(ERROR) << "unknown op type: OpType::UNKNOWN";
    }
 }
-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve
diff --git a/src/client/request_scheduler.h b/src/client/request_scheduler.h
index 752f72bcb0..f00ded5bc1 100644
--- a/src/client/request_scheduler.h
+++ b/src/client/request_scheduler.h
@@ -25,88 +25,86 @@
 #include
-#include "src/common/uncopyable.h"
+#include "include/curve_compiler_specific.h"
+#include "src/client/client_common.h"
 #include "src/client/config_info.h"
+#include "src/client/copyset_client.h"
 #include "src/common/concurrent/bounded_blocking_queue.h"
 #include "src/common/concurrent/thread_pool.h"
-#include "src/client/client_common.h"
-#include "src/client/copyset_client.h"
-#include "include/curve_compiler_specific.h"
+#include "src/common/uncopyable.h"
 namespace curve {
 namespace client {
-using curve::common::ThreadPool;
-using curve::common::BoundedBlockingDeque;
 using curve::common::BBQItem;
+using curve::common::BoundedBlockingDeque;
+using curve::common::ThreadPool;
 using curve::common::Uncopyable;
 struct RequestContext;
 /**
- * 请求调度器,上层拆分的I/O会交给Scheduler的线程池
- * 分发到具体的ChunkServer,后期QoS也会放在这里处理
+ * Request scheduler: the I/O split by the upper layer is handed to the
+ * scheduler's thread pool and dispatched to the specific chunkservers;
+ * QoS will also be handled here in the future
 */
 class RequestScheduler : public Uncopyable {
 public:
    RequestScheduler()
-        : running_(false),
-          stop_(true),
-          client_(),
-          blockingQueue_(true) {}
+        : running_(false), stop_(true), client_(), blockingQueue_(true) {}
    virtual ~RequestScheduler();
    /**
-     * 初始化
-     * @param: reqSchdulerOpt为scheduler的配置选项
-     * @param: metacache为meta信息
-     * @param: filematric为文件的metric信息
+     * Initialize
+     * @param: reqSchdulerOpt is the configuration option of the scheduler
+     * @param: metaCache is the meta information cache
+     * @param: fileMetric is the metric information of the file
     */
    virtual int Init(const RequestScheduleOption& reqSchdulerOpt,
-                     MetaCache *metaCache,
-                     FileMetric* fileMetric = nullptr);
+                     MetaCache* metaCache, FileMetric* fileMetric = nullptr);
    /**
-     * 启动Scheduler的线程池开始处理request
-     * 启动之后才能push request,除此之外,只有当
-     * queue里面的任务都被处理完了,才会Scheduler
-     * 的 thread pool里面的所有线程都退出
-     * @return 0成功,-1失败
+     * Start the scheduler's thread pool to begin processing requests.
+     * Requests can only be pushed after starting. Furthermore, only when
+     * all tasks in the queue have been processed will all threads in the
+     * scheduler's thread pool exit.
+     * @return 0 for success, -1 for failure
     */
    virtual int Run();
    /**
-     * Stop Scheduler,一旦调用了Fini,那么
-     * 此Scheduler不再接收新的request
-     * @return 0成功,-1失败
+     * Stop the scheduler; once Fini() has been called, the scheduler
+     * no longer accepts new requests
+     * @return 0 on success, -1 on failure
     */
    virtual int Fini();
    /**
-     * 将request push到Scheduler处理
-     * @param requests:请求列表
-     * @return 0成功,-1失败
+     * Push requests to the scheduler for processing
+     * @param requests: the request list
+     * @return 0 on success, -1 on failure
     */
    virtual int ScheduleRequest(const std::vector& requests);
    /**
-     * 将request push到Scheduler处理
-     * @param request:一个request
-     * @return 0成功,-1失败
+     * Push a request to the scheduler for processing
+     * @param request: a single request
+     * @return 0 on success, -1 on failure
     */
-    virtual int ScheduleRequest(RequestContext *request);
+    virtual int ScheduleRequest(RequestContext* request);
    /**
-     * 对于需要重新入队的RPC将其放在头部
+     * Requests that need to be requeued are placed at the head of the queue
     */
-    virtual int ReSchedule(RequestContext *request);
+    virtual int ReSchedule(RequestContext* request);
    /**
-     * 关闭scheduler之前如果队列在sessionnotvalid睡眠就将其唤醒
+     * Before shutting down the scheduler, wake up the queue if it is
+     * sleeping on an invalid session
     */
    virtual void WakeupBlockQueueAtExit();
    /**
-     * 当LeaseExecutor续约失败的时候,调用LeaseTimeoutDisableIO
-     * 后续的IO调度会被阻塞
+     * Called when the LeaseExecutor fails to renew the lease
+     * (LeaseTimeoutDisableIO); subsequent IO scheduling is blocked
     */
    void LeaseTimeoutBlockIO() {
        std::unique_lock lk(leaseRefreshmtx_);
@@ -115,8 +113,8 @@ class RequestScheduler : public Uncopyable {
    }
    /**
-     * 当lease又续约成功的时候,LeaseExecutor调用该接口恢复IO,
-     * IO调度被恢复
+     * When the lease is renewed successfully, the LeaseExecutor calls this
+     * interface to resume IO; IO scheduling is restored
     */
    void ResumeIO() {
        std::unique_lock lk(leaseRefreshmtx_);
@@ -126,7 +124,7 @@ class RequestScheduler : public Uncopyable {
    }
    /**
-     * 测试使用,获取队列
+     * For testing purposes, get the queue
     */
    BoundedBlockingDeque>* GetQueue() {
        return &queue_;
@@ -134,14 +132,16 @@ class RequestScheduler : public Uncopyable {
 private:
    /**
-     * Thread pool的运行函数,会从queue中取request进行处理
+     * Run function of the thread pool; it takes requests from the queue
+     * and processes them
     */
    void Process();
    void ProcessOne(RequestContext* ctx);
    void WaitValidSession() {
-        // lease续约失败的时候需要阻塞IO直到续约成功
+        // When the lease renewal fails, IO must be blocked until the
+        // renewal succeeds
        if (blockIO_.load(std::memory_order_acquire) && blockingQueue_) {
            std::unique_lock lk(leaseRefreshmtx_);
            leaseRefreshcv_.wait(lk, [&]() -> bool {
@@ -151,32 +151,34 @@ class RequestScheduler : public Uncopyable {
    }
 private:
-    // 线程池和queue容量的配置参数
+    // Configuration parameters for the thread pool and queue capacity
    RequestScheduleOption reqschopt_;
-    // 存放 request 的队列
-    BoundedBlockingDeque> queue_;
-    // 处理 request 的线程池
+    // Queue that stores the requests
+    BoundedBlockingDeque> queue_;
+    // Thread pool that processes the requests
    ThreadPool threadPool_;
-    // Scheduler 运行标记,只有运行了,才接收 request
+    // Running flag of the scheduler; requests are only accepted while it
+    // is running
    std::atomic running_;
-    // stop thread pool 标记,当调用 Scheduler Fini
-    // 之后且 queue 里面的 request 都处理完了,就可以
-    // 让所有处理线程退出了
+    // Stop flag of the thread pool: once Fini() has been called and all
+    // requests in the queue have been processed, all processing threads
+    // are allowed to exit
    std::atomic stop_;
-    // 访问复制组Chunk的客户端
+    // Client that accesses the chunks of the replication groups
    CopysetClient client_;
-    // 续约失败,卡住IO
+    // Lease renewal failed; IO is held back
    std::atomic blockIO_;
-    // 此锁与LeaseRefreshcv_条件变量配合使用
-    // 在leasee续约失败的时候,所有新下发的IO被阻塞直到续约成功
-    std::mutex leaseRefreshmtx_;
-    // 条件变量,用于唤醒和hang IO
+    // This lock is used together with the leaseRefreshcv_ condition
+    // variable: when lease renewal fails, all newly issued IO is blocked
+    // until the renewal succeeds
+    std::mutex leaseRefreshmtx_;
+    // Condition variable used to block and wake up IO
    std::condition_variable leaseRefreshcv_;
-    // 阻塞队列
+    // Blocking-queue flag
    bool blockingQueue_;
 };
-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve
 #endif  // SRC_CLIENT_REQUEST_SCHEDULER_H_
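The lease gate declared in the header above is a small condition-variable pattern: a failed renewal flips blockIO_, scheduler workers park in WaitValidSession(), and a successful renewal clears the flag and broadcasts. Reduced to its essentials (the method names mirror the header, but the class around them is a hypothetical sketch):

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    class LeaseGate {
        std::atomic<bool> blockIO_{false};
        std::mutex mtx_;
        std::condition_variable cv_;

     public:
        void LeaseTimeoutBlockIO() {  // renewal failed: close the gate
            std::unique_lock<std::mutex> lk(mtx_);
            blockIO_.store(true, std::memory_order_release);
        }
        void ResumeIO() {  // renewal succeeded: reopen and wake everyone
            std::unique_lock<std::mutex> lk(mtx_);
            blockIO_.store(false, std::memory_order_release);
            cv_.notify_all();
        }
        void WaitValidSession() {  // called by scheduler worker threads
            std::unique_lock<std::mutex> lk(mtx_);
            cv_.wait(lk, [this] {
                return !blockIO_.load(std::memory_order_acquire);
            });
        }
    };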
diff --git a/src/client/request_sender.h b/src/client/request_sender.h
index f288160267..a08be423d6 100644
--- a/src/client/request_sender.h
+++ b/src/client/request_sender.h
@@ -29,168 +29,157 @@
 #include
-#include "src/client/client_config.h"
-#include "src/client/client_common.h"
-#include "src/client/chunk_closure.h"
 #include "include/curve_compiler_specific.h"
+#include "src/client/chunk_closure.h"
+#include "src/client/client_common.h"
+#include "src/client/client_config.h"
 #include "src/client/request_context.h"
-namespace curve {
-namespace client {
-
-/**
- * 一个RequestSender负责管理一个ChunkServer的所有
- * connection,目前一个ChunkServer仅有一个connection
- */
-class RequestSender {
- public:
-    RequestSender(ChunkServerID chunkServerId,
-                  butil::EndPoint serverEndPoint)
-        : chunkServerId_(chunkServerId),
-          serverEndPoint_(serverEndPoint),
-          channel_() {}
-    virtual ~RequestSender() {}
-
-    int Init(const IOSenderOption& ioSenderOpt);
-
-    /**
-     * 读Chunk
-     * @param idinfo为chunk相关的id信息
-     * @param sn:文件版本号
-     * @param offset:读的偏移
-     * @param length:读的长度
-     * @param sourceInfo 数据源信息
-     * @param done:上一层异步回调的closure
-     */
-    int ReadChunk(const ChunkIDInfo& idinfo,
-                  uint64_t sn,
-                  off_t offset,
-                  size_t length,
-                  const RequestSourceInfo& sourceInfo,
-                  ClientClosure *done);
-
-    /**
-     * 写Chunk
-     * @param idinfo为chunk相关的id信息
-     * @param fileId: file id
-     * @param epoch: file epoch
-     * @param sn:文件版本号
-     * @param data 要写入的数据
-     *@param offset:写的偏移
-     * @param length:写的长度
-     * @param sourceInfo 数据源信息
-     * @param done:上一层异步回调的closure
-     */
-    int WriteChunk(const ChunkIDInfo& idinfo,
-                   uint64_t fileId,
-                   uint64_t epoch,
-                   uint64_t sn,
-                   const butil::IOBuf& data,
-                   off_t offset,
-                   size_t length,
-                   const RequestSourceInfo& sourceInfo,
-                   ClientClosure *done);
-
-    /**
-     * 读Chunk快照文件
-     * @param idinfo为chunk相关的id信息
-     * @param sn:文件版本号
-     * @param offset:读的偏移
-     * @param length:读的长度
-     * @param done:上一层异步回调的closure
-     */
-    int ReadChunkSnapshot(const ChunkIDInfo& idinfo,
-                          uint64_t sn,
-                          off_t offset,
-                          size_t length,
-                          ClientClosure *done);
-
-    /**
-     * 删除此次转储时产生的或者历史遗留的快照
-     * 如果转储过程中没有产生快照,则修改chunk的correctedSn
-     * @param idinfo为chunk相关的id信息
-     * @param correctedSn:chunk需要修正的版本号
-     * @param done:上一层异步回调的closure
-     */
-    int DeleteChunkSnapshotOrCorrectSn(const ChunkIDInfo& idinfo,
-                                       uint64_t correctedSn,
-                                       ClientClosure *done);
-
-    /**
-     * 获取chunk文件的信息
-     * @param idinfo为chunk相关的id信息
-     * @param done:上一层异步回调的closure
-     * @param retriedTimes:已经重试了几次
-     */
-    int GetChunkInfo(const ChunkIDInfo& idinfo,
-                     ClientClosure *done);
-
-    /**
-     * @brief lazy 创建clone chunk
-     * @detail
-     *  - location的格式定义为 A@B的形式。
-     *  - 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址;
-     *  - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs
-     *
-     * @param idinfo为chunk相关的id信息
-     * @param done:上一层异步回调的closure
-     * @param:location 数据源的url
-     * @param:sn chunk的序列号
-     * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn
-     * @param:chunkSize chunk的大小
-     * @param retriedTimes:已经重试了几次
-     *
-     * @return 错误码
-     */
-    int CreateCloneChunk(const ChunkIDInfo& idinfo,
-                         ClientClosure *done,
-                         const std::string &location,
-                         uint64_t sn,
-                         uint64_t correntSn,
-                         uint64_t chunkSize);
-
-    /**
-     * @brief 实际恢复chunk数据
-     * @param idinfo为chunk相关的id信息
-     * @param done:上一层异步回调的closure
-     * @param:offset 偏移
-     * @param:len 长度
-     * @param retriedTimes:已经重试了几次
-     *
-     * @return 错误码
-     */
-    int RecoverChunk(const ChunkIDInfo& idinfo,
-                     ClientClosure* done, uint64_t offset, uint64_t len);
-    /**
-     * 重置和Chunk Server的链接
-     * @param chunkServerId:Chunk Server唯一标识
-     * @param serverEndPoint:Chunk Server
-     * @return 0成功,-1失败
-     */
-    int ResetSender(ChunkServerID chunkServerId,
-                    butil::EndPoint serverEndPoint);
-
-    bool IsSocketHealth() {
-        return channel_.CheckHealth() == 0;
-    }
-
- private:
-    void UpdateRpcRPS(ClientClosure* done, OpType type) const;
-
-    void SetRpcStuff(ClientClosure* done, brpc::Controller* cntl,
-                     google::protobuf::Message* rpcResponse) const;
-
- private:
-    // Rpc stub配置
-    IOSenderOption iosenderopt_;
-    // ChunkServer 的唯一标识 id
-    ChunkServerID chunkServerId_;
-    // ChunkServer 的地址
-    butil::EndPoint serverEndPoint_;
-    brpc::Channel channel_; /* TODO(wudemiao): 后期会维护多个 channel */
-};
-
-}   // namespace client
-}   // namespace curve
-
-#endif  // SRC_CLIENT_REQUEST_SENDER_H_
+namespace curve
+{
+    namespace client
+    {
+
+        /**
+         * A RequestSender manages all the connections of one ChunkServer;
+         * currently a ChunkServer has only one connection
+         */
+        class RequestSender
+        {
+        public:
+            RequestSender(ChunkServerID chunkServerId, butil::EndPoint serverEndPoint)
+                : chunkServerId_(chunkServerId),
+                  serverEndPoint_(serverEndPoint),
+                  channel_() {}
+            virtual ~RequestSender() {}
+
+            int Init(const IOSenderOption &ioSenderOpt);
+
+            /**
+             * Read a chunk
+             * @param idinfo is the ID information related to the chunk
+             * @param sn: file version number
+             * @param offset: read offset
+             * @param length: read length
+             * @param sourceInfo: data source information
+             * @param done: the closure for the upper layer's asynchronous callback
+             */
+            int ReadChunk(const ChunkIDInfo &idinfo, uint64_t sn, off_t offset,
+                          size_t length, const RequestSourceInfo &sourceInfo,
+                          ClientClosure *done);
+
+            /**
+             * Write a chunk
+             * @param idinfo is the ID information related to the chunk
+             * @param fileId: file id
+             * @param epoch: file epoch
+             * @param sn: file version number
+             * @param data: the data to be written
+             * @param offset: write offset
+             * @param length: write length
+             * @param sourceInfo: data source information
+             * @param done: the closure for the upper layer's asynchronous callback
+             */
+            int WriteChunk(const ChunkIDInfo &idinfo, uint64_t fileId, uint64_t epoch,
+                           uint64_t sn, const butil::IOBuf &data, off_t offset,
+                           size_t length, const RequestSourceInfo &sourceInfo,
+                           ClientClosure *done);
+
+            /**
+             * Read a chunk snapshot file
+             * @param idinfo is the ID information related to the chunk
+             * @param sn: file version number
+             * @param offset: read offset
+             * @param length: read length
+             * @param done: the closure for the upper layer's asynchronous callback
+             */
+            int ReadChunkSnapshot(const ChunkIDInfo &idinfo, uint64_t sn, off_t offset,
+                                  size_t length, ClientClosure *done);
+
+            /**
+             * Delete the snapshots generated during this dump or left over from
+             * history. If no snapshot was generated during the dump, modify the
+             * chunk's correctedSn
+             * @param idinfo is the ID information related to the chunk
+             * @param correctedSn: the version number of the chunk that needs
+             * correction
+             * @param done: the closure for the upper layer's asynchronous callback
+             */
+            int DeleteChunkSnapshotOrCorrectSn(const ChunkIDInfo &idinfo,
+                                               uint64_t correctedSn,
+                                               ClientClosure *done);
+
+            /**
+             * Get the information of a chunk file
+             * @param idinfo is the ID information related to the chunk
+             * @param done: the closure for the upper layer's asynchronous callback
+             * @param retriedTimes: number of times already retried
+             */
+            int GetChunkInfo(const ChunkIDInfo &idinfo, ClientClosure *done);
+
+            /**
+             * @brief Lazily create a clone chunk
+             * @detail
+             *  - A location is defined in the form A@B.
+             *  - If the source data is on s3, the location format is uri@s3,
+             *    where uri is the address of the actual chunk object;
+             *  - If the source data is on curvefs, the location format is
+             *    /filename/chunkindex@cs
+             *
+             * @param idinfo is the ID information related to the chunk
+             * @param done: the closure for the upper layer's asynchronous callback
+             * @param: location, URL of the data source
+             * @param: sn is the sequence number of the chunk
+             * @param: correntSn is used to modify the chunk's correctedSn when
+             * creating the clone chunk
+             * @param: chunkSize is the size of the chunk
+             * @param retriedTimes: number of times already retried
+             *
+             * @return error code
+             */
+            int CreateCloneChunk(const ChunkIDInfo &idinfo, ClientClosure *done,
+                                 const std::string &location, uint64_t sn,
+                                 uint64_t correntSn, uint64_t chunkSize);
+
+            /**
+             * @brief Actually recover the chunk data
+             * @param idinfo is the ID information related to the chunk
+             * @param done: the closure for the upper layer's asynchronous callback
+             * @param: offset: offset
+             * @param: len: length
+             * @param retriedTimes: number of times already retried
+             *
+             * @return error code
+             */
+            int RecoverChunk(const ChunkIDInfo &idinfo, ClientClosure *done,
+                             uint64_t offset, uint64_t len);
+            /**
+             * Reset the connection to the Chunk Server
+             * @param chunkServerId: unique identifier of the Chunk Server
+             * @param serverEndPoint: Chunk Server endpoint
+             * @return 0 on success, -1 on failure
+             */
+            int ResetSender(ChunkServerID chunkServerId,
+                            butil::EndPoint serverEndPoint);
+
+            bool IsSocketHealth() { return channel_.CheckHealth() == 0; }
+
+        private:
+            void UpdateRpcRPS(ClientClosure *done, OpType type) const;
+
+            void SetRpcStuff(ClientClosure *done, brpc::Controller *cntl,
+                             google::protobuf::Message *rpcResponse) const;
+
+        private:
+            // Rpc stub configuration
+            IOSenderOption iosenderopt_;
+            // Unique ID of the ChunkServer
+            ChunkServerID chunkServerId_;
+            // Address of the ChunkServer
+            butil::EndPoint serverEndPoint_;
+            brpc::Channel channel_; /* TODO(wudemiao): Multiple channels will be
+                                       maintained in the later stage */
+        };
+
+    }  // namespace client
+}  // namespace curve
+
+#endif  // SRC_CLIENT_REQUEST_SENDER_H_
diff --git a/src/client/request_sender_manager.cpp b/src/client/request_sender_manager.cpp
index a5c77a793f..a5f7264e4b 100644
--- a/src/client/request_sender_manager.cpp
+++ b/src/client/request_sender_manager.cpp
@@ -30,8 +30,7 @@ namespace curve {
 namespace client {
 RequestSenderManager::SenderPtr RequestSenderManager::GetOrCreateSender(
-    const ChunkServerID& leaderId,
-    const butil::EndPoint& leaderAddr,
+    const ChunkServerID& leaderId, const butil::EndPoint& leaderAddr,
     const IOSenderOption& senderopt) {
     {
         curve::common::ReadLockGuard guard(rwlock_);
@@ -66,7 +65,7 @@ void RequestSenderManager::ResetSenderIfNotHealth(const ChunkServerID& csId) {
         return;
     }
-    // 检查是否健康
+    // Check whether it is healthy
     if (iter->second->IsSocketHealth()) {
         return;
     }
@@ -74,5 +73,5 @@ void RequestSenderManager::ResetSenderIfNotHealth(const ChunkServerID& csId) {
     senderPool_.erase(iter);
 }
-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve
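GetOrCreateSender() above takes a read lock for the common lookup path and only escalates to the write lock when a sender must be created; the key has to be re-checked under the write lock because another thread may have created the sender in the meantime. The generic shape of that double-checked pattern, with std::shared_mutex standing in for BthreadRWLock:

    #include <map>
    #include <memory>
    #include <shared_mutex>

    std::shared_mutex rwlock;                  // stand-in for rwlock_
    std::map<int, std::shared_ptr<int>> pool;  // stand-in for senderPool_

    std::shared_ptr<int> GetOrCreate(int id) {
        {
            std::shared_lock<std::shared_mutex> rd(rwlock);  // fast path
            auto it = pool.find(id);
            if (it != pool.end()) return it->second;
        }
        std::unique_lock<std::shared_mutex> wr(rwlock);  // slow path
        auto it = pool.find(id);  // re-check: someone may have raced us here
        if (it != pool.end()) return it->second;
        auto created = std::make_shared<int>(id);
        pool.emplace(id, created);
        return created;
    }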
diff --git a/src/client/request_sender_manager.h b/src/client/request_sender_manager.h
index 530d8c1c82..189fab3cc8 100644
--- a/src/client/request_sender_manager.h
+++ b/src/client/request_sender_manager.h
@@ -38,8 +38,8 @@ using curve::common::Uncopyable;
 class RequestSender;
 /**
- * 所有Chunk Server的request sender管理者,
- * 可以理解为Chunk Server的链接管理者
+ * Manager of the request senders of all chunkservers;
+ * it can be understood as the connection manager of the chunkservers
 */
 class RequestSenderManager : public Uncopyable {
 public:
@@ -47,30 +47,31 @@ class RequestSenderManager : public Uncopyable {
    RequestSenderManager() : rwlock_(), senderPool_() {}
    /**
-     * 获取指定leader id的sender,如果没有则根据leader
-     * 地址,创建新的 sender并返回
-     * @param leaderId:leader的id
-     * @param leaderAddr:leader的地址
-     * @return nullptr:get或者create失败,否则成功
+     * Get the sender of the specified leader id; if it does not exist,
+     * create a new sender from the leader address and return it
+     * @param leaderId: the ID of the leader
+     * @param leaderAddr: the address of the leader
+     * @return nullptr if the get or create failed, otherwise the sender
     */
    SenderPtr GetOrCreateSender(const ChunkServerID& leaderId,
                                const butil::EndPoint& leaderAddr,
                                const IOSenderOption& senderopt);
    /**
-     * @brief 如果csId对应的RequestSender不健康,就进行重置
+     * @brief If the RequestSender corresponding to csId is not healthy,
+     * reset it
     * @param csId chunkserver id
     */
    void ResetSenderIfNotHealth(const ChunkServerID& csId);
 private:
-    // 读写锁,保护senderPool_
+    // Read-write lock protecting senderPool_
    curve::common::BthreadRWLock rwlock_;
-    // 请求发送链接的map,以ChunkServer ID为key
+    // Map of the request-sending connections, keyed by ChunkServer ID
    std::unordered_map senderPool_;
 };
-}   // namespace client
-}   // namespace curve
+}  // namespace client
+}  // namespace curve
 #endif  // SRC_CLIENT_REQUEST_SENDER_MANAGER_H_
diff --git a/src/client/service_helper.cpp b/src/client/service_helper.cpp
index 70a7be6e34..3c8fbee5da 100644
--- a/src/client/service_helper.cpp
+++ b/src/client/service_helper.cpp
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+
 #include "src/client/client_config.h"
 #include "src/client/client_metric.h"
 #include "src/common/curve_define.h"
@@ -164,6 +165,7 @@ void ServiceHelper::ProtoCloneSourceInfo2Local(
 class GetLeaderProxy : public std::enable_shared_from_this {
    friend struct GetLeaderClosure;
+
 public:
    GetLeaderProxy()
        : proxyId_(getLeaderProxyId.fetch_add(1, std::memory_order_relaxed)),
          success_(false) {}
    /**
-     * @brief 等待GetLeader返回结果
-     * @param[out] leaderId leader的id
-     * @param[out] leaderAddr leader的ip地址
-     * @return 0 成功 / -1 失败
+     * @brief Wait for GetLeader to return a result
+     * @param[out] leaderId the ID of the leader
+     * @param[out] leaderAddr the IP address of the leader
+     * @return 0 on success / -1 on failure
     */
    int Wait(ChunkServerID* leaderId, PeerAddr* leaderAddr) {
        {
@@ -212,11 +214,11 @@ class GetLeaderProxy : public std::enable_shared_from_this {
    }
    /**
-     * @brief 发起GetLeader请求
-     * @param peerAddresses 除当前leader以外的peer地址
-     * @param logicPoolId getleader请求的logicpool id
-     * @param copysetId getleader请求的copyset id
-     * @param fileMetric metric统计
+     * @brief Initiate the GetLeader requests
+     * @param peerAddresses the peer addresses other than the current leader
+     * @param logicPoolId the logicpool id of the getleader request
+     * @param copysetId the copyset id of the getleader request
+     * @param fileMetric used for metric statistics
     */
    void StartGetLeader(const std::unordered_set& peerAddresses,
                        const GetLeaderRpcOption& rpcOption,
@@ -270,10 +272,10 @@ class GetLeaderProxy : public std::enable_shared_from_this {
    }
    /**
-     * @brief 处理异步请求结果
-     * @param callId rpc请求id
-     * @param success rpc请求是否成功
-     * @param peer rpc请求返回的leader信息
+     * @brief Process the result of an asynchronous request
+     * @param callId the rpc call id
+     * @param success whether the rpc request succeeded
+     * @param peer the leader information returned by the rpc request
     */
    void HandleResponse(brpc::CallId callId, bool success,
                        const curve::common::Peer& peer) {
@@ -289,7 +291,8
@@ class GetLeaderProxy : public std::enable_shared_from_this { continue; } - // cancel以后,后续的rpc请求回调仍然会执行,但是会标记为失败 + // After canceling, subsequent rpc request callbacks will still + // be executed, but will be marked as failed brpc::StartCancel(id); } @@ -301,10 +304,11 @@ class GetLeaderProxy : public std::enable_shared_from_this { success_ = true; finishCv_.notify_one(); } else { - // 删除当前call id + // Delete the current call id callIds_.erase(callId); - // 如果为空,说明是最后一个rpc返回,需要标记请求失败,并向上返回 + // If it is empty, it indicates that it is the last rpc returned, + // and the request needs to be marked as failed and returned upwards if (callIds_.empty()) { std::lock_guard ulk(finishMtx_); finish_ = true; @@ -317,24 +321,25 @@ class GetLeaderProxy : public std::enable_shared_from_this { private: uint64_t proxyId_; - // 是否完成请求 - // 1. 其中一个请求成功 - // 2. 最后一个请求返回 - // 都会标记为true + // Whether to complete the request + // 1. One of the requests was successful + // 2. Last request returned + // Will be marked as true bool finish_; bthread::ConditionVariable finishCv_; bthread::Mutex finishMtx_; - // 记录cntl id + // Record cntl id std::set callIds_; - // 请求是否成功 + // Is the request successful bool success_; - // leader信息 + // leader Information curve::common::Peer leader_; - // 保护callIds_/success_,避免异步rpc回调同时操作 + // Protect callIds_/success_, Avoiding asynchronous rpc callbacks from + // operating simultaneously bthread::Mutex mtx_; LogicPoolID logicPooldId_; @@ -367,17 +372,16 @@ void GetLeaderClosure::Run() { } else { success = true; LOG(INFO) << "GetLeader returned from " << cntl.remote_side() - << ", logicpool id = " << logicPoolId - << ", copyset id = " << copysetId - << ", proxy id = " << proxy->proxyId_ - << ", leader = " << response.DebugString(); + << ", logicpool id = " << logicPoolId + << ", copyset id = " << copysetId + << ", proxy id = " << proxy->proxyId_ + << ", leader = " << response.DebugString(); } proxy->HandleResponse(cntl.call_id(), success, response.leader()); } int ServiceHelper::GetLeader(const GetLeaderInfo& getLeaderInfo, - PeerAddr* leaderAddr, - ChunkServerID* leaderId, + PeerAddr* leaderAddr, ChunkServerID* leaderId, FileMetric* fileMetric) { const auto& peerInfo = getLeaderInfo.copysetPeerInfo; @@ -423,8 +427,8 @@ bool ServiceHelper::GetUserInfoFromFilename(const std::string& filename, return true; } -int ServiceHelper::CheckChunkServerHealth( - const butil::EndPoint& endPoint, int32_t requestTimeoutMs) { +int ServiceHelper::CheckChunkServerHealth(const butil::EndPoint& endPoint, + int32_t requestTimeoutMs) { brpc::Controller cntl; brpc::Channel httpChannel; brpc::ChannelOptions options; @@ -437,22 +441,22 @@ int ServiceHelper::CheckChunkServerHealth( return -1; } - // 访问 ip:port/health + // Accessing ip:port/health cntl.http_request().uri() = ipPort + "/health"; cntl.set_timeout_ms(requestTimeoutMs); httpChannel.CallMethod(nullptr, &cntl, nullptr, nullptr, nullptr); if (cntl.Failed()) { LOG(WARNING) << "CheckChunkServerHealth failed, " << cntl.ErrorText() - << ", url = " << cntl.http_request().uri(); + << ", url = " << cntl.http_request().uri(); return -1; } else { LOG(INFO) << "CheckChunkServerHealth success, " - << cntl.response_attachment() - << ", url = " << cntl.http_request().uri(); + << cntl.response_attachment() + << ", url = " << cntl.http_request().uri(); return 0; } } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/service_helper.h b/src/client/service_helper.h index 279c6a17f5..4de48afbf3 100644 --- 
a/src/client/service_helper.h +++ b/src/client/service_helper.h @@ -25,12 +25,13 @@ #include #include - #include -#include + +#include #include #include -#include +#include + #include "proto/cli2.pb.h" #include "proto/nameserver2.pb.h" #include "src/client/client_common.h" @@ -40,7 +41,7 @@ namespace curve { namespace client { -// GetLeader请求rpc参数信息 +// GetLeader request rpc parameter information struct GetLeaderRpcOption { uint32_t rpcTimeoutMs; @@ -48,29 +49,30 @@ struct GetLeaderRpcOption { : rpcTimeoutMs(rpcTimeoutMs) {} }; -// GetLeader请求对应的copyset信息及rpc相关参数信息 +// The copyset information and rpc related parameter information corresponding +// to the GetLeader request struct GetLeaderInfo { LogicPoolID logicPoolId; - CopysetID copysetId; + CopysetID copysetId; std::vector> copysetPeerInfo; - int16_t currentLeaderIndex; + int16_t currentLeaderIndex; GetLeaderRpcOption rpcOption; - GetLeaderInfo(const LogicPoolID& logicPoolId, - const CopysetID& copysetId, - const std::vector>& copysetPeerInfo, //NOLINT + GetLeaderInfo(const LogicPoolID& logicPoolId, const CopysetID& copysetId, + const std::vector>& + copysetPeerInfo, // NOLINT int16_t currentLeaderIndex, const GetLeaderRpcOption& rpcOption = GetLeaderRpcOption()) - : logicPoolId(logicPoolId), - copysetId(copysetId), - copysetPeerInfo(copysetPeerInfo), - currentLeaderIndex(currentLeaderIndex), - rpcOption(rpcOption) {} + : logicPoolId(logicPoolId), + copysetId(copysetId), + copysetPeerInfo(copysetPeerInfo), + currentLeaderIndex(currentLeaderIndex), + rpcOption(rpcOption) {} }; class GetLeaderProxy; -// GetLeader异步请求回调 +// GetLeader asynchronous request callback struct GetLeaderClosure : public google::protobuf::Closure { GetLeaderClosure(LogicPoolID logicPoolId, CopysetID copysetId, std::shared_ptr proxy) @@ -86,7 +88,7 @@ struct GetLeaderClosure : public google::protobuf::Closure { curve::chunkserver::GetLeaderResponse2 response; }; -// ServiceHelper是client端RPC服务的一些工具 +// ServiceHelper is a tool for client-side RPC services class ServiceHelper { public: /** @@ -103,38 +105,41 @@ class ServiceHelper { CloneSourceInfo* info); /** - * 从chunkserver端获取最新的leader信息 - * @param[in]: getLeaderInfo为对应copyset的信息 - * @param[out]: leaderAddr是出参,返回当前copyset的leader信息 - * @param[out]: leaderId是出参,返回当前leader的id信息 - * @param[in]: fileMetric是用于metric的记录 - * @return: 成功返回0,否则返回-1 + * Obtain the latest leader information from the chunkserver side + * @param[in]: getLeaderInfo is the information of the corresponding copyset + * @param[out]: leaderAddr is the output parameter that returns the leader + * information of the current copyset + * @param[out]: leaderId is the output parameter, returning the ID + * information of the current leader + * @param[in]: fileMetric is a record used for metric + * @return: Successfully returns 0, otherwise returns -1 */ static int GetLeader(const GetLeaderInfo& getLeaderInfo, - PeerAddr *leaderAddr, + PeerAddr* leaderAddr, ChunkServerID* leaderId = nullptr, FileMetric* fileMetric = nullptr); /** - * 从文件名中获取user信息. - * 用户的user信息需要夹在文件名中,比如文件名为temp,用户名为user, - * 那么其完整的文件信息是:temp_user_。 - * 如果文件名为: /temp_temp_,那么完整文件名为/temp_temp__user_。 - * @param[in]: filename为用户传下来的文件名 - * @param[out]:realfilename是真正文件名 - * @param[out]: user信息,出参 - * @return: 获取到user信息为true,否则false + * Obtain user information from the file name + * The user information needs to be included in the file name, such as the + * file name being temp and the username being user, So the complete file + * information is: temp_user_. 
If the file name is: /temp_temp_, So the + * complete file name is /temp_temp__user_. + * @param[in]: filename is the file name passed down by the user + * @param[out]: realfilename is the true file name + * @param[out]: user information, output parameters + * @return: Obtained user information as true, otherwise false */ static bool GetUserInfoFromFilename(const std::string& fname, std::string* realfilename, std::string* user); /** - * @brief: 发送http请求,判断chunkserver是否健康 + * @brief: Send an HTTP request to determine if the chunkserver is healthy * - * @param: endPoint chunkserver的ip:port - * @param: http请求的超时时间 + * @param: endPoint chunkserver's ip:port + * @param: HTTP request timeout * - * @return: 0 表示健康,-1表示不健康 + * @return: 0 indicates health, -1 indicates unhealthy */ static int CheckChunkServerHealth(const butil::EndPoint& endPoint, int32_t requestTimeoutMs); @@ -147,6 +152,6 @@ class ServiceHelper { common::ReadWriteThrottleParams* localParams); }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_SERVICE_HELPER_H_ diff --git a/src/client/splitor.h b/src/client/splitor.h index eaffa27a62..7e923cb1ea 100644 --- a/src/client/splitor.h +++ b/src/client/splitor.h @@ -46,53 +46,51 @@ class Splitor { static void Init(const IOSplitOption& ioSplitOpt); /** - * 用户IO拆分成Chunk级别的IO - * @param: iotracker大IO上下文信息 - * @param: metaCache是io拆分过程中需要使用的缓存信息 - * @param: targetlist大IO被拆分之后的小IO存储列表 - * @param: data 是待写的数据 - * @param: offset用户下发IO的其实偏移 - * @param: length数据长度 - * @param: mdsclient在查找metacahe失败时,通过mdsclient查找信息 - * @param: fi存储当前IO的一些基本信息,比如chunksize等 - * @param: FileEpoch_t file epoch info + * Split user IO into Chunk level IO + * @param: iotracker Big IO Context Information + * @param: metaCache is the cache information that needs to be used during + * the IO splitting process + * @param: targetlist The storage list of small IO after the large IO is + * split + * @param: data is the data to be written + * @param: offset The actual offset of IO issued by the user + * @param: length Data length + * @param: mdsclient searches for information through mdsclient when + * searching for metaahe fails + * @param: fi stores some basic information about the current IO, such as + * chunksize, etc + * @param: FileEpoch_t file epoch information */ - static int IO2ChunkRequests(IOTracker* iotracker, - MetaCache* metaCache, - std::vector* targetlist, - butil::IOBuf* data, - off_t offset, - size_t length, - MDSClient* mdsclient, - const FInfo_t* fi, - const FileEpoch_t* fEpoch); + static int IO2ChunkRequests(IOTracker* iotracker, MetaCache* metaCache, + std::vector* targetlist, + butil::IOBuf* data, off_t offset, size_t length, + MDSClient* mdsclient, const FInfo_t* fi, + const FileEpoch_t* fEpoch); /** - * 对单ChunkIO进行细粒度拆分 - * @param: iotracker大IO上下文信息 - * @param: metaCache是io拆分过程中需要使用的缓存信息 - * @param: targetlist大IO被拆分之后的小IO存储列表 - * @param: cid是当前chunk的ID信息 - * @param: data是待写的数据 - * @param: offset是当前chunk内的偏移 - * @param: length数据长度 - * @param: seq是当前chunk的版本号 + * Fine grained splitting of single ChunkIO + * @param: iotracker Big IO Context Information + * @param: metaCache is the cache information that needs to be used during + * the IO splitting process + * @param: targetlist The storage list of small IO after the large IO is + * split + * @param: cid is the ID information of the current chunk + * @param: data is the data to be written + * @param: offset is the offset within the current chunk + * @param: length Data length + * @param: seq is 
the version number of the current chunk */ - static int SingleChunkIO2ChunkRequests(IOTracker* iotracker, - MetaCache* metaCache, - std::vector* targetlist, - const ChunkIDInfo& cid, - butil::IOBuf* data, - off_t offset, - size_t length, - uint64_t seq); + static int SingleChunkIO2ChunkRequests( + IOTracker* iotracker, MetaCache* metaCache, + std::vector* targetlist, const ChunkIDInfo& cid, + butil::IOBuf* data, off_t offset, size_t length, uint64_t seq); /** - * @brief 计算请求的location信息 - * @param ioTracker io上下文信息 - * @param metaCache 文件缓存信息 - * @param chunkIdx 当前chunk信息 - * @return source信息 + * @brief calculates the location information of the request + * @param ioTracker io Context Information + * @param metaCache file cache information + * @param chunkIdx Current chunk information + * @return source information */ static RequestSourceInfo CalcRequestSourceInfo(IOTracker* ioTracker, MetaCache* metaCache, @@ -105,34 +103,33 @@ class Splitor { private: /** - * IO2ChunkRequests内部会调用这个函数,进行真正的拆分操作 - * @param: iotracker大IO上下文信息 - * @param: mc是io拆分过程中需要使用的缓存信息 - * @param: targetlist大IO被拆分之后的小IO存储列表 - * @param: data 是待写的数据 - * @param: offset用户下发IO的其实偏移 - * @param: length数据长度 - * @param: mdsclient在查找metacahe失败时,通过mdsclient查找信息 - * @param: fi存储当前IO的一些基本信息,比如chunksize等 - * @param: chunkidx是当前chunk在vdisk中的索引值 + * IO2ChunkRequests will internally call this function for actual splitting + * operations + * @param: iotracker Big IO Context Information + * @param: mc is the cache information that needs to be used during IO + * splitting process + * @param: targetlist The storage list of small IO after the large IO is + * split + * @param: Data is the data to be written + * @param: offset The actual offset of IO issued by the user + * @param: length Data length + * @param: mdsclient searches for information through mdsclient when + * searching for metaahe fails + * @param: fi stores some basic information about the current IO, such as + * chunksize, etc + * @param: chunkidx is the index value of the current chunk in the vdisk */ - static bool AssignInternal(IOTracker* iotracker, - MetaCache* metaCache, - std::vector* targetlist, - butil::IOBuf* data, - off_t offset, - uint64_t length, - MDSClient* mdsclient, - const FInfo_t* fi, - const FileEpoch_t* fEpoch, - ChunkIndex chunkidx); - - static bool GetOrAllocateSegment(bool allocateIfNotExist, - uint64_t offset, - MDSClient* mdsClient, - MetaCache* metaCache, + static bool AssignInternal(IOTracker* iotracker, MetaCache* metaCache, + std::vector* targetlist, + butil::IOBuf* data, off_t offset, + uint64_t length, MDSClient* mdsclient, + const FInfo_t* fi, const FileEpoch_t* fEpoch, + ChunkIndex chunkidx); + + static bool GetOrAllocateSegment(bool allocateIfNotExist, uint64_t offset, + MDSClient* mdsClient, MetaCache* metaCache, const FInfo* fileInfo, - const FileEpoch_t *fEpoch, + const FileEpoch_t* fEpoch, ChunkIndex chunkidx); static int SplitForNormal(IOTracker* iotracker, MetaCache* metaCache, @@ -149,14 +146,13 @@ class Splitor { static bool MarkDiscardBitmap(IOTracker* iotracker, FileSegment* fileSegment, - SegmentIndex segmentIndex, - uint64_t offset, + SegmentIndex segmentIndex, uint64_t offset, uint64_t len); private: - // IO拆分模块所使用的配置信息 + // Configuration information used for IO split modules static IOSplitOption iosplitopt_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_SPLITOR_H_ diff --git a/src/client/unstable_helper.cpp b/src/client/unstable_helper.cpp index 
5cc99945fe..ae330b1294 100644
--- a/src/client/unstable_helper.cpp
+++ b/src/client/unstable_helper.cpp
@@ -24,13 +24,13 @@
 namespace curve {
 namespace client {
-UnstableState
-UnstableHelper::GetCurrentUnstableState(ChunkServerID csId,
-                                        const butil::EndPoint &csEndPoint) {
+UnstableState UnstableHelper::GetCurrentUnstableState(
+    ChunkServerID csId, const butil::EndPoint& csEndPoint) {
    std::string ip = butil::ip2str(csEndPoint.ip).c_str();
    mtx_.lock();
-    // 如果当前ip已经超过阈值,则直接返回chunkserver unstable
+    // If the current IP has already exceeded the threshold, directly
+    // return chunkserver unstable
    uint32_t unstabled = serverUnstabledChunkservers_[ip].size();
    if (unstabled >= option_.serverUnstableThreshold) {
        serverUnstabledChunkservers_[ip].emplace(csId);
diff --git a/src/client/unstable_helper.h b/src/client/unstable_helper.h
index 89cc22be8e..47c9be6a25 100644
--- a/src/client/unstable_helper.h
+++ b/src/client/unstable_helper.h
@@ -35,20 +35,17 @@
 namespace curve {
 namespace client {
-enum class UnstableState {
-    NoUnstable,
-    ChunkServerUnstable,
-    ServerUnstable
-};
-
-// 如果chunkserver宕机或者网络不可达, 发往对应chunkserver的rpc会超时
-// 返回之后, 回去refresh leader然后再去发送请求
-// 这种情况下不同copyset上的请求,总会先rpc timedout然后重新refresh leader
-// 为了避免一次多余的rpc timedout
-// 记录一下发往同一个chunkserver上超时请求的次数
-// 如果超过一定的阈值,会发送http请求检查chunkserver是否健康
-// 如果不健康,则通知所有leader在这台chunkserver上的copyset
-// 主动去refresh leader,而不是根据缓存的leader信息直接发送rpc
+enum class UnstableState { NoUnstable, ChunkServerUnstable, ServerUnstable };
+
+// If a chunkserver goes down or becomes unreachable over the network, the
+// RPCs sent to it time out; after an RPC returns, the client refreshes the
+// leader and then resends the request. In that situation, requests on
+// different copysets would each hit an RPC timeout before refreshing their
+// leader. To avoid such redundant RPC timeouts, the number of timed-out
+// requests sent to the same chunkserver is recorded. Once it exceeds a
+// certain threshold, an HTTP request is sent to check whether the
+// chunkserver is healthy. If it is unhealthy, all copysets whose leader is
+// on this chunkserver are notified to actively refresh their leader instead
+// of sending RPCs directly based on the cached leader information.
 class UnstableHelper {
 public:
    UnstableHelper() = default;
@@ -56,9 +53,7 @@ class UnstableHelper {
    UnstableHelper(const UnstableHelper&) = delete;
    UnstableHelper& operator=(const UnstableHelper&) = delete;
-    void Init(const ChunkServerUnstableOption& opt) {
-        option_ = opt;
-    }
+    void Init(const ChunkServerUnstableOption& opt) { option_ = opt; }
    void IncreTimeout(ChunkServerID csId) {
        std::unique_lock guard(mtx_);
@@ -78,10 +73,10 @@ class UnstableHelper {
 private:
    /**
-     * @brief 检查chunkserver状态
+     * @brief Check the chunkserver's status
     *
-     * @param: endPoint chunkserver的ip:port地址
-     * @return: true 健康 / false 不健康
+     * @param: endPoint the ip:port address of the chunkserver
+     * @return: true if healthy / false if unhealthy
     */
    bool CheckChunkServerHealth(const butil::EndPoint& endPoint) const {
        return ServiceHelper::CheckChunkServerHealth(
@@ -92,10 +87,10 @@ class UnstableHelper {
    bthread::Mutex mtx_;
-    // 同一chunkserver连续超时请求次数
+    // Number of consecutive timed-out requests to the same chunkserver
    std::unordered_map timeoutTimes_;
-    // 同一server上unstable chunkserver的id
+    // IDs of the unstable chunkservers on the same server
    std::unordered_map> serverUnstabledChunkservers_;
 };
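The detection policy described in the comment above boils down to a small decision ladder. A condensed, hypothetical rendering of that logic, with illustrative thresholds in place of the values carried by ChunkServerUnstableOption:

    #include <cstdint>

    enum class UnstableState { NoUnstable, ChunkServerUnstable, ServerUnstable };

    // Sketch of the decision only; the real code also updates the per-server
    // bookkeeping maps under a mutex.
    UnstableState Judge(uint32_t csTimeoutTimes, uint32_t unstableOnSameServer,
                        bool chunkserverHealthy) {
        const uint32_t kCsThreshold = 3;      // illustrative, not the default
        const uint32_t kServerThreshold = 5;  // illustrative, not the default
        if (csTimeoutTimes < kCsThreshold || chunkserverHealthy)
            return UnstableState::NoUnstable;  // keep using the cached leader
        if (unstableOnSameServer >= kServerThreshold)
            return UnstableState::ServerUnstable;  // the whole host is suspect
        return UnstableState::ChunkServerUnstable;  // refresh leaders on it
    }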
diff --git a/src/common/authenticator.h b/src/common/authenticator.h
index 7d9ba319c3..f52560379a 100644
--- a/src/common/authenticator.h
+++ b/src/common/authenticator.h
@@ -30,31 +30,30 @@ namespace common {
 class Authenticator {
 public:
    /**
-     * bref: 获取要进行签名的字符串
-     * @param: date, 当前的时间
-     * @param: owner, 文件所有者
-     * @return: 返回需要进行加密的字符串
+     * brief: Get the string to be signed
+     * @param: date, the current time
+     * @param: owner, the file owner
+     * @return: the string that needs to be encrypted
     */
    static std::string GetString2Signature(uint64_t date,
-                                          const std::string& owner);
+                                           const std::string& owner);
    /**
-     * bref: 为字符串计算签名
-     * @param: String2Signature, 需要进行签名计算的字符串
-     * @param: secretKey, 为计算的秘钥
-     * @return: 返回需要进行签名过后的字符串
+     * brief: Calculate the signature for a string
+     * @param: String2Signature, the string for which the signature is
+     * calculated
+     * @param: secretKey, the secret key used for the calculation
+     * @return: the resulting signature string
     */
    static std::string CalcString2Signature(const std::string& String2Signature,
                                            const std::string& secretKey);
 private:
-    static int HMacSha256(const void* key, int key_size,
-                          const void* data, int data_size,
-                          void* digest);
+    static int HMacSha256(const void* key, int key_size, const void* data,
+                          int data_size, void* digest);
-    static std::string Base64(const unsigned char *src, size_t sz);
+    static std::string Base64(const unsigned char* src, size_t sz);
 };
-}   // namespace common
-}   // namespace curve
+}  // namespace common
+}  // namespace curve
 #endif  // SRC_COMMON_AUTHENTICATOR_H_
diff --git a/src/common/bitmap.cpp b/src/common/bitmap.cpp
index dbff32702c..50d33181d9 100644
--- a/src/common/bitmap.cpp
+++ b/src/common/bitmap.cpp
@@ -20,20 +20,22 @@
 * Author: yangyaokai
 */
+#include "src/common/bitmap.h"
+
 #include
 #include
-#include
+
 #include
-#include "src/common/bitmap.h"
+#include
 namespace curve {
 namespace common {
-std::string BitRangeVecToString(const std::vector &ranges) {
+std::string BitRangeVecToString(const std::vector& ranges) {
    std::stringstream ss;
    for (uint32_t i = 0; i < ranges.size(); ++i) {
        if (i != 0) {
-            ss << ", ";
+            ss << ", ";
        }
        ss << "(" << ranges[i].beginIndex << "," << ranges[i].endIndex << ")";
    }
@@ -44,14 +46,14 @@ const uint32_t Bitmap::NO_POS = 0xFFFFFFFF;
 Bitmap::Bitmap(uint32_t bits) : bits_(bits) {
    int count = unitCount();
-    bitmap_ = new(std::nothrow) char[count];
+    bitmap_ = new (std::nothrow) char[count];
    CHECK(bitmap_ != nullptr) << "allocate bitmap failed.";
    memset(bitmap_, 0, count);
 }
 Bitmap::Bitmap(uint32_t bits, const char* bitmap) : bits_(bits) {
    int count = unitCount();
-    bitmap_ = new(std::nothrow) char[count];
+    bitmap_ = new (std::nothrow) char[count];
    CHECK(bitmap_ != nullptr) << "allocate bitmap failed.";
    if (bitmap != nullptr) {
        memcpy(bitmap_, bitmap, count);
@@ -64,7 +66,7 @@ Bitmap::Bitmap(uint32_t bits, char* bitmap, bool transfer) : bits_(bits) {
    int count = unitCount();
    if (!transfer) {
-        bitmap_ = new(std::nothrow) char[count];
+        bitmap_ = new (std::nothrow) char[count];
        CHECK(bitmap_ != nullptr) << "allocate bitmap failed.";
        if (bitmap != nullptr) {
            memcpy(bitmap_, bitmap, count);
@@ -87,18 +89,17 @@ Bitmap::~Bitmap() {
 Bitmap::Bitmap(const Bitmap& bitmap) {
    bits_ = bitmap.Size();
    int count = unitCount();
-    bitmap_ = new(std::nothrow) char[count];
+    bitmap_ = new (std::nothrow) char[count];
    CHECK(bitmap_ != nullptr) << "allocate bitmap failed.";
    memcpy(bitmap_, bitmap.GetBitmap(), count);
 }
-Bitmap& Bitmap::operator = (const Bitmap& bitmap) {
-    if (this == &bitmap)
-        return *this;
+Bitmap& Bitmap::operator=(const Bitmap& bitmap) {
+    if (this == &bitmap) return *this;
    delete[] bitmap_;
    bits_ = bitmap.Size();
    int count = unitCount();
-    bitmap_ = new(std::nothrow) char[count];
+    bitmap_ = new (std::nothrow) char[count];
    CHECK(bitmap_ != nullptr) << "allocate bitmap failed.";
    memcpy(bitmap_, bitmap.GetBitmap(), count);
    return *this;
 }
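The Divide() routine further below partitions a closed interval [startIndex, endIndex] into alternating clear and set runs. A worked example of the expected output, using the Bitmap and BitRange API from these files (the bit pattern is arbitrary):

    // Bits, index 0..7:  0 0 1 1 0 1 0 0
    Bitmap bm(8);
    bm.Set(2, 3);
    bm.Set(5);

    std::vector<BitRange> clearRanges, setRanges;
    bm.Divide(0, 7, &clearRanges, &setRanges);
    // clearRanges == {(0,1), (4,4), (6,7)}   // closed intervals
    // setRanges   == {(2,3), (5,5)}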
@@ -118,23 +119,19 @@ Bitmap& Bitmap::operator=(Bitmap&& other) noexcept {
    return *this;
 }
-bool Bitmap::operator == (const Bitmap& bitmap) const {
-    if (bits_ != bitmap.Size())
-        return false;
+bool Bitmap::operator==(const Bitmap& bitmap) const {
+    if (bits_ != bitmap.Size()) return false;
    return 0 == memcmp(bitmap_, bitmap.GetBitmap(), unitCount());
 }
-bool Bitmap::operator != (const Bitmap& bitmap) const {
+bool Bitmap::operator!=(const Bitmap& bitmap) const {
    return !(*this == bitmap);
 }
-void Bitmap::Set() {
-    memset(bitmap_, 0xff, unitCount());
-}
+void Bitmap::Set() { memset(bitmap_, 0xff, unitCount()); }
 void Bitmap::Set(uint32_t index) {
-    if (index < bits_)
-        bitmap_[indexOfUnit(index)] |= mask(index);
+    if (index < bits_) bitmap_[indexOfUnit(index)] |= mask(index);
 }
 void Bitmap::Set(uint32_t startIndex, uint32_t endIndex) {
@@ -144,13 +141,10 @@ void Bitmap::Set(uint32_t startIndex, uint32_t endIndex) {
    }
 }
-void Bitmap::Clear() {
-    memset(bitmap_, 0, unitCount());
-}
+void Bitmap::Clear() { memset(bitmap_, 0, unitCount()); }
 void Bitmap::Clear(uint32_t index) {
-    if (index < bits_)
-        bitmap_[indexOfUnit(index)] &= ~mask(index);
+    if (index < bits_) bitmap_[indexOfUnit(index)] &= ~mask(index);
 }
 void Bitmap::Clear(uint32_t startIndex, uint32_t endIndex) {
@@ -169,106 +163,93 @@ bool Bitmap::Test(uint32_t index) const {
 uint32_t Bitmap::NextSetBit(uint32_t index) const {
    for (; index < bits_; ++index) {
-        if (Test(index))
-            break;
+        if (Test(index)) break;
    }
-    if (index >= bits_)
-        index = NO_POS;
+    if (index >= bits_) index = NO_POS;
    return index;
 }
 uint32_t Bitmap::NextSetBit(uint32_t startIndex, uint32_t endIndex) const {
    uint32_t index = startIndex;
-    // bitmap中最后一个bit的index值
+    // The index value of the last bit in the bitmap
    uint32_t lastIndex = bits_ - 1;
-    // endIndex值不能超过lastIndex
-    if (endIndex > lastIndex)
-        endIndex = lastIndex;
+    // The endIndex value cannot exceed lastIndex
+    if (endIndex > lastIndex) endIndex = lastIndex;
    for (; index <= endIndex; ++index) {
-        if (Test(index))
-            break;
+        if (Test(index)) break;
    }
-    if (index > endIndex)
-        index = NO_POS;
+    if (index > endIndex) index = NO_POS;
    return index;
 }
 uint32_t Bitmap::NextClearBit(uint32_t index) const {
    for (; index < bits_; ++index) {
-        if (!Test(index))
-            break;
+        if (!Test(index)) break;
    }
-    if (index >= bits_)
-        index = NO_POS;
+    if (index >= bits_) index = NO_POS;
    return index;
 }
 uint32_t Bitmap::NextClearBit(uint32_t startIndex, uint32_t endIndex) const {
    uint32_t index = startIndex;
    uint32_t lastIndex = bits_ - 1;
-    // endIndex值不能超过lastIndex
-    if (endIndex > lastIndex)
-        endIndex = lastIndex;
+    // The endIndex value cannot exceed lastIndex
+    if (endIndex > lastIndex) endIndex = lastIndex;
    for (; index <= endIndex; ++index) {
-        if (!Test(index))
-            break;
+        if (!Test(index)) break;
    }
-    if (index > endIndex)
-        index = NO_POS;
+    if (index > endIndex) index = NO_POS;
    return index;
 }
-void Bitmap::Divide(uint32_t startIndex,
-                    uint32_t endIndex,
+void Bitmap::Divide(uint32_t startIndex, uint32_t endIndex,
                    vector* clearRanges,
                    vector* setRanges) const {
-    // endIndex的值不能小于startIndex
-    if (endIndex < startIndex)
-        return;
+    // The value of endIndex cannot be less than startIndex
+    if (endIndex < startIndex) return;
-    // endIndex值不能超过lastIndex
+    // The endIndex value cannot exceed lastIndex
lastIndex uint32_t lastIndex = bits_ - 1; - if (endIndex > lastIndex) - endIndex = lastIndex; + if (endIndex > lastIndex) endIndex = lastIndex; BitRange clearRange; BitRange setRange; vector tmpClearRanges; vector tmpSetRanges; - // 下一个位为0的index + // Next index with 0 bits uint32_t nextClearIndex; - // 下一个位为1的index + // Next index with bit 1 uint32_t nextSetIndex; - // 划分所有range + // Divide all ranges while (startIndex != NO_POS) { nextClearIndex = NextClearBit(startIndex, endIndex); - // 1.存放当前clear index之前的 set range - // nextClearIndex如果等于startIndex说明前面没有 set range + // 1. Store the set range before the current clear index + // If nextClearIndex is equal to startIndex, it indicates that there + // is no set range before it if (nextClearIndex != startIndex) { setRange.beginIndex = startIndex; - // nextClearIndex等于NO_POS说明已经找到末尾 - // 最后一块连续区域是 set range - setRange.endIndex = nextClearIndex == NO_POS - ? endIndex - : nextClearIndex - 1; + // nextClearIndex equals NO_POS description has found the end + // The last continuous area is set range + setRange.endIndex = + nextClearIndex == NO_POS ? endIndex : nextClearIndex - 1; tmpSetRanges.push_back(setRange); } - if (nextClearIndex == NO_POS) - break; + if (nextClearIndex == NO_POS) break; nextSetIndex = NextSetBit(nextClearIndex, endIndex); - // 2.存放当前set index之前的 clear range - // 能到这一步说明前面肯定存在clear range,所以不用像第1步一样做判断 + // 2. Store the clear range before the current set index + // Being able to reach this step indicates that there must be a clear + // range ahead, so there is no need to make a judgment like in step 1 clearRange.beginIndex = nextClearIndex; - clearRange.endIndex = nextSetIndex == NO_POS - ? endIndex - : nextSetIndex - 1; + clearRange.endIndex = + nextSetIndex == NO_POS ? endIndex : nextSetIndex - 1; tmpClearRanges.push_back(clearRange); startIndex = nextSetIndex; } - // 根据参数中的clearRanges和setRanges指针是否为空返回结果 + // Returns a result based on whether the clearRanges and setRanges pointers + // in the parameters are empty if (clearRanges != nullptr) { *clearRanges = std::move(tmpClearRanges); } @@ -277,13 +258,9 @@ void Bitmap::Divide(uint32_t startIndex, } } -uint32_t Bitmap::Size() const { - return bits_; -} +uint32_t Bitmap::Size() const { return bits_; } -const char* Bitmap::GetBitmap() const { - return bitmap_; -} +const char* Bitmap::GetBitmap() const { return bitmap_; } } // namespace common } // namespace curve diff --git a/src/common/bitmap.h b/src/common/bitmap.h index e7a0e1270d..fb6a5b67fe 100644 --- a/src/common/bitmap.h +++ b/src/common/bitmap.h @@ -24,191 +24,215 @@ #define SRC_COMMON_BITMAP_H_ #include -#include -#include - -namespace curve { -namespace common { - -using std::vector; -const int BITMAP_UNIT_SIZE = 8; -const int ALIGN_FACTOR = 3; // 2 ^ ALIGN_FACTOR = BITMAP_UNIT_SIZE +#include +#include -/** - * 表示bitmap中的一段连续区域,为闭区间 - */ -struct BitRange { - // 连续区域起始位置在bitmap中的索引 - uint32_t beginIndex; - // 连续区域结束位置在bitmap中的索引 - uint32_t endIndex; -}; - - -std::string BitRangeVecToString(const std::vector &ranges); - -class Bitmap { - public: - /** - * 新建bitmap时的构造函数 - * @param bits: 要构造的bitmap的位数 - */ - explicit Bitmap(uint32_t bits); - /** - * 从已有的快照文件初始化时的构造函数 - * 构造函数内部会再new一个新的bitmap,然后从参数中的bitmap memcpy过去 - * @param bits: bitmap的位数 - * @param bitmap: 外部提供的用于初始化的bitmap - */ - explicit Bitmap(uint32_t bits, const char* bitmap); - - // Construct from a given bitmap, if transfer is false, allocate enough - // memory and copy the given bitmap, otherwise, just store the pointer - Bitmap(uint32_t bits, char* bitmap, 
bool transfer = false); - - ~Bitmap(); - - /** - * 拷贝构造,使用深拷贝 - * @param bitmap:从该对象拷贝内容 - */ - Bitmap(const Bitmap& bitmap); - /** - * 赋值函数,使用深拷贝 - * @param bitmap:从该对象拷贝内容 - * @reutrn:返回拷贝后对象引用 - */ - Bitmap& operator = (const Bitmap& bitmap); - - Bitmap(Bitmap&& other) noexcept; - Bitmap& operator=(Bitmap&& other) noexcept; - - /** - * 比较两个bitmap是否相同 - * @param bitmap:待比较的bitmap - * @return:如果相同返回true,如果不同返回false - */ - bool operator == (const Bitmap& bitmap) const; - /** - * 比较两个bitmap是否不同 - * @param bitmap:待比较的bitmap - * @return:如果不同返回true,如果相同返回false - */ - bool operator != (const Bitmap& bitmap) const; - /** - * 将所有位置1 - */ - void Set(); - /** - * 将指定位置1 - * @param index: 指定位的位置 - */ - void Set(uint32_t index); - /** - * 将指定范围的位置为1 - * @param startIndex: 范围起始位置,包括此位置 - * @param endIndex: 范围结束位置,包括此位置 - */ - void Set(uint32_t startIndex, uint32_t endIndex); - /** - * 将所有位置0 - */ - void Clear(); - /** - * 将指定位置0 - * @param index: 指定位的位置 - */ - void Clear(uint32_t index); - /** - * 将指定范围的位置为0 - * @param startIndex: 范围起始位置,包括此位置 - * @param endIndex: 范围结束位置,包括此位置 - */ - void Clear(uint32_t startIndex, uint32_t endIndex); - /** - * 获取指定位置位的状态 - * @param index: 指定位的位置 - * @return: true表示当前位状态为1,false表示为0 - */ - bool Test(uint32_t index) const; - /** - * 获取指定位置及之后的首个位为1的位置 - * @param index: 指定位的位置,包含此位置 - * @return: 首个位为1的位置,如果不存在返回NO_POS - */ - uint32_t NextSetBit(uint32_t index) const; - /** - * 获取指定起始位置到结束位置之间的的首个位为1的位置 - * @param startIndex: 起始位置,包含此位置 - * @param endIndex: 结束位置,包含此位置 - * @return: 首个位为1的位置,如果指定范围内不存在则返回NO_POS - */ - uint32_t NextSetBit(uint32_t startIndex, uint32_t endIndex) const; - /** - * 获取指定位置及之后的首个位为0的位置 - * @param index: 指定位的位置,包含此位置 - * @return: 首个位为0的位置,如果不存在返回NO_POS - */ - uint32_t NextClearBit(uint32_t index) const; - /** - * 获取指定起始位置到结束位置之间的的首个位为0的位置 - * @param startIndex: 起始位置,包含此位置 - * @param endIndex: 结束位置,包含此位置 - * @return: 首个位为0的位置,如果指定范围内不存在则返回NO_POS - */ - uint32_t NextClearBit(uint32_t startIndex, uint32_t endIndex) const; - /** - * 将bitmap的指定区域分割成若干连续区域,划分依据为位状态,连续区域内的位状态一致 - * 例如:00011100会被划分为三个区域,[0,2]、[3,5]、[6,7] - * @param startIndex: 指定区域的起始索引 - * @param endIndex: 指定范围的结束索引 - * @param clearRanges: 存放位状态为0的连续区域的向量,可以指定为nullptr - * @param setRanges: 存放位状态为1的连续区域的向量,可以指定为nullptr - */ - void Divide(uint32_t startIndex, - uint32_t endIndex, - vector* clearRanges, - vector* setRanges) const; - /** - * bitmap的有效位数 - * @return: 返回位数 - */ - uint32_t Size() const; - /** - * 获取bitmap的内存指针,用于持久化bitmap - * @return: bitmap的内存指针 - */ - const char* GetBitmap() const; - - private: - // bitmap的字节数 - int unitCount() const { - // 同 (bits_ + BITMAP_UNIT_SIZE - 1) / BITMAP_UNIT_SIZE - return (bits_ + BITMAP_UNIT_SIZE - 1) >> ALIGN_FACTOR; - } - // 指定位置的bit在其所在字节中的偏移 - int indexOfUnit(uint32_t index) const { - // 同 index / BITMAP_UNIT_SIZE - return index >> ALIGN_FACTOR; - } - // 逻辑计算掩码值 - char mask(uint32_t index) const { - int indexInUnit = index % BITMAP_UNIT_SIZE; - char mask = 0x01 << indexInUnit; - return mask; - } - - public: - // 表示不存在的位置,值为0xffffffff - static const uint32_t NO_POS; - - private: - uint32_t bits_; - char* bitmap_; -}; - -} // namespace common -} // namespace curve - -#endif // SRC_COMMON_BITMAP_H_ +namespace curve +{ + namespace common + { + + using std::vector; + + const int BITMAP_UNIT_SIZE = 8; + const int ALIGN_FACTOR = 3; // 2 ^ ALIGN_FACTOR = BITMAP_UNIT_SIZE + + /** + * Represents a continuous region in a bitmap, which is a closed interval + */ + struct BitRange + { + // Index of the starting position of a continuous region in Bitmap + 
uint32_t beginIndex;
+        // Index of the end position of a continuous region in Bitmap
+        uint32_t endIndex;
+    };
+
+    std::string BitRangeVecToString(const std::vector &ranges);
+
+    class Bitmap
+    {
+    public:
+        /**
+         * Constructor when creating a new bitmap
+         * @param bits: The number of bits to construct the bitmap
+         */
+        explicit Bitmap(uint32_t bits);
+        /**
+         * Constructor when initializing from an existing snapshot file
+         * The constructor creates a new bitmap internally and then memcpys
+         * the bitmap passed in the parameters into it
+         * @param bits: Number of bits in the bitmap
+         * @param bitmap: An externally provided bitmap for initialization
+         */
+        explicit Bitmap(uint32_t bits, const char *bitmap);
+
+        // Construct from a given bitmap, if transfer is false, allocate enough
+        // memory and copy the given bitmap, otherwise, just store the pointer
+        Bitmap(uint32_t bits, char *bitmap, bool transfer = false);
+
+        ~Bitmap();
+
+        /**
+         * Copy constructor, using deep copy
+         * @param bitmap: Copy content from this object
+         */
+        Bitmap(const Bitmap &bitmap);
+        /**
+         * Assignment operator, using deep copy
+         * @param bitmap: Copy content from this object
+         * @return: Returns a reference to the copied object
+         */
+        Bitmap &operator=(const Bitmap &bitmap);
+
+        Bitmap(Bitmap &&other) noexcept;
+        Bitmap &operator=(Bitmap &&other) noexcept;
+
+        /**
+         * Compare whether two bitmaps are the same
+         * @param bitmap: Bitmap to be compared
+         * @return: Returns true if the same, false if different
+         */
+        bool operator==(const Bitmap &bitmap) const;
+        /**
+         * Compare whether two bitmaps are different
+         * @param bitmap: Bitmap to be compared
+         * @return: Returns true if different, false if the same
+         */
+        bool operator!=(const Bitmap &bitmap) const;
+        /**
+         * Set all bits to 1
+         */
+        void Set();
+        /**
+         * Set the specified bit to 1
+         * @param index: position of the bit
+         */
+        void Set(uint32_t index);
+        /**
+         * Set the bits in the specified range to 1
+         * @param startIndex: The starting position of the range, including
+         * this position
+         * @param endIndex: The end position of the range, including this
+         * position
+         */
+        void Set(uint32_t startIndex, uint32_t endIndex);
+        /**
+         * Clear all bits to 0
+         */
+        void Clear();
+        /**
+         * Clear the specified bit to 0
+         * @param index: position of the bit
+         */
+        void Clear(uint32_t index);
+        /**
+         * Clear the bits in the specified range to 0
+         * @param startIndex: The starting position of the range, including
+         * this position
+         * @param endIndex: The end position of the range, including this
+         * position
+         */
+        void Clear(uint32_t startIndex, uint32_t endIndex);
+        /**
+         * Get the state of the bit at the specified position
+         * @param index: position of the bit
+         * @return: true means the bit is 1, false means it is 0
+         */
+        bool Test(uint32_t index) const;
+        /**
+         * Get the position of the first set (1) bit at or after the specified
+         * position
+         * @param index: the starting position, including this position
+         * @return: The position of the first set bit; returns NO_POS if it
+         * does not exist
+         */
+        uint32_t NextSetBit(uint32_t index) const;
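        /**
         * A minimal usage sketch of the interfaces above and Divide() below
         * (illustrative only, not part of the original header; assumes an
         * 8-bit bitmap):
         *
         *   Bitmap map(8);                     // all bits 0
         *   map.Set(3, 5);                     // bits 3..5 set
         *   uint32_t s = map.NextSetBit(0);    // s == 3
         *   uint32_t c = map.NextClearBit(3);  // c == 6
         *   std::vector<BitRange> clearRanges, setRanges;
         *   map.Divide(0, 7, &clearRanges, &setRanges);
         *   // clearRanges == {[0,2], [6,7]}, setRanges == {[3,5]}
         */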
+        /**
+         * Get the position of the first set (1) bit between the specified
+         * start position and end position
+         * @param startIndex: The starting position, including this position
+         * @param endIndex: End position, including this position
+         * @return: The position of the first set bit; returns NO_POS if it
+         * does not exist within the specified range
+         */
+        uint32_t NextSetBit(uint32_t startIndex, uint32_t endIndex) const;
+        /**
+         * Get the position of the first clear (0) bit at or after the
+         * specified position
+         * @param index: the starting position, including this position
+         * @return: The position of the first clear bit; returns NO_POS if it
+         * does not exist
+         */
+        uint32_t NextClearBit(uint32_t index) const;
+        /**
+         * Get the position of the first clear (0) bit between the specified
+         * start position and end position
+         * @param startIndex: The starting position, including this position
+         * @param endIndex: End position, including this position
+         * @return: The position of the first clear bit; returns NO_POS if it
+         * does not exist within the specified range
+         */
+        uint32_t NextClearBit(uint32_t startIndex, uint32_t endIndex) const;
+        /**
+         * Divide the specified region of the bitmap into several contiguous
+         * regions according to bit state; the bit state is uniform within
+         * each region. For example, 00011100 will be divided into three
+         * regions: [0,2], [3,5], [6,7]
+         * @param startIndex: The starting index of the specified region
+         * @param endIndex: The end index of the specified region
+         * @param clearRanges: A vector that stores the contiguous regions
+         * whose bit state is 0; may be nullptr
+         * @param setRanges: A vector that stores the contiguous regions whose
+         * bit state is 1; may be nullptr
+         */
+        void Divide(uint32_t startIndex, uint32_t endIndex,
+                    vector *clearRanges,
+                    vector *setRanges) const;
+        /**
+         * The number of valid bits in the bitmap
+         * @return: Returns the number of bits
+         */
+        uint32_t Size() const;
+        /**
+         * Get the memory pointer of the bitmap, used to persist the bitmap
+         * @return: Memory pointer of the bitmap
+         */
+        const char *GetBitmap() const;
+
+    private:
+        // Number of bytes in the bitmap
+        int unitCount() const
+        {
+            // Same as (bits_ + BITMAP_UNIT_SIZE - 1) / BITMAP_UNIT_SIZE
+            return (bits_ + BITMAP_UNIT_SIZE - 1) >> ALIGN_FACTOR;
+        }
+        // Index of the byte that contains the bit at the specified position
+        int indexOfUnit(uint32_t index) const
+        {
+            // Same as index / BITMAP_UNIT_SIZE
+            return index >> ALIGN_FACTOR;
+        }
+        // Mask value used for bit operations
+        char mask(uint32_t index) const
+        {
+            int indexInUnit = index % BITMAP_UNIT_SIZE;
+            char mask = 0x01 << indexInUnit;
+            return mask;
+        }
+
+    public:
+        // Represents a non-existent position, with a value of 0xffffffff
+        static const uint32_t NO_POS;
+
+    private:
+        uint32_t bits_;
+        char *bitmap_;
+    };
+
+    }  // namespace common
+}  // namespace curve
+
+#endif  // SRC_COMMON_BITMAP_H_
diff --git a/src/common/channel_pool.h b/src/common/channel_pool.h
index 458baa33d3..fb549023e9 100644
--- a/src/common/channel_pool.h
+++ b/src/common/channel_pool.h
@@ -24,9 +24,10 @@
 #define SRC_COMMON_CHANNEL_POOL_H_

 #include
-#include
-#include
+
 #include
+#include
+#include
 #include

 #include "src/common/concurrent/concurrent.h"
@@ -39,18 +40,18 @@ namespace common {
 class ChannelPool {
  public:
    /**
-     * @brief 从channelMap获取或创建并Init到指定地址的channel
+     * @brief Obtain or create a channel from channelMap and Init it to the
+     * specified address
     *
-     * @param addr 对端的地址
-     * @param[out] channelPtr 到指定地址的channel
+     * @param addr: the address of the peer
+     * @param[out] channelPtr: the channel to the specified address
     *
-     * @return 成功返回0,失败返回-1
+     * @return returns 0 for success, -1 for failure
      */
-    int GetOrInitChannel(const std::string& addr,
-                         ChannelPtr* channelPtr);
+
int GetOrInitChannel(const std::string& addr, ChannelPtr* channelPtr); /** - * @brief 清空map + * @brief Clear map */ void Clear(); @@ -62,5 +63,4 @@ class ChannelPool { } // namespace common } // namespace curve -#endif // SRC_COMMON_CHANNEL_POOL_H_ - +#endif // SRC_COMMON_CHANNEL_POOL_H_ diff --git a/src/common/concurrent/bounded_blocking_queue.h b/src/common/concurrent/bounded_blocking_queue.h index 56c59fcfc1..7d8449d812 100644 --- a/src/common/concurrent/bounded_blocking_queue.h +++ b/src/common/concurrent/bounded_blocking_queue.h @@ -23,12 +23,12 @@ #ifndef SRC_COMMON_CONCURRENT_BOUNDED_BLOCKING_QUEUE_H_ #define SRC_COMMON_CONCURRENT_BOUNDED_BLOCKING_QUEUE_H_ +#include #include +#include //NOLINT #include -#include //NOLINT #include -#include //NOLINT -#include +#include //NOLINT #include #include "src/common/uncopyable.h" @@ -36,18 +36,17 @@ namespace curve { namespace common { -template +template class BBQItem { public: - explicit BBQItem(const T &t, bool stop = false) - : item_(t) { + explicit BBQItem(const T& t, bool stop = false) : item_(t) { stop_.store(stop, std::memory_order_release); } - BBQItem(const BBQItem &bbqItem) { + BBQItem(const BBQItem& bbqItem) { item_ = bbqItem.item_; stop_.store(bbqItem.stop_, std::memory_order_release); } - BBQItem &operator=(const BBQItem &bbqItem) { + BBQItem& operator=(const BBQItem& bbqItem) { if (&bbqItem == this) { return *this; } @@ -56,13 +55,9 @@ class BBQItem { return *this; } - bool IsStop() const { - return stop_.load(std::memory_order_acquire); - } + bool IsStop() const { return stop_.load(std::memory_order_acquire); } - T Item() { - return item_; - } + T Item() { return item_; } private: T item_; @@ -70,18 +65,13 @@ class BBQItem { }; /** - * 有 capacity 限制的阻塞队列,线程安全 + * Blocking queues with capacity restrictions, thread safe */ -template +template class BoundedBlockingDeque : public Uncopyable { public: BoundedBlockingDeque() - : mutex_(), - notEmpty_(), - notFull_(), - deque_(), - capacity_(0) { - } + : mutex_(), notEmpty_(), notFull_(), deque_(), capacity_(0) {} int Init(const int capacity) { if (0 >= capacity) { @@ -91,7 +81,7 @@ class BoundedBlockingDeque : public Uncopyable { return 0; } - void PutBack(const T &x) { + void PutBack(const T& x) { std::unique_lock guard(mutex_); while (deque_.size() == capacity_) { notFull_.wait(guard); @@ -100,7 +90,7 @@ class BoundedBlockingDeque : public Uncopyable { notEmpty_.notify_one(); } - void PutFront(const T &x) { + void PutFront(const T& x) { std::unique_lock guard(mutex_); while (deque_.size() == capacity_) { notFull_.wait(guard); diff --git a/src/common/concurrent/concurrent.h b/src/common/concurrent/concurrent.h index df79ea8ec8..9d87996f2e 100644 --- a/src/common/concurrent/concurrent.h +++ b/src/common/concurrent/concurrent.h @@ -24,39 +24,38 @@ #define SRC_COMMON_CONCURRENT_CONCURRENT_H_ #include -#include // NOLINT -#include // NOLINT +#include // NOLINT +#include // NOLINT #include "src/common/concurrent/count_down_event.h" -#include "src/common/concurrent/spinlock.h" #include "src/common/concurrent/rw_lock.h" - -#include "src/common/concurrent/thread_pool.h" +#include "src/common/concurrent/spinlock.h" #include "src/common/concurrent/task_queue.h" #include "src/common/concurrent/task_thread_pool.h" +#include "src/common/concurrent/thread_pool.h" namespace curve { namespace common { -// curve公共组件命名空间替换 -template -using Atomic = std::atomic; -using Mutex = std::mutex; -using Thread = std::thread; -using LockGuard = std::lock_guard; -using UniqueLock = std::unique_lock; -using 
ConditionVariable = std::condition_variable; - -// curve内部定义的锁组件 -using RWLock = RWLock; -using SpinLock = SpinLock; -using ReadLockGuard = ReadLockGuard; -using WriteLockGuard = WriteLockGuard; - -// curve内部定义的线程组件 -using TaskQueue = TaskQueue; -using ThreadPool = ThreadPool; - -} // namespace common -} // namespace curve +// curve public component namespace replacement +template +using Atomic = std::atomic; +using Mutex = std::mutex; +using Thread = std::thread; +using LockGuard = std::lock_guard; +using UniqueLock = std::unique_lock; +using ConditionVariable = std::condition_variable; + +// Lock components defined internally in curve +using RWLock = RWLock; +using SpinLock = SpinLock; +using ReadLockGuard = ReadLockGuard; +using WriteLockGuard = WriteLockGuard; + +// Thread components defined internally in curve +using TaskQueue = TaskQueue; +using ThreadPool = ThreadPool; + +} // namespace common +} // namespace curve #endif // SRC_COMMON_CONCURRENT_CONCURRENT_H_ diff --git a/src/common/concurrent/count_down_event.h b/src/common/concurrent/count_down_event.h index bfce259351..404fc32681 100644 --- a/src/common/concurrent/count_down_event.h +++ b/src/common/concurrent/count_down_event.h @@ -23,36 +23,30 @@ #ifndef SRC_COMMON_CONCURRENT_COUNT_DOWN_EVENT_H_ #define SRC_COMMON_CONCURRENT_COUNT_DOWN_EVENT_H_ -#include //NOLINT -#include //NOLINT -#include //NOLINT +#include //NOLINT +#include //NOLINT +#include //NOLINT namespace curve { namespace common { /** - * 用于线程间同步,CountDownEvent是通过一个计数器来实现的,计数器的 - * 初始值initCnt为需要等待event的总数,通过接口Wait等待。每当一个 - * event发生,就会调用Signal接口,让计数器的值就会减 1。当计数器值到 - * 达0时,则Wait等待就会结束。一般用于等待一些事件发生 + * Used for inter-thread synchronization, CountDownEvent is implemented using a + * counter with an initial value (initCnt) representing the total number of + * events to wait for. Threads can wait for events using the Wait interface. + * Each time an event occurs, the Signal interface is called, decrementing the + * counter by 1. When the counter reaches 0, the waiting in Wait will conclude. + * It is typically used to wait for certain events to occur. 
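 *
 * A minimal usage sketch (illustrative only; DoWork() is a placeholder for
 * any user function):
 *
 *   CountDownEvent event(2);
 *   std::thread t1([&event] { DoWork(); event.Signal(); });
 *   std::thread t2([&event] { DoWork(); event.Signal(); });
 *   event.Wait();  // returns only after both threads have signaled
 *   t1.join(); t2.join();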
 */
 class CountDownEvent {
  public:
-    CountDownEvent() :
-        mutex_(),
-        cond_(),
-        count_() {
-    }
+    CountDownEvent() : mutex_(), cond_(), count_() {}

-    explicit CountDownEvent(int initCnt) :
-        mutex_(),
-        cond_(),
-        count_(initCnt) {
-    }
+    explicit CountDownEvent(int initCnt) : mutex_(), cond_(), count_(initCnt) {}

    /**
-     * 重新设置event计数
-     * @param eventCount:事件计数
+     * Reset the event count
+     * @param eventCount: the event count
      */
     void Reset(int eventCount) {
         std::unique_lock guard(mutex_);
@@ -60,7 +54,7 @@ class CountDownEvent {
     }

    /**
-     * 通知wait event发生了一次,计数减1
+     * Notify that one awaited event has occurred, decrementing the count by 1
      */
     void Signal() {
         std::unique_lock guard(mutex_);
@@ -71,7 +65,7 @@ class CountDownEvent {
     }

    /**
-     * 等待initCnt的event发生之后,再唤醒
+     * Wake up only after initCnt events have occurred
      */
     void Wait() {
         std::unique_lock guard(mutex_);
@@ -81,9 +75,9 @@ class CountDownEvent {
     }

    /**
-     * 等待initCnt的event发生,或者指定时长
-     * @param waitMs: 等待的ms数
-     * @return:如果所有等待的event都发生,那么就返回true,否则false
+     * Wait for initCnt events to occur, or for the specified duration
+     * @param waitMs: number of milliseconds to wait
+     * @return: true if all awaited events occurred, false otherwise
      */
     bool WaitFor(int waitMs) {
         std::unique_lock guard(mutex_);
@@ -92,11 +86,11 @@ class CountDownEvent {
         while (count_ > 0) {
             auto now = std::chrono::high_resolution_clock::now();
             std::chrono::duration elapsed = now - start;
-            // 计算还剩余多少时间
+            // Calculate how much time is left
             int leftMs = waitMs - static_cast(elapsed.count());
             if (leftMs > 0) {
-                auto ret = cond_.wait_for(guard,
-                                          std::chrono::milliseconds(leftMs));
+                auto ret =
+                    cond_.wait_for(guard, std::chrono::milliseconds(leftMs));
                 (void)ret;
             } else {
                 break;
@@ -113,7 +107,7 @@ class CountDownEvent {
 private:
     mutable std::mutex mutex_;
     std::condition_variable cond_;
-    // 需要等待的事件计数
+    // Count of events to wait for
     int count_;
 };
diff --git a/src/common/concurrent/task_thread_pool.h b/src/common/concurrent/task_thread_pool.h
index b9b23eebe3..cfd9524024 100644
--- a/src/common/concurrent/task_thread_pool.h
+++ b/src/common/concurrent/task_thread_pool.h
@@ -23,27 +23,26 @@
 #ifndef SRC_COMMON_CONCURRENT_TASK_THREAD_POOL_H_
 #define SRC_COMMON_CONCURRENT_TASK_THREAD_POOL_H_

-#include
-#include  //NOLINT
-#include
-#include
-#include  //NOLINT
 #include
-#include  //NOLINT
 #include
+#include  //NOLINT
+#include
+#include
 #include
 #include
+#include  //NOLINT
+#include  //NOLINT
 #include
+#include

 #include "src/common/uncopyable.h"

 namespace curve {
 namespace common {
-
 using Task = std::function;

-// 异步运行回调的线程池
+// Thread pool for asynchronously running callbacks
 template
 class TaskThreadPool : public Uncopyable {
@@ -58,9 +57,10 @@
     }

    /**
-     * 启动一个线程池
-     * @param numThreads 线程池的线程数量,必须大于 0,不设置就是 INT_MAX (不推荐)
-     * @param queueCapacity queue 的容量,必须大于 0
+     * Start a thread pool
+     * @param numThreads The number of threads in the thread pool; must be
+     * greater than 0, and defaults to INT_MAX if not set (not recommended)
+     * @param queueCapacity The capacity of the queue; must be greater than 0
     * @return
      */
     int Start(int numThreads, int queueCapacity = INT_MAX) {
@@ -86,7 +86,7 @@
     }

    /**
-     * 关闭线程池
+     * Stop the thread pool
      */
     void Stop() {
         if (running_.exchange(false, std::memory_order_acq_rel)) {
@@ -101,10 +101,12 @@
     }
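    // A minimal usage sketch of the pool (illustrative only; the enqueue
    // method name below is an assumption based on the doc comment that
    // follows, and DoWork() is a placeholder):
    //
    //   curve::common::TaskThreadPool<> pool;
    //   pool.Start(4, 1024);           // 4 worker threads, queue capacity 1024
    //   pool.Push([] { DoWork(); });   // blocks while the queue is full
    //   pool.Stop();                   // wakes the workers and joins them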
    /**
-     * push 一个 task 给线程池处理,如果队列满,线程阻塞,直到 task push 进去
-     * 需要注意的是用户自己需要保证 task 的有效的。除此之外,此 TaskThreadPool
-     * 并没有提供获取 f 的返回值,所以如果需要获取运行 f 的一些额外信息,需要用户
-     * 自己在 f 内部逻辑添加
+     * Push a task to the thread pool for processing. If the queue is full,
+     * the calling thread blocks until the task has been pushed in. Note that
+     * the caller must ensure the task stays valid. Also, this TaskThreadPool
+     * does not provide access to the return value of f, so if any extra
+     * information about running f is needed, the caller has to add the
+     * corresponding logic inside f
     * @tparam F
     * @tparam Args
     * @param f
     * @param args
      */
@@ -121,40 +123,39 @@
         notEmpty_.notify_one();
     }

-    /* 返回线程池 queue 的容量 */
-    int QueueCapacity() const {
-        return capacity_;
-    }
+    /* Returns the capacity of the thread pool queue */
+    int QueueCapacity() const { return capacity_; }

-    /* 返回线程池当前 queue 中的 task 数量,线程安全 */
+    /* Returns the number of tasks currently queued in the thread pool,
+     * thread safe */
     int QueueSize() const {
         std::lock_guard guard(mutex_);
         return queue_.size();
     }

-    /* 返回线程池的线程数 */
-    int ThreadOfNums() const {
-        return threads_.size();
-    }
+    /* Returns the number of threads in the thread pool */
+    int ThreadOfNums() const { return threads_.size(); }

 protected:
-    /*线程工作时执行的函数*/
+    /* Function executed by each worker thread */
     virtual void ThreadFunc() {
         while (running_.load(std::memory_order_acquire)) {
             Task task(Take());
-            /* ThreadPool 退出的时候,queue 为空,那么会返回无效的 task */
+            /* When the ThreadPool exits and the queue is empty, an invalid
+             * task is returned */
             if (task) {
                 task();
             }
         }
     }

-    /* 判断线程池 queue 是否已经满了, 非线程安全,私有内部使用 */
+    /* Checks whether the thread pool queue is full; not thread safe, for
+     * private internal use */
     bool IsFullUnlock() const {
         return queue_.size() >= static_cast(capacity_);
     }

-    /* 从线程池的 queue 中取一个 task 线程安全 */
+    /* Takes a task from the thread pool queue, thread safe */
     Task Take() {
         std::unique_lock guard(mutex_);
         while (queue_.empty() && running_.load(std::memory_order_acquire)) {
@@ -170,13 +171,13 @@
     }

 protected:
-    mutable MutexT mutex_;
+    mutable MutexT mutex_;
     CondVarT notEmpty_;
     CondVarT notFull_;
     std::vector> threads_;
-    std::deque queue_;
-    int capacity_;
-    std::atomic running_;
+    std::deque queue_;
+    int capacity_;
+    std::atomic running_;
 };

 }  // namespace common
diff --git a/src/common/configuration.cpp b/src/common/configuration.cpp
index a35db6d357..28d699240f 100644
--- a/src/common/configuration.cpp
+++ b/src/common/configuration.cpp
@@ -53,8 +53,8 @@ bool Configuration::LoadConfig() {
             SetValue(key, value);
         }
     } else {
-        LOG(ERROR) << "Open config file '" << confFile_ << "' failed: "
-                   << strerror(errno);
+        LOG(ERROR) << "Open config file '" << confFile_
+                   << "' failed: " << strerror(errno);
         return false;
     }

@@ -62,8 +62,10 @@
 }

 bool Configuration::SaveConfig() {
-    // 当前先只保存配置,原文件的注释等内容先忽略
-    // TODO(yyk): 后续考虑改成原文件格式不变,只修改配置值
+    // Currently, only the configuration is saved; the comments and other
+    // contents of the original file are ignored
+    // TODO(yyk): In the future, consider keeping the original file format
+    // unchanged and modifying only the configuration values
     std::ofstream wStream(confFile_);
     if (wStream.is_open()) {
         for (auto& pair : config_) {
@@ -78,14 +80,13 @@
 }

 void Configuration::PrintConfig() {
     LOG(INFO) << std::string(30, '=') << "BEGIN" << std::string(30, '=');
-    for (auto &item : config_) {
+    for (auto& item : config_) {
         LOG(INFO) << item.first << std::string(60 - item.first.size(), ' ')
                   << ": " << item.second;
     }
     LOG(INFO) <<
std::string(31, '=') << "END" << std::string(31, '='); } - void Configuration::ExposeMetric(const std::string& exposeName) { if (!exposeName_.empty()) { LOG(WARNING) << "Config metric has been exposed."; @@ -98,20 +99,20 @@ void Configuration::ExposeMetric(const std::string& exposeName) { } } -void Configuration::UpdateMetricIfExposed(const std::string &key, - const std::string &value) { +void Configuration::UpdateMetricIfExposed(const std::string& key, + const std::string& value) { if (exposeName_.empty()) { return; } auto it = configMetric_.find(key); - // 如果配置项不存在,则新建配置项 + // If the configuration item does not exist, create a new configuration item if (it == configMetric_.end()) { ConfigItemPtr configItem = std::make_shared(); configItem->ExposeAs(exposeName_, key); configMetric_[key] = configItem; } - // 更新配置项 + // Update Configuration Items configMetric_[key]->Set("conf_name", key); configMetric_[key]->Set("conf_value", value); configMetric_[key]->Update(); @@ -121,33 +122,29 @@ std::map Configuration::ListConfig() const { return config_; } -void Configuration::SetConfigPath(const std::string &path) { - confFile_ = path; -} +void Configuration::SetConfigPath(const std::string& path) { confFile_ = path; } -std::string Configuration::GetConfigPath() { - return confFile_; -} +std::string Configuration::GetConfigPath() { return confFile_; } -std::string Configuration::GetStringValue(const std::string &key) { +std::string Configuration::GetStringValue(const std::string& key) { return GetValue(key); } -bool Configuration::GetStringValue(const std::string &key, std::string *out) { +bool Configuration::GetStringValue(const std::string& key, std::string* out) { return GetValue(key, out); } -void Configuration::SetStringValue(const std::string &key, - const std::string &value) { +void Configuration::SetStringValue(const std::string& key, + const std::string& value) { SetValue(key, value); } -int Configuration::GetIntValue(const std::string &key, uint64_t defaultvalue) { +int Configuration::GetIntValue(const std::string& key, uint64_t defaultvalue) { std::string value = GetValue(key); return (value == "") ? 
defaultvalue : std::stoi(value); } -bool Configuration::GetIntValue(const std::string &key, int *out) { +bool Configuration::GetIntValue(const std::string& key, int* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoi(res); @@ -156,7 +153,7 @@ bool Configuration::GetIntValue(const std::string &key, int *out) { return false; } -bool Configuration::GetUInt32Value(const std::string &key, uint32_t *out) { +bool Configuration::GetUInt32Value(const std::string& key, uint32_t* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoul(res); @@ -165,7 +162,7 @@ bool Configuration::GetUInt32Value(const std::string &key, uint32_t *out) { return false; } -bool Configuration::GetUInt64Value(const std::string &key, uint64_t *out) { +bool Configuration::GetUInt64Value(const std::string& key, uint64_t* out) { std::string res; if (GetValue(key, &res)) { *out = std::stoull(res); @@ -174,17 +171,16 @@ bool Configuration::GetUInt64Value(const std::string &key, uint64_t *out) { return false; } - -void Configuration::SetIntValue(const std::string &key, const int value) { +void Configuration::SetIntValue(const std::string& key, const int value) { SetValue(key, std::to_string(value)); } -void Configuration::SetUInt64Value( - const std::string &key, const uint64_t value) { +void Configuration::SetUInt64Value(const std::string& key, + const uint64_t value) { SetValue(key, std::to_string(value)); } -void Configuration::SetUInt32Value(const std::string &key, +void Configuration::SetUInt32Value(const std::string& key, const uint32_t value) { SetValue(key, std::to_string(value)); } @@ -203,14 +199,13 @@ void Configuration::SetInt64Value(const std::string& key, const int64_t value) { SetValue(key, std::to_string(value)); } -double Configuration::GetDoubleValue( - const std::string &key, - double defaultvalue) { +double Configuration::GetDoubleValue(const std::string& key, + double defaultvalue) { std::string value = GetValue(key); return (value == "") ? defaultvalue : std::stod(value); } -bool Configuration::GetDoubleValue(const std::string &key, double *out) { +bool Configuration::GetDoubleValue(const std::string& key, double* out) { std::string res; if (GetValue(key, &res)) { *out = std::stod(res); @@ -219,18 +214,17 @@ bool Configuration::GetDoubleValue(const std::string &key, double *out) { return false; } -void Configuration::SetDoubleValue(const std::string &key, const double value) { +void Configuration::SetDoubleValue(const std::string& key, const double value) { SetValue(key, std::to_string(value)); } - -double Configuration::GetFloatValue( - const std::string &key, float defaultvalue) { +double Configuration::GetFloatValue(const std::string& key, + float defaultvalue) { std::string value = GetValue(key); return (value == "") ? 
defaultvalue : std::stof(value); } -bool Configuration::GetFloatValue(const std::string &key, float *out) { +bool Configuration::GetFloatValue(const std::string& key, float* out) { std::string res; if (GetValue(key, &res)) { *out = std::stof(res); @@ -239,11 +233,11 @@ bool Configuration::GetFloatValue(const std::string &key, float *out) { return false; } -void Configuration::SetFloatValue(const std::string &key, const float value) { +void Configuration::SetFloatValue(const std::string& key, const float value) { SetValue(key, std::to_string(value)); } -bool Configuration::GetBoolValue(const std::string &key, bool defaultvalue) { +bool Configuration::GetBoolValue(const std::string& key, bool defaultvalue) { std::string svalue = config_[key]; transform(svalue.begin(), svalue.end(), svalue.begin(), ::tolower); @@ -253,7 +247,7 @@ bool Configuration::GetBoolValue(const std::string &key, bool defaultvalue) { return ret; } -bool Configuration::GetBoolValue(const std::string &key, bool *out) { +bool Configuration::GetBoolValue(const std::string& key, bool* out) { std::string res; if (GetValue(key, &res)) { transform(res.begin(), res.end(), res.begin(), ::tolower); @@ -273,16 +267,15 @@ bool Configuration::GetBoolValue(const std::string &key, bool *out) { return false; } - -void Configuration::SetBoolValue(const std::string &key, const bool value) { +void Configuration::SetBoolValue(const std::string& key, const bool value) { SetValue(key, std::to_string(value)); } -std::string Configuration::GetValue(const std::string &key) { +std::string Configuration::GetValue(const std::string& key) { return config_[key]; } -bool Configuration::GetValue(const std::string &key, std::string *out) { +bool Configuration::GetValue(const std::string& key, std::string* out) { if (config_.find(key) != config_.end()) { *out = config_[key]; return true; @@ -291,51 +284,47 @@ bool Configuration::GetValue(const std::string &key, std::string *out) { return false; } -void Configuration::SetValue(const std::string &key, const std::string &value) { +void Configuration::SetValue(const std::string& key, const std::string& value) { config_[key] = value; UpdateMetricIfExposed(key, value); } -void Configuration::GetValueFatalIfFail(const std::string& key, - int* value) { - LOG_IF(FATAL, !GetIntValue(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; +void Configuration::GetValueFatalIfFail(const std::string& key, int* value) { + LOG_IF(FATAL, !GetIntValue(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } void Configuration::GetValueFatalIfFail(const std::string& key, std::string* value) { - LOG_IF(FATAL, !GetStringValue(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; + LOG_IF(FATAL, !GetStringValue(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } -void Configuration::GetValueFatalIfFail(const std::string& key, - bool* value) { - LOG_IF(FATAL, !GetBoolValue(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; +void Configuration::GetValueFatalIfFail(const std::string& key, bool* value) { + LOG_IF(FATAL, !GetBoolValue(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } void Configuration::GetValueFatalIfFail(const std::string& key, uint32_t* value) { - LOG_IF(FATAL, !GetUInt32Value(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; + LOG_IF(FATAL, !GetUInt32Value(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } void Configuration::GetValueFatalIfFail(const std::string& key, 
uint64_t* value) { - LOG_IF(FATAL, !GetUInt64Value(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; + LOG_IF(FATAL, !GetUInt64Value(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } -void Configuration::GetValueFatalIfFail(const std::string& key, - float* value) { - LOG_IF(FATAL, !GetFloatValue(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; +void Configuration::GetValueFatalIfFail(const std::string& key, float* value) { + LOG_IF(FATAL, !GetFloatValue(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } -void Configuration::GetValueFatalIfFail(const std::string& key, - double* value) { - LOG_IF(FATAL, !GetDoubleValue(key, value)) << "Get " << key - << " from " << confFile_ << " fail"; +void Configuration::GetValueFatalIfFail(const std::string& key, double* value) { + LOG_IF(FATAL, !GetDoubleValue(key, value)) + << "Get " << key << " from " << confFile_ << " fail"; } } // namespace common diff --git a/src/common/configuration.h b/src/common/configuration.h index d546995ade..e3a5144e61 100644 --- a/src/common/configuration.h +++ b/src/common/configuration.h @@ -22,9 +22,10 @@ */ #include -#include + #include #include +#include #include #include "src/common/stringstatus.h" @@ -36,7 +37,7 @@ namespace curve { namespace common { using ConfigItemPtr = std::shared_ptr; -using ConfigMetricMap = std::unordered_map; +using ConfigMetricMap = std::unordered_map; class Configuration { public: @@ -45,94 +46,96 @@ class Configuration { void PrintConfig(); std::map ListConfig() const; /** - * 暴露config的metric供采集 - * 如果metric已经暴露,则直接返回 - * @param exposeName: 对外暴露的metric的名字 + * Expose the metric of config for collection + * If the metric has already been exposed, return it directly + * @param exposeName: The name of the exposed metric */ void ExposeMetric(const std::string& exposeName); - void SetConfigPath(const std::string &path); + void SetConfigPath(const std::string& path); std::string GetConfigPath(); - std::string GetStringValue(const std::string &key); + std::string GetStringValue(const std::string& key); /* - * @brief GetStringValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetStringValue(const std::string &key, std::string *out); - void SetStringValue(const std::string &key, const std::string &value); - - int GetIntValue(const std::string &key, uint64_t defaultvalue = 0); + * @brief GetStringValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetStringValue(const std::string& key, std::string* out); + void SetStringValue(const std::string& key, const std::string& value); + + int GetIntValue(const std::string& key, uint64_t defaultvalue = 0); /* - * @brief GetIntValue/GetUInt32Value/GetUInt64Value 获取指定配置项的值 //NOLINT - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetIntValue(const std::string &key, int *out); - bool GetUInt32Value(const std::string &key, uint32_t *out); - bool GetUInt64Value(const std::string &key, uint64_t *out); - void SetIntValue(const std::string &key, const int value); - void SetUInt32Value(const std::string &key, const uint32_t value); - void SetUInt64Value(const std::string &key, const uint64_t value); + * @brief GetIntValue/GetUInt32Value/GetUInt64Value Get the value of the + * specified configuration 
item//NOLINT + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetIntValue(const std::string& key, int* out); + bool GetUInt32Value(const std::string& key, uint32_t* out); + bool GetUInt64Value(const std::string& key, uint64_t* out); + void SetIntValue(const std::string& key, const int value); + void SetUInt32Value(const std::string& key, const uint32_t value); + void SetUInt64Value(const std::string& key, const uint64_t value); bool GetInt64Value(const std::string& key, int64_t* out); void SetInt64Value(const std::string& key, const int64_t value); - double GetDoubleValue(const std::string &key, double defaultvalue = 0.0); + double GetDoubleValue(const std::string& key, double defaultvalue = 0.0); /* - * @brief GetDoubleValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetDoubleValue(const std::string &key, double *out); - void SetDoubleValue(const std::string &key, const double value); - - double GetFloatValue(const std::string &key, float defaultvalue = 0.0); + * @brief GetDoubleValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetDoubleValue(const std::string& key, double* out); + void SetDoubleValue(const std::string& key, const double value); + + double GetFloatValue(const std::string& key, float defaultvalue = 0.0); /* - * @brief GetFloatValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetFloatValue(const std::string &key, float *out); - void SetFloatValue(const std::string &key, const float value); - - bool GetBoolValue(const std::string &key, bool defaultvalue = false); + * @brief GetFloatValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetFloatValue(const std::string& key, float* out); + void SetFloatValue(const std::string& key, const float value); + + bool GetBoolValue(const std::string& key, bool defaultvalue = false); /* - * @brief GetBoolValue 获取指定配置项的值 - * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 - * - * @return false-未获取到 true-获取成功 - */ - bool GetBoolValue(const std::string &key, bool *out); - void SetBoolValue(const std::string &key, const bool value); - - std::string GetValue(const std::string &key); - bool GetValue(const std::string &key, std::string *out); - void SetValue(const std::string &key, const std::string &value); + * @brief GetBoolValue Get the value of the specified configuration item + * + * @param[in] key configuration item name + * @param[out] out The value obtained + * + * @return false-did not obtain, true-obtained successfully + */ + bool GetBoolValue(const std::string& key, bool* out); + void SetBoolValue(const std::string& key, const bool value); + + std::string GetValue(const std::string& key); + bool GetValue(const std::string& key, std::string* out); + void SetValue(const std::string& key, const std::string& value); /* - * @brief GetValueFatalIfFail 获取指定配置项的值,失败打FATAL日志 - * - * @param[in] key 配置项名称 - * @param[out] value 获取的值 - * - * @return 无 - */ + * @brief GetValueFatalIfFail to obtain the value of the specified + * configuration item, failed to log FATAL + 
*
+     * @param[in] key: configuration item name
+     * @param[out] value: the value obtained
+     *
+     * @return None
+     */
     void GetValueFatalIfFail(const std::string& key, int* value);
     void GetValueFatalIfFail(const std::string& key, std::string* value);
     void GetValueFatalIfFail(const std::string& key, bool* value);
@@ -141,11 +144,11 @@
     void GetValueFatalIfFail(const std::string& key, float* value);
     void GetValueFatalIfFail(const std::string& key, double* value);

-    bool GetValue(const std::string &key, int *value) {
+    bool GetValue(const std::string& key, int* value) {
         return GetIntValue(key, value);
     }

-    bool GetValue(const std::string &key, uint32_t *value) {
+    bool GetValue(const std::string& key, uint32_t* value) {
         return GetUInt32Value(key, value);
     }

@@ -171,19 +174,19 @@
  private:
    /**
-     * 更新新的配置到metric
-     * @param 要更新的metric
+     * Update a new configuration value to its metric
+     * @param key: the configuration item to update
+     * @param value: the new value
      */
-    void UpdateMetricIfExposed(const std::string &key,
-                               const std::string &value);
+    void UpdateMetricIfExposed(const std::string& key,
+                               const std::string& value);

 private:
-    std::string confFile_;
-    std::map config_;
-    // metric对外暴露的名字
-    std::string exposeName_;
-    // 每一个配置项使用单独的一个metric,用map管理
-    ConfigMetricMap configMetric_;
+    std::string confFile_;
+    std::map config_;
+    // The exposed name of the metric
+    std::string exposeName_;
+    // Each configuration item uses a separate metric, managed with a map
+    ConfigMetricMap configMetric_;
 };

 }  // namespace common
diff --git a/src/common/crc32.h b/src/common/crc32.h
index 99916fe873..7df16e6654 100644
--- a/src/common/crc32.h
+++ b/src/common/crc32.h
@@ -23,34 +23,36 @@
 #ifndef SRC_COMMON_CRC32_H_
 #define SRC_COMMON_CRC32_H_

+#include
 #include
 #include

-#include
-
 namespace curve {
 namespace common {

 /**
- * 计算数据的CRC32校验码(CRC32C),基于brpc的crc32库进行封装
- * @param pData 待计算的数据
- * @param iLen 待计算的数据长度
- * @return 32位的数据CRC32校验码
+ * Calculate the CRC32 checksum (CRC32C) of the data; a wrapper around brpc's
+ * crc32 library
+ * @param pData The data to be calculated
+ * @param iLen The length of the data to be calculated
+ * @return 32-bit CRC32 checksum of the data
 */
-inline uint32_t CRC32(const char *pData, size_t iLen) {
+inline uint32_t CRC32(const char* pData, size_t iLen) {
     return butil::crc32c::Value(pData, iLen);
 }
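// A quick sanity sketch of how the two overloads relate (illustrative only;
// it simply restates the constraint documented below):
//
//   uint32_t whole = curve::common::CRC32("hello world", 11);
//   uint32_t part = curve::common::CRC32("hello ", 6);
//   uint32_t joined = curve::common::CRC32(part, "world", 5);
//   // joined == whole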
 /**
- * 计算数据的CRC32校验码(CRC32C),基于brpc的crc32库进行封装. 此函数支持继承式
- * 计算,以支持对SGL类型的数据计算单个CRC校验码。满足如下约束:
- * CRC32("hello world", 11) == CRC32(CRC32("hello ", 6), "world", 5)
- * @param crc 起始的crc校验码
- * @param pData 待计算的数据
- * @param iLen 待计算的数据长度
- * @return 32位的数据CRC32校验码
+ * Calculate the CRC32 checksum (CRC32C) of the data; a wrapper around brpc's
+ * crc32 library. This function supports incremental calculation, so that a
+ * single CRC checksum can be computed over SGL-type data. It satisfies the
+ * following constraint:
+ * CRC32("hello world", 11) == CRC32(CRC32("hello ", 6), "world", 5)
+ * @param crc starting crc checksum
+ * @param pData The data to be calculated
+ * @param iLen The length of the data to be calculated
+ * @return 32-bit CRC32 checksum of the data
 */
-inline uint32_t CRC32(uint32_t crc, const char *pData, size_t iLen) {
+inline uint32_t CRC32(uint32_t crc, const char* pData, size_t iLen) {
     return butil::crc32c::Extend(crc, pData, iLen);
 }
diff --git a/src/common/curve_define.h b/src/common/curve_define.h
index 04d07ad5ec..1bea28e298 100644
--- a/src/common/curve_define.h
+++ b/src/common/curve_define.h
@@ -28,34 +28,35 @@
 #include

 #ifndef DLOG_EVERY_SECOND
-#define DLOG_EVERY_SECOND(severity) \
+#define DLOG_EVERY_SECOND(severity) \
     BAIDU_LOG_IF_EVERY_SECOND_IMPL(DLOG_IF, severity, true)
 #endif

 namespace curve {
 namespace common {
-// curve系统中共用的定义,对于各模块自己独有的放在各模块自己的define中
-using ChunkID = uint64_t;
-using CopysetID = uint32_t;
-using ChunkIndex = uint32_t;
-using LogicPoolID = uint32_t;
-using ChunkServerID = uint32_t;
-using SnapshotID = uint64_t;
-using SequenceNum = uint64_t;
+// Definitions shared across the curve system; definitions specific to a
+// module are placed in that module's own define file
+using ChunkID = uint64_t;
+using CopysetID = uint32_t;
+using ChunkIndex = uint32_t;
+using LogicPoolID = uint32_t;
+using ChunkServerID = uint32_t;
+using SnapshotID = uint64_t;
+using SequenceNum = uint64_t;

-using FileSeqType = uint64_t;
-using PageSizeType = uint32_t;
-using ChunkSizeType = uint32_t;
-using SegmentSizeType = uint32_t;
+using FileSeqType = uint64_t;
+using PageSizeType = uint32_t;
+using ChunkSizeType = uint32_t;
+using SegmentSizeType = uint32_t;

-using Status = butil::Status;
-using EndPoint = butil::EndPoint;
+using Status = butil::Status;
+using EndPoint = butil::EndPoint;

-const uint32_t kKB = 1024;
-const uint32_t kMB = 1024*kKB;
-const uint32_t kGB = 1024*kMB;
+const uint32_t kKB = 1024;
+const uint32_t kMB = 1024 * kKB;
+const uint32_t kGB = 1024 * kMB;

-// maigic number用于FilePool_meta file计算crc
+// magic number used for the crc calculation of the FilePool_meta file
 const char kFilePoolMagic[3] = "01";

 constexpr uint32_t kDefaultBlockSize = 4096;
diff --git a/src/common/define.h b/src/common/define.h
index e3f90d7bd0..6001e48120 100644
--- a/src/common/define.h
+++ b/src/common/define.h
@@ -23,13 +23,13 @@
 #ifndef SRC_COMMON_DEFINE_H_
 #define SRC_COMMON_DEFINE_H_

-#include
 #include
+#include

 namespace curve {
 namespace snapshotcloneserver {

-// snapshotcloneservice字符串常量定义
+// snapshotcloneservice string constant definitions
 extern const char* kServiceName;
 // action
 extern const char* kCreateSnapshotAction;
@@ -67,72 +67,67 @@ extern const char* kTotalCountStr;
 extern const char* kSnapshotsStr;
 extern const char* kTaskInfosStr;

-
 typedef std::string UUID;
 using TaskIdType = UUID;

-enum class CloneTaskType {
-    kClone = 0,
-    kRecover
-};
+enum class CloneTaskType { kClone = 0, kRecover };

-// 未初始序列号
+// Uninitialized sequence number
 const uint64_t kUnInitializeSeqNum = 0;
-// 初始序列号
+// Initial sequence number
 const uint64_t kInitializeSeqNum = 1;

-// 错误码:执行成功
+// Error code: Execution successful
 const int kErrCodeSuccess = 0;
-// 错误码: 内部错误
+// Error code: Internal error
 const int kErrCodeInternalError = -1;
-// 错误码:服务器初始化失败
+// Error code: Server initialization failed
 const int kErrCodeServerInitFail = -2;
-// 错误码:服务器启动失败
+// Error code: Server startup failed
 const int kErrCodeServerStartFail = -3;
-// 错误码:服务已停止
+// Error code: The service has been stopped
 const int
kErrCodeServiceIsStop = -4; -// 错误码:非法请求 +// Error code: Illegal request const int kErrCodeInvalidRequest = -5; -// 错误码:任务已存在 +// Error code: Task already exists const int kErrCodeTaskExist = -6; -// 错误码:非法的用户 +// Error code: Illegal user const int kErrCodeInvalidUser = -7; -// 错误码:文件不存在 +// Error code: File does not exist const int kErrCodeFileNotExist = -8; -// 错误码:文件状态异常 +// Error code: File status abnormal const int kErrCodeFileStatusInvalid = -9; -// 错误码:chunk大小未按chunk分片大小对齐 +// Error code: Chunk size not aligned with chunk partition size const int kErrCodeChunkSizeNotAligned = -10; -// 错误码:文件名不匹配 +// Error code: File name mismatch const int kErrCodeFileNameNotMatch = -11; -// 错误码: 不能删除未完成的快照 +// Error code: Unable to delete incomplete snapshot const int kErrCodeSnapshotCannotDeleteUnfinished = -12; -// 错误码: 不能对存在异常快照的文件打快照,或不能对存在错误的目标文件克隆/恢复 +// Error code: Cannot take a snapshot of files with abnormal snapshots, or +// cannot clone/recover target files with errors const int kErrCodeSnapshotCannotCreateWhenError = -13; -// 错误码:取消的快照已完成 +// Error code: Canceled snapshot completed const int kErrCodeCannotCancelFinished = -14; -// 错误码:不能从未完成或存在错误的快照克隆 +// Error code: Cannot clone a snapshot that has never been completed or has +// errors const int kErrCodeInvalidSnapshot = -15; -// 错误码:不能删除正在克隆的快照 +// Error code: Unable to delete snapshot being cloned const int kErrCodeSnapshotCannotDeleteCloning = -16; -// 错误码:不能清理未完成的克隆 +// Error code: Unable to clean up incomplete clones const int kErrCodeCannotCleanCloneUnfinished = -17; -// 错误码:快照到达上限 +// Error code: The snapshot has reached the upper limit const int kErrCodeSnapshotCountReachLimit = -18; -// 错误码:文件已存在 +// Error code: File already exists const int kErrCodeFileExist = -19; -// 错误码:克隆任务已满 +// Error code: Clone task is full const int kErrCodeTaskIsFull = -20; -// 错误码:不支持 +// Error code: not supported const int kErrCodeNotSupport = -21; extern std::map code2Msg; -std::string BuildErrorMessage( - int errCode, - const std::string &requestId, - const std::string &uuid = ""); - +std::string BuildErrorMessage(int errCode, const std::string& requestId, + const std::string& uuid = ""); // clone progress constexpr uint32_t kProgressCloneStart = 0; @@ -144,8 +139,6 @@ constexpr uint32_t kProgressRecoverChunkBegin = kProgressMetaInstalled; constexpr uint32_t kProgressRecoverChunkEnd = 95; constexpr uint32_t kProgressCloneComplete = 100; - - } // namespace snapshotcloneserver } // namespace curve diff --git a/src/common/fs_util.h b/src/common/fs_util.h index 3e591fd5ca..6b23b9558c 100644 --- a/src/common/fs_util.h +++ b/src/common/fs_util.h @@ -24,16 +24,18 @@ #define SRC_COMMON_FS_UTIL_H_ #include + #include #include + #include "src/common/string_util.h" namespace curve { namespace common { -// 计算path2相对于path1的相对路径 -inline std::string CalcRelativePath(const std::string &path1, - const std::string &path2) { +// Calculate the relative path of path2 relative to path1 +inline std::string CalcRelativePath(const std::string& path1, + const std::string& path2) { if (path1.empty() || path2.empty()) { return ""; } @@ -66,7 +68,7 @@ inline std::string CalcRelativePath(const std::string &path1, } // Check whether the path2 is the subpath of path1 -inline bool IsSubPath(const std::string &path1, const std::string &path2) { +inline bool IsSubPath(const std::string& path1, const std::string& path2) { return StringStartWith(CalcRelativePath(path1, path2), "./"); } diff --git a/src/common/interruptible_sleeper.h b/src/common/interruptible_sleeper.h index 
73c2cba645..7f0f641674 100644
--- a/src/common/interruptible_sleeper.h
+++ b/src/common/interruptible_sleeper.h
@@ -24,32 +24,35 @@
 #define SRC_COMMON_INTERRUPTIBLE_SLEEPER_H_

 #include  // NOLINT
+
 #include "src/common/concurrent/concurrent.h"

 namespace curve {
 namespace common {
 /**
- * InterruptibleSleeper 实现可 interruptible 的 sleep 功能.
- * 正常情况下 wait_for 超时, 接收到退出信号之后, 程序会立即被唤醒,
- * 退出 while 循环, 并执行 cleanup 代码.
+ * InterruptibleSleeper implements an interruptible sleep.
+ * Normally wait_for simply times out; once an exit signal is received, the
+ * program is woken up immediately, leaves the while loop, and runs the
+ * cleanup code.
 */
 class InterruptibleSleeper {
 public:
    /**
-     * @brief wait_for 等待指定时间,如果接受到退出信号立刻返回
+     * @brief wait_for Waits for the specified duration, returning immediately
+     * if an exit signal is received
     *
-     * @param[in] time 指定wait时长
+     * @param[in] time specifies the wait duration
     *
-     * @return false-收到退出信号 true-超时后退出
+     * @return false - an exit signal was received, true - exited after the
+     * timeout
     */
-    template
+    template
     bool wait_for(std::chrono::duration const& time) {
         UniqueLock lock(m);
-        return !cv.wait_for(lock, time, [&]{return terminate;});
+        return !cv.wait_for(lock, time, [&] { return terminate; });
     }

    /**
-     * @brief interrupt 给当前wait发送退出信号
+     * @brief interrupt Sends an exit signal to the current wait
     */
     void interrupt() {
         UniqueLock lock(m);
@@ -72,4 +75,3 @@
 }  // namespace curve

 #endif  // SRC_COMMON_INTERRUPTIBLE_SLEEPER_H_
-
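// A minimal usage sketch of InterruptibleSleeper (illustrative only;
// DoPeriodicWork() and Cleanup() are placeholders):
//
//   curve::common::InterruptibleSleeper sleeper;
//   std::thread worker([&sleeper] {
//       // wait_for returns true on timeout, false once interrupted
//       while (sleeper.wait_for(std::chrono::seconds(5))) {
//           DoPeriodicWork();
//       }
//       Cleanup();
//   });
//   sleeper.interrupt();  // wake the worker immediately
//   worker.join();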
diff --git a/src/common/location_operator.cpp b/src/common/location_operator.cpp
index f9d5a8f4c8..3571f4e040 100644
--- a/src/common/location_operator.cpp
+++ b/src/common/location_operator.cpp
@@ -32,22 +32,21 @@ std::string LocationOperator::GenerateS3Location(
     return location;
 }

-std::string LocationOperator::GenerateCurveLocation(
-    const std::string& fileName, off_t offset) {
+std::string LocationOperator::GenerateCurveLocation(const std::string& fileName,
+                                                    off_t offset) {
     std::string location(fileName);
     location.append(kOriginPathSeprator)
-            .append(std::to_string(offset))
-            .append(kOriginTypeSeprator)
-            .append(CURVE_TYPE);
+        .append(std::to_string(offset))
+        .append(kOriginTypeSeprator)
+        .append(CURVE_TYPE);
     return location;
 }

-OriginType LocationOperator::ParseLocation(
-    const std::string& location, std::string* originPath) {
-    // 找到最后一个"@",不能简单用SplitString
-    // 因为不能保证OriginPath中不包含"@"
-    std::string::size_type pos =
-        location.find_last_of(kOriginTypeSeprator);
+OriginType LocationOperator::ParseLocation(const std::string& location,
+                                           std::string* originPath) {
+    // Find the last '@'; SplitString cannot simply be used here,
+    // because OriginPath is not guaranteed to be free of '@'
+    std::string::size_type pos = location.find_last_of(kOriginTypeSeprator);
     if (std::string::npos == pos) {
         return OriginType::InvalidOrigin;
     }
@@ -67,18 +66,17 @@
     return type;
 }

-bool LocationOperator::ParseCurveChunkPath(
-    const std::string& originPath, std::string* fileName, off_t* offset) {
-    std::string::size_type pos =
-        originPath.find_last_of(kOriginPathSeprator);
+bool LocationOperator::ParseCurveChunkPath(const std::string& originPath,
+                                           std::string* fileName,
+                                           off_t* offset) {
+    std::string::size_type pos = originPath.find_last_of(kOriginPathSeprator);
     if (std::string::npos == pos) {
         return false;
     }

     std::string file = originPath.substr(0, pos);
     std::string offStr = originPath.substr(pos + 1);
-    if (file.empty() || offStr.empty())
-        return false;
+    if (file.empty() || offStr.empty()) return false;

     if (fileName != nullptr) {
         *fileName = file;
diff --git a/src/common/location_operator.h b/src/common/location_operator.h
index a86b33d158..2669beb4c3 100644
--- a/src/common/location_operator.h
+++ b/src/common/location_operator.h
@@ -43,43 +43,45 @@ enum class OriginType {
 class LocationOperator {
  public:
    /**
-     * 生成s3的location
-     * location格式:${objectname}@s3
-     * @param objectName:s3上object的名称
-     * @return:生成的location
+     * Generate a location for s3
+     * location format: ${objectname}@s3
+     * @param objectName: The name of the object on s3
+     * @return: The generated location
      */
     static std::string GenerateS3Location(const std::string& objectName);

    /**
-     * 生成curve的location
-     * location格式:${filename}:${offset}@cs
+     * Generate a curve location
+     * location format: ${filename}:${offset}@cs
      */
     static std::string GenerateCurveLocation(const std::string& fileName,
                                              off_t offset);

    /**
-     * 解析数据源的位置信息
-     * location格式:
-     * s3示例:${objectname}@s3
-     * curve示例:${filename}:${offset}@cs
+     * Parse the location information of a data source
+     * location format:
+     * s3 example: ${objectname}@s3
+     * curve example: ${filename}:${offset}@cs
     *
-     * @param location[in]:数据源的位置,其格式为originPath@originType
-     * @param originPath[out]:表示数据源在源端的路径
-     * @return:返回OriginType,表示源数据的源端类型是s3还是curve
-     * 如果路径格式不正确或者originType无法识别,则返回InvalidOrigin
+     * @param location[in]: The location of the data source, in the format
+     * originPath@originType
+     * @param originPath[out]: the path of the data source on the source side
+     * @return: Returns OriginType, indicating whether the source type of the
+     * data is s3 or curve. If the path format is incorrect or the originType
+     * is not recognized, InvalidOrigin is returned
      */
     static OriginType ParseLocation(const std::string& location,
                                     std::string* originPath);

    /**
-     * 解析curvefs的originPath
-     * 格式:${filename}:${offset}
-     * @param originPath[in]:数据源在curvefs上的路径
-     * @param fileName[out]:数据源所属文件名
-     * @param offset[out]:数据源在文件中的偏移
-     * @return: 解析成功返回true,失败返回false
+     * Parse the originPath of curvefs
+     * Format: ${filename}:${offset}
+     * @param originPath[in]: The path of the data source on curvefs
+     * @param fileName[out]: The name of the file the data source belongs to
+     * @param offset[out]: The offset of the data source within the file
+     * @return: Returns true on successful parsing, false on failure
      */
     static bool ParseCurveChunkPath(const std::string& originPath,
-                                    std::string* fileName,
-                                    off_t* offset);
+                                    std::string* fileName, off_t* offset);
 };

 }  // namespace common
diff --git a/src/common/net_common.h b/src/common/net_common.h
index 8bf058e134..c31cb7b770 100644
--- a/src/common/net_common.h
+++ b/src/common/net_common.h
@@ -23,27 +23,27 @@
 #ifndef SRC_COMMON_NET_COMMON_H_
 #define SRC_COMMON_NET_COMMON_H_

-#include
-#include
-#include  // in_addr
-#include  // inet_pton, inet_ntop
+#include  // inet_pton, inet_ntop
 #include
+#include
+#include  // in_addr
+#include
+
 #include

 namespace curve {
 namespace common {
 class NetCommon {
  public:
-    // addr形式为"ip:port"
+    // addr takes the form "ip:port"
     static bool CheckAddressValid(const std::string& addr) {
         std::string ip;
         uint32_t port;
         return SplitAddrToIpPort(addr, &ip, &port);
     }

-    // addr形式为"ip:port"
-    static bool SplitAddrToIpPort(const std::string& addr,
-                                  std::string* ipstr,
+    // addr takes the form "ip:port"
+    static bool SplitAddrToIpPort(const std::string& addr, std::string* ipstr,
                                   uint32_t* port) {
         size_t splitpos = addr.find(":");
         if (splitpos ==
std::string::npos) { @@ -91,7 +91,7 @@ class NetCommon { return true; } }; -} // namespace common -} // namespace curve +} // namespace common +} // namespace curve #endif // SRC_COMMON_NET_COMMON_H_ diff --git a/src/common/s3_adapter.cpp b/src/common/s3_adapter.cpp index e3b3d917a0..8f8b911405 100644 --- a/src/common/s3_adapter.cpp +++ b/src/common/s3_adapter.cpp @@ -35,655 +35,735 @@ #define AWS_ALLOCATE_TAG __FILE__ ":" STRINGIFY(__LINE__) -namespace curve { -namespace common { - -std::once_flag S3INIT_FLAG; -std::once_flag S3SHUTDOWN_FLAG; -Aws::SDKOptions AWS_SDK_OPTIONS; - -namespace { - -// https://github.com/aws/aws-sdk-cpp/issues/1430 -class PreallocatedIOStream : public Aws::IOStream { - public: - PreallocatedIOStream(char *buf, size_t size) - : Aws::IOStream(new Aws::Utils::Stream::PreallocatedStreamBuf( - reinterpret_cast(buf), size)) {} - - PreallocatedIOStream(const char *buf, size_t size) - : PreallocatedIOStream(const_cast(buf), size) {} - - ~PreallocatedIOStream() { - // corresponding new in constructor - delete rdbuf(); - } -}; - -Aws::String GetObjectRequestRange(uint64_t offset, uint64_t len) { - auto range = - "bytes=" + std::to_string(offset) + "-" + std::to_string(offset + len); - return {range.data(), range.size()}; -} - -} // namespace - -void InitS3AdaptorOption(Configuration* conf, S3AdapterOption* s3Opt) { - InitS3AdaptorOptionExceptS3InfoOption(conf, s3Opt); - LOG_IF(FATAL, !conf->GetStringValue("s3.endpoint", &s3Opt->s3Address)); - LOG_IF(FATAL, !conf->GetStringValue("s3.ak", &s3Opt->ak)); - LOG_IF(FATAL, !conf->GetStringValue("s3.sk", &s3Opt->sk)); - LOG_IF(FATAL, !conf->GetStringValue("s3.bucket_name", &s3Opt->bucketName)); -} - -void InitS3AdaptorOptionExceptS3InfoOption(Configuration* conf, - S3AdapterOption* s3Opt) { - LOG_IF(FATAL, !conf->GetIntValue("s3.logLevel", &s3Opt->loglevel)); - LOG_IF(FATAL, !conf->GetStringValue("s3.logPrefix", &s3Opt->logPrefix)); - LOG_IF(FATAL, !conf->GetIntValue("s3.http_scheme", &s3Opt->scheme)); - LOG_IF(FATAL, !conf->GetBoolValue("s3.verify_SSL", &s3Opt->verifySsl)); - LOG_IF(FATAL, !conf->GetStringValue("s3.user_agent", &s3Opt->userAgent)); - LOG_IF(FATAL, !conf->GetIntValue("s3.maxConnections", - &s3Opt->maxConnections)); - LOG_IF(FATAL, !conf->GetIntValue("s3.connectTimeout", - &s3Opt->connectTimeout)); - LOG_IF(FATAL, !conf->GetIntValue("s3.requestTimeout", - &s3Opt->requestTimeout)); - LOG_IF(FATAL, !conf->GetIntValue("s3.asyncThreadNum", - &s3Opt->asyncThreadNum)); - LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.iopsTotalLimit", - &s3Opt->iopsTotalLimit)); - LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.iopsReadLimit", - &s3Opt->iopsReadLimit)); - LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.iopsWriteLimit", - &s3Opt->iopsWriteLimit)); - LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.bpsTotalMB", - &s3Opt->bpsTotalMB)); - LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.bpsReadMB", - &s3Opt->bpsReadMB)); - LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.bpsWriteMB", - &s3Opt->bpsWriteMB)); - LOG_IF(FATAL, !conf->GetBoolValue("s3.useVirtualAddressing", - &s3Opt->useVirtualAddressing)); - LOG_IF(FATAL, !conf->GetStringValue("s3.region", &s3Opt->region)); - - if (!conf->GetUInt64Value("s3.maxAsyncRequestInflightBytes", - &s3Opt->maxAsyncRequestInflightBytes)) { - LOG(WARNING) << "Not found s3.maxAsyncRequestInflightBytes in conf"; - s3Opt->maxAsyncRequestInflightBytes = 0; - } -} - -void S3Adapter::Init(const std::string& path) { - LOG(INFO) << "Loading s3 configurations"; - conf_.SetConfigPath(path); - 
LOG_IF(FATAL, !conf_.LoadConfig()) - << "Failed to open s3 config file: " << conf_.GetConfigPath(); - S3AdapterOption option; - InitS3AdaptorOption(&conf_, &option); - Init(option); -} - -void S3Adapter::InitExceptFsS3Option(const std::string& path) { - LOG(INFO) << "Loading s3 configurations"; - conf_.SetConfigPath(path); - LOG_IF(FATAL, !conf_.LoadConfig()) - << "Failed to open s3 config file: " << conf_.GetConfigPath(); - S3AdapterOption option; - InitS3AdaptorOptionExceptS3InfoOption(&conf_, &option); - Init(option); -} - -void S3Adapter::Init(const S3AdapterOption &option) { - auto initSDK = [&]() { - AWS_SDK_OPTIONS.loggingOptions.logLevel = - Aws::Utils::Logging::LogLevel(option.loglevel); - AWS_SDK_OPTIONS.loggingOptions.defaultLogPrefix = - option.logPrefix.c_str(); - Aws::InitAPI(AWS_SDK_OPTIONS); - }; - std::call_once(S3INIT_FLAG, initSDK); - s3Address_ = option.s3Address.c_str(); - s3Ak_ = option.ak.c_str(); - s3Sk_ = option.sk.c_str(); - bucketName_ = option.bucketName.c_str(); - clientCfg_ = Aws::New(AWS_ALLOCATE_TAG); - clientCfg_->scheme = Aws::Http::Scheme(option.scheme); - clientCfg_->verifySSL = option.verifySsl; - clientCfg_->userAgent = option.userAgent.c_str(); - clientCfg_->region = option.region.c_str(); - clientCfg_->maxConnections = option.maxConnections; - clientCfg_->connectTimeoutMs = option.connectTimeout; - clientCfg_->requestTimeoutMs = option.requestTimeout; - clientCfg_->endpointOverride = s3Address_; - auto asyncThreadNum = option.asyncThreadNum; - LOG(INFO) << "S3Adapter init thread num = " << asyncThreadNum << std::endl; - clientCfg_->executor = - Aws::MakeShared( - "S3Adapter.S3Client", asyncThreadNum); - s3Client_ = Aws::New(AWS_ALLOCATE_TAG, - Aws::Auth::AWSCredentials(s3Ak_, s3Sk_), - *clientCfg_, - Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, - option.useVirtualAddressing); - - ReadWriteThrottleParams params; - params.iopsTotal.limit = option.iopsTotalLimit; - params.iopsRead.limit = option.iopsReadLimit; - params.iopsWrite.limit = option.iopsWriteLimit; - params.bpsTotal.limit = option.bpsTotalMB * kMB; - params.bpsRead.limit = option.bpsReadMB * kMB; - params.bpsWrite.limit = option.bpsWriteMB * kMB; - - throttle_ = new Throttle(); - throttle_->UpdateThrottleParams(params); - - inflightBytesThrottle_.reset(new AsyncRequestInflightBytesThrottle( - option.maxAsyncRequestInflightBytes == 0 - ? 
UINT64_MAX - : option.maxAsyncRequestInflightBytes)); -} - -void S3Adapter::Deinit() { - // delete s3client in s3adapter - if (clientCfg_ != nullptr) { - Aws::Delete(clientCfg_); - clientCfg_ = nullptr; - } - if (s3Client_ != nullptr) { - Aws::Delete(s3Client_); - s3Client_ = nullptr; - } - if (throttle_ != nullptr) { - delete throttle_; - throttle_ = nullptr; - } - if (inflightBytesThrottle_ != nullptr) - inflightBytesThrottle_.release(); -} - -void S3Adapter::Shutdown() { - // one program should only call once - auto shutdownSDK = [&]() { - Aws::ShutdownAPI(AWS_SDK_OPTIONS); - }; - std::call_once(S3SHUTDOWN_FLAG, shutdownSDK); -} - -void S3Adapter::Reinit(const S3AdapterOption& option) { - Deinit(); - Init(option); -} - -std::string S3Adapter::GetS3Ak() { - return std::string(s3Ak_.c_str(), s3Ak_.size()); -} - -std::string S3Adapter::GetS3Sk() { - return std::string(s3Sk_.c_str(), s3Sk_.size()); -} - -std::string S3Adapter::GetS3Endpoint() { - return std::string(s3Address_.c_str(), s3Address_.size()); -} - -int S3Adapter::CreateBucket() { - Aws::S3::Model::CreateBucketRequest request; - request.SetBucket(bucketName_); - Aws::S3::Model::CreateBucketConfiguration conf; - conf.SetLocationConstraint( - Aws::S3::Model::BucketLocationConstraint::us_east_1); - request.SetCreateBucketConfiguration(conf); - auto response = s3Client_->CreateBucket(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(ERROR) << "CreateBucket error:" - << bucketName_ - << "--" - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} - -int S3Adapter::DeleteBucket() { - Aws::S3::Model::DeleteBucketRequest request; - request.SetBucket(bucketName_); - auto response = s3Client_->DeleteBucket(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(ERROR) << "DeleteBucket error:" - << bucketName_ - << "--" - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} - -bool S3Adapter::BucketExist() { - Aws::S3::Model::HeadBucketRequest request; - request.SetBucket(bucketName_); - auto response = s3Client_->HeadBucket(request); - if (response.IsSuccess()) { - return true; - } else { - LOG(ERROR) << "HeadBucket error:" - << bucketName_ - << "--" - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return false; - } -} - -int S3Adapter::PutObject(const Aws::String &key, const char *buffer, - const size_t bufferSize) { - Aws::S3::Model::PutObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(key); - - request.SetBody(Aws::MakeShared(AWS_ALLOCATE_TAG, - buffer, bufferSize)); - - if (throttle_) { - throttle_->Add(false, bufferSize); - } - - auto response = s3Client_->PutObject(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(ERROR) << "PutObject error, bucket: " << bucketName_ - << ", key: " << key << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} - -int S3Adapter::PutObject(const Aws::String &key, const std::string &data) { - return PutObject(key, data.data(), data.size()); -} -/* - int S3Adapter::GetObject(const Aws::String &key, - void *buffer, - const int bufferSize) { - Aws::S3::Model::GetObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(key); - request.SetResponseStreamFactory( - [buffer, bufferSize](){ - std::unique_ptr - stream(Aws::New("stream")); - stream->rdbuf()->pubsetbuf(buffer, - bufferSize); - return stream.release(); - }); - auto response = 
s3Client_->GetObject(request); - if (response.IsSuccess()) { - *buffer << response.GetResult().GetBody().rdbuf(); - } else { - LOG(ERROR) << "GetObject error: " - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } - } -*/ - -void S3Adapter::PutObjectAsync(std::shared_ptr context) { - Aws::S3::Model::PutObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(Aws::String{context->key.c_str(), context->key.size()}); - - request.SetBody(Aws::MakeShared( - AWS_ALLOCATE_TAG, context->buffer, context->bufferSize)); - - auto originCallback = context->cb; - auto wrapperCallback = - [this, - originCallback](const std::shared_ptr& ctx) { - inflightBytesThrottle_->OnComplete(ctx->bufferSize); - ctx->cb = originCallback; - ctx->cb(ctx); - }; - - Aws::S3::PutObjectResponseReceivedHandler handler = - [context]( - const Aws::S3::S3Client * /*client*/, - const Aws::S3::Model::PutObjectRequest & /*request*/, - const Aws::S3::Model::PutObjectOutcome &response, - const std::shared_ptr - &awsCtx) { - std::shared_ptr ctx = - std::const_pointer_cast( - std::dynamic_pointer_cast( - awsCtx)); - - LOG_IF(ERROR, !response.IsSuccess()) - << "PutObjectAsync error: " - << response.GetError().GetExceptionName() - << "message: " << response.GetError().GetMessage() - << "resend: " << ctx->key; - - ctx->retCode = (response.IsSuccess() ? 0 : -1); - ctx->timer.stop(); - ctx->cb(ctx); - }; - - if (throttle_) { - throttle_->Add(false, context->bufferSize); - } - - inflightBytesThrottle_->OnStart(context->bufferSize); - context->cb = std::move(wrapperCallback); - s3Client_->PutObjectAsync(request, handler, context); -} - -int S3Adapter::GetObject(const Aws::String &key, - std::string *data) { - Aws::S3::Model::GetObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(key); - std::stringstream ss; - if (throttle_) { - throttle_->Add(true, 1); - } - auto response = s3Client_->GetObject(request); - if (response.IsSuccess()) { - ss << response.GetResult().GetBody().rdbuf(); - *data = ss.str(); - return 0; - } else { - LOG(ERROR) << "GetObject error: " - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} - -int S3Adapter::GetObject(const std::string &key, - char *buf, - off_t offset, - size_t len) { - Aws::S3::Model::GetObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(Aws::String{key.c_str(), key.size()}); - request.SetRange(GetObjectRequestRange(offset, len)); - - request.SetResponseStreamFactory([buf, len]() { - return Aws::New(AWS_ALLOCATE_TAG, buf, len); - }); - - if (throttle_) { - throttle_->Add(true, len); - } - auto response = s3Client_->GetObject(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(ERROR) << "GetObject error: " - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} - -void S3Adapter::GetObjectAsync(std::shared_ptr context) { - Aws::S3::Model::GetObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(Aws::String{context->key.c_str(), context->key.size()}); - request.SetRange(GetObjectRequestRange(context->offset, context->len)); - - request.SetResponseStreamFactory([context]() { - return Aws::New(AWS_ALLOCATE_TAG, context->buf, - context->len); - }); - - auto originCallback = context->cb; - auto wrapperCallback = - [this, originCallback]( - const S3Adapter* /*adapter*/, - const std::shared_ptr& ctx) { - inflightBytesThrottle_->OnComplete(ctx->len); - ctx->cb = 
originCallback; - ctx->cb(this, ctx); - }; - - Aws::S3::GetObjectResponseReceivedHandler handler = - [this](const Aws::S3::S3Client * /*client*/, - const Aws::S3::Model::GetObjectRequest & /*request*/, - const Aws::S3::Model::GetObjectOutcome &response, - const std::shared_ptr - &awsCtx) { - std::shared_ptr ctx = - std::const_pointer_cast( - std::dynamic_pointer_cast( - awsCtx)); - - LOG_IF(ERROR, !response.IsSuccess()) - << "GetObjectAsync error: " - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - ctx->actualLen = response.GetResult().GetContentLength(); - ctx->retCode = (response.IsSuccess() ? 0 : -1); - ctx->cb(this, ctx); - }; - - if (throttle_) { - throttle_->Add(true, context->len); - } - - inflightBytesThrottle_->OnStart(context->len); - context->cb = std::move(wrapperCallback); - s3Client_->GetObjectAsync(request, handler, context); -} - -bool S3Adapter::ObjectExist(const Aws::String &key) { - Aws::S3::Model::HeadObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(key); - auto response = s3Client_->HeadObject(request); - if (response.IsSuccess()) { - return true; - } else { - LOG(WARNING) << "HeadObject error:" << bucketName_ << "--" << key - << "--" << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return false; - } -} - -int S3Adapter::DeleteObject(const Aws::String &key) { - Aws::S3::Model::DeleteObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(key); - auto response = s3Client_->DeleteObject(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(WARNING) << "DeleteObject error:" << bucketName_ << "--" << key - << "--" << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} - -int S3Adapter::DeleteObjects(const std::list& keyList) { - Aws::S3::Model::DeleteObjectsRequest deleteObjectsRequest; - Aws::S3::Model::Delete deleteObjects; - for (const auto& key : keyList) { - Aws::S3::Model::ObjectIdentifier ObjIdent; - ObjIdent.SetKey(key); - deleteObjects.AddObjects(ObjIdent); - } - - deleteObjects.SetQuiet(false); - deleteObjectsRequest.WithBucket(bucketName_).WithDelete(deleteObjects); - auto response = s3Client_->DeleteObjects(deleteObjectsRequest); - if (response.IsSuccess()) { - for (auto del : response.GetResult().GetDeleted()) { - LOG(INFO) << "delete ok : " << del.GetKey(); - } - - for (auto err : response.GetResult().GetErrors()) { - LOG(WARNING) << "delete err : " << err.GetKey() << " --> " - << err.GetMessage(); - } - - if (response.GetResult().GetErrors().size() != 0) { - return -1; - } - - return 0; - } else { - LOG(ERROR) << response.GetError().GetMessage() << " failed, " - << deleteObjectsRequest.SerializePayload(); - return -1; - } - return 0; -} -/* - // object元数据单独更新还有问题,需要单独的s3接口来支持 -int S3Adapter::UpdateObjectMeta(const Aws::String &key, - const Aws::Map &meta) { - Aws::S3::Model::PutObjectRequest request; - request.SetBucket(bucketName_); - request.SetKey(key); - auto input_data = - Aws::MakeShared("PutObjectInputStream"); - request.SetBody(input_data); - request.SetMetadata(meta); - auto response = s3Client_->PutObject(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(ERROR) << "PutObject error:" - << bucketName_ << key - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} - -int S3Adapter::GetObjectMeta(const Aws::String &key, - Aws::Map *meta) { - Aws::S3::Model::HeadObjectRequest request; - request.SetBucket(bucketName_); 
- request.SetKey(key); - auto response = s3Client_->HeadObject(request); - if (response.IsSuccess()) { - *meta = response.GetResult().GetMetadata(); - return 0; - } else { - LOG(ERROR) << "HeadObject error:" - << bucketName_ << key - << response.GetError().GetExceptionName() - << response.GetError().GetMessage(); - return -1; - } -} -*/ -Aws::String S3Adapter::MultiUploadInit(const Aws::String &key) { - Aws::S3::Model::CreateMultipartUploadRequest request; - request.WithBucket(bucketName_).WithKey(key); - auto response = s3Client_->CreateMultipartUpload(request); - if (response.IsSuccess()) { - return response.GetResult().GetUploadId(); - } else { - LOG(ERROR) << "CreateMultipartUploadRequest error: " - << response.GetError().GetMessage(); - return ""; - } -} - -Aws::S3::Model::CompletedPart S3Adapter::UploadOnePart( - const Aws::String &key, - const Aws::String &uploadId, - int partNum, - int partSize, - const char* buf) { - Aws::S3::Model::UploadPartRequest request; - request.SetBucket(bucketName_); - request.SetKey(key); - request.SetUploadId(uploadId); - request.SetPartNumber(partNum); - request.SetContentLength(partSize); - - request.SetBody( - Aws::MakeShared(AWS_ALLOCATE_TAG, buf, partSize)); - - if (throttle_) { - throttle_->Add(false, partSize); - } - auto result = s3Client_->UploadPart(request); - if (result.IsSuccess()) { - return Aws::S3::Model::CompletedPart() - .WithETag(result.GetResult().GetETag()).WithPartNumber(partNum); - } else { - return Aws::S3::Model::CompletedPart() - .WithETag("errorTag").WithPartNumber(-1); - } -} - -int S3Adapter::CompleteMultiUpload(const Aws::String &key, - const Aws::String &uploadId, - const Aws::Vector &cp_v) { - Aws::S3::Model::CompleteMultipartUploadRequest request; - request.WithBucket(bucketName_); - request.SetKey(key); - request.SetUploadId(uploadId); - request.SetMultipartUpload( - Aws::S3::Model::CompletedMultipartUpload().WithParts(cp_v)); - auto response = s3Client_->CompleteMultipartUpload(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(ERROR) << "CompleteMultiUpload error: " - << response.GetError().GetMessage(); - this->AbortMultiUpload(key, uploadId); - return -1; - } -} - -int S3Adapter::AbortMultiUpload(const Aws::String &key, - const Aws::String &uploadId) { - Aws::S3::Model::AbortMultipartUploadRequest request; - request.WithBucket(bucketName_); - request.SetKey(key); - request.SetUploadId(uploadId); - auto response = s3Client_->AbortMultipartUpload(request); - if (response.IsSuccess()) { - return 0; - } else { - LOG(ERROR) << "AbortMultiUpload error: " - << response.GetError().GetMessage(); - return -1; - } -} - -void S3Adapter::AsyncRequestInflightBytesThrottle::OnStart(uint64_t len) { - std::unique_lock lock(mtx_); - while (inflightBytes_ + len > maxInflightBytes_) { - cond_.wait(lock); - } - - inflightBytes_ += len; -} - -void S3Adapter::AsyncRequestInflightBytesThrottle::OnComplete(uint64_t len) { - std::unique_lock lock(mtx_); - inflightBytes_ -= len; - cond_.notify_all(); -} -void S3Adapter::SetS3Option(const S3InfoOption& fsS3Opt) { - s3Address_ = fsS3Opt.s3Address.c_str(); - s3Ak_ = fsS3Opt.ak.c_str(); - s3Sk_ = fsS3Opt.sk.c_str(); - bucketName_ = fsS3Opt.bucketName.c_str(); -} - -} // namespace common -} // namespace curve +namespace curve +{ + namespace common + { + + std::once_flag S3INIT_FLAG; + std::once_flag S3SHUTDOWN_FLAG; + Aws::SDKOptions AWS_SDK_OPTIONS; + + namespace + { + + // https://github.com/aws/aws-sdk-cpp/issues/1430 + class PreallocatedIOStream : public Aws::IOStream + { + 
public: + PreallocatedIOStream(char *buf, size_t size) + : Aws::IOStream(new Aws::Utils::Stream::PreallocatedStreamBuf( + reinterpret_cast(buf), size)) {} + + PreallocatedIOStream(const char *buf, size_t size) + : PreallocatedIOStream(const_cast(buf), size) {} + + ~PreallocatedIOStream() + { + // corresponding new in constructor + delete rdbuf(); + } + }; + + Aws::String GetObjectRequestRange(uint64_t offset, uint64_t len) + { + auto range = + "bytes=" + std::to_string(offset) + "-" + std::to_string(offset + len); + return {range.data(), range.size()}; + } + + } // namespace + + void InitS3AdaptorOption(Configuration *conf, S3AdapterOption *s3Opt) + { + InitS3AdaptorOptionExceptS3InfoOption(conf, s3Opt); + LOG_IF(FATAL, !conf->GetStringValue("s3.endpoint", &s3Opt->s3Address)); + LOG_IF(FATAL, !conf->GetStringValue("s3.ak", &s3Opt->ak)); + LOG_IF(FATAL, !conf->GetStringValue("s3.sk", &s3Opt->sk)); + LOG_IF(FATAL, !conf->GetStringValue("s3.bucket_name", &s3Opt->bucketName)); + } + + void InitS3AdaptorOptionExceptS3InfoOption(Configuration *conf, + S3AdapterOption *s3Opt) + { + LOG_IF(FATAL, !conf->GetIntValue("s3.logLevel", &s3Opt->loglevel)); + LOG_IF(FATAL, !conf->GetStringValue("s3.logPrefix", &s3Opt->logPrefix)); + LOG_IF(FATAL, !conf->GetIntValue("s3.http_scheme", &s3Opt->scheme)); + LOG_IF(FATAL, !conf->GetBoolValue("s3.verify_SSL", &s3Opt->verifySsl)); + LOG_IF(FATAL, !conf->GetStringValue("s3.user_agent", &s3Opt->userAgent)); + LOG_IF(FATAL, + !conf->GetIntValue("s3.maxConnections", &s3Opt->maxConnections)); + LOG_IF(FATAL, + !conf->GetIntValue("s3.connectTimeout", &s3Opt->connectTimeout)); + LOG_IF(FATAL, + !conf->GetIntValue("s3.requestTimeout", &s3Opt->requestTimeout)); + LOG_IF(FATAL, + !conf->GetIntValue("s3.asyncThreadNum", &s3Opt->asyncThreadNum)); + LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.iopsTotalLimit", + &s3Opt->iopsTotalLimit)); + LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.iopsReadLimit", + &s3Opt->iopsReadLimit)); + LOG_IF(FATAL, !conf->GetUInt64Value("s3.throttle.iopsWriteLimit", + &s3Opt->iopsWriteLimit)); + LOG_IF(FATAL, + !conf->GetUInt64Value("s3.throttle.bpsTotalMB", &s3Opt->bpsTotalMB)); + LOG_IF(FATAL, + !conf->GetUInt64Value("s3.throttle.bpsReadMB", &s3Opt->bpsReadMB)); + LOG_IF(FATAL, + !conf->GetUInt64Value("s3.throttle.bpsWriteMB", &s3Opt->bpsWriteMB)); + LOG_IF(FATAL, !conf->GetBoolValue("s3.useVirtualAddressing", + &s3Opt->useVirtualAddressing)); + LOG_IF(FATAL, !conf->GetStringValue("s3.region", &s3Opt->region)); + + if (!conf->GetUInt64Value("s3.maxAsyncRequestInflightBytes", + &s3Opt->maxAsyncRequestInflightBytes)) + { + LOG(WARNING) << "Not found s3.maxAsyncRequestInflightBytes in conf"; + s3Opt->maxAsyncRequestInflightBytes = 0; + } + } + + void S3Adapter::Init(const std::string &path) + { + LOG(INFO) << "Loading s3 configurations"; + conf_.SetConfigPath(path); + LOG_IF(FATAL, !conf_.LoadConfig()) + << "Failed to open s3 config file: " << conf_.GetConfigPath(); + S3AdapterOption option; + InitS3AdaptorOption(&conf_, &option); + Init(option); + } + + void S3Adapter::InitExceptFsS3Option(const std::string &path) + { + LOG(INFO) << "Loading s3 configurations"; + conf_.SetConfigPath(path); + LOG_IF(FATAL, !conf_.LoadConfig()) + << "Failed to open s3 config file: " << conf_.GetConfigPath(); + S3AdapterOption option; + InitS3AdaptorOptionExceptS3InfoOption(&conf_, &option); + Init(option); + } + + void S3Adapter::Init(const S3AdapterOption &option) + { + auto initSDK = [&]() + { + AWS_SDK_OPTIONS.loggingOptions.logLevel = + 
Aws::Utils::Logging::LogLevel(option.loglevel); + AWS_SDK_OPTIONS.loggingOptions.defaultLogPrefix = + option.logPrefix.c_str(); + Aws::InitAPI(AWS_SDK_OPTIONS); + }; + std::call_once(S3INIT_FLAG, initSDK); + s3Address_ = option.s3Address.c_str(); + s3Ak_ = option.ak.c_str(); + s3Sk_ = option.sk.c_str(); + bucketName_ = option.bucketName.c_str(); + clientCfg_ = Aws::New(AWS_ALLOCATE_TAG); + clientCfg_->scheme = Aws::Http::Scheme(option.scheme); + clientCfg_->verifySSL = option.verifySsl; + clientCfg_->userAgent = option.userAgent.c_str(); + clientCfg_->region = option.region.c_str(); + clientCfg_->maxConnections = option.maxConnections; + clientCfg_->connectTimeoutMs = option.connectTimeout; + clientCfg_->requestTimeoutMs = option.requestTimeout; + clientCfg_->endpointOverride = s3Address_; + auto asyncThreadNum = option.asyncThreadNum; + LOG(INFO) << "S3Adapter init thread num = " << asyncThreadNum << std::endl; + clientCfg_->executor = + Aws::MakeShared( + "S3Adapter.S3Client", asyncThreadNum); + s3Client_ = Aws::New( + AWS_ALLOCATE_TAG, Aws::Auth::AWSCredentials(s3Ak_, s3Sk_), *clientCfg_, + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, + option.useVirtualAddressing); + + ReadWriteThrottleParams params; + params.iopsTotal.limit = option.iopsTotalLimit; + params.iopsRead.limit = option.iopsReadLimit; + params.iopsWrite.limit = option.iopsWriteLimit; + params.bpsTotal.limit = option.bpsTotalMB * kMB; + params.bpsRead.limit = option.bpsReadMB * kMB; + params.bpsWrite.limit = option.bpsWriteMB * kMB; + + throttle_ = new Throttle(); + throttle_->UpdateThrottleParams(params); + + inflightBytesThrottle_.reset(new AsyncRequestInflightBytesThrottle( + option.maxAsyncRequestInflightBytes == 0 + ? UINT64_MAX + : option.maxAsyncRequestInflightBytes)); + } + + void S3Adapter::Deinit() + { + // delete s3client in s3adapter + if (clientCfg_ != nullptr) + { + Aws::Delete(clientCfg_); + clientCfg_ = nullptr; + } + if (s3Client_ != nullptr) + { + Aws::Delete(s3Client_); + s3Client_ = nullptr; + } + if (throttle_ != nullptr) + { + delete throttle_; + throttle_ = nullptr; + } + if (inflightBytesThrottle_ != nullptr) + inflightBytesThrottle_.release(); + } + + void S3Adapter::Shutdown() + { + // one program should only call once + auto shutdownSDK = [&]() + { Aws::ShutdownAPI(AWS_SDK_OPTIONS); }; + std::call_once(S3SHUTDOWN_FLAG, shutdownSDK); + } + + void S3Adapter::Reinit(const S3AdapterOption &option) + { + Deinit(); + Init(option); + } + + std::string S3Adapter::GetS3Ak() + { + return std::string(s3Ak_.c_str(), s3Ak_.size()); + } + + std::string S3Adapter::GetS3Sk() + { + return std::string(s3Sk_.c_str(), s3Sk_.size()); + } + + std::string S3Adapter::GetS3Endpoint() + { + return std::string(s3Address_.c_str(), s3Address_.size()); + } + + int S3Adapter::CreateBucket() + { + Aws::S3::Model::CreateBucketRequest request; + request.SetBucket(bucketName_); + Aws::S3::Model::CreateBucketConfiguration conf; + conf.SetLocationConstraint( + Aws::S3::Model::BucketLocationConstraint::us_east_1); + request.SetCreateBucketConfiguration(conf); + auto response = s3Client_->CreateBucket(request); + if (response.IsSuccess()) + { + return 0; + } + else + { + LOG(ERROR) << "CreateBucket error:" << bucketName_ << "--" + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return -1; + } + } + + int S3Adapter::DeleteBucket() + { + Aws::S3::Model::DeleteBucketRequest request; + request.SetBucket(bucketName_); + auto response = s3Client_->DeleteBucket(request); + if 
(response.IsSuccess()) + { + return 0; + } + else + { + LOG(ERROR) << "DeleteBucket error:" << bucketName_ << "--" + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return -1; + } + } + + bool S3Adapter::BucketExist() + { + Aws::S3::Model::HeadBucketRequest request; + request.SetBucket(bucketName_); + auto response = s3Client_->HeadBucket(request); + if (response.IsSuccess()) + { + return true; + } + else + { + LOG(ERROR) << "HeadBucket error:" << bucketName_ << "--" + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return false; + } + } + + int S3Adapter::PutObject(const Aws::String &key, const char *buffer, + const size_t bufferSize) + { + Aws::S3::Model::PutObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(key); + + request.SetBody(Aws::MakeShared(AWS_ALLOCATE_TAG, + buffer, bufferSize)); + + if (throttle_) + { + throttle_->Add(false, bufferSize); + } + + auto response = s3Client_->PutObject(request); + if (response.IsSuccess()) + { + return 0; + } + else + { + LOG(ERROR) << "PutObject error, bucket: " << bucketName_ + << ", key: " << key << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return -1; + } + } + + int S3Adapter::PutObject(const Aws::String &key, const std::string &data) + { + return PutObject(key, data.data(), data.size()); + } + /* + int S3Adapter::GetObject(const Aws::String &key, + void *buffer, + const int bufferSize) { + Aws::S3::Model::GetObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(key); + request.SetResponseStreamFactory( + [buffer, bufferSize](){ + std::unique_ptr + stream(Aws::New("stream")); + stream->rdbuf()->pubsetbuf(buffer, + bufferSize); + return stream.release(); + }); + auto response = s3Client_->GetObject(request); + if (response.IsSuccess()) { + *buffer << response.GetResult().GetBody().rdbuf(); + } else { + LOG(ERROR) << "GetObject error: " + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return -1; + } + } + */ + + void S3Adapter::PutObjectAsync(std::shared_ptr context) + { + Aws::S3::Model::PutObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(Aws::String{context->key.c_str(), context->key.size()}); + + request.SetBody(Aws::MakeShared( + AWS_ALLOCATE_TAG, context->buffer, context->bufferSize)); + + auto originCallback = context->cb; + auto wrapperCallback = + [this, + originCallback](const std::shared_ptr &ctx) + { + inflightBytesThrottle_->OnComplete(ctx->bufferSize); + ctx->cb = originCallback; + ctx->cb(ctx); + }; + + Aws::S3::PutObjectResponseReceivedHandler handler = + [context](const Aws::S3::S3Client * /*client*/, + const Aws::S3::Model::PutObjectRequest & /*request*/, + const Aws::S3::Model::PutObjectOutcome &response, + const std::shared_ptr & + awsCtx) + { + std::shared_ptr ctx = + std::const_pointer_cast( + std::dynamic_pointer_cast( + awsCtx)); + + LOG_IF(ERROR, !response.IsSuccess()) + << "PutObjectAsync error: " + << response.GetError().GetExceptionName() + << "message: " << response.GetError().GetMessage() + << "resend: " << ctx->key; + + ctx->retCode = (response.IsSuccess() ? 
0 : -1); + ctx->timer.stop(); + ctx->cb(ctx); + }; + + if (throttle_) + { + throttle_->Add(false, context->bufferSize); + } + + inflightBytesThrottle_->OnStart(context->bufferSize); + context->cb = std::move(wrapperCallback); + s3Client_->PutObjectAsync(request, handler, context); + } + + int S3Adapter::GetObject(const Aws::String &key, std::string *data) + { + Aws::S3::Model::GetObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(key); + std::stringstream ss; + if (throttle_) + { + throttle_->Add(true, 1); + } + auto response = s3Client_->GetObject(request); + if (response.IsSuccess()) + { + ss << response.GetResult().GetBody().rdbuf(); + *data = ss.str(); + return 0; + } + else + { + LOG(ERROR) << "GetObject error: " + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return -1; + } + } + + int S3Adapter::GetObject(const std::string &key, char *buf, off_t offset, + size_t len) + { + Aws::S3::Model::GetObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(Aws::String{key.c_str(), key.size()}); + request.SetRange(GetObjectRequestRange(offset, len)); + + request.SetResponseStreamFactory([buf, len]() + { return Aws::New(AWS_ALLOCATE_TAG, buf, len); }); + + if (throttle_) + { + throttle_->Add(true, len); + } + auto response = s3Client_->GetObject(request); + if (response.IsSuccess()) + { + return 0; + } + else + { + LOG(ERROR) << "GetObject error: " + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + return -1; + } + } + + void S3Adapter::GetObjectAsync(std::shared_ptr context) + { + Aws::S3::Model::GetObjectRequest request; + request.SetBucket(bucketName_); + request.SetKey(Aws::String{context->key.c_str(), context->key.size()}); + request.SetRange(GetObjectRequestRange(context->offset, context->len)); + + request.SetResponseStreamFactory([context]() + { return Aws::New(AWS_ALLOCATE_TAG, context->buf, + context->len); }); + + auto originCallback = context->cb; + auto wrapperCallback = + [this, originCallback]( + const S3Adapter * /*adapter*/, + const std::shared_ptr &ctx) + { + inflightBytesThrottle_->OnComplete(ctx->len); + ctx->cb = originCallback; + ctx->cb(this, ctx); + }; + + Aws::S3::GetObjectResponseReceivedHandler handler = + [this](const Aws::S3::S3Client * /*client*/, + const Aws::S3::Model::GetObjectRequest & /*request*/, + const Aws::S3::Model::GetObjectOutcome &response, + const std::shared_ptr & + awsCtx) + { + std::shared_ptr ctx = + std::const_pointer_cast( + std::dynamic_pointer_cast( + awsCtx)); + + LOG_IF(ERROR, !response.IsSuccess()) + << "GetObjectAsync error: " + << response.GetError().GetExceptionName() + << response.GetError().GetMessage(); + ctx->actualLen = response.GetResult().GetContentLength(); + ctx->retCode = (response.IsSuccess() ? 
0 : -1);
+            ctx->cb(this, ctx);
+        };
+
+        if (throttle_)
+        {
+            throttle_->Add(true, context->len);
+        }
+
+        inflightBytesThrottle_->OnStart(context->len);
+        context->cb = std::move(wrapperCallback);
+        s3Client_->GetObjectAsync(request, handler, context);
+    }
+
+    bool S3Adapter::ObjectExist(const Aws::String &key)
+    {
+        Aws::S3::Model::HeadObjectRequest request;
+        request.SetBucket(bucketName_);
+        request.SetKey(key);
+        auto response = s3Client_->HeadObject(request);
+        if (response.IsSuccess())
+        {
+            return true;
+        }
+        else
+        {
+            LOG(WARNING) << "HeadObject error:" << bucketName_ << "--" << key
+                         << "--" << response.GetError().GetExceptionName()
+                         << response.GetError().GetMessage();
+            return false;
+        }
+    }
+
+    int S3Adapter::DeleteObject(const Aws::String &key)
+    {
+        Aws::S3::Model::DeleteObjectRequest request;
+        request.SetBucket(bucketName_);
+        request.SetKey(key);
+        auto response = s3Client_->DeleteObject(request);
+        if (response.IsSuccess())
+        {
+            return 0;
+        }
+        else
+        {
+            LOG(WARNING) << "DeleteObject error:" << bucketName_ << "--" << key
+                         << "--" << response.GetError().GetExceptionName()
+                         << response.GetError().GetMessage();
+            return -1;
+        }
+    }
+
+    int S3Adapter::DeleteObjects(const std::list &keyList)
+    {
+        Aws::S3::Model::DeleteObjectsRequest deleteObjectsRequest;
+        Aws::S3::Model::Delete deleteObjects;
+        for (const auto &key : keyList)
+        {
+            Aws::S3::Model::ObjectIdentifier ObjIdent;
+            ObjIdent.SetKey(key);
+            deleteObjects.AddObjects(ObjIdent);
+        }
+
+        deleteObjects.SetQuiet(false);
+        deleteObjectsRequest.WithBucket(bucketName_).WithDelete(deleteObjects);
+        auto response = s3Client_->DeleteObjects(deleteObjectsRequest);
+        if (response.IsSuccess())
+        {
+            for (auto del : response.GetResult().GetDeleted())
+            {
+                LOG(INFO) << "delete ok : " << del.GetKey();
+            }
+
+            for (auto err : response.GetResult().GetErrors())
+            {
+                LOG(WARNING) << "delete err : " << err.GetKey() << " --> "
+                             << err.GetMessage();
+            }
+
+            if (response.GetResult().GetErrors().size() != 0)
+            {
+                return -1;
+            }
+
+            return 0;
+        }
+        else
+        {
+            LOG(ERROR) << response.GetError().GetMessage() << " failed, "
+                       << deleteObjectsRequest.SerializePayload();
+            return -1;
+        }
+        return 0;
+    }
+    /*
+    // There are still issues with updating the object metadata separately,
+    // and a separate s3 interface is needed to support it
+    int S3Adapter::UpdateObjectMeta(const Aws::String &key,
+                                    const Aws::Map &meta) {
+        Aws::S3::Model::PutObjectRequest request;
+        request.SetBucket(bucketName_);
+        request.SetKey(key);
+        auto input_data =
+            Aws::MakeShared("PutObjectInputStream");
+        request.SetBody(input_data);
+        request.SetMetadata(meta);
+        auto response = s3Client_->PutObject(request);
+        if (response.IsSuccess()) {
+            return 0;
+        } else {
+            LOG(ERROR) << "PutObject error:"
+                       << bucketName_ << key
+                       << response.GetError().GetExceptionName()
+                       << response.GetError().GetMessage();
+            return -1;
+        }
+    }
+
+    int S3Adapter::GetObjectMeta(const Aws::String &key,
+                                 Aws::Map *meta) {
+        Aws::S3::Model::HeadObjectRequest request;
+        request.SetBucket(bucketName_);
+        request.SetKey(key);
+        auto response = s3Client_->HeadObject(request);
+        if (response.IsSuccess()) {
+            *meta = response.GetResult().GetMetadata();
+            return 0;
+        } else {
+            LOG(ERROR) << "HeadObject error:"
+                       << bucketName_ << key
+                       << response.GetError().GetExceptionName()
+                       << response.GetError().GetMessage();
+            return -1;
+        }
+    }
+    */
+    Aws::String S3Adapter::MultiUploadInit(const Aws::String &key)
+    {
+        Aws::S3::Model::CreateMultipartUploadRequest request;
+
request.WithBucket(bucketName_).WithKey(key); + auto response = s3Client_->CreateMultipartUpload(request); + if (response.IsSuccess()) + { + return response.GetResult().GetUploadId(); + } + else + { + LOG(ERROR) << "CreateMultipartUploadRequest error: " + << response.GetError().GetMessage(); + return ""; + } + } + + Aws::S3::Model::CompletedPart S3Adapter::UploadOnePart( + const Aws::String &key, const Aws::String &uploadId, int partNum, + int partSize, const char *buf) + { + Aws::S3::Model::UploadPartRequest request; + request.SetBucket(bucketName_); + request.SetKey(key); + request.SetUploadId(uploadId); + request.SetPartNumber(partNum); + request.SetContentLength(partSize); + + request.SetBody( + Aws::MakeShared(AWS_ALLOCATE_TAG, buf, partSize)); + + if (throttle_) + { + throttle_->Add(false, partSize); + } + auto result = s3Client_->UploadPart(request); + if (result.IsSuccess()) + { + return Aws::S3::Model::CompletedPart() + .WithETag(result.GetResult().GetETag()) + .WithPartNumber(partNum); + } + else + { + return Aws::S3::Model::CompletedPart() + .WithETag("errorTag") + .WithPartNumber(-1); + } + } + + int S3Adapter::CompleteMultiUpload( + const Aws::String &key, const Aws::String &uploadId, + const Aws::Vector &cp_v) + { + Aws::S3::Model::CompleteMultipartUploadRequest request; + request.WithBucket(bucketName_); + request.SetKey(key); + request.SetUploadId(uploadId); + request.SetMultipartUpload( + Aws::S3::Model::CompletedMultipartUpload().WithParts(cp_v)); + auto response = s3Client_->CompleteMultipartUpload(request); + if (response.IsSuccess()) + { + return 0; + } + else + { + LOG(ERROR) << "CompleteMultiUpload error: " + << response.GetError().GetMessage(); + this->AbortMultiUpload(key, uploadId); + return -1; + } + } + + int S3Adapter::AbortMultiUpload(const Aws::String &key, + const Aws::String &uploadId) + { + Aws::S3::Model::AbortMultipartUploadRequest request; + request.WithBucket(bucketName_); + request.SetKey(key); + request.SetUploadId(uploadId); + auto response = s3Client_->AbortMultipartUpload(request); + if (response.IsSuccess()) + { + return 0; + } + else + { + LOG(ERROR) << "AbortMultiUpload error: " + << response.GetError().GetMessage(); + return -1; + } + } + + void S3Adapter::AsyncRequestInflightBytesThrottle::OnStart(uint64_t len) + { + std::unique_lock lock(mtx_); + while (inflightBytes_ + len > maxInflightBytes_) + { + cond_.wait(lock); + } + + inflightBytes_ += len; + } + + void S3Adapter::AsyncRequestInflightBytesThrottle::OnComplete(uint64_t len) + { + std::unique_lock lock(mtx_); + inflightBytes_ -= len; + cond_.notify_all(); + } + void S3Adapter::SetS3Option(const S3InfoOption &fsS3Opt) + { + s3Address_ = fsS3Opt.s3Address.c_str(); + s3Ak_ = fsS3Opt.ak.c_str(); + s3Sk_ = fsS3Opt.sk.c_str(); + bucketName_ = fsS3Opt.bucketName.c_str(); + } + + } // namespace common +} // namespace curve diff --git a/src/common/s3_adapter.h b/src/common/s3_adapter.h index 2adbbfb3bc..4a3bb3c6b6 100644 --- a/src/common/s3_adapter.h +++ b/src/common/s3_adapter.h @@ -105,10 +105,10 @@ struct S3InfoOption { uint32_t objectPrefix; }; -void InitS3AdaptorOptionExceptS3InfoOption(Configuration *conf, - S3AdapterOption *s3Opt); +void InitS3AdaptorOptionExceptS3InfoOption(Configuration* conf, + S3AdapterOption* s3Opt); -void InitS3AdaptorOption(Configuration *conf, S3AdapterOption *s3Opt); +void InitS3AdaptorOption(Configuration* conf, S3AdapterOption* s3Opt); using GetObjectAsyncCallBack = std::function&)>; @@ -185,27 +185,27 @@ class S3Adapter { } virtual ~S3Adapter() { Deinit(); } 
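// A minimal usage sketch of the S3Adapter lifecycle declared below (not part
// of the patch; the conf path is illustrative). Init loads the s3 options,
// PutObject/GetObject move data synchronously, and Deinit releases the
// client and throttle resources:
//
//   curve::common::S3Adapter adapter;
//   adapter.Init("./conf/s3.conf");          // endpoint/ak/sk/bucket from conf
//   adapter.PutObject("object-key", std::string("hello"));  // 0 on success
//   std::string out;
//   adapter.GetObject("object-key", &out);   // read the whole object back
//   adapter.Deinit();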
     /**
-     * 初始化S3Adapter
+     * Initialize the S3Adapter
      */
-    virtual void Init(const std::string &path);
+    virtual void Init(const std::string& path);
     /**
-     * 初始化S3Adapter
-     * 但不包括 S3InfoOption
+     * Initialize the S3Adapter,
+     * excluding the S3InfoOption part
      */
-    virtual void InitExceptFsS3Option(const std::string &path);
+    virtual void InitExceptFsS3Option(const std::string& path);
     /**
-     * 初始化S3Adapter
+     * Initialize the S3Adapter
      */
-    virtual void Init(const S3AdapterOption &option);
+    virtual void Init(const S3AdapterOption& option);
     /**
      * @brief
      *
      * @details
      */
-    virtual void SetS3Option(const S3InfoOption &fsS3Opt);
+    virtual void SetS3Option(const S3InfoOption& fsS3Opt);
     /**
-     * 释放S3Adapter资源
+     * Release the S3Adapter resources
      */
     virtual void Deinit();
     /**
@@ -215,7 +215,7 @@ class S3Adapter {
     /**
      * reinit s3client with new AWSCredentials
      */
-    virtual void Reinit(const S3AdapterOption &option);
+    virtual void Reinit(const S3AdapterOption& option);
     /**
      * get s3 ak
      */
@@ -229,39 +229,40 @@
      */
     virtual std::string GetS3Endpoint();
     /**
-     * 创建存储快照数据的桶(桶名称由配置文件指定,需要全局唯一)
-     * @return: 0 创建成功/ -1 创建失败
+     * Create the bucket for storing snapshot data (the bucket name is
+     * specified by the configuration file and needs to be globally unique)
+     * @return: 0 created successfully / -1 creation failed
      */
     virtual int CreateBucket();
     /**
-     * 删除桶
-     * @return 0 删除成功/-1 删除失败
+     * Delete the bucket
+     * @return 0 deleted successfully / -1 deletion failed
      */
     virtual int DeleteBucket();
     /**
-     * 判断快照数据的桶是否存在
-     * @return true 桶存在/ false 桶不存在
+     * Check whether the bucket for snapshot data exists
+     * @return true the bucket exists / false the bucket does not exist
      */
     virtual bool BucketExist();
     /**
-     * 上传数据到对象存储
-     * @param 对象名
-     * @param 数据内容
-     * @param 数据内容大小
-     * @return:0 上传成功/ -1 上传失败
+     * Upload data to object storage
+     * @param key: the object name
+     * @param buffer: the data content
+     * @param bufferSize: the size of the data content
+     * @return: 0 upload succeeded / -1 upload failed
      */
-    virtual int PutObject(const Aws::String &key, const char *buffer,
+    virtual int PutObject(const Aws::String& key, const char* buffer,
                           const size_t bufferSize);
     // Get object to buffer[bufferSize]
     // int GetObject(const Aws::String &key, void *buffer,
     //               const int bufferSize);
     /**
-     * 上传数据到对象存储
-     * @param 对象名
-     * @param 数据内容
-     * @return:0 上传成功/ -1 上传失败
+     * Upload data to object storage
+     * @param key: the object name
+     * @param data: the data content
+     * @return: 0 upload succeeded / -1 upload failed
      */
-    virtual int PutObject(const Aws::String &key, const std::string &data);
+    virtual int PutObject(const Aws::String& key, const std::string& data);
     virtual void PutObjectAsync(std::shared_ptr context);
     /**
      * Get object from s3,
@@ -273,40 +274,40 @@
      * @param pointer which contain the data
      * @return 0 success / -1 fail
      */
-    virtual int GetObject(const Aws::String &key, std::string *data);
+    virtual int GetObject(const Aws::String& key, std::string* data);
     /**
-     * 从对象存储读取数据
-     * @param 对象名
-     * @param[out] 返回读取的数据
-     * @param 读取的偏移
-     * @param 读取的长度
+     * Read data from object storage
+     * @param key: the object name
+     * @param[out] buf: the data that was read
+     * @param offset: the offset to read from
+     * @param len: the length to read
      */
-    virtual int GetObject(const std::string &key, char *buf, off_t offset,
+    virtual int GetObject(const std::string& key, char* buf, off_t offset,
                           size_t len);  // NOLINT
     /**
-     * @brief 异步从对象存储读取数据
+     * @brief Asynchronously read data from object storage
      *
-     * @param context 异步上下文
+     * @param context: the asynchronous context
      */
     virtual void GetObjectAsync(std::shared_ptr context);
     /**
-     * 删除对象
-     * @param 对象名
-     * @return: 0 删除成功/ -
+     * Delete an object
+     * @param key: the object name
+     * @return: 0 deleted successfully / -
      */
-    virtual int DeleteObject(const Aws::String &key);
+    virtual int DeleteObject(const Aws::String& key);
 
-    virtual int DeleteObjects(const std::list &keyList);
+    virtual int DeleteObjects(const std::list& keyList);
     /**
-     * 判断对象是否存在
-     * @param 对象名
-     * @return: true 对象存在/ false 对象不存在
+     * Check whether an object exists
+     * @param key: the object name
+     * @return: true the object exists / false the object does not exist
      */
-    virtual bool ObjectExist(const Aws::String &key);
+    virtual bool ObjectExist(const Aws::String& key);
     /*
     // Update object meta content
-    // Todo 接口还有问题 need fix
+    // TODO: this interface still has issues and needs a fix
     virtual int UpdateObjectMeta(const Aws::String &key,
                                  const Aws::Map &meta);
     // Get object meta content
@@ -314,51 +315,53 @@
                                   Aws::Map *meta);
     */
     /**
-     * 初始化对象的分片上传任务
-     * @param 对象名
-     * @return 任务名
+     * Initialize a multipart upload task for an object
+     * @param key: the object name
+     * @return the upload task id
      */
-    virtual Aws::String MultiUploadInit(const Aws::String &key);
+    virtual Aws::String MultiUploadInit(const Aws::String& key);
     /**
-     * 增加一个分片到分片上传任务中
-     * @param 对象名
-     * @param 任务名
-     * @param 第几个分片(从1开始)
-     * @param 分片大小
-     * @param 分片的数据内容
-     * @return: 分片任务管理对象
+     * Add one part to a multipart upload task
+     * @param key: the object name
+     * @param uploadId: the upload task id
+     * @param partNum: which part this is (starting from 1)
+     * @param partSize: the part size
+     * @param buf: the data content of the part
+     * @return: the CompletedPart object that manages this part
      */
-    virtual Aws::S3::Model::CompletedPart
-    UploadOnePart(const Aws::String &key, const Aws::String &uploadId,
-                  int partNum, int partSize, const char* buf);
+    virtual Aws::S3::Model::CompletedPart UploadOnePart(
+        const Aws::String& key, const Aws::String& uploadId, int partNum,
+        int partSize, const char* buf);
     /**
-     * 完成分片上传任务
-     * @param 对象名
-     * @param 分片上传任务id
-     * @管理分片上传任务的vector
-     * @return 0 任务完成/ -1 任务失败
+     * Complete a multipart upload task
+     * @param key: the object name
+     * @param uploadId: the upload task id
+     * @param cp_v: the vector of CompletedPart objects of the task
+     * @return 0 the task completed / -1 the task failed
      */
-    virtual int
-    CompleteMultiUpload(const Aws::String &key, const Aws::String &uploadId,
-                        const Aws::Vector &cp_v);
+    virtual int CompleteMultiUpload(
+        const Aws::String& key, const Aws::String& uploadId,
+        const Aws::Vector& cp_v);
     /**
-     * 终止一个对象的分片上传任务
-     * @param 对象名
-     * @param 任务id
-     * @return 0 终止成功/ -1 终止失败
+     * Abort the multipart upload task of an object
+     * @param key: the object name
+     * @param uploadId: the upload task id
+     * @return 0 aborted successfully / -1 abort failed
      */
-    virtual int AbortMultiUpload(const Aws::String &key,
-                                 const Aws::String &uploadId);
-    void SetBucketName(const Aws::String &name) { bucketName_ = name; }
+    virtual int AbortMultiUpload(const Aws::String& key,
+                                 const Aws::String& uploadId);
+    void SetBucketName(const Aws::String& name) { bucketName_ = name; }
     Aws::String GetBucketName() { return bucketName_; }
 
-    Aws::Client::ClientConfiguration *GetConfig() { return clientCfg_; }
+    Aws::Client::ClientConfiguration* GetConfig() { return clientCfg_; }
 
  private:
     class AsyncRequestInflightBytesThrottle {
      public:
         explicit AsyncRequestInflightBytesThrottle(uint64_t maxInflightBytes)
-            : maxInflightBytes_(maxInflightBytes), inflightBytes_(0), mtx_(),
+            : maxInflightBytes_(maxInflightBytes),
+              inflightBytes_(0),
+              mtx_(),
               cond_() {}
 
         void OnStart(uint64_t len);
@@ -373,19 +376,20 @@ class S3Adapter {
     };
 
  private:
-    // S3服务器地址
+    // S3 server address
     Aws::String s3Address_;
-    // 用于用户认证的AK/SK,需要从对象存储的用户管理中申请
+    // The AK/SK used for user authentication; they must be requested from
+    // the user management of the object storage service
     Aws::String s3Ak_;
     Aws::String s3Sk_;
-    // 对象的桶名
+    // The bucket name of the object
     Aws::String bucketName_;
-    // aws sdk的配置
-    Aws::Client::ClientConfiguration *clientCfg_;
-    Aws::S3::S3Client *s3Client_;
+    // Configuration of the AWS SDK
+    Aws::Client::ClientConfiguration* clientCfg_;
+    Aws::S3::S3Client* s3Client_;
     Configuration conf_;
-    Throttle *throttle_;
+    Throttle* throttle_;
 
     std::unique_ptr inflightBytesThrottle_;
 };
@@ -397,7 +401,7 @@ class FakeS3Adapter : public S3Adapter {
 
     bool BucketExist() override { return true; }
 
-    int PutObject(const Aws::String &key, const char *buffer,
+    int PutObject(const Aws::String& key, const char* buffer,
                   const size_t bufferSize) override {
         (void)key;
         (void)buffer;
@@ -405,20 +409,20 @@ class FakeS3Adapter : public S3Adapter {
         return 0;
     }
 
-    int PutObject(const Aws::String &key, const std::string &data) override {
+    int PutObject(const Aws::String& key, const std::string& data) override {
         (void)key;
         (void)data;
         return 0;
     }
 
-    void
-    PutObjectAsync(std::shared_ptr context) override {
+    void PutObjectAsync(
+        std::shared_ptr context) override {
         context->retCode = 0;
         context->timer.stop();
         context->cb(context);
     }
 
-    int GetObject(const Aws::String &key, std::string *data) override {
+    int GetObject(const Aws::String& key, std::string* data) override {
         (void)key;
         (void)data;
         // just return 4M data
@@ -426,7 +430,7 @@ class FakeS3Adapter : public S3Adapter {
         return 0;
     }
 
-    int GetObject(const std::string &key, char *buf, off_t offset,
+    int GetObject(const std::string& key, char* buf, off_t offset,
                   size_t len) override {
         (void)key;
         (void)offset;
@@ -435,30 +439,29 @@ class FakeS3Adapter : public S3Adapter {
         return 0;
     }
 
-    void
-    GetObjectAsync(std::shared_ptr context) override {
+    void GetObjectAsync(
+        std::shared_ptr context) override {
         memset(context->buf, '1', context->len);
         context->retCode = 0;
         context->cb(this, context);
     }
 
-    int DeleteObject(const Aws::String &key) override {
+    int DeleteObject(const Aws::String& key) override {
         (void)key;
         return 0;
     }
 
-    int DeleteObjects(const std::list &keyList) override {
+    int DeleteObjects(const std::list& keyList) override {
         (void)keyList;
         return 0;
     }
 
-    bool ObjectExist(const Aws::String &key) override {
+    bool ObjectExist(const Aws::String& key) override {
         (void)key;
         return true;
     }
 };
-
 }  // namespace common
 }  // namespace curve
 
 #endif  // SRC_COMMON_S3_ADAPTER_H_
diff --git a/src/common/snapshotclone/snapshotclone_define.cpp b/src/common/snapshotclone/snapshotclone_define.cpp
index b3b08f8d74..9e2ba8a0a6 100644
--- a/src/common/snapshotclone/snapshotclone_define.cpp
+++ b/src/common/snapshotclone/snapshotclone_define.cpp
@@ -20,14 +20,14 @@
 * Author: xuchaojie
 */
 
-#include
-
 #include "src/common/snapshotclone/snapshotclone_define.h"
 
+#include
+
 namespace curve {
 namespace snapshotcloneserver {
 
-// 字符串常量定义
+// String constant definitions
 const char* kServiceName = "SnapshotCloneService";
 const char* kCreateSnapshotAction = "CreateSnapshot";
 const char* kDeleteSnapshotAction = "DeleteSnapshot";
@@ -92,10 +92,8 @@ std::map code2Msg = {
     {kErrCodeNotSupport, "Not support."},
 };
 
-std::string BuildErrorMessage(
-    int errCode,
-    const std::string &requestId,
-    const std::string &uuid) {
+std::string BuildErrorMessage(int errCode, const std::string& requestId,
+                              const std::string& uuid) {
     Json::Value mainObj;
     mainObj[kCodeStr] = std::to_string(errCode);
     mainObj[kMessageStr] = code2Msg[errCode];
diff --git a/src/common/snapshotclone/snapshotclone_define.h b/src/common/snapshotclone/snapshotclone_define.h
index ffa5428a6e..558fa15f97 100644
--- a/src/common/snapshotclone/snapshotclone_define.h
+++ b/src/common/snapshotclone/snapshotclone_define.h
@@ -23,13 +23,13 @@
 #ifndef SRC_COMMON_SNAPSHOTCLONE_SNAPSHOTCLONE_DEFINE_H_
 #define SRC_COMMON_SNAPSHOTCLONE_SNAPSHOTCLONE_DEFINE_H_
 
-#include
 #include
+#include
 
 namespace curve {
 namespace snapshotcloneserver {
 
-// snapshotcloneservice字符串常量定义
+// snapshotcloneservice string constant definitions
 extern const char* kServiceName;
 // action
 extern const char* kCreateSnapshotAction;
@@ -74,74 +74,66 @@ extern const char* kCloneFileInfoStr;
 typedef std::string UUID;
 using TaskIdType = UUID;
 
-enum class CloneTaskType {
-    kClone = 0,
-    kRecover
-};
+enum class CloneTaskType { kClone = 0, kRecover };
 
-enum class CloneRefStatus {
-    kNoRef = 0,
-    kHasRef = 1,
-    kNeedCheck = 2
-};
+enum class CloneRefStatus { kNoRef = 0, kHasRef = 1, kNeedCheck = 2 };
 
-// 未初始序列号
+// Uninitialized sequence number
 const uint64_t kUnInitializeSeqNum = 0;
-// 初始序列号
+// Initial sequence number
 const uint64_t kInitializeSeqNum = 1;
 
-// 错误码:执行成功
+// Error code: execution succeeded
 const int kErrCodeSuccess = 0;
-// 错误码: 内部错误
+// Error code: internal error
 const int kErrCodeInternalError = -1;
-// 错误码:服务器初始化失败
+// Error code: server initialization failed
 const int kErrCodeServerInitFail = -2;
-// 错误码:服务器启动失败
+// Error code: server startup failed
 const int kErrCodeServerStartFail = -3;
-// 错误码:服务已停止
+// Error code: the service has stopped
 const int kErrCodeServiceIsStop = -4;
-// 错误码:非法请求
+// Error code: invalid request
 const int kErrCodeInvalidRequest = -5;
-// 错误码:任务已存在
+// Error code: the task already exists
 const int kErrCodeTaskExist = -6;
-// 错误码:非法的用户
+// Error code: invalid user
 const int kErrCodeInvalidUser = -7;
-// 错误码:文件不存在
+// Error code: the file does not exist
 const int kErrCodeFileNotExist = -8;
-// 错误码:文件状态异常
+// Error code: abnormal file status
 const int kErrCodeFileStatusInvalid = -9;
-// 错误码:chunk大小未按chunk分片大小对齐
+// Error code: the chunk size is not aligned to the chunk split size
 const int kErrCodeChunkSizeNotAligned = -10;
-// 错误码:文件名不匹配
+// Error code: the file name does not match
 const int kErrCodeFileNameNotMatch = -11;
-// 错误码: 不能删除未完成的快照
+// Error code: cannot delete an unfinished snapshot
 const int kErrCodeSnapshotCannotDeleteUnfinished = -12;
-// 错误码: 不能对存在异常快照的文件打快照,或不能对存在错误的目标文件克隆/恢复
+// Error code: cannot snapshot a file that has abnormal snapshots, nor
+// clone/recover to a target file that has errors
 const int kErrCodeSnapshotCannotCreateWhenError = -13;
-// 错误码:取消的快照已完成
+// Error code: the snapshot to be canceled has already completed
 const int kErrCodeCannotCancelFinished = -14;
-// 错误码:不能从未完成或存在错误的快照克隆
+// Error code: cannot clone from a snapshot that is unfinished or has errors
 const int kErrCodeInvalidSnapshot = -15;
-// 错误码:不能删除正在克隆的快照
+// Error code: cannot delete a snapshot that is being cloned
 const int kErrCodeSnapshotCannotDeleteCloning = -16;
-// 错误码:不能清理未完成的克隆
+// Error code: cannot clean up an unfinished clone
 const int kErrCodeCannotCleanCloneUnfinished = -17;
-// 错误码:快照到达上限
+// Error code: the snapshot count has reached the upper limit
 const int kErrCodeSnapshotCountReachLimit = -18;
-// 错误码:文件已存在
+// Error code: the file already exists
 const int kErrCodeFileExist = -19;
-// 错误码:克隆任务已满
+// Error code: the clone tasks are full
 const int kErrCodeTaskIsFull = -20;
-// 错误码:不支持
+// Error code: not supported
 const int kErrCodeNotSupport = -21;
 
 extern std::map code2Msg;
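// A small illustrative sketch (not part of the patch): BuildErrorMessage
// stringifies the error code, looks the human-readable message up in
// code2Msg, and wraps both into a JSON body keyed by kCodeStr/kMessageStr
// (see snapshotclone_define.cpp above). Only the kErrCodeNotSupport entry of
// code2Msg is visible in this hunk, so the exact output below is partly
// assumed:
//
//   std::string body = curve::snapshotcloneserver::BuildErrorMessage(
//       curve::snapshotcloneserver::kErrCodeNotSupport, "req-123");
//   // body is roughly {"Code":"-21","Message":"Not support.",...}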
-std::string BuildErrorMessage( - int errCode, - const std::string &requestId, - const std::string &uuid = ""); - +std::string BuildErrorMessage(int errCode, const std::string& requestId, + const std::string& uuid = ""); // clone progress constexpr uint32_t kProgressCloneStart = 0; @@ -153,8 +145,6 @@ constexpr uint32_t kProgressRecoverChunkBegin = kProgressMetaInstalled; constexpr uint32_t kProgressRecoverChunkEnd = 95; constexpr uint32_t kProgressCloneComplete = 100; - - } // namespace snapshotcloneserver } // namespace curve diff --git a/src/common/stringstatus.h b/src/common/stringstatus.h index 203b851bfc..a8ca00e1d8 100644 --- a/src/common/stringstatus.h +++ b/src/common/stringstatus.h @@ -20,28 +20,28 @@ * Author: lixiaocui */ - -#ifndef SRC_COMMON_STRINGSTATUS_H_ -#define SRC_COMMON_STRINGSTATUS_H_ +#ifndef SRC_COMMON_STRINGSTATUS_H_ +#define SRC_COMMON_STRINGSTATUS_H_ #include -#include + #include +#include namespace curve { namespace common { class StringStatus { public: /** - * @brief ExposeAs 用于初始化bvar + * @brief ExposeAs is used to initialize bvar * - * @param[in] prefix, 前缀 - * @param[in] name, 名字 + * @param[in] prefix, prefix + * @param[in] name, first name */ - void ExposeAs(const std::string &prefix, const std::string &name); + void ExposeAs(const std::string& prefix, const std::string& name); /** - * @brief Set 设置每项key-value信息 + * @brief Set sets the key-value information for each item * * @param[in] key * @param[in] value @@ -49,30 +49,31 @@ class StringStatus { void Set(const std::string& key, const std::string& value); /** - * @brief Update 把当前key-value map中的键值对以json string的形式设置到status中 //NOLINT + * @brief Update sets the key-value pairs in the current key-value map to + * status as JSON strings// NOLINT */ void Update(); /** - * @brief GetValueByKey 获取指定key对应的value + * @brief GetValueByKey Get the value corresponding to the specified key * - * @param[in] key 指定key + * @param[in] key Specify the key */ - std::string GetValueByKey(const std::string &key); + std::string GetValueByKey(const std::string& key); /** - * @brief JsonBody 获取当前key-value map对应的json形式字符串 + * @brief JsonBody obtains the JSON format string corresponding to the + * current key-value map */ std::string JsonBody(); private: - // 需要导出的结构体的key-value map + // The key-value map of the structure to be exported std::map kvs_; - // 该导出项对应的status + // The status corresponding to the exported item bvar::Status status_; }; } // namespace common } // namespace curve #endif // SRC_COMMON_STRINGSTATUS_H_ - diff --git a/src/common/timeutility.h b/src/common/timeutility.h index 1ba3483d34..d3fc2d244c 100644 --- a/src/common/timeutility.h +++ b/src/common/timeutility.h @@ -25,9 +25,10 @@ #include #include #include + +#include #include #include -#include namespace curve { namespace common { @@ -57,7 +58,8 @@ class TimeUtility { return localtime(&now)->tm_hour; } - // 时间戳转成标准时间输出在standard里面,时间戳单位为秒 + // Convert the timestamp to standard time and output it in standard, with + // the timestamp unit in seconds static inline void TimeStampToStandard(time_t timeStamp, std::string* standard) { char now[64]; @@ -67,7 +69,7 @@ class TimeUtility { *standard = std::string(now); } - // 时间戳转成标准时间并返回,时间戳单位为秒 + // The timestamp is converted to standard time and returned in seconds static inline std::string TimeStampToStandard(time_t timeStamp) { char now[64]; struct tm p; @@ -85,13 +87,9 @@ class ExpiredTime { public: ExpiredTime() : startUs_(TimeUtility::GetTimeofDayUs()) {} - double ExpiredSec() const { - return ExpiredUs() / 
diff --git a/src/common/timeutility.h b/src/common/timeutility.h index 1ba3483d34..d3fc2d244c 100644 --- a/src/common/timeutility.h +++ b/src/common/timeutility.h @@ -25,9 +25,10 @@ #include #include #include + +#include #include #include -#include namespace curve { namespace common { @@ -57,7 +58,8 @@ class TimeUtility { return localtime(&now)->tm_hour; } - // 时间戳转成标准时间输出在standard里面,时间戳单位为秒 + // Convert the timestamp to standard time and output it in standard, with + // the timestamp unit in seconds static inline void TimeStampToStandard(time_t timeStamp, std::string* standard) { char now[64]; @@ -67,7 +69,7 @@ class TimeUtility { *standard = std::string(now); } - // 时间戳转成标准时间并返回,时间戳单位为秒 + // Convert the timestamp (in seconds) to standard time and return it static inline std::string TimeStampToStandard(time_t timeStamp) { char now[64]; struct tm p; @@ -85,13 +87,9 @@ class ExpiredTime { public: ExpiredTime() : startUs_(TimeUtility::GetTimeofDayUs()) {} - double ExpiredSec() const { - return ExpiredUs() / 1000000; - } + double ExpiredSec() const { return ExpiredUs() / 1000000; } - double ExpiredMs() const { - return ExpiredUs() / 1000; - } + double ExpiredMs() const { return ExpiredUs() / 1000; } double ExpiredUs() const { return TimeUtility::GetTimeofDayUs() - startUs_; @@ -101,7 +99,7 @@ class ExpiredTime { uint64_t startUs_; }; -} // namespace common -} // namespace curve +} // namespace common +} // namespace curve -#endif // SRC_COMMON_TIMEUTILITY_H_ +#endif // SRC_COMMON_TIMEUTILITY_H_ diff --git a/src/common/uuid.h b/src/common/uuid.h index 8fbc41f61c..996704c987 100644 --- a/src/common/uuid.h +++ b/src/common/uuid.h @@ -27,26 +27,29 @@ extern "C" { void uuid_generate(uuid_t out); void uuid_generate_random(uuid_t out); void uuid_generate_time(uuid_t out); -// 指明由uuid_generate_time生成的uuid是否使用了时间同步机制,不进行封装。 +// Indicates whether the UUID generated by uuid_generate_time used a time +// synchronization mechanism; this function is not wrapped int uuid_generate_time_safe(uuid_t out); } #include #define BUFF_LEN 36 namespace curve { namespace common { -// 生成uuid的生成器 +// UUID generator class UUIDGenerator { public: UUIDGenerator() {} /** - * @brief 生成uuid,优先采用的算法 - * 如果存在一个高质量的随机数生成器(/dev/urandom), - * UUID将基于其生成的随机数产生。 - * 备用算法:在高质量的随机数生成器不可用的情况下,如果可以获取到MAC地址, - * 则将利用由随机数生成器产生的随机数、当前时间、MAC地址生成UUID。 + * @brief Generate a UUID, preferring the best available algorithm + * If a high-quality random number generator (/dev/urandom) is available, + * the UUID will be generated based on the random numbers it generates. + * Backup algorithm: If a high-quality random number generator is not + * available and if the MAC address can be obtained, the UUID will be + * generated using a combination of random numbers, current time, and the + * MAC address. * @param : - * @return 生成的uuid + * @return The generated UUID */ std::string GenerateUUID() { uuid_t out; @@ -58,11 +61,14 @@ class UUIDGenerator { } /** - * @brief 生成uuid - * 使用全局时钟、MAC地址。有MAC地址泄露风险。为了保证唯一性还使用的时间同步机制, - * 如果,时间同步机制不可用,多台机器上生成的uuid可能会重复。 + * @brief Generate a UUID. + * It utilizes the global clock and MAC address, but there is a risk of MAC + * address leakage. To ensure uniqueness, it also employs a time + * synchronization mechanism. However, if the time synchronization mechanism + * is not available, there is a possibility of UUID duplication when + * generated on multiple machines. * @param : - * @return 生成的uuid + * @return The generated UUID */ std::string GenerateUUIDTime() { uuid_t out; @@ -74,10 +80,11 @@ class UUIDGenerator { } /** - * @brief 生成uuid - * 强制完全使用随机数,优先使用(/dev/urandom),备用(伪随机数生成器)。 - * 在使用伪随机数生成器的情况下,uuid有重复的风险。 - * @return 生成的uuid + * @brief Generate a UUID + * Always uses random numbers, preferring /dev/urandom with a fallback to + * a pseudo-random number generator. When the pseudo-random fallback is + * used, there is a risk of UUID duplication. + * @return The generated UUID */ std::string GenerateUUIDRandom() { uuid_t out;
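Aside for reviewers: the three strategies documented above differ only in their uniqueness/privacy trade-offs; a trivial usage sketch:

#include "src/common/uuid.h"

curve::common::UUIDGenerator gen;
std::string id = gen.GenerateUUID();         // preferred: /dev/urandom when available
std::string tid = gen.GenerateUUIDTime();    // clock + MAC; may expose the MAC address
std::string rid = gen.GenerateUUIDRandom();  // random only; pseudo-random fallback risks duplicates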
diff --git a/src/common/wait_interval.h b/src/common/wait_interval.h index 69c82143c2..bbf8b21b49 100644 --- a/src/common/wait_interval.h +++ b/src/common/wait_interval.h @@ -20,36 +20,37 @@ * Author: lixiaocui */ -#ifndef SRC_COMMON_WAIT_INTERVAL_H_ -#define SRC_COMMON_WAIT_INTERVAL_H_ +#ifndef SRC_COMMON_WAIT_INTERVAL_H_ +#define SRC_COMMON_WAIT_INTERVAL_H_ #include "src/common/interruptible_sleeper.h" namespace curve { namespace common { -class WaitInterval { +class WaitInterval { public: /** - * Init 初始化任务的执行间隔 + * Init initializes the task's execution interval * - * @param[in] intervalMs 执行间隔单位是ms + * @param[in] intervalMs The execution interval, in ms */ void Init(uint64_t intervalMs); /** - * WaitForNextExcution 根据最近一次的执行时间点和周期确定需要等待多久之后再执行 + * WaitForNextExcution determines how long to wait before the next + * execution based on the last execution time and the period */ void WaitForNextExcution(); /** - * StopWait 退出sleep等待 + * StopWait exits the sleep wait */ void StopWait(); private: - // 最近一次的执行时间 + // Last execution time uint64_t lastSend_; - // 任务的执行周期 + // Task execution period uint64_t intevalMs_; InterruptibleSleeper sleeper_; diff --git a/src/fs/ext4_filesystem_impl.cpp b/src/fs/ext4_filesystem_impl.cpp index f4cd6cfcdb..d649b68ce7 100644 --- a/src/fs/ext4_filesystem_impl.cpp +++ b/src/fs/ext4_filesystem_impl.cpp @@ -20,14 +20,15 @@ * Author: yangyaokai */ +#include "src/fs/ext4_filesystem_impl.h" + +#include #include -#include -#include #include -#include +#include +#include #include "src/common/string_util.h" -#include "src/fs/ext4_filesystem_impl.h" #include "src/fs/wrap_posix.h" #define MIN_KERNEL_VERSION KERNEL_VERSION(3, 15, 0) @@ -40,13 +41,11 @@ std::mutex Ext4FileSystemImpl::mutex_; Ext4FileSystemImpl::Ext4FileSystemImpl( std::shared_ptr<PosixWrapper> posixWrapper) - : posixWrapper_(posixWrapper) - , enableRenameat2_(false) { + : posixWrapper_(posixWrapper), enableRenameat2_(false) { CHECK(posixWrapper_ != nullptr) << "PosixWrapper is null"; } -Ext4FileSystemImpl::~Ext4FileSystemImpl() { -} +Ext4FileSystemImpl::~Ext4FileSystemImpl() {} std::shared_ptr<Ext4FileSystemImpl> Ext4FileSystemImpl::getInstance() { std::lock_guard<std::mutex> lock(mutex_); @@ -54,13 +53,14 @@ std::shared_ptr<Ext4FileSystemImpl> Ext4FileSystemImpl::getInstance() { std::shared_ptr<PosixWrapper> wrapper = std::make_shared<PosixWrapper>(); self_ = std::shared_ptr<Ext4FileSystemImpl>( - new(std::nothrow) Ext4FileSystemImpl(wrapper)); + new (std::nothrow) Ext4FileSystemImpl(wrapper)); CHECK(self_ != nullptr) << "Failed to new ext4 local fs."; } return self_; } -void Ext4FileSystemImpl::SetPosixWrapper(std::shared_ptr<PosixWrapper> wrapper) { //NOLINT +void Ext4FileSystemImpl::SetPosixWrapper( + std::shared_ptr<PosixWrapper> wrapper) { // NOLINT CHECK(wrapper != nullptr) << "PosixWrapper is null"; posixWrapper_ = wrapper; } @@ -71,16 +71,17 @@ bool Ext4FileSystemImpl::CheckKernelVersion() { ret = posixWrapper_->uname(&kernel_info); if (ret != 0) { - LOG(ERROR) << "Get kernel info failed."; - return false; + LOG(ERROR) << "Get kernel info failed."; + return false; } LOG(INFO) << "Kernel version: " << kernel_info.release; LOG(INFO) << "System version: " << kernel_info.version; LOG(INFO) << "Machine: " << kernel_info.machine; - // 通过uname获取的版本字符串格式可能为a.b.c-xxx - // a为主版本号,b为此版本号,c为修正号 + // The version string format obtained through uname may be a.b.c-xxx + // a is the major version number, b the minor version number, and c the + // revision number vector<string> elements; ::curve::common::SplitString(kernel_info.release, "-", &elements); if (elements.size() == 0) { @@ -90,7 +91,8
@@ bool Ext4FileSystemImpl::CheckKernelVersion() { vector numbers; ::curve::common::SplitString(elements[0], ".", &numbers); - // 有些系统可能版本格式前面部分是a.b.c.d,但是a.b.c是不变的 + // Some systems may have a version format with the front part being a.b.c.d, + // but a.b.c remains unchanged if (numbers.size() < 3) { LOG(ERROR) << "parse kenel version failed."; return false; @@ -99,11 +101,10 @@ bool Ext4FileSystemImpl::CheckKernelVersion() { int major = std::stoi(numbers[0]); int minor = std::stoi(numbers[1]); int revision = std::stoi(numbers[2]); - LOG(INFO) << "major: " << major - << ", minor: " << minor + LOG(INFO) << "major: " << major << ", minor: " << minor << ", revision: " << revision; - // 内核版本必须大于3.15,用于支持renameat2 + // The kernel version must be greater than 3.15 to support renameat2 if (KERNEL_VERSION(major, minor, revision) < MIN_KERNEL_VERSION) { LOG(ERROR) << "Kernel older than 3.15 is not supported."; return false; @@ -114,14 +115,13 @@ bool Ext4FileSystemImpl::CheckKernelVersion() { int Ext4FileSystemImpl::Init(const LocalFileSystemOption& option) { enableRenameat2_ = option.enableRenameat2; if (enableRenameat2_) { - if (!CheckKernelVersion()) - return -1; + if (!CheckKernelVersion()) return -1; } return 0; } int Ext4FileSystemImpl::Statfs(const string& path, - struct FileSystemInfo *info) { + struct FileSystemInfo* info) { struct statfs diskInfo; int rc = posixWrapper_->statfs(path.c_str(), &diskInfo); if (rc < 0) { @@ -157,7 +157,8 @@ int Ext4FileSystemImpl::Close(int fd) { int Ext4FileSystemImpl::Delete(const string& path) { int rc = 0; - // 如果删除对象是目录的话,需要先删除目录下的子对象 + // If the deleted object is a directory, you need to first delete the sub + // objects under the directory if (DirExists(path)) { vector names; rc = List(path, &names); @@ -165,9 +166,9 @@ int Ext4FileSystemImpl::Delete(const string& path) { LOG(WARNING) << "List " << path << " failed."; return rc; } - for (auto &name : names) { + for (auto& name : names) { string subPath = path + "/" + name; - // 递归删除子对象 + // Recursively delete sub objects rc = Delete(subPath); if (rc < 0) { LOG(WARNING) << "Delete " << subPath << " failed."; @@ -189,20 +190,19 @@ int Ext4FileSystemImpl::Mkdir(const string& dirName) { ::curve::common::SplitString(dirName, "/", &names); // root dir must exists - if (0 == names.size()) - return 0; + if (0 == names.size()) return 0; string path; for (size_t i = 0; i < names.size(); ++i) { - if (0 == i && dirName[0] != '/') // 相对路径 + if (0 == i && dirName[0] != '/') // Relative path path = path + names[i]; else path = path + "/" + names[i]; - if (DirExists(path)) - continue; - // 目录需要755权限,不然会出现“Permission denied” + if (DirExists(path)) continue; + // Directory requires 755 permissions, otherwise 'Permission denied' + // will appear if (posixWrapper_->mkdir(path.c_str(), 0755) < 0) { - LOG(WARNING) << "mkdir " << path << " failed. "<< strerror(errno); + LOG(WARNING) << "mkdir " << path << " failed. " << strerror(errno); return -errno; } } @@ -226,8 +226,7 @@ bool Ext4FileSystemImpl::FileExists(const string& filePath) { return false; } -int Ext4FileSystemImpl::DoRename(const string& oldPath, - const string& newPath, +int Ext4FileSystemImpl::DoRename(const string& oldPath, const string& newPath, unsigned int flags) { int rc = 0; if (enableRenameat2_) { @@ -237,8 +236,7 @@ int Ext4FileSystemImpl::DoRename(const string& oldPath, } if (rc < 0) { LOG(WARNING) << "rename failed: " << strerror(errno) - << ". old path: " << oldPath - << ", new path: " << newPath + << ". 
old path: " << oldPath << ", new path: " << newPath << ", flag: " << flags; return -errno; } @@ -246,21 +244,22 @@ int Ext4FileSystemImpl::DoRename(const string& oldPath, } int Ext4FileSystemImpl::List(const string& dirName, - vector *names) { - DIR *dir = posixWrapper_->opendir(dirName.c_str()); + vector* names) { + DIR* dir = posixWrapper_->opendir(dirName.c_str()); if (nullptr == dir) { LOG(WARNING) << "opendir:" << dirName << " failed:" << strerror(errno); return -errno; } - struct dirent *dirIter; + struct dirent* dirIter; errno = 0; - while ((dirIter=posixWrapper_->readdir(dir)) != nullptr) { - if (strcmp(dirIter->d_name, ".") == 0 - || strcmp(dirIter->d_name, "..") == 0) + while ((dirIter = posixWrapper_->readdir(dir)) != nullptr) { + if (strcmp(dirIter->d_name, ".") == 0 || + strcmp(dirIter->d_name, "..") == 0) continue; names->push_back(dirIter->d_name); } - // 可能存在其他携程改变了errno,但是只能通过此方式判断readdir是否成功 + // There may be other Ctrip changes to errno, but this is the only way to + // determine whether readdir is successful if (errno != 0) { LOG(WARNING) << "readdir failed: " << strerror(errno); } @@ -268,19 +267,14 @@ int Ext4FileSystemImpl::List(const string& dirName, return -errno; } -int Ext4FileSystemImpl::Read(int fd, - char *buf, - uint64_t offset, - int length) { +int Ext4FileSystemImpl::Read(int fd, char* buf, uint64_t offset, int length) { int remainLength = length; int relativeOffset = 0; int retryTimes = 0; while (remainLength > 0) { - int ret = posixWrapper_->pread(fd, - buf + relativeOffset, - remainLength, + int ret = posixWrapper_->pread(fd, buf + relativeOffset, remainLength, offset); - // 如果offset大于文件长度,pread会返回0 + // If the offset is greater than the file length, pread will return 0 if (ret == 0) { LOG(WARNING) << "pread returns zero." 
<< "offset: " << offset @@ -304,17 +298,13 @@ int Ext4FileSystemImpl::Read(int fd, return length - remainLength; } -int Ext4FileSystemImpl::Write(int fd, - const char *buf, - uint64_t offset, +int Ext4FileSystemImpl::Write(int fd, const char* buf, uint64_t offset, int length) { int remainLength = length; int relativeOffset = 0; int retryTimes = 0; while (remainLength > 0) { - int ret = posixWrapper_->pwrite(fd, - buf + relativeOffset, - remainLength, + int ret = posixWrapper_->pwrite(fd, buf + relativeOffset, remainLength, offset); if (ret < 0) { if (errno == EINTR && retryTimes < MAX_RETYR_TIME) { @@ -333,9 +323,7 @@ int Ext4FileSystemImpl::Write(int fd, return length; } -int Ext4FileSystemImpl::Write(int fd, - butil::IOBuf buf, - uint64_t offset, +int Ext4FileSystemImpl::Write(int fd, butil::IOBuf buf, uint64_t offset, int length) { if (length != static_cast(buf.size())) { LOG(ERROR) << "IOBuf::pcut_into_file_descriptor failed, fd: " << fd @@ -376,9 +364,7 @@ int Ext4FileSystemImpl::Sync(int fd) { return 0; } -int Ext4FileSystemImpl::Append(int fd, - const char *buf, - int length) { +int Ext4FileSystemImpl::Append(int fd, const char* buf, int length) { (void)fd; (void)buf; (void)length; @@ -386,10 +372,7 @@ int Ext4FileSystemImpl::Append(int fd, return 0; } -int Ext4FileSystemImpl::Fallocate(int fd, - int op, - uint64_t offset, - int length) { +int Ext4FileSystemImpl::Fallocate(int fd, int op, uint64_t offset, int length) { int rc = posixWrapper_->fallocate(fd, op, offset, length); if (rc < 0) { LOG(ERROR) << "fallocate failed: " << strerror(errno); @@ -398,7 +381,7 @@ int Ext4FileSystemImpl::Fallocate(int fd, return 0; } -int Ext4FileSystemImpl::Fstat(int fd, struct stat *info) { +int Ext4FileSystemImpl::Fstat(int fd, struct stat* info) { int rc = posixWrapper_->fstat(fd, info); if (rc < 0) { LOG(ERROR) << "fstat failed: " << strerror(errno); diff --git a/src/fs/local_filesystem.h b/src/fs/local_filesystem.h index 3072867807..075e273a29 100644 --- a/src/fs/local_filesystem.h +++ b/src/fs/local_filesystem.h @@ -23,22 +23,23 @@ #ifndef SRC_FS_LOCAL_FILESYSTEM_H_ #define SRC_FS_LOCAL_FILESYSTEM_H_ -#include #include -#include #include -#include -#include -#include -#include +#include +#include + #include +#include +#include #include // NOLINT +#include +#include #include "src/fs/fs_common.h" -using std::vector; using std::map; using std::string; +using std::vector; namespace curve { namespace fs { @@ -50,123 +51,130 @@ struct LocalFileSystemOption { class LocalFileSystem { public: - LocalFileSystem() {} + LocalFileSystem() {} virtual ~LocalFileSystem() {} /** - * 初始化文件系统 - * 如果文件系统还未格式化,首先会格式化, - * 然后挂载文件系统, - * 已经格式化或者已经挂载的文件系统不会重复格式化或挂载 - * @param option:初始化参数 + * Initialize file system + * If the file system has not been formatted yet, it will be formatted + * first, Then mount the file system, Formatted or mounted file systems will + * not be repeatedly formatted or mounted + * @param option: initialization parameters */ virtual int Init(const LocalFileSystemOption& option) = 0; /** - * 获取文件或目录所在的文件系统状态信息 - * @param path: 要获取的文件系统下的文件或目录路径 - * @param info[out]: 文件系统状态信息 - * @return 成功返回0 + * Obtain the file system status information where the file or directory is + * located + * @param path: The file or directory path under the file system to obtain + * @param info[out]: File system status information + * @return Successfully returned 0 */ virtual int Statfs(const string& path, struct FileSystemInfo* info) = 0; /** - * 打开文件句柄 - * @param path:文件路径 - * @param flags:操作文件方式的flag - * 
此flag使用POSIX文件系统的定义 - * @return 成功返回文件句柄id,失败返回负值 + * Open file handle + * @param path: File path + * @param flags: flags controlling how the file is opened + * This flag follows the POSIX file system definitions + * @return on success returns the file handle id; on failure returns a + * negative value */ virtual int Open(const string& path, int flags) = 0; /** - * 关闭文件句柄 - * @param fd: 文件句柄id - * @return 成功返回0 + * Close file handle + * @param fd: file handle id + * @return Successfully returned 0 */ virtual int Close(int fd) = 0; /** - * 删除文件或目录 - * 如果删除对象为目录,会删除目录下的文件或子目录 - * @param path:文件或目录的路径 - * return 成功返回0 + * Delete files or directories + * If the deleted object is a directory, the files or subdirectories under + * the directory will be deleted + * @param path: The path to a file or directory + * @return Successfully returned 0 */ virtual int Delete(const string& path) = 0; /** - * 创建目录 - * @param dirPath: 目录路径 - * @return 成功返回0 + * Create directory + * @param dirPath: Directory path + * @return Successfully returned 0 */ virtual int Mkdir(const string& dirPath) = 0; /** - * 判断目录是否存在 - * @param dirPath:目录路径 - * @return 存在返回true,否则返回false + * Determine if the directory exists + * @param dirPath: Directory path + * @return returns true if it exists, otherwise returns false */ virtual bool DirExists(const string& dirPath) = 0; /** - * 判断文件是否存在 - * @param dirPath:目录路径 - * @return 存在返回true,否则返回false + * Determine if the file exists + * @param filePath: File path + * @return returns true if it exists, otherwise returns false */ virtual bool FileExists(const string& filePath) = 0; /** - * 重命名文件/目录 - * 将文件或目录重命名或者移到其他路径,不会覆盖已存在的文件 - * @param oldPath:原文件或目录路径 - * @param newPath:新的文件或目录路径 - * 新的文件或目录在重命名之前不存在,否则返回错误 - * @param flags:重命名使用的模式,默认值为0 - * 可选择RENAME_EXCHANGE、RENAME_EXCHANGE、RENAME_WHITEOUT三种模式 + * Rename File/Directory + * Renaming or moving files or directories to a different path will not + * overwrite existing files + * @param oldPath: Path to the original file or directory + * @param newPath: New file or directory path + * The new file or directory must not exist before renaming, otherwise an + * error will be returned + * @param flags: The mode used for renaming, with a default value of 0 + * Optional modes: RENAME_NOREPLACE, RENAME_EXCHANGE, RENAME_WHITEOUT * https://manpages.debian.org/testing/manpages-dev/renameat2.2.en.html - * @return 成功返回0 + * @return Successfully returned 0 */ - virtual int Rename(const string& oldPath, - const string& newPath, + virtual int Rename(const string& oldPath, const string& newPath, unsigned int flags = 0) { return DoRename(oldPath, newPath, flags); } /** - * 列举指定路径下的所有文件和目录名 - * @param dirPath:目录路径 - * @param name[out]:目录下的所有目录和文件名 - * @return 成功返回0 + * List all files and directory names under the specified path + * @param dirPath: Directory path + * @param names[out]: All directory and file names under the directory + * @return Successfully returned 0 */ virtual int List(const string& dirPath, vector<string>* names) = 0; /** - * 从文件指定区域读取数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:接收读取数据的buffer - * @param offset:读取区域的起始偏移 - * @param length:读取数据的长度 - * @return 返回成功读取到的数据长度,失败返回-1 + * Read data from the specified area of the file + * @param fd: File handle id, obtained through the Open interface + * @param buf: buffer that receives the read data + * @param offset: The starting offset of the read area + * @param length: The length of the read data + * @return returns the length of the data successfully read, while failure + * returns -1 */
virtual int Read(int fd, char* buf, uint64_t offset, int length) = 0; /** - * 向文件指定区域写入数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:待写入数据的buffer - * @param offset:写入区域的起始偏移 - * @param length:写入数据的长度 - * @return 返回成功写入的数据长度,失败返回-1 + * Write data to the specified area of the file + * @param fd: File handle id, obtained through the Open interface + * @param buf: buffer holding the data to be written + * @param offset: The starting offset of the write area + * @param length: The length of the written data + * @return returns the length of successfully written data, while failure + * returns -1 */ virtual int Write(int fd, const char* buf, uint64_t offset, int length) = 0; /** - * 向文件指定区域写入数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:待写入数据 - * @param offset:写入区域的起始偏移 - * @param length:写入数据的长度 - * @return 返回成功写入的数据长度,失败返回-1 + * Write data to the specified area of the file + * @param fd: File handle id, obtained through the Open interface + * @param buf: Data to be written + * @param offset: The starting offset of the write area + * @param length: The length of the written data + * @return returns the length of successfully written data, while failure + * returns -1 */ virtual int Write(int fd, butil::IOBuf buf, uint64_t offset, int length) = 0; @@ -181,59 +189,62 @@ class LocalFileSystem { virtual int Sync(int fd) = 0; /** - * 向文件末尾追加数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:待追加数据的buffer - * @param length:追加数据的长度 - * @return 返回成功追加的数据长度,失败返回-1 + * Append data to the end of the file + * @param fd: File handle id, obtained through the Open interface + * @param buf: buffer holding the data to be appended + * @param length: The length of the data to append + * @return returns the length of successfully appended data, while failure + * returns -1 */ virtual int Append(int fd, const char* buf, int length) = 0; /** - * 文件预分配/挖洞(未实现) - * @param fd:文件句柄id,通过Open接口获取 - * @param op:指定操作类型,预分配还是挖洞 - * @param offset:操作区域的起始偏移 - * @param length:操作区域的长度 - * @return 成功返回0 + * File preallocation/hole punching (not implemented) + * @param fd: File handle id, obtained through the Open interface + * @param op: Specify the type of operation, preallocation or hole punching + * @param offset: The starting offset of the operation area + * @param length: The length of the operation area + * @return Successfully returned 0 */ virtual int Fallocate(int fd, int op, uint64_t offset, int length) = 0; /** - * 获取指定文件状态信息 - * @param fd:文件句柄id,通过Open接口获取 - * @param info[out]:文件系统的信息 - * stat结构同POSIX接口中使用的stat - * @return 成功返回0 + * Obtain the status information of the specified file + * @param fd: File handle id, obtained through the Open interface + * @param info[out]: File status information + * The stat structure is the same as the stat used in the POSIX interface + * @return Successfully returned 0 */ virtual int Fstat(int fd, struct stat* info) = 0; /** - * 将文件数据和元数据刷新到磁盘 - * @param fd:文件句柄id,通过Open接口获取 - * @return 成功返回0 + * Flush file data and metadata to disk + * @param fd: File handle id, obtained through the Open interface + * @return Successfully returned 0 */ virtual int Fsync(int fd) = 0; private: virtual int DoRename(const string& /* oldPath */, const string& /* newPath */, - unsigned int /* flags */) { return -1; } + unsigned int /* flags */) { + return -1; + } }; - class LocalFsFactory { public: /** - * 创建文件系统对象 - * 本地文件系统的工厂方法,根据传入的类型,创建相应的对象 - * 由该接口创建的文件系统会自动进行初始化 - * @param type:文件系统类型 - * @param deviceID: 设备的编号 - * @return 返回本地文件系统对象指针 + * Creating File System Objects + * The factory method of the local file system creates the corresponding + * object based on the type passed in; the file system created by this + * interface is automatically initialized + * @param type: File system type + * @param deviceID: Device number + * @return returns the local file system object pointer */ - static std::shared_ptr<LocalFileSystem> CreateFs(FileSystemType type, - const std::string& deviceID); + static std::shared_ptr<LocalFileSystem> CreateFs( + FileSystemType type, const std::string& deviceID); }; } // namespace fs
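Aside for reviewers: roughly how this interface is consumed via the factory declared above; a sketch only — the EXT4 enumerator and the unused deviceID argument are assumptions.

#include <fcntl.h>

#include "src/fs/local_filesystem.h"

using curve::fs::FileSystemType;
using curve::fs::LocalFileSystemOption;
using curve::fs::LocalFsFactory;

auto lfs = LocalFsFactory::CreateFs(FileSystemType::EXT4, "");
LocalFileSystemOption opt;
opt.enableRenameat2 = true;  // needs kernel >= 3.15, see CheckKernelVersion above
if (lfs != nullptr && lfs->Init(opt) == 0) {
    int fd = lfs->Open("/data/chunk_0001", O_RDWR | O_CREAT);
    char buf[4096] = {0};
    lfs->Write(fd, buf, 0, sizeof(buf));  // write 4 KiB at offset 0
    lfs->Fsync(fd);                       // flush data and metadata
    lfs->Close(fd);
}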
diff --git a/src/kvstorageclient/etcd_client.h b/src/kvstorageclient/etcd_client.h index 16aec44e6a..b9c2266d83 100644 --- a/src/kvstorageclient/etcd_client.h +++ b/src/kvstorageclient/etcd_client.h @@ -24,9 +24,10 @@ #define SRC_KVSTORAGECLIENT_ETCD_CLIENT_H_ #include + #include -#include #include +#include namespace curve { namespace kvstorage { @@ -43,7 +44,7 @@ class KVStorageClient { * * @return error code EtcdErrCode */ - virtual int Put(const std::string &key, const std::string &value) = 0; + virtual int Put(const std::string& key, const std::string& value) = 0; /** * @brief PutRewithRevision store key-value @@ -54,8 +55,9 @@ * * @return error code EtcdErrCode */ - virtual int PutRewithRevision(const std::string &key, - const std::string &value, int64_t *revision) = 0; + virtual int PutRewithRevision(const std::string& key, + const std::string& value, + int64_t* revision) = 0; /** * @brief Get Get the value of the specified key @@ -65,7 +67,7 @@ * * @return error code */ - virtual int Get(const std::string &key, std::string *out) = 0; + virtual int Get(const std::string& key, std::string* out) = 0; /** * @brief List Get all the values between [startKey, endKey) @@ -76,15 +78,16 @@ * * @return error code */ - virtual int List(const std::string &startKey, const std::string &endKey, - std::vector<std::string> *values) = 0; + virtual int List(const std::string& startKey, const std::string& endKey, + std::vector<std::string>* values) = 0; /** * @brief List all the key and values between [startKey, endKey) * * @param[in] startKey * @param[in] endKey - * @param[out] out store key/value pairs that key is between [startKey, endKey) + * @param[out] out store key/value pairs that key is between [startKey, + * endKey) * * @return error code */ @@ -98,7 +101,7 @@ * * @return error code */ - virtual int Delete(const std::string &key) = 0; + virtual int Delete(const std::string& key) = 0; /** * @brief DeleteRewithRevision Delete the value of the specified key @@ -108,17 +111,18 @@ * * @return error code */ - virtual int DeleteRewithRevision( - const std::string &key, int64_t *revision) = 0; + virtual int DeleteRewithRevision(const std::string& key, + int64_t* revision) = 0; /* - * @brief TxnN Operate transactions in the order of ops[0] ops[1] ..., currently 2 and 3 operations are supported //NOLINT - * - * @param[in] ops Operation set - * - * @return error code - */ - virtual int TxnN(const std::vector<Operation> &ops) = 0; + * @brief TxnN Operate transactions in the order of ops[0] ops[1] ..., + * currently 2 and 3 operations are supported + * + * @param[in] ops Operation set + * + * @return error code + */ + virtual int TxnN(const std::vector<Operation>& ops) = 0; /** * @brief CompareAndSwap Transaction, to achieve CAS @@ -129,17 +133,15 @@ * * @return error code */ - virtual int CompareAndSwap(const std::string &key, const std::string &preV, - const std::string &target) = 0; + virtual int CompareAndSwap(const std::string& key,
const std::string& preV, + const std::string& target) = 0; }; // encapsulate the c header file of etcd generated by go compilation class EtcdClientImp : public KVStorageClient { public: EtcdClientImp() {} - ~EtcdClientImp() { - CloseClient(); - } + ~EtcdClientImp() { CloseClient(); } /** * @brief Init init the etcdclient, a global var in go @@ -154,30 +156,30 @@ class EtcdClientImp : public KVStorageClient { void CloseClient(); - int Put(const std::string &key, const std::string &value) override; + int Put(const std::string& key, const std::string& value) override; - int PutRewithRevision(const std::string &key, const std::string &value, - int64_t *revision) override; + int PutRewithRevision(const std::string& key, const std::string& value, + int64_t* revision) override; - int Get(const std::string &key, std::string *out) override; + int Get(const std::string& key, std::string* out) override; - int List(const std::string &startKey, - const std::string &endKey, std::vector *values) override; + int List(const std::string& startKey, const std::string& endKey, + std::vector* values) override; int List(const std::string& startKey, const std::string& endKey, - std::vector >* out) override; + std::vector>* out) override; - int Delete(const std::string &key) override; + int Delete(const std::string& key) override; - int DeleteRewithRevision( - const std::string &key, int64_t *revision) override; + int DeleteRewithRevision(const std::string& key, + int64_t* revision) override; - int TxnN(const std::vector &ops) override; + int TxnN(const std::vector& ops) override; - int CompareAndSwap(const std::string &key, const std::string &preV, - const std::string &target) override; + int CompareAndSwap(const std::string& key, const std::string& preV, + const std::string& target) override; - virtual int GetCurrentRevision(int64_t *revision); + virtual int GetCurrentRevision(int64_t* revision); /** * @brief ListWithLimitAndRevision @@ -191,9 +193,11 @@ class EtcdClientImp : public KVStorageClient { * @param[out] values the value vector of all the key-value pairs * @param[out] lastKey the last key of the vector */ - virtual int ListWithLimitAndRevision(const std::string &startKey, - const std::string &endKey, int64_t limit, int64_t revision, - std::vector *values, std::string *lastKey); + virtual int ListWithLimitAndRevision(const std::string& startKey, + const std::string& endKey, + int64_t limit, int64_t revision, + std::vector* values, + std::string* lastKey); /** * @brief CampaignLeader Leader campaign through etcd, return directly if @@ -209,14 +213,14 @@ class EtcdClientImp : public KVStorageClient { * leader when the session expired after * client offline. 
* @param[in] electionTimeoutMs the timeout,0 will block always - * @param[out] leaderOid leader的objectId,recorded in objectManager + * @param[out] leaderOid leader's objectId,recorded in objectManager * * @return EtcdErrCode::EtcdCampaignLeaderSuccess success,others fail */ - virtual int CampaignLeader( - const std::string &pfx, const std::string &leaderName, - uint32_t sessionInterSec, uint32_t electionTimeoutMs, - uint64_t *leaderOid); + virtual int CampaignLeader(const std::string& pfx, + const std::string& leaderName, + uint32_t sessionInterSec, + uint32_t electionTimeoutMs, uint64_t* leaderOid); /** * @brief LeaderObserve @@ -228,8 +232,8 @@ class EtcdClientImp : public KVStorageClient { * * @return if returned, the session between mds and etcd expired */ - virtual int LeaderObserve( - uint64_t leaderOid, const std::string &leaderName); + virtual int LeaderObserve(uint64_t leaderOid, + const std::string& leaderName); /** * @brief LeaderResign the leader resigns initiatively, the other peers @@ -241,7 +245,7 @@ class EtcdClientImp : public KVStorageClient { * @return EtcdErrCode::EtcdLeaderResiginSuccess resign seccess * EtcdErrCode::EtcdLeaderResiginErr resign fail */ - virtual int LeaderResign(uint64_t leaderOid, uint64_t timeoutMs); + virtual int LeaderResign(uint64_t leaderOid, uint64_t timeoutMs); // for test void SetTimeout(int time); diff --git a/src/leader_election/leader_election.cpp b/src/leader_election/leader_election.cpp index 76884e0b9c..de2a86c743 100644 --- a/src/leader_election/leader_election.cpp +++ b/src/leader_election/leader_election.cpp @@ -20,11 +20,14 @@ * Author: lixiaocui1 */ +#include "src/leader_election/leader_election.h" + #include -#include -#include //NOLINT + #include -#include "src/leader_election/leader_election.h" +#include +#include //NOLINT + #include "src/common/concurrent/concurrent.h" using ::curve::common::Thread; @@ -32,23 +35,21 @@ using ::curve::common::Thread; namespace curve { namespace election { int LeaderElection::CampaignLeader() { - LOG(INFO) << opt_.leaderUniqueName << " start campaign leader prefix: " - << realPrefix_; + LOG(INFO) << opt_.leaderUniqueName + << " start campaign leader prefix: " << realPrefix_; int resCode = opt_.etcdCli->CampaignLeader( - realPrefix_, - opt_.leaderUniqueName, - opt_.sessionInterSec, - opt_.electionTimeoutMs, - &leaderOid_); + realPrefix_, opt_.leaderUniqueName, opt_.sessionInterSec, + opt_.electionTimeoutMs, &leaderOid_); if (resCode == EtcdErrCode::EtcdCampaignLeaderSuccess) { - LOG(INFO) << opt_.leaderUniqueName << " campaign leader prefix:" - << realPrefix_ << " success"; + LOG(INFO) << opt_.leaderUniqueName + << " campaign leader prefix:" << realPrefix_ << " success"; return 0; } - LOG(WARNING) << opt_.leaderUniqueName << " campaign leader prefix:" - << realPrefix_ << " err: " << resCode; + LOG(WARNING) << opt_.leaderUniqueName + << " campaign leader prefix:" << realPrefix_ + << " err: " << resCode; return -1; } @@ -61,28 +62,29 @@ int LeaderElection::LeaderResign() { int res = opt_.etcdCli->LeaderResign(leaderOid_, 1000 * opt_.sessionInterSec); if (EtcdErrCode::EtcdLeaderResiginSuccess == res) { - LOG(INFO) << opt_.leaderUniqueName << " resign leader prefix:" - << realPrefix_ << " ok"; + LOG(INFO) << opt_.leaderUniqueName + << " resign leader prefix:" << realPrefix_ << " ok"; return 0; } - LOG(WARNING) << opt_.leaderUniqueName << " resign leader prefix:" - << realPrefix_ << " err: " << res; + LOG(WARNING) << opt_.leaderUniqueName + << " resign leader prefix:" << realPrefix_ << " err: " << res; 
return -1; } int LeaderElection::ObserveLeader() { - LOG(INFO) << opt_.leaderUniqueName << " start observe for prefix:" - << realPrefix_; + LOG(INFO) << opt_.leaderUniqueName + << " start observe for prefix:" << realPrefix_; int resCode = opt_.etcdCli->LeaderObserve(leaderOid_, opt_.leaderUniqueName); - LOG(ERROR) << opt_.leaderUniqueName << " leader observe for prefix:" - << realPrefix_ << " occur error, errcode: " << resCode; + LOG(ERROR) << opt_.leaderUniqueName + << " leader observe for prefix:" << realPrefix_ + << " occur error, errcode: " << resCode; // for test fiu_return_on("src/mds/leaderElection/observeLeader", -1); - // 退出当前进程 + // Exit the current process LOG(INFO) << "mds is existing due to the error of leader observation"; raise(SIGTERM); diff --git a/src/leader_election/leader_election.h b/src/leader_election/leader_election.h index 70a28722ec..2188950cf7 100644 --- a/src/leader_election/leader_election.h +++ b/src/leader_election/leader_election.h @@ -24,32 +24,33 @@ #define SRC_LEADER_ELECTION_LEADER_ELECTION_H_ #include + #include #include -#include "src/kvstorageclient/etcd_client.h" #include "src/common/namespace_define.h" +#include "src/kvstorageclient/etcd_client.h" namespace curve { namespace election { -using ::curve::kvstorage::EtcdClientImp; using ::curve::common::LEADERCAMPAIGNNPFX; +using ::curve::kvstorage::EtcdClientImp; struct LeaderElectionOptions { - // etcd客户端 + // etcd client std::shared_ptr<EtcdClientImp> etcdCli; - // 带ttl的session,ttl超时时间内 + // session with ttl, valid within the ttl timeout uint32_t sessionInterSec; - // 竞选leader的超时时间 + // Timeout for the leader campaign uint32_t electionTimeoutMs; - // leader名称,建议使用ip+port以示区分 + // leader name, it is recommended to use ip+port for differentiation std::string leaderUniqueName; - // 需要竞选的key + // The key to campaign for std::string campaginPrefix; }; @@ -61,33 +62,35 @@ class LeaderElection { } /** - * @brief CampaignLeader 竞选leader + * @brief CampaignLeader campaigns for leader * - * @return 0表示竞选成功 -1表示竞选失败 + * @return 0 indicates a successful election, -1 indicates a failed election */ int CampaignLeader(); /** - * @brief StartObserverLeader 启动leader节点监测线程 + * @brief StartObserverLeader starts the leader node monitoring thread */ void StartObserverLeader(); /** - * @brief LeaderResign leader主动卸任leader,卸任成功后其他节点可以竞选leader + * @brief LeaderResign Leader proactively resigns from its leadership + * position. After successful resignation, other nodes can compete to become + * the new leader */ int LeaderResign(); /** - * @brief 返回leader name + * @brief returns the leader name */ - const std::string& GetLeaderName() { - return opt_.leaderUniqueName; - } + const std::string& GetLeaderName() { return opt_.leaderUniqueName; } public: /** - * @brief ObserveLeader 监测在etcd中创建的leader节点,正常情况下一直block, - * 退出表示leader change或者从client端角度看etcd异常,进程退出 + * @brief Monitor the leader node created in etcd. Under normal + * circumstances, this function continuously blocks. Exiting indicates a + * leader change or, from the client's perspective, an abnormality in etcd, + * which leads to process termination */ int ObserveLeader(); @@ -95,14 +98,13 @@ class LeaderElection { // option LeaderElectionOptions opt_; - // realPrefix_ = leader竞选公共prefix + 自定义prefix + // realPrefix_ = leader campaign public prefix + custom prefix std::string realPrefix_; - // 竞选leader之后记录在objectManager中的id号 + // The ID number recorded in the objectManager after leader election uint64_t leaderOid_; }; } // namespace election } // namespace curve #endif // SRC_LEADER_ELECTION_LEADER_ELECTION_H_ -
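Aside for reviewers: typical wiring of this class as suggested by the header above; the constructor shape is assumed for illustration.

#include "src/leader_election/leader_election.h"

curve::election::LeaderElectionOptions opt;
opt.etcdCli = etcdClient;          // std::shared_ptr<EtcdClientImp>, initialized elsewhere
opt.sessionInterSec = 5;           // session ttl, in seconds
opt.electionTimeoutMs = 0;         // 0 blocks until the campaign succeeds
opt.leaderUniqueName = "10.0.0.1:6666";  // ip+port, as recommended above
opt.campaginPrefix = "mdsleader";  // note the field's spelling in the header

curve::election::LeaderElection election(opt);
if (election.CampaignLeader() == 0) {
    // Watch the leader key; on an observe error the process raises SIGTERM.
    election.StartObserverLeader();
}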
diff --git a/src/mds/nameserver2/clean_core.cpp b/src/mds/nameserver2/clean_core.cpp index 54f743c300..de7b0ae432 100644 --- a/src/mds/nameserver2/clean_core.cpp +++ b/src/mds/nameserver2/clean_core.cpp @@ -24,28 +24,27 @@ namespace curve { namespace mds { -StatusCode CleanCore::CleanSnapShotFile(const FileInfo & fileInfo, +StatusCode CleanCore::CleanSnapShotFile(const FileInfo& fileInfo, TaskProgress* progress) { if (fileInfo.segmentsize() == 0) { LOG(ERROR) << "cleanSnapShot File Error, segmentsize = 0"; return StatusCode::KInternalError; } - uint32_t segmentNum = fileInfo.length() / fileInfo.segmentsize(); + uint32_t segmentNum = fileInfo.length() / fileInfo.segmentsize(); uint64_t segmentSize = fileInfo.segmentsize(); for (uint32_t i = 0; i < segmentNum; i++) { // load segment PageFileSegment segment; StoreStatus storeRet = storage_->GetSegment(fileInfo.parentid(), - i * segmentSize, - &segment); + i * segmentSize, &segment); if (storeRet == StoreStatus::KeyNotExist) { continue; - } else if (storeRet != StoreStatus::OK) { + } else if (storeRet != StoreStatus::OK) { LOG(ERROR) << "cleanSnapShot File Error: " - << "GetSegment Error, inodeid = " << fileInfo.id() - << ", filename = " << fileInfo.filename() - << ", offset = " << i * segmentSize - << ", sequenceNum = " << fileInfo.seqnum(); + << "GetSegment Error, inodeid = " << fileInfo.id() + << ", filename = " << fileInfo.filename() + << ", offset = " << i * segmentSize + << ", sequenceNum = " << fileInfo.seqnum(); progress->SetStatus(TaskStatus::FAILED); return StatusCode::kSnapshotFileDeleteError; } @@ -54,40 +53,40 @@ StatusCode CleanCore::CleanSnapShotFile(const FileInfo & fileInfo, LogicalPoolID logicalPoolID = segment.logicalpoolid(); uint32_t chunkNum = segment.chunks_size(); for (uint32_t j = 0; j != chunkNum; j++) { - // 删除快照时如果chunk不存在快照,则需要修改chunk的correctedSn - // 防止删除快照后,后续的写触发chunk的快照 - // correctSn为创建快照后文件的版本号,也就是快照版本号+1 + // When deleting a snapshot, if a chunk has no snapshot, the chunk's + // correctedSn needs to be updated to prevent subsequent writes from + // triggering a chunk snapshot after the snapshot is deleted. + // correctSn is the file version after the snapshot was created, + // i.e. the snapshot version number + 1 SeqNum correctSn = fileInfo.seqnum() + 1; int ret = copysetClient_->DeleteChunkSnapshotOrCorrectSn( - logicalPoolID, - segment.chunks()[j].copysetid(), - segment.chunks()[j].chunkid(), - correctSn); + logicalPoolID, segment.chunks()[j].copysetid(), + segment.chunks()[j].chunkid(), correctSn); if (ret != 0) { LOG(ERROR) << "CleanSnapShotFile Error: " - << "DeleteChunkSnapshotOrCorrectSn Error" - << ", ret = " << ret - << ", inodeid = " << fileInfo.id() - << ", filename = " << fileInfo.filename() - << ", correctSn = " << correctSn; + << "DeleteChunkSnapshotOrCorrectSn Error" + << ", ret = " << ret + << ", inodeid = " << fileInfo.id() + << ", filename = " << fileInfo.filename() + << ", 
correctSn = " << correctSn; progress->SetStatus(TaskStatus::FAILED); return StatusCode::kSnapshotFileDeleteError; } } - progress->SetProgress(100 * (i+1) / segmentNum); + progress->SetProgress(100 * (i + 1) / segmentNum); } // delete the storage - StoreStatus ret = storage_->DeleteSnapshotFile(fileInfo.parentid(), - fileInfo.filename()); + StoreStatus ret = + storage_->DeleteSnapshotFile(fileInfo.parentid(), fileInfo.filename()); if (ret != StoreStatus::OK) { LOG(INFO) << "delete snapshotfile error, retCode = " << ret; progress->SetStatus(TaskStatus::FAILED); return StatusCode::kSnapshotFileDeleteError; } else { LOG(INFO) << "inodeid = " << fileInfo.id() - << ", filename = " << fileInfo.filename() - << ", seq = " << fileInfo.seqnum() << ", deleted"; + << ", filename = " << fileInfo.filename() + << ", seq = " << fileInfo.seqnum() << ", deleted"; } progress->SetProgress(100); @@ -95,27 +94,27 @@ StatusCode CleanCore::CleanSnapShotFile(const FileInfo & fileInfo, return StatusCode::kOK; } -StatusCode CleanCore::CleanFile(const FileInfo & commonFile, +StatusCode CleanCore::CleanFile(const FileInfo& commonFile, TaskProgress* progress) { if (commonFile.segmentsize() == 0) { LOG(ERROR) << "Clean commonFile File Error, segmentsize = 0"; return StatusCode::KInternalError; } - int segmentNum = commonFile.length() / commonFile.segmentsize(); + int segmentNum = commonFile.length() / commonFile.segmentsize(); uint64_t segmentSize = commonFile.segmentsize(); for (int i = 0; i != segmentNum; i++) { // load segment PageFileSegment segment; - StoreStatus storeRet = storage_->GetSegment(commonFile.id(), - i * segmentSize, &segment); + StoreStatus storeRet = + storage_->GetSegment(commonFile.id(), i * segmentSize, &segment); if (storeRet == StoreStatus::KeyNotExist) { continue; - } else if (storeRet != StoreStatus::OK) { + } else if (storeRet != StoreStatus::OK) { LOG(ERROR) << "Clean common File Error: " - << "GetSegment Error, inodeid = " << commonFile.id() - << ", filename = " << commonFile.filename() - << ", offset = " << i * segmentSize; + << "GetSegment Error, inodeid = " << commonFile.id() + << ", filename = " << commonFile.filename() + << ", offset = " << i * segmentSize; progress->SetStatus(TaskStatus::FAILED); return StatusCode::kCommonFileDeleteError; } @@ -123,8 +122,7 @@ StatusCode CleanCore::CleanFile(const FileInfo & commonFile, int ret = DeleteChunksInSegment(segment, commonFile.seqnum()); if (ret != 0) { LOG(ERROR) << "Clean common File Error: " - << ", ret = " << ret - << ", inodeid = " << commonFile.id() + << ", ret = " << ret << ", inodeid = " << commonFile.id() << ", filename = " << commonFile.filename() << ", sequenceNum = " << commonFile.seqnum(); progress->SetStatus(TaskStatus::FAILED); @@ -133,33 +131,33 @@ StatusCode CleanCore::CleanFile(const FileInfo & commonFile, // delete segment int64_t revision; - storeRet = storage_->DeleteSegment( - commonFile.id(), i * segmentSize, &revision); + storeRet = storage_->DeleteSegment(commonFile.id(), i * segmentSize, + &revision); if (storeRet != StoreStatus::OK) { LOG(ERROR) << "Clean common File Error: " - << "DeleteSegment Error, inodeid = " << commonFile.id() - << ", filename = " << commonFile.filename() - << ", offset = " << i * segmentSize - << ", sequenceNum = " << commonFile.seqnum(); + << "DeleteSegment Error, inodeid = " << commonFile.id() + << ", filename = " << commonFile.filename() + << ", offset = " << i * segmentSize + << ", sequenceNum = " << commonFile.seqnum(); progress->SetStatus(TaskStatus::FAILED); return 
StatusCode::kCommonFileDeleteError; } allocStatistic_->DeAllocSpace(segment.logicalpoolid(), - segment.segmentsize(), revision); + segment.segmentsize(), revision); progress->SetProgress(100 * (i + 1) / segmentNum); } // delete the storage - StoreStatus ret = storage_->DeleteFile(commonFile.parentid(), - commonFile.filename()); + StoreStatus ret = + storage_->DeleteFile(commonFile.parentid(), commonFile.filename()); if (ret != StoreStatus::OK) { LOG(INFO) << "delete common file error, retCode = " << ret; progress->SetStatus(TaskStatus::FAILED); return StatusCode::kCommonFileDeleteError; } else { LOG(INFO) << "inodeid = " << commonFile.id() - << ", filename = " << commonFile.filename() - << ", seq = " << commonFile.seqnum() << ", deleted"; + << ", filename = " << commonFile.filename() + << ", seq = " << commonFile.seqnum() << ", deleted"; } progress->SetProgress(100); @@ -223,10 +221,8 @@ int CleanCore::DeleteChunksInSegment(const PageFileSegment& segment, const LogicalPoolID logicalPoolId = segment.logicalpoolid(); for (int i = 0; i < segment.chunks_size(); ++i) { int ret = copysetClient_->DeleteChunk( - logicalPoolId, - segment.chunks()[i].copysetid(), - segment.chunks()[i].chunkid(), - seq); + logicalPoolId, segment.chunks()[i].copysetid(), + segment.chunks()[i].chunkid(), seq); if (ret != 0) { LOG(ERROR) << "DeleteChunk failed, ret = " << ret diff --git a/src/mds/nameserver2/clean_core.h b/src/mds/nameserver2/clean_core.h index 0cb4f3f8ab..8011d10ee8 100644 --- a/src/mds/nameserver2/clean_core.h +++ b/src/mds/nameserver2/clean_core.h @@ -25,12 +25,13 @@ #include #include -#include "src/mds/nameserver2/namespace_storage.h" + +#include "src/mds/chunkserverclient/copyset_client.h" #include "src/mds/common/mds_define.h" +#include "src/mds/nameserver2/allocstatistic/alloc_statistic.h" +#include "src/mds/nameserver2/namespace_storage.h" #include "src/mds/nameserver2/task_progress.h" -#include "src/mds/chunkserverclient/copyset_client.h" #include "src/mds/topology/topology.h" -#include "src/mds/nameserver2/allocstatistic/alloc_statistic.h" using ::curve::mds::chunkserverclient::CopysetClient; using ::curve::mds::topology::Topology; @@ -41,30 +42,32 @@ namespace mds { class CleanCore { public: CleanCore(std::shared_ptr<NameServerStorage> storage, - std::shared_ptr<CopysetClient> copysetClient, - std::shared_ptr<AllocStatistic> allocStatistic) + std::shared_ptr<CopysetClient> copysetClient, + std::shared_ptr<AllocStatistic> allocStatistic) : storage_(storage), copysetClient_(copysetClient), allocStatistic_(allocStatistic) {} /** - * @brief 删除快照文件,更新task状态 - * @param snapShotFile: 需要清理的snapshot文件 - * @param progress: CleanSnapShotFile接口属于时间较长的偏异步任务 - * 这里传入进度进行跟踪反馈 + * @brief Delete the snapshot file and update the task status + * @param snapShotFile: The snapshot file that needs to be cleaned + * @param progress: CleanSnapShotFile is a long-running, mostly + * asynchronous task; progress is passed in for tracking and feedback */ - StatusCode CleanSnapShotFile(const FileInfo & snapShotFile, + StatusCode CleanSnapShotFile(const FileInfo& snapShotFile, TaskProgress* progress); /** - * @brief 删除普通文件,更新task状态 - * @param commonFile: 需要清理的普通文件 - * @param progress: CleanFile接口属于时间较长的偏异步任务 - * 这里传入进度进行跟踪反馈 - * @return 是否执行成功,成功返回StatusCode::kOK + * @brief Delete regular files and update the task status + * @param commonFile: A regular file that needs to be cleaned + * @param progress: CleanFile is a long-running, mostly asynchronous + * task; progress is passed in for tracking and feedback + * @return whether the execution was successful; returns StatusCode::kOK + * on success */ - StatusCode CleanFile(const FileInfo & commonFile, - TaskProgress* progress); + StatusCode CleanFile(const FileInfo& commonFile, TaskProgress* progress); /** * @brief clean discarded segment and chunks @@ -85,4 +88,4 @@ class CleanCore { } // namespace mds } // namespace curve -#endif // SRC_MDS_NAMESERVER2_CLEAN_CORE_H_ +#endif // SRC_MDS_NAMESERVER2_CLEAN_CORE_H_
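Aside for reviewers: a sketch of how a clean task drives CleanCore (dependency construction elided; TaskProgress behavior inferred from the task-manager code later in this patch).

#include "src/mds/nameserver2/clean_core.h"

auto core = std::make_shared<curve::mds::CleanCore>(storage, copysetClient,
                                                    allocStatistic);
curve::mds::TaskProgress progress;
curve::mds::StatusCode ret = core->CleanFile(fileInfo, &progress);
// On success ret == StatusCode::kOK and progress reaches 100;
// on failure progress is marked TaskStatus::FAILED and the owning task retries.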
diff --git a/src/mds/nameserver2/clean_manager.h b/src/mds/nameserver2/clean_manager.h index 86dbbd3474..223203952a 100644 --- a/src/mds/nameserver2/clean_manager.h +++ b/src/mds/nameserver2/clean_manager.h @@ -26,18 +26,19 @@ #include #include #include + #include "proto/nameserver2.pb.h" -#include "src/mds/nameserver2/clean_task_manager.h" -#include "src/mds/nameserver2/clean_core.h" -#include "src/mds/nameserver2/namespace_storage.h" -#include "src/mds/nameserver2/async_delete_snapshot_entity.h" #include "src/common/concurrent/concurrent.h" #include "src/common/concurrent/dlock.h" +#include "src/mds/nameserver2/async_delete_snapshot_entity.h" +#include "src/mds/nameserver2/clean_core.h" +#include "src/mds/nameserver2/clean_task_manager.h" +#include "src/mds/nameserver2/namespace_storage.h" using curve::common::DLock; using curve::common::DLockOpts; -namespace curve { +namespace curve { namespace mds { class CleanDiscardSegmentTask; @@ -45,8 +46,8 @@ class CleanDiscardSegmentTask; class CleanManagerInterface { public: virtual ~CleanManagerInterface() {} - virtual bool SubmitDeleteSnapShotFileJob(const FileInfo&, - std::shared_ptr<AsyncDeleteSnapShotEntity> entity) = 0; + virtual bool SubmitDeleteSnapShotFileJob( + const FileInfo&, std::shared_ptr<AsyncDeleteSnapShotEntity> entity) = 0; virtual std::shared_ptr<Task> GetTask(TaskIDType id) = 0; virtual bool SubmitDeleteCommonFileJob(const FileInfo&) = 0; @@ -56,24 +57,26 @@ class CleanManagerInterface { curve::common::CountDownEvent* counter) = 0; }; /** - * CleanManager 用于异步清理 删除快照对应的数据 - * 1. 接收在线的删除快照请求 - * 2. 线程池异步处理实际的chunk删除任务 + * CleanManager is used for asynchronous cleaning and deleting data + * corresponding to snapshots. + * 1. Receives online requests for snapshot deletion. + * 2. Asynchronously processes the actual chunk deletion tasks in a thread pool.
**/ class CleanManager : public CleanManagerInterface { public: explicit CleanManager(std::shared_ptr core, - std::shared_ptr taskMgr, - std::shared_ptr storage); + std::shared_ptr taskMgr, + std::shared_ptr storage); bool Start(void); bool Stop(void); - bool SubmitDeleteSnapShotFileJob(const FileInfo &fileInfo, - std::shared_ptr entity) override; + bool SubmitDeleteSnapShotFileJob( + const FileInfo& fileInfo, + std::shared_ptr entity) override; - bool SubmitDeleteCommonFileJob(const FileInfo&fileInfo) override; + bool SubmitDeleteCommonFileJob(const FileInfo& fileInfo) override; bool SubmitCleanDiscardSegmentJob( const std::string& cleanSegmentKey, diff --git a/src/mds/nameserver2/clean_task.h b/src/mds/nameserver2/clean_task.h index 9001312870..c865ff6271 100644 --- a/src/mds/nameserver2/clean_task.h +++ b/src/mds/nameserver2/clean_task.h @@ -23,24 +23,26 @@ #ifndef SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ #define SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ +#include //NOLINT +#include //NOLINT + #include #include //NOLINT #include -#include //NOLINT -#include //NOLINT + #include "proto/nameserver2.pb.h" -#include "src/mds/nameserver2/task_progress.h" -#include "src/mds/nameserver2/clean_core.h" -#include "src/mds/nameserver2/async_delete_snapshot_entity.h" -#include "src/common/concurrent/dlock.h" #include "src/common/concurrent/count_down_event.h" +#include "src/common/concurrent/dlock.h" +#include "src/mds/nameserver2/async_delete_snapshot_entity.h" +#include "src/mds/nameserver2/clean_core.h" +#include "src/mds/nameserver2/task_progress.h" using curve::common::DLock; namespace curve { namespace mds { -typedef uint64_t TaskIDType; +typedef uint64_t TaskIDType; // default clean task retry times const uint32_t kDefaultTaskRetryTimes = 5; @@ -52,56 +54,40 @@ class Task { virtual void Run(void) = 0; std::function Closure() { - return [this] () { - Run(); - }; + return [this]() { Run(); }; } - TaskProgress GetTaskProgress(void) const { - return progress_; - } + TaskProgress GetTaskProgress(void) const { return progress_; } - void SetTaskProgress(TaskProgress progress) { - progress_ = progress; - } + void SetTaskProgress(TaskProgress progress) { progress_ = progress; } - TaskProgress* GetMutableTaskProgress(void) { - return &progress_; - } + TaskProgress* GetMutableTaskProgress(void) { return &progress_; } - void SetTaskID(TaskIDType taskID) { - taskID_ = taskID; - } + void SetTaskID(TaskIDType taskID) { taskID_ = taskID; } - TaskIDType GetTaskID(void) const { - return taskID_; - } + TaskIDType GetTaskID(void) const { return taskID_; } - void SetRetryTimes(uint32_t retry) { - retry_ = retry; - } + void SetRetryTimes(uint32_t retry) { retry_ = retry; } void Retry() { retry_--; progress_ = TaskProgress(); } - bool RetryTimesExceed() { - return retry_ == 0; - } + bool RetryTimesExceed() { return retry_ == 0; } protected: TaskIDType taskID_; TaskProgress progress_; - // 任务最大重试次数 + // Maximum number of task retries uint32_t retry_; }; -class SnapShotCleanTask: public Task { +class SnapShotCleanTask : public Task { public: - SnapShotCleanTask(TaskIDType taskID, std::shared_ptr core, - FileInfo fileInfo, - std::shared_ptr entity = nullptr) { + SnapShotCleanTask( + TaskIDType taskID, std::shared_ptr core, FileInfo fileInfo, + std::shared_ptr entity = nullptr) { cleanCore_ = core; fileInfo_ = fileInfo; SetTaskProgress(TaskProgress()); @@ -110,29 +96,29 @@ class SnapShotCleanTask: public Task { SetRetryTimes(kDefaultTaskRetryTimes); } void Run(void) override { - StatusCode ret = 
cleanCore_->CleanSnapShotFile(fileInfo_, - GetMutableTaskProgress()); + StatusCode ret = + cleanCore_->CleanSnapShotFile(fileInfo_, GetMutableTaskProgress()); if (asyncEntity_ != nullptr) { brpc::ClosureGuard doneGuard(asyncEntity_->GetClosure()); brpc::Controller* cntl = static_cast(asyncEntity_->GetController()); - DeleteSnapShotResponse *response = - asyncEntity_->GetDeleteResponse(); - const DeleteSnapShotRequest *request = - asyncEntity_->GetDeleteRequest(); + DeleteSnapShotResponse* response = + asyncEntity_->GetDeleteResponse(); + const DeleteSnapShotRequest* request = + asyncEntity_->GetDeleteRequest(); response->set_statuscode(ret); if (ret != StatusCode::kOK) { LOG(ERROR) << "logid = " << cntl->log_id() - << ", CleanSnapShotFile fail, filename = " - << request->filename() - << ", sequencenum = " << request->seq() - << ", statusCode = " << ret; + << ", CleanSnapShotFile fail, filename = " + << request->filename() + << ", sequencenum = " << request->seq() + << ", statusCode = " << ret; } else { LOG(INFO) << "logid = " << cntl->log_id() - << ", CleanSnapShotFile ok, filename = " - << request->filename() - << ", sequencenum = " << request->seq(); + << ", CleanSnapShotFile ok, filename = " + << request->filename() + << ", sequencenum = " << request->seq(); } } return; @@ -144,10 +130,10 @@ class SnapShotCleanTask: public Task { std::shared_ptr asyncEntity_; }; -class CommonFileCleanTask: public Task { +class CommonFileCleanTask : public Task { public: CommonFileCleanTask(TaskIDType taskID, std::shared_ptr core, - FileInfo fileInfo) { + FileInfo fileInfo) { cleanCore_ = core; fileInfo_ = fileInfo; SetTaskProgress(TaskProgress()); @@ -211,4 +197,4 @@ class SegmentCleanTask : public Task { } // namespace mds } // namespace curve -#endif // SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ +#endif // SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ diff --git a/src/mds/nameserver2/clean_task_manager.cpp b/src/mds/nameserver2/clean_task_manager.cpp index 2a73ff87b9..3aadf6694c 100644 --- a/src/mds/nameserver2/clean_task_manager.cpp +++ b/src/mds/nameserver2/clean_task_manager.cpp @@ -19,16 +19,17 @@ * Created Date: Wednesday December 19th 2018 * Author: hzsunjianliang */ -#include -#include #include "src/mds/nameserver2/clean_task_manager.h" +#include +#include + namespace curve { namespace mds { CleanTaskManager::CleanTaskManager(std::shared_ptr channelPool, - int threadNum, int checkPeriod) - : channelPool_(channelPool) { + int threadNum, int checkPeriod) + : channelPool_(channelPool) { threadNum_ = threadNum; checkPeriod_ = checkPeriod; stopFlag_ = true; @@ -43,30 +44,29 @@ void CleanTaskManager::CheckCleanResult(void) { auto taskProgress = iter->second->GetTaskProgress(); if (taskProgress.GetStatus() == TaskStatus::SUCCESS) { LOG(INFO) << "going to remove task, taskID = " - << iter->second->GetTaskID(); + << iter->second->GetTaskID(); iter = cleanTasks_.erase(iter); continue; } else if (taskProgress.GetStatus() == TaskStatus::FAILED) { iter->second->Retry(); if (!iter->second->RetryTimesExceed()) { - LOG(WARNING) << "CleanTaskManager find Task Failed," - << " retry," - << " taskID = " - << iter->second->GetTaskID(); + LOG(WARNING) + << "CleanTaskManager find Task Failed," + << " retry," + << " taskID = " << iter->second->GetTaskID(); cleanWorkers_->Enqueue(iter->second->Closure()); } else { LOG(ERROR) << "CleanTaskManager find Task Failed," - << " retry times exceed," - << " going to remove task," - << " taskID = " - << iter->second->GetTaskID(); + << " retry times exceed," + << " going to remove task," + << " 
taskID = " << iter->second->GetTaskID(); iter = cleanTasks_.erase(iter); continue; } } ++iter; } - // clean task为空,清空channelPool + // Clean task is empty, clear channelPool if (cleanTasks_.empty() && notEmptyBefore) { LOG(INFO) << "All tasks completed, clear channel pool"; channelPool_->Clear(); @@ -81,7 +81,7 @@ bool CleanTaskManager::Start(void) { stopFlag_ = false; // start worker thread - cleanWorkers_ = new ::curve::common::TaskThreadPool<>(); + cleanWorkers_ = new ::curve::common::TaskThreadPool<>(); if (cleanWorkers_->Start(threadNum_) != 0) { LOG(ERROR) << "thread pool start error"; @@ -89,8 +89,8 @@ bool CleanTaskManager::Start(void) { } // start check thread - checkThread_ = new common::Thread(&CleanTaskManager::CheckCleanResult, - this); + checkThread_ = + new common::Thread(&CleanTaskManager::CheckCleanResult, this); LOG(INFO) << "TaskManger check thread started"; return true; } @@ -117,7 +117,7 @@ bool CleanTaskManager::PushTask(std::shared_ptr task) { common::LockGuard lck(mutex_); if (stopFlag_) { LOG(ERROR) << "task manager not started, taskID = " - << task->GetTaskID(); + << task->GetTaskID(); return false; } if (cleanTasks_.find(task->GetTaskID()) != cleanTasks_.end()) { @@ -137,7 +137,7 @@ std::shared_ptr CleanTaskManager::GetTask(TaskIDType id) { auto iter = cleanTasks_.begin(); if ((iter = cleanTasks_.find(id)) == cleanTasks_.end()) { - LOG(INFO) << "taskid = "<< id << ", not found"; + LOG(INFO) << "taskid = " << id << ", not found"; return nullptr; } else { return iter->second; diff --git a/src/mds/nameserver2/clean_task_manager.h b/src/mds/nameserver2/clean_task_manager.h index 9673a0b1c4..409b9df5b8 100644 --- a/src/mds/nameserver2/clean_task_manager.h +++ b/src/mds/nameserver2/clean_task_manager.h @@ -22,20 +22,21 @@ #ifndef SRC_MDS_NAMESERVER2_CLEAN_TASK_MANAGER_H_ #define SRC_MDS_NAMESERVER2_CLEAN_TASK_MANAGER_H_ -#include -#include //NOLINT -#include //NOLINT #include +#include //NOLINT +#include //NOLINT +#include + +#include "src/common/channel_pool.h" +#include "src/common/concurrent/concurrent.h" #include "src/common/concurrent/task_thread_pool.h" #include "src/common/interruptible_sleeper.h" -#include "src/common/concurrent/concurrent.h" -#include "src/common/channel_pool.h" #include "src/mds/common/mds_define.h" #include "src/mds/nameserver2/clean_task.h" using ::curve::common::Atomic; -using ::curve::common::InterruptibleSleeper; using ::curve::common::ChannelPool; +using ::curve::common::InterruptibleSleeper; namespace curve { namespace mds { @@ -43,40 +44,40 @@ namespace mds { class CleanTaskManager { public: /** - * @brief 初始化TaskManager - * @param channelPool: 连接池 - * @param threadNum: worker线程的数量 - * @param checkPeriod: 周期性任务检查线程时间, ms + * @brief Initialize TaskManager + * @param channelPool: Connection Pool + * @param threadNum: Number of worker threads + * @param checkPeriod: Periodic task check thread time, ms */ explicit CleanTaskManager(std::shared_ptr channelPool, int threadNum = 10, int checkPeriod = 10000); - ~CleanTaskManager() { - Stop(); - } + ~CleanTaskManager() { Stop(); } /** - * @brief 启动worker线程池、启动检查线程 + * @brief: Start worker thread pool, start check thread * */ bool Start(void); /** - * @brief 停止worker线程池、启动检查线程 + * @brief: Stop worker thread pool, start check thread * */ bool Stop(void); /** - * @brief 向线程池推送task - * @param task: 对应的工作任务 - * @return 推送task是否成功,如已存在对应的任务,推送是吧 + * @brief Push task to thread pool + * @param task: corresponding work task + * @return: Is the task successfully pushed? 
    bool PushTask(std::shared_ptr task);
    /**
-     * @brief 获取当前的task
-     * @param id: 对应任务的相关文件InodeID
-     * @return 返回对应task的shared_ptr 或者 不存在返回nullptr
+     * @brief Get the current task
+     * @param id: InodeID of the file the task works on
+     * @return the shared_ptr of the matching task, or nullptr if no such
+     * task exists
     */
    std::shared_ptr GetTask(TaskIDType id);
@@ -85,20 +86,21 @@ class CleanTaskManager {
 private:
    int threadNum_;
-    ::curve::common::TaskThreadPool<> *cleanWorkers_;
+    ::curve::common::TaskThreadPool<>* cleanWorkers_;
    // for period check snapshot delete status
    std::unordered_map> cleanTasks_;
    common::Mutex mutex_;
-    common::Thread *checkThread_;
+    common::Thread* checkThread_;
    int checkPeriod_;
    Atomic stopFlag_;
    InterruptibleSleeper sleeper_;
-    // 连接池,和chunkserverClient共享,没有任务在执行时清空
+    // Connection pool, shared with the chunkserverClient; cleared when no
+    // task is executing
    std::shared_ptr channelPool_;
};
-}  // namespace mds
-}  // namespace curve
+}  // namespace mds
+}  // namespace curve
#endif  // SRC_MDS_NAMESERVER2_CLEAN_TASK_MANAGER_H_
diff --git a/src/snapshotcloneserver/clone/clone_core.cpp b/src/snapshotcloneserver/clone/clone_core.cpp
index 2974ed06c8..5620a4ff63 100644
--- a/src/snapshotcloneserver/clone/clone_core.cpp
+++ b/src/snapshotcloneserver/clone/clone_core.cpp
@@ -22,1700 +22,1918 @@
#include "src/snapshotcloneserver/clone/clone_core.h"
+#include
#include
#include
#include
-#include
-#include "src/snapshotcloneserver/clone/clone_task.h"
+#include "src/common/concurrent/name_lock.h"
#include "src/common/location_operator.h"
#include "src/common/uuid.h"
-#include "src/common/concurrent/name_lock.h"
+#include "src/snapshotcloneserver/clone/clone_task.h"
-using ::curve::common::UUIDGenerator;
using ::curve::common::LocationOperator;
using ::curve::common::NameLock;
using ::curve::common::NameLockGuard;
+using ::curve::common::UUIDGenerator;
-namespace curve {
-namespace snapshotcloneserver {
-
-int CloneCoreImpl::Init() {
-    int ret = client_->Mkdir(cloneTempDir_, mdsRootUser_);
-    if (ret != LIBCURVE_ERROR::OK &&
-        ret != -LIBCURVE_ERROR::EXISTS) {
-        LOG(ERROR) << "Mkdir fail, ret = " << ret
-                   << ", dirpath = " << cloneTempDir_;
-        return kErrCodeServerInitFail;
-    }
-    return kErrCodeSuccess;
-}
-
-int CloneCoreImpl::CloneOrRecoverPre(const UUID &source,
-    const std::string &user,
-    const std::string &destination,
-    bool lazyFlag,
-    CloneTaskType taskType,
-    std::string poolset,
-    CloneInfo *cloneInfo) {
-    // 查询数据库中是否有任务正在执行
-    std::vector cloneInfoList;
-    metaStore_->GetCloneInfoByFileName(destination, &cloneInfoList);
-    bool needJudgeFileExist = false;
-    std::vector existCloneInfos;
-    for (auto &info : cloneInfoList) {
-        LOG(INFO) << "CloneOrRecoverPre find same clone task"
-                  << ", source = " << source
-                  << ", user = " << user
-                  << ", destination = " << destination
-                  << ", poolset = " << poolset
-                  << ", Exist CloneInfo : " << info;
-        // is clone
-        if (taskType == CloneTaskType::kClone) {
-            if (info.GetStatus() == CloneStatus::cloning ||
-                info.GetStatus() == CloneStatus::retrying) {
-                if ((info.GetUser() == user) &&
-                    (info.GetSrc() == source) &&
-                    (info.GetIsLazy() == lazyFlag) &&
-                    (info.GetTaskType() == taskType)) {
-                    // 视为同一个clone
-                    *cloneInfo = info;
-                    return kErrCodeTaskExist;
-                } else {
-                    // 视为不同的克隆,那么文件实际上已被占用,返回文件已存在
-                    return kErrCodeFileExist;
+namespace curve
+{
+    namespace snapshotcloneserver
+    {
+
+        int CloneCoreImpl::Init()
+        {
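+            // Ensure the temporary clone directory exists; an EXISTS result
+            // from an earlier run is tolerated by the check below.
+            int ret = client_->Mkdir(cloneTempDir_, 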
mdsRootUser_);
+            if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::EXISTS)
+            {
+                LOG(ERROR) << "Mkdir fail, ret = " << ret
+                           << ", dirpath = " << cloneTempDir_;
+                return kErrCodeServerInitFail;
+            }
+            return kErrCodeSuccess;
+        }
+
+        int CloneCoreImpl::CloneOrRecoverPre(const UUID &source,
+                                             const std::string &user,
+                                             const std::string &destination,
+                                             bool lazyFlag, CloneTaskType taskType,
+                                             std::string poolset,
+                                             CloneInfo *cloneInfo)
+        {
+            // Check whether the database already has a task in progress for
+            // this destination
+            std::vector cloneInfoList;
+            metaStore_->GetCloneInfoByFileName(destination, &cloneInfoList);
+            bool needJudgeFileExist = false;
+            std::vector existCloneInfos;
+            for (auto &info : cloneInfoList)
+            {
+                LOG(INFO) << "CloneOrRecoverPre find same clone task"
+                          << ", source = " << source << ", user = " << user
+                          << ", destination = " << destination
+                          << ", poolset = " << poolset
+                          << ", Exist CloneInfo : " << info;
+                // is clone
+                if (taskType == CloneTaskType::kClone)
+                {
+                    if (info.GetStatus() == CloneStatus::cloning ||
+                        info.GetStatus() == CloneStatus::retrying)
+                    {
+                        if ((info.GetUser() == user) && (info.GetSrc() == source) &&
+                            (info.GetIsLazy() == lazyFlag) &&
+                            (info.GetTaskType() == taskType))
+                        {
+                            // Treat it as the same clone task
+                            *cloneInfo = info;
+                            return kErrCodeTaskExist;
+                        }
+                        else
+                        {
+                            // Treat it as a different clone: the file is
+                            // actually occupied, so report that it already
+                            // exists
+                            return kErrCodeFileExist;
+                        }
+                    }
+                    else if (info.GetStatus() == CloneStatus::done ||
+                             info.GetStatus() == CloneStatus::error ||
+                             info.GetStatus() == CloneStatus::metaInstalled)
+                    {
+                        // The file may have been deleted since, so check
+                        // again whether it exists; if it was deleted,
+                        // cloning it again is allowed
+                        existCloneInfos.push_back(info);
+                        needJudgeFileExist = true;
+                    }
+                    else
+                    {
+                        // A clone task with the same name is being deleted,
+                        // so report that the file is occupied
+                        return kErrCodeFileExist;
+                    }
+                }
+                else
+                {  // is recover
+                    if (info.GetStatus() == CloneStatus::recovering ||
+                        info.GetStatus() == CloneStatus::retrying)
+                    {
+                        if ((info.GetUser() == user) && (info.GetSrc() == source) &&
+                            (info.GetIsLazy() == lazyFlag) &&
+                            (info.GetTaskType() == taskType))
+                        {
+                            // Treat it as the same task and report that the
+                            // task already exists
+                            *cloneInfo = info;
+                            return kErrCodeTaskExist;
+                        }
+                        else
+                        {
+                            // Treat it as a different clone: the file is
+                            // actually occupied, so report that it already
+                            // exists
+                            return kErrCodeFileExist;
+                        }
+                    }
+                    else if (info.GetStatus() == CloneStatus::done ||
+                             info.GetStatus() == CloneStatus::error ||
+                             info.GetStatus() == CloneStatus::metaInstalled)
+                    {
+                        // nothing
+                    }
+                    else
+                    {
+                        // A task with the same name is being deleted, so
+                        // report that the file is occupied
+                        return kErrCodeFileExist;
+                    }
+                }
            }
-            } 
else if (info.GetStatus() == CloneStatus::done || - info.GetStatus() == CloneStatus::error || - info.GetStatus() == CloneStatus::metaInstalled) { - // nothing - } else { - // 此时,有个相同的任务正在删除中, 返回文件被占用 - return kErrCodeFileExist; } - } - } - - // 目标文件已存在不能clone, 不存在不能recover - FInfo destFInfo; - int ret = client_->GetFileInfo(destination, mdsRootUser_, &destFInfo); - switch (ret) { - case LIBCURVE_ERROR::OK: - if (CloneTaskType::kClone == taskType) { - if (needJudgeFileExist) { - bool match = false; - // 找出inodeid匹配的cloneInfo - for (auto& existInfo : existCloneInfos) { - if (destFInfo.id == existInfo.GetDestId()) { - *cloneInfo = existInfo; - match = true; - break; + + // The target file already exists and cannot be cloned or recovered if it + // does not exist + FInfo destFInfo; + int ret = client_->GetFileInfo(destination, mdsRootUser_, &destFInfo); + switch (ret) + { + case LIBCURVE_ERROR::OK: + if (CloneTaskType::kClone == taskType) + { + if (needJudgeFileExist) + { + bool match = false; + // Find the cloneInfo that matches the inodeid + for (auto &existInfo : existCloneInfos) + { + if (destFInfo.id == existInfo.GetDestId()) + { + *cloneInfo = existInfo; + match = true; + break; + } + } + if (match) + { + return kErrCodeTaskExist; + } + else + { + // If not found, then none of the dest files were + // created by these clone tasks, It means the file has a + // duplicate name + LOG(ERROR) + << "Clone dest file exist, " + << "but task not match! " + << "source = " << source << ", user = " << user + << ", destination = " << destination + << ", poolset = " << poolset; + return kErrCodeFileExist; } } - if (match) { - return kErrCodeTaskExist; - } else { - // 如果没找到,那么dest file都不是这些clone任务创建的, - // 意味着文件重名了 - LOG(ERROR) << "Clone dest file exist, " - << "but task not match! 
" - << "source = " << source - << ", user = " << user + else + { + // There is no corresponding cloneInfo, which means the file + // has a duplicate name + LOG(ERROR) << "Clone dest file must not exist" + << ", source = " << source << ", user = " << user << ", destination = " << destination << ", poolset = " << poolset; return kErrCodeFileExist; } - } else { - // 没有对应的cloneInfo,意味着文件重名了 - LOG(ERROR) << "Clone dest file must not exist" - << ", source = " << source - << ", user = " << user + } + else if (CloneTaskType::kRecover == taskType) + { + // The recover task keeps the poolset information of the volume + // unchanged + poolset = destFInfo.poolset; + } + else + { + assert(false); + } + break; + case -LIBCURVE_ERROR::NOTEXIST: + if (CloneTaskType::kRecover == taskType) + { + LOG(ERROR) << "Recover dest file must exist" + << ", source = " << source << ", user = " << user + << ", destination = " << destination; + return kErrCodeFileNotExist; + } + break; + default: + LOG(ERROR) << "GetFileInfo encounter an error" + << ", ret = " << ret << ", source = " << source + << ", user = " << user; + return kErrCodeInternalError; + } + + // Is it a snapshot + SnapshotInfo snapInfo; + CloneFileType fileType; + + { + NameLockGuard lockSnapGuard(snapshotRef_->GetSnapshotLock(), source); + ret = metaStore_->GetSnapshotInfo(source, &snapInfo); + if (0 == ret) + { + if (CloneTaskType::kRecover == taskType && + destination != snapInfo.GetFileName()) + { + LOG(ERROR) << "Can not recover from the snapshot " + << "which is not belong to the destination volume."; + return kErrCodeInvalidSnapshot; + } + if (snapInfo.GetStatus() != Status::done) + { + LOG(ERROR) << "Can not clone by snapshot has status:" + << static_cast(snapInfo.GetStatus()); + return kErrCodeInvalidSnapshot; + } + if (snapInfo.GetUser() != user) + { + LOG(ERROR) << "Clone snapshot by invalid user" + << ", source = " << source << ", user = " << user + << ", destination = " << destination + << ", poolset = " << poolset + << ", snapshot.user = " << snapInfo.GetUser(); + return kErrCodeInvalidUser; + } + fileType = CloneFileType::kSnapshot; + snapshotRef_->IncrementSnapshotRef(source); + } + } + if (ret < 0) + { + FInfo fInfo; + ret = client_->GetFileInfo(source, mdsRootUser_, &fInfo); + switch (ret) + { + case LIBCURVE_ERROR::OK: + fileType = CloneFileType::kFile; + break; + case -LIBCURVE_ERROR::NOTEXIST: + case -LIBCURVE_ERROR::PARAM_ERROR: + LOG(ERROR) << "Clone source file not exist" + << ", source = " << source << ", user = " << user << ", destination = " << destination << ", poolset = " << poolset; - return kErrCodeFileExist; + return kErrCodeFileNotExist; + default: + LOG(ERROR) << "GetFileInfo encounter an error" + << ", ret = " << ret << ", source = " << source + << ", user = " << user; + return kErrCodeInternalError; + } + if (fInfo.filestatus != FileStatus::Created && + fInfo.filestatus != FileStatus::Cloned && + fInfo.filestatus != FileStatus::BeingCloned) + { + LOG(ERROR) << "Can not clone when file status = " + << static_cast(fInfo.filestatus); + return kErrCodeFileStatusInvalid; } - } else if (CloneTaskType::kRecover == taskType) { - // recover任务,卷的poolset信息不变 - poolset = destFInfo.poolset; - } else { - assert(false); - } - break; - case -LIBCURVE_ERROR::NOTEXIST: - if (CloneTaskType::kRecover == taskType) { - LOG(ERROR) << "Recover dest file must exist" - << ", source = " << source - << ", user = " << user - << ", destination = " << destination; - return kErrCodeFileNotExist; - } - break; - default: - LOG(ERROR) << "GetFileInfo 
encounter an error" - << ", ret = " << ret - << ", source = " << source - << ", user = " << user; - return kErrCodeInternalError; - } - // 是否为快照 - SnapshotInfo snapInfo; - CloneFileType fileType; + // TODO (User authentication for mirror cloning to be improved) + } - { - NameLockGuard lockSnapGuard(snapshotRef_->GetSnapshotLock(), source); - ret = metaStore_->GetSnapshotInfo(source, &snapInfo); - if (0 == ret) { - if (CloneTaskType::kRecover == taskType && - destination != snapInfo.GetFileName()) { - LOG(ERROR) << "Can not recover from the snapshot " - << "which is not belong to the destination volume."; - return kErrCodeInvalidSnapshot; - } - if (snapInfo.GetStatus() != Status::done) { - LOG(ERROR) << "Can not clone by snapshot has status:" - << static_cast(snapInfo.GetStatus()); - return kErrCodeInvalidSnapshot; - } - if (snapInfo.GetUser() != user) { - LOG(ERROR) << "Clone snapshot by invalid user" - << ", source = " << source - << ", user = " << user - << ", destination = " << destination - << ", poolset = " << poolset - << ", snapshot.user = " << snapInfo.GetUser(); - return kErrCodeInvalidUser; + UUID uuid = UUIDGenerator().GenerateUUID(); + CloneInfo info(uuid, user, taskType, source, destination, poolset, fileType, + lazyFlag); + if (CloneTaskType::kClone == taskType) + { + info.SetStatus(CloneStatus::cloning); } - fileType = CloneFileType::kSnapshot; - snapshotRef_->IncrementSnapshotRef(source); - } - } - if (ret < 0) { - FInfo fInfo; - ret = client_->GetFileInfo(source, mdsRootUser_, &fInfo); - switch (ret) { - case LIBCURVE_ERROR::OK: - fileType = CloneFileType::kFile; - break; - case -LIBCURVE_ERROR::NOTEXIST: - case -LIBCURVE_ERROR::PARAM_ERROR: - LOG(ERROR) << "Clone source file not exist" - << ", source = " << source - << ", user = " << user + else + { + info.SetStatus(CloneStatus::recovering); + } + // Here, you must first AddCloneInfo because if you first set + // CloneFileStatus and then AddCloneInfo, If AddCloneInfo fails and + // unexpectedly restarts, no one will know that SetCloneFileStatus has been + // called, causing Mirror cannot be deleted + ret = metaStore_->AddCloneInfo(info); + if (ret < 0) + { + LOG(ERROR) << "AddCloneInfo error" + << ", ret = " << ret << ", taskId = " << uuid + << ", user = " << user << ", source = " << source << ", destination = " << destination << ", poolset = " << poolset; + if (CloneFileType::kSnapshot == fileType) + { + snapshotRef_->DecrementSnapshotRef(source); + } + return ret; + } + if (CloneFileType::kFile == fileType) + { + NameLockGuard lockGuard(cloneRef_->GetLock(), source); + ret = client_->SetCloneFileStatus(source, FileStatus::BeingCloned, + mdsRootUser_); + if (ret < 0) + { + // The SetCloneFileStatus error is not handled here, + // Because all results of SetCloneFileStatus failure are acceptable, + // Compared to handling SetCloneFileStatus failure, it is more + // direct: For example, calling DeleteCloneInfo to delete a task, + // Once DeleteCloneInfo fails and an error is returned to the user, + // Restarting the service will cause Clone to continue, + // Inconsistency with the results returned by the user, causing + // confusion for the user + LOG(WARNING) << "SetCloneFileStatus encounter an error" + << ", ret = " << ret << ", source = " << source + << ", user = " << user; + } + cloneRef_->IncrementRef(source); + } + + *cloneInfo = info; + return kErrCodeSuccess; + } + + int CloneCoreImpl::FlattenPre(const std::string &user, const TaskIdType &taskId, + CloneInfo *cloneInfo) + { + (void)user; + int ret = 
metaStore_->GetCloneInfo(taskId, cloneInfo); + if (ret < 0) + { return kErrCodeFileNotExist; + } + switch (cloneInfo->GetStatus()) + { + case CloneStatus::done: + case CloneStatus::cloning: + case CloneStatus::recovering: + { + // A task exists is returned for completed or in progress, + // indicating that it does not need to be processed + return kErrCodeTaskExist; + } + case CloneStatus::metaInstalled: + { + if (CloneTaskType::kClone == cloneInfo->GetTaskType()) + { + cloneInfo->SetStatus(CloneStatus::cloning); + } + else + { + cloneInfo->SetStatus(CloneStatus::recovering); + } + break; + } + case CloneStatus::cleaning: + case CloneStatus::errorCleaning: + case CloneStatus::error: default: - LOG(ERROR) << "GetFileInfo encounter an error" + { + LOG(ERROR) << "FlattenPre find clone task status Invalid" + << ", status = " + << static_cast(cloneInfo->GetStatus()); + return kErrCodeFileStatusInvalid; + } + } + ret = metaStore_->UpdateCloneInfo(*cloneInfo); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo fail" << ", ret = " << ret - << ", source = " << source - << ", user = " << user; - return kErrCodeInternalError; - } - if (fInfo.filestatus != FileStatus::Created && - fInfo.filestatus != FileStatus::Cloned && - fInfo.filestatus != FileStatus::BeingCloned) { - LOG(ERROR) << "Can not clone when file status = " - << static_cast(fInfo.filestatus); - return kErrCodeFileStatusInvalid; - } - - // TODO(镜像克隆的用户认证待完善) - } - - UUID uuid = UUIDGenerator().GenerateUUID(); - CloneInfo info(uuid, user, taskType, - source, destination, poolset, fileType, lazyFlag); - if (CloneTaskType::kClone == taskType) { - info.SetStatus(CloneStatus::cloning); - } else { - info.SetStatus(CloneStatus::recovering); - } - // 这里必须先AddCloneInfo, 因为如果先SetCloneFileStatus,然后AddCloneInfo, - // 如果AddCloneInfo失败又意外重启,将没人知道SetCloneFileStatus调用过,造成 - // 镜像无法删除 - ret = metaStore_->AddCloneInfo(info); - if (ret < 0) { - LOG(ERROR) << "AddCloneInfo error" - << ", ret = " << ret - << ", taskId = " << uuid - << ", user = " << user - << ", source = " << source - << ", destination = " << destination - << ", poolset = " << poolset; - if (CloneFileType::kSnapshot == fileType) { - snapshotRef_->DecrementSnapshotRef(source); - } - return ret; - } - if (CloneFileType::kFile == fileType) { - NameLockGuard lockGuard(cloneRef_->GetLock(), source); - ret = client_->SetCloneFileStatus(source, - FileStatus::BeingCloned, - mdsRootUser_); - if (ret < 0) { - // 这里不处理SetCloneFileStatus的错误, - // 因为SetCloneFileStatus失败的所有结果都是可接受的, - // 相比于处理SetCloneFileStatus失败的情况更直接: - // 比如调用DeleteCloneInfo删除任务, - // 一旦DeleteCloneInfo失败,给用户返回error之后, - // 重启服务将造成Clone继续进行, - // 跟用户结果返回的结果不一致,造成用户的困惑 - LOG(WARNING) << "SetCloneFileStatus encounter an error" - << ", ret = " << ret - << ", source = " << source - << ", user = " << user; - } - cloneRef_->IncrementRef(source); - } - - *cloneInfo = info; - return kErrCodeSuccess; -} - -int CloneCoreImpl::FlattenPre( - const std::string &user, - const TaskIdType &taskId, - CloneInfo *cloneInfo) { - (void)user; - int ret = metaStore_->GetCloneInfo(taskId, cloneInfo); - if (ret < 0) { - return kErrCodeFileNotExist; - } - switch (cloneInfo->GetStatus()) { - case CloneStatus::done: - case CloneStatus::cloning: - case CloneStatus::recovering: { - // 已经完成的或正在进行中返回task exist, 表示不需要处理 - return kErrCodeTaskExist; - } - case CloneStatus::metaInstalled: { - if (CloneTaskType::kClone == cloneInfo->GetTaskType()) { - cloneInfo->SetStatus(CloneStatus::cloning); - } else { - cloneInfo->SetStatus(CloneStatus::recovering); + << ", taskId = " 
<< cloneInfo->GetTaskId(); + return ret; } - break; - } - case CloneStatus::cleaning: - case CloneStatus::errorCleaning: - case CloneStatus::error: - default: { - LOG(ERROR) << "FlattenPre find clone task status Invalid" - << ", status = " - << static_cast(cloneInfo->GetStatus()); - return kErrCodeFileStatusInvalid; + return kErrCodeSuccess; } - } - ret = metaStore_->UpdateCloneInfo(*cloneInfo); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo fail" - << ", ret = " << ret - << ", taskId = " << cloneInfo->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -void CloneCoreImpl::HandleCloneOrRecoverTask( - std::shared_ptr task) { - brpc::ClosureGuard doneGuard(task->GetClosure().get()); - int ret = kErrCodeSuccess; - FInfo newFileInfo; - CloneSegmentMap segInfos; - if (IsSnapshot(task)) { - ret = BuildFileInfoFromSnapshot(task, &newFileInfo, &segInfos); - if (ret < 0) { - HandleCloneError(task, ret); - return; - } - } else { - ret = BuildFileInfoFromFile(task, &newFileInfo, &segInfos); - if (ret < 0) { - HandleCloneError(task, ret); - return; - } - } - // 在kCreateCloneMeta以后的步骤还需更新CloneChunkInfo信息中的chunkIdInfo - if (NeedUpdateCloneMeta(task)) { - ret = CreateOrUpdateCloneMeta(task, &newFileInfo, &segInfos); - if (ret < 0) { - HandleCloneError(task, ret); - return; - } - } - - CloneStep step = task->GetCloneInfo().GetNextStep(); - while (step != CloneStep::kEnd) { - switch (step) { - case CloneStep::kCreateCloneFile: - ret = CreateCloneFile(task, newFileInfo); - if (ret < 0) { + void CloneCoreImpl::HandleCloneOrRecoverTask( + std::shared_ptr task) + { + brpc::ClosureGuard doneGuard(task->GetClosure().get()); + int ret = kErrCodeSuccess; + FInfo newFileInfo; + CloneSegmentMap segInfos; + if (IsSnapshot(task)) + { + ret = BuildFileInfoFromSnapshot(task, &newFileInfo, &segInfos); + if (ret < 0) + { HandleCloneError(task, ret); return; } - task->SetProgress(kProgressCreateCloneFile); - break; - case CloneStep::kCreateCloneMeta: - ret = CreateCloneMeta(task, &newFileInfo, &segInfos); - if (ret < 0) { + } + else + { + ret = BuildFileInfoFromFile(task, &newFileInfo, &segInfos); + if (ret < 0) + { HandleCloneError(task, ret); return; } - task->SetProgress(kProgressCreateCloneMeta); - break; - case CloneStep::kCreateCloneChunk: - ret = CreateCloneChunk(task, newFileInfo, &segInfos); - if (ret < 0) { + } + + // In the steps after kCreateCloneMeta, it is necessary to update the + // chunkIdInfo in the CloneChunkInfo information + if (NeedUpdateCloneMeta(task)) + { + ret = CreateOrUpdateCloneMeta(task, &newFileInfo, &segInfos); + if (ret < 0) + { HandleCloneError(task, ret); return; } - break; - case CloneStep::kCompleteCloneMeta: - ret = CompleteCloneMeta(task, newFileInfo, segInfos); - if (ret < 0) { + } + + CloneStep step = task->GetCloneInfo().GetNextStep(); + while (step != CloneStep::kEnd) + { + switch (step) + { + case CloneStep::kCreateCloneFile: + ret = CreateCloneFile(task, newFileInfo); + if (ret < 0) + { + HandleCloneError(task, ret); + return; + } + task->SetProgress(kProgressCreateCloneFile); + break; + case CloneStep::kCreateCloneMeta: + ret = CreateCloneMeta(task, &newFileInfo, &segInfos); + if (ret < 0) + { + HandleCloneError(task, ret); + return; + } + task->SetProgress(kProgressCreateCloneMeta); + break; + case CloneStep::kCreateCloneChunk: + ret = CreateCloneChunk(task, newFileInfo, &segInfos); + if (ret < 0) + { + HandleCloneError(task, ret); + return; + } + break; + case CloneStep::kCompleteCloneMeta: + ret = CompleteCloneMeta(task, newFileInfo, segInfos); + if (ret < 0) + 
{ + HandleCloneError(task, ret); + return; + } + task->SetProgress(kProgressMetaInstalled); + break; + case CloneStep::kRecoverChunk: + ret = RecoverChunk(task, newFileInfo, segInfos); + if (ret < 0) + { + HandleCloneError(task, ret); + return; + } + break; + case CloneStep::kChangeOwner: + ret = ChangeOwner(task, newFileInfo); + if (ret < 0) + { + HandleCloneError(task, ret); + return; + } + break; + case CloneStep::kRenameCloneFile: + ret = RenameCloneFile(task, newFileInfo); + if (ret < 0) + { + HandleCloneError(task, ret); + return; + } + if (IsLazy(task)) + { + HandleLazyCloneStage1Finish(task); + doneGuard.release(); + return; + } + break; + case CloneStep::kCompleteCloneFile: + ret = CompleteCloneFile(task, newFileInfo, segInfos); + if (ret < 0) + { + HandleCloneError(task, ret); + return; + } + break; + default: + LOG(ERROR) << "can not reach here" + << ", taskid = " << task->GetTaskId(); HandleCloneError(task, ret); return; } - task->SetProgress(kProgressMetaInstalled); - break; - case CloneStep::kRecoverChunk: - ret = RecoverChunk(task, newFileInfo, segInfos); - if (ret < 0) { - HandleCloneError(task, ret); - return; + task->UpdateMetric(); + step = task->GetCloneInfo().GetNextStep(); + } + HandleCloneSuccess(task); + } + + int CloneCoreImpl::BuildFileInfoFromSnapshot( + std::shared_ptr task, FInfo *newFileInfo, + CloneSegmentMap *segInfos) + { + segInfos->clear(); + UUID source = task->GetCloneInfo().GetSrc(); + + SnapshotInfo snapInfo; + int ret = metaStore_->GetSnapshotInfo(source, &snapInfo); + if (ret < 0) + { + LOG(ERROR) << "GetSnapshotInfo error" + << ", source = " << source + << ", taskid = " << task->GetTaskId(); + return kErrCodeFileNotExist; + } + newFileInfo->chunksize = snapInfo.GetChunkSize(); + newFileInfo->segmentsize = snapInfo.GetSegmentSize(); + newFileInfo->length = snapInfo.GetFileLength(); + newFileInfo->stripeUnit = snapInfo.GetStripeUnit(); + newFileInfo->stripeCount = snapInfo.GetStripeCount(); + + if (task->GetCloneInfo().GetTaskType() == CloneTaskType::kRecover && + task->GetCloneInfo().GetPoolset().empty()) + { + LOG(ERROR) << "Recover task's poolset should not be empty"; + return kErrCodeInternalError; + } + newFileInfo->poolset = !task->GetCloneInfo().GetPoolset().empty() + ? 
task->GetCloneInfo().GetPoolset() + : snapInfo.GetPoolset(); + + if (IsRecover(task)) + { + FInfo fInfo; + std::string destination = task->GetCloneInfo().GetDest(); + std::string user = task->GetCloneInfo().GetUser(); + ret = client_->GetFileInfo(destination, mdsRootUser_, &fInfo); + switch (ret) + { + case LIBCURVE_ERROR::OK: + break; + case -LIBCURVE_ERROR::NOTEXIST: + LOG(ERROR) << "BuildFileInfoFromSnapshot " + << "find dest file not exist, maybe deleted" + << ", ret = " << ret + << ", destination = " << destination + << ", user = " << user + << ", taskid = " << task->GetTaskId(); + return kErrCodeFileNotExist; + default: + LOG(ERROR) << "GetFileInfo fail" + << ", ret = " << ret + << ", destination = " << destination + << ", user = " << user + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; } - break; - case CloneStep::kChangeOwner: - ret = ChangeOwner(task, newFileInfo); - if (ret < 0) { - HandleCloneError(task, ret); - return; + // The destinationId recovered from the snapshot is the ID of the target + // file + task->GetCloneInfo().SetDestId(fInfo.id); + // Restore seqnum+1 from snapshot + newFileInfo->seqnum = fInfo.seqnum + 1; + } + else + { + newFileInfo->seqnum = kInitializeSeqNum; + } + newFileInfo->owner = task->GetCloneInfo().GetUser(); + + ChunkIndexDataName indexName(snapInfo.GetFileName(), snapInfo.GetSeqNum()); + ChunkIndexData snapMeta; + ret = dataStore_->GetChunkIndexData(indexName, &snapMeta); + if (ret < 0) + { + LOG(ERROR) << "GetChunkIndexData error" + << ", fileName = " << snapInfo.GetFileName() + << ", seqNum = " << snapInfo.GetSeqNum() + << ", taskid = " << task->GetTaskId(); + return ret; + } + + uint64_t segmentSize = snapInfo.GetSegmentSize(); + uint64_t chunkSize = snapInfo.GetChunkSize(); + uint64_t chunkPerSegment = segmentSize / chunkSize; + + std::vector chunkIndexs = snapMeta.GetAllChunkIndex(); + for (auto &chunkIndex : chunkIndexs) + { + ChunkDataName chunkDataName; + snapMeta.GetChunkDataName(chunkIndex, &chunkDataName); + uint64_t segmentIndex = chunkIndex / chunkPerSegment; + CloneChunkInfo info; + info.location = chunkDataName.ToDataChunkKey(); + info.needRecover = true; + if (IsRecover(task)) + { + info.seqNum = chunkDataName.chunkSeqNum_; } - break; - case CloneStep::kRenameCloneFile: - ret = RenameCloneFile(task, newFileInfo); - if (ret < 0) { - HandleCloneError(task, ret); - return; + else + { + info.seqNum = kInitializeSeqNum; } - if (IsLazy(task)) { - HandleLazyCloneStage1Finish(task); - doneGuard.release(); - return; + + auto it = segInfos->find(segmentIndex); + if (it == segInfos->end()) + { + CloneSegmentInfo segInfo; + segInfo.emplace(chunkIndex % chunkPerSegment, info); + segInfos->emplace(segmentIndex, segInfo); } - break; - case CloneStep::kCompleteCloneFile: - ret = CompleteCloneFile(task, newFileInfo, segInfos); - if (ret < 0) { - HandleCloneError(task, ret); - return; + else + { + it->second.emplace(chunkIndex % chunkPerSegment, info); } - break; - default: - LOG(ERROR) << "can not reach here" - << ", taskid = " << task->GetTaskId(); - HandleCloneError(task, ret); - return; + } + return kErrCodeSuccess; } - task->UpdateMetric(); - step = task->GetCloneInfo().GetNextStep(); - } - HandleCloneSuccess(task); -} - -int CloneCoreImpl::BuildFileInfoFromSnapshot( - std::shared_ptr task, - FInfo *newFileInfo, - CloneSegmentMap *segInfos) { - segInfos->clear(); - UUID source = task->GetCloneInfo().GetSrc(); - - SnapshotInfo snapInfo; - int ret = metaStore_->GetSnapshotInfo(source, &snapInfo); - if (ret < 0) { - 
LOG(ERROR) << "GetSnapshotInfo error" - << ", source = " << source - << ", taskid = " << task->GetTaskId(); - return kErrCodeFileNotExist; - } - newFileInfo->chunksize = snapInfo.GetChunkSize(); - newFileInfo->segmentsize = snapInfo.GetSegmentSize(); - newFileInfo->length = snapInfo.GetFileLength(); - newFileInfo->stripeUnit = snapInfo.GetStripeUnit(); - newFileInfo->stripeCount = snapInfo.GetStripeCount(); - - if (task->GetCloneInfo().GetTaskType() == CloneTaskType::kRecover && - task->GetCloneInfo().GetPoolset().empty()) { - LOG(ERROR) << "Recover task's poolset should not be empty"; - return kErrCodeInternalError; - } - newFileInfo->poolset = !task->GetCloneInfo().GetPoolset().empty() - ? task->GetCloneInfo().GetPoolset() - : snapInfo.GetPoolset(); - - if (IsRecover(task)) { - FInfo fInfo; - std::string destination = task->GetCloneInfo().GetDest(); - std::string user = task->GetCloneInfo().GetUser(); - ret = client_->GetFileInfo(destination, mdsRootUser_, &fInfo); - switch (ret) { - case LIBCURVE_ERROR::OK: - break; - case -LIBCURVE_ERROR::NOTEXIST: - LOG(ERROR) << "BuildFileInfoFromSnapshot " - << "find dest file not exist, maybe deleted" - << ", ret = " << ret - << ", destination = " << destination - << ", user = " << user - << ", taskid = " << task->GetTaskId(); - return kErrCodeFileNotExist; - default: + + int CloneCoreImpl::BuildFileInfoFromFile(std::shared_ptr task, + FInfo *newFileInfo, + CloneSegmentMap *segInfos) + { + segInfos->clear(); + UUID source = task->GetCloneInfo().GetSrc(); + std::string user = task->GetCloneInfo().GetUser(); + + FInfo fInfo; + int ret = client_->GetFileInfo(source, mdsRootUser_, &fInfo); + if (ret != LIBCURVE_ERROR::OK) + { LOG(ERROR) << "GetFileInfo fail" - << ", ret = " << ret - << ", destination = " << destination - << ", user = " << user + << ", ret = " << ret << ", source = " << source + << ", user = " << user << ", taskid = " << task->GetTaskId(); + return kErrCodeFileNotExist; + } + // GetOrAllocateSegment depends on fullPathName + fInfo.fullPathName = source; + + newFileInfo->chunksize = fInfo.chunksize; + newFileInfo->segmentsize = fInfo.segmentsize; + newFileInfo->length = fInfo.length; + newFileInfo->seqnum = kInitializeSeqNum; + newFileInfo->owner = task->GetCloneInfo().GetUser(); + newFileInfo->stripeUnit = fInfo.stripeUnit; + newFileInfo->stripeCount = fInfo.stripeCount; + + if (task->GetCloneInfo().GetTaskType() == CloneTaskType::kRecover && + task->GetCloneInfo().GetPoolset().empty()) + { + LOG(ERROR) << "Recover task's poolset should not be empty"; + return kErrCodeInternalError; + } + newFileInfo->poolset = !task->GetCloneInfo().GetPoolset().empty() + ? 
task->GetCloneInfo().GetPoolset() + : fInfo.poolset; + + uint64_t fileLength = fInfo.length; + uint64_t segmentSize = fInfo.segmentsize; + uint64_t chunkSize = fInfo.chunksize; + + if (0 == segmentSize) + { + LOG(ERROR) << "GetFileInfo return invalid fileInfo, segmentSize == 0" << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; - } - // 从快照恢复的destinationId为目标文件的id - task->GetCloneInfo().SetDestId(fInfo.id); - // 从快照恢复seqnum+1 - newFileInfo->seqnum = fInfo.seqnum + 1; - } else { - newFileInfo->seqnum = kInitializeSeqNum; - } - newFileInfo->owner = task->GetCloneInfo().GetUser(); - - ChunkIndexDataName indexName(snapInfo.GetFileName(), - snapInfo.GetSeqNum()); - ChunkIndexData snapMeta; - ret = dataStore_->GetChunkIndexData(indexName, &snapMeta); - if (ret < 0) { - LOG(ERROR) << "GetChunkIndexData error" - << ", fileName = " << snapInfo.GetFileName() - << ", seqNum = " << snapInfo.GetSeqNum() - << ", taskid = " << task->GetTaskId(); - return ret; - } - - uint64_t segmentSize = snapInfo.GetSegmentSize(); - uint64_t chunkSize = snapInfo.GetChunkSize(); - uint64_t chunkPerSegment = segmentSize / chunkSize; - - std::vector chunkIndexs = - snapMeta.GetAllChunkIndex(); - for (auto &chunkIndex : chunkIndexs) { - ChunkDataName chunkDataName; - snapMeta.GetChunkDataName(chunkIndex, &chunkDataName); - uint64_t segmentIndex = chunkIndex / chunkPerSegment; - CloneChunkInfo info; - info.location = chunkDataName.ToDataChunkKey(); - info.needRecover = true; - if (IsRecover(task)) { - info.seqNum = chunkDataName.chunkSeqNum_; - } else { - info.seqNum = kInitializeSeqNum; - } + } + if (fileLength % segmentSize != 0) + { + LOG(ERROR) << "GetFileInfo return invalid fileInfo, " + << "fileLength is not align to SegmentSize" + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } - auto it = segInfos->find(segmentIndex); - if (it == segInfos->end()) { - CloneSegmentInfo segInfo; - segInfo.emplace(chunkIndex % chunkPerSegment, info); - segInfos->emplace(segmentIndex, segInfo); - } else { - it->second.emplace(chunkIndex % chunkPerSegment, info); - } - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::BuildFileInfoFromFile( - std::shared_ptr task, - FInfo *newFileInfo, - CloneSegmentMap *segInfos) { - segInfos->clear(); - UUID source = task->GetCloneInfo().GetSrc(); - std::string user = task->GetCloneInfo().GetUser(); - - FInfo fInfo; - int ret = client_->GetFileInfo(source, mdsRootUser_, &fInfo); - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "GetFileInfo fail" - << ", ret = " << ret - << ", source = " << source - << ", user = " << user - << ", taskid = " << task->GetTaskId(); - return kErrCodeFileNotExist; - } - // GetOrAllocateSegment依赖fullPathName - fInfo.fullPathName = source; - - newFileInfo->chunksize = fInfo.chunksize; - newFileInfo->segmentsize = fInfo.segmentsize; - newFileInfo->length = fInfo.length; - newFileInfo->seqnum = kInitializeSeqNum; - newFileInfo->owner = task->GetCloneInfo().GetUser(); - newFileInfo->stripeUnit = fInfo.stripeUnit; - newFileInfo->stripeCount = fInfo.stripeCount; - - if (task->GetCloneInfo().GetTaskType() == CloneTaskType::kRecover && - task->GetCloneInfo().GetPoolset().empty()) { - LOG(ERROR) << "Recover task's poolset should not be empty"; - return kErrCodeInternalError; - } - newFileInfo->poolset = !task->GetCloneInfo().GetPoolset().empty() - ? 
task->GetCloneInfo().GetPoolset() - : fInfo.poolset; - - uint64_t fileLength = fInfo.length; - uint64_t segmentSize = fInfo.segmentsize; - uint64_t chunkSize = fInfo.chunksize; - - if (0 == segmentSize) { - LOG(ERROR) << "GetFileInfo return invalid fileInfo, segmentSize == 0" - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - if (fileLength%segmentSize != 0) { - LOG(ERROR) << "GetFileInfo return invalid fileInfo, " - << "fileLength is not align to SegmentSize" - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - - for (uint64_t i = 0; i< fileLength/segmentSize; i++) { - uint64_t offset = i * segmentSize; - SegmentInfo segInfoOut; - ret = client_->GetOrAllocateSegmentInfo( - false, offset, &fInfo, mdsRootUser_, &segInfoOut); - if (ret != LIBCURVE_ERROR::OK && - ret != -LIBCURVE_ERROR::NOT_ALLOCATE) { - LOG(ERROR) << "GetOrAllocateSegmentInfo fail" - << ", ret = " << ret - << ", filename = " << source - << ", user = " << user - << ", offset = " << offset - << ", allocateIfNotExist = " << "false" - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; + for (uint64_t i = 0; i < fileLength / segmentSize; i++) + { + uint64_t offset = i * segmentSize; + SegmentInfo segInfoOut; + ret = client_->GetOrAllocateSegmentInfo(false, offset, &fInfo, + mdsRootUser_, &segInfoOut); + if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOT_ALLOCATE) + { + LOG(ERROR) << "GetOrAllocateSegmentInfo fail" + << ", ret = " << ret << ", filename = " << source + << ", user = " << user << ", offset = " << offset + << ", allocateIfNotExist = " + << "false" + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + if (segInfoOut.chunkvec.size() != 0) + { + CloneSegmentInfo segInfo; + for (std::vector::size_type j = 0; + j < segInfoOut.chunkvec.size(); j++) + { + CloneChunkInfo info; + info.location = std::to_string(offset + j * chunkSize); + info.seqNum = kInitializeSeqNum; + info.needRecover = true; + segInfo.emplace(j, info); + } + segInfos->emplace(i, segInfo); + } + } + return kErrCodeSuccess; } - if (segInfoOut.chunkvec.size() != 0) { - CloneSegmentInfo segInfo; - for (std::vector::size_type j = 0; - j < segInfoOut.chunkvec.size(); j++) { - CloneChunkInfo info; - info.location = std::to_string(offset + j * chunkSize); - info.seqNum = kInitializeSeqNum; - info.needRecover = true; - segInfo.emplace(j, info); + + int CloneCoreImpl::CreateCloneFile(std::shared_ptr task, + const FInfo &fInfo) + { + std::string fileName = + cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + std::string user = fInfo.owner; + uint64_t fileLength = fInfo.length; + uint64_t seqNum = fInfo.seqnum; + uint32_t chunkSize = fInfo.chunksize; + uint64_t stripeUnit = fInfo.stripeUnit; + uint64_t stripeCount = fInfo.stripeCount; + const auto &poolset = fInfo.poolset; + + std::string source = ""; + // Clone source is only available when cloning from a file + if (CloneFileType::kFile == task->GetCloneInfo().GetFileType()) + { + source = task->GetCloneInfo().GetSrc(); + } + + FInfo fInfoOut; + int ret = client_->CreateCloneFile( + source, fileName, mdsRootUser_, fileLength, seqNum, chunkSize, + stripeUnit, stripeCount, poolset, &fInfoOut); + if (ret == LIBCURVE_ERROR::OK) + { + // nothing + } + else if (ret == -LIBCURVE_ERROR::EXISTS) + { + ret = client_->GetFileInfo(fileName, mdsRootUser_, &fInfoOut); + if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "GetFileInfo fail" + << ", ret = " << ret << ", fileName = " << fileName + << ", taskid = " << 
task->GetTaskId();
+                    return kErrCodeInternalError;
+                }
+            }
+            else
+            {
+                LOG(ERROR) << "CreateCloneFile fail"
+                           << ", ret = " << ret << ", destination = " << fileName
+                           << ", user = " << user << ", fileLength = " << fileLength
+                           << ", seqNum = " << seqNum << ", chunkSize = " << chunkSize
+                           << ", return fileId = " << fInfoOut.id
+                           << ", taskid = " << task->GetTaskId();
+                return kErrCodeInternalError;
+            }
+            task->GetCloneInfo().SetOriginId(fInfoOut.id);
+            if (IsClone(task))
+            {
+                // In the clone case, destinationId == originId
+                task->GetCloneInfo().SetDestId(fInfoOut.id);
            }
+            task->GetCloneInfo().SetTime(fInfoOut.ctime);
+            // For a lazy clone from a file (not from a snapshot), skip
+            // createCloneMeta and createCloneChunk for now; they are done in
+            // stage 2, right before recoverChunk
+            if (IsLazy(task) && IsFile(task))
+            {
+                task->GetCloneInfo().SetNextStep(CloneStep::kCompleteCloneMeta);
+            }
+            else
+            {
+                task->GetCloneInfo().SetNextStep(CloneStep::kCreateCloneMeta);
+            }
+
+            ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo());
+            if (ret < 0)
+            {
+                LOG(ERROR) << "UpdateCloneInfo after CreateCloneFile error."
+                           << " ret = " << ret << ", taskid = " << task->GetTaskId();
+                return ret;
+            }
+            return kErrCodeSuccess;
+        }
+
+        int CloneCoreImpl::CreateCloneMeta(std::shared_ptr task,
+                                           FInfo *fInfo, CloneSegmentMap *segInfos)
+        {
+            int ret = CreateOrUpdateCloneMeta(task, fInfo, segInfos);
+            if (ret < 0)
+            {
+                return ret;
+            }
+
+            task->GetCloneInfo().SetNextStep(CloneStep::kCreateCloneChunk);
+
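+            // Persist the step transition so that a retried or restarted
+            // task resumes from kCreateCloneChunk instead of repeating the
+            // steps already finished.
+            ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo());
+            if (ret < 0)
+            {
+                LOG(ERROR) << "UpdateCloneInfo after CreateCloneMeta error."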
+ << " ret = " << ret << ", taskid = " << task->GetTaskId(); + return ret; + } + return kErrCodeSuccess; } - } else { - LOG(ERROR) << "CreateCloneFile file" - << ", ret = " << ret - << ", destination = " << fileName - << ", user = " << user - << ", fileLength = " << fileLength - << ", seqNum = " << seqNum - << ", chunkSize = " << chunkSize - << ", return fileId = " << fInfoOut.id - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - task->GetCloneInfo().SetOriginId(fInfoOut.id); - if (IsClone(task)) { - // 克隆情况下destinationId = originId; - task->GetCloneInfo().SetDestId(fInfoOut.id); - } - task->GetCloneInfo().SetTime(fInfoOut.ctime); - // 如果是lazy&非快照,先不要createCloneMeta,createCloneChunk - // 等后面stage2阶段recoveryChunk之前去createCloneMeta,createCloneChunk - if (IsLazy(task) && IsFile(task)) { - task->GetCloneInfo().SetNextStep(CloneStep::kCompleteCloneMeta); - } else { - task->GetCloneInfo().SetNextStep(CloneStep::kCreateCloneMeta); - } - - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after CreateCloneFile error." - << " ret = " << ret - << ", taskid = " << task->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::CreateCloneMeta( - std::shared_ptr task, - FInfo *fInfo, - CloneSegmentMap *segInfos) { - int ret = CreateOrUpdateCloneMeta(task, fInfo, segInfos); - if (ret < 0) { - return ret; - } - - task->GetCloneInfo().SetNextStep(CloneStep::kCreateCloneChunk); - - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after CreateCloneMeta error." - << " ret = " << ret - << ", taskid = " << task->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::CreateCloneChunk( - std::shared_ptr task, - const FInfo &fInfo, - CloneSegmentMap *segInfos) { - int ret = kErrCodeSuccess; - uint32_t chunkSize = fInfo.chunksize; - uint32_t correctSn = 0; - // 克隆时correctSn为0,恢复时为新产生的文件版本 - if (IsClone(task)) { - correctSn = 0; - } else { - correctSn = fInfo.seqnum; - } - auto tracker = std::make_shared(); - for (auto & cloneSegmentInfo : *segInfos) { - for (auto & cloneChunkInfo : cloneSegmentInfo.second) { - std::string location; - if (IsSnapshot(task)) { - location = LocationOperator::GenerateS3Location( - cloneChunkInfo.second.location); - } else { - location = LocationOperator::GenerateCurveLocation( - task->GetCloneInfo().GetSrc(), - std::stoull(cloneChunkInfo.second.location)); - } - ChunkIDInfo cidInfo = cloneChunkInfo.second.chunkIdInfo; - - auto context = std::make_shared(); - context->location = location; - context->cidInfo = cidInfo; - context->cloneChunkInfo = &cloneChunkInfo.second; - context->sn = cloneChunkInfo.second.seqNum; - context->csn = correctSn; - context->chunkSize = chunkSize; - context->taskid = task->GetTaskId(); - context->startTime = TimeUtility::GetTimeofDaySec(); - context->clientAsyncMethodRetryTimeSec = - clientAsyncMethodRetryTimeSec_; - - ret = StartAsyncCreateCloneChunk(task, tracker, context); - if (ret < 0) { - return kErrCodeInternalError; + + int CloneCoreImpl::CreateCloneChunk(std::shared_ptr task, + const FInfo &fInfo, + CloneSegmentMap *segInfos) + { + int ret = kErrCodeSuccess; + uint32_t chunkSize = fInfo.chunksize; + uint32_t correctSn = 0; + // When cloning, correctSn is 0, and when restoring, it is the newly + // generated file version + if (IsClone(task)) + { + correctSn = 0; + } + else + { + correctSn = fInfo.seqnum; } + auto tracker = std::make_shared(); + for (auto 
&cloneSegmentInfo : *segInfos) + { + for (auto &cloneChunkInfo : cloneSegmentInfo.second) + { + std::string location; + if (IsSnapshot(task)) + { + location = LocationOperator::GenerateS3Location( + cloneChunkInfo.second.location); + } + else + { + location = LocationOperator::GenerateCurveLocation( + task->GetCloneInfo().GetSrc(), + std::stoull(cloneChunkInfo.second.location)); + } + ChunkIDInfo cidInfo = cloneChunkInfo.second.chunkIdInfo; + + auto context = std::make_shared(); + context->location = location; + context->cidInfo = cidInfo; + context->cloneChunkInfo = &cloneChunkInfo.second; + context->sn = cloneChunkInfo.second.seqNum; + context->csn = correctSn; + context->chunkSize = chunkSize; + context->taskid = task->GetTaskId(); + context->startTime = TimeUtility::GetTimeofDaySec(); + context->clientAsyncMethodRetryTimeSec = + clientAsyncMethodRetryTimeSec_; + + ret = StartAsyncCreateCloneChunk(task, tracker, context); + if (ret < 0) + { + return kErrCodeInternalError; + } - if (tracker->GetTaskNum() >= createCloneChunkConcurrency_) { + if (tracker->GetTaskNum() >= createCloneChunkConcurrency_) + { + tracker->WaitSome(1); + } + std::list results = + tracker->PopResultContexts(); + ret = HandleCreateCloneChunkResultsAndRetry(task, tracker, results); + if (ret < 0) + { + return kErrCodeInternalError; + } + } + } + // Tasks with insufficient remaining quantity in the end + do + { tracker->WaitSome(1); + std::list results = + tracker->PopResultContexts(); + if (0 == results.size()) + { + // Completed, no new results + break; + } + ret = HandleCreateCloneChunkResultsAndRetry(task, tracker, results); + if (ret < 0) + { + return kErrCodeInternalError; + } + } while (true); + + if (IsLazy(task) && IsFile(task)) + { + task->GetCloneInfo().SetNextStep(CloneStep::kRecoverChunk); } - std::list results = - tracker->PopResultContexts(); - ret = HandleCreateCloneChunkResultsAndRetry(task, tracker, results); - if (ret < 0) { + else + { + task->GetCloneInfo().SetNextStep(CloneStep::kCompleteCloneMeta); + } + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo after CreateCloneChunk error." + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } + return kErrCodeSuccess; } - } - // 最后剩余数量不足的任务 - do { - tracker->WaitSome(1); - std::list results = - tracker->PopResultContexts(); - if (0 == results.size()) { - // 已经完成,没有新的结果了 - break; - } - ret = HandleCreateCloneChunkResultsAndRetry(task, tracker, results); - if (ret < 0) { - return kErrCodeInternalError; - } - } while (true); - - if (IsLazy(task) && IsFile(task)) { - task->GetCloneInfo().SetNextStep(CloneStep::kRecoverChunk); - } else { - task->GetCloneInfo().SetNextStep(CloneStep::kCompleteCloneMeta); - } - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after CreateCloneChunk error." 
- << " ret = " << ret - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::StartAsyncCreateCloneChunk( - std::shared_ptr task, - std::shared_ptr tracker, - std::shared_ptr context) { - CreateCloneChunkClosure *cb = - new CreateCloneChunkClosure(tracker, context); - tracker->AddOneTrace(); - LOG(INFO) << "Doing CreateCloneChunk" - << ", location = " << context->location - << ", logicalPoolId = " << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", chunkId = " << context->cidInfo.cid_ - << ", seqNum = " << context->sn - << ", csn = " << context->csn - << ", taskid = " << task->GetTaskId(); - int ret = client_->CreateCloneChunk(context->location, - context->cidInfo, - context->sn, - context->csn, - context->chunkSize, - cb); - - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "CreateCloneChunk fail" - << ", ret = " << ret - << ", location = " << context->location - << ", logicalPoolId = " << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", chunkId = " << context->cidInfo.cid_ - << ", seqNum = " << context->sn - << ", csn = " << context->csn - << ", taskid = " << task->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::HandleCreateCloneChunkResultsAndRetry( - std::shared_ptr task, - std::shared_ptr tracker, - const std::list &results) { - int ret = kErrCodeSuccess; - for (auto context : results) { - if (context->retCode == -LIBCURVE_ERROR::EXISTS) { - LOG(INFO) << "CreateCloneChunk chunk exist" + + int CloneCoreImpl::StartAsyncCreateCloneChunk( + std::shared_ptr task, + std::shared_ptr tracker, + std::shared_ptr context) + { + CreateCloneChunkClosure *cb = new CreateCloneChunkClosure(tracker, context); + tracker->AddOneTrace(); + LOG(INFO) << "Doing CreateCloneChunk" << ", location = " << context->location << ", logicalPoolId = " << context->cidInfo.lpid_ << ", copysetId = " << context->cidInfo.cpid_ << ", chunkId = " << context->cidInfo.cid_ - << ", seqNum = " << context->sn - << ", csn = " << context->csn + << ", seqNum = " << context->sn << ", csn = " << context->csn << ", taskid = " << task->GetTaskId(); - context->cloneChunkInfo->needRecover = false; - } else if (context->retCode != LIBCURVE_ERROR::OK) { - uint64_t nowTime = TimeUtility::GetTimeofDaySec(); - if (nowTime - context->startTime < - context->clientAsyncMethodRetryTimeSec) { - // retry - std::this_thread::sleep_for( - std::chrono::milliseconds( - clientAsyncMethodRetryIntervalMs_)); - ret = StartAsyncCreateCloneChunk( - task, tracker, context); - if (ret < 0) { - return kErrCodeInternalError; - } - } else { - LOG(ERROR) << "CreateCloneChunk tracker GetResult fail" - << ", ret = " << ret + int ret = client_->CreateCloneChunk(context->location, context->cidInfo, + context->sn, context->csn, + context->chunkSize, cb); + + if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "CreateCloneChunk fail" + << ", ret = " << ret << ", location = " << context->location + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", seqNum = " << context->sn << ", csn = " << context->csn << ", taskid = " << task->GetTaskId(); + return ret; + } + return kErrCodeSuccess; + } + + int CloneCoreImpl::HandleCreateCloneChunkResultsAndRetry( + std::shared_ptr task, + std::shared_ptr tracker, + const std::list &results) + { + int ret = kErrCodeSuccess; + for (auto context : results) + { + if 
(context->retCode == -LIBCURVE_ERROR::EXISTS) + { + LOG(INFO) << "CreateCloneChunk chunk exist" + << ", location = " << context->location + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", seqNum = " << context->sn + << ", csn = " << context->csn + << ", taskid = " << task->GetTaskId(); + context->cloneChunkInfo->needRecover = false; + } + else if (context->retCode != LIBCURVE_ERROR::OK) + { + uint64_t nowTime = TimeUtility::GetTimeofDaySec(); + if (nowTime - context->startTime < + context->clientAsyncMethodRetryTimeSec) + { + // retry + std::this_thread::sleep_for(std::chrono::milliseconds( + clientAsyncMethodRetryIntervalMs_)); + ret = StartAsyncCreateCloneChunk(task, tracker, context); + if (ret < 0) + { + return kErrCodeInternalError; + } + } + else + { + LOG(ERROR) << "CreateCloneChunk tracker GetResult fail" + << ", ret = " << ret + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + } + } + return ret; + } + + int CloneCoreImpl::CompleteCloneMeta(std::shared_ptr task, + const FInfo &fInfo, + const CloneSegmentMap &segInfos) + { + (void)fInfo; + (void)segInfos; + std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + std::string user = task->GetCloneInfo().GetUser(); + int ret = client_->CompleteCloneMeta(origin, mdsRootUser_); + if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "CompleteCloneMeta fail" + << ", ret = " << ret << ", filename = " << origin + << ", user = " << user << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } + if (IsLazy(task)) + { + task->GetCloneInfo().SetNextStep(CloneStep::kChangeOwner); + } + else + { + task->GetCloneInfo().SetNextStep(CloneStep::kRecoverChunk); + } + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo after CompleteCloneMeta error." + << " ret = " << ret << ", taskid = " << task->GetTaskId(); + return ret; + } + return kErrCodeSuccess; } - } - return ret; -} - -int CloneCoreImpl::CompleteCloneMeta( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos) { - (void)fInfo; - (void)segInfos; - std::string origin = - cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); - std::string user = task->GetCloneInfo().GetUser(); - int ret = client_->CompleteCloneMeta(origin, mdsRootUser_); - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "CompleteCloneMeta fail" - << ", ret = " << ret - << ", filename = " << origin - << ", user = " << user - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - if (IsLazy(task)) { - task->GetCloneInfo().SetNextStep( - CloneStep::kChangeOwner); - } else { - task->GetCloneInfo().SetNextStep( - CloneStep::kRecoverChunk); - } - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after CompleteCloneMeta error." 
- << " ret = " << ret - << ", taskid = " << task->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::RecoverChunk( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos) { - int ret = kErrCodeSuccess; - uint32_t chunkSize = fInfo.chunksize; - - uint32_t totalProgress = - kProgressRecoverChunkEnd - kProgressRecoverChunkBegin; - uint32_t segNum = segInfos.size(); - double progressPerData = static_cast(totalProgress) / segNum; - uint32_t index = 0; - - if (0 == cloneChunkSplitSize_ || - chunkSize % cloneChunkSplitSize_ != 0) { - LOG(ERROR) << "chunk is not align to cloneChunkSplitSize" - << ", taskid = " << task->GetTaskId(); - return kErrCodeChunkSizeNotAligned; - } - - auto tracker = std::make_shared(); - uint64_t workingChunkNum = 0; - // 为避免发往同一个chunk碰撞,异步请求不同的chunk - for (auto & cloneSegmentInfo : segInfos) { - for (auto & cloneChunkInfo : cloneSegmentInfo.second) { - if (!cloneChunkInfo.second.needRecover) { - continue; - } - // 当前并发工作的chunk数已大于要求的并发数时,先消化一部分 - while (workingChunkNum >= recoverChunkConcurrency_) { + + int CloneCoreImpl::RecoverChunk(std::shared_ptr task, + const FInfo &fInfo, + const CloneSegmentMap &segInfos) + { + int ret = kErrCodeSuccess; + uint32_t chunkSize = fInfo.chunksize; + + uint32_t totalProgress = + kProgressRecoverChunkEnd - kProgressRecoverChunkBegin; + uint32_t segNum = segInfos.size(); + double progressPerData = static_cast(totalProgress) / segNum; + uint32_t index = 0; + + if (0 == cloneChunkSplitSize_ || chunkSize % cloneChunkSplitSize_ != 0) + { + LOG(ERROR) << "chunk is not align to cloneChunkSplitSize" + << ", taskid = " << task->GetTaskId(); + return kErrCodeChunkSizeNotAligned; + } + + auto tracker = std::make_shared(); + uint64_t workingChunkNum = 0; + // To avoid collisions with the same chunk, asynchronous requests for + // different chunks + for (auto &cloneSegmentInfo : segInfos) + { + for (auto &cloneChunkInfo : cloneSegmentInfo.second) + { + if (!cloneChunkInfo.second.needRecover) + { + continue; + } + // When the current number of chunks for concurrent work exceeds the + // required number of concurrent tasks, digest a portion first + while (workingChunkNum >= recoverChunkConcurrency_) + { + uint64_t completeChunkNum = 0; + ret = ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( + task, tracker, &completeChunkNum); + if (ret < 0) + { + return kErrCodeInternalError; + } + workingChunkNum -= completeChunkNum; + } + // Chunk joining a new job + workingChunkNum++; + auto context = std::make_shared(); + context->cidInfo = cloneChunkInfo.second.chunkIdInfo; + context->totalPartNum = chunkSize / cloneChunkSplitSize_; + context->partIndex = 0; + context->partSize = cloneChunkSplitSize_; + context->taskid = task->GetTaskId(); + context->startTime = TimeUtility::GetTimeofDaySec(); + context->clientAsyncMethodRetryTimeSec = + clientAsyncMethodRetryTimeSec_; + + LOG(INFO) << "RecoverChunk start" + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", len = " << context->partSize + << ", taskid = " << task->GetTaskId(); + + ret = StartAsyncRecoverChunkPart(task, tracker, context); + if (ret < 0) + { + return kErrCodeInternalError; + } + } + task->SetProgress(static_cast(kProgressRecoverChunkBegin + + index * progressPerData)); + task->UpdateMetric(); + index++; + } + + while (workingChunkNum > 0) + { uint64_t completeChunkNum = 0; - ret = ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd(task, - 
tracker, - &completeChunkNum); - if (ret < 0) { + ret = ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( + task, tracker, &completeChunkNum); + if (ret < 0) + { return kErrCodeInternalError; } workingChunkNum -= completeChunkNum; } - // 加入新的工作的chunk - workingChunkNum++; - auto context = std::make_shared(); - context->cidInfo = cloneChunkInfo.second.chunkIdInfo; - context->totalPartNum = chunkSize / cloneChunkSplitSize_; - context->partIndex = 0; - context->partSize = cloneChunkSplitSize_; - context->taskid = task->GetTaskId(); - context->startTime = TimeUtility::GetTimeofDaySec(); - context->clientAsyncMethodRetryTimeSec = - clientAsyncMethodRetryTimeSec_; - - LOG(INFO) << "RecoverChunk start" - << ", logicalPoolId = " - << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", chunkId = " << context->cidInfo.cid_ - << ", len = " << context->partSize - << ", taskid = " << task->GetTaskId(); - - ret = StartAsyncRecoverChunkPart(task, tracker, context); - if (ret < 0) { + + task->GetCloneInfo().SetNextStep(CloneStep::kCompleteCloneFile); + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo after RecoverChunk error." + << " ret = " << ret << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } + return kErrCodeSuccess; } - task->SetProgress(static_cast( - kProgressRecoverChunkBegin + index * progressPerData)); - task->UpdateMetric(); - index++; - } - - while (workingChunkNum > 0) { - uint64_t completeChunkNum = 0; - ret = ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd(task, - tracker, - &completeChunkNum); - if (ret < 0) { - return kErrCodeInternalError; + + int CloneCoreImpl::StartAsyncRecoverChunkPart( + std::shared_ptr task, + std::shared_ptr tracker, + std::shared_ptr context) + { + RecoverChunkClosure *cb = new RecoverChunkClosure(tracker, context); + tracker->AddOneTrace(); + uint64_t offset = context->partIndex * context->partSize; + LOG_EVERY_SECOND(INFO) << "Doing RecoverChunk" + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", offset = " << offset + << ", len = " << context->partSize + << ", taskid = " << task->GetTaskId(); + int ret = + client_->RecoverChunk(context->cidInfo, offset, context->partSize, cb); + if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "RecoverChunk fail" + << ", ret = " << ret + << ", logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", offset = " << offset << ", len = " << context->partSize + << ", taskid = " << task->GetTaskId(); + return ret; + } + return kErrCodeSuccess; } - workingChunkNum -= completeChunkNum; - } - - task->GetCloneInfo().SetNextStep(CloneStep::kCompleteCloneFile); - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after RecoverChunk error." 
- << " ret = " << ret - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::StartAsyncRecoverChunkPart( - std::shared_ptr task, - std::shared_ptr tracker, - std::shared_ptr context) { - RecoverChunkClosure *cb = new RecoverChunkClosure(tracker, context); - tracker->AddOneTrace(); - uint64_t offset = context->partIndex * context->partSize; - LOG_EVERY_SECOND(INFO) << "Doing RecoverChunk" - << ", logicalPoolId = " - << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", chunkId = " << context->cidInfo.cid_ - << ", offset = " << offset - << ", len = " << context->partSize - << ", taskid = " << task->GetTaskId(); - int ret = client_->RecoverChunk(context->cidInfo, - offset, - context->partSize, - cb); - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "RecoverChunk fail" - << ", ret = " << ret - << ", logicalPoolId = " - << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", chunkId = " << context->cidInfo.cid_ - << ", offset = " << offset - << ", len = " << context->partSize - << ", taskid = " << task->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( - std::shared_ptr task, - std::shared_ptr tracker, - uint64_t *completeChunkNum) { - *completeChunkNum = 0; - tracker->WaitSome(1); - std::list results = - tracker->PopResultContexts(); - for (auto context : results) { - if (context->retCode != LIBCURVE_ERROR::OK) { - uint64_t nowTime = TimeUtility::GetTimeofDaySec(); - if (nowTime - context->startTime < - context->clientAsyncMethodRetryTimeSec) { - // retry - std::this_thread::sleep_for( - std::chrono::milliseconds( - clientAsyncMethodRetryIntervalMs_)); - int ret = StartAsyncRecoverChunkPart(task, tracker, context); - if (ret < 0) { - return ret; + + int CloneCoreImpl::ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( + std::shared_ptr task, + std::shared_ptr tracker, + uint64_t *completeChunkNum) + { + *completeChunkNum = 0; + tracker->WaitSome(1); + std::list results = tracker->PopResultContexts(); + for (auto context : results) + { + if (context->retCode != LIBCURVE_ERROR::OK) + { + uint64_t nowTime = TimeUtility::GetTimeofDaySec(); + if (nowTime - context->startTime < + context->clientAsyncMethodRetryTimeSec) + { + // retry + std::this_thread::sleep_for(std::chrono::milliseconds( + clientAsyncMethodRetryIntervalMs_)); + int ret = StartAsyncRecoverChunkPart(task, tracker, context); + if (ret < 0) + { + return ret; + } + } + else + { + LOG(ERROR) << "RecoverChunk tracker GetResult fail" + << ", ret = " << context->retCode + << ", taskid = " << task->GetTaskId(); + return context->retCode; + } } - } else { - LOG(ERROR) << "RecoverChunk tracker GetResult fail" - << ", ret = " << context->retCode - << ", taskid = " << task->GetTaskId(); - return context->retCode; - } - } else { - // 启动一个新的分片,index++,并重置开始时间 - context->partIndex++; - context->startTime = TimeUtility::GetTimeofDaySec(); - if (context->partIndex < context->totalPartNum) { - int ret = StartAsyncRecoverChunkPart(task, tracker, context); - if (ret < 0) { - return ret; + else + { + // Start a new shard, index++, and reset the start time + context->partIndex++; + context->startTime = TimeUtility::GetTimeofDaySec(); + if (context->partIndex < context->totalPartNum) + { + int ret = StartAsyncRecoverChunkPart(task, tracker, context); + if (ret < 0) + { + return ret; + } + } + else + { + LOG(INFO) << "RecoverChunk Complete" + << ", 
logicalPoolId = " << context->cidInfo.lpid_ + << ", copysetId = " << context->cidInfo.cpid_ + << ", chunkId = " << context->cidInfo.cid_ + << ", len = " << context->partSize + << ", taskid = " << task->GetTaskId(); + (*completeChunkNum)++; + } } - } else { - LOG(INFO) << "RecoverChunk Complete" - << ", logicalPoolId = " - << context->cidInfo.lpid_ - << ", copysetId = " << context->cidInfo.cpid_ - << ", chunkId = " << context->cidInfo.cid_ - << ", len = " << context->partSize + } + return kErrCodeSuccess; + } + + int CloneCoreImpl::ChangeOwner(std::shared_ptr task, + const FInfo &fInfo) + { + (void)fInfo; + std::string user = task->GetCloneInfo().GetUser(); + std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + + int ret = client_->ChangeOwner(origin, user); + if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "ChangeOwner fail, ret = " << ret + << ", fileName = " << origin << ", newOwner = " << user << ", taskid = " << task->GetTaskId(); - (*completeChunkNum)++; + return kErrCodeInternalError; + } + + task->GetCloneInfo().SetNextStep(CloneStep::kRenameCloneFile); + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo after ChangeOwner error." + << " ret = " << ret << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; } + return kErrCodeSuccess; } - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::ChangeOwner( - std::shared_ptr task, - const FInfo &fInfo) { - (void)fInfo; - std::string user = task->GetCloneInfo().GetUser(); - std::string origin = - cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); - - int ret = client_->ChangeOwner(origin, user); - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "ChangeOwner fail, ret = " << ret - << ", fileName = " << origin - << ", newOwner = " << user - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - - task->GetCloneInfo().SetNextStep(CloneStep::kRenameCloneFile); - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after ChangeOwner error." 
- << " ret = " << ret - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::RenameCloneFile( - std::shared_ptr task, - const FInfo &fInfo) { - std::string user = fInfo.owner; - uint64_t originId = task->GetCloneInfo().GetOriginId(); - uint64_t destinationId = task->GetCloneInfo().GetDestId(); - std::string origin = - cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); - std::string destination = task->GetCloneInfo().GetDest(); - - // 先rename - int ret = client_->RenameCloneFile(mdsRootUser_, - originId, - destinationId, - origin, - destination); - if (-LIBCURVE_ERROR::NOTEXIST == ret) { - // 有可能是已经rename过了 - FInfo destFInfo; - ret = client_->GetFileInfo(destination, mdsRootUser_, &destFInfo); - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "RenameCloneFile return NOTEXIST," - << "And get dest fileInfo fail, ret = " << ret - << ", destination filename = " << destination - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; + + int CloneCoreImpl::RenameCloneFile(std::shared_ptr task, + const FInfo &fInfo) + { + std::string user = fInfo.owner; + uint64_t originId = task->GetCloneInfo().GetOriginId(); + uint64_t destinationId = task->GetCloneInfo().GetDestId(); + std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + std::string destination = task->GetCloneInfo().GetDest(); + + // Rename first + int ret = client_->RenameCloneFile(mdsRootUser_, originId, destinationId, + origin, destination); + if (-LIBCURVE_ERROR::NOTEXIST == ret) + { + // It is possible that it has already been renamed + FInfo destFInfo; + ret = client_->GetFileInfo(destination, mdsRootUser_, &destFInfo); + if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "RenameCloneFile return NOTEXIST," + << "And get dest fileInfo fail, ret = " << ret + << ", destination filename = " << destination + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + if (destFInfo.id != originId) + { + LOG(ERROR) << "RenameCloneFile return NOTEXIST," + << "And get dest file id not equal, ret = " << ret + << "originId = " << originId + << "destFInfo.id = " << destFInfo.id + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + } + else if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "RenameCloneFile fail" + << ", ret = " << ret << ", user = " << user + << ", originId = " << originId << ", origin = " << origin + << ", destination = " << destination + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + + if (IsLazy(task)) + { + if (IsFile(task)) + { + task->GetCloneInfo().SetNextStep(CloneStep::kCreateCloneMeta); + } + else + { + task->GetCloneInfo().SetNextStep(CloneStep::kRecoverChunk); + } + task->GetCloneInfo().SetStatus(CloneStatus::metaInstalled); + } + else + { + task->GetCloneInfo().SetNextStep(CloneStep::kEnd); + } + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo after RenameCloneFile error." 
+ << " ret = " << ret << ", taskid = " << task->GetTaskId(); + return ret; + } + return kErrCodeSuccess; } - if (destFInfo.id != originId) { - LOG(ERROR) << "RenameCloneFile return NOTEXIST," - << "And get dest file id not equal, ret = " << ret - << "originId = " << originId - << "destFInfo.id = " << destFInfo.id - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; + + int CloneCoreImpl::CompleteCloneFile(std::shared_ptr task, + const FInfo &fInfo, + const CloneSegmentMap &segInfos) + { + (void)fInfo; + (void)segInfos; + std::string fileName; + if (IsLazy(task)) + { + fileName = task->GetCloneInfo().GetDest(); + } + else + { + fileName = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + } + std::string user = task->GetCloneInfo().GetUser(); + int ret = client_->CompleteCloneFile(fileName, mdsRootUser_); + switch (ret) + { + case LIBCURVE_ERROR::OK: + break; + case -LIBCURVE_ERROR::NOTEXIST: + LOG(ERROR) << "CompleteCloneFile " + << "find dest file not exist, maybe deleted" + << ", ret = " << ret << ", destination = " << fileName + << ", user = " << user + << ", taskid = " << task->GetTaskId(); + return kErrCodeFileNotExist; + default: + LOG(ERROR) << "CompleteCloneFile fail" + << ", ret = " << ret << ", fileName = " << fileName + << ", user = " << user + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + if (IsLazy(task)) + { + task->GetCloneInfo().SetNextStep(CloneStep::kEnd); + } + else + { + task->GetCloneInfo().SetNextStep(CloneStep::kChangeOwner); + } + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo after CompleteCloneFile error." + << " ret = " << ret << ", taskid = " << task->GetTaskId(); + return ret; + } + return kErrCodeSuccess; } - } else if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "RenameCloneFile fail" - << ", ret = " << ret - << ", user = " << user - << ", originId = " << originId - << ", origin = " << origin - << ", destination = " << destination - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - - if (IsLazy(task)) { - if (IsFile(task)) { - task->GetCloneInfo().SetNextStep(CloneStep::kCreateCloneMeta); - } else { - task->GetCloneInfo().SetNextStep(CloneStep::kRecoverChunk); + + void CloneCoreImpl::HandleLazyCloneStage1Finish( + std::shared_ptr task) + { + LOG(INFO) << "Task Lazy Stage1 Success" + << ", TaskInfo : " << *task; + task->GetClosure()->SetErrCode(kErrCodeSuccess); + task->Finish(); + task->GetClosure()->Run(); + return; } - task->GetCloneInfo().SetStatus(CloneStatus::metaInstalled); - } else { - task->GetCloneInfo().SetNextStep(CloneStep::kEnd); - } - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after RenameCloneFile error." 
- << " ret = " << ret - << ", taskid = " << task->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::CompleteCloneFile( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos) { - (void)fInfo; - (void)segInfos; - std::string fileName; - if (IsLazy(task)) { - fileName = task->GetCloneInfo().GetDest(); - } else { - fileName = - cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); - } - std::string user = task->GetCloneInfo().GetUser(); - int ret = client_->CompleteCloneFile(fileName, mdsRootUser_); - switch (ret) { - case LIBCURVE_ERROR::OK: - break; - case -LIBCURVE_ERROR::NOTEXIST: - LOG(ERROR) << "CompleteCloneFile " - << "find dest file not exist, maybe deleted" - << ", ret = " << ret - << ", destination = " << fileName - << ", user = " << user - << ", taskid = " << task->GetTaskId(); - return kErrCodeFileNotExist; - default: - LOG(ERROR) << "CompleteCloneFile fail" - << ", ret = " << ret - << ", fileName = " << fileName - << ", user = " << user - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - if (IsLazy(task)) { - task->GetCloneInfo().SetNextStep(CloneStep::kEnd); - } else { - task->GetCloneInfo().SetNextStep(CloneStep::kChangeOwner); - } - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo after CompleteCloneFile error." - << " ret = " << ret - << ", taskid = " << task->GetTaskId(); - return ret; - } - return kErrCodeSuccess; -} - -void CloneCoreImpl::HandleLazyCloneStage1Finish( - std::shared_ptr task) { - LOG(INFO) << "Task Lazy Stage1 Success" - << ", TaskInfo : " << *task; - task->GetClosure()->SetErrCode(kErrCodeSuccess); - task->Finish(); - task->GetClosure()->Run(); - return; -} - -void CloneCoreImpl::HandleCloneSuccess(std::shared_ptr task) { - int ret = kErrCodeSuccess; - if (IsSnapshot(task)) { - snapshotRef_->DecrementSnapshotRef(task->GetCloneInfo().GetSrc()); - } else { - std::string source = task->GetCloneInfo().GetSrc(); - cloneRef_->DecrementRef(source); - NameLockGuard lockGuard(cloneRef_->GetLock(), source); - if (cloneRef_->GetRef(source) == 0) { - int ret = client_->SetCloneFileStatus(source, - FileStatus::Created, mdsRootUser_); - if (ret < 0) { - task->GetCloneInfo().SetStatus(CloneStatus::error); - int ret2 = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret2 < 0) { - LOG(ERROR) << "UpdateCloneInfo Task error Fail!" - << " ret = " << ret2 - << ", uuid = " << task->GetTaskId(); + + void CloneCoreImpl::HandleCloneSuccess(std::shared_ptr task) + { + int ret = kErrCodeSuccess; + if (IsSnapshot(task)) + { + snapshotRef_->DecrementSnapshotRef(task->GetCloneInfo().GetSrc()); + } + else + { + std::string source = task->GetCloneInfo().GetSrc(); + cloneRef_->DecrementRef(source); + NameLockGuard lockGuard(cloneRef_->GetLock(), source); + if (cloneRef_->GetRef(source) == 0) + { + int ret = client_->SetCloneFileStatus(source, FileStatus::Created, + mdsRootUser_); + if (ret < 0) + { + task->GetCloneInfo().SetStatus(CloneStatus::error); + int ret2 = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret2 < 0) + { + LOG(ERROR) << "UpdateCloneInfo Task error Fail!" 
+ << " ret = " << ret2 + << ", uuid = " << task->GetTaskId(); + } + LOG(ERROR) << "Task Fail cause by SetCloneFileStatus fail" + << ", ret = " << ret << ", TaskInfo : " << *task; + task->Finish(); + return; + } } - LOG(ERROR) << "Task Fail cause by SetCloneFileStatus fail" - << ", ret = " << ret - << ", TaskInfo : " << *task; - task->Finish(); + } + task->GetCloneInfo().SetStatus(CloneStatus::done); + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo Task Success Fail!" + << " ret = " << ret << ", uuid = " << task->GetTaskId(); + } + task->SetProgress(kProgressCloneComplete); + + LOG(INFO) << "Task Success" + << ", TaskInfo : " << *task; + task->Finish(); + return; + } + + void CloneCoreImpl::HandleCloneError(std::shared_ptr task, + int retCode) + { + int ret = kErrCodeSuccess; + if (NeedRetry(task, retCode)) + { + HandleCloneToRetry(task); return; } + + if (IsLazy(task)) + { + task->GetClosure()->SetErrCode(retCode); + } + if (IsSnapshot(task)) + { + snapshotRef_->DecrementSnapshotRef(task->GetCloneInfo().GetSrc()); + } + else + { + std::string source = task->GetCloneInfo().GetSrc(); + cloneRef_->DecrementRef(source); + NameLockGuard lockGuard(cloneRef_->GetLock(), source); + if (cloneRef_->GetRef(source) == 0) + { + ret = client_->SetCloneFileStatus(source, FileStatus::Created, + mdsRootUser_); + if (ret < 0) + { + LOG(ERROR) << "SetCloneFileStatus fail, ret = " << ret + << ", taskid = " << task->GetTaskId(); + } + } + } + task->GetCloneInfo().SetStatus(CloneStatus::error); + ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo Task error Fail!" + << " ret = " << ret << ", uuid = " << task->GetTaskId(); + } + LOG(ERROR) << "Task Fail" + << ", TaskInfo : " << *task; + task->Finish(); + return; } - } - task->GetCloneInfo().SetStatus(CloneStatus::done); - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo Task Success Fail!" - << " ret = " << ret - << ", uuid = " << task->GetTaskId(); - } - task->SetProgress(kProgressCloneComplete); - - LOG(INFO) << "Task Success" - << ", TaskInfo : " << *task; - task->Finish(); - return; -} - -void CloneCoreImpl::HandleCloneError(std::shared_ptr task, - int retCode) { - int ret = kErrCodeSuccess; - if (NeedRetry(task, retCode)) { - HandleCloneToRetry(task); - return; - } - - if (IsLazy(task)) { - task->GetClosure()->SetErrCode(retCode); - } - if (IsSnapshot(task)) { - snapshotRef_->DecrementSnapshotRef(task->GetCloneInfo().GetSrc()); - } else { - std::string source = task->GetCloneInfo().GetSrc(); - cloneRef_->DecrementRef(source); - NameLockGuard lockGuard(cloneRef_->GetLock(), source); - if (cloneRef_->GetRef(source) == 0) { - ret = client_->SetCloneFileStatus(source, - FileStatus::Created, mdsRootUser_); - if (ret < 0) { - LOG(ERROR) << "SetCloneFileStatus fail, ret = " << ret - << ", taskid = " << task->GetTaskId(); + + void CloneCoreImpl::HandleCloneToRetry(std::shared_ptr task) + { + task->GetCloneInfo().SetStatus(CloneStatus::retrying); + int ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo Task retrying Fail!" 
+ << " ret = " << ret << ", uuid = " << task->GetTaskId(); } + LOG(WARNING) << "Task Fail, Retrying" + << ", TaskInfo : " << *task; + task->Finish(); + return; } - } - task->GetCloneInfo().SetStatus(CloneStatus::error); - ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo Task error Fail!" - << " ret = " << ret - << ", uuid = " << task->GetTaskId(); - } - LOG(ERROR) << "Task Fail" - << ", TaskInfo : " << *task; - task->Finish(); - return; -} - -void CloneCoreImpl::HandleCloneToRetry(std::shared_ptr task) { - task->GetCloneInfo().SetStatus(CloneStatus::retrying); - int ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo Task retrying Fail!" - << " ret = " << ret - << ", uuid = " << task->GetTaskId(); - } - LOG(WARNING) << "Task Fail, Retrying" - << ", TaskInfo : " << *task; - task->Finish(); - return; -} - -void CloneCoreImpl::HandleCleanSuccess(std::shared_ptr task) { - TaskIdType taskId = task->GetCloneInfo().GetTaskId(); - int ret = metaStore_->DeleteCloneInfo(taskId); - if (ret < 0) { - LOG(ERROR) << "DeleteCloneInfo failed" - << ", ret = " << ret - << ", taskId = " << taskId; - } else { - LOG(INFO) << "Clean Task Success" - << ", TaskInfo : " << *task; - } - task->SetProgress(kProgressCloneComplete); - task->GetCloneInfo().SetStatus(CloneStatus::done); - - task->Finish(); - return; -} - -void CloneCoreImpl::HandleCleanError(std::shared_ptr task) { - task->GetCloneInfo().SetStatus(CloneStatus::error); - int ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo Task error Fail!" - << " ret = " << ret - << ", uuid = " << task->GetTaskId(); - } - LOG(ERROR) << "Clean Task Fail" - << ", TaskInfo : " << *task; - task->Finish(); - return; -} - -int CloneCoreImpl::GetCloneInfoList(std::vector *taskList) { - metaStore_->GetCloneInfoList(taskList); - return kErrCodeSuccess; -} - -int CloneCoreImpl::GetCloneInfo(TaskIdType taskId, CloneInfo *cloneInfo) { - return metaStore_->GetCloneInfo(taskId, cloneInfo); -} - -int CloneCoreImpl::GetCloneInfoByFileName( - const std::string &fileName, std::vector *list) { - return metaStore_->GetCloneInfoByFileName(fileName, list); -} - -inline bool CloneCoreImpl::IsLazy(std::shared_ptr task) { - return task->GetCloneInfo().GetIsLazy(); -} - -inline bool CloneCoreImpl::IsSnapshot(std::shared_ptr task) { - return CloneFileType::kSnapshot == task->GetCloneInfo().GetFileType(); -} - -inline bool CloneCoreImpl::IsFile(std::shared_ptr task) { - return CloneFileType::kFile == task->GetCloneInfo().GetFileType(); -} - -inline bool CloneCoreImpl::IsRecover(std::shared_ptr task) { - return CloneTaskType::kRecover == task->GetCloneInfo().GetTaskType(); -} - -inline bool CloneCoreImpl::IsClone(std::shared_ptr task) { - return CloneTaskType::kClone == task->GetCloneInfo().GetTaskType(); -} - -bool CloneCoreImpl::NeedUpdateCloneMeta( - std::shared_ptr task) { - bool ret = true; - CloneStep step = task->GetCloneInfo().GetNextStep(); - if (CloneStep::kCreateCloneFile == step || - CloneStep::kCreateCloneMeta == step || - CloneStep::kEnd == step) { - ret = false; - } - return ret; -} - -bool CloneCoreImpl::NeedRetry(std::shared_ptr task, - int retCode) { - if (IsLazy(task)) { - CloneStep step = task->GetCloneInfo().GetNextStep(); - if (CloneStep::kRecoverChunk == step || - CloneStep::kCompleteCloneFile == step || - CloneStep::kEnd == step) { - // 文件不存在的场景下不需要再重试,因为可能已经被删除了 - if (retCode != kErrCodeFileNotExist) { - 
return true; + + void CloneCoreImpl::HandleCleanSuccess(std::shared_ptr task) + { + TaskIdType taskId = task->GetCloneInfo().GetTaskId(); + int ret = metaStore_->DeleteCloneInfo(taskId); + if (ret < 0) + { + LOG(ERROR) << "DeleteCloneInfo failed" + << ", ret = " << ret << ", taskId = " << taskId; } + else + { + LOG(INFO) << "Clean Task Success" + << ", TaskInfo : " << *task; + } + task->SetProgress(kProgressCloneComplete); + task->GetCloneInfo().SetStatus(CloneStatus::done); + + task->Finish(); + return; } - } - return false; -} - -int CloneCoreImpl::CreateOrUpdateCloneMeta( - std::shared_ptr task, - FInfo *fInfo, - CloneSegmentMap *segInfos) { - std::string newFileName = - cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); - std::string user = fInfo->owner; - FInfo fInfoOut; - int ret = client_->GetFileInfo(newFileName, mdsRootUser_, &fInfoOut); - if (LIBCURVE_ERROR::OK == ret) { - // nothing - } else if (-LIBCURVE_ERROR::NOTEXIST == ret) { - // 可能已经rename过了 - newFileName = task->GetCloneInfo().GetDest(); - ret = client_->GetFileInfo(newFileName, mdsRootUser_, &fInfoOut); - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "File is missing, " - << "when CreateOrUpdateCloneMeta, " - << "GetFileInfo fail, ret = " << ret - << ", filename = " << newFileName - << ", taskid = " << task->GetTaskId(); - return kErrCodeFileNotExist; + + void CloneCoreImpl::HandleCleanError(std::shared_ptr task) + { + task->GetCloneInfo().SetStatus(CloneStatus::error); + int ret = metaStore_->UpdateCloneInfo(task->GetCloneInfo()); + if (ret < 0) + { + LOG(ERROR) << "UpdateCloneInfo Task error Fail!" + << " ret = " << ret << ", uuid = " << task->GetTaskId(); + } + LOG(ERROR) << "Clean Task Fail" + << ", TaskInfo : " << *task; + task->Finish(); + return; } - // 如果是已经rename过,那么id应该一致 - uint64_t originId = task->GetCloneInfo().GetOriginId(); - if (fInfoOut.id != originId) { - LOG(ERROR) << "File is missing, fileId not equal, " - << "when CreateOrUpdateCloneMeta" - << ", fileId = " << fInfoOut.id - << ", originId = " << originId - << ", filename = " << newFileName - << ", taskid = " << task->GetTaskId(); - return kErrCodeFileNotExist; + + int CloneCoreImpl::GetCloneInfoList(std::vector *taskList) + { + metaStore_->GetCloneInfoList(taskList); + return kErrCodeSuccess; } - } else { - LOG(ERROR) << "GetFileInfo fail" - << ", ret = " << ret - << ", filename = " << newFileName - << ", user = " << user - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; - } - // 更新fInfo - *fInfo = fInfoOut; - // GetOrAllocateSegment 依赖fullPathName,需要在此处更新 - fInfo->fullPathName = newFileName; - - uint32_t segmentSize = fInfo->segmentsize; - for (auto &segInfo : *segInfos) { - SegmentInfo segInfoOut; - uint64_t offset = segInfo.first * segmentSize; - ret = client_->GetOrAllocateSegmentInfo( - true, offset, fInfo, mdsRootUser_, &segInfoOut); - if (ret != LIBCURVE_ERROR::OK) { - LOG(ERROR) << "GetOrAllocateSegmentInfo fail" - << ", newFileName = " << newFileName - << ", user = " << user - << ", offset = " << offset - << ", allocateIfNotExist = " << "true" - << ", taskid = " << task->GetTaskId(); - return kErrCodeInternalError; + + int CloneCoreImpl::GetCloneInfo(TaskIdType taskId, CloneInfo *cloneInfo) + { + return metaStore_->GetCloneInfo(taskId, cloneInfo); } - for (auto &cloneChunkInfo : segInfo.second) { - if (cloneChunkInfo.first > segInfoOut.chunkvec.size()) { - LOG(ERROR) << "can not find chunkIndexInSeg = " - << cloneChunkInfo.first - << ", segmentIndex = " << segInfo.first - << ", logicalPoolId = " - << 
cloneChunkInfo.second.chunkIdInfo.lpid_ - << ", copysetId = " - << cloneChunkInfo.second.chunkIdInfo.cpid_ - << ", chunkId = " - << cloneChunkInfo.second.chunkIdInfo.cid_ - << ", taskid = " << task->GetTaskId(); + int CloneCoreImpl::GetCloneInfoByFileName(const std::string &fileName, + std::vector *list) + { + return metaStore_->GetCloneInfoByFileName(fileName, list); + } + + inline bool CloneCoreImpl::IsLazy(std::shared_ptr task) + { + return task->GetCloneInfo().GetIsLazy(); + } + + inline bool CloneCoreImpl::IsSnapshot(std::shared_ptr task) + { + return CloneFileType::kSnapshot == task->GetCloneInfo().GetFileType(); + } + + inline bool CloneCoreImpl::IsFile(std::shared_ptr task) + { + return CloneFileType::kFile == task->GetCloneInfo().GetFileType(); + } + + inline bool CloneCoreImpl::IsRecover(std::shared_ptr task) + { + return CloneTaskType::kRecover == task->GetCloneInfo().GetTaskType(); + } + + inline bool CloneCoreImpl::IsClone(std::shared_ptr task) + { + return CloneTaskType::kClone == task->GetCloneInfo().GetTaskType(); + } + + bool CloneCoreImpl::NeedUpdateCloneMeta(std::shared_ptr task) + { + bool ret = true; + CloneStep step = task->GetCloneInfo().GetNextStep(); + if (CloneStep::kCreateCloneFile == step || + CloneStep::kCreateCloneMeta == step || CloneStep::kEnd == step) + { + ret = false; + } + return ret; + } + + bool CloneCoreImpl::NeedRetry(std::shared_ptr task, + int retCode) + { + if (IsLazy(task)) + { + CloneStep step = task->GetCloneInfo().GetNextStep(); + if (CloneStep::kRecoverChunk == step || + CloneStep::kCompleteCloneFile == step || CloneStep::kEnd == step) + { + // In scenarios where the file does not exist, there is no need to + // retry as it may have been deleted + if (retCode != kErrCodeFileNotExist) + { + return true; + } + } + } + return false; + } + + int CloneCoreImpl::CreateOrUpdateCloneMeta(std::shared_ptr task, + FInfo *fInfo, + CloneSegmentMap *segInfos) + { + std::string newFileName = + cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); + std::string user = fInfo->owner; + FInfo fInfoOut; + int ret = client_->GetFileInfo(newFileName, mdsRootUser_, &fInfoOut); + if (LIBCURVE_ERROR::OK == ret) + { + // nothing + } + else if (-LIBCURVE_ERROR::NOTEXIST == ret) + { + // Perhaps it has already been renamed + newFileName = task->GetCloneInfo().GetDest(); + ret = client_->GetFileInfo(newFileName, mdsRootUser_, &fInfoOut); + if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "File is missing, " + << "when CreateOrUpdateCloneMeta, " + << "GetFileInfo fail, ret = " << ret + << ", filename = " << newFileName + << ", taskid = " << task->GetTaskId(); + return kErrCodeFileNotExist; + } + // If it has already been renamed, then the id should be consistent + uint64_t originId = task->GetCloneInfo().GetOriginId(); + if (fInfoOut.id != originId) + { + LOG(ERROR) << "File is missing, fileId not equal, " + << "when CreateOrUpdateCloneMeta" + << ", fileId = " << fInfoOut.id + << ", originId = " << originId + << ", filename = " << newFileName + << ", taskid = " << task->GetTaskId(); + return kErrCodeFileNotExist; + } + } + else + { + LOG(ERROR) << "GetFileInfo fail" + << ", ret = " << ret << ", filename = " << newFileName + << ", user = " << user << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } - cloneChunkInfo.second.chunkIdInfo = - segInfoOut.chunkvec[cloneChunkInfo.first]; + // Update fInfo + *fInfo = fInfoOut; + // GetOrAllocateSegment depends on fullPathName and needs to be updated here + fInfo->fullPathName = newFileName; + + uint32_t 
segmentSize = fInfo->segmentsize; + for (auto &segInfo : *segInfos) + { + SegmentInfo segInfoOut; + uint64_t offset = segInfo.first * segmentSize; + ret = client_->GetOrAllocateSegmentInfo(true, offset, fInfo, + mdsRootUser_, &segInfoOut); + if (ret != LIBCURVE_ERROR::OK) + { + LOG(ERROR) << "GetOrAllocateSegmentInfo fail" + << ", newFileName = " << newFileName + << ", user = " << user << ", offset = " << offset + << ", allocateIfNotExist = " + << "true" + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + + for (auto &cloneChunkInfo : segInfo.second) + { + if (cloneChunkInfo.first > segInfoOut.chunkvec.size()) + { + LOG(ERROR) << "can not find chunkIndexInSeg = " + << cloneChunkInfo.first + << ", segmentIndex = " << segInfo.first + << ", logicalPoolId = " + << cloneChunkInfo.second.chunkIdInfo.lpid_ + << ", copysetId = " + << cloneChunkInfo.second.chunkIdInfo.cpid_ + << ", chunkId = " + << cloneChunkInfo.second.chunkIdInfo.cid_ + << ", taskid = " << task->GetTaskId(); + return kErrCodeInternalError; + } + cloneChunkInfo.second.chunkIdInfo = + segInfoOut.chunkvec[cloneChunkInfo.first]; + } + } + return kErrCodeSuccess; } - } - return kErrCodeSuccess; -} - -int CloneCoreImpl::CleanCloneOrRecoverTaskPre(const std::string &user, - const TaskIdType &taskId, - CloneInfo *cloneInfo) { - int ret = metaStore_->GetCloneInfo(taskId, cloneInfo); - if (ret < 0) { - // 不存在时直接返回成功,使接口幂等 - return kErrCodeSuccess; - } - if (cloneInfo->GetUser() != user) { - LOG(ERROR) << "CleanCloneOrRecoverTaskPre by Invalid user"; - return kErrCodeInvalidUser; - } - switch (cloneInfo->GetStatus()) { - case CloneStatus::done: - cloneInfo->SetStatus(CloneStatus::cleaning); - break; - case CloneStatus::error: - cloneInfo->SetStatus(CloneStatus::errorCleaning); - break; - case CloneStatus::cleaning: - case CloneStatus::errorCleaning: - return kErrCodeTaskExist; - break; - default: - LOG(ERROR) << "Can not clean clone/recover task unfinished."; - return kErrCodeCannotCleanCloneUnfinished; - break; - } - - ret = metaStore_->UpdateCloneInfo(*cloneInfo); - if (ret < 0) { - LOG(ERROR) << "UpdateCloneInfo fail" - << ", ret = " << ret - << ", taskId = " << taskId; - return ret; - } - return kErrCodeSuccess; -} - -void CloneCoreImpl::HandleCleanCloneOrRecoverTask( - std::shared_ptr task) { - // 只有错误的clone/recover任务才清理临时文件 - if (CloneStatus::errorCleaning == task->GetCloneInfo().GetStatus()) { - // 错误情况下可能未清除镜像被克隆标志 - if (IsFile(task)) { - // 重新发送 - std::string source = task->GetCloneInfo().GetSrc(); - NameLockGuard lockGuard(cloneRef_->GetLock(), source); - if (cloneRef_->GetRef(source) == 0) { - int ret = client_->SetCloneFileStatus(source, - FileStatus::Created, mdsRootUser_); - if (ret != LIBCURVE_ERROR::OK && - ret != -LIBCURVE_ERROR::NOTEXIST) { - LOG(ERROR) << "SetCloneFileStatus fail, ret = " << ret + + int CloneCoreImpl::CleanCloneOrRecoverTaskPre(const std::string &user, + const TaskIdType &taskId, + CloneInfo *cloneInfo) + { + int ret = metaStore_->GetCloneInfo(taskId, cloneInfo); + if (ret < 0) + { + // Directly returns success when it does not exist, making the interface + // idempotent + return kErrCodeSuccess; + } + if (cloneInfo->GetUser() != user) + { + LOG(ERROR) << "CleanCloneOrRecoverTaskPre by Invalid user"; + return kErrCodeInvalidUser; + } + switch (cloneInfo->GetStatus()) + { + case CloneStatus::done: + cloneInfo->SetStatus(CloneStatus::cleaning); + break; + case CloneStatus::error: + cloneInfo->SetStatus(CloneStatus::errorCleaning); + break; + case CloneStatus::cleaning: + case 
CloneStatus::errorCleaning:
+            return kErrCodeTaskExist;
+            break;
+        default:
+            LOG(ERROR) << "Can not clean clone/recover task unfinished.";
+            return kErrCodeCannotCleanCloneUnfinished;
+            break;
+        }
+
+        ret = metaStore_->UpdateCloneInfo(*cloneInfo);
+        if (ret < 0)
+        {
+            LOG(ERROR) << "UpdateCloneInfo fail"
+                       << ", ret = " << ret << ", taskId = " << taskId;
+            return ret;
+        }
+        return kErrCodeSuccess;
+    }
+
+    void CloneCoreImpl::HandleCleanCloneOrRecoverTask(
+        std::shared_ptr<CloneTaskInfo> task)
+    {
+        // Only failed clone/recover tasks clean up the temporary file
+        if (CloneStatus::errorCleaning == task->GetCloneInfo().GetStatus())
+        {
+            // On error, the source image's "being cloned" flag may not have
+            // been cleared
+            if (IsFile(task))
+            {
+                // Send the status update again
+                std::string source = task->GetCloneInfo().GetSrc();
+                NameLockGuard lockGuard(cloneRef_->GetLock(), source);
+                if (cloneRef_->GetRef(source) == 0)
+                {
+                    int ret = client_->SetCloneFileStatus(
+                        source, FileStatus::Created, mdsRootUser_);
+                    if (ret != LIBCURVE_ERROR::OK &&
+                        ret != -LIBCURVE_ERROR::NOTEXIST)
+                    {
+                        LOG(ERROR) << "SetCloneFileStatus fail, ret = " << ret
+                                   << ", taskid = " << task->GetTaskId();
+                        HandleCleanError(task);
+                        return;
+                    }
+                }
+            }
+            std::string tempFileName =
+                cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId();
+            uint64_t fileId = task->GetCloneInfo().GetOriginId();
+            std::string user = task->GetCloneInfo().GetUser();
+            int ret = client_->DeleteFile(tempFileName, mdsRootUser_, fileId);
+            if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOTEXIST)
+            {
+                LOG(ERROR) << "DeleteFile failed"
+                           << ", ret = " << ret << ", fileName = " << tempFileName
+                           << ", user = " << user << ", fileId = " << fileId
                            << ", taskid = " << task->GetTaskId();
                 HandleCleanError(task);
                 return;
             }
         }
-        std::string tempFileName =
-            cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId();
-        uint64_t fileId = task->GetCloneInfo().GetOriginId();
-        std::string user =
-            task->GetCloneInfo().GetUser();
-        int ret = client_->DeleteFile(tempFileName, mdsRootUser_, fileId);
-        if (ret != LIBCURVE_ERROR::OK &&
-            ret != -LIBCURVE_ERROR::NOTEXIST) {
-            LOG(ERROR) << "DeleteFile failed"
-                       << ", ret = " << ret
-                       << ", fileName = " << tempFileName
-                       << ", user = " << user
-                       << ", fileId = " << fileId
-                       << ", taskid = " << task->GetTaskId();
-            HandleCleanError(task);
+        HandleCleanSuccess(task);
         return;
     }
-    }
-    HandleCleanSuccess(task);
-    return;
-}
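CleanCloneOrRecoverTaskPre above is deliberately idempotent: a missing record is reported as success, and an already-running clean task is reported as kErrCodeTaskExist. A hedged sketch of how a caller can rely on that behavior (the wrapper below is illustrative only, not part of this patch):

    // Sketch only: re-issuing a clean request after a timeout is safe because
    // CleanCloneOrRecoverTaskPre() treats "record already gone" as success.
    int RequestCleanIdempotent(CloneCore* core, const std::string& user,
                               const TaskIdType& taskId) {
        CloneInfo cloneInfo;
        int ret = core->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfo);
        if (ret == kErrCodeTaskExist) {
            // A clean task is already in flight; treat the request as accepted.
            return kErrCodeSuccess;
        }
        return ret;  // kErrCodeSuccess, kErrCodeInvalidUser, ...
    }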
-
-int CloneCoreImpl::HandleRemoveCloneOrRecoverTask(
-    std::shared_ptr<CloneTaskInfo> task) {
-    TaskIdType taskId = task->GetCloneInfo().GetTaskId();
-    int ret = metaStore_->DeleteCloneInfo(taskId);
-    if (ret < 0) {
-        LOG(ERROR) << "DeleteCloneInfo failed"
-                   << ", ret = " << ret
-                   << ", taskId = " << taskId;
+
+    int CloneCoreImpl::HandleRemoveCloneOrRecoverTask(
+        std::shared_ptr<CloneTaskInfo> task)
+    {
+        TaskIdType taskId = task->GetCloneInfo().GetTaskId();
+        int ret = metaStore_->DeleteCloneInfo(taskId);
+        if (ret < 0)
+        {
+            LOG(ERROR) << "DeleteCloneInfo failed"
+                       << ", ret = " << ret << ", taskId = " << taskId;
             return kErrCodeInternalError;
         }
+
+        if (IsSnapshot(task))
+        {
+            snapshotRef_->DecrementSnapshotRef(task->GetCloneInfo().GetSrc());
+        }
+        else
+        {
+            std::string source = task->GetCloneInfo().GetSrc();
+            cloneRef_->DecrementRef(source);
+            NameLockGuard lockGuard(cloneRef_->GetLock(), source);
+            if (cloneRef_->GetRef(source) == 0)
+            {
+                int ret = client_->SetCloneFileStatus(source, FileStatus::Created,
+                                                      mdsRootUser_);
+                if (ret < 0)
+                {
+                    LOG(ERROR) << "Task Fail cause by SetCloneFileStatus fail"
+                               << ", ret = " << ret << ", TaskInfo : " << *task;
+                    return kErrCodeInternalError;
+                }
+            }
+        }
+
+        return kErrCodeSuccess;
     }
-    }
-
-    return kErrCodeSuccess;
-}
-
-int CloneCoreImpl::CheckFileExists(const std::string &filename,
-                                   uint64_t inodeId) {
-    FInfo destFInfo;
-    int ret = client_->GetFileInfo(filename, mdsRootUser_, &destFInfo);
-    if (ret == LIBCURVE_ERROR::OK) {
-        if (destFInfo.id == inodeId) {
-            return kErrCodeFileExist;
-        } else {
-            return kErrCodeFileNotExist;
+    int CloneCoreImpl::CheckFileExists(const std::string &filename,
+                                       uint64_t inodeId)
+    {
+        FInfo destFInfo;
+        int ret = client_->GetFileInfo(filename, mdsRootUser_, &destFInfo);
+        if (ret == LIBCURVE_ERROR::OK)
+        {
+            if (destFInfo.id == inodeId)
+            {
+                return kErrCodeFileExist;
+            }
+            else
+            {
+                return kErrCodeFileNotExist;
+            }
+        }
+
+        if (ret == -LIBCURVE_ERROR::NOTEXIST)
+        {
+            return kErrCodeFileNotExist;
+        }
+
+        return kErrCodeInternalError;
     }
-    }
-
-    if (ret == -LIBCURVE_ERROR::NOTEXIST) {
-        return kErrCodeFileNotExist;
-    }
-
-    return kErrCodeInternalError;
-}
-
-// 加减引用计数的时候,接口里面会对引用计数map加锁;
-// 加引用计数、处理引用计数减到0的时候,需要额外对修改的那条记录加锁。
-int CloneCoreImpl::HandleDeleteCloneInfo(const CloneInfo &cloneInfo) {
-    // 先减引用计数,如果是从镜像克隆且引用计数减到0,需要修改源镜像的状态为created
-    std::string source = cloneInfo.GetSrc();
-    if (cloneInfo.GetFileType() == CloneFileType::kSnapshot) {
-        snapshotRef_->DecrementSnapshotRef(source);
-    } else {
-        cloneRef_->DecrementRef(source);
-        NameLockGuard lockGuard(cloneRef_->GetLock(), source);
-        if (cloneRef_->GetRef(source) == 0) {
-            int ret = client_->SetCloneFileStatus(source,
-                FileStatus::Created, mdsRootUser_);
-            if (ret == -LIBCURVE_ERROR::NOTEXIST) {
-                LOG(WARNING) << "SetCloneFileStatus, file not exist, filename: "
-                             << source;
-            } else if (ret != LIBCURVE_ERROR::OK) {
-                cloneRef_->IncrementRef(source);
-                LOG(ERROR) << "SetCloneFileStatus fail"
-                           << ", ret = " << ret
-                           << ", cloneInfo : " << cloneInfo;
+
+    // When incrementing or decrementing a reference count, the interface
+    // locks the reference-count map internally; when incrementing, or when a
+    // count drops to zero, the record being modified must additionally be
+    // locked.
+    int CloneCoreImpl::HandleDeleteCloneInfo(const CloneInfo &cloneInfo)
+    {
+        // First, reduce the reference count. 
If you are cloning from a mirror and + // the reference count is reduced to 0, you need to modify the status of the + // source mirror to 'created' + std::string source = cloneInfo.GetSrc(); + if (cloneInfo.GetFileType() == CloneFileType::kSnapshot) + { + snapshotRef_->DecrementSnapshotRef(source); + } + else + { + cloneRef_->DecrementRef(source); + NameLockGuard lockGuard(cloneRef_->GetLock(), source); + if (cloneRef_->GetRef(source) == 0) + { + int ret = client_->SetCloneFileStatus(source, FileStatus::Created, + mdsRootUser_); + if (ret == -LIBCURVE_ERROR::NOTEXIST) + { + LOG(WARNING) << "SetCloneFileStatus, file not exist, filename: " + << source; + } + else if (ret != LIBCURVE_ERROR::OK) + { + cloneRef_->IncrementRef(source); + LOG(ERROR) << "SetCloneFileStatus fail" + << ", ret = " << ret + << ", cloneInfo : " << cloneInfo; + return kErrCodeInternalError; + } + } + } + + // Delete this record. If the deletion fails, add back the previously + // subtracted reference count + int ret = metaStore_->DeleteCloneInfo(cloneInfo.GetTaskId()); + if (ret != 0) + { + if (cloneInfo.GetFileType() == CloneFileType::kSnapshot) + { + NameLockGuard lockSnapGuard(snapshotRef_->GetSnapshotLock(), + source); + snapshotRef_->IncrementSnapshotRef(source); + } + else + { + NameLockGuard lockGuard(cloneRef_->GetLock(), source); + cloneRef_->IncrementRef(source); + } + LOG(ERROR) << "DeleteCloneInfo failed" + << ", ret = " << ret << ", CloneInfo = " << cloneInfo; return kErrCodeInternalError; } - } - } - - // 删除这条记录,如果删除失败,把前面已经减掉的引用计数加回去 - int ret = metaStore_->DeleteCloneInfo(cloneInfo.GetTaskId()); - if (ret != 0) { - if (cloneInfo.GetFileType() == CloneFileType::kSnapshot) { - NameLockGuard lockSnapGuard(snapshotRef_->GetSnapshotLock(), - source); - snapshotRef_->IncrementSnapshotRef(source); - } else { - NameLockGuard lockGuard(cloneRef_->GetLock(), source); - cloneRef_->IncrementRef(source); - } - LOG(ERROR) << "DeleteCloneInfo failed" - << ", ret = " << ret - << ", CloneInfo = " << cloneInfo; - return kErrCodeInternalError; - } - LOG(INFO) << "HandleDeleteCloneInfo success" - << ", cloneInfo = " << cloneInfo; + LOG(INFO) << "HandleDeleteCloneInfo success" + << ", cloneInfo = " << cloneInfo; - return kErrCodeSuccess; -} + return kErrCodeSuccess; + } -} // namespace snapshotcloneserver -} // namespace curve + } // namespace snapshotcloneserver +} // namespace curve diff --git a/src/snapshotcloneserver/clone/clone_core.h b/src/snapshotcloneserver/clone/clone_core.h index 19c1c20c9d..f33e2f8d5c 100644 --- a/src/snapshotcloneserver/clone/clone_core.h +++ b/src/snapshotcloneserver/clone/clone_core.h @@ -23,20 +23,20 @@ #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_CORE_H_ #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_CORE_H_ +#include +#include #include #include #include -#include -#include -#include "src/snapshotcloneserver/common/curvefs_client.h" +#include "src/common/concurrent/name_lock.h" #include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" -#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" -#include "src/snapshotcloneserver/common/snapshot_reference.h" #include "src/snapshotcloneserver/clone/clone_reference.h" +#include "src/snapshotcloneserver/common/curvefs_client.h" +#include "src/snapshotcloneserver/common/snapshot_reference.h" +#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" #include "src/snapshotcloneserver/common/thread_pool.h" -#include "src/common/concurrent/name_lock.h" +#include 
"src/snapshotcloneserver/snapshot/snapshot_data_store.h" using ::curve::common::NameLock; @@ -51,359 +51,334 @@ class CloneCore { virtual ~CloneCore() {} /** - * @brief 克隆或恢复任务前置 + * @brief Clone or restore task ahead * - * @param source 克隆或恢复源 - * @param user 用户名 - * @param destination 克隆或恢复的目标文件名 - * @param lazyFlag 是否lazy - * @param taskType 克隆或恢复 - * @param poolset 克隆时目标文件的poolset - * @param[out] info 克隆或恢复任务信息 + * @param source Clone or restore source + * @param user username + * @param destination The target file name for cloning or restoring + * @param lazyFlag is lazy + * @param taskType clone or restore + * @param poolset The poolset of the target file during cloning + * @param[out] info Clone or restore task information * - * @return 错误码 + * @return error code */ - virtual int CloneOrRecoverPre(const UUID &source, - const std::string &user, - const std::string &destination, - bool lazyFlag, - CloneTaskType taskType, - std::string poolset, - CloneInfo *info) = 0; + virtual int CloneOrRecoverPre(const UUID& source, const std::string& user, + const std::string& destination, bool lazyFlag, + CloneTaskType taskType, std::string poolset, + CloneInfo* info) = 0; /** - * @brief 处理克隆或恢复任务 + * @brief Processing cloning or recovery tasks * - * @param task 克隆或恢复任务 + * @param task Clone or restore task */ virtual void HandleCloneOrRecoverTask( std::shared_ptr task) = 0; /** - * @brief 清理克隆或恢复任务前置 + * @brief Clean clone or restore tasks ahead * - * @param user 用户名 - * @param taskId 任务Id - * @param[out] cloneInfo 克隆或恢复信息 + * @param user username + * @param taskId Task Id + * @param[out] cloneInfo Clone or restore information * - * @return 错误码 + * @return error code */ - virtual int CleanCloneOrRecoverTaskPre(const std::string &user, - const TaskIdType &taskId, - CloneInfo *cloneInfo) = 0; - + virtual int CleanCloneOrRecoverTaskPre(const std::string& user, + const TaskIdType& taskId, + CloneInfo* cloneInfo) = 0; /** - * @brief 异步处理清理克隆或恢复任务 + * @brief Asynchronous processing of clean clone or restore tasks * - * @param task 克隆或恢复任务 + * @param task Clone or restore task */ virtual void HandleCleanCloneOrRecoverTask( std::shared_ptr task) = 0; /** - * @brief 安装克隆文件数据的前置工作 - * - 进行一些必要的检查 - * - 获取并返回克隆信息 - * - 更新数据库状态 + * @brief Pre work for installing clone file data + * - Conduct necessary inspections + * - Obtain and return clone information + * - Update database status * - * @param user 用户名 - * @param taskId 任务Id - * @param[out] cloneInfo 克隆信息 + * @param user username + * @param taskId Task Id + * @param[out] cloneInfo clone information * - * @return 错误码 + * @return error code */ - virtual int FlattenPre( - const std::string &user, - const TaskIdType &taskId, - CloneInfo *cloneInfo) = 0; + virtual int FlattenPre(const std::string& user, const TaskIdType& taskId, + CloneInfo* cloneInfo) = 0; /** - * @brief 获取全部克隆/恢复任务列表,用于重启后恢复执行 + * @brief Get a list of all clone/restore tasks for resuming execution after + * reboot * - * @param[out] cloneInfos 克隆/恢复任务列表 + * @param[out] cloneInfos Clone/Restore Task List * - * @return 错误码 + * @return error code */ - virtual int GetCloneInfoList(std::vector *cloneInfos) = 0; + virtual int GetCloneInfoList(std::vector* cloneInfos) = 0; /** - * @brief 获取指定id的克隆/恢复任务 + * @brief Get the clone/restore task for the specified ID * - * @param taskId  任务id - * @param cloneInfo 克隆/恢复任务 + * @param taskId Task ID + * @param cloneInfo Clone/Restore Task * - * @retVal 0 获取成功 - * @retVal -1 获取失败 + * @retval 0 successfully obtained + * @retval -1 failed to obtain */ - virtual 
 
     /**
-     * @brief 获取指定文件名的克隆/恢复任务
+     * @brief Get the clone/recover tasks with the specified file name
      *
-     * @param fileName  文件名
-     * @param list 克隆/恢复任务列表
+     * @param fileName File name
+     * @param list List of clone/recover tasks
      *
-     * @retVal 0 获取成功
-     * @retVal -1 获取失败
+     * @retval 0 Obtained successfully
+     * @retval -1 Failed to obtain
      */
-    virtual int GetCloneInfoByFileName(
-        const std::string &fileName, std::vector<CloneInfo> *list) = 0;
+    virtual int GetCloneInfoByFileName(const std::string& fileName,
+                                       std::vector<CloneInfo>* list) = 0;
 
     /**
-     * @brief 获取快照引用管理模块
+     * @brief Get the snapshot reference management module
      *
-     * @return 快照引用管理模块
+     * @return The snapshot reference management module
      */
     virtual std::shared_ptr<SnapshotReference> GetSnapshotRef() = 0;
-
     /**
-     * @brief 获取镜像引用管理模块
+     * @brief Get the image reference management module
      *
-     * @return 镜像引用管理模块
+     * @return The image reference management module
      */
     virtual std::shared_ptr<CloneReference> GetCloneRef() = 0;
-
     /**
-     * @brief 移除克隆/恢复任务
+     * @brief Remove a clone/recover task
      *
-     * @param task 克隆任务
+     * @param task The clone task
      *
-     * @return 错误码
+     * @return error code
      */
     virtual int HandleRemoveCloneOrRecoverTask(
         std::shared_ptr<CloneTaskInfo> task) = 0;
 
     /**
-     * @brief 检查文件是否存在
+     * @brief Check whether the file exists
      *
-     * @param filename 文件名
+     * @param filename File name
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int CheckFileExists(const std::string &filename,
+    virtual int CheckFileExists(const std::string& filename,
                                 uint64_t inodeId) = 0;
 
     /**
-     * @brief 删除cloneInfo
+     * @brief Delete a cloneInfo
      *
-     * @param cloneInfo 待删除的cloneInfo
+     * @param cloneInfo The cloneInfo to be deleted
      *
-     * @return 错误码
+     * @return error code
     */
-    virtual int HandleDeleteCloneInfo(const CloneInfo &cloneInfo) = 0;
+    virtual int HandleDeleteCloneInfo(const CloneInfo& cloneInfo) = 0;
 };
 
 /**
- * @brief 克隆/恢复所需chunk信息
+ * @brief Chunk information required for cloning/recovery
 */
 struct CloneChunkInfo {
-    // 该chunk的id信息
+    // The ID information of the chunk
     ChunkIDInfo chunkIdInfo;
-    // 位置信息,如果在s3上,是objectName,否则在curvefs上,则是offset
+    // Location information: on s3 it is the objectName, on curvefs it is the
+    // offset
     std::string location;
-    // 该chunk的版本号
+    // The version number of the chunk
     uint64_t seqNum;
-    // chunk是否需要recover
+    // Whether the chunk needs to be recovered
     bool needRecover;
 };
 
-// 克隆/恢复所需segment信息,key是ChunkIndex In Segment, value是chunk信息
+// The segment information required for cloning/recovery, where the key is the
+// chunk index in the segment and the value is the chunk information
 using CloneSegmentInfo = std::map<uint64_t, CloneChunkInfo>;
-// 克隆/恢复所需segment信息表,key是segmentIndex
+// The segment information table required for cloning/recovery, where the key
+// is the segment index
 using CloneSegmentMap = std::map<uint64_t, CloneSegmentInfo>;
 
 class CloneCoreImpl : public CloneCore {
  public:
-    static const std::string kCloneTempDir;
+     static const std::string kCloneTempDir;
 
  public:
-    CloneCoreImpl(
-        std::shared_ptr<CurveFsClient> client,
-        std::shared_ptr<SnapshotCloneMetaStore> metaStore,
-        std::shared_ptr<SnapshotDataStore> dataStore,
-        std::shared_ptr<SnapshotReference> snapshotRef,
-        std::shared_ptr<CloneReference> cloneRef,
-        const SnapshotCloneServerOptions option)
-        : client_(client),
-          metaStore_(metaStore),
-          dataStore_(dataStore),
-          snapshotRef_(snapshotRef),
-          cloneRef_(cloneRef),
-          cloneChunkSplitSize_(option.cloneChunkSplitSize),
-          cloneTempDir_(option.cloneTempDir),
-          mdsRootUser_(option.mdsRootUser),
-          createCloneChunkConcurrency_(option.createCloneChunkConcurrency),
-          recoverChunkConcurrency_(option.recoverChunkConcurrency),
-
clientAsyncMethodRetryTimeSec_(option.clientAsyncMethodRetryTimeSec), - clientAsyncMethodRetryIntervalMs_( - option.clientAsyncMethodRetryIntervalMs) {} - - ~CloneCoreImpl() { - } + CloneCoreImpl(std::shared_ptr client, + std::shared_ptr metaStore, + std::shared_ptr dataStore, + std::shared_ptr snapshotRef, + std::shared_ptr cloneRef, + const SnapshotCloneServerOptions option) + : client_(client), + metaStore_(metaStore), + dataStore_(dataStore), + snapshotRef_(snapshotRef), + cloneRef_(cloneRef), + cloneChunkSplitSize_(option.cloneChunkSplitSize), + cloneTempDir_(option.cloneTempDir), + mdsRootUser_(option.mdsRootUser), + createCloneChunkConcurrency_(option.createCloneChunkConcurrency), + recoverChunkConcurrency_(option.recoverChunkConcurrency), + clientAsyncMethodRetryTimeSec_(option.clientAsyncMethodRetryTimeSec), + clientAsyncMethodRetryIntervalMs_( + option.clientAsyncMethodRetryIntervalMs) {} + + ~CloneCoreImpl() {} int Init(); - int CloneOrRecoverPre(const UUID &source, - const std::string &user, - const std::string &destination, - bool lazyFlag, - CloneTaskType taskType, - std::string poolset, - CloneInfo *info) override; + int CloneOrRecoverPre(const UUID& source, const std::string& user, + const std::string& destination, bool lazyFlag, + CloneTaskType taskType, std::string poolset, + CloneInfo* info) override; void HandleCloneOrRecoverTask(std::shared_ptr task) override; - int CleanCloneOrRecoverTaskPre(const std::string &user, - const TaskIdType &taskId, - CloneInfo *cloneInfo) override; + int CleanCloneOrRecoverTaskPre(const std::string& user, + const TaskIdType& taskId, + CloneInfo* cloneInfo) override; void HandleCleanCloneOrRecoverTask( std::shared_ptr task) override; - int FlattenPre( - const std::string &user, - const std::string &fileName, - CloneInfo *cloneInfo) override; + int FlattenPre(const std::string& user, const std::string& fileName, + CloneInfo* cloneInfo) override; - int GetCloneInfoList(std::vector *taskList) override; - int GetCloneInfo(TaskIdType taskId, CloneInfo *cloneInfo) override; + int GetCloneInfoList(std::vector* taskList) override; + int GetCloneInfo(TaskIdType taskId, CloneInfo* cloneInfo) override; - int GetCloneInfoByFileName( - const std::string &fileName, std::vector *list) override; + int GetCloneInfoByFileName(const std::string& fileName, + std::vector* list) override; - std::shared_ptr GetSnapshotRef() { - return snapshotRef_; - } + std::shared_ptr GetSnapshotRef() { return snapshotRef_; } - std::shared_ptr GetCloneRef() { - return cloneRef_; - } + std::shared_ptr GetCloneRef() { return cloneRef_; } int HandleRemoveCloneOrRecoverTask( std::shared_ptr task) override; - int CheckFileExists(const std::string &filename, - uint64_t inodeId) override; - int HandleDeleteCloneInfo(const CloneInfo &cloneInfo) override; + int CheckFileExists(const std::string& filename, uint64_t inodeId) override; + int HandleDeleteCloneInfo(const CloneInfo& cloneInfo) override; private: /** - * @brief 从快照构建克隆/恢复的文件信息 + * @brief Build clone/restore file information from snapshot * - * @param task 任务信息 - * @param[out] newFileInfo 新构建的文件信息 - * @param[out] segInfos 新构建文件的segment信息 + * @param task task information + * @param[out] newFileInfo Newly constructed file information + * @param[out] segInfos The segment information of the newly constructed + * file * - * @return 错误码 + * @return error code */ - int BuildFileInfoFromSnapshot( - std::shared_ptr task, - FInfo *newFileInfo, - CloneSegmentMap *segInfos); + int BuildFileInfoFromSnapshot(std::shared_ptr task, + FInfo* 
newFileInfo, + CloneSegmentMap* segInfos); /** - * @brief 从源文件构建克隆/恢复的文件信息 + * @brief Build clone/restore file information from source files * - * @param task 任务信息 - * @param[out] newFileInfo 新构建的文件信息 - * @param[out] segInfos 新构建文件的segment信息 + * @param task task information + * @param[out] newFileInfo Newly constructed file information + * @param[out] segInfos The segment information of the newly constructed + * file * - * @return 错误码 + * @return error code */ - int BuildFileInfoFromFile( - std::shared_ptr task, - FInfo *newFileInfo, - CloneSegmentMap *segInfos); - + int BuildFileInfoFromFile(std::shared_ptr task, + FInfo* newFileInfo, CloneSegmentMap* segInfos); /** - * @brief 判断是否需要更新CloneChunkInfo信息中的chunkIdInfo + * @brief to determine if it is necessary to update chunkIdInfo in + * CloneChunkInfo information * - * @param task 任务信息 + * @param task task information * - * @retVal true 需要更新 - * @retVal false 不需要更新 + * @retval true needs to be updated + * @retval false No update required */ - bool NeedUpdateCloneMeta( - std::shared_ptr task); + bool NeedUpdateCloneMeta(std::shared_ptr task); /** - * @brief 判断clone失败后是否需要重试 + * @brief: Determine whether to retry after clone failure * - * @param task 任务信息 - * @param retCode 错误码 + * @param task task information + * @param retCode error code * - * @retVal true 需要 - * @retVal false 不需要 + * @retval true requires + * @retval false No need */ - bool NeedRetry(std::shared_ptr task, - int retCode); + bool NeedRetry(std::shared_ptr task, int retCode); /** - * @brief 创建clone的元数据信息或更新元数据信息 + * @brief Create metadata information for clone or update metadata + * information * - * @param task 任务信息 - * @param[int][out] fInfo 新创建的文件信息 - * @param[int][out] segInfos 文件的segment信息 + * @param task task information + * @param[int][out] fInfo Newly created file information + * @param[int][out] segInfosThe segment information of the file * - * @return 错误码 + * @return error code */ - int CreateOrUpdateCloneMeta( - std::shared_ptr task, - FInfo *fInfo, - CloneSegmentMap *segInfos); + int CreateOrUpdateCloneMeta(std::shared_ptr task, + FInfo* fInfo, CloneSegmentMap* segInfos); /** - * @brief 创建新clone文件 + * @brief Create a new clone file * - * @param task 任务信息 - * @param fInfo 需创建的文件信息 + * @param task task information + * @param fInfo File information to be created * - * @return 错误码 + * @return error code */ - int CreateCloneFile( - std::shared_ptr task, - const FInfo &fInfo); + int CreateCloneFile(std::shared_ptr task, + const FInfo& fInfo); /** - * @brief 创建新文件的源信息(创建segment) + * @brief Create source information for new files (create segments) * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information for new files + * @param segInfos The segment information required for a new file * - * @return 错误码 + * @return error code */ - int CreateCloneMeta( - std::shared_ptr task, - FInfo *fInfo, - CloneSegmentMap *segInfos); + int CreateCloneMeta(std::shared_ptr task, FInfo* fInfo, + CloneSegmentMap* segInfos); /** - * @brief 创建新clone文件的chunk + * @brief Create a chunk for a new clone file * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information for new files + * @param segInfos The segment information required for a new file * - * @return 错误码 + * @return error code */ - int CreateCloneChunk( - std::shared_ptr task, - const FInfo &fInfo, - CloneSegmentMap *segInfos); + int 
/** - * @brief 创建新clone文件的chunk + * @brief Create the chunks of the new clone file * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information of the new file + * @param segInfos The segment information required by the new file * - * @return 错误码 + * @return error code */ - int CreateCloneChunk( - std::shared_ptr task, - const FInfo &fInfo, - CloneSegmentMap *segInfos); + int CreateCloneChunk(std::shared_ptr task, + const FInfo& fInfo, CloneSegmentMap* segInfos); /** - * @brief 开始CreateCloneChunk的异步请求 + * @brief Start the asynchronous CreateCloneChunk requests * - * @param task 任务信息 - * @param tracker CreateCloneChunk任务追踪器 - * @param context CreateCloneChunk上下文 + * @param task task information + * @param tracker CreateCloneChunk task tracker + * @param context CreateCloneChunk context * - * @return 错误码 + * @return error code */ int StartAsyncCreateCloneChunk( std::shared_ptr task, @@ -411,55 +386,51 @@ class CloneCoreImpl : public CloneCore { std::shared_ptr context); /** - * @brief 处理CreateCloneChunk的结果并重试 + * @brief Process the CreateCloneChunk results and retry failed requests * - * @param task 任务信息 - * @param tracker CreateCloneChunk任务追踪器 - * @param results CreateCloneChunk结果列表 + * @param task task information + * @param tracker CreateCloneChunk task tracker + * @param results CreateCloneChunk result list * - * @return 错误码 + * @return error code */ int HandleCreateCloneChunkResultsAndRetry( std::shared_ptr task, std::shared_ptr tracker, - const std::list &results); + const std::list& results); /** - * @brief 通知mds完成源数据创建步骤 + * @brief Notify mds that the source data creation step is complete * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information of the new file + * @param segInfos The segment information required by the new file * - * @return 错误码 + * @return error code */ - int CompleteCloneMeta( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos); + int CompleteCloneMeta(std::shared_ptr task, + const FInfo& fInfo, const CloneSegmentMap& segInfos); /** - * @brief 恢复chunk,即通知chunkserver拷贝数据 + * @brief Recover chunks, that is, notify the chunkserver to copy the data * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information of the new file + * @param segInfos The segment information required by the new file * - * @return 错误码 + * @return error code */ - int RecoverChunk( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos); + int RecoverChunk(std::shared_ptr task, const FInfo& fInfo, + const CloneSegmentMap& segInfos); /** - * @brief 开始RecoverChunk的异步请求 + * @brief Start the asynchronous RecoverChunk requests * - * @param task 任务信息 - * @param tracker RecoverChunk异步任务跟踪器 - * @param context RecoverChunk上下文 + * @param task task information + * @param tracker RecoverChunk asynchronous task tracker + * @param context RecoverChunk context * - * @return 错误码 + * @return error code */ int StartAsyncRecoverChunkPart( std::shared_ptr task, @@ -467,110 +438,103 @@ class CloneCoreImpl : public CloneCore { std::shared_ptr context); /** - * @brief 继续RecoverChunk的其他部分的请求以及等待完成某些RecoverChunk + * @brief Continue the requests for the remaining parts of RecoverChunk and + * wait for some of them to complete * - * @param task 任务信息 - * @param tracker RecoverChunk异步任务跟踪者 - * @param[out] completeChunkNum 完成的chunk数 + * @param task task information + * @param tracker RecoverChunk asynchronous task tracker + * @param[out] completeChunkNum Number of chunks completed * - * @return 错误码 + * @return error code */ int ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( std::shared_ptr task, std::shared_ptr tracker, - uint64_t *completeChunkNum); + uint64_t* completeChunkNum);
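The "part" wording above reflects that RecoverChunk is issued per chunk part rather than per whole chunk: the cloneChunkSplitSize_ member below and the partIndex/totalPartNum/partSize fields of RecoverChunkContext (in clone_task.h, later in this patch) suggest each chunk is cut into equally sized pieces. A minimal sketch of that split arithmetic, assuming the chunk size is a multiple of the split size; all names here are invented:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

struct ChunkPart {
    uint64_t index;   // part index within the chunk
    uint64_t offset;  // byte offset of this part
    uint64_t size;    // part size in bytes
};

// Split one chunk into totalPartNum = chunkSize / splitSize pieces.
std::vector<ChunkPart> SplitChunk(uint64_t chunkSize, uint64_t splitSize) {
    assert(splitSize > 0 && chunkSize % splitSize == 0);
    std::vector<ChunkPart> parts;
    uint64_t totalPartNum = chunkSize / splitSize;
    for (uint64_t i = 0; i < totalPartNum; ++i) {
        parts.push_back({i, i * splitSize, splitSize});
    }
    return parts;
}
```

Presumably this is the unit of work that ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd iterates over while the tracker bounds how many parts are in flight.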
/** - * @brief 修改克隆文件的owner + * @brief Modify the owner of the clone file * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 + * @param task task information + * @param fInfo File information of the new file * - * @return 错误码 + * @return error code */ - int ChangeOwner( - std::shared_ptr task, - const FInfo &fInfo); + int ChangeOwner(std::shared_ptr task, const FInfo& fInfo); /** - * @brief 重命名克隆文件 + * @brief Rename the clone file * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 + * @param task task information + * @param fInfo File information of the new file * - * @return 错误码 + * @return error code */ - int RenameCloneFile( - std::shared_ptr task, - const FInfo &fInfo); + int RenameCloneFile(std::shared_ptr task, + const FInfo& fInfo); /** - * @brief 通知mds完成数据创建 + * @brief Notify mds that data creation is complete * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information of the new file + * @param segInfos The segment information required by the new file * - * @return 错误码 + * @return error code */ - int CompleteCloneFile( - std::shared_ptr task, - const FInfo &fInfo, - const CloneSegmentMap &segInfos); + int CompleteCloneFile(std::shared_ptr task, + const FInfo& fInfo, const CloneSegmentMap& segInfos); /** - * @brief 从快照克隆时,更新快照状态,通知克隆完成 + * @brief When cloning from a snapshot, update the snapshot status and + * notify clone completion * - * @param task 任务信息 + * @param task task information * - * @return 错误码 + * @return error code */ - int UpdateSnapshotStatus( - std::shared_ptr task); + int UpdateSnapshotStatus(std::shared_ptr task); /** - * @brief 处理Lazy克隆/恢复阶段一结束 + * @brief Handle the end of stage one of a lazy clone/recover * - * @param task 任务信息 + * @param task task information */ - void HandleLazyCloneStage1Finish( - std::shared_ptr task); + void HandleLazyCloneStage1Finish(std::shared_ptr task); /** - * @brief 处理克隆/恢复成功 + * @brief Handle a successful clone/recover * - * @param task 任务信息 + * @param task task information */ void HandleCloneSuccess(std::shared_ptr task); - /** - * @brief 处理克隆或恢复失败 + * @brief Handle a failed clone or recover * - * @param task 任务信息 - * @param retCode 待处理的错误码 + * @param task task information + * @param retCode the error code to be handled */ - void HandleCloneError(std::shared_ptr task, - int retCode); + void HandleCloneError(std::shared_ptr task, int retCode); /** - * @brief Lazy Clone 情况下处理Clone任务失败重试 + * @brief Handle the retry of a failed clone task in the lazy clone case * - * @param task 任务信息 + * @param task task information */ void HandleCloneToRetry(std::shared_ptr task); /** - * @brief 处理清理克隆或恢复任务成功 + * @brief Handle a successful clean of a clone or recover task * - * @param task 任务信息 + * @param task task information */ void HandleCleanSuccess(std::shared_ptr task); /** - * @brief 处理清理克隆或恢复任务失败 + * @brief Handle a failed clean of a clone or recover task * - * @param task 任务信息 + * @param task task information */ void HandleCleanError(std::shared_ptr task); @@ -587,19 +551,19 @@ class CloneCoreImpl : public CloneCore { std::shared_ptr snapshotRef_; std::shared_ptr cloneRef_; - // clone chunk分片大小 + // Clone chunk split size uint64_t cloneChunkSplitSize_; - // 克隆临时目录 + // Clone temporary directory std::string cloneTempDir_; // mds root user std::string mdsRootUser_; - // CreateCloneChunk同时进行的异步请求数量 + // Number of concurrent asynchronous CreateCloneChunk requests uint32_t createCloneChunkConcurrency_; - // RecoverChunk同时进行的异步请求数量 + // Number of concurrent asynchronous RecoverChunk requests uint32_t recoverChunkConcurrency_; - // client异步请求重试时间 + // Client asynchronous 
request retry time uint64_t clientAsyncMethodRetryTimeSec_; - // 调用client异步方法重试时间间隔 + // Call client asynchronous method retry interval uint64_t clientAsyncMethodRetryIntervalMs_; }; diff --git a/src/snapshotcloneserver/clone/clone_service_manager.cpp b/src/snapshotcloneserver/clone/clone_service_manager.cpp index 9b7439fecf..98cf730c25 100644 --- a/src/snapshotcloneserver/clone/clone_service_manager.cpp +++ b/src/snapshotcloneserver/clone/clone_service_manager.cpp @@ -24,19 +24,19 @@ #include -#include #include +#include #include -#include "src/snapshotcloneserver/common/snapshotclone_metric.h" +#include "include/curve_compiler_specific.h" #include "src/common/snapshotclone/snapshotclone_define.h" #include "src/common/string_util.h" -#include "include/curve_compiler_specific.h" +#include "src/snapshotcloneserver/common/snapshotclone_metric.h" namespace curve { namespace snapshotcloneserver { -int CloneServiceManager::Init(const SnapshotCloneServerOptions &option) { +int CloneServiceManager::Init(const SnapshotCloneServerOptions& option) { dlockOpts_ = std::make_shared(option.dlockOpts); std::shared_ptr stage1Pool = std::make_shared(option.stage1PoolThreadNum); @@ -45,8 +45,8 @@ int CloneServiceManager::Init(const SnapshotCloneServerOptions &option) { std::shared_ptr commonPool = std::make_shared(option.commonPoolThreadNum); cloneServiceManagerBackend_->Init( - option.backEndReferenceRecordScanIntervalMs, - option.backEndReferenceFuncScanIntervalMs); + option.backEndReferenceRecordScanIntervalMs, + option.backEndReferenceFuncScanIntervalMs); return cloneTaskMgr_->Init(stage1Pool, stage2Pool, commonPool, option); } @@ -60,38 +60,34 @@ void CloneServiceManager::Stop() { cloneServiceManagerBackend_->Stop(); } -int CloneServiceManager::CloneFile(const UUID &source, - const std::string &user, - const std::string &destination, - const std::string &poolset, - bool lazyFlag, - std::shared_ptr closure, - TaskIdType *taskId) { - // 加锁防止并发 +int CloneServiceManager::CloneFile(const UUID& source, const std::string& user, + const std::string& destination, + const std::string& poolset, bool lazyFlag, + std::shared_ptr closure, + TaskIdType* taskId) { + // Locking to prevent concurrency NameLockGuard lockDestFileGuard(*destFileLock_, destination); brpc::ClosureGuard guard(closure.get()); closure->SetDestFileLock(destFileLock_); closure->SetDestFileName(destination); lockDestFileGuard.Release(); CloneInfo cloneInfo; - int ret = cloneCore_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, poolset, &cloneInfo); + int ret = cloneCore_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, poolset, + &cloneInfo); if (ret < 0) { if (kErrCodeTaskExist == ret) { - // 任务已存在的情况下返回成功,使接口幂等 + // Returns success if the task already exists, making the interface + // idempotent *taskId = cloneInfo.GetTaskId(); closure->SetTaskId(*taskId); closure->SetErrCode(kErrCodeSuccess); return kErrCodeSuccess; } LOG(ERROR) << "CloneOrRecoverPre error" - << ", ret = " << ret - << ", source = " << source - << ", user = " << user - << ", destination = " << destination - << ", lazyFlag = " << lazyFlag - << ", poolset = " << poolset; + << ", ret = " << ret << ", source = " << source + << ", user = " << user << ", destination = " << destination + << ", lazyFlag = " << lazyFlag << ", poolset = " << poolset; closure->SetErrCode(ret); return ret; } @@ -106,35 +102,31 @@ int CloneServiceManager::CloneFile(const UUID &source, return ret; } -int CloneServiceManager::RecoverFile(const 
UUID &source, - const std::string &user, - const std::string &destination, - bool lazyFlag, - std::shared_ptr closure, - TaskIdType *taskId) { - // 加锁防止并发 +int CloneServiceManager::RecoverFile( + const UUID& source, const std::string& user, const std::string& destination, + bool lazyFlag, std::shared_ptr closure, TaskIdType* taskId) { + // Locking to prevent concurrency NameLockGuard lockDestFileGuard(*destFileLock_, destination); brpc::ClosureGuard guard(closure.get()); closure->SetDestFileLock(destFileLock_); closure->SetDestFileName(destination); lockDestFileGuard.Release(); CloneInfo cloneInfo; - int ret = cloneCore_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kRecover, "", &cloneInfo); + int ret = + cloneCore_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kRecover, "", &cloneInfo); if (ret < 0) { if (kErrCodeTaskExist == ret) { - // 任务已存在的情况下返回成功,使接口幂等 + // Returns success if the task already exists, making the interface + // idempotent *taskId = cloneInfo.GetTaskId(); closure->SetTaskId(*taskId); closure->SetErrCode(kErrCodeSuccess); return kErrCodeSuccess; } LOG(ERROR) << "CloneOrRecoverPre error" - << ", ret = " << ret - << ", source = " << source - << ", user = " << user - << ", destination = " << destination + << ", ret = " << ret << ", source = " << source + << ", user = " << user << ", destination = " << destination << ", lazyFlag = " << lazyFlag; closure->SetErrCode(ret); return ret; @@ -151,29 +143,23 @@ int CloneServiceManager::RecoverFile(const UUID &source, } int CloneServiceManager::BuildAndPushCloneOrRecoverLazyTask( - CloneInfo cloneInfo, - std::shared_ptr closure) { + CloneInfo cloneInfo, std::shared_ptr closure) { brpc::ClosureGuard guard(closure.get()); TaskIdType taskId = cloneInfo.GetTaskId(); - auto cloneInfoMetric = - std::make_shared(taskId); + auto cloneInfoMetric = std::make_shared(taskId); closure->SetTaskId(taskId); std::shared_ptr taskInfo = - std::make_shared(cloneInfo, - cloneInfoMetric, closure); + std::make_shared(cloneInfo, cloneInfoMetric, closure); taskInfo->UpdateMetric(); std::shared_ptr task = - std::make_shared( - taskId, taskInfo, cloneCore_); + std::make_shared(taskId, taskInfo, cloneCore_); int ret = cloneTaskMgr_->PushStage1Task(task); if (ret < 0) { LOG(ERROR) << "CloneTaskMgr Push Task error" - << ", ret = " << ret - << ", going to remove task info."; - int ret2 = cloneCore_->HandleRemoveCloneOrRecoverTask( - taskInfo); + << ", ret = " << ret << ", going to remove task info."; + int ret2 = cloneCore_->HandleRemoveCloneOrRecoverTask(taskInfo); if (ret2 < 0) { LOG(ERROR) << "CloneServiceManager has encouter an internal error," << "remove taskInfo fail !"; @@ -186,29 +172,23 @@ int CloneServiceManager::BuildAndPushCloneOrRecoverLazyTask( } int CloneServiceManager::BuildAndPushCloneOrRecoverNotLazyTask( - CloneInfo cloneInfo, - std::shared_ptr closure) { + CloneInfo cloneInfo, std::shared_ptr closure) { brpc::ClosureGuard guard(closure.get()); TaskIdType taskId = cloneInfo.GetTaskId(); - auto cloneInfoMetric = - std::make_shared(taskId); + auto cloneInfoMetric = std::make_shared(taskId); closure->SetTaskId(taskId); std::shared_ptr taskInfo = - std::make_shared(cloneInfo, - cloneInfoMetric, nullptr); + std::make_shared(cloneInfo, cloneInfoMetric, nullptr); taskInfo->UpdateMetric(); std::shared_ptr task = - std::make_shared( - taskId, taskInfo, cloneCore_); + std::make_shared(taskId, taskInfo, cloneCore_); int ret = cloneTaskMgr_->PushCommonTask(task); if (ret < 0) { LOG(ERROR) << 
"CloneTaskMgr Push Task error" - << ", ret = " << ret - << ", going to remove task info."; - int ret2 = cloneCore_->HandleRemoveCloneOrRecoverTask( - taskInfo); + << ", ret = " << ret << ", going to remove task info."; + int ret2 = cloneCore_->HandleRemoveCloneOrRecoverTask(taskInfo); if (ret2 < 0) { LOG(ERROR) << "CloneServiceManager has encouter an internal error," << "remove taskInfo fail !"; @@ -218,17 +198,15 @@ int CloneServiceManager::BuildAndPushCloneOrRecoverNotLazyTask( return kErrCodeSuccess; } -int CloneServiceManager::Flatten( - const std::string &user, - const TaskIdType &taskId) { +int CloneServiceManager::Flatten(const std::string& user, + const TaskIdType& taskId) { CloneInfo cloneInfo; int ret = cloneCore_->FlattenPre(user, taskId, &cloneInfo); if (kErrCodeTaskExist == ret) { return kErrCodeSuccess; } else if (ret < 0) { LOG(ERROR) << "FlattenPre error" - << ", ret = " << ret - << ", user = " << user + << ", ret = " << ret << ", user = " << user << ", taskId = " << taskId; return ret; } @@ -240,10 +218,10 @@ int CloneServiceManager::Flatten( dlock_ = std::make_shared(*dlockOpts_); if (0 == dlock_->Init()) { LOG(ERROR) << "Init DLock error" - << ", pfx = " << dlockOpts_->pfx - << ", retryTimes = " << dlockOpts_->retryTimes - << ", timeout = " << dlockOpts_->ctx_timeoutMS - << ", ttl = " << dlockOpts_->ttlSec; + << ", pfx = " << dlockOpts_->pfx + << ", retryTimes = " << dlockOpts_->retryTimes + << ", timeout = " << dlockOpts_->ctx_timeoutMS + << ", ttl = " << dlockOpts_->ttlSec; return kErrCodeInternalError; } } @@ -253,11 +231,9 @@ int CloneServiceManager::Flatten( closure->SetDLock(dlock_); std::shared_ptr taskInfo = - std::make_shared( - cloneInfo, cloneInfoMetric, closure); - std::shared_ptr task = - std::make_shared( - cloneInfo.GetTaskId(), taskInfo, cloneCore_); + std::make_shared(cloneInfo, cloneInfoMetric, closure); + std::shared_ptr task = std::make_shared( + cloneInfo.GetTaskId(), taskInfo, cloneCore_); ret = cloneTaskMgr_->PushStage2Task(task); if (ret < 0) { LOG(ERROR) << "CloneTaskMgr Push Stage2 Task error" @@ -267,8 +243,8 @@ int CloneServiceManager::Flatten( return kErrCodeSuccess; } -int CloneServiceManager::GetCloneTaskInfo(const std::string &user, - std::vector *info) { +int CloneServiceManager::GetCloneTaskInfo(const std::string& user, + std::vector* info) { std::vector cloneInfos; int ret = cloneCore_->GetCloneInfoList(&cloneInfos); if (ret < 0) { @@ -280,16 +256,14 @@ int CloneServiceManager::GetCloneTaskInfo(const std::string &user, } int CloneServiceManager::GetCloneTaskInfoById( - const std::string &user, - const TaskIdType &taskId, - std::vector *info) { + const std::string& user, const TaskIdType& taskId, + std::vector* info) { std::vector cloneInfos; CloneInfo cloneInfo; int ret = cloneCore_->GetCloneInfo(taskId, &cloneInfo); if (ret < 0) { LOG(ERROR) << "GetCloneInfo fail" - << ", ret = " << ret - << ", taskId = " << taskId; + << ", ret = " << ret << ", taskId = " << taskId; return kErrCodeFileNotExist; } if (cloneInfo.GetUser() != user) { @@ -300,23 +274,20 @@ int CloneServiceManager::GetCloneTaskInfoById( } int CloneServiceManager::GetCloneTaskInfoByName( - const std::string &user, - const std::string &fileName, - std::vector *info) { + const std::string& user, const std::string& fileName, + std::vector* info) { std::vector cloneInfos; int ret = cloneCore_->GetCloneInfoByFileName(fileName, &cloneInfos); if (ret < 0) { LOG(ERROR) << "GetCloneInfoByFileName fail" - << ", ret = " << ret - << ", fileName = " << fileName; + << ", ret = " << ret << 
", fileName = " << fileName; return kErrCodeFileNotExist; } return GetCloneTaskInfoInner(cloneInfos, user, info); } int CloneServiceManager::GetCloneTaskInfoByFilter( - const CloneFilterCondition &filter, - std::vector *info) { + const CloneFilterCondition& filter, std::vector* info) { std::vector cloneInfos; int ret = cloneCore_->GetCloneInfoList(&cloneInfos); if (ret < 0) { @@ -327,9 +298,9 @@ int CloneServiceManager::GetCloneTaskInfoByFilter( return GetCloneTaskInfoInner(cloneInfos, filter, info); } -int CloneServiceManager::GetCloneRefStatus(const std::string &src, - CloneRefStatus *refStatus, - std::vector *needCheckFiles) { +int CloneServiceManager::GetCloneRefStatus( + const std::string& src, CloneRefStatus* refStatus, + std::vector* needCheckFiles) { std::vector cloneInfos; int ret = cloneCore_->GetCloneInfoList(&cloneInfos); if (ret < 0) { @@ -338,10 +309,10 @@ int CloneServiceManager::GetCloneRefStatus(const std::string &src, } *refStatus = CloneRefStatus::kNoRef; - for (auto &cloneInfo : cloneInfos) { + for (auto& cloneInfo : cloneInfos) { if (cloneInfo.GetSrc() == src) { switch (cloneInfo.GetStatus()) { - case CloneStatus::done : + case CloneStatus::done: case CloneStatus::error: { break; } @@ -370,14 +341,13 @@ int CloneServiceManager::GetCloneRefStatus(const std::string &src, } int CloneServiceManager::GetCloneTaskInfoInner( - std::vector cloneInfos, - CloneFilterCondition filter, - std::vector *info) { + std::vector cloneInfos, CloneFilterCondition filter, + std::vector* info) { int ret = kErrCodeSuccess; - for (auto &cloneInfo : cloneInfos) { + for (auto& cloneInfo : cloneInfos) { if (filter.IsMatchCondition(cloneInfo)) { switch (cloneInfo.GetStatus()) { - case CloneStatus::done : { + case CloneStatus::done: { info->emplace_back(cloneInfo, kProgressCloneComplete); break; } @@ -395,7 +365,7 @@ int CloneServiceManager::GetCloneTaskInfoInner( cloneTaskMgr_->GetTask(taskId); if (task != nullptr) { info->emplace_back(cloneInfo, - task->GetTaskInfo()->GetProgress()); + task->GetTaskInfo()->GetProgress()); } else { TaskCloneInfo tcInfo; ret = GetFinishedCloneTask(taskId, &tcInfo); @@ -421,14 +391,13 @@ int CloneServiceManager::GetCloneTaskInfoInner( } int CloneServiceManager::GetCloneTaskInfoInner( - std::vector cloneInfos, - const std::string &user, - std::vector *info) { + std::vector cloneInfos, const std::string& user, + std::vector* info) { int ret = kErrCodeSuccess; - for (auto &cloneInfo : cloneInfos) { + for (auto& cloneInfo : cloneInfos) { if (cloneInfo.GetUser() == user) { switch (cloneInfo.GetStatus()) { - case CloneStatus::done : { + case CloneStatus::done: { info->emplace_back(cloneInfo, kProgressCloneComplete); break; } @@ -446,7 +415,7 @@ int CloneServiceManager::GetCloneTaskInfoInner( cloneTaskMgr_->GetTask(taskId); if (task != nullptr) { info->emplace_back(cloneInfo, - task->GetTaskInfo()->GetProgress()); + task->GetTaskInfo()->GetProgress()); } else { TaskCloneInfo tcInfo; ret = GetFinishedCloneTask(taskId, &tcInfo); @@ -471,7 +440,7 @@ int CloneServiceManager::GetCloneTaskInfoInner( return kErrCodeSuccess; } -bool CloneFilterCondition::IsMatchCondition(const CloneInfo &cloneInfo) { +bool CloneFilterCondition::IsMatchCondition(const CloneInfo& cloneInfo) { if (user_ != nullptr && *user_ != cloneInfo.GetUser()) { return false; } @@ -489,45 +458,39 @@ bool CloneFilterCondition::IsMatchCondition(const CloneInfo &cloneInfo) { } int status; - if (status_ != nullptr - && common::StringToInt(*status_, &status) == false) { + if (status_ != nullptr && 
common::StringToInt(*status_, &status) == false) { return false; } - if (status_ != nullptr - && common::StringToInt(*status_, &status) == true - && status != static_cast(cloneInfo.GetStatus())) { + if (status_ != nullptr && common::StringToInt(*status_, &status) == true && + status != static_cast(cloneInfo.GetStatus())) { return false; } int type; - if (type_ != nullptr - && common::StringToInt(*type_, &type) == false) { + if (type_ != nullptr && common::StringToInt(*type_, &type) == false) { return false; } - if (type_ != nullptr - && common::StringToInt(*type_, &type) == true - && type != static_cast(cloneInfo.GetTaskType())) { + if (type_ != nullptr && common::StringToInt(*type_, &type) == true && + type != static_cast(cloneInfo.GetTaskType())) { return false; } return true; } -int CloneServiceManager::GetFinishedCloneTask( - const TaskIdType &taskId, - TaskCloneInfo *taskCloneInfoOut) { +int CloneServiceManager::GetFinishedCloneTask(const TaskIdType& taskId, + TaskCloneInfo* taskCloneInfoOut) { CloneInfo newInfo; int ret = cloneCore_->GetCloneInfo(taskId, &newInfo); if (ret < 0) { LOG(ERROR) << "GetCloneInfo fail" - << ", ret = " << ret - << ", taskId = " << taskId; + << ", ret = " << ret << ", taskId = " << taskId; return ret; } switch (newInfo.GetStatus()) { - case CloneStatus::done : { + case CloneStatus::done: { taskCloneInfoOut->SetCloneInfo(newInfo); taskCloneInfoOut->SetCloneProgress(kProgressCloneComplete); break; @@ -544,32 +507,29 @@ int CloneServiceManager::GetFinishedCloneTask( } default: LOG(ERROR) << "can not reach here!" - << " status = " << static_cast( - newInfo.GetStatus()); - // 当更新数据库失败时,有可能进入这里 + << " status = " << static_cast(newInfo.GetStatus()); + // When updating the database fails, it is possible to enter here return kErrCodeInternalError; } return kErrCodeSuccess; } -int CloneServiceManager::CleanCloneTask(const std::string &user, - const TaskIdType &taskId) { +int CloneServiceManager::CleanCloneTask(const std::string& user, + const TaskIdType& taskId) { CloneInfo cloneInfo; int ret = cloneCore_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfo); if (kErrCodeTaskExist == ret) { return kErrCodeSuccess; } else if (ret < 0) { LOG(ERROR) << "CleanCloneOrRecoverTaskPre fail" - << ", ret = " << ret - << ", user = " << user + << ", ret = " << ret << ", user = " << user << ", taskid = " << taskId; return ret; } std::shared_ptr taskInfo = std::make_shared(cloneInfo, nullptr, nullptr); - std::shared_ptr task = - std::make_shared( - cloneInfo.GetTaskId(), taskInfo, cloneCore_); + std::shared_ptr task = std::make_shared( + cloneInfo.GetTaskId(), taskInfo, cloneCore_); ret = cloneTaskMgr_->PushCommonTask(task); if (ret < 0) { LOG(ERROR) << "Push Task error, " @@ -579,40 +539,40 @@ int CloneServiceManager::CleanCloneTask(const std::string &user, return kErrCodeSuccess; } -int CloneServiceManager::RecoverCloneTaskInternal(const CloneInfo &cloneInfo) { +int CloneServiceManager::RecoverCloneTaskInternal(const CloneInfo& cloneInfo) { auto cloneInfoMetric = std::make_shared(cloneInfo.GetTaskId()); auto closure = std::make_shared(); std::shared_ptr taskInfo = - std::make_shared( - cloneInfo, cloneInfoMetric, closure); + std::make_shared(cloneInfo, cloneInfoMetric, closure); taskInfo->UpdateMetric(); - std::shared_ptr task = - std::make_shared( - cloneInfo.GetTaskId(), taskInfo, cloneCore_); + std::shared_ptr task = std::make_shared( + cloneInfo.GetTaskId(), taskInfo, cloneCore_); bool isLazy = cloneInfo.GetIsLazy(); int ret = kErrCodeSuccess; - // Lazy 克隆/恢复 + // Lazy 
Clone/Recover if (isLazy) { CloneStep step = cloneInfo.GetNextStep(); - // 处理kRecoverChunk,kCompleteCloneFile,kEnd这三个阶段的Push到stage2Pool - // 如果克隆source类型是file,阶段为kCreateCloneChunk和kCreateCloneMeta也需要push到stage2Pool // NOLINT + // Push tasks in the kRecoverChunk, kCompleteCloneFile, and kEnd stages + // to stage2Pool. If the clone source type is file, tasks in the + // kCreateCloneChunk and kCreateCloneMeta stages also need to be pushed + // to stage2Pool. // NOLINT if (CloneStep::kRecoverChunk == step || - CloneStep::kCompleteCloneFile == step || - CloneStep::kEnd == step || - (CloneStep::kCreateCloneChunk == step - && cloneInfo.GetFileType() == CloneFileType::kFile) || - (CloneStep::kCreateCloneMeta == step - && cloneInfo.GetFileType() == CloneFileType::kFile)) { + CloneStep::kCompleteCloneFile == step || CloneStep::kEnd == step || + (CloneStep::kCreateCloneChunk == step && + cloneInfo.GetFileType() == CloneFileType::kFile) || + (CloneStep::kCreateCloneMeta == step && + cloneInfo.GetFileType() == CloneFileType::kFile)) { ret = cloneTaskMgr_->PushStage2Task(task); if (ret < 0) { LOG(ERROR) << "CloneTaskMgr Push Stage2 Task error" << ", ret = " << ret; return ret; } - // 否则push到stage1Pool + // Otherwise, push to stage1Pool } else { - // stage1的task包含了异步的请求的返回,需要加锁 + // A stage1 task handles the returns of asynchronous requests, + // which requires locking std::string destination = cloneInfo.GetDest(); NameLockGuard lockDestFileGuard(*destFileLock_, destination); closure->SetDestFileLock(destFileLock_); @@ -625,7 +585,7 @@ int CloneServiceManager::RecoverCloneTaskInternal(const CloneInfo &cloneInfo) { return ret; } } - // 非Lazy 克隆/恢复push到commonPool + // Non-lazy clone/recover tasks are pushed to commonPool } else { ret = cloneTaskMgr_->PushCommonTask(task); if (ret < 0) { @@ -637,13 +597,11 @@ int CloneServiceManager::RecoverCloneTaskInternal(const CloneInfo &cloneInfo) { return ret; } } return kErrCodeSuccess; } -int CloneServiceManager::RecoverCleanTaskInternal(const CloneInfo &cloneInfo) { +int CloneServiceManager::RecoverCleanTaskInternal(const CloneInfo& cloneInfo) { std::shared_ptr taskInfo = - std::make_shared( - cloneInfo, nullptr, nullptr); - std::shared_ptr task = - std::make_shared( - cloneInfo.GetTaskId(), taskInfo, cloneCore_); + std::make_shared(cloneInfo, nullptr, nullptr); + std::shared_ptr task = std::make_shared( + cloneInfo.GetTaskId(), taskInfo, cloneCore_); int ret = cloneTaskMgr_->PushCommonTask(task); if (ret < 0) { LOG(ERROR) << "CloneTaskMgr Push Task error"
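The branch in RecoverCloneTaskInternal above encodes the pool routing for lazy tasks. Restated as a single predicate (a sketch, not code from the patch; the enums are abbreviated local stand-ins for the project's CloneStep and CloneFileType):

```cpp
enum class CloneStep { kCreateCloneMeta, kCreateCloneChunk, kRecoverChunk,
                       kCompleteCloneFile, kEnd, kOther };
enum class CloneFileType { kFile, kSnapshot };

bool GoesToStage2Pool(CloneStep step, CloneFileType fileType) {
    switch (step) {
        case CloneStep::kRecoverChunk:
        case CloneStep::kCompleteCloneFile:
        case CloneStep::kEnd:
            return true;  // late stages always run in stage2Pool
        case CloneStep::kCreateCloneChunk:
        case CloneStep::kCreateCloneMeta:
            // Only file-sourced clones push these stages to stage2Pool.
            return fileType == CloneFileType::kFile;
        default:
            return false;  // everything else goes to stage1Pool
    }
}
```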
@@ -660,26 +618,26 @@ int CloneServiceManager::RecoverCloneTask() { LOG(ERROR) << "GetCloneInfoList fail"; return ret; } - for (auto &cloneInfo : list) { + for (auto& cloneInfo : list) { switch (cloneInfo.GetStatus()) { case CloneStatus::retrying: { - // 重置重试任务的状态 + // Reset the status of the retrying task if (cloneInfo.GetTaskType() == CloneTaskType::kClone) { cloneInfo.SetStatus(CloneStatus::cloning); } else { cloneInfo.SetStatus(CloneStatus::recovering); } } - FALLTHROUGH_INTENDED; + FALLTHROUGH_INTENDED; case CloneStatus::cloning: case CloneStatus::recovering: { - // 建立快照或镜像的引用关系 + // Establish a reference to the source snapshot or image if (CloneFileType::kSnapshot == cloneInfo.GetFileType()) { cloneCore_->GetSnapshotRef()->IncrementSnapshotRef( cloneInfo.GetSrc()); } else { - cloneCore_->GetCloneRef()->IncrementRef( - cloneInfo.GetSrc()); + cloneCore_->GetCloneRef()->IncrementRef(cloneInfo.GetSrc()); } ret = RecoverCloneTaskInternal(cloneInfo); if (ret < 0) { @@ -696,13 +654,13 @@ int CloneServiceManager::RecoverCloneTask() { break; } case CloneStatus::metaInstalled: { - // metaInstalled 状态下的克隆对文件仍然有依赖,需要建立引用关系 + // Clones in the metaInstalled state still depend on the source + // file, so a reference must be established if (CloneFileType::kSnapshot == cloneInfo.GetFileType()) { cloneCore_->GetSnapshotRef()->IncrementSnapshotRef( cloneInfo.GetSrc()); } else { - cloneCore_->GetCloneRef()->IncrementRef( - cloneInfo.GetSrc()); + cloneCore_->GetCloneRef()->IncrementRef(cloneInfo.GetSrc()); } break; } @@ -713,52 +671,59 @@ int CloneServiceManager::RecoverCloneTask() { return kErrCodeSuccess; } -// 当clone处于matainstall状态,且克隆卷已经删除的情况下,原卷的引用计数没有减。 -// 这个后台线程处理函数周期性的检查这个场景,如果发现有clone处于metaintalled状态 -// 且克隆卷已经删除,就去删除这条无效的clone信息,并减去原卷的引用计数。 -// 如果原卷是镜像且引用计数减为0,还需要去mds把原卷的状态改为created。 +// When a clone is in the metaInstalled state and the clone volume has been +// deleted, the reference count of the source volume is not decremented. This +// background thread function periodically checks for this scenario: if a +// clone is found in the metaInstalled state and its clone volume has been +// deleted, the stale clone info is removed and the reference count of the +// source volume is decremented. If the source volume is an image and its +// reference count drops to 0, the MDS must also be asked to set the source +// volume's status back to created. void CloneServiceManagerBackendImpl::Func() { LOG(INFO) << "CloneServiceManager BackEndReferenceScanFunc start"; while (!isStop_.load()) { std::vector cloneInfos; int ret = cloneCore_->GetCloneInfoList(&cloneInfos); if (ret < 0) { - LOG(WARNING) << "GetCloneInfoList fail" << ", ret = " << ret; + LOG(WARNING) << "GetCloneInfoList fail" + << ", ret = " << ret; } int deleteCount = 0; - for (auto &it : cloneInfos) { - if (it.GetStatus() == CloneStatus::metaInstalled - && it.GetIsLazy() == true) { - // 检查destination在不在 + for (auto& it : cloneInfos) { + if (it.GetStatus() == CloneStatus::metaInstalled && + it.GetIsLazy() == true) { + // Check whether the destination still exists if (it.GetTaskType() == CloneTaskType::kClone) { ret = cloneCore_->CheckFileExists(it.GetDest(), - it.GetDestId()); + it.GetDestId()); } else { - // rename时,inodeid恢复成 + // On rename, the inode id is restored to the origin id ret = cloneCore_->CheckFileExists(it.GetDest(), - it.GetOriginId()); + it.GetOriginId()); } if (ret == kErrCodeFileNotExist) { - // 如果克隆卷是metaInstalled状态,且destination文件不存在, - // 删除这条cloneInfo,并减引用计数 + // If the clone is in the metaInstalled state and the + // destination file does not exist, delete this cloneInfo + // and decrement the reference count TaskIdType taskId = it.GetTaskId(); CloneInfo cloneInfo; ret = cloneCore_->GetCloneInfo(taskId, &cloneInfo); if (ret != kErrCodeSuccess) { - // cloneInfo已经不存在了 + // The cloneInfo no longer exists continue; } - // 再次检查cloneInfo是否是metaInstalled状态 + // Check again whether the cloneInfo is still in the + // metaInstalled state if (cloneInfo.GetStatus() != CloneStatus::metaInstalled) { continue; } ret = cloneCore_->HandleDeleteCloneInfo(cloneInfo); if (ret != kErrCodeSuccess) { - LOG(WARNING) << "HandleDeleteCloneInfo fail, ret = " - << ret << ", cloneInfo = " << cloneInfo; + LOG(WARNING) + << "HandleDeleteCloneInfo fail, ret = " << ret + << ", cloneInfo = " << cloneInfo; } else { deleteCount++; } @@ -771,14 +736,14 @@ void CloneServiceManagerBackendImpl::Func() { LOG(INFO) << "backend scan list, size = " << cloneInfos.size() << ", delete clone record count = " << deleteCount; - // 控制每轮扫描间隔 + // Control the scanning interval of each round roundWaitInterval_.WaitForNextExcution(); } LOG(INFO) << "CloneServiceManager BackEndReferenceScanFunc exit"; } void CloneServiceManagerBackendImpl::Init(uint32_t recordIntevalMs, - uint32_t roundIntevalMs) { + uint32_t roundIntevalMs) { recordWaitInterval_.Init(recordIntevalMs); roundWaitInterval_.Init(roundIntevalMs);
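Func() above is paced by common::WaitInterval rather than a busy loop. The skeleton of that pattern, with std::this_thread::sleep_for standing in for WaitForNextExcution (which in the real class can also be woken early); ScanLoop and scanOnce are invented names:

```cpp
#include <atomic>
#include <chrono>
#include <functional>
#include <thread>

void ScanLoop(std::atomic_bool* isStop, uint32_t roundIntervalMs,
              const std::function<void()>& scanOnce) {
    while (!isStop->load()) {
        scanOnce();  // one full pass over the clone info list
        std::this_thread::sleep_for(
            std::chrono::milliseconds(roundIntervalMs));  // pace next round
    }
}
```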
diff --git a/src/snapshotcloneserver/clone/clone_service_manager.h b/src/snapshotcloneserver/clone/clone_service_manager.h index 0cd66e9d09..70268a9942 100644 --- a/src/snapshotcloneserver/clone/clone_service_manager.h +++ b/src/snapshotcloneserver/clone/clone_service_manager.h @@ -22,18 +22,18 @@ #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_SERVICE_MANAGER_H_ #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_SERVICE_MANAGER_H_ +#include #include #include -#include +#include "src/common/concurrent/dlock.h" +#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/common/wait_interval.h" +#include "src/snapshotcloneserver/clone/clone_closure.h" #include "src/snapshotcloneserver/clone/clone_core.h" #include "src/snapshotcloneserver/clone/clone_task.h" #include "src/snapshotcloneserver/clone/clone_task_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/config.h" -#include "src/snapshotcloneserver/clone/clone_closure.h" -#include "src/common/concurrent/dlock.h" namespace curve { namespace snapshotcloneserver { @@ -44,26 +44,16 @@ class TaskCloneInfo { public: TaskCloneInfo() = default; - TaskCloneInfo(const CloneInfo &cloneInfo, - uint32_t progress) - : cloneInfo_(cloneInfo), - cloneProgress_(progress) {} + TaskCloneInfo(const CloneInfo& cloneInfo, uint32_t progress) + : cloneInfo_(cloneInfo), cloneProgress_(progress) {} - void SetCloneInfo(const CloneInfo &cloneInfo) { - cloneInfo_ = cloneInfo; - } + void SetCloneInfo(const CloneInfo& cloneInfo) { cloneInfo_ = cloneInfo; } - CloneInfo GetCloneInfo() const { - return cloneInfo_; - } + CloneInfo GetCloneInfo() const { return cloneInfo_; } - void SetCloneProgress(uint32_t progress) { - cloneProgress_ = progress; - } + void SetCloneProgress(uint32_t progress) { cloneProgress_ = progress; } - uint32_t GetCloneProgress() const { - return cloneProgress_; - } + uint32_t GetCloneProgress() const { return cloneProgress_; } Json::Value ToJsonObj() const { Json::Value cloneTaskObj; @@ -72,88 +62,76 @@ class TaskCloneInfo { cloneTaskObj["User"] = info.GetUser(); cloneTaskObj["File"] = info.GetDest(); cloneTaskObj["Src"] = info.GetSrc(); - cloneTaskObj["TaskType"] = static_cast ( - info.GetTaskType()); - cloneTaskObj["TaskStatus"] = static_cast ( - info.GetStatus()); + cloneTaskObj["TaskType"] = static_cast(info.GetTaskType()); + cloneTaskObj["TaskStatus"] = static_cast(info.GetStatus()); cloneTaskObj["IsLazy"] = info.GetIsLazy(); - cloneTaskObj["NextStep"] = static_cast (info.GetNextStep()); + cloneTaskObj["NextStep"] = static_cast(info.GetNextStep()); cloneTaskObj["Time"] = info.GetTime(); cloneTaskObj["Progress"] = GetCloneProgress(); - cloneTaskObj["FileType"] = static_cast (info.GetFileType()); + cloneTaskObj["FileType"] = static_cast(info.GetFileType()); return cloneTaskObj; } - void LoadFromJsonObj(const Json::Value &jsonObj) { + void LoadFromJsonObj(const Json::Value& jsonObj) { CloneInfo info; info.SetTaskId(jsonObj["UUID"].asString()); info.SetUser(jsonObj["User"].asString()); info.SetDest(jsonObj["File"].asString()); info.SetSrc(jsonObj["Src"].asString()); - info.SetTaskType(static_cast( - jsonObj["TaskType"].asInt())); - 
info.SetStatus(static_cast( - jsonObj["TaskStatus"].asInt())); + info.SetTaskType( + static_cast(jsonObj["TaskType"].asInt())); + info.SetStatus(static_cast(jsonObj["TaskStatus"].asInt())); info.SetIsLazy(jsonObj["IsLazy"].asBool()); info.SetNextStep(static_cast(jsonObj["NextStep"].asInt())); info.SetTime(jsonObj["Time"].asUInt64()); - info.SetFileType(static_cast( - jsonObj["FileType"].asInt())); + info.SetFileType( + static_cast(jsonObj["FileType"].asInt())); SetCloneInfo(info); } private: - CloneInfo cloneInfo_; - uint32_t cloneProgress_; + CloneInfo cloneInfo_; + uint32_t cloneProgress_; }; class CloneFilterCondition { public: CloneFilterCondition() - : uuid_(nullptr), - source_(nullptr), - destination_(nullptr), - user_(nullptr), - status_(nullptr), - type_(nullptr) {} - - CloneFilterCondition(const std::string *uuid, const std::string *source, - const std::string *destination, const std::string *user, - const std::string *status, const std::string *type) - : uuid_(uuid), - source_(source), - destination_(destination), - user_(user), - status_(status), - type_(type) {} - bool IsMatchCondition(const CloneInfo &cloneInfo); - - void SetUuid(const std::string *uuid) { - uuid_ = uuid; - } - void SetSource(const std::string *source) { - source_ = source; - } - void SetDestination(const std::string *destination) { + : uuid_(nullptr), + source_(nullptr), + destination_(nullptr), + user_(nullptr), + status_(nullptr), + type_(nullptr) {} + + CloneFilterCondition(const std::string* uuid, const std::string* source, + const std::string* destination, + const std::string* user, const std::string* status, + const std::string* type) + : uuid_(uuid), + source_(source), + destination_(destination), + user_(user), + status_(status), + type_(type) {} + bool IsMatchCondition(const CloneInfo& cloneInfo); + + void SetUuid(const std::string* uuid) { uuid_ = uuid; } + void SetSource(const std::string* source) { source_ = source; } + void SetDestination(const std::string* destination) { destination_ = destination; } - void SetUser(const std::string *user) { - user_ = user; - } - void SetStatus(const std::string *status) { - status_ = status; - } - void SetType(const std::string *type) { - type_ = type; - } + void SetUser(const std::string* user) { user_ = user; } + void SetStatus(const std::string* status) { status_ = status; } + void SetType(const std::string* type) { type_ = type; } private: - const std::string *uuid_; - const std::string *source_; - const std::string *destination_; - const std::string *user_; - const std::string *status_; - const std::string *type_; + const std::string* uuid_; + const std::string* source_; + const std::string* destination_; + const std::string* user_; + const std::string* status_; + const std::string* type_; }; class CloneServiceManagerBackend { public: @@ -161,7 +139,8 @@ class CloneServiceManagerBackend { virtual ~CloneServiceManagerBackend() {} /** - * @brief 后台扫描线程执行函数,扫描克隆卷是否存在 + * @brief Background scan thread execution function to scan for the + * existence of cloned volumes * */ virtual void Func() = 0; @@ -177,12 +156,9 @@ class CloneServiceManagerBackendImpl : public CloneServiceManagerBackend { public: explicit CloneServiceManagerBackendImpl( std::shared_ptr cloneCore) - : cloneCore_(cloneCore), - isStop_(true) { - } + : cloneCore_(cloneCore), isStop_(true) {} - ~CloneServiceManagerBackendImpl() { - } + ~CloneServiceManagerBackendImpl() {} void Func() override; void Init(uint32_t recordIntevalMs, uint32_t roundIntevalMs) override; @@ -191,13 +167,14 @@ class 
CloneServiceManagerBackendImpl : public CloneServiceManagerBackend { private: std::shared_ptr cloneCore_; - // 后台扫描线程,扫描clone卷是否存在 + // Background scan thread to check if the clone volume exists std::thread backEndReferenceScanThread_; - // 当前后台扫描是否停止,用于支持start,stop功能 + // Whether the background scan is currently stopped; used to support + // the start/stop functions std::atomic_bool isStop_; - // 后台扫描线程记录使用定时器 + // Timer used by the background scan thread between records common::WaitInterval recordWaitInterval_; - // 后台扫描线程每轮使用定时器 + // Timer used by the background scan thread between scan rounds common::WaitInterval roundWaitInterval_; }; @@ -207,250 +184,242 @@ class CloneServiceManager { std::shared_ptr cloneTaskMgr, std::shared_ptr cloneCore, std::shared_ptr cloneServiceManagerBackend) - : cloneTaskMgr_(cloneTaskMgr), - cloneCore_(cloneCore), - cloneServiceManagerBackend_(cloneServiceManagerBackend) { + : cloneTaskMgr_(cloneTaskMgr), + cloneCore_(cloneCore), + cloneServiceManagerBackend_(cloneServiceManagerBackend) { destFileLock_ = std::make_shared(); } virtual ~CloneServiceManager() {} /** - * @brief 初始化 + * @brief Initialize * - * @return 错误码 + * @return error code */ - virtual int Init(const SnapshotCloneServerOptions &option); + virtual int Init(const SnapshotCloneServerOptions& option); /** - * @brief 启动服务 + * @brief Start the service * - * @return 错误码 + * @return error code */ virtual int Start(); /** - * @brief 停止服务 + * @brief Stop the service * */ virtual void Stop(); /** - * @brief 从文件或快照克隆出一个文件 + * @brief Clone a file from a file or snapshot * - * @param source 文件或快照的uuid - * @param user 文件或快照的用户 - * @param destination 目标文件 - * @param lazyFlag 是否lazy模式 - * @param closure 异步回调实体 - * @param[out] taskId 任务ID + * @param source UUID of the source file or snapshot + * @param user The user of the file or snapshot + * @param destination destination file + * @param lazyFlag whether lazy mode is used + * @param closure asynchronous callback entity + * @param[out] taskId Task ID * - * @return 错误码 + * @return error code */ - virtual int CloneFile(const UUID &source, - const std::string &user, - const std::string &destination, - const std::string &poolset, - bool lazyFlag, - std::shared_ptr closure, - TaskIdType *taskId); + virtual int CloneFile(const UUID& source, const std::string& user, + const std::string& destination, + const std::string& poolset, bool lazyFlag, + std::shared_ptr closure, + TaskIdType* taskId); /** - * @brief 从文件或快照恢复一个文件 + * @brief Recover a file from a file or snapshot * - * @param source 文件或快照的uuid - * @param user 文件或快照的用户 - * @param destination 目标文件名 - * @param lazyFlag 是否lazy模式 - * @param closure 异步回调实体 - * @param[out] taskId 任务ID + * @param source UUID of the source file or snapshot + * @param user The user of the file or snapshot + * @param destination destination file name + * @param lazyFlag whether lazy mode is used + * @param closure asynchronous callback entity + * @param[out] taskId Task ID * - * @return 错误码 + * @return error code */ - virtual int RecoverFile(const UUID &source, - const std::string &user, - const std::string &destination, - bool lazyFlag, - std::shared_ptr closure, - TaskIdType *taskId); + virtual int RecoverFile(const UUID& source, const std::string& user, + const std::string& destination, bool lazyFlag, + std::shared_ptr closure, + TaskIdType* taskId);
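A hypothetical caller-side sketch of the lazy-clone flow these declarations describe. The CloneFile and Flatten signatures are taken from this header; how the manager and closure are constructed is elided, and every literal argument is made up:

```cpp
// Sketch only: assumes the declarations of this header are in scope.
int LazyCloneAndFlatten(CloneServiceManager* mgr,
                        std::shared_ptr<CloneClosure> closure) {
    TaskIdType taskId;
    int ret = mgr->CloneFile(/*source=*/"src-uuid", /*user=*/"someuser",
                             /*destination=*/"/dest-file", /*poolset=*/"",
                             /*lazyFlag=*/true, closure, &taskId);
    if (ret != kErrCodeSuccess) {
        return ret;
    }
    // CloneFile is idempotent: resubmitting an existing task also returns
    // kErrCodeSuccess together with the original task ID (see the .cpp
    // change earlier in this patch). A lazy clone installs only metadata
    // up front; the data is installed when the clone is flattened.
    return mgr->Flatten("someuser", taskId);
}
```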
/** - * @brief 安装克隆文件的数据,用于Lazy克隆 + * @brief Install the data of a clone file, used for lazy clone * - * @param user 用户 - * @param taskId 任务ID + * @param user user + * @param taskId task ID * - * @return 错误码 + * @return error code */ - virtual int Flatten( - const std::string &user, - const TaskIdType &taskId); + virtual int Flatten(const std::string& user, const TaskIdType& taskId); /** - * @brief 查询某个用户的克隆/恢复任务信息 + * @brief Query the clone/recover task information of a given user * - * @param user 用户名 - * @param info 克隆/恢复任务信息 + * @param user user name + * @param info clone/recover task information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfo(const std::string &user, - std::vector *info); + virtual int GetCloneTaskInfo(const std::string& user, + std::vector* info); /** - * @brief 通过Id查询某个用户的克隆/恢复任务信息 + * @brief Query a user's clone/recover task information by task ID * - * @param user 用户名 - * @param taskId 指定的任务Id - * @param info 克隆/恢复任务信息 + * @param user user name + * @param taskId specified task ID + * @param info clone/recover task information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfoById( - const std::string &user, - const TaskIdType &taskId, - std::vector *info); + virtual int GetCloneTaskInfoById(const std::string& user, + const TaskIdType& taskId, + std::vector* info); /** - * @brief 通过文件名查询某个用户的克隆/恢复任务信息 + * @brief Query a user's clone/recover task information by file name * - * @param user 用户名 - * @param fileName 指定的文件名 - * @param info 克隆/恢复任务信息 + * @param user user name + * @param fileName specified file name + * @param info clone/recover task information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfoByName( - const std::string &user, - const std::string &fileName, - std::vector *info); + virtual int GetCloneTaskInfoByName(const std::string& user, + const std::string& fileName, + std::vector* info); /** - * @brief 通过过滤条件查询某个用户的克隆/恢复任务信息 + * @brief Query clone/recover task information by filter conditions * - * @param filter 过滤条件 - * @param info 克隆/恢复任务信息 + * @param filter filter conditions + * @param info clone/recover task information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfoByFilter(const CloneFilterCondition &filter, - std::vector *info); + virtual int GetCloneTaskInfoByFilter(const CloneFilterCondition& filter, + std::vector* info); /** - * @brief 查询src是否有依赖 + * @brief Check whether src has dependents * - * @param src 指定的文件名 - * @param refStatus 0表示没有依赖,1表示有依赖,2表示需要进一步确认 - * @param needCheckFiles 需要进一步确认的文件列表 + * @param src specified file name + * @param refStatus 0 indicates no dependencies, 1 indicates dependencies, + * and 2 indicates further confirmation is needed + * @param needCheckFiles List of files that require further confirmation * - * @return 错误码 + * @return error code */ - virtual int GetCloneRefStatus(const std::string &src, - CloneRefStatus *refStatus, - std::vector *needCheckFiles); + virtual int GetCloneRefStatus(const std::string& src, + CloneRefStatus* refStatus, + std::vector* needCheckFiles); /** - * @brief 清除失败的clone/Recover任务、状态、文件 + * @brief Clean up a failed clone/recover task, its status and files * - * @param user 用户名 - * @param taskId 任务Id + * @param user user name + * @param taskId task ID * - * @return 错误码 + * @return error code */ - virtual int CleanCloneTask(const std::string &user, - const TaskIdType &taskId); + virtual int CleanCloneTask(const std::string& user, + const TaskIdType& taskId); /** - * @brief 重启后恢复未完成clone和recover任务 + * @brief Resume unfinished clone and recover tasks after a restart * - * @return 错误码 + * @return error code */ virtual int RecoverCloneTask();
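The three-way contract of GetCloneRefStatus (0 no dependency, 1 dependency, 2 needs further confirmation) leaves the final decision to the caller. A self-contained sketch of that decision; RefStatus and MayDeleteSource are stand-in names, the real CloneRefStatus enum lives in snapshotclone_define.h:

```cpp
enum class RefStatus { kNoRef = 0, kHasRef = 1, kNeedCheck = 2 };

bool MayDeleteSource(RefStatus status, bool allNeedCheckFilesGone) {
    switch (status) {
        case RefStatus::kNoRef:
            return true;   // no clone depends on the source
        case RefStatus::kHasRef:
            return false;  // a live dependency exists
        case RefStatus::kNeedCheck:
            // metaInstalled clones: deletion is safe only once every file
            // in needCheckFiles has been confirmed gone.
            return allNeedCheckFilesGone;
    }
    return false;
}
```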
// for test - void SetDLock(std::shared_ptr dlock) { - dlock_ = dlock; - } + void SetDLock(std::shared_ptr dlock) { dlock_ = dlock; } private: /** - * @brief 从给定的任务列表中获取指定用户的任务集 + * @brief Get the task set of the specified user from the given task list * - * @param cloneInfos 克隆/恢复信息 - * @param user 用户信息 - * @param[out] info 克隆/恢复任务信息 + * @param cloneInfos clone/recover information + * @param user user information + * @param[out] info clone/recover task information * - * @return 错误码 + * @return error code */ int GetCloneTaskInfoInner(std::vector cloneInfos, - const std::string &user, - std::vector *info); + const std::string& user, + std::vector* info); /** - * @brief 从给定的任务列表中获取符合过滤条件的任务集 + * @brief Get the tasks that meet the filter conditions from the given + * task list * - * @param cloneInfos 克隆/恢复信息 - * @param filter 过滤条件 - * @param[out] info 克隆/恢复任务信息 + * @param cloneInfos clone/recover information + * @param filter filter conditions + * @param[out] info clone/recover task information * - * @return 错误码 + * @return error code */ int GetCloneTaskInfoInner(std::vector cloneInfos, - CloneFilterCondition filter, - std::vector *info); + CloneFilterCondition filter, + std::vector* info); /** - * @brief 获取已经完成任务信息 + * @brief Obtain the information of a finished task * - * @param taskId 任务ID - * @param taskCloneInfoOut 克隆任务信息 + * @param taskId task ID + * @param taskCloneInfoOut clone task information * - * @return 错误码 + * @return error code */ - int GetFinishedCloneTask( - const TaskIdType &taskId, - TaskCloneInfo *taskCloneInfoOut); + int GetFinishedCloneTask(const TaskIdType& taskId, + TaskCloneInfo* taskCloneInfoOut); /** - * @brief 根据克隆任务信息恢复克隆任务 + * @brief Recover a clone task from its clone task information * - * @param cloneInfo 克隆任务信息 + * @param cloneInfo clone task information * - * @return 错误码 + * @return error code */ - int RecoverCloneTaskInternal(const CloneInfo &cloneInfo); + int RecoverCloneTaskInternal(const CloneInfo& cloneInfo); /** - * @brief 根据克隆任务信息恢复清除克隆任务 + * @brief Recover a clean-clone task from its clone task information * - * @param cloneInfo 克隆任务信息 + * @param cloneInfo clone task information * - * @return 错误码 + * @return error code */ - int RecoverCleanTaskInternal(const CloneInfo &cloneInfo); + int RecoverCleanTaskInternal(const CloneInfo& cloneInfo); /** - * @brief 构建和push Lazy的任务 + * @brief Build and push a lazy task * - * @param cloneInfo 克隆任务信息 - * @param closure 异步回调实体 + * @param cloneInfo clone task information + * @param closure asynchronous callback entity * - * @return 错误码 + * @return error code */ int BuildAndPushCloneOrRecoverLazyTask( - CloneInfo cloneInfo, - std::shared_ptr closure); + CloneInfo cloneInfo, std::shared_ptr closure); /** - * @brief 构建和push 非Lazy的任务 + * @brief Build and push a non-lazy task * - * @param cloneInfo 克隆任务信息 - * @param closure 异步回调实体 + * @param cloneInfo clone task information + * @param closure asynchronous callback entity * - * @return 错误码 + * @return error code */ int BuildAndPushCloneOrRecoverNotLazyTask( - CloneInfo cloneInfo, - std::shared_ptr closure); + CloneInfo cloneInfo, std::shared_ptr closure); private: std::shared_ptr dlockOpts_; @@ -461,8 +430,6 @@ class CloneServiceManager { std::shared_ptr cloneServiceManagerBackend_; }; - - } // namespace snapshotcloneserver } // namespace curve diff --git a/src/snapshotcloneserver/clone/clone_task.h b/src/snapshotcloneserver/clone/clone_task.h index 8ea5c6be51..2ddc10976e 100644 --- a/src/snapshotcloneserver/clone/clone_task.h +++ b/src/snapshotcloneserver/clone/clone_task.h @@ 
-23,17 +23,17 @@ #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_H_ #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_H_ -#include #include +#include -#include "src/snapshotcloneserver/clone/clone_core.h" +#include "src/common/concurrent/dlock.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/clone/clone_closure.h" +#include "src/snapshotcloneserver/clone/clone_core.h" +#include "src/snapshotcloneserver/common/curvefs_client.h" +#include "src/snapshotcloneserver/common/snapshotclone_metric.h" #include "src/snapshotcloneserver/common/task.h" #include "src/snapshotcloneserver/common/task_info.h" -#include "src/snapshotcloneserver/common/snapshotclone_metric.h" -#include "src/snapshotcloneserver/common/curvefs_client.h" -#include "src/snapshotcloneserver/clone/clone_closure.h" -#include "src/common/concurrent/dlock.h" using ::curve::common::DLock; @@ -42,33 +42,23 @@ namespace snapshotcloneserver { class CloneTaskInfo : public TaskInfo { public: - CloneTaskInfo(const CloneInfo &cloneInfo, - std::shared_ptr metric, - std::shared_ptr closure) + CloneTaskInfo(const CloneInfo& cloneInfo, + std::shared_ptr metric, + std::shared_ptr closure) : TaskInfo(), cloneInfo_(cloneInfo), metric_(metric), closure_(closure) {} - CloneInfo& GetCloneInfo() { - return cloneInfo_; - } + CloneInfo& GetCloneInfo() { return cloneInfo_; } - const CloneInfo& GetCloneInfo() const { - return cloneInfo_; - } + const CloneInfo& GetCloneInfo() const { return cloneInfo_; } - TaskIdType GetTaskId() const { - return cloneInfo_.GetTaskId(); - } + TaskIdType GetTaskId() const { return cloneInfo_.GetTaskId(); } - void UpdateMetric() { - metric_->Update(this); - } + void UpdateMetric() { metric_->Update(this); } - std::shared_ptr GetClosure() { - return closure_; - } + std::shared_ptr GetClosure() { return closure_; } private: CloneInfo cloneInfo_; @@ -76,20 +66,16 @@ class CloneTaskInfo : public TaskInfo { std::shared_ptr closure_; }; -std::ostream& operator<<(std::ostream& os, const CloneTaskInfo &taskInfo); +std::ostream& operator<<(std::ostream& os, const CloneTaskInfo& taskInfo); class CloneTaskBase : public Task { public: - CloneTaskBase(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) - : Task(taskId), - taskInfo_(taskInfo), - core_(core) {} - - std::shared_ptr GetTaskInfo() const { - return taskInfo_; - } + CloneTaskBase(const TaskIdType& taskId, + std::shared_ptr taskInfo, + std::shared_ptr core) + : Task(taskId), taskInfo_(taskInfo), core_(core) {} + + std::shared_ptr GetTaskInfo() const { return taskInfo_; } protected: std::shared_ptr taskInfo_; @@ -98,9 +84,8 @@ class CloneTaskBase : public Task { class CloneTask : public CloneTaskBase { public: - CloneTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) + CloneTask(const TaskIdType& taskId, std::shared_ptr taskInfo, + std::shared_ptr core) : CloneTaskBase(taskId, taskInfo, core) {} void Run() override { @@ -121,17 +106,14 @@ class CloneTask : public CloneTaskBase { } }; - class CloneCleanTask : public CloneTaskBase { public: - CloneCleanTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) + CloneCleanTask(const TaskIdType& taskId, + std::shared_ptr taskInfo, + std::shared_ptr core) : CloneTaskBase(taskId, taskInfo, core) {} - void Run() override { - core_->HandleCleanCloneOrRecoverTask(taskInfo_); - } + void Run() override { core_->HandleCleanCloneOrRecoverTask(taskInfo_); } }; struct SnapCloneCommonClosure : public SnapCloneClosure { 
@@ -145,9 +127,9 @@ struct SnapCloneCommonClosure : public SnapCloneClosure { }; struct CreateCloneChunkContext { - // 数据源 + // Data source std::string location; - // chunkid 信息 + // Chunk ID information ChunkIDInfo cidInfo; // seqNum uint64_t sn; @@ -155,16 +137,16 @@ struct CreateCloneChunkContext { uint64_t csn; // chunk size uint64_t chunkSize; - // 返回值 + // Return value int retCode; // taskid TaskIdType taskid; - // 异步请求开始时间 + // Asynchronous request start time uint64_t startTime; - // 异步请求重试总时间 + // Total retry time for asynchronous requests uint64_t clientAsyncMethodRetryTimeSec; - // chunk信息 - struct CloneChunkInfo *cloneChunkInfo; + // Chunk information + struct CloneChunkInfo* cloneChunkInfo; }; using CreateCloneChunkContextPtr = std::shared_ptr; @@ -173,21 +155,20 @@ struct CreateCloneChunkClosure : public SnapCloneClosure { CreateCloneChunkClosure( std::shared_ptr tracker, CreateCloneChunkContextPtr context) - : tracker_(tracker), - context_(context) {} + : tracker_(tracker), context_(context) {} void Run() { std::unique_ptr self_guard(this); context_->retCode = GetRetCode(); if (context_->retCode < 0) { LOG(WARNING) << "CreateCloneChunkClosure return fail" - << ", ret = " << context_->retCode - << ", location = " << context_->location - << ", logicalPoolId = " << context_->cidInfo.lpid_ - << ", copysetId = " << context_->cidInfo.cpid_ - << ", chunkId = " << context_->cidInfo.cid_ - << ", seqNum = " << context_->sn - << ", csn = " << context_->csn - << ", taskid = " << context_->taskid; + << ", ret = " << context_->retCode + << ", location = " << context_->location + << ", logicalPoolId = " << context_->cidInfo.lpid_ + << ", copysetId = " << context_->cidInfo.cpid_ + << ", chunkId = " << context_->cidInfo.cid_ + << ", seqNum = " << context_->sn + << ", csn = " << context_->csn + << ", taskid = " << context_->taskid; } tracker_->PushResultContext(context_); tracker_->HandleResponse(context_->retCode); @@ -197,21 +178,21 @@ struct CreateCloneChunkClosure : public SnapCloneClosure { }; struct RecoverChunkContext { - // chunkid 信息 + // Chunk ID information ChunkIDInfo cidInfo; - // chunk的分片index + // Part index within the chunk uint64_t partIndex; - // 总的chunk分片数 + // Total number of chunk parts uint64_t totalPartNum; - // 分片大小 + // Part size uint64_t partSize; - // 返回值 + // Return value int retCode; // taskid TaskIdType taskid; - // 异步请求开始时间 + // Asynchronous request start time uint64_t startTime; - // 异步请求重试总时间 + // Total retry time for asynchronous requests uint64_t clientAsyncMethodRetryTimeSec; }; @@ -219,17 +200,15 @@ using RecoverChunkContextPtr = std::shared_ptr; struct RecoverChunkClosure : public SnapCloneClosure { RecoverChunkClosure(std::shared_ptr tracker, - RecoverChunkContextPtr context) - : tracker_(tracker), - context_(context) {} + RecoverChunkContextPtr context) + : tracker_(tracker), context_(context) {} void Run() { std::unique_ptr self_guard(this); context_->retCode = GetRetCode(); if (context_->retCode < 0) { LOG(WARNING) << "RecoverChunkClosure return fail" << ", ret = " << context_->retCode - << ", logicalPoolId = " - << context_->cidInfo.lpid_ + << ", logicalPoolId = " << context_->cidInfo.lpid_ << ", copysetId = " << context_->cidInfo.cpid_ << ", chunkId = " << context_->cidInfo.cid_ << ", partIndex = " << context_->partIndex
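Both contexts above carry startTime and clientAsyncMethodRetryTimeSec, so retries can be bounded by elapsed wall time rather than by a retry count. A minimal sketch of that deadline check (the helper name is invented):

```cpp
#include <cstdint>
#include <ctime>

// A failed asynchronous request is retried only while the elapsed time
// since the request started stays inside the configured budget.
bool WithinRetryBudget(uint64_t startTimeSec, uint64_t retryTimeSec) {
    uint64_t nowSec = static_cast<uint64_t>(time(nullptr));
    return nowSec - startTimeSec < retryTimeSec;
}
```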
diff --git a/src/snapshotcloneserver/clone/clone_task_manager.cpp b/src/snapshotcloneserver/clone/clone_task_manager.cpp index be14fc5db6..559c22b315 100644 --- a/src/snapshotcloneserver/clone/clone_task_manager.cpp +++ b/src/snapshotcloneserver/clone/clone_task_manager.cpp @@ -21,8 +21,8 @@ */ #include "src/snapshotcloneserver/clone/clone_task_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/common/snapshotclone/snapshotclone_define.h" namespace curve { namespace snapshotcloneserver { @@ -48,9 +48,8 @@ int CloneTaskManager::Start() { return ret; } isStop_.store(false); - // isStop_标志先置,防止backEndThread先退出 - backEndThread = - std::thread(&CloneTaskManager::BackEndThreadFunc, this); + // Set the isStop_ flag first to prevent backEndThread from exiting + // prematurely + backEndThread = std::thread(&CloneTaskManager::BackEndThreadFunc, this); } return kErrCodeSuccess; } @@ -66,10 +65,8 @@ void CloneTaskManager::Stop() { } int CloneTaskManager::PushCommonTask(std::shared_ptr task) { - int ret = PushTaskInternal(task, - &commonTaskMap_, - &commonTasksLock_, - commonPool_); + int ret = + PushTaskInternal(task, &commonTaskMap_, &commonTasksLock_, commonPool_); if (ret >= 0) { cloneMetric_->UpdateBeforeTaskBegin( task->GetTaskInfo()->GetCloneInfo().GetTaskType()); @@ -80,10 +77,8 @@ int CloneTaskManager::PushCommonTask(std::shared_ptr task) { } int CloneTaskManager::PushStage1Task(std::shared_ptr task) { - int ret = PushTaskInternal(task, - &stage1TaskMap_, - &stage1TasksLock_, - stage1Pool_); + int ret = + PushTaskInternal(task, &stage1TaskMap_, &stage1TasksLock_, stage1Pool_); if (ret >= 0) { cloneMetric_->UpdateBeforeTaskBegin( task->GetTaskInfo()->GetCloneInfo().GetTaskType()); @@ -93,12 +88,9 @@ int CloneTaskManager::PushStage1Task(std::shared_ptr task) { return ret; } -int CloneTaskManager::PushStage2Task( - std::shared_ptr task) { - int ret = PushTaskInternal(task, - &stage2TaskMap_, - &stage2TasksLock_, - stage2Pool_); +int CloneTaskManager::PushStage2Task(std::shared_ptr task) { + int ret = + PushTaskInternal(task, &stage2TaskMap_, &stage2TasksLock_, stage2Pool_); if (ret >= 0) { cloneMetric_->UpdateFlattenTaskBegin(); LOG(INFO) << "Push Task Into Stage2 Pool for data install success," @@ -107,13 +99,13 @@ int CloneTaskManager::PushStage2Task( return ret; } -int CloneTaskManager::PushTaskInternal(std::shared_ptr task, - std::map > *taskMap, - Mutex *taskMapMutex, - std::shared_ptr taskPool) { - // 同一个clone的Stage1的Task和Stage2的Task的任务ID是一样的, - // clean task的ID也是一样的, - // 触发一次扫描,将已完成的任务Flush出去 +int CloneTaskManager::PushTaskInternal( + std::shared_ptr task, + std::map >* taskMap, + Mutex* taskMapMutex, std::shared_ptr taskPool) { + // Stage1 and stage2 tasks of the same clone share the same task ID, + // and so does the clean task. + // Trigger a scan to flush out the completed tasks ScanStage2Tasks(); ScanStage1Tasks(); ScanCommonTasks(); @@ -124,12 +116,9 @@ int CloneTaskManager::PushTaskInternal(std::shared_ptr task, WriteLockGuard taskMapWlock(cloneTaskMapLock_); LockGuard workingTasksLockGuard(*taskMapMutex); - std::string destination = - task->GetTaskInfo()->GetCloneInfo().GetDest(); + std::string destination = task->GetTaskInfo()->GetCloneInfo().GetDest(); - auto ret = taskMap->emplace( - destination, - task); + auto ret = taskMap->emplace(destination, task); if (!ret.second) { LOG(ERROR) << "CloneTaskManager::PushTaskInternal fail, " << "same destination exist, " @@ -152,7 +141,7 @@ int CloneTaskManager::PushTaskInternal(std::shared_ptr task, } std::shared_ptr CloneTaskManager::GetTask( - const TaskIdType &taskId) const { + const TaskIdType& taskId) const { ReadLockGuard taskMapRlock(cloneTaskMapLock_); auto it = cloneTaskMap_.find(taskId); if (it != 
cloneTaskMap_.end()) { @@ -174,16 +163,13 @@ void CloneTaskManager::BackEndThreadFunc() { void CloneTaskManager::ScanCommonTasks() { WriteLockGuard taskMapWlock(cloneTaskMapLock_); LockGuard workingTasksLock(commonTasksLock_); - for (auto it = commonTaskMap_.begin(); - it != commonTaskMap_.end();) { + for (auto it = commonTaskMap_.begin(); it != commonTaskMap_.end();) { auto taskInfo = it->second->GetTaskInfo(); - // 处理已完成的任务 + // Process completed tasks if (taskInfo->IsFinish()) { - CloneTaskType taskType = - taskInfo->GetCloneInfo().GetTaskType(); - CloneStatus status = - taskInfo->GetCloneInfo().GetStatus(); - // 移除任务并更新metric + CloneTaskType taskType = taskInfo->GetCloneInfo().GetTaskType(); + CloneStatus status = taskInfo->GetCloneInfo().GetStatus(); + // Remove task and update metric cloneMetric_->UpdateAfterTaskFinish(taskType, status); LOG(INFO) << "common task {" << " TaskInfo : " << *taskInfo @@ -200,15 +186,12 @@ void CloneTaskManager::ScanStage1Tasks() { WriteLockGuard taskMapWlock(cloneTaskMapLock_); LockGuard workingTasksLock(stage1TasksLock_); LockGuard workingTasksLockGuard(stage2TasksLock_); - for (auto it = stage1TaskMap_.begin(); - it != stage1TaskMap_.end();) { + for (auto it = stage1TaskMap_.begin(); it != stage1TaskMap_.end();) { auto taskInfo = it->second->GetTaskInfo(); - // 处理已完成的任务 + // Process completed tasks if (taskInfo->IsFinish()) { - CloneTaskType taskType = - taskInfo->GetCloneInfo().GetTaskType(); - CloneStatus status = - taskInfo->GetCloneInfo().GetStatus(); + CloneTaskType taskType = taskInfo->GetCloneInfo().GetTaskType(); + CloneStatus status = taskInfo->GetCloneInfo().GetStatus(); cloneMetric_->UpdateAfterTaskFinish(taskType, status); LOG(INFO) << "stage1 task {" << " TaskInfo : " << *taskInfo @@ -224,27 +207,22 @@ void CloneTaskManager::ScanStage1Tasks() { void CloneTaskManager::ScanStage2Tasks() { WriteLockGuard taskMapWlock(cloneTaskMapLock_); LockGuard workingTasksLockGuard(stage2TasksLock_); - for (auto it = stage2TaskMap_.begin(); - it != stage2TaskMap_.end();) { + for (auto it = stage2TaskMap_.begin(); it != stage2TaskMap_.end();) { auto taskInfo = it->second->GetTaskInfo(); - // 处理完成的任务 + // Process completed tasks if (taskInfo->IsFinish()) { - CloneTaskType taskType = - taskInfo->GetCloneInfo().GetTaskType(); - CloneStatus status = - taskInfo->GetCloneInfo().GetStatus(); - // retrying 状态的任务需要重试 + CloneTaskType taskType = taskInfo->GetCloneInfo().GetTaskType(); + CloneStatus status = taskInfo->GetCloneInfo().GetStatus(); + // Tasks in the retrying state need to be retried if (CloneStatus::retrying == status) { if (CloneTaskType::kClone == taskType) { - taskInfo->GetCloneInfo(). - SetStatus(CloneStatus::cloning); + taskInfo->GetCloneInfo().SetStatus(CloneStatus::cloning); } else { - taskInfo->GetCloneInfo(). 
-                        SetStatus(CloneStatus::recovering);
+                    taskInfo->GetCloneInfo().SetStatus(CloneStatus::recovering);
                 }
                 taskInfo->Reset();
                 stage2Pool_->PushTask(it->second);
-                // 其他任务结束更新metric
+                // For other finished tasks, update the metric
             } else {
                 cloneMetric_->UpdateAfterFlattenTaskFinish(status);
                 LOG(INFO) << "stage2 task {"
@@ -261,4 +239,3 @@ void CloneTaskManager::ScanStage2Tasks() {
 
 }  // namespace snapshotcloneserver
 }  // namespace curve
-
diff --git a/src/snapshotcloneserver/clone/clone_task_manager.h b/src/snapshotcloneserver/clone/clone_task_manager.h
index d9607ccedc..916d25deae 100644
--- a/src/snapshotcloneserver/clone/clone_task_manager.h
+++ b/src/snapshotcloneserver/clone/clone_task_manager.h
@@ -23,50 +23,46 @@
 #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_MANAGER_H_
 #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_MANAGER_H_
 
-#include
-#include
 #include
-#include
 #include
+#include
+#include
+#include
 #include   // NOLINT
 
-#include "src/snapshotcloneserver/clone/clone_task.h"
-#include "src/snapshotcloneserver/common/thread_pool.h"
 #include "src/common/concurrent/rw_lock.h"
 #include "src/common/snapshotclone/snapshotclone_define.h"
+#include "src/snapshotcloneserver/clone/clone_core.h"
+#include "src/snapshotcloneserver/clone/clone_task.h"
 #include "src/snapshotcloneserver/common/config.h"
 #include "src/snapshotcloneserver/common/snapshotclone_metric.h"
-#include "src/snapshotcloneserver/clone/clone_core.h"
+#include "src/snapshotcloneserver/common/thread_pool.h"
 
-using ::curve::common::RWLock;
+using ::curve::common::LockGuard;
+using ::curve::common::Mutex;
 using ::curve::common::ReadLockGuard;
+using ::curve::common::RWLock;
 using ::curve::common::WriteLockGuard;
-using ::curve::common::Mutex;
-using ::curve::common::LockGuard;
 
 namespace curve {
 namespace snapshotcloneserver {
 
 class CloneTaskManager {
  public:
-    explicit CloneTaskManager(
-        std::shared_ptr core,
-        std::shared_ptr cloneMetric)
+    explicit CloneTaskManager(std::shared_ptr core,
+                              std::shared_ptr cloneMetric)
         : isStop_(true),
           core_(core),
           cloneMetric_(cloneMetric),
           cloneTaskManagerScanIntervalMs_(0) {}
 
-    ~CloneTaskManager() {
-        Stop();
-    }
+    ~CloneTaskManager() { Stop(); }
 
     int Init(std::shared_ptr stage1Pool,
-        std::shared_ptr stage2Pool,
-        std::shared_ptr commonPool,
-        const SnapshotCloneServerOptions &option) {
-        cloneTaskManagerScanIntervalMs_ =
-            option.cloneTaskManagerScanIntervalMs;
+             std::shared_ptr stage2Pool,
+             std::shared_ptr commonPool,
+             const SnapshotCloneServerOptions& option) {
+        cloneTaskManagerScanIntervalMs_ = option.cloneTaskManagerScanIntervalMs;
         stage1Pool_ = stage1Pool;
         stage2Pool_ = stage2Pool;
         commonPool_ = commonPool;
@@ -78,40 +74,39 @@ class CloneTaskManager {
     void Stop();
 
     /**
-     * @brief 往任务管理器中加入任务
+     * @brief Add a task to the task manager
      *
-     * 用于非Lazy克隆及其他删除克隆等管控面的请求
+     * Used for control-plane requests such as non-lazy clone and clone
+     * deletion
      *
-     * @param task 任务
+     * @param task task
     *
-     * @return 错误码
+     * @return error code
     */
-    int PushCommonTask(
-        std::shared_ptr task);
+    int PushCommonTask(std::shared_ptr task);
 
     /**
-     * @brief 往任务管理器中加入LazyClone阶段一的的任务
+     * @brief Add a LazyClone stage-1 task to the task manager
     *
-     * @param task 任务
+     * @param task task
     *
-     * @return 错误码
+     * @return error code
     */
-    int PushStage1Task(
-        std::shared_ptr task);
     /**
-     * @brief 往任务管理器中加入LazyClone阶段二的的任务
+     * @brief Add a LazyClone stage-2 task to the task manager
      *
-     * 目前只用于重启恢复时,将Lazy克隆恢复克隆数据阶段的任务加入任务管理器
+     * Currently only used during restart recovery, to re-add tasks that are
+     * in the lazy-clone data-recovery stage
      *
-     * @param task 任务
+     * @param task task
      *
-     * @return 错误码
+     * @return error code
      */
-    int PushStage2Task(
-        std::shared_ptr task);
+    int PushStage2Task(std::shared_ptr task);
 
-    std::shared_ptr GetTask(const TaskIdType &taskId) const;
+    std::shared_ptr GetTask(const TaskIdType& taskId) const;
 
  private:
     void BackEndThreadFunc();
@@ -120,51 +115,52 @@ class CloneTaskManager {
     void ScanStage2Tasks();
 
     /**
-     * @brief 往对应线程池和map中push任务
+     * @brief Push a task to the corresponding thread pool and task map
      *
-     * @param task 任务
-     * @param taskMap 任务表
-     * @param taskMapMutex 任务表和线程池的锁
-     * @param taskPool 线程池
+     * @param task task
+     * @param taskMap task table
+     * @param taskMapMutex lock protecting the task table and thread pool
+     * @param taskPool thread pool
      *
-     * @return 错误码
+     * @return error code
      */
     int PushTaskInternal(
         std::shared_ptr task,
-        std::map > *taskMap,
-        Mutex *taskMapMutex,
-        std::shared_ptr taskPool);
+        std::map >* taskMap,
+        Mutex* taskMapMutex, std::shared_ptr taskPool);
 
  private:
-    // 后端线程
+    // Background thread
     std::thread backEndThread;
 
-    // id->克隆任务表
+    // Task ID -> clone task table
     std::map > cloneTaskMap_;
     mutable RWLock cloneTaskMapLock_;
 
-    // 存放stage1Pool_池的当前任务,key为destination
+    // Current tasks in the stage1Pool_ pool, keyed by destination
     std::map > stage1TaskMap_;
     mutable Mutex stage1TasksLock_;
 
-    // 存放stage1Poo2_池的当前任务,key为destination
+    // Current tasks in the stage2Pool_ pool, keyed by destination
     std::map > stage2TaskMap_;
     mutable Mutex stage2TasksLock_;
 
-    // 存放commonPool_池的当前任务
+    // Current tasks in the commonPool_ pool
     std::map > commonTaskMap_;
     mutable Mutex commonTasksLock_;
 
-    // 用于Lazy克隆元数据部分的线程池
+    // Thread pool for the lazy-clone metadata stage
     std::shared_ptr stage1Pool_;
 
-    // 用于Lazy克隆数据部分的线程池
+    // Thread pool for the lazy-clone data stage
     std::shared_ptr stage2Pool_;
 
-    // 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池
+    // Thread pool for control-plane requests such as non-lazy clone and
+    // clone deletion
     std::shared_ptr commonPool_;
 
-    // 当前任务管理是否停止,用于支持start,stop功能
+    // Whether task management is currently stopped; supports the Start/Stop
+    // functions
     std::atomic_bool isStop_;
 
     // clone core
@@ -173,16 +169,11 @@ class CloneTaskManager {
     // metric
     std::shared_ptr cloneMetric_;
 
-    // CloneTaskManager 后台线程扫描间隔
+    // Scan interval of the CloneTaskManager background thread
     uint32_t cloneTaskManagerScanIntervalMs_;
 };
 
 }  // namespace snapshotcloneserver
 }  // namespace curve
-
-
-
-
-
 #endif  // SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_MANAGER_H_
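For orientation, a sketch of how this manager would be wired up at startup. The helper function is illustrative rather than code from this tree, and it assumes ThreadPool is the pool type behind the elided shared_ptr parameters of Init():

    #include <memory>

    // Illustrative wiring of CloneTaskManager; core, metric, the three pools,
    // and option are assumed to be built by the caller during server startup.
    std::unique_ptr<CloneTaskManager> StartTaskManager(
        std::shared_ptr<CloneCore> core, std::shared_ptr<CloneMetric> metric,
        std::shared_ptr<ThreadPool> stage1Pool,
        std::shared_ptr<ThreadPool> stage2Pool,
        std::shared_ptr<ThreadPool> commonPool,
        const SnapshotCloneServerOptions& option) {
        std::unique_ptr<CloneTaskManager> mgr(
            new CloneTaskManager(core, metric));
        if (mgr->Init(stage1Pool, stage2Pool, commonPool, option) != 0) {
            return nullptr;
        }
        if (mgr->Start() != 0) {  // spawns the background scan thread
            return nullptr;
        }
        return mgr;  // Stop() runs automatically in the destructor
    }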
diff --git a/src/snapshotcloneserver/common/config.h b/src/snapshotcloneserver/common/config.h
index d5e93a24c1..3c8cc13263 100644
--- a/src/snapshotcloneserver/common/config.h
+++ b/src/snapshotcloneserver/common/config.h
@@ -23,9 +23,9 @@
 #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_CONFIG_H_
 #define SRC_SNAPSHOTCLONESERVER_COMMON_CONFIG_H_
 
-
-#include
+#include
 #include
+
 #include "src/common/concurrent/dlock.h"
 
 namespace curve {
@@ -41,58 +41,61 @@ struct CurveClientOptions {
     std::string mdsRootUser;
     // mds root password
     std::string mdsRootPassword;
-    // 调用client方法的重试总时间
+    // Total retry time for calling client methods
     uint64_t clientMethodRetryTimeSec;
-    // 调用client方法重试间隔时间
+    // Retry interval for calling client methods
     uint64_t clientMethodRetryIntervalMs;
 };
 
 // snapshotcloneserver options
 struct SnapshotCloneServerOptions {
     // snapshot&clone server address
-    std::string  addr;
-    // 调用client异步方法重试总时间
+    std::string addr;
+    // Total retry time for calling asynchronous client methods
     uint64_t clientAsyncMethodRetryTimeSec;
-    // 调用client异步方法重试时间间隔
+    // Retry interval for calling asynchronous client methods
     uint64_t clientAsyncMethodRetryIntervalMs;
-    // 快照工作线程数
+    // Number of snapshot worker threads
     int snapshotPoolThreadNum;
-    // 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms)
+    // Interval at which the snapshot background thread scans the waiting
+    // queue and working queue (unit: ms)
     uint32_t snapshotTaskManagerScanIntervalMs;
-    // 转储chunk分片大小
+    // Shard size for dumping chunks
     uint64_t chunkSplitSize;
-    // CheckSnapShotStatus调用间隔
+    // CheckSnapShotStatus call interval
     uint32_t checkSnapshotStatusIntervalMs;
-    // 最大快照数
+    // Maximum number of snapshots
     uint32_t maxSnapshotLimit;
     // snapshotcore threadpool threadNum
     uint32_t snapshotCoreThreadNum;
     // mdsSessionTimeUs
     uint32_t mdsSessionTimeUs;
-    // ReadChunkSnapshot同时进行的异步请求数量
+    // Number of concurrent asynchronous requests issued by ReadChunkSnapshot
    uint32_t readChunkSnapshotConcurrency;
-    // 用于Lazy克隆元数据部分的线程池线程数
+    // Number of threads in the pool for the lazy-clone metadata stage
     int stage1PoolThreadNum;
-    // 用于Lazy克隆数据部分的线程池线程数
+    // Number of threads in the pool for the lazy-clone data stage
     int stage2PoolThreadNum;
-    // 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池线程数
+    // Number of threads in the pool for control-plane requests such as
+    // non-lazy clone and clone deletion
     int commonPoolThreadNum;
-    // CloneTaskManager 后台线程扫描间隔
+    // Scan interval of the CloneTaskManager background thread
     uint32_t cloneTaskManagerScanIntervalMs;
-    // clone chunk分片大小
+    // Clone chunk shard size
     uint64_t cloneChunkSplitSize;
-    // 克隆临时目录
+    // Clone temporary directory
     std::string cloneTempDir;
     // mds root user
     std::string mdsRootUser;
-    // CreateCloneChunk同时进行的异步请求数量
+    // Number of concurrent asynchronous requests issued by CreateCloneChunk
     uint32_t createCloneChunkConcurrency;
-    // RecoverChunk同时进行的异步请求数量
+    // Number of concurrent asynchronous requests issued by RecoverChunk
     uint32_t recoverChunkConcurrency;
-    // 引用计数后台扫描每条记录间隔
+    // Interval between records in the reference-count background scan
     uint32_t backEndReferenceRecordScanIntervalMs;
-    // 引用计数后台扫描每轮间隔
+    // Interval between rounds of the reference-count background scan
     uint32_t backEndReferenceFuncScanIntervalMs;
     // dlock options
     DLockOpts dlockOpts;
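Worth spelling out: the paired *RetryTimeSec / *RetryIntervalMs options bound retries by wall-clock time, not by attempt count. A quick sketch with illustrative values (these are not shipped defaults):

    // Illustrative values only: a 300 s budget with a 500 ms interval allows
    // roughly 600 attempts before the retry loop gives up.
    CurveClientOptions opts;
    opts.clientMethodRetryTimeSec = 300;     // total wall-clock retry budget
    opts.clientMethodRetryIntervalMs = 500;  // sleep between attempts
    uint64_t roughMaxAttempts =
        opts.clientMethodRetryTimeSec * 1000 / opts.clientMethodRetryIntervalMs;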
diff --git a/src/snapshotcloneserver/common/curvefs_client.h b/src/snapshotcloneserver/common/curvefs_client.h
index 131f01659c..72db5e3009 100644
--- a/src/snapshotcloneserver/common/curvefs_client.h
+++ b/src/snapshotcloneserver/common/curvefs_client.h
@@ -15,42 +15,41 @@
  */
 
 /*************************************************************************
-    > File Name: curvefs_client.h
-    > Author:
-    > Created Time: Wed Nov 21 11:33:46 2018
+ > File Name: curvefs_client.h
+ > Author:
+ > Created Time: Wed Nov 21 11:33:46 2018
 ************************************************************************/
 
 #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_CURVEFS_CLIENT_H_
 #define SRC_SNAPSHOTCLONESERVER_COMMON_CURVEFS_CLIENT_H_
 
-
-#include
-#include
-#include
 #include   //NOLINT
+#include
+#include
 #include   //NOLINT
 
-#include "proto/nameserver2.pb.h"
-#include "proto/chunk.pb.h"
+#include
 
+#include "proto/chunk.pb.h"
+#include "proto/nameserver2.pb.h"
 #include "src/client/client_common.h"
-#include "src/client/libcurve_snapshot.h"
 #include "src/client/libcurve_file.h"
+#include "src/client/libcurve_snapshot.h"
 #include "src/common/snapshotclone/snapshotclone_define.h"
-#include "src/snapshotcloneserver/common/config.h"
 #include "src/common/timeutility.h"
+#include "src/snapshotcloneserver/common/config.h"
 
-using ::curve::client::SegmentInfo;
-using ::curve::client::LogicPoolID;
-using ::curve::client::CopysetID;
 using ::curve::client::ChunkID;
-using ::curve::client::ChunkInfoDetail;
 using ::curve::client::ChunkIDInfo;
-using ::curve::client::FInfo;
+using ::curve::client::ChunkInfoDetail;
+using ::curve::client::CopysetID;
+using ::curve::client::FileClient;
 using ::curve::client::FileStatus;
+using ::curve::client::FInfo;
+using ::curve::client::LogicPoolID;
+using ::curve::client::SegmentInfo;
 using ::curve::client::SnapCloneClosure;
-using ::curve::client::UserInfo;
 using ::curve::client::SnapshotClient;
-using ::curve::client::FileClient;
+using ::curve::client::UserInfo;
 
 namespace curve {
 namespace snapshotcloneserver {
 
@@ -60,15 +59,13 @@ using RetryCondition = std::function;
 
 class RetryHelper {
  public:
-    RetryHelper(const RetryMethod &retryMethod,
-                const RetryCondition &condition) {
+    RetryHelper(const RetryMethod& retryMethod,
+                const RetryCondition& condition) {
         retryMethod_ = retryMethod;
         condition_ = condition;
     }
 
-    int RetryTimeSecAndReturn(
-        uint64_t retryTimeSec,
-        uint64_t retryIntervalMs) {
+    int RetryTimeSecAndReturn(uint64_t retryTimeSec, uint64_t retryIntervalMs) {
         int ret = -LIBCURVE_ERROR::FAILED;
         uint64_t startTime = TimeUtility::GetTimeofDaySec();
         uint64_t nowTime = startTime;
@@ -85,7 +82,7 @@
     }
 
  private:
-    RetryMethod  retryMethod_;
+    RetryMethod retryMethod_;
     RetryCondition condition_;
 };
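Since the retry loop body falls outside this hunk, here is a hedged sketch of the intended RetryHelper call pattern. The lambdas and the surrounding variables are assumptions, as is the convention that the condition returns true while another retry is warranted:

    // Illustrative RetryHelper usage; client, filename, user, and options are
    // assumed to exist in the calling scope, and the condition semantics are
    // an assumption, since the loop body is cut off in this hunk.
    uint64_t seq = 0;
    RetryHelper helper(
        [&]() { return client->CreateSnapshot(filename, user, &seq); },
        [](int ret) { return ret == -LIBCURVE_ERROR::FAILED; });
    int ret = helper.RetryTimeSecAndReturn(options.clientMethodRetryTimeSec,
                                           options.clientMethodRetryIntervalMs);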
@@ -95,432 +92,366 @@ class CurveFsClient {
  public:
     virtual ~CurveFsClient() {}
 
     /**
-     * @brief client 初始化
+     * @brief client initialization
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int Init(const CurveClientOptions &options) = 0;
+    virtual int Init(const CurveClientOptions& options) = 0;
 
     /**
-     * @brief client 资源回收
+     * @brief client resource cleanup
      *
-     * @return 错误码
+     * @return error code
      */
     virtual int UnInit() = 0;
 
     /**
-     * @brief 创建快照
+     * @brief Create a snapshot
      *
-     * @param filename 文件名
-     * @param user 用户信息
-     * @param[out] seq 快照版本号
+     * @param filename file name
+     * @param user user information
+     * @param[out] seq snapshot version number
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int CreateSnapshot(const std::string &filename,
-                               const std::string &user,
-                               uint64_t *seq) = 0;
+    virtual int CreateSnapshot(const std::string& filename,
+                               const std::string& user, uint64_t* seq) = 0;
 
     /**
-     * @brief 删除快照
+     * @brief Delete a snapshot
      *
-     * @param filename 文件名
-     * @param user 用户信息
-     * @param seq 快照版本号
+     * @param filename file name
+     * @param user user information
+     * @param seq snapshot version number
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int DeleteSnapshot(const std::string &filename,
-                               const std::string &user,
-                               uint64_t seq) = 0;
+    virtual int DeleteSnapshot(const std::string& filename,
+                               const std::string& user, uint64_t seq) = 0;
 
     /**
-     * @brief 获取快照文件信息
+     * @brief Get snapshot file information
      *
-     * @param filename 文件名
-     * @param user 用户名
-     * @param seq 快照版本号
-     * @param[out] snapInfo 快照文件信息
+     * @param filename file name
+     * @param user user name
+     * @param seq snapshot version number
+     * @param[out] snapInfo snapshot file information
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int GetSnapshot(const std::string &filename,
-                            const std::string &user,
-                            uint64_t seq, FInfo* snapInfo) = 0;
+    virtual int GetSnapshot(const std::string& filename,
+                            const std::string& user, uint64_t seq,
+                            FInfo* snapInfo) = 0;
 
     /**
-     * @brief 查询快照文件segment信息
+     * @brief Query snapshot file segment information
      *
-     * @param filename 文件名
-     * @param user 用户信息
-     * @param seq 快照版本号
-     * @param offset 偏移值
-     * @param segInfo segment信息
+     * @param filename file name
+     * @param user user information
+     * @param seq snapshot version number
+     * @param offset offset value
+     * @param segInfo segment information
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int GetSnapshotSegmentInfo(const std::string &filename,
-                                       const std::string &user,
-                                       uint64_t seq,
-                                       uint64_t offset,
-                                       SegmentInfo *segInfo) = 0;
+    virtual int GetSnapshotSegmentInfo(const std::string& filename,
+                                       const std::string& user, uint64_t seq,
+                                       uint64_t offset,
+                                       SegmentInfo* segInfo) = 0;
 
     /**
-     * @brief 读取snapshot chunk的数据
+     * @brief Read snapshot chunk data
      *
-     * @param cidinfo chunk ID 信息
-     * @param seq 快照版本号
-     * @param offset 偏移值
-     * @param len 长度
-     * @param[out] buf buffer指针
-     * @param: scc是异步回调
+     * @param cidinfo chunk ID information
+     * @param seq snapshot version number
+     * @param offset offset value
+     * @param len length
+     * @param[out] buf buffer pointer
+     * @param scc asynchronous callback
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int ReadChunkSnapshot(ChunkIDInfo cidinfo,
-                                  uint64_t seq,
-                                  uint64_t offset,
-                                  uint64_t len,
-                                  char *buf,
-                                  SnapCloneClosure* scc) = 0;
+    virtual int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq,
+                                  uint64_t offset, uint64_t len, char* buf,
+                                  SnapCloneClosure* scc) = 0;
 
     /**
-     * 获取快照状态
-     * @param: userinfo是用户信息
-     * @param: filenam文件名
-     * @param: seq是文件版本号信息
-     * @param: filestatus 快照文件状态
+     * Get the snapshot status
+     * @param userinfo user information
+     * @param filename file name
+     * @param seq file version number
+     * @param filestatus snapshot file status
      */
-    virtual int CheckSnapShotStatus(std::string filename,
-                                    std::string user,
-                                    uint64_t seq,
-                                    FileStatus* filestatus) = 0;
+    virtual int CheckSnapShotStatus(std::string filename, std::string user,
+                                    uint64_t seq, FileStatus* filestatus) = 0;
 
     /**
-     * @brief 获取chunk的版本号信息
+     * @brief Get the version number information of a chunk
      *
-     * @param cidinfo chunk ID 信息
-     * @param chunkInfo chunk详细信息
+     * @param cidinfo chunk ID information
+     *
@param chunkInfo chunk Details * - * @return 错误码 + * @return error code */ - virtual int GetChunkInfo(const ChunkIDInfo &cidinfo, - ChunkInfoDetail *chunkInfo) = 0; + virtual int GetChunkInfo(const ChunkIDInfo& cidinfo, + ChunkInfoDetail* chunkInfo) = 0; /** - * @brief 创建clone文件 + * @brief Create clone file * @detail - * - 若是clone,sn重置为初始值 - * - 若是recover,sn不变 - * - * @param source clone源文件名 - * @param filename clone目标文件名 - * @param user 用户信息 - * @param size 文件大小 - * @param sn 版本号 - * @param chunkSize chunk大小 + * - If clone, reset sn to initial value + * - If recover, sn remains unchanged + * + * @param source clone Source file name + * @param filename clone Target filename + * @param user user information + * @param size File size + * @param sn version number + * @param chunkSize chunk size * @param stripeUnit stripe size * @param stripeCount stripe count - * @param[out] fileInfo 文件信息 + * @param[out] fileInfo file information * - * @return 错误码 + * @return error code */ - virtual int CreateCloneFile( - const std::string &source, - const std::string &filename, - const std::string &user, - uint64_t size, - uint64_t sn, - uint32_t chunkSize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - FInfo* fileInfo) = 0; + virtual int CreateCloneFile(const std::string& source, + const std::string& filename, + const std::string& user, uint64_t size, + uint64_t sn, uint32_t chunkSize, + uint64_t stripeUnit, uint64_t stripeCount, + const std::string& poolset, + FInfo* fileInfo) = 0; /** - * @brief lazy 创建clone chunk + * @brief lazy creation of a clone chunk * @detail - * - location的格式定义为 A@B的形式。 - * - 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址; - * - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs - * - * @param location 数据源的url - * @param chunkidinfo 目标chunk - * @param sn chunk的序列号 - * @param csn correct sn - * @param chunkSize chunk的大小 - * @param: scc是异步回调 - * - * @return 错误码 + * - The location format is defined as A@B. + * - If the source data is on S3, the location format is uri@s3, where uri + * is the actual chunk object's address. + * - If the source data is on CurveFS, the location format is + * /filename/chunkindex@cs. 
+     *
+     * @param location URL of the data source
+     * @param chunkidinfo Target chunk
+     * @param sn chunk's sequence number
+     * @param csn correct sequence number
+     * @param chunkSize Size of the chunk
+     * @param scc Asynchronous callback
+     *
+     * @return Error code
      */
-    virtual int CreateCloneChunk(
-        const std::string &location,
-        const ChunkIDInfo &chunkidinfo,
-        uint64_t sn,
-        uint64_t csn,
-        uint64_t chunkSize,
-        SnapCloneClosure* scc) = 0;
-
+    virtual int CreateCloneChunk(const std::string& location,
+                                 const ChunkIDInfo& chunkidinfo, uint64_t sn,
+                                 uint64_t csn, uint64_t chunkSize,
+                                 SnapCloneClosure* scc) = 0;
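To make the A@B location format described above concrete, two illustrative values (the object name, file name, and chunk index are invented for the example):

    // Illustrative CreateCloneChunk location strings (values invented):
    //  - source chunk object on S3:          "<chunk-object-uri>@s3"
    //  - chunk 5 of /clonesource on CurveFS: "/filename/chunkindex@cs"
    const std::string s3Location = "snapfile-chunk-5@s3";
    const std::string curvefsLocation = "/clonesource/5@cs";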
     /**
-     * @brief 实际恢复chunk数据
+     * @brief Actually recover the chunk data
      *
      * @param chunkidinfo chunkidinfo
-     * @param offset 偏移
-     * @param len 长度
-     * @param: scc是异步回调
+     * @param offset offset
+     * @param len length
+     * @param scc asynchronous callback
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int RecoverChunk(
-        const ChunkIDInfo &chunkidinfo,
-        uint64_t offset,
-        uint64_t len,
-        SnapCloneClosure* scc) = 0;
+    virtual int RecoverChunk(const ChunkIDInfo& chunkidinfo, uint64_t offset,
+                             uint64_t len, SnapCloneClosure* scc) = 0;
 
     /**
-     * @brief 通知mds完成Clone Meta
+     * @brief Notify mds to complete Clone Meta
      *
-     * @param filename 目标文件名
-     * @param user 用户名
+     * @param filename target file name
+     * @param user user name
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int CompleteCloneMeta(
-        const std::string &filename,
-        const std::string &user) = 0;
+    virtual int CompleteCloneMeta(const std::string& filename,
+                                  const std::string& user) = 0;
 
     /**
-     * @brief 通知mds完成Clone Chunk
+     * @brief Notify mds to complete Clone Chunk
      *
-     * @param filename 目标文件名
-     * @param user 用户名
+     * @param filename target file name
+     * @param user user name
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int CompleteCloneFile(
-        const std::string &filename,
-        const std::string &user) = 0;
+    virtual int CompleteCloneFile(const std::string& filename,
+                                  const std::string& user) = 0;
 
     /**
-     * @brief 设置clone文件状态
+     * @brief Set the clone file status
      *
-     * @param filename 文件名
-     * @param filestatus 要设置的目标状态
-     * @param user 用户名
+     * @param filename file name
+     * @param filestatus the target status to set
+     * @param user user name
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int SetCloneFileStatus(
-        const std::string &filename,
-        const FileStatus& filestatus,
-        const std::string &user) = 0;
+    virtual int SetCloneFileStatus(const std::string& filename,
+                                   const FileStatus& filestatus,
+                                   const std::string& user) = 0;
 
     /**
-     * @brief 获取文件信息
+     * @brief Get file information
      *
-     * @param filename 文件名
-     * @param user 用户名
-     * @param[out] fileInfo 文件信息
+     * @param filename file name
+     * @param user user name
+     * @param[out] fileInfo file information
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int GetFileInfo(
-        const std::string &filename,
-        const std::string &user,
-        FInfo* fileInfo) = 0;
+    virtual int GetFileInfo(const std::string& filename,
+                            const std::string& user, FInfo* fileInfo) = 0;
 
     /**
-     * @brief 查询或分配文件segment信息
+     * @brief Query or allocate file segment information
      *
-     * @param allocate 是否分配
-     * @param offset 偏移值
-     * @param fileInfo 文件信息
-     * @param user 用户名
-     * @param segInfo segment信息
+     * @param allocate whether to allocate
+     * @param offset offset value
+     * @param fileInfo file information
+     * @param user user name
+     * @param segInfo segment information
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int GetOrAllocateSegmentInfo(
-        bool allocate,
-        uint64_t offset,
-        FInfo* fileInfo,
-        const std::string &user,
-        SegmentInfo *segInfo) = 0;
+    virtual int GetOrAllocateSegmentInfo(bool allocate, uint64_t offset,
+                                         FInfo* fileInfo,
+                                         const std::string& user,
+                                         SegmentInfo* segInfo) = 0;
 
     /**
-     * @brief 为recover rename复制的文件
+     * @brief Rename the file copied for recover
      *
-     * @param user 用户信息
-     * @param originId 被恢复的原始文件Id
-     * @param destinationId 克隆出的目标文件Id
-     * @param origin 被恢复的原始文件名
-     * @param destination 克隆出的目标文件
+     * @param user user information
+     * @param originId ID of the original file being recovered
+     * @param destinationId ID of the cloned target file
+     * @param origin name of the original file being recovered
+     * @param destination name of the cloned target file
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int RenameCloneFile(
-        const std::string &user,
-        uint64_t originId,
-        uint64_t destinationId,
-        const std::string &origin,
-        const std::string &destination) = 0;
-
+    virtual int RenameCloneFile(const std::string& user, uint64_t originId,
+                                uint64_t destinationId,
+                                const std::string& origin,
+                                const std::string& destination) = 0;
 
     /**
-     * @brief 删除文件
+     * @brief Delete a file
      *
-     * @param fileName 文件名
-     * @param user 用户名
-     * @param fileId 删除文件的inodeId
+     * @param fileName file name
+     * @param user user name
+     * @param fileId inode ID of the file to delete
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int DeleteFile(
-        const std::string &fileName,
-        const std::string &user,
-        uint64_t fileId) = 0;
+    virtual int DeleteFile(const std::string& fileName, const std::string& user,
+                           uint64_t fileId) = 0;
 
     /**
-     * @brief 创建目录
+     * @brief Create a directory
      *
-     * @param dirpath 目录名
-     * @param user 用户名
+     * @param dirpath directory name
+     * @param user user name
      *
-     * @return 错误码
+     * @return error code
      */
-    virtual int Mkdir(const std::string& dirpath,
-                      const std::string &user) = 0;
+    virtual int Mkdir(const std::string& dirpath, const std::string& user) = 0;
 
     /**
-     * @brief 变更文件的owner
+     * @brief Change the owner of a file
      *
-     * @param filename 文件名
-     * @param newOwner 新的owner
+     * @param filename file name
+     * @param newOwner new owner
      *
-     * @return 错误码
+     * @return error code
      */
     virtual int ChangeOwner(const std::string& filename,
-                    const std::string& newOwner) = 0;
+                            const std::string& newOwner) = 0;
 };
 
 class CurveFsClientImpl : public CurveFsClient {
  public:
     CurveFsClientImpl(std::shared_ptr snapClient,
-                      std::shared_ptr fileClient) :
-        snapClient_(snapClient), fileClient_(fileClient) {}
+                      std::shared_ptr fileClient)
+        : snapClient_(snapClient), fileClient_(fileClient) {}
 
     virtual ~CurveFsClientImpl() {}
 
-    // 以下接口定义见CurveFsClient接口注释
-    int Init(const CurveClientOptions &options) override;
+    // See the CurveFsClient interface comments for the methods below
+    int Init(const CurveClientOptions& options) override;
 
     int UnInit() override;
 
-    int CreateSnapshot(const std::string &filename,
-                       const std::string &user,
-                       uint64_t *seq) override;
-
-    int DeleteSnapshot(const std::string &filename,
-                       const std::string &user,
-                       uint64_t seq) override;
-
-    int GetSnapshot(const std::string &filename,
-                    const std::string &user,
-                    uint64_t seq,
-                    FInfo* snapInfo) override;
-
-    int GetSnapshotSegmentInfo(const std::string &filename,
-                               const std::string &user,
-                               uint64_t seq,
-                               uint64_t offset,
-                               SegmentInfo *segInfo) override;
-
-    int ReadChunkSnapshot(ChunkIDInfo cidinfo,
-                          uint64_t seq,
-                          uint64_t offset,
-                          uint64_t len,
-                          char *buf,
-                          SnapCloneClosure* scc) override;
-
-    int CheckSnapShotStatus(std::string filename,
-                            std::string user,
-                            uint64_t seq,
-
FileStatus* filestatus) override; - - int GetChunkInfo(const ChunkIDInfo &cidinfo, - ChunkInfoDetail *chunkInfo) override; - - int CreateCloneFile( - const std::string &source, - const std::string &filename, - const std::string &user, - uint64_t size, - uint64_t sn, - uint32_t chunkSize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - FInfo* fileInfo) override; - - int CreateCloneChunk( - const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure* scc) override; - - int RecoverChunk( - const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure* scc) override; - - int CompleteCloneMeta( - const std::string &filename, - const std::string &user) override; - - int CompleteCloneFile( - const std::string &filename, - const std::string &user) override; - - int SetCloneFileStatus( - const std::string &filename, - const FileStatus& filestatus, - const std::string &user) override; - - int GetFileInfo( - const std::string &filename, - const std::string &user, - FInfo* fileInfo) override; - - int GetOrAllocateSegmentInfo( - bool allocate, - uint64_t offset, - FInfo* fileInfo, - const std::string &user, - SegmentInfo *segInfo) override; - - int RenameCloneFile( - const std::string &user, - uint64_t originId, - uint64_t destinationId, - const std::string &origin, - const std::string &destination) override; - - int DeleteFile( - const std::string &fileName, - const std::string &user, - uint64_t fileId) override; - - int Mkdir(const std::string& dirpath, - const std::string &user) override; + int CreateSnapshot(const std::string& filename, const std::string& user, + uint64_t* seq) override; + + int DeleteSnapshot(const std::string& filename, const std::string& user, + uint64_t seq) override; + + int GetSnapshot(const std::string& filename, const std::string& user, + uint64_t seq, FInfo* snapInfo) override; + + int GetSnapshotSegmentInfo(const std::string& filename, + const std::string& user, uint64_t seq, + uint64_t offset, SegmentInfo* segInfo) override; + + int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, + SnapCloneClosure* scc) override; + + int CheckSnapShotStatus(std::string filename, std::string user, + uint64_t seq, FileStatus* filestatus) override; + + int GetChunkInfo(const ChunkIDInfo& cidinfo, + ChunkInfoDetail* chunkInfo) override; + + int CreateCloneFile(const std::string& source, const std::string& filename, + const std::string& user, uint64_t size, uint64_t sn, + uint32_t chunkSize, uint64_t stripeUnit, + uint64_t stripeCount, const std::string& poolset, + FInfo* fileInfo) override; + + int CreateCloneChunk(const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, + uint64_t csn, uint64_t chunkSize, + SnapCloneClosure* scc) override; + + int RecoverChunk(const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc) override; + + int CompleteCloneMeta(const std::string& filename, + const std::string& user) override; + + int CompleteCloneFile(const std::string& filename, + const std::string& user) override; + + int SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const std::string& user) override; + + int GetFileInfo(const std::string& filename, const std::string& user, + FInfo* fileInfo) override; + + int GetOrAllocateSegmentInfo(bool allocate, uint64_t offset, + FInfo* fileInfo, const std::string& user, + SegmentInfo* segInfo) 
override; + + int RenameCloneFile(const std::string& user, uint64_t originId, + uint64_t destinationId, const std::string& origin, + const std::string& destination) override; + + int DeleteFile(const std::string& fileName, const std::string& user, + uint64_t fileId) override; + + int Mkdir(const std::string& dirpath, const std::string& user) override; int ChangeOwner(const std::string& filename, const std::string& newOwner) override; private: - UserInfo GetUserInfo(const std::string &user) { + UserInfo GetUserInfo(const std::string& user) { if (user == mdsRootUser_) { return UserInfo(mdsRootUser_, mdsRootPassword_); } else { diff --git a/src/snapshotcloneserver/common/snapshotclone_info.h b/src/snapshotcloneserver/common/snapshotclone_info.h index 766ae00e05..fb7804d1f6 100644 --- a/src/snapshotcloneserver/common/snapshotclone_info.h +++ b/src/snapshotcloneserver/common/snapshotclone_info.h @@ -23,10 +23,10 @@ #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_INFO_H_ #define SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_INFO_H_ -#include -#include #include #include +#include +#include #include "src/common/snapshotclone/snapshotclone_define.h" @@ -44,10 +44,7 @@ enum class CloneStatus { metaInstalled = 7, }; -enum class CloneFileType { - kFile = 0, - kSnapshot = 1 -}; +enum class CloneFileType { kFile = 0, kSnapshot = 1 }; enum class CloneStep { kCreateCloneFile = 0, @@ -61,10 +58,10 @@ enum class CloneStep { kEnd }; -// 数据库中clone/recover任务信息 +// Clone/recover task information in the database class CloneInfo { public: - CloneInfo() + CloneInfo() : type_(CloneTaskType::kClone), originId_(0), destinationId_(0), @@ -74,14 +71,10 @@ class CloneInfo { nextStep_(CloneStep::kCreateCloneFile), status_(CloneStatus::error) {} - CloneInfo(const TaskIdType &taskId, - const std::string &user, - CloneTaskType type, - const std::string &source, - const std::string &destination, - const std::string &poolset, - CloneFileType fileType, - bool isLazy) + CloneInfo(const TaskIdType& taskId, const std::string& user, + CloneTaskType type, const std::string& source, + const std::string& destination, const std::string& poolset, + CloneFileType fileType, bool isLazy) : taskId_(taskId), user_(user), type_(type), @@ -96,19 +89,12 @@ class CloneInfo { nextStep_(CloneStep::kCreateCloneFile), status_(CloneStatus::cloning) {} - CloneInfo(const TaskIdType &taskId, - const std::string &user, - CloneTaskType type, - const std::string &source, - const std::string &destination, - const std::string &poolset, - uint64_t originId, - uint64_t destinationId, - uint64_t time, - CloneFileType fileType, - bool isLazy, - CloneStep nextStep, - CloneStatus status) + CloneInfo(const TaskIdType& taskId, const std::string& user, + CloneTaskType type, const std::string& source, + const std::string& destination, const std::string& poolset, + uint64_t originId, uint64_t destinationId, uint64_t time, + CloneFileType fileType, bool isLazy, CloneStep nextStep, + CloneStatus status) : taskId_(taskId), user_(user), type_(type), @@ -123,146 +109,94 @@ class CloneInfo { nextStep_(nextStep), status_(status) {} - TaskIdType GetTaskId() const { - return taskId_; - } + TaskIdType GetTaskId() const { return taskId_; } - void SetTaskId(const TaskIdType &taskId) { - taskId_ = taskId; - } + void SetTaskId(const TaskIdType& taskId) { taskId_ = taskId; } - std::string GetUser() const { - return user_; - } + std::string GetUser() const { return user_; } - void SetUser(const std::string &user) { - user_ = user; - } + void SetUser(const std::string& user) { 
user_ = user; }
 
-    CloneTaskType GetTaskType() const {
-        return type_;
-    }
+    CloneTaskType GetTaskType() const { return type_; }
 
-    void SetTaskType(CloneTaskType type) {
-        type_ = type;
-    }
+    void SetTaskType(CloneTaskType type) { type_ = type; }
 
-    std::string GetSrc() const {
-        return source_;
-    }
+    std::string GetSrc() const { return source_; }
 
-    void SetSrc(const std::string &source) {
-        source_ = source;
-    }
+    void SetSrc(const std::string& source) { source_ = source; }
 
-    std::string GetDest() const {
-        return destination_;
-    }
+    std::string GetDest() const { return destination_; }
 
-    void SetDest(const std::string &dest) {
-        destination_ = dest;
-    }
+    void SetDest(const std::string& dest) { destination_ = dest; }
 
-    std::string GetPoolset() const {
-        return poolset_;
-    }
+    std::string GetPoolset() const { return poolset_; }
 
-    void SetPoolset(const std::string &poolset) {
-        poolset_ = poolset;
-    }
+    void SetPoolset(const std::string& poolset) { poolset_ = poolset; }
 
-    uint64_t GetOriginId() const {
-        return originId_;
-    }
+    uint64_t GetOriginId() const { return originId_; }
 
-    void SetOriginId(uint64_t originId) {
-        originId_ = originId;
-    }
+    void SetOriginId(uint64_t originId) { originId_ = originId; }
 
-    uint64_t GetDestId() const {
-        return destinationId_;
-    }
+    uint64_t GetDestId() const { return destinationId_; }
 
-    void SetDestId(uint64_t destId) {
-        destinationId_ = destId;
-    }
+    void SetDestId(uint64_t destId) { destinationId_ = destId; }
 
-    uint64_t GetTime() const {
-        return time_;
-    }
+    uint64_t GetTime() const { return time_; }
 
-    void SetTime(uint64_t time) {
-        time_ = time;
-    }
+    void SetTime(uint64_t time) { time_ = time; }
 
-    CloneFileType GetFileType() const {
-        return fileType_;
-    }
+    CloneFileType GetFileType() const { return fileType_; }
 
-    void SetFileType(CloneFileType fileType) {
-        fileType_ = fileType;
-    }
+    void SetFileType(CloneFileType fileType) { fileType_ = fileType; }
 
-    bool GetIsLazy() const {
-        return isLazy_;
-    }
+    bool GetIsLazy() const { return isLazy_; }
 
-    void SetIsLazy(bool flag) {
-        isLazy_ = flag;
-    }
+    void SetIsLazy(bool flag) { isLazy_ = flag; }
 
-    CloneStep GetNextStep() const {
-        return nextStep_;
-    }
+    CloneStep GetNextStep() const { return nextStep_; }
 
-    void SetNextStep(CloneStep nextStep) {
-        nextStep_ = nextStep;
-    }
-    CloneStatus GetStatus() const {
-        return status_;
-    }
+    void SetNextStep(CloneStep nextStep) { nextStep_ = nextStep; }
+    CloneStatus GetStatus() const { return status_; }
 
-    void SetStatus(CloneStatus status) {
-        status_ = status;
-    }
+    void SetStatus(CloneStatus status) { status_ = status; }
 
-    bool SerializeToString(std::string *value) const;
+    bool SerializeToString(std::string* value) const;
 
-    bool ParseFromString(const std::string &value);
+    bool ParseFromString(const std::string& value);
 
 private:
-    // 任务Id
-    TaskIdType taskId_;
-    // 用户
+    // Task ID
+    TaskIdType taskId_;
+    // User
     std::string user_;
-    // 克隆或恢复
+    // Clone or recover
     CloneTaskType type_;
-    // 源文件或快照uuid
+    // Source file or snapshot uuid
     std::string source_;
-    // 目标文件名
+    // Destination file name
     std::string destination_;
-    // 目标文件所在的poolset
+    // The poolset where the target file is located
     std::string poolset_;
-    // 被恢复的原始文件id, 仅用于恢复
+    // ID of the original file being recovered; used only for recover
     uint64_t originId_;
-    // 目标文件id
+    // Target file ID
     uint64_t destinationId_;
-    // 创建时间
+    // Creation time
     uint64_t time_;
-    // 克隆/恢复的文件类型
+    // File type for clone/recover
     CloneFileType fileType_;
-    // 是否lazy
+    // Whether the clone is lazy
     bool isLazy_;
-    // 克隆进度, 下一个步骤
+    // 
Clone progress, next step CloneStep nextStep_; - // 处理的状态 + // Processing status CloneStatus status_; }; -std::ostream& operator<<(std::ostream& os, const CloneInfo &cloneInfo); +std::ostream& operator<<(std::ostream& os, const CloneInfo& cloneInfo); -// 快照处理状态 -enum class Status{ +// Snapshot processing status +enum class Status { done = 0, pending, deleting, @@ -271,187 +205,127 @@ enum class Status{ error }; -// 快照信息 +// Snapshot Information class SnapshotInfo { public: SnapshotInfo() - :uuid_(), - seqNum_(kUnInitializeSeqNum), - chunkSize_(0), - segmentSize_(0), - fileLength_(0), - stripeUnit_(0), - stripeCount_(0), - time_(0), - status_(Status::pending) {} - - SnapshotInfo(UUID uuid, - const std::string &user, - const std::string &fileName, - const std::string &snapshotName) - :uuid_(uuid), - user_(user), - fileName_(fileName), - snapshotName_(snapshotName), - seqNum_(kUnInitializeSeqNum), - chunkSize_(0), - segmentSize_(0), - fileLength_(0), - stripeUnit_(0), - stripeCount_(0), - time_(0), - status_(Status::pending) {} - SnapshotInfo(UUID uuid, - const std::string &user, - const std::string &fileName, - const std::string &desc, - uint64_t seqnum, - uint32_t chunksize, - uint64_t segmentsize, - uint64_t filelength, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - uint64_t time, - Status status) - :uuid_(uuid), - user_(user), - fileName_(fileName), - snapshotName_(desc), - seqNum_(seqnum), - chunkSize_(chunksize), - segmentSize_(segmentsize), - fileLength_(filelength), - stripeUnit_(stripeUnit), - stripeCount_(stripeCount), - poolset_(poolset), - time_(time), - status_(status) {} - - void SetUuid(const UUID &uuid) { - uuid_ = uuid; - } + : uuid_(), + seqNum_(kUnInitializeSeqNum), + chunkSize_(0), + segmentSize_(0), + fileLength_(0), + stripeUnit_(0), + stripeCount_(0), + time_(0), + status_(Status::pending) {} - UUID GetUuid() const { - return uuid_; - } + SnapshotInfo(UUID uuid, const std::string& user, + const std::string& fileName, const std::string& snapshotName) + : uuid_(uuid), + user_(user), + fileName_(fileName), + snapshotName_(snapshotName), + seqNum_(kUnInitializeSeqNum), + chunkSize_(0), + segmentSize_(0), + fileLength_(0), + stripeUnit_(0), + stripeCount_(0), + time_(0), + status_(Status::pending) {} + SnapshotInfo(UUID uuid, const std::string& user, + const std::string& fileName, const std::string& desc, + uint64_t seqnum, uint32_t chunksize, uint64_t segmentsize, + uint64_t filelength, uint64_t stripeUnit, uint64_t stripeCount, + const std::string& poolset, uint64_t time, Status status) + : uuid_(uuid), + user_(user), + fileName_(fileName), + snapshotName_(desc), + seqNum_(seqnum), + chunkSize_(chunksize), + segmentSize_(segmentsize), + fileLength_(filelength), + stripeUnit_(stripeUnit), + stripeCount_(stripeCount), + poolset_(poolset), + time_(time), + status_(status) {} - void SetUser(const std::string &user) { - user_ = user; - } + void SetUuid(const UUID& uuid) { uuid_ = uuid; } - std::string GetUser() const { - return user_; - } + UUID GetUuid() const { return uuid_; } - void SetFileName(const std::string &fileName) { - fileName_ = fileName; - } + void SetUser(const std::string& user) { user_ = user; } - std::string GetFileName() const { - return fileName_; - } + std::string GetUser() const { return user_; } + + void SetFileName(const std::string& fileName) { fileName_ = fileName; } + + std::string GetFileName() const { return fileName_; } - void SetSnapshotName(const std::string &snapshotName) { + void SetSnapshotName(const 
std::string& snapshotName) { snapshotName_ = snapshotName; } - std::string GetSnapshotName() const { - return snapshotName_; - } + std::string GetSnapshotName() const { return snapshotName_; } - void SetSeqNum(uint64_t seqNum) { - seqNum_ = seqNum; - } + void SetSeqNum(uint64_t seqNum) { seqNum_ = seqNum; } - uint64_t GetSeqNum() const { - return seqNum_; - } + uint64_t GetSeqNum() const { return seqNum_; } - void SetChunkSize(uint32_t chunkSize) { - chunkSize_ = chunkSize; - } + void SetChunkSize(uint32_t chunkSize) { chunkSize_ = chunkSize; } - uint32_t GetChunkSize() const { - return chunkSize_; - } + uint32_t GetChunkSize() const { return chunkSize_; } - void SetSegmentSize(uint64_t segmentSize) { - segmentSize_ = segmentSize; - } + void SetSegmentSize(uint64_t segmentSize) { segmentSize_ = segmentSize; } - uint64_t GetSegmentSize() const { - return segmentSize_; - } + uint64_t GetSegmentSize() const { return segmentSize_; } - void SetFileLength(uint64_t fileLength) { - fileLength_ = fileLength; - } + void SetFileLength(uint64_t fileLength) { fileLength_ = fileLength; } - uint64_t GetFileLength() const { - return fileLength_; - } + uint64_t GetFileLength() const { return fileLength_; } - void SetStripeUnit(uint64_t stripeUnit) { - stripeUnit_ = stripeUnit; - } + void SetStripeUnit(uint64_t stripeUnit) { stripeUnit_ = stripeUnit; } - uint64_t GetStripeUnit() const { - return stripeUnit_; - } + uint64_t GetStripeUnit() const { return stripeUnit_; } - void SetStripeCount(uint64_t stripeCount) { - stripeCount_ = stripeCount; - } + void SetStripeCount(uint64_t stripeCount) { stripeCount_ = stripeCount; } - uint64_t GetStripeCount() const { - return stripeCount_; - } + uint64_t GetStripeCount() const { return stripeCount_; } - void SetPoolset(const std::string& poolset) { - poolset_ = poolset; - } + void SetPoolset(const std::string& poolset) { poolset_ = poolset; } - const std::string& GetPoolset() const { - return poolset_; - } + const std::string& GetPoolset() const { return poolset_; } - void SetCreateTime(uint64_t createTime) { - time_ = createTime; - } + void SetCreateTime(uint64_t createTime) { time_ = createTime; } - uint64_t GetCreateTime() const { - return time_; - } + uint64_t GetCreateTime() const { return time_; } - void SetStatus(Status status) { - status_ = status; - } + void SetStatus(Status status) { status_ = status; } - Status GetStatus() const { - return status_; - } + Status GetStatus() const { return status_; } - bool SerializeToString(std::string *value) const; + bool SerializeToString(std::string* value) const; - bool ParseFromString(const std::string &value); + bool ParseFromString(const std::string& value); private: - // 快照uuid + // Snapshot uuid UUID uuid_; - // 租户信息 + // Tenant Information std::string user_; - // 快照目标文件名 + // Snapshot Destination File Name std::string fileName_; - // 快照名 + // Snapshot Name std::string snapshotName_; - // 快照版本号 + // Snapshot version number uint64_t seqNum_; - // 文件的chunk大小 + // Chunk size of the file uint32_t chunkSize_; - // 文件的segment大小 + // The segment size of the file uint64_t segmentSize_; - // 文件大小 + // File size uint64_t fileLength_; // stripe size uint64_t stripeUnit_; @@ -459,16 +333,15 @@ class SnapshotInfo { uint64_t stripeCount_; // poolset std::string poolset_; - // 快照创建时间 + // Snapshot creation time uint64_t time_; - // 快照处理的状态 + // Status of snapshot processing Status status_; }; -std::ostream& operator<<(std::ostream& os, const SnapshotInfo &snapshotInfo); +std::ostream& operator<<(std::ostream& os, const 
SnapshotInfo& snapshotInfo); } // namespace snapshotcloneserver } // namespace curve - #endif // SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_INFO_H_ diff --git a/src/snapshotcloneserver/common/snapshotclone_meta_store.h b/src/snapshotcloneserver/common/snapshotclone_meta_store.h index ff550f5fc7..9e15692eb2 100644 --- a/src/snapshotcloneserver/common/snapshotclone_meta_store.h +++ b/src/snapshotcloneserver/common/snapshotclone_meta_store.h @@ -23,15 +23,15 @@ #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_META_STORE_H_ #define SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_META_STORE_H_ -#include -#include #include #include -#include //NOLINT +#include //NOLINT +#include +#include +#include "src/common/concurrent/concurrent.h" #include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/config.h" -#include "src/common/concurrent/concurrent.h" #include "src/snapshotcloneserver/common/snapshotclone_info.h" namespace curve { @@ -43,25 +43,25 @@ class SnapshotCloneMetaStore { public: SnapshotCloneMetaStore() {} virtual ~SnapshotCloneMetaStore() {} - // 添加一条快照信息记录 + // Add a snapshot information record /** - * 添加一条快照记录到metastore中 - * @param 快照信息结构体 - * @return: 0 插入成功/ -1 插入失败 + * Add a snapshot record to metastore + * @param snapshot information structure + * @return: 0 insertion successful/-1 insertion failed */ - virtual int AddSnapshot(const SnapshotInfo &snapinfo) = 0; + virtual int AddSnapshot(const SnapshotInfo& snapinfo) = 0; /** - * 从metastore删除一条快照记录 - * @param 快照任务的uuid,全局唯一 - * @return 0 删除成功/ -1 删除失败 + * Delete a snapshot record from metastore + * @param The uuid of the snapshot task, globally unique + * @return 0 successfully deleted/-1 failed to delete */ - virtual int DeleteSnapshot(const UUID &uuid) = 0; + virtual int DeleteSnapshot(const UUID& uuid) = 0; /** - * 更新快照记录 - * @param 快照信息结构体 - * @return: 0 更新成功/ -1 更新失败 + * Update snapshot records + * @param snapshot information structure + * @return: 0 successfully updated/-1 failed to update */ - virtual int UpdateSnapshot(const SnapshotInfo &snapinfo) = 0; + virtual int UpdateSnapshot(const SnapshotInfo& snapinfo) = 0; /** * @brief Compare and set snapshot @@ -75,76 +75,76 @@ class SnapshotCloneMetaStore { virtual int CASSnapshot(const UUID& uuid, CASFunc cas) = 0; /** - * 获取指定快照的快照信息 - * @param 快照的uuid - * @param 保存快照信息的指针 - * @return 0 获取成功/ -1 获取失败 + * Obtain snapshot information for the specified snapshot + * @param uuid of snapshot + * @param pointer to save snapshot information + * @return 0 successfully obtained/-1 failed to obtain */ - virtual int GetSnapshotInfo(const UUID &uuid, SnapshotInfo *info) = 0; + virtual int GetSnapshotInfo(const UUID& uuid, SnapshotInfo* info) = 0; /** - * 获取指定文件的快照信息列表 - * @param 文件名 - * @param 保存快照信息的vector指针 - * @return 0 获取成功/ -1 获取失败 + * Obtain a list of snapshot information for the specified file + * @param file name + * @param vector pointer to save snapshot information + * @return 0 successfully obtained/-1 failed to obtain */ - virtual int GetSnapshotList(const std::string &filename, - std::vector *v) = 0; + virtual int GetSnapshotList(const std::string& filename, + std::vector* v) = 0; /** - * 获取全部的快照信息列表 - * @param 保存快照信息的vector指针 - * @return: 0 获取成功/ -1 获取失败 + * Obtain a list of all snapshot information + * @param vector pointer to save snapshot information + * @return: 0 successfully obtained/-1 failed to obtain */ - virtual int GetSnapshotList(std::vector *list) = 0; + virtual int GetSnapshotList(std::vector* list) = 0; /** - * @brief 
获取快照总数
+     * @brief Get the total number of snapshots
      *
-     * @return 快照总数
+     * @return total number of snapshots
      */
     virtual uint32_t GetSnapshotCount() = 0;
 
     /**
-     * @brief 插入一条clone任务记录到metastore
-     * @param clone记录信息
-     * @return: 0 插入成功/ -1 插入失败
+     * @brief Insert a clone task record into metastore
+     * @param cloneInfo clone record information
+     * @return: 0 insertion successful / -1 insertion failed
      */
-    virtual int AddCloneInfo(const CloneInfo &cloneInfo) = 0;
+    virtual int AddCloneInfo(const CloneInfo& cloneInfo) = 0;
 
     /**
-     * @brief 从metastore删除一条clone任务记录
-     * @param clone任务的任务id
-     * @return: 0 删除成功/ -1 删除失败
+     * @brief Delete a clone task record from metastore
+     * @param taskID task ID of the clone task
+     * @return: 0 successfully deleted / -1 failed to delete
      */
-    virtual int DeleteCloneInfo(const std::string &taskID) = 0;
+    virtual int DeleteCloneInfo(const std::string& taskID) = 0;
 
     /**
-     * @brief 更新一条clone任务记录
-     * @param clone记录信息
-     * @return: 0 更新成功/ -1 更新失败
+     * @brief Update a clone task record
+     * @param cloneInfo clone record information
+     * @return: 0 successfully updated / -1 failed to update
      */
-    virtual int UpdateCloneInfo(const CloneInfo &cloneInfo) = 0;
+    virtual int UpdateCloneInfo(const CloneInfo& cloneInfo) = 0;
 
     /**
-     * @brief 获取指定task id的clone任务信息
-     * @param clone任务id
-     * @param[out] clone记录信息的指针
-     * @return: 0 获取成功/ -1 获取失败
+     * @brief Get clone task information for the specified task ID
+     * @param taskID clone task ID
+     * @param[out] info pointer to the clone record information
+     * @return: 0 successfully obtained / -1 failed to obtain
      */
-    virtual int GetCloneInfo(const std::string &taskID, CloneInfo *info) = 0;
+    virtual int GetCloneInfo(const std::string& taskID, CloneInfo* info) = 0;
 
     /**
-     * @brief 获取指定文件的clone任务信息
+     * @brief Get clone task information for the specified file
      *
-     * @param fileName 文件名
-     * @param[out] clone记录信息的指针
-     * @return: 0 获取成功/ -1 获取失败
+     * @param fileName file name
+     * @param[out] list pointer to the clone record list
+     * @return: 0 successfully obtained / -1 failed to obtain
      */
-    virtual int GetCloneInfoByFileName(
-        const std::string &fileName, std::vector *list) = 0;
+    virtual int GetCloneInfoByFileName(const std::string& fileName,
+                                       std::vector* list) = 0;
 
     /**
-     * @brief 获取所有clone任务的信息列表
-     * @param[out] 只想clone任务vector指针
-     * @return: 0 获取成功/ -1 获取失败
+     * @brief Get the information list of all clone tasks
+     * @param[out] list pointer to the clone task vector
+     * @return: 0 successfully obtained / -1 failed to obtain
      */
-    virtual int GetCloneInfoList(std::vector *list) = 0;
+    virtual int GetCloneInfoList(std::vector* list) = 0;
 };
 
 }  // namespace snapshotcloneserver
diff --git a/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h b/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h
index 6bc69aca1e..a502042761 100644
--- a/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h
+++ b/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h
@@ -23,21 +23,21 @@
 #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_META_STORE_ETCD_H_
 #define SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_META_STORE_ETCD_H_
 
-#include
-#include
 #include
+#include
 #include
+#include
 
-#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h"
-#include "src/kvstorageclient/etcd_client.h"
-#include "src/snapshotcloneserver/common/snapshotclonecodec.h"
 #include "src/common/concurrent/concurrent.h"
 #include "src/common/concurrent/rw_lock.h"
+#include "src/kvstorageclient/etcd_client.h"
+#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h"
+#include 
"src/snapshotcloneserver/common/snapshotclonecodec.h" -using ::curve::kvstorage::KVStorageClient; -using ::curve::common::RWLock; using ::curve::common::ReadLockGuard; +using ::curve::common::RWLock; using ::curve::common::WriteLockGuard; +using ::curve::kvstorage::KVStorageClient; namespace curve { namespace snapshotcloneserver { @@ -45,54 +45,53 @@ namespace snapshotcloneserver { class SnapshotCloneMetaStoreEtcd : public SnapshotCloneMetaStore { public: SnapshotCloneMetaStoreEtcd(std::shared_ptr client, - std::shared_ptr codec) - : client_(client), - codec_(codec) {} + std::shared_ptr codec) + : client_(client), codec_(codec) {} int Init(); - int AddSnapshot(const SnapshotInfo &info) override; + int AddSnapshot(const SnapshotInfo& info) override; - int DeleteSnapshot(const UUID &uuid) override; + int DeleteSnapshot(const UUID& uuid) override; - int UpdateSnapshot(const SnapshotInfo &info) override; + int UpdateSnapshot(const SnapshotInfo& info) override; int CASSnapshot(const UUID& uuid, CASFunc cas) override; - int GetSnapshotInfo(const UUID &uuid, SnapshotInfo *info) override; + int GetSnapshotInfo(const UUID& uuid, SnapshotInfo* info) override; - int GetSnapshotList(const std::string &filename, - std::vector *v) override; + int GetSnapshotList(const std::string& filename, + std::vector* v) override; - int GetSnapshotList(std::vector *list) override; + int GetSnapshotList(std::vector* list) override; uint32_t GetSnapshotCount() override; - int AddCloneInfo(const CloneInfo &info) override; + int AddCloneInfo(const CloneInfo& info) override; - int DeleteCloneInfo(const std::string &uuid) override; + int DeleteCloneInfo(const std::string& uuid) override; - int UpdateCloneInfo(const CloneInfo &info) override; + int UpdateCloneInfo(const CloneInfo& info) override; - int GetCloneInfo(const std::string &uuid, CloneInfo *info) override; + int GetCloneInfo(const std::string& uuid, CloneInfo* info) override; - int GetCloneInfoByFileName( - const std::string &fileName, std::vector *list) override; + int GetCloneInfoByFileName(const std::string& fileName, + std::vector* list) override; - int GetCloneInfoList(std::vector *list) override; + int GetCloneInfoList(std::vector* list) override; private: /** - * @brief 加载快照信息 + * @brief Load snapshot information * - * @return 0 加载成功/ -1 加载失败 + * @return 0 successfully loaded/ -1 failed to load */ int LoadSnapshotInfos(); /** - * @brief 加载克隆信息 + * @brief Load clone information * - * @return 0 加载成功/ -1 加载失败 + * @return 0 successfully loaded/ -1 failed to load */ int LoadCloneInfos(); @@ -100,11 +99,11 @@ class SnapshotCloneMetaStoreEtcd : public SnapshotCloneMetaStore { std::shared_ptr client_; std::shared_ptr codec_; - // key is UUID, map 需要考虑并发保护 + // Key is UUID, map needs to consider concurrency protection std::map snapInfos_; // snap info lock RWLock snapInfos_mutex; - // key is TaskIdType, map 需要考虑并发保护 + // Key is TaskIdType, map needs to consider concurrency protection std::map cloneInfos_; // clone info map lock RWLock cloneInfos_lock_; diff --git a/src/snapshotcloneserver/common/snapshotclone_metric.h b/src/snapshotcloneserver/common/snapshotclone_metric.h index 410d9b19f9..e4fd013334 100644 --- a/src/snapshotcloneserver/common/snapshotclone_metric.h +++ b/src/snapshotcloneserver/common/snapshotclone_metric.h @@ -24,9 +24,11 @@ #define SRC_SNAPSHOTCLONESERVER_COMMON_SNAPSHOTCLONE_METRIC_H_ #include -#include + #include #include +#include + #include "src/common/stringstatus.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" @@ 
-39,8 +41,8 @@ namespace snapshotcloneserver { class SnapshotTaskInfo; class CloneTaskInfo; -static uint32_t GetSnapshotTotalNum(void *arg) { - SnapshotCloneMetaStore *metaStore = +static uint32_t GetSnapshotTotalNum(void* arg) { + SnapshotCloneMetaStore* metaStore = reinterpret_cast(arg); uint32_t snapshotCount = 0; if (metaStore != nullptr) { @@ -53,27 +55,27 @@ struct SnapshotMetric { const std::string SnapshotMetricPrefix = "snapshotcloneserver_snapshot_metric_"; - // 正在进行的快照数量 + // Number of snapshots in progress bvar::Adder snapshotDoing; - // 正在等待的快照数量 + // Number of waiting snapshots bvar::Adder snapshotWaiting; - // 累计成功的快照数量 + // Accumulated number of successful snapshots bvar::Adder snapshotSucceed; - // 累计失败的快照数量 + // Accumulated number of failed snapshots bvar::Adder snapshotFailed; std::shared_ptr metaStore_; - // 系统内快照总量 + // Total number of snapshots within the system bvar::PassiveStatus snapshotNum; - explicit SnapshotMetric(std::shared_ptr metaStore) : - snapshotDoing(SnapshotMetricPrefix, "snapshot_doing"), - snapshotWaiting(SnapshotMetricPrefix, "snapshot_waiting"), - snapshotSucceed(SnapshotMetricPrefix, "snapshot_succeed"), - snapshotFailed(SnapshotMetricPrefix, "snapshot_failed"), - metaStore_(metaStore), - snapshotNum(SnapshotMetricPrefix + "snapshot_total_num", - GetSnapshotTotalNum, metaStore_.get()) {} + explicit SnapshotMetric(std::shared_ptr metaStore) + : snapshotDoing(SnapshotMetricPrefix, "snapshot_doing"), + snapshotWaiting(SnapshotMetricPrefix, "snapshot_waiting"), + snapshotSucceed(SnapshotMetricPrefix, "snapshot_succeed"), + snapshotFailed(SnapshotMetricPrefix, "snapshot_failed"), + metaStore_(metaStore), + snapshotNum(SnapshotMetricPrefix + "snapshot_total_num", + GetSnapshotTotalNum, metaStore_.get()) {} }; struct SnapshotInfoMetric { @@ -81,60 +83,56 @@ struct SnapshotInfoMetric { "snapshotcloneserver_snapshotInfo_metric_"; StringStatus metric; - explicit SnapshotInfoMetric(const std::string &snapshotId) { + explicit SnapshotInfoMetric(const std::string& snapshotId) { metric.ExposeAs(SnapshotInfoMetricPrefix, snapshotId); } - void Update(SnapshotTaskInfo *taskInfo); + void Update(SnapshotTaskInfo* taskInfo); }; struct CloneMetric { - const std::string CloneMetricPrefix = - "snapshotcloneserver_clone_metric_"; + const std::string CloneMetricPrefix = "snapshotcloneserver_clone_metric_"; - // 正在执行的克隆任务数量 + // Number of cloning tasks being executed bvar::Adder cloneDoing; - // 累计成功的克隆任务数量 + // Accumulated number of successful cloning tasks bvar::Adder cloneSucceed; - // 累计失败的克隆任务数量 + // Accumulated number of failed clone tasks bvar::Adder cloneFailed; - // 正在执行的恢复任务数量 + // Number of recovery tasks being executed bvar::Adder recoverDoing; - // 累计成功的恢复任务数量 + // Accumulated number of successful recovery tasks bvar::Adder recoverSucceed; - // 累计失败的恢复任务数量 + // Accumulated number of failed recovery tasks bvar::Adder recoverFailed; - // 正在执行的Flatten任务数量 + // Number of Flatten tasks being executed bvar::Adder flattenDoing; - // 累计成功的Flatten任务数量 + // Accumulated number of successful Flatten tasks bvar::Adder flattenSucceed; - // 累计失败的Flatten任务数量 + // Accumulated number of failed Flatten tasks bvar::Adder flattenFailed; - CloneMetric() : - cloneDoing(CloneMetricPrefix, "clone_doing"), - cloneSucceed(CloneMetricPrefix, "clone_succeed"), - cloneFailed(CloneMetricPrefix, "clone_failed"), - recoverDoing(CloneMetricPrefix, "recover_doing"), - recoverSucceed(CloneMetricPrefix, "recover_succeed"), - recoverFailed(CloneMetricPrefix, "recover_failed"), - 
flattenDoing(CloneMetricPrefix, "flatten_doing"), - flattenSucceed(CloneMetricPrefix, "flatten_succeed"), - flattenFailed(CloneMetricPrefix, "flatten_failed") {} + CloneMetric() + : cloneDoing(CloneMetricPrefix, "clone_doing"), + cloneSucceed(CloneMetricPrefix, "clone_succeed"), + cloneFailed(CloneMetricPrefix, "clone_failed"), + recoverDoing(CloneMetricPrefix, "recover_doing"), + recoverSucceed(CloneMetricPrefix, "recover_succeed"), + recoverFailed(CloneMetricPrefix, "recover_failed"), + flattenDoing(CloneMetricPrefix, "flatten_doing"), + flattenSucceed(CloneMetricPrefix, "flatten_succeed"), + flattenFailed(CloneMetricPrefix, "flatten_failed") {} - void UpdateBeforeTaskBegin( - const CloneTaskType &taskType); + void UpdateBeforeTaskBegin(const CloneTaskType& taskType); - void UpdateAfterTaskFinish( - const CloneTaskType &taskType, - const CloneStatus &status); + void UpdateAfterTaskFinish(const CloneTaskType& taskType, + const CloneStatus& status); void UpdateFlattenTaskBegin(); - void UpdateAfterFlattenTaskFinish( - const CloneStatus &status); + void UpdateAfterFlattenTaskFinish(const CloneStatus& status); }; struct CloneInfoMetric { @@ -142,14 +140,13 @@ struct CloneInfoMetric { "snapshotcloneserver_cloneInfo_metric_"; StringStatus metric; - explicit CloneInfoMetric(const std::string &cloneTaskId) { + explicit CloneInfoMetric(const std::string& cloneTaskId) { metric.ExposeAs(CloneInfoMetricPrefix, cloneTaskId); } - void Update(CloneTaskInfo *taskInfo); + void Update(CloneTaskInfo* taskInfo); }; - } // namespace snapshotcloneserver } // namespace curve diff --git a/src/snapshotcloneserver/common/task.h b/src/snapshotcloneserver/common/task.h index bc0faa4178..0034230311 100644 --- a/src/snapshotcloneserver/common/task.h +++ b/src/snapshotcloneserver/common/task.h @@ -25,6 +25,7 @@ #include #include + #include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/task_tracker.h" @@ -33,8 +34,7 @@ namespace snapshotcloneserver { class Task { public: - explicit Task(const TaskIdType &taskId) - : taskId_(taskId) {} + explicit Task(const TaskIdType& taskId) : taskId_(taskId) {} virtual ~Task() {} @@ -44,47 +44,40 @@ class Task { Task& operator=(Task&&) = default; /** - * @brief 获取快照任务执行体闭包 + * @brief Get snapshot task execution body closure * - * @return 快照任务执行体 + * @return Snapshot Task Execution Body */ virtual std::function clousre() { - return [this] () { - Run(); - }; + return [this]() { Run(); }; } /** - * @brief 获取快照任务id + * @brief Get snapshot task ID * - * @return 快照任务id + * @return Snapshot Task ID */ - TaskIdType GetTaskId() const { - return taskId_; - } + TaskIdType GetTaskId() const { return taskId_; } /** - * @brief 快照执行函数接口 + * @brief snapshot execution function interface */ virtual void Run() = 0; private: - // 快照id + // Snapshot ID TaskIdType taskId_; }; class TrackerTask : public Task { public: - explicit TrackerTask(const TaskIdType &taskId) - : Task(taskId) {} + explicit TrackerTask(const TaskIdType& taskId) : Task(taskId) {} void SetTracker(std::shared_ptr tracker) { tracker_ = tracker; } - std::shared_ptr GetTracker() { - return tracker_; - } + std::shared_ptr GetTracker() { return tracker_; } private: std::shared_ptr tracker_; @@ -93,5 +86,4 @@ class TrackerTask : public Task { } // namespace snapshotcloneserver } // namespace curve - #endif // SRC_SNAPSHOTCLONESERVER_COMMON_TASK_H_ diff --git a/src/snapshotcloneserver/common/task_info.h b/src/snapshotcloneserver/common/task_info.h index cc72201d52..2faf6cb1b7 100644 --- 
a/src/snapshotcloneserver/common/task_info.h +++ b/src/snapshotcloneserver/common/task_info.h @@ -23,11 +23,10 @@ #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_TASK_INFO_H_ #define SRC_SNAPSHOTCLONESERVER_COMMON_TASK_INFO_H_ - -#include -#include -#include //NOLINT #include +#include +#include //NOLINT +#include #include "src/common/concurrent/concurrent.h" @@ -36,10 +35,7 @@ namespace snapshotcloneserver { class TaskInfo { public: - TaskInfo() - : progress_(0), - isFinish_(false), - isCanceled_(false) {} + TaskInfo() : progress_(0), isFinish_(false), isCanceled_(false) {} virtual ~TaskInfo() {} TaskInfo(const TaskInfo&) = delete; @@ -48,59 +44,47 @@ class TaskInfo { TaskInfo& operator=(TaskInfo&&) = default; /** - * @brief 设置任务完成度百分比 + * @brief Set task completion percentage * - * @param persent 任务完成度百分比 + * @param persent task completion percentage */ - void SetProgress(uint32_t persent) { - progress_ = persent; - } + void SetProgress(uint32_t persent) { progress_ = persent; } /** - * @brief 获取任务完成度百分比 + * @brief Get task completion percentage * - * @return 任务完成度百分比 + * @return task completion percentage */ - uint32_t GetProgress() const { - return progress_; - } + uint32_t GetProgress() const { return progress_; } /** - * @brief 完成任务 + * @brief Finish the task */ - void Finish() { - isFinish_.store(true); - } + void Finish() { isFinish_.store(true); } /** - * @brief 获取任务是否完成 + * @brief Get whether the task is finished * - * @retval true 任务完成 - * @retval false 任务未完成 + * @retval true Task finished + * @retval false Task not finished */ - bool IsFinish() const { - return isFinish_.load(); - } + bool IsFinish() const { return isFinish_.load(); } /** - * @brief 取消任务 + * @brief Cancel the task */ - void Cancel() { - isCanceled_ = true; - } + void Cancel() { isCanceled_ = true; } /** - * @brief 获取任务是否取消 + * @brief Get whether the task has been canceled * - * @retval true 任务已取消 - * @retval false 任务未取消 + * @retval true The task has been canceled + * @retval false The task has not been canceled */ - bool IsCanceled() const { - return isCanceled_; - } + bool IsCanceled() const { return isCanceled_; } /** - * @brief 重置任务 + * @brief Reset the task */ void Reset() { isFinish_.store(false); @@ -108,26 +92,24 @@ class TaskInfo { } /** - * @brief 获取任务锁的引用,以便使用LockGuard加锁解锁 + * @brief Get a reference to the task lock, so it can be locked and + * unlocked with LockGuard * - * 用于同步任务完成和取消功能 - * 1. 任务完成前,先锁定任务,然后判断任务是否取消, - * 若已取消,则释放锁, - * 否则执行任务完成逻辑之后释放锁。 - * 2. 任务取消前,先锁定任务,然后判断任务是否完成, - * 若已完成,则释放锁, - * 否则执行任务取消逻辑之后释放锁。 + * Used to synchronize the finish and cancel operations: + * 1. Before finishing the task, lock it and check whether it has been + * canceled; if so, release the lock, otherwise run the completion logic + * and then release the lock. + * 2. Before canceling the task, lock it and check whether it has finished; + * if so, release the lock, otherwise run the cancellation logic and then + * release the lock. 
*/ - curve::common::Mutex& GetLockRef() { - return lock_; - } + curve::common::Mutex& GetLockRef() { return lock_; } private: - // 任务完成度百分比 + // Task completion percentage uint32_t progress_; - // 任务任务是否结束 + // Is the task completed std::atomic_bool isFinish_; - // 任务是否被取消 + // Has the task been canceled bool isCanceled_; mutable curve::common::Mutex lock_; }; diff --git a/src/snapshotcloneserver/common/thread_pool.h b/src/snapshotcloneserver/common/thread_pool.h index 1f7b4ea697..1e5c664f15 100644 --- a/src/snapshotcloneserver/common/thread_pool.h +++ b/src/snapshotcloneserver/common/thread_pool.h @@ -24,6 +24,7 @@ #define SRC_SNAPSHOTCLONESERVER_COMMON_THREAD_POOL_H_ #include + #include "src/common/concurrent/task_thread_pool.h" #include "src/snapshotcloneserver/common/task.h" @@ -31,52 +32,49 @@ namespace curve { namespace snapshotcloneserver { /** - * @brief 快照线程池 + * @brief snapshot thread pool */ class ThreadPool { public: - /** - * @brief 构造函数 - * - * @param threadNum 最大线程数 - */ - explicit ThreadPool(int threadNum) - : threadNum_(threadNum) {} /** - * @brief 启动线程池 + * @brief constructor + * + * @param threadNum maximum number of threads + */ + explicit ThreadPool(int threadNum) : threadNum_(threadNum) {} + /** + * @brief Start Thread Pool */ int Start(); /** - * @brief 停止线程池 + * @brief Stop thread pool */ void Stop(); /** - * @brief 添加快照任务 + * @brief Add snapshot task * - * @param task 快照任务 + * @param task snapshot task */ void PushTask(std::shared_ptr task) { threadPool_.Enqueue(task->clousre()); } /** - * @brief 添加快照任务 + * @brief Add snapshot task * - * @param task 快照任务 + * @param task snapshot task */ - void PushTask(Task* task) { - threadPool_.Enqueue(task->clousre()); - } + void PushTask(Task* task) { threadPool_.Enqueue(task->clousre()); } private: /** - * @brief 通用线程池 + * @brief Universal Thread Pool */ curve::common::TaskThreadPool<> threadPool_; /** - * @brief 线程数 + * @brief Number of threads */ int threadNum_; }; diff --git a/src/snapshotcloneserver/main.cpp b/src/snapshotcloneserver/main.cpp index 3430ff0118..3ae3b44e34 100644 --- a/src/snapshotcloneserver/main.cpp +++ b/src/snapshotcloneserver/main.cpp @@ -19,24 +19,27 @@ * Created Date: Fri Dec 14 2018 * Author: xuchaojie */ -#include #include +#include + #include "src/snapshotcloneserver/snapshotclone_server.h" #include "src/common/log_util.h" -DEFINE_string(conf, "conf/snapshot_clone_server.conf", "snapshot&clone server config file path"); //NOLINT +DEFINE_string(conf, "conf/snapshot_clone_server.conf", + "snapshot&clone server config file path"); // NOLINT DEFINE_string(addr, "127.0.0.1:5555", "snapshotcloneserver address"); using Configuration = ::curve::common::Configuration; using SnapShotCloneServer = ::curve::snapshotcloneserver::SnapShotCloneServer; -void LoadConfigFromCmdline(Configuration *conf) { - // 如果命令行有设置, 命令行覆盖配置文件中的字段 +void LoadConfigFromCmdline(Configuration* conf) { + // If there are settings on the command line, the command line overwrites + // the fields in the configuration file google::CommandLineFlagInfo info; if (GetCommandLineFlagInfo("addr", &info) && !info.is_default) { conf->SetStringValue("server.address", FLAGS_addr); } - // 设置日志存放文件夹 + // Set log storage folder if (FLAGS_log_dir.empty()) { if (!conf->GetStringValue("log.dir", &FLAGS_log_dir)) { LOG(WARNING) << "no log.dir in " << FLAGS_conf @@ -69,13 +72,12 @@ int snapshotcloneserver_main(std::shared_ptr conf) { return 0; } -int main(int argc, char **argv) { +int main(int argc, char** argv) { google::ParseCommandLineFlags(&argc, &argv, 
true); std::shared_ptr conf = std::make_shared(); conf->SetConfigPath(FLAGS_conf); if (!conf->LoadConfig()) { - LOG(ERROR) << "Failed to open config file: " - << conf->GetConfigPath(); + LOG(ERROR) << "Failed to open config file: " << conf->GetConfigPath(); return -1; } LoadConfigFromCmdline(conf.get()); @@ -85,4 +87,3 @@ int main(int argc, char **argv) { google::InitGoogleLogging(argv[0]); snapshotcloneserver_main(conf); } - diff --git a/src/snapshotcloneserver/snapshot/snapshot_core.cpp b/src/snapshotcloneserver/snapshot/snapshot_core.cpp index 6abb94b5e9..ec541c4c80 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_core.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_core.cpp @@ -23,17 +23,17 @@ #include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include -#include + #include +#include #include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/snapshotcloneserver/snapshot/snapshot_task.h" - #include "src/common/uuid.h" +#include "src/snapshotcloneserver/snapshot/snapshot_task.h" -using ::curve::common::UUIDGenerator; -using ::curve::common::NameLockGuard; using ::curve::common::LockGuard; +using ::curve::common::NameLockGuard; +using ::curve::common::UUIDGenerator; namespace curve { namespace snapshotcloneserver { @@ -47,10 +47,10 @@ int SnapshotCoreImpl::Init() { return kErrCodeSuccess; } -int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, - const std::string &user, - const std::string &snapshotName, - SnapshotInfo *snapInfo) { +int SnapshotCoreImpl::CreateSnapshotPre(const std::string& file, + const std::string& user, + const std::string& snapshotName, + SnapshotInfo* snapInfo) { NameLockGuard lockGuard(snapshotNameLock_, file); std::vector fileInfo; metaStore_->GetSnapshotList(file, &fileInfo); @@ -60,11 +60,10 @@ int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, if ((snap.GetUser() == user) && (snap.GetSnapshotName() == snapshotName)) { LOG(INFO) << "CreateSnapshotPre find same snap task" - << ", file = " << file - << ", user = " << user + << ", file = " << file << ", user = " << user << ", snapshotName = " << snapshotName << ", Exist SnapInfo : " << snap; - // 视为同一个快照,返回任务已存在 + // Treat as the same snapshot, return task already exists *snapInfo = snap; return kErrCodeTaskExist; } @@ -85,20 +84,17 @@ int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, break; case -LIBCURVE_ERROR::NOTEXIST: LOG(ERROR) << "create snapshot file not exist" - << ", file = " << file - << ", user = " << user + << ", file = " << file << ", user = " << user << ", snapshotName = " << snapshotName; return kErrCodeFileNotExist; case -LIBCURVE_ERROR::AUTHFAIL: LOG(ERROR) << "create snapshot by invalid user" - << ", file = " << file - << ", user = " << user + << ", file = " << file << ", user = " << user << ", snapshotName = " << snapshotName; return kErrCodeInvalidUser; default: LOG(ERROR) << "GetFileInfo encounter an error" - << ", ret = " << ret - << ", file = " << file + << ", ret = " << ret << ", file = " << file << ", user = " << user; return kErrCodeInternalError; } @@ -117,8 +113,7 @@ int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, ret = metaStore_->AddSnapshot(info); if (ret < 0) { LOG(ERROR) << "AddSnapshot error," - << " ret = " << ret - << ", uuid = " << uuid + << " ret = " << ret << ", uuid = " << uuid << ", fileName = " << file << ", snapshotName = " << snapshotName; return ret; @@ -131,46 +126,56 @@ constexpr uint32_t kProgressCreateSnapshotOnCurvefsComplete = 5; constexpr uint32_t 
kProgressBuildChunkIndexDataComplete = 6; constexpr uint32_t kProgressBuildSnapshotMapComplete = 10; constexpr uint32_t kProgressTransferSnapshotDataStart = - kProgressBuildSnapshotMapComplete; + kProgressBuildSnapshotMapComplete; constexpr uint32_t kProgressTransferSnapshotDataComplete = 99; constexpr uint32_t kProgressComplete = 100; /** - * @brief 异步执行创建快照任务并更新任务进度 + * @brief Asynchronously execute the create-snapshot task and update the task + * progress * - * 快照进度规划如下: + * The snapshot progress is planned as follows: * - * |CreateSnapshotOnCurvefs| BuildChunkIndexData | BuildSnapshotMap | TransferSnapshotData | UpdateSnapshot | //NOLINT - * | 5% | 6% | 10% | 10%~99% | 100% | //NOLINT + * |CreateSnapshotOnCurvefs| BuildChunkIndexData | BuildSnapshotMap | + * TransferSnapshotData | UpdateSnapshot | //NOLINT + * | 5% | 6% | 10% | 10%~99% | 100% | //NOLINT * * - * 异步执行期间发生error与cancel情况说明: - * 1. 发生error将导致整个异步任务直接中断,并且不做任何清理动作: - * 发生error时,一般系统存在异常,清理动作很可能不能完成, - * 因此,不进行任何清理,只置状态,待人工干预排除异常之后, - * 使用DeleteSnapshot功能去手动删除error状态的快照。 - * 2. 发生cancel时则以创建功能相反的顺序依次进行清理动作, - * 若清理过程发生error,则立即中断,之后同error过程。 + * Notes on error and cancel during asynchronous execution: + * 1. An error interrupts the whole asynchronous task immediately, and no + * cleanup is performed: when an error occurs, the system is usually in an + * abnormal state and the cleanup would most likely not complete, so nothing + * is cleaned up and only the status is set; after manual intervention has + * removed the anomaly, use the DeleteSnapshot function to manually delete + * the snapshot in error status. + * 2. On cancel, the cleanup actions run in the reverse order of creation; + * if an error occurs during cleanup, it is interrupted immediately and then + * follows the same path as the error case. * - * @param task 快照任务 + * @param task snapshot task */ void SnapshotCoreImpl::HandleCreateSnapshotTask( std::shared_ptr task) { std::string fileName = task->GetFileName(); - // 如果当前有失败的快照,需先清理失败的快照,否则快照会再次失败 + // If there are failed snapshots, clean them up first; otherwise the new + // snapshot will fail again int ret = ClearErrorSnapBeforeCreateSnapshot(task); if (ret < 0) { HandleCreateSnapshotError(task); return; } - // 为支持任务重启,这里有三种情况需要处理 - // 1. 没打过快照, 没有seqNum且curve上没有快照 - // 2. 打过快照, 有seqNum且curve上有快照 - // 3. 打过快照并已经转储完删除快照, 有seqNum但curve上没有快照 + // To support task restart, three situations need to be handled: + // 1. No snapshot was taken: there is no seqNum and no snapshot on curvefs + // 2. A snapshot was taken: there is a seqNum and a snapshot on curvefs + // 3. A snapshot was taken and has already been dumped and deleted: 
+ // there is a seqNum, but no snapshot exists on curvefs - SnapshotInfo *info = &(task->GetSnapshotInfo()); + SnapshotInfo* info = &(task->GetSnapshotInfo()); UUID uuid = task->GetUuid(); uint64_t seqNum = info->GetSeqNum(); bool existIndexData = false; @@ -178,8 +183,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( ret = CreateSnapshotOnCurvefs(fileName, info, task); if (ret < 0) { LOG(ERROR) << "CreateSnapshotOnCurvefs error, " - << " ret = " << ret - << ", fileName = " << fileName + << " ret = " << ret << ", fileName = " << fileName << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; @@ -188,9 +192,8 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( existIndexData = false; } else { FInfo snapInfo; - ret = client_->GetSnapshot(fileName, - info->GetUser(), - seqNum, &snapInfo); + ret = + client_->GetSnapshot(fileName, info->GetUser(), seqNum, &snapInfo); if (-LIBCURVE_ERROR::NOTEXIST == ret) { HandleCreateSnapshotSuccess(task); return; @@ -200,8 +203,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( existIndexData = dataStore_->ChunkIndexDataExist(name); } else { LOG(ERROR) << "GetSnapShot on curvefs fail, " - << " ret = " << ret - << ", fileName = " << fileName + << " ret = " << ret << ", fileName = " << fileName << ", user = " << info->GetUser() << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); @@ -224,8 +226,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( ret = dataStore_->GetChunkIndexData(name, &indexData); if (ret < 0) { LOG(ERROR) << "GetChunkIndexData error, " - << " ret = " << ret - << ", fileName = " << fileName + << " ret = " << ret << ", fileName = " << fileName << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); @@ -238,8 +239,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( ret = BuildSegmentInfo(*info, &segInfos); if (ret < 0) { LOG(ERROR) << "BuildSegmentInfo error," - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -247,8 +247,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( ret = BuildChunkIndexData(*info, &indexData, &segInfos, task); if (ret < 0) { LOG(ERROR) << "BuildChunkIndexData error, " - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -256,8 +255,7 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( ret = dataStore_->PutChunkIndexData(name, indexData); if (ret < 0) { LOG(ERROR) << "PutChunkIndexData error, " - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -271,14 +269,11 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( } FileSnapMap fileSnapshotMap; - ret = BuildSnapshotMap(fileName, - seqNum, - &fileSnapshotMap); + ret = BuildSnapshotMap(fileName, seqNum, &fileSnapshotMap); if (ret < 0) { LOG(ERROR) << "BuildSnapshotMap error, " << " fileName = " << task->GetFileName() - << ", seqNum = " << seqNum - << ", uuid = " << task->GetUuid(); + << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -286,26 +281,23 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( task->UpdateMetric(); if (existIndexData) { - ret = TransferSnapshotData(indexData, - *info, - segInfos, - [this] (const ChunkDataName &chunkDataName) { + ret = TransferSnapshotData( + indexData, *info, segInfos, + 
[this](const ChunkDataName& chunkDataName) { return dataStore_->ChunkDataExist(chunkDataName); }, task); } else { - ret = TransferSnapshotData(indexData, - *info, - segInfos, - [&fileSnapshotMap] (const ChunkDataName &chunkDataName) { + ret = TransferSnapshotData( + indexData, *info, segInfos, + [&fileSnapshotMap](const ChunkDataName& chunkDataName) { return fileSnapshotMap.IsExistChunk(chunkDataName); }, task); } if (ret < 0) { LOG(ERROR) << "TransferSnapshotData error, " - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -313,8 +305,8 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( task->UpdateMetric(); if (task->IsCanceled()) { - return CancelAfterTransferSnapshotData( - task, indexData, fileSnapshotMap); + return CancelAfterTransferSnapshotData(task, indexData, + fileSnapshotMap); } ret = DeleteSnapshotOnCurvefs(*info); @@ -327,8 +319,8 @@ void SnapshotCoreImpl::HandleCreateSnapshotTask( LockGuard lockGuard(task->GetLockRef()); if (task->IsCanceled()) { - return CancelAfterTransferSnapshotData( - task, indexData, fileSnapshotMap); + return CancelAfterTransferSnapshotData(task, indexData, + fileSnapshotMap); } HandleCreateSnapshotSuccess(task); @@ -347,9 +339,9 @@ int SnapshotCoreImpl::ClearErrorSnapBeforeCreateSnapshot( std::make_shared(snap, snapInfoMetric); taskInfo->GetSnapshotInfo().SetStatus(Status::errorDeleting); taskInfo->UpdateMetric(); - // 处理删除快照 + // Processing deletion of snapshots HandleDeleteSnapshotTask(taskInfo); - // 仍然失败,则本次快照失败 + // If it still fails, the current snapshot fails if (taskInfo->GetSnapshotInfo().GetStatus() != Status::done) { LOG(ERROR) << "Find error Snapshot and Delete Fail" << ", error snapshot Id = " << snap.GetUuid() @@ -362,15 +354,13 @@ int SnapshotCoreImpl::ClearErrorSnapBeforeCreateSnapshot( return kErrCodeSuccess; } -int SnapshotCoreImpl::StartCancel( - std::shared_ptr task) { - auto &snapInfo = task->GetSnapshotInfo(); +int SnapshotCoreImpl::StartCancel(std::shared_ptr task) { + auto& snapInfo = task->GetSnapshotInfo(); snapInfo.SetStatus(Status::canceling); int ret = metaStore_->UpdateSnapshot(snapInfo); if (ret < 0) { LOG(ERROR) << "UpdateSnapshot Task Cancel Fail!" 
- << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return kErrCodeInternalError; } @@ -378,18 +368,17 @@ int SnapshotCoreImpl::StartCancel( } void SnapshotCoreImpl::CancelAfterTransferSnapshotData( - std::shared_ptr task, - const ChunkIndexData &indexData, - const FileSnapMap &fileSnapshotMap) { + std::shared_ptr task, const ChunkIndexData& indexData, + const FileSnapMap& fileSnapshotMap) { LOG(INFO) << "Cancel After TransferSnapshotData" << ", uuid = " << task->GetUuid(); std::vector chunkIndexVec = indexData.GetAllChunkIndex(); - for (auto &chunkIndex : chunkIndexVec) { + for (auto& chunkIndex : chunkIndexVec) { ChunkDataName chunkDataName; indexData.GetChunkDataName(chunkIndex, &chunkDataName); if ((!fileSnapshotMap.IsExistChunk(chunkDataName)) && (dataStore_->ChunkDataExist(chunkDataName))) { - int ret = dataStore_->DeleteChunkData(chunkDataName); + int ret = dataStore_->DeleteChunkData(chunkDataName); if (ret < 0) { LOG(ERROR) << "DeleteChunkData error" << "while canceling CreateSnapshot, " @@ -410,19 +399,16 @@ void SnapshotCoreImpl::CancelAfterCreateChunkIndexData( std::shared_ptr task) { LOG(INFO) << "Cancel After CreateChunkIndexData" << ", uuid = " << task->GetUuid(); - SnapshotInfo &info = task->GetSnapshotInfo(); + SnapshotInfo& info = task->GetSnapshotInfo(); UUID uuid = task->GetUuid(); uint64_t seqNum = info.GetSeqNum(); - ChunkIndexDataName name(task->GetFileName(), - seqNum); + ChunkIndexDataName name(task->GetFileName(), seqNum); int ret = dataStore_->DeleteChunkIndexData(name); if (ret < 0) { LOG(ERROR) << "DeleteChunkIndexData error " << "while canceling CreateSnapshot, " - << " ret = " << ret - << ", fileName = " << task->GetFileName() - << ", seqNum = " << seqNum - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", fileName = " << task->GetFileName() + << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } @@ -433,7 +419,7 @@ void SnapshotCoreImpl::CancelAfterCreateSnapshotOnCurvefs( std::shared_ptr task) { LOG(INFO) << "Cancel After CreateSnapshotOnCurvefs" << ", uuid = " << task->GetUuid(); - SnapshotInfo &info = task->GetSnapshotInfo(); + SnapshotInfo& info = task->GetSnapshotInfo(); UUID uuid = task->GetUuid(); int ret = DeleteSnapshotOnCurvefs(info); @@ -452,13 +438,12 @@ void SnapshotCoreImpl::HandleClearSnapshotOnMateStore( if (ret < 0) { LOG(ERROR) << "MetaStore DeleteSnapshot error " << "while cancel CreateSnapshot, " - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); HandleCreateSnapshotError(task); return; } - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); LOG(INFO) << "CancelSnapshot Task Success" << ", uuid = " << snapInfo.GetUuid() << ", fileName = " << snapInfo.GetFileName() @@ -472,13 +457,12 @@ void SnapshotCoreImpl::HandleClearSnapshotOnMateStore( void SnapshotCoreImpl::HandleCreateSnapshotSuccess( std::shared_ptr task) { - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); snapInfo.SetStatus(Status::done); int ret = metaStore_->UpdateSnapshot(snapInfo); if (ret < 0) { LOG(ERROR) << "UpdateSnapshot Task Success Fail!" 
- << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); } task->SetProgress(kProgressComplete); @@ -494,13 +478,12 @@ void SnapshotCoreImpl::HandleCreateSnapshotSuccess( void SnapshotCoreImpl::HandleCreateSnapshotError( std::shared_ptr task) { - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); snapInfo.SetStatus(Status::error); int ret = metaStore_->UpdateSnapshot(snapInfo); if (ret < 0) { LOG(ERROR) << "UpdateSnapshot Task Error Fail!" - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); } LOG(INFO) << "CreateSnapshot Task Fail" @@ -514,14 +497,11 @@ void SnapshotCoreImpl::HandleCreateSnapshotError( } int SnapshotCoreImpl::CreateSnapshotOnCurvefs( - const std::string &fileName, - SnapshotInfo *info, + const std::string& fileName, SnapshotInfo* info, std::shared_ptr task) { uint64_t seqNum = 0; - int ret = - client_->CreateSnapshot(fileName, info->GetUser(), &seqNum); - if (LIBCURVE_ERROR::OK == ret || - -LIBCURVE_ERROR::UNDER_SNAPSHOT == ret) { + int ret = client_->CreateSnapshot(fileName, info->GetUser(), &seqNum); + if (LIBCURVE_ERROR::OK == ret || -LIBCURVE_ERROR::UNDER_SNAPSHOT == ret) { // ok } else if (-LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT == ret) { LOG(ERROR) << "CreateSnapshot on curvefs fail, " @@ -530,23 +510,18 @@ int SnapshotCoreImpl::CreateSnapshotOnCurvefs( return kErrCodeNotSupport; } else { LOG(ERROR) << "CreateSnapshot on curvefs fail, " - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); return kErrCodeInternalError; } LOG(INFO) << "CreateSnapshot on curvefs success, seq = " << seqNum << ", uuid = " << task->GetUuid(); FInfo snapInfo; - ret = client_->GetSnapshot(fileName, - info->GetUser(), - seqNum, &snapInfo); + ret = client_->GetSnapshot(fileName, info->GetUser(), seqNum, &snapInfo); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "GetSnapShot on curvefs fail, " - << " ret = " << ret - << ", fileName = " << fileName - << ", user = " << info->GetUser() - << ", seqNum = " << seqNum + << " ret = " << ret << ", fileName = " << fileName + << ", user = " << info->GetUser() << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); return kErrCodeInternalError; } @@ -573,46 +548,38 @@ int SnapshotCoreImpl::CreateSnapshotOnCurvefs( ret = metaStore_->CASSnapshot(uuid, compareAndSet); if (ret < 0) { LOG(ERROR) << "CASSnapshot error, " - << " ret = " << ret - << ", fileName = " << fileName + << " ret = " << ret << ", fileName = " << fileName << ", uuid = " << task->GetUuid(); return ret; } - // 打完快照需等待2个session时间,以保证seq同步到所有client + // After taking a snapshot, wait for two session periods to ensure that + // the seq has been synchronized to all clients std::this_thread::sleep_for( std::chrono::microseconds(mdsSessionTimeUs_ * 2)); return kErrCodeSuccess; } -int SnapshotCoreImpl::DeleteSnapshotOnCurvefs(const SnapshotInfo &info) { +int SnapshotCoreImpl::DeleteSnapshotOnCurvefs(const SnapshotInfo& info) { std::string fileName = info.GetFileName(); std::string user = info.GetUser(); uint64_t seqNum = info.GetSeqNum(); - int ret = client_->DeleteSnapshot(fileName, - user, - seqNum); - if (ret != LIBCURVE_ERROR::OK && - ret != -LIBCURVE_ERROR::NOTEXIST && + int ret = client_->DeleteSnapshot(fileName, user, seqNum); + if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::NOTEXIST && ret != -LIBCURVE_ERROR::DELETING) { LOG(ERROR) << "DeleteSnapshot error, " - << " ret = " << ret - << ", fileName = " << fileName - << ", user = " << user - << ", seqNum = " << seqNum + << " ret = " << ret << ", fileName = " << fileName + << ", user = " << user << ", seqNum = " << seqNum << ", uuid = " << info.GetUuid(); return kErrCodeInternalError; } do { FileStatus status; - ret = client_->CheckSnapShotStatus(info.GetFileName(), - info.GetUser(), - seqNum, - &status); + ret = client_->CheckSnapShotStatus(info.GetFileName(), info.GetUser(), + seqNum, &status); LOG(INFO) << "Doing CheckSnapShotStatus, fileName = " - << info.GetFileName() - << ", user = " << info.GetUser() + << info.GetFileName() << ", user = " << info.GetUser() << ", seqNum = " << seqNum << ", status = " << static_cast(status) << ", uuid = " << info.GetUuid(); @@ -631,8 +598,7 @@ int SnapshotCoreImpl::DeleteSnapshotOnCurvefs(const SnapshotInfo &info) { } } else { LOG(ERROR) << "CheckSnapShotStatus fail" - << ", ret = " << ret - << ", uuid = " << info.GetUuid(); + << ", ret = " << ret << ", uuid = " << info.GetUuid(); return kErrCodeInternalError; } std::this_thread::sleep_for( @@ -642,9 +608,8 @@ int SnapshotCoreImpl::DeleteSnapshotOnCurvefs(const SnapshotInfo &info) { } int SnapshotCoreImpl::BuildChunkIndexData( - const SnapshotInfo &info, - ChunkIndexData *indexData, - std::map *segInfos, + const SnapshotInfo& info, ChunkIndexData* indexData, + std::map* segInfos, std::shared_ptr task) { std::string fileName = info.GetFileName(); std::string user = info.GetUser(); @@ -656,25 +621,19 @@ int SnapshotCoreImpl::BuildChunkIndexData( indexData->SetFileName(fileName); uint64_t chunkIndex = 0; - for (uint64_t i = 0; i < fileLength/segmentSize; i++) { + for (uint64_t i = 0; i < fileLength / segmentSize; i++) { uint64_t offset = i * segmentSize; SegmentInfo segInfo; - int ret = client_->GetSnapshotSegmentInfo( - fileName, - user, - seqNum, - offset, - &segInfo); + int ret = client_->GetSnapshotSegmentInfo(fileName, user, seqNum, + offset, &segInfo); if (LIBCURVE_ERROR::OK == ret) { segInfos->emplace(i, segInfo); for (std::vector::size_type j = 0; - j < segInfo.chunkvec.size(); - j++) { + j < segInfo.chunkvec.size(); j++) { ChunkInfoDetail chunkInfo; ChunkIDInfo cidInfo = segInfo.chunkvec[j]; - ret = client_->GetChunkInfo(cidInfo, - &chunkInfo); + ret = client_->GetChunkInfo(cidInfo, &chunkInfo); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "GetChunkInfo error, " << " ret = " << ret << ", fileName = " << fileName << ", user = " << user << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); return kErrCodeInternalError; } - // 2个sn,小的是snap sn,大的是快照之后的写 - // 1个sn,有两种情况: - // 小于等于seqNum时为snap sn, 且快照之后未写过; - // 大于时, 表示打快照时为空,是快照之后首次写的版本(seqNum+1) - // 没有sn,从未写过 - // 大于2个sn,错误,报错 + // Two sns: the smaller one is the snap sn, the larger one is + // the write made after the snapshot. + // One sn, two cases: + // if it is <= seqNum, it is the snap sn and nothing has + // been written since the snapshot; + // if it is greater, the chunk was empty when the snapshot + // was taken and this is the first version written after + // the snapshot (seqNum+1). + // No sn: the chunk has never been written. + // More than two sns: an error, report it. if (chunkInfo.chunkSn.size() == 2) { uint64_t seq = - std::min(chunkInfo.chunkSn[0], - chunkInfo.chunkSn[1]); + std::min(chunkInfo.chunkSn[0], chunkInfo.chunkSn[1]); chunkIndex = i * (segmentSize / chunkSize) + j; ChunkDataName chunkDataName(fileName, seq, chunkIndex); indexData->PutChunkDataName(chunkDataName); @@ -708,10 +670,10 @@ int 
SnapshotCoreImpl::BuildChunkIndexData( // nothing } else { // should not reach here - LOG(ERROR) << "GetChunkInfo return chunkInfo.chunkSn.size()" - << " invalid, size = " - << chunkInfo.chunkSn.size() - << ", uuid = " << task->GetUuid(); + LOG(ERROR) + << "GetChunkInfo return chunkInfo.chunkSn.size()" + << " invalid, size = " << chunkInfo.chunkSn.size() + << ", uuid = " << task->GetUuid(); return kErrCodeInternalError; } if (task->IsCanceled()) { @@ -722,10 +684,8 @@ int SnapshotCoreImpl::BuildChunkIndexData( // nothing } else { LOG(ERROR) << "GetSnapshotSegmentInfo error," - << " ret = " << ret - << ", fileName = " << fileName - << ", user = " << user - << ", seq = " << seqNum + << " ret = " << ret << ", fileName = " << fileName + << ", user = " << user << ", seq = " << seqNum << ", offset = " << offset << ", uuid = " << task->GetUuid(); return kErrCodeInternalError; @@ -736,25 +696,18 @@ int SnapshotCoreImpl::BuildChunkIndexData( } int SnapshotCoreImpl::BuildSegmentInfo( - const SnapshotInfo &info, - std::map *segInfos) { + const SnapshotInfo& info, std::map* segInfos) { int ret = kErrCodeSuccess; std::string fileName = info.GetFileName(); std::string user = info.GetUser(); uint64_t seq = info.GetSeqNum(); uint64_t fileLength = info.GetFileLength(); uint64_t segmentSize = info.GetSegmentSize(); - for (uint64_t i = 0; - i < fileLength/segmentSize; - i++) { + for (uint64_t i = 0; i < fileLength / segmentSize; i++) { uint64_t offset = i * segmentSize; SegmentInfo segInfo; - ret = client_->GetSnapshotSegmentInfo( - fileName, - user, - seq, - offset, - &segInfo); + ret = client_->GetSnapshotSegmentInfo(fileName, user, seq, offset, + &segInfo); if (LIBCURVE_ERROR::OK == ret) { segInfos->emplace(i, std::move(segInfo)); @@ -762,10 +715,8 @@ int SnapshotCoreImpl::BuildSegmentInfo( // nothing } else { LOG(ERROR) << "GetSnapshotSegmentInfo error," - << " ret = " << ret - << ", fileName = " << fileName - << ", user = " << user - << ", seq = " << seq + << " ret = " << ret << ", fileName = " << fileName + << ", user = " << user << ", seq = " << seq << ", offset = " << offset << ", uuid = " << info.GetUuid(); return kErrCodeInternalError; @@ -775,15 +726,14 @@ int SnapshotCoreImpl::BuildSegmentInfo( } int SnapshotCoreImpl::TransferSnapshotData( - const ChunkIndexData indexData, - const SnapshotInfo &info, - const std::map &segInfos, - const ChunkDataExistFilter &filter, + const ChunkIndexData indexData, const SnapshotInfo& info, + const std::map& segInfos, + const ChunkDataExistFilter& filter, std::shared_ptr task) { int ret = 0; uint64_t segmentSize = info.GetSegmentSize(); uint64_t chunkSize = info.GetChunkSize(); - uint64_t chunkPerSegment = segmentSize/chunkSize; + uint64_t chunkPerSegment = segmentSize / chunkSize; if (0 == chunkSplitSize_ || chunkSize % chunkSplitSize_ != 0) { LOG(ERROR) << "error!, ChunkSize is not align to chunkSplitSize" @@ -794,13 +744,13 @@ int SnapshotCoreImpl::TransferSnapshotData( std::vector chunkIndexVec = indexData.GetAllChunkIndex(); uint32_t totalProgress = kProgressTransferSnapshotDataComplete - - kProgressTransferSnapshotDataStart; + kProgressTransferSnapshotDataStart; uint32_t transferDataNum = chunkIndexVec.size(); double progressPerData = static_cast(totalProgress) / transferDataNum; uint32_t index = 0; - for (auto &chunkIndex : chunkIndexVec) { + for (auto& chunkIndex : chunkIndexVec) { uint64_t segNum = chunkIndex / chunkPerSegment; auto it = segInfos.find(segNum); @@ -818,17 +768,15 @@ LOG(ERROR) << "TransferSnapshotData, " << "chunkIndexInSegment >= " << "segInfos[segNum].chunkvec.size()" - << ", chunkIndexInSegment = " - << chunkIndexInSegment - << ", size = " - << it->second.chunkvec.size() + << ", chunkIndexInSegment = " << chunkIndexInSegment + << ", size = " << it->second.chunkvec.size() << ", uuid = " << task->GetUuid(); return kErrCodeInternalError; } } auto tracker = std::make_shared(); - for (auto &chunkIndex : chunkIndexVec) { + for (auto& chunkIndex : chunkIndexVec) { ChunkDataName chunkDataName; indexData.GetChunkDataName(chunkIndex, &chunkDataName); uint64_t segNum = chunkIndex / chunkPerSegment; @@ -836,8 +784,7 @@ int SnapshotCoreImpl::TransferSnapshotData( auto it = segInfos.find(segNum); if (it != segInfos.end()) { - ChunkIDInfo cidInfo = - it->second.chunkvec[chunkIndexInSegment]; + ChunkIDInfo cidInfo = it->second.chunkvec[chunkIndexInSegment]; if (!filter(chunkDataName)) { auto taskInfo = std::make_shared( @@ -847,10 +794,7 @@ int SnapshotCoreImpl::TransferSnapshotData( readChunkSnapshotConcurrency_); UUID taskId = UUIDGenerator().GenerateUUID(); auto task = new TransferSnapshotDataChunkTask( - taskId, - taskInfo, - client_, - dataStore_); + taskId, taskInfo, client_, dataStore_); task->SetTracker(tracker); tracker->AddOneTrace(); threadPool_->PushTask(task); @@ -865,50 +809,45 @@ int SnapshotCoreImpl::TransferSnapshotData( ret = tracker->GetResult(); if (ret < 0) { LOG(ERROR) << "TransferSnapshotDataChunk tracker GetResult fail" - << ", ret = " << ret - << ", uuid = " << task->GetUuid(); + << ", ret = " << ret << ", uuid = " << task->GetUuid(); return ret; } task->SetProgress(static_cast( - kProgressTransferSnapshotDataStart + index * progressPerData)); + kProgressTransferSnapshotDataStart + index * progressPerData)); task->UpdateMetric(); index++; if (task->IsCanceled()) { return kErrCodeSuccess; } } - // 最后剩余数量不足的任务 + // Wait for the remaining tasks that did not fill a full batch tracker->Wait(); ret = tracker->GetResult(); if (ret < 0) { LOG(ERROR) << "TransferSnapshotDataChunk tracker GetResult fail" - << ", ret = " << ret - << ", uuid = " << task->GetUuid(); + << ", ret = " << ret << ", uuid = " << task->GetUuid(); return ret; } return kErrCodeSuccess; } - -int SnapshotCoreImpl::DeleteSnapshotPre( - UUID uuid, - const std::string &user, - const std::string &fileName, - SnapshotInfo *snapInfo) { +int SnapshotCoreImpl::DeleteSnapshotPre(UUID uuid, const std::string& user, + const std::string& fileName, + SnapshotInfo* snapInfo) { NameLockGuard lockSnapGuard(snapshotRef_->GetSnapshotLock(), uuid); int ret = metaStore_->GetSnapshotInfo(uuid, snapInfo); if (ret < 0) { - // 快照不存在时直接返回删除成功,使接口幂等 + // If the snapshot does not exist, return success directly to make the + // interface idempotent return kErrCodeSuccess; } if (snapInfo->GetUser() != user) { LOG(ERROR) << "Can not delete snapshot by different user."; return kErrCodeInvalidUser; } - if ((!fileName.empty()) && - (fileName != snapInfo->GetFileName())) { + if ((!fileName.empty()) && (fileName != snapInfo->GetFileName())) { LOG(ERROR) << "Can not delete, fileName is not matched."; return kErrCodeFileNameNotMatch; } @@ -938,8 +877,7 @@ int SnapshotCoreImpl::DeleteSnapshotPre( ret = metaStore_->UpdateSnapshot(*snapInfo); if (ret < 0) { LOG(ERROR) << "UpdateSnapshot error," - << " ret = " << ret - << ", uuid = " << uuid; + << " ret = " << ret << ", uuid = " << uuid; return ret; } return kErrCodeSuccess; } constexpr uint32_t 
kDelProgressBuildSnapshotMapComplete = 10; constexpr uint32_t kDelProgressDeleteChunkDataStart = - kDelProgressBuildSnapshotMapComplete; + kDelProgressBuildSnapshotMapComplete; constexpr uint32_t kDelProgressDeleteChunkDataComplete = 80; constexpr uint32_t kDelProgressDeleteChunkIndexDataComplete = 90; /** - * @brief 异步执行删除快照任务并更新任务进度 + * @brief Asynchronously execute the delete-snapshot task and update the task + * progress * - * 删除快照进度规划如下: + * The delete-snapshot progress is planned as follows: * * |BuildSnapshotMap|DeleteChunkData|DeleteChunkIndexData|DeleteSnapshot| * | 10% | 10%~80% | 90% | 100% | * - * @param task 快照任务 + * @param task snapshot task */ void SnapshotCoreImpl::HandleDeleteSnapshotTask( std::shared_ptr task) { - SnapshotInfo &info = task->GetSnapshotInfo(); + SnapshotInfo& info = task->GetSnapshotInfo(); UUID uuid = task->GetUuid(); uint64_t seqNum = info.GetSeqNum(); FileSnapMap fileSnapshotMap; @@ -971,15 +910,13 @@ void SnapshotCoreImpl::HandleDeleteSnapshotTask( if (ret < 0) { LOG(ERROR) << "BuildSnapshotMap error, " << " fileName = " << task->GetFileName() - << ", seqNum = " << seqNum - << ", uuid = " << task->GetUuid(); + << ", seqNum = " << seqNum << ", uuid = " << task->GetUuid(); HandleDeleteSnapshotError(task); return; } task->SetProgress(kDelProgressBuildSnapshotMapComplete); task->UpdateMetric(); - ChunkIndexDataName name(task->GetFileName(), - seqNum); + ChunkIndexDataName name(task->GetFileName(), seqNum); ChunkIndexData indexData; if (dataStore_->ChunkIndexDataExist(name)) { ret = dataStore_->GetChunkIndexData(name, &indexData); @@ -995,29 +932,28 @@ void SnapshotCoreImpl::HandleDeleteSnapshotTask( auto chunkIndexVec = indexData.GetAllChunkIndex(); uint32_t totalProgress = kDelProgressDeleteChunkDataComplete - - kDelProgressDeleteChunkDataStart; + kDelProgressDeleteChunkDataStart; uint32_t chunkDataNum = chunkIndexVec.size(); - double progressPerData = static_cast (totalProgress) / - chunkDataNum; + double progressPerData = + static_cast(totalProgress) / chunkDataNum; uint32_t index = 0; LOG(INFO) << "HandleDeleteSnapshotTask GetChunkIndexData success, " << "begin to DeleteChunkData, " << "chunkDataNum = " << chunkIndexVec.size(); - for (auto &chunkIndex : chunkIndexVec) { + for (auto& chunkIndex : chunkIndexVec) { ChunkDataName chunkDataName; indexData.GetChunkDataName(chunkIndex, &chunkDataName); if ((!fileSnapshotMap.IsExistChunk(chunkDataName)) && (dataStore_->ChunkDataExist(chunkDataName))) { - ret = dataStore_->DeleteChunkData(chunkDataName); + ret = dataStore_->DeleteChunkData(chunkDataName); if (ret < 0) { LOG(ERROR) << "DeleteChunkData error, " << " ret = " << ret << ", fileName = " << task->GetFileName() << ", seqNum = " << seqNum - << ", chunkIndex = " - << chunkDataName.chunkIndex_ + << ", chunkIndex = " << chunkDataName.chunkIndex_ << ", uuid = " << task->GetUuid(); HandleDeleteSnapshotError(task); return; @@ -1059,8 +995,7 @@ void SnapshotCoreImpl::HandleDeleteSnapshotTask( ret = metaStore_->DeleteSnapshot(uuid); if (ret < 0) { LOG(ERROR) << "DeleteSnapshot error, " - << " ret = " << ret - << ", uuid = " << uuid; + << " ret = " << ret << ", uuid = " << uuid; HandleDeleteSnapshotError(task); return; } @@ -1068,7 +1003,7 @@ void SnapshotCoreImpl::HandleDeleteSnapshotTask( task->SetProgress(kProgressComplete); task->GetSnapshotInfo().SetStatus(Status::done); - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); LOG(INFO) << "DeleteSnapshot Task Success" << ", uuid = " << snapInfo.GetUuid() << ", fileName = " << 
snapInfo.GetFileName() @@ -1079,19 +1014,17 @@ void SnapshotCoreImpl::HandleDeleteSnapshotTask( return; } - void SnapshotCoreImpl::HandleDeleteSnapshotError( std::shared_ptr task) { - SnapshotInfo &info = task->GetSnapshotInfo(); + SnapshotInfo& info = task->GetSnapshotInfo(); info.SetStatus(Status::error); int ret = metaStore_->UpdateSnapshot(info); if (ret < 0) { LOG(ERROR) << "UpdateSnapshot Task Error Fail!" - << " ret = " << ret - << ", uuid = " << task->GetUuid(); + << " ret = " << ret << ", uuid = " << task->GetUuid(); } - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); LOG(INFO) << "DeleteSnapshot Task Fail" << ", uuid = " << snapInfo.GetUuid() << ", fileName = " << snapInfo.GetFileName() @@ -1102,23 +1035,22 @@ void SnapshotCoreImpl::HandleDeleteSnapshotError( return; } -int SnapshotCoreImpl::GetFileSnapshotInfo(const std::string &file, - std::vector *info) { +int SnapshotCoreImpl::GetFileSnapshotInfo(const std::string& file, + std::vector* info) { metaStore_->GetSnapshotList(file, info); return kErrCodeSuccess; } -int SnapshotCoreImpl::GetSnapshotInfo(const UUID uuid, - SnapshotInfo *info) { +int SnapshotCoreImpl::GetSnapshotInfo(const UUID uuid, SnapshotInfo* info) { return metaStore_->GetSnapshotInfo(uuid, info); } -int SnapshotCoreImpl::BuildSnapshotMap(const std::string &fileName, - uint64_t seqNum, - FileSnapMap *fileSnapshotMap) { +int SnapshotCoreImpl::BuildSnapshotMap(const std::string& fileName, + uint64_t seqNum, + FileSnapMap* fileSnapshotMap) { std::vector snapInfos; int ret = metaStore_->GetSnapshotList(fileName, &snapInfos); - for (auto &snap : snapInfos) { + for (auto& snap : snapInfos) { if (snap.GetSeqNum() != seqNum && snap.GetSeqNum() != kUnInitializeSeqNum) { ChunkIndexDataName name(snap.GetFileName(), snap.GetSeqNum()); @@ -1127,10 +1059,11 @@ int SnapshotCoreImpl::BuildSnapshotMap(const std::string &fileName, if (ret < 0) { LOG(ERROR) << "GetChunkIndexData error, " << " ret = " << ret - << ", fileName = " << snap.GetFileName() + << ", fileName = " << snap.GetFileName() << ", seqNum = " << snap.GetSeqNum(); - // 此处不能返回错误, - // 否则一旦某个失败的快照没有indexdata,所有快照都无法删除 + // Do not return an error here; otherwise, once a failed + // snapshot has no indexdata, none of the snapshots could + // be deleted } else { fileSnapshotMap->maps.push_back(std::move(indexData)); } @@ -1139,19 +1072,18 @@ int SnapshotCoreImpl::BuildSnapshotMap(const std::string &fileName, return kErrCodeSuccess; } -int SnapshotCoreImpl::GetSnapshotList(std::vector *list) { +int SnapshotCoreImpl::GetSnapshotList(std::vector* list) { metaStore_->GetSnapshotList(list); return kErrCodeSuccess; } int SnapshotCoreImpl::HandleCancelUnSchduledSnapshotTask( std::shared_ptr task) { - auto &snapInfo = task->GetSnapshotInfo(); + auto& snapInfo = task->GetSnapshotInfo(); int ret = metaStore_->DeleteSnapshot(snapInfo.GetUuid()); if (ret < 0) { LOG(ERROR) << "HandleCancelUnSchduledSnapshotTask fail, " - << " ret = " << ret - << ", uuid = " << snapInfo.GetUuid() + << " ret = " << ret << ", uuid = " << snapInfo.GetUuid() << ", fileName = " << snapInfo.GetFileName() << ", snapshotName = " << snapInfo.GetSnapshotName() << ", seqNum = " << snapInfo.GetSeqNum() @@ -1161,7 +1093,6 @@ int SnapshotCoreImpl::HandleCancelUnSchduledSnapshotTask( return kErrCodeSuccess; } - int SnapshotCoreImpl::HandleCancelScheduledSnapshotTask( std::shared_ptr task) { LockGuard lockGuard(task->GetLockRef()); @@ -1176,8 +1107,7 @@ int SnapshotCoreImpl::HandleCancelScheduledSnapshotTask( } else { auto& 
snapInfo = task->GetSnapshotInfo(); LOG(ERROR) << "HandleCancelSchduledSnapshotTask failed: " - << ", ret = " << ret - << ", uuid = " << snapInfo.GetUuid() + << ", ret = " << ret << ", uuid = " << snapInfo.GetUuid() << ", fileName = " << snapInfo.GetFileName() << ", snapshotName = " << snapInfo.GetSnapshotName() << ", seqNum = " << snapInfo.GetSeqNum() diff --git a/src/snapshotcloneserver/snapshot/snapshot_core.h b/src/snapshotcloneserver/snapshot/snapshot_core.h index 747e02ea2f..9667b64d39 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_core.h +++ b/src/snapshotcloneserver/snapshot/snapshot_core.h @@ -23,19 +23,19 @@ #ifndef SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_CORE_H_ #define SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_CORE_H_ +#include #include #include #include -#include -#include "src/snapshotcloneserver/common/curvefs_client.h" -#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" -#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" +#include "src/common/concurrent/name_lock.h" #include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/config.h" +#include "src/snapshotcloneserver/common/curvefs_client.h" #include "src/snapshotcloneserver/common/snapshot_reference.h" -#include "src/common/concurrent/name_lock.h" +#include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" #include "src/snapshotcloneserver/common/thread_pool.h" +#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" using ::curve::common::NameLock; @@ -45,22 +45,23 @@ namespace snapshotcloneserver { class SnapshotTaskInfo; /** - * @brief 文件的快照索引块映射表 + * @brief Mapping table of a file's snapshot index blocks */ struct FileSnapMap { std::vector maps; /** - * @brief 获取当前映射表中是否存在当前chunk数据 + * @brief Check whether the given chunk data exists in this mapping + * table * - * @param name chunk数据对象 + * @param name chunk data object * - * @retval true 存在 - * @retval false 不存在 + * @retval true exists + * @retval false does not exist */ - bool IsExistChunk(const ChunkDataName &name) const { + bool IsExistChunk(const ChunkDataName& name) const { bool find = false; - for (auto &v : maps) { + for (auto& v : maps) { find = v.IsExistChunkDataName(name); if (find) { break; @@ -71,7 +72,7 @@ struct FileSnapMap { }; /** - * @brief 快照核心模块 + * @brief Snapshot core module */ class SnapshotCore { public: @@ -79,80 +80,76 @@ class SnapshotCore { virtual ~SnapshotCore() {} /** - * @brief 创建快照前置操作 + * @brief Pre-operation for creating a snapshot * - * @param file 文件名 - * @param user 用户名 - * @param snapshotName 快照名 - * @param[out] snapInfo 快照信息 + * @param file file name + * @param user username + * @param snapshotName snapshot name + * @param[out] snapInfo snapshot information * - * @return 错误码 + * @return error code */ - virtual int CreateSnapshotPre(const std::string &file, - const std::string &user, - const std::string &snapshotName, - SnapshotInfo *snapInfo) = 0; + virtual int CreateSnapshotPre(const std::string& file, + const std::string& user, + const std::string& snapshotName, + SnapshotInfo* snapInfo) = 0; /** - * @brief 执行创建快照任务并更新progress - * 第一步,构建快照文件映射, put MateObj - * 第二步,从curvefs读取chunk文件,并put DataObj - * 第三步,删除curvefs中的临时快照 - * 第四步,update status + * @brief Execute the create-snapshot task and update the progress + * Step 1, build the snapshot file mapping and put MateObj + * Step 2, read the chunk files from curvefs and put DataObj + * Step 3, delete the temporary snapshot on curvefs + * Step 4, update status * - * @param task 
快照任务信息 + * @param task snapshot task information */ virtual void HandleCreateSnapshotTask( std::shared_ptr task) = 0; /** - * @brief 删除快照前置操作 - * 更新数据库中的快照记录为deleting状态 + * @brief Pre-operation for deleting a snapshot + * Updates the snapshot record in the database to the deleting state * - * @param uuid 快照uuid - * @param user 用户名 - * @param fileName 文件名 - * @param[out] snapInfo 快照信息 + * @param uuid snapshot uuid + * @param user username + * @param fileName file name + * @param[out] snapInfo snapshot information * - * @return 错误码 + * @return error code */ - virtual int DeleteSnapshotPre( - UUID uuid, - const std::string &user, - const std::string &fileName, - SnapshotInfo *snapInfo) = 0; + virtual int DeleteSnapshotPre(UUID uuid, const std::string& user, + const std::string& fileName, + SnapshotInfo* snapInfo) = 0; /** - * @brief 执行删除快照任务并更新progress + * @brief Execute the delete-snapshot task and update the progress * - * @param task 快照任务信息 + * @param task snapshot task information */ virtual void HandleDeleteSnapshotTask( std::shared_ptr task) = 0; /** - * @brief 获取文件的快照信息 + * @brief Get the snapshot information of a file * - * @param file 文件名 - * @param info 快照信息列表 + * @param file file name + * @param info snapshot information list * - * @return 错误码 + * @return error code */ - virtual int GetFileSnapshotInfo(const std::string &file, - std::vector *info) = 0; + virtual int GetFileSnapshotInfo(const std::string& file, + std::vector* info) = 0; /** - * @brief 获取全部快照信息 + * @brief Get all snapshot information * - * @param list 快照信息列表 + * @param list snapshot information list * - * @return 错误码 + * @return error code */ - virtual int GetSnapshotList(std::vector *list) = 0; - + virtual int GetSnapshotList(std::vector* list) = 0; - virtual int GetSnapshotInfo(const UUID uuid, - SnapshotInfo *info) = 0; + virtual int GetSnapshotInfo(const UUID uuid, SnapshotInfo* info) = 0; virtual int HandleCancelUnSchduledSnapshotTask( std::shared_ptr task) = 0; @@ -170,66 +167,61 @@ class SnapshotCoreImpl : public SnapshotCore { public: - /** - * @brief 构造函数 - * - * @param client curve客户端对象 - * @param metaStore meta存储对象 - * @param dataStore data存储对象 - */ - SnapshotCoreImpl( - std::shared_ptr client, - std::shared_ptr metaStore, - std::shared_ptr dataStore, - std::shared_ptr snapshotRef, - const SnapshotCloneServerOptions &option) - : client_(client), - metaStore_(metaStore), - dataStore_(dataStore), - snapshotRef_(snapshotRef), - chunkSplitSize_(option.chunkSplitSize), - checkSnapshotStatusIntervalMs_(option.checkSnapshotStatusIntervalMs), - maxSnapshotLimit_(option.maxSnapshotLimit), - snapshotCoreThreadNum_(option.snapshotCoreThreadNum), - mdsSessionTimeUs_(option.mdsSessionTimeUs), - clientAsyncMethodRetryTimeSec_(option.clientAsyncMethodRetryTimeSec), - clientAsyncMethodRetryIntervalMs_( - option.clientAsyncMethodRetryIntervalMs), - readChunkSnapshotConcurrency_(option.readChunkSnapshotConcurrency) { - threadPool_ = std::make_shared( - option.snapshotCoreThreadNum); + /** + * @brief Constructor + * + * @param client curve client object + * @param metaStore meta storage object + * @param dataStore data storage object + */ + SnapshotCoreImpl(std::shared_ptr client, + std::shared_ptr metaStore, + std::shared_ptr dataStore, + std::shared_ptr snapshotRef, + const SnapshotCloneServerOptions& option) + : client_(client), + metaStore_(metaStore), + dataStore_(dataStore), + snapshotRef_(snapshotRef), + chunkSplitSize_(option.chunkSplitSize), + 
checkSnapshotStatusIntervalMs_(option.checkSnapshotStatusIntervalMs), + maxSnapshotLimit_(option.maxSnapshotLimit), + snapshotCoreThreadNum_(option.snapshotCoreThreadNum), + mdsSessionTimeUs_(option.mdsSessionTimeUs), + clientAsyncMethodRetryTimeSec_(option.clientAsyncMethodRetryTimeSec), + clientAsyncMethodRetryIntervalMs_( + option.clientAsyncMethodRetryIntervalMs), + readChunkSnapshotConcurrency_(option.readChunkSnapshotConcurrency) { + threadPool_ = + std::make_shared(option.snapshotCoreThreadNum); } int Init(); - ~SnapshotCoreImpl() { - threadPool_->Stop(); - } + ~SnapshotCoreImpl() { threadPool_->Stop(); } - // 公有接口定义见SnapshotCore接口注释 - int CreateSnapshotPre(const std::string &file, - const std::string &user, - const std::string &snapshotName, - SnapshotInfo *snapInfo) override; + // See the SnapshotCore interface comments for the public interface + // definitions + int CreateSnapshotPre(const std::string& file, const std::string& user, + const std::string& snapshotName, + SnapshotInfo* snapInfo) override; void HandleCreateSnapshotTask( std::shared_ptr task) override; - int DeleteSnapshotPre(UUID uuid, - const std::string &user, - const std::string &fileName, - SnapshotInfo *snapInfo) override; + int DeleteSnapshotPre(UUID uuid, const std::string& user, + const std::string& fileName, + SnapshotInfo* snapInfo) override; void HandleDeleteSnapshotTask( std::shared_ptr task) override; - int GetFileSnapshotInfo(const std::string &file, - std::vector *info) override; + int GetFileSnapshotInfo(const std::string& file, + std::vector* info) override; - int GetSnapshotInfo(const UUID uuid, - SnapshotInfo *info) override; + int GetSnapshotInfo(const UUID uuid, SnapshotInfo* info) override; - int GetSnapshotList(std::vector *list) override; + int GetSnapshotList(std::vector* list) override; int HandleCancelUnSchduledSnapshotTask( std::shared_ptr task) override; @@ -239,201 +231,188 @@ class SnapshotCoreImpl : public SnapshotCore { private: /** - * @brief 构建快照文件映射 + * @brief Build the snapshot file mapping * - * @param fileName 文件名 - * @param seqNum 快照版本号 - * @param fileSnapshotMap 快照文件映射表 + * @param fileName file name + * @param seqNum snapshot version number + * @param fileSnapshotMap snapshot file mapping table * - * @return 错误码 + * @return error code */ - int BuildSnapshotMap(const std::string &fileName, - uint64_t seqNum, - FileSnapMap *fileSnapshotMap); + int BuildSnapshotMap(const std::string& fileName, uint64_t seqNum, + FileSnapMap* fileSnapshotMap); /** - * @brief 构建Segment信息 + * @brief Build segment information * - * @param info 快照信息 - * @param segInfos Segment信息表 + * @param info snapshot information + * @param segInfos segment information table * - * @return 错误码 + * @return error code */ - int BuildSegmentInfo( - const SnapshotInfo &info, - std::map *segInfos); + int BuildSegmentInfo(const SnapshotInfo& info, + std::map* segInfos); /** - * @brief 在curvefs上创建快照 + * @brief Create a snapshot on curvefs * - * @param fileName 文件名 - * @param info 快照信息 - * @param task 快照任务信息 + * @param fileName file name + * @param info snapshot information + * @param task snapshot task information * - * @return 错误码 + * @return error code */ - int CreateSnapshotOnCurvefs( - const std::string &fileName, - SnapshotInfo *info, - std::shared_ptr task); + int CreateSnapshotOnCurvefs(const std::string& fileName, SnapshotInfo* info, + std::shared_ptr task); /** - * @brief 删除curvefs上的快照 + * @brief Delete a snapshot on curvefs * - * @param info 快照信息 + * @param info snapshot information * - * @return 错误码 + * @return 
error code
     */
-    int DeleteSnapshotOnCurvefs(const SnapshotInfo &info);
+    int DeleteSnapshotOnCurvefs(const SnapshotInfo& info);

    /**
-     * @brief 构建索引块
+     * @brief Build the index block
     *
-     * @param info 快照信息
-     * @param[out] indexData 索引块
-     * @param[out] segInfos Segment信息
-     * @param task 快照任务信息
+     * @param info snapshot information
+     * @param[out] indexData index block
+     * @param[out] segInfos segment information
+     * @param task snapshot task information
     *
-     * @return 错误码
+     * @return error code
     */
-    int BuildChunkIndexData(
-        const SnapshotInfo &info,
-        ChunkIndexData *indexData,
-        std::map<uint64_t, SegmentInfo> *segInfos,
-        std::shared_ptr<SnapshotTaskInfo> task);
+    int BuildChunkIndexData(const SnapshotInfo& info, ChunkIndexData* indexData,
+                            std::map<uint64_t, SegmentInfo>* segInfos,
+                            std::shared_ptr<SnapshotTaskInfo> task);

-    using ChunkDataExistFilter =
-        std::function<bool(const ChunkDataName &)>;
+    using ChunkDataExistFilter = std::function<bool(const ChunkDataName&)>;

    /**
-     * @brief 转储快照过程
+     * @brief The snapshot data dump process
     *
-     * @param indexData 索引块
-     * @param info 快照信息
-     * @param segInfos Segment信息
-     * @param filter 转储数据块过滤器
-     * @param task 快照任务信息
+     * @param indexData index block
+     * @param info snapshot information
+     * @param segInfos segment information
+     * @param filter dump data chunk filter
+     * @param task snapshot task information
     *
-     * @return 错误码
+     * @return error code
     */
-    int TransferSnapshotData(
-        const ChunkIndexData indexData,
-        const SnapshotInfo &info,
-        const std::map<uint64_t, SegmentInfo> &segInfos,
-        const ChunkDataExistFilter &filter,
-        std::shared_ptr<SnapshotTaskInfo> task);
+    int TransferSnapshotData(const ChunkIndexData indexData,
+                             const SnapshotInfo& info,
+                             const std::map<uint64_t, SegmentInfo>& segInfos,
+                             const ChunkDataExistFilter& filter,
+                             std::shared_ptr<SnapshotTaskInfo> task);

    /**
-     * @brief 开始cancel,更新任务状态,更新数据库状态
+     * @brief Start the cancel, update the task status and the database status
     *
-     * @param task 快照任务信息
+     * @param task snapshot task information
     *
-     * @return 错误码
+     * @return error code
     */
-    int StartCancel(
-        std::shared_ptr<SnapshotTaskInfo> task);
+    int StartCancel(std::shared_ptr<SnapshotTaskInfo> task);

    /**
-     * @brief 转储数据之后取消快照过程
+     * @brief Cancel the snapshot after the data has been dumped
     *
-     * @param task 快照任务信息
-     * @param indexData 索引块
-     * @param fileSnapshotMap 快照文件映射表
+     * @param task snapshot task information
+     * @param indexData index block
+     * @param fileSnapshotMap snapshot file mapping table
     */
-    void CancelAfterTransferSnapshotData(
-        std::shared_ptr<SnapshotTaskInfo> task,
-        const ChunkIndexData &indexData,
-        const FileSnapMap &fileSnapshotMap);
+    void CancelAfterTransferSnapshotData(std::shared_ptr<SnapshotTaskInfo> task,
+                                         const ChunkIndexData& indexData,
+                                         const FileSnapMap& fileSnapshotMap);

    /**
-     * @brief 创建索引块之后取消快照过程
+     * @brief Cancel the snapshot after the index block has been created
     *
-     * @param task 快照任务信息
+     * @param task snapshot task information
     */
    void CancelAfterCreateChunkIndexData(
        std::shared_ptr<SnapshotTaskInfo> task);

    /**
-     * @brief 在curvefs上创建快照之后取消快照过程
+     * @brief Cancel the snapshot after it has been created on curvefs
     *
-     * @param task 快照任务信息
+     * @param task snapshot task information
     */
    void CancelAfterCreateSnapshotOnCurvefs(
        std::shared_ptr<SnapshotTaskInfo> task);

    /**
-     * @brief 在Mate数据存储在删除快照
+     * @brief Delete the snapshot record from the meta store
     *
-     * @param task 快照任务信息
+     * @param task snapshot task information
     */
-    void HandleClearSnapshotOnMateStore(
-        std::shared_ptr<SnapshotTaskInfo> task);
+    void HandleClearSnapshotOnMateStore(std::shared_ptr<SnapshotTaskInfo> task);

    /**
-     * @brief 处理创建快照任务成功
+     * @brief Handle the success of a snapshot creation task
     *
-     * @param task 快照任务信息
+     * @param task snapshot task information
     */
-    void HandleCreateSnapshotSuccess(
-        std::shared_ptr<SnapshotTaskInfo> task);
+    void HandleCreateSnapshotSuccess(std::shared_ptr<SnapshotTaskInfo> task);

    /**
-     * @brief 
处理创建快照任务失败过程
+     * @brief Handle the failure of a snapshot creation task
     *
-     * @param task 快照任务信息
+     * @param task snapshot task information
     */
-    void HandleCreateSnapshotError(
-        std::shared_ptr<SnapshotTaskInfo> task);
+    void HandleCreateSnapshotError(std::shared_ptr<SnapshotTaskInfo> task);

    /**
-     * @brief 处理删除快照任务失败过程
+     * @brief Handle the failure of a snapshot deletion task
     *
-     * @param task 快照任务信息
+     * @param task snapshot task information
     */
-    void HandleDeleteSnapshotError(
-        std::shared_ptr<SnapshotTaskInfo> task);
-
+    void HandleDeleteSnapshotError(std::shared_ptr<SnapshotTaskInfo> task);

    /**
-     * @brief 创建快照前尝试清理失败的快照,否则可能会再次失败
+     * @brief Try to clean up any previously failed snapshot before creating
+     * a new one, otherwise the creation may fail again
     *
-     * @param task 快照任务信息
-     * @return 错误码
+     * @param task snapshot task information
+     * @return error code
     */
    int ClearErrorSnapBeforeCreateSnapshot(
        std::shared_ptr<SnapshotTaskInfo> task);

 private:
-    // curvefs客户端对象
+    // Curvefs client object
    std::shared_ptr<CurveFsClient> client_;
-    // meta数据存储
+    // Meta data store
    std::shared_ptr<SnapshotCloneMetaStore> metaStore_;
-    // data数据存储
+    // Data store
    std::shared_ptr<SnapshotDataStore> dataStore_;
-    // 快照引用计数管理模块
+    // Snapshot reference count management module
    std::shared_ptr<SnapshotReference> snapshotRef_;
-    // 执行并发步骤的线程池
+    // Thread pool for executing concurrent steps
    std::shared_ptr<ThreadPool> threadPool_;
-    // 锁住打快照的文件名,防止并发同时对其打快照,同一文件的快照需排队
+    // Lock the file name being snapshotted to prevent concurrent snapshots;
+    // snapshots of the same file must be queued
    NameLock snapshotNameLock_;
-    // 转储chunk分片大小
+    // Chunk split size for dumping
    uint64_t chunkSplitSize_;
-    // CheckSnapShotStatus调用间隔
+    // CheckSnapShotStatus call interval
    uint32_t checkSnapshotStatusIntervalMs_;
-    // 最大快照数
+    // Maximum number of snapshots
    uint32_t maxSnapshotLimit_;
-    // 线程数
+    // Number of threads
    uint32_t snapshotCoreThreadNum_;
-    // session超时时间
+    // Session timeout
    uint32_t mdsSessionTimeUs_;
-    // client异步回调请求的重试总时间
+    // Total retry time for client asynchronous callback requests
    uint64_t clientAsyncMethodRetryTimeSec_;
-    // 调用client异步方法重试时间间隔
+    // Retry interval for client asynchronous method calls
    uint64_t clientAsyncMethodRetryIntervalMs_;
-    // 异步ReadChunkSnapshot的并发数
+    // Concurrency of asynchronous ReadChunkSnapshot requests
    uint32_t readChunkSnapshotConcurrency_;
 };

diff --git a/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp b/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp
index 8401af3b82..2c9fd2e28c 100644
--- a/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp
+++ b/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp
@@ -27,10 +27,10 @@
 namespace curve {
 namespace snapshotcloneserver {

-bool ToChunkDataName(const std::string &name, ChunkDataName *cName) {
-    // 逆向解析string,以支持文件名具有分隔字符的情况
-    std::string::size_type pos =
-        name.find_last_of(kChunkDataNameSeprator);
+bool ToChunkDataName(const std::string& name, ChunkDataName* cName) {
+    // Parse the string from the end so that file names containing the
+    // separator character are handled correctly; e.g. "file-1-0-2" yields
+    // fileName "file-1", chunkIndex 0 and seqNum 2.
+    std::string::size_type pos = name.find_last_of(kChunkDataNameSeprator);
    std::string::size_type lastPos = std::string::npos;
    if (std::string::npos == pos) {
        LOG(ERROR) << "ToChunkDataName error, namestr = " << name;
@@ -40,8 +40,7 @@ bool ToChunkDataName(const std::string &name, ChunkDataName *cName) {
    cName->chunkSeqNum_ = std::stoll(seqNumStr);

    lastPos = pos - 1;
-    pos =
-        name.find_last_of(kChunkDataNameSeprator, lastPos);
+    pos = name.find_last_of(kChunkDataNameSeprator, lastPos);
    if (std::string::npos == pos) {
        LOG(ERROR) << "ToChunkDataName error, namestr = " << name;
        return false;
@@ -57,27 +56,26 @@ bool ToChunkDataName(const std::string &name, 
ChunkDataName *cName) {
    return true;
 }

-bool ChunkIndexData::Serialize(std::string *data) const {
+bool ChunkIndexData::Serialize(std::string* data) const {
    ChunkMap map;
-    for (const auto &m : this->chunkMap_) {
-        map.mutable_indexmap()->
-            insert({m.first,
-                ChunkDataName(fileName_, m.second, m.first).
-                    ToDataChunkKey()});
+    for (const auto& m : this->chunkMap_) {
+        map.mutable_indexmap()->insert(
+            {m.first,
+             ChunkDataName(fileName_, m.second, m.first).ToDataChunkKey()});
    }
-    // Todo:可以转化为stream给adpater接口使用SerializeToOstream
+    // TODO: could be converted to a stream so the adapter interface can use
+    // SerializeToOstream
    return map.SerializeToString(data);
 }

-bool ChunkIndexData::Unserialize(const std::string &data) {
-    ChunkMap map;
+bool ChunkIndexData::Unserialize(const std::string& data) {
+    ChunkMap map;
    if (map.ParseFromString(data)) {
-        for (const auto &m : map.indexmap()) {
+        for (const auto& m : map.indexmap()) {
            ChunkDataName chunkDataName;
            if (ToChunkDataName(m.second, &chunkDataName)) {
                this->fileName_ = chunkDataName.fileName_;
-                this->chunkMap_.emplace(m.first,
-                    chunkDataName.chunkSeqNum_);
+                this->chunkMap_.emplace(m.first, chunkDataName.chunkSeqNum_);
            } else {
                return false;
            }
@@ -89,7 +87,7 @@ bool ChunkIndexData::Unserialize(const std::string &data) {
 }

 bool ChunkIndexData::GetChunkDataName(ChunkIndexType index,
-    ChunkDataName* nameOut) const {
+                                      ChunkDataName* nameOut) const {
    auto it = chunkMap_.find(index);
    if (it != chunkMap_.end()) {
        *nameOut = ChunkDataName(fileName_, it->second, index);
@@ -99,7 +97,7 @@ bool ChunkIndexData::GetChunkDataName(ChunkIndexType index,
    }
 }

-bool ChunkIndexData::IsExistChunkDataName(const ChunkDataName &name) const {
+bool ChunkIndexData::IsExistChunkDataName(const ChunkDataName& name) const {
    if (fileName_ != name.fileName_) {
        return false;
    }
@@ -120,5 +118,5 @@ std::vector<ChunkIndexType> ChunkIndexData::GetAllChunkIndex() const {
    return ret;
 }

-}  // namespace snapshotcloneserver
-}  // namespace curve
+}  // namespace snapshotcloneserver
+}  // namespace curve
diff --git a/src/snapshotcloneserver/snapshot/snapshot_data_store.h b/src/snapshotcloneserver/snapshot/snapshot_data_store.h
index ae88b7694b..ed7d675450 100644
--- a/src/snapshotcloneserver/snapshot/snapshot_data_store.h
+++ b/src/snapshotcloneserver/snapshot/snapshot_data_store.h
@@ -26,16 +26,16 @@
 #include
 #include

-#include
-#include
 #include
-#include
+#include
 #include
+#include
+#include

 #include "src/common/concurrent/concurrent.h"

-using ::curve::common::SpinLock;
 using ::curve::common::LockGuard;
+using ::curve::common::SpinLock;

 namespace curve {
 namespace snapshotcloneserver {

 const char kChunkDataNameSeprator[] = "-";

 class ChunkDataName {
 public:
-    ChunkDataName()
-        : chunkSeqNum_(0),
-          chunkIndex_(0) {}
-    ChunkDataName(const std::string &fileName,
-        SnapshotSeqType seq,
+    ChunkDataName() : chunkSeqNum_(0), chunkIndex_(0) {}
+    ChunkDataName(const std::string& fileName, SnapshotSeqType seq,
                  ChunkIndexType chunkIndex)
-        : fileName_(fileName),
-          chunkSeqNum_(seq),
-          chunkIndex_(chunkIndex) {}
+        : fileName_(fileName), chunkSeqNum_(seq), chunkIndex_(chunkIndex) {}
    /**
-     * 构建datachunk对象的名称 文件名-chunk索引-版本号
-     * @return: 对象名称字符串
+     * Build the datachunk object name in the form
+     * "filename-chunkIndex-seqNum", e.g. "file1-0-2"
+     * @return: object name string
     */
    std::string ToDataChunkKey() const {
-        return fileName_
-            + kChunkDataNameSeprator
-            + std::to_string(this->chunkIndex_)
-            + kChunkDataNameSeprator
-            + std::to_string(this->chunkSeqNum_);
+        return fileName_ + 
kChunkDataNameSeprator +
+               std::to_string(this->chunkIndex_) + kChunkDataNameSeprator +
+               std::to_string(this->chunkSeqNum_);
    }

    std::string fileName_;
@@ -73,45 +67,41 @@ class ChunkDataName {
    ChunkIndexType chunkIndex_;
 };

-inline bool operator==(const ChunkDataName &lhs, const ChunkDataName &rhs) {
+inline bool operator==(const ChunkDataName& lhs, const ChunkDataName& rhs) {
    return (lhs.fileName_ == rhs.fileName_) &&
           (lhs.chunkSeqNum_ == rhs.chunkSeqNum_) &&
           (lhs.chunkIndex_ == rhs.chunkIndex_);
 }

 /**
- * @brief 根据对象名称解析生成chunkdataname对象
+ * @brief Parse an object name into a ChunkDataName object
 *
- * @param name 对象名
- * @param[out] cName chunkDataName对象
+ * @param name object name
+ * @param[out] cName ChunkDataName object
 *
- * @retVal true 成功
- * @retVal false 失败
+ * @retval true succeeded
+ * @retval false failed
 */
-bool ToChunkDataName(const std::string &name, ChunkDataName *cName);
+bool ToChunkDataName(const std::string& name, ChunkDataName* cName);

 class ChunkIndexDataName {
 public:
-    ChunkIndexDataName()
-        : fileSeqNum_(0) {}
-    ChunkIndexDataName(std::string filename,
-        SnapshotSeqType seq) {
+    ChunkIndexDataName() : fileSeqNum_(0) {}
+    ChunkIndexDataName(std::string filename, SnapshotSeqType seq) {
        fileName_ = filename;
        fileSeqNum_ = seq;
    }
    /**
-     * 构建索引chunk的名称 文件名+文件版本号
-     * @return: 索引chunk的名称字符串
+     * Build the index chunk name in the form "filename-fileSeqNum"
+     * @return: index chunk name string
     */
    std::string ToIndexDataChunkKey() const {
-        return this->fileName_
-            + "-"
-            + std::to_string(this->fileSeqNum_);
+        return this->fileName_ + "-" + std::to_string(this->fileSeqNum_);
    }

-    // 文件名
+    // File name
    std::string fileName_;
-    // 文件版本号
+    // File version number
    SnapshotSeqType fileSeqNum_;
 };

@@ -119,46 +109,41 @@ class ChunkIndexData {
 public:
    ChunkIndexData() {}
    /**
-     * 索引chunk数据序列化(使用protobuf实现)
-     * @param 保存序列化后数据的指针
-     * @return: true 序列化成功/ false 序列化失败
+     * Serialize the index chunk data (implemented with protobuf)
+     * @param data pointer to hold the serialized data
+     * @return: true if serialization succeeded / false otherwise
     */
-    bool Serialize(std::string *data) const;
+    bool Serialize(std::string* data) const;

    /**
-     * 反序列化索引chunk的数据到map中
-     * @param 索引chunk存储的数据
-     * @return: true 反序列化成功/ false 反序列化失败
+     * Deserialize the index chunk data into the map
+     * @param data the data stored in the index chunk
+     * @return: true if deserialization succeeded / false otherwise
     */
-    bool Unserialize(const std::string &data);
+    bool Unserialize(const std::string& data);

-    void PutChunkDataName(const ChunkDataName &name) {
+    void PutChunkDataName(const ChunkDataName& name) {
        chunkMap_.emplace(name.chunkIndex_, name.chunkSeqNum_);
    }

    bool GetChunkDataName(ChunkIndexType index, ChunkDataName* nameOut) const;

-    bool IsExistChunkDataName(const ChunkDataName &name) const;
+    bool IsExistChunkDataName(const ChunkDataName& name) const;

    std::vector<ChunkIndexType> GetAllChunkIndex() const;

-    void SetFileName(const std::string &fileName) {
-        fileName_ = fileName;
-    }
+    void SetFileName(const std::string& fileName) { fileName_ = fileName; }

-    std::string GetFileName() {
-        return fileName_;
-    }
+    std::string GetFileName() { return fileName_; }

 private:
-    // 文件名
+    // File name
    std::string fileName_;
-    // 快照文件索引信息map
+    // Snapshot file index information map
    std::map<ChunkIndexType, SnapshotSeqType> chunkMap_;
 };

-class ChunkData{
+class ChunkData {
 public:
    ChunkData() {}
    std::string data_;
@@ -166,132 +151,131 @@ class ChunkData{

 class TransferTask {
 public:
-    TransferTask() {}
-    std::string 
uploadId_;
+    TransferTask() {}
+    std::string uploadId_;

-    void AddPartInfo(int partNum, std::string etag) {
-        m_.Lock();
-        partInfo_.emplace(partNum, etag);
-        m_.UnLock();
-    }
+    void AddPartInfo(int partNum, std::string etag) {
+        m_.Lock();
+        partInfo_.emplace(partNum, etag);
+        m_.UnLock();
+    }

-    std::map<int, std::string> GetPartInfo() {
-        return partInfo_;
-    }
+    std::map<int, std::string> GetPartInfo() { return partInfo_; }

 private:
-    mutable SpinLock m_;
-    // partnumber <=> etag
-    std::map<int, std::string> partInfo_;
+    mutable SpinLock m_;
+    // partnumber <=> etag
+    std::map<int, std::string> partInfo_;
 };

 class SnapshotDataStore {
 public:
-    SnapshotDataStore() {}
+    SnapshotDataStore() {}
    virtual ~SnapshotDataStore() {}
    /**
-     * 快照的datastore初始化,根据存储的类型有不同的实现
-     * @param s3配置文件路径
-     * @return 0 初始化成功/ -1 初始化失败
+     * Initialize the snapshot datastore; the implementation depends on the
+     * storage type
+     * @param confpath path of the s3 configuration file
+     * @return 0 initialization succeeded / -1 initialization failed
     */
-    virtual int Init(const std::string &confpath) = 0;
+    virtual int Init(const std::string& confpath) = 0;
    /**
-     * 存储快照文件的元数据信息到datastore中
-     * @param 元数据对象名
-     * @param 元数据对象的数据内容
-     * @return 0 保存成功/ -1 保存失败
+     * Store the metadata of a snapshot file in the datastore
+     * @param name metadata object name
+     * @param meta content of the metadata object
+     * @return 0 saved successfully / -1 save failed
     */
-    virtual int PutChunkIndexData(const ChunkIndexDataName &name,
-        const ChunkIndexData &meta) = 0;
+    virtual int PutChunkIndexData(const ChunkIndexDataName& name,
+                                  const ChunkIndexData& meta) = 0;
    /**
-     * 获取快照文件的元数据信息
-     * @param 元数据对象名
-     * @param 保存元数据数据内容的指针
-     * return: 0 获取成功/ -1 获取失败
+     * Get the metadata of a snapshot file
+     * @param name metadata object name
+     * @param[out] meta pointer to hold the metadata content
+     * @return: 0 fetched successfully / -1 fetch failed
     */
-    virtual int GetChunkIndexData(const ChunkIndexDataName &name,
-        ChunkIndexData *meta) = 0;
+    virtual int GetChunkIndexData(const ChunkIndexDataName& name,
+                                  ChunkIndexData* meta) = 0;
    /**
-     * 删除快照文件的元数据
-     * @param 元数据对象名
-     * @return: 0 删除成功/ -1 删除失败
+     * Delete the metadata of a snapshot file
+     * @param name metadata object name
+     * @return: 0 deleted successfully / -1 delete failed
     */
-    virtual int DeleteChunkIndexData(const ChunkIndexDataName &name) = 0;
-    // 快照元数据chunk是否存在
+    virtual int DeleteChunkIndexData(const ChunkIndexDataName& name) = 0;
+    // Whether the snapshot metadata chunk exists
    /**
-     * 判断快照元数据是否存在
-     * @param 元数据对象名
-     * @return: true 存在/ false 不存在
+     * Determine whether the snapshot metadata exists
+     * @param name metadata object name
+     * @return: true if it exists / false otherwise
     */
-    virtual bool ChunkIndexDataExist(const ChunkIndexDataName &name) = 0;
-/*
-    // 存储快照文件的数据信息到datastore
-    virtual int PutChunkData(const ChunkDataName &name,
-        const ChunkData &data) = 0;
-
-    // 读取快照文件的数据信息
-    virtual int GetChunkData(const ChunkDataName &name,
-        ChunkData *data) = 0;
-*/
+    virtual bool ChunkIndexDataExist(const ChunkIndexDataName& name) = 0;
+    /*
+        // Store the data of a snapshot file in the datastore
+        virtual int PutChunkData(const ChunkDataName &name,
+                                 const ChunkData &data) = 0;
+
+        // Read the data of a snapshot file
+        virtual int GetChunkData(const ChunkDataName &name,
+                                 ChunkData *data) = 0;
+    */
    /**
-     * 删除快照的数据chunk
-     * @param 数据chunk名
-     * @return: 0 删除成功/ -1 删除失败
+     * Delete a data chunk of the snapshot
+     * @param name chunk data name
+     * @return: 0 deleted successfully / -1 delete failed
     */
-    virtual int 
DeleteChunkData(const ChunkDataName &name) = 0;
+    virtual int DeleteChunkData(const ChunkDataName& name) = 0;
    /**
-     * 判断快照的数据chunk是否存在
-     * @param 数据chunk名称
-     * @return: true 存在/ false 不存在
+     * Determine whether a data chunk of the snapshot exists
+     * @param name chunk data name
+     * @return: true if it exists / false otherwise
     */
-    virtual bool ChunkDataExist(const ChunkDataName &name) = 0;
-    // 设置快照转储完成标志
-/*
-    virtual int SetSnapshotFlag(const ChunkIndexDataName &name, int flag) = 0;
-    // 获取快照转储完成标志
-    virtual int GetSnapshotFlag(const ChunkIndexDataName &name) = 0;
-*/
+    virtual bool ChunkDataExist(const ChunkDataName& name) = 0;
+    // Set the snapshot dump completion flag
+    /*
+        virtual int SetSnapshotFlag(const ChunkIndexDataName &name, int flag) =
+        0;
+        // Get the snapshot dump completion flag
+        virtual int GetSnapshotFlag(const ChunkIndexDataName &name) = 0;
+    */
    /**
-     * 初始化数据库chunk的分片转储任务
-     * @param 数据chunk名称
-     * @param 管理转储任务的指针
-     * @return 0 任务初始化成功/ -1 任务初始化失败
+     * Initialize a multipart dump task for a data chunk
+     * @param name chunk data name
+     * @param task pointer to the dump task handle
+     * @return 0 task initialization succeeded / -1 task initialization failed
     */
-    virtual int DataChunkTranferInit(const ChunkDataName &name,
-        std::shared_ptr<TransferTask> task) = 0;
+    virtual int DataChunkTranferInit(const ChunkDataName& name,
+                                     std::shared_ptr<TransferTask> task) = 0;
    /**
-     * 添加数据chunk的一个分片到转储任务中
-     * @param 数据chunk名
-     * @转储任务
-     * @第几个分片
-     * @分片大小
-     * @分片的数据内容
-     * @return: 0 添加成功/ -1 添加失败
+     * Add one part of a data chunk to the dump task
+     * @param name chunk data name
+     * @param task dump task
+     * @param partNum index of the part
+     * @param partSize part size
+     * @param buf part data content
+     * @return: 0 added successfully / -1 add failed
     */
-    virtual int DataChunkTranferAddPart(const ChunkDataName &name,
        std::shared_ptr<TransferTask> task,
-        int partNum,
-        int partSize,
-        const char* buf) = 0;
+    virtual int DataChunkTranferAddPart(const ChunkDataName& name,
+                                        std::shared_ptr<TransferTask> task,
+                                        int partNum, int partSize,
+                                        const char* buf) = 0;
    /**
-     * 完成数据chunk的转储任务
-     * @param 数据chunk名
-     * @param 转储任务管理结构
-     * @return: 0 转储任务完成/ 转储任务失败 -1
+     * Complete the dump task of a data chunk
+     * @param name chunk data name
+     * @param task dump task handle
+     * @return: 0 dump task completed / -1 dump task failed
     */
-    virtual int DataChunkTranferComplete(const ChunkDataName &name,
-        std::shared_ptr<TransferTask> task) = 0;
+    virtual int DataChunkTranferComplete(
+        const ChunkDataName& name, std::shared_ptr<TransferTask> task) = 0;
    /**
-     * 终止数据chunk的分片转储任务
-     * @param 数据chunk名
-     * @param 转储任务管理结构
-     * @return: 0 任务终止成功/ -1 任务终止失败
+     * Abort the multipart dump task of a data chunk
+     * @param name chunk data name
+     * @param task dump task handle
+     * @return: 0 task aborted successfully / -1 abort failed
     */
-    virtual int DataChunkTranferAbort(const ChunkDataName &name,
+    virtual int DataChunkTranferAbort(const ChunkDataName& name,
                                      std::shared_ptr<TransferTask> task) = 0;
 };

-}  // namespace snapshotcloneserver
-}  // namespace curve
+}  // namespace snapshotcloneserver
+}  // namespace curve

 #endif  // SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_DATA_STORE_H_
diff --git a/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h b/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h
index d1324243e4..d43add3f96 100644
--- a/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h
+++ b/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h
@@ -23,13 +23,14 @@
 #ifndef SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_DATA_STORE_S3_H_
 #define 
SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_DATA_STORE_S3_H_ -#include -#include #include -#include +#include #include -#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" +#include +#include + #include "src/common/s3_adapter.h" +#include "src/snapshotcloneserver/snapshot/snapshot_data_store.h" using ::curve::common::S3Adapter; namespace curve { @@ -37,59 +38,53 @@ namespace snapshotcloneserver { class S3SnapshotDataStore : public SnapshotDataStore { public: - S3SnapshotDataStore() { + S3SnapshotDataStore() { s3Adapter4Meta_ = std::make_shared(); s3Adapter4Data_ = std::make_shared(); } ~S3SnapshotDataStore() {} - int Init(const std::string &path) override; - int PutChunkIndexData(const ChunkIndexDataName &name, - const ChunkIndexData &meta) override; - int GetChunkIndexData(const ChunkIndexDataName &name, - ChunkIndexData *meta) override; - int DeleteChunkIndexData(const ChunkIndexDataName &name) override; - bool ChunkIndexDataExist(const ChunkIndexDataName &name) override; + int Init(const std::string& path) override; + int PutChunkIndexData(const ChunkIndexDataName& name, + const ChunkIndexData& meta) override; + int GetChunkIndexData(const ChunkIndexDataName& name, + ChunkIndexData* meta) override; + int DeleteChunkIndexData(const ChunkIndexDataName& name) override; + bool ChunkIndexDataExist(const ChunkIndexDataName& name) override; // int PutChunkData(const ChunkDataName &name, // const ChunkData &data) override; // int GetChunkData(const ChunkDataName &name, // ChunkData *data) override; - int DeleteChunkData(const ChunkDataName &name) override; - bool ChunkDataExist(const ChunkDataName &name) override; -/* nos暂时不支持,后续增加 - int SetSnapshotFlag(const ChunkIndexDataName &name, int flag) override; - int GetSnapshotFlag(const ChunkIndexDataName &name) override; -*/ - int DataChunkTranferInit(const ChunkDataName &name, - std::shared_ptr task) override; - int DataChunkTranferAddPart(const ChunkDataName &name, - std::shared_ptr task, - int partNum, - int partSize, - const char* buf) override; - int DataChunkTranferComplete(const ChunkDataName &name, - std::shared_ptr task) override; - int DataChunkTranferAbort(const ChunkDataName &name, - std::shared_ptr task) override; + int DeleteChunkData(const ChunkDataName& name) override; + bool ChunkDataExist(const ChunkDataName& name) override; + /* NOS is currently not supported, but will be added in the future + int SetSnapshotFlag(const ChunkIndexDataName &name, int flag) override; + int GetSnapshotFlag(const ChunkIndexDataName &name) override; + */ + int DataChunkTranferInit(const ChunkDataName& name, + std::shared_ptr task) override; + int DataChunkTranferAddPart(const ChunkDataName& name, + std::shared_ptr task, int partNum, + int partSize, const char* buf) override; + int DataChunkTranferComplete(const ChunkDataName& name, + std::shared_ptr task) override; + int DataChunkTranferAbort(const ChunkDataName& name, + std::shared_ptr task) override; - void SetMetaAdapter(std::shared_ptr adapter) { - s3Adapter4Meta_ = adapter; - } - std::shared_ptr GetMetaAdapter(void) { - return s3Adapter4Meta_; - } - void SetDataAdapter(std::shared_ptr adapter) { - s3Adapter4Data_ = adapter; - } - std::shared_ptr GetDataAdapter(void) { - return s3Adapter4Data_; - } + void SetMetaAdapter(std::shared_ptr adapter) { + s3Adapter4Meta_ = adapter; + } + std::shared_ptr GetMetaAdapter(void) { return s3Adapter4Meta_; } + void SetDataAdapter(std::shared_ptr adapter) { + s3Adapter4Data_ = adapter; + } + std::shared_ptr GetDataAdapter(void) { return 
s3Adapter4Data_; } private: std::shared_ptr s3Adapter4Data_; std::shared_ptr s3Adapter4Meta_; }; -} // namespace snapshotcloneserver -} // namespace curve +} // namespace snapshotcloneserver +} // namespace curve #endif // SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_DATA_STORE_S3_H_ diff --git a/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp b/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp index 747b666350..6846b10e16 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp @@ -23,46 +23,39 @@ #include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" #include + #include "src/common/string_util.h" namespace curve { namespace snapshotcloneserver { -int SnapshotServiceManager::Init(const SnapshotCloneServerOptions &option) { +int SnapshotServiceManager::Init(const SnapshotCloneServerOptions& option) { std::shared_ptr pool = std::make_shared(option.snapshotPoolThreadNum); return taskMgr_->Init(pool, option); } -int SnapshotServiceManager::Start() { - return taskMgr_->Start(); -} +int SnapshotServiceManager::Start() { return taskMgr_->Start(); } -void SnapshotServiceManager::Stop() { - taskMgr_->Stop(); -} +void SnapshotServiceManager::Stop() { taskMgr_->Stop(); } -int SnapshotServiceManager::CreateSnapshot(const std::string &file, - const std::string &user, - const std::string &snapshotName, - UUID *uuid) { +int SnapshotServiceManager::CreateSnapshot(const std::string& file, + const std::string& user, + const std::string& snapshotName, + UUID* uuid) { SnapshotInfo snapInfo; int ret = core_->CreateSnapshotPre(file, user, snapshotName, &snapInfo); if (ret < 0) { if (kErrCodeTaskExist == ret) { - // 任务已存在的情况下返回成功,使接口幂等 + // Returns success if the task already exists, making the interface + // idempotent *uuid = snapInfo.GetUuid(); return kErrCodeSuccess; } LOG(ERROR) << "CreateSnapshotPre error, " - << " ret =" - << ret - << ", file = " - << file - << ", snapshotName = " - << snapshotName - << ", uuid = " - << snapInfo.GetUuid(); + << " ret =" << ret << ", file = " << file + << ", snapshotName = " << snapshotName + << ", uuid = " << snapInfo.GetUuid(); return ret; } *uuid = snapInfo.GetUuid(); @@ -72,30 +65,27 @@ int SnapshotServiceManager::CreateSnapshot(const std::string &file, std::make_shared(snapInfo, snapInfoMetric); taskInfo->UpdateMetric(); std::shared_ptr task = - std::make_shared( - snapInfo.GetUuid(), taskInfo, core_); + std::make_shared(snapInfo.GetUuid(), taskInfo, + core_); ret = taskMgr_->PushTask(task); if (ret < 0) { LOG(ERROR) << "Push Task error, " - << " ret = " - << ret; + << " ret = " << ret; return ret; } return kErrCodeSuccess; } -int SnapshotServiceManager::CancelSnapshot( - const UUID &uuid, - const std::string &user, - const std::string &file) { +int SnapshotServiceManager::CancelSnapshot(const UUID& uuid, + const std::string& user, + const std::string& file) { std::shared_ptr task = taskMgr_->GetTask(uuid); if (task != nullptr) { if (user != task->GetTaskInfo()->GetSnapshotInfo().GetUser()) { LOG(ERROR) << "Can not cancel snapshot by different user."; return kErrCodeInvalidUser; } - if ((!file.empty()) && - (file != task->GetTaskInfo()->GetFileName())) { + if ((!file.empty()) && (file != task->GetTaskInfo()->GetFileName())) { LOG(ERROR) << "Can not cancel, fileName is not matched."; return kErrCodeFileNameNotMatch; } @@ -104,35 +94,30 @@ int SnapshotServiceManager::CancelSnapshot( int ret = taskMgr_->CancelTask(uuid); if (ret < 0) { LOG(ERROR) 
<< "CancelSnapshot error, "
-                   << " ret ="
-                   << ret
-                   << ", uuid = "
-                   << uuid
-                   << ", file ="
-                   << file;
+                   << " ret =" << ret << ", uuid = " << uuid
+                   << ", file =" << file;
        return ret;
    }
    return kErrCodeSuccess;
 }

-int SnapshotServiceManager::DeleteSnapshot(
-    const UUID &uuid,
-    const std::string &user,
-    const std::string &file) {
+int SnapshotServiceManager::DeleteSnapshot(const UUID& uuid,
+                                           const std::string& user,
+                                           const std::string& file) {
    SnapshotInfo snapInfo;
    int ret = core_->DeleteSnapshotPre(uuid, user, file, &snapInfo);
    if (kErrCodeTaskExist == ret) {
        return kErrCodeSuccess;
    } else if (kErrCodeSnapshotCannotDeleteUnfinished == ret) {
-        // 转Cancel
+        // Fall back to Cancel
        ret = CancelSnapshot(uuid, user, file);
        if (kErrCodeCannotCancelFinished == ret) {
-            // 防止这一过程中又执行完了
+            // Guard against the task having finished during this window
            ret = core_->DeleteSnapshotPre(uuid, user, file, &snapInfo);
            if (ret < 0) {
                LOG(ERROR) << "DeleteSnapshotPre fail"
-                           << ", ret = " << ret
-                           << ", uuid = " << uuid
+                           << ", ret = " << ret << ", uuid = " << uuid
                           << ", file =" << file;
                return ret;
            }
@@ -141,8 +126,7 @@ int SnapshotServiceManager::DeleteSnapshot(
        }
    } else if (ret < 0) {
        LOG(ERROR) << "DeleteSnapshotPre fail"
-                   << ", ret = " << ret
-                   << ", uuid = " << uuid
+                   << ", ret = " << ret << ", uuid = " << uuid
                   << ", file =" << file;
        return ret;
    }
@@ -151,8 +135,8 @@ int SnapshotServiceManager::DeleteSnapshot(
        std::make_shared<SnapshotTaskInfo>(snapInfo, snapInfoMetric);
    taskInfo->UpdateMetric();
    std::shared_ptr<SnapshotTask> task =
-        std::make_shared<SnapshotDeleteTask>(
-            snapInfo.GetUuid(), taskInfo, core_);
+        std::make_shared<SnapshotDeleteTask>(snapInfo.GetUuid(), taskInfo,
+                                             core_);
    ret = taskMgr_->PushTask(task);
    if (ret < 0) {
        LOG(ERROR) << "Push Task error, "
@@ -162,31 +146,28 @@ int SnapshotServiceManager::DeleteSnapshot(
    return kErrCodeSuccess;
 }

-int SnapshotServiceManager::GetFileSnapshotInfo(const std::string &file,
-    const std::string &user,
-    std::vector<FileSnapshotInfo> *info) {
+int SnapshotServiceManager::GetFileSnapshotInfo(
+    const std::string& file, const std::string& user,
+    std::vector<FileSnapshotInfo>* info) {
    std::vector<SnapshotInfo> snapInfos;
    int ret = core_->GetFileSnapshotInfo(file, &snapInfos);
    if (ret < 0) {
        LOG(ERROR) << "GetFileSnapshotInfo error, "
-                   << " ret = " << ret
-                   << ", file = " << file;
+                   << " ret = " << ret << ", file = " << file;
        return ret;
    }
    return GetFileSnapshotInfoInner(snapInfos, user, info);
 }

-int SnapshotServiceManager::GetFileSnapshotInfoById(const std::string &file,
-    const std::string &user,
-    const UUID &uuid,
-    std::vector<FileSnapshotInfo> *info) {
+int SnapshotServiceManager::GetFileSnapshotInfoById(
+    const std::string& file, const std::string& user, const UUID& uuid,
+    std::vector<FileSnapshotInfo>* info) {
    std::vector<SnapshotInfo> snapInfos;
    SnapshotInfo snap;
    int ret = core_->GetSnapshotInfo(uuid, &snap);
    if (ret < 0) {
        LOG(ERROR) << "GetSnapshotInfo error, "
-                   << " ret = " << ret
-                   << ", file = " << file
+                   << " ret = " << ret << ", file = " << file
                   << ", uuid = " << uuid;
        return kErrCodeFileNotExist;
    }
@@ -201,11 +182,10 @@ int SnapshotServiceManager::GetFileSnapshotInfoById(const std::string &file,
 }

 int SnapshotServiceManager::GetFileSnapshotInfoInner(
-    std::vector<SnapshotInfo> snapInfos,
-    const std::string &user,
-    std::vector<FileSnapshotInfo> *info) {
+    std::vector<SnapshotInfo> snapInfos, const std::string& user,
+    std::vector<FileSnapshotInfo>* info) {
    int ret = kErrCodeSuccess;
-    for (auto &snap : snapInfos) {
+    for (auto& snap : snapInfos) {
        if (snap.GetUser() == user) {
            Status st = snap.GetStatus();
            switch (st) {
@@ -226,15 +206,15 @@ int SnapshotServiceManager::GetFileSnapshotInfoInner(
                        taskMgr_->GetTask(uuid);
                    if (task != 
nullptr) { info->emplace_back(snap, - task->GetTaskInfo()->GetProgress()); + task->GetTaskInfo()->GetProgress()); } else { - // 刚刚完成 + // Just completed SnapshotInfo newInfo; ret = core_->GetSnapshotInfo(uuid, &newInfo); if (ret < 0) { - LOG(ERROR) << "GetSnapshotInfo fail" - << ", ret = " << ret - << ", uuid = " << uuid; + LOG(ERROR) + << "GetSnapshotInfo fail" + << ", ret = " << ret << ", uuid = " << uuid; return ret; } switch (newInfo.GetStatus()) { @@ -248,7 +228,8 @@ int SnapshotServiceManager::GetFileSnapshotInfoInner( } default: LOG(ERROR) << "can not reach here!"; - // 当更新数据库失败时,有可能进入这里 + // When updating the database fails, it is + // possible to enter here return kErrCodeInternalError; } } @@ -263,7 +244,7 @@ int SnapshotServiceManager::GetFileSnapshotInfoInner( return kErrCodeSuccess; } -bool SnapshotFilterCondition::IsMatchCondition(const SnapshotInfo &snapInfo) { +bool SnapshotFilterCondition::IsMatchCondition(const SnapshotInfo& snapInfo) { if (user_ != nullptr && *user_ != snapInfo.GetUser()) { return false; } @@ -277,14 +258,12 @@ bool SnapshotFilterCondition::IsMatchCondition(const SnapshotInfo &snapInfo) { } int status; - if (status_ != nullptr - && common::StringToInt(*status_, &status) == false) { + if (status_ != nullptr && common::StringToInt(*status_, &status) == false) { return false; } - if (status_ != nullptr - && common::StringToInt(*status_, &status) == true - && status != static_cast(snapInfo.GetStatus())) { + if (status_ != nullptr && common::StringToInt(*status_, &status) == true && + status != static_cast(snapInfo.GetStatus())) { return false; } @@ -292,11 +271,10 @@ bool SnapshotFilterCondition::IsMatchCondition(const SnapshotInfo &snapInfo) { } int SnapshotServiceManager::GetSnapshotListInner( - std::vector snapInfos, - SnapshotFilterCondition filter, - std::vector *info) { + std::vector snapInfos, SnapshotFilterCondition filter, + std::vector* info) { int ret = kErrCodeSuccess; - for (auto &snap : snapInfos) { + for (auto& snap : snapInfos) { if (filter.IsMatchCondition(snap)) { Status st = snap.GetStatus(); switch (st) { @@ -317,15 +295,15 @@ int SnapshotServiceManager::GetSnapshotListInner( taskMgr_->GetTask(uuid); if (task != nullptr) { info->emplace_back(snap, - task->GetTaskInfo()->GetProgress()); + task->GetTaskInfo()->GetProgress()); } else { - // 刚刚完成 + // Just completed SnapshotInfo newInfo; ret = core_->GetSnapshotInfo(uuid, &newInfo); if (ret < 0) { - LOG(ERROR) << "GetSnapshotInfo fail" - << ", ret = " << ret - << ", uuid = " << uuid; + LOG(ERROR) + << "GetSnapshotInfo fail" + << ", ret = " << ret << ", uuid = " << uuid; return ret; } switch (newInfo.GetStatus()) { @@ -339,7 +317,8 @@ int SnapshotServiceManager::GetSnapshotListInner( } default: LOG(ERROR) << "can not reach here!"; - // 当更新数据库失败时,有可能进入这里 + // When updating the database fails, it is + // possible to enter here return kErrCodeInternalError; } } @@ -355,8 +334,8 @@ int SnapshotServiceManager::GetSnapshotListInner( } int SnapshotServiceManager::GetSnapshotListByFilter( - const SnapshotFilterCondition &filter, - std::vector *info) { + const SnapshotFilterCondition& filter, + std::vector* info) { std::vector snapInfos; int ret = core_->GetSnapshotList(&snapInfos); if (ret < 0) { @@ -374,50 +353,44 @@ int SnapshotServiceManager::RecoverSnapshotTask() { LOG(ERROR) << "GetSnapshotList error"; return ret; } - for (auto &snap : list) { + for (auto& snap : list) { Status st = snap.GetStatus(); switch (st) { - case Status::pending : { + case Status::pending: { auto snapInfoMetric = 
std::make_shared<SnapshotInfoMetric>(snap.GetUuid());
                std::shared_ptr<SnapshotTaskInfo> taskInfo =
                    std::make_shared<SnapshotTaskInfo>(snap, snapInfoMetric);
                taskInfo->UpdateMetric();
                std::shared_ptr<SnapshotTask> task =
-                    std::make_shared<SnapshotCreateTask>(
-                        snap.GetUuid(),
-                        taskInfo,
-                        core_);
+                    std::make_shared<SnapshotCreateTask>(snap.GetUuid(),
+                                                         taskInfo, core_);
                ret = taskMgr_->PushTask(task);
                if (ret < 0) {
-                    LOG(ERROR) << "RecoverSnapshotTask push task error, ret = "
-                               << ret
-                               << ", uuid = "
-                               << snap.GetUuid();
+                    LOG(ERROR)
+                        << "RecoverSnapshotTask push task error, ret = " << ret
+                        << ", uuid = " << snap.GetUuid();
                    return ret;
                }
                break;
            }
-            // 重启恢复的canceling等价于errorDeleting
-            case Status::canceling :
-            case Status::deleting :
-            case Status::errorDeleting : {
+            // A canceling task recovered after a restart is handled like
+            // errorDeleting
+            case Status::canceling:
+            case Status::deleting:
+            case Status::errorDeleting: {
                auto snapInfoMetric =
                    std::make_shared<SnapshotInfoMetric>(snap.GetUuid());
                std::shared_ptr<SnapshotTaskInfo> taskInfo =
                    std::make_shared<SnapshotTaskInfo>(snap, snapInfoMetric);
                taskInfo->UpdateMetric();
                std::shared_ptr<SnapshotTask> task =
-                    std::make_shared<SnapshotDeleteTask>(
-                        snap.GetUuid(),
-                        taskInfo,
-                        core_);
+                    std::make_shared<SnapshotDeleteTask>(snap.GetUuid(),
+                                                         taskInfo, core_);
                ret = taskMgr_->PushTask(task);
                if (ret < 0) {
-                    LOG(ERROR) << "RecoverSnapshotTask push task error, ret = "
-                               << ret
-                               << ", uuid = "
-                               << snap.GetUuid();
+                    LOG(ERROR)
+                        << "RecoverSnapshotTask push task error, ret = " << ret
+                        << ", uuid = " << snap.GetUuid();
                    return ret;
                }
                break;
            }
@@ -431,4 +404,3 @@ int SnapshotServiceManager::RecoverSnapshotTask() {

 }  // namespace snapshotcloneserver
 }  // namespace curve
-
diff --git a/src/snapshotcloneserver/snapshot/snapshot_service_manager.h b/src/snapshotcloneserver/snapshot/snapshot_service_manager.h
index 1aa7143e9f..9c7944f17f 100644
--- a/src/snapshotcloneserver/snapshot/snapshot_service_manager.h
+++ b/src/snapshotcloneserver/snapshot/snapshot_service_manager.h
@@ -27,49 +27,39 @@
 #include
 #include

+#include "json/json.h"
+#include "src/common/snapshotclone/snapshotclone_define.h"
+#include "src/snapshotcloneserver/common/config.h"
 #include "src/snapshotcloneserver/snapshot/snapshot_core.h"
 #include "src/snapshotcloneserver/snapshot/snapshot_task.h"
 #include "src/snapshotcloneserver/snapshot/snapshot_task_manager.h"
-#include "src/common/snapshotclone/snapshotclone_define.h"
-#include "src/snapshotcloneserver/common/config.h"
-#include "json/json.h"

 namespace curve {
 namespace snapshotcloneserver {

 /**
- * @brief 文件单个快照信息
+ * @brief Single snapshot information of a file
 */
 class FileSnapshotInfo {
 public:
    FileSnapshotInfo() = default;

-    /**
-     * @brief 构造函数
-     *
-     * @param snapInfo 快照信息
-     * @param snapProgress 快照完成度百分比
-     */
-    FileSnapshotInfo(const SnapshotInfo &snapInfo,
-        uint32_t snapProgress)
-        : snapInfo_(snapInfo),
-          snapProgress_(snapProgress) {}
-
-    void SetSnapshotInfo(const SnapshotInfo &snapInfo) {
-        snapInfo_ = snapInfo;
-    }
+    /**
+     * @brief constructor
+     *
+     * @param snapInfo snapshot information
+     * @param snapProgress snapshot completion percentage
+     */
+    FileSnapshotInfo(const SnapshotInfo& snapInfo, uint32_t snapProgress)
+        : snapInfo_(snapInfo), snapProgress_(snapProgress) {}

-    SnapshotInfo GetSnapshotInfo() const {
-        return snapInfo_;
-    }
+    void SetSnapshotInfo(const SnapshotInfo& snapInfo) { snapInfo_ = snapInfo; }

-    void SetSnapProgress(uint32_t progress) {
-        snapProgress_ = progress;
-    }
+    SnapshotInfo GetSnapshotInfo() const { return snapInfo_; }

-    uint32_t GetSnapProgress() const {
-        return snapProgress_;
-    }
+    void SetSnapProgress(uint32_t progress) { snapProgress_ = progress; }
+
+    uint32_t GetSnapProgress() const { return snapProgress_; } 
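+
+    // Illustrative usage of the JSON round trip (hypothetical values; the
+    // exact key set is defined by ToJsonObj() below, e.g. "UUID" and "User"):
+    //   FileSnapshotInfo fsInfo(snapInfo, 50);  // snapshot at 50% progress
+    //   Json::Value obj = fsInfo.ToJsonObj();   // serialize for the HTTP API
+    //   FileSnapshotInfo restored;
+    //   restored.LoadFromJsonObj(obj);          // rebuild from the same JSON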
Json::Value ToJsonObj() const { Json::Value fileSnapObj; @@ -86,7 +76,7 @@ class FileSnapshotInfo { return fileSnapObj; } - void LoadFromJsonObj(const Json::Value &jsonObj) { + void LoadFromJsonObj(const Json::Value& jsonObj) { SnapshotInfo snapInfo; snapInfo.SetUuid(jsonObj["UUID"].asString()); snapInfo.SetUser(jsonObj["User"].asString()); @@ -101,209 +91,185 @@ class FileSnapshotInfo { } private: - // 快照信息 + // Snapshot Information SnapshotInfo snapInfo_; - // 快照处理进度百分比 + // Snapshot processing progress percentage uint32_t snapProgress_; }; class SnapshotFilterCondition { public: SnapshotFilterCondition() - : uuid_(nullptr), - file_(nullptr), - user_(nullptr), - status_(nullptr) {} - - SnapshotFilterCondition(const std::string *uuid, const std::string *file, - const std::string *user, - const std::string *status) - : uuid_(uuid), - file_(file), - user_(user), - status_(status) {} - bool IsMatchCondition(const SnapshotInfo &snapInfo); - - void SetUuid(const std::string *uuid) { - uuid_ = uuid; - } + : uuid_(nullptr), file_(nullptr), user_(nullptr), status_(nullptr) {} - void SetFile(const std::string *file) { - file_ = file; - } + SnapshotFilterCondition(const std::string* uuid, const std::string* file, + const std::string* user, const std::string* status) + : uuid_(uuid), file_(file), user_(user), status_(status) {} + bool IsMatchCondition(const SnapshotInfo& snapInfo); - void SetUser(const std::string *user) { - user_ = user; - } + void SetUuid(const std::string* uuid) { uuid_ = uuid; } - void SetStatus(const std::string *status) { - status_ = status; - } + void SetFile(const std::string* file) { file_ = file; } + + void SetUser(const std::string* user) { user_ = user; } + void SetStatus(const std::string* status) { status_ = status; } private: - const std::string *uuid_; - const std::string *file_; - const std::string *user_; - const std::string *status_; + const std::string* uuid_; + const std::string* file_; + const std::string* user_; + const std::string* status_; }; class SnapshotServiceManager { public: - /** - * @brief 构造函数 - * - * @param taskMgr 快照任务管理类对象 - * @param core 快照核心模块 - */ - SnapshotServiceManager( - std::shared_ptr taskMgr, - std::shared_ptr core) - : taskMgr_(taskMgr), - core_(core) {} + /** + * @brief constructor + * + * @param taskMgr snapshot task management class object + * @param core snapshot core module + */ + SnapshotServiceManager(std::shared_ptr taskMgr, + std::shared_ptr core) + : taskMgr_(taskMgr), core_(core) {} virtual ~SnapshotServiceManager() {} /** - * @brief 初始化 + * @brief initialization * - * @return 错误码 + * @return error code */ - virtual int Init(const SnapshotCloneServerOptions &option); + virtual int Init(const SnapshotCloneServerOptions& option); /** - * @brief 启动服务 + * @brief Start Service * - * @return 错误码 + * @return error code */ virtual int Start(); /** - * @brief 停止服务 + * @brief Stop service * */ virtual void Stop(); /** - * @brief 创建快照服务 + * @brief Create snapshot service * - * @param file 文件名 - * @param user 文件所属用户 - * @param snapshotName 快照名 - * @param uuid 快照uuid + * @param file file name + * @param user The user to whom the file belongs + * @param snapshotName SnapshotName + * @param uuid Snapshot uuid * - * @return 错误码 + * @return error code */ - virtual int CreateSnapshot(const std::string &file, - const std::string &user, - const std::string &snapshotName, - UUID *uuid); + virtual int CreateSnapshot(const std::string& file, const std::string& user, + const std::string& snapshotName, UUID* uuid); /** - * @brief 删除快照服务 + * @brief 
Delete snapshot service
     *
-     * @param uuid 快照uuid
-     * @param user 快照文件的用户
-     * @param file 快照所属文件的文件名
+     * @param uuid snapshot uuid
+     * @param user the user of the snapshot file
+     * @param file the name of the file to which the snapshot belongs
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int DeleteSnapshot(const UUID &uuid,
-        const std::string &user,
-        const std::string &file);
+    virtual int DeleteSnapshot(const UUID& uuid, const std::string& user,
+                               const std::string& file);

    /**
-     * @brief 取消快照服务
+     * @brief Cancel snapshot service
     *
-     * @param uuid 快照的uuid
-     * @param user 快照的用户
-     * @param file 快照所属文件的文件名
+     * @param uuid the uuid of the snapshot
+     * @param user snapshot user
+     * @param file the name of the file to which the snapshot belongs
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int CancelSnapshot(const UUID &uuid,
-        const std::string &user,
-        const std::string &file);
+    virtual int CancelSnapshot(const UUID& uuid, const std::string& user,
+                               const std::string& file);

    /**
-     * @brief 获取文件的快照信息服务接口
+     * @brief Get the snapshot information of a file
     *
-     * @param file 文件名
-     * @param user 用户名
-     * @param info 快照信息列表
+     * @param file file name
+     * @param user user name
+     * @param info snapshot information list
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int GetFileSnapshotInfo(const std::string &file,
-        const std::string &user,
-        std::vector<FileSnapshotInfo> *info);
+    virtual int GetFileSnapshotInfo(const std::string& file,
+                                    const std::string& user,
+                                    std::vector<FileSnapshotInfo>* info);

    /**
-     * @brief 根据Id获取文件的快照信息
+     * @brief Get the snapshot information of a file by snapshot id
     *
-     * @param file 文件名
-     * @param user 用户名
-     * @param uuid 快照Id
-     * @param info 快照信息列表
+     * @param file file name
+     * @param user user name
+     * @param uuid snapshot uuid
+     * @param info snapshot information list
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int GetFileSnapshotInfoById(const std::string &file,
-        const std::string &user,
-        const UUID &uuid,
-        std::vector<FileSnapshotInfo> *info);
+    virtual int GetFileSnapshotInfoById(const std::string& file,
+                                        const std::string& user,
+                                        const UUID& uuid,
+                                        std::vector<FileSnapshotInfo>* info);

    /**
-     * @brief 获取快照列表
+     * @brief Get the snapshot list
     *
-     * @param filter 过滤条件
-     * @param info 快照信息列表
+     * @param filter filtering conditions
+     * @param info snapshot information list
     *
-     * @return 错误码
+     * @return error code
     */
-    virtual int GetSnapshotListByFilter(const SnapshotFilterCondition &filter,
-        std::vector<FileSnapshotInfo> *info);
+    virtual int GetSnapshotListByFilter(const SnapshotFilterCondition& filter,
+                                        std::vector<FileSnapshotInfo>* info);

    /**
-     * @brief 恢复快照任务接口
+     * @brief Interface to recover snapshot tasks
     *
-     * @return 错误码
+     * @return error code
     */
    virtual int RecoverSnapshotTask();

 private:
    /**
-     * @brief 根据快照信息获取快照任务信息
+     * @brief Get snapshot task information from snapshot information
     *
-     * @param snapInfos 快照信息
-     * @param user 用户名
-     * @param[out] info 快照任务信息
+     * @param snapInfos snapshot information
+     * @param user user name
+     * @param[out] info snapshot task information
     *
-     * @return 错误码
+     * @return error code
     */
-    int GetFileSnapshotInfoInner(
-        std::vector<SnapshotInfo> snapInfos,
-        const std::string &user,
-        std::vector<FileSnapshotInfo> *info);
+    int GetFileSnapshotInfoInner(std::vector<SnapshotInfo> snapInfos,
+                                 const std::string& user,
+                                 std::vector<FileSnapshotInfo>* info);

    /**
-     * @brief 根据快照信息获取快照任务信息
+     * @brief Get snapshot task information from snapshot information
     *
-     * @param snapInfos 快照信息
-     * @param filter 过滤条件
-     * @param[out] info 
snapshot task information
     *
-     * @return 错误码
+     * @return error code
     */
-    int GetSnapshotListInner(
-        std::vector<SnapshotInfo> snapInfos,
-        SnapshotFilterCondition filter,
-        std::vector<FileSnapshotInfo> *info);
+    int GetSnapshotListInner(std::vector<SnapshotInfo> snapInfos,
+                             SnapshotFilterCondition filter,
+                             std::vector<FileSnapshotInfo>* info);

 private:
-    // 快照任务管理类对象
+    // Snapshot task management object
    std::shared_ptr<SnapshotTaskManager> taskMgr_;
-    // 快照核心模块
+    // Snapshot core module
    std::shared_ptr<SnapshotCore> core_;
 };

diff --git a/src/snapshotcloneserver/snapshot/snapshot_task.cpp b/src/snapshotcloneserver/snapshot/snapshot_task.cpp
index 179f2b4617..a66bf4c4ca 100644
--- a/src/snapshotcloneserver/snapshot/snapshot_task.cpp
+++ b/src/snapshotcloneserver/snapshot/snapshot_task.cpp
@@ -20,10 +20,11 @@
 * Author: xuchaojie
 */

+#include "src/snapshotcloneserver/snapshot/snapshot_task.h"
+
 #include

 #include "src/common/timeutility.h"
-#include "src/snapshotcloneserver/snapshot/snapshot_task.h"

 namespace curve {
 namespace snapshotcloneserver {
@@ -46,18 +47,20 @@ void ReadChunkSnapshotClosure::Run() {
 }

 /**
- * @brief 转储快照的单个chunk
+ * @brief Dump a single chunk of a snapshot
 * @detail
- * 由于单个chunk过大,chunk转储分片进行,分片大小为chunkSplitSize_,
- * 步骤如下:
- * 1. 创建一个转储任务transferTask,并调用DataChunkTranferInit初始化
- * 2. 调用ReadChunkSnapshot从curvefs读取chunk的一个分片
- * 3. 调用DataChunkTranferAddPart转储一个分片
- * 4. 重复2、3直到所有分片转储完成,调用DataChunkTranferComplete结束转储任务
- * 5. 中间如有读取或转储发生错误,则调用DataChunkTranferAbort放弃转储,
- *    并返回错误码
+ * Since a single chunk is too large, the chunk is dumped in parts of
+ * chunkSplitSize_ bytes each. The steps are as follows:
+ * 1. Create a dump task transferTask and initialize it with
+ *    DataChunkTranferInit.
+ * 2. Call ReadChunkSnapshot to read one part of the chunk from curvefs.
+ * 3. Call DataChunkTranferAddPart to dump that part.
+ * 4. Repeat steps 2 and 3 until all parts have been dumped, then call
+ *    DataChunkTranferComplete to finish the dump task.
+ * 5. If any read or dump error occurs along the way, call
+ *    DataChunkTranferAbort to abandon the dump and return an error code. 
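+ *
+ * A rough sketch of the resulting call sequence (illustrative only; it
+ * mirrors the implementation below):
+ *   DataChunkTranferInit(name, transferTask);
+ *   for (i = 0; i < chunkSize / chunkSplitSize; i++) {
+ *       ReadChunkSnapshot(cidInfo, seqNum, offset, len, buf, cb);  // async
+ *       DataChunkTranferAddPart(name, transferTask, i, len, buf);
+ *   }
+ *   DataChunkTranferComplete(name, transferTask);
+ *   // on any failure: DataChunkTranferAbort(name, transferTask);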
* - * @return 错误码 + * @return Error code */ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { ChunkDataName name = taskInfo_->name_; @@ -67,8 +70,7 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { std::shared_ptr transferTask = std::make_shared(); - int ret = dataStore_->DataChunkTranferInit(name, - transferTask); + int ret = dataStore_->DataChunkTranferInit(name, transferTask); if (ret < 0) { LOG(ERROR) << "DataChunkTranferInit error, " << " ret = " << ret @@ -80,9 +82,7 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { } auto tracker = std::make_shared(); - for (uint64_t i = 0; - i < chunkSize / chunkSplitSize; - i++) { + for (uint64_t i = 0; i < chunkSize / chunkSplitSize; i++) { auto context = std::make_shared(); context->cidInfo = taskInfo_->cidInfo_; context->seqNum = taskInfo_->name_.chunkSeqNum_; @@ -101,8 +101,8 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { } std::list results = tracker->PopResultContexts(); - ret = HandleReadChunkSnapshotResultsAndRetry( - tracker, transferTask, results); + ret = HandleReadChunkSnapshotResultsAndRetry(tracker, transferTask, + results); if (ret < 0) { break; } @@ -113,18 +113,17 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { std::list results = tracker->PopResultContexts(); if (0 == results.size()) { - // 已经完成,没有新的结果了 + // Completed, no new results break; } - ret = HandleReadChunkSnapshotResultsAndRetry( - tracker, transferTask, results); + ret = HandleReadChunkSnapshotResultsAndRetry(tracker, transferTask, + results); if (ret < 0) { break; } } while (true); if (ret >= 0) { - ret = - dataStore_->DataChunkTranferComplete(name, transferTask); + ret = dataStore_->DataChunkTranferComplete(name, transferTask); if (ret < 0) { LOG(ERROR) << "DataChunkTranferComplete fail" << ", ret = " << ret @@ -136,18 +135,15 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { } } if (ret < 0) { - int ret2 = - dataStore_->DataChunkTranferAbort( - name, - transferTask); - if (ret2 < 0) { - LOG(ERROR) << "DataChunkTranferAbort fail" - << ", ret = " << ret2 - << ", chunkDataName = " << name.ToDataChunkKey() - << ", logicalPool = " << cidInfo.lpid_ - << ", copysetId = " << cidInfo.cpid_ - << ", chunkId = " << cidInfo.cid_; - } + int ret2 = dataStore_->DataChunkTranferAbort(name, transferTask); + if (ret2 < 0) { + LOG(ERROR) << "DataChunkTranferAbort fail" + << ", ret = " << ret2 + << ", chunkDataName = " << name.ToDataChunkKey() + << ", logicalPool = " << cidInfo.lpid_ + << ", copysetId = " << cidInfo.cpid_ + << ", chunkId = " << cidInfo.cid_; + } return ret; } return kErrCodeSuccess; @@ -156,7 +152,7 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { int TransferSnapshotDataChunkTask::StartAsyncReadChunkSnapshot( std::shared_ptr tracker, std::shared_ptr context) { - ReadChunkSnapshotClosure *cb = + ReadChunkSnapshotClosure* cb = new ReadChunkSnapshotClosure(tracker, context); tracker->AddOneTrace(); uint64_t offset = context->partIndex * context->len; @@ -166,13 +162,9 @@ int TransferSnapshotDataChunkTask::StartAsyncReadChunkSnapshot( << ", chunkId = " << context->cidInfo.cid_ << ", seqNum = " << context->seqNum << ", offset = " << offset; - int ret = client_->ReadChunkSnapshot( - context->cidInfo, - context->seqNum, - offset, - context->len, - context->buf.get(), - cb); + int ret = + client_->ReadChunkSnapshot(context->cidInfo, context->seqNum, offset, + context->len, context->buf.get(), cb); if (ret < 0) { LOG(ERROR) << "ReadChunkSnapshot 
error, " << " ret = " << ret @@ -189,7 +181,7 @@ int TransferSnapshotDataChunkTask::StartAsyncReadChunkSnapshot( int TransferSnapshotDataChunkTask::HandleReadChunkSnapshotResultsAndRetry( std::shared_ptr tracker, std::shared_ptr transferTask, - const std::list &results) { + const std::list& results) { int ret = kErrCodeSuccess; for (auto context : results) { if (context->retCode < 0) { @@ -197,9 +189,8 @@ int TransferSnapshotDataChunkTask::HandleReadChunkSnapshotResultsAndRetry( if (nowTime - context->startTime < context->clientAsyncMethodRetryTimeSec) { // retry - std::this_thread::sleep_for( - std::chrono::milliseconds( - taskInfo_->clientAsyncMethodRetryIntervalMs_)); + std::this_thread::sleep_for(std::chrono::milliseconds( + taskInfo_->clientAsyncMethodRetryIntervalMs_)); ret = StartAsyncReadChunkSnapshot(tracker, context); if (ret < 0) { return ret; @@ -212,15 +203,11 @@ int TransferSnapshotDataChunkTask::HandleReadChunkSnapshotResultsAndRetry( } } else { ret = dataStore_->DataChunkTranferAddPart( - taskInfo_->name_, - transferTask, - context->partIndex, - context->len, - context->buf.get()); + taskInfo_->name_, transferTask, context->partIndex, + context->len, context->buf.get()); if (ret < 0) { LOG(ERROR) << "DataChunkTranferAddPart fail" - << ", ret = " << ret - << ", chunkDataName = " + << ", ret = " << ret << ", chunkDataName = " << taskInfo_->name_.ToDataChunkKey() << ", index = " << context->partIndex; return ret; diff --git a/src/snapshotcloneserver/snapshot/snapshot_task.h b/src/snapshotcloneserver/snapshot/snapshot_task.h index bf53993a61..23102eb4f5 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_task.h +++ b/src/snapshotcloneserver/snapshot/snapshot_task.h @@ -23,172 +23,153 @@ #ifndef SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_TASK_H_ #define SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_TASK_H_ -#include -#include #include +#include +#include -#include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/common/snapshotclone_metric.h" #include "src/snapshotcloneserver/common/task.h" #include "src/snapshotcloneserver/common/task_info.h" -#include "src/snapshotcloneserver/common/snapshotclone_metric.h" #include "src/snapshotcloneserver/common/task_tracker.h" +#include "src/snapshotcloneserver/snapshot/snapshot_core.h" namespace curve { namespace snapshotcloneserver { /** - * @brief 快照任务信息 + * @brief snapshot task information */ class SnapshotTaskInfo : public TaskInfo { public: - /** - * @brief 构造函数 - * - * @param snapInfo 快照信息 - */ - explicit SnapshotTaskInfo(const SnapshotInfo &snapInfo, - std::shared_ptr metric) - : TaskInfo(), - snapshotInfo_(snapInfo), - metric_(metric) {} + /** + * @brief constructor + * + * @param snapInfo snapshot information + */ + explicit SnapshotTaskInfo(const SnapshotInfo& snapInfo, + std::shared_ptr metric) + : TaskInfo(), snapshotInfo_(snapInfo), metric_(metric) {} /** - * @brief 获取快照信息 + * @brief Get snapshot information * - * @return 快照信息 + * @return snapshot information */ - SnapshotInfo& GetSnapshotInfo() { - return snapshotInfo_; - } + SnapshotInfo& GetSnapshotInfo() { return snapshotInfo_; } /** - * @brief 获取快照uuid + * @brief Get snapshot uuid * - * @return 快照uuid + * @return snapshot uuid */ - UUID GetUuid() const { - return snapshotInfo_.GetUuid(); - } + UUID GetUuid() const { return snapshotInfo_.GetUuid(); } /** - * @brief 获取文件名 + * @brief Get file name * - * @return 文件名 + * @return file name */ - std::string GetFileName() const { - return 
snapshotInfo_.GetFileName();
-    }
+    std::string GetFileName() const { return snapshotInfo_.GetFileName(); }

-    void UpdateMetric() {
-        metric_->Update(this);
-    }
+    void UpdateMetric() { metric_->Update(this); }

 private:
-    // 快照信息
+    // Snapshot information
    SnapshotInfo snapshotInfo_;
-    // metric 信息
+    // Metric information
    std::shared_ptr<SnapshotInfoMetric> metric_;
 };

-
 class SnapshotTask : public Task {
 public:
    /**
-     * @brief 构造函数
-     *
-     * @param taskId 快照任务id
-     * @param taskInfo 快照任务信息
-     */
-    SnapshotTask(const TaskIdType &taskId,
-        std::shared_ptr<SnapshotTaskInfo> taskInfo,
-        std::shared_ptr<SnapshotCore> core)
-        : Task(taskId),
-          taskInfo_(taskInfo),
-          core_(core) {}
+     * @brief constructor
+     *
+     * @param taskId snapshot task id
+     * @param taskInfo snapshot task information
+     */
+    SnapshotTask(const TaskIdType& taskId,
+                 std::shared_ptr<SnapshotTaskInfo> taskInfo,
+                 std::shared_ptr<SnapshotCore> core)
+        : Task(taskId), taskInfo_(taskInfo), core_(core) {}

    /**
-     * @brief 获取快照任务信息对象指针
+     * @brief Get the snapshot task information object pointer
     *
-     * @return 快照任务信息对象指针
+     * @return snapshot task information object pointer
     */
-    std::shared_ptr<SnapshotTaskInfo> GetTaskInfo() const {
-        return taskInfo_;
-    }
+    std::shared_ptr<SnapshotTaskInfo> GetTaskInfo() const { return taskInfo_; }

 protected:
-    // 快照任务信息
+    // Snapshot task information
    std::shared_ptr<SnapshotTaskInfo> taskInfo_;
-    // 快照核心逻辑对象
+    // Snapshot core logic object
    std::shared_ptr<SnapshotCore> core_;
 };

 /**
- * @brief 创建快照任务
+ * @brief Create snapshot task
 */
 class SnapshotCreateTask : public SnapshotTask {
 public:
-    /**
-     * @brief 构造函数
-     *
-     * @param taskId 快照任务id
-     * @param taskInfo 快照任务信息
-     * @param core 快照核心逻辑对象
-     */
-    SnapshotCreateTask(const TaskIdType &taskId,
-        std::shared_ptr<SnapshotTaskInfo> taskInfo,
-        std::shared_ptr<SnapshotCore> core)
+    /**
+     * @brief constructor
+     *
+     * @param taskId snapshot task id
+     * @param taskInfo snapshot task information
+     * @param core snapshot core logic object
+     */
+    SnapshotCreateTask(const TaskIdType& taskId,
+                       std::shared_ptr<SnapshotTaskInfo> taskInfo,
+                       std::shared_ptr<SnapshotCore> core)
        : SnapshotTask(taskId, taskInfo, core) {}

    /**
-     * @brief 快照执行函数
+     * @brief snapshot execution function
     */
-    void Run() override {
-        core_->HandleCreateSnapshotTask(taskInfo_);
-    }
+    void Run() override { core_->HandleCreateSnapshotTask(taskInfo_); }
 };

 /**
- * @brief 删除快照任务
+ * @brief Delete snapshot task
 */
 class SnapshotDeleteTask : public SnapshotTask {
 public:
-    /**
-     * @brief 构造函数
-     *
-     * @param taskId 快照任务id
-     * @param taskInfo 快照任务信息
-     * @param core 快照核心逻辑对象
-     */
-    SnapshotDeleteTask(const TaskIdType &taskId,
-        std::shared_ptr<SnapshotTaskInfo> taskInfo,
-        std::shared_ptr<SnapshotCore> core)
+    /**
+     * @brief constructor
+     *
+     * @param taskId snapshot task id
+     * @param taskInfo snapshot task information
+     * @param core snapshot core logic object
+     */
+    SnapshotDeleteTask(const TaskIdType& taskId,
+                       std::shared_ptr<SnapshotTaskInfo> taskInfo,
+                       std::shared_ptr<SnapshotCore> core)
        : SnapshotTask(taskId, taskInfo, core) {}

    /**
-     * @brief 快照执行函数
+     * @brief snapshot execution function
     */
-    void Run() override {
-        core_->HandleDeleteSnapshotTask(taskInfo_);
-    }
+    void Run() override { core_->HandleDeleteSnapshotTask(taskInfo_); }
 };

 struct ReadChunkSnapshotContext {
-    // chunkid 信息
+    // Chunk ID information
    ChunkIDInfo cidInfo;
    // seq
    uint64_t seqNum;
-    // 分片的索引
+    // Index of this part
    uint64_t partIndex;
-    // 分片的buffer
+    // Buffer for this part
    std::unique_ptr<char[]> buf;
-    // 分片长度
+    // Length of this part
    uint64_t len;
-    // 返回值
+    // Return value
    int retCode;
-    // 异步请求开始时间
+    // Start time of the asynchronous request
    uint64_t startTime;
-    // 异步请求重试总时间
+    // Total retry time for the asynchronous request
    uint64_t clientAsyncMethodRetryTimeSec;
 };

@@ -200,8 +181,7 @@ struct ReadChunkSnapshotClosure : public 
SnapCloneClosure { ReadChunkSnapshotClosure( std::shared_ptr tracker, std::shared_ptr context) - : tracker_(tracker), - context_(context) {} + : tracker_(tracker), context_(context) {} void Run() override; std::shared_ptr tracker_; std::shared_ptr context_; @@ -216,13 +196,13 @@ struct TransferSnapshotDataChunkTaskInfo : public TaskInfo { uint64_t clientAsyncMethodRetryIntervalMs_; uint32_t readChunkSnapshotConcurrency_; - TransferSnapshotDataChunkTaskInfo(const ChunkDataName &name, - uint64_t chunkSize, - const ChunkIDInfo &cidInfo, - uint64_t chunkSplitSize, - uint64_t clientAsyncMethodRetryTimeSec, - uint64_t clientAsyncMethodRetryIntervalMs, - uint32_t readChunkSnapshotConcurrency) + TransferSnapshotDataChunkTaskInfo(const ChunkDataName& name, + uint64_t chunkSize, + const ChunkIDInfo& cidInfo, + uint64_t chunkSplitSize, + uint64_t clientAsyncMethodRetryTimeSec, + uint64_t clientAsyncMethodRetryIntervalMs, + uint32_t readChunkSnapshotConcurrency) : name_(name), chunkSize_(chunkSize), cidInfo_(cidInfo), @@ -234,7 +214,8 @@ struct TransferSnapshotDataChunkTaskInfo : public TaskInfo { class TransferSnapshotDataChunkTask : public TrackerTask { public: - TransferSnapshotDataChunkTask(const TaskIdType &taskId, + TransferSnapshotDataChunkTask( + const TaskIdType& taskId, std::shared_ptr taskInfo, std::shared_ptr client, std::shared_ptr dataStore) @@ -255,37 +236,37 @@ class TransferSnapshotDataChunkTask : public TrackerTask { private: /** - * @brief 转储快照单个chunk + * @brief Dump snapshot single chunk * - * @return 错误码 + * @return error code */ int TransferSnapshotDataChunk(); /** - * @brief 开始异步ReadSnapshotChunk + * @brief Start asynchronous ReadSnapshotChunk * - * @param tracker 异步ReadSnapshotChunk追踪器 - * @param context ReadSnapshotChunk上下文 + * @param tracker asynchronous ReadSnapshotChunk tracker + * @param context ReadSnapshotChunk context * - * @return 错误码 + * @return error code */ int StartAsyncReadChunkSnapshot( std::shared_ptr tracker, std::shared_ptr context); /** - * @brief 处理ReadChunkSnapshot的结果并重试 + * @brief Process the results of ReadChunkSnapshot and try again * - * @param tracker 异步ReadSnapshotChunk追踪器 - * @param transferTask 转储任务 - * @param results ReadChunkSnapshot结果列表 + * @param tracker asynchronous ReadSnapshotChunk tracker + * @param transferTask Dump Task + * @param results ReadChunkSnapshot result list * - * @return 错误码 + * @return error code */ int HandleReadChunkSnapshotResultsAndRetry( std::shared_ptr tracker, std::shared_ptr transferTask, - const std::list &results); + const std::list& results); protected: std::shared_ptr taskInfo_; @@ -293,7 +274,6 @@ class TransferSnapshotDataChunkTask : public TrackerTask { std::shared_ptr dataStore_; }; - } // namespace snapshotcloneserver } // namespace curve diff --git a/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp b/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp index aa57505b9f..2c82ae1d0f 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp @@ -21,9 +21,9 @@ */ #include "src/snapshotcloneserver/snapshot/snapshot_task_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/common/concurrent/concurrent.h" +#include "src/common/concurrent/concurrent.h" +#include "src/common/snapshotclone/snapshotclone_define.h" using curve::common::LockGuard; @@ -39,7 +39,7 @@ int SnapshotTaskManager::Start() { return ret; } isStop_.store(false); - // isStop_标志先置,防止backEndThread先退出 + // isStop_ Flag set first 
so that backEndThread, which is created next, does not observe a stale stop flag and exit immediately
 backEndThread =
 std::thread(&SnapshotTaskManager::BackEndThreadFunc, this);
 }
 @@ -58,7 +58,7 @@ int SnapshotTaskManager::PushTask(std::shared_ptr task) {
 if (isStop_.load()) {
 return kErrCodeServiceIsStop;
 }
- // 移除实际已完成的task,防止uuid冲突
+ // Remove tasks that have actually finished, to prevent uuid conflicts
 ScanWorkingTask();
 {
 @@ -73,13 +73,13 @@ int SnapshotTaskManager::PushTask(std::shared_ptr task) {
 }
 snapshotMetric_->snapshotWaiting << 1;
- // 立即执行task
+ // Try to execute the task immediately
 ScanWaitingTask();
 return kErrCodeSuccess;
 }
 std::shared_ptr SnapshotTaskManager::GetTask(
- const TaskIdType &taskId) const {
+ const TaskIdType& taskId) const {
 ReadLockGuard taskMapRlock(taskMapLock_);
 auto it = taskMap_.find(taskId);
 if (it != taskMap_.end()) {
 @@ -88,14 +88,12 @@ std::shared_ptr SnapshotTaskManager::GetTask(
 return nullptr;
 }
-int SnapshotTaskManager::CancelTask(const TaskIdType &taskId) {
+int SnapshotTaskManager::CancelTask(const TaskIdType& taskId) {
 {
- // 还在等待队列的Cancel直接移除
+ // A task still in the waiting queue is cancelled by removing it directly
 WriteLockGuard taskMapWlock(taskMapLock_);
 LockGuard waitingTasksLock(waitingTasksLock_);
- for (auto it = waitingTasks_.begin();
- it != waitingTasks_.end();
- it++) {
+ for (auto it = waitingTasks_.begin(); it != waitingTasks_.end(); it++) {
 if ((*it)->GetTaskId() == taskId) {
 int ret = core_->HandleCancelUnSchduledSnapshotTask(
 (*it)->GetTaskInfo());
 @@ -131,12 +129,10 @@ void SnapshotTaskManager::BackEndThreadFunc() {
 void SnapshotTaskManager::ScanWaitingTask() {
 LockGuard waitingTasksLock(waitingTasksLock_);
 LockGuard workingTasksLock(workingTasksLock_);
- for (auto it = waitingTasks_.begin();
- it != waitingTasks_.end();) {
- if (workingTasks_.find((*it)->GetTaskInfo()->GetFileName())
- == workingTasks_.end()) {
- workingTasks_.emplace((*it)->GetTaskInfo()->GetFileName(),
- *it);
+ for (auto it = waitingTasks_.begin(); it != waitingTasks_.end();) {
+ if (workingTasks_.find((*it)->GetTaskInfo()->GetFileName()) ==
+ workingTasks_.end()) {
+ workingTasks_.emplace((*it)->GetTaskInfo()->GetFileName(), *it);
 threadpool_->PushTask(*it);
 snapshotMetric_->snapshotDoing << 1;
 snapshotMetric_->snapshotWaiting << -1;
 @@ -150,13 +146,11 @@ void SnapshotTaskManager::ScanWaitingTask() {
 void SnapshotTaskManager::ScanWorkingTask() {
 WriteLockGuard taskMapWlock(taskMapLock_);
 LockGuard workingTasksLock(workingTasksLock_);
- for (auto it = workingTasks_.begin();
- it != workingTasks_.end();) {
+ for (auto it = workingTasks_.begin(); it != workingTasks_.end();) {
 auto taskInfo = it->second->GetTaskInfo();
 if (taskInfo->IsFinish()) {
 snapshotMetric_->snapshotDoing << -1;
- if (taskInfo->GetSnapshotInfo().GetStatus()
- != Status::done) {
+ if (taskInfo->GetSnapshotInfo().GetStatus() != Status::done) {
 snapshotMetric_->snapshotFailed << 1;
 } else {
 snapshotMetric_->snapshotSucceed << 1;
 @@ -171,4 +165,3 @@
 } // namespace snapshotcloneserver
 } // namespace curve
-
 diff --git a/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp b/src/snapshotcloneserver/snapshot/snapshot_task_manager.h
 index a22eb0e2ae..c2cee2baa3 100644
 --- a/src/snapshotcloneserver/snapshot/snapshot_task_manager.h
 +++ b/src/snapshotcloneserver/snapshot/snapshot_task_manager.h
 @@ -23,54 +23,51 @@
 #ifndef SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_TASK_MANAGER_H_
 #define SRC_SNAPSHOTCLONESERVER_SNAPSHOT_SNAPSHOT_TASK_MANAGER_H_
-#include
-#include
 #include
-#include
 #include
+#include
+#include
+#include
 #include //
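ScanWaitingTask above encodes the manager's scheduling invariant: at most one snapshot task runs per file, so a waiting task is promoted only when its file name is absent from workingTasks_. The same invariant in isolation (simplified stand-in types; the real code also pushes the promoted task to a thread pool and bumps the snapshotDoing/snapshotWaiting counters):

    #include <list>
    #include <map>
    #include <memory>
    #include <string>

    struct Task { std::string file; };
    using TaskPtr = std::shared_ptr<Task>;

    // Promote waiting tasks whose file has no entry in the working map yet.
    void Promote(std::list<TaskPtr>* waiting,
                 std::map<std::string, TaskPtr>* working) {
        for (auto it = waiting->begin(); it != waiting->end();) {
            if (working->count((*it)->file) == 0) {  // file is idle
                working->emplace((*it)->file, *it);
                it = waiting->erase(it);
            } else {
                ++it;  // another snapshot of this file is running; keep waiting
            }
        }
    }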
NOLINT
-#include "src/snapshotcloneserver/snapshot/snapshot_task.h"
-#include "src/snapshotcloneserver/common/thread_pool.h"
 #include "src/common/concurrent/rw_lock.h"
 #include "src/common/snapshotclone/snapshotclone_define.h"
 #include "src/snapshotcloneserver/common/config.h"
 #include "src/snapshotcloneserver/common/snapshotclone_metric.h"
+#include "src/snapshotcloneserver/common/thread_pool.h"
 #include "src/snapshotcloneserver/snapshot/snapshot_core.h"
+#include "src/snapshotcloneserver/snapshot/snapshot_task.h"
-using ::curve::common::RWLock;
+using ::curve::common::Mutex;
 using ::curve::common::ReadLockGuard;
+using ::curve::common::RWLock;
 using ::curve::common::WriteLockGuard;
-using ::curve::common::Mutex;
 namespace curve {
 namespace snapshotcloneserver {
 /**
- * @brief 快照任务管理器类
+ * @brief Snapshot Task Manager Class
 */
 class SnapshotTaskManager {
 public:
- /**
- * @brief 默认构造函数
- */
- SnapshotTaskManager(
- std::shared_ptr core,
- std::shared_ptr snapshotMetric)
+ /**
+ * @brief constructor
+ */
+ SnapshotTaskManager(std::shared_ptr core,
+ std::shared_ptr snapshotMetric)
 : isStop_(true),
 core_(core),
 snapshotMetric_(snapshotMetric),
 snapshotTaskManagerScanIntervalMs_(0) {}
 /**
- * @brief 析构函数
+ * @brief destructor
 */
- ~SnapshotTaskManager() {
- Stop();
- }
+ ~SnapshotTaskManager() { Stop(); }
 int Init(std::shared_ptr pool,
- const SnapshotCloneServerOptions &option) {
+ const SnapshotCloneServerOptions& option) {
 snapshotTaskManagerScanIntervalMs_ =
 option.snapshotTaskManagerScanIntervalMs;
 threadpool_ = pool;
 @@ -78,88 +75,92 @@ class SnapshotTaskManager {
 }
 /**
- * @brief 启动
+ * @brief start
 *
- * @return 错误码
+ * @return error code
 */
 int Start();
 /**
- * @brief 停止服务
+ * @brief Stop service
 *
 */
 void Stop();
 /**
- * @brief 添加任务
+ * @brief Add Task
 *
- * @param task 快照任务
+ * @param task snapshot task
 *
- * @return 错误码
+ * @return error code
 */
 int PushTask(std::shared_ptr task);
 /**
- * @brief 获取任务
+ * @brief Get Task
 *
- * @param taskId 任务id
+ * @param taskId Task ID
 *
- * @return 快照任务指针
+ * @return Snapshot Task Pointer
 */
- std::shared_ptr GetTask(const TaskIdType &taskId) const;
+ std::shared_ptr GetTask(const TaskIdType& taskId) const;
 /**
- * @brief 取消任务
+ * @brief Cancel Task
 *
- * @param taskId 任务id
+ * @param taskId Task ID
 *
- * @return 错误码
+ * @return error code
 */
- int CancelTask(const TaskIdType &taskId);
+ int CancelTask(const TaskIdType& taskId);
 private:
 /**
- * @brief 后台线程执行函数
+ * @brief Background Thread Execution Function
 *
- * 定期执行扫描等待队列函数与扫描工作队列函数。
+ * Periodically runs the waiting-queue scan function and the
+ * working-queue scan function
 */
 void BackEndThreadFunc();
 /**
- * @brief 扫描等待任务队列函数
+ * @brief Scan Waiting Task Queue Function
 *
- * 扫描等待队列,判断工作队列中当前文件
- * 是否有正在执行的快照,若没有则放入工作队列
+ * Scans the waiting queue and checks whether the file of each waiting
+ * task has a snapshot running in the work queue; if not, moves the task
+ * into the work queue
 *
 */
 void ScanWaitingTask();
 /**
- * @brief 扫描工作队列函数
+ * @brief Scan Work Queue Function
 *
- * 扫描工作队列,判断工作队列中当前
- * 快照任务是否已完成,若完成则移出工作队列
+ * Scans the work queue and checks whether each snapshot task in it has
+ * finished; finished tasks are removed
+ * from the work queue
 *
 */
 void ScanWorkingTask();
 private:
- // 后台线程
+ // Background thread
 std::thread backEndThread;
- // id->快照任务表
+ // id -> snapshot task map
 std::map > taskMap_;
 mutable RWLock taskMapLock_;
- // 快照等待队列
+ // Snapshot waiting queue
 std::list > waitingTasks_;
 mutable Mutex waitingTasksLock_;
- //
快照工作队列,实际是个map,其中key是文件名,以便于查询 + // The snapshot work queue is actually a map, where key is the file name for + // easy query std::map > workingTasks_; mutable Mutex workingTasksLock_; std::shared_ptr threadpool_; - // 当前任务管理是否停止,用于支持start,stop功能 + // Indicates whether the current task management is stopped, used to support + // start and stop functions. std::atomic_bool isStop_; // snapshot core @@ -168,7 +169,8 @@ class SnapshotTaskManager { // metric std::shared_ptr snapshotMetric_; - // 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms) + // Scanning cycle of snapshot background thread scanning waiting queue and + // work queue (unit: ms) int snapshotTaskManagerScanIntervalMs_; }; diff --git a/src/snapshotcloneserver/snapshotclone_server.cpp b/src/snapshotcloneserver/snapshotclone_server.cpp index be92a61d9d..b403054f38 100644 --- a/src/snapshotcloneserver/snapshotclone_server.cpp +++ b/src/snapshotcloneserver/snapshotclone_server.cpp @@ -19,365 +19,383 @@ * Created Date: Monday March 9th 2020 * Author: hzsunjianliang */ -#include +#include "src/snapshotcloneserver/snapshotclone_server.h" + #include #include -#include +#include + #include +#include -#include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/snapshotcloneserver/snapshotclone_server.h" #include "src/common/curve_version.h" +#include "src/common/snapshotclone/snapshotclone_define.h" using LeaderElectionOptions = ::curve::election::LeaderElectionOptions; -namespace curve { -namespace snapshotcloneserver { - -const char metricExposePrefix[] = "snapshotcloneserver"; -const char configMetricName[] = "snapshotcloneserver_config"; -const char statusMetricName[] = "snapshotcloneserver_status"; -const char ACTIVE[] = "active"; -const char STANDBY[] = "standby"; - -void InitClientOption(std::shared_ptr conf, - CurveClientOptions *clientOption) { - conf->GetValueFatalIfFail("client.config_path", - &clientOption->configPath); - conf->GetValueFatalIfFail("mds.rootUser", - &clientOption->mdsRootUser); - conf->GetValueFatalIfFail("mds.rootPassword", - &clientOption->mdsRootPassword); - conf->GetValueFatalIfFail("client.methodRetryTimeSec", - &clientOption->clientMethodRetryTimeSec); - conf->GetValueFatalIfFail("client.methodRetryIntervalMs", - &clientOption->clientMethodRetryIntervalMs); -} - -void InitSnapshotCloneServerOptions(std::shared_ptr conf, - SnapshotCloneServerOptions *serverOption) { - conf->GetValueFatalIfFail("server.address", - &serverOption->addr); - conf->GetValueFatalIfFail("server.clientAsyncMethodRetryTimeSec", - &serverOption->clientAsyncMethodRetryTimeSec); - conf->GetValueFatalIfFail( - "server.clientAsyncMethodRetryIntervalMs", - &serverOption->clientAsyncMethodRetryIntervalMs); - conf->GetValueFatalIfFail("server.snapshotPoolThreadNum", - &serverOption->snapshotPoolThreadNum); - conf->GetValueFatalIfFail( - "server.snapshotTaskManagerScanIntervalMs", - &serverOption->snapshotTaskManagerScanIntervalMs); - conf->GetValueFatalIfFail("server.chunkSplitSize", - &serverOption->chunkSplitSize); - conf->GetValueFatalIfFail( - "server.checkSnapshotStatusIntervalMs", - &serverOption->checkSnapshotStatusIntervalMs); - conf->GetValueFatalIfFail("server.maxSnapshotLimit", - &serverOption->maxSnapshotLimit); - conf->GetValueFatalIfFail("server.snapshotCoreThreadNum", - &serverOption->snapshotCoreThreadNum); - conf->GetValueFatalIfFail("server.mdsSessionTimeUs", - &serverOption->mdsSessionTimeUs); - conf->GetValueFatalIfFail("server.readChunkSnapshotConcurrency", - &serverOption->readChunkSnapshotConcurrency); - - 
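Every option above is loaded with GetValueFatalIfFail, so a missing key stops the process at startup instead of letting it run half-configured. A fail-fast accessor over a plain map captures the idea (Conf here is a hypothetical stand-in, not the real curve::common::Configuration):

    #include <cstdlib>
    #include <iostream>
    #include <map>
    #include <string>

    struct Conf {
        std::map<std::string, std::string> kv;

        // Abort at startup if a required key is absent: better than running
        // with a silently defaulted value.
        void GetValueFatalIfFail(const std::string& key, std::string* out) {
            auto it = kv.find(key);
            if (it == kv.end()) {
                std::cerr << "missing required config key: " << key << "\n";
                std::abort();
            }
            *out = it->second;
        }
    };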
conf->GetValueFatalIfFail("server.stage1PoolThreadNum", - &serverOption->stage1PoolThreadNum); - conf->GetValueFatalIfFail("server.stage2PoolThreadNum", - &serverOption->stage2PoolThreadNum); - conf->GetValueFatalIfFail("server.commonPoolThreadNum", - &serverOption->commonPoolThreadNum); - - conf->GetValueFatalIfFail( - "server.cloneTaskManagerScanIntervalMs", - &serverOption->cloneTaskManagerScanIntervalMs); - conf->GetValueFatalIfFail("server.cloneChunkSplitSize", - &serverOption->cloneChunkSplitSize); - conf->GetValueFatalIfFail("server.cloneTempDir", - &serverOption->cloneTempDir); - conf->GetValueFatalIfFail("mds.rootUser", - &serverOption->mdsRootUser); - conf->GetValueFatalIfFail("server.createCloneChunkConcurrency", - &serverOption->createCloneChunkConcurrency); - conf->GetValueFatalIfFail("server.recoverChunkConcurrency", - &serverOption->recoverChunkConcurrency); - conf->GetValueFatalIfFail("server.backEndReferenceRecordScanIntervalMs", - &serverOption->backEndReferenceRecordScanIntervalMs); - conf->GetValueFatalIfFail("server.backEndReferenceFuncScanIntervalMs", - &serverOption->backEndReferenceFuncScanIntervalMs); - - conf->GetValueFatalIfFail("etcd.retry.times", - &(serverOption->dlockOpts.retryTimes)); - conf->GetValueFatalIfFail("etcd.dlock.timeoutMs", - &(serverOption->dlockOpts.ctx_timeoutMS)); - conf->GetValueFatalIfFail("etcd.dlock.ttlSec", - &(serverOption->dlockOpts.ttlSec)); -} - -void SnapShotCloneServer::InitEtcdConf(EtcdConf* etcdConf) { - conf_->GetValueFatalIfFail("etcd.endpoint", &etcdEndpoints_); - etcdConf->len = etcdEndpoints_.size(); - etcdConf->Endpoints = &etcdEndpoints_[0]; - conf_->GetValueFatalIfFail( - "etcd.dailtimeoutMs", &etcdConf->DialTimeout); - // etcd auth config - bool authEnable = false; - conf_->GetBoolValue("etcd.auth.enable", &authEnable); - etcdConf->authEnable = authEnable ? 
1 : 0; - if (authEnable) { - conf_->GetValueFatalIfFail("etcd.auth.username", &etcdUsername_); - etcdConf->username = &etcdUsername_[0]; - etcdConf->usernameLen = etcdUsername_.size(); - conf_->GetValueFatalIfFail("etcd.auth.password", &etcdPassword_); - etcdConf->password = &etcdPassword_[0]; - etcdConf->passwordLen = etcdPassword_.size(); - } -} - -void SnapShotCloneServer::InitAllSnapshotCloneOptions(void) { - InitClientOption(conf_, &(snapshotCloneServerOptions_.clientOptions)); - InitSnapshotCloneServerOptions(conf_, - &(snapshotCloneServerOptions_.serverOption)); - InitEtcdConf(&(snapshotCloneServerOptions_.etcdConf)); - - conf_->GetValueFatalIfFail("etcd.operation.timeoutMs", - &(snapshotCloneServerOptions_.etcdClientTimeout)); - - conf_->GetValueFatalIfFail("etcd.retry.times", - &(snapshotCloneServerOptions_.etcdRetryTimes)); - - conf_->GetValueFatalIfFail("server.dummy.listen.port", - &(snapshotCloneServerOptions_.dummyPort)); - - conf_->GetValueFatalIfFail("leader.campagin.prefix", - &(snapshotCloneServerOptions_.campaginPrefix)); - - conf_->GetValueFatalIfFail("leader.session.intersec", - &(snapshotCloneServerOptions_.sessionInterSec)); - - conf_->GetValueFatalIfFail("leader.election.timeoutms", - &(snapshotCloneServerOptions_.electionTimeoutMs)); - - conf_->GetValueFatalIfFail("s3.config_path", - &(snapshotCloneServerOptions_.s3ConfPath)); -} - -void SnapShotCloneServer::StartDummy() { - // Expose conf and version and role(standby or active) - LOG(INFO) << "snapshotCloneServer version: " - << curve::common::CurveVersion(); - curve::common::ExposeCurveVersion(); - conf_->ExposeMetric(configMetricName); - status_.expose(statusMetricName); - status_.set_value(STANDBY); - - int ret = brpc::StartDummyServerAt(snapshotCloneServerOptions_.dummyPort); - if (ret != 0) { - LOG(FATAL) << "StartDummyServer error"; - } else { - LOG(INFO) << "StartDummyServer ok"; - } -} - -bool SnapShotCloneServer::InitEtcdClient(void) { - etcdClient_ = std::make_shared(); - auto res = etcdClient_->Init(snapshotCloneServerOptions_.etcdConf, - snapshotCloneServerOptions_.etcdClientTimeout, - snapshotCloneServerOptions_.etcdRetryTimes); - if (res != EtcdErrCode::EtcdOK) { - LOG(ERROR) - << "init etcd client err! " - << "etcdaddr: " << snapshotCloneServerOptions_.etcdConf.Endpoints - << ", etcdaddr len: " << snapshotCloneServerOptions_.etcdConf.len - << ", etcdtimeout: " << snapshotCloneServerOptions_.etcdConf.DialTimeout - << ", operation timeout: " - << snapshotCloneServerOptions_.etcdClientTimeout - << ", etcd retrytimes: " - << snapshotCloneServerOptions_.etcdRetryTimes; - return false; - } - - std::string out; - res = etcdClient_->Get("test", &out); - if (res != EtcdErrCode::EtcdOK && res != EtcdErrCode::EtcdKeyNotExist) { - LOG(ERROR) << - "Run snapsthotcloneserver err. Check if etcd is running."; - return false; - } - - LOG(INFO) << "init etcd client ok! 
" - << "etcdaddr: " << snapshotCloneServerOptions_.etcdConf.Endpoints - << ", etcdaddr len: " << snapshotCloneServerOptions_.etcdConf.len - << ", etcdtimeout: " << - snapshotCloneServerOptions_.etcdConf.DialTimeout - << ", operation timeout: " << - snapshotCloneServerOptions_.etcdClientTimeout - << ", etcd retrytimes: " << - snapshotCloneServerOptions_.etcdRetryTimes; - return true; -} - -void SnapShotCloneServer::StartCompaginLeader(void) { - if (!InitEtcdClient()) { - LOG(FATAL) << "InitEtcdClient error"; - } - // init leader election options - LeaderElectionOptions option; - option.etcdCli = etcdClient_; - option.leaderUniqueName = snapshotCloneServerOptions_.serverOption.addr; - option.electionTimeoutMs = snapshotCloneServerOptions_.electionTimeoutMs; - option.sessionInterSec = snapshotCloneServerOptions_.sessionInterSec; - option.campaginPrefix = snapshotCloneServerOptions_.campaginPrefix; - leaderElection_ = std::make_shared(option); - - // compagin leader and observe self then return - while (0 != leaderElection_->CampaignLeader()) { - LOG(INFO) << option.leaderUniqueName - << " campaign for leader again"; - } - LOG(INFO) << "Campain leader ok, I am the active member now"; - status_.set_value(ACTIVE); - leaderElection_->StartObserverLeader(); -} - -bool SnapShotCloneServer::Init() { - snapClient_ = std::make_shared(); - fileClient_ = std::make_shared(); - client_ = std::make_shared(snapClient_, fileClient_); - - if (client_->Init(snapshotCloneServerOptions_.clientOptions) < 0) { - LOG(ERROR) << "curvefs_client init fail."; - return false; - } - auto codec = std::make_shared(); - - metaStore_ = std::make_shared(etcdClient_, - codec); - if (metaStore_->Init() < 0) { - LOG(ERROR) << "metaStore init fail."; - return false; - } - - dataStore_ = std::make_shared(); - if (dataStore_->Init(snapshotCloneServerOptions_.s3ConfPath) < 0) { - LOG(ERROR) << "dataStore init fail."; - return false; - } - - - snapshotRef_ = std::make_shared(); - snapshotMetric_ = std::make_shared(metaStore_); - snapshotCore_ = std::make_shared( - client_, - metaStore_, - dataStore_, - snapshotRef_, - snapshotCloneServerOptions_.serverOption); - if (snapshotCore_->Init() < 0) { - LOG(ERROR) << "SnapshotCore init fail."; - return false; - } - - snapshotTaskManager_ = std::make_shared(snapshotCore_, - snapshotMetric_); - snapshotServiceManager_ = - std::make_shared(snapshotTaskManager_, - snapshotCore_); - if (snapshotServiceManager_->Init( - snapshotCloneServerOptions_.serverOption) < 0) { - LOG(ERROR) << "SnapshotServiceManager init fail."; - return false; - } - - cloneMetric_ = std::make_shared(); - cloneRef_ = std::make_shared(); - cloneCore_ = std::make_shared( - client_, - metaStore_, - dataStore_, - snapshotRef_, - cloneRef_, - snapshotCloneServerOptions_.serverOption); - if (cloneCore_->Init() < 0) { - LOG(ERROR) << "CloneCore init fail."; - return false; - } - cloneTaskMgr_ = std::make_shared(cloneCore_, - cloneMetric_); - - cloneServiceManagerBackend_ = - std::make_shared(cloneCore_); - cloneServiceManager_ = std::make_shared( - cloneTaskMgr_, - cloneCore_, - cloneServiceManagerBackend_); - if (cloneServiceManager_->Init( - snapshotCloneServerOptions_.serverOption) < 0) { - LOG(ERROR) << "CloneServiceManager init fail."; - return false; - } - service_ = std::make_shared( - snapshotServiceManager_, - cloneServiceManager_); - server_ = std::make_shared(); - if (server_->AddService(service_.get(), - brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { - LOG(ERROR) << "Failed to add snapshot_service!\n"; - return false; - } 
- return true; -} - -bool SnapShotCloneServer::Start(void) { - // 先启动clone服务再启动snapshot服务,因为删除快照依赖是否有clone引用 - int ret = cloneServiceManager_->Start(); - if (ret < 0) { - LOG(ERROR) << "cloneServiceManager start fail" - << ", ret = " << ret; - return false; - } - ret = cloneServiceManager_->RecoverCloneTask(); - if (ret < 0) { - LOG(ERROR) << "RecoverCloneTask fail" - << ", ret = " << ret; - return false; - } - ret = snapshotServiceManager_->Start(); - if (ret < 0) { - LOG(ERROR) << "snapshotServiceManager start fail" - << ", ret = " << ret; - return false; - } - ret = snapshotServiceManager_->RecoverSnapshotTask(); - if (ret < 0) { - LOG(ERROR) << "RecoverSnapshotTask fail" - << ", ret = " << ret; - return false; - } - - brpc::ServerOptions option; - option.idle_timeout_sec = -1; - if (server_->Start(snapshotCloneServerOptions_.serverOption.addr.c_str(), - &option) != 0) { - LOG(FATAL) << "snapshotclone rpc server start fail."; - } - LOG(INFO) << "snapshotclone service start ok ..."; - return true; -} - -void SnapShotCloneServer::RunUntilQuit(void) { - server_->RunUntilAskedToQuit(); -} - -void SnapShotCloneServer::Stop(void) { - LOG(INFO) << "snapshotcloneserver stopping ..."; - server_->Stop(0); - server_->Join(); - snapshotServiceManager_->Stop(); - cloneServiceManager_->Stop(); - LOG(INFO) << "snapshorcloneserver stopped"; -} - -} // namespace snapshotcloneserver -} // namespace curve +namespace curve +{ + namespace snapshotcloneserver + { + + const char metricExposePrefix[] = "snapshotcloneserver"; + const char configMetricName[] = "snapshotcloneserver_config"; + const char statusMetricName[] = "snapshotcloneserver_status"; + const char ACTIVE[] = "active"; + const char STANDBY[] = "standby"; + + void InitClientOption(std::shared_ptr conf, + CurveClientOptions *clientOption) + { + conf->GetValueFatalIfFail("client.config_path", &clientOption->configPath); + conf->GetValueFatalIfFail("mds.rootUser", &clientOption->mdsRootUser); + conf->GetValueFatalIfFail("mds.rootPassword", + &clientOption->mdsRootPassword); + conf->GetValueFatalIfFail("client.methodRetryTimeSec", + &clientOption->clientMethodRetryTimeSec); + conf->GetValueFatalIfFail("client.methodRetryIntervalMs", + &clientOption->clientMethodRetryIntervalMs); + } + + void InitSnapshotCloneServerOptions(std::shared_ptr conf, + SnapshotCloneServerOptions *serverOption) + { + conf->GetValueFatalIfFail("server.address", &serverOption->addr); + conf->GetValueFatalIfFail("server.clientAsyncMethodRetryTimeSec", + &serverOption->clientAsyncMethodRetryTimeSec); + conf->GetValueFatalIfFail("server.clientAsyncMethodRetryIntervalMs", + &serverOption->clientAsyncMethodRetryIntervalMs); + conf->GetValueFatalIfFail("server.snapshotPoolThreadNum", + &serverOption->snapshotPoolThreadNum); + conf->GetValueFatalIfFail("server.snapshotTaskManagerScanIntervalMs", + &serverOption->snapshotTaskManagerScanIntervalMs); + conf->GetValueFatalIfFail("server.chunkSplitSize", + &serverOption->chunkSplitSize); + conf->GetValueFatalIfFail("server.checkSnapshotStatusIntervalMs", + &serverOption->checkSnapshotStatusIntervalMs); + conf->GetValueFatalIfFail("server.maxSnapshotLimit", + &serverOption->maxSnapshotLimit); + conf->GetValueFatalIfFail("server.snapshotCoreThreadNum", + &serverOption->snapshotCoreThreadNum); + conf->GetValueFatalIfFail("server.mdsSessionTimeUs", + &serverOption->mdsSessionTimeUs); + conf->GetValueFatalIfFail("server.readChunkSnapshotConcurrency", + &serverOption->readChunkSnapshotConcurrency); + + 
conf->GetValueFatalIfFail("server.stage1PoolThreadNum", + &serverOption->stage1PoolThreadNum); + conf->GetValueFatalIfFail("server.stage2PoolThreadNum", + &serverOption->stage2PoolThreadNum); + conf->GetValueFatalIfFail("server.commonPoolThreadNum", + &serverOption->commonPoolThreadNum); + + conf->GetValueFatalIfFail("server.cloneTaskManagerScanIntervalMs", + &serverOption->cloneTaskManagerScanIntervalMs); + conf->GetValueFatalIfFail("server.cloneChunkSplitSize", + &serverOption->cloneChunkSplitSize); + conf->GetValueFatalIfFail("server.cloneTempDir", + &serverOption->cloneTempDir); + conf->GetValueFatalIfFail("mds.rootUser", &serverOption->mdsRootUser); + conf->GetValueFatalIfFail("server.createCloneChunkConcurrency", + &serverOption->createCloneChunkConcurrency); + conf->GetValueFatalIfFail("server.recoverChunkConcurrency", + &serverOption->recoverChunkConcurrency); + conf->GetValueFatalIfFail( + "server.backEndReferenceRecordScanIntervalMs", + &serverOption->backEndReferenceRecordScanIntervalMs); + conf->GetValueFatalIfFail( + "server.backEndReferenceFuncScanIntervalMs", + &serverOption->backEndReferenceFuncScanIntervalMs); + + conf->GetValueFatalIfFail("etcd.retry.times", + &(serverOption->dlockOpts.retryTimes)); + conf->GetValueFatalIfFail("etcd.dlock.timeoutMs", + &(serverOption->dlockOpts.ctx_timeoutMS)); + conf->GetValueFatalIfFail("etcd.dlock.ttlSec", + &(serverOption->dlockOpts.ttlSec)); + } + + void SnapShotCloneServer::InitEtcdConf(EtcdConf *etcdConf) + { + conf_->GetValueFatalIfFail("etcd.endpoint", &etcdEndpoints_); + etcdConf->len = etcdEndpoints_.size(); + etcdConf->Endpoints = &etcdEndpoints_[0]; + conf_->GetValueFatalIfFail( + "etcd.dailtimeoutMs", &etcdConf->DialTimeout); + // etcd auth config + bool authEnable = false; + conf_->GetBoolValue("etcd.auth.enable", &authEnable); + etcdConf->authEnable = authEnable ? 
1 : 0; + if (authEnable) + { + conf_->GetValueFatalIfFail("etcd.auth.username", &etcdUsername_); + etcdConf->username = &etcdUsername_[0]; + etcdConf->usernameLen = etcdUsername_.size(); + conf_->GetValueFatalIfFail("etcd.auth.password", &etcdPassword_); + etcdConf->password = &etcdPassword_[0]; + etcdConf->passwordLen = etcdPassword_.size(); + } + } + + void SnapShotCloneServer::InitAllSnapshotCloneOptions(void) + { + InitClientOption(conf_, &(snapshotCloneServerOptions_.clientOptions)); + InitSnapshotCloneServerOptions(conf_, + &(snapshotCloneServerOptions_.serverOption)); + InitEtcdConf(&(snapshotCloneServerOptions_.etcdConf)); + + conf_->GetValueFatalIfFail( + "etcd.operation.timeoutMs", + &(snapshotCloneServerOptions_.etcdClientTimeout)); + + conf_->GetValueFatalIfFail("etcd.retry.times", + &(snapshotCloneServerOptions_.etcdRetryTimes)); + + conf_->GetValueFatalIfFail("server.dummy.listen.port", + &(snapshotCloneServerOptions_.dummyPort)); + + conf_->GetValueFatalIfFail("leader.campagin.prefix", + &(snapshotCloneServerOptions_.campaginPrefix)); + + conf_->GetValueFatalIfFail("leader.session.intersec", + &(snapshotCloneServerOptions_.sessionInterSec)); + + conf_->GetValueFatalIfFail( + "leader.election.timeoutms", + &(snapshotCloneServerOptions_.electionTimeoutMs)); + + conf_->GetValueFatalIfFail("s3.config_path", + &(snapshotCloneServerOptions_.s3ConfPath)); + } + + void SnapShotCloneServer::StartDummy() + { + // Expose conf and version and role(standby or active) + LOG(INFO) << "snapshotCloneServer version: " + << curve::common::CurveVersion(); + curve::common::ExposeCurveVersion(); + conf_->ExposeMetric(configMetricName); + status_.expose(statusMetricName); + status_.set_value(STANDBY); + + int ret = brpc::StartDummyServerAt(snapshotCloneServerOptions_.dummyPort); + if (ret != 0) + { + LOG(FATAL) << "StartDummyServer error"; + } + else + { + LOG(INFO) << "StartDummyServer ok"; + } + } + + bool SnapShotCloneServer::InitEtcdClient(void) + { + etcdClient_ = std::make_shared(); + auto res = etcdClient_->Init(snapshotCloneServerOptions_.etcdConf, + snapshotCloneServerOptions_.etcdClientTimeout, + snapshotCloneServerOptions_.etcdRetryTimes); + if (res != EtcdErrCode::EtcdOK) + { + LOG(ERROR) << "init etcd client err! " + << "etcdaddr: " + << snapshotCloneServerOptions_.etcdConf.Endpoints + << ", etcdaddr len: " + << snapshotCloneServerOptions_.etcdConf.len + << ", etcdtimeout: " + << snapshotCloneServerOptions_.etcdConf.DialTimeout + << ", operation timeout: " + << snapshotCloneServerOptions_.etcdClientTimeout + << ", etcd retrytimes: " + << snapshotCloneServerOptions_.etcdRetryTimes; + return false; + } + + std::string out; + res = etcdClient_->Get("test", &out); + if (res != EtcdErrCode::EtcdOK && res != EtcdErrCode::EtcdKeyNotExist) + { + LOG(ERROR) << "Run snapsthotcloneserver err. Check if etcd is running."; + return false; + } + + LOG(INFO) << "init etcd client ok! 
" + << "etcdaddr: " << snapshotCloneServerOptions_.etcdConf.Endpoints + << ", etcdaddr len: " << snapshotCloneServerOptions_.etcdConf.len + << ", etcdtimeout: " + << snapshotCloneServerOptions_.etcdConf.DialTimeout + << ", operation timeout: " + << snapshotCloneServerOptions_.etcdClientTimeout + << ", etcd retrytimes: " + << snapshotCloneServerOptions_.etcdRetryTimes; + return true; + } + + void SnapShotCloneServer::StartCompaginLeader(void) + { + if (!InitEtcdClient()) + { + LOG(FATAL) << "InitEtcdClient error"; + } + // init leader election options + LeaderElectionOptions option; + option.etcdCli = etcdClient_; + option.leaderUniqueName = snapshotCloneServerOptions_.serverOption.addr; + option.electionTimeoutMs = snapshotCloneServerOptions_.electionTimeoutMs; + option.sessionInterSec = snapshotCloneServerOptions_.sessionInterSec; + option.campaginPrefix = snapshotCloneServerOptions_.campaginPrefix; + leaderElection_ = std::make_shared(option); + + // compagin leader and observe self then return + while (0 != leaderElection_->CampaignLeader()) + { + LOG(INFO) << option.leaderUniqueName << " campaign for leader again"; + } + LOG(INFO) << "Campain leader ok, I am the active member now"; + status_.set_value(ACTIVE); + leaderElection_->StartObserverLeader(); + } + + bool SnapShotCloneServer::Init() + { + snapClient_ = std::make_shared(); + fileClient_ = std::make_shared(); + client_ = std::make_shared(snapClient_, fileClient_); + + if (client_->Init(snapshotCloneServerOptions_.clientOptions) < 0) + { + LOG(ERROR) << "curvefs_client init fail."; + return false; + } + auto codec = std::make_shared(); + + metaStore_ = + std::make_shared(etcdClient_, codec); + if (metaStore_->Init() < 0) + { + LOG(ERROR) << "metaStore init fail."; + return false; + } + + dataStore_ = std::make_shared(); + if (dataStore_->Init(snapshotCloneServerOptions_.s3ConfPath) < 0) + { + LOG(ERROR) << "dataStore init fail."; + return false; + } + + snapshotRef_ = std::make_shared(); + snapshotMetric_ = std::make_shared(metaStore_); + snapshotCore_ = std::make_shared( + client_, metaStore_, dataStore_, snapshotRef_, + snapshotCloneServerOptions_.serverOption); + if (snapshotCore_->Init() < 0) + { + LOG(ERROR) << "SnapshotCore init fail."; + return false; + } + + snapshotTaskManager_ = + std::make_shared(snapshotCore_, snapshotMetric_); + snapshotServiceManager_ = std::make_shared( + snapshotTaskManager_, snapshotCore_); + if (snapshotServiceManager_->Init( + snapshotCloneServerOptions_.serverOption) < 0) + { + LOG(ERROR) << "SnapshotServiceManager init fail."; + return false; + } + + cloneMetric_ = std::make_shared(); + cloneRef_ = std::make_shared(); + cloneCore_ = std::make_shared( + client_, metaStore_, dataStore_, snapshotRef_, cloneRef_, + snapshotCloneServerOptions_.serverOption); + if (cloneCore_->Init() < 0) + { + LOG(ERROR) << "CloneCore init fail."; + return false; + } + cloneTaskMgr_ = + std::make_shared(cloneCore_, cloneMetric_); + + cloneServiceManagerBackend_ = + std::make_shared(cloneCore_); + cloneServiceManager_ = std::make_shared( + cloneTaskMgr_, cloneCore_, cloneServiceManagerBackend_); + if (cloneServiceManager_->Init(snapshotCloneServerOptions_.serverOption) < + 0) + { + LOG(ERROR) << "CloneServiceManager init fail."; + return false; + } + service_ = std::make_shared( + snapshotServiceManager_, cloneServiceManager_); + server_ = std::make_shared(); + if (server_->AddService(service_.get(), brpc::SERVER_DOESNT_OWN_SERVICE) != + 0) + { + LOG(ERROR) << "Failed to add snapshot_service!\n"; + return false; + 
}
+ return true;
+ }
+
+ bool SnapShotCloneServer::Start(void)
+ {
+ // Start the clone service before the snapshot service, because deleting
+ // a snapshot depends on whether any clone still references it
+ int ret = cloneServiceManager_->Start();
+ if (ret < 0)
+ {
+ LOG(ERROR) << "cloneServiceManager start fail"
+ << ", ret = " << ret;
+ return false;
+ }
+ ret = cloneServiceManager_->RecoverCloneTask();
+ if (ret < 0)
+ {
+ LOG(ERROR) << "RecoverCloneTask fail"
+ << ", ret = " << ret;
+ return false;
+ }
+ ret = snapshotServiceManager_->Start();
+ if (ret < 0)
+ {
+ LOG(ERROR) << "snapshotServiceManager start fail"
+ << ", ret = " << ret;
+ return false;
+ }
+ ret = snapshotServiceManager_->RecoverSnapshotTask();
+ if (ret < 0)
+ {
+ LOG(ERROR) << "RecoverSnapshotTask fail"
+ << ", ret = " << ret;
+ return false;
+ }
+
+ brpc::ServerOptions option;
+ option.idle_timeout_sec = -1;
+ if (server_->Start(snapshotCloneServerOptions_.serverOption.addr.c_str(),
+ &option) != 0)
+ {
+ LOG(FATAL) << "snapshotclone rpc server start fail.";
+ }
+ LOG(INFO) << "snapshotclone service start ok ...";
+ return true;
+ }
+
+ void SnapShotCloneServer::RunUntilQuit(void) { server_->RunUntilAskedToQuit(); }
+
+ void SnapShotCloneServer::Stop(void)
+ {
+ LOG(INFO) << "snapshotcloneserver stopping ...";
+ server_->Stop(0);
+ server_->Join();
+ snapshotServiceManager_->Stop();
+ cloneServiceManager_->Stop();
+ LOG(INFO) << "snapshotcloneserver stopped";
+ }
+
+ } // namespace snapshotcloneserver
+} // namespace curve
 diff --git a/src/snapshotcloneserver/snapshotclone_server.h b/src/snapshotcloneserver/snapshotclone_server.h
 index cb9a35d086..7131f5f011 100644
 --- a/src/snapshotcloneserver/snapshotclone_server.h
 +++ b/src/snapshotcloneserver/snapshotclone_server.h
 @@ -23,144 +23,146 @@
 #ifndef SRC_SNAPSHOTCLONESERVER_SNAPSHOTCLONE_SERVER_H_
 #define SRC_SNAPSHOTCLONESERVER_SNAPSHOTCLONE_SERVER_H_
-#include
 #include
+#include
+#include "src/client/libcurve_file.h"
+#include "src/client/libcurve_snapshot.h"
 #include "src/common/configuration.h"
+#include "src/common/snapshotclone/snapshotclone_define.h"
 #include "src/leader_election/leader_election.h"
-
-#include "src/client/libcurve_snapshot.h"
-#include "src/client/libcurve_file.h"
-
+#include "src/snapshotcloneserver/clone/clone_service_manager.h"
 #include "src/snapshotcloneserver/common/config.h"
-#include "src/common/snapshotclone/snapshotclone_define.h"
 #include "src/snapshotcloneserver/common/curvefs_client.h"
 #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h"
+#include "src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h"
 #include "src/snapshotcloneserver/common/snapshotclone_metric.h"
-
+#include "src/snapshotcloneserver/snapshot/snapshot_core.h"
 #include "src/snapshotcloneserver/snapshot/snapshot_data_store.h"
 #include "src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h"
+#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h"
 #include "src/snapshotcloneserver/snapshot/snapshot_task_manager.h"
-#include "src/snapshotcloneserver/snapshot/snapshot_core.h"
 #include "src/snapshotcloneserver/snapshotclone_service.h"
-#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h"
-#include "src/snapshotcloneserver/clone/clone_service_manager.h"
-#include "src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h"
-namespace curve {
-namespace snapshotcloneserver {
-
-extern const char metricExposePrefix[];
-extern const char configMetricName[];
-extern const char statusMetricName[];
-extern
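Start() above is order-sensitive: the clone manager starts and recovers before the snapshot manager, and the brpc server begins accepting traffic only after both recoveries succeed. The same ordering as a compact sketch (Mgr is a stand-in; the real code logs and returns the specific error at each step):

    struct Mgr {
        bool Start() { return true; }
        bool Recover() { return true; }
    };

    // Clone before snapshot, recovery before serving; illustrative only.
    bool StartAll(Mgr& clone, Mgr& snapshot) {
        if (!clone.Start() || !clone.Recover()) return false;
        if (!snapshot.Start() || !snapshot.Recover()) return false;
        return true;  // only now start the brpc server and accept requests
    }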
const char ACTIVE[]; -extern const char STANDBY[]; - - -using EtcdClientImp = ::curve::kvstorage::EtcdClientImp; -using Configuration = ::curve::common::Configuration; -using LeaderElection = ::curve::election::LeaderElection; - -struct SnapShotCloneServerOptions { - CurveClientOptions clientOptions; - SnapshotCloneServerOptions serverOption; - - // etcd options - EtcdConf etcdConf; - int etcdClientTimeout; - int etcdRetryTimes; - - // leaderelections options - std::string campaginPrefix; - int sessionInterSec; - int electionTimeoutMs; - - int dummyPort; - - // s3 - std::string s3ConfPath; -}; - -class SnapShotCloneServer { - public: - explicit SnapShotCloneServer(std::shared_ptr config) - :conf_(config) {} - /** - * @brief 通过配置初始化snapshotcloneserver所需要的所有配置 - */ - void InitAllSnapshotCloneOptions(void); - - /** - * @brief leader选举,未选中持续等待,选中情况下建立watch并返回 - */ - void StartCompaginLeader(void); - - /** - * @brief 启动dummyPort 用于检查主备snapshotserver - * 存活和各种config metric 和版本信息 - */ - void StartDummy(void); - - /** - * @brief 初始化clone与snapshot 各种核心结构 - */ - bool Init(void); - - /** - * @brief 启动各个组件的逻辑和线程池 - */ - bool Start(void); - - /** - * @brief 停止所有服务 - */ - void Stop(void); - - /** - * @brief 启动RPC服务直到外部kill - */ - void RunUntilQuit(void); - - private: - void InitEtcdConf(EtcdConf* etcdConf); - bool InitEtcdClient(void); - - private: - std::shared_ptr conf_; - SnapShotCloneServerOptions snapshotCloneServerOptions_; - // 标记自己为active/standby - bvar::Status status_; - // 与etcd交互的client - std::shared_ptr etcdClient_; - std::shared_ptr leaderElection_; - - std::shared_ptr snapClient_; - std::shared_ptr fileClient_; - std::shared_ptr client_; - - std::shared_ptr metaStore_; - std::shared_ptr dataStore_; - std::shared_ptr snapshotRef_; - std::shared_ptr snapshotMetric_; - std::shared_ptr snapshotCore_; - std::shared_ptr snapshotTaskManager_; - std::shared_ptr snapshotServiceManager_; - - std::shared_ptr cloneMetric_; - std::shared_ptr cloneRef_; - std::shared_ptr cloneCore_; - std::shared_ptr cloneTaskMgr_; - std::shared_ptr cloneServiceManagerBackend_; - std::shared_ptr cloneServiceManager_; - std::shared_ptr service_; - std::shared_ptr server_; - - std::string etcdEndpoints_; - std::string etcdUsername_; - std::string etcdPassword_; -}; - -} // namespace snapshotcloneserver -} // namespace curve - -#endif // SRC_SNAPSHOTCLONESERVER_SNAPSHOTCLONE_SERVER_H_ +namespace curve +{ + namespace snapshotcloneserver + { + + extern const char metricExposePrefix[]; + extern const char configMetricName[]; + extern const char statusMetricName[]; + extern const char ACTIVE[]; + extern const char STANDBY[]; + + using EtcdClientImp = ::curve::kvstorage::EtcdClientImp; + using Configuration = ::curve::common::Configuration; + using LeaderElection = ::curve::election::LeaderElection; + + struct SnapShotCloneServerOptions + { + CurveClientOptions clientOptions; + SnapshotCloneServerOptions serverOption; + + // etcd options + EtcdConf etcdConf; + int etcdClientTimeout; + int etcdRetryTimes; + + // leaderelections options + std::string campaginPrefix; + int sessionInterSec; + int electionTimeoutMs; + + int dummyPort; + + // s3 + std::string s3ConfPath; + }; + + class SnapShotCloneServer + { + public: + explicit SnapShotCloneServer(std::shared_ptr config) + : conf_(config) {} + /** + * @brief: Initialize all configurations required for snapshotcloneserver + * through configuration + */ + void InitAllSnapshotCloneOptions(void); + + /** + * @brief leader election, if not selected, continue to wait. 
If selected, + * establish a watch and return + */ + void StartCompaginLeader(void); + + /** + * @brief: Start dummyPort to check the active and standby snapshotserver + * Survival and various configuration metrics and version information + */ + void StartDummy(void); + + /** + * @brief initializes various core structures of clone and snapshot + */ + bool Init(void); + + /** + * @brief: Start the logic and thread pool of each component + */ + bool Start(void); + + /** + * @brief Stop all services + */ + void Stop(void); + + /** + * @brief Start RPC service until external kill + */ + void RunUntilQuit(void); + + private: + void InitEtcdConf(EtcdConf *etcdConf); + bool InitEtcdClient(void); + + private: + std::shared_ptr conf_; + SnapShotCloneServerOptions snapshotCloneServerOptions_; + // Mark yourself as active/standby + bvar::Status status_; + // Client interacting with ETCD + std::shared_ptr etcdClient_; + std::shared_ptr leaderElection_; + + std::shared_ptr snapClient_; + std::shared_ptr fileClient_; + std::shared_ptr client_; + + std::shared_ptr metaStore_; + std::shared_ptr dataStore_; + std::shared_ptr snapshotRef_; + std::shared_ptr snapshotMetric_; + std::shared_ptr snapshotCore_; + std::shared_ptr snapshotTaskManager_; + std::shared_ptr snapshotServiceManager_; + + std::shared_ptr cloneMetric_; + std::shared_ptr cloneRef_; + std::shared_ptr cloneCore_; + std::shared_ptr cloneTaskMgr_; + std::shared_ptr cloneServiceManagerBackend_; + std::shared_ptr cloneServiceManager_; + std::shared_ptr service_; + std::shared_ptr server_; + + std::string etcdEndpoints_; + std::string etcdUsername_; + std::string etcdPassword_; + }; + + } // namespace snapshotcloneserver +} // namespace curve + +#endif // SRC_SNAPSHOTCLONESERVER_SNAPSHOTCLONE_SERVER_H_ diff --git a/src/snapshotcloneserver/snapshotclone_service.cpp b/src/snapshotcloneserver/snapshotclone_service.cpp index 72f6b04683..f8505b03fe 100644 --- a/src/snapshotcloneserver/snapshotclone_service.cpp +++ b/src/snapshotcloneserver/snapshotclone_service.cpp @@ -22,14 +22,14 @@ #include "src/snapshotcloneserver/snapshotclone_service.h" +#include #include #include -#include #include "json/json.h" #include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/common/uuid.h" #include "src/common/string_util.h" +#include "src/common/uuid.h" #include "src/snapshotcloneserver/clone/clone_closure.h" using ::curve::common::UUIDGenerator; @@ -38,15 +38,14 @@ namespace curve { namespace snapshotcloneserver { void SnapshotCloneServiceImpl::default_method(RpcController* cntl, - const HttpRequest* req, - HttpResponse* resp, - Closure* done) { + const HttpRequest* req, + HttpResponse* resp, + Closure* done) { (void)req; (void)resp; brpc::ClosureGuard done_guard(done); - brpc::Controller* bcntl = - static_cast(cntl); - const std::string *action = + brpc::Controller* bcntl = static_cast(cntl); + const std::string* action = bcntl->http_request().uri().GetQuery(kActionStr); std::string requestId = UUIDGenerator().GenerateUUID(); @@ -91,39 +90,27 @@ void SnapshotCloneServiceImpl::default_method(RpcController* cntl, } LOG(INFO) << "SnapshotCloneServiceImpl Return : " - << "action = " << *action - << ", requestId = " << requestId - << ", context = " << bcntl->response_attachment(); + << "action = " << *action << ", requestId = " << requestId + << ", context = " << bcntl->response_attachment(); return; } void SnapshotCloneServiceImpl::HandleCreateSnapshotAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + 
brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - const std::string *name = - bcntl->http_request().uri().GetQuery(kNameStr); - if ((version == nullptr) || - (user == nullptr) || - (file == nullptr) || - (name == nullptr) || - (version->empty()) || - (user->empty()) || - (file->empty()) || - (name->empty())) { + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + const std::string* name = bcntl->http_request().uri().GetQuery(kNameStr); + if ((version == nullptr) || (user == nullptr) || (file == nullptr) || + (name == nullptr) || (version->empty()) || (user->empty()) || + (file->empty()) || (name->empty())) { HandleBadRequestError(bcntl, requestId); return; } LOG(INFO) << "CreateSnapshot:" - << " Version = " << *version - << ", User = " << *user - << ", File = " << *file - << ", Name = " << *name + << " Version = " << *version << ", User = " << *user + << ", File = " << *file << ", Name = " << *name << ", requestId = " << requestId; UUID uuid; int ret = snapshotManager_->CreateSnapshot(*file, *user, *name, &uuid); @@ -146,22 +133,14 @@ void SnapshotCloneServiceImpl::HandleCreateSnapshotAction( } void SnapshotCloneServiceImpl::HandleDeleteSnapshotAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - if ((version == nullptr) || - (user == nullptr) || - (uuid == nullptr) || - (version->empty()) || - (user->empty()) || - (uuid->empty())) { + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + if ((version == nullptr) || (user == nullptr) || (uuid == nullptr) || + (version->empty()) || (user->empty()) || (uuid->empty())) { HandleBadRequestError(bcntl, requestId); return; } @@ -172,10 +151,8 @@ void SnapshotCloneServiceImpl::HandleDeleteSnapshotAction( fileName = *file; } LOG(INFO) << "DeleteSnapshot:" - << " Version = " << *version - << ", User = " << *user - << ", UUID = " << *uuid - << ", File = " << fileStr + << " Version = " << *version << ", User = " << *user + << ", UUID = " << *uuid << ", File = " << fileStr << ", requestId = " << requestId; int ret = snapshotManager_->DeleteSnapshot(*uuid, *user, fileName); if (ret < 0) { @@ -196,32 +173,21 @@ void SnapshotCloneServiceImpl::HandleDeleteSnapshotAction( } void SnapshotCloneServiceImpl::HandleCancelSnapshotAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - const 
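Each handler above repeats the same guard: fetch every required query parameter with GetQuery and reject the request if any pointer is null or empty. A small helper expressing that check (hypothetical; the handlers inline it):

    #include <string>
    #include <vector>

    // True iff every required query parameter was present and non-empty.
    bool AllPresent(const std::vector<const std::string*>& params) {
        for (const std::string* p : params) {
            if (p == nullptr || p->empty()) return false;
        }
        return true;
    }

With it, the guard in HandleCreateSnapshotAction would collapse to if (!AllPresent({version, user, file, name})) { HandleBadRequestError(bcntl, requestId); return; }.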
std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - if ((version == nullptr) || - (user == nullptr) || - (uuid == nullptr) || - (file == nullptr) || - (version->empty()) || - (user->empty()) || - (uuid->empty()) || - (file->empty())) { + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + if ((version == nullptr) || (user == nullptr) || (uuid == nullptr) || + (file == nullptr) || (version->empty()) || (user->empty()) || + (uuid->empty()) || (file->empty())) { HandleBadRequestError(bcntl, requestId); return; } LOG(INFO) << "CancelSnapshot:" - << " Version = " << *version - << ", User = " << *user - << ", UUID = " << *uuid - << ", File = " << *file + << " Version = " << *version << ", User = " << *user + << ", UUID = " << *uuid << ", File = " << *file << ", requestId = " << requestId; int ret = snapshotManager_->CancelSnapshot(*uuid, *user, *file); if (ret < 0) { @@ -242,28 +208,21 @@ void SnapshotCloneServiceImpl::HandleCancelSnapshotAction( } void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - const std::string *limit = - bcntl->http_request().uri().GetQuery(kLimitStr); - const std::string *offset = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + const std::string* limit = bcntl->http_request().uri().GetQuery(kLimitStr); + const std::string* offset = bcntl->http_request().uri().GetQuery(kOffsetStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - if ((version == nullptr) || - (user == nullptr) || - (version->empty()) || + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + if ((version == nullptr) || (user == nullptr) || (version->empty()) || (user->empty())) { HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -271,7 +230,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { @@ -291,22 +250,18 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( fileName = *file; } LOG(INFO) << "GetFileSnapshotInfo:" - << " Version = " << *version - << ", User = " << *user - << ", File = " << fileStr - << ", Limit = " << limitNum - << ", Offset = " << offsetNum - << ", UUID = " << uuidStr + << " Version = " << *version << ", User = " << *user + << ", File = " << fileStr << ", Limit = " << limitNum + << ", Offset = " << offsetNum << ", UUID = " << uuidStr << ", requestId = " << requestId; std::vector info; int ret = kErrCodeSuccess; if (uuid != nullptr) { - ret = snapshotManager_->GetFileSnapshotInfoById( - fileName, *user, *uuid, &info); + ret = snapshotManager_->GetFileSnapshotInfoById(fileName, 
*user, *uuid, + &info); } else { - ret = snapshotManager_->GetFileSnapshotInfo( - fileName, *user, &info); + ret = snapshotManager_->GetFileSnapshotInfo(fileName, *user, &info); } if (ret < 0) { bcntl->http_response().set_status_code( @@ -323,8 +278,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( mainObj[kTotalCountStr] = info.size(); Json::Value listSnapObj; for (std::vector::size_type i = offsetNum; - i < info.size() && i < limitNum + offsetNum; - i++) { + i < info.size() && i < limitNum + offsetNum; i++) { Json::Value fileSnapObj = info[i].ToJsonObj(); listSnapObj.append(fileSnapObj); } @@ -334,32 +288,22 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( return; } -void SnapshotCloneServiceImpl::HandleCloneAction( - brpc::Controller* bcntl, - const std::string &requestId, - Closure* done) { +void SnapshotCloneServiceImpl::HandleCloneAction(brpc::Controller* bcntl, + const std::string& requestId, + Closure* done) { brpc::ClosureGuard done_guard(done); - const std::string *version = + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *source = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* source = bcntl->http_request().uri().GetQuery(kSourceStr); - const std::string *destination = + const std::string* destination = bcntl->http_request().uri().GetQuery(kDestinationStr); - const std::string *lazy = - bcntl->http_request().uri().GetQuery(kLazyStr); - const std::string *poolset = - bcntl->http_request().uri().GetQuery(kPoolset); - if ((version == nullptr) || - (user == nullptr) || - (source == nullptr) || - (destination == nullptr) || - (lazy == nullptr) || - (version->empty()) || - (user->empty()) || - (source->empty()) || - (destination->empty()) || + const std::string* lazy = bcntl->http_request().uri().GetQuery(kLazyStr); + const std::string* poolset = bcntl->http_request().uri().GetQuery(kPoolset); + if ((version == nullptr) || (user == nullptr) || (source == nullptr) || + (destination == nullptr) || (lazy == nullptr) || (version->empty()) || + (user->empty()) || (source->empty()) || (destination->empty()) || (lazy->empty()) || // poolset is optional, but if it exists, it should not be empty (poolset != nullptr && poolset->empty())) { @@ -381,15 +325,12 @@ void SnapshotCloneServiceImpl::HandleCloneAction( return; } LOG(INFO) << "Clone:" - << " Version = " << *version - << ", User = " << *user - << ", Source = " << *source - << ", Destination = " << *destination + << " Version = " << *version << ", User = " << *user + << ", Source = " << *source << ", Destination = " << *destination << ", Lazy = " << *lazy << ", Poolset = " << (poolset != nullptr ? 
*poolset : "") << ", requestId = " << requestId; - TaskIdType taskId; auto closure = std::make_shared(bcntl, done); closure->SetRequestId(requestId); @@ -400,30 +341,21 @@ void SnapshotCloneServiceImpl::HandleCloneAction( return; } -void SnapshotCloneServiceImpl::HandleRecoverAction( - brpc::Controller* bcntl, - const std::string &requestId, - Closure* done) { +void SnapshotCloneServiceImpl::HandleRecoverAction(brpc::Controller* bcntl, + const std::string& requestId, + Closure* done) { brpc::ClosureGuard done_guard(done); - const std::string *version = + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *source = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* source = bcntl->http_request().uri().GetQuery(kSourceStr); - const std::string *destination = + const std::string* destination = bcntl->http_request().uri().GetQuery(kDestinationStr); - const std::string *lazy = - bcntl->http_request().uri().GetQuery(kLazyStr); - if ((version == nullptr) || - (user == nullptr) || - (source == nullptr) || - (destination == nullptr) || - (lazy == nullptr) || - (version->empty()) || - (user->empty()) || - (source->empty()) || - (destination->empty()) || + const std::string* lazy = bcntl->http_request().uri().GetQuery(kLazyStr); + if ((version == nullptr) || (user == nullptr) || (source == nullptr) || + (destination == nullptr) || (lazy == nullptr) || (version->empty()) || + (user->empty()) || (source->empty()) || (destination->empty()) || (lazy->empty())) { HandleBadRequestError(bcntl, requestId); LOG(INFO) << "SnapshotCloneServiceImpl Return : " @@ -443,37 +375,27 @@ void SnapshotCloneServiceImpl::HandleRecoverAction( return; } LOG(INFO) << "Recover:" - << " Version = " << *version - << ", User = " << *user - << ", Source = " << *source - << ", Destination = " << *destination - << ", Lazy = " << *lazy - << ", requestId = " << requestId; + << " Version = " << *version << ", User = " << *user + << ", Source = " << *source << ", Destination = " << *destination + << ", Lazy = " << *lazy << ", requestId = " << requestId; TaskIdType taskId; auto closure = std::make_shared(bcntl, done); closure->SetRequestId(requestId); - cloneManager_->RecoverFile( - *source, *user, *destination, lazyFlag, closure, &taskId); + cloneManager_->RecoverFile(*source, *user, *destination, lazyFlag, closure, + &taskId); done_guard.release(); return; } void SnapshotCloneServiceImpl::HandleFlattenAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *taskId = - bcntl->http_request().uri().GetQuery(kUUIDStr); - if ((version == nullptr) || - (user == nullptr) || - (taskId == nullptr) || - (version->empty()) || - (user->empty()) || - (taskId->empty())) { + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* taskId = bcntl->http_request().uri().GetQuery(kUUIDStr); + if ((version == nullptr) || (user == nullptr) || (taskId == nullptr) || + (version->empty()) || (user->empty()) || (taskId->empty())) { HandleBadRequestError(bcntl, requestId); LOG(INFO) << "SnapshotCloneServiceImpl Return : " << "action = Flatten" @@ -482,10 +404,8 @@ void 
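The list-returning handlers here (GetFileSnapshotInfo above, GetCloneTasks below) page their JSON output with limit (default 10) and offset (default 0), copying only the entries in [offset, offset + limit). The windowing on its own (PageInto is an illustrative name):

    #include <cstdint>
    #include <vector>

    // Append the half-open window [offset, offset + limit) of src to dst,
    // matching the loop bounds used by the handlers here.
    template <typename T>
    void PageInto(const std::vector<T>& src, uint64_t offset, uint64_t limit,
                  std::vector<T>* dst) {
        for (typename std::vector<T>::size_type i = offset;
             i < src.size() && i < offset + limit; ++i) {
            dst->push_back(src[i]);
        }
    }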
SnapshotCloneServiceImpl::HandleFlattenAction( return; } LOG(INFO) << "Flatten:" - << " Version = " << *version - << ", User = " << *user - << ", UUID = " << *taskId - << ", requestId = " << requestId; + << " Version = " << *version << ", User = " << *user + << ", UUID = " << *taskId << ", requestId = " << requestId; int ret = cloneManager_->Flatten(*user, *taskId); if (ret < 0) { bcntl->http_response().set_status_code( @@ -505,28 +425,21 @@ void SnapshotCloneServiceImpl::HandleFlattenAction( } void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *limit = - bcntl->http_request().uri().GetQuery(kLimitStr); - const std::string *offset = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* limit = bcntl->http_request().uri().GetQuery(kLimitStr); + const std::string* offset = bcntl->http_request().uri().GetQuery(kOffsetStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - if ((version == nullptr) || - (user == nullptr) || - (version->empty()) || + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + if ((version == nullptr) || (user == nullptr) || (version->empty()) || (user->empty())) { HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -534,7 +447,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { @@ -554,25 +467,21 @@ void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( } LOG(INFO) << "GetTasks:" - << " Version = " << *version - << ", User = " << *user - << ", Limit = " << limitNum - << ", Offset = " << offsetNum - << ", UUID = " << uuidStr - << ", File = " << fileStr + << " Version = " << *version << ", User = " << *user + << ", Limit = " << limitNum << ", Offset = " << offsetNum + << ", UUID = " << uuidStr << ", File = " << fileStr << ", requestId = " << requestId; std::vector cloneTaskInfos; int ret = kErrCodeSuccess; if (uuid != nullptr) { - ret = cloneManager_->GetCloneTaskInfoById( - *user, *uuid, &cloneTaskInfos); + ret = + cloneManager_->GetCloneTaskInfoById(*user, *uuid, &cloneTaskInfos); } else if (file != nullptr) { - ret = cloneManager_->GetCloneTaskInfoByName( - *user, *file, &cloneTaskInfos); + ret = cloneManager_->GetCloneTaskInfoByName(*user, *file, + &cloneTaskInfos); } else { - ret = cloneManager_->GetCloneTaskInfo( - *user, &cloneTaskInfos); + ret = cloneManager_->GetCloneTaskInfo(*user, &cloneTaskInfos); } if (ret < 0) { bcntl->http_response().set_status_code( @@ -589,8 +498,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( mainObj[kTotalCountStr] = cloneTaskInfos.size(); Json::Value listObj; for (std::vector::size_type i = offsetNum; - i < cloneTaskInfos.size() && i < limitNum + offsetNum; - i++) { + i < cloneTaskInfos.size() && i < 
limitNum + offsetNum; i++) { Json::Value cloneTaskObj = cloneTaskInfos[i].ToJsonObj(); listObj.append(cloneTaskObj); } @@ -601,16 +509,12 @@ void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( return; } -bool SnapshotCloneServiceImpl::CheckBoolParamter( - const std::string *param, bool *valueOut) { - if (*param == "true" || - *param == "True" || - *param == "TRUE" || +bool SnapshotCloneServiceImpl::CheckBoolParamter(const std::string* param, + bool* valueOut) { + if (*param == "true" || *param == "True" || *param == "TRUE" || *param == "1") { *valueOut = true; - } else if (*param == "false" || - *param == "False" || - *param == "FALSE" || + } else if (*param == "false" || *param == "False" || *param == "FALSE" || *param == "0") { *valueOut = false; } else { @@ -620,30 +524,20 @@ bool SnapshotCloneServiceImpl::CheckBoolParamter( } void SnapshotCloneServiceImpl::HandleCleanCloneTaskAction( - brpc::Controller* bcntl, - const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *taskId = - bcntl->http_request().uri().GetQuery(kUUIDStr); - if ((version == nullptr) || - (user == nullptr) || - (taskId == nullptr) || - (version->empty()) || - (user->empty()) || - (taskId->empty())) { + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* taskId = bcntl->http_request().uri().GetQuery(kUUIDStr); + if ((version == nullptr) || (user == nullptr) || (taskId == nullptr) || + (version->empty()) || (user->empty()) || (taskId->empty())) { HandleBadRequestError(bcntl, requestId); return; } LOG(INFO) << "CleanCloneTask:" - << ", Version = " << *version - << ", User = " << *user - << ", UUID = " << *taskId - << ", requestId = " << requestId; - + << ", Version = " << *version << ", User = " << *user + << ", UUID = " << *taskId << ", requestId = " << requestId; int ret = cloneManager_->CleanCloneTask(*user, *taskId); if (ret < 0) { @@ -664,27 +558,22 @@ void SnapshotCloneServiceImpl::HandleCleanCloneTaskAction( } void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( - brpc::Controller* bcntl, const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *file = - bcntl->http_request().uri().GetQuery(kFileStr); - const std::string *limit = - bcntl->http_request().uri().GetQuery(kLimitStr); - const std::string *offset = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* file = bcntl->http_request().uri().GetQuery(kFileStr); + const std::string* limit = bcntl->http_request().uri().GetQuery(kLimitStr); + const std::string* offset = bcntl->http_request().uri().GetQuery(kOffsetStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - const std::string *status = + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + const std::string* status = bcntl->http_request().uri().GetQuery(kStatusStr); - if ((version == nullptr) || - (version->empty())) { + if ((version == nullptr) || (version->empty())) { HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t 
limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -692,7 +581,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { @@ -719,14 +608,10 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( } LOG(INFO) << "GetFileSnapshotInfo:" - << " Version = " << *version - << ", User = " << userStr - << ", File = " << fileStr - << ", Limit = " << limitNum - << ", Offset = " << offsetNum - << ", UUID = " << uuidStr - << ", Status = " << statusStr - << ", requestId = " << requestId; + << " Version = " << *version << ", User = " << userStr + << ", File = " << fileStr << ", Limit = " << limitNum + << ", Offset = " << offsetNum << ", UUID = " << uuidStr + << ", Status = " << statusStr << ", requestId = " << requestId; std::vector info; int ret = kErrCodeSuccess; @@ -748,8 +633,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( mainObj[kTotalCountStr] = info.size(); Json::Value listSnapObj; for (std::vector::size_type i = offsetNum; - i < info.size() && i < limitNum + offsetNum; - i++) { + i < info.size() && i < limitNum + offsetNum; i++) { Json::Value fileSnapObj = info[i].ToJsonObj(); listSnapObj.append(fileSnapObj); } @@ -760,31 +644,26 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( } void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( - brpc::Controller* bcntl, const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *limit = - bcntl->http_request().uri().GetQuery(kLimitStr); - const std::string *offset = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* limit = bcntl->http_request().uri().GetQuery(kLimitStr); + const std::string* offset = bcntl->http_request().uri().GetQuery(kOffsetStr); - const std::string *uuid = - bcntl->http_request().uri().GetQuery(kUUIDStr); - const std::string *source = + const std::string* uuid = bcntl->http_request().uri().GetQuery(kUUIDStr); + const std::string* source = bcntl->http_request().uri().GetQuery(kSourceStr); - const std::string *destination = + const std::string* destination = bcntl->http_request().uri().GetQuery(kDestinationStr); - const std::string *status = + const std::string* status = bcntl->http_request().uri().GetQuery(kStatusStr); - const std::string *type = - bcntl->http_request().uri().GetQuery(kTypeStr); - if ((version == nullptr) || - (version->empty())) { + const std::string* type = bcntl->http_request().uri().GetQuery(kTypeStr); + if ((version == nullptr) || (version->empty())) { HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -792,7 +671,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { @@ -832,15 +711,11 @@ void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( } LOG(INFO) << "GetTaskList:" - << " 
Version = " << *version - << ", User = " << userStr - << ", Limit = " << limitNum - << ", Offset = " << offsetNum - << ", UUID = " << uuidStr - << ", Source = " << sourceStr + << " Version = " << *version << ", User = " << userStr + << ", Limit = " << limitNum << ", Offset = " << offsetNum + << ", UUID = " << uuidStr << ", Source = " << sourceStr << ", Destination = " << destinationStr - << ", Status = " << statusStr - << ", Type = " << typeStr + << ", Status = " << statusStr << ", Type = " << typeStr << ", requestId = " << requestId; std::vector cloneTaskInfos; @@ -862,8 +737,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( mainObj[kTotalCountStr] = cloneTaskInfos.size(); Json::Value listObj; for (std::vector::size_type i = offsetNum; - i < cloneTaskInfos.size() && i < limitNum + offsetNum; - i++) { + i < cloneTaskInfos.size() && i < limitNum + offsetNum; i++) { Json::Value cloneTaskObj = cloneTaskInfos[i].ToJsonObj(); listObj.append(cloneTaskObj); } @@ -876,33 +750,26 @@ void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( } void SnapshotCloneServiceImpl::HandleGetCloneRefStatusAction( - brpc::Controller* bcntl, const std::string &requestId) { - const std::string *version = + brpc::Controller* bcntl, const std::string& requestId) { + const std::string* version = bcntl->http_request().uri().GetQuery(kVersionStr); - const std::string *user = - bcntl->http_request().uri().GetQuery(kUserStr); - const std::string *source = + const std::string* user = bcntl->http_request().uri().GetQuery(kUserStr); + const std::string* source = bcntl->http_request().uri().GetQuery(kSourceStr); - if ((version == nullptr) || - (user == nullptr) || - (source == nullptr) || - (version->empty()) || - (source->empty()) || - (user->empty())) { + if ((version == nullptr) || (user == nullptr) || (source == nullptr) || + (version->empty()) || (source->empty()) || (user->empty())) { HandleBadRequestError(bcntl, requestId); return; } LOG(INFO) << "GetCloneRefStatus:" - << " Version = " << *version - << ", User = " << *user - << ", Source = " << *source - << ", requestId = " << requestId; + << " Version = " << *version << ", User = " << *user + << ", Source = " << *source << ", requestId = " << requestId; std::vector cloneInfos; CloneRefStatus refStatus; - int ret = cloneManager_->GetCloneRefStatus(*source, &refStatus, - &cloneInfos); + int ret = + cloneManager_->GetCloneRefStatus(*source, &refStatus, &cloneInfos); if (ret < 0) { bcntl->http_response().set_status_code( brpc::HTTP_STATUS_INTERNAL_SERVER_ERROR); @@ -916,7 +783,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneRefStatusAction( mainObj[kCodeStr] = std::to_string(kErrCodeSuccess); mainObj[kMessageStr] = code2Msg[kErrCodeSuccess]; mainObj[kRequestIdStr] = requestId; - mainObj[kRefStatusStr] = static_cast (refStatus); + mainObj[kRefStatusStr] = static_cast(refStatus); mainObj[kTotalCountStr] = 0; if (refStatus == CloneRefStatus::kNeedCheck) { mainObj[kTotalCountStr] = cloneInfos.size(); @@ -943,20 +810,19 @@ void SnapshotCloneServiceImpl::HandleGetCloneRefStatusAction( } void SnapshotCloneServiceImpl::SetErrorMessage(brpc::Controller* bcntl, - int errCode, - const std::string &requestId, - const std::string &uuid) { + int errCode, + const std::string& requestId, + const std::string& uuid) { butil::IOBufBuilder os; - std::string msg = BuildErrorMessage(errCode, - requestId, uuid); + std::string msg = BuildErrorMessage(errCode, requestId, uuid); os << msg; os.move_to(bcntl->response_attachment()); return; } -void 
SnapshotCloneServiceImpl::HandleBadRequestError(brpc::Controller* bcntl, - const std::string &requestId, - const std::string &uuid) { +void SnapshotCloneServiceImpl::HandleBadRequestError( + brpc::Controller* bcntl, const std::string& requestId, + const std::string& uuid) { bcntl->http_response().set_status_code(brpc::HTTP_STATUS_BAD_REQUEST); SetErrorMessage(bcntl, kErrCodeInvalidRequest, requestId, uuid); } diff --git a/src/snapshotcloneserver/snapshotclone_service.h b/src/snapshotcloneserver/snapshotclone_service.h index 6ba1f34f48..c9d15fc222 100644 --- a/src/snapshotcloneserver/snapshotclone_service.h +++ b/src/snapshotcloneserver/snapshotclone_service.h @@ -24,87 +24,82 @@ #define SRC_SNAPSHOTCLONESERVER_SNAPSHOTCLONE_SERVICE_H_ #include + #include #include #include "proto/snapshotcloneserver.pb.h" -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" #include "src/snapshotcloneserver/clone/clone_service_manager.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" namespace curve { namespace snapshotcloneserver { -using ::google::protobuf::RpcController; using ::google::protobuf::Closure; +using ::google::protobuf::RpcController; /** - * @brief 快照转储rpc服务实现 + * @brief snapshot dump rpc service implementation */ class SnapshotCloneServiceImpl : public SnapshotCloneService { public: - /** - * @brief 构造函数 - * - * @param manager 快照转储服务管理对象 - */ + /** + * @brief constructor + * + * @param manager snapshot dump service management object + */ SnapshotCloneServiceImpl( std::shared_ptr snapshotManager, std::shared_ptr cloneManager) - : snapshotManager_(snapshotManager), - cloneManager_(cloneManager) {} + : snapshotManager_(snapshotManager), cloneManager_(cloneManager) {} virtual ~SnapshotCloneServiceImpl() {} /** - * @brief http服务默认方法 + * @brief HTTP service default method * * @param cntl rpc controller - * @param req http请求报文 - * @param resp http回复报文 - * @param done http异步回调闭包 + * @param req HTTP request message + * @param resp HTTP reply message + * @param done HTTP asynchronous callback closure */ - void default_method(RpcController* cntl, - const HttpRequest* req, - HttpResponse* resp, - Closure* done); + void default_method(RpcController* cntl, const HttpRequest* req, + HttpResponse* resp, Closure* done); private: void HandleCreateSnapshotAction(brpc::Controller* bcntl, - const std::string &requestId); + const std::string& requestId); void HandleDeleteSnapshotAction(brpc::Controller* bcntl, - const std::string &requestId); + const std::string& requestId); void HandleCancelSnapshotAction(brpc::Controller* bcntl, - const std::string &requestId); + const std::string& requestId); void HandleGetFileSnapshotInfoAction(brpc::Controller* bcntl, - const std::string &requestId); + const std::string& requestId); void HandleCloneAction(brpc::Controller* bcntl, - const std::string &requestId, - Closure* done); + const std::string& requestId, Closure* done); void HandleRecoverAction(brpc::Controller* bcntl, - const std::string &requestId, - Closure* done); + const std::string& requestId, Closure* done); void HandleFlattenAction(brpc::Controller* bcntl, - const std::string &requestId); + const std::string& requestId); void HandleGetCloneTasksAction(brpc::Controller* bcntl, - const std::string &requestId); + const std::string& requestId); void HandleCleanCloneTaskAction(brpc::Controller* bcntl, - const std::string &requestId); + const std::string& requestId); void HandleGetFileSnapshotListAction(brpc::Controller* bcntl, - const std::string &requestId); + const 
std::string& requestId); void HandleGetCloneTaskListAction(brpc::Controller* bcntl, - const std::string &requestId); + const std::string& requestId); void HandleGetCloneRefStatusAction(brpc::Controller* bcntl, - const std::string &requestId); - bool CheckBoolParamter( - const std::string *param, bool *valueOut); + const std::string& requestId); + bool CheckBoolParamter(const std::string* param, bool* valueOut); void SetErrorMessage(brpc::Controller* bcntl, int errCode, - const std::string &requestId, - const std::string &uuid = ""); + const std::string& requestId, + const std::string& uuid = ""); void HandleBadRequestError(brpc::Controller* bcntl, - const std::string &requestId, - const std::string &uuid = ""); + const std::string& requestId, + const std::string& uuid = ""); private: - // 快照转储服务管理对象 + // Snapshot dump service management object std::shared_ptr snapshotManager_; std::shared_ptr cloneManager_; }; diff --git a/src/tools/chunkserver_client.cpp b/src/tools/chunkserver_client.cpp index 69eb492d5c..8ecd7036cd 100644 --- a/src/tools/chunkserver_client.cpp +++ b/src/tools/chunkserver_client.cpp @@ -28,11 +28,10 @@ namespace curve { namespace tool { std::ostream& operator<<(std::ostream& os, const Chunk& chunk) { - uint64_t groupId = (static_cast<uint64_t>(chunk.logicPoolId) << 32) | - chunk.copysetId; + uint64_t groupId = + (static_cast<uint64_t>(chunk.logicPoolId) << 32) | chunk.copysetId; os << "logicalPoolId:" << chunk.logicPoolId - << ",copysetId:" << chunk.copysetId - << ",groupId:" << groupId + << ",copysetId:" << chunk.copysetId << ",groupId:" << groupId << ",chunkId:" << chunk.chunkId; return os; } @@ -40,8 +39,8 @@ std::ostream& operator<<(std::ostream& os, const Chunk& chunk) { int ChunkServerClient::Init(const std::string& csAddr) { csAddr_ = csAddr; if (channel_.Init(csAddr.c_str(), nullptr) != 0) { - std::cout << "Init channel to chunkserver: " << csAddr - << " failed!" << std::endl; + std::cout << "Init channel to chunkserver: " << csAddr << " failed!"
+ << std::endl; return -1; } return 0; } @@ -69,7 +68,7 @@ int ChunkServerClient::GetRaftStatus(butil::IOBuf* iobuf) { } retryTimes++; } - // 只打最后一次失败的原因 + // Only print the reason of the last failure std::cout << "Send RPC to chunkserver fail, error content: " << cntl.ErrorText() << std::endl; return -1; @@ -97,9 +96,8 @@ bool ChunkServerClient::CheckChunkServerOnline() { return false; } -int ChunkServerClient::GetCopysetStatus( - const CopysetStatusRequest& request, - CopysetStatusResponse* response) { +int ChunkServerClient::GetCopysetStatus(const CopysetStatusRequest& request, + CopysetStatusResponse* response) { brpc::Controller cntl; curve::chunkserver::CopysetService_Stub stub(&channel_); uint64_t retryTimes = 0; @@ -112,17 +110,16 @@ int ChunkServerClient::GetCopysetStatus( continue; } if (response->status() != - COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS) { + COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS) { std::cout << "GetCopysetStatus fail, request: " << request.DebugString() - << ", errCode: " - << response->status() << std::endl; + << ", errCode: " << response->status() << std::endl; return -1; } else { return 0; } } - // 只打最后一次失败的原因 + // Only print the reason of the last failure std::cout << "Send RPC to chunkserver fail, error content: " << cntl.ErrorText() << std::endl; return -1; @@ -151,15 +148,14 @@ int ChunkServerClient::GetChunkHash(const Chunk& chunk, if (response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) { std::cout << "GetCopysetStatus fail, request: " << request.DebugString() - << ", errCode: " - << response.status() << std::endl; + << ", errCode: " << response.status() << std::endl; return -1; } else { *chunkHash = response.hash(); return 0; } } - // 只打最后一次失败的原因 + // Only print the reason of the last failure std::cout << "Send RPC to chunkserver fail, error content: " << cntl.ErrorText() << std::endl; return -1; diff --git a/src/tools/chunkserver_client.h b/src/tools/chunkserver_client.h index 5945737ae8..6c6e006e31 100644 --- a/src/tools/chunkserver_client.h +++ b/src/tools/chunkserver_client.h @@ -23,30 +23,30 @@ #ifndef SRC_TOOLS_CHUNKSERVER_CLIENT_H_ #define SRC_TOOLS_CHUNKSERVER_CLIENT_H_ -#include -#include #include +#include +#include -#include #include +#include #include "proto/chunk.pb.h" #include "proto/copyset.pb.h" #include "src/tools/curve_tool_define.h" +using curve::chunkserver::CHUNK_OP_STATUS; +using curve::chunkserver::COPYSET_OP_STATUS; using curve::chunkserver::CopysetStatusRequest; using curve::chunkserver::CopysetStatusResponse; -using curve::chunkserver::COPYSET_OP_STATUS; using curve::chunkserver::GetChunkHashRequest; using curve::chunkserver::GetChunkHashResponse; -using curve::chunkserver::CHUNK_OP_STATUS; namespace curve { namespace tool { struct Chunk { - Chunk(uint32_t poolId, uint32_t csId, uint64_t chunkId2) : - logicPoolId(poolId), copysetId(csId), chunkId(chunkId2) {} + Chunk(uint32_t poolId, uint32_t csId, uint64_t chunkId2) + : logicPoolId(poolId), copysetId(csId), chunkId(chunkId2) {} uint32_t logicPoolId; uint32_t copysetId; uint64_t chunkId; @@ -58,40 +58,44 @@ class ChunkServerClient { public: virtual ~ChunkServerClient() = default; /** - * @brief 初始化channel,对一个地址,初始化一次就好 - * @param csAddr chunkserver地址 - * @return 成功返回0,失败返回-1 - */ + * @brief initializes the channel; for a given address, initializing it + * once is enough + * @param csAddr chunkserver address + * @return returns 0 for success, -1 for failure + */ virtual int Init(const std::string& csAddr); /** - * @brief 调用braft的RaftStat接口获取复制组的详细信息,放到iobuf里面 - * @param iobuf 复制组详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ + * @brief: Call the RaftStat interface of braft to obtain detailed + * information about the replication group, and place it in iobuf + * @param iobuf replication group details, valid when the return value is 0 + * @return returns 0 for success, -1 for failure + */ virtual int GetRaftStatus(butil::IOBuf* iobuf); /** - * @brief 检查chunkserver是否在线,只检查controller,不检查response - * @return 在线返回true,不在线返回false - */ + * @brief: Check if the chunkserver is online, only check the controller, + * not the response + * @return returns true if online, false if offline + */ virtual bool CheckChunkServerOnline(); /** - * @brief 调用chunkserver的GetCopysetStatus接口 - & @param request 查询copyset的request - * @param response 返回的response,里面有复制组详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ + * @brief calls the GetCopysetStatus interface of chunkserver + * @param request The request specifying the copyset to query + * @param response The response returned contains detailed information + * about the replication group, which is valid when the return value is 0 + * @return returns 0 for success, -1 for failure + */ virtual int GetCopysetStatus(const CopysetStatusRequest& request, CopysetStatusResponse* response); /** - * @brief 从chunkserver获取chunk的hash值 - & @param chunk 要查询的chunk - * @param[out] chunkHash chunk的hash值,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ + * @brief Get the hash value of a chunk from chunkserver + * @param chunk The chunk to query + * @param[out] chunkHash The hash value of the chunk, valid when the return + * value is 0 + * @return returns 0 for success, -1 for failure + */ virtual int GetChunkHash(const Chunk& chunk, std::string* chunkHash); private: diff --git a/src/tools/chunkserver_tool_factory.h b/src/tools/chunkserver_tool_factory.h index 9a1e344b3c..55505eccf0 100644 --- a/src/tools/chunkserver_tool_factory.h +++ b/src/tools/chunkserver_tool_factory.h @@ -26,9 +26,9 @@ #include #include +#include "src/fs/ext4_filesystem_impl.h" #include "src/tools/curve_meta_tool.h" #include "src/tools/raft_log_tool.h" -#include "src/fs/ext4_filesystem_impl.h" namespace curve { namespace tool { @@ -38,20 +38,21 @@ using curve::fs::Ext4FileSystemImpl; class ChunkServerToolFactory { public: /** - * @brief 根据输入的command获取CurveTool对象 - * @param command 要执行的命令的名称 - * @return CurveTool实例 + * @brief Retrieve the CurveTool object based on the input command + * @param command The name of the command to be executed + * @return CurveTool instance */ static std::shared_ptr GenerateChunkServerTool( - const std::string& command); + const std::string& command); + private: /** - * @brief 获取CurveMetaTool实例 + * @brief Get CurveMetaTool instance */ static std::shared_ptr GenerateCurveMetaTool(); /** - * @brief 获取RaftLogMetaTool实例 + * @brief Get RaftLogMetaTool instance */ static std::shared_ptr GenerateRaftLogTool(); }; diff --git a/src/tools/common.cpp b/src/tools/common.cpp index 35f29bf738..cdcdc369ba 100644 --- a/src/tools/common.cpp +++ b/src/tools/common.cpp @@ -29,11 +29,11 @@ namespace curve { namespace tool { void TrimMetricString(std::string* str) { - // 去掉头部空格 + // Remove leading spaces str->erase(0, str->find_first_not_of(" ")); - // 去掉尾部回车 + // Remove trailing carriage returns str->erase(str->find_last_not_of("\r\n") + 1); - // 去掉两边双引号 + // Remove double
quotes from both sides str->erase(0, str->find_first_not_of("\"")); str->erase(str->find_last_not_of("\"") + 1); } diff --git a/src/tools/common.h b/src/tools/common.h index 1465a76ac7..cea600eb5f 100644 --- a/src/tools/common.h +++ b/src/tools/common.h @@ -24,8 +24,9 @@ #define SRC_TOOLS_COMMON_H_ #include -#include + #include +#include DECLARE_uint32(logicalPoolId); DECLARE_uint32(copysetId); @@ -34,9 +35,9 @@ namespace curve { namespace tool { /** - * @brief 格式化,从metric获取的string - * 去掉string两边的双引号以及空格和回车 - * @param[out] str 要格式化的string + * @brief Format the string obtained from metric: + * remove the double quotes, spaces, and carriage returns around it + * @param[out] str The string to format */ void TrimMetricString(std::string* str); diff --git a/src/tools/consistency_check.cpp b/src/tools/consistency_check.cpp index e3a84366ae..7cc1d50ed1 100644 --- a/src/tools/consistency_check.cpp +++ b/src/tools/consistency_check.cpp @@ -20,16 +20,18 @@ * Author: tongguangxun */ -#include - #include "src/tools/consistency_check.h" +#include + DEFINE_string(filename, "", "filename to check consistency"); -DEFINE_bool(check_hash, true, R"(用户需要先确认copyset的applyindex一致之后 - 再去查copyset内容是不是一致。通常需要先设置 - check_hash = false先检查copyset的applyindex是否一致 - 如果一致了再设置check_hash = true, - 检查copyset内容是不是一致)"); +DEFINE_bool( + check_hash, true, + R"(Users need to confirm whether the apply index of the copyset is consistent + before checking if the copyset content is consistent. Usually, you should first set + check_hash = false to initially verify if the apply index of the copyset is consistent. + Once confirmed, then set check_hash = true, + to check if the copyset content is consistent)"); DEFINE_uint32(chunkServerBasePort, 8200, "base port of chunkserver"); DECLARE_string(mdsAddr); @@ -48,8 +50,8 @@ std::ostream& operator<<(std::ostream& os, const CsAddrsType& csAddrs) { for (uint32_t i = 0; i < csAddrs.size(); ++i) { std::string ip; uint32_t port; - if (curve::common::NetCommon::SplitAddrToIpPort(csAddrs[i], - &ip, &port)) { + if (curve::common::NetCommon::SplitAddrToIpPort(csAddrs[i], &ip, + &port)) { uint32_t csSeq = port - FLAGS_chunkServerBasePort; ipVec.emplace_back(ip); seqVec.emplace_back(csSeq); @@ -75,12 +77,11 @@ std::ostream& operator<<(std::ostream& os, const CsAddrsType& csAddrs) { } ConsistencyCheck::ConsistencyCheck( - std::shared_ptr nameSpaceToolCore, - std::shared_ptr csClient) : - nameSpaceToolCore_(nameSpaceToolCore), - csClient_(csClient), - inited_(false) { -} + std::shared_ptr nameSpaceToolCore, + std::shared_ptr csClient) + : nameSpaceToolCore_(nameSpaceToolCore), + csClient_(csClient), + inited_(false) {} bool ConsistencyCheck::SupportCommand(const std::string& command) { return (command == kCheckConsistencyCmd); @@ -98,7 +99,7 @@ int ConsistencyCheck::Init() { return 0; } -int ConsistencyCheck::RunCommand(const std::string &cmd) { +int ConsistencyCheck::RunCommand(const std::string& cmd) { if (Init() != 0) { std::cout << "Init ConsistencyCheck failed" << std::endl; return -1; @@ -131,13 +132,15 @@ int ConsistencyCheck::CheckFileConsistency(const std::string& fileName, return 0; } -void ConsistencyCheck::PrintHelp(const std::string &cmd) { +void ConsistencyCheck::PrintHelp(const std::string& cmd) { if (!SupportCommand(cmd)) { std::cout << "Command not supported!"
<< std::endl; return; } std::cout << "Example: " << std::endl; - std::cout << "curve_ops_tool check-consistency -filename=/test [-check_hash=false]" << std::endl; // NOLINT + std::cout << "curve_ops_tool check-consistency -filename=/test " + "[-check_hash=false]" + << std::endl; // NOLINT } int ConsistencyCheck::FetchFileCopyset(const std::string& fileName, @@ -160,14 +163,11 @@ int ConsistencyCheck::FetchFileCopyset(const std::string& fileName, return 0; } -int ConsistencyCheck::CheckCopysetConsistency( - const CopySet copyset, - bool checkHash) { +int ConsistencyCheck::CheckCopysetConsistency(const CopySet copyset, + bool checkHash) { std::vector csLocs; int res = nameSpaceToolCore_->GetChunkServerListInCopySet( - copyset.first, - copyset.second, - &csLocs); + copyset.first, copyset.second, &csLocs); if (res != 0) { std::cout << "GetServerList info failed, exit consistency check!" << std::endl; @@ -180,9 +180,9 @@ int ConsistencyCheck::CheckCopysetConsistency( std::string csAddr = hostIp + ":" + std::to_string(port); csAddrs.emplace_back(csAddr); } - // 检查当前copyset的chunkserver内容是否一致 + // Check if the chunkserver content of the current copyset is consistent if (checkHash) { - // 先检查apply index是否一致 + // First, check if the apply index is consistent res = CheckApplyIndex(copyset, csAddrs); if (res != 0) { std::cout << "Apply index not match when check hash!" << std::endl; @@ -195,17 +195,16 @@ int ConsistencyCheck::CheckCopysetConsistency( } int ConsistencyCheck::GetCopysetStatusResponse( - const std::string& csAddr, - const CopySet copyset, - CopysetStatusResponse* response) { + const std::string& csAddr, const CopySet copyset, + CopysetStatusResponse* response) { int res = csClient_->Init(csAddr); if (res != 0) { - std::cout << "Init chunkserverClient to " << csAddr - << " fail!" << std::endl; + std::cout << "Init chunkserverClient to " << csAddr << " fail!" + << std::endl; return -1; } CopysetStatusRequest request; - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address(csAddr); request.set_logicpoolid(copyset.first); request.set_copysetid(copyset.second); @@ -213,8 +212,8 @@ int ConsistencyCheck::GetCopysetStatusResponse( request.set_queryhash(false); res = csClient_->GetCopysetStatus(request, response); if (res != 0) { - std::cout << "GetCopysetStatus from " << csAddr - << " fail!" << std::endl; + std::cout << "GetCopysetStatus from " << csAddr << " fail!" + << std::endl; return -1; } return 0; @@ -226,8 +225,7 @@ int ConsistencyCheck::CheckCopysetHash(const CopySet& copyset, Chunk chunk(copyset.first, copyset.second, chunkId); int res = CheckChunkHash(chunk, csAddrs); if (res != 0) { - std::cout << "{" << chunk - << "," << csAddrs << "}" << std::endl; + std::cout << "{" << chunk << "," << csAddrs << "}" << std::endl; return -1; } } @@ -242,8 +240,8 @@ int ConsistencyCheck::CheckChunkHash(const Chunk& chunk, for (const auto& csAddr : csAddrs) { int res = csClient_->Init(csAddr); if (res != 0) { - std::cout << "Init chunkserverClient to " << csAddr - << " fail!" << std::endl; + std::cout << "Init chunkserverClient to " << csAddr << " fail!"
+ << std::endl; return -1; } res = csClient_->GetChunkHash(chunk, &curHash); @@ -276,8 +274,8 @@ int ConsistencyCheck::CheckApplyIndex(const CopySet copyset, CopysetStatusResponse response; int res = GetCopysetStatusResponse(csAddr, copyset, &response); if (res != 0) { - std::cout << "GetCopysetStatusResponse from " << csAddr - << " fail" << std::endl; + std::cout << "GetCopysetStatusResponse from " << csAddr << " fail" + << std::endl; ret = -1; break; } diff --git a/src/tools/consistency_check.h b/src/tools/consistency_check.h index 12e12346b9..aad241306f 100644 --- a/src/tools/consistency_check.h +++ b/src/tools/consistency_check.h @@ -23,25 +23,25 @@ #ifndef SRC_TOOLS_CONSISTENCY_CHECK_H_ #define SRC_TOOLS_CONSISTENCY_CHECK_H_ -#include -#include #include #include +#include +#include -#include -#include #include +#include #include #include +#include #include -#include +#include #include "proto/copyset.pb.h" #include "src/common/net_common.h" -#include "src/tools/namespace_tool_core.h" #include "src/tools/chunkserver_client.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" +#include "src/tools/namespace_tool_core.h" DECLARE_string(filename); DECLARE_bool(check_hash); @@ -57,115 +57,118 @@ std::ostream& operator<<(std::ostream& os, const CsAddrsType& csAddrs); class ConsistencyCheck : public CurveTool { public: ConsistencyCheck(std::shared_ptr nameSpaceToolCore, - std::shared_ptr csClient); + std::shared_ptr csClient); ~ConsistencyCheck() = default; /** - * @brief 打印help信息 - * @param cmd:执行的命令 - * @return 无 + * @brief Print help information + * @param cmd: Command executed + * @return None */ - void PrintHelp(const std::string &cmd) override; + void PrintHelp(const std::string& cmd) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param cmd: Command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &cmd) override; + int RunCommand(const std::string& cmd) override; /** - * @brief 检查三副本一致性 - * @param fileName 要检查一致性的文件名 - * @param checkHash 是否检查hash,如果为false,检查apply index而不是hash - * @return 一致返回0,否则返回-1 + * @brief Check consistency of three replicas + * @param fileName The file name to check for consistency + * @param checkHash Whether to check hash; if false, check apply index + * instead of hash + * @return Returns 0 if consistent, otherwise -1 */ int CheckFileConsistency(const std::string& fileName, bool checkHash); /** - * @brief 检查copyset的三副本一致性 - * @param copysetId 要检查的copysetId - * @param checkHash 是否检查hash,如果为false,检查apply index而不是hash - * @return 成功返回0,失败返回-1 + * @brief Check the consistency of the three copies of the copyset + * @param copysetId The copysetId to be checked + * @param checkHash Whether to check hash; if false, check apply index + * instead of hash + * @return returns 0 for success, -1 for failure */ - int CheckCopysetConsistency(const CopySet copysetId, - bool checkHash); + int CheckCopysetConsistency(const CopySet copysetId, bool checkHash); /** - * @brief 打印帮助信息 + * @brief Print help information */ void PrintHelp(); /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); private: - /** - * @brief 初始化 + /** + * @brief initialization */ int Init(); /** - * @brief 从mds获取文件所在的copyset列表 - * @param fileName 文件名 - * @param[out] copysetIds copysetId的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the list of copysets where the file is located from mds + * @param fileName File name + * @param[out] copysets The list of copysets, valid when the return + * value is 0 + * @return returns 0 for success, -1 for failure */ int FetchFileCopyset(const std::string& fileName, std::set* copysets); /** - * @brief 从chunkserver获取copyset的状态 - * @param csAddr chunkserver地址 - * @param copysetId 要获取的copysetId - * @param[out] response 返回的response - * @return 成功返回0,失败返回-1 + * @brief Get the status of copyset from chunkserver + * @param csAddr chunkserver address + * @param copysetId The copysetId to obtain + * @param[out] response The response returned + * @return returns 0 for success, -1 for failure */ int GetCopysetStatusResponse(const std::string& csAddr, const CopySet copyset, CopysetStatusResponse* response); /** - * @brief 检查copyset中指定chunk的hash的一致性 - * @param copysetId 要检查的copysetId - * @param csAddrs copyset对应的chunkserver的地址 - * @return 一致返回0,否则返回-1 + * @brief Check the consistency of the hash of the specified chunk in the + * copyset + * @param copysetId The copysetId to be checked + * @param csAddrs The address of the chunkserver corresponding to the + * copyset + * @return Returns 0 if consistent, otherwise -1 */ - int CheckCopysetHash(const CopySet& copyset, - const CsAddrsType& csAddrs); + int CheckCopysetHash(const CopySet& copyset, const CsAddrsType& csAddrs); /** - * @brief chunk在三个副本的hash的一致性 - * @param chunk 要检查的chunk - * @param csAddrs copyset对应的chunkserver的地址 - * @return 一致返回0,否则返回-1 + * @brief Check the consistency of a chunk's hash across its three replicas + * @param chunk The chunk to be checked + * @param csAddrs The address of the chunkserver corresponding to the + * copyset + * @return Returns 0 if consistent, otherwise -1 */ - int CheckChunkHash(const Chunk& chunk, - const CsAddrsType& csAddrs); + int CheckChunkHash(const Chunk& chunk, const CsAddrsType& csAddrs); /** - * @brief 检查副本间applyindex的一致性 - * @param copysetId 要检查的copysetId - * @param csAddrs copyset对应的chunkserver的地址 - * @return 一致返回0,否则返回-1 + * @brief Check the consistency of the applyindex between replicas + * @param copysetId The copysetId to be checked + * @param csAddrs The address of the chunkserver corresponding to the + * copyset + * @return Returns 0 if consistent, otherwise -1 */ - int CheckApplyIndex(const CopySet copyset, - const CsAddrsType& csAddrs); + int CheckApplyIndex(const CopySet copyset, const CsAddrsType& csAddrs); private: - // 文件所在的逻辑池id - PoolIdType lpid_; - // 用来与mds的nameservice接口交互 + // The logical pool ID where the file is located + PoolIdType lpid_; + // Used to interact with the nameservice interface of mds std::shared_ptr nameSpaceToolCore_; - // 向chunkserver发送RPC的client + // Client that sends RPCs
to chunkserver std::shared_ptr csClient_; - // copyset中需要检查hash的chunk + // Chunks in the copyset whose hash needs to be checked std::map> chunksInCopyset_; - // 是否初始化成功过 + // Whether initialization has ever succeeded bool inited_; }; } // namespace tool diff --git a/src/tools/copyset_check.cpp b/src/tools/copyset_check.cpp index 06341e5291..1d38b1d691 100644 --- a/src/tools/copyset_check.cpp +++ b/src/tools/copyset_check.cpp @@ -20,42 +20,44 @@ * Author: charisu */ #include "src/tools/copyset_check.h" + #include "src/tools/common.h" #include "src/tools/metric_name.h" DEFINE_bool(detail, false, "list the copyset detail or not"); DEFINE_uint32(chunkserverId, 0, "chunkserver id"); -DEFINE_string(chunkserverAddr, "", "if specified, chunkserverId is not required"); // NOLINT +DEFINE_string(chunkserverAddr, "", + "if specified, chunkserverId is not required"); // NOLINT DEFINE_uint32(serverId, 0, "server id"); DEFINE_string(serverIp, "", "server ip"); DEFINE_string(opName, curve::tool::kTotalOpName, "operator name"); DECLARE_string(mdsAddr); -DEFINE_uint64(opIntervalExceptLeader, 5, "Operator generation interval other " "than transfer leader"); +DEFINE_uint64(opIntervalExceptLeader, 5, + "Operator generation interval other " + "than transfer leader"); DEFINE_uint64(leaderOpInterval, 30, - "tranfer leader operator generation interval"); + "transfer leader operator generation interval"); namespace curve { namespace tool { -#define CHECK_ONLY_ONE_SHOULD_BE_SPECIFIED(flagname1, flagname2) \ - do { \ - if ((FLAGS_ ## flagname1).empty() && (FLAGS_ ## flagname2) == 0) { \ - std::cout << # flagname1 << " OR " << # flagname2 \ - " should be secified!" << std::endl; \ - return -1; \ - } \ - if (!(FLAGS_ ## flagname1).empty() && (FLAGS_ ## flagname2) != 0) { \ - std::cout << "Only one of " # flagname1 << " OR " << # flagname2 \ - " should be secified!" << std::endl; \ - return -1; \ - } \ - } while (0); \ +#define CHECK_ONLY_ONE_SHOULD_BE_SPECIFIED(flagname1, flagname2) \ + do { \ + if ((FLAGS_##flagname1).empty() && (FLAGS_##flagname2) == 0) { \ + std::cout << #flagname1 << " OR " \ + << #flagname2 " should be specified!" << std::endl; \ + return -1; \ + } \ + if (!(FLAGS_##flagname1).empty() && (FLAGS_##flagname2) != 0) { \ + std::cout << "Only one of " #flagname1 << " OR " \ + << #flagname2 " should be specified!" << std::endl; \ + return -1; \ + } \ + } while (0); bool CopysetCheck::SupportCommand(const std::string& command) { - return (command == kCheckCopysetCmd || command == kCheckChunnkServerCmd - || command == kCheckServerCmd || command == kCopysetsStatusCmd - || command == kCheckOperatorCmd - || command == kListMayBrokenVolumes); + return (command == kCheckCopysetCmd || command == kCheckChunnkServerCmd || + command == kCheckServerCmd || command == kCopysetsStatusCmd || + command == kCheckOperatorCmd || command == kListMayBrokenVolumes); } int CopysetCheck::Init() { @@ -76,7 +78,7 @@ int CopysetCheck::RunCommand(const std::string& command) { return -1; } if (command == kCheckCopysetCmd) { - // 检查某个copyset的状态 + // Check the status of a copyset if (FLAGS_logicalPoolId == 0 || FLAGS_copysetId == 0) { std::cout << "logicalPoolId AND copysetId should be specified!"
<< std::endl; @@ -84,7 +86,7 @@ int CopysetCheck::RunCommand(const std::string& command) { } return CheckCopyset(); } else if (command == kCheckChunnkServerCmd) { - // 检查某个chunkserver上的所有copyset + // Check all copysets on a certain chunkserver CHECK_ONLY_ONE_SHOULD_BE_SPECIFIED(chunkserverAddr, chunkserverId); return CheckChunkServer(); } else if (command == kCheckServerCmd) { @@ -159,8 +161,8 @@ int CopysetCheck::CheckServer() { if (FLAGS_detail) { PrintDetail(); std::ostream_iterator out(std::cout, ", "); - std::cout << "unhealthy chunkserver list (total: " - << unhealthyCs.size() <<"): {"; + std::cout << "unhealthy chunkserver list (total: " << unhealthyCs.size() + << "): {"; std::copy(unhealthyCs.begin(), unhealthyCs.end(), out); std::cout << "}" << std::endl; } @@ -188,11 +190,10 @@ int CopysetCheck::CheckOperator(const std::string& opName) { } else { res = core_->CheckOperator(opName, FLAGS_opIntervalExceptLeader); } - if (res < 0) { + if (res < 0) { std::cout << "Check operator fail!" << std::endl; } else { - std::cout << "Operator num is " - << res << std::endl; + std::cout << "Operator num is " << res << std::endl; res = 0; } return res; @@ -202,27 +203,33 @@ void CopysetCheck::PrintHelp(const std::string& command) { std::cout << "Example: " << std::endl << std::endl; if (command == kCheckCopysetCmd) { std::cout << "curve_ops_tool check-copyset -logicalPoolId=2 " - << "-copysetId=101 [-mdsAddr=127.0.0.1:6666] [-margin=1000] " - << "[-confPath=/etc/curve/tools.conf]" << std::endl << std::endl; // NOLINT + << "-copysetId=101 [-mdsAddr=127.0.0.1:6666] [-margin=1000] " + << "[-confPath=/etc/curve/tools.conf]" << std::endl + << std::endl; // NOLINT } else if (command == kCheckChunnkServerCmd) { - std::cout << "curve_ops_tool check-chunkserver " + std::cout + << "curve_ops_tool check-chunkserver " << "-chunkserverId=1 [-mdsAddr=127.0.0.1:6666] [-margin=1000] " << "[-confPath=/etc/curve/tools.conf]" << std::endl; std::cout << "curve_ops_tool check-chunkserver " - << "[-mdsAddr=127.0.0.1:6666] " - << "[-chunkserverAddr=127.0.0.1:8200] [-margin=1000] " - << "[-confPath=/etc/curve/tools.conf]" << std::endl << std::endl; // NOLINT + << "[-mdsAddr=127.0.0.1:6666] " + << "[-chunkserverAddr=127.0.0.1:8200] [-margin=1000] " + << "[-confPath=/etc/curve/tools.conf]" << std::endl + << std::endl; // NOLINT } else if (command == kCheckServerCmd) { std::cout << "curve_ops_tool check-server -serverId=1 " - << "[-mdsAddr=127.0.0.1:6666] [-margin=1000] " - << "[-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + << "[-mdsAddr=127.0.0.1:6666] [-margin=1000] " + << "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT std::cout << "curve_ops_tool check-server [-mdsAddr=127.0.0.1:6666] " - << "[-serverIp=127.0.0.1] [-margin=1000] " - << "[-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + << "[-serverIp=127.0.0.1] [-margin=1000] " + << "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT } else if (command == kCopysetsStatusCmd) { std::cout << "curve_ops_tool copysets-status [-mdsAddr=127.0.0.1:6666] " << "[-margin=1000] [-operatorMaxPeriod=30] [-checkOperator] " - << "[-confPath=/etc/curve/tools.conf]" << std::endl << std::endl; // NOLINT + << "[-confPath=/etc/curve/tools.conf]" << std::endl + << std::endl; // NOLINT } else if (command == kCheckOperatorCmd) { std::cout << "curve_ops_tool check-operator -opName=" << kTotalOpName << "/" << kChangeOpName << "/" << kAddOpName << "/" @@ -233,26 +240,32 @@ void CopysetCheck::PrintHelp(const std::string& command) { std::cout << 
"Command not supported!" << std::endl; } std::cout << std::endl; - std::cout << "Standard of healthy is no copyset in the following state:" << std::endl; // NOLINT + std::cout << "Standard of healthy is no copyset in the following state:" + << std::endl; // NOLINT std::cout << "1、copyset has no leader" << std::endl; std::cout << "2、number of replicas less than expected" << std::endl; std::cout << "3、some replicas not online" << std::endl; std::cout << "4、installing snapshot" << std::endl; std::cout << "5、gap of log index between peers exceed margin" << std::endl; - std::cout << "6、for check-cluster, it will also check whether the mds is scheduling if -checkOperator specified" // NOLINT - "(if no operators in operatorMaxPeriod, it considered healthy)" << std::endl; // NOLINT - std::cout << "By default, if the number of replicas is less than 3, it is considered unhealthy, " // NOLINT - "you can change it by specify -replicasNum" << std::endl; - std::cout << "The order is sorted by priority, if the former is satisfied, the rest will not be checked" << std::endl; // NOLINT + std::cout << "6、for check-cluster, it will also check whether the mds is " + "scheduling if -checkOperator specified" // NOLINT + "(if no operators in operatorMaxPeriod, it considered healthy)" + << std::endl; // NOLINT + std::cout << "By default, if the number of replicas is less than 3, it is " + "considered unhealthy, " // NOLINT + "you can change it by specify -replicasNum" + << std::endl; + std::cout << "The order is sorted by priority, if the former is satisfied, " + "the rest will not be checked" + << std::endl; // NOLINT } - void CopysetCheck::PrintStatistic() { const auto& statistics = core_->GetCopysetStatistics(); std::cout << "total copysets: " << statistics.totalNum << ", unhealthy copysets: " << statistics.unhealthyNum - << ", unhealthy_ratio: " - << statistics.unhealthyRatio * 100 << "%" << std::endl; + << ", unhealthy_ratio: " << statistics.unhealthyRatio * 100 << "%" + << std::endl; } void CopysetCheck::PrintDetail() { @@ -282,7 +295,7 @@ void CopysetCheck::PrintDetail() { PrintCopySet(item.second); } std::cout << std::endl; - // 打印有问题的chunkserver + // Printing problematic chunkservers PrintExcepChunkservers(); } @@ -300,32 +313,30 @@ void CopysetCheck::PrintCopySet(const std::set& set) { } PoolIdType lgId = GetPoolID(groupId); CopySetIdType csId = GetCopysetID(groupId); - std::cout << "(grouId: " << gid << ", logicalPoolId: " - << std::to_string(lgId) << ", copysetId: " - << std::to_string(csId) << ")"; + std::cout << "(grouId: " << gid + << ", logicalPoolId: " << std::to_string(lgId) + << ", copysetId: " << std::to_string(csId) << ")"; } std::cout << "}" << std::endl; } void CopysetCheck::PrintExcepChunkservers() { - auto serviceExceptionChunkServers = - core_->GetServiceExceptionChunkServer(); + auto serviceExceptionChunkServers = core_->GetServiceExceptionChunkServer(); if (!serviceExceptionChunkServers.empty()) { std::ostream_iterator out(std::cout, ", "); std::cout << "service-exception chunkservers (total: " << serviceExceptionChunkServers.size() << "): {"; std::copy(serviceExceptionChunkServers.begin(), - serviceExceptionChunkServers.end(), out); + serviceExceptionChunkServers.end(), out); std::cout << "}" << std::endl; } - auto copysetLoadExceptionCS = - core_->GetCopysetLoadExceptionChunkServer(); + auto copysetLoadExceptionCS = core_->GetCopysetLoadExceptionChunkServer(); if (!copysetLoadExceptionCS.empty()) { std::ostream_iterator out(std::cout, ", "); std::cout << "copyset-load-exception 
chunkservers (total: " << copysetLoadExceptionCS.size() << "): {"; - std::copy(copysetLoadExceptionCS.begin(), - copysetLoadExceptionCS.end(), out); + std::copy(copysetLoadExceptionCS.begin(), copysetLoadExceptionCS.end(), + out); std::cout << "}" << std::endl; } } diff --git a/src/tools/copyset_check.h b/src/tools/copyset_check.h index b4fa76c28f..54d5e46d36 100644 --- a/src/tools/copyset_check.h +++ b/src/tools/copyset_check.h @@ -25,23 +25,23 @@ #include -#include +#include #include +#include #include -#include -#include -#include #include -#include +#include +#include +#include #include "src/mds/common/mds_define.h" #include "src/tools/copyset_check_core.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" -using curve::mds::topology::PoolIdType; -using curve::mds::topology::CopySetIdType; using curve::mds::topology::ChunkServerIdType; +using curve::mds::topology::CopySetIdType; +using curve::mds::topology::PoolIdType; using curve::mds::topology::ServerIdType; namespace curve { @@ -49,94 +49,101 @@ namespace tool { class CopysetCheck : public CurveTool { public: - explicit CopysetCheck(std::shared_ptr core) : - core_(core), inited_(false) {} + explicit CopysetCheck(std::shared_ptr core) + : core_(core), inited_(false) {} ~CopysetCheck() = default; /** - * @brief 根据flag检查复制组健康状态 - * 复制组健康的标准,没有任何副本处于以下状态,下面的顺序按优先级排序, - * 即满足上面一条,就不会检查下面一条 - * 1、leader为空(复制组的信息以leader处的为准,没有leader无法检查) - * 2、配置中的副本数量不足 - * 3、有副本不在线 - * 4、有副本在安装快照 - * 5、副本间log index差距太大 - * 6、对于集群来说,还要判断一下chunkserver上的copyset数量和leader数量是否均衡, - * 避免后续会有调度使得集群不稳定 - * @param command 要执行的命令,目前有check-copyset,check-chunkserver, - * check-server,check-cluster等 - * @return 成功返回0,失败返回-1 + * @brief Check the health status of the replication group based on the flag + * The standard for replication group health is that no replica is in any + * of the following states; the checks below are sorted by priority, and + * once one of them is met, the rest are not checked + * 1. The leader is empty (the information of the replication group is + * based on the leader, and cannot be checked without a leader) + * 2. Insufficient number of replicas in the configuration + * 3. Some replicas are not online + * 4. Some replicas are installing a snapshot + * 5. The log index difference between replicas is too large + * 6. For a cluster, it is also necessary to determine whether the number + * of copysets and the number of leaders on the chunkserver are balanced, + * to avoid later scheduling that could destabilize the cluster + * @param command The command to execute; currently includes check-copyset, + * check-chunkserver, check-server, check-cluster, etc. + * @return returns 0 for success, -1 for failure */ int RunCommand(const std::string& command) override; /** - * @brief 打印帮助信息 - * @param command 要执行的命令,目前有check-copyset,check-chunkserver, - * check-server,check-cluster等 + * @brief Print help information + * @param command The command to execute; currently includes check-copyset, + * check-chunkserver, check-server, check-cluster, etc. */ void PrintHelp(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true / false */ static bool SupportCommand(const std::string& command); private: - /** - * @brief 初始化 + /** + * @brief initialization */ int Init(); /** - * @brief 检查单个copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check a single copyset + * @return Returns 0 if healthy, otherwise -1 */ int CheckCopyset(); /** - * @brief 检查chunkserver上所有copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check all copysets on chunkserver + * @return Returns 0 if healthy, otherwise -1 */ int CheckChunkServer(); /** - * @brief 检查server上所有copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check all copysets on the server + * @return Returns 0 if healthy, otherwise -1 */ int CheckServer(); /** - * @brief 检查集群所有copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check all copysets in the cluster + * @return Returns 0 if healthy, otherwise -1 */ int CheckCopysetsInCluster(); /** - * @brief 检查mds端的operator - * @return 无operator返回0,其他情况返回-1 + * @brief Check the operator on the mds side + * @return Returns 0 if there is no operator, otherwise -1 */ int CheckOperator(const std::string& opName); - // 打印copyset检查的详细结果 + // Print detailed results of copyset check void PrintDetail(); void PrintCopySet(const std::set& set); - // 打印检查的结果,一共多少copyset,有多少不健康 + // Print the check results: how many copysets in total, // and how many are unhealthy void PrintStatistic(); - // 打印有问题的chunkserver列表 + // Print a list of problematic chunkservers void PrintExcepChunkservers(); - // 打印大多数不在线的副本上面的卷 + // Print the volumes whose replicas are mostly offline int PrintMayBrokenVolumes(); private: - // 检查copyset的核心逻辑 + // Core logic for checking copysets std::shared_ptr core_; - // 是否初始化成功过 + // Whether initialization has ever succeeded bool inited_; }; } // namespace tool diff --git a/src/tools/copyset_check_core.cpp b/src/tools/copyset_check_core.cpp index f32a7a923d..8a7a3165d9 100644 --- a/src/tools/copyset_check_core.cpp +++ b/src/tools/copyset_check_core.cpp @@ -20,15 +20,19 @@ * Author: charisu */ #include "src/tools/copyset_check_core.h" + #include + #include DEFINE_uint64(margin, 1000, "The threshold of the gap between peers"); DEFINE_uint64(replicasNum, 3, "the number of replicas that required"); -DEFINE_uint64(operatorMaxPeriod, 30, "max period of operator generating, " - "if no operators in a period, it considered to be healthy"); +DEFINE_uint64(operatorMaxPeriod, 30, + "max period of operator generating, " + "if no operators in a period, it is
considered to be healthy"); +DEFINE_bool(checkOperator, false, + "if true, the operator number of " + "mds will be considered"); namespace curve { namespace tool { @@ -38,24 +42,22 @@ int CopysetCheckCore::Init(const std::string& mdsAddr) { } CopysetStatistics::CopysetStatistics(uint64_t total, uint64_t unhealthy) - : totalNum(total), unhealthyNum(unhealthy) { + : totalNum(total), unhealthyNum(unhealthy) { if (total != 0) { - unhealthyRatio = - static_cast(unhealthyNum) / totalNum; + unhealthyRatio = static_cast(unhealthyNum) / totalNum; } else { unhealthyRatio = 0; } } CheckResult CopysetCheckCore::CheckOneCopyset(const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId) { + const CopySetIdType& copysetId) { Clear(); std::vector chunkserverLocation; - int res = mdsClient_->GetChunkServerListInCopySet(logicalPoolId, - copysetId, &chunkserverLocation); + int res = mdsClient_->GetChunkServerListInCopySet(logicalPoolId, copysetId, + &chunkserverLocation); if (res != 0) { - std::cout << "GetChunkServerListInCopySet from mds fail!" - << std::endl; + std::cout << "GetChunkServerListInCopySet from mds fail!" << std::endl; return CheckResult::kOtherErr; } int majority = chunkserverLocation.size() / 2 + 1; @@ -69,7 +71,7 @@ CheckResult CopysetCheckCore::CheckOneCopyset(const PoolIdType& logicalPoolId, butil::IOBuf iobuf; int res = QueryChunkServer(csAddr, &iobuf); if (res != 0) { - // 如果查询chunkserver失败,认为不在线 + // If the query for chunkserver fails, it is considered offline serviceExceptionChunkServers_.emplace(csAddr); chunkserverCopysets_[csAddr] = {}; ++offlinePeers; @@ -92,7 +94,7 @@ CheckResult CopysetCheckCore::CheckOneCopyset(const PoolIdType& logicalPoolId, } } else { if (copysetInfo.count(kLeader) == 0 || - copysetInfo[kLeader] == kEmptyAddr) { + copysetInfo[kLeader] == kEmptyAddr) { checkRes = CheckResult::kOtherErr; } } @@ -106,20 +108,20 @@ CheckResult CopysetCheckCore::CheckOneCopyset(const PoolIdType& logicalPoolId, } int CopysetCheckCore::CheckCopysetsOnChunkServer( - const ChunkServerIdType& chunkserverId) { + const ChunkServerIdType& chunkserverId) { Clear(); return CheckCopysetsOnChunkServer(chunkserverId, ""); } int CopysetCheckCore::CheckCopysetsOnChunkServer( - const std::string& chunkserverAddr) { + const std::string& chunkserverAddr) { Clear(); return CheckCopysetsOnChunkServer(0, chunkserverAddr); } int CopysetCheckCore::CheckCopysetsOnChunkServer( - const ChunkServerIdType& chunkserverId, - const std::string& chunkserverAddr) { + const ChunkServerIdType& chunkserverId, + const std::string& chunkserverAddr) { curve::mds::topology::ChunkServerInfo csInfo; int res = 0; if (chunkserverId > 0) { @@ -131,7 +133,7 @@ int CopysetCheckCore::CheckCopysetsOnChunkServer( std::cout << "GetChunkServerInfo from mds fail!" << std::endl; return -1; } - // 如果chunkserver retired的话不发送请求 + // If chunkserver is redirected, do not send the request if (csInfo.status() == ChunkServerStatus::RETIRED) { std::cout << "ChunkServer is retired!" 
@@ -139,7 +141,7 @@ int CopysetCheckCore::CheckCopysetsOnChunkServer( std::string hostIp = csInfo.hostip(); uint64_t port = csInfo.port(); std::string csAddr = hostIp + ":" + std::to_string(port); - // 向chunkserver发送RPC请求获取raft state + // Send an RPC request to the chunkserver to obtain its raft state ChunkServerHealthStatus csStatus = CheckCopysetsOnChunkServer(csAddr, {}); if (csStatus == ChunkServerHealthStatus::kHealthy) { return 0;
@@ -149,11 +151,8 @@ } ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( - const std::string& chunkserverAddr, - const std::set& groupIds, - bool queryLeader, - std::pair *record, - bool queryCs) { + const std::string& chunkserverAddr, const std::set& groupIds, + bool queryLeader, std::pair* record, bool queryCs) { bool isHealthy = true; int res = 0; butil::IOBuf iobuf;
@@ -165,33 +164,38 @@ ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( } if (res != 0) { - // 如果查询chunkserver失败,认为不在线,把它上面所有的 - // copyset都添加到peerNotOnlineCopysets_里面 + // If querying the chunkserver fails, consider it offline and add all + // its copysets to peerNotOnlineCopysets_. UpdatePeerNotOnlineCopysets(chunkserverAddr); serviceExceptionChunkServers_.emplace(chunkserverAddr); chunkserverCopysets_[chunkserverAddr] = {}; return ChunkServerHealthStatus::kNotOnline; } - // 存储每一个copyset的详细信息 + // Store detailed information for each copyset CopySetInfosType copysetInfos; ParseResponseAttachment(groupIds, &iobuf, &copysetInfos); - // 只有查询全部chunkserver的时候才更新chunkServer上的copyset列表 + // Only update the copyset list on the chunkserver when querying all + // chunkservers if (groupIds.empty()) { UpdateChunkServerCopysets(chunkserverAddr, copysetInfos); } - // 对应的chunkserver上没有要找的leader的copyset,可能已经迁移出去了, - // 但是follower这边还没更新,这种情况也认为chunkserver不健康 + // The copyset whose leader we are looking for is not on the corresponding + // chunkserver; it may have been migrated away while the follower has not + // yet been updated. This case is also considered unhealthy if (copysetInfos.empty() || - (!groupIds.empty() && copysetInfos.size() != groupIds.size())) { + (!groupIds.empty() && copysetInfos.size() != groupIds.size())) { std::cout << "Some copysets not found on chunkserver, may be tranfered" << std::endl; return ChunkServerHealthStatus::kNotHealthy; } - // 存储需要发送消息的chunkserver的地址和对应的groupId - // key是chunkserver地址,value是groupId的列表 + // Store the addresses and corresponding groupIds of the chunkservers to + // which requests need to be sent. The key is the chunkserver address and + // the value is a list of groupIds std::map> csAddrMap; - // 存储没有leader的copyset对应的peers,key为groupId,value为配置 + // Store the peers of copysets that have no leader, with the groupId as + // key and the configuration as value std::map> noLeaderCopysetsPeers; for (auto& copysetInfo : copysetInfos) { std::string groupId = copysetInfo[kGroupId];
@@ -228,17 +232,17 @@ ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( break; } } else if (state == kStateFollower) { - // 如果没有leader,检查是否是大多数不在线 - // 是的话标记为大多数不在线,否则标记为No leader + // If there is no leader, check whether the majority of peers are + // offline. If so, mark it as majority-offline; otherwise mark it + // as no-leader if (copysetInfo.count(kLeader) == 0 || - copysetInfo[kLeader] == kEmptyAddr) { + copysetInfo[kLeader] == kEmptyAddr) { std::vector peers; curve::common::SplitString(copysetInfo[kPeers], " ", &peers); noLeaderCopysetsPeers[groupId] = peers; continue; }
if (queryLeader) { - // 向leader发送rpc请求 + // Send an RPC request to the leader auto pos = copysetInfo[kLeader].rfind(":"); auto csAddr = copysetInfo[kLeader].substr(0, pos); csAddrMap[csAddr].emplace(groupId);
@@ -247,25 +251,25 @@ copysets_[kNoLeader].emplace(groupId); isHealthy = false; } else { - // 其他情况有ERROR,UNINITIALIZED,SHUTTING和SHUTDOWN,这种都认为不健康,统计到 - // copyset里面 + // Other states (ERROR, UNINITIALIZED, SHUTTING, and SHUTDOWN) are + // considered unhealthy and are counted into the copyset + // statistics. std::string key = "state " + copysetInfo[kState]; copysets_[key].emplace(groupId); isHealthy = false; } } - // 遍历没有leader的copyset - bool health = CheckCopysetsNoLeader(chunkserverAddr, - noLeaderCopysetsPeers); + // Traverse copysets without leaders bool health = CheckCopysetsNoLeader(chunkserverAddr, noLeaderCopysetsPeers); if (!health) { isHealthy = false; } - // 遍历chunkserver发送请求 + // Traverse the chunkservers and send requests for (const auto& item : csAddrMap) { - ChunkServerHealthStatus res = CheckCopysetsOnChunkServer(item.first, - item.second); + ChunkServerHealthStatus res = + CheckCopysetsOnChunkServer(item.first, item.second); if (res != ChunkServerHealthStatus::kHealthy) { isHealthy = false; }
@@ -277,10 +281,9 @@ } } -bool CopysetCheckCore::CheckCopysetsNoLeader(const std::string& csAddr, - const std::map>& - copysetsPeers) { +bool CopysetCheckCore::CheckCopysetsNoLeader( + const std::string& csAddr, + const std::map>& copysetsPeers) { if (copysetsPeers.empty()) { return true; }
@@ -296,13 +299,12 @@ bool CopysetCheckCore::CheckCopysetsNoLeader(const std::string& csAddr, return false; } for (const auto& item : result) { - // 如果在配置组中,检查是否是majority offline + // If it is in the configuration group, check whether the majority of + // peers are offline if (item.second) { isHealthy = false; std::string groupId = item.first; - CheckResult checkRes = CheckPeerOnlineStatus( - groupId, - copysetsPeers.at(item.first)); + CheckResult checkRes = + CheckPeerOnlineStatus(groupId, copysetsPeers.at(item.first)); if (checkRes == CheckResult::kMajorityPeerNotOnline) { copysets_[kMajorityPeerNotOnline].emplace(groupId); continue;
@@ -313,9 +315,9 @@ return isHealthy; } -int CopysetCheckCore::CheckIfChunkServerInCopysets(const std::string& csAddr, - const std::set copysets, - std::map* result) { +int CopysetCheckCore::CheckIfChunkServerInCopysets( + const std::string& csAddr, const std::set copysets, + std::map* result) { PoolIdType logicPoolId; std::vector copysetIds; for (const auto& gId : copysets) {
@@ -330,8 +332,8 @@ } std::vector csServerInfos; - int res = mdsClient_->GetChunkServerListInCopySets(logicPoolId, - copysetIds, &csServerInfos); + int res = mdsClient_->GetChunkServerListInCopySets(logicPoolId, copysetIds, + &csServerInfos); if (res != 0) { std::cout << "GetChunkServerListInCopySets fail!"
<< std::endl; return res;
@@ -340,8 +342,8 @@ int CopysetCheckCore::CheckIfChunkServerInCopysets(const std::string& csAddr, CopySetIdType copysetId = info.copysetid(); std::string groupId = ToGroupId(logicPoolId, copysetId); for (const auto& csLoc : info.cslocs()) { - std::string addr = csLoc.hostip() + ":" - + std::to_string(csLoc.port()); + std::string addr = + csLoc.hostip() + ":" + std::to_string(csLoc.port()); if (addr == csAddr) { (*result)[groupId] = true; break;
@@ -351,22 +353,23 @@ return 0; } -int CopysetCheckCore::CheckCopysetsOnServer(const ServerIdType& serverId, - std::vector* unhealthyChunkServers) { +int CopysetCheckCore::CheckCopysetsOnServer( + const ServerIdType& serverId, + std::vector* unhealthyChunkServers) { Clear(); return CheckCopysetsOnServer(serverId, "", true, unhealthyChunkServers); } -int CopysetCheckCore::CheckCopysetsOnServer(const std::string& serverIp, - std::vector* unhealthyChunkServers) { +int CopysetCheckCore::CheckCopysetsOnServer( + const std::string& serverIp, + std::vector* unhealthyChunkServers) { Clear(); return CheckCopysetsOnServer(0, serverIp, true, unhealthyChunkServers); } void CopysetCheckCore::ConcurrentCheckCopysetsOnServer( - const std::vector &chunkservers, - uint32_t *index, std::map> *result) { + const std::vector& chunkservers, uint32_t* index, + std::map>* result) { while (1) { indexMutex.lock(); if (*index + 1 > chunkservers.size()) {
@@ -386,11 +389,11 @@ } } -int CopysetCheckCore::CheckCopysetsOnServer(const ServerIdType& serverId, - const std::string& serverIp, bool queryLeader, - std::vector* unhealthyChunkServers) { +int CopysetCheckCore::CheckCopysetsOnServer( + const ServerIdType& serverId, const std::string& serverIp, bool queryLeader, + std::vector* unhealthyChunkServers) { bool isHealthy = true; - // 向mds发送RPC + // Send RPC to mds int res = 0; std::vector chunkservers; if (serverId > 0) {
@@ -406,16 +409,15 @@ std::map> queryCsResult; uint32_t index = 0; for (uint64_t i = 0; i < FLAGS_rpcConcurrentNum; i++) { - threadpool.emplace_back(Thread( - &CopysetCheckCore::ConcurrentCheckCopysetsOnServer, - this, std::ref(chunkservers), &index, - &queryCsResult)); + threadpool.emplace_back( + Thread(&CopysetCheckCore::ConcurrentCheckCopysetsOnServer, this, + std::ref(chunkservers), &index, &queryCsResult)); } - for (auto &thread : threadpool) { + for (auto& thread : threadpool) { thread.join(); } - for (auto &record : queryCsResult) { + for (auto& record : queryCsResult) { std::string chunkserverAddr = record.first; auto res = CheckCopysetsOnChunkServer(chunkserverAddr, {}, queryLeader, &record.second, false);
@@ -429,7 +431,7 @@ if (isHealthy) { return 0; - } else { + } else { return -1; } }
@@ -450,18 +452,21 @@ int CopysetCheckCore::CheckCopysetsInCluster() { isHealthy = false; } } - // 检查从chunkserver上获取的copyset数量与mds记录的数量是否一致 + // Check whether the number of copysets obtained from the chunkservers + // matches the number recorded in mds res = CheckCopysetsWithMds(); if (res != 0) { std::cout << "CheckCopysetNumWithMds fail!" << std::endl; return -1; } - // 如果不健康,直接返回,如果健康,还需要对operator作出判断 + // If not healthy, return directly. 
If healthy, the operators still + // need to be checked if (!isHealthy) { return -1; } - // 默认不检查operator,在测试脚本之类的要求比较严格的地方才检查operator,不然 - // 每次执行命令等待30秒很不方便 + // By default, operators are not checked; they are only checked where the + // requirements are strict, such as in test scripts. Otherwise, waiting 30 + // seconds every time a command is executed would be inconvenient if (FLAGS_checkOperator) { int res = CheckOperator(kTotalOpName, FLAGS_operatorMaxPeriod); if (res != 0) {
@@ -482,21 +487,22 @@ int CopysetCheckCore::CheckCopysetsWithMds() { if (copysetsInMds.size() != copysets_[kTotal].size()) { std::cout << "Copyset numbers in chunkservers not consistent" " with mds, please check! copysets on chunkserver: " - << copysets_[kTotal].size() << ", copysets in mds: " - << copysetsInMds.size() << std::endl; + << copysets_[kTotal].size() + << ", copysets in mds: " << copysetsInMds.size() << std::endl; return -1; } std::set copysetsInMdsGid; for (const auto& copyset : copysetsInMds) { - std::string gId = ToGroupId(copyset.logicalpoolid(), - copyset.copysetid()); + std::string gId = + ToGroupId(copyset.logicalpoolid(), copyset.copysetid()); copysetsInMdsGid.insert(gId); } int ret = 0; std::vector copysetsInMdsNotInCs(10); - auto iter = std::set_difference(copysetsInMdsGid.begin(), - copysetsInMdsGid.end(), copysets_[kTotal].begin(), - copysets_[kTotal].end(), copysetsInMdsNotInCs.begin()); + auto iter = + std::set_difference(copysetsInMdsGid.begin(), copysetsInMdsGid.end(), + copysets_[kTotal].begin(), copysets_[kTotal].end(), + copysetsInMdsNotInCs.begin()); copysetsInMdsNotInCs.resize(iter - copysetsInMdsNotInCs.begin()); if (!copysetsInMdsNotInCs.empty()) { std::cout << "There are " << copysetsInMdsNotInCs.size()
@@ -508,9 +514,10 @@ ret = -1; } std::vector copysetsInCsNotInMds(10); - iter = std::set_difference(copysets_[kTotal].begin(), - copysets_[kTotal].end(), copysetsInMdsGid.begin(), - copysetsInMdsGid.end(), copysetsInCsNotInMds.begin()); + iter = + std::set_difference(copysets_[kTotal].begin(), copysets_[kTotal].end(), + copysetsInMdsGid.begin(), copysetsInMdsGid.end(), + copysetsInCsNotInMds.begin()); copysetsInCsNotInMds.resize(iter - copysetsInCsNotInMds.begin()); if (!copysetsInCsNotInMds.empty()) { std::cout << "There are " << copysetsInCsNotInMds.size()
@@ -542,8 +549,8 @@ int CopysetCheckCore::CheckScanStatus( continue; } - auto groupId = ToGroupId(copysetInfo.logicalpoolid(), - copysetInfo.copysetid()); + auto groupId = + ToGroupId(copysetInfo.logicalpoolid(), copysetInfo.copysetid()); copysets_[kThreeCopiesInconsistent].emplace(groupId); count++; }
@@ -565,37 +572,41 @@ int CopysetCheckCore::CheckOperator(const std::string& opName, if (opNum != 0) { return opNum; } - if (curve::common::TimeUtility::GetTimeofDaySec() - - startTime >= checkTimeSec) { + if (curve::common::TimeUtility::GetTimeofDaySec() - startTime >= + checkTimeSec) { break; } sleep(1); - } while (curve::common::TimeUtility::GetTimeofDaySec() - - startTime < checkTimeSec); + } while (curve::common::TimeUtility::GetTimeofDaySec() - startTime < + checkTimeSec); return 0; } -// 每个copyset的信息都会存储在一个map里面,map的key有 -// groupId: 复制组的groupId -// peer_id: 10.182.26.45:8210:0格式的peer id -// state: 节点的状态,LEADER,FOLLOWER,CANDIDATE等等 -// peers: 配置组里的成员,通过空格分隔 -// last_log_id: 最后一个log entry的index -// leader: state为LEADER时才存在这个key,指向复制组leader +// Information for each copyset is stored in a map. The map's keys include: +// - groupId: The groupId of the replication group. 
+// - peer_id: The peer id in the format 10.182.26.45:8210:0. +// - state: The node's state, which can be LEADER, FOLLOWER, CANDIDATE, etc. +// - peers: Members in the configuration group, separated by spaces. +// - last_log_id: The index of the last log entry. +// - leader: This key exists only when the state is LEADER and points to the +// leader of the replication group. // -// replicator_1: 第一个follower的复制状态,value如下: -// next_index=6349842 flying_append_entries_size=0 idle hc=1234 ac=123 ic=0 -// next_index为下一个要发送给该follower的index -// flying_append_entries_size是发出去还未确认的entry的数量 -// idle表明没有在安装快照,如果在安装快照的话是installing snapshot {12, 3}, -// 1234和3分别是快照包含的最后一个log entry的index和term -// hc,ac,ic分别是发向follower的heartbeat,append entry, -// 和install snapshot的rpc的数量 +// replicator_1: The replication status of the first follower, with values as +// follows: next_index=6349842 flying_append_entries_size=0 idle hc=1234 ac=123 +// ic=0 +// - next_index: The next index to be sent to this follower. +// - flying_append_entries_size: The number of unconfirmed entries that have +// been sent. +// - idle: Indicates that no snapshot is being installed. If a snapshot is +// being installed, it will show as "installing snapshot {1234, 3}", +// where 1234 and 3 are the index and term of the last log entry included +// in the snapshot. +// - hc, ac, ic: The counts of RPCs sent to the follower for heartbeat, +// append entry, and install snapshot, respectively. void CopysetCheckCore::ParseResponseAttachment( - const std::set& gIds, - butil::IOBuf* iobuf, - CopySetInfosType* copysetInfos, - bool saveIobufStr) { + const std::set& gIds, butil::IOBuf* iobuf, + CopySetInfosType* copysetInfos, bool saveIobufStr) { butil::IOBuf copyset; iobuf->append("\r\n"); while (iobuf->cut_until(&copyset, "\r\n\r\n") == 0) {
@@ -629,7 +640,7 @@ void CopysetCheckCore::ParseResponseAttachment( continue; } } - // 找到了copyset + // Found the copyset auto pos = line.npos; if (line.find(kReplicator) != line.npos) { pos = line.rfind(":");
@@ -640,7 +651,7 @@ continue; } std::string key = line.substr(0, pos); - // 如果是replicator,把key简化一下 + // If it's a replicator, simplify the key if (key.find(kReplicator) != key.npos) { key = kReplicator + std::to_string(i); ++i;
@@ -660,10 +671,11 @@ } int CopysetCheckCore::QueryChunkServer(const std::string& chunkserverAddr, - butil::IOBuf* iobuf) { + butil::IOBuf* iobuf) { // unit test will set csClient_ to mock - auto csClient = (csClient_ == nullptr) ? - std::make_shared() : csClient_; + auto csClient = (csClient_ == nullptr) + ? std::make_shared() + : csClient_; int res = csClient->Init(chunkserverAddr); if (res != 0) { std::cout << "Init chunkserverClient fail!" << std::endl;
@@ -673,8 +685,7 @@ int CopysetCheckCore::QueryChunkServer(const std::string& chunkserverAddr, } void CopysetCheckCore::UpdateChunkServerCopysets( - const std::string& csAddr, - const CopySetInfosType& copysetInfos) { + const std::string& csAddr, const CopySetInfosType& copysetInfos) { std::set copysetIds; for (const auto& copyset : copysetInfos) { copysetIds.emplace(copyset.at(kGroupId));
@@ -682,11 +693,12 @@ chunkserverCopysets_[csAddr] = copysetIds; }
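A hedged sketch of turning one replicator value of the form documented above ("next_index=6349842 flying_append_entries_size=0 idle hc=1234 ac=123 ic=0") into numeric fields, reusing the SplitString/StringToUll helpers this file already depends on; the real parsing lives in ParseResponseAttachment and CheckHealthOnLeader and differs in detail:

    // Sketch: split a replicator value into key/value pairs.
    std::map<std::string, uint64_t> ParseReplicatorValue(const std::string& value) {
        std::map<std::string, uint64_t> kv;
        std::vector<std::string> fields;
        curve::common::SplitString(value, " ", &fields);
        for (const auto& field : fields) {
            auto pos = field.find("=");
            if (pos == std::string::npos) continue;  // bare tokens such as "idle"
            uint64_t num = 0;
            if (curve::common::StringToUll(field.substr(pos + 1), &num)) {
                kv[field.substr(0, pos)] = num;  // next_index, hc, ac, ic, ...
            }
        }
        return kv;
    }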
-// 通过发送RPC检查chunkserver是否在线 +// Check if chunkserver is online by sending RPC bool CopysetCheckCore::CheckChunkServerOnline( - const std::string& chunkserverAddr) { - auto csClient = (csClient_ == nullptr) ? - std::make_shared() : csClient_; + const std::string& chunkserverAddr) { + auto csClient = (csClient_ == nullptr) + ? std::make_shared() + : csClient_; int res = csClient->Init(chunkserverAddr); if (res != 0) { std::cout << "Init chunkserverClient fail!" << std::endl;
@@ -718,7 +730,7 @@ bool CopysetCheckCore::CheckCopySetOnline(const std::string& csAddr, butil::IOBuf iobuf; int res = QueryChunkServer(csAddr, &iobuf); if (res != 0) { - // 如果查询chunkserver失败,认为不在线 + // If the query for chunkserver fails, it is considered offline serviceExceptionChunkServers_.emplace(csAddr); chunkserverCopysets_[csAddr] = {}; return false;
@@ -727,7 +739,7 @@ ParseResponseAttachment({}, &iobuf, &copysetInfos); UpdateChunkServerCopysets(csAddr, copysetInfos); bool online = (chunkserverCopysets_[csAddr].find(groupId) != - chunkserverCopysets_[csAddr].end()); + chunkserverCopysets_[csAddr].end()); if (!online) { copysetLoacExceptionChunkServers_.emplace(csAddr); }
@@ -735,8 +747,7 @@ } CheckResult CopysetCheckCore::CheckPeerOnlineStatus( - const std::string& groupId, - const std::vector& peers) { + const std::string& groupId, const std::vector& peers) { int notOnlineNum = 0; for (const auto& peer : peers) { auto pos = peer.rfind(":");
@@ -762,20 +773,20 @@ } CheckResult CopysetCheckCore::CheckHealthOnLeader( - std::map* map) { - // 先判断peers是否小于3 + std::map* map) { + // First, check whether there are fewer peers than expected std::vector peers; curve::common::SplitString((*map)[kPeers], " ", &peers); if (peers.size() < FLAGS_replicasNum) { return CheckResult::kPeersNoSufficient; } std::string groupId = (*map)[kGroupId]; - // 检查不在线peer的数量 + // Check the number of offline peers CheckResult checkRes = CheckPeerOnlineStatus(groupId, peers); if (checkRes != CheckResult::kHealthy) { return checkRes; } - // 根据replicator的情况判断log index之间的差距 + // Judge the gap between log indices based on the replicators' status uint64_t lastLogId; std::string str = (*map)[kStorage]; auto pos1 = str.find("=");
@@ -785,7 +796,7 @@ return CheckResult::kParseError; } bool res = curve::common::StringToUll(str.substr(pos1 + 1, pos2 - pos1 - 1), - &lastLogId); + &lastLogId); if (!res) { std::cout << "parse last log id from string fail!" << std::endl; return CheckResult::kParseError; }
@@ -805,16 +816,15 @@ } } if (info.substr(0, pos) == kNextIndex) { - res = curve::common::StringToUll( - info.substr(pos + 1), &nextIndex); + res = curve::common::StringToUll(info.substr(pos + 1), + &nextIndex); if (!res) { std::cout << "parse next index fail!" << std::endl; return CheckResult::kParseError; } } if (info.substr(0, pos) == "flying_append_entries_size") { - res = curve::common::StringToUll(info.substr(pos + 1), - &flying); + res = curve::common::StringToUll(info.substr(pos + 1), &flying); if (!res) { std::cout << "parse flying_size fail!" << std::endl; return CheckResult::kParseError; }
@@ -835,8 +845,8 @@ void CopysetCheckCore::UpdatePeerNotOnlineCopysets(const std::string& csAddr) { std::vector copysets; int res = mdsClient_->GetCopySetsInChunkServer(csAddr, &copysets); if (res != 0) { - std::cout << "GetCopySetsInChunkServer " << csAddr - << " fail!" << std::endl; + std::cout << "GetCopySetsInChunkServer " << csAddr << " fail!"
+ << std::endl; return; } else if (copysets.empty()) { std::cout << "No copysets on chunkserver " << csAddr << std::endl;
@@ -849,26 +859,24 @@ void CopysetCheckCore::UpdatePeerNotOnlineCopysets(const std::string& csAddr) { copysetIds.emplace_back(csInfo.copysetid()); } - // 获取每个copyset的成员 + // Get the members of each copyset std::vector csServerInfos; - res = mdsClient_->GetChunkServerListInCopySets(logicalPoolId, - copysetIds, &csServerInfos); + res = mdsClient_->GetChunkServerListInCopySets(logicalPoolId, copysetIds, + &csServerInfos); if (res != 0) { std::cout << "GetChunkServerListInCopySets fail" << std::endl; return; } - // 遍历每个copyset + // Traverse each copyset for (const auto& info : csServerInfos) { std::vector peers; for (const auto& csLoc : info.cslocs()) { - std::string peer = csLoc.hostip() + ":" - + std::to_string(csLoc.port()) + ":0"; + std::string peer = + csLoc.hostip() + ":" + std::to_string(csLoc.port()) + ":0"; peers.emplace_back(peer); } CopySetIdType copysetId = info.copysetid(); - std::string groupId = ToGroupId(logicalPoolId, - copysetId); + std::string groupId = ToGroupId(logicalPoolId, copysetId); CheckResult checkRes = CheckPeerOnlineStatus(groupId, peers); if (checkRes == CheckResult::kMinorityPeerNotOnline) { copysets_[kMinorityPeerNotOnline].emplace(groupId);
@@ -889,9 +897,8 @@ CopysetStatistics CopysetCheckCore::GetCopysetStatistics() { if (item.first == kTotal) { total = item.second.size(); } else { - // 求并集 - unhealthyCopysets.insert(item.second.begin(), - item.second.end()); + // Union + unhealthyCopysets.insert(item.second.begin(), item.second.end()); } } uint64_t unhealthyNum = unhealthyCopysets.size();
@@ -907,7 +914,7 @@ void CopysetCheckCore::Clear() { } int CopysetCheckCore::ListMayBrokenVolumes( - std::vector* fileNames) { + std::vector* fileNames) { int res = CheckCopysetsOnOfflineChunkServer(); if (res != 0) { std::cout << "CheckCopysetsOnOfflineChunkServer fail" << std::endl; return -1; }
@@ -928,10 +935,10 @@ } void CopysetCheckCore::GetCopysetInfos(const char* key, - std::vector* copysets) { + std::vector* copysets) { (void)key; for (auto iter = copysets_[kMajorityPeerNotOnline].begin(); - iter != copysets_[kMajorityPeerNotOnline].end(); ++iter) { + iter != copysets_[kMajorityPeerNotOnline].end(); ++iter) { std::string gid = *iter; uint64_t groupId; if (!curve::common::StringToUll(gid, &groupId)) {
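The groupId strings converted back and forth above are, going by the ToGroupId/GetPoolID/GetCopysetID helpers imported from chunkserver_common.h, a 64-bit value printed in decimal: the logical pool id in the high 32 bits and the copyset id in the low 32 bits. A sketch of the round trip (the helpers' exact signatures are not shown in this patch):

    // Assumed encoding of a groupId string.
    uint64_t gid = (static_cast<uint64_t>(logicalPoolId) << 32) | copysetId;
    std::string groupId = std::to_string(gid);                // what ToGroupId yields
    uint32_t poolId = static_cast<uint32_t>(gid >> 32);       // GetPoolID
    uint32_t csId = static_cast<uint32_t>(gid & 0xFFFFFFFF);  // GetCopysetID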
diff --git a/src/tools/copyset_check_core.h b/src/tools/copyset_check_core.h index 6e93a373c7..157ddf2458 100644 --- a/src/tools/copyset_check_core.h +++ b/src/tools/copyset_check_core.h
@@ -25,38 +25,38 @@ #include #include -#include +#include #include +#include #include -#include -#include -#include #include -#include +#include +#include #include +#include +#include "include/chunkserver/chunkserver_common.h" #include "proto/topology.pb.h" -#include "src/mds/common/mds_define.h" +#include "src/common/concurrent/concurrent.h" #include "src/common/string_util.h" -#include "src/tools/mds_client.h" +#include "src/mds/common/mds_define.h" #include "src/tools/chunkserver_client.h" -#include "src/tools/metric_name.h" #include "src/tools/curve_tool_define.h" -#include "include/chunkserver/chunkserver_common.h" -#include "src/common/concurrent/concurrent.h" +#include "src/tools/mds_client.h" +#include "src/tools/metric_name.h" -using curve::mds::topology::PoolIdType; -using curve::mds::topology::CopySetIdType; -using curve::mds::topology::ChunkServerIdType; -using curve::mds::topology::ServerIdType; -using curve::mds::topology::kTopoErrCodeSuccess; -using curve::mds::topology::OnlineState; -using curve::mds::topology::ChunkServerStatus; -using curve::chunkserver::ToGroupId; -using curve::chunkserver::GetPoolID; using curve::chunkserver::GetCopysetID; +using curve::chunkserver::GetPoolID; +using curve::chunkserver::ToGroupId; using curve::common::Mutex; using curve::common::Thread; +using curve::mds::topology::ChunkServerIdType; +using curve::mds::topology::ChunkServerStatus; +using curve::mds::topology::CopySetIdType; +using curve::mds::topology::kTopoErrCodeSuccess; +using curve::mds::topology::OnlineState; +using curve::mds::topology::PoolIdType; +using curve::mds::topology::ServerIdType; namespace curve { namespace tool {
@@ -65,32 +65,31 @@ using CopySet = std::pair; using CopySetInfosType = std::vector>; enum class CheckResult { - // copyset健康 + // The copyset is healthy kHealthy = 0, - // 解析结果失败 + // Failed to parse the result kParseError = -1, - // peer数量小于预期 - kPeersNoSufficient = -2, - // 副本间的index差距太大 + // The number of peers is less than expected + kPeersNoSufficient = -2, + // The index difference between replicas is too large kLogIndexGapTooBig = -3, - // 有副本在安装快照 + // A replica is installing a snapshot kInstallingSnapshot = -4, - // 少数副本不在线 + // A minority of replicas are not online kMinorityPeerNotOnline = -5, - // 大多数副本不在线 + // A majority of replicas are not online kMajorityPeerNotOnline = -6, kOtherErr = -7 }; enum class ChunkServerHealthStatus { - kHealthy = 0, // chunkserver上所有copyset健康 - kNotHealthy = -1, // chunkserver上有copyset不健康 - kNotOnline = -2 // chunkserver不在线 + kHealthy = 0, // All copysets on the chunkserver are healthy + kNotHealthy = -1, // Some copysets on the chunkserver are unhealthy + kNotOnline = -2 // The chunkserver is not online }; struct CopysetStatistics { - CopysetStatistics() : - totalNum(0), unhealthyNum(0), unhealthyRatio(0) {} + CopysetStatistics() : totalNum(0), unhealthyNum(0), unhealthyRatio(0) {} CopysetStatistics(uint64_t total, uint64_t unhealthy); uint64_t totalNum; uint64_t unhealthyNum;
@@ -109,102 +108,108 @@ const char kThreeCopiesInconsistent[] = "Three copies inconsistent"; class CopysetCheckCore { public: CopysetCheckCore(std::shared_ptr mdsClient, - std::shared_ptr csClient = nullptr) : - mdsClient_(mdsClient), csClient_(csClient) {} + std::shared_ptr csClient = nullptr) + : mdsClient_(mdsClient), csClient_(csClient) {} virtual ~CopysetCheckCore() = default; /** - * @brief 初始化mds client - * @param mdsAddr mds的地址,支持多地址,用","分隔 - * @return 成功返回0,失败返回-1 + * @brief Initialize mds client + * @param mdsAddr Address of mds, supporting multiple addresses separated by + * ',' + * @return Returns 0 for success, -1 for failure */ virtual int Init(const std::string& mdsAddr); /** - * @brief check health of one copyset - * - * @param logicalPoolId - * @param copysetId - * - * @return error code - */ + * @brief check health of one copyset + * + * @param logicalPoolId + * @param copysetId + * + * @return error code + */ virtual CheckResult CheckOneCopyset(const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId); + const CopySetIdType& copysetId); /** - * @brief 
检查某个chunkserver上的所有copyset的健康状态 - * - * @param chunkserId chunkserverId - * - * @return 健康返回0,不健康返回-1 - */ + * @brief Check the health status of all copysets on a certain chunkserver + * + * @param chunkserverId chunkserver ID + * + * @return Returns 0 if healthy, -1 if unhealthy + */ virtual int CheckCopysetsOnChunkServer( - const ChunkServerIdType& chunkserverId); + const ChunkServerIdType& chunkserverId); /** - * @brief 检查某个chunkserver上的所有copyset的健康状态 - * - * @param chunkserAddr chunkserver地址 - * - * @return 健康返回0,不健康返回-1 - */ + * @brief Check the health status of all copysets on a certain chunkserver + * + * @param chunkserverAddr chunkserver address + * + * @return Returns 0 if healthy, -1 if unhealthy + */ virtual int CheckCopysetsOnChunkServer(const std::string& chunkserverAddr); /** - * @brief Check copysets on offline chunkservers - */ + * @brief Check copysets on offline chunkservers + */ virtual int CheckCopysetsOnOfflineChunkServer(); /** - * @brief 检查某个server上的所有copyset的健康状态 - * - * @param serverId server的id - * @param[out] unhealthyChunkServers 可选参数,server上copyset不健康的chunkserver的列表 - * - * @return 健康返回0,不健康返回-1 - */ - virtual int CheckCopysetsOnServer(const ServerIdType& serverId, - std::vector* unhealthyChunkServers = nullptr); + * @brief Check the health status of all copysets on a server + * + * @param serverId Server ID + * @param[out] unhealthyChunkServers optional parameter; a list of + * chunkservers on the server that have unhealthy copysets + * + * @return Returns 0 if healthy, -1 if unhealthy + */ + virtual int CheckCopysetsOnServer( + const ServerIdType& serverId, + std::vector* unhealthyChunkServers = nullptr); /** - * @brief 检查某个server上的所有copyset的健康状态 - * - * @param serverId server的ip - * @param[out] unhealthyChunkServers 可选参数,server上copyset不健康的chunkserver的列表 - * - * @return 健康返回0,不健康返回-1 - */ - virtual int CheckCopysetsOnServer(const std::string& serverIp, - std::vector* unhealthyChunkServers = nullptr); + * @brief Check the health status of all copysets on a server + * + * @param serverIp IP of the server + * @param[out] unhealthyChunkServers optional parameter; a list of + * chunkservers on the server that have unhealthy copysets + * + * @return Returns 0 if healthy, -1 if unhealthy + */ + virtual int CheckCopysetsOnServer( + const std::string& serverIp, + std::vector* unhealthyChunkServers = nullptr); /** - * @brief 检查集群中所有copyset的健康状态 - * - * @return 健康返回0,不健康返回-1 - */ + * @brief Check the health status of all copysets in the cluster + * + * @return Returns 0 if healthy, -1 if unhealthy + */ virtual int CheckCopysetsInCluster(); /** - * @brief 检查集群中的operator - * @param opName operator的名字 - * @param checkTimeSec 检查时间 - * @return 检查正常返回0,检查失败或存在operator返回-1 - */ - virtual int CheckOperator(const std::string& opName, - uint64_t checkTimeSec); + * @brief Check the operators in the cluster + * @param opName The name of the operator + * @param checkTimeSec Check duration in seconds + * @return Returns 0 if the check passes, or -1 if the check fails or + * an operator is present + */ + virtual int CheckOperator(const std::string& opName, uint64_t checkTimeSec); /** - * @brief 计算不健康的copyset的比例,检查后调用 - * @return 不健康的copyset的比例 + * @brief Calculate the proportion of unhealthy copysets; call after a check + * @return The proportion of unhealthy copysets */ virtual CopysetStatistics GetCopysetStatistics(); /** - * @brief 获取copyset的列表,通常检查后会调用,然后打印出来 - * @return copyset的列表 + * @brief Obtain the list of copysets, usually called after a check and + * then printed + * @return List of copysets */ virtual const std::map>& GetCopysetsRes() - const { + const { return copysets_; }
@@ -212,112 +217,119 @@ class CopysetCheckCore { * @brief Get copysets info for specified copysets */ virtual void GetCopysetInfos(const char* key, - std::vector* copysets); + std::vector* copysets); /** - * @brief 获取copyset的详细信息 - * @return copyset的详细信息 + * @brief Get detailed information about the copyset + * @return Details of the copyset */ 
virtual const std::string& GetCopysetDetail() const { return copysetsDetail_; } /** - * @brief 获取检查过程中服务异常的chunkserver列表,通常检查后会调用,然后打印出来 - * @return 服务异常的chunkserver的列表 + * @brief Obtain the list of chunkservers with service exceptions found + * during the check, usually called after the check and then printed + * @return List of chunkservers with service exceptions */ virtual const std::set& GetServiceExceptionChunkServer() - const { + const { return serviceExceptionChunkServers_; } /** - * @brief 获取检查过程中copyset寻找失败的chunkserver列表,通常检查后会调用,然后打印出来 - * @return copyset加载异常的chunkserver的列表 + * @brief Obtain the list of chunkservers on which copysets failed to load + * during the check, usually called after the check and then printed + * @return List of chunkservers with copyset loading exceptions */ virtual const std::set& GetCopysetLoadExceptionChunkServer() - const { + const { return copysetLoacExceptionChunkServers_; } /** - * @brief 通过发送RPC检查chunkserver是否在线 - * - * @param chunkserverAddr chunkserver的地址 - * - * @return 在线返回true,不在线返回false - */ + * @brief Check if the chunkserver is online by sending an RPC + * + * @param chunkserverAddr Address of the chunkserver + * + * @return Returns true if online, false if offline + */ virtual bool CheckChunkServerOnline(const std::string& chunkserverAddr); /** - * @brief List volumes on majority peers offline copysets - * - * @param fileNames affected volumes - * - * @return return 0 when sucess, otherwise return -1 - */ + * @brief List volumes on majority peers offline copysets + * + * @param fileNames affected volumes + * + * @return Returns 0 on success, otherwise returns -1 + */ virtual int ListMayBrokenVolumes(std::vector* fileNames); private: /** - * @brief 从iobuf分析出指定groupId的复制组的信息, - * 每个复制组的信息都放到一个map里面 - * - * @param gIds 要查询的复制组的groupId,为空的话全部查询 - * @param iobuf 要分析的iobuf - * @param[out] maps copyset信息的列表,每个copyset的信息都是一个map - * @param saveIobufStr 是否要把iobuf里的详细内容存下来 - * - */ + * @brief Parse the information of the replication groups with the + * specified groupIds from the iobuf; each replication group's information + * is placed in a map + * + * @param gIds: The groupIds of the replication groups to query; if empty, + * all groups are queried + * @param iobuf The iobuf to analyze + * @param[out] maps A list of copyset information, where each copyset's + * information is a map + * @param saveIobufStr Whether to save the detailed content of the iobuf + * + */ void ParseResponseAttachment(const std::set& gIds, - butil::IOBuf* iobuf, - CopySetInfosType* copysetInfos, - bool saveIobufStr = false); + butil::IOBuf* iobuf, + CopySetInfosType* copysetInfos, + bool saveIobufStr = false); /** - * @brief 检查某个chunkserver上的所有copyset的健康状态 - * - * @param chunkserId chunkserverId - * @param chunkserverAddr chunkserver的地址,两者指定一个就好 - * - * @return 健康返回0,不健康返回-1 - */ + * @brief Check the health status of all copysets on a certain chunkserver + * + * @param chunkserverId chunkserverId + * @param chunkserverAddr chunkserver address; just specify one of the two + * + * @return Returns 0 if healthy, -1 if unhealthy + */ int CheckCopysetsOnChunkServer(const ChunkServerIdType& chunkserverId, const std::string& chunkserverAddr); /** - * @brief check copysets' healthy status on chunkserver - * - * @param[in] chunkserAddr: chunkserver address - * @param[in] groupIds: groupId for check, default is null, check all the copysets - * @param[in] queryLeader: whether send rpc to chunkserver which copyset leader on. 
- * All the chunkserves will be check when check clusters status. - * @param[in] record: raft state rpc response from chunkserver - * @param[in] queryCs: whether send rpc to chunkserver - * - * @return error code - */ + * @brief check copysets' healthy status on chunkserver + * + * @param[in] chunkserverAddr: chunkserver address + * @param[in] groupIds: groupIds to check; default is null, which checks + * all the copysets + * @param[in] queryLeader: whether to send an rpc to the chunkserver that + * the copyset leader is on. All chunkservers will be checked when checking + * the cluster status. + * @param[in] record: raft state rpc response from chunkserver + * @param[in] queryCs: whether to send an rpc to the chunkserver + * + * @return error code + */ ChunkServerHealthStatus CheckCopysetsOnChunkServer( - const std::string& chunkserverAddr, - const std::set& groupIds, - bool queryLeader = true, - std::pair *record = nullptr, - bool queryCs = true); + const std::string& chunkserverAddr, + const std::set& groupIds, bool queryLeader = true, + std::pair* record = nullptr, bool queryCs = true); /** - * @brief 检查某个server上的所有copyset的健康状态 - * - * @param serverId server的id - * @param serverIp server的ip,serverId或serverIp指定一个就好 - * @param queryLeader 是否向leader所在的server发送RPC查询, - * 对于检查cluster来说,所有server都会遍历到,不用查询 - * - * @return 健康返回0,不健康返回-1 - */ - int CheckCopysetsOnServer(const ServerIdType& serverId, - const std::string& serverIp, - bool queryLeader = true, - std::vector* unhealthyChunkServers = nullptr); + * @brief Check the health status of all copysets on a server + * + * @param serverId Server ID + * @param serverIp IP of the server; just specify one of serverId and + * serverIp + * @param queryLeader Whether to send an RPC query to the server where the + * leader is located. When checking the whole cluster, all servers are + * traversed anyway, so no extra query is needed + * + * @return Returns 0 if healthy, -1 if unhealthy + */ + int CheckCopysetsOnServer( + const ServerIdType& serverId, const std::string& serverIp, + bool queryLeader = true, + std::vector* unhealthyChunkServers = nullptr); /** * @brief concurrent check copyset on server * ... * @param[in] result: rpc response from chunkserver */ void ConcurrentCheckCopysetsOnServer( - const std::vector &chunkservers, - uint32_t *index, - std::map> *result); + const std::vector& chunkservers, uint32_t* index, + std::map>* result); /** - * @brief 根据leader的map里面的copyset信息分析出copyset是否健康,健康返回0,否则 - * 否则返回错误码 - * - * @param map leader的copyset信息,以键值对的方式存储 - * - * @return 返回错误码 - */ + * @brief Analyze whether the copyset is healthy based on the copyset + * information in the leader's map, and return 0 if it is healthy; 
otherwise, + * an error code is returned + * + * @param map The leader's copyset information, stored as key-value pairs + * + * @return Returns an error code + */ CheckResult CheckHealthOnLeader(std::map* map); /** - * @brief 向chunkserver发起raft state rpc - * - * @param chunkserverAddr chunkserver的地址 - * @param[out] iobuf 返回的responseattachment,返回0的时候有效 - * - * @return 成功返回0,失败返回-1 - */ + * @brief Initiate a raft state rpc to the chunkserver + * + * @param chunkserverAddr Address of the chunkserver + * @param[out] iobuf The returned response attachment; valid when 0 is + * returned + * + * @return Returns 0 for success, -1 for failure + */ int QueryChunkServer(const std::string& chunkserverAddr, butil::IOBuf* iobuf); /** - * @brief 把chunkserver上所有的copyset更新到peerNotOnline里面 - * - * @param csAddr chunkserver的地址 - * - * @return 无 - */ + * @brief Add all copysets on the chunkserver to the peer-not-online set + * + * @param csAddr Address of the chunkserver + * + * @return None + */ void UpdatePeerNotOnlineCopysets(const std::string& csAddr); /** - * @brief 以mds中的copyset配置组为参照,检查chunkserver是否在copyset的配置组中 - * - * @param csAddr chunkserver的地址 - * @param copysets copyset列表 - * @param[out] result 检查结果,copyset到存在与否的映射 - * - * @return 包含返回true,否则返回false - */ + * @brief Using the copyset configuration groups in mds as a reference, + * check whether the chunkserver is in each copyset's configuration group + * + * @param csAddr Address of the chunkserver + * @param copysets copyset list + * @param[out] result Check result: a mapping from copyset to presence or + * absence + * + * @return Returns true if contained, false otherwise + */ int CheckIfChunkServerInCopysets(const std::string& csAddr, const std::set copysets, std::map* result); /** - * @brief 检查没有leader的copyset是否健康 - * - * @param csAddr chunkserver 地址 - * @param copysetsPeers copyset的groupId到peers的映射 - * - * @return 健康返回true,不健康返回false - */ - bool CheckCopysetsNoLeader(const std::string& csAddr, - const std::map>& - copysetsPeers); + * @brief Check whether the copysets without a leader are healthy + * + * @param csAddr chunkserver address + * @param copysetsPeers Mapping from the copyset's groupId to its peers + * + * @return Returns true if healthy, false if unhealthy + */ + bool CheckCopysetsNoLeader( + const std::string& csAddr, + const std::map>& copysetsPeers); /** - * @brief 清空统计信息 - * - * @return 无 - */ + * @brief Clear statistics + * + * @return None + */ void Clear(); /** - * @brief 获取chunkserver上的copyset的在线状态 - * - * @param csAddr chunkserver地址 - * @param groupId copyset的groupId - * - * @return 在线返回true - */ + * @brief Obtain the online status of the copyset on the chunkserver + * + * @param csAddr chunkserver address + * @param groupId copyset's groupId + * + * @return Returns true if online + */ bool CheckCopySetOnline(const std::string& csAddr, const std::string& groupId); /** - * @brief 获取不在线的peer的数量 - * - * - * @param peers 副本peer的列表ip:port:id的形式 - * - * @return 返回错误码 - */ + * @brief Obtain the number of offline peers + * + * + * @param peers The list of replica peers in the form of ip:port:id + * + * @return Returns an error code + */ CheckResult CheckPeerOnlineStatus(const std::string& groupId, const std::vector& peers); /** - * @brief 更新chunkserver上的copyset的groupId列表 - * - * @param csAddr chunkserver地址 - * @param copysetInfos copyset信息列表 - */ + * @brief Update the groupId list of the copysets on the chunkserver + * + * @param csAddr chunkserver address + * @param copysetInfos copyset information list + */ void UpdateChunkServerCopysets(const std::string& csAddr, - const 
CopySetInfosType& copysetInfos); + const CopySetInfosType& copysetInfos); int CheckCopysetsWithMds(); int CheckScanStatus(const std::vector& copysetInfos); private: - // 向mds发送RPC的client + // Client for sending RPCs to mds std::shared_ptr mdsClient_; // for unittest mock csClient std::shared_ptr csClient_; - // 保存copyset的信息 + // Saves the copyset information std::map> copysets_; - // 用来保存发送RPC失败的那些chunkserver + // Used to save the chunkservers to which sending RPCs failed std::set serviceExceptionChunkServers_; - // 用来保存一些copyset加载有问题的chunkserver + // Used to save the chunkservers that had problems loading some copysets std::set copysetLoacExceptionChunkServers_; - // 用来存放访问过的chunkserver上的copyset列表,避免重复RPC + // Used to store the copyset list on accessed chunkservers to avoid + // duplicate RPCs std::map> chunkserverCopysets_; - // 查询单个copyset的时候,保存复制组的详细信息 + // When querying a single copyset, save the detailed information of the + // replication group std::string copysetsDetail_; const std::string kEmptyAddr = "0.0.0.0:0:0";
diff --git a/src/tools/curve_cli.cpp b/src/tools/curve_cli.cpp index 0dc5dcf46e..60bb516b86 100644 --- a/src/tools/curve_cli.cpp +++ b/src/tools/curve_cli.cpp
@@ -20,23 +20,21 @@ * Author: wudemiao */ -#include #include "src/tools/curve_cli.h" + +#include + #include "src/tools/common.h" -DEFINE_int32(timeout_ms, - -1, "Timeout (in milliseconds) of the operation"); -DEFINE_int32(max_retry, - 3, "Max retry times of each operation"); -DEFINE_string(conf, - "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0", +DEFINE_int32(timeout_ms, -1, "Timeout (in milliseconds) of the operation"); +DEFINE_int32(max_retry, 3, "Max retry times of each operation"); +DEFINE_string(conf, "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0", "Initial configuration of the replication group"); -DEFINE_string(peer, - "", "Id of the operating peer"); -DEFINE_string(new_conf, - "", "new conf to reset peer"); -DEFINE_bool(remove_copyset, false, "Whether need to remove broken copyset " "after remove peer (default: false)"); +DEFINE_string(peer, "", "Id of the operating peer"); +DEFINE_string(new_conf, "", "new conf to reset peer"); +DEFINE_bool(remove_copyset, false, + "Whether need to remove broken copyset " + "after remove peer (default: false)"); DEFINE_bool(affirm, true, "If true, command line interactive affirmation is required." 
@@ -45,26 +43,22 @@ DECLARE_string(mdsAddr); namespace curve { namespace tool { -#define CHECK_FLAG(flagname) \ - do { \ - if ((FLAGS_ ## flagname).empty()) { \ - std::cout << __FUNCTION__ << " requires --" # flagname \ - << std::endl; \ - return -1; \ - } \ - } while (0); \ - +#define CHECK_FLAG(flagname) \ + do { \ + if ((FLAGS_##flagname).empty()) { \ + std::cout << __FUNCTION__ << " requires --" #flagname \ + << std::endl; \ + return -1; \ + } \ + } while (0); bool CurveCli::SupportCommand(const std::string& command) { - return (command == kResetPeerCmd || command == kRemovePeerCmd - || command == kTransferLeaderCmd - || command == kDoSnapshot - || command == kDoSnapshotAll); + return (command == kResetPeerCmd || command == kRemovePeerCmd || + command == kTransferLeaderCmd || command == kDoSnapshot || + command == kDoSnapshotAll); } -int CurveCli::Init() { - return mdsClient_->Init(FLAGS_mdsAddr); -} +int CurveCli::Init() { return mdsClient_->Init(FLAGS_mdsAddr); } butil::Status CurveCli::DeleteBrokenCopyset(braft::PeerId peerId, const LogicPoolID& poolId,
@@ -121,13 +115,13 @@ int CurveCli::RemovePeer() { } // STEP 1: remove peer - butil::Status status = curve::chunkserver::RemovePeer( - poolId, copysetId, conf, peer, opt); + butil::Status status = + curve::chunkserver::RemovePeer(poolId, copysetId, conf, peer, opt); auto succ = status.ok(); - std::cout << "Remove peer " << peerId << " for copyset(" - << poolId << ", " << copysetId << ") " - << (succ ? "success" : "fail") << ", original conf: " << conf - << ", status: " << status << std::endl; + std::cout << "Remove peer " << peerId << " for copyset(" << poolId << ", " + << copysetId << ") " << (succ ? "success" : "fail") + << ", original conf: " << conf << ", status: " << status + << std::endl; if (!succ || !FLAGS_remove_copyset) { return succ ? 0 : -1;
@@ -138,8 +132,8 @@ succ = status.ok(); std::cout << "Delete copyset(" << poolId << ", " << copysetId << ")" << " in " << peerId << (succ ? "success" : "fail") - << ", original conf: " << conf - << ", status: " << status << std::endl; + << ", original conf: " << conf << ", status: " << status + << std::endl; return succ ? 0 : -1; }
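The CHECK_FLAG macro defined at the top of this file token-pastes the flag name; a guard such as CHECK_FLAG(peer) (a reconstruction — the call sites fall outside these hunks) expands roughly to:

    do {
        if ((FLAGS_peer).empty()) {
            std::cout << __FUNCTION__ << " requires --peer" << std::endl;
            return -1;
        }
    } while (0);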
@@ -164,25 +158,19 @@ opt.timeout_ms = FLAGS_timeout_ms; opt.max_retry = FLAGS_max_retry; butil::Status st = curve::chunkserver::TransferLeader( - FLAGS_logicalPoolId, - FLAGS_copysetId, - conf, - targetPeer, - opt); + FLAGS_logicalPoolId, FLAGS_copysetId, conf, targetPeer, opt); if (!st.ok()) { std::cout << "Transfer leader of copyset " - << "(" << FLAGS_logicalPoolId << ", " - << FLAGS_copysetId << ")" - << " to " << targetPeerId - << " fail, original conf: " << conf + << "(" << FLAGS_logicalPoolId << ", " << FLAGS_copysetId + << ")" + << " to " << targetPeerId << " fail, original conf: " << conf << ", detail: " << st << std::endl; return -1; } std::cout << "Transfer leader of copyset " - << "(" << FLAGS_logicalPoolId << ", " - << FLAGS_copysetId << ")" - << " to " << targetPeerId - << " success, original conf: " << conf << std::endl; + << "(" << FLAGS_logicalPoolId << ", " << FLAGS_copysetId << ")" + << " to " << targetPeerId << " success, original conf: " << conf + << std::endl; return 0; }
@@ -217,13 +205,14 @@ } curve::common::Peer requestPeer; requestPeer.set_address(requestPeerId.to_string()); - // 目前reset peer只支持reset为1一个副本,不支持增加副本, - // 因为不能通过工具在chunkserver上创建copyset + // Currently, reset peer only supports resetting to one replica and does + // not support adding replicas, because it is not possible to create a + // copyset on a chunkserver through the tool if (newConf.size() != 1) { std::cout << "New conf can only specify one peer!" << std::endl; return -1; } - // 新的配置必须包含发送RPC的peer + // The new configuration must include the peer that sends the RPC if (*newConf.begin() != requestPeerId) { std::cout << "New conf must include the target peer!" << std::endl; return -1; }
@@ -233,25 +222,20 @@ opt.max_retry = FLAGS_max_retry; butil::Status st = curve::chunkserver::ResetPeer( - FLAGS_logicalPoolId, - FLAGS_copysetId, - newConf, - requestPeer, - opt); + FLAGS_logicalPoolId, FLAGS_copysetId, newConf, requestPeer, opt); if (!st.ok()) { std::cout << "Reset peer of copyset " - << "(" << FLAGS_logicalPoolId << ", " - << FLAGS_copysetId << ")" + << "(" << FLAGS_logicalPoolId << ", " << FLAGS_copysetId + << ")" << " to " << newConf << " fail, requestPeer: " << requestPeerId << ", detail: " << st << std::endl; return -1; } std::cout << "Reset peer of copyset " - << "(" << FLAGS_logicalPoolId << ", " - << FLAGS_copysetId << ")" - << " to " << newConf - << " success, requestPeer: " << requestPeerId << std::endl; + << "(" << FLAGS_logicalPoolId << ", " << FLAGS_copysetId << ")" + << " to " << newConf << " success, requestPeer: " << requestPeerId + << std::endl; return 0; }
@@ -274,15 +258,12 @@ int CurveCli::DoSnapshot(uint32_t lgPoolId, uint32_t copysetId, braft::cli::CliOptions opt; opt.timeout_ms = FLAGS_timeout_ms; opt.max_retry = FLAGS_max_retry; - butil::Status st = curve::chunkserver::Snapshot( - FLAGS_logicalPoolId, - FLAGS_copysetId, - peer, - opt); + butil::Status st = curve::chunkserver::Snapshot(FLAGS_logicalPoolId, + FLAGS_copysetId, peer, opt); if (!st.ok()) { std::cout << "Do snapshot of copyset " - << "(" << FLAGS_logicalPoolId << ", " - << FLAGS_copysetId << ")" + << "(" << FLAGS_logicalPoolId << ", " << FLAGS_copysetId + << ")" << " fail, requestPeer: " << peer.address() << ", detail: " << st << std::endl; return -1;
@@ -301,8 +282,8 @@ int CurveCli::DoSnapshotAll() { braft::cli::CliOptions opt; opt.timeout_ms = FLAGS_timeout_ms; opt.max_retry = FLAGS_max_retry; - std::string csAddr = 
chunkserver.hostip() + ":" + - std::to_string(chunkserver.port()); + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); curve::common::Peer peer; peer.set_address(csAddr); butil::Status st = curve::chunkserver::SnapshotAll(peer, opt); @@ -315,17 +296,27 @@ int CurveCli::DoSnapshotAll() { return res; } -void CurveCli::PrintHelp(const std::string &cmd) { +void CurveCli::PrintHelp(const std::string& cmd) { std::cout << "Example " << std::endl; if (cmd == kResetPeerCmd) { - std::cout << "curve_ops_tool " << cmd << " -logicalPoolId=1 -copysetId=10001 -peer=127.0.0.1:8080:0 " // NOLINT - "-new_conf=127.0.0.1:8080:0 -max_retry=3 -timeout_ms=100" << std::endl; // NOLINT + std::cout << "curve_ops_tool " << cmd + << " -logicalPoolId=1 -copysetId=10001 " + "-peer=127.0.0.1:8080:0 " // NOLINT + "-new_conf=127.0.0.1:8080:0 -max_retry=3 -timeout_ms=100" + << std::endl; // NOLINT } else if (cmd == kRemovePeerCmd || cmd == kTransferLeaderCmd) { - std::cout << "curve_ops_tool " << cmd << " -logicalPoolId=1 -copysetId=10001 -peer=127.0.0.1:8080:0 " // NOLINT - "-conf=127.0.0.1:8080:0,127.0.0.1:8081:0,127.0.0.1:8082:0 -max_retry=3 -timeout_ms=100 -remove_copyset=true/false" << std::endl; // NOLINT + std::cout << "curve_ops_tool " << cmd + << " -logicalPoolId=1 -copysetId=10001 " + "-peer=127.0.0.1:8080:0 " // NOLINT + "-conf=127.0.0.1:8080:0,127.0.0.1:8081:0,127.0.0.1:8082:0 " + "-max_retry=3 -timeout_ms=100 -remove_copyset=true/false" + << std::endl; // NOLINT } else if (cmd == kDoSnapshot) { - std::cout << "curve_ops_tool " << cmd << " -logicalPoolId=1 -copysetId=10001 -peer=127.0.0.1:8080:0 " // NOLINT - "-max_retry=3 -timeout_ms=100" << std::endl; + std::cout << "curve_ops_tool " << cmd + << " -logicalPoolId=1 -copysetId=10001 " + "-peer=127.0.0.1:8080:0 " // NOLINT + "-max_retry=3 -timeout_ms=100" + << std::endl; } else if (cmd == kDoSnapshotAll) { std::cout << "curve_ops_tool " << cmd << std::endl; } else { @@ -333,7 +324,7 @@ void CurveCli::PrintHelp(const std::string &cmd) { } } -int CurveCli::RunCommand(const std::string &cmd) { +int CurveCli::RunCommand(const std::string& cmd) { if (Init() != 0) { std::cout << "Init CurveCli tool failed" << std::endl; return -1; @@ -358,4 +349,3 @@ int CurveCli::RunCommand(const std::string &cmd) { } } // namespace tool } // namespace curve - diff --git a/src/tools/curve_cli.h b/src/tools/curve_cli.h index 24a4944cee..7267262893 100644 --- a/src/tools/curve_cli.h +++ b/src/tools/curve_cli.h @@ -23,64 +23,65 @@ #ifndef SRC_TOOLS_CURVE_CLI_H_ #define SRC_TOOLS_CURVE_CLI_H_ -#include -#include #include #include +#include +#include -#include -#include #include +#include #include +#include #include "include/chunkserver/chunkserver_common.h" -#include "src/chunkserver/copyset_node.h" +#include "proto/copyset.pb.h" #include "src/chunkserver/cli2.h" +#include "src/chunkserver/copyset_node.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" #include "src/tools/mds_client.h" -#include "proto/copyset.pb.h" namespace curve { namespace tool { -using ::curve::chunkserver::LogicPoolID; using ::curve::chunkserver::CopysetID; using ::curve::chunkserver::CopysetRequest; using ::curve::chunkserver::CopysetResponse; using ::curve::chunkserver::CopysetService_Stub; +using ::curve::chunkserver::LogicPoolID; +using ::curve::chunkserver::COPYSET_OP_STATUS:: + COPYSET_OP_STATUS_FAILURE_UNKNOWN; // NOLINT using ::curve::chunkserver::COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS; -using 
::curve::chunkserver::COPYSET_OP_STATUS::COPYSET_OP_STATUS_FAILURE_UNKNOWN; // NOLINT class CurveCli : public CurveTool { public: - explicit CurveCli(std::shared_ptr mdsClient) : - mdsClient_(mdsClient) {} + explicit CurveCli(std::shared_ptr mdsClient) + : mdsClient_(mdsClient) {} /** - * @brief 初始化mds client - * @return 成功返回0,失败返回-1 + * @brief Initialize mds client + * @return Returns 0 for success, -1 for failure */ int Init(); /** - * @brief 打印help信息 - * @param 无 - * @return 无 + * @brief Print help information + * @param None + * @return None */ - void PrintHelp(const std::string &cmd) override; + void PrintHelp(const std::string& cmd) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute the command + * @param cmd: The command to execute + * @return Returns 0 for success, -1 for failure */ - int RunCommand(const std::string &cmd) override; + int RunCommand(const std::string& cmd) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief Returns whether the command is supported + * @param command: The command to execute + * @return true/false */ static bool SupportCommand(const std::string& command);
@@ -97,47 +98,48 @@ class CurveCli : public CurveTool { const CopysetID& copysetId); /** - * @brief 删除peer - * @param 无 - * @return 成功返回0,失败返回-1 + * @brief Delete a peer + * @param None + * @return Returns 0 for success, -1 for failure */ int RemovePeer(); /** - * @brief 转移leader - * @param 无 - * @return 成功返回0,失败返回-1 + * @brief Transfer the leader + * @param None + * @return Returns 0 for success, -1 for failure */ int TransferLeader(); /** - * @brief 触发打快照 - * @param 无 - * @return 成功返回0,失败返回-1 + * @brief Trigger a snapshot + * @param None + * @return Returns 0 for success, -1 for failure */ int DoSnapshot(); /** - * @brief 触发打快照 - * @param lgPoolId 逻辑池id - * @param copysetId 复制组id - * @param peer 复制组成员 - * @return 成功返回0,失败返回-1 + * @brief Trigger a snapshot + * @param lgPoolId Logical pool ID + * @param copysetId Copyset ID + * @param peer Replication group member + * @return Returns 0 for success, -1 for failure */ int DoSnapshot(uint32_t lgPoolId, uint32_t copysetId, const curve::common::Peer& peer); /** - * @brief 给集群中全部copyset node触发打快照 - * @param 无 - * @return 成功返回0,失败返回-1 + * @brief Trigger a snapshot of all copyset nodes in the cluster + * @param None + * @return Returns 0 for success, -1 for failure */ int DoSnapshotAll(); /** - * @brief 重置配置组成员,目前只支持reset成一个成员 - * @param 无 - * @return 成功返回0,失败返回-1 + * @brief Reset configuration group members; currently only supports + * resetting to one member + * @param None + * @return Returns 0 for success, -1 for failure */ int ResetPeer();
diff --git a/src/tools/curve_format_main.cpp b/src/tools/curve_format_main.cpp index 08aa1f62ed..d5f30d9b7b 100644 --- a/src/tools/curve_format_main.cpp +++ b/src/tools/curve_format_main.cpp
@@ -20,45 +20,41 @@ * Author: tongguangxun */ -#include +#include #include +#include #include -#include - -#include -#include // NOLINT -#include // NOLINT #include +#include // NOLINT +#include +#include // NOLINT #include -#include "src/fs/fs_common.h" -#include "src/fs/local_filesystem.h" -#include "src/common/crc32.h" +#include "include/chunkserver/chunkserver_common.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/common/bitmap.h" +#include "src/common/crc32.h" #include "src/common/curve_define.h" -#include "src/chunkserver/datastore/file_pool.h" #include "src/common/fast_align.h" - -#include 
"include/chunkserver/chunkserver_common.h" +#include "src/fs/fs_common.h" +#include "src/fs/local_filesystem.h" using ::curve::common::align_up; using ::curve::common::is_aligned; /** - * chunkfile pool预分配工具,提供两种分配方式 - * 1. 以磁盘空间百分比方式,指定需要分配的百分比 - * 2. 指定以chunk数量分配 - * 默认的分配方式是以磁盘空间百分比作为分配方式,可以通过-allocateByPercent=false/true - * 调整分配方式。 + * chunkfile pool pre allocation tool, providing two allocation methods + * 1. Specify the percentage to be allocated as a percentage of disk space + * 2. Specify allocation by chunk quantity + * The default allocation method is based on the percentage of disk space, which + * can be achieved by -allocateByPercent=false/true Adjust the allocation + * method. */ -DEFINE_bool(allocateByPercent, - true, +DEFINE_bool(allocateByPercent, true, "allocate filePool by percent of disk size or by chunk num!"); -DEFINE_uint32(fileSize, - 16 * 1024 * 1024, - "chunk size"); +DEFINE_uint32(fileSize, 16 * 1024 * 1024, "chunk size"); DEFINE_uint32(blockSize, 4096, "minimum io alignment supported"); @@ -69,41 +65,34 @@ static bool ValidateBlockSize(const char* /*name*/, uint32_t blockSize) { DEFINE_validator(blockSize, &ValidateBlockSize); -DEFINE_string(fileSystemPath, - "./", - "chunkserver disk path"); +DEFINE_string(fileSystemPath, "./", "chunkserver disk path"); -DEFINE_string(filePoolDir, - "./filePool/", - "chunkfile pool dir"); +DEFINE_string(filePoolDir, "./filePool/", "chunkfile pool dir"); -DEFINE_string(filePoolMetaPath, - "./filePool.meta", +DEFINE_string(filePoolMetaPath, "./filePool.meta", "chunkfile pool meta info file path."); -// preallocateNum仅在测试的时候使用,测试提前预分配固定数量的chunk -// 当设置这个值的时候可以不用设置allocatepercent -DEFINE_uint32(preAllocateNum, - 0, +// preallocateNum is only used during testing, and a fixed number of chunks are +// pre allocated in advance during testing When setting this value, there is no +// need to set allocatepercent +DEFINE_uint32(preAllocateNum, 0, "preallocate chunk nums, this is JUST for curve test"); -// 在系统初始化的时候,管理员需要预先格式化磁盘,并进行预分配 -// 这时候只需要指定allocatepercent,allocatepercent是占整个盘的空间的百分比 -DEFINE_uint32(allocatePercent, - 80, - "preallocate storage percent of total disk"); +// During system initialization, the administrator needs to pre format the disk +// and pre allocate it At this point, only allocate percentage needs to be +// specified, which is the percentage of the entire disk space occupied by +// allocate percentage +DEFINE_uint32(allocatePercent, 80, "preallocate storage percent of total disk"); -// 测试情况下置为false,加快测试速度 -DEFINE_bool(needWriteZero, - true, - "not write zero for test."); +// Set to false during testing to accelerate testing speed +DEFINE_bool(needWriteZero, true, "not write zero for test."); -using curve::fs::FileSystemType; -using curve::fs::LocalFsFactory; +using curve::chunkserver::FilePoolMeta; +using curve::common::kFilePoolMagic; using curve::fs::FileSystemInfo; +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; -using curve::common::kFilePoolMagic; -using curve::chunkserver::FilePoolMeta; +using curve::fs::LocalFsFactory; class CompareInternal { public: @@ -128,7 +117,7 @@ struct AllocateStruct { static int AllocateFiles(AllocateStruct* allocatestruct) { const size_t actualFileSize = allocatestruct->actualFileSize; - char* data = new(std::nothrow)char[actualFileSize]; + char* data = new (std::nothrow) char[actualFileSize]; memset(data, 0, actualFileSize); uint64_t count = 0; @@ -137,14 +126,13 @@ static int AllocateFiles(AllocateStruct* allocatestruct) { { std::unique_lock 
-using curve::fs::FileSystemType; -using curve::fs::LocalFsFactory; +using curve::chunkserver::FilePoolMeta; +using curve::common::kFilePoolMagic; using curve::fs::FileSystemInfo; +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; -using curve::common::kFilePoolMagic; -using curve::chunkserver::FilePoolMeta; +using curve::fs::LocalFsFactory; class CompareInternal { public: @@ -128,7 +117,7 @@ struct AllocateStruct { static int AllocateFiles(AllocateStruct* allocatestruct) { const size_t actualFileSize = allocatestruct->actualFileSize; - char* data = new(std::nothrow)char[actualFileSize]; + char* data = new (std::nothrow) char[actualFileSize]; memset(data, 0, actualFileSize); uint64_t count = 0; @@ -137,14 +126,13 @@ { std::unique_lock<std::mutex> lk(*allocatestruct->mtx); allocatestruct->allocateChunknum->fetch_add(1); - filename = std::to_string( - allocatestruct->allocateChunknum->load()); + filename = std::to_string(allocatestruct->allocateChunknum->load()); } - std::string tmpchunkfilepath = FLAGS_filePoolDir + "/" - + filename + allocatestruct->cleanChunkSuffix; + std::string tmpchunkfilepath = FLAGS_filePoolDir + "/" + filename + + allocatestruct->cleanChunkSuffix; - int ret = allocatestruct->fsptr->Open(tmpchunkfilepath, - O_RDWR | O_CREAT); + int ret = + allocatestruct->fsptr->Open(tmpchunkfilepath, O_RDWR | O_CREAT); if (ret < 0) { *allocatestruct->checkwrong = true; LOG(ERROR) << "file open failed, " << tmpchunkfilepath; @@ -205,12 +193,12 @@ static bool CanBitmapFitInMetaPage() { constexpr size_t kMaximumBitmapBytes = 1024; auto bitmapBytes = - FLAGS_fileSize / FLAGS_blockSize / curve::common::BITMAP_UNIT_SIZE; + FLAGS_fileSize / FLAGS_blockSize / curve::common::BITMAP_UNIT_SIZE; LOG(INFO) << "bitmap bytes is " << bitmapBytes; return bitmapBytes <= kMaximumBitmapBytes; } -// TODO(tongguangxun) :添加单元测试 +// TODO(tongguangxun): Add unit tests int main(int argc, char** argv) { google::ParseCommandLineFlags(&argc, &argv, false); google::InitGoogleLogging(argv[0]); @@ -247,7 +235,9 @@ int main(int argc, char** argv) { } tmpChunkSet_.insert(tmpvec.begin(), tmpvec.end()); - uint64_t size = tmpChunkSet_.size() ? atoi((*(--tmpChunkSet_.end())).c_str()) : 0; // NOLINT + uint64_t size = tmpChunkSet_.size() + ? atoi((*(--tmpChunkSet_.end())).c_str()) + : 0; // NOLINT allocateChunknum_.store(size + 1); FileSystemInfo finfo; @@ -278,7 +268,7 @@ int main(int argc, char** argv) { bool checkwrong = false; // two threads concurrent, can reach the bandwidth of disk. - uint64_t threadAllocateNum = preAllocateChunkNum/2; + uint64_t threadAllocateNum = preAllocateChunkNum / 2; std::vector thvec; AllocateStruct allocateStruct; allocateStruct.fsptr = fsptr; @@ -316,7 +306,7 @@ int main(int argc, char** argv) { return -1; } - // 读取meta文件,检查是否写入正确 + // Read the meta file and check that it was written correctly FilePoolMeta recordMeta; ret = curve::chunkserver::FilePoolHelper::DecodeMetaInfoFromMetaFile( fsptr, FLAGS_filePoolMetaPath, 4096, &recordMeta); @@ -345,8 +335,8 @@ int main(int argc, char** argv) { if (recordMeta.filePoolPath != FLAGS_filePoolDir) { LOG(ERROR) << "meta info persistency failed!" 
- << ", read chunkpath = " << recordMeta.filePoolPath - << ", real chunkpath = " << FLAGS_filePoolDir; + << ", read chunkpath = " << recordMeta.filePoolPath + << ", real chunkpath = " << FLAGS_filePoolDir; break; } diff --git a/src/tools/curve_meta_tool.cpp b/src/tools/curve_meta_tool.cpp index 5d9da78ec0..6a4bd0af6f 100644 --- a/src/tools/curve_meta_tool.cpp +++ b/src/tools/curve_meta_tool.cpp @@ -40,8 +40,7 @@ std::ostream& operator<<(std::ostream& os, const vector& ranges) { } uint64_t startOff = ranges[i].beginIndex * FLAGS_pageSize; uint64_t endOff = (ranges[i].endIndex + 1) * FLAGS_pageSize; - os << "[" << startOff << "," - << endOff << ")"; + os << "[" << startOff << "," << endOff << ")"; } return os; } @@ -105,26 +104,24 @@ int CurveMetaTool::RunCommand(const std::string& cmd) { } } - - int CurveMetaTool::PrintChunkMeta(const std::string& chunkFileName) { - // 打开chunk文件 - int fd = localFS_->Open(chunkFileName.c_str(), O_RDONLY|O_NOATIME); + // Open chunk file + int fd = localFS_->Open(chunkFileName.c_str(), O_RDONLY | O_NOATIME); if (fd < 0) { - std::cout << "Fail to open " << chunkFileName << ", " - << berror() << std::endl; + std::cout << "Fail to open " << chunkFileName << ", " << berror() + << std::endl; return -1; } - // 读取chunk头部 + // Read chunk header std::unique_ptr buf(new char[FLAGS_pageSize]); memset(buf.get(), 0, FLAGS_pageSize); int rc = localFS_->Read(fd, buf.get(), 0, FLAGS_pageSize); localFS_->Close(fd); if (rc != static_cast(FLAGS_pageSize)) { if (rc < 0) { - std::cout << "Fail to read metaPage from " - << chunkFileName << ", " << berror() << std::endl; + std::cout << "Fail to read metaPage from " << chunkFileName << ", " + << berror() << std::endl; } else { std::cout << "Read size not match, page size: " << FLAGS_pageSize << ", read size: " << rc << ", " << berror() << std::endl; @@ -138,29 +135,29 @@ int CurveMetaTool::PrintChunkMeta(const std::string& chunkFileName) { return -1; } - // 打印metaPage + // Print MetaPage std::cout << metaPage; return 0; } int CurveMetaTool::PrintSnapshotMeta(const std::string& snapFileName) { - // 打开快照文件 - int fd = localFS_->Open(snapFileName.c_str(), O_RDONLY|O_NOATIME); + // Open snapshot file + int fd = localFS_->Open(snapFileName.c_str(), O_RDONLY | O_NOATIME); if (fd < 0) { - std::cout << "Fail to open " << snapFileName << ", " - << berror() << std::endl; + std::cout << "Fail to open " << snapFileName << ", " << berror() + << std::endl; return -1; } - // 读取快照文件头部 + // Read snapshot file header std::unique_ptr buf(new char[FLAGS_pageSize]); memset(buf.get(), 0, FLAGS_pageSize); int rc = localFS_->Read(fd, buf.get(), 0, FLAGS_pageSize); localFS_->Close(fd); if (rc != static_cast(FLAGS_pageSize)) { if (rc < 0) { - std::cout << "Fail to read metaPage from " - << snapFileName << ", " << berror() << std::endl; + std::cout << "Fail to read metaPage from " << snapFileName << ", " + << berror() << std::endl; } else { std::cout << "Read size not match, page size: " << FLAGS_pageSize << ", read size: " << rc << std::endl; @@ -174,7 +171,7 @@ int CurveMetaTool::PrintSnapshotMeta(const std::string& snapFileName) { return -1; } - // 打印metaPage + // Print MetaPage std::cout << metaPage; return 0; } diff --git a/src/tools/curve_meta_tool.h b/src/tools/curve_meta_tool.h index fe2b040c58..2125679022 100644 --- a/src/tools/curve_meta_tool.h +++ b/src/tools/curve_meta_tool.h @@ -24,24 +24,26 @@ #define SRC_TOOLS_CURVE_META_TOOL_H_ #include + #include #include #include #include + +#include "src/chunkserver/datastore/chunkserver_chunkfile.h" #include 
"src/common/bitmap.h" #include "src/fs/local_filesystem.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" -#include "src/chunkserver/datastore/chunkserver_chunkfile.h" namespace curve { namespace tool { -using curve::common::BitRange; -using curve::fs::LocalFileSystem; using curve::chunkserver::ChunkFileMetaPage; -using curve::chunkserver::SnapshotMetaPage; using curve::chunkserver::CSErrorCode; +using curve::chunkserver::SnapshotMetaPage; +using curve::common::BitRange; +using curve::fs::LocalFileSystem; std::ostream& operator<<(std::ostream& os, const vector& ranges); std::ostream& operator<<(std::ostream& os, const ChunkFileMetaPage& metaPage); @@ -49,40 +51,40 @@ std::ostream& operator<<(std::ostream& os, const SnapshotMetaPage& metaPage); class CurveMetaTool : public CurveTool { public: - explicit CurveMetaTool(std::shared_ptr localFs) : - localFS_(localFs) {} + explicit CurveMetaTool(std::shared_ptr localFs) + : localFS_(localFs) {} /** - * @brief 执行命令 - * @param command 要执行的命令 - * @return 成功返回0,失败返回-1 - */ + * @brief Execute command + * @param command The command to be executed + * @return returns 0 for success, -1 for failure + */ int RunCommand(const std::string& command) override; /** - * @brief 打印帮助信息 - */ + * @brief Print help information + */ void PrintHelp(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); private: /** - * @brief 打印chunk文件元数据 - * @param chunkFileName chunk文件的文件名 - * @return 成功返回0,否则返回-1 + * @brief Print chunk file metadata + * @param chunkFileName The file name of the chunk file + * @return successfully returns 0, otherwise returns -1 */ int PrintChunkMeta(const std::string& chunkFileName); /** - * @brief 打印快照文件元数据 - * @param snapFileName 快照文件的文件名 - * @return 成功返回0,否则返回-1 + * @brief Print snapshot file metadata + * @param snapFileName The file name of the snapshot file + * @return successfully returns 0, otherwise returns -1 */ int PrintSnapshotMeta(const std::string& snapFileName); diff --git a/src/tools/curve_tool_define.h b/src/tools/curve_tool_define.h index 8800bf847c..e2261550d5 100644 --- a/src/tools/curve_tool_define.h +++ b/src/tools/curve_tool_define.h @@ -24,6 +24,7 @@ #define SRC_TOOLS_CURVE_TOOL_DEFINE_H_ #include + #include DECLARE_string(mdsAddr); @@ -40,10 +41,10 @@ DECLARE_string(password); namespace curve { namespace tool { -// 显示版本命令 +// Display Version Command const char kVersionCmd[] = "version"; -// StatusTool相关命令 +// StatusTool related commands const char kStatusCmd[] = "status"; const char kSpaceCmd[] = "space"; const char kChunkserverStatusCmd[] = "chunkserver-status"; @@ -59,7 +60,7 @@ const char kClusterStatusCmd[] = "cluster-status"; const char kScanStatusCmd[] = "scan-status"; const char kFormatStatusCmd[] = "format-status"; -// NameSpaceTool相关命令 +// NameSpaceTool related commands const char kGetCmd[] = "get"; const char kListCmd[] = "list"; const char kSegInfoCmd[] = "seginfo"; @@ -71,7 +72,7 @@ const char kChunkLocatitonCmd[] = "chunk-location"; const char kUpdateThrottle[] = "update-throttle"; const char kListPoolsets[] = "list-poolsets"; -// CopysetCheck相关命令 +// CopysetCheck related commands const char kCheckCopysetCmd[] = "check-copyset"; const char kCheckChunnkServerCmd[] = "check-chunkserver"; const char kCheckServerCmd[] = "check-server"; @@ -79,13 +80,13 @@ 
diff --git a/src/tools/curve_tool_define.h b/src/tools/curve_tool_define.h index 8800bf847c..e2261550d5 100644 --- a/src/tools/curve_tool_define.h +++ b/src/tools/curve_tool_define.h @@ -24,6 +24,7 @@ #define SRC_TOOLS_CURVE_TOOL_DEFINE_H_ #include + #include DECLARE_string(mdsAddr); @@ -40,10 +41,10 @@ DECLARE_string(password); namespace curve { namespace tool { -// 显示版本命令 +// Version display command const char kVersionCmd[] = "version"; -// StatusTool相关命令 +// StatusTool related commands const char kStatusCmd[] = "status"; const char kSpaceCmd[] = "space"; const char kChunkserverStatusCmd[] = "chunkserver-status"; @@ -59,7 +60,7 @@ const char kClusterStatusCmd[] = "cluster-status"; const char kScanStatusCmd[] = "scan-status"; const char kFormatStatusCmd[] = "format-status"; -// NameSpaceTool相关命令 +// NameSpaceTool related commands const char kGetCmd[] = "get"; const char kListCmd[] = "list"; const char kSegInfoCmd[] = "seginfo"; @@ -71,7 +72,7 @@ const char kChunkLocatitonCmd[] = "chunk-location"; const char kUpdateThrottle[] = "update-throttle"; const char kListPoolsets[] = "list-poolsets"; -// CopysetCheck相关命令 +// CopysetCheck related commands const char kCheckCopysetCmd[] = "check-copyset"; const char kCheckChunnkServerCmd[] = "check-chunkserver"; const char kCheckServerCmd[] = "check-server"; @@ -79,13 +80,13 @@ const char kCopysetsStatusCmd[] = "copysets-status"; const char kCheckOperatorCmd[] = "check-operator"; const char kListMayBrokenVolumes[] = "list-may-broken-vol"; -// CopysetTool相关命令 +// CopysetTool related commands const char kSetCopysetAvailFlag[] = "set-copyset-availflag"; -// 一致性检查命令 +// Consistency check command const char kCheckConsistencyCmd[] = "check-consistency"; -// 配置变更命令 +// Configuration change commands const char kRemovePeerCmd[] = "remove-peer"; const char kTransferLeaderCmd[] = "transfer-leader"; const char kResetPeerCmd[] = "reset-peer"; @@ -96,18 +97,18 @@ const char kDoSnapshotAll[] = "do-snapshot-all"; const char kRapidLeaderSchedule[] = "rapid-leader-schedule"; const char kSetScanState[] = "set-scan-state"; -// curve文件meta相关的命令 +// Commands related to curve file meta const char kChunkMeta[] = "chunk-meta"; const char kSnapshotMeta[] = "snapshot-meta"; -// raft log相关命令 +// raft log related commands const char kRaftLogMeta[] = "raft-log-meta"; const char kOffline[] = "offline"; const char kVars[] = "/vars/"; const char kConfValue[] = "conf_value"; -// raft state 相关常量 +// raft state related constants const char kState[] = "state"; const char kStateLeader[] = "LEADER"; const char kStateFollower[] = "FOLLOWER"; diff --git a/src/tools/curve_tool_factory.h b/src/tools/curve_tool_factory.h index dc48778713..a863bce5fb 100644 --- a/src/tools/curve_tool_factory.h +++ b/src/tools/curve_tool_factory.h @@ -23,18 +23,18 @@ #ifndef SRC_TOOLS_CURVE_TOOL_FACTORY_H_ #define SRC_TOOLS_CURVE_TOOL_FACTORY_H_ -#include #include #include +#include -#include "src/tools/curve_tool.h" -#include "src/tools/status_tool.h" -#include "src/tools/namespace_tool.h" #include "src/tools/consistency_check.h" -#include "src/tools/curve_cli.h" #include "src/tools/copyset_check.h" -#include "src/tools/schedule_tool.h" #include "src/tools/copyset_tool.h" +#include "src/tools/curve_cli.h" +#include "src/tools/curve_tool.h" +#include "src/tools/namespace_tool.h" +#include "src/tools/schedule_tool.h" +#include "src/tools/status_tool.h" namespace curve { namespace tool { @@ -42,41 +42,41 @@ namespace tool { class CurveToolFactory { public: /** - * @brief 根据输入的command获取CurveTool对象 - * @param command 要执行的命令的名称 - * @return CurveTool实例 + * @brief Retrieve the CurveTool object based on the input command + * @param command the name of the command to execute + * @return CurveTool instance */ static std::shared_ptr<CurveTool> GenerateCurveTool( - const std::string& command); + const std::string& command); private: /** - * @brief 获取StatusTool实例 + * @brief Get StatusTool instance */ static std::shared_ptr<CurveTool> GenerateStatusTool(); /** - * @brief 获取NameSpaceTool实例 + * @brief Get NameSpaceTool instance */ static std::shared_ptr<CurveTool> GenerateNameSpaceTool(); /** - * @brief 获取ConsistencyCheck实例 + * @brief Get ConsistencyCheck instance */ static std::shared_ptr<CurveTool> GenerateConsistencyCheck(); /** - * @brief 获取CurveCli实例 + * @brief Get CurveCli instance */ static std::shared_ptr<CurveTool> GenerateCurveCli(); /** - * @brief 获取CopysetCheck实例 + * @brief Get CopysetCheck instance */ static std::shared_ptr<CurveTool> GenerateCopysetCheck(); /** - * @brief 获取ScheduleTool实例 + * @brief Get ScheduleTool instance */ static std::shared_ptr<CurveTool> GenerateScheduleTool(); diff --git a/src/tools/curve_tool_main.cpp b/src/tools/curve_tool_main.cpp index 8e516dc0e7..5f57f718c1 100644 --- a/src/tools/curve_tool_main.cpp +++ b/src/tools/curve_tool_main.cpp @@ -21,12 +21,16 @@ */ #include + #include "src/common/curve_version.h" #include "src/tools/curve_tool_factory.h" -static const 
char* kHelpStr = "Usage: curve_ops_tool [Command] [OPTIONS...]\n" +static const char* + kHelpStr = + "Usage: curve_ops_tool [Command] [OPTIONS...]\n" "COMMANDS:\n" - "space : show curve all disk type space, include total space and used space\n" //NOLINT + "space : show curve all disk type space, include total space and used " + "space\n" // NOLINT "status : show the total status of the cluster\n" "chunkserver-status : show the chunkserver online status\n" "mds-status : show the mds status\n" @@ -35,22 +39,26 @@ static const char* kHelpStr = "Usage: curve_ops_tool [Command] [OPTIONS...]\n" "etcd-status : show the etcd status\n" "snapshot-clone-status : show the snapshot clone server status\n" "copysets-status : check the health state of all copysets\n" - "chunkserver-list : show curve chunkserver-list, list all chunkserver information\n" //NOLINT + "chunkserver-list : show curve chunkserver-list, list all chunkserver " + "information\n" // NOLINT "server-list : list all server information\n" "logical-pool-list : list all logical pool information\n" "cluster-status : show cluster status\n" "get : show the file info and the actual space of file\n" "list : list the file info of files in the directory\n" "seginfo : list the segments info of the file\n" - "delete : delete the file, to force delete, should specify the --forcedelete=true\n" //NOLINT + "delete : delete the file, to force delete, should specify the " + "--forcedelete=true\n" // NOLINT "clean-recycle : clean the RecycleBin\n" "create : create file, file length unit is GB\n" "extend : extend volume of file\n" - "chunk-location : query the location of the chunk corresponding to the offset\n" //NOLINT + "chunk-location : query the location of the chunk corresponding to the " + "offset\n" // NOLINT "check-consistency : check the consistency of three copies\n" "remove-peer : remove the peer from the copyset\n" - "transfer-leader : transfer the leader of the copyset to the peer\n" //NOLINT - "reset-peer : reset the configuration of copyset, only reset to one peer is supported\n" //NOLINT + "transfer-leader : transfer the leader of the copyset to the peer\n" // NOLINT + "reset-peer : reset the configuration of copyset, only reset to one " + "peer is supported\n" // NOLINT "do-snapshot : do snapshot of the peer of the copyset\n" "do-snapshot-all : do snapshot of all peers of all copysets\n" "check-chunkserver : check the health state of the chunkserver\n" @@ -60,11 +68,13 @@ static const char* kHelpStr = "Usage: curve_ops_tool [Command] [OPTIONS...]\n" "list-may-broken-vol: list all volumes on majority offline copysets\n" "set-copyset-availflag: set copysets available flags\n" "update-throttle: update file throttle params\n" - "rapid-leader-schedule: rapid leader schedule in cluster in logicalpool\n" //NOLINT + "rapid-leader-schedule: rapid leader schedule in cluster in " + "logicalpool\n" // NOLINT "set-scan-state: set scan state for specify logical pool\n" "scan-status: show scan status\n" "list-poolsets: list all poolsets in cluster\n\n" - "You can specify the config path by -confPath to avoid typing too many options\n"; //NOLINT + "You can specify the config path by -confPath to avoid typing too many " + "options\n"; // NOLINT DEFINE_bool(example, false, "print the example of usage"); DEFINE_string(confPath, "/etc/curve/tools.conf", "config file path of tools"); @@ -80,8 +90,10 @@ extern std::string rootUserPassword; } // namespace curve void UpdateFlagsFromConf(curve::common::Configuration* conf) { - // 如果配置文件不存在的话不报错,以命令行为准,这是为了不强依赖配置 
- // 如果配置文件存在并且没有指定命令行的话,就以配置文件为准 + // If the configuration file does not exist, no error is reported and the + // command-line flags prevail; this avoids a hard dependency on the + // configuration. If the configuration file exists and a flag is not + // given on the command line, the configuration file prevails google::CommandLineFlagInfo info; if (GetCommandLineFlagInfo("mdsAddr", &info) && info.is_default) { conf->GetStringValue("mdsAddr", &FLAGS_mdsAddr); @@ -98,27 +110,23 @@ void UpdateFlagsFromConf(curve::common::Configuration* conf) { if (GetCommandLineFlagInfo("rpcRetryTimes", &info) && info.is_default) { conf->GetUInt64Value("rpcRetryTimes", &FLAGS_rpcRetryTimes); } - if (GetCommandLineFlagInfo("rpcConcurrentNum", &info) && - info.is_default) { + if (GetCommandLineFlagInfo("rpcConcurrentNum", &info) && info.is_default) { conf->GetUInt64Value("rpcConcurrentNum", &FLAGS_rpcConcurrentNum); } - if (GetCommandLineFlagInfo("snapshotCloneAddr", &info) && - info.is_default) { + if (GetCommandLineFlagInfo("snapshotCloneAddr", &info) && info.is_default) { conf->GetStringValue("snapshotCloneAddr", &FLAGS_snapshotCloneAddr); } if (GetCommandLineFlagInfo("snapshotCloneDummyPort", &info) && - info.is_default) { + info.is_default) { conf->GetStringValue("snapshotCloneDummyPort", - &FLAGS_snapshotCloneDummyPort); + &FLAGS_snapshotCloneDummyPort); } - if (GetCommandLineFlagInfo("userName", &info) && - info.is_default) { + if (GetCommandLineFlagInfo("userName", &info) && info.is_default) { conf->GetStringValue("rootUserName", &FLAGS_userName); } - if (GetCommandLineFlagInfo("password", &info) && - info.is_default) { + if (GetCommandLineFlagInfo("password", &info) && info.is_default) { conf->GetStringValue("rootUserPassword", &FLAGS_password); } } @@ -168,7 +176,8 @@ int main(int argc, char** argv) { UpdateFlagsFromConf(&conf); - // 关掉健康检查,否则Not Connect to的时候重试没有意义 + // Turn off the health check; otherwise retrying on "Not connect to" + // errors would be pointless brpc::FLAGS_health_check_interval = -1; auto curveTool = curve::tool::CurveToolFactory::GenerateCurveTool(command); if (!curveTool) { diff --git a/src/tools/etcd_client.h b/src/tools/etcd_client.h index b7d8f56964..5392a1c6b3 100644 --- a/src/tools/etcd_client.h +++ b/src/tools/etcd_client.h @@ -27,9 +27,9 @@ #include #include +#include #include #include -#include #include "src/common/string_util.h" #include "src/tools/version_tool.h" @@ -49,26 +49,29 @@ class EtcdClient { virtual ~EtcdClient() = default; /** - * @brief 初始化etcdAddrVec - * @param etcdAddr etcd的地址,支持多地址,用","分隔 - * @return 成功返回0,失败返回-1 + * @brief Initialize etcdAddrVec + * @param etcdAddr etcd addresses; multiple addresses are supported, + * separated by ',' + * @return 0 on success, -1 on failure */ virtual int Init(const std::string& etcdAddr); /** - * @brief 获取etcd集群的leader - * @param[out] leaderAddrVec etcd的leader的地址列表,返回值为0时有效 - * @param[out] onlineState etcd集群中每个节点的在线状态,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the leader of the etcd cluster + * @param[out] leaderAddrVec address list of the etcd leader(s), valid + * when the return value is 0 + * @param[out] onlineState online state of each node in the etcd + * cluster, valid when the return value is 0 + * @return 0 on success, -1 on failure */ virtual int GetEtcdClusterStatus(std::vector<std::string>* leaderAddrVec, - std::map<std::string, bool>* onlineState); + std::map<std::string, bool>* onlineState);
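A short usage sketch of the interface above (illustrative only; the CheckEtcd helper is an assumption, and the element types of the two out-parameters follow the doc comment):

#include <map>
#include <string>
#include <vector>

#include "src/tools/etcd_client.h"

// Hypothetical helper: report whether any etcd member is offline.
int CheckEtcd(const std::string& etcdAddr) {
    curve::tool::EtcdClient client;
    if (client.Init(etcdAddr) != 0) {
        return -1;  // e.g. empty address list
    }
    std::vector<std::string> leaderAddrVec;
    std::map<std::string, bool> onlineState;
    if (client.GetEtcdClusterStatus(&leaderAddrVec, &onlineState) != 0) {
        return -1;
    }
    for (const auto& node : onlineState) {
        if (!node.second) {
            return 1;  // at least one member is offline
        }
    }
    return 0;
}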
/** - * @brief 获取etcd的版本并检查版本一致性 - * @param[out] version 版本 - * @param[out] failedList 查询version失败的地址列表 - * @return 成功返回0,失败返回-1 + * @brief Get the etcd version and check version consistency + * @param[out] version the version + * @param[out] failedList list of addresses for which the version query + * failed + * @return 0 on success, -1 on failure */ virtual int GetAndCheckEtcdVersion(std::string* version, std::vector<std::string>* failedList); diff --git a/src/tools/mds_client.cpp b/src/tools/mds_client.cpp index 4db8bb81f0..50c0eb448b 100644 --- a/src/tools/mds_client.cpp +++ b/src/tools/mds_client.cpp @@ -34,12 +34,11 @@ int MDSClient::Init(const std::string& mdsAddr) { return Init(mdsAddr, std::to_string(kDefaultMdsDummyPort)); } -int MDSClient::Init(const std::string& mdsAddr, - const std::string& dummyPort) { +int MDSClient::Init(const std::string& mdsAddr, const std::string& dummyPort) { if (isInited_) { return 0; } - // 初始化channel + // Initialize channel curve::common::SplitString(mdsAddr, ",", &mdsAddrVec_); if (mdsAddrVec_.empty()) { std::cout << "Split mds address fail!" << std::endl; return -1; } @@ -57,7 +56,7 @@ int MDSClient::Init(const std::string& mdsAddr, std::cout << "Init channel to " << mdsAddr << "fail!" << std::endl; continue; } - // 寻找哪个mds存活 + // Find which mds is alive curve::mds::topology::ListPhysicalPoolRequest request; curve::mds::topology::ListPhysicalPoolResponse response; curve::mds::topology::TopologyService_Stub stub(&channel_); @@ -83,7 +82,7 @@ int MDSClient::InitDummyServerMap(const std::string& dummyPort) { std::cout << "split dummy server fail!" << std::endl; return -1; } - // 只指定了一个端口,对所有mds采用这个端口 + // Only one port was specified; use it for all mds if (dummyPortVec.size() == 1) { for (uint64_t i = 0; i < mdsAddrVec_.size() - 1; ++i) { dummyPortVec.emplace_back(dummyPortVec[0]); } @@ -92,7 +91,8 @@ if (dummyPortVec.size() != mdsAddrVec_.size()) { std::cout << "mds dummy port list must be correspond as" - " mds addr list" << std::endl; + " mds addr list" + << std::endl; return -1; } @@ -109,8 +109,7 @@ int MDSClient::InitDummyServerMap(const std::string& dummyPort) { return 0; } -int MDSClient::GetFileInfo(const std::string &fileName, - FileInfo* fileInfo) { +int MDSClient::GetFileInfo(const std::string& fileName, FileInfo* fileInfo) { assert(fileInfo != nullptr); curve::mds::GetFileInfoRequest request; curve::mds::GetFileInfoResponse response; @@ -123,13 +122,12 @@ int MDSClient::GetFileInfo(const std::string &fileName, std::cout << "GetFileInfo info from all mds fail!" 
<< std::endl; return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { fileInfo->CopyFrom(response.fileinfo()); return 0; } - std::cout << "GetFileInfo fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "GetFileInfo fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -150,19 +148,18 @@ int MDSClient::GetAllocatedSize(const std::string& fileName, *allocSize = response.allocatedsize(); if (allocMap) { for (auto it = response.allocsizemap().begin(); - it != response.allocsizemap().end(); ++it) { + it != response.allocsizemap().end(); ++it) { allocMap->emplace(it->first, it->second); } } return 0; } - std::cout << "GetAllocatedSize fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "GetAllocatedSize fail with errCode: " << response.statuscode() + << std::endl; return -1; } -int MDSClient::GetFileSize(const std::string& fileName, - uint64_t* fileSize) { +int MDSClient::GetFileSize(const std::string& fileName, uint64_t* fileSize) { assert(fileSize != nullptr); curve::mds::GetFileSizeRequest request; curve::mds::GetFileSizeResponse response; @@ -178,8 +175,8 @@ int MDSClient::GetFileSize(const std::string& fileName, *fileSize = response.filesize(); return 0; } - std::cout << "GetAllocatedSize fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "GetAllocatedSize fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -200,8 +197,7 @@ int MDSClient::ListDir(const std::string& dirName, std::cout << "ListDir from all mds fail!" << std::endl; return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { for (int i = 0; i < response.fileinfo_size(); ++i) { files->emplace_back(response.fileinfo(i)); } @@ -213,8 +209,8 @@ int MDSClient::ListDir(const std::string& dirName, } GetSegmentRes MDSClient::GetSegmentInfo(const std::string& fileName, - uint64_t offset, - PageFileSegment* segment) { + uint64_t offset, + PageFileSegment* segment) { if (!segment) { std::cout << "The argument is a null pointer!" << std::endl; return GetSegmentRes::kOtherError; @@ -260,13 +256,13 @@ int MDSClient::DeleteFile(const std::string& fileName, bool forcedelete) { } if (response.has_statuscode() && - (response.statuscode() == StatusCode::kOK || - response.statuscode() == StatusCode::kFileNotExists || - response.statuscode() == StatusCode::kFileUnderDeleting)) { + (response.statuscode() == StatusCode::kOK || + response.statuscode() == StatusCode::kFileNotExists || + response.statuscode() == StatusCode::kFileUnderDeleting)) { return 0; } - std::cout << "DeleteFile fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "DeleteFile fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -294,8 +290,7 @@ int MDSClient::CreateFile(const CreateFileContext& context) { return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { return 0; } std::cout << "CreateFile fail with errCode: " @@ -316,19 +311,18 @@ int MDSClient::ExtendVolume(const std::string& fileName, uint64_t newSize) { return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { - std::cout << "extendFile success!" 
<< std::endl; + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { + std::cout << "extendFile success!" << std::endl; return 0; } - std::cout << "extendFile fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "extendFile fail with errCode: " << response.statuscode() + << std::endl; return -1; } int MDSClient::ListVolumesOnCopyset( - const std::vector& copysets, - std::vector* fileNames) { + const std::vector& copysets, + std::vector* fileNames) { curve::mds::ListVolumesOnCopysetsRequest request; curve::mds::ListVolumesOnCopysetsResponse response; for (const auto& copyset : copysets) { @@ -343,8 +337,7 @@ int MDSClient::ListVolumesOnCopyset( return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { for (int i = 0; i < response.filenames_size(); ++i) { fileNames->emplace_back(response.filenames(i)); } @@ -373,31 +366,30 @@ int MDSClient::ListClient(std::vector* clientAddrs, return -1; } - if (response.has_statuscode() && - response.statuscode() == StatusCode::kOK) { + if (response.has_statuscode() && response.statuscode() == StatusCode::kOK) { for (int i = 0; i < response.clientinfos_size(); ++i) { const auto& clientInfo = response.clientinfos(i); - std::string clientAddr = clientInfo.ip() + ":" + - std::to_string(clientInfo.port()); + std::string clientAddr = + clientInfo.ip() + ":" + std::to_string(clientInfo.port()); clientAddrs->emplace_back(clientAddr); } return 0; } - std::cout << "ListClient fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "ListClient fail with errCode: " << response.statuscode() + << std::endl; return -1; } -int MDSClient::GetChunkServerListInCopySet(const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId, - std::vector* csLocs) { +int MDSClient::GetChunkServerListInCopySet( + const PoolIdType& logicalPoolId, const CopySetIdType& copysetId, + std::vector* csLocs) { if (!csLocs) { std::cout << "The argument is a null pointer!" << std::endl; return -1; } std::vector csServerInfos; - int res = GetChunkServerListInCopySets(logicalPoolId, - {copysetId}, &csServerInfos); + int res = GetChunkServerListInCopySets(logicalPoolId, {copysetId}, + &csServerInfos); if (res != 0) { std::cout << "GetChunkServerListInCopySets fail" << std::endl; return -1; @@ -409,9 +401,10 @@ int MDSClient::GetChunkServerListInCopySet(const PoolIdType& logicalPoolId, return 0; } -int MDSClient::GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, - const std::vector& copysetIds, - std::vector* csServerInfos) { +int MDSClient::GetChunkServerListInCopySets( + const PoolIdType& logicalPoolId, + const std::vector& copysetIds, + std::vector* csServerInfos) { if (!csServerInfos) { std::cout << "The argument is a null pointer!" << std::endl; return -1; @@ -424,7 +417,8 @@ int MDSClient::GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, } curve::mds::topology::TopologyService_Stub stub(&channel_); - auto fp = &curve::mds::topology::TopologyService_Stub::GetChunkServerListInCopySets; // NOLINT + auto fp = &curve::mds::topology::TopologyService_Stub:: + GetChunkServerListInCopySets; // NOLINT if (SendRpcToMds(&request, &response, &stub, fp) != 0) { std::cout << "GetChunkServerListInCopySets from all mds fail!" 
<< std::endl; @@ -432,7 +426,7 @@ int MDSClient::GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.csinfo_size(); ++i) { csServerInfos->emplace_back(response.csinfo(i)); } @@ -444,7 +438,7 @@ int MDSClient::GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, } int MDSClient::ListPhysicalPoolsInCluster( - std::vector* pools) { + std::vector* pools) { if (!pools) { std::cout << "The argument is a null pointer!" << std::endl; return -1; @@ -455,20 +449,19 @@ int MDSClient::ListPhysicalPoolsInCluster( auto fp = &curve::mds::topology::TopologyService_Stub::ListPhysicalPool; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "ListPhysicalPool from all mds fail!" - << std::endl; + std::cout << "ListPhysicalPool from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.physicalpoolinfos_size(); ++i) { pools->emplace_back(response.physicalpoolinfos(i)); } return 0; } - std::cout << "ListPhysicalPool fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "ListPhysicalPool fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -481,8 +474,8 @@ int MDSClient::ListLogicalPoolsInCluster(std::vector* pools) { } for (const auto& phyPool : phyPools) { std::vector lgPools; - ret = ListLogicalPoolsInPhysicalPool(phyPool.physicalpoolid(), - &lgPools); + ret = + ListLogicalPoolsInPhysicalPool(phyPool.physicalpoolid(), &lgPools); if (ret != 0) { std::cout << "ListLogicalPoolsInPhysicalPool " << phyPool.physicalpoolid() << " fail" << std::endl; @@ -493,8 +486,8 @@ int MDSClient::ListLogicalPoolsInCluster(std::vector* pools) { return 0; } -int MDSClient::ListLogicalPoolsInPhysicalPool(const PoolIdType& id, - std::vector* pools) { +int MDSClient::ListLogicalPoolsInPhysicalPool( + const PoolIdType& id, std::vector* pools) { assert(pools != nullptr); curve::mds::topology::ListLogicalPoolRequest request; curve::mds::topology::ListLogicalPoolResponse response; @@ -503,20 +496,19 @@ int MDSClient::ListLogicalPoolsInPhysicalPool(const PoolIdType& id, auto fp = &curve::mds::topology::TopologyService_Stub::ListLogicalPool; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "ListLogicalPool from all mds fail!" - << std::endl; + std::cout << "ListLogicalPool from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.logicalpoolinfos_size(); ++i) { pools->emplace_back(response.logicalpoolinfos(i)); } return 0; } - std::cout << "ListLogicalPool fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "ListLogicalPool fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -530,20 +522,19 @@ int MDSClient::ListZoneInPhysicalPool(const PoolIdType& id, auto fp = &curve::mds::topology::TopologyService_Stub::ListPoolZone; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "ListPoolZone from all mds fail!" - << std::endl; + std::cout << "ListPoolZone from all mds fail!" 
<< std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.zones_size(); ++i) { zones->emplace_back(response.zones(i)); } return 0; } - std::cout << "ListPoolZone fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "ListPoolZone fail with errCode: " << response.statuscode() + << std::endl; return -1; } @@ -557,55 +548,54 @@ int MDSClient::ListServersInZone(const ZoneIdType& id, auto fp = &curve::mds::topology::TopologyService_Stub::ListZoneServer; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "ListZoneServer from all mds fail!" - << std::endl; + std::cout << "ListZoneServer from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.serverinfo_size(); ++i) { servers->emplace_back(response.serverinfo(i)); } return 0; } - std::cout << "ListZoneServer fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "ListZoneServer fail with errCode: " << response.statuscode() + << std::endl; return -1; } -int MDSClient::ListChunkServersOnServer(const ServerIdType& id, - std::vector* chunkservers) { +int MDSClient::ListChunkServersOnServer( + const ServerIdType& id, std::vector* chunkservers) { assert(chunkservers != nullptr); curve::mds::topology::ListChunkServerRequest request; request.set_serverid(id); return ListChunkServersOnServer(&request, chunkservers); } -int MDSClient::ListChunkServersOnServer(const std::string& ip, - std::vector* chunkservers) { +int MDSClient::ListChunkServersOnServer( + const std::string& ip, std::vector* chunkservers) { assert(chunkservers != nullptr); curve::mds::topology::ListChunkServerRequest request; request.set_ip(ip); return ListChunkServersOnServer(&request, chunkservers); } -int MDSClient::ListChunkServersOnServer(ListChunkServerRequest* request, - std::vector* chunkservers) { +int MDSClient::ListChunkServersOnServer( + ListChunkServerRequest* request, + std::vector* chunkservers) { curve::mds::topology::ListChunkServerResponse response; curve::mds::topology::TopologyService_Stub stub(&channel_); auto fp = &curve::mds::topology::TopologyService_Stub::ListChunkServer; if (SendRpcToMds(request, &response, &stub, fp) != 0) { - std::cout << "ListChunkServer from all mds fail!" - << std::endl; + std::cout << "ListChunkServer from all mds fail!" 
<< std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.chunkserverinfos_size(); ++i) { const auto& chunkserver = response.chunkserverinfos(i); - // 跳过retired状态的chunkserver + // Skipping chunkserver in Retired State if (chunkserver.status() == ChunkServerStatus::RETIRED) { continue; } @@ -613,9 +603,9 @@ int MDSClient::ListChunkServersOnServer(ListChunkServerRequest* request, } return 0; } - std::cout << "ListChunkServer fail with errCode: " - << response.statuscode() << std::endl; - return -1; + std::cout << "ListChunkServer fail with errCode: " << response.statuscode() + << std::endl; + return -1; } int MDSClient::GetChunkServerInfo(const ChunkServerIdType& id, @@ -653,23 +643,22 @@ int MDSClient::GetChunkServerInfo(GetChunkServerInfoRequest* request, auto fp = &curve::mds::topology::TopologyService_Stub::GetChunkServer; if (SendRpcToMds(request, &response, &stub, fp) != 0) { - std::cout << "GetChunkServer from all mds fail!" - << std::endl; + std::cout << "GetChunkServer from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { chunkserver->CopyFrom(response.chunkserverinfo()); return 0; } - std::cout << "GetChunkServer fail with errCode: " - << response.statuscode() << std::endl; + std::cout << "GetChunkServer fail with errCode: " << response.statuscode() + << std::endl; return -1; } int MDSClient::GetCopySetsInChunkServer(const ChunkServerIdType& id, - std::vector* copysets) { + std::vector* copysets) { assert(copysets != nullptr); curve::mds::topology::GetCopySetsInChunkServerRequest request; curve::mds::topology::GetCopySetsInChunkServerResponse response; @@ -678,7 +667,7 @@ int MDSClient::GetCopySetsInChunkServer(const ChunkServerIdType& id, } int MDSClient::GetCopySetsInChunkServer(const std::string& csAddr, - std::vector* copysets) { + std::vector* copysets) { assert(copysets != nullptr); curve::mds::topology::GetCopySetsInChunkServerRequest request; curve::mds::topology::GetCopySetsInChunkServerResponse response; @@ -697,7 +686,7 @@ int MDSClient::GetCopySetsInChunkServer(const std::string& csAddr, } int MDSClient::SetCopysetsAvailFlag(const std::vector copysets, - bool availFlag) { + bool availFlag) { curve::mds::topology::SetCopysetsAvailFlagRequest request; curve::mds::topology::SetCopysetsAvailFlagResponse response; curve::mds::topology::TopologyService_Stub stub(&channel_); @@ -708,13 +697,12 @@ int MDSClient::SetCopysetsAvailFlag(const std::vector copysets, request.set_availflag(availFlag); auto fp = &curve::mds::topology::TopologyService_Stub::SetCopysetsAvailFlag; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "SetCopysetsAvailFlag from all mds fail!" - << std::endl; + std::cout << "SetCopysetsAvailFlag from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { return 0; } std::cout << "SetCopysetsAvailFlag fail with errCode: " @@ -728,13 +716,12 @@ int MDSClient::ListUnAvailCopySets(std::vector* copysets) { curve::mds::topology::TopologyService_Stub stub(&channel_); auto fp = &curve::mds::topology::TopologyService_Stub::ListUnAvailCopySets; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "ListUnAvailCopySets from all mds fail!" 
- << std::endl; + std::cout << "ListUnAvailCopySets from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { + response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.copysets_size(); ++i) { copysets->emplace_back(response.copysets(i)); } @@ -746,21 +733,21 @@ int MDSClient::ListUnAvailCopySets(std::vector* copysets) { } int MDSClient::GetCopySetsInChunkServer( - GetCopySetsInChunkServerRequest* request, - std::vector* copysets) { + GetCopySetsInChunkServerRequest* request, + std::vector* copysets) { curve::mds::topology::GetCopySetsInChunkServerResponse response; curve::mds::topology::TopologyService_Stub stub(&channel_); - auto fp = &curve::mds::topology::TopologyService_Stub::GetCopySetsInChunkServer; // NOLINT + auto fp = &curve::mds::topology::TopologyService_Stub:: + GetCopySetsInChunkServer; // NOLINT if (SendRpcToMds(request, &response, &stub, fp) != 0) { - std::cout << "GetCopySetsInChunkServer from all mds fail!" - << std::endl; + std::cout << "GetCopySetsInChunkServer from all mds fail!" << std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { - for (int i =0; i < response.copysetinfos_size(); ++i) { + response.statuscode() == kTopoErrCodeSuccess) { + for (int i = 0; i < response.copysetinfos_size(); ++i) { copysets->emplace_back(response.copysetinfos(i)); } return 0; @@ -783,14 +770,13 @@ int MDSClient::GetCopySetsInCluster(std::vector* copysets, auto fp = &curve::mds::topology::TopologyService_Stub::GetCopySetsInCluster; if (SendRpcToMds(&request, &response, &stub, fp) != 0) { - std::cout << "GetCopySetsInCluster from all mds fail!" - << std::endl; + std::cout << "GetCopySetsInCluster from all mds fail!" 
<< std::endl; return -1; } if (response.has_statuscode() && - response.statuscode() == kTopoErrCodeSuccess) { - for (int i =0; i < response.copysetinfos_size(); ++i) { + response.statuscode() == kTopoErrCodeSuccess) { + for (int i = 0; i < response.copysetinfos_size(); ++i) { copysets->emplace_back(response.copysetinfos(i)); } return 0; @@ -800,9 +786,7 @@ int MDSClient::GetCopySetsInCluster(std::vector* copysets, return -1; } - -int MDSClient::GetCopyset(PoolIdType lpid, - CopySetIdType copysetId, +int MDSClient::GetCopyset(PoolIdType lpid, CopySetIdType copysetId, CopysetInfo* copysetInfo) { curve::mds::topology::GetCopysetRequest request; curve::mds::topology::GetCopysetResponse response; @@ -843,8 +827,8 @@ int MDSClient::ListServersInCluster(std::vector* servers) { } for (const auto& zone : zones) { if (ListServersInZone(zone.zoneid(), servers) != 0) { - std::cout << "ListServersInZone fail, zoneId :" - << zone.zoneid() << std::endl; + std::cout << "ListServersInZone fail, zoneId :" << zone.zoneid() + << std::endl; return -1; } } @@ -853,7 +837,7 @@ int MDSClient::ListServersInCluster(std::vector* servers) { } int MDSClient::ListChunkServersInCluster( - std::vector* chunkservers) { + std::vector* chunkservers) { assert(chunkservers != nullptr); std::vector servers; if (ListServersInCluster(&servers) != 0) { @@ -869,8 +853,8 @@ int MDSClient::ListChunkServersInCluster( return 0; } -int MDSClient::ListChunkServersInCluster(std::map>* chunkservers) { +int MDSClient::ListChunkServersInCluster( + std::map>* chunkservers) { assert(chunkservers != nullptr); std::vector servers; if (ListServersInCluster(&servers) != 0) { @@ -880,8 +864,8 @@ int MDSClient::ListChunkServersInCluster(std::map chunkserverList; - if (ListChunkServersOnServer(server.serverid(), - &chunkserverList) != 0) { + if (ListChunkServersOnServer(server.serverid(), &chunkserverList) != + 0) { std::cout << "ListChunkServersOnServer fail!" 
<< std::endl; return -1; } @@ -889,7 +873,7 @@ int MDSClient::ListChunkServersInCluster(std::mapfind(server.physicalpoolid()); if (iter != chunkservers->end()) { iter->second.insert(iter->second.end(), chunkserverList.begin(), - chunkserverList.end()); + chunkserverList.end()); } else { chunkservers->emplace(server.physicalpoolid(), chunkserverList); } @@ -900,8 +884,8 @@ int MDSClient::ListChunkServersInCluster(std::map* onlineStatus) { assert(onlineStatus != nullptr); onlineStatus->clear(); - for (const auto &item : dummyServerMap_) { + for (const auto& item : dummyServerMap_) { std::string listenAddr; int res = GetListenAddrFromDummyPort(item.second, &listenAddr); - // 如果获取到的监听地址与记录的mds地址不一致,也认为不在线 + // If the obtained listening address does not match the recorded MDS + // address, it is also considered offline if (res != 0 || listenAddr != item.first) { onlineStatus->emplace(item.first, false); continue; @@ -943,7 +928,7 @@ int MDSClient::GetMetric(const std::string& metricName, std::string* value) { while (changeTimeLeft >= 0) { brpc::Controller cntl; MetricRet res = metricClient_.GetMetric(mdsAddrVec_[currentMdsIndex_], - metricName, value); + metricName, value); if (res == MetricRet::kOK) { return 0; } @@ -962,8 +947,7 @@ bool MDSClient::ChangeMDServer() { if (currentMdsIndex_ > static_cast(mdsAddrVec_.size() - 1)) { currentMdsIndex_ = 0; } - if (channel_.Init(mdsAddrVec_[currentMdsIndex_].c_str(), - nullptr) != 0) { + if (channel_.Init(mdsAddrVec_[currentMdsIndex_].c_str(), nullptr) != 0) { return false; } return true; @@ -971,14 +955,14 @@ bool MDSClient::ChangeMDServer() { std::vector MDSClient::GetCurrentMds() { std::vector leaderAddrs; - for (const auto &item : dummyServerMap_) { - // 获取status来判断正在服务的地址 + for (const auto& item : dummyServerMap_) { + // Obtain status to determine the address being served std::string status; - MetricRet ret = metricClient_.GetMetric(item.second, - kMdsStatusMetricName, &status); + MetricRet ret = + metricClient_.GetMetric(item.second, kMdsStatusMetricName, &status); if (ret != MetricRet::kOK) { - std::cout << "Get status metric from " << item.second - << " fail" << std::endl; + std::cout << "Get status metric from " << item.second << " fail" + << std::endl; continue; } if (status == kMdsStatusLeader) { @@ -995,7 +979,8 @@ int MDSClient::RapidLeaderSchedule(PoolIdType lpoolId) { request.set_logicalpoolid(lpoolId); - auto fp = &::curve::mds::schedule::ScheduleService_Stub::RapidLeaderSchedule; // NOLINT + auto fp = &::curve::mds::schedule::ScheduleService_Stub:: + RapidLeaderSchedule; // NOLINT if (0 != SendRpcToMds(&request, &response, &stub, fp)) { std::cout << "RapidLeaderSchedule fail" << std::endl; return -1; @@ -1006,7 +991,7 @@ int MDSClient::RapidLeaderSchedule(PoolIdType lpoolId) { return 0; } std::cout << "RapidLeaderSchedule fail with errCode: " - << response.statuscode() << std::endl; + << response.statuscode() << std::endl; return -1; } @@ -1027,8 +1012,8 @@ int MDSClient::SetLogicalPoolScanState(PoolIdType lpid, bool scanEnable) { auto retCode = response.statuscode(); if (retCode != ::curve::mds::topology::kTopoErrCodeSuccess) { - std::cout << "SetLogicalPoolScanState fail with retCode: " - << retCode << std::endl; + std::cout << "SetLogicalPoolScanState fail with retCode: " << retCode + << std::endl; return -1; } @@ -1037,7 +1022,7 @@ int MDSClient::SetLogicalPoolScanState(PoolIdType lpid, bool scanEnable) { int MDSClient::QueryChunkServerRecoverStatus( const std::vector& cs, - std::map *statusMap) { + std::map* statusMap) { 
assert(statusMap != nullptr); ::curve::mds::schedule::QueryChunkServerRecoverStatusRequest request; ::curve::mds::schedule::QueryChunkServerRecoverStatusResponse response; ::curve::mds::schedule::ScheduleService_Stub stub(&channel_); for (const auto& id : cs) { request.add_chunkserverid(id); } - auto fp = &::curve::mds::schedule::ScheduleService_Stub::QueryChunkServerRecoverStatus; // NOLINT + auto fp = &::curve::mds::schedule::ScheduleService_Stub:: + QueryChunkServerRecoverStatus; // NOLINT if (0 != SendRpcToMds(&request, &response, &stub, fp)) { std::cout << "QueryChunkServerRecoverStatus fail" << std::endl; return -1; } @@ -1056,13 +1042,13 @@ int MDSClient::QueryChunkServerRecoverStatus( if (response.statuscode() == ::curve::mds::schedule::kScheduleErrCodeSuccess) { for (auto it = response.recoverstatusmap().begin(); - it != response.recoverstatusmap().end(); ++it) { + it != response.recoverstatusmap().end(); ++it) { (*statusMap)[it->first] = it->second; } return 0; } std::cout << "QueryChunkServerRecoverStatus fail with errCode: " - << response.statuscode() << std::endl; + << response.statuscode() << std::endl; return -1; } @@ -1095,21 +1081,22 @@ int MDSClient::SendRpcToMds(Request* request, Response* response, T* obp, - void (T::*func)(google::protobuf::RpcController*, - const Request*, Response*, - google::protobuf::Closure*)) { + void (T::*func)(google::protobuf::RpcController*, + const Request*, Response*, + google::protobuf::Closure*)) { int changeTimeLeft = mdsAddrVec_.size() - 1; while (changeTimeLeft >= 0) { brpc::Controller cntl; cntl.set_timeout_ms(FLAGS_rpcTimeout); (obp->*func)(&cntl, request, response, nullptr); if (!cntl.Failed()) { - // 如果成功了,就返回0,对response的判断放到上一层 + // If the RPC succeeded, return 0; judging the response content is + // left to the caller return 0; } bool needRetry = - (cntl.ErrorCode() != EHOSTDOWN && - cntl.ErrorCode() != ETIMEDOUT && - cntl.ErrorCode() != brpc::ELOGOFF); + bool needRetry = + (cntl.ErrorCode() != EHOSTDOWN && cntl.ErrorCode() != ETIMEDOUT && + cntl.ErrorCode() != brpc::ELOGOFF); uint64_t retryTimes = 0; while (needRetry && retryTimes < FLAGS_rpcRetryTimes) { cntl.Reset(); @@ -1120,10 +1107,13 @@ int MDSClient::SendRpcToMds(Request* request, Response* response, T* obp, } return 0; } - // 对于需要重试的错误,重试次数用完了还没成功就返回错误不切换 - // ERPCTIMEDOUT比较特殊,这种情况下,mds可能切换了也可能没切换,所以 - // 需要重试并且重试次数用完后切换 - // 只有不需要重试的,也就是mds不在线的才会去切换mds + // For retryable errors, if the retries are exhausted without success, + // return an error without switching mds. ERPCTIMEDOUT is special: the + // mds may or may not have switched, so retry first and switch once the + // retries are exhausted. Only non-retryable errors, i.e. the mds being + // offline, cause an immediate switch to another mds. 
if (needRetry && cntl.ErrorCode() != brpc::ERPCTIMEDOUT) { std::cout << "Send RPC to mds fail, error content: " << cntl.ErrorText() << std::endl; diff --git a/src/tools/mds_client.h b/src/tools/mds_client.h index fbbc94ffab..dc14bd84bc 100644 --- a/src/tools/mds_client.h +++ b/src/tools/mds_client.h @@ -72,504 +72,532 @@ using curve::mds::topology::ServerInfo; using curve::mds::topology::ZoneIdType; using curve::mds::topology::ZoneInfo; +using curve::common::Authenticator; using curve::mds::schedule::RapidLeaderScheduleRequst; using curve::mds::schedule::RapidLeaderScheduleResponse; -using curve::common::Authenticator; -namespace curve { -namespace tool { - -using curve::mds::topology::PoolsetInfo; - -enum class GetSegmentRes { - kOK = 0, // 获取segment成功 - kSegmentNotAllocated = -1, // segment不存在 - kFileNotExists = -2, // 文件不存在 - kOtherError = -3 // 其他错误 -}; - -using AllocMap = std::unordered_map; - -struct CreateFileContext { - curve::mds::FileType type; - std::string name; - uint64_t length; - uint64_t stripeUnit; - uint64_t stripeCount; - std::string poolset; -}; - -class MDSClient { - public: - MDSClient() : currentMdsIndex_(0), userName_(""), - password_(""), isInited_(false) {} - virtual ~MDSClient() = default; - - /** - * @brief 初始化channel - * @param mdsAddr mds的地址,支持多地址,用","分隔 - * @return 成功返回0,失败返回-1 - */ - virtual int Init(const std::string& mdsAddr); - - /** - * @brief 初始化channel - * @param mdsAddr mds的地址,支持多地址,用","分隔 - * @param dummyPort dummy port列表,只输入一个的话 - * 所有mds用同样的dummy port,用字符串分隔有多个的话 - * 为每个mds设置不同的dummy port - * @return 成功返回0,失败返回-1 - */ - virtual int Init(const std::string& mdsAddr, - const std::string& dummyPort); - - /** - * @brief 获取文件fileInfo - * @param fileName 文件名 - * @param[out] fileInfo 文件fileInfo,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetFileInfo(const std::string& fileName, FileInfo* fileInfo); - - /** - * @brief 获取文件或目录分配大小 - * @param fileName 文件名 - * @param[out] allocSize 文件或目录分配大小,返回值为0时有效 - * @param[out] allocMap 文件在各个池子分配的情况 - * @return 成功返回0,失败返回-1 - */ - virtual int GetAllocatedSize(const std::string& fileName, - uint64_t* allocSize, - AllocMap* allocMap = nullptr); - - /** - * @brief 获取文件或目录的大小 - * @param fileName 文件名 - * @param[out] fileSize 文件或目录分配大小,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetFileSize(const std::string& fileName, - uint64_t* fileSize); - - /** - * @brief 将目录下所有的fileInfo列出来 - * @param dirName 目录名 - * @param[out] files 目录下的所有文件fileInfo,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListDir(const std::string& dirName, - std::vector* files); - - /** - * @brief 获取指定偏移的segment放到segment里面 - * @param fileName 文件名 - * @param offset 偏移值 - * @param[out] segment 文件中指定偏移的segmentInfo,返回值为0时有效 - * @return 返回GetSegmentRes,区分segment未分配和其他错误 - */ - virtual GetSegmentRes GetSegmentInfo(const std::string& fileName, - uint64_t offset, - PageFileSegment* segment); - - /** - * @brief 删除文件 - * @param fileName 文件名 - * @param forcedelete 是否强制删除 - * @return 成功返回0,失败返回-1 - */ - virtual int DeleteFile(const std::string& fileName, - bool forcedelete = false); - - /** - * @brief create pageFile or directory - * @param fileName file name or dir name - * @param length 文件长度 - * @param normalFile is file or dir - * @param stripeUnit stripe unit size - * @param stripeCount the amount of stripes - * @return 成功返回0,失败返回-1 - */ - virtual int CreateFile(const CreateFileContext& context); - - /** - * @brief List all volumes on copysets - * @param copysets - * @param[out] fileNames volumes name - * @return return 0 when success, -1 when 
fail - */ - virtual int ListVolumesOnCopyset( - const std::vector& copysets, - std::vector* fileNames); - - /** - * @brief 扩容卷 - * @param fileName 文件名 - * @param newSize 扩容后的卷大小 - * @return 成功返回0,失败返回-1 - */ - virtual int ExtendVolume(const std::string& fileName, uint64_t newSize); - - /** - * @brief 列出client的dummyserver的地址 - * @param[out] clientAddrs client地址列表,返回0时有效 - * @param[out] listClientsInRepo 把数据库里的client也列出来 - * @return 成功返回0,失败返回-1 - */ - virtual int ListClient(std::vector* clientAddrs, - bool listClientsInRepo = false); - - /** - * @brief 获取copyset中的chunkserver列表 - * @param logicalPoolId 逻辑池id - * @param copysetId copyset id - * @param[out] csLocs chunkserver位置的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetChunkServerListInCopySet(const PoolIdType& logicalPoolId, - const CopySetIdType& copysetId, - std::vector* csLocs); - - /** - * @brief 获取copyset中的chunkserver列表 - * @param logicalPoolId 逻辑池id - * @param copysetIds 要查询的copysetId的列表 - * @param[out] csServerInfos copyset成员的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, - const std::vector& copysetIds, - std::vector* csServerInfos); - - /** - * @brief 获取集群中的物理池列表 - * @param[out] pools 物理池信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListPhysicalPoolsInCluster( - std::vector* pools); - - - /** - * @brief 获取物理池中的逻辑池列表 - * @param id 物理池id - * @param[out] pools 逻辑池信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListLogicalPoolsInPhysicalPool(const PoolIdType& id, - std::vector* pools); - - /** - * @brief 集群中的逻辑池列表 - * @param[out] pools 逻辑池信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListLogicalPoolsInCluster(std::vector* pools); - - /** - * @brief 获取物理池中的zone列表 - * @param id 物理池id - * @param[out] zones zone信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListZoneInPhysicalPool(const PoolIdType& id, - std::vector* zones); - - /** - * @brief 获取zone中的server列表 - * @param id zone id - * @param[out] servers server信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListServersInZone(const ZoneIdType& id, - std::vector* servers); - - /** - * @brief 获取server上的chunkserver的列表 - * @param id server id - * @param[out] chunkservers chunkserver信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListChunkServersOnServer(const ServerIdType& id, - std::vector* chunkservers); - - /** - * @brief 获取server上的chunkserver的列表 - * @param ip server ip - * @param[out] chunkservers chunkserver信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListChunkServersOnServer(const std::string& ip, - std::vector* chunkservers); - - /** - * @brief 获取chunkserver的详细信息 - * @param id chunkserver id - * @param[out] chunkserver chunkserver的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetChunkServerInfo(const ChunkServerIdType& id, - ChunkServerInfo* chunkserver); - - /** - * @brief 获取chunkserver的详细信息 - * @param csAddr chunkserver的地址,ip:port的格式 - * @param[out] chunkserver chunkserver的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetChunkServerInfo(const std::string& csAddr, - ChunkServerInfo* chunkserver); - - /** - * @brief 获取chunkserver上的所有copyset - * @param id chunkserver的id - * @param[out] copysets chunkserver上copyset的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetCopySetsInChunkServer(const ChunkServerIdType& id, - std::vector* copysets); - - /** - * @brief 获取chunkserver上的所有copyset - * @param csAddr chunkserver的地址,ip:port的格式 - * @param[out] copysets chunkserver上copyset的详细信息,返回值为0时有效 - * 
@return 成功返回0,失败返回-1 - */ - virtual int GetCopySetsInChunkServer(const std::string& csAddr, - std::vector* copysets); - - /** - * @brief Get all copysets in cluster - * @param[out] the copyset list - * @param[in] filterScaning whether need to filter copyset which in scaning - * @return 0 if success, else return -1 - */ - virtual int GetCopySetsInCluster(std::vector* copysetInfos, - bool filterScaning = false); - - /** - * @brief Get specify copyset - * @param[in] lpid logical pool id - * @param[in] copysetId copyset id - * @param[out] copysetInfo the copyset - * @return 0 if success, else return -1 - */ - virtual int GetCopyset(PoolIdType lpid, - CopySetIdType copysetId, - CopysetInfo* copysetInfo); - - /** - * @brief 列出集群中的所有server - * @param[out] servers server信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListServersInCluster(std::vector* servers); - - /** - * @brief 列出集群中的所有chunkserver - * @param[out] chunkservers server信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int ListChunkServersInCluster( - std::vector* chunkservers); - - /** - * @brief list all the chunkservers with poolid in cluster - * @param[out] chunkservers chunkserver info - * @return succeed return 0; failed return -1; - */ - virtual int ListChunkServersInCluster(std::map>* chunkservers); - - /** - * @brief set copysets available flag - * @param copysets copysets going to be set available flag - * @param availFlag availble or not - * @return succeed return 0; failed return -1; - */ - virtual int SetCopysetsAvailFlag(const std::vector copysets, - bool availFlag); - - /** - * @brief list all copysets that are unavailable - * @param[out] copysets copysets that are not availble currently - * @return succeed return 0; failed return -1; - */ - virtual int ListUnAvailCopySets(std::vector* copysets); - - /** - * @brief 获取mds的某个metric的值 - * @param metricName metric的名字 - * @param[out] value metric的值,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetMetric(const std::string& metricName, uint64_t* value); - - /** - * @brief 获取mds的某个metric的值 - * @param metricName metric的名子 - * @param[out] value metric的值,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual int GetMetric(const std::string& metricName, std::string* value); - - /** - * @brief 设置userName,访问namespace接口的时候调用 - * @param userName 用户名 - */ - void SetUserName(const std::string& userName) { - userName_ = userName; - } - - /** - * @brief 设置password,访问namespace接口的时候调用 - * @param password 密码 - */ - void SetPassword(const std::string& password) { - password_ = password; - } - - /** - * @brief 获取mds地址列表 - * @return mds地址的列表 - */ - virtual const std::vector& GetMdsAddrVec() const { - return mdsAddrVec_; - } - - virtual const std::map& GetDummyServerMap() - const { - return dummyServerMap_; - } - - /** - * @brief 获取当前mds的地址 - */ - virtual std::vector GetCurrentMds(); - - /** - * @brief 向mds发送rpc触发快速leader均衡 - */ - virtual int RapidLeaderSchedule(PoolIdType lpid); - - /** - * @brief Set specify logical pool to enable/disable scan - * @param[in] lpid logical pool id - * @param[in] scanEnable enable(true)/disable(false) scan - * @return 0 if set success, else return -1 - */ - virtual int SetLogicalPoolScanState(PoolIdType lpid, bool scanEnable); - - /** - * @brief 获取mds在线状态, - * dummyserver在线且dummyserver记录的listen addr - * 与mds地址一致才认为在线 - * @param[out] onlineStatus mds在线状态,返回0时有效 - * @return 成功返回0,失败返回-1 - */ - virtual void GetMdsOnlineStatus(std::map* onlineStatus); - - /** - * @brief 获取指定chunkserver的恢复状态 - * @param[in] cs 需要查询的chunkserver列表 - * @param[out] 
statusMap 返回各chunkserver对应的恢复状态
-     * @return 成功返回0,失败返回-1
-     */
-    int QueryChunkServerRecoverStatus(
-        const std::vector& cs,
-        std::map *statusMap);
-
-    virtual int UpdateFileThrottleParams(
-        const std::string& fileName, const curve::mds::ThrottleParams& params);
-
-    int ListPoolset(std::vector* poolsets);
-
-    int ListChunkFormatStatus(std::vector* formatStatuses);
-
- private:
-    /**
-     * @brief 切换mds
-     * @return 切换成功返回true,所有mds都失败则返回false
-     */
-    bool ChangeMDServer();
-
-    /**
-     * @brief 向mds发送RPC,为了复用代码
-     * @param
-     * @return 成功返回0,失败返回-1
-     */
-    template
-    int SendRpcToMds(Request* request, Response* response, T* obp,
-                void (T::*func)(google::protobuf::RpcController*,
-                            const Request*, Response*,
-                            google::protobuf::Closure*));
-
-    /**
-     * @brief 获取server上的chunkserver的列表
-     * @param request 要发送的request
-     * @param[out] chunkservers chunkserver信息的列表,返回值为0时有效
-     * @return 成功返回0,失败返回-1
-     */
-    int ListChunkServersOnServer(ListChunkServerRequest* request,
-                                 std::vector* chunkservers);
-
-    /**
-     * @brief 获取chunkserver的详细信息
-     * @param request 要发送的request
-     * @param[out] chunkserver chunkserver的详细信息,返回值为0时有效
-     * @return 成功返回0,失败返回-1
-     */
-    int GetChunkServerInfo(GetChunkServerInfoRequest* request,
-                           ChunkServerInfo* chunkserver);
-
-    /**
-     * @brief 获取chunkserver的详细信息
-     * @param request 要发送的request
-     * @param[out] copysets chunkserver上copyset的详细信息,返回值为0时有效
-     * @return 成功返回0,失败返回-1
-     */
-    int GetCopySetsInChunkServer(
-        GetCopySetsInChunkServerRequest* request,
-        std::vector* copysets);
-
-    /**
-     * @brief 初始化dummy server地址
-     * @param dummyPort dummy server端口列表
-     * @return 成功返回0,失败返回-1
-     */
-    int InitDummyServerMap(const std::string& dummyPort);
-
-    /**
-     * @brief 通过dummyServer获取mds的监听地址
-     * @param dummyAddr dummyServer的地址
-     * @param[out] listenAddr mds的监听地址
-     * @return 成功返回0,失败返回-1
-     */
-    int GetListenAddrFromDummyPort(const std::string& dummyAddr,
-                                   std::string* listenAddr);
-
-
-    // 填充signature
-    template
-    void FillUserInfo(T* request);
-
-    // 用于发送http请求的client
-    MetricClient metricClient_;
-    // 向mds发送RPC的channel
-    brpc::Channel channel_;
-    // 保存mds地址的vector
-    std::vector mdsAddrVec_;
-    // 保存mds地址对应的dummy server的地址
-    std::map dummyServerMap_;
-    // 保存当前mds在mdsAddrVec_中的索引
-    int currentMdsIndex_;
-    // 用户名
-    std::string userName_;
-    // 密码
-    std::string password_;
-    // 避免重复初始化
-    bool isInited_;
-};
-}  // namespace tool
-}  // namespace curve
-
-#endif  // SRC_TOOLS_MDS_CLIENT_H_
+namespace curve
+{
+    namespace tool
+    {
+
+        using curve::mds::topology::PoolsetInfo;
+
+        enum class GetSegmentRes
+        {
+            kOK = 0,                    // Successfully obtained segment
+            kSegmentNotAllocated = -1,  // segment does not exist
+            kFileNotExists = -2,        // File does not exist
+            kOtherError = -3            // Other errors
+        };
+
+        using AllocMap = std::unordered_map;
+
+        struct CreateFileContext
+        {
+            curve::mds::FileType type;
+            std::string name;
+            uint64_t length;
+            uint64_t stripeUnit;
+            uint64_t stripeCount;
+            std::string poolset;
+        };
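A quick illustration (a sketch with placeholder values, not part of the patch): CreateFileContext bundles what used to be a long parameter list, so a caller fills the fields it needs and hands the struct to MDSClient::CreateFile declared below.

    curve::tool::CreateFileContext ctx;
    ctx.type = curve::mds::FileType::INODE_PAGEFILE;  // a volume rather than a directory
    ctx.name = "/test";
    ctx.length = 20ULL * 1024 * 1024 * 1024;          // 20 GiB
    ctx.stripeUnit = 32768;
    ctx.stripeCount = 32;
    ctx.poolset = "default";
    // MDSClient::CreateFile(ctx) returns 0 on success and -1 on failure.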
+
+        class MDSClient
+        {
+        public:
+            MDSClient()
+                : currentMdsIndex_(0),
+                  userName_(""),
+                  password_(""),
+                  isInited_(false) {}
+            virtual ~MDSClient() = default;
+
+            /**
+             * @brief Initialize channel
+             * @param mdsAddr Address of mds, supporting multiple addresses
+             * separated by ','
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int Init(const std::string &mdsAddr);
+
+            /**
+             * @brief Initialize channel
+             * @param mdsAddr Address of mds, supporting multiple addresses
+             * separated by ','
+             * @param dummyPort dummy port list; if only one port is entered,
+             * all mds use the same dummy port; if multiple ports separated by
+             * ',' are entered, a different dummy port is set for each mds
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int Init(const std::string &mdsAddr,
+                             const std::string &dummyPort);
+
+            /**
+             * @brief Get file fileInfo
+             * @param fileName File name
+             * @param[out] fileInfo file fileInfo, valid when the return
+             * value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int GetFileInfo(const std::string &fileName,
+                                    FileInfo *fileInfo);
+
+            /**
+             * @brief Get file or directory allocation size
+             * @param fileName File name
+             * @param[out] allocSize file or directory allocation size, valid
+             * when the return value is 0
+             * @param[out] allocMap Allocation of files in various pools
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int GetAllocatedSize(const std::string &fileName,
+                                         uint64_t *allocSize,
+                                         AllocMap *allocMap = nullptr);
+
+            /**
+             * @brief Get the size of a file or directory
+             * @param fileName File name
+             * @param[out] fileSize File or directory size, valid when the
+             * return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int GetFileSize(const std::string &fileName,
+                                    uint64_t *fileSize);
+
+            /**
+             * @brief List all fileInfo in the directory
+             * @param dirName directory name
+             * @param[out] files All fileInfo in the directory, valid when
+             * the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int ListDir(const std::string &dirName,
+                                std::vector *files);
+
+            /**
+             * @brief Get the segment at the specified offset and store it in
+             * segment
+             * @param fileName File name
+             * @param offset offset value
+             * @param[out] segment The segmentInfo of the specified offset in
+             * the file, valid when the return value is 0
+             * @return returns GetSegmentRes, distinguishing between
+             * unallocated segments and other errors
+             */
+            virtual GetSegmentRes GetSegmentInfo(const std::string &fileName,
+                                                 uint64_t offset,
+                                                 PageFileSegment *segment);
+
+            /**
+             * @brief Delete file
+             * @param fileName File name
+             * @param forcedelete whether to force delete the file
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int DeleteFile(const std::string &fileName,
+                                   bool forcedelete = false);
+
+            /**
+             * @brief create pageFile or directory
+             * @param fileName file name or dir name
+             * @param length File length
+             * @param normalFile is file or dir
+             * @param stripeUnit stripe unit size
+             * @param stripeCount the amount of stripes
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int CreateFile(const CreateFileContext &context);
+
+            /**
+             * @brief List all volumes on copysets
+             * @param copysets
+             * @param[out] fileNames volumes name
+             * @return return 0 when success, -1 when fail
+             */
+            virtual int ListVolumesOnCopyset(
+                const std::vector &copysets,
+                std::vector *fileNames);
+
+            /**
+             * @brief Expand volume
+             * @param fileName File name
+             * @param newSize The volume size after expansion
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int ExtendVolume(const std::string &fileName,
+                                     uint64_t newSize);
+
+            /**
+             * @brief List the address of the client's dummyserver
+             * @param[out] clientAddrs client address list, valid when 0 is
+             * returned
+             * @param[out] listClientsInRepo also lists the clients in the
+             * database
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int ListClient(std::vector *clientAddrs,
+                                   bool listClientsInRepo = false);
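To see how these file-level calls compose, here is a minimal caller sketch that walks every allocated segment of a volume; the address is a placeholder and the proto type names are assumed from nameserver2.proto.

    int DumpAllocatedSegments(curve::tool::MDSClient* client) {
        curve::mds::FileInfo info;
        if (client->Init("127.0.0.1:6666") != 0 ||
            client->GetFileInfo("/test", &info) != 0) {
            return -1;
        }
        for (uint64_t off = 0; off < info.length(); off += info.segmentsize()) {
            curve::mds::PageFileSegment segment;
            auto res = client->GetSegmentInfo("/test", off, &segment);
            if (res == curve::tool::GetSegmentRes::kSegmentNotAllocated) {
                continue;  // hole: nothing allocated at this offset yet
            } else if (res != curve::tool::GetSegmentRes::kOK) {
                return -1;  // kFileNotExists or kOtherError
            }
        }
        return 0;
    }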
+
+            /**
+             * @brief Get the list of chunkservers in the copyset
+             * @param logicalPoolId Logical pool id
+             * @param copysetId copyset id
+             * @param[out] csLocs List of chunkserver locations, valid when
+             * the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int GetChunkServerListInCopySet(
+                const PoolIdType &logicalPoolId,
+                const CopySetIdType &copysetId,
+                std::vector *csLocs);
+
+            /**
+             * @brief Get the list of chunkservers in the copysets
+             * @param logicalPoolId Logical pool id
+             * @param copysetIds List of copysetIds to query
+             * @param[out] csServerInfos A list of copyset members, valid
+             * when the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int GetChunkServerListInCopySets(
+                const PoolIdType &logicalPoolId,
+                const std::vector &copysetIds,
+                std::vector *csServerInfos);
+
+            /**
+             * @brief Get a list of physical pools in the cluster
+             * @param[out] pools A list of physical pool information, valid
+             * when the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int ListPhysicalPoolsInCluster(
+                std::vector *pools);
+
+            /**
+             * @brief Get a list of logical pools in the physical pool
+             * @param id Physical pool id
+             * @param[out] pools List of logical pool information, valid when
+             * the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int ListLogicalPoolsInPhysicalPool(
+                const PoolIdType &id, std::vector *pools);
+
+            /**
+             * @brief List of logical pools in the cluster
+             * @param[out] pools List of logical pool information, valid when
+             * the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int ListLogicalPoolsInCluster(std::vector *pools);
+
+            /**
+             * @brief Get a list of zones in the physical pool
+             * @param id Physical pool id
+             * @param[out] zones A list of zone information, valid when the
+             * return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int ListZoneInPhysicalPool(const PoolIdType &id,
+                                               std::vector *zones);
+
+            /**
+             * @brief Get a list of servers in the zone
+             * @param id zone id
+             * @param[out] servers List of server information, valid when the
+             * return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int ListServersInZone(const ZoneIdType &id,
+                                          std::vector *servers);
+
+            /**
+             * @brief Get a list of chunkservers on the server
+             * @param id server id
+             * @param[out] chunkservers A list of chunkserver information,
+             * valid when the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int ListChunkServersOnServer(
+                const ServerIdType &id, std::vector *chunkservers);
+
+            /**
+             * @brief Get a list of chunkservers on the server
+             * @param ip server ip
+             * @param[out] chunkservers A list of chunkserver information,
+             * valid when the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int ListChunkServersOnServer(
+                const std::string &ip, std::vector *chunkservers);
+
+            /**
+             * @brief Get detailed information about a chunkserver
+             * @param id chunkserver id
+             * @param[out] chunkserver Detailed information of the
+             * chunkserver, valid when the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int GetChunkServerInfo(const ChunkServerIdType &id,
+                                           ChunkServerInfo *chunkserver);
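These listing calls are meant to be chained top-down; a sketch of walking the whole topology tree (proto type names assumed from topology.proto, error handling mostly elided):

    void WalkTopology(curve::tool::MDSClient* client) {
        std::vector<curve::mds::topology::PhysicalPoolInfo> pools;
        if (client->ListPhysicalPoolsInCluster(&pools) != 0) return;
        for (const auto& pool : pools) {
            std::vector<curve::mds::topology::ZoneInfo> zones;
            client->ListZoneInPhysicalPool(pool.physicalpoolid(), &zones);
            for (const auto& zone : zones) {
                std::vector<curve::mds::topology::ServerInfo> servers;
                client->ListServersInZone(zone.zoneid(), &servers);
                for (const auto& server : servers) {
                    std::vector<curve::mds::topology::ChunkServerInfo> chunkservers;
                    client->ListChunkServersOnServer(server.serverid(),
                                                     &chunkservers);
                }
            }
        }
    }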
+
+            /**
+             * @brief Get detailed information about a chunkserver
+             * @param csAddr The address of the chunkserver, in the format of
+             * ip:port
+             * @param[out] chunkserver Detailed information of the
+             * chunkserver, valid when the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int GetChunkServerInfo(const std::string &csAddr,
+                                           ChunkServerInfo *chunkserver);
+
+            /**
+             * @brief Get all copysets on the chunkserver
+             * @param id The id of the chunkserver
+             * @param[out] copysets Details of the copysets on the
+             * chunkserver, valid when the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int GetCopySetsInChunkServer(const ChunkServerIdType &id,
+                                                 std::vector *copysets);
+
+            /**
+             * @brief Get all copysets on the chunkserver
+             * @param csAddr The address of the chunkserver, in the format of
+             * ip:port
+             * @param[out] copysets Details of the copysets on the
+             * chunkserver, valid when the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int GetCopySetsInChunkServer(const std::string &csAddr,
+                                                 std::vector *copysets);
+
+            /**
+             * @brief Get all copysets in cluster
+             * @param[out] copysetInfos the copyset list
+             * @param[in] filterScaning whether to filter out copysets that
+             * are being scanned
+             * @return 0 if success, else return -1
+             */
+            virtual int GetCopySetsInCluster(std::vector *copysetInfos,
+                                             bool filterScaning = false);
+
+            /**
+             * @brief Get specify copyset
+             * @param[in] lpid logical pool id
+             * @param[in] copysetId copyset id
+             * @param[out] copysetInfo the copyset
+             * @return 0 if success, else return -1
+             */
+            virtual int GetCopyset(PoolIdType lpid, CopySetIdType copysetId,
+                                   CopysetInfo *copysetInfo);
+
+            /**
+             * @brief List all servers in the cluster
+             * @param[out] servers List of server information, valid when the
+             * return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int ListServersInCluster(std::vector *servers);
+
+            /**
+             * @brief List all chunkservers in the cluster
+             * @param[out] chunkservers A list of chunkserver information,
+             * valid when the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int ListChunkServersInCluster(
+                std::vector *chunkservers);
+
+            /**
+             * @brief list all the chunkservers with poolid in cluster
+             * @param[out] chunkservers chunkserver info
+             * @return succeed return 0; failed return -1;
+             */
+            virtual int ListChunkServersInCluster(
+                std::map> *chunkservers);
+
+            /**
+             * @brief set copysets available flag
+             * @param copysets copysets going to be set available flag
+             * @param availFlag available or not
+             * @return succeed return 0; failed return -1;
+             */
+            virtual int SetCopysetsAvailFlag(const std::vector copysets,
+                                             bool availFlag);
+
+            /**
+             * @brief list all copysets that are unavailable
+             * @param[out] copysets copysets that are not available currently
+             * @return succeed return 0; failed return -1;
+             */
+            virtual int ListUnAvailCopySets(std::vector *copysets);
+
+            /**
+             * @brief Get the value of a metric from mds
+             * @param metricName The name of the metric
+             * @param[out] value The value of the metric, valid when the
+             * return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int GetMetric(const std::string &metricName,
+                                  uint64_t *value);
+
+            /**
+             * @brief Get the value of a metric from mds
+             * @param metricName The name of the metric
+             * @param[out] value The value of the metric, valid when the
+             * return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            virtual int GetMetric(const std::string &metricName,
+                                  std::string *value);
+
+            /**
+             * @brief Set userName; called when accessing the namespace
+             * interface
+             * @param userName username
+             */
+            void SetUserName(const std::string &userName) {
+                userName_ = userName;
+            }
+
+            /**
+             * @brief Set the password; called when accessing the namespace
+             * interface
+             * @param password password
+             */
+            void SetPassword(const std::string &password) {
+                password_ = password;
+            }
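As a small example of the metric getters, the metric name below comes from metric_name.h later in this patch; address selection and failover happen inside the client:

    uint64_t operatorNum = 0;
    if (client.GetMetric("mds_scheduler_metric_operator_num", &operatorNum) == 0) {
        std::cout << "scheduler operators in flight: " << operatorNum << std::endl;
    }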
+
+            /**
+             * @brief Get mds address list
+             * @return List of mds addresses
+             */
+            virtual const std::vector &GetMdsAddrVec() const
+            {
+                return mdsAddrVec_;
+            }
+
+            virtual const std::map &GetDummyServerMap() const
+            {
+                return dummyServerMap_;
+            }
+
+            /**
+             * @brief Get the address of the current mds
+             */
+            virtual std::vector GetCurrentMds();
+
+            /**
+             * @brief Send rpc to mds to trigger rapid leader balancing
+             */
+            virtual int RapidLeaderSchedule(PoolIdType lpid);
+
+            /**
+             * @brief Set specify logical pool to enable/disable scan
+             * @param[in] lpid logical pool id
+             * @param[in] scanEnable enable(true)/disable(false) scan
+             * @return 0 if set success, else return -1
+             */
+            virtual int SetLogicalPoolScanState(PoolIdType lpid,
+                                                bool scanEnable);
+
+            /**
+             * @brief Get mds online status; an mds is considered online only
+             * when its dummyserver is online and the listen addr recorded by
+             * the dummyserver matches the mds address
+             * @param[out] onlineStatus mds online status, valid when 0 is
+             * returned
+             */
+            virtual void GetMdsOnlineStatus(std::map *onlineStatus);
+
+            /**
+             * @brief Get the recovery status of the specified chunkservers
+             * @param[in] cs List of chunkservers to query
+             * @param[out] statusMap returns the recovery status
+             * corresponding to each chunkserver
+             * @return returns 0 for success, -1 for failure
+             */
+            int QueryChunkServerRecoverStatus(
+                const std::vector &cs,
+                std::map *statusMap);
+
+            virtual int UpdateFileThrottleParams(
+                const std::string &fileName,
+                const curve::mds::ThrottleParams &params);
+
+            int ListPoolset(std::vector *poolsets);
+
+            int ListChunkFormatStatus(std::vector *formatStatuses);
+
+        private:
+            /**
+             * @brief Switch mds
+             * @return returns true if the switch is successful, and false if
+             * all mds fail
+             */
+            bool ChangeMDServer();
+
+            /**
+             * @brief Send RPC to mds; shared by the public methods for code
+             * reuse
+             * @return returns 0 for success, -1 for failure
+             */
+            template
+            int SendRpcToMds(Request *request, Response *response, T *obp,
+                             void (T::*func)(google::protobuf::RpcController *,
+                                             const Request *, Response *,
+                                             google::protobuf::Closure *));
+
+            /**
+             * @brief Get a list of chunkservers on the server
+             * @param request The request to be sent
+             * @param[out] chunkservers A list of chunkserver information,
+             * valid when the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            int ListChunkServersOnServer(ListChunkServerRequest *request,
+                                         std::vector *chunkservers);
+
+            /**
+             * @brief Get detailed information about a chunkserver
+             * @param request The request to be sent
+             * @param[out] chunkserver Detailed information of the
+             * chunkserver, valid when the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            int GetChunkServerInfo(GetChunkServerInfoRequest *request,
+                                   ChunkServerInfo *chunkserver);
+
+            /**
+             * @brief Get all copysets on the chunkserver
+             * @param request The request to be sent
+             * @param[out] copysets Details of the copysets on the
+             * chunkserver, valid when the return value is 0
+             * @return returns 0 for success, -1 for failure
+             */
+            int GetCopySetsInChunkServer(GetCopySetsInChunkServerRequest *request,
+                                         std::vector *copysets);
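For reference, SendRpcToMds above is the shared retry/failover path; an instantiation would look roughly like the following. The request, response, and stub names are assumptions based on nameserver2.proto, not code from this patch:

    curve::mds::GetFileInfoRequest request;
    curve::mds::GetFileInfoResponse response;
    curve::mds::CurveFSService_Stub stub(&channel_);
    int ret = SendRpcToMds(&request, &response, &stub,
                           &curve::mds::CurveFSService_Stub::GetFileInfo);
    // On RPC failure the implementation can call ChangeMDServer() and retry
    // against the next entry of mdsAddrVec_.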
+
+            /**
+             * @brief Initialize dummy server address
+             * @param dummyPort dummy server port list
+             * @return returns 0 for success, -1 for failure
+             */
+            int InitDummyServerMap(const std::string &dummyPort);
+
+            /**
+             * @brief Obtain the listening address of mds through the
+             * dummyServer
+             * @param dummyAddr Address of the dummyServer
+             * @param[out] listenAddr mds listening address
+             * @return returns 0 for success, -1 for failure
+             */
+            int GetListenAddrFromDummyPort(const std::string &dummyAddr,
+                                           std::string *listenAddr);
+
+            // Fill in the signature
+            template
+            void FillUserInfo(T *request);
+
+            // Client used to send HTTP requests
+            MetricClient metricClient_;
+            // Channel for sending RPCs to mds
+            brpc::Channel channel_;
+            // Vector storing the mds addresses
+            std::vector mdsAddrVec_;
+            // Address of the dummy server corresponding to each mds address
+            std::map dummyServerMap_;
+            // Index of the current mds in mdsAddrVec_
+            int currentMdsIndex_;
+            // User name
+            std::string userName_;
+            // Password
+            std::string password_;
+            // Avoid duplicate initialization
+            bool isInited_;
+        };
+    }  // namespace tool
+}  // namespace curve
+
+#endif  // SRC_TOOLS_MDS_CLIENT_H_
diff --git a/src/tools/metric_client.cpp b/src/tools/metric_client.cpp
index 776347f738..fc5012d58a 100644
--- a/src/tools/metric_client.cpp
+++ b/src/tools/metric_client.cpp
@@ -30,9 +30,9 @@ DECLARE_uint64(rpcRetryTimes);
 namespace curve {
 namespace tool {
-MetricRet MetricClient::GetMetric(const std::string &addr,
-                                  const std::string &metricName,
-                                  std::string *value) {
+MetricRet MetricClient::GetMetric(const std::string& addr,
+                                  const std::string& metricName,
+                                  std::string* value) {
     brpc::Channel httpChannel;
     brpc::ChannelOptions options;
     brpc::Controller cntl;
@@ -70,15 +70,16 @@ MetricRet MetricClient::GetMetric(const std::string &addr,
         res = GetValueFromAttachment(attachment, value);
         return (res == 0) ? MetricRet::kOK : MetricRet::kOtherErr;
     }
-    // 这里不输出错误,因为对mds有切换的可能,把打印的处理交给外部
+    // Errors are not printed here because the mds may have been switched;
+    // printing is left to the caller
     bool notExist = cntl.ErrorCode() == brpc::EHTTP &&
                     cntl.http_response().status_code() == kHttpCodeNotFound;
     return notExist ? MetricRet::kNotFound : MetricRet::kOtherErr;
 }
-MetricRet MetricClient::GetMetricUint(const std::string &addr,
-                                      const std::string &metricName,
-                                      uint64_t *value) {
+MetricRet MetricClient::GetMetricUint(const std::string& addr,
+                                      const std::string& metricName,
+                                      uint64_t* value) {
     std::string str;
     MetricRet res = GetMetric(addr, metricName, &str);
     if (res != MetricRet::kOK) {
@@ -92,9 +93,9 @@ MetricRet MetricClient::GetMetricUint(const std::string &addr,
     return MetricRet::kOK;
 }
-MetricRet MetricClient::GetConfValueFromMetric(const std::string &addr,
-                                               const std::string &metricName,
-                                               std::string *confValue) {
+MetricRet MetricClient::GetConfValueFromMetric(const std::string& addr,
+                                               const std::string& metricName,
+                                               std::string* confValue) {
     std::string jsonString;
     brpc::Controller cntl;
     MetricRet res = GetMetric(addr, metricName, &jsonString);
@@ -118,8 +119,8 @@ MetricRet MetricClient::GetConfValueFromMetric(const std::string &addr,
     return MetricRet::kOK;
 }
-int MetricClient::GetValueFromAttachment(const std::string &attachment,
-                                         std::string *value) {
+int MetricClient::GetValueFromAttachment(const std::string& attachment,
+                                         std::string* value) {
     auto pos = attachment.find(":");
     if (pos == std::string::npos) {
         std::cout << "parse response attachment fail!"
<< std::endl; diff --git a/src/tools/metric_client.h b/src/tools/metric_client.h index 94e29a545f..103f8da7f3 100644 --- a/src/tools/metric_client.h +++ b/src/tools/metric_client.h @@ -25,65 +25,68 @@ #include #include + #include #include -#include "src/tools/common.h" + #include "src/common/string_util.h" +#include "src/tools/common.h" #include "src/tools/curve_tool_define.h" namespace curve { namespace tool { enum class MetricRet { - // 成功 + // Success kOK = 0, - // metric未找到 + // Metric not found kNotFound = -1, - // 其他错误 - kOtherErr = -2, + // Other errors + kOtherErr = -2, }; const int kHttpCodeNotFound = 404; class MetricClient { public: - virtual ~MetricClient() {} + virtual ~MetricClient() {} - /** - * @brief 从指定地址获取metric - * @param addr 要访问的地址 - * @param metricName 要获取的metric name - * @param[out] value metric的值 - * @return 错误码 + /** + * @brief Get metric from specified address + * @param addr Address to access + * @param metricName The metric name to obtain + * @param[out] value The value of metric + * @return error code */ virtual MetricRet GetMetric(const std::string& addr, const std::string& metricName, std::string* value); - /** - * @brief 从指定地址获取metric,并转换成uint - * @param addr 要访问的地址 - * @param metricName 要获取的metric name - * @param[out] value metric的值 - * @return 错误码 + /** + * @brief retrieves metric from the specified address and converts it to + * uint + * @param addr Address to access + * @param metricName The metric name to obtain + * @param[out] value The value of metric + * @return error code */ virtual MetricRet GetMetricUint(const std::string& addr, const std::string& metricName, uint64_t* value); /** - * @brief 从metric获取配置的值 - * @param addr 要访问的地址 - * @param metricName 要获取的metric name - * @param[out] confValue metric中配置的值 - * @return 错误码 + * @brief Get the configured value from metric + * @param addr Address to access + * @param metricName The metric name to obtain + * @param[out] confValue The value configured in metric + * @return error code */ virtual MetricRet GetConfValueFromMetric(const std::string& addr, const std::string& metricName, std::string* confValue); private: - // 从response attachment解析出metric值 + // Parse the metric value from the response attachment int GetValueFromAttachment(const std::string& attachment, std::string* value); }; diff --git a/src/tools/metric_name.h b/src/tools/metric_name.h index 3f85d971a4..e576481ff5 100644 --- a/src/tools/metric_name.h +++ b/src/tools/metric_name.h @@ -22,131 +22,135 @@ #include -#include #include +#include #ifndef SRC_TOOLS_METRIC_NAME_H_ #define SRC_TOOLS_METRIC_NAME_H_ - -namespace curve { -namespace tool { - -// common metric name -const char kCurveVersionMetricName[] = "curve_version"; - -// snapshot clone server metric name -const char kSnapshotCloneConfMetricName[] = - "snapshot_clone_server_config_server_address"; -const char kSnapshotCloneStatusMetricName[] = "snapshotcloneserver_status"; -const char kSnapshotCloneStatusActive[] = "active"; - -// mds metric name -const char kLogicalPoolMetricPrefix[] = "topology_metric_logicalPool_"; -const char kChunkServerMetricPrefix[] = "chunkserver_"; -const char kOperatorNumMetricName[] = "mds_scheduler_metric_operator_num"; -const char kProcessCmdLineMetricName[] = "process_cmdline"; -const char kSechduleOpMetricpPrefix[] = "mds_scheduler_metric_"; -const char kMdsListenAddrMetricName[] = "mds_config_mds_listen_addr"; -const char kMdsStatusMetricName[] = "mds_status"; -const char kMdsStatusLeader[] = "leader"; -// operator名称 -const char kTotalOpName[] = 
"operator"; -const char kChangeOpName[] = "change_peer"; -const char kAddOpName[] = "add_peer"; -const char kRemoveOpName[] = "remove_peer"; -const char kTransferOpName[] = "transfer_leader"; - - -inline std::string GetPoolTotalChunkSizeName( - const std::string& poolName) { - std::string tmpName = kLogicalPoolMetricPrefix + - poolName + "_chunkSizeTotalBytes"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetPoolUsedChunkSizeName( - const std::string& poolName) { - std::string tmpName = kLogicalPoolMetricPrefix + - poolName + "_chunkSizeUsedBytes"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetPoolLogicalCapacityName( - const std::string& poolName) { - std::string tmpName = kLogicalPoolMetricPrefix + - poolName + "_logicalCapacity"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetPoolLogicalAllocName( - const std::string& poolName) { - std::string tmpName = kLogicalPoolMetricPrefix + - poolName + "_logicalAlloc"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetCSLeftChunkName(const std::string& csAddr) { - std::string tmpName = kChunkServerMetricPrefix + - csAddr + "_chunkfilepool_left"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetCSLeftWalSegmentName(const std::string& csAddr) { - std::string tmpName = kChunkServerMetricPrefix + - csAddr + "_walfilepool_left"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetUseWalPoolName(const std::string& csAddr) { - std::string tmpName = kChunkServerMetricPrefix + - csAddr + "_config_copyset_raft_log_uri"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetUseChunkFilePoolAsWalPoolName(const std::string& csAddr) { - std::string tmpName = kChunkServerMetricPrefix + - csAddr + "_config_walfilepool_use_chunk_file_pool"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline std::string GetOpNumMetricName(const std::string& opName) { - std::string tmpName = kSechduleOpMetricpPrefix + - opName + "_num"; - std::string metricName; - bvar::to_underscored_name(&metricName, tmpName); - return metricName; -} - -inline bool SupportOpName(const std::string& opName) { - return opName == kTotalOpName || opName == kChangeOpName - || opName == kAddOpName || opName == kRemoveOpName - || opName == kTransferOpName; -} - -inline void PrintSupportOpName() { - std::cout << kTotalOpName << ", " << kChangeOpName - << ", " << kAddOpName << ", " << kRemoveOpName - << ", " << kTransferOpName << std::endl; -} - -} // namespace tool -} // namespace curve - -#endif // SRC_TOOLS_METRIC_NAME_H_ +namespace curve +{ + namespace tool + { + + // common metric name + const char kCurveVersionMetricName[] = "curve_version"; + + // snapshot clone server metric name + const char kSnapshotCloneConfMetricName[] = + "snapshot_clone_server_config_server_address"; + const char kSnapshotCloneStatusMetricName[] = "snapshotcloneserver_status"; + const char kSnapshotCloneStatusActive[] = "active"; + + // mds metric name + const char kLogicalPoolMetricPrefix[] = "topology_metric_logicalPool_"; + const char 
kChunkServerMetricPrefix[] = "chunkserver_"; + const char kOperatorNumMetricName[] = "mds_scheduler_metric_operator_num"; + const char kProcessCmdLineMetricName[] = "process_cmdline"; + const char kSechduleOpMetricpPrefix[] = "mds_scheduler_metric_"; + const char kMdsListenAddrMetricName[] = "mds_config_mds_listen_addr"; + const char kMdsStatusMetricName[] = "mds_status"; + const char kMdsStatusLeader[] = "leader"; + // operator Name + const char kTotalOpName[] = "operator"; + const char kChangeOpName[] = "change_peer"; + const char kAddOpName[] = "add_peer"; + const char kRemoveOpName[] = "remove_peer"; + const char kTransferOpName[] = "transfer_leader"; + + inline std::string GetPoolTotalChunkSizeName(const std::string &poolName) + { + std::string tmpName = + kLogicalPoolMetricPrefix + poolName + "_chunkSizeTotalBytes"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetPoolUsedChunkSizeName(const std::string &poolName) + { + std::string tmpName = + kLogicalPoolMetricPrefix + poolName + "_chunkSizeUsedBytes"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetPoolLogicalCapacityName(const std::string &poolName) + { + std::string tmpName = + kLogicalPoolMetricPrefix + poolName + "_logicalCapacity"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetPoolLogicalAllocName(const std::string &poolName) + { + std::string tmpName = kLogicalPoolMetricPrefix + poolName + "_logicalAlloc"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetCSLeftChunkName(const std::string &csAddr) + { + std::string tmpName = + kChunkServerMetricPrefix + csAddr + "_chunkfilepool_left"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetCSLeftWalSegmentName(const std::string &csAddr) + { + std::string tmpName = + kChunkServerMetricPrefix + csAddr + "_walfilepool_left"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetUseWalPoolName(const std::string &csAddr) + { + std::string tmpName = + kChunkServerMetricPrefix + csAddr + "_config_copyset_raft_log_uri"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetUseChunkFilePoolAsWalPoolName(const std::string &csAddr) + { + std::string tmpName = kChunkServerMetricPrefix + csAddr + + "_config_walfilepool_use_chunk_file_pool"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline std::string GetOpNumMetricName(const std::string &opName) + { + std::string tmpName = kSechduleOpMetricpPrefix + opName + "_num"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; + } + + inline bool SupportOpName(const std::string &opName) + { + return opName == kTotalOpName || opName == kChangeOpName || + opName == kAddOpName || opName == kRemoveOpName || + opName == kTransferOpName; + } + + inline void PrintSupportOpName() + { + std::cout << kTotalOpName << ", " << kChangeOpName << ", " << kAddOpName + << ", " << kRemoveOpName << ", " << kTransferOpName << std::endl; + } + + } // namespace tool +} // namespace curve + +#endif // SRC_TOOLS_METRIC_NAME_H_ diff 
--git a/src/tools/namespace_tool.cpp b/src/tools/namespace_tool.cpp index 8d6119b75d..b0b039a835 100644 --- a/src/tools/namespace_tool.cpp +++ b/src/tools/namespace_tool.cpp @@ -28,8 +28,9 @@ DEFINE_string(fileName, "", "file name"); DEFINE_string(dirName, "", "directory name"); -DEFINE_string(expireTime, "7d", "Time for file in recyclebin exceed expire time " // NOLINT - "will be deleted (default: 7d)"); +DEFINE_string(expireTime, "7d", + "Time for file in recyclebin exceed expire time " // NOLINT + "will be deleted (default: 7d)"); DEFINE_bool(forcedelete, false, "force delete file or not"); DEFINE_uint64(fileLength, 20, "file length (GB)"); DEFINE_uint64(newSize, 30, "the new size of expanded volume(GB)"); @@ -37,11 +38,14 @@ DEFINE_string(poolset, "", "specify the poolset name"); DEFINE_bool(isTest, false, "is unit test or not"); DEFINE_uint64(offset, 0, "offset to query chunk location"); DEFINE_uint64(rpc_timeout, 3000, "millisecond for rpc timeout"); -DEFINE_bool(showAllocSize, true, "If specified, the allocated size will not be computed"); // NOLINT -DEFINE_bool(showFileSize, true, "If specified, the file size will not be computed"); // NOLINT +DEFINE_bool(showAllocSize, true, + "If specified, the allocated size will not be computed"); // NOLINT +DEFINE_bool(showFileSize, true, + "If specified, the file size will not be computed"); // NOLINT DECLARE_string(mdsAddr); -DEFINE_bool(showAllocMap, false, "If specified, the allocated size in each" - " logical pool will be print"); +DEFINE_bool(showAllocMap, false, + "If specified, the allocated size in each" + " logical pool will be print"); DEFINE_string(throttleType, "", "throttle type"); DEFINE_uint64(limit, 0, "throttle limit"); @@ -66,19 +70,15 @@ int NameSpaceTool::Init() { } bool NameSpaceTool::SupportCommand(const std::string& command) { - return (command == kGetCmd || command == kListCmd - || command == kSegInfoCmd - || command == kDeleteCmd - || command == kCreateCmd - || command == kExtendCmd - || command == kCleanRecycleCmd - || command == kChunkLocatitonCmd - || command == kUpdateThrottle - || command == kListPoolsets); + return (command == kGetCmd || command == kListCmd || + command == kSegInfoCmd || command == kDeleteCmd || + command == kCreateCmd || command == kExtendCmd || + command == kCleanRecycleCmd || command == kChunkLocatitonCmd || + command == kUpdateThrottle || command == kListPoolsets); } -// 根据命令行参数选择对应的操作 -int NameSpaceTool::RunCommand(const std::string &cmd) { +// Select the corresponding operation based on command line parameters +int NameSpaceTool::RunCommand(const std::string& cmd) { if (Init() != 0) { std::cout << "Init NameSpaceTool failed" << std::endl; return -1; @@ -92,12 +92,12 @@ int NameSpaceTool::RunCommand(const std::string &cmd) { } else if (cmd == kSegInfoCmd) { return PrintSegmentInfo(fileName); } else if (cmd == kDeleteCmd) { - // 单元测试不判断输入 + // Unit testing does not judge input if (FLAGS_isTest) { return core_->DeleteFile(fileName, FLAGS_forcedelete); } - std::cout << "Are you sure you want to delete " - << fileName << "?" << "(yes/no)" << std::endl; + std::cout << "Are you sure you want to delete " << fileName << "?" 
+ << "(yes/no)" << std::endl; std::string str; std::cin >> str; if (str == "yes") { @@ -163,29 +163,71 @@ int NameSpaceTool::RunCommand(const std::string &cmd) { } } -void NameSpaceTool::PrintHelp(const std::string &cmd) { +void NameSpaceTool::PrintHelp(const std::string& cmd) { std::cout << "Example: " << std::endl; if (cmd == kGetCmd || cmd == kListCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test [-mdsAddr=127.0.0.1:6666]" // NOLINT - " [-showAllocSize=false] [-showFileSize=false] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + std::cout << "curve_ops_tool " << cmd + << " -fileName=/test [-mdsAddr=127.0.0.1:6666]" // NOLINT + " [-showAllocSize=false] [-showFileSize=false] " + "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT } else if (cmd == kSegInfoCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + std::cout << "curve_ops_tool " << cmd + << " -fileName=/test [-mdsAddr=127.0.0.1:6666] " + "[-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT } else if (cmd == kCleanRecycleCmd) { - std::cout << "curve_ops_tool " << cmd << " [-fileName=/cinder] [-expireTime=1(s|m|h|d|M|y)] [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT - std::cout << "If -fileName is specified, delete the files in recyclebin that the original directory is fileName" << std::endl; // NOLINT - std::cout << "expireTime: s=second, m=minute, h=hour, d=day, M=month, y=year" << std::endl; // NOLINT + std::cout + << "curve_ops_tool " << cmd + << " [-fileName=/cinder] [-expireTime=1(s|m|h|d|M|y)] " + "[-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT + std::cout << "If -fileName is specified, delete the files in " + "recyclebin that the original directory is fileName" + << std::endl; // NOLINT + std::cout + << "expireTime: s=second, m=minute, h=hour, d=day, M=month, y=year" + << std::endl; // NOLINT } else if (cmd == kCreateCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test -userName=test -password=123 -fileLength=20 [--poolset=default] [-stripeUnit=32768] [-stripeCount=32] [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT - std::cout << "curve_ops_tool " << cmd << " -dirName=/dir -userName=test -password=123 [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT - std::cout << "The first example can create a volume and the second create a directory." << std::endl; // NOLINT + std::cout + << "curve_ops_tool " << cmd + << " -fileName=/test -userName=test -password=123 -fileLength=20 " + "[--poolset=default] [-stripeUnit=32768] [-stripeCount=32] " + "[-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT + std::cout + << "curve_ops_tool " << cmd + << " -dirName=/dir -userName=test -password=123 " + "[-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" + << std::endl; // NOLINT + std::cout << "The first example can create a volume and the second " + "create a directory." 
+                  << std::endl;  // NOLINT
     } else if (cmd == kExtendCmd) {
-        std::cout << "curve_ops_tool " << cmd << " -fileName=/test -userName=test -password=123 -newSize=30 [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl;  // NOLINT
+        std::cout
+            << "curve_ops_tool " << cmd
+            << " -fileName=/test -userName=test -password=123 -newSize=30 "
+               "[-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]"
+            << std::endl;  // NOLINT
     } else if (cmd == kDeleteCmd) {
-        std::cout << "curve_ops_tool " << cmd << " -fileName=/test -userName=test -password=123 -forcedelete=true [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl;  // NOLINT
+        std::cout << "curve_ops_tool " << cmd
+                  << " -fileName=/test -userName=test -password=123 "
+                     "-forcedelete=true [-mdsAddr=127.0.0.1:6666] "
+                     "[-confPath=/etc/curve/tools.conf]"
+                  << std::endl;  // NOLINT
     } else if (cmd == kChunkLocatitonCmd) {
-        std::cout << "curve_ops_tool " << cmd << " -fileName=/test -offset=16777216 [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl;  // NOLINT
+        std::cout
+            << "curve_ops_tool " << cmd
+            << " -fileName=/test -offset=16777216 [-mdsAddr=127.0.0.1:6666] "
+               "[-confPath=/etc/curve/tools.conf]"
+            << std::endl;  // NOLINT
     } else if (cmd == kUpdateThrottle) {
-        std::cout << "curve_ops_tool " << cmd << " -fileName=/test -throttleType=(IOPS_TOTAL|IOPS_READ|IOPS_WRITE|BPS_TOTAL|BPS_READ|BPS_WRITE) -limit=20000 [-burst=30000] [-burstLength=10]" << std::endl;  // NOLINT
+        std::cout
+            << "curve_ops_tool " << cmd
+            << " -fileName=/test "
+               "-throttleType=(IOPS_TOTAL|IOPS_READ|IOPS_WRITE|BPS_TOTAL|BPS_"
+               "READ|BPS_WRITE) -limit=20000 [-burst=30000] [-burstLength=10]"
+            << std::endl;  // NOLINT
     } else {
         std::cout << "command not found!" << std::endl;
     }
@@ -204,7 +246,8 @@ int NameSpaceTool::PrintFileInfoAndActualSize(const std::string& fullName,
                                               const FileInfo& fileInfo) {
     PrintFileInfo(fileInfo);
     int ret = GetAndPrintAllocSize(fullName);
-    // 如果是目录的话,计算目录中的文件大小(用户创建时指定的)
+    // If it is a directory, calculate the file size in the directory
+    // (specified by the user at creation time)
     if (fileInfo.filetype() == curve::mds::FileType::INODE_DIRECTORY) {
         ret = GetAndPrintFileSize(fullName);
     }
@@ -255,14 +298,14 @@ void NameSpaceTool::PrintFileInfo(const FileInfo& fileInfo) {
     curve::common::SplitString(fileInfoStr, "\n", &items);
     for (const auto& item : items) {
         if (item.compare(0, 5, "ctime") == 0) {
-            // ctime是微妙,打印的时候只打印到秒
+            // ctime is in microseconds; only print down to the second
             time_t ctime = fileInfo.ctime() / 1000000;
             std::string standard;
             curve::common::TimeUtility::TimeStampToStandard(ctime, &standard);
             std::cout << "ctime: " << standard << std::endl;
             continue;
         }
-        // 把length转换成GB
+        // Convert length to GB
         if (item.compare(0, 6, "length") == 0) {
             uint64_t length = fileInfo.length();
             double fileSize = static_cast(length) / curve::mds::kGB;
@@ -315,15 +358,15 @@ int NameSpaceTool::PrintPoolsets() {
     for (const auto& poolset : poolsets) {
         const std::string str = absl::StrFormat(
-            "id: %3d, name: %s, type: %s, desc: `%s`", poolset.poolsetid(),
-            poolset.poolsetname(), poolset.type(), poolset.desc());
+            "id: %3d, name: %s, type: %s, desc: `%s`", poolset.poolsetid(),
+            poolset.poolsetname(), poolset.type(), poolset.desc());
         std::cout << str << std::endl;
     }
     return 0;
 }
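A worked example of the ctime conversion in PrintFileInfo above, with an illustrative raw value:

    uint64_t ctimeUs = 1697190000123456ULL;  // fileInfo.ctime(), microseconds
    time_t ctimeSec = ctimeUs / 1000000;     // 1697190000; sub-second part dropped
    // TimeStampToStandard then renders 1697190000 as a human-readable time.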
"GetFileSegments fail!" << std::endl; @@ -358,14 +401,13 @@ void NameSpaceTool::PrintSegment(const PageFileSegment& segment) { if (segment.chunks(i).has_copysetid()) { copysetId = segment.chunks(i).copysetid(); } - std::cout << "chunkID: " << chunkId << ", copysetID: " - << copysetId << std::endl; + std::cout << "chunkID: " << chunkId << ", copysetID: " << copysetId + << std::endl; } } - int NameSpaceTool::PrintChunkLocation(const std::string& fileName, - uint64_t offset) { + uint64_t offset) { uint64_t chunkId; std::pair copyset; if (core_->QueryChunkCopyset(fileName, offset, &chunkId, ©set) != 0) { @@ -375,13 +417,12 @@ int NameSpaceTool::PrintChunkLocation(const std::string& fileName, uint32_t logicPoolId = copyset.first; uint32_t copysetId = copyset.second; uint64_t groupId = (static_cast(logicPoolId) << 32) | copysetId; - std::cout << "chunkId: " << chunkId - << ", logicalPoolId: " << logicPoolId - << ", copysetId: " << copysetId - << ", groupId: " << groupId << std::endl; + std::cout << "chunkId: " << chunkId << ", logicalPoolId: " << logicPoolId + << ", copysetId: " << copysetId << ", groupId: " << groupId + << std::endl; std::vector csLocs; - int res = core_->GetChunkServerListInCopySet(logicPoolId, - copysetId, &csLocs); + int res = + core_->GetChunkServerListInCopySet(logicPoolId, copysetId, &csLocs); if (res != 0) { std::cout << "GetChunkServerListInCopySet fail!" << std::endl; return -1; @@ -400,7 +441,7 @@ int NameSpaceTool::PrintChunkLocation(const std::string& fileName, } void NameSpaceTool::TrimEndingSlash(std::string* fileName) { - // 如果最后面有/,去掉 + // If there is/at the end, remove it if (fileName->size() > 1 && fileName->back() == '/') { fileName->pop_back(); } diff --git a/src/tools/namespace_tool.h b/src/tools/namespace_tool.h index 1af7f8ca8f..3594afafa6 100644 --- a/src/tools/namespace_tool.h +++ b/src/tools/namespace_tool.h @@ -26,22 +26,22 @@ #include #include -#include -#include -#include -#include #include #include +#include +#include +#include #include +#include #include "proto/nameserver2.pb.h" #include "proto/topology.pb.h" -#include "src/common/timeutility.h" #include "src/common/string_util.h" +#include "src/common/timeutility.h" #include "src/mds/common/mds_define.h" -#include "src/tools/namespace_tool_core.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" +#include "src/tools/namespace_tool_core.h" using curve::mds::FileInfo; using curve::mds::PageFileSegment; @@ -52,71 +52,72 @@ namespace tool { class NameSpaceTool : public CurveTool { public: - explicit NameSpaceTool(std::shared_ptr core) : - core_(core), inited_(false) {} + explicit NameSpaceTool(std::shared_ptr core) + : core_(core), inited_(false) {} /** - * @brief 打印用法 - * @param command:查询的命令 - * @return 无 + * @brief printing usage + * @param command: Query command + * @return None */ - void PrintHelp(const std::string &command) override; + void PrintHelp(const std::string& command) override; /** - * @brief 执行命令 - * @param command:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param command: The command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &command) override; + int RunCommand(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); private: - // 初始化 + // Initialize int 
  private:
-    // 初始化
+    // Initialize
     int Init();
-    // 打印fileInfo和文件占用的实际空间
+    // Print fileInfo and the actual space occupied by the file
     int PrintFileInfoAndActualSize(const std::string& fileName);
-    // 打印fileInfo和文件占用的实际空间
+    // Print fileInfo and the actual space occupied by the file
     int PrintFileInfoAndActualSize(const std::string& fullName,
                                    const FileInfo& fileInfo);
-    // 打印目录中的文件信息
+    // Print file information in the directory
     int PrintListDir(const std::string& dirName);
-    // 打印出文件的segment信息
-    int PrintSegmentInfo(const std::string &fileName);
+    // Print out the segment information of the file
+    int PrintSegmentInfo(const std::string& fileName);
-    // 打印fileInfo,把时间转化为易读的格式输出
+    // Print fileInfo and convert the time into a readable format for output
     void PrintFileInfo(const FileInfo& fileInfo);
-    // 打印PageFileSegment,把同一个chunk的信息打在同一行
+    // Print PageFileSegment, putting the information of the same chunk on
+    // the same line
     void PrintSegment(const PageFileSegment& segment);
-    // 打印chunk的位置信息
-    int PrintChunkLocation(const std::string& fileName,
-                       uint64_t offset);
+    // Print the location information of the chunk
+    int PrintChunkLocation(const std::string& fileName, uint64_t offset);
-    // 打印文件的分配大小
+    // Print the allocated size of the file
     int GetAndPrintAllocSize(const std::string& fileName);
-    // 打印目录的file size
+    // Print the file size of the directory
    int GetAndPrintFileSize(const std::string& fileName);
-    // 目前curve mds不支持/test/格式的文件名,需要把末尾的/去掉
+    // Currently, curve mds does not support file names in the /test/ format,
+    // so the trailing '/' needs to be removed
     void TrimEndingSlash(std::string* fileName);
     int PrintPoolsets();
  private:
-    // 核心逻辑
+    // Core logic
     std::shared_ptr core_;
-    // 是否初始化成功过
+    // Whether initialization has ever succeeded
     bool inited_;
 };
 }  // namespace tool
diff --git a/src/tools/namespace_tool_core.cpp b/src/tools/namespace_tool_core.cpp
index b69a6ecacc..4c1f8ff1a4 100644
--- a/src/tools/namespace_tool_core.cpp
+++ b/src/tools/namespace_tool_core.cpp
@@ -27,8 +27,8 @@ DEFINE_string(password, "", "password of administrator");
 namespace curve {
 namespace tool {
-NameSpaceToolCore::NameSpaceToolCore(std::shared_ptr client) :
-    client_(client) {
+NameSpaceToolCore::NameSpaceToolCore(std::shared_ptr client)
+    : client_(client) {
     client_->SetUserName(FLAGS_userName);
     client_->SetPassword(FLAGS_password);
 }
@@ -37,7 +37,7 @@ int NameSpaceToolCore::Init(const std::string& mdsAddr) {
     return client_->Init(mdsAddr);
 }
-int NameSpaceToolCore::GetFileInfo(const std::string &fileName,
+int NameSpaceToolCore::GetFileInfo(const std::string& fileName,
                                    FileInfo* fileInfo) {
     return client_->GetFileInfo(fileName, fileInfo);
 }
@@ -48,11 +48,10 @@ int NameSpaceToolCore::ListDir(const std::string& dirName,
 }
 int NameSpaceToolCore::GetChunkServerListInCopySet(
-    const PoolIdType& logicalPoolId,
-    const CopySetIdType& copysetId,
-    std::vector* csLocs) {
-    return client_->GetChunkServerListInCopySet(logicalPoolId,
-                                                copysetId, csLocs);
+    const PoolIdType& logicalPoolId, const CopySetIdType& copysetId,
+    std::vector* csLocs) {
+    return client_->GetChunkServerListInCopySet(logicalPoolId, copysetId,
+                                                csLocs);
 }
 int NameSpaceToolCore::DeleteFile(const std::string& fileName,
@@ -65,7 +64,7 @@ int NameSpaceToolCore::CreateFile(const CreateFileContext& ctx) {
 }
 int NameSpaceToolCore::ExtendVolume(const std::string& fileName,
-                                 uint64_t newSize) {
+                                    uint64_t newSize) {
     return client_->ExtendVolume(fileName, newSize);
 }
 int NameSpaceToolCore::GetAllocatedSize(const std::string& fileName,
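How the pieces connect, as a sketch of the wiring the curve_ops_tool entry point presumably performs (not code from this patch):

    auto client = std::make_shared<curve::tool::MDSClient>();
    auto core = std::make_shared<curve::tool::NameSpaceToolCore>(client);
    curve::tool::NameSpaceTool tool(core);
    if (curve::tool::NameSpaceTool::SupportCommand(curve::tool::kGetCmd)) {
        tool.RunCommand(curve::tool::kGetCmd);  // RunCommand calls Init() itself
    }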
@@ -85,7 +84,7 @@ int NameSpaceToolCore::GetFileSize(const std::string& fileName,
 }
 int NameSpaceToolCore::GetFileSegments(const std::string& fileName,
-                                    std::vector* segments) {
+                                       std::vector* segments) {
     FileInfo fileInfo;
     int res = GetFileInfo(fileName, &fileInfo);
     if (res != 0) {
@@ -96,28 +95,30 @@ int NameSpaceToolCore::GetFileSegments(const std::string& fileName,
 }
 int NameSpaceToolCore::GetFileSegments(const std::string& fileName,
-                                      const FileInfo& fileInfo,
-                                      std::vector* segments) {
-    // 只能获取page file的segment
+                                       const FileInfo& fileInfo,
+                                       std::vector* segments) {
+    // Only segments of page files can be obtained
     if (fileInfo.filetype() != curve::mds::FileType::INODE_PAGEFILE) {
         std::cout << "It is not a page file!" << std::endl;
         return -1;
     }
-    // 获取文件的segment数,并打印每个segment的详细信息
+    // Obtain the number of segments in the file and print detailed
+    // information for each segment
     uint64_t segmentNum = fileInfo.length() / fileInfo.segmentsize();
     uint64_t segmentSize = fileInfo.segmentsize();
     for (uint64_t i = 0; i < segmentNum; i++) {
         // load segment
         PageFileSegment segment;
-        GetSegmentRes res = client_->GetSegmentInfo(fileName,
-                                            i * segmentSize, &segment);
+        GetSegmentRes res =
+            client_->GetSegmentInfo(fileName, i * segmentSize, &segment);
         if (res == GetSegmentRes::kOK) {
             segments->emplace_back(segment);
         } else if (res == GetSegmentRes::kSegmentNotAllocated) {
             continue;
         } else if (res == GetSegmentRes::kFileNotExists) {
-            // 查询过程中文件被删掉了,清空segment并返回0
+            // During the query process the file was deleted, so clear the
+            // segments and return 0
             segments->clear();
             return 0;
         } else {
@@ -137,8 +138,7 @@ int NameSpaceToolCore::CleanRecycleBin(const std::string& dirName,
         return -1;
     }
-    auto needDelete = [](const FileInfo &fileInfo,
-                         uint64_t now,
+    auto needDelete = [](const FileInfo& fileInfo, uint64_t now,
                          uint64_t expireTime) -> bool {
         auto filename = fileInfo.filename();
         std::vector items;
@@ -147,9 +147,9 @@ int NameSpaceToolCore::CleanRecycleBin(const std::string& dirName,
         uint64_t dtime;
         auto n = items.size();
         auto id = std::to_string(fileInfo.id());
-        if (n >= 2 && items[n - 2] == id
-            && ::curve::common::StringToUll(items[n - 1], &dtime)
-            && now - dtime < expireTime) {
+        if (n >= 2 && items[n - 2] == id &&
+            ::curve::common::StringToUll(items[n - 1], &dtime) &&
+            now - dtime < expireTime) {
             return false;
         }
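The needDelete lambda above relies on the recycle-bin naming convention in which a recycled file keeps its original name plus "-<id>-<dtime>"; this is an assumption drawn from the parsing logic, with an illustrative name:

    std::vector<std::string> items;
    curve::common::SplitString("vol1-42-1697190000", "-", &items);
    // items[items.size() - 2] == "42"         -> compared against fileInfo.id()
    // items[items.size() - 1] == "1697190000" -> dtime; the file is kept while
    //                                            now - dtime < expireTime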
@@ -210,10 +210,9 @@ int NameSpaceToolCore::UpdateFileThrottle(const std::string& fileName,
     return client_->UpdateFileThrottleParams(fileName, params);
 }
-int NameSpaceToolCore::QueryChunkCopyset(const std::string& fileName,
-                                         uint64_t offset,
-                                         uint64_t* chunkId,
-                                         std::pair* copyset) {
+int NameSpaceToolCore::QueryChunkCopyset(
+    const std::string& fileName, uint64_t offset, uint64_t* chunkId,
+    std::pair* copyset) {
     if (!chunkId || !copyset) {
         std::cout << "The argument is a null pointer!" << std::endl;
         return -1;
@@ -229,11 +228,11 @@ int NameSpaceToolCore::QueryChunkCopyset(const std::string& fileName,
         return -1;
     }
     uint64_t segmentSize = fileInfo.segmentsize();
-    // segment对齐的offset
+    // segment-aligned offset
     uint64_t segOffset = (offset / segmentSize) * segmentSize;
     PageFileSegment segment;
-    GetSegmentRes segRes = client_->GetSegmentInfo(fileName,
-                                                   segOffset, &segment);
+    GetSegmentRes segRes =
+        client_->GetSegmentInfo(fileName, segOffset, &segment);
     if (segRes != GetSegmentRes::kOK) {
         if (segRes == GetSegmentRes::kSegmentNotAllocated) {
             std::cout << "Chunk has not been allocated!" << std::endl;
@@ -243,7 +242,7 @@ int NameSpaceToolCore::QueryChunkCopyset(const std::string& fileName,
             return -1;
         }
     }
-    // 在segment里面的chunk的索引
+    // Index of the chunk within the segment
     if (segment.chunksize() == 0) {
         std::cout << "No chunks in segment!" << std::endl;
         return -1;
diff --git a/src/tools/namespace_tool_core.h b/src/tools/namespace_tool_core.h
index febf0882f8..60e702e3f7 100644
--- a/src/tools/namespace_tool_core.h
+++ b/src/tools/namespace_tool_core.h
@@ -26,28 +26,28 @@
 #include
 #include
-#include
-#include
-#include
-#include
 #include
 #include
+#include
+#include
+#include
 #include
+#include
 #include "proto/nameserver2.pb.h"
 #include "proto/topology.pb.h"
-#include "src/common/timeutility.h"
-#include "src/common/string_util.h"
 #include "src/common/fs_util.h"
+#include "src/common/string_util.h"
+#include "src/common/timeutility.h"
 #include "src/mds/common/mds_define.h"
 #include "src/tools/mds_client.h"
+using curve::common::ChunkServerLocation;
 using curve::mds::FileInfo;
+using curve::mds::PageFileChunkInfo;
 using curve::mds::PageFileSegment;
 using curve::mds::StatusCode;
-using curve::mds::PageFileChunkInfo;
 using curve::mds::topology::kTopoErrCodeSuccess;
-using curve::common::ChunkServerLocation;
 namespace curve {
 namespace tool {
@@ -60,107 +60,116 @@ class NameSpaceToolCore {
     virtual ~NameSpaceToolCore() = default;
     /**
-     * @brief 初始化mds client
-     * @param mdsAddr mds的地址,支持多地址,用","分隔
-     * @return 成功返回0,失败返回-1
+     * @brief Initialize mds client
+     * @param mdsAddr Address of mds, supporting multiple addresses separated
+     * by ','
+     * @return returns 0 for success, -1 for failure
     */
     virtual int Init(const std::string& mdsAddr);
     /**
-     * @brief 获取文件fileInfo
-     * @param fileName 文件名
-     * @param[out] fileInfo 文件fileInfo,返回值为0时有效
-     * @return 成功返回0,失败返回-1
+     * @brief Get file fileInfo
+     * @param fileName File name
+     * @param[out] fileInfo file fileInfo, valid when the return value is 0
+     * @return returns 0 for success, -1 for failure
     */
     virtual int GetFileInfo(const std::string& fileName, FileInfo* fileInfo);
     /**
-     * @brief 将目录下所有的fileInfo列出来
-     * @param dirName 目录名
-     * @param[out] files 目录下的所有文件fileInfo,返回值为0时有效
-     * @return 成功返回0,失败返回-1
+     * @brief List all fileInfo in the directory
+     * @param dirName directory name
+     * @param[out] files All fileInfo in the directory, valid when the return
+     * value is 0
+     * @return returns 0 for success, -1 for failure
    */
     virtual int ListDir(const std::string& dirName,
                         std::vector* files);
     /**
-     * @brief 获取copyset中的chunkserver列表
-     * @param logicalPoolId 逻辑池id
-     * @param copysetId copyset id
-     * @param[out] csLocs chunkserver位置的列表,返回值为0时有效
-     * @return 成功返回0,失败返回-1
+     * @brief Get the list of chunkservers in the copyset
+     * @param logicalPoolId Logical pool id
+     * @param copysetId copyset id
+     * @param[out] csLocs List of chunkserver locations, valid when the
+     * return value is 0
+     * @return returns 0 for success, -1 for failure
    */
-    virtual int GetChunkServerListInCopySet(const PoolIdType& logicalPoolId,
-                                            const CopySetIdType& copysetId,
-                                            std::vector* csLocs);
+    virtual int GetChunkServerListInCopySet(
+        const PoolIdType& logicalPoolId, const CopySetIdType& copysetId,
+        std::vector* csLocs);
     /**
-     * @brief 删除文件
-     * @param fileName 文件名
-     * @param forcedelete 是否强制删除
-     * @return 成功返回0,失败返回-1
+     * @brief Delete file
+     * @param fileName File name
+     * @param forcedelete whether to force delete the file
+     * @return returns 0 for success, -1 for failure
    */
     virtual int DeleteFile(const std::string& fileName,
                            bool forcedelete = false);
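A worked example of the offset alignment in QueryChunkCopyset above, using the offset from the chunk-location help text and an illustrative segment size:

    uint64_t segmentSize = 1073741824ULL;  // 1 GiB, illustrative
    uint64_t offset = 16777216ULL;         // offset used in the help text
    uint64_t segOffset = (offset / segmentSize) * segmentSize;  // 0: first segment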
     /**
-     * @brief create pageFile or directory
-     * @param fileName file name or dir name
-     * @param length 文件长度
-     * @param normalFile is file or dir
-     * @param stripeUnit stripe unit size
-     * @param stripeCount the amount of stripes
-     * @return 成功返回0,失败返回-1
+     * @brief create pageFile or directory
+     * @param fileName file name or dir name
+     * @param length File length
+     * @param normalFile is file or dir
+     * @param stripeUnit stripe unit size
+     * @param stripeCount the amount of stripes
+     * @return returns 0 for success, -1 for failure
     */
     virtual int CreateFile(const CreateFileContext& ctx);
     /**
-     * @brief 扩容卷
-     * @param fileName 文件名
-     * @param newSize 扩容后的文件长度
-     * @return 成功返回0,失败返回-1
+     * @brief Expand volume
+     * @param fileName File name
+     * @param newSize The file length after expansion
+     * @return returns 0 for success, -1 for failure
     */
     virtual int ExtendVolume(const std::string& fileName, uint64_t newSize);
     /**
-     * @brief 计算文件或目录实际分配的空间
-     * @param fileName 文件名
-     * @param[out] allocSize 文件或目录已分配大小,返回值为0是有效
-     * @param[out] allocMap 在每个池子的分配量,返回值0时有效
-     * @return 成功返回0,失败返回-1
+     * @brief Calculate the actual allocated space of a file or directory
+     * @param fileName File name
+     * @param[out] allocSize Allocated size of the file or directory, valid
+     * when the return value is 0
+     * @param[out] allocMap The allocation amount in each pool, valid when
+     * the return value is 0
+     * @return returns 0 for success, -1 for failure
     */
     virtual int GetAllocatedSize(const std::string& fileName,
                                  uint64_t* allocSize,
                                  AllocMap* allocMap = nullptr);
     /**
-     * @brief 返回文件或目录的中的文件的用户申请的大小
-     * @param fileName 文件名
-     * @param[out] fileSize 文件或目录中用户申请的大小,返回值为0是有效
-     * @return 成功返回0,失败返回-1
+     * @brief Returns the user-requested size of the files in a file or
+     * directory
+     * @param fileName File name
+     * @param[out] fileSize The size requested by the user in the file or
+     * directory, valid when the return value is 0
+     * @return returns 0 for success, -1 for failure
     */
     virtual int GetFileSize(const std::string& fileName, uint64_t* fileSize);
     /**
-     * @brief 获取文件的segment信息并输出到segments里面
-     * @param fileName 文件名
-     * @param[out] segments 文件segment的列表
-     * @return 返回文件实际分配大小,失败则为-1
+     * @brief Get the segment information of the file and output it to
+     * segments
+     * @param fileName File name
+     * @param[out] segments List of segments in the file
+     * @return returns the actual allocated size of the file, or -1 on
+     * failure
     */
     virtual int GetFileSegments(const std::string& fileName,
-                            std::vector* segments);
+                                std::vector* segments);
     /**
-     * @brief 查询offset对应的chunk的id和所属的copyset
-     * @param fileName 文件名
-     * @param offset 文件中的偏移
-     * @param[out] chunkId chunkId,返回值为0时有效
-     * @param[out] copyset chunk对应的copyset,是logicalPoolId和copysetId的pair
-     * @return 成功返回0,失败返回-1
+     * @brief Query the id of the chunk corresponding to the offset and the
+     * copyset it belongs to
+     * @param fileName File name
+     * @param offset Offset in the file
+     * @param[out] chunkId chunkId, valid when the return value is 0
+     * @param[out] copyset The copyset corresponding to the chunk, a pair of
+     * logicalPoolId and copysetId
+     * @return returns 0 for success, -1 for failure
    */
     virtual int QueryChunkCopyset(const std::string& fileName, uint64_t offset,
-                              uint64_t* chunkId,
-                              std::pair* copyset);
+                                  uint64_t* chunkId,
+                                  std::pair* copyset);
     /**
     * @brief clean recycle bin
@@ -174,25 +183,24 @@ class NameSpaceToolCore {
     virtual int UpdateFileThrottle(const std::string& fileName,
                                    const std::string& throttleType,
-                                   const uint64_t limit,
-                                   const int64_t
     virtual int ListPoolset(std::vector<std::string>* poolsets);

  private:
     /**
-     * @brief 获取文件的segment信息并输出到segments里面
-     * @param fileName 文件名
-     * @param fileInfo 文件的fileInfo
-     * @param[out] segments 文件segment的列表
-     * @return 返回文件实际分配大小,失败则为-1
+     * @brief Get the segment information of the file and output it to
+     * segments
+     * @param fileName File name
+     * @param fileInfo The file's fileInfo
+     * @param[out] segments List of the file's segments
+     * @return the actually allocated size of the file, or -1 on failure
      */
-    int GetFileSegments(const std::string& fileName,
-                        const FileInfo& fileInfo,
+    int GetFileSegments(const std::string& fileName, const FileInfo& fileInfo,
                         std::vector<PageFileSegment>* segments);

-    // 向mds发送RPC的client
+    // Client for sending RPCs to the mds
     std::shared_ptr<MDSClient> client_;
 };
 }  // namespace tool
diff --git a/src/tools/raft_log_tool.cpp b/src/tools/raft_log_tool.cpp
index a4fb97e142..cbe40eb2b5 100644
--- a/src/tools/raft_log_tool.cpp
+++ b/src/tools/raft_log_tool.cpp
@@ -35,33 +35,31 @@ enum class CheckSumType {
     CHECKSUM_CRC32 = 1,
 };

-inline bool VerifyCheckSum(int type,
-                           const char* data, size_t len, uint32_t value) {
+inline bool VerifyCheckSum(int type, const char* data, size_t len,
+                           uint32_t value) {
     CheckSumType checkSunType = static_cast<CheckSumType>(type);
     switch (checkSunType) {
-        case CheckSumType::CHECKSUM_MURMURHASH32:
-            return (value == braft::murmurhash32(data, len));
-        case CheckSumType::CHECKSUM_CRC32:
-            return (value == braft::crc32(data, len));
-        default:
-            std::cout << "Unknown checksum_type=" << type <Fstat(fd_, &stBuf) != 0) {
-        std::cout << "Fail to get the stat of " << fileName
-                  << ", " << berror() << std::endl;
+        std::cout << "Fail to get the stat of " << fileName << ", " << berror()
+                  << std::endl;
         localFS_->Close(fd_);
         return -1;
     }
@@ -135,9 +133,7 @@ int SegmentParser::Init(const std::string& fileName) {
     return 0;
 }

-void SegmentParser::UnInit() {
-    localFS_->Close(fd_);
-}
+void SegmentParser::UnInit() { localFS_->Close(fd_); }

 bool SegmentParser::GetNextEntryHeader(EntryHeader* head) {
     if (off_ >= fileLen_) {
@@ -147,12 +143,11 @@ bool SegmentParser::GetNextEntryHeader(EntryHeader* head) {
     const ssize_t n = localFS_->Read(fd_, buf, off_, ENTRY_HEADER_SIZE);
     if (n != (ssize_t)ENTRY_HEADER_SIZE) {
         if (n < 0) {
-            std::cout << "read header from file, fd: " << fd_ << ", offset: "
-                      << off_ << ", " << berror() << std::endl;
+            std::cout << "read header from file, fd: " << fd_
+                      << ", offset: " << off_ << ", " << berror() << std::endl;
         } else {
             std::cout << "Read size not match, header size: "
-                      << ENTRY_HEADER_SIZE << ", read size: "
-                      << n << std::endl;
+                      << ENTRY_HEADER_SIZE << ", read size: " << n << std::endl;
         }
         return false;
     }
@@ -162,19 +157,20 @@ bool SegmentParser::GetNextEntryHeader(EntryHeader* head) {
     uint32_t data_len = 0;
     uint32_t data_checksum = 0;
     uint32_t header_checksum = 0;
-    butil::RawUnpacker(buf).unpack64((uint64_t&)term)
-                           .unpack32(meta_field)
-                           .unpack32(data_len)
-                           .unpack32(data_checksum)
-                           .unpack32(header_checksum);
+    butil::RawUnpacker(buf)
+        .unpack64((uint64_t&)term)
+        .unpack32(meta_field)
+        .unpack32(data_len)
+        .unpack32(data_checksum)
+        .unpack32(header_checksum);
     EntryHeader tmp;
     tmp.term = term;
     tmp.type = meta_field >> 24;
     tmp.checksum_type = (meta_field << 8) >> 24;
     tmp.data_len = data_len;
     tmp.data_checksum = data_checksum;
-    if (!VerifyCheckSum(tmp.checksum_type,
-                        buf, ENTRY_HEADER_SIZE - 4, header_checksum)) {
+    if (!VerifyCheckSum(tmp.checksum_type, buf, ENTRY_HEADER_SIZE - 4,
+                        header_checksum)) {
         std::cout << "Found corrupted header at offset=" << off_
                   << ", header=" << tmp;
         return false;
@@ -189,30 +185,28 @@ bool SegmentParser::GetNextEntryHeader(EntryHeader* head) {
 int RaftLogTool::ParseFirstIndexFromFileName(const std::string& fileName,
                                              int64_t* firstIndex) {
     int match = 0;
-    int64_t lastIndex = 0;
+    int64_t lastIndex = 0;
     std::string name;
-    auto pos = fileName.find_last_of("/");
+    auto pos = fileName.find_last_of("/");
     if (pos == std::string::npos) {
         name = fileName;
     } else {
         name = fileName.substr(pos + 1);
     }
-    match = sscanf(name.c_str(), BRAFT_SEGMENT_CLOSED_PATTERN,
-                   firstIndex, &lastIndex);
+    match = sscanf(name.c_str(), BRAFT_SEGMENT_CLOSED_PATTERN, firstIndex,
+                   &lastIndex);
     if (match == 2) {
         std::cout << "it is a closed segment, path: " << fileName
                   << " first index: " << *firstIndex
                   << " last index: " << lastIndex << std::endl;
     } else {
-        match = sscanf(name.c_str(), BRAFT_SEGMENT_OPEN_PATTERN,
-                       firstIndex);
+        match = sscanf(name.c_str(), BRAFT_SEGMENT_OPEN_PATTERN, firstIndex);
         if (match == 1) {
-            std::cout << "it is a opening segment, path: "
-                      << fileName
+            std::cout << "it is an opening segment, path: " << fileName
                       << " first index: " << *firstIndex << std::endl;
         } else {
-            std::cout << "filename = " << fileName <<
-                " is not a raft segment pattern!" << std::endl;
+            std::cout << "filename = " << fileName
+                      << " is not a raft segment pattern!" << std::endl;
             return -1;
         }
     }
diff --git a/src/tools/raft_log_tool.h b/src/tools/raft_log_tool.h
index d056608bb9..d445b9a280 100644
--- a/src/tools/raft_log_tool.h
+++ b/src/tools/raft_log_tool.h
@@ -23,14 +23,16 @@
 #ifndef SRC_TOOLS_RAFT_LOG_TOOL_H_
 #define SRC_TOOLS_RAFT_LOG_TOOL_H_

-#include
 #include
 #include
 #include
+#include
+
 #include
 #include
 #include
 #include
+
 #include "src/fs/local_filesystem.h"
 #include "src/tools/curve_tool.h"
 #include "src/tools/curve_tool_define.h"
@@ -49,48 +51,46 @@ struct EntryHeader {
     uint32_t data_len;
     uint32_t data_checksum;

-    bool operator== (const EntryHeader& rhs) const;
+    bool operator==(const EntryHeader& rhs) const;
 };

 std::ostream& operator<<(std::ostream& os, const EntryHeader& h);

 class SegmentParser {
  public:
-    explicit SegmentParser(std::shared_ptr<LocalFileSystem> localFS) :
-        localFS_(localFS) {}
+    explicit SegmentParser(std::shared_ptr<LocalFileSystem> localFS)
+        : localFS_(localFS) {}

     /**
-     * @brief 初始化
-     * @param fileName segmnet文件的文件名
-     * @return 获取成功返回0,失败返回-1
+     * @brief Initialize
+     * @param fileName File name of the segment file
+     * @return 0 on success, -1 on failure
      */
     virtual int Init(const std::string& fileName);

     /**
-     * @brief 反初始化
+     * @brief Deinitialize
      */
     virtual void UnInit();

     /**
-     * @brief 获取下一个EntryHeader
-     * @param[out] header log entry header
-     * @return 获取成功返回true,失败返回false
+     * @brief Get the next EntryHeader
+     * @param[out] header log entry header
+     * @return true on success, false on failure
      */
     virtual bool GetNextEntryHeader(EntryHeader* header);

     /**
-     * @brief 判断读取是否成功完成
+     * @brief Determine whether the read completed successfully
      */
-    virtual bool SuccessfullyFinished() {
-        return off_ >= fileLen_;
-    }
+    virtual bool SuccessfullyFinished() { return off_ >= fileLen_; }

  private:
-    // 文件描述符
+    // File descriptor
     int fd_;
-    // 下一个Entry的偏移
+    // Offset of the next entry
     int64_t off_;
-    // 文件长度
+    // File length
     int64_t fileLen_;

     std::shared_ptr<LocalFileSystem> localFS_;
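For readers of SegmentParser, the fixed header layout the parser walks is worth spelling out. This annotated sketch only restates what the unpack sequence in raft_log_tool.cpp implies (unpack64 then four unpack32 calls, with the header checksum verified over ENTRY_HEADER_SIZE - 4 bytes); it is an illustration, not part of the patch:

    #include <cstdint>

    // Byte layout of one 24-byte entry header (ENTRY_HEADER_SIZE), as
    // decoded by GetNextEntryHeader with butil::RawUnpacker:
    //   [0,  8)  term            (unpack64)
    //   [8, 12)  meta_field      (type = meta_field >> 24,
    //                             checksum_type = (meta_field << 8) >> 24)
    //   [12, 16) data_len
    //   [16, 20) data_checksum
    //   [20, 24) header_checksum (computed over bytes [0, 20) only,
    //                             i.e. ENTRY_HEADER_SIZE - 4)
    struct DecodedEntryHeader {
        int64_t term;
        uint8_t type;
        uint8_t checksum_type;
        uint32_t data_len;
        uint32_t data_checksum;
    };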
@@ -98,50 +98,52 @@ class RaftLogTool : public CurveTool {
  public:
-    explicit RaftLogTool(std::shared_ptr<SegmentParser> parser) :
-        parser_(parser) {}
+    explicit RaftLogTool(std::shared_ptr<SegmentParser> parser)
+        : parser_(parser) {}

     /**
-     * @brief 执行命令
-     * @param command 要执行的命令
-     * @return 成功返回0,失败返回-1
-     */
+     * @brief Execute the command
+     * @param command The command to execute
+     * @return 0 on success, -1 on failure
+     */
     int RunCommand(const std::string& command) override;

     /**
-     * @brief 打印帮助信息
-     */
+     * @brief Print help information
+     */
     void PrintHelp(const std::string& command) override;

     /**
-     * @brief 返回是否支持该命令
-     * @param command:执行的命令
-     * @return true / false
+     * @brief Return whether the command is supported
+     * @param command The command to execute
+     * @return true/false
      */
     static bool SupportCommand(const std::string& command);

  private:
     /**
-     * @brief 打印文件中所有raft log的头部信息
-     * @param fileName raft log文件名
-     * @return 成功返回0,否则返回-1
+     * @brief Print the header information of all raft logs in the file
+     * @param fileName raft log file name
+     * @return 0 on success, -1 otherwise
      */
     int PrintHeaders(const std::string& fileName);

     /**
-     * @brief 从文件解析出entry header
-     * @param fd 文件描述符
-     * @param offset 文件中的偏移
-     * @param[out] head entry头部信息,返回值为0时有效
-     * @return 成功返回0,否则返回-1
+     * @brief Parse an entry header from the file
+     * @param fd file descriptor
+     * @param offset Offset in the file
+     * @param[out] head entry header information, valid when the return
+     * value is 0
+     * @return 0 on success, -1 otherwise
      */
-    int ParseEntryHeader(int fd, off_t offset, EntryHeader *head);
+    int ParseEntryHeader(int fd, off_t offset, EntryHeader* head);

     /**
-     * @brief 从文件名解析first index
-     * @param fileName raft log文件名
-     * @param[out] firstIndex segment文件包含的log entry的第一个index
-     * @return 成功返回0,否则返回-1
+     * @brief Parse the first index from the file name
+     * @param fileName raft log file name
+     * @param[out] firstIndex The first index of the log entries contained
+     * in the segment file
+     * @return 0 on success, -1 otherwise
      */
     int ParseFirstIndexFromFileName(const std::string& fileName,
                                     int64_t* firstIndex);
diff --git a/src/tools/schedule_tool.cpp b/src/tools/schedule_tool.cpp
index 25cd976382..2370bdd6ca 100644
--- a/src/tools/schedule_tool.cpp
+++ b/src/tools/schedule_tool.cpp
@@ -20,23 +20,26 @@
  * Author: lixiaocui
  */

+#include "src/tools/schedule_tool.h"
+
 #include
+
 #include
-#include "src/tools/schedule_tool.h"
+
 #include "src/tools/curve_tool_define.h"

 DEFINE_uint32(logical_pool_id, 1, "logical pool");
 DECLARE_string(mdsAddr);
 DEFINE_bool(scheduleAll, true, "schedule all logical pool or not");
-DEFINE_bool(scanEnable, true, "Enable(true)/Disable(false) scan "
-            "for specify logical pool");
+DEFINE_bool(scanEnable, true,
+            "Enable(true)/Disable(false) scan "
+            "for specify logical pool");

 namespace curve {
 namespace tool {

 bool ScheduleTool::SupportCommand(const std::string& command) {
-    return command == kRapidLeaderSchedule ||
-           command == kSetScanState;
+    return command == kRapidLeaderSchedule || command == kSetScanState;
 }

 void ScheduleTool::PrintHelp(const std::string& cmd) {
@@ -50,31 +53,28 @@ void ScheduleTool::PrintHelp(const std::string& cmd) {
 }

 void ScheduleTool::PrintRapidLeaderScheduleHelp() {
-    std::cout << "Example :" << std::endl
+    std::cout
+        << "Example :" << std::endl
         << "curve_ops_tool " << kRapidLeaderSchedule
         << " -logical_pool_id=1 -scheduleAll=false [-mdsAddr=127.0.0.1:6666]"
-        << " [-confPath=/etc/curve/tools.conf]"
-        << std::endl;
+        << " [-confPath=/etc/curve/tools.conf]" << std::endl;
     std::cout << "curve_ops_tool " << kRapidLeaderSchedule
-              << " [-mdsAddr=127.0.0.1:6666]"
-              << " 
[-confPath=/etc/curve/tools.conf]" - << std::endl; + << " [-mdsAddr=127.0.0.1:6666]" + << " [-confPath=/etc/curve/tools.conf]" << std::endl; } void ScheduleTool::PrintSetScanStateHelp() { - std::cout - << "Example:" << std::endl - << " curve_ops_tool " << kSetScanState - << " -logical_pool_id=1 -scanEnable=true/false" - << " [-mdsAddr=127.0.0.1:6666]" - << " [-confPath=/etc/curve/tools.conf]" - << std::endl; + std::cout << "Example:" << std::endl + << " curve_ops_tool " << kSetScanState + << " -logical_pool_id=1 -scanEnable=true/false" + << " [-mdsAddr=127.0.0.1:6666]" + << " [-confPath=/etc/curve/tools.conf]" << std::endl; } -int ScheduleTool::RunCommand(const std::string &cmd) { +int ScheduleTool::RunCommand(const std::string& cmd) { if (kRapidLeaderSchedule == cmd) { return DoRapidLeaderSchedule(); - } else if (cmd == kSetScanState) { + } else if (cmd == kSetScanState) { return DoSetScanState(); } std::cout << "Command not supported!" << std::endl; @@ -90,14 +90,14 @@ int ScheduleTool::DoSetScanState() { auto lpid = FLAGS_logical_pool_id; auto scanEnable = FLAGS_scanEnable; auto retCode = mdsClient_->SetLogicalPoolScanState(lpid, scanEnable); - std::cout << (scanEnable ? "Enable" : "Disable") - << " scan for logicalpool(" << lpid << ")" - << (retCode == 0 ? " success" : " fail") << std::endl; + std::cout << (scanEnable ? "Enable" : "Disable") << " scan for logicalpool(" + << lpid << ")" << (retCode == 0 ? " success" : " fail") + << std::endl; return retCode; } int ScheduleTool::DoRapidLeaderSchedule() { - if (0 != mdsClient_->Init(FLAGS_mdsAddr)) { + if (0 != mdsClient_->Init(FLAGS_mdsAddr)) { std::cout << "Init mds client fail!" << std::endl; return -1; } @@ -109,11 +109,11 @@ int ScheduleTool::DoRapidLeaderSchedule() { } int ScheduleTool::ScheduleOne(PoolIdType lpoolId) { - // 给mds发送rpc + // Send rpc to mds int res = mdsClient_->RapidLeaderSchedule(lpoolId); if (res != 0) { - std::cout << "RapidLeaderSchedule pool " << lpoolId - << " fail" << std::endl; + std::cout << "RapidLeaderSchedule pool " << lpoolId << " fail" + << std::endl; return -1; } return 0; diff --git a/src/tools/schedule_tool.h b/src/tools/schedule_tool.h index edc9bf44dc..094475bafc 100644 --- a/src/tools/schedule_tool.h +++ b/src/tools/schedule_tool.h @@ -25,8 +25,9 @@ #include #include -#include "src/tools/mds_client.h" + #include "src/tools/curve_tool.h" +#include "src/tools/mds_client.h" namespace curve { namespace tool { @@ -39,36 +40,37 @@ class ScheduleTool : public CurveTool { : mdsClient_(mdsClient) {} /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); /** - * @brief 打印help信息 - * @param cmd:执行的命令 - * @return 无 + * @brief Print help information + * @param cmd: Command executed + * @return None */ - void PrintHelp(const std::string &command) override; + void PrintHelp(const std::string& command) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param cmd: Command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &command) override; + int RunCommand(const std::string& command) override; private: /** - * @brief PrintRapidLeaderSchedule 打印rapid-leader-schdule的help信息 + * @brief PrintRapidLeaderSchedule Print help information for + * rapid-leader-schdule */ void PrintRapidLeaderScheduleHelp(); void 
PrintSetScanStateHelp(); /** - * @brief DoRapidLeaderSchedule 向mds发送rpc进行快速transfer leader + * @brief DoRapidLeaderSchedule sends rpc to mds for fast transfer leader */ int DoRapidLeaderSchedule(); diff --git a/src/tools/snapshot_check.h b/src/tools/snapshot_check.h index 87bf512758..0750cf5b50 100644 --- a/src/tools/snapshot_check.h +++ b/src/tools/snapshot_check.h @@ -25,60 +25,60 @@ #include #include + +#include #include #include #include -#include -#include "src/client/libcurve_file.h" #include "src/client/client_common.h" +#include "src/client/libcurve_file.h" #include "src/common/configuration.h" -#include "src/common/s3_adapter.h" #include "src/common/crc32.h" -#include "src/tools/snapshot_read.h" +#include "src/common/s3_adapter.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" +#include "src/tools/snapshot_read.h" namespace curve { namespace tool { class SnapshotCheck : public CurveTool { public: SnapshotCheck(std::shared_ptr client, - std::shared_ptr snapshot) : - client_(client), snapshot_(snapshot), inited_(false) {} + std::shared_ptr snapshot) + : client_(client), snapshot_(snapshot), inited_(false) {} ~SnapshotCheck(); - /** - * @brief 打印用法 - * @param command:查询的命令 - * @return 无 + * @brief printing usage + * @param command: Query command + * @return None */ - void PrintHelp(const std::string &command) override; + void PrintHelp(const std::string& command) override; /** - * @brief 执行命令 - * @param command:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param command: The command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &command) override; + int RunCommand(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); /** - * @brief 比较文件和快照的一致性 - * @return 成功返回0,失败返回-1 + * @brief Compare file and snapshot consistency + * @return returns 0 for success, -1 for failure */ int Check(); private: /** - * 初始化 + * Initialize */ int Init(); diff --git a/src/tools/snapshot_clone_client.cpp b/src/tools/snapshot_clone_client.cpp index 2b8be3c739..847027aab3 100644 --- a/src/tools/snapshot_clone_client.cpp +++ b/src/tools/snapshot_clone_client.cpp @@ -48,7 +48,7 @@ int SnapshotCloneClient::InitDummyServerMap(const std::string& dummyPort) { std::cout << "split dummy server fail!" 
<< std::endl;
         return -1;
     }
-    // 只指定了一个端口,对所有mds采用这个端口
+    // Only one port was specified; use it for all mds instances
     if (dummyPortVec.size() == 1) {
         for (uint64_t i = 0; i < serverAddrVec_.size() - 1; ++i) {
             dummyPortVec.emplace_back(dummyPortVec[0]);
         }
@@ -57,7 +57,8 @@ int SnapshotCloneClient::InitDummyServerMap(const std::string& dummyPort) {

     if (dummyPortVec.size() != serverAddrVec_.size()) {
         std::cout << "snapshot clone server dummy port list must be correspond"
-                     " as snapshot clone addr list" << std::endl;
+                     " as snapshot clone addr list"
+                  << std::endl;
         return -1;
     }

@@ -76,23 +77,23 @@ int SnapshotCloneClient::InitDummyServerMap(const std::string& dummyPort) {

 std::vector<std::string> SnapshotCloneClient::GetActiveAddrs() {
     std::vector<std::string> activeAddrs;
-    for (const auto &item : dummyServerMap_) {
-        // 获取status来判断正在服务的地址
+    for (const auto& item : dummyServerMap_) {
+        // Check the status metric to find the addresses actually serving
         std::string status;
-        MetricRet ret = metricClient_->GetMetric(item.second,
-            kSnapshotCloneStatusMetricName, &status);
+        MetricRet ret = metricClient_->GetMetric(
+            item.second, kSnapshotCloneStatusMetricName, &status);
         if (ret != MetricRet::kOK) {
-            std::cout << "Get status metric from " << item.second
-                      << " fail" << std::endl;
+            std::cout << "Get status metric from " << item.second << " fail"
+                      << std::endl;
             continue;
         }
         if (status == kSnapshotCloneStatusActive) {
-            // 如果是active状态,再访问一下服务端口
-            MetricRet ret = metricClient_->GetMetric(item.first,
-                kSnapshotCloneStatusMetricName, &status);
+            // If it reports active, also query the service port to confirm
+            MetricRet ret = metricClient_->GetMetric(
+                item.first, kSnapshotCloneStatusMetricName, &status);
             if (ret != MetricRet::kOK) {
-                std::cout << "Get status metric from " << item.first
-                          << " fail" << std::endl;
+                std::cout << "Get status metric from " << item.first << " fail"
+                          << std::endl;
                 continue;
             }
             activeAddrs.emplace_back(item.first);
@@ -102,12 +103,13 @@ std::vector<std::string> SnapshotCloneClient::GetActiveAddrs() {
 }

 void SnapshotCloneClient::GetOnlineStatus(
-    std::map<std::string, bool>* onlineStatus) {
+    std::map<std::string, bool>* onlineStatus) {
     onlineStatus->clear();
-    for (const auto &item : dummyServerMap_) {
+    for (const auto& item : dummyServerMap_) {
         std::string listenAddr;
         int res = GetListenAddrFromDummyPort(item.second, &listenAddr);
-        // 如果获取到的监听地址与记录的mds地址不一致,也认为不在线
+        // If the listen address obtained does not match the recorded
+        // address, the server is also considered offline
         if (res != 0 || listenAddr != item.first) {
            onlineStatus->emplace(item.first, false);
            continue;
@@ -117,10 +119,9 @@ void SnapshotCloneClient::GetOnlineStatus(
 }

 int SnapshotCloneClient::GetListenAddrFromDummyPort(
-    const std::string& dummyAddr,
-    std::string* listenAddr) {
-    MetricRet res = metricClient_->GetConfValueFromMetric(dummyAddr,
-        kSnapshotCloneConfMetricName, listenAddr);
+    const std::string& dummyAddr, std::string* listenAddr) {
+    MetricRet res = metricClient_->GetConfValueFromMetric(
+        dummyAddr, kSnapshotCloneConfMetricName, listenAddr);
     if (res != MetricRet::kOK) {
         return -1;
     }
diff --git a/src/tools/snapshot_clone_client.h b/src/tools/snapshot_clone_client.h
index 295134bd50..711952686a 100644
--- a/src/tools/snapshot_clone_client.h
+++ b/src/tools/snapshot_clone_client.h
@@ -23,10 +23,10 @@
 #ifndef SRC_TOOLS_SNAPSHOT_CLONE_CLIENT_H_
 #define SRC_TOOLS_SNAPSHOT_CLONE_CLIENT_H_

-#include
-#include
 #include
+#include
 #include
+#include

 #include "src/tools/metric_client.h"
 #include "src/tools/metric_name.h"
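The online check implemented in GetOnlineStatus above hinges on one invariant: a server counts as online only if its dummy port answers and reports exactly the listen address recorded for it. A condensed, self-contained restatement of that decision (illustrative only; the query is injected so the sketch does not depend on the class internals):

    #include <functional>
    #include <map>
    #include <string>

    // dummyServerMap maps listenAddr -> dummyAddr; queryDummy returns true
    // and fills *reported when the dummy port is reachable.
    void SketchOnlineCheck(
        const std::map<std::string, std::string>& dummyServerMap,
        const std::function<bool(const std::string&, std::string*)>& queryDummy,
        std::map<std::string, bool>* onlineStatus) {
        for (const auto& item : dummyServerMap) {
            std::string reported;
            bool reachable = queryDummy(item.second, &reported);
            // Offline if unreachable OR the reported listen address differs
            (*onlineStatus)[item.first] = reachable && reported == item.first;
        }
    }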
@@ -36,66 +36,69 @@ namespace tool {

 class SnapshotCloneClient {
  public:
-    explicit SnapshotCloneClient(std::shared_ptr<MetricClient> metricClient) :
-        metricClient_(metricClient) {}
+    explicit SnapshotCloneClient(std::shared_ptr<MetricClient> metricClient)
+        : metricClient_(metricClient) {}

     virtual ~SnapshotCloneClient() = default;

     /**
-     * @brief 初始化,从字符串解析出地址和dummy port
-     * @param serverAddr snapshot clone server的地址,支持多地址,用","分隔
-     * @param dummyPort dummy port列表,只输入一个的话
-     *        所有server用同样的dummy port,用字符串分隔有多个的话
-     *        为每个server设置不同的dummy port
-     * @return
-     *        success: 0
-     *        failed: -1
-     *        no snapshot server: 1
+     * @brief Initialize; parse the addresses and dummy ports from strings
+     * @param serverAddr snapshot clone server address; multiple addresses
+     * are supported, separated by ','
+     * @param dummyPort dummy port list; if a single port is given, all
+     * servers use the same dummy port; if several are given, each server
+     * gets its own dummy port
+     * @return
+     *        Success: 0
+     *        Failed: -1
+     *        No snapshot server: 1
      *
      */
     virtual int Init(const std::string& serverAddr,
                      const std::string& dummyPort);

     /**
-     * @brief 获取当前服务的snapshot clone server的地址
+     * @brief Get the addresses of the snapshot clone servers currently
+     * serving
      */
     virtual std::vector<std::string> GetActiveAddrs();

     /**
-     * @brief 获取snapshot clone server的在线状态
-     *        dummyserver在线且dummyserver记录的listen addr
-     *        与服务地址一致才认为在线
-     * @param[out] onlineStatus 每个节点的在线状态
+     * @brief Get the online status of the snapshot clone servers.
+     *        A server is considered online only if its dummy server is
+     *        reachable and the listen addr the dummy server records
+     *        matches the service address
+     * @param[out] onlineStatus The online status of each node
      */
     virtual void GetOnlineStatus(std::map<std::string, bool>* onlineStatus);

     virtual const std::map<std::string, std::string>& GetDummyServerMap()
-                                                                    const {
+        const {
         return dummyServerMap_;
     }

 private:
     /**
-     * @brief 初始化dummy server地址
-     * @param dummyPort dummy server端口列表
-     * @return 成功返回0,失败返回-1
+     * @brief Initialize the dummy server addresses
+     * @param dummyPort dummy server port list
+     * @return 0 on success, -1 on failure
      */
     int InitDummyServerMap(const std::string& dummyPort);

     /**
-     * @brief 通过dummyServer获取server的监听地址
-     * @param dummyAddr dummyServer的地址
-     * @param[out] listenAddr 服务地址
-     * @return 成功返回0,失败返回-1
+     * @brief Obtain the listen address of the server through its
+     * dummyServer
+     * @param dummyAddr Address of the dummyServer
+     * @param[out] listenAddr Service address
+     * @return 0 on success, -1 on failure
      */
     int GetListenAddrFromDummyPort(const std::string& dummyAddr,
                                    std::string* listenAddr);

 private:
-    // 用于获取metric
+    // Used to fetch metrics
     std::shared_ptr<MetricClient> metricClient_;
-    // 保存server地址的vector
+    // Server addresses
     std::vector<std::string> serverAddrVec_;
-    // 保存server地址对应的dummy server的地址
+    // Maps each server address to its dummy server's address
     std::map<std::string, std::string> dummyServerMap_;
 };
diff --git a/src/tools/status_tool.cpp b/src/tools/status_tool.cpp
index 4444f51fd2..bfb01015d9 100644
--- a/src/tools/status_tool.cpp
+++ b/src/tools/status_tool.cpp
@@ -20,1143 +20,1377 @@
  * Author: charisu
  */
 #include "src/tools/status_tool.h"
+
 #include

 DEFINE_bool(offline, false, "if true, only list offline chunskervers");
-DEFINE_bool(unhealthy, false, "if true, only list chunkserver that unhealthy "
-            "ratio greater than 0");
-DEFINE_bool(checkHealth, true, "if true, it will check the health "
-            "state of chunkserver in chunkserver-list");
-DEFINE_bool(checkCSAlive, false, "if true, it will check the online state of "
-            "chunkservers with rpc in chunkserver-list");
-DEFINE_bool(listClientInRepo, true, "if true, list-client will list all clients" - " include that in repo"); +DEFINE_bool(unhealthy, false, + "if true, only list chunkserver that unhealthy " + "ratio greater than 0"); +DEFINE_bool(checkHealth, true, + "if true, it will check the health " + "state of chunkserver in chunkserver-list"); +DEFINE_bool(checkCSAlive, false, + "if true, it will check the online state of " + "chunkservers with rpc in chunkserver-list"); +DEFINE_bool(listClientInRepo, true, + "if true, list-client will list all clients" + " include that in repo"); DEFINE_uint64(walSegmentSize, 8388608, "wal segment size"); DECLARE_string(mdsAddr); DECLARE_string(etcdAddr); DECLARE_string(mdsDummyPort); DECLARE_bool(detail); -const char* kProtocalCurve = "curve"; +const char *kProtocalCurve = "curve"; -namespace curve { -namespace tool { +namespace curve +{ + namespace tool + { -std::ostream& operator<<(std::ostream& os, - std::vector strs) { - for (uint32_t i = 0; i < strs.size(); ++i) { - if (i != 0) { - os << ", "; - } - os << strs[i]; - } - return os; -} - -std::string ToString(ServiceName name) { - static std::map serviceNameMap = - {{ServiceName::kMds, "mds"}, - {ServiceName::kEtcd, "etcd"}, - {ServiceName::kSnapshotCloneServer, - "snapshot-clone-server"}}; - return serviceNameMap[name]; -} - -int StatusTool::Init(const std::string& command) { - if (CommandNeedMds(command) && !mdsInited_) { - if (mdsClient_->Init(FLAGS_mdsAddr, FLAGS_mdsDummyPort) != 0) { - std::cout << "Init mdsClient failed!" << std::endl; - return -1; - } - if (copysetCheckCore_->Init(FLAGS_mdsAddr) != 0) { - std::cout << "Init copysetCheckCore failed!" << std::endl; - return -1; - } - mdsInited_ = true; - } - if (CommandNeedEtcd(command) && !etcdInited_) { - if (etcdClient_->Init(FLAGS_etcdAddr) != 0) { - std::cout << "Init etcdClient failed!" << std::endl; - return -1; - } - etcdInited_ = true; - } - if (CommandNeedSnapshotClone(command)) { - int snapshotRet = snapshotClient_->Init(FLAGS_snapshotCloneAddr, - FLAGS_snapshotCloneDummyPort); - switch (snapshotRet) { - case 0: - // success - break; - case 1: - // no snapshot clone server - noSnapshotServer_ = true; - break; - default: - // -1 and other - std::cout << "Init snapshotClient failed!" 
<< std::endl; - return -1; - } - } - return 0; -} - -bool StatusTool::CommandNeedEtcd(const std::string& command) { - return (command == kEtcdStatusCmd || command == kStatusCmd); -} - -bool StatusTool::CommandNeedMds(const std::string& command) { - return (command != kEtcdStatusCmd && command != kSnapshotCloneStatusCmd); -} - -bool StatusTool::CommandNeedSnapshotClone(const std::string& command) { - return (command == kSnapshotCloneStatusCmd || command == kStatusCmd); -} - -bool StatusTool::SupportCommand(const std::string& command) { - return (command == kSpaceCmd || command == kStatusCmd || - command == kChunkserverListCmd || - command == kChunkserverStatusCmd || command == kMdsStatusCmd || - command == kEtcdStatusCmd || command == kClientStatusCmd || - command == kClientListCmd || command == kSnapshotCloneStatusCmd || - command == kClusterStatusCmd || command == kServerListCmd || - command == kLogicalPoolList || command == kScanStatusCmd || - command == kFormatStatusCmd); -} - -void StatusTool::PrintHelp(const std::string& cmd) { - std::cout << "Example :" << std::endl; - std::cout << "curve_ops_tool " << cmd; - if (CommandNeedMds(cmd)) { - std::cout << " [-mdsAddr=127.0.0.1:6666]" - << " [-confPath=/etc/curve/tools.conf]"; - } - if (CommandNeedEtcd(cmd)) { - std::cout << " [-etcdAddr=127.0.0.1:6666]" - << " [-confPath=/etc/curve/tools.conf]"; - } - if (CommandNeedSnapshotClone(cmd)) { - std::cout << " [-snapshotCloneAddr=127.0.0.1:5555]" - << " [-confPath=/etc/curve/tools.conf]"; - } - - if (cmd == kChunkserverListCmd) { - std::cout << " [-offline] [-unhealthy] [-checkHealth=false]" - << " [-confPath=/etc/curve/tools.conf]" - << " [-checkCSAlive]"; - } else if (cmd == kClientStatusCmd) { - std::cout << " [-detail] [-confPath=/etc/curve/tools.conf]"; - } else if (cmd == kClientListCmd) { - std::cout << " [-listClientInRepo=false]" - << " [-confPath=/etc/curve/tools.conf]"; - } else if (cmd == kScanStatusCmd) { - std::cout << " [-logicalPoolId=1] [-copysetId=1]" << std::endl; - } - - std::cout << std::endl; -} - -int StatusTool::SpaceCmd() { - SpaceInfo spaceInfo; - int res = GetSpaceInfo(&spaceInfo); - if (res != 0) { - std::cout << "GetSpaceInfo fail!" 
<< std::endl; - return -1; - } - double physicalUsedRatio = 0; - if (spaceInfo.totalChunkSize != 0) { - physicalUsedRatio = static_cast(spaceInfo.usedChunkSize) / - spaceInfo.totalChunkSize; - } - - double logicalUsedRatio = 0; - double logicalLeftRatio = 0; - double canBeRecycledRatio = 0; - double createdFileRatio = 0; - if (spaceInfo.totalCapacity != 0) { - logicalUsedRatio = static_cast(spaceInfo.allocatedSize) / - spaceInfo.totalCapacity; - logicalLeftRatio = static_cast( - spaceInfo.totalCapacity - spaceInfo.allocatedSize) / - spaceInfo.totalCapacity; - createdFileRatio = static_cast(spaceInfo.currentFileSize) / - spaceInfo.totalCapacity; - } - if (spaceInfo.allocatedSize != 0) { - canBeRecycledRatio = static_cast(spaceInfo.recycleAllocSize) / - spaceInfo.allocatedSize; - } - std:: cout.setf(std::ios::fixed); - std::cout << std::setprecision(2); - std::cout << "Space info:" << std::endl; - std::cout << "physical: total = " - << spaceInfo.totalChunkSize / mds::kGB << "GB" - << ", used = " << spaceInfo.usedChunkSize / mds::kGB - << "GB(" << physicalUsedRatio * 100 << "%), left = " - << (spaceInfo.totalChunkSize - spaceInfo.usedChunkSize) / mds::kGB - << "GB(" << (1 - physicalUsedRatio) * 100 << "%)" << std::endl; - std::cout << "logical: total = " - << spaceInfo.totalCapacity / mds::kGB << "GB" - << ", used = " << spaceInfo.allocatedSize / mds::kGB << "GB" - << "(" << logicalUsedRatio * 100 << "%, can be recycled = " - << spaceInfo.recycleAllocSize / mds::kGB << "GB(" - << canBeRecycledRatio * 100 << "%))" - << ", left = " - << (spaceInfo.totalCapacity - spaceInfo.allocatedSize) / mds::kGB - << "GB(" << logicalLeftRatio * 100 << "%)" - << ", created file size = " - << spaceInfo.currentFileSize / mds::kGB - << "GB(" << createdFileRatio * 100 << "%)" << std::endl; - - std::cout << "Every Logicalpool Space info:" << std::endl; - for (const auto &i : spaceInfo.lpoolspaceinfo) { - std::cout << "logicalPool: name = "<< i.second.poolName - << ", poolid = " << i.first - << ", total = "<< i.second.totalCapacity / mds::kGB << "GB" - << ", used = " << i.second.allocatedSize / mds::kGB << "GB" - << ", left = " << (i.second.totalCapacity - - i.second.allocatedSize) / mds::kGB - << "GB"<< std::endl; - } - return 0; -} - -int StatusTool::FormatStatusCmd() { - std::vector formatStatus; - int res = mdsClient_->ListChunkFormatStatus(&formatStatus); - if (res != 0) { - std::cout << "ListChunkserversInCluster fail!" << std::endl; - return -1; - } - for (auto stat : formatStatus) { - std::cout << "ip:" << stat.ip() << " port:" << stat.port() - << " id:" << stat.chunkserverid() - << " format percent:" << stat.formatpercent() << std::endl; - } - return 0; -} - -int StatusTool::ChunkServerListCmd() { - std::vector chunkservers; - int res = mdsClient_->ListChunkServersInCluster(&chunkservers); - if (res != 0) { - std::cout << "ListChunkserversInCluster fail!" 
<< std::endl; - return -1; - } - - std::cout << "curve chunkserver list: " << std::endl; - uint64_t total = 0; - uint64_t online = 0; - uint64_t offline = 0; - uint64_t unstable = 0; - uint64_t pendding = 0; - uint64_t retired = 0; - uint64_t penddingCopyset = 0; - for (auto& chunkserver : chunkservers) { - auto csId = chunkserver.chunkserverid(); - std::vector copysets; - int ret = mdsClient_->GetCopySetsInChunkServer(csId, ©sets); - if (ret != 0) { - std::cout << "GetCopySetsInChunkServer fail, chunkserver id = " - << csId; - return -1; + std::ostream &operator<<(std::ostream &os, std::vector strs) + { + for (uint32_t i = 0; i < strs.size(); ++i) + { + if (i != 0) + { + os << ", "; + } + os << strs[i]; + } + return os; } - double unhealthyRatio = 0.0; - if (FLAGS_checkCSAlive) { - // 发RPC重置online状态 - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); - bool isOnline = copysetCheckCore_->CheckChunkServerOnline(csAddr); - if (isOnline) { - chunkserver.set_onlinestate(OnlineState::ONLINE); - } else { - chunkserver.set_onlinestate(OnlineState::OFFLINE); - } + std::string ToString(ServiceName name) + { + static std::map serviceNameMap = { + {ServiceName::kMds, "mds"}, + {ServiceName::kEtcd, "etcd"}, + {ServiceName::kSnapshotCloneServer, "snapshot-clone-server"}}; + return serviceNameMap[name]; } - if (chunkserver.onlinestate() != OnlineState::ONLINE) { - if (chunkserver.onlinestate() == OnlineState::OFFLINE) { - offline++; - } - if (chunkserver.onlinestate() == OnlineState::UNSTABLE) { - unstable++; + int StatusTool::Init(const std::string &command) + { + if (CommandNeedMds(command) && !mdsInited_) + { + if (mdsClient_->Init(FLAGS_mdsAddr, FLAGS_mdsDummyPort) != 0) + { + std::cout << "Init mdsClient failed!" << std::endl; + return -1; + } + if (copysetCheckCore_->Init(FLAGS_mdsAddr) != 0) + { + std::cout << "Init copysetCheckCore failed!" << std::endl; + return -1; + } + mdsInited_ = true; } - unhealthyRatio = 1; - } else { - if (FLAGS_offline) { - continue; + if (CommandNeedEtcd(command) && !etcdInited_) + { + if (etcdClient_->Init(FLAGS_etcdAddr) != 0) + { + std::cout << "Init etcdClient failed!" << std::endl; + return -1; + } + etcdInited_ = true; } - if (FLAGS_checkHealth) { - copysetCheckCore_->CheckCopysetsOnChunkServer(csId); - const auto& statistics = - copysetCheckCore_->GetCopysetStatistics(); - unhealthyRatio = statistics.unhealthyRatio; - if (FLAGS_unhealthy && unhealthyRatio == 0) { - continue; + if (CommandNeedSnapshotClone(command)) + { + int snapshotRet = snapshotClient_->Init(FLAGS_snapshotCloneAddr, + FLAGS_snapshotCloneDummyPort); + switch (snapshotRet) + { + case 0: + // success + break; + case 1: + // no snapshot clone server + noSnapshotServer_ = true; + break; + default: + // -1 and other + std::cout << "Init snapshotClient failed!" 
<< std::endl; + return -1; } } - online++; - } - if (chunkserver.status() == ChunkServerStatus::PENDDING) { - pendding++; - penddingCopyset += copysets.size(); + return 0; } - if (chunkserver.status() == ChunkServerStatus::RETIRED) { - retired++; + + bool StatusTool::CommandNeedEtcd(const std::string &command) + { + return (command == kEtcdStatusCmd || command == kStatusCmd); } - total++; - std::cout << "chunkServerID = " << csId - << ", diskType = " << chunkserver.disktype() - << ", hostIP = " << chunkserver.hostip() - << ", port = " << chunkserver.port() - << ", rwStatus = " - << ChunkServerStatus_Name(chunkserver.status()) - << ", diskState = " - << DiskState_Name(chunkserver.diskstatus()) - << ", onlineState = " - << OnlineState_Name(chunkserver.onlinestate()) - << ", copysetNum = " << copysets.size() - << ", mountPoint = " << chunkserver.mountpoint() - << ", diskCapacity = " << chunkserver.diskcapacity() - / curve::mds::kGB << " GB" - << ", diskUsed = " << chunkserver.diskused() - / curve::mds::kGB << " GB"; - if (FLAGS_checkHealth) { - std::cout << ", unhealthyCopysetRatio = " - << unhealthyRatio * 100 << "%"; + + bool StatusTool::CommandNeedMds(const std::string &command) + { + return (command != kEtcdStatusCmd && command != kSnapshotCloneStatusCmd); } - if (chunkserver.has_externalip()) { - std::cout << ", externalIP = " << chunkserver.externalip(); + + bool StatusTool::CommandNeedSnapshotClone(const std::string &command) + { + return (command == kSnapshotCloneStatusCmd || command == kStatusCmd); } - std::cout << std::endl; - } - std::cout << "total: " << total << ", online: " << online; - if (!FLAGS_checkCSAlive) { - std::cout <<", unstable: " << unstable; - } - std::cout << ", offline: " << offline << std::endl; - - std::cout << "pendding: " << pendding - << ", penddingCopyset: " << penddingCopyset - << ", retired:" << retired << std::endl; - return 0; -} - -int StatusTool::ServerListCmd() { - std::vector servers; - int res = mdsClient_->ListServersInCluster(&servers); - if (res != 0) { - std::cout << "ListServersInCluster fail!" << std::endl; - return -1; - } - std::cout << "curve server list: " << std::endl; - uint64_t total = 0; - for (auto& server : servers) { - total++; - std::cout << "serverID = " << server.serverid() - << ", hostName = " << server.hostname() - << ", internalIP = " << server.internalip() - << ", internalPort = " << server.internalport() - << ", externalIp = " << server.externalip() - << ", externalPort = " << server.externalport() - << ", zoneID = " << server.zoneid() - << ", poolID = " << server.physicalpoolid() << std::endl; - } - std::cout << "total: " << total << std::endl; - return 0; -} - -int StatusTool::LogicalPoolListCmd() { - std::vector lgPools; - int res = mdsClient_->ListLogicalPoolsInCluster(&lgPools); - if (res != 0) { - std::cout << "ListLogicalPoolsInCluster fail!" << std::endl; - return -1; - } - std::cout << "curve logical pool list: " << std::endl; - uint64_t total = 0; - uint64_t allocSize; - AllocMap allocMap; - res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, - &allocSize, &allocMap); - if (res != 0) { - std::cout << "GetAllocatedSize of recycle bin fail!" << std::endl; - return -1; - } - for (auto& lgPool : lgPools) { - total++; - std::string poolName = lgPool.logicalpoolname(); - uint64_t totalSize; - std::string metricName = GetPoolLogicalCapacityName(poolName); - res = mdsClient_->GetMetric(metricName, &totalSize); - if (res != 0) { - std::cout << "Get logical capacity from mds fail!" 
<< std::endl; - return -1; + + bool StatusTool::SupportCommand(const std::string &command) + { + return (command == kSpaceCmd || command == kStatusCmd || + command == kChunkserverListCmd || + command == kChunkserverStatusCmd || command == kMdsStatusCmd || + command == kEtcdStatusCmd || command == kClientStatusCmd || + command == kClientListCmd || command == kSnapshotCloneStatusCmd || + command == kClusterStatusCmd || command == kServerListCmd || + command == kLogicalPoolList || command == kScanStatusCmd || + command == kFormatStatusCmd); } - uint64_t usedSize; - metricName = GetPoolLogicalAllocName(poolName); - res = mdsClient_->GetMetric(metricName, &usedSize); - if (res != 0) { - std::cout << "Get logical alloc size from mds fail!" << std::endl; - return -1; + + void StatusTool::PrintHelp(const std::string &cmd) + { + std::cout << "Example :" << std::endl; + std::cout << "curve_ops_tool " << cmd; + if (CommandNeedMds(cmd)) + { + std::cout << " [-mdsAddr=127.0.0.1:6666]" + << " [-confPath=/etc/curve/tools.conf]"; + } + if (CommandNeedEtcd(cmd)) + { + std::cout << " [-etcdAddr=127.0.0.1:6666]" + << " [-confPath=/etc/curve/tools.conf]"; + } + if (CommandNeedSnapshotClone(cmd)) + { + std::cout << " [-snapshotCloneAddr=127.0.0.1:5555]" + << " [-confPath=/etc/curve/tools.conf]"; + } + + if (cmd == kChunkserverListCmd) + { + std::cout << " [-offline] [-unhealthy] [-checkHealth=false]" + << " [-confPath=/etc/curve/tools.conf]" + << " [-checkCSAlive]"; + } + else if (cmd == kClientStatusCmd) + { + std::cout << " [-detail] [-confPath=/etc/curve/tools.conf]"; + } + else if (cmd == kClientListCmd) + { + std::cout << " [-listClientInRepo=false]" + << " [-confPath=/etc/curve/tools.conf]"; + } + else if (cmd == kScanStatusCmd) + { + std::cout << " [-logicalPoolId=1] [-copysetId=1]" << std::endl; + } + + std::cout << std::endl; } - double usedRatio = 0; - if (total != 0) { - usedRatio = static_cast(usedSize) / totalSize; + + int StatusTool::SpaceCmd() + { + SpaceInfo spaceInfo; + int res = GetSpaceInfo(&spaceInfo); + if (res != 0) + { + std::cout << "GetSpaceInfo fail!" 
<< std::endl; + return -1; + } + double physicalUsedRatio = 0; + if (spaceInfo.totalChunkSize != 0) + { + physicalUsedRatio = static_cast(spaceInfo.usedChunkSize) / + spaceInfo.totalChunkSize; + } + + double logicalUsedRatio = 0; + double logicalLeftRatio = 0; + double canBeRecycledRatio = 0; + double createdFileRatio = 0; + if (spaceInfo.totalCapacity != 0) + { + logicalUsedRatio = static_cast(spaceInfo.allocatedSize) / + spaceInfo.totalCapacity; + logicalLeftRatio = static_cast(spaceInfo.totalCapacity - + spaceInfo.allocatedSize) / + spaceInfo.totalCapacity; + createdFileRatio = static_cast(spaceInfo.currentFileSize) / + spaceInfo.totalCapacity; + } + if (spaceInfo.allocatedSize != 0) + { + canBeRecycledRatio = static_cast(spaceInfo.recycleAllocSize) / + spaceInfo.allocatedSize; + } + std::cout.setf(std::ios::fixed); + std::cout << std::setprecision(2); + std::cout << "Space info:" << std::endl; + std::cout << "physical: total = " << spaceInfo.totalChunkSize / mds::kGB + << "GB" + << ", used = " << spaceInfo.usedChunkSize / mds::kGB << "GB(" + << physicalUsedRatio * 100 << "%), left = " + << (spaceInfo.totalChunkSize - spaceInfo.usedChunkSize) / mds::kGB + << "GB(" << (1 - physicalUsedRatio) * 100 << "%)" << std::endl; + std::cout << "logical: total = " << spaceInfo.totalCapacity / mds::kGB + << "GB" + << ", used = " << spaceInfo.allocatedSize / mds::kGB << "GB" + << "(" << logicalUsedRatio * 100 << "%, can be recycled = " + << spaceInfo.recycleAllocSize / mds::kGB << "GB(" + << canBeRecycledRatio * 100 << "%))" + << ", left = " + << (spaceInfo.totalCapacity - spaceInfo.allocatedSize) / mds::kGB + << "GB(" << logicalLeftRatio * 100 << "%)" + << ", created file size = " + << spaceInfo.currentFileSize / mds::kGB << "GB(" + << createdFileRatio * 100 << "%)" << std::endl; + + std::cout << "Every Logicalpool Space info:" << std::endl; + for (const auto &i : spaceInfo.lpoolspaceinfo) + { + std::cout << "logicalPool: name = " << i.second.poolName + << ", poolid = " << i.first + << ", total = " << i.second.totalCapacity / mds::kGB << "GB" + << ", used = " << i.second.allocatedSize / mds::kGB << "GB" + << ", left = " + << (i.second.totalCapacity - i.second.allocatedSize) / + mds::kGB + << "GB" << std::endl; + } + return 0; } - uint64_t canBeRecycle = allocMap[lgPool.logicalpoolid()]; - double recycleRatio = 0; - if (usedSize != 0) { - recycleRatio = static_cast(canBeRecycle) / usedSize; + + int StatusTool::FormatStatusCmd() + { + std::vector formatStatus; + int res = mdsClient_->ListChunkFormatStatus(&formatStatus); + if (res != 0) + { + std::cout << "ListChunkserversInCluster fail!" 
<< std::endl; + return -1; + } + for (auto stat : formatStatus) + { + std::cout << "ip:" << stat.ip() << " port:" << stat.port() + << " id:" << stat.chunkserverid() + << " format percent:" << stat.formatpercent() << std::endl; + } + return 0; } - std::cout << "id = " << lgPool.logicalpoolid() - << ", name = " << lgPool.logicalpoolname() - << ", physicalPoolID = " << lgPool.physicalpoolid() - << ", type = " - << curve::mds::topology::LogicalPoolType_Name(lgPool.type()) - << ", scanEnable = " << lgPool.scanenable() - << ", allocateStatus = " - << curve::mds::topology:: - AllocateStatus_Name(lgPool.allocatestatus()) - << ", total space = " << totalSize / curve::mds::kGB << "GB" - << ", used space = " << usedSize / curve::mds::kGB << "GB" - << "(" << usedRatio * 100 << "%, can be recycled = " - << canBeRecycle / curve::mds::kGB << "GB" - << "(" << recycleRatio * 100 << "%))" << ", left space = " - << (totalSize - usedSize) / curve::mds::kGB - << "GB(" << (1 - usedRatio) * 100 << "%)" << std::endl; - } - std::cout << "total: " << total << std::endl; - return 0; -} - -int StatusTool::StatusCmd() { - int res = PrintClusterStatus(); - bool success = true; - if (res != 0) { - success = false; - } - std::cout << std::endl; - res = PrintClientStatus(); - if (res != 0) { - success = false; - } - std::cout << std::endl; - res = PrintMdsStatus(); - if (res != 0) { - success = false; - } - std::cout << std::endl; - res = PrintEtcdStatus(); - if (res != 0) { - success = false; - } - std::cout << std::endl; - res = PrintSnapshotCloneStatus(); - if (res != 0) { - success = false; - } - std::cout << std::endl; - res = PrintChunkserverStatus(); - if (res != 0) { - success = false; - } - if (success) { - return 0; - } else { - return -1; - } -} - -int StatusTool::ChunkServerStatusCmd() { - return PrintChunkserverStatus(false); -} - -int StatusTool::PrintClusterStatus() { - int ret = 0; - std::cout << "Cluster status:" << std::endl; - bool healthy = IsClusterHeatlhy(); - if (healthy) { - std::cout << "cluster is healthy" << std::endl; - } else { - std::cout << "cluster is not healthy" << std::endl; - ret = -1; - } - const auto& statistics = copysetCheckCore_->GetCopysetStatistics(); - std::cout << "total copysets: " << statistics.totalNum - << ", unhealthy copysets: " << statistics.unhealthyNum - << ", unhealthy_ratio: " - << statistics.unhealthyRatio * 100 << "%" << std::endl; - std::vector phyPools; - std::vector lgPools; - int res = GetPoolsInCluster(&phyPools, &lgPools); - if (res != 0) { - std::cout << "GetPoolsInCluster fail!" << std::endl; - ret = -1; - } - std::cout << "physical pool number: " << phyPools.size() - << ", logical pool number: " << lgPools.size() << std::endl; - res = SpaceCmd(); - if (res != 0) { - ret = -1; - } - return ret; -} - -bool StatusTool::IsClusterHeatlhy() { - bool ret = true; - // 1、检查copyset健康状态 - int res = copysetCheckCore_->CheckCopysetsInCluster(); - if (res != 0) { - std::cout << "Copysets are not healthy!" 
<< std::endl; - ret = false; - } - - // 2、检查mds状态 - if (!CheckServiceHealthy(ServiceName::kMds)) { - ret = false; - } - - // 3、检查etcd在线状态 - if (!CheckServiceHealthy(ServiceName::kEtcd)) { - ret = false; - } - - // 4、检查snapshot clone server状态 - if (!noSnapshotServer_ && - !CheckServiceHealthy(ServiceName::kSnapshotCloneServer)) { - ret = false; - } - - return ret; -} - -bool StatusTool::CheckServiceHealthy(const ServiceName& name) { - std::vector leaderVec; - std::map onlineStatus; - switch (name) { - case ServiceName::kMds: { - leaderVec = mdsClient_->GetCurrentMds(); - mdsClient_->GetMdsOnlineStatus(&onlineStatus); - break; + + int StatusTool::ChunkServerListCmd() + { + std::vector chunkservers; + int res = mdsClient_->ListChunkServersInCluster(&chunkservers); + if (res != 0) + { + std::cout << "ListChunkserversInCluster fail!" << std::endl; + return -1; + } + + std::cout << "curve chunkserver list: " << std::endl; + uint64_t total = 0; + uint64_t online = 0; + uint64_t offline = 0; + uint64_t unstable = 0; + uint64_t pendding = 0; + uint64_t retired = 0; + uint64_t penddingCopyset = 0; + for (auto &chunkserver : chunkservers) + { + auto csId = chunkserver.chunkserverid(); + std::vector copysets; + int ret = mdsClient_->GetCopySetsInChunkServer(csId, ©sets); + if (ret != 0) + { + std::cout << "GetCopySetsInChunkServer fail, chunkserver id = " + << csId; + return -1; + } + + double unhealthyRatio = 0.0; + if (FLAGS_checkCSAlive) + { + // Send RPC to reset online status + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); + bool isOnline = copysetCheckCore_->CheckChunkServerOnline(csAddr); + if (isOnline) + { + chunkserver.set_onlinestate(OnlineState::ONLINE); + } + else + { + chunkserver.set_onlinestate(OnlineState::OFFLINE); + } + } + if (chunkserver.onlinestate() != OnlineState::ONLINE) + { + if (chunkserver.onlinestate() == OnlineState::OFFLINE) + { + offline++; + } + + if (chunkserver.onlinestate() == OnlineState::UNSTABLE) + { + unstable++; + } + unhealthyRatio = 1; + } + else + { + if (FLAGS_offline) + { + continue; + } + if (FLAGS_checkHealth) + { + copysetCheckCore_->CheckCopysetsOnChunkServer(csId); + const auto &statistics = + copysetCheckCore_->GetCopysetStatistics(); + unhealthyRatio = statistics.unhealthyRatio; + if (FLAGS_unhealthy && unhealthyRatio == 0) + { + continue; + } + } + online++; + } + if (chunkserver.status() == ChunkServerStatus::PENDDING) + { + pendding++; + penddingCopyset += copysets.size(); + } + if (chunkserver.status() == ChunkServerStatus::RETIRED) + { + retired++; + } + total++; + std::cout << "chunkServerID = " << csId + << ", diskType = " << chunkserver.disktype() + << ", hostIP = " << chunkserver.hostip() + << ", port = " << chunkserver.port() << ", rwStatus = " + << ChunkServerStatus_Name(chunkserver.status()) + << ", diskState = " + << DiskState_Name(chunkserver.diskstatus()) + << ", onlineState = " + << OnlineState_Name(chunkserver.onlinestate()) + << ", copysetNum = " << copysets.size() + << ", mountPoint = " << chunkserver.mountpoint() + << ", diskCapacity = " + << chunkserver.diskcapacity() / curve::mds::kGB << " GB" + << ", diskUsed = " << chunkserver.diskused() / curve::mds::kGB + << " GB"; + if (FLAGS_checkHealth) + { + std::cout << ", unhealthyCopysetRatio = " << unhealthyRatio * 100 + << "%"; + } + if (chunkserver.has_externalip()) + { + std::cout << ", externalIP = " << chunkserver.externalip(); + } + std::cout << std::endl; + } + std::cout << "total: " << total << ", online: " << online; + if 
(!FLAGS_checkCSAlive) + { + std::cout << ", unstable: " << unstable; + } + std::cout << ", offline: " << offline << std::endl; + + std::cout << "pendding: " << pendding + << ", penddingCopyset: " << penddingCopyset + << ", retired:" << retired << std::endl; + return 0; } - case ServiceName::kEtcd: { - int res = etcdClient_->GetEtcdClusterStatus(&leaderVec, - &onlineStatus); - if (res != 0) { - std:: cout << "GetEtcdClusterStatus fail!" << std::endl; - return false; + + int StatusTool::ServerListCmd() + { + std::vector servers; + int res = mdsClient_->ListServersInCluster(&servers); + if (res != 0) + { + std::cout << "ListServersInCluster fail!" << std::endl; + return -1; } - break; + std::cout << "curve server list: " << std::endl; + uint64_t total = 0; + for (auto &server : servers) + { + total++; + std::cout << "serverID = " << server.serverid() + << ", hostName = " << server.hostname() + << ", internalIP = " << server.internalip() + << ", internalPort = " << server.internalport() + << ", externalIp = " << server.externalip() + << ", externalPort = " << server.externalport() + << ", zoneID = " << server.zoneid() + << ", poolID = " << server.physicalpoolid() << std::endl; + } + std::cout << "total: " << total << std::endl; + return 0; } - case ServiceName::kSnapshotCloneServer: { - leaderVec = snapshotClient_->GetActiveAddrs(); - snapshotClient_->GetOnlineStatus(&onlineStatus); - break; + + int StatusTool::LogicalPoolListCmd() + { + std::vector lgPools; + int res = mdsClient_->ListLogicalPoolsInCluster(&lgPools); + if (res != 0) + { + std::cout << "ListLogicalPoolsInCluster fail!" << std::endl; + return -1; + } + std::cout << "curve logical pool list: " << std::endl; + uint64_t total = 0; + uint64_t allocSize; + AllocMap allocMap; + res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, &allocSize, + &allocMap); + if (res != 0) + { + std::cout << "GetAllocatedSize of recycle bin fail!" << std::endl; + return -1; + } + for (auto &lgPool : lgPools) + { + total++; + std::string poolName = lgPool.logicalpoolname(); + uint64_t totalSize; + std::string metricName = GetPoolLogicalCapacityName(poolName); + res = mdsClient_->GetMetric(metricName, &totalSize); + if (res != 0) + { + std::cout << "Get logical capacity from mds fail!" << std::endl; + return -1; + } + uint64_t usedSize; + metricName = GetPoolLogicalAllocName(poolName); + res = mdsClient_->GetMetric(metricName, &usedSize); + if (res != 0) + { + std::cout << "Get logical alloc size from mds fail!" 
<< std::endl; + return -1; + } + double usedRatio = 0; + if (total != 0) + { + usedRatio = static_cast(usedSize) / totalSize; + } + uint64_t canBeRecycle = allocMap[lgPool.logicalpoolid()]; + double recycleRatio = 0; + if (usedSize != 0) + { + recycleRatio = static_cast(canBeRecycle) / usedSize; + } + std::cout << "id = " << lgPool.logicalpoolid() + << ", name = " << lgPool.logicalpoolname() + << ", physicalPoolID = " << lgPool.physicalpoolid() + << ", type = " + << curve::mds::topology::LogicalPoolType_Name(lgPool.type()) + << ", scanEnable = " << lgPool.scanenable() + << ", allocateStatus = " + << curve::mds::topology::AllocateStatus_Name( + lgPool.allocatestatus()) + << ", total space = " << totalSize / curve::mds::kGB << "GB" + << ", used space = " << usedSize / curve::mds::kGB << "GB" + << "(" << usedRatio * 100 + << "%, can be recycled = " << canBeRecycle / curve::mds::kGB + << "GB" + << "(" << recycleRatio * 100 << "%))" + << ", left space = " + << (totalSize - usedSize) / curve::mds::kGB << "GB(" + << (1 - usedRatio) * 100 << "%)" << std::endl; + } + std::cout << "total: " << total << std::endl; + return 0; } - default: { - std::cout << "Unknown service" << std::endl; - return false; + + int StatusTool::StatusCmd() + { + int res = PrintClusterStatus(); + bool success = true; + if (res != 0) + { + success = false; + } + std::cout << std::endl; + res = PrintClientStatus(); + if (res != 0) + { + success = false; + } + std::cout << std::endl; + res = PrintMdsStatus(); + if (res != 0) + { + success = false; + } + std::cout << std::endl; + res = PrintEtcdStatus(); + if (res != 0) + { + success = false; + } + std::cout << std::endl; + res = PrintSnapshotCloneStatus(); + if (res != 0) + { + success = false; + } + std::cout << std::endl; + res = PrintChunkserverStatus(); + if (res != 0) + { + success = false; + } + if (success) + { + return 0; + } + else + { + return -1; + } } - } - bool ret = true; - if (leaderVec.empty()) { - std::cout << "No " << ToString(name) << " is active" << std::endl; - ret = false; - } else if (leaderVec.size() != 1) { - std::cout << "More than one " << ToString(name) << " is active" - << std::endl; - ret = false; - } - for (const auto& item : onlineStatus) { - if (!item.second) { - std::cout << ToString(name) << " " << item.first << " is offline" + + int StatusTool::ChunkServerStatusCmd() { return PrintChunkserverStatus(false); } + + int StatusTool::PrintClusterStatus() + { + int ret = 0; + std::cout << "Cluster status:" << std::endl; + bool healthy = IsClusterHeatlhy(); + if (healthy) + { + std::cout << "cluster is healthy" << std::endl; + } + else + { + std::cout << "cluster is not healthy" << std::endl; + ret = -1; + } + const auto &statistics = copysetCheckCore_->GetCopysetStatistics(); + std::cout << "total copysets: " << statistics.totalNum + << ", unhealthy copysets: " << statistics.unhealthyNum + << ", unhealthy_ratio: " << statistics.unhealthyRatio * 100 << "%" << std::endl; - ret = false; - } - } - return ret; -} - -void StatusTool::PrintOnlineStatus(const std::string& name, - const std::map& onlineStatus) { - std::vector online; - std::vector offline; - for (const auto& item : onlineStatus) { - if (item.second) { - online.emplace_back(item.first); - } else { - offline.emplace_back(item.first); + std::vector phyPools; + std::vector lgPools; + int res = GetPoolsInCluster(&phyPools, &lgPools); + if (res != 0) + { + std::cout << "GetPoolsInCluster fail!" 
<< std::endl; + ret = -1; + } + std::cout << "physical pool number: " << phyPools.size() + << ", logical pool number: " << lgPools.size() << std::endl; + res = SpaceCmd(); + if (res != 0) + { + ret = -1; + } + return ret; } - } - std::cout << "online " << name << " list: "; - for (uint64_t i = 0; i < online.size(); ++i) { - if (i != 0) { - std::cout << ", "; + + bool StatusTool::IsClusterHeatlhy() + { + bool ret = true; + // 1. Check the health status of copyset + int res = copysetCheckCore_->CheckCopysetsInCluster(); + if (res != 0) + { + std::cout << "Copysets are not healthy!" << std::endl; + ret = false; + } + + // 2. Check the mds status + if (!CheckServiceHealthy(ServiceName::kMds)) + { + ret = false; + } + + // 3. Check the online status of ETCD + if (!CheckServiceHealthy(ServiceName::kEtcd)) + { + ret = false; + } + + // 4. Check the status of the snapshot clone server + if (!noSnapshotServer_ && + !CheckServiceHealthy(ServiceName::kSnapshotCloneServer)) + { + ret = false; + } + + return ret; } - std::cout << online[i]; - } - std::cout << std::endl; - - std::cout << "offline " << name << " list: "; - for (uint64_t i = 0; i < offline.size(); ++i) { - if (i != 0) { - std::cout << ", "; + + bool StatusTool::CheckServiceHealthy(const ServiceName &name) + { + std::vector leaderVec; + std::map onlineStatus; + switch (name) + { + case ServiceName::kMds: + { + leaderVec = mdsClient_->GetCurrentMds(); + mdsClient_->GetMdsOnlineStatus(&onlineStatus); + break; + } + case ServiceName::kEtcd: + { + int res = + etcdClient_->GetEtcdClusterStatus(&leaderVec, &onlineStatus); + if (res != 0) + { + std::cout << "GetEtcdClusterStatus fail!" << std::endl; + return false; + } + break; + } + case ServiceName::kSnapshotCloneServer: + { + leaderVec = snapshotClient_->GetActiveAddrs(); + snapshotClient_->GetOnlineStatus(&onlineStatus); + break; + } + default: + { + std::cout << "Unknown service" << std::endl; + return false; + } + } + bool ret = true; + if (leaderVec.empty()) + { + std::cout << "No " << ToString(name) << " is active" << std::endl; + ret = false; + } + else if (leaderVec.size() != 1) + { + std::cout << "More than one " << ToString(name) << " is active" + << std::endl; + ret = false; + } + for (const auto &item : onlineStatus) + { + if (!item.second) + { + std::cout << ToString(name) << " " << item.first << " is offline" + << std::endl; + ret = false; + } + } + return ret; } - std::cout << offline[i]; - } - std::cout << std::endl; -} - -int StatusTool::PrintMdsStatus() { - std::cout << "MDS status:" << std::endl; - std::string version; - std::vector failedList; - int res = versionTool_->GetAndCheckMdsVersion(&version, &failedList); - int ret = 0; - if (res != 0) { - std::cout << "GetAndCheckMdsVersion fail" << std::endl; - ret = -1; - } else { - std::cout << "version: " << version << std::endl; - if (!failedList.empty()) { - versionTool_->PrintFailedList(failedList); - ret = -1; + + void StatusTool::PrintOnlineStatus( + const std::string &name, const std::map &onlineStatus) + { + std::vector online; + std::vector offline; + for (const auto &item : onlineStatus) + { + if (item.second) + { + online.emplace_back(item.first); + } + else + { + offline.emplace_back(item.first); + } + } + std::cout << "online " << name << " list: "; + for (uint64_t i = 0; i < online.size(); ++i) + { + if (i != 0) + { + std::cout << ", "; + } + std::cout << online[i]; + } + std::cout << std::endl; + + std::cout << "offline " << name << " list: "; + for (uint64_t i = 0; i < offline.size(); ++i) + { + if (i != 0) + { + 
std::cout << ", "; + } + std::cout << offline[i]; + } + std::cout << std::endl; } - } - std::vector mdsAddrs = mdsClient_->GetCurrentMds(); - std::cout << "current MDS: " << mdsAddrs << std::endl; - std::map onlineStatus; - mdsClient_->GetMdsOnlineStatus(&onlineStatus); - if (res != 0) { - std::cout << "GetMdsOnlineStatus fail!" << std::endl; - ret = -1; - } else { - PrintOnlineStatus("mds", onlineStatus); - } - return ret; -} - -int StatusTool::PrintEtcdStatus() { - std::cout << "Etcd status:" << std::endl; - std::string version; - std::vector failedList; - int res = etcdClient_->GetAndCheckEtcdVersion(&version, &failedList); - int ret = 0; - if (res != 0) { - std::cout << "GetAndCheckEtcdVersion fail" << std::endl; - ret = -1; - } else { - std::cout << "version: " << version << std::endl; - if (!failedList.empty()) { - VersionTool::PrintFailedList(failedList); - ret = -1; + + int StatusTool::PrintMdsStatus() + { + std::cout << "MDS status:" << std::endl; + std::string version; + std::vector failedList; + int res = versionTool_->GetAndCheckMdsVersion(&version, &failedList); + int ret = 0; + if (res != 0) + { + std::cout << "GetAndCheckMdsVersion fail" << std::endl; + ret = -1; + } + else + { + std::cout << "version: " << version << std::endl; + if (!failedList.empty()) + { + versionTool_->PrintFailedList(failedList); + ret = -1; + } + } + std::vector mdsAddrs = mdsClient_->GetCurrentMds(); + std::cout << "current MDS: " << mdsAddrs << std::endl; + std::map onlineStatus; + mdsClient_->GetMdsOnlineStatus(&onlineStatus); + if (res != 0) + { + std::cout << "GetMdsOnlineStatus fail!" << std::endl; + ret = -1; + } + else + { + PrintOnlineStatus("mds", onlineStatus); + } + return ret; } - } - std::vector leaderAddrVec; - std::map onlineStatus; - res = etcdClient_->GetEtcdClusterStatus(&leaderAddrVec, &onlineStatus); - if (res != 0) { - std::cout << "GetEtcdClusterStatus fail!" << std::endl; - return -1; - } - std::cout << "current etcd: " << leaderAddrVec << std::endl; - PrintOnlineStatus("etcd", onlineStatus); - return ret; -} - -int StatusTool::PrintSnapshotCloneStatus() { - std::cout << "SnapshotCloneServer status:" << std::endl; - if (noSnapshotServer_) { - std::cout << "No SnapshotCloneServer" << std::endl; - return 0; - } - std::string version; - std::vector failedList; - int res = versionTool_->GetAndCheckSnapshotCloneVersion(&version, - &failedList); - int ret = 0; - if (res != 0) { - std::cout << "GetAndCheckSnapshotCloneVersion fail" << std::endl; - ret = -1; - } else { - std::cout << "version: " << version << std::endl; - if (!failedList.empty()) { - versionTool_->PrintFailedList(failedList); - ret = -1; + + int StatusTool::PrintEtcdStatus() + { + std::cout << "Etcd status:" << std::endl; + std::string version; + std::vector failedList; + int res = etcdClient_->GetAndCheckEtcdVersion(&version, &failedList); + int ret = 0; + if (res != 0) + { + std::cout << "GetAndCheckEtcdVersion fail" << std::endl; + ret = -1; + } + else + { + std::cout << "version: " << version << std::endl; + if (!failedList.empty()) + { + VersionTool::PrintFailedList(failedList); + ret = -1; + } + } + std::vector leaderAddrVec; + std::map onlineStatus; + res = etcdClient_->GetEtcdClusterStatus(&leaderAddrVec, &onlineStatus); + if (res != 0) + { + std::cout << "GetEtcdClusterStatus fail!" 
+            std::cout << "GetEtcdClusterStatus fail!" << std::endl;
+            return -1;
+        }
+        std::cout << "current etcd: " << leaderAddrVec << std::endl;
+        PrintOnlineStatus("etcd", onlineStatus);
+        return ret;
     }
-    }
-    std::vector<std::string> activeAddrs = snapshotClient_->GetActiveAddrs();
-    std::map<std::string, bool> onlineStatus;
-    snapshotClient_->GetOnlineStatus(&onlineStatus);
-    std::cout << "current snapshot-clone-server: " << activeAddrs << std::endl;
-    PrintOnlineStatus("snapshot-clone-server", onlineStatus);
-    return ret;
-}
-
-int StatusTool::PrintClientStatus() {
-    std::cout << "Client status: " << std::endl;
-    ClientVersionMapType versionMap;
-    int res = versionTool_->GetClientVersion(&versionMap);
-    if (res != 0) {
-        std::cout << "GetClientVersion fail" << std::endl;
-        return -1;
-    }
-    for (const auto& item : versionMap) {
-        std::cout << item.first << ": ";
-        bool first = true;
-        for (const auto& item2 : item.second) {
-            if (!first) {
-                std::cout << ", ";
-            }
-            std::cout << "version-" << item2.first << ": "
-                      << item2.second.size();
-            first = false;
+
+    int StatusTool::PrintSnapshotCloneStatus()
+    {
+        std::cout << "SnapshotCloneServer status:" << std::endl;
+        if (noSnapshotServer_)
+        {
+            std::cout << "No SnapshotCloneServer" << std::endl;
+            return 0;
+        }
+        std::string version;
+        std::vector<std::string> failedList;
+        int res =
+            versionTool_->GetAndCheckSnapshotCloneVersion(&version, &failedList);
+        int ret = 0;
+        if (res != 0)
+        {
+            std::cout << "GetAndCheckSnapshotCloneVersion fail" << std::endl;
+            ret = -1;
+        }
+        else
+        {
+            std::cout << "version: " << version << std::endl;
+            if (!failedList.empty())
+            {
+                versionTool_->PrintFailedList(failedList);
+                ret = -1;
+            }
+        }
+        std::vector<std::string> activeAddrs = snapshotClient_->GetActiveAddrs();
+        std::map<std::string, bool> onlineStatus;
+        snapshotClient_->GetOnlineStatus(&onlineStatus);
+        std::cout << "current snapshot-clone-server: " << activeAddrs << std::endl;
+        PrintOnlineStatus("snapshot-clone-server", onlineStatus);
+        return ret;
     }
-        std::cout << std::endl;
-        if (FLAGS_detail) {
-            std::cout << "version map: " << std::endl;
-            versionTool_->PrintVersionMap(item.second);
+
+    int StatusTool::PrintClientStatus()
+    {
+        std::cout << "Client status: " << std::endl;
+        ClientVersionMapType versionMap;
+        int res = versionTool_->GetClientVersion(&versionMap);
+        if (res != 0)
+        {
+            std::cout << "GetClientVersion fail" << std::endl;
+            return -1;
+        }
+        for (const auto &item : versionMap)
+        {
+            std::cout << item.first << ": ";
+            bool first = true;
+            for (const auto &item2 : item.second)
+            {
+                if (!first)
+                {
+                    std::cout << ", ";
+                }
+                std::cout << "version-" << item2.first << ": "
+                          << item2.second.size();
+                first = false;
+            }
+            std::cout << std::endl;
+            if (FLAGS_detail)
+            {
+                std::cout << "version map: " << std::endl;
+                versionTool_->PrintVersionMap(item.second);
+            }
+        }
+        return 0;
     }
-    }
-    return 0;
-}
-
-int StatusTool::ClientListCmd() {
-    std::vector<std::string> clientAddrs;
-    int res = mdsClient_->ListClient(&clientAddrs, FLAGS_listClientInRepo);
-    if (res != 0) {
-        std::cout << "ListClient from mds fail!" << std::endl;
-        return -1;
-    }
-    for (const auto& addr : clientAddrs) {
-        std::cout << addr << std::endl;
-    }
-    return 0;
-}
-
-int StatusTool::ScanStatusCmd() {
-    if (FLAGS_logicalPoolId != 0 && FLAGS_copysetId != 0) {
-        CopysetInfo copysetInfo;
-        auto lpid = FLAGS_logicalPoolId;
-        auto copysetId = FLAGS_copysetId;
-        if (mdsClient_->GetCopyset(lpid, copysetId, &copysetInfo) != 0) {
-            std::cout << "GetCopyset fail!"
-                      << std::endl;
-            return -1;
+
+    int StatusTool::ClientListCmd()
+    {
+        std::vector<std::string> clientAddrs;
+        int res = mdsClient_->ListClient(&clientAddrs, FLAGS_listClientInRepo);
+        if (res != 0)
+        {
+            std::cout << "ListClient from mds fail!" << std::endl;
+            return -1;
+        }
+        for (const auto &addr : clientAddrs)
+        {
+            std::cout << addr << std::endl;
+        }
+        return 0;
     }
-        std::cout
-            << "Scan status for copyset("
-            << lpid << "," << copysetId << "):" << std::endl
-            << "  scaning=" << copysetInfo.scaning()
-            << "  lastScanSec=" << copysetInfo.lastscansec()
-            << "  lastScanConsistent=" << copysetInfo.lastscanconsistent()
-            << std::endl;
-
-        return 0;
-    }
-
-    std::vector<CopysetInfo> copysetInfos;
-    if (mdsClient_->GetCopySetsInCluster(&copysetInfos, true) != 0) {
-        std::cout << "GetCopySetsInCluster fail!" << std::endl;
-        return -1;
-    }
-
-    int count = 0;
-    std::cout << "Scaning copysets: " << copysetInfos.size();
-    for (auto& copysetInfo : copysetInfos) {
-        if (count % 5 == 0) {
+    int StatusTool::ScanStatusCmd()
+    {
+        if (FLAGS_logicalPoolId != 0 && FLAGS_copysetId != 0)
+        {
+            CopysetInfo copysetInfo;
+            auto lpid = FLAGS_logicalPoolId;
+            auto copysetId = FLAGS_copysetId;
+            if (mdsClient_->GetCopyset(lpid, copysetId, &copysetInfo) != 0)
+            {
+                std::cout << "GetCopyset fail!" << std::endl;
+                return -1;
+            }
+
+            std::cout << "Scan status for copyset(" << lpid << "," << copysetId
+                      << "):" << std::endl
+                      << "  scaning=" << copysetInfo.scaning()
+                      << "  lastScanSec=" << copysetInfo.lastscansec()
+                      << "  lastScanConsistent=" << copysetInfo.lastscanconsistent()
+                      << std::endl;
+
+            return 0;
+        }
+
+        std::vector<CopysetInfo> copysetInfos;
+        if (mdsClient_->GetCopySetsInCluster(&copysetInfos, true) != 0)
+        {
+            std::cout << "GetCopySetsInCluster fail!" << std::endl;
+            return -1;
+        }
+
+        int count = 0;
+        std::cout << "Scanning copysets: " << copysetInfos.size();
+        for (auto &copysetInfo : copysetInfos)
+        {
+            if (count % 5 == 0)
+            {
+                std::cout << std::endl;
+            }
+            std::cout << " (" << copysetInfo.logicalpoolid() << ","
+                      << copysetInfo.copysetid() << ")";
+            count++;
+        }
+        std::cout << std::endl;
+
+        return 0;
     }
-            std::cout << " (" << copysetInfo.logicalpoolid()
-                      << "," << copysetInfo.copysetid() << ")";
-        count++;
-    }
-
-    std::cout << std::endl;
-
-    return 0;
-}
-
-int CheckUseWalPool(const std::map<PoolIdType, std::vector<ChunkServerInfo>>
-                        &poolChunkservers,
-                    bool *useWalPool,
-                    bool *useChunkFilePoolAsWalPool,
-                    std::shared_ptr<MetricClient> metricClient) {
-    int ret = 0;
-    if (!poolChunkservers.empty()) {
-        ChunkServerInfo chunkserver = poolChunkservers.begin()->second[0];
-        std::string csAddr = chunkserver.hostip()
-                             + ":" + std::to_string(chunkserver.port());
-        // check whether use chunkfilepool
-        std::string metricValue;
-        std::string metricName = GetUseWalPoolName(csAddr);
-        MetricRet res = metricClient->GetConfValueFromMetric(csAddr,
-                                                metricName, &metricValue);
-        if (res != MetricRet::kOK) {
-            std::cout << "Get use chunkfilepool conf "
-                      << csAddr << " fail!" << std::endl;
-            ret = -1;
-        }
-        std::string raftLogProtocol =
-            curve::common::UriParser ::GetProtocolFromUri(metricValue);
-        *useWalPool = kProtocalCurve == raftLogProtocol ? true : false;
-
-        // check whether use chunkfilepool as walpool from chunkserver conf metric  // NOLINT
-        metricName = GetUseChunkFilePoolAsWalPoolName(csAddr);
-        res = metricClient->GetConfValueFromMetric(csAddr, metricName,
-                                                   &metricValue);
-        if (res != MetricRet::kOK) {
-            std::cout << "Get use chunkfilepool as walpool conf "
-                      << csAddr << " fail!"
-                      << std::endl;
-            ret = -1;
-        }
-        *useChunkFilePoolAsWalPool = StringToBool(metricValue,
-                                                  useChunkFilePoolAsWalPool);
-    }
-    return ret;
-}
-
-int PrintChunkserverOnlineStatus(
-    const std::map<PoolIdType, std::vector<ChunkServerInfo>> &poolChunkservers,
-    std::shared_ptr<CopysetCheckCore> copysetCheckCore,
-    std::shared_ptr<MDSClient> mdsClient) {
-    int ret = 0;
-    uint64_t total = 0;
-    uint64_t online = 0;
-    uint64_t offline = 0;
-    std::vector<ChunkServerIdType> offlineCs;
-    for (const auto& poolChunkserver : poolChunkservers) {
-        for (const auto& chunkserver : poolChunkserver.second) {
-            total++;
-            std::string csAddr = chunkserver.hostip()
-                                 + ":" + std::to_string(chunkserver.port());
-            if (copysetCheckCore->CheckChunkServerOnline(csAddr)) {
-                online++;
-            } else {
-                offline++;
-                offlineCs.emplace_back(chunkserver.chunkserverid());
+
+    int CheckUseWalPool(
+        const std::map<PoolIdType, std::vector<ChunkServerInfo>> &poolChunkservers,
+        bool *useWalPool, bool *useChunkFilePoolAsWalPool,
+        std::shared_ptr<MetricClient> metricClient)
+    {
+        int ret = 0;
+        if (!poolChunkservers.empty())
+        {
+            ChunkServerInfo chunkserver = poolChunkservers.begin()->second[0];
+            std::string csAddr =
+                chunkserver.hostip() + ":" + std::to_string(chunkserver.port());
+            // Check whether chunkfilepool is used
+            std::string metricValue;
+            std::string metricName = GetUseWalPoolName(csAddr);
+            MetricRet res = metricClient->GetConfValueFromMetric(csAddr, metricName,
+                                                                 &metricValue);
+            if (res != MetricRet::kOK)
+            {
+                std::cout << "Get use chunkfilepool conf " << csAddr << " fail!"
+                          << std::endl;
+                ret = -1;
+            }
+            std::string raftLogProtocol =
+                curve::common::UriParser ::GetProtocolFromUri(metricValue);
+            *useWalPool = kProtocalCurve == raftLogProtocol ? true : false;
+
+            // Check whether chunkfilepool is used as walpool, from the
+            // chunkserver conf metric  // NOLINT
+            metricName = GetUseChunkFilePoolAsWalPoolName(csAddr);
+            res = metricClient->GetConfValueFromMetric(csAddr, metricName,
+                                                       &metricValue);
+            if (res != MetricRet::kOK)
+            {
+                std::cout << "Get use chunkfilepool as walpool conf " << csAddr
+                          << " fail!"
+                          << std::endl;
+                ret = -1;
+            }
+            *useChunkFilePoolAsWalPool =
+                StringToBool(metricValue, useChunkFilePoolAsWalPool);
        }
+        return ret;
     }
-    }
-    // get the recover status of offline chunkservers
-    std::vector<ChunkServerIdType> offlineRecover;
-    if (offlineCs.size() > 0) {
-        std::map<ChunkServerIdType, bool> statusMap;
-        int res = mdsClient->QueryChunkServerRecoverStatus(
-            offlineCs, &statusMap);
-        if (res != 0) {
-            std::cout << "query offlinne chunkserver recover status fail";
-            ret = -1;
-        } else {
-            // Distinguish between recovering and unrecovered
-            for (auto it = statusMap.begin(); it != statusMap.end(); ++it) {
-                if (it->second) {
-                    offlineRecover.emplace_back(it->first);
+
+    int PrintChunkserverOnlineStatus(
+        const std::map<PoolIdType, std::vector<ChunkServerInfo>> &poolChunkservers,
+        std::shared_ptr<CopysetCheckCore> copysetCheckCore,
+        std::shared_ptr<MDSClient> mdsClient)
+    {
+        int ret = 0;
+        uint64_t total = 0;
+        uint64_t online = 0;
+        uint64_t offline = 0;
+        std::vector<ChunkServerIdType> offlineCs;
+        for (const auto &poolChunkserver : poolChunkservers)
+        {
+            for (const auto &chunkserver : poolChunkserver.second)
+            {
+                total++;
+                std::string csAddr =
+                    chunkserver.hostip() + ":" + std::to_string(chunkserver.port());
+                if (copysetCheckCore->CheckChunkServerOnline(csAddr))
+                {
+                    online++;
+                }
+                else
+                {
+                    offline++;
+                    offlineCs.emplace_back(chunkserver.chunkserverid());
+                }
+            }
+        }
+        // get the recover status of offline chunkservers
+        std::vector<ChunkServerIdType> offlineRecover;
+        if (offlineCs.size() > 0)
+        {
+            std::map<ChunkServerIdType, bool> statusMap;
+            int res =
+                mdsClient->QueryChunkServerRecoverStatus(offlineCs, &statusMap);
+            if (res != 0)
+            {
+                std::cout << "query offline chunkserver recover status fail";
+                ret = -1;
+            }
+            else
+            {
+                // Distinguish between recovering and unrecovered
+                for (auto it = statusMap.begin(); it != statusMap.end(); ++it)
+                {
+                    if (it->second)
+                    {
+                        offlineRecover.emplace_back(it->first);
+                    }
                 }
             }
         }
+        std::cout << "chunkserver: total num = " << total << ", online = " << online
+                  << ", offline = " << offline
+                  << "(recoveringout = " << offlineRecover.size()
+                  << ", chunkserverlist: [";
+
+        int i = 0;
+        for (ChunkServerIdType csId : offlineRecover)
+        {
+            i++;
+            if (i == static_cast<int>(offlineRecover.size()))
+            {
+                std::cout << csId;
+            }
+            else
+            {
+                std::cout << csId << ", ";
+            }
+        }
+        std::cout << "])" << std::endl;
+        return ret;
     }
-    }
-    std::cout << "chunkserver: total num = " << total
-              << ", online = " << online
-              << ", offline = " << offline
-              << "(recoveringout = " << offlineRecover.size()
-              << ", chunkserverlist: [";
-
-    int i = 0;
-    for (ChunkServerIdType csId : offlineRecover) {
-        i++;
-        if (i == static_cast<int>(offlineRecover.size())) {
-            std::cout << csId;
-        } else {
-            std::cout << csId << ", ";
+
+    int GetChunkserverLeftSize(
+        const std::map<PoolIdType, std::vector<ChunkServerInfo>> &poolChunkservers,
+        std::map<PoolIdType, std::vector<uint64_t>> *poolChunkLeftSize,
+        std::map<PoolIdType, std::vector<uint64_t>> *poolWalSegmentLeftSize,
+        bool useWalPool, bool useChunkFilePoolAsWalPool,
+        std::shared_ptr<MetricClient> metricClient)
+    {
+        int ret = 0;
+        for (const auto &poolChunkserver : poolChunkservers)
+        {
+            std::vector<uint64_t> chunkLeftSize;
+            std::vector<uint64_t> walSegmentLeftSize;
+            for (const auto &chunkserver : poolChunkserver.second)
+            {
+                std::string csAddr =
+                    chunkserver.hostip() + ":" + std::to_string(chunkserver.port());
+                std::string metricName = GetCSLeftChunkName(csAddr);
+                uint64_t chunkNum;
+                MetricRet res =
+                    metricClient->GetMetricUint(csAddr, metricName, &chunkNum);
+                if (res != MetricRet::kOK)
+                {
+                    std::cout << "Get left chunk size of chunkserver " << csAddr
+                              << " fail!"
+                              << std::endl;
+                    ret = -1;
+                    continue;
+                }
+                uint64_t size = chunkNum * FLAGS_chunkSize;
+                chunkLeftSize.emplace_back(size / mds::kGB);
+
+                // walfilepool left size
+                if (useWalPool && !useChunkFilePoolAsWalPool)
+                {
+                    metricName = GetCSLeftWalSegmentName(csAddr);
+                    uint64_t walSegmentNum;
+                    res = metricClient->GetMetricUint(csAddr, metricName,
+                                                      &walSegmentNum);
+                    if (res != MetricRet::kOK)
+                    {
+                        std::cout << "Get left wal segment size of chunkserver "
+                                  << csAddr << " fail!" << std::endl;
+                        ret = -1;
+                        continue;
+                    }
+                    size = walSegmentNum * FLAGS_walSegmentSize;
+                    walSegmentLeftSize.emplace_back(size / mds::kGB);
+                }
+            }
+            poolChunkLeftSize->emplace(poolChunkserver.first, chunkLeftSize);
+            poolWalSegmentLeftSize->emplace(poolChunkserver.first,
+                                            walSegmentLeftSize);
+        }
+        return ret;
     }
-    }
-    std::cout << "])" << std::endl;
-    return ret;
-}
-
-int GetChunkserverLeftSize(
-    const std::map<PoolIdType, std::vector<ChunkServerInfo>> &poolChunkservers,
-    std::map<PoolIdType, std::vector<uint64_t>> *poolChunkLeftSize,
-    std::map<PoolIdType, std::vector<uint64_t>> *poolWalSegmentLeftSize,
-    bool useWalPool,
-    bool useChunkFilePoolAsWalPool,
-    std::shared_ptr<MetricClient> metricClient) {
-    int ret = 0;
-    for (const auto& poolChunkserver : poolChunkservers) {
-        std::vector<uint64_t> chunkLeftSize;
-        std::vector<uint64_t> walSegmentLeftSize;
-        for (const auto& chunkserver : poolChunkserver.second) {
-            std::string csAddr = chunkserver.hostip()
-                                 + ":" + std::to_string(chunkserver.port());
-            std::string metricName = GetCSLeftChunkName(csAddr);
-            uint64_t chunkNum;
-            MetricRet res = metricClient->GetMetricUint(csAddr,
-                                                    metricName, &chunkNum);
-            if (res != MetricRet::kOK) {
-                std::cout << "Get left chunk size of chunkserver " << csAddr
-                          << " fail!" << std::endl;
+
+    int StatusTool::PrintChunkserverStatus(bool checkLeftSize)
+    {
+        // get and check chunkserver version
+        std::cout << "ChunkServer status:" << std::endl;
+        std::string version;
+        std::vector<std::string> failedList;
+        int res =
+            versionTool_->GetAndCheckChunkServerVersion(&version, &failedList);
+        int ret = 0;
+        if (res != 0)
+        {
+            std::cout << "GetAndCheckChunkserverVersion fail" << std::endl;
            ret = -1;
-                continue;
-            }
-            uint64_t size = chunkNum * FLAGS_chunkSize;
-            chunkLeftSize.emplace_back(size / mds::kGB);
-
-            // walfilepool left size
-            if (useWalPool && !useChunkFilePoolAsWalPool) {
-                metricName = GetCSLeftWalSegmentName(csAddr);
-                uint64_t walSegmentNum;
-                res = metricClient->GetMetricUint(csAddr, metricName,
-                                                  &walSegmentNum);
-                if (res != MetricRet::kOK) {
-                    std::cout << "Get left wal segment size of chunkserver "
-                              << csAddr << " fail!"
-                              << std::endl;
+        }
+        else
+        {
+            std::cout << "version: " << version << std::endl;
+            if (!failedList.empty())
+            {
+                versionTool_->PrintFailedList(failedList);
                ret = -1;
-                    continue;
             }
-                size = walSegmentNum * FLAGS_walSegmentSize;
-                walSegmentLeftSize.emplace_back(size / mds::kGB);
         }
-        }
-        poolChunkLeftSize->emplace(poolChunkserver.first, chunkLeftSize);
-        poolWalSegmentLeftSize->emplace(poolChunkserver.first,
-                                        walSegmentLeftSize);
-    }
-    return ret;
-}
-
-int StatusTool::PrintChunkserverStatus(bool checkLeftSize) {
-    // get and check chunkserver version
-    std::cout << "ChunkServer status:" << std::endl;
-    std::string version;
-    std::vector<std::string> failedList;
-    int res = versionTool_->GetAndCheckChunkServerVersion(&version,
-                                                          &failedList);
-    int ret = 0;
-    if (res != 0) {
-        std::cout << "GetAndCheckChunkserverVersion fail" << std::endl;
-        ret = -1;
-    } else {
-        std::cout << "version: " << version << std::endl;
-        if (!failedList.empty()) {
-            versionTool_->PrintFailedList(failedList);
-            ret = -1;
-        }
-    }
-    // list chunkservers in cluster group by poolid
-    std::map<PoolIdType, std::vector<ChunkServerInfo>> poolChunkservers;
-    res = mdsClient_->ListChunkServersInCluster(&poolChunkservers);
-    if (res != 0) {
-        std::cout << "ListChunkServersInCluster fail!" << std::endl;
-        return -1;
-    }
-
-    // get chunkserver online status
-    ret = PrintChunkserverOnlineStatus(poolChunkservers,
-                                       copysetCheckCore_,
-                                       mdsClient_);
-    if (!checkLeftSize) {
-        return ret;
-    }
-
-    bool useWalPool = false;
-    bool useChunkFilePoolAsWalPool = true;
-    // check use walpool
-    ret = CheckUseWalPool(poolChunkservers, &useWalPool,
-                          &useChunkFilePoolAsWalPool, metricClient_);
-
-    // get chunkserver left size
-    std::map<PoolIdType, std::vector<uint64_t>> poolChunkLeftSize;
-    std::map<PoolIdType, std::vector<uint64_t>> poolWalSegmentLeftSize;
-    ret = GetChunkserverLeftSize(poolChunkservers,
-                                 &poolChunkLeftSize,
-                                 &poolWalSegmentLeftSize,
-                                 useWalPool,
-                                 useChunkFilePoolAsWalPool,
-                                 metricClient_);
-    if (0 != ret) {
-        return ret;
-    }
-    // print filepool left size
-    PrintCsLeftSizeStatistics("chunkfilepool", poolChunkLeftSize);
-    if (useWalPool && !useChunkFilePoolAsWalPool) {
-        PrintCsLeftSizeStatistics("walfilepool", poolWalSegmentLeftSize);
-    } else if (useChunkFilePoolAsWalPool) {
-        std::cout << "No walpool left size found, "
-                  << "use chunkfilepool as walpool!\n";
-    } else {
-        std::cout << "No walpool left size found, "
-                  << "no walpool used!\n";
-    }
-    return ret;
-}
-
-void StatusTool::PrintCsLeftSizeStatistics(const std::string& name,
-    const std::map<PoolIdType, std::vector<uint64_t>>& poolLeftSize) {
-    if (poolLeftSize.empty()) {
-        std::cout << "No " << name << " left size found!" << std::endl;
-        return;
-    }
-    for (const auto& leftSize : poolLeftSize) {
-        if (leftSize.second.empty()) {
-            continue;
-        }
-        uint64_t min = leftSize.second[0];
-        uint64_t max = leftSize.second[0];
-        double sum = 0;
-        for (const auto& size : leftSize.second) {
-            sum += size;
-            if (size < min) {
-                min = size;
+        // list chunkservers in cluster group by poolid
+        std::map<PoolIdType, std::vector<ChunkServerInfo>> poolChunkservers;
+        res = mdsClient_->ListChunkServersInCluster(&poolChunkservers);
+        if (res != 0)
+        {
+            std::cout << "ListChunkServersInCluster fail!"
+                      << std::endl;
+            return -1;
        }
-            if (size > max) {
-                max = size;
+
+        // get chunkserver online status
+        ret = PrintChunkserverOnlineStatus(poolChunkservers, copysetCheckCore_,
+                                           mdsClient_);
+        if (!checkLeftSize)
+        {
+            return ret;
        }
-        }
-        uint64_t range = max - min;
-        double avg = sum / leftSize.second.size();
-        sum = 0;
-        for (const auto& size : leftSize.second) {
-            sum += (size - avg) * (size - avg);
-        }
-        double var = sum / leftSize.second.size();
-        std:: cout.setf(std::ios::fixed);
-        std::cout<< std::setprecision(2);
-        std::cout<< "pool" << leftSize.first << " " << name;
-        std::cout << " left size: min = " << min << "GB"
-                  << ", max = " << max << "GB"
-                  << ", average = " << avg << "GB"
-                  << ", range = " << range << "GB"
-                  << ", variance = " << var << std::endl;
-    }
-}
-
-int StatusTool::GetPoolsInCluster(std::vector<PhysicalPoolInfo>* phyPools,
-                                  std::vector<LogicalPoolInfo>* lgPools) {
-    int res = mdsClient_->ListPhysicalPoolsInCluster(phyPools);
-    if (res != 0) {
-        std::cout << "ListPhysicalPoolsInCluster fail!" << std::endl;
-        return -1;
-    }
-    for (const auto& phyPool : *phyPools) {
-        int res = mdsClient_->ListLogicalPoolsInPhysicalPool(
-                      phyPool.physicalpoolid(), lgPools) != 0;
-        if (res != 0) {
-            std::cout << "ListLogicalPoolsInPhysicalPool fail!" << std::endl;
-            return -1;
+        bool useWalPool = false;
+        bool useChunkFilePoolAsWalPool = true;
+        // check use walpool
+        ret = CheckUseWalPool(poolChunkservers, &useWalPool,
+                              &useChunkFilePoolAsWalPool, metricClient_);
+
+        // get chunkserver left size
+        std::map<PoolIdType, std::vector<uint64_t>> poolChunkLeftSize;
+        std::map<PoolIdType, std::vector<uint64_t>> poolWalSegmentLeftSize;
+        ret = GetChunkserverLeftSize(poolChunkservers, &poolChunkLeftSize,
+                                     &poolWalSegmentLeftSize, useWalPool,
+                                     useChunkFilePoolAsWalPool, metricClient_);
+        if (0 != ret)
+        {
+            return ret;
+        }
+        // print filepool left size
+        PrintCsLeftSizeStatistics("chunkfilepool", poolChunkLeftSize);
+        if (useWalPool && !useChunkFilePoolAsWalPool)
+        {
+            PrintCsLeftSizeStatistics("walfilepool", poolWalSegmentLeftSize);
+        }
+        else if (useChunkFilePoolAsWalPool)
+        {
+            std::cout << "No walpool left size found, "
+                      << "use chunkfilepool as walpool!\n";
+        }
+        else
+        {
+            std::cout << "No walpool left size found, "
+                      << "no walpool used!\n";
+        }
+        return ret;
     }
-    }
-    return 0;
-}
-
-int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) {
-    std::vector<LogicalPoolInfo> lgPools;
-    int res = mdsClient_->ListLogicalPoolsInCluster(&lgPools);
-    if (res != 0) {
-        std::cout << "ListLogicalPoolsInCluster fail!" << std::endl;
-        return -1;
-    }
-    res = mdsClient_->GetFileSize(curve::mds::ROOTFILENAME,
-                                  &spaceInfo->currentFileSize);
-    if (res != 0) {
-        std::cout << "Get root directory file size from mds fail!" << std::endl;
-        return -1;
-    }
-    // 从metric获取space信息
-    for (const auto& lgPool : lgPools) {
-        LogicalpoolSpaceInfo lpinfo;
-        std::string poolName = lgPool.logicalpoolname();
-        lpinfo.poolName = poolName;
-        std::string metricName = GetPoolTotalChunkSizeName(poolName);
-        uint64_t size;
-        int res = mdsClient_->GetMetric(metricName, &size);
-        if (res != 0) {
-            std::cout << "Get total chunk byte from mds fail!" << std::endl;
-            return -1;
+
+    void StatusTool::PrintCsLeftSizeStatistics(
+        const std::string &name,
+        const std::map<PoolIdType, std::vector<uint64_t>> &poolLeftSize)
+    {
+        if (poolLeftSize.empty())
+        {
+            std::cout << "No " << name << " left size found!"
+                      << std::endl;
+            return;
+        }
+        for (const auto &leftSize : poolLeftSize)
+        {
+            if (leftSize.second.empty())
+            {
+                continue;
+            }
+            uint64_t min = leftSize.second[0];
+            uint64_t max = leftSize.second[0];
+            double sum = 0;
+            for (const auto &size : leftSize.second)
+            {
+                sum += size;
+                if (size < min)
+                {
+                    min = size;
+                }
+                if (size > max)
+                {
+                    max = size;
+                }
+            }
+            uint64_t range = max - min;
+            double avg = sum / leftSize.second.size();
+            sum = 0;
+            for (const auto &size : leftSize.second)
+            {
+                sum += (size - avg) * (size - avg);
+            }
+
+            double var = sum / leftSize.second.size();
+            std::cout.setf(std::ios::fixed);
+            std::cout << std::setprecision(2);
+            std::cout << "pool" << leftSize.first << " " << name;
+            std::cout << " left size: min = " << min << "GB"
+                      << ", max = " << max << "GB"
+                      << ", average = " << avg << "GB"
+                      << ", range = " << range << "GB"
+                      << ", variance = " << var << std::endl;
+        }
    }
-        spaceInfo->totalChunkSize += size;
-        lpinfo.totalChunkSize +=size;
-        metricName = GetPoolUsedChunkSizeName(poolName);
-        res = mdsClient_->GetMetric(metricName, &size);
-        if (res != 0) {
-            std::cout << "Get used chunk byte from mds fail!" << std::endl;
-            return -1;
+
+    int StatusTool::GetPoolsInCluster(std::vector<PhysicalPoolInfo> *phyPools,
+                                      std::vector<LogicalPoolInfo> *lgPools)
+    {
+        int res = mdsClient_->ListPhysicalPoolsInCluster(phyPools);
+        if (res != 0)
+        {
+            std::cout << "ListPhysicalPoolsInCluster fail!" << std::endl;
+            return -1;
+        }
+        for (const auto &phyPool : *phyPools)
+        {
+            int res = mdsClient_->ListLogicalPoolsInPhysicalPool(
+                          phyPool.physicalpoolid(), lgPools) != 0;
+            if (res != 0)
+            {
+                std::cout << "ListLogicalPoolsInPhysicalPool fail!" << std::endl;
+                return -1;
+            }
+        }
+        return 0;
     }
-        spaceInfo->usedChunkSize += size;
-        lpinfo.usedChunkSize += size;
-        metricName = GetPoolLogicalCapacityName(poolName);
-        res = mdsClient_->GetMetric(metricName, &size);
-        if (res != 0) {
-            std::cout << "Get logical capacity from mds fail!" << std::endl;
-            return -1;
+
+    int StatusTool::GetSpaceInfo(SpaceInfo *spaceInfo)
+    {
+        std::vector<LogicalPoolInfo> lgPools;
+        int res = mdsClient_->ListLogicalPoolsInCluster(&lgPools);
+        if (res != 0)
+        {
+            std::cout << "ListLogicalPoolsInCluster fail!" << std::endl;
+            return -1;
+        }
+        res = mdsClient_->GetFileSize(curve::mds::ROOTFILENAME,
+                                      &spaceInfo->currentFileSize);
+        if (res != 0)
+        {
+            std::cout << "Get root directory file size from mds fail!" << std::endl;
+            return -1;
+        }
+        // Obtain space information from metrics
+        for (const auto &lgPool : lgPools)
+        {
+            LogicalpoolSpaceInfo lpinfo;
+            std::string poolName = lgPool.logicalpoolname();
+            lpinfo.poolName = poolName;
+            std::string metricName = GetPoolTotalChunkSizeName(poolName);
+            uint64_t size;
+            int res = mdsClient_->GetMetric(metricName, &size);
+            if (res != 0)
+            {
+                std::cout << "Get total chunk byte from mds fail!" << std::endl;
+                return -1;
+            }
+            spaceInfo->totalChunkSize += size;
+            lpinfo.totalChunkSize += size;
+            metricName = GetPoolUsedChunkSizeName(poolName);
+            res = mdsClient_->GetMetric(metricName, &size);
+            if (res != 0)
+            {
+                std::cout << "Get used chunk byte from mds fail!" << std::endl;
+                return -1;
+            }
+            spaceInfo->usedChunkSize += size;
+            lpinfo.usedChunkSize += size;
+            metricName = GetPoolLogicalCapacityName(poolName);
+            res = mdsClient_->GetMetric(metricName, &size);
+            if (res != 0)
+            {
+                std::cout << "Get logical capacity from mds fail!"
+                          << std::endl;
+                return -1;
+            }
+            spaceInfo->totalCapacity += size;
+            lpinfo.totalCapacity += size;
+            metricName = GetPoolLogicalAllocName(poolName);
+            res = mdsClient_->GetMetric(metricName, &size);
+            if (res != 0)
+            {
+                std::cout << "Get logical alloc size from mds fail!" << std::endl;
+                return -1;
+            }
+            spaceInfo->allocatedSize += size;
+            lpinfo.allocatedSize += size;
+            spaceInfo->lpoolspaceinfo.insert(
+                std::pair<PoolIdType, LogicalpoolSpaceInfo>(lgPool.logicalpoolid(),
+                                                            lpinfo));
+        }
+        // Obtain the allocated size of RecycleBin
+        res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR,
+                                           &spaceInfo->recycleAllocSize);
+        if (res != 0)
+        {
+            std::cout << "GetAllocatedSize of RecycleBin fail!" << std::endl;
+            return -1;
+        }
+        return 0;
    }
-        spaceInfo->totalCapacity += size;
-        lpinfo.totalCapacity += size;
-        metricName = GetPoolLogicalAllocName(poolName);
-        res = mdsClient_->GetMetric(metricName, &size);
-        if (res != 0) {
-            std::cout << "Get logical alloc size from mds fail!" << std::endl;
-            return -1;
+
+    int StatusTool::RunCommand(const std::string &cmd)
+    {
+        if (Init(cmd) != 0)
+        {
+            std::cout << "Init StatusTool failed" << std::endl;
+            return -1;
+        }
+        if (cmd == kSpaceCmd)
+        {
+            return SpaceCmd();
+        }
+        else if (cmd == kStatusCmd)
+        {
+            return StatusCmd();
+        }
+        else if (cmd == kChunkserverListCmd)
+        {
+            return ChunkServerListCmd();
+        }
+        else if (cmd == kServerListCmd)
+        {
+            return ServerListCmd();
+        }
+        else if (cmd == kLogicalPoolList)
+        {
+            return LogicalPoolListCmd();
+        }
+        else if (cmd == kChunkserverStatusCmd)
+        {
+            return ChunkServerStatusCmd();
+        }
+        else if (cmd == kMdsStatusCmd)
+        {
+            return PrintMdsStatus();
+        }
+        else if (cmd == kEtcdStatusCmd)
+        {
+            return PrintEtcdStatus();
+        }
+        else if (cmd == kClientStatusCmd)
+        {
+            return PrintClientStatus();
+        }
+        else if (cmd == kSnapshotCloneStatusCmd)
+        {
+            return PrintSnapshotCloneStatus();
+        }
+        else if (cmd == kClusterStatusCmd)
+        {
+            return PrintClusterStatus();
+        }
+        else if (cmd == kClientListCmd)
+        {
+            return ClientListCmd();
+        }
+        else if (cmd == kScanStatusCmd)
+        {
+            return ScanStatusCmd();
+        }
+        else if (cmd == kFormatStatusCmd)
+        {
+            return FormatStatusCmd();
+        }
+        else
+        {
+            std::cout << "Command not supported!" << std::endl;
+            return -1;
+        }
+
+        return 0;
    }
-        spaceInfo->allocatedSize += size;
-        lpinfo.allocatedSize += size;
-        spaceInfo->lpoolspaceinfo.insert(
-            std::pair<PoolIdType, LogicalpoolSpaceInfo>(
-                lgPool.logicalpoolid(), lpinfo));
-    }
-    // 获取RecycleBin的分配大小
-    res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR,
-                                       &spaceInfo->recycleAllocSize);
-    if (res != 0) {
-        std::cout << "GetAllocatedSize of RecycleBin fail!"
-                  << std::endl;
-        return -1;
-    }
-    return 0;
-}
-
-int StatusTool::RunCommand(const std::string &cmd) {
-    if (Init(cmd) != 0) {
-        std::cout << "Init StatusTool failed" << std::endl;
-        return -1;
-    }
-    if (cmd == kSpaceCmd) {
-        return SpaceCmd();
-    } else if (cmd == kStatusCmd) {
-        return StatusCmd();
-    } else if (cmd == kChunkserverListCmd) {
-        return ChunkServerListCmd();
-    } else if (cmd == kServerListCmd) {
-        return ServerListCmd();
-    } else if (cmd == kLogicalPoolList) {
-        return LogicalPoolListCmd();
-    } else if (cmd == kChunkserverStatusCmd) {
-        return ChunkServerStatusCmd();
-    } else if (cmd == kMdsStatusCmd) {
-        return PrintMdsStatus();
-    } else if (cmd == kEtcdStatusCmd) {
-        return PrintEtcdStatus();
-    } else if (cmd == kClientStatusCmd) {
-        return PrintClientStatus();
-    } else if (cmd == kSnapshotCloneStatusCmd) {
-        return PrintSnapshotCloneStatus();
-    } else if (cmd == kClusterStatusCmd) {
-        return PrintClusterStatus();
-    } else if (cmd == kClientListCmd) {
-        return ClientListCmd();
-    } else if (cmd == kScanStatusCmd) {
-        return ScanStatusCmd();
-    } else if (cmd == kFormatStatusCmd) {
-        return FormatStatusCmd();
-    } else {
-        std::cout << "Command not supported!" << std::endl;
-        return -1;
-    }
-
-    return 0;
-}
-}  // namespace tool
-}  // namespace curve
+}  // namespace tool
+}  // namespace curve
diff --git a/src/tools/status_tool.h b/src/tools/status_tool.h
index 82b776fa73..37e0546050 100644
--- a/src/tools/status_tool.h
+++ b/src/tools/status_tool.h
@@ -23,31 +23,33 @@
 #ifndef SRC_TOOLS_STATUS_TOOL_H_
 #define SRC_TOOLS_STATUS_TOOL_H_
 
+#include
 #include
 #include
-#include
-#include
+
 #include
-#include
-#include
-#include
+#include
 #include
+#include
+#include
 #include
+#include
+
 #include "proto/topology.pb.h"
 #include "src/common/timeutility.h"
+#include "src/common/uri_parser.h"
 #include "src/mds/common/mds_define.h"
-#include "src/tools/mds_client.h"
 #include "src/tools/chunkserver_client.h"
-#include "src/tools/namespace_tool_core.h"
 #include "src/tools/copyset_check_core.h"
-#include "src/tools/etcd_client.h"
-#include "src/tools/version_tool.h"
 #include "src/tools/curve_tool.h"
 #include "src/tools/curve_tool_define.h"
+#include "src/tools/etcd_client.h"
+#include "src/tools/mds_client.h"
 #include "src/tools/metric_client.h"
 #include "src/tools/metric_name.h"
+#include "src/tools/namespace_tool_core.h"
 #include "src/tools/snapshot_clone_client.h"
-#include "src/common/uri_parser.h"
+#include "src/tools/version_tool.h"
 
 using curve::mds::topology::ChunkServerInfo;
 using curve::mds::topology::ChunkServerStatus;
@@ -63,22 +65,22 @@ struct LogicalpoolSpaceInfo {
     std::string poolName = "";
     uint64_t totalChunkSize = 0;
     uint64_t usedChunkSize = 0;
-    // 总体能容纳的文件大小
+    // Total file size that can be accommodated
     uint64_t totalCapacity = 0;
-    // 分配大小
+    // Allocated size
     uint64_t allocatedSize = 0;
 };
 
 struct SpaceInfo {
     uint64_t totalChunkSize = 0;
     uint64_t usedChunkSize = 0;
-    // 总体能容纳的文件大小
+    // Total file size that can be accommodated
     uint64_t totalCapacity = 0;
-    // 分配大小
+    // Allocated size
    uint64_t allocatedSize = 0;
-    // recycleBin的分配大小
+    // Allocated size of recycleBin
    uint64_t recycleAllocSize = 0;
-    // 系统中存在的文件大小
+    // Size of the files that exist in the system
    uint64_t currentFileSize = 0;
    std::unordered_map<PoolIdType, LogicalpoolSpaceInfo> lpoolspaceinfo;
 };
@@ -100,49 +102,54 @@ class StatusTool : public CurveTool {
                std::shared_ptr<VersionTool> versionTool,
                std::shared_ptr<MetricClient> metricClient,
                std::shared_ptr<SnapshotCloneClient> snapshotClient)
-        : mdsClient_(mdsClient), copysetCheckCore_(copysetCheckCore),
-          etcdClient_(etcdClient),
          metricClient_(metricClient),
-          snapshotClient_(snapshotClient), versionTool_(versionTool),
-          mdsInited_(false), etcdInited_(false), noSnapshotServer_(false) {}
+        : mdsClient_(mdsClient),
+          copysetCheckCore_(copysetCheckCore),
+          etcdClient_(etcdClient),
+          metricClient_(metricClient),
+          snapshotClient_(snapshotClient),
+          versionTool_(versionTool),
+          mdsInited_(false),
+          etcdInited_(false),
+          noSnapshotServer_(false) {}
 
     ~StatusTool() = default;
 
     /**
-     * @brief 打印help信息
-     * @param cmd:执行的命令
-     * @return 无
+     * @brief Print help information
+     * @param command: the command to execute
+     * @return None
     */
-    void PrintHelp(const std::string &command) override;
+    void PrintHelp(const std::string& command) override;
 
     /**
-     * @brief 执行命令
-     * @param cmd:执行的命令
-     * @return 成功返回0,失败返回-1
+     * @brief Execute the command
+     * @param command: the command to execute
+     * @return 0 on success, -1 on failure
     */
-    int RunCommand(const std::string &command) override;
+    int RunCommand(const std::string& command) override;
 
     /**
-     * @brief 返回是否支持该命令
-     * @param command:执行的命令
-     * @return true / false
+     * @brief Return whether the command is supported
+     * @param command: the command to execute
+     * @return true/false
     */
-    static bool SupportCommand(const std::string &command);
+    static bool SupportCommand(const std::string& command);
 
     /**
-     * @brief 判断集群是否健康
+     * @brief Determine whether the cluster is healthy
     */
    bool IsClusterHeatlhy();
 
 private:
-    int Init(const std::string &command);
+    int Init(const std::string& command);
    int SpaceCmd();
    int StatusCmd();
    int ChunkServerListCmd();
    int ServerListCmd();
    int LogicalPoolListCmd();
    int ChunkServerStatusCmd();
-    int GetPoolsInCluster(std::vector<PhysicalPoolInfo> *phyPools,
-                          std::vector<LogicalPoolInfo> *lgPools);
-    int GetSpaceInfo(SpaceInfo *spaceInfo);
+    int GetPoolsInCluster(std::vector<PhysicalPoolInfo>* phyPools,
+                          std::vector<LogicalPoolInfo>* lgPools);
+    int GetSpaceInfo(SpaceInfo* spaceInfo);
    int PrintClusterStatus();
    int PrintMdsStatus();
    int PrintEtcdStatus();
@@ -152,67 +159,67 @@ class StatusTool : public CurveTool {
    int ScanStatusCmd();
    int FormatStatusCmd();
    void PrintCsLeftSizeStatistics(
-        const std::string &name,
-        const std::map<PoolIdType, std::vector<uint64_t>> &poolLeftSize);
+        const std::string& name,
+        const std::map<PoolIdType, std::vector<uint64_t>>& poolLeftSize);
    int PrintSnapshotCloneStatus();
 
    /**
-     * @brief 判断命令是否需要和etcd交互
-     * @param command:执行的命令
-     * @return 需要返回true,否则返回false
+     * @brief Determine whether the command needs to interact with etcd
+     * @param command: the command to execute
+     * @return true if it does, false otherwise
     */
-    bool CommandNeedEtcd(const std::string &command);
-
+    bool CommandNeedEtcd(const std::string& command);
 
    /**
-     * @brief 判断命令是否需要mds
-     * @param command:执行的命令
-     * @return 需要返回true,否则返回false
+     * @brief Determine whether the command requires mds
+     * @param command: the command to execute
+     * @return true if it does, false otherwise
     */
-    bool CommandNeedMds(const std::string &command);
+    bool CommandNeedMds(const std::string& command);
 
    /**
-     * @brief 判断命令是否需要snapshot clone server
-     * @param command:执行的命令
-     * @return 需要返回true,否则返回false
+     * @brief Determine whether the command requires the snapshot clone server
+     * @param command: the command to execute
+     * @return true if it does, false otherwise
     */
-    bool CommandNeedSnapshotClone(const std::string &command);
+    bool CommandNeedSnapshotClone(const std::string& command);
 
    /**
-     * @brief 打印在线状态
-     * @param name : 在线状态对应的名字
-     * @param onlineStatus 在线状态的map
+     * @brief Print the online status
+     * @param name: the name the online status belongs to
+     * @param onlineStatus map of online status
     */
-    void PrintOnlineStatus(const std::string &name,
-                           const std::map<std::string, bool> &onlineStatus);
+    void PrintOnlineStatus(const std::string& name,
+                           const std::map<std::string, bool>& onlineStatus);
 
    /**
-     * @brief 获取并打印mds version信息
+     * @brief Get and print mds version information
     */
    int GetAndPrintMdsVersion();
 
    /**
-     * @brief 检查服务是否健康
-     * @param name 服务名
+     * @brief Check whether the service is healthy
+     * @param name service name
     */
-    bool CheckServiceHealthy(const ServiceName &name);
+    bool CheckServiceHealthy(const ServiceName& name);
 
 private:
-    // 向mds发送RPC的client
+    // Client for sending RPCs to mds
    std::shared_ptr<MDSClient> mdsClient_;
-    // Copyset检查工具,用于检查集群和chunkserver的健康状态
+    // Copyset checking tool, used to check the health status of clusters and
+    // chunkservers
    std::shared_ptr<CopysetCheckCore> copysetCheckCore_;
-    // etcd client,用于调etcd API获取状态
+    // etcd client, used to call the etcd API to obtain status
    std::shared_ptr<EtcdClient> etcdClient_;
-    // 用于获取metric
+    // Used to obtain metrics
    std::shared_ptr<MetricClient> metricClient_;
-    // 用于获取snapshot clone的状态
+    // Used to obtain the status of the snapshot clone server
    std::shared_ptr<SnapshotCloneClient> snapshotClient_;
-    // version client,用于获取version信息
+    // Version client, used to obtain version information
    std::shared_ptr<VersionTool> versionTool_;
-    // mds是否初始化过
+    // Whether mds has been initialized
    bool mdsInited_;
-    // etcd是否初始化过
+    // Whether etcd has been initialized
    bool etcdInited_;
    // Is there a snapshot service or not
    bool noSnapshotServer_;
diff --git a/src/tools/version_tool.cpp b/src/tools/version_tool.cpp
index 6e519bab4a..42b1d3e9a5 100644
--- a/src/tools/version_tool.cpp
+++ b/src/tools/version_tool.cpp
@@ -48,8 +48,8 @@ int VersionTool::GetAndCheckMdsVersion(std::string* version,
    return ret;
 }
 
-int VersionTool::GetAndCheckChunkServerVersion(std::string* version,
-                                    std::vector<std::string>* failedList) {
+int VersionTool::GetAndCheckChunkServerVersion(
+    std::string* version, std::vector<std::string>* failedList) {
    std::vector<ChunkServerInfo> chunkServers;
    int res = mdsClient_->ListChunkServersInCluster(&chunkServers);
    if (res != 0) {
@@ -78,8 +78,8 @@ int VersionTool::GetAndCheckChunkServerVersion(std::string* version,
    return ret;
 }
 
-int VersionTool::GetAndCheckSnapshotCloneVersion(std::string* version,
-                                    std::vector<std::string>* failedList) {
+int VersionTool::GetAndCheckSnapshotCloneVersion(
+    std::string* version, std::vector<std::string>* failedList) {
    const auto& dummyServerMap = snapshotClient_->GetDummyServerMap();
    std::vector<std::string> dummyServers;
    for (const auto& item : dummyServerMap) {
@@ -123,9 +123,8 @@ void VersionTool::FetchClientProcessMap(const std::vector<std::string>& addrVec,
                                        ProcessMapType* processMap) {
    for (const auto& addr : addrVec) {
        std::string cmd;
-        MetricRet res = metricClient_->GetMetric(addr,
-                                                 kProcessCmdLineMetricName,
-                                                 &cmd);
+        MetricRet res =
+            metricClient_->GetMetric(addr, kProcessCmdLineMetricName, &cmd);
        if (res != MetricRet::kOK) {
            continue;
        }
@@ -156,10 +155,11 @@ void VersionTool::GetVersionMap(const std::vector<std::string>& addrVec,
    failedList->clear();
    for (const auto& addr : addrVec) {
        std::string version;
-        MetricRet res = metricClient_->GetMetric(addr, kCurveVersionMetricName,
-                                                 &version);
+        MetricRet res =
+            metricClient_->GetMetric(addr, kCurveVersionMetricName, &version);
        if (res != MetricRet::kOK) {
-            // 0.0.5.2版本之前没有curve_version的metric,因此再判断一下
+            // The curve_version metric did not exist before version 0.0.5.2,
+            // so check the error code further.
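+            // (kNotFound therefore indicates a pre-0.0.5.2 binary and is
+            // mapped to kOldVersion below; any other error code is treated
+            // as a failed version query.)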
            if (res == MetricRet::kNotFound) {
                version = kOldVersion;
            } else {
diff --git a/src/tools/version_tool.h b/src/tools/version_tool.h
index 9231d1e4fc..eb293433e6 100644
--- a/src/tools/version_tool.h
+++ b/src/tools/version_tool.h
@@ -23,13 +23,14 @@
 #ifndef SRC_TOOLS_VERSION_TOOL_H_
 #define SRC_TOOLS_VERSION_TOOL_H_
 
-#include
 #include
-#include
 #include
+#include
+#include
+
+#include "src/common/string_util.h"
 #include "src/tools/mds_client.h"
 #include "src/tools/metric_client.h"
-#include "src/common/string_util.h"
 #include "src/tools/snapshot_clone_client.h"
 
 namespace curve {
@@ -49,95 +50,97 @@ class VersionTool {
    explicit VersionTool(std::shared_ptr<MDSClient> mdsClient,
                         std::shared_ptr<MetricClient> metricClient,
                         std::shared_ptr<SnapshotCloneClient> snapshotClient)
-        : mdsClient_(mdsClient), snapshotClient_(snapshotClient),
+        : mdsClient_(mdsClient),
+          snapshotClient_(snapshotClient),
          metricClient_(metricClient) {}
 
    virtual ~VersionTool() {}
 
    /**
-     * @brief 获取mds的版本并检查版本一致性
-     * @param[out] version 版本
-     * @return 成功返回0,失败返回-1
+     * @brief Get the version of mds and check version consistency
+     * @param[out] version the version string
+     * @return 0 on success, -1 on failure
     */
-    virtual int GetAndCheckMdsVersion(std::string *version,
-                                      std::vector<std::string> *failedList);
+    virtual int GetAndCheckMdsVersion(std::string* version,
+                                      std::vector<std::string>* failedList);
 
    /**
-     * @brief 获取chunkserver的版本并检查版本一致性
-     * @param[out] version 版本
-     * @return 成功返回0,失败返回-1
+     * @brief Get the version of chunkserver and check version consistency
+     * @param[out] version the version string
+     * @return 0 on success, -1 on failure
     */
-    virtual int
-    GetAndCheckChunkServerVersion(std::string *version,
-                                  std::vector<std::string> *failedList);
+    virtual int GetAndCheckChunkServerVersion(
+        std::string* version, std::vector<std::string>* failedList);
 
    /**
-     * @brief 获取snapshot clone server的版本
-     * @param[out] version 版本
-     * @return 成功返回0,失败返回-1
+     * @brief Get the version of the snapshot clone server
+     * @param[out] version the version string
+     * @return 0 on success, -1 on failure
     */
-    virtual int
-    GetAndCheckSnapshotCloneVersion(std::string *version,
-                                    std::vector<std::string> *failedList);
+    virtual int GetAndCheckSnapshotCloneVersion(
+        std::string* version, std::vector<std::string>* failedList);
 
    /**
-     * @brief 获取client的版本
-     * @param[out] versionMap process->版本->地址的映射表
-     * @return 成功返回0,失败返回-1
+     * @brief Get the version of the clients
+     * @param[out] versionMap process -> version -> address mapping table
+     * @return 0 on success, -1 on failure
     */
-    virtual int GetClientVersion(ClientVersionMapType *versionMap);
+    virtual int GetClientVersion(ClientVersionMapType* versionMap);
 
    /**
-     * @brief 打印每个version对应的地址
-     * @param versionMap version到地址列表的map
+     * @brief Print the addresses corresponding to each version
+     * @param versionMap version to address list map
     */
-    static void PrintVersionMap(const VersionMapType &versionMap);
+    static void PrintVersionMap(const VersionMapType& versionMap);
 
    /**
-     * @brief 打印访问失败的地址
-     * @param failedList 访问失败的地址列表
+     * @brief Print the addresses that failed to respond
+     * @param failedList list of addresses that failed to respond
     */
-    static void PrintFailedList(const std::vector<std::string> &failedList);
+    static void PrintFailedList(const std::vector<std::string>& failedList);
 
 private:
    /**
-     * @brief 获取addrVec对应地址的version,并把version和地址对应关系存在map中
-     * @param addrVec 地址列表
-     * @param[out] versionMap version到地址的map
-     * @param[out] failedList 查询version失败的地址列表
+     * @brief Obtain the version of each address in addrVec and store the
+     * version-to-address mapping in the map
+     * @param addrVec address list
+     * @param[out] versionMap version to address map
+     * @param[out] failedList list of addresses whose version query failed
     */
-    void GetVersionMap(const std::vector<std::string> &addrVec,
-                       VersionMapType *versionMap,
-                       std::vector<std::string> *failedList);
+    void GetVersionMap(const std::vector<std::string>& addrVec,
+                       VersionMapType* versionMap,
+                       std::vector<std::string>* failedList);
 
    /**
-     * @brief 获取addrVec对应地址的version,并把version和地址对应关系存在map中
-     * @param addrVec 地址列表
-     * @param[out] processMap 不同的process对应的client的地址列表
+     * @brief Obtain the process name for each address in addrVec and group
+     * the client addresses by process
+     * @param addrVec address list
+     * @param[out] processMap the list of client addresses for each process
     */
-    void FetchClientProcessMap(const std::vector<std::string> &addrVec,
-                               ProcessMapType *processMap);
+    void FetchClientProcessMap(const std::vector<std::string>& addrVec,
+                               ProcessMapType* processMap);
 
    /**
-     * @brief 从启动server的命令行获取对应的程序的名字
-     *        比如nebd的命令行为
+     * @brief Get the program name from the command line that started the
+     * server. For example, the command line of nebd is
     *        process_cmdline : "/usr/bin/nebd-server
     *        -confPath=/etc/nebd/nebd-server.conf
     *        -log_dir=/data/log/nebd/server
     *        -graceful_quit_on_sigterm=true
     *        -stderrthreshold=3
     *        "
-     *        那么我们要解析出的名字是nebd-server
-     * @param addrVec 地址列表
-     * @return 进程的名字
+     *        so the name to extract is nebd-server
+     * @param cmd the process command line
+     * @return the name of the process
     */
-    std::string GetProcessNameFromCmd(const std::string &cmd);
+    std::string GetProcessNameFromCmd(const std::string& cmd);
 
 private:
-    // 向mds发送RPC的client
+    // Client for sending RPCs to mds
    std::shared_ptr<MDSClient> mdsClient_;
-    // 用于获取snapshotClone状态
+    // Used to obtain snapshot clone status
    std::shared_ptr<SnapshotCloneClient> snapshotClient_;
-    // 获取metric的client
+    // Client for obtaining metrics
    std::shared_ptr<MetricClient> metricClient_;
 };
diff --git a/test/chunkserver/braft_cli_service2_test.cpp b/test/chunkserver/braft_cli_service2_test.cpp
index cc97980aa2..110a0923b8 100644
--- a/test/chunkserver/braft_cli_service2_test.cpp
+++ b/test/chunkserver/braft_cli_service2_test.cpp
@@ -20,25 +20,26 @@
 * Author: wudemiao
 */
 
-#include
-#include
-#include
-#include
+#include "src/chunkserver/braft_cli_service2.h"
+
 #include
 #include
 #include
+#include
+#include
+#include
+#include
 #include
 
+#include "proto/copyset.pb.h"
+#include "src/chunkserver/cli.h"
 #include "src/chunkserver/copyset_node.h"
 #include "src/chunkserver/copyset_node_manager.h"
-#include "src/chunkserver/braft_cli_service2.h"
-#include "src/chunkserver/cli.h"
-#include "proto/copyset.pb.h"
-#include "test/chunkserver/chunkserver_test_util.h"
-#include "src/common/uuid.h"
 #include "src/common/timeutility.h"
+#include "src/common/uuid.h"
 #include "src/fs/local_filesystem.h"
+#include "test/chunkserver/chunkserver_test_util.h"
 
 namespace curve {
 namespace chunkserver {
@@ -48,10 +49,12 @@ using curve::common::UUIDGenerator;
 class BraftCliService2Test : public testing::Test {
 protected:
    static void SetUpTestCase() {
-        LOG(INFO) << "BraftCliServiceTest " << "SetUpTestCase";
+        LOG(INFO) << "BraftCliServiceTest "
+                  << "SetUpTestCase";
    }
    static void TearDownTestCase() {
-        LOG(INFO) << "BraftCliServiceTest " << "TearDownTestCase";
+        LOG(INFO) << "BraftCliServiceTest "
+                  << "TearDownTestCase";
    }
    virtual void SetUp() {
        peer1.set_address("127.0.0.1:9310:0");
@@ -75,10 +78,10 @@ class BraftCliService2Test : public testing::Test {
    }
 
 public:
-    const char *ip = "127.0.0.1";
-    int port = 9310;
-    const char *confs = "127.0.0.1:9310:0,127.0.0.1:9311:0,127.0.0.1:9312:0";
-    int snapshotInterval = 3600;  // 防止自动打快照
+    const char* ip = "127.0.0.1";
+    int port = 9310;
+    const char* confs = "127.0.0.1:9310:0,127.0.0.1:9311:0,127.0.0.1:9312:0";
+    int snapshotInterval = 3600;  // Prevent automatic snapshot taking
    int electionTimeoutMs = 3000;
 
    pid_t pid1;
@@ -128,12 +131,8 @@ TEST_F(BraftCliService2Test, basic2) {
        ASSERT_TRUE(false);
    } else if (0 == pid1) {
        std::string copysetdir = "local://./" + dirMap[peer1.address()];
-        StartChunkserver(ip,
-                         port + 0,
-                         copysetdir.c_str(),
-                         confs,
-                         snapshotInterval,
-                         electionTimeoutMs);
+        StartChunkserver(ip, port + 0, copysetdir.c_str(), confs,
+                         snapshotInterval, electionTimeoutMs);
        return;
    }
 
@@ -143,12 +142,8 @@ TEST_F(BraftCliService2Test, basic2) {
        ASSERT_TRUE(false);
    } else if (0 == pid2) {
        std::string copysetdir = "local://./" + dirMap[peer2.address()];
-        StartChunkserver(ip,
-                         port + 1,
-                         copysetdir.c_str(),
-                         confs,
-                         snapshotInterval,
-                         electionTimeoutMs);
+        StartChunkserver(ip, port + 1, copysetdir.c_str(), confs,
+                         snapshotInterval, electionTimeoutMs);
        return;
    }
 
@@ -158,16 +153,12 @@ TEST_F(BraftCliService2Test, basic2) {
        ASSERT_TRUE(false);
    } else if (0 == pid3) {
        std::string copysetdir = "local://./" + dirMap[peer3.address()];
-        StartChunkserver(ip,
-                         port + 2,
-                         copysetdir.c_str(),
-                         confs,
-                         snapshotInterval,
-                         electionTimeoutMs);
+        StartChunkserver(ip, port + 2, copysetdir.c_str(), confs,
+                         snapshotInterval, electionTimeoutMs);
        return;
    }
 
-    /* 保证进程一定会退出 */
+    /* Ensure that the processes will exit */
    WaitpidGuard waitpidGuard(pid1, pid2, pid3);
 
    ::usleep(1.2 * 1000 * electionTimeoutMs);
@@ -182,15 +173,15 @@ TEST_F(BraftCliService2Test, basic2) {
    options.timeout_ms = 3000;
    options.max_retry = 3;
 
-    /* add peer - 非法copyset */
+    /* add peer - illegal copyset */
    {
-        Peer *leaderPeer = new Peer();
-        Peer *peer = new Peer();
+        Peer* leaderPeer = new Peer();
+        Peer* peer = new Peer();
        brpc::Channel channel;
        ASSERT_EQ(0, channel.Init(leaderAddr, NULL));
 
        AddPeerRequest2 request;
-        // 设置一个不存在的logicPoolId
+        // Set a non-existent logicPoolId
        request.set_logicpoolid(logicPoolId + 1);
        request.set_copysetid(copysetId);
        request.set_allocated_leader(leaderPeer);
@@ -210,10 +201,10 @@ TEST_F(BraftCliService2Test, basic2) {
        ASSERT_TRUE(cntl.Failed());
        ASSERT_EQ(ENOENT, cntl.ErrorCode());
    }
-    /* add peer - 非法peerid */
+    /* add peer - illegal peer id */
    {
-        Peer *leaderPeer = new Peer();
-        Peer *peer = new Peer();
+        Peer* leaderPeer = new Peer();
+        Peer* peer = new Peer();
        brpc::Channel channel;
        ASSERT_EQ(0, channel.Init(leaderAddr, NULL));
 
@@ -223,7 +214,7 @@ TEST_F(BraftCliService2Test, basic2) {
        request.set_allocated_leader(leaderPeer);
        *leaderPeer = gLeader;
        request.set_allocated_addpeer(peer);
-        // request中的peer id是非法的
+        // The peer id in the request is illegal
        peer->set_address("127.0.0");
 
        AddPeerResponse2 response;
@@ -237,13 +228,14 @@ TEST_F(BraftCliService2Test, basic2) {
        ASSERT_EQ(EINVAL, cntl.ErrorCode());
        LOG(INFO) << "add peer: " << cntl.ErrorText();
    }
-    /* add peer - 发送给不是leader的peer */
+    /* add peer - sent to a peer that is not the leader */
    {
-        Peer *leaderPeer = new Peer();
-        Peer *peer = new Peer();
+        Peer* leaderPeer = new Peer();
+        Peer* peer = new Peer();
        PeerId leaderId;
        LOG(INFO) << "true leader is: " << gLeader.address();
-        // 找一个不是leader的peer,然后将配置变更请求发送给它处理
+        // Find a peer that is not the leader and send the configuration
+        // change request to it for processing
        if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) {
            leaderId.parse(peer2.address());
            *leaderPeer = peer2;
        } else {
@@ -274,15 +266,15 @@
TEST_F(BraftCliService2Test, basic2) {
        ASSERT_TRUE(cntl.Failed());
        ASSERT_EQ(EPERM, cntl.ErrorCode());
    }
-    /* remove peer - 非法copyset */
+    /* remove peer - illegal copyset */
    {
-        Peer *leaderPeer = new Peer();
-        Peer *peer = new Peer();
+        Peer* leaderPeer = new Peer();
+        Peer* peer = new Peer();
        brpc::Channel channel;
        ASSERT_EQ(0, channel.Init(leaderAddr, NULL));
 
        RemovePeerRequest2 request;
-        // 设置一个不存在的logicPoolId
+        // Set a non-existent logicPoolId
        request.set_logicpoolid(logicPoolId + 1);
        request.set_copysetid(copysetId);
        request.set_allocated_leader(leaderPeer);
@@ -302,10 +294,10 @@ TEST_F(BraftCliService2Test, basic2) {
        ASSERT_TRUE(cntl.Failed());
        ASSERT_EQ(ENOENT, cntl.ErrorCode());
    }
-    /* remove peer - 非法peer id */
+    /* remove peer - illegal peer id */
    {
-        Peer *leaderPeer = new Peer();
-        Peer *peer = new Peer();
+        Peer* leaderPeer = new Peer();
+        Peer* peer = new Peer();
        brpc::Channel channel;
        ASSERT_EQ(0, channel.Init(leaderAddr, NULL));
 
@@ -315,7 +307,7 @@ TEST_F(BraftCliService2Test, basic2) {
        request.set_allocated_leader(leaderPeer);
        *leaderPeer = gLeader;
        request.set_allocated_removepeer(peer);
-        // request中的peer id是非法的
+        // The peer id in the request is illegal
        peer->set_address("127.0.0");
 
        RemovePeerResponse2 response;
@@ -329,15 +321,15 @@ TEST_F(BraftCliService2Test, basic2) {
        ASSERT_EQ(EINVAL, cntl.ErrorCode());
        LOG(INFO) << "remove peer: " << cntl.ErrorText();
    }
-    /* remove peer - 发送给不是leader的peer */
+    /* remove peer - sent to a peer that is not the leader */
    {
-        Peer *leaderPeer = new Peer();
-        Peer *peer = new Peer();
+        Peer* leaderPeer = new Peer();
+        Peer* peer = new Peer();
        PeerId leaderId;
        LOG(INFO) << "true leader is: " << gLeader.address();
-        // 找一个不是leader的peer,然后将配置变更请求发送给它处理
-        if (0
-            == strcmp(gLeader.address().c_str(), peer1.address().c_str())) {
+        // Find a peer that is not the leader and send the configuration
+        // change request to it for processing
+        if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) {
            leaderId.parse(peer2.address());
            *leaderPeer = peer2;
        } else {
@@ -367,15 +359,15 @@ TEST_F(BraftCliService2Test, basic2) {
        ASSERT_TRUE(cntl.Failed());
        ASSERT_EQ(EPERM, cntl.ErrorCode());
    }
-    /* transfer leader - 非法copyset */
+    /* transfer leader - illegal copyset */
    {
-        Peer *leaderPeer = new Peer();
-        Peer *peer = new Peer();
+        Peer* leaderPeer = new Peer();
+        Peer* peer = new Peer();
        brpc::Channel channel;
        ASSERT_EQ(0, channel.Init(leaderAddr, NULL));
 
        TransferLeaderRequest2 request;
-        // 设置一个不存在的logicPoolId
+        // Set a non-existent logicPoolId
        request.set_logicpoolid(logicPoolId + 1);
        request.set_copysetid(copysetId);
        request.set_allocated_leader(leaderPeer);
@@ -395,8 +387,8 @@ TEST_F(BraftCliService2Test, basic2) {
    }
    /* transfer leader to leader */
    {
-        Peer *leaderPeer = new Peer();
-        Peer *peer = new Peer();
+        Peer* leaderPeer = new Peer();
+        Peer* peer = new Peer();
        brpc::Channel channel;
        ASSERT_EQ(0, channel.Init(leaderAddr, NULL));
 
@@ -417,10 +409,10 @@ TEST_F(BraftCliService2Test, basic2) {
        stub.TransferLeader(&cntl, &request, &response, NULL);
        ASSERT_FALSE(cntl.Failed());
    }
-    /* transfer leader - 非法peer */
+    /* transfer leader - illegal peer */
    {
-        Peer *leaderPeer = new Peer();
-        Peer *peer = new Peer();
+        Peer* leaderPeer = new Peer();
+        Peer* peer = new Peer();
        brpc::Channel channel;
        ASSERT_EQ(0, channel.Init(leaderAddr, NULL));
 
@@ -430,7 +422,7 @@ TEST_F(BraftCliService2Test, basic2) {
        request.set_allocated_leader(leaderPeer);
        *leaderPeer = gLeader;
        request.set_allocated_transferee(peer);
-        // request中的peer id是非法的
+        // The peer id in the request is illegal
        peer->set_address("127.0.0");
 
        TransferLeaderResponse2 response;
@@ -444,18 +436,17 @@ TEST_F(BraftCliService2Test, basic2) {
        ASSERT_EQ(EINVAL, cntl.ErrorCode());
        LOG(INFO) << "Transfer leader peer: " << cntl.ErrorText();
    }
-    /* get leader - 非法copyset */
+    /* get leader - illegal copyset */
    {
        PeerId leaderId = leaderId;
        brpc::Channel channel;
        ASSERT_EQ(0, channel.Init(leaderAddr, NULL));
-
        GetLeaderRequest2 request;
        GetLeaderResponse2 response;
        brpc::Controller cntl;
-        // 设置一个不存在的logicPoolId
+        // Set a non-existent logicPoolId
        request.set_logicpoolid(logicPoolId + 1);
        request.set_copysetid(copysetId);
 
@@ -467,14 +458,13 @@ TEST_F(BraftCliService2Test, basic2) {
    /* remove peer then add peer */
    {
        // 1 remove peer
-        Peer *removePeer = new Peer();
-        Peer *leaderPeer1 = new Peer();
-        Peer *leaderPeer2 = new Peer();
-        Peer *addPeer = new Peer();
+        Peer* removePeer = new Peer();
+        Peer* leaderPeer1 = new Peer();
+        Peer* leaderPeer2 = new Peer();
+        Peer* addPeer = new Peer();
        PeerId removePeerId;
-        // 找一个不是leader的peer,作为remove peer
-        if (0
-            == strcmp(gLeader.address().c_str(), peer1.address().c_str())) {
+        // Pick a peer that is not the leader as the peer to remove
+        if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) {
            removePeerId.parse(peer2.address());
            *removePeer = peer2;
        } else {
@@ -508,7 +498,6 @@ TEST_F(BraftCliService2Test, basic2) {
        ASSERT_FALSE(cntl1.Failed());
        ASSERT_EQ(0, cntl1.ErrorCode());
 
-
        // add peer
        AddPeerRequest2 request2;
        request2.set_logicpoolid(logicPoolId);
@@ -529,17 +518,17 @@ TEST_F(BraftCliService2Test, basic2) {
        ASSERT_FALSE(cntl2.Failed());
        ASSERT_EQ(0, cntl2.ErrorCode());
    }
-    /* snapshot - 非法copyset */
+    /* snapshot - illegal copyset */
    {
        PeerId peer(peer1.address());
        brpc::Channel channel;
        ASSERT_EQ(0, channel.Init(peer.addr, NULL));
 
        SnapshotRequest2 request;
-        /* 非法 copyset */
+        /* illegal copyset */
        request.set_logicpoolid(logicPoolId + 1);
        request.set_copysetid(copysetId);
-        Peer *peerPtr = new Peer();
+        Peer* peerPtr = new Peer();
        *peerPtr = peer1;
        request.set_allocated_peer(peerPtr);
 
@@ -557,11 +546,12 @@ TEST_F(BraftCliService2Test, basic2) {
    }
    /* snapshot - normal */
    {
-        // 初始状态快照不为空
+        // Initially the snapshot is not empty
        std::string copysetDataDir = dirMap[gLeader.address()] + "/" +
-            ToGroupId(logicPoolId, copysetId) + "/" + RAFT_LOG_DIR;
+                                     ToGroupId(logicPoolId, copysetId) + "/" +
+                                     RAFT_LOG_DIR;
        std::shared_ptr<LocalFileSystem> fs(
-            LocalFsFactory::CreateFs(curve::fs::FileSystemType::EXT4, ""));
+            LocalFsFactory::CreateFs(curve::fs::FileSystemType::EXT4, ""));
        std::vector<std::string> files;
        fs->List(copysetDataDir.c_str(), &files);
        ASSERT_GE(files.size(), 1);
@@ -574,7 +564,7 @@ TEST_F(BraftCliService2Test, basic2) {
        SnapshotRequest2 request;
        request.set_logicpoolid(logicPoolId);
        request.set_copysetid(copysetId);
-        Peer *peerPtr = new Peer();
+        Peer* peerPtr = new Peer();
        peerPtr->set_address(leaderId.to_string());
        request.set_allocated_peer(peerPtr);
 
@@ -586,19 +576,20 @@ TEST_F(BraftCliService2Test, basic2) {
        LOG(INFO) << "Start do snapshot";
        CliService2_Stub stub(&channel);
        stub.Snapshot(&cntl, &request, &response, NULL);
-        ASSERT_FALSE(cntl.Failed()) << "Do snapshot fail, error: "
-                                    << cntl.ErrorText();
-        // 需要连续打两次快照才能删除第一次快照时的log
+        ASSERT_FALSE(cntl.Failed())
+            << "Do snapshot fail, error: " << cntl.ErrorText();
+        // Two consecutive snapshots are required to delete the log from the
+        // first snapshot
        sleep(5);
        cntl.Reset();
        LOG(INFO) << "Start do snapshot";
        stub.Snapshot(&cntl, &request, &response, NULL);
-        ASSERT_FALSE(cntl.Failed()) << "Do
snapshot fail, error: " - << cntl.ErrorText(); + ASSERT_FALSE(cntl.Failed()) + << "Do snapshot fail, error: " << cntl.ErrorText(); for (int i = 0; i < 60; ++i) { files.clear(); fs->List(copysetDataDir.c_str(), &files); - // 打完快照应该只剩下meta信息 + // After taking the snapshot, only meta information should be left if (files.size() == 1) { break; } @@ -619,18 +610,18 @@ TEST_F(BraftCliService2Test, basic2) { CliService2_Stub stub(&channel); stub.SnapshotAll(&cntl, &request, &response, NULL); - ASSERT_FALSE(cntl.Failed()) << "Do snapshot all fail, error: " - << cntl.ErrorText(); + ASSERT_FALSE(cntl.Failed()) + << "Do snapshot all fail, error: " << cntl.ErrorText(); } - /* reset peer - 非法 copyset */ + /*Reset peer - illegal copyset*/ { - Peer *targetPeer = new Peer(); + Peer* targetPeer = new Peer(); *targetPeer = peer1; PeerId peer(peer1.address()); brpc::Channel channel; ASSERT_EQ(0, channel.Init(peer.addr, NULL)); ResetPeerRequest2 request; - /* 非法 copyset */ + /*Illegal copyset*/ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_requestpeer(targetPeer); @@ -646,9 +637,9 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* reset peer - new peer为空 */ + /*Reset peer - new peer is empty*/ { - Peer *targetPeer = new Peer(); + Peer* targetPeer = new Peer(); *targetPeer = peer1; PeerId peer(peer1.address()); brpc::Channel channel; @@ -669,7 +660,7 @@ TEST_F(BraftCliService2Test, basic2) { } /* reset peer - normal */ { - Peer *targetPeer = new Peer(); + Peer* targetPeer = new Peer(); *targetPeer = peer1; PeerId peer(peer1.address()); brpc::Channel channel; diff --git a/test/chunkserver/braft_cli_service_test.cpp b/test/chunkserver/braft_cli_service_test.cpp index 50f04588af..63a83cfe9d 100644 --- a/test/chunkserver/braft_cli_service_test.cpp +++ b/test/chunkserver/braft_cli_service_test.cpp @@ -20,21 +20,22 @@ * Author: wudemiao */ -#include -#include -#include -#include +#include "src/chunkserver/braft_cli_service.h" + #include #include #include +#include +#include +#include +#include #include +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/braft_cli_service.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" #include "test/chunkserver/chunkserver_test_util.h" namespace curve { @@ -43,10 +44,12 @@ namespace chunkserver { class BraftCliServiceTest : public testing::Test { protected: static void SetUpTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "SetUpTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "SetUpTestCase"; } static void TearDownTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "TearDownTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "TearDownTestCase"; } virtual void SetUp() { Exec("mkdir 6"); @@ -68,9 +71,9 @@ class BraftCliServiceTest : public testing::Test { butil::AtExitManager atExitManager; TEST_F(BraftCliServiceTest, basic) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9015; - const char *confs = "127.0.0.1:9015:0,127.0.0.1:9016:0,127.0.0.1:9017:0"; + const char* confs = "127.0.0.1:9015:0,127.0.0.1:9016:0,127.0.0.1:9017:0"; int snapshotInterval = 600; PeerId peer1("127.0.0.1:9015:0"); PeerId peer2("127.0.0.1:9016:0"); @@ -87,12 +90,8 @@ TEST_F(BraftCliServiceTest, basic) { std::cerr << "fork chunkserver 1 failed" << std::endl; ASSERT_TRUE(false); } else if (0 == pid1) { - const 
char *copysetdir = "local://./6"; - StartChunkserver(ip, - port + 0, - copysetdir, - confs, - snapshotInterval, + const char* copysetdir = "local://./6"; + StartChunkserver(ip, port + 0, copysetdir, confs, snapshotInterval, electionTimeoutMs); return; } @@ -102,12 +101,8 @@ TEST_F(BraftCliServiceTest, basic) { std::cerr << "fork chunkserver 2 failed" << std::endl; ASSERT_TRUE(false); } else if (0 == pid2) { - const char *copysetdir = "local://./7"; - StartChunkserver(ip, - port + 1, - copysetdir, - confs, - snapshotInterval, + const char* copysetdir = "local://./7"; + StartChunkserver(ip, port + 1, copysetdir, confs, snapshotInterval, electionTimeoutMs); return; } @@ -117,17 +112,13 @@ TEST_F(BraftCliServiceTest, basic) { std::cerr << "fork chunkserver 3 failed" << std::endl; ASSERT_TRUE(false); } else if (0 == pid3) { - const char *copysetdir = "local://./8"; - StartChunkserver(ip, - port + 2, - copysetdir, - confs, - snapshotInterval, + const char* copysetdir = "local://./8"; + StartChunkserver(ip, port + 2, copysetdir, confs, snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /* Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -144,6 +135,7 @@ TEST_F(BraftCliServiceTest, basic) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -166,7 +158,7 @@ TEST_F(BraftCliServiceTest, basic) { options.timeout_ms = 1500; options.max_retry = 3; - /* add peer - 非法 copyset */ + /* Add peer - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; @@ -188,7 +180,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* add peer - 非法 peerid */ + /* add peer - illegal peerid */ { PeerId leaderId = leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leaderId); @@ -210,12 +202,12 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "add peer: " << cntl.ErrorText(); } - /* add peer - 发送给不是leader的peer */ + /* add peer - sent to peers who are not leader */ { PeerId leaderId; LOG(INFO) << "true leader is: " << leader.to_string(); - if (0 - == strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { + if (0 == + strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { leaderId = peer2; } else { leaderId = peer1; @@ -240,13 +232,13 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* remove peer - 非法 copyset */ + /* remove peer - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); RemovePeerRequest request; - /* 非法 copyset */ + /* Illegal copyset */ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_leader_id(leaderId.to_string()); @@ -261,7 +253,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* remove peer - 非法 peer id */ + /* remove peer - illegal peer id */ { PeerId leaderId = leader; brpc::Channel channel; @@ -281,12 +273,12 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EINVAL, cntl.ErrorCode()); } - /* remove peer - 发送给不是 leader 的 peer */ + /* remove peer - sent to peers who are not leaders */ { PeerId leaderId; LOG(INFO) << "true leader is: " << leader.to_string(); - if (0 - == strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { + if (0 == + 
strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { leaderId = peer2; } else { leaderId = peer1; @@ -309,7 +301,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* transfer leader - 非法 copyset */ + /* transfer leader - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; @@ -346,7 +338,7 @@ TEST_F(BraftCliServiceTest, basic) { stub.transfer_leader(&cntl, &request, &response, NULL); ASSERT_FALSE(cntl.Failed()); } - /* transfer leader - 非法 peer */ + /* transfer leader - illegal peer */ { PeerId leaderId = leader; brpc::Channel channel; @@ -365,7 +357,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EINVAL, cntl.ErrorCode()); } - /* get leader - 非法 copyset */ + /* get leader - illegal copyset */ { PeerId leaderId = leaderId; brpc::Channel channel; diff --git a/test/chunkserver/chunk_service_test.cpp b/test/chunkserver/chunk_service_test.cpp index 3968766d91..c1191bde5b 100644 --- a/test/chunkserver/chunk_service_test.cpp +++ b/test/chunkserver/chunk_service_test.cpp @@ -20,24 +20,24 @@ * Author: wudemiao */ +#include "src/chunkserver/chunk_service.h" -#include -#include -#include -#include -#include #include #include #include +#include +#include +#include +#include +#include #include "include/chunkserver/chunkserver_common.h" +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" -#include "src/chunkserver/chunk_service.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -75,11 +75,10 @@ class ChunkserverTest : public testing::Test { butil::AtExitManager atExitManager; - TEST_F(ChunkserverTest, normal_read_write_test) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9020; - const char *confs = "127.0.0.1:9020:0,127.0.0.1:9021:0,127.0.0.1:9022:0"; + const char* confs = "127.0.0.1:9020:0,127.0.0.1:9021:0,127.0.0.1:9022:0"; int rpcTimeoutMs = 3000; int snapshotInterval = 600; @@ -96,12 +95,8 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -111,12 +106,8 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -126,16 +117,12 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -152,6 +139,7 @@ 
TEST_F(ChunkserverTest, normal_read_write_test) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -313,7 +301,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* delete 一个不存在的 chunk(重复删除) */ + /*Delete a non-existent chunk (duplicate deletion)*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -329,7 +317,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response.status()); } - /* Read 一个不存在的 Chunk */ + /*Read a non-existent Chunk*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -347,7 +335,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, response.status()); } - /* Applied index Read 一个不存在的 Chunk */ + /*Applied index Read a non-existent Chunk*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -416,9 +404,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { request.set_copysetid(copysetId); request.set_chunkid(chunkId); request.set_correctedsn(sn); - stub.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -435,9 +421,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { request.set_copysetid(copysetId); request.set_chunkid(chunkId); request.set_correctedsn(sn); - stub.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, @@ -467,7 +451,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(0, channel.Init(leader.addr, NULL)); ChunkService_Stub stub(&channel); - // get hash : 访问不存在的chunk + // Get hash: Access non-existent chunks { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -485,7 +469,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_STREQ("0", response.hash().c_str()); } - // get hash : 非法的offset和length + // Get hash: illegal offset and length { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -560,7 +544,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(1, response.chunksn().size()); } - // get hash : 访问存在的chunk + // Get hash: Access existing chunks { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -579,7 +563,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { } } - /* 多 chunk read/write/delete */ + /*Multi chunk read/write/delete*/ { brpc::Channel channel; if (channel.Init(leader.addr, NULL) != 0) { @@ -685,7 +669,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* delete 一个不存在的 chunk(重复删除) */ + /*Delete a non-existent chunk (duplicate deletion)*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -703,7 +687,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { } } } - /* read 一个不存在的 chunk */ + /*Read a non-existent chunk*/ { brpc::Channel channel; uint32_t requestSize = kOpRequestAlignSize; @@ -770,7 +754,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* read 一个不存在的 chunk */ + /*Read a non-existent chunk*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); diff --git 
a/test/chunkserver/chunk_service_test2.cpp b/test/chunkserver/chunk_service_test2.cpp index 674220d91a..9d3c136e14 100644 --- a/test/chunkserver/chunk_service_test2.cpp +++ b/test/chunkserver/chunk_service_test2.cpp @@ -20,24 +20,23 @@ * Author: wudemiao */ - -#include -#include -#include -#include -#include #include #include #include +#include +#include +#include +#include +#include #include "include/chunkserver/chunkserver_common.h" +#include "proto/copyset.pb.h" +#include "src/chunkserver/chunk_service.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" -#include "src/chunkserver/chunk_service.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -76,9 +75,9 @@ class ChunkService2Test : public testing::Test { butil::AtExitManager atExitManager; TEST_F(ChunkService2Test, illegial_parameters_test) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9023; - const char *confs = "127.0.0.1:9023:0,127.0.0.1:9024:0,127.0.0.1:9025:0"; + const char* confs = "127.0.0.1:9023:0,127.0.0.1:9024:0,127.0.0.1:9025:0"; int rpcTimeoutMs = 3000; int snapshotInterval = 600; @@ -95,12 +94,8 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -110,12 +105,8 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -125,16 +116,12 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -151,6 +138,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -177,13 +165,13 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); - /* 非法参数 request 测试 */ + /*Illegal parameter request test*/ brpc::Channel channel; if (channel.Init(leader.addr, NULL) != 0) { LOG(ERROR) << "Fail to init channel to " << leader; } ChunkService_Stub stub(&channel); - /* read 溢出 */ + /*Read overflow*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -201,7 +189,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read offset没对齐 */ + /*Read offset not aligned*/ { brpc::Controller cntl; 
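// The illegal-parameter cases in this test all exercise the same request
// check: offset and size must be multiples of kOpRequestAlignSize, and
// offset + size must not run past the end of the chunk. A minimal sketch of
// such a predicate (IsAlignedRequest and chunkSize are illustrative names,
// not the actual chunkserver implementation):
//
//   static bool IsAlignedRequest(uint64_t offset, uint64_t size,
//                                uint64_t chunkSize) {
//       if (offset % kOpRequestAlignSize != 0 ||
//           size % kOpRequestAlignSize != 0) {
//           return false;  // unaligned offset/size -> INVALID_REQUEST
//       }
//       // overflow past the chunk end -> INVALID_REQUEST
//       return offset + size <= chunkSize;
//   }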
cntl.set_timeout_ms(rpcTimeoutMs); @@ -219,7 +207,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read size没对齐 */ + /*Read size not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -237,7 +225,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read copyset 不存在 */ + /*Read copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -256,7 +244,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* read snapshot 溢出 */ + /*Read snapshot overflow*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -274,7 +262,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot offset没对齐 */ + /*Read snapshot offset not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -293,7 +281,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot size没对齐 */ + /*Read snapshot size not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -312,7 +300,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot copyset 不存在 */ + /*Read snapshot copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -331,7 +319,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* write 溢出 */ + /*Write overflow*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -350,7 +338,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write offset没对齐 */ + /*Write offset not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -369,7 +357,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write size没对齐 */ + /*Write size not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -388,7 +376,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write copyset 不存在 */ + /*The write copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -407,7 +395,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* delete copyset 不存在*/ + /*Delete copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -423,7 +411,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* delete snapshot copyset 不存在*/ + /*Delete snapshot copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -434,9 +422,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { request.set_copysetid(copysetId + 1); request.set_chunkid(chunkId); request.set_correctedsn(sn); - 
stub.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, @@ -456,7 +442,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* 不是 leader */ + /* Not the leader */ { PeerId peer1; PeerId peer2; @@ -562,13 +548,12 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { class ChunkServiceTestClosure : public ::google::protobuf::Closure { public: - explicit ChunkServiceTestClosure(int sleepUs = 0) : sleep_(sleepUs) { - } + explicit ChunkServiceTestClosure(int sleepUs = 0) : sleep_(sleepUs) {} virtual ~ChunkServiceTestClosure() = default; void Run() override { if (0 != sleep_) { - // 睡眠一会方面测试,overload + // Sleep for a while to test overload ::usleep(sleep_); LOG(INFO) << "return rpc"; } @@ -580,13 +565,12 @@ class ChunkServiceTestClosure : public ::google::protobuf::Closure { class UpdateEpochTestClosure : public ::google::protobuf::Closure { public: - explicit UpdateEpochTestClosure(int sleepUs = 0) : sleep_(sleepUs) { - } + explicit UpdateEpochTestClosure(int sleepUs = 0) : sleep_(sleepUs) {} virtual ~UpdateEpochTestClosure() = default; void Run() override { if (0 != sleep_) { - // 睡眠一会方面测试,overload + // Sleep for a while to test overload ::usleep(sleep_); LOG(INFO) << "return rpc"; } @@ -602,12 +586,12 @@ TEST_F(ChunkService2Test, overload_test) { // inflight throttle uint64_t maxInflight = 0; - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service - CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance(); + CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance(); ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = &nodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle; @@ -690,9 +674,7 @@ TEST_F(ChunkService2Test, overload_test) { request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } @@ -750,12 +732,12 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { // inflight throttle uint64_t maxInflight = 10; - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service - CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance(); + CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance(); ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = &nodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle; @@ -780,17 +762,17 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { }; std::vector threads; - // 启动10个线程,将chunkserver压满 + // Start 10 threads to fully load the chunkserver for (int i = 0; i < 10; ++i) { std::thread t1(writeFunc); threads.push_back(std::move(t1)); } - // 等待进程启动起来 + // Wait for the threads to start ::usleep(500 * 1000); ASSERT_FALSE(inflightThrottle->IsOverLoad()); - // 压满之后chunkserver后面收到的request都会被拒绝 +
// All requests received after the chunkserver is filled will be rejected // write chunk { brpc::Controller cntl; @@ -863,9 +845,7 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } @@ -916,7 +896,8 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } - // 等待request处理完成,之后chunkserver又重新可以接收新的request + // Wait for the request processing to complete, and then chunkserver can + // receive new requests again for (auto it = threads.begin(); it != threads.end(); ++it) { it->join(); } @@ -995,9 +976,7 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, &done); ASSERT_NE(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } @@ -1055,12 +1034,12 @@ TEST_F(ChunkService2Test, CheckEpochTest) { // inflight throttle uint64_t maxInflight = 10000; - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service - CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance(); + CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance(); ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = &nodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle; @@ -1083,7 +1062,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_chunkid(chunkId); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - response.status()); + response.status()); } // write chunk request have epoch, but epoch map have no epoch @@ -1100,7 +1079,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - response.status()); + response.status()); } // update epoch map to {(1, 1) , (2, 2)} { @@ -1130,7 +1109,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - response.status()); + response.status()); } // write chunk check epoch failed { @@ -1146,7 +1125,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD, - response.status()); + response.status()); } // update epoch map to {(1, 2) , (2, 2)} @@ -1174,7 +1153,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD, - response.status()); + response.status()); } } diff --git a/test/chunkserver/chunkserver_helper_test.cpp b/test/chunkserver/chunkserver_helper_test.cpp index 
e9d538bf0c..d401a22185 100644 --- a/test/chunkserver/chunkserver_helper_test.cpp +++ b/test/chunkserver/chunkserver_helper_test.cpp @@ -20,14 +20,16 @@ * Author: lixiaocui */ -#include #include "src/chunkserver/chunkserver_helper.h" + + #include + + #include "src/chunkserver/register.h" namespace curve { namespace chunkserver { TEST(ChunkServerMeta, test_encode_and_decode) { - // 1. 正常编解码 + // 1. Normal encoding and decoding ChunkServerMetadata metadata; metadata.set_version(CURRENT_METADATA_VERSION); metadata.set_id(1); @@ -43,13 +45,13 @@ TEST(ChunkServerMeta, test_encode_and_decode) { ASSERT_EQ(metadata.id(), metaOut.id()); ASSERT_EQ(metadata.token(), metaOut.token()); - // 2. 编码异常 + // 2. Encoding failure case metadata.clear_token(); strOut.clear(); ASSERT_FALSE( ChunkServerMetaHelper::EncodeChunkServerMeta(metadata, &strOut)); - // 3. 解码异常 + // 3. Decoding error case metadata.set_token("hello"); metadata.set_checksum(9999); ASSERT_TRUE( diff --git a/test/chunkserver/chunkserver_service_test.cpp b/test/chunkserver/chunkserver_service_test.cpp index 106501e267..4b834a5037 100644 --- a/test/chunkserver/chunkserver_service_test.cpp +++ b/test/chunkserver/chunkserver_service_test.cpp @@ -20,40 +20,41 @@ * Author: lixiaocui1 */ -#include -#include +#include "src/chunkserver/chunkserver_service.h" + #include +#include #include +#include #include -#include "src/chunkserver/chunkserver_service.h" -#include "test/chunkserver/mock_copyset_node_manager.h" + #include "proto/chunkserver.pb.h" +#include "test/chunkserver/mock_copyset_node_manager.h" namespace curve { namespace chunkserver { -using ::testing::Return; using ::testing::_; +using ::testing::Return; TEST(ChunkServerServiceImplTest, test_ChunkServerStatus) { - // 启动ChunkServerService + // Start the ChunkServerService auto server = new brpc::Server(); MockCopysetNodeManager* copysetNodeManager = new MockCopysetNodeManager(); ChunkServerServiceImpl* chunkserverService = new ChunkServerServiceImpl(copysetNodeManager); - ASSERT_EQ(0, - server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE)); + ASSERT_EQ( + 0, server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE)); ASSERT_EQ(0, server->Start("127.0.0.1", {5900, 5999}, nullptr)); auto listenAddr = butil::endpoint2str(server->listen_address()).c_str(); - brpc::Channel channel; ASSERT_EQ(0, channel.Init(listenAddr, NULL)); ChunkServerService_Stub stub(&channel); ChunkServerStatusRequest request; ChunkServerStatusResponse response; - // 1. 指定chunkserver加载copyset完成 + // 1. Specify that the chunkserver has finished loading copysets { EXPECT_CALL(*copysetNodeManager, LoadFinished()) .WillOnce(Return(false)); @@ -63,23 +64,22 @@ TEST(ChunkServerServiceImplTest, test_ChunkServerStatus) { ASSERT_FALSE(response.copysetloadfin()); } - // 2. 指定chunkserver加载copyset未完成 + // 2. Specify that the chunkserver has not finished loading copysets { - EXPECT_CALL(*copysetNodeManager, LoadFinished()) - .WillOnce(Return(true)); + EXPECT_CALL(*copysetNodeManager, LoadFinished()).WillOnce(Return(true)); brpc::Controller cntl; stub.ChunkServerStatus(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_TRUE(response.copysetloadfin()); } - // 停止chunkserver service + // Stop the chunkserver service server->Stop(0); server->Join(); delete server; server = nullptr; - // 3. 未获取到指定chunkserver加载copyset状态 + // 3. 
Unable to obtain the specified chunkserver loading copyset status { brpc::Controller cntl; stub.ChunkServerStatus(&cntl, &request, &response, nullptr); diff --git a/test/chunkserver/chunkserver_snapshot_test.cpp b/test/chunkserver/chunkserver_snapshot_test.cpp index b534ca2ee3..a05a9e6498 100644 --- a/test/chunkserver/chunkserver_snapshot_test.cpp +++ b/test/chunkserver/chunkserver_snapshot_test.cpp @@ -21,25 +21,25 @@ */ #include -#include #include +#include #include -#include "test/chunkserver/chunkserver_test_util.h" -#include "src/chunkserver/copyset_node_manager.h" +#include "proto/common.pb.h" +#include "proto/copyset.pb.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "proto/common.pb.h" -#include "proto/copyset.pb.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; static constexpr uint32_t kOpRequestAlignSize = 4096; @@ -64,7 +64,7 @@ class ChunkServerSnapshotTest : public testing::Test { Exec(TestCluster::RemoveCopysetDirCmd(peer3).c_str()); Exec(TestCluster::RemoveCopysetDirCmd(peer4).c_str()); /* wait for process exit */ - ::usleep(100*1000); + ::usleep(100 * 1000); } public: @@ -77,26 +77,22 @@ class ChunkServerSnapshotTest : public testing::Test { }; /** - * TODO(wudemiao) 后期将发 I/O 和验证再抽象一下 + * TODO(wudemiao) will further abstract I/O and verification in the later stage */ /** - * 正常 I/O 验证,先写进去,再读出来验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * Normal I/O verification, write it in first, then read it out for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -static void WriteThenReadVerify(PeerId leaderId, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +static void WriteThenReadVerify(PeerId leaderId, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, + int length, char fillCh, int loop) { brpc::Channel channel; uint64_t sn = 1; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); @@ -112,14 +108,13 @@ static void WriteThenReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(sn); cntl.request_attachment().resize(length, fillCh); stub.WriteChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode() + << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); @@ -134,13 +129,12 @@ static void WriteThenReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); 
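// Each verification round targets offset length * i, so `loop` iterations
// cover the contiguous range [0, length * loop) of the chunk. Callers bump
// the fill byte between rounds so that a later read cannot pass on stale
// data from an earlier write, e.g. (a sketch of the typical call sequence
// used by the tests below):
//
//   WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId,
//                       length, ch, loop);      // round 1: fill with ch
//   WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId,
//                       length, ch + 1, loop);  // round 2: overwrite with ch + 1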
request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode() + << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); @@ -152,22 +146,18 @@ static void WriteThenReadVerify(PeerId leaderId, } /** - * 正常 I/O 验证,read 数据验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * Normal I/O verification, read data verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -static void ReadVerify(PeerId leaderId, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +static void ReadVerify(PeerId leaderId, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { brpc::Channel channel; uint64_t sn = 1; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); @@ -181,16 +171,14 @@ static void ReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); std::string expectRead(length, fillCh); ASSERT_STREQ(expectRead.c_str(), cntl.response_attachment().to_string().c_str()); @@ -198,22 +186,18 @@ static void ReadVerify(PeerId leaderId, } /** - * 异常 I/O 验证,验证集群是否处于不可用状态 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * Abnormal I/O verification to verify if the cluster is in an unavailable state + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -static void ReadVerifyNotAvailable(PeerId leaderId, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +static void ReadVerifyNotAvailable(PeerId leaderId, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, + int length, char fillCh, int loop) { brpc::Channel channel; uint64_t sn = 1; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); @@ -228,30 +212,29 @@ static void ReadVerifyNotAvailable(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + 
request.set_offset(length * i); request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); LOG(INFO) << "read: " << CHUNK_OP_STATUS_Name(response.status()); ASSERT_TRUE(cntl.Failed() || - response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response.status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } } /** - * 验证copyset status是否符合预期 + * Verify if the copyset status meets expectations * @param peerId: peer id - * @param logicPoolID: 逻辑池id - * @param copysetId: 复制组id - * @param expectResp: 期待的copyset status + * @param logicPoolID: Logical Pool ID + * @param copysetId: Copy group ID + * @param expectResp: Expected copyset status */ -static void CopysetStatusVerify(PeerId peerId, - LogicPoolID logicPoolID, +static void CopysetStatusVerify(PeerId peerId, LogicPoolID logicPoolID, CopysetID copysetId, - CopysetStatusResponse *expectResp) { + CopysetStatusResponse* expectResp) { brpc::Channel channel; ASSERT_EQ(0, channel.Init(peerId.addr, NULL)); CopysetService_Stub stub(&channel); @@ -261,7 +244,7 @@ static void CopysetStatusVerify(PeerId peerId, cntl.set_timeout_ms(5000); request.set_logicpoolid(logicPoolID); request.set_copysetid(copysetId); - Peer *peer = new Peer(); + Peer* peer = new Peer(); request.set_allocated_peer(peer); peer->set_address(peerId.to_string()); request.set_queryhash(true); @@ -279,14 +262,13 @@ static void CopysetStatusVerify(PeerId peerId, } /** - * 验证几个副本的copyset status是否一致 - * @param peerIds: 待验证的peers - * @param logicPoolID: 逻辑池id - * @param copysetId: 复制组id + * Verify if the copyset status of several replicas is consistent + * @param peerIds: Peers to be verified + * @param logicPoolID: Logical Pool ID + * @param copysetId: Copy group ID */ -static void CopysetStatusVerify(const std::vector &peerIds, - LogicPoolID logicPoolID, - CopysetID copysetId, +static void CopysetStatusVerify(const std::vector& peerIds, + LogicPoolID logicPoolID, CopysetID copysetId, uint64_t expectEpoch = 0) { std::vector resps; for (PeerId peerId : peerIds) { @@ -300,7 +282,7 @@ static void CopysetStatusVerify(const std::vector &peerIds, cntl.set_timeout_ms(5000); request.set_logicpoolid(logicPoolID); request.set_copysetid(copysetId); - Peer *peer = new Peer(); + Peer* peer = new Peer(); request.set_allocated_peer(peer); peer->set_address(peerId.to_string()); request.set_queryhash(true); @@ -309,7 +291,8 @@ static void CopysetStatusVerify(const std::vector &peerIds, ASSERT_FALSE(cntl.Failed()); LOG(INFO) << peerId.to_string() << "'s status is: \n" << response.DebugString(); - // 多个副本的state是不一样的,因为有leader,也有follower + // The states of multiple replicas are different because there are + // leaders and followers response.clear_state(); response.clear_peer(); response.clear_firstindex(); @@ -333,9 +316,11 @@ static void CopysetStatusVerify(const std::vector &peerIds, butil::AtExitManager atExitManager; /** - * 验证1个节点的复制组是否能够正常提供服务 - * 1. 创建一个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 + * Verify whether the replication group of one node can provide services + * normally + * 1. Create a replication group for a replica + * 2. 
Wait for the leader to generate, write the data, and then read it out for + * verification */ TEST_F(ChunkServerSnapshotTest, OneNode) { LogicPoolID logicPoolId = 2; @@ -355,23 +340,18 @@ TEST_F(ChunkServerSnapshotTest, OneNode) { ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); ASSERT_STREQ(peer1.to_string().c_str(), leaderId.to_string().c_str()); - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); CopysetStatusResponse expectResp; - // read、write、1次配置变更 + // read, write, 1 configuration change int64_t commitedIndex = loop + 1; expectResp.set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); expectResp.set_state(braft::STATE_LEADER); - Peer *peer = new Peer(); + Peer* peer = new Peer(); expectResp.set_allocated_peer(peer); peer->set_address(peer1.to_string()); - Peer *leader = new Peer(); + Peer* leader = new Peer(); expectResp.set_allocated_leader(leader); leader->set_address(peer1.to_string()); expectResp.set_readonly(false); @@ -390,12 +370,15 @@ TEST_F(ChunkServerSnapshotTest, OneNode) { } /** - * 验证1个节点的关闭 leader 后重启是否能够正常服务 - * 1. 创建1个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether the shutdown of the leader and restart of one node can provide + * normal service + * 1. Create a replication group for 1 replica + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written + * before the read + * 5. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, OneNodeShutdown) { LogicPoolID logicPoolId = 2; @@ -415,45 +398,30 @@ TEST_F(ChunkServerSnapshotTest, OneNodeShutdown) { ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); ASSERT_STREQ(peer1.to_string().c_str(), leaderId.to_string().c_str()); - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); ASSERT_EQ(0, cluster.ShutdownPeer(peer1)); - // 测试发现集群不可用 - ReadVerifyNotAvailable(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); + // Testing found that the cluster is not available + ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, chunkId, length, + ch, 1); ASSERT_EQ(0, cluster.StartPeer(peer1)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); ASSERT_STREQ(peer1.to_string().c_str(), leaderId.to_string().c_str()); ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); CopysetStatusResponse expectResp; int64_t commitedIndex = 2 * loop + 2; expectResp.set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); expectResp.set_state(braft::STATE_LEADER); - Peer *peer = new Peer(); + Peer* peer = new Peer(); expectResp.set_allocated_peer(peer); peer->set_address(peer1.to_string()); - Peer *leader = new Peer(); + Peer* leader = new Peer(); expectResp.set_allocated_leader(leader); leader->set_address(peer1.to_string()); expectResp.set_readonly(false); @@ -473,9 +441,10 @@ TEST_F(ChunkServerSnapshotTest, OneNodeShutdown) { } /** - * 验证2个节点是否能够正常提供服务 - * 1. 
创建2个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 + * Verify whether two nodes can provide services normally + * 1. Create a replication group of 2 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification */ TEST_F(ChunkServerSnapshotTest, TwoNodes) { LogicPoolID logicPoolId = 2; @@ -498,12 +467,7 @@ TEST_F(ChunkServerSnapshotTest, TwoNodes) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); ::usleep(2000 * 1000); @@ -511,12 +475,15 @@ TEST_F(ChunkServerSnapshotTest, TwoNodes) { } /** - * 验证2个节点的关闭非 leader 节点 后重启是否能够正常服务 - * 1. 创建2个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether restarting two nodes after closing non leader nodes can + * provide normal service + * 1. Create a replication group of 2 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown is not a leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written + * before the read + * 5. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownOnePeer) { LogicPoolID logicPoolId = 2; @@ -539,19 +506,14 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownOnePeer) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { shutdownPeerid = peer2; } else { shutdownPeerid = peer1; @@ -563,40 +525,33 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownOnePeer) { ::usleep(2000 * electionTimeoutMs); - // 测试发现集群不可用 - ReadVerifyNotAvailable(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); + // Testing found that the cluster is not available + ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, chunkId, length, + ch, 1); ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证2个节点的关闭 leader 后重启是否能够正常服务 - * 1. 创建2个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether the shutdown of the leader and restart of two nodes can + * provide normal service + * 1. Create a replication group of 2 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. 
Shutdown leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written + * before the read + * 5. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownLeader) { LogicPoolID logicPoolId = 2; @@ -619,48 +574,34 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownLeader) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); // shutdown leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderId)); - // 测试发现集群不可用 - ReadVerifyNotAvailable(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); + // Testing found that the cluster is not available + ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, chunkId, length, + ch, 1); ASSERT_EQ(0, cluster.StartPeer(leaderId)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点是否能够正常提供服务 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 + * Verify whether the three nodes can provide services normally + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification */ TEST_F(ChunkServerSnapshotTest, ThreeNodes) { LogicPoolID logicPoolId = 2; @@ -685,26 +626,24 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodes) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的关闭非 leader 节点 后重启是否能够正常服务 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether restarting after closing non leader nodes on three nodes can + * provide normal service + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown is not a leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written + * before the read + * 5. 
Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownOnePeer) { LogicPoolID logicPoolId = 2; @@ -729,19 +668,14 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownOnePeer) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { shutdownPeerid = peer2; } else { shutdownPeerid = peer1; @@ -750,28 +684,26 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownOnePeer) { ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的关闭 leader 节点 后重启是否能够正常服务 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether the shutdown of the leader node and restart of three nodes can + * provide normal service + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written + * before the read + * 5. 
Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownLeader) { LogicPoolID logicPoolId = 2; @@ -796,62 +728,49 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownLeader) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // shutdown leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderId)); - // 测试发现集群暂时不可用 - ReadVerifyNotAvailable(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, - 1); + // Testing found that the cluster is temporarily unavailable + ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, chunkId, length, + ch, 1); ASSERT_EQ(0, cluster.StartPeer(leaderId)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 5. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 6. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 7. transfer leader 到shut down 的peer 上 - * 8. 在 read 之前写入的数据验证 - * 9. 再 write 数据,再 read 出来验证一遍 + * Verify the shutdown of non leader nodes on three nodes, restart, and control + * the recovery from install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for + * verification + * 3. Shutdown non leader + * 4. Then sleep exceeds one snapshot interval and write read data + * 5. Then sleep for more than one snapshot interval and write read data; 4,5 + * two-step It is to ensure that at least two snapshots are taken, so that when + * the node restarts again, it must pass the install snapshot, Because the log + * has been deleted + * 6. Wait for the leader to be generated, and then verify the data written + * before the read + * 7. transfer leader to shut down peer + * 8. Verification of data written before read + * 9. 
Write new data again, then read it back and verify */ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -877,19 +796,14 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // shutdown 某个非 leader 的 peer + // Shut down a non-leader peer PeerId shutdownPeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { shutdownPeerid = peer2; } else { shutdownPeerid = peer1; @@ -900,40 +814,25 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { leaderId.to_string().c_str())); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid)); - // wait snapshot, 保证能够触发安装快照 - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 2, - loop); - - // restart, 需要从 install snapshot 恢复 + // Wait for snapshots to ensure install snapshot will be triggered + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 2, loop); + + // Restart; it must recover via install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read back and verify ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 2, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 3, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 3, loop); // Wait shutdown peer recovery, and then transfer leader to it ::sleep(3); @@ -944,10 +843,7 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - shutdownPeerid, + status = TransferLeader(logicPoolId, copysetId, conf, shutdownPeerid, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderId); @@ -961,37 +857,35 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), shutdownPeerid.to_string().c_str())); - // 读出来验证一遍 + // Read back and verify ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 3, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 4, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 4, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 
等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. read 之前 write 的数据验证一遍 - * 5. 再 write 数据,然后 read 出来验证一遍 - * 6. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 7. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 9. 删除 shutdown peer 的数据目录,然后再拉起来 - * 10. 然后 read 之前写入的数据验证一遍 - * 11. transfer leader 到shut down 的 peer 上 - * 12. 在 read 之前写入的数据验证 - * 13. 再 write 数据,再 read 出来验证一遍 + * Verify shutting down a non-leader node in a 3-node group, restarting it, + * and forcing it to recover via install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for a leader to emerge, write data, then read it back and verify + * 3. Shut down a non-leader peer + * 4. Read the previously written data and verify it + * 5. Write new data, then read it back and verify + * 6. Sleep for more than one snapshot interval, then write and read data + * 7. Sleep for more than one snapshot interval again, then write and read + * data; steps 6 and 7 ensure that at least two snapshots are taken, so + * that the restarted node must recover via install snapshot, because the + * log has already been deleted + * 9. Delete the data directory of the shut-down peer, then restart it + * 10. Read the previously written data and verify it + * 11. Transfer leader to the previously shut-down peer + * 12. Read the previously written data and verify it + * 13. Write new data again, then read it back and verify */ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { LogicPoolID logicPoolId = 2; @@ -1017,19 +911,14 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // shutdown 某个非 leader 的 peer + // Shut down a non-leader peer PeerId shutdownPeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { shutdownPeerid = peer2; } else { shutdownPeerid = peer1; @@ -1040,54 +929,40 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { leaderId.to_string().c_str())); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid)); - // 读出来验证一遍 + // Read back and verify ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); - - // wait snapshot, 保证能够触发安装快照 - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 2, - loop); - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 3, - loop); - - // 删除此 peer 的数据,然后重启 + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); + + // Wait for snapshots to ensure install snapshot will be triggered + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 2, loop); + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 3, loop); +
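    // The sleep/write rounds above are the log-compaction idiom used
    // throughout this file: sleeping past the snapshot interval twice
    // guarantees at least two snapshots, so the raft log is truncated and a
    // restarted peer can only catch up via install snapshot. A minimal sketch
    // of the same idiom, assuming this fixture's WriteThenReadVerify helper
    // and the snapshotTimeoutS variable used above:
    //
    //     for (int round = 2; round <= 3; ++round) {
    //         ::sleep(1.5 * snapshotTimeoutS);  // let a snapshot be taken
    //         WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId,
    //                             length, ch + round, loop);
    //     }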
+ // Delete this peer's data, then restart it ASSERT_EQ(0, ::system(TestCluster::RemoveCopysetDirCmd(shutdownPeerid) - .c_str())); //NOLINT + .c_str())); // NOLINT LOG(INFO) << "remove data cmd: " << TestCluster::RemoveCopysetDirCmd(shutdownPeerid); - std::shared_ptr<LocalFileSystem> - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + std::shared_ptr<LocalFileSystem> fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); Exec(TestCluster::CopysetDirWithoutProtocol(shutdownPeerid).c_str()); LOG(INFO) << "remove data dir: " << TestCluster::CopysetDirWithoutProtocol(shutdownPeerid); - ASSERT_FALSE(fs->DirExists(TestCluster::CopysetDirWithoutProtocol( - shutdownPeerid).c_str())); //NOLINT + ASSERT_FALSE( + fs->DirExists(TestCluster::CopysetDirWithoutProtocol(shutdownPeerid) + .c_str())); // NOLINT ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read back and verify ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 3, loop); // Wait shutdown peer recovery, and then transfer leader to it @@ -1099,10 +974,7 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - shutdownPeerid, + status = TransferLeader(logicPoolId, copysetId, conf, shutdownPeerid, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderId); @@ -1116,38 +988,36 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), shutdownPeerid.to_string().c_str())); - // 读出来验证一遍 + // Read back and verify ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 3, loop); - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 4, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 4, loop); ::usleep(2000 * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. read 之前 write 的数据验证一遍 - * 5. 再 write 数据,然后 read 出来验证一遍 - * 6. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 7. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 9. 通过配置变更 add peer - * 10. 然后 read 之前写入的数据验证一遍 - * 11. 在发起 write,再 read 读出来验证一遍 - * 12. transfer leader 到 add 的 peer 上 - * 13. 在 read 之前写入的数据验证 - * 14. 再 write 数据,再 read 出来验证一遍 + * Verify shutting down a non-leader node in a 3-node group, restarting it, + * and forcing it to recover via install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for a leader to emerge, write data, then read it back and verify + * 3. Shut down a non-leader peer + * 4. Read the previously written data and verify it + * 5. Write new data, then read it back and verify + * 6. Sleep for more than one snapshot interval, then write and read data + * 7. Sleep for more than one snapshot interval again, then write and read + * data; steps 6 and 7 ensure that at least two snapshots are taken, so + * that the restarted node must recover via install snapshot, because the + * log has already been deleted + * 9. Add a peer through a configuration change + * 10. Read the previously written data and verify it + * 11. Write again, then read back and verify + * 12. 
Transfer leader to the added peer + * 13. Read the previously written data and verify it + * 14. Write new data again, then read it back and verify */ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -1173,21 +1043,16 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write,多个 chunk file + // Initiate read/write on multiple chunk files for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch, loop); } - // shutdown 某个非 leader 的 peer + // Shut down a non-leader peer PeerId shutdownPeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { shutdownPeerid = peer2; } else { shutdownPeerid = peer1; @@ -1198,46 +1063,31 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { leaderId.to_string().c_str())); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid)); - // 读出来验证一遍 + // Read back and verify for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 1, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 1, loop); } - // wait snapshot, 保证能够触发安装快照 - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Wait for snapshots to ensure install snapshot will be triggered + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 2, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 2, loop); } - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 3, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 3, loop); } - // add 一个 peer + // Add a peer { ASSERT_EQ(0, cluster.StartPeer(peer4, true)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); @@ -1245,26 +1095,18 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = 80000; - butil::Status status = AddPeer(logicPoolId, - copysetId, - cluster.CopysetConf(), - peer4, - options); + butil::Status status = AddPeer(logicPoolId, copysetId, + cluster.CopysetConf(), peer4, options); ASSERT_EQ(0, status.error_code()); } - // 读出来验证一遍 + // Read back and verify for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 3, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 4, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 4, loop); } @@ -1277,11 +1119,7 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - 
copysetId, - conf, - peer4, - options); + status = TransferLeader(logicPoolId, copysetId, conf, peer4, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderId); if (leaderId == peer4) { break; } } ::sleep(1); } - ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), - peer4.to_string().c_str())); + ASSERT_EQ( + 0, ::strcmp(leaderId.to_string().c_str(), peer4.to_string().c_str())); - // 读出来验证一遍 + // Read back and verify for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 4, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 5, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 5, loop); } @@ -1321,20 +1154,23 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { } /** - * * 验证3个节点的 remove 一个节点,然后再 add 回来,并控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. 通过配置变更 remove 一个 非 leader - * 4. read 之前 write 的数据验证一遍 - * 5. 再 write 数据,然后 read 出来验证一遍 - * 6. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 7. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 9. 通过配置变更再将之前 remove 的 peer add 回来 - * 10. transfer leader 到此 peer - * 11. 在 read 之前写入的数据验证 - * 12. 再 write 数据,再 read 出来验证一遍 + * Verify removing one node from a 3-node group, adding it back, and forcing + * it to recover via install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for a leader to emerge, write data, then read it back and verify + * 3. Remove a non-leader through a configuration change + * 4. Read the previously written data and verify it + * 5. Write new data, then read it back and verify + * 6. Sleep for more than one snapshot interval, then write and read data + * 7. Sleep for more than one snapshot interval again, then write and read + * data; steps 6 and 7 ensure that at least two snapshots are taken, so + * that the node must recover via install snapshot, because the log has + * already been deleted + * 9. Add the previously removed peer back through a configuration change + * 10. Transfer leader to this peer + * 11. Read the previously written data and verify it + * 12. 
Write new data again, then read it back and verify */ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -1360,21 +1196,16 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write,多个 chunk file + // Initiate read/write on multiple chunk files for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch, loop); } - // shutdown 某个非 leader 的 peer + // Choose a non-leader peer to remove PeerId removePeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { removePeerid = peer2; } else { removePeerid = peer1; @@ -1383,70 +1214,51 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { LOG(INFO) << "leader peer: " << leaderId.to_string(); ASSERT_NE(0, ::strcmp(removePeerid.to_string().c_str(), leaderId.to_string().c_str())); - // remove 一个 peer + // Remove a peer { Configuration conf = cluster.CopysetConf(); braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = 8000; - butil::Status status = RemovePeer(logicPoolId, - copysetId, - cluster.CopysetConf(), - removePeerid, - options); + butil::Status status = + RemovePeer(logicPoolId, copysetId, cluster.CopysetConf(), + removePeerid, options); ASSERT_EQ(0, status.error_code()); } - // 读出来验证一遍 + // Read back and verify for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 1, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 1, loop); } - // wait snapshot, 保证能够触发安装快照 - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Wait for snapshots to ensure install snapshot will be triggered + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 2, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 2, loop); } - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 3, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 3, loop); } - // add 回来 + // Add it back { Configuration conf = cluster.CopysetConf(); braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = 80000; - butil::Status status = AddPeer(logicPoolId, - copysetId, - cluster.CopysetConf(), - removePeerid, - options); + butil::Status status = + AddPeer(logicPoolId, copysetId, cluster.CopysetConf(), removePeerid, + options); ASSERT_EQ(0, status.error_code()); } @@ -1459,11 +1271,8 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - removePeerid, - options); + status = + TransferLeader(logicPoolId, copysetId, conf, removePeerid, options); if (0 == 
status.error_code()) { cluster.WaitLeader(&leaderId); if (leaderId == removePeerid) { break; } } ::sleep(1); } @@ -1476,18 +1285,13 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), removePeerid.to_string().c_str())); - // 读出来验证一遍 + // Read back and verify for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 3, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - i, - length, - ch + 4, + WriteThenReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 4, loop); } diff --git a/test/chunkserver/chunkserver_test_util.cpp b/test/chunkserver/chunkserver_test_util.cpp index cb2d020048..612594e9ac 100644 --- a/test/chunkserver/chunkserver_test_util.cpp +++ b/test/chunkserver/chunkserver_test_util.cpp @@ -22,27 +22,27 @@ #include "test/chunkserver/chunkserver_test_util.h" -#include -#include -#include #include #include #include +#include #include #include +#include +#include #include #include #include -#include "src/common/concurrent/task_thread_pool.h" -#include "src/common/crc32.h" +#include "src/chunkserver/cli.h" +#include "src/chunkserver/concurrent_apply/concurrent_apply.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "test/chunkserver/fake_datastore.h" +#include "src/common/concurrent/task_thread_pool.h" +#include "src/common/crc32.h" #include "src/common/uri_parser.h" -#include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "test/chunkserver/fake_datastore.h" using ::curve::chunkserver::concurrent::ConcurrentApplyOption; using ::curve::common::UriParser; @@ -50,25 +50,22 @@ using ::curve::common::UriParser; namespace curve { namespace chunkserver { -std::string Exec(const char *cmd) { - FILE *pipe = popen(cmd, "r"); +std::string Exec(const char* cmd) { + FILE* pipe = popen(cmd, "r"); if (!pipe) return "ERROR"; char buffer[4096]; std::string result = ""; while (!feof(pipe)) { - if (fgets(buffer, 1024, pipe) != NULL) - result += buffer; + if (fgets(buffer, 1024, pipe) != NULL) result += buffer; } pclose(pipe); return result; } -std::shared_ptr<FilePool> InitFilePool(std::shared_ptr<LocalFileSystem> fsptr, //NOLINT - int chunkfileCount, - int chunkfileSize, - int metaPageSize, - std::string poolpath, - std::string metaPath) { +std::shared_ptr<FilePool> InitFilePool( + std::shared_ptr<LocalFileSystem> fsptr, // NOLINT + int chunkfileCount, int chunkfileSize, int metaPageSize, + std::string poolpath, std::string metaPath) { auto filePoolPtr = std::make_shared<FilePool>(fsptr); if (filePoolPtr == nullptr) { LOG(FATAL) << "allocate chunkfile pool failed!"; @@ -76,10 +73,10 @@ std::shared_ptr<FilePool> InitFilePool(std::shared_ptr<LocalFileSystem> fsptr, int count = 1; std::string dirname = poolpath; while (count <= chunkfileCount) { - std::string filename = poolpath + std::to_string(count); + std::string filename = poolpath + std::to_string(count); fsptr->Mkdir(poolpath); int fd = fsptr->Open(filename.c_str(), O_RDWR | O_CREAT); - char *data = new char[chunkfileSize + 4096]; + char* data = new char[chunkfileSize + 4096]; memset(data, 'a', chunkfileSize + 4096); fsptr->Write(fd, data, 0, chunkfileSize + 4096); fsptr->Close(fd); @@ -87,7 +84,7 @@ std::shared_ptr<FilePool> InitFilePool(std::shared_ptr<LocalFileSystem> fsptr, delete[] data; } /** - * 持久化FilePool meta file + * Persist the FilePool meta file */ FilePoolMeta meta; @@ -107,11 +104,8 @@ std::shared_ptr<FilePool> 
InitFilePool(std::shared_ptr<LocalFileSystem> fsptr, return filePoolPtr; } -int StartChunkserver(const char *ip, - int port, - const char *copysetdir, - const char *confs, - const int snapshotInterval, +int StartChunkserver(const char* ip, int port, const char* copysetdir, + const char* confs, const int snapshotInterval, const int electionTimeoutMs) { LOG(INFO) << "Going to start chunk server"; @@ -123,13 +117,14 @@ int StartChunkserver(const char *ip, return -1; } if (server.Start(port, NULL) != 0) { - LOG(ERROR) << "Fail to start Server, port: " << port << ", errno: " - << errno << ", " << strerror(errno); + LOG(ERROR) << "Fail to start Server, port: " << port + << ", errno: " << errno << ", " << strerror(errno); return -1; } LOG(INFO) << "start rpc server success"; - std::shared_ptr<LocalFileSystem> fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT + std::shared_ptr<LocalFileSystem> fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT const uint32_t kMaxChunkSize = 16 * 1024 * 1024; CopysetNodeOptions copysetNodeOptions; copysetNodeOptions.ip = ip; @@ -188,12 +183,10 @@ int StartChunkserver(const char *ip, CopysetID copysetId = 100001; CopysetNodeManager::GetInstance().Init(copysetNodeOptions); CopysetNodeManager::GetInstance().Run(); - CHECK(CopysetNodeManager::GetInstance().CreateCopysetNode(logicPoolId, - copysetId, - peers)); + CHECK(CopysetNodeManager::GetInstance().CreateCopysetNode( + logicPoolId, copysetId, peers)); auto copysetNode = CopysetNodeManager::GetInstance().GetCopysetNode( - logicPoolId, - copysetId); + logicPoolId, copysetId); DataStoreOptions options; options.baseDir = "./test-temp"; options.chunkSize = 16 * 1024 * 1024; @@ -214,18 +207,16 @@ int StartChunkserver(const char *ip, return 0; } -butil::Status WaitLeader(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - PeerId *leaderId, - int electionTimeoutMs) { +butil::Status WaitLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + PeerId* leaderId, int electionTimeoutMs) { butil::Status status; const int kMaxLoop = (5 * electionTimeoutMs) / 100; for (int i = 0; i < kMaxLoop; ++i) { status = GetLeader(logicPoolId, copysetId, conf, leaderId); if (status.ok()) { /** - * 等待 flush noop entry + * Wait for the noop entry to be flushed */ ::usleep(electionTimeoutMs * 1000); return status; @@ -239,14 +230,14 @@ butil::Status WaitLeader(const LogicPoolID &logicPoolId, return status; } -TestCluster::TestCluster(const std::string &clusterName, +TestCluster::TestCluster(const std::string& clusterName, const LogicPoolID logicPoolID, const CopysetID copysetID, - const std::vector<PeerId> &peers) : - clusterName_(clusterName), - snapshotIntervalS_(1), - electionTimeoutMs_(1000), - catchupMargin_(10) { + const std::vector<PeerId>& peers) + : clusterName_(clusterName), + snapshotIntervalS_(1), + electionTimeoutMs_(1000), + catchupMargin_(10) { logicPoolID_ = logicPoolID; copysetID_ = copysetID; for (auto it = peers.begin(); it != peers.end(); ++it) { @@ -255,10 +246,8 @@ TestCluster::TestCluster(const std::string &clusterName, } } -int TestCluster::StartPeer(const PeerId &peerId, - const bool empty, - bool getChunkFromPool, - bool createChunkFilePool) { +int TestCluster::StartPeer(const PeerId& peerId, const bool empty, + bool getChunkFromPool, bool createChunkFilePool) { LOG(INFO) << "going start peer: " << peerId.to_string(); auto it = peersMap_.find(peerId.to_string()); if (it != peersMap_.end()) { @@ -299,30 +288,29 @@ int TestCluster::StartPeer(const PeerId &peerId, LOG(ERROR) << "start peer 
fork failed"; return -1; } else if (0 == pid) { - /* 在子进程起一个 ChunkServer */ - StartPeerNode(peer->options, peer->conf, - getChunkFromPool, createChunkFilePool); + /*Starting a ChunkServer in a child process*/ + StartPeerNode(peer->options, peer->conf, getChunkFromPool, + createChunkFilePool); exit(0); } LOG(INFO) << "Start peer success, pid: " << pid; peer->pid = pid; peer->state = PeerNodeState::RUNNING; - peersMap_.insert(std::pair>(peerId.to_string(), - std::move(peer))); + peersMap_.insert(std::pair>( + peerId.to_string(), std::move(peer))); return 0; } -int TestCluster::ShutdownPeer(const PeerId &peerId) { +int TestCluster::ShutdownPeer(const PeerId& peerId) { LOG(INFO) << "going to shutdown peer: " << peerId.to_string(); auto it = peersMap_.find(peerId.to_string()); if (it != peersMap_.end()) { int waitState; if (0 != kill(it->second->pid, SIGINT)) { LOG(ERROR) << "Stop peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } waitpid(it->second->pid, &waitState, 0); @@ -335,7 +323,7 @@ int TestCluster::ShutdownPeer(const PeerId &peerId) { } } -int TestCluster::StopPeer(const PeerId &peerId) { +int TestCluster::StopPeer(const PeerId& peerId) { auto it = peersMap_.find(peerId.to_string()); if (it != peersMap_.end()) { if (it->second->state != PeerNodeState::RUNNING) { @@ -345,8 +333,8 @@ int TestCluster::StopPeer(const PeerId &peerId) { } if (0 != kill(it->second->pid, SIGSTOP)) { LOG(ERROR) << "Hang peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } it->second->state = PeerNodeState::STOP; @@ -358,7 +346,7 @@ int TestCluster::StopPeer(const PeerId &peerId) { } } -int TestCluster::ContPeer(const PeerId &peerId) { +int TestCluster::ContPeer(const PeerId& peerId) { auto it = peersMap_.find(peerId.to_string()); if (it != peersMap_.end()) { if (it->second->state != PeerNodeState::STOP) { @@ -368,8 +356,8 @@ int TestCluster::ContPeer(const PeerId &peerId) { } if (0 != kill(it->second->pid, SIGCONT)) { LOG(ERROR) << "Cont peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } it->second->state = PeerNodeState::RUNNING; @@ -381,10 +369,10 @@ int TestCluster::ContPeer(const PeerId &peerId) { } } -int TestCluster::WaitLeader(PeerId *leaderId) { +int TestCluster::WaitLeader(PeerId* leaderId) { butil::Status status; /** - * 等待选举结束 + * Waiting for the election to end */ ::usleep(2 * electionTimeoutMs_ * 1000); const int kMaxLoop = (3 * electionTimeoutMs_) / 100; @@ -393,8 +381,10 @@ int TestCluster::WaitLeader(PeerId *leaderId) { status = GetLeader(logicPoolID_, copysetID_, conf_, leaderId); if (status.ok()) { /** - * 由于选举之后还需要提交应用 noop entry 之后才能提供服务, - * 所以这里需要等待 noop apply,这里等太短,可能容易失败,后期改进 + * Due to the need to submit the application noop entry after the + * election to provide services, So we need to wait for the noop + * application here. 
If the wait is too short, the test may + * occasionally fail; to be improved later */ usleep(electionTimeoutMs_ * 1000); LOG(INFO) << "Wait leader success, leader is: " @@ -417,9 +407,7 @@ int TestCluster::StopAllPeers() { return 0; } -const Configuration TestCluster::CopysetConf() const { - return conf_; -} +const Configuration TestCluster::CopysetConf() const { return conf_; } int TestCluster::SetsnapshotIntervalS(int snapshotIntervalS) { snapshotIntervalS_ = snapshotIntervalS; @@ -441,7 +429,7 @@ int TestCluster::StartPeerNode(CopysetNodeOptions options, bool enableGetchunkFromPool, bool createChunkFilePool) { /** - * 用于注释,说明 cmd format + * For reference: the cmd format */ std::string cmdFormat = R"( ./bazel-bin/test/chunkserver/server-test @@ -466,7 +454,7 @@ int TestCluster::StartPeerNode(CopysetNodeOptions options, confStr += it->to_string(); confStr += ","; } - // 去掉最后的逗号 + // Remove the trailing comma confStr.pop_back(); std::string cmd_dir("./bazel-bin/test/chunkserver/server-test"); @@ -478,28 +466,22 @@ int TestCluster::StartPeerNode(CopysetNodeOptions options, std::string confs; butil::string_printf(&confs, "-conf=%s", confStr.c_str()); std::string copyset_dir; - butil::string_printf(&copyset_dir, - "-copyset_dir=%s", + butil::string_printf(&copyset_dir, "-copyset_dir=%s", options.chunkDataUri.c_str()); std::string election_timeout_ms; - butil::string_printf(&election_timeout_ms, - "-election_timeout_ms=%d", + butil::string_printf(&election_timeout_ms, "-election_timeout_ms=%d", options.electionTimeoutMs); std::string snapshot_interval_s; - butil::string_printf(&snapshot_interval_s, - "-snapshot_interval_s=%d", + butil::string_printf(&snapshot_interval_s, "-snapshot_interval_s=%d", options.snapshotIntervalS); std::string catchup_margin; - butil::string_printf(&catchup_margin, - "-catchup_margin=%d", + butil::string_printf(&catchup_margin, "-catchup_margin=%d", options.catchupMargin); std::string getchunk_from_pool; - butil::string_printf(&getchunk_from_pool, - "-enable_getchunk_from_pool=%d", + butil::string_printf(&getchunk_from_pool, "-enable_getchunk_from_pool=%d", enableGetchunkFromPool); std::string create_pool; - butil::string_printf(&create_pool, - "-create_chunkfilepool=%d", + butil::string_printf(&create_pool, "-create_chunkfilepool=%d", createChunkFilePool); std::string logic_pool_id; butil::string_printf(&logic_pool_id, "-logic_pool_id=%d", logicPoolID_); @@ -508,59 +490,51 @@ int TestCluster::StartPeerNode(CopysetNodeOptions options, std::string raft_sync; butil::string_printf(&raft_sync, "-raft_sync=%s", "true"); - char *arg[] = { - const_cast(cmd.c_str()), - const_cast(ip.c_str()), - const_cast(port.c_str()), - const_cast(confs.c_str()), - const_cast(copyset_dir.c_str()), - const_cast(election_timeout_ms.c_str()), - const_cast(snapshot_interval_s.c_str()), - const_cast(catchup_margin.c_str()), - const_cast(logic_pool_id.c_str()), - const_cast(copyset_id.c_str()), - const_cast(getchunk_from_pool.c_str()), - const_cast(create_pool.c_str()), - NULL - }; + char* arg[] = {const_cast(cmd.c_str()), + const_cast(ip.c_str()), + const_cast(port.c_str()), + const_cast(confs.c_str()), + const_cast(copyset_dir.c_str()), + const_cast(election_timeout_ms.c_str()), + const_cast(snapshot_interval_s.c_str()), + const_cast(catchup_margin.c_str()), + const_cast(logic_pool_id.c_str()), + const_cast(copyset_id.c_str()), + const_cast(getchunk_from_pool.c_str()), + const_cast(create_pool.c_str()), + NULL}; ::execv(cmd_dir.c_str(), arg); return 0; } -const std::string 
TestCluster::CopysetDirWithProtocol(const PeerId &peerId) { +const std::string TestCluster::CopysetDirWithProtocol(const PeerId& peerId) { std::string copysetdir; - butil::string_printf(&copysetdir, - "local://./%s-%d-%d", + butil::string_printf(&copysetdir, "local://./%s-%d-%d", butil::ip2str(peerId.addr.ip).c_str(), - peerId.addr.port, - 0); + peerId.addr.port, 0); return copysetdir; } -const std::string TestCluster::CopysetDirWithoutProtocol(const PeerId &peerId) { +const std::string TestCluster::CopysetDirWithoutProtocol(const PeerId& peerId) { std::string copysetdir; - butil::string_printf(&copysetdir, - "./%s-%d-%d", + butil::string_printf(&copysetdir, "./%s-%d-%d", butil::ip2str(peerId.addr.ip).c_str(), - peerId.addr.port, - 0); + peerId.addr.port, 0); return copysetdir; } -const std::string TestCluster::RemoveCopysetDirCmd(const PeerId &peerId) { +const std::string TestCluster::RemoveCopysetDirCmd(const PeerId& peerId) { std::string cmd; - butil::string_printf(&cmd, - "rm -fr %s-%d-%d", + butil::string_printf(&cmd, "rm -fr %s-%d-%d", butil::ip2str(peerId.addr.ip).c_str(), - peerId.addr.port, - 0); + peerId.addr.port, 0); return cmd; } LogicPoolID TestCluster::logicPoolID_ = 0; -CopysetID TestCluster::copysetID_ = 0; +CopysetID TestCluster::copysetID_ = 0; } // namespace chunkserver } // namespace curve diff --git a/test/chunkserver/chunkserver_test_util.h b/test/chunkserver/chunkserver_test_util.h index b329e069cd..eaf423bbd4 100644 --- a/test/chunkserver/chunkserver_test_util.h +++ b/test/chunkserver/chunkserver_test_util.h @@ -26,188 +26,182 @@ #include #include -#include -#include -#include #include +#include +#include #include +#include -#include "src/chunkserver/datastore/file_pool.h" #include "include/chunkserver/chunkserver_common.h" -#include "src/fs/local_filesystem.h" #include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/datastore/file_pool.h" +#include "src/fs/local_filesystem.h" namespace curve { namespace chunkserver { using curve::fs::LocalFileSystem; -std::string Exec(const char *cmd); +std::string Exec(const char* cmd); 
/** - * 当前FilePool需要事先格式化,才能使用,此函数用于事先格式化FilePool - * @param fsptr:本文文件系统指针 - * @param chunkfileSize:chunk文件的大小 - * @param metaPageSize:chunk文件的meta page大小 - * @param poolpath:文件池的路径,例如./chunkfilepool/ - * @param metaPath:meta文件路径,例如./chunkfilepool/chunkfilepool.meta - * @return 初始化成功返回FilePool指针,否则返回null + * The FilePool must be formatted before it can be used; this function + * formats the FilePool ahead of time + * @param fsptr: local filesystem pointer + * @param chunkfileSize: size of a chunk file + * @param metaPageSize: meta page size of a chunk file + * @param poolpath: path of the file pool, e.g. ./chunkfilepool/ + * @param metaPath: meta file path, e.g. ./chunkfilepool/chunkfilepool.meta + * @return the FilePool pointer on success, otherwise null */ -std::shared_ptr<FilePool> InitFilePool(std::shared_ptr<LocalFileSystem> fsptr, //NOLINT - int chunkfileCount, - int chunkfileSize, - int metaPageSize, - std::string poolpath, - std::string metaPath); - -int StartChunkserver(const char *ip, - int port, - const char *copysetdir, - const char *confs, - const int snapshotInterval, +std::shared_ptr<FilePool> InitFilePool( + std::shared_ptr<LocalFileSystem> fsptr, // NOLINT + int chunkfileCount, int chunkfileSize, int metaPageSize, + std::string poolpath, std::string metaPath); + +int StartChunkserver(const char* ip, int port, const char* copysetdir, + const char* confs, const int snapshotInterval, const int electionTimeoutMs); -butil::Status WaitLeader(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - PeerId *leaderId, - int electionTimeoutMs); +butil::Status WaitLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + PeerId* leaderId, int electionTimeoutMs);
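// A minimal usage sketch of the three helpers declared above (the numeric
// values, ports, and paths are illustrative only; the argument order follows
// the declarations above, and logicPoolId/copysetId/conf are assumed to be
// in scope):
//
//     std::shared_ptr<LocalFileSystem> fs(
//         LocalFsFactory::CreateFs(FileSystemType::EXT4, ""));
//     auto pool = InitFilePool(fs, 20, 16 * 1024 * 1024, 4096,
//                              "./chunkfilepool/",
//                              "./chunkfilepool/chunkfilepool.meta");
//     // pool is null on failure
//     StartChunkserver("127.0.0.1", 9030, "local://./copyset-0",
//                      "127.0.0.1:9030:0,127.0.0.1:9031:0", 600, 3000);
//     PeerId leader;
//     butil::Status st =
//         WaitLeader(logicPoolId, copysetId, conf, &leader, 3000);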
/** - * PeerNode 状态 - * 1. exit:未启动,或者被关闭 - * 2. running:正在运行 - * 3. stop:hang 住了 + * PeerNode states + * 1. exit: not started, or shut down + * 2. running: running + * 3. stop: hung */ enum class PeerNodeState { - EXIT = 0, // 退出 - RUNNING = 1, // 正在运行 - STOP = 2, // hang住 + EXIT = 0, // Exited + RUNNING = 1, // Running + STOP = 2, // Hung }; /** - * 一个 ChunkServer 进程,包含某个 Copyset 的某个副本 + * A ChunkServer process holding one replica of a Copyset */ struct PeerNode { PeerNode() : pid(0), options(), state(PeerNodeState::EXIT) {} - // Peer对应的进程id + // Process id of the peer pid_t pid; - // Peer的地址 + // Address of the peer PeerId peerId; - // copyset的集群配置 + // Cluster configuration of the copyset Configuration conf; - // copyset的基本配置 + // Basic configuration of the copyset CopysetNodeOptions options; - // PeerNode的状态 + // State of the PeerNode PeerNodeState state; }; /** - * 封装模拟 cluster 测试相关的接口 + * Wraps the interfaces used for simulated-cluster testing */ class TestCluster { public: - TestCluster(const std::string &clusterName, - const LogicPoolID logicPoolID, - const CopysetID copysetID, - const std::vector<PeerId> &peers); + TestCluster(const std::string& clusterName, const LogicPoolID logicPoolID, + const CopysetID copysetID, const std::vector<PeerId>& peers); virtual ~TestCluster() { StopAllPeers(); } public: /** - * 启动一个 Peer + * Start a Peer * @param peerId - * @param empty 初始化配置是否为空 - * @param: get_chunk_from_pool是否从FilePool获取chunk - * @param: createFilePool是否创建FilePool,重启的情况下不需要 - * @return 0:成功,-1 失败 + * @param empty whether the initial configuration is empty + * @param get_chunk_from_pool whether to obtain chunks from the FilePool + * @param createFilePool whether to create the FilePool; not needed when + * restarting + * @return 0 on success, -1 on failure */ - int StartPeer(const PeerId &peerId, - const bool empty = false, - bool getChunkFrom_pool = false, - bool createFilePool = true); + int StartPeer(const PeerId& peerId, const bool empty = false, + bool getChunkFrom_pool = false, bool createFilePool = true); /** - * 关闭一个 peer,使用 SIGINT + * Shut down a peer using SIGINT * @param peerId - * @return 0:成功,-1 失败 + * @return 0 on success, -1 on failure */ - int ShutdownPeer(const PeerId &peerId); - + int ShutdownPeer(const PeerId& peerId); /** - * hang 住一个 peer,使用 SIGSTOP + * Hang a peer using SIGSTOP * @param peerId - * @return 0:成功,-1 失败 + * @return 0 on success, -1 on failure */ - int StopPeer(const PeerId &peerId); + int StopPeer(const PeerId& peerId); /** - * 恢复 hang 住的 peer,使用 SIGCONT - * @param peerId - * @return 0:成功,-1 失败 - */ - int ContPeer(const PeerId &peerId); + * Resume a hung peer using SIGCONT + * @param peerId + * @return 0 on success, -1 on failure + */ + int ContPeer(const PeerId& peerId); /** - * 反复重试直到等到新的 leader 产生 - * @param leaderId 出参,返回 leader id - * @return 0:成功,-1 失败 + * Retry repeatedly until a new leader is elected + * @param leaderId output parameter that returns the leader id + * @return 0 on success, -1 on failure */ - int WaitLeader(PeerId *leaderId); + int WaitLeader(PeerId* leaderId); /** - * Stop 所有的 peer - * @return 0:成功,-1 失败 + * Stop all peers + * @return 0 on success, -1 on failure */ int StopAllPeers(); public: - /* 返回集群当前的配置 */ + /* Returns the current configuration of the cluster */ const Configuration CopysetConf() const; - /* 修改 PeerNode 配置相关的接口,单位: s */ + /* Interfaces for modifying the PeerNode configuration, unit: s */ int SetsnapshotIntervalS(int snapshotIntervalS); int SetElectionTimeoutMs(int electionTimeoutMs); int SetCatchupMargin(int catchupMargin); static int StartPeerNode(CopysetNodeOptions options, - const Configuration conf, - bool from_chunkfile_pool = false, - bool createFilePool = true); + const Configuration conf, + bool from_chunkfile_pool = false, + bool createFilePool = true); public: /** - * 返回执行 peer 的 copyset 路径 with protocol, ex: local://./127.0.0.1:9101:0 + * Returns the given peer's copyset path with protocol, ex: + * local://./127.0.0.1:9101:0 */ - static const std::string CopysetDirWithProtocol(const PeerId &peerId); + static const std::string CopysetDirWithProtocol(const PeerId& peerId); /** - * 返回执行 peer 的 copyset 路径 without protocol, ex: ./127.0.0.1:9101:0 + * Returns the given peer's copyset path without protocol, ex: + * ./127.0.0.1:9101:0 */ - static const std::string CopysetDirWithoutProtocol(const PeerId &peerId); + static const std::string CopysetDirWithoutProtocol(const PeerId& peerId); /** * remove peer's copyset dir's cmd */ - static const std::string RemoveCopysetDirCmd(const PeerId &peerid); + static const std::string RemoveCopysetDirCmd(const PeerId& peerid); private: - // 集群名字 - std::string clusterName_; - // 集群的peer集合 - std::set peers_; - // peer集合的映射map + // Cluster name + std::string clusterName_; + // The peer set of the cluster + std::set peers_; + // Map from peer id to PeerNode std::unordered_map<std::string, std::unique_ptr<PeerNode>> peersMap_; - // 快照间隔 + // Snapshot interval int snapshotIntervalS_; - // 选举超时时间 + // Election timeout int electionTimeoutMs_; - // catchup margin配置 + // Catchup margin configuration int catchupMargin_; - // 集群成员配置 + // Cluster member configuration Configuration conf_; - // 逻辑池id - static LogicPoolID logicPoolID_; - // 复制组id - static CopysetID copysetID_; + // Logical pool ID + static LogicPoolID logicPoolID_; + // Copyset ID + static CopysetID copysetID_; }; } // namespace chunkserver 
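// A minimal sketch of driving the TestCluster harness declared above (peer
// ids and pool/copyset ids are illustrative; the calls follow the class's
// interface as declared):
//
//     std::vector<PeerId> peers = {peer1, peer2, peer3};
//     TestCluster cluster("TestCluster", /*logicPoolID=*/2,
//                         /*copysetID=*/10001, peers);
//     for (const auto& p : peers) {
//         ASSERT_EQ(0, cluster.StartPeer(p));
//     }
//     PeerId leader;
//     ASSERT_EQ(0, cluster.WaitLeader(&leader));
//     ASSERT_EQ(0, cluster.ShutdownPeer(leader));  // SIGINT
//     ASSERT_EQ(0, cluster.WaitLeader(&leader));   // a new leader emerges
//     ASSERT_EQ(0, cluster.StopAllPeers());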
diff --git a/test/chunkserver/cli2_test.cpp b/test/chunkserver/cli2_test.cpp index d4d482d118..41d3b75ada 100644 --- a/test/chunkserver/cli2_test.cpp +++ b/test/chunkserver/cli2_test.cpp @@ -20,23 +20,24 @@ * Author: wudemiao */ -#include -#include -#include -#include -#include +#include "src/chunkserver/cli2.h" + #include #include #include +#include +#include +#include +#include +#include #include +#include "proto/copyset.pb.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli2.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -46,10 +47,12 @@ using curve::common::UUIDGenerator; class Cli2Test : public testing::Test { protected: static void SetUpTestCase() { - LOG(INFO) << "CliTest " << "SetUpTestCase"; + LOG(INFO) << "CliTest " + << "SetUpTestCase"; } static void TearDownTestCase() { - LOG(INFO) << "CliTest " << "TearDownTestCase"; + LOG(INFO) << "CliTest " + << "TearDownTestCase"; } virtual void SetUp() { UUIDGenerator uuidGenerator; @@ -84,13 +87,14 @@ class Cli2Test : public testing::Test { butil::AtExitManager atExitManager; TEST_F(Cli2Test, basic) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9033; - const char *confs = "127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:0"; + const char* confs = "127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:0"; int snapshotInterval = 600; /** - * 设置更大的默认选举超时时间,因为当前 ci 环境很容易出现超时 + * Set a larger default election timeout because the current CI environment + * is prone to timeout */ int electionTimeoutMs = 3000; @@ -103,12 +107,8 @@ TEST_F(Cli2Test, basic) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -118,12 +118,8 @@ TEST_F(Cli2Test, basic) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -133,12 +129,8 @@ TEST_F(Cli2Test, basic) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -148,16 +140,12 @@ TEST_F(Cli2Test, basic) { ASSERT_TRUE(false); } else if (0 == pid4) { std::string copysetdir = "local://./" + dir4; - StartChunkserver(ip, - port + 3, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 3, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /* Ensure that the processes will exit */ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3, pid_t pid4) { pid1_ = pid1; pid2_ = pid2; pid3_ = pid3; pid4_ = pid4; } ~WaitpidGuard() { int waitState; kill(pid1_, SIGINT); waitpid(pid1_, &waitState, 0); kill(pid2_, SIGINT); waitpid(pid2_, &waitState, 0); kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); kill(pid4_, SIGINT); waitpid(pid4_, &waitState, 0); } + 
private: pid_t pid1_; pid_t pid2_; pid_t pid3_; pid_t pid4_; }; WaitpidGuard waitpidGuard(pid1, pid2, pid3, pid4); @@ -197,11 +186,12 @@ TEST_F(Cli2Test, basic) { WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); ASSERT_TRUE(status.ok()); - /* 等待 transfer leader 成功 */ + /* Wait for transfer leader to succeed */ int waitTransferLeader = 3000 * 1000; /** - * 配置变更因为设置一条 log entry 的完成复制,所以设置较长的 timeout - * 时间,以免在 ci 环境偶尔会出现超时出错 + * A configuration change has to finish replicating a log entry, so use + * a longer timeout to avoid occasional timeout errors in the CI + * environment */ braft::cli::CliOptions opt; opt.timeout_ms = 6000; opt.max_retry = 3; /* remove peer */ { Peer peer; peer.set_address("127.0.0.1:9035:0"); - butil::Status st = curve::chunkserver::RemovePeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "remove peer: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::RemovePeer( + logicPoolId, copysetId, conf, peer, opt); + LOG(INFO) << "remove peer: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); - /* 可能移除的是 leader,如果移除的是 leader,那么需要等到新的 leader 产生, - * 否则下面的 add peer 测试就会失败, wait 较长时间,是为了保证 remove - * leader 之后新 leader 选举成功,切 become leader 的 flush config - * 完成 */ + /* The removed peer may be the leader. If the leader is removed, we + * must wait until a new leader is elected, otherwise the add peer + * test below will fail. The long wait ensures that after removing + * the leader a new leader is successfully elected and the + * become-leader flush of the config has completed */ ::usleep(1.5 * 1000 * electionTimeoutMs); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); ASSERT_TRUE(status.ok()); } /* add peer */ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0"); Peer peer; peer.set_address("127.0.0.1:9035:0"); - butil::Status st = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "add peer: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::AddPeer(logicPoolId, copysetId, + conf, peer, opt); + LOG(INFO) << "add peer: " << st.error_code() << ", " << st.error_str(); ASSERT_TRUE(st.ok()); } - /* 重复 add 同一个 peer */ + /* Add the same peer repeatedly */ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0"); Peer peer; peer.set_address("127.0.0.1:9035:0"); - butil::Status st = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "add one peer repeat: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::AddPeer(logicPoolId, copysetId, + conf, peer, opt); + LOG(INFO) << "add one peer repeat: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); } /* transfer leader */ { Peer peer1; peer1.set_address("127.0.0.1:9033:0"); Peer peer2; peer2.set_address("127.0.0.1:9034:0"); Peer peer3; peer3.set_address("127.0.0.1:9035:0"); { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer1, - opt); - LOG(INFO) << "transfer leader: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer1, opt); + LOG(INFO) << "transfer leader: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); - /* transfer leader 只是讲 rpc 发送给leader,并不会等 leader transfer - * 
成功才返回,所以这里需要等,除此之外,并不能立马去查 leader,因为 - * leader transfer 之后,可能返回之前的 leader,除此之外 transfer - * leader 成功了之后,become leader 进行时,leader 已经可查,但是 - * become leader 会执行 flush 当前 conf 来充当 noop,如果这个时候 - * 立马进行下一个 transfer leader,会被组织,因为同时只能有一个配置 - * 变更在进行 */ + /* TransferLeader only sends the rpc to the leader; it does not + * wait for the transfer to finish before returning, so we must + * wait here. Besides, we cannot query the leader immediately, + * because right after the transfer the previous leader may still + * be returned. Moreover, once the transfer succeeds and + * become-leader is in progress, the leader is already queryable, + * but become-leader flushes the current conf to act as the noop + * entry; starting the next transfer leader right away would be + * blocked, because only one configuration change can be in + * progress at a time */ ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer1.address().c_str(), leader.to_string().c_str()); } { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer2, - opt); - LOG(INFO) << "transfer leader: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer2, opt); + LOG(INFO) << "transfer leader: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer2.address().c_str(), leader.to_string().c_str()); } { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer3, - opt); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer3, opt); LOG(INFO) << "transfer leader: " << st.error_str(); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer3.address().c_str(), leader.to_string().c_str()); }
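        // The three blocks above repeat one wait-and-verify pattern; a
        // compact sketch of it, with a hypothetical `target` peer (the other
        // names are as in this test):
        //
        //     butil::Status st = curve::chunkserver::TransferLeader(
        //         logicPoolId, copysetId, conf, target, opt);
        //     ASSERT_TRUE(st.ok());
        //     ::usleep(waitTransferLeader);  // let the transfer settle
        //     ASSERT_TRUE(WaitLeader(logicPoolId, copysetId, conf, &leader,
        //                            electionTimeoutMs).ok());
        //     ASSERT_STREQ(target.address().c_str(),
        //                  leader.to_string().c_str());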
- /* transfer 给 leader 给 leader,仍然返回成功 */ + /* Transferring leadership to the current leader still returns success */ { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer3, - opt); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer3, opt); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer3.address().c_str(), leader.to_string().c_str()); } @@ -366,33 +324,29 @@ TEST_F(Cli2Test, basic) { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:0"); Configuration newConf; - newConf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9036:0"); // NOLINT - butil::Status st = curve::chunkserver::ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - opt); - LOG(INFO) << "change peers: " - << st.error_code() << ", " << st.error_str(); + newConf.parse_from( + "127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9036:0"); // NOLINT + butil::Status st = curve::chunkserver::ChangePeers( + logicPoolId, copysetId, conf, newConf, opt); + LOG(INFO) << "change peers: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); } /* reset peer */ { - // 等待change peer完成,否则用例会失败 + // Wait for the change peer to complete, otherwise the test case + // will fail sleep(3); Peer peer; peer.set_address("127.0.0.1:9033:0"); - butil::Status status = curve::chunkserver::ResetPeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "reset peer: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = curve::chunkserver::ResetPeer( + logicPoolId, copysetId, conf, peer, opt); + LOG(INFO) << "reset peer: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); } - /* 异常分支测试 */ + /* Exception branch tests */ /* get leader - conf empty */ { Configuration conf; Peer leader; butil::Status status = GetLeader(logicPoolId, copysetId, conf, &leader); ASSERT_FALSE(status.ok()); ASSERT_EQ(EINVAL, status.error_code()); } - /* get leader - 非法的地址 */ + /* get leader - illegal address */ { Configuration conf; Peer leader; - conf.parse_from("127.0.0.1:65540:0,127.0.0.1:65541:0,127.0.0.1:65542:0"); //NOLINT + conf.parse_from( + "127.0.0.1:65540:0,127.0.0.1:65541:0,127.0.0.1:65542:0"); // NOLINT butil::Status status = GetLeader(logicPoolId, copysetId, conf, &leader); ASSERT_FALSE(status.ok()); ASSERT_EQ(-1, status.error_code()); } - /* add peer - 不存在的 peer */ + /* add peer - non-existent peer */ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:2"); - /* 添加一个根本不存在的节点 */ + /* Add a node that does not exist at all */ Peer peer; peer.set_address("127.0.0.1:9039:2"); - butil::Status status = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peer, - opt); + butil::Status status = curve::chunkserver::AddPeer( + logicPoolId, copysetId, conf, peer, opt); ASSERT_FALSE(status.ok()); LOG(INFO) << "add peer: " << status.error_code() << ", " << status.error_str(); } - /* transfer leader - 不存在的 peer */ + /* transfer leader - non-existent peer */ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:2"); Peer peer; peer.set_address("127.0.0.1:9039:0"); { - butil::Status - status = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer, - opt); + butil::Status status = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer, opt); ASSERT_FALSE(status.ok()); LOG(INFO) << "transfer leader: " << status.error_code() << 
", " << status.error_str(); } } - /* change peers - 不存在的 peer */ + /*Change peers - non-existent peers*/ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9036:0"); Configuration newConf; - newConf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9039:0"); // NOLINT - butil::Status status = curve::chunkserver::ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - opt); + newConf.parse_from( + "127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9039:0"); // NOLINT + butil::Status status = curve::chunkserver::ChangePeers( + logicPoolId, copysetId, conf, newConf, opt); ASSERT_FALSE(status.ok()); LOG(INFO) << "change peers: " << status.error_code() << ", " << status.error_str(); } - /* reset peer - newConf为空 */ + /*Reset peer - newConf is empty*/ { Configuration conf; Peer peer; peer.set_address("127.0.0.1:9033:0"); - butil::Status - status = curve::chunkserver::ResetPeer(logicPoolId, - copysetId, - conf, - peer, - opt); + butil::Status status = curve::chunkserver::ResetPeer( + logicPoolId, copysetId, conf, peer, opt); LOG(INFO) << "reset peer: " << status.error_code() << ", " << status.error_str(); ASSERT_EQ(EINVAL, status.error_code()); } - /* reset peer peer地址非法 */ + /*Illegal reset peer address*/ { Peer peer; peer.set_address("127.0.0.1:65540:0"); - butil::Status status = curve::chunkserver::ResetPeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "reset peer: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = curve::chunkserver::ResetPeer( + logicPoolId, copysetId, conf, peer, opt); + LOG(INFO) << "reset peer: " << status.error_code() << ", " + << status.error_str(); ASSERT_EQ(-1, status.error_code()); } - /* reset peer peer地址不存在 */ + /*Reset peer address does not exist*/ { Peer peer; peer.set_address("127.0.0.1:9040:0"); - butil::Status status = curve::chunkserver::ResetPeer(logicPoolId, - copysetId, - conf, - peer, - opt); - LOG(INFO) << "reset peer: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = curve::chunkserver::ResetPeer( + logicPoolId, copysetId, conf, peer, opt); + LOG(INFO) << "reset peer: " << status.error_code() << ", " + << status.error_str(); ASSERT_EQ(EHOSTDOWN, status.error_code()); } - /* snapshot peer地址非法 */ + /*Illegal snapshot peer address*/ { Peer peer; peer.set_address("127.0.0.1:65540:0"); - butil::Status status = curve::chunkserver::Snapshot(logicPoolId, - copysetId, - peer, - opt); - LOG(INFO) << "snapshot: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = + curve::chunkserver::Snapshot(logicPoolId, copysetId, peer, opt); + LOG(INFO) << "snapshot: " << status.error_code() << ", " + << status.error_str(); ASSERT_EQ(-1, status.error_code()); } - /* snapshot peer地址不存在 */ + /*The snapshot peer address does not exist*/ { Peer peer; peer.set_address("127.0.0.1:9040:0"); - butil::Status status = curve::chunkserver::Snapshot(logicPoolId, - copysetId, - peer, - opt); - LOG(INFO) << "snapshot: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = + curve::chunkserver::Snapshot(logicPoolId, copysetId, peer, opt); + LOG(INFO) << "snapshot: " << status.error_code() << ", " + << status.error_str(); ASSERT_EQ(EHOSTDOWN, status.error_code()); } /* snapshot all normal */ @@ -529,8 +461,8 @@ TEST_F(Cli2Test, basic) { Peer peer; peer.set_address("127.0.0.1:9040:0"); butil::Status status = curve::chunkserver::SnapshotAll(peer, opt); - LOG(INFO) << "snapshot: " - << status.error_code() << ", " << 
status.error_str(); + LOG(INFO) << "snapshot: " << status.error_code() << ", " + << status.error_str(); ASSERT_EQ(EHOSTDOWN, status.error_code()); } } diff --git a/test/chunkserver/cli_test.cpp b/test/chunkserver/cli_test.cpp index 111ec23773..7aa218a446 100644 --- a/test/chunkserver/cli_test.cpp +++ b/test/chunkserver/cli_test.cpp @@ -20,22 +20,23 @@ * Author: wudemiao */ -#include -#include -#include -#include +#include "src/chunkserver/cli.h" + #include #include #include +#include +#include +#include +#include #include +#include "proto/copyset.pb.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -45,10 +46,12 @@ using curve::common::UUIDGenerator; class CliTest : public testing::Test { protected: static void SetUpTestCase() { - LOG(INFO) << "CliTest " << "SetUpTestCase"; + LOG(INFO) << "CliTest " + << "SetUpTestCase"; } static void TearDownTestCase() { - LOG(INFO) << "CliTest " << "TearDownTestCase"; + LOG(INFO) << "CliTest " + << "TearDownTestCase"; } virtual void SetUp() { UUIDGenerator uuidGenerator; @@ -78,13 +81,14 @@ class CliTest : public testing::Test { butil::AtExitManager atExitManager; TEST_F(CliTest, basic) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9030; - const char *confs = "127.0.0.1:9030:0,127.0.0.1:9031:0,127.0.0.1:9032:0"; + const char* confs = "127.0.0.1:9030:0,127.0.0.1:9031:0,127.0.0.1:9032:0"; int snapshotInterval = 600; /** - * 设置更大的默认选举超时时间,因为当前 ci 环境很容易出现超时 + * Set a larger default election timeout because the current CI environment + * is prone to timeout */ int electionTimeoutMs = 3000; @@ -97,12 +101,8 @@ TEST_F(CliTest, basic) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -112,12 +112,8 @@ TEST_F(CliTest, basic) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -127,16 +123,12 @@ TEST_F(CliTest, basic) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -153,6 +145,7 @@ TEST_F(CliTest, basic) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -172,11 +165,12 @@ TEST_F(CliTest, basic) { WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); ASSERT_TRUE(status.ok()); - /* 等待 transfer leader 成功 */ + /* Waiting for transfer leader to succeed*/ int waitTransferLeader = 3000 * 1000; /** - * 配置变更因为设置一条 log entry 的完成复制,所以设置较长的 timeout - * 时间,以免在 ci 环境偶尔会出现超时出错 + * 
The configuration change waits for a log entry to finish replicating, + * so set a longer timeout to avoid occasional timeout failures in the + * CI environment */ braft::cli::CliOptions opt; opt.timeout_ms = 6000; @@ -185,23 +179,18 @@ TEST_F(CliTest, basic) { /* remove peer */ { PeerId peerId("127.0.0.1:9032:0"); - butil::Status st = curve::chunkserver::RemovePeer(logicPoolId, - copysetId, - conf, - peerId, - opt); - LOG(INFO) << "remove peer: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::RemovePeer( + logicPoolId, copysetId, conf, peerId, opt); + LOG(INFO) << "remove peer: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); - /* 可能移除的是 leader,如果移除的是 leader,那么需要等到新的 leader 产生, - * 否则下面的 add peer 测试就会失败, wait 较长时间,是为了保证 remove - * leader 之后新 leader 选举成功,切 become leader 的 flush config - * 完成 */ + /* The removed peer may be the leader. If so, we must wait until a + * new leader emerges, otherwise the add peer test below will fail. + * The long wait ensures that after the removal a new leader is + * elected and its become-leader flush of the config completes. */ ::usleep(1.5 * 1000 * electionTimeoutMs); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); ASSERT_TRUE(status.ok()); } @@ -210,27 +199,20 @@ TEST_F(CliTest, basic) { Configuration conf; conf.parse_from("127.0.0.1:9030:0,127.0.0.1:9031:0"); PeerId peerId("127.0.0.1:9032:0"); - butil::Status st = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peerId, - opt); - LOG(INFO) << "add peer: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::AddPeer(logicPoolId, copysetId, + conf, peerId, opt); + LOG(INFO) << "add peer: " << st.error_code() << ", " << st.error_str(); ASSERT_TRUE(st.ok()); } - /* 重复 add 同一个 peer */ + /*Repeatedly add the same peer*/ { Configuration conf; conf.parse_from("127.0.0.1:9030:0,127.0.0.1:9031:0"); PeerId peerId("127.0.0.1:9032:0"); - butil::Status st = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peerId, - opt); - LOG(INFO) << "add one peer repeat: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::AddPeer(logicPoolId, copysetId, + conf, peerId, opt); + LOG(INFO) << "add one peer repeat: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); } /* transfer leader */ @@ -242,95 +224,75 @@ TEST_F(CliTest, basic) { PeerId peer3("127.0.0.1:9032:0"); { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer1, - opt); - LOG(INFO) << "transfer leader: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer1, opt); + LOG(INFO) << "transfer leader: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); - /* transfer leader 只是讲 rpc 发送给leader,并不会等 leader transfer - * 成功才返回,所以这里需要等,除此之外,并不能立马去查 leader,因为 - * leader transfer 之后,可能返回之前的 leader,除此之外 transfer - * leader 成功了之后,become leader 进行时,leader 已经可查,但是 - * become leader 会执行 flush 当前 conf 来充当 noop,如果这个时候 - * 立马进行下一个 transfer leader,会被组织,因为同时只能有一个配置 - * 变更在进行 */ + /* TransferLeader only sends an rpc to the leader; it does not
 * wait for the transfer to succeed before returning, so we have to + * wait here. We also cannot query the leader right away, because + * right after the transfer the previous leader may still be + * returned. Even after the transfer succeeds, while become leader + * is in progress the leader is already queryable, but become + * leader flushes the current conf to act as a noop, so starting + * the next transfer leader immediately would be rejected: only one + * configuration change can be in progress at a time. */ ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer1.to_string().c_str(), leader.to_string().c_str()); } { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer2, - opt); - LOG(INFO) << "transfer leader: " - << st.error_code() << ", " << st.error_str(); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer2, opt); + LOG(INFO) << "transfer leader: " << st.error_code() << ", " + << st.error_str(); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer2.to_string().c_str(), leader.to_string().c_str()); } { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer3, - opt); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer3, opt); LOG(INFO) << "transfer leader: " << st.error_str(); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", " + << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer3.to_string().c_str(), leader.to_string().c_str()); } - /* transfer 给 leader 给 leader,仍然返回成功 */ + /* Transferring leader to the current leader still returns success */ { LOG(INFO) << "start transfer leader"; - butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer3, - opt); + butil::Status st = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer3, opt); ASSERT_TRUE(st.ok()); ::usleep(waitTransferLeader); - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - electionTimeoutMs); - LOG(INFO) << "get leader: " - << status.error_code() << ", " << status.error_str(); + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, + &leader, electionTimeoutMs); + LOG(INFO) << "get leader: " << status.error_code() << ", "
+ << status.error_str(); ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer3.to_string().c_str(), leader.to_string().c_str()); } } - /* 异常分支测试 */ + /*Abnormal Branch Test*/ /* get leader - conf empty */ { Configuration conf; @@ -338,41 +300,35 @@ TEST_F(CliTest, basic) { ASSERT_FALSE(status.ok()); ASSERT_EQ(EINVAL, status.error_code()); } - /* get leader - 非法的地址 */ + /*Get leader - illegal address*/ { Configuration conf; - conf.parse_from("127.0.0.1:65540:0,127.0.0.1:65541:0,127.0.0.1:65542:0"); //NOLINT + conf.parse_from( + "127.0.0.1:65540:0,127.0.0.1:65541:0,127.0.0.1:65542:0"); // NOLINT butil::Status status = GetLeader(logicPoolId, copysetId, conf, &leader); ASSERT_FALSE(status.ok()); ASSERT_EQ(-1, status.error_code()); } - /* add peer - 不存在的 peer */ + /*Add peer - non-existent peer*/ { Configuration conf; conf.parse_from("127.0.0.1:9030:0,127.0.0.1:9031:0,127.0.0.1:9030:2"); - /* 添加一个根本不存在的节点 */ + /*Add a non-existent node*/ PeerId peerId("127.0.0.1:9039:2"); - butil::Status status = curve::chunkserver::AddPeer(logicPoolId, - copysetId, - conf, - peerId, - opt); + butil::Status status = curve::chunkserver::AddPeer( + logicPoolId, copysetId, conf, peerId, opt); ASSERT_FALSE(status.ok()); LOG(INFO) << "add peer: " << status.error_code() << ", " << status.error_str(); } - /* transfer leader - 不存在的 peer */ + /*Transfer leader - non-existent peer*/ { Configuration conf; conf.parse_from("127.0.0.1:9030:0,127.0.0.1:9031:0,127.0.0.1:9032:0"); PeerId peer1("127.0.0.1:9039:0"); { - butil::Status - status = curve::chunkserver::TransferLeader(logicPoolId, - copysetId, - conf, - peer1, - opt); + butil::Status status = curve::chunkserver::TransferLeader( + logicPoolId, copysetId, conf, peer1, opt); ASSERT_FALSE(status.ok()); LOG(INFO) << "add peer: " << status.error_code() << ", " << status.error_str(); diff --git a/test/chunkserver/client.cpp b/test/chunkserver/client.cpp index 7f8c2e6243..1452c24e72 100644 --- a/test/chunkserver/client.cpp +++ b/test/chunkserver/client.cpp @@ -20,49 +20,47 @@ * Author: wudemiao */ -#include -#include -#include #include #include +#include +#include +#include -#include "src/chunkserver/copyset_node.h" #include "proto/chunk.pb.h" #include "proto/copyset.pb.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node.h" #include "test/chunkserver/chunkserver_test_util.h" DEFINE_int32(request_size, 10, "Size of each requst"); DEFINE_int32(timeout_ms, 500, "Timeout for each request"); DEFINE_int32(election_timeout_ms, 3000, "election timeout ms"); DEFINE_int32(write_percentage, 100, "Percentage of fetch_add"); -DEFINE_string(confs, - "127.0.0.1:18200:0,127.0.0.1:18201:0,127.0.0.1:18202:0", +DEFINE_string(confs, "127.0.0.1:18200:0,127.0.0.1:18201:0,127.0.0.1:18202:0", "Configuration of the raft group"); -using curve::chunkserver::CopysetRequest; -using curve::chunkserver::CopysetResponse; -using curve::chunkserver::CopysetService_Stub; +using curve::chunkserver::CHUNK_OP_STATUS; +using curve::chunkserver::CHUNK_OP_TYPE; using curve::chunkserver::ChunkRequest; using curve::chunkserver::ChunkResponse; using curve::chunkserver::ChunkService_Stub; -using curve::chunkserver::PeerId; -using curve::chunkserver::LogicPoolID; -using curve::chunkserver::CopysetID; using curve::chunkserver::Configuration; -using curve::chunkserver::CHUNK_OP_TYPE; -using curve::chunkserver::CHUNK_OP_STATUS; using curve::chunkserver::COPYSET_OP_STATUS; +using curve::chunkserver::CopysetID; +using curve::chunkserver::CopysetRequest; +using curve::chunkserver::CopysetResponse; +using 
curve::chunkserver::CopysetService_Stub; +using curve::chunkserver::LogicPoolID; +using curve::chunkserver::PeerId; -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { gflags::ParseCommandLineFlags(&argc, &argv, true); - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - uint64_t chunkId = 1; - uint64_t sn = 1; - char fillCh = 'a'; + CopysetID copysetId = 100001; + uint64_t chunkId = 1; + uint64_t sn = 1; + char fillCh = 'a'; PeerId leader; curve::chunkserver::Configuration conf; @@ -70,9 +68,7 @@ int main(int argc, char *argv[]) { LOG(FATAL) << "conf parse failed: " << FLAGS_confs; } - - - // 创建 copyset + // Create copyset { std::vector peers; conf.list_peers(&peers); @@ -105,8 +101,10 @@ int main(int argc, char *argv[]) { if (cntl.Failed()) { LOG(FATAL) << "create copyset fialed: " << cntl.ErrorText(); } - if (response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS //NOLINT - || response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { //NOLINT + if (response.status() == + COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS // NOLINT + || response.status() == + COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { // NOLINT LOG(INFO) << "create copyset success: " << response.status(); } else { LOG(FATAL) << "create copyset failed: "; @@ -116,11 +114,9 @@ int main(int argc, char *argv[]) { // wait leader ::usleep(1000 * FLAGS_election_timeout_ms); - butil::Status status = curve::chunkserver::WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - FLAGS_election_timeout_ms); //NOLINT + butil::Status status = + curve::chunkserver::WaitLeader(logicPoolId, copysetId, conf, &leader, + FLAGS_election_timeout_ms); // NOLINT LOG(INFO) << "leader is: " << leader.to_string(); if (0 != status.error_code()) { LOG(FATAL) << "Wait leader failed"; @@ -176,8 +172,5 @@ int main(int argc, char *argv[]) { } } - return 0; } - - diff --git a/test/chunkserver/clone/clone_copyer_test.cpp b/test/chunkserver/clone/clone_copyer_test.cpp index 3c15969d9a..033664c6a3 100644 --- a/test/chunkserver/clone/clone_copyer_test.cpp +++ b/test/chunkserver/clone/clone_copyer_test.cpp @@ -20,12 +20,13 @@ * Author: yangyaokai */ -#include -#include +#include "src/chunkserver/clone_copyer.h" + #include +#include +#include #include "include/client/libcurve.h" -#include "src/chunkserver/clone_copyer.h" #include "src/chunkserver/clone_core.h" #include "test/chunkserver/clone/clone_test_util.h" #include "test/client/mock/mock_file_client.h" @@ -46,21 +47,16 @@ const uint64_t EXPIRED_USE = 5; class MockDownloadClosure : public DownloadClosure { public: explicit MockDownloadClosure(AsyncDownloadContext* context) - : DownloadClosure(nullptr, nullptr, context, nullptr) - , isRun_(false) {} + : DownloadClosure(nullptr, nullptr, context, nullptr), isRun_(false) {} void Run() { CHECK(!isRun_) << "closure has been invoked."; isRun_ = true; } - bool IsFailed() { - return isFailed_; - } + bool IsFailed() { return isFailed_; } - bool IsRun() { - return isRun_; - } + bool IsRun() { return isRun_; } void Reset() { isFailed_ = false; @@ -71,16 +67,14 @@ class MockDownloadClosure : public DownloadClosure { bool isRun_; }; -class CloneCopyerTest : public testing::Test { +class CloneCopyerTest : public testing::Test { public: void SetUp() { curveClient_ = std::make_shared(); s3Client_ = std::make_shared(); Aws::InitAPI(awsOptions_); } - void TearDown() { - Aws::ShutdownAPI(awsOptions_); - } + void TearDown() { Aws::ShutdownAPI(awsOptions_); } protected: std::shared_ptr curveClient_; @@ -133,8 +127,8 @@ 
TEST_F(CloneCopyerTest, BasicTest) { ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 用例:读curve上的数据,读取成功 - * 预期:调用Open和Read读取数据 + /* Use case: Reading data on curve, successful reading + * Expected: Open and Read are called to fetch the data */ context.location = "test:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test", _, true)) @@ -151,12 +145,11 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_FALSE(closure.IsFailed()); closure.Reset(); - /* 用例:再次读前面的文件,但是ret值为-1 - * 预期:直接Read,返回失败 + /* Use case: Read the previous file again, but the ret value is -1 + * Expected: Read directly without reopening, return failure */ context.location = "test:0@cs"; - EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)) - .Times(0); + EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)).Times(0); EXPECT_CALL(*curveClient_, AioRead(_, _, _)) .WillOnce(Invoke([](int fd, CurveAioContext* context, curve::client::UserDataType dataType) { @@ -169,21 +162,20 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 用例:读curve上的数据,Open的时候失败 - * 预期:返回-1 + /* Use case: Reading data on curve, failed during Open + * Expected: Return -1 */ context.location = "test2:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test2", _, true)) .WillOnce(Return(-1)); - EXPECT_CALL(*curveClient_, AioRead(_, _, _)) - .Times(0); + EXPECT_CALL(*curveClient_, AioRead(_, _, _)).Times(0); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 用例:读curve上的数据,Read的时候失败 - * 预期:返回-1 + /* Use case: Reading data on curve, fails during Read + * Expected: Return -1 */ context.location = "test2:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test2", _, true)) @@ -195,14 +187,13 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - - /* 用例:读s3上的数据,读取成功 - * 预期:返回0 + /* Use case: Reading data on s3, successful reading + * Expected: Return 0 */ context.location = "test@s3"; EXPECT_CALL(*s3Client_, GetObjectAsync(_)) .WillOnce(Invoke( - [&] (const std::shared_ptr& context) { + [&](const std::shared_ptr& context) { context->retCode = 0; context->cb(s3Client_.get(), context); })); @@ -211,13 +202,13 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_FALSE(closure.IsFailed()); closure.Reset(); - /* 用例:读s3上的数据,读取失败 - * 预期:返回-1 + /* Use case: Read data on s3, read failed + * Expected: Return -1 */ context.location = "test@s3"; EXPECT_CALL(*s3Client_, GetObjectAsync(_)) .WillOnce(Invoke( - [&] (const std::shared_ptr& context) { + [&](const std::shared_ptr& context) { context->retCode = -1; context->cb(s3Client_.get(), context); })); @@ -226,18 +217,14 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - delete [] buf; + delete[] buf; } // fini test { - EXPECT_CALL(*curveClient_, Close(1)) - .Times(1); - EXPECT_CALL(*curveClient_, Close(2)) - .Times(1); - EXPECT_CALL(*curveClient_, UnInit()) - .Times(1); - EXPECT_CALL(*s3Client_, Deinit()) - .Times(1); + EXPECT_CALL(*curveClient_, Close(1)).Times(1); + EXPECT_CALL(*curveClient_, Close(2)).Times(1); + EXPECT_CALL(*curveClient_, UnInit()).Times(1); + EXPECT_CALL(*s3Client_, Deinit()).Times(1); ASSERT_EQ(0, copyer.Fini()); } } @@ -250,16 +237,15 @@ TEST_F(CloneCopyerTest, DisableTest) { options.curveUser.owner = ROOT_OWNER; options.curveUser.password = ROOT_PWD; options.curveFileTimeoutSec = EXPIRED_USE; - // 禁用curveclient和s3adapter + // Disable curveclient and s3adapter options.curveClient = nullptr; options.s3Client = nullptr; // curvefs init success -
EXPECT_CALL(*curveClient_, Init(_)) - .Times(0); + EXPECT_CALL(*curveClient_, Init(_)).Times(0); ASSERT_EQ(0, copyer.Init(options)); - // 从上s3或者curve请求下载数据会返回失败 + // Requests to download data from s3 or curve will return a failure { char* buf = new char[4096]; AsyncDownloadContext context; @@ -268,30 +254,27 @@ TEST_F(CloneCopyerTest, DisableTest) { context.buf = buf; MockDownloadClosure closure(&context); - /* 用例:读curve上的数据,读取失败 + /* Use case: Read data on curve, read failed */ context.location = "test:0@cs"; - EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)) - .Times(0); - EXPECT_CALL(*curveClient_, AioRead(_, _, _)) - .Times(0); + EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)).Times(0); + EXPECT_CALL(*curveClient_, AioRead(_, _, _)).Times(0); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 用例:读s3上的数据,读取失败 + /* Use case: Read data on s3, read failed */ context.location = "test@s3"; - EXPECT_CALL(*s3Client_, GetObjectAsync(_)) - .Times(0); + EXPECT_CALL(*s3Client_, GetObjectAsync(_)).Times(0); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - delete [] buf; + delete[] buf; } - // fini 可以成功 + // Fini can succeed ASSERT_EQ(0, copyer.Fini()); } @@ -308,7 +291,7 @@ TEST_F(CloneCopyerTest, ExpiredTest) { // curvefs init success EXPECT_CALL(*curveClient_, Init(StrEq(CURVE_CONF))) - .WillOnce(Return(LIBCURVE_ERROR::OK)); + .WillOnce(Return(LIBCURVE_ERROR::OK)); ASSERT_EQ(0, copyer.Init(options)); { @@ -320,18 +303,18 @@ TEST_F(CloneCopyerTest, ExpiredTest) { MockDownloadClosure closure(&context); /* Case: Read the same chunk after it expired - * Expect: Re-Open the curve file - */ + * Expect: Re-Open the curve file + */ context.location = "test:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test", _, true)) - .WillOnce(Return(1)); + .WillOnce(Return(1)); EXPECT_CALL(*curveClient_, AioRead(_, _, _)) - .WillOnce(Invoke([](int fd, CurveAioContext* context, - curve::client::UserDataType dataType) { - context->ret = 1024; - context->cb(context); - return LIBCURVE_ERROR::OK; - })); + .WillOnce(Invoke([](int fd, CurveAioContext* context, + curve::client::UserDataType dataType) { + context->ret = 1024; + context->cb(context); + return LIBCURVE_ERROR::OK; + })); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); ASSERT_FALSE(closure.IsFailed()); @@ -341,26 +324,23 @@ TEST_F(CloneCopyerTest, ExpiredTest) { context.location = "test:0@cs"; std::this_thread::sleep_for(std::chrono::seconds(1)); EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)) - .WillOnce(Return(2)); + .WillOnce(Return(2)); EXPECT_CALL(*curveClient_, AioRead(_, _, _)) - .WillOnce(Invoke([](int fd, CurveAioContext* context, - curve::client::UserDataType dataType) { - context->ret = 1024; - context->cb(context); - return LIBCURVE_ERROR::OK; - })); + .WillOnce(Invoke([](int fd, CurveAioContext* context, + curve::client::UserDataType dataType) { + context->ret = 1024; + context->cb(context); + return LIBCURVE_ERROR::OK; + })); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); closure.Reset(); - delete [] buf; + delete[] buf; } // fini - EXPECT_CALL(*curveClient_, Close(2)) - .Times(1); - EXPECT_CALL(*curveClient_, UnInit()) - .Times(1); - EXPECT_CALL(*s3Client_, Deinit()) - .Times(1); + EXPECT_CALL(*curveClient_, Close(2)).Times(1); + EXPECT_CALL(*curveClient_, UnInit()).Times(1); + EXPECT_CALL(*s3Client_, Deinit()).Times(1); ASSERT_EQ(0, copyer.Fini()); } diff --git
a/test/chunkserver/clone/clone_core_test.cpp b/test/chunkserver/clone/clone_core_test.cpp index 86d6a70898..2632acb635 100644 --- a/test/chunkserver/clone/clone_core_test.cpp +++ b/test/chunkserver/clone/clone_core_test.cpp @@ -20,21 +20,22 @@ * Author: yangyaokai */ -#include -#include +#include "src/chunkserver/clone_core.h" + #include +#include #include +#include #include -#include "src/chunkserver/clone_core.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/op_request.h" -#include "test/chunkserver/mock_copyset_node.h" +#include "src/fs/local_filesystem.h" #include "test/chunkserver/clone/clone_test_util.h" #include "test/chunkserver/clone/mock_clone_copyer.h" #include "test/chunkserver/datastore/mock_datastore.h" -#include "src/fs/local_filesystem.h" +#include "test/chunkserver/mock_copyset_node.h" namespace curve { namespace chunkserver { @@ -46,7 +47,7 @@ using curve::fs::LocalFsFactory; ACTION_TEMPLATE(SaveBraftTask, HAS_1_TEMPLATE_PARAMS(int, k), AND_1_VALUE_PARAMS(value)) { auto input = static_cast(::testing::get(args)); - auto output = static_cast(value); + auto output = static_cast(value); output->data->swap(*input.data); output->done = input.done; } @@ -83,18 +84,19 @@ class CloneCoreTest .WillRepeatedly(Return(LAST_INDEX)); } - std::shared_ptr - GenerateReadRequest(CHUNK_OP_TYPE optype, off_t offset, size_t length) { - ChunkRequest *readRequest = new ChunkRequest(); + std::shared_ptr GenerateReadRequest(CHUNK_OP_TYPE optype, + off_t offset, + size_t length) { + ChunkRequest* readRequest = new ChunkRequest(); readRequest->set_logicpoolid(LOGICPOOL_ID); readRequest->set_copysetid(COPYSET_ID); readRequest->set_chunkid(CHUNK_ID); readRequest->set_optype(optype); readRequest->set_offset(offset); readRequest->set_size(length); - brpc::Controller *cntl = new brpc::Controller(); - ChunkResponse *response = new ChunkResponse(); - FakeChunkClosure *closure = new FakeChunkClosure(); + brpc::Controller* cntl = new brpc::Controller(); + ChunkResponse* response = new ChunkResponse(); + FakeChunkClosure* closure = new FakeChunkClosure(); closure->SetCntl(cntl); closure->SetRequest(readRequest); closure->SetResponse(response); @@ -105,19 +107,19 @@ class CloneCoreTest } void SetCloneParam(std::shared_ptr readRequest) { - ChunkRequest *request = - const_cast(readRequest->GetChunkRequest()); + ChunkRequest* request = + const_cast(readRequest->GetChunkRequest()); request->set_clonefilesource("/test"); request->set_clonefileoffset(0); } - void CheckTask(const braft::Task &task, off_t offset, size_t length, - char *buf) { + void CheckTask(const braft::Task& task, off_t offset, size_t length, + char* buf) { butil::IOBuf data; ChunkRequest request; auto req = ChunkOpRequest::Decode(*task.data, &request, &data, 0, PeerId("127.0.0.1:8200:0")); - auto preq = dynamic_cast(req.get()); + auto preq = dynamic_cast(req.get()); ASSERT_TRUE(preq != nullptr); ASSERT_EQ(LOGICPOOL_ID, request.logicpoolid()); @@ -139,19 +141,20 @@ class CloneCoreTest }; /** - * 测试CHUNK_OP_READ类型请求,请求读取的chunk不是clone chunk - * result:不会从远端拷贝数据,直接从本地读取数据,结果返回成功 + * Test a CHUNK_OP_READ request where the chunk being read is not a clone + * chunk. Result: no data is copied from the remote end; the data is read + * directly from the local chunk and success is returned */ TEST_P(CloneCoreTest, ReadChunkTest1) { off_t offset = 0; size_t length = 5 * blocksize_; - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); - std::shared_ptr readRequest - =
GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - // 不会从源端拷贝数据 + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + // Will not copy data from the source EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); - // 获取chunk信息 + // Obtain chunk information CSChunkInfo info; info.isClone = false; info.metaPageSize = pagesize_; @@ -159,16 +162,16 @@ TEST_P(CloneCoreTest, ReadChunkTest1) { info.blockSize = blocksize_; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(1); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 不会产生PasteChunkRequest + // No PasteChunkRequest will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -176,15 +179,17 @@ TEST_P(CloneCoreTest, ReadChunkTest1) { } /** - * 测试CHUNK_OP_READ类型请求,请求读取的chunk是clone chunk - * case1:请求读取的区域全部被写过 - * result1:全部从本地chunk读取 - * case2:请求读取的区域都未被写过 - * result2:全部从源端读取,产生paste请求 - * case3:请求读取的区域有部分被写过,部分未被写过 - * result3:写过区域从本地chunk读取,未写过区域从源端读取,产生paste请求 - * case4:请求读取的区域部分被写过,请求的偏移未与pagesize对齐 - * result4:返回错误 + * Test CHUNK_OP_READ requests where the chunk being read is a clone chunk + * Case1: the whole requested range has been written + * Result1: everything is read from the local chunk + * Case2: none of the requested range has been written + * Result2: everything is read from the source and a paste request is + * generated + * Case3: the requested range is partially written and partially unwritten + * Result3: written regions are read from the local chunk, unwritten regions + * are read from the source, and a paste request is generated + * Case4: the requested range is partially written and the requested offset + * is not aligned with pagesize + * Result4: an error is returned */ TEST_P(CloneCoreTest, ReadChunkTest2) { off_t offset = 0; @@ -195,32 +200,33 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { info.chunkSize = chunksize_; info.blockSize = blocksize_; info.bitmap = std::make_shared(chunksize_ / blocksize_); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); // case1 { info.bitmap->Set(); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the + // closure std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 - char *chunkData = new char[length]; + // Reading Chunk Files + char* chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), Return(CSErrorCode::Success))); - // 更新 applied index +
// Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 不会产生PasteChunkRequest + // No PasteChunkRequest will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -237,26 +243,27 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { // case2 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the + // closure std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) .WillRepeatedly( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -264,16 +271,17 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure will be handed over to the + // concurrency layer for processing, Since the node here is mock, it is + // necessary to proactively execute task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_EQ( @@ -289,33 +297,34 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { { info.bitmap->Clear(); info.bitmap->Set(0, 2); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the + // closure std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) .WillRepeatedly( DoAll(SetArgPointee<1>(info), 
Return(CSErrorCode::Success))); - // 读chunk文件 - char chunkData[pagesize_ + 2 * blocksize_]; // NOLINT(runtime/arrays) - memset(chunkData, 'a', pagesize_ + 2 * blocksize_); + // Reading Chunk Files + char chunkData[pagesize_ + 2 * blocksize_]; // NOLINT(runtime/arrays) + memset(chunkData, 'a', pagesize_ + 2 * blocksize_); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, 0, pagesize_ + 2 * blocksize_)) .WillOnce( DoAll(SetArrayArgument<2>( chunkData, chunkData + pagesize_ + 2 * blocksize_), Return(CSErrorCode::Success))); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -323,24 +332,30 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure will be handed over to the + // concurrency layer for processing, Since the node here is mock, it is + // necessary to proactively execute task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); - ASSERT_EQ(memcmp(chunkData, - closure->resContent_.attachment.to_string().c_str(), //NOLINT - 3 * blocksize_), 0); + ASSERT_EQ( + memcmp( + chunkData, + closure->resContent_.attachment.to_string().c_str(), // NOLINT + 3 * blocksize_), + 0); ASSERT_EQ(memcmp(cloneData, - closure->resContent_.attachment.to_string().c_str() + 3 * blocksize_, //NOLINT - 2 * blocksize_), 0); + closure->resContent_.attachment.to_string().c_str() + + 3 * blocksize_, // NOLINT + 2 * blocksize_), + 0); } // case4 { @@ -349,7 +364,8 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { length = 4 * blocksize_; info.bitmap->Clear(); info.bitmap->Set(0, 2); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the + // closure std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); @@ -357,18 +373,18 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(0); - // 不产生PasteChunkRequest + // Do not generate PasteChunkRequest braft::Task task; EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(-1, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, @@ -377,8 +393,9 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { } /** - * 测试CHUNK_OP_READ类型请求,请求读取的chunk不存在,但是请求中包含源端数据地址 - * 预期结果:从源端下载数据,产生paste请求 + * Test a CHUNK_OP_READ request where the requested chunk does not
 * exist but the request carries the source data address. Expected result: + * data is downloaded from the source and a paste request is generated */ TEST_P(CloneCoreTest, ReadChunkTest3) { off_t offset = 0; @@ -389,32 +406,33 @@ TEST_P(CloneCoreTest, ReadChunkTest3) { info.chunkSize = chunksize_; info.blockSize = blocksize_; info.bitmap = std::make_shared(chunksize_ / pagesize_); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); // case1 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the + // closure std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); SetCloneParam(readRequest); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) .WillRepeatedly(Return(CSErrorCode::ChunkNotExistError)); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -422,16 +440,17 @@ TEST_P(CloneCoreTest, ReadChunkTest3) { ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure will be handed over to the + // concurrency layer for processing, Since the node here is mock, it is + // necessary to proactively execute task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_EQ( @@ -445,13 +464,13 @@ TEST_P(CloneCoreTest, ReadChunkTest3) { } /** - * 执行HandleReadRequest过程中出现错误 - * case1:GetChunkInfo时出错 - * result1:返回-1,response状态改为CHUNK_OP_STATUS_FAILURE_UNKNOWN - * case2:Download时出错 - * result2:返回-1,response状态改为CHUNK_OP_STATUS_FAILURE_UNKNOWN - * case3:ReadChunk时出错 - * result3:返回-1,response状态改为CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Errors occur while executing HandleReadRequest + * Case1: GetChunkInfo fails + * Result1: returns -1 and the response status is set to + * CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Case2: the download fails + * Result2: returns -1 and the response status is set to + * CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Case3: ReadChunk fails + * Result3: returns -1 and the response status is set to + * CHUNK_OP_STATUS_FAILURE_UNKNOWN */ TEST_P(CloneCoreTest, ReadChunkErrorTest) { off_t offset = 0; @@ -479,8 +498,8 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { ASSERT_EQ(-1, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure
*closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, @@ -494,10 +513,10 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); closure->SetFailed(); })); @@ -505,8 +524,8 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, @@ -522,17 +541,17 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { .Times(2) .WillRepeatedly( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) .WillOnce(Return(CSErrorCode::InternalError)); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -540,16 +559,17 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure will be handed over to the + // concurrency layer for processing, Since the node here is mock, it is + // necessary to proactively execute task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); delete[] cloneData; @@ -557,19 +577,20 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { } /** - * 测试CHUNK_OP_RECOVER类型请求,请求的chunk不是clone chunk - * result:不会从远端拷贝数据,也不会从本地读取数据,直接返回成功 + * Test a CHUNK_OP_RECOVER request where the requested chunk is not a clone + * chunk. Result: no data is copied from the remote end or read locally; + * success is returned directly */ TEST_P(CloneCoreTest, RecoverChunkTest1) { off_t offset = 0; size_t length = 5 * pagesize_; - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); - // 不会从源端拷贝数据 + std::shared_ptr
core = + std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); + // Will not copy data from the source EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); - // 获取chunk信息 + // Obtain chunk information CSChunkInfo info; info.isClone = false; info.metaPageSize = pagesize_; @@ -577,14 +598,14 @@ TEST_P(CloneCoreTest, RecoverChunkTest1) { info.blockSize = blocksize_; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); - // 不会产生PasteChunkRequest + // No PasteChunkRequest will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -592,11 +613,11 @@ TEST_P(CloneCoreTest, RecoverChunkTest1) { } /** - * 测试CHUNK_OP_RECOVER类型请求,请求的chunk是clone chunk - * case1:请求恢复的区域全部被写过 - * result1:不会拷贝数据,直接返回成功 - * case2:请求恢复的区域全部或部分未被写过 - * result2:从远端拷贝数据,并产生paste请求 + * Test CHUNK_OP_RECOVER requests where the requested chunk is a clone chunk + * Case1: the whole range requested for recovery has been written + * Result1: no data is copied; success is returned directly + * Case2: the range requested for recovery is wholly or partially unwritten + * Result2: data is copied from the remote end and a paste request is + * generated */ TEST_P(CloneCoreTest, RecoverChunkTest2) { off_t offset = 0; @@ -607,26 +628,27 @@ TEST_P(CloneCoreTest, RecoverChunkTest2) { info.chunkSize = chunksize_; info.blockSize = blocksize_; info.bitmap = std::make_shared(chunksize_ / blocksize_); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); // case1 { info.bitmap->Set(); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the + // closure std::shared_ptr readRequest = GenerateReadRequest( CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不会读chunk文件 + // Will not read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); - // 不会产生PasteChunkRequest + // No PasteChunkRequest will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -636,23 +658,24 @@ TEST_P(CloneCoreTest, RecoverChunkTest2) { // case2 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the + // closure std::shared_ptr readRequest = GenerateReadRequest( CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT - char *cloneData = new char[length]; + char* cloneData = new char[length];
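// ---------------------------------------------------------------------------
// Editor's note (not part of the patch): a minimal sketch of the bitmap check
// that ReadChunkTest2 and RecoverChunkTest2 exercise. The CSChunkInfo fields
// are the ones used in the tests above; the helper name NeedDownload and the
// Bitmap methods NextClearBit/NO_POS are assumptions modeled on
// curve::common::Bitmap, not confirmed API.
//
//   bool NeedDownload(const CSChunkInfo& info, off_t offset, size_t length) {
//       uint32_t beginIndex = offset / info.blockSize;
//       uint32_t endIndex = (offset + length - 1) / info.blockSize;
//       // case1: every block in the range is written -> nothing to download;
//       // case2: some block is still clear -> download from the source and
//       // propose a paste request afterwards.
//       return info.bitmap->NextClearBit(beginIndex, endIndex) !=
//              curve::common::Bitmap::NO_POS;
//   }
// ---------------------------------------------------------------------------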
memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不会读chunk文件 + // Will not read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -660,14 +683,16 @@ TEST_P(CloneCoreTest, RecoverChunkTest2) { ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); - // closure被转交给PasteRequest处理,这里closure还未执行 + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); + // The closure has been forwarded to PasteRequest for processing, and + // the closure has not been executed yet ASSERT_FALSE(closure->isDone_); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure will be handed over to the + // concurrency layer for processing, Since the node here is mock, it is + // necessary to proactively execute task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); @@ -678,8 +703,9 @@ TEST_P(CloneCoreTest, RecoverChunkTest2) { } } -// case1: read chunk时,从远端拷贝数据,但是不会产生paste请求 -// case2: recover chunk时,从远端拷贝数据,会产生paste请求 +// Case1: when reading a chunk, data is copied from the remote end but no +// paste request is generated +// Case2: when recovering a chunk, copying data from the remote end generates +// a paste request TEST_P(CloneCoreTest, DisablePasteTest) { off_t offset = 0; size_t length = 5 * blocksize_; @@ -689,39 +715,40 @@ TEST_P(CloneCoreTest, DisablePasteTest) { info.chunkSize = chunksize_; info.blockSize = blocksize_; info.bitmap = std::make_shared(chunksize_ / blocksize_); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, false, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, false, copyer_); // case1 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the + // closure std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) .WillRepeatedly( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 不会产生paste chunk请求 + // No paste chunk request will be generated EXPECT_CALL(*node_, Propose(_)).Times(0);
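// ---------------------------------------------------------------------------
// Editor's note (not part of the patch): the behaviour DisablePasteTest pins
// down, written out as a hypothetical helper. With the paste switch disabled
// (the second constructor argument of CloneCore above is false), a plain read
// still downloads from the source but skips the paste proposal, while a
// recover always proposes a paste.
//
//   bool ShouldPaste(CHUNK_OP_TYPE op, bool enablePaste) {
//       if (op == CHUNK_OP_TYPE::CHUNK_OP_RECOVER) {
//           return true;         // case2: recover always proposes a paste
//       }
//       return enablePaste;      // case1: reads paste only when enabled
//   }
// ---------------------------------------------------------------------------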
ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -732,23 +759,24 @@ TEST_P(CloneCoreTest, DisablePasteTest) { // case2 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the + // closure std::shared_ptr readRequest = GenerateReadRequest( CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT - char *cloneData = new char[length]; + char* cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure *closure) { + .WillOnce(Invoke([&](DownloadClosure* closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext *context = closure->GetDownloadContext(); + AsyncDownloadContext* context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不会读chunk文件 + // Will not read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -756,14 +784,16 @@ TEST_P(CloneCoreTest, DisablePasteTest) { ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); - FakeChunkClosure *closure = - reinterpret_cast(readRequest->Closure()); - // closure被转交给PasteRequest处理,这里closure还未执行 + FakeChunkClosure* closure = + reinterpret_cast(readRequest->Closure()); + // The closure has been forwarded to PasteRequest for processing, and + // the closure has not been executed yet ASSERT_FALSE(closure->isDone_); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure will be handed over to the + // concurrency layer for processing, Since the node here is mock, it is + // necessary to proactively execute task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); @@ -775,8 +805,7 @@ TEST_P(CloneCoreTest, DisablePasteTest) { } INSTANTIATE_TEST_CASE_P( - CloneCoreTest, - CloneCoreTest, + CloneCoreTest, CloneCoreTest, ::testing::Values( // chunk size block size, metapagesize std::make_tuple(16U * 1024 * 1024, 4096U, 4096U), diff --git a/test/chunkserver/clone/clone_manager_test.cpp b/test/chunkserver/clone/clone_manager_test.cpp index f41bc1bed2..6b29058364 100644 --- a/test/chunkserver/clone/clone_manager_test.cpp +++ b/test/chunkserver/clone/clone_manager_test.cpp @@ -20,12 +20,14 @@ * Author: yangyaokai */ -#include -#include +#include "src/chunkserver/clone_manager.h" + #include +#include +#include + #include -#include "src/chunkserver/clone_manager.h" #include "src/chunkserver/op_request.h" namespace curve { @@ -33,22 +35,18 @@ namespace chunkserver { class UTCloneTask : public CloneTask { public: - UTCloneTask() : CloneTask(nullptr, nullptr, nullptr) - , sleepTime_(0) {} + UTCloneTask() : CloneTask(nullptr, nullptr, nullptr), sleepTime_(0) {} void Run() { - std::this_thread::sleep_for( - std::chrono::milliseconds(sleepTime_)); + std::this_thread::sleep_for(std::chrono::milliseconds(sleepTime_)); isComplete_ =
true; } - void SetSleepTime(uint32_t sleepTime) { - sleepTime_ = sleepTime; - } + void SetSleepTime(uint32_t sleepTime) { sleepTime_ = sleepTime; } private: uint32_t sleepTime_; }; -class CloneManagerTest : public testing::Test { +class CloneManagerTest : public testing::Test { public: void SetUp() {} void TearDown() {} @@ -58,32 +56,34 @@ TEST_F(CloneManagerTest, BasicTest) { CloneOptions options; options.checkPeriod = 100; CloneManager cloneMgr; - // 如果线程数设置为0,启动线程池失败 + // If the number of threads is set to 0, starting the thread pool fails { options.threadNum = 0; ASSERT_EQ(cloneMgr.Init(options), 0); ASSERT_EQ(cloneMgr.Run(), -1); } - // 队列深度为0,启动线程池会失败 + // Queue depth is 0, starting thread pool will fail { options.threadNum = 5; options.queueCapacity = 0; ASSERT_EQ(cloneMgr.Init(options), 0); ASSERT_EQ(cloneMgr.Run(), -1); } - // 线程数和队列深度都大于0,可以启动线程池 + // If the number of threads and queue depth are both greater than 0, the + // thread pool can be started { options.threadNum = 5; options.queueCapacity = 100; ASSERT_EQ(cloneMgr.Init(options), 0); ASSERT_EQ(cloneMgr.Run(), 0); - // 线程池启动运行后,重复Run直接返回成功 + // After the thread pool starts running, repeating the run directly + // returns success ASSERT_EQ(cloneMgr.Run(), 0); } - // 通过Fini暂停任务 + // Pause tasks through Fini { ASSERT_EQ(cloneMgr.Fini(), 0); - // 重复Fini直接返回成功 + // Repeated Fini directly returns success ASSERT_EQ(cloneMgr.Fini(), 0); } } @@ -99,9 +99,9 @@ TEST_F(CloneManagerTest, TaskTest) { std::shared_ptr req = std::make_shared(); - // 测试 GenerateCloneTask 和 IssueCloneTask + // Testing GenerateCloneTask and IssueCloneTask { - // options.core为nullptr,则产生的任务也是nullptr + // If options.core is nullptr, the resulting task is also nullptr std::shared_ptr task = cloneMgr.GenerateCloneTask(req, nullptr); ASSERT_EQ(task, nullptr); @@ -111,55 +111,58 @@ TEST_F(CloneManagerTest, TaskTest) { task = cloneMgr.GenerateCloneTask(req, nullptr); ASSERT_NE(task, nullptr); - // 自定义任务测试 + // Custom task testing task = std::make_shared(); ASSERT_FALSE(task->IsComplete()); - // 如果clone manager还未启动,则无法发布任务 + // If the clone manager has not yet started, the task cannot be + // published ASSERT_FALSE(cloneMgr.IssueCloneTask(task)); - // 启动以后就可以发布任务 + // After startup, tasks can be published ASSERT_EQ(cloneMgr.Run(), 0); ASSERT_TRUE(cloneMgr.IssueCloneTask(task)); - // 等待一点时间,任务执行完成,检查任务状态以及是否从队列中移除 - std::this_thread::sleep_for( - std::chrono::milliseconds(200)); + // Wait for a moment, the task execution is completed, check the task + // status and whether it has been removed from the queue + std::this_thread::sleep_for(std::chrono::milliseconds(200)); ASSERT_TRUE(task->IsComplete()); - // 无法发布空任务 + // Unable to publish empty task ASSERT_FALSE(cloneMgr.IssueCloneTask(nullptr)); } - // 测试自定义的测试任务 + // Test custom test tasks { - // 初始化执行时间各不相同的任务 + // Initialize tasks with varying execution times std::shared_ptr task1 = std::make_shared(); std::shared_ptr task2 = std::make_shared(); std::shared_ptr task3 = std::make_shared(); task1->SetSleepTime(100); task2->SetSleepTime(300); task3->SetSleepTime(500); - // 同时发布所有任务 + // Publish all tasks simultaneously ASSERT_TRUE(cloneMgr.IssueCloneTask(task1)); ASSERT_TRUE(cloneMgr.IssueCloneTask(task2)); ASSERT_TRUE(cloneMgr.IssueCloneTask(task3)); - // 此时任务还在执行中,此时引用计数为2 + // At this point, the task is still executing and the reference count is + // 2 ASSERT_FALSE(task1->IsComplete()); ASSERT_FALSE(task2->IsComplete()); ASSERT_FALSE(task3->IsComplete()); - // 等待220ms,task1执行成功,其他还没完成;220ms基本可以保证task1执行完 - 
std::this_thread::sleep_for( - std::chrono::milliseconds(220)); + // Waiting for 220ms, task1 successfully executed, but other tasks have + // not been completed yet; 220ms basically guarantees the completion of + // task1 execution + std::this_thread::sleep_for(std::chrono::milliseconds(220)); ASSERT_TRUE(task1->IsComplete()); ASSERT_FALSE(task2->IsComplete()); ASSERT_FALSE(task3->IsComplete()); - // 再等待200ms,task2执行成功,task3还在执行中 - std::this_thread::sleep_for( - std::chrono::milliseconds(200)); + // Wait another 200ms, task2 successfully executed, and task3 is still + // executing + std::this_thread::sleep_for(std::chrono::milliseconds(200)); ASSERT_TRUE(task1->IsComplete()); ASSERT_TRUE(task2->IsComplete()); ASSERT_FALSE(task3->IsComplete()); - // 再等待200ms,所有任务执行成功,任务全被移出队列 - std::this_thread::sleep_for( - std::chrono::milliseconds(200)); + // Wait for another 200ms, all tasks are successfully executed, and all + // tasks are moved out of the queue + std::this_thread::sleep_for(std::chrono::milliseconds(200)); ASSERT_TRUE(task1->IsComplete()); ASSERT_TRUE(task2->IsComplete()); ASSERT_TRUE(task3->IsComplete()); diff --git a/test/chunkserver/clone/op_request_test.cpp b/test/chunkserver/clone/op_request_test.cpp index 6746594097..1b509e4b0f 100644 --- a/test/chunkserver/clone/op_request_test.cpp +++ b/test/chunkserver/clone/op_request_test.cpp @@ -20,16 +20,18 @@ * Author: yangyaokai */ -#include -#include +#include "src/chunkserver/op_request.h" + #include +#include +#include + #include -#include "src/chunkserver/op_request.h" #include "test/chunkserver/clone/clone_test_util.h" #include "test/chunkserver/clone/mock_clone_manager.h" -#include "test/chunkserver/mock_copyset_node.h" #include "test/chunkserver/datastore/mock_datastore.h" +#include "test/chunkserver/mock_copyset_node.h" namespace curve { namespace chunkserver { @@ -67,28 +69,23 @@ class OpRequestTest FakeCopysetNode(); FakeCloneManager(); } - void TearDown() { - } + void TearDown() {} void FakeCopysetNode() { - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); - EXPECT_CALL(*node_, GetDataStore()) - .WillRepeatedly(Return(datastore_)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, GetDataStore()).WillRepeatedly(Return(datastore_)); EXPECT_CALL(*node_, GetConcurrentApplyModule()) .WillRepeatedly(Return(concurrentApplyModule_.get())); EXPECT_CALL(*node_, GetAppliedIndex()) .WillRepeatedly(Return(LAST_INDEX)); PeerId peer(PEER_STRING); - EXPECT_CALL(*node_, GetLeaderId()) - .WillRepeatedly(Return(peer)); + EXPECT_CALL(*node_, GetLeaderId()).WillRepeatedly(Return(peer)); } void FakeCloneManager() { EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) .WillRepeatedly(Return(nullptr)); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillRepeatedly(Return(true)); } protected: @@ -99,11 +96,11 @@ class OpRequestTest std::shared_ptr node_; std::shared_ptr datastore_; std::shared_ptr cloneMgr_; - std::shared_ptr concurrentApplyModule_; + std::shared_ptr concurrentApplyModule_; }; TEST_P(OpRequestTest, CreateCloneTest) { - // 创建CreateCloneChunkRequest + // Create CreateCloneChunkRequest LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -118,20 +115,17 @@ TEST_P(OpRequestTest, CreateCloneTest) { request->set_location(location); request->set_size(size); request->set_sn(sn); - brpc::Controller *cntl = new brpc::Controller(); - ChunkResponse *response = new ChunkResponse(); 
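// ---------------------------------------------------------------------------
// Illustrative sketch (not from this patch): FakeCopysetNode() and
// FakeCloneManager() above use WillRepeatedly to install a default answer
// that holds for any number of calls, unlike WillOnce. A minimal standalone
// analogue; Node, MockNode, and IsLeader are made-up names for this sketch.
#include <gmock/gmock.h>
#include <gtest/gtest.h>

class Node {
 public:
    virtual ~Node() = default;
    virtual bool IsLeader() const = 0;
};

class MockNode : public Node {
 public:
    MOCK_CONST_METHOD0(IsLeader, bool());
};

TEST(WillRepeatedlyDemo, DefaultAnswer) {
    MockNode node;
    EXPECT_CALL(node, IsLeader()).WillRepeatedly(::testing::Return(true));
    EXPECT_TRUE(node.IsLeader());  // the expectation is satisfied on every call
    EXPECT_TRUE(node.IsLeader());
}
// ---------------------------------------------------------------------------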
- UnitTestClosure *closure = new UnitTestClosure(); + brpc::Controller* cntl = new brpc::Controller(); + ChunkResponse* response = new ChunkResponse(); + UnitTestClosure* closure = new UnitTestClosure(); closure->SetCntl(cntl); closure->SetRequest(request); closure->SetResponse(response); std::shared_ptr opReq = - std::make_shared(node_, - cntl, - request, - response, - closure); + std::make_shared(node_, cntl, request, + response, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -152,23 +146,22 @@ TEST_P(OpRequestTest, CreateCloneTest) { ASSERT_EQ(sn, request->sn()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: Request to forward request and return + * CHUNK_OP_STATUS_REDIRECTED */ { - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(false)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(false)); // PeerId leaderId(PEER_STRING); // EXPECT_CALL(*node_, GetLeaderId()) // .WillOnce(Return(leaderId)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); opReq->Process(); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, @@ -176,28 +169,27 @@ TEST_P(OpRequestTest, CreateCloneTest) { // ASSERT_STREQ(closure->response_->redirect().c_str(), PEER_STRING); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == true - * 预期: 会调用Propose,且不会调用closure + * Test Process + * Scenario: node_->IsLeaderTerm() == true + * Expected: Propose will be called and closure will not be called */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::Task task; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveArg<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveArg<0>(&task)); opReq->Process(); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(closure->response_->has_status()); - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // Since the node here is mock, it is necessary to proactively execute + // task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_TRUE(closure->isDone_); @@ -251,8 +243,7 @@ TEST_P(OpRequestTest, CreateCloneTest) { // set expection EXPECT_CALL(*datastore_, CreateCloneChunk(_, _, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InvalidArgError)); - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(0); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(0); opReq->OnApply(3, closure); @@ -264,15 +255,15 @@ TEST_P(OpRequestTest, CreateCloneTest) { closure->response_->status()); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk成功 - * 预期:无返回 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk successful + * Expected: No return */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, CreateCloneChunk(_, _, _, _, _)) .WillOnce(Return(CSErrorCode::Success)); @@ -280,15 +271,15 @@ TEST_P(OpRequestTest, CreateCloneTest) { opReq->OnApplyFromLog(datastore_, *request, data); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回InternalError - * 预期:进程退出 + * Testing OnApplyFromLog 
+ * Scenario: CreateCloneChunk failed, returning InternalError + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, CreateCloneChunk(_, _, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InternalError)); @@ -296,37 +287,33 @@ TEST_P(OpRequestTest, CreateCloneTest) { ASSERT_DEATH(opReq->OnApplyFromLog(datastore_, *request, data), ""); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回其他错误 - * 预期:进程退出 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk failed with other errors returned + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, CreateCloneChunk(_, _, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InvalidArgError)); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } TEST_P(OpRequestTest, PasteChunkTest) { - // 生成临时的readrequest - ChunkResponse *response = new ChunkResponse(); + // Generate temporary readrequest + ChunkResponse* response = new ChunkResponse(); std::shared_ptr readChunkRequest = - std::make_shared(node_, - nullptr, - nullptr, - nullptr, - response, - nullptr); - - // 创建PasteChunkRequest + std::make_shared(node_, nullptr, nullptr, nullptr, + response, nullptr); + + // Create PasteChunkRequest LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -343,17 +330,14 @@ TEST_P(OpRequestTest, PasteChunkTest) { butil::IOBuf cloneData; cloneData.append(str); - UnitTestClosure *closure = new UnitTestClosure(); + UnitTestClosure* closure = new UnitTestClosure(); closure->SetRequest(request); closure->SetResponse(response); std::shared_ptr opReq = - std::make_shared(node_, - request, - response, - &cloneData, - closure); + std::make_shared(node_, request, response, + &cloneData, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -376,23 +360,22 @@ TEST_P(OpRequestTest, PasteChunkTest) { ASSERT_STREQ(str.c_str(), data.to_string().c_str()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: Request to forward request and return + * CHUNK_OP_STATUS_REDIRECTED */ { - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(false)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(false)); // PeerId leaderId(PEER_STRING); // EXPECT_CALL(*node_, GetLeaderId()) // .WillOnce(Return(leaderId)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); opReq->Process(); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, @@ -400,85 +383,83 @@ TEST_P(OpRequestTest, PasteChunkTest) { // ASSERT_STREQ(closure->response_->redirect().c_str(), PEER_STRING); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == true - * 预期: 会调用Propose,且不会调用closure + * Test Process + * Scenario: node_->IsLeaderTerm() == true + * Expected: Propose will be called and closure will not be called */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::Task task; - EXPECT_CALL(*node_, Propose(_)) - 
.WillOnce(SaveArg<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveArg<0>(&task)); opReq->Process(); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(response->has_status()); - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // Since the node here is mock, it is necessary to proactively execute + // task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_TRUE(closure->isDone_); } /** - * 测试OnApply - * 用例:CreateCloneChunk成功 - * 预期:返回 CHUNK_OP_STATUS_SUCCESS ,并更新apply index + * Test OnApply + * Scenario: CreateCloneChunk successful + * Expected: return CHUNK_OP_STATUS_SUCCESS and update the apply index */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillOnce(Return(CSErrorCode::Success)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response->status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response->status()); } /** - * 测试OnApply - * 用例:CreateCloneChunk失败,返回InternalError - * 预期:进程退出 + * Test OnApply + * Scenario: CreateCloneChunk failed, returning InternalError + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InternalError)); ASSERT_DEATH(opReq->OnApply(3, closure), ""); } /** - * 测试OnApply - * 用例:CreateCloneChunk失败,返回其他错误 - * 预期:进程退出 + * Test OnApply + * Scenario: CreateCloneChunk failed with other errors returned + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InvalidArgError)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -486,15 +467,15 @@ TEST_P(OpRequestTest, PasteChunkTest) { response->status()); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk成功 - * 预期:无返回 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk successful + * Expected: No return */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillOnce(Return(CSErrorCode::Success)); @@ -502,15 +483,15 @@ TEST_P(OpRequestTest, PasteChunkTest) { opReq->OnApplyFromLog(datastore_, *request, data); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回InternalError - * 预期:进程退出 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk failed, returning InternalError + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InternalError)); @@ -518,27 +499,27 @@ TEST_P(OpRequestTest, PasteChunkTest) { ASSERT_DEATH(opReq->OnApplyFromLog(datastore_, *request, data), ""); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回其他错误 - * 预期:进程退出 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk failed with other errors returned + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations 
EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InvalidArgError)); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } TEST_P(OpRequestTest, ReadChunkTest) { - // 创建CreateCloneChunkRequest + // Create CreateCloneChunkRequest LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -551,21 +532,17 @@ TEST_P(OpRequestTest, ReadChunkTest) { request->set_optype(CHUNK_OP_READ); request->set_offset(offset); request->set_size(length); - brpc::Controller *cntl = new brpc::Controller(); - ChunkResponse *response = new ChunkResponse(); - UnitTestClosure *closure = new UnitTestClosure(); + brpc::Controller* cntl = new brpc::Controller(); + ChunkResponse* response = new ChunkResponse(); + UnitTestClosure* closure = new UnitTestClosure(); closure->SetCntl(cntl); closure->SetRequest(request); closure->SetResponse(response); std::shared_ptr opReq = - std::make_shared(node_, - cloneMgr_.get(), - cntl, - request, - response, - closure); + std::make_shared(node_, cloneMgr_.get(), cntl, + request, response, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -585,17 +562,16 @@ TEST_P(OpRequestTest, ReadChunkTest) { ASSERT_EQ(length, request->size()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: Request to forward request and return + * CHUNK_OP_STATUS_REDIRECTED */ { // set expection - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(false)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(false)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); opReq->Process(); @@ -616,19 +592,16 @@ TEST_P(OpRequestTest, ReadChunkTest) { closure->Reset(); // set expection - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::LeaderLeaseStatus status; status.state = braft::LEASE_EXPIRED; EXPECT_CALL(*node_, GetLeaderLeaseStatus(_)) .WillOnce(SetArgPointee<0>(status)); - EXPECT_CALL(*node_, IsLeaseLeader(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*node_, IsLeaseLeader(_)).WillOnce(Return(false)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); opReq->Process(); @@ -649,20 +622,17 @@ TEST_P(OpRequestTest, ReadChunkTest) { closure->Reset(); // set expection - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::LeaderLeaseStatus status; status.state = braft::LEASE_NOT_READY; EXPECT_CALL(*node_, GetLeaderLeaseStatus(_)) .WillOnce(SetArgPointee<0>(status)); - EXPECT_CALL(*node_, IsLeaseLeader(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*node_, IsLeaseLeader(_)).WillOnce(Return(false)); braft::Task task; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveArg<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveArg<0>(&task)); opReq->Process(); @@ -688,20 +658,17 @@ TEST_P(OpRequestTest, ReadChunkTest) { closure->Reset(); // set expection - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::LeaderLeaseStatus status; status.state = braft::LEASE_DISABLED; EXPECT_CALL(*node_, GetLeaderLeaseStatus(_)) .WillOnce(SetArgPointee<0>(status)); - 
EXPECT_CALL(*node_, IsLeaseLeader(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*node_, IsLeaseLeader(_)).WillOnce(Return(false)); braft::Task task; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveArg<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveArg<0>(&task)); opReq->Process(); @@ -724,36 +691,34 @@ TEST_P(OpRequestTest, ReadChunkTest) { info.bitmap = std::make_shared(chunksize_ / blocksize_); /** - * 测试Process - * 用例: node_->IsLeaderTerm() == true, - * 请求的 apply index 小于等于 node的 apply index - * 预期: 不会走一致性协议,请求提交给concurrentApplyModule_处理 + * Test Process + * Scenario: node_->IsLeaderTerm() == true, + * The requested application index is less than or equal to the + * node's application index Expected: Will not follow the consistency + * protocol, request submission to ConcurrentApplyModule_ handle */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); braft::LeaderLeaseStatus status; status.state = braft::LEASE_VALID; EXPECT_CALL(*node_, GetLeaderLeaseStatus(_)) .WillOnce(SetArgPointee<0>(status)); - EXPECT_CALL(*node_, IsLeaseLeader(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*node_, IsLeaseLeader(_)).WillOnce(Return(true)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - char *chunkData = new char[length]; + char* chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), @@ -775,21 +740,21 @@ TEST_P(OpRequestTest, ReadChunkTest) { } /** - * 测试OnApply - * 用例:请求的 chunk 不是 clone chunk - * 预期:从本地读chunk,返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is not a clone chunk + * Expected: Chunk read locally, returning CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 - char *chunkData = new char[length]; + // Reading Chunk Files + char* chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), @@ -797,7 +762,7 @@ TEST_P(OpRequestTest, ReadChunkTest) { opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -811,22 +776,23 @@ TEST_P(OpRequestTest, ReadChunkTest) { } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap都为1 - * 预期:从本地读chunk,返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmaps in the + * request area are all 1 Expected: Chunk read locally, returning + * CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Set(); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 - char *chunkData = new char[length]; + // Reading Chunk Files + char* chunkData = new 
char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), @@ -834,7 +800,7 @@ TEST_P(OpRequestTest, ReadChunkTest) { opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->response_->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -850,31 +816,29 @@ TEST_P(OpRequestTest, ReadChunkTest) { } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0 - * 预期:将请求转发给clone manager处理 + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmap in the + * request area has a bit of 0 Expected: Forward request to clone manager + * for processing */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Clear(1); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); - EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) - .Times(1); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillOnce(Return(true)); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Reading Chunk Files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); + EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)).Times(1); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillOnce(Return(true)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(closure->response_->has_status()); @@ -883,54 +847,50 @@ TEST_P(OpRequestTest, ReadChunkTest) { ASSERT_TRUE(closure->isDone_); } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 ChunkNotExistError - * 预期:请求失败,返回 CHUNK_OP_STATUS_CHUNK_NOTEXIST + * Test OnApply + * Scenario: GetChunkInfo returns ChunkNotExistError + * Expected: Request failed, returning CHUNK_OP_STATUS_CHUNK_NOTEXIST */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::ChunkNotExistError)); - // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + // Unable to read chunk files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, response->status()); } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 ChunkNotExistError - * 但是请求中包含源chunk的信息 - * 预期:将请求转发给clone manager处理 + * Test OnApply + * Scenario: GetChunkInfo returns ChunkNotExistError + * But the request contains information about the source chunk + * Expected: Forward request to clone manager for processing */ { - // 重置closure + // Reset closure closure->Reset(); request->set_clonefilesource("/test"); request->set_clonefileoffset(0); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::ChunkNotExistError)); - // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); - EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) - .Times(1); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillOnce(Return(true)); + // Reading Chunk Files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); + 
EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)).Times(1); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillOnce(Return(true)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(closure->response_->has_status()); @@ -939,137 +899,135 @@ TEST_P(OpRequestTest, ReadChunkTest) { ASSERT_TRUE(closure->isDone_); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap都为1 - * 请求中包含源chunk的信息 - * 预期:从本地读chunk,返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmaps in the + * request area are all 1 The request contains information about the source + * chunk Expected: Chunk read locally, returning CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); request->set_clonefilesource("/test"); request->set_clonefileoffset(0); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Set(); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件 - char *chunkData = new char[length]; + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Reading Chunk Files + char* chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .WillOnce(DoAll(SetArrayArgument<2>(chunkData, - chunkData + length), + .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), Return(CSErrorCode::Success))); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->response_->appliedindex()); ASSERT_TRUE(response->has_status()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->response_->status()); ASSERT_EQ(memcmp(chunkData, - closure->cntl_->response_attachment().to_string().c_str(), //NOLINT - length), 0); + closure->cntl_->response_attachment() + .to_string() + .c_str(), // NOLINT + length), + 0); delete[] chunkData; } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 非ChunkNotExistError错误 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: GetChunkInfo returns a non ChunkNotExistError error + * Expected: Request failed, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::InternalError)); - // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + // Unable to read chunk files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response->status()); } /** - * 测试OnApply - * 用例:读本地chunk的时候返回错误 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: Error returned when reading local chunk + * Expected: Request failed, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件失败 + .WillRepeatedly( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Failed to read chunk file 
EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillRepeatedly(Return(CSErrorCode::InternalError)); ASSERT_DEATH(opReq->OnApply(3, closure), ""); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0 - * 转发请求给clone manager时出错 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmap in the + * request area has a bit of 0 Error forwarding request to clone manager + * Expected: Request failed, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Clear(1); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); - EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) - .Times(1); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillOnce(Return(false)); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Reading Chunk Files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); + EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)).Times(1); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillOnce(Return(false)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response->status()); } /** - * 测试 OnApplyFromLog - * 预期:啥也没做 + * Testing OnApplyFromLog + * Expected: Nothing done */ { - // 重置closure + // Reset closure closure->Reset(); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } TEST_P(OpRequestTest, RecoverChunkTest) { - // 创建CreateCloneChunkRequest + // Create CreateCloneChunkRequest LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -1082,21 +1040,17 @@ TEST_P(OpRequestTest, RecoverChunkTest) { request->set_optype(CHUNK_OP_RECOVER); request->set_offset(offset); request->set_size(length); - brpc::Controller *cntl = new brpc::Controller(); - ChunkResponse *response = new ChunkResponse(); - UnitTestClosure *closure = new UnitTestClosure(); + brpc::Controller* cntl = new brpc::Controller(); + ChunkResponse* response = new ChunkResponse(); + UnitTestClosure* closure = new UnitTestClosure(); closure->SetCntl(cntl); closure->SetRequest(request); closure->SetResponse(response); std::shared_ptr opReq = - std::make_shared(node_, - cloneMgr_.get(), - cntl, - request, - response, - closure); + std::make_shared(node_, cloneMgr_.get(), cntl, + request, response, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -1116,23 +1070,22 @@ TEST_P(OpRequestTest, RecoverChunkTest) { ASSERT_EQ(length, request->size()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: Request to forward request and return + * CHUNK_OP_STATUS_REDIRECTED */ { - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(false)); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(false)); // PeerId leaderId(PEER_STRING); // EXPECT_CALL(*node_, GetLeaderId()) // .WillOnce(Return(leaderId)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); opReq->Process(); 
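// ---------------------------------------------------------------------------
// Illustrative sketch (not from this patch): the lease cases above encode the
// read-path policy this test expects, reconstructed here only from those
// expectations: LEASE_VALID serves the read locally, LEASE_EXPIRED redirects
// the client, and LEASE_NOT_READY / LEASE_DISABLED fall back to Propose().
// The helper name is hypothetical.
#include <braft/raft.h>

bool CanServeReadLocally(const braft::LeaderLeaseStatus& st) {
    // Only a leader holding a valid lease may bypass the raft log.
    return st.state == braft::LEASE_VALID;
}
// ---------------------------------------------------------------------------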
- // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, @@ -1154,29 +1107,25 @@ TEST_P(OpRequestTest, RecoverChunkTest) { * expect: don't propose to raft,request commit to concurrentApplyModule_ */ { - // 重置closure + // Reset closure closure->Reset(); info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 不读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Do not read chunk files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 设置预期 - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + // Set expectations + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, Propose(_)).Times(0); braft::LeaderLeaseStatus status; status.state = braft::LEASE_VALID; EXPECT_CALL(*node_, GetLeaderLeaseStatus(_)) .WillOnce(SetArgPointee<0>(status)); - EXPECT_CALL(*node_, IsLeaseLeader(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*node_, IsLeaseLeader(_)).WillOnce(Return(true)); opReq->Process(); @@ -1193,54 +1142,52 @@ TEST_P(OpRequestTest, RecoverChunkTest) { } /** - * 测试OnApply - * 用例:请求的 chunk 不是 clone chunk - * 预期:直接返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is not a clone chunk + * Expected: Directly return to CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 不读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Do not read chunk files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response->status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response->status()); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap都为1 - * 预期:直接返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmaps in the + * request area are all 1 Expected: Directly return to + * CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Set(); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 不读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Do not read chunk files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->response_->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -1248,31 +1195,29 @@ TEST_P(OpRequestTest, RecoverChunkTest) { closure->response_->status()); } /** - * 
测试OnApply
-     * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0
-     * 预期:将请求转发给clone manager处理
+     * Test OnApply
+     * Scenario: The requested chunk is a clone chunk and the bitmap of the
+     * requested area has a bit set to 0
+     * Expected: The request is forwarded to the clone manager for processing
     */
    {
-        // 重置closure
+        // Reset closure
        closure->Reset();
-        // 设置预期
+        // Set expectations
        info.isClone = true;
        info.bitmap->Clear(1);
        EXPECT_CALL(*datastore_, GetChunkInfo(_, _))
-            .WillOnce(DoAll(SetArgPointee<1>(info),
-                            Return(CSErrorCode::Success)));
-        // 读chunk文件
-        EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _))
-            .Times(0);
-        EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _))
-            .Times(1);
-        EXPECT_CALL(*cloneMgr_, IssueCloneTask(_))
-            .WillOnce(Return(true));
+            .WillOnce(
+                DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success)));
+        // The chunk file will not be read
+        EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0);
+        EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)).Times(1);
+        EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillOnce(Return(true));

        opReq->OnApply(3, closure);

-        // 验证结果
+        // Verification results
        ASSERT_FALSE(closure->isDone_);
        ASSERT_FALSE(response->has_appliedindex());
        ASSERT_FALSE(closure->response_->has_status());
@@ -1281,103 +1226,97 @@ TEST_P(OpRequestTest, RecoverChunkTest) {
        ASSERT_TRUE(closure->isDone_);
    }
    /**
-     * 测试OnApply
-     * 用例:GetChunkInfo 返回 ChunkNotExistError
-     * 预期:请求失败,返回 CHUNK_OP_STATUS_CHUNK_NOTEXIST
+     * Test OnApply
+     * Scenario: GetChunkInfo returns ChunkNotExistError
+     * Expected: The request fails, returning CHUNK_OP_STATUS_CHUNK_NOTEXIST
     */
    {
-        // 重置closure
+        // Reset closure
        closure->Reset();
-        // 设置预期
+        // Set expectations
        EXPECT_CALL(*datastore_, GetChunkInfo(_, _))
            .WillOnce(Return(CSErrorCode::ChunkNotExistError));
-        // 不会读chunk文件
+        // The chunk file will not be read
        EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0);

        opReq->OnApply(3, closure);

-        // 验证结果
+        // Verification results
        ASSERT_TRUE(closure->isDone_);
        ASSERT_EQ(LAST_INDEX, response->appliedindex());
        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST,
                  response->status());
    }
    /**
-     * 测试OnApply
-     * 用例:GetChunkInfo 返回 非ChunkNotExistError错误
-     * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN
+     * Test OnApply
+     * Scenario: GetChunkInfo returns an error other than ChunkNotExistError
+     * Expected: The request fails, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN
     */
    {
-        // 重置closure
+        // Reset closure
        closure->Reset();
-        // 设置预期
+        // Set expectations
        EXPECT_CALL(*datastore_, GetChunkInfo(_, _))
            .WillOnce(Return(CSErrorCode::InternalError));
-        // 不会读chunk文件
+        // The chunk file will not be read
        EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0);

        opReq->OnApply(3, closure);

-        // 验证结果
+        // Verification results
        ASSERT_TRUE(closure->isDone_);
        ASSERT_EQ(LAST_INDEX, response->appliedindex());
        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN,
                  response->status());
    }
    /**
-     * 测试OnApply
-     * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0
-     *      转发请求给clone manager时出错
-     * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN
+     * Test OnApply
+     * Scenario: The requested chunk is a clone chunk, the bitmap of the
+     * requested area has a bit set to 0, and forwarding the request to the
+     * clone manager fails
+     * Expected: The request fails, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN
     */
    {
-        // 重置closure
+        // Reset closure
        closure->Reset();
-        // 设置预期
+        // Set expectations
        info.isClone = true;
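        // Note: the clone-chunk bitmap records which blocks have already been
        // written locally; Clear(1) below marks block 1 as unwritten, which is
        // what forces this request to be handed to the clone manager rather
        // than served from the local chunk file.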
info.bitmap->Clear(1); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); - EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) - .Times(1); - EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)) - .WillOnce(Return(false)); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + // Reading Chunk Files + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); + EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)).Times(1); + EXPECT_CALL(*cloneMgr_, IssueCloneTask(_)).WillOnce(Return(false)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response->status()); } /** - * 测试 OnApplyFromLog - * 预期:啥也没做 + * Testing OnApplyFromLog + * Expected: Nothing done */ { - // 重置closure + // Reset closure closure->Reset(); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } INSTANTIATE_TEST_CASE_P( - OpRequestTest, - OpRequestTest, + OpRequestTest, OpRequestTest, ::testing::Values( // chunk size block size, metapagesize std::make_tuple(16U * 1024 * 1024, 4096U, 4096U), diff --git a/test/chunkserver/copyset_epoch_test.cpp b/test/chunkserver/copyset_epoch_test.cpp index f9f80ad50f..810b9c3c5d 100644 --- a/test/chunkserver/copyset_epoch_test.cpp +++ b/test/chunkserver/copyset_epoch_test.cpp @@ -20,26 +20,25 @@ * Author: wudemiao */ - -#include -#include -#include -#include -#include #include #include #include +#include +#include +#include +#include +#include #include "include/chunkserver/chunkserver_common.h" -#include "src/chunkserver/copyset_node.h" -#include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" #include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" -#include "src/common/uuid.h" #include "src/chunkserver/chunk_service.h" +#include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/common/concurrent/concurrent.h" +#include "src/common/uuid.h" #include "src/fs/fs_common.h" +#include "test/chunkserver/chunkserver_test_util.h" #define BRAFT_SNAPSHOT_PATTERN "snapshot_%020" PRId64 @@ -59,9 +58,7 @@ class CopysetEpochTest : public testing::Test { dir1 = uuidGenerator.GenerateUUID(); Exec(("mkdir " + dir1).c_str()); } - virtual void TearDown() { - Exec(("rm -fr " + dir1).c_str()); - } + virtual void TearDown() { Exec(("rm -fr " + dir1).c_str()); } public: std::string dir1; @@ -70,27 +67,23 @@ class CopysetEpochTest : public testing::Test { butil::AtExitManager atExitManager; TEST_F(CopysetEpochTest, DISABLED_basic) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9026; - const char *confs = "127.0.0.1:9026:0"; + const char* confs = "127.0.0.1:9026:0"; int snapshotInterval = 1; int electionTimeoutMs = 3000; - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); std::string snapshotPath = dir1 + "/4295067297/raft_snapshot"; uint64_t lastIncludeIndex = 0; /** - * 启动一个chunkserver + * Start a chunkserver */ std::string copysetdir = "local://./" + dir1; auto startChunkServerFunc = [&] { - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - 
snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); }; Thread t1(startChunkServerFunc); @@ -105,111 +98,95 @@ TEST_F(CopysetEpochTest, DISABLED_basic) { ::usleep(1000 * electionTimeoutMs); { - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); } - CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance(); + CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance(); auto node = nodeManager.GetCopysetNode(logicPoolId, copysetId); ASSERT_EQ(1, node->GetConfEpoch()); std::string confEpochPath1 = snapshotPath; - butil::string_appendf(&confEpochPath1, - "/" BRAFT_SNAPSHOT_PATTERN, + butil::string_appendf(&confEpochPath1, "/" BRAFT_SNAPSHOT_PATTERN, ++lastIncludeIndex); confEpochPath1.append("/"); confEpochPath1.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath1)); - // 等待生成快照 + // Waiting for snapshot generation ::sleep(2 * snapshotInterval); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be + // executed to load the epoch from the snapshot node->Fini(); node->Run(); { - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); } ASSERT_EQ(2, node->GetConfEpoch()); std::string confEpochPath2 = snapshotPath; - butil::string_appendf(&confEpochPath2, - "/" BRAFT_SNAPSHOT_PATTERN, + butil::string_appendf(&confEpochPath2, "/" BRAFT_SNAPSHOT_PATTERN, ++lastIncludeIndex); confEpochPath2.append("/"); confEpochPath2.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath2)); - // 等待生成快照 + // Waiting for snapshot generation ::sleep(2 * snapshotInterval); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be + // executed to load the epoch from the snapshot node->Fini(); node->Run(); { - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); } ASSERT_EQ(3, node->GetConfEpoch()); std::string confEpochPath3 = snapshotPath; - butil::string_appendf(&confEpochPath3, - "/" BRAFT_SNAPSHOT_PATTERN, + butil::string_appendf(&confEpochPath3, "/" BRAFT_SNAPSHOT_PATTERN, ++lastIncludeIndex); confEpochPath3.append("/"); confEpochPath3.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath3)); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be + // executed to load the epoch from the snapshot node->Fini(); node->Run(); { - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); } ASSERT_EQ(4, node->GetConfEpoch()); std::string confEpochPath4 = snapshotPath; - butil::string_appendf(&confEpochPath4, - "/" 
BRAFT_SNAPSHOT_PATTERN, + butil::string_appendf(&confEpochPath4, "/" BRAFT_SNAPSHOT_PATTERN, ++lastIncludeIndex); confEpochPath4.append("/"); confEpochPath4.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath4)); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be + // executed to load the epoch from the snapshot node->Fini(); node->Run(); { - butil::Status status = WaitLeader(logicPoolId, - copysetId, - conf, - &leader, + butil::Status status = WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); } ASSERT_EQ(5, node->GetConfEpoch()); std::string confEpochPath5 = snapshotPath; - butil::string_appendf(&confEpochPath5, - "/" BRAFT_SNAPSHOT_PATTERN, + butil::string_appendf(&confEpochPath5, "/" BRAFT_SNAPSHOT_PATTERN, ++lastIncludeIndex); confEpochPath5.append("/"); confEpochPath5.append(kCurveConfEpochFilename); diff --git a/test/chunkserver/copyset_node_manager_test.cpp b/test/chunkserver/copyset_node_manager_test.cpp index 7103ba0697..fe4f0472e3 100644 --- a/test/chunkserver/copyset_node_manager_test.cpp +++ b/test/chunkserver/copyset_node_manager_test.cpp @@ -20,14 +20,15 @@ * Author: wudemiao */ +#include "src/chunkserver/copyset_node_manager.h" + +#include #include #include -#include #include #include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/copyset_node.h" #include "test/chunkserver/mock_copyset_node.h" @@ -35,10 +36,10 @@ namespace curve { namespace chunkserver { using ::testing::_; -using ::testing::Return; -using ::testing::NotNull; -using ::testing::Mock; using ::testing::DoAll; +using ::testing::Mock; +using ::testing::NotNull; +using ::testing::Return; using ::testing::ReturnArg; using ::testing::SetArgPointee; @@ -72,20 +73,19 @@ class CopysetNodeManagerTest : public ::testing::Test { LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); ASSERT_TRUE(nullptr != fs); defaultOptions_.localFileSystem = fs; - defaultOptions_.chunkFilePool = - std::make_shared(fs); + defaultOptions_.chunkFilePool = std::make_shared(fs); defaultOptions_.trash = std::make_shared(); } void TearDown() { - CopysetNodeManager *copysetNodeManager = + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); copysetNodeManager->Fini(); ::system("rm -rf node_manager_test"); } protected: - CopysetNodeOptions defaultOptions_; + CopysetNodeOptions defaultOptions_; ConcurrentApplyModule concurrentModule_; }; @@ -93,34 +93,32 @@ TEST_F(CopysetNodeManagerTest, ErrorOptionsTest) { LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; Configuration conf; - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); defaultOptions_.chunkDataUri = "//."; defaultOptions_.logUri = "//."; ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); - ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId, - conf)); + ASSERT_FALSE( + copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); } TEST_F(CopysetNodeManagerTest, ServiceNotStartTest) { LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; Configuration conf; - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); 
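    // Note: Init() only records the options; existing copysets are reloaded
    // by Run(), so LoadFinished() is expected to stay false until Run() has
    // completed the reload (see the assertions below).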
ASSERT_FALSE(copysetNodeManager->LoadFinished()); ASSERT_EQ(0, copysetNodeManager->Run()); - ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId, - conf)); + ASSERT_FALSE( + copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); ASSERT_TRUE(copysetNodeManager->LoadFinished()); /* null server */ { - brpc::Server *server = nullptr; + brpc::Server* server = nullptr; int port = 9000; butil::EndPoint addr(butil::IP_ANY, port); ASSERT_EQ(-1, copysetNodeManager->AddService(server, addr)); @@ -131,7 +129,7 @@ TEST_F(CopysetNodeManagerTest, NormalTest) { LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; Configuration conf; - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); // start server brpc::Server server; @@ -143,21 +141,19 @@ TEST_F(CopysetNodeManagerTest, NormalTest) { ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); - // 本地 copyset 未加载完成,则无法创建新的copyset - ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId, - conf)); + // Cannot create a new copyset if the local copyset has not been loaded + // completely + ASSERT_FALSE( + copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); ASSERT_EQ(0, copysetNodeManager->Run()); - ASSERT_TRUE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId, - conf)); + ASSERT_TRUE( + copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); ASSERT_TRUE(copysetNodeManager->IsExist(logicPoolId, copysetId)); - // 重复创建 - ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId, - conf)); + // Duplicate creation + ASSERT_FALSE( + copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); auto copysetNode1 = - copysetNodeManager->GetCopysetNode(logicPoolId, copysetId); + copysetNodeManager->GetCopysetNode(logicPoolId, copysetId); ASSERT_TRUE(nullptr != copysetNode1); auto copysetNode2 = copysetNodeManager->GetCopysetNode(logicPoolId + 1, copysetId + 1); @@ -168,8 +164,7 @@ TEST_F(CopysetNodeManagerTest, NormalTest) { copysetNodeManager->GetAllCopysetNodes(©setNodes); ASSERT_EQ(1, copysetNodes.size()); - ASSERT_TRUE(copysetNodeManager->DeleteCopysetNode(logicPoolId, - copysetId)); + ASSERT_TRUE(copysetNodeManager->DeleteCopysetNode(logicPoolId, copysetId)); ASSERT_FALSE(copysetNodeManager->IsExist(logicPoolId, copysetId)); ASSERT_EQ(0, copysetNodeManager->Fini()); @@ -178,46 +173,49 @@ TEST_F(CopysetNodeManagerTest, NormalTest) { } TEST_F(CopysetNodeManagerTest, CheckCopysetTest) { - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); - std::shared_ptr mockNode - = std::make_shared(); + std::shared_ptr mockNode = + std::make_shared(); - // 测试copyset node manager还没运行 + // The test copyset node manager has not been run yet EXPECT_CALL(*mockNode, GetStatus(_)).Times(0); EXPECT_CALL(*mockNode, GetLeaderStatus(_)).Times(0); ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(mockNode)); - // 启动copyset node manager + // Start the copyset node manager ASSERT_EQ(0, copysetNodeManager->Run()); - // 测试node为空 + // Test node is empty EXPECT_CALL(*mockNode, GetStatus(_)).Times(0); EXPECT_CALL(*mockNode, GetLeaderStatus(_)).Times(0); ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(nullptr)); - // 测试无法获取到leader status的情况 + // Test the situation where the 
leader status cannot be obtained EXPECT_CALL(*mockNode, GetStatus(_)).Times(0); NodeStatus leaderStatus; EXPECT_CALL(*mockNode, GetLeaderStatus(_)) - .Times(defaultOptions_.checkRetryTimes) - .WillRepeatedly(DoAll(SetArgPointee<0>(leaderStatus), Return(false))); + .Times(defaultOptions_.checkRetryTimes) + .WillRepeatedly(DoAll(SetArgPointee<0>(leaderStatus), Return(false))); ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(mockNode)); leaderStatus.leader_id.parse("127.0.0.1:9043:0"); - // 测试leader first_index 大于 follower last_index的情况 + // Test the situation that leader first_index greater than follower + // last_index leaderStatus.first_index = 1000; NodeStatus followerStatus; followerStatus.last_index = 999; - EXPECT_CALL(*mockNode, GetStatus(_)).Times(1) - .WillOnce(SetArgPointee<0>(followerStatus)); + EXPECT_CALL(*mockNode, GetStatus(_)) + .Times(1) + .WillOnce(SetArgPointee<0>(followerStatus)); EXPECT_CALL(*mockNode, GetLeaderStatus(_)) - .WillOnce(DoAll(SetArgPointee<0>(leaderStatus), Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(leaderStatus), Return(true))); ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(mockNode)); - // 测试可以获取到leader status,且follower当前不在安装快照 的情况 + // The test can obtain the leader status and the follower is currently not + // installing the snapshot leaderStatus.first_index = 1; leaderStatus.committed_index = 2000; NodeStatus status1; @@ -233,14 +231,14 @@ TEST_F(CopysetNodeManagerTest, CheckCopysetTest) { status4.last_index = 1666; status4.known_applied_index = 1001; EXPECT_CALL(*mockNode, GetStatus(_)) - .Times(4) - .WillOnce(SetArgPointee<0>(status1)) - .WillOnce(SetArgPointee<0>(status2)) - .WillOnce(SetArgPointee<0>(status3)) - .WillOnce(SetArgPointee<0>(status4)); + .Times(4) + .WillOnce(SetArgPointee<0>(status1)) + .WillOnce(SetArgPointee<0>(status2)) + .WillOnce(SetArgPointee<0>(status3)) + .WillOnce(SetArgPointee<0>(status4)); EXPECT_CALL(*mockNode, GetLeaderStatus(_)) - .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<0>(leaderStatus), Return(true))); + .Times(4) + .WillRepeatedly(DoAll(SetArgPointee<0>(leaderStatus), Return(true))); ASSERT_TRUE(copysetNodeManager->CheckCopysetUntilLoadFinished(mockNode)); } @@ -248,7 +246,7 @@ TEST_F(CopysetNodeManagerTest, ReloadTest) { LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; Configuration conf; - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); // start server brpc::Server server; @@ -258,15 +256,14 @@ TEST_F(CopysetNodeManagerTest, ReloadTest) { LOG(FATAL) << "Fail to start Server"; } - // 构造初始环境 + // Construct initial environment ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); ASSERT_EQ(0, copysetNodeManager->Run()); - // 创建多个copyset + // Create multiple copyset int copysetNum = 5; for (int i = 0; i < copysetNum; ++i) { ASSERT_TRUE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId + i, - conf)); + copysetId + i, conf)); } std::vector> copysetNodes; copysetNodeManager->GetAllCopysetNodes(©setNodes); @@ -276,11 +273,10 @@ TEST_F(CopysetNodeManagerTest, ReloadTest) { copysetNodeManager->GetAllCopysetNodes(©setNodes); ASSERT_EQ(0, copysetNodes.size()); - - // 本地 copyset 未加载完成,则无法创建新的copyset + // Cannot create a new copyset if the local copyset has not been loaded + // completely ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, - copysetId + 5, - conf)); + copysetId + 5, conf)); // reload copysets when loadConcurrency < 
copysetNum std::cout << "Test ReloadCopysets when loadConcurrency=3" << std::endl; diff --git a/test/chunkserver/copyset_node_test.cpp b/test/chunkserver/copyset_node_test.cpp index 46ed6a4fdb..4a30fae926 100644 --- a/test/chunkserver/copyset_node_test.cpp +++ b/test/chunkserver/copyset_node_test.cpp @@ -20,1116 +20,1096 @@ * Author: wudemiao */ +#include "src/chunkserver/copyset_node.h" + +#include #include #include -#include -#include #include -#include -#include #include +#include +#include +#include -#include "test/fs/mock_local_filesystem.h" -#include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/copyset_node.h" -#include "test/chunkserver/fake_datastore.h" -#include "test/chunkserver/mock_node.h" -#include "src/chunkserver/conf_epoch_file.h" #include "proto/heartbeat.pb.h" +#include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "src/chunkserver/conf_epoch_file.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/raftsnapshot/curve_snapshot_attachment.h" +#include "test/chunkserver/fake_datastore.h" #include "test/chunkserver/mock_curve_filesystem_adaptor.h" -#include "src/chunkserver/concurrent_apply/concurrent_apply.h" - -namespace curve { -namespace chunkserver { - -using ::testing::_; -using ::testing::Invoke; -using ::testing::Return; -using ::testing::AnyNumber; -using ::testing::Matcher; -using ::testing::DoAll; -using ::testing::SetArgPointee; -using ::testing::SetArgReferee; -using ::testing::InSequence; -using ::testing::AtLeast; -using ::testing::SaveArgPointee; - -using curve::fs::MockLocalFileSystem; -using curve::fs::FileSystemType; -using curve::fs::MockLocalFileSystem; -using curve::chunkserver::concurrent::ConcurrentApplyOption; - -const char copysetUri[] = "local://./copyset_node_test"; -const int port = 9044; - -class FakeSnapshotReader : public braft::SnapshotReader { - public: - std::string get_path() { - /* 返回一个不存在的 path */ - return std::string("/1002093939/temp/238408034"); - } - void list_files(std::vector *files) { - return; - } - int load_meta(braft::SnapshotMeta *meta) { - return 1; - } - std::string generate_uri_for_copy() { - return std::string(""); - } -}; - -class FakeSnapshotWriter : public braft::SnapshotWriter { - public: - std::string get_path() { - /* 返回一个不存在的 path */ - return std::string("."); - } - void list_files(std::vector *files) { - return; - } - virtual int save_meta(const braft::SnapshotMeta &meta) { - return 0; - } - - virtual int add_file(const std::string &filename) { - return 0; - } - - virtual int add_file(const std::string &filename, - const ::google::protobuf::Message *file_meta) { - return 0; - } - - virtual int remove_file(const std::string &filename) { - return 0; - } -}; - -class FakeClosure : public braft::Closure { - public: - void Run() { - std::cerr << "FakeClosure run" << std::endl; - } -}; - -class CopysetNodeTest : public ::testing::Test { - protected: - void SetUp() { - defaultOptions_.ip = "127.0.0.1"; - defaultOptions_.port = port; - defaultOptions_.electionTimeoutMs = 1000; - defaultOptions_.snapshotIntervalS = 30; - defaultOptions_.catchupMargin = 50; - defaultOptions_.chunkDataUri = copysetUri; - defaultOptions_.chunkSnapshotUri = copysetUri; - defaultOptions_.logUri = copysetUri; - defaultOptions_.raftMetaUri = copysetUri; - defaultOptions_.raftSnapshotUri = copysetUri; - defaultOptions_.loadConcurrency = 5; - defaultOptions_.syncConcurrency = 20; - defaultOptions_.checkRetryTimes = 3; - defaultOptions_.finishLoadMargin = 1000; - - 
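// A minimal sketch (hypothetical helper, not project code) of the catch-up
// rule that checkRetryTimes and finishLoadMargin above control: in the
// CheckCopysetTest earlier, the copyset counts as loaded once the follower's
// applied index is within finishLoadMargin of the leader's committed index.
#include <cstdint>

bool IsCaughtUpSketch(uint64_t leaderCommittedIndex,
                      uint64_t followerAppliedIndex,
                      uint64_t finishLoadMargin) {
    // e.g. committed = 2000, known_applied = 1001, margin = 1000:
    // 2000 - 1001 = 999 <= 1000, so the fourth GetStatus poll succeeds
    return leaderCommittedIndex - followerAppliedIndex <= finishLoadMargin;
}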
defaultOptions_.concurrentapply = &concurrentModule_; - ConcurrentApplyOption opt{2, 1, 2, 1}; - defaultOptions_.concurrentapply->Init(opt); - std::shared_ptr fs = - LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); - ASSERT_TRUE(nullptr != fs); - defaultOptions_.localFileSystem = fs; - defaultOptions_.chunkFilePool = - std::make_shared(fs); - defaultOptions_.trash = std::make_shared(); - defaultOptions_.enableOdsyncWhenOpenChunkFile = true; - } - - void TearDown() { - ::system("rm -rf copyset_node_test"); - } - - protected: - CopysetNodeOptions defaultOptions_; - ConcurrentApplyModule concurrentModule_; -}; - -TEST_F(CopysetNodeTest, error_test) { - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT - std::string rmCmd("rm -f "); - rmCmd += kCurveConfEpochFilename; - - // on_snapshot_save: List failed - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - Configuration conf; - std::vector files; - files.push_back("test-1.txt"); - files.push_back("test-2.txt"); - - const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT - std::string jsonStr(json); - - CopysetNode copysetNode(logicPoolID, copysetID, conf); - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - FakeClosure closure; - FakeSnapshotWriter writer; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs)); - - - copysetNode.SetLocalFileSystem(mockfs); - copysetNode.SetConfEpochFile(std::move(epochFile)); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)).Times(1) - .WillOnce(Return(jsonStr.size())); - EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0)); - EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0)); - EXPECT_CALL(*mockfs, List(_, _)).Times(1).WillOnce(Return(-1)); - - copysetNode.on_snapshot_save(&writer, &closure); - copysetNode.WaitSnapshotDone(); - LOG(INFO) << closure.status().error_cstr(); - } - - // on_snapshot_save: save conf open failed - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - Configuration conf; - std::vector files; - files.push_back("test-1.txt"); - files.push_back("test-2.txt"); - - CopysetNode copysetNode(logicPoolID, copysetID, conf); - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - FakeClosure closure; - FakeSnapshotWriter writer; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; - - copysetNode.SetLocalFileSystem(mockfs); - copysetNode.SetConfEpochFile(std::move(epochFile)); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1)); - - copysetNode.on_snapshot_save(&writer, &closure); - copysetNode.WaitSnapshotDone(); - LOG(INFO) << closure.status().error_cstr(); - } - // on_snapshot_save: success - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - Configuration conf; - std::vector files; - files.push_back("test-1.txt"); - files.push_back("test-2.txt"); - - const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT - std::string jsonStr(json); - - CopysetNode copysetNode(logicPoolID, copysetID, conf); - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - FakeClosure closure; - FakeSnapshotWriter writer; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; - - copysetNode.SetLocalFileSystem(mockfs); - copysetNode.SetConfEpochFile(std::move(epochFile)); - 
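// A minimal sketch of the gmock overload-selection pattern the Write
// expectations below depend on. MockIo and its signatures are illustrative;
// the real overload set lives in test/fs/mock_local_filesystem.h.
#include <gmock/gmock.h>

struct MockIo {
    MOCK_METHOD3(Write, int(int fd, const char* buf, int len));
    MOCK_METHOD3(Write, int(int fd, const void* buf, int len));
};

void ExpectCharWrite(MockIo* io) {
    using ::testing::_;
    using ::testing::Matcher;
    using ::testing::Return;
    // Matcher<const char*>(_) pins the const char* overload; a bare _ would
    // be ambiguous between the two Write signatures.
    EXPECT_CALL(*io, Write(_, Matcher<const char*>(_), _)).WillOnce(Return(0));
}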
EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)).Times(1) - .WillOnce(Return(jsonStr.size())); - EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0)); - EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0)); - EXPECT_CALL(*mockfs, List(_, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - - copysetNode.on_snapshot_save(&writer, &closure); - copysetNode.WaitSnapshotDone(); - } - - // on_snapshot_save: success, enableOdsyncWhenOpenChunkFile_ = false - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - Configuration conf; - std::vector files; - files.push_back("test-1.txt"); - files.push_back("test-2.txt"); - - const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT - std::string jsonStr(json); - - CopysetNode copysetNode(logicPoolID, copysetID, conf); - defaultOptions_.enableOdsyncWhenOpenChunkFile = false; - defaultOptions_.syncConcurrency = 20; - defaultOptions_.syncChunkLimit = 2 * 1024 * 1024; - defaultOptions_.syncThreshold = 65536; - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - FakeClosure closure; - FakeSnapshotWriter writer; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; - - copysetNode.SetLocalFileSystem(mockfs); - copysetNode.SetConfEpochFile(std::move(epochFile)); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)).Times(1) - .WillOnce(Return(jsonStr.size())); - EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0)); - EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0)); - EXPECT_CALL(*mockfs, List(_, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - - copysetNode.on_snapshot_save(&writer, &closure); - copysetNode.WaitSnapshotDone(); - } - // ShipToSync & handle sync time out - { - CopysetNode::copysetSyncPool_ = - std::make_shared>(); - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - Configuration conf; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - - defaultOptions_.enableOdsyncWhenOpenChunkFile = false; - defaultOptions_.syncConcurrency = 20; - defaultOptions_.syncChunkLimit = 2 * 1024 * 1024; - defaultOptions_.syncThreshold = 65536; - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - - ChunkID id1 = 100; - ChunkID id2 = 200; - ChunkID id3 = 100; - copysetNode.ShipToSync(id1); - copysetNode.ShipToSync(id2); - copysetNode.ShipToSync(id3); - copysetNode.HandleSyncTimerOut(); - } - - // on_snapshot_load: Dir not exist, File not exist, data init success - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - Configuration conf; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - FakeClosure closure; - FakeSnapshotReader reader; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; - copysetNode.SetLocalFileSystem(mockfs); - copysetNode.SetConfEpochFile(std::move(epochFile)); - DataStoreOptions options; - options.baseDir = "./test-temp"; - options.chunkSize = 16 * 1024 * 1024; - options.metaPageSize = 4 * 1024; - options.blockSize = 4 * 1024; - std::shared_ptr dataStore = - std::make_shared(options, fs); - copysetNode.SetCSDateStore(dataStore); - - EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(false)); - EXPECT_CALL(*mockfs, FileExists(_)).Times(1).WillOnce(Return(false)); - - ASSERT_EQ(0, copysetNode.on_snapshot_load(&reader)); - LOG(INFO) 
<< "OK"; - } - // on_snapshot_load: Dir not exist, File not exist, data init failed - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - Configuration conf; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; - FakeClosure closure; - FakeSnapshotReader reader; - copysetNode.SetLocalFileSystem(mockfs); - copysetNode.SetConfEpochFile(std::move(epochFile)); - DataStoreOptions options; - options.baseDir = "./test-temp"; - options.chunkSize = 16 * 1024 * 1024; - options.metaPageSize = 4 * 1024; - options.blockSize = 4 * 1024; - std::shared_ptr dataStore = - std::make_shared(options, fs); - copysetNode.SetCSDateStore(dataStore); - dataStore->InjectError(); - - EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(false)); - EXPECT_CALL(*mockfs, FileExists(_)).Times(1).WillOnce(Return(false)); - - ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); - LOG(INFO) << "OK"; - } - // on_snapshot_load: Dir not exist, File exist, load conf.epoch failed - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - Configuration conf; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - FakeClosure closure; - FakeSnapshotReader reader; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; - copysetNode.SetLocalFileSystem(mockfs); - copysetNode.SetConfEpochFile(std::move(epochFile)); - - EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(false)); - EXPECT_CALL(*mockfs, FileExists(_)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1)); - - ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); - } - - // on_snapshot_load: Dir exist, delete failed - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - Configuration conf; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - FakeClosure closure; - FakeSnapshotReader reader; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; - MockCurveFilesystemAdaptor* cfa = - new MockCurveFilesystemAdaptor(); - auto sfs = new scoped_refptr(cfa); - copysetNode.SetSnapshotFileSystem(sfs); - copysetNode.SetLocalFileSystem(mockfs); - copysetNode.SetConfEpochFile(std::move(epochFile)); - EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*cfa, - delete_file(_, _)).Times(1).WillOnce(Return(false)); - - ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); - } - - // on_snapshot_load: Dir exist, delete success, rename failed - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - Configuration conf; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - FakeClosure closure; - FakeSnapshotReader reader; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; - defaultOptions_.localFileSystem = mockfs; - MockCurveFilesystemAdaptor* cfa = - new MockCurveFilesystemAdaptor(); - auto sfs = new scoped_refptr(cfa); - copysetNode.SetSnapshotFileSystem(sfs); - copysetNode.SetLocalFileSystem(mockfs); - copysetNode.SetConfEpochFile(std::move(epochFile)); - EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*cfa, - delete_file(_, _)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*cfa, - rename(_, _)).Times(1).WillOnce(Return(false)); - - ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); - } - - // on_snapshot_load: Dir exist, rename 
success - // file exist, open failed - { - LogicPoolID logicPoolID = 1; - CopysetID copysetID = 1; - Configuration conf; - std::vector files; - files.push_back("test-1.txt"); - - CopysetNode copysetNode(logicPoolID, copysetID, conf); - FakeClosure closure; - FakeSnapshotReader reader; - std::shared_ptr - mockfs = std::make_shared(); - std::unique_ptr - epochFile(new ConfEpochFile(mockfs));; - defaultOptions_.localFileSystem = mockfs; - MockCurveFilesystemAdaptor* cfa = - new MockCurveFilesystemAdaptor(); - auto sfs = new scoped_refptr(cfa); - copysetNode.SetSnapshotFileSystem(sfs); - copysetNode.SetLocalFileSystem(mockfs); - copysetNode.SetConfEpochFile(std::move(epochFile)); - EXPECT_CALL(*mockfs, DirExists(_)).Times(1) - .WillOnce(Return(true)); - EXPECT_CALL(*cfa, - delete_file(_, _)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*cfa, - rename(_, _)).Times(1).WillOnce(Return(true)); - EXPECT_CALL(*mockfs, FileExists(_)).Times(1) - .WillOnce(Return(true)); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(-1)); - - ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); - LOG(INFO) << "OK"; - } - /* on_error */ - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - Configuration conf; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - braft::Error error; - ASSERT_DEATH(copysetNode.on_error(error), ".*raft error.*"); - } - /* Fini, raftNode is null */ - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - Configuration conf; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - copysetNode.Fini(); - } - /* Fini, raftNode is not null */ - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - Configuration conf; - std::vector files; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - defaultOptions_.localFileSystem = fs; - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - copysetNode.Fini(); - } - /* Load/SaveConfEpoch */ - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - Configuration conf; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - defaultOptions_.localFileSystem = fs; - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - ASSERT_EQ(0, copysetNode.SaveConfEpoch(kCurveConfEpochFilename)); - ASSERT_EQ(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename)); - ASSERT_EQ(0, copysetNode.GetConfEpoch()); - copysetNode.Fini(); - ::system(rmCmd.c_str()); - } - /* load: ConfEpochFile load failed*/ - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - Configuration conf; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - defaultOptions_.localFileSystem = fs; - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename)); - copysetNode.Fini(); - ::system(rmCmd.c_str()); - } - /* load: logic pool id 错误 */ - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - uint64_t epoch = 12; - Configuration conf; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - auto fs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); - ConfEpochFile confEpochFile(fs); - ASSERT_EQ(0, - confEpochFile.Save(kCurveConfEpochFilename, - logicPoolID + 1, - copysetID, - epoch)); - defaultOptions_.localFileSystem = fs; - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename)); - copysetNode.Fini(); - ::system(rmCmd.c_str()); - } - /* load: copyset id 错误 */ - { - LogicPoolID logicPoolID = 123; - CopysetID copysetID = 1345; - uint64_t epoch = 12; - Configuration conf; - 
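// A plausible reading of the two "load: ... id error" cases nearby:
// LoadConfEpoch must refuse a conf.epoch file whose ids belong to another
// copyset. Hypothetical helper; the real check sits inside
// CopysetNode::LoadConfEpoch.
int CheckConfEpochIdsSketch(LogicPoolID expectPool, CopysetID expectCopyset,
                            LogicPoolID loadedPool, CopysetID loadedCopyset) {
    if (expectPool != loadedPool || expectCopyset != loadedCopyset) {
        return -1;  // saved with logicPoolID + 1 / copysetID + 1 -> must fail
    }
    return 0;
}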
CopysetNode copysetNode(logicPoolID, copysetID, conf); - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - auto fs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); - ConfEpochFile confEpochFile(fs); - ASSERT_EQ(0, - confEpochFile.Save(kCurveConfEpochFilename, - logicPoolID, - copysetID + 1, - epoch)); - defaultOptions_.localFileSystem = fs; - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename)); - copysetNode.Fini(); - ::system(rmCmd.c_str()); - } -} - -TEST_F(CopysetNodeTest, get_conf_change) { - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT - std::string rmCmd("rm -f "); - rmCmd += kCurveConfEpochFilename; - - LogicPoolID logicPoolID = 1; - CopysetID copysetID = 1; - Configuration conf; - Configuration conf1; - Configuration conf2; - PeerId peer("127.0.0.1:3200:0"); - PeerId peer1("127.0.0.1:3201:0"); - PeerId emptyPeer; - conf.add_peer(peer); - conf1.add_peer(peer); - conf1.add_peer(peer1); - conf2.add_peer(peer1); - - // 当前没有在做配置变更 - { - CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - copysetNode.SetCopysetNode(mockNode); - - ConfigChangeType type; - Configuration oldConf; - Peer alterPeer; - - copysetNode.on_leader_start(8); - NodeStatus status; - status.state = braft::State::STATE_LEADER; - EXPECT_CALL(*mockNode, get_status(_)) - .WillOnce(SetArgPointee<0>(status)); - EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer)); - EXPECT_EQ(ConfigChangeType::NONE, type); - } - // 当前正在Add Peer - { - CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - copysetNode.SetCopysetNode(mockNode); - - ConfigChangeType type; - Configuration oldConf; - Peer alterPeer; - - copysetNode.on_leader_start(8); - - EXPECT_CALL(*mockNode, add_peer(_, _)) - .Times(1); - EXPECT_CALL(*mockNode, remove_peer(_, _)) - .WillOnce( - Invoke([](const PeerId& peer, braft::Closure* done) { - done->status().set_error(-1, - "another config change is ongoing"); - })); - Peer addPeer; - addPeer.set_address("127.0.0.1:3202:0"); - Peer removePeer; - removePeer.set_address("127.0.0.1:3200:0"); - copysetNode.AddPeer(addPeer); - copysetNode.RemovePeer(removePeer); - - NodeStatus status; - status.state = braft::State::STATE_LEADER; - EXPECT_CALL(*mockNode, get_status(_)) - .WillOnce(SetArgPointee<0>(status)); - EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer)); - EXPECT_EQ(ConfigChangeType::ADD_PEER, type); - EXPECT_EQ(addPeer.address(), alterPeer.address()); - } - // 当前正在Remove Peer - { - CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - copysetNode.SetCopysetNode(mockNode); - - ConfigChangeType type; - Configuration oldConf; - Peer alterPeer; - - copysetNode.on_leader_start(8); - - EXPECT_CALL(*mockNode, remove_peer(_, _)) - .Times(1); - EXPECT_CALL(*mockNode, add_peer(_, _)) - .WillOnce( - Invoke([](const braft::PeerId& peer, braft::Closure* done) { - done->status().set_error(-1, - "another config change is ongoing"); - })); - Peer addPeer1; - addPeer1.set_address("127.0.0.1:3202:0"); - Peer removePeer; - removePeer.set_address("127.0.0.1:3200:0"); - copysetNode.RemovePeer(removePeer); - 
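// The AddPeer/RemovePeer cases here rely on the second, competing request
// being failed through its braft::Closure. A sketch of that rejection
// pattern; the flag and function names are illustrative, not the production
// member names.
#include <braft/raft.h>
#include <brpc/closure_guard.h>

void RejectIfChangeInFlightSketch(bool configChangeInFlight,
                                  braft::Closure* done) {
    brpc::ClosureGuard guard(done);  // guarantees done->Run() on every path
    if (configChangeInFlight) {
        done->status().set_error(-1, "another config change is ongoing");
    }
}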
copysetNode.AddPeer(addPeer1); - - NodeStatus status; - status.state = braft::State::STATE_LEADER; - EXPECT_CALL(*mockNode, get_status(_)) - .WillOnce(SetArgPointee<0>(status)); - EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer)); - EXPECT_EQ(ConfigChangeType::REMOVE_PEER, type); - EXPECT_EQ(removePeer.address(), alterPeer.address()); - } - // 当前正在Transfer leader - { - CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - copysetNode.SetCopysetNode(mockNode); - - ConfigChangeType type; - Configuration oldConf; - Peer alterPeer; - - copysetNode.on_leader_start(8); - - Peer transferee1; - transferee1.set_address("127.0.0.1:3201:0"); - Peer transferee2; - transferee2.set_address("127.0.0.1:3200:0"); - EXPECT_CALL(*mockNode, transfer_leadership_to(_)) - .WillOnce(Return(0)) - .WillOnce(Return(-1)); - EXPECT_CALL(*mockNode, leader_id()) - .WillOnce(Return(peer)) - .WillOnce(Return(peer1)) - .WillOnce(Return(peer)); - copysetNode.TransferLeader(transferee1); - copysetNode.TransferLeader(transferee2); - copysetNode.TransferLeader(transferee2); - - NodeStatus status; - status.state = braft::State::STATE_TRANSFERRING; - EXPECT_CALL(*mockNode, get_status(_)) - .WillOnce(SetArgPointee<0>(status)); - EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer)); - EXPECT_EQ(ConfigChangeType::TRANSFER_LEADER, type); - EXPECT_EQ(transferee1.address(), alterPeer.address()); - } - // 当前正在Change Peer - { - CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr mockNode - = std::make_shared(logicPoolID, - copysetID); - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - copysetNode.SetCopysetNode(mockNode); - - ConfigChangeType type; - Configuration oldConf; - Peer alterPeer; - - copysetNode.on_leader_start(8); - - EXPECT_CALL(*mockNode, change_peers(_, _)) - .Times(1); - - Peer addPeer1; - addPeer1.set_address("127.0.0.1:3201:0"); - std::vector peers; - peers.emplace_back(addPeer1); - copysetNode.ChangePeer(peers); - Peer addPeer2; - addPeer2.set_address("127.0.0.1:3202:0"); - peers.emplace_back(addPeer2); - copysetNode.ChangePeer(peers); - - NodeStatus status; - status.state = braft::State::STATE_LEADER; - EXPECT_CALL(*mockNode, get_status(_)) - .WillOnce(SetArgPointee<0>(status)); - EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer)); - EXPECT_EQ(ConfigChangeType::CHANGE_PEER, type); - EXPECT_EQ(addPeer1.address(), alterPeer.address()); - } - // leader term小于0 - { - CopysetNode copysetNode(logicPoolID, copysetID, conf); - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - ConfigChangeType type; - Configuration oldConf; - Peer alterPeer; - copysetNode.on_leader_start(-1); - EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer)); - EXPECT_EQ(ConfigChangeType::NONE, type); - } -} - -TEST_F(CopysetNodeTest, get_hash) { - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT - std::string rmCmd("rm -f "); - rmCmd += kCurveConfEpochFilename; - - LogicPoolID logicPoolID = 1 + 1; - CopysetID copysetID = 1 + 1; - Configuration conf; - Configuration conf1; - PeerId peer("127.0.0.1:3200:0"); - PeerId peer1("127.0.0.1:3201:0"); - PeerId emptyPeer; - conf.add_peer(peer); - conf1.add_peer(peer); - conf1.add_peer(peer1); - - std::string hashValue = std::to_string(1355371765); - // get hash - { - std::string hash; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - - ASSERT_EQ(0, 
copysetNode.Init(defaultOptions_)); - - // 生成多个有数据的文件 - ::system("echo \"abcddddddddd333\" >" - "copyset_node_test/8589934594/data/test-2.txt"); - ::system("echo \"mmmmmmmm\" >" - "copyset_node_test/8589934594/data/test-4.txt"); - ::system("dd if=/dev/zero of=" - "copyset_node_test/8589934594/data/test-3.txt bs=512 count=15"); // NOLINT - ::system("echo \"eeeeeeeeeee\" > " - "copyset_node_test/8589934594/data/test-5.txt"); - - ::system("touch copyset_node_test/8589934594/data/test-1.txt"); - ::system("echo \"wwwww\" > " - "copyset_node_test/8589934594/data/test-1.txt"); - - // 获取hash - ASSERT_EQ(0, copysetNode.GetHash(&hash)); - ASSERT_STREQ(hashValue.c_str(), hash.c_str()); - ::system("rm -fr copyset_node_test/8589934594"); - } - - { - std::string hash; - // 使用不同的copyset id,让目录不一样 - CopysetNode copysetNode(logicPoolID, copysetID + 1, conf); - - ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - - // 生成多个有数据的文件,并且交换生成文件的顺序 - ::system("touch copyset_node_test/8589934595/data/test-1.txt"); - ::system("echo \"wwwww\" > " - "copyset_node_test/8589934595/data/test-1.txt"); - - ::system("echo \"mmmmmmmm\" > " - "copyset_node_test/8589934595/data/test-4.txt"); - ::system("echo \"eeeeeeeeeee\" > " - "copyset_node_test/8589934595/data/test-5.txt"); - ::system("dd if=/dev/zero of=" - "copyset_node_test/8589934595/data/test-3.txt bs=512 count=15"); // NOLINT - ::system("echo \"abcddddddddd333\" > " - "copyset_node_test/8589934595/data/test-2.txt"); - - // 获取hash - ASSERT_EQ(0, copysetNode.GetHash(&hash)); - ASSERT_STREQ(hashValue.c_str(), hash.c_str()); - ::system("rm -fr copyset_node_test/8589934595"); - } - - // List failed - { - std::string hash; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - - std::shared_ptr - mockfs = std::make_shared(); - copysetNode.SetLocalFileSystem(mockfs); - - std::vector files; - files.push_back("test-1.txt"); - - - EXPECT_CALL(*mockfs, List(_, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(-1)); - - ASSERT_EQ(-1, copysetNode.GetHash(&hash)); - } - - // List success - { - std::string hash; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); - copysetNode.SetLocalFileSystem(mockfs); - - std::vector files; - - EXPECT_CALL(*mockfs, List(_, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - - ASSERT_EQ(0, copysetNode.GetHash(&hash)); - ASSERT_EQ(hash, "0"); - } +#include "test/chunkserver/mock_node.h" +#include "test/fs/mock_local_filesystem.h" - // List success, open failed +namespace curve +{ + namespace chunkserver { - std::string hash; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); - copysetNode.SetLocalFileSystem(mockfs); - std::vector files; - files.push_back("test-1.txt"); - - - EXPECT_CALL(*mockfs, List(_, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(-1)); + using ::testing::_; + using ::testing::AnyNumber; + using ::testing::AtLeast; + using ::testing::DoAll; + using ::testing::InSequence; + using ::testing::Invoke; + using ::testing::Matcher; + using ::testing::Return; + using ::testing::SaveArgPointee; + using ::testing::SetArgPointee; + using ::testing::SetArgReferee; + + using curve::chunkserver::concurrent::ConcurrentApplyOption; + using curve::fs::FileSystemType; + using curve::fs::MockLocalFileSystem; + + const char copysetUri[] = 
"local://./copyset_node_test"; + const int port = 9044; + + class FakeSnapshotReader : public braft::SnapshotReader + { + public: + std::string get_path() + { + /*Returns a non-existent path*/ + return std::string("/1002093939/temp/238408034"); + } + void list_files(std::vector *files) { return; } + int load_meta(braft::SnapshotMeta *meta) { return 1; } + std::string generate_uri_for_copy() { return std::string(""); } + }; - ASSERT_EQ(-1, copysetNode.GetHash(&hash)); - } + class FakeSnapshotWriter : public braft::SnapshotWriter + { + public: + std::string get_path() + { + /*Returns a non-existent path*/ + return std::string("."); + } + void list_files(std::vector *files) { return; } + virtual int save_meta(const braft::SnapshotMeta &meta) { return 0; } + + virtual int add_file(const std::string &filename) { return 0; } + + virtual int add_file(const std::string &filename, + const ::google::protobuf::Message *file_meta) + { + return 0; + } + + virtual int remove_file(const std::string &filename) { return 0; } + }; - // List success, open success,fstat failed - { - std::string hash; - CopysetNode copysetNode(logicPoolID, copysetID, conf); - std::shared_ptr - mockfs = std::make_shared(); - copysetNode.SetLocalFileSystem(mockfs); + class FakeClosure : public braft::Closure + { + public: + void Run() { std::cerr << "FakeClosure run" << std::endl; } + }; - std::vector files; - files.push_back("test-1.txt"); + class CopysetNodeTest : public ::testing::Test + { + protected: + void SetUp() + { + defaultOptions_.ip = "127.0.0.1"; + defaultOptions_.port = port; + defaultOptions_.electionTimeoutMs = 1000; + defaultOptions_.snapshotIntervalS = 30; + defaultOptions_.catchupMargin = 50; + defaultOptions_.chunkDataUri = copysetUri; + defaultOptions_.chunkSnapshotUri = copysetUri; + defaultOptions_.logUri = copysetUri; + defaultOptions_.raftMetaUri = copysetUri; + defaultOptions_.raftSnapshotUri = copysetUri; + defaultOptions_.loadConcurrency = 5; + defaultOptions_.syncConcurrency = 20; + defaultOptions_.checkRetryTimes = 3; + defaultOptions_.finishLoadMargin = 1000; + + defaultOptions_.concurrentapply = &concurrentModule_; + ConcurrentApplyOption opt{2, 1, 2, 1}; + defaultOptions_.concurrentapply->Init(opt); + std::shared_ptr fs = + LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); + ASSERT_TRUE(nullptr != fs); + defaultOptions_.localFileSystem = fs; + defaultOptions_.chunkFilePool = std::make_shared(fs); + defaultOptions_.trash = std::make_shared(); + defaultOptions_.enableOdsyncWhenOpenChunkFile = true; + } + + void TearDown() { ::system("rm -rf copyset_node_test"); } + + protected: + CopysetNodeOptions defaultOptions_; + ConcurrentApplyModule concurrentModule_; + }; + TEST_F(CopysetNodeTest, error_test) + { + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT + std::string rmCmd("rm -f "); + rmCmd += kCurveConfEpochFilename; + + // on_snapshot_save: List failed + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + std::vector files; + files.push_back("test-1.txt"); + files.push_back("test-2.txt"); + + const char *json = + "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":" + "774340440}"; // NOLINT + std::string jsonStr(json); + + CopysetNode copysetNode(logicPoolID, copysetID, conf); + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + FakeClosure closure; + FakeSnapshotWriter writer; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + + 
copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10)); + EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)) + .Times(1) + .WillOnce(Return(jsonStr.size())); + EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*mockfs, List(_, _)).Times(1).WillOnce(Return(-1)); + + copysetNode.on_snapshot_save(&writer, &closure); + copysetNode.WaitSnapshotDone(); + LOG(INFO) << closure.status().error_cstr(); + } + + // on_snapshot_save: save conf open failed + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + std::vector files; + files.push_back("test-1.txt"); + files.push_back("test-2.txt"); + + CopysetNode copysetNode(logicPoolID, copysetID, conf); + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + FakeClosure closure; + FakeSnapshotWriter writer; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + + copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1)); + + copysetNode.on_snapshot_save(&writer, &closure); + copysetNode.WaitSnapshotDone(); + LOG(INFO) << closure.status().error_cstr(); + } + // on_snapshot_save: success + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + std::vector files; + files.push_back("test-1.txt"); + files.push_back("test-2.txt"); + + const char *json = + "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":" + "774340440}"; // NOLINT + std::string jsonStr(json); + + CopysetNode copysetNode(logicPoolID, copysetID, conf); + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + FakeClosure closure; + FakeSnapshotWriter writer; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + + copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10)); + EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)) + .Times(1) + .WillOnce(Return(jsonStr.size())); + EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); + + copysetNode.on_snapshot_save(&writer, &closure); + copysetNode.WaitSnapshotDone(); + } + + // on_snapshot_save: success, enableOdsyncWhenOpenChunkFile_ = false + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + std::vector files; + files.push_back("test-1.txt"); + files.push_back("test-2.txt"); + + const char *json = + "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":" + "774340440}"; // NOLINT + std::string jsonStr(json); + + CopysetNode copysetNode(logicPoolID, copysetID, conf); + defaultOptions_.enableOdsyncWhenOpenChunkFile = false; + defaultOptions_.syncConcurrency = 20; + defaultOptions_.syncChunkLimit = 2 * 1024 * 1024; + defaultOptions_.syncThreshold = 65536; + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + FakeClosure closure; + FakeSnapshotWriter writer; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + + copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + 
EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(10)); + EXPECT_CALL(*mockfs, Write(_, Matcher(_), _, _)) + .Times(1) + .WillOnce(Return(jsonStr.size())); + EXPECT_CALL(*mockfs, Fsync(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*mockfs, Close(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*mockfs, List(_, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); + + copysetNode.on_snapshot_save(&writer, &closure); + copysetNode.WaitSnapshotDone(); + } + // ShipToSync & handle sync time out + { + CopysetNode::copysetSyncPool_ = + std::make_shared>(); + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + + defaultOptions_.enableOdsyncWhenOpenChunkFile = false; + defaultOptions_.syncConcurrency = 20; + defaultOptions_.syncChunkLimit = 2 * 1024 * 1024; + defaultOptions_.syncThreshold = 65536; + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + + ChunkID id1 = 100; + ChunkID id2 = 200; + ChunkID id3 = 100; + copysetNode.ShipToSync(id1); + copysetNode.ShipToSync(id2); + copysetNode.ShipToSync(id3); + copysetNode.HandleSyncTimerOut(); + } + + // on_snapshot_load: Dir not exist, File not exist, data init success + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + FakeClosure closure; + FakeSnapshotReader reader; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + DataStoreOptions options; + options.baseDir = "./test-temp"; + options.chunkSize = 16 * 1024 * 1024; + options.metaPageSize = 4 * 1024; + options.blockSize = 4 * 1024; + std::shared_ptr dataStore = + std::make_shared(options, fs); + copysetNode.SetCSDateStore(dataStore); + + EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(false)); + EXPECT_CALL(*mockfs, FileExists(_)).Times(1).WillOnce(Return(false)); + + ASSERT_EQ(0, copysetNode.on_snapshot_load(&reader)); + LOG(INFO) << "OK"; + } + // on_snapshot_load: Dir not exist, File not exist, data init failed + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + FakeClosure closure; + FakeSnapshotReader reader; + copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + DataStoreOptions options; + options.baseDir = "./test-temp"; + options.chunkSize = 16 * 1024 * 1024; + options.metaPageSize = 4 * 1024; + options.blockSize = 4 * 1024; + std::shared_ptr dataStore = + std::make_shared(options, fs); + copysetNode.SetCSDateStore(dataStore); + dataStore->InjectError(); + + EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(false)); + EXPECT_CALL(*mockfs, FileExists(_)).Times(1).WillOnce(Return(false)); + + ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); + LOG(INFO) << "OK"; + } + // on_snapshot_load: Dir not exist, File exist, load conf.epoch failed + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + FakeClosure closure; + FakeSnapshotReader reader; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + 
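// The on_snapshot_load cases around here each pin one branch of a decision
// tree that, judging by the mocked DirExists/FileExists/Open calls, looks
// roughly like this. Hypothetical free function, heavily simplified:
int LoadSnapshotSketch(const std::shared_ptr<curve::fs::LocalFileSystem>& fs,
                       const std::string& dataDir,
                       const std::string& confEpochPath) {
    if (fs->DirExists(dataDir)) {
        // stale data dir: delete it and rename the snapshot into place; the
        // "delete failed" and "rename failed" cases return -1 from here
    }
    if (fs->FileExists(confEpochPath)) {
        // conf.epoch present: load it; Open() returning -1 fails this branch
        return fs->Open(confEpochPath, 0) < 0 ? -1 : 0;
    }
    return 0;  // no conf.epoch: (re)initialize the datastore instead
}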
copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + + EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(false)); + EXPECT_CALL(*mockfs, FileExists(_)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1)); + + ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); + } + + // on_snapshot_load: Dir exist, delete failed + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + FakeClosure closure; + FakeSnapshotReader reader; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + MockCurveFilesystemAdaptor *cfa = new MockCurveFilesystemAdaptor(); + auto sfs = new scoped_refptr(cfa); + copysetNode.SetSnapshotFileSystem(sfs); + copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*cfa, delete_file(_, _)).Times(1).WillOnce(Return(false)); + + ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); + } + + // on_snapshot_load: Dir exist, delete success, rename failed + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + FakeClosure closure; + FakeSnapshotReader reader; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + defaultOptions_.localFileSystem = mockfs; + MockCurveFilesystemAdaptor *cfa = new MockCurveFilesystemAdaptor(); + auto sfs = new scoped_refptr(cfa); + copysetNode.SetSnapshotFileSystem(sfs); + copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*cfa, delete_file(_, _)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*cfa, rename(_, _)).Times(1).WillOnce(Return(false)); + + ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); + } + + // on_snapshot_load: Dir exist, rename success + // file exist, open failed + { + LogicPoolID logicPoolID = 1; + CopysetID copysetID = 1; + Configuration conf; + std::vector files; + files.push_back("test-1.txt"); + + CopysetNode copysetNode(logicPoolID, copysetID, conf); + FakeClosure closure; + FakeSnapshotReader reader; + std::shared_ptr mockfs = + std::make_shared(); + std::unique_ptr epochFile(new ConfEpochFile(mockfs)); + ; + defaultOptions_.localFileSystem = mockfs; + MockCurveFilesystemAdaptor *cfa = new MockCurveFilesystemAdaptor(); + auto sfs = new scoped_refptr(cfa); + copysetNode.SetSnapshotFileSystem(sfs); + copysetNode.SetLocalFileSystem(mockfs); + copysetNode.SetConfEpochFile(std::move(epochFile)); + EXPECT_CALL(*mockfs, DirExists(_)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*cfa, delete_file(_, _)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*cfa, rename(_, _)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*mockfs, FileExists(_)).Times(1).WillOnce(Return(true)); + EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1)); + + ASSERT_EQ(-1, copysetNode.on_snapshot_load(&reader)); + LOG(INFO) << "OK"; + } + /* on_error */ + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + braft::Error error; + ASSERT_DEATH(copysetNode.on_error(error), ".*raft error.*"); + } + /* Fini, raftNode is null */ + { + 
LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + copysetNode.Fini(); + } + /* Fini, raftNode is not null */ + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + std::vector files; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + defaultOptions_.localFileSystem = fs; + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + copysetNode.Fini(); + } + /* Load/SaveConfEpoch */ + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + defaultOptions_.localFileSystem = fs; + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + ASSERT_EQ(0, copysetNode.SaveConfEpoch(kCurveConfEpochFilename)); + ASSERT_EQ(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename)); + ASSERT_EQ(0, copysetNode.GetConfEpoch()); + copysetNode.Fini(); + ::system(rmCmd.c_str()); + } + /* load: ConfEpochFile load failed*/ + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + Configuration conf; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + defaultOptions_.localFileSystem = fs; + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename)); + copysetNode.Fini(); + ::system(rmCmd.c_str()); + } + /* Load: logic pool id error */ + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + uint64_t epoch = 12; + Configuration conf; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + auto fs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); + ConfEpochFile confEpochFile(fs); + ASSERT_EQ(0, confEpochFile.Save(kCurveConfEpochFilename, + logicPoolID + 1, copysetID, epoch)); + defaultOptions_.localFileSystem = fs; + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename)); + copysetNode.Fini(); + ::system(rmCmd.c_str()); + } + /* Load: copyset id error */ + { + LogicPoolID logicPoolID = 123; + CopysetID copysetID = 1345; + uint64_t epoch = 12; + Configuration conf; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + auto fs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); + ConfEpochFile confEpochFile(fs); + ASSERT_EQ(0, confEpochFile.Save(kCurveConfEpochFilename, logicPoolID, + copysetID + 1, epoch)); + defaultOptions_.localFileSystem = fs; + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + ASSERT_NE(0, copysetNode.LoadConfEpoch(kCurveConfEpochFilename)); + copysetNode.Fini(); + ::system(rmCmd.c_str()); + } + } - EXPECT_CALL(*mockfs, List(_, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); - EXPECT_CALL(*mockfs, Open(_, _)).Times(1) - .WillOnce(Return(3)); - EXPECT_CALL(*mockfs, Fstat(_, _)).Times(1) - .WillOnce(Return(-1)); + TEST_F(CopysetNodeTest, get_conf_change) + { + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT + std::string rmCmd("rm -f "); + rmCmd += kCurveConfEpochFilename; + + LogicPoolID logicPoolID = 1; + CopysetID copysetID = 1; + Configuration conf; + Configuration conf1; + Configuration conf2; + PeerId peer("127.0.0.1:3200:0"); + PeerId peer1("127.0.0.1:3201:0"); + PeerId emptyPeer; + conf.add_peer(peer); + conf1.add_peer(peer); + conf1.add_peer(peer1); + conf2.add_peer(peer1); + + // There are currently no configuration changes in progress + { + CopysetNode copysetNode(logicPoolID, copysetID, conf); + std::shared_ptr 
mockNode = + std::make_shared(logicPoolID, copysetID); + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + copysetNode.SetCopysetNode(mockNode); + + ConfigChangeType type; + Configuration oldConf; + Peer alterPeer; + + copysetNode.on_leader_start(8); + NodeStatus status; + status.state = braft::State::STATE_LEADER; + EXPECT_CALL(*mockNode, get_status(_)) + .WillOnce(SetArgPointee<0>(status)); + EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer)); + EXPECT_EQ(ConfigChangeType::NONE, type); + } + // Currently adding Peer + { + CopysetNode copysetNode(logicPoolID, copysetID, conf); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + copysetNode.SetCopysetNode(mockNode); + + ConfigChangeType type; + Configuration oldConf; + Peer alterPeer; + + copysetNode.on_leader_start(8); + + EXPECT_CALL(*mockNode, add_peer(_, _)).Times(1); + EXPECT_CALL(*mockNode, remove_peer(_, _)) + .WillOnce(Invoke([](const PeerId &peer, braft::Closure *done) + { done->status().set_error(-1, + "another config change is ongoing"); })); + Peer addPeer; + addPeer.set_address("127.0.0.1:3202:0"); + Peer removePeer; + removePeer.set_address("127.0.0.1:3200:0"); + copysetNode.AddPeer(addPeer); + copysetNode.RemovePeer(removePeer); + + NodeStatus status; + status.state = braft::State::STATE_LEADER; + EXPECT_CALL(*mockNode, get_status(_)) + .WillOnce(SetArgPointee<0>(status)); + EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer)); + EXPECT_EQ(ConfigChangeType::ADD_PEER, type); + EXPECT_EQ(addPeer.address(), alterPeer.address()); + } + // Currently removing Peer + { + CopysetNode copysetNode(logicPoolID, copysetID, conf); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + copysetNode.SetCopysetNode(mockNode); + + ConfigChangeType type; + Configuration oldConf; + Peer alterPeer; + + copysetNode.on_leader_start(8); + + EXPECT_CALL(*mockNode, remove_peer(_, _)).Times(1); + EXPECT_CALL(*mockNode, add_peer(_, _)) + .WillOnce( + Invoke([](const braft::PeerId &peer, braft::Closure *done) + { done->status().set_error( + -1, "another config change is ongoing"); })); + Peer addPeer1; + addPeer1.set_address("127.0.0.1:3202:0"); + Peer removePeer; + removePeer.set_address("127.0.0.1:3200:0"); + copysetNode.RemovePeer(removePeer); + copysetNode.AddPeer(addPeer1); + + NodeStatus status; + status.state = braft::State::STATE_LEADER; + EXPECT_CALL(*mockNode, get_status(_)) + .WillOnce(SetArgPointee<0>(status)); + EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer)); + EXPECT_EQ(ConfigChangeType::REMOVE_PEER, type); + EXPECT_EQ(removePeer.address(), alterPeer.address()); + } + // Currently transferring leader + { + CopysetNode copysetNode(logicPoolID, copysetID, conf); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + copysetNode.SetCopysetNode(mockNode); + + ConfigChangeType type; + Configuration oldConf; + Peer alterPeer; + + copysetNode.on_leader_start(8); + + Peer transferee1; + transferee1.set_address("127.0.0.1:3201:0"); + Peer transferee2; + transferee2.set_address("127.0.0.1:3200:0"); + EXPECT_CALL(*mockNode, transfer_leadership_to(_)) + .WillOnce(Return(0)) + .WillOnce(Return(-1)); + EXPECT_CALL(*mockNode, leader_id()) + .WillOnce(Return(peer)) + .WillOnce(Return(peer1)) + .WillOnce(Return(peer)); + copysetNode.TransferLeader(transferee1); + 
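// The three TransferLeader calls in this case only reach
// transfer_leadership_to twice (hence the two WillOnce return values), which
// suggests a guard that no-ops when the transferee already leads. A guess at
// that guard, illustrative only:
butil::Status TransferLeaderSketch(braft::Node* node, const Peer& transferee) {
    braft::PeerId target(transferee.address());
    if (node->leader_id() == target) {
        return butil::Status::OK();  // already the leader, nothing to do
    }
    return node->transfer_leadership_to(target) == 0
               ? butil::Status::OK()
               : butil::Status(-1, "transfer leadership failed");
}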
copysetNode.TransferLeader(transferee2); + copysetNode.TransferLeader(transferee2); + + NodeStatus status; + status.state = braft::State::STATE_TRANSFERRING; + EXPECT_CALL(*mockNode, get_status(_)) + .WillOnce(SetArgPointee<0>(status)); + EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer)); + EXPECT_EQ(ConfigChangeType::TRANSFER_LEADER, type); + EXPECT_EQ(transferee1.address(), alterPeer.address()); + } + // Currently changing Peer + { + CopysetNode copysetNode(logicPoolID, copysetID, conf); + std::shared_ptr mockNode = + std::make_shared(logicPoolID, copysetID); + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + copysetNode.SetCopysetNode(mockNode); + + ConfigChangeType type; + Configuration oldConf; + Peer alterPeer; + + copysetNode.on_leader_start(8); + + EXPECT_CALL(*mockNode, change_peers(_, _)).Times(1); + + Peer addPeer1; + addPeer1.set_address("127.0.0.1:3201:0"); + std::vector peers; + peers.emplace_back(addPeer1); + copysetNode.ChangePeer(peers); + Peer addPeer2; + addPeer2.set_address("127.0.0.1:3202:0"); + peers.emplace_back(addPeer2); + copysetNode.ChangePeer(peers); + + NodeStatus status; + status.state = braft::State::STATE_LEADER; + EXPECT_CALL(*mockNode, get_status(_)) + .WillOnce(SetArgPointee<0>(status)); + EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer)); + EXPECT_EQ(ConfigChangeType::CHANGE_PEER, type); + EXPECT_EQ(addPeer1.address(), alterPeer.address()); + } + // leader term is less than 0 + { + CopysetNode copysetNode(logicPoolID, copysetID, conf); + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + ConfigChangeType type; + Configuration oldConf; + Peer alterPeer; + copysetNode.on_leader_start(-1); + EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer)); + EXPECT_EQ(ConfigChangeType::NONE, type); + } + } - ASSERT_EQ(-1, copysetNode.GetHash(&hash)); - } + TEST_F(CopysetNodeTest, get_hash) + { + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT + std::string rmCmd("rm -f "); + rmCmd += kCurveConfEpochFilename; + + LogicPoolID logicPoolID = 1 + 1; + CopysetID copysetID = 1 + 1; + Configuration conf; + Configuration conf1; + PeerId peer("127.0.0.1:3200:0"); + PeerId peer1("127.0.0.1:3201:0"); + PeerId emptyPeer; + conf.add_peer(peer); + conf1.add_peer(peer); + conf1.add_peer(peer1); + + std::string hashValue = std::to_string(1355371765); + // get hash + { + std::string hash; + CopysetNode copysetNode(logicPoolID, copysetID, conf); + + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + + // Generate multiple files with data + ::system( + "echo \"abcddddddddd333\" >" + "copyset_node_test/8589934594/data/test-2.txt"); + ::system( + "echo \"mmmmmmmm\" >" + "copyset_node_test/8589934594/data/test-4.txt"); + ::system( + "dd if=/dev/zero of=" + "copyset_node_test/8589934594/data/test-3.txt bs=512 count=15"); // NOLINT + ::system( + "echo \"eeeeeeeeeee\" > " + "copyset_node_test/8589934594/data/test-5.txt"); + + ::system("touch copyset_node_test/8589934594/data/test-1.txt"); + ::system( + "echo \"wwwww\" > " + "copyset_node_test/8589934594/data/test-1.txt"); + + // Get hash + ASSERT_EQ(0, copysetNode.GetHash(&hash)); + ASSERT_STREQ(hashValue.c_str(), hash.c_str()); + ::system("rm -fr copyset_node_test/8589934594"); + } + + { + std::string hash; + // Using different copyset IDs to make the directory different + CopysetNode copysetNode(logicPoolID, copysetID + 1, conf); + + ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); + + // Generate multiple files with data and exchange the order of 
generated
+        // files
+        ::system("touch copyset_node_test/8589934595/data/test-1.txt");
+        ::system(
+            "echo \"wwwww\" > "
+            "copyset_node_test/8589934595/data/test-1.txt");
+
+        ::system(
+            "echo \"mmmmmmmm\" > "
+            "copyset_node_test/8589934595/data/test-4.txt");
+        ::system(
+            "echo \"eeeeeeeeeee\" > "
+            "copyset_node_test/8589934595/data/test-5.txt");
+        ::system(
+            "dd if=/dev/zero of="
+            "copyset_node_test/8589934595/data/test-3.txt bs=512 count=15");  // NOLINT
+        ::system(
+            "echo \"abcddddddddd333\" > "
+            "copyset_node_test/8589934595/data/test-2.txt");
+
+        // Get hash
+        ASSERT_EQ(0, copysetNode.GetHash(&hash));
+        ASSERT_STREQ(hashValue.c_str(), hash.c_str());
+        ::system("rm -fr copyset_node_test/8589934595");
+    }
+
+    // List failed
+    {
+        std::string hash;
+        CopysetNode copysetNode(logicPoolID, copysetID, conf);
+
+        std::shared_ptr<MockLocalFileSystem> mockfs =
+            std::make_shared<MockLocalFileSystem>();
+        copysetNode.SetLocalFileSystem(mockfs);
+
+        std::vector<std::string> files;
+        files.push_back("test-1.txt");
+
+        EXPECT_CALL(*mockfs, List(_, _))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<1>(files), Return(0)));
+        EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1));
+
+        ASSERT_EQ(-1, copysetNode.GetHash(&hash));
+    }
+
+    // List success
+    {
+        std::string hash;
+        CopysetNode copysetNode(logicPoolID, copysetID, conf);
+        std::shared_ptr<MockLocalFileSystem> mockfs =
+            std::make_shared<MockLocalFileSystem>();
+        copysetNode.SetLocalFileSystem(mockfs);
+
+        std::vector<std::string> files;
+
+        EXPECT_CALL(*mockfs, List(_, _))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<1>(files), Return(0)));
+
+        ASSERT_EQ(0, copysetNode.GetHash(&hash));
+        ASSERT_EQ(hash, "0");
+    }
+
+    // List success, open failed
+    {
+        std::string hash;
+        CopysetNode copysetNode(logicPoolID, copysetID, conf);
+        std::shared_ptr<MockLocalFileSystem> mockfs =
+            std::make_shared<MockLocalFileSystem>();
+        copysetNode.SetLocalFileSystem(mockfs);
+
+        std::vector<std::string> files;
+        files.push_back("test-1.txt");
+
+        EXPECT_CALL(*mockfs, List(_, _))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<1>(files), Return(0)));
+        EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(-1));
+
+        ASSERT_EQ(-1, copysetNode.GetHash(&hash));
+    }
+
+    // List success, open success, fstat failed
+    {
+        std::string hash;
+        CopysetNode copysetNode(logicPoolID, copysetID, conf);
+        std::shared_ptr<MockLocalFileSystem> mockfs =
+            std::make_shared<MockLocalFileSystem>();
+        copysetNode.SetLocalFileSystem(mockfs);
+
+        std::vector<std::string> files;
+        files.push_back("test-1.txt");
+
+        EXPECT_CALL(*mockfs, List(_, _))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<1>(files), Return(0)));
+        EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(3));
+        EXPECT_CALL(*mockfs, Fstat(_, _)).Times(1).WillOnce(Return(-1));
+
+        ASSERT_EQ(-1, copysetNode.GetHash(&hash));
+    }
+
+    // List success, open success, fstat success, read failed
+    {
+        std::string hash;
+        struct stat fileInfo;
+        fileInfo.st_size = 1024;
+        CopysetNode copysetNode(logicPoolID, copysetID, conf);
+        std::shared_ptr<MockLocalFileSystem> mockfs =
+            std::make_shared<MockLocalFileSystem>();
+        copysetNode.SetLocalFileSystem(mockfs);
+
+        std::vector<std::string> files;
+        files.push_back("test-1.txt");
+
+        EXPECT_CALL(*mockfs, List(_, _))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<1>(files), Return(0)));
+        EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(3));
+        EXPECT_CALL(*mockfs, Fstat(_, _))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
+        EXPECT_CALL(*mockfs, Read(_, _, _, _)).Times(1).WillOnce(Return(-1));
+
+        ASSERT_EQ(-1, copysetNode.GetHash(&hash));
+    }
+
+    // List success, open success, fstat success, read success
+    {
+        char *buff = new (std::nothrow) char[1024];
+        ::memset(buff, 'a', 1024);
+        std::string hash;
+        struct stat fileInfo;
+        fileInfo.st_size = 1024;
+        CopysetNode copysetNode(logicPoolID, copysetID, conf);
+        std::shared_ptr<MockLocalFileSystem> mockfs =
+            std::make_shared<MockLocalFileSystem>();
+        copysetNode.SetLocalFileSystem(mockfs);
+
+        std::vector<std::string> files;
+        files.push_back("test-1.txt");
+
+        EXPECT_CALL(*mockfs, List(_, _))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<1>(files), Return(0)));
+        EXPECT_CALL(*mockfs, Open(_, _)).Times(1).WillOnce(Return(3));
+        EXPECT_CALL(*mockfs, Fstat(_, _))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
+        EXPECT_CALL(*mockfs, Read(_, _, _, _))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<1>(*buff), Return(1024)));
+
+        ASSERT_EQ(0, copysetNode.GetHash(&hash));
+    }
+ }
-    // List success, open success, fstat success, read failed
-    {
-        std::string hash;
-        struct stat fileInfo;
-        fileInfo.st_size = 1024;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        std::shared_ptr<MockLocalFileSystem>
-            mockfs = std::make_shared<MockLocalFileSystem>();
-        copysetNode.SetLocalFileSystem(mockfs);
-
-        std::vector<std::string> files;
-        files.push_back("test-1.txt");
-
-
-        EXPECT_CALL(*mockfs, List(_, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<1>(files), Return(0)));
-        EXPECT_CALL(*mockfs, Open(_, _)).Times(1)
-            .WillOnce(Return(3));
-        EXPECT_CALL(*mockfs, Fstat(_, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
-        EXPECT_CALL(*mockfs, Read(_, _, _, _)).Times(1)
-            .WillOnce(Return(-1));
-
-        ASSERT_EQ(-1, copysetNode.GetHash(&hash));
-    }
-
-    // List success, open success, fstat success, read success
-    {
-        char *buff = new (std::nothrow) char[1024];
-        ::memset(buff, 'a', 1024);
-        std::string hash;
-        struct stat fileInfo;
-        fileInfo.st_size = 1024;
-        CopysetNode copysetNode(logicPoolID, copysetID, conf);
-        std::shared_ptr<MockLocalFileSystem>
-            mockfs = std::make_shared<MockLocalFileSystem>();
-        copysetNode.SetLocalFileSystem(mockfs);
-
-        std::vector<std::string> files;
-        files.push_back("test-1.txt");
-
-
-        EXPECT_CALL(*mockfs, List(_, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<1>(files), Return(0)));
-        EXPECT_CALL(*mockfs, Open(_, _)).Times(1)
-            .WillOnce(Return(3));
-        EXPECT_CALL(*mockfs, Fstat(_, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
-        EXPECT_CALL(*mockfs, Read(_, _, _, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<1>(*buff), Return(1024)));
-
-        ASSERT_EQ(0, copysetNode.GetHash(&hash));
-    }
-}
-
-TEST_F(CopysetNodeTest, get_leader_status) {
-    LogicPoolID logicPoolID = 1;
-    CopysetID copysetID = 1;
-    Configuration conf;
-    std::shared_ptr<MockNode> mockNode
-        = std::make_shared<MockNode>(logicPoolID,
-                                     copysetID);
-    CopysetNode copysetNode(logicPoolID, copysetID, conf);
-    copysetNode.SetCopysetNode(mockNode);
-
-    // 当前peer不是leader,且当前无leader
-    {
-        NodeStatus status;
-        EXPECT_CALL(*mockNode, get_status(_))
-            .WillOnce(SetArgPointee<0>(status));
-        NodeStatus leaderStatus;
-        ASSERT_FALSE(copysetNode.GetLeaderStatus(&leaderStatus));
-    }
-
-    // 当前peer为leader
-    {
-        NodeStatus status;
-        status.leader_id.parse("127.0.0.1:3200:0");
-        status.peer_id = status.leader_id;
-        status.committed_index = 6666;
-        EXPECT_CALL(*mockNode, get_status(_))
-            .WillOnce(SetArgPointee<0>(status));
-        NodeStatus leaderStatus;
-        ASSERT_TRUE(copysetNode.GetLeaderStatus(&leaderStatus));
-        ASSERT_EQ(status.committed_index,
-                  leaderStatus.committed_index);
-    }
-
-    // 存在leader,但不是当前peer
-    {
-        // 模拟启动chunkserver
-        CopysetNodeManager* copysetNodeManager
-            = &CopysetNodeManager::GetInstance();
-        ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_));
-        ASSERT_EQ(0, copysetNodeManager->Run());
-        PeerId leader_peer("127.0.0.1:9044:0");
-        brpc::Server server;
-        ASSERT_EQ(0,
-                  copysetNodeManager->AddService(&server, leader_peer.addr));
-        if (server.Start(port, NULL) != 0) {
-            LOG(FATAL) << "Fail to start Server";
+    TEST_F(CopysetNodeTest, get_leader_status)
+    {
+        LogicPoolID logicPoolID = 1;
+        CopysetID copysetID = 1;
+        Configuration conf;
+        std::shared_ptr<MockNode> mockNode =
+            std::make_shared<MockNode>(logicPoolID, copysetID);
+        CopysetNode copysetNode(logicPoolID, copysetID, conf);
+        copysetNode.SetCopysetNode(mockNode);
+
+        // The current peer is not the leader, and there is currently no leader
+        {
+            NodeStatus status;
+            EXPECT_CALL(*mockNode, get_status(_))
+                .WillOnce(SetArgPointee<0>(status));
+            NodeStatus leaderStatus;
+            ASSERT_FALSE(copysetNode.GetLeaderStatus(&leaderStatus));
+        }
+
+        // The current peer is the leader
+        {
+            NodeStatus status;
+            status.leader_id.parse("127.0.0.1:3200:0");
+            status.peer_id = status.leader_id;
+            status.committed_index = 6666;
+            EXPECT_CALL(*mockNode, get_status(_))
+                .WillOnce(SetArgPointee<0>(status));
+            NodeStatus leaderStatus;
+            ASSERT_TRUE(copysetNode.GetLeaderStatus(&leaderStatus));
+            ASSERT_EQ(status.committed_index, leaderStatus.committed_index);
+        }
+
+        // There is a leader, but it is not the current peer
+        {
+            // Simulate starting a chunkserver
+            CopysetNodeManager *copysetNodeManager =
+                &CopysetNodeManager::GetInstance();
+            ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_));
+            ASSERT_EQ(0, copysetNodeManager->Run());
+            PeerId leader_peer("127.0.0.1:9044:0");
+            brpc::Server server;
+            ASSERT_EQ(0, copysetNodeManager->AddService(&server, leader_peer.addr));
+            if (server.Start(port, NULL) != 0)
+            {
+                LOG(FATAL) << "Fail to start Server";
+            }
+            // Construct a leader copyset
+            ASSERT_TRUE(copysetNodeManager->CreateCopysetNode(logicPoolID,
+                                                              copysetID, conf));
+            auto leaderNode =
+                copysetNodeManager->GetCopysetNode(logicPoolID, copysetID);
+            ASSERT_TRUE(nullptr != leaderNode);
+            // Set expected values
+            std::shared_ptr<MockNode> mockLeader =
+                std::make_shared<MockNode>(logicPoolID, copysetID);
+            leaderNode->SetCopysetNode(mockLeader);
+            NodeStatus mockLeaderStatus;
+            mockLeaderStatus.leader_id = leader_peer;
+            mockLeaderStatus.peer_id = leader_peer;
+            mockLeaderStatus.committed_index = 10000;
+            mockLeaderStatus.known_applied_index = 6789;
+            EXPECT_CALL(*mockLeader, get_status(_))
+                .WillRepeatedly(SetArgPointee<0>(mockLeaderStatus));
+
+            // Test obtaining the leader's committed index through the
+            // follower's node
+            NodeStatus followerStatus;
+            followerStatus.leader_id = leader_peer;
+            followerStatus.peer_id.parse("127.0.0.1:3201:0");
+            followerStatus.committed_index = 3456;
+            followerStatus.known_applied_index = 3456;
+            EXPECT_CALL(*mockNode, get_status(_))
+                .WillOnce(SetArgPointee<0>(followerStatus));
+
+            NodeStatus leaderStatus;
+            ASSERT_TRUE(copysetNode.GetLeaderStatus(&leaderStatus));
+            ASSERT_EQ(mockLeaderStatus.committed_index,
+                      leaderStatus.committed_index);
+            ASSERT_EQ(mockLeaderStatus.known_applied_index,
+                      leaderStatus.known_applied_index);
+        }
+    }
 }
-        // 构造leader copyset
-        ASSERT_TRUE(copysetNodeManager->CreateCopysetNode(logicPoolID,
-                                                          copysetID,
-                                                          conf));
-        auto leaderNode = copysetNodeManager->GetCopysetNode(logicPoolID,
-                                                             copysetID);
-        ASSERT_TRUE(nullptr != leaderNode);
-        // 设置预期值
-        std::shared_ptr<MockNode> mockLeader
-            = std::make_shared<MockNode>(logicPoolID,
-                                         copysetID);
-        leaderNode->SetCopysetNode(mockLeader);
-        NodeStatus mockLeaderStatus;
-        mockLeaderStatus.leader_id = leader_peer;
-        mockLeaderStatus.peer_id = leader_peer;
-        mockLeaderStatus.committed_index = 10000;
-        mockLeaderStatus.known_applied_index = 6789;
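The get_status mocking above carries this whole test: GetLeaderStatus never talks to a real raft group, it only sees whatever braft::NodeStatus the mock writes into its out-parameter. A minimal self-contained sketch of the same canned-status technique follows; FakeStatusSource is a hypothetical stand-in for illustration, not the project's MockNode, and the NodeStatus fields are the ones already used above.

    #include <braft/raft.h>
    #include <gmock/gmock.h>

    // Hypothetical mock with the same out-parameter shape as
    // braft::Node::get_status().
    struct FakeStatusSource {
        MOCK_METHOD(void, get_status, (braft::NodeStatus*));
    };

    TEST(CannedStatusSketch, LeaderFieldsComeFromTheMock) {
        FakeStatusSource src;
        braft::NodeStatus canned;
        canned.leader_id.parse("127.0.0.1:3200:0");
        canned.peer_id = canned.leader_id;  // this peer believes it is leader
        canned.committed_index = 6666;
        // SetArgPointee<0> copies the prepared status into the caller's
        // out-parameter when get_status() is invoked.
        EXPECT_CALL(src, get_status(testing::_))
            .WillOnce(testing::SetArgPointee<0>(canned));

        braft::NodeStatus out;
        src.get_status(&out);
        EXPECT_EQ(6666, out.committed_index);
    }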
-        EXPECT_CALL(*mockLeader, get_status(_))
-            .WillRepeatedly(SetArgPointee<0>(mockLeaderStatus));
-
-        // 测试通过follower的node获取leader的committed index
-        NodeStatus followerStatus;
-        followerStatus.leader_id = leader_peer;
-        followerStatus.peer_id.parse("127.0.0.1:3201:0");
-        followerStatus.committed_index = 3456;
-        followerStatus.known_applied_index = 3456;
-        EXPECT_CALL(*mockNode, get_status(_))
-            .WillOnce(SetArgPointee<0>(followerStatus));
-
-        NodeStatus leaderStatus;
-        ASSERT_TRUE(copysetNode.GetLeaderStatus(&leaderStatus));
-        ASSERT_EQ(mockLeaderStatus.committed_index,
-                  leaderStatus.committed_index);
-        ASSERT_EQ(mockLeaderStatus.known_applied_index,
-                  leaderStatus.known_applied_index);
-    }
-}
-
-TEST_F(CopysetNodeTest, is_lease_leader) {
-    LogicPoolID logicPoolID = 1;
-    CopysetID copysetID = 1;
-    Configuration conf;
-    std::shared_ptr<MockNode> mockNode
-        = std::make_shared<MockNode>(logicPoolID,
-                                     copysetID);
-    CopysetNode copysetNode(logicPoolID, copysetID, conf);
-    copysetNode.Init(defaultOptions_);
-    copysetNode.SetCopysetNode(mockNode);
-
-    EXPECT_FALSE(copysetNode.IsLeaderTerm());
-    EXPECT_EQ(-1, copysetNode.LeaderTerm());
-
-    // not leader now
-    {
-        std::vector<braft::LeaseState> states = {
-            braft::LEASE_DISABLED,
-            braft::LEASE_VALID,
-            braft::LEASE_NOT_READY,
-            braft::LEASE_EXPIRED
-        };
-        braft::LeaderLeaseStatus status;
-        for (auto &state : states) {
-            status.state = state;
-            ASSERT_FALSE(copysetNode.IsLeaseLeader(status));
+
+    TEST_F(CopysetNodeTest, is_lease_leader)
+    {
+        LogicPoolID logicPoolID = 1;
+        CopysetID copysetID = 1;
+        Configuration conf;
+        std::shared_ptr<MockNode> mockNode =
+            std::make_shared<MockNode>(logicPoolID, copysetID);
+        CopysetNode copysetNode(logicPoolID, copysetID, conf);
+        copysetNode.Init(defaultOptions_);
+        copysetNode.SetCopysetNode(mockNode);
+
+        EXPECT_FALSE(copysetNode.IsLeaderTerm());
+        EXPECT_EQ(-1, copysetNode.LeaderTerm());
+
+        // not leader now
+        {
+            std::vector<braft::LeaseState> states = {
+                braft::LEASE_DISABLED, braft::LEASE_VALID,
+                braft::LEASE_NOT_READY, braft::LEASE_EXPIRED};
+            braft::LeaderLeaseStatus status;
+            for (auto &state : states)
+            {
+                status.state = state;
+                ASSERT_FALSE(copysetNode.IsLeaseLeader(status));
+            }
+        }
+
+        // ABA problem, current node is term 8 (on leader start),
+        // but leader lease term is 10
+        {
+            copysetNode.on_leader_start(8);
+            braft::LeaderLeaseStatus status;
+            status.term = 10;
+            status.state = braft::LEASE_NOT_READY;
+            ASSERT_FALSE(copysetNode.IsLeaseLeader(status));
+        }
+
+        // normal condition
+        {
+            copysetNode.on_leader_start(10);
+            braft::LeaderLeaseStatus status;
+            status.term = 10;
+            status.state = braft::LEASE_VALID;
+            ASSERT_TRUE(copysetNode.IsLeaseLeader(status));
+        }
+    }
-        }
-    }
-    // ABA problem, current node is term 8 (on leader start),
-    // but leader lease term is 10
-    {
-        copysetNode.on_leader_start(8);
-        braft::LeaderLeaseStatus status;
-        status.term = 10;
-        status.state = braft::LEASE_NOT_READY;
-        ASSERT_FALSE(copysetNode.IsLeaseLeader(status));
-    }
-
-    // normal condition
-    {
-        copysetNode.on_leader_start(10);
-        braft::LeaderLeaseStatus status;
-        status.term = 10;
-        status.state = braft::LEASE_VALID;
-        ASSERT_TRUE(copysetNode.IsLeaseLeader(status));
-    }
-}
-
-}  // namespace chunkserver
-}  // namespace curve
+    }  // namespace chunkserver
+}  // namespace curve
diff --git a/test/chunkserver/copyset_service_test.cpp b/test/chunkserver/copyset_service_test.cpp
index 973529366b..d456b2a361 100644
--- a/test/chunkserver/copyset_service_test.cpp
+++ b/test/chunkserver/copyset_service_test.cpp
@@ -20,35 +20,34 @@
 * Author: wudemiao
 */
-#include
-#include
-#include
 #include
 #include
#include +#include +#include +#include #include -#include "src/chunkserver/trash.h" +#include "proto/chunk.pb.h" +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "proto/chunk.pb.h" +#include "src/chunkserver/trash.h" namespace curve { namespace chunkserver { using curve::fs::FileSystemType; -static std::string Exec(const char *cmd) { - FILE *pipe = popen(cmd, "r"); +static std::string Exec(const char* cmd) { + FILE* pipe = popen(cmd, "r"); if (!pipe) return "ERROR"; char buffer[4096]; std::string result = ""; while (!feof(pipe)) { - if (fgets(buffer, 1024, pipe) != NULL) - result += buffer; + if (fgets(buffer, 1024, pipe) != NULL) result += buffer; } pclose(pipe); return result; @@ -72,9 +71,7 @@ class CopysetServiceTest : public testing::Test { trash_->Init(opt); } - void TearDown() { - Exec(rmCmd.c_str()); - } + void TearDown() { Exec(rmCmd.c_str()); } protected: std::string testDir; @@ -87,7 +84,7 @@ class CopysetServiceTest : public testing::Test { butil::AtExitManager atExitManager; TEST_F(CopysetServiceTest, basic) { - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); LogicPoolID logicPoolId = 1; CopysetID copysetId = 100002; std::string ip = "127.0.0.1"; @@ -99,7 +96,8 @@ TEST_F(CopysetServiceTest, basic) { ASSERT_EQ(0, copysetNodeManager->AddService(&server, addr)); ASSERT_EQ(0, server.Start(port, NULL)); - std::shared_ptr fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT ASSERT_TRUE(nullptr != fs); butil::string_printf(©setDir, copysetDirPattern.c_str(), port); @@ -115,8 +113,7 @@ TEST_F(CopysetServiceTest, basic) { copysetNodeOptions.raftSnapshotUri = copysetDir; copysetNodeOptions.concurrentapply = new ConcurrentApplyModule(); copysetNodeOptions.localFileSystem = fs; - copysetNodeOptions.chunkFilePool = - std::make_shared(fs); + copysetNodeOptions.chunkFilePool = std::make_shared(fs); copysetNodeOptions.trash = trash_; copysetNodeOptions.enableOdsyncWhenOpenChunkFile = true; ASSERT_EQ(0, copysetNodeManager->Init(copysetNodeOptions)); @@ -128,7 +125,7 @@ TEST_F(CopysetServiceTest, basic) { LOG(FATAL) << "Fail to init channel to " << peerId.addr; } - /* 测试创建一个新的 copyset */ + /* Test creating a new copyset */ CopysetService_Stub stub(&channel); { brpc::Controller cntl; @@ -149,7 +146,7 @@ TEST_F(CopysetServiceTest, basic) { COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } - /* 测试创建一个重复 copyset */ + /* Test creating a duplicate copyset */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -169,7 +166,7 @@ TEST_F(CopysetServiceTest, basic) { response.status()); } - /* 非法参数测试 */ + /* Illegal parameter testing */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -213,8 +210,8 @@ TEST_F(CopysetServiceTest, basic) { ASSERT_EQ(response.status(), COPYSET_OP_STATUS_FAILURE_UNKNOWN); // CASE 3: delete broken copyset success - ASSERT_TRUE(copysetNodeManager-> - DeleteCopysetNode(logicPoolId, copysetId)); + ASSERT_TRUE( + copysetNodeManager->DeleteCopysetNode(logicPoolId, copysetId)); cntl.Reset(); request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); @@ -228,8 +225,8 @@ TEST_F(CopysetServiceTest, basic) { } TEST_F(CopysetServiceTest, basic2) { - /********************* 设置初始环境 
***********************/ - CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); + /********************* Set Up Initial Environment ***********************/ + CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); LogicPoolID logicPoolId = 2; CopysetID copysetId = 100003; std::string ip = "127.0.0.1"; @@ -241,7 +238,8 @@ TEST_F(CopysetServiceTest, basic2) { ASSERT_EQ(0, copysetNodeManager->AddService(&server, addr)); ASSERT_EQ(0, server.Start(port, NULL)); - std::shared_ptr fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); // NOLINT ASSERT_TRUE(nullptr != fs); butil::string_printf(©setDir, copysetDirPattern.c_str(), port); @@ -257,8 +255,7 @@ TEST_F(CopysetServiceTest, basic2) { copysetNodeOptions.raftSnapshotUri = copysetDir; copysetNodeOptions.concurrentapply = new ConcurrentApplyModule(); copysetNodeOptions.localFileSystem = fs; - copysetNodeOptions.chunkFilePool = - std::make_shared(fs); + copysetNodeOptions.chunkFilePool = std::make_shared(fs); copysetNodeOptions.enableOdsyncWhenOpenChunkFile = true; ASSERT_EQ(0, copysetNodeManager->Init(copysetNodeOptions)); ASSERT_EQ(0, copysetNodeManager->Run()); @@ -269,9 +266,9 @@ TEST_F(CopysetServiceTest, basic2) { LOG(FATAL) << "Fail to init channel to " << peerId.addr; } - /********************** 跑测试cases ************************/ + /********************** Run Test Cases ************************/ - /* 测试创建一个新的 copyset */ + /* Test creating a new copyset */ CopysetService_Stub stub(&channel); { brpc::Controller cntl; @@ -279,15 +276,15 @@ TEST_F(CopysetServiceTest, basic2) { CopysetRequest2 request; CopysetResponse2 response; - Copyset *copyset; + Copyset* copyset; copyset = request.add_copysets(); copyset->set_logicpoolid(logicPoolId); copyset->set_copysetid(copysetId); - Peer *peer1 = copyset->add_peers(); + Peer* peer1 = copyset->add_peers(); peer1->set_address("127.0.0.1:9040:0"); - Peer *peer2 = copyset->add_peers(); + Peer* peer2 = copyset->add_peers(); peer2->set_address("127.0.0.1:9041:0"); - Peer *peer3 = copyset->add_peers(); + Peer* peer3 = copyset->add_peers(); peer3->set_address("127.0.0.1:9042:0"); stub.CreateCopysetNode2(&cntl, &request, &response, nullptr); @@ -298,22 +295,22 @@ TEST_F(CopysetServiceTest, basic2) { COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } - /* 测试创建一个重复 copyset */ + /* Test creating a duplicate copyset */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); CopysetRequest2 request; CopysetResponse2 response; - Copyset *copyset; + Copyset* copyset; copyset = request.add_copysets(); copyset->set_logicpoolid(logicPoolId); copyset->set_copysetid(copysetId); - Peer *peer1 = copyset->add_peers(); + Peer* peer1 = copyset->add_peers(); peer1->set_address("127.0.0.1:9040:0"); - Peer *peer2 = copyset->add_peers(); + Peer* peer2 = copyset->add_peers(); peer2->set_address("127.0.0.1:9041:0"); - Peer *peer3 = copyset->add_peers(); + Peer* peer3 = copyset->add_peers(); peer3->set_address("127.0.0.1:9042:0"); stub.CreateCopysetNode2(&cntl, &request, &response, nullptr); @@ -324,7 +321,7 @@ TEST_F(CopysetServiceTest, basic2) { response.status()); } - /* 创建多个copyset */ + /* Create multiple copysets */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -332,31 +329,31 @@ TEST_F(CopysetServiceTest, basic2) { CopysetRequest2 request; CopysetResponse2 response; - // 准备第1个copyset + // Prepare the first copyset { - Copyset *copyset; + Copyset* copyset; copyset = request.add_copysets(); 
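Every RPC in this file follows the same synchronous brpc stub pattern: a fresh brpc::Controller carries the per-call timeout and error state, and passing nullptr as the done closure makes the call block until a response or failure. A minimal sketch under those assumptions; EchoService_Stub, EchoRequest and EchoResponse are hypothetical protobuf-generated names standing in for CopysetService_Stub and its messages.

    #include <brpc/channel.h>
    #include <brpc/controller.h>

    // One blocking RPC: the Controller must not be reused across calls
    // without Reset(), which is why the tests above call cntl.Reset().
    bool CallOnce(EchoService_Stub* stub, const EchoRequest& req,
                  EchoResponse* resp) {
        brpc::Controller cntl;
        cntl.set_timeout_ms(3000);               // per-call timeout, as above
        stub->Echo(&cntl, &req, resp, nullptr);  // nullptr done => synchronous
        if (cntl.Failed()) {
            LOG(ERROR) << "RPC failed: " << cntl.ErrorText();
            return false;
        }
        return true;
    }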
copyset->set_logicpoolid(logicPoolId); copyset->set_copysetid(copysetId + 1); - Peer *peer1 = copyset->add_peers(); + Peer* peer1 = copyset->add_peers(); peer1->set_address("127.0.0.1:9040:0"); - Peer *peer2 = copyset->add_peers(); + Peer* peer2 = copyset->add_peers(); peer2->set_address("127.0.0.1:9041:0"); - Peer *peer3 = copyset->add_peers(); + Peer* peer3 = copyset->add_peers(); peer3->set_address("127.0.0.1:9042:0"); } - // 准备第2个copyset + // Prepare the second copyset { - Copyset *copyset; + Copyset* copyset; copyset = request.add_copysets(); copyset->set_logicpoolid(logicPoolId); copyset->set_copysetid(copysetId + 2); - Peer *peer1 = copyset->add_peers(); + Peer* peer1 = copyset->add_peers(); peer1->set_address("127.0.0.1:9040:0"); - Peer *peer2 = copyset->add_peers(); + Peer* peer2 = copyset->add_peers(); peer2->set_address("127.0.0.1:9041:0"); - Peer *peer3 = copyset->add_peers(); + Peer* peer3 = copyset->add_peers(); peer3->set_address("127.0.0.1:9042:0"); } @@ -370,18 +367,18 @@ TEST_F(CopysetServiceTest, basic2) { // get status { - // 创建一个copyset + // Create a copyset { brpc::Controller cntl; cntl.set_timeout_ms(3000); CopysetRequest2 request; CopysetResponse2 response; - Copyset *copyset; + Copyset* copyset; copyset = request.add_copysets(); copyset->set_logicpoolid(logicPoolId); copyset->set_copysetid(copysetId + 3); - Peer *peer1 = copyset->add_peers(); + Peer* peer1 = copyset->add_peers(); peer1->set_address("127.0.0.1:9040:0"); stub.CreateCopysetNode2(&cntl, &request, &response, nullptr); @@ -392,11 +389,11 @@ TEST_F(CopysetServiceTest, basic2) { COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } - // 睡眠等待leader产生 + // Sleep waiting for leader generation ::usleep(2 * 1000 * 1000); { - // query hash为false + // query hash is false std::string peerStr("127.0.0.1:9040:0"); brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -404,7 +401,7 @@ TEST_F(CopysetServiceTest, basic2) { CopysetStatusResponse response; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId + 3); - Peer *peer = new Peer(); + Peer* peer = new Peer(); request.set_allocated_peer(peer); peer->set_address(peerStr); request.set_queryhash(false); @@ -432,7 +429,7 @@ TEST_F(CopysetServiceTest, basic2) { ASSERT_FALSE(response.has_hash()); } { - // query hash为true + // query hash is true std::string peerStr("127.0.0.1:9040:0"); brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -440,7 +437,7 @@ TEST_F(CopysetServiceTest, basic2) { CopysetStatusResponse response; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId + 3); - Peer *peer = new Peer(); + Peer* peer = new Peer(); request.set_allocated_peer(peer); peer->set_address(peerStr); request.set_queryhash(true); @@ -476,4 +473,3 @@ TEST_F(CopysetServiceTest, basic2) { } // namespace chunkserver } // namespace curve - diff --git a/test/chunkserver/datastore/datastore_mock_unittest.cpp b/test/chunkserver/datastore/datastore_mock_unittest.cpp index 5910df808e..26cdd8fb9b 100644 --- a/test/chunkserver/datastore/datastore_mock_unittest.cpp +++ b/test/chunkserver/datastore/datastore_mock_unittest.cpp @@ -20,43 +20,44 @@ * Author: tongguangxun */ -#include #include -#include +#include + #include +#include #include #include "include/chunkserver/chunkserver_common.h" +#include "src/chunkserver/datastore/chunkserver_datastore.h" +#include "src/chunkserver/datastore/define.h" +#include "src/chunkserver/datastore/filename_operator.h" #include "src/common/bitmap.h" #include "src/common/crc32.h" #include "src/fs/local_filesystem.h" -#include 
"src/chunkserver/datastore/define.h" -#include "src/chunkserver/datastore/filename_operator.h" -#include "src/chunkserver/datastore/chunkserver_datastore.h" #include "test/chunkserver/datastore/mock_file_pool.h" #include "test/fs/mock_local_filesystem.h" +using curve::common::Bitmap; using curve::fs::LocalFileSystem; using curve::fs::MockLocalFileSystem; -using curve::common::Bitmap; using ::testing::_; +using ::testing::DoAll; +using ::testing::ElementsAre; using ::testing::Ge; using ::testing::Gt; -using ::testing::Return; -using ::testing::NotNull; +using ::testing::Invoke; using ::testing::Matcher; using ::testing::Mock; -using ::testing::Truly; -using ::testing::Invoke; -using ::testing::DoAll; +using ::testing::NotNull; +using ::testing::Return; using ::testing::ReturnArg; -using ::testing::ElementsAre; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; +using ::testing::Truly; -using std::shared_ptr; using std::make_shared; +using std::shared_ptr; using std::string; namespace curve { @@ -67,27 +68,21 @@ const char baseDir[] = "/home/chunkserver/copyset/data"; const char chunk1[] = "chunk_1"; const char chunk1Path[] = "/home/chunkserver/copyset/data/chunk_1"; const char chunk1snap1[] = "chunk_1_snap_1"; -const char chunk1snap1Path[] - = "/home/chunkserver/copyset/data/chunk_1_snap_1"; +const char chunk1snap1Path[] = "/home/chunkserver/copyset/data/chunk_1_snap_1"; const char chunk1snap2[] = "chunk_1_snap_2"; -const char chunk1snap2Path[] - = "/home/chunkserver/copyset/data/chunk_1_snap_2"; +const char chunk1snap2Path[] = "/home/chunkserver/copyset/data/chunk_1_snap_2"; const char chunk2[] = "chunk_2"; -const char chunk2Path[] - = "/home/chunkserver/copyset/data/chunk_2"; +const char chunk2Path[] = "/home/chunkserver/copyset/data/chunk_2"; const char chunk2snap1[] = "chunk_2_snap_1"; -const char chunk2snap1Path[] - = "/home/chunkserver/copyset/data/chunk_2_snap_1"; +const char chunk2snap1Path[] = "/home/chunkserver/copyset/data/chunk_2_snap_1"; const char temp1[] = "chunk_1_tmp"; -const char temp1Path[] - = "/home/chunkserver/copyset/data/chunk_1_tmp"; +const char temp1Path[] = "/home/chunkserver/copyset/data/chunk_1_tmp"; const char location[] = "/file1/0@curve"; const int UT_ERRNO = 1234; -bool hasCreatFlag(int flag) {return flag & O_CREAT;} +bool hasCreatFlag(int flag) { return flag & O_CREAT; } -ACTION_TEMPLATE(SetVoidArrayArgument, - HAS_1_TEMPLATE_PARAMS(int, k), +ACTION_TEMPLATE(SetVoidArrayArgument, HAS_1_TEMPLATE_PARAMS(int, k), AND_2_VALUE_PARAMS(first, last)) { auto output = reinterpret_cast(::testing::get(args)); auto input = first; @@ -100,163 +95,140 @@ class CSDataStore_test : public testing::TestWithParam< std::tuple> { public: - void SetUp() { - chunksize_ = std::get<0>(GetParam()); - blocksize_ = std::get<1>(GetParam()); - metapagesize_ = std::get<2>(GetParam()); - - chunk1MetaPage = new char[metapagesize_]; - chunk2MetaPage = new char[metapagesize_]; - chunk1SnapMetaPage = new char[metapagesize_]; - - lfs_ = std::make_shared(); - fpool_ = std::make_shared(lfs_); - DataStoreOptions options; - options.baseDir = baseDir; - options.chunkSize = chunksize_; - options.blockSize = blocksize_; - options.metaPageSize = metapagesize_; - options.locationLimit = kLocationLimit; - options.enableOdsyncWhenOpenChunkFile = true; - dataStore = std::make_shared(lfs_, - fpool_, - options); - fdMock = 100; - memset(chunk1MetaPage, 0, metapagesize_); - memset(chunk2MetaPage, 0, metapagesize_); - memset(chunk1SnapMetaPage, 0, metapagesize_); - } - - void TearDown() 
override { - delete[] chunk1MetaPage; - delete[] chunk2MetaPage; - delete[] chunk1SnapMetaPage; - } - - inline void FakeEncodeChunk(char* buf, - SequenceNum correctedSn, - SequenceNum sn, - shared_ptr bitmap = nullptr, - const std::string& location = "") { - ChunkFileMetaPage metaPage; - metaPage.version = FORMAT_VERSION; - metaPage.sn = sn; - metaPage.correctedSn = correctedSn; - metaPage.bitmap = bitmap; - metaPage.location = location; - metaPage.encode(buf); - } - - inline void FakeEncodeSnapshot(char* buf, - SequenceNum sn) { - uint32_t bits = chunksize_ / blocksize_; - SnapshotMetaPage metaPage; - metaPage.version = FORMAT_VERSION; - metaPage.sn = sn; - metaPage.bitmap = std::make_shared(bits); - metaPage.encode(buf); - } - - /** - * 构造初始环境 - * datastore存在两个chunk,分别为chunk1、chunk2 - * chunk1 和 chunk2的sn都为2,correctSn为0 - * chunk1存在快照文件,快照文件版本号为1 - * chunk2不存在快照文件 - */ - void FakeEnv() { - // fake DirExists - EXPECT_CALL(*lfs_, DirExists(baseDir)) - .WillRepeatedly(Return(true)); - // fake List - vector fileNames; - fileNames.push_back(chunk1); - fileNames.push_back(chunk1snap1); - fileNames.push_back(chunk2); - EXPECT_CALL(*lfs_, List(baseDir, NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - // fake FileExists - ON_CALL(*lfs_, FileExists(_)) - .WillByDefault(Return(false)); - EXPECT_CALL(*lfs_, FileExists(chunk1Path)) - .WillRepeatedly(Return(true)); - EXPECT_CALL(*lfs_, FileExists(chunk2Path)) - .WillRepeatedly(Return(true)); - // fake Open - ON_CALL(*lfs_, Open(_, _)) - .WillByDefault(Return(fdMock++)); - EXPECT_CALL(*lfs_, Open(_, Truly(hasCreatFlag))) - .Times(0); - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Open(chunk1Path, Truly(hasCreatFlag))) - .Times(0); - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillRepeatedly(Return(2)); - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, Truly(hasCreatFlag))) - .Times(0); - EXPECT_CALL(*lfs_, Open(chunk2Path, _)) - .WillRepeatedly(Return(3)); - EXPECT_CALL(*lfs_, Open(chunk2Path, Truly(hasCreatFlag))) - .Times(0); - // fake fpool->GetFile() - ON_CALL(*fpool_, GetFileImpl(_, NotNull())) - .WillByDefault(Return(0)); - EXPECT_CALL(*fpool_, RecycleFile(_)) - .WillRepeatedly(Return(0)); - // fake Close - ON_CALL(*lfs_, Close(_)) - .WillByDefault(Return(0)); - // fake Delete - ON_CALL(*lfs_, Delete(_)) - .WillByDefault(Return(0)); - // fake Fsync - ON_CALL(*lfs_, Fsync(_)) - .WillByDefault(Return(0)); - // fake Fstat - struct stat fileInfo; - fileInfo.st_size = chunksize_ + metapagesize_; - EXPECT_CALL(*lfs_, Fstat(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - // fake Read - ON_CALL(*lfs_, Read(Ge(1), NotNull(), Ge(0), Gt(0))) - .WillByDefault(ReturnArg<3>()); - // fake Write - ON_CALL(*lfs_, - Write(Ge(1), Matcher(NotNull()), Ge(0), Gt(0))) - .WillByDefault(ReturnArg<3>()); - ON_CALL(*lfs_, Write(Ge(1), Matcher(_), Ge(0), Gt(0))) - .WillByDefault(ReturnArg<3>()); - // fake read chunk1 metapage - FakeEncodeChunk(chunk1MetaPage, 0, 2); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) - .WillRepeatedly( - DoAll(SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); - // fake read chunk1's snapshot1 metapage - FakeEncodeSnapshot(chunk1SnapMetaPage, 1); - EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); - // fake read chunk2 metapage - 
FakeEncodeChunk(chunk2MetaPage, 0, 2);
-        EXPECT_CALL(*lfs_, Read(3, NotNull(), 0, metapagesize_))
-            .WillRepeatedly(
-                DoAll(SetArrayArgument<1>(chunk2MetaPage,
-                                          chunk2MetaPage + metapagesize_),
-                      Return(metapagesize_)));
-    }
+    void SetUp() {
+        chunksize_ = std::get<0>(GetParam());
+        blocksize_ = std::get<1>(GetParam());
+        metapagesize_ = std::get<2>(GetParam());
+
+        chunk1MetaPage = new char[metapagesize_];
+        chunk2MetaPage = new char[metapagesize_];
+        chunk1SnapMetaPage = new char[metapagesize_];
+
+        lfs_ = std::make_shared<MockLocalFileSystem>();
+        fpool_ = std::make_shared<MockFilePool>(lfs_);
+        DataStoreOptions options;
+        options.baseDir = baseDir;
+        options.chunkSize = chunksize_;
+        options.blockSize = blocksize_;
+        options.metaPageSize = metapagesize_;
+        options.locationLimit = kLocationLimit;
+        options.enableOdsyncWhenOpenChunkFile = true;
+        dataStore = std::make_shared<CSDataStore>(lfs_, fpool_, options);
+        fdMock = 100;
+        memset(chunk1MetaPage, 0, metapagesize_);
+        memset(chunk2MetaPage, 0, metapagesize_);
+        memset(chunk1SnapMetaPage, 0, metapagesize_);
+    }
+
+    void TearDown() override {
+        delete[] chunk1MetaPage;
+        delete[] chunk2MetaPage;
+        delete[] chunk1SnapMetaPage;
+    }
+
+    inline void FakeEncodeChunk(char* buf, SequenceNum correctedSn,
+                                SequenceNum sn,
+                                shared_ptr<Bitmap> bitmap = nullptr,
+                                const std::string& location = "") {
+        ChunkFileMetaPage metaPage;
+        metaPage.version = FORMAT_VERSION;
+        metaPage.sn = sn;
+        metaPage.correctedSn = correctedSn;
+        metaPage.bitmap = bitmap;
+        metaPage.location = location;
+        metaPage.encode(buf);
+    }
+
+    inline void FakeEncodeSnapshot(char* buf, SequenceNum sn) {
+        uint32_t bits = chunksize_ / blocksize_;
+        SnapshotMetaPage metaPage;
+        metaPage.version = FORMAT_VERSION;
+        metaPage.sn = sn;
+        metaPage.bitmap = std::make_shared<Bitmap>(bits);
+        metaPage.encode(buf);
+    }
+
+    /**
+     * Construct the initial environment:
+     * the datastore contains two chunks, chunk1 and chunk2;
+     * the sn of both chunk1 and chunk2 is 2, and correctedSn is 0;
+     * chunk1 has a snapshot file with version number 1;
+     * chunk2 does not have a snapshot file.
+     */
+    void FakeEnv() {
+        // fake DirExists
+        EXPECT_CALL(*lfs_, DirExists(baseDir)).WillRepeatedly(Return(true));
+        // fake List
+        vector<string> fileNames;
+        fileNames.push_back(chunk1);
+        fileNames.push_back(chunk1snap1);
+        fileNames.push_back(chunk2);
+        EXPECT_CALL(*lfs_, List(baseDir, NotNull()))
+            .WillRepeatedly(DoAll(SetArgPointee<1>(fileNames), Return(0)));
+        // fake FileExists
+        ON_CALL(*lfs_, FileExists(_)).WillByDefault(Return(false));
+        EXPECT_CALL(*lfs_, FileExists(chunk1Path)).WillRepeatedly(Return(true));
+        EXPECT_CALL(*lfs_, FileExists(chunk2Path)).WillRepeatedly(Return(true));
+        // fake Open
+        ON_CALL(*lfs_, Open(_, _)).WillByDefault(Return(fdMock++));
+        EXPECT_CALL(*lfs_, Open(_, Truly(hasCreatFlag))).Times(0);
+        EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillRepeatedly(Return(1));
+        EXPECT_CALL(*lfs_, Open(chunk1Path, Truly(hasCreatFlag))).Times(0);
+        EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillRepeatedly(Return(2));
+        EXPECT_CALL(*lfs_, Open(chunk1snap1Path, Truly(hasCreatFlag))).Times(0);
+        EXPECT_CALL(*lfs_, Open(chunk2Path, _)).WillRepeatedly(Return(3));
+        EXPECT_CALL(*lfs_, Open(chunk2Path, Truly(hasCreatFlag))).Times(0);
+        // fake fpool->GetFile()
+        ON_CALL(*fpool_, GetFileImpl(_, NotNull())).WillByDefault(Return(0));
+        EXPECT_CALL(*fpool_, RecycleFile(_)).WillRepeatedly(Return(0));
+        // fake Close
+        ON_CALL(*lfs_, Close(_)).WillByDefault(Return(0));
+        // fake Delete
+        ON_CALL(*lfs_, Delete(_)).WillByDefault(Return(0));
+        // fake Fsync
+        ON_CALL(*lfs_, Fsync(_)).WillByDefault(Return(0));
+        // fake Fstat
+        struct stat fileInfo;
+        fileInfo.st_size = chunksize_ + metapagesize_;
+        EXPECT_CALL(*lfs_, Fstat(_, _))
+            .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
+        // fake Read
+        ON_CALL(*lfs_, Read(Ge(1), NotNull(), Ge(0), Gt(0)))
+            .WillByDefault(ReturnArg<3>());
+        // fake Write
+        ON_CALL(*lfs_,
+                Write(Ge(1), Matcher<const char*>(NotNull()), Ge(0), Gt(0)))
+            .WillByDefault(ReturnArg<3>());
+        ON_CALL(*lfs_, Write(Ge(1), Matcher<butil::IOBuf>(_), Ge(0), Gt(0)))
+            .WillByDefault(ReturnArg<3>());
+        // fake read chunk1 metapage
+        FakeEncodeChunk(chunk1MetaPage, 0, 2);
+        EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_))
+            .WillRepeatedly(
+                DoAll(SetArrayArgument<1>(chunk1MetaPage,
+                                          chunk1MetaPage + metapagesize_),
+                      Return(metapagesize_)));
+        // fake read chunk1's snapshot1 metapage
+        FakeEncodeSnapshot(chunk1SnapMetaPage, 1);
+        EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_))
+            .WillRepeatedly(
+                DoAll(SetArrayArgument<1>(chunk1SnapMetaPage,
+                                          chunk1SnapMetaPage + metapagesize_),
+                      Return(metapagesize_)));
+        // fake read chunk2 metapage
+        FakeEncodeChunk(chunk2MetaPage, 0, 2);
+        EXPECT_CALL(*lfs_, Read(3, NotNull(), 0, metapagesize_))
+            .WillRepeatedly(
+                DoAll(SetArrayArgument<1>(chunk2MetaPage,
+                                          chunk2MetaPage + metapagesize_),
+                      Return(metapagesize_)));
+    }

 protected:
    int fdMock;
    std::shared_ptr<MockLocalFileSystem> lfs_;
    std::shared_ptr<MockFilePool> fpool_;
-    std::shared_ptr<CSDataStore> dataStore;
+    std::shared_ptr<CSDataStore> dataStore;
    char* chunk1MetaPage;
    char* chunk2MetaPage;
    char* chunk1SnapMetaPage;
@@ -267,8 +239,8 @@
 };

 /**
 * ConstructorTest
- * case:测试构造参数为空的情况
- * 预期结果:进程退出
+ * Case: the construction parameters are null
+ * Expected result: the process exits
 */
TEST_P(CSDataStore_test, ConstructorTest) {
    // null param test
    DataStoreOptions options;
    options.baseDir = baseDir;
    options.chunkSize = chunksize_;
    options.blockSize = blocksize_;
    options.metaPageSize = metapagesize_;
-    ASSERT_DEATH(std::make_shared<CSDataStore>(nullptr,
-                                               fpool_,
-                                               options),
-                 "");
-    ASSERT_DEATH(std::make_shared<CSDataStore>(lfs_,
-                                               nullptr,
-                                               options),
-                 "");
+    ASSERT_DEATH(std::make_shared<CSDataStore>(nullptr, fpool_, options), "");
+    ASSERT_DEATH(std::make_shared<CSDataStore>(lfs_, nullptr, options), "");
    options.baseDir = "";
-    ASSERT_DEATH(std::make_shared<CSDataStore>(lfs_,
-                                               fpool_,
-                                               options),
-                 "");
+    ASSERT_DEATH(std::make_shared<CSDataStore>(lfs_, fpool_, options), "");
 }

 /**
 * InitializeTest
- * case:存在未知类型的文件
- * 预期结果:删除该文件,返回true
+ * Case: a file of unknown type exists
+ * Expected result: the file is deleted and true is returned
 */
TEST_P(CSDataStore_test, InitializeTest1) {
    // test unknown file
-    EXPECT_CALL(*lfs_, DirExists(baseDir))
-        .Times(1)
-        .WillOnce(Return(true));
-    EXPECT_CALL(*lfs_, Mkdir(baseDir))
-        .Times(0);
+    EXPECT_CALL(*lfs_, DirExists(baseDir)).Times(1).WillOnce(Return(true));
+    EXPECT_CALL(*lfs_, Mkdir(baseDir)).Times(0);
    vector<string> fileNames;
    fileNames.push_back(temp1);
    EXPECT_CALL(*lfs_, List(baseDir, NotNull()))
        .Times(1)
-        .WillOnce(DoAll(SetArgPointee<1>(fileNames),
-                        Return(0)));
+        .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0)));
    // unknown file will be deleted
    EXPECT_TRUE(dataStore->Initialize());
 }

 /**
 * InitializeTest
- * case:存在快照文件,但是快照文件没有对应的chunk
- * 预期结果:删除快照文件,返回true
+ * Case: a snapshot file exists, but it has no corresponding chunk
+ * Expected result: the snapshot file is deleted and true is returned
 */
TEST_P(CSDataStore_test, InitializeTest2) {
    // test snapshot without chunk
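FakeEnv mixes the two gMock primitives deliberately: ON_CALL only installs a default action (any number of calls, including zero, is acceptable), while EXPECT_CALL installs an action together with a cardinality that is verified when the mock is destroyed. A minimal sketch of the difference, using a hypothetical MockFs rather than the project's MockLocalFileSystem:

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    struct MockFs {
        MOCK_METHOD(int, Close, (int fd));
    };

    TEST(GmockSketch, DefaultsVersusExpectations) {
        testing::NiceMock<MockFs> fs;
        // Default action only: any number of Close calls return 0.
        ON_CALL(fs, Close(testing::_)).WillByDefault(testing::Return(0));
        // Expectation: fd 1 must be closed exactly once, or the test fails.
        EXPECT_CALL(fs, Close(1)).Times(1);

        EXPECT_EQ(0, fs.Close(2));  // satisfied by the ON_CALL default
        fs.Close(1);                // satisfies the EXPECT_CALL
    }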
EXPECT_CALL(*lfs_, DirExists(baseDir)).Times(1).WillOnce(Return(true)); vector fileNames; fileNames.push_back(chunk2snap1); EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(chunk2Path)) - .WillRepeatedly(Return(false)); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(chunk2Path)).WillRepeatedly(Return(false)); EXPECT_TRUE(dataStore->Initialize()); } /** * InitializeTest - * case:存在chunk文件,chunk文件存在快照文件 - * 预期结果:正常加载文件,返回true + * Case: Chunk file exists, Chunk file has snapshot file + * Expected result: Loading the file normally, returning true */ TEST_P(CSDataStore_test, InitializeTest3) { // test chunk with snapshot FakeEnv(); EXPECT_TRUE(dataStore->Initialize()); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * InitializeTest - * case:存在chunk文件,chunk文件存在snapshot文件, - * List的时候snapshot先于chunk文件被list - * 预期结果:返回true + * Case: There is a chunk file, and there is a snapshot file in the chunk file, + * When listing, snapshots are listed before chunk files + * Expected result: Returns true */ TEST_P(CSDataStore_test, InitializeTest4) { // test snapshot founded before chunk file , @@ -368,19 +320,16 @@ TEST_P(CSDataStore_test, InitializeTest4) { fileNames.push_back(chunk1); EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); EXPECT_TRUE(dataStore->Initialize()); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); } /** * InitializeTest - * case:存在chunk文件,chunk文件存在两个冲突的快照文件 - * 预期结果:返回false + * Case: There is a chunk file, and there are two conflicting snapshot files in + * the chunk file Expected result: returns false */ TEST_P(CSDataStore_test, InitializeTest5) { // test snapshot conflict @@ -391,47 +340,35 @@ TEST_P(CSDataStore_test, InitializeTest5) { fileNames.push_back(chunk1snap2); EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); EXPECT_FALSE(dataStore->Initialize()); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); } /** * InitializeErrorTest - * case:data目录不存在,创建目录时失败 - * 预期结果:返回false + * Case: The data directory does not exist, creating the directory failed + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest1) { // dir not exist and mkdir failed - EXPECT_CALL(*lfs_, DirExists(baseDir)) - .Times(1) - .WillOnce(Return(false)); - EXPECT_CALL(*lfs_, Mkdir(baseDir)) - .Times(1) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, DirExists(baseDir)).Times(1).WillOnce(Return(false)); + EXPECT_CALL(*lfs_, Mkdir(baseDir)).Times(1).WillOnce(Return(-UT_ERRNO)); // List should not be called - EXPECT_CALL(*lfs_, List(baseDir, _)) - .Times(0); + EXPECT_CALL(*lfs_, List(baseDir, _)).Times(0); EXPECT_FALSE(dataStore->Initialize()); } /** * InitializeErrorTest - * case:List目录时失败 - * 预期结果:返回false + * Case: List directory failed + * 
Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest2) { // List dir failed - EXPECT_CALL(*lfs_, DirExists(baseDir)) - .Times(1) - .WillOnce(Return(false)); - EXPECT_CALL(*lfs_, Mkdir(baseDir)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*lfs_, DirExists(baseDir)).Times(1).WillOnce(Return(false)); + EXPECT_CALL(*lfs_, Mkdir(baseDir)).Times(1).WillOnce(Return(0)); // List failed EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) @@ -441,220 +378,182 @@ TEST_P(CSDataStore_test, InitializeErrorTest2) { /** * InitializeErrorTest - * case:open chunk文件的时候出错 - * 预期结果:返回false + * Case: Error opening chunk file + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest3) { // test chunk open failed FakeEnv(); // set open chunk file failed - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); // open success - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(1)); // expect call close - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); // stat failed - EXPECT_CALL(*lfs_, Fstat(1, NotNull())) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Fstat(1, NotNull())).WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); // open success - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(1)); // expect call close - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); // stat success but file size not equal chunksize_ + metapagesize_ struct stat fileInfo; fileInfo.st_size = chunksize_; EXPECT_CALL(*lfs_, Fstat(1, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_FALSE(dataStore->Initialize()); // open success - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(1)); // expect call close - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(1, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage failed EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); // open success - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(1)); // expect call close - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(1, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage success, but version incompatible uint8_t version = FORMAT_VERSION + 1; memcpy(chunk1MetaPage, &version, sizeof(uint8_t)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + .WillOnce(DoAll( + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); 
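The Initialize failures exercised in this group are all driven by metapage validation: a leading format-version byte plus a checksum over the page, so corrupting either one (as the tests do with FORMAT_VERSION + 1 and chunk1MetaPage[1] += 1) must make loading fail. A hedged sketch of such a check; the page layout and the crc32 callback are assumptions for illustration, not curve's actual encoding:

    #include <cstdint>
    #include <cstring>

    // Hypothetical layout: [version:1][payload...][crc32 over all prior bytes].
    bool CheckMetaPage(const char* page, size_t len, uint8_t expectedVersion,
                       uint32_t (*crc32)(const void*, size_t)) {
        if (len < sizeof(uint8_t) + sizeof(uint32_t)) return false;
        uint8_t version;
        std::memcpy(&version, page, sizeof(version));
        if (version != expectedVersion) return false;  // version incompatible
        uint32_t stored;
        std::memcpy(&stored, page + len - sizeof(stored), sizeof(stored));
        // Any flipped payload byte changes the computed checksum.
        return stored == crc32(page, len - sizeof(stored));
    }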
EXPECT_FALSE(dataStore->Initialize()); // open success - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillOnce(Return(1)); // expect call close - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(1, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage success, but crc check failed version = FORMAT_VERSION; chunk1MetaPage[1] += 1; // change the page data memcpy(chunk1MetaPage, &version, sizeof(uint8_t)); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + .WillOnce(DoAll( + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_FALSE(dataStore->Initialize()); } /** * InitializeErrorTest - * case:open 快照文件的时候出错 - * 预期结果:返回false + * Case: Error opening snapshot file + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest4) { // test chunk open failed FakeEnv(); // set open snapshot file failed - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); + // Each reinitialization will release the original resources and reload them + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); // open success - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(2)); // expect call close - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // stat failed - EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Fstat(2, NotNull())).WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); + // Each reinitialization will release the original resources and reload them + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); // open success - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(2)); // expect call close - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // stat success but file size not equal chunksize_ + metapagesize_ struct stat fileInfo; fileInfo.st_size = chunksize_; EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); + // Each reinitialization will release the original resources and reload them + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); // open success - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(2)); // expect call close - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - 
.WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage failed EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); + // Each reinitialization will release the original resources and reload them + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); // open success - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(2)); // expect call close - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage success, but version incompatible uint8_t version = FORMAT_VERSION + 1; memcpy(chunk1SnapMetaPage, &version, sizeof(uint8_t)); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillOnce(DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); + // Each reinitialization will release the original resources and reload them + EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0)); // open success - EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)) - .WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Open(chunk1snap1Path, _)).WillOnce(Return(2)); // expect call close - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage success, but crc check failed version = FORMAT_VERSION; chunk1SnapMetaPage[1] += 1; // change the page data memcpy(chunk1SnapMetaPage, &version, sizeof(uint8_t)); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillOnce(DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_FALSE(dataStore->Initialize()); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); } /** * InitializeErrorTest - * case:存在chunk文件,chunk文件存在snapshot文件, - * List的时候snapshot先于chunk文件被list - * open chunk文件的时候出错 - * 预期结果:返回false + * Case: There is a chunk file, and there is a snapshot file in the chunk file, + * When listing, snapshots are listed before chunk files + * Error opening chunk file + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest5) { // test snapshot founded before chunk file , @@ -666,18 +565,16 @@ TEST_P(CSDataStore_test, InitializeErrorTest5) { fileNames.push_back(chunk1); EXPECT_CALL(*lfs_, List(baseDir, NotNull())) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); // set open chunk file failed - EXPECT_CALL(*lfs_, Open(chunk1Path, _)) - 
.WillRepeatedly(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Open(chunk1Path, _)).WillRepeatedly(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); } /** * Test - * case:chunk 不存在 - * 预期结果:创建chunk文件,并成功写入数据 + * Case: chunk does not exist + * Expected result: Create chunk file and successfully write data */ TEST_P(CSDataStore_test, WriteChunkTest1) { // initialize @@ -691,47 +588,34 @@ TEST_P(CSDataStore_test, WriteChunkTest1) { char* buf = new char[length]; memset(buf, 0, length); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); - - // 如果sn为0,返回InvalidArgError - EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->WriteChunk(id, - 0, - buf, - offset, - length, - nullptr)); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); + + // If sn is 0, returns InvalidArgError + EXPECT_EQ(CSErrorCode::InvalidArgError, + dataStore->WriteChunk(id, 0, buf, offset, length, nullptr)); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + .WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage char chunk3MetaPage[metapagesize_]; // NOLINT memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); FakeEncodeChunk(chunk3MetaPage, 0, 1); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), - Return(metapagesize_))); + .WillOnce(DoAll( + SetArrayArgument<1>(chunk3MetaPage, chunk3MetaPage + metapagesize_), + Return(metapagesize_))); // will write data EXPECT_CALL(*lfs_, Write(4, Matcher(_), metapagesize_ + offset, length)) .Times(1); - EXPECT_EQ(CSErrorCode::Success, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + EXPECT_EQ(CSErrorCode::Success, + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); - EXPECT_CALL(*lfs_, Sync(4)) - .WillOnce(Return(0)) - .WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Sync(4)).WillOnce(Return(0)).WillOnce(Return(-1)); // sync chunk success EXPECT_EQ(CSErrorCode::Success, dataStore->SyncChunk(id)); @@ -744,21 +628,17 @@ TEST_P(CSDataStore_test, WriteChunkTest1) { ASSERT_EQ(1, info.curSn); ASSERT_EQ(0, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在,请求sn小于chunk的sn - * 预期结果:拒绝写入,返回BackwardRequestError + * Case: chunk exists, request sn smaller than chunk's sn + * Expected result: Refused writing, returned BackwardRequestError */ TEST_P(CSDataStore_test, WriteChunkTest2) { // initialize @@ -776,46 +656,28 @@ TEST_P(CSDataStore_test, WriteChunkTest2) { // snchunk.correctedsn EXPECT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // snWriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, 
                            offset, length, nullptr));
    // sn<chunk.sn, sn<chunk.correctedsn
    EXPECT_EQ(CSErrorCode::BackwardRequestError,
-              dataStore->WriteChunk(id,
-                                    sn,
-                                    buf,
-                                    offset,
-                                    length,
-                                    nullptr));
+              dataStore->WriteChunk(id, sn, buf, offset, length, nullptr));

-    EXPECT_CALL(*lfs_, Close(1))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(2))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(3))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(1)).Times(1);
+    EXPECT_CALL(*lfs_, Close(2)).Times(1);
+    EXPECT_CALL(*lfs_, Close(3)).Times(1);
    delete[] buf;
 }

 /**
 * WriteChunkTest
- * case:chunk存在,请求sn小于chunk的correctedSn
- * 预期结果:拒绝写入,返回BackwardRequestError
+ * Case: the chunk exists and the requested sn is less than the chunk's
+ * correctedSn
+ * Expected result: the write is rejected with BackwardRequestError
 */
TEST_P(CSDataStore_test, WriteChunkTest3) {
    // initialize
@@ -824,9 +686,8 @@
    FakeEncodeChunk(chunk2MetaPage, 4, 2);
    EXPECT_CALL(*lfs_, Read(3, NotNull(), 0, metapagesize_))
        .WillRepeatedly(DoAll(
-            SetArrayArgument<1>(chunk2MetaPage,
-                                chunk2MetaPage + metapagesize_),
-            Return(metapagesize_)));
+            SetArrayArgument<1>(chunk2MetaPage, chunk2MetaPage + metapagesize_),
+            Return(metapagesize_)));
    EXPECT_TRUE(dataStore->Initialize());

    ChunkID id = 2;
@@ -838,47 +699,29 @@
    // sn>chunk.sn, sn<chunk.correctedsn
    EXPECT_EQ(CSErrorCode::BackwardRequestError,
-              dataStore->WriteChunk(id,
-                                    sn,
-                                    buf,
-                                    offset,
-                                    length,
-                                    nullptr));
+              dataStore->WriteChunk(id, sn, buf, offset, length, nullptr));
    // sn==chunk.sn, sn<chunk.correctedsn
    EXPECT_EQ(CSErrorCode::BackwardRequestError,
-              dataStore->WriteChunk(id,
-                                    sn,
-                                    buf,
-                                    offset,
-                                    length,
-                                    nullptr));
+              dataStore->WriteChunk(id, sn, buf, offset, length, nullptr));
    // sn==chunk.sn, sn<chunk.correctedsn
    EXPECT_EQ(CSErrorCode::BackwardRequestError,
-              dataStore->WriteChunk(id,
-                                    sn,
-                                    buf,
-                                    offset,
-                                    length,
-                                    nullptr));
+              dataStore->WriteChunk(id, sn, buf, offset, length, nullptr));

-    EXPECT_CALL(*lfs_, Close(1))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(2))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(3))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(1)).Times(1);
+    EXPECT_CALL(*lfs_, Close(2)).Times(1);
+    EXPECT_CALL(*lfs_, Close(3)).Times(1);
    delete[] buf;
 }

 /**
 * WriteChunkTest
- * case:chunk存在,请求sn等于chunk的sn且不小于correctSn
- * chunk不存在快照
- * 预期结果:直接写数据到chunk文件
+ * Case: the chunk exists, the requested sn equals the chunk's sn and is not
+ * less than correctedSn, and the chunk has no snapshot
+ * Expected result: data is written directly to the chunk file
 */
TEST_P(CSDataStore_test, WriteChunkTest4) {
    // initialize
@@ -898,12 +741,7 @@
        .Times(1);

    EXPECT_EQ(CSErrorCode::Success,
-              dataStore->WriteChunk(id,
-                                    sn,
-                                    buf,
-                                    offset,
-                                    length,
-                                    nullptr));
+              dataStore->WriteChunk(id, sn, buf, offset, length, nullptr));
    CSChunkInfo info;
    dataStore->GetChunkInfo(id, &info);
    ASSERT_EQ(2, info.curSn);
    ASSERT_EQ(0, info.snapSn);
@@ -914,52 +752,33 @@
    EXPECT_CALL(*lfs_, Write(3, Matcher<const char*>(NotNull()), _, _))
        .Times(0);
    EXPECT_EQ(CSErrorCode::InvalidArgError,
-              dataStore->WriteChunk(id,
-                                    sn,
-                                    buf,
-                                    offset,
-                                    length,
-                                    nullptr));
+              dataStore->WriteChunk(id, sn, buf, offset, length, nullptr));
    // return InvalidArgError if length not aligned
    offset = blocksize_;
    length = blocksize_ - 1;
    EXPECT_CALL(*lfs_, Write(3, Matcher<const char*>(NotNull()), _, _))
        .Times(0);
    EXPECT_EQ(CSErrorCode::InvalidArgError,
-              dataStore->WriteChunk(id,
-                                    sn,
-                                    buf,
-                                    offset,
-                                    length,
-                                    nullptr));
+              dataStore->WriteChunk(id, sn, buf, offset, length, nullptr));
    // return InvalidArgError if offset not aligned
    offset = blocksize_ + 1;
    length = blocksize_;
    EXPECT_CALL(*lfs_, Write(3, Matcher<const char*>(NotNull()), _, _))
        .Times(0);
    EXPECT_EQ(CSErrorCode::InvalidArgError,
dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在,请求sn大于chunk的sn,等于correctSn, - * chunk不存在快照 - * 预期结果:会更新metapage,然后写数据到chunk文件 + * Case: chunk exists, request sn is greater than the sn of the chunk, equal to + * correctSn, chunk does not have a snapshot Expected result: Metapage will be + * updated and data will be written to the chunk file */ TEST_P(CSDataStore_test, WriteChunkTest6) { // initialize @@ -968,9 +787,8 @@ TEST_P(CSDataStore_test, WriteChunkTest6) { FakeEncodeChunk(chunk2MetaPage, 3, 2); EXPECT_CALL(*lfs_, Read(3, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk2MetaPage, - chunk2MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk2MetaPage, chunk2MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 2; @@ -989,32 +807,25 @@ TEST_P(CSDataStore_test, WriteChunkTest6) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); ASSERT_EQ(0, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在,请求sn大于chunk的sn以及correctSn, - * chunk不存在快照、 - * 预期结果:会创建快照文件,更新metapage, - * 写数据时先cow到snapshot,再写chunk文件 + * Case: chunk exists, request sn greater than Chunk's sn and correctSn, + * chunk does not have a snapshot + * Expected result: A snapshot file will be created, and the metapage will be + * updated, When writing data, first perform a Copy-On-Write operation to the + * snapshot, and then write to the chunk file */ TEST_P(CSDataStore_test, WriteChunkTest7) { // initialize @@ -1028,23 +839,19 @@ TEST_P(CSDataStore_test, WriteChunkTest7) { char* buf = new char[length]; memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 - string snapPath = string(baseDir) + "/" + - FileNameOperator::GenerateSnapshotName(id, 2); + string snapPath = + string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); // snapshot not exists - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); // expect call chunkfile pool GetFile - EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(4)); // will read snapshot metapage char metapage[metapagesize_]; // NOLINT(runtime/arrays) memset(metapage, 0, sizeof(metapage)); FakeEncodeSnapshot(metapage, 2); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(metapage, - metapage + metapagesize_), + .WillOnce(DoAll(SetArrayArgument<1>(metapage, metapage + metapagesize_), Return(metapagesize_))); // will update 
metapage
    EXPECT_CALL(*lfs_,
@@ -1066,54 +873,37 @@
        .Times(1);

    EXPECT_EQ(CSErrorCode::Success,
-              dataStore->WriteChunk(id,
-                                    sn,
-                                    buf,
-                                    offset,
-                                    length,
-                                    nullptr));
+              dataStore->WriteChunk(id, sn, buf, offset, length, nullptr));
    CSChunkInfo info;
    dataStore->GetChunkInfo(id, &info);
    ASSERT_EQ(3, info.curSn);
    ASSERT_EQ(2, info.snapSn);

-    // 再次写同一个block的数据,不再进行cow,而是直接写入数据
+    // Write the same block again: no COW this time, the data is written
+    // directly
    EXPECT_CALL(*lfs_,
                Write(3, Matcher<butil::IOBuf>(_), metapagesize_ + offset,
                      length))
        .Times(1);
    EXPECT_EQ(CSErrorCode::Success,
-              dataStore->WriteChunk(id,
-                                    sn,
-                                    buf,
-                                    offset,
-                                    length,
-                                    nullptr));
-
-    // sn - 1 < chunk.sn,返回 BackwardRequestError
+              dataStore->WriteChunk(id, sn, buf, offset, length, nullptr));
+
+    // sn - 1 < chunk.sn, returns BackwardRequestError
    EXPECT_EQ(CSErrorCode::BackwardRequestError,
-              dataStore->WriteChunk(id,
-                                    sn - 1,
-                                    buf,
-                                    offset,
-                                    length,
-                                    nullptr));
-
-    EXPECT_CALL(*lfs_, Close(1))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(2))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(3))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(4))
-        .Times(1);
+              dataStore->WriteChunk(id, sn - 1, buf, offset, length, nullptr));
+
+    EXPECT_CALL(*lfs_, Close(1)).Times(1);
+    EXPECT_CALL(*lfs_, Close(2)).Times(1);
+    EXPECT_CALL(*lfs_, Close(3)).Times(1);
+    EXPECT_CALL(*lfs_, Close(4)).Times(1);
    delete[] buf;
 }

 /**
 * WriteChunkTest
- * case:chunk存在,请求sn等于chunk的sn且不小于correctSn
- * chunk存在快照
- * 预期结果:先cow到snapshot,再写chunk文件
+ * Case: the chunk exists, the requested sn equals the chunk's sn and is not
+ * less than correctedSn, and the chunk has a snapshot
+ * Expected result: COW to the snapshot first, then write the chunk file
 */
TEST_P(CSDataStore_test, WriteChunkTest9) {
    // initialize
@@ -1143,31 +933,23 @@
        .Times(1);

    EXPECT_EQ(CSErrorCode::Success,
-              dataStore->WriteChunk(id,
-                                    sn,
-                                    buf,
-                                    offset,
-                                    length,
-                                    nullptr));
+              dataStore->WriteChunk(id, sn, buf, offset, length, nullptr));
    CSChunkInfo info;
    dataStore->GetChunkInfo(id, &info);
    ASSERT_EQ(2, info.curSn);
    ASSERT_EQ(1, info.snapSn);

-    EXPECT_CALL(*lfs_, Close(1))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(2))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(3))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(1)).Times(1);
+    EXPECT_CALL(*lfs_, Close(2)).Times(1);
+    EXPECT_CALL(*lfs_, Close(3)).Times(1);
    delete[] buf;
 }

 /**
 * WriteChunkTest
- * case:chunk存在,请求sn大于chunk的sn,等于correctSn
- * chunk存在快照
- * 预期结果:更新metapage,然后写chunk文件
+ * Case: the chunk exists, the requested sn is greater than the chunk's sn and
+ * equal to correctedSn, and the chunk has a snapshot
+ * Expected result: the metapage is updated, then the chunk file is written
 */
TEST_P(CSDataStore_test, WriteChunkTest10) {
    // initialize
@@ -1176,9 +958,8 @@
    FakeEncodeChunk(chunk1MetaPage, 3, 2);
    EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_))
        .WillRepeatedly(DoAll(
-            SetArrayArgument<1>(chunk1MetaPage,
-                                chunk1MetaPage + metapagesize_),
-            Return(metapagesize_)));
+            SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_),
+            Return(metapagesize_)));
    EXPECT_TRUE(dataStore->Initialize());

    ChunkID id = 1;
@@ -1198,31 +979,24 @@
        .Times(1);

    EXPECT_EQ(CSErrorCode::Success,
-              dataStore->WriteChunk(id,
-                                    sn,
-                                    buf,
-                                    offset,
-                                    length,
-                                    nullptr));
+              dataStore->WriteChunk(id, sn, buf, offset, length, nullptr));
    CSChunkInfo info;
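The expectations above encode the copy-on-write discipline: the first time a newer sn touches a block, the old contents are copied into the snapshot and the snapshot's bitmap records that block, so a second write to the same block goes straight to the chunk. A simplified sketch of that decision, using hypothetical stand-in types rather than curve's CSDataStore internals:

    #include <bitset>
    #include <cstring>

    constexpr size_t kBlockSize = 4096;
    constexpr size_t kBlocks = 1024;

    // Hypothetical stand-in for a chunk file or its snapshot file.
    struct BlockFile {
        char data[kBlocks][kBlockSize];
        std::bitset<kBlocks> copied;  // which blocks were already COW'd
    };

    // Copy the old block into the snapshot once, then overwrite the chunk
    // in place; later writes to the same block skip the copy.
    void WriteWithCow(BlockFile* chunk, BlockFile* snap, size_t block,
                      const char* src) {
        if (snap != nullptr && !snap->copied.test(block)) {
            std::memcpy(snap->data[block], chunk->data[block], kBlockSize);
            snap->copied.set(block);
        }
        std::memcpy(chunk->data[block], src, kBlockSize);
    }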
dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); ASSERT_EQ(1, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在,请求sn大于chunk的sn和correctSn - * chunk存在快照,snapsn(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -1245,38 +1018,29 @@ TEST_P(CSDataStore_test, WriteChunkTest11) { // sn>chunk.sn, sn>chunk.correctedsn EXPECT_EQ(CSErrorCode::SnapshotConflictError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * WriteChunkTest - * 写clone chunk,模拟克隆 - * case1:clone chunk存在,写入区域之前未写过 - * 预期结果1:写入数据并更新bitmap - * case2:clone chunk存在,写入区域之前已写过 - * 预期结果2:写入数据但不会更新bitmap - * case3:chunk存在,且是clone chunk,部分区域已写过,部分未写过 - * 预期结果3:写入数据并更新bitmap - * case4:遍写整个chunk - * 预期结果4:写入数据,然后clone chunk会被转为普通chunk + * Write a clone chunk to simulate cloning + * Case1: clone chunk exists and has not been written before writing to the + * region. Expected result 1: Write data and update bitmap. Case2: clone chunk + * exists and has been written before writing to the region. Expected result 2: + * Write data but not update bitmap. Case3: chunk exists and is a clone chunk. 
+ * Some areas have been written, while others have not. Expected result 3: + * Write data and update bitmap. Case4: Overwrite the entire chunk. Expected + * result 4: Write data, and then the clone chunk will be converted to a + * regular chunk */ TEST_P(CSDataStore_test, WriteChunkTest13) { // initialize @@ -1291,7 +1055,7 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { char* buf = new char[length]; // NOLINT memset(buf, 0, length); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { LOG(INFO) << "case 1"; char chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) @@ -1300,30 +1064,25 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { make_shared(chunksize_ / blocksize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); } - // case1:chunk存在,且是clone chunk,写入区域之前未写过 + // Case1: chunk exists and is a clone chunk, which has not been written + // before writing to the region { LOG(INFO) << "case 2"; id = 3; // not exist @@ -1338,13 +1097,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { .Times(1); ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - // 检查paste后chunk的状态 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0)); @@ -1352,7 +1106,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case2:chunk存在,且是clone chunk,写入区域之前已写过 + // Case2: chunk exists and is a clone chunk, which has been written before + // writing to the region { LOG(INFO) << "case 3"; id = 3; // not exist @@ -1366,13 +1121,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { .Times(0); ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - // paste后,chunk的状态不变 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0)); @@ -1380,7 +1130,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case3:chunk存在,且是clone chunk,部分区域已写过,部分未写过 + // Case3: chunk exists and is a clone chunk. 
Some areas have been written, + // while others have not { LOG(INFO) << "case 4"; id = 3; // not exist @@ -1389,8 +1140,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { std::unique_ptr buf(new char[length]); - // [2 * blocksize_, 4 * blocksize_)区域已写过 - // [0, metapagesize_)为metapage + // The [2 * blocksize_, 4 * blocksize_) area has been written + // [0, metapagesize_) is the metapage EXPECT_CALL(*lfs_, Write(4, Matcher(_), offset + metapagesize_, length)) .Times(1); @@ -1398,14 +1149,10 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(1); - ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf.get(), - offset, - length, - nullptr)); - // paste后,chunk的状态不变 + ASSERT_EQ( + CSErrorCode::Success, + dataStore->WriteChunk(id, sn, buf.get(), offset, length, nullptr)); + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(0, info.bitmap->NextSetBit(0)); @@ -1413,7 +1160,7 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(4)); } - // case4:遍写整个chunk + // Case4: Overwrite the entire chunk { LOG(INFO) << "case 5"; id = 3; // not exist @@ -1422,8 +1169,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { std::unique_ptr buf(new char[length]); - // [blocksize_, 4 * blocksize_)区域已写过 - // [0, metapagesize_)为metapage + // The [blocksize_, 4 * blocksize_) area has been written + // [0, metapagesize_) is the metapage EXPECT_CALL(*lfs_, Write(4, Matcher(_), offset + metapagesize_, length)) .Times(1); @@ -1431,41 +1178,33 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(1); - ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf.get(), - offset, - length, - nullptr)); - // paste后,chunk的状态不变 + ASSERT_EQ( + CSErrorCode::Success, + dataStore->WriteChunk(id, sn, buf.get(), offset, length, nullptr)); + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /** * WriteChunkTest - * 写clone chunk,模拟恢复 - * case1:clone chunk 存在,sn<chunk.sn,sn<chunk.correctedsn - * 预期结果1:返回BackwardRequestError - * case2:clone chunk存在,sn>chunk.sn,sn==chunk.correctedsn - * 预期结果2:写入数据并更新bitmap,更新chunk.sn为sn - * case3:clone chunk存在,sn==chunk.sn,sn==chunk.correctedsn - * 预期结果3:写入数据并更新bitmap - * case4:clone chunk 存在,sn>chunk.sn, sn>chunk.correctedsn - * 预期结果4:返回StatusConflictError + * Write clone chunk to simulate recovery + * Case1: clone chunk exists, sn<chunk.sn, sn<chunk.correctedsn + * Expected result 1: Returns BackwardRequestError + * Case2: clone chunk exists, sn>chunk.sn, sn==chunk.correctedsn + * Expected result 2: Write data and update bitmap, update chunk.sn to sn + * Case3: clone chunk exists, sn==chunk.sn, sn==chunk.correctedsn + * Expected result 3: Write data and update bitmap + * Case4: clone chunk exists, sn>chunk.sn, sn>chunk.correctedsn + * Expected result 4: Returns StatusConflictError */ TEST_P(CSDataStore_test, WriteChunkTest14) { // initialize @@ -1480,7 +1219,7 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { char* buf = new char[length]; // NOLINT memset(buf, 0, length); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { char 
chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); @@ -1488,26 +1227,20 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { make_shared(chunksize_ / blocksize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(2, info.curSn); @@ -1518,32 +1251,22 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case1:clone chunk存在 + // Case1: clone chunk exists { LOG(INFO) << "case 1"; // sn == chunk.sn, sn < chunk.correctedSn sn = 2; ASSERT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); // sn < chunk.sn, sn < chunk.correctedSn sn = 1; ASSERT_EQ(CSErrorCode::BackwardRequestError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); } - // case2:chunk存在,且是clone chunk, + // Case2: chunk exists and is a clone chunk, { LOG(INFO) << "case 2"; id = 3; @@ -1559,13 +1282,8 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { .Times(2); ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - // 检查paste后chunk的状态 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(3, info.curSn); ASSERT_EQ(3, info.correctedSn); @@ -1576,7 +1294,7 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case3:chunk存在,且是clone chunk + // Case3: chunk exists and is a clone chunk // sn > chunk.sn;sn == correctedsn { LOG(INFO) << "case 3"; @@ -1585,8 +1303,8 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { std::unique_ptr buf(new char[length]); - // [2 * blocksize_, 4 * blocksize_)区域已写过 - // [0, blocksize_)为metapage + // The [2 * blocksize_, 4 * blocksize_) area has been written + // [0, blocksize_) is the metapage EXPECT_CALL(*lfs_, Write(4, Matcher(_), offset + metapagesize_, length)) .Times(1); @@ -1594,14 +1312,10 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(1); - ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf.get(), - offset, - length, - nullptr)); - // paste后,chunk的状态不变 + ASSERT_EQ( + 
CSErrorCode::Success, + dataStore->WriteChunk(id, sn, buf.get(), offset, length, nullptr)); + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(3, info.curSn); ASSERT_EQ(3, info.correctedSn); @@ -1612,25 +1326,20 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(4)); } - // case3:chunk存在,且是clone chunk + // Case3: chunk exists and is a clone chunk // sn > chunk.sn;sn > correctedsn { LOG(INFO) << "case 4"; sn = 4; - // 不会写数据 - EXPECT_CALL(*lfs_, Write(4, Matcher(_), _, _)) - .Times(0); + // Will not write data + EXPECT_CALL(*lfs_, Write(4, Matcher(_), _, _)).Times(0); std::unique_ptr buf(new char[length]); - ASSERT_EQ(CSErrorCode::StatusConflictError, - dataStore->WriteChunk(id, - sn, - buf.get(), - offset, - length, - nullptr)); - // chunk的状态不变 + ASSERT_EQ( + CSErrorCode::StatusConflictError, + dataStore->WriteChunk(id, sn, buf.get(), offset, length, nullptr)); + // The state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(3, info.curSn); ASSERT_EQ(3, info.correctedSn); @@ -1641,25 +1350,22 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(4)); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在, + * Case: chunk exists, * sn==chunk.sn * sn>chunk.correctedSn * chunk.sn(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); // fake read chunk1's snapshot1 metapage,chunk.sn(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -1696,30 +1401,23 @@ TEST_P(CSDataStore_test, WriteChunkTest15) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * WriteChunkTest - * case:chunk存在, - * sn>chunk.sn - * sn>chunk.correctedSn - * chunk.sn==snap.sn - * chunk存在快照 - * 预期结果:先cow到snapshot,再写chunk文件 + * Case: chunk exists, + * sn>chunk.sn + * sn>chunk.correctedSn + * chunk.sn==snap.sn + * chunk has a snapshot + * Expected result: When writing data, first perform a Copy-On-Write operation + * to the snapshot, and then write to the chunk file */ TEST_P(CSDataStore_test, WriteChunkTest16) { // initialize @@ -1728,16 +1426,15 @@ TEST_P(CSDataStore_test, WriteChunkTest16) { FakeEncodeChunk(chunk1MetaPage, 0, 2); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + 
SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); // fake read chunk1's snapshot1 metapage FakeEncodeSnapshot(chunk1SnapMetaPage, 3); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -1760,26 +1457,18 @@ TEST_P(CSDataStore_test, WriteChunkTest16) { .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** - * WriteChunkTest 异常测试 - * case:创建快照文件时出错 - * 预期结果:写失败,不会改变当前chunk状态 + * WriteChunkTest exception test + * Case: Error creating snapshot file + * Expected result: Write failed and will not change the current chunk state */ TEST_P(CSDataStore_test, WriteChunkErrorTest1) { // initialize @@ -1792,80 +1481,56 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest1) { size_t length = blocksize_; char* buf = new char[length]; // NOLINT memset(buf, 0, length); - string snapPath = string(baseDir) + "/" + - FileNameOperator::GenerateSnapshotName(id, 2); + string snapPath = + string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); // getchunk failed - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn); ASSERT_EQ(0, info.snapSn); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) - .WillOnce(Return(0)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); + EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())).WillOnce(Return(0)); // open snapshot failed - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn); ASSERT_EQ(0, info.snapSn); // open success but read snapshot metapage failed - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(4)); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); EXPECT_EQ(CSErrorCode::InternalError, - 
dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); dataStore->GetChunkInfo(id, &info); ASSERT_EQ(2, info.curSn); ASSERT_EQ(0, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** - * WriteChunkTest 异常测试 - * case:创建快照文件成功,更新metapage失败 - * 预期结果:写失败,产生快照文件,但是chunk版本号不会改变 - * 再次写入,不会生成新的快照文件 + * WriteChunkTest exception test + * Case: Snapshot file created successfully, failed to update metapage + * Expected result: Write failed, resulting in a snapshot file, but the chunk + * version number will not change. Writing again will not generate a new + * snapshot file */ TEST_P(CSDataStore_test, WriteChunkErrorTest2) { // initialize @@ -1879,22 +1544,18 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest2) { char* buf = new char[length]; memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 - string snapPath = string(baseDir) + "/" + - FileNameOperator::GenerateSnapshotName(id, 2); + string snapPath = + string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); // expect call chunk file pool GetFile - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); + EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(4)); // will read snapshot metapage char metapage[metapagesize_]; // NOLINT(runtime/arrays) memset(metapage, 0, sizeof(metapage)); FakeEncodeSnapshot(metapage, 2); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(metapage, - metapage + metapagesize_), + .WillOnce(DoAll(SetArrayArgument<1>(metapage, metapage + metapagesize_), Return(metapagesize_))); // write chunk metapage failed EXPECT_CALL(*lfs_, @@ -1902,34 +1563,26 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest2) { .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); // chunk sn not changed ASSERT_EQ(2, info.curSn); ASSERT_EQ(2, info.snapSn); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /** - * WriteChunkTest 异常测试 - * case:创建快照文件成功,更新metapage成功,cow失败 - * 预期结果:写失败,产生快照文件,chunk版本号发生变更, - * 快照的bitmap未发生变化,再次写入,仍会进行cow + * WriteChunkTest exception test + * Case: Snapshot file created successfully, metapage updated successfully, + * cow failed. Expected result: Write failed, snapshot file generated, chunk + * version number changed. The bitmap of the snapshot has not changed. 
If written again, it + * will still trigger cow */ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { // initialize @@ -1943,22 +1596,18 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { char* buf = new char[length]; memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 - string snapPath = string(baseDir) + "/" + - FileNameOperator::GenerateSnapshotName(id, 2); + string snapPath = + string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); // expect call chunk file pool GetFile - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); + EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(4)); // will read snapshot metapage char metapage[metapagesize_]; // NOLINT(runtime/arrays) memset(metapage, 0, sizeof(metapage)); FakeEncodeSnapshot(metapage, 2); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(metapage, - metapage + metapagesize_), + .WillOnce(DoAll(SetArrayArgument<1>(metapage, metapage + metapagesize_), Return(metapagesize_))); // will update metapage EXPECT_CALL(*lfs_, @@ -1971,12 +1620,7 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); CSChunkInfo info; dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); @@ -1991,12 +1635,7 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { metapagesize_ + offset, length)) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); ASSERT_EQ(2, info.snapSn); @@ -2014,17 +1653,12 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); dataStore->GetChunkInfo(id, &info); ASSERT_EQ(3, info.curSn); ASSERT_EQ(2, info.snapSn); - // 再次写入仍会cow + // Writing again will still trigger cow // will copy on write LOG(INFO) << "case 4"; EXPECT_CALL(*lfs_, Read(3, NotNull(), metapagesize_ + offset, length)) @@ -2043,29 +1677,21 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { LOG(INFO) << "case 5"; EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /** - * WriteChunkTest 异常测试 - * case:创建快照文件成功,更新metapage成功,cow成功,写数据失败 - * 预期结果:写失败,产生快照文件,chunk版本号发生变更, - * 快照的bitmap发生变化,再次写入,直接写chunk文件 + * WriteChunkTest exception test + * Case: Snapshot file created successfully, metapage updated successfully, 
cow succeeded, and write + * data failed. Expected result: Write failed, snapshot file generated, chunk + * version number changed. The bitmap of the snapshot has changed; writing + * again will write directly to the chunk file */ TEST_P(CSDataStore_test, WriteChunkErrorTest4) { // initialize @@ -2079,22 +1705,18 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest4) { char buf[length]; // NOLINT memset(buf, 0, sizeof(buf)); // will Open snapshot file, snap sn equals 2 - string snapPath = string(baseDir) + "/" + - FileNameOperator::GenerateSnapshotName(id, 2); + string snapPath = + string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); // expect call chunk file pool GetFile - EXPECT_CALL(*lfs_, FileExists(snapPath)) - .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(snapPath, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, FileExists(snapPath)).WillOnce(Return(false)); + EXPECT_CALL(*fpool_, GetFileImpl(snapPath, NotNull())).WillOnce(Return(0)); + EXPECT_CALL(*lfs_, Open(snapPath, _)).WillOnce(Return(4)); // will read snapshot metapage char metapage[metapagesize_]; // NOLINT(runtime/arrays) memset(metapage, 0, sizeof(metapage)); FakeEncodeSnapshot(metapage, 2); EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(metapage, - metapage + metapagesize_), + .WillOnce(DoAll(SetArrayArgument<1>(metapage, metapage + metapagesize_), Return(metapagesize_))); // will update metapage EXPECT_CALL(*lfs_, @@ -2116,39 +1738,25 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest4) { .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - // 再次写入直接写chunk文件 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // Write directly to the chunk file again // will write data EXPECT_CALL(*lfs_, Write(3, Matcher(_), metapagesize_ + offset, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); } /** * WriteChunkTest - * case:chunk 不存在 - * 预期结果:创建chunk文件的时候失败 + * Case: chunk does not exist + * Expected result: Failed to create chunk file */ TEST_P(CSDataStore_test, WriteChunkErrorTest5) { // initialize @@ -2162,117 +1770,78 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest5) { char* buf = new char[length]; memset(buf, 0, length); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunk file pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(-UT_ERRNO)); - EXPECT_EQ(CSErrorCode::InternalError, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); + EXPECT_EQ(CSErrorCode::InternalError, + dataStore->WriteChunk(id, sn, buf, offset, length, 
nullptr)); // getchunk success - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); // set open chunk file failed - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .WillOnce(Return(-UT_ERRNO)); - EXPECT_EQ(CSErrorCode::InternalError, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).WillOnce(Return(-UT_ERRNO)); + EXPECT_EQ(CSErrorCode::InternalError, + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(true)); // open success - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).WillOnce(Return(4)); // expect call close - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); // stat failed - EXPECT_CALL(*lfs_, Fstat(4, NotNull())) - .WillOnce(Return(-UT_ERRNO)); - EXPECT_EQ(CSErrorCode::InternalError, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Fstat(4, NotNull())).WillOnce(Return(-UT_ERRNO)); + EXPECT_EQ(CSErrorCode::InternalError, + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(true)); // open success - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).WillOnce(Return(4)); // expect call close - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); // stat success but file size not equal chunksize_ + metapagesize_ struct stat fileInfo; fileInfo.st_size = chunksize_; EXPECT_CALL(*lfs_, Fstat(4, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_EQ(CSErrorCode::FileFormatError, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(true)); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_EQ(CSErrorCode::FileFormatError, + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(true)); // open success - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).WillOnce(Return(4)); // expect call close - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); // stat success fileInfo.st_size = chunksize_ + metapagesize_; EXPECT_CALL(*lfs_, Fstat(4, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // read metapage failed EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); - EXPECT_EQ(CSErrorCode::InternalError, dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_EQ(CSErrorCode::InternalError, + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /* * 
WriteChunkErrorTest - * 所写chunk为clone chunk - * case1:请求location过长,导致metapage size超出page size - * 预期结果1:create clone chunk失败 - * case2:写数据时失败 - * 预期结果2:返回InternalError,chunk状态不变 - * case3:更新metapage时失败 - * 预期结果3:返回InternalError,chunk状态不变 + * The chunk written is a clone chunk + * Case1: The request location is too long, causing the metapage size to exceed + * the page size. Expected result 1: Create clone chunk failed. Case2: Failed + * to write data. Expected result 2: InternalError returned, chunk status + * remains unchanged. Case3: Failed to update metapage. Expected result 3: + * InternalError returned, chunk status remains unchanged */ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { // initialize @@ -2287,17 +1856,14 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { char buf[length]; // NOLINT memset(buf, 0, sizeof(buf)); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { - string longLocation(kLocationLimit+1, 'a'); + string longLocation(kLocationLimit + 1, 'a'); EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, longLocation)); } - // 创建 clone chunk + // Create clone chunk { char chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); @@ -2305,29 +1871,23 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { make_shared(chunksize_ / metapagesize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); } - // case1:写数据时失败 + // Case1: Failed to write data { id = 3; // not exist offset = blocksize_; @@ -2340,18 +1900,13 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(0); ASSERT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); - // 检查paste后chunk的状态 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case2:更新metapage时失败 + // Case2: Failed to update metapage { id = 3; // not exist offset = blocksize_; @@ -2364,32 +1919,23 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); ASSERT_EQ(CSErrorCode::InternalError, - dataStore->WriteChunk(id, - sn, - buf, - offset, - length, - nullptr)); 
- // 检查paste后chunk的状态 + dataStore->WriteChunk(id, sn, buf, offset, length, nullptr)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); } /** * ReadChunkTest - * case:chunk不存在 - * 预期结果:返回ChunkNotExistError错误码 + * Case: chunk does not exist + * Expected result: ChunkNotExistError error code returned */ TEST_P(CSDataStore_test, ReadChunkTest1) { // initialize @@ -2404,24 +1950,17 @@ TEST_P(CSDataStore_test, ReadChunkTest1) { memset(buf, 0, sizeof(buf)); // test chunk not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * ReadChunkTest - * case:chunk存在,读取区域超过chunk大小或者offset和length未对齐 - * 预期结果:返回InvalidArgError错误码 + * Case: chunk exists, reading area exceeds chunk size or offset and length are + * not aligned Expected result: InvalidArgError error code returned */ TEST_P(CSDataStore_test, ReadChunkTest2) { // initialize @@ -2436,42 +1975,27 @@ TEST_P(CSDataStore_test, ReadChunkTest2) { memset(buf, 0, sizeof(buf)); // test read out of range EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); // return InvalidArgError if length not aligned offset = blocksize_; length = blocksize_ - 1; EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); // return InvalidArgError if offset not aligned offset = blocksize_ + 1; length = blocksize_; EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * ReadChunkTest - * case:正常读取存在的chunk - * 预期结果:读取成功 + * Case: Normal reading of existing chunks + * Expected result: read successfully */ TEST_P(CSDataStore_test, ReadChunkTest3) { // initialize @@ -2488,30 +2012,23 @@ TEST_P(CSDataStore_test, ReadChunkTest3) { EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + metapagesize_, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * ReadChunkTest - * 读取 clone chunk - * 
case1:读取区域未被写过 - * 预期结果:返回PageNerverWrittenError - * case2:读取区域部分被写过 - * 预期结果:返回PageNerverWrittenError - * case3:读取区域已被写过 - * 预期结果:返回Success,数据成功写入 + * Read clone chunk + * Case1: The read area has not been written + * Expected result: PageNerverWrittenError returned + * Case2: The read area part has been written + * Expected result: PageNerverWrittenError returned + * Case3: The read area has been written + * Expected result: Success returned, data successfully written */ TEST_P(CSDataStore_test, ReadChunkTest4) { // initialize @@ -2529,80 +2046,56 @@ TEST_P(CSDataStore_test, ReadChunkTest4) { FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), - Return(metapagesize_))); - EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, - location)); - - // case1: 读取未写过区域 + .WillOnce(DoAll( + SetArrayArgument<1>(chunk3MetaPage, chunk3MetaPage + metapagesize_), + Return(metapagesize_))); + EXPECT_EQ( + CSErrorCode::Success, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); + + // Case1: Read unwritten area off_t offset = 1 * blocksize_; size_t length = blocksize_; char buf[2 * length]; // NOLINT memset(buf, 0, sizeof(buf)); - EXPECT_CALL(*lfs_, Read(_, _, _, _)) - .Times(0); + EXPECT_CALL(*lfs_, Read(_, _, _, _)).Times(0); EXPECT_EQ(CSErrorCode::PageNerverWrittenError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - // case2: 读取区域部分被写过 + // Case2: The read area part has been written offset = 0; length = 2 * blocksize_; - EXPECT_CALL(*lfs_, Read(_, _, _, _)) - .Times(0); + EXPECT_CALL(*lfs_, Read(_, _, _, _)).Times(0); EXPECT_EQ(CSErrorCode::PageNerverWrittenError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - // case3: 读取区域已写过 + // Case3: The read area has been written offset = 0; length = blocksize_; EXPECT_CALL(*lfs_, Read(4, NotNull(), offset + metapagesize_, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); } /** * ReadChunkErrorTest - * case:读chunk文件时出错 - * 预期结果:读取失败,返回InternalError + * Case: Error reading chunk file + * Expected result: Read failed, returned InternalError */ TEST_P(CSDataStore_test, ReadChunkErrorTest1) { // initialize @@ 
-2619,25 +2112,18 @@ TEST_P(CSDataStore_test, ReadChunkErrorTest1) { EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + metapagesize_, length)) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->ReadChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * ReadSnapshotChunkTest - * case:chunk不存在 - * 预期结果:返回ChunkNotExistError错误码 + * Case: chunk does not exist + * Expected result: ChunkNotExistError error code returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest1) { // initialize @@ -2652,25 +2138,18 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest1) { memset(buf, 0, length); // test chunk not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * ReadSnapshotChunkTest - * case:chunk存在,请求版本号等于chunk版本号 - * 预期结果:读chunk的数据 + * Case: chunk exists, request version number equal to Chunk version number + * Expected result: Read chunk data */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest2) { // initialize @@ -2685,54 +2164,35 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest2) { memset(buf, 0, length); // test out of range EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); // test offset not aligned offset = chunksize_ - 1; length = chunksize_; EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); // test length not aligned offset = chunksize_; length = chunksize_ + 1; EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); // test in range offset = blocksize_; length = 2 * blocksize_; EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + metapagesize_, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * ReadSnapshotChunkTest - * case:chunk存在,请求版本号等于snapshot版本号 - * 预期结果:读快照的数据 + * Case: chunk exists, request version number equal to snapshot version number + * Expected result: Read data from snapshot */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest3) { // initialize @@ -2760,12 +2220,7 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest3) { offset + metapagesize_, length)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - writeBuf, - offset, - length, - nullptr)); + 
dataStore->WriteChunk(id, sn, writeBuf, offset, length, nullptr)); // test out of range sn = 1; @@ -2774,16 +2229,11 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest3) { char* readBuf = new char[length]; memset(readBuf, 0, length); EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->ReadSnapshotChunk(id, - sn, - readBuf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, readBuf, offset, length)); // test in range, read [0, 4*blocksize_) offset = 0; // read chunk in[0, blocksize_) and [3*blocksize_, 4*blocksize_) - EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_, blocksize_)) - .Times(1); + EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_, blocksize_)).Times(1); EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_ + 3 * blocksize_, blocksize_)) .Times(1); @@ -2792,26 +2242,19 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest3) { 2 * blocksize_)) .Times(1); EXPECT_EQ(CSErrorCode::Success, - dataStore->ReadSnapshotChunk(id, - sn, - readBuf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, readBuf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] writeBuf; delete[] readBuf; } /** * ReadSnapshotChunkTest - * case:chunk存在,但是请求的版本号不存在 - * 预期结果:返回ChunkNotExistError错误码 + * Case: chunk exists, but the requested version number does not exist + * Expected result: ChunkNotExistError error code returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest4) { // initialize @@ -2826,25 +2269,18 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest4) { memset(buf, 0, length); // test sn not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } /** * ReadSnapshotChunkErrorTest - * case:读快照时失败 - * 预期结果:返回InternalError + * Case: Failed to read snapshot + * Expected result: InternalError returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest1) { // initialize @@ -2872,12 +2308,7 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest1) { offset + metapagesize_, length)) .Times(1); ASSERT_EQ(CSErrorCode::Success, - dataStore->WriteChunk(id, - sn, - writeBuf, - offset, - length, - nullptr)); + dataStore->WriteChunk(id, sn, writeBuf, offset, length, nullptr)); // test in range, read [0, 4*blocksize_) sn = 1; @@ -2889,15 +2320,10 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest1) { EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_, blocksize_)) .WillOnce(Return(-UT_ERRNO)); ASSERT_EQ(CSErrorCode::InternalError, - dataStore->ReadSnapshotChunk(id, - sn, - readBuf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, readBuf, offset, length)); // read snapshot failed - EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_, blocksize_)) - .Times(1); + EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_, blocksize_)).Times(1); EXPECT_CALL(*lfs_, Read(1, NotNull(), metapagesize_ + 3 * blocksize_, blocksize_)) .Times(1); @@ -2907,20 +2333,17 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest1) { ASSERT_EQ(CSErrorCode::InternalError, 
dataStore->ReadSnapshotChunk(id, sn, readBuf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] writeBuf; delete[] readBuf; } /** * ReadSnapshotChunkErrorTest - * case:chunk存在,请求版本号等于chunk版本号,读数据时失败 - * 预期结果:返回InternalError + * Case: chunk exists, request version number is equal to Chunk version number, + * failed while reading data Expected result: InternalError returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest2) { // initialize @@ -2938,18 +2361,11 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest2) { EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + metapagesize_, length)) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->ReadSnapshotChunk(id, - sn, - buf, - offset, - length)); + dataStore->ReadSnapshotChunk(id, sn, buf, offset, length)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); delete[] buf; } @@ -2971,12 +2387,9 @@ TEST_P(CSDataStore_test, ReadChunkMetaDataTest1) { EXPECT_EQ(CSErrorCode::ChunkNotExistError, dataStore->ReadChunkMetaPage(id, sn, buf)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** @@ -2994,24 +2407,18 @@ TEST_P(CSDataStore_test, ReadChunkMetaDataTest2) { char buf[blocksize_]; // NOLINT(runtime/arrays) memset(buf, 0, blocksize_); // test chunk exists - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) - .Times(1); - EXPECT_EQ(CSErrorCode::Success, - dataStore->ReadChunkMetaPage(id, sn, buf)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)).Times(1); + EXPECT_EQ(CSErrorCode::Success, dataStore->ReadChunkMetaPage(id, sn, buf)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } - /** * DeleteChunkTest - * case:chunk不存在 - * 预期结果:返回成功 + * Case: chunk does not exist + * Expected result: returned successfully */ TEST_P(CSDataStore_test, DeleteChunkTest1) { // initialize @@ -3022,21 +2429,17 @@ TEST_P(CSDataStore_test, DeleteChunkTest1) { SequenceNum sn = 2; // test chunk not exists - EXPECT_EQ(CSErrorCode::Success, - dataStore->DeleteChunk(id, sn)); + EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteChunk(id, sn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteChunkTest - * case:chunk存在快照文件 - * 预期结果:返回Success, chunk被删除,快照被删除 + * Case: Chunk has a snapshot file present + * Expected result: Success returned, chunk deleted, snapshot deleted */ TEST_P(CSDataStore_test, DeleteChunkTest2) { // initialize @@ -3046,25 +2449,21 @@ TEST_P(CSDataStore_test, DeleteChunkTest2) { ChunkID id = 1; SequenceNum sn = 2; - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); 
- EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); // delete chunk with snapshot - EXPECT_EQ(CSErrorCode::Success, - dataStore->DeleteChunk(id, sn)); + EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteChunk(id, sn)); CSChunkInfo info; ASSERT_EQ(CSErrorCode::ChunkNotExistError, dataStore->GetChunkInfo(id, &info)); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** - * case:chunk存在,快照文件不存在 - * 预期结果:返回成功 + * Case: chunk exists, snapshot file does not exist + * Expected result: returned successfully */ TEST_P(CSDataStore_test, DeleteChunkTest3) { // initialize @@ -3075,27 +2474,22 @@ TEST_P(CSDataStore_test, DeleteChunkTest3) { SequenceNum sn = 2; // chunk will be closed - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) - .WillOnce(Return(0)); - EXPECT_EQ(CSErrorCode::Success, - dataStore->DeleteChunk(id, sn)); + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)).WillOnce(Return(0)); + EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteChunk(id, sn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); } /** * DeleteChunkTest - * chunk存在,快照文件不存在 - * case1: sn<chunkinfo.sn - * 预期结果1:返回BackwardRequestError - * case2: sn>chunkinfo.sn - * 预期结果2:返回成功 + * chunk exists, snapshot file does not exist + * Case1: sn<chunkinfo.sn + * Expected result 1: BackwardRequestError returned + * Case2: sn>chunkinfo.sn + * Expected result 2: Success returned */ TEST_P(CSDataStore_test, DeleteChunkTest4) { // initialize @@ -3107,37 +2501,30 @@ TEST_P(CSDataStore_test, DeleteChunkTest4) { // case1 { // chunk will be closed - EXPECT_CALL(*lfs_, Close(3)) - .Times(0); + EXPECT_CALL(*lfs_, Close(3)).Times(0); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) - .Times(0); + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)).Times(0); EXPECT_EQ(CSErrorCode::BackwardRequestError, - dataStore->DeleteChunk(id, 1)); + dataStore->DeleteChunk(id, 1)); } // case2 { // chunk will be closed - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) - .WillOnce(Return(0)); - EXPECT_EQ(CSErrorCode::Success, - dataStore->DeleteChunk(id, 3)); + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)).WillOnce(Return(0)); + EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteChunk(id, 3)); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); } /** * DeleteChunkErrorTest - * case:chunk存在,快照文件不存在,recyclechunk时出错 - * 预期结果:返回成功 + * Case: chunk exists, snapshot file does not exist, error occurred during + * recyclechunk. Expected result: returned successfully */ TEST_P(CSDataStore_test, DeleteChunkErrorTest1) { // initialize @@ -3147,24 +2534,19 @@ TEST_P(CSDataStore_test, DeleteChunkErrorTest1) { ChunkID id = 2; SequenceNum sn = 2; // chunk will be closed - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) - .WillOnce(Return(-1)); - EXPECT_EQ(CSErrorCode::InternalError, - dataStore->DeleteChunk(id, sn)); + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)).WillOnce(Return(-1)); + EXPECT_EQ(CSErrorCode::InternalError, dataStore->DeleteChunk(id, sn)); - EXPECT_CALL(*lfs_, Close(1)) 
 /**
  * DeleteChunkErrorTest
- * case:chunk存在,快照文件不存在,recyclechunk时出错
- * 预期结果:返回成功
+ * Case: the chunk exists, the snapshot file does not exist, and an error
+ * occurs while recycling the chunk
+ * Expected result: InternalError is returned
  */
 TEST_P(CSDataStore_test, DeleteChunkErrorTest1) {
     // initialize
@@ -3147,24 +2534,19 @@ TEST_P(CSDataStore_test, DeleteChunkErrorTest1) {
     ChunkID id = 2;
     SequenceNum sn = 2;
     // chunk will be closed
-    EXPECT_CALL(*lfs_, Close(3))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(3)).Times(1);
     // expect to call FilePool RecycleFile
-    EXPECT_CALL(*fpool_, RecycleFile(chunk2Path))
-        .WillOnce(Return(-1));
-    EXPECT_EQ(CSErrorCode::InternalError,
-              dataStore->DeleteChunk(id, sn));
+    EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)).WillOnce(Return(-1));
+    EXPECT_EQ(CSErrorCode::InternalError, dataStore->DeleteChunk(id, sn));

-    EXPECT_CALL(*lfs_, Close(1))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(2))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(1)).Times(1);
+    EXPECT_CALL(*lfs_, Close(2)).Times(1);
 }

 /**
  * DeleteSnapshotChunkOrCorrectSnTest
- * case:chunk不存在
- * 预期结果:返回成功
+ * Case: the chunk does not exist
+ * Expected result: success is returned
  */
 TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest1) {
     // initialize
@@ -3177,27 +2559,26 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest1) {
     EXPECT_EQ(CSErrorCode::Success,
               dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn));

-    EXPECT_CALL(*lfs_, Close(1))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(2))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(3))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(1)).Times(1);
+    EXPECT_CALL(*lfs_, Close(2)).Times(1);
+    EXPECT_CALL(*lfs_, Close(3)).Times(1);
 }

-// 对于DeleteSnapshotChunkOrCorrectSn来说,内部主要有两个操作
-// 一个是删除快照文件,一个是修改correctedSn
-// 当存在快照文件时,fileSn>=chunk的sn是判断是否要删除快照的唯一条件
-// 对于correctedSn来说,fileSn大于chunk的sn以及correctedSn是判断
-// 是否要修改correctedSn的唯一条件
+// DeleteSnapshotChunkOrCorrectSn performs two main internal operations:
+// deleting the snapshot file and updating correctedSn.
+// When a snapshot file exists, fileSn >= the chunk's sn is the only
+// condition for deleting the snapshot.
+// For correctedSn, fileSn being greater than both the chunk's sn and its
+// correctedSn is the only condition for updating correctedSn.
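Restated as code, the two rules in the comment above are independent predicates. A minimal sketch, assuming plain sn/correctedSn fields on a simplified chunk record (the real metadata lives in CSChunkInfo):

#include <cstdint>

// Simplified stand-in for the chunk metadata the tests inspect.
struct ChunkRecord {
    uint64_t sn;           // current sequence number
    uint64_t correctedSn;  // sequence number already corrected to
};

// With a snapshot present, fileSn >= sn is the sole delete condition.
bool ShouldDeleteSnapshot(uint64_t fileSn, const ChunkRecord& c) {
    return fileSn >= c.sn;
}

// correctedSn is updated only when fileSn exceeds both recorded values.
bool ShouldUpdateCorrectedSn(uint64_t fileSn, const ChunkRecord& c) {
    return fileSn > c.sn && fileSn > c.correctedSn;
}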
 /**
  * DeleteSnapshotChunkOrCorrectSnTest
- * case:chunk存在,snapshot存在
- * fileSn >= chunk的sn
- * fileSn == chunk的correctedSn
- * chunk.sn>snap.sn
- * 预期结果:删除快照,不会修改correctedSn,返回成功
+ * Case: the chunk exists and the snapshot exists
+ * fileSn >= the chunk's sn
+ * fileSn == the chunk's correctedSn
+ * chunk.sn > snap.sn
+ * Expected result: the snapshot is deleted, correctedSn is not modified,
+ * and success is returned
  */
 TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest2) {
     // initialize
@@ -3206,9 +2587,8 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest2) {
     FakeEncodeChunk(chunk1MetaPage, 3, 2);
     EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_))
         .WillRepeatedly(DoAll(
-            SetArrayArgument<1>(chunk1MetaPage,
-                                chunk1MetaPage + metapagesize_),
-            Return(metapagesize_)));
+            SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_),
+            Return(metapagesize_)));
     EXPECT_TRUE(dataStore->Initialize());

     ChunkID id = 1;
@@ -3216,11 +2596,9 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest2) {
     // fileSn == correctedSn
     SequenceNum fileSn = 3;
     // snapshot will be closed
-    EXPECT_CALL(*lfs_, Close(2))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(2)).Times(1);
     // expect to call FilePool RecycleFile
-    EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path))
-        .Times(1);
+    EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).Times(1);
     // chunk's metapage should not be updated
     EXPECT_CALL(*lfs_,
                 Write(1, Matcher<const char*>(NotNull()), 0, metapagesize_))
@@ -3228,18 +2606,17 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest2) {
     EXPECT_EQ(CSErrorCode::Success,
               dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn));

-    EXPECT_CALL(*lfs_, Close(1))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(3))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(1)).Times(1);
+    EXPECT_CALL(*lfs_, Close(3)).Times(1);
 }

 /**
  * DeleteSnapshotChunkOrCorrectSnTest
- * case:chunk存在,snapshot存在
- * fileSn < chunk的sn
- * 此时无论correctSn为何值都不会修改correctedSn
- * 预期结果:返回成功,不会删除快照,不会修改correctedSn
+ * Case: the chunk exists and the snapshot exists
+ * fileSn < the chunk's sn
+ * In this case correctedSn is never modified, whatever its value is
+ * Expected result: success is returned, the snapshot is not deleted, and
+ * correctedSn is not modified
  */
 TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) {
     // initialize
@@ -3248,9 +2625,8 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) {
     FakeEncodeChunk(chunk1MetaPage, 0, 3);
     EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_))
         .WillRepeatedly(DoAll(
-            SetArrayArgument<1>(chunk1MetaPage,
-                                chunk1MetaPage + metapagesize_),
-            Return(metapagesize_)));
+            SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_),
+            Return(metapagesize_)));
     EXPECT_TRUE(dataStore->Initialize());

     ChunkID id = 1;
@@ -3258,8 +2634,7 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) {
     // 2 > correctedSn
     SequenceNum fileSn = 2;
     // snapshot should not be closed
-    EXPECT_CALL(*lfs_, Close(2))
-        .Times(0);
+    EXPECT_CALL(*lfs_, Close(2)).Times(0);
     // chunk's metapage should not be updated
     EXPECT_CALL(*lfs_,
                 Write(3, Matcher<const char*>(NotNull()), 0, metapagesize_))
@@ -3267,17 +2642,14 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) {
     EXPECT_EQ(CSErrorCode::BackwardRequestError,
               dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn));

-    // 下则用例用于补充DeleteSnapshotChunkOrCorrectSnTest2用例中
-    // 当 fileSn == sn 时的边界情况
-    // fileSn == sn
-    // fileSn > correctedSn
+    // The following case supplements DeleteSnapshotChunkOrCorrectSnTest2
+    // with the boundary situation where fileSn == sn:
+    // fileSn == sn
+    // fileSn > correctedSn
     fileSn = 3;
     // snapshot will be closed
-    EXPECT_CALL(*lfs_, Close(2))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(2)).Times(1);
     // expect to call FilePool RecycleFile
-    EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path))
-        .Times(1);
+    EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).Times(1);
     // chunk's metapage should not be updated
     EXPECT_CALL(*lfs_,
                 Write(1, Matcher<const char*>(NotNull()), 0, metapagesize_))
@@ -3285,17 +2657,16 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) {
     EXPECT_EQ(CSErrorCode::Success,
               dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn));

-    EXPECT_CALL(*lfs_, Close(1))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(3))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(1)).Times(1);
+    EXPECT_CALL(*lfs_, Close(3)).Times(1);
 }

 /**
  * DeleteSnapshotChunkOrCorrectSnTest
- * case:chunk存在,snapshot存在
- * fileSn > chunk的sn以及correctedSn
- * 预期结果:删除快照,并修改correctedSn,返回成功
+ * Case: the chunk exists and the snapshot exists
+ * fileSn > the chunk's sn and correctedSn
+ * Expected result: the snapshot is deleted, correctedSn is modified, and
+ * success is returned
  */
 TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest4) {
     // initialize
@@ -3307,11 +2678,9 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest4) {
     // fileSn > correctedSn
     SequenceNum fileSn = 3;
     // snapshot will be closed
-    EXPECT_CALL(*lfs_, Close(2))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(2)).Times(1);
     // expect to call FilePool RecycleFile
-    EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path))
-        .Times(1);
+    EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).Times(1);
     // chunk's metapage will be updated
     EXPECT_CALL(*lfs_,
                 Write(1, Matcher<const char*>(NotNull()), 0, metapagesize_))
@@ -3319,17 +2688,15 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest4) {
     EXPECT_EQ(CSErrorCode::Success,
               dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn));

-    EXPECT_CALL(*lfs_, Close(1))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(3))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(1)).Times(1);
+    EXPECT_CALL(*lfs_, Close(3)).Times(1);
 }

 /**
  * DeleteSnapshotChunkOrCorrectSnTest
- * 
case:chunk存在,snapshot不存在 - * fileSn <= chunk的sn或correctedSn - * 预期结果:不会修改correctedSn,返回成功 + * Case: chunk exists, snapshot does not exist + * fileSn <= SN or correctedSn of chunk + * Expected result: CorrectedSn will not be modified, returning success */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest5) { // initialize @@ -3347,19 +2714,16 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest5) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot不存在 - * fileSn > chunk的sn及correctedSn - * 预期结果:修改correctedSn,返回成功 + * Case: chunk exists, snapshot does not exist + * fileSn > chunk's sn and correctedSn + * Expected result: Modify correctedSn and return success */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest6) { // initialize @@ -3377,18 +2741,15 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest6) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot不存在,chunk为clone chunk - * 预期结果:返回StatusConflictError + * Case: chunk exists, snapshot does not exist, chunk is clone chunk + * Expected result: Returning StatusConflictError */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest7) { // initialize @@ -3405,29 +2766,23 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest7) { FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) - .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), - Return(metapagesize_))); - EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, - location)); - - // 无论correctedSn为多少,都返回StatusConflictError + .WillOnce(DoAll( + SetArrayArgument<1>(chunk3MetaPage, chunk3MetaPage + metapagesize_), + Return(metapagesize_))); + EXPECT_EQ( + CSErrorCode::Success, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); + + // Returns StatusConflictError regardless of the number of correctedSn EXPECT_EQ(CSErrorCode::StatusConflictError, dataStore->DeleteSnapshotChunkOrCorrectSn(id, 1)); EXPECT_EQ(CSErrorCode::StatusConflictError, @@ -3439,23 +2794,20 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest7) { 
EXPECT_EQ(CSErrorCode::StatusConflictError, dataStore->DeleteSnapshotChunkOrCorrectSn(id, 5)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn > chunk的sn - * fileSn > chunk的correctedSn + * Case: chunk exists, snapshot exists + * fileSn > chunk's sn + * fileSn > chunk's correctedSn * chunk.sn==snap.sn - * 预期结果:删除快照,不会修改correctedSn,返回成功 + * Expected result: Delete snapshot without modifying correctedSn, return + * success */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest8) { // initialize @@ -3464,16 +2816,15 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest8) { FakeEncodeChunk(chunk1MetaPage, 0, 2); EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metapagesize_)) .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); // fake read chunk1's snapshot1 metapage,chunk.sn==snap.sn FakeEncodeSnapshot(chunk1SnapMetaPage, 2); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -3481,11 +2832,9 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest8) { // fileSn > correctedSn SequenceNum fileSn = 3; // snapshot will not be closed - EXPECT_CALL(*lfs_, Close(2)) - .Times(0); + EXPECT_CALL(*lfs_, Close(2)).Times(0); // expect to call FilePool RecycleFile - EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) - .Times(0); + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).Times(0); // chunk's metapage should be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, metapagesize_)) @@ -3493,21 +2842,19 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest8) { EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn == chunk的sn - * fileSn == chunk的correctedSn - * chunk.sn(chunk1MetaPage, - chunk1MetaPage + metapagesize_), - Return(metapagesize_))); + SetArrayArgument<1>(chunk1MetaPage, chunk1MetaPage + metapagesize_), + Return(metapagesize_))); // fake read chunk1's snapshot1 metapage,chunk.sn==snap.sn FakeEncodeSnapshot(chunk1SnapMetaPage, 3); EXPECT_CALL(*lfs_, Read(2, NotNull(), 0, metapagesize_)) - .WillRepeatedly(DoAll( - SetArrayArgument<1>(chunk1SnapMetaPage, - chunk1SnapMetaPage + metapagesize_), - Return(metapagesize_))); + .WillRepeatedly( + DoAll(SetArrayArgument<1>(chunk1SnapMetaPage, + chunk1SnapMetaPage + metapagesize_), + Return(metapagesize_))); EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 1; @@ -3533,11 +2879,9 @@ 
TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest9) {
     // fileSn == correctedSn
     SequenceNum fileSn = 2;
     // snapshot will not be closed
-    EXPECT_CALL(*lfs_, Close(2))
-        .Times(0);
+    EXPECT_CALL(*lfs_, Close(2)).Times(0);
     // expect to call FilePool RecycleFile
-    EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path))
-        .Times(0);
+    EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).Times(0);
     // chunk's metapage should not be updated
     EXPECT_CALL(*lfs_,
                 Write(1, Matcher<const char*>(NotNull()), 0, metapagesize_))
@@ -3545,18 +2889,15 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest9) {
     EXPECT_EQ(CSErrorCode::Success,
               dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn));

-    EXPECT_CALL(*lfs_, Close(1))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(2))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(3))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(1)).Times(1);
+    EXPECT_CALL(*lfs_, Close(2)).Times(1);
+    EXPECT_CALL(*lfs_, Close(3)).Times(1);
 }

 /**
  * DeleteSnapshotChunkOrCorrectSnErrorTest
- * case:修改correctedSn时失败
- * 预期结果:返回失败,correctedSn的值未改变
+ * Case: modifying correctedSn fails
+ * Expected result: failure is returned and the value of correctedSn is
+ * unchanged
  */
 TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest1) {
     // initialize
@@ -3582,18 +2923,15 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest1) {
     EXPECT_EQ(CSErrorCode::Success,
               dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn));

-    EXPECT_CALL(*lfs_, Close(1))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(2))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(3))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(1)).Times(1);
+    EXPECT_CALL(*lfs_, Close(2)).Times(1);
+    EXPECT_CALL(*lfs_, Close(3)).Times(1);
 }

 /**
  * DeleteSnapshotChunkOrCorrectSnErrorTest
- * case:回收snapshot的chunk的时候失败
- * 预期结果:返回失败
+ * Case: recycling the snapshot's chunk fails
+ * Expected result: failure is returned
  */
 TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest2) {
     // initialize
@@ -3605,11 +2943,9 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest2) {
     // fileSn > correctedSn
     SequenceNum fileSn = 3;
     // snapshot will be closed
-    EXPECT_CALL(*lfs_, Close(2))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(2)).Times(1);
     // expect to call FilePool RecycleFile
-    EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path))
-        .WillOnce(Return(-1));
+    EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)).WillOnce(Return(-1));
     // chunk's metapage will be updated
     EXPECT_CALL(*lfs_,
                 Write(1, Matcher<const char*>(NotNull()), 0, metapagesize_))
@@ -3617,26 +2953,26 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest2) {
     EXPECT_EQ(CSErrorCode::InternalError,
               dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn));

-    EXPECT_CALL(*lfs_, Close(1))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(3))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(1)).Times(1);
+    EXPECT_CALL(*lfs_, Close(3)).Times(1);
 }

 /**
  * CreateCloneChunkTest
- * case1:指定的chunk不存在,输入错误的参数
- * 预期结果1:返回InvalidArgError
- * case2:指定的chunk不存在,指定chunksize与配置一致
- * 预期结果2:创建成功
- * case3:指定的chunk存在,参数与原chunk一致
- * 预期结果3:返回成功
- * case4:指定的chunk存在,参数与原chunk不一致
- * 预期结果4:返回ChunkConflictError,不改变原chunk信息
- * case5:指定的chunk存在,指定chunksize与配置不一致
- * 预期结果5: 返回InvalidArgError,不改变原chunk信息
- * case6:指定的chunk存在,chunk不是clone chunk,参数与chunk信息一致
- * 预期结果:返回ChunkConflictError,不改变原chunk信息
+ * Case 1: the specified chunk does not exist and the input parameters are
+ * invalid
+ * Expected result 1: InvalidArgError is returned
+ * Case 2: the specified chunk does not exist and the specified chunksize
+ * matches the configuration
+ * Expected result 2: creation succeeds
+ * Case 3: the specified chunk exists and the parameters match the original
+ * chunk
+ * Expected result 3: success is returned
+ * Case 4: the specified chunk exists and the parameters do not match the
+ * original chunk
+ * Expected result 4: ChunkConflictError is returned and the original chunk
+ * information is unchanged
+ * Case 5: the specified chunk exists and the specified chunksize does not
+ * match the configuration
+ * Expected result 5: InvalidArgError is returned and the original chunk
+ * information is unchanged
+ * Case 6: the specified chunk exists, the chunk is not a clone chunk, and
+ * the parameters match the chunk information
+ * Expected result 6: ChunkConflictError is returned and the original chunk
+ * information is unchanged
  */
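The case1 branch of the test below probes three independent argument checks. A minimal sketch of that validation order, assuming illustrative names that are not the datastore API:

#include <cstdint>
#include <string>

// Simplified stand-in for the datastore's error codes.
enum class CSErrorCode { Success, InvalidArgError };

// Mirrors the rejection rules case1 exercises: a size that differs from the
// configured chunk size, a zero sequence number, or an empty clone-source
// location each yield InvalidArgError.
CSErrorCode CheckCloneChunkArgs(uint32_t size, uint64_t sn,
                                const std::string& location,
                                uint32_t configuredChunkSize) {
    if (size != configuredChunkSize) return CSErrorCode::InvalidArgError;
    if (sn == 0) return CSErrorCode::InvalidArgError;
    if (location.empty()) return CSErrorCode::InvalidArgError;
    return CSErrorCode::Success;
}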
 TEST_P(CSDataStore_test, CreateCloneChunkTest) {
     // initialize
@@ -3652,58 +2988,44 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) {
     shared_ptr<Bitmap> bitmap =
         make_shared<Bitmap>(chunksize_ / blocksize_);
     FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location);
-    // case1:输入错误的参数
+    // Case 1: invalid input parameters
     {
         // size != chunksize
         EXPECT_EQ(CSErrorCode::InvalidArgError,
-                  dataStore->CreateCloneChunk(id,
-                                              sn,
-                                              correctedSn,
-                                              blocksize_,
+                  dataStore->CreateCloneChunk(id, sn, correctedSn, blocksize_,
                                               location));
         // sn == 0
         EXPECT_EQ(CSErrorCode::InvalidArgError,
-                  dataStore->CreateCloneChunk(id,
-                                              0,
-                                              correctedSn,
-                                              chunksize_,
+                  dataStore->CreateCloneChunk(id, 0, correctedSn, chunksize_,
                                               location));
         // location is empty
-        EXPECT_EQ(CSErrorCode::InvalidArgError,
-                  dataStore->CreateCloneChunk(id,
-                                              sn,
-                                              correctedSn,
-                                              chunksize_,
-                                              ""));
+        EXPECT_EQ(
+            CSErrorCode::InvalidArgError,
+            dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, ""));
     }
-    // case2:指定的chunk不存在,指定chunksize与配置一致
+    // Case 2: the specified chunk does not exist and the specified chunksize
+    // matches the configuration
     {
         // create new chunk and open it
-        string chunk3Path = string(baseDir) + "/" +
-                            FileNameOperator::GenerateChunkFileName(id);
+        string chunk3Path =
+            string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id);
         // expect call chunkfile pool GetFile
-        EXPECT_CALL(*lfs_, FileExists(chunk3Path))
-            .WillOnce(Return(false));
+        EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false));
         EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull()))
             .WillOnce(Return(0));
-        EXPECT_CALL(*lfs_, Open(chunk3Path, _))
-            .Times(1)
-            .WillOnce(Return(4));
+        EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4));
         // will read metapage
         EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_))
             .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage,
-                                chunk3MetaPage + metapagesize_),
+                                                chunk3MetaPage + metapagesize_),
                             Return(metapagesize_)));
         EXPECT_EQ(CSErrorCode::Success,
-                  dataStore->CreateCloneChunk(id,
-                                              sn,
-                                              correctedSn,
-                                              chunksize_,
+                  dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_,
                                               location));
-        // 检查生成的clone chunk信息
+        // Check the generated clone chunk information
        ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info));
        ASSERT_EQ(id, info.chunkId);
        ASSERT_EQ(sn, info.curSn);
@@ -3714,15 +3036,13 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) {
         ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0));
     }

-    // case3:指定的chunk存在,参数与原chunk一致
+    // Case 3: the specified chunk exists and the parameters match the
+    // original chunk
     {
         EXPECT_EQ(CSErrorCode::Success,
-                  dataStore->CreateCloneChunk(id,
-                                              sn,
-                                              correctedSn,
-                                              chunksize_,
+                  dataStore->CreateCloneChunk(id, 
sn, correctedSn, chunksize_, location)); - // 检查生成的clone chunk信息 + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(id, info.chunkId); ASSERT_EQ(sn, info.curSn); @@ -3733,31 +3053,23 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case4:指定的chunk存在,参数与原chunk不一致 - // 返回ChunkConflictError,但是不会改变原chunk信息 + // Case4: The specified chunk exists, and the parameters are inconsistent + // with the original chunk Returns ChunkConflictError, but does not change + // the original chunk information { - // 版本不一致 + // Version inconsistency EXPECT_EQ(CSErrorCode::ChunkConflictError, - dataStore->CreateCloneChunk(id, - sn + 1, - correctedSn, - chunksize_, - location)); - // correctedSn不一致 + dataStore->CreateCloneChunk(id, sn + 1, correctedSn, + chunksize_, location)); + // Inconsistent correctedSn EXPECT_EQ(CSErrorCode::ChunkConflictError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn + 1, - chunksize_, - location)); - // location不一致 + dataStore->CreateCloneChunk(id, sn, correctedSn + 1, + chunksize_, location)); + // Inconsistent location EXPECT_EQ(CSErrorCode::ChunkConflictError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, "temp")); - // 检查生成的clone chunk信息 + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(id, info.chunkId); ASSERT_EQ(sn, info.curSn); @@ -3768,16 +3080,15 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case5:指定的chunk存在,指定chunksize与配置不一致 - // 返回InvalidArgError,但是不会改变原chunk信息 + // Case5: The specified chunk exists, but the specified chunksize is + // inconsistent with the configuration Returns InvalidArgError, but does not + // change the original chunk information { - EXPECT_EQ(CSErrorCode::InvalidArgError, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_ + metapagesize_, - location)); - // 检查生成的clone chunk信息 + EXPECT_EQ( + CSErrorCode::InvalidArgError, + dataStore->CreateCloneChunk(id, sn, correctedSn, + chunksize_ + metapagesize_, location)); + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(id, info.chunkId); ASSERT_EQ(sn, info.curSn); @@ -3788,39 +3099,33 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case6:已存在chunk,chunk不是clone chunk + // Case6: Chunk already exists, chunk is not a clone chunk { - // location 为空 + // location is empty EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->CreateCloneChunk(1, // id 2, // sn 0, // correctedSn - chunksize_, - "")); + chunksize_, "")); - // location 不为空 + // location is not empty EXPECT_EQ(CSErrorCode::ChunkConflictError, dataStore->CreateCloneChunk(1, // id 2, // sn 0, // correctedSn - chunksize_, - location)); + chunksize_, location)); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); } /** * CreateCloneChunkErrorTest - * case:chunk不存在,调chunkFile->Open的时候失败 - * 预期结果:创建clone chunk失败 + * Case: chunk does not exist, 
and the call to chunkFile->Open fails
+ * Expected result: creating the clone chunk fails
  */
 TEST_P(CSDataStore_test, CreateCloneChunkErrorTest) {
     // initialize
@@ -3832,47 +3137,40 @@ TEST_P(CSDataStore_test, CreateCloneChunkErrorTest) {
     SequenceNum correctedSn = 2;
     CSChunkInfo info;
     // create new chunk and open it
-    string chunk3Path = string(baseDir) + "/" +
-                        FileNameOperator::GenerateChunkFileName(id);
+    string chunk3Path =
+        string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id);
     // expect call chunk file pool GetFile
-    EXPECT_CALL(*lfs_, FileExists(chunk3Path))
-        .WillOnce(Return(false));
+    EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false));
     EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull()))
         .WillOnce(Return(-UT_ERRNO));
-    EXPECT_EQ(CSErrorCode::InternalError,
-              dataStore->CreateCloneChunk(id,
-                                          sn,
-                                          correctedSn,
-                                          chunksize_,
-                                          location));
-    // 检查生成的clone chunk信息
+    EXPECT_EQ(
+        CSErrorCode::InternalError,
+        dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location));
+    // Check the generated clone chunk information
     ASSERT_EQ(CSErrorCode::ChunkNotExistError,
               dataStore->GetChunkInfo(id, &info));

-    EXPECT_CALL(*lfs_, Close(1))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(2))
-        .Times(1);
-    EXPECT_CALL(*lfs_, Close(3))
-        .Times(1);
+    EXPECT_CALL(*lfs_, Close(1)).Times(1);
+    EXPECT_CALL(*lfs_, Close(2)).Times(1);
+    EXPECT_CALL(*lfs_, Close(3)).Times(1);
 }

 /**
  * PasteChunkTedt
- * case1:chunk 不存在
- * 预期结果1:返回ChunkNotExistError
- * case2:chunk存在,请求偏移超过chunk文件大小或偏移长度未对齐
- * 预期结果2:返回InvalidArgError
- * case3:chunk存在,但不是clone chunk
- * 预期结果3:返回成功
- * case4:chunk存在,且是clone chunk,写入区域之前未写过
- * 预期结果4:写入数据并更新bitmap
- * case5:chunk存在,且是clone chunk,写入区域之前已写过
- * 预期结果5:无数据写入,且不会更新bitmap
- * case6:chunk存在,且是clone chunk,部分区域已写过,部分未写过
- * 预期结果6:只写入未写过数据,并更新bitmap
- * case7:遍写整个chunk
- * 预期结果7:数据写入未写过区域,然后clone chunk会被转为普通chunk
+ * Case 1: the chunk does not exist
+ * Expected result 1: ChunkNotExistError is returned
+ * Case 2: the chunk exists, and the requested offset exceeds the chunk file
+ * size or the offset/length is not aligned
+ * Expected result 2: InvalidArgError is returned
+ * Case 3: the chunk exists but is not a clone chunk
+ * Expected result 3: success is returned
+ * Case 4: the chunk exists and is a clone chunk, and the written region has
+ * not been written before
+ * Expected result 4: the data is written and the bitmap is updated
+ * Case 5: the chunk exists and is a clone chunk, and the written region has
+ * been written before
+ * Expected result 5: no data is written and the bitmap is not updated
+ * Case 6: the chunk exists and is a clone chunk; some regions have been
+ * written and some have not
+ * Expected result 6: only the unwritten regions are written and the bitmap
+ * is updated
+ * Case 7: the entire chunk is overwritten
+ * Expected result 7: data is written to the unwritten regions, then the
+ * clone chunk is converted to a normal chunk
  */
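The bitmap rule behind cases 4 through 7 is: paste only into blocks whose bit is still clear, then set those bits. A minimal sketch under that assumption, with std::vector<bool> standing in for the datastore's Bitmap class and writeBlock standing in for the block-granularity write:

#include <cstdint>
#include <functional>
#include <vector>

// Copies data only into blocks not yet marked written, then marks them.
void PasteBlocks(std::vector<bool>* bitmap, uint32_t beginBlock,
                 uint32_t endBlock,
                 const std::function<void(uint32_t)>& writeBlock) {
    for (uint32_t b = beginBlock; b < endBlock; ++b) {
        if (!(*bitmap)[b]) {      // unwritten region: paste the data
            writeBlock(b);
            (*bitmap)[b] = true;  // record that the block now holds data
        }
    }
}

Once every bit is set, the clone chunk no longer needs the bitmap and can be converted to a normal chunk, which is what case 7 verifies.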
 TEST_P(CSDataStore_test, PasteChunkTest1) {
     // initialize
@@ -3887,7 +3185,7 @@ TEST_P(CSDataStore_test, PasteChunkTest1) {
     char* buf = new char[length];
     memset(buf, 0, length);
     CSChunkInfo info;
-    // 创建 clone chunk
+    // Create a clone chunk
     {
         char chunk3MetaPage[metapagesize_];  // NOLINT(runtime/arrays)
         memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage));
@@ -3895,90 +3193,68 @@ TEST_P(CSDataStore_test, PasteChunkTest1) {
             make_shared<Bitmap>(chunksize_ / blocksize_);
         FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location);
         // create new chunk and open it
-        string chunk3Path = string(baseDir) + "/" +
-                            FileNameOperator::GenerateChunkFileName(id);
+        string chunk3Path =
+            string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id);
         // expect call chunkfile pool GetFile
-        EXPECT_CALL(*lfs_, FileExists(chunk3Path))
-            .WillOnce(Return(false));
+        EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false));
         EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull()))
             .WillOnce(Return(0));
-        EXPECT_CALL(*lfs_, Open(chunk3Path, _))
-            .Times(1)
-            .WillOnce(Return(4));
+        EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4));
         // will read metapage
         EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_))
             .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage,
-                                chunk3MetaPage + metapagesize_),
+                                                chunk3MetaPage + metapagesize_),
                             Return(metapagesize_)));
         EXPECT_EQ(CSErrorCode::Success,
-                  dataStore->CreateCloneChunk(id,
-                                              sn,
-                                              correctedSn,
-                                              chunksize_,
+                  dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_,
                                               location));
     }

-    // case1:chunk 不存在
+    // Case 1: the chunk does not exist
     {
         id = 4;  // not exist
         ASSERT_EQ(CSErrorCode::ChunkNotExistError,
-                  dataStore->PasteChunk(id,
-                                        buf,
-                                        offset,
-                                        length));
+                  dataStore->PasteChunk(id, buf, offset, length));
     }

-    // case2:chunk存在,请求偏移超过chunk文件大小或偏移长度未对齐
+    // Case 2: the chunk exists, and the requested offset exceeds the chunk
+    // file size or the offset/length is not aligned
     {
         id = 3;  // not exist
         offset = chunksize_;
         ASSERT_EQ(CSErrorCode::InvalidArgError,
-                  dataStore->PasteChunk(id,
-                                        buf,
-                                        offset,
-                                        length));
+                  dataStore->PasteChunk(id, buf, offset, length));
         offset = blocksize_ - 1;
         length = blocksize_;
         ASSERT_EQ(CSErrorCode::InvalidArgError,
-                  dataStore->PasteChunk(id,
-                                        buf,
-                                        offset,
-                                        length));
         offset = blocksize_;
         length = blocksize_ + 1;
+                  dataStore->PasteChunk(id, buf, offset, length));
+        offset = blocksize_;
+        length = blocksize_ + 1;
         ASSERT_EQ(CSErrorCode::InvalidArgError,
-                  dataStore->PasteChunk(id,
-                                        buf,
-                                        offset,
-                                        length));
+                  dataStore->PasteChunk(id, buf, offset, length));
     }

-    // case3:chunk存在,但不是clone chunk
+    // Case 3: the chunk exists but is not a clone chunk
     {
         EXPECT_CALL(*lfs_,
                     Write(_, Matcher<const char*>(NotNull()), _, _))
             .Times(0);
-        // 快照不存在
+        // The snapshot does not exist
         id = 2;
         offset = 0;
         length = blocksize_;
         ASSERT_EQ(CSErrorCode::Success,
-                  dataStore->PasteChunk(id,
-                                        buf,
-                                        offset,
-                                        length));
+                  dataStore->PasteChunk(id, buf, offset, length));

-        // 快照存在
+        // The snapshot exists
         id = 1;
         offset = 0;
         ASSERT_EQ(CSErrorCode::Success,
-                  dataStore->PasteChunk(id,
-                                        buf,
-                                        offset,
-                                        length));
+                  dataStore->PasteChunk(id, buf, offset, length));
     }
-    // case4:chunk存在,且是clone chunk,写入区域之前未写过
+    // Case 4: the chunk exists and is a clone chunk, and the written region
+    // has not been written before
     {
         id = 3;  // not exist
         offset 
= blocksize_; @@ -3991,11 +3267,8 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(1); ASSERT_EQ(CSErrorCode::Success, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // 检查paste后chunk的状态 + dataStore->PasteChunk(id, buf, offset, length)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0)); @@ -4003,7 +3276,8 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case5:chunk存在,且是clone chunk,写入区域之前已写过 + // Case5: chunk exists and is a clone chunk, which has been written before + // writing to the region { id = 3; // not exist offset = blocksize_; @@ -4015,23 +3289,22 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(0); ASSERT_EQ(CSErrorCode::Success, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // paste后,chunk的状态不变 + dataStore->PasteChunk(id, buf, offset, length)); + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0)); ASSERT_EQ(3, info.bitmap->NextClearBit(1)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case6:chunk存在,且是clone chunk,部分区域已写过,部分未写过 + // Case6: chunk exists and is a clone chunk. Some areas have been written, + // while others have not { id = 3; // not exist offset = 0; length = 4 * blocksize_; - // [2 * blocksize_, 4 * blocksize_)区域已写过,[0, blocksize_)为metapage + // [2 * blocksize_, 4 * blocksize_) area has been written, [0, + // blocksize_) is a metapage EXPECT_CALL(*lfs_, Write(4, Matcher(NotNull()), metapagesize_, blocksize_)) .Times(1); @@ -4043,21 +3316,21 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { .Times(1); ASSERT_EQ(CSErrorCode::Success, dataStore->PasteChunk(id, buf, offset, length)); - // paste后,chunk的状态不变 + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(4, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(4)); } - // case7:遍写整个chunk + // Case7: Overwrite the entire chunk { id = 3; // not exist offset = 0; length = chunksize_; - // [blocksize_, 4 * blocksize_)区域已写过,[0, blocksize_)为metapage - EXPECT_CALL(*lfs_, Write(4, - Matcher(NotNull()), + // [blocksize_, 4 * blocksize_) area has been written, [0, blocksize_) + // is a metapage + EXPECT_CALL(*lfs_, Write(4, Matcher(NotNull()), metapagesize_ + 4 * blocksize_, chunksize_ - 4 * blocksize_)) .Times(1); @@ -4065,33 +3338,26 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(1); ASSERT_EQ(CSErrorCode::Success, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // paste后,chunk的状态不变 + dataStore->PasteChunk(id, buf, offset, length)); + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, 
Close(3)).Times(1); + EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /* * PasteChunkErrorTest - * case1:写数据时失败 - * 预期结果1:返回InternalError,chunk状态不变 - * case2:更新metapage时失败 - * 预期结果2:返回InternalError,chunk状态不变 + * Case1: Failed to write data + * Expected result 1: InternalError returned, chunk status remains unchanged + * Case2: Failed to update metapage + * Expected result 2: InternalError returned, chunk status remains unchanged */ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { // initialize @@ -4106,7 +3372,7 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { char* buf = new char[length]; // NOLINT memset(buf, 0, length); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { char chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); @@ -4114,29 +3380,23 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { make_shared(chunksize_ / blocksize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); // create new chunk and open it - string chunk3Path = string(baseDir) + "/" + - FileNameOperator::GenerateChunkFileName(id); + string chunk3Path = + string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); // expect call chunkfile pool GetFile - EXPECT_CALL(*lfs_, FileExists(chunk3Path)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs_, FileExists(chunk3Path)).WillOnce(Return(false)); EXPECT_CALL(*fpool_, GetFileImpl(chunk3Path, NotNull())) .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Open(chunk3Path, _)) - .Times(1) - .WillOnce(Return(4)); + EXPECT_CALL(*lfs_, Open(chunk3Path, _)).Times(1).WillOnce(Return(4)); // will read metapage EXPECT_CALL(*lfs_, Read(4, NotNull(), 0, metapagesize_)) .WillOnce(DoAll(SetArrayArgument<1>(chunk3MetaPage, - chunk3MetaPage + metapagesize_), + chunk3MetaPage + metapagesize_), Return(metapagesize_))); EXPECT_EQ(CSErrorCode::Success, - dataStore->CreateCloneChunk(id, - sn, - correctedSn, - chunksize_, + dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, location)); } - // case1:写数据时失败 + // Case1: Failed to write data { id = 3; // not exist offset = blocksize_; @@ -4149,16 +3409,13 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .Times(0); ASSERT_EQ(CSErrorCode::InternalError, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // 检查paste后chunk的状态 + dataStore->PasteChunk(id, buf, offset, length)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case2:更新metapage时失败 + // Case2: Failed to update metapage { id = 3; // not exist offset = blocksize_; @@ -4171,29 +3428,22 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { Write(4, Matcher(NotNull()), 0, metapagesize_)) .WillOnce(Return(-UT_ERRNO)); ASSERT_EQ(CSErrorCode::InternalError, - dataStore->PasteChunk(id, - buf, - offset, - length)); - // 检查paste后chunk的状态 + dataStore->PasteChunk(id, buf, offset, length)); + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); - EXPECT_CALL(*lfs_, Close(4)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); + 
EXPECT_CALL(*lfs_, Close(4)).Times(1); delete[] buf; } /* - * chunk不存在 + * Chunk does not exist */ TEST_P(CSDataStore_test, GetHashErrorTest1) { // initialize @@ -4205,21 +3455,15 @@ TEST_P(CSDataStore_test, GetHashErrorTest1) { // test chunk not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, - dataStore->GetChunkHash(id, - 0, - 4096, - &hash)); + dataStore->GetChunkHash(id, 0, 4096, &hash)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /* - * read报错 + * Read error */ TEST_P(CSDataStore_test, GetHashErrorTest2) { // initialize @@ -4231,23 +3475,16 @@ TEST_P(CSDataStore_test, GetHashErrorTest2) { off_t offset = 0; size_t length = metapagesize_ + chunksize_; // test read chunk failed - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, 4096)) - .WillOnce(Return(-UT_ERRNO)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, 4096)).WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, - dataStore->GetChunkHash(id, - 0, - 4096, - &hash)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + dataStore->GetChunkHash(id, 0, 4096, &hash)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } /* - * 获取datastore状态测试 + * Obtain Datastore Status Test */ TEST_P(CSDataStore_test, GetStatusTest) { // initialize @@ -4259,17 +3496,13 @@ TEST_P(CSDataStore_test, GetStatusTest) { ASSERT_EQ(2, status.chunkFileCount); // ASSERT_EQ(1, status.snapshotCount); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - EXPECT_CALL(*lfs_, Close(3)) - .Times(1); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + EXPECT_CALL(*lfs_, Close(3)).Times(1); } INSTANTIATE_TEST_CASE_P( - CSDataStoreTest, - CSDataStore_test, + CSDataStoreTest, CSDataStore_test, ::testing::Values( // chunk size block size, metapagesize std::make_tuple(16U * 1024 * 1024, 4096U, 4096U), diff --git a/test/chunkserver/datastore/file_helper_unittest.cpp b/test/chunkserver/datastore/file_helper_unittest.cpp index 0f7ca39b95..359d7303d7 100644 --- a/test/chunkserver/datastore/file_helper_unittest.cpp +++ b/test/chunkserver/datastore/file_helper_unittest.cpp @@ -20,10 +20,11 @@ * Author: yangyaokai */ -#include #include -#include +#include + #include +#include #include "src/chunkserver/datastore/datastore_file_helper.h" #include "test/fs/mock_local_filesystem.h" @@ -32,17 +33,17 @@ using curve::fs::LocalFileSystem; using curve::fs::MockLocalFileSystem; using ::testing::_; +using ::testing::DoAll; +using ::testing::ElementsAre; using ::testing::Ge; using ::testing::Gt; -using ::testing::Return; -using ::testing::NotNull; using ::testing::Mock; -using ::testing::Truly; -using ::testing::DoAll; +using ::testing::NotNull; +using ::testing::Return; using ::testing::ReturnArg; -using ::testing::ElementsAre; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; +using ::testing::Truly; namespace curve { namespace chunkserver { @@ -54,6 +55,7 @@ class FileHelper_MockTest : public testing::Test { fileHelper_ = std::make_shared(fs_); } void TearDown() {} + protected: std::shared_ptr fs_; std::shared_ptr fileHelper_; @@ -64,29 +66,26 @@ TEST_F(FileHelper_MockTest, ListFilesTest) { vector chunkFiles; vector snapFiles; - // 
case1:List失败,返回-1 - EXPECT_CALL(*fs_, List(_, _)) - .WillOnce(Return(-1)); + // Case1: List failed, returned -1 + EXPECT_CALL(*fs_, List(_, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, fileHelper_->ListFiles(baseDir, &chunkFiles, &snapFiles)); - // 如果返回ENOENT错误,直接返回成功 - EXPECT_CALL(*fs_, List(_, _)) - .WillOnce(Return(-ENOENT)); + // If an ENOENT error is returned, success is returned directly + EXPECT_CALL(*fs_, List(_, _)).WillOnce(Return(-ENOENT)); ASSERT_EQ(0, fileHelper_->ListFiles(baseDir, &chunkFiles, &snapFiles)); vector files; string chunk1 = "chunk_1"; string chunk2 = "chunk_2"; string snap1 = "chunk_1_snap_1"; - string other = "chunk_1_S"; // 非法文件名 + string other = "chunk_1_S"; // Illegal file name files.emplace_back(chunk1); files.emplace_back(chunk2); files.emplace_back(snap1); files.emplace_back(other); EXPECT_CALL(*fs_, List(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); - // case2:List成功,返回chunk文件和snapshot文件 + // Case2: List successful, returning chunk file and snapshot file ASSERT_EQ(0, fileHelper_->ListFiles(baseDir, &chunkFiles, &snapFiles)); ASSERT_EQ(2, chunkFiles.size()); ASSERT_STREQ(chunk1.c_str(), chunkFiles[0].c_str()); @@ -94,7 +93,7 @@ TEST_F(FileHelper_MockTest, ListFilesTest) { ASSERT_EQ(1, snapFiles.size()); ASSERT_STREQ(snap1.c_str(), snapFiles[0].c_str()); - // case3:允许vector为空指针 + // Case3: Allow vector to be a null pointer ASSERT_EQ(0, fileHelper_->ListFiles(baseDir, nullptr, nullptr)); } diff --git a/test/chunkserver/datastore/filepool_mock_unittest.cpp b/test/chunkserver/datastore/filepool_mock_unittest.cpp index 5a70d46551..182b1e90ac 100644 --- a/test/chunkserver/datastore/filepool_mock_unittest.cpp +++ b/test/chunkserver/datastore/filepool_mock_unittest.cpp @@ -20,943 +20,844 @@ * Author: yangyaokai */ +#include #include #include #include -#include + #include #include #include "include/chunkserver/chunkserver_common.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/common/crc32.h" #include "src/common/curve_define.h" -#include "src/chunkserver/datastore/file_pool.h" #include "test/fs/mock_local_filesystem.h" using ::testing::_; +using ::testing::DoAll; +using ::testing::ElementsAre; using ::testing::Ge; using ::testing::Gt; -using ::testing::Return; -using ::testing::NotNull; +using ::testing::Invoke; using ::testing::Matcher; using ::testing::Mock; -using ::testing::Truly; -using ::testing::DoAll; +using ::testing::NotNull; +using ::testing::Return; using ::testing::ReturnArg; -using ::testing::Invoke; -using ::testing::ElementsAre; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; +using ::testing::Truly; -using curve::fs::MockLocalFileSystem; using curve::common::kFilePoolMagic; +using curve::fs::MockLocalFileSystem; + +namespace curve +{ + namespace chunkserver + { + + const ChunkSizeType CHUNK_SIZE = 16 * 1024 * 1024; + const PageSizeType PAGE_SIZE = 4096; + const uint32_t metaFileSize = 4096; + const uint32_t blockSize = 4096; + const uint32_t fileSize = CHUNK_SIZE + PAGE_SIZE; + const std::string poolDir = "./chunkfilepool_dat"; // NOLINT + const std::string poolMetaPath = "./chunkfilepool_dat.meta"; // NOLINT + const std::string filePath1 = poolDir + "/1"; // NOLINT + const std::string targetPath = "./data/chunk_1"; // NOLINT + const char *kChunkSize = "chunkSize"; + const char *kMetaPageSize = "metaPageSize"; + const char *kChunkFilePoolPath = "chunkfilepool_path"; + const char *kCRC = "crc"; + const char *kBlockSize = 
"blockSize"; + + class CSChunkfilePoolMockTest : public testing::Test + { + public: + void SetUp() { lfs_ = std::make_shared(); } + + void TearDown() {} + + static Json::Value GenerateMetaJson(bool hasBlockSize = false) + { + // JSON format for normal meta files + FilePoolMeta meta; + meta.chunkSize = CHUNK_SIZE; + meta.metaPageSize = PAGE_SIZE; + meta.hasBlockSize = hasBlockSize; + if (hasBlockSize) + { + meta.blockSize = blockSize; + } + meta.filePoolPath = poolDir; + + Json::Value jsonContent; + jsonContent[kChunkSize] = CHUNK_SIZE; + jsonContent[kMetaPageSize] = PAGE_SIZE; + + if (hasBlockSize) + { + jsonContent[kBlockSize] = blockSize; + } + + jsonContent[kChunkFilePoolPath] = poolDir; + jsonContent[kCRC] = meta.Crc32(); + return jsonContent; + } + + void FakeMetaFile() + { + EXPECT_CALL(*lfs_, FileExists(poolMetaPath)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) + .WillOnce(Return(100)); + EXPECT_CALL(*lfs_, Read(100, NotNull(), 0, metaFileSize)) + .WillOnce(Invoke( + [this](int /*fd*/, char *buf, uint64_t offset, int length) + { + EXPECT_EQ(offset, 0); + EXPECT_EQ(length, metaFileSize); + + Json::Value root = GenerateMetaJson(); + auto json = root.toStyledString(); + strncpy(buf, json.c_str(), json.size() + 1); + return metaFileSize; + })); + + EXPECT_CALL(*lfs_, Close(100)).Times(1); + } + + void FakePool(FilePool *pool, const FilePoolOptions &options, + uint32_t fileNum) + { + if (options.getFileFromPool) + { + FakeMetaFile(); + std::vector fileNames; + struct stat fileInfo; + fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE; + for (int i = 1; i <= fileNum; ++i) + { + std::string name = std::to_string(i); + std::string filePath = poolDir + "/" + name; + fileNames.push_back(name); + EXPECT_CALL(*lfs_, FileExists(filePath)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(filePath, _)).WillOnce(Return(i)); + EXPECT_CALL(*lfs_, Fstat(i, NotNull())) + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); + EXPECT_CALL(*lfs_, Close(i)).Times(1); + } + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, List(_, _)) + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + + ASSERT_EQ(true, pool->Initialize(options)); + ASSERT_EQ(fileNum, pool->Size()); + } + else + { + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + ASSERT_EQ(true, pool->Initialize(options)); + } + } + + protected: + std::shared_ptr lfs_; + }; + + // Exception testing for PersistEnCodeMetaInfo interface + TEST_F(CSChunkfilePoolMockTest, PersistEnCodeMetaInfoTest) + { + FilePoolMeta meta; + meta.chunkSize = CHUNK_SIZE; + meta.metaPageSize = PAGE_SIZE; + meta.hasBlockSize = false; + meta.filePoolPath = poolDir; + + // open failed + { + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Write(_, Matcher(_), _, _)).Times(0); + EXPECT_CALL(*lfs_, Close(_)).Times(0); + ASSERT_EQ(-1, FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta, + poolMetaPath)); + } + // open successful, write failed + { + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, 4096)) + .WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta, + poolMetaPath)); + } + // open successful, write successful + { + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, 4096)) + .WillOnce(Return(4096)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ( + 0, 
FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta, poolMetaPath)); + } + } + + // Exception testing for DecodeMetaInfoFromMetaFile interface + TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) + { + FilePoolMeta meta; + + // open failed + { + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Read(_, _, _, _)).Times(0); + EXPECT_CALL(*lfs_, Close(_)).Times(0); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // read failed + { + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // read successful, parsing Json format failed + { + char buf[metaFileSize] = {0}; + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // parsing Json format succeeded, chunksize is empty + { + char buf[metaFileSize] = {0}; + Json::Value root = GenerateMetaJson(); + root.removeMember(kChunkSize); + memcpy(buf, root.toStyledString().c_str(), + root.toStyledString().size()); + + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // parsing Json format succeeded, metapagesize is empty + { + char buf[metaFileSize] = {0}; + Json::Value root = GenerateMetaJson(); + root.removeMember(kMetaPageSize); + memcpy(buf, root.toStyledString().c_str(), + root.toStyledString().size()); + + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // parsing Json format succeeded, kFilePoolPath is empty + { + char buf[metaFileSize] = {0}; + Json::Value root = GenerateMetaJson(); + root.removeMember(kChunkFilePoolPath); + memcpy(buf, root.toStyledString().c_str(), + root.toStyledString().size()); + + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // Successfully parsed Json format, kCRC is empty + { + char buf[metaFileSize] = {0}; + Json::Value root = GenerateMetaJson(); + root.removeMember(kCRC); + memcpy(buf, root.toStyledString().c_str(), + root.toStyledString().size()); + + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + 
Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // Successfully parsed Json format, crc mismatch + { + char buf[metaFileSize] = {0}; + Json::Value root = GenerateMetaJson(); + root[kCRC] = 0; + memcpy(buf, root.toStyledString().c_str(), + root.toStyledString().size()); + + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } + // Normal process + { + char buf[metaFileSize] = {0}; + Json::Value root = GenerateMetaJson(); + memcpy(buf, root.toStyledString().c_str(), + root.toStyledString().size()); + + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(0, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } -namespace curve { -namespace chunkserver { - -const ChunkSizeType CHUNK_SIZE = 16 * 1024 * 1024; -const PageSizeType PAGE_SIZE = 4096; -const uint32_t metaFileSize = 4096; -const uint32_t blockSize = 4096; -const uint32_t fileSize = CHUNK_SIZE + PAGE_SIZE; -const std::string poolDir = "./chunkfilepool_dat"; // NOLINT -const std::string poolMetaPath = "./chunkfilepool_dat.meta"; // NOLINT -const std::string filePath1 = poolDir + "/1"; // NOLINT -const std::string targetPath = "./data/chunk_1"; // NOLINT -const char* kChunkSize = "chunkSize"; -const char* kMetaPageSize = "metaPageSize"; -const char* kChunkFilePoolPath = "chunkfilepool_path"; -const char* kCRC = "crc"; -const char* kBlockSize = "blockSize"; - -class CSChunkfilePoolMockTest : public testing::Test { - public: - void SetUp() { - lfs_ = std::make_shared(); - } - - void TearDown() {} - - static Json::Value GenerateMetaJson(bool hasBlockSize = false) { - // 正常的meta文件的json格式 - FilePoolMeta meta; - meta.chunkSize = CHUNK_SIZE; - meta.metaPageSize = PAGE_SIZE; - meta.hasBlockSize = hasBlockSize; - if (hasBlockSize) { - meta.blockSize = blockSize; + // Normal process + { + char buf[metaFileSize] = {0}; + Json::Value root = GenerateMetaJson(true); + memcpy(buf, root.toStyledString().c_str(), + root.toStyledString().size()); + + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)).WillOnce(Return(1)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(0, FilePoolHelper::DecodeMetaInfoFromMetaFile( + lfs_, poolMetaPath, metaFileSize, &meta)); + } } - meta.filePoolPath = poolDir; - Json::Value jsonContent; - jsonContent[kChunkSize] = CHUNK_SIZE; - jsonContent[kMetaPageSize] = PAGE_SIZE; + TEST_F(CSChunkfilePoolMockTest, InitializeTest) + { + // Initialize options + FilePoolOptions options; + options.getFileFromPool = true; + memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); + options.fileSize = CHUNK_SIZE; + options.metaPageSize = PAGE_SIZE; + memcpy(options.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); + options.metaFileSize = metaFileSize; + options.retryTimes = 3; + + /****************getFileFromPool is 
true**************/ + // Failed while checking valid + { + // DecodeMetaInfoFromMetaFile has been tested separately on it + // Here, select a set of uncommon examples from the above to test + // parsing JSON format failed + FilePool pool(lfs_); + char buf[metaFileSize] = {0}; + EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) + .WillOnce(Return(1)); + EXPECT_CALL(*lfs_, FileExists(poolMetaPath)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) + .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), + Return(metaFileSize))); + EXPECT_CALL(*lfs_, Close(1)).Times(1); + ASSERT_EQ(false, pool.Initialize(options)); + } + // getFileFromPool is true, checkvalid succeeded, current directory does not + // exist + { + FilePool pool(lfs_); + FakeMetaFile(); + EXPECT_CALL(*lfs_, DirExists(_)) + .WillOnce(Return(false)); + ASSERT_EQ(true, pool.Initialize(options)); + pool.WaitoFormatDoneForTesting(); + } + // The current directory exists, list directory failed + { + FilePool pool(lfs_); + FakeMetaFile(); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, List(_, _)).WillOnce(Return(-1)); + ASSERT_EQ(false, pool.Initialize(options)); + } + // list directory successful, file name contains non numeric characters + { + FilePool pool(lfs_); + FakeMetaFile(); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + std::vector fileNames; + fileNames.push_back("aaa"); + EXPECT_CALL(*lfs_, List(_, _)) + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + ASSERT_EQ(false, pool.Initialize(options)); + } + // list directory succeeded, it contains objects of non ordinary file types + { + FilePool pool(lfs_); + FakeMetaFile(); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + std::vector fileNames; + fileNames.push_back("1"); + EXPECT_CALL(*lfs_, List(_, _)) + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(filePath1)).WillOnce(Return(false)); + ASSERT_EQ(false, pool.Initialize(options)); + } + // list directory successful, open file failed + { + FilePool pool(lfs_); + FakeMetaFile(); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + std::vector fileNames; + fileNames.push_back("1"); + EXPECT_CALL(*lfs_, List(_, _)) + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(filePath1)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(filePath1, _)).WillOnce(Return(-1)); + ASSERT_EQ(false, pool.Initialize(options)); + } + // Failed to retrieve stat file information + { + FilePool pool(lfs_); + FakeMetaFile(); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + std::vector fileNames; + fileNames.push_back("1"); + EXPECT_CALL(*lfs_, List(_, _)) + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(filePath1)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(filePath1, _)).WillOnce(Return(2)); + EXPECT_CALL(*lfs_, Fstat(2, NotNull())).WillOnce(Return(-1)); + EXPECT_CALL(*lfs_, Close(2)).Times(1); + ASSERT_EQ(false, pool.Initialize(options)); + } + // stat file information successful, file size mismatch + { + FilePool pool(lfs_); + FakeMetaFile(); + EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true)); + std::vector fileNames; + fileNames.push_back("1"); + EXPECT_CALL(*lfs_, List(_, _)) + .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); + EXPECT_CALL(*lfs_, FileExists(filePath1)).WillOnce(Return(true)); + EXPECT_CALL(*lfs_, Open(filePath1, _)).WillOnce(Return(2)); + + struct stat fileInfo; + 
+        fileInfo.st_size = CHUNK_SIZE;
+        EXPECT_CALL(*lfs_, Fstat(2, NotNull()))
+            .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
+        EXPECT_CALL(*lfs_, Close(2)).Times(1);
+        ASSERT_EQ(false, pool.Initialize(options));
+    }
+    // File information matches
+    {
+        FilePool pool(lfs_);
+        FakeMetaFile();
+        EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true));
+        std::vector<std::string> fileNames;
+        fileNames.push_back("1");
+        EXPECT_CALL(*lfs_, List(_, _))
+            .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0)));
+        EXPECT_CALL(*lfs_, FileExists(filePath1)).WillOnce(Return(true));
+        EXPECT_CALL(*lfs_, Open(filePath1, _)).WillOnce(Return(2));
+
+        struct stat fileInfo;
+        fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE;
+        EXPECT_CALL(*lfs_, Fstat(2, NotNull()))
+            .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
+        EXPECT_CALL(*lfs_, Close(2)).Times(1);
+        ASSERT_EQ(true, pool.Initialize(options));
+        ASSERT_EQ(1, pool.Size());
+    }
-        if (hasBlockSize) {
-            jsonContent[kBlockSize] = blockSize;
+    /****************getFileFromPool is false**************/
+    options.getFileFromPool = false;
+    // The current directory does not exist, creating the directory failed
+    {
+        FilePool pool(lfs_);
+        EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(false));
+        EXPECT_CALL(*lfs_, Mkdir(_)).WillOnce(Return(-1));
+        ASSERT_EQ(false, pool.Initialize(options));
+    }
+    // The current directory does not exist, creating the directory succeeded
+    {
+        FilePool pool(lfs_);
+        EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(false));
+        EXPECT_CALL(*lfs_, Mkdir(_)).WillOnce(Return(0));
+        ASSERT_EQ(true, pool.Initialize(options));
+    }
+    // The current directory exists
+    {
+        FilePool pool(lfs_);
+        EXPECT_CALL(*lfs_, DirExists(_)).WillOnce(Return(true));
+        ASSERT_EQ(true, pool.Initialize(options));
+    }
 }
-    jsonContent[kChunkFilePoolPath] = poolDir;
-    jsonContent[kCRC] = meta.Crc32();
-    return jsonContent;
-  }
-
-  void FakeMetaFile() {
-    EXPECT_CALL(*lfs_, FileExists(poolMetaPath)).WillOnce(Return(true));
-    EXPECT_CALL(*lfs_, Open(poolMetaPath, _))
-        .WillOnce(Return(100));
-    EXPECT_CALL(*lfs_, Read(100, NotNull(), 0, metaFileSize))
-        .WillOnce(Invoke(
-            [this](int /*fd*/, char* buf, uint64_t offset, int length) {
-                EXPECT_EQ(offset, 0);
-                EXPECT_EQ(length, metaFileSize);
-
-                Json::Value root = GenerateMetaJson();
-                auto json = root.toStyledString();
-                strncpy(buf, json.c_str(), json.size() + 1);
-                return metaFileSize;
-            }));
-
-    EXPECT_CALL(*lfs_, Close(100))
-        .Times(1);
-  }
-
-  void FakePool(FilePool* pool,
-                const FilePoolOptions& options,
-                uint32_t fileNum) {
-    if (options.getFileFromPool) {
-      FakeMetaFile();
-      std::vector<std::string> fileNames;
-      struct stat fileInfo;
-      fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE;
-      for (int i = 1; i <= fileNum; ++i) {
-        std::string name = std::to_string(i);
-        std::string filePath = poolDir + "/" + name;
-        fileNames.push_back(name);
-        EXPECT_CALL(*lfs_, FileExists(filePath))
-            .WillOnce(Return(true));
-        EXPECT_CALL(*lfs_, Open(filePath, _))
-            .WillOnce(Return(i));
-        EXPECT_CALL(*lfs_, Fstat(i, NotNull()))
-            .WillOnce(DoAll(SetArgPointee<1>(fileInfo),
-                            Return(0)));
-        EXPECT_CALL(*lfs_, Close(i))
-            .Times(1);
-      }
-      EXPECT_CALL(*lfs_, DirExists(_))
-          .WillOnce(Return(true));
-      EXPECT_CALL(*lfs_, List(_, _))
-          .WillOnce(DoAll(SetArgPointee<1>(fileNames),
-                          Return(0)));
-
-      ASSERT_EQ(true, pool->Initialize(options));
-      ASSERT_EQ(fileNum, pool->Size());
-    } else {
-      EXPECT_CALL(*lfs_, DirExists(_))
-          .WillOnce(Return(true));
-      ASSERT_EQ(true, pool->Initialize(options));
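Reviewer note on the pattern above: every Initialize case fakes an output
parameter and a return code in a single action list via
DoAll(SetArgPointee<...>(), Return(...)). A minimal self-contained
illustration of that gmock idiom — MockLister and ListDir are hypothetical
names used only for this sketch, not part of this patch:

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    #include <string>
    #include <vector>

    using ::testing::_;
    using ::testing::DoAll;
    using ::testing::Return;
    using ::testing::SetArgPointee;

    class MockLister {
     public:
        // Same shape as LocalFileSystem::List(dir, out): fill *out, return 0.
        MOCK_METHOD2(ListDir,
                     int(const std::string&, std::vector<std::string>*));
    };

    TEST(FakingPatternSketch, FillOutParamAndReturn) {
        MockLister lister;
        std::vector<std::string> names{"1", "2"};
        EXPECT_CALL(lister, ListDir(_, _))
            .WillOnce(DoAll(SetArgPointee<1>(names), Return(0)));

        std::vector<std::string> out;
        ASSERT_EQ(0, lister.ListDir("/pool", &out));  // out now holds {"1","2"}
        ASSERT_EQ(2u, out.size());
    }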
+TEST_F(CSChunkfilePoolMockTest, GetFileTest)
+{
+    // Initialize options
+    FilePoolOptions options;
+    options.getFileFromPool = true;
+    memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size());
+    options.fileSize = CHUNK_SIZE;
+    options.metaPageSize = PAGE_SIZE;
+    memcpy(options.metaPath, poolMetaPath.c_str(), poolMetaPath.size());
+    options.metaFileSize = metaFileSize;
+    int retryTimes = 3;
+    options.retryTimes = retryTimes;
+
+    char metapage[PAGE_SIZE] = {0};
+
+    /****************getFileFromPool is true**************/
+    // No chunks remaining in the pool
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 0);
+        ASSERT_EQ(-1, pool.GetFile(targetPath, metapage));
+    }
+    // Chunk exists, open failed
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 10);
+        EXPECT_CALL(*lfs_, Open(_, _))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(-1));
+        EXPECT_CALL(*lfs_, Close(_)).Times(0);
+        ASSERT_EQ(-1, pool.GetFile(targetPath, metapage));
+        ASSERT_EQ(10 - retryTimes, pool.Size());
+    }
+    // Chunk exists, write failed
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 10);
+        EXPECT_CALL(*lfs_, Open(_, _))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(1));
+        EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(-1));
+        EXPECT_CALL(*lfs_, Close(1)).Times(retryTimes);
+        ASSERT_EQ(-1, pool.GetFile(targetPath, metapage));
+        ASSERT_EQ(10 - retryTimes, pool.Size());
+    }
+    // Chunk exists, fsync failed
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 10);
+        EXPECT_CALL(*lfs_, Open(_, _))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(1));
+        EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(PAGE_SIZE));
+        EXPECT_CALL(*lfs_, Fsync(1))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(-1));
+        EXPECT_CALL(*lfs_, Close(1)).Times(retryTimes);
+        ASSERT_EQ(-1, pool.GetFile(targetPath, metapage));
+        ASSERT_EQ(10 - retryTimes, pool.Size());
+    }
+    // Chunk exists, close failed
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 10);
+        EXPECT_CALL(*lfs_, Open(_, _))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(1));
+        EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(PAGE_SIZE));
+        EXPECT_CALL(*lfs_, Fsync(1))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(0));
+        EXPECT_CALL(*lfs_, Close(1))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(-1));
+        ASSERT_EQ(-1, pool.GetFile(targetPath, metapage));
+        ASSERT_EQ(10 - retryTimes, pool.Size());
+    }
+    // Chunk exists, rename returns an EEXIST error
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 10);
+        EXPECT_CALL(*lfs_, Open(_, _)).WillOnce(Return(1));
+        EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE))
+            .WillOnce(Return(PAGE_SIZE));
+        EXPECT_CALL(*lfs_, Fsync(1)).WillOnce(Return(0));
+        EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0));
+        EXPECT_CALL(*lfs_, Rename(_, _, _)).WillOnce(Return(-EEXIST));
+        ASSERT_EQ(-EEXIST, pool.GetFile(targetPath, metapage));
+        ASSERT_EQ(9, pool.Size());
+    }
+    // Chunk exists, rename returns a non-EEXIST error
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 10);
+        EXPECT_CALL(*lfs_, Open(_, _))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(1));
+        EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(PAGE_SIZE));
+        EXPECT_CALL(*lfs_, Fsync(1))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(0));
+        EXPECT_CALL(*lfs_, Close(1))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(0));
+        EXPECT_CALL(*lfs_, Rename(_, _, _))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(-1));
+        ASSERT_EQ(-1, pool.GetFile(targetPath, metapage));
+        ASSERT_EQ(10 - retryTimes, pool.Size());
+    }
+    // Chunk exists, rename succeeded
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 10);
+        EXPECT_CALL(*lfs_, Open(_, _)).WillOnce(Return(1));
+        EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE))
+            .WillOnce(Return(PAGE_SIZE));
+        EXPECT_CALL(*lfs_, Fsync(1)).WillOnce(Return(0));
+        EXPECT_CALL(*lfs_, Close(1)).WillOnce(Return(0));
+        EXPECT_CALL(*lfs_, Rename(_, _, _)).WillOnce(Return(0));
+        ASSERT_EQ(0, pool.GetFile(targetPath, metapage));
+        ASSERT_EQ(9, pool.Size());
+    }
+
+    options.getFileFromPool = false;
+    /****************getFileFromPool is false**************/
+    // Open failed
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 0);
+        EXPECT_CALL(*lfs_, Open(_, _))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(-1));
+        EXPECT_CALL(*lfs_, Close(1)).Times(0);
+        ASSERT_EQ(-1, pool.GetFile(targetPath, metapage));
+    }
+    // Fallocate failed
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 0);
+        EXPECT_CALL(*lfs_, Open(_, _))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(1));
+        EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(-1));
+        EXPECT_CALL(*lfs_, Close(1)).Times(retryTimes);
+        ASSERT_EQ(-1, pool.GetFile(targetPath, metapage));
+    }
+    // Write failed
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 0);
+        EXPECT_CALL(*lfs_, Open(_, _))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(1));
+        EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(0));
+        EXPECT_CALL(*lfs_,
+                    Write(1, Matcher<const char*>(NotNull()), 0, fileSize))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(-1));
+        EXPECT_CALL(*lfs_, Close(1)).Times(retryTimes);
+        ASSERT_EQ(-1, pool.GetFile(targetPath, metapage));
+    }
+    // Fsync failed
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 0);
+        EXPECT_CALL(*lfs_, Open(_, _))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(1));
+        EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(0));
+        EXPECT_CALL(*lfs_,
+                    Write(1, Matcher<const char*>(NotNull()), 0, fileSize))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(fileSize));
+        EXPECT_CALL(*lfs_, Fsync(1))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(-1));
+        EXPECT_CALL(*lfs_, Close(1)).Times(retryTimes);
+        ASSERT_EQ(-1, pool.GetFile(targetPath, metapage));
+    }
+    // Close failed
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 0);
+        EXPECT_CALL(*lfs_, Open(_, _))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(1));
+        EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(0));
+        EXPECT_CALL(*lfs_,
+                    Write(1, Matcher<const char*>(NotNull()), 0, fileSize))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(fileSize));
+        EXPECT_CALL(*lfs_, Fsync(1))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(0));
+        EXPECT_CALL(*lfs_, Close(1))
+            .Times(retryTimes)
+            .WillRepeatedly(Return(-1));
+        ASSERT_EQ(-1, pool.GetFile(targetPath, metapage));
+    }
 }
-    }
-
- protected:
-    std::shared_ptr<MockLocalFileSystem> lfs_;
-};
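Reviewer note before RecycleFileTest: the GetFileTest cases above all encode
the same retry contract — each attempt consumes one pre-allocated chunk
(hence the `10 - retryTimes` size assertions), and -EEXIST from rename aborts
immediately while other errors are retried. A compilable sketch of that
contract under those assumptions (illustrative only, not the actual FilePool
implementation):

    #include <cerrno>
    #include <functional>

    // attempt() stands in for open + write metapage + fsync + close + rename.
    int GetFileSketch(int retryTimes, int* poolSize,
                      const std::function<int()>& attempt) {
        for (int i = 0; i < retryTimes; ++i) {
            --*poolSize;                     // one pooled chunk used per try
            int ret = attempt();
            if (ret == 0) return 0;          // success
            if (ret == -EEXIST) return ret;  // target exists: do not retry
        }
        return -1;                           // retries exhausted
    }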
+TEST_F(CSChunkfilePoolMockTest, RecycleFileTest)
+{
+    // Initialize options
+    FilePoolOptions options;
+    options.getFileFromPool = true;
+    memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size());
+    options.fileSize = CHUNK_SIZE;
+    options.metaPageSize = PAGE_SIZE;
+    memcpy(options.metaPath, poolMetaPath.c_str(), poolMetaPath.size());
+    options.metaFileSize = metaFileSize;
+    int retryTimes = 3;
+    options.retryTimes = retryTimes;
+
+    /****************getFileFromPool is false**************/
+    options.getFileFromPool = false;
+    // Deleting the file failed
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 0);
+        EXPECT_CALL(*lfs_, Delete(filePath1)).WillOnce(Return(-1));
+        ASSERT_EQ(-1, pool.RecycleFile(filePath1));
+    }
+    // Deleting the file succeeded
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 0);
+        EXPECT_CALL(*lfs_, Delete(filePath1)).WillOnce(Return(0));
+        ASSERT_EQ(0, pool.RecycleFile(filePath1));
+    }
-// PersistEnCodeMetaInfo接口的异常测试
-TEST_F(CSChunkfilePoolMockTest, PersistEnCodeMetaInfoTest) {
-    FilePoolMeta meta;
-    meta.chunkSize = CHUNK_SIZE;
-    meta.metaPageSize = PAGE_SIZE;
-    meta.hasBlockSize = false;
-    meta.filePoolPath = poolDir;
+    /****************getFileFromPool is true**************/
+    options.getFileFromPool = true;
+    // Open failed
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 0);
+
+        EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(-1));
+        // On failure, Delete directly
+        EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(0));
+        // If Delete succeeds, 0 is returned
+        ASSERT_EQ(0, pool.RecycleFile(targetPath));
+
+        EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(-1));
+        // On failure, Delete directly
+        EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(-1));
+        // If Delete fails, the error code is returned
+        ASSERT_EQ(-1, pool.RecycleFile(targetPath));
+    }
-    // open失败
-    {
-        EXPECT_CALL(*lfs_, Open(poolMetaPath, _))
-            .WillOnce(Return(-1));
-        EXPECT_CALL(*lfs_, Write(_, Matcher<const char*>(_), _, _))
-            .Times(0);
-        EXPECT_CALL(*lfs_, Close(_))
-            .Times(0);
-        ASSERT_EQ(-1, FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta,
-                                                            poolMetaPath));
-    }
-    // open成功,write失败
-    {
-        EXPECT_CALL(*lfs_, Open(poolMetaPath, _))
-            .WillOnce(Return(1));
-        EXPECT_CALL(*lfs_, Write(1, Matcher<const char*>(NotNull()), 0, 4096))
-            .WillOnce(Return(-1));
-        EXPECT_CALL(*lfs_, Close(1))
-            .Times(1);
-        ASSERT_EQ(-1, FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta,
-                                                            poolMetaPath));
-    }
-    // open成功,write成功
-    {
-        EXPECT_CALL(*lfs_, Open(poolMetaPath, _))
-            .WillOnce(Return(1));
-        EXPECT_CALL(*lfs_, Write(1, Matcher<const char*>(NotNull()), 0, 4096))
-            .WillOnce(Return(4096));
-        EXPECT_CALL(*lfs_, Close(1))
-            .Times(1);
-        ASSERT_EQ(
-            0, FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta, poolMetaPath));
-    }
-}
-
-// DecodeMetaInfoFromMetaFile接口的异常测试
-TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) {
-    FilePoolMeta meta;
-
-    // open失败
-    {
-        EXPECT_CALL(*lfs_, Open(poolMetaPath, _))
-            .WillOnce(Return(-1));
-        EXPECT_CALL(*lfs_, Read(_, _, _, _))
-            .Times(0);
-        EXPECT_CALL(*lfs_, Close(_))
-            .Times(0);
-        ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile(
-                          lfs_, poolMetaPath, metaFileSize, &meta));
-    }
-    // read失败
-    {
-        EXPECT_CALL(*lfs_, Open(poolMetaPath, _))
-            .WillOnce(Return(1));
-        EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize))
-            .WillOnce(Return(-1));
-        EXPECT_CALL(*lfs_, Close(1))
-            .Times(1);
-        ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile(
-                          lfs_, poolMetaPath, metaFileSize, &meta));
-    }
-    // read成功,解析Json格式失败
-    {
-        char buf[metaFileSize] = {0};
-        EXPECT_CALL(*lfs_, Open(poolMetaPath, _))
-            .WillOnce(Return(1));
-        EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize))
-            .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize),
-                            Return(metaFileSize)));
-        EXPECT_CALL(*lfs_, Close(1))
-            .Times(1);
-        ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile(
-                          lfs_, poolMetaPath, metaFileSize, &meta));
-    }
-    // 解析Json格式成功,chunksize为空
-    {
-        char buf[metaFileSize] = {0};
-        Json::Value root = GenerateMetaJson();
-        root.removeMember(kChunkSize);
-
memcpy(buf, root.toStyledString().c_str(), - root.toStyledString().size()); - - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } - // 解析Json格式成功,metapagesize为空 - { - char buf[metaFileSize] = {0}; - Json::Value root = GenerateMetaJson(); - root.removeMember(kMetaPageSize); - memcpy(buf, root.toStyledString().c_str(), - root.toStyledString().size()); - - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } - // 解析Json格式成功,kFilePoolPath为空 - { - char buf[metaFileSize] = {0}; - Json::Value root = GenerateMetaJson(); - root.removeMember(kChunkFilePoolPath); - memcpy(buf, root.toStyledString().c_str(), - root.toStyledString().size()); - - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } - // 解析Json格式成功,kCRC为空 - { - char buf[metaFileSize] = {0}; - Json::Value root = GenerateMetaJson(); - root.removeMember(kCRC); - memcpy(buf, root.toStyledString().c_str(), - root.toStyledString().size()); - - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } - // 解析Json格式成功,crc不匹配 - { - char buf[metaFileSize] = {0}; - Json::Value root = GenerateMetaJson(); - root[kCRC] = 0; - memcpy(buf, root.toStyledString().c_str(), - root.toStyledString().size()); - - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } - // 正常流程 - { - char buf[metaFileSize] = {0}; - Json::Value root = GenerateMetaJson(); - memcpy(buf, root.toStyledString().c_str(), - root.toStyledString().size()); - - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(0, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } - - // 正常流程 - { - char buf[metaFileSize] = {0}; - Json::Value root = GenerateMetaJson(true); - memcpy(buf, root.toStyledString().c_str(), - root.toStyledString().size()); - - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - 
.WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(0, FilePoolHelper::DecodeMetaInfoFromMetaFile( - lfs_, poolMetaPath, metaFileSize, &meta)); - } -} - -TEST_F(CSChunkfilePoolMockTest, InitializeTest) { - // 初始化options - FilePoolOptions options; - options.getFileFromPool = true; - memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); - options.fileSize = CHUNK_SIZE; - options.metaPageSize = PAGE_SIZE; - memcpy(options.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); - options.metaFileSize = metaFileSize; - options.retryTimes = 3; - - /****************getFileFromPool为true**************/ - // checkvalid时失败 - { - // DecodeMetaInfoFromMetaFile在上面已经单独测试过了 - // 这里选上面中的一组异常用例来检验即可 - // 解析json格式失败 - FilePool pool(lfs_); - char buf[metaFileSize] = {0}; - EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, FileExists(poolMetaPath)).WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, metaFileSize)) - .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + metaFileSize), - Return(metaFileSize))); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - ASSERT_EQ(false, pool.Initialize(options)); - } - // getFileFromPool为true,checkvalid成功,当前目录不存在 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(false)); - ASSERT_EQ(true, pool.Initialize(options)); - pool.WaitoFormatDoneForTesting(); - } - // 当前目录存在,list目录失败 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(Return(-1)); - ASSERT_EQ(false, pool.Initialize(options)); - } - // list目录成功,文件名中包含非数字字符 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - std::vector fileNames; - fileNames.push_back("aaa"); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - ASSERT_EQ(false, pool.Initialize(options)); - } - // list目录成功,目录中包含非普通文件类型的对象 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - std::vector fileNames; - fileNames.push_back("1"); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(filePath1)) - .WillOnce(Return(false)); - ASSERT_EQ(false, pool.Initialize(options)); - } - // list目录成功,open文件时失败 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - std::vector fileNames; - fileNames.push_back("1"); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(filePath1)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(filePath1, _)) - .WillOnce(Return(-1)); - ASSERT_EQ(false, pool.Initialize(options)); - } - // stat文件信息时失败 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - std::vector fileNames; - fileNames.push_back("1"); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(filePath1)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(filePath1, _)) - .WillOnce(Return(2)); - EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - ASSERT_EQ(false, 
pool.Initialize(options)); - } - // stat文件信息成功,文件大小不匹配 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - std::vector fileNames; - fileNames.push_back("1"); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(filePath1)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(filePath1, _)) - .WillOnce(Return(2)); - - struct stat fileInfo; - fileInfo.st_size = CHUNK_SIZE; - EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - ASSERT_EQ(false, pool.Initialize(options)); - } - // 文件信息匹配 - { - FilePool pool(lfs_); - FakeMetaFile(); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - std::vector fileNames; - fileNames.push_back("1"); - EXPECT_CALL(*lfs_, List(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(fileNames), - Return(0))); - EXPECT_CALL(*lfs_, FileExists(filePath1)) - .WillOnce(Return(true)); - EXPECT_CALL(*lfs_, Open(filePath1, _)) - .WillOnce(Return(2)); - - struct stat fileInfo; - fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE; - EXPECT_CALL(*lfs_, Fstat(2, NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); - EXPECT_CALL(*lfs_, Close(2)) - .Times(1); - ASSERT_EQ(true, pool.Initialize(options)); - ASSERT_EQ(1, pool.Size()); - } - - /****************getFileFromPool为false**************/ - options.getFileFromPool = false; - // 当前目录不存在,创建目录失败 - { - FilePool pool(lfs_); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(false)); - EXPECT_CALL(*lfs_, Mkdir(_)) - .WillOnce(Return(-1)); - ASSERT_EQ(false, pool.Initialize(options)); - } - // 当前目录不存在,创建目录成功 - { - FilePool pool(lfs_); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(false)); - EXPECT_CALL(*lfs_, Mkdir(_)) - .WillOnce(Return(0)); - ASSERT_EQ(true, pool.Initialize(options)); - } - // 当前目录存在 - { - FilePool pool(lfs_); - EXPECT_CALL(*lfs_, DirExists(_)) - .WillOnce(Return(true)); - ASSERT_EQ(true, pool.Initialize(options)); - } -} - -TEST_F(CSChunkfilePoolMockTest, GetFileTest) { - // 初始化options - FilePoolOptions options; - options.getFileFromPool = true; - memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); - options.fileSize = CHUNK_SIZE; - options.metaPageSize = PAGE_SIZE; - memcpy(options.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); - options.metaFileSize = metaFileSize; - int retryTimes = 3; - options.retryTimes = retryTimes; - - char metapage[PAGE_SIZE] = {0}; - - /****************getFileFromPool为true**************/ - // 没有剩余chunk的情况 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - } - // 存在chunk,open时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 10); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(_)) - .Times(0); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - ASSERT_EQ(10 - retryTimes, pool.Size()); - } - // 存在chunk,write时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 10); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - ASSERT_EQ(10 - retryTimes, pool.Size()); - } - // 存在chunk,fsync时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 10); - 
EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) - .Times(retryTimes) - .WillRepeatedly(Return(PAGE_SIZE)); - EXPECT_CALL(*lfs_, Fsync(1)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - ASSERT_EQ(10 - retryTimes, pool.Size()); - } - // 存在chunk,close时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 10); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) - .Times(retryTimes) - .WillRepeatedly(Return(PAGE_SIZE)); - EXPECT_CALL(*lfs_, Fsync(1)) - .Times(retryTimes) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - ASSERT_EQ(10 - retryTimes, pool.Size()); - } - // 存在chunk,rename时返回EEXIST错误 - { - FilePool pool(lfs_); - FakePool(&pool, options, 10); - EXPECT_CALL(*lfs_, Open(_, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) - .WillOnce(Return(PAGE_SIZE)); - EXPECT_CALL(*lfs_, Fsync(1)) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Rename(_, _, _)) - .WillOnce(Return(-EEXIST)); - ASSERT_EQ(-EEXIST, pool.GetFile(targetPath, metapage)); - ASSERT_EQ(9, pool.Size()); - } - // 存在chunk,rename时返回非EEXIST错误 - { - FilePool pool(lfs_); - FakePool(&pool, options, 10); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) - .Times(retryTimes) - .WillRepeatedly(Return(PAGE_SIZE)); - EXPECT_CALL(*lfs_, Fsync(1)) - .Times(retryTimes) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*lfs_, Rename(_, _, _)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - ASSERT_EQ(10 - retryTimes, pool.Size()); - } - // 存在chunk,rename成功 - { - FilePool pool(lfs_); - FakePool(&pool, options, 10); - EXPECT_CALL(*lfs_, Open(_, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Write(1, metapage, 0, PAGE_SIZE)) - .WillOnce(Return(PAGE_SIZE)); - EXPECT_CALL(*lfs_, Fsync(1)) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Close(1)) - .WillOnce(Return(0)); - EXPECT_CALL(*lfs_, Rename(_, _, _)) - .WillOnce(Return(0)); - ASSERT_EQ(0, pool.GetFile(targetPath, metapage)); - ASSERT_EQ(9, pool.Size()); - } - - options.getFileFromPool = false; - /****************getFileFromPool为false**************/ - // open 时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(0); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - } - // fallocate 时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - } - // write 时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize)) - 
.Times(retryTimes) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*lfs_, - Write(1, Matcher(NotNull()), 0, fileSize)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - } - // fsync 时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize)) - .Times(retryTimes) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*lfs_, - Write(1, Matcher(NotNull()), 0, fileSize)) - .Times(retryTimes) - .WillRepeatedly(Return(fileSize)); - EXPECT_CALL(*lfs_, Fsync(1)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - } - // close 时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Open(_, _)) - .Times(retryTimes) - .WillRepeatedly(Return(1)); - EXPECT_CALL(*lfs_, Fallocate(1, 0, 0, fileSize)) - .Times(retryTimes) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*lfs_, - Write(1, Matcher(NotNull()), 0, fileSize)) - .Times(retryTimes) - .WillRepeatedly(Return(fileSize)); - EXPECT_CALL(*lfs_, Fsync(1)) - .Times(retryTimes) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(retryTimes) - .WillRepeatedly(Return(-1)); - ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); - } -} - -TEST_F(CSChunkfilePoolMockTest, RecycleFileTest) { - // 初始化options - FilePoolOptions options; - options.getFileFromPool = true; - memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); - options.fileSize = CHUNK_SIZE; - options.metaPageSize = PAGE_SIZE; - memcpy(options.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); - options.metaFileSize = metaFileSize; - int retryTimes = 3; - options.retryTimes = retryTimes; - - /****************getFileFromPool为false**************/ - options.getFileFromPool = false; - // delete文件时失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Delete(filePath1)) - .WillOnce(Return(-1)); - ASSERT_EQ(-1, pool.RecycleFile(filePath1)); - } - // delete文件成功 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - EXPECT_CALL(*lfs_, Delete(filePath1)) - .WillOnce(Return(0)); - ASSERT_EQ(0, pool.RecycleFile(filePath1)); - } - - /****************getFileFromPool为true**************/ - options.getFileFromPool = true; - // open失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(-1)); - // 失败直接Delete - EXPECT_CALL(*lfs_, Delete(targetPath)) - .WillOnce(Return(0)); - // Delete 成功就返回0 - ASSERT_EQ(0, pool.RecycleFile(targetPath)); - - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(-1)); - // 失败直接Delete - EXPECT_CALL(*lfs_, Delete(targetPath)) - .WillOnce(Return(-1)); - // Delete 失败就返回错误码 - ASSERT_EQ(-1, pool.RecycleFile(targetPath)); - } - - // Fstat失败 - { - FilePool pool(lfs_); - FakePool(&pool, options, 0); - - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Fstat(1, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - // 失败直接Delete - EXPECT_CALL(*lfs_, Delete(targetPath)) - .WillOnce(Return(0)); - // Delete 成功就返回0 - ASSERT_EQ(0, pool.RecycleFile(targetPath)); - - EXPECT_CALL(*lfs_, Open(targetPath, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*lfs_, Fstat(1, _)) - .WillOnce(Return(-1)); - EXPECT_CALL(*lfs_, Close(1)) - .Times(1); - // 失败直接Delete - 
EXPECT_CALL(*lfs_, Delete(targetPath))
-            .WillOnce(Return(-1));
-        // Delete 失败就返回错误码
-        ASSERT_EQ(-1, pool.RecycleFile(targetPath));
-    }
-
-    // Fstat失败
-    {
-        FilePool pool(lfs_);
-        FakePool(&pool, options, 0);
-
-        EXPECT_CALL(*lfs_, Open(targetPath, _))
-            .WillOnce(Return(1));
-        EXPECT_CALL(*lfs_, Fstat(1, _))
-            .WillOnce(Return(-1));
-        EXPECT_CALL(*lfs_, Close(1))
-            .Times(1);
-        // 失败直接Delete
-        EXPECT_CALL(*lfs_, Delete(targetPath))
-            .WillOnce(Return(0));
-        // Delete 成功就返回0
-        ASSERT_EQ(0, pool.RecycleFile(targetPath));
-
-        EXPECT_CALL(*lfs_, Open(targetPath, _))
-            .WillOnce(Return(1));
-        EXPECT_CALL(*lfs_, Fstat(1, _))
-            .WillOnce(Return(-1));
-        EXPECT_CALL(*lfs_, Close(1))
-            .Times(1);
-        // 失败直接Delete
-        EXPECT_CALL(*lfs_, Delete(targetPath))
-            .WillOnce(Return(-1));
-        // Delete 失败就返回错误码
-        ASSERT_EQ(-1, pool.RecycleFile(targetPath));
-    }
-
-    // Fstat成功,大小不匹配
-    {
-        FilePool pool(lfs_);
-        FakePool(&pool, options, 0);
-        struct stat fileInfo;
-        fileInfo.st_size = CHUNK_SIZE;
-
-        EXPECT_CALL(*lfs_, Open(targetPath, _))
-            .WillOnce(Return(1));
-        EXPECT_CALL(*lfs_, Fstat(1, _))
-            .WillOnce(DoAll(SetArgPointee<1>(fileInfo),
-                            Return(0)));
-        EXPECT_CALL(*lfs_, Close(1))
-            .Times(1);
-        // 失败直接Delete
-        EXPECT_CALL(*lfs_, Delete(targetPath))
-            .WillOnce(Return(0));
-        // Delete 成功就返回0
-        ASSERT_EQ(0, pool.RecycleFile(targetPath));
-
-        EXPECT_CALL(*lfs_, Open(targetPath, _))
-            .WillOnce(Return(1));
-        EXPECT_CALL(*lfs_, Fstat(1, _))
-            .WillOnce(DoAll(SetArgPointee<1>(fileInfo),
-                            Return(0)));
-        EXPECT_CALL(*lfs_, Close(1))
-            .Times(1);
-        // 失败直接Delete
-        EXPECT_CALL(*lfs_, Delete(targetPath))
-            .WillOnce(Return(-1));
-        // Delete 失败就返回错误码
-        ASSERT_EQ(-1, pool.RecycleFile(targetPath));
-    }
-
-    // Fstat信息匹配,rename失败
-    {
-        FilePool pool(lfs_);
-        FakePool(&pool, options, 0);
-        struct stat fileInfo;
-        fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE;
-
-        EXPECT_CALL(*lfs_, Open(targetPath, _))
-            .WillOnce(Return(1));
-        EXPECT_CALL(*lfs_, Fstat(1, _))
-            .WillOnce(DoAll(SetArgPointee<1>(fileInfo),
-                            Return(0)));
-        EXPECT_CALL(*lfs_, Close(1))
-            .Times(1);
-        EXPECT_CALL(*lfs_, Rename(_, _, _))
-            .WillOnce(Return(-1));
-        ASSERT_EQ(-1, pool.RecycleFile(targetPath));
-        ASSERT_EQ(0, pool.Size());
-    }
-
-    // Fstat信息匹配,rename成功
-    {
-        FilePool pool(lfs_);
-        FakePool(&pool, options, 0);
-        struct stat fileInfo;
-        fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE;
-
-        EXPECT_CALL(*lfs_, Open(targetPath, _))
-            .WillOnce(Return(1));
-        EXPECT_CALL(*lfs_, Fstat(1, _))
-            .WillOnce(DoAll(SetArgPointee<1>(fileInfo),
-                            Return(0)));
-        EXPECT_CALL(*lfs_, Close(1))
-            .Times(1);
-        EXPECT_CALL(*lfs_, Rename(_, _, _))
-            .WillOnce(Return(0));
-        ASSERT_EQ(0, pool.RecycleFile(targetPath));
-        ASSERT_EQ(1, pool.Size());
-    }
-}
-
-}  // namespace chunkserver
-}  // namespace curve
+    // Fstat failed
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 0);
+
+        EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1));
+        EXPECT_CALL(*lfs_, Fstat(1, _)).WillOnce(Return(-1));
+        EXPECT_CALL(*lfs_, Close(1)).Times(1);
+        // On failure, Delete directly
+        EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(0));
+        // If Delete succeeds, 0 is returned
+        ASSERT_EQ(0, pool.RecycleFile(targetPath));
+
+        EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1));
+        EXPECT_CALL(*lfs_, Fstat(1, _)).WillOnce(Return(-1));
+        EXPECT_CALL(*lfs_, Close(1)).Times(1);
+        // On failure, Delete directly
+        EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(-1));
+        // If Delete fails, the error code is returned
+        ASSERT_EQ(-1, pool.RecycleFile(targetPath));
+    }
+
+    // Fstat succeeded, size mismatch
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 0);
+        struct stat fileInfo;
+        fileInfo.st_size = CHUNK_SIZE;
+
+        EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1));
+        EXPECT_CALL(*lfs_, Fstat(1, _))
+            .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
+        EXPECT_CALL(*lfs_, Close(1)).Times(1);
+        // On failure, Delete directly
+        EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(0));
+        // If Delete succeeds, 0 is returned
+        ASSERT_EQ(0, pool.RecycleFile(targetPath));
+
+        EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1));
+        EXPECT_CALL(*lfs_, Fstat(1, _))
+            .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
+        EXPECT_CALL(*lfs_, Close(1)).Times(1);
+        // On failure, Delete directly
+        EXPECT_CALL(*lfs_, Delete(targetPath)).WillOnce(Return(-1));
+        // If Delete fails, the error code is returned
+        ASSERT_EQ(-1, pool.RecycleFile(targetPath));
+    }
+
+    // Fstat info matches, rename failed
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 0);
+        struct stat fileInfo;
+        fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE;
+
+        EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1));
+        EXPECT_CALL(*lfs_, Fstat(1, _))
+            .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
+        EXPECT_CALL(*lfs_, Close(1)).Times(1);
+        EXPECT_CALL(*lfs_, Rename(_, _, _)).WillOnce(Return(-1));
+        ASSERT_EQ(-1, pool.RecycleFile(targetPath));
+        ASSERT_EQ(0, pool.Size());
+    }
+
+    // Fstat info matches, rename succeeded
+    {
+        FilePool pool(lfs_);
+        FakePool(&pool, options, 0);
+        struct stat fileInfo;
+        fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE;
+
+        EXPECT_CALL(*lfs_, Open(targetPath, _)).WillOnce(Return(1));
+        EXPECT_CALL(*lfs_, Fstat(1, _))
+            .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
+        EXPECT_CALL(*lfs_, Close(1)).Times(1);
+        EXPECT_CALL(*lfs_, Rename(_, _, _)).WillOnce(Return(0));
+        ASSERT_EQ(0, pool.RecycleFile(targetPath));
+        ASSERT_EQ(1, pool.Size());
+    }
+}
+
+}  // namespace chunkserver
+}  // namespace curve
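Reviewer note: read together, the RecycleFile cases above pin down a simple
policy — with getFileFromPool enabled, a returned chunk re-enters the pool
(via rename) only when its size is exactly chunk data plus metapage; any
open/fstat/size-check failure falls back to deleting the file outright. A
sketch of that size gate under those assumptions (illustrative, not the
shipped code):

    #include <sys/stat.h>

    // CHUNK_SIZE + PAGE_SIZE in the tests above.
    bool ReusableInPool(const struct stat& info, off_t chunkSize,
                        off_t metaPageSize) {
        return info.st_size == chunkSize + metaPageSize;
    }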
diff --git a/test/chunkserver/datastore/filepool_unittest.cpp b/test/chunkserver/datastore/filepool_unittest.cpp
index ea1592f62b..e7297f7224 100644
--- a/test/chunkserver/datastore/filepool_unittest.cpp
+++ b/test/chunkserver/datastore/filepool_unittest.cpp
@@ -51,9 +51,9 @@ using ::testing::StrEq;
 
 using curve::chunkserver::FilePool;
 using curve::chunkserver::FilePoolHelper;
+using curve::chunkserver::FilePoolMeta;
 using curve::chunkserver::FilePoolOptions;
 using curve::chunkserver::FilePoolState;
-using curve::chunkserver::FilePoolMeta;
 using curve::common::kFilePoolMagic;
 using curve::fs::FileSystemType;
 using curve::fs::LocalFileSystem;
@@ -183,8 +183,9 @@ TEST_P(CSFilePool_test, InitializeNomalTest) {
     // initialize
     ASSERT_TRUE(chunkFilePoolPtr_->Initialize(cfop));
     ASSERT_EQ(100, chunkFilePoolPtr_->Size());
-    // 初始化阶段会扫描FilePool内的所有文件,在扫描结束之后需要关闭这些文件
-    // 防止过多的文件描述符被占用
+    // The initialization phase scans every file in the FilePool; once the
+    // scan finishes these files must be closed so that no excess file
+    // descriptors stay occupied
     ASSERT_FALSE(CheckFileOpenOrNot(filePoolPath + "1"));
     ASSERT_FALSE(CheckFileOpenOrNot(filePoolPath + "2"));
     ASSERT_FALSE(CheckFileOpenOrNot(filePoolPath + "50.clean"));
@@ -291,8 +292,7 @@ TEST_P(CSFilePool_test, GetFileTest) {
     ASSERT_EQ(-2, fsptr->Delete("test0"));
 
     // CASE 2: get dirty chunk
-    auto checkBytes = [this](const std::string& filename,
-                             char byte,
+    auto checkBytes = [this](const std::string& filename, char byte,
                              bool isCleaned = false) {
         ASSERT_TRUE(fsptr->FileExists(filename));
         int fd = fsptr->Open(filename, O_RDWR);
@@ -631,8 +631,7 @@ TEST_P(CSFilePool_test, CleanChunkTest) {
     }
 }
 
-INSTANTIATE_TEST_CASE_P(CSFilePoolTest,
-                        CSFilePool_test,
+INSTANTIATE_TEST_CASE_P(CSFilePoolTest, CSFilePool_test,
                         ::testing::Values(false, true));
 
 TEST(CSFilePool, GetFileDirectlyTest) {
@@ -641,8 +640,9 @@ TEST(CSFilePool, GetFileDirectlyTest) {
     fsptr = LocalFsFactory::CreateFs(FileSystemType::EXT4, "");
     const std::string filePoolPath = FILEPOOL_DIR;
     // create chunkfile in chunkfile pool dir
-    // if chunkfile pool 的getFileFromPool开关关掉了,那么
-    // FilePool的size是一直为0,不会从pool目录中找
+    // If the chunkfile pool's getFileFromPool switch is turned off, the
+    // FilePool size stays 0 and files are never looked up in the pool
+    // directory
     std::string filename = filePoolPath + "1000";
     fsptr->Mkdir(filePoolPath);
     int fd = fsptr->Open(filename.c_str(), O_RDWR | O_CREAT);
@@ -666,7 +666,8 @@ TEST(CSFilePool, GetFileDirectlyTest) {
     ASSERT_TRUE(chunkFilePoolPtr_->Initialize(cspopt));
     ASSERT_EQ(0, chunkFilePoolPtr_->Size());
 
-    // 测试获取chunk,chunkfile pool size不变一直为0
+    // Getting a chunk leaves the chunkfile pool size unchanged,
+    // remaining at 0
     char metapage[4096];
     memset(metapage, '1', 4096);
 
@@ -683,12 +684,12 @@ TEST(CSFilePool, GetFileDirectlyTest) {
         ASSERT_EQ(buf[i], '1');
     }
 
-    // 测试回收chunk,文件被删除,FilePool Size不受影响
+    // Recycling a chunk deletes the file; the FilePool size is unaffected
     chunkFilePoolPtr_->RecycleFile("./new1");
     ASSERT_EQ(0, chunkFilePoolPtr_->Size());
     ASSERT_FALSE(fsptr->FileExists("./new1"));
 
-    // 删除测试文件及目录
+    // Delete the test files and directories
     ASSERT_EQ(0, fsptr->Close(fd));
     ASSERT_EQ(0, fsptr->Delete(filePoolPath + "1000"));
     ASSERT_EQ(0, fsptr->Delete(filePoolPath));
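Reviewer note on CheckFileOpenOrNot used in InitializeNomalTest above: the
test only needs to know whether the scan phase left descriptors open. One way
such a helper can be written on Linux — purely a sketch; the suite's real
helper may differ — is to walk /proc/self/fd:

    #include <dirent.h>
    #include <unistd.h>

    #include <climits>
    #include <string>

    bool FileCurrentlyOpen(const std::string& path) {
        DIR* dir = opendir("/proc/self/fd");
        if (dir == nullptr) return false;
        bool found = false;
        struct dirent* ent;
        while (!found && (ent = readdir(dir)) != nullptr) {
            std::string link = std::string("/proc/self/fd/") + ent->d_name;
            char target[PATH_MAX] = {0};
            ssize_t n = readlink(link.c_str(), target, sizeof(target) - 1);
            // A descriptor is "open on path" if its symlink resolves to it.
            found = (n > 0 && path == std::string(target, n));
        }
        closedir(dir);
        return found;
    }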
diff --git a/test/chunkserver/fake_datastore.h b/test/chunkserver/fake_datastore.h
index 75b5c80330..6d26815bc8 100644
--- a/test/chunkserver/fake_datastore.h
+++ b/test/chunkserver/fake_datastore.h
@@ -24,27 +24,25 @@
 #define TEST_CHUNKSERVER_FAKE_DATASTORE_H_
 
 #include
-#include
 #include
+#include
 #include
 
-#include "src/chunkserver/datastore/chunkserver_datastore.h"
 #include "include/chunkserver/chunkserver_common.h"
+#include "src/chunkserver/datastore/chunkserver_datastore.h"
 
 namespace curve {
 namespace chunkserver {
 
+using curve::fs::FileSystemType;
 using curve::fs::LocalFileSystem;
 using curve::fs::LocalFsFactory;
-using curve::fs::FileSystemType;
 
 class FakeCSDataStore : public CSDataStore {
  public:
     FakeCSDataStore(DataStoreOptions options,
-                    std::shared_ptr<LocalFileSystem> fs) :
-        CSDataStore(fs,
-                    std::make_shared<FilePool>(fs),
-                    options) {
+                    std::shared_ptr<LocalFileSystem> fs)
+        : CSDataStore(fs, std::make_shared<FilePool>(fs), options) {
         chunk_ = new (std::nothrow) char[options.chunkSize];
         ::memset(chunk_, 0, options.chunkSize);
         sn_ = 0;
@@ -93,10 +91,7 @@ class FakeCSDataStore : public CSDataStore {
         }
     }
 
-    CSErrorCode ReadChunk(ChunkID id,
-                          SequenceNum sn,
-                          char *buf,
-                          off_t offset,
+    CSErrorCode ReadChunk(ChunkID id, SequenceNum sn, char* buf, off_t offset,
                           size_t length) override {
         CSErrorCode errorCode = HasInjectError();
         if (errorCode != CSErrorCode::Success) {
@@ -105,18 +100,15 @@ class FakeCSDataStore : public CSDataStore {
         if (chunkIds_.find(id) == chunkIds_.end()) {
             return CSErrorCode::ChunkNotExistError;
         }
-        ::memcpy(buf, chunk_+offset, length);
+        ::memcpy(buf, chunk_ + offset, length);
         if (HasInjectError()) {
             return CSErrorCode::InternalError;
         }
         return CSErrorCode::Success;
     }
 
-    CSErrorCode ReadSnapshotChunk(ChunkID id,
-                                  SequenceNum sn,
-                                  char *buf,
-                                  off_t offset,
-                                  size_t length) override {
+    CSErrorCode ReadSnapshotChunk(ChunkID id, SequenceNum sn, char* buf,
+                                  off_t offset, size_t length) override {
         CSErrorCode errorCode = HasInjectError();
         if (errorCode != CSErrorCode::Success) {
             return errorCode;
@@ -124,32 +116,26 @@ class FakeCSDataStore : public CSDataStore {
         if (chunkIds_.find(id) == chunkIds_.end()) {
             return CSErrorCode::ChunkNotExistError;
         }
-        ::memcpy(buf, chunk_+offset, length);
+        ::memcpy(buf, chunk_ + offset, length);
         return CSErrorCode::Success;
     }
 
-    CSErrorCode WriteChunk(ChunkID id,
-                           SequenceNum sn,
-                           const butil::IOBuf& buf,
-                           off_t offset,
-                           size_t length,
-                           uint32_t *cost,
-                           const std::string & csl = "") override {
+    CSErrorCode WriteChunk(ChunkID id, SequenceNum sn, const butil::IOBuf& buf,
+                           off_t offset, size_t length, uint32_t* cost,
+                           const std::string& csl = "") override {
         CSErrorCode errorCode = HasInjectError();
         if (errorCode != CSErrorCode::Success) {
             return errorCode;
         }
-        ::memcpy(chunk_+offset, buf.to_string().c_str(), length);
+        ::memcpy(chunk_ + offset, buf.to_string().c_str(), length);
         *cost = length;
         chunkIds_.insert(id);
         sn_ = sn;
         return CSErrorCode::Success;
     }
 
-    CSErrorCode CreateCloneChunk(ChunkID id,
-                                 SequenceNum sn,
-                                 SequenceNum correctedSn,
-                                 ChunkSizeType size,
+    CSErrorCode CreateCloneChunk(ChunkID id, SequenceNum sn,
+                                 SequenceNum correctedSn, ChunkSizeType size,
                                  const string& location) override {
         CSErrorCode errorCode = HasInjectError();
         if (errorCode != CSErrorCode::Success) {
@@ -160,9 +146,7 @@ class FakeCSDataStore : public CSDataStore {
         return CSErrorCode::Success;
     }
 
-    CSErrorCode PasteChunk(ChunkID id,
-                           const char * buf,
-                           off_t offset,
+    CSErrorCode PasteChunk(ChunkID id, const char* buf, off_t offset,
                            size_t length) {
         CSErrorCode errorCode = HasInjectError();
         if (errorCode != CSErrorCode::Success) {
@@ -171,12 +155,11 @@ class FakeCSDataStore : public CSDataStore {
         if (chunkIds_.find(id) == chunkIds_.end()) {
             return CSErrorCode::ChunkNotExistError;
         }
-        ::memcpy(chunk_+offset, buf, length);
+        ::memcpy(chunk_ + offset, buf, length);
         return CSErrorCode::Success;
     }
 
-    CSErrorCode GetChunkInfo(ChunkID id,
-                             CSChunkInfo* info) override {
+    CSErrorCode GetChunkInfo(ChunkID id, CSChunkInfo* info) override {
         CSErrorCode errorCode = HasInjectError();
         if (errorCode != CSErrorCode::Success) {
             return errorCode;
@@ -190,10 +173,8 @@ class FakeCSDataStore : public CSDataStore {
         }
     }
 
-    CSErrorCode GetChunkHash(ChunkID id,
-                             off_t offset,
-                             size_t length,
-                             std::string *hash) {
+    CSErrorCode GetChunkHash(ChunkID id, off_t offset, size_t length,
+                             std::string* hash) {
         uint32_t crc32c = 0;
         if (chunkIds_.find(id) != chunkIds_.end()) {
             crc32c = curve::common::CRC32(chunk_ + offset, length);
@@ -213,14 +194,14 @@ class FakeCSDataStore : public CSDataStore {
         if (errorCode == CSErrorCode::Success) {
             return error_;
         } else {
-            // 注入错误自动恢复
+            // Injected errors recover automatically
            error_ = CSErrorCode::Success;
             return errorCode;
         }
     }
 
 private:
-    char *chunk_;
+    char* chunk_;
     std::set<ChunkID> chunkIds_;
     bool snapDeleteFlag_;
     SequenceNum sn_;
@@ -234,14 +215,14 @@ class FakeFilePool : public FilePool {
         : FilePool(lfs) {}
     ~FakeFilePool() {}
 
-    bool Initialize(const FilePoolOptions &cfop) {
+    bool Initialize(const FilePoolOptions& cfop) {
         LOG(INFO) << "FakeFilePool init success";
         return true;
     }
-    int GetChunk(const std::string &chunkpath, char *metapage) { return 0; }
-    int RecycleChunk(const std::string &chunkpath) { return 0; }
+    int GetChunk(const std::string& chunkpath, char* metapage) { return 0; }
+    int RecycleChunk(const std::string& chunkpath) { return 0; }
     size_t Size() { return 4; }
-    void UnInitialize() { }
+    void UnInitialize() {}
 };
 
 }  // namespace chunkserver
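Reviewer note: FakeCSDataStore above implements consume-once error injection —
the armed error is returned exactly once and then resets to Success
automatically. The mechanism in isolation, as a runnable standalone model
(not the fake itself; ErrorInjector is a name invented for this sketch):

    #include <cassert>

    enum class CSErrorCode { Success, InternalError };

    class ErrorInjector {
     public:
        void Arm(CSErrorCode e) { error_ = e; }
        CSErrorCode Consume() {
            CSErrorCode e = error_;
            error_ = CSErrorCode::Success;  // auto-recover after one shot
            return e;
        }
     private:
        CSErrorCode error_ = CSErrorCode::Success;
    };

    int main() {
        ErrorInjector inj;
        inj.Arm(CSErrorCode::InternalError);
        assert(inj.Consume() == CSErrorCode::InternalError);  // fires once
        assert(inj.Consume() == CSErrorCode::Success);        // recovered
        return 0;
    }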
"test/chunkserver/mock_copyset_node_manager.h" using ::testing::_; -using ::testing::Return; -using ::testing::SetArgPointee; using ::testing::DoAll; using ::testing::Mock; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace chunkserver { @@ -46,12 +48,12 @@ TEST(HeartbeatHelperTest, test_BuildNewPeers) { conf.set_epoch(2); std::vector newPeers; - // 1. 目标节点格式错误 + // 1. Destination node format error { - // 目标节点为空 + // The target node is empty ASSERT_FALSE(HeartbeatHelper::BuildNewPeers(conf, &newPeers)); - // 目标节点不为空但格式有误 + // The target node is not empty but has incorrect format auto replica = new ::curve::common::Peer(); replica->set_address("192.0.0.4"); conf.set_allocated_configchangeitem(replica); @@ -63,12 +65,12 @@ TEST(HeartbeatHelperTest, test_BuildNewPeers) { conf.set_allocated_configchangeitem(replica); } - // 2. 待删除节点格式错误 + // 2. The format of the node to be deleted is incorrect { - // 待删除节点为空 + // The node to be deleted is empty ASSERT_FALSE(HeartbeatHelper::BuildNewPeers(conf, &newPeers)); - // 待删除接节点不为空但格式有误 + // The node to be deleted is not empty but has incorrect format auto replica = new ::curve::common::Peer(); replica->set_address("192.0.0.1"); conf.set_allocated_oldpeer(replica); @@ -80,13 +82,13 @@ TEST(HeartbeatHelperTest, test_BuildNewPeers) { conf.set_allocated_oldpeer(replica); } - // 3. 生成新配置成功 + // 3. Successfully generated new configuration { for (int i = 0; i < 3; i++) { - auto replica = conf.add_peers(); - replica->set_id(i + 1); - replica->set_address( - "192.0.0." + std::to_string(i + 1) + ":8200:0"); + auto replica = conf.add_peers(); + replica->set_id(i + 1); + replica->set_address("192.0.0." + std::to_string(i + 1) + + ":8200:0"); } ASSERT_TRUE(HeartbeatHelper::BuildNewPeers(conf, &newPeers)); ASSERT_EQ(3, newPeers.size()); @@ -110,19 +112,17 @@ TEST(HeartbeatHelperTest, test_CopySetConfValid) { std::shared_ptr copyset; - // 1. chunkserver中不存在需要变更的copyset - { - ASSERT_FALSE(HeartbeatHelper::CopySetConfValid(conf, copyset)); - } + // 1. There is no copyset that needs to be changed in chunkserver + { ASSERT_FALSE(HeartbeatHelper::CopySetConfValid(conf, copyset)); } - // 2. mds下发copysetConf的epoch是落后的 + // 2. The epoch of copysetConf issued by mds is outdated { copyset = std::make_shared(); EXPECT_CALL(*copyset, GetConfEpoch()).Times(2).WillOnce(Return(3)); ASSERT_FALSE(HeartbeatHelper::CopySetConfValid(conf, copyset)); } - // 3. mds下发copysetConf正常 + // 3. Mds sends copysetConf normally { EXPECT_CALL(*copyset, GetConfEpoch()).WillOnce(Return(2)); ASSERT_TRUE(HeartbeatHelper::CopySetConfValid(conf, copyset)); @@ -140,24 +140,24 @@ TEST(HeartbeatHelperTest, test_NeedPurge) { auto copyset = std::make_shared(); - // 1. mds下发空配置 + // 1. MDS issued empty configuration { conf.set_epoch(0); ASSERT_TRUE(HeartbeatHelper::NeedPurge(csEp, conf, copyset)); } - // 2. 该副本不在复制组中 + // 2. The replica is not in the replication group { conf.set_epoch(2); for (int i = 2; i <= 4; i++) { - auto replica = conf.add_peers(); - replica->set_id(i); - replica->set_address("192.0.0." + std::to_string(i) + ":8200:0"); + auto replica = conf.add_peers(); + replica->set_id(i); + replica->set_address("192.0.0." + std::to_string(i) + ":8200:0"); } ASSERT_TRUE(HeartbeatHelper::NeedPurge(csEp, conf, copyset)); } - // 3. 该副本在复制组中 + // 3. 
+    // 3. This replica is in the replication group
     {
         butil::str2endpoint("192.0.0.4:8200", &csEp);
         ASSERT_FALSE(HeartbeatHelper::NeedPurge(csEp, conf, copyset));
     }
 }
 
 TEST(HeartbeatHelperTest, test_ChunkServerLoadCopySetFin) {
-    // 1. peerId的格式不对
+    // 1. The format of peerId is invalid
     {
         std::string peerId = "127.0.0:5555:0";
         ASSERT_FALSE(HeartbeatHelper::ChunkServerLoadCopySetFin(peerId));
     }
 
-    // 2. 对端的chunkserver_service未起起来
+    // 2. The peer's chunkserver_service is not started
     {
         std::string peerId = "127.0.0.1:8888:0";
         ASSERT_FALSE(HeartbeatHelper::ChunkServerLoadCopySetFin(peerId));
     }
 
-
     auto server = new brpc::Server();
     MockCopysetNodeManager* copysetNodeManager = new MockCopysetNodeManager();
     ChunkServerServiceImpl* chunkserverService =
         new ChunkServerServiceImpl(copysetNodeManager);
-    ASSERT_EQ(0,
-        server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE));
+    ASSERT_EQ(
+        0, server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE));
     ASSERT_EQ(0, server->Start("127.0.0.1", {5900, 5999}, nullptr));
     string listenAddr(butil::endpoint2str(server->listen_address()).c_str());
 
-    // 3. 对端copyset未加载完成
+    // 3. The peer's copysets have not finished loading
     {
         EXPECT_CALL(*copysetNodeManager, LoadFinished())
             .WillOnce(Return(false));
         ASSERT_FALSE(HeartbeatHelper::ChunkServerLoadCopySetFin(listenAddr));
     }
 
-    // 4. 对端copyset加载完成
+    // 4. The peer's copysets have finished loading
     {
-        EXPECT_CALL(*copysetNodeManager, LoadFinished())
-            .WillOnce(Return(true));
+        EXPECT_CALL(*copysetNodeManager, LoadFinished()).WillOnce(Return(true));
         ASSERT_TRUE(HeartbeatHelper::ChunkServerLoadCopySetFin(listenAddr));
     }
 
@@ -210,4 +208,3 @@ TEST(HeartbeatHelperTest, test_ChunkServerLoadCopySetFin) {
 
 }  // namespace chunkserver
 }  // namespace curve
-
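Reviewer note: the BuildNewPeers and ChunkServerLoadCopySetFin cases above
hinge on the "ip:port:index" peer-address format — "192.0.0.4" and
"127.0.0:5555:0" are rejected while "192.0.0.4:8200:0" is accepted. A rough
shape check in that spirit (illustrative only; braft's actual parser is
authoritative):

    #include <regex>
    #include <string>

    bool LooksLikePeerAddress(const std::string& s) {
        // Four dotted octets, a port, and a replica index.
        static const std::regex kPeer(R"(\d{1,3}(\.\d{1,3}){3}:\d{1,5}:\d+)");
        return std::regex_match(s, kPeer);
    }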
diff --git a/test/chunkserver/heartbeat_test.cpp b/test/chunkserver/heartbeat_test.cpp
index fcfcae375a..eabadce0ee 100644
--- a/test/chunkserver/heartbeat_test.cpp
+++ b/test/chunkserver/heartbeat_test.cpp
@@ -20,25 +20,26 @@
  * Author: lixiaocui
 */
 
-#include
+#include "src/chunkserver/heartbeat.h"
+
 #include
 #include
+#include
+
 #include
 
-#include "test/chunkserver/heartbeat_test_common.h"
 #include "include/chunkserver/chunkserver_common.h"
-#include "src/common/configuration.h"
-#include "src/chunkserver/heartbeat.h"
 #include "src/chunkserver/cli.h"
-
+#include "src/common/configuration.h"
+#include "test/chunkserver/heartbeat_test_common.h"
 #include "test/client/fake/fakeMDS.h"
 
-std::string mdsMetaServerAddr = "127.0.0.1:9300";     // NOLINT
+std::string mdsMetaServerAddr = "127.0.0.1:9300";  // NOLINT
 
 namespace curve {
 namespace chunkserver {
 
-const LogicPoolID poolId = 666;
-const CopysetID copysetId = 888;
+const LogicPoolID poolId = 666;
+const CopysetID copysetId = 888;
 
 class HeartbeatTest : public ::testing::Test {
  public:
@@ -57,27 +58,27 @@ class HeartbeatTest : public ::testing::Test {
         hbtest_->UnInitializeMds();
     }
 
-
 protected:
     std::shared_ptr<HeartbeatTestCommon> hbtest_;
 };
 
 TEST_F(HeartbeatTest, TransferLeader) {
-    // 创建copyset
-    std::vector<std::string> cslist{
-        "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"};
+    // Create copyset
+    std::vector<std::string> cslist{"127.0.0.1:8200", "127.0.0.1:8201",
+                                    "127.0.0.1:8202"};
 
     std::string confStr = "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0";
-    std::string dest1 = "127.0.0.1:8200:0";
-    std::string dest2 = "127.0.0.1:8201:0";
+    std::string dest1 = "127.0.0.1:8200:0";
+    std::string dest2 = "127.0.0.1:8201:0";
 
     hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr);
     hbtest_->WaitCopysetReady(poolId, copysetId, confStr);
 
-    // 构造req中期望的CopySetInfo,expectleader是dst1
+    // Construct the CopySetInfo expected in req; the expected leader is
+    // dest1
     ::curve::mds::heartbeat::CopySetInfo expect;
     expect.set_logicalpoolid(poolId);
     expect.set_copysetid(copysetId);
-    for (int j = 0; j < 3; j ++) {
+    for (int j = 0; j < 3; j++) {
         auto replica = expect.add_peers();
         replica->set_address("127.0.0.1:820" + std::to_string(j) + ":0");
     }
@@ -86,11 +87,11 @@ TEST_F(HeartbeatTest, TransferLeader) {
     peer->set_address(dest1);
     expect.set_allocated_leaderpeer(peer);
 
-    // 构造resp中的CopySetConf, transfer到dst1
+    // Construct the CopySetConf in resp, transfer to dest1
     CopySetConf conf;
     conf.set_logicalpoolid(poolId);
     conf.set_copysetid(copysetId);
-    for (int j = 0; j < 3; j ++) {
+    for (int j = 0; j < 3; j++) {
         auto replica = conf.add_peers();
         replica->set_address("127.0.0.1:820" + std::to_string(j) + ":0");
     }
@@ -99,27 +100,28 @@ TEST_F(HeartbeatTest, TransferLeader) {
     conf.set_allocated_configchangeitem(peer);
     conf.set_type(curve::mds::heartbeat::TRANSFER_LEADER);
 
-    // 等待变更成功
+    // Wait for the change to succeed
     ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000));
 
-    // 构造req中期望的CopySetInfo,expectleader是dst2
+    // Construct the CopySetInfo expected in req; the expected leader is
+    // dest2
     peer = new ::curve::common::Peer();
     peer->set_address(dest2);
     expect.set_allocated_leaderpeer(peer);
 
-    // 构造resp中的CopySetConf, transfer到dst2
+    // Construct the CopySetConf in resp, transfer to dest2
     peer = new ::curve::common::Peer();
     peer->set_address(dest2);
     conf.set_allocated_configchangeitem(peer);
 
-    // 等待变更成功
+    // Wait for the change to succeed
     ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000));
 }
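Reviewer note: each heartbeat test here drives WailForConfigChangeOk (the
existing identifier — presumably intended as "Wait") with a 30-second budget:
it keeps serving heartbeat responses carrying the desired conf until the
reported CopySetInfo matches the expectation or the deadline passes. The
generic shape of that wait loop, as a standalone sketch:

    #include <chrono>
    #include <functional>
    #include <thread>

    // matched() stands in for "reported CopySetInfo equals the expectation".
    bool PollUntil(const std::function<bool()>& matched, int timeLimitMs) {
        auto deadline = std::chrono::steady_clock::now() +
                        std::chrono::milliseconds(timeLimitMs);
        while (std::chrono::steady_clock::now() < deadline) {
            if (matched()) return true;
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }
        return false;  // timed out, the config change did not converge
    }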
 
 TEST_F(HeartbeatTest, RemovePeer) {
-    // 创建copyset
-    std::vector<std::string> cslist{
-        "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"};
+    // Create copyset
+    std::vector<std::string> cslist{"127.0.0.1:8200", "127.0.0.1:8201",
+                                    "127.0.0.1:8202"};
     std::string confStr = "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0";
     std::string leaderPeer = "127.0.0.1:8200:0";
     std::string destPeer = "127.0.0.1:8202:0";
@@ -128,21 +130,21 @@ TEST_F(HeartbeatTest, RemovePeer) {
     hbtest_->WaitCopysetReady(poolId, copysetId, confStr);
     hbtest_->TransferLeaderSync(poolId, copysetId, confStr, leaderPeer);
 
-    // 构造req中期望的CopySetInfo
+    // Construct the CopySetInfo expected in req
     ::curve::mds::heartbeat::CopySetInfo expect;
     expect.set_logicalpoolid(poolId);
     expect.set_copysetid(copysetId);
-    for (int j = 0; j < 2; j ++) {
+    for (int j = 0; j < 2; j++) {
         auto replica = expect.add_peers();
         replica->set_address("127.0.0.1:820" + std::to_string(j) + ":0");
     }
     expect.set_epoch(2);
 
-    // 构造resp中的CopySetConf
+    // Construct the CopySetConf in resp
     CopySetConf conf;
     conf.set_logicalpoolid(poolId);
     conf.set_copysetid(copysetId);
-    for (int j = 0; j < 3; j ++) {
+    for (int j = 0; j < 3; j++) {
         auto replica = conf.add_peers();
         replica->set_address("127.0.0.1:820" + std::to_string(j) + ":0");
     }
@@ -151,62 +153,62 @@ TEST_F(HeartbeatTest, RemovePeer) {
     conf.set_allocated_configchangeitem(peer);
     conf.set_type(curve::mds::heartbeat::REMOVE_PEER);
 
-    // 等待变更成功
+    // Wait for the change to succeed
     ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000));
 }
 
 TEST_F(HeartbeatTest, CleanPeer_after_Configchange) {
-    // 创建copyset
+    // Create copyset
     std::vector<std::string> cslist{"127.0.0.1:8200"};
     std::string confStr = "127.0.0.1:8200:0";
 
     hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr);
     hbtest_->WaitCopysetReady(poolId, copysetId, confStr);
 
-    // 构造req中期望的CopySetInfo
+    // Construct the CopySetInfo expected in req
     ::curve::mds::heartbeat::CopySetInfo expect;
 
-    // 构造resp中的CopySetConf
+    // Construct the CopySetConf in resp
     CopySetConf conf;
     conf.set_logicalpoolid(poolId);
     conf.set_copysetid(copysetId);
 
-    // 等待变更成功
+    // Wait for the change to succeed
     ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000));
 }
 
 TEST_F(HeartbeatTest, CleanPeer_not_exist_in_MDS) {
-    // 在chunkserver上创建一个copyset
+    // Create a copyset on the chunkserver
     std::vector<std::string> cslist{"127.0.0.1:8202"};
     std::string confStr = "127.0.0.1:8202:0";
 
     hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr);
     hbtest_->WaitCopysetReady(poolId, copysetId, confStr);
 
-    // 构造req中期望的CopySetInfo
+    // Construct the CopySetInfo expected in req
     ::curve::mds::heartbeat::CopySetInfo expect;
 
-    // 构造resp中的CopySetConf
+    // Construct the CopySetConf in resp
     CopySetConf conf;
     conf.set_logicalpoolid(poolId);
     conf.set_copysetid(copysetId);
     conf.set_epoch(0);
 
-    // 等待变更成功
+    // Wait for the change to succeed
     ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000));
 }
 
 TEST_F(HeartbeatTest, AddPeer) {
-    // 创建copyset
-    std::vector<std::string> cslist{
-        "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"};
+    // Create copyset
+    std::vector<std::string> cslist{"127.0.0.1:8200", "127.0.0.1:8201",
+                                    "127.0.0.1:8202"};
     std::string confStr = "127.0.0.1:8200:0,127.0.0.1:8201:0";
     std::string addPeer = "127.0.0.1:8202:0";
 
     hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr);
     hbtest_->WaitCopysetReady(poolId, copysetId, confStr);
 
-    // 构造req中期望的CopySetInfo
+    // Construct the CopySetInfo expected in req
     ::curve::mds::heartbeat::CopySetInfo expect;
     expect.set_logicalpoolid(poolId);
     expect.set_copysetid(copysetId);
@@ -216,11 +218,11 @@ TEST_F(HeartbeatTest, AddPeer) {
     }
     expect.set_epoch(2);
 
-    // 构造resp中的CopySetConf
+    // Construct the CopySetConf in resp
     CopySetConf conf;
     conf.set_logicalpoolid(poolId);
     conf.set_copysetid(copysetId);
-    for (int j = 0; j < 2; j ++) {
+    for (int j = 0; j < 2; j++) {
         auto replica = conf.add_peers();
         replica->set_address("127.0.0.1:820" + std::to_string(j) + ":0");
     }
@@ -229,14 +231,14 @@ TEST_F(HeartbeatTest, AddPeer) {
     conf.set_allocated_configchangeitem(peer);
     conf.set_type(curve::mds::heartbeat::ADD_PEER);
 
-    // 等待变更成功
+    // Wait for the change to succeed
     ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000));
 }
 
 TEST_F(HeartbeatTest, ChangePeer) {
-    // 创建copyset
-    std::vector<std::string> cslist{
-        "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"};
+    // Create copyset
+    std::vector<std::string> cslist{"127.0.0.1:8200", "127.0.0.1:8201",
+                                    "127.0.0.1:8202"};
     std::string oldConf = "127.0.0.1:8200:0,127.0.0.1:8202:0";
     std::string addOne = "127.0.0.1:8201:0";
     std::string rmOne = "127.0.0.1:8202:0";
 
     hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, oldConf);
     hbtest_->WaitCopysetReady(poolId, copysetId, oldConf);
 
-    // 构造req中期望的CopySetInfo
+    // Construct the CopySetInfo expected in req
     ::curve::mds::heartbeat::CopySetInfo expect;
     expect.set_logicalpoolid(poolId);
     expect.set_copysetid(copysetId);
@@ -254,7 +256,7 @@ TEST_F(HeartbeatTest, ChangePeer) {
     replica->set_address("127.0.0.1:8201:0");
     expect.set_epoch(2);
 
-    // 构造resp中的CopySetConf
+    // Construct the CopySetConf in resp
     CopySetConf conf;
     conf.set_logicalpoolid(poolId);
     conf.set_copysetid(copysetId);
@@ -271,7 +273,7 @@ TEST_F(HeartbeatTest, ChangePeer) {
     conf.set_allocated_oldpeer(peer);
     conf.set_type(curve::mds::heartbeat::CHANGE_PEER);
 
-    // 等待变更成功
+    // Wait for the change to succeed
ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } diff --git a/test/chunkserver/heartbeat_test_common.cpp b/test/chunkserver/heartbeat_test_common.cpp index 20d6b444f8..5a24d3dac9 100644 --- a/test/chunkserver/heartbeat_test_common.cpp +++ b/test/chunkserver/heartbeat_test_common.cpp @@ -23,8 +23,8 @@ #include "test/chunkserver/heartbeat_test_common.h" -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT static const char* confPath[3] = { "./8200/chunkserver.conf", @@ -37,12 +37,12 @@ namespace chunkserver { HeartbeatTestCommon* HeartbeatTestCommon::hbtestCommon_ = nullptr; -void HeartbeatTestCommon::CleanPeer( - LogicPoolID poolId, CopysetID copysetId, const std::string& peer) { - ::google::protobuf::RpcController* cntl; - ::google::protobuf::Closure* done; - const HeartbeatRequest* req; - HeartbeatResponse* resp; +void HeartbeatTestCommon::CleanPeer(LogicPoolID poolId, CopysetID copysetId, + const std::string& peer) { + ::google::protobuf::RpcController* cntl; + ::google::protobuf::Closure* done; + const HeartbeatRequest* req; + HeartbeatResponse* resp; LOG(INFO) << "Cleaning peer " << peer; @@ -52,16 +52,16 @@ void HeartbeatTestCommon::CleanPeer( GetHeartbeat(&cntl, &req, &resp, &done); brpc::ClosureGuard done_guard(done); - std::string sender = req->ip() + ":" + std::to_string(req->port()) - + ":0"; + std::string sender = + req->ip() + ":" + std::to_string(req->port()) + ":0"; if (sender != peer) { continue; } if (req->copysetinfos_size() >= 1) { int i = 0; - for (; i < req->copysetinfos_size(); i ++) { - if ( req->copysetinfos(i).logicalpoolid() == poolId && - req->copysetinfos(i).copysetid() == copysetId ) { + for (; i < req->copysetinfos_size(); i++) { + if (req->copysetinfos(i).logicalpoolid() == poolId && + req->copysetinfos(i).copysetid() == copysetId) { break; } } @@ -94,7 +94,7 @@ void HeartbeatTestCommon::CleanPeer( void HeartbeatTestCommon::CreateCopysetPeers( LogicPoolID poolId, CopysetID copysetId, - const std::vector &cslist, const std::string& confStr) { + const std::vector& cslist, const std::string& confStr) { braft::Configuration conf; ASSERT_EQ(0, conf.parse_from(confStr)); std::vector confPeers; @@ -113,8 +113,8 @@ void HeartbeatTestCommon::CreateCopysetPeers( cntl.set_timeout_ms(3000); request.set_logicpoolid(poolId); request.set_copysetid(copysetId); - for (auto peer = confPeers.begin(); - peer != confPeers.end(); peer++) { + for (auto peer = confPeers.begin(); peer != confPeers.end(); + peer++) { request.add_peerid(peer->to_string()); } @@ -122,11 +122,11 @@ void HeartbeatTestCommon::CreateCopysetPeers( copyset_stub.CreateCopysetNode(&cntl, &request, &response, nullptr); if (cntl.Failed()) { - LOG(ERROR) << "Creating copyset failed: " - << cntl.ErrorCode() << " " << cntl.ErrorText(); + LOG(ERROR) << "Creating copyset failed: " << cntl.ErrorCode() + << " " << cntl.ErrorText(); } else if (COPYSET_OP_STATUS_EXIST == response.status()) { - LOG(INFO) << "Skipped creating existed copyset <" - << poolId << ", " << copysetId << ">: " << conf + LOG(INFO) << "Skipped creating existed copyset <" << poolId + << ", " << copysetId << ">: " << conf << " on peer: " << *it; break; } else if (COPYSET_OP_STATUS_SUCCESS == response.status()) { @@ -141,8 +141,9 @@ void HeartbeatTestCommon::CreateCopysetPeers( } } -void HeartbeatTestCommon::WaitCopysetReady( - LogicPoolID poolId, CopysetID 
copysetId, const std::string& confStr) { +void HeartbeatTestCommon::WaitCopysetReady(LogicPoolID poolId, + CopysetID copysetId, + const std::string& confStr) { braft::PeerId peerId; butil::Status status; Configuration conf; @@ -160,9 +161,10 @@ void HeartbeatTestCommon::WaitCopysetReady( } } -void HeartbeatTestCommon::TransferLeaderSync( - LogicPoolID poolId, CopysetID copysetId, - const std::string& confStr, const std::string& newLeader) { +void HeartbeatTestCommon::TransferLeaderSync(LogicPoolID poolId, + CopysetID copysetId, + const std::string& confStr, + const std::string& newLeader) { braft::PeerId peerId; butil::Status status; Configuration conf; @@ -198,21 +200,18 @@ void HeartbeatTestCommon::ReleaseHeartbeat() { } void HeartbeatTestCommon::SetHeartbeatInfo( - ::google::protobuf::RpcController* cntl, - const HeartbeatRequest* request, - HeartbeatResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController* cntl, const HeartbeatRequest* request, + HeartbeatResponse* response, ::google::protobuf::Closure* done) { cntl_ = cntl; req_ = request; resp_ = response; done_ = done; } -void HeartbeatTestCommon::GetHeartbeat( - ::google::protobuf::RpcController** cntl, - const HeartbeatRequest** request, - HeartbeatResponse** response, - ::google::protobuf::Closure** done) { +void HeartbeatTestCommon::GetHeartbeat(::google::protobuf::RpcController** cntl, + const HeartbeatRequest** request, + HeartbeatResponse** response, + ::google::protobuf::Closure** done) { std::unique_lock lock(hbtestCommon_->GetMutex()); handlerReady_.store(true, std::memory_order_release); @@ -230,10 +229,8 @@ void HeartbeatTestCommon::GetHeartbeat( } void HeartbeatTestCommon::HeartbeatCallback( - ::google::protobuf::RpcController* cntl, - const HeartbeatRequest* request, - HeartbeatResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController* cntl, const HeartbeatRequest* request, + HeartbeatResponse* response, ::google::protobuf::Closure* done) { { std::unique_lock lock(hbtestCommon_->GetMutex()); if (!hbtestCommon_->GetReady().load(std::memory_order_acquire)) { @@ -250,8 +247,8 @@ void HeartbeatTestCommon::HeartbeatCallback( } bool HeartbeatTestCommon::SameCopySetInfo( - const ::curve::mds::heartbeat::CopySetInfo &orig, - const ::curve::mds::heartbeat::CopySetInfo &expect) { + const ::curve::mds::heartbeat::CopySetInfo& orig, + const ::curve::mds::heartbeat::CopySetInfo& expect) { if (!expect.IsInitialized()) { if (!orig.IsInitialized()) { return true; @@ -301,13 +298,12 @@ bool HeartbeatTestCommon::SameCopySetInfo( } bool HeartbeatTestCommon::WailForConfigChangeOk( - const ::curve::mds::heartbeat::CopySetConf &conf, - ::curve::mds::heartbeat::CopySetInfo expectedInfo, - int timeLimit) { - ::google::protobuf::RpcController* cntl; - ::google::protobuf::Closure* done; - const HeartbeatRequest* req; - HeartbeatResponse* resp; + const ::curve::mds::heartbeat::CopySetConf& conf, + ::curve::mds::heartbeat::CopySetInfo expectedInfo, int timeLimit) { + ::google::protobuf::RpcController* cntl; + ::google::protobuf::Closure* done; + const HeartbeatRequest* req; + HeartbeatResponse* resp; int64_t startTime = butil::monotonic_time_ms(); bool leaderPeerSet = expectedInfo.has_leaderpeer(); @@ -316,8 +312,8 @@ bool HeartbeatTestCommon::WailForConfigChangeOk( GetHeartbeat(&cntl, &req, &resp, &done); brpc::ClosureGuard done_guard(done); - // 获取当前copyset的leader - std::string sender = + // Get the leader of the current copyset + std::string sender = req->ip() + ":" + 
std::to_string(req->port()) + ":0"; if (1 == req->copysetinfos_size()) { leader = req->copysetinfos(0).leaderpeer().address(); @@ -333,8 +329,10 @@ bool HeartbeatTestCommon::WailForConfigChangeOk( } } - // 如果当前req是leader发送的,判断req中的内容是否符合要求 - // 如果符合要求,返回true; 如果不符合要求,设置resp中的内容 + // If the current req was sent by the leader, check whether its content + // meets the expectations. If it does, return true; otherwise fill resp + // with the change command if (leader == sender) { if (!leaderPeerSet) { auto peer = new ::curve::common::Peer(); @@ -342,22 +340,23 @@ bool HeartbeatTestCommon::WailForConfigChangeOk( expectedInfo.set_allocated_leaderpeer(peer); } - // 判断req是否符合要求, 符合要求返回true + // Check whether the req meets the expectations; return true if it + // does if (req->copysetinfos_size() == 1) { if (SameCopySetInfo(req->copysetinfos(0), expectedInfo)) { return true; } LOG(INFO) << "req->copysetinfos:" - << req->copysetinfos(0).DebugString() - << ", expectedInfo: " << expectedInfo.DebugString(); + << req->copysetinfos(0).DebugString() + << ", expectedInfo: " << expectedInfo.DebugString(); } else if (req->copysetinfos_size() == 0) { - if (SameCopySetInfo( - ::curve::mds::heartbeat::CopySetInfo{}, expectedInfo)) { + if (SameCopySetInfo(::curve::mds::heartbeat::CopySetInfo{}, + expectedInfo)) { return true; } } - // 不符合要求设置resp + // Expectations not met: set resp accordingly if (req->copysetinfos_size() == 1) { auto build = resp->add_needupdatecopysets(); if (!build->has_epoch()) { @@ -388,7 +387,7 @@ int RmDirData(std::string uri) { int RemovePeersData(bool rmChunkServerMeta) { common::Configuration conf; - for (int i = 0; i < 3; i ++) { + for (int i = 0; i < 3; i++) { conf.SetConfigPath(confPath[i]); CHECK(conf.LoadConfig()) << "load conf err"; @@ -396,35 +395,35 @@ int RemovePeersData(bool rmChunkServerMeta) { LOG_IF(FATAL, !conf.GetStringValue("copyset.chunk_data_uri", &res)); if (RmDirData(res)) { LOG(ERROR) << "Failed to remove node " << i - << " data dir: " << strerror(errno); + << " data dir: " << strerror(errno); return -1; } LOG_IF(FATAL, !conf.GetStringValue("copyset.raft_log_uri", &res)); if (RmDirData(res)) { LOG(ERROR) << "Failed to remove node " << i - << " log dir: " << strerror(errno); + << " log dir: " << strerror(errno); return -1; } LOG_IF(FATAL, !conf.GetStringValue("copyset.raft_log_uri", &res)); if (RmDirData(res)) { LOG(ERROR) << "Failed to remove node " << i - << " raft meta dir: " << strerror(errno); + << " raft meta dir: " << strerror(errno); return -1; } LOG_IF(FATAL, !conf.GetStringValue("copyset.raft_snapshot_uri", &res)); if (RmDirData(res)) { LOG(ERROR) << "Failed to remove node " << i - << " raft snapshot dir: " << strerror(errno); + << " raft snapshot dir: " << strerror(errno); return -1; } LOG_IF(FATAL, !conf.GetStringValue("copyset.recycler_uri", &res)); if (RmDirData(res)) { LOG(ERROR) << "Failed to remove node " << i - << " raft recycler dir: " << strerror(errno); + << " raft recycler dir: " << strerror(errno); return -1; } @@ -432,7 +431,7 @@ int RemovePeersData(bool rmChunkServerMeta) { if (rmChunkServerMeta) { if (RmFile(res)) { LOG(ERROR) << "Failed to remove node " << i - << " chunkserver meta file: " << strerror(errno); + << " chunkserver meta file: " << strerror(errno); return -1; } }
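The loop above reduces to a simple polling contract. A simplified restatement of its control flow (SenderIsLeader, ReportMatches and AppendConf are illustrative condensations, not functions in this file):

// Simplified restatement of WailForConfigChangeOk's polling loop.
bool WaitForExpectedReport(int timeLimitMs) {
    int64_t start = butil::monotonic_time_ms();
    while (butil::monotonic_time_ms() - start < timeLimitMs) {
        GetHeartbeat(&cntl, &req, &resp, &done);  // intercept the next heartbeat
        brpc::ClosureGuard guard(done);
        if (!SenderIsLeader(req)) continue;       // only the leader's report counts
        if (ReportMatches(req, expectedInfo)) return true;
        AppendConf(resp, conf);                   // otherwise re-issue the command
    }
    return false;  // the change did not land within the time limit
}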
diff --git a/test/chunkserver/heartbeat_test_common.h b/test/chunkserver/heartbeat_test_common.h index 433f7119eb..744dbe78d3 100644 --- a/test/chunkserver/heartbeat_test_common.h +++ b/test/chunkserver/heartbeat_test_common.h @@ -23,20 +23,20 @@ #ifndef TEST_CHUNKSERVER_HEARTBEAT_TEST_COMMON_H_ #define TEST_CHUNKSERVER_HEARTBEAT_TEST_COMMON_H_ -#include #include +#include +#include #include +#include //NOLINT #include -#include -#include //NOLINT #include "include/chunkserver/chunkserver_common.h" -#include "src/chunkserver/copyset_node_manager.h" #include "proto/heartbeat.pb.h" -#include "src/common/configuration.h" -#include "src/chunkserver/heartbeat.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" +#include "src/chunkserver/heartbeat.h" +#include "src/common/configuration.h" #include "src/common/uri_parser.h" #include "test/client/fake/fakeMDS.h" @@ -47,7 +47,7 @@ using ::curve::common::UriParser; class HeartbeatTestCommon { public: - explicit HeartbeatTestCommon(const std::string &filename) { + explicit HeartbeatTestCommon(const std::string& filename) { hbtestCommon_ = this; handlerReady_.store(false, std::memory_order_release); @@ -57,17 +57,11 @@ class HeartbeatTestCommon { mds_->StartService(); } - std::atomic& GetReady() { - return handlerReady_; - } + std::atomic& GetReady() { return handlerReady_; } - std::mutex& GetMutex() { - return hbMtx_; - } + std::mutex& GetMutex() { return hbMtx_; } - std::condition_variable& GetCV() { - return hbCV_; - } + std::condition_variable& GetCV() { return hbCV_; } void UnInitializeMds() { mds_->UnInitialize(); @@ -75,105 +69,110 @@ class HeartbeatTestCommon { } /** - * CleanPeer 清空peer上指定copyset数据 + * CleanPeer: Clear the specified copyset data on the peer * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] peer chunkserver ip + * @param[in] poolId Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] peer chunkserver IP */ - void CleanPeer( - LogicPoolID poolId, CopysetID copysetId, const std::string& peer); + void CleanPeer(LogicPoolID poolId, CopysetID copysetId, + const std::string& peer); /** - * CreateCopysetPeers 在指定chunkserverlist上创建指定配置的copyset + * CreateCopysetPeers: Create a copyset of the specified configuration on + * the specified chunkserverlist * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] cslist 待创建copyset的chunkserver列表 - * @param[in] conf 使用该配置作为初始配置创建copyset + * @param[in] poolId Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] cslist The chunkserver list for the copyset to be created + * @param[in] conf Use this configuration as the initial configuration to + * create a copyset */ void CreateCopysetPeers(LogicPoolID poolId, CopysetID copysetId, - const std::vector &cslist, const std::string& conf); + const std::vector& cslist, + const std::string& conf); /** - * WaitCopysetReady 等待指定copyset选出leader + * WaitCopysetReady: Wait for the specified copyset to elect a leader * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] conf 指定copyset复制组成员 + * @param[in] poolId Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] conf specifies the copyset replication group members */ - void WaitCopysetReady( - LogicPoolID poolId, CopysetID copysetId, const std::string& conf); + void WaitCopysetReady(LogicPoolID poolId, CopysetID copysetId, + const std::string& conf); /** - * TransferLeaderSync 触发transferleader并等待完成 + * TransferLeaderSync: Trigger transferleader and wait for completion * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] conf 指定copyset复制组成员 - * @param[in] newLeader 目标leader + * @param[in] poolId 
Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] conf specifies the copyset replication group members + * @param[in] newLeader Target Leader */ void TransferLeaderSync(LogicPoolID poolId, CopysetID copysetId, - const std::string& conf, const std::string& newLeader); + const std::string& conf, + const std::string& newLeader); /** - * WailForConfigChangeOk 指定时间内(timeLimitMs),chunkserver是否上报了 - * 符合预期的copyset信息 + * WailForConfigChangeOk: Determine whether the chunkserver has reported the + * expected copyset information within the specified time limit + * (timeLimitMs). * - * @param[in] conf mds需要下发给指定copyset的变更命令 - * @param[in] expectedInfo 变更之后期望复制组配置 - * @param[in] timeLimitMs 等待时间 + * @param[in] conf mds needs to issue a change command to the specified + * copyset + * @param[in] expectedInfo replication group configuration after change + * @param[in] timeLimitMs waiting time * - * @return false-指定时间内copyset配置未能达到预期, true-达到预期 + * @return false - Copyset configuration failed to meet expectations within + * the specified time, true - met expectations */ bool WailForConfigChangeOk( - const ::curve::mds::heartbeat::CopySetConf &conf, - ::curve::mds::heartbeat::CopySetInfo expectedInfo, - int timeLimitMs); + const ::curve::mds::heartbeat::CopySetConf& conf, + ::curve::mds::heartbeat::CopySetInfo expectedInfo, int timeLimitMs); /** - * SameCopySetInfo 比较两个copysetInfo是否一致 + * SameCopySetInfo: Compare two copysetInfo structures to check if they are + * identical. * - * @param[in] orig 待比较的copysetInfo - * @param[in] expect 期望copysetInfo + * @param[in] orig The copysetInfo to compare. + * @param[in] expect The expected copysetInfo for comparison. * - * @return true-一致 false-不一致 + * @return true if they are identical, false if they are not. */ - bool SameCopySetInfo( - const ::curve::mds::heartbeat::CopySetInfo &orig, - const ::curve::mds::heartbeat::CopySetInfo &expect); + bool SameCopySetInfo(const ::curve::mds::heartbeat::CopySetInfo& orig, + const ::curve::mds::heartbeat::CopySetInfo& expect); /** - * ReleaseHeartbeat heartbeat中的会掉设置为nullptr + * ReleaseHeartbeat: Set the callback in the heartbeat to nullptr. */ void ReleaseHeartbeat(); /** - * SetHeartbeatInfo 把mds接受到的cntl等信息复制到成员变量 + * SetHeartbeatInfo: Copy the cntl and other information received by mds to + * the member variable */ - void SetHeartbeatInfo( - ::google::protobuf::RpcController* cntl, - const HeartbeatRequest* request, - HeartbeatResponse* response, - ::google::protobuf::Closure* done); + void SetHeartbeatInfo(::google::protobuf::RpcController* cntl, + const HeartbeatRequest* request, + HeartbeatResponse* response, + ::google::protobuf::Closure* done); /** - * GetHeartbeat 把当前成员中的cntl等变量设置到rpc中 + * GetHeartbeat: Set the current member's cntl and other variables into the + * RPC. 
*/ - void GetHeartbeat( - ::google::protobuf::RpcController** cntl, - const HeartbeatRequest** request, - HeartbeatResponse** response, - ::google::protobuf::Closure** done); + void GetHeartbeat(::google::protobuf::RpcController** cntl, + const HeartbeatRequest** request, + HeartbeatResponse** response, + ::google::protobuf::Closure** done); /** - * HeartbeatCallback heartbeat回掉 + * HeartbeatCallback: heartbeat callback */ - static void HeartbeatCallback( - ::google::protobuf::RpcController* controller, - const HeartbeatRequest* request, - HeartbeatResponse* response, - ::google::protobuf::Closure* done); + static void HeartbeatCallback(::google::protobuf::RpcController* controller, + const HeartbeatRequest* request, + HeartbeatResponse* response, + ::google::protobuf::Closure* done); private: FakeMDS* mds_; diff --git a/test/chunkserver/heartbeat_test_main.cpp b/test/chunkserver/heartbeat_test_main.cpp index de06bcc255..d2d517bfc4 100644 --- a/test/chunkserver/heartbeat_test_main.cpp +++ b/test/chunkserver/heartbeat_test_main.cpp @@ -21,9 +21,9 @@ * 2018/12/23 Wenyu Zhou Initial version */ -#include #include #include +#include #include "include/chunkserver/chunkserver_common.h" #include "src/chunkserver/chunkserver.h" @@ -31,7 +31,7 @@ #include "test/chunkserver/heartbeat_test_common.h" #include "test/integration/common/config_generator.h" -static const char *param[3][15] = { +static const char* param[3][15] = { { "heartbeat_test", "-chunkServerIp=127.0.0.1", @@ -89,7 +89,7 @@ using ::curve::chunkserver::ChunkServer; butil::AtExitManager atExitManager; -static int RunChunkServer(int i, int argc, char **argv) { +static int RunChunkServer(int i, int argc, char** argv) { auto chunkserver = new curve::chunkserver::ChunkServer(); if (chunkserver == nullptr) { LOG(ERROR) << "Failed to create chunkserver " << i; @@ -104,7 +104,7 @@ static int RunChunkServer(int i, int argc, char **argv) { return 0; } -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { int ret; pid_t pids[3]; testing::InitGoogleTest(&argc, argv); @@ -133,10 +133,11 @@ int main(int argc, char *argv[]) { LOG(FATAL) << "Failed to create chunkserver process 0"; } else if (pids[i] == 0) { /* - * RunChunkServer内部会调用LOG(), 有较低概率因不兼容fork()而卡死 + * RunChunkServer will call LOG() internally, with a low probability + * of getting stuck due to incompatible fork() */ - return RunChunkServer(i, sizeof(param[i]) / sizeof(char *), - const_cast(param[i])); + return RunChunkServer(i, sizeof(param[i]) / sizeof(char*), + const_cast(param[i])); } } @@ -148,8 +149,9 @@ int main(int argc, char *argv[]) { LOG(FATAL) << "Failed to create test proccess"; } else if (pid == 0) { /* - * RUN_ALL_TESTS内部可能会调用LOG(), - * 有较低概率因不兼容fork()而卡死 + * LOG() may be called internally in RUN_ALL_TESTS, + * There is a low probability of getting stuck due to incompatible + * fork() */ ret = RUN_ALL_TESTS(); return ret; @@ -171,10 +173,11 @@ int main(int argc, char *argv[]) { LOG(FATAL) << "Failed to restart chunkserver process 1"; } else if (pid == 0) { /* - * RunChunkServer内部会调用LOG(), 有较低概率因不兼容fork()而卡死 + * RunChunkServer will call LOG() internally, with a low probability + * of getting stuck due to incompatible fork() */ - ret = RunChunkServer(1, sizeof(param[1]) / sizeof(char *), - const_cast(param[1])); + ret = RunChunkServer(1, sizeof(param[1]) / sizeof(char*), + const_cast(param[1])); return ret; } sleep(2); diff --git a/test/chunkserver/inflight_throttle_test.cpp b/test/chunkserver/inflight_throttle_test.cpp index 8faa18d76e..333e1f6934 100644 --- 
a/test/chunkserver/inflight_throttle_test.cpp +++ b/test/chunkserver/inflight_throttle_test.cpp @@ -20,10 +20,11 @@ * Author: wudemiao */ +#include "src/chunkserver/inflight_throttle.h" + #include #include "src/common/concurrent/concurrent.h" -#include "src/chunkserver/inflight_throttle.h" namespace curve { namespace chunkserver { @@ -31,7 +32,7 @@ namespace chunkserver { using curve::common::Thread; TEST(InflightThrottleTest, basic) { - // 基本测试 + // Basic testing { uint64_t maxInflight = 1; InflightThrottle inflightThrottle(maxInflight); @@ -45,7 +46,7 @@ TEST(InflightThrottleTest, basic) { ASSERT_FALSE(inflightThrottle.IsOverLoad()); } - // 并发加 + // Concurrent addition { uint64_t maxInflight = 10000; InflightThrottle inflightThrottle(maxInflight); @@ -78,7 +79,7 @@ TEST(InflightThrottleTest, basic) { ASSERT_FALSE(inflightThrottle.IsOverLoad()); } - // 并发减 + // Concurrent reduction { uint64_t maxInflight = 16; InflightThrottle inflightThrottle(maxInflight); diff --git a/test/chunkserver/metrics_test.cpp b/test/chunkserver/metrics_test.cpp index 282802336f..57c7a79c33 100644 --- a/test/chunkserver/metrics_test.cpp +++ b/test/chunkserver/metrics_test.cpp @@ -20,24 +20,25 @@ * Author: yangyaokai */ -#include -#include +#include +#include #include #include -#include -#include -#include -#include +#include +#include + #include +#include +#include #include -#include "src/common/configuration.h" #include "src/chunkserver/chunkserver_metrics.h" -#include "src/chunkserver/trash.h" #include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/datastore/file_pool.h" -#include "src/fs/local_filesystem.h" #include "src/chunkserver/raftlog/curve_segment_log_storage.h" +#include "src/chunkserver/trash.h" +#include "src/common/configuration.h" +#include "src/fs/local_filesystem.h" #include "test/chunkserver/datastore/filepool_helper.h" namespace curve { @@ -55,15 +56,14 @@ const PageSizeType PAGE_SIZE = 4 * 1024; const int chunkNum = 10; const LogicPoolID logicId = 1; -const string baseDir = "./data_csmetric"; // NOLINT -const string copysetDir = "local://./data_csmetric"; // NOLINT -const string logDir = "curve://./data_csmetric"; // NOLINT -const string chunkPoolDir = "./chunkfilepool_csmetric"; // NOLINT +const string baseDir = "./data_csmetric"; // NOLINT +const string copysetDir = "local://./data_csmetric"; // NOLINT +const string logDir = "curve://./data_csmetric"; // NOLINT +const string chunkPoolDir = "./chunkfilepool_csmetric"; // NOLINT const string chunkPoolMetaPath = "./chunkfilepool_csmetric.meta"; // NOLINT -const string walPoolDir = "./walfilepool_csmetric"; // NOLINT -const string walPoolMetaPath = "./walfilepool_csmetric.meta"; // NOLINT -const string trashPath = "./trash_csmetric"; // NOLINT - +const string walPoolDir = "./walfilepool_csmetric"; // NOLINT +const string walPoolMetaPath = "./walfilepool_csmetric.meta"; // NOLINT +const string trashPath = "./trash_csmetric"; // NOLINT class CSMetricTest : public ::testing::Test { public: @@ -90,8 +90,7 @@ class CSMetricTest : public ::testing::Test { cfop.blockSize = BLOCK_SIZE; memcpy(cfop.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); - if (lfs_->DirExists(poolDir)) - lfs_->Delete(poolDir); + if (lfs_->DirExists(poolDir)) lfs_->Delete(poolDir); allocateChunk(lfs_, chunkNum, poolDir, CHUNK_SIZE); ASSERT_TRUE(filePool->Initialize(cfop)); ASSERT_EQ(chunkNum, filePool->Size()); @@ -147,8 +146,8 @@ class CSMetricTest : public ::testing::Test { } void CreateConfigFile() { - confFile_ = "csmetric.conf"; - // 创建配置文件 + 
confFile_ = "csmetric.conf"; + // Create the configuration file std::string confItem; std::ofstream cFile(confFile_); ASSERT_TRUE(cFile.is_open()); @@ -210,18 +209,18 @@ TEST_F(CSMetricTest, CopysetMetricTest) { int rc = metric_->CreateCopysetMetric(logicId, copysetId); ASSERT_EQ(rc, 0); - // 如果copyset的metric已经存在,返回-1 + // If the metric for the copyset already exists, return -1 rc = metric_->CreateCopysetMetric(logicId, copysetId); ASSERT_EQ(rc, -1); - // 获取不存在的copyset metric,返回nullptr + // Getting a non-existent copyset metric returns nullptr CopysetMetricPtr copysetMetric = metric_->GetCopysetMetric(logicId, 2); ASSERT_EQ(copysetMetric, nullptr); copysetMetric = metric_->GetCopysetMetric(logicId, copysetId); ASSERT_NE(copysetMetric, nullptr); - // 删除copyset metric后,再去获取返回nullptr + // After the copyset metric is deleted, retrieval returns nullptr rc = metric_->RemoveCopysetMetric(logicId, copysetId); ASSERT_EQ(rc, 0); copysetMetric = metric_->GetCopysetMetric(logicId, copysetId); @@ -233,7 +232,8 @@ TEST_F(CSMetricTest, OnRequestTest) { int rc = metric_->CreateCopysetMetric(logicId, copysetId); ASSERT_EQ(rc, 0); - CopysetMetricPtr copysetMetric = metric_->GetCopysetMetric(logicId, copysetId); // NOLINT + CopysetMetricPtr copysetMetric = + metric_->GetCopysetMetric(logicId, copysetId); // NOLINT ASSERT_NE(copysetMetric, nullptr); const IOMetricPtr serverWriteMetric = @@ -257,7 +257,7 @@ TEST_F(CSMetricTest, OnRequestTest) { const IOMetricPtr cpDownloadMetric = copysetMetric->GetIOMetric(CSIOMetricType::DOWNLOAD); - // 统计写入成功的情况 + // Count successful writes metric_->OnRequest(logicId, copysetId, CSIOMetricType::WRITE_CHUNK); ASSERT_EQ(1, serverWriteMetric->reqNum_.get_value()); ASSERT_EQ(0, serverWriteMetric->ioNum_.get_value()); @@ -268,7 +268,7 @@ TEST_F(CSMetricTest, OnRequestTest) { ASSERT_EQ(0, cpWriteMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpWriteMetric->errorNum_.get_value()); - // 统计读取成功的情况 + // Count successful reads metric_->OnRequest(logicId, copysetId, CSIOMetricType::READ_CHUNK); ASSERT_EQ(1, serverReadMetric->reqNum_.get_value()); ASSERT_EQ(0, serverReadMetric->ioNum_.get_value()); @@ -279,7 +279,7 @@ TEST_F(CSMetricTest, OnRequestTest) { ASSERT_EQ(0, cpReadMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpReadMetric->errorNum_.get_value()); - // 统计恢复成功的情况 + // Count successful recoveries metric_->OnRequest(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK); ASSERT_EQ(1, serverRecoverMetric->reqNum_.get_value()); ASSERT_EQ(0, serverRecoverMetric->ioNum_.get_value()); @@ -290,7 +290,7 @@ TEST_F(CSMetricTest, OnRequestTest) { ASSERT_EQ(0, cpRecoverMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpRecoverMetric->errorNum_.get_value()); - // 统计paste成功的情况 + // Count successful pastes metric_->OnRequest(logicId, copysetId, CSIOMetricType::PASTE_CHUNK); ASSERT_EQ(1, serverPasteMetric->reqNum_.get_value()); ASSERT_EQ(0, serverPasteMetric->ioNum_.get_value()); @@ -301,7 +301,7 @@ TEST_F(CSMetricTest, OnRequestTest) { ASSERT_EQ(0, cpPasteMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpPasteMetric->errorNum_.get_value()); - // 统计下载成功的情况 + // Count successful downloads metric_->OnRequest(logicId, copysetId, CSIOMetricType::DOWNLOAD); ASSERT_EQ(1, serverDownloadMetric->reqNum_.get_value()); ASSERT_EQ(0, serverDownloadMetric->ioNum_.get_value());
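OnRequestTest above and OnResponseTest below together pin down the accounting split: OnRequest only bumps the request counter, while OnResponse adds the I/O count and bytes on success, or only the error counter on failure. A hedged sketch of that rule (the real IOMetric lives in src/chunkserver/chunkserver_metrics.h; bvar::Adder is assumed from the get_value() calls in the assertions):

#include <bvar/bvar.h>

// Sketch of the accounting rule the assertions verify; not the real class.
struct IOMetricSketch {
    bvar::Adder<uint64_t> reqNum_, ioNum_, ioBytes_, errorNum_;
    void OnRequest() { reqNum_ << 1; }  // arrival: request count only
    void OnResponse(size_t size, bool hasError) {
        if (hasError) {
            errorNum_ << 1;             // failure: error count only
            return;
        }
        ioNum_ << 1;                    // success: one completed I/O ...
        ioBytes_ << size;               // ... plus its byte count
    }
};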
@@ -318,7 +318,8 @@ TEST_F(CSMetricTest, OnResponseTest) { int rc = metric_->CreateCopysetMetric(logicId, copysetId); ASSERT_EQ(rc, 0); - CopysetMetricPtr copysetMetric = metric_->GetCopysetMetric(logicId, copysetId); // NOLINT + CopysetMetricPtr copysetMetric = + metric_->GetCopysetMetric(logicId, copysetId); // NOLINT ASSERT_NE(copysetMetric, nullptr); const IOMetricPtr serverWriteMetric = @@ -345,9 +346,9 @@ TEST_F(CSMetricTest, OnResponseTest) { size_t size = PAGE_SIZE; int64_t latUs = 100; bool hasError = false; - // 统计写入成功的情况 - metric_->OnResponse( - logicId, copysetId, CSIOMetricType::WRITE_CHUNK, size, latUs, hasError); + // Count successful writes + metric_->OnResponse(logicId, copysetId, CSIOMetricType::WRITE_CHUNK, size, + latUs, hasError); ASSERT_EQ(0, serverWriteMetric->reqNum_.get_value()); ASSERT_EQ(1, serverWriteMetric->ioNum_.get_value()); ASSERT_EQ(PAGE_SIZE, serverWriteMetric->ioBytes_.get_value()); @@ -357,9 +358,9 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpWriteMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpWriteMetric->errorNum_.get_value()); - // 统计读取成功的情况 - metric_->OnResponse( - logicId, copysetId, CSIOMetricType::READ_CHUNK, size, latUs, hasError); + // Count successful reads + metric_->OnResponse(logicId, copysetId, CSIOMetricType::READ_CHUNK, size, + latUs, hasError); ASSERT_EQ(0, serverReadMetric->reqNum_.get_value()); ASSERT_EQ(1, serverReadMetric->ioNum_.get_value()); ASSERT_EQ(PAGE_SIZE, serverReadMetric->ioBytes_.get_value()); @@ -369,9 +370,9 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpReadMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpReadMetric->errorNum_.get_value()); - // 统计恢复成功的情况 - metric_->OnResponse(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK, - size, latUs, hasError); + // Count successful recoveries + metric_->OnResponse(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK, size, + latUs, hasError); ASSERT_EQ(0, serverRecoverMetric->reqNum_.get_value()); ASSERT_EQ(1, serverRecoverMetric->ioNum_.get_value()); ASSERT_EQ(PAGE_SIZE, serverRecoverMetric->ioBytes_.get_value()); @@ -381,9 +382,9 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpRecoverMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpRecoverMetric->errorNum_.get_value()); - // 统计paste成功的情况 - metric_->OnResponse( - logicId, copysetId, CSIOMetricType::PASTE_CHUNK, size, latUs, hasError); + // Count successful pastes + metric_->OnResponse(logicId, copysetId, CSIOMetricType::PASTE_CHUNK, size, + latUs, hasError); ASSERT_EQ(0, serverPasteMetric->reqNum_.get_value()); ASSERT_EQ(1, serverPasteMetric->ioNum_.get_value()); ASSERT_EQ(PAGE_SIZE, serverPasteMetric->ioBytes_.get_value()); @@ -393,9 +394,9 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpPasteMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpPasteMetric->errorNum_.get_value()); - // 统计下载成功的情况 - metric_->OnResponse( - logicId, copysetId, CSIOMetricType::DOWNLOAD, size, latUs, hasError); + // Count successful downloads + metric_->OnResponse(logicId, copysetId, CSIOMetricType::DOWNLOAD, size, + latUs, hasError); ASSERT_EQ(0, serverDownloadMetric->reqNum_.get_value()); ASSERT_EQ(1, serverDownloadMetric->ioNum_.get_value()); ASSERT_EQ(PAGE_SIZE, serverDownloadMetric->ioBytes_.get_value()); @@ -406,9 +407,10 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(0, cpDownloadMetric->errorNum_.get_value()); hasError = true; - // 统计写入失败的情况,错误数增加,其他不变 - metric_->OnResponse( - logicId, copysetId, CSIOMetricType::WRITE_CHUNK, size, latUs, hasError); + // On a write failure the error count increases and everything else + // stays unchanged + metric_->OnResponse(logicId, copysetId, CSIOMetricType::WRITE_CHUNK, size, + latUs, hasError); ASSERT_EQ(0, serverWriteMetric->reqNum_.get_value());
ASSERT_EQ(1, serverWriteMetric->ioNum_.get_value()); ASSERT_EQ(PAGE_SIZE, serverWriteMetric->ioBytes_.get_value()); @@ -418,9 +420,10 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpWriteMetric->ioBytes_.get_value()); ASSERT_EQ(1, cpWriteMetric->errorNum_.get_value()); - // 统计读取失败的情况,错误数增加,其他不变 - metric_->OnResponse( - logicId, copysetId, CSIOMetricType::READ_CHUNK, size, latUs, hasError); + // On a read failure the error count increases and everything else + // stays unchanged + metric_->OnResponse(logicId, copysetId, CSIOMetricType::READ_CHUNK, size, + latUs, hasError); ASSERT_EQ(0, serverReadMetric->reqNum_.get_value()); ASSERT_EQ(1, serverReadMetric->ioNum_.get_value()); ASSERT_EQ(PAGE_SIZE, serverReadMetric->ioBytes_.get_value()); @@ -430,9 +433,9 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpReadMetric->ioBytes_.get_value()); ASSERT_EQ(1, cpReadMetric->errorNum_.get_value()); - // 统计恢复失败的情况 - metric_->OnResponse(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK, - size, latUs, hasError); + // Count recovery failures + metric_->OnResponse(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK, size, + latUs, hasError); ASSERT_EQ(0, serverRecoverMetric->reqNum_.get_value()); ASSERT_EQ(1, serverRecoverMetric->ioNum_.get_value()); ASSERT_EQ(PAGE_SIZE, serverRecoverMetric->ioBytes_.get_value()); @@ -442,9 +445,9 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpRecoverMetric->ioBytes_.get_value()); ASSERT_EQ(1, cpRecoverMetric->errorNum_.get_value()); - // 统计paste失败的情况 - metric_->OnResponse( - logicId, copysetId, CSIOMetricType::PASTE_CHUNK, size, latUs, hasError); + // Count paste failures + metric_->OnResponse(logicId, copysetId, CSIOMetricType::PASTE_CHUNK, size, + latUs, hasError); ASSERT_EQ(0, serverPasteMetric->reqNum_.get_value()); ASSERT_EQ(1, serverPasteMetric->ioNum_.get_value()); ASSERT_EQ(PAGE_SIZE, serverPasteMetric->ioBytes_.get_value()); @@ -454,9 +457,9 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpPasteMetric->ioBytes_.get_value()); ASSERT_EQ(1, cpPasteMetric->errorNum_.get_value()); - // 统计下载失败的情况 - metric_->OnResponse( - logicId, copysetId, CSIOMetricType::DOWNLOAD, size, latUs, hasError); + // Count download failures + metric_->OnResponse(logicId, copysetId, CSIOMetricType::DOWNLOAD, size, + latUs, hasError); ASSERT_EQ(0, serverDownloadMetric->reqNum_.get_value()); ASSERT_EQ(1, serverDownloadMetric->ioNum_.get_value()); ASSERT_EQ(PAGE_SIZE, serverDownloadMetric->ioBytes_.get_value()); @@ -468,19 +471,21 @@ TEST_F(CSMetricTest, OnResponseTest) { } TEST_F(CSMetricTest, CountTest) { - // 初始状态下,没有copyset,FilePool中有chunkNum个chunk + // In the initial state, there is no copyset and there are chunkNum chunks + // in FilePool ASSERT_EQ(0, metric_->GetCopysetCount()); ASSERT_EQ(10, metric_->GetChunkLeftCount()); // Shared with chunk file pool ASSERT_EQ(0, metric_->GetWalSegmentLeftCount()); - // 创建copyset + // Create copyset Configuration conf; CopysetID copysetId = 1; ASSERT_TRUE(copysetMgr_->CreateCopysetNode(logicId, copysetId, conf)); ASSERT_EQ(1, metric_->GetCopysetCount()); - // 此时copyset下面没有chunk和快照 + // At this point, there are no chunks or snapshots under the copyset - CopysetMetricPtr copysetMetric = metric_->GetCopysetMetric(logicId, copysetId); // NOLINT + CopysetMetricPtr copysetMetric = + metric_->GetCopysetMetric(logicId, copysetId); // NOLINT ASSERT_EQ(0, copysetMetric->GetChunkCount()); ASSERT_EQ(0, 
copysetMetric->GetSnapshotCount()); ASSERT_EQ(0, copysetMetric->GetCloneChunkCount()); @@ -522,7 +527,8 @@ TEST_F(CSMetricTest, CountTest) { ASSERT_EQ(10, metric_->GetWalSegmentLeftCount()); ASSERT_TRUE(copysetMgr_->CreateCopysetNode(logicId, copysetId2, conf)); ASSERT_EQ(2, metric_->GetCopysetCount()); - CopysetMetricPtr copysetMetric2 = metric_->GetCopysetMetric(logicId, copysetId2); // NOLINT + CopysetMetricPtr copysetMetric2 = + metric_->GetCopysetMetric(logicId, copysetId2); // NOLINT ASSERT_EQ(0, copysetMetric2->GetWalSegmentCount()); ASSERT_EQ(1, metric_->GetTotalWalSegmentCount()); @@ -534,7 +540,7 @@ TEST_F(CSMetricTest, CountTest) { ASSERT_EQ(1, copysetMetric2->GetWalSegmentCount()); ASSERT_EQ(2, metric_->GetTotalWalSegmentCount()); - // 写入数据生成chunk + // Write data to generate a chunk std::shared_ptr datastore = copysetMgr_->GetCopysetNode(logicId, copysetId)->GetDataStore(); ChunkID id = 1; @@ -553,7 +559,7 @@ TEST_F(CSMetricTest, CountTest) { ASSERT_EQ(0, metric_->GetTotalSnapshotCount()); ASSERT_EQ(0, metric_->GetTotalCloneChunkCount()); - // 增加版本号,生成快照 + // Bump the version number to generate a snapshot seq = 2; ASSERT_EQ(CSErrorCode::Success, datastore->WriteChunk(id, seq, dataBuf, offset, length, nullptr)); @@ -561,14 +567,14 @@ TEST_F(CSMetricTest, CountTest) { ASSERT_EQ(1, copysetMetric->GetSnapshotCount()); ASSERT_EQ(0, copysetMetric->GetCloneChunkCount()); - // 删除快照 + // Delete the snapshot ASSERT_EQ(CSErrorCode::Success, datastore->DeleteSnapshotChunkOrCorrectSn(id, seq)); ASSERT_EQ(1, copysetMetric->GetChunkCount()); ASSERT_EQ(0, copysetMetric->GetSnapshotCount()); ASSERT_EQ(0, copysetMetric->GetCloneChunkCount()); - // 创建 clone chunk + // Create clone chunks ChunkID id2 = 2; ChunkID id3 = 3; std::string location = "test@cs"; @@ -580,7 +586,8 @@ TEST_F(CSMetricTest, CountTest) { ASSERT_EQ(0, copysetMetric->GetSnapshotCount()); ASSERT_EQ(2, copysetMetric->GetCloneChunkCount()); - // clone chunk被覆盖写一遍,clone chun转成普通chunk + // The clone chunk is overwritten in full, converting it into a regular + // chunk char* buf2 = new char[CHUNK_SIZE]; butil::IOBuf dataBuf2; dataBuf2.append(buf2, CHUNK_SIZE); @@ -591,15 +598,14 @@ TEST_F(CSMetricTest, CountTest) { ASSERT_EQ(0, copysetMetric->GetSnapshotCount()); ASSERT_EQ(1, copysetMetric->GetCloneChunkCount()); - // 删除上面的chunk - ASSERT_EQ(CSErrorCode::Success, - datastore->DeleteChunk(id2, 1)); + // Delete the chunk above + ASSERT_EQ(CSErrorCode::Success, datastore->DeleteChunk(id2, 1)); ASSERT_EQ(2, copysetMetric->GetChunkCount()); ASSERT_EQ(0, copysetMetric->GetSnapshotCount()); ASSERT_EQ(1, copysetMetric->GetCloneChunkCount()); - // 模拟copyset重新加载datastore,重新初始化后,chunk数量不变 - // for bug fix: CLDCFS-1473 + // Simulate the copyset reloading the datastore; after reinitialization + // the chunk count stays unchanged (for bug fix: CLDCFS-1473) datastore->Initialize(); ASSERT_EQ(2, copysetMetric->GetChunkCount()); ASSERT_EQ(0, copysetMetric->GetSnapshotCount()); @@ -608,7 +614,7 @@ TEST_F(CSMetricTest, CountTest) { ASSERT_EQ(0, metric_->GetTotalSnapshotCount()); ASSERT_EQ(1, metric_->GetTotalCloneChunkCount()); - // 模拟copyset放入回收站测试 + // Simulate copyset placement in the recycle bin for testing ASSERT_TRUE(copysetMgr_->PurgeCopysetNodeData(logicId, copysetId)); ASSERT_TRUE(copysetMgr_->PurgeCopysetNodeData(logicId, copysetId2)); ASSERT_EQ(nullptr, metric_->GetCopysetMetric(logicId, copysetId)); @@ -619,7 +625,7 @@ TEST_F(CSMetricTest, CountTest) { // copysetId2: 1(wal) ASSERT_EQ(4, metric_->GetChunkTrashedCount()); - // 测试leader count计数 
+ // Test the leader count ASSERT_EQ(0, metric_->GetLeaderCount()); metric_->IncreaseLeaderCount(); ASSERT_EQ(1, metric_->GetLeaderCount()); @@ -639,11 +645,11 @@ TEST_F(CSMetricTest, ConfigTest) { "{\"conf_name\":\"chunksize\",\"conf_value\":\"1234\"}"); ASSERT_STREQ(bvar::Variable::describe_exposed(prefix + "timeout").c_str(), "{\"conf_name\":\"timeout\",\"conf_value\":\"100\"}"); - // 修改新增配置信息 + // Modify the newly added configuration information conf.SetStringValue("chunksize", "4321"); conf.SetStringValue("port", "9999"); metric_->ExposeConfigMetric(&conf); - // // 验证修改后信息 + // Verify the modified information ASSERT_STREQ(bvar::Variable::describe_exposed(prefix + "chunksize").c_str(), "{\"conf_name\":\"chunksize\",\"conf_value\":\"4321\"}"); ASSERT_STREQ(bvar::Variable::describe_exposed(prefix + "timeout").c_str(), @@ -657,7 +663,7 @@ TEST_F(CSMetricTest, OnOffTest) { ChunkServerMetricOptions metricOptions; metricOptions.port = PORT; metricOptions.ip = IP; - // 关闭metric开关后进行初始化 + // Initialize after turning off the metric switch { metricOptions.collectMetric = false; ASSERT_EQ(0, metric_->Init(metricOptions)); @@ -669,7 +675,7 @@ ASSERT_EQ(ret, true); metric_->ExposeConfigMetric(&conf); } - // 初始化后获取所有指标项都为空 + // After initialization, fetching any metric item returns empty { ASSERT_EQ(metric_->GetIOMetric(CSIOMetricType::READ_CHUNK), nullptr); ASSERT_EQ(metric_->GetIOMetric(CSIOMetricType::WRITE_CHUNK), nullptr); @@ -685,7 +691,8 @@ ASSERT_EQ(metric_->GetTotalCloneChunkCount(), 0); ASSERT_EQ(metric_->GetTotalWalSegmentCount(), 0); } - // 创建copyset的metric返回成功,但实际并未创建 + // Creating the metric for the copyset returned success, but it was not + // actually created { CopysetID copysetId = 1; ASSERT_EQ(0, metric_->CreateCopysetMetric(logicId, copysetId)); @@ -696,7 +703,7 @@ PAGE_SIZE, 100, false); ASSERT_EQ(0, metric_->RemoveCopysetMetric(logicId, copysetId)); } - // 增加leader count,但是实际未计数 + // Increase the leader count, but it is not actually counted { metric_->IncreaseLeaderCount(); ASSERT_EQ(metric_->GetLeaderCount(), 0); }
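ConfigTest above pins down the exposure format: each configuration item becomes a bvar whose described value is the JSON object {"conf_name":...,"conf_value":...}, refreshed on every ExposeConfigMetric call. A hedged sketch of the per-item step (illustrative only; the real code lives in src/chunkserver/chunkserver_metrics.cpp and may differ):

#include <bvar/bvar.h>
#include <string>

// Sketch: expose one config item as a bvar::Status<std::string> whose value
// is the JSON shape asserted in ConfigTest above. Names are illustrative.
void ExposeOneConfigItem(const std::string& prefix, const std::string& key,
                         const std::string& value) {
    auto* item = new bvar::Status<std::string>();
    item->expose_as(prefix, key);  // e.g. prefix + "chunksize"
    item->set_value("{\"conf_name\":\"" + key + "\",\"conf_value\":\"" +
                    value + "\"}");
}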
diff --git a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp index b33d196d95..1329b919a6 100644 --- a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp +++ b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp @@ -20,38 +20,38 @@ * Author: tongguangxun */ -#include -#include #include #include +#include +#include #include -#include "src/fs/local_filesystem.h" -#include "test/fs/mock_local_filesystem.h" #include "src/chunkserver/datastore/file_pool.h" #include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h" #include "src/chunkserver/raftsnapshot/define.h" +#include "src/fs/local_filesystem.h" +#include "test/fs/mock_local_filesystem.h" using ::testing::_; +using ::testing::AtLeast; +using ::testing::DoAll; +using ::testing::ElementsAre; using ::testing::Ge; using ::testing::Gt; using ::testing::Mock; -using ::testing::DoAll; +using ::testing::NotNull; using ::testing::Return; +using ::testing::ReturnArg; using ::testing::ReturnPointee; -using ::testing::NotNull; -using ::testing::StrEq; -using ::testing::ElementsAre; using ::testing::SetArgPointee; -using ::testing::ReturnArg; using ::testing::SetArgReferee; -using ::testing::AtLeast; +using ::testing::StrEq; +using curve::chunkserver::FilePool; using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::chunkserver::FilePool; using curve::fs::MockLocalFileSystem; namespace curve { namespace chunkserver { @@ -63,7 +63,7 @@ class RaftSnapshotFilesystemAdaptorMockTest : public testing::Test { public: void SetUp() { fsptr = curve::fs::LocalFsFactory::CreateFs( - curve::fs::FileSystemType::EXT4, "/dev/sda"); + curve::fs::FileSystemType::EXT4, "/dev/sda"); FilePoolPtr_ = std::make_shared(fsptr); ASSERT_TRUE(FilePoolPtr_); @@ -146,32 +146,33 @@ class RaftSnapshotFilesystemAdaptorMockTest : public testing::Test { } scoped_refptr fsadaptor; - std::shared_ptr FilePoolPtr_; - std::shared_ptr fsptr; - std::shared_ptr lfs; - CurveFilesystemAdaptor* rfa; + std::shared_ptr FilePoolPtr_; + std::shared_ptr fsptr; + std::shared_ptr lfs; + CurveFilesystemAdaptor* rfa; }; TEST_F(RaftSnapshotFilesystemAdaptorMockTest, open_file_mock_test) { - // 1. open flag不带CREAT, open失败 + // 1. open flag without CREAT: open fails CreateChunkFile("./10"); std::string path = "./10"; butil::File::Error e; ASSERT_EQ(FilePoolPtr_->Size(), 3); EXPECT_CALL(*lfs, Open(_, _)).Times(AtLeast(1)).WillRepeatedly(Return(-1)); - braft::FileAdaptor* fa = fsadaptor->open(path, - O_RDONLY | O_CLOEXEC, - nullptr, - &e); + braft::FileAdaptor* fa = + fsadaptor->open(path, O_RDONLY | O_CLOEXEC, nullptr, &e); ASSERT_EQ(FilePoolPtr_->Size(), 3); ASSERT_EQ(nullptr, fa); - // 2. open flag带CREAT, 从FilePool取文件,但是FilePool打开文件失败 - // 所以还是走原有逻辑,本地创建文件成功 + // 2. open flag with CREAT retrieves a file from FilePool, but FilePool + // fails to open the file. So the original logic still applies and the + // file is created locally EXPECT_CALL(*lfs, Open(_, _)).Times(3).WillOnce(Return(-1)) - .WillOnce(Return(-1)) - .WillOnce(Return(-1)); + EXPECT_CALL(*lfs, Open(_, _)) + .Times(3) + .WillOnce(Return(-1)) + .WillOnce(Return(-1)) + .WillOnce(Return(-1)); EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(0)); ASSERT_EQ(FilePoolPtr_->Size(), 3); path = "./11"; @@ -182,7 +183,8 @@ TEST_F(RaftSnapshotFilesystemAdaptorMockTest, open_file_mock_test) { ASSERT_FALSE(fsptr->FileExists("./10")); ASSERT_EQ(nullptr, fa); - // 3. 待创建文件在Filter中,但是直接本地创建该文件,创建成功 + // 3. The file to be created is in the filter list, but it is created + // directly on the local filesystem, and the creation succeeds EXPECT_CALL(*lfs, Open(_, _)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*lfs, FileExists(_)).Times(0); path = BRAFT_SNAPSHOT_META_FILE; @@ -191,14 +193,16 @@ TEST_F(RaftSnapshotFilesystemAdaptorMockTest, open_file_mock_test) { } TEST_F(RaftSnapshotFilesystemAdaptorMockTest, delete_file_mock_test) { - // 1. 删除文件,文件存在且在过滤名单里,但delete失败,返回false + // 1. Delete file. The file exists and is on the filter list, but delete + // fails, so false is returned EXPECT_CALL(*lfs, DirExists(_)).Times(1).WillRepeatedly(Return(false)); EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(true)); EXPECT_CALL(*lfs, Delete(_)).Times(1).WillRepeatedly(Return(-1)); bool ret = fsadaptor->delete_file(BRAFT_SNAPSHOT_META_FILE, true); ASSERT_FALSE(ret); - // 2. 删除文件,文件存在且不在过滤名单里,但recycle chunk失败,返回false + // 2. Delete file. 
The file exists and is not on the filter list, but the + // recycle chunk failed with false return EXPECT_CALL(*lfs, Delete(_)).Times(1).WillRepeatedly(Return(-1)); EXPECT_CALL(*lfs, DirExists(_)).Times(1).WillRepeatedly(Return(false)); EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(true)); @@ -206,29 +210,35 @@ TEST_F(RaftSnapshotFilesystemAdaptorMockTest, delete_file_mock_test) { ret = fsadaptor->delete_file("temp", true); ASSERT_FALSE(ret); - // 3. 删除目录,文件存在且不在过滤名单里,但recycle chunk失败,返回false + // 3. Delete directory. The file exists and is not on the filter list, but + // the recycle chunk failed with false return std::vector dircontent; dircontent.push_back("/2"); dircontent.push_back("/1"); dircontent.push_back(BRAFT_SNAPSHOT_META_FILE); - EXPECT_CALL(*lfs, DirExists(_)).Times(2).WillOnce(Return(true)) - .WillOnce(Return(false)); + EXPECT_CALL(*lfs, DirExists(_)) + .Times(2) + .WillOnce(Return(true)) + .WillOnce(Return(false)); EXPECT_CALL(*lfs, Delete(_)).Times(1).WillRepeatedly(Return(-1)); EXPECT_CALL(*lfs, Open(_, _)).Times(1).WillRepeatedly(Return(-1)); - EXPECT_CALL(*lfs, List(_, _)).Times(2).WillRepeatedly(DoAll( - SetArgPointee<1>(dircontent), Return(-1))); + EXPECT_CALL(*lfs, List(_, _)) + .Times(2) + .WillRepeatedly(DoAll(SetArgPointee<1>(dircontent), Return(-1))); ret = fsadaptor->delete_file("1", true); ASSERT_FALSE(ret); } TEST_F(RaftSnapshotFilesystemAdaptorMockTest, rename_mock_test) { - // 1. 重命名文件,文件存在且在过滤名单里,但Rename失败,返回false + // 1. Renaming file, file exists and is on the filter list, but Rename + // failed with false return EXPECT_CALL(*lfs, Rename(_, _, _)).Times(1).WillRepeatedly(Return(-1)); EXPECT_CALL(*lfs, FileExists(_)).Times(0); bool ret = fsadaptor->rename("1", BRAFT_SNAPSHOT_META_FILE); ASSERT_FALSE(ret); - // 2. 重命名文件,文件存在且不在过滤名单里,但Rename失败,返回false + // 2. Renaming file. 
The file exists and is not on the filter list; the chunk is + // recycled and the rename succeeds EXPECT_CALL(*lfs, Rename(_, _, _)).Times(1).WillRepeatedly(Return(0)); EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(true)); EXPECT_CALL(*lfs, Open(_, _)).Times(1).WillRepeatedly(Return(0)); @@ -237,5 +247,5 @@ TEST_F(RaftSnapshotFilesystemAdaptorMockTest, rename_mock_test) { ASSERT_TRUE(ret); } -} // namespace chunkserver -} // namespace curve +} // namespace chunkserver +} // namespace curve diff --git a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp index 926ccc76c5..a7de21c7fe 100644 --- a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp +++ b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp @@ -20,20 +20,21 @@ * Author: tongguangxun */ -#include +#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h" + #include #include +#include #include -#include "src/fs/local_filesystem.h" #include "src/chunkserver/datastore/file_pool.h" -#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h" #include "src/chunkserver/raftsnapshot/define.h" +#include "src/fs/local_filesystem.h" +using curve::chunkserver::FilePool; using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; -using curve::chunkserver::FilePool; namespace curve { namespace chunkserver { @@ -44,7 +45,7 @@ class CurveFilesystemAdaptorTest : public testing::Test { public: void SetUp() { fsptr = curve::fs::LocalFsFactory::CreateFs( - curve::fs::FileSystemType::EXT4, "/dev/sda"); + curve::fs::FileSystemType::EXT4, "/dev/sda"); chunkFilePoolPtr_ = std::make_shared(fsptr); ASSERT_TRUE(chunkFilePoolPtr_); @@ -124,42 +125,39 @@ class CurveFilesystemAdaptorTest : public testing::Test { } scoped_refptr fsadaptor; - std::shared_ptr chunkFilePoolPtr_; - std::shared_ptr fsptr; - CurveFilesystemAdaptor* rfa; + std::shared_ptr chunkFilePoolPtr_; + std::shared_ptr fsptr; + CurveFilesystemAdaptor* rfa; }; TEST_F(CurveFilesystemAdaptorTest, open_file_test) { - // 1. open flag不带CREAT + // 1. Open flag without CREAT std::string path = "./raftsnap/10"; butil::File::Error e; ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); - braft::FileAdaptor* fa = fsadaptor->open(path, - O_RDONLY | O_CLOEXEC, - nullptr, - &e); + braft::FileAdaptor* fa = + fsadaptor->open(path, O_RDONLY | O_CLOEXEC, nullptr, &e); ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); ASSERT_FALSE(fsptr->FileExists("./raftsnap/10")); ASSERT_EQ(nullptr, fa); - // 2. open flag待CREAT, 从FilePool取文件 + // 2. Open flag with CREAT, retrieve the file from FilePool ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); fa = fsadaptor->open(path, O_RDONLY | O_CLOEXEC | O_CREAT, nullptr, &e); ASSERT_EQ(chunkFilePoolPtr_->Size(), 2); ASSERT_TRUE(fsptr->FileExists("./raftsnap/10")); ASSERT_NE(nullptr, fa); - // 3. open flag待CREAT,FilePool为空时,从FilePool取文件 + // 3. Open flag with CREAT: when FilePool is empty, taking a file from + // FilePool fails ClearFilePool(); - fa = fsadaptor->open("./raftsnap/11", - O_RDONLY | O_CLOEXEC | O_CREAT, - nullptr, - &e); + fa = fsadaptor->open("./raftsnap/11", O_RDONLY | O_CLOEXEC | O_CREAT, + nullptr, &e); ASSERT_EQ(nullptr, fa); }
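The three cases above fix the open() contract: without O_CREAT the call falls through to a plain open, with O_CREAT the "new" file is taken from the preallocated FilePool (which is why the pool size drops from 3 to 2), and an empty pool makes the O_CREAT path fail. A hedged sketch of that branch (GetChunkFromPool and PassThroughOpen are illustrative names, not the real implementation):

// Sketch of the open() decision the test above exercises.
braft::FileAdaptor* OpenSketch(const std::string& path, int oflag) {
    if (oflag & O_CREAT) {
        if (GetChunkFromPool(path) != 0) {  // pool empty or fetch failed
            return nullptr;
        }
        oflag &= ~O_CREAT;  // the chunk file now exists on disk
    }
    return PassThroughOpen(path, oflag);  // plain posix/braft open
}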
TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { - // 1. 创建一个多层目录,且目录中含有chunk文件 + // 1. Create a multi-level directory with chunk files in it ASSERT_EQ(0, fsptr->Mkdir("./test_temp")); ASSERT_EQ(0, fsptr->Mkdir("./test_temp/test_temp1")); ASSERT_EQ(0, fsptr->Mkdir("./test_temp/test_temp1/test_temp2")); @@ -169,11 +167,11 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { CreateChunkFile("./test_temp/test_temp1/2"); CreateChunkFile("./test_temp/test_temp1/test_temp2/1"); CreateChunkFile("./test_temp/test_temp1/test_temp2/2"); - // 非递归删除非空文件夹,返回false + // Non-recursive deletion of a non-empty folder returns false ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); ASSERT_FALSE(fsadaptor->delete_file("./test_temp", false)); ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); - // 递归删除文件夹,chunk被回收到FilePool + // Recursive deletion of the folder: chunks are recycled to FilePool ASSERT_TRUE(fsadaptor->delete_file("./test_temp", true)); ASSERT_EQ(chunkFilePoolPtr_->Size(), 9); ASSERT_FALSE(fsptr->DirExists("./test_temp")); @@ -186,7 +184,7 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { ASSERT_FALSE(fsptr->FileExists("./test_temp/test_temp1/test_temp2/1")); ASSERT_FALSE(fsptr->FileExists("./test_temp/test_temp1/test_temp2/2")); - // 2. 创建一个单层空目录 + // 2. Create a single-level empty directory ASSERT_EQ(0, fsptr->Mkdir("./test_temp3")); ASSERT_TRUE(fsadaptor->delete_file("./test_temp3", false)); ASSERT_EQ(0, fsptr->Mkdir("./test_temp4")); @@ -195,7 +193,7 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { ASSERT_FALSE(fsptr->DirExists("./test_temp3")); ASSERT_FALSE(fsptr->DirExists("./test_temp4")); - // 3. 删除一个常规chunk文件, 会被回收到FilePool + // 3. Deleting a regular chunk file recycles it to FilePool ASSERT_EQ(0, fsptr->Mkdir("./test_temp5")); CreateChunkFile("./test_temp5/3"); ASSERT_TRUE(fsadaptor->delete_file("./test_temp5/3", false)); @@ -211,8 +209,7 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { ASSERT_EQ(0, fsptr->Delete("./test_temp5")); ASSERT_EQ(0, fsptr->Delete("./test_temp6")); - - // 4. 删除一个非chunk大小的文件,会直接删除该文件 + // 4. Deleting a file that is not chunk-sized removes it directly ASSERT_EQ(0, fsptr->Mkdir("./test_temp7")); int fd = fsptr->Open("./test_temp7/5", O_RDWR | O_CREAT); char data[4096]; @@ -226,12 +223,13 @@ }
TEST_F(CurveFilesystemAdaptorTest, rename_test) { - // 1. 创建一个多层目录,且目录中含有chunk文件 + // 1. Create a multi-level directory with chunk files in it ASSERT_EQ(0, fsptr->Mkdir("./test_temp")); std::string filename = "./test_temp/"; filename.append(BRAFT_SNAPSHOT_META_FILE); - // 目标文件size是chunksize,但是目标文件在过滤名单里,所以直接过滤 + // The target file size is chunksize, but it is on the filter list, so it + // is filtered directly CreateChunkFile(filename); int poolSize = chunkFilePoolPtr_->Size(); std::string temppath = "./temp"; @@ -243,7 +241,8 @@ TEST_F(CurveFilesystemAdaptorTest, rename_test) { ASSERT_EQ(poolSize - 1, chunkFilePoolPtr_->Size()); ASSERT_EQ(0, fsptr->Delete(filename)); - // 目标文件size是chunksize,但是目标文件不在过滤名单里,所以先回收再rename + // The target file size is chunksize, but it is not on the filter list, so + // it is recycled first and then renamed filename = "./test_temp/"; filename.append("test"); CreateChunkFile(filename); @@ -254,9 +253,8 @@ ASSERT_TRUE(fsptr->FileExists(filename)); ASSERT_EQ(0, fsptr->Delete(filename)); - ASSERT_EQ(0, fsptr->Delete("./test_temp")); } -} // namespace chunkserver -} // namespace curve +} // namespace chunkserver +} // namespace curve diff --git a/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp b/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp index 9e3ca39605..8b72b7f84e 100644 --- a/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp +++ b/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp @@ -20,12 +20,14 @@ * Author: yangyaokai */ -#include +#include "src/chunkserver/raftsnapshot/curve_snapshot_attachment.h" + #include +#include + #include #include -#include "src/chunkserver/raftsnapshot/curve_snapshot_attachment.h" #include "test/fs/mock_local_filesystem.h" namespace curve { @@ -34,11 +36,11 @@ namespace chunkserver { using curve::fs::MockLocalFileSystem; using ::testing::_; -using ::testing::Return; -using ::testing::Mock; using ::testing::DoAll; -using ::testing::ReturnArg; using ::testing::ElementsAre; +using ::testing::Mock; +using ::testing::Return; +using ::testing::ReturnArg; using ::testing::SetArgPointee; using ::testing::UnorderedElementsAre; @@ -53,13 +55,14 @@ class CurveSnapshotAttachmentMockTest : public testing::Test { new CurveSnapshotAttachment(fs_)); } void TearDown() {} + protected: std::shared_ptr fs_; scoped_refptr attachment_; }; TEST_F(CurveSnapshotAttachmentMockTest, ListTest) { - // 返回成功 + // Success case vector fileNames; fileNames.emplace_back("chunk_1"); fileNames.emplace_back("chunk_1_snap_1"); @@ -69,24 +72,21 @@ vector snapFiles; attachment_->list_attach_files(&snapFiles, kRaftSnapDir); - std::string snapPath1 = - "../../data/chunk_1_snap_1"; - std::string snapPath2 = - "../../data/chunk_2_snap_1"; + std::string snapPath1 = "../../data/chunk_1_snap_1"; + std::string snapPath2 = "../../data/chunk_2_snap_1"; EXPECT_THAT(snapFiles, UnorderedElementsAre(snapPath1.c_str(), - snapPath2.c_str())); + EXPECT_THAT(snapFiles, + UnorderedElementsAre(snapPath1.c_str(), snapPath2.c_str())); - // 路径结尾添加反斜杠 + // Append a trailing slash to the path EXPECT_CALL(*fs_, List(kDataDir, _)) .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); attachment_->list_attach_files(&snapFiles, std::string(kRaftSnapDir) + "/"); - EXPECT_THAT(snapFiles, UnorderedElementsAre(snapPath1.c_str(), - snapPath2.c_str())); + EXPECT_THAT(snapFiles, + UnorderedElementsAre(snapPath1.c_str(), snapPath2.c_str())); + // Failure case + EXPECT_CALL(*fs_, List(kDataDir, _)).WillRepeatedly(Return(-1)); ASSERT_DEATH(attachment_->list_attach_files(&snapFiles, kRaftSnapDir), ""); } -} // namespace chunkserver -} // namespace curve +} // namespace chunkserver +} // namespace curve
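ListTest above pins down list_attach_files' contract: list the copyset data directory, keep only chunk snapshot files, and hand them back as paths relative to the raft snapshot directory. A hedged sketch of that filter (illustrative; the real logic lives in curve_snapshot_attachment.cpp):

// Sketch of the filtering ListTest verifies: chunk_1 is skipped, while
// chunk_1_snap_1 and chunk_2_snap_1 come back as ../../data/<name>.
void ListAttachFilesSketch(std::vector<std::string>* files,
                           const std::vector<std::string>& dataDirEntries) {
    for (const auto& name : dataDirEntries) {
        if (name.find("_snap_") != std::string::npos) {
            files->emplace_back("../../data/" + name);
        }
    }
}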
diff --git a/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp b/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp index 94bcc4d5a8..66891bc031 100644 --- a/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp +++ b/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp @@ -21,23 +21,23 @@ */ #include -#include #include +#include #include -#include "test/chunkserver/chunkserver_test_util.h" -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; static constexpr uint32_t kOpRequestAlignSize = 4096; @@ -61,7 +61,7 @@ class RaftSnapFilePoolTest : public testing::Test { Exec(TestCluster::RemoveCopysetDirCmd(peer2).c_str()); Exec(TestCluster::RemoveCopysetDirCmd(peer3).c_str()); Exec(TestCluster::RemoveCopysetDirCmd(peer4).c_str()); - ::usleep(100*1000); + ::usleep(100 * 1000); } public: @@ -74,26 +74,22 @@ class RaftSnapFilePoolTest : public testing::Test { }; /** - * TODO(wudemiao) 后期将发 I/O 和验证再抽象一下 + * TODO(wudemiao): further abstract I/O and verification later */ /** - * 正常 I/O 验证,先写进去,再读出来验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * Normal I/O verification: write data first, then read it back to verify + * @param leaderId Leader ID + * @param logicPoolId Logical pool ID + * @param copysetId Copyset ID + * @param chunkId chunk id + * @param length The length of each IO + * @param fillCh Character used to fill each IO + * @param loop Number of times the IO is repeated */ -static void WriteThenReadVerify(PeerId leaderId, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +static void WriteThenReadVerify(PeerId leaderId, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, + int length, char fillCh, int loop) { brpc::Channel* channel = new brpc::Channel; uint64_t sn = 1; ASSERT_EQ(0, channel->Init(leaderId.addr, NULL)); @@ -108,18 +104,16 @@ static void WriteThenReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(sn); cntl.request_attachment().resize(length, fillCh); ChunkService_Stub stub(channel); stub.WriteChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - if (response.status() == - CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED) { + if (response.status() == CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED) { std::string redirect = response.redirect(); leaderId.parse(redirect); delete channel; @@ -127,8 +121,7 @@ static void WriteThenReadVerify(PeerId leaderId, ASSERT_EQ(0, channel->Init(leaderId.addr, NULL)); continue; } - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); // read { @@ -140,13 +133,12 @@ static void WriteThenReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode() + << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); @@ -158,22 +150,18 @@ }
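WriteThenReadVerify above (and ReadVerify below) rely on the same redirect-retry idiom: an I/O sent to a follower comes back with CHUNK_OP_STATUS_REDIRECTED plus the current leader address, and the loop re-targets the channel and retries. Condensed from the code above (a restatement, not new behavior):

// The redirect handling, condensed.
if (response.status() == CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED) {
    leaderId.parse(response.redirect());  // the response names the leader
    delete channel;
    channel = new brpc::Channel;
    ASSERT_EQ(0, channel->Init(leaderId.addr, NULL));
    continue;  // retry the same I/O against the new leader
}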
CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED) { std::string redirect = response.redirect(); leaderId.parse(redirect); delete channel; @@ -127,8 +121,7 @@ static void WriteThenReadVerify(PeerId leaderId, ASSERT_EQ(0, channel->Init(leaderId.addr, NULL)); continue; } - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); // read { @@ -140,13 +133,12 @@ static void WriteThenReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode() + << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); @@ -158,22 +150,18 @@ static void WriteThenReadVerify(PeerId leaderId, } /** - * 正常 I/O 验证,read 数据验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, read data verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -static void ReadVerify(PeerId leaderId, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +static void ReadVerify(PeerId leaderId, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { brpc::Channel channel; uint64_t sn = 1; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); @@ -187,16 +175,14 @@ static void ReadVerify(PeerId leaderId, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); std::string expectRead(length, fillCh); ASSERT_STREQ(expectRead.c_str(), cntl.response_attachment().to_string().c_str()); @@ -204,18 +190,23 @@ static void ReadVerify(PeerId leaderId, } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 5. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了, install snapshot的数据从FilePool中取文件 - * 6. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 7. transfer leader 到shut down 的peer 上 - * 8. 在 read 之前写入的数据验证 - * 9. 
再 write 数据,再 read 出来验证一遍 + * Verify the shutdown and restart of non-leader nodes in a cluster of 3 nodes, + * and control them to recover from installing snapshots. + * 1. Create a replication group with 3 replicas. + * 2. Wait for the leader to emerge, write data, and then read to verify. + * 3. Shutdown a non-leader node. + * 4. Sleep for a duration longer than a snapshot interval, then write and read + * data. + * 5. Sleep for a duration longer than a snapshot interval again, then write and + * read data. Steps 4 and 5 are to ensure that at least two snapshots are taken. + * Therefore, when the node restarts, it must recover via an install snapshot + * because the log has already been deleted. The data for the install snapshot + * is retrieved from the FilePool. + * 6. Wait for the leader to emerge, then read the previously written data for + * verification. + * 7. Transfer leadership to the shut down peer. + * 8. Verify the data written before the transfer of leadership. + * 9. Write data again, then read it to verify. */ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -238,75 +229,67 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, cluster.StartPeer(peer2, false, true, true)); ASSERT_EQ(0, cluster.StartPeer(peer3, false, true, true)); - // 等待FilePool创建成功 + // Waiting for FilePool creation to succeed std::this_thread::sleep_for(std::chrono::seconds(60)); PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 获取三个chunkserver的FilePool的pool容量 - std::shared_ptr fs(LocalFsFactory::CreateFs( - FileSystemType::EXT4, "")); + // Obtain the pool capacity of FilePool for three chunkservers + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); std::vector Peer1ChunkPoolSize; std::vector Peer2ChunkPoolSize; std::vector Peer3ChunkPoolSize; std::string copysetdir1, copysetdir2, copysetdir3; - butil::string_printf(©setdir1, - "./%s-%d-%d", - butil::ip2str(peer1.addr.ip).c_str(), - peer1.addr.port, + butil::string_printf(©setdir1, "./%s-%d-%d", + butil::ip2str(peer1.addr.ip).c_str(), peer1.addr.port, 0); - butil::string_printf(©setdir2, - "./%s-%d-%d", - butil::ip2str(peer2.addr.ip).c_str(), - peer2.addr.port, + butil::string_printf(©setdir2, "./%s-%d-%d", + butil::ip2str(peer2.addr.ip).c_str(), peer2.addr.port, 0); - butil::string_printf(©setdir3, - "./%s-%d-%d", - butil::ip2str(peer3.addr.ip).c_str(), - peer3.addr.port, + butil::string_printf(©setdir3, "./%s-%d-%d", + butil::ip2str(peer3.addr.ip).c_str(), peer3.addr.port, 0); - fs->List(copysetdir1+"/chunkfilepool", &Peer1ChunkPoolSize); - fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); - fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); + fs->List(copysetdir1 + "/chunkfilepool", &Peer1ChunkPoolSize); + fs->List(copysetdir2 + "/chunkfilepool", &Peer2ChunkPoolSize); + fs->List(copysetdir3 + "/chunkfilepool", &Peer3ChunkPoolSize); - // 目前只有chunk文件才会从FilePool中取 - // raft snapshot meta 和 conf epoch文件直接从文件系统创建 + // Currently, only chunk files are retrieved from FilePool + // raft snapshot meta and conf epoch files are created directly from the + // file system ASSERT_EQ(20, Peer1ChunkPoolSize.size()); ASSERT_EQ(20, Peer2ChunkPoolSize.size()); ASSERT_EQ(20, Peer3ChunkPoolSize.size()); LOG(INFO) << "write 1 start"; - // 发起 read/write, 写数据会触发chunkserver从FilePool取chunk - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Initiate read/write, writing data will 
trigger chunkserver to fetch + // chunks from FilePool WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); LOG(INFO) << "write 1 end"; - // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作 - // 所以先睡一会,防止并发统计文件信息 - ::sleep(1*snapshotTimeoutS); + // Operations are not fully synchronized across raft replicas, and some + // replicas may lag behind. Sleep for a while first, to avoid racing with + // the file statistics collected below. + ::sleep(1 * snapshotTimeoutS); Peer1ChunkPoolSize.clear(); Peer2ChunkPoolSize.clear(); Peer3ChunkPoolSize.clear(); - fs->List(copysetdir1+"/chunkfilepool", &Peer1ChunkPoolSize); - fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); - fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); + fs->List(copysetdir1 + "/chunkfilepool", &Peer1ChunkPoolSize); + fs->List(copysetdir2 + "/chunkfilepool", &Peer2ChunkPoolSize); + fs->List(copysetdir3 + "/chunkfilepool", &Peer3ChunkPoolSize); - // 写完数据后,ChunkFilePool容量少一个 + // After the data is written, the ChunkFilePool holds one fewer chunk ASSERT_EQ(19, Peer1ChunkPoolSize.size()); ASSERT_EQ(19, Peer2ChunkPoolSize.size()); ASSERT_EQ(19, Peer3ChunkPoolSize.size()); - // shutdown 某个非 leader 的 peer + // Shut down a non-leader peer PeerId shutdownPeerid; - if (0 == ::strcmp(leaderId.to_string().c_str(), - peer1.to_string().c_str())) { + if (0 == + ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { shutdownPeerid = peer2; } else { shutdownPeerid = peer1; } @@ -317,68 +300,61 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { leaderId.to_string().c_str())); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid)); - // wait snapshot, 保证能够触发打快照 - // 本次打快照,raft会从FilePool取一个文件作为快照文件 - // 然后会把上一次的快照文件删除,删除过的文件会被回收到FilePool - // 所以总体上本次写入只会导致datastore从FilePool取文件 - // 但是快照取了一个又放回去了一个 - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Wait for a snapshot, to make sure snapshot creation is triggered. + // During this snapshot, raft takes a file from the FilePool as the new + // snapshot file and deletes the previous snapshot file; the deleted file + // is reclaimed into the FilePool. So overall this write round only makes + // the datastore take one file from the FilePool: the snapshot takes one + // file and returns one.
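// Illustrative sketch: the net-zero pool effect described above can be made
// explicit with a small helper built on the same LocalFileSystem::List()
// call this test already uses. The helper name GetChunkPoolSize is
// hypothetical, not part of the test:
auto GetChunkPoolSize = [&fs](const std::string& copysetdir) {
    std::vector<std::string> files;
    fs->List(copysetdir + "/chunkfilepool", &files);
    return files.size();
};
// Each newly written chunk removes exactly one file from the pool, while a
// full snapshot round leaves the count unchanged:
//   size_t before = GetChunkPoolSize(copysetdir1);
//   /* ... wait one snapshot interval ... */
//   ASSERT_EQ(before, GetChunkPoolSize(copysetdir1));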
+ ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again LOG(INFO) << "write 2 start"; - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop); + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 1, loop); LOG(INFO) << "write 2 end"; - ::sleep(1*snapshotTimeoutS); + ::sleep(1 * snapshotTimeoutS); Peer1ChunkPoolSize.clear(); Peer2ChunkPoolSize.clear(); Peer3ChunkPoolSize.clear(); - fs->List(copysetdir1+"/chunkfilepool", &Peer1ChunkPoolSize); - fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); - fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); + fs->List(copysetdir1 + "/chunkfilepool", &Peer1ChunkPoolSize); + fs->List(copysetdir2 + "/chunkfilepool", &Peer2ChunkPoolSize); + fs->List(copysetdir3 + "/chunkfilepool", &Peer3ChunkPoolSize); - // 写完数据后,FilePool容量少一个 + // After writing the data, the FilePool capacity is reduced by one ASSERT_EQ(19, Peer1ChunkPoolSize.size()); ASSERT_EQ(19, Peer2ChunkPoolSize.size()); ASSERT_EQ(19, Peer3ChunkPoolSize.size()); - // wait snapshot, 保证能够触发打快照 - // 本次打快照,raft会从FilePool取一个文件作为快照文件 - // 然后会把上一次的快照文件删除,删除过的文件会被回收到FilePool - // 所以总体上本次写入只会导致datastore从FilePool取文件 - // 但是快照取了一个又放回去了一个 - ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // wait snapshot, to ensure it triggers snapshot creation. + // In this snapshot creation, Raft will retrieve a file from the FilePool as + // the snapshot file. Then, it will delete the previous snapshot file, and + // the deleted file will be reclaimed into the FilePool. So, overall, this + // snapshot creation will only result in the datastore retrieving a file + // from the FilePool, but it involves taking one snapshot and returning + // another to the FilePool. + ::sleep(1.5 * snapshotTimeoutS); + // Initiate read/write again LOG(INFO) << "write 3 start"; - // 增加chunkid,使chunkserver端的chunk又被取走一个 - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId + 1, - length, - ch + 2, - loop); + // Add a chunkid to remove another chunk from the chunkserver side + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId + 1, length, + ch + 2, loop); LOG(INFO) << "write 3 end"; ::sleep(snapshotTimeoutS); Peer1ChunkPoolSize.clear(); Peer2ChunkPoolSize.clear(); Peer3ChunkPoolSize.clear(); - fs->List(copysetdir1+"/chunkfilepool", &Peer1ChunkPoolSize); - fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); - fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); + fs->List(copysetdir1 + "/chunkfilepool", &Peer1ChunkPoolSize); + fs->List(copysetdir2 + "/chunkfilepool", &Peer2ChunkPoolSize); + fs->List(copysetdir3 + "/chunkfilepool", &Peer3ChunkPoolSize); LOG(INFO) << "chunk pool1 size = " << Peer1ChunkPoolSize.size(); LOG(INFO) << "chunk pool2 size = " << Peer2ChunkPoolSize.size(); LOG(INFO) << "chunk pool3 size = " << Peer3ChunkPoolSize.size(); - // 写完数据后,FilePool容量少一个 + // After writing the data, the FilePool capacity is reduced by one if (shutdownPeerid == peer1) { ASSERT_EQ(19, Peer1ChunkPoolSize.size()); ASSERT_EQ(18, Peer2ChunkPoolSize.size()); @@ -388,22 +364,17 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { } ASSERT_EQ(18, Peer3ChunkPoolSize.size()); - // restart, 需要从 install snapshot 恢复 + // restart, needs to be restored from install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid, false, true, false)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 - ReadVerify(leaderId, logicPoolId, copysetId, chunkId + 1, - length, ch + 2, loop); + // Read it 
out and verify it again + ReadVerify(leaderId, logicPoolId, copysetId, chunkId + 1, length, ch + 2, + loop); LOG(INFO) << "write 4 start"; - // 再次发起 read/write - WriteThenReadVerify(leaderId, - logicPoolId, - copysetId, - chunkId, - length, - ch + 3, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, + ch + 3, loop); LOG(INFO) << "write 4 end"; @@ -416,10 +387,7 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - shutdownPeerid, + status = TransferLeader(logicPoolId, copysetId, conf, shutdownPeerid, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderId); @@ -433,20 +401,21 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), shutdownPeerid.to_string().c_str())); - ::sleep(5*snapshotTimeoutS); + ::sleep(5 * snapshotTimeoutS); Peer1ChunkPoolSize.clear(); Peer2ChunkPoolSize.clear(); Peer3ChunkPoolSize.clear(); - fs->List(copysetdir1+"/chunkfilepool", &Peer1ChunkPoolSize); - fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); - fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); + fs->List(copysetdir1 + "/chunkfilepool", &Peer1ChunkPoolSize); + fs->List(copysetdir2 + "/chunkfilepool", &Peer2ChunkPoolSize); + fs->List(copysetdir3 + "/chunkfilepool", &Peer3ChunkPoolSize); LOG(INFO) << "chunk pool1 size = " << Peer1ChunkPoolSize.size(); LOG(INFO) << "chunk pool2 size = " << Peer2ChunkPoolSize.size(); LOG(INFO) << "chunk pool3 size = " << Peer3ChunkPoolSize.size(); - // 当前的raftsnapshot filesystem只存取chunk文件 - // meta文件遵守原有逻辑,直接通过文件系统创建,所以这里只有两个chunk被取出 + // The current raftsnapshot filesystem only accesses chunk files + // The meta file follows the original logic and is created directly through + // the file system, so only two chunks are extracted here ASSERT_EQ(18, Peer1ChunkPoolSize.size()); ASSERT_EQ(18, Peer2ChunkPoolSize.size()); ASSERT_EQ(18, Peer3ChunkPoolSize.size()); diff --git a/test/chunkserver/server.cpp b/test/chunkserver/server.cpp index d6f5d9aa97..50f6f46c1d 100644 --- a/test/chunkserver/server.cpp +++ b/test/chunkserver/server.cpp @@ -20,20 +20,20 @@ * Author: wudemiao */ -#include -#include -#include #include +#include +#include +#include -#include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/copyset_node.h" #include "src/chunkserver/chunk_service.h" -#include "src/fs/fs_common.h" -#include "src/fs/local_filesystem.h" #include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/datastore/file_pool.h" -#include "src/common/uri_parser.h" #include "src/chunkserver/raftsnapshot/curve_snapshot_storage.h" +#include "src/common/uri_parser.h" +#include "src/fs/fs_common.h" +#include "src/fs/local_filesystem.h" using curve::chunkserver::ConcurrentApplyModule; using curve::chunkserver::Configuration; @@ -42,6 +42,7 @@ using curve::chunkserver::CopysetNodeManager; using curve::chunkserver::CopysetNodeOptions; using curve::chunkserver::FilePool; using curve::chunkserver::FilePoolHelper; +using curve::chunkserver::FilePoolMeta; using curve::chunkserver::FilePoolOptions; using curve::chunkserver::LogicPoolID; using curve::chunkserver::PeerId; @@ -52,9 +53,6 @@ using curve::common::UriParser; using 
curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; -using curve::chunkserver::FilePoolHelper; -using curve::chunkserver::FilePoolMeta; DEFINE_string(ip, "127.0.0.1", "Initial configuration of the replication group"); @@ -73,7 +71,7 @@ DEFINE_bool(create_chunkfilepool, true, "create chunkfile pool"); butil::AtExitManager atExitManager; -void CreateChunkFilePool(const std::string &dirname, uint64_t chunksize, +void CreateChunkFilePool(const std::string& dirname, uint64_t chunksize, std::shared_ptr fsptr) { std::string datadir = dirname + "/chunkfilepool"; std::string metapath = dirname + "/chunkfilepool.meta"; @@ -110,7 +108,7 @@ void CreateChunkFilePool(const std::string &dirname, uint64_t chunksize, memcpy(cpopt.metaPath, metapath.c_str(), metapath.size()); FilePoolMeta meta; - meta.chunkSize = cpopt.fileSize; + meta.chunkSize = cpopt.fileSize; meta.metaPageSize = cpopt.metaFileSize; meta.hasBlockSize = true; meta.blockSize = cpopt.blockSize; @@ -120,7 +118,7 @@ void CreateChunkFilePool(const std::string &dirname, uint64_t chunksize, (void)FilePoolHelper::PersistEnCodeMetaInfo(fsptr, meta, metapath); } -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { gflags::ParseCommandLineFlags(&argc, &argv, true); /* Generally you only need one Server. */ @@ -142,7 +140,8 @@ int main(int argc, char *argv[]) { std::shared_ptr fs( LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); const uint32_t kMaxChunkSize = 16 * 1024 * 1024; - // TODO(yyk) 这部分实现不太优雅,后续进行重构 + // TODO(yyk): this part of the implementation is not elegant and will + // be refactored later std::string copysetUri = FLAGS_copyset_dir + "/copysets"; CopysetNodeOptions copysetNodeOptions; copysetNodeOptions.ip = FLAGS_ip; diff --git a/test/chunkserver/trash_test.cpp b/test/chunkserver/trash_test.cpp index 3ddf32f27e..2c28a6015c 100644 --- a/test/chunkserver/trash_test.cpp +++ b/test/chunkserver/trash_test.cpp @@ -512,7 +512,7 @@ TEST_F(TrashTest, recycle_wal_failed) { "curve_log_inprogress_10088")) .WillOnce(Return(-1)); - // 失败的情况下不应删除 + // Should not be deleted in case of failure EXPECT_CALL(*lfs, Delete("./runlog/trash_test0/trash/4294967493.55555")) .Times(0); @@ -556,8 +556,7 @@ TEST_F(TrashTest, recycle_copyset_dir_list_err) { .WillOnce(Return(false)); EXPECT_CALL(*lfs, Mkdir(trashPath)).WillOnce(Return(0)); EXPECT_CALL(*lfs, Rename(dirPath, _, 0)).WillOnce(Return(0)); - EXPECT_CALL(*lfs, List(_, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*lfs, List(_, _)).WillOnce(Return(-1)); ASSERT_EQ(0, trash->RecycleCopySet(dirPath)); } @@ -569,8 +568,7 @@ TEST_F(TrashTest, recycle_copyset_dir_ok) { .WillOnce(Return(false)); EXPECT_CALL(*lfs, Mkdir(trashPath)).WillOnce(Return(0)); EXPECT_CALL(*lfs, Rename(dirPath, _, 0)).WillOnce(Return(0)); - EXPECT_CALL(*lfs, List(_, _)) - .WillOnce(Return(0)); + EXPECT_CALL(*lfs, List(_, _)).WillOnce(Return(0)); ASSERT_EQ(0, trash->RecycleCopySet(dirPath)); } @@ -607,18 +605,18 @@ TEST_F(TrashTest, test_chunk_num_statistic) { // chunk_200_snap_1, abc +1 // log/ - using item4list = struct{ + using item4list = struct { std::string subdir; std::vector& names; }; std::vector action4List{ - { "", copysets }, - { "/4294967493.55555", dirs}, - { "/4294967493.55555/data", chunks1 }, - { "/4294967493.55555/log", logfiles1 }, - { "/4294967494.55555", dirs}, - { "/4294967494.55555/data", chunks2 }, - { "/4294967494.55555/log", logfiles2 }, + {"", copysets}, + {"/4294967493.55555", dirs}, + {"/4294967493.55555/data", 
chunks1}, + {"/4294967493.55555/log", logfiles1}, + {"/4294967494.55555", dirs}, + {"/4294967494.55555/data", chunks2}, + {"/4294967494.55555/log", logfiles2}, }; for (auto& it : action4List) { @@ -627,18 +625,18 @@ TEST_F(TrashTest, test_chunk_num_statistic) { } EXPECT_CALL(*lfs, DirExists(_)) - .WillOnce(Return(true)) // data - .WillOnce(Return(false)) // chunk_100 - .WillOnce(Return(false)) // chunk_101 - .WillOnce(Return(true)) // log - .WillOnce(Return(false)) // curve_log_10086_10087 - .WillOnce(Return(false)) // curve_log_inprogress_10088_10088 - .WillOnce(Return(false)) // log_10083_10084 - .WillOnce(Return(false)) // log_inprogress_10085 - .WillOnce(Return(true)) // data - .WillOnce(Return(false)) // chunk_200_snap_1 - .WillOnce(Return(false)) // abc - .WillOnce(Return(true)); // log + .WillOnce(Return(true)) // data + .WillOnce(Return(false)) // chunk_100 + .WillOnce(Return(false)) // chunk_101 + .WillOnce(Return(true)) // log + .WillOnce(Return(false)) // curve_log_10086_10087 + .WillOnce(Return(false)) // curve_log_inprogress_10088_10088 + .WillOnce(Return(false)) // log_10083_10084 + .WillOnce(Return(false)) // log_inprogress_10085 + .WillOnce(Return(true)) // data + .WillOnce(Return(false)) // chunk_200_snap_1 + .WillOnce(Return(false)) // abc + .WillOnce(Return(true)); // log trash->Init(ops); ASSERT_EQ(5, trash->GetChunkNum()); @@ -657,14 +655,14 @@ TEST_F(TrashTest, test_chunk_num_statistic) { EXPECT_CALL(*lfs, DirExists(_)) .WillOnce(Return(true)) .WillOnce(Return(false)) - .WillOnce(Return(true)) // data + .WillOnce(Return(true)) // data .WillOnce(Return(false)) .WillOnce(Return(false)) - .WillOnce(Return(true)) // log + .WillOnce(Return(true)) // log .WillOnce(Return(false)) - .WillOnce(Return(true)) // raft_snapshot - .WillOnce(Return(true)) // temp - .WillOnce(Return(true)) // data + .WillOnce(Return(true)) // raft_snapshot + .WillOnce(Return(true)) // temp + .WillOnce(Return(true)) // data .WillOnce(Return(false)); std::string trashedCopysetDir = "/trash_test0/copysets/4294967495"; @@ -695,21 +693,21 @@ TEST_F(TrashTest, test_chunk_num_statistic) { std::vector raftfiles{RAFT_DATA_DIR, RAFT_LOG_DIR}; // DirExists - using item4dirExists = struct{ + using item4dirExists = struct { std::string subdir; bool exist; }; std::vector action4DirExists{ - { "", true }, - { "/4294967493.55555", true }, - { "/4294967493.55555/data", true }, - { "/4294967493.55555/log", true }, - { "/4294967493.55555/data/chunk_100", false }, - { "/4294967493.55555/data/chunk_101", false }, - { "/4294967493.55555/log/curve_log_10086_10087", false }, - { "/4294967493.55555/log/curve_log_inprogress_10088", false }, - { "/4294967493.55555/log/log_10083_10084", false }, - { "/4294967493.55555/log/log_inprogress_10085", false }, + {"", true}, + {"/4294967493.55555", true}, + {"/4294967493.55555/data", true}, + {"/4294967493.55555/log", true}, + {"/4294967493.55555/data/chunk_100", false}, + {"/4294967493.55555/data/chunk_101", false}, + {"/4294967493.55555/log/curve_log_10086_10087", false}, + {"/4294967493.55555/log/curve_log_inprogress_10088", false}, + {"/4294967493.55555/log/log_10083_10084", false}, + {"/4294967493.55555/log/log_inprogress_10085", false}, }; for (auto& it : action4DirExists) { @@ -719,10 +717,10 @@ TEST_F(TrashTest, test_chunk_num_statistic) { // List std::vector action4List2{ - { "", copysets }, - { "/4294967493.55555", raftfiles }, - { "/4294967493.55555/data", chunks1 }, - { "/4294967493.55555/log", logfiles1 }, + {"", copysets}, + {"/4294967493.55555", raftfiles}, + 
{"/4294967493.55555/data", chunks1}, + {"/4294967493.55555/log", logfiles1}, }; for (auto& it : action4List2) { @@ -735,16 +733,16 @@ TEST_F(TrashTest, test_chunk_num_statistic) { SetCopysetNeedDelete(trashPath + "/" + copysets[2], notNeedDelete); // RecycleFile - using item4CycleFile = struct{ + using item4CycleFile = struct { std::shared_ptr pool; std::string subdir; int ret; }; std::vector action4CycleFile{ - { pool, "/4294967493.55555/data/chunk_100", 0 }, - { pool, "/4294967493.55555/data/chunk_101", -1 }, - { walPool, "/4294967493.55555/log/curve_log_10086_10087", 0 }, - { walPool, "/4294967493.55555/log/curve_log_inprogress_10088", -1 }, + {pool, "/4294967493.55555/data/chunk_100", 0}, + {pool, "/4294967493.55555/data/chunk_101", -1}, + {walPool, "/4294967493.55555/log/curve_log_10086_10087", 0}, + {walPool, "/4294967493.55555/log/curve_log_inprogress_10088", -1}, }; for (auto& it : action4CycleFile) { diff --git a/test/client/client_common_unittest.cpp b/test/client/client_common_unittest.cpp index d7601e19f1..6052bf93f1 100644 --- a/test/client/client_common_unittest.cpp +++ b/test/client/client_common_unittest.cpp @@ -20,28 +20,29 @@ * Author: tongguangxun */ -#include - #include "src/client/client_common.h" +#include + namespace curve { namespace client { TEST(ClientCommon, PeerAddrTest) { - // 默认构造函数创建的成员变量内容为空 + // The member variable content created by the default constructor is empty PeerAddr chunkaddr; ASSERT_TRUE(chunkaddr.IsEmpty()); EndPoint ep; str2endpoint("127.0.0.1:8000", &ep); - // 从已有的endpoint创建PeerAddr,变量内容非空 + // Create PeerAddr from an existing endpoint, with non empty variable + // content PeerAddr caddr(ep); ASSERT_FALSE(caddr.IsEmpty()); ASSERT_EQ(caddr.addr_.port, 8000); ASSERT_STREQ("127.0.0.1:8000:0", caddr.ToString().c_str()); - // reset置位后成员变量内容为空 + // After resetting, the member variable content is empty caddr.Reset(); ASSERT_TRUE(caddr.IsEmpty()); @@ -49,7 +50,8 @@ TEST(ClientCommon, PeerAddrTest) { PeerAddr caddr2; ASSERT_TRUE(caddr2.IsEmpty()); - // 从字符串中解析出地址信息,字符串不符合解析格式返回-1,"ip:port:index" + // Resolve address information from the string, if the string does not + // conform to the parsing format, return -1, "ip:port:index" std::string ipaddr1("127.0.0.1"); ASSERT_EQ(-1, caddr2.Parse(ipaddr1)); std::string ipaddr2("127.0.0.q:9000:0"); @@ -61,11 +63,12 @@ TEST(ClientCommon, PeerAddrTest) { std::string ipaddr5("127.0.0.1001:9000:0"); ASSERT_EQ(-1, caddr2.Parse(ipaddr5)); - // 从字符串解析地址成功后,成员变量即为非空 + // After successfully resolving the address from the string, the member + // variable becomes non empty ASSERT_EQ(0, caddr2.Parse(ipaddr)); ASSERT_FALSE(caddr2.IsEmpty()); - // 验证非空成员变量是否为预期值 + // Verify if the non empty member variable is the expected value EndPoint ep1; str2endpoint("127.0.0.1:9000", &ep1); ASSERT_EQ(caddr2.addr_, ep1); diff --git a/test/client/client_mdsclient_metacache_unittest.cpp b/test/client/client_mdsclient_metacache_unittest.cpp index cfae5506e1..6f7fd3fdf3 100644 --- a/test/client/client_mdsclient_metacache_unittest.cpp +++ b/test/client/client_mdsclient_metacache_unittest.cpp @@ -20,48 +20,47 @@ * Author: tongguangxun */ -#include -#include -#include +#include #include +#include #include #include -#include -#include +#include +#include +#include +#include +#include //NOLINT #include #include //NOLINT -#include //NOLINT #include -#include +#include "absl/memory/memory.h" +#include "include/client/libcurve.h" #include "src/client/client_common.h" +#include "src/client/client_config.h" +#include "src/client/config_info.h" #include 
"src/client/file_instance.h" -#include "test/client/fake/mockMDS.h" -#include "src/client/metacache.h" -#include "test/client/fake/mock_schedule.h" -#include "include/client/libcurve.h" #include "src/client/libcurve_file.h" -#include "src/client/client_config.h" -#include "src/client/service_helper.h" #include "src/client/mds_client.h" -#include "src/client/config_info.h" -#include "test/client/fake/fakeMDS.h" +#include "src/client/metacache.h" #include "src/client/metacache_struct.h" +#include "src/client/service_helper.h" #include "src/common/net_common.h" +#include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mockMDS.h" +#include "test/client/fake/mock_schedule.h" +#include "test/client/mock/mock_namespace_service.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" -#include "test/client/mock/mock_namespace_service.h" - -#include "absl/memory/memory.h" uint32_t chunk_size = 4 * 1024 * 1024; uint32_t segment_size = 1 * 1024 * 1024 * 1024; -std::string mdsMetaServerAddr = "127.0.0.1:29104"; // NOLINT -std::string configpath = // NOLINT - "./test/client/configs/client_mdsclient_metacache.conf"; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:29104"; // NOLINT +std::string configpath = // NOLINT + "./test/client/configs/client_mdsclient_metacache.conf"; // NOLINT -extern curve::client::FileClient *globalclient; +extern curve::client::FileClient* globalclient; namespace curve { namespace client { @@ -96,10 +95,10 @@ class MDSClientTest : public ::testing::Test { ASSERT_TRUE(false) << "Fail to add service"; } - curve::mds::topology::GetChunkServerInfoResponse *response = + curve::mds::topology::GetChunkServerInfoResponse* response = new curve::mds::topology::GetChunkServerInfoResponse(); response->set_statuscode(0); - curve::mds::topology::ChunkServerInfo *serverinfo = + curve::mds::topology::ChunkServerInfo* serverinfo = new curve::mds::topology::ChunkServerInfo(); serverinfo->set_chunkserverid(888); serverinfo->set_disktype("nvme"); @@ -113,8 +112,8 @@ class MDSClientTest : public ::testing::Test { serverinfo->set_diskcapacity(11111); serverinfo->set_diskused(1111); response->set_allocated_chunkserverinfo(serverinfo); - FakeReturn *getcsret = - new FakeReturn(nullptr, static_cast(response)); // NOLINT + FakeReturn* getcsret = + new FakeReturn(nullptr, static_cast(response)); // NOLINT topologyservice.SetGetChunkserverFakeReturn(getcsret); brpc::ServerOptions options; @@ -150,8 +149,8 @@ TEST_F(MDSClientTest, Createfile) { ::curve::mds::CreateFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateFileFakeReturn(fakeret); @@ -163,19 +162,18 @@ TEST_F(MDSClientTest, Createfile) { ::curve::mds::CreateFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetCreateFileFakeReturn(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Create(filename.c_str(), userinfo, len)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); 
curvefsservice.SetCreateFileFakeReturn(fakeret2); curvefsservice.CleanRetryTimes(); @@ -197,8 +195,8 @@ TEST_F(MDSClientTest, MkDir) { ::curve::mds::CreateFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateFileFakeReturn(fakeret); @@ -215,20 +213,18 @@ TEST_F(MDSClientTest, MkDir) { ::curve::mds::CreateFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetCreateFileFakeReturn(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Mkdir(dirpath.c_str(), userinfo)); - - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetCreateFileFakeReturn(fakeret2); curvefsservice.CleanRetryTimes(); @@ -250,8 +246,8 @@ TEST_F(MDSClientTest, Closefile) { ::curve::mds::CloseFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCloseFile(fakeret); LOG(INFO) << "now create file!"; @@ -259,25 +255,23 @@ TEST_F(MDSClientTest, Closefile) { mdsclient_.CloseFile(filename.c_str(), userinfo, "sessid"); ASSERT_EQ(ret, LIBCURVE_ERROR::NOTEXIST); - // file close ok ::curve::mds::CloseFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetCloseFile(fakeret1); LOG(INFO) << "now create file!"; ret = mdsclient_.CloseFile(filename.c_str(), userinfo, "sessid"); ASSERT_EQ(ret, LIBCURVE_ERROR::OK); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetCloseFile(fakeret2); curvefsservice.CleanRetryTimes(); @@ -296,8 +290,8 @@ TEST_F(MDSClientTest, Openfile) { ::curve::mds::OpenFileResponse openresponse; openresponse.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice.SetOpenFile(fakeret); FInfo finfo; @@ -308,7 +302,7 @@ TEST_F(MDSClientTest, Openfile) { // has protosession no fileinfo ::curve::mds::OpenFileResponse openresponse1; - ::curve::mds::ProtoSession *se = new ::curve::mds::ProtoSession; + ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession; se->set_sessionid("1"); se->set_createtime(12345); se->set_leasetime(10000000); @@ -317,8 +311,8 @@ TEST_F(MDSClientTest, Openfile) { openresponse1.set_statuscode(::curve::mds::StatusCode::kOK); openresponse1.set_allocated_protosession(se); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&openresponse1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, 
static_cast(&openresponse1)); curvefsservice.SetOpenFile(fakeret1); ASSERT_EQ(globalclient->Open(filename, userinfo), -LIBCURVE_ERROR::FAILED); @@ -326,13 +320,13 @@ TEST_F(MDSClientTest, Openfile) { // has protosession and finfo ::curve::mds::OpenFileResponse openresponse2; - ::curve::mds::ProtoSession *se2 = new ::curve::mds::ProtoSession; + ::curve::mds::ProtoSession* se2 = new ::curve::mds::ProtoSession; se2->set_sessionid("1"); se2->set_createtime(12345); se2->set_leasetime(10000000); se2->set_sessionstatus(::curve::mds::SessionStatus::kSessionOK); - ::curve::mds::FileInfo *fin = new ::curve::mds::FileInfo; + ::curve::mds::FileInfo* fin = new ::curve::mds::FileInfo; fin->set_filename("_filename_"); fin->set_id(1); fin->set_parentid(0); @@ -347,21 +341,21 @@ TEST_F(MDSClientTest, Openfile) { openresponse2.set_allocated_protosession(se2); openresponse2.set_allocated_fileinfo(fin); - FakeReturn *fakeret2 = - new FakeReturn(nullptr, static_cast(&openresponse2)); + FakeReturn* fakeret2 = + new FakeReturn(nullptr, static_cast(&openresponse2)); curvefsservice.SetOpenFile(fakeret2); ASSERT_EQ(globalclient->Open(filename, userinfo), LIBCURVE_ERROR::OK); ASSERT_EQ(LIBCURVE_ERROR::OK, Write(0, nullptr, 0, 0)); ASSERT_EQ(LIBCURVE_ERROR::OK, Read(0, nullptr, 0, 0)); - ::curve::mds::ProtoSession *socupied = new ::curve::mds::ProtoSession; + ::curve::mds::ProtoSession* socupied = new ::curve::mds::ProtoSession; socupied->set_sessionid("1"); socupied->set_createtime(12345); socupied->set_leasetime(10000000); socupied->set_sessionstatus(::curve::mds::SessionStatus::kSessionOK); - ::curve::mds::FileInfo *focupied = new ::curve::mds::FileInfo; + ::curve::mds::FileInfo* focupied = new ::curve::mds::FileInfo; focupied->set_filename("_filename_"); focupied->set_id(1); focupied->set_parentid(0); @@ -381,14 +375,14 @@ TEST_F(MDSClientTest, Openfile) { refreshresponse.set_statuscode(::curve::mds::StatusCode::kOK); refreshresponse.set_sessionid("2"); - FakeReturn *r = - new FakeReturn(nullptr, static_cast(&responseOccupied)); + FakeReturn* r = + new FakeReturn(nullptr, static_cast(&responseOccupied)); curvefsservice.SetOpenFile(r); - FakeReturn *refreshret = - new FakeReturn(nullptr, static_cast(&refreshresponse)); + FakeReturn* refreshret = + new FakeReturn(nullptr, static_cast(&refreshresponse)); curvefsservice.SetRefreshSession(refreshret, []() {}); - curve::mds::FileInfo *info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::GetFileInfoResponse getinforesponse; info->set_filename("_filename_"); info->set_id(1); @@ -402,8 +396,8 @@ TEST_F(MDSClientTest, Openfile) { getinforesponse.set_allocated_fileinfo(info); getinforesponse.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakegetinfo = - new FakeReturn(nullptr, static_cast(&getinforesponse)); + FakeReturn* fakegetinfo = + new FakeReturn(nullptr, static_cast(&getinforesponse)); curvefsservice.SetGetFileInfoFakeReturn(fakegetinfo); int fd = globalclient->Open(filename, userinfo); @@ -411,12 +405,12 @@ TEST_F(MDSClientTest, Openfile) { ASSERT_EQ(LIBCURVE_ERROR::OK, Write(fd, nullptr, 0, 0)); ASSERT_EQ(LIBCURVE_ERROR::OK, Read(fd, nullptr, 0, 0)); - // 测试关闭文件 + // Test closing file ::curve::mds::CloseFileResponse closeresp; closeresp.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakecloseret = - new FakeReturn(nullptr, static_cast(&closeresp)); + FakeReturn* fakecloseret = + new FakeReturn(nullptr, static_cast(&closeresp)); curvefsservice.SetCloseFile(fakecloseret); 
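// Illustrative sketch: the AioWrite()/AioRead() calls a few lines below take
// a CurveAioContext describing the async request. Populating one looks
// roughly like this; the field names are assumed from
// include/client/libcurve.h and should be checked against it, and the no-op
// completion callback is hypothetical (a real caller would inspect ret and
// signal completion there):
CurveAioContext sketchCtx;
sketchCtx.op = LIBCURVE_OP_WRITE;  // or LIBCURVE_OP_READ
sketchCtx.offset = 0;
sketchCtx.length = 0;              // this test issues zero-length probes
sketchCtx.buf = nullptr;
sketchCtx.cb = [](CurveAioContext* done) { /* completion callback */ };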
globalclient->Close(fd); @@ -426,12 +420,12 @@ TEST_F(MDSClientTest, Openfile) { ASSERT_EQ(LIBCURVE_ERROR::OK, AioWrite(fd, &aioctx)); ASSERT_EQ(LIBCURVE_ERROR::OK, AioRead(fd, &aioctx)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret3 = - new FakeReturn(&cntl, static_cast(&openresponse2)); + FakeReturn* fakeret3 = + new FakeReturn(&cntl, static_cast(&openresponse2)); curvefsservice.SetOpenFile(fakeret3); curvefsservice.CleanRetryTimes(); @@ -441,8 +435,8 @@ TEST_F(MDSClientTest, Openfile) { ::curve::mds::CloseFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kSessionNotExist); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetCloseFile(fakeret4); globalclient->Close(0); @@ -458,8 +452,8 @@ TEST_F(MDSClientTest, Renamefile) { ::curve::mds::RenameFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetRenameFile(fakeret); @@ -475,8 +469,8 @@ TEST_F(MDSClientTest, Renamefile) { ::curve::mds::RenameFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetRenameFile(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, @@ -486,8 +480,8 @@ TEST_F(MDSClientTest, Renamefile) { ::curve::mds::RenameFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret3 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret3 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetRenameFile(fakeret3); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST, @@ -497,8 +491,8 @@ TEST_F(MDSClientTest, Renamefile) { ::curve::mds::RenameFileResponse response3; response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response3)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response3)); curvefsservice.SetRenameFile(fakeret4); ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL, @@ -508,19 +502,18 @@ TEST_F(MDSClientTest, Renamefile) { ::curve::mds::RenameFileResponse response4; response4.set_statuscode(::curve::mds::StatusCode::kStorageError); - FakeReturn *fakeret5 = - new FakeReturn(nullptr, static_cast(&response4)); + FakeReturn* fakeret5 = + new FakeReturn(nullptr, static_cast(&response4)); curvefsservice.SetRenameFile(fakeret5); ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, globalclient->Rename(userinfo, filename1, filename2)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetRenameFile(fakeret2); curvefsservice.CleanRetryTimes(); @@ -543,8 +536,8 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, 
static_cast(&response)); curvefsservice.SetExtendFile(fakeret); @@ -560,8 +553,8 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetExtendFile(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, @@ -571,8 +564,8 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret3 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret3 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetExtendFile(fakeret3); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST, @@ -582,8 +575,8 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response3; response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response3)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response3)); curvefsservice.SetExtendFile(fakeret4); ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL, @@ -593,8 +586,8 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response4; response4.set_statuscode(::curve::mds::StatusCode::kStorageError); - FakeReturn *fakeret5 = - new FakeReturn(nullptr, static_cast(&response4)); + FakeReturn* fakeret5 = + new FakeReturn(nullptr, static_cast(&response4)); curvefsservice.SetExtendFile(fakeret5); ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, @@ -604,19 +597,18 @@ TEST_F(MDSClientTest, Extendfile) { ::curve::mds::ExtendFileResponse response5; response5.set_statuscode(::curve::mds::StatusCode::kShrinkBiggerFile); - FakeReturn *fakeret6 = - new FakeReturn(nullptr, static_cast(&response5)); + FakeReturn* fakeret6 = + new FakeReturn(nullptr, static_cast(&response5)); curvefsservice.SetExtendFile(fakeret6); ASSERT_EQ(-1 * LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE, globalclient->Extend(filename1, userinfo, newsize)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetExtendFile(fakeret2); curvefsservice.CleanRetryTimes(); @@ -640,8 +632,8 @@ TEST_F(MDSClientTest, Deletefile) { ::curve::mds::DeleteFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetDeleteFile(fakeret); @@ -652,8 +644,8 @@ TEST_F(MDSClientTest, Deletefile) { ::curve::mds::DeleteFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetDeleteFile(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Unlink(filename1, userinfo)); @@ -662,8 +654,8 @@ TEST_F(MDSClientTest, Deletefile) { ::curve::mds::DeleteFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret3 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret3 = + new FakeReturn(nullptr, 
static_cast(&response2)); curvefsservice.SetDeleteFile(fakeret3); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST, @@ -673,8 +665,8 @@ TEST_F(MDSClientTest, Deletefile) { ::curve::mds::DeleteFileResponse response3; response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response3)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response3)); curvefsservice.SetDeleteFile(fakeret4); ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL, @@ -684,26 +676,25 @@ TEST_F(MDSClientTest, Deletefile) { ::curve::mds::DeleteFileResponse response4; response4.set_statuscode(::curve::mds::StatusCode::kStorageError); - FakeReturn *fakeret5 = - new FakeReturn(nullptr, static_cast(&response4)); + FakeReturn* fakeret5 = + new FakeReturn(nullptr, static_cast(&response4)); curvefsservice.SetDeleteFile(fakeret5); ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, globalclient->Unlink(filename1, userinfo)); - // 设置delete force + // Set delete force fiu_init(0); fiu_enable("test/client/fake/fakeMDS/forceDeleteFile", 1, nullptr, 0); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOT_SUPPORT, globalclient->Unlink(filename1, userinfo, true)); fiu_disable("test/client/fake/fakeMDS/forceDeleteFile"); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetDeleteFile(fakeret2); curvefsservice.CleanRetryTimes(); @@ -727,8 +718,8 @@ TEST_F(MDSClientTest, Rmdir) { ::curve::mds::DeleteFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetDeleteFile(fakeret); @@ -744,8 +735,8 @@ TEST_F(MDSClientTest, Rmdir) { ::curve::mds::DeleteFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetDeleteFile(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Rmdir(filename1, userinfo)); @@ -754,8 +745,8 @@ TEST_F(MDSClientTest, Rmdir) { ::curve::mds::DeleteFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret3 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret3 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetDeleteFile(fakeret3); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST, @@ -765,8 +756,8 @@ TEST_F(MDSClientTest, Rmdir) { ::curve::mds::DeleteFileResponse response3; response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response3)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response3)); curvefsservice.SetDeleteFile(fakeret4); ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL, @@ -776,19 +767,18 @@ TEST_F(MDSClientTest, Rmdir) { ::curve::mds::DeleteFileResponse response4; response4.set_statuscode(::curve::mds::StatusCode::kStorageError); - FakeReturn *fakeret5 = - new FakeReturn(nullptr, static_cast(&response4)); + FakeReturn* fakeret5 = + new FakeReturn(nullptr, static_cast(&response4)); curvefsservice.SetDeleteFile(fakeret5); ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, 
globalclient->Rmdir(filename1, userinfo)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetDeleteFile(fakeret2); curvefsservice.CleanRetryTimes(); @@ -802,7 +792,7 @@ TEST_F(MDSClientTest, Rmdir) { TEST_F(MDSClientTest, StatFile) { std::string filename = "/1_userinfo_"; - curve::mds::FileInfo *info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::GetFileInfoResponse response; info->set_filename("_filename_"); info->set_id(1); @@ -816,11 +806,11 @@ TEST_F(MDSClientTest, StatFile) { response.set_allocated_fileinfo(info); response.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetGetFileInfoFakeReturn(fakeret); - curve::client::FInfo_t *finfo = new curve::client::FInfo_t; + curve::client::FInfo_t* finfo = new curve::client::FInfo_t; FileStatInfo fstat; globalclient->StatFile(filename, userinfo, &fstat); @@ -831,12 +821,11 @@ TEST_F(MDSClientTest, StatFile) { ASSERT_EQ(fstat.ctime, 12345678); ASSERT_EQ(fstat.length, 4 * 1024 * 1024 * 1024ul); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetGetFileInfoFakeReturn(fakeret2); curvefsservice.CleanRetryTimes(); @@ -872,7 +861,7 @@ TEST_F(MDSClientTest, GetFileInfo) { response.set_statuscode(::curve::mds::StatusCode::kOK); auto fakeret = absl::make_unique( - nullptr, static_cast(&response)); + nullptr, static_cast(&response)); curvefsservice.SetGetFileInfoFakeReturn(fakeret.get()); curve::client::FileEpoch_t fEpoch; @@ -890,19 +879,19 @@ TEST_F(MDSClientTest, GetFileInfo) { ASSERT_EQ(finfo->segmentsize, 1 * 1024 * 1024 * 1024ul); ASSERT_EQ(finfo->blocksize, hasBlockSize ? 
blocksize : 4096); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - auto fakeret2 = absl::make_unique( - &cntl, static_cast(&response)); + auto fakeret2 = + absl::make_unique(&cntl, static_cast(&response)); curvefsservice.SetGetFileInfoFakeReturn(fakeret2.get()); curvefsservice.CleanRetryTimes(); - ASSERT_EQ(LIBCURVE_ERROR::FAILED, - mdsclient_.GetFileInfo(filename, userinfo, finfo.get(), - &fEpoch)); + ASSERT_EQ( + LIBCURVE_ERROR::FAILED, + mdsclient_.GetFileInfo(filename, userinfo, finfo.get(), &fEpoch)); } } @@ -940,7 +929,7 @@ TEST_F(MDSClientTest, GetOrAllocateSegment) { // checkTimer(10000, 11000); curve::mds::GetOrAllocateSegmentResponse response; - curve::mds::PageFileSegment *pfs = new curve::mds::PageFileSegment; + curve::mds::PageFileSegment* pfs = new curve::mds::PageFileSegment; response.set_statuscode(::curve::mds::StatusCode::kOK); response.set_allocated_pagefilesegment(pfs); response.mutable_pagefilesegment()->set_logicalpoolid(1234); @@ -953,8 +942,8 @@ TEST_F(MDSClientTest, GetOrAllocateSegment) { chunk->set_copysetid(i); chunk->set_chunkid(i); } - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetGetOrAllocateSegmentFakeReturn(fakeret); ::curve::mds::topology::GetChunkServerListInCopySetsResponse response_1; @@ -971,8 +960,8 @@ TEST_F(MDSClientTest, GetOrAllocateSegment) { cslocs->set_port(5000 + j); } } - FakeReturn *faktopologyeret = - new FakeReturn(nullptr, static_cast(&response_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(&response_1)); topologyservice.SetFakeReturn(faktopologyeret); curve::client::MetaCache mc; @@ -1035,8 +1024,8 @@ TEST_F(MDSClientTest, GetOrAllocateSegment) { GetChunkServerListInCopySetsResponse response_2; response_2.set_statuscode(-1); - FakeReturn *faktopologyeret_2 = - new FakeReturn(nullptr, static_cast(&response_2)); + FakeReturn* faktopologyeret_2 = + new FakeReturn(nullptr, static_cast(&response_2)); topologyservice.SetFakeReturn(faktopologyeret_2); uint32_t csid; @@ -1097,8 +1086,8 @@ TEST_F(MDSClientTest, GetServerList) { response_1.set_statuscode(0); uint32_t chunkserveridc = 1; - ::curve::common::ChunkServerLocation *cslocs; - ::curve::mds::topology::CopySetServerInfo *csinfo; + ::curve::common::ChunkServerLocation* cslocs; + ::curve::mds::topology::CopySetServerInfo* csinfo; for (int j = 0; j < 256; j++) { csinfo = response_1.add_csinfo(); csinfo->set_copysetid(j); @@ -1111,8 +1100,8 @@ TEST_F(MDSClientTest, GetServerList) { } } - FakeReturn *faktopologyeret = - new FakeReturn(nullptr, static_cast(&response_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(&response_1)); topologyservice.SetFakeReturn(faktopologyeret); std::vector cpidvec; @@ -1222,12 +1211,12 @@ TEST_F(MDSClientTest, GetLeaderTest) { mc.UpdateCopysetInfo(1234, 1234, cslist); - // 测试复制组里第三个addr为leader + // The third addr in the test replication group is the leader curve::chunkserver::GetLeaderResponse2 response1; - curve::common::Peer *peer1 = new curve::common::Peer(); + curve::common::Peer* peer1 = new curve::common::Peer(); peer1->set_address(peerinfo_3.internalAddr.ToString()); response1.set_allocated_leader(peer1); - FakeReturn fakeret1(nullptr, static_cast(&response1)); + FakeReturn fakeret1(nullptr, static_cast(&response1)); cliservice1.SetFakeReturn(&fakeret1); cliservice2.SetFakeReturn(&fakeret1); 
cliservice3.SetFakeReturn(&fakeret1); @@ -1245,12 +1234,14 @@ TEST_F(MDSClientTest, GetLeaderTest) { butil::str2endpoint("127.0.0.1", 29122, &expected); EXPECT_EQ(expected, leaderep); - // 测试拉取新leader失败,需要到mds重新fetch新的serverlist - // 当前新leader是3,尝试再刷新leader,这个时候会从1, 2获取leader - // 但是这时候leader找不到了,于是就会触发向mds重新拉取最新的server list + // The test failed to retrieve the new leader, and a new serverlist needs to + // be retrieved from the mds The current new leader is 3. Try refreshing the + // leader again, and at this time, the leader will be obtained from 1 and 2 + // But at this point, the leader cannot be found, so it will trigger a new + // pull of the latest server list from the mds brpc::Controller controller11; controller11.SetFailed(-1, "error"); - FakeReturn fakeret111(&controller11, static_cast(&response1)); + FakeReturn fakeret111(&controller11, static_cast(&response1)); cliservice1.SetFakeReturn(&fakeret111); cliservice2.SetFakeReturn(&fakeret111); cliservice3.SetFakeReturn(&fakeret111); @@ -1259,8 +1250,8 @@ TEST_F(MDSClientTest, GetLeaderTest) { response_1.set_statuscode(0); uint32_t chunkserveridc = 1; - ::curve::common::ChunkServerLocation *cslocs; - ::curve::mds::topology::CopySetServerInfo *csinfo; + ::curve::common::ChunkServerLocation* cslocs; + ::curve::mds::topology::CopySetServerInfo* csinfo; csinfo = response_1.add_csinfo(); csinfo->set_copysetid(1234); for (int i = 0; i < 4; i++) { @@ -1271,28 +1262,31 @@ TEST_F(MDSClientTest, GetLeaderTest) { cslocs->set_port(29120 + i); } - FakeReturn *faktopologyeret = - new FakeReturn(nullptr, static_cast(&response_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(&response_1)); topologyservice.SetFakeReturn(faktopologyeret); cliservice1.CleanInvokeTimes(); cliservice2.CleanInvokeTimes(); cliservice3.CleanInvokeTimes(); - // 向当前集群中拉取leader,然后会从mds一侧获取新server list + // Pull the leader from the current cluster, and then obtain a new server + // list from the mds side EXPECT_EQ(0, mc.GetLeader(1234, 1234, &ckid, &leaderep, true)); - // getleader请求会跳过当前leader + // The getleader request will skip the current leader EXPECT_EQ(0, cliservice3.GetInvokeTimes()); - // 因为从mds获取新的copyset信息了,所以其leader信息被重置了,需要重新获取新leader - // 获取新新的leader,这时候会从1,2,3,4这四个server拉取新leader,并成功获取新leader + // Because the new copyset information was obtained from the mds, its leader + // information has been reset and a new leader needs to be obtained Obtain a + // new leader, which will be pulled from servers 1, 2, 3, and 4 and + // successfully obtain the new leader std::string leader = "10.182.26.2:29123:0"; peer1 = new curve::common::Peer(); peer1->set_address(leader); peer1->set_id(4321); response1.set_allocated_leader(peer1); - fakeret1 = FakeReturn(nullptr, static_cast(&response1)); + fakeret1 = FakeReturn(nullptr, static_cast(&response1)); cliservice1.SetFakeReturn(&fakeret1); cliservice2.SetFakeReturn(&fakeret1); @@ -1309,7 +1303,8 @@ TEST_F(MDSClientTest, GetLeaderTest) { cliservice3.CleanInvokeTimes(); cliservice4.CleanInvokeTimes(); - // refresh为false,所以只会从metacache中获取,不会发起rpc请求 + // Refresh is false, so it will only be obtained from the metacache and will + // not initiate rpc requests EXPECT_EQ(0, mc.GetLeader(1234, 1234, &ckid, &leaderep, false)); EXPECT_EQ(expected, leaderep); EXPECT_EQ(0, cliservice1.GetInvokeTimes()); @@ -1317,13 +1312,14 @@ TEST_F(MDSClientTest, GetLeaderTest) { EXPECT_EQ(0, cliservice3.GetInvokeTimes()); EXPECT_EQ(0, cliservice4.GetInvokeTimes()); - // 测试新增一个leader,该节点不在配置组内, 然后通过向mds - // 查询其chunkserverInfo之后, 
将其成功插入metacache - curve::common::Peer *peer7 = new curve::common::Peer(); + // Add a new leader to the test, which is not in the configuration group, + // and then add it to the mds After querying its chunkserverInfo, + // successfully insert it into the metacache + curve::common::Peer* peer7 = new curve::common::Peer(); leader = "10.182.26.2:29124:0"; peer7->set_address(leader); response1.set_allocated_leader(peer7); - FakeReturn fakeret44(nullptr, static_cast(&response1)); + FakeReturn fakeret44(nullptr, static_cast(&response1)); cliservice1.SetFakeReturn(&fakeret44); cliservice2.SetFakeReturn(&fakeret44); cliservice3.SetFakeReturn(&fakeret44); @@ -1355,19 +1351,18 @@ TEST_F(MDSClientTest, GetLeaderTest) { LOG(INFO) << "GetLeaderTest stopped"; } - TEST_F(MDSClientTest, GetFileInfoException) { std::string filename = "/1_userinfo_"; - FakeReturn *fakeret = nullptr; - curve::client::FInfo_t *finfo = nullptr; + FakeReturn* fakeret = nullptr; + curve::client::FInfo_t* finfo = nullptr; FileEpoch_t fEpoch; { - curve::mds::FileInfo *info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::GetFileInfoResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); response.set_allocated_fileinfo(info); - fakeret = new FakeReturn(nullptr, static_cast(&response)); + fakeret = new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetGetFileInfoFakeReturn(fakeret); finfo = new curve::client::FInfo_t; @@ -1376,7 +1371,7 @@ TEST_F(MDSClientTest, GetFileInfoException) { } { - curve::mds::FileInfo *info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::GetFileInfoResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); info->clear_parentid(); @@ -1389,7 +1384,7 @@ TEST_F(MDSClientTest, GetFileInfoException) { info->clear_segmentsize(); response.set_allocated_fileinfo(info); - fakeret = new FakeReturn(nullptr, static_cast(&response)); + fakeret = new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetGetFileInfoFakeReturn(fakeret); finfo = new curve::client::FInfo_t; @@ -1405,17 +1400,17 @@ TEST_F(MDSClientTest, CreateCloneFile) { std::string filename = "/1_userinfo_"; FInfo finfo; - curve::mds::FileInfo *info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); curve::mds::CreateCloneFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); - FakeReturn *fakecreateclone = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakecreateclone = + new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetCreateCloneFile(fakecreateclone); curvefsservice.CleanRetryTimes(); @@ -1424,12 +1419,12 @@ TEST_F(MDSClientTest, CreateCloneFile) { mdsclient_.CreateCloneFile("source", "destination", userinfo, 10 * 1024 * 1024, 0, 4 * 1024 * 1024, 0, 0, "default", &finfo)); - // 认证失败 + // Authentication failed curve::mds::CreateCloneFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakecreateclone1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakecreateclone1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetCreateCloneFile(fakecreateclone1); @@ -1437,14 +1432,14 @@ TEST_F(MDSClientTest, CreateCloneFile) { mdsclient_.CreateCloneFile("source", "destination", userinfo, 10 * 1024 * 1024, 0, 4 * 
1024 * 1024, 0, 0, "default", &finfo)); - // 请求成功 + // Request successful info->set_id(5); curve::mds::CreateCloneFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kOK); response2.set_allocated_fileinfo(info); - FakeReturn *fakecreateclone2 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakecreateclone2 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetCreateCloneFile(fakecreateclone2); @@ -1463,15 +1458,15 @@ TEST_F(MDSClientTest, CreateCloneFile) { TEST_F(MDSClientTest, CompleteCloneMeta) { std::string filename = "/1_userinfo_"; - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); curve::mds::SetCloneFileStatusResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); - FakeReturn *fakecreateclone = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakecreateclone = + new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetCloneFileStatus(fakecreateclone); curvefsservice.CleanRetryTimes(); @@ -1479,23 +1474,23 @@ TEST_F(MDSClientTest, CompleteCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::FAILED, mdsclient_.CompleteCloneMeta("destination", userinfo)); - // 认证失败 + // Authentication failed curve::mds::SetCloneFileStatusResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakecreateclone1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakecreateclone1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetCloneFileStatus(fakecreateclone1); ASSERT_EQ(LIBCURVE_ERROR::AUTHFAIL, mdsclient_.CompleteCloneMeta("destination", userinfo)); - // 请求成功 + // Request successful curve::mds::SetCloneFileStatusResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakecreateclone2 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakecreateclone2 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetCloneFileStatus(fakecreateclone2); @@ -1506,15 +1501,15 @@ TEST_F(MDSClientTest, CompleteCloneMeta) { TEST_F(MDSClientTest, CompleteCloneFile) { std::string filename = "/1_userinfo_"; - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); curve::mds::SetCloneFileStatusResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); - FakeReturn *fakecreateclone = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakecreateclone = + new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetCloneFileStatus(fakecreateclone); curvefsservice.CleanRetryTimes(); @@ -1522,23 +1517,23 @@ TEST_F(MDSClientTest, CompleteCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::FAILED, mdsclient_.CompleteCloneFile("destination", userinfo)); - // 认证失败 + // Authentication failed curve::mds::SetCloneFileStatusResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakecreateclone1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakecreateclone1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetCloneFileStatus(fakecreateclone1); ASSERT_EQ(LIBCURVE_ERROR::AUTHFAIL, mdsclient_.CompleteCloneFile("destination", userinfo)); - // 请求成功 + // Request successful curve::mds::SetCloneFileStatusResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakecreateclone2 = - new FakeReturn(nullptr, static_cast(&response2)); + 
FakeReturn* fakecreateclone2 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetCloneFileStatus(fakecreateclone2); @@ -1556,8 +1551,8 @@ TEST_F(MDSClientTest, ChangeOwner) { ::curve::mds::ChangeOwnerResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetChangeOwner(fakeret); @@ -1568,8 +1563,8 @@ TEST_F(MDSClientTest, ChangeOwner) { ::curve::mds::ChangeOwnerResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetChangeOwner(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, @@ -1579,8 +1574,8 @@ TEST_F(MDSClientTest, ChangeOwner) { ::curve::mds::ChangeOwnerResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret3 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret3 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetChangeOwner(fakeret3); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST, @@ -1590,8 +1585,8 @@ TEST_F(MDSClientTest, ChangeOwner) { ::curve::mds::ChangeOwnerResponse response3; response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response3)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response3)); curvefsservice.SetChangeOwner(fakeret4); ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL, @@ -1601,19 +1596,18 @@ TEST_F(MDSClientTest, ChangeOwner) { ::curve::mds::ChangeOwnerResponse response4; response4.set_statuscode(::curve::mds::StatusCode::kStorageError); - FakeReturn *fakeret5 = - new FakeReturn(nullptr, static_cast(&response4)); + FakeReturn* fakeret5 = + new FakeReturn(nullptr, static_cast(&response4)); curvefsservice.SetChangeOwner(fakeret5); ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, globalclient->ChangeOwner(filename1, "newowner", userinfo)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetChangeOwner(fakeret2); curvefsservice.CleanRetryTimes(); @@ -1634,7 +1628,7 @@ TEST_F(MDSClientTest, ListChunkServerTest_CntlFailed) { cntl.SetFailed(-1, "Failed"); std::unique_ptr fakeret( - new FakeReturn(&cntl, static_cast(&response))); + new FakeReturn(&cntl, static_cast(&response))); topologyservice.SetFakeReturn(fakeret.get()); auto startTime = curve::common::TimeUtility::GetTimeofDayMs(); @@ -1652,7 +1646,7 @@ TEST_F(MDSClientTest, ListChunkServerTest_ResponseError) { std::string ip = "127.0.0.1:6666"; std::unique_ptr fakeret( - new FakeReturn(nullptr, static_cast(&response))); + new FakeReturn(nullptr, static_cast(&response))); topologyservice.SetFakeReturn(fakeret.get()); ASSERT_EQ(LIBCURVE_ERROR::FAILED, @@ -1680,7 +1674,7 @@ TEST_F(MDSClientTest, ListChunkServerTest_ResponseOK) { std::string ip = "127.0.0.1:6666"; std::unique_ptr fakeret( - new FakeReturn(nullptr, static_cast(&response))); + new FakeReturn(nullptr, static_cast(&response))); topologyservice.SetFakeReturn(fakeret.get()); std::vector returnIds; @@ -1697,8 +1691,8 @@ TEST_F(MDSClientTest, ListDir) { 
::curve::mds::ListDirResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetListDir(fakeret); @@ -1724,16 +1718,16 @@ TEST_F(MDSClientTest, ListDir) { fin->set_owner("test"); } - FakeReturn *fakeret1 = - new FakeReturn(nullptr, static_cast(&response1)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response1)); curvefsservice.SetListDir(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Listdir(filename1, userinfo, &filestatVec)); C_UserInfo_t cuserinfo; memcpy(cuserinfo.owner, "test", 5); - FileStatInfo *filestat = new FileStatInfo[5]; - DirInfo_t *dir = OpenDir(filename1.c_str(), &cuserinfo); + FileStatInfo* filestat = new FileStatInfo[5]; + DirInfo_t* dir = OpenDir(filename1.c_str(), &cuserinfo); ASSERT_NE(dir, nullptr); ASSERT_EQ(-LIBCURVE_ERROR::FAILED, Listdir(nullptr)); ASSERT_EQ(LIBCURVE_ERROR::OK, Listdir(dir)); @@ -1767,8 +1761,8 @@ TEST_F(MDSClientTest, ListDir) { ::curve::mds::ListDirResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kDirNotExist); - FakeReturn *fakeret3 = - new FakeReturn(nullptr, static_cast(&response2)); + FakeReturn* fakeret3 = + new FakeReturn(nullptr, static_cast(&response2)); curvefsservice.SetListDir(fakeret3); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOTEXIST, @@ -1778,8 +1772,8 @@ TEST_F(MDSClientTest, ListDir) { ::curve::mds::ListDirResponse response3; response3.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn *fakeret4 = - new FakeReturn(nullptr, static_cast(&response3)); + FakeReturn* fakeret4 = + new FakeReturn(nullptr, static_cast(&response3)); curvefsservice.SetListDir(fakeret4); ASSERT_EQ(-1 * LIBCURVE_ERROR::AUTHFAIL, @@ -1789,19 +1783,18 @@ TEST_F(MDSClientTest, ListDir) { ::curve::mds::ListDirResponse response4; response4.set_statuscode(::curve::mds::StatusCode::kStorageError); - FakeReturn *fakeret5 = - new FakeReturn(nullptr, static_cast(&response4)); + FakeReturn* fakeret5 = + new FakeReturn(nullptr, static_cast(&response4)); curvefsservice.SetListDir(fakeret5); ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, globalclient->Listdir(filename1, userinfo, &filestatVec)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); - FakeReturn *fakeret2 = - new FakeReturn(&cntl, static_cast(&response)); + FakeReturn* fakeret2 = new FakeReturn(&cntl, static_cast(&response)); curvefsservice.SetListDir(fakeret2); curvefsservice.CleanRetryTimes(); @@ -1816,7 +1809,7 @@ TEST_F(MDSClientTest, ListDir) { TEST(LibcurveInterface, InvokeWithOutInit) { CurveAioContext aioctx; UserInfo_t userinfo; - C_UserInfo_t *ui = nullptr; + C_UserInfo_t* ui = nullptr; FileClient fc; ASSERT_EQ(-LIBCURVE_ERROR::FAILED, fc.Create("", userinfo, 0)); @@ -1859,10 +1852,10 @@ class ServiceHelperGetLeaderTest : public MDSClientTest { using GetLeaderResponse2 = curve::chunkserver::GetLeaderResponse2; void SetUp() override { - // 添加service,并启动server + // Add a service and start the server for (int i = 0; i < kChunkServerNum; ++i) { - auto &chunkserver = chunkServers[i]; - auto &fakeCliService = fakeCliServices[i]; + auto& chunkserver = chunkServers[i]; + auto& fakeCliService = fakeCliServices[i]; ASSERT_EQ(0, chunkserver.AddService( &fakeCliService, brpc::SERVER_DOESNT_OWN_SERVICE)) << "Fail to add service"; @@ -1870,7 +1863,7 @@ class ServiceHelperGetLeaderTest : public 
MDSClientTest {
            brpc::ServerOptions options;
            options.idle_timeout_sec = -1;
-            const auto &ipPort =
+            const auto& ipPort =
                "127.0.0.1:" + std::to_string(chunkserverPorts[i]);
            ASSERT_EQ(0, chunkserver.Start(ipPort.c_str(), &options))
                << "Fail to start server";
@@ -1886,7 +1879,7 @@ class ServiceHelperGetLeaderTest : public MDSClientTest {
            externalAddrs[i] = PeerAddr(endpoint);
        }

-        // 设置copyset peer信息
+        // Set up the copyset peer information
        for (int i = 0; i < kChunkServerNum; ++i) {
            curve::client::CopysetPeerInfo peerinfo;
            peerinfo.peerID = i + 1;
@@ -1900,7 +1893,7 @@ class ServiceHelperGetLeaderTest : public MDSClientTest {
    }

    void ResetAllFakeCliService() {
-        for (auto &cliService : fakeCliServices) {
+        for (auto& cliService : fakeCliServices) {
            cliService.CleanInvokeTimes();
            cliService.ClearDelay();
            cliService.ClearErrorCode();
@@ -1909,7 +1902,7 @@ class ServiceHelperGetLeaderTest : public MDSClientTest {
    int GetAllInvokeTimes() {
        int total = 0;
-        for (auto &cliService : fakeCliServices) {
+        for (auto& cliService : fakeCliServices) {
            total += cliService.GetInvokeTimes();
        }

@@ -1917,29 +1910,29 @@ class ServiceHelperGetLeaderTest : public MDSClientTest {
    }

    void TearDown() override {
-        for (auto &server : chunkServers) {
+        for (auto& server : chunkServers) {
            server.Stop(0);
            server.Join();
        }
    }

-    GetLeaderResponse2 MakeResponse(const curve::client::PeerAddr &addr) {
+    GetLeaderResponse2 MakeResponse(const curve::client::PeerAddr& addr) {
        GetLeaderResponse2 response;
-        curve::common::Peer *peer = new curve::common::Peer();
+        curve::common::Peer* peer = new curve::common::Peer();
        peer->set_address(addr.ToString());
        response.set_allocated_leader(peer);

        return response;
    }

-    void SetGetLeaderResponse(const curve::client::PeerAddr &addr) {
+    void SetGetLeaderResponse(const curve::client::PeerAddr& addr) {
        static GetLeaderResponse2 response;
        response = MakeResponse(addr);

        static FakeReturn fakeret(nullptr, nullptr);
-        fakeret = FakeReturn(nullptr, static_cast<void*>(&response));
+        fakeret = FakeReturn(nullptr, static_cast<void*>(&response));

-        for (auto &cliService : fakeCliServices) {
+        for (auto& cliService : fakeCliServices) {
            cliService.SetFakeReturn(&fakeret);
        }

@@ -1971,16 +1964,16 @@ class ServiceHelperGetLeaderTest : public MDSClientTest {
};

TEST_F(ServiceHelperGetLeaderTest, NormalTest) {
-    // 测试复制组里第一个chunkserver为leader
+    // Test with the first chunkserver in the replication group as the leader
    GetLeaderResponse2 response = MakeResponse(internalAddrs[0]);

-    FakeReturn fakeret0(nullptr, static_cast<void*>(&response));
+    FakeReturn fakeret0(nullptr, static_cast<void*>(&response));
    fakeCliServices[0].SetFakeReturn(&fakeret0);

-    FakeReturn fakeret1(nullptr, static_cast<void*>(&response));
+    FakeReturn fakeret1(nullptr, static_cast<void*>(&response));
    fakeCliServices[1].SetFakeReturn(&fakeret1);

-    FakeReturn fakeret2(nullptr, static_cast<void*>(&response));
+    FakeReturn fakeret2(nullptr, static_cast<void*>(&response));
    fakeCliServices[2].SetFakeReturn(&fakeret2);

    GetLeaderRpcOption rpcOption;
@@ -1993,14 +1986,15 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) {

    ResetAllFakeCliService();

-    // 测试第二次拉取新的leader,直接跳过第一个chunkserver,查找第2,3两个
+    // Test fetching a new leader for the second time: the first chunkserver
+    // is skipped directly and only the second and third are queried
    int32_t currentLeaderIndex = 0;
    curve::client::PeerAddr currentLeader = internalAddrs[currentLeaderIndex];

    response = MakeResponse(currentLeader);
-    fakeret1 = FakeReturn(nullptr, static_cast<void*>(&response));
+    fakeret1 = FakeReturn(nullptr, static_cast<void*>(&response));
    fakeCliServices[1].SetFakeReturn(&fakeret1);
-    fakeret2 = FakeReturn(nullptr, static_cast<void*>(&response));
+    fakeret2 = FakeReturn(nullptr, static_cast<void*>(&response));
    fakeCliServices[2].SetFakeReturn(&fakeret2);

    getLeaderInfo = GetLeaderInfo(kLogicPoolId, kCopysetId, copysetPeerInfos,
@@ -2012,15 +2006,16 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) {

    ResetAllFakeCliService();

-    // 测试第三次获取leader,会跳过第二个chunkserver,重试1/3
+    // Test obtaining the leader for the third time: the second chunkserver
+    // is skipped and peers 1/3 are retried
    currentLeaderIndex = 1;
    currentLeader = internalAddrs[currentLeaderIndex];

    response = MakeResponse(currentLeader);
-    fakeret1 = FakeReturn(nullptr, static_cast<void*>(&response));
+    fakeret1 = FakeReturn(nullptr, static_cast<void*>(&response));
    fakeCliServices[1].SetFakeReturn(&fakeret1);

-    fakeret2 = FakeReturn(nullptr, static_cast<void*>(&response));
+    fakeret2 = FakeReturn(nullptr, static_cast<void*>(&response));
    fakeCliServices[2].SetFakeReturn(&fakeret2);

    getLeaderInfo = GetLeaderInfo(kLogicPoolId, kCopysetId, copysetPeerInfos,
@@ -2034,13 +2029,14 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) {
}

TEST_F(ServiceHelperGetLeaderTest, RpcDelayTest) {
-    // 设置第三个chunkserver为leader
+    // Set the third chunkserver as the leader
    const auto currentLeaderIndex = 2;
-    const auto &currentLeader = internalAddrs[2];
+    const auto& currentLeader = internalAddrs[2];
    SetGetLeaderResponse(currentLeader);

-    // 再次GetLeader会向chunkserver 1/2 发送请求
-    // 在chunksever GetLeader service 中加入sleep,触发backup request
+    // Calling GetLeader again sends requests to chunkservers 1/2
+    // Add a sleep to the chunkserver GetLeader service to trigger a backup
+    // request
    fakeCliServices[0].SetDelayMs(200);
    fakeCliServices[1].SetDelayMs(200);

@@ -2063,25 +2059,26 @@ TEST_F(ServiceHelperGetLeaderTest, RpcDelayAndExceptionTest) {
    std::vector<int> exceptionErrCodes{ENOENT, EAGAIN, EHOSTDOWN,
                                       ECONNREFUSED, ECONNRESET, brpc::ELOGOFF};

-    // 设置第三个chunkserver为leader,GetLeader会向chunkserver 1/2发送请求
+    // Set the third chunkserver as the leader; GetLeader will send requests
+    // to chunkservers 1/2
    const auto currentLeaderIndex = 2;
-    const auto &currentLeader = internalAddrs[currentLeaderIndex];
+    const auto& currentLeader = internalAddrs[currentLeaderIndex];
    SetGetLeaderResponse(currentLeader);

-    // 设置第一个chunkserver GetLeader service 延迟
+    // Add a delay to the first chunkserver's GetLeader service
    fakeCliServices[0].SetDelayMs(200);

-    // 设置第二个chunkserver 返回对应的错误码
+    // Make the second chunkserver return the corresponding error code
    for (auto errCode : exceptionErrCodes) {
        fakeCliServices[1].SetErrorCode(errCode);

        brpc::Controller controller;
        controller.SetFailed(errCode, "Failed");

-        curve::common::Peer *peer = new curve::common::Peer();
+        curve::common::Peer* peer = new curve::common::Peer();
        peer->set_address(currentLeader.ToString());
        GetLeaderResponse2 response;
        response.set_allocated_leader(peer);
-        FakeReturn fakeret(&controller, static_cast<void*>(&response));
+        FakeReturn fakeret(&controller, static_cast<void*>(&response));
        fakeCliServices[1].SetFakeReturn(&fakeret);

        GetLeaderRpcOption rpcOption;
@@ -2095,7 +2092,7 @@ TEST_F(ServiceHelperGetLeaderTest, RpcDelayAndExceptionTest) {
        std::this_thread::sleep_for(std::chrono::seconds(1));
        ASSERT_EQ(currentLeader, leaderAddr);

-        for (auto &cliservice : fakeCliServices) {
+        for (auto& cliservice : fakeCliServices) {
            cliservice.CleanInvokeTimes();
        }
    }
@@ -2105,25 +2102,25 @@ TEST_F(ServiceHelperGetLeaderTest, AllChunkServerExceptionTest) {
    std::vector<int> exceptionErrCodes{ENOENT, EAGAIN, EHOSTDOWN,
                                       ECONNREFUSED, ECONNRESET,
brpc::ELOGOFF}; - // 设置第三个chunkserver为leader + // Set the third chunkserver as the leader const auto currentLeaderIndex = 2; - const auto ¤tLeader = internalAddrs[currentLeaderIndex]; + const auto& currentLeader = internalAddrs[currentLeaderIndex]; SetGetLeaderResponse(currentLeader); - // 另外两个chunkserver都返回对应的错误码 + // The other two chunkservers both return corresponding error codes for (auto errCode : exceptionErrCodes) { fakeCliServices[0].SetErrorCode(errCode); fakeCliServices[1].SetErrorCode(errCode); brpc::Controller controller; controller.SetFailed(errCode, "Failed"); - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address(currentLeader.ToString()); GetLeaderResponse2 response; response.set_allocated_leader(peer); - FakeReturn fakeret(&controller, static_cast(&response)); + FakeReturn fakeret(&controller, static_cast(&response)); fakeCliServices[0].SetFakeReturn(&fakeret); fakeCliServices[1].SetFakeReturn(&fakeret); @@ -2178,8 +2175,8 @@ TEST_F(MDSClientTest, StatFileStatusTest) { response.set_allocated_fileinfo(info.release()); response.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetGetFileInfoFakeReturn(fakeret); std::unique_ptr finfo( @@ -2208,7 +2205,7 @@ TEST_F(MDSClientTest, DeAllocateSegmentTest) { brpc::Controller cntl; cntl.SetFailed(-1, "rpc failed"); - FakeReturn *fakeRet = new FakeReturn(&cntl, nullptr); + FakeReturn* fakeRet = new FakeReturn(&cntl, nullptr); curvefsservice.SetDeAllocateSegmentFakeReturn(fakeRet); uint64_t startMs = curve::common::TimeUtility::GetTimeofDayMs(); @@ -2222,7 +2219,7 @@ TEST_F(MDSClientTest, DeAllocateSegmentTest) { { curve::mds::DeAllocateSegmentResponse response; response.set_statuscode(curve::mds::StatusCode::kOK); - FakeReturn *fakeRet = new FakeReturn(nullptr, &response); + FakeReturn* fakeRet = new FakeReturn(nullptr, &response); curvefsservice.SetDeAllocateSegmentFakeReturn(fakeRet); ASSERT_EQ(LIBCURVE_ERROR::OK, @@ -2233,7 +2230,7 @@ TEST_F(MDSClientTest, DeAllocateSegmentTest) { { curve::mds::DeAllocateSegmentResponse response; response.set_statuscode(curve::mds::StatusCode::kSegmentNotAllocated); - FakeReturn *fakeRet = new FakeReturn(nullptr, &response); + FakeReturn* fakeRet = new FakeReturn(nullptr, &response); curvefsservice.SetDeAllocateSegmentFakeReturn(fakeRet); ASSERT_EQ(LIBCURVE_ERROR::OK, @@ -2251,7 +2248,7 @@ TEST_F(MDSClientTest, DeAllocateSegmentTest) { for (auto err : errorCodes) { curve::mds::DeAllocateSegmentResponse response; response.set_statuscode(err); - FakeReturn *fakeRet = new FakeReturn(nullptr, &response); + FakeReturn* fakeRet = new FakeReturn(nullptr, &response); curvefsservice.SetDeAllocateSegmentFakeReturn(fakeRet); ASSERT_NE(LIBCURVE_ERROR::OK, @@ -2272,10 +2269,10 @@ using ::testing::SaveArgPointee; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; -static void MockRefreshSession(::google::protobuf::RpcController *controller, - const curve::mds::ReFreshSessionRequest *request, - curve::mds::ReFreshSessionResponse *response, - ::google::protobuf::Closure *done) { +static void MockRefreshSession(::google::protobuf::RpcController* controller, + const curve::mds::ReFreshSessionRequest* request, + curve::mds::ReFreshSessionResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard guard(done); 
response->set_statuscode(curve::mds::StatusCode::kOK); @@ -2317,7 +2314,7 @@ TEST_F(MDSClientRefreshSessionTest, StartDummyServerTest) { curve::mds::ReFreshSessionRequest request; curve::mds::ReFreshSessionResponse response; - curve::mds::FileInfo *fileInfo = new curve::mds::FileInfo(); + curve::mds::FileInfo* fileInfo = new curve::mds::FileInfo(); response.set_allocated_fileinfo(fileInfo); EXPECT_CALL(curveFsService_, RefreshSession(_, _, _, _)) .WillOnce(DoAll(SaveArgPointee<1>(&request), SetArgPointee<2>(response), @@ -2344,7 +2341,7 @@ TEST_F(MDSClientRefreshSessionTest, NoStartDummyServerTest) { curve::mds::ReFreshSessionRequest request; curve::mds::ReFreshSessionResponse response; - curve::mds::FileInfo *fileInfo = new curve::mds::FileInfo(); + curve::mds::FileInfo* fileInfo = new curve::mds::FileInfo(); response.set_allocated_fileinfo(fileInfo); EXPECT_CALL(curveFsService_, RefreshSession(_, _, _, _)) .WillOnce(DoAll(SaveArgPointee<1>(&request), SetArgPointee<2>(response), @@ -2376,7 +2373,7 @@ const std::vector clientConf{ std::string("throttle.enable=true"), }; -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); diff --git a/test/client/client_metric_test.cpp b/test/client/client_metric_test.cpp index 2f092fc79f..4072bd60f4 100644 --- a/test/client/client_metric_test.cpp +++ b/test/client/client_metric_test.cpp @@ -20,37 +20,38 @@ * Author: tongguangxun */ -#include +#include "src/client/client_metric.h" + #include #include +#include -#include // NOLINT -#include // NOLINT -#include // NOLINT -#include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT -#include "proto/nameserver2.pb.h" #include "include/client/libcurve.h" -#include "src/client/client_metric.h" -#include "src/client/file_instance.h" -#include "test/client/fake/mock_schedule.h" -#include "test/client/fake/fakeMDS.h" -#include "src/client/libcurve_file.h" +#include "proto/nameserver2.pb.h" #include "src/client/client_common.h" #include "src/client/client_config.h" +#include "src/client/file_instance.h" +#include "src/client/libcurve_file.h" +#include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mock_schedule.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" DECLARE_string(chunkserver_list); -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string mdsMetaServerAddr = "127.0.0.1:9150"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9150"; // NOLINT namespace curve { namespace client { -const std::vector clientConf { +const std::vector clientConf{ std::string("mds.listen.addr=127.0.0.1:9150"), std::string("global.logPath=./runlog/"), std::string("chunkserver.rpcTimeoutMS=1000"), @@ -64,7 +65,7 @@ const std::vector clientConf { }; TEST(MetricTest, ChunkServer_MetricTest) { - MetaServerOption metaopt; + MetaServerOption metaopt; metaopt.rpcRetryOpt.addrs.push_back(mdsMetaServerAddr); metaopt.rpcRetryOpt.rpcTimeoutMs = 500; metaopt.rpcRetryOpt.rpcRetryIntervalUS = 200; @@ -72,25 +73,26 @@ TEST(MetricTest, ChunkServer_MetricTest) { std::shared_ptr mdsclient = std::make_shared(); ASSERT_EQ(0, mdsclient->Initialize(metaopt)); - FLAGS_chunkserver_list = "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT + FLAGS_chunkserver_list = + 
"127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT std::string configpath("./test/client/configs/client_metric.conf"); curve::CurveCluster* cluster = new curve::CurveCluster(); - cluster->PrepareConfig( - configpath, clientConf); + cluster->PrepareConfig(configpath, + clientConf); ClientConfig cc; ASSERT_EQ(0, cc.Init(configpath.c_str())); - // filename必须是全路径 + // The filename must be a full path std::string filename = "/1_userinfo_"; // init mds service FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9130, &ep); PeerId pd(ep); @@ -147,13 +149,13 @@ TEST(MetricTest, ChunkServer_MetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(4096, ret); - // 先睡眠,确保采样 + // Sleep first to ensure sampling std::this_thread::sleep_for(std::chrono::seconds(2)); ASSERT_GT(fm->writeRPC.latency.max_latency(), 0); ASSERT_GT(fm->readRPC.latency.max_latency(), 0); - // read write超时重试 + // Read write timeout retry mds.EnableNetUnstable(8000); ret = fi.Write(buffer, 0, 4096); ASSERT_EQ(-2, ret); @@ -165,8 +167,8 @@ TEST(MetricTest, ChunkServer_MetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(-2, ret); - - // 4次正确读写,4次超时读写,超时会引起重试,重试次数为3,数据量最大是8192 + // 4 correct reads and writes, 4 timeout reads and writes, timeout will + // cause retries, retry count is 3, and the maximum data volume is 8192 ASSERT_EQ(fm->inflightRPCNum.get_value(), 0); ASSERT_EQ(fm->userRead.qps.count.get_value(), 2); ASSERT_EQ(fm->userWrite.qps.count.get_value(), 2); @@ -204,7 +206,7 @@ void cb(CurveAioContext* ctx) { } // namespace TEST(MetricTest, SlowRequestMetricTest) { - MetaServerOption metaopt; + MetaServerOption metaopt; metaopt.rpcRetryOpt.addrs.push_back(mdsMetaServerAddr); metaopt.rpcRetryOpt.rpcTimeoutMs = 500; metaopt.rpcRetryOpt.rpcRetryIntervalUS = 200; @@ -212,16 +214,17 @@ TEST(MetricTest, SlowRequestMetricTest) { std::shared_ptr mdsclient = std::make_shared(); ASSERT_EQ(0, mdsclient->Initialize(metaopt)); - FLAGS_chunkserver_list = "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT + FLAGS_chunkserver_list = + "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT - // filename必须是全路径 + // The filename must be a full path std::string filename = "/1_userinfo_"; // init mds service FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9130, &ep); PeerId pd(ep); @@ -267,13 +270,13 @@ TEST(MetricTest, SlowRequestMetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(4096, ret); - // 先睡眠,确保采样 + // Sleep first to ensure sampling std::this_thread::sleep_for(std::chrono::seconds(2)); ASSERT_GT(fm->writeRPC.latency.max_latency(), 0); ASSERT_GT(fm->readRPC.latency.max_latency(), 0); - // read write超时重试 + // Read write timeout retry mds.EnableNetUnstable(100); ret = fi.Write(buffer, 0, 4096); ASSERT_EQ(-2, ret); @@ -383,5 +386,5 @@ TEST(MetricTest, MetricHelperTest) { ASSERT_NO_THROW(MetricHelper::IncremSlowRequestNum(nullptr)); } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/test/client/client_session_unittest.cpp b/test/client/client_session_unittest.cpp index 5606204b83..680d80ce93 100644 --- a/test/client/client_session_unittest.cpp +++ b/test/client/client_session_unittest.cpp @@ -20,30 +20,29 @@ * Author: tongguangxun */ -#include -#include +#include +#include #include #include -#include +#include +#include +#include #include #include -#include -#include - 
-#include // NOLINT -#include // NOLINT #include +#include // NOLINT +#include // NOLINT #include -#include // NOLINT +#include // NOLINT #include #include "src/client/client_config.h" -#include "test/client/fake/fakeMDS.h" #include "src/client/file_instance.h" #include "src/client/iomanager4file.h" #include "src/client/libcurve_file.h" #include "test/client/fake/fakeChunkserver.h" +#include "test/client/fake/fakeMDS.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" @@ -72,7 +71,7 @@ void sessioncallback(CurveAioContext* aioctx) { TEST(ClientSession, LeaseTaskTest) { FLAGS_chunkserver_list = - "127.0.0.1:9176:0,127.0.0.1:9177:0,127.0.0.1:9178:0"; + "127.0.0.1:9176:0,127.0.0.1:9177:0,127.0.0.1:9178:0"; std::string filename = "/1"; @@ -80,7 +79,7 @@ TEST(ClientSession, LeaseTaskTest) { FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid curve::client::EndPoint ep; butil::str2endpoint("127.0.0.1", 9176, &ep); PeerId pd(ep); @@ -104,7 +103,7 @@ TEST(ClientSession, LeaseTaskTest) { // set openfile response ::curve::mds::OpenFileResponse openresponse; - curve::mds::FileInfo * finfo = new curve::mds::FileInfo; + curve::mds::FileInfo* finfo = new curve::mds::FileInfo; ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession; se->set_sessionid("1"); se->set_createtime(12345); @@ -117,8 +116,8 @@ TEST(ClientSession, LeaseTaskTest) { openresponse.set_allocated_protosession(se); openresponse.set_allocated_fileinfo(finfo); - FakeReturn* openfakeret - = new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* openfakeret = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice->SetOpenFile(openfakeret); // 2. set refresh response @@ -129,7 +128,7 @@ TEST(ClientSession, LeaseTaskTest) { std::unique_lock lk(mtx); refreshcv.notify_one(); }; - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; info->set_filename(filename); info->set_seqnum(2); info->set_id(1); @@ -143,8 +142,8 @@ TEST(ClientSession, LeaseTaskTest) { refreshresp.set_statuscode(::curve::mds::StatusCode::kOK); refreshresp.set_sessionid("1234"); refreshresp.set_allocated_fileinfo(info); - FakeReturn* refreshfakeret - = new FakeReturn(nullptr, static_cast(&refreshresp)); + FakeReturn* refreshfakeret = + new FakeReturn(nullptr, static_cast(&refreshresp)); curvefsservice->SetRefreshSession(refreshfakeret, refresht); // 3. open the file @@ -253,10 +252,9 @@ TEST(ClientSession, LeaseTaskTest) { refreshresp.set_allocated_fileinfo(newFileInfo); refreshresp.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* refreshFakeRetWithNewInodeId = new FakeReturn( - nullptr, static_cast(&refreshresp)); - curvefsservice->SetRefreshSession( - refreshFakeRetWithNewInodeId, refresht); + FakeReturn* refreshFakeRetWithNewInodeId = + new FakeReturn(nullptr, static_cast(&refreshresp)); + curvefsservice->SetRefreshSession(refreshFakeRetWithNewInodeId, refresht); { std::unique_lock lk(mtx); @@ -302,8 +300,8 @@ TEST(ClientSession, LeaseTaskTest) { // 11. 
set fake close return ::curve::mds::CloseFileResponse closeresp; closeresp.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* closefileret - = new FakeReturn(nullptr, static_cast(&closeresp)); + FakeReturn* closefileret = + new FakeReturn(nullptr, static_cast(&closeresp)); curvefsservice->SetCloseFile(closefileret); LOG(INFO) << "uninit fileinstance"; @@ -321,12 +319,12 @@ TEST(ClientSession, LeaseTaskTest) { } // namespace client } // namespace curve -std::string mdsMetaServerAddr = "127.0.0.1:9101"; // NOLINT -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string configpath = "./test/client/configs/client_session.conf"; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9101"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string configpath = "./test/client/configs/client_session.conf"; // NOLINT -const std::vector clientConf { +const std::vector clientConf{ std::string("mds.listen.addr=127.0.0.1:9101,127.0.0.1:9102"), std::string("global.logPath=./runlog/"), std::string("chunkserver.rpcTimeoutMS=1000"), @@ -337,18 +335,17 @@ const std::vector clientConf { std::string("metacache.rpcRetryIntervalUS=500"), std::string("mds.rpcRetryIntervalUS=500"), std::string("schedule.threadpoolSize=2"), - std::string("mds.maxRetryMS=5000") -}; + std::string("mds.maxRetryMS=5000")}; -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); google::ParseCommandLineFlags(&argc, &argv, false); curve::CurveCluster* cluster = new curve::CurveCluster(); - cluster->PrepareConfig( - configpath, clientConf); + cluster->PrepareConfig(configpath, + clientConf); int ret = RUN_ALL_TESTS(); return ret; diff --git a/test/client/client_unstable_helper_test.cpp b/test/client/client_unstable_helper_test.cpp index cbb62891a5..4ef1c6487c 100644 --- a/test/client/client_unstable_helper_test.cpp +++ b/test/client/client_unstable_helper_test.cpp @@ -20,10 +20,11 @@ * Author: wuhanqing */ -#include -#include -#include #include +#include +#include +#include + #include #include "src/client/unstable_helper.h" @@ -48,50 +49,51 @@ TEST(UnstableHelperTest, normal_test) { chunkservers.emplace_back(std::make_pair(i, ep)); } - // 先对每个chunkserver进行10次连续超时 + // First, perform 10 consecutive timeouts on each chunkserver for (const auto& cs : chunkservers) { for (int i = 1; i <= opt.maxStableChunkServerTimeoutTimes; ++i) { helper.IncreTimeout(cs.first); ASSERT_EQ(UnstableState::NoUnstable, - helper.GetCurrentUnstableState( - cs.first, cs.second)); + helper.GetCurrentUnstableState(cs.first, cs.second)); } } - // 再对每个chunkserver增加一次超时 - // 前两个是chunkserver unstable状态,第三个是server unstable + // Add another timeout to each chunkserver + // The first two are in the chunkserver unstable state, and the third is in + // the server unstable state helper.IncreTimeout(chunkservers[0].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[0].first, chunkservers[0].second)); + helper.GetCurrentUnstableState(chunkservers[0].first, + chunkservers[0].second)); helper.IncreTimeout(chunkservers[1].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[1].first, chunkservers[1].second)); + helper.GetCurrentUnstableState(chunkservers[1].first, + chunkservers[1].second)); helper.IncreTimeout(chunkservers[2].first); 
    ASSERT_EQ(UnstableState::ServerUnstable,
-              helper.GetCurrentUnstableState(
-                  chunkservers[2].first, chunkservers[2].second));
+              helper.GetCurrentUnstableState(chunkservers[2].first,
+                                             chunkservers[2].second));

-    // 继续增加超时次数
-    // 这种情况下,每次都是chunkserver unstable
+    // Continue to increase the number of timeouts
+    // In this case, the result is always chunkserver unstable
    helper.IncreTimeout(chunkservers[0].first);
    ASSERT_EQ(UnstableState::ChunkServerUnstable,
-              helper.GetCurrentUnstableState(
-                  chunkservers[0].first, chunkservers[0].second));
+              helper.GetCurrentUnstableState(chunkservers[0].first,
+                                             chunkservers[0].second));
    helper.IncreTimeout(chunkservers[1].first);
    ASSERT_EQ(UnstableState::ChunkServerUnstable,
-              helper.GetCurrentUnstableState(
-                  chunkservers[1].first, chunkservers[1].second));
+              helper.GetCurrentUnstableState(chunkservers[1].first,
+                                             chunkservers[1].second));
    helper.IncreTimeout(chunkservers[2].first);
    ASSERT_EQ(UnstableState::ChunkServerUnstable,
-              helper.GetCurrentUnstableState(
-                  chunkservers[2].first, chunkservers[2].second));
+              helper.GetCurrentUnstableState(chunkservers[2].first,
+                                             chunkservers[2].second));

-    // 新chunkserver第一次超时,根据ip判断,可以直接设置为chunkserver unstable
+    // When a new chunkserver times out for the first time, it can be marked
+    // chunkserver unstable directly, judging by its IP address
    butil::EndPoint ep;
    butil::str2endpoint("127.100.0.1:60999", &ep);
    auto chunkserver4 = std::make_pair(4, ep);
@@ -99,22 +101,22 @@ TEST(UnstableHelperTest, normal_test) {

    helper.IncreTimeout(chunkserver4.first);
    ASSERT_EQ(UnstableState::ChunkServerUnstable,
-              helper.GetCurrentUnstableState(
-                  chunkserver4.first, chunkserver4.second));
+              helper.GetCurrentUnstableState(chunkserver4.first,
+                                             chunkserver4.second));

-    // 其他ip的chunkserver
+    // Chunkservers on other IPs
    butil::str2endpoint("127.200.0.1:60999", &ep);
    auto chunkserver5 = std::make_pair(5, ep);
    for (int i = 1; i <= opt.maxStableChunkServerTimeoutTimes; ++i) {
        helper.IncreTimeout(chunkserver5.first);
        ASSERT_EQ(UnstableState::NoUnstable,
-                  helper.GetCurrentUnstableState(
-                      chunkserver5.first, chunkserver5.second));
+                  helper.GetCurrentUnstableState(chunkserver5.first,
+                                                 chunkserver5.second));
    }

    helper.IncreTimeout(chunkserver5.first);
    ASSERT_EQ(UnstableState::ChunkServerUnstable,
-              helper.GetCurrentUnstableState(
-                  chunkserver5.first, chunkserver5.second));
+              helper.GetCurrentUnstableState(chunkserver5.first,
+                                             chunkserver5.second));
}

}  // namespace client
diff --git a/test/client/client_userinfo_unittest.cpp b/test/client/client_userinfo_unittest.cpp
index 6153f23e5e..442af59c6f 100644
--- a/test/client/client_userinfo_unittest.cpp
+++ b/test/client/client_userinfo_unittest.cpp
@@ -20,23 +20,23 @@
 * Author: tongguangxun
 */

-#include
+#include
 #include
 #include
-#include

-#include   // NOLINT
 #include
+#include   // NOLINT
 #include
-#include   // NOLINT
+#include   // NOLINT
 #include

 #include "include/client/libcurve.h"
 #include "src/client/client_common.h"
-#include "test/client/fake/fakeMDS.h"
-#include "src/client/libcurve_file.h"
 #include "src/client/iomanager4chunk.h"
+#include "src/client/libcurve_file.h"
 #include "src/client/libcurve_snapshot.h"
+#include "test/client/fake/fakeMDS.h"

extern std::string mdsMetaServerAddr;
extern std::string configpath;
@@ -70,8 +70,8 @@ class CurveClientUserAuthFail : public ::testing::Test {
        ASSERT_EQ(0, server.Join());
    }

-    brpc::Server server;
-    MetaServerOption metaopt;
+    brpc::Server server;
+    MetaServerOption metaopt;
    FakeMDSCurveFSService curvefsservice;
    FakeMDSTopologyService topologyservice;
};
@@ -102,7 +102,7 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { // set openfile response ::curve::mds::OpenFileResponse openresponse; - curve::mds::FileInfo * finfo = new curve::mds::FileInfo; + curve::mds::FileInfo* finfo = new curve::mds::FileInfo; ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession; se->set_sessionid("1"); se->set_createtime(12345); @@ -115,16 +115,16 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { openresponse.mutable_fileinfo()->set_seqnum(2); openresponse.mutable_fileinfo()->set_filename(filename); - FakeReturn* openfakeret - = new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* openfakeret = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice.SetOpenFile(openfakeret); // 1. create a File authfailed ::curve::mds::CreateFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* fakeret - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateFileFakeReturn(fakeret); size_t len = 4 * 1024 * 1024ul; @@ -138,7 +138,7 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { LOG(INFO) << "get refresh session request!"; refreshcv.notify_one(); }; - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::ReFreshSessionResponse refreshresp; refreshresp.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); refreshresp.set_sessionid("1234"); @@ -147,12 +147,13 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { refreshresp.mutable_fileinfo()->set_filename(filename); refreshresp.mutable_fileinfo()->set_id(1); refreshresp.mutable_fileinfo()->set_parentid(0); - refreshresp.mutable_fileinfo()->set_filetype(curve::mds::FileType::INODE_PAGEFILE); // NOLINT + refreshresp.mutable_fileinfo()->set_filetype( + curve::mds::FileType::INODE_PAGEFILE); // NOLINT refreshresp.mutable_fileinfo()->set_chunksize(4 * 1024 * 1024); refreshresp.mutable_fileinfo()->set_length(4 * 1024 * 1024 * 1024ul); refreshresp.mutable_fileinfo()->set_ctime(12345678); - FakeReturn* refreshfakeret - = new FakeReturn(nullptr, static_cast(&refreshresp)); + FakeReturn* refreshfakeret = + new FakeReturn(nullptr, static_cast(&refreshresp)); curvefsservice.SetRefreshSession(refreshfakeret, refresht); // 3. open the file auth failed @@ -161,47 +162,47 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { // 4. open file success openresponse.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* openfakeret2 - = new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* openfakeret2 = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice.SetOpenFile(openfakeret2); openret = fileinstance.Open(); ASSERT_EQ(openret, LIBCURVE_ERROR::OK); -/* - // 5. wait for refresh - for (int i = 0; i < 4; i++) { - { - std::unique_lock lk(mtx); - refreshcv.wait(lk); + /* + // 5. 
wait for refresh + for (int i = 0; i < 4; i++) { + { + std::unique_lock lk(mtx); + refreshcv.wait(lk); + } } - } - CurveAioContext aioctx; - aioctx.offset = 4 * 1024 * 1024 - 4 * 1024; - aioctx.length = 4 * 1024 * 1024 + 8 * 1024; - aioctx.ret = LIBCURVE_ERROR::OK; - aioctx.cb = sessioncallback; - aioctx.buf = nullptr; - - fileinstance.AioRead(&aioctx); - fileinstance.AioWrite(&aioctx); - - for (int i = 0; i < 1; i++) { - { - std::unique_lock lk(mtx); - refreshcv.wait(lk); + CurveAioContext aioctx; + aioctx.offset = 4 * 1024 * 1024 - 4 * 1024; + aioctx.length = 4 * 1024 * 1024 + 8 * 1024; + aioctx.ret = LIBCURVE_ERROR::OK; + aioctx.cb = sessioncallback; + aioctx.buf = nullptr; + + fileinstance.AioRead(&aioctx); + fileinstance.AioWrite(&aioctx); + + for (int i = 0; i < 1; i++) { + { + std::unique_lock lk(mtx); + refreshcv.wait(lk); + } } - } - char buffer[10]; - ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Write(buffer, 0, 0)); - ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Read(buffer, 0, 0)); -*/ + char buffer[10]; + ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Write(buffer, 0, 0)); + ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Read(buffer, 0, 0)); + */ // 6. set fake close return ::curve::mds::CloseFileResponse closeresp; closeresp.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* closefileret - = new FakeReturn(nullptr, static_cast(&closeresp)); + FakeReturn* closefileret = + new FakeReturn(nullptr, static_cast(&closeresp)); curvefsservice.SetCloseFile(closefileret); ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, fileinstance.Close()); @@ -235,12 +236,11 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { ::curve::mds::CreateSnapShotResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); response.clear_snapshotfileinfo(); - FakeReturn* fakeret - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret); - ASSERT_EQ(-LIBCURVE_ERROR::FAILED, cl.CreateSnapShot(filename, - emptyuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::FAILED, + cl.CreateSnapShot(filename, emptyuserinfo, &seq)); // set response response.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); @@ -255,54 +255,51 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { finf->set_seqnum(2); finf->set_segmentsize(1 * 1024 * 1024 * 1024); response.set_allocated_snapshotfileinfo(finf); - FakeReturn* fakeret1 - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret1); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.CreateSnapShot(filename, - emptyuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.CreateSnapShot(filename, emptyuserinfo, &seq)); // test delete // normal delete test ::curve::mds::DeleteSnapShotResponse delresponse; delresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* delfakeret - = new FakeReturn(nullptr, static_cast(&delresponse)); + FakeReturn* delfakeret = + new FakeReturn(nullptr, static_cast(&delresponse)); curvefsservice.SetDeleteSnapShot(delfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.DeleteSnapShot(filename, - emptyuserinfo, - seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.DeleteSnapShot(filename, emptyuserinfo, seq)); // test get SegmentInfo // normal getinfo curve::mds::GetOrAllocateSegmentResponse* getresponse = - new 
curve::mds::GetOrAllocateSegmentResponse(); + new curve::mds::GetOrAllocateSegmentResponse(); curve::mds::PageFileSegment* pfs = new curve::mds::PageFileSegment; pfs->set_logicalpoolid(0); - pfs->set_segmentsize(1ull*1024*1024*1024); - pfs->set_chunksize(16*1024*1024); + pfs->set_segmentsize(1ull * 1024 * 1024 * 1024); + pfs->set_chunksize(16 * 1024 * 1024); pfs->set_startoffset(0); getresponse->set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); getresponse->set_allocated_pagefilesegment(pfs); - FakeReturn* getfakeret = new FakeReturn(nullptr, - static_cast(getresponse)); + FakeReturn* getfakeret = + new FakeReturn(nullptr, static_cast(getresponse)); curvefsservice.SetGetSnapshotSegmentInfo(getfakeret); ::curve::mds::topology::GetChunkServerListInCopySetsResponse* geresponse_1 = - new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); + new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); geresponse_1->set_statuscode(0); - FakeReturn* faktopologyeret = new FakeReturn(nullptr, - static_cast(geresponse_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(geresponse_1)); topologyservice.SetFakeReturn(faktopologyeret); SegmentInfo seginfo; LogicalPoolCopysetIDInfo lpcsIDInfo; - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.GetSnapshotSegmentInfo(filename, - emptyuserinfo, - 0, 0, &seginfo)); + ASSERT_EQ( + -LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapshotSegmentInfo(filename, emptyuserinfo, 0, 0, &seginfo)); // test list snapshot // normal delete test @@ -311,7 +308,8 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { listresponse.mutable_fileinfo(0)->set_filename(filename); listresponse.mutable_fileinfo(0)->set_id(1); listresponse.mutable_fileinfo(0)->set_parentid(0); - listresponse.mutable_fileinfo(0)->set_filetype(curve::mds::FileType::INODE_PAGEFILE); // NOLINT + listresponse.mutable_fileinfo(0)->set_filetype( + curve::mds::FileType::INODE_PAGEFILE); // NOLINT listresponse.mutable_fileinfo(0)->set_chunksize(4 * 1024 * 1024); listresponse.mutable_fileinfo(0)->set_length(4 * 1024 * 1024 * 1024ul); listresponse.mutable_fileinfo(0)->set_ctime(12345678); @@ -319,20 +317,19 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { listresponse.mutable_fileinfo(0)->set_segmentsize(1 * 1024 * 1024 * 1024ul); listresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* listfakeret - = new FakeReturn(nullptr, static_cast(&listresponse)); + FakeReturn* listfakeret = + new FakeReturn(nullptr, static_cast(&listresponse)); curve::client::FInfo_t sinfo; curvefsservice.SetListSnapShot(listfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.GetSnapShot(filename, - emptyuserinfo, - seq, &sinfo)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapShot(filename, emptyuserinfo, seq, &sinfo)); std::vector seqvec; std::map fivec; seqvec.push_back(seq); curve::client::FInfo_t ffinfo; ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.ListSnapShot(filename, emptyuserinfo, &seqvec, &fivec)); + cl.ListSnapShot(filename, emptyuserinfo, &seqvec, &fivec)); cl.UnInit(); delete fakeret; @@ -341,7 +338,7 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { delete delfakeret; } -// root user测试 +// Root user testing TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ClientConfigOption opt; opt.metaServerOpt.rpcRetryOpt.rpcTimeoutMs = 500; @@ -359,7 +356,7 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ASSERT_TRUE(!cl.Init(opt)); UserInfo_t rootuserinfo; - rootuserinfo.owner 
="root"; + rootuserinfo.owner = "root"; rootuserinfo.password = "123"; std::string filename = "./1_usertest_.img"; @@ -370,12 +367,11 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ::curve::mds::CreateSnapShotResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); response.clear_snapshotfileinfo(); - FakeReturn* fakeret - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret); - ASSERT_EQ(-LIBCURVE_ERROR::FAILED, cl.CreateSnapShot(filename, - rootuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::FAILED, + cl.CreateSnapShot(filename, rootuserinfo, &seq)); // set response response.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); @@ -390,54 +386,51 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { finf->set_seqnum(2); finf->set_segmentsize(1 * 1024 * 1024 * 1024); response.set_allocated_snapshotfileinfo(finf); - FakeReturn* fakeret1 - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret1); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.CreateSnapShot(filename, - rootuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.CreateSnapShot(filename, rootuserinfo, &seq)); // test delete // normal delete test ::curve::mds::DeleteSnapShotResponse delresponse; delresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* delfakeret - = new FakeReturn(nullptr, static_cast(&delresponse)); + FakeReturn* delfakeret = + new FakeReturn(nullptr, static_cast(&delresponse)); curvefsservice.SetDeleteSnapShot(delfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.DeleteSnapShot(filename, - rootuserinfo, - seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.DeleteSnapShot(filename, rootuserinfo, seq)); // test get SegmentInfo // normal getinfo curve::mds::GetOrAllocateSegmentResponse* getresponse = - new curve::mds::GetOrAllocateSegmentResponse(); + new curve::mds::GetOrAllocateSegmentResponse(); curve::mds::PageFileSegment* pfs = new curve::mds::PageFileSegment; pfs->set_logicalpoolid(0); - pfs->set_segmentsize(1ull*1024*1024*1024); - pfs->set_chunksize(16ull*1024*1024); + pfs->set_segmentsize(1ull * 1024 * 1024 * 1024); + pfs->set_chunksize(16ull * 1024 * 1024); pfs->set_startoffset(0); getresponse->set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); getresponse->set_allocated_pagefilesegment(pfs); - FakeReturn* getfakeret = new FakeReturn(nullptr, - static_cast(getresponse)); + FakeReturn* getfakeret = + new FakeReturn(nullptr, static_cast(getresponse)); curvefsservice.SetGetSnapshotSegmentInfo(getfakeret); ::curve::mds::topology::GetChunkServerListInCopySetsResponse* geresponse_1 = - new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); + new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); geresponse_1->set_statuscode(0); - FakeReturn* faktopologyeret = new FakeReturn(nullptr, - static_cast(geresponse_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(geresponse_1)); topologyservice.SetFakeReturn(faktopologyeret); SegmentInfo seginfo; LogicalPoolCopysetIDInfo lpcsIDInfo; - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.GetSnapshotSegmentInfo(filename, - rootuserinfo, - 0, 0, &seginfo)); + ASSERT_EQ( + -LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapshotSegmentInfo(filename, rootuserinfo, 0, 0, &seginfo)); // test list snapshot // normal delete 
test @@ -446,7 +439,8 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { listresponse.mutable_fileinfo(0)->set_filename(filename); listresponse.mutable_fileinfo(0)->set_id(1); listresponse.mutable_fileinfo(0)->set_parentid(0); - listresponse.mutable_fileinfo(0)->set_filetype(curve::mds::FileType::INODE_PAGEFILE); // NOLINT + listresponse.mutable_fileinfo(0)->set_filetype( + curve::mds::FileType::INODE_PAGEFILE); // NOLINT listresponse.mutable_fileinfo(0)->set_chunksize(4 * 1024 * 1024); listresponse.mutable_fileinfo(0)->set_length(4 * 1024 * 1024 * 1024ul); listresponse.mutable_fileinfo(0)->set_ctime(12345678); @@ -454,21 +448,19 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { listresponse.mutable_fileinfo(0)->set_segmentsize(1 * 1024 * 1024 * 1024ul); listresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* listfakeret - = new FakeReturn(nullptr, static_cast(&listresponse)); + FakeReturn* listfakeret = + new FakeReturn(nullptr, static_cast(&listresponse)); curve::client::FInfo_t sinfo; curvefsservice.SetListSnapShot(listfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.GetSnapShot(filename, - rootuserinfo, - seq, &sinfo)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapShot(filename, rootuserinfo, seq, &sinfo)); std::vector seqvec; std::map fivec; seqvec.push_back(seq); curve::client::FInfo_t ffinfo; ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.ListSnapShot(filename, rootuserinfo, - &seqvec, &fivec)); + cl.ListSnapShot(filename, rootuserinfo, &seqvec, &fivec)); cl.UnInit(); delete fakeret; diff --git a/test/client/copyset_client_test.cpp b/test/client/copyset_client_test.cpp index b71383ec9d..759cb6fc3c 100644 --- a/test/client/copyset_client_test.cpp +++ b/test/client/copyset_client_test.cpp @@ -20,3995 +20,4052 @@ * Author: wudemiao */ -#include +#include "src/client/copyset_client.h" + #include +#include #include -#include #include +#include -#include //NOLINT -#include // NOLINT +#include // NOLINT +#include //NOLINT -#include "src/client/copyset_client.h" -#include "test/client/mock/mock_meta_cache.h" -#include "src/common/concurrent/count_down_event.h" -#include "test/client/mock/mock_chunkservice.h" -#include "test/client/mock/mock_request_context.h" #include "src/client/chunk_closure.h" +#include "src/client/metacache.h" +#include "src/client/request_closure.h" +#include "src/common/concurrent/count_down_event.h" #include "src/common/timeutility.h" #include "test/client/fake/fakeChunkserver.h" +#include "test/client/mock/mock_chunkservice.h" +#include "test/client/mock/mock_meta_cache.h" +#include "test/client/mock/mock_request_context.h" #include "test/client/mock/mock_request_scheduler.h" -#include "src/client/request_closure.h" -#include "src/client/metacache.h" -namespace curve { -namespace client { - -using curve::chunkserver::CHUNK_OP_STATUS; -using curve::chunkserver::ChunkRequest; - -using ::testing::_; -using ::testing::Invoke; -using ::testing::Return; -using ::testing::AnyNumber; -using ::testing::DoAll; -using ::testing::SetArgPointee; -using ::testing::SetArgReferee; -using ::testing::InSequence; -using ::testing::AtLeast; -using ::testing::SaveArgPointee; -using curve::client::MetaCache; -using curve::common::TimeUtility; - -class CopysetClientTest : public testing::Test { - protected: - virtual void SetUp() { - listenAddr_ = "127.0.0.1:9109"; - server_ = new brpc::Server(); - } - - virtual void TearDown() { - server_->Stop(0); - server_->Join(); - delete server_; - server_ = nullptr; - } - - 
public: - std::string listenAddr_; - brpc::Server *server_; -}; - -/* TODO(wudemiao) 当前 controller 错误不能通过 mock 返回 */ -int gWriteCntlFailedCode = 0; -int gReadCntlFailedCode = 0; - -static void WriteChunkFunc(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { - /* return response */ - brpc::ClosureGuard doneGuard(done); - if (0 != gWriteCntlFailedCode) { - if (gWriteCntlFailedCode == brpc::ERPCTIMEDOUT) { - std::this_thread::sleep_for(std::chrono::milliseconds(3500)); - } - brpc::Controller *cntl = dynamic_cast(controller); - cntl->SetFailed(gWriteCntlFailedCode, "write controller error"); - } -} - -static void ReadChunkFunc(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { - brpc::ClosureGuard doneGuard(done); - if (0 != gReadCntlFailedCode) { - if (gReadCntlFailedCode == brpc::ERPCTIMEDOUT) { - std::this_thread::sleep_for(std::chrono::milliseconds(4000)); +namespace curve +{ + namespace client + { + + using curve::chunkserver::CHUNK_OP_STATUS; + using curve::chunkserver::ChunkRequest; + + using curve::client::MetaCache; + using curve::common::TimeUtility; + using ::testing::_; + using ::testing::AnyNumber; + using ::testing::AtLeast; + using ::testing::DoAll; + using ::testing::InSequence; + using ::testing::Invoke; + using ::testing::Return; + using ::testing::SaveArgPointee; + using ::testing::SetArgPointee; + using ::testing::SetArgReferee; + + class CopysetClientTest : public testing::Test + { + protected: + virtual void SetUp() + { + listenAddr_ = "127.0.0.1:9109"; + server_ = new brpc::Server(); + } + + virtual void TearDown() + { + server_->Stop(0); + server_->Join(); + delete server_; + server_ = nullptr; + } + + public: + std::string listenAddr_; + brpc::Server *server_; + }; + + /* TODO(wudemiao) current controller error cannot be returned through mock */ + int gWriteCntlFailedCode = 0; + int gReadCntlFailedCode = 0; + + static void WriteChunkFunc(::google::protobuf::RpcController *controller, + const ::curve::chunkserver::ChunkRequest *request, + ::curve::chunkserver::ChunkResponse *response, + google::protobuf::Closure *done) + { + /* return response */ + brpc::ClosureGuard doneGuard(done); + if (0 != gWriteCntlFailedCode) + { + if (gWriteCntlFailedCode == brpc::ERPCTIMEDOUT) + { + std::this_thread::sleep_for(std::chrono::milliseconds(3500)); + } + brpc::Controller *cntl = dynamic_cast(controller); + cntl->SetFailed(gWriteCntlFailedCode, "write controller error"); + } } - brpc::Controller *cntl = dynamic_cast(controller); - cntl->SetFailed(gReadCntlFailedCode, "read controller error"); - } -} - -static void ReadChunkSnapshotFunc(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, //NOLINT - ::curve::chunkserver::ChunkResponse *response, //NOLINT - google::protobuf::Closure *done) { - brpc::ClosureGuard doneGuard(done); - if (0 != gReadCntlFailedCode) { - brpc::Controller *cntl = dynamic_cast(controller); - cntl->SetFailed(-1, "read snapshot controller error"); - } -} - -static void DeleteChunkSnapshotFunc(::google::protobuf::RpcController *controller, //NOLINT - const ::curve::chunkserver::ChunkRequest *request, //NOLINT - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { - brpc::ClosureGuard doneGuard(done); - if (0 != 
gReadCntlFailedCode) { - brpc::Controller *cntl = dynamic_cast(controller); - cntl->SetFailed(-1, "delete snapshot controller error"); - } -} - -static void CreateCloneChunkFunc( - ::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - google::protobuf::Closure* done) { - brpc::ClosureGuard doneGuard(done); - if (0 != gReadCntlFailedCode) { - brpc::Controller* cntl = dynamic_cast(controller); - cntl->SetFailed(-1, "create clone chunk controller error"); - } -} - -static void RecoverChunkFunc(::google::protobuf::RpcController *controller, //NOLINT - const ::curve::chunkserver::ChunkRequest *request, //NOLINT + + static void ReadChunkFunc(::google::protobuf::RpcController *controller, + const ::curve::chunkserver::ChunkRequest *request, ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { - brpc::ClosureGuard doneGuard(done); - if (0 != gReadCntlFailedCode) { - brpc::Controller *cntl = dynamic_cast(controller); - cntl->SetFailed(-1, "recover chunk controller error"); - } -} - -static void GetChunkInfoFunc(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::GetChunkInfoRequest *request, //NOLINT - ::curve::chunkserver::GetChunkInfoResponse *response, //NOLINT - google::protobuf::Closure *done) { - brpc::ClosureGuard doneGuard(done); - if (0 != gReadCntlFailedCode) { - brpc::Controller *cntl = dynamic_cast(controller); - cntl->SetFailed(-1, "get chunk info controller error"); - } -} - -TEST_F(CopysetClientTest, normal_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - - RequestScheduler scheduler; - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - uint64_t fileId = 1; - uint64_t epoch = 1; - uint64_t sn = 1; - size_t len = 8; - char buff1[8 + 1]; - char buff2[8 + 1]; - memset(buff1, 'a', 8); - memset(buff2, 'a', 8); - buff1[8] = '\0'; - buff2[8] = '\0'; - off_t offset = 0; - - butil::IOBuf iobuf; - iobuf.append(buff1, sizeof(buff1) - 1); - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); - iot.PrepareReadIOBuffers(1); - - // write success - for (int i = 0; i < 10; ++i) { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - - reqCtx->offset_ = i * 8; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = offset; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). - Times(AtLeast(1)) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = offset; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). - Times(AtLeast(1)) - .WillOnce(Return(-1)) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - // read success - for (int i = 0; i < 10; ++i) { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - reqCtx->seq_ = sn; - reqCtx->offset_ = i * 8; - reqCtx->rawlength_ = len; - reqCtx->subIoIndex_ = 0; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = offset; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). - Times(AtLeast(1)) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = offset; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
-            Times(AtLeast(1))
-        .WillOnce(Return(-1))
-        .WillOnce(Return(-1))
-        .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                        SetArgPointee<3>(leaderAddr),
-                        Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(response),
-                            Invoke(ReadChunkFunc)));
-        copysetClient.ReadChunk(reqCtx->idinfo_, sn,
-                                offset, len, {}, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-}
-
-/**
- * write error testing
- */
-TEST_F(CopysetClientTest, write_error_test) {
-    MockChunkServiceImpl mockChunkService;
-    ASSERT_EQ(server_->AddService(&mockChunkService,
-                                  brpc::SERVER_DOESNT_OWN_SERVICE), 0);
-    ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0);
-
-    IOSenderOption ioSenderOpt;
-    ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 1000;
-    ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3;
-    ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 5000;
-    ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 3500;
-    ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 3500000;
-
-    RequestScheduleOption reqopt;
-    reqopt.ioSenderOpt = ioSenderOpt;
-
-    CopysetClient copysetClient;
-    MockMetaCache mockMetaCache;
-    mockMetaCache.DelegateToFake();
-
-    RequestScheduler scheduler;
-    scheduler.Init(reqopt, &mockMetaCache);
-    scheduler.Run();
-    copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr);
-
-    LogicPoolID logicPoolId = 1;
-    CopysetID copysetId = 100001;
-    ChunkID chunkId = 1;
-    uint64_t fileId = 1;
-    uint64_t epoch = 1;
-    size_t len = 8;
-    char buff1[8 + 1];
-    char buff2[8 + 1];
-    memset(buff1, 'a', 8);
-    memset(buff2, 'a', 8);
-    buff1[8] = '\0';
-    buff2[8] = '\0';
-    off_t offset = 0;
-
-    butil::IOBuf iobuf;
-    iobuf.append(buff1, sizeof(buff1) - 1);
-
-    ChunkServerID leaderId = 10000;
-    butil::EndPoint leaderAddr;
-    std::string leaderStr = "127.0.0.1:9109";
-    butil::str2endpoint(leaderStr.c_str(), &leaderAddr);
-
-    FileMetric fm("test");
-    IOTracker iot(nullptr, nullptr, nullptr, &fm);
-
-    /* Invalid parameter */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::WRITE;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeData_ = iobuf;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                                              SetArgPointee<3>(leaderAddr),
-                                              Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(response),
-                            Invoke(WriteChunkFunc)));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST,
-                  reqDone->GetErrorCode());
-    }
-    /* controller error */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::WRITE;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeData_ = iobuf;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        // The retry sleep interval in the config file is 5000; since the
-        // underlying exponential backoff is not triggered, there is no
-        // sleep between retries
-        uint64_t start = TimeUtility::GetTimeofDayUs();
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        gWriteCntlFailedCode = -1;
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3)
-            .WillRepeatedly(Invoke(WriteChunkFunc));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
-        cond.Wait();
-        ASSERT_NE(0, reqDone->GetErrorCode());
-
-        uint64_t end = TimeUtility::GetTimeofDayUs();
-        ASSERT_GT(end - start, 10000);
-        gWriteCntlFailedCode = 0;
-    }
-    /* controller set timeout */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::WRITE;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeData_ = iobuf;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        // The retry timeout in the config file is 5000. Because the
-        // chunkserver is set to return timeout, the underlying timeout
-        // exponential backoff is triggered and the interval grows with each
-        // retry. Three retries would normally sleep only 3*1000, but with
-        // exponential backoff the timeout grows to 1000 + 2000 + 2000 = 5000.
-        // Adding the random factor, the three retries should take more than
-        // 7000 and less than 8000
-        uint64_t start = TimeUtility::GetTimeofDayMs();
-
-        reqCtx->done_ = reqDone;
-        gWriteCntlFailedCode = brpc::ERPCTIMEDOUT;
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(AtLeast(3))
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3)
-            .WillRepeatedly(Invoke(WriteChunkFunc));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
-        cond.Wait();
-        ASSERT_NE(0, reqDone->GetErrorCode());
-
-        uint64_t end = TimeUtility::GetTimeofDayMs();
-        ASSERT_GT(end - start, 3000);
-        ASSERT_LT(end - start, 6000);
-        std::this_thread::sleep_for(std::chrono::seconds(8));
-
-        gWriteCntlFailedCode = 0;
-    }
-
-    /* controller set timeout */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::WRITE;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeData_ = iobuf;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        // The retry sleep interval in the config file is 5000. Because the
-        // chunkserver is set to return timeout, the underlying exponential
-        // backoff is triggered and the interval grows with each retry. Three
-        // retries would normally sleep only 3*5000, but with exponential
-        // backoff the sleep interval grows to 10000 + 20000 = 30000. Adding
-        // the random factor, the three retries should take more than 29000
-        // and less than 50000
-        uint64_t start = TimeUtility::GetTimeofDayUs();
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                           SetArgPointee<3>(leaderAddr),
-                                           Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(WriteChunkFunc)));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD,
-                  reqDone->GetErrorCode());
-
-        uint64_t end = TimeUtility::GetTimeofDayUs();
-        ASSERT_GT(end - start, 28000);
-        ASSERT_LT(end - start, 2 * 50000);
-        gWriteCntlFailedCode = 0;
-    }
-
-    /* Other errors */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::WRITE;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeData_ = iobuf;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                           SetArgPointee<3>(leaderAddr),
-                                           Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(WriteChunkFunc)));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN,
-                  reqDone->GetErrorCode());
-    }
-    /* Not the leader; the correct leader is returned */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::WRITE;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeData_ = iobuf;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-        response1.set_redirect(leaderStr);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(1)
-            .WillOnce(Return(0));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(2)
-            .WillOnce(DoAll(SetArgPointee<2>(response1),
-                            Invoke(WriteChunkFunc)))
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(WriteChunkFunc)));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-
-        ASSERT_EQ(1, fm.writeRPC.redirectQps.count.get_value());
-    }
-    /* Not the leader; no leader returned; refreshing the meta cache succeeds */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::WRITE;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeData_ = iobuf;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-//        response1.set_redirect(leaderStr2);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(2)
-            .WillOnce(DoAll(SetArgPointee<2>(response1),
-                            Invoke(WriteChunkFunc)))
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(WriteChunkFunc)));
-
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-    /* Not the leader; no leader returned; refreshing the meta cache fails */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::WRITE;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeData_ = iobuf;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-//        response1.set_redirect(leaderStr2);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)))
-            .WillOnce(Return(-1))
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(2)
-            .WillOnce(DoAll(SetArgPointee<2>(response1),
-                            Invoke(WriteChunkFunc)))
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(WriteChunkFunc)));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-    /* Not the leader, and the returned leader is wrong */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::WRITE;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeData_ = iobuf;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        FileMetric fm("test");
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-        response.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _))
-            .Times(3)
-            .WillRepeatedly(Return(0));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(WriteChunkFunc)));
-        auto startTimeUs = curve::common::TimeUtility::GetTimeofDayUs();
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
-        cond.Wait();
-        auto elpased = curve::common::TimeUtility::GetTimeofDayUs() -
-                       startTimeUs;
-        // chunkserverOPRetryIntervalUS = 5000
-        // Each redirect sleeps 500us, with 2 retries in total
-        // (chunkserverOPMaxRetry = 3, and the check returns once the count
-        // is greater than or equal to it, so only two retries happen),
-        // so the total elapsed time is greater than 1000us
-        ASSERT_GE(elpased, 1000);
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED,
-                  reqDone->GetErrorCode());
-        ASSERT_EQ(3, fm.writeRPC.redirectQps.count.get_value());
-    }
-    /* The copyset does not exist; updating the leader still fails */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::WRITE;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeData_ = iobuf;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST);
-        response.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(WriteChunkFunc)));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST,
-                  reqDone->GetErrorCode());
-    }
-    /* The copyset does not exist; updating the leader succeeds */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::WRITE;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeData_ = iobuf;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST);
-        response1.set_redirect(leaderStr);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        response2.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(2)
-            .WillOnce(DoAll(SetArgPointee<2>(response1),
-                            Invoke(WriteChunkFunc)))
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(WriteChunkFunc)));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-    // epoch too old
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::WRITE;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeData_ = iobuf;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                                              SetArgPointee<3>(leaderAddr),
-                                              Return(0)));
-        EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(1)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(WriteChunkFunc)));
-        copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0,
-                                 iobuf, offset, len, {}, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD,
-                  reqDone->GetErrorCode());
-    }
-
-    scheduler.Fini();
-}
-
-/**
- * write failed testing
- */
-TEST_F(CopysetClientTest, write_failed_test) {
-    MockChunkServiceImpl mockChunkService;
-    ASSERT_EQ(server_->AddService(&mockChunkService,
-                                  brpc::SERVER_DOESNT_OWN_SERVICE), 0);
-
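The elapsed-time bounds asserted in this test come from capped exponential backoff on the RPC timeout, per the options set just below (500 ms base, capped at chunkserverMaxRPCTimeoutMS = 1000 ms): 50 attempts cost roughly 500 + 49 * 1000 ≈ 49.5 s. A standalone sketch of that arithmetic, with illustrative names only (not the client's real fields):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    // Timeout used for the retry-th attempt: doubles from baseMs, clamped at capMs.
    uint64_t BackoffTimeoutMs(uint64_t baseMs, uint64_t capMs, int retry) {
        uint64_t t = baseMs;
        for (int i = 0; i < retry; ++i) t = std::min(t * 2, capMs);
        return t;
    }

    int main() {
        uint64_t totalMs = 0;
        for (int r = 0; r < 50; ++r) totalMs += BackoffTimeoutMs(500, 1000, r);
        std::cout << totalMs << " ms\n";  // 500 + 49 * 1000 = 49500 ms
        return 0;
    }
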
ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 500; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 50; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 5000; - ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 1000; - ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 100000; - - RequestScheduleOption reqopt; - reqopt.ioSenderOpt = ioSenderOpt; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - - RequestScheduler scheduler; - scheduler.Init(reqopt, &mockMetaCache); - scheduler.Run(); - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - uint64_t fileId = 1; - uint64_t epoch = 1; - size_t len = 8; - char buff1[8 + 1]; - char buff2[8 + 1]; - memset(buff1, 'a', 8); - memset(buff2, 'a', 8); - buff1[8] = '\0'; - buff2[8] = '\0'; - off_t offset = 0; - butil::IOBuf iobuf; - iobuf.append(buff1, sizeof(buff1) - 1); - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); - - /* controller set timeout */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - // 配置文件设置的重试超时时间为500,因为chunkserver设置返回timeout - // 导致触发底层超时时间指数退避,每次重试间隔增大。重试50次正常只需要超时49*500 - // 但是增加指数退避之后,超时时间将增加到49*1000 = 49000 - uint64_t start = TimeUtility::GetTimeofDayMs(); - - reqCtx->done_ = reqDone; - gWriteCntlFailedCode = brpc::ERPCTIMEDOUT; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(50)) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(50) - .WillRepeatedly(Invoke(WriteChunkFunc)); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayMs(); - ASSERT_GT(end - start, 25000); - ASSERT_LT(end - start, 55000); - std::this_thread::sleep_for(std::chrono::seconds(8)); - - gWriteCntlFailedCode = 0; - } + google::protobuf::Closure *done) + { + brpc::ClosureGuard doneGuard(done); + if (0 != gReadCntlFailedCode) + { + if (gReadCntlFailedCode == brpc::ERPCTIMEDOUT) + { + std::this_thread::sleep_for(std::chrono::milliseconds(4000)); + } + brpc::Controller *cntl = dynamic_cast(controller); + cntl->SetFailed(gReadCntlFailedCode, "read controller error"); + } + } - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - // 配置文件设置的重试睡眠时间为5000us,因为chunkserver设置返回timeout - // 
导致触发底层指数退避,每次重试间隔增大。重试50次正常只需要睡眠49*5000us - // 但是增加指数退避之后,睡眠间隔将增加到 - // 10000 + 20000 + 40000... ~= 4650000 - uint64_t start = TimeUtility::GetTimeofDayUs(); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(50).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(50) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(WriteChunkFunc))); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, - reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayUs(); - ASSERT_GT(end - start, 250000); - ASSERT_LT(end - start, 4650000); - gWriteCntlFailedCode = 0; - } - scheduler.Fini(); -} - - -/** - * read failed testing - */ -TEST_F(CopysetClientTest, read_failed_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 500; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 50; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 5000; - ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 1000; - ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 100000; - - RequestScheduleOption reqopt; - reqopt.ioSenderOpt = ioSenderOpt; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - - RequestScheduler scheduler; - scheduler.Init(reqopt, &mockMetaCache); - scheduler.Run(); - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - uint64_t sn = 1; - size_t len = 8; - char buff1[8 + 1]; - char buff2[8 + 1]; - memset(buff1, 'a', 8); - memset(buff2, 'a', 8); - buff1[8] = '\0'; - buff2[8] = '\0'; - off_t offset = 0; - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); - iot.PrepareReadIOBuffers(1); - - /* controller set timeout */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - // 配置文件设置的重试超时时间为500,因为chunkserver设置返回timeout - // 导致触发底层超时时间指数退避,每次重试间隔增大。重试50次正常只需要50*500 - // 但是增加指数退避之后,超时时间将增加到500 + 1000 + 2000... 
~= 60000 - uint64_t start = TimeUtility::GetTimeofDayMs(); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - gReadCntlFailedCode = brpc::ERPCTIMEDOUT; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(50)) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(50) - .WillRepeatedly(Invoke(ReadChunkFunc)); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayMs(); - ASSERT_GT(end - start, 25000); - ASSERT_LT(end - start, 60000); - - std::this_thread::sleep_for(std::chrono::seconds(8)); - - gReadCntlFailedCode = 0; - } - - /* 设置 overload */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - // 配置文件设置的重试睡眠时间为5000us,因为chunkserver设置返回timeout - // 导致触发底层指数退避,每次重试间隔增大。重试50次正常只需要睡眠49*5000 - // 但是增加指数退避之后,睡眠间隔将增加到 - // 10000 + 20000 + 40000 ... = 4650000 - // 加上随机因子,三次重试时间应该大于2900, 且小于5000 - uint64_t start = TimeUtility::GetTimeofDayUs(); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(50).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(50) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, - reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayUs(); - ASSERT_GT(end - start, 250000); - ASSERT_LT(end - start, 4650000); - } - scheduler.Fini(); -} - -/** - * read error testing - */ -TEST_F(CopysetClientTest, read_error_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 1000; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; - ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 3500; - ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 3500000; - - RequestScheduleOption reqopt; - reqopt.ioSenderOpt = ioSenderOpt; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - - RequestScheduler scheduler; - scheduler.Init(reqopt, &mockMetaCache); - scheduler.Run(); - - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - uint64_t sn = 1; - size_t len = 8; - char buff1[8 + 1]; - char buff2[8 + 1]; - memset(buff1, 'a', 8); - memset(buff2, 'a', 8); - buff1[8] = '\0'; - 
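Each case in these tests blocks on curve::common::CountDownEvent until the request closure has run. As an assumption about its semantics (the real helper lives under src/common/concurrent), a rough standard-C++ stand-in looks like this:

    #include <condition_variable>
    #include <mutex>

    // Wait() returns once Signal() has been called `count` times,
    // typically from an RPC completion callback on another thread.
    class CountDownEventSketch {
     public:
        explicit CountDownEventSketch(int count) : count_(count) {}
        void Signal() {
            std::lock_guard<std::mutex> lk(mu_);
            if (--count_ <= 0) cv_.notify_all();
        }
        void Wait() {
            std::unique_lock<std::mutex> lk(mu_);
            cv_.wait(lk, [this] { return count_ <= 0; });
        }
     private:
        std::mutex mu_;
        std::condition_variable cv_;
        int count_;
    };
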
buff2[8] = '\0'; - off_t offset = 0; - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); - iot.PrepareReadIOBuffers(1); - - /* 非法参数 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, - reqDone->GetErrorCode()); - } - /* chunk not exist */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(0, reqDone->GetErrorCode()); - } - /* controller error */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - // 配置文件设置的重试睡眠时间为5000,因为没有触发底层指数退避,所以重试之间不会睡眠 - uint64_t start = TimeUtility::GetTimeofDayUs(); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(3)) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) - .WillRepeatedly(Invoke(ReadChunkFunc)); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayUs(); - ASSERT_GT(end - start, 1000); - gReadCntlFailedCode = 0; - } - - /* controller set timeout */ - { - RequestContext *reqCtx = new 
FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - // 配置文件设置的超时时间为1000,因为chunkserver设置返回timeout - // 导致触发底层超时时间指数退避,每次重试间隔增大。重试三次正常只需要睡眠3*1000 - // 但是增加指数退避之后,超时时间将增加到1000 + 2000 + 2000 = 5000 - // 加上随机因子,三次重试时间应该大于7000, 且小于8000 - uint64_t start = TimeUtility::GetTimeofDayMs(); - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - gReadCntlFailedCode = brpc::ERPCTIMEDOUT; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(3)) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) - .WillRepeatedly(Invoke(ReadChunkFunc)); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayMs(); - ASSERT_GT(end - start, 3000); - ASSERT_LT(end - start, 6000); - - std::this_thread::sleep_for(std::chrono::seconds(8)); - - gReadCntlFailedCode = 0; - } - - /* 设置 overload */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - // 配置文件设置的重试睡眠时间为500,因为chunkserver设置返回timeout - // 导致触发底层指数退避,每次重试间隔增大。重试三次正常只需要睡眠3*500 - // 但是增加指数退避之后,睡眠间隔将增加到1000 + 2000 = 3000 - // 加上随机因子,三次重试时间应该大于2900, 且小于5000 - uint64_t start = TimeUtility::GetTimeofDayUs(); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, - reqDone->GetErrorCode()); - - uint64_t end = TimeUtility::GetTimeofDayUs(); - ASSERT_GT(end - start, 2900); - ASSERT_LT(end - start, 3 * 5000); - } - - /* 其他错误 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - 
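These expectations compose two gmock actions: SetArgPointee<2> copies a scripted ChunkResponse into the RPC's response argument, and Invoke then runs the fake, whose only job is to complete the done-closure (or fail the controller). A self-contained sketch of the same composition on an invented service (Svc, Op, and FakeOp are not curve APIs):

    #include <functional>
    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    using ::testing::_;
    using ::testing::DoAll;
    using ::testing::Invoke;
    using ::testing::SetArgPointee;

    struct Resp { int status = 0; };

    class Svc {
     public:
        virtual ~Svc() = default;
        virtual void Op(int in, Resp* out, std::function<void()> done) = 0;
    };

    class MockSvc : public Svc {
     public:
        MOCK_METHOD(void, Op, (int, Resp*, std::function<void()>), (override));
    };

    static void FakeOp(int, Resp*, std::function<void()> done) { done(); }

    TEST(MockPattern, FillThenComplete) {
        MockSvc mock;
        Resp scripted;
        scripted.status = -1;  // the error being injected
        EXPECT_CALL(mock, Op(_, _, _))
            .WillOnce(DoAll(SetArgPointee<1>(scripted),  // fill *out first
                            Invoke(FakeOp)));            // then run the closure
        Resp out;
        bool completed = false;
        mock.Op(0, &out, [&] { completed = true; });
        EXPECT_EQ(-1, out.status);
        EXPECT_TRUE(completed);
    }
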
copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, - reqDone->GetErrorCode()); - } - /* 不是 leader,返回正确的 leader */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response1.set_redirect(leaderStr); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); -// response1.set_redirect(leaderStr2); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); -// 
response1.set_redirect(leaderStr2); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(-1))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* 不是 leader,但返回的是错误 leader */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3) - .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, - reqDone->GetErrorCode()); - } - /* copyset 不存在,更新 leader 依然失败 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - reqDone->GetErrorCode()); - } - /* copyset 不存在,更新 leader 成功 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->subIoIndex_ = 0; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - 
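Taken together, these cases pin down the retry policy: a REDIRECTED reply updates the cached leader from the response, COPYSET_NOTEXIST re-pulls the copyset route, and unknown errors stop the retries. A schematic of that loop under those assumptions (SendToLeader and friends are invented stand-ins, not the client's API):

    enum Status { SUCCESS, REDIRECTED, COPYSET_NOTEXIST, FAILURE_UNKNOWN };

    // Invented stand-ins for the MetaCache/RPC plumbing.
    static int attempts = 0;
    static Status SendToLeader() { return ++attempts < 2 ? REDIRECTED : SUCCESS; }
    static void UpdateLeaderFromResponse() { /* MetaCache::UpdateLeader(...) */ }
    static void RefreshLeaderFromMds() { /* re-fetch the copyset members */ }

    static Status SendWithRetry(int maxRetry) {
        Status s = FAILURE_UNKNOWN;
        for (int i = 0; i < maxRetry; ++i) {
            s = SendToLeader();  // target comes from MetaCache::GetLeader()
            if (s == SUCCESS) break;
            if (s == REDIRECTED) UpdateLeaderFromResponse();         // retry new leader
            else if (s == COPYSET_NOTEXIST) RefreshLeaderFromMds();  // re-pull route
            else break;  // unknown errors are not retried here
        }
        return s;
    }

    int main() { return SendWithRetry(3) == SUCCESS ? 0 : 1; }
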
reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response1.set_redirect(leaderStr); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(ReadChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(ReadChunkFunc))); - copysetClient.ReadChunk(reqCtx->idinfo_, sn, - offset, len, {}, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - scheduler.Fini(); -} - -/** - * read snapshot error testing - */ -TEST_F(CopysetClientTest, read_snapshot_error_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - RequestScheduler scheduler; - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - size_t len = 8; - int sn = 1; - char buff1[8 + 1]; - char buff2[8 + 1]; - memset(buff1, 'a', 8); - memset(buff2, 'a', 8); - buff1[8] = '\0'; - buff2[8] = '\0'; - off_t offset = 0; - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); - - /* 非法参数 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - reqCtx->seq_ = sn; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(ReadChunkSnapshotFunc))); - copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, - sn, offset, len, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, - reqDone->GetErrorCode()); - } - /* chunk snapshot not exist */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::READ_SNAP; - reqCtx->idinfo_ = 
-
-/**
- * read snapshot error testing
- */
-TEST_F(CopysetClientTest, read_snapshot_error_test) {
-    MockChunkServiceImpl mockChunkService;
-    ASSERT_EQ(server_->AddService(&mockChunkService,
-                                  brpc::SERVER_DOESNT_OWN_SERVICE), 0);
-    ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0);
-
-    IOSenderOption ioSenderOpt;
-    ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000;
-    ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3;
-    ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500;
-
-    CopysetClient copysetClient;
-    MockMetaCache mockMetaCache;
-    mockMetaCache.DelegateToFake();
-    RequestScheduler scheduler;
-    copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler);
-
-    LogicPoolID logicPoolId = 1;
-    CopysetID copysetId = 100001;
-    ChunkID chunkId = 1;
-    size_t len = 8;
-    int sn = 1;
-    char buff1[8 + 1];
-    char buff2[8 + 1];
-    memset(buff1, 'a', 8);
-    memset(buff2, 'a', 8);
-    buff1[8] = '\0';
-    buff2[8] = '\0';
-    off_t offset = 0;
-
-    ChunkServerID leaderId = 10000;
-    butil::EndPoint leaderAddr;
-    std::string leaderStr = "127.0.0.1:9109";
-    butil::str2endpoint(leaderStr.c_str(), &leaderAddr);
-
-    FileMetric fm("test");
-    IOTracker iot(nullptr, nullptr, nullptr, &fm);
-
-    /* Invalid parameter */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::READ_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->seq_ = sn;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                                              SetArgPointee<3>(leaderAddr),
-                                              Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(response),
-                            Invoke(ReadChunkSnapshotFunc)));
-        copysetClient.ReadChunkSnapshot(reqCtx->idinfo_,
-                                        sn, offset, len, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST,
-                  reqDone->GetErrorCode());
-    }
-    /* chunk snapshot does not exist */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::READ_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->seq_ = sn;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                                              SetArgPointee<3>(leaderAddr),
-                                              Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(response),
-                            Invoke(ReadChunkSnapshotFunc)));
-        copysetClient.ReadChunkSnapshot(reqCtx->idinfo_,
-                                        sn, offset, len, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST,
-                  reqDone->GetErrorCode());
-    }
-    /* controller error */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::READ_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->seq_ = sn;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        gReadCntlFailedCode = -1;
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(3)
-            .WillRepeatedly(Invoke(ReadChunkSnapshotFunc));
-        copysetClient.ReadChunkSnapshot(reqCtx->idinfo_,
-                                        sn, offset, len, reqDone);
-        cond.Wait();
-        ASSERT_NE(0, reqDone->GetErrorCode());
-        gReadCntlFailedCode = 0;
-    }
-    /* Other errors */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::READ_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->seq_ = sn;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                           SetArgPointee<3>(leaderAddr),
-                                           Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(ReadChunkSnapshotFunc)));
-        copysetClient.ReadChunkSnapshot(reqCtx->idinfo_,
-                                        sn, offset, len, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN,
-                  reqDone->GetErrorCode());
-    }
-    /* Not the leader; the correct leader is returned */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::READ_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->seq_ = sn;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-        response1.set_redirect(leaderStr);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(1)
-            .WillOnce(Return(0));
-        EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(2)
-            .WillOnce(DoAll(SetArgPointee<2>(response1),
-                            Invoke(ReadChunkSnapshotFunc)))
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(ReadChunkSnapshotFunc)));
-        copysetClient.ReadChunkSnapshot(reqCtx->idinfo_,
-                                        sn, offset, len, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-    /* Not the leader and no leader returned; refreshing the meta cache succeeds */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::READ_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->seq_ = sn;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(2)
-            .WillOnce(DoAll(SetArgPointee<2>(response1),
-                            Invoke(ReadChunkSnapshotFunc)))
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(ReadChunkSnapshotFunc)));
-        copysetClient.ReadChunkSnapshot(reqCtx->idinfo_,
-                                        sn, offset, len, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-    /* Not the leader and no leader returned; refreshing the meta cache fails */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::READ_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->seq_ = sn;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)))
-            .WillOnce(Return(-1))
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(2)
-            .WillOnce(DoAll(SetArgPointee<2>(response1),
-                            Invoke(ReadChunkSnapshotFunc)))
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(ReadChunkSnapshotFunc)));
-        copysetClient.ReadChunkSnapshot(reqCtx->idinfo_,
-                                        sn, offset, len, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-    /* Not the leader, and the returned leader is wrong */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::READ_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->seq_ = sn;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-        response.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3)
-            .WillRepeatedly(Return(0));
-        EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(ReadChunkSnapshotFunc)));
-        copysetClient.ReadChunkSnapshot(reqCtx->idinfo_,
-                                        sn, offset, len, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED,
-                  reqDone->GetErrorCode());
-    }
-    /* Copyset does not exist; updating the leader still fails */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::READ_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->seq_ = sn;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST);
-        response.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(ReadChunkSnapshotFunc)));
-        copysetClient.ReadChunkSnapshot(reqCtx->idinfo_,
-                                        sn, offset, len, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST,
-                  reqDone->GetErrorCode());
-    }
-    /* Copyset does not exist; updating the leader succeeds */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::READ_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->seq_ = sn;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST);
-        response1.set_redirect(leaderStr);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        response2.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(2)
-            .WillOnce(DoAll(SetArgPointee<2>(response1),
-                            Invoke(ReadChunkSnapshotFunc)))
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(ReadChunkSnapshotFunc)));
-        copysetClient.ReadChunkSnapshot(reqCtx->idinfo_,
-                                        sn, offset, len, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::READ_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->seq_ = sn;
-        reqCtx->offset_ = 0;
-        reqCtx->rawlength_ = len;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).
-            Times(AtLeast(1))
-            .WillOnce(Return(-1))
-            .WillOnce(Return(-1))
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)));
-        EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(response),
-                            Invoke(ReadChunkSnapshotFunc)));
-        copysetClient.ReadChunkSnapshot(reqCtx->idinfo_,
-                                        sn, offset, len, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-}
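Each case above blocks on curve::common::CountDownEvent: the test thread calls Wait() while the request closure signals completion from the RPC callback. A minimal latch with the semantics these tests assume (an illustrative sketch, not the Curve class itself):

#include <condition_variable>
#include <mutex>

class CountDownLatch {
 public:
    explicit CountDownLatch(int count) : count_(count) {}

    // Called by the completion closure; wakes waiters once the count hits zero.
    void Signal() {
        std::lock_guard<std::mutex> lk(mtx_);
        if (count_ > 0 && --count_ == 0) {
            cv_.notify_all();
        }
    }

    // Called by the test thread; blocks until Signal() ran `count` times.
    void Wait() {
        std::unique_lock<std::mutex> lk(mtx_);
        cv_.wait(lk, [this] { return count_ == 0; });
    }

 private:
    std::mutex mtx_;
    std::condition_variable cv_;
    int count_;
};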
-
-/**
- * delete snapshot error testing
- */
-TEST_F(CopysetClientTest, delete_snapshot_error_test) {
-    MockChunkServiceImpl mockChunkService;
-    ASSERT_EQ(server_->AddService(&mockChunkService,
-                                  brpc::SERVER_DOESNT_OWN_SERVICE), 0);
-    ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0);
-
-    IOSenderOption ioSenderOpt;
-    ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000;
-    ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3;
-    ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500;
-
-    CopysetClient copysetClient;
-    MockMetaCache mockMetaCache;
-    mockMetaCache.DelegateToFake();
-    RequestScheduler scheduler;
-    copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler);
-
-    LogicPoolID logicPoolId = 1;
-    CopysetID copysetId = 100001;
-    ChunkID chunkId = 1;
-    uint64_t sn = 1;
-
-    ChunkServerID leaderId = 10000;
-    butil::EndPoint leaderAddr;
-    std::string leaderStr = "127.0.0.1:9109";
-    butil::str2endpoint(leaderStr.c_str(), &leaderAddr);
-
-    FileMetric fm("test");
-    IOTracker iot(nullptr, nullptr, nullptr, &fm);
-
-    /* Invalid parameter */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                                              SetArgPointee<3>(leaderAddr),
-                                              Return(0)));
-        EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _))  // NOLINT
-            .Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(response),
-                            Invoke(DeleteChunkSnapshotFunc)));
-        copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_,
-                                                     sn, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST,
-                  reqDone->GetErrorCode());
-    }
-    /* controller error */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        gReadCntlFailedCode = -1;
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _))  // NOLINT
-            .Times(3)
-            .WillRepeatedly(Invoke(DeleteChunkSnapshotFunc));
-        copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_,
-                                                     sn, reqDone);
-        cond.Wait();
-        ASSERT_NE(0, reqDone->GetErrorCode());
-        gReadCntlFailedCode = 0;
-    }
-    /* Other errors */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                           SetArgPointee<3>(leaderAddr),
-                                           Return(0)));
-        EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _))  // NOLINT
-            .Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(DeleteChunkSnapshotFunc)));
-        copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_,
-                                                     sn, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN,
-                  reqDone->GetErrorCode());
-    }
-    /* Not the leader; the correct leader is returned */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-        response1.set_redirect(leaderStr);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)))
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)));
-        EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(1)
-            .WillOnce(Return(0));
-        EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _))  // NOLINT
-            .Times(2)
-            .WillOnce(DoAll(SetArgPointee<2>(response1),
-                            Invoke(ReadChunkSnapshotFunc)))
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(DeleteChunkSnapshotFunc)));
-        copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_,
-                                                     sn, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-    /* Not the leader and no leader returned; refreshing the meta cache succeeds */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _))  // NOLINT
-            .Times(2)
-            .WillOnce(DoAll(SetArgPointee<2>(response1),
-                            Invoke(DeleteChunkSnapshotFunc)))
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(DeleteChunkSnapshotFunc)));
-        copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_,
-                                                     sn, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-    /* Not the leader and no leader returned; refreshing the meta cache fails */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)))
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(-1)))
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)));
-        EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _))  // NOLINT
-            .Times(2)
-            .WillOnce(DoAll(SetArgPointee<2>(response1),
-                            Invoke(DeleteChunkSnapshotFunc)))
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(DeleteChunkSnapshotFunc)));
-        copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_,
-                                                     sn, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-    /* Not the leader, and the returned leader is wrong */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-        response.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3)
-            .WillRepeatedly(Return(0));
-        EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _))  // NOLINT
-            .Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(DeleteChunkSnapshotFunc)));
-        copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_,
-                                                     sn, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED,
-                  reqDone->GetErrorCode());
-    }
-    /* Copyset does not exist; updating the leader still fails */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST);
-        response.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _))  // NOLINT
-            .Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(DeleteChunkSnapshotFunc)));
-        copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_,
-                                                     sn, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST,
-                  reqDone->GetErrorCode());
-    }
-    /* Copyset does not exist; updating the leader succeeds */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST);
-        response1.set_redirect(leaderStr);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        response2.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _))  // NOLINT
-            .Times(2)
-            .WillOnce(DoAll(SetArgPointee<2>(response1),
-                            Invoke(DeleteChunkSnapshotFunc)))
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(DeleteChunkSnapshotFunc)));
-        copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_,
-                                                     sn, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).
-            Times(AtLeast(1))
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(-1)))
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(-1)))
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)));
-        EXPECT_CALL(mockChunkService, DeleteChunkSnapshotOrCorrectSn(_, _, _, _))  // NOLINT
-            .Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(response),
-                            Invoke(DeleteChunkSnapshotFunc)));
-        copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_,
-                                                     sn, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-}
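The Times(6)/Times(3) pairing that recurs above (six GetLeader expectations for three RPC attempts under chunkserverOPMaxRetry = 3) is consistent with a retry loop that resolves the leader once before each attempt and refreshes it once more after each failure. A hypothetical sketch of that control flow follows; it is an inference from the expectations, not the actual CopysetClient implementation.

#include <functional>

int RetryWithLeaderRefresh(int maxRetry,
                           const std::function<int()>& getLeader,
                           const std::function<int()>& callRpc) {
    int rc = -1;
    for (int attempt = 0; attempt < maxRetry; ++attempt) {
        getLeader();    // leader lookup before the attempt
        rc = callRpc(); // one ReadChunk / DeleteChunkSnapshot... RPC
        if (rc == 0) {
            return rc;  // success: stop retrying
        }
        getLeader();    // refresh after the failure -> two lookups per attempt
    }
    return rc;          // all attempts failed: 3 RPCs, 6 lookups
}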
"destination", sn, 1, 1024, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - gReadCntlFailedCode = 0; - } - // /* 其他错误 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(3) // NOLINT - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, - reqDone->GetErrorCode()); - } - /* op success */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(2) // NOLINT - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(CreateCloneChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* 不是 leader,但是没有返回 leader,刷新 meta 
cache 失败 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(2) // NOLINT - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(CreateCloneChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* 不是 leader,但返回的是错误 leader */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3) - .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(3) // NOLINT - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, - reqDone->GetErrorCode()); - } - /* copyset 不存在,更新 leader 依然失败 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(3) // NOLINT - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); - cond.Wait(); - 
ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - reqDone->GetErrorCode()); - } - /* copyset 不存在,更新 leader 成功 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response1.set_redirect(leaderStr); - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(2) // NOLINT - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(CreateCloneChunkFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) - .WillOnce(Return(-1)) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)).Times(1) // NOLINT - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(CreateCloneChunkFunc))); - copysetClient.CreateCloneChunk(reqCtx->idinfo_, - "destination", sn, 1, 1024, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } -} - - -/** - * recover chunk error testing - */ -TEST_F(CopysetClientTest, recover_chunk_error_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ(server_->AddService(&mockChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - RequestScheduler scheduler; - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - uint64_t sn = 1; - - ChunkServerID leaderId = 10000; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); - - /* 非法参数 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - ChunkResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(RecoverChunkFunc))); - copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, - reqDone->GetErrorCode()); - } - /* controller error */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::DELETE_SNAP; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - reqCtx->correctedSeq_ = sn; - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - - reqCtx->done_ = reqDone; - gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(3) - .WillRepeatedly(Invoke(RecoverChunkFunc)); - copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - gReadCntlFailedCode = 0; - } - /* 其他错误 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = 
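The recurring action DoAll(SetArgPointee<2>(response), Invoke(Func)) first copies a prepared response into the RPC's output argument and then runs the fake service body, which fires the done closure. A reduced, runnable sketch of the same composite action (the Resp/Service types are illustrative stand-ins, not Curve or brpc types):

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <functional>

struct Resp { int status = 0; };

struct Service {
    virtual ~Service() = default;
    virtual void Rpc(const int* req, Resp* resp, std::function<void()> done) = 0;
};

struct MockService : Service {
    MOCK_METHOD(void, Rpc, (const int* req, Resp* resp, std::function<void()> done),
                (override));
};

// Stands in for the ReadChunkFunc/CreateCloneChunkFunc fakes: just run `done`.
void RpcBody(const int*, Resp*, std::function<void()> done) { done(); }

TEST(MockActionSketch, FillResponseThenRunBody) {
    using ::testing::_;
    using ::testing::DoAll;
    using ::testing::Invoke;
    using ::testing::SetArgPointee;

    MockService svc;
    Resp prepared;
    prepared.status = 42;
    EXPECT_CALL(svc, Rpc(_, _, _))
        .WillOnce(DoAll(SetArgPointee<1>(prepared), Invoke(RpcBody)));

    Resp out;
    bool completed = false;
    svc.Rpc(nullptr, &out, [&completed] { completed = true; });
    ASSERT_EQ(42, out.status);  // SetArgPointee copied the prepared response
    ASSERT_TRUE(completed);     // Invoke ran the body, which fired the closure
}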
-
-
-/**
- * recover chunk error testing
- */
-TEST_F(CopysetClientTest, recover_chunk_error_test) {
-    MockChunkServiceImpl mockChunkService;
-    ASSERT_EQ(server_->AddService(&mockChunkService,
-                                  brpc::SERVER_DOESNT_OWN_SERVICE), 0);
-    ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0);
-
-    IOSenderOption ioSenderOpt;
-    ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000;
-    ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3;
-    ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500;
-
-    CopysetClient copysetClient;
-    MockMetaCache mockMetaCache;
-    mockMetaCache.DelegateToFake();
-    RequestScheduler scheduler;
-    copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler);
-
-    LogicPoolID logicPoolId = 1;
-    CopysetID copysetId = 100001;
-    ChunkID chunkId = 1;
-    uint64_t sn = 1;
-
-    ChunkServerID leaderId = 10000;
-    butil::EndPoint leaderAddr;
-    std::string leaderStr = "127.0.0.1:9109";
-    butil::str2endpoint(leaderStr.c_str(), &leaderAddr);
-
-    FileMetric fm("test");
-    IOTracker iot(nullptr, nullptr, nullptr, &fm);
-
-    /* Invalid parameter */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                                              SetArgPointee<3>(leaderAddr),
-                                              Return(0)));
-        EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(response),
-                            Invoke(RecoverChunkFunc)));
-        copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST,
-                  reqDone->GetErrorCode());
-    }
-    /* controller error */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        gReadCntlFailedCode = -1;
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(3)
-            .WillRepeatedly(Invoke(RecoverChunkFunc));
-        copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone);
-        cond.Wait();
-        ASSERT_NE(0, reqDone->GetErrorCode());
-        gReadCntlFailedCode = 0;
-    }
-    /* Other errors */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _))
-            .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                           SetArgPointee<3>(leaderAddr),
-                                           Return(0)));
-        EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(RecoverChunkFunc)));
-        copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN,
-                  reqDone->GetErrorCode());
-    }
-    /* Not the leader; the correct leader is returned */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-        response1.set_redirect(leaderStr);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)));
-        EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(RecoverChunkFunc)));
-        copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-    /* Not the leader and no leader returned; refreshing the meta cache succeeds */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)))
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)))
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)));
-        EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(2)
-            .WillOnce(DoAll(SetArgPointee<2>(response1),
-                            Invoke(RecoverChunkFunc)))
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(RecoverChunkFunc)));
-        copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-    /* Not the leader and no leader returned; refreshing the meta cache fails */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)))
-            .WillOnce(Return(-1))
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)));
-        EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(2)
-            .WillOnce(DoAll(SetArgPointee<2>(response1),
-                            Invoke(RecoverChunkFunc)))
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(RecoverChunkFunc)));
-        copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-    /* Not the leader, and the returned leader is wrong */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED);
-        response.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3)
-            .WillRepeatedly(Return(0));
-        EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(RecoverChunkFunc)));
-        copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED,
-                  reqDone->GetErrorCode());
-    }
-    /* Copyset does not exist; updating the leader still fails */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST);
-        response.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(response),
-                                  Invoke(RecoverChunkFunc)));
-        copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST,
-                  reqDone->GetErrorCode());
-    }
-    /* Copyset does not exist; updating the leader succeeds */
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-
-        reqCtx->done_ = reqDone;
-        ChunkResponse response1;
-        response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST);
-        response1.set_redirect(leaderStr);
-        ChunkResponse response2;
-        response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        response2.set_redirect(leaderStr);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3)
-            .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId),
-                                  SetArgPointee<3>(leaderAddr),
-                                  Return(0)));
-        EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(2)
-            .WillOnce(DoAll(SetArgPointee<2>(response1),
-                            Invoke(RecoverChunkFunc)))
-            .WillOnce(DoAll(SetArgPointee<2>(response2),
-                            Invoke(RecoverChunkFunc)));
-        copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-    {
-        RequestContext *reqCtx = new FakeRequestContext();
-        reqCtx->optype_ = OpType::DELETE_SNAP;
-        reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-
-        reqCtx->correctedSeq_ = sn;
-
-        curve::common::CountDownEvent cond(1);
-        RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
-        reqDone->SetFileMetric(&fm);
-        reqDone->SetIOTracker(&iot);
-        reqCtx->done_ = reqDone;
-        ChunkResponse response;
-        response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
-        EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).
-            Times(AtLeast(1))
-            .WillOnce(Return(-1))
-            .WillOnce(Return(-1))
-            .WillOnce(DoAll(SetArgPointee<2>(leaderId),
-                            SetArgPointee<3>(leaderAddr),
-                            Return(0)));
-        EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)).Times(1)
-            .WillOnce(DoAll(SetArgPointee<2>(response),
-                            Invoke(RecoverChunkFunc)));
-        copysetClient.RecoverChunk(reqCtx->idinfo_,
-                                   0, 4096, reqDone);
-        cond.Wait();
-        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
-                  reqDone->GetErrorCode());
-    }
-}
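The controller-error cases above toggle the global gReadCntlFailedCode before the call and reset it afterwards; the fake service functions are then expected to fail the RPC controller instead of filling in a status, which exercises transport-level rather than status-level error handling. A hypothetical reduction of that injection pattern (the FakeController type is an illustrative stand-in, not the Curve or brpc fakes):

static int gFailCode = 0;  // set to a non-zero value by a test to inject failure

struct FakeController {
    int errorCode = 0;
    bool Failed() const { return errorCode != 0; }
};

void FakeRpcBody(FakeController* cntl) {
    if (gFailCode != 0) {
        cntl->errorCode = gFailCode;  // simulate a transport-level failure,
        return;                       // so no response status is ever filled in
    }
    // normal path: fill the response and report success
}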
response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, - reqDone->GetErrorCode()); - } - /* controller error */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - gReadCntlFailedCode = -1; - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(3) - .WillRepeatedly(Invoke(GetChunkInfoFunc)); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_NE(0, reqDone->GetErrorCode()); - gReadCntlFailedCode = 0; - } - /* 其他错误 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - GetChunkInfoResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3).WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, - reqDone->GetErrorCode()); - } - /* 不是 leader,返回正确的 leader */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - GetChunkInfoResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response1.set_redirect(leaderStr); - GetChunkInfoResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(GetChunkInfoFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - 
ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - GetChunkInfoResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - GetChunkInfoResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(GetChunkInfoFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - GetChunkInfoResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - GetChunkInfoResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) - .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(GetChunkInfoFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - /* 不是 leader,但返回的是错误 leader */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - GetChunkInfoResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(3) - .WillRepeatedly(Return(0)); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - 
ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, - reqDone->GetErrorCode()); - } - /* The copyset does not exist and updating the leader still fails */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - GetChunkInfoResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - reqDone->GetErrorCode()); - } - /* The copyset does not exist and updating the leader succeeds */ - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - GetChunkInfoResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); - response1.set_redirect(leaderStr); - GetChunkInfoResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - response2.set_redirect(leaderStr); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)).Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(2) - .WillOnce(DoAll(SetArgPointee<2>(response1), - Invoke(GetChunkInfoFunc))) - .WillOnce(DoAll(SetArgPointee<2>(response2), - Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - { - RequestContext *reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::GET_CHUNK_INFO; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - - - curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - GetChunkInfoResponse response; - response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)). 
- Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(-1))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(-1))) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), - Return(0))); - EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)).Times(1) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke(GetChunkInfoFunc))); - copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); - cond.Wait(); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } -} - -namespace { - -bool gWriteSuccessFlag = false; - -void WriteCallBack(CurveAioContext* aioctx) { - gWriteSuccessFlag = true; - delete aioctx; -} - -void PrepareOpenFile(FakeCurveFSService *service, - OpenFileResponse *openresp, - FakeReturn *fakeReturn) { - openresp->set_statuscode(curve::mds::StatusCode::kOK); - auto *session = openresp->mutable_protosession(); - session->set_sessionid("xxx"); - session->set_leasetime(10000); - session->set_createtime(10000); - session->set_sessionstatus(curve::mds::SessionStatus::kSessionOK); - auto *fileinfo = openresp->mutable_fileinfo(); - fileinfo->set_id(1); - fileinfo->set_filename("filename"); - fileinfo->set_parentid(0); - fileinfo->set_length(10ULL * 1024 * 1024 * 1024); - fileinfo->set_blocksize(4096); - - *fakeReturn = FakeReturn(nullptr, static_cast<void *>(openresp)); - - service->SetOpenFile(fakeReturn); -} - -} // namespace - -TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) { - const std::string endpoint = "127.0.0.1:9102"; - - ClientConfig cc; - const std::string& configPath = "./conf/client.conf"; - cc.Init(configPath.c_str()); - FileInstance fileinstance; - UserInfo userinfo; - userinfo.owner = "userinfo"; - - std::shared_ptr<MDSClient> mdsclient = std::make_shared<MDSClient>(); - - // set mds addr - auto mdsopts = cc.GetFileServiceOption().metaServerOpt; - mdsopts.rpcRetryOpt.addrs.clear(); - mdsopts.rpcRetryOpt.addrs.push_back(endpoint); - - ASSERT_EQ(LIBCURVE_ERROR::OK, mdsclient->Initialize(mdsopts)); - ASSERT_TRUE(fileinstance.Initialize( - "/test", mdsclient, userinfo, OpenFlags{}, cc.GetFileServiceOption())); - - // create fake chunkserver service - FakeChunkServerService fakechunkservice; - // Set up the CLI service - CliServiceFake fakeCliservice; - - FakeCurveFSService curvefsService; - OpenFileResponse openresp; - FakeReturn fakeReturn; - - PrepareOpenFile(&curvefsService, &openresp, &fakeReturn); - - brpc::Server server; - ASSERT_EQ(0, server.AddService(&fakechunkservice, - brpc::SERVER_DOESNT_OWN_SERVICE)) << "Fail to add fakechunkservice"; - ASSERT_EQ(0, server.AddService(&fakeCliservice, - brpc::SERVER_DOESNT_OWN_SERVICE)) << "Fail to add fakecliservice"; - ASSERT_EQ( - 0, server.AddService(&curvefsService, brpc::SERVER_DOESNT_OWN_SERVICE)) - << "Fail to add curvefsService"; - - ASSERT_EQ(0, server.Start(endpoint.c_str(), nullptr)) - << "Fail to start server at " << endpoint; - - // fill metacache - curve::client::MetaCache* mc = - fileinstance.GetIOManager4File()->GetMetaCache(); - curve::client::ChunkIDInfo_t chunkinfo(1, 2, 3); - mc->UpdateChunkInfoByIndex(0, chunkinfo); - curve::client::CopysetInfo<curve::client::ChunkServerID> cpinfo; - curve::client::EndPoint ep; - butil::str2endpoint("127.0.0.1", 9102, &ep); - - braft::PeerId pd(ep); - curve::client::PeerAddr addr = curve::client::PeerAddr(ep); - curve::client::CopysetPeerInfo<curve::client::ChunkServerID> peer(1, addr, addr); - cpinfo.csinfos_.push_back(peer); - mc->UpdateCopysetInfo(2, 3, cpinfo); - - fakeCliservice.SetPeerID(pd); - 
curve::chunkserver::ChunkResponse response; - response.set_status( - curve::chunkserver::CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - response.set_appliedindex(0); - FakeReturn writeFakeRet(nullptr, static_cast<void *>(&response)); - fakechunkservice.SetFakeWriteReturn(&writeFakeRet); - - const int kNewFileSn = 100; - const int kOldFileSn = 30; - - ASSERT_EQ(LIBCURVE_ERROR::OK, fileinstance.Open()); - - // Set the file sequence number - fileinstance.GetIOManager4File()->SetLatestFileSn(kNewFileSn); - - // Send a write request, then wait sec seconds and check whether the IO has returned - auto startWriteAndCheckResult = [&fileinstance](int sec)-> bool { // NOLINT - CurveAioContext* aioctx = new CurveAioContext(); - char buffer[4096]; - - aioctx->buf = buffer; - aioctx->offset = 0; - aioctx->length = sizeof(buffer); - aioctx->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; - aioctx->cb = WriteCallBack; - - // Dispatch the write request - fileinstance.AioWrite(aioctx, UserDataType::RawBuffer); - - std::this_thread::sleep_for(std::chrono::seconds(sec)); - return gWriteSuccessFlag; - }; - - // The first write succeeds and updates the file sequence number on the chunkserver side - ASSERT_TRUE(startWriteAndCheckResult(3)); - - // Write with a stale sequence number - fileinstance.GetIOManager4File()->SetLatestFileSn(kOldFileSn); - gWriteSuccessFlag = false; - - // The chunkserver returns backward; the re-fetched sequence number is still stale - // IO hangs - ASSERT_FALSE(startWriteAndCheckResult(3)); - - // Restore the sequence number to its normal value - fileinstance.GetIOManager4File()->SetLatestFileSn(kNewFileSn); - std::this_thread::sleep_for(std::chrono::seconds(1)); - - // The previous write request has now succeeded - ASSERT_EQ(true, gWriteSuccessFlag); - - server.Stop(0); - server.Join(); -} - -TEST_F(CopysetClientTest, retry_rpc_sleep_test) { - MockChunkServiceImpl mockChunkService; - ASSERT_EQ( - server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), - 0); - ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); - - const uint64_t sleepUsBeforeRetry = 5 * 1000 * 1000; - - IOSenderOption ioSenderOpt; - ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 1000; - ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = - sleepUsBeforeRetry; - ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 3500; - ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 3500000; - - RequestScheduleOption reqopt; - reqopt.ioSenderOpt = ioSenderOpt; - - CopysetClient copysetClient; - MockMetaCache mockMetaCache; - mockMetaCache.DelegateToFake(); - - RequestScheduler scheduler; - scheduler.Init(reqopt, &mockMetaCache); - scheduler.Run(); - copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); - - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - ChunkID chunkId = 1; - uint64_t fileId = 1; - uint64_t epoch = 1; - size_t len = 8; - char buff1[8] = {0}; - butil::IOBuf iobuf; - iobuf.append(buff1, sizeof(len)); - off_t offset = 0; - - ChunkServerID leaderId = 10000; - ChunkServerID leaderId2 = 10001; - butil::EndPoint leaderAddr; - std::string leaderStr = "127.0.0.1:9109"; - butil::str2endpoint(leaderStr.c_str(), &leaderAddr); - - FileMetric fm("test"); - IOTracker iot(nullptr, nullptr, nullptr, &fm); + static void ReadChunkSnapshotFunc( + ::google::protobuf::RpcController *controller, + const ::curve::chunkserver::ChunkRequest *request, // NOLINT + ::curve::chunkserver::ChunkResponse *response, // NOLINT + google::protobuf::Closure *done) + { + brpc::ClosureGuard doneGuard(done); + if (0 != gReadCntlFailedCode) + { + brpc::Controller *cntl = dynamic_cast<brpc::Controller *>(controller); + cntl->SetFailed(-1, "read snapshot controller error"); + } + } - { - // In the redirect case, the chunkserver returns a new leader - // No sleep before the retry - 
RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - // reqCtx->writeBuffer_ = buff1; - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response1.set_redirect(leaderStr); - - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId2), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); - - auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, - reqDone); - cond.Wait(); - auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); - - // A new leader id is returned, so there is no sleep before the retry - ASSERT_LE(endUs - startUs, sleepUsBeforeRetry / 10); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } + static void DeleteChunkSnapshotFunc( + ::google::protobuf::RpcController *controller, // NOLINT + const ::curve::chunkserver::ChunkRequest *request, // NOLINT + ::curve::chunkserver::ChunkResponse *response, + google::protobuf::Closure *done) + { + brpc::ClosureGuard doneGuard(done); + if (0 != gReadCntlFailedCode) + { + brpc::Controller *cntl = dynamic_cast<brpc::Controller *>(controller); + cntl->SetFailed(-1, "delete snapshot controller error"); + } + } - { - // In the redirect case, the chunkserver returns the old leader - // Sleep before the retry - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - // reqCtx->writeBuffer_ = buff1; - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - response1.set_redirect(leaderStr); - - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); - auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, - reqDone); - cond.Wait(); - auto endUs = 
curve::common::TimeUtility::GetTimeofDayUs(); - - // The same leader id is returned, so there is a sleep before the retry - ASSERT_GE(endUs - startUs, sleepUsBeforeRetry / 10); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } + static void CreateCloneChunkFunc( + ::google::protobuf::RpcController *controller, + const ::curve::chunkserver::ChunkRequest *request, + ::curve::chunkserver::ChunkResponse *response, + google::protobuf::Closure *done) + { + brpc::ClosureGuard doneGuard(done); + if (0 != gReadCntlFailedCode) + { + brpc::Controller *cntl = dynamic_cast<brpc::Controller *>(controller); + cntl->SetFailed(-1, "create clone chunk controller error"); + } + } - { - // In the redirect case, the chunkserver does not return a leader - // An active refresh obtains a new leader - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - // reqCtx->writeBuffer_ = buff1; - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId2), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(0); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); - auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, - reqDone); - cond.Wait(); - auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); - - // A new leader id is returned, so there is no sleep before the retry - ASSERT_LE(endUs - startUs, sleepUsBeforeRetry / 10); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } + static void RecoverChunkFunc( + ::google::protobuf::RpcController *controller, // NOLINT + const ::curve::chunkserver::ChunkRequest *request, // NOLINT + ::curve::chunkserver::ChunkResponse *response, + google::protobuf::Closure *done) + { + brpc::ClosureGuard doneGuard(done); + if (0 != gReadCntlFailedCode) + { + brpc::Controller *cntl = dynamic_cast<brpc::Controller *>(controller); + cntl->SetFailed(-1, "recover chunk controller error"); + } + } - { - // In the redirect case, the chunkserver does not return a leader - // An active refresh obtains the old leader - RequestContext* reqCtx = new FakeRequestContext(); - reqCtx->optype_ = OpType::WRITE; - reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - // reqCtx->writeBuffer_ = buff1; - reqCtx->writeData_ = iobuf; - reqCtx->offset_ = 0; - reqCtx->rawlength_ = len; - - curve::common::CountDownEvent cond(1); - RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); - reqDone->SetFileMetric(&fm); - reqDone->SetIOTracker(&iot); - reqCtx->done_ = reqDone; - - ChunkResponse response1; - response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); - - ChunkResponse response2; - response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); - - EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) - 
.Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), - SetArgPointee<3>(leaderAddr), Return(0))); - EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(0); - EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) - .Times(2) - .WillOnce( - DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) - .WillOnce( - DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); - auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, - iobuf, offset, len, {}, - reqDone); - cond.Wait(); - auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); - - ASSERT_GE(endUs - startUs, sleepUsBeforeRetry / 10); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - reqDone->GetErrorCode()); - } - scheduler.Fini(); -} - -class TestRunnedRequestClosure : public RequestClosure { - public: - TestRunnedRequestClosure() : RequestClosure(nullptr) {} - - void Run() override { - runned_ = true; - } - - bool IsRunned() const { - return runned_; - } - - private: - bool runned_ = false; -}; - -// Test that after the session becomes invalid, retried requests are put back -// into the request queue -TEST(CopysetClientBasicTest, TestReScheduleWhenSessionNotValid) { - MockRequestScheduler requestScheduler; - CopysetClient copysetClient; - IOSenderOption ioSenderOption; - MetaCache metaCache; - - ASSERT_EQ(0, copysetClient.Init(&metaCache, ioSenderOption, - &requestScheduler, nullptr)); - - // Mark the session as not valid - copysetClient.StartRecycleRetryRPC(); + static void GetChunkInfoFunc( + ::google::protobuf::RpcController *controller, + const ::curve::chunkserver::GetChunkInfoRequest *request, // NOLINT + ::curve::chunkserver::GetChunkInfoResponse *response, // NOLINT + google::protobuf::Closure *done) + { + brpc::ClosureGuard doneGuard(done); + if (0 != gReadCntlFailedCode) + { + brpc::Controller *cntl = dynamic_cast<brpc::Controller *>(controller); + cntl->SetFailed(-1, "get chunk info controller error"); + } + } - { - EXPECT_CALL(requestScheduler, ReSchedule(_)) - .Times(1); + TEST_F(CopysetClientTest, normal_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + mockMetaCache.DelegateToFake(); + + RequestScheduler scheduler; + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + uint64_t fileId = 1; + uint64_t epoch = 1; + uint64_t sn = 1; + size_t len = 8; + char buff1[8 + 1]; + char buff2[8 + 1]; + memset(buff1, 'a', 8); + memset(buff2, 'a', 8); + buff1[8] = '\0'; + buff2[8] = '\0'; + off_t offset = 0; + + butil::IOBuf iobuf; + iobuf.append(buff1, sizeof(buff1) - 1); + + ChunkServerID leaderId = 10000; + butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + iot.PrepareReadIOBuffers(1); + + // write success + for (int i = 0; i < 10; ++i) + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + + reqCtx->offset_ = i * 8; + 
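+            // Each iteration issues an independent 8-byte write at offset
+            // i * 8; with the mocked service returning SUCCESS every time,
+            // all ten writes complete successfully.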
reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = offset; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = offset; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(Return(-1)) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + // read success + for (int i = 0; i < 10; ++i) + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = i * 8; + reqCtx->rawlength_ = len; + reqCtx->subIoIndex_ = 0; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + 
response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = offset; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = offset; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(Return(-1)) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + } - TestRunnedRequestClosure closure; - copysetClient.ReadChunk({}, 0, 0, 0, {}, &closure); - ASSERT_FALSE(closure.IsRunned()); - } + /** + * write error testing + */ + TEST_F(CopysetClientTest, write_error_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 1000; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 5000; + ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 3500; + ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 3500000; + + RequestScheduleOption reqopt; + reqopt.ioSenderOpt = ioSenderOpt; + + CopysetClient copysetClient; + MockMetaCache 
mockMetaCache; + mockMetaCache.DelegateToFake(); + + RequestScheduler scheduler; + scheduler.Init(reqopt, &mockMetaCache); + scheduler.Run(); + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + uint64_t fileId = 1; + uint64_t epoch = 1; + size_t len = 8; + char buff1[8 + 1]; + char buff2[8 + 1]; + memset(buff1, 'a', 8); + memset(buff2, 'a', 8); + buff1[8] = '\0'; + buff2[8] = '\0'; + off_t offset = 0; + + butil::IOBuf iobuf; + iobuf.append(buff1, sizeof(buff1) - 1); + + ChunkServerID leaderId = 10000; + butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + + /* Illegal parameter */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, + reqDone->GetErrorCode()); + } + /* controller error */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + // The configured retry sleep interval is 5000 us; controller errors do + // not trigger the underlying exponential backoff, so each retry sleeps + // only the base interval, and the two sleeps total more than 10000 us + uint64_t start = TimeUtility::GetTimeofDayUs(); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, 
reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + gWriteCntlFailedCode = -1; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly(Invoke(WriteChunkFunc)); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + + uint64_t end = TimeUtility::GetTimeofDayUs(); + ASSERT_GT(end - start, 10000); + gWriteCntlFailedCode = 0; + } + /* controller set timeout */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + // The configured RPC timeout is 1000 ms. Because the chunkserver keeps + // returning timeouts, exponential backoff is applied to the underlying + // timeout, lengthening each attempt. Three retries would normally need + // only 3 * 1000 ms, but with the backoff the timeouts grow to roughly + // 1000 + 2000 + 2000 = 5000 ms; allowing for the random factor, the + // three attempts should take more than 3000 ms and less than 6000 ms + uint64_t start = TimeUtility::GetTimeofDayMs(); + + reqCtx->done_ = reqDone; + gWriteCntlFailedCode = brpc::ERPCTIMEDOUT; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(3)) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly(Invoke(WriteChunkFunc)); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + + uint64_t end = TimeUtility::GetTimeofDayMs(); + ASSERT_GT(end - start, 3000); + ASSERT_LT(end - start, 6000); + std::this_thread::sleep_for(std::chrono::seconds(8)); + + gWriteCntlFailedCode = 0; + } + + /* Set overload */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + // The configured retry sleep interval is 5000 us. Because the + // chunkserver returns OVERLOAD, exponential backoff is applied to the + // sleep interval, lengthening each retry. Three retries would normally + // sleep only 3 * 5000 us, but with the backoff the sleeps grow to + // 10000 + 20000 = 30000 us; allowing for the random factor, the total + // should be greater than 28000 us and less than 100000 us + uint64_t start = TimeUtility::GetTimeofDayUs(); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, + reqDone->GetErrorCode()); + + uint64_t end = TimeUtility::GetTimeofDayUs(); + ASSERT_GT(end - start, 28000); + ASSERT_LT(end - start, 2 * 50000); + gWriteCntlFailedCode = 0; + } + + /* Other errors */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, + reqDone->GetErrorCode()); + } + /* Not a leader, returning the correct leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) + .WillOnce(Return(0)); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + 
reqDone->GetErrorCode()); + + ASSERT_EQ(1, fm.writeRPC.redirectQps.count.get_value()); + } + /* Not a leader, did not return a leader, refreshing the meta cache + * succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + // response1.set_redirect(leaderStr2); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, did not return a leader, refreshing the meta cache failed + */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + // response1.set_redirect(leaderStr2); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but returned an incorrect leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + FileMetric fm("test"); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response.set_redirect(leaderStr); + 
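+            // The redirect response below points at the leader that is
+            // already cached, so every retry is redirected again until the
+            // retry budget (chunkserverOPMaxRetry = 3) is used up.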
EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) + .WillRepeatedly(Return(0)); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + auto startTimeUs = curve::common::TimeUtility::GetTimeofDayUs(); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + auto elapsed = + curve::common::TimeUtility::GetTimeofDayUs() - startTimeUs; + // chunkserverOPRetryIntervalUS = 5000: in the redirect case each retry + // sleeps 500 us, and only two retries actually sleep + // (chunkserverOPMaxRetry = 3, and the request returns once the retry + // count reaches the maximum), so the total time spent is greater + // than 1000 us + ASSERT_GE(elapsed, 1000); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, + reqDone->GetErrorCode()); + ASSERT_EQ(3, fm.writeRPC.redirectQps.count.get_value()); + } + /* copyset does not exist, updating leader still failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response2.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + 
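+            // The first attempt observes COPYSET_NOTEXIST; the leader
+            // information is refreshed and the retried write succeeds, so
+            // the closure should report CHUNK_OP_STATUS_SUCCESS.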
ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + // epoch too old + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(1) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD, + reqDone->GetErrorCode()); + } + + scheduler.Fini(); + } - { - EXPECT_CALL(requestScheduler, ReSchedule(_)) - .Times(1); + /** + * write failed testing + */ + TEST_F(CopysetClientTest, write_failed_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 500; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 50; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 5000; + ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 1000; + ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 100000; + + RequestScheduleOption reqopt; + reqopt.ioSenderOpt = ioSenderOpt; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + mockMetaCache.DelegateToFake(); + + RequestScheduler scheduler; + scheduler.Init(reqopt, &mockMetaCache); + scheduler.Run(); + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + uint64_t fileId = 1; + uint64_t epoch = 1; + size_t len = 8; + char buff1[8 + 1]; + char buff2[8 + 1]; + memset(buff1, 'a', 8); + memset(buff2, 'a', 8); + buff1[8] = '\0'; + buff2[8] = '\0'; + off_t offset = 0; + butil::IOBuf iobuf; + iobuf.append(buff1, sizeof(buff1) - 1); + + ChunkServerID leaderId = 10000; + butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + + /* controller set timeout */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + // The configured RPC timeout is 500 ms. Because the chunkserver keeps + // returning timeouts, exponential backoff is applied to the underlying + // timeout, lengthening each retry. Retrying 50 times would normally + // need only about 50 * 500 ms, but with the per-RPC timeout capped at + // chunkserverMaxRPCTimeoutMS = 1000 the total grows to roughly + // 49 * 1000 = 49000 ms + uint64_t start = TimeUtility::GetTimeofDayMs(); + + reqCtx->done_ = reqDone; + gWriteCntlFailedCode = brpc::ERPCTIMEDOUT; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(50)) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(50) + .WillRepeatedly(Invoke(WriteChunkFunc)); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + + uint64_t end = TimeUtility::GetTimeofDayMs(); + ASSERT_GT(end - start, 25000); + ASSERT_LT(end - start, 55000); + std::this_thread::sleep_for(std::chrono::seconds(8)); + + gWriteCntlFailedCode = 0; + } + + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + // The configured retry sleep interval is 5000 us. Because the + // chunkserver returns OVERLOAD, exponential backoff is applied to the + // sleep interval, lengthening each retry. Retrying 50 times would + // normally sleep only 49 * 5000 us, but with the backoff the sleeps + // grow like 10000 + 20000 + 40000 + ..., capped at + // chunkserverMaxRetrySleepIntervalUS = 100000, totalling roughly + // 4650000 us + uint64_t start = TimeUtility::GetTimeofDayUs(); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(50) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(50) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, + reqDone->GetErrorCode()); + + uint64_t end = TimeUtility::GetTimeofDayUs(); + ASSERT_GT(end - start, 250000); + ASSERT_LT(end - start, 4650000); + gWriteCntlFailedCode = 0; + } + scheduler.Fini(); + } + + /** + * read failed testing + */ + TEST_F(CopysetClientTest, read_failed_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 500; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 50; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 5000; + ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 1000; + ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 100000; + + RequestScheduleOption reqopt; + reqopt.ioSenderOpt = ioSenderOpt; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + mockMetaCache.DelegateToFake(); + + RequestScheduler scheduler; + scheduler.Init(reqopt, &mockMetaCache); + scheduler.Run(); + 
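+        // Note: the scheduler is started before it is handed to the client
+        // below; requests that need to be retried are re-queued through it.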
copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + uint64_t sn = 1; + size_t len = 8; + char buff1[8 + 1]; + char buff2[8 + 1]; + memset(buff1, 'a', 8); + memset(buff2, 'a', 8); + buff1[8] = '\0'; + buff2[8] = '\0'; + off_t offset = 0; + + ChunkServerID leaderId = 10000; + butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + iot.PrepareReadIOBuffers(1); + + /* controller set timeout */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + // The configured RPC timeout is 500 ms. Because the chunkserver keeps + // returning timeouts, exponential backoff is applied to the underlying + // timeout, lengthening each retry. Retrying 50 times would normally + // need only about 50 * 500 ms, but with the per-RPC timeout capped at + // chunkserverMaxRPCTimeoutMS = 1000 the total grows to roughly + // 49 * 1000 = 49000 ms, within the asserted 25000-60000 ms window + uint64_t start = TimeUtility::GetTimeofDayMs(); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + gReadCntlFailedCode = brpc::ERPCTIMEDOUT; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(50)) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(50) + .WillRepeatedly(Invoke(ReadChunkFunc)); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + + uint64_t end = TimeUtility::GetTimeofDayMs(); + ASSERT_GT(end - start, 25000); + ASSERT_LT(end - start, 60000); + + std::this_thread::sleep_for(std::chrono::seconds(8)); + + gReadCntlFailedCode = 0; + } + + /* Set overload */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + // The configured retry sleep interval is 5000 us. Because the + // chunkserver returns OVERLOAD, exponential backoff is applied to the + // sleep interval, lengthening each retry. Retrying 50 times would + // normally sleep only 49 * 5000 us, but with the backoff the sleeps + // grow like 10000 + 20000 + 40000 + ..., capped at + // chunkserverMaxRetrySleepIntervalUS = 100000, totalling roughly + // 4650000 us + uint64_t start = TimeUtility::GetTimeofDayUs(); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(50) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(50) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, + reqDone->GetErrorCode()); + + uint64_t end = TimeUtility::GetTimeofDayUs(); + ASSERT_GT(end - start, 250000); + ASSERT_LT(end - start, 4650000); + } + scheduler.Fini(); + } + + /** + * read error testing + */ + TEST_F(CopysetClientTest, read_error_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 1000; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; + ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 3500; + ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 3500000; + + RequestScheduleOption reqopt; + reqopt.ioSenderOpt = ioSenderOpt; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + mockMetaCache.DelegateToFake(); + + RequestScheduler scheduler; + scheduler.Init(reqopt, &mockMetaCache); + scheduler.Run(); + + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + uint64_t sn = 1; + size_t len = 8; + char buff1[8 + 1]; + char buff2[8 + 1]; + memset(buff1, 'a', 8); + memset(buff2, 'a', 8); + buff1[8] = '\0'; + buff2[8] = '\0'; + off_t offset = 0; + + ChunkServerID leaderId = 10000; + butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + iot.PrepareReadIOBuffers(1); + + /* Illegal parameter */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, + reqDone->GetErrorCode()); + } + /* chunk not exist */ + { + RequestContext *reqCtx = new 
FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(0, reqDone->GetErrorCode()); + } + /* controller error */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + // The retry sleep time set in the configuration file is 5000, as there + // is no triggering of underlying index backoff, so there will be no + // sleep between retries + uint64_t start = TimeUtility::GetTimeofDayUs(); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + gReadCntlFailedCode = -1; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(3)) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly(Invoke(ReadChunkFunc)); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + + uint64_t end = TimeUtility::GetTimeofDayUs(); + ASSERT_GT(end - start, 1000); + gReadCntlFailedCode = 0; + } + + /* controller set timeout */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + // The timeout configured in the settings file is 1000, but due to chunk + // server timeout, it triggers exponential backoff, increasing the + // interval between retries. In normal conditions, three retries would + // only require a sleep time of 3 * 1000. However, with the added + // exponential backoff, the timeout intervals will increase to 1000 + + // 2000 + 2000 = 5000. Considering the random factor, the total time for + // three retries should be greater than 7000 and less than 8000. 
+ uint64_t start = TimeUtility::GetTimeofDayMs(); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + gReadCntlFailedCode = brpc::ERPCTIMEDOUT; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(3)) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly(Invoke(ReadChunkFunc)); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + + uint64_t end = TimeUtility::GetTimeofDayMs(); + ASSERT_GT(end - start, 3000); + ASSERT_LT(end - start, 6000); + + std::this_thread::sleep_for(std::chrono::seconds(8)); + + gReadCntlFailedCode = 0; + } + + /* Set overload */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + // The retry sleep time set in the configuration file is 500, but due to + // chunkserver timeouts, it triggers exponential backoff, increasing the + // interval between retries. In normal conditions, three retries would + // only require a sleep time of 3 * 500. However, with the added + // exponential backoff, the sleep intervals will increase to 1000 + 2000 + // = 3000. Considering the random factor, the total time for three + // retries should be greater than 2900 and less than 5000. 
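+        // As a minimal sketch (hypothetical helper, not part of this patch),
+        // the doubling-with-cap arithmetic behind the timing comments above
+        // can be written as:
+        //   uint64_t BackoffTotal(uint64_t base, uint64_t cap, int n) {
+        //       uint64_t total = 0, cur = base;
+        //       for (int i = 0; i < n; ++i) {
+        //           total += cur;
+        //           cur = std::min(cur * 2, cap);
+        //       }
+        //       return total;
+        //   }
+        //   // e.g. BackoffTotal(1000, 2000, 3) == 1000 + 2000 + 2000 == 5000,
+        //   // as in the controller-timeout case; the real client also mixes
+        //   // in a random factor, so the tests assert ranges, not exact sums.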
+ uint64_t start = TimeUtility::GetTimeofDayUs(); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, + reqDone->GetErrorCode()); + + uint64_t end = TimeUtility::GetTimeofDayUs(); + ASSERT_GT(end - start, 2900); + ASSERT_LT(end - start, 3 * 5000); + } + + /* Other errors */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, + reqDone->GetErrorCode()); + } + /* Not a leader, returning the correct leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) + .WillOnce(Return(0)); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + 
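+        // The REDIRECTED reply used below carries no leader hint (redirect
+        // is unset), so the client falls back to refreshing the leader via
+        // the meta cache before retrying; that refresh is why the
+        // expectations below allow one more GetLeader call than the two
+        // ReadChunk attempts.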
curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + // response1.set_redirect(leaderStr2); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + // response1.set_redirect(leaderStr2); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(-1))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but returned an incorrect leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) + .WillRepeatedly(Return(0)); + 
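+        // Every REDIRECTED reply in this case names the same stale leader,
+        // so each attempt updates the leader and looks it up again: six
+        // GetLeader and three UpdateLeader calls above for the three
+        // ReadChunk attempts below, until the retries are exhausted.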
EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader still failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->subIoIndex_ = 0; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response2.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunk(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), Invoke(ReadChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(ReadChunkFunc))); + copysetClient.ReadChunk(reqCtx->idinfo_, sn, offset, len, {}, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + scheduler.Fini(); + } + + /** + * read snapshot error testing + */ + TEST_F(CopysetClientTest, read_snapshot_error_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + 
mockMetaCache.DelegateToFake(); + RequestScheduler scheduler; + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + size_t len = 8; + int sn = 1; + char buff1[8 + 1]; + char buff2[8 + 1]; + memset(buff1, 'a', 8); + memset(buff2, 'a', 8); + buff1[8] = '\0'; + buff2[8] = '\0'; + off_t offset = 0; + + ChunkServerID leaderId = 10000; + butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + + /* Illegal parameter */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, + reqDone->GetErrorCode()); + } + /* chunk snapshot not exist */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, + reqDone->GetErrorCode()); + } + /* controller error */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + gReadCntlFailedCode = -1; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) + 
.WillRepeatedly(Invoke(ReadChunkSnapshotFunc)); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + gReadCntlFailedCode = 0; + } + /* Other errors */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, + reqDone->GetErrorCode()); + } + /* Not a leader, returning the correct leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) + .WillOnce(Return(0)); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(ReadChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), 
Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(ReadChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(ReadChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but returned an incorrect leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) + .WillRepeatedly(Return(0)); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader still failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + 
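+        // On CHUNK_OP_STATUS_COPYSET_NOTEXIST the client re-fetches the
+        // leader on every attempt, which is why the expectations below allow
+        // six GetLeader calls for the three ReadChunkSnapshot attempts.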
reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response2.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(ReadChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::READ_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(Return(-1)) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, ReadChunkSnapshot(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(ReadChunkSnapshotFunc))); + copysetClient.ReadChunkSnapshot(reqCtx->idinfo_, sn, offset, len, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + } + + /** + * delete snapshot error testing + */ + TEST_F(CopysetClientTest, delete_snapshot_error_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; + 
ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + mockMetaCache.DelegateToFake(); + RequestScheduler scheduler; + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + uint64_t sn = 1; + + ChunkServerID leaderId = 10000; + butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + + /* Illegal parameter */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, + reqDone->GetErrorCode()); + } + /* controller error */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + gReadCntlFailedCode = -1; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(3) + .WillRepeatedly(Invoke(DeleteChunkSnapshotFunc)); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + gReadCntlFailedCode = 0; + } + /* Other errors */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(DeleteChunkSnapshotFunc))); + 
copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, + reqDone->GetErrorCode()); + } + /* Not a leader, returning the correct leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) + .WillOnce(Return(0)); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(ReadChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(DeleteChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + 
response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(-1))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(DeleteChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but returned an incorrect leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response.set_redirect(leaderStr); + ; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) + .WillRepeatedly(Return(0)); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader still failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new 
FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response2.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(2) + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(DeleteChunkSnapshotFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(-1))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(-1))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, + DeleteChunkSnapshotOrCorrectSn(_, _, _, _)) // NOLINT + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(DeleteChunkSnapshotFunc))); + copysetClient.DeleteChunkSnapshotOrCorrectSn(reqCtx->idinfo_, sn, + reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + } + + /** + * create clone chunk error testing + */ + TEST_F(CopysetClientTest, create_s3_clone_error_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + mockMetaCache.DelegateToFake(); + RequestScheduler scheduler; + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + uint64_t sn = 1; + + ChunkServerID leaderId = 10000; + butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + + /* Illegal parameter */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::CREATE_CLONE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->seq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure 
*reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(1) // NOLINT + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, + reqDone->GetErrorCode()); + } + /* controller error */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + gReadCntlFailedCode = -1; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT + .WillRepeatedly(Invoke(CreateCloneChunkFunc)); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + gReadCntlFailedCode = 0; + } + // /* Other errors */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, + reqDone->GetErrorCode()); + } + /* op success */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, 
CreateCloneChunk(_, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(2) // NOLINT + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(CreateCloneChunkFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(2) // NOLINT + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(CreateCloneChunkFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but returned an incorrect leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + 
response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) + .WillRepeatedly(Return(0)); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader still failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(3) // NOLINT + .WillRepeatedly(DoAll(SetArgPointee<2>(response), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response2.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(2) // NOLINT + .WillOnce(DoAll(SetArgPointee<2>(response1), + Invoke(CreateCloneChunkFunc))) + .WillOnce(DoAll(SetArgPointee<2>(response2), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure 
*reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(Return(-1)) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, CreateCloneChunk(_, _, _, _)) + .Times(1) // NOLINT + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(CreateCloneChunkFunc))); + copysetClient.CreateCloneChunk(reqCtx->idinfo_, "destination", sn, 1, + 1024, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + } - TestRunnedRequestClosure closure; - copysetClient.WriteChunk({}, 1, 1, 0, {}, 0, 0, {}, &closure); - ASSERT_FALSE(closure.IsRunned()); - } -} + /** + * recover chunk error testing + */ + TEST_F(CopysetClientTest, recover_chunk_error_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + mockMetaCache.DelegateToFake(); + RequestScheduler scheduler; + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + uint64_t sn = 1; + + ChunkServerID leaderId = 10000; + butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + + /* Illegal parameter */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, + reqDone->GetErrorCode()); + } + /* controller error */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + gReadCntlFailedCode = -1; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + 
.WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly(Invoke(RecoverChunkFunc)); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + gReadCntlFailedCode = 0; + } + /* Other errors */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, + reqDone->GetErrorCode()); + } + /* Not a leader, returning the correct leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + 
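// Since the REDIRECTED response carries no redirect address, the client + // cannot update the leader directly; it re-queries the leader through + // GetLeader before retrying, hence three lookups for the two RPCs below. +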
EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(RecoverChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(RecoverChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but returned an incorrect leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) + .WillRepeatedly(Return(0)); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader still failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + 
.Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + + reqCtx->done_ = reqDone; + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response1.set_redirect(leaderStr); + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response2.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(RecoverChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::DELETE_SNAP; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + reqCtx->correctedSeq_ = sn; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + ChunkResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(Return(-1)) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, RecoverChunk(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(RecoverChunkFunc))); + copysetClient.RecoverChunk(reqCtx->idinfo_, 0, 4096, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + } + + /** + * get chunk info error testing + */ + TEST_F(CopysetClientTest, get_chunk_info_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 5000; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + mockMetaCache.DelegateToFake(); + RequestScheduler scheduler; + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + + ChunkServerID leaderId 
= 10000; + butil::EndPoint leaderAddr; + std::string leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + + /* Illegal parameter */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, + reqDone->GetErrorCode()); + } + /* controller error */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + gReadCntlFailedCode = -1; + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) + .WillRepeatedly(Invoke(GetChunkInfoFunc)); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_NE(0, reqDone->GetErrorCode()); + gReadCntlFailedCode = 0; + } + /* Other errors */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, + reqDone->GetErrorCode()); + } + /* Not a leader, returning the correct leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + 
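// A REDIRECTED status paired with a redirect address identifies the + // current leader, so exactly one UpdateLeader call is expected below and + // the retry goes straight to the advertised leader. +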
response1.set_redirect(leaderStr); + GetChunkInfoResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) + .WillOnce(Return(0)); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + GetChunkInfoResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but did not return a leader, refreshing the meta cache + * failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + GetChunkInfoResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(Return(-1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + /* Not a leader, but returned an incorrect leader */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent 
cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(3) + .WillRepeatedly(Return(0)); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader still failed */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(6) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, + reqDone->GetErrorCode()); + } + /* copyset does not exist, updating leader succeeded */ + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); + response1.set_redirect(leaderStr); + GetChunkInfoResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response2.set_redirect(leaderStr); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(GetChunkInfoFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + { + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::GET_CHUNK_INFO; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + + 
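// This case exercises leader-lookup failures: GetLeader fails twice + // before succeeding, after which the single RPC below completes + // successfully. +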
curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + GetChunkInfoResponse response; + response.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(AtLeast(1)) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(-1))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(-1))) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockChunkService, GetChunkInfo(_, _, _, _)) + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(response), Invoke(GetChunkInfoFunc))); + copysetClient.GetChunkInfo(reqCtx->idinfo_, reqDone); + cond.Wait(); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + } + + namespace + { + + bool gWriteSuccessFlag = false; + + void WriteCallBack(CurveAioContext *aioctx) + { + gWriteSuccessFlag = true; + delete aioctx; + } + + void PrepareOpenFile(FakeCurveFSService *service, OpenFileResponse *openresp, + FakeReturn *fakeReturn) + { + openresp->set_statuscode(curve::mds::StatusCode::kOK); + auto *session = openresp->mutable_protosession(); + session->set_sessionid("xxx"); + session->set_leasetime(10000); + session->set_createtime(10000); + session->set_sessionstatus(curve::mds::SessionStatus::kSessionOK); + auto *fileinfo = openresp->mutable_fileinfo(); + fileinfo->set_id(1); + fileinfo->set_filename("filename"); + fileinfo->set_parentid(0); + fileinfo->set_length(10ULL * 1024 * 1024 * 1024); + fileinfo->set_blocksize(4096); + + *fakeReturn = FakeReturn(nullptr, static_cast<void *>(openresp)); + + service->SetOpenFile(fakeReturn); + } + + } // namespace + + TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) + { + const std::string endpoint = "127.0.0.1:9102"; + + ClientConfig cc; + const std::string &configPath = "./conf/client.conf"; + cc.Init(configPath.c_str()); + FileInstance fileinstance; + UserInfo userinfo; + userinfo.owner = "userinfo"; + + std::shared_ptr<MDSClient> mdsclient = std::make_shared<MDSClient>(); + + // Set the mds address + auto mdsopts = cc.GetFileServiceOption().metaServerOpt; + mdsopts.rpcRetryOpt.addrs.clear(); + mdsopts.rpcRetryOpt.addrs.push_back(endpoint); + + ASSERT_EQ(LIBCURVE_ERROR::OK, mdsclient->Initialize(mdsopts)); + ASSERT_TRUE(fileinstance.Initialize( + "/test", mdsclient, userinfo, OpenFlags{}, cc.GetFileServiceOption())); + + // Create fake chunkserver service + FakeChunkServerService fakechunkservice; + // Set up cli service + CliServiceFake fakeCliservice; + + FakeCurveFSService curvefsService; + OpenFileResponse openresp; + FakeReturn fakeReturn; + + PrepareOpenFile(&curvefsService, &openresp, &fakeReturn); + + brpc::Server server; + ASSERT_EQ(0, server.AddService(&fakechunkservice, + brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Fail to add fakechunkservice"; + ASSERT_EQ( + 0, server.AddService(&fakeCliservice, brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Fail to add fakecliservice"; + ASSERT_EQ( + 0, server.AddService(&curvefsService, brpc::SERVER_DOESNT_OWN_SERVICE)) + << "Fail to add curvefsService"; + + ASSERT_EQ(0, server.Start(endpoint.c_str(), nullptr)) + << "Fail to start server at " << endpoint; + + // Fill the metacache + curve::client::MetaCache *mc = + fileinstance.GetIOManager4File()->GetMetaCache(); + curve::client::ChunkIDInfo_t chunkinfo(1, 2, 3); + mc->UpdateChunkInfoByIndex(0, chunkinfo); + 
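// Chunk index 0 now maps to ChunkIDInfo(1, 2, 3), i.e. chunk 1 in logical + // pool 2, copyset 3; the copyset info registered below resolves that + // copyset to the single fake chunkserver peer started above, so the IO + // path needs no MDS round trip. +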
curve::client::CopysetInfo cpinfo; + curve::client::EndPoint ep; + butil::str2endpoint("127.0.0.1", 9102, &ep); + + braft::PeerId pd(ep); + curve::client::PeerAddr addr = curve::client::PeerAddr(ep); + curve::client::CopysetPeerInfo peer(1, addr, addr); + cpinfo.csinfos_.push_back(peer); + mc->UpdateCopysetInfo(2, 3, cpinfo); + + fakeCliservice.SetPeerID(pd); + + curve::chunkserver::ChunkResponse response; + response.set_status( + curve::chunkserver::CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response.set_appliedindex(0); + FakeReturn writeFakeRet(nullptr, static_cast<void *>(&response)); + fakechunkservice.SetFakeWriteReturn(&writeFakeRet); + + const int kNewFileSn = 100; + const int kOldFileSn = 30; + + ASSERT_EQ(LIBCURVE_ERROR::OK, fileinstance.Open()); + + // Set the file version number + fileinstance.GetIOManager4File()->SetLatestFileSn(kNewFileSn); + + // Send a write request, then wait the given number of seconds and check + // whether the IO has completed + auto startWriteAndCheckResult = + [&fileinstance](int sec) -> bool { // NOLINT + CurveAioContext *aioctx = new CurveAioContext(); + char buffer[4096]; + + aioctx->buf = buffer; + aioctx->offset = 0; + aioctx->length = sizeof(buffer); + aioctx->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; + aioctx->cb = WriteCallBack; + + // Send write request + fileinstance.AioWrite(aioctx, UserDataType::RawBuffer); + + std::this_thread::sleep_for(std::chrono::seconds(sec)); + return gWriteSuccessFlag; + }; + + // The first write succeeds and records the new file version + // number on the chunkserver side + ASSERT_TRUE(startWriteAndCheckResult(3)); + + // Set an old version number before writing + fileinstance.GetIOManager4File()->SetLatestFileSn(kOldFileSn); + gWriteSuccessFlag = false; + + // The chunkserver reports the version conflict; the version number the + // client then obtains is still the old one, so the IO hangs + ASSERT_FALSE(startWriteAndCheckResult(3)); + + // Update the version number back to the normal state + fileinstance.GetIOManager4File()->SetLatestFileSn(kNewFileSn); + std::this_thread::sleep_for(std::chrono::seconds(1)); + + // The last write request eventually succeeds + ASSERT_EQ(true, gWriteSuccessFlag); + + server.Stop(0); + server.Join(); + } + + TEST_F(CopysetClientTest, retry_rpc_sleep_test) + { + MockChunkServiceImpl mockChunkService; + ASSERT_EQ( + server_->AddService(&mockChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); + ASSERT_EQ(server_->Start(listenAddr_.c_str(), nullptr), 0); + + const uint64_t sleepUsBeforeRetry = 5 * 1000 * 1000; + + IOSenderOption ioSenderOpt; + ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 1000; + ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; + ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = + sleepUsBeforeRetry; + ioSenderOpt.failRequestOpt.chunkserverMaxRPCTimeoutMS = 3500; + ioSenderOpt.failRequestOpt.chunkserverMaxRetrySleepIntervalUS = 3500000; + + RequestScheduleOption reqopt; + reqopt.ioSenderOpt = ioSenderOpt; + + CopysetClient copysetClient; + MockMetaCache mockMetaCache; + mockMetaCache.DelegateToFake(); + + RequestScheduler scheduler; + scheduler.Init(reqopt, &mockMetaCache); + scheduler.Run(); + copysetClient.Init(&mockMetaCache, ioSenderOpt, &scheduler, nullptr); + + LogicPoolID logicPoolId = 1; + CopysetID copysetId = 100001; + ChunkID chunkId = 1; + uint64_t fileId = 1; + uint64_t epoch = 1; + size_t len = 8; + char buff1[8] = {0}; + butil::IOBuf iobuf; + iobuf.append(buff1, sizeof(len)); + off_t offset = 0; + + ChunkServerID leaderId = 10000; + ChunkServerID leaderId2 = 10001; + butil::EndPoint leaderAddr; + std::string 
leaderStr = "127.0.0.1:9109"; + butil::str2endpoint(leaderStr.c_str(), &leaderAddr); + + FileMetric fm("test"); + IOTracker iot(nullptr, nullptr, nullptr, &fm); + + { + // In the redirect case, the chunkserver returns a new leader, + // so the client will not sleep before retrying + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + // reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response1.set_redirect(leaderStr); + + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId2), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) + .WillOnce(Return(0)); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + + auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); + + // Returns a new leader ID, so there will be no sleep before retrying + ASSERT_LE(endUs - startUs, sleepUsBeforeRetry / 10); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + + { + // In the redirect case, the chunkserver returns the old leader, + // so the client sleeps before retrying + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + // reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + response1.set_redirect(leaderStr); + + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)) + .Times(1) + .WillOnce(Return(0)); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); + + // The same leader ID is returned, so the client sleeps before retrying + 
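// The timing assertions compare against sleepUsBeforeRetry / 10 rather + // than the full interval, presumably as a coarse bound that tolerates + // scheduling jitter while still separating the sleeping from the + // non-sleeping retry paths. +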
ASSERT_GE(endUs - startUs, sleepUsBeforeRetry / 10); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + + { + // In the redirect case, chunkserver did not return a leader + // Actively refresh to obtain a new leader + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + // reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillOnce(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId2), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(0); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); + + // Returns a new leader id, so there will be no sleep before retrying + ASSERT_LE(endUs - startUs, sleepUsBeforeRetry / 10); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + + { + // In the redirect case, chunkserver did not return a leader + // Actively refresh to obtain old leader + RequestContext *reqCtx = new FakeRequestContext(); + reqCtx->optype_ = OpType::WRITE; + reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + // reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; + reqCtx->offset_ = 0; + reqCtx->rawlength_ = len; + + curve::common::CountDownEvent cond(1); + RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + reqDone->SetFileMetric(&fm); + reqDone->SetIOTracker(&iot); + reqCtx->done_ = reqDone; + + ChunkResponse response1; + response1.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED); + + ChunkResponse response2; + response2.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + + EXPECT_CALL(mockMetaCache, GetLeader(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<2>(leaderId), + SetArgPointee<3>(leaderAddr), Return(0))); + EXPECT_CALL(mockMetaCache, UpdateLeader(_, _, _)).Times(0); + EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)) + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(response1), Invoke(WriteChunkFunc))) + .WillOnce( + DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); + auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); + copysetClient.WriteChunk(reqCtx->idinfo_, fileId, epoch, 0, iobuf, + offset, len, {}, reqDone); + cond.Wait(); + auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); + + ASSERT_GE(endUs - startUs, sleepUsBeforeRetry / 10); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, + reqDone->GetErrorCode()); + } + scheduler.Fini(); + } + + class 
TestRunnedRequestClosure : public RequestClosure + { + public: + TestRunnedRequestClosure() : RequestClosure(nullptr) {} + + void Run() override { runned_ = true; } + + bool IsRunned() const { return runned_; } + + private: + bool runned_ = false; + }; + + // After the test session fails, the retry request will be placed back in the + // request queue + TEST(CopysetClientBasicTest, TestReScheduleWhenSessionNotValid) + { + MockRequestScheduler requestScheduler; + CopysetClient copysetClient; + IOSenderOption ioSenderOption; + MetaCache metaCache; + + ASSERT_EQ(0, copysetClient.Init(&metaCache, ioSenderOption, + &requestScheduler, nullptr)); + + // Set session not valid + copysetClient.StartRecycleRetryRPC(); + + { + EXPECT_CALL(requestScheduler, ReSchedule(_)).Times(1); + + TestRunnedRequestClosure closure; + copysetClient.ReadChunk({}, 0, 0, 0, {}, &closure); + ASSERT_FALSE(closure.IsRunned()); + } + + { + EXPECT_CALL(requestScheduler, ReSchedule(_)).Times(1); + + TestRunnedRequestClosure closure; + copysetClient.WriteChunk({}, 1, 1, 0, {}, 0, 0, {}, &closure); + ASSERT_FALSE(closure.IsRunned()); + } + } -} // namespace client -} // namespace curve + } // namespace client +} // namespace curve diff --git a/test/client/fake/client_workflow_test.cpp b/test/client/fake/client_workflow_test.cpp index c42a9371ba..fdab88f1ed 100644 --- a/test/client/fake/client_workflow_test.cpp +++ b/test/client/fake/client_workflow_test.cpp @@ -19,28 +19,28 @@ * File Created: Saturday, 13th October 2018 1:59:08 pm * Author: tongguangxun */ +#include // NOLINT #include #include -#include // NOLINT -#include -#include #include -#include // NOLINT -#include // NOLINT +#include // NOLINT +#include +#include +#include // NOLINT #include "include/client/libcurve.h" +#include "src/client/client_common.h" #include "src/client/file_instance.h" -#include "test/client/fake/mock_schedule.h" #include "test/client/fake/fakeMDS.h" -#include "src/client/client_common.h" +#include "test/client/fake/mock_schedule.h" -using curve::client::PeerAddr; using curve::client::EndPoint; +using curve::client::PeerAddr; -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT -std::string mdsMetaServerAddr = "127.0.0.1:9104"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9104"; // NOLINT DECLARE_uint64(test_disk_size); DEFINE_uint32(io_time, 5, "Duration for I/O test"); @@ -67,7 +67,7 @@ void readcallbacktest(CurveAioContext* context) { delete context; } -int main(int argc, char ** argv) { +int main(int argc, char** argv) { // google::InitGoogleLogging(argv[0]); google::ParseCommandLineFlags(&argc, &argv, false); std::string configpath = "./test/client/configs/client.conf"; @@ -76,7 +76,7 @@ int main(int argc, char ** argv) { LOG(FATAL) << "Fail to init config"; } - // filename必须是全路径 + // The filename must be a full path std::string filename = "/1_userinfo_"; // uint64_t size = FLAGS_test_disk_size; @@ -86,7 +86,7 @@ int main(int argc, char ** argv) { mds.Initialize(); mds.StartService(); if (FLAGS_create_copysets) { - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9106, &ep); PeerId pd(ep); @@ -127,12 +127,11 @@ int main(int argc, char ** argv) { memset(buffer + 7 * 1024, 'h', 1024); uint64_t offset_base; - for (int i = 0; i < 16; i ++) { + for (int i = 0; i < 16; i++) { uint64_t offset = i * chunk_size; Write(fd, buffer, offset, 
4096); } - char* buf2 = new char[128 * 1024]; char* buf1 = new char[128 * 1024]; @@ -155,7 +154,7 @@ int main(int argc, char ** argv) { aioctx2->op = LIBCURVE_OP_READ; aioctx2->cb = readcallbacktest; AioRead(fd, aioctx2); - if (j%10 == 0) { + if (j % 10 == 0) { mds.EnableNetUnstable(600); } else { mds.EnableNetUnstable(100); @@ -185,18 +184,18 @@ int main(int argc, char ** argv) { CurveAioContext readaioctx; { std::unique_lock lk(writeinterfacemtx); - writeinterfacecv.wait(lk, []()->bool{return writeflag;}); + writeinterfacecv.wait(lk, []() -> bool { return writeflag; }); } writeflag = false; AioWrite(fd, &writeaioctx); { std::unique_lock lk(writeinterfacemtx); - writeinterfacecv.wait(lk, []()->bool{return writeflag;}); + writeinterfacecv.wait(lk, []() -> bool { return writeflag; }); } { std::unique_lock lk(interfacemtx); - interfacecv.wait(lk, []()->bool{return readflag;}); + interfacecv.wait(lk, []() -> bool { return readflag; }); } for (int i = 0; i < 1024; i++) { @@ -204,31 +203,31 @@ int main(int argc, char ** argv) { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 1024] != 'b') { + if (readbuffer[i + 1024] != 'b') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 2 * 1024] != 'c') { + if (readbuffer[i + 2 * 1024] != 'c') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 3 * 1024] != 'd') { + if (readbuffer[i + 3 * 1024] != 'd') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 4 * 1024] != 'e') { + if (readbuffer[i + 4 * 1024] != 'e') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 5 * 1024] != 'f') { + if (readbuffer[i + 5 * 1024] != 'f') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 6 * 1024] != 'g') { + if (readbuffer[i + 6 * 1024] != 'g') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 7 * 1024] != 'h') { + if (readbuffer[i + 7 * 1024] != 'h') { LOG(FATAL) << "read wrong data!"; break; } @@ -236,7 +235,7 @@ int main(int argc, char ** argv) { LOG(INFO) << "LibCurve I/O verified for stage 1, going to read repeatedly"; -// skip_write_io: + // skip_write_io: std::atomic stop(false); auto testfunc = [&]() { while (!stop.load()) { @@ -247,44 +246,44 @@ int main(int argc, char ** argv) { AioRead(fd, &readaioctx); { std::unique_lock lk(interfacemtx); - interfacecv.wait(lk, []()->bool{return readflag;}); + interfacecv.wait(lk, []() -> bool { return readflag; }); } for (int i = 0; i < 1024; i++) { if (readbuffer[i] != 'a') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 1024] != 'b') { + if (readbuffer[i + 1024] != 'b') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 2 * 1024] != 'c') { + if (readbuffer[i + 2 * 1024] != 'c') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 3 * 1024] != 'd') { + if (readbuffer[i + 3 * 1024] != 'd') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 4 * 1024] != 'e') { + if (readbuffer[i + 4 * 1024] != 'e') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 5 * 1024] != 'f') { + if (readbuffer[i + 5 * 1024] != 'f') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 6 * 1024] != 'g') { + if (readbuffer[i + 6 * 1024] != 'g') { LOG(FATAL) << "read wrong data!"; break; } - if (readbuffer[i + 7 * 1024] != 'h') { + if (readbuffer[i + 7 * 1024] != 'h') { LOG(FATAL) << "read wrong data!"; break; } } -skip_read_io: + skip_read_io: std::this_thread::sleep_for(std::chrono::milliseconds(50)); } }; diff --git a/test/client/fake/client_workflow_test4snap.cpp 
b/test/client/fake/client_workflow_test4snap.cpp index 9aa9a75e23..4dcb77aec9 100644 --- a/test/client/fake/client_workflow_test4snap.cpp +++ b/test/client/fake/client_workflow_test4snap.cpp @@ -19,26 +19,26 @@ * File Created: Monday, 7th January 2019 10:04:50 pm * Author: tongguangxun */ +#include // NOLINT #include #include -#include // NOLINT -#include -#include #include -#include //NOLINT -#include //NOLINT +#include //NOLINT +#include +#include +#include //NOLINT -#include "src/client/client_common.h" #include "include/client/libcurve.h" -#include "src/client/libcurve_snapshot.h" +#include "src/client/client_common.h" #include "src/client/file_instance.h" -#include "test/client/fake/mock_schedule.h" +#include "src/client/libcurve_snapshot.h" #include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mock_schedule.h" -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT -std::string mdsMetaServerAddr = "127.0.0.1:6666"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:6666"; // NOLINT DECLARE_uint64(test_disk_size); DEFINE_uint32(io_time, 5, "Duration for I/O test"); @@ -55,21 +55,21 @@ std::condition_variable interfacecv; DECLARE_uint64(test_disk_size); -using curve::client::UserInfo_t; -using curve::client::PeerAddr; -using curve::client::EndPoint; -using curve::client::SegmentInfo; -using curve::client::ChunkInfoDetail; -using curve::client::SnapshotClient; using curve::client::ChunkID; -using curve::client::LogicPoolID; -using curve::client::CopysetID; using curve::client::ChunkIDInfo; +using curve::client::ChunkInfoDetail; +using curve::client::CopysetID; using curve::client::CopysetPeerInfo; -using curve::client::MetaCache; +using curve::client::EndPoint; using curve::client::LogicalPoolCopysetIDInfo; +using curve::client::LogicPoolID; +using curve::client::MetaCache; +using curve::client::PeerAddr; +using curve::client::SegmentInfo; +using curve::client::SnapshotClient; +using curve::client::UserInfo_t; -int main(int argc, char ** argv) { +int main(int argc, char** argv) { google::ParseCommandLineFlags(&argc, &argv, false); std::string filename = "/1_userinfo_test.txt"; @@ -79,7 +79,7 @@ int main(int argc, char ** argv) { mds.Initialize(); mds.StartService(); if (FLAGS_create_copysets) { - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 8200, &ep); PeerId pd(ep); @@ -116,10 +116,8 @@ int main(int argc, char ** argv) { SegmentInfo seginfo; LogicalPoolCopysetIDInfo lpcsIDInfo; - if (LIBCURVE_ERROR::FAILED == cl.GetSnapshotSegmentInfo(filename, - userinfo, - 0, 0, - &seginfo)) { + if (LIBCURVE_ERROR::FAILED == + cl.GetSnapshotSegmentInfo(filename, userinfo, 0, 0, &seginfo)) { LOG(ERROR) << "GetSnapshotSegmentInfo failed!"; return -1; } @@ -140,7 +138,7 @@ int main(int argc, char ** argv) { cl.DeleteChunkSnapshotOrCorrectSn(ChunkIDInfo(1, 10000, 1), 2); - ChunkInfoDetail *chunkInfo = new ChunkInfoDetail; + ChunkInfoDetail* chunkInfo = new ChunkInfoDetail; cl.GetChunkInfo(ChunkIDInfo(1, 10000, 1), chunkInfo); for (auto iter : chunkInfo->chunkSn) { if (iter != 1111) { diff --git a/test/client/fake/fakeChunkserver.h b/test/client/fake/fakeChunkserver.h index 6ebbbeffcf..0841e18d7d 100644 --- a/test/client/fake/fakeChunkserver.h +++ b/test/client/fake/fakeChunkserver.h @@ -23,15 +23,15 @@ #ifndef TEST_CLIENT_FAKE_FAKECHUNKSERVER_H_ #define TEST_CLIENT_FAKE_FAKECHUNKSERVER_H_ 
+#include #include #include #include -#include #include -#include +#include -#include // NOLINT #include +#include // NOLINT #include "proto/chunk.pb.h" #include "proto/cli2.pb.h" @@ -40,8 +40,8 @@ #include "test/client/fake/mockMDS.h" using braft::PeerId; -using curve::chunkserver::ChunkService; using curve::chunkserver::CHUNK_OP_STATUS; +using curve::chunkserver::ChunkService; class FakeChunkService : public ChunkService { public: @@ -53,20 +53,19 @@ class FakeChunkService : public ChunkService { } virtual ~FakeChunkService() {} - void WriteChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void WriteChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); retryTimes.fetch_add(1); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); if (rpcFailed) { cntl->SetFailed(-1, "set rpc failed!"); } - ::memcpy(chunk_, - cntl->request_attachment().to_string().c_str(), + ::memcpy(chunk_, cntl->request_attachment().to_string().c_str(), request->size()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response->set_appliedindex(2); @@ -75,13 +74,13 @@ class FakeChunkService : public ChunkService { } } - void ReadChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void ReadChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); retryTimes.fetch_add(1); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); if (rpcFailed) { cntl->SetFailed(EHOSTDOWN, "set rpc failed!"); } @@ -97,67 +96,69 @@ class FakeChunkService : public ChunkService { } void DeleteChunkSnapshotOrCorrectSn( - ::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakedeletesnapchunkret_->controller_ != nullptr && - fakedeletesnapchunkret_->controller_->Failed()) { + fakedeletesnapchunkret_->controller_->Failed()) { controller->SetFailed("failed"); } auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( - fakedeletesnapchunkret_->response_); + fakedeletesnapchunkret_->response_); response->CopyFrom(*resp); } void ReadChunkSnapshot(::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakereadchunksnapret_->controller_ != nullptr && - fakereadchunksnapret_->controller_->Failed()) { + 
fakereadchunksnapret_->controller_->Failed()) { controller->SetFailed("failed"); } - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); char buff[8192] = {1}; ::memset(buff, 1, 8192); cntl->response_attachment().append(buff, request->size()); auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( - fakereadchunksnapret_->response_); + fakereadchunksnapret_->response_); response->CopyFrom(*resp); } - void GetChunkInfo(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::GetChunkInfoRequest *request, // NOLINT - ::curve::chunkserver::GetChunkInfoResponse *response, - google::protobuf::Closure *done) { + void GetChunkInfo( + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::GetChunkInfoRequest* request, // NOLINT + ::curve::chunkserver::GetChunkInfoResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeGetChunkInforet_->controller_ != nullptr && - fakeGetChunkInforet_->controller_->Failed()) { + fakeGetChunkInforet_->controller_->Failed()) { controller->SetFailed("failed"); } auto resp = static_cast<::curve::chunkserver::GetChunkInfoResponse*>( - fakeGetChunkInforet_->response_); + fakeGetChunkInforet_->response_); response->CopyFrom(*resp); } - void GetChunkHash(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::GetChunkHashRequest *request, // NOLINT - ::curve::chunkserver::GetChunkHashResponse *response, - google::protobuf::Closure *done) { + void GetChunkHash( + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::GetChunkHashRequest* request, // NOLINT + ::curve::chunkserver::GetChunkHashResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeGetChunkHashRet_->controller_ != nullptr && - fakeGetChunkHashRet_->controller_->Failed()) { + fakeGetChunkHashRet_->controller_->Failed()) { controller->SetFailed("failed"); } auto resp = static_cast<::curve::chunkserver::GetChunkHashResponse*>( - fakeGetChunkHashRet_->response_); + fakeGetChunkHashRet_->response_); response->CopyFrom(*resp); } @@ -177,13 +178,9 @@ class FakeChunkService : public ChunkService { fakeGetChunkHashRet_ = fakeret; } - void SetRPCFailed() { - rpcFailed = true; - } + void SetRPCFailed() { rpcFailed = true; } - void ReSetRPCFailed() { - rpcFailed = false; - } + void ReSetRPCFailed() { rpcFailed = false; } FakeReturn* fakedeletesnapchunkret_; FakeReturn* fakereadchunksnapret_; @@ -200,16 +197,13 @@ class FakeChunkService : public ChunkService { waittimeMS = 0; } - void CleanRetryTimes() { - retryTimes.store(0); - } + void CleanRetryTimes() { retryTimes.store(0); } - uint64_t GetRetryTimes() { - return retryTimes.load(); - } + uint64_t GetRetryTimes() { return retryTimes.load(); } private: - // wait4netunstable用来模拟网络延时,当打开之后,每个读写rpc会停留一段时间再返回 + // wait4netunstable is used to simulate network latency. 
When turned on, + // each read/write rpc will pause for a period of time before returning bool wait4netunstable; uint64_t waittimeMS; bool rpcFailed; @@ -219,32 +213,24 @@ class FakeChunkService : public ChunkService { class CliServiceFake : public curve::chunkserver::CliService2 { public: - CliServiceFake() { - invokeTimes = 0; - } + CliServiceFake() { invokeTimes = 0; } void GetLeader(::google::protobuf::RpcController* controller, - const curve::chunkserver::GetLeaderRequest2* request, - curve::chunkserver::GetLeaderResponse2* response, - ::google::protobuf::Closure* done) { + const curve::chunkserver::GetLeaderRequest2* request, + curve::chunkserver::GetLeaderResponse2* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address(leaderid_.to_string()); response->set_allocated_leader(peer); invokeTimes++; } - void SetPeerID(PeerId peerid) { - leaderid_ = peerid; - } + void SetPeerID(PeerId peerid) { leaderid_ = peerid; } - uint64_t GetInvokeTimes() { - return invokeTimes; - } + uint64_t GetInvokeTimes() { return invokeTimes; } - void ReSetInvokeTimes() { - invokeTimes = 0; - } + void ReSetInvokeTimes() { invokeTimes = 0; } private: PeerId leaderid_; @@ -253,17 +239,19 @@ class CliServiceFake : public curve::chunkserver::CliService2 { class FakeChunkServerService : public ChunkService { public: - void WriteChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void WriteChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - if (fakewriteret_->controller_ != nullptr && fakewriteret_->controller_->Failed()) { // NOLINT + if (fakewriteret_->controller_ != nullptr && + fakewriteret_->controller_->Failed()) { // NOLINT controller->SetFailed("failed"); } - auto resp = static_cast<::curve::chunkserver::ChunkResponse*>(fakewriteret_->response_); // NOLINT + auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( + fakewriteret_->response_); // NOLINT response->CopyFrom(*resp); static uint64_t latestSn = 0; @@ -274,13 +262,13 @@ class FakeChunkServerService : public ChunkService { latestSn = std::max(latestSn, request->sn()); } - void ReadChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void ReadChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); char buff[8192] = {0}; if (request->has_appliedindex()) { memset(buff, 'a', 4096); @@ -290,17 +278,14 @@ class FakeChunkServerService : public ChunkService { memset(buff + 4096, 'd', 4096); } cntl->response_attachment().append(buff, request->size()); - auto resp = static_cast<::curve::chunkserver::ChunkResponse*>(fakereadret_->response_); // NOLINT + auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( + fakereadret_->response_); // NOLINT 
response->CopyFrom(*resp); } - void SetFakeWriteReturn(FakeReturn* ret) { - fakewriteret_ = ret; - } + void SetFakeWriteReturn(FakeReturn* ret) { fakewriteret_ = ret; } - void SetFakeReadReturn(FakeReturn* ret) { - fakereadret_ = ret; - } + void SetFakeReadReturn(FakeReturn* ret) { fakereadret_ = ret; } private: FakeReturn* fakewriteret_; @@ -310,23 +295,20 @@ class FakeChunkServerService : public ChunkService { class FakeRaftStateService : public braft::raft_stat { public: void default_method(::google::protobuf::RpcController* controller, - const ::braft::IndexRequest*, - ::braft::IndexResponse*, + const ::braft::IndexRequest*, ::braft::IndexResponse*, ::google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); // NOLINT + brpc::Controller* cntl = + dynamic_cast(controller); // NOLINT if (failed_) { cntl->SetFailed("failed for test"); return; } cntl->response_attachment().append(buf_); } - void SetBuf(const butil::IOBuf& iobuf) { - buf_ = iobuf; - } - void SetFailed(bool failed) { - failed_ = failed; - } + void SetBuf(const butil::IOBuf& iobuf) { buf_ = iobuf; } + void SetFailed(bool failed) { failed_ = failed; } + private: butil::IOBuf buf_; bool failed_ = false; diff --git a/test/client/fake/fakeMDS.h b/test/client/fake/fakeMDS.h index e29f251c26..6daed2e5ed 100644 --- a/test/client/fake/fakeMDS.h +++ b/test/client/fake/fakeMDS.h @@ -22,73 +22,68 @@ #ifndef TEST_CLIENT_FAKE_FAKEMDS_H_ #define TEST_CLIENT_FAKE_FAKEMDS_H_ -#include -#include -#include #include +#include +#include +#include -#include -#include #include -#include #include -#include "src/client/client_common.h" -#include "test/client/fake/mockMDS.h" -#include "test/client/fake/fakeChunkserver.h" +#include +#include +#include -#include "proto/nameserver2.pb.h" -#include "proto/topology.pb.h" #include "proto/copyset.pb.h" -#include "proto/schedule.pb.h" -#include "src/common/timeutility.h" -#include "src/common/authenticator.h" #include "proto/heartbeat.pb.h" +#include "proto/nameserver2.pb.h" +#include "proto/schedule.pb.h" +#include "proto/topology.pb.h" +#include "src/client/client_common.h" #include "src/client/mds_client_base.h" +#include "src/common/authenticator.h" +#include "src/common/timeutility.h" #include "src/common/uuid.h" +#include "test/client/fake/fakeChunkserver.h" +#include "test/client/fake/mockMDS.h" using curve::common::Authenticator; using braft::PeerId; -using curve::common::Authenticator; using curve::chunkserver::COPYSET_OP_STATUS; -using ::curve::mds::topology::GetChunkServerListInCopySetsResponse; -using ::curve::mds::topology::GetChunkServerListInCopySetsRequest; +using curve::common::Authenticator; +using ::curve::mds::schedule::QueryChunkServerRecoverStatusRequest; +using ::curve::mds::schedule::QueryChunkServerRecoverStatusResponse; +using ::curve::mds::schedule::RapidLeaderScheduleRequst; +using ::curve::mds::schedule::RapidLeaderScheduleResponse; using ::curve::mds::topology::ChunkServerRegistRequest; using ::curve::mds::topology::ChunkServerRegistResponse; -using ::curve::mds::topology::GetClusterInfoRequest; -using ::curve::mds::topology::GetClusterInfoResponse; using ::curve::mds::topology::GetChunkServerInfoRequest; using ::curve::mds::topology::GetChunkServerInfoResponse; +using ::curve::mds::topology::GetChunkServerListInCopySetsRequest; +using ::curve::mds::topology::GetChunkServerListInCopySetsResponse; +using ::curve::mds::topology::GetClusterInfoRequest; +using ::curve::mds::topology::GetClusterInfoResponse; +using 
::curve::mds::topology::GetCopySetsInChunkServerRequest; +using ::curve::mds::topology::GetCopySetsInChunkServerResponse; using ::curve::mds::topology::ListChunkServerRequest; using ::curve::mds::topology::ListChunkServerResponse; +using ::curve::mds::topology::ListLogicalPoolRequest; +using ::curve::mds::topology::ListLogicalPoolResponse; using ::curve::mds::topology::ListPhysicalPoolRequest; using ::curve::mds::topology::ListPhysicalPoolResponse; using ::curve::mds::topology::ListPoolZoneRequest; using ::curve::mds::topology::ListPoolZoneResponse; using ::curve::mds::topology::ListZoneServerRequest; using ::curve::mds::topology::ListZoneServerResponse; -using ::curve::mds::topology::GetCopySetsInChunkServerRequest; -using ::curve::mds::topology::GetCopySetsInChunkServerResponse; -using ::curve::mds::topology::ListLogicalPoolRequest; -using ::curve::mds::topology::ListLogicalPoolResponse; -using ::curve::mds::topology::GetClusterInfoRequest; -using ::curve::mds::topology::GetClusterInfoResponse; -using ::curve::mds::schedule::RapidLeaderScheduleRequst; -using ::curve::mds::schedule::RapidLeaderScheduleResponse; -using ::curve::mds::schedule::QueryChunkServerRecoverStatusRequest; -using ::curve::mds::schedule::QueryChunkServerRecoverStatusResponse; -using HeartbeatRequest = curve::mds::heartbeat::ChunkServerHeartbeatRequest; +using HeartbeatRequest = curve::mds::heartbeat::ChunkServerHeartbeatRequest; using HeartbeatResponse = curve::mds::heartbeat::ChunkServerHeartbeatResponse; - DECLARE_bool(start_builtin_service); class FakeMDSCurveFSService : public curve::mds::CurveFSService { public: - FakeMDSCurveFSService() { - retrytimes_ = 0; - } + FakeMDSCurveFSService() { retrytimes_ = 0; } void ListClient(::google::protobuf::RpcController* controller, const ::curve::mds::ListClientRequest* request, @@ -96,39 +91,39 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeListClient_->controller_ != nullptr && - fakeListClient_->controller_->Failed()) { + fakeListClient_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::ListClientResponse*>( - fakeListClient_->response_); + fakeListClient_->response_); response->CopyFrom(*resp); } void CreateFile(::google::protobuf::RpcController* controller, - const ::curve::mds::CreateFileRequest* request, - ::curve::mds::CreateFileResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::CreateFileRequest* request, + ::curve::mds::CreateFileResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeCreateFileret_->controller_ != nullptr - && fakeCreateFileret_->controller_->Failed()) { + if (fakeCreateFileret_->controller_ != nullptr && + fakeCreateFileret_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::CreateFileResponse*>( - fakeCreateFileret_->response_); + fakeCreateFileret_->response_); response->CopyFrom(*resp); } void GetFileInfo(::google::protobuf::RpcController* controller, - const ::curve::mds::GetFileInfoRequest* request, - ::curve::mds::GetFileInfoResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::GetFileInfoRequest* request, + ::curve::mds::GetFileInfoResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeGetFileInforet_->controller_ != nullptr && 
fakeGetFileInforet_->controller_->Failed()) { @@ -138,14 +133,15 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { retrytimes_++; auto resp = static_cast<::curve::mds::GetFileInfoResponse*>( - fakeGetFileInforet_->response_); + fakeGetFileInforet_->response_); response->CopyFrom(*resp); } - void IncreaseFileEpoch(::google::protobuf::RpcController* controller, - const ::curve::mds::IncreaseFileEpochRequest* request, - ::curve::mds::IncreaseFileEpochResponse* response, - ::google::protobuf::Closure* done) { + void IncreaseFileEpoch( + ::google::protobuf::RpcController* controller, + const ::curve::mds::IncreaseFileEpochRequest* request, + ::curve::mds::IncreaseFileEpochResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeIncreaseFileEpochret_->controller_ != nullptr && fakeIncreaseFileEpochret_->controller_->Failed()) { @@ -155,7 +151,7 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { retrytimes_++; auto resp = static_cast<::curve::mds::IncreaseFileEpochResponse*>( - fakeIncreaseFileEpochret_->response_); + fakeIncreaseFileEpochret_->response_); response->CopyFrom(*resp); } @@ -165,41 +161,42 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeGetAllocatedSizeRet_->controller_ != nullptr && - fakeGetAllocatedSizeRet_->controller_->Failed()) { + fakeGetAllocatedSizeRet_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::GetAllocatedSizeResponse*>( - fakeGetAllocatedSizeRet_->response_); + fakeGetAllocatedSizeRet_->response_); response->CopyFrom(*resp); } - void GetOrAllocateSegment(::google::protobuf::RpcController* controller, - const ::curve::mds::GetOrAllocateSegmentRequest* request, - ::curve::mds::GetOrAllocateSegmentResponse* response, - ::google::protobuf::Closure* done) { + void GetOrAllocateSegment( + ::google::protobuf::RpcController* controller, + const ::curve::mds::GetOrAllocateSegmentRequest* request, + ::curve::mds::GetOrAllocateSegmentResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeGetOrAllocateSegmentret_->controller_ != nullptr && - fakeGetOrAllocateSegmentret_->controller_->Failed()) { + fakeGetOrAllocateSegmentret_->controller_->Failed()) { controller->SetFailed("failed"); } if (!strcmp(request->owner().c_str(), "root")) { - // 当user为root用户的时候需要检查其signature信息 + // When the user is root, it is necessary to check their signature + // information std::string str2sig = Authenticator::GetString2Signature( - request->date(), - request->owner()); - std::string sig = Authenticator::CalcString2Signature(str2sig, - "root_password"); + request->date(), request->owner()); + std::string sig = + Authenticator::CalcString2Signature(str2sig, "root_password"); ASSERT_STREQ(request->signature().c_str(), sig.c_str()); LOG(INFO) << "GetOrAllocateSegment with password!"; } retrytimes_++; - // 检查请求内容是全路径 + // Check that the request content is full path auto checkFullpath = [&]() { LOG(INFO) << "request filename = " << request->filename(); ASSERT_EQ(request->filename()[0], '/'); @@ -207,14 +204,14 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { (void)checkFullpath; fiu_do_on("test/client/fake/fakeMDS.GetOrAllocateSegment", - checkFullpath()); + checkFullpath()); curve::mds::GetOrAllocateSegmentResponse* resp; - if (request->filename() == "/clonesource") { + if 
(request->filename() == "/clonesource") { resp = static_cast<::curve::mds::GetOrAllocateSegmentResponse*>( - fakeGetOrAllocateSegmentretForClone_->response_); + fakeGetOrAllocateSegmentretForClone_->response_); } else { resp = static_cast<::curve::mds::GetOrAllocateSegmentResponse*>( - fakeGetOrAllocateSegmentret_->response_); + fakeGetOrAllocateSegmentret_->response_); } response->CopyFrom(*resp); } @@ -236,26 +233,26 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { } void OpenFile(::google::protobuf::RpcController* controller, - const ::curve::mds::OpenFileRequest* request, - ::curve::mds::OpenFileResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::OpenFileRequest* request, + ::curve::mds::OpenFileResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeopenfile_->controller_ != nullptr && - fakeopenfile_->controller_->Failed()) { + fakeopenfile_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::OpenFileResponse*>( - fakeopenfile_->response_); + fakeopenfile_->response_); response->CopyFrom(*resp); } void RefreshSession(::google::protobuf::RpcController* controller, const curve::mds::ReFreshSessionRequest* request, curve::mds::ReFreshSessionResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::Closure* done) { { brpc::ClosureGuard done_guard(done); if (fakeRefreshSession_->controller_ != nullptr && @@ -266,10 +263,10 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { static int seq = 1; auto resp = static_cast<::curve::mds::ReFreshSessionResponse*>( - fakeRefreshSession_->response_); + fakeRefreshSession_->response_); if (resp->statuscode() == ::curve::mds::StatusCode::kOK) { - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; info->set_seqnum(seq++); info->set_filename("_filename_"); info->set_id(resp->fileinfo().id()); @@ -279,13 +276,13 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { info->set_length(4 * 1024 * 1024 * 1024ul); info->set_ctime(12345678); - curve::mds::ProtoSession *protoSession = - new curve::mds::ProtoSession(); + curve::mds::ProtoSession* protoSession = + new curve::mds::ProtoSession(); protoSession->set_sessionid("1234"); protoSession->set_createtime(12345); protoSession->set_leasetime(10000000); protoSession->set_sessionstatus( - ::curve::mds::SessionStatus::kSessionOK); + ::curve::mds::SessionStatus::kSessionOK); response->set_statuscode(::curve::mds::StatusCode::kOK); response->set_sessionid("1234"); @@ -299,175 +296,166 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { retrytimes_++; - if (refreshtask_) - refreshtask_(); + if (refreshtask_) refreshtask_(); } void CreateSnapShot(::google::protobuf::RpcController* controller, - const ::curve::mds::CreateSnapShotRequest* request, - ::curve::mds::CreateSnapShotResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::CreateSnapShotRequest* request, + ::curve::mds::CreateSnapShotResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakecreatesnapshotret_->controller_ != nullptr && - fakecreatesnapshotret_->controller_->Failed()) { + fakecreatesnapshotret_->controller_->Failed()) { controller->SetFailed("failed"); } if (request->has_signature()) { - CheckAuth(request->signature(), - request->filename(), - request->owner(), - request->date()); + 
CheckAuth(request->signature(), request->filename(), + request->owner(), request->date()); } retrytimes_++; auto resp = static_cast<::curve::mds::CreateSnapShotResponse*>( - fakecreatesnapshotret_->response_); + fakecreatesnapshotret_->response_); response->CopyFrom(*resp); } void ListSnapShot(::google::protobuf::RpcController* controller, - const ::curve::mds::ListSnapShotFileInfoRequest* request, - ::curve::mds::ListSnapShotFileInfoResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::ListSnapShotFileInfoRequest* request, + ::curve::mds::ListSnapShotFileInfoResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakelistsnapshotret_->controller_ != nullptr && - fakelistsnapshotret_->controller_->Failed()) { + fakelistsnapshotret_->controller_->Failed()) { controller->SetFailed("failed"); } if (request->has_signature()) { - CheckAuth(request->signature(), - request->filename(), - request->owner(), - request->date()); + CheckAuth(request->signature(), request->filename(), + request->owner(), request->date()); } retrytimes_++; auto resp = static_cast<::curve::mds::ListSnapShotFileInfoResponse*>( - fakelistsnapshotret_->response_); + fakelistsnapshotret_->response_); response->CopyFrom(*resp); } void DeleteSnapShot(::google::protobuf::RpcController* controller, - const ::curve::mds::DeleteSnapShotRequest* request, - ::curve::mds::DeleteSnapShotResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::DeleteSnapShotRequest* request, + ::curve::mds::DeleteSnapShotResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakedeletesnapshotret_->controller_ != nullptr && - fakedeletesnapshotret_->controller_->Failed()) { + fakedeletesnapshotret_->controller_->Failed()) { controller->SetFailed("failed"); } if (request->has_signature()) { - CheckAuth(request->signature(), - request->filename(), - request->owner(), - request->date()); + CheckAuth(request->signature(), request->filename(), + request->owner(), request->date()); } retrytimes_++; auto resp = static_cast<::curve::mds::DeleteSnapShotResponse*>( - fakedeletesnapshotret_->response_); + fakedeletesnapshotret_->response_); response->CopyFrom(*resp); } - void CheckSnapShotStatus(::google::protobuf::RpcController* controller, - const ::curve::mds::CheckSnapShotStatusRequest* request, - ::curve::mds::CheckSnapShotStatusResponse* response, - ::google::protobuf::Closure* done) { + void CheckSnapShotStatus( + ::google::protobuf::RpcController* controller, + const ::curve::mds::CheckSnapShotStatusRequest* request, + ::curve::mds::CheckSnapShotStatusResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakechecksnapshotret_->controller_ != nullptr && - fakechecksnapshotret_->controller_->Failed()) { + fakechecksnapshotret_->controller_->Failed()) { controller->SetFailed("failed"); } if (request->has_signature()) { - CheckAuth(request->signature(), - request->filename(), - request->owner(), - request->date()); + CheckAuth(request->signature(), request->filename(), + request->owner(), request->date()); } auto resp = static_cast<::curve::mds::DeleteSnapShotResponse*>( - fakechecksnapshotret_->response_); + fakechecksnapshotret_->response_); response->CopyFrom(*resp); } - void GetSnapShotFileSegment(::google::protobuf::RpcController* controller, - const ::curve::mds::GetOrAllocateSegmentRequest* request, - ::curve::mds::GetOrAllocateSegmentResponse* response, - 
::google::protobuf::Closure* done) { + void GetSnapShotFileSegment( + ::google::protobuf::RpcController* controller, + const ::curve::mds::GetOrAllocateSegmentRequest* request, + ::curve::mds::GetOrAllocateSegmentResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakegetsnapsegmentinforet_->controller_ != nullptr && - fakegetsnapsegmentinforet_->controller_->Failed()) { + fakegetsnapsegmentinforet_->controller_->Failed()) { controller->SetFailed("failed"); } if (request->has_signature()) { - CheckAuth(request->signature(), - request->filename(), - request->owner(), - request->date()); + CheckAuth(request->signature(), request->filename(), + request->owner(), request->date()); } retrytimes_++; auto resp = static_cast<::curve::mds::GetOrAllocateSegmentResponse*>( - fakegetsnapsegmentinforet_->response_); + fakegetsnapsegmentinforet_->response_); response->CopyFrom(*resp); } void DeleteChunkSnapshotOrCorrectSn( - ::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakedeletesnapchunkret_->controller_ != nullptr && - fakedeletesnapchunkret_->controller_->Failed()) { + fakedeletesnapchunkret_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( - fakedeletesnapchunkret_->response_); + fakedeletesnapchunkret_->response_); response->CopyFrom(*resp); } void ReadChunkSnapshot(::google::protobuf::RpcController* controller, - const ::curve::chunkserver::ChunkRequest* request, - ::curve::chunkserver::ChunkResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakereadchunksnapret_->controller_ != nullptr && - fakereadchunksnapret_->controller_->Failed()) { + fakereadchunksnapret_->controller_->Failed()) { controller->SetFailed("failed"); } auto resp = static_cast<::curve::chunkserver::ChunkResponse*>( - fakereadchunksnapret_->response_); + fakereadchunksnapret_->response_); response->CopyFrom(*resp); } void CloseFile(::google::protobuf::RpcController* controller, - const ::curve::mds::CloseFileRequest* request, - ::curve::mds::CloseFileResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::CloseFileRequest* request, + ::curve::mds::CloseFileResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeclosefile_->controller_ != nullptr && - fakeclosefile_->controller_->Failed()) { + fakeclosefile_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::CloseFileResponse*>( - fakeclosefile_->response_); + fakeclosefile_->response_); response->CopyFrom(*resp); if (closeFileTask_) { @@ -481,14 +469,14 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakerenamefile_->controller_ != nullptr && - fakerenamefile_->controller_->Failed()) { + fakerenamefile_->controller_->Failed()) { controller->SetFailed("failed"); 
} retrytimes_++; auto resp = static_cast<::curve::mds::CloseFileResponse*>( - fakerenamefile_->response_); + fakerenamefile_->response_); response->CopyFrom(*resp); } @@ -498,7 +486,7 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakedeletefile_->controller_ != nullptr && - fakedeletefile_->controller_->Failed()) { + fakedeletefile_->controller_->Failed()) { controller->SetFailed("failed"); } @@ -509,12 +497,13 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { retrytimes_++; auto resp = static_cast<::curve::mds::CloseFileResponse*>( - fakedeletefile_->response_); + fakedeletefile_->response_); if (request->forcedelete()) { LOG(INFO) << "force delete file!"; - fiu_do_on("test/client/fake/fakeMDS/forceDeleteFile", - resp->set_statuscode(curve::mds::StatusCode::kNotSupported)); + fiu_do_on( + "test/client/fake/fakeMDS/forceDeleteFile", + resp->set_statuscode(curve::mds::StatusCode::kNotSupported)); } response->CopyFrom(*resp); @@ -526,103 +515,97 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeextendfile_->controller_ != nullptr && - fakeextendfile_->controller_->Failed()) { + fakeextendfile_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::ExtendFileResponse*>( - fakeextendfile_->response_); + fakeextendfile_->response_); response->CopyFrom(*resp); } void CreateCloneFile(::google::protobuf::RpcController* controller, - const ::curve::mds::CreateCloneFileRequest* request, - ::curve::mds::CreateCloneFileResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::CreateCloneFileRequest* request, + ::curve::mds::CreateCloneFileResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeCreateCloneFile_->controller_ != nullptr - && fakeCreateCloneFile_->controller_->Failed()) { + if (fakeCreateCloneFile_->controller_ != nullptr && + fakeCreateCloneFile_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::CreateCloneFileResponse*>( - fakeCreateCloneFile_->response_); + fakeCreateCloneFile_->response_); response->CopyFrom(*resp); } - void SetCloneFileStatus(::google::protobuf::RpcController* controller, - const ::curve::mds::SetCloneFileStatusRequest* request, - ::curve::mds::SetCloneFileStatusResponse* response, - ::google::protobuf::Closure* done) { + void SetCloneFileStatus( + ::google::protobuf::RpcController* controller, + const ::curve::mds::SetCloneFileStatusRequest* request, + ::curve::mds::SetCloneFileStatusResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeSetCloneFileStatus_->controller_ != nullptr - && fakeSetCloneFileStatus_->controller_->Failed()) { + if (fakeSetCloneFileStatus_->controller_ != nullptr && + fakeSetCloneFileStatus_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::SetCloneFileStatusResponse*>( - fakeSetCloneFileStatus_->response_); + fakeSetCloneFileStatus_->response_); response->CopyFrom(*resp); } void ChangeOwner(::google::protobuf::RpcController* controller, - const ::curve::mds::ChangeOwnerRequest* request, - ::curve::mds::ChangeOwnerResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::ChangeOwnerRequest* 
request, + ::curve::mds::ChangeOwnerResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeChangeOwner_->controller_ != nullptr && - fakeChangeOwner_->controller_->Failed()) { + fakeChangeOwner_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::ChangeOwnerResponse*>( - fakeChangeOwner_->response_); + fakeChangeOwner_->response_); response->CopyFrom(*resp); } void ListDir(::google::protobuf::RpcController* controller, - const ::curve::mds::ListDirRequest* request, - ::curve::mds::ListDirResponse* response, - ::google::protobuf::Closure* done) { + const ::curve::mds::ListDirRequest* request, + ::curve::mds::ListDirResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); if (fakeListDir_->controller_ != nullptr && - fakeListDir_->controller_->Failed()) { + fakeListDir_->controller_->Failed()) { controller->SetFailed("failed"); } retrytimes_++; auto resp = static_cast<::curve::mds::ListDirResponse*>( - fakeListDir_->response_); + fakeListDir_->response_); response->CopyFrom(*resp); } - void SetListDir(FakeReturn* fakeret) { - fakeListDir_ = fakeret; - } + void SetListDir(FakeReturn* fakeret) { fakeListDir_ = fakeret; } - void SetListClient(FakeReturn* fakeret) { - fakeListClient_ = fakeret; - } + void SetListClient(FakeReturn* fakeret) { fakeListClient_ = fakeret; } void SetCreateCloneFile(FakeReturn* fakeret) { fakeCreateCloneFile_ = fakeret; } - void SetExtendFile(FakeReturn* fakeret) { - fakeextendfile_ = fakeret; - } - + void SetExtendFile(FakeReturn* fakeret) { fakeextendfile_ = fakeret; } void SetCreateFileFakeReturn(FakeReturn* fakeret) { fakeCreateFileret_ = fakeret; @@ -652,9 +635,7 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { fakeDeAllocateSegment_ = fakeret; } - void SetOpenFile(FakeReturn* fakeret) { - fakeopenfile_ = fakeret; - } + void SetOpenFile(FakeReturn* fakeret) { fakeopenfile_ = fakeret; } void SetRefreshSession(FakeReturn* fakeret, std::function t) { fakeRefreshSession_ = fakeret; @@ -685,61 +666,41 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { fakedeletesnapchunkret_ = fakeret; } - void SetCloseFile(FakeReturn* fakeret) { - fakeclosefile_ = fakeret; - } + void SetCloseFile(FakeReturn* fakeret) { fakeclosefile_ = fakeret; } - void SetCheckSnap(FakeReturn* fakeret) { - fakechecksnapshotret_ = fakeret; - } + void SetCheckSnap(FakeReturn* fakeret) { fakechecksnapshotret_ = fakeret; } - void SetRenameFile(FakeReturn* fakeret) { - fakerenamefile_ = fakeret; - } + void SetRenameFile(FakeReturn* fakeret) { fakerenamefile_ = fakeret; } - void SetDeleteFile(FakeReturn* fakeret) { - fakedeletefile_ = fakeret; - } + void SetDeleteFile(FakeReturn* fakeret) { fakedeletefile_ = fakeret; } - void SetRegistRet(FakeReturn* fakeret) { - fakeRegisterret_ = fakeret; - } + void SetRegistRet(FakeReturn* fakeret) { fakeRegisterret_ = fakeret; } void SetCloneFileStatus(FakeReturn* fakeret) { fakeSetCloneFileStatus_ = fakeret; } - void SetChangeOwner(FakeReturn* fakeret) { - fakeChangeOwner_ = fakeret; - } + void SetChangeOwner(FakeReturn* fakeret) { fakeChangeOwner_ = fakeret; } void SetCloseFileTask(std::function task) { closeFileTask_ = task; } - void CleanRetryTimes() { - retrytimes_ = 0; - } + void CleanRetryTimes() { retrytimes_ = 0; } - uint64_t GetRetryTimes() { - return retrytimes_; - } + uint64_t GetRetryTimes() { return retrytimes_; } - std::string GetIP() { - return ip_; - } + std::string 
GetIP() { return ip_; } - uint16_t GetPort() { - return port_; - } + uint16_t GetPort() { return port_; } - void CheckAuth(const std::string& signature, - const std::string& filename, - const std::string& owner, - uint64_t date) { + void CheckAuth(const std::string& signature, const std::string& filename, + const std::string& owner, uint64_t date) { if (owner == curve::client::kRootUserName) { - std::string str2sig = Authenticator::GetString2Signature(date, owner); // NOLINT - std::string sigtest = Authenticator::CalcString2Signature(str2sig, "123"); // NOLINT + std::string str2sig = + Authenticator::GetString2Signature(date, owner); // NOLINT + std::string sigtest = + Authenticator::CalcString2Signature(str2sig, "123"); // NOLINT ASSERT_STREQ(sigtest.c_str(), signature.c_str()); } else { ASSERT_STREQ("", signature.c_str()); @@ -785,18 +746,17 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { class FakeMDSTopologyService : public curve::mds::topology::TopologyService { public: void GetChunkServerListInCopySets( - ::google::protobuf::RpcController* controller, - const GetChunkServerListInCopySetsRequest* request, - GetChunkServerListInCopySetsResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController* controller, + const GetChunkServerListInCopySetsRequest* request, + GetChunkServerListInCopySetsResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); int statcode = 0; if (response->has_statuscode()) { statcode = response->statuscode(); } - if (statcode == -1 || - (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed())) { + if (statcode == -1 || (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed())) { controller->SetFailed("failed"); } @@ -805,11 +765,10 @@ class FakeMDSTopologyService : public curve::mds::topology::TopologyService { response->CopyFrom(*resp); } - void RegistChunkServer( - ::google::protobuf::RpcController* controller, - const ChunkServerRegistRequest* request, - ChunkServerRegistResponse* response, - ::google::protobuf::Closure* done) { + void RegistChunkServer(::google::protobuf::RpcController* controller, + const ChunkServerRegistRequest* request, + ChunkServerRegistResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); response->set_statuscode(0); @@ -818,87 +777,87 @@ class FakeMDSTopologyService : public curve::mds::topology::TopologyService { } void GetChunkServer(::google::protobuf::RpcController* controller, - const GetChunkServerInfoRequest* request, - GetChunkServerInfoResponse* response, - ::google::protobuf::Closure* done) { + const GetChunkServerInfoRequest* request, + GetChunkServerInfoResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { controller->SetFailed("failed"); return; } - auto resp = static_cast( - fakeret_->response_); + auto resp = + static_cast(fakeret_->response_); response->CopyFrom(*resp); } void ListChunkServer(::google::protobuf::RpcController* controller, - const ListChunkServerRequest* request, - ListChunkServerResponse* response, - ::google::protobuf::Closure* done) { + const ListChunkServerRequest* request, + ListChunkServerResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeret_->controller_ != nullptr - && 
fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { controller->SetFailed("failed"); return; } - auto resp = static_cast( - fakeret_->response_); + auto resp = static_cast(fakeret_->response_); response->CopyFrom(*resp); } void ListPhysicalPool(::google::protobuf::RpcController* controller, - const ListPhysicalPoolRequest* request, - ListPhysicalPoolResponse* response, - ::google::protobuf::Closure* done) { + const ListPhysicalPoolRequest* request, + ListPhysicalPoolResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakelistpoolret_->controller_ != nullptr - && fakelistpoolret_->controller_->Failed()) { + if (fakelistpoolret_->controller_ != nullptr && + fakelistpoolret_->controller_->Failed()) { controller->SetFailed("failed"); return; } - auto resp = static_cast( - fakelistpoolret_->response_); + auto resp = + static_cast(fakelistpoolret_->response_); response->CopyFrom(*resp); } void ListPoolZone(::google::protobuf::RpcController* controller, - const ListPoolZoneRequest* request, - ListPoolZoneResponse* response, - ::google::protobuf::Closure* done) { + const ListPoolZoneRequest* request, + ListPoolZoneResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakelistzoneret_->controller_ != nullptr - && fakelistzoneret_->controller_->Failed()) { + if (fakelistzoneret_->controller_ != nullptr && + fakelistzoneret_->controller_->Failed()) { controller->SetFailed("failed"); return; } - auto resp = static_cast( - fakelistzoneret_->response_); + auto resp = + static_cast(fakelistzoneret_->response_); response->CopyFrom(*resp); } void ListZoneServer(::google::protobuf::RpcController* controller, - const ListZoneServerRequest* request, - ListZoneServerResponse* response, - ::google::protobuf::Closure* done) { + const ListZoneServerRequest* request, + ListZoneServerResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakelistserverret_->controller_ != nullptr - && fakelistserverret_->controller_->Failed()) { + if (fakelistserverret_->controller_ != nullptr && + fakelistserverret_->controller_->Failed()) { controller->SetFailed("failed"); return; } - auto resp = static_cast( - fakelistserverret_->response_); + auto resp = + static_cast(fakelistserverret_->response_); response->CopyFrom(*resp); } - void GetCopySetsInChunkServer(::google::protobuf::RpcController* controller, - const GetCopySetsInChunkServerRequest* request, - GetCopySetsInChunkServerResponse* response, - ::google::protobuf::Closure* done) { + void GetCopySetsInChunkServer( + ::google::protobuf::RpcController* controller, + const GetCopySetsInChunkServerRequest* request, + GetCopySetsInChunkServerResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakegetcopysetincsret_->controller_ != nullptr - && fakegetcopysetincsret_->controller_->Failed()) { + if (fakegetcopysetincsret_->controller_ != nullptr && + fakegetcopysetincsret_->controller_->Failed()) { controller->SetFailed("failed"); return; } @@ -908,12 +867,12 @@ class FakeMDSTopologyService : public curve::mds::topology::TopologyService { } void ListLogicalPool(::google::protobuf::RpcController* controller, - const ListLogicalPoolRequest* request, - ListLogicalPoolResponse* response, - ::google::protobuf::Closure* done) { + const ListLogicalPoolRequest* request, + ListLogicalPoolResponse* response, + 
::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakelistlogicalpoolret_->controller_ != nullptr - && fakelistlogicalpoolret_->controller_->Failed()) { + if (fakelistlogicalpoolret_->controller_ != nullptr && + fakelistlogicalpoolret_->controller_->Failed()) { controller->SetFailed("failed"); return; } @@ -933,9 +892,7 @@ class FakeMDSTopologyService : public curve::mds::topology::TopologyService { response->set_clusterid(uuid); } - void SetFakeReturn(FakeReturn* fakeret) { - fakeret_ = fakeret; - } + void SetFakeReturn(FakeReturn* fakeret) { fakeret_ = fakeret; } FakeReturn* fakeret_; FakeReturn* fakelistpoolret_; @@ -945,11 +902,10 @@ class FakeMDSTopologyService : public curve::mds::topology::TopologyService { FakeReturn* fakelistlogicalpoolret_; }; -typedef void (*HeartbeatCallback) ( - ::google::protobuf::RpcController* controller, - const HeartbeatRequest* request, - HeartbeatResponse* response, - ::google::protobuf::Closure* done); +typedef void (*HeartbeatCallback)(::google::protobuf::RpcController* controller, + const HeartbeatRequest* request, + HeartbeatResponse* response, + ::google::protobuf::Closure* done); class FakeMDSHeartbeatService : public curve::mds::heartbeat::HeartbeatService { public: @@ -975,19 +931,18 @@ class FakeMDSHeartbeatService : public curve::mds::heartbeat::HeartbeatService { private: HeartbeatCallback cb_; - mutable std::mutex cbMtx_; + mutable std::mutex cbMtx_; }; class FakeCreateCopysetService : public curve::chunkserver::CopysetService { public: - void CreateCopysetNode( - ::google::protobuf::RpcController* controller, - const ::curve::chunkserver::CopysetRequest* request, - ::curve::chunkserver::CopysetResponse* response, - ::google::protobuf::Closure* done) { + void CreateCopysetNode(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::CopysetRequest* request, + ::curve::chunkserver::CopysetResponse* response, + ::google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { controller->SetFailed("failed"); } @@ -996,22 +951,23 @@ class FakeCreateCopysetService : public curve::chunkserver::CopysetService { response->CopyFrom(*resp); } - void GetCopysetStatus(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::CopysetStatusRequest *request, - ::curve::chunkserver::CopysetStatusResponse *response, - google::protobuf::Closure *done) { + void GetCopysetStatus( + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::CopysetStatusRequest* request, + ::curve::chunkserver::CopysetStatusResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - if (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { controller->SetFailed("failed"); return; } response->set_state(::braft::State::STATE_LEADER); - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); response->set_allocated_peer(peer); peer->set_address("127.0.0.1:1111"); - curve::common::Peer *leader = new curve::common::Peer(); + curve::common::Peer* leader = new curve::common::Peer(); response->set_allocated_leader(leader); leader->set_address("127.0.0.1:1111"); response->set_readonly(1); @@ -1029,21 +985,13 @@ class FakeCreateCopysetService : public 
curve::chunkserver::CopysetService { response->set_status(status_); } - void SetHash(uint64_t hash) { - hash_ = hash; - } + void SetHash(uint64_t hash) { hash_ = hash; } - void SetApplyindex(uint64_t index) { - applyindex_ = index; - } + void SetApplyindex(uint64_t index) { applyindex_ = index; } - void SetStatus(const COPYSET_OP_STATUS& status) { - status_ = status; - } + void SetStatus(const COPYSET_OP_STATUS& status) { status_ = status; } - void SetFakeReturn(FakeReturn* fakeret) { - fakeret_ = fakeret; - } + void SetFakeReturn(FakeReturn* fakeret) { fakeret_ = fakeret; } public: uint64_t applyindex_; @@ -1054,30 +1002,29 @@ class FakeCreateCopysetService : public curve::chunkserver::CopysetService { class FakeScheduleService : public ::curve::mds::schedule::ScheduleService { public: - void RapidLeaderSchedule( - google::protobuf::RpcController* cntl_base, - const RapidLeaderScheduleRequst* request, - RapidLeaderScheduleResponse* response, - google::protobuf::Closure* done) { + void RapidLeaderSchedule(google::protobuf::RpcController* cntl_base, + const RapidLeaderScheduleRequst* request, + RapidLeaderScheduleResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { cntl_base->SetFailed("failed"); return; } - auto resp = static_cast( - fakeret_->response_); + auto resp = + static_cast(fakeret_->response_); response->CopyFrom(*resp); } void QueryChunkServerRecoverStatus( google::protobuf::RpcController* cntl_base, - const QueryChunkServerRecoverStatusRequest *request, - QueryChunkServerRecoverStatusResponse *response, + const QueryChunkServerRecoverStatusRequest* request, + QueryChunkServerRecoverStatusResponse* response, google::protobuf::Closure* done) { brpc::ClosureGuard done_guard(done); - if (fakeret_->controller_ != nullptr - && fakeret_->controller_->Failed()) { + if (fakeret_->controller_ != nullptr && + fakeret_->controller_->Failed()) { cntl_base->SetFailed("failed"); return; } @@ -1086,9 +1033,7 @@ class FakeScheduleService : public ::curve::mds::schedule::ScheduleService { response->CopyFrom(*resp); } - void SetFakeReturn(FakeReturn* fakeret) { - fakeret_ = fakeret; - } + void SetFakeReturn(FakeReturn* fakeret) { fakeret_ = fakeret; } FakeReturn* fakeret_; }; @@ -1118,15 +1063,11 @@ class FakeMDS { std::vector conf; }; - FakeScheduleService* GetScheduleService() { - return &fakeScheduleService_; - } + FakeScheduleService* GetScheduleService() { return &fakeScheduleService_; } - FakeMDSCurveFSService* GetMDSService() { - return &fakecurvefsservice_; - } + FakeMDSCurveFSService* GetMDSService() { return &fakecurvefsservice_; } - std::vector GetCreateCopysetService() { + std::vector GetCreateCopysetService() { return copysetServices_; } @@ -1134,15 +1075,11 @@ class FakeMDS { return chunkServices_; } - CliServiceFake* GetCliService() { - return &fakeCliService_; - } + CliServiceFake* GetCliService() { return &fakeCliService_; } - std::vector GetChunkservice() { - return chunkServices_; - } + std::vector GetChunkservice() { return chunkServices_; } - std::vector GetRaftStateService() { + std::vector GetRaftStateService() { return raftStateServices_; } @@ -1159,23 +1096,23 @@ class FakeMDS { private: std::vector copysetnodeVec_; brpc::Server* server_; - std::vector chunkservers_; + std::vector chunkservers_; std::vector server_addrs_; std::vector peers_; - std::vector chunkServices_; - 
std::vector copysetServices_;
-    std::vector raftStateServices_;
-    std::vector fakeChunkServerServices_;
+    std::vector chunkServices_;
+    std::vector copysetServices_;
+    std::vector raftStateServices_;
+    std::vector fakeChunkServerServices_;
     std::string filename_;
     uint64_t size_;
-    CliServiceFake fakeCliService_;
+    CliServiceFake fakeCliService_;
     FakeMDSCurveFSService fakecurvefsservice_;
     FakeMDSTopologyService faketopologyservice_;
     FakeMDSHeartbeatService fakeHeartbeatService_;
     FakeScheduleService fakeScheduleService_;
-    std::map metrics_;
+    std::map metrics_;
 };

-#endif  // TEST_CLIENT_FAKE_FAKEMDS_H_
+#endif  // TEST_CLIENT_FAKE_FAKEMDS_H_
diff --git a/test/client/inflight_rpc_control_test.cpp b/test/client/inflight_rpc_control_test.cpp
index 8d6d4de1ee..717211348f 100644
--- a/test/client/inflight_rpc_control_test.cpp
+++ b/test/client/inflight_rpc_control_test.cpp
@@ -72,7 +72,7 @@ TEST(InflightRPCTest, TestInflightRPC) {
     int maxInflightNum = 8;
     {
-        // 测试inflight数量
+        // Test the number of inflight requests
         InflightControl control;
         control.SetMaxInflightNum(maxInflightNum);
         ASSERT_EQ(0, control.GetCurrentInflightNum());
@@ -89,7 +89,7 @@ TEST(InflightRPCTest, TestInflightRPC) {
     }

     {
-        // 测试GetInflightTokan与ReleaseInflightToken的并发
+        // Test the concurrency of GetInflightToken and ReleaseInflightToken
         InflightControl control;
         control.SetMaxInflightNum(maxInflightNum);

@@ -123,7 +123,7 @@
     }

     {
-        // 测试WaitInflightAllComeBack
+        // Test WaitInflightAllComeBack
         InflightControl control;
         control.SetMaxInflightNum(maxInflightNum);
         for (int i = 1; i <= maxInflightNum; ++i) {
@@ -148,13 +148,15 @@
 }

 TEST(InflightRPCTest, FileCloseTest) {
-    // 测试在文件关闭的时候,lese续约失败不会调用iomanager已析构的资源
-    // lease时长10s,在lease期间仅续约一次,一次失败就会调用iomanager
-    // block IO,这时候其实调用的是scheduler的LeaseTimeoutBlockIO
+    // Test that when the lease renewal fails at the time of file closure, it
+    // will not invoke the already destructed resources of the IO manager. The
+    // lease duration is 10 seconds, and only one renewal is allowed during the
+    // lease period. If the renewal fails, it will trigger the IO manager's
+    // block IO, which actually calls the LeaseTimeoutBlockIO of the scheduler.
     IOOption ioOption;
     ioOption.reqSchdulerOpt.ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 10000;
-    // 设置inflight RPC最大数量为1
+    // Set the maximum number of inflight RPCs to 1
     ioOption.ioSenderOpt.inflightOpt.fileMaxInFlightRPCNum = 1;

     std::condition_variable cv;
@@ -200,7 +202,8 @@ TEST(InflightRPCTest, FileCloseTest) {
         LeaseExecutor lease(lopt, userinfo, nullptr, iomanager);

         for (int j = 0; j < 5; j++) {
-            // 测试iomanager退出之后,lease再去调用其scheduler资源不会crash
+            // Test that after the iomanager has exited, the lease calling
+            // its scheduler resources again does not crash
             lease.InvalidLease();
         }

@@ -214,11 +217,12 @@
         }
     };

-    // 并发两个线程,一个线程启动iomanager初始化,然后反初始化
-    // 另一个线程启动lease续约,然后调用iomanager使其block IO
-    // 预期:并发两个线程,lease线程续约失败即使在iomanager线程
-    // 退出的同时去调用其block IO接口也不会出现并发竞争共享资源的
-    // 场景。
+    // Concurrently run two threads: one thread initializes the IO manager and
+    // then deinitializes it, while the other thread initiates lease renewal and
+    // then calls the IO manager to make it block IO. Expectation: Concurrent
+    // execution of the two threads should not result in concurrent competition
+    // for shared resources, even if the lease thread fails to renew while the
+    // IO manager thread exits.
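// ---------------------------------------------------------------------------
// [Editorial aside, not part of the patch] TestInflightRPC above exercises a
// counting token gate. Below is a minimal, self-contained sketch of that
// pattern, assuming only the method names visible in the hunks; the real
// InflightControl in src/client may differ in detail.
#include <condition_variable>
#include <cstdint>
#include <mutex>

class InflightControlSketch {
 public:
    void SetMaxInflightNum(uint64_t n) { maxInflight_ = n; }

    uint64_t GetCurrentInflightNum() {
        std::lock_guard<std::mutex> lk(mtx_);
        return curInflight_;
    }

    // Blocks while the cap is reached, then takes one slot.
    void GetInflightToken() {
        std::unique_lock<std::mutex> lk(mtx_);
        cv_.wait(lk, [this] { return curInflight_ < maxInflight_; });
        ++curInflight_;
    }

    // Returns one slot and wakes both capped senders and drain waiters.
    void ReleaseInflightToken() {
        std::lock_guard<std::mutex> lk(mtx_);
        --curInflight_;
        cv_.notify_all();
    }

    // Blocks until every token has been released.
    void WaitInflightAllComeBack() {
        std::unique_lock<std::mutex> lk(mtx_);
        cv_.wait(lk, [this] { return curInflight_ == 0; });
    }

 private:
    uint64_t maxInflight_ = 0;
    uint64_t curInflight_ = 0;
    std::mutex mtx_;
    std::condition_variable cv_;
};
// ---------------------------------------------------------------------------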
std::thread t1(f1); std::thread t2(f2); diff --git a/test/client/iotracker_splitor_unittest.cpp b/test/client/iotracker_splitor_unittest.cpp index 1f423250fa..10dae34e55 100644 --- a/test/client/iotracker_splitor_unittest.cpp +++ b/test/client/iotracker_splitor_unittest.cpp @@ -31,22 +31,22 @@ #include //NOLINT #include // NOLINT #include -#include //NOLINT +#include //NOLINT +#include "include/client/libcurve.h" #include "src/client/client_common.h" #include "src/client/client_config.h" #include "src/client/config_info.h" #include "src/client/file_instance.h" #include "src/client/io_tracker.h" #include "src/client/iomanager4file.h" -#include "include/client/libcurve.h" #include "src/client/libcurve_file.h" #include "src/client/mds_client.h" #include "src/client/metacache.h" #include "src/client/metacache_struct.h" #include "src/client/request_context.h" -#include "src/client/splitor.h" #include "src/client/source_reader.h" +#include "src/client/splitor.h" #include "test/client/fake/fakeMDS.h" #include "test/client/fake/mockMDS.h" #include "test/client/fake/mock_schedule.h" @@ -90,7 +90,8 @@ class IOTrackerSplitorTest : public ::testing::Test { fopt.ioOpt.ioSplitOpt.fileIOSplitMaxSizeKB = 64; fopt.ioOpt.ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 1000; fopt.ioOpt.ioSenderOpt.failRequestOpt.chunkserverOPMaxRetry = 3; - fopt.ioOpt.ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = 500; // NOLINT + fopt.ioOpt.ioSenderOpt.failRequestOpt.chunkserverOPRetryIntervalUS = + 500; // NOLINT fopt.ioOpt.metaCacheOpt.metacacheGetLeaderRetry = 3; fopt.ioOpt.metaCacheOpt.metacacheRPCRetryIntervalUS = 500; fopt.ioOpt.reqSchdulerOpt.scheduleQueueCapacity = 4096; @@ -131,11 +132,11 @@ class IOTrackerSplitorTest : public ::testing::Test { void InsertMetaCache() { if (server.AddService(&curvefsservice, - brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { LOG(FATAL) << "Fail to add service"; } if (server.AddService(&topologyservice, - brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { LOG(FATAL) << "Fail to add service"; } brpc::ServerOptions options; @@ -148,7 +149,7 @@ class IOTrackerSplitorTest : public ::testing::Test { * 1. set openfile response */ ::curve::mds::OpenFileResponse* openresponse = - new ::curve::mds::OpenFileResponse(); + new ::curve::mds::OpenFileResponse(); ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession; se->set_sessionid("1"); se->set_createtime(12345); @@ -170,21 +171,23 @@ class IOTrackerSplitorTest : public ::testing::Test { openresponse->set_statuscode(::curve::mds::StatusCode::kOK); openresponse->set_allocated_protosession(se); openresponse->set_allocated_fileinfo(fin); - FakeReturn* openfakeret = new FakeReturn(nullptr, static_cast(openresponse)); // NOLINT + FakeReturn* openfakeret = new FakeReturn( + nullptr, static_cast(openresponse)); // NOLINT curvefsservice.SetOpenFile(openfakeret); fileinstance_->Open(); /** * 2. set closefile response */ - ::curve::mds::CloseFileResponse* closeresp = new ::curve::mds::CloseFileResponse; // NOLINT + ::curve::mds::CloseFileResponse* closeresp = + new ::curve::mds::CloseFileResponse; // NOLINT closeresp->set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* closefileret - = new FakeReturn(nullptr, static_cast(closeresp)); + FakeReturn* closefileret = + new FakeReturn(nullptr, static_cast(closeresp)); curvefsservice.SetCloseFile(closefileret); /** - * 3. 设置GetOrAllocateSegmentresponse + * 3. 
Set GetOrAllocateSegmentresponse */ curve::mds::GetOrAllocateSegmentResponse* response = new curve::mds::GetOrAllocateSegmentResponse(); @@ -192,30 +195,27 @@ class IOTrackerSplitorTest : public ::testing::Test { response->set_statuscode(::curve::mds::StatusCode::kOK); response->set_allocated_pagefilesegment(pfs); - response->mutable_pagefilesegment()-> - set_logicalpoolid(1234); - response->mutable_pagefilesegment()-> - set_segmentsize(1 * 1024 * 1024 * 1024); - response->mutable_pagefilesegment()-> - set_chunksize(4 * 1024 * 1024); - response->mutable_pagefilesegment()-> - set_startoffset(0); - - for (int i = 0; i < 256; i ++) { + response->mutable_pagefilesegment()->set_logicalpoolid(1234); + response->mutable_pagefilesegment()->set_segmentsize(1 * 1024 * 1024 * + 1024); + response->mutable_pagefilesegment()->set_chunksize(4 * 1024 * 1024); + response->mutable_pagefilesegment()->set_startoffset(0); + + for (int i = 0; i < 256; i++) { auto chunk = response->mutable_pagefilesegment()->add_chunks(); chunk->set_copysetid(i); chunk->set_chunkid(i); } - getsegmentfakeret = new FakeReturn(nullptr, - static_cast(response)); + getsegmentfakeret = + new FakeReturn(nullptr, static_cast(response)); curvefsservice.SetGetOrAllocateSegmentFakeReturn(getsegmentfakeret); curve::mds::GetOrAllocateSegmentResponse* notallocateresponse = - new curve::mds::GetOrAllocateSegmentResponse(); - notallocateresponse->set_statuscode(::curve::mds::StatusCode - ::kSegmentNotAllocated); - notallocatefakeret = new FakeReturn(nullptr, - static_cast(notallocateresponse)); + new curve::mds::GetOrAllocateSegmentResponse(); + notallocateresponse->set_statuscode( + ::curve::mds::StatusCode ::kSegmentNotAllocated); + notallocatefakeret = + new FakeReturn(nullptr, static_cast(notallocateresponse)); // set GetOrAllocateSegmentResponse for read from clone source curve::mds::GetOrAllocateSegmentResponse* cloneSourceResponse = @@ -224,28 +224,27 @@ class IOTrackerSplitorTest : public ::testing::Test { cloneSourceResponse->set_statuscode(::curve::mds::StatusCode::kOK); cloneSourceResponse->set_allocated_pagefilesegment(clonepfs); - cloneSourceResponse->mutable_pagefilesegment()-> - set_logicalpoolid(1); - cloneSourceResponse->mutable_pagefilesegment()-> - set_segmentsize(1 * 1024 * 1024 * 1024); - cloneSourceResponse->mutable_pagefilesegment()-> - set_chunksize(4 * 1024 * 1024); - cloneSourceResponse->mutable_pagefilesegment()-> - set_startoffset(1 * 1024 * 1024 * 1024); + cloneSourceResponse->mutable_pagefilesegment()->set_logicalpoolid(1); + cloneSourceResponse->mutable_pagefilesegment()->set_segmentsize( + 1 * 1024 * 1024 * 1024); + cloneSourceResponse->mutable_pagefilesegment()->set_chunksize(4 * 1024 * + 1024); + cloneSourceResponse->mutable_pagefilesegment()->set_startoffset( + 1 * 1024 * 1024 * 1024); for (int i = 256; i < 512; i++) { - auto chunk = cloneSourceResponse->mutable_pagefilesegment() - ->add_chunks(); + auto chunk = + cloneSourceResponse->mutable_pagefilesegment()->add_chunks(); chunk->set_copysetid(i); chunk->set_chunkid(i); } - getsegmentfakeretclone = new FakeReturn(nullptr, - static_cast(cloneSourceResponse)); + getsegmentfakeretclone = + new FakeReturn(nullptr, static_cast(cloneSourceResponse)); /** * 4. 
set refresh response */ - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; info->set_filename("1_userinfo_.txt"); info->set_seqnum(2); info->set_id(1); @@ -260,18 +259,19 @@ class IOTrackerSplitorTest : public ::testing::Test { refreshresp->set_statuscode(::curve::mds::StatusCode::kOK); refreshresp->set_sessionid("1234"); refreshresp->set_allocated_fileinfo(info); - FakeReturn* refreshfakeret - = new FakeReturn(nullptr, static_cast(refreshresp)); + FakeReturn* refreshfakeret = + new FakeReturn(nullptr, static_cast(refreshresp)); curvefsservice.SetRefreshSession(refreshfakeret, nullptr); /** - * 5. 设置topology返回值 + * 5. Set topology return value */ - ::curve::mds::topology::GetChunkServerListInCopySetsResponse* response_1 - = new ::curve::mds::topology::GetChunkServerListInCopySetsResponse; + ::curve::mds::topology::GetChunkServerListInCopySetsResponse* + response_1 = new ::curve::mds::topology:: + GetChunkServerListInCopySetsResponse; response_1->set_statuscode(0); uint64_t chunkserveridc = 1; - for (int i = 0; i < 256; i ++) { + for (int i = 0; i < 256; i++) { auto csinfo = response_1->add_csinfo(); csinfo->set_copysetid(i); @@ -282,23 +282,23 @@ class IOTrackerSplitorTest : public ::testing::Test { cslocs->set_port(9104); } } - FakeReturn* faktopologyeret = new FakeReturn(nullptr, - static_cast(response_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(response_1)); topologyservice.SetFakeReturn(faktopologyeret); - curve::client::MetaCache* mc = fileinstance_->GetIOManager4File()-> - GetMetaCache(); + curve::client::MetaCache* mc = + fileinstance_->GetIOManager4File()->GetMetaCache(); curve::client::FInfo_t fi; fi.userinfo = userinfo; - fi.chunksize = 4 * 1024 * 1024; + fi.chunksize = 4 * 1024 * 1024; fi.segmentsize = 1 * 1024 * 1024 * 1024ul; SegmentInfo sinfo; LogicalPoolCopysetIDInfo_t lpcsIDInfo; mdsclient_->GetOrAllocateSegment(true, 0, &fi, nullptr, &sinfo); int count = 0; for (auto iter : sinfo.chunkvec) { - uint64_t index = (sinfo.startoffset + count*fi.chunksize ) - / fi.chunksize; + uint64_t index = + (sinfo.startoffset + count * fi.chunksize) / fi.chunksize; mc->UpdateChunkInfoByIndex(index, iter); ++count; } @@ -339,17 +339,17 @@ class IOTrackerSplitorTest : public ::testing::Test { curvefsservice.SetOpenFile(fakeOpen_.get()); } - FileClient *fileClient_; + FileClient* fileClient_; UserInfo_t userinfo; std::shared_ptr mdsclient_; FileServiceOption fopt; - FileInstance *fileinstance_; + FileInstance* fileinstance_; brpc::Server server; FakeMDSCurveFSService curvefsservice; FakeTopologyService topologyservice; - FakeReturn *getsegmentfakeret; - FakeReturn *notallocatefakeret; - FakeReturn *getsegmentfakeretclone; + FakeReturn* getsegmentfakeret; + FakeReturn* notallocatefakeret; + FakeReturn* getsegmentfakeretclone; OpenFileResponse openResp_; std::unique_ptr fakeOpen_; @@ -376,7 +376,7 @@ TEST_F(IOTrackerSplitorTest, AsyncStartRead) { { std::unique_lock lk(readmtx); - readcv.wait(lk, []()->bool{return ioreadflag;}); + readcv.wait(lk, []() -> bool { return ioreadflag; }); } LOG(ERROR) << "address = " << &data; ASSERT_EQ('a', data[0]); @@ -521,7 +521,7 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartRead) { { std::unique_lock lk(readmtx); - readcv.wait(lk, []()->bool{return ioreadflag;}); + readcv.wait(lk, []() -> bool { return ioreadflag; }); } ASSERT_EQ('a', data[0]); ASSERT_EQ('a', data[4 * 1024 - 1]); @@ -557,7 +557,7 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWrite) { { 
std::unique_lock lk(writemtx);
-        writecv.wait(lk, []()->bool{return iowriteflag;});
+        writecv.wait(lk, []() -> bool { return iowriteflag; });
     }

     std::unique_ptr writebuffer(new char[aioctx->length]);
@@ -603,13 +603,11 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWriteReadGetSegmentFail) {
     memset(data + 4 * 1024, 'b', chunk_size);
     memset(data + 4 * 1024 + chunk_size, 'c', 4 * 1024);

-    // 设置mds一侧get segment接口返回失败,底层task thread层会一直重试,
-    // 但是不会阻塞上层继续向下发送IO请求
-    int reqcount = 32;
-    auto threadFunc1 = [&]() {
-        while (reqcount > 0) {
-            fileinstance_->AioWrite(aioctx);
-            reqcount--;
+    // When the 'get segment' interface on the MDS (Metadata Server) side is
+    // reported as failed, the underlying task thread layer will keep
+    // retrying. However, this will not block the upper layer from continuing
+    // to send IO requests downward.
+    int reqcount = 32;
+    auto threadFunc1 = [&]() {
+        while (reqcount > 0) {
+            fileinstance_->AioWrite(aioctx);
+            reqcount--;
         }
     };
@@ -636,15 +634,12 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWriteReadGetServerlistFail) {
     ioctxmana->SetRequestScheduler(mockschuler);
     ioctxmana->SetIOOpt(fopt.ioOpt);

-    // offset 10*1024*1024*1024ul 不在metacache里
-    // client回去mds拿segment和serverlist
-    CurveAioContext* aioctx = new CurveAioContext;
-    aioctx->offset = 10*1024*1024*1024ul;
-    aioctx->length = chunk_size + 8 * 1024;
-    aioctx->ret = LIBCURVE_ERROR::OK;
-    aioctx->cb = writecallback;
-    aioctx->buf = new char[aioctx->length];
-    aioctx->op = LIBCURVE_OP::LIBCURVE_OP_WRITE;
+    // The offset 10*1024*1024*1024ul is not in the metacache.
+    // The client will request the segment and serverlist from the MDS
+    // (Metadata Server).
+    CurveAioContext* aioctx = new CurveAioContext;
+    aioctx->offset = 10*1024*1024*1024ul;
+    aioctx->length = chunk_size + 8 * 1024;
+    aioctx->ret = LIBCURVE_ERROR::OK;
+    aioctx->cb = writecallback;
+    aioctx->buf = new char[aioctx->length];
+    aioctx->op = LIBCURVE_OP::LIBCURVE_OP_WRITE;

     char* data = static_cast(aioctx->buf);
@@ -652,10 +647,10 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWriteReadGetServerlistFail) {
     memset(data + 4 * 1024, 'b', chunk_size);
     memset(data + 4 * 1024 + chunk_size, 'c', 4 * 1024);

-    // 设置mds一侧get server list接口返回失败,底层task thread层会一直重试
-    // 但是不会阻塞,上层继续向下发送IO请求
-    int reqcount = 32;
-    auto threadFunc1 = [&]() {
+    // If the "get server list" interface on the MDS side is reported as a
+    // failure, the underlying task thread layer will keep retrying. However,
+    // this won't block the process, and the upper layer will continue
+    // sending IO requests downstream.
+    int reqcount = 32;
+    auto threadFunc1 = [&]() {
         while (reqcount > 0) {
             fileinstance_->AioWrite(aioctx);
             reqcount--;
@@ -722,7 +717,7 @@ TEST_F(IOTrackerSplitorTest, ManagerStartWrite) {
         process.join();
     }

-    std::unique_ptr writebuffer(new char[length]);
+    std::unique_ptr writebuffer(new char[length]);
     memcpy(writebuffer.get(), writeData.to_string().c_str(), length);

     ASSERT_EQ('a', writebuffer[0]);
@@ -768,8 +763,8 @@ TEST_F(IOTrackerSplitorTest, ExceptionTest_TEST) {
     auto threadfunc = [&]() {
         iotracker->SetUserDataType(UserDataType::RawBuffer);
-        iotracker->StartWrite(nullptr, offset, length, mdsclient_.get(),
-                              &fi, nullptr);
+        iotracker->StartWrite(nullptr, offset, length, mdsclient_.get(), &fi,
+                              nullptr);
     };

     std::thread process(threadfunc);
@@ -800,8 +795,7 @@ TEST_F(IOTrackerSplitorTest, BoundaryTEST) {

     // this offset and length will make splitor split fail.
     // we set disk size = 1G.
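// ---------------------------------------------------------------------------
// [Editorial aside, not part of the patch] Why BoundaryTEST expects the split
// to fail, worked out from the constants in the hunk below; the 1G disk size
// is taken from the comment above.
#include <cstdint>

constexpr uint64_t kDiskSize = 1ull * 1024 * 1024 * 1024;             // 1 GiB
constexpr uint64_t kOffset = kDiskSize - 4 * 1024 * 1024 - 4 * 1024;  // test offset
constexpr uint64_t kLength = 4 * 1024 * 1024 + 8 * 1024;              // test length
// The request ends 4 KiB past the end of the volume, so the splitor must
// reject it.
static_assert(kOffset + kLength == kDiskSize + 4 * 1024,
              "request runs 4 KiB past the end of the 1 GiB volume");
// ---------------------------------------------------------------------------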
-    uint64_t offset = 1 * 1024 * 1024 * 1024 -
-                      4 * 1024 * 1024 - 4 *1024;
+    uint64_t offset = 1 * 1024 * 1024 * 1024 - 4 * 1024 * 1024 - 4 * 1024;
     uint64_t length = 4 * 1024 * 1024 + 8 * 1024;

     char* buf = new char[length];
@@ -828,11 +822,10 @@ TEST_F(IOTrackerSplitorTest, largeIOTest) {
     /**
      * this offset and length will make splitor split into two 8k IO.
     */
-    uint64_t length = 2 * 64 * 1024;            // 128KB
+    uint64_t length = 2 * 64 * 1024;             // 128KB
     uint64_t offset = 4 * 1024 * 1024 - length;  // 4MB - 128KB
     char* buf = new char[length];
-
     memset(buf, 'a', 64 * 1024);               // 64KB
     memset(buf + 64 * 1024, 'b', 64 * 1024);   // 64KB

     butil::IOBuf writeData;
@@ -902,37 +895,33 @@ TEST_F(IOTrackerSplitorTest, InvalidParam) {
                      mdsclient_.get(), &fi, nullptr));

     ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests(
-                      nullptr, mc,
-                      &reqlist, cid, &iobuf, offset, length, 0));
+                      nullptr, mc, &reqlist, cid, &iobuf, offset, length, 0));

     ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests(
                      iotracker, nullptr, &reqlist, &iobuf, offset, length,
                      mdsclient_.get(), nullptr, nullptr));

-    ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests(
-                      iotracker, nullptr,
-                      &reqlist, cid, &iobuf, offset, length, 0));
+    ASSERT_EQ(
+        -1, curve::client::Splitor::SingleChunkIO2ChunkRequests(
+                iotracker, nullptr, &reqlist, cid, &iobuf, offset, length, 0));

     ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests(
                      iotracker, mc, &reqlist, &iobuf, offset, length,
                      mdsclient_.get(), nullptr, nullptr));

-    ASSERT_EQ(
-        -1, curve::client::Splitor::IO2ChunkRequests(
-                iotracker, mc, &reqlist, &iobuf, offset, length, nullptr,
-                &fi, nullptr));
+    ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests(
+                      iotracker, mc, &reqlist, &iobuf, offset, length, nullptr,
+                      &fi, nullptr));

     ASSERT_EQ(0, curve::client::Splitor::SingleChunkIO2ChunkRequests(
-                     iotracker, mc,
-                     &reqlist, cid, &iobuf, offset, length, 0));
+                     iotracker, mc, &reqlist, cid, &iobuf, offset, length, 0));

     ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests(
                      iotracker, mc, nullptr, &iobuf, offset, length,
                      mdsclient_.get(), nullptr, nullptr));

     ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests(
-                      iotracker, mc,
-                      nullptr, cid, &iobuf, offset, length, 0));
+                      iotracker, mc, nullptr, cid, &iobuf, offset, length, 0));

     ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests(
                      iotracker, mc, &reqlist, nullptr, offset, length,
@@ -961,7 +950,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) {
     MetaCache metaCache;

     FInfo_t fileInfo;
-    fileInfo.chunksize = 16 * 1024 * 1024;   // 16M
+    fileInfo.chunksize = 16 * 1024 * 1024;  // 16M
     fileInfo.filestatus = FileStatus::CloneMetaInstalled;

     CloneSourceInfo cloneSourceInfo;
@@ -969,7 +958,7 @@
     cloneSourceInfo.length = 10ull * 1024 * 1024 * 1024;      // 10GB
     cloneSourceInfo.segmentSize = 1ull * 1024 * 1024 * 1024;  // 1GB

-    // 源卷只分配了第一个和最后一个segment
+    // The source volume has only allocated the first and last segments
     cloneSourceInfo.allocatedSegmentOffsets.insert(0);
     cloneSourceInfo.allocatedSegmentOffsets.insert(cloneSourceInfo.length -
                                                    cloneSourceInfo.segmentSize);
@@ -980,14 +969,14 @@
     ChunkIndex chunkIdx = 0;
     RequestSourceInfo sourceInfo;

-    // 第一个chunk
+    // First chunk
     sourceInfo =
         Splitor::CalcRequestSourceInfo(&ioTracker, &metaCache, chunkIdx);
     ASSERT_TRUE(sourceInfo.IsValid());
     ASSERT_EQ(sourceInfo.cloneFileSource, fileInfo.sourceInfo.name);
     ASSERT_EQ(sourceInfo.cloneFileOffset, 0);

-    // 克隆卷最后一个chunk
+    // The last chunk of the clone volume
// Clone the last chunk of the volume chunkIdx = fileInfo.sourceInfo.length / fileInfo.chunksize - 1; LOG(INFO) << "clone length = " << fileInfo.sourceInfo.length << ", chunk size = " << fileInfo.chunksize @@ -1000,19 +989,19 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { ASSERT_EQ(sourceInfo.cloneFileSource, fileInfo.sourceInfo.name); ASSERT_EQ(sourceInfo.cloneFileOffset, 10720641024); - // 源卷未分配segment - // 读取每个segment的第一个chunk + // Source volume unassigned segment + // Read the first chunk of each segment for (int i = 1; i < 9; ++i) { ChunkIndex chunkIdx = i * cloneSourceInfo.segmentSize / fileInfo.chunksize; - RequestSourceInfo sourceInfo = Splitor::CalcRequestSourceInfo( - &ioTracker, &metaCache, chunkIdx); + RequestSourceInfo sourceInfo = + Splitor::CalcRequestSourceInfo(&ioTracker, &metaCache, chunkIdx); ASSERT_FALSE(sourceInfo.IsValid()); ASSERT_TRUE(sourceInfo.cloneFileSource.empty()); ASSERT_EQ(sourceInfo.cloneFileOffset, 0); } - // 超过长度 + // Exceeding length chunkIdx = fileInfo.sourceInfo.length / fileInfo.chunksize; sourceInfo = @@ -1021,7 +1010,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { ASSERT_TRUE(sourceInfo.cloneFileSource.empty()); ASSERT_EQ(sourceInfo.cloneFileOffset, 0); - // 源卷长度为0 + // Source volume length is 0 chunkIdx = 0; fileInfo.sourceInfo.length = 0; metaCache.UpdateFileInfo(fileInfo); @@ -1031,7 +1020,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { ASSERT_TRUE(sourceInfo.cloneFileSource.empty()); ASSERT_EQ(sourceInfo.cloneFileOffset, 0); - // 不是read/write请求 + // Not a read/write request chunkIdx = 1; ioTracker.SetOpType(OpType::READ_SNAP); sourceInfo = @@ -1045,7 +1034,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { chunkIdx = 0; - // 不是克隆卷 + // Not a clone volume sourceInfo = Splitor::CalcRequestSourceInfo(&ioTracker, &metaCache, chunkIdx); ASSERT_FALSE(sourceInfo.IsValid()); @@ -1068,7 +1057,7 @@ TEST_F(IOTrackerSplitorTest, stripeTest) { fi.segmentsize = 1 * 1024 * 1024 * 1024ul; fi.stripeUnit = 1 * 1024 * 1024; fi.stripeCount = 4; - memset(buf, 'a', length); // 64KB + memset(buf, 'a', length); // 64KB dataCopy.append(buf, length); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); MetaCache* mc = iomana->GetMetaCache(); @@ -1162,9 +1151,9 @@ TEST_F(IOTrackerSplitorTest, TestDisableStripeForStripeFile) { IOTracker ioTracker(iomanager, cache, &scheduler, nullptr, true); std::vector reqlist; - ASSERT_EQ(0, - Splitor::IO2ChunkRequests(&ioTracker, cache, &reqlist, &dataCopy, - offset, length, mdsclient_.get(), &fi, nullptr)); + ASSERT_EQ(0, Splitor::IO2ChunkRequests(&ioTracker, cache, &reqlist, + &dataCopy, offset, length, + mdsclient_.get(), &fi, nullptr)); ASSERT_EQ(2, reqlist.size()); auto* first = reqlist[0]; @@ -1206,7 +1195,7 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegment) { } for (int i = 0; i < length; i++) { - ASSERT_EQ(0, data[i]); + ASSERT_EQ(0, data[i]); } delete[] data; } @@ -1233,11 +1222,11 @@ TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegment) { { std::unique_lock lk(readmtx); - readcv.wait(lk, []()->bool{return ioreadflag;}); + readcv.wait(lk, []() -> bool { return ioreadflag; }); } for (int i = 0; i < aioctx.length; i++) { - ASSERT_EQ(0, data[i]); + ASSERT_EQ(0, data[i]); } delete[] data; } @@ -1303,7 +1292,7 @@ TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegment2) { { std::unique_lock lk(readmtx); - readcv.wait(lk, []()->bool{return ioreadflag;}); + readcv.wait(lk, []() -> bool { return ioreadflag; }); } for (int i = 0; i < 4 * 1024; i++) { 
@@ -1342,8 +1331,7 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegmentFromOrigin) { auto& handlers = SourceReader::GetInstance().GetReadHandlers(); handlers.emplace( - std::piecewise_construct, - std::forward_as_tuple("/clonesource"), + std::piecewise_construct, std::forward_as_tuple("/clonesource"), std::forward_as_tuple(fileinstance2, ::time(nullptr), false)); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); @@ -1352,7 +1340,7 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegmentFromOrigin) { mc->UpdateChunkInfoByIndex(257, chunkIdInfo); FInfo_t fileInfo; - fileInfo.chunksize = 4 * 1024 * 1024; // 4M + fileInfo.chunksize = 4 * 1024 * 1024; // 4M fileInfo.fullPathName = "/1_userinfo_.txt"; fileInfo.owner = "userinfo"; fileInfo.filestatus = FileStatus::CloneMetaInstalled; @@ -1389,7 +1377,6 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegmentFromOrigin) { ASSERT_EQ('a', data[4 * 1024 + chunk_size]); ASSERT_EQ('a', data[length - 1]); - fileinstance2->UnInitialize(); delete fileinstance2; @@ -1398,8 +1385,8 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegmentFromOrigin) { TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegmentFromOrigin) { curvefsservice.SetGetOrAllocateSegmentFakeReturn(notallocatefakeret); - curvefsservice.SetGetOrAllocateSegmentFakeReturnForClone - (getsegmentfakeretclone); + curvefsservice.SetGetOrAllocateSegmentFakeReturnForClone( + getsegmentfakeretclone); PrepareOpenFile(); MockRequestScheduler* mockschuler = new MockRequestScheduler; @@ -1420,8 +1407,7 @@ TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegmentFromOrigin) { auto& handlers = SourceReader::GetInstance().GetReadHandlers(); handlers.emplace( - std::piecewise_construct, - std::forward_as_tuple("/clonesource"), + std::piecewise_construct, std::forward_as_tuple("/clonesource"), std::forward_as_tuple(fileinstance2, ::time(nullptr), false)); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); @@ -1460,7 +1446,7 @@ TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegmentFromOrigin) { { std::unique_lock lk(readmtx); - readcv.wait(lk, []()->bool{return ioreadflag;}); + readcv.wait(lk, []() -> bool { return ioreadflag; }); } LOG(ERROR) << "address = " << &data; ASSERT_EQ('a', data[0]); @@ -1478,28 +1464,22 @@ TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegmentFromOrigin) { TEST_F(IOTrackerSplitorTest, TimedCloseFd) { std::unordered_map fakeHandlers; fakeHandlers.emplace( - std::piecewise_construct, - std::forward_as_tuple("/1"), + std::piecewise_construct, std::forward_as_tuple("/1"), std::forward_as_tuple( - nullptr, - ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, + nullptr, ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, true)); fakeHandlers.emplace( - std::piecewise_construct, - std::forward_as_tuple("/2"), + std::piecewise_construct, std::forward_as_tuple("/2"), std::forward_as_tuple( - nullptr, - ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, + nullptr, ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, false)); FileInstance* instance = new FileInstance(); fakeHandlers.emplace( - std::piecewise_construct, - std::forward_as_tuple("/3"), + std::piecewise_construct, std::forward_as_tuple("/3"), std::forward_as_tuple( instance, - ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, - false)); + ::time(nullptr) - fopt.ioOpt.closeFdThreadOption.fdTimeout, false)); SourceReader::GetInstance().SetReadHandlers(fakeHandlers); diff --git 
a/test/client/lease_executor_test.cpp b/test/client/lease_executor_test.cpp index 4f5629ad8b..e008abd8f6 100644 --- a/test/client/lease_executor_test.cpp +++ b/test/client/lease_executor_test.cpp @@ -16,17 +16,18 @@ /* * Project: curve - * File Created: 2019年11月20日 + * File Created: November 20, 2019 * Author: wuhanqing */ +#include "src/client/lease_executor.h" + +#include #include #include #include -#include #include "src/client/iomanager4file.h" -#include "src/client/lease_executor.h" #include "src/client/mds_client.h" #include "test/client/mock/mock_namespace_service.h" @@ -81,8 +82,8 @@ class LeaseExecutorTest : public ::testing::Test { response_.set_allocated_fileinfo(fileInfo); EXPECT_CALL(curveFsService_, RefreshSession(_, _, _, _)) - .WillRepeatedly(DoAll(SetArgPointee<2>(response_), - Invoke(MockRefreshSession))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(response_), Invoke(MockRefreshSession))); } protected: diff --git a/test/client/libcbd_libcurve_test.cpp b/test/client/libcbd_libcurve_test.cpp index 3f582b8a3c..82fe048992 100644 --- a/test/client/libcbd_libcurve_test.cpp +++ b/test/client/libcbd_libcurve_test.cpp @@ -21,33 +21,32 @@ * 2018/11/23 Wenyu Zhou Initial version */ -#include +#include #include #include -#include +#include #include // #define CBD_BACKEND_FAKE #include "include/client/libcbd.h" - -#include "src/client/libcurve_file.h" #include "include/client/libcurve.h" +#include "src/client/client_common.h" #include "src/client/file_instance.h" -#include "test/client/fake/mock_schedule.h" +#include "src/client/libcurve_file.h" #include "test/client/fake/fakeMDS.h" -#include "src/client/client_common.h" +#include "test/client/fake/mock_schedule.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" using curve::client::EndPoint; -#define BUFSIZE 4 * 1024 -#define FILESIZE 10uL * 1024 * 1024 * 1024 -#define NEWSIZE 20uL * 1024 * 1024 * 1024 +#define BUFSIZE 4 * 1024 +#define FILESIZE 10uL * 1024 * 1024 * 1024 +#define NEWSIZE 20uL * 1024 * 1024 * 1024 -#define filename "1_userinfo_test.img" +#define filename "1_userinfo_test.img" const uint64_t GiB = 1024ull * 1024 * 1024; @@ -68,11 +67,11 @@ class TestLibcbdLibcurve : public ::testing::Test { public: void SetUp() { FLAGS_chunkserver_list = - "127.0.0.1:9110:0,127.0.0.1:9111:0,127.0.0.1:9112:0"; + "127.0.0.1:9110:0,127.0.0.1:9111:0,127.0.0.1:9112:0"; mds_ = new FakeMDS(filename); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9110, &ep); braft::PeerId pd(ep); @@ -381,7 +380,8 @@ TEST_F(TestLibcbdLibcurve, ReadAndCloseConcurrencyTest) { auto readThread = [buffer](int fd) { auto start = curve::common::TimeUtility::GetTimeofDayMs(); - ASSERT_EQ(BUFSIZE, cbd_lib_pread(fd, (void*)buffer, 0, BUFSIZE)); // NOLINT + ASSERT_EQ(BUFSIZE, + cbd_lib_pread(fd, (void*)buffer, 0, BUFSIZE)); // NOLINT auto end = curve::common::TimeUtility::GetTimeofDayMs(); ASSERT_LE(end - start, 1000); @@ -429,12 +429,12 @@ TEST_F(TestLibcbdLibcurve, IncreaseEpochTest) { ASSERT_EQ(ret, LIBCURVE_ERROR::OK); } -std::string mdsMetaServerAddr = "127.0.0.1:9951"; // NOLINT -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string configpath = "./test/client/configs/client_libcbd.conf"; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9951"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string configpath = 
"./test/client/configs/client_libcbd.conf"; // NOLINT -const std::vector clientConf { +const std::vector clientConf{ std::string("mds.listen.addr=127.0.0.1:9951"), std::string("global.logPath=./runlog/"), std::string("chunkserver.rpcTimeoutMS=1000"), @@ -445,17 +445,16 @@ const std::vector clientConf { std::string("metacache.rpcRetryIntervalUS=500"), std::string("mds.rpcRetryIntervalUS=500"), std::string("schedule.threadpoolSize=2"), - std::string("discard.discardTaskDelayMs=10") -}; + std::string("discard.discardTaskDelayMs=10")}; -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); google::ParseCommandLineFlags(&argc, &argv, false); curve::CurveCluster* cluster = new curve::CurveCluster(); - cluster->PrepareConfig( - configpath, clientConf); + cluster->PrepareConfig(configpath, + clientConf); int ret = RUN_ALL_TESTS(); return ret; diff --git a/test/client/libcurve_interface_unittest.cpp b/test/client/libcurve_interface_unittest.cpp index 99d35696b4..8a0c7a4b90 100644 --- a/test/client/libcurve_interface_unittest.cpp +++ b/test/client/libcurve_interface_unittest.cpp @@ -24,6 +24,7 @@ #include #include #include + #include // NOLINT #include // NOLINT #include @@ -58,14 +59,14 @@ std::condition_variable writeinterfacecv; std::mutex interfacemtx; std::condition_variable interfacecv; -void writecallbacktest(CurveAioContext *context) { +void writecallbacktest(CurveAioContext* context) { std::lock_guard lk(writeinterfacemtx); writeflag = true; writeinterfacecv.notify_one(); LOG(INFO) << "aio call back here, errorcode = " << context->ret; } -void readcallbacktest(CurveAioContext *context) { +void readcallbacktest(CurveAioContext* context) { std::lock_guard lk(writeinterfacemtx); readflag = true; interfacecv.notify_one(); @@ -88,7 +89,7 @@ TEST_F(TestLibcurveInterface, InterfaceTest) { memcpy(userinfo.owner, "userinfo", 9); memcpy(userinfo.password, "", 1); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9115, &ep); PeerId pd(ep); @@ -128,7 +129,7 @@ TEST_F(TestLibcurveInterface, InterfaceTest) { ASSERT_NE(fd, -1); - char *buffer = new char[8 * 1024]; + char* buffer = new char[8 * 1024]; memset(buffer, 'a', 1024); memset(buffer + 1024, 'b', 1024); memset(buffer + 2 * 1024, 'c', 1024); @@ -155,7 +156,7 @@ TEST_F(TestLibcurveInterface, InterfaceTest) { std::unique_lock lk(writeinterfacemtx); writeinterfacecv.wait(lk, []() -> bool { return writeflag; }); } - char *readbuffer = new char[8 * 1024]; + char* readbuffer = new char[8 * 1024]; CurveAioContext readaioctx; readaioctx.buf = readbuffer; readaioctx.offset = 0; @@ -244,7 +245,7 @@ TEST_F(TestLibcurveInterface, FileClientTest) { FileClient fc; - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9115, &ep); PeerId pd(ep); @@ -279,7 +280,7 @@ TEST_F(TestLibcurveInterface, FileClientTest) { fiu_enable("test/client/fake/fakeMDS.GetOrAllocateSegment", 1, nullptr, 0); - char *buffer = new char[8 * 1024]; + char* buffer = new char[8 * 1024]; memset(buffer, 'a', 1024); memset(buffer + 1024, 'b', 1024); memset(buffer + 2 * 1024, 'c', 1024); @@ -303,7 +304,7 @@ TEST_F(TestLibcurveInterface, FileClientTest) { std::unique_lock lk(writeinterfacemtx); writeinterfacecv.wait(lk, []() -> bool { return writeflag; }); } - char *readbuffer = new char[8 * 1024]; + char* readbuffer = new char[8 * 1024]; memset(readbuffer, 0xFF, 8 * 1024); CurveAioContext readaioctx; readaioctx.buf = readbuffer; @@ -375,7 +376,7 @@ TEST(TestLibcurveInterface, 
ChunkserverUnstableTest) { mdsclient_.Initialize(fopt.metaServerOpt); fileinstance_.Initialize("/test", &mdsclient_, userinfo, fopt); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9151, &ep); PeerId pd(ep); @@ -413,12 +414,11 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { ASSERT_EQ(length, fileinstance_.Write(buffer, offset, length)); ASSERT_EQ(length, fileinstance_.Read(buffer, offset, length)); - // 正常情况下只有第一次会去get leader + // Normally, getting the leader will only occur the first time. ASSERT_EQ(1, cliservice->GetInvokeTimes()); - // metacache中被写过的copyset leadermaychange都处于正常状态 - ChunkIDInfo_t chunkinfo1; - MetaCacheErrorType rc = mc->GetChunkInfoByIndex(0, &chunkinfo1); - ASSERT_EQ(rc, MetaCacheErrorType::OK); + // LeaderMayChange remains in a normal state for copysets that have + // been written to through the metacache. + ChunkIDInfo_t chunkinfo1; + MetaCacheErrorType rc = mc->GetChunkInfoByIndex(0, &chunkinfo1); + ASSERT_EQ(rc, MetaCacheErrorType::OK); for (int i = 0; i < FLAGS_copyset_num; i++) { CopysetPeerInfo ci = mc->GetCopysetinfo(FLAGS_logic_pool_id, i); if (i == chunkinfo1.cpid_) { @@ -430,17 +430,21 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { } } - // 设置chunkservice返回失败,那么mds每次重试都会去拉新的leader - // 127.0.0.1:9151:0,127.0.0.1:9152:0,127.0.0.1:9153:0是当前集群信息 - // 127.0.0.1:9151对应第一个chunkservice - // 设置rpc失败,会导致client将该chunkserverid上的leader copyset都标记为 + // If chunkservice returns failure, MDS will retry and fetch new leaders + // each time. + // The current cluster information is: 127.0.0.1:9151:0, 127.0.0.1:9152:0, + // 127.0.0.1:9153:0. + // 127.0.0.1:9151 corresponds to the first chunkservice. + // An RPC failure causes the client to mark all leader copysets on that + // chunkserver id as leadermaychange chunkservice[0]->SetRPCFailed(); - // 现在写第二个chunk,第二个chunk与第一个chunk不在同一个copyset里,这次读写失败 - ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); - ASSERT_EQ(-2, fileinstance_.Read(buffer, 1 * chunk_size, length)); - // 获取第2个chunk的chunkid信息 + // Now, write to the second chunk; as it does not belong to the same copyset + // as the first chunk, this read and write attempt fails. + ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); + ASSERT_EQ(-2, fileinstance_.Read(buffer, 1 * chunk_size, length)); + // Obtain chunkid information for the second chunk. ChunkIDInfo_t chunkinfo2; rc = mc->GetChunkInfoByIndex(1, &chunkinfo2); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -449,33 +453,33 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { CopysetPeerInfo ci = mc->GetCopysetinfo(FLAGS_logic_pool_id, i); if (i == chunkinfo1.cpid_ || i == chunkinfo2.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // 这两个leader为该chunkserver的copyset的LeaderMayChange置位 - ASSERT_TRUE(ci.LeaderMayChange()); - } else { - // 对于当前copyset没有leader信息的就直接置位LeaderMayChange - ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); + // Set LeaderMayChange for both of these leaders of the chunkserver's + // copysets. + ASSERT_TRUE(ci.LeaderMayChange()); + } else { + // For copysets without current leader information, set + // LeaderMayChange directly. + ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); ASSERT_TRUE(ci.LeaderMayChange()); } } chunkservice[0]->ReSetRPCFailed();
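
The assertions in this hunk encode a small invariant: when RPCs to a chunkserver fail, every copyset whose known leader sits on that server, and every copyset with no leader information at all, gets LeaderMayChange set. As a reading aid, a minimal self-contained C++ sketch of that rule; the names (CopysetState, MarkUnstable) are invented for illustration and are not the client's real MetaCache API:

    #include <cstdint>
    #include <map>

    // Toy model of the marking rule checked by the test. The flag is only a
    // hint: it is cleared lazily, on the next successful leader refresh.
    struct CopysetState {
        int leaderIndex = -1;        // -1: leader unknown
        uint64_t leaderServerId = 0;
        bool leaderMayChange = false;
    };

    void MarkUnstable(std::map<uint64_t, CopysetState>* copysets,
                      uint64_t failedServerId) {
        for (auto& kv : *copysets) {
            CopysetState& cs = kv.second;
            if (cs.leaderIndex == -1 || cs.leaderServerId == failedServerId) {
                cs.leaderMayChange = true;
            }
        }
    }
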
- // 再次写第二个chunk,这时候获取leader成功后,会将LeaderMayChange置位fasle - // 第一个chunk对应的copyset依然LeaderMayChange为true - ASSERT_EQ(8192, fileinstance_.Write(buffer, 1 * chunk_size, length)); + // Write to the second chunk again; after successfully obtaining a leader, + // LeaderMayChange will be set to false. + // LeaderMayChange for the copyset corresponding to the first chunk remains + // true. + ASSERT_EQ(8192, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Read(buffer, 1 * chunk_size, length)); for (int i = 0; i < FLAGS_copyset_num; i++) { CopysetPeerInfo ci = mc->GetCopysetinfo(FLAGS_logic_pool_id, i); if (i == chunkinfo2.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset2的LeaderMayChange置位 + // Set LeaderMayChange for copyset2. ASSERT_FALSE(ci.LeaderMayChange()); } else if (i == chunkinfo1.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset1的LeaderMayChange保持原有状态 + // LeaderMayChange for copyset1 remains unchanged. ASSERT_TRUE(ci.LeaderMayChange()); } else { - // 对于当前copyset没有leader信息的就直接置位LeaderMayChange - ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); + // For copysets without current leader information, set + // LeaderMayChange directly. + ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); ASSERT_TRUE(ci.LeaderMayChange()); } } @@ -485,33 +489,33 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { butil::str2endpoint("127.0.0.1", 9152, &ep2); PeerId pd2(ep2); cliservice->SetPeerID(pd2); - // 设置rpc失败,迫使copyset切换leader,切换leader后读写成功 - chunkservice[0]->SetRPCFailed(); - // 读写第一个和第二个chunk + // Force an RPC failure to trigger a copyset leader switch; reads and + // writes succeed after the leader switch. + chunkservice[0]->SetRPCFailed(); + // Read and write to the first and second chunks. ASSERT_EQ(8192, fileinstance_.Write(buffer, 0 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Read(buffer, 0 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Write(buffer, 0 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Read(buffer, 0 * chunk_size, length)); ASSERT_EQ(1, cliservice->GetInvokeTimes()); - // 这个时候 + // At this point for (int i = 0; i < FLAGS_copyset_num; i++) { CopysetPeerInfo ci = mc->GetCopysetinfo(FLAGS_logic_pool_id, i); if (i == chunkinfo2.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset2的LeaderMayChange置位 + // Set LeaderMayChange for copyset2 ASSERT_FALSE(ci.LeaderMayChange()); } else if (i == chunkinfo1.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset1的LeaderMayChange置位 + // Set LeaderMayChange for copyset1 ASSERT_FALSE(ci.LeaderMayChange()); } else { - // 对于当前copyset没有leader信息的就直接置位LeaderMayChange - ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); + // For the current copyset without leader information, directly set + // LeaderMayChange + ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); ASSERT_TRUE(ci.LeaderMayChange()); } } - // 验证copyset id信息更新 + // Verify the update of copyset ID information. // copyset id = 888, chunkserver id = 100 101 102 // copyset id = 999, chunkserver id = 102 103 104 CopysetPeerInfo csinfo1; @@ -568,8 +572,8 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { curve::client::CopysetPeerInfo peer9(103, addr); csinfo3.csinfos_.push_back(peer9); - // 更新copyset信息,chunkserver 104的信息被清除 - // 100,和 101上添加了新的copyset信息 + // Update copyset information, clearing the information for chunkserver 104. + // New copyset information has been added on chunk servers 100 and 101.
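
The membership checks that follow (copyset 888 on chunkservers 100/101/102, copyset 999 on 102/103/104) amount to maintaining a reverse index from chunkserver id to copyset ids. A small sketch of that bookkeeping under assumed types; the real metacache keeps richer peer information than plain id sets:

    #include <cstdint>
    #include <map>
    #include <set>

    // One map from chunkserver id to the copyset ids hosted there; replacing a
    // copyset's peer list erases it from servers that dropped out (104 in the
    // test) and inserts it on newcomers (100 and 101).
    using ServerToCopysets = std::map<uint64_t, std::set<uint64_t>>;

    void UpdatePeers(ServerToCopysets* index, uint64_t copysetId,
                     const std::set<uint64_t>& oldPeers,
                     const std::set<uint64_t>& newPeers) {
        for (uint64_t s : oldPeers) {
            if (newPeers.count(s) == 0) {
                (*index)[s].erase(copysetId);
            }
        }
        for (uint64_t s : newPeers) {
            (*index)[s].insert(copysetId);
        }
    }
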
mc->UpdateChunkserverCopysetInfo(FLAGS_logic_pool_id, csinfo3); ASSERT_TRUE(mc->CopysetIDInfoIn(100, FLAGS_logic_pool_id, 888)); ASSERT_TRUE(mc->CopysetIDInfoIn(100, FLAGS_logic_pool_id, 999)); @@ -596,7 +600,7 @@ TEST_F(TestLibcurveInterface, InterfaceExceptionTest) { // open not create file ASSERT_EQ(-1 * LIBCURVE_ERROR::FAILED, Open(filename.c_str(), &userinfo)); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9106, &ep); PeerId pd(ep); @@ -610,7 +614,7 @@ TEST_F(TestLibcurveInterface, InterfaceExceptionTest) { ASSERT_EQ(0, Init(configpath.c_str())); - char *buffer = new char[8 * 1024]; + char* buffer = new char[8 * 1024]; memset(buffer, 'a', 8 * 1024); CurveAioContext writeaioctx; @@ -623,7 +627,7 @@ TEST_F(TestLibcurveInterface, InterfaceExceptionTest) { ASSERT_EQ(-LIBCURVE_ERROR::BAD_FD, AioWrite(1234, &writeaioctx)); // aioread not opened file - char *readbuffer = new char[8 * 1024]; + char* readbuffer = new char[8 * 1024]; CurveAioContext readaioctx; readaioctx.buf = readbuffer; readaioctx.offset = 0; @@ -681,10 +685,10 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { LOG(INFO) << "here"; mdsclient_->Initialize(fopt.metaServerOpt); - fileinstance_.Initialize( - "/UnstableChunkserverTest", mdsclient_, userinfo, OpenFlags{}, fopt); + fileinstance_.Initialize("/UnstableChunkserverTest", mdsclient_, userinfo, + OpenFlags{}, fopt); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9151, &ep); PeerId pd(ep); @@ -699,14 +703,14 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { std::this_thread::sleep_for(std::chrono::milliseconds(1000)); int fd = fileinstance_.Open(); - MetaCache *mc = fileinstance_.GetIOManager4File()->GetMetaCache(); + MetaCache* mc = fileinstance_.GetIOManager4File()->GetMetaCache(); ASSERT_NE(fd, -1); - CliServiceFake *cliservice = mds.GetCliService(); - std::vector chunkservice = mds.GetFakeChunkService(); + CliServiceFake* cliservice = mds.GetCliService(); + std::vector chunkservice = mds.GetFakeChunkService(); - char *buffer = new char[8 * 1024]; + char* buffer = new char[8 * 1024]; uint64_t offset = 0; uint64_t length = 8 * 1024; @@ -722,7 +726,8 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { ASSERT_EQ(length, fileinstance_.Write(buffer, offset, length)); ASSERT_EQ(length, fileinstance_.Read(buffer, offset, length)); - // metacache中被写过的copyset leadermaychange都处于正常状态 + // The copyset leadermaychanges that have been written in Metacache are all + // in a normal state ChunkIDInfo_t chunkinfo1; MetaCacheErrorType rc = mc->GetChunkInfoByIndex(0, &chunkinfo1); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -740,19 +745,20 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { mds.EnableNetUnstable(10000); - // 写2次,读2次,每次请求重试3次 - // 因为在chunkserver端设置了延迟,导致每次请求都会超时 - // unstable阈值为10,所以第11次请求返回时,对应的chunkserver被标记为unstable - // leader在对应chunkserver上的copyset会设置leaderMayChange为true - // 下次发起请求时,会先去刷新leader信息, - // 由于leader没有发生改变,而且延迟仍然存在 - // 所以第12次请求仍然超时,leaderMayChange仍然为true + // Write twice, read twice, and retry three times per request + // Due to the delay set on the chunkserver side, each request will time out + // The unstable threshold is 10, so when the 11th request returns, the + // corresponding chunkserver is marked as unstable The copyset of the leader + // on the corresponding chunkserver will set leaderMayChange to true The + // next time a request is made, the leader information will be refreshed + // first, Since the leader has not changed and the delay still 
exists, the + // 12th request still times out, and leaderMayChange is still true ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(-2, fileinstance_.Read(buffer, 1 * chunk_size, length)); ASSERT_EQ(-2, fileinstance_.Read(buffer, 1 * chunk_size, length)); - // 获取第2个chunk的chunkid信息 + // Obtain chunkid information for the second chunk ChunkIDInfo_t chunkinfo2; rc = mc->GetChunkInfoByIndex(1, &chunkinfo2); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -769,9 +775,10 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { } } - // 当copyset处于unstable状态时 - // 不进入超时时间指数退避逻辑,rpc超时时间设置为默认值 - // 所以每个请求总时间为3s,4个请求需要12s + // When the copyset is in an unstable state, requests do not enter the + // exponential timeout backoff logic; the rpc timeout stays at its default + // value. So each request takes 3 seconds in total, and 4 requests need + // 12 seconds auto start = TimeUtility::GetTimeofDayMs(); ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); @@ -783,9 +790,10 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { mds.DisableNetUnstable(); - // 取消延迟,再次读写第2个chunk - // 获取leader信息后,会将leaderMayChange置为false - // 第一个chunk对应的copyset依赖leaderMayChange为true + // Cancel the delay, then read and write the second chunk again. + // After the leader information is obtained, leaderMayChange will be set + // to false; leaderMayChange for the copyset corresponding to the first + // chunk remains true ASSERT_EQ(8192, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Read(buffer, 1 * chunk_size, length)); for (int i = 0; i < FLAGS_copyset_num; ++i) { @@ -809,7 +817,8 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { PeerId pd2(ep2); cliservice->SetPeerID(pd2);
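
Both timing comments in this test follow from one timeout policy: unstable copysets keep the default rpc timeout, while long retry chains back off exponentially. A hedged sketch of such a policy; the constants (1 s default, 8 s cap) come from the comments above, but the function shape is an assumption, not curve's exact implementation:

    #include <algorithm>
    #include <cstdint>

    // Assumed shape: 1000 ms default timeout, backoff doubling capped at
    // 8000 ms, and no backoff at all while the copyset is marked unstable.
    uint64_t NextTimeoutMs(int retry, bool copysetUnstable,
                           int minRetryTimesForceTimeoutBackoff) {
        const uint64_t kDefaultMs = 1000;
        const uint64_t kMaxMs = 8000;
        if (copysetUnstable || retry < minRetryTimesForceTimeoutBackoff) {
            return kDefaultMs;  // 3 retries x 1 s -> the 3 s per request above
        }
        uint64_t backoff = kDefaultMs
                           << (retry - minRetryTimesForceTimeoutBackoff + 1);
        return std::min(backoff, kMaxMs);  // later retries settle at 8 s
    }
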
- // 设置rcp返回失败,迫使copyset切换leader, 切换leader后读写成功 + // Make the rpc return failure, forcing the copyset to switch leaders; + // reads and writes succeed after the leader switch chunkservice[0]->SetRPCFailed(); ASSERT_EQ(8192, fileinstance_.Write(buffer, 0 * chunk_size, length)); @@ -872,7 +881,7 @@ TEST_F(TestLibcurveInterface, ResumeTimeoutBackoff) { fileinstance_.Initialize("/ResumeTimeoutBackoff", mdsclient_, userinfo, OpenFlags{}, fopt); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9151, &ep); PeerId pd(ep); @@ -887,13 +896,13 @@ TEST_F(TestLibcurveInterface, ResumeTimeoutBackoff) { std::this_thread::sleep_for(std::chrono::milliseconds(1000)); int fd = fileinstance_.Open(); - MetaCache *mc = fileinstance_.GetIOManager4File()->GetMetaCache(); + MetaCache* mc = fileinstance_.GetIOManager4File()->GetMetaCache(); ASSERT_NE(fd, -1); - std::vector chunkservice = mds.GetFakeChunkService(); + std::vector chunkservice = mds.GetFakeChunkService(); - char *buffer = new char[8 * 1024]; + char* buffer = new char[8 * 1024]; uint64_t offset = 0; uint64_t length = 8 * 1024; @@ -909,7 +918,8 @@ TEST_F(TestLibcurveInterface, ResumeTimeoutBackoff) { ASSERT_EQ(length, fileinstance_.Write(buffer, offset, length)); ASSERT_EQ(length, fileinstance_.Read(buffer, offset, length)); - // metacache中被写过的copyset leadermaychange都处于正常状态 + // Copysets that have been written through the metacache all have + // LeaderMayChange in a normal state ChunkIDInfo_t chunkinfo1; MetaCacheErrorType rc = mc->GetChunkInfoByIndex(0, &chunkinfo1); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -927,17 +937,18 @@ TEST_F(TestLibcurveInterface, ResumeTimeoutBackoff) { mds.EnableNetUnstable(10000); - // 写2次, 每次请求重试11次 - // 因为在chunkserver端设置了延迟,导致每次请求都会超时 - // 第一个请求重试11次,会把chunkserver标记为unstable + // Write twice, retrying 11 times per request + // Due to the delay set on the chunkserver side, each request will time out + // The first request is retried 11 times, after which the chunkserver is + // marked as unstable ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); - // 第二个写请求,由于其对应的copyset leader may change - // 第1次请求超时时间为1s - // 后面4次重试由于leader may change所以超时时间也是1s - // 第5-11次请求由于重试次数超过minRetryTimesForceTimeoutBackoff - // 所以超时时间都进入指数退避,为8s * 6 = 48s - // 所以第二次写请求,总共耗时53s,并写入失败 + // For the second write request, since its copyset leader may change: + // the 1st attempt has a timeout of 1s, and the next 4 retries also use a + // 1s timeout because the leader may change; the 5th to 11th attempts + // exceed minRetryTimesForceTimeoutBackoff retries, so their timeouts all + // enter exponential backoff, which is 8s * 6 = 48s. The second write + // request therefore takes 53 seconds in total and fails auto start = TimeUtility::GetTimeofDayMs(); ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); auto elapsedMs = TimeUtility::GetTimeofDayMs() - start; @@ -961,7 +972,7 @@ TEST_F(TestLibcurveInterface, InterfaceStripeTest) { uint64_t size = 100 * 1024 * 1024 * 1024ul; FileClient fc; - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9115, &ep); PeerId pd(ep); @@ -975,12 +986,12 @@ TEST_F(TestLibcurveInterface, InterfaceStripeTest) { ASSERT_EQ(0, fc.Init(configpath)); - FakeMDSCurveFSService *service = NULL; + FakeMDSCurveFSService* service = NULL; service = mds.GetMDSService(); ::curve::mds::CreateFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakeret = - new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response));
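
The 53 s figure asserted in the ResumeTimeoutBackoff test above can be checked mechanically, counting the backed-off retries the way the comment does (six intervals at the 8 s cap):

    // 1 first attempt + 4 leader-may-change retries + 6 backed-off retries:
    static_assert(1 * 1 + 4 * 1 + 6 * 8 == 53,
                  "expected total duration of the second write, in seconds");
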
service->SetCreateFileFakeReturn(fakeret); CreateFileContext context; context.pagefile = true; @@ -991,7 +1002,7 @@ TEST_F(TestLibcurveInterface, InterfaceStripeTest) { ASSERT_EQ(LIBCURVE_ERROR::OK, ret); response.set_statuscode(::curve::mds::StatusCode::kFileExists); - fakeret = new FakeReturn(nullptr, static_cast(&response)); + fakeret = new FakeReturn(nullptr, static_cast(&response)); service->SetCreateFileFakeReturn(fakeret); context.pagefile = true; context.name = filename2; @@ -1003,7 +1014,7 @@ TEST_F(TestLibcurveInterface, InterfaceStripeTest) { ASSERT_EQ(LIBCURVE_ERROR::EXISTS, -ret); FileStatInfo_t fsinfo; - ::curve::mds::FileInfo *info = new curve::mds::FileInfo; + ::curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::GetFileInfoResponse getinforesponse; info->set_filename(filename2); info->set_id(1); @@ -1017,8 +1028,8 @@ TEST_F(TestLibcurveInterface, InterfaceStripeTest) { info->set_stripecount(4); getinforesponse.set_allocated_fileinfo(info); getinforesponse.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn *fakegetinfo = - new FakeReturn(nullptr, static_cast(&getinforesponse)); + FakeReturn* fakegetinfo = + new FakeReturn(nullptr, static_cast(&getinforesponse)); service->SetGetFileInfoFakeReturn(fakegetinfo); ret = fc.StatFile(filename2, userinfo, &fsinfo); ASSERT_EQ(1024 * 1024, fsinfo.stripeUnit); diff --git a/test/client/mds_failover_test.cpp b/test/client/mds_failover_test.cpp index e95912f610..df487eca62 100644 --- a/test/client/mds_failover_test.cpp +++ b/test/client/mds_failover_test.cpp @@ -20,256 +20,291 @@ * Author: tongguangxun */ -#include #include #include +#include +#include +#include //NOLINT #include -#include //NOLINT -#include //NOLINT +#include //NOLINT #include -#include +#include "include/client/libcurve.h" #include "src/client/client_common.h" +#include "src/client/client_config.h" +#include "src/client/config_info.h" #include "src/client/file_instance.h" -#include "test/client/fake/mockMDS.h" -#include "src/client/metacache.h" -#include "test/client/fake/mock_schedule.h" -#include "include/client/libcurve.h" #include "src/client/libcurve_file.h" -#include "src/client/client_config.h" -#include "src/client/service_helper.h" #include "src/client/mds_client.h" -#include "src/client/config_info.h" -#include "test/client/fake/fakeMDS.h" +#include "src/client/metacache.h" #include "src/client/metacache_struct.h" +#include "src/client/service_helper.h" #include "src/common/net_common.h" +#include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mockMDS.h" +#include "test/client/fake/mock_schedule.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" -namespace curve { -namespace client { - -// 测试mds failover切换状态机 -TEST(MDSChangeTest, MDSFailoverTest) { - RPCExcutorRetryPolicy rpcexcutor; - - MetaServerOption metaopt; - metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9903"); - metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9904"); - metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9905"); - - metaopt.rpcRetryOpt.rpcTimeoutMs = 1000; - metaopt.rpcRetryOpt.rpcRetryIntervalUS = 10000; // 10ms - metaopt.rpcRetryOpt.maxFailedTimesBeforeChangeAddr = 2; - metaopt.rpcRetryOpt.rpcTimeoutMs = 1500; - - rpcexcutor.SetOption(metaopt.rpcRetryOpt); - - int mds0RetryTimes = 0; - int mds1RetryTimes = 0; - int mds2RetryTimes = 0; - - // 场景1: mds0、1、2, currentworkindex = 0, mds0, mds1, mds2都宕机, - // 发到其rpc都以EHOSTDOWN返回,导致上层client会一直切换mds重试 - // 按照0-->1-->2持续进行 - // 
每次rpc返回-EHOSTDOWN,会直接触发RPC切换。最终currentworkindex没有切换 - auto task1 = [&](int mdsindex, uint64_t rpctimeoutMS, - brpc::Channel* channel, brpc::Controller* cntl)->int { - if (mdsindex == 0) { - mds0RetryTimes++; +namespace curve +{ + namespace client + { + + // Testing mds failover switching state machine + TEST(MDSChangeTest, MDSFailoverTest) + { + RPCExcutorRetryPolicy rpcexcutor; + + MetaServerOption metaopt; + metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9903"); + metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9904"); + metaopt.rpcRetryOpt.addrs.push_back("127.0.0.1:9905"); + + metaopt.rpcRetryOpt.rpcTimeoutMs = 1000; + metaopt.rpcRetryOpt.rpcRetryIntervalUS = 10000; // 10ms + metaopt.rpcRetryOpt.maxFailedTimesBeforeChangeAddr = 2; + metaopt.rpcRetryOpt.rpcTimeoutMs = 1500; + + rpcexcutor.SetOption(metaopt.rpcRetryOpt); + + int mds0RetryTimes = 0; + int mds1RetryTimes = 0; + int mds2RetryTimes = 0; + + // Scenario 1: mds0, 1, 2, currentworkindex=0, mds0, mds1, and mds2 are all + // down, + // All RPCs sent to them are returned as EHOSTDOWN, resulting in + // upper level clients constantly switching to mds and retrying + // Continue according to 0-->1-->2 + // Every time rpc returns -EHOSTDOWN, it will directly trigger RPC + // switching. The final currentworkindex did not switch + auto task1 = [&](int mdsindex, uint64_t rpctimeoutMS, + brpc::Channel *channel, brpc::Controller *cntl) -> int + { + if (mdsindex == 0) + { + mds0RetryTimes++; + } + + if (mdsindex == 1) + { + mds1RetryTimes++; + } + + if (mdsindex == 2) + { + mds2RetryTimes++; + } + return -EHOSTDOWN; + }; + + uint64_t startMS = TimeUtility::GetTimeofDayMs(); + // Control surface interface call, 1000 is the total retry time of this RPC + rpcexcutor.DoRPCTask(task1, 1000); + uint64_t endMS = TimeUtility::GetTimeofDayMs(); + ASSERT_GT(endMS - startMS, 1000 - 1); + + // This retry is a polling retry, and the number of retries per mds should + // be close to and not exceed the total number of mds + ASSERT_LT(abs(mds0RetryTimes - mds1RetryTimes), 3); + ASSERT_LT(abs(mds2RetryTimes - mds1RetryTimes), 3); + + startMS = TimeUtility::GetTimeofDayMs(); + rpcexcutor.DoRPCTask(task1, 3000); + endMS = TimeUtility::GetTimeofDayMs(); + ASSERT_GT(endMS - startMS, 3000 - 1); + ASSERT_EQ(0, rpcexcutor.GetCurrentWorkIndex()); + + // Scenario 2: mds0, 1, 2, currentworkindex = 0, mds0 goes down, and it will + // be working at this time + // Mds index switches to index2, and it is expected that the client + // will directly switch to index2 after retrying with index = 0 At + // this point, mds2 directly returns OK and rpc stops trying again. 
+ // Expected client to send a total of two RPCs, one to mds0 and the + // other to mds2, skipping the middle mds1。 + mds0RetryTimes = 0; + mds1RetryTimes = 0; + mds2RetryTimes = 0; + auto task2 = [&](int mdsindex, uint64_t rpctimeoutMS, + brpc::Channel *channel, brpc::Controller *cntl) -> int + { + if (mdsindex == 0) + { + mds0RetryTimes++; + rpcexcutor.SetCurrentWorkIndex(2); + return -ECONNRESET; + } + + if (mdsindex == 1) + { + mds1RetryTimes++; + return -ECONNRESET; + } + + if (mdsindex == 2) + { + mds2RetryTimes++; + // If OK is returned this time, then RPC should have succeeded and + // will not try again + return LIBCURVE_ERROR::OK; + } + + return 0; + }; + startMS = TimeUtility::GetTimeofDayMs(); + rpcexcutor.DoRPCTask(task2, 1000); + endMS = TimeUtility::GetTimeofDayMs(); + ASSERT_LT(endMS - startMS, 1000); + ASSERT_EQ(2, rpcexcutor.GetCurrentWorkIndex()); + ASSERT_EQ(mds0RetryTimes, 1); + ASSERT_EQ(mds1RetryTimes, 0); + ASSERT_EQ(mds2RetryTimes, 1); + + // Scenario 3: mds0, 1, 2, currentworkindex = 1, and mds1 is down, + // At this point, it will switch to mds0 and mds2 + // After switching to 2, mds1 resumed, and then switched to mds1, and + // the rpc was successfully sent. At this point, the switching order is + // 1->2->0, 1->2->0, 1. + mds0RetryTimes = 0; + mds1RetryTimes = 0; + mds2RetryTimes = 0; + rpcexcutor.SetCurrentWorkIndex(1); + auto task3 = [&](int mdsindex, uint64_t rpctimeoutMS, + brpc::Channel *channel, brpc::Controller *cntl) -> int + { + if (mdsindex == 0) + { + mds0RetryTimes++; + return -ECONNRESET; + } + + if (mdsindex == 1) + { + mds1RetryTimes++; + // When retrying on mds1 for the third time, success is returned + // upwards and the retry is stopped + if (mds1RetryTimes == 3) + { + return LIBCURVE_ERROR::OK; + } + return -ECONNREFUSED; + } + + if (mdsindex == 2) + { + mds2RetryTimes++; + return -brpc::ELOGOFF; + } + + return 0; + }; + + startMS = TimeUtility::GetTimeofDayMs(); + rpcexcutor.DoRPCTask(task3, 1000); + endMS = TimeUtility::GetTimeofDayMs(); + ASSERT_LT(endMS - startMS, 1000); + ASSERT_EQ(mds0RetryTimes, 2); + ASSERT_EQ(mds1RetryTimes, 3); + ASSERT_EQ(mds2RetryTimes, 2); + + ASSERT_EQ(1, rpcexcutor.GetCurrentWorkIndex()); + + // Scenario 4: mds0, 1, 2, currentWorkindex = 0, but the rpc request to mds1 + // consistently times out + // The final result returned by rpc is timeout + // For timeout mds nodes, they will continuously retry + // mds.maxFailedTimesBeforeChangeMDS and switch Current + // mds.maxFailedTimesBeforeChangeMDS=2. + // So the retry logic should be: 0->0->1->2, 0->0->1->2, 0->0->1->2, + // ... + LOG(INFO) << "case 4"; + mds0RetryTimes = 0; + mds1RetryTimes = 0; + mds2RetryTimes = 0; + rpcexcutor.SetCurrentWorkIndex(0); + auto task4 = [&](int mdsindex, uint64_t rpctimeoutMS, + brpc::Channel *channel, brpc::Controller *cntl) -> int + { + if (mdsindex == 0) + { + mds0RetryTimes++; + return mds0RetryTimes % 2 == 0 ? 
-brpc::ERPCTIMEDOUT : -ETIMEDOUT; + } + + if (mdsindex == 1) + { + mds1RetryTimes++; + return -ECONNREFUSED; + } + + if (mdsindex == 2) + { + mds2RetryTimes++; + return -brpc::ELOGOFF; + } + + return 0; + }; + + startMS = TimeUtility::GetTimeofDayMs(); + rpcexcutor.DoRPCTask(task4, 3000); + endMS = TimeUtility::GetTimeofDayMs(); + ASSERT_GT(endMS - startMS, 3000 - 1); + ASSERT_EQ(0, rpcexcutor.GetCurrentWorkIndex()); + // This retry is a polling retry, and the number of retries per mds should + // be close to and not exceed the total number of mds + ASSERT_GT(mds0RetryTimes, mds1RetryTimes + mds2RetryTimes); + + // Scenario 5: mds0, 1, 2, currentWorkIndex = 0 + // But the first 10 requests from rpc all returned EHOSTDOWN + // Mds retries sleep for 10ms, so it takes a total of 100ms + rpcexcutor.SetCurrentWorkIndex(0); + int hostDownTimes = 10; + auto task5 = [&](int mdsindex, uint64_t rpctimeoutMs, + brpc::Channel *channel, brpc::Controller *cntl) + { + static int count = 0; + if (++count <= hostDownTimes) + { + return -EHOSTDOWN; + } + + return 0; + }; + startMS = TimeUtility::GetTimeofDayMs(); + rpcexcutor.DoRPCTask(task5, 10000); // Total retry time 10s + endMS = TimeUtility::GetTimeofDayMs(); + ASSERT_GE(endMS - startMS, 100); + + // Scenario 6: mds keeps returning EHOSTDOWN during the retry process, with + // a total of 5 retries + rpcexcutor.SetCurrentWorkIndex(0); + int calledTimes = 0; + auto task6 = [&](int mdsindex, uint64_t rpctimeoutMs, + brpc::Channel *channel, brpc::Controller *cntl) + { + ++calledTimes; + return -EHOSTDOWN; + }; + + startMS = TimeUtility::GetTimeofDayMs(); + rpcexcutor.DoRPCTask(task6, 5 * 1000); // Total retry time 5s + endMS = TimeUtility::GetTimeofDayMs(); + ASSERT_GE(endMS - startMS, 5 * 1000 - 1); + + // In each hostdown situation, sleep for 10ms and the total retry time is + // 5s, so the total number of retries is less than or equal to 500 times In + // order to minimize false positives, 10 redundant attempts were added + LOG(INFO) << "called times " << calledTimes; + ASSERT_LE(calledTimes, 510); } - if (mdsindex == 1) { - mds1RetryTimes++; - } - - if (mdsindex == 2) { - mds2RetryTimes++; - } - return -EHOSTDOWN; - }; - - uint64_t startMS = TimeUtility::GetTimeofDayMs(); - // 控制面接口调用, 1000为本次rpc的重试总时间 - rpcexcutor.DoRPCTask(task1, 1000); - uint64_t endMS = TimeUtility::GetTimeofDayMs(); - ASSERT_GT(endMS - startMS, 1000 - 1); - - // 本次重试为轮询重试,每个mds的重试次数应该接近,不超过总的mds数量 - ASSERT_LT(abs(mds0RetryTimes - mds1RetryTimes), 3); - ASSERT_LT(abs(mds2RetryTimes - mds1RetryTimes), 3); - - startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task1, 3000); - endMS = TimeUtility::GetTimeofDayMs(); - ASSERT_GT(endMS - startMS, 3000 - 1); - ASSERT_EQ(0, rpcexcutor.GetCurrentWorkIndex()); - - // 场景2:mds0、1、2, currentworkindex = 0, mds0宕机,并且这时候将正在工作的 - // mds索引切换到index2,预期client在index=0重试之后会直接切换到index 2 - // mds2这这时候直接返回OK,rpc停止重试。 - // 预期client总共发送两次rpc,一次发送到mds0,另一次发送到mds2,跳过中间的 - // mds1。 - mds0RetryTimes = 0; - mds1RetryTimes = 0; - mds2RetryTimes = 0; - auto task2 = [&](int mdsindex, uint64_t rpctimeoutMS, - brpc::Channel* channel, brpc::Controller* cntl)->int { - if (mdsindex == 0) { - mds0RetryTimes++; - rpcexcutor.SetCurrentWorkIndex(2); - return -ECONNRESET; - } - - if (mdsindex == 1) { - mds1RetryTimes++; - return -ECONNRESET; - } - - if (mdsindex == 2) { - mds2RetryTimes++; - // 本次返回ok,那么RPC应该成功了,不会再重试 - return LIBCURVE_ERROR::OK; - } - - return 0; - }; - startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task2, 1000); - endMS = 
TimeUtility::GetTimeofDayMs(); - ASSERT_LT(endMS - startMS, 1000); - ASSERT_EQ(2, rpcexcutor.GetCurrentWorkIndex()); - ASSERT_EQ(mds0RetryTimes, 1); - ASSERT_EQ(mds1RetryTimes, 0); - ASSERT_EQ(mds2RetryTimes, 1); - - // 场景3:mds0、1、2,currentworkindex = 1,且mds1宕机了, - // 这时候会切换到mds0和mds2 - // 在切换到2之后,mds1又恢复了,这时候切换到mds1,然后rpc发送成功。 - // 这时候的切换顺序为1->2->0, 1->2->0, 1。 - mds0RetryTimes = 0; - mds1RetryTimes = 0; - mds2RetryTimes = 0; - rpcexcutor.SetCurrentWorkIndex(1); - auto task3 = [&](int mdsindex, uint64_t rpctimeoutMS, - brpc::Channel* channel, brpc::Controller* cntl)->int { - if (mdsindex == 0) { - mds0RetryTimes++; - return -ECONNRESET; - } - - if (mdsindex == 1) { - mds1RetryTimes++; - // 当在mds1上重试到第三次的时候向上返回成功,停止重试 - if (mds1RetryTimes == 3) { - return LIBCURVE_ERROR::OK; - } - return -ECONNREFUSED; - } - - if (mdsindex == 2) { - mds2RetryTimes++; - return -brpc::ELOGOFF; - } - - return 0; - }; - - startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task3, 1000); - endMS = TimeUtility::GetTimeofDayMs(); - ASSERT_LT(endMS - startMS, 1000); - ASSERT_EQ(mds0RetryTimes, 2); - ASSERT_EQ(mds1RetryTimes, 3); - ASSERT_EQ(mds2RetryTimes, 2); - - ASSERT_EQ(1, rpcexcutor.GetCurrentWorkIndex()); - - // 场景4:mds0、1、2, currentWorkindex = 0, 但是发往mds1的rpc请求一直超时 - // 最后rpc返回结果是超时. - // 对于超时的mds节点会连续重试mds.maxFailedTimesBeforeChangeMDS后切换 - // 当前mds.maxFailedTimesBeforeChangeMDS=2。 - // 所以重试逻辑应该是:0->0->1->2, 0->0->1->2, 0->0->1->2, ... - LOG(INFO) << "case 4"; - mds0RetryTimes = 0; - mds1RetryTimes = 0; - mds2RetryTimes = 0; - rpcexcutor.SetCurrentWorkIndex(0); - auto task4 = [&](int mdsindex, uint64_t rpctimeoutMS, - brpc::Channel* channel, brpc::Controller* cntl)->int { - if (mdsindex == 0) { - mds0RetryTimes++; - return mds0RetryTimes % 2 == 0 ? -brpc::ERPCTIMEDOUT - : -ETIMEDOUT; - } - - if (mdsindex == 1) { - mds1RetryTimes++; - return -ECONNREFUSED; - } - - if (mdsindex == 2) { - mds2RetryTimes++; - return -brpc::ELOGOFF; - } - - return 0; - }; - - startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task4, 3000); - endMS = TimeUtility::GetTimeofDayMs(); - ASSERT_GT(endMS - startMS, 3000 - 1); - ASSERT_EQ(0, rpcexcutor.GetCurrentWorkIndex()); - // 本次重试为轮询重试,每个mds的重试次数应该接近,不超过总的mds数量 - ASSERT_GT(mds0RetryTimes, mds1RetryTimes + mds2RetryTimes); - - // 场景5:mds0、1、2,currentWorkIndex = 0 - // 但是rpc请求前10次全部返回EHOSTDOWN - // mds重试睡眠10ms,所以总共耗时100ms时间 - rpcexcutor.SetCurrentWorkIndex(0); - int hostDownTimes = 10; - auto task5 = [&](int mdsindex, uint64_t rpctimeoutMs, - brpc::Channel* channel, - brpc::Controller* cntl) { - static int count = 0; - if (++count <= hostDownTimes) { - return -EHOSTDOWN; - } - - return 0; - }; - startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task5, 10000); // 总重试时间10s - endMS = TimeUtility::GetTimeofDayMs(); - ASSERT_GE(endMS - startMS, 100); - - // 场景6: mds在重试过程中一直返回EHOSTDOWN,总共重试5s - rpcexcutor.SetCurrentWorkIndex(0); - int calledTimes = 0; - auto task6 = [&](int mdsindex, uint64_t rpctimeoutMs, - brpc::Channel* channel, - brpc::Controller* cntl) { - ++calledTimes; - return -EHOSTDOWN; - }; - - startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task6, 5 * 1000); // 总重试时间5s - endMS = TimeUtility::GetTimeofDayMs(); - ASSERT_GE(endMS - startMS, 5 * 1000 - 1); - - // 每次hostdown情况下,睡眠10ms,总重试时间5s,所以总共重试次数小于等于500次 - // 为了尽量减少误判,所以加入10次冗余 - LOG(INFO) << "called times " << calledTimes; - ASSERT_LE(calledTimes, 510); -} - -} // namespace client -} // namespace curve + } // namespace client +} // namespace curve -const std::vector 
registConfOff { +const std::vector registConfOff{ std::string("mds.listen.addr=127.0.0.1:9903,127.0.0.1:9904,127.0.0.1:9905"), std::string("rpcRetryTimes=3"), std::string("global.logPath=./runlog/"), @@ -281,10 +316,9 @@ const std::vector registConfOff { std::string("metacache.rpcRetryIntervalUS=500"), std::string("mds.rpcRetryIntervalUS=500"), std::string("schedule.threadpoolSize=2"), - std::string("mds.registerToMDS=false") -}; + std::string("mds.registerToMDS=false")}; -const std::vector registConfON { +const std::vector registConfON{ std::string("mds.listen.addr=127.0.0.1:9903,127.0.0.1:9904,127.0.0.1:9905"), std::string("global.logPath=./runlog/"), std::string("synchronizeRPCTimeoutMS=500"), @@ -297,14 +331,15 @@ const std::vector registConfON { std::string("metacache.rpcRetryIntervalUS=500"), std::string("mds.rpcRetryIntervalUS=500"), std::string("schedule.threadpoolSize=2"), - std::string("mds.registerToMDS=true") -}; - -std::string mdsMetaServerAddr = "127.0.0.1:9903,127.0.0.1:9904,127.0.0.1:9905"; // NOLINT -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string configpath = "./test/client/configs/mds_failover.conf"; // NOLINT -int main(int argc, char ** argv) { + std::string("mds.registerToMDS=true")}; + +std::string mdsMetaServerAddr = + "127.0.0.1:9903,127.0.0.1:9904,127.0.0.1:9905"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string configpath = "./test/client/configs/mds_failover.conf"; // NOLINT +int main(int argc, char **argv) +{ ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); google::ParseCommandLineFlags(&argc, &argv, false); diff --git a/test/client/mock/mock_chunkservice.h b/test/client/mock/mock_chunkservice.h index 3891ce60bf..134f404a85 100644 --- a/test/client/mock/mock_chunkservice.h +++ b/test/client/mock/mock_chunkservice.h @@ -25,8 +25,8 @@ #include #include -#include #include +#include #include @@ -39,48 +39,48 @@ namespace client { using ::testing::_; using ::testing::Invoke; -using curve::chunkserver::ChunkService; using curve::chunkserver::CHUNK_OP_STATUS; +using curve::chunkserver::ChunkService; -/* 当前仅仅模拟单 chunk read/write */ +/*Currently, only single chunk read/write is simulated*/ class FakeChunkServiceImpl : public ChunkService { public: virtual ~FakeChunkServiceImpl() {} - void WriteChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void WriteChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); chunkIds_.insert(request->chunkid()); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); ::memcpy(chunk_ + request->offset(), cntl->request_attachment().to_string().c_str(), request->size()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } - void ReadChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void ReadChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* 
response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); char buff[4096] = {0}; ::memcpy(buff, chunk_ + request->offset(), request->size()); cntl->response_attachment().append(buff, request->size()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } - void ReadChunkSnapshot(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void ReadChunkSnapshot(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); char buff[4096] = {0}; ::memcpy(buff, chunk_ + request->offset(), request->size()); cntl->response_attachment().append(buff, request->size()); @@ -88,113 +88,114 @@ class FakeChunkServiceImpl : public ChunkService { } void DeleteChunkSnapshotOrCorrectSn( - ::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + ::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); LOG(INFO) << "delete chunk snapshot: " << request->chunkid(); if (chunkIds_.find(request->chunkid()) == chunkIds_.end()) { - response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); //NOLINT - LOG(INFO) << "delete chunk snapshot: " - << request->chunkid() << " not exist"; + response->set_status( + CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); // NOLINT + LOG(INFO) << "delete chunk snapshot: " << request->chunkid() + << " not exist"; return; } chunkIds_.erase(request->chunkid()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } - void GetChunkInfo(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::GetChunkInfoRequest *request, - ::curve::chunkserver::GetChunkInfoResponse *response, - google::protobuf::Closure *done) { + void GetChunkInfo(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::GetChunkInfoRequest* request, + ::curve::chunkserver::GetChunkInfoResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); response->add_chunksn(1); response->add_chunksn(2); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } - void CreateCloneChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void CreateCloneChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } - void RecoverChunk(::google::protobuf::RpcController *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done) { + void 
RecoverChunk(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done) { brpc::ClosureGuard doneGuard(done); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } private: std::set chunkIds_; - /* 由于 bthread 栈空间的限制,这里不会开很大的空间,如果测试需要更大的空间 - * 请在堆上申请 */ + /* Due to the limitations of the bthread stack space, we do not open up a + * large amount of space here. If a test needs more space, please allocate + * it on the heap. */ char chunk_[4096] = {0}; }; class MockChunkServiceImpl : public ChunkService { public: - MOCK_METHOD4(WriteChunk, void(::google::protobuf::RpcController - *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done)); - MOCK_METHOD4(ReadChunk, void(::google::protobuf::RpcController - *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done)); - MOCK_METHOD4(ReadChunkSnapshot, void(::google::protobuf::RpcController - *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done)); - MOCK_METHOD4(DeleteChunkSnapshotOrCorrectSn, void( - ::google::protobuf::RpcController - *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done)); - MOCK_METHOD4(GetChunkInfo, void(::google::protobuf::RpcController - *controller, - const ::curve::chunkserver::GetChunkInfoRequest *request, - ::curve::chunkserver::GetChunkInfoResponse *response, - google::protobuf::Closure *done)); + MOCK_METHOD4(WriteChunk, + void(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done)); + MOCK_METHOD4(ReadChunk, + void(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done)); + MOCK_METHOD4(ReadChunkSnapshot, + void(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done)); + MOCK_METHOD4(DeleteChunkSnapshotOrCorrectSn, + void(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done)); + MOCK_METHOD4(GetChunkInfo, + void(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::GetChunkInfoRequest* request, + ::curve::chunkserver::GetChunkInfoResponse* response, + google::protobuf::Closure* done)); MOCK_METHOD4(CreateCloneChunk, void(::google::protobuf::RpcController* controller, const ::curve::chunkserver::ChunkRequest* request, ::curve::chunkserver::ChunkResponse* response, google::protobuf::Closure* done)); - MOCK_METHOD4(RecoverChunk, void(::google::protobuf::RpcController - *controller, - const ::curve::chunkserver::ChunkRequest *request, - ::curve::chunkserver::ChunkResponse *response, - google::protobuf::Closure *done)); - MOCK_METHOD4(UpdateEpoch, void(::google::protobuf::RpcController - *controller, - const ::curve::chunkserver::UpdateEpochRequest *request, -
::curve::chunkserver::UpdateEpochResponse *response, - google::protobuf::Closure *done)); + MOCK_METHOD4(RecoverChunk, + void(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::ChunkRequest* request, + ::curve::chunkserver::ChunkResponse* response, + google::protobuf::Closure* done)); + MOCK_METHOD4(UpdateEpoch, + void(::google::protobuf::RpcController* controller, + const ::curve::chunkserver::UpdateEpochRequest* request, + ::curve::chunkserver::UpdateEpochResponse* response, + google::protobuf::Closure* done)); void DelegateToFake() { ON_CALL(*this, WriteChunk(_, _, _, _)) - .WillByDefault(Invoke(&fakeChunkService, - &FakeChunkServiceImpl::WriteChunk)); + .WillByDefault( + Invoke(&fakeChunkService, &FakeChunkServiceImpl::WriteChunk)); ON_CALL(*this, ReadChunk(_, _, _, _)) - .WillByDefault(Invoke(&fakeChunkService, - &FakeChunkServiceImpl::ReadChunk)); + .WillByDefault( + Invoke(&fakeChunkService, &FakeChunkServiceImpl::ReadChunk)); } private: FakeChunkServiceImpl fakeChunkService; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // TEST_CLIENT_MOCK_MOCK_CHUNKSERVICE_H_ diff --git a/test/client/request_scheduler_test.cpp b/test/client/request_scheduler_test.cpp index 9ff0636530..bf75580957 100644 --- a/test/client/request_scheduler_test.cpp +++ b/test/client/request_scheduler_test.cpp @@ -20,18 +20,19 @@ * Author: wudemiao */ -#include -#include -#include +#include "src/client/request_scheduler.h" + #include +#include #include +#include +#include -#include "src/client/request_scheduler.h" #include "src/client/client_common.h" -#include "test/client/mock/mock_meta_cache.h" +#include "src/common/concurrent/count_down_event.h" #include "test/client/mock/mock_chunkservice.h" +#include "test/client/mock/mock_meta_cache.h" #include "test/client/mock/mock_request_context.h" -#include "src/common/concurrent/count_down_event.h" namespace curve { namespace client { @@ -49,8 +50,9 @@ TEST(RequestSchedulerTest, fake_server_test) { brpc::Server server; std::string listenAddr = "127.0.0.1:9109"; FakeChunkServiceImpl fakeChunkService; - ASSERT_EQ(server.AddService(&fakeChunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&fakeChunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; ASSERT_EQ(server.Start(listenAddr.c_str(), &option), 0); @@ -94,7 +96,7 @@ TEST(RequestSchedulerTest, fake_server_test) { /* error request schedule test when scheduler not run */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); reqCtx->writeData_.append(writebuff, len); @@ -102,17 +104,17 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(0); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(-1, requestScheduler.ScheduleRequest(reqCtxs)); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); reqCtx->writeData_.append(writebuff, len); @@ -120,7 +122,7 @@ 
TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(0); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -147,7 +149,7 @@ TEST(RequestSchedulerTest, fake_server_test) { const uint64_t len1 = 16; /* write should with attachment size */ { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -157,18 +159,18 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -177,12 +179,12 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -190,11 +192,10 @@ TEST(RequestSchedulerTest, fake_server_test) { ASSERT_EQ(0, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; ::memset(writebuff1, 'a', 8); ::memset(writebuff1 + 8, '\0', 8); @@ -203,34 +204,33 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; memset(readbuff1, '0', 16); reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -258,9 +258,9 @@ TEST(RequestSchedulerTest, fake_server_test) { } // read snapshot - // 1. 
先 write snapshot
+ // 1. Write snapshot first
{
- RequestContext *reqCtx = new FakeRequestContext();
+ RequestContext* reqCtx = new FakeRequestContext();
reqCtx->optype_ = OpType::WRITE;
reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
@@ -272,35 +272,34 @@ TEST(RequestSchedulerTest, fake_server_test) {
reqCtx->rawlength_ = len1;
curve::common::CountDownEvent cond(1);
- RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+ RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
reqDone->SetFileMetric(&fm);
reqDone->SetIOTracker(&iot);
reqCtx->done_ = reqDone;
- std::vector<RequestContext *> reqCtxs;
+ std::vector<RequestContext*> reqCtxs;
reqCtxs.push_back(reqCtx);
ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
cond.Wait();
}
- // 2. 再 read snapshot 验证一遍
+ // 2. Verify with read snapshot again
{
- RequestContext *reqCtx = new FakeRequestContext();
+ RequestContext* reqCtx = new FakeRequestContext();
reqCtx->optype_ = OpType::READ_SNAP;
reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
- reqCtx->seq_ = sn;
memset(readbuff1, '0', 16);
reqCtx->offset_ = 0;
reqCtx->rawlength_ = len1;
curve::common::CountDownEvent cond(1);
- RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+ RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
reqDone->SetFileMetric(&fm);
reqDone->SetIOTracker(&iot);
reqCtx->done_ = reqDone;
- std::vector<RequestContext *> reqCtxs;
+ std::vector<RequestContext*> reqCtxs;
reqCtxs.push_back(reqCtx);
ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
cond.Wait();
@@ -309,47 +308,45 @@ TEST(RequestSchedulerTest, fake_server_test) {
ASSERT_EQ(reqCtx->readData_, expectReadData);
ASSERT_EQ(0, reqDone->GetErrorCode());
}
- // 3. 在 delete snapshot
+ // 3. Then delete the snapshot
{
- RequestContext *reqCtx = new FakeRequestContext();
+ RequestContext* reqCtx = new FakeRequestContext();
reqCtx->optype_ = OpType::DELETE_SNAP;
reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
- reqCtx->correctedSeq_ = sn;
reqCtx->offset_ = 0;
reqCtx->rawlength_ = len1;
curve::common::CountDownEvent cond(1);
- RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx);
+ RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx);
reqDone->SetFileMetric(&fm);
reqDone->SetIOTracker(&iot);
reqCtx->done_ = reqDone;
- std::vector<RequestContext *> reqCtxs;
+ std::vector<RequestContext*> reqCtxs;
reqCtxs.push_back(reqCtx);
ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
cond.Wait();
ASSERT_EQ(0, reqDone->GetErrorCode());
}
- // 4. 重复 delete snapshot
+ // 4.
Repeat delete snapshot { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->correctedSeq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -357,22 +354,22 @@ TEST(RequestSchedulerTest, fake_server_test) { reqDone->GetErrorCode()); } - // 测试 get chunk info + // Test get chunk info { ChunkInfoDetail chunkInfo; - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); reqCtx->chunkinfodetail_ = &chunkInfo; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -383,9 +380,9 @@ TEST(RequestSchedulerTest, fake_server_test) { reqDone->GetErrorCode()); } - // 测试createClonechunk + // Test createClonechunk { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::CREATE_CLONE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -395,36 +392,35 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->location_ = "destination"; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); ASSERT_EQ(0, reqDone->GetErrorCode()); } - // 测试recoverChunk + // Testing recoverChunk { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::RECOVER_CHUNK; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -434,7 +430,7 @@ TEST(RequestSchedulerTest, fake_server_test) { /* read/write chunk test */ const int kMaxLoop = 100; for (int i = 0; i < kMaxLoop; ++i) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -444,35 +440,34 @@ 
TEST(RequestSchedulerTest, fake_server_test) { reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); } for (int i = 0; i < kMaxLoop; ++i) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; memset(readbuff, '0', 8); reqCtx->offset_ = offset + i; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -482,34 +477,33 @@ TEST(RequestSchedulerTest, fake_server_test) { ASSERT_EQ(0, reqDone->GetErrorCode()); } { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::UNKNOWN; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - memset(readbuff, '0', 8); // reqCtx->readBuffer_ = readbuff; reqCtx->offset_ = offset; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::vector reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); ASSERT_EQ(-1, reqDone->GetErrorCode()); } - /* 2. 并发测试 */ + /* 2. 
Concurrent testing */ curve::common::CountDownEvent cond(4 * kMaxLoop); auto func = [&]() { for (int i = 0; i < kMaxLoop; ++i) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -518,7 +512,7 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->offset_ = offset + i; reqCtx->rawlength_ = len; - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -538,18 +532,17 @@ TEST(RequestSchedulerTest, fake_server_test) { cond.Wait(); for (int i = 0; i < kMaxLoop; i += 1) { - RequestContext *reqCtx = new FakeRequestContext(); + RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, 1000, copysetId); - reqCtx->seq_ = sn; memset(readbuff, '0', 8); // reqCtx->readBuffer_ = readbuff; reqCtx->offset_ = offset + i; reqCtx->rawlength_ = len; curve::common::CountDownEvent cond(1); - RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); + RequestClosure* reqDone = new FakeRequestClosure(&cond, reqCtx); reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; @@ -578,11 +571,11 @@ TEST(RequestSchedulerTest, CommonTest) { MetaCache metaCache; FileMetric fm("test"); - // scheduleQueueCapacity 设置为 0 + // scheduleQueueCapacity set to 0 opt.scheduleQueueCapacity = 0; ASSERT_EQ(-1, sche.Init(opt, &metaCache, &fm)); - // threadpoolsize 设置为 0 + // threadpoolsize set to 0 opt.scheduleQueueCapacity = 4096; opt.scheduleThreadpoolSize = 0; ASSERT_EQ(-1, sche.Init(opt, &metaCache, &fm)); @@ -597,5 +590,5 @@ TEST(RequestSchedulerTest, CommonTest) { ASSERT_EQ(0, sche.Fini()); } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/test/client/request_sender_test.cpp b/test/client/request_sender_test.cpp index 92882bac79..c453fd2468 100644 --- a/test/client/request_sender_test.cpp +++ b/test/client/request_sender_test.cpp @@ -20,11 +20,12 @@ * Author: wudemiao */ +#include "src/client/request_sender.h" + #include #include #include "src/client/client_common.h" -#include "src/client/request_sender.h" #include "src/common/concurrent/count_down_event.h" #include "test/client/mock/mock_chunkservice.h" @@ -54,9 +55,7 @@ class FakeChunkClosure : public ClientClosure { SetClosure(&reqeustClosure); } - void Run() override { - event->Signal(); - } + void Run() override { event->Signal(); } void SendRetryRequest() override {} @@ -96,7 +95,7 @@ class RequestSenderTest : public ::testing::Test { }; TEST_F(RequestSenderTest, BasicTest) { - // 非法的 port + // Illegal port std::string leaderStr = "127.0.0.1:65539"; butil::EndPoint leaderAddr; ChunkServerID leaderId = 1; @@ -126,8 +125,8 @@ TEST_F(RequestSenderTest, TestWriteChunkSourceInfo) { FakeChunkClosure closure(&event); sourceInfo.cloneFileSource.clear(); - requestSender.WriteChunk(ChunkIDInfo(), 1, 1, 0, {}, 0, 0, - sourceInfo, &closure); + requestSender.WriteChunk(ChunkIDInfo(), 1, 1, 0, {}, 0, 0, sourceInfo, + &closure); event.Wait(); ASSERT_FALSE(chunkRequest.has_clonefilesource()); @@ -148,8 +147,8 @@ TEST_F(RequestSenderTest, TestWriteChunkSourceInfo) { sourceInfo.cloneFileOffset = 0; sourceInfo.valid = true; - requestSender.WriteChunk(ChunkIDInfo(), 1, 1, 0, {}, 0, 0, - sourceInfo, &closure); + 
requestSender.WriteChunk(ChunkIDInfo(), 1, 1, 0, {}, 0, 0, sourceInfo, + &closure); event.Wait(); ASSERT_TRUE(chunkRequest.has_clonefilesource()); diff --git a/test/common/bitmap_test.cpp b/test/common/bitmap_test.cpp index 8bb85b01ad..2bfbed38ca 100644 --- a/test/common/bitmap_test.cpp +++ b/test/common/bitmap_test.cpp @@ -20,10 +20,10 @@ * Author: yangyaokai */ -#include - #include "src/common/bitmap.h" +#include + namespace curve { namespace common { @@ -62,7 +62,7 @@ TEST(BitmapTEST, constructor_test) { delete[] mem; } - // 测试拷贝构造 + // Test copy construction { Bitmap bitmap1(32); Bitmap bitmap2(bitmap1); @@ -72,7 +72,7 @@ TEST(BitmapTEST, constructor_test) { } } - // 测试赋值操作 + // Test assignment operation { Bitmap bitmap1(32); Bitmap bitmap2(16); @@ -88,7 +88,7 @@ TEST(BitmapTEST, constructor_test) { } } - // 测试比较操作符 + // Test Comparison Operator { Bitmap bitmap1(16); Bitmap bitmap2(16); @@ -229,7 +229,7 @@ TEST(BitmapTEST, divide_test) { vector clearRanges; vector setRanges; - // 所有位为0 + // All bits are 0 { bitmap.Clear(); bitmap.Divide(0, 31, &clearRanges, &setRanges); @@ -241,7 +241,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 所有位为1 + // All bits are 1 { bitmap.Set(); bitmap.Divide(0, 31, &clearRanges, &setRanges); @@ -253,7 +253,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 两个range,起始为clear range,末尾为set range + // Two ranges, starting with a clear range and ending with a set range { bitmap.Clear(0, 16); bitmap.Set(17, 31); @@ -268,7 +268,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 两个range,起始为 set range,末尾为 clear range + // Two ranges, starting with set range and ending with clear range { bitmap.Set(0, 16); bitmap.Clear(17, 31); @@ -283,7 +283,8 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 三个range,头尾为 set range,中间为 clear range + // Three ranges, with set ranges at the beginning and end, and clear ranges + // in the middle { bitmap.Set(0, 8); bitmap.Clear(9, 25); @@ -301,7 +302,8 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 三个range,头尾为 clear range,中间为 set range + // Three ranges, with clear ranges at the beginning and end, and set ranges + // in the middle { bitmap.Clear(0, 8); bitmap.Set(9, 25); @@ -319,7 +321,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 四个range,头为 clear range,末尾为 set range + // Four ranges, starting with a clear range and ending with a set range { bitmap.Clear(0, 7); bitmap.Set(8, 15); @@ -340,7 +342,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 四个range,头为 set range,末尾为 clear range + // Four ranges, starting with set range and ending with clear range { bitmap.Set(0, 7); bitmap.Clear(8, 15); @@ -361,7 +363,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 复杂场景随机偏移测试 + // Random offset testing for complex scenes { bitmap.Set(0, 5); bitmap.Clear(6, 9); diff --git a/test/common/channel_pool_test.cpp b/test/common/channel_pool_test.cpp index e327f6f82f..d573142cf0 100644 --- a/test/common/channel_pool_test.cpp +++ b/test/common/channel_pool_test.cpp @@ -20,30 +20,30 @@ * Author: charisu */ -#include - #include "src/common/channel_pool.h" +#include + namespace curve { namespace common { TEST(Common, ChannelPool) { ChannelPool channelPool; ChannelPtr channelPtr; - // 地址非法,init失败 + // Illegal address, init failed std::string addr = "127.0.0.1:80000"; ASSERT_EQ(-1, channelPool.GetOrInitChannel(addr, &channelPtr)); ASSERT_FALSE(channelPtr); - // 地址合法,init成功 + // The address is legal, init succeeded addr = "127.0.0.1:8000"; ASSERT_EQ(0, 
channelPool.GetOrInitChannel(addr, &channelPtr)); ASSERT_TRUE(channelPtr); - // 同一个地址应该返回同一个channelPtr + // The same address should return the same channelPtr ChannelPtr channelPtr2; ASSERT_EQ(0, channelPool.GetOrInitChannel(addr, &channelPtr2)); ASSERT_TRUE(channelPtr2); ASSERT_EQ(channelPtr, channelPtr2); - // 清空 + // Clear channelPool.Clear(); } diff --git a/test/common/configuration_test.cpp b/test/common/configuration_test.cpp index 9dc770bcc8..d51c2c84f4 100644 --- a/test/common/configuration_test.cpp +++ b/test/common/configuration_test.cpp @@ -21,17 +21,17 @@ * 2018/11/23 Wenyu Zhou Initial version */ -#include +#include "src/common/configuration.h" + #include +#include -#include -#include #include +#include #include +#include #include -#include "src/common/configuration.h" - namespace curve { namespace common { @@ -87,9 +87,7 @@ class ConfigurationTest : public ::testing::Test { cFile << confItem; } - void TearDown() { - ASSERT_EQ(0, unlink(confFile_.c_str())); - } + void TearDown() { ASSERT_EQ(0, unlink(confFile_.c_str())); } std::string confFile_; }; @@ -129,52 +127,54 @@ TEST_F(ConfigurationTest, ListConfig) { std::map configs; configs = conf.ListConfig(); ASSERT_NE(0, configs.size()); - // 抽几个key来校验以下 + // Pick a few keys for validation. ASSERT_EQ(configs["test.int1"], "12345"); ASSERT_EQ(configs["test.bool1"], "0"); - // 如果key不存在,返回为空 + // If the key does not exist, return empty ASSERT_EQ(configs["xxx"], ""); } -// 覆盖原有配置 +// Overwrite the original configuration TEST_F(ConfigurationTest, SaveConfig) { bool ret; Configuration conf; conf.SetConfigPath(confFile_); - // 自定义配置项并保存 + // Customize configuration items and save them conf.SetStringValue("test.str1", "new"); ret = conf.SaveConfig(); ASSERT_EQ(ret, true); - // 重新加载配置项 + // Reload Configuration Items Configuration conf2; conf2.SetConfigPath(confFile_); ret = conf2.LoadConfig(); ASSERT_EQ(ret, true); - // 可以读取自定义配置项,原有配置项被覆盖,读取不到 + // Custom configuration items can be read, but the original configuration + // items are overwritten and cannot be read ASSERT_EQ(conf2.GetValue("test.str1"), "new"); ASSERT_EQ(conf2.GetValue("test.int1"), ""); } -// 读取当前配置写到其他路径 +// Read the current configuration and write to another path TEST_F(ConfigurationTest, SaveConfigToFileNotExist) { bool ret; - // 加载当前配置 + // Load current configuration Configuration conf; conf.SetConfigPath(confFile_); ret = conf.LoadConfig(); ASSERT_EQ(ret, true); - // 写配置到其他位置 + // Write configuration to another location std::string newFile("curve.conf.test2"); conf.SetConfigPath(newFile); ret = conf.SaveConfig(); ASSERT_EQ(ret, true); - // 从新配置文件加载,并读取某项配置来进行校验 + // Load from a new configuration file and read a certain configuration for + // verification Configuration newConf; newConf.SetConfigPath(newFile); ret = newConf.LoadConfig(); @@ -337,11 +337,11 @@ TEST_F(ConfigurationTest, TestMetric) { "{\"conf_name\":\"key1\",\"conf_value\":\"123\"}"); ASSERT_STREQ(bvar::Variable::describe_exposed("conf_metric_key2").c_str(), "{\"conf_name\":\"key2\",\"conf_value\":\"1.230000\"}"); - // 还未设置时,返回空 + // When not yet set, return empty ASSERT_STREQ(bvar::Variable::describe_exposed("conf_metric_key3").c_str(), ""); - // 支持自动更新metric + // Support for automatic updating of metrics conf.SetIntValue("key1", 234); ASSERT_STREQ(bvar::Variable::describe_exposed("conf_metric_key1").c_str(), "{\"conf_name\":\"key1\",\"conf_value\":\"234\"}"); diff --git a/test/common/count_down_event_test.cpp b/test/common/count_down_event_test.cpp index 8bdc5c9681..41633c6425 100644 --- 
a/test/common/count_down_event_test.cpp
+++ b/test/common/count_down_event_test.cpp
@@ -20,13 +20,13 @@
* Author: wudemiao
*/
+#include "src/common/concurrent/count_down_event.h"
+
#include
-#include //NOLINT
#include
-#include //NOLINT
-
-#include "src/common/concurrent/count_down_event.h"
+#include //NOLINT
+#include //NOLINT
namespace curve {
namespace common {
@@ -62,7 +62,7 @@ TEST(CountDownEventTest, basic) {
};
std::thread t1(func);
- std::this_thread::sleep_for(std::chrono::milliseconds(3*sleepMs));
+ std::this_thread::sleep_for(std::chrono::milliseconds(3 * sleepMs));
ASSERT_TRUE(isRun.load());
t1.join();
@@ -89,8 +89,7 @@ TEST(CountDownEventTest, basic) {
cond.WaitFor(1000);
}
-
- /* 1. initCnt==Signal次数 */
+ /* 1. initCnt==SignalCount */
{
std::atomic<int> signalCount;
signalCount.store(0, std::memory_order_release);
@@ -111,13 +110,13 @@ TEST(CountDownEventTest, basic) {
t1.join();
}
- /* 2. initCnt<Signal次数 */
+ /* 2. initCnt<SignalCount */
{
std::atomic<int> signalCount;
signalCount.store(0, std::memory_order_release);
const int kEventNum = 20;
- const int kInitCnt = kEventNum - 10;
+ const int kInitCnt = kEventNum - 10;
CountDownEvent cond(kInitCnt);
auto func = [&] {
for (int i = 0; i < kEventNum; ++i) {
@@ -128,7 +127,7 @@ TEST(CountDownEventTest, basic) {
std::thread t1(func);
- /* 等到Signal次数>initCnt */
+ /* Wait until SignalCount>initCnt */
while (true) {
::usleep(5);
if (signalCount.load(std::memory_order_acquire) > kInitCnt) {
@@ -141,13 +140,13 @@ TEST(CountDownEventTest, basic) {
t1.join();
}
- /* 3. initCnt>Signal次数 */
+ /* 3. initCnt>SignalCount */
{
std::atomic<int> signalCount;
signalCount.store(0, std::memory_order_release);
const int kEventNum = 10;
- /* kSignalEvent1 + kSignalEvent2等于kEventNum */
+ /* kSignalEvent1 + kSignalEvent2 = kEventNum */
const int kSignalEvent1 = kEventNum - 5;
const int kSignalEvent2 = 5;
CountDownEvent cond(kEventNum);
@@ -167,7 +166,8 @@ TEST(CountDownEventTest, basic) {
};
std::thread waitThread(waitFunc);
- /* 由于t1 唤醒的次数不够,所以waitThread会阻塞在wait那里 */
+ /* Because t1 does not signal enough times, waitThread stays blocked
+ * at the Wait() call */
ASSERT_EQ(false, passWait.load(std::memory_order_acquire));
auto func2 = [&] {
@@ -176,7 +176,7 @@ TEST(CountDownEventTest, basic) {
cond.Signal();
}
};
- /* 运行t2,补上不够的唤醒次数 */
+ /* Run t2 to make up for the missing wake-ups */
std::thread t2(func2);
t1.join();
@@ -203,8 +203,9 @@ TEST(CountDownEventTest, basic) {
std::chrono::duration<double, std::milli> elpased = end - start;
std::cerr << "elapsed: " << elpased.count() << std::endl;
- // 事件未到达,超时返回,可以容许在一定的误差
- ASSERT_GT(static_cast<int>(elpased.count()), waitForMs-1000);
+ // The event had not arrived and WaitFor returned on timeout; allow a
+ // certain margin of error
+ ASSERT_GT(static_cast<int>(elpased.count()), waitForMs - 1000);
t1.join();
}
@@ -226,7 +227,7 @@ TEST(CountDownEventTest, basic) {
std::chrono::duration<double, std::milli> elpased = end - start;
std::cerr << "elapsed: " << elpased.count() << std::endl;
- // 事件达到,提前返回
+ // The event arrived, so it returns early
ASSERT_GT(waitForMs, static_cast<int>(elpased.count()));
t1.join();
diff --git a/test/common/lru_cache_test.cpp b/test/common/lru_cache_test.cpp
index a5e9d65e19..773d42e153 100644
--- a/test/common/lru_cache_test.cpp
+++ b/test/common/lru_cache_test.cpp
@@ -20,11 +20,13 @@
* Author: xuchaojie,lixiaocui
*/
-#include
+#include "src/common/lru_cache.h"
+
#include
+#include
+
#include
-#include "src/common/lru_cache.h"
#include "src/common/timeutility.h"
namespace curve {
@@ -33,26 +35,26 @@ namespace common {
TEST(TestCacheMetrics, testall) {
CacheMetrics cacheMetrics("LRUCache");
- // 1. 新增数据项
+ // 1.
Add Data Item cacheMetrics.UpdateAddToCacheCount(); ASSERT_EQ(1, cacheMetrics.cacheCount.get_value()); cacheMetrics.UpdateAddToCacheBytes(1000); ASSERT_EQ(1000, cacheMetrics.cacheBytes.get_value()); - // 2. 移除数据项 + // 2. Remove Data Item cacheMetrics.UpdateRemoveFromCacheCount(); ASSERT_EQ(0, cacheMetrics.cacheCount.get_value()); cacheMetrics.UpdateRemoveFromCacheBytes(200); ASSERT_EQ(800, cacheMetrics.cacheBytes.get_value()); - // 3. cache命中 + // 3. cache hit ASSERT_EQ(0, cacheMetrics.cacheHit.get_value()); cacheMetrics.OnCacheHit(); ASSERT_EQ(1, cacheMetrics.cacheHit.get_value()); - // 4. cache未命中 + // 4. cache Misses ASSERT_EQ(0, cacheMetrics.cacheMiss.get_value()); cacheMetrics.OnCacheMiss(); ASSERT_EQ(1, cacheMetrics.cacheMiss.get_value()); @@ -60,10 +62,10 @@ TEST(TestCacheMetrics, testall) { TEST(CaCheTest, test_cache_with_capacity_limit) { int maxCount = 5; - auto cache = std::make_shared>(maxCount, - std::make_shared("LruCache")); + auto cache = std::make_shared>( + maxCount, std::make_shared("LruCache")); - // 1. 测试 put/get + // 1. Test put/get uint64_t cacheSize = 0; for (int i = 1; i <= maxCount + 1; i++) { std::string eliminated; @@ -74,8 +76,8 @@ TEST(CaCheTest, test_cache_with_capacity_limit) { } else { cacheSize += std::to_string(i).size() * 2 - std::to_string(1).size() * 2; - ASSERT_EQ( - cacheSize, cache->GetCacheMetrics()->cacheBytes.get_value()); + ASSERT_EQ(cacheSize, + cache->GetCacheMetrics()->cacheBytes.get_value()); } std::string res; @@ -83,7 +85,7 @@ TEST(CaCheTest, test_cache_with_capacity_limit) { ASSERT_EQ(std::to_string(i), res); } - // 2. 第一个元素被剔出 + // 2. The first element is removed std::string res; ASSERT_FALSE(cache->Get(std::to_string(1), &res)); for (int i = 2; i <= maxCount + 1; i++) { @@ -91,17 +93,17 @@ TEST(CaCheTest, test_cache_with_capacity_limit) { ASSERT_EQ(std::to_string(i), res); } - // 3. 测试删除元素 - // 删除不存在的元素 + // 3. Test Delete Element + // Delete non-existent elements cache->Remove("1"); - // 删除list中存在的元素 + // Delete elements present in the list cache->Remove("2"); ASSERT_FALSE(cache->Get("2", &res)); cacheSize -= std::to_string(2).size() * 2; ASSERT_EQ(maxCount - 1, cache->GetCacheMetrics()->cacheCount.get_value()); ASSERT_EQ(cacheSize, cache->GetCacheMetrics()->cacheBytes.get_value()); - // 4. 重复put + // 4. Repeat put std::string eliminated; cache->Put("4", "hello", &eliminated); ASSERT_TRUE(cache->Get("4", &res)); @@ -116,7 +118,7 @@ TEST(CaCheTest, test_cache_with_capacity_no_limit) { auto cache = std::make_shared>( std::make_shared("LruCache")); - // 1. 测试 put/get + // 1. Test put/get std::string res; for (int i = 1; i <= 10; i++) { std::string eliminated; @@ -125,7 +127,7 @@ TEST(CaCheTest, test_cache_with_capacity_no_limit) { ASSERT_EQ(std::to_string(i), res); } - // 2. 测试元素删除 + // 2. Test element deletion cache->Remove("1"); ASSERT_FALSE(cache->Get("1", &res)); } @@ -192,9 +194,7 @@ TEST(CaCheTest, TestCacheGetLastKV) { ASSERT_EQ(1, k); ASSERT_EQ(1, v); } -bool TestFunction(const int& a) { - return a > 1; -} +bool TestFunction(const int& a) { return a > 1; } TEST(CaCheTest, TestCacheGetLastKVWithFunction) { auto cache = std::make_shared>( std::make_shared("LruCache")); @@ -228,10 +228,10 @@ TEST(SglCaCheTest, TestGetBefore) { TEST(SglCaCheTest, test_cache_with_capacity_limit) { int maxCount = 5; - auto cache = std::make_shared>(maxCount, - std::make_shared("LruCache")); + auto cache = std::make_shared>( + maxCount, std::make_shared("LruCache")); - // 1. 测试 put/IsCached + // 1. 
测试 put/IsCached
+ // 1. Test put/IsCached
uint64_t cacheSize = 0;
for (int i = 1; i <= maxCount; i++) {
cache->Put(std::to_string(i));
cacheSize += std::to_string(i).size();
ASSERT_EQ(i, cache->GetCacheMetrics()->cacheCount.get_value());
ASSERT_TRUE(cache->IsCached(std::to_string(i)));
}
- // 2. 第一个元素被剔出
+ // 2. The first element is removed
cache->Put(std::to_string(11));
ASSERT_FALSE(cache->IsCached(std::to_string(1)));
- // 3. 测试删除元素
- // 删除不存在的元素
+ // 3. Test element deletion
+ // Delete a non-existent element
cache->Remove("1");
- // 删除list中存在的元素
+ // Delete an element present in the list
cache->Remove("2");
ASSERT_FALSE(cache->IsCached("2"));
ASSERT_EQ(maxCount - 1, cache->GetCacheMetrics()->cacheCount.get_value());
- // 4. 重复put
+ // 4. Repeat put
cache->Put("4");
ASSERT_TRUE(cache->IsCached("4"));
ASSERT_EQ(maxCount - 1, cache->GetCacheMetrics()->cacheCount.get_value());
@@ -262,7 +262,7 @@ TEST(SglCaCheTest, test_cache_with_capacity_no_limit) {
auto cache = std::make_shared<SglLRUCache<std::string>>(
std::make_shared<CacheMetrics>("LruCache"));
- // 1. 测试 put/IsCached
+ // 1. Test put/IsCached
std::string res;
for (int i = 1; i <= 10; i++) {
std::string eliminated;
cache->Put(std::to_string(i));
ASSERT_TRUE(cache->IsCached(std::to_string(i)));
ASSERT_FALSE(cache->IsCached(std::to_string(100)));
}
- // 2. 测试元素删除
+ // 2. Test element deletion
cache->Remove("1");
ASSERT_FALSE(cache->IsCached("1"));
}
@@ -315,7 +315,7 @@ TEST(TimedCaCheTest, test_base) {
ASSERT_EQ(i, cache->GetCacheMetrics()->cacheCount.get_value());
} else {
ASSERT_EQ(maxCount,
- cache->GetCacheMetrics()->cacheCount.get_value());
+ cache->GetCacheMetrics()->cacheCount.get_value());
}
std::string res;
ASSERT_TRUE(cache->Get(std::to_string(i), &res));
@@ -355,5 +355,3 @@ TEST(TimedCaCheTest, test_timeout) {
} // namespace common
} // namespace curve
-
-
diff --git a/test/common/task_thread_pool_test.cpp b/test/common/task_thread_pool_test.cpp
index cb44a36b09..fcb7791d54 100644
--- a/test/common/task_thread_pool_test.cpp
+++ b/test/common/task_thread_pool_test.cpp
@@ -20,26 +20,27 @@
* Author: wudemiao
*/
+#include "src/common/concurrent/task_thread_pool.h"
+
#include
-#include
#include
+#include
#include "src/common/concurrent/count_down_event.h"
-#include "src/common/concurrent/task_thread_pool.h"
namespace curve {
namespace common {
using curve::common::CountDownEvent;
-void TestAdd1(int a, double b, CountDownEvent *cond) {
+void TestAdd1(int a, double b, CountDownEvent* cond) {
double c = a + b;
(void)c;
cond->Signal();
}
-int TestAdd2(int a, double b, CountDownEvent *cond) {
+int TestAdd2(int a, double b, CountDownEvent* cond) {
double c = a + b;
(void)c;
cond->Signal();
@@ -47,7 +48,7 @@
}
TEST(TaskThreadPool, basic) {
- /* 测试线程池 start 入参 */
+ /* Test the thread pool Start() parameters */
{
TaskThreadPool<> taskThreadPool;
ASSERT_EQ(-1, taskThreadPool.Start(2, 0));
@@ -74,7 +75,7 @@
}
{
- /* 测试不设置,此时为 INT_MAX */
+ /* If not set, the capacity defaults to INT_MAX */
TaskThreadPool<> taskThreadPool;
ASSERT_EQ(0, taskThreadPool.Start(4));
ASSERT_EQ(INT_MAX, taskThreadPool.QueueCapacity());
@@ -92,7 +93,7 @@
CountDownEvent cond1(1);
taskThreadPool.Enqueue(TestAdd1, 1, 1.234, &cond1);
cond1.Wait();
- /* TestAdd2 是有返回值的 function */
+ /* TestAdd2 is a function with a return value */
CountDownEvent cond2(1);
taskThreadPool.Enqueue(TestAdd2, 1, 1.234, &cond2);
cond2.Wait();
@@ -100,7 +101,7 @@
taskThreadPool.Stop();
}
- /* 基本运行 task 测试 */
+ /* Basic task-running test */
{
std::atomic<int> runTaskCount;
runTaskCount.store(0, std::memory_order_release);
@@ -133,14 +134,14 @@ TEST(TaskThreadPool, basic) {
t2.join();
t3.join();
- /* 等待所有 task 执行完毕 */
+ /* Wait for all tasks to finish executing */
cond.Wait();
ASSERT_EQ(3 * kMaxLoop, runTaskCount.load(std::memory_order_acquire));
taskThreadPool.Stop();
}
- /* 测试队列满了,push会阻塞 */
+ /* Test that pushing blocks once the queue is full */
{
std::atomic<int> runTaskCount;
runTaskCount.store(0, std::memory_order_release);
@@ -157,8 +158,7 @@
CountDownEvent cond4(1);
CountDownEvent startRunCond4(1);
- auto waitTask = [&](CountDownEvent* sigCond,
- CountDownEvent* waitCond) {
+ auto waitTask = [&](CountDownEvent* sigCond, CountDownEvent* waitCond) {
sigCond->Signal();
waitCond->Wait();
runTaskCount.fetch_add(1, std::memory_order_acq_rel);
@@ -169,12 +169,13 @@
ASSERT_EQ(kQueueCapacity, taskThreadPool.QueueCapacity());
ASSERT_EQ(kThreadNums, taskThreadPool.ThreadOfNums());
- /* 把线程池的所有处理线程都卡住了 */
+ /* Block all of the thread pool's worker threads */
taskThreadPool.Enqueue(waitTask, &startRunCond1, &cond1);
taskThreadPool.Enqueue(waitTask, &startRunCond2, &cond2);
taskThreadPool.Enqueue(waitTask, &startRunCond3, &cond3);
taskThreadPool.Enqueue(waitTask, &startRunCond4, &cond4);
- /* 等待 waitTask1、waitTask2、waitTask3、waitTask4 都开始运行 */
+ /* Wait until waitTask1, waitTask2, waitTask3, and waitTask4 have all
+ * started running */
startRunCond1.Wait();
startRunCond2.Wait();
startRunCond3.Wait();
@@ -186,7 +187,7 @@
runTaskCount.fetch_add(1, std::memory_order_acq_rel);
};
- /* 记录线程 push 到线程池 queue 的 task 数量 */
+ /* Record the number of tasks each thread pushes into the pool queue */
std::atomic<int> pushTaskCount1;
std::atomic<int> pushTaskCount2;
std::atomic<int> pushTaskCount3;
@@ -208,7 +209,7 @@
std::thread t2(std::bind(threadFunc, &pushTaskCount2));
std::thread t3(std::bind(threadFunc, &pushTaskCount3));
- /* 等待线程池 queue 被 push 满 */
+ /* Wait for the thread pool queue to fill up */
int pushTaskCount;
while (true) {
::usleep(50);
@@ -222,32 +223,33 @@
}
}
- /* push 进去的 task 都没有被执行 */
+ /* None of the pushed tasks have been executed yet */
ASSERT_EQ(0, runTaskCount.load(std::memory_order_acquire));
/**
- * 此时,thread pool 的 queue 肯定 push 满了,且 push
- * 满了之后就没法再 push 了
+ * At this point the thread pool's queue must be full, and once it
+ * is full nothing more can be pushed
*/
ASSERT_EQ(pushTaskCount, taskThreadPool.QueueCapacity());
ASSERT_EQ(taskThreadPool.QueueCapacity(), taskThreadPool.QueueSize());
- /* 将线程池中的线程都唤醒 */
+ /* Wake up all threads in the thread pool */
cond1.Signal();
cond2.Signal();
cond3.Signal();
cond4.Signal();
- /* 等待所有 task 执行完成 */
+ /* Wait for all tasks to finish */
while (true) {
::usleep(10);
- if (runTaskCount.load(std::memory_order_acquire)
- >= 4 + 3 * kMaxLoop) {
+ if (runTaskCount.load(std::memory_order_acquire) >=
+ 4 + 3 * kMaxLoop) {
break;
}
}
/**
- * 等待所有的 push thread 退出,这样才能保证 pushThreadCount 计数更新了
+ * Wait for all of the push threads to exit, so that the pushThreadCount
+ * counters are guaranteed to have been updated
*/
pushThreadCond.Wait();
diff --git a/test/common/test_name_lock.cpp b/test/common/test_name_lock.cpp
index e5520e0a1a..074dd885ce 100644
--- a/test/common/test_name_lock.cpp
+++ b/test/common/test_name_lock.cpp
@@ -21,6 +21,7 @@
*/
#include
+
#include
#include "src/common/concurrent/name_lock.h"
@@ -31,29 +32,27 @@ namespace common {
TEST(TestNameLock, TestNameLockBasic) {
NameLock lock1, lock2, lock3;
- // lock测试
+ // Lock test
lock1.Lock("str1");
- // 同锁不同str可lock不死锁
+ // The same lock with different strs can lock without deadlock
lock1.Lock("str2");
- // 不同锁同str可lock不死锁
+ // Different locks with the same str can lock without deadlock
lock2.Lock("str1");
-
-
- // 同锁同str TryLock失败
+ // Same lock with the same str, TryLock fails
ASSERT_FALSE(lock1.TryLock("str1"));
- // 同锁不同str TryLock成功
+ // Same lock with a different str, TryLock succeeds
ASSERT_TRUE(lock1.TryLock("str3"));
- // 不同锁同str TryLock成功
+ // Different locks with the same str, TryLock succeeds
ASSERT_TRUE(lock3.TryLock("str1"));
- // unlock测试
+ // Unlock test
lock1.Unlock("str1");
lock1.Unlock("str2");
lock1.Unlock("str3");
lock2.Unlock("str1");
lock3.Unlock("str1");
- // 未锁unlock ok
+ // Unlocking a name that is not locked is OK
lock2.Unlock("str2");
}
@@ -63,12 +62,13 @@ TEST(TestNameLock, TestNameLockGuardBasic) {
NameLockGuard guard1(lock1, "str1");
NameLockGuard guard2(lock1, "str2");
NameLockGuard guard3(lock2, "str1");
- // 作用域内加锁成功,不可再加锁
+ // Successfully locked within the scope; cannot be locked again
ASSERT_FALSE(lock1.TryLock("str1"));
ASSERT_FALSE(lock1.TryLock("str2"));
ASSERT_FALSE(lock2.TryLock("str1"));
}
- // 作用域外自动解锁,可再加锁
+ // Automatically unlocked outside the scope, so it can be locked
+ // again
ASSERT_TRUE(lock1.TryLock("str1"));
ASSERT_TRUE(lock1.TryLock("str2"));
ASSERT_TRUE(lock2.TryLock("str1"));
@@ -79,14 +79,14 @@
TEST(TestNameLock, TestNameLockConcurrent) {
NameLock lock1;
- auto worker = [&] (const std::string &str) {
+ auto worker = [&](const std::string& str) {
for (int i = 0; i < 10000; i++) {
NameLockGuard guard(lock1, str);
}
};
std::vector<Thread> threadpool;
- for (auto &t : threadpool) {
+ for (auto& t : threadpool) {
std::string str1 = "aaaa";
std::string str2 = "bbbb";
std::srand(std::time(nullptr));
std::string rstr = std::rand() % 2 ? str1 : str2;
t = Thread(worker, rstr);
}
- for (auto &t : threadpool) {
+ for (auto& t : threadpool) {
t.join();
}
}
-
-
-} // namespace common
-} // namespace curve
+} // namespace common
+} // namespace curve
diff --git a/test/failpoint/failpoint_test.cpp b/test/failpoint/failpoint_test.cpp
index f0096b0ea4..c77f3b6e52 100644
--- a/test/failpoint/failpoint_test.cpp
+++ b/test/failpoint/failpoint_test.cpp
@@ -19,56 +19,56 @@
* Created Date: Monday May 13th 2019
* Author: hzsunjianliang
*/
-#include
-#include
#include
+#include
+#include
+
#include "test/failpoint/fiu_local.h"
/*
- * libfiu 使用文档详见:https://blitiri.com.ar/p/libfiu/doc/man-libfiu.html
- * 分为2个部分,一部分是core API,包括fiu_do_on/fiu_return_on/fiu_init
- * core API 用于作用与注入在业务代码处,并由外部control API控制触发。
- * control API 包括:fiu_enable\fiu_disable\fiu_enable_random等等
- * 用于在测试代码处用户进行错误的注入,具体使用方式和方法如下示例代码所示
+ * For detailed documentation on how to use libfiu, please refer to:
+ * https://blitiri.com.ar/p/libfiu/doc/man-libfiu.html Libfiu is divided into
+ * two parts: the core API, which includes functions like
+ * fiu_do_on/fiu_return_on/fiu_init. The core API is used to inject faults into
+ * your business code and is controlled externally using the control API. The
+ * control API includes functions like fiu_enable, fiu_disable,
+ * fiu_enable_random, and more. These functions are used in your test code to
+ * inject errors. You can find specific usage examples and methods in the code
+ * snippets below.
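+ *
+ * A minimal sketch of that enable/disable flow, using only the calls already
+ * exercised by the alwaysfail test below (fiu_init is called once in SetUp):
+ *
+ *   if (fiu_enable("no_free_space", 1, NULL, 0) == 0) {
+ *       ASSERT_EQ(free_space(), 0);                  // injected failure path
+ *       ASSERT_EQ(fiu_disable("no_free_space"), 0);  // stop injecting
+ *   }
+ *   ASSERT_EQ(free_space(), 100);                    // normal path restored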
*/
namespace curve {
namespace failpint {
-class FailPointTest: public ::testing::Test {
+class FailPointTest : public ::testing::Test {
protected:
- void SetUp() override {
- fiu_init(0);
- }
+ void SetUp() override { fiu_init(0); }
void TearDown() override {
// noop
}
};
-// 注入方式: 通过返回值的方式进行注入
+// Injection method: Inject by returning a value
size_t free_space() {
- fiu_return_on("no_free_space", 0);
- return 100;
+ fiu_return_on("no_free_space", 0);
+ return 100;
}
-// 注入方式: 通过side_effet 进行注入
+// Injection method: through side_effect injection
-void modify_state(int *val) {
+void modify_state(int* val) {
*val += 1;
fiu_do_on("side_effect", *val += 1);
return;
}
-// 注入方式: 通过side_effet 进行注入(lambda方式)
+// Injection method: through side_effect injection (lambda method)
-void modify_state_with_lamda(int &val) { //NOLINT
+void modify_state_with_lamda(int& val) { // NOLINT
- fiu_do_on("side_effect_2",
- auto func = [&] () {
- val++;
- };
- func(););
+ fiu_do_on(
+ "side_effect_2", auto func = [&]() { val++; }; func(););
return;
}
-// 错误触发方式: 总是触发
+// Error triggering method: always triggered
TEST_F(FailPointTest, alwaysfail) {
if (fiu_enable("no_free_space", 1, NULL, 0) == 0) {
ASSERT_EQ(free_space(), 0);
@@ -80,7 +80,7 @@ TEST_F(FailPointTest, alwaysfail) {
ASSERT_EQ(free_space(), 100);
}
-// 错误触发方式: 随机触发错误
+// Error triggering method: trigger errors randomly
TEST_F(FailPointTest, nondeterministic) {
if (fiu_enable_random("no_free_space", 1, NULL, 0, 1) == 0) {
ASSERT_EQ(free_space(), 0);
@@ -144,6 +144,5 @@ TEST_F(FailPointTest, WildZard) {
}
}
-
} // namespace failpint
} // namespace curve
diff --git a/test/fs/ext4_filesystem_test.cpp b/test/fs/ext4_filesystem_test.cpp
index f2c6cfa520..65540555c5 100644
--- a/test/fs/ext4_filesystem_test.cpp
+++ b/test/fs/ext4_filesystem_test.cpp
@@ -21,34 +21,34 @@
*/
#include
-#include
#include
-#include
-#include
#include
+#include
+#include
+
#include
-#include "test/fs/mock_posix_wrapper.h"
#include "src/fs/ext4_filesystem_impl.h"
+#include "test/fs/mock_posix_wrapper.h"
using ::testing::_;
+using ::testing::DoAll;
+using ::testing::ElementsAre;
using ::testing::Ge;
using ::testing::Gt;
using ::testing::Mock;
-using ::testing::DoAll;
+using ::testing::NotNull;
using ::testing::Return;
+using ::testing::ReturnArg;
using ::testing::ReturnPointee;
-using ::testing::NotNull;
-using ::testing::StrEq;
-using ::testing::ElementsAre;
using ::testing::SetArgPointee;
-using ::testing::ReturnArg;
+using ::testing::StrEq;
namespace curve {
namespace fs {
-ACTION_TEMPLATE(SetVoidArgPointee,
- HAS_1_TEMPLATE_PARAMS(int, k),
+ACTION_TEMPLATE(SetVoidArgPointee, HAS_1_TEMPLATE_PARAMS(int, k),
AND_1_VALUE_PARAMS(first)) {
auto output = reinterpret_cast(::testing::get(args));
*output = first;
@@ -56,18 +56,18 @@ ACTION_TEMPLATE(SetVoidArgPointee,
class Ext4LocalFileSystemTest : public testing::Test {
public:
- void SetUp() {
- wrapper = std::make_shared();
- lfs = Ext4FileSystemImpl::getInstance();
- lfs->SetPosixWrapper(wrapper);
- errno = 1234;
- }
+ void SetUp() {
+ wrapper = std::make_shared();
+ lfs = Ext4FileSystemImpl::getInstance();
+ lfs->SetPosixWrapper(wrapper);
+ errno = 1234;
+ }
- void TearDown() {
- errno = 0;
- // allows the destructor of lfs_ to be invoked correctly
- Mock::VerifyAndClear(wrapper.get());
- }
+ void TearDown() {
+ errno = 0;
+ // allows the destructor of lfs_ to be invoked correctly
+ Mock::VerifyAndClear(wrapper.get());
+ }
protected:
std::shared_ptr wrapper;
@@ -79,99 +79,70 @@ TEST_F(Ext4LocalFileSystemTest, InitTest) {
option.enableRenameat2 = true; struct utsname kernel_info; - // 测试版本偏低的情况 - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", - "2.16.0"); + // Testing with a lower version + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "2.16.0"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(lfs->Init(option), -1); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.14.0-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(lfs->Init(option), -1); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.14.19-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(lfs->Init(option), -1); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.15.0-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(0, lfs->Init(option)); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.15.1-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(0, lfs->Init(option)); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.16.0-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(0, lfs->Init(option)); - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", - "4.16.0"); + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "4.16.0"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(0, lfs->Init(option)); } // test Statfs TEST_F(Ext4LocalFileSystemTest, StatfsTest) { FileSystemInfo fsinfo; - EXPECT_CALL(*wrapper, statfs(NotNull(), NotNull())) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, statfs(NotNull(), NotNull())).WillOnce(Return(0)); ASSERT_EQ(lfs->Statfs("./", &fsinfo), 0); - EXPECT_CALL(*wrapper, statfs(NotNull(), NotNull())) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, statfs(NotNull(), NotNull())).WillOnce(Return(-1)); ASSERT_EQ(lfs->Statfs("./", &fsinfo), -errno); } // test Open TEST_F(Ext4LocalFileSystemTest, OpenTest) { - EXPECT_CALL(*wrapper, open(NotNull(), _, _)) - .WillOnce(Return(666)); + EXPECT_CALL(*wrapper, open(NotNull(), _, _)).WillOnce(Return(666)); ASSERT_EQ(lfs->Open("/a", 0), 666); - EXPECT_CALL(*wrapper, open(NotNull(), _, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, open(NotNull(), _, _)).WillOnce(Return(-1)); 
ASSERT_EQ(lfs->Open("/a", 0), -errno); } // test Close TEST_F(Ext4LocalFileSystemTest, CloseTest) { - EXPECT_CALL(*wrapper, close(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, close(_)).WillOnce(Return(0)); ASSERT_EQ(lfs->Close(666), 0); - EXPECT_CALL(*wrapper, close(_)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, close(_)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Close(666), -errno); } @@ -185,32 +156,26 @@ TEST_F(Ext4LocalFileSystemTest, DeleteTest) { fileInfo.st_mode = S_IFREG; // /a is a file EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), Return(0))); // /b is a dir EXPECT_CALL(*wrapper, stat(StrEq("/b"), NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<1>(dirInfo), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(dirInfo), Return(0))); // /b/1 is a file EXPECT_CALL(*wrapper, stat(StrEq("/b/1"), NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(fileInfo), Return(0))); DIR* dirp = reinterpret_cast(0x01); struct dirent entryArray[1]; memset(entryArray, 0, sizeof(entryArray)); memcpy(entryArray[0].d_name, "1", 1); - EXPECT_CALL(*wrapper, opendir(StrEq("/b"))) - .WillOnce(Return(dirp)); + EXPECT_CALL(*wrapper, opendir(StrEq("/b"))).WillOnce(Return(dirp)); EXPECT_CALL(*wrapper, readdir(dirp)) .Times(2) .WillOnce(Return(entryArray)) .WillOnce(Return(nullptr)); - EXPECT_CALL(*wrapper, closedir(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, closedir(_)).WillOnce(Return(0)); - EXPECT_CALL(*wrapper, remove(NotNull())) - .WillRepeatedly(Return(0)); + EXPECT_CALL(*wrapper, remove(NotNull())).WillRepeatedly(Return(0)); } // test delete dir @@ -219,8 +184,7 @@ TEST_F(Ext4LocalFileSystemTest, DeleteTest) { ASSERT_EQ(lfs->Delete("/b"), 0); // opendir failed - EXPECT_CALL(*wrapper, opendir(StrEq("/b"))) - .WillOnce(Return(nullptr)); + EXPECT_CALL(*wrapper, opendir(StrEq("/b"))).WillOnce(Return(nullptr)); // List will failed ASSERT_EQ(lfs->Delete("/b"), -errno); } @@ -229,8 +193,7 @@ TEST_F(Ext4LocalFileSystemTest, DeleteTest) { { ASSERT_EQ(lfs->Delete("/a"), 0); // error occured when remove file - EXPECT_CALL(*wrapper, remove(NotNull())) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, remove(NotNull())).WillOnce(Return(-1)); ASSERT_EQ(lfs->Delete("/a"), -errno); } } @@ -242,32 +205,25 @@ TEST_F(Ext4LocalFileSystemTest, MkdirTest) { info.st_mode = S_IFDIR; // success EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); - EXPECT_CALL(*wrapper, mkdir(NotNull(), _)) - .Times(0); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); + EXPECT_CALL(*wrapper, mkdir(NotNull(), _)).Times(0); ASSERT_EQ(lfs->Mkdir("/a"), 0); // stat failed ,mkdir success - EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(Return(-1)); - EXPECT_CALL(*wrapper, mkdir(StrEq("/a"), _)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())).WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, mkdir(StrEq("/a"), _)).WillOnce(Return(0)); ASSERT_EQ(lfs->Mkdir("/a"), 0); // test relative path EXPECT_CALL(*wrapper, stat(_, NotNull())) .Times(2) .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))) .WillOnce(Return(-1)); - EXPECT_CALL(*wrapper, mkdir(StrEq("aaa/bbb"), _)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, mkdir(StrEq("aaa/bbb"), _)).WillOnce(Return(0)); ASSERT_EQ(lfs->Mkdir("aaa/bbb"), 0); // is not a dir, mkdir failed info.st_mode = 
S_IFREG; EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); - EXPECT_CALL(*wrapper, mkdir(NotNull(), _)) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); + EXPECT_CALL(*wrapper, mkdir(NotNull(), _)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Mkdir("/a"), -errno); } @@ -277,19 +233,16 @@ TEST_F(Ext4LocalFileSystemTest, DirExistsTest) { info.st_mode = S_IFDIR; // is dir EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); ASSERT_EQ(lfs->DirExists("/a"), true); // stat failed EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(-1))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(-1))); ASSERT_EQ(lfs->DirExists("/a"), false); // not dir info.st_mode = S_IFREG; EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); ASSERT_EQ(lfs->DirExists("/a"), false); } @@ -299,19 +252,16 @@ TEST_F(Ext4LocalFileSystemTest, FileExistsTest) { info.st_mode = S_IFREG; // is file EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); ASSERT_EQ(lfs->FileExists("/a"), true); // stat failed EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(-1))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(-1))); ASSERT_EQ(lfs->FileExists("/a"), false); // not file info.st_mode = S_IFDIR; EXPECT_CALL(*wrapper, stat(StrEq("/a"), NotNull())) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(0))); ASSERT_EQ(lfs->FileExists("/a"), false); } @@ -320,11 +270,9 @@ TEST_F(Ext4LocalFileSystemTest, RenameTest) { LocalFileSystemOption option; option.enableRenameat2 = false; ASSERT_EQ(0, lfs->Init(option)); - EXPECT_CALL(*wrapper, rename(NotNull(), NotNull())) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, rename(NotNull(), NotNull())).WillOnce(Return(0)); ASSERT_EQ(lfs->Rename("/a", "/b"), 0); - EXPECT_CALL(*wrapper, rename(NotNull(), NotNull())) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, rename(NotNull(), NotNull())).WillOnce(Return(-1)); ASSERT_EQ(lfs->Rename("/a", "/b"), -errno); } @@ -333,13 +281,10 @@ TEST_F(Ext4LocalFileSystemTest, Renameat2Test) { LocalFileSystemOption option; option.enableRenameat2 = true; struct utsname kernel_info; - snprintf(kernel_info.release, - sizeof(kernel_info.release), - "%s", + snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", "3.15.1-sss"); EXPECT_CALL(*wrapper, uname(NotNull())) - .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(kernel_info), Return(0))); ASSERT_EQ(0, lfs->Init(option)); EXPECT_CALL(*wrapper, renameat2(NotNull(), NotNull(), 0)) .WillOnce(Return(0)); @@ -359,20 +304,17 @@ TEST_F(Ext4LocalFileSystemTest, ListTest) { memcpy(entryArray[2].d_name, "1", 1); vector names; // opendir failed - EXPECT_CALL(*wrapper, opendir(StrEq("/a"))) - .WillOnce(Return(nullptr)); + EXPECT_CALL(*wrapper, opendir(StrEq("/a"))).WillOnce(Return(nullptr)); ASSERT_EQ(lfs->List("/a", &names), -errno); // success - EXPECT_CALL(*wrapper, opendir(StrEq("/a"))) - .WillOnce(Return(dirp)); + EXPECT_CALL(*wrapper, opendir(StrEq("/a"))).WillOnce(Return(dirp)); 
EXPECT_CALL(*wrapper, readdir(dirp)) .Times(4) .WillOnce(Return(entryArray)) .WillOnce(Return(entryArray + 1)) .WillOnce(Return(entryArray + 2)) .WillOnce(Return(nullptr)); - EXPECT_CALL(*wrapper, closedir(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, closedir(_)).WillOnce(Return(0)); ASSERT_EQ(lfs->List("/a", &names), 0); ASSERT_THAT(names, ElementsAre("1")); } @@ -397,13 +339,11 @@ TEST_F(Ext4LocalFileSystemTest, ReadTest) { ASSERT_EQ(lfs->Read(666, buf, 0, 3), 2); ASSERT_STREQ(buf, "12"); // pread failed - EXPECT_CALL(*wrapper, pread(_, NotNull(), _, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, pread(_, NotNull(), _, _)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Read(666, buf, 0, 3), -errno); // set errno = EINTR,and will repeatedly return -1 errno = EINTR; - EXPECT_CALL(*wrapper, pread(_, NotNull(), _, _)) - .WillRepeatedly(Return(-1)); + EXPECT_CALL(*wrapper, pread(_, NotNull(), _, _)).WillRepeatedly(Return(-1)); ASSERT_EQ(lfs->Read(666, buf, 0, 3), -errno); // set errno = EINTR,but only return -1 once errno = EINTR; @@ -418,16 +358,12 @@ TEST_F(Ext4LocalFileSystemTest, ReadTest) { TEST_F(Ext4LocalFileSystemTest, WriteTest) { char buf[4] = {0}; // success - EXPECT_CALL(*wrapper, pwrite(_, buf, _, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*wrapper, pwrite(_, buf + 1, _, _)) - .WillOnce(Return(1)); - EXPECT_CALL(*wrapper, pwrite(_, buf + 2, _, _)) - .WillOnce(Return(1)); + EXPECT_CALL(*wrapper, pwrite(_, buf, _, _)).WillOnce(Return(1)); + EXPECT_CALL(*wrapper, pwrite(_, buf + 1, _, _)).WillOnce(Return(1)); + EXPECT_CALL(*wrapper, pwrite(_, buf + 2, _, _)).WillOnce(Return(1)); ASSERT_EQ(lfs->Write(666, buf, 0, 3), 3); // pwrite failed - EXPECT_CALL(*wrapper, pwrite(_, NotNull(), _, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, pwrite(_, NotNull(), _, _)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Write(666, buf, 0, 3), -errno); // set errno = EINTR,and will repeatedly return -1 errno = EINTR; @@ -509,12 +445,10 @@ TEST_F(Ext4LocalFileSystemTest, WriteIOBufTest) { // test Fallocate TEST_F(Ext4LocalFileSystemTest, FallocateTest) { // success - EXPECT_CALL(*wrapper, fallocate(_, _, _, _)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, fallocate(_, _, _, _)).WillOnce(Return(0)); ASSERT_EQ(lfs->Fallocate(666, 0, 0, 4096), 0); // fallocate failed - EXPECT_CALL(*wrapper, fallocate(_, _, _, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, fallocate(_, _, _, _)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Fallocate(666, 0, 0, 4096), -errno); } @@ -522,31 +456,27 @@ TEST_F(Ext4LocalFileSystemTest, FallocateTest) { TEST_F(Ext4LocalFileSystemTest, FstatTest) { struct stat info; // success - EXPECT_CALL(*wrapper, fstat(_, _)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, fstat(_, _)).WillOnce(Return(0)); ASSERT_EQ(lfs->Fstat(666, &info), 0); // fallocate failed - EXPECT_CALL(*wrapper, fstat(_, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, fstat(_, _)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Fstat(666, &info), -errno); } // test Fsync TEST_F(Ext4LocalFileSystemTest, FsyncTest) { // success - EXPECT_CALL(*wrapper, fsync(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*wrapper, fsync(_)).WillOnce(Return(0)); ASSERT_EQ(lfs->Fsync(666), 0); // fallocate failed - EXPECT_CALL(*wrapper, fsync(_)) - .WillOnce(Return(-1)); + EXPECT_CALL(*wrapper, fsync(_)).WillOnce(Return(-1)); ASSERT_EQ(lfs->Fsync(666), -errno); } TEST_F(Ext4LocalFileSystemTest, ReadRealTest) { std::shared_ptr pw = std::make_shared(); lfs->SetPosixWrapper(pw); - int fd = lfs->Open("a", O_CREAT|O_RDWR); + int fd = lfs->Open("a", 
O_CREAT | O_RDWR); ASSERT_LT(0, fd); // 0 < fd char buf[8192] = {0}; ASSERT_EQ(4096, lfs->Write(fd, buf, 0, 4096)); diff --git a/test/integration/chunkserver/chunkserver_basic_test.cpp b/test/integration/chunkserver/chunkserver_basic_test.cpp index a36bfedcee..30c40442aa 100644 --- a/test/integration/chunkserver/chunkserver_basic_test.cpp +++ b/test/integration/chunkserver/chunkserver_basic_test.cpp @@ -24,8 +24,8 @@ #include #include -#include #include +#include #include "test/chunkserver/datastore/filepool_helper.h" #include "test/integration/common/chunkservice_op.h" @@ -49,24 +49,23 @@ static constexpr uint32_t kOpRequestAlignSize = 4096; const char* kFakeMdsAddr = "127.0.0.1:9079"; -static const char *chunkServerParams[1][16] = { - { "chunkserver", "-chunkServerIp=127.0.0.1", - "-chunkServerPort=" BASIC_TEST_CHUNK_SERVER_PORT, - "-chunkServerStoreUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/", - "-chunkServerMetaUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT - "/chunkserver.dat", - "-copySetUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", - "-raftSnapshotUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", - "-raftLogUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", - "-recycleUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/recycler", - "-chunkFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkfilepool/", - "-chunkFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT - "/chunkfilepool.meta", - "-walFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool/", - "-walFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT - "/walfilepool.meta", - "-conf=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkserver.conf", - "-raft_sync_segments=true", NULL }, +static const char* chunkServerParams[1][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", + "-chunkServerPort=" BASIC_TEST_CHUNK_SERVER_PORT, + "-chunkServerStoreUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/", + "-chunkServerMetaUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT + "/chunkserver.dat", + "-copySetUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-raftSnapshotUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-raftLogUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-recycleUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/recycler", + "-chunkFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkfilepool/", + "-chunkFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT + "/chunkfilepool.meta", + "-walFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool/", + "-walFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool.meta", + "-conf=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkserver.conf", + "-raft_sync_segments=true", NULL}, }; butil::AtExitManager atExitManager; @@ -107,7 +106,7 @@ class ChunkServerIoTest : public testing::Test { paramsIndexs_[PeerCluster::PeerToId(peer1_)] = 0; params_.push_back(const_cast(chunkServerParams[0])); - // 初始化chunkfilepool,这里会预先分配一些chunk + // Initialize chunkfilepool, where some chunks will be pre allocated lfs_ = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); poolDir_ = "./" + std::to_string(PeerCluster::PeerToId(peer1_)) + "/chunkfilepool/"; @@ -125,11 +124,11 @@ class ChunkServerIoTest : public testing::Test { ::system(rmdir1.c_str()); - // 等待进程结束 + // Waiting for the process to end ::usleep(100 * 1000); } - int InitCluster(PeerCluster *cluster) { + int InitCluster(PeerCluster* cluster) { PeerId leaderId; Peer leaderPeer; cluster->SetElectionTimeoutMs(electionTimeoutMs_); @@ -139,7 +138,7 @@ class ChunkServerIoTest : public testing::Test { return 
-1; } - // 等待leader产生 + // Waiting for the leader to be generated if (cluster->WaitLeader(&leaderPeer_)) { LOG(ERROR) << "WaiteLeader failed"; return -1; @@ -168,45 +167,46 @@ class ChunkServerIoTest : public testing::Test { std::string leader = ""; PeerCluster cluster("InitShutdown-cluster", logicPoolId_, copysetId_, - peers_, params_, paramsIndexs_); + peers_, params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, InitCluster(&cluster)); - /* 场景一:新建的文件,Chunk文件不存在 */ + /* Scenario 1: Newly created file, Chunk file does not exist*/ ASSERT_EQ(0, verify->VerifyReadChunk(chunkId, sn1, 0, length, nullptr)); - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunkId, NULL_SN, NULL_SN, leader)); + ASSERT_EQ( + 0, verify->VerifyGetChunkInfo(chunkId, NULL_SN, NULL_SN, leader)); ASSERT_EQ(0, verify->VerifyDeleteChunk(chunkId, sn1)); - /* 场景二:通过WriteChunk产生chunk文件后操作 */ + /* Scenario 2: After generating a chunk file through WriteChunk, perform + * the operation*/ data.assign(length, 'a'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 0, 4 * KB, - data.c_str(), &chunkData)); + data.c_str(), &chunkData)); ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunkId, sn1, NULL_SN, leader)); - ASSERT_EQ(0, verify->VerifyReadChunk( - chunkId, sn1, 0, 4 * KB, &chunkData)); + ASSERT_EQ(0, + verify->VerifyReadChunk(chunkId, sn1, 0, 4 * KB, &chunkData)); ASSERT_EQ(0, verify->VerifyReadChunk(chunkId, sn1, kChunkSize - 4 * KB, - 4 * KB, nullptr)); + 4 * KB, nullptr)); data.assign(length, 'b'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 0, 4 * KB, - data.c_str(), &chunkData)); - ASSERT_EQ(0, - verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); + data.c_str(), &chunkData)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); data.assign(length, 'c'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 4 * KB, 4 * KB, - data.c_str(), &chunkData)); - ASSERT_EQ(0, - verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); + data.c_str(), &chunkData)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); data.assign(length * 2, 'd'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 4 * KB, 8 * KB, - data.c_str(), &chunkData)); - ASSERT_EQ(0, - verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); + data.c_str(), &chunkData)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); - /* 场景三:用户删除文件 */ + /* Scenario 3: User deletes files*/ ASSERT_EQ(0, verify->VerifyDeleteChunk(chunkId, sn1)); - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunkId, NULL_SN, NULL_SN, leader)); + ASSERT_EQ( + 0, verify->VerifyGetChunkInfo(chunkId, NULL_SN, NULL_SN, leader)); } void TestSnapshotIO(std::shared_ptr verify) { @@ -217,150 +217,164 @@ class ChunkServerIoTest : public testing::Test { const SequenceNum sn3 = 3; int length = kOpRequestAlignSize; std::string data(length * 4, 0); - std::string chunkData1a(kChunkSize, 0); // chunk1版本1预期数据 - std::string chunkData1b(kChunkSize, 0); // chunk1版本2预期数据 - std::string chunkData1c(kChunkSize, 0); // chunk1版本3预期数据 - std::string chunkData2(kChunkSize, 0); // chunk2预期数据 + std::string chunkData1a(kChunkSize, + 0); // chunk1 version 1 expected data + std::string chunkData1b(kChunkSize, + 0); // chunk1 version 2 expected data + std::string chunkData1c(kChunkSize, + 0); // chunk1 version 3 expected data + std::string chunkData2(kChunkSize, 0); // chunk2 expected data std::string leader = ""; PeerCluster cluster("InitShutdown-cluster", 
logicPoolId_, copysetId_, - peers_, params_, paramsIndexs_); + peers_, params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, InitCluster(&cluster)); - // 构造初始环境 - // 写chunk1产生chunk1,chunk1版本为1,chunk2开始不存在。 + // Construct initial environment + // Writing chunk1 creates chunk1 at version 1; chunk2 does not + // exist yet. data.assign(length, 'a'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn1, 0, 12 * KB, - data.c_str(), &chunkData1a)); + data.c_str(), &chunkData1a)); /* - * 场景一:第一次给文件打快照 - */ - chunkData1b.assign(chunkData1a); // 模拟对chunk1数据进行COW + * Scenario 1: Taking a snapshot of a file for the first time + */ + chunkData1b.assign(chunkData1a); // Simulate COW on chunk1 data data.assign(length, 'b'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 4 * KB, - data.c_str(), &chunkData1b)); - // 重复写入同一区域,用于验证不会重复cow + data.c_str(), &chunkData1b)); + // Write repeatedly to the same area to verify that no duplicate + // COW is triggered data.assign(length, 'c'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 4 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b)); - // 读取chunk1快照,预期读到版本1数据 + // Reading chunk1 snapshot, expected to read version 1 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn1, 0, 12 * KB, - &chunkData1a)); + &chunkData1a)); - // chunk1写[0, 4KB] + // Chunk1 write [0, 4KB] data.assign(length, 'd'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 0, 4 * KB, - data.c_str(), &chunkData1b)); - // chunk1写[4KB, 16KB] + data.c_str(), &chunkData1b)); + // Chunk1 write [4KB, 16KB] data.assign(length, 'e'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 12 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b)); - // 获取chunk1信息,预期其版本为2,快照版本为1, + // Obtain chunk1 information, with expected version 2 and snapshot + // version 1. ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn2, sn1, leader)); - // chunk1读[0, 12KB], 预期读到版本2数据 - ASSERT_EQ(0, - verify->VerifyReadChunk(chunk1, sn2, 0, 12 * KB, &chunkData1b)); + // Chunk1 read [0, 12KB], expected to read version 2 data + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunk1, sn2, 0, 12 * KB, &chunkData1b)); - // 读取chunk1的快照, 预期读到版本1数据 + // Reading snapshot of chunk1, expected to read version 1 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn1, 0, 12 * KB, - &chunkData1a)); + &chunkData1a)); - // 读取chunk2的快照, 预期chunk不存在 - ASSERT_EQ(0, verify->VerifyReadChunkSnapshot( - chunk2, sn1, 0, 12 * KB, nullptr)); + // Reading snapshot of chunk2, expected chunk not to exist + ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk2, sn1, 0, 12 * KB, + nullptr)); /* - * 场景二:第一次快照结束,删除快照 - */ - // 删除chunk1快照 + * Scenario 2: The first snapshot ends and the snapshot is deleted + */ + // Delete chunk1 snapshot ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn2)); - // 获取chunk1信息,预期其版本为2,无快照版本 + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn2)); + // Obtain chunk1 information, expect its version to be 2, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn2, NULL_SN, leader)); - // 删chunk2快照,预期成功 + // Delete chunk2 snapshot, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn2)); + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn2)); - // chunk2写[0, 8KB] + // Chunk2 write [0, 8KB] data.assign(length, 'f'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn2, 0, 8 *
KB, - data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为2,无快照版本 + data.c_str(), &chunkData2)); + // Obtain chunk2 information, expect its version to be 2, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn2, NULL_SN, leader)); /* - * 场景三:第二次打快照 - */ - // chunk1写[0, 8KB] - chunkData1c.assign(chunkData1b); // 模拟对chunk1数据进行COW + * Scenario 3: Taking a second snapshot + */ + // Chunk1 write [0, 8KB] + chunkData1c.assign(chunkData1b); // Simulate COW on chunk1 data data.assign(length, 'g'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn3, 0, 8 * KB, - data.c_str(), &chunkData1c)); - // 获取chunk1信息,预期其版本为3,快照版本为2 + data.c_str(), &chunkData1c)); + // Obtain chunk1 information, expect its version to be 3 and snapshot + // version to be 2 ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn3, sn2, leader)); - // 读取chunk1的快照, 预期读到版本2数据 + // Reading snapshot of chunk1, expected to read version 2 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn2, 0, 12 * KB, - &chunkData1b)); + &chunkData1b)); - // 读取chunk2的快照, 预期读到版本2数据 + // Reading snapshot of chunk2, expected to read version 2 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk2, sn2, 0, 8 * KB, - &chunkData2)); + &chunkData2)); - // 删除chunk1文件,预期成功,本地快照存在的情况下,会将快照也一起删除 + // Delete chunk1 file, expected success. If the local snapshot exists, + // the snapshot will also be deleted together ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunk(chunk1, sn3)); + verify->VerifyDeleteChunk(chunk1, sn3)); /* - * 场景四:第二次快照结束,删除快照 - */ - // 删除chunk1快照,因为chunk1及其快照上一步已经删除,预期成功 + * Scenario 4: The second snapshot ends and the snapshot is deleted + */ + // Delete chunk1 snapshot because chunk1 and its snapshot have been + // deleted in the previous step and are expected to succeed ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn3)); - // 获取chunk1信息,预期不存在 - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunk1, NULL_SN, NULL_SN, leader)); + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn3)); + // Obtaining chunk1 information, expected not to exist + ASSERT_EQ(0, + verify->VerifyGetChunkInfo(chunk1, NULL_SN, NULL_SN, leader)); - // 删除chunk2快照,预期成功 + // Delete chunk2 snapshot, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn3)); - // 获取chunk2信息,预期其版本为2,无快照版本 + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn3)); + // Obtain chunk2 information, expect its version to be 2, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn2, NULL_SN, leader)); - // chunk2写[0, 4KB] + // Chunk2 write [0, 4KB] data.assign(length, 'h'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn3, 0, 4 * KB, - data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为3,无快照版本 + data.c_str(), &chunkData2)); + // Obtain chunk2 information, expect its version to be 3, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn3, NULL_SN, leader)); - // chunk2写[0, 4KB] + // Chunk2 write [0, 4KB] data.assign(length, 'i'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn3, 0, 4 * KB, data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为3,无快照版本 + // Obtain chunk2 information, expect its version to be 3, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn3, NULL_SN, leader)); /* - * 场景五:用户删除文件 - */ - // 删除chunk1,已不存在,预期成功 + * Scenario 5: User deletes files + */ + // Delete chunk1, it no longer exists, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunk(chunk1, 
sn3)); - // 获取chunk1信息,预期不存在 - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunk1, NULL_SN, NULL_SN, leader)); - // 删除chunk2,预期成功 + verify->VerifyDeleteChunk(chunk1, sn3)); + // Obtaining chunk1 information, expected not to exist + ASSERT_EQ(0, + verify->VerifyGetChunkInfo(chunk1, NULL_SN, NULL_SN, leader)); + // Delete chunk2, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunk(chunk2, sn3)); - // 获取chunk2信息,预期不存在 - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunk2, NULL_SN, NULL_SN, leader)); + verify->VerifyDeleteChunk(chunk2, sn3)); + // Obtaining chunk2 information, expected not to exist + ASSERT_EQ(0, + verify->VerifyGetChunkInfo(chunk2, NULL_SN, NULL_SN, leader)); } public: @@ -370,7 +384,7 @@ class ChunkServerIoTest : public testing::Test { CopysetID copysetId_; std::map paramsIndexs_; - std::vector params_; + std::vector params_; std::string externalIp_; private: @@ -391,8 +405,8 @@ class ChunkServerIoTest : public testing::Test { * */ TEST_F(ChunkServerIoTest, BasicIO) { - struct ChunkServiceOpConf opConf = { &leaderPeer_, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&leaderPeer_, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestBasicIO(verify); } @@ -401,15 +415,15 @@ TEST_F(ChunkServerIoTest, BasicIO_from_external_ip) { Peer exPeer; exPeer.set_address(externalIp_ + ":" + BASIC_TEST_CHUNK_SERVER_PORT + ":0"); - struct ChunkServiceOpConf opConf = { &exPeer, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&exPeer, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestBasicIO(verify); } TEST_F(ChunkServerIoTest, SnapshotIO) { - struct ChunkServiceOpConf opConf = { &leaderPeer_, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&leaderPeer_, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestSnapshotIO(verify); } @@ -417,8 +431,8 @@ TEST_F(ChunkServerIoTest, SnapshotIO) { TEST_F(ChunkServerIoTest, SnapshotIO_from_external_ip) { Peer exPeer; exPeer.set_address(externalIp_ + ":" + BASIC_TEST_CHUNK_SERVER_PORT + ":0"); - struct ChunkServiceOpConf opConf = { &exPeer, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&exPeer, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestSnapshotIO(verify); } diff --git a/test/integration/chunkserver/chunkserver_clone_recover.cpp b/test/integration/chunkserver/chunkserver_clone_recover.cpp index 58ce282336..0aae174746 100644 --- a/test/integration/chunkserver/chunkserver_clone_recover.cpp +++ b/test/integration/chunkserver/chunkserver_clone_recover.cpp @@ -20,9 +20,9 @@ * Author: qinyi */ -#include -#include #include +#include +#include #include #include @@ -30,14 +30,14 @@ #include #include "include/client/libcurve.h" -#include "src/common/s3_adapter.h" -#include "src/common/timeutility.h" -#include "src/client/inflight_controller.h" #include "src/chunkserver/cli2.h" +#include "src/client/inflight_controller.h" #include "src/common/concurrent/count_down_event.h" -#include "test/integration/common/chunkservice_op.h" +#include "src/common/s3_adapter.h" +#include "src/common/timeutility.h" #include "test/integration/client/common/file_operation.h" #include "test/integration/cluster_common/cluster.h" +#include "test/integration/common/chunkservice_op.h" #include "test/util/config_generator.h" using curve::CurveCluster; @@ -91,11 +91,11 @@ const uint32_t kChunkSize = 16 * 1024 * 1024; const uint32_t 
kChunkServerMaxIoSize = 64 * 1024; const std::vector mdsConf0{ - { "--confPath=" + MDS0_CONF_PATH }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--mdsDbName=" + CSCLONE_TEST_MDS_DBNAME }, - { "--sessionInterSec=20" }, - { "--etcdAddr=" + ETCD_CLIENT_IP_PORT }, + {"--confPath=" + MDS0_CONF_PATH}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--mdsDbName=" + CSCLONE_TEST_MDS_DBNAME}, + {"--sessionInterSec=20"}, + {"--etcdAddr=" + ETCD_CLIENT_IP_PORT}, }; const std::vector mdsFileConf0{ @@ -129,73 +129,67 @@ const std::vector csCommonConf{ }; const std::vector chunkserverConf1{ - { "-chunkServerStoreUri=local://" + CHUNKSERVER0_BASE_DIR }, - { "-chunkServerMetaUri=local://" + CHUNKSERVER0_BASE_DIR + - "/chunkserver.dat" }, - { "-copySetUri=local://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, - { "-raftSnapshotUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, - { "-raftLogUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, - { "-recycleUri=local://" + CHUNKSERVER0_BASE_DIR + "/recycler" }, - { "-chunkFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool" }, - { "-chunkFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + - "/chunkfilepool.meta" }, - { "-conf=" + CHUNKSERVER_CONF_PATH }, - { "-raft_sync_segments=true" }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=" + CHUNK_SERVER0_PORT }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/walfilepool" }, - { "-walFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://" + CHUNKSERVER0_BASE_DIR}, + {"-chunkServerMetaUri=local://" + CHUNKSERVER0_BASE_DIR + + "/chunkserver.dat"}, + {"-copySetUri=local://" + CHUNKSERVER0_BASE_DIR + "/copysets"}, + {"-raftSnapshotUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets"}, + {"-raftLogUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets"}, + {"-recycleUri=local://" + CHUNKSERVER0_BASE_DIR + "/recycler"}, + {"-chunkFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool"}, + {"-chunkFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool.meta"}, + {"-conf=" + CHUNKSERVER_CONF_PATH}, + {"-raft_sync_segments=true"}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=" + CHUNK_SERVER0_PORT}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/walfilepool"}, + {"-walFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/walfilepool.meta"}}; const std::vector chunkserverConf2{ - { "-chunkServerStoreUri=local://" + CHUNKSERVER1_BASE_DIR }, - { "-chunkServerMetaUri=local://" + CHUNKSERVER1_BASE_DIR + - "/chunkserver.dat" }, - { "-copySetUri=local://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, - { "-raftSnapshotUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, - { "-raftLogUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, - { "-recycleUri=local://" + CHUNKSERVER1_BASE_DIR + "/recycler" }, - { "-chunkFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/filepool" }, - { "-chunkFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + - "/chunkfilepool.meta" }, - { "-conf=" + CHUNKSERVER_CONF_PATH }, - { "-raft_sync_segments=true" }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=" + CHUNK_SERVER1_PORT }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=" + CHUNKSERVER1_BASE_DIR + 
"/walfilepool" }, - { "-walFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://" + CHUNKSERVER1_BASE_DIR}, + {"-chunkServerMetaUri=local://" + CHUNKSERVER1_BASE_DIR + + "/chunkserver.dat"}, + {"-copySetUri=local://" + CHUNKSERVER1_BASE_DIR + "/copysets"}, + {"-raftSnapshotUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets"}, + {"-raftLogUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets"}, + {"-recycleUri=local://" + CHUNKSERVER1_BASE_DIR + "/recycler"}, + {"-chunkFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/filepool"}, + {"-chunkFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/chunkfilepool.meta"}, + {"-conf=" + CHUNKSERVER_CONF_PATH}, + {"-raft_sync_segments=true"}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=" + CHUNK_SERVER1_PORT}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/walfilepool"}, + {"-walFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/walfilepool.meta"}}; const std::vector chunkserverConf3{ - { "-chunkServerStoreUri=local://" + CHUNKSERVER2_BASE_DIR }, - { "-chunkServerMetaUri=local://" + CHUNKSERVER2_BASE_DIR + - "/chunkserver.dat" }, - { "-copySetUri=local://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, - { "-raftSnapshotUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, - { "-raftLogUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, - { "-recycleUri=local://" + CHUNKSERVER2_BASE_DIR + "/recycler" }, - { "-chunkFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/filepool" }, - { "-chunkFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + - "/chunkfilepool.meta" }, - { "-conf=" + CHUNKSERVER_CONF_PATH }, - { "-raft_sync_segments=true" }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=" + CHUNK_SERVER2_PORT }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/walfilepool" }, - { "-walFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://" + CHUNKSERVER2_BASE_DIR}, + {"-chunkServerMetaUri=local://" + CHUNKSERVER2_BASE_DIR + + "/chunkserver.dat"}, + {"-copySetUri=local://" + CHUNKSERVER2_BASE_DIR + "/copysets"}, + {"-raftSnapshotUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets"}, + {"-raftLogUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets"}, + {"-recycleUri=local://" + CHUNKSERVER2_BASE_DIR + "/recycler"}, + {"-chunkFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/filepool"}, + {"-chunkFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/chunkfilepool.meta"}, + {"-conf=" + CHUNKSERVER_CONF_PATH}, + {"-raft_sync_segments=true"}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=" + CHUNK_SERVER2_PORT}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/walfilepool"}, + {"-walFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/walfilepool.meta"}}; namespace curve { namespace chunkserver { @@ -203,7 +197,9 @@ namespace chunkserver { class CSCloneRecoverTest : public ::testing::Test { public: CSCloneRecoverTest() - : logicPoolId_(1), copysetId_(1), chunkData1_(kChunkSize, 'X'), + : logicPoolId_(1), + copysetId_(1), + chunkData1_(kChunkSize, 'X'), chunkData2_(kChunkSize, 'Y') {} void SetUp() { @@ -217,11 +213,11 @@ class CSCloneRecoverTest : public ::testing::Test { s3Conf); 
cluster_->PrepareConfig(MDS0_CONF_PATH, mdsFileConf0); - // 生成chunkserver配置文件 + // Generate chunkserver configuration file cluster_->PrepareConfig(CHUNKSERVER_CONF_PATH, csCommonConf); - // 1. 启动etcd + // 1. Start etcd LOG(INFO) << "begin to start etcd"; pid_t pid = cluster_->StartSingleEtcd( 1, ETCD_CLIENT_IP_PORT, ETCD_PEER_IP_PORT, @@ -231,19 +227,20 @@ class CSCloneRecoverTest : public ::testing::Test { ASSERT_GT(pid, 0); ASSERT_TRUE(cluster_->WaitForEtcdClusterAvalible(5)); - // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 + // 2. Start one mds first, make it a leader, and then start the other + // two mds nodes pid = cluster_->StartSingleMDS(0, MDS0_IP_PORT, MDS0_DUMMY_PORT, - mdsConf0, true); + mdsConf0, true); LOG(INFO) << "mds 0 started on " + MDS0_IP_PORT + ", pid = " << pid; ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); - // 生成topo.json + // Generate topo.json Json::Value topo; Json::Value servers; std::string chunkServerIpPort[] = {CHUNK_SERVER0_IP_PORT, - CHUNK_SERVER1_IP_PORT, - CHUNK_SERVER2_IP_PORT}; + CHUNK_SERVER1_IP_PORT, + CHUNK_SERVER2_IP_PORT}; for (int i = 0; i < 3; ++i) { Json::Value server; std::vector ipPort; @@ -278,7 +275,7 @@ class CSCloneRecoverTest : public ::testing::Test { topoConf << topo.toStyledString(); topoConf.close(); - // 3. 创建物理池 + // 3. Create a physical pool string createPPCmd = string("./bazel-bin/tools/curvefsTool") + string(" -cluster_map=" + CSCLONE_BASE_DIR + "/topo.json") + @@ -291,13 +288,12 @@ class CSCloneRecoverTest : public ::testing::Test { while (retry < 5) { LOG(INFO) << "exec createPPCmd: " << createPPCmd; ret = system(createPPCmd.c_str()); - if (ret == 0) - break; + if (ret == 0) break; retry++; } ASSERT_EQ(ret, 0); - // 4. 创建chunkserver + // 4. Create chunkservers pid = cluster_->StartSingleChunkServer(1, CHUNK_SERVER0_IP_PORT, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " + CHUNK_SERVER0_IP_PORT + @@ -319,7 +315,8 @@ class CSCloneRecoverTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // 5. Create a logical pool and sleep for a while to let the + // underlying copysets elect their leaders first string createLPCmd = string("./bazel-bin/tools/curvefsTool") + string(" -cluster_map=" + CSCLONE_BASE_DIR + "/topo.json") + @@ -331,27 +328,26 @@ class CSCloneRecoverTest : public ::testing::Test { while (retry < 5) { LOG(INFO) << "exec createLPCmd: " << createLPCmd; ret = system(createLPCmd.c_str()); - if (ret == 0) - break; + if (ret == 0) break; retry++; } ASSERT_EQ(ret, 0); std::this_thread::sleep_for(std::chrono::seconds(5)); - // 获取chunkserver主节点 + // Obtain the chunkserver leader node logicPoolId_ = 1; copysetId_ = 1; ASSERT_EQ(0, chunkSeverGetLeader()); - struct ChunkServiceOpConf conf0 = { &leaderPeer_, logicPoolId_, - copysetId_, 5000 }; + struct ChunkServiceOpConf conf0 = {&leaderPeer_, logicPoolId_, + copysetId_, 5000}; opConf_ = conf0; - // 6. 初始化client配置 + // 6. Initialize client configuration LOG(INFO) << "init globalclient"; ret = Init(clientConfPath.c_str()); ASSERT_EQ(ret, 0); - // 7. 先睡眠5s,让chunkserver选出leader + // 7.
Sleep for 5 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(5)); s3Adapter_.Init(kS3ConfigPath); @@ -417,10 +413,10 @@ class CSCloneRecoverTest : public ::testing::Test { system(("mkdir " + CHUNKSERVER2_BASE_DIR + "/filepool").c_str())); } - /**下发一个写请求并等待完成 - * @param: offset是当前需要下发IO的偏移 - * @param: size是下发IO的大小 - * @return: IO是否成功完成 + /** Send a write request and wait for completion + * @param: offset is the offset that currently requires issuing IO + * @param: size is the size of the issued IO + * @return: Is IO successfully completed */ bool HandleAioWriteRequest(uint64_t offset, uint64_t size, const char* data) { @@ -432,7 +428,8 @@ class CSCloneRecoverTest : public ::testing::Test { char* buffer = reinterpret_cast(context->buf); delete[] buffer; delete context; - // 无论IO是否成功,只要返回,就触发cond + // Regardless of whether IO is successful or not, as long as it + // returns, it triggers cond gCond.Signal(); }; @@ -447,8 +444,7 @@ class CSCloneRecoverTest : public ::testing::Test { int ret; if ((ret = AioWrite(fd_, context))) { - LOG(ERROR) << "failed to send aio write request, err=" - << ret; + LOG(ERROR) << "failed to send aio write request, err=" << ret; return false; } @@ -460,11 +456,11 @@ class CSCloneRecoverTest : public ::testing::Test { return true; } - /**下发一个读请求并等待完成 - * @param: offset是当前需要下发IO的偏移 - * @param: size是下发IO的大小 - * @data: 读出的数据 - * @return: IO是否成功完成 + /** Send a read request and wait for completion + * @param: offset is the offset that currently requires issuing IO + * @param: size is the size of the issued IO + * @data: Read out data + * @return: Is IO successfully completed */ bool HandleAioReadRequest(uint64_t offset, uint64_t size, char* data) { gCond.Reset(1); @@ -473,7 +469,8 @@ class CSCloneRecoverTest : public ::testing::Test { auto readCallBack = [](CurveAioContext* context) { gIoRet = context->ret; delete context; - // 无论IO是否成功,只要返回,就触发cond + // Regardless of whether IO is successful or not, as long as it + // returns, it triggers cond gCond.Signal(); }; @@ -485,8 +482,7 @@ class CSCloneRecoverTest : public ::testing::Test { context->cb = readCallBack; int ret; if ((ret = AioRead(fd_, context))) { - LOG(ERROR) << "failed to send aio read request, err=" - << ret; + LOG(ERROR) << "failed to send aio read request, err=" << ret; return false; } @@ -547,7 +543,7 @@ class CSCloneRecoverTest : public ::testing::Test { return -1; } - // 先睡眠5s,让chunkserver选出leader + // Sleep for 5 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(5)); status = curve::chunkserver::GetLeader(logicPoolId_, copysetId_, csConf, &leaderPeer_); @@ -559,26 +555,26 @@ class CSCloneRecoverTest : public ::testing::Test { } void prepareSourceDataInCurve() { - // 创建一个curveFS文件 + // Create a curveFS file LOG(INFO) << "create source curveFS file: " << CURVEFS_FILENAME; fd_ = curve::test::FileCommonOperation::Open(CURVEFS_FILENAME, "curve"); ASSERT_NE(fd_, -1); - // 写数据到curveFS的第1个chunk + // Write data to the first chunk of curveFS LOG(INFO) << "Write first 16MB of source curveFS file"; ASSERT_TRUE(HandleAioWriteRequest(0, kChunkSize, chunkData1_.c_str())); - // 读出数据进行验证 + // Read data for verification std::unique_ptr temp(new char[kChunkSize]); ASSERT_TRUE(HandleAioReadRequest(0, kChunkSize, temp.get())); ASSERT_EQ(0, strncmp(chunkData1_.c_str(), temp.get(), kChunkSize)); - // 写数据到curveFS的第2个chunk + // Write data to the second chunk of curveFS LOG(INFO) << "Write second 16MB of source curveFS file"; 
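[Editor's note] HandleAioWriteRequest and HandleAioReadRequest above share one shape: issue the async request, block on a condition that the completion callback signals whether or not the I/O succeeded, then inspect gIoRet. A minimal sketch of that wait-for-callback pattern, with std::promise standing in for the CountDownEvent used in these tests and FakeAsyncWrite standing in for AioWrite plus its callback:

    #include <future>
    #include <iostream>
    #include <thread>

    // Sketch only: illustrative types, not the libcurve API.
    struct FakeAioContext {
        int ret = 0;
        std::promise<int>* done = nullptr;
    };

    std::thread FakeAsyncWrite(FakeAioContext* ctx) {
        return std::thread([ctx] {
            ctx->ret = 4096;                 // pretend 4 KiB completed
            ctx->done->set_value(ctx->ret);  // signal, success or not
        });
    }

    int main() {
        std::promise<int> done;
        FakeAioContext ctx{0, &done};
        std::thread io = FakeAsyncWrite(&ctx);
        // Block until the completion callback fires, then check the result.
        int ret = done.get_future().get();
        io.join();
        std::cout << "aio returned " << ret << '\n';
        return ret == 4096 ? 0 : 1;
    }
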
ASSERT_TRUE( HandleAioWriteRequest(kChunkSize, kChunkSize, chunkData2_.c_str())); - // 读出数据进行验证 + // Read data for verification ASSERT_TRUE(HandleAioReadRequest(kChunkSize, kChunkSize, temp.get())); ASSERT_EQ(0, strncmp(chunkData2_.c_str(), temp.get(), kChunkSize)); @@ -613,14 +609,14 @@ class CSCloneRecoverTest : public ::testing::Test { bool s3ObjExisted_; }; -// 场景一:通过ReadChunk从curve恢复克隆文件 +// Scenario 1: Restore clone files from curve through ReadChunk TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { LOG(INFO) << "current case: CloneFromCurveByReadChunk"; - // 0. 在curve中写入源数据 + // 0. Write source data in curve prepareSourceDataInCurve(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 331; ChunkID cloneChunk2 = 332; @@ -633,7 +629,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -647,7 +643,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过readchunk恢复克隆文件 + // 2. Restoring cloned files through readchunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, cloneData1.get())); @@ -667,17 +663,18 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { - ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, - kChunkServerMaxIoSize, - cloneData1.get())); + ASSERT_EQ( + 0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, + kChunkServerMaxIoSize, cloneData1.get())); } /** - * clone文件遍读后不会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Clone files will not be converted to regular chunk1 files after being + * read through Write by increasing the version, If it is a clone chunk, the + * write will fail; If it is a regular chunk, a snapshot file will be + * generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -685,19 +682,19 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn1)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景二:通过RecoverChunk从curve恢复克隆文件 +// Scenario 2: Restore cloned files from curve through RecoverChunk TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { LOG(INFO) << "current case: CloneFromCurveByRecoverChunk"; - // 0. 在curve中写入源数据 + // 0. Write source data in curve prepareSourceDataInCurve(); - // 1. 创建克隆文件 + // 1. 
Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 333; ChunkID cloneChunk2 = 334; @@ -710,7 +707,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -724,7 +721,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过RecoverChunk恢复克隆文件 + // 2. Recovering cloned files through RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, @@ -750,16 +747,17 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过RecoverChunk恢复clone chunk1的所有pages + // Restore all pages of clone chunk1 through RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** - * 预期clone文件会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件,写成功。 + * Expected clone file to be converted to regular chunk1 file + * Write by increasing the version, + * If it is a clone chunk, the write will fail; If it is a regular chunk, a + * snapshot file will be generated and successfully written. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -767,19 +765,19 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景三:lazy allocate场景下读克隆文件 +// Scenario 3: Lazy allocate scenario: Reading clone files TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { LOG(INFO) << "current case: CloneFromCurveByReadChunkWhenLazyAlloc"; - // 0. 在curve中写入源数据 + // 0. Write source data in curve prepareSourceDataInCurve(); - // 1. chunk文件不存在 + // 1. Chunk file does not exist ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 331; SequenceNum sn1 = 1; @@ -802,9 +800,9 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 将leader切换到follower + // Switch leader to follower ASSERT_EQ(0, TransferLeaderToFollower()); - // 2. 通过readchunk恢复克隆文件 + // 2. 
Restoring cloned files through readchunk ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get(), CURVEFS_FILENAME, 0)); @@ -817,7 +815,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get(), CURVEFS_FILENAME, 0)); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk string ioBuf(kChunkServerMaxIoSize, 'c'); for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyWriteChunk( @@ -828,9 +826,10 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { cloneData1.get(), CURVEFS_FILENAME, 0)); /** - * clone文件遍写后会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * The clone file will be converted to a regular chunk1 file after being + * overwritten Write by increasing the version, If it is a clone chunk, the + * write will fail; If it is a regular chunk, a snapshot file will be + * generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -838,18 +837,18 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); } -// 场景四:通过ReadChunk从S3恢复克隆文件 +// Scenario 4: Restore cloned files from S3 through ReadChunk TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { LOG(INFO) << "current case: CloneFromS3ByReadChunk"; - // 0. 在S3中写入源数据 + // 0. Write source data in S3 prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 335; ChunkID cloneChunk2 = 336; @@ -862,7 +861,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -875,7 +874,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过readchunk恢复克隆文件 + // 2. Restoring cloned files through readchunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, cloneData1.get())); @@ -895,17 +894,18 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { - ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, - kChunkServerMaxIoSize, - cloneData1.get())); + ASSERT_EQ( + 0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, + kChunkServerMaxIoSize, cloneData1.get())); } /** - * 预期clone文件遍读后不会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * It is expected that the clone file will not be converted to a regular + * chunk1 file after being read through Write by increasing the version, If + * it is a clone chunk, the write will fail; If it is a regular chunk, a + * snapshot file will be generated. 
*/ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -913,19 +913,19 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn1)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景五:通过RecoverChunk从S3恢复克隆文件 +// Scenario 5: Restore cloned files from S3 through RecoverChunk TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { LOG(INFO) << "current case: CloneFromS3ByRecoverChunk"; - // 0. 在S3中写入源数据 + // 0. Write source data in S3 prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 337; ChunkID cloneChunk2 = 338; @@ -938,7 +938,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -951,7 +951,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过RecoverChunk恢复克隆文件 + // 2. Recovering cloned files through RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, @@ -977,16 +977,17 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过RecoverChunk恢复clone chunk1的所有pages + // Restore all pages of clone chunk1 through RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** - * 预期clone文件会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Expected clone file to be converted to regular chunk1 file + * Write by increasing the version, + * If it is a clone chunk, the write will fail; If it is a regular chunk, a + * snapshot file will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -994,19 +995,19 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景六:通过ReadChunk从S3恢复 +// Scenario 6: Restore from S3 through ReadChunk TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { LOG(INFO) << "current case: RecoverFromS3ByReadChunk"; - // 0. 构造数据上传到S3,模拟转储 + // 0. Upload construction data to S3 and simulate dump prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 339; SequenceNum sn2 = 2; @@ -1018,13 +1019,13 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn3, sn2, kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 2. 通过readchunk恢复克隆文件 + // 2. 
Restoring cloned files through readchunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 8 * KB, cloneData1.get())); @@ -1044,17 +1045,18 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 12 * KB, cloneData1.get())); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { - ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, offset, - kChunkServerMaxIoSize, - cloneData1.get())); + ASSERT_EQ( + 0, verify.VerifyReadChunk(cloneChunk1, sn3, offset, + kChunkServerMaxIoSize, cloneData1.get())); } /** - * 预期clone文件不会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Expected clone file not to be converted to regular chunk1 file + * Write by increasing the version, + * If it is a clone chunk, the write will fail; If it is a regular chunk, a + * snapshot file will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn3, NULL_SN, string(""))); @@ -1062,18 +1064,18 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { verify.VerifyWriteChunk(cloneChunk1, sn4, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn3)); } -// 场景七:通过RecoverChunk从S3恢复 +// Scenario 7: Recovering from S3 through RecoverChunk TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { LOG(INFO) << "current case: RecoverFromS3ByRecoverChunk"; - // 0. 在S3中写入源数据 + // 0. Write source data in S3 prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 341; SequenceNum sn2 = 2; @@ -1085,13 +1087,13 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn3, sn2, kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 2. 通过RecoverChunk恢复克隆文件 + // 2. Recovering cloned files through RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 8 * KB, @@ -1117,16 +1119,17 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 12 * KB, cloneData1.get())); - // 通过RecoverChunk恢复clone chunk1的所有pages + // Restore all pages of clone chunk1 through RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** - * 预期clone文件会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Expected clone file to be converted to regular chunk1 file + * Write by increasing the version, + * If it is a clone chunk, the write will fail; If it is a regular chunk, a + * snapshot file will be generated. 
*/ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn3, NULL_SN, string(""))); @@ -1134,7 +1137,7 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn4, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn4)); } diff --git a/test/integration/chunkserver/chunkserver_concurrent_test.cpp b/test/integration/chunkserver/chunkserver_concurrent_test.cpp index a5ac75a823..acf24fc63a 100644 --- a/test/integration/chunkserver/chunkserver_concurrent_test.cpp +++ b/test/integration/chunkserver/chunkserver_concurrent_test.cpp @@ -21,72 +21,60 @@ */ #include -#include #include +#include #include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" +#include "src/common/concurrent/concurrent.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "src/common/concurrent/concurrent.h" -#include "test/integration/common/peer_cluster.h" #include "test/chunkserver/datastore/filepool_helper.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::common::Thread; +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; -using curve::common::Thread; static const char* kFakeMdsAddr = "127.0.0.1:9329"; constexpr uint32_t kOpRequestAlignSize = 4096; -static const char *chunkConcurrencyParams1[1][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9076", - "-chunkServerStoreUri=local://./9076/", - "-chunkServerMetaUri=local://./9076/chunkserver.dat", - "-copySetUri=local://./9076/copysets", - "-raftSnapshotUri=curve://./9076/copysets", - "-raftLogUri=curve://./9076/copysets", - "-recycleUri=local://./9076/recycler", - "-chunkFilePoolDir=./9076/chunkfilepool/", - "-chunkFilePoolMetaPath=./9076/chunkfilepool.meta", - "-walFilePoolDir=./9076/walfilepool/", - "-walFilePoolMetaPath=./9076/walfilepool.meta", - "-conf=./9076/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, +static const char* chunkConcurrencyParams1[1][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9076", + "-chunkServerStoreUri=local://./9076/", + "-chunkServerMetaUri=local://./9076/chunkserver.dat", + "-copySetUri=local://./9076/copysets", + "-raftSnapshotUri=curve://./9076/copysets", + "-raftLogUri=curve://./9076/copysets", + "-recycleUri=local://./9076/recycler", + "-chunkFilePoolDir=./9076/chunkfilepool/", + "-chunkFilePoolMetaPath=./9076/chunkfilepool.meta", + "-walFilePoolDir=./9076/walfilepool/", + "-walFilePoolMetaPath=./9076/walfilepool.meta", + "-conf=./9076/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; -static const char *chunkConcurrencyParams2[1][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9077", - "-chunkServerStoreUri=local://./9077/", - "-chunkServerMetaUri=local://./9077/chunkserver.dat", - "-copySetUri=local://./9077/copysets", - "-raftSnapshotUri=curve://./9077/copysets", - "-raftLogUri=curve://./9077/copysets", - "-recycleUri=local://./9077/recycler", - "-chunkFilePoolDir=./9077/chunkfilepool/", - "-chunkFilePoolMetaPath=./9077/chunkfilepool.meta", - "-walFilePoolDir=./9077/walfilepool/", - "-walFilePoolMetaPath=./9077/walfilepool.meta", - "-conf=./9077/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, +static const 
char* chunkConcurrencyParams2[1][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9077", + "-chunkServerStoreUri=local://./9077/", + "-chunkServerMetaUri=local://./9077/chunkserver.dat", + "-copySetUri=local://./9077/copysets", + "-raftSnapshotUri=curve://./9077/copysets", + "-raftLogUri=curve://./9077/copysets", + "-recycleUri=local://./9077/recycler", + "-chunkFilePoolDir=./9077/chunkfilepool/", + "-chunkFilePoolMetaPath=./9077/chunkfilepool.meta", + "-walFilePoolDir=./9077/walfilepool/", + "-walFilePoolMetaPath=./9077/walfilepool.meta", + "-conf=./9077/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; butil::AtExitManager atExitManager; @@ -94,7 +82,7 @@ const int kChunkNum = 10; const ChunkSizeType kChunkSize = 16 * 1024 * 1024; const PageSizeType kPageSize = kOpRequestAlignSize; -// chunk不从FilePool获取的chunkserver并发测试 +// Chunkserver concurrency testing for chunks not obtained from FilePool class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { protected: virtual void SetUp() { @@ -136,14 +124,14 @@ class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { ::usleep(100 * 1000); } - void InitCluster(PeerCluster *cluster) { + void InitCluster(PeerCluster* cluster) { PeerId leaderId; Peer leaderPeer; cluster->SetElectionTimeoutMs(electionTimeoutMs); cluster->SetsnapshotIntervalS(snapshotIntervalS); ASSERT_EQ(0, cluster->StartPeer(peer1, PeerCluster::PeerToId(peer1))); - // 等待leader产生 + // Waiting for the leader to be generated ASSERT_EQ(0, cluster->WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); @@ -162,10 +150,10 @@ class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { CopysetID copysetId; std::map paramsIndexs; - std::vector params; + std::vector params; }; -// chunk从FilePool获取的chunkserver并发测试 +// Chunkserver concurrency test obtained by chunk from FilePool class ChunkServerConcurrentFromFilePoolTest : public testing::Test { protected: virtual void SetUp() { @@ -182,7 +170,6 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { electionTimeoutMs = 3000; snapshotIntervalS = 60; - ASSERT_TRUE(cg1.Init("9077")); cg1.SetKV("copyset.election_timeout_ms", "3000"); cg1.SetKV("copyset.snapshot_interval_s", "60"); @@ -198,14 +185,12 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { params.push_back(const_cast(chunkConcurrencyParams2[0])); - // 初始化FilePool,这里会预先分配一些chunk + // Initialize FilePool, where some chunks will be pre allocated lfs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); - poolDir = "./" - + std::to_string(PeerCluster::PeerToId(peer1)) - + "/chunkfilepool/"; - metaDir = "./" - + std::to_string(PeerCluster::PeerToId(peer1)) - + "/chunkfilepool.meta"; + poolDir = "./" + std::to_string(PeerCluster::PeerToId(peer1)) + + "/chunkfilepool/"; + metaDir = "./" + std::to_string(PeerCluster::PeerToId(peer1)) + + "/chunkfilepool.meta"; FilePoolMeta meta(kChunkSize, kPageSize, poolDir); FilePoolHelper::PersistEnCodeMetaInfo(lfs, meta, metaDir); @@ -213,7 +198,7 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { // There maybe one chunk in cleaning, so you should allocate // (kChunkNum + 1) chunks in start if you want to use kChunkNum chunks. 
// This situation will not occur in the production environment - allocateChunk(lfs, kChunkNum+1, poolDir, kChunkSize); + allocateChunk(lfs, kChunkNum + 1, poolDir, kChunkSize); } virtual void TearDown() { std::string rmdir1("rm -fr "); @@ -224,14 +209,14 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { // wait for process exit ::usleep(100 * 1000); } - void InitCluster(PeerCluster *cluster) { + void InitCluster(PeerCluster* cluster) { PeerId leaderId; Peer leaderPeer; cluster->SetElectionTimeoutMs(electionTimeoutMs); cluster->SetsnapshotIntervalS(snapshotIntervalS); ASSERT_EQ(0, cluster->StartPeer(peer1, PeerCluster::PeerToId(peer1))); - // 等待leader产生 + // Waiting for the leader to be generated ASSERT_EQ(0, cluster->WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); @@ -243,28 +228,23 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { std::vector peers; PeerId leaderId; Peer leaderPeer; - int electionTimeoutMs; - int snapshotIntervalS; + int electionTimeoutMs; + int snapshotIntervalS; LogicPoolID logicPoolId; - CopysetID copysetId; + CopysetID copysetId; - std::map paramsIndexs; - std::vector params; + std::map paramsIndexs; + std::vector params; std::string poolDir; std::string metaDir; - std::shared_ptr lfs; + std::shared_ptr lfs; }; -// 写chunk -int WriteChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - off_t offset, - size_t len, - const char *data, +// Write chunk +int WriteChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkId, off_t offset, size_t len, const char* data, const int sn = 1) { PeerId leaderId(leader.address()); brpc::Channel channel; @@ -299,13 +279,9 @@ int WriteChunk(Peer leader, return 0; } -// 随机选择一个chunk的随机offset进行read -void RandReadChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkIdRange, - const int loop, - const int sn = 1) { +// Randomly select a chunk's random offset for read +void RandReadChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkIdRange, const int loop, const int sn = 1) { int ret = 0; uint64_t appliedIndex = 1; PeerId leaderId(leader.address()); @@ -314,7 +290,7 @@ void RandReadChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -330,7 +306,7 @@ void RandReadChunk(Peer leader, request.set_size(kOpRequestAlignSize); request.set_appliedindex(appliedIndex); - // 随机选择一个offset + // Randomly select an offset uint64_t pageIndex = butil::fast_rand_less_than(kChunkSize / kPageSize); request.set_offset(pageIndex * kPageSize); @@ -342,7 +318,8 @@ void RandReadChunk(Peer leader, } if (response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS && - response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST) { //NOLINT + response.status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST) { // NOLINT LOG(INFO) << "read failed: " << CHUNK_OP_STATUS_Name(response.status()); ret = -1; @@ -352,13 +329,9 @@ void RandReadChunk(Peer leader, } } -// 随机选择一个chunk的随机offset进行write -void RandWriteChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkIdRange, - const int loop, - const int sn = 1) { +// Randomly select a chunk's random offset for writing +void RandWriteChunk(Peer leader, LogicPoolID logicPoolId, 
CopysetID copysetId, + ChunkID chunkIdRange, const int loop, const int sn = 1) { int ret = 0; char data[kOpRequestAlignSize] = {'a'}; int length = kOpRequestAlignSize; @@ -369,7 +342,7 @@ void RandWriteChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -385,7 +358,7 @@ void RandWriteChunk(Peer leader, request.set_size(kOpRequestAlignSize); cntl.request_attachment().append(data, length); - // 随机选择一个offset + // Randomly select an offset uint64_t pageIndex = butil::fast_rand_less_than(kChunkSize / kPageSize); request.set_offset(pageIndex * kPageSize); @@ -406,12 +379,9 @@ void RandWriteChunk(Peer leader, } } -// 随机选择一个chunk删除 -void RandDeleteChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkIdRange, - const int loop) { +// Randomly select a chunk to delete +void RandDeleteChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkIdRange, const int loop) { int ret = 0; PeerId leaderId(leader.address()); @@ -420,7 +390,7 @@ void RandDeleteChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -450,12 +420,9 @@ void RandDeleteChunk(Peer leader, } } -// 创建clone chunk -void CreateCloneChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID start, - ChunkID end) { +// Create clone chunk +void CreateCloneChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID start, ChunkID end) { int ret = 0; SequenceNum sn = 2; SequenceNum correctedSn = 1; @@ -497,10 +464,10 @@ void CreateCloneChunk(Peer leader, } /** - * chunk不是事先在FilePool分配好的 + * Chunks are not pre-allocated in FilePool */ -// 多线程并发随机读同一个chunk +// Multiple threads simultaneously randomly read the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { uint64_t chunkId = 1; off_t offset = 0; @@ -511,37 +478,21 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); + // 2. Initiate a write to the chunk to ensure that it has been generated + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, chunkId, offset, + length, data.c_str(), sn)); - // 2. 对chunk发起一次写,保证chunk已经产生 - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - data.c_str(), - sn)); - - // 3. 起多个线程执行随机read chunk + // 3. 
Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -549,33 +500,24 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { } } -// 多线程并发随机写同一个chunk +// Multiple threads concurrently randomly write the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteOneChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -583,8 +525,9 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteOneChunk) { } } -// 多线程并发写同一个chunk同一个offset -TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT +// Multiple threads simultaneously writing the same chunk and offset +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, + WriteOneChunkOnTheSameOffset) { // NOLINT const int kThreadNum = 10; std::vector datas; ChunkID chunkId = 1; @@ -592,29 +535,19 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { int length = 2 * kOpRequestAlignSize; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { std::string data(length, 'a' + i); datas.push_back(data); - threads.push_back(Thread(WriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - datas[i].c_str(), + threads.push_back(Thread(WriteChunk, leaderPeer, logicPoolId, copysetId, + chunkId, offset, length, datas[i].c_str(), sn)); } @@ -622,7 +555,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { threads[j].join(); } - // 3. 将数据read出来验证 + // 3. 
Read out the data for verification brpc::Channel channel; channel.Init(leaderId.addr, NULL); ChunkService_Stub stub(&channel); @@ -646,7 +579,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { std::string result = cntl.response_attachment().to_string(); ASSERT_EQ(length, result.size()); - // 读出来的数据的字符>='a' 且<= 'a' + kThreadNum - 1 + // The characters of the read data >='a' and <= 'a' + kThreadNum - 1 ASSERT_GE(result[0] - 'a', 0); ASSERT_LE(result[0] - 'a', kThreadNum - 1); for (int i = 1; i < length - 1; ++i) { @@ -654,7 +587,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { } }
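The assertions above encode the guarantee this test checks: concurrent writes to the same offset may land in any order, but each write must be applied atomically, so the read-back buffer must hold exactly one writer's data. An equivalent check, shown only as an illustrative sketch (result and kThreadNum as in the test above):

    // All bytes must come from a single writer: the buffer is uniform and
    // filled with one thread's character, never an interleaving of two.
    char c = result[0];
    ASSERT_GE(c - 'a', 0);                 // written by a known thread
    ASSERT_LE(c - 'a', kThreadNum - 1);
    for (size_t i = 1; i < result.size(); ++i) {
        ASSERT_EQ(c, result[i]);           // no torn write
    }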
-// 多线程并发随机读写同一个chunk +// Multiple threads concurrently randomly read and write the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -664,50 +597,30 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 3. 起多个线程执行随机read write chunk + // 3. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -716,7 +629,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { } } -// 多线程并发读不同的chunk +// Multiple threads concurrently reading different chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -726,38 +639,23 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 3. 起多个线程执行随机read chunk + // 3. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -765,33 +663,26 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { } } -// 多线程并发读不同的chunk,注意这些chunk都还没有被写过 -TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiNotExistChunk) { //NOLINT +// Multiple threads simultaneously read different chunks, please note that none +// of these chunks have been written yet +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, + RandReadMultiNotExistChunk) { // NOLINT const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read chunk + // 2. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -799,7 +690,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiNotExistChunk) { } } -// 多线程并发随机写同多个chunk +// Multiple threads concurrently randomly write to multiple chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -809,39 +700,26 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生,避免下面同时从 - // chunkfile pool生成new chunk导致write 超时失败 + // 2. Write each chunk once to make sure it has been generated, so that the + // concurrent writes below do not all allocate new chunks from the + // chunkfile pool at the same time and fail with write timeouts for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 4. 起多个线程执行随机write chunk + // 4. 
Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -849,7 +727,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { } } -// 多线程并发随机读写同多个chunk +// Multiple threads concurrently randomly read and write multiple chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -857,38 +735,24 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read write chunk + // 2. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -897,7 +761,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { } } -// 多线程并发删除不同的chunk +// Multiple threads concurrently delete different chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -907,38 +771,24 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 3. 起多个线程执行随机delete chunk + // 3. 
Starting multiple threads to execute random delete chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - // 起delete线程 - threads.push_back(Thread(RandDeleteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop)); + // Start delete thread + threads.push_back(Thread(RandDeleteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop)); } for (int j = 0; j < kThreadNum; ++j) { @@ -946,30 +796,23 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { } } -// 多线程并发create clone不同的chunk +// Multiple threads concurrently create clone chunks for different chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, CreateCloneMultiChunk) { const int kThreadNum = 10; ChunkID chunkIdRange = kChunkNum; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机create clone chunk + // 2. Starting multiple threads to execute random create clone chunks std::vector threads; int chunksPerThread = chunkIdRange / kThreadNum; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(CreateCloneChunk, - leaderPeer, - logicPoolId, - copysetId, - i * chunksPerThread + 1, + threads.push_back(Thread(CreateCloneChunk, leaderPeer, logicPoolId, + copysetId, i * chunksPerThread + 1, (i + 1) * chunksPerThread)); } @@ -979,10 +822,10 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, CreateCloneMultiChunk) { } } /** - * chunk是事先在FilePool分配好的 + * Chunks are pre-allocated in FilePool */ -// 多线程并发随机读同一个chunk +// Multiple threads simultaneously randomly read the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { uint64_t chunkId = 1; off_t offset = 0; @@ -993,36 +836,21 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - data.c_str(), - sn)); - - // 3. 起多个线程执行随机read chunk + // 2. Initiate a write to the chunk to ensure that it has been generated + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, chunkId, offset, + length, data.c_str(), sn)); + + // 3. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1030,33 +858,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { } } -// 多线程并发随机写同一个chunk +// Multiple threads concurrently randomly write the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteOneChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 
启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunk std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1064,8 +883,9 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteOneChunk) { } } -// 多线程并发写同一个chunk同一个offset -TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT +// Multiple threads simultaneously writing the same chunk and offset +TEST_F(ChunkServerConcurrentFromFilePoolTest, + WriteOneChunkOnTheSameOffset) { // NOLINT const int kThreadNum = 10; std::vector datas; ChunkID chunkId = 1; @@ -1073,29 +893,19 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { int length = 2 * kOpRequestAlignSize; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunk std::vector threads; for (int i = 0; i < kThreadNum; ++i) { std::string data(length, 'a' + i); datas.push_back(data); - threads.push_back(Thread(WriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - datas[i].c_str(), + threads.push_back(Thread(WriteChunk, leaderPeer, logicPoolId, copysetId, + chunkId, offset, length, datas[i].c_str(), sn)); } @@ -1103,7 +913,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { threads[j].join(); } - // 4. 将数据read出来验证 + // 4. Read out the data for verification brpc::Channel channel; channel.Init(leaderId.addr, NULL); ChunkService_Stub stub(&channel); @@ -1127,7 +937,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { std::string result = cntl.response_attachment().to_string(); ASSERT_EQ(length, result.size()); - // 读出来的数据的字符>='a' 且<= 'a' + kThreadNum - 1 + // The characters of the read data >='a' and <= 'a' + kThreadNum - 1 ASSERT_GE(result[0] - 'a', 0); ASSERT_LE(result[0] - 'a', kThreadNum - 1); for (int i = 1; i < length - 1; ++i) { @@ -1135,7 +945,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { } } -// 多线程并发随机读写同一个chunk +// Multiple threads concurrently randomly read and write the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -1143,38 +953,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. 
Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read write chunk + // 2. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -1183,7 +979,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { } } -// 多线程并发读不同的chunk +// Multiple threads concurrently reading different chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -1193,38 +989,23 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 4. 起多个线程执行随机read chunk + // 4. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1232,33 +1013,25 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { } } -// 多线程并发读不同的chunk,注意这些chunk都还没有被写过 +// Multiple threads simultaneously read different chunks, please note that none +// of these chunks have been written yet TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiNotExistChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read chunk + // 2. 
Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1266,7 +1039,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiNotExistChunk) { } } -// 多线程并发随机写同多个chunk +// Multiple threads concurrently randomly write to multiple chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -1274,26 +1047,17 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1301,7 +1065,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) { } } -// 多线程并发随机读写同多个chunk +// Multiple threads concurrently randomly read and write multiple chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -1309,38 +1073,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read write chunk + // 2. 
Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -1349,7 +1099,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { } } -// 多线程并发删除不同的chunk +// Multiple threads concurrently delete different chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -1359,38 +1109,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 3. 起多个线程执行随机delete chunk + // 3. Starting multiple threads to execute random delete chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - // 起delete线程 - threads.push_back(Thread(RandDeleteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop)); + // Start delete thread + threads.push_back(Thread(RandDeleteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1398,30 +1134,23 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { } } -// 多线程并发create clone不同的chunk +// Multiple threads concurrently create clone chunks for different chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) { const int kThreadNum = 10; ChunkID chunkIdRange = kChunkNum; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机create clone chunk + // 2. 
Starting multiple threads to execute random create clone chunks std::vector threads; int chunksPerThread = chunkIdRange / kThreadNum; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(CreateCloneChunk, - leaderPeer, - logicPoolId, - copysetId, - i * chunksPerThread + 1, + threads.push_back(Thread(CreateCloneChunk, leaderPeer, logicPoolId, + copysetId, i * chunksPerThread + 1, (i + 1) * chunksPerThread)); } @@ -1430,7 +1159,8 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) { } } -// 多线程并发随机读写同多个chunk,同事伴随这并发的COW +// Multiple threads concurrently randomly read and write multiple chunks, +// accompanied by concurrent COW TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -1440,52 +1170,32 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) { ChunkID chunkIdRange = kChunkNum / 2; int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 用低版本的sn写一遍chunk + // 2. Write every chunk once with the lower sn for (int k = 1; k <= chunkIdRange; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // sn加1,保证后面的write会产生COW + // Add 1 to sn to ensure that subsequent writes will generate COW sn += 1;
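A minimal sketch of the copy-on-write rule this test relies on, with illustrative types and names only (the real logic lives in the chunkserver datastore, not in these test helpers):

    // Assumed behaviour: a write carrying a newer sn than the chunk's
    // current sn first preserves the pages it overwrites in a snapshot
    // chunk, then bumps curSn and applies the new data.
    void WriteWithCow(Chunk* chunk, SequenceNum writeSn, off_t offset,
                      const char* data, size_t length) {
        if (writeSn > chunk->curSn) {
            chunk->snapshot->CopyFrom(*chunk, offset, length);  // COW
            chunk->curSn = writeSn;
        }
        chunk->Write(offset, data, length);
    }

This is why the test writes every chunk with sn = 1 first and only then bumps sn: the second round of writes is guaranteed to exercise the COW path concurrently with the reads.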
- // 3. 起多个线程执行随机read write chunk + // 3. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(10); if (read <= 1) { - // 起read线程,20%概率 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread with a 20% probability + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } diff --git a/test/integration/chunkserver/datastore/datastore_basic_test.cpp b/test/integration/chunkserver/datastore/datastore_basic_test.cpp index 14fdc3901c..a7367253c5 100644 --- a/test/integration/chunkserver/datastore/datastore_basic_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_basic_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_bas"; // NOLINT -const string poolDir = "./chunkfilepool_int_bas"; // NOLINT +const string baseDir = "./data_int_bas"; // NOLINT +const string poolDir = "./chunkfilepool_int_bas"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_bas.meta"; // NOLINT class BasicTestSuit : public DatastoreIntegrationBase { @@ -36,51 +36,49 @@ class BasicTestSuit : public DatastoreIntegrationBase { }; /** - * 基本功能测试验证 - * 读、写、删、获取文件信息 + * Basic functionality verification: + * read, write, delete, and get file information */ TEST_F(BasicTestSuit, BasicTest) { ChunkID id = 1; SequenceNum sn = 1; off_t offset = 0; size_t length = PAGE_SIZE; - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(id); + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(id); CSErrorCode errorCode; CSChunkInfo info; - /******************场景一:新建的文件,Chunk文件不存在******************/ + /******************Scenario 1: Newly created file, the chunk file does + * not exist******************/ - // 文件不存在 + // File does not exist ASSERT_FALSE(lfs_->FileExists(chunkPath)); - // 读chunk时返回ChunkNotExistError + // ChunkNotExistError returned when reading chunk char readbuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 无法获取到chunk的版本号 + // Unable to obtain the version number of the chunk errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 删除chunk返回Success + // Deleting the chunk returns Success errorCode = dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); - /******************场景二:通过WriteChunk产生chunk文件后操作**************/ + /****************** Scenario 2: Operations after the chunk file has been + * generated by WriteChunk **************/ char buf1_1_1[PAGE_SIZE]; memset(buf1_1_1, 'a', length); - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_1, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk的信息,且各项信息符合预期 + // Chunk information can be obtained and all information meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, info.curSn); @@ -93,69 +91,53 @@ 
TEST_F(BasicTestSuit, BasicTest) { ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); - // 读取写入的4KB验证一下,应当与写入数据相等 + // Read back the 4KB just written and verify it equals the written data memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_1, readbuf, length)); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content + // read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - CHUNK_SIZE - PAGE_SIZE, - length); + errorCode = + dataStore_->ReadChunk(id, sn, readbuf, CHUNK_SIZE - PAGE_SIZE, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // chunk 存在时,覆盖写 + // Overwrite when chunk exists char buf1_1_2[PAGE_SIZE]; memset(buf1_1_2, 'b', length); - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_2, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content + // read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - offset, - 3 * PAGE_SIZE); + errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, length)); - // chunk 存在时,写入未写过区域 + // When a chunk exists, write to an unwritten area char buf1_1_3[PAGE_SIZE]; memset(buf1_1_3, 'c', length); offset = PAGE_SIZE; length = PAGE_SIZE; - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_3, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_3, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content + // read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - 0, - 3 * PAGE_SIZE); + errorCode = dataStore_->ReadChunk(id, sn, readbuf, 0, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_1_3, readbuf + PAGE_SIZE, PAGE_SIZE)); - // chunk 存在时,覆盖部分区域 + // When the chunk exists, overwrite part of the written area char buf1_1_4[2 * PAGE_SIZE]; memset(buf1_1_4, 'd', length); offset = PAGE_SIZE; @@ -164,27 +146,19 @@ TEST_F(BasicTestSuit, BasicTest) { butil::IOBuf iobuf1_1_4; iobuf1_1_4.append(buf1_1_4, length); - errorCode = dataStore_->WriteChunk(id, - sn, - iobuf1_1_4, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, iobuf1_1_4, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content + // read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - 0, - 3 * PAGE_SIZE); + errorCode = dataStore_->ReadChunk(id, sn, readbuf, 0, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_1_4, readbuf + PAGE_SIZE, 2 * PAGE_SIZE)); - - /******************场景三:用户删除文件******************/ + /******************Scenario 3: The user deletes the file******************/ errorCode = 
dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); diff --git a/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp b/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp index 3b0d635652..6db8375ff2 100644 --- a/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_clo"; // NOLINT -const string poolDir = "./chunkfilepool_int_clo"; // NOLINT +const string baseDir = "./data_int_clo"; // NOLINT +const string poolDir = "./chunkfilepool_int_clo"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_clo.meta"; // NOLINT class CloneTestSuit : public DatastoreIntegrationBase { @@ -36,7 +36,7 @@ class CloneTestSuit : public DatastoreIntegrationBase { }; /** - * 克隆场景测试 + * Clone scenario testing */ TEST_F(CloneTestSuit, CloneTest) { ChunkID id = 1; @@ -48,16 +48,14 @@ TEST_F(CloneTestSuit, CloneTest) { CSChunkInfo info; std::string location("test@s3"); - /******************场景一:创建克隆文件******************/ + /******************Scenario 1: Creating Cloned Files******************/ - // 创建克隆文件chunk1 - errorCode = dataStore_->CreateCloneChunk(id, // chunk id - sn, - correctedSn, - CHUNK_SIZE, - location); + // Create clone file chunk1 + errorCode = + dataStore_->CreateCloneChunk(id, // chunk id + sn, correctedSn, CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -71,14 +69,13 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 再次调该接口,仍返回成功,chunk的信息不变 - errorCode = dataStore_->CreateCloneChunk(id, // chunk id - sn, - correctedSn, - CHUNK_SIZE, - location); + // Call the interface again, but still return success. 
Chunk information + // remains unchanged + errorCode = + dataStore_->CreateCloneChunk(id, // chunk id + sn, correctedSn, CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -92,14 +89,12 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 创建克隆文件chunk2 - errorCode = dataStore_->CreateCloneChunk(2, // chunk id - sn, - correctedSn, - CHUNK_SIZE, - location); + // Create clone file chunk2 + errorCode = + dataStore_->CreateCloneChunk(2, // chunk id + sn, correctedSn, CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(2, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -113,23 +108,19 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - /******************场景二:恢复克隆文件******************/ - // 构造原始数据 + /******************Scene 2: Restoring Cloned Files******************/ + // Construct raw data char pasteBuf[4 * PAGE_SIZE]; memset(pasteBuf, '1', 4 * PAGE_SIZE); - // WriteChunk写数据到clone chunk的[0, 8KB]区域 + // WriteChunk writes data to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; char writeBuf1[2 * PAGE_SIZE]; memset(writeBuf1, 'a', length); - errorCode = dataStore_->WriteChunk(id, - sn, - writeBuf1, - offset, - length, - nullptr); - ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + errorCode = + dataStore_->WriteChunk(id, sn, writeBuf1, offset, length, nullptr); + ASSERT_EQ(errorCode, CSErrorCode::Success); + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -137,26 +128,23 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘1’ + // Reading Chunk data, [0, 8KB] data should be '1' size_t readSize = 2 * PAGE_SIZE; char readBuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(writeBuf1, readBuf, readSize)); - // PasteChunk再次写数据到clone chunk的[0, 8KB]区域 + // PasteChunk writes data again to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; - errorCode = dataStore_->PasteChunk(id, - pasteBuf, - offset, - length); + errorCode = dataStore_->PasteChunk(id, pasteBuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -164,30 +152,26 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); 
ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Bits of the written pages are set to 1; all other bits are 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘a’ + // Reading Chunk data, [0, 8KB] data should be 'a' readSize = 2 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(writeBuf1, readBuf, readSize)); - // WriteChunk再次写数据到clone chunk的[4KB, 12KB]区域 + // WriteChunk writes data again to the [4KB, 12KB] area of the clone chunk offset = PAGE_SIZE; length = 2 * PAGE_SIZE; char writeBuf3[2 * PAGE_SIZE]; memset(writeBuf3, 'c', length); - errorCode = dataStore_->WriteChunk(id, - sn, - writeBuf3, - offset, - length, - nullptr); - ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + errorCode = + dataStore_->WriteChunk(id, sn, writeBuf3, offset, length, nullptr); + ASSERT_EQ(errorCode, CSErrorCode::Success); + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -195,11 +179,12 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Bits of the written pages are set to 1; all other bits are 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(3, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); - // 读Chunk数据,[0, 4KB]数据应为‘a’,[4KB, 12KB]数据应为‘c’ + // Reading Chunk data, [0, 4KB] data should be 'a', [4KB, 12KB] data should + // be 'c' readSize = 3 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); @@ -207,17 +192,18 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(0, memcmp(writeBuf1, readBuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(writeBuf3, readBuf + PAGE_SIZE, 2 * PAGE_SIZE)); - /******************场景三:clone文件遍写后转换为普通chunk文件*************/ + /******************Scenario 3: The clone file is converted to a regular + * chunk file after being fully written*************/ char overBuf[1 * kMB] = {0}; for (int i = 0; i < 16; ++i) { - errorCode = dataStore_->PasteChunk(id, - overBuf, + errorCode = dataStore_->PasteChunk(id, overBuf, i * kMB, // offset 1 * kMB); // length ASSERT_EQ(errorCode, CSErrorCode::Success); }
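The loop above pastes all 16MB of the chunk in 1MB steps, which is what flips it out of the clone state. A sketch of the assumed promotion rule these assertions rely on (illustrative only; Bitmap::Set(start, end) and the field updates are assumptions, not the datastore's real internals):

    // Each bit of the clone bitmap tracks one page; WriteChunk/PasteChunk
    // set the bits they cover. Once no clear bit remains, the chunk is
    // promoted to a regular chunk and the bitmap is released.
    info.bitmap->Set(offset / PAGE_SIZE, (offset + length - 1) / PAGE_SIZE);
    if (info.bitmap->NextClearBit(0) == Bitmap::NO_POS) {
        info.isClone = false;   // fully written
        info.bitmap = nullptr;  // bitmap released
    }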
- // 检查chunk的各项信息,都符合预期,chunk转为了普通的chunk + // Check all the chunk information; it meets expectations and the chunk + // has been converted to a regular chunk errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -226,15 +212,15 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); - /******************场景三:删除文件****************/ + /******************Scenario 3: Delete the file****************/ - // 此时删除Chunk1,返回Success + // Deleting Chunk1 now returns Success errorCode = dataStore_->DeleteChunk(1, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 此时删除Chunk2,返回Success + // Deleting Chunk2 now returns Success errorCode = dataStore_->DeleteChunk(2, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->GetChunkInfo(2, &info); @@ -242,7 +228,7 @@ } /** - * 恢复场景测试 + * Recovery scenario testing */ TEST_F(CloneTestSuit, RecoverTest) { ChunkID id = 1; @@ -254,16 +240,15 @@ TEST_F(CloneTestSuit, RecoverTest) { CSErrorCode errorCode; CSChunkInfo info; std::string location("test@s3"); - /******************场景一:创建克隆文件******************/ + /******************Scenario 1: Creating Cloned Files******************/ - // 创建克隆文件chunk1 + // Create clone file chunk1 errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, correctedSn, // corrected sn - CHUNK_SIZE, - location); + CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -277,14 +262,14 @@ ... ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 再次调该接口,仍返回成功,chunk的信息不变 + // Calling the interface again still returns success. 
Chunk information + // remains unchanged errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, 3, // corrected sn - CHUNK_SIZE, - location); + CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -298,20 +283,17 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - /******************场景二:恢复克隆文件******************/ + /******************Scene 2: Restoring Cloned Files******************/ sn = 3; - // 构造原始数据 + // Construct raw data char pasteBuf[4 * PAGE_SIZE]; memset(pasteBuf, '1', 4 * PAGE_SIZE); - // PasteChunk写数据到clone chunk的[0, 8KB]区域 + // PasteChunk writes data to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; - errorCode = dataStore_->PasteChunk(id, - pasteBuf, - offset, - length); + errorCode = dataStore_->PasteChunk(id, pasteBuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); @@ -319,30 +301,26 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘1’ + // Reading Chunk data, [0, 8KB] data should be '1' size_t readSize = 2 * PAGE_SIZE; char readBuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(pasteBuf, readBuf, readSize)); - // WriteChunk再次写数据到clone chunk的[0, 8KB]区域 + // WriteChunk writes data again to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; char writeBuf2[2 * PAGE_SIZE]; memset(writeBuf2, 'b', length); - errorCode = dataStore_->WriteChunk(id, - sn, - writeBuf2, - offset, - length, - nullptr); - ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + errorCode = + dataStore_->WriteChunk(id, sn, writeBuf2, offset, length, nullptr); + ASSERT_EQ(errorCode, CSErrorCode::Success); + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -350,26 +328,23 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘b’ + // Reading Chunk data, [0, 8KB] data should be 'b' readSize = 2 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(writeBuf2, readBuf, readSize)); - // PasteChunk再次写数据到clone chunk的[4KB, 
12KB]区域 + // PasteChunk writes data again to the [4KB, 12KB] area of the clone chunk offset = PAGE_SIZE; length = 2 * PAGE_SIZE; - errorCode = dataStore_->PasteChunk(id, - pasteBuf, - offset, - length); + errorCode = dataStore_->PasteChunk(id, pasteBuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -377,11 +352,12 @@ ... ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Bits of the written pages are set to 1; all other bits are 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(3, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); - // 读Chunk数据,[0, 8KB]数据应为‘b’,[8KB, 12KB]数据应为‘1’ + // Reading Chunk data, [0, 8KB] data should be 'b', [8KB, 12KB] data should + // be '1' readSize = 3 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); @@ -389,19 +365,19 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_EQ(0, memcmp(writeBuf2, readBuf, 2 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(pasteBuf, readBuf + 2 * PAGE_SIZE, PAGE_SIZE)); - /******************场景三:clone文件遍写后转换为普通chunk文件*************/ + /******************Scenario 3: The clone file is converted to a regular + * chunk file after being fully written*************/ char overBuf[1 * kMB] = {0}; for (int i = 0; i < 16; ++i) { - errorCode = dataStore_->WriteChunk(id, - sn, - overBuf, - i * kMB, // offset + errorCode = dataStore_->WriteChunk(id, sn, overBuf, + i * kMB, // offset 1 * kMB, nullptr); // length ASSERT_EQ(errorCode, CSErrorCode::Success); } - // 检查chunk的各项信息,都符合预期,chunk转为了普通的chunk + // Check all the chunk information; it meets expectations and the chunk
+ // and the chunk has been converted to a normal chunk errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); diff --git a/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp b/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp index e873cdb667..e1ded2ef1a 100644 --- a/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_con"; // NOLINT -const string poolDir = "./chunkfilepool_int_con"; // NOLINT +const string baseDir = "./data_int_con"; // NOLINT +const string poolDir = "./chunkfilepool_int_con"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_con.meta"; // NOLINT class ConcurrencyTestSuit : public DatastoreIntegrationBase { @@ -46,9 +46,8 @@ TEST_F(ConcurrencyTestSuit, ConcurrencyTest) { const int kThreadNum = 10; auto readFunc = [&](ChunkID id) { - // 五分之一概率增加版本号 - if (rand_r(&seed) % 5 == 0) - ++sn; + // Increase the version number with a one-in-five probability + if (rand_r(&seed) % 5 == 0) ++sn; uint64_t pageIndex = rand_r(&seed) % (CHUNK_SIZE / PAGE_SIZE); offset = pageIndex * PAGE_SIZE; dataStore_->ReadChunk(id, sn, buf, offset, length); @@ -60,9 +59,7 @@ TEST_F(ConcurrencyTestSuit, ConcurrencyTest) { dataStore_->WriteChunk(id, sn, buf, offset, length, nullptr); }; - auto deleteFunc = [&](ChunkID id) { - dataStore_->DeleteChunk(id, sn); - }; + auto deleteFunc = [&](ChunkID id) { dataStore_->DeleteChunk(id, sn); }; auto deleteSnapFunc = [&](ChunkID id) { dataStore_->DeleteSnapshotChunkOrCorrectSn(id, sn); @@ -107,7 +104,7 @@ TEST_F(ConcurrencyTestSuit, ConcurrencyTest) { Thread threads[kThreadNum]; printf("===============TEST CHUNK1===================\n"); - // 测试并发对同一chunk进行随机操作 + // Test concurrent random operations on the same chunk for (int i = 0; i < kThreadNum; ++i) { threads[i] = std::thread(Run, 1, kLoopNum); } @@ -118,7 +115,7 @@ TEST_F(ConcurrencyTestSuit, ConcurrencyTest) { printf("===============TEST RANDOM==================\n"); - // 测试并发对不同chunk进行随机操作 + // Test concurrent random operations on different chunks int idRange = 10; for (int i = 0; i < kThreadNum; ++i) { threads[i] = std::thread(Run, idRange, kLoopNum); diff --git a/test/integration/chunkserver/datastore/datastore_exception_test.cpp b/test/integration/chunkserver/datastore/datastore_exception_test.cpp index 5405b03e8c..cc020c395b 100644 --- a/test/integration/chunkserver/datastore/datastore_exception_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_exception_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_exc"; // NOLINT -const string poolDir = "./chunkfilepool_int_exc"; // NOLINT +const string baseDir = "./data_int_exc"; // NOLINT +const string poolDir = "./chunkfilepool_int_exc"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_exc.meta"; // NOLINT class ExceptionTestSuit : public DatastoreIntegrationBase { @@ -36,9 +36,9 @@ class ExceptionTestSuit : public DatastoreIntegrationBase { }; /** - * 异常测试1 - * 用例:chunk的metapage数据损坏,然后启动DataStore - * 预期:重启失败 + * Exception Test 1 + * Scenario: the chunk's metapage data is corrupted, then the DataStore is started + * Expected: the restart fails */ TEST_F(ExceptionTestSuit, ExceptionTest1) { SequenceNum fileSn = 1; @@ -47,46 +47,41 @@ TEST_F(ExceptionTestSuit, ExceptionTest1) { CSErrorCode 
errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf[PAGE_SIZE]; memset(buf, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf, - offset, - length, - nullptr); + fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1的metapage - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(1); + // Modify chunk1's metapage through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); char metapage[PAGE_SIZE]; - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metapage, 0, PAGE_SIZE); - // 修改metapage + // Modify the metapage metapage[0]++; lfs_->Write(fd, metapage, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it to simulate the restart + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_FALSE(dataStore_->Initialize()); } /** - * 异常测试2 - * 用例:chunk的metapage数据损坏,然后更新了metapage,然后重启DataStore - * 预期:重启datastore可以成功 + * Exception Test 2 + * Scenario: the chunk's metapage data is corrupted, then the metapage is + * updated, and then the DataStore is restarted + * Expected: restarting the DataStore succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest2) { SequenceNum fileSn = 1; @@ -95,55 +90,45 @@ TEST_F(ExceptionTestSuit, ExceptionTest2) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf[PAGE_SIZE]; memset(buf, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf, - offset, - length, - nullptr); + fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1的metapage - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(1); + // Modify chunk1's metapage through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); char metapage[PAGE_SIZE]; - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metapage, 0, PAGE_SIZE); - // 修改metapage + // Modify the metapage metapage[0]++; lfs_->Write(fd, metapage, 0, PAGE_SIZE); lfs_->Close(fd); - // 触发metapage更新 + // Trigger a metapage update errorCode = dataStore_->WriteChunk(1, // id - ++fileSn, - buf, - offset, - length, - nullptr); + ++fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it to simulate the restart + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); } /** - * 异常测试3 - * 用例:chunk快照的metapage数据损坏,然后重启DataStore - * 预期:重启失败 + * Exception Test 3 + * Scenario: the chunk snapshot's metapage data is corrupted, then the + * DataStore is restarted + * Expected: the restart fails */ TEST_F(ExceptionTestSuit, ExceptionTest3) { SequenceNum fileSn = 1; @@ 
-152,55 +137,45 @@ TEST_F(ExceptionTestSuit, ExceptionTest3) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf[PAGE_SIZE]; memset(buf, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf, - offset, - length, - nullptr); + fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 生成快照文件 + // Generate a snapshot file errorCode = dataStore_->WriteChunk(1, // id - ++fileSn, - buf, - offset, - length, - nullptr); + ++fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1快照的metapage - std::string snapPath = baseDir + "/" + - FileNameOperator::GenerateSnapshotName(1, 1); + // Modify the chunk1 snapshot's metapage through lfs + std::string snapPath = + baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); char metapage[PAGE_SIZE]; - int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(snapPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metapage, 0, PAGE_SIZE); - // 修改metapage + // Modify the metapage metapage[0]++; lfs_->Write(fd, metapage, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it to simulate the restart + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_FALSE(dataStore_->Initialize()); } /** - * 异常测试4 - * 用例:chunk快照的metapage数据损坏,但是更新了metapage,然后重启DataStore - * 预期:重启成功 + * Exception Test 4 + * Scenario: the chunk snapshot's metapage data is corrupted, but the metapage + * is then updated, and the DataStore is restarted + * Expected: the restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest4) { SequenceNum fileSn = 1; @@ -209,64 +184,52 @@ TEST_F(ExceptionTestSuit, ExceptionTest4) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf[PAGE_SIZE]; memset(buf, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf, - offset, - length, - nullptr); + fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 生成快照文件 + // Generate a snapshot file errorCode = dataStore_->WriteChunk(1, // id - ++fileSn, - buf, - offset, - length, - nullptr); + ++fileSn, buf, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 触发快照metapage更新 + // Trigger a snapshot metapage update errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf, - offset + PAGE_SIZE, - length, + fileSn, buf, offset + PAGE_SIZE, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1快照的metapage - std::string snapPath = baseDir + "/" + - FileNameOperator::GenerateSnapshotName(1, 1); + // Modify the chunk1 snapshot's metapage through lfs + std::string snapPath = + baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); char metapage[PAGE_SIZE]; - int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(snapPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metapage, 0, PAGE_SIZE); - // 修改metapage + // Modify the metapage metapage[0]++; lfs_->Write(fd, metapage, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; 
- // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it to simulate the restart + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_FALSE(dataStore_->Initialize()); } /** - * 异常测试5 - * 用例:WriteChunk数据写到一半重启 - * 预期:重启成功,重新执行上一条操作成功 + * Exception Test 5 + * Scenario: restart while WriteChunk data is only half written + * Expected: the restart succeeds and re-executing the previous operation + * succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest5) { SequenceNum fileSn = 1; @@ -275,66 +238,54 @@ TEST_F(ExceptionTestSuit, ExceptionTest5) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 构造要写入的数据和请求偏移 + // Construct the data to write and the request offset char buf2[2 * PAGE_SIZE]; memset(buf2, '2', length); offset = 0; length = 2 * PAGE_SIZE; - // 通过lfs写一半数据到chunk文件 - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(1); - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + // Write half of the data to the chunk file through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf2, offset + PAGE_SIZE, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it to simulate the restart + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 模拟日志恢复 + // Simulate log recovery errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读数据校验 + // Read back the data to verify it char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); } /** - * 异常测试6 - * 用例:WriteChunk更新metapage后重启,sn>chunk.sn,sn==chunk.correctedSn - * 预期:重启成功,重新执行上一条操作成功 + * Exception Test 6 + * Scenario: restart after WriteChunk updates the metapage, with + * sn>chunk.sn, sn==chunk.correctedSn + * Expected: the restart succeeds and re-executing the previous operation + * succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest6) { SequenceNum fileSn = 1; @@ -343,84 +294,70 @@ TEST_F(ExceptionTestSuit, ExceptionTest6) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 更新 correctedsn 为2 + // Update correctedSn to 2 errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(1, 2); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 构造要写入的请求参数 + // Construct the write request parameters char buf2[2 * PAGE_SIZE]; memset(buf2, 
'2', length); offset = 0; length = 2 * PAGE_SIZE; fileSn = 2; // sn > chunk.sn; sn == chunk.correctedSn - // 通过lfs修改chunk1的metapage - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(1); + // Modify chunk1's metapage through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); char metabuf[PAGE_SIZE]; - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metabuf, 0, PAGE_SIZE); - // 模拟更新metapage成功 + // Simulate a successful metapage update ChunkFileMetaPage metaPage; errorCode = metaPage.decode(metabuf); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, metaPage.sn); metaPage.sn = fileSn; metaPage.encode(metabuf); - // 更新metapage + // Update the metapage lfs_->Write(fd, metabuf, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it to simulate the restart + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 模拟日志恢复 + // Simulate log recovery errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读数据校验 + // Read back the data to verify it char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); } /** - * 异常测试7 - * 用例:WriteChunk产生快照后重启,恢复历史操作和当前操作 - * sn>chunk.sn, sn>chunk.correctedSn - * 测chunk.sn>chunk.correctedSn - * 预期:重启成功 + * Exception Test 7 + * Scenario: restart after WriteChunk generates a snapshot, then recover the + * historical and current operations; sn>chunk.sn, sn>chunk.correctedSn, + * testing chunk.sn>chunk.correctedSn + * Expected: the restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest7) { SequenceNum fileSn = 1; @@ -429,18 +366,15 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where + // chunk.sn>chunk.correctedSn char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟创建快照文件 + // Simulate creating a snapshot file ChunkOptions chunkOption; chunkOption.id = 1; chunkOption.sn = 1; @@ -452,19 +386,17 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { errorCode = snapshot.Open(true); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it to simulate the restart + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check whether the snapshot information was loaded CSChunkInfo info; errorCode = 
dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -472,61 +404,47 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复前一条操作 + // Simulate log recovery replaying the previous operation errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读快照文件来校验是否有cow + // Read the snapshot file to check whether COW occurred char readbuf[2 * PAGE_SIZE]; snapshot.Read(readbuf, offset, length); - // 预期未发生cow + // No COW is expected to have occurred ASSERT_NE(0, memcmp(buf1, readbuf, length)); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation fileSn++; char buf2[PAGE_SIZE]; memset(buf2, '2', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查是否更新了版本号 + // Check whether the version number was updated errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // The chunk data is overwritten errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data was COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id - 1, - readbuf, - offset, - length); + 1, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); } /** - * 异常测试8 - * 用例:WriteChunk产生快照后重启, + * Exception Test 8 + * Scenario: restart after WriteChunk generates a snapshot, * sn>chunk.sn, sn>chunk.correctedSn - 测chunk.sn==chunk.correctedSn - 预期:重启成功 + * testing chunk.sn==chunk.correctedSn + * Expected: the restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest8) { SequenceNum fileSn = 1; @@ -535,27 +453,20 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,构造chunk.sn==chunk.correctedsn的场景 + // Generate chunk1 and construct a scenario where + // chunk.sn==chunk.correctedSn char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(1, 2); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->WriteChunk(1, // id - ++fileSn, - buf1, - offset, - length, - nullptr); + ++fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟创建快照文件 + // Simulate creating a snapshot file ChunkOptions chunkOption; chunkOption.id = 1; chunkOption.sn = 2; @@ -567,19 +478,17 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { errorCode = snapshot.Open(true); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it to simulate the restart + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - 
// 检查是否加载了快照信息 + // Check whether the snapshot information was loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -587,60 +496,46 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { ASSERT_EQ(2, info.snapSn); ASSERT_EQ(2, info.correctedSn); - // 模拟日志恢复前一条操作 + // Simulate log recovery replaying the previous operation errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读快照文件来校验是否有cow + // Read the snapshot file to check whether COW occurred char readbuf[2 * PAGE_SIZE]; snapshot.Read(readbuf, offset, length); - // 预期未发生cow + // No COW is expected to have occurred ASSERT_NE(0, memcmp(buf1, readbuf, length)); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation fileSn++; char buf2[PAGE_SIZE]; memset(buf2, '2', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查是否更新了版本号 + // Check whether the version number was updated errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(3, info.curSn); ASSERT_EQ(2, info.snapSn); ASSERT_EQ(2, info.correctedSn); - // chunk数据被覆盖 + // The chunk data is overwritten errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data was COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id - 2, - readbuf, - offset, - length); + 2, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); } /** - * 异常测试9 - * 用例:WriteChunk产生快照并更新metapage后重启,恢复历史操作和当前操作 - * sn>chunk.sn, sn>chunk.correctedSn - * 预期:重启成功 + * Exception Test 9 + * Scenario: restart after WriteChunk generates a snapshot and updates the + * metapage, then recover the historical and current operations; sn>chunk.sn, + * sn>chunk.correctedSn + * Expected: the restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest9) { SequenceNum fileSn = 1; @@ -649,18 +544,15 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where + // chunk.sn>chunk.correctedSn char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟创建快照文件 + // Simulate creating a snapshot file ChunkOptions chunkOption; chunkOption.id = 1; chunkOption.sn = 1; @@ -672,38 +564,36 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { errorCode = snapshot.Open(true); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1的metapage - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(1); + // Modify chunk1's metapage through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); char metabuf[PAGE_SIZE]; - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metabuf, 0, PAGE_SIZE); - // 模拟更新metapage成功 + // Simulate a successful metapage update ChunkFileMetaPage metaPage; errorCode = 
metaPage.decode(metabuf); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, metaPage.sn); metaPage.sn = 2; metaPage.encode(metabuf); - // 更新metapage + // Update the metapage lfs_->Write(fd, metabuf, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it to simulate the restart + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check whether the snapshot information was loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -711,56 +601,42 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复前一条操作 + // Simulate log recovery replaying the previous operation errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::BackwardRequestError); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation fileSn++; char buf2[PAGE_SIZE]; memset(buf2, '2', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查是否更新了版本号 + // Check whether the version number was updated errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // The chunk data is overwritten char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data was COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id - 1, - readbuf, - offset, - length); + 1, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); } /** - * 异常测试10 - * 用例:WriteChunk更新快照metapage前重启,恢复历史操作和当前操作 - * sn>chunk.sn, sn>chunk.correctedSn - * 预期:重启成功 + * Exception Test 10 + * Scenario: restart before WriteChunk updates the snapshot metapage, then + * recover the historical and current operations; sn>chunk.sn, + * sn>chunk.correctedSn + * Expected: the restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest10) { SequenceNum fileSn = 1; @@ -769,42 +645,35 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where + // chunk.sn>chunk.correctedSn char buf1[2 * PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 产生快照文件 + // Generate a snapshot file fileSn++; length = PAGE_SIZE; char buf2[2 * PAGE_SIZE]; memset(buf2, '2', 2 * PAGE_SIZE); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟cow - std::string snapPath = baseDir + "/" + - 
FileNameOperator::GenerateSnapshotName(1, 1); - int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); + // Simulate COW + std::string snapPath = + baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); + int fd = lfs_->Open(snapPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf1, 2 * PAGE_SIZE, PAGE_SIZE); - // 更新metapage + // Update the metapage char metabuf[PAGE_SIZE]; lfs_->Read(fd, metabuf, 0, PAGE_SIZE); - // 修改metapage + // Modify the metapage SnapshotMetaPage metaPage; errorCode = metaPage.decode(metabuf); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -813,19 +682,17 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { lfs_->Write(fd, metabuf, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it to simulate the restart + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check whether the snapshot information was loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -833,67 +700,52 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复 + // Simulate log recovery offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 1, // sn - buf1, - offset, - length, - nullptr); + buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::BackwardRequestError); - // 模拟恢复下一个操作 + // Simulate recovery of the next operation length = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn - buf2, - offset, - length, - nullptr); + buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation offset = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn - buf2, - offset, - length, - nullptr); + buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk 信息是否正确 + // Check whether the chunk information is correct errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // The chunk data is overwritten char readbuf[2 * PAGE_SIZE]; offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data was COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id - 1, - readbuf, - offset, - length); + 1, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); } /** - * 异常测试11 - * 用例:WriteChunk更新快照metapage后重启,恢复历史操作和当前操作 - * sn>chunk.sn, sn>chunk.correctedSn - * 预期:重启成功 + * Exception Test 11 + * Scenario: restart after WriteChunk updates the snapshot metapage, then + * recover the historical and current operations; sn>chunk.sn, + * sn>chunk.correctedSn + * Expected: the restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest11) { SequenceNum fileSn = 1; @@ -902,53 +754,44 @@ TEST_F(ExceptionTestSuit, ExceptionTest11) { CSErrorCode errorCode; CSChunkInfo 
chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where + // chunk.sn>chunk.correctedSn char buf1[2 * PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf1, - offset, - length, - nullptr); + fileSn, buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 产生快照文件 + // Generate a snapshot file fileSn++; length = PAGE_SIZE; char buf2[2 * PAGE_SIZE]; memset(buf2, '2', 2 * PAGE_SIZE); errorCode = dataStore_->WriteChunk(1, // id - fileSn, - buf2, - offset, - length, - nullptr); + fileSn, buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟cow - std::string snapPath = baseDir + "/" + - FileNameOperator::GenerateSnapshotName(1, 1); - int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); + // Simulate COW + std::string snapPath = + baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); + int fd = lfs_->Open(snapPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf1, 2 * PAGE_SIZE, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it to simulate the restart + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check whether the snapshot information was loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -956,66 +799,51 @@ TEST_F(ExceptionTestSuit, ExceptionTest11) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复 + // Simulate log recovery offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 1, // sn - buf1, - offset, - length, - nullptr); + buf1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::BackwardRequestError); - // 模拟恢复下一个操作 + // Simulate recovery of the next operation length = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn - buf2, - offset, - length, - nullptr); + buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation offset = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn - buf2, - offset, - length, - nullptr); + buf2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk 信息是否正确 + // Check whether the chunk information is correct errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // The chunk data is overwritten char readbuf[2 * PAGE_SIZE]; offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->ReadChunk(1, // id - fileSn, - readbuf, - offset, - length); + fileSn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data was COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id - 1, - readbuf, - offset, - length); + 1, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); } /** - * 异常测试12 - * 用例:PasteChunk,数据写入一半时,还未更新metapage重启/崩溃 - * 预期:重启成功,paste成功 + * Exception Test 12 + * 
Scenario: restart/crash while PasteChunk data is half written and the + * metapage has not yet been updated + * Expected: the restart succeeds and the paste succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest12) { ChunkID id = 1; @@ -1027,14 +855,13 @@ TEST_F(ExceptionTestSuit, ExceptionTest12) { CSChunkInfo info; std::string location("test@s3"); - // 创建克隆文件chunk1 + // Create clone chunk chunk1 errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, correctedSn, // corrected sn - CHUNK_SIZE, - location); + CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check the chunk's information; every field should match expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -1048,58 +875,50 @@ TEST_F(ExceptionTestSuit, ExceptionTest12) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 构造要写入的数据和请求偏移 + // Construct the data to write and the request offset char buf1[PAGE_SIZE]; memset(buf1, '1', length); offset = 0; length = PAGE_SIZE; - // 通过lfs写数据到chunk文件 - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(1); - int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); + // Write data to the chunk file through lfs + std::string chunkPath = + baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); + int fd = lfs_->Open(chunkPath, O_RDWR | O_NOATIME | O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf1, offset + PAGE_SIZE, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.blockSize = BLOCK_SIZE; options.metaPageSize = PAGE_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it to simulate the restart + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 模拟日志恢复 + // Simulate log recovery errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, correctedSn, // corrected sn - CHUNK_SIZE, - location); + CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->PasteChunk(1, // id - buf1, - offset, - length); + buf1, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查bitmap + // Check the bitmap errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(1, info.bitmap->NextClearBit(0)); - // 读数据校验 + // Read back the data to verify it char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id - sn, - readbuf, - offset, - length); + sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1, readbuf, length)); } diff --git a/test/integration/chunkserver/datastore/datastore_integration_base.h b/test/integration/chunkserver/datastore/datastore_integration_base.h index 0731eb39cd..bf99263214 100644 --- a/test/integration/chunkserver/datastore/datastore_integration_base.h +++ b/test/integration/chunkserver/datastore/datastore_integration_base.h @@ -24,26 +24,27 @@ #define TEST_INTEGRATION_CHUNKSERVER_DATASTORE_DATASTORE_INTEGRATION_BASE_H_ #include -#include #include +#include #include + #include #include +#include "src/chunkserver/datastore/chunkserver_datastore.h" +#include "src/chunkserver/datastore/define.h" +#include "src/chunkserver/datastore/file_pool.h" #include 
"src/common/concurrent/concurrent.h" #include "src/common/timeutility.h" #include "src/fs/local_filesystem.h" -#include "src/chunkserver/datastore/define.h" -#include "src/chunkserver/datastore/file_pool.h" -#include "src/chunkserver/datastore/chunkserver_datastore.h" #include "test/chunkserver/datastore/filepool_helper.h" -using curve::fs::FileSystemType; -using curve::fs::LocalFileSystem; -using curve::fs::LocalFsFactory; using curve::common::Atomic; using curve::common::Thread; using curve::common::TimeUtility; +using curve::fs::FileSystemType; +using curve::fs::LocalFileSystem; +using curve::fs::LocalFsFactory; using ::testing::UnorderedElementsAre; @@ -55,12 +56,12 @@ const ChunkSizeType CHUNK_SIZE = 16 * kMB; const ChunkSizeType BLOCK_SIZE = 4096; const PageSizeType PAGE_SIZE = 4 * 1024; -extern const string baseDir; // NOLINT -extern const string poolDir; // NOLINT +extern const string baseDir; // NOLINT +extern const string poolDir; // NOLINT extern const string poolMetaPath; // NOLINT /** - * DataStore层集成LocalFileSystem层测试 + * Datastore layer integration LocalFileSystem layer testing */ class DatastoreIntegrationBase : public testing::Test { public: @@ -79,9 +80,7 @@ class DatastoreIntegrationBase : public testing::Test { options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - dataStore_ = std::make_shared(lfs_, - filePool_, - options); + dataStore_ = std::make_shared(lfs_, filePool_, options); if (dataStore_ == nullptr) { LOG(FATAL) << "allocate chunkfile pool failed!"; } @@ -105,8 +104,7 @@ class DatastoreIntegrationBase : public testing::Test { cfop.metaPageSize = PAGE_SIZE; memcpy(cfop.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); - if (lfs_->DirExists(poolDir)) - lfs_->Delete(poolDir); + if (lfs_->DirExists(poolDir)) lfs_->Delete(poolDir); allocateChunk(lfs_, chunkNum, poolDir, CHUNK_SIZE); ASSERT_TRUE(filePool_->Initialize(cfop)); ASSERT_EQ(chunkNum, filePool_->Size()); @@ -121,8 +119,8 @@ class DatastoreIntegrationBase : public testing::Test { } protected: - std::shared_ptr filePool_; - std::shared_ptr lfs_; + std::shared_ptr filePool_; + std::shared_ptr lfs_; std::shared_ptr dataStore_; }; diff --git a/test/integration/chunkserver/datastore/datastore_integration_test.cpp b/test/integration/chunkserver/datastore/datastore_integration_test.cpp index 52693dfa9e..a5f0316ba9 100644 --- a/test/integration/chunkserver/datastore/datastore_integration_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_integration_test.cpp @@ -28,8 +28,8 @@ namespace chunkserver { const uint64_t kMB = 1024 * 1024; const ChunkSizeType CHUNK_SIZE = 16 * kMB; const PageSizeType PAGE_SIZE = 4 * 1024; -const string baseDir = "./data_int"; // NOLINT -const string poolDir = "./chunkfilepool_int"; // NOLINT +const string baseDir = "./data_int"; // NOLINT +const string poolDir = "./chunkfilepool_int"; // NOLINT const string poolMetaPath = "./chunkfilepool_int.meta"; // NOLINT class DatastoreIntegrationTest : public DatastoreIntegrationBase { @@ -39,51 +39,49 @@ class DatastoreIntegrationTest : public DatastoreIntegrationBase { }; /** - * 基本功能测试验证 - * 读、写、删、获取文件信息 + * Basic functional testing verification + * Read, write, delete, and obtain file information */ TEST_F(DatastoreIntegrationTest, BasicTest) { ChunkID id = 1; SequenceNum sn = 1; off_t offset = 0; size_t length = PAGE_SIZE; - std::string chunkPath = baseDir + "/" + - FileNameOperator::GenerateChunkFileName(id); + std::string chunkPath = + baseDir + "/" + 
FileNameOperator::GenerateChunkFileName(id); CSErrorCode errorCode; CSChunkInfo info; - /******************场景一:新建的文件,Chunk文件不存在******************/ + /******************Scenario 1: Newly created file, the chunk file does + * not exist******************/ - // 文件不存在 + // The file does not exist ASSERT_FALSE(lfs_->FileExists(chunkPath)); - // 读chunk时返回ChunkNotExistError + // ChunkNotExistError is returned when reading the chunk char readbuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 无法获取到chunk的版本号 + // The chunk's version number cannot be obtained errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 删除chunk返回Success + // Deleting the chunk returns Success errorCode = dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); - /******************场景二:通过WriteChunk产生chunk文件后操作**************/ + /******************Scenario 2: Operations after the chunk file is created + * by WriteChunk**************/ char buf1_1_1[PAGE_SIZE]; memset(buf1_1_1, 'a', length); - // 第一次WriteChunk会产生chunk文件 - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_1, - offset, - length, - nullptr); + // The first WriteChunk creates the chunk file + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk的信息,且各项信息符合预期 + // The chunk's information can be obtained and every field matches expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, info.curSn); @@ -95,87 +93,63 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); - // 读取写入的4KB验证一下,应当与写入数据相等 + // Read back the 4KB just written; it should equal the written data errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_1, readbuf, length)); - // 没被写过的区域也可以读,但是不保证读到的数据内容 - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - CHUNK_SIZE - PAGE_SIZE, - length); + // Unwritten areas can also be read, but the content read back is not + // guaranteed + errorCode = + dataStore_->ReadChunk(id, sn, readbuf, CHUNK_SIZE - PAGE_SIZE, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // chunk 存在时,覆盖写 + // When the chunk exists, overwrite existing data char buf1_1_2[PAGE_SIZE]; memset(buf1_1_2, 'b', length); - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_2, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - offset, - 3 * PAGE_SIZE); + // Unwritten areas can also be read, but the content read back is not + // guaranteed + errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, length)); - // chunk 存在时,写入未写过区域 + // When the chunk exists, write to an unwritten area char buf1_1_3[PAGE_SIZE]; memset(buf1_1_3, 'c', length); offset = PAGE_SIZE; length = PAGE_SIZE; - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_3, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_3, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 - errorCode = 
dataStore_->ReadChunk(id, - sn, - readbuf, - 0, - 3 * PAGE_SIZE); + // Unwritten areas can also be read, but the content read back is not + // guaranteed + errorCode = dataStore_->ReadChunk(id, sn, readbuf, 0, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_1_3, readbuf + PAGE_SIZE, PAGE_SIZE)); - // chunk 存在时,覆盖部分区域 + // When the chunk exists, overwrite part of the written area char buf1_1_4[2 * PAGE_SIZE]; memset(buf1_1_4, 'd', length); offset = PAGE_SIZE; length = 2 * PAGE_SIZE; - errorCode = dataStore_->WriteChunk(id, - sn, - buf1_1_4, - offset, - length, - nullptr); + errorCode = + dataStore_->WriteChunk(id, sn, buf1_1_4, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 - errorCode = dataStore_->ReadChunk(id, - sn, - readbuf, - 0, - 3 * PAGE_SIZE); + // Unwritten areas can also be read, but the content read back is not + // guaranteed + errorCode = dataStore_->ReadChunk(id, sn, readbuf, 0, 3 * PAGE_SIZE); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_1_4, readbuf + PAGE_SIZE, 2 * PAGE_SIZE)); - - /******************场景三:用户删除文件******************/ + /******************Scenario 3: The user deletes the file******************/ errorCode = dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -185,7 +159,7 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { } /** - * 重启恢复测试 + * Restart recovery test */ TEST_F(DatastoreIntegrationTest, RestartTest) { SequenceNum fileSn = 1; @@ -196,7 +170,7 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { CSChunkInfo info3; std::string location("test@s3"); - // 构造要用到的读写缓冲区 + // Construct the read and write buffers to be used char buf1_1[2 * PAGE_SIZE]; memset(buf1_1, 'a', length); char buf2_1[2 * PAGE_SIZE]; @@ -212,7 +186,8 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { size_t readSize = 4 * PAGE_SIZE; char readBuf[4 * PAGE_SIZE]; - // 各个操作对应的错误码返回值,错误码命名格式为 e_optype_chunid_sn + // Error codes returned by each operation; the naming format is + // e_optype_chunkid_sn CSErrorCode e_write_1_1; CSErrorCode e_write_2_1; CSErrorCode e_write_2_2; @@ -224,112 +199,99 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { CSErrorCode e_delsnap_2_3; CSErrorCode e_clone_3_1; - // 模拟所有用户请求,用lamdba函数可以用于验证日志恢复时重用这部分代码 - // 如果后面要加用例,只需要在函数内加操作即可 + // Simulate all user requests; using a lambda lets this code be reused + // when verifying log recovery. To add more cases later, just add + // operations inside the function auto ApplyRequests = [&]() { fileSn = 1; - // 模拟普通文件操作,WriteChunk产生chunk1、chunk2 + // Simulate normal file operations: WriteChunk creates chunk1 and + // chunk2 offset = 0; length = 2 * PAGE_SIZE; - // 产生chunk1 - e_write_1_1 = dataStore_->WriteChunk(1, // chunk id - fileSn, - buf1_1, - offset, - length, - nullptr); - // 产生chunk2 - e_write_2_1 = dataStore_->WriteChunk(2, // chunk id - fileSn, - buf1_1, - offset, - length, - nullptr); - // 删除chunk1 + // Generate chunk1 + e_write_1_1 = + dataStore_->WriteChunk(1, // chunk id + fileSn, buf1_1, offset, length, nullptr); + // Generate chunk2 + e_write_2_1 = + dataStore_->WriteChunk(2, // chunk id + fileSn, buf1_1, offset, length, nullptr); + // Delete chunk1 + e_del_1_1 = dataStore_->DeleteChunk(1, fileSn); - // 模拟快照操作 + // Simulate snapshot operations ++fileSn; offset = 1 * PAGE_SIZE; length = 2 * 
PAGE_SIZE; - // 写chunk2,产生快照文件 + // Write chunk2 to generate a snapshot file e_write_2_2 = + dataStore_->WriteChunk(2, // chunk id + fileSn, buf2_2, offset, length, nullptr); - // 删除chunk2快照 + // Delete the chunk2 snapshot e_delsnap_2_2 = dataStore_->DeleteSnapshotChunkOrCorrectSn(2, fileSn); - // 模拟再次快照,然后删除chunk2快照 + // Simulate taking another snapshot and then delete the chunk2 snapshot ++fileSn; e_delsnap_2_3 = dataStore_->DeleteSnapshotChunkOrCorrectSn(2, fileSn); - // 模拟再次快照,然后写数据到chunk2产生快照 + // Simulate another snapshot, then write data to chunk2 to generate a + // snapshot ++fileSn; offset = 2 * PAGE_SIZE; length = 2 * PAGE_SIZE; - // 写chunk2,产生快照文件 + // Write chunk2 to generate a snapshot file e_write_2_4 = + dataStore_->WriteChunk(2, // chunk id + fileSn, buf2_4, offset, length, nullptr); - // 模拟克隆操作 + + // Simulate clone operations e_clone_3_1 = dataStore_->CreateCloneChunk(3, // chunk id - 1, // sn - 0, // corrected sn - CHUNK_SIZE, - location); - // 写数据到chunk3 + 1, // sn + 0, // corrected sn + CHUNK_SIZE, location); + // Write data to chunk3 offset = 0; length = 2 * PAGE_SIZE; - // 写chunk3 + // Write chunk3 e_write_3_1 = dataStore_->WriteChunk(3, // chunk id - 1, // sn - writeBuf, - offset, - length, - nullptr); - // paste数据到chunk3 + 1, // sn + writeBuf, offset, length, nullptr); + // Paste data to chunk3 offset = 1 * PAGE_SIZE; length = 2 * PAGE_SIZE; - e_paste_3_1 = dataStore_->PasteChunk(3, // chunk id - pasteBuf, - offset, - length); + e_paste_3_1 = dataStore_->PasteChunk(3, // chunk id + pasteBuf, offset, length); }; - // 检查上面用户操作以后,DataStore层各文件的状态,可重用 + // Check the state of each file in the DataStore layer after the above + // user operations; this check is reusable auto CheckStatus = [&]() { CSErrorCode errorCode; - // chunk1 不存在 + // Chunk1 does not exist errorCode = dataStore_->GetChunkInfo(1, &info1); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // chunk2存在,版本为4,correctedSn为3,存在快照,快照版本为2 + // Chunk2 exists, version 4, correctedSn is 3, snapshot exists, snapshot + // version 2 errorCode = dataStore_->GetChunkInfo(2, &info2); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(4, info2.curSn); ASSERT_EQ(2, info2.snapSn); ASSERT_EQ(3, info2.correctedSn); - // 检查chunk2数据,[0, 1KB]:a , [1KB, 2KB]:b , [2KB, 4KB]:c + // Check chunk2 data, [0, 1KB]:a , [1KB, 2KB]:b , [2KB, 4KB]:c errorCode = dataStore_->ReadChunk(2, fileSn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2_1, readBuf, 1 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf2_2, readBuf + 1 * PAGE_SIZE, 1 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf2_4, readBuf + 2 * PAGE_SIZE, 2 * PAGE_SIZE)); - // 检查chunk2快照数据,[0, 1KB]:a , [1KB, 3KB]:b + // Check chunk2 snapshot data, [0, 1KB]:a , [1KB, 3KB]:b errorCode = dataStore_->ReadSnapshotChunk(2, 2, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2_1, readBuf, 1 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf2_2, readBuf + 1 * PAGE_SIZE, 2 * PAGE_SIZE)); }; - /******************构造重启前的数据******************/ - // 提交操作 + /******************Construct the data before the restart******************/ + // Apply the operations ApplyRequests(); - // 检查每次操作的返回值是否符合预期 + // Check whether the return value of each operation meets expectations ASSERT_EQ(e_write_1_1, 
CSErrorCode::Success); ASSERT_EQ(e_write_2_1, CSErrorCode::Success); ASSERT_EQ(e_del_1_1, CSErrorCode::Success); @@ -340,27 +302,27 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { ASSERT_EQ(e_clone_3_1, CSErrorCode::Success); ASSERT_EQ(e_write_3_1, CSErrorCode::Success); ASSERT_EQ(e_paste_3_1, CSErrorCode::Success); - // 检查此时各个文件的状态 + // Check the status of each file at this point CheckStatus(); - /******************场景一:重启重新加载文件******************/ - // 模拟重启 + /******************Scenario 1: Restart and reload the files******************/ + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.pageSize = PAGE_SIZE; - // 构造新的dataStore_,并重新初始化 - dataStore_ = std::make_shared<CSDataStore>(lfs_, - filePool_, - options); + // Construct a new dataStore_ and reinitialize it + dataStore_ = std::make_shared<CSDataStore>(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查各个chunk的状态,应该与前面的一致 + // Check the status of each chunk; it should be consistent with the + // previous state CheckStatus(); - /******************场景二:恢复日志,重放之前的操作******************/ - // 模拟日志回放 + /******************Scenario 2: Recover the log and replay the previous + * operations******************/ + // Simulate log replay ApplyRequests(); - // 检查每次操作的返回值是否符合预期 + // Check whether the return value of each operation meets expectations ASSERT_EQ(e_write_1_1, CSErrorCode::Success); ASSERT_EQ(e_write_2_1, CSErrorCode::BackwardRequestError); ASSERT_EQ(e_del_1_1, CSErrorCode::Success); diff --git a/test/integration/chunkserver/datastore/datastore_restart_test.cpp b/test/integration/chunkserver/datastore/datastore_restart_test.cpp index f7a9d9ae5a..8d8a64812b 100644 --- a/test/integration/chunkserver/datastore/datastore_restart_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_restart_test.cpp @@ -27,10 +27,10 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_res"; // NOLINT -const string poolDir = "./chunfilepool_int_res"; // NOLINT +const string baseDir = "./data_int_res"; // NOLINT +const string poolDir = "./chunfilepool_int_res"; // NOLINT const string poolMetaPath = "./chunfilepool_int_res.meta"; // NOLINT -// 以下的测试读写数据都在[0, 32kb]范围内 +// All reads and writes in the following tests are within the [0, 32KB] range const uint64_t kMaxSize = 8 * PAGE_SIZE; struct RangeData { @@ -39,9 +39,7 @@ struct RangeData { size_t length; RangeData() = default; RangeData(char ch, off_t off, size_t len) - : data(ch) - , offset(off) - , length(len) {} + : data(ch), offset(off), length(len) {} }; struct ExpectStatus { @@ -52,12 +50,12 @@ struct ExpectStatus { ExpectStatus() : exist(false), chunkData(nullptr), snapshotData(nullptr) {} ~ExpectStatus() { if (chunkData != nullptr) { - delete [] chunkData; + delete[] chunkData; chunkData = nullptr; } if (snapshotData != nullptr) { - delete [] snapshotData; + delete[] snapshotData; snapshotData = nullptr; } } @@ -66,26 +64,16 @@ class ExecStep { public: explicit ExecStep(std::shared_ptr<CSDataStore>* datastore, ChunkID id) - : datastore_(datastore) - , id_(id) - , statusAfterExec_(nullptr) {} + : datastore_(datastore), id_(id), statusAfterExec_(nullptr) {} virtual ~ExecStep() {} - std::shared_ptr<CSDataStore> GetDataStore() { - return (*datastore_); - } + std::shared_ptr<CSDataStore> GetDataStore() { return (*datastore_); } - ChunkID GetChunkID() { - return id_; - } + ChunkID GetChunkID() { return id_; } - std::shared_ptr<ExpectStatus> GetStatus() { - return statusAfterExec_; - } + std::shared_ptr<ExpectStatus> GetStatus() { return statusAfterExec_; } - void ClearStatus() { - 
statusAfterExec_ = nullptr; - } + void ClearStatus() { statusAfterExec_ = nullptr; } virtual void SetExpectStatus() { statusAfterExec_ = std::make_shared<ExpectStatus>(); @@ -100,29 +88,25 @@ class ExecStep { uint32_t endIndex = kMaxSize / PAGE_SIZE - 1; std::vector<BitRange> setRanges; info.bitmap->Divide(0, endIndex, nullptr, &setRanges); - for (auto &range : setRanges) { + for (auto& range : setRanges) { off_t offset = range.beginIndex * PAGE_SIZE; size_t length = (range.endIndex - range.beginIndex + 1) * PAGE_SIZE; - (*datastore_)->ReadChunk(id_, - info.curSn, - (chunkData + offset), - offset, - length); + (*datastore_) + ->ReadChunk(id_, info.curSn, (chunkData + offset), + offset, length); } } else { - (*datastore_)->ReadChunk(id_, - info.curSn, - chunkData, - 0, - kMaxSize); + (*datastore_) + ->ReadChunk(id_, info.curSn, chunkData, 0, kMaxSize); } statusAfterExec_->chunkData = chunkData; - // 快照存在,读取快照数据 + // The snapshot exists; read the snapshot data if (info.snapSn > 0) { char* snapData = new char[kMaxSize]; - (*datastore_)->ReadSnapshotChunk( - id_, info.snapSn, snapData, 0, kMaxSize); + (*datastore_) + ->ReadSnapshotChunk(id_, info.snapSn, snapData, 0, + kMaxSize); statusAfterExec_->snapshotData = snapData; } } // if (err == CSErrorCode::Success) @@ -142,23 +126,22 @@ class ExecWrite : public ExecStep { public: ExecWrite(std::shared_ptr<CSDataStore>* datastore, ChunkID id, SequenceNum sn, RangeData data) - : ExecStep(datastore, id) - , sn_(sn) - , data_(data) {} + : ExecStep(datastore, id), sn_(sn), data_(data) {} ~ExecWrite() {} void Exec() override { char* buf = new char[data_.length]; memset(buf, data_.data, data_.length); - (*datastore_)->WriteChunk(id_, sn_, buf, - data_.offset, data_.length, nullptr); + (*datastore_) + ->WriteChunk(id_, sn_, buf, data_.offset, data_.length, nullptr); } void Dump() override { - printf("WriteChunk, id = %lu, sn = %lu, offset = %lu, " - "size = %lu, data = %c.\n", - id_, sn_, data_.offset, data_.length, data_.data); + printf( + "WriteChunk, id = %lu, sn = %lu, offset = %lu, " + "size = %lu, data = %c.\n", + id_, sn_, data_.offset, data_.length, data_.data); } private: @@ -170,21 +153,21 @@ class ExecPaste : public ExecStep { public: ExecPaste(std::shared_ptr<CSDataStore>* datastore, ChunkID id, RangeData data) - : ExecStep(datastore, id) - , data_(data) {} + : ExecStep(datastore, id), data_(data) {} ~ExecPaste() {} void Exec() override { char* buf = new char[data_.length]; memset(buf, data_.data, data_.length); (*datastore_)->PasteChunk(id_, buf, data_.offset, data_.length); - delete [] buf; + delete[] buf; } void Dump() override { - printf("PasteChunk, id = %lu, offset = %lu, " - "size = %lu, data = %c.\n", - id_, data_.offset, data_.length, data_.data); + printf( + "PasteChunk, id = %lu, offset = %lu, " + "size = %lu, data = %c.\n", + id_, data_.offset, data_.length, data_.data); } private: @@ -195,13 +178,10 @@ class ExecDelete : public ExecStep { public: ExecDelete(std::shared_ptr<CSDataStore>* datastore, ChunkID id, SequenceNum sn) - : ExecStep(datastore, id) - , sn_(sn) {} + : ExecStep(datastore, id), sn_(sn) {} ~ExecDelete() {} - void Exec() override { - (*datastore_)->DeleteChunk(id_, sn_); - } + void Exec() override { (*datastore_)->DeleteChunk(id_, sn_); } void Dump() override { printf("DeleteChunk, id = %lu, sn = %lu.\n", id_, sn_); @@ -213,11 +193,9 @@ class ExecDelete : public ExecStep { class ExecDeleteSnapshot : public ExecStep { public: - ExecDeleteSnapshot(std::shared_ptr<CSDataStore>* datastore, - ChunkID id, - SequenceNum correctedSn) - : ExecStep(datastore, id) - , correctedSn_(correctedSn) {} + 
ExecDeleteSnapshot(std::shared_ptr* datastore, ChunkID id, + SequenceNum correctedSn) + : ExecStep(datastore, id), correctedSn_(correctedSn) {} ~ExecDeleteSnapshot() {} void Exec() override { @@ -225,8 +203,10 @@ class ExecDeleteSnapshot : public ExecStep { } void Dump() override { - printf("DeleteSnapshotChunkOrCorrectSn, " - "id = %lu, correctedSn = %lu.\n", id_, correctedSn_); + printf( + "DeleteSnapshotChunkOrCorrectSn, " + "id = %lu, correctedSn = %lu.\n", + id_, correctedSn_); } private: @@ -238,22 +218,23 @@ class ExecCreateClone : public ExecStep { ExecCreateClone(std::shared_ptr* datastore, ChunkID id, SequenceNum sn, SequenceNum correctedSn, ChunkSizeType size, std::string location) - : ExecStep(datastore, id) - , sn_(sn) - , correctedSn_(correctedSn) - , size_(size) - , location_(location) {} + : ExecStep(datastore, id), + sn_(sn), + correctedSn_(correctedSn), + size_(size), + location_(location) {} ~ExecCreateClone() {} void Exec() override { - (*datastore_)->CreateCloneChunk( - id_, sn_, correctedSn_, size_, location_); + (*datastore_) + ->CreateCloneChunk(id_, sn_, correctedSn_, size_, location_); } void Dump() override { - printf("CreateCloneChunk, id = %lu, sn = %lu, correctedSn = %lu, " - "chunk size = %u, location = %s.\n", - id_, sn_, correctedSn_, size_, location_.c_str()); + printf( + "CreateCloneChunk, id = %lu, sn = %lu, correctedSn = %lu, " + "chunk size = %u, location = %s.\n", + id_, sn_, correctedSn_, size_, location_.c_str()); } private: @@ -269,41 +250,41 @@ class StepList { explicit StepList(ClearFunc clearFunc) : clearFunc_(clearFunc) {} ~StepList() {} - void Add(std::shared_ptr step) { - steps.push_back(step); - } + void Add(std::shared_ptr step) { steps.push_back(step); } - int GetStepCount() { - return steps.size(); - } + int GetStepCount() { return steps.size(); } void ClearEnv() { clearFunc_(); - // 清理每一步的预期状态,因为清理环境后,读取到的数据内容可能会不一样 - // 因为通过FilePool分配的chunk初始内容是不确定的 - for (auto &step : steps) { + // Clear each step's expected status: after the environment is cleaned + // up, the data read back may differ, because the initial content of a + // chunk allocated from the FilePool is undefined + for (auto& step : steps) { step->ClearStatus(); } } - // 重启前,用户最后执行的操作可能为任意步骤, - // 需要验证每个步骤作为最后执行操作时,日志从该步骤前任意步骤进行恢复的幂等性 - // 对于未执行的步骤可以不必验证,只要保证已执行步骤的恢复是幂等的 - // 未执行的步骤恢复一定是幂等的 + // Before a restart, the user's last executed operation may be any step, + // so with each step taken as the last executed operation, we must verify + // that replaying the log from any step before it is idempotent. Steps + // that were never executed need no verification: as long as recovery of + // the executed steps is idempotent, recovery of unexecuted steps is + // trivially idempotent.
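As a reading aid, the check this class performs can be distilled into a standalone sketch. Step and State below are hypothetical stand-ins for ExecStep and the captured chunk status, not the patch's API: for every prefix of the operation history, re-applying any suffix of that prefix must leave the final state unchanged.

#include <functional>
#include <vector>

// Hypothetical stand-ins for ExecStep and the captured chunk status.
struct Step {
    std::function<void()> apply;
};
struct State {
    int digest = 0;
    bool operator==(const State& rhs) const { return digest == rhs.digest; }
};

// For every prefix [0, last] of the history, build the pre-restart state,
// then re-apply the operations from every possible begin point and require
// that the final state never changes, i.e. log replay is idempotent.
inline bool VerifyReplayIdempotence(std::vector<Step>& steps,
                                    const std::function<void()>& resetEnv,
                                    const std::function<State()>& capture) {
    for (size_t last = 0; last < steps.size(); ++last) {
        resetEnv();
        for (size_t i = 0; i <= last; ++i) steps[i].apply();  // history
        const State expected = capture();
        for (size_t begin = 0; begin <= last; ++begin) {      // replay points
            for (size_t i = begin; i <= last; ++i) steps[i].apply();
            if (!(capture() == expected)) return false;       // state changed
        }
    }
    return true;
}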
<< "last step: step" << lastStep + 1; @@ -322,15 +303,16 @@ class StepList { } private: - // 构造初始状态 + // Construction initial state bool ConstructEnv(int lastStep) { - // 模拟日志恢复前执行,用于构造初始Chunk状态,并初始化每一步的预期状态 + // Execute before simulating log recovery to construct the initial Chunk + // state and initialize the expected state for each step for (int curStep = 0; curStep <= lastStep; ++curStep) { std::shared_ptr step = steps[curStep]; step->Exec(); step->SetExpectStatus(); } - // 检查构造出来的状态是否符合预期 + // Check if the constructed state meets expectations if (!CheckStatus(lastStep)) { LOG(ERROR) << "Check chunk status failed." << "last step: step" << lastStep + 1; @@ -339,16 +321,18 @@ class StepList { return true; } - // 从最后步骤前任意一个步骤进行恢复都应该保证幂等性 + // Restoring from any step before the final step should ensure idempotence bool ReplayLog(int lastStep) { - // 模拟从不同的起始位置进行日志恢复 + // Simulate log recovery from different starting locations for (int beginStep = 0; beginStep <= lastStep; ++beginStep) { - // 执行恢复前,chunk的状态保证为预期的状态 + // Before performing the recovery, the state of the chunk is + // guaranteed to be the expected state for (int curStep = beginStep; curStep <= lastStep; ++curStep) { std::shared_ptr step = steps[curStep]; step->Exec(); } - // 每次日志恢复完成检查Chunk状态是否符合预期 + // Check if the Chunk status meets expectations after each log + // recovery is completed if (!CheckStatus(lastStep)) { LOG(ERROR) << "Check chunk status failed." << "begin step: step" << beginStep + 1 @@ -361,8 +345,7 @@ class StepList { bool CheckChunkData(std::shared_ptr step) { std::shared_ptr expectStatus = step->GetStatus(); - std::shared_ptr datastore = - step->GetDataStore(); + std::shared_ptr datastore = step->GetDataStore(); ChunkID id = step->GetChunkID(); CSChunkInfo info; datastore->GetChunkInfo(id, &info); @@ -373,50 +356,41 @@ class StepList { uint32_t endIndex = kMaxSize / PAGE_SIZE - 1; std::vector setRanges; info.bitmap->Divide(0, endIndex, nullptr, &setRanges); - for (auto &range : setRanges) { + for (auto& range : setRanges) { off_t offset = range.beginIndex * PAGE_SIZE; size_t length = (range.endIndex - range.beginIndex + 1) * PAGE_SIZE; - datastore->ReadChunk(id, - info.curSn, - (actualData + offset), - offset, - length); + datastore->ReadChunk(id, info.curSn, (actualData + offset), + offset, length); } } else { - datastore->ReadChunk(id, - info.curSn, - actualData, - 0, - kMaxSize); + datastore->ReadChunk(id, info.curSn, actualData, 0, kMaxSize); } int ret = memcmp(expectStatus->chunkData, actualData, kMaxSize); if (ret != 0) { LOG(ERROR) << "Data readed not as expect." 
- << "chunk id: " << id - << ", ret: " << ret; + << "chunk id: " << id << ", ret: " << ret; for (int i = 0; i < kMaxSize; ++i) { if (*(expectStatus->chunkData + i) != *(actualData + i)) { - LOG(ERROR) << "diff pos: " << i - << ", expect data: " - << *(expectStatus->chunkData + i) - << ", actual data: " << *(actualData + i); + LOG(ERROR) + << "diff pos: " << i + << ", expect data: " << *(expectStatus->chunkData + i) + << ", actual data: " << *(actualData + i); break; } } - delete [] actualData; + delete[] actualData; return false; } - delete [] actualData; + delete[] actualData; return true; } bool CheckSnapData(std::shared_ptr step) { std::shared_ptr expectStatus = step->GetStatus(); - std::shared_ptr datastore = - step->GetDataStore(); + std::shared_ptr datastore = step->GetDataStore(); ChunkID id = step->GetChunkID(); CSChunkInfo info; datastore->GetChunkInfo(id, &info); @@ -424,23 +398,22 @@ class StepList { char* actualData = new char[kMaxSize]; CSErrorCode err; - err = datastore->ReadSnapshotChunk( - id, info.snapSn, actualData, 0, kMaxSize); + err = datastore->ReadSnapshotChunk(id, info.snapSn, actualData, 0, + kMaxSize); if (err != CSErrorCode::Success) { LOG(ERROR) << "Read snapshot failed." - << "Error Code: " << err - << ", chunk id: " << id; - delete [] actualData; + << "Error Code: " << err << ", chunk id: " << id; + delete[] actualData; return false; } if (memcmp(expectStatus->snapshotData, actualData, kMaxSize) != 0) { LOG(ERROR) << "Data readed not as expect." - << "chunk id: " << id; - delete [] actualData; + << "chunk id: " << id; + delete[] actualData; return false; } - delete [] actualData; + delete[] actualData; return true; } @@ -448,57 +421,51 @@ class StepList { std::shared_ptr step = steps[lastStep]; std::shared_ptr expectStatus = step->GetStatus(); - // 获取chunk信息 - std::shared_ptr datastore = - step->GetDataStore(); + // Obtain chunk information + std::shared_ptr datastore = step->GetDataStore(); ChunkID id = step->GetChunkID(); CSChunkInfo info; CSErrorCode err = datastore->GetChunkInfo(id, &info); - // 返回Success说明chunk存在 + // Returning Success indicates that the chunk exists if (err == CSErrorCode::Success) { - // 检查chunk的状态 - if (!expectStatus->exist || - expectStatus->chunkInfo != info) { + // Check the status of the chunk + if (!expectStatus->exist || expectStatus->chunkInfo != info) { LOG(ERROR) << "Chunk info is not as expected!"; LOG(ERROR) << "Expect status(" << "chunk exist: " << expectStatus->exist << ", sn: " << expectStatus->chunkInfo.curSn - << ", correctedSn: " << expectStatus->chunkInfo.correctedSn // NOLINT + << ", correctedSn: " + << expectStatus->chunkInfo.correctedSn // NOLINT << ", snap sn: " << expectStatus->chunkInfo.snapSn << ", isClone: " << expectStatus->chunkInfo.isClone << ", location: " << expectStatus->chunkInfo.location << ")."; LOG(ERROR) << "Actual status(" - << "chunk exist: " << true - << ", sn: " << info.curSn - << ", correctedSn: " << info.correctedSn + << "chunk exist: " << true << ", sn: " << info.curSn + << ", correctedSn: " << info.correctedSn << ", isClone: " << info.isClone - << ", location: " << info.location - << ")."; + << ", location: " << info.location << ")."; return false; } - // 检查chunk的数据状态 - if (!CheckChunkData(step)) - return false; + // Check the data status of the chunk + if (!CheckChunkData(step)) return false; - // 检查快照状态 + // Check snapshot status if (info.snapSn > 0) { - // 检查快照的数据状态 - if (!CheckSnapData(step)) - return false; + // Check the data status of the snapshot + if (!CheckSnapData(step)) return 
false; } } else if (err == CSErrorCode::ChunkNotExistError) { - // 预期chunk存在,实际却不存在 + // The chunk is expected to exist but actually does not if (expectStatus->exist) { LOG(ERROR) << "Chunk is expected to exist, but actual not."; return false; } } else { LOG(ERROR) << "Get chunk info failed." - << "chunk id: " << id - << ", error code: " << err; + << "chunk id: " << id << ", error code: " << err; return false; } return true; @@ -529,7 +496,7 @@ TEST_F(RestartTestSuit, BasicTest) { ChunkID id = 1; SequenceNum sn = 1; - // 第一步:WriteChunk,写[0, 8kb]区域 + // Step 1: WriteChunk, write the [0, 8kb] area RangeData step1Data; step1Data.offset = 0; step1Data.length = 2 * PAGE_SIZE; @@ -538,7 +505,7 @@ TEST_F(RestartTestSuit, BasicTest) { std::make_shared(&dataStore_, id, sn, step1Data); list.Add(step1); - // 第二步:WriteChunk,写[4kb, 12kb]区域 + // Step 2: WriteChunk, write the [4kb, 12kb] area RangeData step2Data; step2Data.offset = PAGE_SIZE; step2Data.length = 2 * PAGE_SIZE; @@ -547,7 +514,7 @@ TEST_F(RestartTestSuit, BasicTest) { std::make_shared(&dataStore_, id, sn, step2Data); list.Add(step2); - // 第三步:DeleteChunk + // Step 3: DeleteChunk std::shared_ptr step3 = std::make_shared(&dataStore_, id, sn); list.Add(step3); @@ -561,7 +528,7 @@ TEST_F(RestartTestSuit, SnapshotTest) { ChunkID id = 1; SequenceNum sn = 1; - // 第一步:WriteChunk,写[0, 8kb]区域 + // Step 1: WriteChunk, write the [0, 8kb] area RangeData step1Data; step1Data.offset = 0; step1Data.length = 2 * PAGE_SIZE; @@ -570,10 +537,10 @@ TEST_F(RestartTestSuit, SnapshotTest) { std::make_shared(&dataStore_, id, sn, step1Data); list.Add(step1); - // 模拟用户打了快照,此时sn +1 + // Simulate the user taking a snapshot; sn increases by 1 ++sn; - // 第二步:WriteChunk,写[4kb, 12kb]区域 + // Step 2: WriteChunk, write the [4kb, 12kb] area RangeData step2Data; step2Data.offset = PAGE_SIZE; step2Data.length = 2 * PAGE_SIZE; @@ -582,20 +549,21 @@ TEST_F(RestartTestSuit, SnapshotTest) { std::make_shared(&dataStore_, id, sn, step2Data); list.Add(step2); - // 第三步:用户请求删除快照 + // Step 3: The user requests deleting the snapshot std::shared_ptr step3 = std::make_shared(&dataStore_, id, sn); list.Add(step3); - // 模拟再次打快照 sn +1 + // Simulate taking another snapshot; sn increases by 1 ++sn; - // 第四步:此次快照过程中没有数据写入,直接DeleteSnapshotOrCorrectedSn + // Step 4: No data was written during this snapshot, so call + // DeleteSnapshotOrCorrectedSn directly std::shared_ptr step4 = std::make_shared(&dataStore_, id, sn); list.Add(step4); - // 第五步:WriteChunk,写[8kb, 16kb]区域 + // Step 5: WriteChunk, write the [8kb, 16kb] area RangeData step5Data; step5Data.offset = 2 * PAGE_SIZE; step5Data.length = 2 * PAGE_SIZE; @@ -604,10 +572,10 @@ TEST_F(RestartTestSuit, SnapshotTest) { std::make_shared(&dataStore_, id, sn, step5Data); list.Add(step5); - // 模拟再次打快照 sn +1 + // Simulate taking another snapshot; sn increases by 1 ++sn; - // 第六步:WriteChunk,写[4kb, 12kb]区域 + // Step 6: WriteChunk, write the [4kb, 12kb] area RangeData step6Data; step6Data.offset = PAGE_SIZE; step6Data.length = 2 * PAGE_SIZE; @@ -616,20 +584,20 @@ TEST_F(RestartTestSuit, SnapshotTest) { std::make_shared(&dataStore_, id, sn, step6Data); list.Add(step6); - // 第七步:用户请求删除快照 + // Step 7: The user requests deleting the snapshot std::shared_ptr step7 = std::make_shared(&dataStore_, id, sn); list.Add(step7); - // 模拟再次打快照 sn +1 + // Simulate taking another snapshot; sn increases by 1 ++sn; - // 第八步:用户请求删除快照 + // Step 8: The user requests deleting the snapshot std::shared_ptr step8 = std::make_shared(&dataStore_, id, sn); list.Add(step8); - // 第九步:用户请求删除chunk + // Step 9: The user requests deleting the chunk
std::shared_ptr step9 = std::make_shared(&dataStore_, id, sn); list.Add(step9); @@ -637,7 +605,8 @@ TEST_F(RestartTestSuit, SnapshotTest) { ASSERT_TRUE(list.VerifyLogReplay()); } -// 测试克隆场景,以及克隆后打快照的组合场景 +// Test the clone scenario, and the combined scenario of taking a snapshot +// after cloning TEST_F(RestartTestSuit, CloneTest) { StepList list(clearFunc); @@ -646,17 +615,12 @@ SequenceNum correctedSn = 0; std::string location("test@s3"); - // 第一步:通过CreateCloneChunk创建clone chunk - std::shared_ptr step1 = - std::make_shared(&dataStore_, - id, - sn, - correctedSn, - CHUNK_SIZE, - location); + // Step 1: Create a clone chunk through CreateCloneChunk + std::shared_ptr step1 = std::make_shared( &dataStore_, id, sn, correctedSn, CHUNK_SIZE, location); list.Add(step1); - // 第二步:WriteChunk,写[0kb, 8kb]区域 + // Step 2: WriteChunk, write the [0kb, 8kb] area RangeData step2Data; step2Data.offset = 0; step2Data.length = 2 * PAGE_SIZE; @@ -665,7 +629,7 @@ TEST_F(RestartTestSuit, CloneTest) { std::make_shared(&dataStore_, id, sn, step2Data); list.Add(step2); - // 第三步:PasteChunk,写[4kb, 12kb]区域 + // Step 3: PasteChunk, write the [4kb, 12kb] area RangeData step3Data; step3Data.offset = PAGE_SIZE; step3Data.length = 2 * PAGE_SIZE; @@ -674,7 +638,7 @@ TEST_F(RestartTestSuit, CloneTest) { std::make_shared(&dataStore_, id, step3Data); list.Add(step3); - // 第四步:通过PasteChunk 遍写chunk + // Step 4: Overwrite the entire chunk through PasteChunk RangeData step4Data; step4Data.offset = 0; step4Data.length = CHUNK_SIZE; @@ -683,10 +647,10 @@ TEST_F(RestartTestSuit, CloneTest) { std::make_shared(&dataStore_, id, step4Data); list.Add(step4); - // 模拟打快照 + // Simulate taking a snapshot ++sn; - // 第五步:WriteChunk,写[4kb, 12kb]区域 + // Step 5: WriteChunk, write the [4kb, 12kb] area RangeData step5Data; step5Data.offset = PAGE_SIZE; step5Data.length = 2 * PAGE_SIZE; @@ -695,12 +659,12 @@ TEST_F(RestartTestSuit, CloneTest) { std::make_shared(&dataStore_, id, sn, step5Data); list.Add(step5); - // 第六步:用户请求删除快照 + // Step 6: The user requests deleting the snapshot std::shared_ptr step6 = std::make_shared(&dataStore_, id, sn); list.Add(step6); - // 第七步:DeleteChunk + // Step 7: DeleteChunk std::shared_ptr step7 = std::make_shared(&dataStore_, id, sn); list.Add(step7); @@ -708,7 +672,7 @@ ASSERT_TRUE(list.VerifyLogReplay()); } -// 测试恢复场景 +// Test the recovery scenario TEST_F(RestartTestSuit, RecoverTest) { StepList list(clearFunc); @@ -717,20 +681,15 @@ TEST_F(RestartTestSuit, RecoverTest) { SequenceNum correctedSn = 5; std::string location("test@s3"); - // 第一步:通过CreateCloneChunk创建clone chunk - std::shared_ptr step1 = - std::make_shared(&dataStore_, - id, - sn, - correctedSn, - CHUNK_SIZE, - location); + // Step 1: Create a clone chunk through CreateCloneChunk + std::shared_ptr step1 = std::make_shared( &dataStore_, id, sn, correctedSn, CHUNK_SIZE, location); list.Add(step1); - // 数据写入的版本应为最新的版本 + // Data should be written with the latest version sn = correctedSn; - // 第二步:PasteChunk,写[0kb, 8kb]区域 + // Step 2: PasteChunk, write the [0kb, 8kb] area RangeData step2Data; step2Data.offset = 0; step2Data.length = 2 * PAGE_SIZE; @@ -739,7 +698,7 @@ TEST_F(RestartTestSuit, RecoverTest) { std::make_shared(&dataStore_, id, step2Data); list.Add(step2); - // 第三步:PasteChunk,写[4kb, 12kb]区域 + // Step 3: PasteChunk, write the [4kb, 12kb] area RangeData step3Data; step3Data.offset = PAGE_SIZE; step3Data.length = 2 * PAGE_SIZE; @@ -748,7 +707,7 @@ TEST_F(RestartTestSuit, 
RecoverTest) { std::make_shared(&dataStore_, id, sn, step3Data); list.Add(step3); - // 第四步:通过PasteChunk 遍写chunk + // Step 4: Write the chunk through PasteChunk RangeData step4Data; step4Data.offset = 0; step4Data.length = CHUNK_SIZE; @@ -757,7 +716,7 @@ TEST_F(RestartTestSuit, RecoverTest) { std::make_shared(&dataStore_, id, sn, step4Data); list.Add(step4); - // 第五步:DeleteChunk + // Step 5: DeleteChunk std::shared_ptr step5 = std::make_shared(&dataStore_, id, sn); list.Add(step5); @@ -765,7 +724,9 @@ TEST_F(RestartTestSuit, RecoverTest) { ASSERT_TRUE(list.VerifyLogReplay()); } -// 按照实际用户使用从场景随机产生每一步的操作,校验一定操作个数下都能保证幂等性 +// Randomly generate each step of the operation from the scene based on actual +// user usage, and verify that a certain number of operations can ensure +// idempotence TEST_F(RestartTestSuit, RandomCombine) { StepList list(clearFunc); @@ -775,7 +736,7 @@ TEST_F(RestartTestSuit, RandomCombine) { std::string location("test@s3"); std::srand(std::time(nullptr)); - // 写随机地址的数据,在[0, kMaxSize]范围内写 + // Write random address data within the range of [0, kMaxSize] auto randWriteOrPaste = [&](bool isPaste) { int pageCount = kMaxSize / PAGE_SIZE; RangeData stepData; @@ -793,21 +754,17 @@ TEST_F(RestartTestSuit, RandomCombine) { } }; - // 随机的克隆过程 + // Random cloning process auto randClone = [&]() { - // 二分之一概率,模拟恢复过程 - if (std::rand() % 2 == 0) - correctedSn = 2; + // Half probability, simulating the recovery process + if (std::rand() % 2 == 0) correctedSn = 2; std::shared_ptr createStep = - std::make_shared(&dataStore_, - id, - sn, - correctedSn, - CHUNK_SIZE, - location); + std::make_shared(&dataStore_, id, sn, correctedSn, + CHUNK_SIZE, location); list.Add(createStep); - // 克隆过程模拟5个操作,Write或者Paste,三分之一概率Write + // The cloning process simulates 5 operations, Write or Paste, with a + // one-third probability of Write for (int i = 0; i < 5; ++i) { if (std::rand() % 3 == 0) { randWriteOrPaste(false); @@ -816,7 +773,8 @@ TEST_F(RestartTestSuit, RandomCombine) { } } - // 遍写一遍chunk,可以用于模拟后续写入创建快照 + // Write the chunk over and over again, which can be used to simulate + // subsequent writes and create snapshots RangeData pasteData; pasteData.offset = 0; pasteData.length = CHUNK_SIZE; @@ -826,11 +784,12 @@ TEST_F(RestartTestSuit, RandomCombine) { list.Add(pasteStep); }; - // 随机的快照过程 + // Random snapshot process auto randSnapshot = [&](int* stepCount) { - // 快照需要将版本+1 + // Snapshots require version+1 ++sn; - // 三分之一的概率调DeleteSnapshot,一旦调了DeleteSnapshot就退出快照 + // One third of the probability is to call DeleteSnapshot, and once + // DeleteSnapshot is called, it exits the snapshot while (true) { if (std::rand() % 3 == 0) { std::shared_ptr step = @@ -844,14 +803,14 @@ TEST_F(RestartTestSuit, RandomCombine) { } }; - // 创建clone chunk, + // Create a clone chunk randClone(); - // 设置最长执行步数 + // Set the maximum number of execution steps int maxSteps = 30; int stepCount = 0; while (stepCount < maxSteps) { - // 三分之一的概率会模拟快照过程 + // One-third of the probability will simulate the snapshot process if (std::rand() % 3 == 0) { randSnapshot(&stepCount); } else { @@ -860,7 +819,7 @@ TEST_F(RestartTestSuit, RandomCombine) { } } - // 最后删除chunk + // Finally, delete the chunk std::shared_ptr lastStep = std::make_shared(&dataStore_, id, sn); list.Add(lastStep); diff --git a/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp b/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp index 61dc402c21..f1dfa68b26 100644 --- 
a/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_sna"; // NOLINT -const string poolDir = "./chunkfilepool_int_sna"; // NOLINT +const string baseDir = "./data_int_sna"; // NOLINT +const string poolDir = "./chunkfilepool_int_sna"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_sna.meta"; // NOLINT class SnapshotTestSuit : public DatastoreIntegrationBase { @@ -36,14 +36,16 @@ class SnapshotTestSuit : public DatastoreIntegrationBase { }; /** - * 快照场景测试 - * 构造存在两个chunk的文件,分别为chunk1和chunk2,做如下操作 - * 1.写chunk1 - * 2.模拟第一次打快照,转储过程中写chunk1并产生快照,chunk2未发生数据写入 - * 3.删除快照,然后向chunk2中写入数据 - * 4.模拟第二次打快照,转储过程中写chunk1,但是不写chunk2 - * 5.删除快照,再次向chunk2写入数据 - * 6.删除文件 + * Snapshot scenario test + * Construct a file with two chunks, chunk1 and chunk2, and do the following: + * 1. Write chunk1 + * 2. Simulate taking the first snapshot: write chunk1 during the dump so a + * snapshot chunk is generated; no data is written to chunk2 + * 3. Delete the snapshot, then write data to chunk2 + * 4. Simulate taking a second snapshot: write chunk1 during the dump, + * but not chunk2 + * 5. Delete the snapshot and write data to chunk2 again + * 6. Delete the file */ TEST_F(SnapshotTestSuit, SnapshotTest) { SequenceNum fileSn = 1; @@ -55,39 +57,34 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { CSChunkInfo chunk1Info; CSChunkInfo chunk2Info; - /******************构造初始环境,创建chunk1******************/ + /****************** Construct the initial environment: create chunk1 + * ******************/ - // 向chunk1的[0, 12KB)区域写入数据 "1" + // Write data '1' to the [0, 12KB) area of chunk1 offset = 0; length = 3 * PAGE_SIZE; // 12KB char buf1_1[3 * PAGE_SIZE]; memset(buf1_1, '1', length); errorCode = dataStore_->WriteChunk(id1, // id - fileSn, - buf1_1, - offset, - length, - nullptr); + fileSn, buf1_1, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - /******************场景一:第一次给文件打快照******************/ + /******************Scenario 1: Take the first snapshot of the + * file******************/ - // 模拟打快照,此时文件版本递增 - ++fileSn; // fileSn == 2 + // Simulate taking a snapshot; the file version increments + ++fileSn; // fileSn == 2 - // 向chunk1的[4KB, 8KB)区域写入数据 “2” + // Write data '2' to the [4KB, 8KB) area of chunk1 offset = 1 * PAGE_SIZE; length = 1 * PAGE_SIZE; char buf1_2[3 * PAGE_SIZE]; memset(buf1_2, '2', 3 * PAGE_SIZE); errorCode = dataStore_->WriteChunk(id1, // id - fileSn, - buf1_2, - offset, - length, - nullptr); + fileSn, buf1_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk1的信息,且各项信息符合预期 + // chunk1's info can be fetched, and every field meets + // expectations errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk1Info.curSn); @@ -96,256 +93,242 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { size_t readSize = 3 * PAGE_SIZE; char readbuf[3 * PAGE_SIZE]; - // 读chunk1快照文件的[0, 12KB)区域,读出来数据应该都是‘1’ + // Read the [0, 12KB) area of the chunk1 snapshot file; the data read + // should all be '1' errorCode = dataStore_->ReadSnapshotChunk(id1, // chunk id - 1, // snap sn + 1, // snap sn readbuf, 0, // offset readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1, readbuf, readSize)); - // 重复写入,验证不会重复cow,读快照时[4KB, 8KB)区域的数据应为“1” + // Repeated writes must not trigger COW again: when reading the snapshot, + // the data in the [4KB, 8KB) area should still be '1'
errorCode = dataStore_->WriteChunk(id1, // id - fileSn, - buf1_2, - offset, - length, - nullptr); + fileSn, buf1_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 写未cow过的区域,写入[0,4kb]区域 + // Write an area that has not been COWed yet: write the [0, 4kb) area offset = 0; length = PAGE_SIZE; errorCode = dataStore_->WriteChunk(id1, // id - fileSn, - buf1_2, - offset, - length, - nullptr); + fileSn, buf1_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 写部分cow过的区域,写入[4kb,12kb]区域 + // Write a partially COWed area: write the [4kb, 12kb) + // area offset = PAGE_SIZE; length = 2 * PAGE_SIZE; errorCode = dataStore_->WriteChunk(id1, // id - fileSn, - buf1_2, - offset, - length, - nullptr); + fileSn, buf1_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk1的信息,且各项信息符合预期 + // chunk1's info can be fetched, and every field meets + // expectations errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, chunk1Info.curSn); ASSERT_EQ(1, chunk1Info.snapSn); ASSERT_EQ(0, chunk1Info.correctedSn); - // 此时读chunk1返回数据内容应该为[0,12KB]:2 - // 读chunk1快照返回的数据内容应该为[0, 12KB):1 - // 其余地址空间的数据可以不用保证 + // At this point, reading chunk1 should return [0, 12KB): 2, and reading + // the chunk1 snapshot should return [0, 12KB): 1; data in the rest of + // the address space need not be guaranteed readSize = 3 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); - errorCode = dataStore_->ReadChunk(id1, // chunk id - fileSn, - readbuf, - 0, // offset + errorCode = dataStore_->ReadChunk(id1, // chunk id + fileSn, readbuf, + 0, // offset readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_2, readbuf, readSize)); - // 读chunk1快照文件的[0, 12KB)区域,读出来数据应该还是‘1’ + // When reading the [0, 12KB) area of the chunk1 snapshot file, the data + // read should still be '1' readSize = 3 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadSnapshotChunk(id1, // chunk id - 1, // snap sn + 1, // snap sn readbuf, 0, // offset readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1, readbuf, readSize)); - // ReadSnapshotChun,请求offset+length > page size + // ReadSnapshotChunk where the requested offset + length exceeds the + // chunk size offset = CHUNK_SIZE - PAGE_SIZE; readSize = 2 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadSnapshotChunk(id1, // chunk id - 1, // snap sn + 1, // snap sn readbuf, offset, // offset readSize); ASSERT_EQ(errorCode, CSErrorCode::InvalidArgError); - // 读chunk2快照文件,返回ChunkNotExistError + // Reading chunk2's snapshot file returns ChunkNotExistError readSize = 2 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadSnapshotChunk(id2, // chunk id - 1, // snap sn + 1, // snap sn readbuf, 0, // offset readSize); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError);
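The scenario-1 assertions above pin down a page-granularity copy-on-write rule. As a reading aid only, here is a simplified model of the rule being asserted, with invented names and a fixed 8-page chunk; this is not the datastore's real implementation:

#include <algorithm>
#include <cstdint>
#include <map>
#include <vector>

// The first write to a page after the sequence number advances copies the
// old page into the snapshot; later writes to the same page must not copy
// again ("no duplicate COW").
struct ChunkModel {
    uint64_t curSn = 0;
    uint64_t snapSn = 0;                 // 0 means no snapshot chunk
    std::map<uint64_t, char> pages;      // page index -> content byte
    std::map<uint64_t, char> snapPages;  // pages preserved by COW
    std::vector<bool> cowed = std::vector<bool>(8, false);

    void Write(uint64_t sn, uint64_t page, char data) {
        if (sn > curSn) {                // first write under a newer sn
            snapSn = curSn;              // old version becomes the snapshot
            curSn = sn;
            std::fill(cowed.begin(), cowed.end(), false);
        }
        if (snapSn > 0 && !cowed[page]) {
            snapPages[page] = pages.count(page) ? pages[page] : '\0';
            cowed[page] = true;          // COW at most once per page
        }
        pages[page] = data;              // then apply the new data
    }
};

Under this model the reads above hold: after the sn == 2 writes, the chunk returns '2' wherever it was written, while the snapshot still returns the '1' data captured before the first COW.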
- /******************场景二:第一次快照结束,删除快照******************/ + /******************Scenario 2: The first snapshot ends; delete the + * snapshot******************/ - // 请求删chunk1的快照,返回成功,并删除快照 + // Request deleting chunk1's snapshot; it returns success and the + // snapshot is deleted errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id1, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk1信息,符合预期 + // Check chunk1's info, as expected errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk1Info.curSn); ASSERT_EQ(0, chunk1Info.snapSn); ASSERT_EQ(0, chunk1Info.correctedSn); - // 请求删chunk2的快照,返回成功 + // Request deleting chunk2's snapshot; it returns success errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id2, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 向chunk2的[0, 8KB)区域写入数据 "a" + // Write data 'a' to the [0, 8KB) area of chunk2 offset = 0; length = 2 * PAGE_SIZE; // 8KB char buf2_2[2 * PAGE_SIZE]; memset(buf2_2, 'a', length); errorCode = dataStore_->WriteChunk(id2, // id - fileSn, - buf2_2, - offset, - length, - nullptr); + fileSn, buf2_2, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk1信息,符合预期 + // Check chunk2's info, as expected errorCode = dataStore_->GetChunkInfo(id2, &chunk2Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk2Info.curSn); ASSERT_EQ(0, chunk2Info.snapSn); ASSERT_EQ(0, chunk2Info.correctedSn); - /******************场景三:第二次打快照******************/ + /******************Scenario 3: Take the second snapshot******************/ - // 模拟第二次打快照,版本递增 + // Simulate taking a second snapshot; the version increments ++fileSn; // fileSn == 3 - // 向chunk1的[0KB, 8KB)区域写入数据 “3” + // Write data '3' to the [0KB, 8KB) area of chunk1 offset = 0; length = 2 * PAGE_SIZE; char buf1_3[2 * PAGE_SIZE]; memset(buf1_3, '3', length); errorCode = dataStore_->WriteChunk(id1, // id - fileSn, - buf1_3, - offset, - length, - nullptr); + fileSn, buf1_3, offset, length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk1的信息,且各项信息符合预期 + // chunk1's info can be fetched, and every field meets + // expectations errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk1Info.curSn); ASSERT_EQ(2, chunk1Info.snapSn); ASSERT_EQ(0, chunk1Info.correctedSn); - // 此时读chunk1返回数据内容应该为[0,8KB]:3,[8KB, 12KB]:2 - // 读chunk1快照返回的数据内容应该为[0, 12KB]:2 - // 其余地址空间的数据可以不用保证 + // At this point, reading chunk1 should return [0, 8KB): 3 and + // [8KB, 12KB): 2; reading the chunk1 snapshot should return + // [0, 12KB): 2; data in the rest of the address space need not be + // guaranteed readSize = 3 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); - errorCode = dataStore_->ReadChunk(id1, // chunk id - fileSn, - readbuf, - 0, // offset + errorCode = dataStore_->ReadChunk(id1, // chunk id + fileSn, readbuf, + 0, // offset readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_3, readbuf, 2 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_2, readbuf + 2 * PAGE_SIZE, 1 * PAGE_SIZE)); - // 读chunk1快照文件的[0, 12KB)区域,数据内容为‘2’ + // Read the [0, 12KB) area of the chunk1 snapshot file; the data content + // is '2' readSize = 3 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadSnapshotChunk(id1, // chunk id - 2, // snap sn + 2, // snap sn readbuf, 0, // offset readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_2, readbuf, readSize)); - // 读chunk2快照返回的数据内容应该为[0, 8KB):a,其余地址空间的数据可以不用保证 + // Reading the chunk2 snapshot should return [0, 8KB): 'a'; data in the + // rest of the address space need not be guaranteed readSize = 2 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadSnapshotChunk(id2, // chunk id - 2, // snap sn + 2, // snap sn readbuf, 0, // offset readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2_2, readbuf, readSize));
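Scenario 4 below exercises the other half of DeleteSnapshotChunkOrCorrectSn. A hedged model of the rule its assertions imply, again with invented names and not the real implementation: if a snapshot chunk exists it is deleted; otherwise the call only records fileSn as correctedSn, so later writes at that sequence number do not trigger a new COW.

#include <cstdint>

struct ChunkMeta {
    uint64_t curSn = 0;
    uint64_t snapSn = 0;
    uint64_t correctedSn = 0;
};

// Sketch of the dual behavior checked in scenario 4.
inline void DeleteSnapshotChunkOrCorrectSnModel(ChunkMeta* c,
                                                uint64_t fileSn) {
    if (c->snapSn > 0) {
        c->snapSn = 0;            // a snapshot chunk existed: drop it
    } else if (fileSn > c->correctedSn) {
        c->correctedSn = fileSn;  // no snapshot: only correct the sn
    }
}

// A write COWs only when its sn is newer than both the current sn and the
// corrected sn; this is why chunk2's later writes create no snapshot.
inline bool WriteTriggersCow(const ChunkMeta& c, uint64_t writeSn) {
    return writeSn > c.curSn && writeSn > c.correctedSn;
}

With chunk2 at curSn == 2 and correctedSn corrected to 3, a write at sn == 3 merely bumps curSn to 3 (3 > 2 but not 3 > 3), matching the assertions that follow.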
- /******************场景四:第二次快照结束,删除快照******************/ + /******************Scenario 4: The second snapshot ends; delete the + * snapshot******************/ - // 请求删chunk1的快照,返回成功 + // Request deleting chunk1's snapshot; it returns success errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id1, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk1信息,符合预期 + // Check chunk1's info, as expected errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk1Info.curSn); ASSERT_EQ(0, chunk1Info.snapSn); ASSERT_EQ(0, chunk1Info.correctedSn); - // 请求删chunk2的快照,返回成功 + // Request deleting chunk2's snapshot; it returns success errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id2, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk2信息,符合预期 + // Check chunk2's info, as expected errorCode = dataStore_->GetChunkInfo(id2, &chunk2Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, chunk2Info.curSn); ASSERT_EQ(0, chunk2Info.snapSn); ASSERT_EQ(fileSn, chunk2Info.correctedSn); - // 向chunk2的[0KB, 4KB)区域写入数据 “b” + // Write data 'b' to the [0KB, 4KB) area of chunk2 offset = 0; length = 1 * PAGE_SIZE; char buf2_3[1 * PAGE_SIZE]; memset(buf2_3, 'b', length); errorCode = dataStore_->WriteChunk(id2, // id - fileSn, - buf2_3, - offset, - length, - nullptr); + fileSn, buf2_3, offset, length, nullptr); - // 检查chunk2信息,符合预期,curSn变为3,不会产生快照 + // Check chunk2's info, as expected: curSn becomes 3 and no snapshot is + // generated errorCode = dataStore_->GetChunkInfo(id2, &chunk2Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk2Info.curSn); ASSERT_EQ(0, chunk2Info.snapSn); ASSERT_EQ(fileSn, chunk2Info.correctedSn); - // 再次向chunk2的[0KB, 8KB)区域写入数据 + // Write data to the [0KB, 8KB) area of chunk2 again errorCode = dataStore_->WriteChunk(id2, // id - fileSn, - buf2_3, - offset, - length, - nullptr); + fileSn, buf2_3, offset, length, nullptr); - // 检查chunk2信息,chunk信息不变,不会产生快照 + // Check chunk2's info: the chunk info is unchanged and no snapshot is + // generated errorCode = dataStore_->GetChunkInfo(id2, &chunk2Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk2Info.curSn); ASSERT_EQ(0, chunk2Info.snapSn); ASSERT_EQ(fileSn, chunk2Info.correctedSn); - /******************场景五:用户删除文件******************/ + /******************Scenario 5: The user deletes the file******************/ - // 此时删除Chunk1,返回Success + // Deleting Chunk1 now returns Success errorCode = dataStore_->DeleteChunk(id1, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 此时删除Chunk2,返回Success + // Deleting Chunk2 now returns Success errorCode = dataStore_->DeleteChunk(id2, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->GetChunkInfo(id2, &chunk1Info); diff --git a/test/integration/chunkserver/datastore/datastore_stress_test.cpp b/test/integration/chunkserver/datastore/datastore_stress_test.cpp index 2364d61dd2..ae59850db5 100644 --- a/test/integration/chunkserver/datastore/datastore_stress_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_stress_test.cpp @@ -25,8 +25,8 @@ namespace curve { namespace chunkserver { -const string baseDir = "./data_int_str"; // NOLINT -const string poolDir = 
"./chunkfilepool_int_str"; // NOLINT +const string baseDir = "./data_int_str"; // NOLINT +const string poolDir = "./chunkfilepool_int_str"; // NOLINT const string poolMetaPath = "./chunkfilepool_int_str.meta"; // NOLINT class StressTestSuit : public DatastoreIntegrationBase { @@ -64,7 +64,7 @@ TEST_F(StressTestSuit, StressTest) { auto RunStress = [&](int threadNum, int rwPercent, int ioNum) { uint64_t beginTime = TimeUtility::GetTimeofDayUs(); - Thread *threads = new Thread[threadNum]; + Thread* threads = new Thread[threadNum]; int readThreadNum = threadNum * rwPercent / 100; int ioNumAvg = ioNum / threadNum; int idRange = 100; @@ -92,27 +92,27 @@ TEST_F(StressTestSuit, StressTest) { printf("===============TEST WRITE==================\n"); - // 测试单线程性能 + // Testing Single Thread Performance RunStress(1, 0, 10000); - // 10个线程 + // 10 threads RunStress(10, 0, 50000); - // 50个线程 + // 50 threads RunStress(50, 0, 100000); printf("===============TEST READ==================\n"); - // 测试单线程性能 + // Testing Single Thread Performance RunStress(1, 100, 10000); - // 10个线程 + // 10 threads RunStress(10, 100, 50000); - // 50个线程 + // 50 threads RunStress(50, 100, 100000); printf("===============TEST READWRITE==================\n"); - // 测试单线程性能 + // Testing Single Thread Performance RunStress(1, 50, 10000); - // 10个线程 + // 10 threads RunStress(10, 50, 50000); - // 50个线程 + // 50 threads RunStress(50, 50, 100000); } diff --git a/test/integration/client/chunkserver_exception_test.cpp b/test/integration/client/chunkserver_exception_test.cpp index aa676fc718..df41c9b07b 100644 --- a/test/integration/client/chunkserver_exception_test.cpp +++ b/test/integration/client/chunkserver_exception_test.cpp @@ -20,22 +20,22 @@ * Author: tongguangxun */ -#include -#include #include +#include +#include -#include +#include #include -#include // NOLINT -#include // NOLINT -#include +#include // NOLINT +#include +#include // NOLINT #include -#include -#include // NOLINT +#include +#include // NOLINT -#include "src/common/timeutility.h" #include "include/client/libcurve.h" #include "src/client/inflight_controller.h" +#include "src/common/timeutility.h" #include "test/integration/client/common/file_operation.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" @@ -48,12 +48,12 @@ curve::client::InflightControl inflightContl; using curve::CurveCluster; const std::vector mdsConf{ - { "--confPath=./conf/mds.conf" }, - { "--log_dir=./runlog/ChunkserverException" }, - { "--mdsDbName=module_exception_curve_chunkserver" }, - { "--sessionInterSec=20" }, - { "--etcdAddr=127.0.0.1:22233" }, - { "--updateToRepoSec=5" }, + {"--confPath=./conf/mds.conf"}, + {"--log_dir=./runlog/ChunkserverException"}, + {"--mdsDbName=module_exception_curve_chunkserver"}, + {"--sessionInterSec=20"}, + {"--etcdAddr=127.0.0.1:22233"}, + {"--updateToRepoSec=5"}, }; const std::vector chunkserverConf4{ @@ -119,9 +119,9 @@ const std::vector chunkserverConf6{ {"-walFilePoolDir=./moduleException6/walfilepool/"}, {"-walFilePoolMetaPath=./moduleException6/walfilepool.meta"}}; -std::string mdsaddr = // NOLINT +std::string mdsaddr = // NOLINT "127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124"; -std::string logpath = "./runlog/ChunkserverException"; // NOLINT +std::string logpath = "./runlog/ChunkserverException"; // NOLINT const std::vector clientConf{ std::string("mds.listen.addr=") + mdsaddr, @@ -129,9 +129,11 @@ const std::vector clientConf{ std::string("chunkserver.rpcTimeoutMS=1000"), 
std::string("chunkserver.opMaxRetry=10"), }; -class CSModuleException : public ::testing::Test { - public: - void SetUp() { +class CSModuleException : public ::testing::Test +{ +public: + void SetUp() + { std::string confPath = "./test/integration/client/config/client.conf.1"; system("mkdir ./runlog/ChunkserverException"); system("rm -rf module_exception_test_chunkserver.etcd"); @@ -143,15 +145,16 @@ class CSModuleException : public ::testing::Test { cluster->PrepareConfig(confPath, clientConf); - // 1. 启动etcd + // 1. Start etcd pid_t pid = cluster->StartSingleEtcd( 1, "127.0.0.1:22233", "127.0.0.1:22234", std::vector{ - "--name=module_exception_test_chunkserver" }); + "--name=module_exception_test_chunkserver"}); LOG(INFO) << "etcd 1 started on 127.0.0.1:22233:22234, pid = " << pid; ASSERT_GT(pid, 0); - // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 + // 2. Start one mds first, make it a leader, and then start the other + // two mds nodes pid = cluster->StartSingleMDS(1, "127.0.0.1:22122", 22128, mdsConf, true); LOG(INFO) << "mds 1 started on 127.0.0.1:22122, pid = " << pid; @@ -168,7 +171,7 @@ class CSModuleException : public ::testing::Test { ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); - // 3. 创建物理池 + // 3. Creating a physical pool std::string createPPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( " -cluster_map=./test/integration/client/" @@ -184,13 +187,15 @@ class CSModuleException : public ::testing::Test { LOG(INFO) << "exec cmd: " << createPPCmd; int ret = 0; int retry = 0; - while (retry < 5) { + while (retry < 5) + { ret = system(createPPCmd.c_str()); - if (ret == 0) break; + if (ret == 0) + break; retry++; } - // 4. 创建chunkserver + // 4. Create chunkserver pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid; @@ -207,7 +212,8 @@ class CSModuleException : public ::testing::Test { ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // 5. Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first std::string createLPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( @@ -221,26 +227,29 @@ class CSModuleException : public ::testing::Test { ret = 0; retry = 0; - while (retry < 5) { + while (retry < 5) + { ret = system(createLPCmd.c_str()); - if (ret == 0) break; + if (ret == 0) + break; retry++; } ASSERT_EQ(ret, 0); - // 6. 初始化client配置 + // 6. Initialize client configuration ret = Init(confPath.c_str()); ASSERT_EQ(ret, 0); - // 7. 创建一个文件 + // 7. Create a file fd = curve::test::FileCommonOperation::Open("/test1", "curve"); ASSERT_NE(fd, -1); - // 8. 先睡眠10s,让chunkserver选出leader + // 8. Sleep for 10 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(10)); } - void TearDown() { + void TearDown() + { ::Close(fd); UnInit(); ASSERT_EQ(0, cluster->StopCluster()); @@ -250,19 +259,25 @@ class CSModuleException : public ::testing::Test { "module_exception_test_chunkserver.etcd"); } - void CreateOpenFileBackend() { + void CreateOpenFileBackend() + { createDone = false; createOrOpenFailed = false; - auto func = [&]() { - for (int i = 0; i < 20; i++) { + auto func = [&]() + { + for (int i = 0; i < 20; i++) + { std::string filename = "/" + std::to_string(i); int ret = curve::test::FileCommonOperation::Open(filename, "curve"); ret == -1 ? 
createOrOpenFailed = true : 0; - if (ret != -1) { + if (ret != -1) + { ::Close(ret); - } else { + } + else + { break; } } } @@ -276,44 +291,55 @@ class CSModuleException : public ::testing::Test { t.detach(); } - void WaitBackendCreateDone() { + void WaitBackendCreateDone() + { std::unique_lock lk(createMtx); - createCV.wait(lk, [&]() { return createDone; }); + createCV.wait(lk, [&]() + { return createDone; }); } /** - * 监测client io能否在预期时间内正常下发 - * @param: off是当前需要下发IO的偏移 - * @param: size是下发io的大小 - * @param: predictTimeS是预期在多少秒内IO可以恢复 - * @param[out]: failCount为当前io下发中错误返回的数量 - * @return: 如果io在预期时间内能够正常下发,则返true,否则返回false + * Monitor whether client IO can be issued normally within the expected + * time + * @param: off is the offset of the IO to issue + * @param: size is the size of the IO to issue + * @param: predictTimeS is the number of seconds within which IO is + * expected to recover + * @param[out]: failCount is the number of error returns while issuing + * the IO + * @return: true if IO can be issued normally within the expected time, + * otherwise false */ bool MonitorResume(uint64_t off, uint64_t size, uint64_t predictTimeS, - uint64_t* failCount = nullptr) { + uint64_t *failCount = nullptr) + { inflightContl.SetMaxInflightNum(16); resumeFlag = false; ioFailedCount = 0; - auto wcb = [](CurveAioContext* context) { + auto wcb = [](CurveAioContext *context) + { inflightContl.DecremInflightNum(); - if (context->ret == context->length) { + if (context->ret == context->length) + { std::unique_lock lk(resumeMtx); resumeFlag = true; resumeCV.notify_all(); - } else { + } + else + { ioFailedCount++; } delete context; }; - char* writebuf = new char[size]; + char *writebuf = new char[size]; memset(writebuf, 'a', size); - auto iofunc = [&]() { + auto iofunc = [&]() + { std::this_thread::sleep_for(std::chrono::seconds(predictTimeS)); inflightContl.WaitInflightComeBack(); - CurveAioContext* context = new CurveAioContext; + CurveAioContext *context = new CurveAioContext; context->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; context->offset = off; context->length = size; @@ -335,7 +361,7 @@ class CSModuleException : public ::testing::Test { failCount == nullptr ? 0 : (*failCount = ioFailedCount); - // 唤醒io线程 + // Wake up the IO thread iothread.join(); inflightContl.WaitInflightAllComeBack(); @@ -345,182 +371,195 @@ int fd; - // 是否出现挂卸载失败 + // Whether a mount/unmount failure occurred bool createOrOpenFailed; bool createDone; std::mutex createMtx; std::condition_variable createCV; - CurveCluster* cluster; + CurveCluster *cluster; };
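The cases below all reason in multiples of the Raft election timeout. To make the bounds concrete, here is the arithmetic with placeholder numbers; the values are assumptions for illustration only, since the real ones come from the chunkserver configuration, which this file does not read:

// Assumed values, for illustration only.
const int kElectionTimeoutS = 1;  // raft election timeout
const int kCopysetNum = 100;      // copysets on the restarted chunkserver
const int kLoadConcurrency = 5;   // copysets loaded concurrently on start

// Losing one replica of a copyset stalls IO for at most two election
// rounds:
const int kMaxStallS = 2 * kElectionTimeoutS;  // = 2s

// A restarted chunkserver with slow start loads copysets in batches, so
// recovery is bounded by:
const int kSlowStartBoundS =
    (kCopysetNum / kLoadConcurrency) * kElectionTimeoutS;  // = 20s

This is why the MonitorResume() calls below use small bounds (1 to 2 seconds) for single-failure cases, and a generous 80 seconds wherever a restarted chunkserver may be in slow start.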
-// 测试环境拓扑:在单节点上启动一个client、三个chunkserver、三个mds、一个etcd +// Test environment topology: start one client, three chunkservers, three mds, +// and one etcd on a single node -TEST_F(CSModuleException, ChunkserverException) { +TEST_F(CSModuleException, ChunkserverException) +{ LOG(INFO) << "current case: KillOneChunkserverThenRestartTheChunkserver"; /********* KillOneChunkserverThenRestartTheChunkserver **********/ - // 1. 测试重启一个chunkserver - // 2.预期: - // a. 集群状态正常时:client读写请求可以正常下发 - // b. kill一台chunkserver:client 读写请求最多卡顿 - // election_timeout*2s可以正常读写 - // c. 恢复chunkserver:client 读写请求无影响 - // 1. 集群最初状态,io正常下发 + // 1. Test restarting one chunkserver + // 2. Expected: + // a. When the cluster is healthy: client read/write requests are + // issued normally + // b. Kill one chunkserver: client read/write requests stall for at + // most election_timeout*2s, then read/write normally + // c. Recover the chunkserver: no impact on client read/write + // requests + // 1. In the cluster's initial state, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. kill掉一个chunkserver + // 2. Kill one chunkserver ASSERT_EQ(0, cluster->StopChunkServer(1)); - // 3. kill掉一个chunkserver之后,client的io预期最多会在2*electtime后恢复 + // 3. After killing one chunkserver, the client's IO is expected to recover + // within at most 2 * election_timeout ASSERT_TRUE(MonitorResume(0, 4096, 2)); - // 4. 拉起刚才被kill的chunkserver + // 4. Pull up the chunkserver that was just killed pid_t pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid; ASSERT_GT(pid, 0); - // 5. 重新拉起对client IO没有影响 + // 5. Restarting it has no impact on client IO ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: HangOneChunkserverThenResumeTheChunkserver"; /********* HangOneChunkserverThenResumeTheChunkserver ***********/ - // 1. hang一台chunkserver,然后恢复hang的chunkserver - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. hang一台chunkserver:client - // 读写请求最多卡顿election_timeout*2s可以正常读写 - // c. 恢复chunkserver:client 读写请求无影响 - // 1. 集群最初状态,io正常下发 + // 1. Hang one chunkserver, then recover it + // 2. Expectations: + // a. When the cluster is healthy: client read/write requests are + // issued normally + // b. Hang one chunkserver: client read/write requests stall for at + // most election_timeout*2s, then read/write normally + // c. Recover the chunkserver: no impact on client read/write + // requests + // 1. In the cluster's initial state, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. hang一个chunkserver + // 2. Hang one chunkserver ASSERT_EQ(0, cluster->HangChunkServer(1)); - // 3. hang一个chunkserver之后,client的io预期最多会在2*electtime后恢复 + // 3. After hanging one chunkserver, the client's IO is expected to recover + // within at most 2 * election_timeout ASSERT_TRUE(MonitorResume(0, 4096, 2)); - // 4. 拉起刚才被hang的chunkserver + // 4. Pull up the chunkserver that was just hung ASSERT_EQ(0, cluster->RecoverHangChunkServer(1)); - // 5. 重新拉起对client IO没有影响 + // 5. Recovering it has no impact on client IO ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: KillTwoChunkserverThenRestartTheChunkserver"; /******** KillTwoChunkserverThenRestartTheChunkserver *********/ - // 1. 测试重启两个chunkserver - // 2.预期: - // a. 集群状态正常时:client读写请求可以正常下发 - // b. kill两台chunkserver:预期client IO持续hang,新写IO和覆盖写都hang - // 拉起被kill中的一台chunkserver:client IO预期在最多在 - // (chunkserver启动回放数据+2*election_timeout)时间内恢复读写 - // c. 拉起另外一台kill的chunkserver:client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Test restarting two chunkservers + // 2. Expected: + // a. When the cluster is healthy: client read/write requests are + // issued normally + // b. Kill two chunkservers: client IO is expected to keep hanging; + // both new writes and overwrites hang. Pull up one of the killed + // chunkservers: client IO is expected to recover read/write + // within at most (chunkserver replay time + 2*election_timeout) + // c. Pull up the other killed chunkserver: no impact on client IO + // 1. In the cluster's initial state, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1));
 + // 2. Kill two chunkservers ASSERT_EQ(0, cluster->StopChunkServer(1)); ASSERT_EQ(0, cluster->StopChunkServer(2)); - // 3. kill掉两个chunkserver, io无法正常下发 + // 3. With two chunkservers killed, IO cannot be issued normally ASSERT_FALSE(MonitorResume(0, 4096, 30)); - // 4. 拉起刚才被kill的chunkserver的第一个 + // 4. Pull up the first of the chunkservers that were just killed pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid; ASSERT_GT(pid, 0); - // 5. 拉起刚才被kill的chunkserver的第一个, - // client的io预期最多会在2*electtime后恢复 - // 如果配置了慢启动,则需要等待 + // 5. After pulling up the first killed chunkserver, the client's IO is + // expected to recover within at most 2 * election_timeout; + // if slow start is configured, wait // (copysetNum / load_concurrency) * election_timeout ASSERT_TRUE(MonitorResume(0, 4096, 80)); - // 6. 拉起刚才被kill的chunkserver的第二个 + // 6. Pull up the second of the killed chunkservers pid = cluster->StartSingleChunkServer(2, "127.0.0.1:22126", chunkserverConf5); LOG(INFO) << "chunkserver 2 started on 127.0.0.1:22126, pid = " << pid; ASSERT_GT(pid, 0); - // 7. 集群io不影响,正常下发 + // 7. Cluster IO is unaffected and issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: HangTwoChunkserverThenResumeTheChunkserver"; /******* HangTwoChunkserverThenResumeTheChunkserver **********/ - // 1. hang两台chunkserver,然后恢复hang的chunkserver - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. hang两台chunkserver:client IO持续hang,新写IO和覆盖写都hang - // c. 恢复其中的一台chunkserver:client IO 恢复读写, - // 从恢复chunkserver到client IO恢复时间在election_timeout*2 - // d. 恢复另外一台hang的chunkserver:client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Hang two chunkservers, then recover them + // 2. Expectations: + // a. When the cluster is healthy: client read/write requests are + // issued normally + // b. Hang two chunkservers: client IO keeps hanging; both new + // writes and overwrites hang + // c. Recover one of the chunkservers: client IO resumes read/write; + // the time from recovering the chunkserver to client IO recovery + // is within election_timeout*2 + // d. Recover the other hung chunkserver: no impact on client IO + // 1. In the cluster's initial state, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. hang掉两个个chunkserver + // 2. Hang two chunkservers ASSERT_EQ(0, cluster->HangChunkServer(1)); ASSERT_EQ(0, cluster->HangChunkServer(2)); - // 3. hang两个chunkserver, io无法正常下发 + // 3. With two chunkservers hung, IO cannot be issued normally ASSERT_FALSE(MonitorResume(0, 4096, 2)); - // 4. 拉起刚才被hang的chunkserver的第一个 + // 4. Pull up the first of the hung chunkservers ASSERT_EQ(0, cluster->RecoverHangChunkServer(1)); - // 5. 拉起刚才被hang的chunkserver的第一个, - // client的io预期最多会在2*electtime后恢复 - // 如果配置了慢启动,则需要等待 + // 5. After recovering the first hung chunkserver, the client's IO is + // expected to recover within at most 2 * election_timeout; + // if slow start is configured, wait // (copysetNum / load_concurrency) * election_timeout ASSERT_TRUE(MonitorResume(0, 4096, 80)); - // 6. 拉起刚才被hang的chunkserver的第二个 + // 6. Pull up the second of the hung chunkservers ASSERT_EQ(0, cluster->RecoverHangChunkServer(2)); - // 7. 集群io不影响,正常下发 + // 7. Cluster IO is unaffected and issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: KillThreeChunkserverThenRestartTheChunkserver"; /******** KillThreeChunkserverThenRestartTheChunkserver ******/ - // 1. 测试重启三个chunkserver - // 2.预期: - // a. 
集群状态正常时:client读写请求可以正常下发 - // b. 关闭三台chunkserver:client IO hang - // c. 重启一台chunkserver:client IO hang - // d. 重启第二台chunkserver:client IO hang, - // 直到chunkserver完全恢复,IO恢复。 - // 恢复时间约等于(chunkserver启动回放数据+2*election_timeout) - // e. 重启第三台chunkserver:client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Test restarting three chunkservers + // 2. Expected: + // a. When the cluster is healthy: client read/write requests are + // issued normally + // b. Stop all three chunkservers: client IO hangs + // c. Restart one chunkserver: client IO still hangs + // d. Restart the second chunkserver: client IO hangs until the + // chunkserver has fully recovered, then IO recovers; the recovery + // time is roughly (chunkserver replay time + 2*election_timeout) + // e. Restart the third chunkserver: no impact on client IO + // 1. In the cluster's initial state, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. kill掉三个chunkserver + // 2. Kill three chunkservers ASSERT_EQ(0, cluster->StopChunkServer(1)); ASSERT_EQ(0, cluster->StopChunkServer(2)); ASSERT_EQ(0, cluster->StopChunkServer(3)); - // 3. kill掉三个chunkserver, io无法正常下发 + // 3. With three chunkservers killed, IO cannot be issued normally ASSERT_FALSE(MonitorResume(0, 4096, 2)); - // 4. 拉起刚才被kill的chunkserver的第一个 + // 4. Pull up the first of the killed chunkservers pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid; ASSERT_GT(pid, 0); - // 5. 只有一个chunkserver工作, io无法正常下发 + // 5. With only one chunkserver working, IO cannot be issued normally ASSERT_FALSE(MonitorResume(0, 4096, 80)); - // 6. 拉起刚才被kill的chunkserver的第二个 + // 6. Pull up the second of the killed chunkservers pid = cluster->StartSingleChunkServer(2, "127.0.0.1:22126", chunkserverConf5); LOG(INFO) << "chunkserver 2 started on 127.0.0.1:22126, pid = " << pid; ASSERT_GT(pid, 0); - // 7. client的io恢复 + // 7. The client's IO recovers ASSERT_TRUE(MonitorResume(0, 4096, 80)); - // 8. 拉起其他被kil的chunkserver + // 8. Pull up the remaining killed chunkserver pid = cluster->StartSingleChunkServer(3, "127.0.0.1:22127", chunkserverConf6); LOG(INFO) << "chunkserver 3 started on 127.0.0.1:22127, pid = " << pid; @@ -528,37 +567,37 @@ TEST_F(CSModuleException, ChunkserverException) { LOG(INFO) << "current case: HangThreeChunkserverThenResumeTheChunkserver"; /******** HangThreeChunkserverThenResumeTheChunkserver **********/ - // 1. hang三台chunkserver,然后恢复hang的chunkserver - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. hang三台chunkserver:client IO hang - // c. 恢复一台chunkserver:client IO hang - // d. 再恢复一台chunkserver:预期在 - // election_timeout*2左右的时间,client IO恢复 - // e. 恢复最后一台chunkserver:预期client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Hang three chunkservers, then recover them + // 2. Expectations: + // a. When the cluster is healthy: client read/write requests are + // issued normally + // b. Hang three chunkservers: client IO hangs + // c. Recover one chunkserver: client IO still hangs + // d. Recover a second chunkserver: client IO is expected to recover + // in roughly election_timeout*2 + // e. Recover the last chunkserver: no impact on client IO expected + // 1. In the cluster's initial state, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. hang掉三个chunkserver + // 2. Hang three chunkservers ASSERT_EQ(0, cluster->HangChunkServer(1)); ASSERT_EQ(0, cluster->HangChunkServer(2)); ASSERT_EQ(0, cluster->HangChunkServer(3)); - // 3. 
hang三个chunkserver, io无法正常下发 + // 3. Hang three chunkservers, IO cannot be distributed normally ASSERT_FALSE(MonitorResume(0, 4096, 30)); - // 4. 拉起刚才被hang的chunkserver的第一个 + // 4. Pull up the first chunkserver that was just hung ASSERT_EQ(0, cluster->RecoverHangChunkServer(1)); - // 5. 只有一个chunkserver工作, io无法正常下发 + // 5. Only one chunkserver is working, IO cannot be issued normally ASSERT_FALSE(MonitorResume(0, 4096, 80)); - // 6. 拉起刚才被hang的chunkserver的第二个 + // 6. Pull up the second chunkserver that was just hung ASSERT_EQ(0, cluster->RecoverHangChunkServer(2)); ASSERT_EQ(0, cluster->RecoverHangChunkServer(3)); - // 7. client的io预期最多会在2*electtime s内恢复 - // 如果配置了慢启动,则需要等待 - // (copysetNum / load_concurrency) * election_timeout + // 7. The client's IO is expected to recover within a maximum of 2 * + // electtime seconds If slow start is configured, wait (copysetNum / + // load_concurrency) * election_timeout ASSERT_TRUE(MonitorResume(0, 4096, 80)); } diff --git a/test/integration/client/common/file_operation.cpp b/test/integration/client/common/file_operation.cpp index 44dfc186a5..c5943a629f 100644 --- a/test/integration/client/common/file_operation.cpp +++ b/test/integration/client/common/file_operation.cpp @@ -43,15 +43,15 @@ int FileCommonOperation::Open(const std::string& filename, memset(userinfo.owner, 0, 256); memcpy(userinfo.owner, owner.c_str(), owner.size()); - // 先创建文件 - int ret = Create(filename.c_str(), &userinfo, 100*1024*1024*1024ul); + // Create a file first + int ret = Create(filename.c_str(), &userinfo, 100 * 1024 * 1024 * 1024ul); if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::EXISTS) { LOG(ERROR) << "file create failed! " << ret << ", filename = " << filename; return -1; } - // 再打开文件 + // Reopen File int fd = ::Open(filename.c_str(), &userinfo); if (fd < 0 && ret != -LIBCURVE_ERROR::FILE_OCCUPIED) { LOG(ERROR) << "Open file failed!"; @@ -68,8 +68,8 @@ void FileCommonOperation::Close(int fd) { } int FileCommonOperation::Open(const std::string& filename, - const std::string& owner, - uint64_t stripeUnit, uint64_t stripeCount) { + const std::string& owner, uint64_t stripeUnit, + uint64_t stripeCount) { assert(globalclient != nullptr); C_UserInfo_t userinfo; @@ -84,7 +84,7 @@ int FileCommonOperation::Open(const std::string& filename, context.stripeUnit = stripeUnit; context.stripeCount = stripeCount; - // 先创建文件 + // Create a file first int ret = globalclient->Create2(context); if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::EXISTS) { LOG(ERROR) << "file create failed! 
" << ret @@ -92,7 +92,7 @@ int FileCommonOperation::Open(const std::string& filename, return -1; } - // 再打开文件 + // Reopen File int fd = ::Open(filename.c_str(), &userinfo); if (fd < 0 && ret != -LIBCURVE_ERROR::FILE_OCCUPIED) { LOG(ERROR) << "Open file failed!"; @@ -102,5 +102,5 @@ int FileCommonOperation::Open(const std::string& filename, return fd; } -} // namespace test -} // namespace curve +} // namespace test +} // namespace curve diff --git a/test/integration/client/common/file_operation.h b/test/integration/client/common/file_operation.h index 0414146eff..c46b7add46 100644 --- a/test/integration/client/common/file_operation.h +++ b/test/integration/client/common/file_operation.h @@ -30,17 +30,18 @@ namespace curve { namespace test { class FileCommonOperation { public: - /** - * 指定文件名,打开文件,如果没创建则先创建,返回fd - */ + /** + * Specify a file name, open the file, if not created, create it first, + * return fd + */ static int Open(const std::string& filename, const std::string& owner); static void Close(int fd); static int Open(const std::string& filename, const std::string& owner, - uint64_t stripeUnit, uint64_t stripeCount); + uint64_t stripeUnit, uint64_t stripeCount); }; -} // namespace test -} // namespace curve +} // namespace test +} // namespace curve #endif // TEST_INTEGRATION_CLIENT_COMMON_FILE_OPERATION_H_ diff --git a/test/integration/client/mds_exception_test.cpp b/test/integration/client/mds_exception_test.cpp index ad0d82b093..6eb665621a 100644 --- a/test/integration/client/mds_exception_test.cpp +++ b/test/integration/client/mds_exception_test.cpp @@ -20,22 +20,22 @@ * Author: tongguangxun */ -#include -#include #include +#include +#include -#include +#include #include -#include // NOLINT -#include // NOLINT -#include +#include // NOLINT +#include +#include // NOLINT #include -#include -#include // NOLINT +#include +#include // NOLINT -#include "src/common/timeutility.h" #include "include/client/libcurve.h" #include "src/client/inflight_controller.h" +#include "src/common/timeutility.h" #include "test/integration/client/common/file_operation.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" @@ -51,11 +51,11 @@ bool testIORead = false; using curve::CurveCluster; const std::vector mdsConf{ - { "--confPath=./conf/mds.conf" }, - { "--log_dir=./runlog/MDSExceptionTest" }, - { "--mdsDbName=module_exception_curve_mds" }, - { "--sessionInterSec=20" }, - { "--etcdAddr=127.0.0.1:22230" }, + {"--confPath=./conf/mds.conf"}, + {"--log_dir=./runlog/MDSExceptionTest"}, + {"--mdsDbName=module_exception_curve_mds"}, + {"--sessionInterSec=20"}, + {"--etcdAddr=127.0.0.1:22230"}, }; const std::vector chunkserverConf1{ @@ -124,9 +124,9 @@ const std::vector chunkserverConf3{ {"-walFilePoolDir=./moduleException3/walfilepool/"}, {"-walFilePoolMetaPath=./moduleException3/walfilepool.meta"}}; -std::string mdsaddr = // NOLINT - "127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224"; // NOLINT -std::string logpath = "./runlog/MDSExceptionTest"; // NOLINT +std::string mdsaddr = // NOLINT + "127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224"; // NOLINT +std::string logpath = "./runlog/MDSExceptionTest"; // NOLINT const std::vector clientConf{ std::string("mds.listen.addr=") + mdsaddr, @@ -135,9 +135,11 @@ const std::vector clientConf{ std::string("chunkserver.opMaxRetry=10"), }; -class MDSModuleException : public ::testing::Test { - public: - void SetUp() { +class MDSModuleException : public ::testing::Test +{ +public: + void SetUp() + { std::string confPath = 
"./test/integration/client/config/client.conf"; system("mkdir ./runlog/MDSExceptionTest"); system("rm -rf module_exception_test_mds.etcd"); @@ -149,14 +151,15 @@ class MDSModuleException : public ::testing::Test { cluster->PrepareConfig(confPath, clientConf); - // 1. 启动etcd + // 1. Start etcd pid_t pid = cluster->StartSingleEtcd( 1, "127.0.0.1:22230", "127.0.0.1:22231", std::vector{"--name=module_exception_test_mds"}); LOG(INFO) << "etcd 1 started on 127.0.0.1:22230:22231, pid = " << pid; ASSERT_GT(pid, 0); - // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 + // 2. Start one mds first, make it a leader, and then start the other + // two mds nodes pid = cluster->StartSingleMDS(0, "127.0.0.1:22222", 22240, mdsConf, true); LOG(INFO) << "mds 0 started on 127.0.0.1:22222, pid = " << pid; @@ -173,7 +176,7 @@ class MDSModuleException : public ::testing::Test { ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); - // 3. 创建物理池 + // 3. Creating a physical pool std::string createPPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( " -cluster_map=./test/integration/client/" @@ -189,14 +192,16 @@ class MDSModuleException : public ::testing::Test { LOG(INFO) << "exec cmd: " << createPPCmd; int ret = 0; int retry = 0; - while (retry < 5) { + while (retry < 5) + { ret = system(createPPCmd.c_str()); - if (ret == 0) break; + if (ret == 0) + break; retry++; } ASSERT_EQ(ret, 0); - // 4. 创建chunkserver + // 4. Create chunkserver pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22225", chunkserverConf1); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22225, pid = " << pid; @@ -212,7 +217,8 @@ class MDSModuleException : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // 5. Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first std::string createLPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( @@ -226,22 +232,24 @@ class MDSModuleException : public ::testing::Test { ret = 0; retry = 0; - while (retry < 5) { + while (retry < 5) + { ret = system(createLPCmd.c_str()); - if (ret == 0) break; + if (ret == 0) + break; retry++; } ASSERT_EQ(ret, 0); - // 6. 初始化client配置 + // 6. Initialize client configuration ret = Init(confPath.c_str()); ASSERT_EQ(ret, 0); - // 7. 创建一个文件 + // 7. Create a file fd = curve::test::FileCommonOperation::Open("/test1", "curve"); ASSERT_NE(fd, -1); - // 8. 先睡眠10s,让chunkserver选出leader + // 8. Sleep for 10 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(5)); ipmap[0] = "127.0.0.1:22222"; @@ -253,7 +261,8 @@ class MDSModuleException : public ::testing::Test { configmap[2] = mdsConf; } - void TearDown() { + void TearDown() + { ::Close(fd); UnInit(); @@ -263,22 +272,28 @@ class MDSModuleException : public ::testing::Test { system("rm -rf moduleException1 moduleException2 moduleException3"); } - void CreateOpenFileBackend() { + void CreateOpenFileBackend() + { createDone = false; createOrOpenFailed = false; - auto func = [&]() { + auto func = [&]() + { static int num = 0; - for (int i = num; i < num + 20; i++) { + for (int i = num; i < num + 20; i++) + { std::string filename = "/" + std::to_string(i); LOG(INFO) << "now create file: " << filename; int ret = curve::test::FileCommonOperation::Open(filename, "curve"); ret == -1 ? 
createOrOpenFailed = true : 0; - if (ret != -1) { + if (ret != -1) + { ::Close(ret); std::this_thread::sleep_for(std::chrono::milliseconds(500)); - } else { + } + else + { break; } } @@ -293,44 +308,55 @@ class MDSModuleException : public ::testing::Test { t.detach(); } - void WaitBackendCreateDone() { + void WaitBackendCreateDone() + { std::unique_lock lk(createMtx); - createCV.wait(lk, [&]() { return createDone; }); + createCV.wait(lk, [&]() + { return createDone; }); } /** - * 监测client io能否在预期时间内正常下发 - * @param: off是当前需要下发IO的偏移 - * @param: size是下发io的大小 - * @param: predictTimeS是预期在多少秒内IO可以恢复 - * @param[out]: failCount为当前io下发中错误返回的数量 - * @return: 如果io在预期时间内嫩够正常下发,则返true,否则返回false + * Monitor whether client io can be issued normally within the expected time + * @param: off is the offset that currently requires issuing IO + * @param: size is the size of the distributed io + * @param: predictTimeS is the expected number of seconds in which IO can be + * restored + * @param[out]: failCount is the number of error returns in the current io + * distribution + * @return: If the io is issued normally within the expected time, return + * true; otherwise, return false */ - bool MonitorResume(uint64_t off, uint64_t size, uint64_t predictTimeS) { + bool MonitorResume(uint64_t off, uint64_t size, uint64_t predictTimeS) + { inflightContl.SetMaxInflightNum(16); resumeFlag = false; ioFailedCount = 0; - auto wcb = [](CurveAioContext* context) { + auto wcb = [](CurveAioContext *context) + { inflightContl.DecremInflightNum(); - if (context->ret == context->length) { + if (context->ret == context->length) + { std::unique_lock lk(resumeMtx); resumeFlag = true; resumeCV.notify_all(); - } else { + } + else + { ioFailedCount++; } LOG(INFO) << "end aiowrite with ret = " << context->ret; delete context; }; - char* writebuf = new char[size]; + char *writebuf = new char[size]; memset(writebuf, 'a', size); - auto iofunc = [&]() { + auto iofunc = [&]() + { std::this_thread::sleep_for(std::chrono::seconds(predictTimeS)); inflightContl.WaitInflightComeBack(); - CurveAioContext* context = new CurveAioContext; + CurveAioContext *context = new CurveAioContext; context->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; context->offset = off; context->length = size; @@ -352,7 +378,7 @@ class MDSModuleException : public ::testing::Test { ret = resumeFlag; } - // 唤醒io线程 + // Wake up IO thread iothread.join(); inflightContl.WaitInflightAllComeBack(); @@ -360,25 +386,28 @@ class MDSModuleException : public ::testing::Test { return ret; } - /**下发一个写请求 - * @param: offset是当前需要下发IO的偏移 - * @param: size是下发IO的大小 - * @return: IO是否下发成功 + /** Send a write request + * @param: offset is the offset that currently requires issuing IO + * @param: size is the size of the issued IO + * @return: Whether the IO was successfully issued */ - bool SendAioWriteRequest(uint64_t offset, uint64_t size) { + bool SendAioWriteRequest(uint64_t offset, uint64_t size) + { writeIOReturnFlag = false; - auto writeCallBack = [](CurveAioContext* context) { - // 无论IO是否成功,只要返回,就置为true + auto writeCallBack = [](CurveAioContext *context) + { + // Regardless of whether IO is successful or not, as long as it + // returns, it is set to true writeIOReturnFlag = true; - char* buffer = reinterpret_cast(context->buf); + char *buffer = reinterpret_cast(context->buf); delete[] buffer; delete context; }; - char* buffer = new char[size]; + char *buffer = new char[size]; memset(buffer, 'a', size); - CurveAioContext* context = new CurveAioContext(); + CurveAioContext *context = new 
CurveAioContext(); context->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; context->offset = offset; context->length = size; @@ -388,26 +417,30 @@ class MDSModuleException : public ::testing::Test { return AioWrite(fd, context) == 0; } - /** 下发一个写请求并读取进行数据验证 - * @param: fd 卷fd - * @param: 当前需要下发io的偏移 - * @param:下发io的大小 - * @return: 数据是否一致 - */ - void VerifyDataConsistency(int fd, uint64_t offset, uint64_t size) { - char* writebuf = new char[size]; - char* readbuf = new char[size]; + /** Send a write request and read for data validation + * @param: fd volume fd + * @param: The offset that currently needs to be issued for IO + * @param: The size of the distributed IO + * @return: Whether the data is consistent + */ + void VerifyDataConsistency(int fd, uint64_t offset, uint64_t size) + { + char *writebuf = new char[size]; + char *readbuf = new char[size]; unsigned int i; - LOG(INFO) << "VerifyDataConsistency(): offset " << - offset << ", size " << size; - for (i = 0; i < size; i++) { + LOG(INFO) << "VerifyDataConsistency(): offset " << offset << ", size " + << size; + for (i = 0; i < size; i++) + { writebuf[i] = ('a' + std::rand() % 26); } - // 开始写 - auto wcb = [](CurveAioContext* context) { - if (context->ret == context->length) { + // Start writing + auto wcb = [](CurveAioContext *context) + { + if (context->ret == context->length) + { testIOWrite = true; } std::unique_lock lk(resumeMtx); @@ -415,8 +448,10 @@ class MDSModuleException : public ::testing::Test { delete context; }; - auto writefunc = [&]() { - CurveAioContext* context = new CurveAioContext;; + auto writefunc = [&]() + { + CurveAioContext *context = new CurveAioContext; + ; context->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; context->offset = offset; context->length = size; @@ -434,9 +469,11 @@ class MDSModuleException : public ::testing::Test { writeThread.join(); ASSERT_TRUE(testIOWrite); - // 开始读 - auto rcb = [](CurveAioContext* context) { - if (context->ret == context->length) { + // Start reading + auto rcb = [](CurveAioContext *context) + { + if (context->ret == context->length) + { testIORead = true; } std::unique_lock lk(resumeMtx); @@ -444,8 +481,10 @@ class MDSModuleException : public ::testing::Test { delete context; }; - auto readfunc = [&]() { - CurveAioContext* context = new CurveAioContext;; + auto readfunc = [&]() + { + CurveAioContext *context = new CurveAioContext; + ; context->op = LIBCURVE_OP::LIBCURVE_OP_READ; context->offset = offset; context->length = size; @@ -471,53 +510,63 @@ class MDSModuleException : public ::testing::Test { int fd; - // 是否出现挂卸载失败 + // Whether mounting or unmounting fails. bool createOrOpenFailed; bool createDone; std::mutex createMtx; std::condition_variable createCV; - CurveCluster* cluster; + CurveCluster *cluster; std::map ipmap; std::map> configmap; }; #define segment_size 1 * 1024 * 1024 * 1024ul -// 测试环境拓扑:在单节点上启动一个client、三个chunkserver、三个mds、一个etcd +// Test environment topology: Start one client, three chunkservers, three mds, +// and one etcd on a single node -TEST_F(MDSModuleException, MDSExceptionTest) { +TEST_F(MDSModuleException, MDSExceptionTest) +{ LOG(INFO) << "current case: KillOneInserviceMDSThenRestartTheMDS"; /********** KillOneInserviceMDSThenRestartTheMDS *************/ - // 1. 重启一台正在服务的mds - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. 关闭一台mds,在mds服务切换到另一台mds之前, - // client 新写IO会hang,挂卸载服务会异常 - // c. mds服务切换后,预期client IO无影响,挂卸载服务正常 - // d. 重新拉起mds,client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Restarting a currently serving MDS. + // 2. Expectations: + // a. 
When the cluster is in a normal state, client read and write + // requests can be issued normally. b. When shutting down an MDS, before + // the MDS service switches to another MDS, + // new write IO from clients will hang, and mount/unmount services + // will behave abnormally. + // c. After the MDS service switches, it is expected that client IO will + // be unaffected, and mount/unmount services will be normal. d. When + // bringing the MDS back up, client IO will be unaffected. + // 1. In the initial state of the cluster, IO can be issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. kill一台正在服务的mds,在启动的时候第一台mds当选leader + // 2. Kill an MDS that is currently in service, and when it is started, the + // first MDS is selected as the leader int serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); ASSERT_EQ(0, cluster->StopMDS(serviceMDSID)); - // 3. 启动后台挂卸载线程,预期挂卸载会出现失败 + // 3. Start the background suspend and unload thread, and expect the suspend + // and unload to fail CreateOpenFileBackend(); - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. Start background IO monitoring and start writing from the next segment + // to trigger the getorallocate logic + // follower mds cluster normal service after renewing session expiration + // (20s renewal) ASSERT_TRUE(MonitorResume(segment_size, 4096, 25)); - // 5. 等待后台挂卸载监测结束 + // 5. Waiting for the end of backend suspension and uninstallation + // monitoring WaitBackendCreateDone(); - // 6. 判断当前挂卸载情况 + // 6. Determine the current suspension and uninstallation situation ASSERT_TRUE(createOrOpenFailed); - // 7. 拉起被kill的进程 + // 7. Pulling up the process of being killed pid_t pid = cluster->StartSingleMDS(serviceMDSID, ipmap[serviceMDSID], 22240 + serviceMDSID, configmap[serviceMDSID], false); @@ -525,85 +574,103 @@ TEST_F(MDSModuleException, MDSExceptionTest) { << ", pid = " << pid; ASSERT_GT(pid, 0); - // 8. 再拉起被kill的mds,对集群没有影响 + // 8. Pulling up the killed mds again has no impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: KillOneNotInserviceMDSThenRestartTheMDS"; /*********** KillOneNotInserviceMDSThenRestartTheMDS *******/ - // 1. 重启一台不在服务的mds - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. 关闭一台不在服务的mds,预期client IO无影响,挂卸载服务正常 - // 1. 集群最初状态,io正常下发 + // 1. Restart an MDS that is not in service + // 2. Expectations + // a. When the cluster status is normal: client read and write requests + // can be issued normally b. Turn off an MDS that is not in service, + // expect no impact on client IO, and suspend and uninstall the service + // normally + // 1. The initial state of the cluster, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. kill一台不在服务的mds,在启动的时候第一台mds当选leader, kill第二台 + // 2. Kill an MDS that is not in service. When starting, the first MDS is + // selected as the leader, and kill the second MDS serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); int killid = (serviceMDSID + 1) % 3; ASSERT_EQ(0, cluster->StopMDS(killid)); - // 3. 启动后台挂卸载线程,预期挂卸载服务不会受影响 + // 3. Start the backend suspend and uninstall thread, and it is expected + // that the suspend and uninstall service will not be affected CreateOpenFileBackend(); - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. 
Start background IO monitoring and start writing from the next segment + // to trigger the getorallocate logic + // follower mds cluster normal service after renewing session expiration + // (20s renewal) ASSERT_TRUE(MonitorResume(2 * segment_size, 4096, 25)); - // 5. 等待挂卸载监测结束 + // 5. Waiting for the end of suspend/unload monitoring WaitBackendCreateDone(); - // 6. 挂卸载服务正常 + // 6. Hanging and uninstalling service is normal ASSERT_FALSE(createOrOpenFailed); - // 7. 拉起被kill的进程 + // 7. Pulling up the process of being killed pid = cluster->StartSingleMDS(killid, ipmap[killid], 22240 + killid, configmap[killid], false); LOG(INFO) << "mds " << killid << " started on " << ipmap[killid] << ", pid = " << pid; ASSERT_GT(pid, 0); - // 8. 再拉起被kill的mds,对集群没有影响 + // 8. Pulling up the killed mds again has no impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: hangOneInserviceMDSThenResumeTheMDS"; /************ hangOneInserviceMDSThenResumeTheMDS ********/ - // 1. hang一台正在服务的mds - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. mds hang期间且在与etcd续约超时之前,这时候新写IO会失败, - // 因为新写触发getorallocate,这个RPC发到mds会出现一直超时,然后重试 - // 最后重试失败。 - // c. client session续约时长总比mds与etcd之间续约时长大,所以在 - // session续约失败之前mds预期可以完成切换,所以client的session - // 不会过期,覆盖写不会出现异常。 - // d. 恢复被hang的mds,预期对client io无影响 - // 0. 先睡眠一段时间等待mds集群选出leader + // 1. Hang one of the currently serving MDS. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write + // requests can be issued normally. b. During the MDS hang period and + // before the lease renewal with etcd times out, new write IO will fail. + // This is because a new write triggers getorallocate, and the RPC + // sent to the MDS will keep timing out, leading to retries that + // eventually fail. + // c. The client session renewal duration is longer than the lease + // renewal duration between MDS and etcd. + // So, MDS is expected to complete the switch before session renewal + // failure occurs. Therefore, the client's session will not expire, + // and overwrite writes will not result in exceptions. + // d. When the hung MDS is restored, it is expected to have no impact on + // client IO. + // 0. First, sleep for a period of time to allow the MDS cluster to elect a + // leader. std::this_thread::sleep_for(std::chrono::seconds(10)); - // 1. 集群最初状态,io正常下发 + // 1. The initial state of the cluster, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. hang一台正在服务的mds,在启动的时候第一台mds当选leader + // 2. Hang an MDS that is currently in service, and when it is started, the + // first MDS is selected as the leader serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); ASSERT_EQ(0, cluster->HangMDS(serviceMDSID)); - // 3. 启动后台挂卸载线程,预期挂卸载会出现失败 + // 3. Start the background suspend and unload thread, and expect the suspend + // and unload to fail CreateOpenFileBackend(); - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. Start background IO monitoring and start writing from the next segment + // to trigger the getorallocate logic + // follower mds cluster normal service after renewing session expiration + // (20s renewal) auto ret = MonitorResume(3 * segment_size, 4096, 25); - if (!ret) { + if (!ret) + { ASSERT_EQ(0, cluster->RecoverHangMDS(serviceMDSID)); ASSERT_TRUE(false); } - // 5. 等待后台挂卸载监测结束 + // 5. Waiting for the end of backend suspension and uninstallation + // monitoring WaitBackendCreateDone(); - // 6. 判断当前挂卸载情况 + // 6. 
Determine the current suspension and uninstallation situation ASSERT_EQ(0, cluster->RecoverHangMDS(serviceMDSID)); ASSERT_EQ(0, cluster->StopMDS(serviceMDSID)); pid = cluster->StartSingleMDS(serviceMDSID, ipmap[serviceMDSID], @@ -614,39 +681,46 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_GT(pid, 0); ASSERT_TRUE(createOrOpenFailed); - // 7. 再拉起被kill的mds,对集群没有影响 + // 7. Pulling up the killed mds again has no impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: hangOneNotInserviceMDSThenResumeTheMDS"; /********** hangOneNotInserviceMDSThenResumeTheMDS ***********/ - // 1. hang一台不在服务的mds - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. hang一台不在服务的mds,预期client IO无影响,挂卸载服务正常 - // 1. 集群最初状态,io正常下发 + // 1. Hang an out of service MDS + // 2. Expectations + // a. When the cluster status is normal: client read and write requests + // can be issued normally b. Hang an MDS that is not in service, + // expecting no impact on client IO, and suspending and uninstalling + // the service is normal + // 1. The initial state of the cluster, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. hang一台不在服务的mds,在启动的时候第一台mds当选leader, hang第二台 + // 2. Hang an MDS that is not in service. When starting, the first MDS is + // selected as the leader, and hang the second MDS serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); int hangid = (serviceMDSID + 1) % 3; ASSERT_EQ(0, cluster->HangMDS(hangid)); - // 3. 启动后台挂卸载线程,预期挂卸载服务不会受影响 + // 3. Start the backend suspend and uninstall thread, and it is expected + // that the suspend and uninstall service will not be affected CreateOpenFileBackend(); - // 4. 启动后台iops监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. Start backend iops monitoring and start writing from the next segment + // to trigger getorallocate logic + // follower mds cluster normal service after renewing session expiration + // (20s renewal) ret = MonitorResume(4 * segment_size, 4096, 25); - if (!ret) { + if (!ret) + { ASSERT_EQ(0, cluster->RecoverHangMDS(hangid)); ASSERT_TRUE(false); } - // 5. 等待挂卸载监测结束 + // 5. Waiting for the end of suspend/unload monitoring WaitBackendCreateDone(); - // 6. 挂卸载服务正常 + // 6. Hanging and uninstalling service is normal ASSERT_EQ(0, cluster->RecoverHangMDS(hangid)); ASSERT_EQ(0, cluster->StopMDS(hangid)); pid = cluster->StartSingleMDS(hangid, ipmap[hangid], 22240 + hangid, @@ -657,42 +731,50 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_FALSE(createOrOpenFailed); - // 7. 集群没有影响 + // 7. Cluster has no impact ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: KillTwoInserviceMDSThenRestartTheMDS"; /************* KillTwoInserviceMDSThenRestartTheMDS ***********/ - // 1. 重启两台mds,其中一台正在服务的mds - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. 关闭两台mds,在mds服务切换到另一台mds之前, - // client 新写IO会出现失败,挂卸载服务会异常 - // c. mds服务切换后,预期client IO恢复,挂卸载服务正常 - // d. 重新拉起mds,client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Restart two MDS nodes, one of which is currently serving. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write + // requests can be issued normally. b. When shutting down two MDS nodes, + // before the MDS service switches to another MDS, + // new write IO from clients will fail, and mount/unmount services + // will behave abnormally. + // c. After the MDS service switches, it is expected that client IO will + // recover, and mount/unmount services will be normal. d. 
When bringing + // the MDS nodes back up, client IO will be unaffected. + // 1. In the initial state of the cluster, IO can be issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. kill两台mds,在启动的时候第一台mds当选leader, kill前二台 + // 2. Kill two MDSs. When starting, the first MDS is selected as the leader, + // and kill the first two MDSs serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); int secondid = (serviceMDSID + 1) % 3; ASSERT_EQ(0, cluster->StopMDS(serviceMDSID)); ASSERT_EQ(0, cluster->StopMDS(secondid)); - // 3. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 3. Starting the backend suspend and uninstall thread, it is expected that + // the suspend and uninstall service will be affected CreateOpenFileBackend(); - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. Start background IO monitoring and start writing from the next segment + // to trigger the getorallocate logic + // follower mds cluster normal service after renewing session expiration + // (20s renewal) ASSERT_TRUE(MonitorResume(5 * segment_size, 4096, 25)); - // 5. 等待后台挂卸载监测结束 + // 5. Waiting for the end of backend suspension and uninstallation + // monitoring WaitBackendCreateDone(); - // 6. 判断当前挂卸载情况 + // 6. Determine the current suspension and uninstallation situation ASSERT_TRUE(createOrOpenFailed); - // 7. 拉起被kill的进程 + // 7. Pulling up the process of being killed pid = cluster->StartSingleMDS(serviceMDSID, ipmap[serviceMDSID], 22240 + serviceMDSID, configmap[serviceMDSID], false); @@ -700,10 +782,10 @@ TEST_F(MDSModuleException, MDSExceptionTest) { << ", pid = " << pid; ASSERT_GT(pid, 0); - // 8. 再拉起被kill的mds,对集群没有影响 + // 8. Pulling up the killed mds again has no impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 9. 拉起被kill的其他mds + // 9. Pull up other mds killed pid = cluster->StartSingleMDS(secondid, ipmap[secondid], 22240 + secondid, configmap[secondid], false); LOG(INFO) << "mds " << secondid << " started on " << ipmap[secondid] @@ -712,18 +794,22 @@ TEST_F(MDSModuleException, MDSExceptionTest) { LOG(INFO) << "current case: KillTwoNotInserviceMDSThenRestartTheMDS"; /******** KillTwoNotInserviceMDSThenRestartTheMDS ***********/ - // 1. 重启两台mds,其中两台都不在服务 - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. 关闭两台mds,预期client IO无影响,挂卸载服务正常 - // c. 重启这两台mds,client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Restart two MDS nodes, with both nodes not currently serving. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write + // requests can be issued normally. b. When shutting down two MDS nodes, + // it is expected that client IO will be unaffected, and mount/unmount + // services will be normal. c. When restarting these two MDS nodes, it is + // expected that client IO will be unaffected. + // 1. In the initial state of the cluster, IO can be issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 2. Starting the backend suspend and uninstall thread, it is expected that + // the suspend and uninstall service will be affected CreateOpenFileBackend(); - // 3. kill两台mds,在启动的时候第一台mds当选leader, kill后二台 + // 3. Kill two MDSs. When starting, the first MDS is selected as the leader, + // and kill the second two MDSs serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); int tempid_1 = (serviceMDSID + 1) % 3; @@ -731,27 +817,28 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_EQ(0, cluster->StopMDS(tempid_1)); ASSERT_EQ(0, cluster->StopMDS(tempid_2)); - // 4. 
启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // 不在服务的mds被kill对集群没有影响 + // 4. Start background IO monitoring and start writing from the next segment + // to trigger the getorallocate logic + // Killing mds that are not in service has no impact on the cluster ASSERT_TRUE(MonitorResume(6 * segment_size, 4096, 10)); - // 5. 等待挂卸载监测结束 + // 5. Waiting for the end of suspend/unload monitoring WaitBackendCreateDone(); - // 6. 挂卸载服务正常 + // 6. Hanging and uninstalling service is normal ASSERT_FALSE(createOrOpenFailed); - // 7. 拉起被kill的进程 + // 7. Pulling up the process of being killed pid = cluster->StartSingleMDS(tempid_1, ipmap[tempid_1], 22240 + tempid_1, configmap[tempid_1], false); LOG(INFO) << "mds " << tempid_1 << " started on " << ipmap[tempid_1] << ", pid = " << pid; ASSERT_GT(pid, 0); - // 8. 集群没有影响 + // 8. Cluster has no impact ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 9. 拉起其他mds,使集群恢复正常 + // 9. Pull up other mds to restore the cluster to normal pid = cluster->StartSingleMDS(tempid_2, ipmap[tempid_2], 22240 + tempid_2, configmap[tempid_2], false); LOG(INFO) << "mds " << tempid_2 << " started on " << ipmap[tempid_2] @@ -760,17 +847,24 @@ TEST_F(MDSModuleException, MDSExceptionTest) { LOG(INFO) << "current case: hangTwoInserviceMDSThenResumeTheMDS"; /******** hangTwoInserviceMDSThenResumeTheMDS ************/ - // 1. hang两台mds,其中包含一台正在服务的mds,然后恢复 - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. mds hang期间且在与etcd续约超时之前,这时候新写IO会失败, - // 因为新写触发getorallocate,这个RPC发到mds会出现一直超时,然后重试 - // 最后重试失败。 - // c. client session续约时长总比mds与etcd之间续约时长大,所以在 - // session续约失败之前mds预期可以完成切换,所以client的session - // 不会过期,覆盖写不会出现异常。 - // d. 恢复被hang的mds,预期对client io无影响 - // 1. hang两台mds,在启动的时候第一台mds当选leader, hang前二台 + // 1. Hang two MDS nodes, one of which is currently serving, and then + // recover them. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write + // requests can be issued normally. b. During the MDS hang period and + // before the lease renewal with etcd times out, new write IO will fail. + // This is because a new write triggers getorallocate, and the RPC + // sent to the MDS will keep timing out, leading to retries that + // eventually fail. + // c. The client session renewal duration is longer than the lease + // renewal duration between MDS and etcd. + // So, MDS is expected to complete the switch before session renewal + // failure occurs. Therefore, the client's session will not expire, + // and overwrite writes will not result in exceptions. + // d. When the hung MDS nodes are recovered, it is expected to have no + // impact on client IO. + // 1. Hang two MDS nodes, with the first MDS being elected as leader during + // startup, and both being hung before the process. serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); tempid_1 = serviceMDSID; @@ -778,25 +872,30 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_EQ(0, cluster->HangMDS(tempid_1)); ASSERT_EQ(0, cluster->HangMDS(tempid_2)); - // 2. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 2. Starting the backend suspend and uninstall thread, it is expected that + // the suspend and uninstall service will be affected CreateOpenFileBackend(); LOG(INFO) << "monitor resume start!"; - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. 
Start background IO monitoring and start writing from the next segment + // to trigger the getorallocate logic + // follower mds cluster normal service after renewing session expiration + // (20s renewal) ret = MonitorResume(7 * segment_size, 4096, 25); - if (!ret) { + if (!ret) + { ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1)); ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_2)); ASSERT_TRUE(false); } LOG(INFO) << "monitor resume done!"; - // 5. 等待后台挂卸载监测结束 + // 5. Waiting for the end of backend suspension and uninstallation + // monitoring WaitBackendCreateDone(); LOG(INFO) << "wait backend create thread done!"; - // 6. 判断当前挂卸载情况 + // 6. Determine the current suspension and uninstallation situation ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1)); ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_2)); ASSERT_EQ(0, cluster->StopMDS(tempid_1)); @@ -814,20 +913,24 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_GT(pid, 0); ASSERT_TRUE(createOrOpenFailed); - // 7. 再拉起被hang的mds,对集群没有影响 + // 7. Pulling up the hung mds again has no impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: hangTwoNotInserviceMDSThenResumeTheMDS"; /********** hangTwoNotInserviceMDSThenResumeTheMDS ********/ - // 1. hang两台mds,其中不包含正在服务的mds,然后恢复 - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. hang一台不在服务的mds,预期client IO无影响,挂卸载服务正常 - // c. 恢复这两台mds,client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Hang two MDS nodes, neither of which is currently serving, and then + // recover them. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write + // requests can be issued normally. b. Hang one MDS node that is not + // currently serving. It is expected that client IO will be unaffected, + // and mount/unmount services will behave normally. c. When these two MDS + // nodes are recovered, client IO is expected to be unaffected. + // 1. In the initial state of the cluster, IO can be issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. hang两台mds,在启动的时候第一台mds当选leader, kill后二台 + // 2. Hang two mds, the first mds is selected as the leader when starting, + // and kill the second two mds serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); tempid_1 = (serviceMDSID + 1) % 3; @@ -835,22 +938,25 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_EQ(0, cluster->HangMDS(tempid_1)); ASSERT_EQ(0, cluster->HangMDS(tempid_2)); - // 3. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 3. Starting the backend suspend and uninstall thread, it is expected that + // the suspend and uninstall service will be affected CreateOpenFileBackend(); - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // 不在服务的mds被kill对集群没有影响 + // 4. Start background IO monitoring and start writing from the next segment + // to trigger the getorallocate logic + // Killing mds that are not in service has no impact on the cluster ret = MonitorResume(8 * segment_size, 4096, 10); - if (!ret) { + if (!ret) + { ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1)); ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_2)); ASSERT_TRUE(false); } - // 5. 等待挂卸载监测结束 + // 5. Waiting for the end of suspend/unload monitoring WaitBackendCreateDone(); - // 6. 挂卸载服务正常 + // 6. Hanging and uninstalling service is normal ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1)); ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_2)); ASSERT_EQ(0, cluster->StopMDS(tempid_1)); @@ -868,178 +974,197 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_GT(pid, 0); ASSERT_FALSE(createOrOpenFailed); - // 7. 集群没有影响 + // 7. 
Cluster has no impact ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: KillThreeMDSThenRestartTheMDS"; /********* KillThreeMDSThenRestartTheMDS *********/ - // 1. 重启三台mds - // 2.预期 - // a. 集群状态正常:client读写请求可以正常下发 - // b. kill三台mds:client 在session过期之后出现IO 失败 - // c. client session过期之前这段时间的新写会失败,覆盖写不影响 - // d. 恢复其中hang的一台mds:client session重新续约成功,io恢复正常 - // e. 恢复另外两台hang的mds,client io无影响 - - // 1. kill三台mds + // 1. Restart three MDS nodes. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write + // requests can be issued normally. b. Kill all three MDS nodes: Client + // IO failures occur after session expiration. c. During the period + // before the client session expires, new writes will fail, but overwrite + // writes will not be affected. d. Recover one of the hung MDS nodes: + // Client session renewal succeeds, and IO returns to normal. e. Recover + // the other two hung MDS nodes: Client IO remains unaffected. + + // 1. Kill three MDSs ASSERT_EQ(0, cluster->StopAllMDS()); - // 确保mds确实退出了 + // Ensure that the mds has indeed exited std::this_thread::sleep_for(std::chrono::seconds(10)); - // 2. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 2. Starting the backend suspend and uninstall thread, it is expected that + // the suspend and uninstall service will be affected CreateOpenFileBackend(); - // 3. 下发一个io,sleep一段时间后判断是否返回 - // 由于从下一个segment开始写,使其触发getorallocate逻辑 - // MDS全部不在服务,写请求一直hang,无法返回 + // 3. Send an IO and sleep for a period of time to determine whether to + // return + // Due to writing from the next segment, it triggers the getorallocate + // logic MDS is no longer in service, write requests are constantly + // hanging, unable to return ASSERT_TRUE(SendAioWriteRequest(9 * segment_size, 4096)); std::this_thread::sleep_for(std::chrono::seconds(30)); ASSERT_FALSE(writeIOReturnFlag); - // 4. 等待后台挂卸载监测结束 + // 4. Waiting for the end of backend suspension and uninstallation + // monitoring WaitBackendCreateDone(); - // 5. 判断当前挂卸载情况 + // 5. Determine the current suspension and uninstallation situation ASSERT_TRUE(createOrOpenFailed); - // 6. 拉起被kill的进程 + // 6. Pulling up the process of being killed pid = -1; - while (pid < 0) { + while (pid < 0) + { pid = cluster->StartSingleMDS(0, "127.0.0.1:22222", 22240, mdsConf, true); LOG(INFO) << "mds 0 started on 127.0.0.1:22222, pid = " << pid; std::this_thread::sleep_for(std::chrono::seconds(3)); } - // 7. 检测上次IO是否返回 + // 7. Check if the last IO returned std::this_thread::sleep_for(std::chrono::seconds(20)); ASSERT_TRUE(writeIOReturnFlag); - // 8. 新的mds开始提供服务 + // 8. New mds starts offering services ASSERT_TRUE(MonitorResume(segment_size, 4096, 10)); - // 9. 再拉起被kill的进程 + // 9. Pull up the process of being killed again pid = cluster->StartSingleMDS(1, "127.0.0.1:22223", 22229, mdsConf, false); LOG(INFO) << "mds 1 started on 127.0.0.1:22223, pid = " << pid; ASSERT_GT(pid, 0); - // 10. 对集群没有影响 + // 10. No impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 11. 拉起其他被kill的mds + // 11. Pull up other killed mds pid = cluster->StartSingleMDS(2, "127.0.0.1:22224", 22232, mdsConf, false); LOG(INFO) << "mds 2 started on 127.0.0.1:22224, pid = " << pid; ASSERT_GT(pid, 0); LOG(INFO) << "current case: hangThreeMDSThenResumeTheMDS"; /********** hangThreeMDSThenResumeTheMDS **************/ - // 1. hang三台mds,然后恢复 - // 2.预期 - // a. 集群状态正常:client读写请求可以正常下发 - // b. hang三台mds:client 在session过期之后出现IO hang - // c. client session过期之前这段时间的新写会一直hang,覆盖写不影响 - // e. 恢复其中hang的一台mds:client session重新续约成功,io恢复正常 - // f. 
恢复另外两台hang的mds,client io无影响 - // 1. 集群最初状态,io正常下发 + // 1. Hang three MDS nodes and then recover them. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write + // requests can be issued normally. b. Hang three MDS nodes: Client IO + // hangs after the session expires. c. During the period before the + // client session expires, new writes will hang continuously, but + // overwrite writes will not be affected. e. Recover one of the hung MDS + // nodes: Client session renewal succeeds, and IO returns to normal. f. + // Recover the other two hung MDS nodes: Client IO remains unaffected. + // 1. In the initial state of the cluster, IO can be issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. hang三台mds + // 2. Hang Three MDSs ASSERT_EQ(0, cluster->HangMDS(0)); ASSERT_EQ(0, cluster->HangMDS(1)); ASSERT_EQ(0, cluster->HangMDS(2)); - // 3. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 3. Starting the backend suspend and uninstall thread, it is expected that + // the suspend and uninstall service will be affected CreateOpenFileBackend(); - // 4. 下发一个io,sleep一段时间后判断是否返回 - // 由于从下一个segment开始写,使其触发getorallocate逻辑 - // MDS全部不在服务,写请求一直hang,无法返回 + // 4. Send an IO and sleep for a period of time to determine whether to + // return + // Due to writing from the next segment, it triggers the getorallocate + // logic MDS is no longer in service, write requests are constantly + // hanging, unable to return ASSERT_TRUE(SendAioWriteRequest(10 * segment_size, 4096)); std::this_thread::sleep_for(std::chrono::seconds(3)); ret = writeIOReturnFlag; - if (ret) { + if (ret) + { ASSERT_EQ(0, cluster->RecoverHangMDS(2)); ASSERT_EQ(0, cluster->RecoverHangMDS(1)); ASSERT_EQ(0, cluster->RecoverHangMDS(0)); ASSERT_TRUE(false); } - // 5. 等待监测结束 + // 5. Waiting for monitoring to end WaitBackendCreateDone(); - // 6. 判断当前挂卸载情况 - if (!createOrOpenFailed) { + // 6. Determine the current suspension and uninstallation situation + if (!createOrOpenFailed) + { ASSERT_EQ(0, cluster->RecoverHangMDS(2)); ASSERT_EQ(0, cluster->RecoverHangMDS(1)); ASSERT_EQ(0, cluster->RecoverHangMDS(0)); ASSERT_TRUE(false); } - // 7. 拉起被hang的进程, 有可能hang的进程因为长时间未与etcd握手, - // 导致其被拉起后就退出了,所以这里在recover之后再启动该mds, - // 这样保证集群中至少有一个mds在提供服务 + // 7. Pulling up the process being hung may result in the process not + // shaking hands with ETCD for a long time, + // After it was pulled up, it exited, so the mds was restarted after + // recover, This ensures that at least one mds in the cluster is + // providing services ASSERT_EQ(0, cluster->RecoverHangMDS(1)); ASSERT_EQ(0, cluster->StopMDS(1)); pid = -1; - while (pid < 0) { + while (pid < 0) + { pid = cluster->StartSingleMDS(1, "127.0.0.1:22223", 22229, mdsConf, true); LOG(INFO) << "mds 1 started on 127.0.0.1:22223, pid = " << pid; std::this_thread::sleep_for(std::chrono::seconds(3)); } - // 检测上次IO是否返回 + // Check if the last IO returned std::this_thread::sleep_for(std::chrono::seconds(20)); ASSERT_TRUE(writeIOReturnFlag); - // 8. 新的mds开始提供服务 + // 8. New mds starts offering services ret = MonitorResume(segment_size, 4096, 1); - if (!ret) { + if (!ret) + { ASSERT_EQ(0, cluster->RecoverHangMDS(2)); ASSERT_EQ(0, cluster->RecoverHangMDS(0)); ASSERT_TRUE(false); } - // 9. 再拉起被hang的进程 + // 9. Pull up the process of being hung again ASSERT_EQ(0, cluster->RecoverHangMDS(2)); ASSERT_EQ(0, cluster->RecoverHangMDS(0)); - // 10. 对集群没有影响 + // 10. 
No impact on the cluster
    ASSERT_TRUE(MonitorResume(0, 4096, 1));
}

-TEST_F(MDSModuleException, StripeMDSExceptionTest) {
+TEST_F(MDSModuleException, StripeMDSExceptionTest)
+{
    LOG(INFO) << "current case: StripeMDSExceptionTest";
-    // 1. 创建一个条带的卷
-    int stripefd = curve::test::FileCommonOperation::Open("/test2",
-                            "curve", 1024 * 1024, 8);
+    // 1. Create a striped volume
+    int stripefd = curve::test::FileCommonOperation::Open("/test2", "curve",
+                                                          1024 * 1024, 8);
    ASSERT_NE(stripefd, -1);
    uint64_t offset = std::rand() % 5 * segment_size;

-    // 2. 进行数据的读写校验
-    VerifyDataConsistency(stripefd, offset, 128 *1024 *1024);
+    // 2. Perform data read and write verification
+    VerifyDataConsistency(stripefd, offset, 128 * 1024 * 1024);
    std::this_thread::sleep_for(std::chrono::seconds(60));

-    // 3. kill 一台当前为leader的mds
+    // 3. Kill the MDS that is currently the leader
    LOG(INFO) << "stop mds.";
    int serviceMDSID = 0;
    cluster->CurrentServiceMDS(&serviceMDSID);
    ASSERT_EQ(0, cluster->StopMDS(serviceMDSID));

-    // 4. 启动后台挂卸载线程
+    // 4. Start the background mount/unmount thread
    CreateOpenFileBackend();

-    // 5. 继续随机写数据进行校验
+    // 5. Continue writing random data for verification
    offset = std::rand() % 5 * segment_size;
    LOG(INFO) << "when stop mds, write and read data.";
-    VerifyDataConsistency(stripefd, offset, 128 *1024 *1024);
+    VerifyDataConsistency(stripefd, offset, 128 * 1024 * 1024);

-    // 6. 等待挂卸载检测结果
+    // 6. Wait for the result of the mount/unmount check
    WaitBackendCreateDone();

-    // 7. 挂卸载服务正常
+    // 7. The mount/unmount service behaves as expected
    ASSERT_TRUE(createOrOpenFailed);

-    LOG(INFO) <<"start mds.";
+    LOG(INFO) << "start mds.";
    pid_t pid = cluster->StartSingleMDS(serviceMDSID, ipmap[serviceMDSID],
                                        22240 + serviceMDSID,
                                        configmap[serviceMDSID], false);
@@ -1047,10 +1172,9 @@ TEST_F(MDSModuleException, StripeMDSExceptionTest) {
             << ", pid = " << pid;
    ASSERT_GT(pid, 0);

-    LOG(INFO) << "start mds, write and read data.";
    offset = std::rand() % 5 * segment_size;
-    VerifyDataConsistency(stripefd, offset, 128 *1024 *1024);
+    VerifyDataConsistency(stripefd, offset, 128 * 1024 * 1024);

    ::Close(stripefd);
}
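VerifyDataConsistency above drives both the write and the read-back through AioWrite/AioRead with callbacks. A minimal synchronous sketch of the same check, not part of the patch, is shown below; it assumes the blocking Write/Read entry points declared in include/client/libcurve.h and reduces error handling to asserts:

#include <cassert>
#include <cstdlib>
#include <cstring>

#include "include/client/libcurve.h"

// Write a random pattern, read it back at the same offset, and compare.
void CheckConsistencySync(int fd, uint64_t offset, uint64_t size) {
    char* writebuf = new char[size];
    char* readbuf = new char[size];
    for (uint64_t i = 0; i < size; ++i) {
        writebuf[i] = 'a' + std::rand() % 26;  // random payload
    }

    assert(Write(fd, writebuf, offset, size) >= 0);
    assert(Read(fd, readbuf, offset, size) >= 0);
    assert(0 == memcmp(writebuf, readbuf, size));  // data must match

    delete[] writebuf;
    delete[] readbuf;
}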
diff --git a/test/integration/client/unstable_chunkserver_exception_test.cpp b/test/integration/client/unstable_chunkserver_exception_test.cpp
index ea5c7e4c37..6ffca843bb 100644
--- a/test/integration/client/unstable_chunkserver_exception_test.cpp
+++ b/test/integration/client/unstable_chunkserver_exception_test.cpp
@@ -20,54 +20,52 @@
 * Author: wuhanqing
 */

-#include
-#include
 #include
+#include
+#include

-#include
-#include
-#include  // NOLINT
-#include  // NOLINT
+#include
 #include
-#include
+#include
+#include  // NOLINT
+#include
 #include
-#include
+#include  // NOLINT
 #include
-#include
-#include  // NOLINT
+#include
+#include  // NOLINT
+#include

 #include "include/client/libcurve.h"
-#include "src/common/timeutility.h"
 #include "src/client/client_metric.h"
 #include "src/client/inflight_controller.h"
+#include "src/common/timeutility.h"
 #include "test/integration/client/common/file_operation.h"
 #include "test/integration/cluster_common/cluster.h"
 #include "test/util/config_generator.h"

 using curve::CurveCluster;

-const char* kMdsConfPath = "./test/integration/unstable_test_mds.conf";
-const char* kCSConfPath = "./test/integration/unstable_test_cs.conf";
-const char* kClientConfPath = "./test/integration/unstable_test_client.conf";
+const char *kMdsConfPath = "./test/integration/unstable_test_mds.conf";
+const char *kCSConfPath = "./test/integration/unstable_test_cs.conf";
+const char *kClientConfPath = "./test/integration/unstable_test_client.conf";

-const char* kEtcdClientIpPort = "127.0.0.1:21000";
-const char* kEtcdPeerIpPort = "127.0.0.1:20999";
-const char* kMdsIpPort = "127.0.0.1:30010";
-const char* kClientInflightNum = "6";
-const char* kLogPath = "./runlog/";
+const char *kEtcdClientIpPort = "127.0.0.1:21000";
+const char *kEtcdPeerIpPort = "127.0.0.1:20999";
+const char *kMdsIpPort = "127.0.0.1:30010";
+const char *kClientInflightNum = "6";
+const char *kLogPath = "./runlog/";

 curve::client::PerSecondMetric iops("test", "iops");

-std::atomic running{ false };
+std::atomic running{false};

 const std::vector chunkserverConfigOpts{
    "chunkfilepool.enable_get_chunk_from_pool=false",
-    "walfilepool.enable_get_segment_from_pool=false"
-};
+    "walfilepool.enable_get_segment_from_pool=false"};

-const std::vector mdsConfigOpts{
-    std::string("mds.etcd.endpoint=") + std::string(kEtcdClientIpPort)
-};
+const std::vector mdsConfigOpts{std::string("mds.etcd.endpoint=") +
+                                std::string(kEtcdClientIpPort)};

 const std::vector clientConfigOpts{
    std::string("mds.listen.addr=") + kMdsIpPort,
@@ -81,9 +79,8 @@ const std::vector mdsConf{
    std::string("--confPath=") + kMdsConfPath,
    std::string("--mdsAddr=") + kMdsIpPort,
    std::string("--etcdAddr=") + kEtcdClientIpPort,
-    { "--log_dir=./runlog/mds" },
-    { "--stderrthreshold=3" }
-};
+    {"--log_dir=./runlog/mds"},
+    {"--stderrthreshold=3"}};

 const std::vector chunkserverConfTemplate{
    {"-raft_sync_segments=true"},
@@ -105,14 +102,21 @@ const std::vector chunkserverConfTemplate{
    {"--stderrthreshold=3"}};

 const std::vector chunkserverPorts{
-    31000, 31001, 31010, 31011, 31020, 31021,
+    31000,
+    31001,
+    31010,
+    31011,
+    31020,
+    31021,
 };

-std::vector GenChunkserverConf(int port) {
+std::vector GenChunkserverConf(int port)
+{
    std::vector conf(chunkserverConfTemplate);

    char temp[NAME_MAX_SIZE];

-    auto formatter = [&](const std::string& format, int port) {
+    auto formatter = [&](const std::string &format, int port)
+    {
        snprintf(temp, sizeof(temp), format.c_str(), port);
        return temp;
    };
@@ -138,23 +142,21 @@ std::vector GenChunkserverConf(int port) {
    return conf;
}

-off_t RandomWriteOffset() {
-    return rand() % 32 * (16 * 1024 * 1024);
-}
+off_t RandomWriteOffset() { return rand() % 32 * (16 * 1024 * 1024); }

-size_t RandomWriteLength() {
-    return rand() % 32 * 4096;
-}
+size_t RandomWriteLength() { return rand() % 32 * 4096; }

 static char buffer[1024 * 4096];

-struct ChunkserverParam {
+struct ChunkserverParam
+{
    int id;
    int port;
-    std::string addr{ "127.0.0.1:" };
+    std::string addr{"127.0.0.1:"};
    std::vector conf;

-    ChunkserverParam(int id, int port) {
+    ChunkserverParam(int id, int port)
+    {
        this->id = id;
        this->port = port;
        this->addr.append(std::to_string(port));
@@ -162,10 +164,12 @@ struct ChunkserverParam {
    }
 };

-class UnstableCSModuleException : public ::testing::Test {
- protected:
-    static void SetUpTestCase() {
-        // 清理文件夹
+class UnstableCSModuleException : public ::testing::Test
+{
+protected:
+    static void SetUpTestCase()
+    {
+        // Clean up folders
        system("rm -rf module_exception_curve_unstable_cs.etcd");
        system("rm -rf ttt");
        system("mkdir -p ttt");
@@ -175,7 +179,7 @@ class UnstableCSModuleException : public ::testing::Test {
        cluster.reset(new CurveCluster());
        ASSERT_NE(nullptr, cluster.get());

-        // 生成配置文件
+        // Generate the configuration files
        cluster->PrepareConfig(kMdsConfPath, mdsConfigOpts);

        cluster->PrepareConfig(kCSConfPath,
@@ -183,50 +187,53 @@ 
cluster->PrepareConfig(kClientConfPath, clientConfigOpts); - // 1. 启动etcd + // 1. Start etcd pid_t pid = cluster->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, std::vector{ - "--name=module_exception_curve_unstable_cs" }); + "--name=module_exception_curve_unstable_cs"}); LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort << ":" << kEtcdPeerIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 2. 启动一个mds + // 2. Start an mds pid = cluster->StartSingleMDS(1, kMdsIpPort, 30013, mdsConf, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(2)); - // 3. 创建物理池 - ASSERT_EQ( - 0, - cluster->PreparePhysicalPool( - 1, - "./test/integration/client/config/unstable/" - "topo_unstable.json")); + // 3. Creating a physical pool + ASSERT_EQ(0, cluster->PreparePhysicalPool( + 1, + "./test/integration/client/config/unstable/" + "topo_unstable.json")); - // 4. 创建chunkserver + // 4. Create chunkserver StartAllChunkserver(); std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池,并睡眠一段时间让底层copyset先选主 - ASSERT_EQ(0, cluster->PrepareLogicalPool( - 1, "test/integration/client/config/unstable/topo_unstable.json")); + // 5. Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first + ASSERT_EQ( + 0, + cluster->PrepareLogicalPool( + 1, + "test/integration/client/config/unstable/topo_unstable.json")); std::this_thread::sleep_for(std::chrono::seconds(10)); - // 6. 初始化client配置 + // 6. Initialize client configuration int ret = Init(kClientConfPath); ASSERT_EQ(ret, 0); - // 7. 先睡眠10s,让chunkserver选出leader + // 7. Sleep for 10 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(10)); } - static void TearDownTestCase() { + static void TearDownTestCase() + { UnInit(); ASSERT_EQ(0, cluster->StopCluster()); - // 清理文件夹 + // Clean Up Folder system("rm -rf module_exception_curve_unstable_cs.etcd"); system("rm -rf module_exception_curve_unstable_cs"); system("rm -rf ttt"); @@ -236,9 +243,11 @@ class UnstableCSModuleException : public ::testing::Test { ::unlink(kClientConfPath); } - static void StartAllChunkserver() { + static void StartAllChunkserver() + { int id = 1; - for (auto port : chunkserverPorts) { + for (auto port : chunkserverPorts) + { ChunkserverParam param(id, port); chunkServers.emplace(id, param); @@ -252,18 +261,21 @@ class UnstableCSModuleException : public ::testing::Test { } } - static void OpenAndWrite(const std::string& filename) { + static void OpenAndWrite(const std::string &filename) + { int fd = curve::test::FileCommonOperation::Open(filename, "curve"); ASSERT_NE(-1, fd); std::vector writeThs; - for (int i = 0; i < 5; ++i) { + for (int i = 0; i < 5; ++i) + { writeThs.emplace_back(AioWriteFunc, fd); LOG(INFO) << "write " << filename << ", thread " << (i + 1) << " started"; } - for (auto& th : writeThs) { + for (auto &th : writeThs) + { th.join(); } @@ -271,14 +283,17 @@ class UnstableCSModuleException : public ::testing::Test { LOG(INFO) << "stop all write thread, filename " << filename; } - static void AioWriteFunc(int fd) { - auto cb = [](CurveAioContext* ctx) { + static void AioWriteFunc(int fd) + { + auto cb = [](CurveAioContext *ctx) + { iops.count << 1; delete ctx; }; - while (running) { - CurveAioContext* context = new CurveAioContext; + while (running) + { + CurveAioContext *context = new CurveAioContext; context->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; context->cb = cb; context->offset = 
RandomWriteOffset();
@@ -300,9 +315,11 @@ class UnstableCSModuleException : public ::testing::Test {

 int UnstableCSModuleException::fd = 0;
 std::unique_ptr UnstableCSModuleException::cluster;
-std::unordered_map UnstableCSModuleException::chunkServers;  // NOLINT
+std::unordered_map
+    UnstableCSModuleException::chunkServers;  // NOLINT

-TEST_F(UnstableCSModuleException, TestCommonReadAndWrite) {
+TEST_F(UnstableCSModuleException, TestCommonReadAndWrite)
+{
    const std::string filename = "/TestCommonReadAndWrite";
    constexpr size_t length = 4ull * 1024 * 1024;
    constexpr off_t offset = 4ull * 1024 * 1024;
@@ -323,28 +340,31 @@ TEST_F(UnstableCSModuleException, TestCommonReadAndWrite) {
    ::Close(fd);
}

-// 集群拓扑结构
-// 1个client
-// 1个etcd
-// 1个mds
-// 3个zone,每个里面2个chunkserver
-TEST_F(UnstableCSModuleException, HangOneZone) {
+// Cluster topology
+// 1 client
+// 1 etcd
+// 1 mds
+// 3 zones, each with 2 chunkservers
+TEST_F(UnstableCSModuleException, HangOneZone)
+{
    srand(time(nullptr));

-    // 开启多个线程写文件
+    // Start multiple threads writing files
    LOG(INFO) << "starting write...";
    running = true;
    std::vector openAndWriteThreads;
-    for (int i = 0; i < 2; ++i) {
+    for (int i = 0; i < 2; ++i)
+    {
        openAndWriteThreads.emplace_back(
            &UnstableCSModuleException::OpenAndWrite,
            "/test" + std::to_string(i));
    }

-    // 正常写入60s, 并记录后30秒的iops
+    // Write normally for 60 seconds and record the IOPS of the last 30 seconds
    std::vector beforeRecords;
    std::this_thread::sleep_for(std::chrono::seconds(30));
-    for (int i = 1; i <= 30; ++i) {
+    for (int i = 1; i <= 30; ++i)
+    {
        std::this_thread::sleep_for(std::chrono::seconds(1));
        beforeRecords.push_back(iops.value.get_value(1));
    }
@@ -353,19 +373,21 @@ TEST_F(UnstableCSModuleException, HangOneZone) {
                        beforeRecords.size();
    LOG(INFO) << "iops before hang: " << beforeAvgIOps;

-    // hang一个zone的chunkserver
+    // Hang the chunkservers of one zone
    LOG(INFO) << "hang one zone";
    ASSERT_EQ(0, cluster->HangChunkServer(1));
    ASSERT_EQ(0, cluster->HangChunkServer(2));

    std::vector afterRecords;
-    // 打印每一秒的iops情况
+    // Print the IOPS for each second
    for (int i = 1; i <= 10; ++i)
+    {
        std::this_thread::sleep_for(std::chrono::seconds(1));
        auto tmp = iops.value.get_value(1);
        LOG(INFO) << "after " << i << "s, iops: " << tmp;
-        // 记录后5s的iops值
+        // Record the IOPS of the last 5 seconds
-        if (i >= 5) {
+        if (i >= 5)
+        {
            afterRecords.push_back(tmp);
        }
    }
@@ -381,7 +403,8 @@ TEST_F(UnstableCSModuleException, HangOneZone) {
    ASSERT_EQ(0, cluster->RecoverHangChunkServer(2));

    running = false;
-    for (auto& th : openAndWriteThreads) {
+    for (auto &th : openAndWriteThreads)
+    {
        th.join();
    }
    LOG(INFO) << "all write thread stoped";
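HangOneZone above compares the average per-second IOPS sampled before and after hanging one zone's chunkservers. A minimal sketch of that comparison, not part of the patch, is below; the simple "must drop" criterion is illustrative rather than the exact threshold the test asserts:

#include <cstdint>
#include <vector>

// Mean of per-second IOPS samples taken from the bvar counter above.
double AverageIops(const std::vector<uint64_t>& records) {
    if (records.empty()) {
        return 0.0;
    }
    uint64_t sum = 0;
    for (uint64_t v : records) {
        sum += v;
    }
    return static_cast<double>(sum) / records.size();
}

// With one of the three zones hung, in-flight IO toward that zone stalls,
// so the observed IOPS should fall below the healthy baseline.
bool IopsDroppedAfterHang(const std::vector<uint64_t>& before,
                          const std::vector<uint64_t>& after) {
    return AverageIops(after) < AverageIops(before);
}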
diff --git a/test/integration/cluster_common/cluster.cpp b/test/integration/cluster_common/cluster.cpp
index 5d32ef8539..72410a5ca7 100644
--- a/test/integration/cluster_common/cluster.cpp
+++ b/test/integration/cluster_common/cluster.cpp
@@ -20,26 +20,28 @@
 * Author: lixiaocui
 */

-#include
+#include "test/integration/cluster_common/cluster.h"
+
 #include
-#include
-#include
-#include
 #include
 #include
 #include
-#include
-#include  //NOLINT
+#include
+#include
+#include
+#include
+
 #include  //NOLINT
+#include
 #include
+#include
+#include  //NOLINT
 #include
 #include
-#include
-#include "test/integration/cluster_common/cluster.h"
+#include "src/client/client_common.h"
 #include "src/common/string_util.h"
 #include "src/common/timeutility.h"
-#include "src/client/client_common.h"
 #include "src/kvstorageclient/etcd_client.h"

 using ::curve::client::UserInfo_t;
@@ -50,29 +52,29 @@ namespace curve {

 using ::curve::client::CreateFileContext;

-int CurveCluster::InitMdsClient(const curve::client::MetaServerOption &op) {
+int CurveCluster::InitMdsClient(const curve::client::MetaServerOption& op) {
    mdsClient_ = std::make_shared();
    return mdsClient_->Initialize(op);
}

-std::vector VecStr2VecChar(std::vector args) {
-    std::vector argv(args.size() + 1);  // for the NULL terminator
+std::vector VecStr2VecChar(std::vector args) {
+    std::vector argv(args.size() + 1);  // for the NULL terminator
    for (std::size_t i = 0; i < args.size(); ++i) {
        // not include cmd
-        argv[i] = new char[args[i].size()+1];
+        argv[i] = new char[args[i].size() + 1];
        snprintf(argv[i], args[i].size() + 1, "%s", args[i].c_str());
    }
    argv[args.size()] = NULL;
    return argv;
}

-void ClearArgv(const std::vector &argv) {
-    for (auto const &item : argv) {
-        delete [] item;
+void ClearArgv(const std::vector& argv) {
+    for (auto const& item : argv) {
+        delete[] item;
    }
}

 int CurveCluster::InitSnapshotCloneMetaStoreEtcd(
-    const std::string &etcdEndpoints) {
+    const std::string& etcdEndpoints) {
    EtcdConf conf;
    conf.Endpoints = new char[etcdEndpoints.size()];
    std::memcpy(conf.Endpoints, etcdEndpoints.c_str(), etcdEndpoints.size());
@@ -88,8 +90,8 @@ int CurveCluster::InitSnapshotCloneMetaStoreEtcd(
    }

    auto codec = std::make_shared();
-    metaStore_ = std::make_shared(etcdClient,
-                                  codec);
+    metaStore_ =
+        std::make_shared(etcdClient, codec);
    if (metaStore_->Init() < 0) {
        LOG(ERROR) << "metaStore init fail.";
        return -1;
@@ -106,17 +108,13 @@ int CurveCluster::StopCluster() {
    LOG(INFO) << "stop cluster begin...";

    int ret = 0;
-    if (StopAllMDS() < 0)
-        ret = -1;
+    if (StopAllMDS() < 0) ret = -1;

-    if (StopAllChunkServer() < 0)
-        ret = -1;
+    if (StopAllChunkServer() < 0) ret = -1;

-    if (StopAllSnapshotCloneServer() < 0)
-        ret = -1;
+    if (StopAllSnapshotCloneServer() < 0) ret = -1;

-    if (StopAllEtcd() < 0)
-        ret = -1;
+    if (StopAllEtcd() < 0) ret = -1;

    if (!ret)
        LOG(INFO) << "success stop cluster";
@@ -125,9 +123,9 @@
    return ret;
}

-int CurveCluster::StartSingleMDS(int id, const std::string &ipPort,
+int CurveCluster::StartSingleMDS(int id, const std::string& ipPort,
                                 int dummyPort,
-                                 const std::vector &mdsConf,
+                                 const std::vector& mdsConf,
                                 bool expectLeader) {
    LOG(INFO) << "start mds " << ipPort << " begin...";
    pid_t pid = ::fork();
@@ -135,20 +133,21 @@
        LOG(ERROR) << "start mds " << ipPort << " fork failed";
        return -1;
    } else if (0 == pid) {
-        // 在子进程中起一个mds
+        // Start an mds in the child process
        // ./bazel-bin/src/mds/main/curvemds
        std::vector args;
        args.emplace_back("./bazel-bin/src/mds/main/curvemds");
        args.emplace_back("--mdsAddr=" + ipPort);
        args.emplace_back("--dummyPort=" + std::to_string(dummyPort));
-        for (auto &item : mdsConf) {
+        for (auto& item : mdsConf) {
            args.emplace_back(item);
        }
-        std::vector argv = VecStr2VecChar(args);
+        std::vector argv = VecStr2VecChar(args);
        /**
-         * 重要提示!!!!
-         * fork后,子进程尽量不要用LOG()打印,可能死锁!!!
+         * Important reminder!!!!
+         * After fork(), avoid using LOG() in the child process, as
+         * it may cause a deadlock!!!
@@ -221,26 +220,27 @@ int CurveCluster::StopAllMDS() { } int CurveCluster::StartSnapshotCloneServer( - int id, const std::string &ipPort, - const std::vector<std::string> &snapshotcloneConf) { + int id, const std::string& ipPort, + const std::vector<std::string>& snapshotcloneConf) { LOG(INFO) << "start snapshotcloneserver " << ipPort << " begin ..."; pid_t pid = ::fork(); if (0 > pid) { LOG(ERROR) << "start snapshotcloneserver " << ipPort << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个snapshotcloneserver + // Start a snapshotcloneserver in the child process std::vector<std::string> args; args.emplace_back( "./bazel-bin/src/snapshotcloneserver/snapshotcloneserver"); args.emplace_back("--addr=" + ipPort); - for (auto &item : snapshotcloneConf) { + for (auto& item : snapshotcloneConf) { args.emplace_back(item); } - std::vector<char *> argv = VecStr2VecChar(args); + std::vector<char*> argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important note!!!! + * After fork(), the child process should avoid calling LOG(); + * it may deadlock!!! */ execv("./bazel-bin/src/snapshotcloneserver/snapshotcloneserver", argv.data()); @@ -317,19 +317,18 @@ int CurveCluster::StopAllSnapshotCloneServer() { int ret = 0; auto tempMap = snapPidMap_; for (auto pair : tempMap) { - if (StopSnapshotCloneServer(pair.first) < 0) - ret = -1; + if (StopSnapshotCloneServer(pair.first) < 0) ret = -1; } - // 等待进程完全退出 + // Wait for the processes to exit completely ::sleep(2); LOG(INFO) << "stop all snapshotcloneservver end."; return ret; } -int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, - const std::string &peerIpPort, - const std::vector<std::string> &etcdConf) { +int CurveCluster::StartSingleEtcd(int id, const std::string& clientIpPort, + const std::string& peerIpPort, + const std::vector<std::string>& etcdConf) { LOG(INFO) << "start etcd " << clientIpPort << " begin..."; pid_t pid = ::fork(); @@ -337,7 +336,7 @@ int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, LOG(ERROR) << "start etcd " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个etcd + // Start an etcd in the child process // ip netns exec integ_etcd1 etcd std::vector<std::string> args{"etcd"}; args.emplace_back("--listen-peer-urls=http://" + peerIpPort); args.emplace_back("--initial-cluster-token=etcd-cluster-1"); args.emplace_back("--election-timeout=3000"); args.emplace_back("--heartbeat-interval=300"); - for (auto &item : etcdConf) { + for (auto& item : etcdConf) { args.push_back(item); } - std::vector<char *> argv = VecStr2VecChar(args); + std::vector<char*> argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important note!!!! + * After fork(), the child process should avoid calling LOG(); + * it may deadlock!!! */ execvp("etcd", argv.data()); ClearArgv(argv);
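The warning repeated in each child branch above is grounded in POSIX fork semantics: fork() duplicates the whole address space but only the calling thread, so if another thread happened to hold the logging library's internal mutex at fork time, the child inherits a locked mutex that no surviving thread will ever release, and its next LOG() blocks forever. A minimal sketch of a child-safe alternative (an assumption for illustration, not project code) uses only async-signal-safe calls:

    #include <unistd.h>
    #include <cstring>

    // write(2) is async-signal-safe; glog's LOG() is not safe in the child
    // of a multithreaded parent between fork() and exec().
    static void ChildSafeLog(const char* msg) {
        ssize_t n = write(STDERR_FILENO, msg, strlen(msg));
        (void)n;  // best effort; nothing sensible to do on failure here
    }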
@@ -380,7 +380,7 @@ bool CurveCluster::WaitForEtcdClusterAvalible(int waitSec) { return false; } else { int i = 0; - for (auto &item : etcdClientIpPort_) { + for (auto& item : etcdClientIpPort_) { i++; if (i == etcdClientIpPort_.size()) { endpoint += "http://" + item.second; @@ -464,9 +464,9 @@ int CurveCluster::StopAllEtcd() { return ret; } -int CurveCluster::FormatFilePool(const std::string &filePooldir, - const std::string &filePoolmetapath, - const std::string &filesystempath, +int CurveCluster::FormatFilePool(const std::string& filePooldir, + const std::string& filePoolmetapath, + const std::string& filesystempath, uint32_t size) { LOG(INFO) << "FormatFilePool begin..."; @@ -475,8 +475,7 @@ int CurveCluster::FormatFilePool(const std::string &filePooldir, " -filePoolMetaPath=" + filePoolmetapath + " -fileSystemPath=" + filesystempath + " -allocateByPercent=false -preAllocateNum=" + - std::to_string(size * 300) + - " -needWriteZero=false"; + std::to_string(size * 300) + " -needWriteZero=false"; RETURN_IF_NOT_ZERO(system(cmd.c_str())); @@ -485,8 +484,8 @@ int CurveCluster::FormatFilePool(const std::string &filePooldir, } int CurveCluster::StartSingleChunkServer( - int id, const std::string &ipPort, - const std::vector<std::string> &chunkserverConf) { + int id, const std::string& ipPort, + const std::vector<std::string>& chunkserverConf) { LOG(INFO) << "start chunkserver " << id << ", " << ipPort << " begin..."; std::vector<std::string> split; ::curve::common::SplitString(ipPort, ":", &split); @@ -500,19 +499,20 @@ int CurveCluster::StartSingleChunkServer( LOG(ERROR) << "start chunkserver " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个chunkserver + // Start a chunkserver in the child process std::vector<std::string> args; args.emplace_back("./bazel-bin/src/chunkserver/chunkserver"); args.emplace_back("-chunkServerIp=" + split[0]); args.emplace_back("-chunkServerPort=" + split[1]); - for (auto &item : chunkserverConf) { + for (auto& item : chunkserverConf) { args.emplace_back(item); } - std::vector<char *> argv = VecStr2VecChar(args); + std::vector<char*> argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important note!!!! + * After fork(), the child process should avoid calling LOG(); + * it may deadlock!!! */ execv("./bazel-bin/src/chunkserver/chunkserver", argv.data()); ClearArgv(argv); @@ -530,7 +530,7 @@ int CurveCluster::StartSingleChunkServer( } int CurveCluster::StartSingleChunkServerInBackground( - int id, const std::vector<std::string> &chunkserverConf) { + int id, const std::vector<std::string>& chunkserverConf) { std::vector<std::string> ipPort; ::curve::common::SplitString(ChunkServerIpPortInBackground(id), ":", &ipPort); @@ -547,7 +547,7 @@ int CurveCluster::StartSingleChunkServerInBackground( LOG(ERROR) << "start chunkserver " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个chunkserver + // Start a chunkserver in the child process std::vector<std::string> args; args.emplace_back("netns"); args.emplace_back("exec"); args.emplace_back("./bazel-bin/src/chunkserver/chunkserver"); args.emplace_back("-chunkServerIp=" + ipPort[0]); args.emplace_back("-chunkServerPort=" + ipPort[1]); - for (auto &item : chunkserverConf) { + for (auto& item : chunkserverConf) { args.emplace_back(item); } - std::vector<char *> argv = VecStr2VecChar(args); + std::vector<char*> argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important note!!!! + * After fork(), the child process should avoid calling LOG(); + * it may deadlock!!! */ execvp("ip", argv.data()); ClearArgv(argv); @@ -723,7 +724,7 @@ std::string CurveCluster::ChunkServerIpPortInBackground(int id) { } int CurveCluster::PreparePhysicalPool(int mdsId, - const std::string &clusterMap) { + const std::string& clusterMap) { LOG(INFO) << "create physicalpool begin..."; std::string createPPCmd = std::string("./bazel-bin/tools/curvefsTool") + @@ -741,15 +742,14 @@ int CurveCluster::PreparePhysicalPool(int mdsId, return 0; } -int CurveCluster::PrepareLogicalPool(int mdsId, const std::string &clusterMap) { +int CurveCluster::PrepareLogicalPool(int mdsId, const std::string& clusterMap) { LOG(INFO) << "create logicalpool begin..."; - std::string createLPCmd = - std::string("./bazel-bin/tools/curvefsTool") + - std::string(" -cluster_map=") + clusterMap + - std::string(" -mds_addr=") + MDSIpPort(mdsId) + - std::string(" -op=create_logicalpool") + - std::string(" -stderrthreshold=0 -minloglevel=0"); + std::string createLPCmd = std::string("./bazel-bin/tools/curvefsTool") + + std::string(" -cluster_map=") + clusterMap + + std::string(" -mds_addr=") + MDSIpPort(mdsId) + + std::string(" -op=create_logicalpool") + + std::string(" -stderrthreshold=0 -minloglevel=0"); LOG(INFO) << "exec cmd: " << createLPCmd; RETURN_IF_NOT_ZERO(system(createLPCmd.c_str())); @@ -758,7 +758,7 @@ int CurveCluster::PrepareLogicalPool(int mdsId, const std::string &clusterMap) { return 0; } -bool CurveCluster::CurrentServiceMDS(int *curId) { +bool CurveCluster::CurrentServiceMDS(int* curId) { for (auto mdsId : mdsPidMap_) { if (0 == ProbePort(mdsIpPort_[mdsId.first], 20000, true)) { *curId = mdsId.first; @@ -772,8 +772,8 @@ bool CurveCluster::CurrentServiceMDS(int *curId) { return false; } -int CurveCluster::CreateFile(const std::string &user, const std::string &pwd, - const std::string &fileName, uint64_t fileSize, +int CurveCluster::CreateFile(const std::string& user, const std::string& pwd, + const std::string& fileName, uint64_t fileSize, bool normalFile, const std::string& poolset) { LOG(INFO) << "create file: " << fileName << ", size: " << fileSize << " begin..."; @@ -785,13 +785,12 @@ int CurveCluster::CreateFile(const std::string &user, const std::string &pwd, context.length = fileSize; context.poolset = poolset; - RETURN_IF_NOT_ZERO( - mdsClient_->CreateFile(context)); + RETURN_IF_NOT_ZERO(mdsClient_->CreateFile(context)); LOG(INFO) << "success create file"; return 0; } -int CurveCluster::ProbePort(const std::string &ipPort, int64_t timeoutMs, +int CurveCluster::ProbePort(const std::string& ipPort, int64_t timeoutMs, bool expectOpen) { int socket_fd = socket(AF_INET, SOCK_STREAM, 0); if (-1 == socket_fd) { @@ -819,7 +818,7 @@ int CurveCluster::ProbePort(const std::string &ipPort, int64_t timeoutMs, uint64_t start = ::curve::common::TimeUtility::GetTimeofDayMs(); while (::curve::common::TimeUtility::GetTimeofDayMs() - start < timeoutMs) { int connectRes = - connect(socket_fd, (struct sockaddr *)&addr, sizeof(addr)); + connect(socket_fd, (struct sockaddr*)&addr, sizeof(addr)); if (expectOpen && connectRes == 0) { LOG(INFO) << "probe " << ipPort << " success."; close(socket_fd); diff --git a/test/integration/cluster_common/cluster.h b/test/integration/cluster_common/cluster.h index e5b04d30c7..2b25fca7a0 100644 --- a/test/integration/cluster_common/cluster.h +++ b/test/integration/cluster_common/cluster.h @@ -23,431 +23,451 @@ #ifndef TEST_INTEGRATION_CLUSTER_COMMON_CLUSTER_H_ #define 
TEST_INTEGRATION_CLUSTER_COMMON_CLUSTER_H_ -#include #include -#include #include -#include "src/client/mds_client.h" +#include +#include + #include "src/client/config_info.h" -#include "test/util/config_generator.h" +#include "src/client/mds_client.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h" +#include "test/util/config_generator.h" -using ::curve::snapshotcloneserver::SnapshotCloneMetaStoreEtcd; using ::curve::client::MDSClient; +using ::curve::snapshotcloneserver::SnapshotCloneMetaStoreEtcd; -namespace curve { - -#define RETURN_IF_NOT_ZERO(x) \ - do { \ - int ret = (x); \ - if (ret != 0) { \ - LOG(ERROR) << __FILE__ << ":" << __LINE__ \ - << "-> get non-ZERO, return -1"; \ - return ret; \ - } \ +namespace curve +{ + +#define RETURN_IF_NOT_ZERO(x) \ + do \ + { \ + int ret = (x); \ + if (ret != 0) \ + { \ + LOG(ERROR) << __FILE__ << ":" << __LINE__ \ + << "-> get non-ZERO, return -1"; \ + return ret; \ + } \ } while (0) -#define RETURN_IF_FALSE(x) \ - do { \ - bool ret = (x); \ - if (!ret) { \ - LOG(ERROR) << __FILE__ << ":" << __LINE__ \ - << "-> get FALSE, return -1"; \ - return -1; \ - } \ +#define RETURN_IF_FALSE(x) \ + do \ + { \ + bool ret = (x); \ + if (!ret) \ + { \ + LOG(ERROR) << __FILE__ << ":" << __LINE__ \ + << "-> get FALSE, return -1"; \ + return -1; \ + } \ } while (0) -class CurveCluster { - public: - /** - * CurveCluster 构造函数 - * - * @param[in] netWorkSegment 网桥的网络地址,默认为"192.168.200." - * @param[in] nsPrefix 网络命名空间的前缀,默认为"integ_" - */ - CurveCluster(const std::string &netWorkSegment = "192.168.200.", - const std::string &nsPrefix = "integ_") - : networkSegment_(netWorkSegment), nsPrefix_(nsPrefix) {} - - /** - * InitMdsClient 初始化mdsclient, 用于和mds交互 - * - * @param op 参数设置 - * @return 0.成功; 非0.失败 - */ - int InitMdsClient(const curve::client::MetaServerOption &op); - - - /** - * @brief 初始化metastore - * - * @param[in] etcdEndpoints etcd client的ip port - * - * @return 返回错误码 - */ - int InitSnapshotCloneMetaStoreEtcd( - const std::string &etcdEndpoints); - - /** - * BuildNetWork 如果需要是用不同的ip来起chunkserver, - * 需要在测试用例的SetUp中先 调用该函数 - * @return 0.成功; 非0.失败 - */ - int BuildNetWork(); - - /** - * StopCluster 停止该集群中所有的进程 - * @return 0.成功; -1.失败 - */ - int StopCluster(); - - /** - * @brief 生成各模块配置文件 - * - * @tparam T 任一ConfigGenerator - * @param configPath 配置文件路径 - * @param options 修改的配置项 - */ - template - void PrepareConfig(const std::string &configPath, - const std::vector &options) { - T gentor(configPath); - gentor.SetConfigOptions(options); - gentor.Generate(); - } - - /** - * StartSingleMDS 启动一个mds - * 如果需要不同ip的chunkserver,ipPort请设置为192.168.200.1:XXXX - * - * @param[in] id mdsId - * @param[in] ipPort 指定mds的ipPort - * @param[in] mdsConf mds启动参数项, 示例: - * const std::vector mdsConf{ - {"--graceful_quit_on_sigterm"}, - {"--confPath=./test/integration/cluster_common/mds.basic.conf"}, - }; - * @param[in] expectLeader 是否预期是leader - * @return 成功则返回pid; 失败则返回-1 - */ - int StartSingleMDS(int id, const std::string &ipPort, int dummyPort, - const std::vector &mdsConf, - bool expectLeader); - - /** - * StopMDS 停止指定id的mds - * @return 0.成功; -1.失败 - */ - int StopMDS(int id); - - /** - * StopAllMDS 停止所有mds - * @return 0.成功; -1.失败 - */ - int StopAllMDS(); - - /** - * @brief 启动一个snapshotcloneserver - * - * @param id snapshotcloneserver 的Id - * @param ipPort ip端口 - * @param snapshotcloneConf 参数项 - * @return 成功则返回pid; 失败则返回-1 - */ - int - StartSnapshotCloneServer(int id, const std::string &ipPort, - const std::vector &snapshotcloneConf); - - /** - * @brief 
停止指定Id的snapshotcloneserver - * - * @param id snapshotcloneserver的id - * @param force 为true时使用kill -9 - * @return 成功返回0,失败返回-1 - */ - int StopSnapshotCloneServer(int id, bool force = false); - - /** - * @brief 重启指定Id的snapshotcloneserver - * - * @param id snapshotcloneserver的id - * @param force 为true时使用kill -9 - * @return 成功则返回pid; 失败则返回-1 - */ - int RestartSnapshotCloneServer(int id, bool force = false); - - /** - * @brief 停止所有的snapshotcloneserver - * @return 成功返回0,失败返回-1 - */ - int StopAllSnapshotCloneServer(); - - /** - * StartSingleEtcd 启动一个etcd节点 - * - * @param clientIpPort - * @param peerIpPort - * @param etcdConf etcd启动项参数, 建议按照模块指定name,防止并发运行时冲突 - * std::vector{"--name basic_test_start_stop_module1"} - * @return 成功则返回pid; 失败则返回-1 - */ - int StartSingleEtcd(int id, const std::string &clientIpPort, - const std::string &peerIpPort, - const std::vector &etcdConf); - - /** - * WaitForEtcdClusterAvalible - * 在一定时间内等待etcd集群leader选举成功,处于可用状态 - */ - bool WaitForEtcdClusterAvalible(int waitSec = 20); - - /** - * StopEtcd 停止指定id的etcd节点 - * @return 0.成功; -1.失败 - */ - int StopEtcd(int id); - - /** - * StopAllEtcd 停止所有etcd节点 - * @return 0.成功; -1.失败 - */ - int StopAllEtcd(); - - /** - * @brief 格式化FilePool - * - * @param filePooldir FilePool目录 - * @param filePoolmetapath FilePool元数据目录 - * @param filesystemPath 文件系统目录 - * @param size FilePool size (GB) - * @return 成功返回0,失败返回-1 - */ - int FormatFilePool(const std::string &filePooldir, - const std::string &filePoolmetapath, - const std::string &filesystemPath, uint32_t size); - - /** - * StartSingleChunkServer 启动一个chunkserver节点 - * - * @param[in] id - * @param[in] ipPort - * @param[in] chunkserverConf chunkserver启动项,示例: - * const std::vector chunkserverConf1{ - {"--graceful_quit_on_sigterm"}, - {"-chunkServerStoreUri=local://./basic1/"}, - {"-chunkServerMetaUri=local://./basic1/chunkserver.dat"}, - {"-copySetUri=local://./basic1/copysets"}, - {"-recycleUri=local://./basic1/recycler"}, - {"-chunkFilePoolDir=./basic1/chunkfilepool/"}, - {"-chunkFilePoolMetaPath=./basic1/chunkfilepool.meta"}, - {"-conf=./test/integration/cluster_common/chunkserver.basic.conf"}, - {"-raft_sync_segments=true"}, - }; - 建议文件名也按模块的缩写来,文件名不能太长,否则注册到数据库会失败 - * @return 成功则返回pid; 失败则返回-1 - */ - int StartSingleChunkServer(int id, const std::string &ipPort, - const std::vector &chunkserverConf); - - /** - * StartSingleChunkServer 在网络命名空间内启动一个指定id的chunkserver - * 无需指定ipPort - * - * @param id - * @param chunkserverConf, 同StartSingleChunkServer的示例 - * @return 成功则返回pid; 失败则返回-1 - */ - int StartSingleChunkServerInBackground( - int id, const std::vector &chunkserverConf); - - /** - * StopChunkServer 停掉指定id的chunkserver进程 - * @return 0.成功; -1.失败 - */ - int StopChunkServer(int id); - - /** - * StopAllChunkServer 停止所有chunkserver - * @return 0.成功; -1.失败 - */ - int StopAllChunkServer(); - - /** - * PreparePhysicalPool 创建物理池 - * - * @param[in] id 给指定id的mds发送命令 - * @param[in] clusterMap 拓扑信息,示例: - * ./test/integration/cluster_common/cluster_common_topo_1.txt (不同ip) - * ./test/integration/cluster_common/cluster_common_topo_2.txt - * (相同ip, 一定要加上port加以区分, - * chunkserver也必须和clusterMap中server的ipPort相同) - * @return 0.成功; -1.失败 - */ - int PreparePhysicalPool(int mdsId, const std::string &clusterMap); - - /** - * @return 0.成功; -1.失败 - */ - int PrepareLogicalPool(int mdsId, const std::string &clusterMap); - - /** - * MDSIpPort 获取指定id的mds地址 - */ - std::string MDSIpPort(int id); - - /** - * EtcdClientIpPort 获取指定id的etcd client地址 - */ - std::string EtcdClientIpPort(int id); - - /** - * EtcdPeersIpPort 
获取指定id的etcd peers地址 - */ - std::string EtcdPeersIpPort(int id); - - /** - * ChunkServerIpPort 获取指定id的chunkserver地址 - */ - std::string ChunkServerIpPort(int id); - - /** - * HangMDS hang住指定mds进程 - * @return 0.成功; -1.失败 - */ - int HangMDS(int id); - - /** - * RecoverHangMDS 恢复hang住的mds进程 - * @return 0.成功; -1.失败 - */ - int RecoverHangMDS(int id); - - /** - * HangEtcd hang住指定etcd进程 - * @return 0.成功; -1.失败 - */ - int HangEtcd(int id); - - /** - * RecoverHangEtcd 恢复hang住的mds进程 - * @return 0.成功; -1.失败 - */ - int RecoverHangEtcd(int id); - - /** - * HangChunkServer hang住指定chunkserver进程 - * @return 0.成功; -1.失败 - */ - int HangChunkServer(int id); - - /** - * RecoverHangChunkServer 恢复hang住的chunkserver进程 - * @return 0.成功; -1.失败 - */ - int RecoverHangChunkServer(int id); - - /** - * CurrentServiceMDS 获取当前正在提供服务的mds - * - * @param[out] curId 当前正在服务的mds编号 - * - * @return true表示有正在服务的mds, false表示没有正在服务的mds - */ - bool CurrentServiceMDS(int *curId); - - /** - * CreateFile 在curve中创建文件 - * - * @param[in] user 用户 - * @param[in] pwd 密码 - * @param[in] fileName 文件名 - * @param[in] fileSize 文件大小 - * @param[in] normalFile 是否为normal file - * @return 0.成功; -1.失败 - */ - int CreateFile(const std::string &user, const std::string &pwd, - const std::string &fileName, uint64_t fileSize = 0, - bool normalFile = true, const std::string& poolset = ""); - - private: - /** - * ProbePort 探测指定ipPort是否处于监听状态 - * - * @param[in] ipPort 指定的ipPort值 - * @param[in] timeoutMs 探测的超时时间,单位是ms - * @param[in] expectOpen 是否希望是监听状态 - * - * @return 0表示指定时间内的探测符合预期. -1表示指定时间内的探测不符合预期 - */ - int ProbePort(const std::string &ipPort, int64_t timeoutMs, - bool expectOpen); - - /** - * ChunkServerIpPortInBackground - * 在需要不同ip的chunkserver的情况下,用于生成chunkserver ipPort - */ - std::string ChunkServerIpPortInBackground(int id); - - /** - * HangProcess hang住一个进程 - * - * @param pid 进程id - * @return 0.成功; -1.失败 - */ - int HangProcess(pid_t pid); - - /** - * RecoverHangProcess 恢复hang住的进程 - * - * @param pid 进程id - * @return 0.成功; -1.失败 - */ - int RecoverHangProcess(pid_t pid); - - private: - // 网络号 - std::string networkSegment_; - - // 网络命名空间前缀 - std::string nsPrefix_; - - // mds的id对应的进程号 - std::map mdsPidMap_; - - // mds的id对应的ipport - std::map mdsIpPort_; - - // snapshotcloneserver id对应的pid - std::map snapPidMap_; - - // snapshotcloneserver id对应的ipPort - std::map snapIpPort_; - - // snapshotcloneserver id对应的conf - std::map> snapConf_; - - // etcd的id对应的进程号 - std::map etcdPidMap_; - - // etcd的id对应的client ipport - std::map etcdClientIpPort_; - - // etcd的id对应的peer ipport - std::map etcdPeersIpPort_; - - // chunkserver的id对应的进程号 - std::map chunkserverPidMap_; - - // chunkserver的id对应的ipport - std::map chunkserverIpPort_; - - // mdsClient - std::shared_ptr mdsClient_; - - public: - // SnapshotCloneMetaStore用于测试过程中灌数据 - std::shared_ptr metaStore_; -}; -} // namespace curve - -#endif // TEST_INTEGRATION_CLUSTER_COMMON_CLUSTER_H_ + class CurveCluster + { + public: + /** + * CurveCluster constructor + * + * @param[in] netWorkSegment The network address of the bridge, which + * defaults to "192.168.200." + * @param[in] nsPrefix The prefix of the network namespace, which defaults + * to "integ_" + */ + CurveCluster(const std::string &netWorkSegment = "192.168.200.", + const std::string &nsPrefix = "integ_") + : networkSegment_(netWorkSegment), nsPrefix_(nsPrefix) {} + + /** + * InitMdsClient initializes mdsclient for interaction with mds + * + * @param op parameter setting + * @return 0. Success; Non 0. 
Failure + */ + int InitMdsClient(const curve::client::MetaServerOption &op); + + /** + * @brief Initialize metastore + * + * @param[in] etcdEndpoints etcd client's IP port + * + * @return returns an error code + */ + int InitSnapshotCloneMetaStoreEtcd(const std::string &etcdEndpoints); + + /** + * BuildNetWork: if chunkservers need to be started on different IPs, + * call this function first in the SetUp of the test case + * @return 0. Success; Non 0. Failure + */ + int BuildNetWork(); + + /** + * StopCluster stops all processes in the cluster + * @return 0.Success; -1.Failure + */ + int StopCluster(); + + /** + * @brief Generate configuration files for each module + * + * @tparam T any ConfigGenerator + * @param configPath Configuration file path + * @param options Configuration items to modify + */ + template + void PrepareConfig(const std::string &configPath, + const std::vector &options) + { + T gentor(configPath); + gentor.SetConfigOptions(options); + gentor.Generate(); + } + + /** + * StartSingleMDS starts an mds + * If chunkservers with different IPs are needed, set ipPort to + * 192.168.200.1:XXXX + * + * @param[in] id mdsId + * @param[in] ipPort specifies the ipPort of the mds + * @param[in] mdsConf mds startup parameters, example: + * const std::vector<std::string> mdsConf{ {"--graceful_quit_on_sigterm"}, {"--confPath=./test/integration/cluster_common/mds.basic.conf"}, }; + * @param[in] expectLeader whether this mds is expected to become leader + * @return on success returns the pid; on failure returns -1 + */ + int StartSingleMDS(int id, const std::string &ipPort, int dummyPort, + const std::vector<std::string> &mdsConf, + bool expectLeader); + + /** + * StopMDS stops the mds with the specified id + * @return 0.Success; -1.Failure + */ + int StopMDS(int id); + + /** + * StopAllMDS stops all mds + * @return 0.Success; -1.Failure + */ + int StopAllMDS(); + + /** + * @brief Start a snapshotcloneserver + * + * @param id The ID of the snapshotcloneserver + * @param ipPort IP and port + * @param snapshotcloneConf startup parameters + * @return on success returns the pid; on failure returns -1 + */ + int StartSnapshotCloneServer( + int id, const std::string &ipPort, + const std::vector<std::string> &snapshotcloneConf); + + /** + * @brief Stop the snapshotcloneserver with the specified Id + * + * @param id The ID of the snapshotcloneserver + * @param force if true, use kill -9 + * @return returns 0 for success, -1 for failure + */ + int StopSnapshotCloneServer(int id, bool force = false); + + /** + * @brief Restart the snapshotcloneserver with the specified Id + * + * @param id The ID of the snapshotcloneserver + * @param force if true, use kill -9 + * @return on success returns the pid; on failure returns -1 + */ + int RestartSnapshotCloneServer(int id, bool force = false); + + /** + * @brief Stop all snapshotcloneservers + * @return returns 0 for success, -1 for failure + */ + int StopAllSnapshotCloneServer(); + + /** + * StartSingleEtcd starts an etcd node + * + * @param clientIpPort + * @param peerIpPort + * @param etcdConf etcd startup parameters; it is recommended to set the + * name per module to avoid conflicts between concurrent runs + * std::vector<std::string>{"--name basic_test_start_stop_module1"} + * @return on success returns the pid; on failure returns -1 + */ + int StartSingleEtcd(int id, const std::string &clientIpPort, + const std::string &peerIpPort, + const std::vector<std::string> &etcdConf); + + /** + * WaitForEtcdClusterAvalible + * Wait up to waitSec seconds for the etcd cluster to elect a leader and + * become available + */ + bool WaitForEtcdClusterAvalible(int waitSec = 20);
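Putting the pieces above together, a condensed usage sketch of the harness (mirroring cluster_basic_test.cpp later in this patch; the ids, ports, and the mdsConf vector are illustrative):

    CurveCluster cluster;
    std::vector<std::string> mdsConf{"--graceful_quit_on_sigterm"};
    pid_t pid = cluster.StartSingleEtcd(1, "127.0.0.1:2221", "127.0.0.1:2222",
                                        {"--name=usage_sketch"});
    ASSERT_GT(pid, 0);
    ASSERT_TRUE(cluster.WaitForEtcdClusterAvalible());
    pid = cluster.StartSingleMDS(1, "127.0.0.1:3333", 3334, mdsConf, true);
    ASSERT_GT(pid, 0);
    // ... start chunkservers, prepare pools, run the test body ...
    ASSERT_EQ(0, cluster.StopCluster());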
 + + /** + * StopEtcd stops the etcd node with the specified id + * @return 0.Success; -1.Failure + */ + int StopEtcd(int id); + + /** + * StopAllEtcd stops all etcd nodes + * @return 0.Success; -1.Failure + */ + int StopAllEtcd(); + + /** + * @brief Format FilePool + * + * @param filePooldir FilePool directory + * @param filePoolmetapath FilePool metadata directory + * @param filesystemPath file system directory + * @param size FilePool size (GB) + * @return returns 0 for success, -1 for failure + */ + int FormatFilePool(const std::string &filePooldir, + const std::string &filePoolmetapath, + const std::string &filesystemPath, uint32_t size); + + /** + * StartSingleChunkServer starts a chunkserver node + * + * @param[in] id + * @param[in] ipPort + * @param[in] chunkserverConf chunkserver startup items, example: + * const std::vector<std::string> chunkserverConf1{ + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./basic1/"}, + {"-chunkServerMetaUri=local://./basic1/chunkserver.dat"}, + {"-copySetUri=local://./basic1/copysets"}, + {"-recycleUri=local://./basic1/recycler"}, + {"-chunkFilePoolDir=./basic1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./basic1/chunkfilepool.meta"}, + {"-conf=./test/integration/cluster_common/chunkserver.basic.conf"}, + {"-raft_sync_segments=true"}, + }; + It is recommended to name the files by module abbreviation as well; + the file name must not be too long, otherwise registration to the + database will fail + * @return on success returns the pid; on failure returns -1 + */ + int StartSingleChunkServer(int id, const std::string &ipPort, + const std::vector<std::string> &chunkserverConf); + + /** + * StartSingleChunkServerInBackground starts a chunkserver with the + * specified id inside the network namespace; no ipPort needs to be given + * + * @param id + * @param chunkserverConf, same as the example for StartSingleChunkServer + * @return on success returns the pid; on failure returns -1 + */ + int StartSingleChunkServerInBackground( + int id, const std::vector<std::string> &chunkserverConf); + + /** + * StopChunkServer stops the chunkserver process with the specified id + * @return 0.Success; -1.Failure + */ + int StopChunkServer(int id); + + /** + * StopAllChunkServer stops all chunkservers + * @return 0.Success; -1.Failure + */ + int StopAllChunkServer(); + + /** + * PreparePhysicalPool creates the physical pool + * + * @param[in] mdsId send the command to the mds with this id + * @param[in] clusterMap topology information, example: + * ./test/integration/cluster_common/cluster_common_topo_1.txt (different + * IPs) + * ./test/integration/cluster_common/cluster_common_topo_2.txt + * (with the same IP, ports must be added to distinguish servers, + * and each chunkserver must match the ipPort of its server in + * the clusterMap) + * @return 0.Success; -1.Failure + */ + int PreparePhysicalPool(int mdsId, const std::string &clusterMap); + + /** + * @return 0.Success; -1.Failure + */ + int PrepareLogicalPool(int mdsId, const std::string &clusterMap); + + /** + * MDSIpPort retrieves the mds address of the specified id + */ + std::string MDSIpPort(int id); + + /** + * EtcdClientIpPort retrieves the etcd client address for the specified id + */ + std::string EtcdClientIpPort(int id); + + /** + * EtcdPeersIpPort retrieves the etcd peers address of the specified id + */ + std::string EtcdPeersIpPort(int id); + + /** + * ChunkServerIpPort retrieves the chunkserver address for the specified id + */ + std::string ChunkServerIpPort(int id); + + /** + * HangMDS hangs the specified mds process + * @return 0.Success; -1.Failure + */ + int HangMDS(int id); + + /** + * RecoverHangMDS recovers the hung mds process + * @return 0.Success; -1.Failure + */ + int RecoverHangMDS(int id); + + /** + * HangEtcd hangs the specified etcd process + * @return 0.Success; -1.Failure + */ + int HangEtcd(int id); + + /** + * RecoverHangEtcd recovers the hung etcd process + * @return 0.Success; -1.Failure + */ + int RecoverHangEtcd(int id); + + /** + * HangChunkServer hangs the specified chunkserver process + * @return 0.Success; -1.Failure + */ + int HangChunkServer(int id); + + /** + * RecoverHangChunkServer recovers the hung chunkserver process + * @return 0.Success; -1.Failure + */ + int RecoverHangChunkServer(int id); + + /** + * CurrentServiceMDS obtains the mds that is currently providing service + * + * @param[out] curId the id of the currently serving mds + * + * @return true if an mds is currently serving, false otherwise + */ + bool CurrentServiceMDS(int *curId); + + /** + * CreateFile creates a file in Curve. + * + * @param[in] user User + * @param[in] pwd Password + * @param[in] fileName File name + * @param[in] fileSize File size + * @param[in] normalFile Whether it is a normal file + * @param[in] poolset Poolset name + * @return 0. Success; -1. Failure + */ + int CreateFile(const std::string &user, const std::string &pwd, + const std::string &fileName, uint64_t fileSize = 0, + bool normalFile = true, const std::string &poolset = ""); + + private: + /** + * ProbePort checks whether the specified ipPort is in a listening state. + * + * @param[in] ipPort The specified ipPort value. + * @param[in] timeoutMs The timeout for probing in milliseconds. + * @param[in] expectOpen Whether it is expected to be in a listening state. + * + * @return 0 indicates that the probing meets the expected condition within + * the specified time. -1 indicates that the probing does not meet the + * expected condition within the specified time. + */ + int ProbePort(const std::string &ipPort, int64_t timeoutMs, + bool expectOpen);
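ProbePort is the primitive behind CurrentServiceMDS and the availability checks above: with expectOpen=true it polls until the address accepts a TCP connection or timeoutMs elapses, with expectOpen=false until connections stop succeeding. A small illustrative check (the address and timeout are arbitrary):

    // Returns 0 once the expectation holds within the window, -1 otherwise.
    if (ProbePort("127.0.0.1:3333", 5000, true) == 0) {
        // the mds at 127.0.0.1:3333 is listening
    }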
 + + /** + * ChunkServerIpPortInBackground + * Used to generate a chunkserver ipPort when chunkservers with different + * IPs are required + */ + std::string ChunkServerIpPortInBackground(int id); + + /** + * HangProcess hangs a process + * + * @param pid process id + * @return 0.Success; -1.Failure + */ + int HangProcess(pid_t pid); + + /** + * RecoverHangProcess recovers a hung process + * + * @param pid process id + * @return 0.Success; -1.Failure + */ + int RecoverHangProcess(pid_t pid); + + private: + // Network number + std::string networkSegment_; + + // Network namespace prefix + std::string nsPrefix_; + + // The pid corresponding to each mds id + std::map<int, pid_t> mdsPidMap_; + + // The ipPort corresponding to each mds id + std::map<int, std::string> mdsIpPort_; + + // The pid corresponding to each snapshotcloneserver id + std::map<int, pid_t> snapPidMap_; + + // The ipPort corresponding to each snapshotcloneserver id + std::map<int, std::string> snapIpPort_; + + // The conf corresponding to each snapshotcloneserver id + std::map<int, std::vector<std::string>> snapConf_; + + // The pid corresponding to each etcd id + std::map<int, pid_t> etcdPidMap_; + + // The client ipPort corresponding to each etcd id + std::map<int, std::string> etcdClientIpPort_; + + // The peer ipPort corresponding to each etcd id + std::map<int, std::string> etcdPeersIpPort_; + + // The pid corresponding to each chunkserver id + std::map<int, pid_t> chunkserverPidMap_; + + // The ipPort corresponding to each chunkserver id + std::map<int, std::string> chunkserverIpPort_; + + // mdsClient + std::shared_ptr<MDSClient> mdsClient_; + + public: + // SnapshotCloneMetaStore for filling data during testing + std::shared_ptr<SnapshotCloneMetaStoreEtcd> metaStore_; + }; +} // namespace curve + +#endif // TEST_INTEGRATION_CLUSTER_COMMON_CLUSTER_H_
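The Hang*/RecoverHang* family pauses a process without killing it, so peers observe it as unresponsive rather than dead. The body of HangProcess is not shown in this hunk; a plausible minimal implementation (an assumption for illustration, not the project's code) is signal-based:

    #include <signal.h>
    #include <sys/types.h>

    // Assumption: "hang" == SIGSTOP (an uncatchable suspend) and
    // "recover" == SIGCONT (resume). kill() returns 0 on success.
    static int HangProcessSketch(pid_t pid) { return kill(pid, SIGSTOP) == 0 ? 0 : -1; }
    static int RecoverProcessSketch(pid_t pid) { return kill(pid, SIGCONT) == 0 ? 0 : -1; }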
diff --git a/test/integration/cluster_common/cluster_basic_test.cpp b/test/integration/cluster_common/cluster_basic_test.cpp index 8f49b1ebe0..071bc58e1f 100644 --- a/test/integration/cluster_common/cluster_basic_test.cpp +++ b/test/integration/cluster_common/cluster_basic_test.cpp @@ -14,7 +14,6 @@ * limitations under the License. */ - /* * Project: curve * Created Date: 19-09-02 @@ -22,113 +21,110 @@ */ #include + +#include //NOLINT #include #include -#include #include //NOLINT -#include //NOLINT +#include + #include "test/integration/cluster_common/cluster.h" namespace curve { const std::vector<std::string> mdsConf{ - { "--graceful_quit_on_sigterm" }, - { "--confPath=./conf/mds.conf" }, - { "--mdsDbName=cluster_common_curve_mds" }, - { "--sessionInterSec=30" }, + {"--graceful_quit_on_sigterm"}, + {"--confPath=./conf/mds.conf"}, + {"--mdsDbName=cluster_common_curve_mds"}, + {"--sessionInterSec=30"}, }; const std::vector<std::string> chunkserverConf1{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./basic1/" }, - { "-chunkServerMetaUri=local://./basic1/chunkserver.dat" }, - { "-copySetUri=local://./basic1/copysets" }, - { "-raftSnapshotUri=curve://./basic1/copysets" }, - { "-raftLogUri=curve://./basic1/copysets" }, - { "-recycleUri=local://./basic1/recycler" }, - { "-chunkFilePoolDir=./basic1/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./basic1/chunkfilepool.meta" }, - { "-conf=./conf/chunkserver.conf.example" }, - { "-raft_sync_segments=true" }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=./basic1/walfilepool/" }, - { "-walFilePoolMetaPath=./basic1/walfilepool.meta" } -}; + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./basic1/"}, + {"-chunkServerMetaUri=local://./basic1/chunkserver.dat"}, + {"-copySetUri=local://./basic1/copysets"}, + {"-raftSnapshotUri=curve://./basic1/copysets"}, + {"-raftLogUri=curve://./basic1/copysets"}, + {"-recycleUri=local://./basic1/recycler"}, + {"-chunkFilePoolDir=./basic1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./basic1/chunkfilepool.meta"}, + {"-conf=./conf/chunkserver.conf.example"}, + {"-raft_sync_segments=true"}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=./basic1/walfilepool/"}, + {"-walFilePoolMetaPath=./basic1/walfilepool.meta"}}; const std::vector<std::string> chunkserverConf2{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./basic2/" }, - { "-chunkServerMetaUri=local://./basic2/chunkserver.dat" }, - { "-copySetUri=local://./basic2/copysets" }, - { "-raftSnapshotUri=curve://./basic2/copysets" }, - { "-raftLogUri=curve://./basic2/copysets" }, - { "-recycleUri=local://./basic2/recycler" }, - { "-chunkFilePoolDir=./basic2/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./basic2/chunkfilepool.meta" }, - { "-conf=./conf/chunkserver.conf.example" }, - { "-raft_sync_segments=true" }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=./basic2/walfilepool/" }, - { "-walFilePoolMetaPath=./basic2/walfilepool.meta" } -}; + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./basic2/"}, + {"-chunkServerMetaUri=local://./basic2/chunkserver.dat"}, + {"-copySetUri=local://./basic2/copysets"}, + {"-raftSnapshotUri=curve://./basic2/copysets"}, + {"-raftLogUri=curve://./basic2/copysets"}, + {"-recycleUri=local://./basic2/recycler"}, + {"-chunkFilePoolDir=./basic2/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./basic2/chunkfilepool.meta"}, + {"-conf=./conf/chunkserver.conf.example"}, + {"-raft_sync_segments=true"}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=./basic2/walfilepool/"}, + {"-walFilePoolMetaPath=./basic2/walfilepool.meta"}}; const std::vector<std::string> chunkserverConf3{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./basic3/" }, - { 
"-chunkServerMetaUri=local://./basic3/chunkserver.dat" }, - { "-copySetUri=local://./basic3/copysets" }, - { "-raftSnapshotUri=curve://./basic3/copysets" }, - { "-raftLogUri=curve://./basic3/copysets" }, - { "-recycleUri=local://./basic3/recycler" }, - { "-chunkFilePoolDir=./basic3/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./basic3/chunkfilepool.meta" }, - { "-conf=./conf/chunkserver.conf.example" }, - { "-raft_sync_segments=true" }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=./basic3/walfilepool/" }, - { "-walFilePoolMetaPath=./basic3/walfilepool.meta" } -}; + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./basic3/"}, + {"-chunkServerMetaUri=local://./basic3/chunkserver.dat"}, + {"-copySetUri=local://./basic3/copysets"}, + {"-raftSnapshotUri=curve://./basic3/copysets"}, + {"-raftLogUri=curve://./basic3/copysets"}, + {"-recycleUri=local://./basic3/recycler"}, + {"-chunkFilePoolDir=./basic3/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./basic3/chunkfilepool.meta"}, + {"-conf=./conf/chunkserver.conf.example"}, + {"-raft_sync_segments=true"}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=./basic3/walfilepool/"}, + {"-walFilePoolMetaPath=./basic3/walfilepool.meta"}}; class ClusterBasicTest : public ::testing::Test { protected: void SetUp() { curveCluster_ = std::make_shared(); - // TODO(lixiaocui): 需要用sudo去运行,后续打开 + // TODO(lixiaocui): It needs to be run with sudo and opened later // curveCluster_->BuildNetWork(); } - void TearDown() { - ASSERT_EQ(0, curveCluster_->StopCluster()); - } + void TearDown() { ASSERT_EQ(0, curveCluster_->StopCluster()); } protected: std::shared_ptr curveCluster_; }; -// TODO(lixiaocui): 需要sudo运行,ci变更后打开 +// TODO(lixiaocui): Requires sudo to run and open after ci changes TEST_F(ClusterBasicTest, DISABLED_test_start_stop_module1) { - // 起etcd + // Starting etcd pid_t pid = curveCluster_->StartSingleEtcd( 1, "127.0.0.1:2221", "127.0.0.1:2222", - std::vector{ "--name=basic_test_start_stop_module1" }); + std::vector{"--name=basic_test_start_stop_module1"}); LOG(INFO) << "etcd 1 started on 127.0.0.1:2221:2222, pid = " << pid; ASSERT_GT(pid, 0); - // 起mds + // Starting mds pid = curveCluster_->StartSingleMDS(1, "192.168.200.1:3333", 3334, mdsConf, true); LOG(INFO) << "mds 1 started on 192.168.200.1:3333, pid = " << pid; ASSERT_GT(pid, 0); - // 创建物理池 + // Creating a physical pool ASSERT_EQ( 0, curveCluster_->PreparePhysicalPool( 1, "./test/integration/cluster_common/cluster_common_topo_1.json")); - // 创建chunkserver + // Create chunkserver pid = curveCluster_->StartSingleChunkServerInBackground(1, chunkserverConf1); LOG(INFO) << "chunkserver 1 started in background, pid = " << pid; @@ -142,17 +138,19 @@ TEST_F(ClusterBasicTest, DISABLED_test_start_stop_module1) { LOG(INFO) << "chunkserver 3 started in background, pid = " << pid; ASSERT_GT(pid, 0); - // 创建逻辑池和copyset - ASSERT_EQ(0, curveCluster_->PrepareLogicalPool( - 1, "./test/integration/cluster_common/cluster_common_topo_1.json")); + // Creating logical pools and copysets + ASSERT_EQ( + 0, + curveCluster_->PrepareLogicalPool( + 1, "./test/integration/cluster_common/cluster_common_topo_1.json")); - // 停掉chunkserver + // Stop chunkserver ASSERT_EQ(0, curveCluster_->StopChunkServer(1)); ASSERT_EQ(0, curveCluster_->StopChunkServer(2)); ASSERT_EQ(0, curveCluster_->StopChunkServer(3)); - // 停掉mds + // Stop mds ASSERT_EQ(0, curveCluster_->StopMDS(1)); - // 停掉etcd + // Stop etcd ASSERT_EQ(0, curveCluster_->StopEtcd(1)); 
system("rm -r test_start_stop_module1.etcd"); @@ -165,16 +163,16 @@ TEST_F(ClusterBasicTest, test_start_stop_module2) { ASSERT_EQ(0, system("rm -fr basic*")); ASSERT_EQ(0, system((std::string("mkdir -p ") + commonDir).c_str())); - // 起etcd + // Starting etcd std::string etcdDir = commonDir + "/etcd.log"; pid_t pid = curveCluster_->StartSingleEtcd( 1, "127.0.0.1:2221", "127.0.0.1:2222", - std::vector{ "--name=test_start_stop_module2" }); + std::vector{"--name=test_start_stop_module2"}); LOG(INFO) << "etcd 1 started on 127.0.0.1:2221:2222, pid = " << pid; ASSERT_GT(pid, 0); ASSERT_TRUE(curveCluster_->WaitForEtcdClusterAvalible()); - // 起mds + // Starting mds auto mdsConfbak = mdsConf; auto mdsDir = commonDir + "/mds"; ASSERT_EQ(0, system((std::string("mkdir ") + mdsDir).c_str())); @@ -184,19 +182,19 @@ TEST_F(ClusterBasicTest, test_start_stop_module2) { true); LOG(INFO) << "mds 1 started on 127.0.0.1:3333, pid = " << pid; ASSERT_GT(pid, 0); - // 初始化mdsclient + // Initialize mdsclient curve::client::MetaServerOption op; op.rpcRetryOpt.rpcTimeoutMs = 4000; - op.rpcRetryOpt.addrs = std::vector{ "127.0.0.1:3333" }; + op.rpcRetryOpt.addrs = std::vector{"127.0.0.1:3333"}; ASSERT_EQ(0, curveCluster_->InitMdsClient(op)); - // 创建物理池 + // Creating a physical pool ASSERT_EQ( 0, curveCluster_->PreparePhysicalPool( 1, "./test/integration/cluster_common/cluster_common_topo_2.json")); - // 创建chunkserver + // Create chunkserver auto copy1 = chunkserverConf1; std::string chunkserver1Dir = commonDir + "/chunkserver1"; ASSERT_EQ(0, system((std::string("mkdir ") + chunkserver1Dir).c_str())); @@ -224,40 +222,42 @@ TEST_F(ClusterBasicTest, test_start_stop_module2) { LOG(INFO) << "chunkserver 3 started on 127.0.0.1:2004, pid = " << pid; ASSERT_GT(pid, 0); - // 创建逻辑池和copyset - ASSERT_EQ(0, curveCluster_->PrepareLogicalPool( - 1, "./test/integration/cluster_common/cluster_common_topo_2.json")); + // Creating logical pools and copysets + ASSERT_EQ( + 0, + curveCluster_->PrepareLogicalPool( + 1, "./test/integration/cluster_common/cluster_common_topo_2.json")); - // 创建文件 + // Create File ASSERT_EQ(0, curveCluster_->CreateFile("test", "test", "/basic_test", 10 * 1024 * 1024 * 1024UL, /*normalFile=*/true, "SSD_2")); - // 获取当前正在服务的mds + // Obtain the currently serving mds int curMds; ASSERT_TRUE(curveCluster_->CurrentServiceMDS(&curMds)); ASSERT_EQ(1, curMds); - // hang mds进程 + // hang mds process ASSERT_EQ(0, curveCluster_->HangMDS(1)); - // 创建文件失败 + // Failed to create file ASSERT_NE(0, curveCluster_->CreateFile("test1", "test1", "/basic_test1", 10 * 1024 * 1024 * 1024UL, /*normalFile=*/true, "SSD_2")); - // 恢复mds进程 + // Resume mds process ASSERT_EQ(0, curveCluster_->RecoverHangMDS(1)); - // 创建文件成功 + // Successfully created file ASSERT_EQ(0, curveCluster_->CreateFile("test2", "test2", "/basic_test2", 10 * 1024 * 1024 * 1024UL, /*normalFile=*/true, "SSD_2")); - // 停掉chunkserver + // Stop chunkserver ASSERT_EQ(0, curveCluster_->StopChunkServer(1)); ASSERT_EQ(0, curveCluster_->StopChunkServer(2)); ASSERT_EQ(0, curveCluster_->StopChunkServer(3)); - // 停掉mds + // Stop mds ASSERT_EQ(0, curveCluster_->StopMDS(1)); - // 停掉etcd + // Stop etcd ASSERT_EQ(0, curveCluster_->StopEtcd(1)); system((std::string("rm -fr ") + commonDir).c_str()); @@ -271,7 +271,7 @@ TEST_F(ClusterBasicTest, test_multi_mds_and_etcd) { ASSERT_EQ(0, system("rm -fr test_multi_etcd_node*.etcd")); ASSERT_EQ(0, system((std::string("mkdir ") + commonDir).c_str())); - // 起三个etcd + // Start three ETCDs std::string etcdDir = commonDir + "/etcd"; ASSERT_EQ(0, 
system((std::string("mkdir ") + etcdDir).c_str())); std::vector<std::string> etcdCluster{ @@ -307,7 +307,7 @@ TEST_F(ClusterBasicTest, test_multi_mds_and_etcd) { ASSERT_GT(pid, 0); ASSERT_TRUE(curveCluster_->WaitForEtcdClusterAvalible()); - // 起三mds + // Start three mds std::string mds1Dir = commonDir + "/mds1"; std::string mds2Dir = commonDir + "/mds2"; std::string mds3Dir = commonDir + "/mds3"; @@ -340,16 +340,16 @@ TEST_F(ClusterBasicTest, test_multi_mds_and_etcd) { LOG(INFO) << "mds 3 started on 127.0.0.1:2312, pid = " << pid; ASSERT_GT(pid, 0); - // 获取当前正在服务的mds + // Obtain the currently serving mds int curMds; ASSERT_TRUE(curveCluster_->CurrentServiceMDS(&curMds)); ASSERT_EQ(1, curMds); - // 停掉mds + // Stop mds ASSERT_EQ(0, curveCluster_->StopMDS(1)); ASSERT_EQ(0, curveCluster_->StopMDS(2)); ASSERT_EQ(0, curveCluster_->StopMDS(3)); - // 停掉etcd + // Stop etcd ASSERT_EQ(0, curveCluster_->StopEtcd(1)); ASSERT_EQ(0, curveCluster_->StopEtcd(2)); ASSERT_EQ(0, curveCluster_->StopEtcd(3)); diff --git a/test/integration/cluster_common/mds.basic.conf b/test/integration/cluster_common/mds.basic.conf index 9486982bf5..b0cb16d055 100644 --- a/test/integration/cluster_common/mds.basic.conf +++ b/test/integration/cluster_common/mds.basic.conf @@ -15,196 +15,196 @@ # # -# mds服务端口 +# MDS service port # mds.listen.addr=127.0.0.1:6666 # -# etcd相关配置 +# etcd related configuration # -# etcd地址 +# etcd address mds.etcd.endpoint=localhost:2221 -# client建立连接的超时时间 +# Timeout for the client to establish a connection mds.etcd.dailtimeoutMs=5000 -# client进行put/get/txn等操作的超时时间 +# Timeout for the client to perform put/get/txn and other operations mds.etcd.operation.timeoutMs=5000 -# client操作失败可以重试的次数 +# Number of times a failed client operation can be retried mds.etcd.retry.times=3 # -# segment分配量统计相关配置 +# Configuration related to segment allocation statistics # -# 将内存中的数据持久化到etcd的间隔, 单位ms +# Interval between persisting in-memory data to etcd, in ms mds.segment.alloc.periodic.persistInterMs=1000 -# 出错情况下的重试间隔,单位ms +# The retry interval in ms in case of an error mds.segment.alloc.retryInterMs=1000 -# leader竞选时会创建session, 单位是秒, 因为go端代码的接口这个值得单位就是s +# A session is created during the leader election; the unit is seconds, because that is the unit the Go-side interface expects mds.leader.sessionInterSec=5 -# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间 -# 内未当选leader会返回错误。这里设置10分钟超时,超时后mds会继续竞选 +# The timeout for leader election. If set to 0, the election will block indefinitely if unsuccessful. If set to a value greater than 0, an error will be returned if not elected as leader within the electionTimeoutMs duration. +# Here, a timeout of 10 minutes is set, and if it times out, the MDS will continue the election process. mds.leader.electionTimeoutMs=0
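Taken together, the three etcd client settings above (dailtimeoutMs, operation.timeoutMs, retry.times) bound how long the mds can stall on a broken etcd. A rough worst-case sketch, assuming the retries are sequential with no backoff (an assumption; the actual retry policy lives in the mds code):

    // dial once, then retry the operation mds.etcd.retry.times times:
    constexpr int kDialTimeoutMs = 5000;   // mds.etcd.dailtimeoutMs
    constexpr int kOpTimeoutMs   = 5000;   // mds.etcd.operation.timeoutMs
    constexpr int kRetryTimes    = 3;      // mds.etcd.retry.times
    constexpr int kWorstCaseMs   = kDialTimeoutMs + kRetryTimes * kOpTimeoutMs;
    static_assert(kWorstCaseMs == 20000, "~20 s before an etcd failure surfaces");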
 # -# scheduler相关配置 +# Scheduler related configuration # -# copysetScheduler开关 +# copysetScheduler switch mds.enable.copyset.scheduler=true -# leaderScheduler开关 +# leaderScheduler switch mds.enable.leader.scheduler=true -# recoverScheduler开关 +# recoverScheduler switch mds.enable.recover.scheduler=true -# replicaScheduler开关 +# replicaScheduler switch mds.enable.replica.scheduler=true -# copysetScheduler 轮次间隔,单位是s +# copysetScheduler round interval, in seconds mds.copyset.scheduler.intervalSec=5 -# replicaScheduler 轮次间隔,单位是s +# replicaScheduler round interval, in seconds mds.replica.scheduler.intervalSec=5 -# leaderScheduler 轮次间隔,单位是s +# leaderScheduler round interval, in seconds mds.leader.scheduler.intervalSec=30 -# recoverScheduler 轮次间隔,单位是s +# recoverScheduler round interval, in seconds mds.recover.scheduler.intervalSec=5 -# 每块磁盘上operator的并发度 +# The concurrency of operators on each disk mds.schduler.operator.concurrent=4 -# leader变更超时时间, 超时后mds从内存移除该operator +# Timeout for a leader transfer; after it expires, the mds removes the operator from memory mds.schduler.transfer.limitSec=1800 -# 减一个副本超时时间, 超时后mds从内存移除该operator +# Timeout for removing a replica; after it expires, the mds removes the operator from memory mds.scheduler.remove.limitSec=1800 -# 增加一个副本超时时间, 超时后mds从内存移除该operator +# Timeout for adding a replica; after it expires, the mds removes the operator from memory mds.scheduler.add.limitSec=1800 -# copyset数量极差不能超过均值的百分比 +# The range (max-min) of copyset counts must not exceed this percentage of the mean mds.scheduler.copysetNumRangePercent=0.05 -# chunkserver上copyset的scatte-rwidth不能超过最小值的百分比 +# The scatter-width of copysets on a chunkserver must not exceed this percentage of the minimum mds.schduler.scatterWidthRangePerent=0.2 -# 一个server上超过一定数量的chunkserver offline, 不做恢复 +# If more than this number of chunkservers on one server are offline, no recovery is performed mds.chunkserver.failure.tolerance=3 -# chunkserver启动coolingTimeSec_后才可以作为target leader, 单位是s -# TODO(lixiaocui): 续得一定程度上与快照的时间间隔方面做到相关 +# A chunkserver can become a target leader only coolingTimeSec_ seconds after it starts, in s +# TODO(lixiaocui): this should be correlated with the snapshot interval to some extent mds.scheduler.chunkserver.cooling.timeSec=1800 # -# 心跳相关配置,单位为ms +# Heartbeat related configuration, in ms # -# chunkserver和mds的心跳间隔 +# Heartbeat interval between chunkserver and mds mds.heartbeat.intervalMs=1000 -# chunkserver和mds间心跳miss的时间 +# Time after which a heartbeat between chunkserver and mds is considered missed mds.heartbeat.misstimeoutMs=3000 -# mds在心跳miss后offlinetimeout被标记为offline +# After heartbeats have been missed for offlinetimeout, mds marks the chunkserver offline mds.heartbeat.offlinetimeoutMs=1800000 -# mds启动后延迟一定时间开始指导chunkserver删除物理数据 -# 需要延迟删除的原因在代码中备注 +# After mds starts, wait for a period before instructing chunkservers to delete physical data +# The reason for the delayed deletion is noted in the code mds.heartbeat.clean_follower_afterMs=1200000 # -# namespace cache相关 +# namespace cache related # -# namestorage的缓存大小,为0表示不缓存 -# 按照每个文件最小10GB的空间预算。算上超售(2倍) -# 文件数量 = 5PB/10GB ~= 524288 个文件 -# sizeof(namespace对象) * 524288 ~= 89Byte *524288 ~= 44MB 空间 -# 16MB chunk大小, 1个segment 1GB -# sizeof(segment 对象) * 2621440 ~=(32 + (1024/16)*12)* 2621440 ~= 1.95 GB -# 数据量:3GB左右 -# 记录数量:524288+2621440 ~= 300w左右 +# The cache size of namestorage, where 0 indicates no caching +# Budgeting a minimum of 10GB per file, and counting 2x oversubscription +# Number of files = 5PB/10GB ~= 524288 files +# sizeof(namespace object) * 524288 ~= 89Byte * 524288 ~= 44MB space +# 16MB chunk size, 1 segment 1GB +# sizeof(segment object) * 2621440 ~= (32+(1024/16) * 12) * 2621440 ~= 1.95 GB +# Data volume: about 3GB +# Record count: 524288+2621440 ~= about 3 million mds.cache.count=100000
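The sizing comment above is easy to sanity-check; the snippet below reproduces its numbers exactly (pure arithmetic, no project code):

    #include <cstdint>

    constexpr int64_t kFiles    = 5LL * 1024 * 1024 / 10;  // 5 PB / 10 GB = 524288 files
    constexpr int64_t kNsBytes  = 89 * kFiles;             // ~44.5 MB of namespace objects
    constexpr int64_t kSegments = 2621440;                 // from the comment above
    constexpr int64_t kSegBytes = (32 + (1024 / 16) * 12) * kSegments;  // 800 B each
    static_assert(kFiles == 524288, "file-count estimate");
    static_assert(kSegBytes == 2097152000LL, "~1.95 GB of segment objects");
    static_assert(kFiles + kSegments == 3145728, "~3 million records");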
 # # mysql Database config # -# 数据库使用的database名称 +# The database name used by mds mds.DbName=cluster_common_curve_mds -# 数据库用户名 +# Database username mds.DbUser=root -# 数据库地址 +# Database address mds.DbUrl=localhost -# 数据库登录密码 +# Database login password mds.DbPassword=qwer mds.DbPoolSize=128 # # mds.session settings # -# mds.session过期时间,单位us +# mds.session expiration time, in us mds.session.leaseTimeUs=5000000 -# 能够容忍的client和mds之间的时钟不同步的时间,单位us +# Tolerable clock skew between client and mds, in us mds.session.toleranceTimeUs=500000 -# mds.session后台扫描线程扫描间隔时间,单位us +# Scan interval of the mds.session background scan thread, in us mds.session.intevalTimeUs=500000 # # auth settings # -# root用户密码 +# Password of the root user mds.auth.rootPassword=root_password # # file lock setting # -# mds的文件锁桶大小 +# File lock bucket size for mds mds.filelock.bucketNum=8 # # topology config # -# Toplogy 定期刷新入数据库的时间间隔 +# Interval at which Topology is periodically flushed to the database mds.topology.TopologyUpdateToRepoSec=60 -# 请求chunkserver上创建全部copyset的超时时间 +# Request timeout for creating all copysets on a chunkserver mds.topology.CreateCopysetRpcTimeoutMs=10000 -# 请求chunkserver上创建copyset重试次数 +# Retry count for copyset-creation requests to chunkservers mds.topology.CreateCopysetRpcRetryTimes=20 -# 请求chunkserver上创建copyset重试间隔 +# Retry interval for copyset-creation requests to chunkservers mds.topology.CreateCopysetRpcRetrySleepTimeMs=1000 -# Topology模块刷新metric时间间隔 +# Topology module refresh metric interval mds.topology.UpdateMetricIntervalSec=1 -# 物理池使用百分比,即使用量超过这个值即不再往这个池分配 +# Usage limit of a physical pool in percent; once usage exceeds this value, nothing more is allocated from this pool mds.topology.PoolUsagePercentLimit=90 -# 多pool选pool策略 0:Random, 1:Weight +# Pool selection policy when multiple pools exist 0:Random, 1:Weight mds.topology.choosePoolPolicy=0 # # copyset config -# 默认值,为0时不启用 +# Default values; 0 means not enabled # -# 生成copyset重试次数 +# Retry count for copyset generation mds.copyset.copysetRetryTimes=10 -# 所有chunkserver的scatterWidth需满足的最大方差 +# The maximum variance that the scatterWidth of all chunkservers must meet mds.copyset.scatterWidthVariance=0 -# 所有chunkserver的scatterWidth需满足的最大标准差 +# The maximum standard deviation that the scatterWidth of all chunkservers must meet mds.copyset.scatterWidthStandardDevation=0 -# 所有chunkserver的scatterWidth需满足的最大极差 +# The maximum range that the scatterWidth of all chunkservers must meet mds.copyset.scatterWidthRange=0 -# 所有chunkserver的scatterWidth偏离均值的百分比 -# scatterwidth偏移百分比设置过大,导致部分机器scatterwidth过小,影响机器恢复时间,恢复 -# 时间会导致集群的可靠性降低;其次,会导致部分机器scatterwith过大,某些chunkserver上的 -# copyset散落在各机器上,其他机器一旦写入,这些scatter-with较大的机器成为热点 -# scatterwidth偏移百分比设置过小,导致scatterwidth平均程度要求更大,copyset算法要求越高, -# 导致算法可能算不出理想结果,建议设置值为20 +# Percentage of Deviation from the Mean ScatterWidth of All Chunk Servers. Setting a high percentage for scatterWidth deviation can lead to some machines having +# excessively small scatterWidth, which impacts machine recovery times and reduces the overall reliability of the cluster.
Additionally, it can result in certain machines +# having excessively large scatterWidth values, causing copysets on these chunk servers to be scattered across various machines. When other machines write data, these servers +# with larger scatterWidth can become performance bottlenecks. +# Conversely, setting a low percentage for scatterWidth deviation requires a higher degree of scatterWidth uniformity, demanding more from the copyset algorithm. This +# can lead to the algorithm being unable to produce optimal results. It is recommended to set the value at 20 for a balance between these factors. mds.copyset.scatterWidthFloatingPercentage=20 # # curvefs config # -# curvefs的默认chunk size大小,16MB = 16*1024*1024 = 16777216 +# The default chunk size for curvefs is 16MB = 16*1024*1024 = 16777216 mds.curvefs.defaultChunkSize=16777216 # # chunkseverclient config # -# rpc 超时时间 +# RPC timeout mds.chunkserverclient.rpcTimeoutMs=500 -# rpc 重试次数 +# RPC retry count mds.chunkserverclient.rpcRetryTimes=5 -# rpc 重试时间间隔 +# RPC retry interval mds.chunkserverclient.rpcRetryIntervalMs=500 -# 从copyset的每个chunkserver getleader的重试的最大轮次 +# The maximum number of retries from each chunkserver getleader in the copyset mds.chunkserverclient.updateLeaderRetryTimes=5 -# 从copyset的每个chunkserver getleader的每一轮的间隔,需大于raft选主的时间 +# The interval between each round of each chunkserver getleader in the copyset must be greater than the time for selecting the master in the raft mds.chunkserverclient.updateLeaderRetryIntervalMs=5000 # # common options # -# 日志存放文件夹 +# Log storage folder mds.common.logDir=./runlog/ diff --git a/test/integration/common/chunkservice_op.cpp b/test/integration/common/chunkservice_op.cpp index d359d5e294..13e9f05954 100644 --- a/test/integration/common/chunkservice_op.cpp +++ b/test/integration/common/chunkservice_op.cpp @@ -31,9 +31,9 @@ namespace chunkserver { static constexpr uint32_t kOpRequestAlignSize = 4096; const PageSizeType kPageSize = kOpRequestAlignSize; -int ChunkServiceOp::WriteChunk(struct ChunkServiceOpConf *opConf, +int ChunkServiceOp::WriteChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn, off_t offset, - size_t len, const char *data, + size_t len, const char* data, const std::string& cloneFileSource, off_t cloneFileOffset) { PeerId leaderId(opConf->leaderPeer->address()); @@ -69,9 +69,9 @@ int ChunkServiceOp::WriteChunk(struct ChunkServiceOpConf *opConf, return status; } -int ChunkServiceOp::ReadChunk(struct ChunkServiceOpConf *opConf, +int ChunkServiceOp::ReadChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn, off_t offset, - size_t len, std::string *data, + size_t len, std::string* data, const std::string& cloneFileSource, off_t cloneFileOffset) { PeerId leaderId(opConf->leaderPeer->address()); @@ -105,7 +105,7 @@ int ChunkServiceOp::ReadChunk(struct ChunkServiceOpConf *opConf, CHUNK_OP_STATUS status = response.status(); LOG_IF(ERROR, status) << "read failed: " << CHUNK_OP_STATUS_Name(status); - // 读成功,复制内容到data + // Successfully read, copy content to data if (status == CHUNK_OP_STATUS_SUCCESS && data != nullptr) { cntl.response_attachment().copy_to(data, cntl.response_attachment().size()); @@ -114,10 +114,10 @@ int ChunkServiceOp::ReadChunk(struct ChunkServiceOpConf *opConf, return status; } -int ChunkServiceOp::ReadChunkSnapshot(struct ChunkServiceOpConf *opConf, +int ChunkServiceOp::ReadChunkSnapshot(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, - std::string *data) { + std::string* data) { 
PeerId leaderId(opConf->leaderPeer->address()); brpc::Channel channel; channel.Init(leaderId.addr, NULL); @@ -145,7 +145,7 @@ int ChunkServiceOp::ReadChunkSnapshot(struct ChunkServiceOpConf *opConf, LOG_IF(ERROR, status) << "readchunksnapshot failed: " << CHUNK_OP_STATUS_Name(status); - // 读成功,复制内容到data + // Successfully read, copy content to data if (status == CHUNK_OP_STATUS_SUCCESS && data != nullptr) { cntl.response_attachment().copy_to(data, cntl.response_attachment().size()); @@ -154,7 +154,7 @@ int ChunkServiceOp::ReadChunkSnapshot(struct ChunkServiceOpConf *opConf, return status; } -int ChunkServiceOp::DeleteChunk(struct ChunkServiceOpConf *opConf, +int ChunkServiceOp::DeleteChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn) { PeerId leaderId(opConf->leaderPeer->address()); brpc::Channel channel; @@ -184,7 +184,7 @@ int ChunkServiceOp::DeleteChunk(struct ChunkServiceOpConf *opConf, } int ChunkServiceOp::DeleteChunkSnapshotOrCorrectSn( - struct ChunkServiceOpConf *opConf, ChunkID chunkId, uint64_t correctedSn) { + struct ChunkServiceOpConf* opConf, ChunkID chunkId, uint64_t correctedSn) { PeerId leaderId(opConf->leaderPeer->address()); brpc::Channel channel; channel.Init(leaderId.addr, NULL); @@ -213,9 +213,9 @@ int ChunkServiceOp::DeleteChunkSnapshotOrCorrectSn( return status; } -int ChunkServiceOp::CreateCloneChunk(struct ChunkServiceOpConf *opConf, +int ChunkServiceOp::CreateCloneChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, - const std::string &location, + const std::string& location, uint64_t correctedSn, uint64_t sn, uint64_t chunkSize) { PeerId leaderId(opConf->leaderPeer->address()); @@ -249,7 +249,7 @@ int ChunkServiceOp::CreateCloneChunk(struct ChunkServiceOpConf *opConf, return status; } -int ChunkServiceOp::RecoverChunk(struct ChunkServiceOpConf *opConf, +int ChunkServiceOp::RecoverChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, off_t offset, size_t len) { PeerId leaderId(opConf->leaderPeer->address()); brpc::Channel channel; @@ -280,10 +280,10 @@ int ChunkServiceOp::RecoverChunk(struct ChunkServiceOpConf *opConf, return status; } -int ChunkServiceOp::GetChunkInfo(struct ChunkServiceOpConf *opConf, - ChunkID chunkId, SequenceNum *curSn, - SequenceNum *snapSn, - std::string *redirectedLeader) { +int ChunkServiceOp::GetChunkInfo(struct ChunkServiceOpConf* opConf, + ChunkID chunkId, SequenceNum* curSn, + SequenceNum* snapSn, + std::string* redirectedLeader) { PeerId leaderId(opConf->leaderPeer->address()); brpc::Channel channel; channel.Init(leaderId.addr, NULL); @@ -305,18 +305,18 @@ int ChunkServiceOp::GetChunkInfo(struct ChunkServiceOpConf *opConf, CHUNK_OP_STATUS status = response.status(); if (status == CHUNK_OP_STATUS_SUCCESS) { switch (response.chunksn().size()) { - case 2: - *snapSn = response.chunksn(1); - FALLTHROUGH_INTENDED; - case 1: - *curSn = response.chunksn(0); - break; - case 0: - return CHUNK_OP_STATUS_CHUNK_NOTEXIST; - default: - LOG(ERROR) << "GetChunkInfo failed, invalid chunkSn size: " - << response.chunksn().size(); - return -1; + case 2: + *snapSn = response.chunksn(1); + FALLTHROUGH_INTENDED; + case 1: + *curSn = response.chunksn(0); + break; + case 0: + return CHUNK_OP_STATUS_CHUNK_NOTEXIST; + default: + LOG(ERROR) << "GetChunkInfo failed, invalid chunkSn size: " + << response.chunksn().size(); + return -1; } } @@ -331,7 +331,7 @@ int ChunkServiceOp::GetChunkInfo(struct ChunkServiceOpConf *opConf, int ChunkServiceVerify::VerifyWriteChunk(ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, - const 
char *data, string *chunkData,
+                                         const char* data, string* chunkData,
                                          const std::string& cloneFileSource,
                                          off_t cloneFileOffset) {
     int ret =
@@ -342,7 +342,8 @@ int ChunkServiceVerify::VerifyWriteChunk(ChunkID chunkId, SequenceNum sn,
               << ", offset=" << offset << ", len=" << len
               << ", cloneFileSource=" << cloneFileSource
               << ", cloneFileOffset=" << cloneFileOffset << ", ret=" << ret;
-    // chunk写成功,同步更新chunkData内容和existChunks_
+    // Chunk write succeeded; synchronously update the chunkData content
+    // and existChunks_
     if (ret == CHUNK_OP_STATUS_SUCCESS && chunkData != nullptr)
         chunkData->replace(offset, len, data);
     existChunks_.insert(chunkId);
@@ -352,7 +353,7 @@ int ChunkServiceVerify::VerifyWriteChunk(ChunkID chunkId, SequenceNum sn,
 int ChunkServiceVerify::VerifyReadChunk(ChunkID chunkId, SequenceNum sn,
                                         off_t offset, size_t len,
-                                        string *chunkData,
+                                        string* chunkData,
                                         const std::string& cloneFileSource,
                                         off_t cloneFileOffset) {
     std::string data(len, 0);
@@ -369,8 +370,7 @@ int ChunkServiceVerify::VerifyReadChunk(ChunkID chunkId, SequenceNum sn,
     if (ret != CHUNK_OP_STATUS_SUCCESS &&
         ret != CHUNK_OP_STATUS_CHUNK_NOTEXIST) {
         return -1;
-    } else if (ret == CHUNK_OP_STATUS_SUCCESS &&
-               !chunk_existed &&
+    } else if (ret == CHUNK_OP_STATUS_SUCCESS && !chunk_existed &&
                cloneFileSource.empty()) {
         LOG(ERROR) << "Unexpected read success, chunk " << chunkId
                    << " should not existed";
@@ -381,20 +381,19 @@ int ChunkServiceVerify::VerifyReadChunk(ChunkID chunkId, SequenceNum sn,
         return -1;
     }
-    // 读成功,则判断内容是否与chunkData吻合
+    // On a successful read, check whether the content matches chunkData
     if (ret == CHUNK_OP_STATUS_SUCCESS && chunkData != nullptr) {
-        // 查找数据有差异的位置
+        // Locate the first position where the data differs
         uint32_t i = 0;
         while (i < len && data[i] == (*chunkData)[offset + i]) ++i;
-        // 读取数据与预期相符,返回0
-        if (i == len)
-            return 0;
+        // The data read matches expectations; return 0
+        if (i == len) return 0;
         LOG(ERROR) << "read data missmatch for chunk " << chunkId
                    << ", from offset " << offset + i << ", read "
                    << static_cast(data[i]) << ", expected "
                    << static_cast((*chunkData)[offset + i]);
-        // 打印每个page的第一个字节
+        // Print the first byte of each page
         uint32_t j = i / kPageSize * kPageSize;
         for (; j < len; j += kPageSize) {
             LOG(ERROR) << "chunk offset " << offset + j << ": read "
@@ -409,7 +408,7 @@ int ChunkServiceVerify::VerifyReadChunkSnapshot(ChunkID chunkId, SequenceNum sn,
                                                 off_t offset, size_t len,
-                                                string *chunkData) {
+                                                string* chunkData) {
     std::string data(len, 0);
     bool chunk_existed = existChunks_.find(chunkId) != std::end(existChunks_);
@@ -431,20 +430,19 @@ int ChunkServiceVerify::VerifyReadChunkSnapshot(ChunkID chunkId, SequenceNum sn,
         return -1;
     }
-    // 读成功,则判断内容是否与chunkData吻合
+    // On a successful read, check whether the content matches chunkData
     if (ret == CHUNK_OP_STATUS_SUCCESS && chunkData != nullptr) {
-        // 查找数据有差异的位置
+        // Locate the first position where the data differs
         int i = 0;
         while (i < len && data[i] == (*chunkData)[offset + i]) ++i;
-        // 读取数据与预期相符,返回0
-        if (i == len)
-            return 0;
+        // The data read matches expectations; return 0
+        if (i == len) return 0;
         LOG(ERROR) << "read data missmatch for chunk " << chunkId
                    << ", from offset " << offset + i << ", read "
                    << static_cast(data[i]) << ", expected "
                    << static_cast((*chunkData)[offset + i]);
-        // 打印每个4KB的第一个字节
+        // Print the first byte of each 4KB page
        int j = i / kPageSize * kPageSize;
        for (; j < len; j += kPageSize) {
            LOG(ERROR) << "chunk offset " << offset + j << ": read "
@@ -461,8 +459,7 @@ int ChunkServiceVerify::VerifyDeleteChunk(ChunkID chunkId, SequenceNum sn) {
     int ret = ChunkServiceOp::DeleteChunk(opConf_, chunkId, sn);
     LOG(INFO) << "Delete Chunk " << chunkId << ", sn " << sn
               << ", ret=" << ret;
-    if (ret == CHUNK_OP_STATUS_SUCCESS)
-        existChunks_.erase(chunkId);
+    if (ret == CHUNK_OP_STATUS_SUCCESS) existChunks_.erase(chunkId);
     return ret;
 }
@@ -477,7 +474,7 @@ int ChunkServiceVerify::VerifyDeleteChunkSnapshotOrCorrectSn(
 }
 int ChunkServiceVerify::VerifyCreateCloneChunk(ChunkID chunkId,
-                                               const std::string &location,
+                                               const std::string& location,
                                                uint64_t correctedSn,
                                                uint64_t sn,
                                                uint64_t chunkSize) {
@@ -487,8 +484,7 @@ int ChunkServiceVerify::VerifyCreateCloneChunk(ChunkID chunkId,
               << location << ", correctedSn=" << correctedSn
               << ", sn=" << sn << ", chunkSize=" << chunkSize
               << ", ret=" << ret;
-    if (ret == CHUNK_OP_STATUS_SUCCESS)
-        existChunks_.insert(chunkId);
+    if (ret == CHUNK_OP_STATUS_SUCCESS) existChunks_.insert(chunkId);
     return ret;
 }
@@ -517,31 +513,33 @@ int ChunkServiceVerify::VerifyGetChunkInfo(ChunkID chunkId,
     bool chunk_existed = existChunks_.find(chunkId) != std::end(existChunks_);
     switch (ret) {
-    case CHUNK_OP_STATUS_SUCCESS:
-        // 如果curSn或snapSn与预期不符,则返回-1
-        LOG_IF(ERROR, (curSn != expCurSn || snapSn != expSnapSn))
-            << "GetChunkInfo for " << chunkId << " failed, curSn=" << curSn
-            << ", expected " << expCurSn << "; snapSn=" << snapSn
-            << ", expected " << expSnapSn;
-        return (curSn != expCurSn || snapSn != expSnapSn) ? -1 : 0;
-
-    case CHUNK_OP_STATUS_CHUNK_NOTEXIST:
-        // 如果chunk预期存在,则返回-1
-        LOG_IF(ERROR, chunk_existed)
-            << "Unexpected GetChunkInfo NOTEXIST, chunk " << chunkId
-            << " must be existed";
-        return chunk_existed ? -1 : 0;
-
-    case CHUNK_OP_STATUS_REDIRECTED:
-        // 如果返回的redirectedLeader与给定的不符,则返回-1
-        LOG_IF(ERROR, expLeader != redirectedLeader)
-            << "GetChunkInfo failed, redirected to " << redirectedLeader
-            << ", expected " << expLeader;
-        return (expLeader != redirectedLeader) ? -1 : 0;
-
-    default:
-        LOG(ERROR) << "GetChunkInfo for " << chunkId << "failed, ret=" << ret;
-        return -1;
+        case CHUNK_OP_STATUS_SUCCESS:
+            // Return -1 if curSn or snapSn does not match expectations
+            LOG_IF(ERROR, (curSn != expCurSn || snapSn != expSnapSn))
+                << "GetChunkInfo for " << chunkId << " failed, curSn=" << curSn
+                << ", expected " << expCurSn << "; snapSn=" << snapSn
+                << ", expected " << expSnapSn;
+            return (curSn != expCurSn || snapSn != expSnapSn) ? -1 : 0;
+
+        case CHUNK_OP_STATUS_CHUNK_NOTEXIST:
+            // Return -1 if the chunk was expected to exist
+            LOG_IF(ERROR, chunk_existed)
+                << "Unexpected GetChunkInfo NOTEXIST, chunk " << chunkId
+                << " must be existed";
+            return chunk_existed ? -1 : 0;
+
+        case CHUNK_OP_STATUS_REDIRECTED:
+            // Return -1 if the returned redirectedLeader does not match the
+            // expected one
+            LOG_IF(ERROR, expLeader != redirectedLeader)
+                << "GetChunkInfo failed, redirected to " << redirectedLeader
+                << ", expected " << expLeader;
+            return (expLeader != redirectedLeader) ? -1 : 0;
+
+        default:
+            LOG(ERROR) << "GetChunkInfo for " << chunkId
+                       << "failed, ret=" << ret;
+            return -1;
     }
     LOG(ERROR) << "GetChunkInfo for " << chunkId << "failed, Illgal branch";
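The switch above relies on a small protocol convention: GetChunkInfoResponse carries a repeated chunksn field whose size encodes the chunk's state. Two entries mean both a current version and a snapshot version exist, one entry means only a current version, and zero entries mean the chunk does not exist. A minimal caller-side sketch of that convention (a hypothetical helper, not part of this patch; the proto accessors and FALLTHROUGH_INTENDED macro are the ones used above):

    // Interprets the repeated chunksn field the same way the switch above does.
    // Returns false only for a malformed response.
    bool ParseChunkSn(const GetChunkInfoResponse& response, SequenceNum* curSn,
                      SequenceNum* snapSn, bool* exists) {
        *exists = response.chunksn_size() > 0;
        switch (response.chunksn_size()) {
            case 2:
                *snapSn = response.chunksn(1);  // entry 1: snapshot version
                FALLTHROUGH_INTENDED;
            case 1:
                *curSn = response.chunksn(0);   // entry 0: current version
                return true;
            case 0:
                return true;   // chunk does not exist
            default:
                return false;  // more than two versions is malformed
        }
    }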
ReadChunkSnapshot(struct ChunkServiceOpConf *opConf,
+    static int ReadChunkSnapshot(struct ChunkServiceOpConf* opConf,
                                  ChunkID chunkId, SequenceNum sn, off_t offset,
-                                 size_t len, std::string *data);
+                                 size_t len, std::string* data);
     /**
-     * @brief 通过chunkService删除chunk
-     * @param opConf,leaderPeer/copysetid等公共配置参数
+     * @brief Delete a chunk through chunkService
+     * @param opConf Common configuration parameters such as
+     * leaderPeer/copysetId, etc.
     * @param chunkId
-     * @param sn chunk版本
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @param sn chunk version
+     * @return -1 if the request fails to execute, otherwise the operation's
+     * error code
     */
-    static int DeleteChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId,
+    static int DeleteChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId,
                            SequenceNum sn);
     /**
-     * @brief 通过chunkService删除此次转储时产生的或者历史遗留的快照
-     *        如果转储过程中没有产生快照,则修改chunk的correctedSn
-     * @param opConf,leaderPeer/copysetid等公共配置参数
+     * @brief Delete, through chunkService, the snapshot produced by this dump
+     * or left over from history; if no snapshot was produced during the dump,
+     * modify the chunk's correctedSn instead
+     * @param opConf Common configuration parameters such as
+     * leaderPeer/copysetId, etc.
     * @param chunkId
     * @param correctedSn
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @return -1 if the request fails to execute, otherwise the operation's
+     * error code
     */
-    static int DeleteChunkSnapshotOrCorrectSn(struct ChunkServiceOpConf *opConf,
+    static int DeleteChunkSnapshotOrCorrectSn(struct ChunkServiceOpConf* opConf,
                                               ChunkID chunkId,
                                               SequenceNum correctedSn);
     /**
-     * @brief 通过chunkService创建clone chunk
-     * @param opConf,leaderPeer/copysetid等公共配置参数
+     * @brief Create a clone chunk through chunkService
+     * @param opConf Common configuration parameters such as
+     * leaderPeer/copysetId, etc.
     * @param chunkId
-     * @param location 源chunk在源端的位置,可能在curve或S3上
+     * @param location The location of the source chunk on the source side,
+     * possibly on curve or S3
     * @param correctedSn
     * @param sn
     * @param chunkSize
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @return -1 if the request fails to execute, otherwise the operation's
+     * error code
     */
-    static int CreateCloneChunk(struct ChunkServiceOpConf *opConf,
-                                ChunkID chunkId, const std::string &location,
+    static int CreateCloneChunk(struct ChunkServiceOpConf* opConf,
+                                ChunkID chunkId, const std::string& location,
                                 uint64_t correctedSn, uint64_t sn,
                                 uint64_t chunkSize);
     /**
-     * @brief 通过chunkService恢复chunk
-     * @param opConf,leaderPeer/copysetid等公共配置参数
+     * @brief Recover a chunk through chunkService
+     * @param opConf Common configuration parameters such as
+     * leaderPeer/copysetId, etc.
     * @param chunkId
     * @param offset
     * @param len
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @return -1 if the request fails to execute, otherwise the operation's
+     * error code
     */
-    static int RecoverChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId,
+    static int RecoverChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId,
                             off_t offset, size_t len);
     /**
-     * @brief 通过chunkService获取chunk元数据
-     * @param opConf,leaderPeer/copysetid等公共配置参数
+     * @brief Obtain chunk metadata through chunkService
+     * @param opConf Common configuration parameters such as
+     * leaderPeer/copysetId, etc.
     * @param chunkId
-     * @param curSn 返回当前chunk版本
-     * @param snapSn 返回快照chunk版本
-     * @param redirectedLeader 返回重定向主节点
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @param curSn Returns the current chunk version
+     * @param snapSn Returns the
snapshot chunk version
+     * @param redirectedLeader Returns the redirected leader
+     * @return -1 if the request fails to execute, otherwise the operation's
+     * error code
     */
-    static int GetChunkInfo(struct ChunkServiceOpConf *opConf, ChunkID chunkId,
-                            SequenceNum *curSn, SequenceNum *snapSn,
-                            string *redirectedLeader);
+    static int GetChunkInfo(struct ChunkServiceOpConf* opConf, ChunkID chunkId,
+                            SequenceNum* curSn, SequenceNum* snapSn,
+                            string* redirectedLeader);
 };
 class ChunkServiceVerify {
 public:
-    explicit ChunkServiceVerify(struct ChunkServiceOpConf *opConf)
+    explicit ChunkServiceVerify(struct ChunkServiceOpConf* opConf)
         : opConf_(opConf) {}
     /**
-     * @brief 执行写chunk, 并将数据写入到chunkdata对应区域,以便于后续验证数据。
+     * @brief Execute a chunk write and also write the data into the
+     * corresponding area of chunkData, for later data verification.
     * @param chunkId
-     * @param sn chunk版本
+     * @param sn chunk version
     * @param offset
     * @param len
-     * @param data 待写数据
-     * @param chunkData 整个chunk的预期数据
-     * @param cloneFileSource clone源的文件路径
-     * @param cloneFileOffset clone chunk在clone源中的相对偏移
-     * @return 返回写操作的错误码
+     * @param data Data to be written
+     * @param chunkData Expected data for the entire chunk
+     * @param cloneFileSource The file path of the clone source
+     * @param cloneFileOffset Relative offset of the clone chunk in the clone source
+     * @return The error code of the write operation
     */
     int VerifyWriteChunk(ChunkID chunkId, SequenceNum sn, off_t offset,
-                         size_t len, const char *data, string *chunkData,
+                         size_t len, const char* data, string* chunkData,
                          const std::string& cloneFileSource = "",
                          off_t cloneFileOffset = 0);
     /**
-     * @brief 执行读chunk, 并验证读取内容是否与chunkdata对应区域的预期数据吻合。
+     * @brief Execute a chunk read and verify that the content matches the
+     * expected data in the corresponding region of chunkData.
     * @param chunkId
-     * @param sn chunk版本
+     * @param sn chunk version
     * @param offset
     * @param len
-     * @param chunkData 整个chunk的预期数据
-     * @param cloneFileSource clone源的文件路径
-     * @param cloneFileOffset clone chunk在clone源中的相对偏移
-     * @return 读请求结果符合预期返回0,否则返回-1
+     * @param chunkData Expected data for the entire chunk
+     * @param cloneFileSource The file path of the clone source
+     * @param cloneFileOffset Relative offset of the clone chunk in the clone source
+     * @return 0 if the read result matches expectations, otherwise -1
     */
     int VerifyReadChunk(ChunkID chunkId, SequenceNum sn, off_t offset,
-                        size_t len, string *chunkData,
+                        size_t len, string* chunkData,
                         const std::string& cloneFileSource = "",
                         off_t cloneFileOffset = 0);
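These Verify* helpers all follow the same pattern: the caller keeps a chunkData string as the expected image of the whole chunk, writes patch that image, and reads are diffed against it. A short usage sketch under assumed values (the leaderPeer variable, the 16 MB chunk size, and the literal IDs are illustrative, not taken from this patch):

    // Expected-image tracking: write through the verifier, then read back.
    ChunkServiceOpConf conf{&leaderPeer, /*logicPoolId=*/1, /*copysetId=*/1,
                            /*rpcTimeout=*/5000};
    ChunkServiceVerify verify(&conf);

    std::string expected(16 * 1024 * 1024, 0);  // assumed chunk size, zeroed
    std::string payload(4096, 'a');

    // Write 4KB at offset 0; on success this also patches `expected`.
    ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS,
              verify.VerifyWriteChunk(/*chunkId=*/1, /*sn=*/1, /*offset=*/0,
                                      payload.size(), payload.data(),
                                      &expected));
    // Read the same range back; the content is diffed against `expected`.
    ASSERT_EQ(0, verify.VerifyReadChunk(/*chunkId=*/1, /*sn=*/1, /*offset=*/0,
                                        payload.size(), &expected));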
    /**
-     * @brief 执行读chunk快照,
-     * 并验证读取内容是否与chunkdata对应区域的预期数据吻合。
+     * @brief Execute a chunk-snapshot read and verify that the content
+     * matches the expected data in the corresponding area of chunkData.
     * @param chunkId
-     * @param sn chunk版本
+     * @param sn chunk version
     * @param offset
     * @param len
-     * @param chunkData 整个chunk的预期数据
-     * @return 读请求结果符合预期返回0,否则返回-1
+     * @param chunkData Expected data for the entire chunk
+     * @return 0 if the read result matches expectations, otherwise -1
     */
     int VerifyReadChunkSnapshot(ChunkID chunkId, SequenceNum sn, off_t offset,
-                                size_t len, string *chunkData);
+                                size_t len, string* chunkData);
     /**
-     * @brief 删除chunk
+     * @brief Delete a chunk
     * @param chunkId
-     * @param sn chunk版本
-     * @return 返回删除操作的错误码
+     * @param sn chunk version
+     * @return The error code of the delete operation
     */
     int VerifyDeleteChunk(ChunkID chunkId, SequenceNum sn);
     /**
-     * @brief 删除chunk的快照
+     * @brief Delete the snapshot of a chunk
     * @param chunkId
     * @param correctedSn
-     * @return 返回删除操作的错误码
+     * @return The error code of the delete operation
     */
     int VerifyDeleteChunkSnapshotOrCorrectSn(ChunkID chunkId,
                                              SequenceNum correctedSn);
     /**
-     * @brief 创建clone chunk
+     * @brief Create a clone chunk
     * @param chunkId
-     * @param location 源地址
+     * @param location Source address
     * @param correctedSn
     * @param sn
     * @param chunkSize
-     * @return 返回创建操作的错误码
+     * @return The error code of the create operation
     */
-    int VerifyCreateCloneChunk(ChunkID chunkId, const std::string &location,
+    int VerifyCreateCloneChunk(ChunkID chunkId, const std::string& location,
                                uint64_t correctedSn, uint64_t sn,
                                uint64_t chunkSize);
     /**
-     * @brief 恢复chunk
+     * @brief Recover a chunk
     * @param chunkId
     * @param offset
     * @param len
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @return -1 if the request fails to execute, otherwise the operation's
+     * error code
     */
     int VerifyRecoverChunk(ChunkID chunkId, off_t offset, size_t len);
     /**
-     * @brief 获取chunk元数据,并检验结果是否符合预期
+     * @brief Obtain chunk metadata and verify that the results meet
+     * expectations
     * @param chunkId
-     * @param expCurSn 预期chunk版本,-1表示不存在
-     * @param expSanpSn 预期快照版本,-1表示不存在
-     * @param expLeader 预期redirectedLeader
-     * @return 验证成功返回0,否则返回-1
+     * @param expCurSn Expected chunk version, -1 means it does not exist
+     * @param expSnapSn Expected snapshot version, -1 means it does not exist
+     * @param expLeader Expected redirectedLeader
+     * @return 0 if verification succeeds, otherwise -1
     */
     int VerifyGetChunkInfo(ChunkID chunkId, SequenceNum expCurSn,
                            SequenceNum expSnapSn, string expLeader);
 private:
-    struct ChunkServiceOpConf *opConf_;
-    // 记录写过的chunkId(预期存在),用于判断请求的返回值是否符合预期
+    struct ChunkServiceOpConf* opConf_;
+    // Records the chunkIds that have been written (and are expected to
+    // exist), used to judge whether a request's return value meets
+    // expectations
     std::set existChunks_;
 };
diff --git a/test/integration/common/config_generator.h b/test/integration/common/config_generator.h
index e838aed61f..84e32f47d1 100644
--- a/test/integration/common/config_generator.h
+++ b/test/integration/common/config_generator.h
@@ -40,7 +40,7 @@ class CSTConfigGenerator : public ConfigGenerator {
     CSTConfigGenerator() {}
     ~CSTConfigGenerator() {}
     bool Init(const std::string& port) {
-        // 加载配置文件模板
+        // Load the configuration file template
         config_.SetConfigPath(DEFAULT_CHUNKSERVER_CONF);
         if (!config_.LoadConfig()) {
             return false;
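CSTConfigGenerator::Init() above illustrates the template pattern the integration tests use for configuration: load a stock conf file, patch the per-instance fields, and write a test-local copy. A sketch of that flow (the "global.port" key name and the SetStringValue/SaveConfig calls are assumptions about curve::common::Configuration, not taken from this patch):

    // Load the chunkserver conf template, override the port, save a copy.
    Configuration config;
    config.SetConfigPath(DEFAULT_CHUNKSERVER_CONF);  // the template
    if (!config.LoadConfig()) {
        return false;
    }
    config.SetStringValue("global.port", port);               // hypothetical key
    config.SetConfigPath("./chunkserver." + port + ".conf");  // test-local copy
    return config.SaveConfig();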
"test/integration/common/peer_cluster.h" -#include -#include #include +#include +#include +#include #include #include -#include -#include "src/chunkserver/cli2.h" -#include "src/chunkserver/register.h" +#include "proto/cli2.pb.h" #include "proto/copyset.pb.h" #include "src/chunkserver/chunkserver_helper.h" +#include "src/chunkserver/cli2.h" +#include "src/chunkserver/register.h" #include "src/fs/fs_common.h" -#include "proto/cli2.pb.h" namespace curve { namespace chunkserver { using curve::fs::FileSystemType; - -PeerCluster::PeerCluster(const std::string &clusterName, +PeerCluster::PeerCluster(const std::string& clusterName, const LogicPoolID logicPoolID, const CopysetID copysetID, - const std::vector &peers, - std::vector params, - std::map paramsIndexs) : - clusterName_(clusterName), - snapshotIntervalS_(1), - electionTimeoutMs_(1000), - paramsIndexs_(paramsIndexs), - params_(params), - isFakeMdsStart_(false) { + const std::vector& peers, + std::vector params, + std::map paramsIndexs) + : clusterName_(clusterName), + snapshotIntervalS_(1), + electionTimeoutMs_(1000), + paramsIndexs_(paramsIndexs), + params_(params), + isFakeMdsStart_(false) { logicPoolID_ = logicPoolID; copysetID_ = copysetID; for (auto it = peers.begin(); it != peers.end(); ++it) { @@ -63,7 +62,7 @@ PeerCluster::PeerCluster(const std::string &clusterName, } } -int PeerCluster::StartFakeTopoloyService(const std::string &listenAddr) { +int PeerCluster::StartFakeTopoloyService(const std::string& listenAddr) { if (isFakeMdsStart_) { return 0; } @@ -81,9 +80,7 @@ int PeerCluster::StartFakeTopoloyService(const std::string &listenAddr) { return ret; } -int PeerCluster::StartPeer(const Peer &peer, - int id, - const bool empty) { +int PeerCluster::StartPeer(const Peer& peer, int id, const bool empty) { LOG(INFO) << "going start peer: " << peer.address() << " " << id; auto it = peersMap_.find(peer.address()); if (it != peersMap_.end()) { @@ -109,18 +106,17 @@ int PeerCluster::StartPeer(const Peer &peer, LOG(ERROR) << "start peer fork failed"; return -1; } else if (0 == pid) { - /* 在子进程起一个 ChunkServer */ + /* Starting a ChunkServer in a child process */ StartPeerNode(id, params_[paramsIndexs_[id]]); exit(0); } LOG(INFO) << "start peer success, peer id = " << pid; peerNode->pid = pid; peerNode->state = PeerNodeState::RUNNING; - peersMap_.insert(std::pair>(peerId.to_string(), - std::move(peerNode))); + peersMap_.insert(std::pair>( + peerId.to_string(), std::move(peerNode))); - // 在创建copyset之前,先等chunkserver启动 + // Before creating a copyset, wait for chunkserver to start ::usleep(1500 * 1000); int ret = CreateCopyset(logicPoolID_, copysetID_, peer, peers_); @@ -133,7 +129,7 @@ int PeerCluster::StartPeer(const Peer &peer, return 0; } -int PeerCluster::ShutdownPeer(const Peer &peer) { +int PeerCluster::ShutdownPeer(const Peer& peer) { PeerId peerId(peer.address()); LOG(INFO) << "going to shutdown peer: " << peerId.to_string(); auto it = peersMap_.find(peerId.to_string()); @@ -141,8 +137,8 @@ int PeerCluster::ShutdownPeer(const Peer &peer) { int waitState; if (0 != kill(it->second->pid, SIGKILL)) { LOG(ERROR) << "Stop peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } waitpid(it->second->pid, &waitState, 0); @@ -156,7 +152,7 @@ int PeerCluster::ShutdownPeer(const Peer &peer) { } } -int PeerCluster::HangPeer(const Peer &peer) { +int PeerCluster::HangPeer(const Peer& peer) { LOG(INFO) << "peer cluster: hang 
" << peer.address(); PeerId peerId(peer.address()); auto it = peersMap_.find(peerId.to_string()); @@ -168,8 +164,8 @@ int PeerCluster::HangPeer(const Peer &peer) { } if (0 != kill(it->second->pid, SIGSTOP)) { LOG(ERROR) << "Hang peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } int waitState; @@ -184,7 +180,7 @@ int PeerCluster::HangPeer(const Peer &peer) { } } -int PeerCluster::SignalPeer(const Peer &peer) { +int PeerCluster::SignalPeer(const Peer& peer) { LOG(INFO) << "peer cluster: signal " << peer.address(); PeerId peerId(peer.address()); auto it = peersMap_.find(peerId.to_string()); @@ -196,8 +192,8 @@ int PeerCluster::SignalPeer(const Peer &peer) { } if (0 != kill(it->second->pid, SIGCONT)) { LOG(ERROR) << "Cont peer: " << peerId.to_string() << "failed," - << "errno: " << errno << ", error str: " - << strerror(errno); + << "errno: " << errno + << ", error str: " << strerror(errno); return -1; } int waitState; @@ -212,18 +208,17 @@ int PeerCluster::SignalPeer(const Peer &peer) { } } -int PeerCluster:: ConfirmLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const std::string& leaderAddr, - Peer *leader) { +int PeerCluster::ConfirmLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const std::string& leaderAddr, Peer* leader) { brpc::Channel channel; auto pos = leaderAddr.rfind(":"); std::string addr = leaderAddr.substr(0, pos); if (channel.Init(addr.c_str(), NULL) != 0) { - LOG(ERROR) <<"Fail to init channel to " << leaderAddr.c_str(); + LOG(ERROR) << "Fail to init channel to " << leaderAddr.c_str(); return -1; } - Peer *peer = new Peer(); + Peer* peer = new Peer(); CliService2_Stub stub(&channel); GetLeaderRequest2 request; GetLeaderResponse2 response; @@ -235,7 +230,7 @@ int PeerCluster:: ConfirmLeader(const LogicPoolID &logicPoolId, stub.GetLeader(&cntl, &request, &response, NULL); if (cntl.Failed()) { - LOG(ERROR) <<"confirm leader fail"; + LOG(ERROR) << "confirm leader fail"; return -1; } Peer leader2 = response.leader(); @@ -244,21 +239,21 @@ int PeerCluster:: ConfirmLeader(const LogicPoolID &logicPoolId, PeerId leaderId1; leaderId1.parse(leader->address()); if (leaderId2.is_empty()) { - LOG(ERROR) <<"Confirmed leaderId is null"; + LOG(ERROR) << "Confirmed leaderId is null"; return -1; } if (leaderId2 != leaderId1) { - LOG(INFO) << "twice leaderId is inconsistent, first is " - << leaderId1 << " second is " << leaderId2; + LOG(INFO) << "twice leaderId is inconsistent, first is " << leaderId1 + << " second is " << leaderId2; return -1; } return 0; } -int PeerCluster::WaitLeader(Peer *leaderPeer) { +int PeerCluster::WaitLeader(Peer* leaderPeer) { butil::Status status; /** - * 等待选举结束 + * Waiting for the election to end */ ::usleep(3 * electionTimeoutMs_ * 1000); const int kMaxLoop = (3 * electionTimeoutMs_) / 100; @@ -267,15 +262,17 @@ int PeerCluster::WaitLeader(Peer *leaderPeer) { status = GetLeader(logicPoolID_, copysetID_, conf_, leaderPeer); if (status.ok()) { /** - * 由于选举之后还需要提交应用 noop entry 之后才能提供服务, - * 所以这里需要等待 noop apply,这里等太短,可能容易失败,后期改进 + * Due to the need to submit the application noop entry after the + * election to provide services, So we need to wait for the noop + * application here. 
If the wait time is too short, it may be easy + * to fail, so we need to improve it later */ usleep(electionTimeoutMs_ * 1000); LOG(INFO) << "Wait leader success, leader is: " << leaderPeer->address(); std::string leaderAddr = leaderPeer->address(); - int ret = ConfirmLeader(logicPoolID_, copysetID_, - leaderAddr, leaderPeer); + int ret = + ConfirmLeader(logicPoolID_, copysetID_, leaderAddr, leaderPeer); if (ret == 0) { return ret; } @@ -299,9 +296,7 @@ int PeerCluster::StopAllPeers() { return 0; } -Configuration PeerCluster::CopysetConf() const { - return conf_; -} +Configuration PeerCluster::CopysetConf() const { return conf_; } int PeerCluster::SetsnapshotIntervalS(int snapshotIntervalS) { snapshotIntervalS_ = snapshotIntervalS; @@ -313,10 +308,10 @@ int PeerCluster::SetElectionTimeoutMs(int electionTimeoutMs) { return 0; } -int PeerCluster::StartPeerNode(int id, char *arg[]) { +int PeerCluster::StartPeerNode(int id, char* arg[]) { struct RegisterOptions opt; - opt.chunkserverMetaUri = "local://./" + std::to_string(id) + - "/chunkserver.dat"; + opt.chunkserverMetaUri = + "local://./" + std::to_string(id) + "/chunkserver.dat"; opt.fs = fs_; Register regist(opt); @@ -334,52 +329,43 @@ int PeerCluster::StartPeerNode(int id, char *arg[]) { return 0; } -const std::string PeerCluster::CopysetDirWithProtocol(const Peer &peer) { +const std::string PeerCluster::CopysetDirWithProtocol(const Peer& peer) { PeerId peerId(peer.address()); std::string copysetdir; - butil::string_printf(©setdir, - "local://./%s-%d-%d", + butil::string_printf(©setdir, "local://./%s-%d-%d", butil::ip2str(peerId.addr.ip).c_str(), - peerId.addr.port, - 0); + peerId.addr.port, 0); return copysetdir; } -const std::string PeerCluster::CopysetDirWithoutProtocol(const Peer &peer) { +const std::string PeerCluster::CopysetDirWithoutProtocol(const Peer& peer) { PeerId peerId(peer.address()); std::string copysetdir; - butil::string_printf(©setdir, - "./%s-%d-%d", + butil::string_printf(©setdir, "./%s-%d-%d", butil::ip2str(peerId.addr.ip).c_str(), - peerId.addr.port, - 0); + peerId.addr.port, 0); return copysetdir; } -const std::string PeerCluster::RemoveCopysetDirCmd(const Peer &peer) { +const std::string PeerCluster::RemoveCopysetDirCmd(const Peer& peer) { PeerId peerId(peer.address()); std::string cmd; - butil::string_printf(&cmd, - "rm -fr %d/copysets", peerId.addr.port); + butil::string_printf(&cmd, "rm -fr %d/copysets", peerId.addr.port); return cmd; } -const std::string PeerCluster::RemoveCopysetLogDirCmd(const Peer &peer, +const std::string PeerCluster::RemoveCopysetLogDirCmd(const Peer& peer, LogicPoolID logicPoolID, CopysetID copysetID) { PeerId peerId(peer.address()); std::string cmd; - butil::string_printf(&cmd, - "rm -fr %d/copysets/%s", - peerId.addr.port, + butil::string_printf(&cmd, "rm -fr %d/copysets/%s", peerId.addr.port, ToGroupIdString(logicPoolID, copysetID).c_str()); return cmd; } -int PeerCluster::CreateCopyset(LogicPoolID logicPoolID, - CopysetID copysetID, - Peer peer, - const std::vector& peers) { +int PeerCluster::CreateCopyset(LogicPoolID logicPoolID, CopysetID copysetID, + Peer peer, const std::vector& peers) { LOG(INFO) << "PeerCluster begin create copyset: " << ToGroupIdString(logicPoolID, copysetID); @@ -403,17 +389,17 @@ int PeerCluster::CreateCopyset(LogicPoolID logicPoolID, CopysetService_Stub stub(&channel); stub.CreateCopysetNode(&cntl, &request, &response, nullptr); if (cntl.Failed()) { - LOG(ERROR) << "failed create copsyet, " - << cntl.ErrorText() << std::endl; + LOG(ERROR) << "failed create 
copsyet, " << cntl.ErrorText() + << std::endl; ::usleep(1000 * 1000); continue; } - if (response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS - || response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { //NOLINT - LOG(INFO) << "create copyset " << ToGroupIdString(logicPoolID, - copysetID) - << " success."; + if (response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS || + response.status() == + COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { // NOLINT + LOG(INFO) << "create copyset " + << ToGroupIdString(logicPoolID, copysetID) << " success."; return 0; } @@ -423,14 +409,13 @@ int PeerCluster::CreateCopyset(LogicPoolID logicPoolID, return -1; } -int PeerCluster::PeerToId(const Peer &peer) { +int PeerCluster::PeerToId(const Peer& peer) { PeerId peerId(peer.address()); return peerId.addr.port; } -int PeerCluster::GetFollwerPeers(const std::vector& peers, - Peer leader, - std::vector *followers) { +int PeerCluster::GetFollwerPeers(const std::vector& peers, Peer leader, + std::vector* followers) { for (auto& peer : peers) { if (leader.address() != peer.address()) { followers->push_back(peer); @@ -442,28 +427,23 @@ int PeerCluster::GetFollwerPeers(const std::vector& peers, ChunkServerID PeerCluster::chunkServerId_ = 0; -std::shared_ptr PeerCluster::fs_ - = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); +std::shared_ptr PeerCluster::fs_ = + LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); /** - * 正常 I/O 验证,先写进去,再读出来验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, write it in first, then read it out for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 - * @param sn 本次写入的版本号 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO + * @param sn The version number written this time */ -void WriteThenReadVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop, - uint64_t sn) { +void WriteThenReadVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop, uint64_t sn) { LOG(INFO) << "Write then read verify: " << fillCh; PeerId leaderId(leaderPeer.address()); @@ -486,9 +466,8 @@ void WriteThenReadVerify(Peer leaderPeer, request.set_sn(sn); cntl.request_attachment().resize(length, fillCh); stub.WriteChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode() + << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); @@ -507,9 +486,8 @@ void WriteThenReadVerify(Peer leaderPeer, request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) << "error msg: " << cntl.ErrorCode() + << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); @@ -521,22 +499,17 @@ void WriteThenReadVerify(Peer leaderPeer, } /** - * 正常 I/O 验证,read 数据验证 - * 
@param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, read data verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +void ReadVerify(Peer leaderPeer, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkId, int length, char fillCh, int loop) { LOG(INFO) << "Read verify: " << fillCh; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -556,12 +529,10 @@ void ReadVerify(Peer leaderPeer, request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); std::string expectRead(length, fillCh); ASSERT_STREQ(expectRead.c_str(), cntl.response_attachment().to_string().c_str()); @@ -569,22 +540,18 @@ void ReadVerify(Peer leaderPeer, } /** - * 读chunk的snapshot进行验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify by reading the snapshot of the chunk + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadSnapshotVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +void ReadSnapshotVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { LOG(INFO) << "Read snapshot verify: " << fillCh; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -592,7 +559,7 @@ void ReadSnapshotVerify(Peer leaderPeer, ChunkService_Stub stub(&channel); - // 获取chunk的快照版本 + // Obtain the snapshot version of the chunk uint64_t snapSn; { brpc::Controller cntl; @@ -603,12 +570,10 @@ void ReadSnapshotVerify(Peer leaderPeer, request.set_copysetid(copysetId); request.set_chunkid(chunkId); stub.GetChunkInfo(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); ASSERT_EQ(2, response.chunksn_size()); snapSn = std::min(response.chunksn(0), response.chunksn(1)); } @@ -622,16 +587,14 @@ void ReadSnapshotVerify(Peer leaderPeer, request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); 
request.set_chunkid(chunkId); - request.set_offset(length*i); + request.set_offset(length * i); request.set_size(length); request.set_sn(snapSn); stub.ReadChunkSnapshot(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); std::string expectRead(length, fillCh); ASSERT_STREQ(expectRead.c_str(), cntl.response_attachment().to_string().c_str()); @@ -639,18 +602,15 @@ void ReadSnapshotVerify(Peer leaderPeer, } /** - * 删除chunk的snapshot进行验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Delete snapshot of chunk for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id * @param csn corrected sn */ -void DeleteSnapshotVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - uint64_t csn) { +void DeleteSnapshotVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, uint64_t csn) { LOG(INFO) << "Delete snapshot verify, csn: " << csn; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -668,31 +628,25 @@ void DeleteSnapshotVerify(Peer leaderPeer, request.set_chunkid(chunkId); request.set_correctedsn(csn); stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } /** - * 异常I/O验证,read数据不符合预期 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Abnormal I/O verification, read data does not meet expectations + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadNotVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +void ReadNotVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { LOG(INFO) << "Read not verify: " << fillCh; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -712,12 +666,10 @@ void ReadNotVerify(Peer leaderPeer, request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_FALSE(cntl.Failed()); - ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - response.status()); + ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); std::string 
expectRead(length, fillCh); ASSERT_STRNE(expectRead.c_str(), cntl.response_attachment().to_string().c_str()); @@ -725,22 +677,18 @@ void ReadNotVerify(Peer leaderPeer, } /** - * 通过read验证可用性 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify availability through read + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadVerifyNotAvailable(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +void ReadVerifyNotAvailable(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { LOG(INFO) << "Read verify not available: " << fillCh; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -760,32 +708,28 @@ void ReadVerifyNotAvailable(Peer leaderPeer, request.set_size(length); request.set_sn(sn); stub.ReadChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); LOG(INFO) << "read: " << CHUNK_OP_STATUS_Name(response.status()); ASSERT_TRUE(cntl.Failed() || - response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response.status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } } /** - * 通过write验证可用性 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify availability through write + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void WriteVerifyNotAvailable(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop) { +void WriteVerifyNotAvailable(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop) { LOG(INFO) << "Write verify not available: " << fillCh; PeerId leaderId(leaderPeer.address()); brpc::Channel channel; @@ -807,23 +751,22 @@ void WriteVerifyNotAvailable(Peer leaderPeer, request.set_sn(sn); cntl.request_attachment().resize(length, fillCh); stub.WriteChunk(&cntl, &request, &response, nullptr); - LOG_IF(INFO, cntl.Failed()) << "error msg: " - << cntl.ErrorCode() << " : " - << cntl.ErrorText(); + LOG_IF(INFO, cntl.Failed()) + << "error msg: " << cntl.ErrorCode() << " : " << cntl.ErrorText(); ASSERT_TRUE(cntl.Failed() || - response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); + response.status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } } /** - * 验证几个副本的copyset status是否一致 - * @param peerIds: 待验证的peers - * @param logicPoolID: 逻辑池id - * @param copysetId: 复制组id + * Verify if the copyset status of several replicas is consistent + * @param peerIds: peers to be verified + * @param logicPoolID: Logical Pool ID + * @param copysetId: Copy group ID */ -void CopysetStatusVerify(const 
std::vector &peers, - LogicPoolID logicPoolID, - CopysetID copysetId, +void CopysetStatusVerify(const std::vector& peers, + LogicPoolID logicPoolID, CopysetID copysetId, uint64_t expectEpoch) { std::vector resps; for (Peer peer : peers) { @@ -838,7 +781,7 @@ void CopysetStatusVerify(const std::vector &peers, cntl.set_timeout_ms(2000); request.set_logicpoolid(logicPoolID); request.set_copysetid(copysetId); - Peer *peerP = new Peer(); + Peer* peerP = new Peer(); request.set_allocated_peer(peerP); peerP->set_address(peerId.to_string()); request.set_queryhash(true); @@ -847,7 +790,8 @@ void CopysetStatusVerify(const std::vector &peers, ASSERT_FALSE(cntl.Failed()); LOG(INFO) << peerId.to_string() << "'s status is: \n" << response.DebugString(); - // 多个副本的state是不一样的,因为有leader,也有follower + // The states of multiple replicas are different because there are + // leaders and followers response.clear_state(); response.clear_peer(); response.clear_firstindex(); @@ -869,20 +813,15 @@ void CopysetStatusVerify(const std::vector &peers, } } - - -void TransferLeaderAssertSuccess(PeerCluster *cluster, - const Peer &targetLeader, +void TransferLeaderAssertSuccess(PeerCluster* cluster, const Peer& targetLeader, braft::cli::CliOptions opt) { Peer leaderPeer; const int kMaxLoop = 10; butil::Status status; for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(cluster->GetLogicPoolId(), - cluster->GetCopysetId(), - cluster->CopysetConf(), - targetLeader, - opt); + status = + TransferLeader(cluster->GetLogicPoolId(), cluster->GetCopysetId(), + cluster->CopysetConf(), targetLeader, opt); if (0 == status.error_code()) { cluster->WaitLeader(&leaderPeer); if (leaderPeer.address() == targetLeader.address()) { @@ -891,8 +830,7 @@ void TransferLeaderAssertSuccess(PeerCluster *cluster, } ::sleep(1); } - ASSERT_STREQ(targetLeader.address().c_str(), - leaderPeer.address().c_str()); + ASSERT_STREQ(targetLeader.address().c_str(), leaderPeer.address().c_str()); } } // namespace chunkserver diff --git a/test/integration/common/peer_cluster.h b/test/integration/common/peer_cluster.h index 4a5fcacb58..24b2c2d63e 100644 --- a/test/integration/common/peer_cluster.h +++ b/test/integration/common/peer_cluster.h @@ -23,29 +23,29 @@ #ifndef TEST_INTEGRATION_COMMON_PEER_CLUSTER_H_ #define TEST_INTEGRATION_COMMON_PEER_CLUSTER_H_ +#include +#include #include #include #include -#include -#include -#include -#include -#include #include -#include #include +#include +#include +#include +#include -#include "src/chunkserver/datastore/file_pool.h" #include "include/chunkserver/chunkserver_common.h" -#include "src/fs/local_filesystem.h" -#include "src/chunkserver/copyset_node.h" #include "proto/common.pb.h" #include "proto/topology.pb.h" +#include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/datastore/file_pool.h" +#include "src/fs/local_filesystem.h" -using ::curve::mds::topology::TopologyService; using ::curve::mds::topology::ChunkServerRegistRequest; using ::curve::mds::topology::ChunkServerRegistResponse; +using ::curve::mds::topology::TopologyService; namespace curve { namespace chunkserver { @@ -53,37 +53,37 @@ namespace chunkserver { using curve::common::Peer; /** - * PeerNode 状态 - * 1. exit:未启动,或者被关闭 - * 2. running:正在运行 - * 3. stop:hang 住了 + * PeerNode status + * 1. exit: Not started or closed + * 2. running: Running + * 3. 
stop: the process is hung (stopped by SIGSTOP)
 */
 enum class PeerNodeState {
-    EXIT = 0,     // 退出
-    RUNNING = 1,  // 正在运行
-    STOP = 2,     // hang住
+    EXIT = 0,     // Exited
+    RUNNING = 1,  // Running
+    STOP = 2,     // Hung (SIGSTOPped)
 };
 /**
- * 一个 ChunkServer 进程,包含某个 Copyset 的某个副本
+ * A ChunkServer process holding one replica of a copyset
 */
 struct PeerNode {
     PeerNode() : pid(0), state(PeerNodeState::EXIT) {}
-    // Peer对应的进程id
+    // Process ID of this peer
     pid_t pid;
     // Peer
     Peer peer;
-    // copyset的集群配置
+    // Cluster configuration of the copyset
     Configuration conf;
-    // PeerNode的状态
+    // State of the PeerNode
     PeerNodeState state;
 };
 class FakeTopologyService : public TopologyService {
     void RegistChunkServer(google::protobuf::RpcController* cntl_base,
-                          const ChunkServerRegistRequest* request,
-                          ChunkServerRegistResponse* response,
-                          google::protobuf::Closure* done) {
+                           const ChunkServerRegistRequest* request,
+                           ChunkServerRegistResponse* response,
+                           google::protobuf::Closure* done) {
         brpc::ClosureGuard done_guard(done);
         response->set_statuscode(0);
         response->set_chunkserverid(request->chunkserverid());
@@ -92,16 +92,13 @@ class FakeTopologyService : public TopologyService {
 };
 /**
- * 封装模拟cluster测试相关的接口
+ * Wraps the interfaces used for simulated-cluster testing
 */
 class PeerCluster {
 public:
-    PeerCluster(const std::string &clusterName,
-                const LogicPoolID logicPoolID,
-                const CopysetID copysetID,
-                const std::vector &peers,
-                std::vector params,
-                std::map paramsIndexs);
+    PeerCluster(const std::string& clusterName, const LogicPoolID logicPoolID,
+                const CopysetID copysetID, const std::vector& peers,
+                std::vector params, std::map paramsIndexs);
     virtual ~PeerCluster() {
         StopAllPeers();
         if (isFakeMdsStart_) {
@@ -116,139 +113,134 @@ class PeerCluster {
     *
     * @return 0 for success, -1 for failed
     */
-    int StartFakeTopoloyService(const std::string &listenAddr);
+    int StartFakeTopoloyService(const std::string& listenAddr);
     /**
-     * 启动一个 Peer
+     * Start a peer
     * @param peer
-     * @param empty初始化配置是否为空
-     * @return 0,成功;-1,失败
+     * @param empty Whether the initial configuration is empty
+     * @return 0 on success; -1 on
failure
     */
-    int StartPeer(const Peer &peer,
-                  int id,
-                  const bool empty = false);
+    int StartPeer(const Peer& peer, int id, const bool empty = false);
     /**
-     * 关闭一个peer,使用SIGINT
+     * Shut down a peer, using SIGINT
     * @param peer
-     * @return 0 成功;-1 失败
+     * @return 0 on success; -1 on failure
     */
-    int ShutdownPeer(const Peer &peer);
-
+    int ShutdownPeer(const Peer& peer);
     /**
-     * hang住一个peer,使用SIGSTOP
+     * Hang a peer, using SIGSTOP
     * @param peer
-     * @return 0成功;-1失败
+     * @return 0 on success; -1 on failure
     */
-    int HangPeer(const Peer &peer);
+    int HangPeer(const Peer& peer);
     /**
-     * 恢复hang住的peer,使用SIGCONT
-     * @param peer
-     * @return 0:成功,-1 失败
-     */
-    int SignalPeer(const Peer &peer);
+     * Resume a hung peer, using SIGCONT
+     * @param peer
+     * @return 0 on success; -1 on failure
+     */
+    int SignalPeer(const Peer& peer);
     /**
-     * 反复重试直到等到新的leader产生
-     * @param leaderPeer出参,返回leader info
-     * @return 0,成功;-1 失败
+     * Retry repeatedly until a new leader is elected
+     * @param leaderPeer Output parameter; returns the leader info
+     * @return 0 on success; -1 on failure
     */
-    int WaitLeader(Peer *leaderPeer);
+    int WaitLeader(Peer* leaderPeer);
     /**
-     * confirm leader
+     * confirm leader
     * @param: LogicPoolID logicalPool id
     * @param: copysetId copyset id
     * @param: leaderAddr leader address
-     * @param: leader leader info
-     * @return 0,成功;-1 失败
+     * @param: leader leader info
+     * @return 0 on success; -1 on failure
     */
-    int ConfirmLeader(const LogicPoolID &logicPoolId,
-                      const CopysetID &copysetId,
-                      const std::string& leaderAddr,
-                      Peer *leader);
-
+    int ConfirmLeader(const LogicPoolID& logicPoolId,
+                      const CopysetID& copysetId, const std::string& leaderAddr,
+                      Peer* leader);
     /**
-     * Stop所有的peer
-     * @return 0,成功;-1 失败
+     * Stop all peers
+     * @return 0 on success; -1 on failure
     */
     int StopAllPeers();
 public:
-    /* 返回集群当前的配置 */
+    /* Returns the current configuration of the cluster */
     Configuration CopysetConf() const;
-    LogicPoolID GetLogicPoolId() const {return logicPoolID_;}
+    LogicPoolID GetLogicPoolId() const { return logicPoolID_; }
-    CopysetID GetCopysetId() const {return copysetID_;}
+    CopysetID GetCopysetId() const { return copysetID_; }
-    void SetWorkingCopyset(CopysetID copysetID) {copysetID_ = copysetID;}
+    void SetWorkingCopyset(CopysetID copysetID) { copysetID_ = copysetID; }
-    /* 修改 PeerNode 配置相关的接口,单位: s */
+    /* Interfaces for modifying the PeerNode configuration, unit: s */
     int SetsnapshotIntervalS(int snapshotIntervalS);
     int SetElectionTimeoutMs(int electionTimeoutMs);
-    static int StartPeerNode(int id, char *arg[]);
+    static int StartPeerNode(int id, char* arg[]);
-    static int PeerToId(const Peer &peer);
+    static int PeerToId(const Peer& peer);
-    static int GetFollwerPeers(const std::vector& peers,
-                               Peer leader,
-                               std::vector *followers);
+    static int GetFollwerPeers(const std::vector& peers, Peer leader,
+                               std::vector* followers);
 public:
     /**
-     * 返回执行peer的copyset路径with protocol, ex: local://./127.0.0.1:9101:0
+     * Returns the copyset path of the given peer, with protocol, e.g.:
+     * local://./127.0.0.1:9101:0
     */
-    static const std::string CopysetDirWithProtocol(const Peer &peer);
+    static const std::string CopysetDirWithProtocol(const Peer& peer);
     /**
-     * 返回执行peer的copyset路径without protocol, ex: ./127.0.0.1:9101:0
+     * Returns the copyset path of the given peer, without protocol, e.g.:
+     * ./127.0.0.1:9101:0
     */
-    static const std::string CopysetDirWithoutProtocol(const Peer &peer);
+    static const std::string CopysetDirWithoutProtocol(const Peer& peer);
     /**
     * remove peer's copyset
dir's cmd */ - static const std::string RemoveCopysetDirCmd(const Peer &peer); + static const std::string RemoveCopysetDirCmd(const Peer& peer); - static const std::string RemoveCopysetLogDirCmd(const Peer &peer, + static const std::string RemoveCopysetLogDirCmd(const Peer& peer, LogicPoolID logicPoolID, CopysetID copysetID); - static int CreateCopyset(LogicPoolID logicPoolID, - CopysetID copysetID, - Peer peer, - const std::vector& peers); + static int CreateCopyset(LogicPoolID logicPoolID, CopysetID copysetID, + Peer peer, const std::vector& peers); private: - // 集群名字 - std::string clusterName_; - // 集群的peer集合 - std::vector peers_; - // peer集合的映射map + // Cluster Name + std::string clusterName_; + // The peer set of the cluster + std::vector peers_; + // Mapping Map of Peer Set std::unordered_map> peersMap_; - // 快照间隔 - int snapshotIntervalS_; - // 选举超时时间 - int electionTimeoutMs_; - // 集群成员配置 - Configuration conf_; - - // 逻辑池id - LogicPoolID logicPoolID_; - // 复制组id - CopysetID copysetID_; + // Snapshot interval + int snapshotIntervalS_; + // Election timeout + int electionTimeoutMs_; + // Cluster member configuration + Configuration conf_; + + // Logical Pool ID + LogicPoolID logicPoolID_; + // Copy Group ID + CopysetID copysetID_; // chunkserver id - static ChunkServerID chunkServerId_; - // 文件系统适配层 + static ChunkServerID chunkServerId_; + // File System Adaptation Layer static std::shared_ptr fs_; - // chunkserver启动传入参数的映射关系(chunkserver id: params_'s index) + // chunkserver starts the mapping relationship of incoming parameters + // (chunkserver id: params_'s index) std::map paramsIndexs_; - // chunkserver启动需要传递的参数列表 - std::vector params_; + // List of parameters to be passed for chunkserver startup + std::vector params_; // fake mds server brpc::Server fakeMdsServer_; @@ -259,148 +251,117 @@ class PeerCluster { }; /** - * 正常 I/O 验证,先写进去,再读出来验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, write it in first, then read it out for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 - * @param sn 本次写入的版本号 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO + * @param sn The version number written this time */ -void WriteThenReadVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop, - uint64_t sn = 1); +void WriteThenReadVerify(Peer leaderPeer, LogicPoolID logicPoolId, + CopysetID copysetId, ChunkID chunkId, int length, + char fillCh, int loop, uint64_t sn = 1); /** - * 正常 I/O 验证,read 数据验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, read data verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ -void ReadVerify(Peer leaderPeer, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - int length, - char fillCh, - int loop); +void ReadVerify(Peer leaderPeer, 
+                ChunkID chunkId, int length, char fillCh, int loop);

 /**
- * 读chunk的snapshot进行验证
- * @param leaderId 主的 id
- * @param logicPoolId 逻辑池 id
- * @param copysetId 复制组 id
+ * Verify by reading the snapshot of the chunk
+ * @param leaderId leader id
+ * @param logicPoolId logical pool id
+ * @param copysetId copyset id
  * @param chunkId chunk id
- * @param length 每次 IO 的 length
- * @param fillCh 每次 IO 填充的字符
- * @param loop 重复发起 IO 的次数
+ * @param length length of each IO
+ * @param fillCh character used to fill each IO
+ * @param loop number of times the IO is repeated
 */
-void ReadSnapshotVerify(Peer leaderPeer,
-                        LogicPoolID logicPoolId,
-                        CopysetID copysetId,
-                        ChunkID chunkId,
-                        int length,
-                        char fillCh,
-                        int loop);
+void ReadSnapshotVerify(Peer leaderPeer, LogicPoolID logicPoolId,
+                        CopysetID copysetId, ChunkID chunkId, int length,
+                        char fillCh, int loop);

 /**
- * 删除chunk的snapshot进行验证
- * @param leaderId 主的 id
- * @param logicPoolId 逻辑池 id
- * @param copysetId 复制组 id
- * @param chunkId chunk id
- * @param csn corrected sn
+ * Delete the snapshot of the chunk and verify
+ * @param leaderId leader id
+ * @param logicPoolId logical pool id
+ * @param copysetId copyset id
+ * @param chunkId chunk id
+ * @param csn corrected sn
 */
-void DeleteSnapshotVerify(Peer leaderPeer,
-                          LogicPoolID logicPoolId,
-                          CopysetID copysetId,
-                          ChunkID chunkId,
-                          uint64_t csn);
+void DeleteSnapshotVerify(Peer leaderPeer, LogicPoolID logicPoolId,
+                          CopysetID copysetId, ChunkID chunkId, uint64_t csn);

 /**
- * 异常I/O验证,read数据不符合预期
- * @param leaderId 主的 id
- * @param logicPoolId 逻辑池 id
- * @param copysetId 复制组 id
+ * Abnormal I/O verification: the read data does not match expectations
+ * @param leaderId leader id
+ * @param logicPoolId logical pool id
+ * @param copysetId copyset id
  * @param chunkId chunk id
- * @param length 每次 IO 的 length
- * @param fillCh 每次 IO 填充的字符
- * @param loop 重复发起 IO 的次数
+ * @param length length of each IO
+ * @param fillCh character used to fill each IO
+ * @param loop number of times the IO is repeated
 */
-void ReadNotVerify(Peer leaderPeer,
-                   LogicPoolID logicPoolId,
-                   CopysetID copysetId,
-                   ChunkID chunkId,
-                   int length,
-                   char fillCh,
-                   int loop);
+void ReadNotVerify(Peer leaderPeer, LogicPoolID logicPoolId,
+                   CopysetID copysetId, ChunkID chunkId, int length,
+                   char fillCh, int loop);

 /**
- * 通过read验证可用性
- * @param leaderId 主的 id
- * @param logicPoolId 逻辑池 id
- * @param copysetId 复制组 id
+ * Verify availability through read
+ * @param leaderId leader id
+ * @param logicPoolId logical pool id
+ * @param copysetId copyset id
  * @param chunkId chunk id
- * @param length 每次 IO 的 length
- * @param fillCh 每次 IO 填充的字符
- * @param loop 重复发起 IO 的次数
+ * @param length length of each IO
+ * @param fillCh character used to fill each IO
+ * @param loop number of times the IO is repeated
 */
-void ReadVerifyNotAvailable(Peer leaderPeer,
-                            LogicPoolID logicPoolId,
-                            CopysetID copysetId,
-                            ChunkID chunkId,
-                            int length,
-                            char fillCh,
-                            int loop);
+void ReadVerifyNotAvailable(Peer leaderPeer, LogicPoolID logicPoolId,
+                            CopysetID copysetId, ChunkID chunkId, int length,
+                            char fillCh, int loop);
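/*
 * A minimal usage sketch of the verify helpers above (hypothetical values;
 * the pool/copyset/chunk ids, fill character and loop count are
 * illustrative only):
 *
 *   Peer leader;
 *   ASSERT_EQ(0, cluster.WaitLeader(&leader));
 *   // write 4KB of 'a' ten times, reading each write back to verify
 *   WriteThenReadVerify(leader, logicPoolId, copysetId, chunkId,
 *                       4 * 1024, 'a', 10);
 *   // once the copyset has lost quorum, reads are expected to be unavailable
 *   ReadVerifyNotAvailable(leader, logicPoolId, copysetId, chunkId,
 *                          4 * 1024, 'a', 1);
 */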
 /**
- * 通过write验证可用性
- * @param leaderId 主的 id
- * @param logicPoolId 逻辑池 id
- * @param copysetId 复制组 id
+ * Verify availability through write
+ * @param leaderId leader id
+ * @param logicPoolId logical pool id
+ * @param copysetId copyset id
  * @param chunkId chunk id
- * @param length 每次 IO 的 length
- * @param fillCh 每次 IO 填充的字符
- * @param loop 重复发起 IO 的次数
+ * @param length length of each IO
+ * @param fillCh character used to fill each IO
+ * @param loop number of times the IO is repeated
 */
-void WriteVerifyNotAvailable(Peer leaderPeer,
-                             LogicPoolID logicPoolId,
-                             CopysetID copysetId,
-                             ChunkID chunkId,
-                             int length,
-                             char fillCh,
-                             int loop);
+void WriteVerifyNotAvailable(Peer leaderPeer, LogicPoolID logicPoolId,
+                             CopysetID copysetId, ChunkID chunkId, int length,
+                             char fillCh, int loop);

 /**
- * 验证几个副本的copyset status是否一致
- * @param peerIds: 待验证的peers
- * @param logicPoolID: 逻辑池id
- * @param copysetId: 复制组id
+ * Verify that the copyset status of several replicas is consistent
+ * @param peerIds: peers to be verified
+ * @param logicPoolID: logical pool id
+ * @param copysetId: copyset id
 */
-void CopysetStatusVerify(const std::vector &peers,
-                         LogicPoolID logicPoolID,
-                         CopysetID copysetId,
+void CopysetStatusVerify(const std::vector& peers,
+                         LogicPoolID logicPoolID, CopysetID copysetId,
                          uint64_t expectEpoch = 0);

 /**
- * transfer leader,并且预期能够成功
- * @param cluster: 集群的指针
- * @param targetLeader: 期望tranfer的目标节点
- * @param opt: tranfer 请求使用的 clioption
+ * Transfer leader, expecting it to succeed
+ * @param cluster: pointer to the cluster
+ * @param targetLeader: target node of the expected transfer
+ * @param opt: the CliOptions used by the transfer request
 */
-void TransferLeaderAssertSuccess(PeerCluster *cluster,
-                                 const Peer &targetLeader,
+void TransferLeaderAssertSuccess(PeerCluster* cluster, const Peer& targetLeader,
                                  braft::cli::CliOptions opt);

 }  // namespace chunkserver
diff --git a/test/integration/heartbeat/common.cpp b/test/integration/heartbeat/common.cpp
index 5d09293287..ae597506bc 100644
--- a/test/integration/heartbeat/common.cpp
+++ b/test/integration/heartbeat/common.cpp
@@ -21,44 +21,44 @@
 */

 #include "test/integration/heartbeat/common.h"
+
 #include "test/mds/mock/mock_alloc_statistic.h"

 namespace curve {
 namespace mds {

-void HeartbeatIntegrationCommon::PrepareAddPoolset(
-    const Poolset &poolset) {
+void HeartbeatIntegrationCommon::PrepareAddPoolset(const Poolset& poolset) {
     int ret = topology_->AddPoolset(poolset);
     EXPECT_EQ(topology::kTopoErrCodeSuccess, ret);
 }

 void HeartbeatIntegrationCommon::PrepareAddLogicalPool(
-    const LogicalPool &lpool) {
+    const LogicalPool& lpool) {
     int ret = topology_->AddLogicalPool(lpool);
     EXPECT_EQ(topology::kTopoErrCodeSuccess, ret)
         << "should have PrepareAddLogicalPool()";
 }

 void HeartbeatIntegrationCommon::PrepareAddPhysicalPool(
-    const PhysicalPool &ppool) {
+    const PhysicalPool& ppool) {
     int ret = topology_->AddPhysicalPool(ppool);
     EXPECT_EQ(topology::kTopoErrCodeSuccess, ret);
 }

-void HeartbeatIntegrationCommon::PrepareAddZone(const Zone &zone) {
+void HeartbeatIntegrationCommon::PrepareAddZone(const Zone& zone) {
     int ret = topology_->AddZone(zone);
     EXPECT_EQ(topology::kTopoErrCodeSuccess, ret)
         << "should have PrepareAddPhysicalPool()";
 }

-void HeartbeatIntegrationCommon::PrepareAddServer(const Server &server) {
+void HeartbeatIntegrationCommon::PrepareAddServer(const Server& server) {
     int ret = topology_->AddServer(server);
     EXPECT_EQ(topology::kTopoErrCodeSuccess, ret)
         << "should have PrepareAddZone()";
 }

 void HeartbeatIntegrationCommon::PrepareAddChunkServer(
-    const ChunkServer &chunkserver) {
+    const ChunkServer& chunkserver) {
     ChunkServer cs(chunkserver);
     cs.SetOnlineState(OnlineState::ONLINE);
     int ret = topology_->AddChunkServer(cs);
@@ -68,7 +68,7 @@ void HeartbeatIntegrationCommon::PrepareAddChunkServer(
 void
HeartbeatIntegrationCommon::PrepareAddCopySet( CopySetIdType copysetId, PoolIdType logicalPoolId, - const std::set &members) { + const std::set& members) { CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); int ret = topology_->AddCopySet(cs); @@ -78,10 +78,10 @@ void HeartbeatIntegrationCommon::PrepareAddCopySet( void HeartbeatIntegrationCommon::UpdateCopysetTopo( CopySetIdType copysetId, PoolIdType logicalPoolId, uint64_t epoch, - ChunkServerIdType leader, const std::set &members, + ChunkServerIdType leader, const std::set& members, ChunkServerIdType candidate) { ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE(topology_->GetCopySet(CopySetKey{ logicalPoolId, copysetId }, + ASSERT_TRUE(topology_->GetCopySet(CopySetKey{logicalPoolId, copysetId}, ©setInfo)); copysetInfo.SetEpoch(epoch); copysetInfo.SetLeader(leader); @@ -93,8 +93,8 @@ void HeartbeatIntegrationCommon::UpdateCopysetTopo( } void HeartbeatIntegrationCommon::SendHeartbeat( - const ChunkServerHeartbeatRequest &request, bool expectFailed, - ChunkServerHeartbeatResponse *response) { + const ChunkServerHeartbeatRequest& request, bool expectFailed, + ChunkServerHeartbeatResponse* response) { // init brpc client brpc::Channel channel; ASSERT_EQ(0, channel.Init(listenAddr_.c_str(), NULL)); @@ -109,7 +109,7 @@ void HeartbeatIntegrationCommon::SendHeartbeat( } void HeartbeatIntegrationCommon::BuildBasicChunkServerRequest( - ChunkServerIdType id, ChunkServerHeartbeatRequest *req) { + ChunkServerIdType id, ChunkServerHeartbeatRequest* req) { ChunkServer out; EXPECT_TRUE(topology_->GetChunkServer(id, &out)) << "get chunkserver: " << id << " fail"; @@ -139,7 +139,7 @@ void HeartbeatIntegrationCommon::BuildBasicChunkServerRequest( } void HeartbeatIntegrationCommon::AddCopySetToRequest( - ChunkServerHeartbeatRequest *req, const CopySetInfo &csInfo, + ChunkServerHeartbeatRequest* req, const CopySetInfo& csInfo, ConfigChangeType type) { auto info = req->add_copysetinfos(); info->set_logicalpoolid(csInfo.GetLogicalPoolId()); @@ -170,7 +170,7 @@ void HeartbeatIntegrationCommon::AddCopySetToRequest( << "get chunkserver: " << csInfo.GetCandidate() << " error"; std::string ipport = out.GetHostIp() + ":" + std::to_string(out.GetPort()) + ":0"; - ConfigChangeInfo *confChxInfo = new ConfigChangeInfo(); + ConfigChangeInfo* confChxInfo = new ConfigChangeInfo(); auto replica = new ::curve::common::Peer(); replica->set_address(ipport.c_str()); confChxInfo->set_allocated_peer(replica); @@ -180,13 +180,13 @@ void HeartbeatIntegrationCommon::AddCopySetToRequest( } } -void HeartbeatIntegrationCommon::AddOperatorToOpController(const Operator &op) { +void HeartbeatIntegrationCommon::AddOperatorToOpController(const Operator& op) { auto opController = coordinator_->GetOpController(); ASSERT_TRUE(opController->AddOperator(op)); } void HeartbeatIntegrationCommon::RemoveOperatorFromOpController( - const CopySetKey &id) { + const CopySetKey& id) { auto opController = coordinator_->GetOpController(); opController->RemoveOperator(id); } @@ -243,11 +243,11 @@ void HeartbeatIntegrationCommon::PrepareBasicCluseter() { PrepareAddChunkServer(cs3); // add copyset - PrepareAddCopySet(1, 1, std::set{ 1, 2, 3 }); + PrepareAddCopySet(1, 1, std::set{1, 2, 3}); } void HeartbeatIntegrationCommon::InitHeartbeatOption( - Configuration *conf, HeartbeatOption *heartbeatOption) { + Configuration* conf, HeartbeatOption* heartbeatOption) { heartbeatOption->heartbeatIntervalMs = conf->GetIntValue("mds.heartbeat.intervalMs"); 
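    // The keys read here and below mirror values the tests preload into the
    // Configuration; a typical setup (as in InitConfiguration() of the basic
    // test further down) looks like:
    //   conf->SetIntValue("mds.heartbeat.intervalMs", 100);
    //   conf->SetIntValue("mds.heartbeat.misstimeoutMs", 300);
    //   conf->SetIntValue("mds.heartbeat.offlinetimeoutMs", 500);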
heartbeatOption->heartbeatMissTimeOutMs = @@ -259,7 +259,7 @@ void HeartbeatIntegrationCommon::InitHeartbeatOption( } void HeartbeatIntegrationCommon::InitSchedulerOption( - Configuration *conf, ScheduleOption *scheduleOption) { + Configuration* conf, ScheduleOption* scheduleOption) { scheduleOption->enableCopysetScheduler = conf->GetBoolValue("mds.enable.copyset.scheduler"); scheduleOption->enableLeaderScheduler = @@ -305,22 +305,20 @@ void HeartbeatIntegrationCommon::BuildBasicCluster() { auto idGen = std::make_shared(); auto tokenGen = std::make_shared(); - auto topologyStorage = - std::make_shared(); + auto topologyStorage = std::make_shared(); topology_ = std::make_shared(idGen, tokenGen, topologyStorage); ASSERT_EQ(kTopoErrCodeSuccess, topology_->Init(topologyOption)); // init topology manager - topologyStat_ = - std::make_shared(topology_); + topologyStat_ = std::make_shared(topology_); topologyStat_->Init(); auto copysetManager = std::make_shared(CopysetOption()); auto allocStat = std::make_shared(); auto topologyServiceManager = std::make_shared( topology_, topologyStat_, nullptr, copysetManager, nullptr); - // 初始化basic集群 + // Initialize basic cluster PrepareBasicCluseter(); // init coordinator @@ -341,7 +339,7 @@ void HeartbeatIntegrationCommon::BuildBasicCluster() { heartbeatManager_->Init(); heartbeatManager_->Run(); - // 启动心跳rpc + // Start heartbeat rpc listenAddr_ = conf_.GetStringValue("mds.listen.addr"); heartbeatService_ = std::make_shared(heartbeatManager_); diff --git a/test/integration/heartbeat/common.h b/test/integration/heartbeat/common.h index b281d5a9ab..7787a22910 100644 --- a/test/integration/heartbeat/common.h +++ b/test/integration/heartbeat/common.h @@ -23,41 +23,41 @@ #ifndef TEST_INTEGRATION_HEARTBEAT_COMMON_H_ #define TEST_INTEGRATION_HEARTBEAT_COMMON_H_ -#include -#include #include #include +#include +#include -#include -#include //NOLINT -#include //NOLINT -#include +#include //NOLINT #include +#include +#include #include +#include //NOLINT +#include #include -#include +#include "proto/common.pb.h" +#include "proto/heartbeat.pb.h" +#include "proto/topology.pb.h" #include "src/common/configuration.h" -#include "src/mds/topology/topology_config.h" +#include "src/common/timeutility.h" #include "src/mds/common/mds_define.h" -#include "src/mds/topology/topology_item.h" -#include "src/mds/topology/topology.h" -#include "src/mds/topology/topology_token_generator.h" -#include "src/mds/topology/topology_service_manager.h" -#include "src/mds/topology/topology_storge.h" -#include "src/mds/nameserver2/idgenerator/chunk_id_generator.h" +#include "src/mds/copyset/copyset_config.h" +#include "src/mds/copyset/copyset_manager.h" +#include "src/mds/heartbeat/chunkserver_healthy_checker.h" #include "src/mds/heartbeat/heartbeat_manager.h" #include "src/mds/heartbeat/heartbeat_service.h" -#include "src/mds/heartbeat/chunkserver_healthy_checker.h" -#include "src/mds/schedule/topoAdapter.h" +#include "src/mds/nameserver2/idgenerator/chunk_id_generator.h" #include "src/mds/schedule/operator.h" -#include "src/mds/copyset/copyset_manager.h" -#include "src/mds/copyset/copyset_config.h" #include "src/mds/schedule/scheduleMetrics.h" -#include "proto/topology.pb.h" -#include "proto/heartbeat.pb.h" -#include "proto/common.pb.h" -#include "src/common/timeutility.h" +#include "src/mds/schedule/topoAdapter.h" +#include "src/mds/topology/topology.h" +#include "src/mds/topology/topology_config.h" +#include "src/mds/topology/topology_item.h" +#include 
"src/mds/topology/topology_service_manager.h" +#include "src/mds/topology/topology_storge.h" +#include "src/mds/topology/topology_token_generator.h" using ::curve::common::Configuration; using std::string; @@ -65,15 +65,17 @@ using std::string; using ::curve::mds::topology::ChunkServerIdType; using ::curve::mds::topology::ChunkServerState; using ::curve::mds::topology::CopySetIdType; +using ::curve::mds::topology::CopySetKey; using ::curve::mds::topology::DefaultIdGenerator; using ::curve::mds::topology::DefaultTokenGenerator; using ::curve::mds::topology::kTopoErrCodeSuccess; using ::curve::mds::topology::LogicalPool; using ::curve::mds::topology::LogicalPoolType; -using ::curve::mds::topology::Poolset; using ::curve::mds::topology::PhysicalPool; -using ::curve::mds::topology::PoolsetIdType; using ::curve::mds::topology::PoolIdType; +using ::curve::mds::topology::Poolset; +using ::curve::mds::topology::PoolsetIdType; +using ::curve::mds::topology::Server; using ::curve::mds::topology::ServerIdType; using ::curve::mds::topology::TopologyImpl; using ::curve::mds::topology::TopologyOption; @@ -82,8 +84,6 @@ using ::curve::mds::topology::TopologyStatImpl; using ::curve::mds::topology::UNINTIALIZE_ID; using ::curve::mds::topology::Zone; using ::curve::mds::topology::ZoneIdType; -using ::curve::mds::topology::Server; -using ::curve::mds::topology::CopySetKey; using ::curve::mds::heartbeat::ChunkServerHeartbeatRequest; using ::curve::mds::heartbeat::ChunkServerHeartbeatResponse; @@ -120,247 +120,206 @@ class FakeTopologyStorage : public TopologyStorage { public: FakeTopologyStorage() {} - bool - LoadPoolset(std::unordered_map *PoolsetMap, - PoolsetIdType *maxPoolsetId) { + bool LoadPoolset(std::unordered_map* PoolsetMap, + PoolsetIdType* maxPoolsetId) { return true; } - bool - LoadLogicalPool(std::unordered_map *logicalPoolMap, - PoolIdType *maxLogicalPoolId) { + bool LoadLogicalPool( + std::unordered_map* logicalPoolMap, + PoolIdType* maxLogicalPoolId) { return true; } bool LoadPhysicalPool( - std::unordered_map *physicalPoolMap, - PoolIdType *maxPhysicalPoolId) { + std::unordered_map* physicalPoolMap, + PoolIdType* maxPhysicalPoolId) { return true; } - bool LoadZone(std::unordered_map *zoneMap, - ZoneIdType *maxZoneId) { + bool LoadZone(std::unordered_map* zoneMap, + ZoneIdType* maxZoneId) { return true; } - bool LoadServer(std::unordered_map *serverMap, - ServerIdType *maxServerId) { + bool LoadServer(std::unordered_map* serverMap, + ServerIdType* maxServerId) { return true; } bool LoadChunkServer( - std::unordered_map *chunkServerMap, - ChunkServerIdType *maxChunkServerId) { + std::unordered_map* chunkServerMap, + ChunkServerIdType* maxChunkServerId) { return true; } - bool LoadCopySet(std::map *copySetMap, - std::map *copySetIdMaxMap) { + bool LoadCopySet(std::map* copySetMap, + std::map* copySetIdMaxMap) { return true; } - bool StoragePoolset(const Poolset &data) { - return true; - } - bool StorageLogicalPool(const LogicalPool &data) { - return true; - } - bool StoragePhysicalPool(const PhysicalPool &data) { - return true; - } - bool StorageZone(const Zone &data) { - return true; - } - bool StorageServer(const Server &data) { - return true; - } - bool StorageChunkServer(const ChunkServer &data) { - return true; - } - bool StorageCopySet(const CopySetInfo &data) { - return true; - } - - bool DeletePoolset(PoolsetIdType id) { - return true; - } - bool DeleteLogicalPool(PoolIdType id) { - return true; - } - bool DeletePhysicalPool(PoolIdType id) { - return true; - } - bool 
DeleteZone(ZoneIdType id) { - return true; - } - bool DeleteServer(ServerIdType id) { - return true; - } - bool DeleteChunkServer(ChunkServerIdType id) { - return true; - } - bool DeleteCopySet(CopySetKey key) { - return true; - } - - bool UpdateLogicalPool(const LogicalPool &data) { - return true; - } - bool UpdatePhysicalPool(const PhysicalPool &data) { - return true; - } - bool UpdateZone(const Zone &data) { - return true; - } - bool UpdateServer(const Server &data) { - return true; - } - bool UpdateChunkServer(const ChunkServer &data) { - return true; - } - bool UpdateCopySet(const CopySetInfo &data) { - return true; - } - - bool LoadClusterInfo(std::vector *info) { - return true; - } - bool StorageClusterInfo(const ClusterInformation &info) { - return true; - } + bool StoragePoolset(const Poolset& data) { return true; } + bool StorageLogicalPool(const LogicalPool& data) { return true; } + bool StoragePhysicalPool(const PhysicalPool& data) { return true; } + bool StorageZone(const Zone& data) { return true; } + bool StorageServer(const Server& data) { return true; } + bool StorageChunkServer(const ChunkServer& data) { return true; } + bool StorageCopySet(const CopySetInfo& data) { return true; } + + bool DeletePoolset(PoolsetIdType id) { return true; } + bool DeleteLogicalPool(PoolIdType id) { return true; } + bool DeletePhysicalPool(PoolIdType id) { return true; } + bool DeleteZone(ZoneIdType id) { return true; } + bool DeleteServer(ServerIdType id) { return true; } + bool DeleteChunkServer(ChunkServerIdType id) { return true; } + bool DeleteCopySet(CopySetKey key) { return true; } + + bool UpdateLogicalPool(const LogicalPool& data) { return true; } + bool UpdatePhysicalPool(const PhysicalPool& data) { return true; } + bool UpdateZone(const Zone& data) { return true; } + bool UpdateServer(const Server& data) { return true; } + bool UpdateChunkServer(const ChunkServer& data) { return true; } + bool UpdateCopySet(const CopySetInfo& data) { return true; } + + bool LoadClusterInfo(std::vector* info) { return true; } + bool StorageClusterInfo(const ClusterInformation& info) { return true; } }; } // namespace topology class HeartbeatIntegrationCommon { public: - /* HeartbeatIntegrationCommon 构造函数 + /* HeartbeatIntegrationCommon constructor * - * @param[in] conf 配置信息 + * @param[in] conf configuration information */ - explicit HeartbeatIntegrationCommon(const Configuration &conf) { + explicit HeartbeatIntegrationCommon(const Configuration& conf) { conf_ = conf; } - /* PrepareAddPoolset 在集群中添加物理池集合 + /* PrepareAddPoolset adds a physical pool collection to the cluster * - * @param[in] poolset 物理池集合(池组) + * @param[in] poolset Physical pool set (pool group) */ - void PrepareAddPoolset(const Poolset &poolset); + void PrepareAddPoolset(const Poolset& poolset); - /* PrepareAddLogicalPool 在集群中添加逻辑池 + /* PrepareAddLogicalPool Adding a Logical Pool to a Cluster * - * @param[in] lpool 逻辑池 + * @param[in] lpool logical pool */ - void PrepareAddLogicalPool(const LogicalPool &lpool); + void PrepareAddLogicalPool(const LogicalPool& lpool); - /* PrepareAddPhysicalPool 在集群中添加物理池 + /* PrepareAddPhysicalPool Adding a Physical Pool to a Cluster * - * @param[in] ppool 物理池 + * @param[in] ppool physical pool */ - void PrepareAddPhysicalPool(const PhysicalPool &ppool); + void PrepareAddPhysicalPool(const PhysicalPool& ppool); - /* PrepareAddZone 在集群中添加zone + /* PrepareAddZone adds a zone to the cluster * * @param[in] zone */ - void PrepareAddZone(const Zone &zone); + void PrepareAddZone(const Zone& zone); - /* 
PrepareAddServer 在集群中添加server
+    /* PrepareAddServer adds a server to the cluster
     *
     * @param[in] server
     */
-    void PrepareAddServer(const Server &server);
+    void PrepareAddServer(const Server& server);

-    /* PrepareAddChunkServer 在集群中添加chunkserver节点
+    /* PrepareAddChunkServer adds a chunkserver node to the cluster
     *
     * @param[in] chunkserver
     */
-    void PrepareAddChunkServer(const ChunkServer &chunkserver);
+    void PrepareAddChunkServer(const ChunkServer& chunkserver);

-    /* PrepareAddCopySet 在集群中添加copyset
+    /* PrepareAddCopySet adds a copyset to the cluster
     *
-     * @param[in] copysetId copyset id
-     * @param[in] logicalPoolId 逻辑池id
-     * @param[in] members copyset成员
+     * @param[in] copysetId copyset id
+     * @param[in] logicalPoolId logical pool id
+     * @param[in] members copyset members
     */
    void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId,
-                           const std::set &members);
+                           const std::set& members);

-    /* UpdateCopysetTopo 更新topology中copyset的状态
+    /* UpdateCopysetTopo updates the state of the copyset in the topology
     *
-     * @param[in] copysetId copyset的id
-     * @param[in] logicalPoolId 逻辑池id
-     * @param[in] epoch copyset的epoch
-     * @param[in] leader copyset的leader
-     * @param[in] members copyset的成员
-     * @param[in] candidate copyset的candidate信息
+     * @param[in] copysetId the id of the copyset
+     * @param[in] logicalPoolId logical pool id
+     * @param[in] epoch epoch of the copyset
+     * @param[in] leader leader of the copyset
+     * @param[in] members members of the copyset
+     * @param[in] candidate candidate info of the copyset
     */
    void UpdateCopysetTopo(CopySetIdType copysetId, PoolIdType logicalPoolId,
                           uint64_t epoch, ChunkServerIdType leader,
-                           const std::set &members,
+                           const std::set& members,
                           ChunkServerIdType candidate = UNINTIALIZE_ID);

-    /* SendHeartbeat 发送心跳
+    /* SendHeartbeat sends a heartbeat
     *
     * @param[in] req
-     * @param[in] expectedFailed 为true表示希望发送成功,为false表示希望发送失败
+     * @param[in] expectFailed true means the request is expected to fail,
+     * false means it is expected to succeed
     * @param[out] response
     */
-    void SendHeartbeat(const ChunkServerHeartbeatRequest &request,
+    void SendHeartbeat(const ChunkServerHeartbeatRequest& request,
                       bool expectFailed,
-                       ChunkServerHeartbeatResponse *response);
+                       ChunkServerHeartbeatResponse* response);

-    /* BuildBasicChunkServerRequest 构建最基本的request
+    /* BuildBasicChunkServerRequest builds the most basic request
     *
-     * @param[in] id chunkserver的id
-     * @param[out] req 构造好的指定id的request
+     * @param[in] id chunkserver id
+     * @param[out] req the constructed request for the given id
     */
    void BuildBasicChunkServerRequest(ChunkServerIdType id,
-                                      ChunkServerHeartbeatRequest *req);
+                                      ChunkServerHeartbeatRequest* req);

-    /* AddCopySetToRequest 向request中添加copyset
+    /* AddCopySetToRequest adds a copyset to the request
     *
     * @param[in] req
-     * @param[in] csInfo copyset信息
-     * @param[in] type copyset当前变更类型
+     * @param[in] csInfo copyset information
+     * @param[in] type the current config change type of the copyset
     */
-    void AddCopySetToRequest(ChunkServerHeartbeatRequest *req,
-                             const CopySetInfo &csInfo,
+    void AddCopySetToRequest(ChunkServerHeartbeatRequest* req,
+                             const CopySetInfo& csInfo,
                             ConfigChangeType type = ConfigChangeType::NONE);

-    /* AddOperatorToOpController 向调度模块添加op
+    /* AddOperatorToOpController adds an op to the scheduling module
     *
     * @param[in] op
     */
-    void AddOperatorToOpController(const Operator &op);
+    void AddOperatorToOpController(const Operator& op);

-    /* RemoveOperatorFromOpController 从调度模块移除指定copyset上的op
+    /* RemoveOperatorFromOpController removes the op on the specified copyset
+     * from the scheduling module
     *
-     * @param[in] id 需要移除op的copysetId
+     * @param[in] id the copyset key of the op to remove
     */
-    void RemoveOperatorFromOpController(const CopySetKey &id);
+    void RemoveOperatorFromOpController(const CopySetKey& id);

    /*
-     * PrepareBasicCluseter 在topology中构建最基本的拓扑结构
-     * 一个物理池,一个逻辑池,三个zone,每个zone一个chunkserver,
-     * 集群中有一个copyset
+     * PrepareBasicCluseter builds the most basic topology: one physical
+     * pool, one logical pool, three zones with one chunkserver each, and
+     * one copyset in the cluster
     */
    void PrepareBasicCluseter();

    /**
-     * InitHeartbeatOption 初始化heartbeatOption
+     * InitHeartbeatOption initializes heartbeatOption
     *
-     * @param[in] conf 配置模块
-     * @param[out] heartbeatOption 赋值完成的心跳option
+     * @param[in] conf configuration module
+     * @param[out] heartbeatOption the initialized heartbeat option
     */
-    void InitHeartbeatOption(Configuration *conf,
-                             HeartbeatOption *heartbeatOption);
+    void InitHeartbeatOption(Configuration* conf,
+                             HeartbeatOption* heartbeatOption);

    /**
-     * InitSchedulerOption 初始化scheduleOption
+     * InitSchedulerOption initializes scheduleOption
     *
-     * @param[in] conf 配置模块
-     * @param[out] heartbeatOption 赋值完成的调度option
+     * @param[in] conf configuration module
+     * @param[out] scheduleOption the initialized schedule option
     */
-    void InitSchedulerOption(Configuration *conf,
-                             ScheduleOption *scheduleOption);
+    void InitSchedulerOption(Configuration* conf,
+                             ScheduleOption* scheduleOption);

    /**
-     * BuildBasicCluster 运行heartbeat/topology/scheduler模块
+     * BuildBasicCluster runs the heartbeat/topology/scheduler modules
     */
    void BuildBasicCluster();

diff --git a/test/integration/heartbeat/heartbeat_basic_test.cpp b/test/integration/heartbeat/heartbeat_basic_test.cpp
index c9a2ae416d..4144a9d53b 100644
--- a/test/integration/heartbeat/heartbeat_basic_test.cpp
+++ b/test/integration/heartbeat/heartbeat_basic_test.cpp
@@ -20,8 +20,8 @@
 * Author: lixiaocui
 */

-#include
 #include
+#include

 #include "test/integration/heartbeat/common.h"

@@ -32,19 +32,19 @@ namespace mds {

 class HeartbeatBasicTest : public ::testing::Test {
 protected:
-    void InitConfiguration(Configuration *conf) {
+    void InitConfiguration(Configuration* conf) {
        conf->SetIntValue("mds.topology.ChunkServerStateUpdateSec", 0);

-        // heartbeat相关配置设置
+        // heartbeat-related configuration
        conf->SetIntValue("mds.heartbeat.intervalMs", 100);
        conf->SetIntValue("mds.heartbeat.misstimeoutMs", 300);
        conf->SetIntValue("mds.heartbeat.offlinetimeoutMs", 500);
        conf->SetIntValue("mds.heartbeat.clean_follower_afterMs", 0);

-        // mds监听端口号
+        // mds listening port
        conf->SetStringValue("mds.listen.addr", "127.0.0.1:6879");

-        // scheduler相关的内容
+        // scheduler-related settings
        conf->SetBoolValue("mds.enable.copyset.scheduler", false);
        conf->SetBoolValue("mds.enable.leader.scheduler", false);
        conf->SetBoolValue("mds.enable.recover.scheduler", false);
@@ -65,14 +65,14 @@ class HeartbeatBasicTest : public ::testing::Test {
    }

    void PrepareMdsWithCandidateOpOnGoing() {
-        // 构造mds中copyset当前状
+        // Construct the current state of the copyset in mds
        ChunkServer cs(10, "testtoekn", "nvme", 3, "10.198.100.3", 9001, "/");
        hbtest_->PrepareAddChunkServer(cs);
        hbtest_->UpdateCopysetTopo(1, 1, 5, 1,
-                                   std::set{ 1, 2, 3 }, 10);
+                                   std::set{1, 2, 3}, 10);

-        // 构造scheduler当前的状态
-        Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority,
+        // Construct the current state of the scheduler
+        Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority,
std::chrono::steady_clock::now(), std::make_shared(10)); op.timeLimit = std::chrono::seconds(3); @@ -80,15 +80,15 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsNoCnandidateOpOnGoing() { - // 构造mds中copyset当前状态 + // Construct the current state of copyset in mds // copyset-1(epoch=5, peers={1,2,3}, leader=1); ChunkServer cs(10, "testtoekn", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // 构造scheduler当前的状态 - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + // Construct the current state of the scheduler + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(10)); op.timeLimit = std::chrono::seconds(3); @@ -96,14 +96,15 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithRemoveOp() { - // mds存在copyset-1(epoch=5, peers={1,2,3,4}, leader=1); - // scheduler中copyset-1有operator: startEpoch=5, step=RemovePeer<4> + // mds has copyset-1(epoch=5, peers={1,2,3,4}, leader=1); + // There is an operator in copyset-1 in the scheduler: startEpoch=5, + // step=RemovePeer<4> ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3, 4 }); + std::set{1, 2, 3, 4}); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(4)); op.timeLimit = std::chrono::seconds(3); @@ -111,14 +112,15 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithRemoveOpOnGoing() { - // mds存在copyset-1(epoch=5, peers={1,2,3,4}, leader=1, , candidate=4); - // scheduler中copyset-1有operator: startEpoch=5, step=RemovePeer<4> + // mds has copyset-1(epoch=5, peers={1,2,3,4}, leader=1, , candidate=4); + // There is an operator in copyset-1 in the scheduler: startEpoch=5, + // step=RemovePeer<4> ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - hbtest_->UpdateCopysetTopo( - 1, 1, 5, 1, std::set{ 1, 2, 3, 4 }, 4); + hbtest_->UpdateCopysetTopo(1, 1, 5, 1, + std::set{1, 2, 3, 4}, 4); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(4)); op.timeLimit = std::chrono::seconds(3); @@ -126,12 +128,13 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithTransferOp() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1); - // scheduler中copyset-1有operator:startEpoch=5,step=TransferLeader{1>2} + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1); + // Copyset-1 in the scheduler has + // operator:startEpoch=5,step=TransferLeader{1>2} hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(1, 2)); op.timeLimit = std::chrono::seconds(3); @@ -139,12 +142,13 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithTransferOpOnGoing() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=2); - // scheduler中copyset-1有operator:startEpoch=5,step=TransferLeader{1>2} + // mds has 
copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=2); + // Copyset-1 in the scheduler has + // operator:startEpoch=5,step=TransferLeader{1>2} hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }, 2); + std::set{1, 2, 3}, 2); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(1, 2)); op.timeLimit = std::chrono::seconds(3); @@ -152,22 +156,23 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrePareMdsWithCandidateNoOp() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }, 4); + std::set{1, 2, 3}, 4); } void PrepareMdsWithChangeOp() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1); - // scheduler中copyset-1有operator:startEpoch=5,step=ChangePeer{3>4} + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1); + // Copyset-1 in the scheduler has + // operator:startEpoch=5,step=ChangePeer{3>4} ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(3, 4)); op.timeLimit = std::chrono::seconds(3); @@ -175,24 +180,25 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithChangeOpOnGoing() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); - // scheduler中copyset-1有operator:startEpoch=5,step=step=ChangePeer{3>4} + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); + // In the scheduler, copyset-1 has + // operator:startEpoch=5,step=step=ChangePeer{3>4} ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, - std::set{ 1, 2, 3 }, 4); + std::set{1, 2, 3}, 4); - Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, + Operator op(5, CopySetKey{1, 1}, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(3, 4)); op.timeLimit = std::chrono::seconds(3); hbtest_->AddOperatorToOpController(op); } - bool ValidateCopySet(const ::curve::mds::topology::CopySetInfo &expected) { + bool ValidateCopySet(const ::curve::mds::topology::CopySetInfo& expected) { ::curve::mds::topology::CopySetInfo copysetInfo; if (!hbtest_->topology_->GetCopySet( - CopySetKey{ expected.GetLogicalPoolId(), expected.GetId() }, + CopySetKey{expected.GetLogicalPoolId(), expected.GetId()}, ©setInfo)) { return false; } @@ -226,9 +232,9 @@ class HeartbeatBasicTest : public ::testing::Test { return true; } - void BuildCopySetInfo(CopySetInfo *info, uint64_t epoch, + void BuildCopySetInfo(CopySetInfo* info, uint64_t epoch, ChunkServerIdType leader, - const std::set &members, + const std::set& members, ChunkServerIdType candidateId = UNINTIALIZE_ID) { info->SetEpoch(epoch); info->SetLeader(leader); @@ -255,14 +261,14 @@ class HeartbeatBasicTest : public ::testing::Test { }; TEST_F(HeartbeatBasicTest, test_request_no_chunkserverID) { - // 空的HeartbeatRequest + // Empty HeartbeatRequest ChunkServerHeartbeatRequest req; 
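    // req is deliberately left empty (no chunkserver id set), so the RPC is
    // expected to fail; SENDHBFAIL/SENDHBOK appear to be aliases for the
    // expectFailed flag of SendHeartbeat() declared in common.h.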
ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBFAIL, &rep);
 }

 TEST_F(HeartbeatBasicTest, test_mds_donnot_has_this_chunkserver) {
-    // mds不存在该chunkserver
+    // The chunkserver does not exist in the mds
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(3, &req);
    req.set_chunkserverid(4);
@@ -273,8 +279,8 @@ TEST_F(HeartbeatBasicTest, test_mds_donnot_has_this_chunkserver) {
 }

 TEST_F(HeartbeatBasicTest, test_chunkserver_ip_port_not_match) {
-    // chunkserver上报的id相同,ip和port不匹配
-    // ip不匹配
+    // The reported id matches, but the ip and port do not
+    // IP mismatch
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(3, &req);
    req.set_ip("127.0.0.1");
@@ -283,14 +289,14 @@ TEST_F(HeartbeatBasicTest, test_chunkserver_ip_port_not_match) {
    ASSERT_EQ(::curve::mds::heartbeat::hbChunkserverIpPortNotMatch,
              rep.statuscode());

-    // port不匹配
+    // Port mismatch
    req.set_ip("10.198.100.3");
    req.set_port(1111);
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
    ASSERT_EQ(::curve::mds::heartbeat::hbChunkserverIpPortNotMatch,
              rep.statuscode());

-    // token不匹配
+    // Token mismatch
    req.set_ip("10.198.100.3");
    req.set_port(9000);
    req.set_token("youdao");
@@ -300,20 +306,20 @@ TEST_F(HeartbeatBasicTest, test_chunkserver_ip_port_not_match) {
 }

 TEST_F(HeartbeatBasicTest, test_chunkserver_offline_then_online) {
-    // chunkserver上报心跳时间间隔大于offline
-    // sleep 800ms, 该chunkserver onffline状态
+    // The chunkserver's heartbeat interval exceeds the offline timeout.
+    // After sleeping 800ms the chunkserver is in OFFLINE state.
    std::this_thread::sleep_for(std::chrono::milliseconds(800));
    ChunkServer out;
    hbtest_->topology_->GetChunkServer(1, &out);
    ASSERT_EQ(OnlineState::OFFLINE, out.GetOnlineState());

-    // chunkserver上报心跳,chunkserver online
+    // The chunkserver reports a heartbeat and comes back online
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(out.GetId(), &req);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);

-    // 后台健康检查程序把chunksrver更新为onlinne状态
+    // The background health checker updates the chunkserver to ONLINE status
    uint64_t now = ::curve::common::TimeUtility::GetTimeofDaySec();
    bool updateSuccess = false;
    while (::curve::common::TimeUtility::GetTimeofDaySec() - now <= 2) {
@@ -330,8 +336,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition1) {
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(1, &req);
    ::curve::mds::topology::CopySetInfo copysetInfo;
-    ASSERT_TRUE(
-        hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo));
+    ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo));

    // copyset-1(epoch=1, peers={1,2,3}, leader=1)
    CopySetInfo csInfo(1, 1);
@@ -339,8 +344,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition1) {
    hbtest_->AddCopySetToRequest(&req, csInfo);
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    ASSERT_TRUE(
-        hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo));
+    ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo));
    ASSERT_EQ(1, copysetInfo.GetEpoch());
    ASSERT_EQ(1, copysetInfo.GetLeader());
 }

@@ -349,8 +353,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition2) {
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(1, &req);
    ::curve::mds::topology::CopySetInfo copysetInfo;
-    ASSERT_TRUE(
-        hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo));
+
ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); @@ -361,8 +364,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition2) { hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::ADD_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_TRUE(copysetInfo.HasCandidate()); @@ -373,7 +375,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition3) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - CopySetKey key{ 1, 1 }; + CopySetKey key{1, 1}; ASSERT_TRUE(hbtest_->topology_->GetCopySet(key, ©setInfo)); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); @@ -387,11 +389,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_is_initial_state_condition3) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3, 5 }; + std::set res{1, 2, 3, 5}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -400,8 +401,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition4) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ChunkServer cs4(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs4); ChunkServer cs5(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); @@ -417,11 +417,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition4) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3, 5 }; + std::set res{1, 2, 3, 5}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(4, copysetInfo.GetCandidate()); ASSERT_EQ(0, rep.needupdatecopysets_size()); @@ -431,8 +430,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition5) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); // copyset-1(epoch=0, peers={1,2,3}, leader=0) CopySetInfo csInfo(1, 1); @@ -441,11 +439,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition5) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - 
hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -454,8 +451,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition6) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); // copyset-1(epoch=0, peers={1,2,3}, leader=1) CopySetInfo csInfo(1, 1); @@ -464,11 +460,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition6) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -477,8 +472,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition7) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); // copyset-1(epoch=0, peers={1,2,3}, leader=1) CopySetInfo csInfo(1, 1); @@ -487,11 +481,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition7) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -500,8 +493,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition8) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); // copyset-1(epoch=1, peers={1,2,3}, leader=0) CopySetInfo csInfo(1, 1); @@ -510,11 +502,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition8) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -523,12 +514,11 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition9) { ChunkServerHeartbeatRequest req; 
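    // The pattern used by these condition tests, sketched with hypothetical
    // values: fill a CopySetInfo via the fixture's BuildCopySetInfo() and
    // attach it to the request before sending, e.g.
    //   CopySetInfo info(1, 1);
    //   BuildCopySetInfo(&info, 2, 1, std::set{1, 2, 3});
    //   hbtest_->AddCopySetToRequest(&req, info);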
hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // 上报copyset-1(epoch=2, peers={1,2,3,4}, leader=1) + // Report copyset-1(epoch=2, peers={1,2,3,4}, leader=1) auto copysetMembers = copysetInfo.GetCopySetMembers(); copysetMembers.emplace(4); CopySetInfo csInfo(1, 1); @@ -537,11 +527,10 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition9) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -550,8 +539,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition10) { ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); @@ -564,415 +552,398 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition10) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(0, copysetInfo.GetEpoch()); ASSERT_EQ(0, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(0, rep.needupdatecopysets_size()); } -// 上报的是leader +// Reported as the leader TEST_F(HeartbeatBasicTest, test_leader_report_consistent_with_mds) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver1上报的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // chunkserver1 report copyset-1(epoch=2, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 2, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 2, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_leader_report_epoch_bigger) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update 
copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver1上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 report copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空,mds更新epoch为5 + // response is empty, mds updates epoch to 5 ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_leader_report_epoch_bigger_leader_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=5, peers={1,2,3}, leader=2) + // chunkserver2 report copyset-1(epoch=5, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空,mds更新epoch为5,leader为2 + // response is empty, mds updates epoch to 5, and leader to 2 ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(2, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } -// 上报的是follower +// Reported as a follower TEST_F(HeartbeatBasicTest, test_follower_report_consistent_with_mds) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=2, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 2, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 2, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空 + // response is empty ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); 
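    // The follower's report matches the mds view exactly, so the topology
    // record is left untouched: epoch is still 2, and the leader and member
    // checks below are unchanged as well.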
ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } -// 上报的是follower +// Reported as a follower TEST_F(HeartbeatBasicTest, test_follower_report_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=2, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=2, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 2, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 2, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空 + // response is empty ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_bigger) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=3, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=3, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 3, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 3, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_bigger_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=3, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=3, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 3, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 3, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + 
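    // Note: unlike the leader case earlier, a follower reporting a larger
    // epoch (3 here) does not advance the epoch recorded in mds; the asserts
    // below still expect epoch 2 and leader 1.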
ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_bigger_peers_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=3, peers={1,2,3,4}, leader=1) + // chunkserver2 report copyset-1(epoch=3, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 3, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 3, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=1, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 1, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=1, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 1, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); 
::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller_peers_not_same1) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=1, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 1, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller_peers_not_same2) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=1, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 1, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0,
1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + std::set{1, 2, 3}); + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0_peers_not_same1) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0_peers_not_same2) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0,
peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } -// 上报的不是复制组成员 +// The reporter is not a member of the replication group TEST_F(HeartbeatBasicTest, test_other_report_consistent_with_mds) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // chunkserver4 reports copyset-1(epoch=2, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 2, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 2, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -980,29 +951,28 @@ TEST_F(HeartbeatBasicTest, test_other_report_consistent_with_mds) { ASSERT_EQ(3, conf.peers_size()); ASSERT_EQ(2, conf.epoch()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=1, peers={1,2,3}, leader=1) + // chunkserver4 reports copyset-1(epoch=1, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 1, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1010,29 +980,28 @@
TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller) { ASSERT_EQ(3, conf.peers_size()); ASSERT_EQ(2, conf.epoch()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller_peers_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=1, peers={1,2}, leader=1) + // chunkserver4 reports copyset-1(epoch=1, peers={1,2}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 1, 1, std::set{ 1, 2 }); + BuildCopySetInfo(&csInfo, 1, 1, std::set{1, 2}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1040,30 +1009,29 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller_peers_not_same) { ASSERT_EQ(3, conf.peers_size()); ASSERT_EQ(2, conf.epoch()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1071,32 +1039,31 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0) { ASSERT_EQ(3, conf.peers_size()); ASSERT_EQ(2, conf.epoch()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 };
+ std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0_peers_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, - std::set{ 1, 2, 3 }); + std::set{1, 2, 3}); ChunkServer cs4(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs4); ChunkServer cs5(5, "testtoken", "nvme", 3, "10.198.100.3", 9090, "/"); hbtest_->PrepareAddChunkServer(cs5); - // chunkserver4上报的copyset-1(epoch=0, peers={1,2,3,5}, leader=0) + // chunkserver4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1104,29 +1071,28 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0_peers_not_same) { ASSERT_EQ(3, conf.peers_size()); ASSERT_EQ(2, conf.epoch()); ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition1) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1140,24 +1106,23 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition1) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition2) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // conf.gChangeInfo={peer: 10, type: AddPeer} ) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }, 10); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}, 10); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::ADD_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology
::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); ASSERT_EQ(10, copysetInfo.GetCandidate()); } @@ -1165,246 +1130,238 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition2) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition3) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(6, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition4) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(6, copysetInfo.GetEpoch()); ASSERT_EQ(2, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition5) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver1 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep;
hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(6, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3, 10 }; + std::set res{1, 2, 3, 10}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition6) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition7) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition8) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); }
TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition9) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition10) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition11) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition12) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req;
hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition13) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1413,16 +1370,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition14) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1431,48 +1388,48 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition15) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition16) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition17) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req;
hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1481,16 +1438,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition18) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1499,128 +1456,126 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition19) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition20) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition21) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition22) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver10 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req;
hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition23) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver10 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(1, copysetInfo.GetLeader()); - std::set res{ 1, 2, 3 }; + std::set res{1, 2, 3}; ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition24) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition25) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1629,16 +1584,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition26) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver10 reports
copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1647,48 +1602,48 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition27) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition28) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition29) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1697,16 +1652,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition30) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1715,16 +1670,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition31) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); -
// chunkserver10上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1733,16 +1688,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition32) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1756,16 +1711,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition33) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1781,16 +1736,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition34) { ChunkServer cs5(5, "testtoekn", "nvme", 3, "10.198.100.3", 9003, "/"); hbtest_->PrepareAddChunkServer(cs5); - // chunkserver4上报的copyset-1(epoch=4, peers={1,2,3,5}, leader=0) + // chunkserver4 reports copyset-1(epoch=4, peers={1,2,3,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1804,16 +1759,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition35) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); -
BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1825,16 +1780,16 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition35) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition1) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(ConfigChangeType::ADD_PEER, rep.needupdatecopysets(0).type()); ASSERT_EQ("10.198.100.3:9001:0", @@ -1844,42 +1799,41 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition1) { TEST_F(HeartbeatBasicTest, test_test_mdsWithCandidate_OpOnGoing_condition2) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver1上报 + // chunkserver1 reports // copyset-1(epoch=5, peers={1,2,3}, leader=1, // conf.gChangeInfo={peer: 10, type: AddPeer} ) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }, 10); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}, 10); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::ADD_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition3) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology中copyset的状态 + // Check the status of copyset in topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetLeader()); ASSERT_EQ(6, copysetInfo.GetEpoch()); ASSERT_EQ(UNINTIALIZE_ID, copysetInfo.GetCandidate()); @@ -1888,206 +1842,200 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition3) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition4) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver1上报copyset-1(epoch=6,
peers={1,2,3,10}, leader=2) + // chunkserver1 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology中copyset的状态 + // Check the status of copyset in topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(6, copysetInfo.GetEpoch()); ASSERT_EQ(UNINTIALIZE_ID, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3, 10 }; + std::set peers{1, 2, 3, 10}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition5) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=7, peers={1,2,3, 10}, leader=2) + // chunkserver2 reports copyset-1(epoch=7, peers={1,2,3, 10}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 2, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 7, 2, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology中copyset的状态 + // Check the status of copyset in topology ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(2, copysetInfo.GetLeader()); ASSERT_EQ(7, copysetInfo.GetEpoch()); ASSERT_EQ(UNINTIALIZE_ID, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3, 10 }; + std::set peers{1, 2, 3, 10}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition6) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition7) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=5,
peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition8) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition9) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition10) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req;
hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition11) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2096,28 +2044,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition12) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in the scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10,
copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2126,84 +2073,81 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition13) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition14) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition15) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, 
ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2212,28 +2156,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition16) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2242,196 +2185,189 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition17) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition18) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver10 reports 
copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition19) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition20) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver10 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, 
copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition21) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver10 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 10 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 10}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition22) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition23) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, 
rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2440,28 +2376,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition24) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2470,86 +2405,83 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition25) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition26) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) 
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition27) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2558,19 +2490,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition28) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -2580,14 +2512,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition28) { 
rep.needupdatecopysets(0).peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", rep.needupdatecopysets(0).peers(2).address()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2596,19 +2527,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition29) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -2618,14 +2549,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition29) { rep.needupdatecopysets(0).peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", rep.needupdatecopysets(0).peers(2).address()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE( - hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); + ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, ©setInfo)); ASSERT_EQ(1, copysetInfo.GetLeader()); ASSERT_EQ(5, copysetInfo.GetEpoch()); ASSERT_EQ(10, copysetInfo.GetCandidate()); - std::set peers{ 1, 2, 3 }; + std::set peers{1, 2, 3}; ASSERT_EQ(peers, copysetInfo.GetCopySetMembers()); } @@ -2634,19 +2564,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition30) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -2656,30 +2586,29 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition30) { rep.needupdatecopysets(0).peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", rep.needupdatecopysets(0).peers(2).address()); - // 
+    // Check copyset
     ::curve::mds::topology::CopySetInfo copysetInfo;
-    ASSERT_TRUE(
-        hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo));
+    ASSERT_TRUE(hbtest_->topology_->GetCopySet(CopySetKey{1, 1}, &copysetInfo));
     ASSERT_EQ(1, copysetInfo.GetLeader());
     ASSERT_EQ(5, copysetInfo.GetEpoch());
     ASSERT_EQ(10, copysetInfo.GetCandidate());
-    std::set peers{ 1, 2, 3 };
+    std::set peers{1, 2, 3};
     ASSERT_EQ(peers, copysetInfo.GetCopySetMembers());
 }

 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_1) {
     PrepareMdsWithRemoveOp();
-    // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid());
     ASSERT_EQ(5, rep.needupdatecopysets(0).epoch());
@@ -2694,48 +2623,47 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_1) {
     ASSERT_EQ(ConfigChangeType::REMOVE_PEER, rep.needupdatecopysets(0).type());
     ASSERT_EQ("10.198.100.3:9001:0",
               rep.needupdatecopysets(0).configchangeitem().address());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
 }

 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_2) {
     PrepareMdsWithRemoveOp();
-    // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1,
+    // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1,
     // cofigChangeInfo={peer: 4, type:REMOVE_PEER})
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3, 4 },
-                     4);
+    BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3, 4}, 4);
     hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::REMOVE_PEER);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
 }

 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_3) {
     PrepareMdsWithRemoveOp();
-    // chunkserver-1上报上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(6);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
@@ -2743,22 +2671,22 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_3) {

 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_4) {
     PrepareMdsWithRemoveOp();
-    // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(6);
-    csInfo.SetCopySetMembers(std::set{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
@@ -2766,23 +2694,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_4) {

 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_5) {
     PrepareMdsWithRemoveOp();
-    // chunkserver-2上报copyset-1(epoch=7, peers={1,2,3}, leader=2)
+    // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,3}, leader=2)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 7, 2, std::set{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 7, 2, std::set{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(7);
     csInfo.SetLeader(2);
-    csInfo.SetCopySetMembers(std::set{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
@@ -2790,23 +2718,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_5) {

 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_6) {
     PrepareMdsWithRemoveOp();
-    // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast(ops[0].step.get());
+    auto step = dynamic_cast(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
@@ -2814,24 +2742,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_6) {

 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_7) {
     PrepareMdsWithRemoveOp();
-    // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(1);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast(ops[0].step.get());
+    auto step = dynamic_cast(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
@@ -2839,24 +2767,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_7) {

 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_8) {
     PrepareMdsWithRemoveOp();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast(ops[0].step.get());
+    auto step = dynamic_cast(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
@@ -2864,25 +2792,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_8) {

 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_9) {
     PrepareMdsWithRemoveOp();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast(ops[0].step.get());
+    auto step = dynamic_cast(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
@@ -2890,25 +2818,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_9) {

 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_10) {
     PrepareMdsWithRemoveOp();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set{1, 2, 3, 4});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast(ops[0].step.get());
+    auto step = dynamic_cast(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
@@ -2916,26 +2844,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_10) {

 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_11) {
     PrepareMdsWithRemoveOp();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set{1, 2, 3, 4});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast(ops[0].step.get());
+    auto step = dynamic_cast(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
@@ -2943,43 +2871,43 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_11) {

 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_12) {
     PrepareMdsWithRemoveOp();
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast(ops[0].step.get());
+    auto step = dynamic_cast(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());

-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -2987,44 +2915,44 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_12) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_13) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 report (epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3, 4}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-2上报(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 report (epoch=4, peers={1,2,3}, leader=0) hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -3032,43 +2960,43 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_13) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_14) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); 
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set{1, 2, 3, 4});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    step = dynamic_cast(ops[0].step.get());
+    step = dynamic_cast(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
@@ -3076,45 +3004,45 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_14) {

 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_15) {
     PrepareMdsWithRemoveOp();
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast(ops[0].step.get());
+    auto step = dynamic_cast(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());

-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set{1, 2, 3, 4});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    step = dynamic_cast(ops[0].step.get());
+    step = dynamic_cast(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
@@ -3124,17 +3052,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_16) {
     ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs);
-    // 非复制组成员chunkserver-5上报
+    // chunkserver-5, which is not in the replication group, reports
     // copyset-1(epoch=5, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(5, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     CopySetConf conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -3145,13 +3073,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_16) {
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(1);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast(ops[0].step.get());
+    auto step = dynamic_cast(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
@@ -3161,17 +3089,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_17) {
     ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs);
-    // 非复制组成员chunkserver-5上报
+    // chunkserver-5, which is not in the replication group, reports
     // copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(5, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     CopySetConf conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -3182,14 +3110,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_17) {
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast(ops[0].step.get());
+    auto step = dynamic_cast(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
@@ -3199,17 +3127,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_18) {
     ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs);
-    // 非复制组成员chunkserver-5上报
+    // chunkserver-5, which is not in the replication group, reports
     // copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(5, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     CopySetConf conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -3220,14 +3148,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_18) {
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast(ops[0].step.get());
+    auto step = dynamic_cast(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
@@ -3237,17 +3165,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_19) {
     ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs);
-    // 非复制组成员chunkserver-5上报
+    // chunkserver-5, which is not in the replication group, reports
     // copyset-1(epoch=0, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(5, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     CopySetConf conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -3258,847 +3186,846 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_19) {
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set{1, 2, 3, 4});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast(ops[0].step.get());
+    auto step = dynamic_cast(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }

 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_1) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1,
+    // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1,
     // configChangeInfo={peer: 4, type: REMOVE_PEER})
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3, 4 },
-                     4);
+    BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3, 4}, 4);
     hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::REMOVE_PEER);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast(ops[0].step.get());
+    auto step = dynamic_cast(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }

 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_2) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(6);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_3) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_4) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_5) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 7, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_6) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler 
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_7) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_8) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_9) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_10) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_11) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_12) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_13) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_14) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_15) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_16) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_17) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_18) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_19) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_20) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_21) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3,4}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_22) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_23) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_24) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_25) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_26) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_27) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_28) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_29) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_30) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_31) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_32) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1)
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_33) {
     PrepareMdsWithRemoveOpOnGoing();
-    // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
@@ -4108,17 +4035,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_34) {
     ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs);
-    // 非复制组成员chunkserver-5上报
+    // chunkserver-5, which is not in the replication group, reports
     // copyset-1(epoch=5, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(5, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     CopySetConf conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -4129,14 +4056,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_34) {
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
@@ -4146,17 +4073,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_35) {
     ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs);
-    // 非复制组成员chunkserver-5上报
+    // chunkserver-5, which is not in the replication group, reports
     // copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(5, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     CopySetConf conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -4167,27 +4094,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_35) {
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 
-    // 非复制组成员chunkserver-5上报
+    // chunkserver-5, which is not in the replication group, reports
     // copyset-1(epoch=4, peers={1,2,3}, leader=0)
     rep.Clear();
     req.Clear();
     hbtest_->BuildBasicChunkServerRequest(5, &req);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -4198,16 +4125,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_35) {
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
@@ -4217,75 +4144,75 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_36) {
     ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs);
-    // 非复制组成员chunkserver-5上报
+    // chunkserver-5, which is not in the replication group, reports
     // copyset-1(epoch=0, peers={1,2,3,4}, leader=0
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(5, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     CopySetConf conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
     ASSERT_EQ(1, conf.copysetid());
     ASSERT_EQ(4, conf.peers_size());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    auto step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 
-    // 非复制组成员chunkserver-5上报
+    // chunkserver-5, which is not in the replication group, reports
     // copyset-1(epoch=0, peers={1,2,3}, leader=0)
     rep.Clear();
     req.Clear();
     hbtest_->BuildBasicChunkServerRequest(5, &req);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
     ASSERT_EQ(1, conf.copysetid());
     ASSERT_EQ(4, conf.peers_size());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3, 4});
     csInfo.SetCandidate(4);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<RemovePeer *>(ops[0].step.get());
+    step = dynamic_cast<RemovePeer*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(4, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_1) {
     PrepareMdsWithTransferOp();
-    // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid());
     ASSERT_EQ(5, rep.needupdatecopysets(0).epoch());
@@ -4299,350 +4226,350 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_1) {
               rep.needupdatecopysets(0).type());
     ASSERT_EQ("10.198.100.2:9000:0",
               rep.needupdatecopysets(0).configchangeitem().address());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_2) {
     PrepareMdsWithTransferOp();
-    // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
     // configChangeInfo={peer: 2, type: TRANSFER_LEADER})
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }, 2);
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}, 2);
     hbtest_->AddCopySetToRequest(&req, csInfo,
                                  ConfigChangeType::TRANSFER_LEADER);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_3) {
     PrepareMdsWithTransferOp();
-    // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_4) {
     PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(0, ops.size());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_5) {
     PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_6) {
     PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(1);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_7) {
     PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_8) {
     PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_9) {
     PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1)
     ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs);
     rep.Clear();
    req.Clear();
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_10) {
     PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 
-    // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
     ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs);
     rep.Clear();
     req.Clear();
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_11) {
     PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1)
     ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs);
     rep.Clear();
     req.Clear();
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_12) {
     PrepareMdsWithTransferOp();
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(2, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 
-    // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
+    // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
     ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs);
     rep.Clear();
     req.Clear();
     hbtest_->BuildBasicChunkServerRequest(2, &req);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 }
@@ -4651,15 +4578,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_13) {
     PrepareMdsWithTransferOp();
     ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs);
-    // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     auto conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -4669,13 +4596,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_13) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetLeader(1);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 }
@@ -4684,15 +4611,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_14) {
     PrepareMdsWithTransferOp();
     ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/");
     hbtest_->PrepareAddChunkServer(cs);
-    // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0)
+    // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(4, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     auto conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -4702,14 +4629,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_14) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 }
@@ -4720,15 +4647,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_15) {
     hbtest_->PrepareAddChunkServer(cs1);
     ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs2);
-    // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
+    // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(5, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     auto conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -4738,15 +4665,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_15) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 }
@@ -4758,15 +4685,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_16) {
     ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/");
     hbtest_->PrepareAddChunkServer(cs2);
 
-    // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0)
+    // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(5, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     auto conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -4776,25 +4703,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_16) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 
-    // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
+    // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0)
     req.Clear();
     rep.Clear();
     hbtest_->BuildBasicChunkServerRequest(5, &req);
-    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 });
+    BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4});
     hbtest_->AddCopySetToRequest(&req, csInfo);
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(1, rep.needupdatecopysets_size());
     conf = rep.needupdatecopysets(0);
     ASSERT_EQ(1, conf.logicalpoolid());
@@ -4804,346 +4731,346 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_16) {
     ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address());
     ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address());
     ASSERT_EQ(5, conf.epoch());
-    // 检查copyset
+    // Check copyset
     csInfo.SetEpoch(5);
     csInfo.SetLeader(1);
-    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 });
+    csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_1) {
     PrepareMdsWithTransferOpOnGoing();
-    // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1,
+    // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1,
     // configChangeInfo={peer: 2, type: TRANSFER_LEADER})
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }, 2);
+    BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}, 2);
     hbtest_->AddCopySetToRequest(&req, csInfo,
                                  ConfigChangeType::TRANSFER_LEADER);
     ChunkServerHeartbeatResponse rep;
     hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
-    // 检查response
+    // Check response
     ASSERT_EQ(0, rep.needupdatecopysets_size());
-    // 检查copyset
+    // Check copyset
     ASSERT_TRUE(ValidateCopySet(csInfo));
-    // 检查scheduler中的op
+    // Check op in scheduler
     auto ops = hbtest_->coordinator_->GetOpController()->GetOperators();
     ASSERT_EQ(1, ops.size());
-    auto step = dynamic_cast<TransferLeader *>(ops[0].step.get());
+    auto step = dynamic_cast<TransferLeader*>(ops[0].step.get());
     ASSERT_TRUE(nullptr != step);
     ASSERT_EQ(2, step->GetTargetPeer());
 }
 
 TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_2) {
     PrepareMdsWithTransferOpOnGoing();
-    // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1)
+    // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1)
     ChunkServerHeartbeatRequest req;
     hbtest_->BuildBasicChunkServerRequest(1, &req);
     CopySetInfo csInfo(1, 1);
-    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });
+    BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_3) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_4) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_5) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_6) { PrepareMdsWithTransferOpOnGoing(); - //
chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_7) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(2); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_8) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); +
BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_9) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset
csInfo.SetEpoch(5); csInfo.SetCandidate(2); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_10) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } @@ -5152,15 +5079,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_11) { PrepareMdsWithTransferOpOnGoing(); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@
-5170,14 +5097,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_11) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } @@ -5186,15 +5113,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_12) { PrepareMdsWithTransferOpOnGoing(); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5204,15 +5131,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_12) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } @@ -5224,15 +5151,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_13) { ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5242,26 +5169,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_13) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step =
dynamic_cast<TransferLeader *>(ops[0].step.get()); + auto step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5271,661 +5198,661 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_13) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast<TransferLeader *>(ops[0].step.get()); + step = dynamic_cast<TransferLeader *>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_1) { PrePareMdsWithCandidateNoOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_2) { PrePareMdsWithCandidateNoOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 4, type: ADD_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }, 4); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}, 4); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::ADD_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_3) { PrePareMdsWithCandidateNoOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req;
hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_4) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_5) { PrePareMdsWithCandidateNoOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_6) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,3,4}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,3,4}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_7) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); -
BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_8) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_9) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_10) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_11) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest
req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_12) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_13) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check
op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_14) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_15) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check
op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_16) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_17) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_18) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4);
ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_19) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_20) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_21) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_22) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req);
CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_23) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=1) ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_24) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=0,
peers={1,2,3,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=0) ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -5934,16 +5861,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_25) { PrePareMdsWithCandidateNoOp(); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5953,11 +5880,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_25) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -5966,17 +5893,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_26) { PrePareMdsWithCandidateNoOp(); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5986,12 +5913,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_26) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops
= hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -6000,17 +5927,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_27) { PrePareMdsWithCandidateNoOp(); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6020,13 +5947,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_27) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -6035,15 +5962,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_28) { PrePareMdsWithCandidateNoOp(); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6053,23 +5980,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_28) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6079,468 +6006,467 @@ TEST_F(HeartbeatBasicTest,
test_mdsWithCandidateNoOp_28) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_1) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_2) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 2, type: TRANSFER_LEADER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }, 2); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}, 2); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::TRANSFER_LEADER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_3) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0,
rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_4) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) // configChangeInfo={peer: 2, type: TRANSFER_LEADER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }, 2); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3}, 2); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::TRANSFER_LEADER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_5) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); + std::set<ChunkServerIdType>{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_6) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); + std::set<ChunkServerIdType>{1, 2, 3}); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) // configChangeInfo={peer: 2, type: TRANSFER_LEADER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }, - 2); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}, 2); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::TRANSFER_LEADER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0,
rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_7) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_8) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_9) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6,
peers={1,2,3}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_10) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_11) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0,
rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_12) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_13) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_14) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest,
test_mdsCopysetNoLeader_15) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6550,29 +6476,29 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_15) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_16) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6582,118 +6508,118 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_16) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_17) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1,
std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,5}, leader=1) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_18) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,5}, leader=0) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops =
hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_19) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); + std::set<ChunkServerIdType>{1, 2, 3}); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6703,24 +6629,24 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_19) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,5}, leader=1) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6730,31 +6656,31 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_19) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_20) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1,
2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6764,23 +6690,23 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_20) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,5}, leader=0) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6790,31 +6716,31 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_20) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_21) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); + std::set<ChunkServerIdType>{1, 2, 3}); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6824,24 +6750,24 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_21) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,5}, leader=1) + //
chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=1) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6851,31 +6777,31 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_21) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, - std::set<ChunkServerIdType>{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + std::set<ChunkServerIdType>{1, 2, 3}); + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6885,23 +6811,23 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=0) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); hbtest_->BuildBasicChunkServerRequest(4, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6911,11 +6837,11 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5,
conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -6923,15 +6849,15 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_1) { PrepareMdsWithChangeOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6944,14 +6870,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_1) { ASSERT_EQ(ConfigChangeType::CHANGE_PEER, conf.type()); ASSERT_EQ("10.198.100.3:9001:0", conf.configchangeitem().address()); ASSERT_EQ("10.198.100.3:9000:0", conf.oldpeer().address()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(3, step->GetOldPeer()); ASSERT_EQ(4, step->GetTargetPeer()); @@ -6960,24 +6886,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_1) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_2) { PrepareMdsWithChangeOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 4, type: CHANGE_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }, 4); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}, 4); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::CHANGE_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -6985,20 +6911,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_2) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_3) { PrepareMdsWithChangeOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3});
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7006,20 +6932,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_3) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_4) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7029,20 +6955,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_5) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,5}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7050,20 +6976,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_5) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_6) { PrepareMdsWithChangeOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7071,20 +6997,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_6) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_7) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,4}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,4}, leader=2)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 7, 2, std::set<ChunkServerIdType>{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7092,20 +7018,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_7) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_8) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=7, peers={1,2,4}, leader=4) + // chunkserver-4 reports copyset-1(epoch=7, peers={1,2,4}, leader=4) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 4, std::set<ChunkServerIdType>{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 7, 4, std::set<ChunkServerIdType>{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7113,23 +7039,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_8) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_9) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7137,24 +7063,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_9) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_10) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops =
hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7162,24 +7088,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_10) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_11) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7187,25 +7113,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_11) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_12) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7213,25 +7139,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_12) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_13) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1,
ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7239,26 +7165,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_13) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_14) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7268,25 +7194,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_15) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7296,26 +7222,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_16) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); +
csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7323,24 +7249,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_16) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_17) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7348,25 +7274,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_17) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_18) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7376,25 +7302,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_19) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3});
ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7404,26 +7330,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_20) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7431,24 +7357,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_20) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_21) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7456,25 +7382,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_21) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_22) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5);
ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7484,25 +7410,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_23) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7512,26 +7438,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_24) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set<ChunkServerIdType>{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set<ChunkServerIdType>{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast<ChangePeer *>(ops[0].step.get()); + auto step = dynamic_cast<ChangePeer*>(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7539,23 +7465,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_24) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_25) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set<ChunkServerIdType>{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0,
rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7563,24 +7489,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_25) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_26) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7588,25 +7514,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_26) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_27) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7614,26 +7540,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_27) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_28) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - 
csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7641,24 +7567,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_28) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_29) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7666,25 +7592,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_29) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_30) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7694,25 +7620,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_31) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 
1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7722,26 +7648,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_32) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7749,24 +7675,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_32) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_33) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7774,25 +7700,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_33) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_34) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset 
csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7802,25 +7728,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_35) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7830,26 +7756,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_36) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7859,16 +7785,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_37) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 
检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7878,14 +7804,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_37) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7895,16 +7821,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7914,13 +7840,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); @@ -7928,13 +7854,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { hbtest_->PrepareAddChunkServer(cs2); req.Clear(); rep.Clear(); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=1) hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 6 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 6}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7944,14 +7870,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, 
ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -7961,16 +7887,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_39) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7980,28 +7906,28 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); ChunkServer cs2(6, "testtoken", "nvme", 3, "10.198.100.3", 9003, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 6 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 6}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8011,15 +7937,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8029,16 +7955,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, 
std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8048,26 +7974,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8077,15 +8003,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8093,15 +8019,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_1) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8114,14 +8040,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_1) { ASSERT_EQ(ConfigChangeType::CHANGE_PEER, conf.type()); ASSERT_EQ("10.198.100.3:9001:0", conf.configchangeitem().address()); ASSERT_EQ("10.198.100.3:9000:0", conf.oldpeer().address()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); 
ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(3, step->GetOldPeer()); ASSERT_EQ(4, step->GetTargetPeer()); @@ -8130,24 +8056,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_1) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_2) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 4, type: CHANGE_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }, 4); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}, 4); hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::CHANGE_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8155,20 +8081,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_2) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_3) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8176,20 +8102,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_3) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_4) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 2, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 2, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8199,20 +8125,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_5) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 
chunkserver-1上报copyset-1(epoch=6, peers={1,2,5}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8220,20 +8146,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_5) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_6) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8241,20 +8167,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_6) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_7) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,4}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,4}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 2, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 7, 2, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8262,20 +8188,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_7) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_8) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=7, peers={1,2,4}, leader=4) + // chunkserver-4 reports copyset-1(epoch=7, peers={1,2,4}, leader=4) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 7, 4, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 7, 4, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops 
= hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8283,24 +8209,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_8) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_9) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8308,25 +8234,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_9) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_10) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8334,25 +8260,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_10) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_11) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, 
step->GetTargetPeer()); } @@ -8360,26 +8286,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_11) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_12) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8387,26 +8313,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_12) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_13) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8414,27 +8340,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_13) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_14) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = 
hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8444,26 +8370,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_15) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8473,27 +8399,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_16) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8501,25 +8427,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_16) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_17) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 
检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8527,26 +8453,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_17) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_18) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8556,26 +8482,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_19) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8585,27 +8511,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_20) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 
检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8613,25 +8539,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_20) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_21) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8639,26 +8565,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_21) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_22) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8668,26 +8594,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_23) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, 
csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8697,27 +8623,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_24) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8725,24 +8651,24 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_24) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_25) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8750,25 +8676,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_25) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_26) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - 
BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8776,26 +8702,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_26) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_27) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 1, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8803,27 +8729,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_27) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_28) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 6, 0, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8831,25 +8757,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_28) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_29) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-4 
reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8857,26 +8783,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_29) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_30) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8886,26 +8812,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_31) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8915,27 +8841,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_32) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 
chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8943,25 +8869,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_32) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_33) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8969,26 +8895,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_33) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_34) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -8998,26 +8924,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_35) { ChunkServer cs(5, 
"testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 1, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -9027,27 +8953,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_36) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -9057,16 +8983,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_37) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 5, 0, std::set{ 1, 2, 5 }); + BuildCopySetInfo(&csInfo, 5, 0, std::set{1, 2, 5}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9076,15 +9002,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_37) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset 
csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -9094,16 +9020,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9113,15 +9039,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); @@ -9129,13 +9055,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) { hbtest_->PrepareAddChunkServer(cs2); req.Clear(); rep.Clear(); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=0) hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 6 }); + BuildCopySetInfo(&csInfo, 4, 0, std::set{1, 2, 6}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9145,16 +9071,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -9164,16 +9090,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_39) { ChunkServer cs(5, 
"testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9183,28 +9109,28 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); ChunkServer cs2(6, "testtoken", "nvme", 3, "10.198.100.3", 9003, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=1) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 6 }); + BuildCopySetInfo(&csInfo, 4, 1, std::set{1, 2, 6}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9214,15 +9140,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } @@ -9232,16 +9158,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_40) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 3}); hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf 
= rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9251,27 +9177,27 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_40) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - auto step = dynamic_cast(ops[0].step.get()); + auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); - BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 4 }); + BuildCopySetInfo(&csInfo, 0, 0, std::set{1, 2, 4}); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9281,16 +9207,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_40) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); - csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); + csInfo.SetCopySetMembers(std::set{1, 2, 3}); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - step = dynamic_cast(ops[0].step.get()); + step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); } diff --git a/test/integration/heartbeat/heartbeat_exception_test.cpp b/test/integration/heartbeat/heartbeat_exception_test.cpp index 67ac0bcf01..3b04c79390 100644 --- a/test/integration/heartbeat/heartbeat_exception_test.cpp +++ b/test/integration/heartbeat/heartbeat_exception_test.cpp @@ -20,8 +20,8 @@ * Author: lixiaocui */ -#include #include +#include #include "test/integration/heartbeat/common.h" @@ -31,19 +31,19 @@ namespace curve { namespace mds { class HeartbeatExceptionTest : public ::testing::Test { protected: - void InitConfiguration(Configuration *conf) { + void InitConfiguration(Configuration* conf) { conf->SetIntValue("mds.topology.ChunkServerStateUpdateSec", 0); - // heartbeat相关配置设置 + // heartbeat related configuration settings conf->SetIntValue("mds.heartbeat.intervalMs", 100); conf->SetIntValue("mds.heartbeat.misstimeoutMs", 3000); conf->SetIntValue("mds.heartbeat.offlinetimeoutMs", 5000); conf->SetIntValue("mds.heartbeat.clean_follower_afterMs", sleepTimeMs_); - // mds监听端口号 + // Mds listening port number conf->SetStringValue("mds.listen.addr", "127.0.0.1:6880"); - // scheduler相关的内容 + // scheduler related content conf->SetBoolValue("mds.enable.copyset.scheduler", false); conf->SetBoolValue("mds.enable.leader.scheduler", false); conf->SetBoolValue("mds.enable.recover.scheduler", false); @@ -64,9 +64,9 @@ class HeartbeatExceptionTest : public ::testing::Test { conf->SetIntValue("mds.scheduler.minScatterWidth", 50); } - void BuildCopySetInfo(CopySetInfo 
*info, uint64_t epoch, + void BuildCopySetInfo(CopySetInfo* info, uint64_t epoch, ChunkServerIdType leader, - const std::set<ChunkServerIdType> &members, + const std::set<ChunkServerIdType>& members, ChunkServerIdType candidateId = UNINTIALIZE_ID) { info->SetEpoch(epoch); info->SetLeader(leader);
@@ -95,35 +95,51 @@ class HeartbeatExceptionTest : public ::testing::Test { };
/*
- * bug说明:稳定性测试环境,宕机一台机器之后设置pending,副本恢复过程中mds有切换 - * 最终发现有5个pending状态的chunkserver没有完成迁移 - * 分析: - * 1. mds1提供服务时产生operator并下发给copyset-1{A,B,C} + - * D的变更,C是offline状态 - * 2. copyset-1完成配置变更,此时leader上的配置更新为epoch=2/{A,B,C,D}, - * candidate上的配置为epoch=1/{A,B,C}, mds1中记录的配置为epoch=1/{A,B,C} - * 3. mds1挂掉,mds2提供服务, 并从数据库加载copyset,mds2中copyset-1的配置 - * epoch=1/{A,B,C} - * 4. candidate-D上报心跳,copyset-1的配置为epoch=1/{A,B,C}。mds2发现D上报的 - * copyset中epoch和mds2记录的相同,但D并不在mds2记录的复制组中且调度模块也没有 - * 对应的operator,下发命令把D上的copyset-1删除导致D被误删
+ * Bug description: In a stability-test environment, one machine was taken + * down and marked pending; while its replicas were being recovered, an MDS + * failover occurred. In the end, 5 chunkservers in pending state never + * finished migration.
+ * Analysis:
+ * 1. While MDS1 was serving, it generated an operator for the change + * copyset-1 {A,B,C} + D, with C offline.
+ * 2. Copyset-1 completed the configuration change: the configuration on the + * leader became epoch=2/{A,B,C,D}, while the candidate still had + * epoch=1/{A,B,C} and MDS1 had recorded epoch=1/{A,B,C}.
+ * 3. MDS1 crashed and MDS2 took over, loading copysets from the database; + * in MDS2 the configuration of copyset-1 was epoch=1/{A,B,C}.
+ * 4. Candidate D reported a heartbeat with copyset-1 at epoch=1/{A,B,C}. + * MDS2 saw that the reported epoch matched its own record, but D was not in + * the replication group it had recorded and the scheduler had no matching + * operator, so MDS2 issued a command to delete copyset-1 on D; D was + * removed by mistake.
*
- * 解决方法: - * 正常情况下,heartbeat模块会在mds启动一定时间(目前配置20min)后才可以下发删除copyset - * 的命令,极大概率保证这段时间内copyset-leader上的配置更新到mds, - * 防止刚加入复制组 副本上的数据被误删
+ * Solution: + * Normally the heartbeat module may issue delete-copyset commands only after + * the MDS has been up for a certain time (currently configured as 20 min), + * which with very high probability lets the copyset leader's configuration + * reach the MDS within that window and keeps the data on a replica that has + * just joined the replication group from being deleted by mistake.
*
- * 这个时间的起始点应该是mds正式对外提供服务的时间,而不是mds的启动时间。如果设置为mds的启动 - * 时间,备mds启动很久后如果能够提供服务,就立马可以删除,导致bug
+ * The starting point of this window should be the moment the MDS actually + * starts serving, not the moment the process starts. If it were based on the + * process start time, a standby MDS that begins serving long after it was + * started could issue deletions immediately, which causes the bug.
*/
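// A minimal sketch of the guard described above, for illustration only: the
// type and method names below (CleanFollowerGuard, OnStartService,
// MayIssueCopysetDelete) are assumptions, not the actual MDS API. The point
// is that the protection window is measured from the moment the MDS starts
// serving, not from process startup.
struct CleanFollowerGuard {
    uint64_t serviceStartMs = 0;    // 0 means "not serving yet"
    uint64_t cleanFollowerAfterMs;  // mds.heartbeat.clean_follower_afterMs

    explicit CleanFollowerGuard(uint64_t afterMs)
        : cleanFollowerAfterMs(afterMs) {}

    // Called when this MDS actually begins to serve (e.g. after it takes over
    // as the active MDS), not in main().
    void OnStartService(uint64_t nowMs) { serviceStartMs = nowMs; }

    // Consulted by heartbeat handling before telling a chunkserver to delete
    // a replica that is unknown to the recorded copyset configuration.
    bool MayIssueCopysetDelete(uint64_t nowMs) const {
        return serviceStartMs != 0 &&
               nowMs - serviceStartMs > cleanFollowerAfterMs;
    }
};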
TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { // 1. copyset-1(epoch=2, peers={1,2,3}, leader=1) - // scheduler中有+4的operator - CopySetKey key{ 1, 1 }; + // The scheduler holds a +4 (add peer 4) operator + CopySetKey key{1, 1}; int startEpoch = 2; ChunkServerIdType leader = 1; ChunkServerIdType candidate = 4; - std::set<ChunkServerIdType> peers{ 1, 2, 3 }; + std::set<ChunkServerIdType> peers{1, 2, 3}; ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); Operator op(2, key, OperatorPriority::NormalPriority,
@@ -131,8 +147,8 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { op.timeLimit = std::chrono::seconds(3); hbtest_->AddOperatorToOpController(op); - // 2. leader上报copyset-1(epoch=2, peers={1,2,3}, leader=1) - // mds下发配置变更 + // 2. leader reports copyset-1(epoch=2, peers={1,2,3}, leader=1) + // MDS issues a configuration change ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(leader, &req); CopySetInfo csInfo(key.first, key.second);
@@ -140,7 +156,7 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response, 下发+D的配置变更 + // Check the response: a +D configuration change is issued ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(key.first, conf.logicalpoolid());
@@ -150,25 +166,28 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { ASSERT_EQ(ConfigChangeType::ADD_PEER, conf.type()); ASSERT_EQ("10.198.100.3:9001:0", conf.configchangeitem().address()); - // 3. 清除mds中的operrator(模拟mds重启) + // 3. Clear the operator in MDS (simulating an MDS restart) hbtest_->RemoveOperatorFromOpController(key); - // 4. canndidate上报落后的与mds的配置(candidate回放日志时会一一apply旧配置): + // 4. The candidate reports a configuration older than the one in MDS (while + // replaying its log, the candidate applies the old configurations one by one): // copyset-1(epoch=1, peers={1,2,3}, leader=1) - // 由于mds.heartbeat.clean_follower_afterMs时间还没有到,mds还不能下发 - // 删除命令。mds下发为空,candidate上的数据不会被误删 + // Because mds.heartbeat.clean_follower_afterMs has not yet elapsed, MDS + // cannot issue a delete command yet; the response is empty, so the data + // on the candidate is not deleted by mistake. rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(candidate, &req); BuildCopySetInfo(&csInfo, startEpoch - 1, leader, peers); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response, 为空 + // Check the response: it is empty ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 5. 睡眠mds.heartbeat.clean_follower_afterMs + 10ms后 - // canndidate上报staled copyset-1(epoch=1, peers={1,2,3}, leader=1) - // mds下发删除配置,candidate上的数据会被误删 + // 5. After sleeping mds.heartbeat.clean_follower_afterMs + 10ms, the + // candidate reports stale copyset-1(epoch=1, peers={1,2,3}, leader=1); + // MDS issues a delete configuration and the data on the candidate is + // deleted by mistake usleep((sleepTimeMs_ + 10) * 1000); rep.Clear(); req.Clear();
@@ -183,7 +202,8 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { ASSERT_EQ(peers.size(), conf.peers_size()); ASSERT_EQ(startEpoch, conf.epoch());
- // 6. leader上报最新配置copyset-1(epoch=3, peers={1,2,3,4}, leader=1) + // 6. leader reports the latest configuration copyset-1(epoch=3, + // peers={1,2,3,4}, leader=1) auto newPeers = peers; newPeers.emplace(candidate); auto newEpoch = startEpoch + 1;
@@ -193,24 +213,25 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { BuildCopySetInfo(&csInfo, startEpoch + 1, leader, newPeers); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response, 为空 + // Check the response: it is empty ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查mdstopology的数据 + // Check the data in the MDS topology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE(hbtest_->topology_->GetCopySet(key, &copysetInfo)); ASSERT_EQ(newEpoch, copysetInfo.GetEpoch()); ASSERT_EQ(leader, copysetInfo.GetLeader()); ASSERT_EQ(newPeers, copysetInfo.GetCopySetMembers()); - // 7. canndidate上报staled copyset-1(epoch=1, peers={1,2,3}, leader=1) - // mds不下发配置 + // 7. candidate reports stale copyset-1(epoch=1, peers={1,2,3}, leader=1); + // MDS issues no configuration rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(candidate, &req); BuildCopySetInfo(&csInfo, startEpoch - 1, leader, peers); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response, 下发copyset当前配置指导candidate删除数据 + // Check the response; the copyset's current configuration guides the + // candidate to delete its data ASSERT_EQ(0, rep.needupdatecopysets_size()); }
diff --git a/test/integration/raft/raft_config_change_test.cpp b/test/integration/raft/raft_config_change_test.cpp index 5660617558..ca34604820 100644 --- a/test/integration/raft/raft_config_change_test.cpp +++ b/test/integration/raft/raft_config_change_test.cpp
@@ -21,24 +21,24 @@ */ #include -#include #include +#include #include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli2.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "test/integration/common/peer_cluster.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; const char kRaftConfigChangeTestLogDir[] = "./runlog/RaftConfigChange"; const char* kFakeMdsAddr = "127.0.0.1:9080";
@@ -46,96 +46,66 @@ const char* kFakeMdsAddr = "127.0.0.1:9080"; static constexpr uint32_t kOpRequestAlignSize = 4096; static const char* raftConfigParam[5][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9081", - "-chunkServerStoreUri=local://./9081/", - "-chunkServerMetaUri=local://./9081/chunkserver.dat", - "-copySetUri=local://./9081/copysets", - "-raftSnapshotUri=curve://./9081/copysets", - "-raftLogUri=curve://./9081/copysets", - "-recycleUri=local://./9081/recycler", - "-chunkFilePoolDir=./9081/chunkfilepool/", - "-chunkFilePoolMetaPath=./9081/chunkfilepool.meta", - "-walFilePoolDir=./9081/walfilepool/", - "-walFilePoolMetaPath=./9081/walfilepool.meta", - "-conf=./9081/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9082", - "-chunkServerStoreUri=local://./9082/", - "-chunkServerMetaUri=local://./9082/chunkserver.dat", - "-copySetUri=local://./9082/copysets", - "-raftSnapshotUri=curve://./9082/copysets", - "-raftLogUri=curve://./9082/copysets", - "-recycleUri=local://./9082/recycler",
- "-chunkFilePoolDir=./9082/chunkfilepool/", - "-chunkFilePoolMetaPath=./9082/chunkfilepool.meta", - "-walFilePoolDir=./9082/walfilepool/", - "-walFilePoolMetaPath=./9082/walfilepool.meta", - "-conf=./9082/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9083", - "-chunkServerStoreUri=local://./9083/", - "-chunkServerMetaUri=local://./9083/chunkserver.dat", - "-copySetUri=local://./9083/copysets", - "-raftSnapshotUri=curve://./9083/copysets", - "-raftLogUri=curve://./9083/copysets", - "-recycleUri=local://./9083/recycler", - "-chunkFilePoolDir=./9083/chunkfilepool/", - "-chunkFilePoolMetaPath=./9083/chunkfilepool.meta", - "-walFilePoolDir=./9083/walfilepool/", - "-walFilePoolMetaPath=./9083/walfilepool.meta", - "-conf=./9083/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9084", - "-chunkServerStoreUri=local://./9084/", - "-chunkServerMetaUri=local://./9084/chunkserver.dat", - "-copySetUri=local://./9084/copysets", - "-raftSnapshotUri=curve://./9084/copysets", - "-raftLogUri=curve://./9084/copysets", - "-recycleUri=local://./9084/recycler", - "-chunkFilePoolDir=./9084/chunkfilepool/", - "-chunkFilePoolMetaPath=./9084/chunkfilepool.meta", - "-walFilePoolDir=./9084/walfilepool/", - "-walFilePoolMetaPath=./9084/walfilepool.meta", - "-conf=./9084/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9085", - "-chunkServerStoreUri=local://./9085/", - "-chunkServerMetaUri=local://./9085/chunkserver.dat", - "-copySetUri=local://./9085/copysets", - "-raftSnapshotUri=curve://./9085/copysets", - "-raftLogUri=curve://./9085/copysets", - "-recycleUri=local://./9085/recycler", - "-chunkFilePoolDir=./9085/chunkfilepool/", - "-chunkFilePoolMetaPath=./9085/chunkfilepool.meta", - "-walFilePoolDir=./9085/walfilepool/", - "-walFilePoolMetaPath=./9085/walfilepool.meta", - "-conf=./9085/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9081", + "-chunkServerStoreUri=local://./9081/", + "-chunkServerMetaUri=local://./9081/chunkserver.dat", + "-copySetUri=local://./9081/copysets", + "-raftSnapshotUri=curve://./9081/copysets", + "-raftLogUri=curve://./9081/copysets", + "-recycleUri=local://./9081/recycler", + "-chunkFilePoolDir=./9081/chunkfilepool/", + "-chunkFilePoolMetaPath=./9081/chunkfilepool.meta", + "-walFilePoolDir=./9081/walfilepool/", + "-walFilePoolMetaPath=./9081/walfilepool.meta", + "-conf=./9081/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9082", + "-chunkServerStoreUri=local://./9082/", + "-chunkServerMetaUri=local://./9082/chunkserver.dat", + "-copySetUri=local://./9082/copysets", + "-raftSnapshotUri=curve://./9082/copysets", + "-raftLogUri=curve://./9082/copysets", + "-recycleUri=local://./9082/recycler", + "-chunkFilePoolDir=./9082/chunkfilepool/", + "-chunkFilePoolMetaPath=./9082/chunkfilepool.meta", + "-walFilePoolDir=./9082/walfilepool/", + "-walFilePoolMetaPath=./9082/walfilepool.meta", + "-conf=./9082/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9083", + "-chunkServerStoreUri=local://./9083/", + "-chunkServerMetaUri=local://./9083/chunkserver.dat", + "-copySetUri=local://./9083/copysets", + "-raftSnapshotUri=curve://./9083/copysets", 
+ "-raftLogUri=curve://./9083/copysets", + "-recycleUri=local://./9083/recycler", + "-chunkFilePoolDir=./9083/chunkfilepool/", + "-chunkFilePoolMetaPath=./9083/chunkfilepool.meta", + "-walFilePoolDir=./9083/walfilepool/", + "-walFilePoolMetaPath=./9083/walfilepool.meta", + "-conf=./9083/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9084", + "-chunkServerStoreUri=local://./9084/", + "-chunkServerMetaUri=local://./9084/chunkserver.dat", + "-copySetUri=local://./9084/copysets", + "-raftSnapshotUri=curve://./9084/copysets", + "-raftLogUri=curve://./9084/copysets", + "-recycleUri=local://./9084/recycler", + "-chunkFilePoolDir=./9084/chunkfilepool/", + "-chunkFilePoolMetaPath=./9084/chunkfilepool.meta", + "-walFilePoolDir=./9084/walfilepool/", + "-walFilePoolMetaPath=./9084/walfilepool.meta", + "-conf=./9084/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9085", + "-chunkServerStoreUri=local://./9085/", + "-chunkServerMetaUri=local://./9085/chunkserver.dat", + "-copySetUri=local://./9085/copysets", + "-raftSnapshotUri=curve://./9085/copysets", + "-raftLogUri=curve://./9085/copysets", + "-recycleUri=local://./9085/recycler", + "-chunkFilePoolDir=./9085/chunkfilepool/", + "-chunkFilePoolMetaPath=./9085/chunkfilepool.meta", + "-walFilePoolDir=./9085/walfilepool/", + "-walFilePoolMetaPath=./9085/walfilepool.meta", + "-conf=./9085/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; class RaftConfigChangeTest : public testing::Test { @@ -179,39 +149,34 @@ class RaftConfigChangeTest : public testing::Test { ASSERT_TRUE(cg4.Init("9084")); ASSERT_TRUE(cg5.Init("9085")); cg1.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg1.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg1.SetKV("chunkserver.common.logDir", - kRaftConfigChangeTestLogDir); + std::to_string(snapshotIntervalS)); + cg1.SetKV("chunkserver.common.logDir", kRaftConfigChangeTestLogDir); cg1.SetKV("mds.listen.addr", kFakeMdsAddr); cg2.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg2.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg2.SetKV("chunkserver.common.logDir", - kRaftConfigChangeTestLogDir); + std::to_string(snapshotIntervalS)); + cg2.SetKV("chunkserver.common.logDir", kRaftConfigChangeTestLogDir); cg2.SetKV("mds.listen.addr", kFakeMdsAddr); cg3.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg3.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg3.SetKV("chunkserver.common.logDir", - kRaftConfigChangeTestLogDir); + std::to_string(snapshotIntervalS)); + cg3.SetKV("chunkserver.common.logDir", kRaftConfigChangeTestLogDir); cg3.SetKV("mds.listen.addr", kFakeMdsAddr); cg4.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg4.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg4.SetKV("chunkserver.common.logDir", - kRaftConfigChangeTestLogDir); + std::to_string(snapshotIntervalS)); + cg4.SetKV("chunkserver.common.logDir", kRaftConfigChangeTestLogDir); cg4.SetKV("mds.listen.addr", kFakeMdsAddr); cg5.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); 
cg5.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg5.SetKV("chunkserver.common.logDir", - kRaftConfigChangeTestLogDir); + std::to_string(snapshotIntervalS)); + cg5.SetKV("chunkserver.common.logDir", kRaftConfigChangeTestLogDir); cg5.SetKV("mds.listen.addr", kFakeMdsAddr); ASSERT_TRUE(cg1.Generate()); ASSERT_TRUE(cg2.Generate()); @@ -268,22 +233,20 @@ class RaftConfigChangeTest : public testing::Test { int confChangeTimeoutMs; int snapshotIntervalS; std::map paramsIndexs; - std::vector params; + std::vector params; int maxWaitInstallSnapshotMs; - // 等待多个副本数据一致的时间 + // Waiting for multiple replica data to be consistent int waitMultiReplicasBecomeConsistent; }; - - butil::AtExitManager atExitManager; /** - * 1. 3个节点正常启动 - * 2. 移除一个follower - * 3. 重复移除上一个follower - * 4. 再添加回来 - * 5. 重复添加回来 + * 1. 3 nodes start normally + * 2. Remove a follower + * 3. Repeatedly remove the previous follower + * 4. Add it back again + * 5. Repeatedly add back */ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { LogicPoolID logicPoolId = 2; @@ -293,7 +256,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 member LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -301,12 +264,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -317,15 +276,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 移除1个follower + // 2. Remove 1 follower LOG(INFO) << "remove 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -335,61 +289,40 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, removePeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, removePeer, options); ASSERT_TRUE(st1.ok()); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 3. 重复移除,验证重复移除的逻辑是否正常 - butil::Status - st2 = RemovePeer(logicPoolId, copysetId, conf, removePeer, options); + // 3. Duplicate removal, verify if the logic of duplicate removal is normal + butil::Status st2 = + RemovePeer(logicPoolId, copysetId, conf, removePeer, options); ASSERT_TRUE(st2.ok()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. add回来 + // 4. 
Add it back conf.remove_peer(removePeer.address()); - butil::Status - st3 = AddPeer(logicPoolId, copysetId, conf, removePeer, options); + butil::Status st3 = + AddPeer(logicPoolId, copysetId, conf, removePeer, options); ASSERT_TRUE(st3.ok()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 5. 重复add回来,验证重复添加的逻辑是否正常 + // 5. Add it back again to verify that a duplicate add is handled correctly conf.add_peer(removePeer.address()); - butil::Status - st4 = AddPeer(logicPoolId, copysetId, conf, removePeer, options); + butil::Status st4 = + AddPeer(logicPoolId, copysetId, conf, removePeer, options); ASSERT_TRUE(st4.ok()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 3); }
@@ -402,7 +335,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveShutdownPeer) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer;
@@ -410,12 +343,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveShutdownPeer) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -426,75 +355,50 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveShutdownPeer) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Shut down 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 移除此follower + // 3. Remove this follower braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st1.ok()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 拉起follower + // 4.
Restart the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 5. add回来 + // 5. Add it back conf.remove_peer(shutdownPeer.address()); - butil::Status - st2 = AddPeer(logicPoolId, copysetId, conf, shutdownPeer, options); + butil::Status st2 = + AddPeer(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st2.ok()) << st2.error_str(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verify previously written data by reading it back + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); }
@@ -507,7 +411,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveHangPeer) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer;
@@ -515,12 +419,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveHangPeer) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -531,81 +431,56 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveHangPeer) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. hang 1个follower + // 2. Hang 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.HangPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 移除此follower + // 3. Remove this follower braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st1.ok()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 恢复follower + // 4. Recover the hung follower LOG(INFO) << "recover hang follower"; ASSERT_EQ(0, cluster.SignalPeer(shutdownPeer)); - // 5. add回来 + // 5.
Add it back conf.remove_peer(shutdownPeer.address()); - butil::Status - st2 = AddPeer(logicPoolId, copysetId, conf, shutdownPeer, options); + butil::Status st2 = + AddPeer(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st2.ok()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verify previously written data by reading it back + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); }
/** - * 1. 3个节点正常启动 - * 2. 移除leader - * 3. 再将old leader添加回来 + * 1. 3 nodes start normally + * 2. Remove the leader + * 3. Then add the old leader back */ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; int length = kOpRequestAlignSize; char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer;
@@ -623,12 +498,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -638,22 +509,17 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 移除leader + // 2. Remove the leader LOG(INFO) << "remove leader"; braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); ASSERT_TRUE(st1.ok()); Peer oldLeader = leaderPeer;
@@ -661,50 +527,35 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STRNE(oldLeader.address().c_str(), leaderPeer.address().c_str()); - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. add回来 + // 3.
Add it back conf.remove_peer(oldLeader.address()); - butil::Status - st3 = AddPeer(logicPoolId, copysetId, conf, oldLeader, options); + butil::Status st3 = + AddPeer(logicPoolId, copysetId, conf, oldLeader, options); ASSERT_TRUE(st3.ok()); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STRNE(oldLeader.address().c_str(), leaderPeer.address().c_str()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 3); }
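// One subtlety shared by the add/remove steps in these tests: the braft CLI
// helpers take the copyset's *current* configuration as input and never
// update the caller's local Configuration object, so the tests patch it by
// hand (conf.remove_peer(...) / conf.add_peer(...)) between calls. A small
// sketch of the remove-then-re-add pattern, assuming the cli2 wrappers used
// in this file (the helper name itself is illustrative):
static butil::Status RemoveThenReAddPeer(LogicPoolID logicPoolId,
                                         CopysetID copysetId,
                                         Configuration conf,  // local copy
                                         const Peer& peer,
                                         const braft::cli::CliOptions& options) {
    butil::Status st = RemovePeer(logicPoolId, copysetId, conf, peer, options);
    if (!st.ok()) {
        return st;
    }
    // Keep the local view consistent with the new membership before
    // re-adding; otherwise AddPeer would be given a stale configuration.
    conf.remove_peer(peer.address());
    return AddPeer(logicPoolId, copysetId, conf, peer, options);
}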
/** - * 1. 3个节点正常启动 - * 2. 挂一个follower - * 3. 再将leader移除掉 - * 4. follower拉起来 + * 1. 3 nodes start normally + * 2. Shut down a follower + * 3. Then remove the leader + * 4. Restart the follower */ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; int length = kOpRequestAlignSize; char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -738,79 +585,57 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Shut down 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 移除leader + // 3. Remove the leader LOG(INFO) << "remove leader: " << leaderPeer.address(); braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); Peer oldLeader = leaderPeer; /** - * 一般能够移除成功,但是因为一个follower已经down了,那么 - * leader会自动进行check term,会发现已经有大多数的follower - * 已经失联,此时leader会主动step down,所以的request会提前 - * 返回失败,所以下面的断言会失败,但是移除本身会成功 + * The removal usually succeeds. But one follower is already down, so the + * leader's term check will find that a majority of the followers have + * become unreachable; the leader then steps down voluntarily and pending + * requests fail early. The assertion below may therefore fail even though + * the removal itself succeeds. */ -// ASSERT_TRUE(st1.ok()); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + // ASSERT_TRUE(st1.ok()); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ::usleep(1000 * electionTimeoutMs * 2); - // 4. 拉起follower + // 4. Restart the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verify previously written data by reading it back + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // leader已经移除,所以只用验证2个副本数据一致性 + // The leader has been removed, so only the data consistency of the two + // remaining replicas is verified ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; for (Peer peer : peers) {
@@ -822,10 +647,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { }
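// A sketch of the tolerant removal pattern exercised by the surrounding
// tests (the helper is illustrative, not project API): when the leader is
// removed while another replica is down, the CLI status is deliberately not
// asserted, because the leader may step down before replying even though the
// removal is committed; the tests instead wait for re-election and verify
// correctness through reads.
static void RemoveLeaderTolerantly(PeerCluster* cluster,
                                   LogicPoolID logicPoolId,
                                   CopysetID copysetId,
                                   const Configuration& conf,
                                   const Peer& leader,
                                   const braft::cli::CliOptions& options,
                                   int electionTimeoutMs, Peer* newLeader) {
    butil::Status st =
        RemovePeer(logicPoolId, copysetId, conf, leader, options);
    (void)st;  // may report failure although the removal succeeded
    // Give the group up to two election timeouts to elect a new leader,
    // then re-resolve it before issuing further I/O.
    ::usleep(1000 * electionTimeoutMs * 2);
    ASSERT_EQ(0, cluster->WaitLeader(newLeader));
}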
/** - * 1. 3个节点正常启动 - * 2. hang一个follower - * 3. 再将leader移除掉 - * 4. follower拉起来 + * 1. 3 nodes start normally + * 2. Hang a follower + * 3. Then remove the leader + * 4. Restart the follower */ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; int length = kOpRequestAlignSize; char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -859,78 +680,56 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. hang1个follower + // 2. Hang 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer hangPeer = followerPeers[0]; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 移除leader + // 3. Remove the leader LOG(INFO) << "remove leader: " << leaderPeer.address(); braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; Configuration conf = cluster.CopysetConf(); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); Peer oldLeader = leaderPeer; /** - * 一般能够移除成功,但是因为一个follower已经down了,那么 - * leader会自动进行check term,会发现已经有大多数的follower - * 已经失联,此时leader会主动step down,所以的request会提前 - * 返回失败,所以下面的断言会失败,但是移除本身会成功 + * The removal usually succeeds. But one follower is already down, so the + * leader's term check will find that a majority of the followers have + * become unreachable; the leader then steps down voluntarily and pending + * requests fail early. The assertion below may therefore fail even though + * the removal itself succeeds. */ -// ASSERT_TRUE(st1.ok()); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + // ASSERT_TRUE(st1.ok()); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ::usleep(1000 * electionTimeoutMs * 2); - // 4. 拉起follower + // 4. Recover the hung follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.SignalPeer(hangPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verify previously written data by reading it back + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // leader已经移除,所以验证2个副本数据一致性 + // The leader has been removed, so verify the data consistency of the two + // remaining replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; for (Peer peer : peers) {
@@ -942,9 +741,9 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { }
/** - * 1. {A、B、C} 3个节点正常启动,假设A是leader - * 2. 挂掉B,transfer leader给B - * 3. 拉起B,transfer leader给B + * 1. {A, B, C} 3 nodes start normally; assume A is the leader + * 2. Shut down B, then transfer leadership to B + * 3. Restart B, then transfer leadership to B */ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; int length = kOpRequestAlignSize; char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1.
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -962,12 +761,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -978,28 +773,18 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 3. transfer leader to shutdown peer braft::cli::CliOptions options; @@ -1009,19 +794,14 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { butil::Status st1 = TransferLeader(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st1.ok()); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch -1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STRNE(shutdownPeer.address().c_str(), leaderId.to_string().c_str()); - // 4. 拉起follower,然后再把leader transfer过去 + // 4. 
Pull up the follower and then transfer the leader over LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); @@ -1032,11 +812,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { butil::Status status; LOG(INFO) << "start transfer leader to " << shutdownPeer.address(); for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - shutdownPeer, - options); + status = + TransferLeader(logicPoolId, copysetId, conf, shutdownPeer, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderPeer); if (leaderPeer.address() == shutdownPeer.address()) { @@ -1050,32 +827,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { ASSERT_STREQ(shutdownPeer.address().c_str(), leaderPeer.address().c_str()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 1. {A、B、C} 3个节点正常启动,假设A是leader - * 2. hang B,transfer leader给B - * 3. 恢复 B,transfer leader给B + * 1. {A, B, C} three nodes start normally, assuming A is the leader + * 2. Hang B, transfer leader to B + * 3. Restore B, transfer leader to B */ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { LogicPoolID logicPoolId = 2; @@ -1085,7 +852,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1093,12 +860,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1109,28 +872,18 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. hang1个follower + // 2. Hang 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer hangPeer = followerPeers[0]; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 3. 
transfer leader to hang peer braft::cli::CliOptions options; @@ -1145,7 +898,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STRNE(hangPeer.address().c_str(), leaderId.to_string().c_str()); - // 4. 恢复follower,然后再把leader transfer过去 + // 4. Restore the follower and then transfer the leader LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.SignalPeer(hangPeer)); @@ -1155,54 +908,43 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { butil::Status status; LOG(INFO) << "start transfer leader to " << hangPeer.address(); for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - hangPeer, - options); + status = + TransferLeader(logicPoolId, copysetId, conf, hangPeer, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderPeer); if (leaderPeer.address() == hangPeer.address()) { break; } } - LOG(INFO) << i + 1 << " th transfer leader to " - << hangPeer.address() << " failed"; + LOG(INFO) << i + 1 << " th transfer leader to " << hangPeer.address() + << " failed"; ::sleep(1); } ASSERT_STREQ(hangPeer.address().c_str(), leaderPeer.address().c_str()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** * - * 1. {A、B、C} 3个节点正常启 - * 2. 挂掉一个follower - * 3. 起一个节点D,Add D(需要额外确保通过snapshot恢复) - * 4. remove挂掉的follower + * 1. {A, B, C} three nodes start normally + * 2. Hang up a follower + * 3. Start a node D, Add D (additional ensure recovery through snapshot) + * 4. Remove the failed follower */ -TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInstallSnapshot) { // NOLINT +TEST_F( + RaftConfigChangeTest, + ThreeNodeShutdownPeerAndThenAddNewFollowerFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1210,7 +952,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1218,12 +960,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1234,54 +972,34 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 通过打两次快照确保后面的恢复必须走安装快照 + // Wait snapshot, ensuring that subsequent restores must follow the + // installation snapshot by taking two snapshots LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起peer4 - ASSERT_EQ(0, cluster.StartPeer(peer4, - PeerCluster::PeerToId(peer4))); + // 3. 
Pull up peer4 + ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ::sleep(1); Configuration conf = cluster.CopysetConf(); @@ -1291,25 +1009,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta butil::Status st = AddPeer(logicPoolId, copysetId, conf, peer4, options); ASSERT_TRUE(st.ok()) << st.error_str(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, options); ::usleep(waitMultiReplicasBecomeConsistent * 1000); peers.push_back(peer4); @@ -1324,12 +1032,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta /** * - * 1. {A、B、C} 3个节点正常启 - * 2. hang一个follower - * 3. 起一个节点D,Add D(需要额外确保通过snapshot恢复) - * 4. remove挂掉的follower + * 1. {A, B, C} three nodes start normally + * 2. Hang a follower + * 3. Start a node D, Add D (additional ensure recovery through snapshot) + * 4. Remove the failed follower */ -TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSnapshot) { // NOLINT +TEST_F(RaftConfigChangeTest, + ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1337,7 +1046,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1345,12 +1054,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1361,54 +1066,33 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer hangPeer = followerPeers[0]; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起peer4 - ASSERT_EQ(0, cluster.StartPeer(peer4, - PeerCluster::PeerToId(peer4))); + // 3. Pull up peer4 + ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ::sleep(1); @@ -1419,25 +1103,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn butil::Status st = AddPeer(logicPoolId, copysetId, conf, peer4, options); ASSERT_TRUE(st.ok()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - butil::Status - st1 = RemovePeer(logicPoolId, copysetId, conf, hangPeer, options); + butil::Status st1 = + RemovePeer(logicPoolId, copysetId, conf, hangPeer, options); ::usleep(waitMultiReplicasBecomeConsistent * 1000); peers.push_back(peer4); @@ -1451,11 +1125,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn } /** - * 1. {A、B、C} 3个节点正常启 - * 2. 挂了follower,并删除其所有raft log和数据 - * 3. 重启follower,follower能够通过数据恢复最终追上leader + * 1. {A, B, C} three nodes start normally + * 2. Hang up the follower and delete all its raft logs and data + * 3. Restart the follower, and the follower can eventually catch up with the + * leader through data recovery */ -TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapshot) { // NOLINT +TEST_F(RaftConfigChangeTest, + ThreeNodeRemoveDataAndThenRecoverFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1463,7 +1139,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1471,12 +1147,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1487,92 +1159,63 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 删除此peer的数据,然后重启 + // Delete the data for this peer and restart it ASSERT_EQ(0, ::system(PeerCluster::RemoveCopysetDirCmd(shutdownPeer).c_str())); - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); - ASSERT_FALSE(fs->DirExists(PeerCluster::CopysetDirWithoutProtocol( - shutdownPeer))); + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + ASSERT_FALSE( + fs->DirExists(PeerCluster::CopysetDirWithoutProtocol(shutdownPeer))); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 1. {A、B、C} 3个节点正常启 - * 2. 
挂了follower,并删除其所有raft log - * 3. 重启follower + * 1. {A, B, C} three nodes start normally + * 2. Hang up the follower and delete all its raft logs + * 3. Restart follower */ -TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnapshot) { // NOLINT +TEST_F(RaftConfigChangeTest, + ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1580,7 +1223,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1588,12 +1231,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1604,96 +1243,69 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 删除此peer的log,然后重启 - ::system(PeerCluster::RemoveCopysetLogDirCmd(shutdownPeer, - logicPoolId, - copysetId).c_str()); - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); - ASSERT_FALSE(fs->DirExists(PeerCluster::RemoveCopysetLogDirCmd(shutdownPeer, - logicPoolId, - copysetId))); - - // wait snapshot, 保证能够触发安装快照 + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // Delete the log of this peer and restart it + ::system(PeerCluster::RemoveCopysetLogDirCmd(shutdownPeer, logicPoolId, + copysetId) + .c_str()); + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + ASSERT_FALSE(fs->DirExists(PeerCluster::RemoveCopysetLogDirCmd( + shutdownPeer, logicPoolId, copysetId))); + + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. 
Restart the follower
     LOG(INFO) << "restart shutdown follower";
     ASSERT_EQ(0, cluster.StartPeer(shutdownPeer,
                                    PeerCluster::PeerToId(shutdownPeer)));
     ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
 
-    // read之前写入的数据验证
-    ReadVerify(leaderPeer,
-               logicPoolId,
-               copysetId,
-               chunkId,
-               length,
-               ch - 1,
+    // Verify the data written previously
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1,
                loop);
 
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch ++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
     ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000);
     CopysetStatusVerify(peers, logicPoolId, copysetId, 1);
 }
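The "wait snapshot 1" / "wait snapshot 2" steps that recur in the cases above and below are what force recovery onto braft's install-snapshot path: once the snapshot timer has fired twice while a replica is down, the leader's log no longer holds the entries that replica would need for plain log replay. A condensed sketch of that driver pattern follows; it assumes the fixture's PeerCluster, WriteThenReadVerify, and snapshotIntervalS, and the helper name ForceSnapshotRecovery is illustrative only, not part of this file:

    // Sketch only (assumed fixture helpers): write a data generation, then
    // wait out two snapshot intervals so the snapshot timer fires twice and
    // the older log segments are truncated away.
    static void ForceSnapshotRecovery(PeerCluster* cluster, const Peer& leader,
                                      const Peer& deadPeer, LogicPoolID poolId,
                                      CopysetID copysetId, uint64_t chunkId,
                                      int length, char* ch, int loop) {
        for (int round = 1; round <= 2; ++round) {
            WriteThenReadVerify(leader, poolId, copysetId, chunkId, length,
                                (*ch)++, loop);
            LOG(INFO) << "wait snapshot " << round;
            ::sleep(2 * snapshotIntervalS);
        }
        // Log replay is now impossible for the dead peer; restarting it must
        // go through InstallSnapshot.
        ASSERT_EQ(0, cluster->StartPeer(deadPeer,
                                        PeerCluster::PeerToId(deadPeer)));
    }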
/**
- * 1. {A、B、C} 3个节点正常启动
- * 2. 挂了follower
- * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中挂掉leader
- * 本次install snapshot失败,但是new leader会被选出来,new leader继续给
- * follower恢复数据,最终follower数据追上leader并一致
+ * 1. {A, B, C} 3 nodes start normally
+ * 2. Shut down a follower
+ * 3. Restart the follower so it recovers (with extra steps to ensure the
+ *    recovery goes through install snapshot), and shut down the leader during
+ *    the recovery. This install snapshot attempt fails, but a new leader is
+ *    elected and keeps restoring data to the follower; the follower's data
+ *    eventually catches up with the leader and stays consistent
 */
-TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderShutdown) { // NOLINT
+TEST_F(
+    RaftConfigChangeTest,
+    ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderShutdown) { // NOLINT
     LogicPoolID logicPoolId = 2;
     CopysetID copysetId = 100001;
     uint64_t chunkId = 1;
@@ -1701,7 +1313,7 @@
     char ch = 'a';
     int loop = 25;
 
-    // 1. 启动3个成员的复制组
+    // 1. Start a replication group of 3 members
     LOG(INFO) << "start 3 chunkservers";
     PeerId leaderId;
     Peer leaderPeer;
     std::vector<Peer> peers;
     peers.push_back(peer1);
     peers.push_back(peer2);
     peers.push_back(peer3);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     cluster.SetElectionTimeoutMs(electionTimeoutMs);
     cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -1725,91 +1333,57 @@
     ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
     ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
 
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
-    // 2. 挂掉1个follower
+    // 2. Shut down 1 follower
     LOG(INFO) << "shutdown 1 follower";
     std::vector<Peer> followerPeers;
     PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers);
     ASSERT_GE(followerPeers.size(), 1);
     Peer shutdownPeer = followerPeers[0];
     ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer));
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
-    // wait snapshot, 保证能够触发安装快照
+    // Wait for snapshots to guarantee that install snapshot will be triggered
     LOG(INFO) << "wait snapshot 1";
     ::sleep(2 * snapshotIntervalS);
-    // 再次发起 read/write
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch ++,
-                        loop);
+    // Initiate read/write again
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
     LOG(INFO) << "wait snapshot 2";
     ::sleep(2 * snapshotIntervalS);
-    // 再次发起 read/write
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch ++,
-                        loop);
+    // Initiate read/write again
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
-    // 3. 拉起follower
+    // 3. Restart the follower
     LOG(INFO) << "restart shutdown follower";
     ASSERT_EQ(0, cluster.StartPeer(shutdownPeer,
                                    PeerCluster::PeerToId(shutdownPeer)));
     ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
 
-    // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader挂掉
+    // 4. After a random sleep, shut down the leader to simulate the leader
+    // crashing during install snapshot
     int sleepMs = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1;
     ::usleep(1000 * sleepMs);
     ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer));
     Peer oldLeader = leaderPeer;
-    ReadVerifyNotAvailable(leaderPeer,
-                           logicPoolId,
-                           copysetId,
-                           chunkId,
-                           length,
-                           ch - 1,
-                           1);
+    ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                           ch - 1, 1);
 
     ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
     ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
     LOG(INFO) << "new leader is: " << leaderPeer.address();
 
-    // read之前写入的数据验证
-    ReadVerify(leaderPeer,
-               logicPoolId,
-               copysetId,
-               chunkId,
-               length,
-               ch - 1,
+    // Verify the data written previously
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1,
                loop);
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch ++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
     ::usleep(waitMultiReplicasBecomeConsistent * 1000);
     std::vector<Peer> newPeers;
@@ -1822,11 +1396,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade
 }
 
 /**
- * 1. {A、B、C} 3个节点正常启动
- * 2. 挂了follower
- * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中leader重启
+ * 1. {A, B, C} 3 nodes start normally
+ * 2. Shut down a follower
+ * 3. Restart the follower so it recovers (with extra steps to ensure the
+ *    recovery goes through install snapshot), and restart the leader during
+ *    the recovery
 */
-TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderRestart) { // NOLINT
+TEST_F(RaftConfigChangeTest,
+       ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderRestart) { // NOLINT
     LogicPoolID logicPoolId = 2;
     CopysetID copysetId = 100001;
     uint64_t chunkId = 1;
@@ -1834,7 +1410,7 @@
     char ch = 'a';
     int loop = 25;
 
-    // 1.
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1842,12 +1418,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1858,93 +1430,59 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader挂掉 + // 4. 
After a period of random sleep, hang up the leader and simulate the + // installation snapshot when the leader hangs up int sleepMs = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs); ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer oldLeader = leaderPeer; - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); LOG(INFO) << "new leader is: " << leaderPeer.address(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; @@ -1957,11 +1495,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中hang leader + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot), and hang the leader during the recovery process */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderHang) { // NOLINT +TEST_F(RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderHang) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -1969,7 +1509,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1977,12 +1517,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1993,91 +1529,57 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader hang + // 4. After a period of random sleep, hang up the leader and simulate the + // leader hang during installation snapshot int sleepMs = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs); ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); Peer oldLeader = leaderPeer; - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); LOG(INFO) << "new leader is: " << leaderPeer.address(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; @@ -2090,11 +1592,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中leader hang一会 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot), and during the recovery process, the leader will + * hang for a while */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderHangMoment) { // NOLINT +TEST_F( + RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderHangMoment) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -2102,7 +1608,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2110,12 +1616,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2126,58 +1628,39 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader挂掉 + // 4. 
After a period of random sleep, hang up the leader and simulate the + // installation snapshot when the leader hangs up int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs1); @@ -2191,22 +1674,12 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); LOG(INFO) << "new leader is: " << leaderPeer.address(); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; @@ -2219,12 +1692,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower, - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中follower挂了 - * 4. 一段时间后拉起来 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower, + * 3. Restart to recover the follower (additional assurance is required to + * recover through snapshot), but the follower hung during the recovery process + * 4. After a period of time, pull it up */ -TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerShutdown) { // NOLINT +TEST_F( + RaftConfigChangeTest, + ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerShutdown) { // NOLINT LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -2232,7 +1708,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2240,12 +1716,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2256,63 +1728,43 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. 
Shut down 1 follower
     LOG(INFO) << "shutdown 1 follower";
     std::vector<Peer> followerPeers;
     PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers);
     ASSERT_GE(followerPeers.size(), 1);
     Peer shutdownPeer = followerPeers[0];
     ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer));
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
-    // wait snapshot, 保证能够触发安装快照
+    // Wait for snapshots to guarantee that install snapshot will be triggered
     LOG(INFO) << "wait snapshot 1";
     ::sleep(2 * snapshotIntervalS);
-    // 再次发起 read/write
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch ++,
-                        loop);
+    // Initiate read/write again
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
     LOG(INFO) << "wait snapshot 2";
     ::sleep(2 * snapshotIntervalS);
-    // 再次发起 read/write
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch ++,
-                        loop);
+    // Initiate read/write again
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
-    // 3. 拉起follower
+    // 3. Restart the follower
     LOG(INFO) << "restart shutdown follower";
     ASSERT_EQ(0, cluster.StartPeer(shutdownPeer,
                                    PeerCluster::PeerToId(shutdownPeer)));
 
-    // 4. 随机睡眠一段时间后,挂掉follower,模拟install snapshot的时候
-    // follower出现问题
+    // 4. After a random sleep, shut down the follower to simulate the
+    // follower failing during install snapshot
    int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1;
    ::usleep(1000 * sleepMs1);
    ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer));
 
-    // 5. 把follower拉来
+    // 5. Pull the follower back up
    int sleepMs2 = butil::fast_rand_less_than(1000) + 1;
    ::usleep(1000 * sleepMs2);
    ASSERT_EQ(0, cluster.StartPeer(shutdownPeer,
@@ -2321,33 +1773,27 @@
     ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
     ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
 
-    // read之前写入的数据验证
-    ReadVerify(leaderPeer,
-               logicPoolId,
-               copysetId,
-               chunkId,
-               length,
-               ch - 1,
+    // Verify the data written previously
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1,
                loop);
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch ++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
     ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000);
     CopysetStatusVerify(peers, logicPoolId, copysetId, 1);
 }
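Each of these fault cases perturbs the copyset at a random instant inside the install-snapshot window, using butil::fast_rand_less_than() so the fault lands at a different phase of the transfer on every run. In isolation, that injection step might look like the sketch below (PeerCluster is the fixture class used above; FaultKind and InjectRandomFault are illustrative names, not part of this file):

    enum class FaultKind { kShutdown, kHang };

    // Sleep a random 1..windowMs milliseconds, then fault the peer, so that
    // repeated runs hit different phases of the snapshot transfer.
    static void InjectRandomFault(PeerCluster* cluster, const Peer& peer,
                                  FaultKind kind, int windowMs) {
        int sleepMs = butil::fast_rand_less_than(windowMs) + 1;
        ::usleep(1000 * sleepMs);
        int rc = (kind == FaultKind::kShutdown) ? cluster->ShutdownPeer(peer)
                                                : cluster->HangPeer(peer);
        ASSERT_EQ(0, rc);
    }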
/**
- * 1. {A、B、C} 3个节点正常启动
- * 2. 挂了follower,
- * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中follower重启了
+ * 1. {A, B, C} 3 nodes start normally
+ * 2. Shut down a follower
+ * 3. Restart the follower so it recovers (with extra steps to ensure the
+ *    recovery goes through install snapshot); the follower restarts again
+ *    during the recovery
 */
-TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerRestart) { // NOLINT
+TEST_F(
+    RaftConfigChangeTest,
+    ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerRestart) { // NOLINT
     LogicPoolID logicPoolId = 2;
     CopysetID copysetId = 100001;
     uint64_t chunkId = 1;
@@ -2355,7 +1801,7 @@
     char ch = 'a';
     int loop = 25;
 
-    // 1. 启动3个成员的复制组
+    // 1. Start a replication group of 3 members
     LOG(INFO) << "start 3 chunkservers";
     PeerId leaderId;
     Peer leaderPeer;
     std::vector<Peer> peers;
     peers.push_back(peer1);
     peers.push_back(peer2);
     peers.push_back(peer3);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     cluster.SetElectionTimeoutMs(electionTimeoutMs);
     cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -2379,97 +1821,70 @@
     ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
     ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
 
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
-    // 2. 挂掉1个follower
+    // 2. Shut down 1 follower
     LOG(INFO) << "shutdown 1 follower";
     std::vector<Peer> followerPeers;
     PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers);
     ASSERT_GE(followerPeers.size(), 1);
     Peer shutdownPeer = followerPeers[0];
     ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer));
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
-    // wait snapshot, 保证能够触发安装快照
+    // Wait for snapshots to guarantee that install snapshot will be triggered
     LOG(INFO) << "wait snapshot 1";
     ::sleep(2 * snapshotIntervalS);
-    // 再次发起 read/write
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch ++,
-                        loop);
+    // Initiate read/write again
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
     LOG(INFO) << "wait snapshot 2";
     ::sleep(2 * snapshotIntervalS);
-    // 再次发起 read/write
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch ++,
-                        loop);
+    // Initiate read/write again
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
-    // 3. 拉起follower
+    // 3. Restart the follower
     LOG(INFO) << "restart shutdown follower";
     ASSERT_EQ(0, cluster.StartPeer(shutdownPeer,
                                    PeerCluster::PeerToId(shutdownPeer)));
 
-    // 4. 随机睡眠一段时间后,挂掉follower,模拟install snapshot的时候
-    // follower出现问题
+    // 4. After a random sleep, shut down the follower to simulate the
+    // follower failing during install snapshot
     int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1;
     ::usleep(1000 * sleepMs1);
     ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer));
 
-    // 5. 把follower拉来
+    // 5. Pull the follower back up
     ASSERT_EQ(0, cluster.StartPeer(shutdownPeer,
                                    PeerCluster::PeerToId(shutdownPeer)));
 
     ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
     ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
 
-    // read之前写入的数据验证
-    ReadVerify(leaderPeer,
-               logicPoolId,
-               copysetId,
-               chunkId,
-               length,
-               ch - 1,
+    // Verify the data written previously
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1,
                loop);
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch ++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
     ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000);
     CopysetStatusVerify(peers, logicPoolId, copysetId, 1);
 }
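Throughout these cases the single byte ch works as a data version: every write fills the chunk with the current ch and post-increments it, so ReadVerify(..., ch - 1, ...) asserts that the last committed generation survived the fault. A minimal self-contained model of that convention, in plain C++ and independent of the fixture:

    #include <cassert>
    #include <string>

    int main() {
        char ch = 'a';
        std::string chunk;  // stands in for the chunk contents
        auto write = [&](char v) { chunk.assign(8, v); };  // 8 ~ "length"
        write(ch++);        // generation 'a'
        write(ch++);        // generation 'b'
        // After a fault, survivors must still serve the last generation:
        assert(chunk == std::string(8, ch - 1));
        return 0;
    }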
/**
- * 1. {A、B、C} 3个节点正常启动
- * 2. 挂了follower,
- * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中follower hang了
- * 4. 一段时间后恢复
+ * 1. {A, B, C} 3 nodes start normally
+ * 2. Shut down a follower
+ * 3. Restart the follower so it recovers (with extra steps to ensure the
+ *    recovery goes through install snapshot); the follower hangs during the
+ *    recovery
+ * 4. Recover it after a period of time
 */
-TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerHang) { // NOLINT
+TEST_F(RaftConfigChangeTest,
+       ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerHang) { // NOLINT
     LogicPoolID logicPoolId = 2;
     CopysetID copysetId = 100001;
     uint64_t chunkId = 1;
@@ -2477,7 +1892,7 @@
     char ch = 'a';
     int loop = 25;
 
-    // 1. 启动3个成员的复制组
+    // 1. Start a replication group of 3 members
     LOG(INFO) << "start 3 chunkservers";
     PeerId leaderId;
     Peer leaderPeer;
     std::vector<Peer> peers;
     peers.push_back(peer1);
     peers.push_back(peer2);
     peers.push_back(peer3);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     cluster.SetElectionTimeoutMs(electionTimeoutMs);
     cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -2501,63 +1912,43 @@
     ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
     ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
 
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
-    // 2. 挂掉1个follower
+    // 2. Shut down 1 follower
     LOG(INFO) << "shutdown 1 follower";
     std::vector<Peer> followerPeers;
     PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers);
     ASSERT_GE(followerPeers.size(), 1);
     Peer shutdownPeer = followerPeers[0];
     ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer));
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
-    // wait snapshot, 保证能够触发安装快照
+    // Wait for snapshots to guarantee that install snapshot will be triggered
     LOG(INFO) << "wait snapshot 1";
     ::sleep(2 * snapshotIntervalS);
-    // 再次发起 read/write
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch ++,
-                        loop);
+    // Initiate read/write again
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
     LOG(INFO) << "wait snapshot 2";
     ::sleep(2 * snapshotIntervalS);
-    // 再次发起 read/write
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch ++,
-                        loop);
+    // Initiate read/write again
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
-    // 3. 拉起follower
+    // 3. Restart the follower
     LOG(INFO) << "restart shutdown follower";
     ASSERT_EQ(0, cluster.StartPeer(shutdownPeer,
                                    PeerCluster::PeerToId(shutdownPeer)));
 
-    // 4. 随机睡眠一段时间后,hang follower,模拟install snapshot的时候
-    // follower出现问题
+    // 4. After a random sleep, hang the follower to simulate the follower
+    // failing during install snapshot
     int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1;
     ::usleep(1000 * sleepMs1);
     ASSERT_EQ(0, cluster.HangPeer(shutdownPeer));
 
-    // 5. 把follower恢复
+    // 5. Recover the follower
     int sleepMs2 = butil::fast_rand_less_than(1000) + 1;
     ::usleep(1000 * sleepMs2);
     ASSERT_EQ(0, cluster.SignalPeer(shutdownPeer));
@@ -2565,32 +1956,23 @@
     ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
     ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
 
-    // read之前写入的数据验证
-    ReadVerify(leaderPeer,
-               logicPoolId,
-               copysetId,
-               chunkId,
-               length,
-               ch - 1,
+    // Verify the data written previously
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1,
                loop);
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch ++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
 
     ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000);
     CopysetStatusVerify(peers, logicPoolId, copysetId, 1);
 }
 
 /**
- * 验证3个节点的复制组,并挂掉follower
- * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍
- * 2. 挂掉follower
- * 3. 恢复follower
+ * Verify a three-node replication group after shutting down a follower
+ * 1. Create a replication group of 3 members, wait for a leader to emerge,
+ *    write data, then read it back for verification
+ * 2. Shut down the follower
+ * 3. Recover the follower
 */
 TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) {
     LogicPoolID logicPoolId = 2;
@@ -2600,7 +1982,7 @@
     char ch = 'a';
     int loop = 25;
 
-    // 1.
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2608,12 +1990,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2624,52 +2002,32 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. 
Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); @@ -2684,11 +2042,8 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { butil::Status status; LOG(INFO) << "start transfer leader to " << shutdownPeer.address(); for (int i = 0; i < kMaxLoop; ++i) { - status = TransferLeader(logicPoolId, - copysetId, - conf, - shutdownPeer, - options); + status = + TransferLeader(logicPoolId, copysetId, conf, shutdownPeer, options); if (0 == status.error_code()) { cluster.WaitLeader(&leaderPeer); if (leaderPeer.address() == shutdownPeer.address()) { @@ -2703,31 +2058,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { ASSERT_EQ(0, ::strcmp(leaderPeer.address().c_str(), shutdownPeer.address().c_str())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 1. 创建5个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉两个follower - * 3. 让两个follower从installsnapshot恢复 + * 1. Create a replication group of 5 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up two followers + * 3. Recover the two followers from install snapshot */ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -2737,7 +2083,7 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { char ch = 'a'; int loop = 25; - // 1. 启动5个成员的复制组 + // 1. Start a replication group of 5 members LOG(INFO) << "start 5 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2747,12 +2093,8 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { peers.push_back(peer3); peers.push_back(peer4); peers.push_back(peer5); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2765,15 +2107,10 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉2个follower + // 2. 
Hang up 2 followers LOG(INFO) << "shutdown 2 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -2782,37 +2119,22 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { Peer shutdownPeer2 = followerPeers[1]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer1)); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer2)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown 2 follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer1, PeerCluster::PeerToId(shutdownPeer1))); @@ -2820,33 +2142,24 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { PeerCluster::PeerToId(shutdownPeer2))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的复制组{A、B、C},并挂掉follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the follower + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -2856,19 +2169,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2878,30 +2187,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // a + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // a loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer shutdownPeer = followerPeers[0]; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // b + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -2912,33 +2213,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { Configuration newConf = conf; newConf.remove_peer(PeerId(shutdownPeer.address())); newConf.add_peer(PeerId(peer4.address())); - butil::Status st = ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - options); + butil::Status st = + ChangePeers(logicPoolId, copysetId, conf, newConf, options); ASSERT_TRUE(st.ok()) << st.error_str(); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); @@ -2954,11 +2244,12 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { } /** - * 验证3个节点的复制组{A、B、C},并Hang follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang follower - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang follower + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -2968,19 +2259,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -2990,30 +2277,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // a + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // a loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "hang 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); Peer hangPeer = followerPeers[0]; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // b + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -3024,33 +2303,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { Configuration newConf = conf; newConf.remove_peer(PeerId(hangPeer.address())); newConf.add_peer(PeerId(peer4.address())); - butil::Status st = ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - options); + butil::Status st = + ChangePeers(logicPoolId, copysetId, conf, newConf, options); ASSERT_TRUE(st.ok()) << st.error_str(); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); @@ -3066,11 +2334,12 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { } /** - * 验证3个节点的复制组{A、B、C},并挂掉leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the leader + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -3080,19 +2349,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -3102,29 +2367,21 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // a + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // a loop); - // 2. 挂掉leader + // 2. Hang up the leader LOG(INFO) << "shutdown 1 leader"; Peer shutdownPeer = leaderPeer; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // 等待新的leader产生 + // Waiting for a new leader to be generated ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // b + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -3135,33 +2392,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { Configuration newConf = conf; newConf.remove_peer(PeerId(shutdownPeer.address())); newConf.add_peer(PeerId(peer4.address())); - butil::Status st = ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - options); + butil::Status st = + ChangePeers(logicPoolId, copysetId, conf, newConf, options); ASSERT_TRUE(st.ok()) << st.error_str(); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); @@ -3177,11 +2423,12 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { } /** - * 验证3个节点的复制组{A、B、C},并Hang leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang leader - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang leader + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -3191,19 +2438,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -3213,29 +2456,21 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // a + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // a loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "hang 1 leader"; Peer hangPeer = leaderPeer; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - // 等待新的leader产生 + // Waiting for a new leader to be generated ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, // b + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -3246,33 +2481,22 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { Configuration newConf = conf; newConf.remove_peer(PeerId(hangPeer.address())); newConf.add_peer(PeerId(peer4.address())); - butil::Status st = ChangePeers(logicPoolId, - copysetId, - conf, - newConf, - options); + butil::Status st = + ChangePeers(logicPoolId, copysetId, conf, newConf, options); ASSERT_TRUE(st.ok()) << st.error_str(); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, // b loop); diff --git a/test/integration/raft/raft_log_replication_test.cpp b/test/integration/raft/raft_log_replication_test.cpp index f6a39c3436..15b731e329 100644 --- a/test/integration/raft/raft_log_replication_test.cpp +++ b/test/integration/raft/raft_log_replication_test.cpp @@ -21,121 +21,91 @@ */ #include -#include #include +#include -#include #include +#include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "test/integration/common/peer_cluster.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; const char kRaftLogRepTestLogDir[] = "./runlog/RaftLogRep"; const char* kFakeMdsAddr = 
"127.0.0.1:9070"; static constexpr uint32_t kOpRequestAlignSize = 4096; static const char* raftLogParam[5][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9071", - "-chunkServerStoreUri=local://./9071/", - "-chunkServerMetaUri=local://./9071/chunkserver.dat", - "-copySetUri=local://./9071/copysets", - "-raftSnapshotUri=curve://./9071/copysets", - "-raftLogUri=curve://./9071/copysets", - "-recycleUri=local://./9071/recycler", - "-chunkFilePoolDir=./9071/chunkfilepool/", - "-chunkFilePoolMetaPath=./9071/chunkfilepool.meta", - "-walFilePoolDir=./9071/walfilepool/", - "-walFilePoolMetaPath=./9071/walfilepool.meta", - "-conf=./9071/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9072", - "-chunkServerStoreUri=local://./9072/", - "-chunkServerMetaUri=local://./9072/chunkserver.dat", - "-copySetUri=local://./9072/copysets", - "-raftSnapshotUri=curve://./9072/copysets", - "-raftLogUri=curve://./9072/copysets", - "-recycleUri=local://./9072/recycler", - "-chunkFilePoolDir=./9072/chunkfilepool/", - "-chunkFilePoolMetaPath=./9072/chunkfilepool.meta", - "-walFilePoolDir=./9072/walfilepool/", - "-walFilePoolMetaPath=./9072/walfilepool.meta", - "-conf=./9072/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9073", - "-chunkServerStoreUri=local://./9073/", - "-chunkServerMetaUri=local://./9073/chunkserver.dat", - "-copySetUri=local://./9073/copysets", - "-raftSnapshotUri=curve://./9073/copysets", - "-raftLogUri=curve://./9073/copysets", - "-recycleUri=local://./9073/recycler", - "-chunkFilePoolDir=./9073/chunkfilepool/", - "-chunkFilePoolMetaPath=./9073/chunkfilepool.meta", - "-walFilePoolDir=./9073/walfilepool/", - "-walFilePoolMetaPath=./9073/walfilepool.meta", - "-conf=./9073/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9074", - "-chunkServerStoreUri=local://./9074/", - "-chunkServerMetaUri=local://./9074/chunkserver.dat", - "-copySetUri=local://./9074/copysets", - "-raftSnapshotUri=curve://./9074/copysets", - "-raftLogUri=curve://./9074/copysets", - "-recycleUri=local://./9074/recycler", - "-chunkFilePoolDir=./9074/chunkfilepool/", - "-chunkFilePoolMetaPath=./9074/chunkfilepool.meta", - "-walFilePoolDir=./9074/walfilepool/", - "-walFilePoolMetaPath=./9074/walfilepool.meta", - "-conf=./9074/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9075", - "-chunkServerStoreUri=local://./9075/", - "-chunkServerMetaUri=local://./9075/chunkserver.dat", - "-copySetUri=local://./9075/copysets", - "-raftSnapshotUri=curve://./9075/copysets", - "-raftLogUri=curve://./9075/copysets", - "-recycleUri=local://./9075/recycler", - "-chunkFilePoolDir=./9075/chunkfilepool/", - "-chunkFilePoolMetaPath=./9075/chunkfilepool.meta", - "-walFilePoolDir=./9075/walfilepool/", - "-walFilePoolMetaPath=./9075/walfilepool.meta", - "-conf=./9075/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9071", + "-chunkServerStoreUri=local://./9071/", + "-chunkServerMetaUri=local://./9071/chunkserver.dat", + "-copySetUri=local://./9071/copysets", + "-raftSnapshotUri=curve://./9071/copysets", + "-raftLogUri=curve://./9071/copysets", + "-recycleUri=local://./9071/recycler", + 
"-chunkFilePoolDir=./9071/chunkfilepool/", + "-chunkFilePoolMetaPath=./9071/chunkfilepool.meta", + "-walFilePoolDir=./9071/walfilepool/", + "-walFilePoolMetaPath=./9071/walfilepool.meta", + "-conf=./9071/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9072", + "-chunkServerStoreUri=local://./9072/", + "-chunkServerMetaUri=local://./9072/chunkserver.dat", + "-copySetUri=local://./9072/copysets", + "-raftSnapshotUri=curve://./9072/copysets", + "-raftLogUri=curve://./9072/copysets", + "-recycleUri=local://./9072/recycler", + "-chunkFilePoolDir=./9072/chunkfilepool/", + "-chunkFilePoolMetaPath=./9072/chunkfilepool.meta", + "-walFilePoolDir=./9072/walfilepool/", + "-walFilePoolMetaPath=./9072/walfilepool.meta", + "-conf=./9072/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9073", + "-chunkServerStoreUri=local://./9073/", + "-chunkServerMetaUri=local://./9073/chunkserver.dat", + "-copySetUri=local://./9073/copysets", + "-raftSnapshotUri=curve://./9073/copysets", + "-raftLogUri=curve://./9073/copysets", + "-recycleUri=local://./9073/recycler", + "-chunkFilePoolDir=./9073/chunkfilepool/", + "-chunkFilePoolMetaPath=./9073/chunkfilepool.meta", + "-walFilePoolDir=./9073/walfilepool/", + "-walFilePoolMetaPath=./9073/walfilepool.meta", + "-conf=./9073/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9074", + "-chunkServerStoreUri=local://./9074/", + "-chunkServerMetaUri=local://./9074/chunkserver.dat", + "-copySetUri=local://./9074/copysets", + "-raftSnapshotUri=curve://./9074/copysets", + "-raftLogUri=curve://./9074/copysets", + "-recycleUri=local://./9074/recycler", + "-chunkFilePoolDir=./9074/chunkfilepool/", + "-chunkFilePoolMetaPath=./9074/chunkfilepool.meta", + "-walFilePoolDir=./9074/walfilepool/", + "-walFilePoolMetaPath=./9074/walfilepool.meta", + "-conf=./9074/chunkserver.conf", "-raft_sync_segments=true", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9075", + "-chunkServerStoreUri=local://./9075/", + "-chunkServerMetaUri=local://./9075/chunkserver.dat", + "-copySetUri=local://./9075/copysets", + "-raftSnapshotUri=curve://./9075/copysets", + "-raftLogUri=curve://./9075/copysets", + "-recycleUri=local://./9075/recycler", + "-chunkFilePoolDir=./9075/chunkfilepool/", + "-chunkFilePoolMetaPath=./9075/chunkfilepool.meta", + "-walFilePoolDir=./9075/walfilepool/", + "-walFilePoolMetaPath=./9075/walfilepool.meta", + "-conf=./9075/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; class RaftLogReplicationTest : public testing::Test { @@ -177,39 +147,34 @@ class RaftLogReplicationTest : public testing::Test { ASSERT_TRUE(cg4.Init("9074")); ASSERT_TRUE(cg5.Init("9075")); cg1.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg1.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg1.SetKV("chunkserver.common.logDir", - kRaftLogRepTestLogDir); + std::to_string(snapshotIntervalS)); + cg1.SetKV("chunkserver.common.logDir", kRaftLogRepTestLogDir); cg1.SetKV("mds.listen.addr", kFakeMdsAddr); cg2.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg2.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg2.SetKV("chunkserver.common.logDir", - kRaftLogRepTestLogDir); + 
std::to_string(snapshotIntervalS)); cg2.SetKV("chunkserver.common.logDir", kRaftLogRepTestLogDir); cg2.SetKV("mds.listen.addr", kFakeMdsAddr); cg3.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg3.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg3.SetKV("chunkserver.common.logDir", - kRaftLogRepTestLogDir); + std::to_string(snapshotIntervalS)); + cg3.SetKV("chunkserver.common.logDir", kRaftLogRepTestLogDir); cg3.SetKV("mds.listen.addr", kFakeMdsAddr); cg4.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg4.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg4.SetKV("chunkserver.common.logDir", - kRaftLogRepTestLogDir); + std::to_string(snapshotIntervalS)); + cg4.SetKV("chunkserver.common.logDir", kRaftLogRepTestLogDir); cg4.SetKV("mds.listen.addr", kFakeMdsAddr); cg5.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg5.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg5.SetKV("chunkserver.common.logDir", - kRaftLogRepTestLogDir); + std::to_string(snapshotIntervalS)); + cg5.SetKV("chunkserver.common.logDir", kRaftLogRepTestLogDir); cg5.SetKV("mds.listen.addr", kFakeMdsAddr); ASSERT_TRUE(cg1.Generate()); ASSERT_TRUE(cg2.Generate()); @@ -265,19 +230,20 @@ class RaftLogReplicationTest : public testing::Test { int electionTimeoutMs; int snapshotIntervalS; std::map paramsIndexs; - std::vector params; - // 等待多个副本数据一致的时间 + std::vector params; + // Time to wait for the replicas' data to become consistent int waitMultiReplicasBecomeConsistent; }; butil::AtExitManager atExitManager; /** - * 验证3个节点的复制组,测试隐式提交 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉2个follower - * 3. 等带step down - * 3. 拉起1个follower + * Validate replication groups for 3 nodes and test implicit commit + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up 2 followers + * 3. Wait for step down + * 4. Pull up 1 follower */ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) { LogicPoolID logicPoolId = 2; @@ -287,19 +253,15 @@ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) { char ch = 'a'; int loop = 10; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -310,62 +272,38 @@ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop);
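A note on what the steps below exercise: raft only commits an entry once a majority of the group has appended it, so with two of three peers down the next write stays appended but uncommitted, and it becomes committed implicitly once a quorum re-forms and the leader commits in its new term. A standalone sketch of the majority rule; IsCommitted is an illustrative name, not part of this harness:

#include <cstddef>

// An entry is committed once a majority of the group (leader included)
// has acknowledged it -- the rule behind the implicit-commit steps below.
constexpr bool IsCommitted(std::size_t acks, std::size_t clusterSize) {
    return acks >= clusterSize / 2 + 1;
}

static_assert(!IsCommitted(1, 3), "a lone leader cannot commit");
static_assert(IsCommitted(2, 3), "leader plus one follower can");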
- // 2. 挂掉2个Follower + // 2. Hang 2 Followers std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 2); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[1])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - 1); + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, 1); - // 3. 等待step down,等待2个选举超时,保证一定step down + // 3. Wait for the step down; waiting two election timeouts guarantees + // that it happens ::usleep(1000 * electionTimeoutMs * 2); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 4. 拉起1个follower + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 4. Pull up 1 follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); Peer newLeader; ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // new leader就是old leader + // The new leader is the old leader ASSERT_STREQ(leaderPeer.address().c_str(), newLeader.address().c_str()); - // read step down之前append进去的log entry,测试隐式提交 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + // Read the log entries appended before the step down to test + // implicit commit. + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, 1); + + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; @@ -378,11 +316,12 @@ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) { }
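The next test looks at the opposite outcome: entries that never reached a majority can be thrown away. When a peer rejoins and its log tail conflicts with the current leader's log, the conflicting suffix is truncated, which is why ReadNotVerify is used below on the lost write. A hedged, self-contained model of that truncation over plain term vectors (an illustration, not the braft implementation):

#include <cstdint>

#include <vector>

struct Entry {
    uint64_t term;
};

// Keep the longest prefix on which follower and leader agree and drop the
// conflicting tail; such a tail is necessarily uncommitted.
inline std::size_t TruncateConflicts(std::vector<Entry>* follower,
                                     const std::vector<Entry>& leader) {
    std::size_t i = 0;
    while (i < follower->size() && i < leader.size() &&
           (*follower)[i].term == leader[i].term) {
        ++i;
    }
    follower->resize(i);
    return i;  // first index that still needs replication
}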
/** - * 验证3个节点的复制组,测试日志截断 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉2个follower - * 3. 挂掉leader - * 3. 拉起2个follower + * Verify the replication groups of three nodes and test log truncation + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up 2 followers + * 3. Hang up the leader + * 4. Pull up 2 followers */ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { LogicPoolID logicPoolId = 2; @@ -392,19 +331,15 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { char ch = 'a'; int loop = 10; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -415,33 +350,23 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉2个Follower + // 2. Hang 2 Followers std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 2); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[1])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - 2); + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, 2); - // 3. 挂掉leader + // 3. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer oldLeader = leaderPeer; - // 4. 拉起2个follower + // 4. Pull up 2 followers ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.StartPeer(followerPeers[1], @@ -449,22 +374,12 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // 日志截断 - ReadNotVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Log truncation + ReadNotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, 2); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; @@ -477,12 +392,14 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { }
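The following test restarts a follower that has been down across several leader terms. Conceptually, the term gap itself is harmless; what matters is whether the leader still holds the log entries the follower needs or has already compacted them into a snapshot. A small sketch of that decision; PlanCatchUp and its types are hypothetical and only illustrate the rule:

#include <cstdint>

struct CatchUpPlan {
    bool useSnapshot;    // send an install snapshot first
    uint64_t fromIndex;  // otherwise stream log entries from here
};

inline CatchUpPlan PlanCatchUp(uint64_t followerLastIndex,
                               uint64_t leaderFirstKeptIndex) {
    if (followerLastIndex + 1 < leaderFirstKeptIndex) {
        // The entries the follower needs were already compacted away.
        return {true, 0};
    }
    return {false, followerLastIndex + 1};
}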
/** - * 验证3个节点的复制组,测试向落后多个term的follower复制日志 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉一个follower - * 3. 挂掉leader,等待2个ET重启 - * 4. 挂掉leader,等待2个ET重启 - * 3. 拉起挂掉的follower + * Verify the replication group of three nodes, and test copying logs to + * followers who fall behind multiple terms + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up a follower + * 3. Hang up the leader and wait for 2 ETs to restart + * 4. Hang up the leader and wait for 2 ETs to restart + * 5. Pull up the follower that was shut down */ TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) { LogicPoolID logicPoolId = 2; @@ -492,19 +409,15 @@ TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) { char ch = 'a'; int loop = 10; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -515,89 +428,64 @@ TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉1个Follower + // 2. Hang up 1 Follower std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 3. 挂掉leader,等待2个ET重启 + // 3. Hang up the leader and wait for 2 ETs to restart ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); ::usleep(1000 * electionTimeoutMs * 2); - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 挂掉leader,等待2个ET重启 + // 4. Hang up the leader and wait for 2 ETs to restart ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); ::usleep(1000 * electionTimeoutMs * 2); - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 5. 拉起挂掉的follower + // 5. Pull up the follower that was shut down ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 多等一会,保证安装快照成功 + // Wait a little longer to ensure successful installation of the snapshot ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证4个成员的复制组日志复制 - * 1. 4个成员正常启动 - * 2. 挂掉leader - * 3. leader拉起来 - * 4. 挂1一个follower - * 5. follower拉起来 - * 6. 挂2个follower - * 7. 拉起1个follower - * 8. 挂掉leader - * 9. 拉起上一步挂的leader - * 10. 挂掉leader和两个follower - * 11. 逐个拉起来 - * 12. 挂掉3个follower - * 13. 逐个拉起来 + * Verify replication group log replication for 4 members + * 1. 4 members started normally + * 2. Hang up the leader + * 3. Pull up the leader + * 4. Hang 1 follower + * 5. Pull up the follower + * 6. Hang 2 followers + * 7. Pull up 1 follower + * 8. Hang up the leader + * 9. Pull up the leader from the previous step + * 10. Hang up the leader and two followers + * 11. Pull up one by one + * 12. Hang up three followers + * 13. Pull up one by one */ TEST_F(RaftLogReplicationTest, FourNodeKill) { LogicPoolID logicPoolId = 2; @@ -607,7 +495,7 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { char ch = 'a'; int loop = 10; - // 1. 启动4个成员的复制组 + // 1. 
Start a replication group of 4 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -615,12 +503,8 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { peers.push_back(peer2); peers.push_back(peer3); peers.push_back(peer4); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -632,124 +516,81 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // a loop); - // 2. 挂掉leader + // 2. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); Peer newLeader; ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); ASSERT_EQ(0, leaderId.parse(newLeader.address())); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // b loop); - - // 3. old leader拉起来 - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + // 3. Pull up the old leader + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // c loop); - // 4. 挂1一个follower + // 4. Hang 1 follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 3); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // d loop); - // 5. follower拉起来 + // 5. Pull up the follower ASSERT_EQ(0, cluster.StartPeer(followerPeers1[0], PeerCluster::PeerToId(followerPeers1[0]))); ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // e loop); - // 6. 挂2个follower + // 6. Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 3); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers2[1])); - WriteVerifyNotAvailable(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // f 1); - // 7. 拉起1个follower + // 7. 
Pull up 1 follower ASSERT_EQ(0, cluster.StartPeer(followerPeers2[0], PeerCluster::PeerToId(followerPeers2[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // g loop); - // 8. 挂掉leader + // 8. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 9. 拉起上一步挂的leader - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 9. Pull up the leader from the previous step + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // h loop); - // 10. 挂掉leader和两个follower + // 10. Hang up the leader and two followers ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -758,117 +599,81 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { shutdownFollower = followerPeers2[2]; } ASSERT_EQ(0, cluster.ShutdownPeer(shutdownFollower)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ::usleep(1000 * electionTimeoutMs * 2); - // 11. 逐个拉起来 - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + // 11. Pull up one by one + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.StartPeer(shutdownFollower, PeerCluster::PeerToId(shutdownFollower))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // i loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers2[1], PeerCluster::PeerToId(followerPeers2[1]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // i loop); - // 12. 挂掉3个follower + // 12. Hang up three followers std::vector followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[1])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[2])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // j 1); ::usleep(1000 * electionTimeoutMs * 2); - // 13. 逐个拉起来 + // 13. 
Pull up one by one ASSERT_EQ(0, cluster.StartPeer(followerPeers3[0], PeerCluster::PeerToId(followerPeers3[0]))); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.StartPeer(followerPeers3[1], PeerCluster::PeerToId(followerPeers3[1]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // k loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers3[2], PeerCluster::PeerToId(followerPeers3[2]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); }
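FourNodeHang below mirrors FourNodeKill step for step with a different fault style: ShutdownPeer/StartPeer model a crashed and restarted process, while HangPeer/SignalPeer model a process that stays up but stops responding. A compact sketch of the pairing, generic over the cluster type; InjectFault is an illustrative helper, not harness API:

enum class FaultStyle { kKill, kHang };

// Inject a fault on 'peer' using the matching PeerCluster call:
// ShutdownPeer for a crash, HangPeer for an unresponsive process.
template <typename Cluster, typename Peer>
int InjectFault(Cluster* cluster, const Peer& peer, FaultStyle style) {
    return style == FaultStyle::kKill ? cluster->ShutdownPeer(peer)
                                      : cluster->HangPeer(peer);
}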
/** - * 验证4个成员的复制组日志复制 - * 1. 4个成员正常启动 - * 2. hang leader - * 3. 恢复leader - * 4. hang1一个follower - * 5. 恢复follower - * 6. hang2个follower - * 7. 恢复1个follower - * 8. hangleader - * 9. hang上一步hang的leader - * 10. hang leader和两个follower - * 11. 逐个恢复 - * 12. hang3个follower - * 13. 逐个恢复 + * Verify replication group log replication for 4 members + * 1. 4 members started normally + * 2. Hang leader + * 3. Restore leader + * 4. Hang 1 follower + * 5. Restore follower + * 6. Hang 2 followers + * 7. Restore 1 follower + * 8. Hang leader + * 9. Restore the leader hung in the previous step + * 10. Hang leader and two followers + * 11. Restore one by one + * 12. Hang 3 followers + * 13. Restore one by one */ TEST_F(RaftLogReplicationTest, FourNodeHang) { LogicPoolID logicPoolId = 2; @@ -878,7 +683,7 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { char ch = 'a'; int loop = 10; - // 1. 启动4个成员的复制组 + // 1. Start a replication group of 4 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -886,12 +691,8 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { peers.push_back(peer2); peers.push_back(peer3); peers.push_back(peer4); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -903,119 +704,76 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // a loop); // 2. hang leader ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); Peer oldLeader = leaderPeer; - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); Peer newLeader; ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); ASSERT_EQ(0, leaderId.parse(newLeader.address())); ASSERT_STRNE(oldLeader.address().c_str(), newLeader.address().c_str()); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // b loop); - - // 3. 恢复old leader + // 3. Restore old leader ASSERT_EQ(0, cluster.SignalPeer(oldLeader)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // c loop); - // 4. hang 1一个follower + // 4. Hang 1 follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 1); ASSERT_EQ(0, cluster.HangPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // d loop); - // 5. 恢复follower + // 5. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // e loop); - // 6. hang 2个follower + // 6. Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 3); ASSERT_EQ(0, cluster.HangPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.HangPeer(followerPeers2[1])); - WriteVerifyNotAvailable(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // f 1); -// 7. 恢复1个follower + // 7. Restore 1 follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // g loop); // 8. hang leader ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 9. 恢复上一步挂的leader + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 9. Restore the leader hung in the previous step ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // h loop); - // 10. hang leader和两个follower + // 10. 
Hang leader and two followers ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -1024,107 +782,70 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { shutdownFollower = followerPeers2[2]; } ASSERT_EQ(0, cluster.HangPeer(shutdownFollower)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ::usleep(1000 * electionTimeoutMs * 2); - // 11. 逐个恢复 + // 11. Restore one by one ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.SignalPeer(shutdownFollower)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // i loop); ASSERT_EQ(0, cluster.SignalPeer(followerPeers2[1])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // j loop); - // 12. hang 3个follower + // 12. Hang 3 followers std::vector followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[1])); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[2])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // k 1); - - // 13. 逐个恢复 + // 13. Restore one by one ::usleep(1000 * electionTimeoutMs * 2); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[0])); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[1])); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // l loop); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[2])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); }
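Why the five-member test below can keep serving writes with two followers down (its step 6 uses WriteThenReadVerify, where the four-node tests above needed WriteVerifyNotAvailable): a raft majority is n/2 + 1, so a fourth member adds no fault tolerance over three, while a fifth tolerates two failures. In standalone form:

// Number of simultaneous peer failures a group of n raft members survives.
constexpr int TolerableFailures(int n) { return n - (n / 2 + 1); }

static_assert(TolerableFailures(3) == 1, "3 nodes survive 1 fault");
static_assert(TolerableFailures(4) == 1, "a 4th node adds no fault budget");
static_assert(TolerableFailures(5) == 2, "5 nodes survive 2 faults");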
/** - * 验证5个成员的复制组日志复制 - * 1. 5个成员正常启动 - * 2. 挂 leader - * 3. 恢复leader - * 4. 挂1一个follower - * 5. 恢复follower - * 6. 挂2个follower - * 7. 恢复1个follower - * 8. 挂leader - * 9. 恢复一步挂的leader - * 10. 挂leader和两个follower - * 11. 逐个恢复 - * 12. 挂3个follower - * 13. 逐个恢复 + * Verify replication group log replication for 5 members + * 1. 5 members started normally + * 2. Hang the leader + * 3. Restore leader + * 4. Hang 1 follower + * 5. Restore follower + * 6. Hang 2 followers + * 7. Restore 1 follower + * 8. Hang the leader + * 9. Restore the leader killed in the previous step + * 10. Hang the leader and two followers + * 11. Restore one by one + * 12. Hang 3 followers + * 13. Restore one by one */ TEST_F(RaftLogReplicationTest, FiveNodeKill) { LogicPoolID logicPoolId = 2; @@ -1134,7 +855,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { char ch = 'a'; int loop = 10; - // 1. 启动5个成员的复制组 + // 1. Start a replication group of 5 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1143,12 +864,8 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { peers.push_back(peer3); peers.push_back(peer4); peers.push_back(peer5); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1161,122 +878,79 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // a loop); - // 2. 挂掉leader + // 2. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); Peer newLeader; ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); ASSERT_EQ(0, leaderId.parse(newLeader.address())); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // b loop); - - // 3. old leader拉起来 - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + // 3. Pull up the old leader + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // c loop); - // 4. 挂1一个follower + // 4. Hang 1 follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 1); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // d loop); - // 5. follower拉起来 + // 5. Pull up the follower ASSERT_EQ(0, cluster.StartPeer(followerPeers1[0], PeerCluster::PeerToId(followerPeers1[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // e loop); - // 6. 挂2个follower + // 6. Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 4); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers2[1])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // f loop); - // 7. 拉起1个follower + // 7. 
Pull up 1 follower ASSERT_EQ(0, cluster.StartPeer(followerPeers2[0], PeerCluster::PeerToId(followerPeers2[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // g loop); - // 8. 挂掉leader + // 8. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(newLeader)); - ReadVerifyNotAvailable(newLeader, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 9. 拉起上一步挂的leader - ASSERT_EQ(0, cluster.StartPeer(newLeader, - PeerCluster::PeerToId(newLeader))); + ReadVerifyNotAvailable(newLeader, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 9. Pull up the leader from the previous step + ASSERT_EQ(0, + cluster.StartPeer(newLeader, PeerCluster::PeerToId(newLeader))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // h loop); - // 10. 挂掉leader和两个follower + // 10. Hang up the leader and two followers ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -1285,113 +959,78 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { shutdownFollower = followerPeers2[2]; } ASSERT_EQ(0, cluster.ShutdownPeer(shutdownFollower)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 11. 逐个拉起来 - ASSERT_EQ(0, cluster.StartPeer(leaderPeer, - PeerCluster::PeerToId(leaderPeer))); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 11. Pull up one by one + ASSERT_EQ(0, + cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // i loop); ASSERT_EQ(0, cluster.StartPeer(shutdownFollower, PeerCluster::PeerToId(shutdownFollower))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // j loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers2[1], PeerCluster::PeerToId(followerPeers2[1]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // k loop); - // 12. 挂掉3个follower + // 12. Hang up three followers std::vector followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[1])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers3[2])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // l 1); - - // 13. 逐个拉起来 + // 13. 
Pull up one by one ASSERT_EQ(0, cluster.StartPeer(followerPeers3[0], PeerCluster::PeerToId(followerPeers3[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // m loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers3[1], PeerCluster::PeerToId(followerPeers3[1]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // n loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers3[2], PeerCluster::PeerToId(followerPeers3[2]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // o loop); } - /** - 验证5个成员的复制组日志复制 - 1. 5个成员正常启动 - 2. hang leader - 3. 恢复leader - 4. hang 1一个follower - 5. 恢复follower - 6. hang 2个follower - 7. 恢复1个follower - 8. hang leader - 9. hang上一步hang的leader - 10. hang leader和两个follower - 11. 逐个恢复 - 12. hang3个follower - 13. 逐个恢复 + * Verify replication group log replication for 5 members + * 1. 5 members started normally + * 2. Hang leader + * 3. Restore leader + * 4. Hang one follower + * 5. Restore follower + * 6. Hang 2 followers + * 7. Restore 1 follower + * 8. Hang leader + * 9. Restore the leader hung in the previous step + * 10. Hang leader and two followers + * 11. Restore one by one + * 12. Hang 3 followers + * 13. Restore one by one */ TEST_F(RaftLogReplicationTest, FiveNodeHang) { LogicPoolID logicPoolId = 2; @@ -1401,7 +1040,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { char ch = 'a'; int loop = 10; - // 1. 启动5个成员的复制组 + // 1. Start a replication group of 5 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1410,12 +1049,8 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { peers.push_back(peer3); peers.push_back(peer4); peers.push_back(peer5); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1428,115 +1063,72 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // a loop); // 2. hang leader ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); Peer newLeader; ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); ASSERT_EQ(0, leaderId.parse(newLeader.address())); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // b loop);
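    // Editor's illustration (not in the original patch): the outcomes asserted
    // in steps 3-13 below follow five-member majority arithmetic. These
    // constants are hypothetical and exist only to make the rule explicit.
    constexpr int kMembers = 5;
    constexpr int kQuorum = kMembers / 2 + 1;  // 3 live peers keep the group writable
    static_assert(kQuorum == 3, "hanging up to 2 of 5 peers keeps the copyset available");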
- - // 3. Restore old leader ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // c loop); - // 4. hang 1一个follower + // 4. Hang one follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 1); ASSERT_EQ(0, cluster.HangPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // d loop); - // 5. 恢复follower + // 5. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers1[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // e loop); - // 6. hang 2个follower + // 6. Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 4); ASSERT_EQ(0, cluster.HangPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.HangPeer(followerPeers2[1])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // f loop); - // 7. 恢复1个follower + // 7. Restore 1 follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers2[0])); - WriteThenReadVerify(newLeader, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(newLeader, logicPoolId, copysetId, chunkId, length, ch++, // g loop); // 8. hang leader ASSERT_EQ(0, cluster.HangPeer(newLeader)); - ReadVerifyNotAvailable(newLeader, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 9. 恢复上一步挂的leader + ReadVerifyNotAvailable(newLeader, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 9. Restore the leader hung in the previous step ASSERT_EQ(0, cluster.SignalPeer(newLeader)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // h loop); - // 10. hang leader和两个follower + // 10. Hang leader and two followers ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -1545,83 +1137,49 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { shutdownFollower = followerPeers2[2]; } ASSERT_EQ(0, cluster.HangPeer(shutdownFollower)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 11. 逐个恢复 + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 11. 
Restore one by one ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // i loop); ASSERT_EQ(0, cluster.SignalPeer(shutdownFollower)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // j loop); ASSERT_EQ(0, cluster.SignalPeer(followerPeers2[1])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // k loop); - // 12. hang 3个follower + // 12. Hang 3 followers std::vector followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[1])); ASSERT_EQ(0, cluster.HangPeer(followerPeers3[2])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // l 1); - - // 13. 逐个恢复 + // 13. Restore one by one ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // m loop); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[1])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // n loop); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[2])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch++, // o loop); } diff --git a/test/integration/raft/raft_snapshot_test.cpp b/test/integration/raft/raft_snapshot_test.cpp index a8e57aaa3f..d6cd2981dc 100644 --- a/test/integration/raft/raft_snapshot_test.cpp +++ b/test/integration/raft/raft_snapshot_test.cpp @@ -21,102 +21,78 @@ */ #include -#include #include +#include #include -#include "test/integration/common/peer_cluster.h" -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli2.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; const char kRaftSnapshotTestLogDir[] = "./runlog/RaftSnapshot"; const char* kFakeMdsAddr = "127.0.0.1:9320"; static constexpr uint32_t kOpRequestAlignSize = 4096; -static const char *raftVoteParam[4][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9321", - "-chunkServerStoreUri=local://./9321/", - "-chunkServerMetaUri=local://./9321/chunkserver.dat", - "-copySetUri=local://./9321/copysets", - "-raftSnapshotUri=curve://./9321/copysets", - "-recycleUri=local://./9321/recycler", - "-chunkFilePoolDir=./9321/chunkfilepool/", - "-chunkFilePoolMetaPath=./9321/chunkfilepool.meta", - "-conf=./9321/chunkserver.conf", - 
"-raft_sync_segments=true", - "-raftLogUri=curve://./9321/copysets", - "-walFilePoolDir=./9321/walfilepool/", - "-walFilePoolMetaPath=./9321/walfilepool.meta", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9322", - "-chunkServerStoreUri=local://./9322/", - "-chunkServerMetaUri=local://./9322/chunkserver.dat", - "-copySetUri=local://./9322/copysets", - "-raftSnapshotUri=curve://./9322/copysets", - "-recycleUri=local://./9322/recycler", - "-chunkFilePoolDir=./9322/chunkfilepool/", - "-chunkFilePoolMetaPath=./9322/chunkfilepool.meta", - "-conf=./9322/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9322/copysets", - "-walFilePoolDir=./9322/walfilepool/", - "-walFilePoolMetaPath=./9322/walfilepool.meta", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9323", - "-chunkServerStoreUri=local://./9323/", - "-chunkServerMetaUri=local://./9323/chunkserver.dat", - "-copySetUri=local://./9323/copysets", - "-raftSnapshotUri=curve://./9323/copysets", - "-recycleUri=local://./9323/recycler", - "-chunkFilePoolDir=./9323/chunkfilepool/", - "-chunkFilePoolMetaPath=./9323/chunkfilepool.meta", - "-conf=./9323/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9323/copysets", - "-walFilePoolDir=./9323/walfilepool/", - "-walFilePoolMetaPath=./9323/walfilepool.meta", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9324", - "-chunkServerStoreUri=local://./9324/", - "-chunkServerMetaUri=local://./9324/chunkserver.dat", - "-copySetUri=local://./9324/copysets", - "-raftSnapshotUri=curve://./9324/copysets", - "-recycleUri=local://./9324/recycler", - "-chunkFilePoolDir=./9324/chunkfilepool/", - "-chunkFilePoolMetaPath=./9324/chunkfilepool.meta", - "-conf=./9324/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9324/copysets", - "-walFilePoolDir=./9324/walfilepool/", - "-walFilePoolMetaPath=./9324/walfilepool.meta", - NULL - }, +static const char* raftVoteParam[4][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9321", + "-chunkServerStoreUri=local://./9321/", + "-chunkServerMetaUri=local://./9321/chunkserver.dat", + "-copySetUri=local://./9321/copysets", + "-raftSnapshotUri=curve://./9321/copysets", + "-recycleUri=local://./9321/recycler", + "-chunkFilePoolDir=./9321/chunkfilepool/", + "-chunkFilePoolMetaPath=./9321/chunkfilepool.meta", + "-conf=./9321/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9321/copysets", + "-walFilePoolDir=./9321/walfilepool/", + "-walFilePoolMetaPath=./9321/walfilepool.meta", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9322", + "-chunkServerStoreUri=local://./9322/", + "-chunkServerMetaUri=local://./9322/chunkserver.dat", + "-copySetUri=local://./9322/copysets", + "-raftSnapshotUri=curve://./9322/copysets", + "-recycleUri=local://./9322/recycler", + "-chunkFilePoolDir=./9322/chunkfilepool/", + "-chunkFilePoolMetaPath=./9322/chunkfilepool.meta", + "-conf=./9322/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9322/copysets", + "-walFilePoolDir=./9322/walfilepool/", + "-walFilePoolMetaPath=./9322/walfilepool.meta", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9323", + "-chunkServerStoreUri=local://./9323/", + "-chunkServerMetaUri=local://./9323/chunkserver.dat", + "-copySetUri=local://./9323/copysets", + "-raftSnapshotUri=curve://./9323/copysets", + "-recycleUri=local://./9323/recycler", + 
"-chunkFilePoolDir=./9323/chunkfilepool/", + "-chunkFilePoolMetaPath=./9323/chunkfilepool.meta", + "-conf=./9323/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9323/copysets", + "-walFilePoolDir=./9323/walfilepool/", + "-walFilePoolMetaPath=./9323/walfilepool.meta", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9324", + "-chunkServerStoreUri=local://./9324/", + "-chunkServerMetaUri=local://./9324/chunkserver.dat", + "-copySetUri=local://./9324/copysets", + "-raftSnapshotUri=curve://./9324/copysets", + "-recycleUri=local://./9324/recycler", + "-chunkFilePoolDir=./9324/chunkfilepool/", + "-chunkFilePoolMetaPath=./9324/chunkfilepool.meta", + "-conf=./9324/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9324/copysets", + "-walFilePoolDir=./9324/walfilepool/", + "-walFilePoolMetaPath=./9324/walfilepool.meta", NULL}, }; class RaftSnapshotTest : public testing::Test { @@ -152,32 +128,28 @@ class RaftSnapshotTest : public testing::Test { ASSERT_TRUE(cg3_.Init("9323")); ASSERT_TRUE(cg4_.Init("9324")); cg1_.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs_)); + std::to_string(electionTimeoutMs_)); cg1_.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS_)); - cg1_.SetKV("chunkserver.common.logDir", - kRaftSnapshotTestLogDir); + std::to_string(snapshotIntervalS_)); + cg1_.SetKV("chunkserver.common.logDir", kRaftSnapshotTestLogDir); cg1_.SetKV("mds.listen.addr", kFakeMdsAddr); cg2_.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs_)); + std::to_string(electionTimeoutMs_)); cg2_.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS_)); - cg2_.SetKV("chunkserver.common.logDir", - kRaftSnapshotTestLogDir); + std::to_string(snapshotIntervalS_)); + cg2_.SetKV("chunkserver.common.logDir", kRaftSnapshotTestLogDir); cg2_.SetKV("mds.listen.addr", kFakeMdsAddr); cg3_.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs_)); + std::to_string(electionTimeoutMs_)); cg3_.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS_)); - cg3_.SetKV("chunkserver.common.logDir", - kRaftSnapshotTestLogDir); + std::to_string(snapshotIntervalS_)); + cg3_.SetKV("chunkserver.common.logDir", kRaftSnapshotTestLogDir); cg3_.SetKV("mds.listen.addr", kFakeMdsAddr); cg4_.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs_)); + std::to_string(electionTimeoutMs_)); cg4_.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS_)); - cg4_.SetKV("chunkserver.common.logDir", - kRaftSnapshotTestLogDir); + std::to_string(snapshotIntervalS_)); + cg4_.SetKV("chunkserver.common.logDir", kRaftSnapshotTestLogDir); cg4_.SetKV("mds.listen.addr", kFakeMdsAddr); ASSERT_TRUE(cg1_.Generate()); ASSERT_TRUE(cg2_.Generate()); @@ -194,7 +166,7 @@ class RaftSnapshotTest : public testing::Test { params_.push_back(const_cast(raftVoteParam[2])); params_.push_back(const_cast(raftVoteParam[3])); - // 配置默认raft client option + // Configure default raft client option defaultCliOpt_.max_retry = 3; defaultCliOpt_.timeout_ms = 10000; } @@ -232,20 +204,20 @@ class RaftSnapshotTest : public testing::Test { braft::cli::CliOptions defaultCliOpt_; std::map paramsIndexs_; - std::vector params_; + std::vector params_; }; - /** - * 验证连续通过快照恢复copyset - * 1.创建3个副本的复制组 - * 2.挂掉一个follower - * 3.写入数据,并等待raft snapshot 产生 - * 4.启动挂掉的follower,使其通过snapshot恢复 - * 5.transfer leader到刚启动的follower,读数据验证 - * 6.remove old leader,主要为了删除其copyset目录 - * 
7.添加新的peer,使其通过快照加载数据 - * 8.transfer leader到新加入的peer,读数据验证 + * Verify continuous recovery of a copyset through snapshots + * 1. Create a replication group of 3 replicas + * 2. Hang up a follower + * 3. Write data and wait for a raft snapshot to be generated + * 4. Start the downed follower and let it recover through the snapshot + * 5. Transfer the leader to the newly started follower and read the data for + * verification + * 6. Remove the old leader, mainly to delete its copyset directory + * 7. Add a new peer so that it loads data through a snapshot + * 8. Transfer the leader to the newly added peer and verify by reading the + * data */ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { LogicPoolID logicPoolId = 2; @@ -261,12 +233,8 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { peers.push_back(peer2_); peers.push_back(peer3_); - PeerCluster cluster("ThreeNode-cluster", - logicPoolId, - copysetId, - peers, - params_, - paramsIndexs_); + PeerCluster cluster("ThreeNode-cluster", logicPoolId, copysetId, peers, + params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, cluster.StartPeer(peer1_, PeerCluster::PeerToId(peer1_))); ASSERT_EQ(0, cluster.StartPeer(peer2_, PeerCluster::PeerToId(peer2_))); @@ -276,7 +244,7 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); Peer oldLeader = leaderPeer; - // 挂掉一个follower + // Hang up a follower Peer shutdownPeer; if (leaderPeer.address() == peer1_.address()) { shutdownPeer = peer2_; @@ -288,21 +256,15 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); LOG(INFO) << "write 1 start"; - // 发起 read/write,产生chunk文件 - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, - loop, - initsn); + // Initiate read/write to generate a chunk file + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, + loop, initsn); LOG(INFO) << "write 1 end"; - // wait snapshot,保证能够触发打快照 - ::sleep(1.5*snapshotIntervalS_); + // wait snapshot to ensure that it can trigger a snapshot + ::sleep(1.5 * snapshotIntervalS_); - // restart, 需要从 install snapshot 恢复 + // restart, needs to be restored from install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); @@ -310,43 +272,44 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { ::sleep(3); TransferLeaderAssertSuccess(&cluster, shutdownPeer, defaultCliOpt_); leaderPeer = shutdownPeer; - // 读数据验证 - ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch, loop); + // Read data validation + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); Configuration conf = cluster.CopysetConf(); - // 删除旧leader及其目录 + // Delete the old leader and its directory butil::Status status = RemovePeer(logicPoolId, copysetId, conf, oldLeader, defaultCliOpt_); ASSERT_TRUE(status.ok()); std::string rmdir("rm -fr "); - rmdir += std::to_string(PeerCluster::PeerToId(oldLeader)); + rmdir += std::to_string(PeerCluster::PeerToId(oldLeader)); ::system(rmdir.c_str()); - // 添加新的peer - ASSERT_EQ(0, cluster.StartPeer(peer4_, - PeerCluster::PeerToId(peer4_))); + // Add a new peer + ASSERT_EQ(0, cluster.StartPeer(peer4_, PeerCluster::PeerToId(peer4_))); status = AddPeer(logicPoolId, copysetId, conf, peer4_, defaultCliOpt_); ASSERT_TRUE(status.ok()) << status; - // transfer leader 到peer4_,并读出来验证 + // Transfer leader to peer4_, and read it out for verification TransferLeaderAssertSuccess(&cluster, peer4_, defaultCliOpt_);
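    // Editor's note (illustrative assertions, not in the original patch): the
    // transfer above runs with the CLI options configured in SetUp(), i.e.
    // braft's cli calls are retried up to three times with a 10 s timeout per
    // attempt before the surrounding assertion fails.
    ASSERT_EQ(3, defaultCliOpt_.max_retry);
    ASSERT_EQ(10000, defaultCliOpt_.timeout_ms);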
leaderPeer = peer4_; - // 读数据验证 - ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch, loop); + // Read data validation + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); } /** - 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - 1. 创建3个副本的复制组 - 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - 3. shutdown 非 leader - 4. 然后 sleep 超过一个 snapshot interval,write read 数据, - 5. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot - 6. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - 7. transfer leader 到shut down 的peer 上 - 8. 在 read 之前写入的数据验证 - 9. 再 write 数据,再 read 出来验证一遍 + * Verify shutting down a non-leader node of a 3-node group, restarting it, + * and forcing it to recover from an install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for a leader to emerge, write data, then read it back for + * verification + * 3. Shut down a non-leader peer + * 4. Sleep for more than one snapshot interval, then write and read data + * 5. Sleep for more than one snapshot interval again, then write and read + * data; steps 4 and 5 ensure that at least two snapshots are taken, so + * the restarted node must recover via install snapshot + * 6. Wait for a leader to emerge, then read back and verify the previously + * written data + * 7. Transfer leadership to the peer that was shut down + * 8. Verify the previously written data by reading it + * 9. Write data again and read it out for verification */ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -362,12 +325,8 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { peers.push_back(peer2_); peers.push_back(peer3_); - PeerCluster cluster("ThreeNode-cluster", - logicPoolId, - copysetId, - peers, - params_, - paramsIndexs_); + PeerCluster cluster("ThreeNode-cluster", logicPoolId, copysetId, peers, + params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, cluster.StartPeer(peer1_, PeerCluster::PeerToId(peer1_))); ASSERT_EQ(0, cluster.StartPeer(peer2_, PeerCluster::PeerToId(peer2_))); @@ -377,22 +336,17 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); LOG(INFO) << "write 1 start"; - // 发起 read/write,产生chunk文件 - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, - loop, - initsn); + // Initiate read/write to generate a chunk file + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, + loop, initsn); LOG(INFO) << "write 1 end"; - // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作 - // 所以先睡一会,防止并发统计文件信息 + // Operations between raft replicas are not fully synchronized, so some + // replicas may lag behind; sleep briefly first to avoid concurrently + // collecting file statistics ::sleep(2); - // shutdown 某个follower + // Shut down one of the followers Peer shutdownPeer; if (leaderPeer.address() == peer1_.address()) { shutdownPeer = peer2_; @@ -403,47 +357,31 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { LOG(INFO) << "leader peer: " << leaderPeer.address(); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // wait snapshot, 保证能够触发打快照 - // 此外通过增加chunk版本号,触发chunk文件产生快照文件 - ::sleep(1.5*snapshotIntervalS_); - // 再次发起 read/write + // wait snapshot to ensure that it can trigger a snapshot + // In addition, by increasing the chunk version number, trigger the chunk + // file to generate a snapshot file + 
::sleep(1.5 * snapshotIntervalS_); + // Initiate read/write again LOG(INFO) << "write 2 start"; - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch + 1, - loop, - initsn + 1); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch + 1, loop, initsn + 1); LOG(INFO) << "write 2 end"; - // 验证chunk快照数据正确性 - ReadSnapshotVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch, + // Verify the correctness of chunk snapshot data + ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); - // wait snapshot, 保证能够触发打快照 - ::sleep(1.5*snapshotIntervalS_); + // wait snapshot to ensure that it can trigger a snapshot + ::sleep(1.5 * snapshotIntervalS_); - // restart, 需要从 install snapshot 恢复 + // restart, needs to be restored from install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); LOG(INFO) << "write 3 start"; - // 再次发起 read/write - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch + 2, - loop, - initsn + 1); + // Initiate read/write again + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch + 2, loop, initsn + 1); LOG(INFO) << "write 3 end"; @@ -451,24 +389,29 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { ::sleep(3); TransferLeaderAssertSuccess(&cluster, shutdownPeer, defaultCliOpt_); leaderPeer = shutdownPeer; - ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch + 2, loop); - ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch, loop); + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch + 2, + loop); + ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, + loop); } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,并更新写版本,产生chunk快照 - * 3. shutdown 非 leader - * 4. 然后 sleep 超过一个 snapshot interval, - * 5. 删除chunk快照,再次用新版本write 数据,产生新的chunk快照 - * 6. 然后再 sleep 超过一个 snapshot interval;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot - * 7. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 8. transfer leader 到shut down 的peer 上 - * 9. 在 read 之前写入的数据验证 + *Verify the shutdown of non leader nodes on three nodes, restart, and control + *the recovery from install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and update the write + *version to generate a chunk snapshot + * 3. Shutdown non leader + * 4. Then the sleep exceeds one snapshot interval, + * 5. Delete the chunk snapshot and write the data again with a new version to + *generate a new chunk snapshot + * 6. Then sleep more than one snapshot interval; 4,5 two-step + * It is to ensure that at least two snapshots are taken, so that when the + *node restarts again, it must pass the install snapshot + * 7. Wait for the leader to be generated, and then verify the data written + *before the read + * 8. Transfer leader to shut down peer + * 9. 
Verification of data written before read */ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { LogicPoolID logicPoolId = 2; @@ -484,12 +427,8 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { peers.push_back(peer2_); peers.push_back(peer3_); - PeerCluster cluster("ThreeNode-cluster", - logicPoolId, - copysetId, - peers, - params_, - paramsIndexs_); + PeerCluster cluster("ThreeNode-cluster", logicPoolId, copysetId, peers, + params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, cluster.StartPeer(peer1_, PeerCluster::PeerToId(peer1_))); ASSERT_EQ(0, cluster.StartPeer(peer2_, PeerCluster::PeerToId(peer2_))); @@ -499,43 +438,31 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); LOG(INFO) << "write 1 start"; - // 发起 read/write,产生chunk文件 - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Initiate read/write to generate a chunk file + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, // a - loop, - initsn); + loop, initsn); LOG(INFO) << "write 1 end"; LOG(INFO) << "write 2 start"; - // 发起 read/write,产生chunk文件,并产生快照文件 - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Initiate read/write, generate chunk files, and generate snapshot files + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ++ch, // b loop, - initsn+1); // sn = 2 - // 验证chunk快照数据正确性 - ReadSnapshotVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch-1, // a + initsn + 1); // sn = 2 + // Verify the correctness of chunk snapshot data + ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, // a loop); LOG(INFO) << "write 2 end"; - // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作 - // 所以先睡一会,防止并发统计文件信息 + // The operations between replicas within the raft are not all synchronized, + // and there may be outdated replica operations So take a nap first to + // prevent concurrent statistics of file information ::sleep(2); - // shutdown 某个follower + // shutdown a certain follower Peer shutdownPeer; if (leaderPeer.address() == peer1_.address()) { shutdownPeer = peer2_; @@ -546,41 +473,31 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { LOG(INFO) << "leader peer: " << leaderPeer.address(); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // wait snapshot, 保证能够触发打快照 - // 此外通过增加chunk版本号,触发chunk文件产生快照文件 - ::sleep(1.5*snapshotIntervalS_); + // wait snapshot to ensure that it can trigger a snapshot + // In addition, by increasing the chunk version number, trigger the chunk + // file to generate a snapshot file + ::sleep(1.5 * snapshotIntervalS_); - // 删除旧的快照 - DeleteSnapshotVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, + // Delete old snapshots + DeleteSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, initsn + 1); // csn = 2 - // 再次发起 read/write + // Initiate read/write again LOG(INFO) << "write 3 start"; - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ++ch, // c loop, initsn + 2); // sn = 3 LOG(INFO) << "write 3 end"; - // 验证chunk快照数据正确性 - ReadSnapshotVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch-1, // b + // Verify the correctness of chunk snapshot data + ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, // b loop); - 
// wait snapshot, 保证能够触发打快照 - ::sleep(1.5*snapshotIntervalS_); + // wait snapshot to ensure that it can trigger a snapshot + ::sleep(1.5 * snapshotIntervalS_); - // restart, 需要从 install snapshot 恢复 + // restart, needs to be restored from install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -589,24 +506,29 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { ::sleep(3); TransferLeaderAssertSuccess(&cluster, shutdownPeer, defaultCliOpt_); leaderPeer = shutdownPeer; - ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch, loop); - ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch-1, loop); + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); + ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, loop); } /** - * 验证curve快照转储过程当中,chunkserver存在多个copyset情况下, - * 1. 创建3个副本的复制组 - * 2. 为每个复制组的chunkserver生成新的copyset,并作为后续操作对象 - * 3. 等待 leader 产生,write 数据 - * 4. sleep 超过一个 snapshot interval,确保产生raft快照 - * 5. 更新写版本,产生chunk快照 - * 6. 然后 sleep 超过一个 snapshot interval,确保产生raft快照 - * 7. shutdown 非 leader - * 8. AddPeer添加一个新节点使其通过加载快照恢复,然后remove掉shutdown的peer - * 9. 切换leader到新添加的peer - * 10. 等待 leader 产生,然后 read 之前产生的数据和chunk快照进行验证 + * During the process of verifying the curve snapshot dump, if there are + * multiple copysets in the chunkserver, + * 1. Create a replication group of 3 replicas + * 2. Generate a new copyset for each replication group's chunkserver and use it + * as a subsequent operation object + * 3. Wait for the leader to generate and write data + * 4. If the sleep exceeds one snapshot interval, ensure that a raft snapshot is + * generated + * 5. Update the write version to generate a chunk snapshot + * 6. Then the sleep exceeds one snapshot interval to ensure that a raft + * snapshot is generated + * 7. Shutdown non leader + * 8. Add a new node to AddPeer and restore it by loading a snapshot, then + * remove the shutdown peer + * 9. Switch the leader to the newly added peer + * 10. 
Wait for the leader to be generated, then read the data and chunk + * snapshot generated before validation */ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { LogicPoolID logicPoolId = 2; @@ -622,18 +544,14 @@ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { peers.push_back(peer2_); peers.push_back(peer3_); - PeerCluster cluster("ThreeNode-cluster", - logicPoolId, - copysetId, - peers, - params_, - paramsIndexs_); + PeerCluster cluster("ThreeNode-cluster", logicPoolId, copysetId, peers, + params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, cluster.StartPeer(peer1_, PeerCluster::PeerToId(peer1_))); ASSERT_EQ(0, cluster.StartPeer(peer2_, PeerCluster::PeerToId(peer2_))); ASSERT_EQ(0, cluster.StartPeer(peer3_, PeerCluster::PeerToId(peer3_))); - // 创建新的copyset + // Create a new copyset LOG(INFO) << "create new copyset."; ++copysetId; int ret = cluster.CreateCopyset(logicPoolId, copysetId, peer1_, peers); @@ -643,57 +561,46 @@ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { ret = cluster.CreateCopyset(logicPoolId, copysetId, peer3_, peers); ASSERT_EQ(0, ret); - // 使用新的copyset作为操作对象 + // Use the new copyset as the operand cluster.SetWorkingCopyset(copysetId); Peer leaderPeer; ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); LOG(INFO) << "write 1 start"; - // 发起 read/write,产生chunk文件 - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Initiate read/write to generate a chunk file + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, // a - loop, - initsn); + loop, initsn); LOG(INFO) << "write 1 end"; - // wait snapshot, 保证能够触发打快照 - ::sleep(1.5*snapshotIntervalS_); + // Wait snapshot to ensure that it can trigger a snapshot + ::sleep(1.5 * snapshotIntervalS_); LOG(INFO) << "write 2 start"; - // 发起 read/write,产生chunk文件,并产生快照文件 - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, + // Initiate read/write, generate chunk files, and generate snapshot files + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ++ch, // b loop, - initsn+1); // sn = 2 - // 验证chunk快照数据正确性 - ReadSnapshotVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch-1, // a + initsn + 1); // sn = 2 + // Verify the correctness of chunk snapshot data + ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, // a loop); LOG(INFO) << "write 2 end"; - // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作 - // 所以先睡一会,防止并发统计文件信息 + // The operations between replicas within the raft are not all synchronized, + // and there may be outdated replica operations So take a nap first to + // prevent concurrent statistics of file information ::sleep(2); - // wait snapshot, 保证能够触发打快照 - // 通过至少两次快照,保证新加的peer通过下载快照安装 - ::sleep(1.5*snapshotIntervalS_); + // Wait snapshot to ensure that it can trigger a snapshot + // Ensure that the newly added peer is installed by downloading the snapshot + // by taking at least two snapshots + ::sleep(1.5 * snapshotIntervalS_); - // shutdown 某个follower + // Shutdown a certain follower Peer shutdownPeer; if (leaderPeer.address() == peer1_.address()) { shutdownPeer = peer2_; @@ -704,30 +611,28 @@ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { LOG(INFO) << "leader peer: " << leaderPeer.address(); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // 添加新的peer,并移除shutdown的peer + // Add a new peer and remove the shutdown peer Configuration conf 
= cluster.CopysetConf(); - ASSERT_EQ(0, cluster.StartPeer(peer4_, - PeerCluster::PeerToId(peer4_))); + ASSERT_EQ(0, cluster.StartPeer(peer4_, PeerCluster::PeerToId(peer4_))); butil::Status status = AddPeer(logicPoolId, copysetId, conf, peer4_, defaultCliOpt_); ASSERT_TRUE(status.ok()); - // 删除旧leader及其目录 + // Delete old leader and its directory status = RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, defaultCliOpt_); ASSERT_TRUE(status.ok()); std::string rmdir("rm -fr "); - rmdir += std::to_string(PeerCluster::PeerToId(shutdownPeer)); + rmdir += std::to_string(PeerCluster::PeerToId(shutdownPeer)); ::system(rmdir.c_str()); - // transfer leader 到peer4_,并读出来验证 + // Transfer leader to peer4_, And read it out for verification TransferLeaderAssertSuccess(&cluster, peer4_, defaultCliOpt_); leaderPeer = peer4_; - // 读数据验证 - ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch, loop); - ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, - length, ch-1, loop); + // Read Data Validation + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); + ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, loop); } } // namespace chunkserver diff --git a/test/integration/raft/raft_vote_test.cpp b/test/integration/raft/raft_vote_test.cpp index 5f87a1495f..9b5d97b98f 100644 --- a/test/integration/raft/raft_vote_test.cpp +++ b/test/integration/raft/raft_vote_test.cpp @@ -21,84 +21,66 @@ */ #include -#include #include +#include #include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "test/integration/common/peer_cluster.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; const char kRaftVoteTestLogDir[] = "./runlog/RaftVote"; const char* kFakeMdsAddr = "127.0.0.1:9089"; static constexpr uint32_t kOpRequestAlignSize = 4096; static const char* raftVoteParam[3][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9091", - "-chunkServerStoreUri=local://./9091/", - "-chunkServerMetaUri=local://./9091/chunkserver.dat", - "-copySetUri=local://./9091/copysets", - "-raftSnapshotUri=curve://./9091/copysets", - "-recycleUri=local://./9091/recycler", - "-chunkFilePoolDir=./9091/chunkfilepool/", - "-chunkFilePoolMetaPath=./9091/chunkfilepool.meta", - "-conf=./9091/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9091/copysets", - "-walFilePoolDir=./9091/walfilepool/", - "-walFilePoolMetaPath=./9091/walfilepool.meta", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9092", - "-chunkServerStoreUri=local://./9092/", - "-chunkServerMetaUri=local://./9092/chunkserver.dat", - "-copySetUri=local://./9092/copysets", - "-raftSnapshotUri=curve://./9092/copysets", - "-recycleUri=local://./9092/recycler", - "-chunkFilePoolDir=./9092/chunkfilepool/", - "-chunkFilePoolMetaPath=./9092/chunkfilepool.meta", - "-conf=./9092/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9092/copysets", - "-walFilePoolDir=./9092/walfilepool/", - "-walFilePoolMetaPath=./9092/walfilepool.meta", - NULL - }, - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - 
"-chunkServerPort=9093", - "-chunkServerStoreUri=local://./9093/", - "-chunkServerMetaUri=local://./9093/chunkserver.dat", - "-copySetUri=local://./9093/copysets", - "-raftSnapshotUri=curve://./9093/copysets", - "-recycleUri=local://./9093/recycler", - "-chunkFilePoolDir=./9093/chunkfilepool/", - "-chunkFilePoolMetaPath=./9093/chunkfilepool.meta", - "-conf=./9093/chunkserver.conf", - "-raft_sync_segments=true", - "-raftLogUri=curve://./9093/copysets", - "-walFilePoolDir=./9093/walfilepool/", - "-walFilePoolMetaPath=./9093/walfilepool.meta", - NULL - }, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9091", + "-chunkServerStoreUri=local://./9091/", + "-chunkServerMetaUri=local://./9091/chunkserver.dat", + "-copySetUri=local://./9091/copysets", + "-raftSnapshotUri=curve://./9091/copysets", + "-recycleUri=local://./9091/recycler", + "-chunkFilePoolDir=./9091/chunkfilepool/", + "-chunkFilePoolMetaPath=./9091/chunkfilepool.meta", + "-conf=./9091/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9091/copysets", + "-walFilePoolDir=./9091/walfilepool/", + "-walFilePoolMetaPath=./9091/walfilepool.meta", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9092", + "-chunkServerStoreUri=local://./9092/", + "-chunkServerMetaUri=local://./9092/chunkserver.dat", + "-copySetUri=local://./9092/copysets", + "-raftSnapshotUri=curve://./9092/copysets", + "-recycleUri=local://./9092/recycler", + "-chunkFilePoolDir=./9092/chunkfilepool/", + "-chunkFilePoolMetaPath=./9092/chunkfilepool.meta", + "-conf=./9092/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9092/copysets", + "-walFilePoolDir=./9092/walfilepool/", + "-walFilePoolMetaPath=./9092/walfilepool.meta", NULL}, + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9093", + "-chunkServerStoreUri=local://./9093/", + "-chunkServerMetaUri=local://./9093/chunkserver.dat", + "-copySetUri=local://./9093/copysets", + "-raftSnapshotUri=curve://./9093/copysets", + "-recycleUri=local://./9093/recycler", + "-chunkFilePoolDir=./9093/chunkfilepool/", + "-chunkFilePoolMetaPath=./9093/chunkfilepool.meta", + "-conf=./9093/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9093/copysets", + "-walFilePoolDir=./9093/walfilepool/", + "-walFilePoolMetaPath=./9093/walfilepool.meta", NULL}, }; class RaftVoteTest : public testing::Test { @@ -130,25 +112,22 @@ class RaftVoteTest : public testing::Test { ASSERT_TRUE(cg2.Init("9092")); ASSERT_TRUE(cg3.Init("9093")); cg1.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg1.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg1.SetKV("chunkserver.common.logDir", - kRaftVoteTestLogDir); + std::to_string(snapshotIntervalS)); + cg1.SetKV("chunkserver.common.logDir", kRaftVoteTestLogDir); cg1.SetKV("mds.listen.addr", kFakeMdsAddr); cg2.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg2.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - cg2.SetKV("chunkserver.common.logDir", - kRaftVoteTestLogDir); + std::to_string(snapshotIntervalS)); + cg2.SetKV("chunkserver.common.logDir", kRaftVoteTestLogDir); cg2.SetKV("mds.listen.addr", kFakeMdsAddr); cg3.SetKV("copyset.election_timeout_ms", - std::to_string(electionTimeoutMs)); + std::to_string(electionTimeoutMs)); cg3.SetKV("copyset.snapshot_interval_s", - std::to_string(snapshotIntervalS)); - 
cg3.SetKV("chunkserver.common.logDir", - kRaftVoteTestLogDir); + std::to_string(snapshotIntervalS)); + cg3.SetKV("chunkserver.common.logDir", kRaftVoteTestLogDir); cg3.SetKV("mds.listen.addr", kFakeMdsAddr); ASSERT_TRUE(cg1.Generate()); ASSERT_TRUE(cg2.Generate()); @@ -189,22 +168,21 @@ class RaftVoteTest : public testing::Test { int snapshotIntervalS; std::map paramsIndexs; - std::vector params; - // 等待多个副本数据一致的时间 + std::vector params; + // Waiting for multiple replica data to be consistent int waitMultiReplicasBecomeConsistent; }; - - butil::AtExitManager atExitManager; /** - * 验证1个节点的复制组 - * 1. 创建1个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader,验证可用性 - * 3. 拉起leader - * 4. hang住leader - * 5. 恢复leader + * Verify replication group for 1 node + * 1. Create a replication group of 1 member, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the leader and verify availability + * 3. Pull up the leader + * 4. Hang in the leader + * 5. Restore leader */ TEST_F(RaftVoteTest, OneNode) { LogicPoolID logicPoolId = 2; @@ -214,17 +192,13 @@ TEST_F(RaftVoteTest, OneNode) { char ch = 'a'; int loop = 25; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -234,85 +208,51 @@ TEST_F(RaftVoteTest, OneNode) { ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉这个节点 + // 2. Hang up this node ASSERT_EQ(0, cluster.ShutdownPeer(peer1)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 将节点拉起来 + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Pull up the node ASSERT_EQ(0, cluster.StartPeer(peer1, PeerCluster::PeerToId(peer1))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. hang住此节点 + // 4. Hang on to this node ASSERT_EQ(0, cluster.HangPeer(peer1)); ::usleep(200 * 1000); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 5. 恢复节点 + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 5. 
Restore nodes ASSERT_EQ(0, cluster.SignalPeer(peer1)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); } /** - * 验证2个节点的复制组,并挂掉leader - * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader - * 3. 恢复leader + * Verify the replication groups of two nodes and hang the leader + * 1. Create a replication group of 2 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the leader + * 3. Restore leader */ TEST_F(RaftVoteTest, TwoNodeKillLeader) { LogicPoolID logicPoolId = 2; @@ -322,18 +262,14 @@ TEST_F(RaftVoteTest, TwoNodeKillLeader) { char ch = 'a'; int loop = 25; - // 1. 启动2个成员的复制组 + // 1. Start a replication group of 2 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -343,55 +279,36 @@ TEST_F(RaftVoteTest, TwoNodeKillLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉leader + // 2. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 拉起leader + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Pull up the leader ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证2个节点的复制组,并挂掉follower - * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 恢复follower + * Verify the replication groups of two nodes and hang the follower + * 1. Create a replication group of 2 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the follower + * 3. 

/**
- * 验证2个节点的复制组,并挂掉follower
- * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍
- * 2. 挂掉follower
- * 3. 恢复follower
+ * Verify the replication groups of two nodes and hang the follower
+ * 1. Create a replication group of 2 members, wait for the leader to generate,
+ * write the data, and then read it out for verification
+ * 2. Hang up the follower
+ * 3. Restore follower
 */
TEST_F(RaftVoteTest, TwoNodeKillFollower) {
    LogicPoolID logicPoolId = 2;
@@ -401,19 +318,15 @@ TEST_F(RaftVoteTest, TwoNodeKillFollower) {
    char ch = 'a';
    int loop = 25;

-    // 1. 启动2个成员的复制组
+    // 1. Start a replication group of 2 members
    LOG(INFO) << "init 2 members copyset";
    PeerId leaderId;
    Peer leaderPeer;
    std::vector<Peer> peers;
    peers.push_back(peer1);
    peers.push_back(peer2);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
    ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
    cluster.SetElectionTimeoutMs(electionTimeoutMs);
    cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -423,15 +336,10 @@ TEST_F(RaftVoteTest, TwoNodeKillFollower) {
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

-    // 2. 挂掉follower
+    // 2. Hang up the follower
    Peer followerPeer;
    if (leaderPeer.address() == peer1.address()) {
        followerPeer = peer2;
@@ -441,57 +349,37 @@ TEST_F(RaftVoteTest, TwoNodeKillFollower) {
    LOG(INFO) << "kill follower " << followerPeer.address();
    ASSERT_EQ(0, cluster.ShutdownPeer(followerPeer));
    LOG(INFO) << "fill ch: " << std::to_string(ch - 1);
-    // step down之前的request,最终会被提交
-    WriteVerifyNotAvailable(leaderPeer,
-                            logicPoolId,
-                            copysetId,
-                            chunkId,
-                            length,
-                            ch,
-                            1);
-    // 等待leader step down,之后,也不支持read了
+    // Requests sent before the step down will eventually be committed
+    WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                            ch, 1);
+    // Wait for the leader to step down; after that, reads are no longer
+    // served
    ::usleep(1000 * electionTimeoutMs * 2);
-    ReadVerifyNotAvailable(leaderPeer,
-                           logicPoolId,
-                           copysetId,
-                           chunkId,
-                           length,
-                           ch,
-                           1);
-
-    // 3. 拉起follower
+    ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                           ch, 1);
+
+    // 3. Pull up the follower
    LOG(INFO) << "restart follower " << followerPeer.address();
-    ASSERT_EQ(0,
-              cluster.StartPeer(followerPeer,
-                                PeerCluster::PeerToId(followerPeer)));
+    ASSERT_EQ(0, cluster.StartPeer(followerPeer,
+                                   PeerCluster::PeerToId(followerPeer)));
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
-    // read之前写入的数据验证,step down之前的write
-    ReadVerify(leaderPeer,
-               logicPoolId,
-               copysetId,
-               chunkId,
-               length,
-               ch,
-               1);
-
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    // Verify the data written earlier, including the write before step down
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, 1);
+
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    ::usleep(waitMultiReplicasBecomeConsistent * 1000);
    CopysetStatusVerify(peers, logicPoolId, copysetId, 2);
}
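// Timing note for the step-down assertions around here: once its only peer is
// unreachable, the leader stops serving after roughly one election timeout
// without a majority, so the tests sleep two election timeouts before
// asserting that reads fail as well. A sketch of that wait (mirrors the
// ::usleep(1000 * electionTimeoutMs * 2) calls above; the helper name is
// illustrative only, not part of the harness):
inline void WaitForLeaderStepDown(int electionTimeoutMs) {
    ::usleep(1000 * electionTimeoutMs * 2);  // ms -> us, two timeouts of slack
}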

/**
- * 验证2个节点的复制组,并hang leader
- * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍
- * 2. hang leader
- * 3. 恢复leader
+ * Verify the replication group of 2 nodes and hang the leader
+ * 1. Create a replication group of 2 members, wait for the leader to generate,
+ * write the data, and then read it out for verification
+ * 2. Hang leader
+ * 3. Restore leader
 */
TEST_F(RaftVoteTest, TwoNodeHangLeader) {
    LogicPoolID logicPoolId = 2;
@@ -501,18 +389,14 @@ TEST_F(RaftVoteTest, TwoNodeHangLeader) {
    char ch = 'a';
    int loop = 25;

-    // 1. 启动2个成员的复制组
+    // 1. Start a replication group of 2 members
    PeerId leaderId;
    Peer leaderPeer;
    std::vector<Peer> peers;
    peers.push_back(peer1);
    peers.push_back(peer2);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
    ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
    cluster.SetElectionTimeoutMs(electionTimeoutMs);
    cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -522,56 +406,37 @@ TEST_F(RaftVoteTest, TwoNodeHangLeader) {
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    // 2. Hang leader
    LOG(INFO) << "hang leader peer: " << leaderPeer.address();
    ASSERT_EQ(0, cluster.HangPeer(leaderPeer));
-    ReadVerifyNotAvailable(leaderPeer,
-                           logicPoolId,
-                           copysetId,
-                           chunkId,
-                           length,
-                           ch - 1,
-                           1);
-
-    // 3. 恢复leader
+    ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                           ch - 1, 1);
+
+    // 3. Restore leader
    LOG(INFO) << "recover leader peer: " << leaderPeer.address();
    ASSERT_EQ(0, cluster.SignalPeer(leaderPeer));
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
-    // read之前写入的数据验证
-    ReadVerify(leaderPeer,
-               logicPoolId,
-               copysetId,
-               chunkId,
-               length,
-               ch - 1,
+    // Verification of data written before read
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1,
               loop);

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    ::usleep(waitMultiReplicasBecomeConsistent * 1000);
    CopysetStatusVerify(peers, logicPoolId, copysetId, 1);
}

/**
- * 验证2个节点的复制组,并发Hang一个follower
- * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍
- * 2. hang follower
- * 3. 恢复follower
+ * Verify the replication group of two nodes and concurrently hang a follower
+ * 1. Create a replication group of 2 members, wait for the leader to generate,
+ * write the data, and then read it out for verification
+ * 2. Hang follower
+ * 3. Restore follower
 */
TEST_F(RaftVoteTest, TwoNodeHangFollower) {
    LogicPoolID logicPoolId = 2;
@@ -581,19 +446,15 @@ TEST_F(RaftVoteTest, TwoNodeHangFollower) {
    char ch = 'a';
    int loop = 25;

-    // 1. 启动2个成员的复制组
+    // 1. Start a replication group of 2 members
    LOG(INFO) << "init 2 members copyset";
    PeerId leaderId;
    Peer leaderPeer;
    std::vector<Peer> peers;
    peers.push_back(peer1);
    peers.push_back(peer2);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
    ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
    cluster.SetElectionTimeoutMs(electionTimeoutMs);
    cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -603,13 +464,8 @@ TEST_F(RaftVoteTest, TwoNodeHangFollower) {
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    // 2. hang follower
    Peer followerPeer;
    if (leaderPeer.address() == peer1.address()) {
        followerPeer = peer2;
@@ -621,53 +477,33 @@ TEST_F(RaftVoteTest, TwoNodeHangFollower) {
    LOG(INFO) << "hang follower " << followerPeer.address();
    ASSERT_EQ(0, cluster.HangPeer(followerPeer));
    LOG(INFO) << "fill ch: " << std::to_string(ch - 1);
-    // step down之前的request,最终会被提交
-    WriteVerifyNotAvailable(leaderPeer,
-                            logicPoolId,
-                            copysetId,
-                            chunkId,
-                            length,
-                            ch,
-                            1);
-    // 等待leader step down之后,也不支持read了
+    // Requests sent before the step down will eventually be committed
+    WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                            ch, 1);
+    // After the leader steps down, reads are no longer served
    ::usleep(1000 * electionTimeoutMs * 2);
-    ReadVerifyNotAvailable(leaderPeer,
-                           logicPoolId,
-                           copysetId,
-                           chunkId,
-                           length,
-                           ch,
-                           1);
-
-    // 3. 恢复follower
+    ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                           ch, 1);
+
+    // 3. Restore follower
    LOG(INFO) << "recover follower " << followerPeer.address();
    ASSERT_EQ(0, cluster.SignalPeer(followerPeer));
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
-    // read之前写入的数据验证,step down之前的write
-    ReadVerify(leaderPeer,
-               logicPoolId,
-               copysetId,
-               chunkId,
-               length,
-               ch,
-               1);
-
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    // Verify the data written earlier, including the write before step down
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, 1);
+
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000);
    CopysetStatusVerify(peers, logicPoolId, copysetId, 2);
}
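// From here on the groups have three members, so quorum is 2 of 3 (see the
// RaftQuorum sketch above): any single member can be killed or hung without
// losing availability, while losing two stalls the group until a peer is
// restored. Restated with the illustrative helpers from above (still only a
// reader's sketch, not harness code):
namespace {
constexpr bool GroupAvailable(int members, int alive) {
    return alive >= RaftQuorum(members);
}
static_assert(GroupAvailable(3, 2), "a 3-node group survives one failure");
static_assert(!GroupAvailable(3, 1), "but not two simultaneous failures");
}  // namespace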

/**
- * 验证3个节点是否能够正常提供服务
- * 1. 创建3个副本的复制组,等待leader产生,write数据,然后read出来验证一遍
+ * Verify whether the three nodes can provide services normally
+ * 1. Create a replication group of three replicas, wait for the leader to
+ * generate, write the data, and then read it out for verification
 */
TEST_F(RaftVoteTest, ThreeNodesNormal) {
    LogicPoolID logicPoolId = 2;
@@ -682,12 +518,8 @@ TEST_F(RaftVoteTest, ThreeNodesNormal) {
    peers.push_back(peer2);
    peers.push_back(peer3);

-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
    ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
    cluster.SetElectionTimeoutMs(electionTimeoutMs);
    cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -700,24 +532,20 @@ TEST_F(RaftVoteTest, ThreeNodesNormal) {
    PeerId leaderId;
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));

-    // 再次发起 read/write
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    // Initiate read/write again
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    ::usleep(waitMultiReplicasBecomeConsistent * 1000);
    CopysetStatusVerify(peers, logicPoolId, copysetId, 1);
}

/**
- * 验证3个节点的复制组,并挂掉leader
- * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍
- * 2. 挂掉leader
- * 3. 恢复leader
+ * Verify the replication groups of three nodes and hang the leader
+ * 1. Create a replication group of 3 members, wait for the leader to generate,
+ * write the data, and then read it out for verification
+ * 2. Hang up the leader
+ * 3. Restore leader
 */
TEST_F(RaftVoteTest, ThreeNodeKillLeader) {
    LogicPoolID logicPoolId = 2;
@@ -727,19 +555,15 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeader) {
    char ch = 'a';
    int loop = 25;

-    // 1. 启动3个成员的复制组
+    // 1. Start a replication group of 3 members
    PeerId leaderId;
    Peer leaderPeer;
    std::vector<Peer> peers;
    peers.push_back(peer1);
    peers.push_back(peer2);
    peers.push_back(peer3);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
    ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
    cluster.SetElectionTimeoutMs(electionTimeoutMs);
    cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -750,55 +574,36 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeader) {
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

-    // 2. 挂掉leader
+    // 2. Hang up the leader
    ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer));
-    ReadVerifyNotAvailable(leaderPeer,
-                           logicPoolId,
-                           copysetId,
-                           chunkId,
-                           length,
-                           ch - 1,
-                           1);
-
-    // 3. 拉起leader
+    ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                           ch - 1, 1);
+
+    // 3. Pull up the leader
    ASSERT_EQ(0, cluster.StartPeer(leaderPeer,
                                   PeerCluster::PeerToId(leaderPeer)));
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
-    // read之前写入的数据验证
-    ReadVerify(leaderPeer,
-               logicPoolId,
-               copysetId,
-               chunkId,
-               length,
-               ch - 1,
+    // Verification of data written before read
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1,
               loop);

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    ::usleep(waitMultiReplicasBecomeConsistent * 1000);
    CopysetStatusVerify(peers, logicPoolId, copysetId, 2);
}

/**
- * 验证3个节点的复制组,并挂掉follower
- * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍
- * 2. 挂掉follower
- * 3. 恢复follower
+ * Verify the replication groups of three nodes and hang the follower
+ * 1. Create a replication group of 3 members, wait for the leader to generate,
+ * write the data, and then read it out for verification
+ * 2. Hang up the follower
+ * 3. Restore follower
 */
TEST_F(RaftVoteTest, ThreeNodeKillOneFollower) {
    LogicPoolID logicPoolId = 2;
@@ -808,19 +613,15 @@ TEST_F(RaftVoteTest, ThreeNodeKillOneFollower) {
    char ch = 'a';
    int loop = 25;

-    // 1. 启动3个成员的复制组
+    // 1. Start a replication group of 3 members
    PeerId leaderId;
    Peer leaderPeer;
    std::vector<Peer> peers;
    peers.push_back(peer1);
    peers.push_back(peer2);
    peers.push_back(peer3);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
    ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
    cluster.SetElectionTimeoutMs(electionTimeoutMs);
    cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -831,57 +632,37 @@ TEST_F(RaftVoteTest, ThreeNodeKillOneFollower) {
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

-    // 2. 挂掉1个follower
+    // 2. Hang up 1 follower
    std::vector<Peer> followerPeers;
    PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers);
    ASSERT_GE(followerPeers.size(), 1);
    ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0]));
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
-
-    // 3. 拉起follower
-    ASSERT_EQ(0,
-              cluster.StartPeer(followerPeers[0],
-                                PeerCluster::PeerToId(followerPeers[0])));
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
+
+    // 3. Pull up the follower
+    ASSERT_EQ(0, cluster.StartPeer(followerPeers[0],
+                                   PeerCluster::PeerToId(followerPeers[0])));
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
-    // read之前写入的数据验证
-    ReadVerify(leaderPeer,
-               logicPoolId,
-               copysetId,
-               chunkId,
-               length,
-               ch - 1,
+    // Verification of data written before read
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1,
               loop);

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000);
    CopysetStatusVerify(peers, logicPoolId, copysetId, 1);
}
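// The restart cases below shut a peer down and start it again five times;
// right after each restart the tests first assert unavailability (the
// restarted copyset has not elected a leader yet), then WaitLeader before
// re-verifying the data. The trailing
// ::usleep(k * waitMultiReplicasBecomeConsistent * 1000) calls (k = 1, 1.3,
// 1.6 or 2 across these tests) appear to be empirical slack that lets
// recovered replicas catch up before CopysetStatusVerify compares them; a
// sketch of that wait (illustrative helper only):
inline void WaitReplicasConsistent(double factor, int waitMs) {
    ::usleep(static_cast<useconds_t>(factor * waitMs * 1000));  // ms -> us
}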

/**
- * 验证3个节点的复制组,反复restart leader
- * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍
- * 2. 反复restart leader
+ * Verify the replication group of three nodes and repeatedly restart the leader
+ * 1. Create a replication group of 3 members, wait for the leader to generate,
+ * write the data, and then read it out for verification
+ * 2. Repeatedly restart the leader
 */
TEST_F(RaftVoteTest, ThreeNodeRestartLeader) {
    LogicPoolID logicPoolId = 2;
@@ -891,19 +672,15 @@ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) {
    char ch = 'a';
    int loop = 25;

-    // 1. 启动3个成员的复制组
+    // 1. Start a replication group of 3 members
    PeerId leaderId;
    Peer leaderPeer;
    std::vector<Peer> peers;
    peers.push_back(peer1);
    peers.push_back(peer2);
    peers.push_back(peer3);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
    ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
    cluster.SetElectionTimeoutMs(electionTimeoutMs);
    cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -914,13 +691,8 @@ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) {
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    // 2. restart leader
    for (int i = 0; i < 5; ++i) {
        ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer));
        ::sleep(3);
        ASSERT_EQ(0, cluster.StartPeer(leaderPeer,
                                       PeerCluster::PeerToId(leaderPeer)));
-        ReadVerifyNotAvailable(leaderPeer,
-                               logicPoolId,
-                               copysetId,
-                               chunkId,
-                               length,
-                               ch - 1,
-                               1);
+        ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId,
+                               length, ch - 1, 1);
        ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
        ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
-        // read之前写入的数据验证
-        ReadVerify(leaderPeer,
-                   logicPoolId,
-                   copysetId,
-                   chunkId,
-                   length,
-                   ch - 1,
+        // Verification of data written before read
+        ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1,
                   loop);

-        WriteThenReadVerify(leaderPeer,
-                            logicPoolId,
-                            copysetId,
-                            chunkId,
-                            length,
-                            ch++,
-                            loop);
+        WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                            ch++, loop);
    }

    ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000);
@@ -961,9 +718,11 @@
}

/**
- * 验证3个节点的复制组,反复重启一个follower
- * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍
- * 2. 反复重启follower
+ * Verify the replication groups of three nodes and restart a follower
+ * repeatedly
+ * 1. Create a replication group of 3 members, wait for the leader to generate,
+ * write the data, and then read it out for verification
+ * 2. Repeatedly restart the follower
 */
TEST_F(RaftVoteTest, ThreeNodeRestartFollower) {
    LogicPoolID logicPoolId = 2;
@@ -973,19 +732,15 @@ TEST_F(RaftVoteTest, ThreeNodeRestartFollower) {
    char ch = 'a';
    int loop = 25;

-    // 1. 启动3个成员的复制组
+    // 1. 
Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -996,27 +751,17 @@ TEST_F(RaftVoteTest, ThreeNodeRestartFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 反复 restart follower + // 2. Repeatedly restart follower for (int i = 0; i < 5; ++i) { std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); @@ -1028,11 +773,13 @@ TEST_F(RaftVoteTest, ThreeNodeRestartFollower) { } /** - * 验证3个节点的复制组,并挂掉leader和1个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader和1个follwoer - * 3. 拉起leader - * 4. 拉起follower + * Verify the replication groups of three nodes and hang the leader and one + * follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the leader and 1 follower + * 3. Pull up the leader + * 4. Pull up the follower */ TEST_F(RaftVoteTest, ThreeNodeKillLeaderAndOneFollower) { LogicPoolID logicPoolId = 2; @@ -1042,19 +789,15 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeaderAndOneFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1065,72 +808,48 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeaderAndOneFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉leader和Follower + // 2. Hang up the leader and follower ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 
拉起leader + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Pull up the leader ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 拉起follower + // 4. Pull up the follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(2 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证3个节点的复制组,并挂掉2个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉2个follower - * 3. 拉起1个follower - * 4. 拉起1个follower + * Verify the replication groups of three nodes and hang two followers + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up 2 followers + * 3. Pull up 1 follower + * 4. Pull up 1 follower */ TEST_F(RaftVoteTest, ThreeNodeKillTwoFollower) { LogicPoolID logicPoolId = 2; @@ -1140,19 +859,15 @@ TEST_F(RaftVoteTest, ThreeNodeKillTwoFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1163,73 +878,49 @@ TEST_F(RaftVoteTest, ThreeNodeKillTwoFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉2个Follower + // 2. Hang 2 Followers std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 2); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[0])); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeers[1])); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 拉起1个follower + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. 
Pull up 1 follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 拉起follower + // 4. Pull up the follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[1], PeerCluster::PeerToId(followerPeers[1]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch ++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的复制组,并挂掉3个成员 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉3个成员 - * 3. 拉起1个成员 - * 4. 拉起1个成员 - * 5. 拉起1个成员 + * Verify the replication group of 3 nodes and suspend 3 members + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang 3 members + * 3. Pull up 1 member + * 4. Pull up 1 member + * 5. Pull up 1 member */ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { LogicPoolID logicPoolId = 2; @@ -1239,19 +930,15 @@ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1262,80 +949,50 @@ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉3个成员 + // 2. Hang 3 members std::vector followerPeers; ASSERT_EQ(0, cluster.ShutdownPeer(peer1)); ASSERT_EQ(0, cluster.ShutdownPeer(peer2)); ASSERT_EQ(0, cluster.ShutdownPeer(peer3)); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 拉起1个成员 - ASSERT_EQ(0, cluster.StartPeer(peer1, - PeerCluster::PeerToId(peer1))); + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Pull up 1 member + ASSERT_EQ(0, cluster.StartPeer(peer1, PeerCluster::PeerToId(peer1))); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); - ReadVerifyNotAvailable(peer1, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - - // 4. 
拉起1个成员 - ASSERT_EQ(0, cluster.StartPeer(peer2, - PeerCluster::PeerToId(peer2))); + ReadVerifyNotAvailable(peer1, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 4. Pull up 1 member + ASSERT_EQ(0, cluster.StartPeer(peer2, PeerCluster::PeerToId(peer2))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 5. 再拉起1个成员 - ASSERT_EQ(0, cluster.StartPeer(peer3, - PeerCluster::PeerToId(peer3))); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // 5. Pull up one more member + ASSERT_EQ(0, cluster.StartPeer(peer3, PeerCluster::PeerToId(peer3))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } - - /** - * 验证3个节点的复制组,并hang leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang leader - * 3. 恢复leader + * Verify the replication groups of three nodes and hang the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang leader + * 3. Restore leader */ TEST_F(RaftVoteTest, ThreeNodeHangLeader) { LogicPoolID logicPoolId = 2; @@ -1345,19 +1002,15 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1368,65 +1021,40 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 2. hang leader Peer oldPeer = leaderPeer; ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 等待new leader产生 + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // Waiting for new leader generation ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 3. 恢复 old leader + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // 3. 
Restore old leader ASSERT_EQ(0, cluster.SignalPeer(oldPeer)); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } - /** - * 验证3个节点的复制组,并hang1个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 恢复follower + * Verify the replication groups of 3 nodes and hang 1 follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang up the follower + * 3. Restore follower */ TEST_F(RaftVoteTest, ThreeNodeHangOneFollower) { LogicPoolID logicPoolId = 2; @@ -1436,19 +1064,15 @@ TEST_F(RaftVoteTest, ThreeNodeHangOneFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1459,56 +1083,38 @@ TEST_F(RaftVoteTest, ThreeNodeHangOneFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. hang 1个follower + // 2. Hang 1 follower std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.HangPeer(followerPeers[0])); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); - - // 3. 恢复follower + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); + + // 3. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers[0])); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); } /** - * 验证3个节点的复制组,并hang leader和1个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang leader和1个follower - * 3. 恢复old leader - * 4. 恢复follower + * Verify the replication groups of three nodes and hang the leader and one + * follower + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. 
Hang leader and 1 follower + * 3. Restore old leader + * 4. Restore follower */ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { LogicPoolID logicPoolId = 2; @@ -1518,19 +1124,15 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1541,13 +1143,8 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); // 2. hang leader ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); @@ -1555,63 +1152,39 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); ASSERT_EQ(0, cluster.HangPeer(followerPeers[0])); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 恢复 old leader + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Restore old leader ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + // Verification of data written before read + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 4. 恢复follower + // 4. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers[0])); - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 0); } /** - * 验证3个节点的复制组,并hang 2个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang两个follower - * 3. 恢复old leader - * 4. 恢复follower + * Verify the replication groups of 3 nodes and hang 2 followers + * 1. Create a replication group of 3 members, wait for the leader to generate, + * write the data, and then read it out for verification + * 2. Hang two followers + * 3. Restore old leader + * 4. Restore follower */ TEST_F(RaftVoteTest, ThreeNodeHangTwoFollower) { LogicPoolID logicPoolId = 2; @@ -1621,19 +1194,15 @@ TEST_F(RaftVoteTest, ThreeNodeHangTwoFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members
    PeerId leaderId;
    Peer leaderPeer;
    std::vector<Peer> peers;
    peers.push_back(peer1);
    peers.push_back(peer2);
    peers.push_back(peer3);
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
    ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
    cluster.SetElectionTimeoutMs(electionTimeoutMs);
    cluster.SetsnapshotIntervalS(snapshotIntervalS);
@@ -1644,89 +1213,54 @@ TEST_F(RaftVoteTest, ThreeNodeHangTwoFollower) {
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

-    // 2. hang 2个follower
+    // 2. Hang 2 followers
    std::vector<Peer> followerPeers;
    PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers);
    ASSERT_GE(followerPeers.size(), 2);
    ASSERT_EQ(0, cluster.HangPeer(followerPeers[0]));
    ASSERT_EQ(0, cluster.HangPeer(followerPeers[1]));
-    // step down之前提交request会超时
-    WriteVerifyNotAvailable(leaderPeer,
-                            logicPoolId,
-                            copysetId,
-                            chunkId,
-                            length,
-                            ch ++,
-                            1);
-
-    // 等待step down之后,读也不可提供服务
+    // Submitting a request before the step down will time out
+    WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                            ch++, 1);
+
+    // After the step down, reads are not served either
    ::usleep(1000 * electionTimeoutMs * 2);
-    ReadVerifyNotAvailable(leaderPeer,
-                           logicPoolId,
-                           copysetId,
-                           chunkId,
-                           length,
-                           ch - 1,
-                           1);
-
-    // 3. 恢复1个follower
+    ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                           ch - 1, 1);
+
+    // 3. Restore 1 follower
    ASSERT_EQ(0, cluster.SignalPeer(followerPeers[0]));
    ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer));
    ASSERT_EQ(0, leaderId.parse(leaderPeer.address()));
-    ReadVerify(leaderPeer,
-               logicPoolId,
-               copysetId,
-               chunkId,
-               length,
-               ch - 1,
-               1);
-
-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
-
-    // 4. 恢复1个follower
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, 1);
+
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);
+
+    // 4. Restore 1 follower
    ASSERT_EQ(0, cluster.SignalPeer(followerPeers[1]));
-    ReadVerify(leaderPeer,
-               logicPoolId,
-               copysetId,
-               chunkId,
-               length,
-               ch - 1,
+    ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1,
               loop);

-    WriteThenReadVerify(leaderPeer,
-                        logicPoolId,
-                        copysetId,
-                        chunkId,
-                        length,
-                        ch++,
-                        loop);
+    WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length,
+                        ch++, loop);

    ::usleep(2 * waitMultiReplicasBecomeConsistent * 1000);
    CopysetStatusVerify(peers, logicPoolId, copysetId, 2);
}

/**
- * 验证3个节点的复制组,并hang 3个成员
- * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍
- * 2. hang 3个成员
- * 3. 恢复1个成员
- * 4. 恢复1个成员
- * 5. 恢复1个成员
+ * Verify the replication group of 3 nodes and hang 3 members
+ * 1. Create a replication group of 3 members, wait for the leader to generate,
+ * write the data, and then read it out for verification
+ * 2. Hang 3 members
+ * 3. Restore 1 member
+ * 4. Restore 1 member
+ * 5. 
Restore 1 member */ TEST_F(RaftVoteTest, ThreeNodeHangThreeMember) { LogicPoolID logicPoolId = 2; @@ -1736,19 +1270,15 @@ TEST_F(RaftVoteTest, ThreeNodeHangThreeMember) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; peers.push_back(peer1); peers.push_back(peer2); peers.push_back(peer3); - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); cluster.SetElectionTimeoutMs(electionTimeoutMs); cluster.SetsnapshotIntervalS(snapshotIntervalS); @@ -1759,77 +1289,41 @@ TEST_F(RaftVoteTest, ThreeNodeHangThreeMember) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 2. 挂掉3个成员 + // 2. Hang 3 members std::vector followerPeers; ASSERT_EQ(0, cluster.HangPeer(peer1)); ASSERT_EQ(0, cluster.HangPeer(peer2)); ASSERT_EQ(0, cluster.HangPeer(peer3)); - WriteVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - - // 3. 恢复1个成员 + WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); + + // 3. Restore 1 member ASSERT_EQ(0, cluster.SignalPeer(peer1)); ::usleep(1000 * electionTimeoutMs * 2); - ReadVerifyNotAvailable(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, - 1); - + ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch - 1, 1); - // 4. 恢复1个成员 + // 4. Restore 1 member ASSERT_EQ(0, cluster.SignalPeer(peer2)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - ReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch - 1, + ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, loop); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); - // 5. 再恢复1个成员 + // 5. 
Restore 1 more member ASSERT_EQ(0, cluster.SignalPeer(peer3)); - WriteThenReadVerify(leaderPeer, - logicPoolId, - copysetId, - chunkId, - length, - ch++, - loop); + WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, + ch++, loop); ::usleep(1.6 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 1); diff --git a/test/integration/snapshotcloneserver/fake_curvefs_client.cpp b/test/integration/snapshotcloneserver/fake_curvefs_client.cpp index 49191fdd40..af6be699fd 100644 --- a/test/integration/snapshotcloneserver/fake_curvefs_client.cpp +++ b/test/integration/snapshotcloneserver/fake_curvefs_client.cpp @@ -20,11 +20,11 @@ * Author: xuchaojie */ +#include "test/integration/snapshotcloneserver/fake_curvefs_client.h" + #include #include -#include "test/integration/snapshotcloneserver/fake_curvefs_client.h" - namespace curve { namespace snapshotcloneserver { @@ -36,9 +36,8 @@ const uint64_t chunkSize = 16ULL * 1024 * 1024; const uint64_t segmentSize = 32ULL * 1024 * 1024; const uint64_t fileLength = 64ULL * 1024 * 1024; - -int FakeCurveFsClient::Init(const CurveClientOptions &options) { - // 初始化一个文件用打快照和克隆 +int FakeCurveFsClient::Init(const CurveClientOptions& options) { + // Initialize a file for snapshot and cloning FInfo fileInfo; fileInfo.id = 100; fileInfo.parentid = 3; @@ -59,15 +58,13 @@ int FakeCurveFsClient::Init(const CurveClientOptions &options) { return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::UnInit() { - return LIBCURVE_ERROR::OK; -} +int FakeCurveFsClient::UnInit() { return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::CreateSnapshot(const std::string &filename, - const std::string &user, - uint64_t *seq) { +int FakeCurveFsClient::CreateSnapshot(const std::string& filename, + const std::string& user, uint64_t* seq) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateSnapshot", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateSnapshot", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(filename); if (it != fileMap_.end()) { @@ -77,8 +74,8 @@ int FakeCurveFsClient::CreateSnapshot(const std::string &filename, snapInfo.filetype = FileType::INODE_SNAPSHOT_PAGEFILE; snapInfo.id = fileId_++; snapInfo.parentid = it->second.id; - snapInfo.filename = (it->second.filename + "-" - + std::to_string(it->second.seqnum)); + snapInfo.filename = + (it->second.filename + "-" + std::to_string(it->second.seqnum)); snapInfo.filestatus = FileStatus::Created; it->second.seqnum++; @@ -89,11 +86,11 @@ int FakeCurveFsClient::CreateSnapshot(const std::string &filename, } } -int FakeCurveFsClient::DeleteSnapshot(const std::string &filename, - const std::string &user, - uint64_t seq) { +int FakeCurveFsClient::DeleteSnapshot(const std::string& filename, + const std::string& user, uint64_t seq) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.DeleteSnapshot", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.DeleteSnapshot", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileSnapInfoMap_.find(filename); if (it != fileSnapInfoMap_.end()) { fileSnapInfoMap_.erase(it); @@ -102,12 +99,12 @@ int FakeCurveFsClient::DeleteSnapshot(const std::string &filename, return -LIBCURVE_ERROR::NOTEXIST; } -int FakeCurveFsClient::GetSnapshot(const std::string &filename, - const std::string &user, - uint64_t seq, +int FakeCurveFsClient::GetSnapshot(const std::string& filename, + const std::string& user, 
uint64_t seq, FInfo* snapInfo) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot", + -LIBCURVE_ERROR::FAILED); // NOLINT if (fileSnapInfoMap_.find(filename) != fileSnapInfoMap_.end()) { *snapInfo = fileSnapInfoMap_[filename]; return LIBCURVE_ERROR::OK; @@ -115,17 +112,18 @@ int FakeCurveFsClient::GetSnapshot(const std::string &filename, return -LIBCURVE_ERROR::NOTEXIST; } -int FakeCurveFsClient::GetSnapshotSegmentInfo(const std::string &filename, - const std::string &user, - uint64_t seq, - uint64_t offset, - SegmentInfo *segInfo) { +int FakeCurveFsClient::GetSnapshotSegmentInfo(const std::string& filename, + const std::string& user, + uint64_t seq, uint64_t offset, + SegmentInfo* segInfo) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshotSegmentInfo", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetSnapshotSegmentInfo", + -LIBCURVE_ERROR::FAILED); // NOLINT segInfo->segmentsize = segmentSize; segInfo->chunksize = chunkSize; segInfo->startoffset = offset; - // 一共2个segment + // 2 segments in total if (offset == 0) { segInfo->chunkvec = {{1, 1, 1}, {2, 2, 1}}; } else { @@ -134,50 +132,47 @@ int FakeCurveFsClient::GetSnapshotSegmentInfo(const std::string &filename, return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::ReadChunkSnapshot(ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure *scc) { +int FakeCurveFsClient::ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, + uint64_t offset, uint64_t len, + char* buf, SnapCloneClosure* scc) { scc->SetRetCode(LIBCURVE_ERROR::OK); scc->Run(); fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.ReadChunkSnapshot", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.ReadChunkSnapshot", + -LIBCURVE_ERROR::FAILED); // NOLINT memset(buf, 'x', len); return LIBCURVE_ERROR::OK; } int FakeCurveFsClient::CheckSnapShotStatus(std::string filename, - std::string user, - uint64_t seq, - FileStatus* filestatus) { + std::string user, uint64_t seq, + FileStatus* filestatus) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CheckSnapShotStatus", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CheckSnapShotStatus", + -LIBCURVE_ERROR::FAILED); // NOLINT return -LIBCURVE_ERROR::NOTEXIST; } -int FakeCurveFsClient::GetChunkInfo(const ChunkIDInfo &cidinfo, - ChunkInfoDetail *chunkInfo) { +int FakeCurveFsClient::GetChunkInfo(const ChunkIDInfo& cidinfo, + ChunkInfoDetail* chunkInfo) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo", + -LIBCURVE_ERROR::FAILED); // NOLINT chunkInfo->chunkSn.push_back(1); return LIBCURVE_ERROR::OK; } int FakeCurveFsClient::CreateCloneFile( - const std::string &source, - const std::string &filename, - const std::string &user, - uint64_t size, - uint64_t sn, - uint32_t chunkSize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, + const std::string& source, const std::string& filename, + const std::string& user, uint64_t size, uint64_t sn, uint32_t chunkSize, + uint64_t stripeUnit, uint64_t stripeCount, const std::string& poolset, FInfo* fileInfo) { 
fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile", + -LIBCURVE_ERROR::FAILED); // NOLINT fileInfo->id = fileId_++; fileInfo->parentid = 2; @@ -202,37 +197,37 @@ int FakeCurveFsClient::CreateCloneFile( return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::CreateCloneChunk( - const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure* scc) { +int FakeCurveFsClient::CreateCloneChunk(const std::string& location, + const ChunkIDInfo& chunkidinfo, + uint64_t sn, uint64_t csn, + uint64_t chunkSize, + SnapCloneClosure* scc) { scc->SetRetCode(LIBCURVE_ERROR::OK); scc->Run(); fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk", + -LIBCURVE_ERROR::FAILED); // NOLINT return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::RecoverChunk( - const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure *scc) { +int FakeCurveFsClient::RecoverChunk(const ChunkIDInfo& chunkidinfo, + uint64_t offset, uint64_t len, + SnapCloneClosure* scc) { scc->SetRetCode(LIBCURVE_ERROR::OK); scc->Run(); fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", + -LIBCURVE_ERROR::FAILED); // NOLINT return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::CompleteCloneMeta( - const std::string &filename, - const std::string &user) { +int FakeCurveFsClient::CompleteCloneMeta(const std::string& filename, + const std::string& user) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(filename); if (it != fileMap_.end()) { it->second.filestatus = FileStatus::CloneMetaInstalled; @@ -242,11 +237,12 @@ int FakeCurveFsClient::CompleteCloneMeta( } } -int FakeCurveFsClient::CompleteCloneFile( - const std::string &filename, - const std::string &user) { +int FakeCurveFsClient::CompleteCloneFile(const std::string& filename, + const std::string& user) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(filename); if (it != fileMap_.end()) { it->second.filestatus = FileStatus::Cloned; @@ -256,12 +252,13 @@ int FakeCurveFsClient::CompleteCloneFile( } } -int FakeCurveFsClient::SetCloneFileStatus( - const std::string &filename, - const FileStatus& filestatus, - const std::string &user) { +int FakeCurveFsClient::SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const std::string& user) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.SetCloneFileStatus", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.SetCloneFileStatus", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(filename); if (it != fileMap_.end()) { it->second.filestatus = filestatus; @@ 
-271,12 +268,11 @@ int FakeCurveFsClient::SetCloneFileStatus( } } -int FakeCurveFsClient::GetFileInfo( - const std::string &filename, - const std::string &user, - FInfo* fileInfo) { +int FakeCurveFsClient::GetFileInfo(const std::string& filename, + const std::string& user, FInfo* fileInfo) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", + -LIBCURVE_ERROR::FAILED); // NOLINT if (fileMap_.find(filename) != fileMap_.end()) { *fileInfo = fileMap_[filename]; return LIBCURVE_ERROR::OK; @@ -284,18 +280,18 @@ int FakeCurveFsClient::GetFileInfo( return -LIBCURVE_ERROR::NOTEXIST; } -int FakeCurveFsClient::GetOrAllocateSegmentInfo( - bool allocate, - uint64_t offset, - FInfo* fileInfo, - const std::string &user, - SegmentInfo *segInfo) { +int FakeCurveFsClient::GetOrAllocateSegmentInfo(bool allocate, uint64_t offset, + FInfo* fileInfo, + const std::string& user, + SegmentInfo* segInfo) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo", + -LIBCURVE_ERROR::FAILED); // NOLINT segInfo->segmentsize = segmentSize; segInfo->chunksize = chunkSize; segInfo->startoffset = offset; - // 一共2个segment + // 2 segments in total if (offset == 0) { segInfo->chunkvec = {{1, 1, 1}, {2, 2, 1}}; } else { @@ -304,16 +300,16 @@ int FakeCurveFsClient::GetOrAllocateSegmentInfo( return LIBCURVE_ERROR::OK; } -int FakeCurveFsClient::RenameCloneFile( - const std::string &user, - uint64_t originId, - uint64_t destinationId, - const std::string &origin, - const std::string &destination) { - LOG(INFO) << "RenameCloneFile from " << origin - << " to " << destination; +int FakeCurveFsClient::RenameCloneFile(const std::string& user, + uint64_t originId, + uint64_t destinationId, + const std::string& origin, + const std::string& destination) { + LOG(INFO) << "RenameCloneFile from " << origin << " to " << destination; fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(origin); if (it != fileMap_.end()) { it->second.parentid = 3; @@ -326,10 +322,8 @@ int FakeCurveFsClient::RenameCloneFile( } } -int FakeCurveFsClient::DeleteFile( - const std::string &fileName, - const std::string &user, - uint64_t fileId) { +int FakeCurveFsClient::DeleteFile(const std::string& fileName, + const std::string& user, uint64_t fileId) { auto it = fileMap_.find(fileName); if (it != fileMap_.end()) { fileMap_.erase(it); @@ -340,14 +334,15 @@ int FakeCurveFsClient::DeleteFile( } int FakeCurveFsClient::Mkdir(const std::string& dirpath, - const std::string &user) { + const std::string& user) { return -LIBCURVE_ERROR::EXISTS; } int FakeCurveFsClient::ChangeOwner(const std::string& filename, const std::string& newOwner) { fiu_return_on( - "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", -LIBCURVE_ERROR::FAILED); // NOLINT + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", + -LIBCURVE_ERROR::FAILED); // NOLINT auto it = fileMap_.find(filename); if (it != fileMap_.end()) { it->second.owner = newOwner; @@ -358,7 +353,7 @@ int FakeCurveFsClient::ChangeOwner(const std::string& filename, } bool 
FakeCurveFsClient::JudgeCloneDirHasFile() { - for (auto &f : fileMap_) { + for (auto& f : fileMap_) { if (2 == f.second.parentid) { LOG(INFO) << "Clone dir has file, fileinfo is :" << " id = " << f.second.id diff --git a/test/integration/snapshotcloneserver/fake_curvefs_client.h b/test/integration/snapshotcloneserver/fake_curvefs_client.h index 0f3a0a6107..c93d76daa4 100644 --- a/test/integration/snapshotcloneserver/fake_curvefs_client.h +++ b/test/integration/snapshotcloneserver/fake_curvefs_client.h @@ -23,15 +23,13 @@ #ifndef TEST_INTEGRATION_SNAPSHOTCLONESERVER_FAKE_CURVEFS_CLIENT_H_ #define TEST_INTEGRATION_SNAPSHOTCLONESERVER_FAKE_CURVEFS_CLIENT_H_ -#include #include +#include #include "src/snapshotcloneserver/common/curvefs_client.h" - using ::curve::client::UserInfo_t; - namespace curve { namespace snapshotcloneserver { @@ -43,122 +41,84 @@ extern const char* testFile1; class FakeCurveFsClient : public CurveFsClient { public: - FakeCurveFsClient() : - fileId_(101) {} + FakeCurveFsClient() : fileId_(101) {} virtual ~FakeCurveFsClient() {} - int Init(const CurveClientOptions &options) override; + int Init(const CurveClientOptions& options) override; int UnInit() override; - int CreateSnapshot(const std::string &filename, - const std::string &user, - uint64_t *seq) override; - - int DeleteSnapshot(const std::string &filename, - const std::string &user, - uint64_t seq) override; - - int GetSnapshot(const std::string &filename, - const std::string &user, - uint64_t seq, - FInfo* snapInfo) override; - - int GetSnapshotSegmentInfo(const std::string &filename, - const std::string &user, - uint64_t seq, - uint64_t offset, - SegmentInfo *segInfo) override; - - int ReadChunkSnapshot(ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure *scc) override; - - int CheckSnapShotStatus(std::string filename, - std::string user, - uint64_t seq, - FileStatus* filestatus) override; - - int GetChunkInfo(const ChunkIDInfo &cidinfo, - ChunkInfoDetail *chunkInfo) override; - - int CreateCloneFile( - const std::string &source, - const std::string &filename, - const std::string &user, - uint64_t size, - uint64_t sn, - uint32_t chunkSize, - uint64_t stripeUnit, - uint64_t stripeCount, - const std::string& poolset, - FInfo* fileInfo) override; - - int CreateCloneChunk( - const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure *scc) override; - - int RecoverChunk( - const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure *scc) override; - - int CompleteCloneMeta( - const std::string &filename, - const std::string &user) override; - - int CompleteCloneFile( - const std::string &filename, - const std::string &user) override; - - int SetCloneFileStatus( - const std::string &filename, - const FileStatus& filestatus, - const std::string &user) override; - - int GetFileInfo( - const std::string &filename, - const std::string &user, - FInfo* fileInfo) override; - - int GetOrAllocateSegmentInfo( - bool allocate, - uint64_t offset, - FInfo* fileInfo, - const std::string &user, - SegmentInfo *segInfo) override; - - int RenameCloneFile( - const std::string &user, - uint64_t originId, - uint64_t destinationId, - const std::string &origin, - const std::string &destination) override; - - int DeleteFile( - const std::string &fileName, - const std::string &user, - uint64_t fileId) override; - - int Mkdir(const std::string& dirpath, - const std::string &user) override; + 
int CreateSnapshot(const std::string& filename, const std::string& user, + uint64_t* seq) override; + + int DeleteSnapshot(const std::string& filename, const std::string& user, + uint64_t seq) override; + + int GetSnapshot(const std::string& filename, const std::string& user, + uint64_t seq, FInfo* snapInfo) override; + + int GetSnapshotSegmentInfo(const std::string& filename, + const std::string& user, uint64_t seq, + uint64_t offset, SegmentInfo* segInfo) override; + + int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, + SnapCloneClosure* scc) override; + + int CheckSnapShotStatus(std::string filename, std::string user, + uint64_t seq, FileStatus* filestatus) override; + + int GetChunkInfo(const ChunkIDInfo& cidinfo, + ChunkInfoDetail* chunkInfo) override; + + int CreateCloneFile(const std::string& source, const std::string& filename, + const std::string& user, uint64_t size, uint64_t sn, + uint32_t chunkSize, uint64_t stripeUnit, + uint64_t stripeCount, const std::string& poolset, + FInfo* fileInfo) override; + + int CreateCloneChunk(const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, + uint64_t csn, uint64_t chunkSize, + SnapCloneClosure* scc) override; + + int RecoverChunk(const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc) override; + + int CompleteCloneMeta(const std::string& filename, + const std::string& user) override; + + int CompleteCloneFile(const std::string& filename, + const std::string& user) override; + + int SetCloneFileStatus(const std::string& filename, + const FileStatus& filestatus, + const std::string& user) override; + + int GetFileInfo(const std::string& filename, const std::string& user, + FInfo* fileInfo) override; + + int GetOrAllocateSegmentInfo(bool allocate, uint64_t offset, + FInfo* fileInfo, const std::string& user, + SegmentInfo* segInfo) override; + + int RenameCloneFile(const std::string& user, uint64_t originId, + uint64_t destinationId, const std::string& origin, + const std::string& destination) override; + + int DeleteFile(const std::string& fileName, const std::string& user, + uint64_t fileId) override; + + int Mkdir(const std::string& dirpath, const std::string& user) override; int ChangeOwner(const std::string& filename, const std::string& newOwner) override; /** - * @brief 判断/clone目录下是否存在临时文件 + * @brief Check if there are temporary files under the /clone directory. * - * @retval true 存在 - * @retval false 不存在 + * @retval true If they exist. + * @retval false If they do not exist. */ bool JudgeCloneDirHasFile(); @@ -169,11 +129,11 @@ class FakeCurveFsClient : public CurveFsClient { // fileName -> snapshot fileInfo std::map<std::string, FInfo> fileSnapInfoMap_; - // inodeid 从101开始,100以内预留 - // 快照所属文件Id一律为100, parentid = 99 - // "/" 目录的Id为1 - // "/clone" 目录的Id为2 - // "/user1" 目录的Id为3 + // Inode IDs start from 101, with numbers under 100 reserved. + // Snapshot file IDs are always 100, with a parentid = 99. + // The ID for the "/" directory is 1. + // The ID for the "/clone" directory is 2. + // The ID for the "/user1" directory is 3.
std::atomic<uint64_t> fileId_; }; diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp index 8f312b9a88..1ff6116653 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp @@ -20,72 +20,71 @@ * Author: xuchaojie */ -#include -#include #include +#include +#include #include -#include "test/integration/cluster_common/cluster.h" #include "src/client/libcurve_file.h" -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" -#include "src/snapshotcloneserver/clone/clone_service_manager.h" -#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" +#include "src/client/source_reader.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/clone/clone_service_manager.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" -#include "src/client/source_reader.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" +#include "test/integration/cluster_common/cluster.h" +#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" using curve::CurveCluster; using curve::client::FileClient; -using curve::client::UserInfo_t; using curve::client::SourceReader; +using curve::client::UserInfo_t; -const std::string kTestPrefix = "SCSTest"; // NOLINT +const std::string kTestPrefix = "SCSTest"; // NOLINT const uint64_t chunkSize = 16ULL * 1024 * 1024; const uint64_t segmentSize = 32ULL * 1024 * 1024; const uint64_t chunkGap = 1; -const char* kEtcdClientIpPort = "127.0.0.1:10001"; -const char* kEtcdPeerIpPort = "127.0.0.1:10002"; -const char* kMdsIpPort = "127.0.0.1:10003"; -const char* kChunkServerIpPort1 = "127.0.0.1:10004"; -const char* kChunkServerIpPort2 = "127.0.0.1:10005"; -const char* kChunkServerIpPort3 = "127.0.0.1:10006"; -const char* kSnapshotCloneServerIpPort = "127.0.0.1:10007"; +const char *kEtcdClientIpPort = "127.0.0.1:10001"; +const char *kEtcdPeerIpPort = "127.0.0.1:10002"; +const char *kMdsIpPort = "127.0.0.1:10003"; +const char *kChunkServerIpPort1 = "127.0.0.1:10004"; +const char *kChunkServerIpPort2 = "127.0.0.1:10005"; +const char *kChunkServerIpPort3 = "127.0.0.1:10006"; +const char *kSnapshotCloneServerIpPort = "127.0.0.1:10007"; const int kMdsDummyPort = 10008; -const char* kSnapshotCloneServerDummyServerPort = "12000"; -const char* kLeaderCampaginPrefix = "snapshotcloneserverleaderlock3"; +const char *kSnapshotCloneServerDummyServerPort = "12000"; +const char *kLeaderCampaginPrefix = "snapshotcloneserverleaderlock3"; -static const char* kDefaultPoolset = "default"; +static const char *kDefaultPoolset = "default"; -const std::string kLogPath = "./runlog/" + kTestPrefix + "Log"; // NOLINT -const std::string kMdsDbName = kTestPrefix + "DB"; // NOLINT -const std::string kMdsConfigPath = // NOLINT +const std::string kLogPath = "./runlog/" + kTestPrefix + "Log"; // NOLINT +const std::string kMdsDbName = kTestPrefix + "DB"; // NOLINT +const std::string kMdsConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_mds.conf"; -const std::string kCSConfigPath = // NOLINT +const std::string kCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_chunkserver.conf"; -const std::string kCsClientConfigPath = // NOLINT +const std::string kCsClientConfigPath = // NOLINT
"./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_cs_client.conf"; -const std::string kSnapClientConfigPath = // NOLINT +const std::string kSnapClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_snap_client.conf"; -const std::string kS3ConfigPath = // NOLINT - "./test/integration/snapshotcloneserver/config/" + kTestPrefix + - "_s3.conf"; +const std::string kS3ConfigPath = // NOLINT + "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_s3.conf"; -const std::string kSCSConfigPath = // NOLINT +const std::string kSCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_scs.conf"; -const std::string kClientConfigPath = // NOLINT +const std::string kClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_client.conf"; @@ -99,11 +98,11 @@ const std::vector mdsConfigOptions{ }; const std::vector mdsConf1{ - { "--graceful_quit_on_sigterm" }, + {"--graceful_quit_on_sigterm"}, std::string("--confPath=") + kMdsConfigPath, std::string("--log_dir=") + kLogPath, std::string("--segmentSize=") + std::to_string(segmentSize), - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector chunkserverConfigOptions{ @@ -128,66 +127,63 @@ const std::vector snapClientConfigOptions{ const std::vector s3ConfigOptions{}; const std::vector chunkserverConf1{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "1/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "1/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "1/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "1/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "1/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "1/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "1/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "1/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "1/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "1/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "1/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "1/walfilepool.meta"}, }; const std::vector chunkserverConf2{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "2/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "2/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "2/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "2/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix 
+ - "2/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "2/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "2/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "2/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "2/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "2/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "2/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "2/walfilepool.meta"}, }; const std::vector chunkserverConf3{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "3/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "3/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "3/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "3/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "3/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "3/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "3/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "3/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "3/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "3/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "3/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "3/walfilepool.meta"}, }; const std::vector snapshotcloneserverConfigOptions{ @@ -212,7 +208,7 @@ const std::vector snapshotcloneserverConfigOptions{ const std::vector snapshotcloneConf{ std::string("--conf=") + kSCSConfigPath, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector clientConfigOptions{ @@ -221,903 +217,954 @@ const std::vector clientConfigOptions{ std::string("mds.rpcTimeoutMS=4000"), }; -const char* testFile1_ = "/ItUser1/file1"; -const char* testFile2_ = "/ItUser1/file2"; -const char* testFile3_ = "/ItUser2/file3"; -const char* testFile4_ = "/ItUser1/file3"; -const char* testFile5_ = "/ItUser1/file4"; -const char* testUser1_ = "ItUser1"; -const char* testUser2_ = "ItUser2"; - -namespace curve { -namespace 
snapshotcloneserver { - -class SnapshotCloneServerTest : public ::testing::Test { - public: - static void SetUpTestCase() { - std::string mkLogDirCmd = std::string("mkdir -p ") + kLogPath; - system(mkLogDirCmd.c_str()); - - cluster_ = new CurveCluster(); - ASSERT_NE(nullptr, cluster_); - - // 初始化db - system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); - system(std::string("rm -rf " + kTestPrefix + "1").c_str()); - system(std::string("rm -rf " + kTestPrefix + "2").c_str()); - system(std::string("rm -rf " + kTestPrefix + "3").c_str()); - - // 启动etcd - pid_t pid = cluster_->StartSingleEtcd( - 1, kEtcdClientIpPort, kEtcdPeerIpPort, - std::vector<std::string>{ "--name=" + kTestPrefix }); - LOG(INFO) << "etcd 1 started on " << kEtcdPeerIpPort - << ", pid = " << pid; - ASSERT_GT(pid, 0); - - cluster_->InitSnapshotCloneMetaStoreEtcd(kEtcdClientIpPort); - - cluster_->PrepareConfig(kMdsConfigPath, - mdsConfigOptions); - - // 启动一个mds - pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, - true); - LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; - ASSERT_GT(pid, 0); - - // 创建物理池 - ASSERT_EQ(0, cluster_->PreparePhysicalPool( - 1, - "./test/integration/snapshotcloneserver/" - "config/topo.json")); // NOLINT - - // format chunkfilepool and walfilepool - std::vector<std::thread> threadpool(3); - - threadpool[0] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "1/chunkfilepool/", - "./" + kTestPrefix + "1/chunkfilepool.meta", - "./" + kTestPrefix + "1/chunkfilepool/", 1); - threadpool[1] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "2/chunkfilepool/", - "./" + kTestPrefix + "2/chunkfilepool.meta", - "./" + kTestPrefix + "2/chunkfilepool/", 1); - threadpool[2] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "3/chunkfilepool/", - "./" + kTestPrefix + "3/chunkfilepool.meta", - "./" + kTestPrefix + "3/chunkfilepool/", 1); - for (int i = 0; i < 3; i++) { - threadpool[i].join(); +const char *testFile1_ = "/ItUser1/file1"; +const char *testFile2_ = "/ItUser1/file2"; +const char *testFile3_ = "/ItUser2/file3"; +const char *testFile4_ = "/ItUser1/file3"; +const char *testFile5_ = "/ItUser1/file4"; +const char *testUser1_ = "ItUser1"; +const char *testUser2_ = "ItUser2"; + +namespace curve +{ + namespace snapshotcloneserver + { + + class SnapshotCloneServerTest : public ::testing::Test + { + public: + static void SetUpTestCase() + { + std::string mkLogDirCmd = std::string("mkdir -p ") + kLogPath; + system(mkLogDirCmd.c_str()); + + cluster_ = new CurveCluster(); + ASSERT_NE(nullptr, cluster_); + + // Initialize db + system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); + system(std::string("rm -rf " + kTestPrefix + "1").c_str()); + system(std::string("rm -rf " + kTestPrefix + "2").c_str()); + system(std::string("rm -rf " + kTestPrefix + "3").c_str()); + + // Start etcd + pid_t pid = cluster_->StartSingleEtcd( + 1, kEtcdClientIpPort, kEtcdPeerIpPort, + std::vector<std::string>{"--name=" + kTestPrefix}); + LOG(INFO) << "etcd 1 started on " << kEtcdPeerIpPort + << ", pid = " << pid; + ASSERT_GT(pid, 0); + + cluster_->InitSnapshotCloneMetaStoreEtcd(kEtcdClientIpPort); + + cluster_->PrepareConfig(kMdsConfigPath, + mdsConfigOptions); + + // Start an mds + pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, + true); + LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; + ASSERT_GT(pid, 0); + + // Creating a physical pool + ASSERT_EQ(0, cluster_->PreparePhysicalPool(
1, + "./test/integration/snapshotcloneserver/" + "config/topo.json")); // NOLINT + + // format chunkfilepool and walfilepool + std::vector threadpool(3); + + threadpool[0] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "1/chunkfilepool/", + "./" + kTestPrefix + "1/chunkfilepool.meta", + "./" + kTestPrefix + "1/chunkfilepool/", 1); + threadpool[1] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "2/chunkfilepool/", + "./" + kTestPrefix + "2/chunkfilepool.meta", + "./" + kTestPrefix + "2/chunkfilepool/", 1); + threadpool[2] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "3/chunkfilepool/", + "./" + kTestPrefix + "3/chunkfilepool.meta", + "./" + kTestPrefix + "3/chunkfilepool/", 1); + for (int i = 0; i < 3; i++) + { + threadpool[i].join(); + } + + cluster_->PrepareConfig(kCsClientConfigPath, + csClientConfigOptions); + + cluster_->PrepareConfig(kS3ConfigPath, + s3ConfigOptions); + + cluster_->PrepareConfig(kCSConfigPath, + chunkserverConfigOptions); + + // Create chunkserver + pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1, + chunkserverConf1); + LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1 + << ", pid = " << pid; + ASSERT_GT(pid, 0); + pid = cluster_->StartSingleChunkServer(2, kChunkServerIpPort2, + chunkserverConf2); + LOG(INFO) << "chunkserver 2 started on " << kChunkServerIpPort2 + << ", pid = " << pid; + ASSERT_GT(pid, 0); + pid = cluster_->StartSingleChunkServer(3, kChunkServerIpPort3, + chunkserverConf3); + LOG(INFO) << "chunkserver 3 started on " << kChunkServerIpPort3 + << ", pid = " << pid; + ASSERT_GT(pid, 0); + + std::this_thread::sleep_for(std::chrono::seconds(5)); + + // Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first + ASSERT_EQ(0, cluster_->PrepareLogicalPool( + 1, + "./test/integration/snapshotcloneserver/config/" + "topo.json")); + + cluster_->PrepareConfig( + kSnapClientConfigPath, snapClientConfigOptions); + + cluster_->PrepareConfig( + kSCSConfigPath, snapshotcloneserverConfigOptions); + + pid = cluster_->StartSnapshotCloneServer(1, kSnapshotCloneServerIpPort, + snapshotcloneConf); + LOG(INFO) << "SnapshotCloneServer 1 started on " + << kSnapshotCloneServerIpPort << ", pid = " << pid; + ASSERT_GT(pid, 0); + + cluster_->PrepareConfig(kClientConfigPath, + clientConfigOptions); + + fileClient_ = new FileClient(); + fileClient_->Init(kClientConfigPath); + + UserInfo_t userinfo; + userinfo.owner = "ItUser1"; + + ASSERT_EQ(0, fileClient_->Mkdir("/ItUser1", userinfo)); + + std::string fakeData(4096, 'x'); + ASSERT_TRUE(CreateAndWriteFile(testFile1_, testUser1_, fakeData)); + LOG(INFO) << "Write testFile1_ success."; + + ASSERT_TRUE(CreateAndWriteFile(testFile2_, testUser1_, fakeData)); + LOG(INFO) << "Write testFile2_ success."; + + UserInfo_t userinfo2; + userinfo2.owner = "ItUser2"; + ASSERT_EQ(0, fileClient_->Mkdir("/ItUser2", userinfo2)); + + ASSERT_TRUE(CreateAndWriteFile(testFile3_, testUser2_, fakeData)); + LOG(INFO) << "Write testFile3_ success."; + + ASSERT_EQ(0, fileClient_->Create(testFile4_, userinfo, + 10ULL * 1024 * 1024 * 1024)); + + ASSERT_EQ(0, fileClient_->Create(testFile5_, userinfo, + 10ULL * 1024 * 1024 * 1024)); + } + + static bool CreateAndWriteFile(const std::string &fileName, + const std::string &user, + const std::string &dataSample) + { + UserInfo_t userinfo; + userinfo.owner = user; + int ret = + fileClient_->Create(fileName, userinfo, 10ULL * 1024 * 1024 * 1024); 
+ if (ret < 0) + { + LOG(ERROR) << "Create fail, ret = " << ret; + return false; + } + return WriteFile(fileName, user, dataSample); + } + + static bool WriteFile(const std::string &fileName, const std::string &user, + const std::string &dataSample) + { + int ret = 0; + UserInfo_t userinfo; + userinfo.owner = user; + int testfd1_ = fileClient_->Open(fileName, userinfo); + if (testfd1_ < 0) + { + LOG(ERROR) << "Open fail, ret = " << testfd1_; + return false; + } + // Write the first 4 KB of each chunk, across two segments + uint64_t totalChunk = 2ULL * segmentSize / chunkSize; + for (uint64_t i = 0; i < totalChunk / chunkGap; i++) + { + ret = + fileClient_->Write(testfd1_, dataSample.c_str(), + i * chunkSize * chunkGap, dataSample.size()); + if (ret < 0) + { + LOG(ERROR) << "Write Fail, ret = " << ret; + return false; + } + } + ret = fileClient_->Close(testfd1_); + if (ret < 0) + { + LOG(ERROR) << "Close fail, ret = " << ret; + return false; + } + return true; + } + + static bool CheckFileData(const std::string &fileName, + const std::string &user, + const std::string &dataSample) + { + UserInfo_t userinfo; + userinfo.owner = user; + int dstFd = fileClient_->Open(fileName, userinfo); + if (dstFd < 0) + { + LOG(ERROR) << "Open fail, ret = " << dstFd; + return false; + } + + int ret = 0; + uint64_t totalChunk = 2ULL * segmentSize / chunkSize; + for (uint64_t i = 0; i < totalChunk / chunkGap; i++) + { + char buf[4096]; + ret = fileClient_->Read(dstFd, buf, i * chunkSize * chunkGap, 4096); + if (ret < 0) + { + LOG(ERROR) << "Read fail, ret = " << ret; + return false; + } + std::string data(buf, 4096); + if (data != dataSample) + { + LOG(ERROR) << "CheckFileData not Equal, data = [" << data + << "] , expect data = [" << dataSample << "]."; + return false; + } + } + ret = fileClient_->Close(dstFd); + if (ret < 0) + { + LOG(ERROR) << "Close fail, ret = " << ret; + return false; + } + return true; + } + + static void TearDownTestCase() + { + fileClient_->UnInit(); + delete fileClient_; + fileClient_ = nullptr; + ASSERT_EQ(0, cluster_->StopCluster()); + delete cluster_; + cluster_ = nullptr; + system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); + system(std::string("rm -rf " + kTestPrefix + "1").c_str()); + system(std::string("rm -rf " + kTestPrefix + "2").c_str()); + system(std::string("rm -rf " + kTestPrefix + "3").c_str()); + } + + void SetUp() {} + + void TearDown() {} + + void PrepareSnapshotForTestFile1(std::string *uuid1) + { + if (!hasSnapshotForTestFile1_) + { + int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", uuid1); + ASSERT_EQ(0, ret); + bool success1 = + CheckSnapshotSuccess(testUser1_, testFile1_, *uuid1); + ASSERT_TRUE(success1); + hasSnapshotForTestFile1_ = true; + snapIdForTestFile1_ = *uuid1; + } + } + + void WaitDeleteSnapshotForTestFile1() + { + if (hasSnapshotForTestFile1_) + { + ASSERT_EQ(0, DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, + snapIdForTestFile1_)); + } + } + + static CurveCluster *cluster_; + static FileClient *fileClient_; + + bool hasSnapshotForTestFile1_ = false; + std::string snapIdForTestFile1_; + }; + + CurveCluster *SnapshotCloneServerTest::cluster_ = nullptr; + FileClient *SnapshotCloneServerTest::fileClient_ = nullptr; + + // Regular test cases + // Scenario 1: Snapshot add, delete, and get + TEST_F(SnapshotCloneServerTest, TestSnapshotAddDeleteGet) + { + std::string uuid1; + int ret = 0; + // Step1: User testUser1_ takes a snapshot of a file that does not exist + // Expected 1: Returns file does not exist + ret =
MakeSnapshot(testUser1_, "/ItUser1/notExistFile", "snap1", &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Step2: User testUser2_ takes a snapshot of testFile1_ + // Expected 2: Returns user authentication failure + ret = MakeSnapshot(testUser2_, testFile1_, "snap1", &uuid1); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step3: User testUser1_ takes snapshot snap1 of testFile1_. + // Expected 3: Snapshot succeeds + ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1); + ASSERT_EQ(0, ret); + + std::string fakeData(4096, 'y'); + ASSERT_TRUE(WriteFile(testFile1_, testUser1_, fakeData)); + ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); + + // Step4: Obtain snapshot information, user=testUser1_, filename=testFile1_ + // Expected 4: Returns the information of snapshot snap1 + bool success1 = CheckSnapshotSuccess(testUser1_, testFile1_, uuid1); + ASSERT_TRUE(success1); + + // Step5: Obtain snapshot information, user=testUser2_, filename=testFile1_ + // Expected 5: Returns user authentication failure + FileSnapshotInfo info1; + ret = GetSnapshotInfo(testUser2_, testFile1_, uuid1, &info1); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step6: Obtain snapshot information, user=testUser2_, filename=testFile2_ + // Expected 6: Returns empty + std::vector<FileSnapshotInfo> infoVec; + ret = ListFileSnapshotInfo(testUser2_, testFile2_, 10, 0, &infoVec); + ASSERT_EQ(0, ret); + ASSERT_EQ(0, infoVec.size()); + + // Step7: testUser2_ deletes snapshot snap1 + // Expected 7: Returns user authentication failure + ret = DeleteSnapshot(testUser2_, testFile1_, uuid1); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step8: testUser1_ deletes the snapshot of testFile2_ with ID snap1 + // Expected 8: Returns file name mismatch + ret = DeleteSnapshot(testUser1_, testFile2_, uuid1); + ASSERT_EQ(kErrCodeFileNameNotMatch, ret); + + // Step9: testUser1_ deletes snapshot snap1 + // Expected 9: Returns deletion success + ret = DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, uuid1); + ASSERT_EQ(0, ret); + + // Step10: Obtain snapshot information, user=testUser1_, filename=testFile1_ + // Expected 10: Returns empty + ret = ListFileSnapshotInfo(testUser1_, testFile1_, 10, 0, &infoVec); + ASSERT_EQ(0, ret); + ASSERT_EQ(0, infoVec.size()); + + // Step11: testUser1_ deletes snapshot snap1 (duplicate deletion) + // Expected 11: Returns deletion success + ret = DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, uuid1); + ASSERT_EQ(0, ret); + + // Restore testFile1_ + std::string fakeData2(4096, 'x'); + ASSERT_TRUE(WriteFile(testFile1_, testUser1_, fakeData2)); } - cluster_->PrepareConfig(kCsClientConfigPath, - csClientConfigOptions); - - cluster_->PrepareConfig(kS3ConfigPath, - s3ConfigOptions); - - cluster_->PrepareConfig(kCSConfigPath, - chunkserverConfigOptions); - - // 创建chunkserver - pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1, - chunkserverConf1); - LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1 - << ", pid = " << pid; - ASSERT_GT(pid, 0); - pid = cluster_->StartSingleChunkServer(2, kChunkServerIpPort2, - chunkserverConf2); - LOG(INFO) << "chunkserver 2 started on " << kChunkServerIpPort2 - << ", pid = " << pid; - ASSERT_GT(pid, 0); - pid = cluster_->StartSingleChunkServer(3, kChunkServerIpPort3, - chunkserverConf3); - LOG(INFO) << "chunkserver 3 started on " << kChunkServerIpPort3 - << ", pid = " << pid; - ASSERT_GT(pid, 0); - - std::this_thread::sleep_for(std::chrono::seconds(5)); - - // 创建逻辑池, 并睡眠一段时间让底层copyset先选主 - ASSERT_EQ(0, cluster_->PrepareLogicalPool( - 1,
- "./test/integration/snapshotcloneserver/config/" - "topo.json")); - - cluster_->PrepareConfig( - kSnapClientConfigPath, snapClientConfigOptions); - - cluster_->PrepareConfig( - kSCSConfigPath, snapshotcloneserverConfigOptions); - - pid = cluster_->StartSnapshotCloneServer(1, kSnapshotCloneServerIpPort, - snapshotcloneConf); - LOG(INFO) << "SnapshotCloneServer 1 started on " - << kSnapshotCloneServerIpPort << ", pid = " << pid; - ASSERT_GT(pid, 0); - - cluster_->PrepareConfig(kClientConfigPath, - clientConfigOptions); - - fileClient_ = new FileClient(); - fileClient_->Init(kClientConfigPath); - - UserInfo_t userinfo; - userinfo.owner = "ItUser1"; - - ASSERT_EQ(0, fileClient_->Mkdir("/ItUser1", userinfo)); - - std::string fakeData(4096, 'x'); - ASSERT_TRUE(CreateAndWriteFile(testFile1_, testUser1_, fakeData)); - LOG(INFO) << "Write testFile1_ success."; - - ASSERT_TRUE(CreateAndWriteFile(testFile2_, testUser1_, fakeData)); - LOG(INFO) << "Write testFile2_ success."; - - UserInfo_t userinfo2; - userinfo2.owner = "ItUser2"; - ASSERT_EQ(0, fileClient_->Mkdir("/ItUser2", userinfo2)); - - ASSERT_TRUE(CreateAndWriteFile(testFile3_, testUser2_, fakeData)); - LOG(INFO) << "Write testFile3_ success."; - - ASSERT_EQ(0, fileClient_->Create(testFile4_, userinfo, - 10ULL * 1024 * 1024 * 1024)); - - ASSERT_EQ(0, fileClient_->Create(testFile5_, userinfo, - 10ULL * 1024 * 1024 * 1024)); - } - - static bool CreateAndWriteFile(const std::string& fileName, - const std::string& user, - const std::string& dataSample) { - UserInfo_t userinfo; - userinfo.owner = user; - int ret = - fileClient_->Create(fileName, userinfo, 10ULL * 1024 * 1024 * 1024); - if (ret < 0) { - LOG(ERROR) << "Create fail, ret = " << ret; - return false; + // Scenario 2: Cancel Snapshot + TEST_F(SnapshotCloneServerTest, TestCancelSnapshot) + { + std::string uuid1; + int ret = MakeSnapshot(testUser1_, testFile1_, "snapToCancle", &uuid1); + ASSERT_EQ(0, ret); + + bool success1 = false; + bool isCancel = false; + for (int i = 0; i < 600; i++) + { + FileSnapshotInfo info1; + int retCode = GetSnapshotInfo(testUser1_, testFile1_, uuid1, &info1); + if (retCode == 0) + { + if (info1.GetSnapshotInfo().GetStatus() == Status::pending || + info1.GetSnapshotInfo().GetStatus() == Status::canceling) + { + if (!isCancel) + { + // Step1: User testUser1_ For testFile1_ Take a snapshot + // snap1, + // testUser2_ before the snapshot is completed_ + // Cancel testFile1_ Snap1 of snapshot + // Expected 1: Failed to cancel user authentication + int retCode = CancelSnapshot(testUser2_, testFile1_, uuid1); + ASSERT_EQ(kErrCodeInvalidUser, retCode); + + // Step2: User testUser1_ For testFile1_ Take a snapshot + // snap1, + // testUser1_ before the snapshot is completed_ + // Cancel testFile1_ A non-existent snapshot of + // Expected 2: Return kErrCodeCannotCancelFinished + retCode = + CancelSnapshot(testUser1_, testFile1_, "notExistUUId"); + ASSERT_EQ(kErrCodeCannotCancelFinished, retCode); + + // Step3: User testUser1_ For testFile1_ Take a snapshot + // snap1, + // testUser1_ before the snapshot is completed_ + // Cancel testFile2_ Snap1 of snapshot + // Expected 3: Return file name mismatch + retCode = CancelSnapshot(testUser1_, testFile2_, uuid1); + ASSERT_EQ(kErrCodeFileNameNotMatch, retCode); + + // Step4: User testUser1_ For testFile1_ Take a snapshot, + // testUser1_ before the snapshot is completed_ + // Cancel snapshot snap1 + // Expected 4: Successfully cancelled snapshot + retCode = CancelSnapshot(testUser1_, testFile1_, uuid1); + ASSERT_EQ(0, 
retCode); + isCancel = true; + } + std::this_thread::sleep_for(std::chrono::milliseconds(3000)); + continue; + } + else if (info1.GetSnapshotInfo().GetStatus() == Status::done) + { + success1 = false; + break; + } + else + { + FAIL() << "Snapshot Fail On status = " + << static_cast<int>(info1.GetSnapshotInfo().GetStatus()); + } + } + else if (retCode == -8) + { + // Step5: Obtain snapshot information, user=testUser1_, + // filename=testFile1_ Expected 5: Returns empty + success1 = true; + break; + } + } + ASSERT_TRUE(success1); + + // Step6: After the snapshot completes, testUser1_ cancels snapshot + // snap1 of testFile1_ Expected 6: Returns that the snapshot to cancel does + // not exist or has already finished + ret = CancelSnapshot(testUser1_, testFile1_, uuid1); + ASSERT_EQ(kErrCodeCannotCancelFinished, ret); } - return WriteFile(fileName, user, dataSample); - } - - static bool WriteFile(const std::string& fileName, const std::string& user, - const std::string& dataSample) { - int ret = 0; - UserInfo_t userinfo; - userinfo.owner = user; - int testfd1_ = fileClient_->Open(fileName, userinfo); - if (testfd1_ < 0) { - LOG(ERROR) << "Open fail, ret = " << testfd1_; - return false; + + // Scenario 3: Lazy snapshot clone scenario + TEST_F(SnapshotCloneServerTest, TestSnapLazyClone) + { + std::string snapId; + PrepareSnapshotForTestFile1(&snapId); + + // Step1: testUser1_ clones a snapshot that does not exist, + // fileName=SnapLazyClone1 Expected 1: Returns snapshot does not exist + std::string uuid1, uuid2, uuid3, uuid4, uuid5; + int ret; + ret = CloneOrRecover("Clone", testUser1_, "UnExistSnapId1", + "/ItUser1/SnapLazyClone1", true, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Step2: testUser2_ clones snapshot snap1, fileName=SnapLazyClone1 + // Expected 2: Returns user authentication failure + ret = CloneOrRecover("Clone", testUser2_, snapId, "/ItUser2/SnapLazyClone1", + true, &uuid2); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step3: testUser1_ clones snapshot snap1, fileName=SnapLazyClone1 + // Expected 3: Returns clone success + std::string dstFile = "/ItUser1/SnapLazyClone1"; + ret = CloneOrRecover("Clone", testUser1_, snapId, dstFile, true, &uuid3); + ASSERT_EQ(0, ret); + + // Step4: testUser1_ clones snapshot snap1, fileName=SnapLazyClone1 + // (duplicate clone) Expected 4: Returns clone success (idempotent) + ret = CloneOrRecover("Clone", testUser1_, snapId, "/ItUser1/SnapLazyClone1", + true, &uuid4); + ASSERT_EQ(0, ret); + + // Flatten + ret = Flatten(testUser1_, uuid3); + ASSERT_EQ(0, ret); + + // Step5: testUser1_ GetCloneTask + // Expected 5: Returns the clone task for SnapLazyClone1 + bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid3, true); + ASSERT_TRUE(success1); + + // Step6: testUser2_ GetCloneTask + // Expected 6: Returns empty + std::vector<TaskCloneInfo> infoVec; + ret = ListCloneTaskInfo(testUser2_, 10, 0, &infoVec); + ASSERT_EQ(0, ret); + ASSERT_EQ(0, infoVec.size()); + + // Step7: testUser2_ CleanCloneTask with the UUID of SnapLazyClone1 + // Expected 7: Returns user authentication failure + ret = CleanCloneTask(testUser2_, uuid3); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step8: testUser1_ CleanCloneTask with the UUID of SnapLazyClone1 + // Expected 8: Returns execution success + ret = CleanCloneTask(testUser1_, uuid3); + ASSERT_EQ(0, ret); + + // Wait for the cleanup to finish + std::this_thread::sleep_for(std::chrono::seconds(3)); + + // Step9: testUser1_ CleanCloneTask with the UUID of SnapLazyClone1 + // (repeated execution) Expected
9: Returns execution success + ret = CleanCloneTask(testUser1_, uuid3); + ASSERT_EQ(0, ret); + + // Step10: testUser1_ GetCloneTask + // Expected 10: Returns empty + TaskCloneInfo info; + ret = GetCloneTaskInfo(testUser1_, uuid3, &info); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Verify data correctness + std::string fakeData(4096, 'x'); + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } - // 每个chunk写前面4k数据, 写两个segment - uint64_t totalChunk = 2ULL * segmentSize / chunkSize; - for (uint64_t i = 0; i < totalChunk / chunkGap; i++) { - ret = - fileClient_->Write(testfd1_, dataSample.c_str(), - i * chunkSize * chunkGap, dataSample.size()); - if (ret < 0) { - LOG(ERROR) << "Write Fail, ret = " << ret; - return false; - } + + // Scenario 4: Non-lazy snapshot clone scenario + TEST_F(SnapshotCloneServerTest, TestSnapNotLazyClone) + { + std::string snapId; + PrepareSnapshotForTestFile1(&snapId); + + // Step1: testUser1_ clones a snapshot that does not exist, + // fileName=SnapNotLazyClone1 Expected 1: Returns snapshot does not exist + std::string uuid1; + int ret; + ret = CloneOrRecover("Clone", testUser1_, "UnExistSnapId2", + "/ItUser1/SnapNotLazyClone1", false, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Step2: testUser2_ clones snapshot snap1, fileName=SnapNotLazyClone1 + // Expected 2: Returns user authentication failure + ret = CloneOrRecover("Clone", testUser2_, snapId, + "/ItUser2/SnapNotLazyClone1", false, &uuid1); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step3: testUser1_ clones snapshot snap1, fileName=SnapNotLazyClone1 + // Expected 3: Returns clone success + std::string dstFile = "/ItUser1/SnapNotLazyClone1"; + ret = CloneOrRecover("Clone", testUser1_, snapId, dstFile, false, &uuid1); + ASSERT_EQ(0, ret); + + bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true); + ASSERT_TRUE(success1); + + // Step4: testUser1_ clones snapshot snap1, + // fileName=SnapNotLazyClone1 (duplicate clone) + // Expected 4: Returns clone success (idempotent) + ret = CloneOrRecover("Clone", testUser1_, snapId, + "/ItUser1/SnapNotLazyClone1", false, &uuid1); + ASSERT_EQ(0, ret); + + // Verify data correctness + std::string fakeData(4096, 'x'); + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } + // Scenario 5: Lazy snapshot recovery scenario + TEST_F(SnapshotCloneServerTest, TestSnapLazyRecover) + { + std::string snapId; + PrepareSnapshotForTestFile1(&snapId); + + // Step1: testUser1_ recovers from a snapshot that does not exist, + // fileName=testFile1_ Expected 1: Returns snapshot does not exist + std::string uuid1; + int ret; + ret = CloneOrRecover("Recover", testUser1_, "UnExistSnapId3", testFile1_, + true, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Step2: testUser2_ recovers from snapshot snap1, fileName=testFile1_ + // Expected 2: Returns user authentication failure + ret = + CloneOrRecover("Recover", testUser2_, snapId, testFile1_, true, &uuid1); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step3: testUser1_ recovers from snapshot snap1, fileName=testFile1_ + // Expected 3: Returns recovery success + ret = + CloneOrRecover("Recover", testUser1_, snapId, testFile1_, true, &uuid1); + ASSERT_EQ(0, ret); + + // Flatten + ret = Flatten(testUser1_, uuid1); + ASSERT_EQ(0, ret); + + bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false); + ASSERT_TRUE(success1); + + // Verify data correctness + std::string
fakeData(4096, 'x'); + ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); + + // Step4: testUser1_ recovers from snapshot snap1, but the target file does + // not exist Expected 4: Returns target file does not exist + ret = CloneOrRecover("Recover", testUser1_, snapId, "/ItUser1/notExistFile", + true, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); } - + // Scenario 6: Non-lazy snapshot recovery scenario + TEST_F(SnapshotCloneServerTest, TestSnapNotLazyRecover) + { + std::string snapId; + PrepareSnapshotForTestFile1(&snapId); + + // Step1: testUser1_ recovers from a snapshot that does not exist, + // fileName=testFile1_ Expected 1: Returns snapshot does not exist + std::string uuid1; + int ret; + ret = CloneOrRecover("Recover", testUser1_, "UnExistSnapId4", testFile1_, + false, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Step2: testUser2_ recovers from snapshot snap1, fileName=testFile1_ + // Expected 2: Returns user authentication failure + ret = CloneOrRecover("Recover", testUser2_, snapId, testFile1_, false, + &uuid1); + ASSERT_EQ(kErrCodeInvalidUser, ret); + + // Step3: testUser1_ recovers from snapshot snap1, fileName=testFile1_ + // Expected 3: Returns recovery success + ret = CloneOrRecover("Recover", testUser1_, snapId, testFile1_, false, + &uuid1); + ASSERT_EQ(0, ret); + + bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false); + ASSERT_TRUE(success1); + + // Verify data correctness + std::string fakeData(4096, 'x'); + ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); + + // Step4: testUser1_ recovers from snapshot snap1, but the target file does + // not exist Expected 4: Returns target file does not exist + ret = CloneOrRecover("Recover", testUser1_, snapId, "/ItUser1/notExistFile", + false, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); } - int ret = 0; - uint64_t totalChunk = 2ULL * segmentSize / chunkSize; - for (uint64_t i = 0; i < totalChunk / chunkGap; i++) { - char buf[4096]; - ret = fileClient_->Read(dstFd, buf, i * chunkSize * chunkGap, 4096); - if (ret < 0) { - LOG(ERROR) << "Read fail, ret = " << ret; - return false; - } - std::string data(buf, 4096); - if (data != dataSample) { - LOG(ERROR) << "CheckFileData not Equal, data = [" << data - << "] , expect data = [" << dataSample << "]."; - return false; - } + // Scenario 7: Lazy image clone scenario + TEST_F(SnapshotCloneServerTest, TestImageLazyClone) + { + // Step1: testUser1_ clones an image that does not exist, + // fileName=ImageLazyClone1 Expected 1: Returns file does not exist + std::string uuid1, uuid2, uuid3, uuid4; + int ret; + ret = CloneOrRecover("Clone", testUser1_, "/UnExistFile", + "/ItUser1/ImageLazyClone1", true, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Step2: testUser1_ clones image testFile1_, fileName=ImageLazyClone1 + // Expected 2: Returns clone success + std::string dstFile = "/ItUser1/ImageLazyClone1"; + ret = + CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid2); + ASSERT_EQ(0, ret); + + // Step3: testUser1_ clones image testFile1_, + // fileName=ImageLazyClone1 (duplicate clone) + // Expected 3: Returns clone success (idempotent) + ret = CloneOrRecover("Clone", testUser1_, testFile1_, + "/ItUser1/ImageLazyClone1", true,
&uuid3); + ASSERT_EQ(0, ret); + + // Step4: Take snapshot snap1 of file ImageLazyClone1 whose lazy clone has + // not yet completed Expected 4: Returns invalid file status + ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid4); + ASSERT_EQ(kErrCodeFileStatusInvalid, ret); + FileSnapshotInfo info2; + int retCode = GetSnapshotInfo(testUser1_, testFile1_, uuid4, &info2); + ASSERT_EQ(kErrCodeFileNotExist, retCode); + + ASSERT_TRUE(WaitMetaInstalledSuccess(testUser1_, uuid2, true)); + + // Verify data correctness before Flatten + std::string fakeData1(4096, 'x'); + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData1)); + + // Flatten + ret = Flatten(testUser1_, uuid2); + ASSERT_EQ(0, ret); + + bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid2, true); + ASSERT_TRUE(success1); + + // Verify data correctness after Flatten + std::string fakeData2(4096, 'x'); + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData2)); } + + // Scenario 8: Non-lazy image clone scenario + TEST_F(SnapshotCloneServerTest, TestImageNotLazyClone) + { + // Step1: testUser1_ clones an image that does not exist, + // fileName=ImageNotLazyClone1 Expected 1: Returns snapshot does not exist + std::string uuid1; + int ret; + ret = CloneOrRecover("Clone", testUser1_, "/UnExistFile", + "/ItUser1/ImageNotLazyClone1", false, &uuid1); + ASSERT_EQ(kErrCodeFileNotExist, ret); + + // Step2: testUser1_ clones image testFile1_, fileName=ImageNotLazyClone1 + // Expected 2: Returns clone success + std::string dstFile = "/ItUser1/ImageNotLazyClone1"; + ret = + CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, false, &uuid1); + ASSERT_EQ(0, ret); + + bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true); + ASSERT_TRUE(success1); + + // Step3: testUser1_ clones image testFile1_, + // fileName=ImageNotLazyClone1 (duplicate clone) + // Expected 3: Returns clone success (idempotent) + ret = CloneOrRecover("Clone", testUser1_, testFile1_, + "/ItUser1/ImageNotLazyClone1", false, &uuid1); + ASSERT_EQ(0, ret); + + // Verify data correctness + std::string fakeData(4096, 'x'); + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } + + // Scenario 9: Snapshot-with-error scenario + TEST_F(SnapshotCloneServerTest, TestSnapAndCloneWhenSnapHasError) + { + std::string snapId = "errorSnapUuid"; + SnapshotInfo snapInfo(snapId, testUser1_, testFile4_, "snapxxx", 0, 0, 0, 0, + 0, 0, kDefaultPoolset, 0, Status::error); + + cluster_->metaStore_->AddSnapshot(snapInfo); + + pid_t pid = cluster_->RestartSnapshotCloneServer(1); + LOG(INFO) << "SnapshotCloneServer 1 restarted, pid = " << pid; + ASSERT_GT(pid, 0); + std::string uuid1, uuid2; + + // Step1: Lazy clone of snapshot snap1 + // Expected 1: Returns invalid snapshot +
int ret = CloneOrRecover("Clone", testUser1_, snapId, + "/ItUser2/SnapLazyClone1", true, &uuid2); + ASSERT_EQ(kErrCodeInvalidSnapshot, ret); + + // Step2: Non-lazy clone of snapshot snap1 + // Expected 2: Returns invalid snapshot + ret = CloneOrRecover("Clone", testUser1_, snapId, + "/ItUser2/SnapNotLazyClone1", false, &uuid2); + ASSERT_EQ(kErrCodeInvalidSnapshot, ret); + + // Step3: Lazy recover from snapshot snap1 + // Expected 3: Returns invalid snapshot + ret = + CloneOrRecover("Recover", testUser1_, snapId, testFile4_, true, &uuid2); + ASSERT_EQ(kErrCodeInvalidSnapshot, ret); + + // Step4: Non-lazy recover from snapshot snap1 + // Expected 4: Returns invalid snapshot + ret = CloneOrRecover("Recover", testUser1_, snapId, testFile4_, false, + &uuid2); + ASSERT_EQ(kErrCodeInvalidSnapshot, ret); + + // Step5: User testUser1_ takes snapshot snap1 of testFile4_ + // Expected 5: Cleans the failed snapshot and takes the snapshot successfully + ret = MakeSnapshot(testUser1_, testFile4_, "snap1", &uuid1); ASSERT_EQ(0, ret); - bool success1 = - CheckSnapshotSuccess(testUser1_, testFile1_, *uuid1); + + // Verify the snapshot succeeded + bool success1 = CheckSnapshotSuccess(testUser1_, testFile4_, uuid1); ASSERT_TRUE(success1); - hasSnapshotForTestFile1_ = true; - snapIdForTestFile1_ = *uuid1; + + // Verify the failed snapshot was cleaned up + FileSnapshotInfo info1; + int retCode = GetSnapshotInfo(testUser1_, testFile4_, snapId, &info1); + ASSERT_EQ(kErrCodeFileNotExist, retCode); } - } - void WaitDeleteSnapshotForTestFile1() { - if (hasSnapshotForTestFile1_) { - ASSERT_EQ(0, DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, - snapIdForTestFile1_)); + //[Online issue fix] Clone fails, rollback deletes the clone volume, and a + // volume with the same uuid is created again + TEST_F(SnapshotCloneServerTest, TestCloneHasSameDest) + { + std::string uuid1, uuid2, uuid3, uuid4, uuid5, uuid6, uuid7; + // Step1: testUser1_ clones image testFile1_, fileName=CloneHasSameDestUUID + // Expected 1: Returns clone success + std::string dstFile = "/ItUser1/CloneHasSameDest"; + int ret = + CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid1); + ASSERT_EQ(0, ret); + + // Delete the clone volume + UserInfo_t userinfo; + userinfo.owner = testUser1_; + int ret2 = fileClient_->Unlink(dstFile, userinfo, false); + ASSERT_EQ(0, ret2); + + // Step2: testUser1_ clones image testFile1_ again, + // fileName=CloneHasSameDestUUID + // Expected 2: Returns clone success + ret = + CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid2); + ASSERT_EQ(0, ret); + + // Verify data correctness + std::string fakeData(4096, 'x'); + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); + + // Step3: testUser1_ clones image testFile1_, fileName=CloneHasSameDest2 + // Expected 3: Returns clone success + dstFile = "/ItUser1/CloneHasSameDest2"; + ret = + CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid3); + ASSERT_EQ(0, ret); + + // Delete the clone volume + UserInfo_t userinfo2; + userinfo2.owner = testUser1_; + ret2 = fileClient_->Unlink(dstFile, userinfo2, false); + ASSERT_EQ(0, ret2); + + // Step4: testUser1_ clones image testFile2_ again, + // fileName=CloneHasSameDest2 + // Expected 4: Returns clone success + ret = + CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, true, &uuid4); + ASSERT_EQ(0, ret); + + // Verify data correctness + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); + + // Verify the different behaviors when
cloning again with a different lazyflag + // Step5: testUser1_ clones image testFile1_, fileName=CloneHasSameDest3 + // Expected 5: Returns clone success + dstFile = "/ItUser1/CloneHasSameDest3"; + ret = + CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid5); + ASSERT_EQ(0, ret); + + // Delete the clone volume + UserInfo_t userinfo3; + userinfo2.owner = testUser1_; + ret2 = fileClient_->Unlink(dstFile, userinfo2, false); + ASSERT_EQ(0, ret2); + + // Step6: testUser1_ non-lazily clones image testFile2_ again, + // fileName=CloneHasSameDest3 + // Expected 6: Returns clone success + ret = + CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, false, &uuid6); + ASSERT_EQ(0, ret); + + bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid6, true); + ASSERT_TRUE(success1); + + // Verify data correctness + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); + + // Delete the clone volume + UserInfo_t userinfo4; + userinfo2.owner = testUser1_; + ret2 = fileClient_->Unlink(dstFile, userinfo2, false); + ASSERT_EQ(0, ret2); + + // Step7: testUser1_ lazily clones image testFile2_ again, + // fileName=CloneHasSameDest3 + // Expected 7: Returns clone success + ret = + CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, true, &uuid7); + ASSERT_EQ(0, ret); + + // Verify data correctness + ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } + // Lazily clone a volume, delete the clone volume, and then delete the source + // volume; the source volume must then be deletable + TEST_F(SnapshotCloneServerTest, TestDeleteLazyCloneDestThenDeleteSrc) + { + // Step1: testUser1_ clones image testFile5_, lazily cloning two volumes dstFile1 + // and dstFile2 Expected 1: Returns clone success + std::string uuid1; + std::string uuid2; + std::string dstFile1 = "/dest1"; + std::string dstFile2 = "/dest2"; + UserInfo_t userinfo; + userinfo.owner = testUser1_; + int ret = + CloneOrRecover("Clone", testUser1_, testFile5_, dstFile1, true, &uuid1); + ASSERT_EQ(0, ret); - ret = + CloneOrRecover("Clone", testUser1_, testFile5_, dstFile2, true, &uuid2); + ASSERT_EQ(0, ret); + + // Deleting the source volume fails: the volume is occupied + + ret = fileClient_->Unlink(testFile5_, userinfo, false); + ASSERT_EQ(-27, ret); -CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr; -FileClient* SnapshotCloneServerTest::fileClient_ = nullptr; - -// 常规测试用例 -// 场景一:快照增加删除查找 -TEST_F(SnapshotCloneServerTest, TestSnapshotAddDeleteGet) { - std::string uuid1; - int ret = 0; - // 操作1:用户testUser1_对不存在的文件打快照 - // 预期1:返回文件不存在 - ret = MakeSnapshot(testUser1_, "/ItUser1/notExistFile", "snap1", &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // 操作2:用户testUser2_对testFile1_打快照 - // 预期2:返回用户认证失败 - ret = MakeSnapshot(testUser2_, testFile1_, "snap1", &uuid1); - ASSERT_EQ(kErrCodeInvalidUser, ret); - - // 操作3:用户testUser1_对testFile1_打快照snap1。 - // 预期3:打快照成功 - ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1); - ASSERT_EQ(0, ret); - - std::string fakeData(4096, 'y'); - ASSERT_TRUE(WriteFile(testFile1_, testUser1_, fakeData)); - ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); - - // 操作4: 获取快照信息,user=testUser1_,filename=testFile1_ - // 预期4:返回快照snap1的信息 - bool success1 = CheckSnapshotSuccess(testUser1_, testFile1_, uuid1); - ASSERT_TRUE(success1); - - // 操作5:获取快照信息,user=testUser2_,filename=testFile1_ - // 预期5:返回用户认证失败 - FileSnapshotInfo info1; - ret =
GetSnapshotInfo(testUser2_, testFile1_, uuid1, &info1); - ASSERT_EQ(kErrCodeInvalidUser, ret); - - // 操作6:获取快照信息,user=testUser2_,filename=testFile2_ - // 预期6:返回空 - std::vector infoVec; - ret = ListFileSnapshotInfo(testUser2_, testFile2_, 10, 0, &infoVec); - ASSERT_EQ(0, ret); - ASSERT_EQ(0, infoVec.size()); - - // 操作7:testUser2_删除快照snap1 - // 预期7:返回用户认证失败 - ret = DeleteSnapshot(testUser2_, testFile1_, uuid1); - ASSERT_EQ(kErrCodeInvalidUser, ret); - - // 操作8:testUser1_删除testFile2_的快照,ID为snap1 - // 预期8:返回文件名不匹配 - ret = DeleteSnapshot(testUser1_, testFile2_, uuid1); - ASSERT_EQ(kErrCodeFileNameNotMatch, ret); - - // 操作9:testUser1_删除快照snap1 - // 预期9:返回删除成功 - ret = DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, uuid1); - ASSERT_EQ(0, ret); - - // 操作10:获取快照信息,user=testUser1_,filename=testFile1_ - // 预期10:返回空 - ret = ListFileSnapshotInfo(testUser1_, testFile1_, 10, 0, &infoVec); - ASSERT_EQ(0, ret); - ASSERT_EQ(0, infoVec.size()); - - // 操作11:testUser1_删除快照snap1(重复删除) - // 预期11:返回删除成功 - ret = DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, uuid1); - ASSERT_EQ(0, ret); - - // 复原testFile1_ - std::string fakeData2(4096, 'x'); - ASSERT_TRUE(WriteFile(testFile1_, testUser1_, fakeData2)); -} - -// 场景二:取消快照 -TEST_F(SnapshotCloneServerTest, TestCancelSnapshot) { - std::string uuid1; - int ret = MakeSnapshot(testUser1_, testFile1_, "snapToCancle", &uuid1); - ASSERT_EQ(0, ret); - - bool success1 = false; - bool isCancel = false; - for (int i = 0; i < 600; i++) { - FileSnapshotInfo info1; - int retCode = GetSnapshotInfo(testUser1_, testFile1_, uuid1, &info1); - if (retCode == 0) { - if (info1.GetSnapshotInfo().GetStatus() == Status::pending || - info1.GetSnapshotInfo().GetStatus() == Status::canceling) { - if (!isCancel) { - // 操作1:用户testUser1_对testFile1_打快照snap1, - // 在快照未完成前testUser2_取消testFile1_的快照snap1 - // 预期1:取消用户认证失败 - int retCode = CancelSnapshot(testUser2_, testFile1_, uuid1); - ASSERT_EQ(kErrCodeInvalidUser, retCode); - - // 操作2:用户testUser1_对testFile1_打快照snap1, - // 在快照未完成前testUser1_取消testFile1_ - // 的不存在的快照 - // 预期2:返回kErrCodeCannotCancelFinished - retCode = - CancelSnapshot(testUser1_, testFile1_, "notExistUUId"); - ASSERT_EQ(kErrCodeCannotCancelFinished, retCode); - - // 操作3:用户testUser1_对testFile1_打快照snap1, - // 在快照未完成前testUser1_取消testFile2_的快照snap1 - // 预期3: 返回文件名不匹配 - retCode = CancelSnapshot(testUser1_, testFile2_, uuid1); - ASSERT_EQ(kErrCodeFileNameNotMatch, retCode); - - // 操作4:用户testUser1_对testFile1_打快照, - // 在快照未完成前testUser1_取消快照snap1 - // 预期4:取消快照成功 - retCode = CancelSnapshot(testUser1_, testFile1_, uuid1); - ASSERT_EQ(0, retCode); - isCancel = true; + // Step2: Successfully delete the destination volume dstFile1, delete the + // source volume again Expected 2 deletion failed, volume occupied + ret = fileClient_->Unlink(dstFile1, userinfo, false); + ASSERT_EQ(0, ret); + + ret = fileClient_->Unlink(testFile5_, userinfo, false); + ASSERT_EQ(-27, ret); + + // Step3: Successfully delete the destination volume dstFile2, delete the + // source volume again Expected 3 deletion successful + ret = fileClient_->Unlink(dstFile2, userinfo, false); + ASSERT_EQ(0, ret); + + ret = fileClient_->Unlink(testFile5_, userinfo, false); + ASSERT_EQ(0, ret); + + // Step4: Wait for a period of time to see if the garbage record can be + // deleted in the background + bool noRecord = false; + for (int i = 0; i < 100; i++) + { + TaskCloneInfo info; + int ret1 = GetCloneTaskInfo(testUser1_, uuid1, &info); + int ret2 = GetCloneTaskInfo(testUser1_, uuid2, &info); + if (ret1 == kErrCodeFileNotExist && 
ret2 == kErrCodeFileNotExist) + { + noRecord = true; + break; } + std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - continue; - } else if (info1.GetSnapshotInfo().GetStatus() == Status::done) { - success1 = false; - break; - } else { - FAIL() << "Snapshot Fail On status = " - << static_cast(info1.GetSnapshotInfo().GetStatus()); } - } else if (retCode == -8) { - // 操作5:获取快照信息,user=testUser1_,filename=testFile1_ - // 预期5:返回空 - success1 = true; - break; - } - } - ASSERT_TRUE(success1); - - // 操作6: 在快照已完成后,testUser1_取消testFile1_的快照snap1 - // 预期6: 返回待取消的快照不存在或已完成 - ret = CancelSnapshot(testUser1_, testFile1_, uuid1); - ASSERT_EQ(kErrCodeCannotCancelFinished, ret); -} - -// 场景三:lazy快照克隆场景 -TEST_F(SnapshotCloneServerTest, TestSnapLazyClone) { - std::string snapId; - PrepareSnapshotForTestFile1(&snapId); - - // 操作1: testUser1_ clone不存在的快照,fileName=SnapLazyClone1 - // 预期1:返回快照不存在 - std::string uuid1, uuid2, uuid3, uuid4, uuid5; - int ret; - ret = CloneOrRecover("Clone", testUser1_, "UnExistSnapId1", - "/ItUser1/SnapLazyClone1", true, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // 操作2:testUser2_ clone快照snap1,fileName=SnapLazyClone1 - // 预期2: 返回用户认证失败 - ret = CloneOrRecover("Clone", testUser2_, snapId, "/ItUser2/SnapLazyClone1", - true, &uuid2); - ASSERT_EQ(kErrCodeInvalidUser, ret); - - // 操作3:testUser1_ clone 快照snap1,fileName=SnapLazyClone1 - // 预期3 返回克隆成功 - std::string dstFile = "/ItUser1/SnapLazyClone1"; - ret = CloneOrRecover("Clone", testUser1_, snapId, dstFile, true, &uuid3); - ASSERT_EQ(0, ret); - - // 操作4: testUser1_ clone 块照snap1,fileName=SnapLazyClone1 (重复克隆) - // 预期4:返回克隆成功(幂等) - ret = CloneOrRecover("Clone", testUser1_, snapId, "/ItUser1/SnapLazyClone1", - true, &uuid4); - ASSERT_EQ(0, ret); - - // Flatten - ret = Flatten(testUser1_, uuid3); - ASSERT_EQ(0, ret); - - // 操作5: testUser1_ GetCloneTask - // 预期5:返回SnapLazyClone1的clone 任务 - bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid3, true); - ASSERT_TRUE(success1); - - // 操作6: testUser2_ GetCloneTask - // 预期6: 返回空 - std::vector infoVec; - ret = ListCloneTaskInfo(testUser2_, 10, 0, &infoVec); - ASSERT_EQ(0, ret); - ASSERT_EQ(0, infoVec.size()); - - // 操作7: testUser2_ CleanCloneTask UUID为SnapLazyClone1的UUID - // 预期7:返回用户认证失败 - ret = CleanCloneTask(testUser2_, uuid3); - ASSERT_EQ(kErrCodeInvalidUser, ret); - - // 操作8: testUser1_ CleanCloneTask UUID为SnapLazyClone1的UUID - // 预期8:返回执行成功 - ret = CleanCloneTask(testUser1_, uuid3); - ASSERT_EQ(0, ret); - - // 等待清理完成 - std::this_thread::sleep_for(std::chrono::seconds(3)); - - // 操作9: testUser1_ CleanCloneTask UUID为SnapLazyClone1的UUID(重复执行) - // 预期9:返回执行成功 - ret = CleanCloneTask(testUser1_, uuid3); - ASSERT_EQ(0, ret); - - // 操作10:testUser1_ GetCloneTask - // 预期10:返回空 - TaskCloneInfo info; - ret = GetCloneTaskInfo(testUser1_, uuid3, &info); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // 验证数据正确性 - std::string fakeData(4096, 'x'); - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); -} - -// 场景四:非lazy快照克隆场景 -TEST_F(SnapshotCloneServerTest, TestSnapNotLazyClone) { - std::string snapId; - PrepareSnapshotForTestFile1(&snapId); - - // 操作1: testUser1_ clone不存在的快照,fileName=SnapNotLazyClone1 - // 预期1:返回快照不存在 - std::string uuid1; - int ret; - ret = CloneOrRecover("Clone", testUser1_, "UnExistSnapId2", - "/ItUser1/SnapNotLazyClone1", false, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // 操作2:testUser2_ clone快照snap1,fileName=SnapNotLazyClone1 - // 预期2: 返回用户认证失败 - ret = CloneOrRecover("Clone", testUser2_, snapId, - "/ItUser2/SnapNotLazyClone1", false, &uuid1); - 
ASSERT_EQ(kErrCodeInvalidUser, ret); - - // 操作3:testUser1_ clone 快照snap1,fileName=SnapNotLazyClone1 - // 预期3 返回克隆成功 - std::string dstFile = "/ItUser1/SnapNotLazyClone1"; - ret = CloneOrRecover("Clone", testUser1_, snapId, dstFile, false, &uuid1); - ASSERT_EQ(0, ret); - - bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true); - ASSERT_TRUE(success1); - - // 操作4: testUser1_ clone 块照snap1, - // fileName=SnapNotLazyClone1 (重复克隆) - // 预期4:返回克隆成功(幂等) - ret = CloneOrRecover("Clone", testUser1_, snapId, - "/ItUser1/SnapNotLazyClone1", false, &uuid1); - ASSERT_EQ(0, ret); - - // 验证数据正确性 - std::string fakeData(4096, 'x'); - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); -} - -// 场景五:lazy快照恢复场景 -TEST_F(SnapshotCloneServerTest, TestSnapLazyRecover) { - std::string snapId; - PrepareSnapshotForTestFile1(&snapId); - - // 操作1: testUser1_ Recover不存在的快照,fileName=testFile1_ - // 预期1:返回快照不存在 - std::string uuid1; - int ret; - ret = CloneOrRecover("Recover", testUser1_, "UnExistSnapId3", testFile1_, - true, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // 操作2:testUser2_ Recover快照snap1,fileName=testFile1_ - // 预期2: 返回用户认证失败 - ret = - CloneOrRecover("Recover", testUser2_, snapId, testFile1_, true, &uuid1); - ASSERT_EQ(kErrCodeInvalidUser, ret); - - // 操作3:testUser1_ Recover快照snap1,fileName=testFile1_ - // 预期3 返回恢复成功 - ret = - CloneOrRecover("Recover", testUser1_, snapId, testFile1_, true, &uuid1); - ASSERT_EQ(0, ret); - - // Flatten - ret = Flatten(testUser1_, uuid1); - ASSERT_EQ(0, ret); - - bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false); - ASSERT_TRUE(success1); - - // 验证数据正确性 - std::string fakeData(4096, 'x'); - ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); - - // 操作4:testUser1_ recover 快照snap1,目标文件为不存在的文件 - // 预期4: 返回目标文件不存在 - ret = CloneOrRecover("Recover", testUser1_, snapId, "/ItUser1/notExistFile", - true, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); -} - -// 场景六:非lazy快照恢复场景 -TEST_F(SnapshotCloneServerTest, TestSnapNotLazyRecover) { - std::string snapId; - PrepareSnapshotForTestFile1(&snapId); - - // 操作1: testUser1_ Recover不存在的快照,fileName=testFile1_ - // 预期1:返回快照不存在 - std::string uuid1; - int ret; - ret = CloneOrRecover("Recover", testUser1_, "UnExistSnapId4", testFile1_, - false, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // 操作2:testUser2_ Recover快照snap1,fileName=testFile1_ - // 预期2: 返回用户认证失败 - ret = CloneOrRecover("Recover", testUser2_, snapId, testFile1_, false, - &uuid1); - ASSERT_EQ(kErrCodeInvalidUser, ret); - - // 操作3:testUser1_ Recover快照snap1,fileName=testFile1_ - // 预期3 返回恢复成功 - ret = CloneOrRecover("Recover", testUser1_, snapId, testFile1_, false, - &uuid1); - ASSERT_EQ(0, ret); - - bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false); - ASSERT_TRUE(success1); - - // 验证数据正确性 - std::string fakeData(4096, 'x'); - ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); - - // 操作4:testUser1_ recover 快照snap1,目标文件为不存在的文件 - // 预期4: 返回目标文件不存在 - ret = CloneOrRecover("Recover", testUser1_, snapId, "/ItUser1/notExistFile", - false, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); -} - -// 场景七: lazy镜像克隆场景 -TEST_F(SnapshotCloneServerTest, TestImageLazyClone) { - // 操作1: testUser1_ clone不存在的镜像,fileName=ImageLazyClone1 - // 预期1:返回文件不存在 - std::string uuid1, uuid2, uuid3, uuid4; - int ret; - ret = CloneOrRecover("Clone", testUser1_, "/UnExistFile", - "/ItUser1/ImageLazyClone1", true, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // 操作2:testUser1_ clone 镜像testFile1_,fileName=ImageLazyClone1 - // 预期2 
返回克隆成功 - std::string dstFile = "/ItUser1/ImageLazyClone1"; - ret = - CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid2); - ASSERT_EQ(0, ret); - - // 操作3: testUser1_ clone 镜像testFile1_, - // fileName=ImageLazyClone1 (重复克隆) - // 预期3:返回克隆成功(幂等) - ret = CloneOrRecover("Clone", testUser1_, testFile1_, - "/ItUser1/ImageLazyClone1", true, &uuid3); - ASSERT_EQ(0, ret); - - // 操作4:对未完成lazy克隆的文件ImageLazyClone1打快照snap1 - // 预期4:返回文件状态异常 - ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid4); - ASSERT_EQ(kErrCodeFileStatusInvalid, ret); - FileSnapshotInfo info2; - int retCode = GetSnapshotInfo(testUser1_, testFile1_, uuid4, &info2); - ASSERT_EQ(kErrCodeFileNotExist, retCode); - - ASSERT_TRUE(WaitMetaInstalledSuccess(testUser1_, uuid2, true)); - - // Flatten之前验证数据正确性 - std::string fakeData1(4096, 'x'); - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData1)); - - // Flatten - ret = Flatten(testUser1_, uuid2); - ASSERT_EQ(0, ret); - - bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid2, true); - ASSERT_TRUE(success1); - - // Flatten之后验证数据正确性 - std::string fakeData2(4096, 'x'); - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData2)); -} - -// 场景八:非lazy镜像克隆场景 -TEST_F(SnapshotCloneServerTest, TestImageNotLazyClone) { - // 操作1: testUser1_ clone不存在的镜像,fileName=ImageNotLazyClone1 - // 预期1:返回快照不存在 - std::string uuid1; - int ret; - ret = CloneOrRecover("Clone", testUser1_, "/UnExistFile", - "/ItUser1/ImageNotLazyClone1", false, &uuid1); - ASSERT_EQ(kErrCodeFileNotExist, ret); - - // 操作2:testUser1_ clone 镜像testFile1_,fileName=ImageNotLazyClone1 - // 预期2 返回克隆成功 - std::string dstFile = "/ItUser1/ImageNotLazyClone1"; - ret = - CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, false, &uuid1); - ASSERT_EQ(0, ret); - - bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true); - ASSERT_TRUE(success1); - - // 操作3: testUser1_ clone 镜像testFile1_, - // fileName=ImageNotLazyClone1 (重复克隆) - // 预期3:返回克隆成功(幂等) - ret = CloneOrRecover("Clone", testUser1_, testFile1_, - "/ItUser1/ImageNotLazyClone1", false, &uuid1); - ASSERT_EQ(0, ret); - - // 验证数据正确性 - std::string fakeData(4096, 'x'); - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); -} - -// 场景九:快照存在失败场景 -TEST_F(SnapshotCloneServerTest, TestSnapAndCloneWhenSnapHasError) { - std::string snapId = "errorSnapUuid"; - SnapshotInfo snapInfo(snapId, testUser1_, testFile4_, "snapxxx", 0, 0, 0, 0, - 0, 0, kDefaultPoolset, 0, Status::error); - - cluster_->metaStore_->AddSnapshot(snapInfo); - - pid_t pid = cluster_->RestartSnapshotCloneServer(1); - LOG(INFO) << "SnapshotCloneServer 1 restarted, pid = " << pid; - ASSERT_GT(pid, 0); - std::string uuid1, uuid2; - - // 操作1: lazy clone 快照snap1 - // 预期1:返回快照存在异常 - int ret = CloneOrRecover("Clone", testUser1_, snapId, - "/ItUser2/SnapLazyClone1", true, &uuid2); - ASSERT_EQ(kErrCodeInvalidSnapshot, ret); - - // 操作2:非lazy clone 快照snap1 - // 预期2:返回快照存在异常 - ret = CloneOrRecover("Clone", testUser1_, snapId, - "/ItUser2/SnapNotLazyClone1", false, &uuid2); - ASSERT_EQ(kErrCodeInvalidSnapshot, ret); - - // 操作3:lazy 从 快照snap1 recover - // 预期3:返回快照存在异常 - ret = - CloneOrRecover("Recover", testUser1_, snapId, testFile4_, true, &uuid2); - ASSERT_EQ(kErrCodeInvalidSnapshot, ret); - - // 操作4:非lazy 从 快照snap1 recover - // 预期4:返回快照存在异常 - ret = CloneOrRecover("Recover", testUser1_, snapId, testFile4_, false, - &uuid2); - ASSERT_EQ(kErrCodeInvalidSnapshot, ret); - - // 操作5:用户testUser1_对testFile4_打快照snap1 - // 预期5:清理失败快照,并打快照成功 - ret = MakeSnapshot(testUser1_, testFile4_, "snap1", &uuid1); - 
ASSERT_EQ(0, ret); - - // 校验快照成功 - bool success1 = CheckSnapshotSuccess(testUser1_, testFile4_, uuid1); - ASSERT_TRUE(success1); - - // 校验清理失败快照成功 - FileSnapshotInfo info1; - int retCode = GetSnapshotInfo(testUser1_, testFile4_, snapId, &info1); - ASSERT_EQ(kErrCodeFileNotExist, retCode); -} - -// [线上问题修复]克隆失败,回滚删除克隆卷,再次创建同样的uuid的卷的场景 -TEST_F(SnapshotCloneServerTest, TestCloneHasSameDest) { - std::string uuid1, uuid2, uuid3, uuid4, uuid5, uuid6, uuid7; - // 操作1:testUser1_ clone 镜像testFile1_,fileName=CloneHasSameDestUUID - // 预期1 返回克隆成功 - std::string dstFile = "/ItUser1/CloneHasSameDest"; - int ret = - CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid1); - ASSERT_EQ(0, ret); - - // 删除克隆卷 - UserInfo_t userinfo; - userinfo.owner = testUser1_; - int ret2 = fileClient_->Unlink(dstFile, userinfo, false); - ASSERT_EQ(0, ret2); - - - // 操作2:testUser1_ 再次clone 镜像testFile1_, - // fileName=CloneHasSameDestUUID - // 预期2 返回克隆成功 - ret = - CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid2); - ASSERT_EQ(0, ret); - - // 验证数据正确性 - std::string fakeData(4096, 'x'); - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); - - // 操作3:testUser1_ clone 镜像testFile1_,fileName=CloneHasSameDest2 - // 预期3 返回克隆成功 - dstFile = "/ItUser1/CloneHasSameDest2"; - ret = - CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid3); - ASSERT_EQ(0, ret); - - // 删除克隆卷 - UserInfo_t userinfo2; - userinfo2.owner = testUser1_; - ret2 = fileClient_->Unlink(dstFile, userinfo2, false); - ASSERT_EQ(0, ret2); - - - // 操作4:testUser1_ 再次clone 镜像testFile2_, - // fileName=CloneHasSameDest2 - // 预期4 返回克隆成功 - ret = - CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, true, &uuid4); - ASSERT_EQ(0, ret); - - // 验证数据正确性 - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); - - // 验证再次克隆lazyflag不同的情况 - // 操作5:testUser1_ clone 镜像testFile1_,fileName=CloneHasSameDest3 - // 预期5 返回克隆成功 - dstFile = "/ItUser1/CloneHasSameDest3"; - ret = - CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid5); - ASSERT_EQ(0, ret); - - // 删除克隆卷 - UserInfo_t userinfo3; - userinfo2.owner = testUser1_; - ret2 = fileClient_->Unlink(dstFile, userinfo2, false); - ASSERT_EQ(0, ret2); - - // 操作6:testUser1_ 再次非lazy clone 镜像testFile2_, - // fileName=CloneHasSameDest3 - // 预期6 返回克隆成功 - ret = - CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, false, &uuid6); - ASSERT_EQ(0, ret); - - bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid6, true); - ASSERT_TRUE(success1); - - // 验证数据正确性 - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); - - // 删除克隆卷 - UserInfo_t userinfo4; - userinfo2.owner = testUser1_; - ret2 = fileClient_->Unlink(dstFile, userinfo2, false); - ASSERT_EQ(0, ret2); - - // 操作7:testUser1_ 再次非lazy clone 镜像testFile2_, - // fileName=CloneHasSameDest3 - // 预期7 返回克隆成功 - ret = - CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, true, &uuid7); - ASSERT_EQ(0, ret); - - // 验证数据正确性 - ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); -} - -// lazy克隆卷,删除克隆卷,再删除源卷,源卷需要可以删除 -TEST_F(SnapshotCloneServerTest, TestDeleteLazyCloneDestThenDeleteSrc) { - // 操作1:testUser1_ clone 镜像testFile5_,lazy克隆两个卷dstFile1,dstFile2 - // 预期1 返回克隆成功 - std::string uuid1; - std::string uuid2; - std::string dstFile1 = "/dest1"; - std::string dstFile2 = "/dest2"; - UserInfo_t userinfo; - userinfo.owner = testUser1_; - int ret = - CloneOrRecover("Clone", testUser1_, testFile5_, dstFile1, true, &uuid1); - ASSERT_EQ(0, ret); - - ret = - CloneOrRecover("Clone", testUser1_, testFile5_, dstFile2, 
true, &uuid2); - ASSERT_EQ(0, ret); - - // 删除源卷,删除失败,卷被占用 - - ret = fileClient_->Unlink(testFile5_, userinfo, false); - ASSERT_EQ(-27, ret); - - // 操作2:删除目的卷dstFile1成功,再次删除源卷 - // 预期2 删除失败,卷被占用 - ret = fileClient_->Unlink(dstFile1, userinfo, false); - ASSERT_EQ(0, ret); - - ret = fileClient_->Unlink(testFile5_, userinfo, false); - ASSERT_EQ(-27, ret); - - - // 操作3:删除目的卷dstFile2成功,再次删除源卷 - // 预期3 删除成功 - ret = fileClient_->Unlink(dstFile2, userinfo, false); - ASSERT_EQ(0, ret); - - ret = fileClient_->Unlink(testFile5_, userinfo, false); - ASSERT_EQ(0, ret); - - // 操作4: 等待一段时间,看垃圾记录后台能否删除 - bool noRecord = false; - for (int i = 0; i < 100; i++) { - TaskCloneInfo info; - int ret1 = GetCloneTaskInfo(testUser1_, uuid1, &info); - int ret2 = GetCloneTaskInfo(testUser1_, uuid2, &info); - if (ret1 == kErrCodeFileNotExist && ret2 == kErrCodeFileNotExist) { - noRecord = true; - break; - } - - std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - } - ASSERT_TRUE(noRecord); -} -} // namespace snapshotcloneserver -} // namespace curve + ASSERT_TRUE(noRecord); + } + } // namespace snapshotcloneserver +} // namespace curve diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp index a725cbe12f..cc0a47698d 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp @@ -20,24 +20,24 @@ * Author: xuchaojie */ -#include -#include #include +#include +#include #include -#include "test/integration/cluster_common/cluster.h" #include "src/client/libcurve_file.h" -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" -#include "src/snapshotcloneserver/clone/clone_service_manager.h" -#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" +#include "src/client/source_reader.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/clone/clone_service_manager.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" -#include "src/client/source_reader.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" +#include "test/integration/cluster_common/cluster.h" +#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" using curve::CurveCluster; using curve::client::FileClient; -using curve::client::UserInfo_t; using curve::client::SourceReader; +using curve::client::UserInfo_t; const std::string kTestPrefix = "ConSCSTest"; // NOLINT @@ -96,11 +96,11 @@ const std::vector mdsConfigOptions{ }; const std::vector mdsConf1{ - { "--graceful_quit_on_sigterm" }, + {"--graceful_quit_on_sigterm"}, std::string("--confPath=") + kMdsConfigPath, std::string("--log_dir=") + kLogPath, std::string("--segmentSize=") + std::to_string(segmentSize), - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector chunkserverConfigOptions{ @@ -125,66 +125,63 @@ const std::vector snapClientConfigOptions{ const std::vector s3ConfigOptions{}; const std::vector chunkserverConf1{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "1/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "1/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "1/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "1/recycler" }, - { 
"-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "1/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "1/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "1/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "1/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "1/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "1/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "1/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "1/walfilepool.meta"}, }; const std::vector chunkserverConf2{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "2/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "2/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "2/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "2/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "2/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "2/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "2/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "2/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "2/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "2/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "2/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "2/walfilepool.meta"}, }; const std::vector chunkserverConf3{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "3/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "3/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "3/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "3/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "3/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "3/"}, + 
{"-chunkServerMetaUri=local://./" + kTestPrefix + + "3/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "3/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "3/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "3/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "3/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "3/walfilepool.meta"}, }; const std::vector snapshotcloneserverConfigOptions{ @@ -196,7 +193,8 @@ const std::vector snapshotcloneserverConfigOptions{ std::string("server.clonePoolThreadNum=8"), std::string("server.createCloneChunkConcurrency=2"), std::string("server.recoverChunkConcurrency=2"), - // 最大快照数修改为3,以测试快照达到上限的用例 + // Modify the maximum number of snapshots to 3 to test cases where snapshots + // reach the upper limit std::string("server.maxSnapshotLimit=3"), std::string("client.methodRetryTimeSec=1"), std::string("server.clientAsyncMethodRetryTimeSec=1"), @@ -211,7 +209,7 @@ const std::vector snapshotcloneserverConfigOptions{ const std::vector snapshotcloneConf{ std::string("--conf=") + kSCSConfigPath, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector clientConfigOptions{ @@ -222,7 +220,8 @@ const std::vector clientConfigOptions{ const char* testFile1_ = "/concurrentItUser1/file1"; const char* testFile2_ = - "/concurrentItUser1/file2"; // 将在TestImage2Clone2Success中删除 //NOLINT + "/concurrentItUser1/file2"; // Will be removed from + // TestImage2Clone2Success//NOLINT const char* testFile3_ = "/concurrentItUser2/file3"; const char* testFile4_ = "/concurrentItUser1/file3"; const char* testUser1_ = "concurrentItUser1"; @@ -240,16 +239,16 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); - // 初始化db + // Initialize db system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); system(std::string("rm -rf " + kTestPrefix + "1").c_str()); system(std::string("rm -rf " + kTestPrefix + "2").c_str()); system(std::string("rm -rf " + kTestPrefix + "3").c_str()); - // 启动etcd + // Start etcd pid_t pid = cluster_->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, - std::vector{ "--name=" + kTestPrefix }); + std::vector{"--name=" + kTestPrefix}); LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort << "::" << kEtcdPeerIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); @@ -257,13 +256,13 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions); - // 启动一个mds + // Start an mds pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 创建物理池 + // Creating a physical pool ASSERT_EQ(0, cluster_->PreparePhysicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -272,21 +271,18 @@ class SnapshotCloneServerTest : public ::testing::Test { // 
@@ -272,21 +271,18 @@ class SnapshotCloneServerTest : public ::testing::Test { // format chunkfilepool and walfilepool std::vector<std::thread> threadpool(3); - threadpool[0] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "1/chunkfilepool/", - "./" + kTestPrefix + "1/chunkfilepool.meta", - "./" + kTestPrefix + "1/chunkfilepool/", 1); - threadpool[1] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "2/chunkfilepool/", - "./" + kTestPrefix + "2/chunkfilepool.meta", - "./" + kTestPrefix + "2/chunkfilepool/", 1); - threadpool[2] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "3/chunkfilepool/", - "./" + kTestPrefix + "3/chunkfilepool.meta", - "./" + kTestPrefix + "3/chunkfilepool/", 1); + threadpool[0] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "1/chunkfilepool/", + "./" + kTestPrefix + "1/chunkfilepool.meta", + "./" + kTestPrefix + "1/chunkfilepool/", 1); + threadpool[1] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "2/chunkfilepool/", + "./" + kTestPrefix + "2/chunkfilepool.meta", + "./" + kTestPrefix + "2/chunkfilepool/", 1); + threadpool[2] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "3/chunkfilepool/", + "./" + kTestPrefix + "3/chunkfilepool.meta", + "./" + kTestPrefix + "3/chunkfilepool/", 1); for (int i = 0; i < 3; i++) { threadpool[i].join(); } @@ -300,7 +296,7 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kCSConfigPath, chunkserverConfigOptions); - // 创建chunkserver + // Create chunkserver pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1 @@ -319,7 +315,8 @@ class SnapshotCloneServerTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // Create a logical pool, then sleep for a while so that the underlying + // copysets can elect their leaders first ASSERT_EQ(0, cluster_->PrepareLogicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -387,7 +384,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(ERROR) << "Open fail, ret = " << testfd1_; return false; } - // 每个chunk写前面4k数据, 写两个segment + // Write the first 4 KB of data of each chunk, two segments in total uint64_t totalChunk = 2ULL * segmentSize / chunkSize; for (uint64_t i = 0; i < totalChunk / chunkGap; i++) { ret = @@ -487,9 +484,9 @@ class SnapshotCloneServerTest : public ::testing::Test { CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr; FileClient* SnapshotCloneServerTest::fileClient_ = nullptr; -// 并发测试用例 +// Concurrent test cases -// 这个用例测试快照层数,放在最前面 +// This case tests the number of snapshot layers, so it is placed first TEST_F(SnapshotCloneServerTest, TestSameFile3Snapshot) { std::string uuid1, uuid2, uuid3; int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1); @@ -507,7 +504,8 @@ TEST_F(SnapshotCloneServerTest, TestSameFile3Snapshot) { bool success3 = CheckSnapshotSuccess(testUser1_, testFile1_, uuid3); ASSERT_TRUE(success3); - // 快照层数设置为3,尝试再打一次快照,超过层数失败 + // Set the number of snapshot layers to 3. 
Attempt to take another snapshot, + // exceeding the number of layers failed ret = MakeSnapshot(testUser1_, testFile1_, "snap3", &uuid3); ASSERT_EQ(kErrCodeSnapshotCountReachLimit, ret); @@ -586,7 +584,7 @@ TEST_F(SnapshotCloneServerTest, TestSnapSameClone1Success) { ret1 = CloneOrRecover("Clone", testUser1_, snapId, dstFile, true, &uuid1); ASSERT_EQ(0, ret1); - // 幂等 + // Idempotent ret2 = CloneOrRecover("Clone", testUser1_, snapId, dstFile, true, &uuid2); ASSERT_EQ(0, ret2); @@ -733,7 +731,7 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyCloneSnap) { ASSERT_TRUE(WriteFile(dstFile, testUser1_, fakeData)); ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); - // 判断是否clone成功 + // Determine if the clone was successful bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true); ASSERT_TRUE(success1); } @@ -748,7 +746,8 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyCloneImage) { ASSERT_TRUE(WaitMetaInstalledSuccess(testUser1_, uuid1, true)); - // clone完成stage1之后即可对外提供服务,测试克隆卷是否能正常读取数据 + // After the clone completes stage1, it can provide external services and + // test whether the cloned volume can read data normally std::string fakeData1(4096, 'x'); ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData1)); @@ -760,7 +759,7 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyCloneImage) { ASSERT_TRUE(WriteFile(dstFile, testUser1_, fakeData2)); ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData2)); - // 判断是否clone成功 + // Determine if the clone was successful bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true); ASSERT_TRUE(success1); } @@ -783,7 +782,7 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyRecoverSnap) { ASSERT_TRUE(WriteFile(dstFile, testUser1_, fakeData)); ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); - // 判断是否clone成功 + // Determine if the clone was successful bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false); ASSERT_TRUE(success1); } diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp index 326ebe66c0..6da5478c86 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp @@ -20,15 +20,15 @@ * Author: xuchaojie */ -#include -#include -#include #include #include +#include +#include +#include +#include "test/integration/cluster_common/cluster.h" #include "test/integration/snapshotcloneserver/snapshotcloneserver_module.h" #include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" -#include "test/integration/cluster_common/cluster.h" using curve::CurveCluster; @@ -73,9 +73,9 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); system(std::string("rm -rf ExcSCSTest.etcd").c_str()); - pid_t pid = cluster_->StartSingleEtcd(1, kEtcdClientIpPort, - kEtcdPeerIpPort, - std::vector{ "--name=ExcSCSTest"}); + pid_t pid = cluster_->StartSingleEtcd( + 1, kEtcdClientIpPort, kEtcdPeerIpPort, + std::vector{"--name=ExcSCSTest"}); LOG(INFO) << "etcd 1 started on " << kEtcdPeerIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); @@ -92,22 +92,18 @@ class SnapshotCloneServerTest : public ::testing::Test { system(std::string("rm -rf ExcSCSTest.etcd").c_str()); } - void SetUp() override { - fiu_init(0); - } + void SetUp() override { fiu_init(0); } void TearDown() override { // noop } - bool 
JudgeSnapTaskFailCleanTaskAndCheck( - const std::string &user, - const std::string &file, - const std::string &uuid, - SnapshotInfo *snapInfo) { - // 验证任务失败 + bool JudgeSnapTaskFailCleanTaskAndCheck(const std::string& user, + const std::string& file, + const std::string& uuid, + SnapshotInfo* snapInfo) { + // Verify that the task failed FileSnapshotInfo info1; - int ret = GetSnapshotInfo( - user, file, uuid, &info1); + int ret = GetSnapshotInfo(user, file, uuid, &info1); if (ret < 0) { LOG(INFO) << "GetSnapshotInfo Fail" << ", ret = " << ret; @@ -126,7 +122,7 @@ class SnapshotCloneServerTest : public ::testing::Test { return false; } - // 验证任务不存在 + // Verify that the task does not exist SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); if (ret != -1) { @@ -137,28 +133,27 @@ class SnapshotCloneServerTest : public ::testing::Test { return true; } - bool JudgeSnapTaskFailCleanEnvAndCheck( - const std::string &user, - const std::string &file, - const std::string &uuid) { + bool JudgeSnapTaskFailCleanEnvAndCheck(const std::string& user, + const std::string& file, + const std::string& uuid) { SnapshotInfo snapInfo; - bool success = JudgeSnapTaskFailCleanTaskAndCheck( - user, file, uuid, &snapInfo); + bool success = + JudgeSnapTaskFailCleanTaskAndCheck(user, file, uuid, &snapInfo); if (!success) { return false; } int seqNum = snapInfo.GetSeqNum(); - // 验证curve上无快照 + // Verify that there are no snapshots on Curve FInfo fInfo; - int ret = server_->GetCurveFsClient()->GetSnapshot( - file, user, seqNum, &fInfo); + int ret = server_->GetCurveFsClient()->GetSnapshot(file, user, seqNum, + &fInfo); if (ret != -LIBCURVE_ERROR::NOTEXIST) { LOG(INFO) << "AssertEnvClean Fail, snapshot exist on curve" << ", ret = " << ret; return false; } - // 验证nos上无快照 + // Verify that there are no snapshots on NOS ChunkIndexDataName indexData(file, seqNum); if (server_->GetDataStore()->ChunkIndexDataExist(indexData)) { LOG(INFO) << "AssertEnvClean Fail, snapshot exist on nos."; @@ -167,13 +162,11 @@ class SnapshotCloneServerTest : public ::testing::Test { return true; } - bool JudgeCloneTaskFailCleanEnvAndCheck( - const std::string &user, - const std::string &uuid) { - // 验证任务状态为error + bool JudgeCloneTaskFailCleanEnvAndCheck(const std::string& user, + const std::string& uuid) { + // Verify that the task status is error TaskCloneInfo info1; - int ret = GetCloneTaskInfo( - user, uuid, &info1); + int ret = GetCloneTaskInfo(user, uuid, &info1); if (ret < 0) { LOG(INFO) << "GetCloneTask fail" << ", ret = " << ret; @@ -188,31 +181,28 @@ class SnapshotCloneServerTest : public ::testing::Test { return CleanCloneTaskAndCheckEnvClean(user, uuid); } - bool JudgeCloneTaskNotExistCleanEnvAndCheck( - const std::string &user, - const std::string &uuid) { - // 验证任务不存在 + bool JudgeCloneTaskNotExistCleanEnvAndCheck(const std::string& user, + const std::string& uuid) { + // Verify that the task does not exist TaskCloneInfo info1; - int ret = GetCloneTaskInfo( - user, uuid, &info1); + int ret = GetCloneTaskInfo(user, uuid, &info1); if (ret != kErrCodeFileNotExist) { LOG(INFO) << "AsserTaskNotExist fail" << ", ret = " << ret; return false; } - // 验证curvefs上无临时文件 + // Verify that there are no temporary files on curvefs if (server_->GetCurveFsClient()->JudgeCloneDirHasFile()) { LOG(INFO) << "AssertEnvClean fail" - << ", ret = " << ret; + << ", ret = " << ret; return false; } return true; }
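// ---------------------------------------------------------------------------
// The exception tests below all follow the same fault-injection discipline,
// shown here in isolation as a minimal sketch. fiu_init/fiu_enable/fiu_disable
// are the libfiu calls already used in this file; the injection-point name is
// one of the real ones exercised below:
//
//   fiu_init(0);                              // once per process, in SetUp()
//   fiu_enable(
//       "test/integration/snapshotcloneserver/"
//       "FakeCurveFsClient.CreateSnapshot",
//       1, NULL, 0);                          // from now on, fail this point
//   // ... drive the operation, assert it fails, give the async task ~3 s ...
//   fiu_disable(
//       "test/integration/snapshotcloneserver/"
//       "FakeCurveFsClient.CreateSnapshot");  // restore normal behaviour
//   // ... then use the Judge*CleanEnvAndCheck helpers above to assert that
//   // the failed task was cleaned up and left no snapshot or temp files.
//
// Passing FIU_ONETIME instead of 0 as the last fiu_enable argument makes the
// point fire exactly once, which is how the "fail once then succeed" recovery
// cases later in this file are built.
// ---------------------------------------------------------------------------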
- bool CleanCloneTaskAndCheckEnvClean( - const std::string &user, - const std::string &uuid) { + bool CleanCloneTaskAndCheckEnvClean(const std::string& user, + const std::string& uuid) { int ret = CleanCloneTask(user, uuid); if (ret < 0) { LOG(INFO) << "CleanCloneTask fail" @@ -222,7 +212,7 @@ class SnapshotCloneServerTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - // 验证任务不存在 + // Verify that the task does not exist TaskCloneInfo info; ret = GetCloneTaskInfo(user, uuid, &info); if (kErrCodeFileNotExist != ret) { @@ -231,34 +221,29 @@ class SnapshotCloneServerTest : public ::testing::Test { return false; } - // 验证curvefs上无临时文件 + // Verify that there are no temporary files on curvefs if (server_->GetCurveFsClient()->JudgeCloneDirHasFile()) { LOG(INFO) << "AssertEnvClean fail" - << ", ret = " << ret; + << ", ret = " << ret; return false; } return true; } - bool PrepreTestSnapshot( - const std::string &user, - const std::string &file, - const std::string &snapName, - std::string *uuid) { - int ret = MakeSnapshot(user, - file , snapName, uuid); + bool PrepreTestSnapshot(const std::string& user, const std::string& file, + const std::string& snapName, std::string* uuid) { + int ret = MakeSnapshot(user, file, snapName, uuid); if (ret < 0) { return false; } - bool success1 = CheckSnapshotSuccess(user, file, - *uuid); + bool success1 = CheckSnapshotSuccess(user, file, *uuid); return success1; } bool PrepreTestSnapshotIfNotExist() { if (testSnapId_.empty()) { - bool ret = PrepreTestSnapshot(testUser1, - testFile1, "testSnap", &testSnapId_); + bool ret = PrepreTestSnapshot(testUser1, testFile1, "testSnap", + &testSnapId_); return ret; } return true; @@ -266,53 +251,56 @@ class SnapshotCloneServerTest : public ::testing::Test { std::string testSnapId_; - static SnapshotCloneServerModule *server_; - static SnapshotCloneServerOptions *options_; + static SnapshotCloneServerModule* server_; + static SnapshotCloneServerOptions* options_; static CurveCluster* cluster_; }; -SnapshotCloneServerModule * SnapshotCloneServerTest::server_ = nullptr; -SnapshotCloneServerOptions * SnapshotCloneServerTest::options_ = nullptr; -CurveCluster * SnapshotCloneServerTest::cluster_ = nullptr; +SnapshotCloneServerModule* SnapshotCloneServerTest::server_ = nullptr; +SnapshotCloneServerOptions* SnapshotCloneServerTest::options_ = nullptr; +CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr; TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnCurvefs) { std::string uuid; std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateSnapshot", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap1", &uuid); + int ret = MakeSnapshot(user, file, "snap1", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateSnapshot"); // NOLINT SnapshotInfo snapInfo; - ASSERT_TRUE(JudgeSnapTaskFailCleanTaskAndCheck( - user, file, uuid, &snapInfo)); + ASSERT_TRUE( + JudgeSnapTaskFailCleanTaskAndCheck(user, file, uuid, &snapInfo)); } - TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnGetSnapshot) { std::string uuid; std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot", // NOLINT + fiu_enable( + 
"test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap2", &uuid); + int ret = MakeSnapshot(user, file, "snap2", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshot"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnDeleteSnapshot) { @@ -320,18 +308,20 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnDeleteSnapshot) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.DeleteSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.DeleteSnapshot", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap3", &uuid); + int ret = MakeSnapshot(user, file, "snap3", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.DeleteSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.DeleteSnapshot"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnCheckSnapShotStatus) { @@ -339,38 +329,44 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnCheckSnapShotStatus) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CheckSnapShotStatus", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CheckSnapShotStatus", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap4", &uuid); + int ret = MakeSnapshot(user, file, "snap4", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CheckSnapShotStatus"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CheckSnapShotStatus"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, - TestCreateSnapshotFailOnGetSnapshotSegmentInfo) { + TestCreateSnapshotFailOnGetSnapshotSegmentInfo) { std::string uuid; std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshotSegmentInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetSnapshotSegmentInfo", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap5", &uuid); + int ret = MakeSnapshot(user, file, "snap5", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetSnapshotSegmentInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetSnapshotSegmentInfo"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + 
ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnReadChunkSnapshot) { @@ -378,18 +374,21 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnReadChunkSnapshot) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ReadChunkSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.ReadChunkSnapshot", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap6", &uuid); + int ret = MakeSnapshot(user, file, "snap6", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ReadChunkSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.ReadChunkSnapshot"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnGetChunkInfo) { @@ -397,18 +396,19 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnGetChunkInfo) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap7", &uuid); + int ret = MakeSnapshot(user, file, "snap7", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetChunkInfo"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnAddSnapshot) { @@ -416,16 +416,20 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnAddSnapshot) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddSnapshot", // NOLINT 1, NULL, 0); - // 验证任务失败 - int ret = MakeSnapshot(user, file , "snap8", &uuid); + // Verification task failed + int ret = MakeSnapshot(user, file, "snap8", &uuid); ASSERT_EQ(-1, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddSnapshot"); // NOLINT - // 验证任务不存在 + // Verification task does not exist SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(-1, ret); @@ -436,20 +440,23 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnUpdateSnapshot) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.UpdateSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.UpdateSnapshot", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap9", &uuid); + int ret = MakeSnapshot(user, file, "snap9", &uuid); ASSERT_EQ(0, ret); 
std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.UpdateSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.UpdateSnapshot"); // NOLINT - // 验证任务失败 + // Verification task failed FileSnapshotInfo info1; - ret = GetSnapshotInfo( - user, file, uuid, &info1); + ret = GetSnapshotInfo(user, file, uuid, &info1); ASSERT_EQ(kErrCodeInternalError, ret); @@ -462,38 +469,44 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnPutChunkIndexData) { std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.PutChunkIndexData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.PutChunkIndexData", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap10", &uuid); + int ret = MakeSnapshot(user, file, "snap10", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.PutChunkIndexData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.PutChunkIndexData"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, - TestCreateSnapshotFailOnDataChunkTranferComplete) { + TestCreateSnapshotFailOnDataChunkTranferComplete) { std::string uuid; std::string user = testUser1; std::string file = testFile1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DataChunkTranferComplete", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DataChunkTranferComplete", // NOLINT 1, NULL, 0); - int ret = MakeSnapshot(user, file , "snap11", &uuid); + int ret = MakeSnapshot(user, file, "snap11", &uuid); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DataChunkTranferComplete"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DataChunkTranferComplete"); // NOLINT - ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck( - user, file, uuid)); + ASSERT_TRUE(JudgeSnapTaskFailCleanEnvAndCheck(user, file, uuid)); } TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnGetChunkIndexData) { @@ -503,16 +516,20 @@ TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnGetChunkIndexData) { ASSERT_TRUE(PrepreTestSnapshot(user, file, "snap12", &uuid)); - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verification deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT - // 验证任务失败 + // Verification task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -526,16 +543,20 @@ TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnDeleteChunkData) { ASSERT_TRUE(PrepreTestSnapshot(user, file, 
"snap13", &uuid)); - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DeleteChunkData", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verification deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DeleteChunkData"); // NOLINT - // 验证任务失败 + // Verification task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -549,16 +570,20 @@ TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnDeleteChunkIndexData) { ASSERT_TRUE(PrepreTestSnapshot(user, file, "snap14", &uuid)); - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkIndexData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DeleteChunkIndexData", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verification deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkIndexData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.DeleteChunkIndexData"); // NOLINT - // 验证任务失败 + // Verification task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -572,16 +597,20 @@ TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnDeleteSnapshot) { ASSERT_TRUE(PrepreTestSnapshot(user, file, "snap15", &uuid)); - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.DeleteSnapshot", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.DeleteSnapshot", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verification deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.DeleteSnapshot"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.DeleteSnapshot"); // NOLINT - // 验证任务失败 + // Verification task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -592,220 +621,234 @@ TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnCreateCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, 
TestLazyCloneSnapFailOnCompleteCloneMeta) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnGetFileInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyCloneSnapFailOnGetOrAllocateSegmentInfo) { + TestLazyCloneSnapFailOnGetOrAllocateSegmentInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnRenameCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, 
"/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnCreateCloneChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnChangeOwner) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnGetChunkIndexData) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapFailOnAddCloneInfo) { 
ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyCloneSnapFailOnFileNotExistWhenRecoverChunk) { + TestLazyCloneSnapFailOnFileNotExistWhenRecoverChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + true, &uuid1); ASSERT_EQ(0, ret); // Flatten ret = Flatten(testUser1, uuid1); ASSERT_EQ(0, ret); - // 克隆未完成前删除目标文件 + // Delete target file before cloning is completed ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); + server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT } TEST_F(SnapshotCloneServerTest, - TestLazyCloneSnapSuccessWhenRecoverChunkFailOneTime) { + TestLazyCloneSnapSuccessWhenRecoverChunkFailOneTime) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, FIU_ONETIME); int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/cloneSuccess1", true, - &uuid1); + "/user1/cloneSuccess1", true, &uuid1); ASSERT_EQ(0, ret); // Flatten @@ -815,238 +858,251 @@ TEST_F(SnapshotCloneServerTest, bool success1 = CheckCloneOrRecoverSuccess(testUser1, uuid1, true); ASSERT_TRUE(success1); - ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile("/user1/cloneSuccess1", "", 0)); + ASSERT_EQ(LIBCURVE_ERROR::OK, server_->GetCurveFsClient()->DeleteFile( + "/user1/cloneSuccess1", "", 0)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT - ASSERT_TRUE(CleanCloneTaskAndCheckEnvClean( - testUser1, uuid1)); + ASSERT_TRUE(CleanCloneTaskAndCheckEnvClean(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, 
TestNotLazyCloneSnapFailOnRecoverChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyCloneSnapFailOnRenameCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyCloneSnapFailOnChangeOwner) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyCloneSnapFailOnCompleteCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testSnapId_, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testSnapId_, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - 
fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnCreateCloneFile) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnCompleteCloneMeta) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnGetFileInfo) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyCloneImageFailOnGetOrAllocateSegmentInfo) { + TestLazyCloneImageFailOnGetOrAllocateSegmentInfo) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT 
1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } -TEST_F(SnapshotCloneServerTest, - TestLazyCloneImageFailOnRenameCloneFile) { +TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnRenameCloneFile) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnAddCloneInfo) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyCloneSnapImageOnChangeOwner) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } -TEST_F(SnapshotCloneServerTest, - 
TestLazyCloneImageFailOnCreateCloneChunk) { +TEST_F(SnapshotCloneServerTest, TestLazyCloneImageFailOnCreateCloneChunk) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(0, ret); // Flatten @@ -1056,52 +1112,53 @@ TEST_F(SnapshotCloneServerTest, std::this_thread::sleep_for(std::chrono::milliseconds(3000)); ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); + server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyCloneImageFailOnFileNotExistWhenRecoverChunk) { + TestLazyCloneImageFailOnFileNotExistWhenRecoverChunk) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", true, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + true, &uuid1); ASSERT_EQ(0, ret); // Flatten ret = Flatten(testUser1, uuid1); ASSERT_EQ(0, ret); - // 克隆未完成前删除目标文件 + // Delete target file before cloning is completed ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); + server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT } TEST_F(SnapshotCloneServerTest, - TestLazyCloneImageSuccessWhenRecoverChunkFailOneTime) { + TestLazyCloneImageSuccessWhenRecoverChunkFailOneTime) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, FIU_ONETIME); int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/cloneSuccess2", true, - &uuid1); + "/user1/cloneSuccess2", true, &uuid1); ASSERT_EQ(0, ret); // Flatten @@ -1111,276 +1168,299 @@ TEST_F(SnapshotCloneServerTest, bool success1 = CheckCloneOrRecoverSuccess(testUser1, uuid1, true); ASSERT_TRUE(success1); - ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile("/user1/cloneSuccess2", "", 0)); + ASSERT_EQ(LIBCURVE_ERROR::OK, server_->GetCurveFsClient()->DeleteFile( + "/user1/cloneSuccess2", "", 0)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT - ASSERT_TRUE(CleanCloneTaskAndCheckEnvClean( - testUser1, 
uuid1)); + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + ASSERT_TRUE(CleanCloneTaskAndCheckEnvClean(testUser1, uuid1)); } -TEST_F(SnapshotCloneServerTest, - TestNotLazyCloneImageFailOnRecoverChunk) { +TEST_F(SnapshotCloneServerTest, TestNotLazyCloneImageFailOnRecoverChunk) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } -TEST_F(SnapshotCloneServerTest, - TestNotLazyCloneImageFailOnRenameCloneFile) { +TEST_F(SnapshotCloneServerTest, TestNotLazyCloneImageFailOnRenameCloneFile) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyCloneSnapImageOnChangeOwner) { std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", testUser1, testFile1, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } -TEST_F(SnapshotCloneServerTest, - TestNotLazyCloneImageFailOnCompleteCloneFile) { +TEST_F(SnapshotCloneServerTest, TestNotLazyCloneImageFailOnCompleteCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Clone", 
testUser1, testFile1, - "/user1/clone1", false, - &uuid1); + int ret = CloneOrRecover("Clone", testUser1, testFile1, "/user1/clone1", + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnCreateCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnCompleteCloneMeta) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneMeta"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneMeta"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnGetFileInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.GetFileInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyRecoverSnapFailOnGetOrAllocateSegmentInfo) { + TestLazyRecoverSnapFailOnGetOrAllocateSegmentInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - 
fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.GetOrAllocateSegmentInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnRenameCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnCreateCloneChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CreateCloneChunk"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnGetChunkIndexData) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } 
TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnAddCloneInfo) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); - fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeSnapshotCloneMetaStore.AddCloneInfo"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskNotExistCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestLazyRecoverSnapFailOnChangeOwner) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, - &uuid1); + &uuid1); ASSERT_EQ(kErrCodeInternalError, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } - TEST_F(SnapshotCloneServerTest, - TestLazyRecoverSnapSuccessWhenRecoverChunkFailOneTime) { + TestLazyRecoverSnapSuccessWhenRecoverChunkFailOneTime) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, FIU_ONETIME); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, true, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, + &uuid1); ASSERT_EQ(0, ret); // Flatten @@ -1390,116 +1470,121 @@ TEST_F(SnapshotCloneServerTest, bool success1 = CheckCloneOrRecoverSuccess(testUser1, uuid1, false); ASSERT_TRUE(success1); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT } TEST_F(SnapshotCloneServerTest, TestNotLazyRecoverSnapFailOnRecoverChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, false, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT - 
ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyRecoverSnapFailOnRenameCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, false, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RenameCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.RenameCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyRecoverSnapFailOnChangeOwner) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, false, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.ChangeOwner"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, TestNotLazyRecoverSnapFailOnCompleteCloneFile) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, false, - &uuid1); + int ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, + false, &uuid1); ASSERT_EQ(0, ret); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.CompleteCloneFile"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/" + "FakeCurveFsClient.CompleteCloneFile"); // NOLINT - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); } TEST_F(SnapshotCloneServerTest, - TestLazyRecoverSnapFailOnFileNotExistWhenRecoverChunk) { + TestLazyRecoverSnapFailOnFileNotExistWhenRecoverChunk) { ASSERT_TRUE(PrepreTestSnapshotIfNotExist()); std::string uuid1; - fiu_enable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT + fiu_enable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk", // NOLINT 1, NULL, 0); - int ret = CloneOrRecover("Recover", testUser1, testSnapId_, - testFile1, true, - &uuid1); + int 
ret = CloneOrRecover("Recover", testUser1, testSnapId_, testFile1, true, + &uuid1); ASSERT_EQ(0, ret); // Flatten ret = Flatten(testUser1, uuid1); ASSERT_EQ(0, ret); - // 恢复未完成前删除目标文件 + // Delete target files before recovery is complete ASSERT_EQ(LIBCURVE_ERROR::OK, - server_->GetCurveFsClient()->DeleteFile(testFile1, "", 0)); + server_->GetCurveFsClient()->DeleteFile(testFile1, "", 0)); std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck( - testUser1, uuid1)); + ASSERT_TRUE(JudgeCloneTaskFailCleanEnvAndCheck(testUser1, uuid1)); - fiu_disable("test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT + fiu_disable( + "test/integration/snapshotcloneserver/FakeCurveFsClient.RecoverChunk"); // NOLINT } } // namespace snapshotcloneserver diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp index 2e549688b8..b1b99953ae 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp @@ -20,16 +20,14 @@ * Author: xuchaojie */ -#include - #include "test/integration/snapshotcloneserver/snapshotcloneserver_module.h" +#include namespace curve { namespace snapshotcloneserver { -int SnapshotCloneServerModule::Start( - const SnapshotCloneServerOptions &option) { +int SnapshotCloneServerModule::Start(const SnapshotCloneServerOptions& option) { serverOption_ = option; client_ = std::make_shared(); @@ -45,13 +43,8 @@ int SnapshotCloneServerModule::Start( auto cloneRef_ = std::make_shared(); - auto core = - std::make_shared( - client_, - metaStore_, - dataStore_, - snapshotRef_, - serverOption_); + auto core = std::make_shared( + client_, metaStore_, dataStore_, snapshotRef_, serverOption_); if (core->Init() < 0) { LOG(ERROR) << "SnapshotCore init fail."; @@ -61,8 +54,7 @@ int SnapshotCloneServerModule::Start( auto taskMgr = std::make_shared(core, snapshotMetric); snapshotServiceManager_ = - std::make_shared(taskMgr, - core); + std::make_shared(taskMgr, core); if (snapshotServiceManager_->Init(serverOption_) < 0) { LOG(ERROR) << "SnapshotServiceManager init fail."; @@ -71,13 +63,9 @@ int SnapshotCloneServerModule::Start( auto cloneMetric = std::make_shared(); - auto cloneCore = std::make_shared( - client_, - metaStore_, - dataStore_, - snapshotRef_, - cloneRef_, - serverOption_); + auto cloneCore = + std::make_shared(client_, metaStore_, dataStore_, + snapshotRef_, cloneRef_, serverOption_); if (cloneCore->Init() < 0) { LOG(ERROR) << "CloneCore init fail."; return kErrCodeServerInitFail; @@ -87,28 +75,26 @@ int SnapshotCloneServerModule::Start( std::make_shared(cloneCore, cloneMetric); auto cloneServiceManagerBackend = - std::make_shared(cloneCore); + std::make_shared(cloneCore); - cloneServiceManager_ = - std::make_shared(cloneTaskMgr, - cloneCore, cloneServiceManagerBackend); + cloneServiceManager_ = std::make_shared( + cloneTaskMgr, cloneCore, cloneServiceManagerBackend); if (cloneServiceManager_->Init(serverOption_) < 0) { LOG(ERROR) << "CloneServiceManager init fail."; return kErrCodeServerInitFail; } server_ = std::make_shared(); - service_ = - std::make_shared( - snapshotServiceManager_, - cloneServiceManager_); + service_ = std::make_shared( + snapshotServiceManager_, cloneServiceManager_); - if (server_->AddService(service_.get(), - brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + if (server_->AddService(service_.get(), 
brpc::SERVER_DOESNT_OWN_SERVICE) != + 0) { LOG(ERROR) << "Failed to add snapshot_service!\n"; return kErrCodeServerInitFail; } - // 先启动clone服务再启动snapshot服务,因为删除快照依赖是否有clone引用 + // Start the clone service before the snapshot service, because snapshot + // deletion depends on whether a clone still references the snapshot int ret = cloneServiceManager_->Start(); if (ret < 0) { LOG(ERROR) << "cloneServiceManager start fail" diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp index d4ccb66c65..8ed3364576 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp @@ -20,20 +20,20 @@ * Author: xuchaojie */ -#include -#include #include +#include +#include #include -#include "src/common/uuid.h" -#include "src/common/location_operator.h" -#include "test/integration/cluster_common/cluster.h" #include "src/client/libcurve_file.h" -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" -#include "src/snapshotcloneserver/clone/clone_service_manager.h" -#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" +#include "src/common/location_operator.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/common/uuid.h" +#include "src/snapshotcloneserver/clone/clone_service_manager.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" +#include "test/integration/cluster_common/cluster.h" +#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" using curve::CurveCluster; using curve::client::FileClient; @@ -49,27 +49,27 @@ const uint64_t chunkSize = 16ULL * 1024 * 1024; const uint64_t segmentSize = 32ULL * 1024 * 1024; const uint64_t chunkSplitSize = 8388608; -// 测试文件只写2个segment +// The test file writes only 2 segments const uint64_t testFile1AllocSegmentNum = 2; -// 一些常数定义 -const char *cloneTempDir_ = "/clone"; -const char *mdsRootUser_ = "root"; -const char *mdsRootPassword_ = "root_password"; +// Some constant definitions +const char* cloneTempDir_ = "/clone"; +const char* mdsRootUser_ = "root"; +const char* mdsRootPassword_ = "root_password"; constexpr uint32_t kProgressTransferSnapshotDataStart = 10; -const char *kEtcdClientIpPort = "127.0.0.1:10021"; -const char *kEtcdPeerIpPort = "127.0.0.1:10022"; -const char *kMdsIpPort = "127.0.0.1:10023"; -const char *kChunkServerIpPort1 = "127.0.0.1:10024"; -const char *kChunkServerIpPort2 = "127.0.0.1:10025"; -const char *kChunkServerIpPort3 = "127.0.0.1:10026"; -const char *kSnapshotCloneServerIpPort = "127.0.0.1:10027"; -const char *kSnapshotCloneServerDummyServerPort = "12002"; -const char *kLeaderCampaginPrefix = "snapshotcloneserverleaderlock1"; +const char* kEtcdClientIpPort = "127.0.0.1:10021"; +const char* kEtcdPeerIpPort = "127.0.0.1:10022"; +const char* kMdsIpPort = "127.0.0.1:10023"; +const char* kChunkServerIpPort1 = "127.0.0.1:10024"; +const char* kChunkServerIpPort2 = "127.0.0.1:10025"; +const char* kChunkServerIpPort3 = "127.0.0.1:10026"; +const char* kSnapshotCloneServerIpPort = "127.0.0.1:10027"; +const char* kSnapshotCloneServerDummyServerPort = "12002"; +const char* kLeaderCampaginPrefix = "snapshotcloneserverleaderlock1"; -static const char *kDefaultPoolset = "default"; +static const char* kDefaultPoolset = "default"; const int kMdsDummyPort = 10028;
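A quick sanity check of the geometry these constants imply, since the Prepare*Chunk helpers later in this file iterate segmentSize / chunkSize chunks per segment and chunkSize / chunkSplitSize requests per chunk. This is a standalone sketch; the constant values are copied from this test and the static_asserts are illustrative:

    #include <cstdint>

    int main() {
        constexpr uint64_t chunkSize = 16ULL * 1024 * 1024;    // 16 MiB
        constexpr uint64_t segmentSize = 32ULL * 1024 * 1024;  // 32 MiB
        constexpr uint64_t chunkSplitSize = 8388608;           // 8 MiB

        // PrepareRecoverChunk issues chunkSize / chunkSplitSize RecoverChunk
        // requests per chunk; each segment holds segmentSize / chunkSize chunks.
        static_assert(segmentSize / chunkSize == 2, "2 chunks per segment");
        static_assert(chunkSize / chunkSplitSize == 2,
                      "2 split requests per chunk");
        return 0;
    }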
@@ -79,27 +79,26 @@ const std::string kMdsConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_mds.conf"; -const std::string kCSConfigPath = // NOLINT +const std::string kCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_chunkserver.conf"; -const std::string kCsClientConfigPath = // NOLINT +const std::string kCsClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_cs_client.conf"; -const std::string kSnapClientConfigPath = // NOLINT +const std::string kSnapClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_snap_client.conf"; -const std::string kS3ConfigPath = // NOLINT - "./test/integration/snapshotcloneserver/config/" + kTestPrefix + - "_s3.conf"; +const std::string kS3ConfigPath = // NOLINT + "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_s3.conf"; -const std::string kSCSConfigPath = // NOLINT +const std::string kSCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_scs.conf"; -const std::string kClientConfigPath = // NOLINT +const std::string kClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_client.conf"; @@ -120,11 +119,11 @@ const std::vector mdsConfigOptions{ }; const std::vector mdsConf1{ - { "--graceful_quit_on_sigterm" }, + {"--graceful_quit_on_sigterm"}, std::string("--confPath=") + kMdsConfigPath, std::string("--log_dir=") + kLogPath, std::string("--segmentSize=") + std::to_string(segmentSize), - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector chunkserverConfigOptions{ @@ -151,66 +150,63 @@ const std::vector snapClientConfigOptions{ const std::vector s3ConfigOptions{}; const std::vector chunkserverConf1{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "1/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "1/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "1/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "1/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "1/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "1/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "1/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "1/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "1/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "1/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "1/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "1/walfilepool.meta"}, }; const std::vector chunkserverConf2{ - { "--graceful_quit_on_sigterm" }, - { 
"-chunkServerStoreUri=local://./" + kTestPrefix + "2/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "2/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "2/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "2/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "2/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "2/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "2/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "2/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "2/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "2/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "2/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "2/walfilepool.meta"}, }; const std::vector chunkserverConf3{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "3/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "3/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "3/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "3/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "3/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "3/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "3/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "3/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "3/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "3/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "3/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "3/walfilepool.meta"}, }; const std::vector snapshotcloneserverConfigOptions{ @@ -237,7 +233,7 @@ const std::vector snapshotcloneserverConfigOptions{ const std::vector snapshotcloneConf{ std::string("--conf=") + kSCSConfigPath, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector 
clientConfigOptions{ @@ -246,8 +242,8 @@ const std::vector clientConfigOptions{ std::string("mds.rpcTimeoutMS=4000"), }; -const char *testFile1_ = "/RcvItUser1/file1"; -const char *testUser1_ = "RcvItUser1"; +const char* testFile1_ = "/RcvItUser1/file1"; +const char* testUser1_ = "RcvItUser1"; int testFd1_ = 0; namespace curve { @@ -262,16 +258,16 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); - // 初始化db + // Initialize db system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); system(std::string("rm -rf " + kTestPrefix + "1").c_str()); system(std::string("rm -rf " + kTestPrefix + "2").c_str()); system(std::string("rm -rf " + kTestPrefix + "3").c_str()); - // 启动etcd + // Start etcd pid_t pid = cluster_->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, - std::vector{ "--name=" + kTestPrefix }); + std::vector{"--name=" + kTestPrefix}); LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort << "::" << kEtcdPeerIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); @@ -281,13 +277,13 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions); - // 启动一个mds + // Start an mds pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 创建物理池 + // Creating a physical pool ASSERT_EQ(0, cluster_->PreparePhysicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -296,21 +292,18 @@ class SnapshotCloneServerTest : public ::testing::Test { // format chunkfilepool and walfilepool std::vector threadpool(3); - threadpool[0] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "1/chunkfilepool/", - "./" + kTestPrefix + "1/chunkfilepool.meta", - "./" + kTestPrefix + "1/chunkfilepool/", 2); - threadpool[1] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "2/chunkfilepool/", - "./" + kTestPrefix + "2/chunkfilepool.meta", - "./" + kTestPrefix + "2/chunkfilepool/", 2); - threadpool[2] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "3/chunkfilepool/", - "./" + kTestPrefix + "3/chunkfilepool.meta", - "./" + kTestPrefix + "3/chunkfilepool/", 2); + threadpool[0] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "1/chunkfilepool/", + "./" + kTestPrefix + "1/chunkfilepool.meta", + "./" + kTestPrefix + "1/chunkfilepool/", 2); + threadpool[1] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "2/chunkfilepool/", + "./" + kTestPrefix + "2/chunkfilepool.meta", + "./" + kTestPrefix + "2/chunkfilepool/", 2); + threadpool[2] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "3/chunkfilepool/", + "./" + kTestPrefix + "3/chunkfilepool.meta", + "./" + kTestPrefix + "3/chunkfilepool/", 2); for (int i = 0; i < 3; i++) { threadpool[i].join(); } @@ -324,7 +317,7 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kCSConfigPath, chunkserverConfigOptions); - // 创建chunkserver + // Create chunkserver pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1 @@ -343,7 +336,8 @@ class SnapshotCloneServerTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // Create a logical pool and sleep for a period of time to let the + // 
underlying copyset select the primary first ASSERT_EQ(0, cluster_->PrepareLogicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -381,9 +375,9 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(INFO) << "Write testFile1_ success."; } - static bool CreateAndWriteFile(const std::string &fileName, - const std::string &user, - const std::string &dataSample, int *fdOut) { + static bool CreateAndWriteFile(const std::string& fileName, + const std::string& user, + const std::string& dataSample, int* fdOut) { UserInfo_t userinfo; userinfo.owner = user; int ret = fileClient_->Create(fileName, userinfo, testFile1Length); @@ -394,8 +388,8 @@ class SnapshotCloneServerTest : public ::testing::Test { return WriteFile(fileName, user, dataSample, fdOut); } - static bool WriteFile(const std::string &fileName, const std::string &user, - const std::string &dataSample, int *fdOut) { + static bool WriteFile(const std::string& fileName, const std::string& user, + const std::string& dataSample, int* fdOut) { int ret = 0; UserInfo_t userinfo; userinfo.owner = user; @@ -404,7 +398,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(ERROR) << "Open fail, ret = " << *fdOut; return false; } - // 2个segment,每个写第一个chunk + // 2 segments, each with the first chunk written for (uint64_t i = 0; i < testFile1AllocSegmentNum; i++) { ret = fileClient_->Write(*fdOut, dataSample.c_str(), i * segmentSize, dataSample.size()); @@ -421,14 +415,14 @@ class SnapshotCloneServerTest : public ::testing::Test { return true; } - static bool CheckFileData(const std::string &fileName, - const std::string &user, - const std::string &dataSample) { + static bool CheckFileData(const std::string& fileName, + const std::string& user, + const std::string& dataSample) { UserInfo_t userinfo; userinfo.owner = user; int ret = 0; - // 检查文件状态 + // Check file status FInfo fileInfo; ret = snapClient_->GetFileInfo(fileName, userinfo, &fileInfo); if (ret < 0) { @@ -490,7 +484,7 @@ class SnapshotCloneServerTest : public ::testing::Test { void TearDown() {} - void PrepareSnapshotForTestFile1(std::string *uuid1) { + void PrepareSnapshotForTestFile1(std::string* uuid1) { if (!hasSnapshotForTestFile1_) { int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", uuid1); ASSERT_EQ(0, ret); @@ -509,23 +503,23 @@ class SnapshotCloneServerTest : public ::testing::Test { } } - int PrepareCreateCloneFile(const std::string &fileName, FInfo *fInfoOut, + int PrepareCreateCloneFile(const std::string& fileName, FInfo* fInfoOut, bool IsRecover = false) { uint64_t seqNum = 1; if (IsRecover) { - seqNum = 2; // 恢复新文件使用版本号+1 + seqNum = 2; // Restore new files using version number+1 } else { - seqNum = 1; // 克隆新文件使用初始版本号1 + seqNum = 1; // Clone new files using initial version number 1 } int ret = snapClient_->CreateCloneFile( - testFile1_, fileName, - UserInfo_t(mdsRootUser_, mdsRootPassword_), testFile1Length, - seqNum, chunkSize, 0, 0, kDefaultPoolset, fInfoOut); + testFile1_, fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_), + testFile1Length, seqNum, chunkSize, 0, 0, kDefaultPoolset, + fInfoOut); return ret; } - int PrepareCreateCloneMeta(FInfo *fInfoOut, const std::string &newFileName, - std::vector *segInfoOutVec) { + int PrepareCreateCloneMeta(FInfo* fInfoOut, const std::string& newFileName, + std::vector* segInfoOutVec) { fInfoOut->fullPathName = newFileName; fInfoOut->userinfo = UserInfo_t(mdsRootUser_, mdsRootPassword_); for (int i = 0; i < testFile1AllocSegmentNum; i++) { @@ -540,7 +534,7 @@ class SnapshotCloneServerTest : public 
::testing::Test { return LIBCURVE_ERROR::OK; } - int PrepareCreateCloneChunk(const std::vector &segInfoVec, + int PrepareCreateCloneChunk(const std::vector& segInfoVec, bool IsRecover = false) { if (segInfoVec.size() != testFile1AllocSegmentNum) { LOG(ERROR) << "internal error!"; @@ -555,13 +549,14 @@ class SnapshotCloneServerTest : public ::testing::Test { name.chunkIndex_ = i * segmentSize / chunkSize; std::string location = LocationOperator::GenerateS3Location(name.ToDataChunkKey()); - // 由于测试文件每个segment只写了第一个chunk, - // 快照可以做到只转储当前写过的chunk, - // 所以从快照克隆每个segment只Create第一个chunk。 - // 而从文件克隆,由于mds不知道chunk写没写过, - // 所以需要Create全部的chunk。 + // Each segment of the test file only has its first chunk + // written, and a snapshot dumps only the chunks that were + // actually written, so cloning from a snapshot creates just + // the first chunk of each segment. When cloning from a file, + // the mds does not know which chunks were written, so all + // chunks must be created. ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[0]; - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); LOG(INFO) << "CreateCloneChunk, location = " << location @@ -571,8 +566,10 @@ class SnapshotCloneServerTest : public ::testing::Test { << ", seqNum = " << 1 << ", csn = " << 2; int ret = snapClient_->CreateCloneChunk( location, cidInfo, - 1, // 恢复使用快照中chunk的版本号 - 2, // 恢复使用新文件的版本号, 即原文件版本号+1 + 1, // Recover uses the version number of the chunk in + // the snapshot + 2, // Recover uses the new file's version number, i.e. + // the original file's version number + 1 chunkSize, cb); if (ret != LIBCURVE_ERROR::OK) { return ret; } @@ -585,7 +582,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LocationOperator::GenerateCurveLocation( testFile1_, i * segmentSize + j * chunkSize); ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[j]; - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); LOG(INFO) << "CreateCloneChunk, location = " << location @@ -593,11 +590,11 @@ class SnapshotCloneServerTest : public ::testing::Test { << ", copysetId = " << cidInfo.cpid_ << ", chunkId = " << cidInfo.cid_ << ", seqNum = " << 1 << ", csn = " << 0; - int ret = - snapClient_->CreateCloneChunk(location, cidInfo, - 1, // 克隆使用初始版本号1 - 0, // 克隆使用0 - chunkSize, cb); + int ret = snapClient_->CreateCloneChunk( + location, cidInfo, + 1, // Clone uses the initial version number 1 + 0, // Clone uses 0 + chunkSize, cb); if (ret != LIBCURVE_ERROR::OK) { return ret; } @@ -614,14 +611,14 @@ return LIBCURVE_ERROR::OK; } - int PrepareCompleteCloneMeta(const std::string &uuid) { + int PrepareCompleteCloneMeta(const std::string& uuid) { std::string fileName = std::string(cloneTempDir_) + "/" + uuid; int ret = snapClient_->CompleteCloneMeta( fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_)); return ret; } - int PrepareRecoverChunk(const std::vector &segInfoVec, + int PrepareRecoverChunk(const std::vector& segInfoVec, bool IsSnapshot = false) { if (segInfoVec.size() != testFile1AllocSegmentNum) { LOG(ERROR) << "internal error!"; @@ -630,14 +627,15 @@ class SnapshotCloneServerTest : public ::testing::Test { auto tracker = std::make_shared(); if (IsSnapshot) { for (int i = 0; i < testFile1AllocSegmentNum; i++) { - // 由于测试文件每个segment只写了第一个chunk, - // 快照可以做到只转储当前写过的chunk, - // 所以从快照克隆每个segment只Recover第一个chunk。 - // 而从文件克隆,由于mds不知道chunk写没写过, -
所以需要Recover全部的chunk。 + // Due to the fact that each segment in the test file only + // writes the first chunk, Snapshots can only dump the currently + // written chunks, So clone each segment from the snapshot and + // only recover the first chunk. And when cloning from a file, + // because mds doesn't know if chunk has been written or not, So + // we need to recover all chunks. ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[0]; for (uint64_t k = 0; k < chunkSize / chunkSplitSize; k++) { - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); uint64_t offset = k * chunkSplitSize; @@ -658,7 +656,7 @@ class SnapshotCloneServerTest : public ::testing::Test { for (uint64_t j = 0; j < segmentSize / chunkSize; j++) { ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[j]; for (uint64_t k = 0; k < chunkSize / chunkSplitSize; k++) { - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); uint64_t offset = k * chunkSplitSize; @@ -686,44 +684,42 @@ class SnapshotCloneServerTest : public ::testing::Test { return LIBCURVE_ERROR::OK; } - int PrepareCompleteCloneFile(const std::string &fileName) { + int PrepareCompleteCloneFile(const std::string& fileName) { return snapClient_->CompleteCloneFile( fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_)); } - int PrepareChangeOwner(const std::string &fileName) { + int PrepareChangeOwner(const std::string& fileName) { return fileClient_->ChangeOwner( fileName, testUser1_, UserInfo_t(mdsRootUser_, mdsRootPassword_)); } int PrepareRenameCloneFile(uint64_t originId, uint64_t destinationId, - const std::string &fileName, - const std::string &newFileName) { + const std::string& fileName, + const std::string& newFileName) { return snapClient_->RenameCloneFile( UserInfo_t(mdsRootUser_, mdsRootPassword_), originId, destinationId, fileName, newFileName); } - static CurveCluster *cluster_; - static FileClient *fileClient_; - static SnapshotClient *snapClient_; + static CurveCluster* cluster_; + static FileClient* fileClient_; + static SnapshotClient* snapClient_; bool hasSnapshotForTestFile1_ = false; std::string snapIdForTestFile1_; }; -CurveCluster *SnapshotCloneServerTest::cluster_ = nullptr; -FileClient *SnapshotCloneServerTest::fileClient_ = nullptr; -SnapshotClient *SnapshotCloneServerTest::snapClient_ = nullptr; +CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr; +FileClient* SnapshotCloneServerTest::fileClient_ = nullptr; +SnapshotClient* SnapshotCloneServerTest::snapClient_ = nullptr; -// 未在curve中创建快照阶段,重启恢复 +// Failed to create snapshot phase in curve, restart recovery TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenNotCreateSnapOnCurvefs) { std::string uuid1 = UUIDGenerator().GenerateUUID(); - SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, - "snapxxx", 0, chunkSize, - segmentSize, testFile1Length, - 0, 0, kDefaultPoolset, 0, - Status::pending); + SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", 0, + chunkSize, segmentSize, testFile1Length, 0, 0, + kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); pid_t pid = cluster_->RestartSnapshotCloneServer(1); @@ -740,19 +736,18 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenNotCreateSnapOnCurvefs) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// 已在curve中创建快照,但成功结果未返回,重启恢复 +// A snapshot has been created in the curve, but the successful result has not +// been returned. 
Restart for recovery TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasCreateSnapOnCurvefsNotReturn) { - // 调用client接口创建快照 + // Calling the client interface to create a snapshot uint64_t seq = 0; snapClient_->CreateSnapShot(testFile1_, UserInfo_t(testUser1_, ""), &seq); std::string uuid1 = UUIDGenerator().GenerateUUID(); - SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, - "snapxxx", 0, chunkSize, - segmentSize, testFile1Length, - 0, 0, kDefaultPoolset, 0, - Status::pending); + SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", 0, + chunkSize, segmentSize, testFile1Length, 0, 0, + kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); pid_t pid = cluster_->RestartSnapshotCloneServer(1); @@ -769,18 +764,18 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// 已在curve中创建快照,结果已返回,重启恢复 +// A snapshot has been created in the curve, and the results have been returned. +// Restart to recover TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasCreateSnapOnCurvefsReturn) { - // 调用client接口创建快照 + // Calling the client interface to create a snapshot uint64_t seq = 0; snapClient_->CreateSnapShot(testFile1_, UserInfo_t(testUser1_, ""), &seq); std::string uuid1 = UUIDGenerator().GenerateUUID(); SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", seq, - chunkSize, segmentSize, testFile1Length, - 0, 0, kDefaultPoolset, 0, - Status::pending); + chunkSize, segmentSize, testFile1Length, 0, 0, + kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); pid_t pid = cluster_->RestartSnapshotCloneServer(1); @@ -797,7 +792,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// 已在curve中创建快照阶段,nos上传部分快照,重启恢复 +// The snapshot phase has been created in the curve. 
Part of the snapshot data has been +// uploaded to NOS; restart and recover TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { std::string uuid1; int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1); ASSERT_EQ(0, ret); @@ -812,7 +808,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { } if (info1.GetSnapshotInfo().GetStatus() == Status::pending) { if (info1.GetSnapProgress() > kProgressTransferSnapshotDataStart) { - // 当进度到达转储的百分比时重启 + // Restart once the dump progress has passed the threshold pid_t pid = cluster_->RestartSnapshotCloneServer(1, true); LOG(INFO) << "SnapshotCloneServer 1 restarted, pid = " << pid; ASSERT_GT(pid, 0); @@ -836,16 +832,14 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上未创建文件 +// Restart during the CreateCloneFile phase; no file has been created on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneFile"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, 0, 0, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, 0, 0, 0, CloneFileType::kFile, + false, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -860,7 +854,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上创建文件成功未返回 +// Restart during the CreateCloneFile phase; the file was created on mds but +// success was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasCreateCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -870,12 +865,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneHasCreateCloneFileSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, 0, 0, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, 0, 0, 0, CloneFileType::kFile, + false, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -890,7 +883,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上未创建segment +// Restart during the CreateCloneMeta phase; no segment has been created on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; FInfo fInfoOut; ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneFile(fileName, &fInfoOut)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneMeta"; - CloneInfo cloneInfo(uuid1, testUser1_,
CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -918,7 +909,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上创建segment成功未返回 +// Reboot during the CreateCloneMeta phase, successfully creating segment on mds +// but not returning TEST_F(SnapshotCloneServerTest, TestRecoverCloneCreateCloneMetaSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -932,12 +924,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCreateCloneMetaSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -952,7 +942,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,未在chunkserver上创建clonechunk +// Reboot during the CreateCloneChunk phase, cloneChunk not created on +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -964,12 +955,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { PrepareCreateCloneMeta(&fInfoOut, fileName, &segInfoOutVec)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneChunk"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneChunk, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -984,7 +973,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,在chunkserver上创建部分clonechunk +// Restart the CreateCloneChunk phase and create a partial clone chunk on the +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneCreateCloneChunkSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1000,12 +990,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCreateCloneChunkSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneChunk, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1020,7 +1008,7 @@ 
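// ---------------------------------------------------------------------------
// [Editor's note -- illustrative sketch, not part of the patch] Every
// TestRecoverClone* case above and below persists a CloneInfo frozen at one
// CloneStep, restarts the snapshotcloneserver, and expects the task to be
// driven to completion from exactly that step. The loop below sketches the
// resume logic these cases exercise. ResumeClone() and DoStep() are
// hypothetical names and GetNextStep() is assumed from context; only the step
// order comes from the tests themselves (non-lazy clone order shown).
int DoStep(CloneStep step, CloneInfo* info);  // hypothetical per-step executor

int ResumeClone(CloneInfo* info) {
    static const CloneStep kOrder[] = {
        CloneStep::kCreateCloneFile,  CloneStep::kCreateCloneMeta,
        CloneStep::kCreateCloneChunk, CloneStep::kCompleteCloneMeta,
        CloneStep::kRecoverChunk,     CloneStep::kCompleteCloneFile,
        CloneStep::kChangeOwner,      CloneStep::kRenameCloneFile};
    bool reached = false;
    for (CloneStep step : kOrder) {
        reached = reached || (step == info->GetNextStep());
        if (!reached) continue;        // step already done before the restart
        int ret = DoStep(step, info);  // each step must be idempotent
        if (ret != LIBCURVE_ERROR::OK) {
            return ret;
        }
    }
    return LIBCURVE_ERROR::OK;
}
// ---------------------------------------------------------------------------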
TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启 +// CompleteCloneMeta phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1034,12 +1022,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneChunk(segInfoOutVec)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCompleteCloneMeta"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1054,7 +1040,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启,同时在mds上调用CompleteCloneMeta成功但未返回 +// The CompleteCloneMeta phase was restarted, and the call to CompleteCloneMeta +// on mds was successful but did not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneCompleteCloneMetaSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1072,12 +1059,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCompleteCloneMetaSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1092,7 +1077,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上未调用RecoverChunk +// RecoverChunk phase restarted, RecoverChunk was not called on chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1108,12 +1093,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneMeta(uuid1)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotRecoverChunk"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRecoverChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kRecoverChunk, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1128,7 +1111,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上部分调用RecoverChunk +// Restart 
the RecoverChunk phase and partially call RecoverChunk on the +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1147,12 +1131,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { std::string dstFile = "/RcvItUser1/TestRecoverCloneRecoverChunkSuccssNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRecoverChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kRecoverChunk, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1167,7 +1149,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启 +// CompleteCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1185,12 +1167,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRecoverChunk(segInfoOutVec)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCompleteCloneFile"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1205,7 +1185,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启,但mds上CompleteCloneFile已成功未返回 +// CompleteCloneFile stage restarted, but CompleteCloneFile successfully did not +// return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneCompleteCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1227,12 +1208,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCompleteCloneFileSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1247,7 +1226,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// ChangeOwner阶段重启 +// ChangeOwner phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1267,12 +1246,10 @@ 
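// ---------------------------------------------------------------------------
// [Editor's note -- illustrative sketch, not part of the patch]
// PrepareRecoverChunk above issues one asynchronous RecoverChunk per
// chunkSplitSize-sized slice of a chunk rather than one per chunk. The offset
// arithmetic is isolated below; RecoverFn is a hypothetical stand-in for the
// asynchronous snapClient_->RecoverChunk() call (requires <cstdint> and
// <functional>).
using RecoverFn = std::function<int(const ChunkIDInfo& cidInfo,
                                    uint64_t offset, uint64_t len)>;

// Issues chunkSize / chunkSplitSize requests at offsets 0, chunkSplitSize,
// 2 * chunkSplitSize, ...; assumes chunkSize is an exact multiple of
// chunkSplitSize, as the constants used by these tests are.
inline int RecoverWholeChunk(const ChunkIDInfo& cidInfo, uint64_t chunkSize,
                             uint64_t chunkSplitSize,
                             const RecoverFn& recover) {
    for (uint64_t k = 0; k < chunkSize / chunkSplitSize; k++) {
        uint64_t offset = k * chunkSplitSize;
        int ret = recover(cidInfo, offset, chunkSplitSize);
        if (ret != LIBCURVE_ERROR::OK) {
            return ret;
        }
    }
    return LIBCURVE_ERROR::OK;
}
// ---------------------------------------------------------------------------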
TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneFile(fileName)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotChangeOwner"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kChangeOwner, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kChangeOwner, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1287,7 +1264,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// ChangeOwner阶段重启,但mds上ChangeOwner成功未返回 +// The ChangeOwner phase restarts, but the ChangeOwner on mds successfully did +// not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1310,12 +1288,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { std::string dstFile = "/RcvItUser1/TestRecoverCloneChangeOwnerSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kChangeOwner, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kChangeOwner, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1330,7 +1306,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RenameCloneFile阶段重启 +// RenameCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1352,12 +1328,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareChangeOwner(fileName)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotRenameCloneFile"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRenameCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kRenameCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1372,7 +1346,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RenameCloneFile阶段重启,但mds上已RenameCloneFile成功未返回 +// RenameCloneFile stage restarted, but RenameCloneFile successfully did not +// return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneRenameCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1400,12 +1375,10 @@ TEST_F(SnapshotCloneServerTest, LIBCURVE_ERROR::OK, PrepareRenameCloneFile(fInfoOut.id, fInfoOut.id, fileName, dstFile)); - 
CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRenameCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kRenameCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1420,18 +1393,16 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// 以下为Lazy模式用例 -// CreateCloneFile阶段重启,mds上未创建文件 +// The following are the Lazy pattern use cases +// Reboot during the CreateCloneFile phase, no files were created on the mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); std::string uuid1 = UUIDGenerator().GenerateUUID(); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, 0, 0, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, 0, 0, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1451,7 +1422,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上创建文件成功未返回 +// Reboot during the CreateCloneFile phase, successful file creation on mds but +// not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasCreateCloneFileSuccessNotReturn) { std::string snapId; @@ -1462,12 +1434,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneFile(fileName, &fInfoOut, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, 0, 0, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, 0, 0, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1487,7 +1457,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上未创建segment +// Reboot during the CreateCloneMeta phase, no segment was created on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneMeta) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1497,12 +1467,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneFile(fileName, &fInfoOut, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1522,7 +1490,8 @@ TEST_F(SnapshotCloneServerTest, 
TestRecoverCloneLazyHasNotCreateCloneMeta) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上创建segment成功未返回 +// Reboot during the CreateCloneMeta phase, successfully creating segment on mds +// but not returning TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCreateCloneMetaSuccessNotReturn) { std::string snapId; @@ -1537,12 +1506,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneMeta(&fInfoOut, fileName, &segInfoOutVec)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1562,7 +1529,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,未在chunkserver上创建clonechunk +// Reboot during the CreateCloneChunk phase, cloneChunk not created on +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1576,12 +1544,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneMeta(&fInfoOut, fileName, &segInfoOutVec)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1601,7 +1567,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,在chunkserver上创建部分clonechunk +// Restart the CreateCloneChunk phase and create a partial clone chunk on the +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCreateCloneChunkSuccessNotReturn) { std::string snapId; @@ -1618,12 +1585,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1643,7 +1608,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启 +// CompleteCloneMeta phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneMeta) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1659,12 +1624,10 @@ TEST_F(SnapshotCloneServerTest, 
TestRecoverCloneLazyHasNotCompleteCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1684,7 +1647,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneMeta) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启,同时在mds上调用CompleteCloneMeta成功但未返回 +// The CompleteCloneMeta phase was restarted, and the call to CompleteCloneMeta +// on mds was successful but did not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCompleteCloneMetaSuccessNotReturn) { std::string snapId; @@ -1703,12 +1667,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneMeta(uuid1)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1728,7 +1690,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// ChangeOwner阶段重启 +// ChangeOwner phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1746,12 +1708,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneMeta(uuid1)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kChangeOwner, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, CloneStep::kChangeOwner, + CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1771,7 +1731,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// ChangeOwner阶段重启,但mds上ChangeOwner成功未返回 +// The ChangeOwner phase restarts, but the ChangeOwner on mds successfully did +// not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyChangeOwnerSuccessNotReturn) { std::string snapId; @@ -1792,12 +1753,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareChangeOwner(fileName)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kChangeOwner, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, 
CloneStep::kChangeOwner, + CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1817,7 +1776,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RenameCloneFile阶段重启 +// RenameCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1837,12 +1796,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareChangeOwner(fileName)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRenameCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRenameCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1862,7 +1819,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RenameCloneFile阶段重启,但mds上已RenameCloneFile成功未返回 +// RenameCloneFile stage restarted, but RenameCloneFile successfully did not +// return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyRenameCloneFileSuccessNotReturn) { std::string snapId; @@ -1886,12 +1844,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRenameCloneFile(fInfoOut.id, testFd1_, fileName, testFile1_)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRenameCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRenameCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1911,7 +1867,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上未调用RecoverChunk +// RecoverChunk phase restarted, RecoverChunk was not called on chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1934,12 +1890,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRenameCloneFile(fInfoOut.id, testFd1_, fileName, testFile1_)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRecoverChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRecoverChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1954,7 +1908,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上部分调用RecoverChunk +// Restart the RecoverChunk phase and partially call RecoverChunk on the +// chunkserver TEST_F(SnapshotCloneServerTest, 
TestRecoverCloneLazyRecoverChunkSuccssNotReturn) { std::string snapId; @@ -1980,12 +1935,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRecoverChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRecoverChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRecoverChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -2000,7 +1953,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启 +// CompleteCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -2025,12 +1978,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRecoverChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -2045,7 +1996,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启,但mds上CompleteCloneFile已成功未返回 +// CompleteCloneFile stage restarted, but CompleteCloneFile successfully did not +// return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCompleteCloneFileSuccessNotReturn) { std::string snapId; @@ -2073,12 +2025,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneFile(testFile1_)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp index f56bae71e7..9dd30a65b9 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp @@ -20,49 +20,49 @@ * Author: hzsunjianliang */ -#include -#include -#include #include -#include // NOLINT -#include // NOLINT +#include +#include +#include + +#include // NOLINT +#include // NOLINT -#include "test/integration/cluster_common/cluster.h" #include "src/client/libcurve_file.h" #include "src/snapshotcloneserver/snapshotclone_server.h" +#include "test/integration/cluster_common/cluster.h" #include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" 
#include "test/util/config_generator.h" -const std::string kTestPrefix = "MainSCSTest"; // NOLINT +const std::string kTestPrefix = "MainSCSTest"; // NOLINT -// 一些常数定义 -const char* cloneTempDir_ = "/clone"; -const char* mdsRootUser_ = "root"; -const char* mdsRootPassword_ = "root_password"; +// Some constant definitions +const char *cloneTempDir_ = "/clone"; +const char *mdsRootUser_ = "root"; +const char *mdsRootPassword_ = "root_password"; const uint64_t segmentSize = 32ULL * 1024 * 1024; -const char* kEtcdClientIpPort = "127.0.0.1:10041"; -const char* kEtcdPeerIpPort = "127.0.0.1:10042"; -const char* kMdsIpPort = "127.0.0.1:10043"; -const char* kSnapshotCloneServerIpPort = "127.0.0.1:10047"; +const char *kEtcdClientIpPort = "127.0.0.1:10041"; +const char *kEtcdPeerIpPort = "127.0.0.1:10042"; +const char *kMdsIpPort = "127.0.0.1:10043"; +const char *kSnapshotCloneServerIpPort = "127.0.0.1:10047"; const int kMdsDummyPort = 10048; -const char* kSnapshotCloneServerDummyServerPort = "12004"; -const char* kLeaderCampaginPrefix = "snapshotcloneserverleaderlock4"; +const char *kSnapshotCloneServerDummyServerPort = "12004"; +const char *kLeaderCampaginPrefix = "snapshotcloneserverleaderlock4"; -const std::string kLogPath = "./runlog/" + kTestPrefix + "Log"; // NOLINT -const std::string kMdsDbName = kTestPrefix + "DB"; // NOLINT -const std::string kEtcdName = kTestPrefix; // NOLINT -const std::string kMdsConfigPath = // NOLINT +const std::string kLogPath = "./runlog/" + kTestPrefix + "Log"; // NOLINT +const std::string kMdsDbName = kTestPrefix + "DB"; // NOLINT +const std::string kEtcdName = kTestPrefix; // NOLINT +const std::string kMdsConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_mds.conf"; -const std::string kSnapClientConfigPath = // NOLINT +const std::string kSnapClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_snap_client.conf"; -const std::string kS3ConfigPath = // NOLINT - "./test/integration/snapshotcloneserver/config/" + kTestPrefix + - "_s3.conf"; -const std::string kSCSConfigPath = // NOLINT +const std::string kS3ConfigPath = // NOLINT + "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_s3.conf"; +const std::string kSCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_scs.conf"; @@ -81,11 +81,11 @@ const std::vector mdsConfigOptions{ }; const std::vector mdsConf1{ - { "--graceful_quit_on_sigterm" }, + {"--graceful_quit_on_sigterm"}, std::string("--confPath=") + kMdsConfigPath, std::string("--log_dir=") + kLogPath, std::string("--segmentSize=") + std::to_string(segmentSize), - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector snapClientConfigOptions{ @@ -119,107 +119,113 @@ const std::vector snapshotcloneserverConfigOptions{ const std::vector snapshotcloneConf{ std::string("--conf=") + kSCSConfigPath, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, -}; - -namespace curve { -namespace snapshotcloneserver { - -class SnapshotCloneServerMainTest : public ::testing::Test { - public: - void SetUp() { - std::string mkLogDirCmd = std::string("mkdir -p ") + kLogPath; - system(mkLogDirCmd.c_str()); - system("mkdir -p /data/log/curve ./fakes3"); - - cluster_ = new CurveCluster(); - ASSERT_NE(nullptr, cluster_); - - // 初始化db - std::string rmcmd = "rm -rf " + std::string(kEtcdName) + ".etcd"; - system(rmcmd.c_str()); - - // 启动etcd - pid_t pid = cluster_->StartSingleEtcd( - 1, kEtcdClientIpPort, kEtcdPeerIpPort, 
- std::vector{"--name=" + std::string(kEtcdName)}); - LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort - << "::" << kEtcdPeerIpPort << ", pid = " << pid; - ASSERT_GT(pid, 0); - - cluster_->PrepareConfig(kMdsConfigPath, - mdsConfigOptions); - - // 启动一个mds - pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, - true); - LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; - ASSERT_GT(pid, 0); - - cluster_->PrepareConfig(kS3ConfigPath, - s3ConfigOptions); - - cluster_->PrepareConfig( - kSnapClientConfigPath, snapClientConfigOptions); - - cluster_->PrepareConfig( - kSCSConfigPath, snapshotcloneserverConfigOptions); - } - - void TearDown() { - ASSERT_EQ(0, cluster_->StopCluster()); - delete cluster_; - cluster_ = nullptr; - - std::string rmcmd = "rm -rf " + std::string(kEtcdName) + ".etcd"; - system(rmcmd.c_str()); - } - - public: - CurveCluster* cluster_; + {"--stderrthreshold=3"}, }; -TEST_F(SnapshotCloneServerMainTest, testmain) { - std::shared_ptr conf = std::make_shared(); - conf->SetConfigPath(kSCSConfigPath); - - ASSERT_TRUE(conf->LoadConfig()); - LOG(INFO) << kSCSConfigPath; - conf->PrintConfig(); - - SnapShotCloneServer* snapshotCloneServer = new SnapShotCloneServer(conf); - - snapshotCloneServer->InitAllSnapshotCloneOptions(); - - snapshotCloneServer->StartDummy(); - - snapshotCloneServer->StartCompaginLeader(); - - ASSERT_TRUE(snapshotCloneServer->Init()); - - ASSERT_TRUE(snapshotCloneServer->Start()); - - std::this_thread::sleep_for(std::chrono::seconds(2)); - - // 测试验证是否状态为active - // "curl "127.0.0.1:port/vars/snapshotcloneserver_status""; - std::string cmd = - "curl \"127.0.0.1:" + std::string(kSnapshotCloneServerDummyServerPort) + - "/vars/" + std::string(statusMetricName) + "\""; - // snapshotcloneserver_status : "active\r\n" - std::string expectResult = std::string(statusMetricName) + " : \"" + - std::string(ACTIVE) + "\"\r\n"; - - FILE* fp = popen(cmd.c_str(), "r"); - ASSERT_TRUE(fp != nullptr); - char buf[1024]; - fread(buf, sizeof(char), sizeof(buf), fp); - pclose(fp); - std::string result(buf); - ASSERT_EQ(result, expectResult); - - snapshotCloneServer->Stop(); - LOG(INFO) << "snapshotCloneServer Stopped"; -} -} // namespace snapshotcloneserver -} // namespace curve +namespace curve +{ + namespace snapshotcloneserver + { + + class SnapshotCloneServerMainTest : public ::testing::Test + { + public: + void SetUp() + { + std::string mkLogDirCmd = std::string("mkdir -p ") + kLogPath; + system(mkLogDirCmd.c_str()); + system("mkdir -p /data/log/curve ./fakes3"); + + cluster_ = new CurveCluster(); + ASSERT_NE(nullptr, cluster_); + + // Initialize db + std::string rmcmd = "rm -rf " + std::string(kEtcdName) + ".etcd"; + system(rmcmd.c_str()); + + // Start etcd + pid_t pid = cluster_->StartSingleEtcd( + 1, kEtcdClientIpPort, kEtcdPeerIpPort, + std::vector{"--name=" + std::string(kEtcdName)}); + LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort + << "::" << kEtcdPeerIpPort << ", pid = " << pid; + ASSERT_GT(pid, 0); + + cluster_->PrepareConfig(kMdsConfigPath, + mdsConfigOptions); + + // Start an mds + pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, + true); + LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; + ASSERT_GT(pid, 0); + + cluster_->PrepareConfig(kS3ConfigPath, + s3ConfigOptions); + + cluster_->PrepareConfig( + kSnapClientConfigPath, snapClientConfigOptions); + + cluster_->PrepareConfig( + kSCSConfigPath, snapshotcloneserverConfigOptions); + } + + void TearDown() + { + ASSERT_EQ(0, 
cluster_->StopCluster()); + delete cluster_; + cluster_ = nullptr; + + std::string rmcmd = "rm -rf " + std::string(kEtcdName) + ".etcd"; + system(rmcmd.c_str()); + } + + public: + CurveCluster *cluster_; + }; + + TEST_F(SnapshotCloneServerMainTest, testmain) + { + std::shared_ptr conf = std::make_shared(); + conf->SetConfigPath(kSCSConfigPath); + + ASSERT_TRUE(conf->LoadConfig()); + LOG(INFO) << kSCSConfigPath; + conf->PrintConfig(); + + SnapShotCloneServer *snapshotCloneServer = new SnapShotCloneServer(conf); + + snapshotCloneServer->InitAllSnapshotCloneOptions(); + + snapshotCloneServer->StartDummy(); + + snapshotCloneServer->StartCompaginLeader(); + + ASSERT_TRUE(snapshotCloneServer->Init()); + + ASSERT_TRUE(snapshotCloneServer->Start()); + + std::this_thread::sleep_for(std::chrono::seconds(2)); + + // Test and verify if the status is active + // "curl "127.0.0.1:port/vars/snapshotcloneserver_status""; + std::string cmd = + "curl \"127.0.0.1:" + std::string(kSnapshotCloneServerDummyServerPort) + + "/vars/" + std::string(statusMetricName) + "\""; + // snapshotcloneserver_status : "active\r\n" + std::string expectResult = std::string(statusMetricName) + " : \"" + + std::string(ACTIVE) + "\"\r\n"; + + FILE *fp = popen(cmd.c_str(), "r"); + ASSERT_TRUE(fp != nullptr); + char buf[1024]; + fread(buf, sizeof(char), sizeof(buf), fp); + pclose(fp); + std::string result(buf); + ASSERT_EQ(result, expectResult); + + snapshotCloneServer->Stop(); + LOG(INFO) << "snapshotCloneServer Stopped"; + } + } // namespace snapshotcloneserver +} // namespace curve diff --git a/test/kvstorageclient/etcdclient_test.cpp b/test/kvstorageclient/etcdclient_test.cpp index ff92a579f3..8bb7f66138 100644 --- a/test/kvstorageclient/etcdclient_test.cpp +++ b/test/kvstorageclient/etcdclient_test.cpp @@ -20,18 +20,20 @@ * Author: lixiaocui1 */ -#include #include -#include //NOLINT +#include + #include //NOLINT #include #include -#include "src/kvstorageclient/etcd_client.h" -#include "src/mds/nameserver2/helper/namespace_helper.h" -#include "src/common/timeutility.h" +#include //NOLINT + +#include "proto/nameserver2.pb.h" #include "src/common/concurrent/concurrent.h" +#include "src/common/timeutility.h" +#include "src/kvstorageclient/etcd_client.h" #include "src/mds/common/mds_define.h" -#include "proto/nameserver2.pb.h" +#include "src/mds/nameserver2/helper/namespace_helper.h" namespace curve { namespace kvstorage { @@ -43,7 +45,7 @@ using ::curve::mds::NameSpaceStorageCodec; using ::curve::mds::PageFileChunkInfo; using ::curve::mds::PageFileSegment; -// 接口测试 +// Interface testing class TestEtcdClinetImp : public ::testing::Test { protected: TestEtcdClinetImp() {} @@ -63,8 +65,9 @@ class TestEtcdClinetImp : public ::testing::Test { ASSERT_TRUE(false); } else if (0 == etcdPid) { /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, + * as it may cause deadlock!!! */ ASSERT_EQ(0, execlp("etcd", "etcd", "--listen-client-urls", @@ -75,7 +78,7 @@ class TestEtcdClinetImp : public ::testing::Test { exit(0); } - // 一定时间内尝试init直到etcd完全起来 + // Try init for a certain period of time until etcd is fully recovered uint64_t now = ::curve::common::TimeUtility::GetTimeofDaySec(); bool initSuccess = false; while (::curve::common::TimeUtility::GetTimeofDaySec() - now <= 50) { @@ -108,8 +111,8 @@ class TestEtcdClinetImp : public ::testing::Test { TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { // 1. 
put file - // - file0~file9 put到etcd中 - // - file6有快照 + // - file0~file9 put into etcd + // - file6 has a snapshot std::map keyMap; std::map fileName; FileInfo fileInfo7, fileInfo8; @@ -170,7 +173,7 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { } } - // 2. get file, 可以正确获取并解码file0~file9 + // 2. get file, which can correctly obtain and decode file0~file9 for (int i = 0; i < keyMap.size(); i++) { std::string out; int errCode = client_->Get(keyMap[i], &out); @@ -180,7 +183,7 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(fileName[i], fileinfo.filename()); } - // 3. list file, 可以list到file0~file9 + // 3. list file, which can be listed to file0~file9 std::vector listRes; std::vector> listRes2; int errCode = client_->List("01", "02", &listRes2); @@ -193,7 +196,8 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(fileName[i], finfo.filename()); } - // 4. delete file, 删除file0~file4,这部分文件不能再获取到 + // 4. Delete file, delete file0~file4, these files cannot be retrieved + // anymore for (int i = 0; i < keyMap.size() / 2; i++) { ASSERT_EQ(EtcdErrCode::EtcdOK, client_->Delete(keyMap[i])); // can not get delete file @@ -201,13 +205,13 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(EtcdErrCode::EtcdKeyNotExist, client_->Get(keyMap[i], &out)); } - // 5. rename file: rename file9 ~ file10, file10本来不存在 - Operation op1{OpType::OpDelete, const_cast(keyMap[9].c_str()), - const_cast(fileInfo9.c_str()), + // 5. Rename file: rename file9~file10, file10 does not originally exist + Operation op1{OpType::OpDelete, const_cast(keyMap[9].c_str()), + const_cast(fileInfo9.c_str()), static_cast(keyMap[9].size()), static_cast(fileInfo9.size())}; - Operation op2{OpType::OpPut, const_cast(fileKey10.c_str()), - const_cast(fileInfo10.c_str()), + Operation op2{OpType::OpPut, const_cast(fileKey10.c_str()), + const_cast(fileInfo10.c_str()), static_cast(fileKey10.size()), static_cast(fileInfo10.size())}; std::vector ops{op1, op2}; @@ -222,12 +226,12 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(fileName10, fileinfo.filename()); // 6. snapshot of keyMap[6] - Operation op3{OpType::OpPut, const_cast(keyMap[6].c_str()), - const_cast(fileInfo6.c_str()), + Operation op3{OpType::OpPut, const_cast(keyMap[6].c_str()), + const_cast(fileInfo6.c_str()), static_cast(keyMap[6].size()), static_cast(fileInfo6.size())}; - Operation op4{OpType::OpPut, const_cast(snapshotKey6.c_str()), - const_cast(snapshotInfo6.c_str()), + Operation op4{OpType::OpPut, const_cast(snapshotKey6.c_str()), + const_cast(snapshotInfo6.c_str()), static_cast(snapshotKey6.size()), static_cast(snapshotInfo6.size())}; ops.clear(); @@ -258,9 +262,8 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ("200", out); // 8. 
rename file: rename file7 ~ file8 - Operation op8{OpType::OpDelete, const_cast<char*>(keyMap[7].c_str()), - const_cast<char*>(""), static_cast<int>(keyMap[7].size()), - 0}; + Operation op8{OpType::OpDelete, const_cast<char*>(keyMap[7].c_str()), + const_cast<char*>(""), static_cast<int>(keyMap[7].size()), 0}; FileInfo newFileInfo7; newFileInfo7.CopyFrom(fileInfo7); newFileInfo7.set_parentid(fileInfo8.parentid()); @@ -271,17 +274,17 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { std::string encodeNewFileInfo7; ASSERT_TRUE(newFileInfo7.SerializeToString(&encodeNewFileInfo7)); Operation op9{OpType::OpPut, - const_cast<char*>(encodeNewFileInfo7Key.c_str()), - const_cast<char*>(encodeNewFileInfo7.c_str()), + const_cast<char*>(encodeNewFileInfo7Key.c_str()), + const_cast<char*>(encodeNewFileInfo7.c_str()), static_cast<int>(encodeNewFileInfo7Key.size()), static_cast<int>(encodeNewFileInfo7.size())}; ops.clear(); ops.emplace_back(op8); ops.emplace_back(op9); ASSERT_EQ(EtcdErrCode::EtcdOK, client_->TxnN(ops)); - // 不能获取 file7 + // file7 can no longer be obtained ASSERT_EQ(EtcdErrCode::EtcdKeyNotExist, client_->Get(keyMap[7], &out)); - // 成功获取rename以后的file7 + // file7 is obtained under its new key after the rename ASSERT_EQ(EtcdErrCode::EtcdOK, client_->Get(keyMap[8], &out)); ASSERT_TRUE(NameSpaceStorageCodec::DecodeFileInfo(out, &fileinfo)); ASSERT_EQ(newFileInfo7.filename(), fileinfo.filename()); @@ -304,8 +307,8 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(EtcdErrCode::EtcdDeadlineExceeded, client_->TxnN(ops)); client_->SetTimeout(5000); - Operation op5{OpType(5), const_cast<char*>(snapshotKey6.c_str()), - const_cast<char*>(snapshotInfo6.c_str()), + Operation op5{OpType(5), const_cast<char*>(snapshotKey6.c_str()), + const_cast<char*>(snapshotInfo6.c_str()), static_cast<int>(snapshotKey6.size()), static_cast<int>(snapshotInfo6.size())}; ops.clear(); @@ -321,7 +324,7 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { } TEST_F(TestEtcdClinetImp, test_ListWithLimitAndRevision) { - // 准备一批数据 + // Prepare a batch of data // "011" "013" "015" "017" "019" for (int i = 1; i <= 9; i += 2) { std::string key = std::string("01") + std::to_string(i); @@ -336,13 +339,13 @@ TEST_F(TestEtcdClinetImp, test_ListWithLimitAndRevision) { ASSERT_EQ(EtcdErrCode::EtcdOK, client_->Put(key, value)); } - // 获取当前revision - // 通过GetCurrentRevision获取 + // Obtain the current revision + // via GetCurrentRevision int64_t curRevision; ASSERT_EQ(EtcdErrCode::EtcdOK, client_->GetCurrentRevision(&curRevision)); LOG(INFO) << "get current revision: " << curRevision; - // 根据当前revision获取前5个key-value + // Fetch the first 5 key-value pairs at the current revision std::vector<std::string> out; std::string lastKey; int res = client_->ListWithLimitAndRevision("01", "", 5, curRevision, &out, &lastKey); @@ -355,7 +358,7 @@ TEST_F(TestEtcdClinetImp, test_ListWithLimitAndRevision) { ASSERT_EQ(value, out[i - 1]); } - // 根据当前revision获取后5个key-value + // Fetch the next 5 key-value pairs at the current revision out.clear(); res = client_->ListWithLimitAndRevision(lastKey, "", 5, curRevision, &out, &lastKey); @@ -395,37 +398,41 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { uint64_t leaderOid; { - // 1. leader1竞选成功,client退出后leader2竞选成功 + // 1.
leader1 wins the campaign; after the client exits, leader2 + // also wins the campaign LOG(INFO) << "test case1 start..."; - // 启动一个线程竞选leader + // Start a thread to campaign for leader int electionTimeoutMs = 0; uint64_t targetOid; common::Thread thread1(&EtcdClientImp::CampaignLeader, client_, pfx, leaderName1, sessionnInterSec, electionTimeoutMs, &targetOid); - // 等待线程1执行完成, 线程1执行完成就说明竞选成功, - // 否则electionTimeoutMs为0的情况下会一直hung在里面 + // Wait for thread 1 to finish; its finishing means the campaign + // succeeded. Otherwise, with electionTimeoutMs set to 0, it would + // hang in there forever thread1.join(); LOG(INFO) << "thread 1 exit."; client_->CloseClient(); - // 启动第二个线程竞选leader + // Start a second thread to campaign for leader auto client2 = std::make_shared<EtcdClientImp>(); ASSERT_EQ(0, client2->Init(conf, dialtTimeout, retryTimes)); common::Thread thread2(&EtcdClientImp::CampaignLeader, client2, pfx, leaderName2, sessionnInterSec, electionTimeoutMs, &leaderOid); - // 线程1退出后,leader2会当选 + // After thread1 exits, leader2 will be elected thread2.join(); LOG(INFO) << "thread 2 exit."; - // leader2为leader的情况下此时观察leader1的key应该发现session过期 + // With leader2 as leader, observing leader1's key should now find + // that its session has expired ASSERT_EQ(EtcdErrCode::EtcdObserverLeaderInternal, client2->LeaderObserve(targetOid, leaderName1)); client2->CloseClient(); } { - // 2. leader1竞选成功后,不退出; leader2竞选超时 + // 2. leader1 wins the campaign and does not quit; leader2's campaign + // times out LOG(INFO) << "test case2 start..."; int electionTimeoutMs = 1000; auto client1 = std::make_shared<EtcdClientImp>(); @@ -436,7 +443,7 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { thread1.join(); LOG(INFO) << "thread 1 exit."; - // leader2再次竞选 + // leader2 campaigns again common::Thread thread2(&EtcdClientImp::CampaignLeader, client1, pfx, leaderName2, sessionnInterSec, electionTimeoutMs, &leaderOid); @@ -446,8 +453,9 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { }
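// [Illustrative sketch, not part of the original change.] The campaign flow
// these cases exercise, using only the EtcdClientImp calls that appear in
// this test. With electionTimeoutMs == 0, CampaignLeader blocks until the
// campaign succeeds; with a non-zero timeout it returns an error instead:
//
//   uint64_t oid = 0;
//   client->CampaignLeader(pfx, leaderName, sessionnInterSec,
//                          electionTimeoutMs, &oid);
//   // Observe our own leadership; this returns with an error once the
//   // session expires, the key is deleted, or etcd becomes unreachable.
//   client->LeaderObserve(oid, leaderName);
//   // Alternatively, step down voluntarily within a 1000 ms timeout:
//   client->LeaderResign(oid, 1000);
{
- // 3. leader1竞选成功后,删除key; leader2竞选成功; observe leader1改变;
- // observer leader2的过程中etcd挂掉
+ // 3.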
leader1 wins the campaign, then deletes its key; leader2 wins + // the campaign; observing leader1 sees the change; etcd is killed + // while observing leader2 LOG(INFO) << "test case3 start..."; uint64_t targetOid; int electionTimeoutMs = 0; @@ -458,17 +466,17 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { &targetOid); thread1.join(); LOG(INFO) << "thread 1 exit."; - // leader1卸任leader + // leader1 resigns as leader ASSERT_EQ(EtcdErrCode::EtcdLeaderResiginSuccess, client1->LeaderResign(targetOid, 1000)); - // leader2当选 + // leader2 is elected common::Thread thread2(&EtcdClientImp::CampaignLeader, client1, pfx, leaderName2, sessionnInterSec, electionTimeoutMs, &leaderOid); thread2.join(); - // leader2启动线程observe + // leader2 starts an observe thread common::Thread thread3(&EtcdClientImp::LeaderObserve, client1, targetOid, leaderName2); std::this_thread::sleep_for(std::chrono::seconds(1)); @@ -477,7 +485,7 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { client1->CloseClient(); LOG(INFO) << "thread 2 exit."; - // 使得etcd完全停掉 + // Wait until etcd has stopped completely std::this_thread::sleep_for(std::chrono::seconds(2)); } } @@ -490,12 +498,13 @@ TEST_F(TestEtcdClinetImp, test_ListSegment) { segment.set_logicalpoolid(11); int size = segment.segmentsize() / segment.chunksize(); for (uint32_t i = 0; i < size; i++) { - PageFileChunkInfo *chunkinfo = segment.add_chunks(); + PageFileChunkInfo* chunkinfo = segment.add_chunks(); chunkinfo->set_chunkid(i + 1); chunkinfo->set_copysetid(i + 1); } - // 放入segment,前三个属于文件1,后四个属于文件2 + // Insert segments: the first three belong to file1, the last four + // to file2 uint64_t id1 = 101; uint64_t id2 = 100001; for (uint32_t i = 0; i < 7; ++i) { @@ -514,7 +523,7 @@ TEST_F(TestEtcdClinetImp, test_ListSegment) { LOG(INFO) << segment.startoffset(); } - // 获取文件1的segment + // Fetch the segments of file1 std::string startKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id1, 0); std::string endKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id1 + 1, 0); @@ -527,7 +536,7 @@ TEST_F(TestEtcdClinetImp, test_ListSegment) { ASSERT_EQ(i * 1024, segment2.startoffset()); } - // 获取文件2的segment + // Fetch the segments of file2 startKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id2, 0); endKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id2 + 1, 0); out.clear(); diff --git a/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp b/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp index 7fa055321b..222f76a6bc 100644 --- a/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp +++ b/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp @@ -20,24 +20,26 @@ * Author: lixiaocui */ -#include -#include #include "src/mds/heartbeat/chunkserver_healthy_checker.h" + +#include +#include + #include "src/mds/topology/topology_item.h" #include "test/mds/mock/mock_topology.h" +using ::curve::mds::topology::MockTopology; +using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; -using ::testing::_; -using ::curve::mds::topology::MockTopology; using ::curve::mds::topology::ChunkServer; using ::curve::mds::topology::ChunkServerStatus; -using ::curve::mds::topology::OnlineState; using ::curve::mds::topology::CopySetKey; -using ::curve::mds::topology::kTopoErrCodeSuccess; using ::curve::mds::topology::kTopoErrCodeInternalError; +using ::curve::mds::topology::kTopoErrCodeSuccess; +using ::curve::mds::topology::OnlineState; namespace curve { namespace mds { @@ -53,7 +55,7
@@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { HeartbeatInfo info; { - // chunkserver首次更新heartbeatInfo + // Chunkservers update their heartbeatInfo for the first time checker->UpdateLastReceivedHeartbeatTime(1, steady_clock::now()); checker->UpdateLastReceivedHeartbeatTime( 2, steady_clock::now() - std::chrono::milliseconds(4000)); @@ -65,8 +67,7 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { 6, steady_clock::now() - std::chrono::milliseconds(10000)); checker->UpdateLastReceivedHeartbeatTime( 7, steady_clock::now() - std::chrono::milliseconds(10000)); - checker->UpdateLastReceivedHeartbeatTime( - 8, steady_clock::now()); + checker->UpdateLastReceivedHeartbeatTime(8, steady_clock::now()); checker->UpdateLastReceivedHeartbeatTime( 9, steady_clock::now() - std::chrono::milliseconds(4000)); checker->UpdateLastReceivedHeartbeatTime( @@ -94,30 +95,31 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { } { - // chunkserver-1 更新为online - // chunkserver-2 心跳miss,保持unstable - // chunkserver-3,chunkserver-5,chunkserver-6心跳offline, - // chunkserver-3的retired状态会被更新, 从心跳map中移除 - // chunkserver-5已经是retired状态,无需更新 - // chunkserver-6 get info失败, 未成功更新状态 - // chunnkserver-7 update失败, 未成功更新状态 - // chunkserver-8, pendding && online, 更新为onLine - // chunkserver-9, pendding && unstable, 更新为retired - // chunkserver-10, pendding && offline, 更新为retired + // chunkserver-1 is updated to online + // chunkserver-2 misses its heartbeat and stays unstable + // chunkserver-3, chunkserver-5, chunkserver-6 are heartbeat-offline: + // chunkserver-3's retired status is updated and it is removed from + // the heartbeat map + // chunkserver-5 is already retired and needs no update + // chunkserver-6 get info fails, so its status is not updated + // chunkserver-7 update fails, so its status is not updated + // chunkserver-8, pendding && online, is updated to online + // chunkserver-9, pendding && unstable, is updated to retired + // chunkserver-10, pendding && offline, is updated to retired EXPECT_CALL(*topology, UpdateChunkServerOnlineState(_, _)) - .Times(7).WillRepeatedly(Return(kTopoErrCodeSuccess)); - ChunkServer cs2(2, "", "", 1, "", 0, "", - ChunkServerStatus::READWRITE, OnlineState::UNSTABLE); - ChunkServer cs3(3, "", "", 1, "", 0, "", - ChunkServerStatus::READWRITE, OnlineState::UNSTABLE); - ChunkServer cs5(5, "", "", 1, "", 0, "", - ChunkServerStatus::RETIRED, OnlineState::UNSTABLE); - ChunkServer cs7(7, "", "", 1, "", 0, "", - ChunkServerStatus::READWRITE, OnlineState::UNSTABLE); - ChunkServer cs9(9, "", "", 1, "", 0, "", - ChunkServerStatus::PENDDING, OnlineState::UNSTABLE); - ChunkServer cs10(10, "", "", 1, "", 0, "", - ChunkServerStatus::PENDDING, OnlineState::UNSTABLE); + .Times(7) + .WillRepeatedly(Return(kTopoErrCodeSuccess)); + ChunkServer cs2(2, "", "", 1, "", 0, "", ChunkServerStatus::READWRITE, + OnlineState::UNSTABLE); + ChunkServer cs3(3, "", "", 1, "", 0, "", ChunkServerStatus::READWRITE, + OnlineState::UNSTABLE); + ChunkServer cs5(5, "", "", 1, "", 0, "", ChunkServerStatus::RETIRED, + OnlineState::UNSTABLE); + ChunkServer cs7(7, "", "", 1, "", 0, "", ChunkServerStatus::READWRITE, + OnlineState::UNSTABLE); + ChunkServer cs9(9, "", "", 1, "", 0, "", ChunkServerStatus::PENDDING, + OnlineState::UNSTABLE); + ChunkServer cs10(10, "", "", 1, "", 0, "", ChunkServerStatus::PENDDING, + OnlineState::UNSTABLE); EXPECT_CALL(*topology, GetChunkServer(2, _)) .WillOnce(DoAll(SetArgPointee<1>(cs2), Return(true))); EXPECT_CALL(*topology, GetChunkServer(3, _))
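// [Illustrative sketch, not part of the original change.] The expectations
// above pin down the checker's state-transition rule. Written against the
// HeartbeatOption fields used elsewhere in this patch
// (heartbeatMissTimeOutMs, offLineTimeOutMs); the helper name NextState is
// hypothetical:
//
//   OnlineState NextState(std::chrono::milliseconds sinceLastHeartbeat,
//                         const HeartbeatOption& opt) {
//       if (sinceLastHeartbeat.count() < opt.heartbeatMissTimeOutMs)
//           return OnlineState::ONLINE;
//       if (sinceLastHeartbeat.count() < opt.offLineTimeOutMs)
//           return OnlineState::UNSTABLE;
//       // Once offline, a READWRITE/PENDDING chunkserver may additionally
//       // be marked RETIRED and dropped from the heartbeat map.
//       return OnlineState::OFFLINE;
//   }
@@ -128,8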
+130,7 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { .WillOnce(Return(std::vector{})); EXPECT_CALL(*topology, GetChunkServer(5, _)) .WillOnce(DoAll(SetArgPointee<1>(cs5), Return(true))); - EXPECT_CALL(*topology, GetChunkServer(6, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*topology, GetChunkServer(6, _)).WillOnce(Return(false)); EXPECT_CALL(*topology, GetChunkServer(7, _)) .WillOnce(DoAll(SetArgPointee<1>(cs7), Return(true))); EXPECT_CALL(*topology, GetChunkServer(9, _)) @@ -164,15 +165,13 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { } { - // chunkserver 2, 6 ,7 收到心跳 - checker->UpdateLastReceivedHeartbeatTime( - 2, steady_clock::now()); - checker->UpdateLastReceivedHeartbeatTime( - 6, steady_clock::now()); - checker->UpdateLastReceivedHeartbeatTime( - 7, steady_clock::now()); + // chunkserver 2, 6, 7 Heartbeat received + checker->UpdateLastReceivedHeartbeatTime(2, steady_clock::now()); + checker->UpdateLastReceivedHeartbeatTime(6, steady_clock::now()); + checker->UpdateLastReceivedHeartbeatTime(7, steady_clock::now()); EXPECT_CALL(*topology, UpdateChunkServerOnlineState(_, _)) - .Times(3).WillRepeatedly(Return(kTopoErrCodeSuccess)); + .Times(3) + .WillRepeatedly(Return(kTopoErrCodeSuccess)); checker->CheckHeartBeatInterval(); ASSERT_TRUE(checker->GetHeartBeatInfo(2, &info)); ASSERT_EQ(OnlineState::ONLINE, info.state); diff --git a/test/mds/heartbeat/heartbeat_manager_test.cpp b/test/mds/heartbeat/heartbeat_manager_test.cpp index 54c4397287..6f1b539405 100644 --- a/test/mds/heartbeat/heartbeat_manager_test.cpp +++ b/test/mds/heartbeat/heartbeat_manager_test.cpp @@ -20,52 +20,54 @@ * Author: lixiaocui */ -#include +#include "src/mds/heartbeat/heartbeat_manager.h" + #include +#include #include -#include "src/mds/heartbeat/heartbeat_manager.h" -#include "src/mds/heartbeat/chunkserver_healthy_checker.h" + #include "src/common/timeutility.h" +#include "src/mds/heartbeat/chunkserver_healthy_checker.h" +#include "test/mds/heartbeat/common.h" #include "test/mds/mock/mock_coordinator.h" -#include "test/mds/mock/mock_topology.h" #include "test/mds/mock/mock_topoAdapter.h" -#include "test/mds/heartbeat/common.h" +#include "test/mds/mock/mock_topology.h" -using ::testing::Return; -using ::testing::SetArgPointee; -using ::testing::DoAll; -using ::testing::_; using ::curve::mds::topology::MockTopology; using ::curve::mds::topology::MockTopologyStat; +using ::testing::_; +using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace mds { namespace heartbeat { class TestHeartbeatManager : public ::testing::Test { protected: - TestHeartbeatManager() {} - ~TestHeartbeatManager() {} - - void SetUp() override { - HeartbeatOption option; - option.cleanFollowerAfterMs = 0; - option.heartbeatMissTimeOutMs = 10000; - option.offLineTimeOutMs = 30000; - option.mdsStartTime = steady_clock::now(); - topology_ = std::make_shared(); - coordinator_ = std::make_shared(); - topologyStat_ = std::make_shared(); - heartbeatManager_ = std::make_shared( - option, topology_, topologyStat_, coordinator_); - } - - void TearDown() override {} + TestHeartbeatManager() {} + ~TestHeartbeatManager() {} + + void SetUp() override { + HeartbeatOption option; + option.cleanFollowerAfterMs = 0; + option.heartbeatMissTimeOutMs = 10000; + option.offLineTimeOutMs = 30000; + option.mdsStartTime = steady_clock::now(); + topology_ = std::make_shared(); + coordinator_ = std::make_shared(); + topologyStat_ = std::make_shared(); + heartbeatManager_ = 
std::make_shared( + option, topology_, topologyStat_, coordinator_); + } + + void TearDown() override {} protected: - std::shared_ptr topology_; - std::shared_ptr topologyStat_; - std::shared_ptr coordinator_; - std::shared_ptr heartbeatManager_; + std::shared_ptr topology_; + std::shared_ptr topologyStat_; + std::shared_ptr coordinator_; + std::shared_ptr heartbeatManager_; }; TEST_F(TestHeartbeatManager, test_stop_and_run) { @@ -124,9 +126,10 @@ TEST_F(TestHeartbeatManager, test_checkReuqest_abnormal) { ASSERT_EQ(0, response.needupdatecopysets_size()); // 7. startTime not initialized - // TODO(lixiaocui): 后续考虑心跳加上错误码 - ::curve::mds::topology::ChunkServer normalCs( - 1, "hello", "", 1, "192.168.10.1", 9000, ""); + // TODO(lixiaocui): Consider adding an error code to the heartbeat in the + // future + ::curve::mds::topology::ChunkServer normalCs(1, "hello", "", 1, + "192.168.10.1", 9000, ""); EXPECT_CALL(*topology_, GetChunkServer(_, _)) .WillOnce(DoAll(SetArgPointee<1>(normalCs), Return(true))); heartbeatManager_->ChunkServerHeartbeat(req, &response); @@ -138,7 +141,7 @@ TEST_F(TestHeartbeatManager, test_checkReuqest_abnormal) { EXPECT_CALL(*topology_, GetChunkServer(_, _)) .WillOnce(DoAll(SetArgPointee<1>(normalCs), Return(true))); EXPECT_CALL(*topology_, - UpdateChunkServerStartUpTime(t, req.chunkserverid())) + UpdateChunkServerStartUpTime(t, req.chunkserverid())) .WillOnce(Return(::curve::mds::topology::kTopoErrCodeSuccess)); heartbeatManager_->ChunkServerHeartbeat(req, &response); ASSERT_EQ(0, response.needupdatecopysets_size()); @@ -148,7 +151,7 @@ TEST_F(TestHeartbeatManager, test_checkReuqest_abnormal) { EXPECT_CALL(*topology_, GetChunkServer(_, _)) .WillOnce(DoAll(SetArgPointee<1>(normalCs), Return(true))); EXPECT_CALL(*topology_, - UpdateChunkServerStartUpTime(0, req.chunkserverid())) + UpdateChunkServerStartUpTime(0, req.chunkserverid())) .WillOnce(Return(::curve::mds::topology::kTopoErrCodeSuccess)); heartbeatManager_->ChunkServerHeartbeat(req, &response); ASSERT_EQ(0, response.needupdatecopysets_size()); @@ -394,8 +397,7 @@ TEST_F(TestHeartbeatManager, 2, "hello", "", 1, "192.168.10.2", 9000, "", ::curve::mds::topology::ChunkServerStatus::READWRITE); EXPECT_CALL(*topology_, GetChunkServerNotRetired("192.168.10.2", _, _)) - .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), - Return(true))); + .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), Return(true))); EXPECT_CALL(*topology_, GetChunkServerNotRetired("192.168.10.1", _, _)) .WillOnce(DoAll(SetArgPointee<2>(chunkServer1), Return(true))); ::curve::mds::topology::ChunkServer chunkServer3( @@ -450,8 +452,7 @@ TEST_F(TestHeartbeatManager, 2, "hello", "", 1, "192.168.10.2", 9000, "", ::curve::mds::topology::ChunkServerStatus::READWRITE); EXPECT_CALL(*topology_, GetChunkServerNotRetired("192.168.10.2", _, _)) - .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), - Return(true))); + .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), Return(true))); EXPECT_CALL(*topology_, GetChunkServerNotRetired("192.168.10.1", _, _)) .WillOnce(DoAll(SetArgPointee<2>(chunkServer1), Return(true))); ::curve::mds::topology::ChunkServer chunkServer3( @@ -509,8 +510,7 @@ TEST_F(TestHeartbeatManager, 2, "hello", "", 1, "192.168.10.2", 9000, "", ::curve::mds::topology::ChunkServerStatus::READWRITE); EXPECT_CALL(*topology_, GetChunkServerNotRetired("192.168.10.2", _, _)) - .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), - Return(true))); + .WillOnce(DoAll(SetArgPointee<2>(leaderChunkServer), Return(true))); EXPECT_CALL(*topology_, 
GetChunkServerNotRetired("192.168.10.1", _, _)) .WillOnce(DoAll(SetArgPointee<2>(chunkServer1), Return(true))); ::curve::mds::topology::ChunkServer chunkServer3( @@ -626,7 +626,8 @@ TEST_F(TestHeartbeatManager, test_chunkServer_heartbeat_get_copySetInfo_err) { .WillOnce(DoAll(SetArgPointee<2>(chunkServer2), Return(true))) .WillOnce(DoAll(SetArgPointee<2>(chunkServer3), Return(true))); EXPECT_CALL(*topology_, GetCopySet(_, _)) - .Times(2).WillRepeatedly(Return(false)); + .Times(2) + .WillRepeatedly(Return(false)); heartbeatManager_->ChunkServerHeartbeat(request, &response); ASSERT_EQ(1, response.needupdatecopysets_size()); ASSERT_EQ(1, response.needupdatecopysets(0).logicalpoolid()); @@ -634,8 +635,7 @@ TEST_F(TestHeartbeatManager, test_chunkServer_heartbeat_get_copySetInfo_err) { ASSERT_EQ(0, response.needupdatecopysets(0).epoch()); } -TEST_F(TestHeartbeatManager, - test_handle_copySetInfo_stale_epoch_update_err) { +TEST_F(TestHeartbeatManager, test_handle_copySetInfo_stale_epoch_update_err) { auto request = GetChunkServerHeartbeatRequestForTest(); ChunkServerHeartbeatResponse response; ::curve::mds::topology::ChunkServer chunkServer1( @@ -937,5 +937,3 @@ TEST_F(TestHeartbeatManager, test_patrol_copySetInfo_return_order) { } // namespace heartbeat } // namespace mds } // namespace curve - - diff --git a/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp b/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp index 11c70f8572..d7a49f2c8b 100644 --- a/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp +++ b/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp @@ -20,166 +20,179 @@ * Author: lixiaocui */ +#include "src/mds/nameserver2/allocstatistic/alloc_statistic_helper.h" + #include + #include -#include "src/mds/nameserver2/helper/namespace_helper.h" -#include "src/mds/nameserver2/allocstatistic/alloc_statistic_helper.h" + #include "src/common/namespace_define.h" +#include "src/mds/nameserver2/helper/namespace_helper.h" #include "test/mds/mock/mock_etcdclient.h" using ::testing::_; -using ::testing::Return; -using ::testing::SetArgPointee; using ::testing::DoAll; using ::testing::Matcher; +using ::testing::Return; +using ::testing::SetArgPointee; -using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTALLOCSIZEKEY; -using ::curve::common::SEGMENTINFOKEYPREFIX; +using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTINFOKEYEND; +using ::curve::common::SEGMENTINFOKEYPREFIX; -namespace curve { -namespace mds { -TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) { - auto mockEtcdClient = std::make_shared(); - - { - // 1. list失败 - EXPECT_CALL(*mockEtcdClient, - List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, - Matcher*>(_))) - .WillOnce(Return(EtcdErrCode::EtcdCanceled)); - std::map out; - ASSERT_EQ(-1, AllocStatisticHelper::GetExistSegmentAllocValues( - &out, mockEtcdClient)); - } - - { - // 2. list成功,解析失败 - std::vector values{"hello"}; - EXPECT_CALL(*mockEtcdClient, - List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, - Matcher*>(_))) - .WillOnce( - DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); - std::map out; - ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues( - &out, mockEtcdClient)); - } +namespace curve +{ + namespace mds { - // 3. 
获取已有的segment alloc value成功 - std::vector values{ - NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; - EXPECT_CALL(*mockEtcdClient, - List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, - Matcher*>(_))) - .WillOnce( - DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); - std::map out; - ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues( - &out, mockEtcdClient)); - ASSERT_EQ(1, out.size()); - ASSERT_EQ(1024, out[1]); - } -} + TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) + { + auto mockEtcdClient = std::make_shared(); -TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { - auto mockEtcdClient = std::make_shared(); - { - // 1. CalculateSegmentAlloc ok - LOG(INFO) << "start test1......"; - EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) - .WillOnce(Return(EtcdErrCode::EtcdUnknown)); - std::map out; - ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); - } - { - // 2. ListWithLimitAndRevision成功,但是解析失败 - LOG(INFO) << "start test2......"; - std::vector values{"hello"}; - std::string lastKey = "021"; - EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) - .WillOnce( - DoAll(SetArgPointee<4>(values), Return(EtcdErrCode::EtcdOK))); - std::map out; - ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); - } - { - // 3. ListWithLimitAndRevision成功, 解析成功, bundle=1000, 获取个数为1 - LOG(INFO) << "start test3......"; - PageFileSegment segment; - segment.set_segmentsize(1 << 30); - segment.set_logicalpoolid(1); - segment.set_chunksize(16*1024*1024); - segment.set_startoffset(0); - std::string encodeSegment; - ASSERT_TRUE( - NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); - std::vector values{encodeSegment}; - std::string lastKey = - NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 0); - EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) - .WillOnce(DoAll(SetArgPointee<4>(values), SetArgPointee<5>(lastKey), - Return(EtcdErrCode::EtcdOK))); - std::map out; - ASSERT_EQ(0, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); - ASSERT_EQ(1, out.size()); - ASSERT_EQ(1 << 30, out[1]); - } - { - // 4. ListWithLimitAndRevision成功, 解析成功 - // bundle=1000, 获取个数为1001 - LOG(INFO) << "start test4......"; - PageFileSegment segment; - segment.set_segmentsize(1 << 30); - segment.set_logicalpoolid(1); - segment.set_chunksize(16*1024*1024); - segment.set_startoffset(0); - std::string encodeSegment; - std::vector values; - ASSERT_TRUE( - NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); - for (int i = 1; i <= 500; i++) { - values.emplace_back(encodeSegment); - } + { + // 1. list failed + EXPECT_CALL(*mockEtcdClient, + List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, + Matcher *>(_))) + .WillOnce(Return(EtcdErrCode::EtcdCanceled)); + std::map out; + ASSERT_EQ(-1, AllocStatisticHelper::GetExistSegmentAllocValues( + &out, mockEtcdClient)); + } - segment.set_logicalpoolid(2); - ASSERT_TRUE( - NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); - for (int i = 501; i <= 1000; i++) { - values.emplace_back(encodeSegment); + { + // 2. 
List succeeds but parsing fails + std::vector<std::string> values{"hello"}; + EXPECT_CALL(*mockEtcdClient, + List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, + Matcher<std::vector<std::string>*>(_))) + .WillOnce( + DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); + std::map out; + ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues( + &out, mockEtcdClient)); + } + { + // 3. The existing segment alloc values are fetched successfully + std::vector<std::string> values{ + NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; + EXPECT_CALL(*mockEtcdClient, + List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, + Matcher<std::vector<std::string>*>(_))) + .WillOnce( + DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); + std::map out; + ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues( + &out, mockEtcdClient)); + ASSERT_EQ(1, out.size()); + ASSERT_EQ(1024, out[1]); + } } TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) + { + auto mockEtcdClient = std::make_shared<MockEtcdClient>(); + { + // 1. CalculateSegmentAlloc ok + LOG(INFO) << "start test1......"; + EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + .WillOnce(Return(EtcdErrCode::EtcdUnknown)); + std::map out; + ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc( + 2, mockEtcdClient, &out)); + } + { + // 2. ListWithLimitAndRevision succeeded, but parsing failed + LOG(INFO) << "start test2......"; + std::vector<std::string> values{"hello"}; + std::string lastKey = "021"; + EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + .WillOnce( + DoAll(SetArgPointee<4>(values), Return(EtcdErrCode::EtcdOK))); + std::map out; + ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc( + 2, mockEtcdClient, &out)); + }
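// [Illustrative sketch, not part of the original change.] Cases 3 and 4
// below pin down how CalculateSegmentAlloc pages through all segments at one
// fixed revision, GETBUNDLE entries at a time, via the mocked
// ListWithLimitAndRevision(startKey, endKey, limit, revision, *values,
// *lastKey) signature; the surrounding loop shape is hypothetical:
//
//   std::string startKey = SEGMENTINFOKEYPREFIX;
//   for (;;) {
//       std::vector<std::string> values;
//       std::string lastKey;
//       int res = client->ListWithLimitAndRevision(
//           startKey, SEGMENTINFOKEYEND, GETBUNDLE, revision, &values,
//           &lastKey);
//       if (res != EtcdErrCode::EtcdOK) return -1;
//       // On pages after the first, values.front() repeats the previous
//       // page's last entry and is skipped; each remaining value is decoded
//       // into a PageFileSegment and its segmentsize() is added to the
//       // total of its logicalpoolid().
//       if (values.size() < GETBUNDLE) break;  // final page reached
//       startKey = lastKey;
//   }
+ {
+ // 3.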
ListWithLimitAndRevision succeeds and parsing succeeds; + // bundle=1000, a single entry is returned + LOG(INFO) << "start test3......"; + PageFileSegment segment; + segment.set_segmentsize(1 << 30); + segment.set_logicalpoolid(1); + segment.set_chunksize(16 * 1024 * 1024); + segment.set_startoffset(0); + std::string encodeSegment; + ASSERT_TRUE( + NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); + std::vector<std::string> values{encodeSegment}; + std::string lastKey = + NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 0); + EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + .WillOnce(DoAll(SetArgPointee<4>(values), SetArgPointee<5>(lastKey), + Return(EtcdErrCode::EtcdOK))); + std::map out; + ASSERT_EQ(0, AllocStatisticHelper::CalculateSegmentAlloc( + 2, mockEtcdClient, &out)); + ASSERT_EQ(1, out.size()); + ASSERT_EQ(1 << 30, out[1]); + } + { + // 4. ListWithLimitAndRevision succeeds and parsing succeeds; + // bundle=1000, 1001 entries are returned + LOG(INFO) << "start test4......"; + PageFileSegment segment; + segment.set_segmentsize(1 << 30); + segment.set_logicalpoolid(1); + segment.set_chunksize(16 * 1024 * 1024); + segment.set_startoffset(0); + std::string encodeSegment; + std::vector<std::string> values; + ASSERT_TRUE( + NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); + for (int i = 1; i <= 500; i++) + { + values.emplace_back(encodeSegment); + } + segment.set_logicalpoolid(2); + ASSERT_TRUE( + NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); + for (int i = 501; i <= 1000; i++) + { + values.emplace_back(encodeSegment); + } + std::string lastKey1 = + NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 500); + std::string lastKey2 = + NameSpaceStorageCodec::EncodeSegmentStoreKey(501, 1000); + EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + .WillOnce(DoAll(SetArgPointee<4>(values), + SetArgPointee<5>(lastKey1), + Return(EtcdErrCode::EtcdOK))); + EXPECT_CALL(*mockEtcdClient, + ListWithLimitAndRevision(lastKey1, SEGMENTINFOKEYEND, + GETBUNDLE, 2, _, _)) + .WillOnce(DoAll(SetArgPointee<4>(std::vector<std::string>{ + encodeSegment, encodeSegment}), + SetArgPointee<5>(lastKey2), + Return(EtcdErrCode::EtcdOK))); + std::map out; + ASSERT_EQ(0, AllocStatisticHelper::CalculateSegmentAlloc( + 2, mockEtcdClient, &out)); + ASSERT_EQ(2, out.size()); + ASSERT_EQ(500L * (1 << 30), out[1]); + ASSERT_EQ(501L * (1 << 30), out[2]); + } + } + } // namespace mds +} // namespace curve diff --git a/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp b/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp index c51e91587c..3a4b579852 100644 --- a/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp +++ b/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp @@ -28,205 +28,212 @@ #include "src/common/namespace_define.h" using ::testing::_; -using ::testing::Return; -using ::testing::SetArgPointee; using ::testing::DoAll; using ::testing::Matcher; +using ::testing::Return; +using ::testing::SetArgPointee; -using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTALLOCSIZEKEY; +using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTINFOKEYEND; using ::curve::common::SEGMENTINFOKEYPREFIX; -namespace curve { -namespace mds { - -class AllocStatisticTest : public ::testing::Test { - protected: - void SetUp() override { - periodicPersistInterMs_ = 2; - retryInterMs_ = 2; - mockEtcdClient_ = std::make_shared<MockEtcdClient>();
- allocStatistic_ = std::make_shared( - periodicPersistInterMs_, retryInterMs_, mockEtcdClient_); - } - - protected: - int64_t periodicPersistInterMs_; - int64_t retryInterMs_; - std::shared_ptr allocStatistic_; - std::shared_ptr mockEtcdClient_; -}; - -TEST_F(AllocStatisticTest, test_Init) { - { - // 1. 从etcd中获取当前revision失败 - LOG(INFO) << "test1......"; - EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)). - WillOnce(Return(EtcdErrCode::EtcdCanceled)); - ASSERT_EQ(-1, allocStatistic_->Init()); - } +namespace curve +{ + namespace mds { - // 2. 获取已经存在的logicalPool对应的alloc大小失败 - LOG(INFO) << "test2......"; - EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)). - WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, - List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, - Matcher*>(_))) - .WillOnce(Return(EtcdErrCode::EtcdCanceled)); - ASSERT_EQ(-1, allocStatistic_->Init()); - int64_t alloc; - ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); - } - { - // 3. init成功 - LOG(INFO) << "test3......"; - std::vector values{ - NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; - EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)). - WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); - EXPECT_CALL(*mockEtcdClient_, - List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, - Matcher*>(_))) - .WillOnce( - DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); - ASSERT_EQ(0, allocStatistic_->Init()); - int64_t alloc; - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); - ASSERT_EQ(1024, alloc); - } -} - -TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { - // 初始化 allocStatistic - // 旧值: logicalPooId(1):1024 - std::vector values{ - NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; - EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) - .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); - EXPECT_CALL(*mockEtcdClient_, - List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, - Matcher*>(_))) - .WillOnce(DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); - ASSERT_EQ(0, allocStatistic_->Init()); - - PageFileSegment segment; - segment.set_segmentsize(1 << 30); - segment.set_logicalpoolid(1); - segment.set_chunksize(16*1024*1024); - segment.set_startoffset(0); - std::string encodeSegment; - values.clear(); - ASSERT_TRUE( - NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); - for (int i = 1; i <= 500; i++) { - values.emplace_back(encodeSegment); - } - - // 1. 在定期持久化线程和统计线程启动前,只能获取旧值 - int64_t alloc; - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); - ASSERT_EQ(1024, alloc); - ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); - - // 2. 
更新segment的值 - allocStatistic_->DeAllocSpace(1, 64, 1); - allocStatistic_->AllocSpace(1, 32, 1); - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); - ASSERT_EQ(1024 - 32, alloc); - - // 设置mock的etcd中segment的值 - // logicalPoolId(1):500 * (1<<30) - // logicalPoolId(2):501 * (1<<30) - segment.set_logicalpoolid(2); - ASSERT_TRUE( - NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); - for (int i = 501; i <= 1000; i++) { - values.emplace_back(encodeSegment); - } - std::string lastKey1 = - NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 500); - std::string lastKey2 = - NameSpaceStorageCodec::EncodeSegmentStoreKey(501, 1000); - EXPECT_CALL(*mockEtcdClient_, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) - .Times(2) - .WillOnce(Return(EtcdErrCode::EtcdCanceled)) - .WillOnce(DoAll(SetArgPointee<4>(values), - SetArgPointee<5>(lastKey1), - Return(EtcdErrCode::EtcdOK))); - EXPECT_CALL(*mockEtcdClient_, ListWithLimitAndRevision( - lastKey1, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) - .WillOnce(DoAll(SetArgPointee<4>( - std::vector{encodeSegment, encodeSegment}), - SetArgPointee<5>(lastKey2), - Return(EtcdErrCode::EtcdOK))); - EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) - .Times(2) - .WillOnce(Return(EtcdErrCode::EtcdCanceled)) - .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); - - // 设置mock的Put结果 - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(1), - NameSpaceStorageCodec::EncodeSegmentAllocValue( - 1, 1024 - 32 + (1L << 30)))) - .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(2), - NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 1L << 30))) - .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(1), - NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 501L *(1 << 30)))) - .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(2), - NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 502L *(1 << 30)))) - .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(1), - NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 500L *(1 << 30)))) - .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(2), - NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 501L *(1 << 30)))) - .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(3), - NameSpaceStorageCodec::EncodeSegmentAllocValue(3, 1L << 30))) - .WillOnce(Return(EtcdErrCode::EtcdOK)); - - // 2. 
启动定期持久化线程和统计线程 - for (int i = 1; i <= 2; i++) { - allocStatistic_->AllocSpace(i, 1L << 30, i + 3); - } - allocStatistic_->Run(); - std::this_thread::sleep_for(std::chrono::seconds(6)); - - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); - ASSERT_EQ(501L *(1 << 30), alloc); - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); - ASSERT_EQ(502L *(1 << 30), alloc); - std::this_thread::sleep_for(std::chrono::milliseconds(30)); - - // 再通过alloc进行更新 - for (int i = 1; i <= 2; i++) { - allocStatistic_->DeAllocSpace(i, 1L << 30, i + 4); - } - allocStatistic_->AllocSpace(3, 1L << 30, 10); - - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); - ASSERT_EQ(500L *(1 << 30), alloc); - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); - ASSERT_EQ(501L *(1 << 30), alloc); - ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(3, &alloc)); - ASSERT_EQ(1L << 30, alloc); - std::this_thread::sleep_for(std::chrono::milliseconds(30)); - - allocStatistic_->Stop(); -} - -} // namespace mds -} // namespace curve + + class AllocStatisticTest : public ::testing::Test + { + protected: + void SetUp() override + { + periodicPersistInterMs_ = 2; + retryInterMs_ = 2; + mockEtcdClient_ = std::make_shared(); + allocStatistic_ = std::make_shared( + periodicPersistInterMs_, retryInterMs_, mockEtcdClient_); + } + + protected: + int64_t periodicPersistInterMs_; + int64_t retryInterMs_; + std::shared_ptr allocStatistic_; + std::shared_ptr mockEtcdClient_; + }; + + TEST_F(AllocStatisticTest, test_Init) + { + { + // 1. Failed to obtain the current revision from ETCD + LOG(INFO) << "test1......"; + EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)).WillOnce(Return(EtcdErrCode::EtcdCanceled)); + ASSERT_EQ(-1, allocStatistic_->Init()); + } + { + // 2. Failed to obtain the alloc size corresponding to the existing logicalPool + LOG(INFO) << "test2......"; + EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)).WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*mockEtcdClient_, + List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, + Matcher *>(_))) + .WillOnce(Return(EtcdErrCode::EtcdCanceled)); + ASSERT_EQ(-1, allocStatistic_->Init()); + int64_t alloc; + ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); + } + { + // 3. 
Init succeeds + LOG(INFO) << "test3......"; + std::vector<std::string> values{ + NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; + EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)).WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); + EXPECT_CALL(*mockEtcdClient_, + List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, + Matcher<std::vector<std::string>*>(_))) + .WillOnce( + DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); + ASSERT_EQ(0, allocStatistic_->Init()); + int64_t alloc; + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); + ASSERT_EQ(1024, alloc); + } + } + + TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) + { + // Initialize allocStatistic_ + // Old value: logicalPoolId(1):1024 + std::vector<std::string> values{ + NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; + EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) + .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); + EXPECT_CALL(*mockEtcdClient_, + List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, + Matcher<std::vector<std::string>*>(_))) + .WillOnce(DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); + ASSERT_EQ(0, allocStatistic_->Init()); + + PageFileSegment segment; + segment.set_segmentsize(1 << 30); + segment.set_logicalpoolid(1); + segment.set_chunksize(16 * 1024 * 1024); + segment.set_startoffset(0); + std::string encodeSegment; + values.clear(); + ASSERT_TRUE( + NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); + for (int i = 1; i <= 500; i++) + { + values.emplace_back(encodeSegment); + } + + // 1. Before the periodic-persistence and statistics threads start, only the old values can be read + int64_t alloc; + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); + ASSERT_EQ(1024, alloc); + ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); + + // 2. Update the segment usage + allocStatistic_->DeAllocSpace(1, 64, 1); + allocStatistic_->AllocSpace(1, 32, 1); + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); + ASSERT_EQ(1024 - 32, alloc); + + // Set the segment values in the mocked etcd + // logicalPoolId(1):500 * (1<<30) + // logicalPoolId(2):501 * (1<<30) + segment.set_logicalpoolid(2); + ASSERT_TRUE( + NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); + for (int i = 501; i <= 1000; i++) + { + values.emplace_back(encodeSegment); + } + std::string lastKey1 = + NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 500); + std::string lastKey2 = + NameSpaceStorageCodec::EncodeSegmentStoreKey(501, 1000); + EXPECT_CALL(*mockEtcdClient_, ListWithLimitAndRevision( + SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + .Times(2) + .WillOnce(Return(EtcdErrCode::EtcdCanceled)) + .WillOnce(DoAll(SetArgPointee<4>(values), + SetArgPointee<5>(lastKey1), + Return(EtcdErrCode::EtcdOK))); + EXPECT_CALL(*mockEtcdClient_, ListWithLimitAndRevision( + lastKey1, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + .WillOnce(DoAll(SetArgPointee<4>( + std::vector<std::string>{encodeSegment, encodeSegment}), + SetArgPointee<5>(lastKey2), + Return(EtcdErrCode::EtcdOK))); + EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) + .Times(2) + .WillOnce(Return(EtcdErrCode::EtcdCanceled)) + .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); + + // Set the mocked Put results + EXPECT_CALL(*mockEtcdClient_, Put( + NameSpaceStorageCodec::EncodeSegmentAllocKey(1), + NameSpaceStorageCodec::EncodeSegmentAllocValue( + 1, 1024 - 32 + (1L << 30)))) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*mockEtcdClient_, Put(
NameSpaceStorageCodec::EncodeSegmentAllocKey(2), + NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 1L << 30))) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*mockEtcdClient_, Put( + NameSpaceStorageCodec::EncodeSegmentAllocKey(1), + NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 501L * (1 << 30)))) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*mockEtcdClient_, Put( + NameSpaceStorageCodec::EncodeSegmentAllocKey(2), + NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 502L * (1 << 30)))) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*mockEtcdClient_, Put( + NameSpaceStorageCodec::EncodeSegmentAllocKey(1), + NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 500L * (1 << 30)))) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*mockEtcdClient_, Put( + NameSpaceStorageCodec::EncodeSegmentAllocKey(2), + NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 501L * (1 << 30)))) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*mockEtcdClient_, Put( + NameSpaceStorageCodec::EncodeSegmentAllocKey(3), + NameSpaceStorageCodec::EncodeSegmentAllocValue(3, 1L << 30))) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + + // 3. Start the periodic-persistence and statistics threads + for (int i = 1; i <= 2; i++) + { + allocStatistic_->AllocSpace(i, 1L << 30, i + 3); + } + allocStatistic_->Run(); + std::this_thread::sleep_for(std::chrono::seconds(6)); + + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); + ASSERT_EQ(501L * (1 << 30), alloc); + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); + ASSERT_EQ(502L * (1 << 30), alloc); + std::this_thread::sleep_for(std::chrono::milliseconds(30)); + + // Update again through alloc/dealloc + for (int i = 1; i <= 2; i++) + { + allocStatistic_->DeAllocSpace(i, 1L << 30, i + 4); + } + allocStatistic_->AllocSpace(3, 1L << 30, 10); + + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); + ASSERT_EQ(500L * (1 << 30), alloc); + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); + ASSERT_EQ(501L * (1 << 30), alloc); + ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(3, &alloc)); + ASSERT_EQ(1L << 30, alloc); + std::this_thread::sleep_for(std::chrono::milliseconds(30)); + + allocStatistic_->Stop(); + } + + } // namespace mds +} // namespace curve diff --git a/test/mds/nameserver2/clean_core_test.cpp b/test/mds/nameserver2/clean_core_test.cpp index 5288fd83d6..ca568b7209 100644 --- a/test/mds/nameserver2/clean_core_test.cpp +++ b/test/mds/nameserver2/clean_core_test.cpp @@ -20,23 +20,25 @@ * Author: hzsunjianliang */ -#include -#include -#include #include "src/mds/nameserver2/clean_core.h" -#include "test/mds/nameserver2/mock/mock_namespace_storage.h" -#include "test/mds/mock/mock_topology.h" + +#include +#include +#include + #include "src/mds/chunkserverclient/copyset_client.h" #include "test/mds/mock/mock_alloc_statistic.h" #include "test/mds/mock/mock_chunkserverclient.h" +#include "test/mds/mock/mock_topology.h" +#include "test/mds/nameserver2/mock/mock_namespace_storage.h" +using ::curve::mds::chunkserverclient::ChunkServerClientOption; +using ::curve::mds::chunkserverclient::MockChunkServerClient; +using curve::mds::topology::MockTopology; using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; -using curve::mds::topology::MockTopology; -using ::curve::mds::chunkserverclient::ChunkServerClientOption; -using ::curve::mds::chunkserverclient::MockChunkServerClient; namespace curve { namespace mds {
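// [Illustrative sketch, not part of the original change.] The
// PeriodicPersist scenario above expects exactly one Put per logical pool,
// built with the same codec helpers the test mocks. The persist step has
// roughly this shape (the map name allocByPool is hypothetical):
//
//   for (const auto& kv : allocByPool) {  // logicalPoolId -> allocated bytes
//       etcdClient->Put(
//           NameSpaceStorageCodec::EncodeSegmentAllocKey(kv.first),
//           NameSpaceStorageCodec::EncodeSegmentAllocValue(kv.first,
//                                                          kv.second));
//   }
@@ -56,8 +58,8 @@ class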
CleanCoreTest : public testing::Test { cleanCore_ = std::make_shared<CleanCore>(storage_, client_, allocStatistic_); - csClient_ = std::make_shared<MockChunkServerClient>( - topology_, option_, channelPool_); + csClient_ = std::make_shared<MockChunkServerClient>(topology_, option_, + channelPool_); } void TearDown() override {} @@ -81,7 +83,7 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { cleanFile.set_segmentsize(0); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::KInternalError); + StatusCode::KInternalError); } { @@ -89,19 +91,19 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(_, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .WillOnce(Return(StoreStatus::KeyNotExist)); } EXPECT_CALL(*storage_, DeleteSnapshotFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::kOK); + StatusCode::kOK); ASSERT_EQ(progress.GetStatus(), TaskStatus::SUCCESS); ASSERT_EQ(progress.GetProgress(), 100); @@ -111,47 +113,48 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(_, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .WillOnce(Return(StoreStatus::KeyNotExist)); } EXPECT_CALL(*storage_, DeleteSnapshotFile(_, _)) - .WillOnce(Return(StoreStatus::InternalError)); + .WillOnce(Return(StoreStatus::InternalError)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::kSnapshotFileDeleteError); + StatusCode::kSnapshotFileDeleteError); } { // get segment error EXPECT_CALL(*storage_, GetSegment(_, 0, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::kSnapshotFileDeleteError); + StatusCode::kSnapshotFileDeleteError); } { - // 联调Bug修复:快照文件共享源文件的segment,所以在查询segment的时候需要使用 - // ParentID 进行查找 + // Joint debugging bug fix: a snapshot file shares the segments of the + // source file, so the lookup must use ParentID + // when querying segments uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; uint64_t expectParentID = 101; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(expectParentID, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .WillOnce(Return(StoreStatus::KeyNotExist)); } EXPECT_CALL(*storage_, DeleteSnapshotFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); @@ -159,7 +162,7 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { cleanFile.set_parentid(expectParentID); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::kOK); + StatusCode::kOK);
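// [Illustrative sketch, not part of the original change.] The case above
// pins the fix that snapshot files own no segments of their own: they share
// the source file's, so lookups must key on parentid(). Assuming the storage
// interface behind the mock (the helper name is hypothetical):
//
//   StoreStatus GetSnapshotSegment(NameServerStorage* storage,
//                                  const FileInfo& snapshot, uint64_t offset,
//                                  PageFileSegment* segment) {
//       // Query by the source file's id, not by the snapshot's own id.
//       return storage->GetSegment(snapshot.parentid(), offset, segment);
//   }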
ASSERT_EQ(progress.GetStatus(), TaskStatus::SUCCESS); ASSERT_EQ(progress.GetProgress(), 100); @@ -173,19 +176,19 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(_, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::OK)); + .WillOnce(Return(StoreStatus::OK)); } EXPECT_CALL(*storage_, DeleteSnapshotFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanSnapShotFile(cleanFile, &progress), - StatusCode::kOK); + StatusCode::kOK); ASSERT_EQ(progress.GetStatus(), TaskStatus::SUCCESS); ASSERT_EQ(progress.GetProgress(), 100); @@ -200,7 +203,7 @@ TEST_F(CleanCoreTest, testcleanfile) { cleanFile.set_segmentsize(0); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), - StatusCode::KInternalError); + StatusCode::KInternalError); } { @@ -208,19 +211,18 @@ TEST_F(CleanCoreTest, testcleanfile) { uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(_, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .WillOnce(Return(StoreStatus::KeyNotExist)); } EXPECT_CALL(*storage_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; - ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), - StatusCode::kOK); + ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), StatusCode::kOK); ASSERT_EQ(progress.GetStatus(), TaskStatus::SUCCESS); ASSERT_EQ(progress.GetProgress(), 100); @@ -231,52 +233,51 @@ TEST_F(CleanCoreTest, testcleanfile) { uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; for (uint32_t i = 0; i < segmentNum; i++) { EXPECT_CALL(*storage_, GetSegment(_, i * DefaultSegmentSize, _)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .WillOnce(Return(StoreStatus::KeyNotExist)); } EXPECT_CALL(*storage_, DeleteFile(_, _)) - .WillOnce(Return(StoreStatus::InternalError)); + .WillOnce(Return(StoreStatus::InternalError)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), - StatusCode::kCommonFileDeleteError); + StatusCode::kCommonFileDeleteError); ASSERT_EQ(progress.GetStatus(), TaskStatus::FAILED); } { // get segment error EXPECT_CALL(*storage_, GetSegment(_, 0, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), - StatusCode::kCommonFileDeleteError); + StatusCode::kCommonFileDeleteError); ASSERT_EQ(progress.GetStatus(), TaskStatus::FAILED); } { // get segment ok, DeleteSnapShotChunk Error - } - { + } { // get segment ok, DeleteSnapShotChunk ok, DeleteSegment error EXPECT_CALL(*storage_, GetSegment(_, 0, _)) - .WillOnce(Return(StoreStatus::OK)); + .WillOnce(Return(StoreStatus::OK)); EXPECT_CALL(*storage_, DeleteSegment(_, _, _)) - 
.WillOnce(Return(StoreStatus::InternalError)); + .WillOnce(Return(StoreStatus::InternalError)); FileInfo cleanFile; cleanFile.set_length(kMiniFileLength); cleanFile.set_segmentsize(DefaultSegmentSize); TaskProgress progress; ASSERT_EQ(cleanCore_->CleanFile(cleanFile, &progress), - StatusCode::kCommonFileDeleteError); + StatusCode::kCommonFileDeleteError); ASSERT_EQ(progress.GetStatus(), TaskStatus::FAILED); } } @@ -310,12 +311,9 @@ TEST_F(CleanCoreTest, TestCleanDiscardSegment) { // CopysetClient DeleteChunk failed { - EXPECT_CALL(*topology_, GetCopySet(_, _)) - .WillOnce(Return(false)); - EXPECT_CALL(*storage_, CleanDiscardSegment(_, _, _)) - .Times(0); - EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)) - .Times(0); + EXPECT_CALL(*topology_, GetCopySet(_, _)).WillOnce(Return(false)); + EXPECT_CALL(*storage_, CleanDiscardSegment(_, _, _)).Times(0); + EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)).Times(0); TaskProgress progress; ASSERT_EQ(StatusCode::KInternalError, cleanCore_->CleanDiscardSegment(fakeKey, discardSegmentInfo, @@ -333,16 +331,14 @@ TEST_F(CleanCoreTest, TestCleanDiscardSegment) { EXPECT_CALL(*topology_, GetCopySet(_, _)) .Times(segment.chunks_size()) - .WillRepeatedly( - DoAll(SetArgPointee<1>(copyset), Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(copyset), Return(true))); EXPECT_CALL(*csClient_, DeleteChunk(_, _, _, _, _)) .Times(segment.chunks_size()) .WillRepeatedly(Return(kMdsSuccess)); EXPECT_CALL(*storage_, CleanDiscardSegment(_, _, _)) .WillOnce(Return(StoreStatus::InternalError)); - EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)) - .Times(0); + EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)).Times(0); TaskProgress progress; ASSERT_EQ(StatusCode::KInternalError, @@ -361,16 +357,14 @@ TEST_F(CleanCoreTest, TestCleanDiscardSegment) { EXPECT_CALL(*topology_, GetCopySet(_, _)) .Times(segment.chunks_size()) - .WillRepeatedly( - DoAll(SetArgPointee<1>(copyset), Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(copyset), Return(true))); EXPECT_CALL(*csClient_, DeleteChunk(_, _, _, _, _)) .Times(segment.chunks_size()) .WillRepeatedly(Return(kMdsSuccess)); EXPECT_CALL(*storage_, CleanDiscardSegment(_, _, _)) .WillOnce(Return(StoreStatus::OK)); - EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)) - .Times(1); + EXPECT_CALL(*allocStatistic_, DeAllocSpace(_, _, _)).Times(1); TaskProgress progress; ASSERT_EQ(StatusCode::kOK, cleanCore_->CleanDiscardSegment( diff --git a/test/mds/nameserver2/curvefs_test.cpp b/test/mds/nameserver2/curvefs_test.cpp index 899b942ee8..7ce79cb724 100644 --- a/test/mds/nameserver2/curvefs_test.cpp +++ b/test/mds/nameserver2/curvefs_test.cpp @@ -19,46 +19,47 @@ * Created Date: Wednesday September 12th 2018 * Author: hzsunjianliang */ -#include -#include #include "src/mds/nameserver2/curvefs.h" -#include "src/mds/nameserver2/idgenerator/inode_id_generator.h" -#include "src/mds/nameserver2/namespace_storage.h" + +#include +#include + +#include "src/common/namespace_define.h" #include "src/common/timeutility.h" #include "src/mds/common/mds_define.h" +#include "src/mds/nameserver2/idgenerator/inode_id_generator.h" +#include "src/mds/nameserver2/namespace_storage.h" #include "src/mds/topology/topology_item.h" -#include "src/common/namespace_define.h" - -#include "test/mds/nameserver2/mock/mock_namespace_storage.h" -#include "test/mds/nameserver2/mock/mock_inode_id_generator.h" +#include "test/mds/mock/mock_alloc_statistic.h" +#include "test/mds/mock/mock_topology.h" #include 
"test/mds/nameserver2/mock/mock_chunk_allocate.h" #include "test/mds/nameserver2/mock/mock_clean_manager.h" -#include "test/mds/nameserver2/mock/mock_snapshotclone_client.h" #include "test/mds/nameserver2/mock/mock_file_record_manager.h" -#include "test/mds/mock/mock_alloc_statistic.h" -#include "test/mds/mock/mock_topology.h" +#include "test/mds/nameserver2/mock/mock_inode_id_generator.h" +#include "test/mds/nameserver2/mock/mock_namespace_storage.h" +#include "test/mds/nameserver2/mock/mock_snapshotclone_client.h" -using ::testing::AtLeast; -using ::testing::StrEq; +using curve::common::Authenticator; using ::testing::_; +using ::testing::AtLeast; +using ::testing::DoAll; +using ::testing::Invoke; using ::testing::Return; using ::testing::ReturnArg; -using ::testing::DoAll; -using ::testing::SetArgPointee; using ::testing::SaveArg; -using ::testing::Invoke; -using curve::common::Authenticator; +using ::testing::SetArgPointee; +using ::testing::StrEq; +using curve::common::kDefaultPoolsetName; using curve::common::TimeUtility; -using curve::mds::topology::MockTopology; -using curve::mds::snapshotcloneclient::MockSnapshotCloneClient; using curve::mds::snapshotcloneclient::DestFileInfo; -using curve::common::kDefaultPoolsetName; +using curve::mds::snapshotcloneclient::MockSnapshotCloneClient; +using curve::mds::topology::MockTopology; namespace curve { namespace mds { -class CurveFSTest: public ::testing::Test { +class CurveFSTest : public ::testing::Test { protected: void SetUp() override { storage_ = std::make_shared(); @@ -68,7 +69,8 @@ class CurveFSTest: public ::testing::Test { mockcleanManager_ = std::make_shared(); topology_ = std::make_shared(); snapshotClient_ = std::make_shared(); - // session repo已经mock,数据库相关参数不需要 + // The session repo has been mocked, and database related parameters are + // not required fileRecordManager_ = std::make_shared(); fileRecordOptions_.fileRecordExpiredTimeUs = 5 * 1000; fileRecordOptions_.scanIntervalTimeUs = 1 * 1000; @@ -83,7 +85,7 @@ class CurveFSTest: public ::testing::Test { curveFSOptions_.authOptions = authOptions_; curveFSOptions_.fileRecordOptions = fileRecordOptions_; - curvefs_ = &kCurveFS; + curvefs_ = &kCurveFS; allocStatistic_ = std::make_shared(); FileInfo fileInfo; @@ -95,16 +97,12 @@ class CurveFSTest: public ::testing::Test { fileInfo.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(AtLeast(1)) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); curvefs_->Init(storage_, inodeIdGenerator_, mockChunkAllocator_, - mockcleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions_, - topology_, - snapshotClient_); + mockcleanManager_, fileRecordManager_, allocStatistic_, + curveFSOptions_, topology_, snapshotClient_); DefaultSegmentSize = curvefs_->GetDefaultSegmentSize(); kMiniFileLength = curvefs_->GetMinFileLength(); kMaxFileLength = curvefs_->GetMaxFileLength(); @@ -115,11 +113,9 @@ class CurveFSTest: public ::testing::Test { Return(std::vector{kDefaultPoolsetName})); } - void TearDown() override { - curvefs_->Uninit(); - } + void TearDown() override { curvefs_->Uninit(); } - CurveFS *curvefs_; + CurveFS* curvefs_; std::shared_ptr storage_; std::shared_ptr inodeIdGenerator_; std::shared_ptr mockChunkAllocator_; @@ -140,108 +136,112 @@ class CurveFSTest: public ::testing::Test { TEST_F(CurveFSTest, testCreateFile1) { // test parm error std::map spacePools; - spacePools.insert(std::pair(1, - 
kMaxFileLength - kMiniFileLength)); - EXPECT_CALL(*mockChunkAllocator_, - GetRemainingSpaceInLogicalPool(_, _, _)) + spacePools.insert( + std::pair(1, kMaxFileLength - kMiniFileLength)); + EXPECT_CALL(*mockChunkAllocator_, GetRemainingSpaceInLogicalPool(_, _, _)) .Times(AtLeast(1)) .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); - ASSERT_EQ(curvefs_->CreateFile("/file1", "", - "owner1", FileType::INODE_PAGEFILE, - kMiniFileLength - 1, 0, 0), - StatusCode::kFileLengthNotSupported); - - ASSERT_EQ(curvefs_->CreateFile("/file1", "", - "owner1", FileType::INODE_PAGEFILE, - kMaxFileLength + 1, 0, 0), - StatusCode::kFileLengthNotSupported); - - ASSERT_EQ(curvefs_->CreateFile("/flie1", "", - "owner1", FileType::INODE_PAGEFILE, - kMiniFileLength + 1, 0, 0), - StatusCode::kFileLengthNotSupported); + ASSERT_EQ( + curvefs_->CreateFile("/file1", "", "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength - 1, 0, 0), + StatusCode::kFileLengthNotSupported); + + ASSERT_EQ( + curvefs_->CreateFile("/file1", "", "owner1", FileType::INODE_PAGEFILE, + kMaxFileLength + 1, 0, 0), + StatusCode::kFileLengthNotSupported); + + ASSERT_EQ( + curvefs_->CreateFile("/flie1", "", "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength + 1, 0, 0), + StatusCode::kFileLengthNotSupported); - ASSERT_EQ(curvefs_->CreateFile("/flie1", "", "owner1", - FileType::INODE_PAGEFILE, - kMaxFileLength - kMiniFileLength + DefaultSegmentSize, - 0, 0), StatusCode::kFileLengthNotSupported); + ASSERT_EQ(curvefs_->CreateFile( + "/flie1", "", "owner1", FileType::INODE_PAGEFILE, + kMaxFileLength - kMiniFileLength + DefaultSegmentSize, 0, 0), + StatusCode::kFileLengthNotSupported); - ASSERT_EQ(curvefs_->CreateFile("/", "", "", FileType::INODE_DIRECTORY, - 0, 0, 0), StatusCode::kFileExists); + ASSERT_EQ( + curvefs_->CreateFile("/", "", "", FileType::INODE_DIRECTORY, 0, 0, 0), + StatusCode::kFileExists); { // test file exist EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kFileExists); } { // test get storage error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::InternalError)); auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { // test put storage error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::InternalError)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(true)); + .Times(1) + .WillOnce(Return(true)); auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { // test put storage ok EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - 
.WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(true)); - + .Times(1) + .WillOnce(Return(true)); auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kOK); } { // test inode allocate error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(false)); + .Times(1) + .WillOnce(Return(false)); auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kStorageError); } @@ -253,17 +253,15 @@ TEST_F(CurveFSTest, testCreateFile1) { FileInfo fileInfo; EXPECT_CALL(*storage_, PutFile(_)) - .WillOnce(DoAll( - SaveArg<0>(&fileInfo), - Return(StoreStatus::OK))); + .WillOnce(DoAll(SaveArg<0>(&fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) .Times(1) .WillOnce(Return(true)); - auto statusCode = curvefs_->CreateFile( - "/dir1", kDefaultPoolsetName, "owner1", - FileType::INODE_DIRECTORY, 0, 0, 0); + auto statusCode = + curvefs_->CreateFile("/dir1", kDefaultPoolsetName, "owner1", + FileType::INODE_DIRECTORY, 0, 0, 0); ASSERT_EQ(statusCode, StatusCode::kOK); ASSERT_FALSE(fileInfo.has_throttleparams()); } @@ -276,18 +274,15 @@ TEST_F(CurveFSTest, testCreateFile1) { FileInfo fileInfo; EXPECT_CALL(*storage_, PutFile(_)) - .WillOnce(DoAll( - SaveArg<0>(&fileInfo), - Return(StoreStatus::OK))); + .WillOnce(DoAll(SaveArg<0>(&fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) .Times(1) .WillOnce(Return(true)); - auto statusCode = - curvefs_->CreateFile("/file1", kDefaultPoolsetName, - "owner1", FileType::INODE_PAGEFILE, - kMiniFileLength, 0, 0); + auto statusCode = curvefs_->CreateFile( + "/file1", kDefaultPoolsetName, "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kOK); ASSERT_TRUE(fileInfo.has_throttleparams()); } @@ -300,71 +295,76 @@ TEST_F(CurveFSTest, testCreateStripeFile) { spacePools.insert(std::pair(1, kMaxFileLength)); spacePools.insert(std::pair(2, kMaxFileLength)); EXPECT_CALL(*mockChunkAllocator_, - GetRemainingSpaceInLogicalPool(_, _, _)) - .Times(AtLeast(1)) - .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); + GetRemainingSpaceInLogicalPool(_, _, _)) + .Times(AtLeast(1)) + .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(true)); + .Times(1) + .WillOnce(Return(true)); ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, - 1 * 1024 * 
1024, 4), StatusCode::kOK); + FileType::INODE_PAGEFILE, + kMiniFileLength, 1 * 1024 * 1024, 4), + StatusCode::kOK); } { // test stripeStripe and stripeCount is not all zero ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 0, 1), - StatusCode::kParaError); + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 1), + StatusCode::kParaError); ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 1024*1024ul, - 0), StatusCode::kParaError); + FileType::INODE_PAGEFILE, + kMiniFileLength, 1024 * 1024ul, 0), + StatusCode::kParaError); } { // test stripeUnit more then chunksize - ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 16*1024*1024ul + 1, - 0), StatusCode::kParaError); + ASSERT_EQ(curvefs_->CreateFile( + "/file1", "", "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength, 16 * 1024 * 1024ul + 1, 0), + StatusCode::kParaError); } { // test stripeUnit is not divisible by chunksize - ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, - 4*1024*1024ul + 1, 0), StatusCode::kParaError); + ASSERT_EQ(curvefs_->CreateFile( + "/file1", "", "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength, 4 * 1024 * 1024ul + 1, 0), + StatusCode::kParaError); } } TEST_F(CurveFSTest, testCreateFileWithPoolset) { const std::map spacePools{ - {1, kMaxFileLength}, - {2, kMaxFileLength}, + {1, kMaxFileLength}, + {2, kMaxFileLength}, }; EXPECT_CALL(*mockChunkAllocator_, GetRemainingSpaceInLogicalPool(_, _, _)) - .Times(AtLeast(1)) - .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); + .Times(AtLeast(1)) + .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillRepeatedly(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillRepeatedly(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .WillRepeatedly(Invoke([](uint64_t* id) { - static std::atomic counter{0}; - *id = counter++; - return true; - })); + .WillRepeatedly(Invoke([](uint64_t* id) { + static std::atomic counter{0}; + *id = counter++; + return true; + })); // create file without poolset, assign to default poolset { @@ -382,8 +382,8 @@ TEST_F(CurveFSTest, testCreateFileWithPoolset) { // create file with poolset but not same with anyone { EXPECT_CALL(*topology_, GetPoolsetNameInCluster(_)) - .WillOnce(Return( - std::vector{kDefaultPoolsetName, "SSD"})); + .WillOnce( + Return(std::vector{kDefaultPoolsetName, "SSD"})); ASSERT_EQ(StatusCode::kPoolsetNotExist, curvefs_->CreateFile("/file1", "HDD", "owner", @@ -393,8 +393,7 @@ TEST_F(CurveFSTest, testCreateFileWithPoolset) { // create file with poolset and poolset exists { - EXPECT_CALL(*storage_, PutFile(_)) - .WillOnce(Return(StoreStatus::OK)); + EXPECT_CALL(*storage_, PutFile(_)).WillOnce(Return(StoreStatus::OK)); EXPECT_CALL(*topology_, GetPoolsetNameInCluster(_)); ASSERT_EQ(StatusCode::kOK, @@ -406,8 +405,7 @@ TEST_F(CurveFSTest, testCreateFileWithPoolset) { // cluster doesn't have poolset { EXPECT_CALL(*topology_, GetPoolsetNameInCluster(_)) - .WillOnce(Return( - std::vector{})); + .WillOnce(Return(std::vector{})); ASSERT_EQ(StatusCode::kPoolsetNotExist, curvefs_->CreateFile("/file1", "SSD", "owner", FileType::INODE_PAGEFILE, @@ -419,23 +417,19 @@ TEST(TestSelectPoolsetByRules, Test) { ASSERT_EQ(kDefaultPoolsetName, SelectPoolsetByRules("/filename", {})); { - std::map rules{ - {"/system/", 
"system"} - }; + std::map rules{{"/system/", "system"}}; ASSERT_EQ("system", SelectPoolsetByRules("/system/file", rules)); } { - std::map rules{ - {"/system/", "system"} - }; + std::map rules{{"/system/", "system"}}; ASSERT_EQ(kDefaultPoolsetName, SelectPoolsetByRules("/systems", rules)); } { std::map rules{ - {"/system/", "system"}, - {"/systems/", "system1"}, + {"/system/", "system"}, + {"/systems/", "system1"}, }; ASSERT_EQ("system1", SelectPoolsetByRules("/systems/file", rules)); } @@ -443,9 +437,7 @@ TEST(TestSelectPoolsetByRules, Test) { // subdir rules { std::map rules{ - {"/system/", "system"}, - {"/system/sub/", "system-sub"} - }; + {"/system/", "system"}, {"/system/sub/", "system-sub"}}; ASSERT_EQ("system-sub", SelectPoolsetByRules("/system/sub/file", rules)); @@ -462,15 +454,15 @@ TEST_F(CurveFSTest, testGetFileInfo) { FileInfo rootFileInfo = curvefs_->GetRootFileInfo(); ASSERT_EQ(fileInfo.id(), rootFileInfo.id()); - ASSERT_EQ(fileInfo.filename(), rootFileInfo.filename()); + ASSERT_EQ(fileInfo.filename(), rootFileInfo.filename()); ASSERT_EQ(fileInfo.filetype(), rootFileInfo.filetype()); { // test path not exist - FileInfo fileInfo; + FileInfo fileInfo; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->GetFileInfo("/file1/file2", &fileInfo), StatusCode::kFileNotExists); } @@ -478,8 +470,8 @@ TEST_F(CurveFSTest, testGetFileInfo) { // test stoarge error FileInfo fileInfo; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->GetFileInfo("/file1/file2", &fileInfo), StatusCode::kStorageError); } @@ -487,134 +479,134 @@ TEST_F(CurveFSTest, testGetFileInfo) { // test ok FileInfo fileInfo; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(Return(StoreStatus::OK)); + .Times(2) + .WillRepeatedly(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->GetFileInfo("/file1/file2", &fileInfo), StatusCode::kOK); } { // test WalkPath NOT DIRECTORY - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); FileInfo retFileInfo; std::string lastEntry; ASSERT_EQ(curvefs_->GetFileInfo("/testdir/file1", &retFileInfo), - StatusCode::kFileNotExists); + StatusCode::kFileNotExists); } { // test LookUpFile internal Error - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::InternalError)); FileInfo fileInfo1; ASSERT_EQ(curvefs_->GetFileInfo("testdir/file1", &fileInfo1), - StatusCode::kStorageError); + StatusCode::kStorageError); } } TEST_F(CurveFSTest, testDeleteFile) { // test remove root ASSERT_EQ(curvefs_->DeleteFile("/", kUnitializedFileID, false), - StatusCode::kParaError); + StatusCode::kParaError); // test delete directory ok { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - 
.Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->DeleteFile("/dir1", kUnitializedFileID, false), - StatusCode::kOK); + StatusCode::kOK); } // test delete directory, directory is not empty { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; fileInfoList.push_back(fileInfo); EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->DeleteFile("/dir1", kUnitializedFileID, false), - StatusCode::kDirNotEmpty); + StatusCode::kDirNotEmpty); } // test delete directory, delete file fail { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->DeleteFile("/dir1", kUnitializedFileID, false), - StatusCode::kStorageError); + StatusCode::kStorageError); } // test delete pagefile ok { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kOK); + StatusCode::kOK); } // test delete recyclebin pagefile,cleanManager fail @@ -623,44 +615,42 @@ TEST_F(CurveFSTest, testDeleteFile) { recycleBindir.set_parentid(ROOTINODEID); recycleBindir.set_filetype(FileType::INODE_DIRECTORY); - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_parentid(RECYCLEBININODEID); fileInfo.set_filetype(FileType::INODE_PAGEFILE); 
EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(4) - .WillOnce(DoAll(SetArgPointee<2>(recycleBindir), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(recycleBindir), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(4) + .WillOnce( + DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); std::vector fileInfoList; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); - EXPECT_CALL(*mockcleanManager_, - GetTask(_)) + EXPECT_CALL(*mockcleanManager_, GetTask(_)) .Times(1) .WillOnce(Return(nullptr)); - EXPECT_CALL(*mockcleanManager_, - SubmitDeleteCommonFileJob(_)) - .Times(1) - .WillOnce(Return(false)); + EXPECT_CALL(*mockcleanManager_, SubmitDeleteCommonFileJob(_)) + .Times(1) + .WillOnce(Return(false)); ASSERT_EQ(curvefs_->DeleteFile(RECYCLEBINDIR + "/file1", - kUnitializedFileID, true), - StatusCode::KInternalError); + kUnitializedFileID, true), + StatusCode::KInternalError); } // test force delete recyclebin file ok @@ -669,44 +659,42 @@ TEST_F(CurveFSTest, testDeleteFile) { recycleBindir.set_parentid(ROOTINODEID); recycleBindir.set_filetype(FileType::INODE_DIRECTORY); - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_parentid(RECYCLEBININODEID); fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(4) - .WillOnce(DoAll(SetArgPointee<2>(recycleBindir), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(recycleBindir), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(4) + .WillOnce( + DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - EXPECT_CALL(*mockcleanManager_, - GetTask(_)) + EXPECT_CALL(*mockcleanManager_, GetTask(_)) .Times(1) .WillOnce(Return(nullptr)); - EXPECT_CALL(*mockcleanManager_, - SubmitDeleteCommonFileJob(_)) - .Times(1) - .WillOnce(Return(true)); + EXPECT_CALL(*mockcleanManager_, SubmitDeleteCommonFileJob(_)) + .Times(1) + .WillOnce(Return(true)); ASSERT_EQ(curvefs_->DeleteFile(RECYCLEBINDIR + "/file1", - kUnitializedFileID, true), - StatusCode::kOK); + kUnitializedFileID, true), + StatusCode::kOK); } // test force delete already deleting @@ -715,250 
+703,245 @@ TEST_F(CurveFSTest, testDeleteFile) { recycleBindir.set_parentid(ROOTINODEID); recycleBindir.set_filetype(FileType::INODE_DIRECTORY); - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_parentid(RECYCLEBININODEID); fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(4) - .WillOnce(DoAll(SetArgPointee<2>(recycleBindir), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(recycleBindir), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(4) + .WillOnce( + DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(recycleBindir), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); // mockcleanManager_ = std::make_shared(); auto notNullTask = std::make_shared(1, nullptr, fileInfo); - EXPECT_CALL(*mockcleanManager_, - GetTask(_)) + EXPECT_CALL(*mockcleanManager_, GetTask(_)) .Times(1) .WillOnce(Return(notNullTask)); ASSERT_EQ(curvefs_->DeleteFile(RECYCLEBINDIR + "/file1", - kUnitializedFileID, true), - StatusCode::kOK); + kUnitializedFileID, true), + StatusCode::kOK); } - // test force delete file not in recyclebin + // test force delete file not in recyclebin { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_parentid(USERSTARTINODEID); fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, true), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } // test delete pagefile, file under snapshot { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; fileInfoList.push_back(fileInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kFileUnderSnapShot); + StatusCode::kFileUnderSnapShot); } // test delete pagefile, storage error { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + 
.WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector fileInfoList; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kStorageError); + StatusCode::kStorageError); } // test file not exist { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kFileNotExists); + StatusCode::kFileNotExists); } // delete not support file type { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_APPENDFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } // test delete pagefile, file under clone { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileBeingCloned); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); CloneRefStatus status = CloneRefStatus::kHasRef; EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(status), - Return(StatusCode::kOK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(status), Return(StatusCode::kOK))); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kDeleteFileBeingCloned); + StatusCode::kDeleteFileBeingCloned); } // test delete pagefile, file under clone but has no ref but delete fail { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileBeingCloned); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); CloneRefStatus status = CloneRefStatus::kNoRef; EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(status), - Return(StatusCode::kOK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(status), Return(StatusCode::kOK))); EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, 
false), - StatusCode::kStorageError); + StatusCode::kStorageError); } // test delete pagefile, file under clone but has no ref success { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileBeingCloned); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); CloneRefStatus status = CloneRefStatus::kNoRef; EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(status), - Return(StatusCode::kOK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(status), Return(StatusCode::kOK))); EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kOK); + StatusCode::kOK); } // test delete pagefile, file under clone but need check list empty { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileBeingCloned); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); CloneRefStatus status = CloneRefStatus::kNeedCheck; EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(status), - Return(StatusCode::kOK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(status), Return(StatusCode::kOK))); EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kOK); + StatusCode::kOK); } // test delete pagefile, file under clone but need check, file has ref { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileBeingCloned); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::KeyNotExist))); + .Times(3) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce(DoAll(SetArgPointee<2>(fileInfo), + Return(StoreStatus::KeyNotExist))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); CloneRefStatus status = CloneRefStatus::kNeedCheck; std::vector fileCheckList; @@ -967,37 +950,37 @@ TEST_F(CurveFSTest, testDeleteFile) { info.inodeid = 100; fileCheckList.push_back(info); EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(status), - 
SetArgPointee<3>(fileCheckList), - Return(StatusCode::kOK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(status), + SetArgPointee<3>(fileCheckList), + Return(StatusCode::kOK))); EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kOK); + StatusCode::kOK); } // test delete pagefile, file under clone but need check, inode mismatch { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileBeingCloned); fileInfo.set_id(10); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); CloneRefStatus status = CloneRefStatus::kNeedCheck; std::vector fileCheckList; @@ -1006,37 +989,37 @@ TEST_F(CurveFSTest, testDeleteFile) { info.inodeid = 100; fileCheckList.push_back(info); EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(status), - SetArgPointee<3>(fileCheckList), - Return(StatusCode::kOK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(status), + SetArgPointee<3>(fileCheckList), + Return(StatusCode::kOK))); EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kOK); + StatusCode::kOK); } // test delete pagefile, file under clone but need check, has ref { - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileBeingCloned); fileInfo.set_id(100); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); CloneRefStatus status = CloneRefStatus::kNeedCheck; std::vector fileCheckList; @@ -1045,17 +1028,17 @@ TEST_F(CurveFSTest, testDeleteFile) { info.inodeid = 100; fileCheckList.push_back(info); EXPECT_CALL(*snapshotClient_, GetCloneRefStatus(_, _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(status), - SetArgPointee<3>(fileCheckList), - Return(StatusCode::kOK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(status), + SetArgPointee<3>(fileCheckList), + Return(StatusCode::kOK))); // EXPECT_CALL(*storage_, MoveFileToRecycle(_, _)) // .Times(1) 
// .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->DeleteFile("/file1", kUnitializedFileID, false), - StatusCode::kDeleteFileBeingCloned); + StatusCode::kDeleteFileBeingCloned); } // test delete failed when mds didn't start for enough time @@ -1113,7 +1096,7 @@ TEST_F(CurveFSTest, testDeleteFile) { TEST_F(CurveFSTest, testGetAllocatedSize) { AllocatedSize allocSize; - FileInfo fileInfo; + FileInfo fileInfo; uint64_t segmentSize = 1 * 1024 * 1024 * 1024ul; fileInfo.set_id(0); fileInfo.set_filetype(FileType::INODE_PAGEFILE); @@ -1130,22 +1113,21 @@ TEST_F(CurveFSTest, testGetAllocatedSize) { segment.set_logicalpoolid(2); segments.emplace_back(segment); - // test page file normal { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSegment(_, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(segments), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<1>(segments), Return(StoreStatus::OK))); ASSERT_EQ(StatusCode::kOK, - curvefs_->GetAllocatedSize("/tests", &allocSize)); + curvefs_->GetAllocatedSize("/tests", &allocSize)); ASSERT_EQ(3 * segmentSize, allocSize.total); - std::unordered_map expected = - {{1, 2 * segmentSize}, {2, segmentSize}}; + std::unordered_map expected = { + {1, 2 * segmentSize}, {2, segmentSize}}; ASSERT_EQ(expected, allocSize.allocSizeMap); } // test directory normal @@ -1157,73 +1139,72 @@ TEST_F(CurveFSTest, testGetAllocatedSize) { files.emplace_back(fileInfo); } EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(dirInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(dirInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(files), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(files), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSegment(_, _)) - .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(segments), - Return(StoreStatus::OK))); + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<1>(segments), Return(StoreStatus::OK))); ASSERT_EQ(StatusCode::kOK, - curvefs_->GetAllocatedSize("/tests", &allocSize)); + curvefs_->GetAllocatedSize("/tests", &allocSize)); ASSERT_EQ(9 * segmentSize, allocSize.total); - std::unordered_map expected = - {{1, 6 * segmentSize}, {2, 3 * segmentSize}}; + std::unordered_map expected = { + {1, 6 * segmentSize}, {2, 3 * segmentSize}}; } // test GetFile fail { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(StatusCode::kFileNotExists, - curvefs_->GetAllocatedSize("/tests", &allocSize)); + curvefs_->GetAllocatedSize("/tests", &allocSize)); } // test file type not supported { FileInfo appendFileInfo; appendFileInfo.set_filetype(INODE_APPENDFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(appendFileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(appendFileInfo), + Return(StoreStatus::OK))); ASSERT_EQ(StatusCode::kNotSupported, - curvefs_->GetAllocatedSize("/tests", &allocSize)); + curvefs_->GetAllocatedSize("/tests", &allocSize)); } // test list segment fail { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - 
.WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSegment(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(StatusCode::kStorageError, - curvefs_->GetAllocatedSize("/tests", &allocSize)); + curvefs_->GetAllocatedSize("/tests", &allocSize)); } // test list directory fail { FileInfo dirInfo; dirInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(dirInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(dirInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(StatusCode::kStorageError, - curvefs_->GetAllocatedSize("/tests", &allocSize)); + curvefs_->GetAllocatedSize("/tests", &allocSize)); } } TEST_F(CurveFSTest, testGetFileSize) { uint64_t fileSize; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_id(0); fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_length(10 * kGB); @@ -1231,11 +1212,10 @@ TEST_F(CurveFSTest, testGetFileSize) { // test page file normal { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); - ASSERT_EQ(StatusCode::kOK, - curvefs_->GetFileSize("/tests", &fileSize)); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); + ASSERT_EQ(StatusCode::kOK, curvefs_->GetFileSize("/tests", &fileSize)); ASSERT_EQ(10 * kGB, fileSize); } // test directory normal @@ -1247,49 +1227,47 @@ TEST_F(CurveFSTest, testGetFileSize) { files.emplace_back(fileInfo); } EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(dirInfo), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(dirInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(files), - Return(StoreStatus::OK))); - ASSERT_EQ(StatusCode::kOK, - curvefs_->GetFileSize("/tests", &fileSize)); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(files), Return(StoreStatus::OK))); + ASSERT_EQ(StatusCode::kOK, curvefs_->GetFileSize("/tests", &fileSize)); ASSERT_EQ(30 * kGB, fileSize); } // test GetFile fail { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(StatusCode::kFileNotExists, - curvefs_->GetFileSize("/tests", &fileSize)); + curvefs_->GetFileSize("/tests", &fileSize)); } // test file type not supported { FileInfo appendFileInfo; appendFileInfo.set_filetype(INODE_APPENDFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(appendFileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(appendFileInfo), + Return(StoreStatus::OK))); ASSERT_EQ(StatusCode::kNotSupported, - curvefs_->GetFileSize("/tests", &fileSize)); + curvefs_->GetFileSize("/tests", &fileSize)); } // test list directory fail { FileInfo dirInfo; dirInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(dirInfo), - Return(StoreStatus::OK))); + .Times(2) 
+ .WillRepeatedly( + DoAll(SetArgPointee<2>(dirInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(StatusCode::kStorageError, - curvefs_->GetFileSize("/tests", &fileSize)); + curvefs_->GetFileSize("/tests", &fileSize)); } } @@ -1301,9 +1279,9 @@ TEST_F(CurveFSTest, testReadDir) { { fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->ReadDir("/file1", &items), StatusCode::kNotDirectory); @@ -1313,8 +1291,8 @@ TEST_F(CurveFSTest, testReadDir) { // test getFile Not exist { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->ReadDir("/file1", &items), StatusCode::kDirNotExist); @@ -1324,9 +1302,9 @@ TEST_F(CurveFSTest, testReadDir) { { fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); std::vector sideEffectArgs; sideEffectArgs.clear(); @@ -1335,9 +1313,9 @@ TEST_F(CurveFSTest, testReadDir) { sideEffectArgs.push_back(fileInfo); EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(sideEffectArgs), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(sideEffectArgs), + Return(StoreStatus::OK))); auto ret = curvefs_->ReadDir("/file1", &items); ASSERT_EQ(ret, StatusCode::kOK); @@ -1355,16 +1333,16 @@ TEST_F(CurveFSTest, testRecoverFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, RenameFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0), StatusCode::kOK); @@ -1373,19 +1351,19 @@ TEST_F(CurveFSTest, testRecoverFile) { // the upper dir not exist, can not recover { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); - ASSERT_EQ(curvefs_->RecoverFile("/k8s/file1", - "/RecycleBin/k8s/file1-10", 2), - StatusCode::kFileNotExists); + ASSERT_EQ( + curvefs_->RecoverFile("/k8s/file1", "/RecycleBin/k8s/file1-10", 2), + StatusCode::kFileNotExists); } // the same file exist, can not recover { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0), StatusCode::kFileExists); @@ -1400,12 +1378,12 @@ 
TEST_F(CurveFSTest, testRecoverFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 2), StatusCode::kFileIdNotMatch); @@ -1420,12 +1398,12 @@ TEST_F(CurveFSTest, testRecoverFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0), StatusCode::kFileUnderDeleting); @@ -1440,12 +1418,12 @@ TEST_F(CurveFSTest, testRecoverFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0), StatusCode::kRecoverFileCloneMetaInstalled); @@ -1460,12 +1438,12 @@ TEST_F(CurveFSTest, testRecoverFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0), StatusCode::kRecoverFileError); @@ -1478,16 +1456,16 @@ TEST_F(CurveFSTest, testRecoverFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, RenameFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->RecoverFile("/file1", "/RecycleBin/file1-10", 0), 
StatusCode::kStorageError); @@ -1502,22 +1480,22 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(4)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(4)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, RenameFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kOK); @@ -1528,18 +1506,18 @@ TEST_F(CurveFSTest, testRenameFile) { FileInfo fileInfo1; fileInfo1.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); std::vector snapshotFileInfos; snapshotFileInfos.push_back(fileInfo1); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos), + Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kFileUnderSnapShot); @@ -1548,8 +1526,8 @@ TEST_F(CurveFSTest, testRenameFile) { // old file not exist { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kFileNotExists); @@ -1560,16 +1538,16 @@ TEST_F(CurveFSTest, testRenameFile) { FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(3) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kFileNotExists); @@ -1582,22 +1560,22 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(4)) - 
.WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(4)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, RenameFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kStorageError); @@ -1625,9 +1603,9 @@ TEST_F(CurveFSTest, testRenameFile) { FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(AtLeast(1)) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kNotSupported); @@ -1644,33 +1622,33 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_id(10); fileInfo3.set_id(11); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(6) - // 查找/file1 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /file1是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // 查找/trash/file2 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo3), - Return(StoreStatus::OK))) - // check /trash/file2是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo3), - Return(StoreStatus::OK))); + .Times(6) + // Find /file1 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /file1 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Find /trash/file2 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo3), Return(StoreStatus::OK))) + // Check if /trash/file2 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo3), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(3) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(3) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, ReplaceFileAndRecycleOldFile(_, _, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 10, 11), StatusCode::kOK); @@ -1683,18 +1661,18 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(4) - // 
查找/file1 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /file1是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // 查找/trash/file2 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(4) + // Find /file1 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /file1 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Find /trash/file2 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kFileExists); @@ -1707,31 +1685,31 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(6) - // 查找/file1 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /file1是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // 查找/trash/file2 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /trash/file2是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(6) + // Find /file1 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /file1 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Find /trash/file2 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /trash/file2 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); std::vector snapshotFileInfos; snapshotFileInfos.push_back(fileInfo1); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(2) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos), + Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kFileUnderSnapShot); @@ -1744,32 +1722,32 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(6) - // 查找/file1 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /file1是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // 查找/trash/file2 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /trash/file2是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(6) + // Find /file1 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if 
/file1 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Find /trash/file2 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /trash/file2 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); std::vector snapshotFileInfos; snapshotFileInfos.push_back(fileInfo1); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(2) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos), + Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kFileUnderSnapShot); @@ -1744,32 +1722,32 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(6) - // 查找/file1 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /file1是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // 查找/trash/file2 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /trash/file2是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(6) + // Find /file1 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /file1 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Find /trash/file2 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /trash/file2 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ReplaceFileAndRecycleOldFile(_, _, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(2) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(2) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kStorageError); @@ -1782,32 +1760,32 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(6) - // 查找/file1 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /file1是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // 查找/trash/file2 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /trash/file2是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(6) + // Find /file1 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /file1 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Find /trash/file2 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /trash/file2 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(2) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(2) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, ReplaceFileAndRecycleOldFile(_, _, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kStorageError); @@ -1820,32 +1798,32 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(6) - // 查找/file1 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /file1是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // 查找/trash/file2 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - // check /trash/file2是否有快照 - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - 
Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(6) + // Find /file1 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /file1 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Find /trash/file2 + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + // Check if /trash/file2 has a snapshot + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(2) - .WillOnce(Return(StoreStatus::KeyNotExist)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(2) + .WillOnce(Return(StoreStatus::KeyNotExist)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, ReplaceFileAndRecycleOldFile(_, _, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ(curvefs_->RenameFile("/file1", "/trash/file2", 0, 0), StatusCode::kOK); @@ -1859,18 +1837,18 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(6) - // 查找/file1 + // Find /file1 .WillOnce( DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /file1是否有快照 + // Check if /file1 has a snapshot .WillOnce( DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // 查找/trash/file2 + // Find /trash/file2 .WillOnce( DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce( DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /trash/file2是否有快照 + // Check if /trash/file2 has a snapshot .WillOnce( DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce( @@ -1908,26 +1886,25 @@ TEST_F(CurveFSTest, testExtendFile) { fileInfo2.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_length(kMiniFileLength); - EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 0), StatusCode::kShrinkBiggerFile); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - kMiniFileLength), StatusCode::kOK); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", kMiniFileLength), + StatusCode::kOK); } // test enlarge size unit is not segment @@ -1941,14 +1918,14 @@ TEST_F(CurveFSTest, testExtendFile) { fileInfo2.set_segmentsize(DefaultSegmentSize); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + 
DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 1 + kMiniFileLength), StatusCode::kExtentUnitError); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 1 + kMiniFileLength), + StatusCode::kExtentUnitError); } // test enlarge size ok @@ -1962,11 +1939,11 @@ TEST_F(CurveFSTest, testExtendFile) { fileInfo2.set_segmentsize(DefaultSegmentSize); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); FileInfo modifiedInfo; EXPECT_CALL(*storage_, PutFile(_)) @@ -1974,8 +1951,8 @@ TEST_F(CurveFSTest, testExtendFile) { .WillOnce( DoAll(SaveArg<0>(&modifiedInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMiniFileLength), StatusCode::kOK); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMiniFileLength), + StatusCode::kOK); // previous fileinfo doesn't has throttle params ASSERT_FALSE(modifiedInfo.has_throttleparams()); @@ -1999,11 +1976,11 @@ TEST_F(CurveFSTest, testExtendFile) { p2->set_limit(1); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); FileInfo modifiedInfo; EXPECT_CALL(*storage_, PutFile(_)) @@ -2011,16 +1988,14 @@ TEST_F(CurveFSTest, testExtendFile) { .WillOnce( DoAll(SaveArg<0>(&modifiedInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMiniFileLength), StatusCode::kOK); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMiniFileLength), + StatusCode::kOK); // previous fileinfo has throttle params and has been modified by user ASSERT_TRUE(modifiedInfo.has_throttleparams()); ASSERT_EQ(2, modifiedInfo.throttleparams().throttleparams_size()); - ASSERT_EQ( - 1, modifiedInfo.throttleparams().throttleparams()[0].limit()); - ASSERT_EQ( - 1, modifiedInfo.throttleparams().throttleparams()[1].limit()); + ASSERT_EQ(1, modifiedInfo.throttleparams().throttleparams()[0].limit()); + ASSERT_EQ(1, modifiedInfo.throttleparams().throttleparams()[1].limit()); } // test enlarge size ok, and update throttle params @@ -2041,11 +2016,11 @@ TEST_F(CurveFSTest, testExtendFile) { p2->set_limit(120 * kMB); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); FileInfo modifiedInfo; EXPECT_CALL(*storage_, PutFile(_)) @@ -2053,16 +2028,14 @@ TEST_F(CurveFSTest, testExtendFile) { .WillOnce( DoAll(SaveArg<0>(&modifiedInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMiniFileLength), StatusCode::kOK); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMiniFileLength), + StatusCode::kOK); // previous fileinfo has throttle params and has been modified by user 
ASSERT_TRUE(modifiedInfo.has_throttleparams()); ASSERT_EQ(2, modifiedInfo.throttleparams().throttleparams_size()); - ASSERT_NE( - 1, modifiedInfo.throttleparams().throttleparams()[0].limit()); - ASSERT_NE( - 1, modifiedInfo.throttleparams().throttleparams()[1].limit()); + ASSERT_NE(1, modifiedInfo.throttleparams().throttleparams()[0].limit()); + ASSERT_NE(1, modifiedInfo.throttleparams().throttleparams()[1].limit()); } // test size over maxsize @@ -2076,14 +2049,14 @@ TEST_F(CurveFSTest, testExtendFile) { fileInfo2.set_segmentsize(DefaultSegmentSize); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMaxFileLength), StatusCode::kFileLengthNotSupported); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMaxFileLength), + StatusCode::kFileLengthNotSupported); } // file not exist @@ -2097,14 +2070,13 @@ TEST_F(CurveFSTest, testExtendFile) { fileInfo2.set_segmentsize(DefaultSegmentSize); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::KeyNotExist)); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMiniFileLength), - StatusCode::kFileNotExists); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMiniFileLength), + StatusCode::kFileNotExists); } // extend directory @@ -2116,15 +2088,14 @@ TEST_F(CurveFSTest, testExtendFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", - 2 * kMiniFileLength), - StatusCode::kNotSupported); + ASSERT_EQ(curvefs_->ExtendFile("/user1/file1", 2 * kMiniFileLength), + StatusCode::kNotSupported); } } @@ -2135,20 +2106,19 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), - StatusCode::kOK); + ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kOK); } // file owner same with newowner @@ -2157,12 +2127,11 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - 
.Times(1) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(1) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner1"), - StatusCode::kOK); + ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner1"), StatusCode::kOK); } // file is under snapshot, can not changeOwner @@ -2171,16 +2140,16 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); std::vector snapshotFileInfos; snapshotFileInfos.push_back(fileInfo1); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapshotFileInfos), + Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kFileUnderSnapShot); @@ -2189,8 +2158,8 @@ TEST_F(CurveFSTest, testChangeOwner) { // file not exist { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kFileNotExists); @@ -2202,17 +2171,17 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_PAGEFILE); fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kStorageError); @@ -2224,20 +2193,19 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_DIRECTORY); fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), - StatusCode::kOK); + ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kOK); } // changeOwner dir not empty @@ -2246,16 +2214,16 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_DIRECTORY); fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); std::vector fileInfoList; fileInfoList.push_back(fileInfo1); EXPECT_CALL(*storage_, ListFile(_, 
_, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfoList), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfoList), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kDirNotEmpty); @@ -2267,9 +2235,9 @@ TEST_F(CurveFSTest, testChangeOwner) { fileInfo1.set_filetype(FileType::INODE_APPENDECFILE); fileInfo1.set_owner("owner1"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))); ASSERT_EQ(curvefs_->ChangeOwner("/file1", "owner2"), StatusCode::kNotSupported); @@ -2315,18 +2283,19 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 0, false, &segment), StatusCode::kOK); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 0, false, &segment), + StatusCode::kOK); } // test normal get & allocate not exist segment @@ -2343,29 +2312,28 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); - + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*mockChunkAllocator_, - AllocateChunkSegment(_, _, _, _, _, _)) - .Times(1) - .WillOnce(Return(true)); - + AllocateChunkSegment(_, _, _, _, _, _)) + .Times(1) + .WillOnce(Return(true)); EXPECT_CALL(*storage_, PutSegment(_, _, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 0, true, &segment), StatusCode::kOK); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 0, true, &segment), + StatusCode::kOK); } // file is a directory @@ -2379,14 +2347,15 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 0, false, &segment), StatusCode::kParaError); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 0, false, &segment), + StatusCode::kParaError); } // segment offset not align file segment size 
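Aside: every hunk in this file reshapes the same gMock idiom, where a mocked storage call fills an out-parameter via SetArgPointee<N> and returns a status via Return, the two actions combined with DoAll, and successive WillOnce clauses are consumed in call order. The following is a minimal, self-contained sketch of that idiom; MockNameServerStorage, StoreStatus, and FileInfo here are simplified stand-ins for illustration, not the project's actual declarations.

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <string>

using ::testing::_;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;

// Simplified stand-ins for the types mocked throughout this patch.
enum class StoreStatus { OK, KeyNotExist };
struct FileInfo { int id = 0; };

class MockNameServerStorage {
 public:
    MOCK_METHOD(StoreStatus, GetFile,
                (int parentId, const std::string& name, FileInfo* out));
};

TEST(DoAllPatternSketch, FillsOutParamThenReportsMiss) {
    MockNameServerStorage storage;
    FileInfo stored;
    stored.id = 10;

    // The first call writes `stored` through argument index 2 and returns
    // OK; the second call reports a missing key. WillOnce clauses fire in
    // the order they are declared.
    EXPECT_CALL(storage, GetFile(_, _, _))
        .Times(2)
        .WillOnce(DoAll(SetArgPointee<2>(stored), Return(StoreStatus::OK)))
        .WillOnce(Return(StoreStatus::KeyNotExist));

    FileInfo got;
    ASSERT_EQ(storage.GetFile(0, "/file1", &got), StoreStatus::OK);
    ASSERT_EQ(got.id, 10);
    ASSERT_EQ(storage.GetFile(0, "/file1", &got), StoreStatus::KeyNotExist);
}

This mirrors how the tests above stub each GetFile lookup along a path, which is why the .Times() counts track the number of lookups and snapshot checks a given rename, extend, or allocate scenario performs.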
@@ -2403,14 +2372,15 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 1, false, &segment), StatusCode::kParaError); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 1, false, &segment), + StatusCode::kParaError); } // file length < segment offset + segmentsize @@ -2427,14 +2397,15 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - kMiniFileLength, false, &segment), StatusCode::kParaError); + ASSERT_EQ(curvefs_->GetOrAllocateSegment( + "/user1/file2", kMiniFileLength, false, &segment), + StatusCode::kParaError); } // alloc chunk segment fail @@ -2451,24 +2422,24 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); - + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*mockChunkAllocator_, - AllocateChunkSegment(_, _, _, _, _, _)) - .Times(1) - .WillOnce(Return(false)); + AllocateChunkSegment(_, _, _, _, _, _)) + .Times(1) + .WillOnce(Return(false)); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 0, true, &segment), StatusCode::kSegmentAllocateError); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 0, true, &segment), + StatusCode::kSegmentAllocateError); } // put segment fail @@ -2485,29 +2456,28 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); - + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*mockChunkAllocator_, - AllocateChunkSegment(_, _, _, _, _, _)) - .Times(1) - .WillOnce(Return(true)); - + AllocateChunkSegment(_, _, _, _, _, _)) + .Times(1) + .WillOnce(Return(true)); EXPECT_CALL(*storage_, PutSegment(_, _, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + 
.WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(curvefs_->GetOrAllocateSegment("/user1/file2", - 0, true, &segment), StatusCode::kStorageError); + ASSERT_EQ( + curvefs_->GetOrAllocateSegment("/user1/file2", 0, true, &segment), + StatusCode::kStorageError); } } @@ -2732,8 +2702,8 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { std::string fileName = "/snapshotFile1WithInvalidClientVersion"; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2753,10 +2723,10 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(Return(StoreStatus::OK)) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce(Return(StoreStatus::OK)) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2769,8 +2739,9 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { FileInfo snapShotFileInfoRet; ASSERT_EQ(curvefs_->CreateSnapShotFile( - "/snapshotFile1WithInvalidClientVersion2", - &snapShotFileInfoRet), StatusCode::kClientVersionNotMatch); + "/snapshotFile1WithInvalidClientVersion2", + &snapShotFileInfoRet), + StatusCode::kClientVersionNotMatch); } { // test client version empty invalid @@ -2780,10 +2751,10 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(Return(StoreStatus::OK)) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce(Return(StoreStatus::OK)) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2791,13 +2762,14 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { FileInfo info; ASSERT_EQ(StatusCode::kOK, - curvefs_->RefreshSession( - fileName, "", 0 , "", "127.0.0.1", 1234, "", &info)); + curvefs_->RefreshSession(fileName, "", 0, "", "127.0.0.1", + 1234, "", &info)); FileInfo snapShotFileInfoRet; ASSERT_EQ(curvefs_->CreateSnapShotFile( - "/snapshotFile1WithInvalidClientVersion2", - &snapShotFileInfoRet), StatusCode::kClientVersionNotMatch); + "/snapshotFile1WithInvalidClientVersion2", + &snapShotFileInfoRet), + StatusCode::kClientVersionNotMatch); } { // test under snapshot @@ -2806,9 +2778,9 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2819,24 +2791,22 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { snapShotFiles.push_back(fileInfo1); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); FileInfo snapShotFileInfoRet; ASSERT_EQ(curvefs_->CreateSnapShotFile("/snapshotFile1", - &snapShotFileInfoRet), StatusCode::kFileUnderSnapShot); 
+ &snapShotFileInfoRet), + StatusCode::kFileUnderSnapShot); } { // test File is not PageFile - } - { + } { // test storage ListFile error - } - { + } { // test GenId error - } - { + } { // test create snapshot ok FileInfo originalFile; originalFile.set_id(1); @@ -2845,25 +2815,24 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(2), - Return(true))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<0>(2), Return(true))); FileInfo snapShotFileInfo; EXPECT_CALL(*storage_, SnapShotFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2871,15 +2840,16 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { // test client version valid FileInfo snapShotFileInfoRet; - ASSERT_EQ(curvefs_->CreateSnapShotFile("/originalFile", - &snapShotFileInfoRet), StatusCode::kOK); + ASSERT_EQ( + curvefs_->CreateSnapShotFile("/originalFile", &snapShotFileInfoRet), + StatusCode::kOK); ASSERT_EQ(snapShotFileInfoRet.parentid(), originalFile.id()); ASSERT_EQ(snapShotFileInfoRet.filename(), - originalFile.filename() + "-" + - std::to_string(originalFile.seqnum()) ); + originalFile.filename() + "-" + + std::to_string(originalFile.seqnum())); ASSERT_EQ(snapShotFileInfoRet.filestatus(), FileStatus::kFileCreated); - ASSERT_EQ( - snapShotFileInfoRet.filetype(), FileType::INODE_SNAPSHOT_PAGEFILE); + ASSERT_EQ(snapShotFileInfoRet.filetype(), + FileType::INODE_SNAPSHOT_PAGEFILE); } { // test create snapshot ok @@ -2890,25 +2860,24 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(2), - Return(true))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<0>(2), Return(true))); FileInfo snapShotFileInfo; EXPECT_CALL(*storage_, SnapShotFile(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); EXPECT_CALL(*fileRecordManager_, GetFileRecordExpiredTimeUs()) .Times(1) @@ -2917,14 +2886,15 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { // test client version valid FileInfo snapShotFileInfoRet; ASSERT_EQ(curvefs_->CreateSnapShotFile("/originalFile2", - &snapShotFileInfoRet), StatusCode::kOK); + &snapShotFileInfoRet), + StatusCode::kOK); ASSERT_EQ(snapShotFileInfoRet.parentid(), originalFile.id()); 
ASSERT_EQ(snapShotFileInfoRet.filename(), - originalFile.filename() + "-" + - std::to_string(originalFile.seqnum()) ); + originalFile.filename() + "-" + + std::to_string(originalFile.seqnum())); ASSERT_EQ(snapShotFileInfoRet.filestatus(), FileStatus::kFileCreated); - ASSERT_EQ( - snapShotFileInfoRet.filetype(), FileType::INODE_SNAPSHOT_PAGEFILE); + ASSERT_EQ(snapShotFileInfoRet.filetype(), + FileType::INODE_SNAPSHOT_PAGEFILE); } { // test storage snapshotFile Error @@ -2934,22 +2904,21 @@ TEST_F(CurveFSTest, testCreateSnapshotFile) { TEST_F(CurveFSTest, testListSnapShotFile) { { // workPath error - } - { + } { // dir not support std::vector snapFileInfos; ASSERT_EQ(curvefs_->ListSnapShotFile("/", &snapFileInfos), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } { // lookupFile error std::vector snapFileInfos; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->ListSnapShotFile("/originalFile", &snapFileInfos), - StatusCode::kFileNotExists); + StatusCode::kFileNotExists); } { // check type not support @@ -2960,13 +2929,13 @@ TEST_F(CurveFSTest, testListSnapShotFile) { originalFile.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapFileInfos; ASSERT_EQ(curvefs_->ListSnapShotFile("originalFile", &snapFileInfos), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } { // ListFile error @@ -2977,17 +2946,17 @@ TEST_F(CurveFSTest, testListSnapShotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); std::vector snapFileInfos; ASSERT_EQ(curvefs_->ListSnapShotFile("originalFile", &snapFileInfos), - StatusCode::kStorageError); + StatusCode::kStorageError); } { // ListFile ok @@ -2998,37 +2967,36 @@ TEST_F(CurveFSTest, testListSnapShotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapFileInfos; - FileInfo snapShotFile; + FileInfo snapShotFile; snapShotFile.set_parentid(1); snapFileInfos.push_back(snapShotFile); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapFileInfos), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapFileInfos), + Return(StoreStatus::OK))); std::vector snapFileInfosRet; ASSERT_EQ(curvefs_->ListSnapShotFile("originalFile", &snapFileInfosRet), - StatusCode::kOK); + StatusCode::kOK); ASSERT_EQ(snapFileInfosRet.size(), 1); ASSERT_EQ(snapFileInfosRet[0].SerializeAsString(), - snapShotFile.SerializeAsString()); - } + snapShotFile.SerializeAsString()); + } } - TEST_F(CurveFSTest, testGetSnapShotFileInfo) { { // ListSnapShotFile error FileInfo snapshotFileInfo; 
ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/", 1, &snapshotFileInfo), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } { // snapfile not exist(not under snapshot) @@ -3039,19 +3007,20 @@ TEST_F(CurveFSTest, testGetSnapShotFileInfo) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); FileInfo snapshotFileInfo; - ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile", - 1, &snapshotFileInfo), StatusCode::kSnapshotFileNotExists); + ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile", 1, + &snapshotFileInfo), + StatusCode::kSnapshotFileNotExists); } { // under snapshot, butsnapfile not exist @@ -3062,22 +3031,23 @@ TEST_F(CurveFSTest, testGetSnapShotFileInfo) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; snapInfo.set_seqnum(2); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); FileInfo snapshotFileInfo; - ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile", - 1, &snapshotFileInfo), StatusCode::kSnapshotFileNotExists); + ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile", 1, + &snapshotFileInfo), + StatusCode::kSnapshotFileNotExists); } { // test ok @@ -3088,24 +3058,25 @@ TEST_F(CurveFSTest, testGetSnapShotFileInfo) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; snapInfo.set_seqnum(1); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); FileInfo snapshotFileInfo; - ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile", - 1, &snapshotFileInfo), StatusCode::kOK); + ASSERT_EQ(curvefs_->GetSnapShotFileInfo("/originalFile", 1, + &snapshotFileInfo), + StatusCode::kOK); ASSERT_EQ(snapshotFileInfo.SerializeAsString(), - snapInfo.SerializeAsString()); + snapInfo.SerializeAsString()); } } @@ -3114,7 +3085,7 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { // GetSnapShotFileInfo error PageFileSegment segment; ASSERT_EQ(curvefs_->GetSnapShotFileSegment("/", 1, 0, &segment), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } { // offset not align @@ -3125,9 +3096,9 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) 
- .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3135,13 +3106,14 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { snapInfo.set_segmentsize(DefaultSegmentSize); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); PageFileSegment segment; - ASSERT_EQ(curvefs_->GetSnapShotFileSegment("/originalFile", - 1, 1, &segment), StatusCode::kParaError); + ASSERT_EQ( + curvefs_->GetSnapShotFileSegment("/originalFile", 1, 1, &segment), + StatusCode::kParaError); } { // storage->GetSegment return error @@ -3154,11 +3126,11 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3167,17 +3139,18 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { snapInfo.set_length(DefaultSegmentSize); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); PageFileSegment segment; - ASSERT_EQ(curvefs_->GetSnapShotFileSegment("/originalFile", - 1, 0, &segment), StatusCode::kSegmentNotAllocated); + ASSERT_EQ( + curvefs_->GetSnapShotFileSegment("/originalFile", 1, 0, &segment), + StatusCode::kSegmentNotAllocated); } { // ok @@ -3190,12 +3163,11 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); - + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3204,9 +3176,9 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { snapInfo.set_length(DefaultSegmentSize); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); PageFileSegment expectSegment; expectSegment.set_logicalpoolid(1); @@ -3214,20 +3186,21 @@ TEST_F(CurveFSTest, GetSnapShotFileSegment) { expectSegment.set_chunksize(curvefs_->GetDefaultChunkSize()); expectSegment.set_startoffset(0); - PageFileChunkInfo *chunkInfo = expectSegment.add_chunks(); + PageFileChunkInfo* chunkInfo = expectSegment.add_chunks(); 
chunkInfo->set_chunkid(1); chunkInfo->set_copysetid(1); EXPECT_CALL(*storage_, GetSegment(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(expectSegment), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(expectSegment), + Return(StoreStatus::OK))); PageFileSegment segment; - ASSERT_EQ(curvefs_->GetSnapShotFileSegment("/originalFile", - 1, 0, &segment), StatusCode::kOK); + ASSERT_EQ( + curvefs_->GetSnapShotFileSegment("/originalFile", 1, 0, &segment), + StatusCode::kOK); ASSERT_EQ(expectSegment.SerializeAsString(), - segment.SerializeAsString()); + segment.SerializeAsString()); } } @@ -3236,7 +3209,7 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { // GetSnapShotFileInfo error FileInfo snapshotFileInfo; ASSERT_EQ(curvefs_->DeleteFileSnapShotFile("/", 1, nullptr), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } { // under deleteing @@ -3247,9 +3220,9 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3258,12 +3231,12 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_EQ(curvefs_->DeleteFileSnapShotFile("/originalFile", 1, nullptr), - StatusCode::kSnapshotDeleting); + StatusCode::kSnapshotDeleting); } { // delete snapshot file filetype error (internal case) @@ -3274,9 +3247,9 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3286,12 +3259,12 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_EQ(curvefs_->DeleteFileSnapShotFile("/originalFile", 1, nullptr), - StatusCode::KInternalError); + StatusCode::KInternalError); } { // delete storage error @@ -3302,9 +3275,9 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3315,16 +3288,16 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + 
.WillOnce(Return(StoreStatus::InternalError)); EXPECT_EQ(curvefs_->DeleteFileSnapShotFile("/originalFile", 1, nullptr), - StatusCode::KInternalError); + StatusCode::KInternalError); } { // delete snapshot ok @@ -3335,9 +3308,9 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3348,21 +3321,20 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - EXPECT_CALL(*mockcleanManager_, - SubmitDeleteSnapShotFileJob(_, _)) - .Times(1) - .WillOnce(Return(true)); + EXPECT_CALL(*mockcleanManager_, SubmitDeleteSnapShotFileJob(_, _)) + .Times(1) + .WillOnce(Return(true)); EXPECT_EQ(curvefs_->DeleteFileSnapShotFile("/originalFile", 1, nullptr), - StatusCode::kOK); + StatusCode::kOK); } { // message the snapshot delete manager error, return error @@ -3373,9 +3345,9 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3386,21 +3358,20 @@ TEST_F(CurveFSTest, DeleteFileSnapShotFile) { snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - EXPECT_CALL(*mockcleanManager_, - SubmitDeleteSnapShotFileJob(_, _)) - .Times(1) - .WillOnce(Return(false)); + EXPECT_CALL(*mockcleanManager_, SubmitDeleteSnapShotFileJob(_, _)) + .Times(1) + .WillOnce(Return(false)); EXPECT_EQ(curvefs_->DeleteFileSnapShotFile("/originalFile", 1, nullptr), - StatusCode::KInternalError); + StatusCode::KInternalError); } } @@ -3409,7 +3380,7 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { { PageFileSegment segment; ASSERT_EQ(curvefs_->GetSnapShotFileSegment("/", 1, 0, &segment), - StatusCode::kNotSupported); + StatusCode::kNotSupported); } // snapshot file is not deleting @@ -3421,9 +3392,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3431,14 +3402,15 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { snapInfo.set_filestatus(FileStatus::kFileCreated); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - 
.Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); FileStatus fileStatus; uint32_t progress; - ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", - 1, &fileStatus, &progress), StatusCode::kOK); + ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1, + &fileStatus, &progress), + StatusCode::kOK); ASSERT_EQ(fileStatus, FileStatus::kFileCreated); ASSERT_EQ(progress, 0); } @@ -3452,9 +3424,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3464,21 +3436,21 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { std::vector snapShotFiles2; EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles2), - Return(StoreStatus::OK))); - - EXPECT_CALL(*mockcleanManager_, - GetTask(_)) - .Times(1) - .WillOnce(Return(nullptr)); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(snapShotFiles), Return(StoreStatus::OK))) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles2), + Return(StoreStatus::OK))); + + EXPECT_CALL(*mockcleanManager_, GetTask(_)) + .Times(1) + .WillOnce(Return(nullptr)); FileStatus fileStatus; uint32_t progress; - ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", - 1, &fileStatus, &progress), StatusCode::kSnapshotFileNotExists); + ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1, + &fileStatus, &progress), + StatusCode::kSnapshotFileNotExists); ASSERT_EQ(progress, 100); } @@ -3491,9 +3463,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3501,19 +3473,19 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { snapInfo.set_filestatus(FileStatus::kFileDeleting); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(2) + .WillRepeatedly(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); - EXPECT_CALL(*mockcleanManager_, - GetTask(_)) - .Times(1) - .WillOnce(Return(nullptr)); + EXPECT_CALL(*mockcleanManager_, GetTask(_)) + .Times(1) + .WillOnce(Return(nullptr)); FileStatus fileStatus; uint32_t progress; - ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", - 1, &fileStatus, &progress), StatusCode::kSnapshotFileDeleteError); + ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1, + &fileStatus, &progress), + StatusCode::kSnapshotFileDeleteError); ASSERT_EQ(fileStatus, FileStatus::kFileDeleting); ASSERT_EQ(progress, 0); } @@ -3527,9 +3499,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - 
Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3537,24 +3509,25 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { snapInfo.set_filestatus(FileStatus::kFileDeleting); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); auto task = - std::make_shared(1, nullptr, originalFile); + std::make_shared(1, nullptr, originalFile); TaskProgress taskProgress; taskProgress.SetProgress(50); taskProgress.SetStatus(TaskStatus::PROGRESSING); task->SetTaskProgress(taskProgress); EXPECT_CALL(*mockcleanManager_, GetTask(_)) - .Times(1) - .WillOnce(Return(task)); + .Times(1) + .WillOnce(Return(task)); FileStatus fileStatus; uint32_t progress; - ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", - 1, &fileStatus, &progress), StatusCode::kOK); + ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1, + &fileStatus, &progress), + StatusCode::kOK); ASSERT_EQ(fileStatus, FileStatus::kFileDeleting); ASSERT_EQ(progress, 50); } @@ -3568,9 +3541,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3578,24 +3551,25 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { snapInfo.set_filestatus(FileStatus::kFileDeleting); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); auto task = - std::make_shared(1, nullptr, originalFile); + std::make_shared(1, nullptr, originalFile); TaskProgress taskProgress; taskProgress.SetProgress(50); taskProgress.SetStatus(TaskStatus::FAILED); task->SetTaskProgress(taskProgress); EXPECT_CALL(*mockcleanManager_, GetTask(_)) - .Times(1) - .WillOnce(Return(task)); + .Times(1) + .WillOnce(Return(task)); FileStatus fileStatus; uint32_t progress; - ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", - 1, &fileStatus, &progress), StatusCode::kOK); + ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1, + &fileStatus, &progress), + StatusCode::kOK); ASSERT_EQ(fileStatus, FileStatus::kFileDeleting); ASSERT_EQ(progress, 50); } @@ -3609,9 +3583,9 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { originalFile.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(originalFile), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(originalFile), Return(StoreStatus::OK))); std::vector snapShotFiles; FileInfo snapInfo; @@ -3619,66 +3593,67 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { snapInfo.set_filestatus(FileStatus::kFileDeleting); snapShotFiles.push_back(snapInfo); EXPECT_CALL(*storage_, ListSnapshotFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(snapShotFiles), + Return(StoreStatus::OK))); auto task 
= - std::make_shared(1, nullptr, originalFile); + std::make_shared(1, nullptr, originalFile); TaskProgress taskProgress; taskProgress.SetProgress(100); taskProgress.SetStatus(TaskStatus::SUCCESS); task->SetTaskProgress(taskProgress); EXPECT_CALL(*mockcleanManager_, GetTask(_)) - .Times(1) - .WillOnce(Return(task)); + .Times(1) + .WillOnce(Return(task)); FileStatus fileStatus; uint32_t progress; - ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", - 1, &fileStatus, &progress), StatusCode::kOK); + ASSERT_EQ(curvefs_->CheckSnapShotFileStatus("/originalFile", 1, + &fileStatus, &progress), + StatusCode::kOK); ASSERT_EQ(fileStatus, FileStatus::kFileDeleting); ASSERT_EQ(progress, 100); } } TEST_F(CurveFSTest, testOpenFile) { - // 文件不存在 + // File does not exist { ProtoSession protoSession; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); - ASSERT_EQ(curvefs_->OpenFile("/file1", "127.0.0.1", - &protoSession, &fileInfo), - StatusCode::kFileNotExists); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); + ASSERT_EQ( + curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), + StatusCode::kFileNotExists); ASSERT_EQ(curvefs_->GetOpenFileNum(), 0); } - // open目录 + // Open directory { ProtoSession protoSession; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->OpenFile("/file1", "127.0.0.1", - &protoSession, &fileInfo), - StatusCode::kNotSupported); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); + ASSERT_EQ( + curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), + StatusCode::kNotSupported); ASSERT_EQ(curvefs_->GetOpenFileNum(), 0); } - // 执行成功 + // Execution successful { ProtoSession protoSession; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ( curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), @@ -3854,19 +3829,19 @@ TEST_F(CurveFSTest, testOpenFile) { TEST_F(CurveFSTest, testCloseFile) { ProtoSession protoSession; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); - // 先插入session + // Insert session first EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); ASSERT_EQ( curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), StatusCode::kOK); - // 执行成功 + // Execution successful { EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(1) @@ -3880,39 +3855,41 @@ TEST_F(CurveFSTest, testCloseFile) { TEST_F(CurveFSTest, testRefreshSession) { ProtoSession protoSession; - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); - // 先插入session + // Insert session first EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->OpenFile("/file1", "127.0.0.1", - &protoSession, &fileInfo), - StatusCode::kOK); + ASSERT_EQ( + curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), + StatusCode::kOK); - // 文件不存在 + // File does not exist { - FileInfo fileInfo1; + FileInfo fileInfo1; 
EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(curvefs_->RefreshSession("/file1", "sessionidxxxxx", 12345, - "signaturexxxx", "127.0.0.1", 1234, "", &fileInfo1), + "signaturexxxx", "127.0.0.1", 1234, + "", &fileInfo1), StatusCode::kFileNotExists); } - // 执行成功 + // Execution successful { - FileInfo fileInfo1; + FileInfo fileInfo1; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::OK)); + .Times(1) + .WillOnce(Return(StoreStatus::OK)); uint64_t date = ::curve::common::TimeUtility::GetTimeofDayUs(); ASSERT_EQ(curvefs_->RefreshSession("/file1", protoSession.sessionid(), - date, "signaturexxxx", "127.0.0.1", 1234, "", &fileInfo1), + date, "signaturexxxx", "127.0.0.1", + 1234, "", &fileInfo1), StatusCode::kOK); ASSERT_EQ(1, curvefs_->GetOpenFileNum()); } @@ -3921,39 +3898,41 @@ TEST_F(CurveFSTest, testRefreshSession) { TEST_F(CurveFSTest, testCheckRenameNewfilePathOwner) { uint64_t date = TimeUtility::GetTimeofDayUs(); - // root用户,签名匹配,date超时 + // Root user, signature matching, date timeout { std::string filename = "/file1"; - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions_.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions_.rootPassword); - ASSERT_EQ(curvefs_->CheckDestinationOwner(filename, - authOptions_.rootOwner, sig, date), + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions_.rootOwner); + std::string sig = Authenticator::CalcString2Signature( + str2sig, authOptions_.rootPassword); + ASSERT_EQ(curvefs_->CheckDestinationOwner( + filename, authOptions_.rootOwner, sig, date), StatusCode::kOK); - ASSERT_EQ(curvefs_->CheckDestinationOwner(filename, - authOptions_.rootOwner, sig, date + 15 * 2000 * 2000), - StatusCode::kOwnerAuthFail); + ASSERT_EQ( + curvefs_->CheckDestinationOwner(filename, authOptions_.rootOwner, + sig, date + 15 * 2000 * 2000), + StatusCode::kOwnerAuthFail); } - // root用户,签名不匹配 + // Root user, signature mismatch { - ASSERT_EQ(curvefs_->CheckDestinationOwner("/file1", - authOptions_.rootOwner, "wrongpass", date), + ASSERT_EQ(curvefs_->CheckDestinationOwner( + "/file1", authOptions_.rootOwner, "wrongpass", date), StatusCode::kOwnerAuthFail); } - // 普通用户,根目录下的文件非root用户认证失败 + // Ordinary user, non root user authentication failed for files in the root + // directory { FileInfo fileInfo; fileInfo.set_owner(authOptions_.rootOwner); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->CheckDestinationOwner("/file1", - "normaluser", "wrongpass", date), + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); + ASSERT_EQ(curvefs_->CheckDestinationOwner("/file1", "normaluser", + "wrongpass", date), StatusCode::kOwnerAuthFail); } } @@ -3961,16 +3940,16 @@ TEST_F(CurveFSTest, testCheckRenameNewfilePathOwner) { TEST_F(CurveFSTest, testCheckPathOwner) { uint64_t date = TimeUtility::GetTimeofDayUs(); - // root用户,签名匹配, 并检测date过期 + // Root user, signature matching, and detecting date expiration { std::string filename = "/file1"; - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions_.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions_.rootPassword); + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions_.rootOwner); + std::string sig = 
Authenticator::CalcString2Signature( + str2sig, authOptions_.rootPassword); - ASSERT_EQ(curvefs_->CheckPathOwner(filename, - authOptions_.rootOwner, sig, date), + ASSERT_EQ(curvefs_->CheckPathOwner(filename, authOptions_.rootOwner, + sig, date), StatusCode::kOK); ASSERT_EQ(curvefs_->CheckPathOwner(filename, authOptions_.rootOwner, @@ -3978,168 +3957,176 @@ TEST_F(CurveFSTest, testCheckPathOwner) { StatusCode::kOwnerAuthFail); } - // root用户,签名不匹配 + // Root user, signature mismatch { ASSERT_EQ(curvefs_->CheckPathOwner("/file1", authOptions_.rootOwner, - "wrongpass", date), + "wrongpass", date), StatusCode::kOwnerAuthFail); } - // 普通用户,根目录下的文件非root用户认证成功, 并检测date超时 + // Normal user, non root user authentication successful for files in the + // root directory, and detection of date timeout { - ASSERT_EQ(curvefs_->CheckPathOwner("/file1", "normaluser", - "wrongpass", date), - StatusCode::kOK); + ASSERT_EQ( + curvefs_->CheckPathOwner("/file1", "normaluser", "wrongpass", date), + StatusCode::kOK); ASSERT_EQ(curvefs_->CheckPathOwner("/file1", "normaluser", "wrongpass", - date + 15 * 2000 * 2000), + date + 15 * 2000 * 2000), StatusCode::kOwnerAuthFail); } } TEST_F(CurveFSTest, testCheckFileOwner) { uint64_t date = TimeUtility::GetTimeofDayUs(); - // root用户,签名匹配 + // Root user, signature matching { std::string filename = "/file1"; - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions_.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions_.rootPassword); + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions_.rootOwner); + std::string sig = Authenticator::CalcString2Signature( + str2sig, authOptions_.rootPassword); - ASSERT_EQ(curvefs_->CheckFileOwner(filename, - authOptions_.rootOwner, sig, date), + ASSERT_EQ(curvefs_->CheckFileOwner(filename, authOptions_.rootOwner, + sig, date), StatusCode::kOK); - ASSERT_EQ(curvefs_->CheckFileOwner(filename, - authOptions_.rootOwner, sig, date + 15 * 2000 * 2000), + ASSERT_EQ(curvefs_->CheckFileOwner(filename, authOptions_.rootOwner, + sig, date + 15 * 2000 * 2000), StatusCode::kOwnerAuthFail); } - // root用户,签名不匹配 + // Root user, signature mismatch { - ASSERT_EQ(curvefs_->CheckFileOwner("/file1", - authOptions_.rootOwner, "wrongpass", date), + ASSERT_EQ(curvefs_->CheckFileOwner("/file1", authOptions_.rootOwner, + "wrongpass", date), StatusCode::kOwnerAuthFail); } - // 普通用户,根目录下的文件非root用户认证成功 + // Normal user, non root user authentication succeeded for files in the root + // directory { FileInfo fileInfo; fileInfo.set_owner("normaluser"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->CheckFileOwner("/file1", - "normaluser", "", date), StatusCode::kOK); + ASSERT_EQ(curvefs_->CheckFileOwner("/file1", "normaluser", "", date), + StatusCode::kOK); } - // 普通用户,根目录下的文件非root用户认证失败 + // Ordinary user, non root user authentication failed for files in the root + // directory { FileInfo fileInfo; fileInfo.set_owner("normaluser"); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->CheckFileOwner("/file1", - "normaluser1", "", date), StatusCode::kOwnerAuthFail); + ASSERT_EQ(curvefs_->CheckFileOwner("/file1", 
"normaluser1", "", date), + StatusCode::kOwnerAuthFail); } } - TEST_F(CurveFSTest, testCreateCloneFile) { // test parm error - ASSERT_EQ(curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_DIRECTORY, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr), - StatusCode::kParaError); + ASSERT_EQ(curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_DIRECTORY, + kMiniFileLength, kStartSeqNum, + curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr), + StatusCode::kParaError); - ASSERT_EQ(curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength - 1, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr), - StatusCode::kParaError); + ASSERT_EQ(curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength - 1, kStartSeqNum, + curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr), + StatusCode::kParaError); { // test file exist EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); - auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); + auto statusCode = curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, + kStartSeqNum, curvefs_->GetDefaultChunkSize(), 0, 0, "default", + nullptr); ASSERT_EQ(statusCode, StatusCode::kFileExists); } { // test get storage error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::InternalError)); - auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); + auto statusCode = curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, + kStartSeqNum, curvefs_->GetDefaultChunkSize(), 0, 0, "default", + nullptr); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { // test inode allocate error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(false)); + .Times(1) + .WillOnce(Return(false)); - auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); + auto statusCode = curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, + kStartSeqNum, curvefs_->GetDefaultChunkSize(), 0, 0, "default", + nullptr); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { // test put storage error EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::InternalError)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(true)); + .Times(1) + .WillOnce(Return(true)); - auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", - 
FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); + auto statusCode = curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, + kStartSeqNum, curvefs_->GetDefaultChunkSize(), 0, 0, "default", + nullptr); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { // test ok EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::KeyNotExist)); EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) - .Times(1) - .WillOnce(Return(true)); + .Times(1) + .WillOnce(Return(true)); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); FileInfo fileInfo; - auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, - "default", &fileInfo); + auto statusCode = curvefs_->CreateCloneFile( + "/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, + kStartSeqNum, curvefs_->GetDefaultChunkSize(), 0, 0, "default", + &fileInfo); ASSERT_EQ(statusCode, StatusCode::kOK); ASSERT_EQ(fileInfo.filename(), "file1"); ASSERT_EQ(fileInfo.owner(), "owner1"); @@ -4156,54 +4143,58 @@ TEST_F(CurveFSTest, testCreateCloneFile) { TEST_F(CurveFSTest, testSetCloneFileStatus) { { // test path not exist - FileInfo fileInfo; + FileInfo fileInfo; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1/file2", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kFileNotExists); + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1/file2", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kFileNotExists); } { // test stoarge error FileInfo fileInfo; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(1) + .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1/file2", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kStorageError); + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1/file2", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kStorageError); } { // test WalkPath NOT DIRECTORY - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1/file2", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kFileNotExists); + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1/file2", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kFileNotExists); } { // test LookUpFile internal Error - FileInfo fileInfo; + FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1/file2", 
- kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kStorageError); + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1/file2", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kStorageError); } { // test inodeid not match @@ -4211,13 +4202,13 @@ TEST_F(CurveFSTest, testSetCloneFileStatus) { fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_id(100); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1", - 10, FileStatus::kFileCloned), - StatusCode::kFileIdNotMatch); + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1", 10, FileStatus::kFileCloned), + StatusCode::kFileIdNotMatch); } { // test filestatus not ok @@ -4226,43 +4217,41 @@ TEST_F(CurveFSTest, testSetCloneFileStatus) { FileStatus setStatus; StatusCode expectReturn; int putFileTime; - } testCases[] { + } testCases[]{ {FileStatus::kFileCloning, FileStatus::kFileCloneMetaInstalled, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCloning, FileStatus::kFileCloning, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCloneMetaInstalled, FileStatus::kFileCloned, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCloneMetaInstalled, - FileStatus::kFileCloneMetaInstalled, - StatusCode::kOK, 1}, - {FileStatus::kFileCloned, FileStatus::kFileCloned, - StatusCode::kOK, 1}, + FileStatus::kFileCloneMetaInstalled, StatusCode::kOK, 1}, + {FileStatus::kFileCloned, FileStatus::kFileCloned, StatusCode::kOK, + 1}, {FileStatus::kFileCreated, FileStatus::kFileBeingCloned, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileBeingCloned, FileStatus::kFileCreated, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileBeingCloned, FileStatus::kFileBeingCloned, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCloned, FileStatus::kFileBeingCloned, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileBeingCloned, FileStatus::kFileCloned, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCreated, FileStatus::kFileCreated, - StatusCode::kOK, 1}, + StatusCode::kOK, 1}, {FileStatus::kFileCloning, FileStatus::kFileCloned, - StatusCode::kCloneStatusNotMatch, 0}, + StatusCode::kCloneStatusNotMatch, 0}, {FileStatus::kFileCloneMetaInstalled, FileStatus::kFileCloning, - StatusCode::kCloneStatusNotMatch, 0}, + StatusCode::kCloneStatusNotMatch, 0}, {FileStatus::kFileCreated, FileStatus::kFileCloned, - StatusCode::kCloneStatusNotMatch, 0}, + StatusCode::kCloneStatusNotMatch, 0}, {FileStatus::kFileDeleting, FileStatus::kFileBeingCloned, - StatusCode::kCloneStatusNotMatch, 0}, + StatusCode::kCloneStatusNotMatch, 0}, {FileStatus::kFileCloning, FileStatus::kFileBeingCloned, - StatusCode::kCloneStatusNotMatch, 0}, + StatusCode::kCloneStatusNotMatch, 0}, {FileStatus::kFileCloneMetaInstalled, FileStatus::kFileBeingCloned, - StatusCode::kCloneStatusNotMatch, 0} - }; + StatusCode::kCloneStatusNotMatch, 0}}; for (int i = 0; i < sizeof(testCases) / sizeof(testCases[0]); i++) { { @@ -4270,17 +4259,17 @@ TEST_F(CurveFSTest, testSetCloneFileStatus) { fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(testCases[i].originStatus); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(fileInfo), + 
Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(testCases[i].putFileTime)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(testCases[i].putFileTime)) + .WillOnce(Return(StoreStatus::OK)); - - ASSERT_EQ(curvefs_->SetCloneFileStatus("/dir1", - kUnitializedFileID, testCases[i].setStatus), + ASSERT_EQ( + curvefs_->SetCloneFileStatus("/dir1", kUnitializedFileID, + testCases[i].setStatus), testCases[i].expectReturn); } } @@ -4291,17 +4280,17 @@ TEST_F(CurveFSTest, testSetCloneFileStatus) { fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileCloneMetaInstalled); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::InternalError)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/file1", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kStorageError); + ASSERT_EQ(curvefs_->SetCloneFileStatus("/file1", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kStorageError); } { // test put file ok @@ -4309,17 +4298,17 @@ TEST_F(CurveFSTest, testSetCloneFileStatus) { fileInfo.set_filetype(FileType::INODE_PAGEFILE); fileInfo.set_filestatus(FileStatus::kFileCloneMetaInstalled); EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .Times(1) + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) - .Times(AtLeast(1)) - .WillOnce(Return(StoreStatus::OK)); + .Times(AtLeast(1)) + .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(curvefs_->SetCloneFileStatus("/file1", - kUnitializedFileID, FileStatus::kFileCloned), - StatusCode::kOK); + ASSERT_EQ(curvefs_->SetCloneFileStatus("/file1", kUnitializedFileID, + FileStatus::kFileCloned), + StatusCode::kOK); } } @@ -4327,10 +4316,10 @@ TEST_F(CurveFSTest, Init) { // test getFile ok { FileInfo fileInfo1, fileInfo2, fileInfo3, fileInfo4, fileInfo5; - fileInfo1.set_parentid(ROOTINODEID+1); + fileInfo1.set_parentid(ROOTINODEID + 1); fileInfo2.set_parentid(ROOTINODEID); - fileInfo2.set_id(RECYCLEBININODEID+1); + fileInfo2.set_id(RECYCLEBININODEID + 1); fileInfo3.set_parentid(ROOTINODEID); fileInfo3.set_id(RECYCLEBININODEID); @@ -4355,30 +4344,23 @@ TEST_F(CurveFSTest, Init) { const struct { FileInfo info; - bool ret; + bool ret; } testCases[] = { - {fileInfo1, false}, - {fileInfo2, false}, - {fileInfo3, false}, - {fileInfo4, false}, - {fileInfo5, true}, + {fileInfo1, false}, {fileInfo2, false}, {fileInfo3, false}, + {fileInfo4, false}, {fileInfo5, true}, }; - for (int i = 0; i < sizeof(testCases)/ sizeof(testCases[0]); i++) { + for (int i = 0; i < sizeof(testCases) / sizeof(testCases[0]); i++) { EXPECT_CALL(*storage_, GetFile(_, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(testCases[i].info), - Return(StoreStatus::OK))); - - ASSERT_EQ(testCases[i].ret, kCurveFS.Init(storage_, - inodeIdGenerator_, - mockChunkAllocator_, - mockcleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions_, - topology_, - nullptr)); + .Times(1) + .WillOnce(DoAll(SetArgPointee<2>(testCases[i].info), + Return(StoreStatus::OK))); + + ASSERT_EQ(testCases[i].ret, + kCurveFS.Init(storage_, inodeIdGenerator_, + mockChunkAllocator_, 
mockcleanManager_, + fileRecordManager_, allocStatistic_, + curveFSOptions_, topology_, nullptr)); } } @@ -4388,15 +4370,10 @@ TEST_F(CurveFSTest, Init) { .Times(1) .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(false, kCurveFS.Init(storage_, - inodeIdGenerator_, - mockChunkAllocator_, - mockcleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions_, - topology_, - nullptr)); + ASSERT_EQ(false, kCurveFS.Init(storage_, inodeIdGenerator_, + mockChunkAllocator_, mockcleanManager_, + fileRecordManager_, allocStatistic_, + curveFSOptions_, topology_, nullptr)); } // test getfile not exist @@ -4410,15 +4387,10 @@ TEST_F(CurveFSTest, Init) { .Times(1) .WillOnce(Return(StoreStatus::InternalError)); - ASSERT_EQ(false, kCurveFS.Init(storage_, - inodeIdGenerator_, - mockChunkAllocator_, - mockcleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions_, - topology_, - nullptr)); + ASSERT_EQ(false, kCurveFS.Init(storage_, inodeIdGenerator_, + mockChunkAllocator_, mockcleanManager_, + fileRecordManager_, allocStatistic_, + curveFSOptions_, topology_, nullptr)); // putfile ok FileInfo fileInfo5; @@ -4436,15 +4408,10 @@ TEST_F(CurveFSTest, Init) { .Times(1) .WillOnce(Return(StoreStatus::OK)); - ASSERT_EQ(true, kCurveFS.Init(storage_, - inodeIdGenerator_, - mockChunkAllocator_, - mockcleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions_, - topology_, - nullptr)); + ASSERT_EQ(true, kCurveFS.Init(storage_, inodeIdGenerator_, + mockChunkAllocator_, mockcleanManager_, + fileRecordManager_, allocStatistic_, + curveFSOptions_, topology_, nullptr)); } } @@ -4497,11 +4464,11 @@ TEST_F(CurveFSTest, ListAllVolumesOnCopyset) { { // normal test EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileVec1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileVec2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileVec1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileVec2), Return(StoreStatus::OK))); PageFileSegment segment2 = segment; PageFileSegment segment3 = segment; auto chunk = segment.add_chunks(); @@ -4515,41 +4482,39 @@ TEST_F(CurveFSTest, ListAllVolumesOnCopyset) { std::vector segVec2 = {segment2}; std::vector segVec3 = {segment3}; EXPECT_CALL(*storage_, ListSegment(_, _)) - .Times(3) - .WillOnce(DoAll(SetArgPointee<1>(segVec1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<1>(segVec2), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<1>(segVec3), - Return(StoreStatus::OK))); + .Times(3) + .WillOnce(DoAll(SetArgPointee<1>(segVec1), Return(StoreStatus::OK))) + .WillOnce(DoAll(SetArgPointee<1>(segVec2), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<1>(segVec3), Return(StoreStatus::OK))); ASSERT_EQ(StatusCode::kOK, - curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); + curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); ASSERT_EQ(1, fileNames.size()); ASSERT_EQ("file1", fileNames[0]); } // list file fail { EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileVec1), - Return(StoreStatus::OK))) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileVec1), Return(StoreStatus::OK))) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(StatusCode::kStorageError, - curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); + curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); } // list segment fail { 
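// Both ListFile calls below succeed; the fault is injected one level down,
// in ListSegment, so this case verifies that a segment-level storage error
// also surfaces as StatusCode::kStorageError.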
EXPECT_CALL(*storage_, ListFile(_, _, _)) - .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(fileVec1), - Return(StoreStatus::OK))) - .WillOnce(DoAll(SetArgPointee<2>(fileVec2), - Return(StoreStatus::OK))); + .Times(2) + .WillOnce( + DoAll(SetArgPointee<2>(fileVec1), Return(StoreStatus::OK))) + .WillOnce( + DoAll(SetArgPointee<2>(fileVec2), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, ListSegment(_, _)) - .Times(1) - .WillOnce(Return(StoreStatus::KeyNotExist)); + .Times(1) + .WillOnce(Return(StoreStatus::KeyNotExist)); ASSERT_EQ(StatusCode::kStorageError, - curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); + curvefs_->ListVolumesOnCopyset(copysetVec, &fileNames)); } } @@ -4571,8 +4536,8 @@ TEST_F(CurveFSTest, TestUpdateFileThrottleParams) { FileInfo updatedFileInfo; EXPECT_CALL(*storage_, GetFile(_, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(fileInfo), - Return(StoreStatus::OK))); + .WillOnce( + DoAll(SetArgPointee<2>(fileInfo), Return(StoreStatus::OK))); EXPECT_CALL(*storage_, PutFile(_)) .WillOnce( DoAll(SaveArg<0>(&updatedFileInfo), Return(StoreStatus::OK))); @@ -4635,8 +4600,8 @@ TEST(StripeParamTest, Test) { rc = CheckStripeParam(segmentSize, chunkSize, 4096, 128); EXPECT_EQ(StatusCode::kParaError, rc); - rc = CheckStripeParam(segmentSize, chunkSize, 4096, - segmentSize / chunkSize); + rc = + CheckStripeParam(segmentSize, chunkSize, 4096, segmentSize / chunkSize); EXPECT_EQ(StatusCode::kOK, rc); } diff --git a/test/mds/nameserver2/file_lock_test.cpp b/test/mds/nameserver2/file_lock_test.cpp index 25b524d195..6c5f14a943 100644 --- a/test/mds/nameserver2/file_lock_test.cpp +++ b/test/mds/nameserver2/file_lock_test.cpp @@ -19,26 +19,28 @@ * Created Date: 2019-04-03 * Author: hzchenwei7 */ +#include "src/mds/nameserver2/file_lock.h" + #include -#include #include +#include + #include // NOLINT -#include "src/mds/nameserver2/file_lock.h" -using ::testing::AtLeast; -using ::testing::StrEq; using ::testing::_; +using ::testing::AtLeast; +using ::testing::DoAll; using ::testing::Return; using ::testing::ReturnArg; -using ::testing::DoAll; using ::testing::SetArgPointee; +using ::testing::StrEq; namespace curve { namespace mds { FileLockManager flm(4); -class FileLockManagerTest: public ::testing::Test { +class FileLockManagerTest : public ::testing::Test { public: FileLockManagerTest() {} }; @@ -59,9 +61,7 @@ void ReadLock(const std::string& filePath, bool unlock = false) { } } -void Unlock(const std::string& filePath) { - flm.Unlock(filePath); -} +void Unlock(const std::string& filePath) { flm.Unlock(filePath); } TEST_F(FileLockManagerTest, Basic) { std::string filePath1 = "/home/dir1/file1"; @@ -115,62 +115,46 @@ TEST_F(FileLockManagerTest, UnlockInAnotherThread) { Unlock(filePath); } -class FileReadLockGuardTest: public ::testing::Test { +class FileReadLockGuardTest : public ::testing::Test { public: FileReadLockGuardTest() {} }; TEST_F(FileReadLockGuardTest, LockUnlockTest) { - { - FileReadLockGuard guard(&flm, "/"); - } + { FileReadLockGuard guard(&flm, "/"); } - { - FileReadLockGuard guard(&flm, "/a"); - } + { FileReadLockGuard guard(&flm, "/a"); } - { - FileReadLockGuard guard(&flm, "/a/b"); - } + { FileReadLockGuard guard(&flm, "/a/b"); } ASSERT_EQ(flm.GetLockEntryNum(), 0); } -class FileWriteLockGuardTest: public ::testing::Test { +class FileWriteLockGuardTest : public ::testing::Test { public: FileWriteLockGuardTest() {} }; TEST_F(FileWriteLockGuardTest, LockUnlockTest) { - { - FileWriteLockGuard guard(&flm, "/"); - } + { FileWriteLockGuard guard(&flm, "/"); } - { - 
FileWriteLockGuard guard(&flm, "/a"); - } + { FileWriteLockGuard guard(&flm, "/a"); } - { - FileWriteLockGuard guard(&flm, "/a/b"); - } + { FileWriteLockGuard guard(&flm, "/a/b"); } - { - FileWriteLockGuard guard(&flm, "/a", "/a"); - } + { FileWriteLockGuard guard(&flm, "/a", "/a"); } - { - FileWriteLockGuard guard(&flm, "/a", "/b"); - } + { FileWriteLockGuard guard(&flm, "/a", "/b"); } - { - FileWriteLockGuard guard(&flm, "/b", "/a"); - } + { FileWriteLockGuard guard(&flm, "/b", "/a"); } ASSERT_EQ(flm.GetLockEntryNum(), 0); } -// 以下这种情况,跑测试的时候会出现Segmentation fault,是锁的实现机制的问题 -// 要避免这样使用锁,已在代码里进行规避,以下注释的测试保留,提醒使用者注意 +// In the following scenario, a Segmentation fault may occur when running tests, +// due to issues with the locking mechanism. To avoid using locks in this way, +// precautions have been taken in the code. The commented-out test cases are +// retained to remind users to be cautious. /* TEST_F(FileWriteLockGuardTest, LockUnlockTest1) { { diff --git a/test/mds/nameserver2/file_record_test.cpp b/test/mds/nameserver2/file_record_test.cpp index 37a728b012..3369db4554 100644 --- a/test/mds/nameserver2/file_record_test.cpp +++ b/test/mds/nameserver2/file_record_test.cpp @@ -20,15 +20,16 @@ * Author : wuhanqing */ +#include "src/mds/nameserver2/file_record.h" + #include #include -#include //NOLINT -#include // NOLINT +#include //NOLINT +#include // NOLINT #include "src/common/timeutility.h" #include "src/mds/common/mds_define.h" -#include "src/mds/nameserver2/file_record.h" namespace curve { namespace mds { @@ -37,15 +38,15 @@ TEST(FileRecordTest, timeout_test) { butil::EndPoint ep; butil::str2endpoint("127.0.0.1:1111", &ep); - // 设置有效时间为1ms + // Set the effective time to 1ms FileRecord record(1 * 1000, "0.0.6", ep); - // 判断超时 + // Judgment timeout ASSERT_EQ(false, record.IsTimeout()); - // 判断版本号 + // Determine version number ASSERT_EQ("0.0.6", record.GetClientVersion()); - // 睡眠一段时间判断超时是否生效 + // Sleep for a period of time to determine if the timeout is effective std::this_thread::sleep_for(std::chrono::milliseconds(15)); ASSERT_EQ(true, record.IsTimeout()); @@ -89,9 +90,9 @@ TEST(FileRecordManagerTest, normal_test) { kInvalidPort); fileRecordManager.UpdateFileRecord("file4", "0.0.6", "127.0.0.1", 1235); - // 总共记录了4个文件 - // 其中一个port为Invalid - // 其中两个文件打开的client ip port相同 + // A total of 4 files were recorded + // One of the ports is Invalid + // Two of the files have the same client IP port opened ASSERT_EQ(2, fileRecordManager.ListAllClient().size()); // ClientIpPortType clientIpPort; @@ -110,8 +111,7 @@ TEST(FileRecordManagerTest, normal_test) { butil::endpoint2str(clients[0]).c_str()); clients.clear(); - ASSERT_FALSE( - fileRecordManager.FindFileMountPoint("file100", &clients)); + ASSERT_FALSE(fileRecordManager.FindFileMountPoint("file100", &clients)); fileRecordManager.Stop(); } @@ -127,7 +127,7 @@ TEST(FileRecordManagerTest, open_file_num_test) { ASSERT_EQ(0, fileRecordManager.GetOpenFileNum()); - // 插入两个记录 + // Insert two records fileRecordManager.UpdateFileRecord("file1", "", "127.0.0.1", 0); fileRecordManager.UpdateFileRecord("file2", "", "127.0.0.1", 0); @@ -138,18 +138,18 @@ TEST(FileRecordManagerTest, open_file_num_test) { } }; - // 只对 file1 定期续约 + // Regular renewal only for file1 std::thread th(task, "file1"); - // sleep 50ms后,file2 会超时 + // After 50ms of sleep, file2 will timeout std::this_thread::sleep_for(std::chrono::milliseconds(50)); ASSERT_EQ(1, fileRecordManager.GetOpenFileNum()); - // 停止 file1 的定期续约 + // Stop regular renewal of file1 running = false; th.join(); - 
// sleep 50ms后,file1 也会超时 + // After 50ms of sleep, file1 will also timeout std::this_thread::sleep_for(std::chrono::milliseconds(50)); ASSERT_EQ(0, fileRecordManager.GetOpenFileNum()); diff --git a/test/mds/nameserver2/namespace_service_test.cpp b/test/mds/nameserver2/namespace_service_test.cpp index c5247030f2..09fff706e2 100644 --- a/test/mds/nameserver2/namespace_service_test.cpp +++ b/test/mds/nameserver2/namespace_service_test.cpp @@ -19,40 +19,42 @@ * Created Date: Wednesday September 26th 2018 * Author: hzsunjianliang */ -#include -#include -#include +#include "src/mds/nameserver2/namespace_service.h" + #include #include -#include "src/mds/nameserver2/namespace_service.h" -#include "src/mds/nameserver2/curvefs.h" -#include "src/mds/nameserver2/chunk_allocator.h" -#include "src/common/timeutility.h" +#include +#include +#include + +#include "src/common/authenticator.h" #include "src/common/configuration.h" #include "src/common/string_util.h" -#include "test/mds/nameserver2/fakes.h" -#include "test/mds/nameserver2/mock/mock_clean_manager.h" -#include "test/mds/nameserver2/mock/mock_chunk_allocate.h" -#include "src/mds/nameserver2/clean_manager.h" +#include "src/common/timeutility.h" +#include "src/mds/chunkserverclient/copyset_client.h" +#include "src/mds/nameserver2/chunk_allocator.h" #include "src/mds/nameserver2/clean_core.h" +#include "src/mds/nameserver2/clean_manager.h" #include "src/mds/nameserver2/clean_task_manager.h" -#include "src/common/authenticator.h" -#include "test/mds/mock/mock_topology.h" -#include "test/mds/mock/mock_chunkserver.h" -#include "src/mds/chunkserverclient/copyset_client.h" +#include "src/mds/nameserver2/curvefs.h" #include "test/mds/mock/mock_alloc_statistic.h" +#include "test/mds/mock/mock_chunkserver.h" +#include "test/mds/mock/mock_topology.h" +#include "test/mds/nameserver2/fakes.h" +#include "test/mds/nameserver2/mock/mock_chunk_allocate.h" +#include "test/mds/nameserver2/mock/mock_clean_manager.h" -using curve::common::TimeUtility; using curve::common::Authenticator; -using curve::mds::topology::MockTopology; +using curve::common::TimeUtility; using ::curve::mds::chunkserverclient::ChunkServerClientOption; +using curve::mds::topology::MockTopology; using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; using ::testing::Invoke; using ::testing::Matcher; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace mds { @@ -74,34 +76,33 @@ class NameSpaceServiceTest : public ::testing::Test { protected: void SetUp() override { // init the kcurvefs, use the fake element - storage_ = std::make_shared(); + storage_ = std::make_shared(); inodeGenerator_ = std::make_shared(0); topology_ = std::make_shared(); ChunkServerClientOption option; auto channelPool = std::make_shared(); - auto client = std::make_shared(topology_, - option, channelPool); + auto client = + std::make_shared(topology_, option, channelPool); allocStatistic_ = std::make_shared(); - cleanCore_ = std::make_shared( - storage_, client, allocStatistic_); + cleanCore_ = + std::make_shared(storage_, client, allocStatistic_); // new taskmanger for 2 worker thread, and check thread period 2 second - cleanTaskManager_ = std::make_shared(channelPool, - 2, 2000); + cleanTaskManager_ = + std::make_shared(channelPool, 2, 2000); - cleanManager_ = std::make_shared(cleanCore_, - cleanTaskManager_, storage_); + cleanManager_ = std::make_shared( + cleanCore_, cleanTaskManager_, storage_); 
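// The clean manager drives the asynchronous delete and snapshot tasks used
// by the delete-file cases below; SetUp fails fast if its worker threads do
// not start.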
ASSERT_EQ(cleanManager_->Start(), true); std::shared_ptr topologyChunkAllocator = - std::make_shared(); + std::make_shared(); std::shared_ptr chunkIdGenerator = - std::make_shared(); - chunkSegmentAllocate_ = - std::make_shared( - topologyChunkAllocator, chunkIdGenerator); + std::make_shared(); + chunkSegmentAllocate_ = std::make_shared( + topologyChunkAllocator, chunkIdGenerator); fileRecordManager_ = std::make_shared(); fileRecordOptions.fileRecordExpiredTimeUs = 5 * 1000; @@ -118,16 +119,13 @@ class NameSpaceServiceTest : public ::testing::Test { curveFSOptions.authOptions = authOptions; kCurveFS.Init(storage_, inodeGenerator_, chunkSegmentAllocate_, - cleanManager_, - fileRecordManager_, - allocStatistic_, - curveFSOptions, topology_, - nullptr); + cleanManager_, fileRecordManager_, allocStatistic_, + curveFSOptions, topology_, nullptr); ASSERT_EQ(curveFSOptions.defaultChunkSize, - kCurveFS.GetDefaultChunkSize()); + kCurveFS.GetDefaultChunkSize()); ASSERT_EQ(curveFSOptions.defaultSegmentSize, - kCurveFS.GetDefaultSegmentSize()); + kCurveFS.GetDefaultSegmentSize()); ASSERT_EQ(curveFSOptions.minFileLength, kCurveFS.GetMinFileLength()); ASSERT_EQ(curveFSOptions.maxFileLength, kCurveFS.GetMaxFileLength()); DefaultSegmentSize = kCurveFS.GetDefaultSegmentSize(); @@ -150,7 +148,7 @@ class NameSpaceServiceTest : public ::testing::Test { } } - template + template void SetRequestAuth(T* request, RequestOption option) { uint64_t date = TimeUtility::GetTimeofDayUs(); request->set_date(date); @@ -173,18 +171,16 @@ class NameSpaceServiceTest : public ::testing::Test { uint64_t time; auto n = items.size(); - if (n <= 2 || !::curve::common::StringToUll(items[n - 1], &time) - || time < dtime || time - dtime > 1) { + if (n <= 2 || !::curve::common::StringToUll(items[n - 1], &time) || + time < dtime || time - dtime > 1) { LOG(INFO) << "unexpected filename: " << filename - << ", dtime: " << dtime - << ", time in file: " << time; + << ", dtime: " << dtime << ", time in file: " << time; return false; } return true; } - bool DeleteFile(const std::string& filename, - RequestOption option, + bool DeleteFile(const std::string& filename, RequestOption option, DeleteFileResponse* response) { brpc::Controller cntl; DeleteFileRequest request; @@ -201,8 +197,7 @@ class NameSpaceServiceTest : public ::testing::Test { return true; } - bool GetFileInfo(const std::string& filename, - RequestOption option, + bool GetFileInfo(const std::string& filename, RequestOption option, GetFileInfoResponse* response) { brpc::Controller cntl; GetFileInfoRequest request; @@ -218,8 +213,7 @@ class NameSpaceServiceTest : public ::testing::Test { return true; } - bool ListDir(const std::string& dirname, - RequestOption option, + bool ListDir(const std::string& dirname, RequestOption option, ListDirResponse* response) { brpc::Controller cntl; ListDirRequest request; @@ -260,8 +254,9 @@ TEST_F(NameSpaceServiceTest, test1) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -273,7 +268,6 @@ TEST_F(NameSpaceServiceTest, test1) { CurveFSService_Stub stub(&channel); - // test CreateFile // create /file1(owner1) , /file2(owner2), /dir/file3(owner3) std::vector logicalPools{1, 2, 3}; @@ -285,7 +279,7 @@ TEST_F(NameSpaceServiceTest, test1) { brpc::Controller cntl; uint64_t fileLength 
= kMiniFileLength; - // 创建file1,owner1 + // Create file1, owner1 request.set_filename("/file1"); request.set_owner("owner1"); request.set_date(TimeUtility::GetTimeofDayUs()); @@ -347,7 +341,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 在一个不存在的目录下创建文件,会失败 kFileNotExists + // Creating a file in a non-existent directory fails with kFileNotExists cntl.Reset(); request.set_filename("/dir4/file4"); request.set_owner("owner4"); @@ -363,7 +357,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 在一个文件下创建文件,会失败 kNotDirectory + // Creating a file under another file fails with kNotDirectory cntl.Reset(); request.set_filename("/file2/file4"); request.set_owner("owner2"); @@ -379,7 +373,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 如果创建一个已经存在的文件,会创建失败kFileExists + // Creating a file that already exists fails with kFileExists cntl.Reset(); request.set_filename("/file2"); request.set_poolset(""); @@ -396,7 +390,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 如果创建一个已经存在的目录,会创建失败kFileExists + // Creating a directory that already exists fails with kFileExists cntl.Reset(); request.set_filename("/dir"); request.set_owner("owner3"); @@ -412,7 +406,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 创建其他类型文件,返回kNotSupported + // Creating files of other types returns kNotSupported cntl.Reset(); request.set_filename("/file4"); request.set_owner("owner4"); @@ -457,7 +451,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 创建文件名不规范的文件会失败 + // Creating files with non-standard file names will fail cntl.Reset(); request.set_filename("/file4/"); request.set_owner("owner4"); @@ -515,10 +509,10 @@ TEST_F(NameSpaceServiceTest, test1) { cntl.Reset(); uint64_t date = TimeUtility::GetTimeofDayUs(); - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); + std::string sig = Authenticator::CalcString2Signature( + str2sig, authOptions.rootPassword); listRequest.set_signature(sig); listRequest.set_filename("/"); listRequest.set_owner(authOptions.rootOwner); @@ -527,7 +521,7 @@ TEST_F(NameSpaceServiceTest, test1) { if (!cntl.Failed()) { ASSERT_EQ(listResponse.statuscode(), StatusCode::kOK); ASSERT_EQ(listResponse.fileinfo_size(), 4); - } else { + } else { ASSERT_TRUE(false); } } @@ -559,7 +553,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_EQ(response1.fileinfo().parentid(), 0); ASSERT_EQ(response1.fileinfo().filetype(), INODE_PAGEFILE); ASSERT_EQ(response1.fileinfo().chunksize(), - curveFSOptions.defaultChunkSize); + curveFSOptions.defaultChunkSize); ASSERT_EQ(response1.fileinfo().segmentsize(), DefaultSegmentSize); ASSERT_EQ(response1.fileinfo().length(), fileLength); } else { FAIL(); } // test GetOrAllocateSegment - // 为file1分配空间 + // Allocate space for file1 cntl.Reset(); GetOrAllocateSegmentRequest request2; GetOrAllocateSegmentResponse response2; @@ -606,13 +600,13 @@ TEST_F(NameSpaceServiceTest, test1) { if (!cntl.Failed()) { ASSERT_EQ(response2.statuscode(), StatusCode::kOK); ASSERT_EQ(response2.pagefilesegment().segmentsize(), - response1.fileinfo().segmentsize()); + response1.fileinfo().segmentsize()); ASSERT_EQ(response2.pagefilesegment().chunksize(), - response1.fileinfo().chunksize()); + response1.fileinfo().chunksize()); ASSERT_EQ(response2.pagefilesegment().startoffset(),
request2.offset()); - int chunkNumber = response2.pagefilesegment().segmentsize()/ - response2.pagefilesegment().chunksize(); + int chunkNumber = response2.pagefilesegment().segmentsize() / + response2.pagefilesegment().chunksize(); ASSERT_EQ(response2.pagefilesegment().chunks().size(), chunkNumber); } else { @@ -631,7 +625,7 @@ TEST_F(NameSpaceServiceTest, test1) { if (!cntl.Failed()) { ASSERT_EQ(response3.statuscode(), StatusCode::kOK); ASSERT_EQ(response3.pagefilesegment().SerializeAsString(), - response2.pagefilesegment().SerializeAsString()); + response2.pagefilesegment().SerializeAsString()); } else { ASSERT_TRUE(false); } @@ -682,8 +676,8 @@ TEST_F(NameSpaceServiceTest, test1) { // test change owner { - // 当前有文件 /file1(owner1) , /file2(owner2), /dir/file3(owner3) - // changeowner success + // There are currently /file1(owner1) , /file2(owner2), + // /dir/file3(owner3) changeowner success cntl.Reset(); ChangeOwnerRequest request; ChangeOwnerResponse response; @@ -694,10 +688,10 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_filename("/file1"); request.set_newowner("newowner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner(authOptions.rootOwner); request.set_signature(sig); request.set_date(date); @@ -713,10 +707,10 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_filename("/file1"); request.set_newowner("newowner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner(authOptions.rootOwner); request.set_signature(sig); request.set_date(date); @@ -732,10 +726,10 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_filename("/file1"); request.set_newowner("owner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner("newowner1"); request.set_signature(sig); request.set_date(date); @@ -766,10 +760,10 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_filename("/file1"); request.set_newowner("owner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner(authOptions.rootOwner); request.set_signature(sig); request.set_date(date + kStaledRequestTimeIntervalUs * 2); @@ -785,10 +779,10 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_filename("/file1"); request.set_newowner("owner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); 
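// The signing handshake exercised throughout these cases: the client builds
// the string-to-sign from (date, owner) and signs it with the root password,
// and the MDS recomputes the signature server side. A minimal sketch,
// assuming the Authenticator helper from src/common/authenticator.h included
// above (the owner and password values here are illustrative only):
//
//   uint64_t date = TimeUtility::GetTimeofDayUs();
//   std::string str2sig = Authenticator::GetString2Signature(date, "root");
//   std::string sig =
//       Authenticator::CalcString2Signature(str2sig, "root_password");
//
// A request whose date is older than kStaledRequestTimeIntervalUs is rejected
// with StatusCode::kOwnerAuthFail even when the signature matches.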
request.set_rootowner(authOptions.rootOwner); request.set_signature(sig); request.set_date(date); @@ -799,15 +793,15 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // changeowner 文件名不规范,失败 + // changeowner with a non-standard file name fails cntl.Reset(); request.set_filename("/file1/"); request.set_newowner("owner1"); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); + str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + authOptions.rootPassword); request.set_rootowner(authOptions.rootOwner); request.set_signature(sig); request.set_date(date); @@ -820,12 +814,12 @@ } // test RenameFile - // 重命名到根目录下,非root owner,失败 - // fileinfoid不匹配,失败 - // 重命名成功 /dir/file3 -> /dir/file4 - // 原文件不存在,重命名失败 - // 重命名到根目录下,root owner,成功 /dir/file4 -> /file4 - // 文件名不规范,失败 + // Renaming to the root directory as a non-root owner fails + // A mismatched fileinfoid fails + // Rename succeeds: /dir/file3 -> /dir/file4 + // If the source file does not exist, the rename fails + // Renaming to the root directory as root owner succeeds: /dir/file4 -> /file4 + // A non-standard file name fails cntl.Reset(); RenameFileRequest request4; RenameFileResponse response4; @@ -858,10 +852,10 @@ TEST_F(NameSpaceServiceTest, test1) { std::string oldname = "/dir/file4"; uint64_t date = TimeUtility::GetTimeofDayUs(); - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); + std::string sig = + Authenticator::CalcString2Signature(str2sig, authOptions.rootPassword); request4.set_oldfilename(oldname); request4.set_newfilename("/file4"); @@ -951,8 +945,8 @@ TEST_F(NameSpaceServiceTest, test1) { } // test ExtendFile - // 扩容file2,第一次扩大,成功;第二次缩小,失败 - // 扩容的文件名不符合规范,失败 + // Extend file2: the first request grows the file and succeeds; the second + // tries to shrink it and fails. A non-standard file name also fails uint64_t newsize = kMiniFileLength * 2; cntl.Reset(); ExtendFileRequest request5; ExtendFileResponse response5; @@ -992,8 +986,9 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // begin session test,开始测试时,有/file1,/file2和/file4 - // OpenFile case1. 文件不存在,返回kFileNotExists + // begin session test: at this point /file1, /file2 and /file4 exist + // OpenFile case1. File does not exist, returns kFileNotExists cntl.Reset(); OpenFileRequest request8; OpenFileResponse response8; request8.set_filename("/file5"); @@ -1008,7 +1003,8 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // OpenFile case2. 文件存在,没有open过,返回成功、session、fileInfo + // OpenFile case2. The file exists and has not been opened; returns success
+ // with the session and fileInfo cntl.Reset(); OpenFileRequest request9; OpenFileResponse response9; request9.set_filename("/file2"); request9.set_owner("owner2"); request9.set_date(TimeUtility::GetTimeofDayUs()); stub.OpenFile(&cntl, &request9, &response9, NULL); if (!cntl.Failed()) { ASSERT_EQ(response9.statuscode(), StatusCode::kOK); ASSERT_EQ(response9.protosession().sessionstatus(), - SessionStatus::kSessionOK); + SessionStatus::kSessionOK); ASSERT_EQ(response9.fileinfo().filename(), "file2"); } else { ASSERT_TRUE(false); } @@ -1037,13 +1033,13 @@ if (!cntl.Failed()) { ASSERT_EQ(response10.statuscode(), StatusCode::kOK); ASSERT_EQ(response10.protosession().sessionstatus(), - SessionStatus::kSessionOK); + SessionStatus::kSessionOK); ASSERT_EQ(response10.fileinfo().filename(), "file1"); } else { ASSERT_TRUE(false); } - // openFile case3, 文件名不符合规范 + // OpenFile case3. The file name does not meet the specifications OpenFileRequest request11; OpenFileResponse response11; cntl.Reset(); @@ -1058,7 +1054,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // CloseFile case1. 文件不存在,返回kFileNotExists + // CloseFile case1. File does not exist, returns kFileNotExists cntl.Reset(); CloseFileRequest request12; CloseFileResponse response12; @@ -1074,7 +1070,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // CloseFile case2. 文件存在,session存在,返回成功 + // CloseFile case2. File exists, session exists, success returned CloseFileRequest request13; CloseFileResponse response13; cntl.Reset(); @@ -1092,7 +1088,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // CloseFile case3. 文件名不符合规范 + // CloseFile case3. The file name does not meet the specifications cntl.Reset(); request14.set_filename("/file2/"); request14.set_owner("owner2"); @@ -1106,7 +1102,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // RefreshSession case1. 文件不存在,返回kFileNotExists + // RefreshSession case1. File does not exist, returns kFileNotExists cntl.Reset(); ReFreshSessionRequest request15; ReFreshSessionResponse response15; @@ -1124,7 +1120,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // RefreshSession case2.
The file name does not meet the specifications ReFreshSessionRequest request18; ReFreshSessionResponse response18; cntl.Reset(); @@ -1155,8 +1151,9 @@ TEST_F(NameSpaceServiceTest, snapshottests) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -1168,7 +1165,6 @@ TEST_F(NameSpaceServiceTest, snapshottests) { CurveFSService_Stub stub(&channel); - // test create file std::vector logicalPools{1, 2, 3}; EXPECT_CALL(*topology_, GetLogicalPoolInCluster(_)) @@ -1188,7 +1184,7 @@ TEST_F(NameSpaceServiceTest, snapshottests) { request.set_filelength(fileLength); cntl.set_log_id(2); - stub.CreateFile(&cntl, &request, &response, NULL); + stub.CreateFile(&cntl, &request, &response, NULL); if (!cntl.Failed()) { ASSERT_EQ(response.statuscode(), StatusCode::kOK); } else { @@ -1204,7 +1200,7 @@ TEST_F(NameSpaceServiceTest, snapshottests) { request1.set_date(TimeUtility::GetTimeofDayUs()); stub.GetFileInfo(&cntl, &request1, &response1, NULL); if (!cntl.Failed()) { - FileInfo file = response1.fileinfo(); + FileInfo file = response1.fileinfo(); ASSERT_EQ(response1.statuscode(), StatusCode::kOK); ASSERT_EQ(file.id(), 1); ASSERT_EQ(file.filename(), "file1"); @@ -1246,7 +1242,7 @@ TEST_F(NameSpaceServiceTest, snapshottests) { snapshotRequest.set_date(TimeUtility::GetTimeofDayUs()); stub.CreateSnapShot(&cntl, &snapshotRequest, &snapshotResponses, NULL); if (!cntl.Failed()) { - ASSERT_EQ(snapshotResponses.statuscode(), StatusCode::kParaError); + ASSERT_EQ(snapshotResponses.statuscode(), StatusCode::kParaError); } else { ASSERT_TRUE(false); } @@ -1310,11 +1306,11 @@ TEST_F(NameSpaceServiceTest, snapshottests) { getSegmentRequest.set_offset(DefaultSegmentSize); getSegmentRequest.set_allocateifnotexist(false); getSegmentRequest.set_seqnum(1); - stub.GetSnapShotFileSegment(&cntl, &getSegmentRequest, - &getSegmentResponse, NULL); + stub.GetSnapShotFileSegment(&cntl, &getSegmentRequest, &getSegmentResponse, + NULL); if (!cntl.Failed()) { ASSERT_EQ(getSegmentResponse.statuscode(), - StatusCode::kSegmentNotAllocated); + StatusCode::kSegmentNotAllocated); } else { ASSERT_TRUE(false); } @@ -1326,8 +1322,8 @@ TEST_F(NameSpaceServiceTest, snapshottests) { getSegmentRequest.set_offset(DefaultSegmentSize); getSegmentRequest.set_allocateifnotexist(false); getSegmentRequest.set_seqnum(1); - stub.GetSnapShotFileSegment(&cntl, &getSegmentRequest, - &getSegmentResponse, NULL); + stub.GetSnapShotFileSegment(&cntl, &getSegmentRequest, &getSegmentResponse, + NULL); if (!cntl.Failed()) { ASSERT_EQ(getSegmentResponse.statuscode(), StatusCode::kParaError); } else { @@ -1407,13 +1403,14 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); using ::curve::chunkserver::MockChunkService; - MockChunkService *chunkService = new MockChunkService(); - ASSERT_EQ(server.AddService(chunkService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + MockChunkService* chunkService = new MockChunkService(); + ASSERT_EQ(server.AddService(chunkService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ 
-1426,7 +1423,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { CurveFSService_Stub stub(&channel); - // 先创建文件/file1,目录/dir1,文件/dir1/file2 + // First create file '/file1', directory '/dir1', file '/dir1/file2' std::vector logicalPools{1, 2, 3}; EXPECT_CALL(*topology_, GetLogicalPoolInCluster(_)) .Times(AtLeast(1)) @@ -1444,7 +1441,7 @@ request.set_filelength(fileLength); cntl.set_log_id(2); - stub.CreateFile(&cntl, &request, &response, NULL); + stub.CreateFile(&cntl, &request, &response, NULL); if (!cntl.Failed()) { ASSERT_EQ(response.statuscode(), StatusCode::kOK); } else { @@ -1480,7 +1477,8 @@ FAIL(); } - // 查看文件/file1,目录/dir1,文件/dir1/file2的状态 + // View the status of file '/file1', directory '/dir1', and file + // '/dir1/file2' cntl.Reset(); GetFileInfoRequest request1; GetFileInfoResponse response1; @@ -1489,7 +1487,7 @@ request1.set_date(TimeUtility::GetTimeofDayUs()); stub.GetFileInfo(&cntl, &request1, &response1, NULL); if (!cntl.Failed()) { - FileInfo file = response1.fileinfo(); + FileInfo file = response1.fileinfo(); ASSERT_EQ(response1.statuscode(), StatusCode::kOK); ASSERT_EQ(file.id(), 1); ASSERT_EQ(file.filename(), "file1"); @@ -1509,7 +1507,7 @@ request1.set_date(TimeUtility::GetTimeofDayUs()); stub.GetFileInfo(&cntl, &request1, &response1, NULL); if (!cntl.Failed()) { - FileInfo file = response1.fileinfo(); + FileInfo file = response1.fileinfo(); ASSERT_EQ(response1.statuscode(), StatusCode::kOK); ASSERT_EQ(file.id(), 2); ASSERT_EQ(file.filename(), "dir1"); @@ -1539,7 +1537,7 @@ ASSERT_TRUE(false); } - // 文件/dir1/file2申请segment + // Allocate segments for file '/dir1/file2' GetOrAllocateSegmentRequest allocRequest; GetOrAllocateSegmentResponse allocResponse; for (int i = 0; i < 10; i++) { @@ -1551,15 +1549,15 @@ allocRequest.set_allocateifnotexist(true); stub.GetOrAllocateSegment(&cntl, &allocRequest, &allocResponse, NULL); if (!cntl.Failed()) { - ASSERT_EQ(allocResponse.statuscode(), - StatusCode::kOK); + ASSERT_EQ(allocResponse.statuscode(), StatusCode::kOK); } else { ASSERT_TRUE(false); } } - // 开始测试删除文件逻辑 - // 1 如果文件有快照,那么删除文件返回kFileUnderSnapShot + // Start testing delete file logic + // 1. If the file has a snapshot, deleting the file returns + // kFileUnderSnapShot cntl.Reset(); CreateSnapShotRequest snapshotRequest; CreateSnapShotResponse snapshotResponses; @@ -1623,7 +1621,7 @@ stub.CheckSnapShotStatus(&cntl, &checkRequest, &checkResponse, NULL); if (!cntl.Failed()) { if (checkResponse.statuscode() == - StatusCode::kSnapshotFileNotExists) { + StatusCode::kSnapshotFileNotExists) { break; } else { ASSERT_EQ(checkResponse.statuscode(), StatusCode::kOK); @@ -1636,10 +1634,10 @@ } } ASSERT_LE(attempts, 100) - << "max attempts for check snapshot status exhausted"; - + << "max attempts for check snapshot status exhausted"; - // 2 如果目录下有文件,那么删除目录返回kDirNotEmpty + // 2. If there are files in the directory, deleting the directory returns + // kDirNotEmpty cntl.Reset(); request3.set_filename("/dir1"); request3.set_owner("owner"); @@ -1653,7 +1651,7 @@ ASSERT_TRUE(false); } - // 3.
If the passed-in fileid does not match, deleting the file fails cntl.Reset(); DeleteFileRequest request5; DeleteFileResponse response5; @@ -1670,7 +1668,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { ASSERT_TRUE(false); } - // 4 删除文件/file1成功,查询文件已经删除 + // 4. Delete file '/file1' successfully, then verify the file is deleted cntl.Reset(); request3.set_filename("/file1"); request3.set_owner("owner"); @@ -1696,15 +1694,15 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { ASSERT_TRUE(false); } - // 查询垃圾箱 + // Query the recycle bin ListDirRequest listRequest; ListDirResponse listResponse; cntl.Reset(); uint64_t date = TimeUtility::GetTimeofDayUs(); - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + std::string str2sig = + Authenticator::GetString2Signature(date, authOptions.rootOwner); + std::string sig = + Authenticator::CalcString2Signature(str2sig, authOptions.rootPassword); listRequest.set_signature(sig); listRequest.set_filename(RECYCLEBINDIR); listRequest.set_owner(authOptions.rootOwner); @@ -1716,37 +1714,36 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { FileInfo file = listResponse.fileinfo(0); ASSERT_TRUE(CheckFilename(file.filename(), dtime)); // file1-1-${dtime} ASSERT_EQ(file.filestatus(), FileStatus::kFileCreated); - } else { + } else { ASSERT_TRUE(false); } - // 删除文件/dir1/file2成功,删除目录/dir1成功,查询目录和文件均已经删除 - using ::curve::mds::topology::ChunkServerStatus; - using ::curve::mds::topology::OnlineState; + // Delete file '/dir1/file2' and directory '/dir1' successfully, then + // verify that both the directory and the file are deleted + using ::curve::chunkserver::CHUNK_OP_STATUS; using ::curve::chunkserver::ChunkRequest; using ::curve::chunkserver::ChunkResponse; - using ::curve::chunkserver::CHUNK_OP_STATUS; + using ::curve::mds::topology::ChunkServerStatus; + using ::curve::mds::topology::OnlineState; CopySetInfo copyset(1, 1); copyset.SetLeader(1); EXPECT_CALL(*topology_, GetCopySet(_, _)) - .WillRepeatedly( - DoAll(SetArgPointee<1>(copyset), Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(copyset), Return(true))); ChunkServer chunkserver(1, "", "", 1, "127.0.0.1", listenAddr.port, "", - ChunkServerStatus::READWRITE, OnlineState::ONLINE); + ChunkServerStatus::READWRITE, OnlineState::ONLINE); EXPECT_CALL(*topology_, GetChunkServer(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkserver), Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(chunkserver), Return(true))); ChunkResponse chunkResponse; chunkResponse.set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); EXPECT_CALL(*chunkService, DeleteChunk(_, _, _, _)) - .WillRepeatedly(DoAll(SetArgPointee<2>(chunkResponse), - Invoke([](RpcController *controller, - const ChunkRequest *chunkRequest, - ChunkResponse *chunkResponse, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(chunkResponse), + Invoke([](RpcController* controller, + const ChunkRequest* chunkRequest, + ChunkResponse* chunkResponse, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); stub_ = std::make_shared(&channel); @@ -1858,8 +1855,9 @@ TEST_F(NameSpaceServiceTest, clonetest) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option;
option.idle_timeout_sec = -1; @@ -1944,8 +1942,9 @@ TEST_F(NameSpaceServiceTest, listClientTest) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -1979,8 +1978,9 @@ TEST_F(NameSpaceServiceTest, listAllClientTest) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -2085,8 +2085,9 @@ TEST_F(NameSpaceServiceTest, ListVolumesOnCopysets) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -2116,8 +2117,9 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { // start server NameSpaceService namespaceService(new FileLockManager(8)); - ASSERT_EQ(server.AddService(&namespaceService, - brpc::SERVER_DOESNT_OWN_SERVICE), 0); + ASSERT_EQ( + server.AddService(&namespaceService, brpc::SERVER_DOESNT_OWN_SERVICE), + 0); brpc::ServerOptions option; option.idle_timeout_sec = -1; @@ -2143,7 +2145,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { createRequest.set_date(TimeUtility::GetTimeofDayUs()); createRequest.set_filetype(INODE_PAGEFILE); createRequest.set_filelength(fileLength); - stub.CreateFile(&cntl, &createRequest, &createResponse, NULL); + stub.CreateFile(&cntl, &createRequest, &createResponse, NULL); if (!cntl.Failed()) { ASSERT_EQ(createResponse.statuscode(), StatusCode::kOK); } else { @@ -2187,7 +2189,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { getRequest.set_date(TimeUtility::GetTimeofDayUs()); stub.GetFileInfo(&cntl, &getRequest, &getResponse, NULL); if (!cntl.Failed()) { - FileInfo file = getResponse.fileinfo(); + FileInfo file = getResponse.fileinfo(); ASSERT_EQ(getResponse.statuscode(), StatusCode::kOK); ASSERT_EQ(file.id(), 1); ASSERT_EQ(file.filename(), "file1"); @@ -2207,7 +2209,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { getRequest.set_date(TimeUtility::GetTimeofDayUs()); stub.GetFileInfo(&cntl, &getRequest, &getResponse, NULL); if (!cntl.Failed()) { - FileInfo file = getResponse.fileinfo(); + FileInfo file = getResponse.fileinfo(); ASSERT_EQ(getResponse.statuscode(), StatusCode::kOK); ASSERT_EQ(file.id(), 2); ASSERT_EQ(file.filename(), "dir1"); @@ -2249,8 +2251,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { allocRequest.set_allocateifnotexist(true); stub.GetOrAllocateSegment(&cntl, &allocRequest, &allocResponse, NULL); if (!cntl.Failed()) { - ASSERT_EQ(allocResponse.statuscode(), - StatusCode::kOK); + ASSERT_EQ(allocResponse.statuscode(), StatusCode::kOK); } else { ASSERT_TRUE(false); } @@ -2278,10 +2279,10 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { ListDirRequest listRequest; ListDirResponse listResponse; uint64_t date = TimeUtility::GetTimeofDayUs(); - std::string str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - std::string sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + std::string str2sig = + 
Authenticator::GetString2Signature(date, authOptions.rootOwner); + std::string sig = + Authenticator::CalcString2Signature(str2sig, authOptions.rootPassword); listRequest.set_signature(sig); listRequest.set_filename(RECYCLEBINDIR); listRequest.set_owner(authOptions.rootOwner); @@ -2295,7 +2296,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { ASSERT_EQ(file.originalfullpathname(), "/dir1/file2"); ASSERT_EQ(file.filestatus(), FileStatus::kFileCreated); ASSERT_EQ(listResponse.fileinfo_size(), 1); - } else { + } else { ASSERT_TRUE(false); } @@ -2327,7 +2328,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { ASSERT_EQ(file.filename(), "file2"); ASSERT_EQ(file.filestatus(), FileStatus::kFileCreated); ASSERT_EQ(listResponse.fileinfo_size(), 1); - } else { + } else { ASSERT_TRUE(false); } @@ -2400,14 +2401,14 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { ASSERT_TRUE(false); } - // 3. check the ctime of recovered file is greater than the other in recyclebin //NOLINT + // 3. check the ctime of recovered file is greater than the other in + // recyclebin //NOLINT FileInfo recycleFile; cntl.Reset(); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + str2sig = Authenticator::GetString2Signature(date, authOptions.rootOwner); + sig = + Authenticator::CalcString2Signature(str2sig, authOptions.rootPassword); listRequest.set_signature(sig); listRequest.set_filename(RECYCLEBINDIR); listRequest.set_owner(authOptions.rootOwner); @@ -2471,10 +2472,9 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { // 3. check the fileId of recovered file 3 and not recovered is 4 cntl.Reset(); date = TimeUtility::GetTimeofDayUs(); - str2sig = Authenticator::GetString2Signature(date, - authOptions.rootOwner); - sig = Authenticator::CalcString2Signature(str2sig, - authOptions.rootPassword); + str2sig = Authenticator::GetString2Signature(date, authOptions.rootOwner); + sig = + Authenticator::CalcString2Signature(str2sig, authOptions.rootPassword); listRequest.set_signature(sig); listRequest.set_filename(RECYCLEBINDIR); listRequest.set_owner(authOptions.rootOwner); @@ -2588,8 +2588,8 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { createCloneRequest.set_poolset(kDefaultPoolset); createCloneRequest.set_clonesource("/sourcefile1"); cntl.Reset(); - stub.CreateCloneFile(&cntl, &createCloneRequest, - &createCloneResponse, NULL); + stub.CreateCloneFile(&cntl, &createCloneRequest, &createCloneResponse, + NULL); if (!cntl.Failed()) { ASSERT_EQ(createCloneResponse.statuscode(), StatusCode::kOK); } else { @@ -2632,7 +2632,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { stub.RecoverFile(&cntl, &recoverRequest, &recoverRresponse, NULL); if (!cntl.Failed()) { ASSERT_EQ(recoverRresponse.statuscode(), - StatusCode::kRecoverFileCloneMetaInstalled); + StatusCode::kRecoverFileCloneMetaInstalled); } else { std::cout << cntl.ErrorText(); ASSERT_TRUE(false); @@ -2649,8 +2649,8 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { createCloneRequest.set_owner("owner"); createCloneRequest.set_clonesource("/sourcefile1"); cntl.Reset(); - stub.CreateCloneFile(&cntl, &createCloneRequest, - &createCloneResponse, NULL); + stub.CreateCloneFile(&cntl, &createCloneRequest, &createCloneResponse, + NULL); if (!cntl.Failed()) { ASSERT_EQ(createCloneResponse.statuscode(), StatusCode::kOK); } else { @@ -2690,8 +2690,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { 
recoverRequest.set_date(TimeUtility::GetTimeofDayUs()); stub.RecoverFile(&cntl, &recoverRequest, &recoverRresponse, NULL); if (!cntl.Failed()) { - ASSERT_EQ(recoverRresponse.statuscode(), - StatusCode::kRecoverFileError); + ASSERT_EQ(recoverRresponse.statuscode(), StatusCode::kRecoverFileError); } else { std::cout << cntl.ErrorText(); ASSERT_TRUE(false); @@ -2754,9 +2753,9 @@ TEST_F(NameSpaceServiceTest, TestDeAllocateSegment) { // create file and allocate segment { std::vector logicalPools{1, 2, 3}; - EXPECT_CALL(*topology_, GetLogicalPoolInCluster(_)) - .Times(AtLeast(1)) - .WillRepeatedly(Return(logicalPools)); + EXPECT_CALL(*topology_, GetLogicalPoolInCluster(_)) + .Times(AtLeast(1)) + .WillRepeatedly(Return(logicalPools)); CreateFileRequest createRequest; CreateFileResponse createResponse; createRequest.set_filename(filename); diff --git a/test/mds/schedule/coordinator_test.cpp b/test/mds/schedule/coordinator_test.cpp index b18aa07b31..90284dfeff 100644 --- a/test/mds/schedule/coordinator_test.cpp +++ b/test/mds/schedule/coordinator_test.cpp @@ -20,19 +20,21 @@ * Author: lixiaocui */ -#include #include "src/mds/schedule/coordinator.h" + +#include + #include "src/mds/common/mds_define.h" -#include "test/mds/schedule/mock_topoAdapter.h" #include "test/mds/mock/mock_topology.h" #include "test/mds/schedule/common.h" +#include "test/mds/schedule/mock_topoAdapter.h" -using ::curve::mds::topology::MockTopology; using ::curve::mds::schedule::ScheduleOption; +using ::curve::mds::topology::MockTopology; +using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; -using ::testing::_; using ::curve::mds::topology::UNINTIALIZE_ID; @@ -85,29 +87,31 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { copySetKey.first = 1; copySetKey.second = 1; Operator testOperator(startEpoch, copySetKey, - OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared(4)); + OperatorPriority::NormalPriority, steady_clock::now(), + std::make_shared(4)); testOperator.timeLimit = std::chrono::seconds(100); auto info = GetCopySetInfoForTest(); PeerInfo peer(4, 1, 1, "127.0.0.1", 9000); ChunkServerInfo csInfo(peer, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, - 1, 10, 1, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 1, 10, 1, + ChunkServerStatisticInfo{}); ::curve::mds::heartbeat::CopySetConf res; { // 1. test copySet do not have operator EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { // 2. 
test copySet has operator and not execute EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) - .Times(2).WillOnce(DoAll(SetArgPointee<1>(info), Return(true))) - .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); + .Times(2) + .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))) + .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); EXPECT_CALL(*topoAdapter, GetChunkServerInfo(_, _)) .Times(3) .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(true))) @@ -115,21 +119,22 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(Return(false)); coordinator->GetOpController()->AddOperator(testOperator); Operator opRes; - ASSERT_TRUE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); - // 第一次下发配置 - ASSERT_EQ(4, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_TRUE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); + // First configuration distribution + ASSERT_EQ(4, coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ(ConfigChangeType::ADD_PEER, res.type()); - // 第二次获取chunkserver失败 - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + // Failed to obtain chunkserver for the second time + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + // 3. Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -139,21 +144,23 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { EXPECT_CALL(*topoAdapter, GetChunkServerInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); Operator opRes; - ASSERT_FALSE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); + ASSERT_FALSE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到chunkserver的信息 + // Unable to obtain chunkserver information ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter, GetChunkServerInfo(_, _)) .WillOnce(Return(false)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); - ASSERT_FALSE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); + ASSERT_FALSE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); } { @@ -167,8 +174,9 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { info.configChangeInfo.set_allocated_peer(replica); EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, info.configChangeInfo, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + info.configChangeInfo, &res)); } { @@ -179,8 +187,9 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); 
coordinator->GetOpController()->RemoveOperator(info.id); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { @@ -189,16 +198,18 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { info.peers.emplace_back(PeerInfo(4, 4, 4, "192.10.123.1", 9000)); EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { // 7. test transfer copysetInfo err EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(Return(false)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } } @@ -228,34 +239,36 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { CopySetKey copySetKey; copySetKey.first = 1; copySetKey.second = 1; - Operator testOperator( - startEpoch, copySetKey, OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared(1, 4)); + Operator testOperator(startEpoch, copySetKey, + OperatorPriority::NormalPriority, steady_clock::now(), + std::make_shared(1, 4)); testOperator.timeLimit = std::chrono::seconds(100); auto info = GetCopySetInfoForTest(); PeerInfo peer(4, 1, 1, "127.0.0.1", 9000); ChunkServerInfo csInfo(peer, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, - 1, 10, 1, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 1, 10, 1, + ChunkServerStatisticInfo{}); PeerInfo peer1(1, 1, 1, "127.0.0.1", 9001); ChunkServerInfo csInfo1(peer1, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, - 1, 10, 1, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 1, 10, 1, + ChunkServerStatisticInfo{}); ::curve::mds::heartbeat::CopySetConf res; { // 1. test copySet do not have operator EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { // 2. 
test copySet has operator and not execute EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) - .Times(2).WillOnce(DoAll(SetArgPointee<1>(info), Return(true))) - .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); + .Times(2) + .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))) + .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); EXPECT_CALL(*topoAdapter, GetChunkServerInfo(4, _)) .Times(3) .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(true))) @@ -265,22 +278,23 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); coordinator->GetOpController()->AddOperator(testOperator); Operator opRes; - ASSERT_TRUE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); - // 第一次下发配置 - ASSERT_EQ(4, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_TRUE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); + // First configuration distribution + ASSERT_EQ(4, coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ("127.0.0.1:9001:0", res.oldpeer().address()); ASSERT_EQ(ConfigChangeType::CHANGE_PEER, res.type()); - // 第二次获取chunkserver失败 - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + // Failed to obtain chunkserver for the second time + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + // 3. Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -290,21 +304,23 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { EXPECT_CALL(*topoAdapter, GetChunkServerInfo(4, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); Operator opRes; - ASSERT_FALSE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); + ASSERT_FALSE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到chunkserver的信息 + // Unable to obtain chunkserver information ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter, GetChunkServerInfo(_, _)) .WillOnce(Return(false)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); - ASSERT_FALSE(coordinator->GetOpController()->GetOperatorById( - info.id, &opRes)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); + ASSERT_FALSE( + coordinator->GetOpController()->GetOperatorById(info.id, &opRes)); } { @@ -318,8 +334,9 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { info.configChangeInfo.set_allocated_peer(replica); EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, info.configChangeInfo, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + info.configChangeInfo, &res)); } { @@ -330,8 +347,9 @@ TEST(CoordinatorTest, 
test_ChangePeer_CopySetHeartbeat) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); coordinator->GetOpController()->RemoveOperator(info.id); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { @@ -340,16 +358,18 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { info.peers.emplace_back(PeerInfo(4, 4, 4, "192.10.123.1", 9000)); EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(true))); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } { // 7. test transfer copysetInfo err EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .WillOnce(Return(false)); - ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( - testCopySetInfo, ConfigChangeInfo{}, &res)); + ASSERT_EQ(UNINTIALIZE_ID, + coordinator->CopySetHeartbeat(testCopySetInfo, + ConfigChangeInfo{}, &res)); } } @@ -359,70 +379,68 @@ TEST(CoordinatorTest, test_ChunkserverGoingToAdd) { auto coordinator = std::make_shared(topoAdapter); ScheduleOption scheduleOption; scheduleOption.operatorConcurrent = 4; - coordinator->InitScheduler( - scheduleOption, std::make_shared(topo)); + coordinator->InitScheduler(scheduleOption, + std::make_shared(topo)); { - // 1. copyset上没有要变更的operator + // 1. There are no operators to change on the copyset ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 1})); } { - // 2. copyset上有leader变更,并且目的leader为chunkserver-1 - Operator testOperator(1, CopySetKey{1, 1}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(2, 1)); + // 2. There is a leader change on the copyset and the target leader is + // chunkserver-1 + Operator testOperator( + 1, CopySetKey{1, 1}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(2, 1)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 1})); } { - // 3. copyset上有remove peer操作 - Operator testOperator(1, CopySetKey{1, 2}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(1)); + // 3. There is a remove peer operation on the copyset + Operator testOperator( + 1, CopySetKey{1, 2}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(1)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 2})); } { - // 4. copyset上有add peer操作, target不是1 - Operator testOperator(1, CopySetKey{1, 3}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(2)); + // 4. There is an add peer operation on the copyset, but the target is + // not 1 + Operator testOperator( + 1, CopySetKey{1, 3}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(2)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 3})); } { - // 5. copyset上有add peer操作, target是1 - Operator testOperator(1, CopySetKey{1, 4}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(1)); + // 5. 
There is an add peer operation on the copyset, with a target of 1 + Operator testOperator( + 1, CopySetKey{1, 4}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(1)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_TRUE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 4})); } { - // 6. copyset上有change peer操作,target不是1 - Operator testOperator(1, CopySetKey{1, 5}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(4, 2)); + // 6. There is a change peer operation on the copyset, but the target is + // not 1 + Operator testOperator( + 1, CopySetKey{1, 5}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(4, 2)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 5})); } { - // 7. copyset上有change peer操作,target是1 - Operator testOperator(1, CopySetKey{1, 6}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(4, 1)); + // 7. There is a change peer operation on the copyset, with a target of + // 1 + Operator testOperator( + 1, CopySetKey{1, 6}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(4, 1)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); ASSERT_TRUE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 6})); } @@ -479,15 +497,15 @@ TEST(CoordinatorTest, test_RapidLeaderSchedule) { EXPECT_CALL(*topoAdapter, GetLogicalpools()) .WillOnce(Return(std::vector{})); ASSERT_EQ(kScheduleErrCodeInvalidLogicalPool, - coordinator->RapidLeaderSchedule(2)); + coordinator->RapidLeaderSchedule(2)); } TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { /* - 场景: - chunkserver1: offline 有恢复op - chunkserver2: offline 没有恢复op,没有candidate,有其他op - chunkserver3: offline 有candidate + Scenario: + chunkserver1: offline has recovery op + chunkserver2: offline has no recovery op, no candidate, and other ops + chunkserver3: offline has candidate chunkserver4: online chunkserver4: online */ @@ -496,21 +514,18 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { auto topoAdapter = std::make_shared(); auto coordinator = std::make_shared(topoAdapter); - // 获取option + // Get option ScheduleOption scheduleOption = GetScheduleOption(); coordinator->InitScheduler(scheduleOption, metric); - // 构造chunkserver + // Construct chunkserver std::vector chunkserverInfos; std::vector peerInfos; for (int i = 1; i <= 6; i++) { PeerInfo peer(i, i % 3 + 1, i, "192.168.0." 
+ std::to_string(i), 9000); - ChunkServerInfo csInfo( - peer, - OnlineState::ONLINE, - DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, - 1, 10, 1, ChunkServerStatisticInfo{}); + ChunkServerInfo csInfo(peer, OnlineState::ONLINE, DiskState::DISKNORMAL, + ChunkServerStatus::READWRITE, 1, 10, 1, + ChunkServerStatisticInfo{}); if (i <= 3) { csInfo.state = OnlineState::OFFLINE; } @@ -519,28 +534,21 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { peerInfos.emplace_back(peer); } - // 构造op - Operator opForCopySet1( - 1, CopySetKey{1, 1}, - OperatorPriority::HighPriority, - steady_clock::now(), - std::make_shared(1, 4)); + // Construct op + Operator opForCopySet1(1, CopySetKey{1, 1}, OperatorPriority::HighPriority, + steady_clock::now(), + std::make_shared(1, 4)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(opForCopySet1)); Operator opForCopySet2( - 2, CopySetKey{1, 2}, - OperatorPriority::NormalPriority, - steady_clock::now(), - std::make_shared(2, 4)); + 2, CopySetKey{1, 2}, OperatorPriority::NormalPriority, + steady_clock::now(), std::make_shared(2, 4)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(opForCopySet2)); - // 构造copyset + // Construct a copyset std::vector peersFor2({peerInfos[1], peerInfos[3], peerInfos[4]}); - CopySetInfo copyset2( - CopySetKey{1, 2}, 1, 4, - peersFor2, - ConfigChangeInfo{}, - CopysetStatistics{}); + CopySetInfo copyset2(CopySetKey{1, 2}, 1, 4, peersFor2, ConfigChangeInfo{}, + CopysetStatistics{}); std::vector peersFor3({peerInfos[2], peerInfos[3], peerInfos[4]}); ConfigChangeInfo configChangeInfoForCS3; @@ -550,13 +558,10 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { configChangeInfoForCS3.set_allocated_peer(replica); configChangeInfoForCS3.set_type(ConfigChangeType::CHANGE_PEER); configChangeInfoForCS3.set_finished(true); - CopySetInfo copyset3( - CopySetKey{1, 3}, 1, 4, - peersFor3, - configChangeInfoForCS3, - CopysetStatistics{}); + CopySetInfo copyset3(CopySetKey{1, 3}, 1, 4, peersFor3, + configChangeInfoForCS3, CopysetStatistics{}); - // 1. 查询所有chunkserver + // 1. Query all chunkservers { EXPECT_CALL(*topoAdapter, GetChunkServerInfos()) .WillOnce(Return(chunkserverInfos)); @@ -567,8 +572,8 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { std::map statusMap; ASSERT_EQ(kScheduleErrCodeSuccess, - coordinator->QueryChunkServerRecoverStatus( - std::vector{}, &statusMap)); + coordinator->QueryChunkServerRecoverStatus( + std::vector{}, &statusMap)); ASSERT_EQ(6, statusMap.size()); ASSERT_TRUE(statusMap[1]); ASSERT_FALSE(statusMap[2]); @@ -578,26 +583,26 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { ASSERT_FALSE(statusMap[6]); } - // 2. 查询指定chunkserver, 但chunkserver不存在 + // 2. Query for specified chunkserver, but chunkserver does not exist { EXPECT_CALL(*topoAdapter, GetChunkServerInfo(7, _)) .WillOnce(Return(false)); std::map statusMap; ASSERT_EQ(kScheduleErrInvalidQueryChunkserverID, - coordinator->QueryChunkServerRecoverStatus( - std::vector{7}, &statusMap)); + coordinator->QueryChunkServerRecoverStatus( + std::vector{7}, &statusMap)); } - // 3. 查询指定chunkserver, 不在恢复中 + // 3. 
Query the specified chunkserver, not in recovery { EXPECT_CALL(*topoAdapter, GetChunkServerInfo(6, _)) - .WillOnce(DoAll(SetArgPointee<1>(chunkserverInfos[5]), - Return(true))); + .WillOnce( + DoAll(SetArgPointee<1>(chunkserverInfos[5]), Return(true))); std::map statusMap; ASSERT_EQ(kScheduleErrCodeSuccess, - coordinator->QueryChunkServerRecoverStatus( - std::vector{6}, &statusMap)); + coordinator->QueryChunkServerRecoverStatus( + std::vector{6}, &statusMap)); ASSERT_EQ(1, statusMap.size()); ASSERT_FALSE(statusMap[6]); } @@ -606,4 +611,3 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { } // namespace schedule } // namespace mds } // namespace curve - diff --git a/test/mds/schedule/leaderScheduler_test.cpp b/test/mds/schedule/leaderScheduler_test.cpp index 3be00637b0..f1705f950a 100644 --- a/test/mds/schedule/leaderScheduler_test.cpp +++ b/test/mds/schedule/leaderScheduler_test.cpp @@ -21,20 +21,21 @@ */ #include -#include "src/mds/schedule/scheduler.h" + +#include "src/common/timeutility.h" #include "src/mds/schedule/scheduleMetrics.h" -#include "test/mds/schedule/mock_topoAdapter.h" +#include "src/mds/schedule/scheduler.h" #include "test/mds/mock/mock_topology.h" #include "test/mds/schedule/common.h" -#include "src/common/timeutility.h" +#include "test/mds/schedule/mock_topoAdapter.h" using ::curve::mds::topology::MockTopology; using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace mds { @@ -58,8 +59,8 @@ class TestLeaderSchedule : public ::testing::Test { opt.scatterWithRangePerent = 0.2; opt.leaderSchedulerIntervalSec = 1; opt.chunkserverCoolingTimeSec = 0; - leaderScheduler_ = std::make_shared( - opt, topoAdapter_, opController_); + leaderScheduler_ = + std::make_shared(opt, topoAdapter_, opController_); } void TearDown() override { @@ -91,15 +92,12 @@ TEST_F(TestLeaderSchedule, test_has_chunkServer_offline) { auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; - ChunkServerInfo csInfo1( - peer1, offlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, offlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); std::vector csInfos({csInfo1, csInfo2, csInfo3}); PoolIdType poolId = 1; @@ -110,8 +108,8 @@ TEST_F(TestLeaderSchedule, test_has_chunkServer_offline) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); std::vector copySetInfos({copySet1}); EXPECT_CALL(*topoAdapter_, GetLogicalpools()) @@ -134,15 +132,12 @@ TEST_F(TestLeaderSchedule, test_copySet_has_candidate) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto 
diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); std::vector csInfos({csInfo1, csInfo2, csInfo3}); PoolIdType poolId = 1; CopySetIdType copysetId = 1; @@ -152,8 +147,8 @@ TEST_F(TestLeaderSchedule, test_copySet_has_candidate) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); copySet1.candidatePeerInfo = PeerInfo(1, 1, 1, "192.168.10.1", 9000); std::vector copySetInfos({copySet1}); @@ -165,7 +160,8 @@ TEST_F(TestLeaderSchedule, test_copySet_has_candidate) { .WillRepeatedly(Return(copySetInfos)); leaderScheduler_->Schedule(); - ASSERT_EQ(0, opController_->GetOperators().size());} + ASSERT_EQ(0, opController_->GetOperators().size()); +} TEST_F(TestLeaderSchedule, test_cannot_get_chunkServerInfo) { PeerInfo peer1(1, 1, 1, "192.168.10.1", 9000); @@ -174,15 +170,12 @@ TEST_F(TestLeaderSchedule, test_cannot_get_chunkServerInfo) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); std::vector csInfos({csInfo1, csInfo2, csInfo3}); PoolIdType poolId = 1; @@ -193,8 +186,8 @@ TEST_F(TestLeaderSchedule, test_cannot_get_chunkServerInfo) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); std::vector copySetInfos({copySet1}); EXPECT_CALL(*topoAdapter_, GetLogicalpools()) @@ -206,7 +199,6 @@ TEST_F(TestLeaderSchedule, test_cannot_get_chunkServerInfo) { EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(1, _)) .WillRepeatedly(Return(false)); - leaderScheduler_->Schedule(); ASSERT_EQ(0, opController_->GetOperators().size()); } @@ -218,15 +210,12 @@ TEST_F(TestLeaderSchedule, test_no_need_tranferLeaderOut) { auto onlineState = 
::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 1, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 1, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); csInfo3.startUpTime = 3; std::vector csInfos({csInfo1, csInfo2, csInfo3}); @@ -238,8 +227,8 @@ TEST_F(TestLeaderSchedule, test_no_need_tranferLeaderOut) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); std::vector copySetInfos({copySet1}); EXPECT_CALL(*topoAdapter_, GetLogicalpools()) @@ -264,25 +253,19 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_normal) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 1, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - - ChunkServerInfo csInfo4( - peer4, onlineState, diskState, ChunkServerStatus::READWRITE, - 1, 100, 10, statInfo); - ChunkServerInfo csInfo5( - peer5, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo6( - peer6, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 1, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + + ChunkServerInfo csInfo4(peer4, onlineState, diskState, + ChunkServerStatus::READWRITE, 1, 100, 10, statInfo); + ChunkServerInfo csInfo5(peer5, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo6(peer6, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); struct timeval tm; gettimeofday(&tm, NULL); csInfo3.startUpTime = tm.tv_sec - 2; @@ -298,11 +281,11 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_normal) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); CopySetInfo copySet2(CopySetKey{2, 1}, epoch, 5, - std::vector({peer4, peer5, peer6}), - ConfigChangeInfo{}, 
CopysetStatistics{}); + std::vector({peer4, peer5, peer6}), + ConfigChangeInfo{}, CopysetStatistics{}); std::vector copySetInfos1({copySet1}); std::vector copySetInfos2({copySet2}); @@ -334,14 +317,14 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_normal) { ASSERT_TRUE(opController_->GetOperatorById(copySet1.id, &op)); ASSERT_EQ(OperatorPriority::NormalPriority, op.priority); ASSERT_EQ(std::chrono::seconds(10), op.timeLimit); - TransferLeader *res = dynamic_cast(op.step.get()); + TransferLeader* res = dynamic_cast(op.step.get()); ASSERT_TRUE(res != nullptr); ASSERT_EQ(csInfo3.info.id, res->GetTargetPeer()); ASSERT_TRUE(opController_->GetOperatorById(copySet2.id, &op)); ASSERT_EQ(OperatorPriority::NormalPriority, op.priority); ASSERT_EQ(std::chrono::seconds(10), op.timeLimit); - res = dynamic_cast(op.step.get()); + res = dynamic_cast(op.step.get()); ASSERT_TRUE(res != nullptr); ASSERT_EQ(csInfo6.info.id, res->GetTargetPeer()); } @@ -359,25 +342,19 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_pendding) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::PENDDING, - 0, 100, 10, statInfo); - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 5, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); - - ChunkServerInfo csInfo4( - peer4, onlineState, diskState, ChunkServerStatus::READWRITE, - 4, 100, 10, statInfo); - ChunkServerInfo csInfo5( - peer5, onlineState, diskState, ChunkServerStatus::READWRITE, - 5, 100, 10, statInfo); - ChunkServerInfo csInfo6( - peer6, onlineState, diskState, ChunkServerStatus::PENDDING, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::PENDDING, 0, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 5, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); + + ChunkServerInfo csInfo4(peer4, onlineState, diskState, + ChunkServerStatus::READWRITE, 4, 100, 10, statInfo); + ChunkServerInfo csInfo5(peer5, onlineState, diskState, + ChunkServerStatus::READWRITE, 5, 100, 10, statInfo); + ChunkServerInfo csInfo6(peer6, onlineState, diskState, + ChunkServerStatus::PENDDING, 0, 100, 10, statInfo); struct timeval tm; gettimeofday(&tm, NULL); csInfo3.startUpTime = tm.tv_sec - 2; @@ -393,11 +370,11 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_pendding) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); CopySetInfo copySet2(CopySetKey{2, 1}, epoch, 5, - std::vector({peer4, peer5, peer6}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer4, peer5, peer6}), + ConfigChangeInfo{}, CopysetStatistics{}); std::vector copySetInfos1({copySet1}); std::vector copySetInfos2({copySet2}); @@ -429,7 +406,7 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_pendding) { ASSERT_TRUE(opController_->GetOperatorById(copySet1.id, &op)); ASSERT_EQ(OperatorPriority::NormalPriority, op.priority); ASSERT_EQ(std::chrono::seconds(10), 
op.timeLimit); - TransferLeader *res = dynamic_cast(op.step.get()); + TransferLeader* res = dynamic_cast(op.step.get()); ASSERT_TRUE(res != nullptr); ASSERT_EQ(csInfo3.info.id, res->GetTargetPeer()); @@ -439,7 +416,7 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_pendding) { TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { // chunkserver1 chunkserver2 chunkserver3 chunkserver4 // leaderCount 0 3 2 1 - // copyset 1 1 1(有operator) + // copyset 1 1 1(with operator) // 2 2 2 // 3 3 3 PeerInfo peer1(1, 1, 1, "192.168.10.1", 9000); @@ -449,19 +426,15 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); csInfo1.startUpTime = ::curve::common::TimeUtility::GetTimeofDaySec() - 4; - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 3, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::READWRITE, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo4( - peer4, onlineState, diskState, ChunkServerStatus::READWRITE, - 1, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 3, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::READWRITE, 2, 100, 10, statInfo); + ChunkServerInfo csInfo4(peer4, onlineState, diskState, + ChunkServerStatus::READWRITE, 1, 100, 10, statInfo); std::vector csInfos({csInfo1, csInfo2, csInfo3, csInfo4}); PoolIdType poolId = 1; @@ -472,18 +445,18 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); copySetKey.second = 2; leader = 3; CopySetInfo copySet2(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); copySetKey.second = 3; leader = 4; CopySetInfo copySet3(copySetKey, epoch, leader, - std::vector({peer2, peer3, peer4}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer2, peer3, peer4}), + ConfigChangeInfo{}, CopysetStatistics{}); copySetKey.second = 1; Operator testOperator(1, copySetKey, OperatorPriority::NormalPriority, @@ -498,7 +471,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { .Times(2) .WillOnce(Return(std::vector({copySet1}))) .WillOnce(Return(std::vector({copySet3, copySet2}))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(1, _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(1, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(3, _)) @@ -513,7 +486,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { ASSERT_TRUE(opController_->GetOperatorById(copySet2.id, &op)); ASSERT_EQ(OperatorPriority::NormalPriority, op.priority); ASSERT_EQ(std::chrono::seconds(10), op.timeLimit); - TransferLeader *res = dynamic_cast(op.step.get()); + 
TransferLeader* res = dynamic_cast(op.step.get()); ASSERT_TRUE(res != nullptr); ASSERT_EQ(1, res->GetTargetPeer()); } @@ -521,7 +494,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { // chunkserver1 chunkserver2 chunkserver3 chunkserver4 // leaderCount 0 3 2 1 - // copyset 1 1 1(有operator) + // copyset 1 1 1(with operator) // 2 2 2 // 3 3 3 PeerInfo peer1(1, 1, 1, "192.168.10.1", 9000); @@ -531,19 +504,15 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); - ChunkServerInfo csInfo1( - peer1, onlineState, diskState, ChunkServerStatus::READWRITE, - 0, 100, 10, statInfo); + ChunkServerInfo csInfo1(peer1, onlineState, diskState, + ChunkServerStatus::READWRITE, 0, 100, 10, statInfo); csInfo1.startUpTime = ::curve::common::TimeUtility::GetTimeofDaySec() - 4; - ChunkServerInfo csInfo2( - peer2, onlineState, diskState, ChunkServerStatus::READWRITE, - 3, 100, 10, statInfo); - ChunkServerInfo csInfo3( - peer3, onlineState, diskState, ChunkServerStatus::PENDDING, - 2, 100, 10, statInfo); - ChunkServerInfo csInfo4( - peer4, onlineState, diskState, ChunkServerStatus::READWRITE, - 1, 100, 10, statInfo); + ChunkServerInfo csInfo2(peer2, onlineState, diskState, + ChunkServerStatus::READWRITE, 3, 100, 10, statInfo); + ChunkServerInfo csInfo3(peer3, onlineState, diskState, + ChunkServerStatus::PENDDING, 2, 100, 10, statInfo); + ChunkServerInfo csInfo4(peer4, onlineState, diskState, + ChunkServerStatus::READWRITE, 1, 100, 10, statInfo); std::vector csInfos({csInfo1, csInfo2, csInfo3, csInfo4}); PoolIdType poolId = 1; @@ -554,18 +523,18 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { EpochType epoch = 1; ChunkServerIdType leader = 2; CopySetInfo copySet1(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); copySetKey.second = 2; leader = 3; CopySetInfo copySet2(copySetKey, epoch, leader, - std::vector({peer1, peer2, peer3}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer1, peer2, peer3}), + ConfigChangeInfo{}, CopysetStatistics{}); copySetKey.second = 3; leader = 4; CopySetInfo copySet3(copySetKey, epoch, leader, - std::vector({peer2, peer3, peer4}), - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector({peer2, peer3, peer4}), + ConfigChangeInfo{}, CopysetStatistics{}); copySetKey.second = 1; Operator testOperator(1, copySetKey, OperatorPriority::NormalPriority, @@ -580,7 +549,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { .Times(2) .WillOnce(Return(std::vector({copySet1}))) .WillOnce(Return(std::vector({copySet3, copySet2}))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(1, _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(1, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(3, _)) @@ -595,7 +564,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { ASSERT_TRUE(opController_->GetOperatorById(copySet2.id, &op)); ASSERT_EQ(OperatorPriority::NormalPriority, op.priority); ASSERT_EQ(std::chrono::seconds(10), op.timeLimit); - TransferLeader *res = dynamic_cast(op.step.get()); + TransferLeader* res = dynamic_cast(op.step.get()); ASSERT_TRUE(res != 
nullptr); ASSERT_EQ(1, res->GetTargetPeer()); } @@ -603,7 +572,3 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { } // namespace schedule } // namespace mds } // namespace curve - - - - diff --git a/test/mds/schedule/operatorStep_test.cpp b/test/mds/schedule/operatorStep_test.cpp index 3cab9d2911..0147579ce8 100644 --- a/test/mds/schedule/operatorStep_test.cpp +++ b/test/mds/schedule/operatorStep_test.cpp @@ -22,6 +22,7 @@ #include #include + #include "src/common/timeutility.h" #include "test/mds/schedule/common.h" @@ -30,8 +31,8 @@ namespace mds { namespace schedule { TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { auto originCopySetInfo = GetCopySetInfoForTest(); - std::shared_ptr<TransferLeader> transferLeader - = std::make_shared<TransferLeader>(1, 2); + std::shared_ptr<TransferLeader> transferLeader = + std::make_shared<TransferLeader>(1, 2); auto testCopySetInfo = originCopySetInfo; ApplyStatus applyStatus; @@ -49,21 +50,21 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { // 2. transfer leader complete testCopySetInfo.leader = 2; ASSERT_EQ(ApplyStatus::Finished, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { // 3. report leader is not record old/target leader in operator testCopySetInfo.leader = 3; ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { // 4. transfer leader fail testCopySetInfo.leader = 1; - CandidateError *candidateError = new CandidateError(); - std::string *errMsg = new std::string("transfer leader err"); + CandidateError* candidateError = new CandidateError(); + std::string* errMsg = new std::string("transfer leader err"); candidateError->set_allocated_errmsg(errMsg); candidateError->set_errtype(1); testCopySetInfo.candidatePeerInfo = PeerInfo(2, 1, 1, "", 9000); @@ -76,7 +77,7 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { @@ -90,14 +91,14 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.release_err(); ASSERT_EQ(ApplyStatus::OnGoing, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { // 6. tarnfer leader type not complete testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } { @@ -110,7 +111,7 @@ TEST(OperatorStepTest, OperatorStepTest_TransferLeader_Test) { testCopySetInfo.configChangeInfo.set_allocated_peer(replica); ASSERT_EQ(ApplyStatus::Failed, - transferLeader->Apply(testCopySetInfo, &copySetConf)); + transferLeader->Apply(testCopySetInfo, &copySetConf)); } } @@ -127,8 +128,7 @@ TEST(OperatorSepTest, OperatorSepTest_AddPeer_Test) { // 2.
add peer complete auto testCopySetInfo = originCopySetInfo; - testCopySetInfo.peers.emplace_back( - PeerInfo(4, 3, 4, "192.168.10.4", 9000)); + testCopySetInfo.peers.emplace_back(PeerInfo(4, 3, 4, "192.168.10.4", 9000)); ASSERT_EQ(ApplyStatus::Finished, addPeer->Apply(testCopySetInfo, &copySetConf)); @@ -141,8 +141,8 @@ TEST(OperatorSepTest, OperatorSepTest_AddPeer_Test) { testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); testCopySetInfo.configChangeInfo.set_finished(false); - std::string *errMsg = new std::string("add peer failed"); - CandidateError *candidateError = new CandidateError(); + std::string* errMsg = new std::string("add peer failed"); + CandidateError* candidateError = new CandidateError(); candidateError->set_errtype(2); candidateError->set_allocated_errmsg(errMsg); testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); @@ -158,7 +158,7 @@ TEST(OperatorSepTest, OperatorSepTest_AddPeer_Test) { // 5. add peer type not complete testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::REMOVE_PEER); ASSERT_EQ(ApplyStatus::Failed, - addPeer->Apply(testCopySetInfo, &copySetConf)); + addPeer->Apply(testCopySetInfo, &copySetConf)); // 6. config change item do not match testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); @@ -174,8 +174,7 @@ TEST(OperatorSepTest, OperatorSepTest_AddPeer_Test) { TEST(OperatorStepTest, OperatorStepTest_RemovePeer_Test) { auto originCopySetInfo = GetCopySetInfoForTest(); - std::shared_ptr<RemovePeer> - removePeer = std::make_shared<RemovePeer>(3); + std::shared_ptr<RemovePeer> removePeer = std::make_shared<RemovePeer>(3); // 1. remove peer haven't started CopySetConf copySetConf; @@ -199,13 +198,12 @@ TEST(OperatorStepTest, OperatorStepTest_RemovePeer_Test) { replica->set_address("192.10.12.4:9000:0"); testCopySetInfo.configChangeInfo.set_allocated_peer(replica); testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::REMOVE_PEER); - std::string *errMsg = new std::string("remove peer err"); - CandidateError *candidateError = new CandidateError(); + std::string* errMsg = new std::string("remove peer err"); + CandidateError* candidateError = new CandidateError(); candidateError->set_errtype(2); candidateError->set_allocated_errmsg(errMsg); testCopySetInfo.configChangeInfo.set_finished(false); - testCopySetInfo.configChangeInfo.set_allocated_err( - candidateError); + testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); ASSERT_EQ(ApplyStatus::Failed, removePeer->Apply(testCopySetInfo, &copySetConf)); @@ -218,7 +216,7 @@ TEST(OperatorStepTest, OperatorStepTest_RemovePeer_Test) { // 5. remove peer type not complete testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); ASSERT_EQ(ApplyStatus::Failed, - removePeer->Apply(testCopySetInfo, &copySetConf)); + removePeer->Apply(testCopySetInfo, &copySetConf)); // 5. config change item do not match testCopySetInfo.candidatePeerInfo = PeerInfo(10, 1, 1, "", 9000); @@ -234,31 +232,31 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { auto originCopySetInfo = GetCopySetInfoForTest(); - std::shared_ptr<ChangePeer> - changePeer = std::make_shared<ChangePeer>(3, 4); + std::shared_ptr<ChangePeer> changePeer = + std::make_shared<ChangePeer>(3, 4); CopySetConf copySetConf; - // 1. change peer还未开始 + // 1.
change peer has not yet started { ASSERT_EQ(ApplyStatus::Ordered, - changePeer->Apply(originCopySetInfo, ©SetConf)); + changePeer->Apply(originCopySetInfo, ©SetConf)); ASSERT_EQ(4, copySetConf.configChangeItem); ASSERT_EQ(3, copySetConf.oldOne); ASSERT_EQ(ConfigChangeType::CHANGE_PEER, copySetConf.type); } auto testCopySetInfo = originCopySetInfo; - // 2. change peer完成 + // 2. change peer completed { auto testCopySetInfo = originCopySetInfo; testCopySetInfo.peers.erase(testCopySetInfo.peers.begin() + 2); testCopySetInfo.peers.emplace_back( - PeerInfo(4, 3, 4, "192.168.10.4", 9000)); + PeerInfo(4, 3, 4, "192.168.10.4", 9000)); ASSERT_EQ(ApplyStatus::Finished, - changePeer->Apply(testCopySetInfo, ©SetConf)); + changePeer->Apply(testCopySetInfo, ©SetConf)); } - // 3. change peer失败 + // 3. change peer failed { testCopySetInfo = originCopySetInfo; testCopySetInfo.candidatePeerInfo = PeerInfo(4, 1, 1, "", 9000); @@ -269,24 +267,24 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { testCopySetInfo.configChangeInfo.set_type( ConfigChangeType::CHANGE_PEER); testCopySetInfo.configChangeInfo.set_finished(false); - std::string *errMsg = new std::string("add peer failed"); - CandidateError *candidateError = new CandidateError(); + std::string* errMsg = new std::string("add peer failed"); + CandidateError* candidateError = new CandidateError(); candidateError->set_errtype(2); candidateError->set_allocated_errmsg(errMsg); testCopySetInfo.configChangeInfo.set_allocated_err(candidateError); ASSERT_EQ(ApplyStatus::Failed, - changePeer->Apply(testCopySetInfo, ©SetConf)); + changePeer->Apply(testCopySetInfo, ©SetConf)); } - // 4. 上报未完成 + // 4. Reporting incomplete { testCopySetInfo.configChangeInfo.set_finished(false); testCopySetInfo.configChangeInfo.release_err(); ASSERT_EQ(ApplyStatus::OnGoing, - changePeer->Apply(testCopySetInfo, ©SetConf)); + changePeer->Apply(testCopySetInfo, ©SetConf)); } - // 5. 上报的变更类型和mds中的oprator不相符合 + // 5. 
The reported change type does not match the operator in mds { testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); testCopySetInfo.configChangeInfo.set_finished(true); @@ -296,7 +294,7 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { replica->set_address("192.10.12.5:9000:0"); testCopySetInfo.configChangeInfo.set_allocated_peer(replica); ASSERT_EQ(ApplyStatus::Failed, - changePeer->Apply(testCopySetInfo, ©SetConf)); + changePeer->Apply(testCopySetInfo, ©SetConf)); } } @@ -362,9 +360,9 @@ TEST(OperatorStepTest, TestStartScanPeer) { auto ret = step->Apply(copysetInfo, ©setConf); ASSERT_EQ(ret, ApplyStatus::Ordered); - ASSERT_EQ(copysetConf.id.first, 1); // logical pool id - ASSERT_EQ(copysetConf.id.second, 1); // copyset id - ASSERT_EQ(copysetConf.epoch, 1); // epoch + ASSERT_EQ(copysetConf.id.first, 1); // logical pool id + ASSERT_EQ(copysetConf.id.second, 1); // copyset id + ASSERT_EQ(copysetConf.epoch, 1); // epoch ASSERT_EQ(copysetConf.peers, copysetInfo.peers); // peers ASSERT_EQ(copysetConf.type, ConfigChangeType::START_SCAN_PEER); // type ASSERT_EQ(copysetConf.configChangeItem, 1); // chunkserver id @@ -377,9 +375,9 @@ TEST(OperatorStepTest, TestStartScanPeer) { auto ret = step->Apply(copysetInfo, ©setConf); ASSERT_EQ(ret, ApplyStatus::Ordered); - ASSERT_EQ(copysetConf.id.first, 1); // logical pool id - ASSERT_EQ(copysetConf.id.second, 1); // copyset id - ASSERT_EQ(copysetConf.epoch, 1); // epoch + ASSERT_EQ(copysetConf.id.first, 1); // logical pool id + ASSERT_EQ(copysetConf.id.second, 1); // copyset id + ASSERT_EQ(copysetConf.epoch, 1); // epoch ASSERT_EQ(copysetConf.peers, copysetInfo.peers); // peers ASSERT_EQ(copysetConf.type, ConfigChangeType::START_SCAN_PEER); // type ASSERT_EQ(copysetConf.configChangeItem, 1); // chunkserver id @@ -509,12 +507,13 @@ TEST(OperatorStepTest, TestCancelScanPeer) { auto ret = step->Apply(copysetInfo, ©setConf); ASSERT_EQ(ret, ApplyStatus::Ordered); - ASSERT_EQ(copysetConf.id.first, 1); // logical pool id - ASSERT_EQ(copysetConf.id.second, 1); // copyset id - ASSERT_EQ(copysetConf.epoch, 1); // epoch + ASSERT_EQ(copysetConf.id.first, 1); // logical pool id + ASSERT_EQ(copysetConf.id.second, 1); // copyset id + ASSERT_EQ(copysetConf.epoch, 1); // epoch ASSERT_EQ(copysetConf.peers, copysetInfo.peers); // peers - ASSERT_EQ(copysetConf.type, ConfigChangeType::CANCEL_SCAN_PEER); // type // NOLINT - ASSERT_EQ(copysetConf.configChangeItem, 1); // chunkserver id + ASSERT_EQ(copysetConf.type, + ConfigChangeType::CANCEL_SCAN_PEER); // type // NOLINT + ASSERT_EQ(copysetConf.configChangeItem, 1); // chunkserver id } // CASE 3: copyset has no config change -> Ordered { auto copysetInfo = GetCopySetInfoForTest(); copysetInfo.scaning = false; @@ -525,12 +524,13 @@ TEST(OperatorStepTest, TestCancelScanPeer) { auto ret = step->Apply(copysetInfo, ©setConf); ASSERT_EQ(ret, ApplyStatus::Ordered); - ASSERT_EQ(copysetConf.id.first, 1); // logical pool id - ASSERT_EQ(copysetConf.id.second, 1); // copyset id - ASSERT_EQ(copysetConf.epoch, 1); // epoch + ASSERT_EQ(copysetConf.id.first, 1); // logical pool id + ASSERT_EQ(copysetConf.id.second, 1); // copyset id + ASSERT_EQ(copysetConf.epoch, 1); // epoch ASSERT_EQ(copysetConf.peers, copysetInfo.peers); // peers - ASSERT_EQ(copysetConf.type, ConfigChangeType::CANCEL_SCAN_PEER); // type // NOLINT - ASSERT_EQ(copysetConf.configChangeItem, 1); // chunkserver id + ASSERT_EQ(copysetConf.type, + ConfigChangeType::CANCEL_SCAN_PEER); // type // NOLINT + ASSERT_EQ(copysetConf.configChangeItem, 1); // chunkserver id } // CASE 4: copyset has config change but the change type
diff --git a/test/mds/schedule/rapidLeaderSheduler_test.cpp b/test/mds/schedule/rapidLeaderSheduler_test.cpp index 3caecf7111..5d9389c6d9 100644 --- a/test/mds/schedule/rapidLeaderSheduler_test.cpp +++ b/test/mds/schedule/rapidLeaderSheduler_test.cpp @@ -20,20 +20,20 @@ * Author: lixiaocui */ -#include "test/mds/schedule/mock_topoAdapter.h" -#include "test/mds/mock/mock_topology.h" -#include "test/mds/schedule/common.h" +#include "src/mds/schedule/operatorFactory.h" #include "src/mds/schedule/scheduleMetrics.h" #include "src/mds/schedule/scheduler.h" -#include "src/mds/schedule/operatorFactory.h" +#include "test/mds/mock/mock_topology.h" +#include "test/mds/schedule/common.h" +#include "test/mds/schedule/mock_topoAdapter.h" using ::curve::mds::topology::MockTopology; using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace mds { @@ -55,14 +55,17 @@ class TestRapidLeaderSchedule : public ::testing::Test { auto testCopySetInfo = GetCopySetInfoForTest(); ChunkServerInfo csInfo1(testCopySetInfo.peers[0], OnlineState::ONLINE, - DiskState::DISKNORMAL, ChunkServerStatus::READWRITE, - 1, 100, 100, ChunkServerStatisticInfo{}); + DiskState::DISKNORMAL, + ChunkServerStatus::READWRITE, 1, 100, 100, + ChunkServerStatisticInfo{}); ChunkServerInfo csInfo2(testCopySetInfo.peers[1], OnlineState::ONLINE, - DiskState::DISKNORMAL, ChunkServerStatus::READWRITE, - 0, 100, 100, ChunkServerStatisticInfo{}); + DiskState::DISKNORMAL, + ChunkServerStatus::READWRITE, 0, 100, 100, + ChunkServerStatisticInfo{}); ChunkServerInfo csInfo3(testCopySetInfo.peers[2], OnlineState::ONLINE, - DiskState::DISKNORMAL, ChunkServerStatus::READWRITE, - 0, 100, 100, ChunkServerStatisticInfo{}); + DiskState::DISKNORMAL, + ChunkServerStatus::READWRITE, 0, 100, 100, + ChunkServerStatisticInfo{}); chunkServerInfos_.emplace_back(csInfo1); chunkServerInfos_.emplace_back(csInfo2); chunkServerInfos_.emplace_back(csInfo3); @@ -77,14 +80,14 @@ class TestRapidLeaderSchedule : public ::testing::Test { TEST_F(TestRapidLeaderSchedule, test_logicalPool_not_exist) { std::shared_ptr rapidLeaderScheduler; - // 1. mds没有任何logicalpool + // 1. Mds does not have any logicalpool { rapidLeaderScheduler = std::make_shared( opt_, topoAdapter_, opController_, 2); EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{})); ASSERT_EQ(kScheduleErrCodeInvalidLogicalPool, - rapidLeaderScheduler->Schedule()); + rapidLeaderScheduler->Schedule()); rapidLeaderScheduler = std::make_shared( opt_, topoAdapter_, opController_, 0); @@ -93,21 +96,21 @@ TEST_F(TestRapidLeaderSchedule, test_logicalPool_not_exist) { ASSERT_EQ(kScheduleErrCodeSuccess, rapidLeaderScheduler->Schedule()); } - // 2. mds逻辑池列表中没有指定logicalpool + // 2. No logicalpool specified in the mds logical pool list { rapidLeaderScheduler = std::make_shared( opt_, topoAdapter_, opController_, 2); EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); ASSERT_EQ(kScheduleErrCodeInvalidLogicalPool, - rapidLeaderScheduler->Schedule()); + rapidLeaderScheduler->Schedule()); } } TEST_F(TestRapidLeaderSchedule, test_initResource_no_need_schedule) { std::shared_ptr rapidLeaderScheduler; { - // 1. 指定logicalpool中没有chunkserver + // 1. 
There is no chunkserver in the specified logicalpool EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) @@ -121,7 +124,7 @@ TEST_F(TestRapidLeaderSchedule, test_initResource_no_need_schedule) { } { - // 2. 指定logicalpool中没有copyset + // 2. There is no copyset in the specified logicalpool EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) @@ -141,7 +144,8 @@ TEST_F(TestRapidLeaderSchedule, test_select_target_fail) { opt_, topoAdapter_, opController_, 1); { - // 1. copyset的副本数目为1, 不会产生迁移 + // 1. The number of copies for copyset is 1, and migration will not + // occur EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) @@ -158,16 +162,17 @@ TEST_F(TestRapidLeaderSchedule, test_select_target_fail) { } { - // 2. chunkserver上拥有的leader数目最多相差1, 不会产生迁移 + // 2. The maximum difference in the number of leaders owned on + // chunkserver is 1, and migration will not occur // chunkserver-1 chunkserver-2 chunkserver-3 // copyset-1(leader) copyset-1 copyset-1 EXPECT_CALL(*topoAdapter_, GetLogicalpools()) - .WillOnce(Return(std::vector{1})); + .WillOnce(Return(std::vector{1})); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) .WillOnce(Return(chunkServerInfos_)); EXPECT_CALL(*topoAdapter_, GetCopySetInfosInLogicalPool(1)) - .WillOnce(Return( - std::vector{GetCopySetInfoForTest()})); + .WillOnce( + Return(std::vector{GetCopySetInfoForTest()})); ASSERT_EQ(kScheduleErrCodeSuccess, rapidLeaderScheduler->Schedule()); ASSERT_EQ(0, opController_->GetOperators().size()); @@ -175,7 +180,7 @@ TEST_F(TestRapidLeaderSchedule, test_select_target_fail) { } TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_success) { - // 快速均衡成功 + // Fast balancing successful // chunkserver-1 chunkserver-2 chunkserver-3 // copyset-1(leader) copyset-1 copyset-1 // copyset-2(leader) copyset-2 copyset-2 @@ -189,7 +194,7 @@ TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_success) { auto chunkserverInfosBak = chunkServerInfos_; chunkserverInfosBak[0].leaderCount = 3; EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) - .WillOnce(Return(chunkserverInfosBak)); + .WillOnce(Return(chunkserverInfosBak)); auto copyset1 = GetCopySetInfoForTest(); auto copyset2 = GetCopySetInfoForTest(); @@ -197,8 +202,8 @@ TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_success) { auto copyset3 = GetCopySetInfoForTest(); copyset3.id = CopySetKey{1, 3}; EXPECT_CALL(*topoAdapter_, GetCopySetInfosInLogicalPool(1)) - .WillOnce(Return( - std::vector{copyset1, copyset2, copyset3})); + .WillOnce( + Return(std::vector{copyset1, copyset2, copyset3})); OperatorFactory factory; opController_->AddOperator(factory.CreateRemovePeerOperator( copyset2, 2, OperatorPriority::NormalPriority)); @@ -206,18 +211,18 @@ TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_success) { ASSERT_EQ(kScheduleErrCodeSuccess, rapidLeaderScheduler->Schedule()); auto operators = opController_->GetOperators(); ASSERT_EQ(3, operators.size()); - auto op1 = dynamic_cast(operators[0].step.get()); + auto op1 = dynamic_cast(operators[0].step.get()); ASSERT_TRUE(nullptr != op1); ASSERT_EQ(2, op1->GetTargetPeer()); ASSERT_EQ(1, operators[0].copysetID.second); - auto op2 = dynamic_cast(operators[2].step.get()); + auto op2 = dynamic_cast(operators[2].step.get()); ASSERT_TRUE(nullptr != op2); ASSERT_EQ(3, 
op2->GetTargetPeer()); ASSERT_EQ(3, operators[2].copysetID.second); } TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_pendding) { - // 快速均衡成功 + // Fast balancing successful // chunkserver-1 chunkserver-2 chunkserver-3 // copyset-1(leader) copyset-1 copyset-1 // copyset-2(leader) copyset-2 copyset-2 @@ -232,7 +237,7 @@ TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_pendding) { chunkserverInfosBak[0].leaderCount = 3; chunkserverInfosBak[0].status = ChunkServerStatus::PENDDING; EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) - .WillOnce(Return(chunkserverInfosBak)); + .WillOnce(Return(chunkserverInfosBak)); auto copyset1 = GetCopySetInfoForTest(); auto copyset2 = GetCopySetInfoForTest(); @@ -240,8 +245,8 @@ TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_pendding) { auto copyset3 = GetCopySetInfoForTest(); copyset3.id = CopySetKey{1, 3}; EXPECT_CALL(*topoAdapter_, GetCopySetInfosInLogicalPool(1)) - .WillOnce(Return( - std::vector{copyset1, copyset2, copyset3})); + .WillOnce( + Return(std::vector{copyset1, copyset2, copyset3})); ASSERT_EQ(kScheduleErrCodeSuccess, rapidLeaderScheduler->Schedule()); auto operators = opController_->GetOperators(); diff --git a/test/mds/schedule/recoverScheduler_test.cpp b/test/mds/schedule/recoverScheduler_test.cpp index c7c11b299e..8e26a2ff57 100644 --- a/test/mds/schedule/recoverScheduler_test.cpp +++ b/test/mds/schedule/recoverScheduler_test.cpp @@ -21,23 +21,24 @@ */ #include -#include "src/mds/schedule/scheduler.h" + +#include "src/mds/common/mds_define.h" #include "src/mds/schedule/operatorController.h" -#include "src/mds/topology/topology_id_generator.h" #include "src/mds/schedule/scheduleMetrics.h" -#include "src/mds/common/mds_define.h" -#include "test/mds/schedule/mock_topoAdapter.h" +#include "src/mds/schedule/scheduler.h" +#include "src/mds/topology/topology_id_generator.h" #include "test/mds/mock/mock_topology.h" #include "test/mds/schedule/common.h" +#include "test/mds/schedule/mock_topoAdapter.h" using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; -using ::curve::mds::topology::TopologyIdGenerator; using ::curve::mds::topology::MockTopology; +using ::curve::mds::topology::TopologyIdGenerator; namespace curve { namespace mds { @@ -62,7 +63,7 @@ class TestRecoverSheduler : public ::testing::Test { opt.scatterWithRangePerent = 0.2; opt.chunkserverFailureTolerance = 3; recoverScheduler_ = std::make_shared( - opt, topoAdapter_, opController_); + opt, topoAdapter_, opController_); } void TearDown() override { opController_ = nullptr; @@ -71,9 +72,9 @@ class TestRecoverSheduler : public ::testing::Test { } protected: - std::shared_ptr topoAdapter_; - std::shared_ptr opController_; - std::shared_ptr recoverScheduler_; + std::shared_ptr topoAdapter_; + std::shared_ptr opController_; + std::shared_ptr recoverScheduler_; }; TEST_F(TestRecoverSheduler, test_copySet_already_has_operator) { @@ -82,10 +83,8 @@ TEST_F(TestRecoverSheduler, test_copySet_already_has_operator) { EXPECT_CALL(*topoAdapter_, GetChunkServerInfos()) .WillOnce(Return(std::vector{})); CopySetKey copySetKey; - copySetKey. - first = 1; - copySetKey. 
- second = 1; + copySetKey.first = 1; + copySetKey.second = 1; Operator testOperator(1, copySetKey, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(1)); ASSERT_TRUE(opController_->AddOperator(testOperator)); @@ -107,8 +106,8 @@ TEST_F(TestRecoverSheduler, test_copySet_has_configChangeInfo) { TEST_F(TestRecoverSheduler, test_chunkServer_cannot_get) { EXPECT_CALL(*topoAdapter_, GetChunkServerInfos()) .WillOnce(Return(std::vector{})); - EXPECT_CALL(*topoAdapter_, GetCopySetInfos()). - WillOnce(Return(std::vector({GetCopySetInfoForTest()}))); + EXPECT_CALL(*topoAdapter_, GetCopySetInfos()) + .WillOnce(Return(std::vector({GetCopySetInfoForTest()}))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(_, _)) .Times(3) .WillRepeatedly(Return(false)); @@ -132,27 +131,27 @@ TEST_F(TestRecoverSheduler, test_server_has_more_offline_chunkserver) { PeerInfo peer4(4, 1, 1, "192.168.10.1", 9001); PeerInfo peer5(5, 1, 1, "192.168.10.1", 9002); ChunkServerInfo csInfo4(peer4, OnlineState::OFFLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 2, 100, 100, - ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 2, 100, 100, + ChunkServerStatisticInfo{}); ChunkServerInfo csInfo5(peer5, OnlineState::UNSTABLE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 2, 100, 100, - ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 2, 100, 100, + ChunkServerStatisticInfo{}); EXPECT_CALL(*topoAdapter_, GetChunkServerInfos()) - .WillOnce(Return(std::vector{ - csInfo1, csInfo2, csInfo3, csInfo4, csInfo5})); + .WillOnce(Return(std::vector{csInfo1, csInfo2, csInfo3, + csInfo4, csInfo5})); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo1.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo1.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo2.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo2.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo2), Return(true))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo3.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo3.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo3), Return(true))); recoverScheduler_->Schedule(); ASSERT_EQ(0, opController_->GetOperators().size()); } TEST_F(TestRecoverSheduler, - test_server_has_more_offline_and_retired_chunkserver) { + test_server_has_more_offline_and_retired_chunkserver) { auto testCopySetInfo = GetCopySetInfoForTest(); EXPECT_CALL(*topoAdapter_, GetCopySetInfos()) .WillRepeatedly(Return(std::vector({testCopySetInfo}))); @@ -168,27 +167,27 @@ TEST_F(TestRecoverSheduler, PeerInfo peer4(4, 1, 1, "192.168.10.1", 9001); PeerInfo peer5(5, 1, 1, "192.168.10.1", 9002); ChunkServerInfo csInfo4(peer4, OnlineState::OFFLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 2, 100, 100, - ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 2, 100, 100, + ChunkServerStatisticInfo{}); ChunkServerInfo csInfo5(peer5, OnlineState::OFFLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 2, 100, 100, - ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 2, 100, 100, + ChunkServerStatisticInfo{}); EXPECT_CALL(*topoAdapter_, GetChunkServerInfos()) - .WillOnce(Return(std::vector{ - csInfo1, csInfo2, csInfo3, csInfo4, csInfo5})); + .WillOnce(Return(std::vector{csInfo1, csInfo2, csInfo3, + csInfo4, csInfo5})); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo1.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo1.info.id, 
_)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo2.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo2.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo2), Return(true))); - EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo3.info.id , _)) + EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(csInfo3.info.id, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo3), Return(true))); - EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) + EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) .WillOnce(Return(2)); recoverScheduler_->Schedule(); Operator op; ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } @@ -208,64 +207,61 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { DiskState::DISKNORMAL, ChunkServerStatus::READWRITE, 2, 100, 100, ChunkServerStatisticInfo{}); PeerInfo peer4(4, 4, 4, "192.168.10.4", 9000); - ChunkServerInfo csInfo4(peer4, OnlineState::ONLINE, - DiskState::DISKNORMAL, ChunkServerStatus::READWRITE, - 2, 100, 100, ChunkServerStatisticInfo{}); + ChunkServerInfo csInfo4(peer4, OnlineState::ONLINE, DiskState::DISKNORMAL, + ChunkServerStatus::READWRITE, 2, 100, 100, + ChunkServerStatisticInfo{}); ChunkServerIdType id1 = 1; ChunkServerIdType id2 = 2; ChunkServerIdType id3 = 3; Operator op; EXPECT_CALL(*topoAdapter_, GetAvgScatterWidthInLogicalPool(_)) - .WillRepeatedly(Return(90)); + .WillRepeatedly(Return(90)); { - // 1. 所有chunkserveronline + // 1. All chunkservers are online EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id1, _)) - .WillOnce(DoAll(SetArgPointee<1>(csInfo1), - Return(true))); + .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id2, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo2), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo2), Return(true))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id3, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo3), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo3), Return(true))); recoverScheduler_->Schedule(); ASSERT_EQ(0, opController_->GetOperators().size()); } { - // 2. 副本数量大于标准,leader挂掉 + // 2. The number of replicas exceeds the standard, and the leader + // goes offline csInfo1.state = OnlineState::OFFLINE; EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id1, _)) - .WillOnce(DoAll(SetArgPointee<1>(csInfo1), - Return(true))); + .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) - .Times(2).WillRepeatedly(Return(2)); + .Times(2) + .WillRepeatedly(Return(2)); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE( - dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } { - // 3. 副本数量大于标准,follower挂掉 + // 3.
The number of replicas exceeds the standard, and a follower + // goes offline opController_->RemoveOperator(op.copysetID); csInfo1.state = OnlineState::ONLINE; csInfo2.state = OnlineState::OFFLINE; EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id1, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo1), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id2, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo2), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(csInfo2), Return(true))); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(std::chrono::seconds(100), op.timeLimit); } { - // 4. 副本数目等于标准, follower挂掉 + // 4. The number of replicas equals the standard, and a follower + // goes offline opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) .WillRepeatedly(Return(3)); @@ -290,14 +286,13 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { .WillOnce(Return(true)); recoverScheduler_->Schedule(); ASSERT_TRUE(opController_->GetOperatorById(testCopySetInfo.id, &op)); - ASSERT_TRUE( - dynamic_cast(op.step.get()) != nullptr); + ASSERT_TRUE(dynamic_cast(op.step.get()) != nullptr); ASSERT_EQ(4, op.step.get()->GetTargetPeer()); ASSERT_EQ(std::chrono::seconds(1000), op.timeLimit); } { - // 5. 选不出替换chunkserver + // 5. Unable to select a replacement chunkserver opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(_)) .WillOnce(Return(std::vector{})); @@ -306,7 +301,7 @@ { - // 6. 在chunkserver上创建copyset失败 + // 6.
Failed to create copyset on chunkserver EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) .WillRepeatedly(Return(3)); std::vector chunkserverList( @@ -335,5 +330,3 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { } // namespace schedule } // namespace mds } // namespace curve - - diff --git a/test/mds/schedule/scheduleMetrics_test.cpp b/test/mds/schedule/scheduleMetrics_test.cpp index 66969a6845..3714260772 100644 --- a/test/mds/schedule/scheduleMetrics_test.cpp +++ b/test/mds/schedule/scheduleMetrics_test.cpp @@ -20,22 +20,24 @@ * Author: lixiaocui */ +#include "src/mds/schedule/scheduleMetrics.h" + #include #include #include -#include "src/mds/schedule/scheduleMetrics.h" -#include "src/mds/schedule/operatorController.h" + #include "src/mds/schedule/operator.h" +#include "src/mds/schedule/operatorController.h" #include "test/mds/mock/mock_topology.h" -using ::curve::mds::topology::MockTopology; using ::curve::mds::topology::CopySetKey; +using ::curve::mds::topology::MockTopology; using ::testing::_; -using ::testing::Return; using ::testing::AtLeast; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace mds { @@ -47,24 +49,22 @@ class ScheduleMetricsTest : public testing::Test { scheduleMetrics = std::make_shared(topo); } - void TearDown() { - } + void TearDown() {} ::curve::mds::topology::ChunkServer GetChunkServer(int id) { - return ::curve::mds::topology::ChunkServer( - id, "", "", id, "", 9000, ""); + return ::curve::mds::topology::ChunkServer(id, "", "", id, "", 9000, + ""); } ::curve::mds::topology::Server GetServer(int id) { - std::string hostName = - "pubbeta2-curve" + std::to_string(id) + ".org"; - return ::curve::mds::topology::Server( - id, hostName, "", 0, "", 0, id, 1, ""); + std::string hostName = "pubbeta2-curve" + std::to_string(id) + ".org"; + return ::curve::mds::topology::Server(id, hostName, "", 0, "", 0, id, 1, + ""); } std::string GetChunkServerHostPort(int id) { return GetServer(id).GetHostName() + ":" + - std::to_string(GetChunkServer(id).GetPort()); + std::to_string(GetChunkServer(id).GetPort()); } public: @@ -74,24 +74,24 @@ class ScheduleMetricsTest : public testing::Test { TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { Operator addOp(1, CopySetKey{1, 1}, OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared(3)); + steady_clock::now(), std::make_shared(3)); ::curve::mds::topology::CopySetInfo addCsInfo(1, 1); addCsInfo.SetCopySetMembers(std::set{1, 2}); { - // 1. 增加normal级别/add类型的operator + // 1. 
Add operators of normal level/add type EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 1}, _)) .WillOnce(DoAll(SetArgPointee<1>(addCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) .WillOnce(Return(GetServer(1).GetHostName() + ":" + - std::to_string(GetChunkServer(1).GetPort()))); + std::to_string(GetChunkServer(1).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(2)) .WillOnce(Return(GetServer(2).GetHostName() + ":" + - std::to_string(GetChunkServer(2).GetPort()))); + std::to_string(GetChunkServer(2).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(3)) .WillOnce(Return(GetServer(3).GetHostName() + ":" + - std::to_string(GetChunkServer(3).GetPort()))); + std::to_string(GetChunkServer(3).GetPort()))); scheduleMetrics->UpdateAddMetric(addOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -100,34 +100,34 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { ASSERT_EQ(1, scheduleMetrics->operators.size()); ASSERT_EQ(std::to_string(addCsInfo.GetLogicalPoolId()), - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "logicalPoolId")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "logicalPoolId")); ASSERT_EQ(std::to_string(addCsInfo.GetId()), - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "copySetId")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "copySetId")); std::string copysetpeers = GetChunkServerHostPort(1) + "," + GetChunkServerHostPort(2); ASSERT_EQ(copysetpeers, - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "copySetPeers")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "copySetPeers")); ASSERT_EQ(std::to_string(addCsInfo.GetEpoch()), - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "copySetEpoch")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "copySetEpoch")); ASSERT_EQ("UNINTIALIZE_ID", - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "copySetLeader")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "copySetLeader")); ASSERT_EQ(std::to_string(addOp.startEpoch), - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "startEpoch")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "startEpoch")); ASSERT_EQ(NORMAL, - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "opPriority")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "opPriority")); ASSERT_EQ(ADDPEER, - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "opType")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "opType")); ASSERT_EQ(GetChunkServerHostPort(3), - scheduleMetrics->operators[addOp.copysetID].GetValueByKey( - "opItem")); + scheduleMetrics->operators[addOp.copysetID].GetValueByKey( + "opItem")); std::string res = std::string("{\"copySetEpoch\":\"0\",\"copySetId\":\"1\",") + std::string("\"copySetLeader\":\"UNINTIALIZE_ID\",\"") + @@ -143,7 +143,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { } { - // 2. 移除 1中的operator + // 2. 
Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(addOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->addOpNum.get_value()); @@ -154,25 +154,26 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { Operator rmOp(1, CopySetKey{1, 2}, OperatorPriority::HighPriority, - steady_clock::now(), std::make_shared(3)); + steady_clock::now(), std::make_shared(3)); ::curve::mds::topology::CopySetInfo rmCsInfo(1, 2); rmCsInfo.SetCopySetMembers(std::set{1, 2, 3}); rmCsInfo.SetLeader(1); { - // 1. 增加high级别/remove类型的operator + // 1. Add high level/remove type operators EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 2}, _)) .WillOnce(DoAll(SetArgPointee<1>(rmCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) .WillOnce(Return(GetServer(1).GetHostName() + ":" + - std::to_string(GetChunkServer(1).GetPort()))); + std::to_string(GetChunkServer(1).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(2)) .WillOnce(Return(GetServer(2).GetHostName() + ":" + - std::to_string(GetChunkServer(2).GetPort()))); + std::to_string(GetChunkServer(2).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(3)) .Times(2) - .WillRepeatedly(Return(GetServer(3).GetHostName() + ":" + - std::to_string(GetChunkServer(3).GetPort()))); + .WillRepeatedly( + Return(GetServer(3).GetHostName() + ":" + + std::to_string(GetChunkServer(3).GetPort()))); scheduleMetrics->UpdateAddMetric(rmOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -181,34 +182,35 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { ASSERT_EQ(1, scheduleMetrics->operators.size()); ASSERT_EQ(std::to_string(rmCsInfo.GetLogicalPoolId()), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "logicalPoolId")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "logicalPoolId")); ASSERT_EQ(std::to_string(rmCsInfo.GetId()), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "copySetId")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "copySetId")); std::string copysetpeers = GetChunkServerHostPort(1) + "," + - GetChunkServerHostPort(2) + "," + GetChunkServerHostPort(3); + GetChunkServerHostPort(2) + "," + + GetChunkServerHostPort(3); ASSERT_EQ(copysetpeers, - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "copySetPeers")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "copySetPeers")); ASSERT_EQ(std::to_string(rmCsInfo.GetEpoch()), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "copySetEpoch")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "copySetEpoch")); ASSERT_EQ(GetChunkServerHostPort(1), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "copySetLeader")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "copySetLeader")); ASSERT_EQ(std::to_string(rmOp.startEpoch), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "startEpoch")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "startEpoch")); ASSERT_EQ(HIGH, - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "opPriority")); - ASSERT_EQ(REMOVEPEER, - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "opType")); - ASSERT_EQ(GetChunkServerHostPort(3), - scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( - "opItem")); + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey( + "opPriority")); + ASSERT_EQ( + REMOVEPEER, + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey("opType")); + ASSERT_EQ( + 
GetChunkServerHostPort(3), + scheduleMetrics->operators[rmOp.copysetID].GetValueByKey("opItem")); std::string res = std::string("{\"copySetEpoch\":\"0\",\"copySetId\":\"2\",") + std::string("\"copySetLeader\":") + @@ -226,7 +228,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { } { - // 2. 移除 1中的operator + // 2. Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(rmOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->removeOpNum.get_value()); @@ -237,25 +239,27 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { Operator transferOp(1, CopySetKey{1, 3}, OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared(1, 3)); + steady_clock::now(), + std::make_shared(1, 3)); ::curve::mds::topology::CopySetInfo transCsInfo(1, 3); transCsInfo.SetCopySetMembers(std::set{1, 2, 3}); transCsInfo.SetLeader(1); { - // 1. 增加normal级别/transferleader类型的operator + // 1. Increase the operator of the normal level/transferleader type EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) .WillOnce(Return(GetServer(1).GetHostName() + ":" + - std::to_string(GetChunkServer(1).GetPort()))); + std::to_string(GetChunkServer(1).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(2)) .WillOnce(Return(GetServer(2).GetHostName() + ":" + - std::to_string(GetChunkServer(2).GetPort()))); + std::to_string(GetChunkServer(2).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(3)) .Times(2) - .WillRepeatedly(Return(GetServer(3).GetHostName() + ":" + - std::to_string(GetChunkServer(3).GetPort()))); + .WillRepeatedly( + Return(GetServer(3).GetHostName() + ":" + + std::to_string(GetChunkServer(3).GetPort()))); scheduleMetrics->UpdateAddMetric(transferOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -263,30 +267,35 @@ TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { ASSERT_EQ(1, scheduleMetrics->transferOpNum.get_value()); ASSERT_EQ(1, scheduleMetrics->operators.size()); - ASSERT_EQ(std::to_string(transCsInfo.GetLogicalPoolId()), + ASSERT_EQ( + std::to_string(transCsInfo.GetLogicalPoolId()), scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "logicalPoolId")); - ASSERT_EQ(std::to_string(transCsInfo.GetId()), + ASSERT_EQ( + std::to_string(transCsInfo.GetId()), scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "copySetId")); std::string copysetpeers = GetChunkServerHostPort(1) + "," + - GetChunkServerHostPort(2) + "," + GetChunkServerHostPort(3); - ASSERT_EQ(copysetpeers, - scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( - "copySetPeers")); - ASSERT_EQ(std::to_string(transCsInfo.GetEpoch()), + GetChunkServerHostPort(2) + "," + + GetChunkServerHostPort(3); + ASSERT_EQ(copysetpeers, scheduleMetrics->operators[transferOp.copysetID] + .GetValueByKey("copySetPeers")); + ASSERT_EQ( + std::to_string(transCsInfo.GetEpoch()), scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "copySetEpoch")); - ASSERT_EQ(std::to_string(transferOp.startEpoch), + ASSERT_EQ( + std::to_string(transferOp.startEpoch), scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "startEpoch")); - ASSERT_EQ(NORMAL, - scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( - "opPriority")); - ASSERT_EQ(TRANSFERLEADER, + ASSERT_EQ(NORMAL, scheduleMetrics->operators[transferOp.copysetID] + .GetValueByKey("opPriority")); + ASSERT_EQ( + TRANSFERLEADER, 
scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "opType")); - ASSERT_EQ(GetChunkServerHostPort(3), + ASSERT_EQ( + GetChunkServerHostPort(3), scheduleMetrics->operators[transferOp.copysetID].GetValueByKey( "opItem")); std::string res = @@ -301,47 +310,49 @@ TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { std::string("\"Normal\",\"opType\":\"TransferLeader\",") + std::string("\"startEpoch\":\"1\"}"); ASSERT_EQ(res, - scheduleMetrics->operators[transferOp.copysetID].JsonBody()); - LOG(INFO) << "format: " + scheduleMetrics->operators[transferOp.copysetID].JsonBody()); + LOG(INFO) + << "format: " << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); } { - // 2. 移除 1中的operator + // 2. Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(transferOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->transferOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // There should be no problem removing metrics that do not exist in the + // map scheduleMetrics->UpdateRemoveMetric(transferOp); } } TEST_F(ScheduleMetricsTest, test_add_rm_changeOp) { Operator changeOp(1, CopySetKey{1, 4}, OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared(1, 4)); + steady_clock::now(), std::make_shared(1, 4)); ::curve::mds::topology::CopySetInfo changeCsInfo(1, 4); changeCsInfo.SetCopySetMembers(std::set{1, 2, 3}); changeCsInfo.SetLeader(1); { - // 1. 增加normal级别/changePeer类型的operator + // 1. Increase operator of normal level/changePeer type EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 4}, _)) .WillOnce(DoAll(SetArgPointee<1>(changeCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) .WillOnce(Return(GetServer(1).GetHostName() + ":" + - std::to_string(GetChunkServer(1).GetPort()))); + std::to_string(GetChunkServer(1).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(2)) .WillOnce(Return(GetServer(2).GetHostName() + ":" + - std::to_string(GetChunkServer(2).GetPort()))); + std::to_string(GetChunkServer(2).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(3)) .WillOnce(Return(GetServer(3).GetHostName() + ":" + - std::to_string(GetChunkServer(3).GetPort()))); + std::to_string(GetChunkServer(3).GetPort()))); EXPECT_CALL(*topo, GetHostNameAndPortById(4)) .WillOnce(Return(GetServer(4).GetHostName() + ":" + - std::to_string(GetChunkServer(4).GetPort()))); + std::to_string(GetChunkServer(4).GetPort()))); scheduleMetrics->UpdateAddMetric(changeOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -350,31 +361,32 @@ TEST_F(ScheduleMetricsTest, test_add_rm_changeOp) { ASSERT_EQ(1, scheduleMetrics->operators.size()); ASSERT_EQ(std::to_string(changeCsInfo.GetLogicalPoolId()), - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "logicalPoolId")); - ASSERT_EQ(std::to_string(changeCsInfo.GetId()), - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "copySetId")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "logicalPoolId")); + ASSERT_EQ(std::to_string(changeCsInfo.GetId()), + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "copySetId")); std::string copysetpeers = GetChunkServerHostPort(1) + "," + - GetChunkServerHostPort(2) + "," + GetChunkServerHostPort(3); + GetChunkServerHostPort(2) + "," + + GetChunkServerHostPort(3); ASSERT_EQ(copysetpeers, - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "copySetPeers")); + 
scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "copySetPeers")); ASSERT_EQ(std::to_string(changeCsInfo.GetEpoch()), - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "copySetEpoch")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "copySetEpoch")); ASSERT_EQ(std::to_string(changeOp.startEpoch), - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "startEpoch")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "startEpoch")); ASSERT_EQ(NORMAL, - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "opPriority")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "opPriority")); ASSERT_EQ(CHANGEPEER, - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "opType")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "opType")); ASSERT_EQ(GetChunkServerHostPort(4), - scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( - "opItem")); + scheduleMetrics->operators[changeOp.copysetID].GetValueByKey( + "opItem")); std::string res = std::string("{\"copySetEpoch\":\"0\",\"copySetId\":\"4\",") + std::string("\"copySetLeader\":") + @@ -387,32 +399,34 @@ TEST_F(ScheduleMetricsTest, test_add_rm_changeOp) { std::string("\"Normal\",\"opType\":\"ChangePeer\",") + std::string("\"startEpoch\":\"1\"}"); ASSERT_EQ(res, - scheduleMetrics->operators[changeOp.copysetID].JsonBody()); + scheduleMetrics->operators[changeOp.copysetID].JsonBody()); LOG(INFO) << "format: " - << scheduleMetrics->operators[changeOp.copysetID].JsonBody(); + << scheduleMetrics->operators[changeOp.copysetID].JsonBody(); } { - // 2. 移除 1中的operator + // 2. Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(changeOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->changeOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // There should be no problem removing metrics that do not exist in the + // map scheduleMetrics->UpdateRemoveMetric(changeOp); } } TEST_F(ScheduleMetricsTest, test_abnormal) { Operator transferOp(1, CopySetKey{1, 3}, OperatorPriority::NormalPriority, - steady_clock::now(), std::make_shared(1, 3)); + steady_clock::now(), + std::make_shared(1, 3)); ::curve::mds::topology::CopySetInfo transCsInfo(1, 3); transCsInfo.SetCopySetMembers(std::set{1, 2, 3}); transCsInfo.SetLeader(1); - // 获取copyset失败 + // Failed to obtain copyset EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)).WillOnce(Return(false)); scheduleMetrics->UpdateAddMetric(transferOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -422,36 +436,32 @@ TEST_F(ScheduleMetricsTest, test_abnormal) { ASSERT_TRUE( scheduleMetrics->operators[transferOp.copysetID].JsonBody().empty()); LOG(INFO) << "format: " - << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); + << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); scheduleMetrics->UpdateRemoveMetric(transferOp); - - // 获取chunkserver 或者 server失败 + // Failed to obtain chunkserver or server EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) - .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); - EXPECT_CALL(*topo, GetHostNameAndPortById(1)) - .WillOnce(Return("")); - EXPECT_CALL(*topo, GetHostNameAndPortById(2)) - .WillOnce(Return("")); + .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); + EXPECT_CALL(*topo, GetHostNameAndPortById(1)).WillOnce(Return("")); + EXPECT_CALL(*topo, 
GetHostNameAndPortById(2)).WillOnce(Return("")); EXPECT_CALL(*topo, GetHostNameAndPortById(3)) .Times(2) .WillRepeatedly(Return(GetServer(3).GetHostName() + ":" + - std::to_string(GetChunkServer(3).GetPort()))); + std::to_string(GetChunkServer(3).GetPort()))); scheduleMetrics->UpdateAddMetric(transferOp); std::string res = - std::string("{\"copySetEpoch\":\"0\",\"copySetId\":\"3\",") + - std::string("\"copySetLeader\":\"UNINTIALIZE_ID\",\"") + - std::string("copySetPeers\":\",,pubbeta2-curve3.org:9000") + - std::string("\",\"logicalPoolId\":\"1\",\"opItem\":") + - std::string("\"pubbeta2-curve3.org:9000\",\"opPriority\":") + - std::string("\"Normal\",\"opType\":\"TransferLeader\",") + - std::string("\"startEpoch\":\"1\"}"); + std::string("{\"copySetEpoch\":\"0\",\"copySetId\":\"3\",") + + std::string("\"copySetLeader\":\"UNINTIALIZE_ID\",\"") + + std::string("copySetPeers\":\",,pubbeta2-curve3.org:9000") + + std::string("\",\"logicalPoolId\":\"1\",\"opItem\":") + + std::string("\"pubbeta2-curve3.org:9000\",\"opPriority\":") + + std::string("\"Normal\",\"opType\":\"TransferLeader\",") + + std::string("\"startEpoch\":\"1\"}"); ASSERT_EQ(res, scheduleMetrics->operators[transferOp.copysetID].JsonBody()); LOG(INFO) << "format: " - << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); + << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); } } // namespace schedule } // namespace mds } // namespace curve - diff --git a/test/mds/schedule/scheduleService/scheduleService_test.cpp b/test/mds/schedule/scheduleService/scheduleService_test.cpp index 9814f8ce0b..17ab08e546 100644 --- a/test/mds/schedule/scheduleService/scheduleService_test.cpp +++ b/test/mds/schedule/scheduleService/scheduleService_test.cpp @@ -20,24 +20,25 @@ * Author: lixiaocui */ -#include -#include -#include +#include "src/mds/schedule/scheduleService/scheduleService.h" + #include +#include #include +#include +#include -#include "src/mds/schedule/scheduleService/scheduleService.h" -#include "test/mds/mock/mock_coordinator.h" #include "proto/schedule.pb.h" +#include "test/mds/mock/mock_coordinator.h" namespace curve { namespace mds { namespace schedule { -using ::testing::Return; using ::testing::_; -using ::testing::SetArgPointee; using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; class TestScheduleService : public ::testing::Test { protected: @@ -45,10 +46,10 @@ class TestScheduleService : public ::testing::Test { server_ = new brpc::Server(); coordinator_ = std::make_shared(); - ScheduleServiceImpl *scheduleService = + ScheduleServiceImpl* scheduleService = new ScheduleServiceImpl(coordinator_); - ASSERT_EQ(0, - server_->AddService(scheduleService, brpc::SERVER_OWNS_SERVICE)); + ASSERT_EQ( + 0, server_->AddService(scheduleService, brpc::SERVER_OWNS_SERVICE)); ASSERT_EQ(0, server_->Start("127.0.0.1", {5900, 5999}, nullptr)); listenAddr_ = server_->listen_address(); } @@ -63,7 +64,7 @@ class TestScheduleService : public ::testing::Test { protected: std::shared_ptr coordinator_; butil::EndPoint listenAddr_; - brpc::Server *server_; + brpc::Server* server_; }; TEST_F(TestScheduleService, test_RapidLeaderSchedule) { @@ -75,7 +76,7 @@ TEST_F(TestScheduleService, test_RapidLeaderSchedule) { request.set_logicalpoolid(1); RapidLeaderScheduleResponse response; - // 1. 快速leader均衡返回成功 + // 1. 
Rapid leader balancing returns success { EXPECT_CALL(*coordinator_, RapidLeaderSchedule(1)) .WillOnce(Return(kScheduleErrCodeSuccess)); @@ -85,7 +86,7 @@ TEST_F(TestScheduleService, test_RapidLeaderSchedule) { ASSERT_EQ(kScheduleErrCodeSuccess, response.statuscode()); } - // 2. 传入的logicalpoolid不存在 + // 2. The logicalpoolid passed in does not exist { EXPECT_CALL(*coordinator_, RapidLeaderSchedule(1)) .WillOnce(Return(kScheduleErrCodeInvalidLogicalPool)); @@ -105,13 +106,13 @@ TEST_F(TestScheduleService, test_QueryChunkServerRecoverStatus) { request.add_chunkserverid(1); QueryChunkServerRecoverStatusResponse response; - // 1. 查询chunkserver恢复状态返回成功 + // 1. Querying the chunkserver recovery status returns success { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryChunkServerRecoverStatus( - std::vector{1}, _)) + std::vector{1}, _)) .WillOnce(DoAll(SetArgPointee<1>(expectRes), - Return(kScheduleErrCodeSuccess))); + Return(kScheduleErrCodeSuccess))); brpc::Controller cntl; stub.QueryChunkServerRecoverStatus(&cntl, &request, &response, nullptr); @@ -121,11 +122,11 @@ TEST_F(TestScheduleService, test_QueryChunkServerRecoverStatus) { ASSERT_TRUE(response.recoverstatusmap().begin()->second); } - // 2. 传入的chunkserverid不合法 + // 2. The chunkserverid passed in is invalid { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryChunkServerRecoverStatus( - std::vector{1}, _)) + std::vector{1}, _)) .WillOnce(Return(kScheduleErrInvalidQueryChunkserverID)); brpc::Controller cntl; stub.QueryChunkServerRecoverStatus(&cntl, &request, &response, nullptr); diff --git a/test/mds/schedule/schedulerPOC/scheduler_poc.cpp b/test/mds/schedule/schedulerPOC/scheduler_poc.cpp index b8b3ddb148..b6919dee9b 100644 --- a/test/mds/schedule/schedulerPOC/scheduler_poc.cpp +++ b/test/mds/schedule/schedulerPOC/scheduler_poc.cpp @@ -22,25 +22,27 @@ #include #include -#include -#include -#include -#include + #include +#include +#include +#include #include -#include "src/mds/topology/topology_item.h" -#include "src/mds/topology/topology.h" -#include "src/mds/topology/topology_config.h" -#include "src/mds/topology/topology_service_manager.h" -#include "src/mds/schedule/topoAdapter.h" -#include "src/mds/schedule/scheduler.h" -#include "src/mds/schedule/operatorController.h" +#include + #include "src/mds/common/mds_define.h" -#include "src/mds/copyset/copyset_policy.h" #include "src/mds/copyset/copyset_manager.h" +#include "src/mds/copyset/copyset_policy.h" +#include "src/mds/schedule/operatorController.h" #include "src/mds/schedule/scheduleMetrics.h" -#include "test/mds/schedule/schedulerPOC/mock_topology.h" +#include "src/mds/schedule/scheduler.h" +#include "src/mds/schedule/topoAdapter.h" +#include "src/mds/topology/topology.h" +#include "src/mds/topology/topology_config.h" +#include "src/mds/topology/topology_item.h" +#include "src/mds/topology/topology_service_manager.h" #include "test/mds/mock/mock_topology.h" +#include "test/mds/schedule/schedulerPOC/mock_topology.h" using ::curve::mds::topology::MockTopology; @@ -141,10 +143,10 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { logicalPoolSet_.insert(0); } - std::vector - GetLogicalPoolInCluster(LogicalPoolFilter filter = [](const LogicalPool &) { - return true; - }) const override { + std::vector GetLogicalPoolInCluster(LogicalPoolFilter filter = + [](const LogicalPool&) { + return true; + }) const override { std::vector ret; for (auto lid : logicalPoolSet_) { ret.emplace_back(lid); @@ -152,10 +154,10 @@ class FakeTopo : public
::curve::mds::topology::TopologyImpl { return ret; } - std::vector - GetChunkServerInCluster(ChunkServerFilter filter = [](const ChunkServer &) { - return true; - }) const override { + std::vector GetChunkServerInCluster( + ChunkServerFilter filter = [](const ChunkServer&) { + return true; + }) const override { std::vector ret; for (auto it = chunkServerMap_.begin(); it != chunkServerMap_.end(); it++) { @@ -165,7 +167,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } std::list GetChunkServerInLogicalPool( - PoolIdType id, ChunkServerFilter filter = [](const ChunkServer &) { + PoolIdType id, ChunkServerFilter filter = [](const ChunkServer&) { return true; }) const override { std::list ret; @@ -177,7 +179,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } std::list GetChunkServerInServer( - ServerIdType id, ChunkServerFilter filter = [](const ChunkServer &) { + ServerIdType id, ChunkServerFilter filter = [](const ChunkServer&) { return true; }) const override { std::list res; @@ -190,7 +192,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } std::vector GetCopySetsInCluster( - CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo &) { + CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo&) { return true; }) const override { std::vector ret; @@ -202,7 +204,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { std::vector GetCopySetsInChunkServer( ChunkServerIdType csId, - CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo &) { + CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo&) { return true; }) const override { std::vector ret; @@ -217,7 +219,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { std::vector<::curve::mds::topology::CopySetInfo> GetCopySetInfosInLogicalPool( PoolIdType logicalPoolId, - CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo &) { + CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo&) { return true; }) const override { std::vector<::curve::mds::topology::CopySetInfo> ret; @@ -230,7 +232,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { return ret; } - bool GetServer(ServerIdType serverId, Server *out) const override { + bool GetServer(ServerIdType serverId, Server* out) const override { auto it = serverMap_.find(serverId); if (it != serverMap_.end()) { *out = it->second; @@ -240,7 +242,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } bool GetCopySet(::curve::mds::topology::CopySetKey key, - ::curve::mds::topology::CopySetInfo *out) const override { + ::curve::mds::topology::CopySetInfo* out) const override { auto it = copySetMap_.find(key); if (it != copySetMap_.end()) { *out = it->second; @@ -251,7 +253,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } bool GetChunkServer(ChunkServerIdType chunkserverId, - ChunkServer *out) const override { + ChunkServer* out) const override { auto it = chunkServerMap_.find(chunkserverId); if (it != chunkServerMap_.end()) { *out = it->second; @@ -260,7 +262,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { return false; } - bool GetLogicalPool(PoolIdType poolId, LogicalPool *out) const override { + bool GetLogicalPool(PoolIdType poolId, LogicalPool* out) const override { LogicalPool::RedundanceAndPlaceMentPolicy rap; rap.pageFileRAP.copysetNum = copySetMap_.size(); rap.pageFileRAP.replicaNum = 3; @@ -273,7 +275,7 @@ class FakeTopo : public 
::curve::mds::topology::TopologyImpl { return true; } - int UpdateChunkServerOnlineState(const OnlineState &onlineState, + int UpdateChunkServerOnlineState(const OnlineState& onlineState, ChunkServerIdType id) override { auto it = chunkServerMap_.find(id); if (it == chunkServerMap_.end()) { @@ -284,7 +286,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } } - int UpdateChunkServerRwState(const ChunkServerStatus &rwStatus, + int UpdateChunkServerRwState(const ChunkServerStatus& rwStatus, ChunkServerIdType id) { auto it = chunkServerMap_.find(id); if (it == chunkServerMap_.end()) { @@ -296,7 +298,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } int UpdateCopySetTopo( - const ::curve::mds::topology::CopySetInfo &data) override { + const ::curve::mds::topology::CopySetInfo& data) override { CopySetKey key(data.GetLogicalPoolId(), data.GetId()); auto it = copySetMap_.find(key); if (it != copySetMap_.end()) { @@ -330,19 +332,19 @@ class FakeTopologyServiceManager : public TopologyServiceManager { bool CreateCopysetNodeOnChunkServer( ChunkServerIdType csId, - const std::vector<::curve::mds::topology::CopySetInfo> &cs) override { + const std::vector<::curve::mds::topology::CopySetInfo>& cs) override { return true; } }; class FakeTopologyStat : public TopologyStat { public: - explicit FakeTopologyStat(const std::shared_ptr &topo) + explicit FakeTopologyStat(const std::shared_ptr& topo) : topo_(topo) {} void UpdateChunkServerStat(ChunkServerIdType csId, - const ChunkServerStat &stat) {} + const ChunkServerStat& stat) {} - bool GetChunkServerStat(ChunkServerIdType csId, ChunkServerStat *stat) { + bool GetChunkServerStat(ChunkServerIdType csId, ChunkServerStat* stat) { if (!leaderCountOn) { stat->leaderCount = 10; return true; @@ -360,7 +362,7 @@ class FakeTopologyStat : public TopologyStat { stat->leaderCount = leaderCount; return true; } - bool GetChunkPoolSize(PoolIdType pId, uint64_t *chunkPoolSize) { + bool GetChunkPoolSize(PoolIdType pId, uint64_t* chunkPoolSize) { return true; } @@ -401,7 +403,7 @@ class CopysetSchedulerPOC : public testing::Test { void TearDown() override {} void PrintScatterWithInOnlineChunkServer(PoolIdType lid = 0) { - // 打印初始每个chunkserver的scatter-with + // Print the initial scatter with for each chunkserver int sumFactor = 0; std::map factorMap; int max = -1; @@ -437,7 +439,7 @@ class CopysetSchedulerPOC : public testing::Test { << ", scatter-with:" << factor; } - // 打印scatter-with的方差 + // Print variance of scatter-with LOG(INFO) << "scatter-with (online chunkserver): " << factorMap.size(); float avg = static_cast(sumFactor) / factorMap.size(); float variance = 0; @@ -446,14 +448,14 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= factorMap.size(); LOG(INFO) << "###print scatter-with in online chunkserver###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId - << ")" - << ", 最小值:(" << min << "," << minId << ")"; + << "Mean: " << avg << ", Variance: " << variance + << ", Standard Deviation: " << std::sqrt(variance) + << ", Maximum Value: (" << max << "," << maxId << ")" + << ", Minimum Value: (" << min << "," << minId << ")"; } void PrintScatterWithInLogicalPool(PoolIdType lid = 0) { - // 打印初始每个chunkserver的scatter-with + // Print the initial scatter with for each chunkserver int sumFactor = 0; int max = -1; int maxId = -1; @@ -477,7 +479,7 @@ class CopysetSchedulerPOC : public testing::Test { << ", scatter-with:" << factor; } - // 打印scatter-with的方差 + 
// Print variance of scatter-width LOG(INFO) << "scatter-with (all chunkserver): " << factorMap.size(); float avg = static_cast<float>(sumFactor) / factorMap.size(); float variance = 0; @@ -486,14 +488,14 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= factorMap.size(); LOG(INFO) << "###print scatter-with in cluster###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId - << ")" - << ", 最小值:(" << min << "," << minId << ")"; + << "Mean: " << avg << ", Variance: " << variance + << ", Standard Deviation: " << std::sqrt(variance) + << ", Maximum Value: (" << max << "," << maxId << ")" + << ", Minimum Value: (" << min << "," << minId << ")"; } void PrintCopySetNumInOnlineChunkServer(PoolIdType lid = 0) { - // 打印每个chunksever上copyset的数量 + // Print the number of copysets on each chunkserver std::map<ChunkServerIdType, int> numberMap; int sumNumber = 0; int max = -1; @@ -526,7 +528,7 @@ class CopysetSchedulerPOC : public testing::Test { << ", copyset num:" << number; } - // 打印方差 + // Print variance float avg = static_cast<float>(sumNumber) / static_cast<float>(numberMap.size()); float variance = 0; @@ -535,14 +537,14 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= numberMap.size(); LOG(INFO) << "###print copyset-num in online chunkserver###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId - << ")" - << "), 最小值:(" << min << "," << minId << ")"; + << "Mean: " << avg << ", Variance: " << variance + << ", Standard Deviation: " << std::sqrt(variance) + << ", Maximum Value: (" << max << "," << maxId << ")" + << ", Minimum Value: (" << min << "," << minId << ")"; } void PrintCopySetNumInLogicalPool(PoolIdType lid = 0) { - // 打印每个chunksever上copyset的数量 + // Print the number of copysets on each chunkserver std::map<ChunkServerIdType, int> numberMap; int sumNumber = 0; int max = -1; @@ -561,7 +563,7 @@ class CopysetSchedulerPOC : public testing::Test { } } - // 打印方差 + // Print variance float avg = static_cast<float>(sumNumber) / static_cast<float>(numberMap.size()); float variance = 0; @@ -570,13 +572,13 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= numberMap.size(); LOG(INFO) << "###print copyset-num in cluster###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值: " << max << ", 最小值:" - << min; + << "Mean: " << avg << ", Variance: " << variance + << ", Standard Deviation: " << std::sqrt(variance) + << ", Maximum Value: " << max << ", Minimum Value: " << min; } void PrintLeaderCountInChunkServer(PoolIdType lid = 0) { - // 打印每个chunkserver上leader的数量 + // Print the number of leaders on each chunkserver std::map<ChunkServerIdType, int> leaderDistribute; int sumNumber = 0; int max = -1; @@ -612,10 +614,10 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= leaderDistribute.size(); LOG(INFO) << "###print leader-num in cluster###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId - << ")" - << "), 最小值:(" << min << "," << minId << ")"; + << "Mean: " << avg << ", Variance: " << variance + << ", Standard Deviation: " << std::sqrt(variance) + << ", Maximum Value: (" << max << "," << maxId << ")" + << ", Minimum Value: (" << min << "," << minId << ")"; } int GetLeaderCountRange(PoolIdType lid = 0) { @@ -637,16 +639,16 @@ return max - min; }
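For orientation, "scatter-width" of a chunkserver is the number of distinct other chunkservers that share at least one copyset with it (computed by GetChunkServerScatterwith() below), and every Print helper above reports the same population statistics over such a per-chunkserver counter map — summarized here as a sketch of what the loops compute, with x_i the per-chunkserver value (scatter-width, copyset num, or leader num) over the map's n entries:

    avg      = sum(x_i) / n
    variance = sum((x_i - avg)^2) / n    // population variance, as in the loops above
    stddev   = sqrt(variance)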
- // 计算每个chunkserver的scatter-with + // Calculate the scatter-width of each chunkserver int GetChunkServerScatterwith(ChunkServerIdType csId) { - // 计算chunkserver上的scatter-with + // Calculate the scatter-width on this chunkserver std::map<ChunkServerIdType, int> chunkServerCount; for (auto it : topo_->GetCopySetsInChunkServer(csId)) { // get copyset info ::curve::mds::topology::CopySetInfo info; topo_->GetCopySet(it, &info); - // 统计所分布的chunkserver + // Count the chunkservers its copysets are spread across for (auto it : info.GetCopySetMembers()) { if (it == csId) { continue; } @@ -673,11 +675,11 @@ class CopysetSchedulerPOC : public testing::Test { ChunkServerIdType RandomOfflineOneChunkServer(PoolIdType lid = 0) { auto chunkServers = topo_->GetChunkServerInLogicalPool(lid); - // 选择[0, chunkServers.size())中的index + // Select an index in [0, chunkServers.size()) std::srand(std::time(nullptr)); int index = std::rand() % chunkServers.size(); - // 设置目标chunkserver的状态为offline + // Set the status of the target chunkserver to offline auto it = chunkServers.begin(); std::advance(it, index); topo_->UpdateChunkServerOnlineState(OnlineState::OFFLINE, *it); @@ -697,7 +699,7 @@ class CopysetSchedulerPOC : public testing::Test { topo_->UpdateChunkServerOnlineState(OnlineState::ONLINE, id); } - void SetChunkServerOnline(const std::set<ChunkServerIdType> &list) { + void SetChunkServerOnline(const std::set<ChunkServerIdType>& list) { for (auto id : list) { SetChunkServerOnline(id); } @@ -741,10 +743,10 @@ class CopysetSchedulerPOC : public testing::Test { opt, topoAdapter_, opController_); } - void ApplyOperatorsInOpController(const std::set<ChunkServerIdType> &list) { + void ApplyOperatorsInOpController(const std::set<ChunkServerIdType>& list) { std::vector<CopySetKey> keys; for (auto op : opController_->GetOperators()) { - auto type = dynamic_cast<RemovePeer *>(op.step.get()); + auto type = dynamic_cast<RemovePeer*>(op.step.get()); ASSERT_TRUE(type != nullptr); ASSERT_TRUE(list.end() != list.find(type->GetOldPeer())); @@ -771,7 +773,7 @@ class CopysetSchedulerPOC : public testing::Test { void ApplyTranferLeaderOperator() { for (auto op : opController_->GetOperators()) { - auto type = dynamic_cast<TransferLeader *>(op.step.get()); + auto type = dynamic_cast<TransferLeader*>(op.step.get()); ASSERT_TRUE(type != nullptr); ::curve::mds::topology::CopySetInfo info; @@ -781,9 +783,9 @@ class CopysetSchedulerPOC : public testing::Test { } } - // 有两个chunkserver offline的停止条件: - // 所有copyset均有两个及以上的副本offline + // Stop condition with two chunkservers offline: + // every copyset has two or more replicas offline - bool SatisfyStopCondition(const std::set<ChunkServerIdType> &idList) { + bool SatisfyStopCondition(const std::set<ChunkServerIdType>& idList) { std::vector<::curve::mds::topology::CopySetKey> copysetList; for (auto id : idList) { auto list = topo_->GetCopySetsInChunkServer(id); @@ -831,58 +833,65 @@ }; TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_1) { - // 测试一个chunkserver offline恢复后的情况 - // 1. 创建recoverScheduler + // Test the state after one offline chunkserver has been recovered + // 1. Create recoverScheduler BuilRecoverScheduler(1); - // 2. 任意选择一个chunkserver处于offline状态 + // 2. Select any one chunkserver to be offline ChunkServerIdType choose = RandomOfflineOneChunkServer(); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operators until there is no copyset left on choose do { recoverScheduler_->Schedule(); // update copyset to topology ApplyOperatorsInOpController(std::set<ChunkServerIdType>{choose}); } while (topo_->GetCopySetsInChunkServer(choose).size() > 0);
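+        // Termination note: ApplyOperatorsInOpController() asserts that every
+        // generated step is a RemovePeer whose old peer is in the given set and
+        // applies it to the fake topology, so the number of copysets left on
+        // `choose` only shrinks and the loop eventually exits.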
- // 4. 打印最终的scatter-with + // 4. Print the final scatter-width PrintScatterWithInOnlineChunkServer(); PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // =============================结果====================================== - // ===========================集群初始状态================================= + // =============================Result====================================== + // ======================Initial state of the cluster====================== // ###print scatter-with in cluster### - // 均值:97.9556, 方差:11.5314, 标准差: 3.39579, 最大值:106, 最小值:88 + // Mean: 97.9556, Variance: 11.5314, Standard Deviation: 3.39579, Max: 106, + // Min: 88 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 - // ==========================恢复之后的状态================================= + // ========================Status after recovery======================== //NOLINT // ###print scatter-with in online chunkserver### - // 均值:均值:98.8156, 方差:10.3403, 标准差: 3.21564, 最大值:106, - // 最小值:95 //NOLINT + // Mean: 98.8156, Variance: 10.3403, Standard Deviation: 3.21564, Max: 106, + // Min: 95 //NOLINT // ###print scatter-with in cluster### - // 均值:98.2667, 方差:64.2289, 标准差: 8.0143, 最大值:106, 最小值:0 + // Mean: 98.2667, Variance: 64.2289, Standard Deviation: 8.0143, Max: 106, + // Min: 0 // ###print copyset-num in online chunkserver### - // 均值:100.559, 方差:1.77729, 标准差: 1.33315, 最大值:109, 最小值:100 + // Mean: 100.559, Variance: 1.77729, Standard Deviation: 1.33315, Max: 109, + // Min: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:57.6333, 标准差: 7.59166, 最大值: 109, 最小值:0 + // Mean: 100, Variance: 57.6333, Standard Deviation: 7.59166, Max: 109, + // Min: 0 } TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_2) { - // 测试一个chunkserver offline恢复过程中,另一个chunkserver offline的情况 - // 1. 创建recoverScheduler + // Test another chunkserver going offline while the first offline + // chunkserver is still being recovered + // 1. Create recoverScheduler BuilRecoverScheduler(1); - // 2. 任意选择两个chunkserver处于offline状态 + // 2. Choose any two chunkservers to be offline std::set<ChunkServerIdType> idlist; ChunkServerIdType choose1 = 0; ChunkServerIdType choose2 = 0; choose1 = RandomOfflineOneChunkServer(); idlist.emplace(choose1); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operators until there is no copyset left on choose do { recoverScheduler_->Schedule(); @@ -896,35 +905,43 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_2) { ApplyOperatorsInOpController(std::set<ChunkServerIdType>{choose2}); } while (!SatisfyStopCondition(idlist));
- // 4. 打印最终的scatter-with + // 4. Print the final scatter-width PrintScatterWithInOnlineChunkServer(); PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // ============================结果=================================== - // =========================集群初始状态=============================== + // ============================Result=================================== + // =====================Initial state of the cluster===================== // ###print scatter-with in cluster### - // 均值:97.3, 方差:9.89889, 标准差:3.14625, 最大值:106, 最小值:89 + // Mean: 97.3, Variance: 9.89889, Standard Deviation: 3.14625, Max: 106, + // Min: 89 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 - // =========================恢复之后的状态============================== + // =======================Status after recovery======================= // ###print scatter-with in online chunkserver### - // 均值:100.348, 方差:7.47418, 标准差: 2.73389, 最大值:108, 最小值:101 + // Mean: 100.348, Variance: 7.47418, Standard Deviation: 2.73389, Max: 108, + // Min: 101 // ###print scatter-with in cluster### - // 均值:99.2333, 方差:118.034, 标准差: 10.8644, 最大值:108, 最小值:0 + // Mean: 99.2333, Variance: 118.034, Standard Deviation: 10.8644, Max: 108, + // Min: 0 // ###print copyset-num in online chunkserver### - // 均值:101.124, 方差:2.9735, 标准差: 1.72438, 最大值:112, 最小值:100 + // Mean: 101.124, Variance: 2.9735, Standard Deviation: 1.72438, Max: 112, + // Min: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:115.3, 标准差: 10.7378, 最大值: 112, 最小值:0 + // Mean: 100, Variance: 115.3, Standard Deviation: 10.7378, Max: 112, Min: 0 } TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { - // 测试一个chunkserver offline恢复过程中,接连有5个chunkserver offline - // 1. 创建recoverScheduler + // While one offline chunkserver is being recovered, 5 more chunkservers + // go offline one after another + // 1. Create recoverScheduler BuilRecoverScheduler(1); - // 2. 任意选择两个chunkserver处于offline状态 + // 2. Take chunkservers offline one after another std::set<ChunkServerIdType> idlist; std::vector<ChunkServerIdType> origin; for (int i = 0; i < 6; i++) { @@ -934,7 +951,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { origin[0] = RandomOfflineOneChunkServer(); idlist.emplace(origin[0]); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operators until there is no copyset left on choose do { recoverScheduler_->Schedule(); @@ -950,35 +967,42 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { ApplyOperatorsInOpController(idlist); } while (!SatisfyStopCondition(idlist));
- // 4. 打印最终的scatter-with + // 4. Print the final scatter-width PrintScatterWithInOnlineChunkServer(); PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // ============================结果==================================== - // ========================集群初始状态================================= + // ============================Result==================================== + // =====================Initial state of the cluster===================== // ###print scatter-with in cluster### - // 均值:97.6, 方差:11.8067, 标准差: 3.43608, 最大值:105, 最小值:87 + // Mean: 97.6, Variance: 11.8067, Standard Deviation: 3.43608, Max: 105, + // Min: 87 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 - // ========================恢复之后的状态================================ + // ======================Status after recovery====================== // ###print scatter-with in online chunkserver### - // 均值:105.425, 方差:9.95706, 标准差: 3.15548, 最大值:116, 最小值:103 + // Mean: 105.425, Variance: 9.95706, Standard Deviation: 3.15548, Max: 116, + // Min: 103 // ###print scatter-with in cluster### - // 均值:101.933, 方差:363.262, 标准差: 19.0594, 最大值:116, 最小值:0 + // Mean: 101.933, Variance: 363.262, Standard Deviation: 19.0594, Max: 116, + // Min: 0 // ###print copyset-num in online chunkserver### - // 均值:103.425, 方差:13.164, 标准差: 3.62822, 最大值:121, 最小值:100 + // Mean: 103.425, Variance: 13.164, Standard Deviation: 3.62822, Max: 121, + // Min: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:352.989, 标准差: 18.788, 最大值: 121, 最小值:0 + // Mean: 100, Variance: 352.989, Standard Deviation: 18.788, Max: 121, + // Min: 0 } TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) { - // 测试20个chunkserver 接连 offline - // 1. 创建recoverScheduler + // Test 20 chunkservers going offline one after another + // 1. Create recoverScheduler BuilRecoverScheduler(1); - // 2. 任意选择两个chunkserver处于offline状态 + // 2. Take chunkservers offline one after another std::set<ChunkServerIdType> idlist; std::vector<ChunkServerIdType> origin; for (int i = 0; i < 20; i++) { @@ -988,7 +1012,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) { origin[0] = RandomOfflineOneChunkServer(); idlist.emplace(origin[0]); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operators until there is no copyset left on choose do { recoverScheduler_->Schedule(); @@ -1004,7 +1028,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) { ApplyOperatorsInOpController(idlist); } while (!SatisfyStopCondition(idlist));
- // 4. 打印最终的scatter-with + // 4. Print the final scatter-width PrintScatterWithInOnlineChunkServer(); PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); @@ -1012,24 +1036,24 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) { } TEST_F(CopysetSchedulerPOC, test_chunkserver_offline_over_concurrency) { - // 测试一个server有多个chunkserver offline, 有一个被设置为pending, - // 可以recover的情况 + // Test a server with multiple chunkservers offline, one of which is set + // to pending and can therefore be recovered offlineTolerent_ = 20; BuilRecoverScheduler(4); - // offline一个server上的chunkserver + // Take the chunkservers on one server offline auto chunkserverSet = OfflineChunkServerInServer1(); - // 选择其中一个设置为pendding状态 + // Pick one of them and set it to pending status ChunkServerIdType target = *chunkserverSet.begin(); topo_->UpdateChunkServerRwState(ChunkServerStatus::PENDDING, target); int opNum = 0; int targetOpNum = topo_->GetCopySetsInChunkServer(target).size(); - // 开始恢复 + // Start recovery do { recoverScheduler_->Schedule(); opNum += opController_->GetOperators().size(); - // apply operator, 把copyset更新到topology + // Apply the operators, updating the copysets to the topology ApplyOperatorsInOpController(std::set<ChunkServerIdType>{target}); } while (topo_->GetCopySetsInChunkServer(target).size() > 0); @@ -1038,14 +1062,14 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_1) { // NOLINT - // 测试一个chunkserver offline, 集群回迁的情况 + // Test one chunkserver going offline, then the cluster migrating + // copysets back - // 1. 一个chunkserver offline后恢复 + // 1. Recover after one chunkserver goes offline BuilRecoverScheduler(1); ChunkServerIdType choose = RandomOfflineOneChunkServer(); do { recoverScheduler_->Schedule(); - // apply operator, 把copyset更新到topology + // Apply the operators, updating the copysets to the topology ApplyOperatorsInOpController(std::set<ChunkServerIdType>{choose}); } while (topo_->GetCopySetsInChunkServer(choose).size() > 0); @@ -1053,23 +1077,30 @@ TEST_F(CopysetSchedulerPOC, PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // ============================结果==================================== - // ========================集群初始状态================================= + // ============================Result==================================== + // =====================Initial state of the cluster===================== // ###print scatter-with in cluster### - // 均值:97.6667, 方差:10.9444, 标准差: 3.30824, 最大值:107, 最小值:90 + // Mean: 97.6667, Variance: 10.9444, Standard Deviation: 3.30824, Max: 107, + // Min: 90 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 - // ========================恢复之后的状态================================ + // ======================Status after recovery====================== // ###print scatter-with in online chunkserver### - // 均值:99.1061, 方差:10.1172, 标准差: 3.18076, 最大值:108, 最小值:91 + // Mean: 99.1061, Variance: 10.1172, Standard Deviation: 3.18076, Max: 108, + // Min: 91 // ###print scatter-with in cluster### - // 均值:98.5556, 方差:64.3247, 标准差: 8.02027, 最大值:108, 最小值:0 + // Mean: 98.5556, Variance: 64.3247, Standard Deviation: 8.02027, Max: 108, + // Min: 0 // ###print copyset-num in online chunkserver### - // 均值:100.559, 方差:1.56499, 标准差: 1.251, 最大值:107, 最小值:100
+ // Mean: 100.559, Variance: 1.56499, Standard Deviation: 1.251, Max: 107, + // Min: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:57.4222, 标准差: 7.57774, 最大值: 107, 最小值:0 + // Mean: 100, Variance: 57.4222, Standard Deviation: 7.57774, Max: 107, + // Min: 0 - // 2. chunkserver-choose恢复成online状态 + // 2. Chunkserver `choose` is restored to online state SetChunkServerOnline(choose); BuildCopySetScheduler(1); std::vector<ChunkServerIdType> csList; @@ -1087,20 +1118,23 @@ TEST_F(CopysetSchedulerPOC, minScatterwidth_ * (1 + scatterwidthPercent_)); ASSERT_TRUE(GetChunkServerScatterwith(choose) >= minScatterwidth_); - // ============================结果==================================== - // ========================迁移后的状态================================= + // ============================Result==================================== + // ======================Status after migration====================== // ###print scatter-with in cluster### - // 均值:99.2667, 方差:9.65111, 标准差: 3.10662, 最大值:109, 最小值:91 + // Mean: 99.2667, Variance: 9.65111, Standard Deviation: 3.10662, Max: 109, + // Min: 91 // ###print copyset-num in cluster### - // 均值:100, 方差:0.5, 标准差: 0.707107, 最大值: 101, 最小值:91 + // Mean: 100, Variance: 0.5, Standard Deviation: 0.707107, Max: 101, Min: 91 } TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_copysetRebalance_2) { // NOLINT - // 测试一个chunkserver offline恢复过程中,另一个chunkserver offline - // 集群回迁的情况 + // Test another chunkserver going offline while the first is being + // recovered, then the cluster migrating copysets back - // 1. chunkserver offline后恢复 + // 1. Recover after the chunkservers go offline BuilRecoverScheduler(1); std::set<ChunkServerIdType> idlist; ChunkServerIdType choose1 = 0; @@ -1124,23 +1158,30 @@ TEST_F(CopysetSchedulerPOC, PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // ============================结果=================================== - // =========================集群初始状态=============================== + // ============================Result=================================== + // =====================Initial state of the cluster===================== // ###print scatter-with in cluster### - // 均值:97.4889, 方差:9.96099, 标准差: 3.1561, 最大值:105, 最小值:89 + // Mean: 97.4889, Variance: 9.96099, Standard Deviation: 3.1561, Max: 105, + // Min: 89 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 - // =========================恢复之后的状态============================== + // =======================Status after recovery======================= // ###print scatter-with in online chunkserver### - // 均值:100.472, 方差:7.37281, 标准差: 2.71529, 最大值:106, 最小值:91 + // Mean: 100.472, Variance: 7.37281, Standard Deviation: 2.71529, Max: 106, + // Min: 91 // ###print scatter-with in cluster### - // 均值:99.3556, 方差:118.207, 标准差: 10.8723, 最大值:106, 最小值:0 + // Mean: 99.3556, Variance: 118.207, Standard Deviation: 10.8723, Max: 106, + // Min: 0 // ###print copyset-num in online chunkserver### - // 均值:101.124, 方差:2.77125, 标准差: 1.66471, 最大值:111, 最小值:100 + // Mean: 101.124, Variance: 2.77125, Standard Deviation: 1.66471, Max: 111, + // Min: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:115.1, 标准差: 10.7285, 最大值: 111, 最小值:0
+ // Mean: 100, Variance: 115.1, Standard Deviation: 10.7285, Max: 111, Min: 0 - // 2. cchunkserver恢复成online状态 + // 2. Restore the chunkservers to online state SetChunkServerOnline(choose1); SetChunkServerOnline(choose2); BuildCopySetScheduler(1); @@ -1152,20 +1193,22 @@ TEST_F(CopysetSchedulerPOC, } while (removeOne > 0); PrintScatterWithInLogicalPool(); PrintCopySetNumInLogicalPool(); - // ============================结果==================================== - // ========================迁移后的状态================================= + // ============================Result==================================== + // ======================Status after migration====================== // ###print scatter-with in cluster### - // 均值:100.556, 方差:8.18025, 标准差: 2.86011, 最大值:107, 最小值:91 + // Mean: 100.556, Variance: 8.18025, Standard Deviation: 2.86011, Max: 107, + // Min: 91 // ###print copyset-num in cluster### - // 均值:100, 方差:1, 标准差: 1, 最大值: 101, 最小值:91 + // Mean: 100, Variance: 1, Standard Deviation: 1, Max: 101, Min: 91 } TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_3) { // NOLINT - // 测试一个chunkserver offline恢复过程中,接连有5个chunkserver offline - // 回迁的情况 + // While one offline chunkserver is being recovered, 5 more chunkservers + // go offline one after another; copysets are then migrated back - // 1. chunkserver offline后恢复 + // 1. Recover after the chunkservers go offline BuilRecoverScheduler(1); std::set<ChunkServerIdType> idlist; std::vector<ChunkServerIdType> origin; for (int i = 0; i < 6; i++) { @@ -1176,7 +1219,7 @@ TEST_F(CopysetSchedulerPOC, origin[0] = RandomOfflineOneChunkServer(); idlist.emplace(origin[0]); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operators until there is no copyset left on choose do { recoverScheduler_->Schedule(); @@ -1197,23 +1240,30 @@ TEST_F(CopysetSchedulerPOC, PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // ============================结果==================================== - // ========================集群初始状态================================= + // ============================Result==================================== + // =====================Initial state of the cluster===================== // ###print scatter-with in cluster### - // 均值:97.6, 方差:11.8067, 标准差: 3.43608, 最大值:105, 最小值:87 + // Mean: 97.6, Variance: 11.8067, Standard Deviation: 3.43608, Max: 105, + // Min: 87 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 - // ========================恢复之后的状态================================ + // ======================Status after recovery====================== // ###print scatter-with in online chunkserver### - // 均值:105.425, 方差:9.95706, 标准差: 3.15548, 最大值:116, 最小值:103 + // Mean: 105.425, Variance: 9.95706, Standard Deviation: 3.15548, Max: 116, + // Min: 103 // ###print scatter-with in cluster### - // 均值:101.933, 方差:363.262, 标准差: 19.0594, 最大值:116, 最小值:0 + // Mean: 101.933, Variance: 363.262, Standard Deviation: 19.0594, Max: 116, + // Min: 0 // ###print copyset-num in online chunkserver### - // 均值:103.425, 方差:13.164, 标准差: 3.62822, 最大值:121, 最小值:100 + // Mean: 103.425, Variance: 13.164, Standard Deviation: 3.62822, Max: 121, + // Min: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:352.989, 标准差: 18.788, 最大值: 121, 最小值:0
+ // Mean: 100, Variance: 352.989, Standard Deviation: 18.788, Max: 121, + // Min: 0 - // 2. chunkserver恢复成online状态 + // 2. Restore the chunkservers to online state SetChunkServerOnline(idlist); BuildCopySetScheduler(1); std::vector<ChunkServerIdType> csList; @@ -1235,12 +1285,14 @@ TEST_F(CopysetSchedulerPOC, ASSERT_TRUE(GetChunkServerScatterwith(choose) >= minScatterwidth_); } - // ============================结果==================================== - // ========================迁移后的状态================================= + // ============================Result==================================== + // ======================Status after migration====================== // ###print scatter-with in cluster### - // 均值:100.556, 方差:8.18025, 标准差: 2.86011, 最大值:107, 最小值:91 + // Mean: 100.556, Variance: 8.18025, Standard Deviation: 2.86011, Max: 107, + // Min: 91 // ###print copyset-num in cluster### - // 均值:100, 方差:1, 标准差: 1, 最大值: 101, 最小值:91 + // Mean: 100, Variance: 1, Standard Deviation: 1, Max: 101, Min: 91 } TEST_F(CopysetSchedulerPOC, diff --git a/test/mds/schedule/scheduler_helper_test.cpp b/test/mds/schedule/scheduler_helper_test.cpp index ff54d4c5bf..76668c415d 100644 --- a/test/mds/schedule/scheduler_helper_test.cpp +++ b/test/mds/schedule/scheduler_helper_test.cpp @@ -20,15 +20,17 @@ * Author: lixiaocui */ +#include "src/mds/schedule/scheduler_helper.h" + #include <gtest/gtest.h> + #include "test/mds/schedule/common.h" #include "test/mds/schedule/mock_topoAdapter.h" -#include "src/mds/schedule/scheduler_helper.h" using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; namespace curve { namespace mds { @@ -42,9 +44,7 @@ class TestSchedulerHelper : public ::testing::Test { topoAdapter_ = std::make_shared<MockTopoAdapter>(); } - void TearDown() override { - topoAdapter_ = nullptr; - } + void TearDown() override { topoAdapter_ = nullptr; } protected: std::shared_ptr<MockTopoAdapter> topoAdapter_; @@ -56,67 +56,83 @@ TEST_F(TestSchedulerHelper, test_SatisfyScatterWidth_target) { int maxScatterWidth = minScatterWidth * (1 + scatterWidthRangePerent); bool target = true; { - // 1. 变更之后未达到最小值,但使得scatter-width增大 + // 1. After the change, the minimum value was not reached, but it + // increased the scatter-width int oldValue = 10; int newValue = 13; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 2. 变更之后未达到最小值,scattter-width不变 + // 2. After the change, the minimum value is not reached, and the + // scatter-width remains unchanged int oldValue = 10; int newValue = 10; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 3. 变更之后未达到最小值,scatter-width减小 + // 3. After the change, the minimum value was not reached and the + // scatter-width decreased int oldValue = 10; int newValue = 8; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 4. 变更之后等于最小值 + // 4. 
Equal to minimum value after change int oldValue = minScatterWidth + 2; int newValue = minScatterWidth; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 5. 变更之后大于最小值,小于最大值 + // 5. After the change, it is greater than the minimum value and less + // than the maximum value int oldValue = minScatterWidth; int newValue = minScatterWidth + 2; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 6. 变更之后等于最大值 + // 6. Equal to maximum value after change int oldValue = maxScatterWidth - 2; int newValue = maxScatterWidth; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 7. 变更之后大于最大值,scatter-width增大 + // 7. After the change, it is greater than the maximum value and the + // scatter-width increases int oldValue = maxScatterWidth + 1; int newValue = maxScatterWidth + 2; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 8. 变更之后大于最大值,scatter-width不变 + // 8. After the change, it is greater than the maximum value, and the + // scatter-width remains unchanged int oldValue = maxScatterWidth + 2; int newValue = maxScatterWidth + 2; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 9. 变更之后大于最大值,scatter-width减小 + // 9. After the change is greater than the maximum value, the + // scatter-width decreases int oldValue = maxScatterWidth + 3; int newValue = maxScatterWidth + 2; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } } @@ -126,67 +142,83 @@ TEST_F(TestSchedulerHelper, test_SatisfyScatterWidth_not_target) { int maxScatterWidth = minScatterWidth * (1 + scatterWidthRangePerent); bool target = false; { - // 1. 变更之后未达到最小值,但使得scatter-width增大 + // 1. After the change, the minimum value was not reached, but it + // increased the scatter-width int oldValue = 10; int newValue = 13; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 2. 变更之后未达到最小值,scattter-width不变 + // 2. 
After the change, the minimum value is not reached, and the + // scatter-width remains unchanged int oldValue = 10; int newValue = 10; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 3. 变更之后未达到最小值,scatter-width减小 + // 3. After the change, the minimum value was not reached and the + // scatter-width decreased int oldValue = 10; int newValue = 8; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 4. 变更之后等于最小值 + // 4. Equal to minimum value after change int oldValue = minScatterWidth + 2; int newValue = minScatterWidth; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 5. 变更之后大于最小值,小于最大值 + // 5. After the change, it is greater than the minimum value and less + // than the maximum value int oldValue = minScatterWidth; int newValue = minScatterWidth + 2; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 6. 变更之后等于最大值 + // 6. Equal to maximum value after change int oldValue = maxScatterWidth - 2; int newValue = maxScatterWidth; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 7. 变更之后大于最大值,scatter-width增大 + // 7. After the change, it is greater than the maximum value and the + // scatter-width increases int oldValue = maxScatterWidth + 1; int newValue = maxScatterWidth + 2; - ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 8. 变更之后大于最大值,scatter-width不变 + // 8. After the change, it is greater than the maximum value, and the + // scatter-width remains unchanged int oldValue = maxScatterWidth + 2; int newValue = maxScatterWidth + 2; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } { - // 9. 变更之后大于最大值,scatter-width减小 + // 9. 
After the change, it is greater than the maximum value, and the + // scatter-width decreases int oldValue = maxScatterWidth + 3; int newValue = maxScatterWidth + 2; - ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, - newValue, minScatterWidth, scatterWidthRangePerent)); + ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth( + target, oldValue, newValue, minScatterWidth, + scatterWidthRangePerent)); } }
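Taken together, the eighteen cases above pin down the shape of the predicate. A compact sketch that is consistent with every assertion — inferred from the tests, not copied from the real SchedulerHelper implementation:

bool SatisfyScatterWidth(bool target, int oldValue, int newValue,
                         int minScatterWidth, float rangePercent) {
    int maxScatterWidth = minScatterWidth * (1 + rangePercent);
    if (newValue < minScatterWidth)   // still below the lower bound
        return target ? (newValue > oldValue) : (newValue >= oldValue);
    if (newValue > maxScatterWidth)   // above the upper bound
        return target ? (newValue < oldValue) : (newValue <= oldValue);
    return true;                      // inside [min, max]: always acceptable
}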
@@ -195,7 +227,7 @@ TEST_F(TestSchedulerHelper, test_SatisfyZoneAndScatterWidthLimit) { ChunkServerIdType source = 1; ChunkServerIdType target = 4; { - // 1. 获取target的信息失败 + // 1. Failed to obtain the target's info EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(4, _)) .WillOnce(Return(false)); ASSERT_FALSE(SchedulerHelper::SatisfyZoneAndScatterWidthLimit( @@ -204,9 +236,10 @@ } PeerInfo peer4(4, 1, 1, "192.168.10.1", 9001); ChunkServerInfo info4(peer4, OnlineState::ONLINE, DiskState::DISKERROR, - ChunkServerStatus::READWRITE, 1, 1, 1, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 1, 1, 1, + ChunkServerStatisticInfo{}); { - // 2. 获取到的标准zoneNum = 0 + // 2. The standard zoneNum obtained is 0 EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(4, _)) .WillOnce(DoAll(SetArgPointee<1>(info4), Return(true))); EXPECT_CALL(*topoAdapter_, GetStandardZoneNumInLogicalPool(1)) .WillOnce(Return(0)); ASSERT_FALSE(SchedulerHelper::SatisfyZoneAndScatterWidthLimit( topoAdapter_, target, source, copyset, 1, 0.01)); } { - // 3. 迁移之后不符合zone条件 + // 3. Does not meet the zone condition after migration EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(4, _)) .WillOnce(DoAll(SetArgPointee<1>(info4), Return(true))); EXPECT_CALL(*topoAdapter_, GetStandardZoneNumInLogicalPool(1)) .WillOnce(Return(4)); - ASSERT_FALSE(SchedulerHelper::SatisfyZoneAndScatterWidthLimit( + ASSERT_FALSE(SchedulerHelper::SatisfyZoneAndScatterWidthLimit( topoAdapter_, target, source, copyset, 1, 0.01)); } } @@ -283,18 +316,18 @@ TEST_F(TestSchedulerHelper, test_CalculateAffectOfMigration) { .WillOnce(SetArgPointee<1>(replica2Map)); EXPECT_CALL(*topoAdapter_, GetChunkServerScatterMap(3, _)) .WillOnce(SetArgPointee<1>(replica3Map)); - SchedulerHelper::CalculateAffectOfMigration( - copyset, source, target, topoAdapter_, &scatterWidth); + SchedulerHelper::CalculateAffectOfMigration(copyset, source, target, + topoAdapter_, &scatterWidth); - // 对于source, old=2, new=1 + // For source, old=2, new=1 ASSERT_EQ(2, scatterWidth[source].first); ASSERT_EQ(1, scatterWidth[source].second); - // 对于target, old=1, new=2 + // For target, old=1, new=2 ASSERT_EQ(1, scatterWidth[target].first); ASSERT_EQ(2, scatterWidth[target].second); - // 对于replica2, old=3, new=2 + // For replica2, old=3, new=2 ASSERT_EQ(3, scatterWidth[2].first); ASSERT_EQ(2, scatterWidth[2].second); - // 对于replica3, old=2, new=3 + // For replica3, old=2, new=3 ASSERT_EQ(2, scatterWidth[3].first); ASSERT_EQ(3, scatterWidth[3].second); } @@ -324,19 +357,19 @@ TEST_F(TestSchedulerHelper, test_CalculateAffectOfMigration_no_source) { .WillOnce(SetArgPointee<1>(replica2Map)); EXPECT_CALL(*topoAdapter_, GetChunkServerScatterMap(3, _)) .WillOnce(SetArgPointee<1>(replica3Map)); - SchedulerHelper::CalculateAffectOfMigration( - copyset, source, target, topoAdapter_, &scatterWidth); + SchedulerHelper::CalculateAffectOfMigration(copyset, source, target, + topoAdapter_, &scatterWidth); - // 对于target, old=1, new=3 + // For target, old=1, new=3 ASSERT_EQ(1, scatterWidth[target].first); ASSERT_EQ(3, scatterWidth[target].second); - // 对于replica1, old=2, new=3 + // For replica1, old=2, new=3 ASSERT_EQ(2, scatterWidth[1].first); ASSERT_EQ(3, scatterWidth[1].second); - // 对于replica2, old=3, new=3 + // For replica2, old=3, new=3 ASSERT_EQ(3, scatterWidth[2].first); ASSERT_EQ(3, scatterWidth[2].second); - // 对于replica3, old=2, new=3 + // For replica3, old=2, new=3 ASSERT_EQ(2, scatterWidth[3].first); ASSERT_EQ(3, scatterWidth[3].second); } @@ -362,22 +395,22 @@ TEST_F(TestSchedulerHelper, test_CalculateAffectOfMigration_no_target) { .WillOnce(SetArgPointee<1>(replica2Map)); EXPECT_CALL(*topoAdapter_, GetChunkServerScatterMap(3, _)) .WillOnce(SetArgPointee<1>(replica3Map)); - SchedulerHelper::CalculateAffectOfMigration( - copyset, source, target, topoAdapter_, &scatterWidth); + SchedulerHelper::CalculateAffectOfMigration(copyset, source, target, + topoAdapter_, &scatterWidth); - // 对于source, old=2, new=1 + // For source, old=2, new=1 ASSERT_EQ(2, scatterWidth[source].first); ASSERT_EQ(1, scatterWidth[source].second); - // 对于replica2, old=3, new=2 + // For replica2, old=3, new=2 ASSERT_EQ(3, scatterWidth[2].first); ASSERT_EQ(2, scatterWidth[2].second); - // 对于replica3, old=2, new=2 + // For replica3, old=2, new=2 ASSERT_EQ(2, scatterWidth[3].first); ASSERT_EQ(2, scatterWidth[3].second); } TEST_F(TestSchedulerHelper, - test_InvovledReplicasSatisfyScatterWidthAfterMigration_not_satisfy) { + test_InvovledReplicasSatisfyScatterWidthAfterMigration_not_satisfy) { CopySetInfo copyset = GetCopySetInfoForTest(); ChunkServerIdType source = 1; ChunkServerIdType target = 4; @@ -405,14 +438,14 @@ TEST_F(TestSchedulerHelper, int affected = 0; bool res = SchedulerHelper::InvovledReplicasSatisfyScatterWidthAfterMigration( - copyset, source, target, UNINTIALIZE_ID, topoAdapter_, - 10, 0.1, &affected); + copyset, source, target, UNINTIALIZE_ID, topoAdapter_, 10, 0.1, + &affected); ASSERT_FALSE(res); ASSERT_EQ(0, affected); } TEST_F(TestSchedulerHelper, - test_InvovledReplicasSatisfyScatterWidthAfterMigration_satisfy) { + test_InvovledReplicasSatisfyScatterWidthAfterMigration_satisfy) { CopySetInfo copyset = GetCopySetInfoForTest(); ChunkServerIdType source = 1; ChunkServerIdType target = 4; @@ -440,53 +473,55 @@ TEST_F(TestSchedulerHelper, int affected = 0; bool res = SchedulerHelper::InvovledReplicasSatisfyScatterWidthAfterMigration( - copyset, source, target, UNINTIALIZE_ID, topoAdapter_, 1, 2, &affected); + copyset, source, target, UNINTIALIZE_ID, topoAdapter_, 1, 2, + &affected); ASSERT_TRUE(res); ASSERT_EQ(0, affected); } - TEST_F(TestSchedulerHelper, test_SortChunkServerByCopySetNumAsc) { PeerInfo peer1(1, 1, 1, "192.168.10.1", 9000); PeerInfo peer2(2, 2, 2, "192.168.10.2", 9000); PeerInfo peer3(3, 3, 3, "192.168.10.3", 9000); PeerInfo peer4(4, 4, 4, "192.168.10.4", 9000); ChunkServerInfo info1(peer1, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 10, 10, 10, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 10, 10, 10, + ChunkServerStatisticInfo{}); ChunkServerInfo info2(peer2, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 10, 10, 10, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 10, 10, 10, + ChunkServerStatisticInfo{}); ChunkServerInfo info3(peer3, OnlineState::ONLINE, DiskState::DISKNORMAL, - ChunkServerStatus::READWRITE, 10, 10, 10, ChunkServerStatisticInfo{}); + ChunkServerStatus::READWRITE, 10, 10, 10, + ChunkServerStatisticInfo{}); std::vector<ChunkServerInfo> chunkserverList{info1, info2, info3}; // {1,2,3} CopySetInfo copyset1(CopySetKey{1, 1}, 1, 1, - std::vector<PeerInfo>{peer1, peer2, peer3}, - ConfigChangeInfo{},
CopysetStatistics{}); + std::vector<PeerInfo>{peer1, peer2, peer3}, + ConfigChangeInfo{}, CopysetStatistics{}); // {1,3,4} CopySetInfo copyset2(CopySetKey{1, 2}, 1, 1, - std::vector<PeerInfo>{peer1, peer3, peer4}, - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector<PeerInfo>{peer1, peer3, peer4}, + ConfigChangeInfo{}, CopysetStatistics{}); // {1,2,3} CopySetInfo copyset3(CopySetKey{1, 3}, 1, 1, - std::vector<PeerInfo>{peer1, peer2, peer3}, - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector<PeerInfo>{peer1, peer2, peer3}, + ConfigChangeInfo{}, CopysetStatistics{}); // {1,2,4} CopySetInfo copyset4(CopySetKey{1, 4}, 1, 1, - std::vector<PeerInfo>{peer1, peer2, peer4}, - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector<PeerInfo>{peer1, peer2, peer4}, + ConfigChangeInfo{}, CopysetStatistics{}); // {1,3,4} CopySetInfo copyset5(CopySetKey{1, 5}, 1, 1, - std::vector<PeerInfo>{peer1, peer3, peer4}, - ConfigChangeInfo{}, CopysetStatistics{}); + std::vector<PeerInfo>{peer1, peer3, peer4}, + ConfigChangeInfo{}, CopysetStatistics{}); - std::vector<CopySetInfo> copysetList{ - copyset1, copyset2, copyset3, copyset4, copyset5}; + std::vector<CopySetInfo> copysetList{copyset1, copyset2, copyset3, copyset4, + copyset5}; // chunkserver-1: 5, chunkserver-2: 3 chunkserver-3: 4 - EXPECT_CALL(*topoAdapter_, GetCopySetInfos()) - .WillOnce(Return(copysetList)); + EXPECT_CALL(*topoAdapter_, GetCopySetInfos()).WillOnce(Return(copysetList)); - SchedulerHelper::SortChunkServerByCopySetNumAsc( - &chunkserverList, topoAdapter_); + SchedulerHelper::SortChunkServerByCopySetNumAsc(&chunkserverList, + topoAdapter_); ASSERT_EQ(info2.info.id, chunkserverList[0].info.id); ASSERT_EQ(info3.info.id, chunkserverList[1].info.id); @@ -496,4 +531,3 @@ TEST_F(TestSchedulerHelper, test_SortChunkServerByCopySetNumAsc) { } // namespace schedule } // namespace mds } // namespace curve - diff --git a/test/mds/server/mds_test.cpp b/test/mds/server/mds_test.cpp index 236e526371..1881504452 100644 --- a/test/mds/server/mds_test.cpp +++ b/test/mds/server/mds_test.cpp @@ -20,19 +20,20 @@ * Author: charisu */ +#include "src/mds/server/mds.h" + +#include #include -#include #include -#include #include + #include #include -#include "src/mds/server/mds.h" #include "src/common/concurrent/concurrent.h" -#include "src/common/timeutility.h" #include "src/common/string_util.h" - +#include "src/common/timeutility.h" #include "test/mds/mock/mock_etcdclient.h" using ::curve::common::Thread; @@ -55,18 +56,19 @@ class MDSTest : public ::testing::Test { ASSERT_TRUE(false); } else if (0 == etcdPid) { /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() in the child process, + * as it may cause deadlock!!!
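+         * (Likely cause: fork() clones only the calling thread, so a
+         * logging mutex held by another thread at fork time stays locked
+         * forever in the child, and the child's first LOG() can block on it.)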
*/ - ASSERT_EQ(0, execlp("etcd", "etcd", "--listen-client-urls", - "http://localhost:10032", - "--advertise-client-urls", - "http://localhost:10032", "--listen-peer-urls", - "http://localhost:10033", "--name", "testMds", - nullptr)); + ASSERT_EQ( + 0, + execlp("etcd", "etcd", "--listen-client-urls", + "http://localhost:10032", "--advertise-client-urls", + "http://localhost:10032", "--listen-peer-urls", + "http://localhost:10033", "--name", "testMds", nullptr)); exit(0); } - // 一定时间内尝试init直到etcd完全起来 + // Keep retrying Init for a while until etcd is fully up auto client = std::make_shared<EtcdClientImp>(); EtcdConf conf = {kEtcdAddr, static_cast<int>(strlen(kEtcdAddr)), 1000}; uint64_t now = ::curve::common::TimeUtility::GetTimeofDaySec(); @@ -102,7 +104,7 @@ class MDSTest : public ::testing::Test { }; TEST_F(MDSTest, common) { - // 加载配置 + // Load the configuration std::string confPath = "./conf/mds.conf"; auto conf = std::make_shared<Configuration>(); conf->SetConfigPath(confPath); @@ -116,7 +118,7 @@ TEST_F(MDSTest, common) { mds.InitMdsOptions(conf); mds.StartDummy(); - // 从dummy server获取version和mds监听端口 + // Obtain the version and the mds listening port from the dummy server brpc::Channel httpChannel; brpc::Controller cntl; brpc::ChannelOptions options; @@ -124,12 +126,12 @@ TEST_F(MDSTest, common) { std::string dummyAddr = "127.0.0.1:" + std::to_string(kDummyPort); ASSERT_EQ(0, httpChannel.Init(dummyAddr.c_str(), &options)); - // 测试获取version + // Test getting the version cntl.http_request().uri() = dummyAddr + "/vars/curve_version"; httpChannel.CallMethod(NULL, &cntl, NULL, NULL, NULL); ASSERT_FALSE(cntl.Failed()); - // 测试获取mds监听端口 + // Test getting the mds listening port cntl.Reset(); cntl.http_request().uri() = dummyAddr + "/vars/mds_config_mds_listen_addr"; httpChannel.CallMethod(NULL, &cntl, NULL, NULL, NULL); @@ -140,13 +142,13 @@ TEST_F(MDSTest, common) { auto pos = attachment.find(":"); ASSERT_NE(std::string::npos, pos); std::string jsonString = attachment.substr(pos + 2); - // 去除两端引号 + // Strip the quotes at both ends jsonString = jsonString.substr(1, jsonString.size() - 2); reader.parse(jsonString, value); std::string mdsAddr = value["conf_value"].asString(); ASSERT_EQ(kMdsAddr, mdsAddr); - // 获取leader状态,此时mds_status应为follower + // Get the leader status; at this point mds_status should be follower cntl.Reset(); cntl.http_request().uri() = dummyAddr + "/vars/mds_status"; httpChannel.CallMethod(NULL, &cntl, NULL, NULL, NULL); @@ -156,7 +158,7 @@ TEST_F(MDSTest, common) { mds.StartCompaginLeader(); - // 此时isLeader应为true + // At this point, isLeader should be true cntl.Reset(); cntl.http_request().uri() = dummyAddr + "/vars/is_leader"; ASSERT_FALSE(cntl.Failed()); ASSERT_NE(std::string::npos, cntl.response_attachment().to_string().find("leader")); mds.Init(); - // 启动mds + // Start mds Thread mdsThread(&MDS::Run, &mds); // sleep 5s sleep(5); @@ -172,7 +174,7 @@ TEST_F(MDSTest, common) { // 1、init channel ASSERT_EQ(0, channel_.Init(kMdsAddr.c_str(), nullptr)); - // 2、测试hearbeat接口 + // 2. 
Test the heartbeat interface cntl.Reset(); heartbeat::ChunkServerHeartbeatRequest request1; heartbeat::ChunkServerHeartbeatResponse response1; @@ -180,7 +182,7 @@ TEST_F(MDSTest, common) { request1.set_token("123"); request1.set_ip("127.0.0.1"); request1.set_port(8888); - heartbeat::DiskState *diskState = new heartbeat::DiskState(); + heartbeat::DiskState* diskState = new heartbeat::DiskState(); diskState->set_errtype(0); diskState->set_errmsg(""); request1.set_allocated_diskstate(diskState); @@ -193,7 +195,7 @@ TEST_F(MDSTest, common) { stub1.ChunkServerHeartbeat(&cntl, &request1, &response1, nullptr); ASSERT_FALSE(cntl.Failed()); - // 3、测试namespaceService接口 + // 3. Test the namespaceService interface cntl.Reset(); GetFileInfoRequest request2; GetFileInfoResponse response2; @@ -205,7 +207,7 @@ TEST_F(MDSTest, common) { stub2.GetFileInfo(&cntl, &request2, &response2, nullptr); ASSERT_FALSE(cntl.Failed()); - // 4、测试topology接口 + // 4. Testing the topology interface cntl.Reset(); topology::ListPhysicalPoolRequest request3; topology::ListPhysicalPoolResponse response3; @@ -213,7 +215,7 @@ TEST_F(MDSTest, common) { stub3.ListPhysicalPool(&cntl, &request3, &response3, nullptr); ASSERT_FALSE(cntl.Failed()); - // 5、停掉mds + // 5. Stop the MDS uint64_t startTime = curve::common::TimeUtility::GetTimeofDayMs(); mds.Stop(); mdsThread.join(); @@ -250,7 +252,7 @@ TEST(TestParsePoolsetRules, Test) { { // subdir rules ASSERT_TRUE(ParsePoolsetRules( - "/system/:system;/data/:data;/system/sub/:system-sub", &rules)); + "/system/:system;/data/:data;/system/sub/:system-sub", &rules)); ASSERT_EQ(3, rules.size()); ASSERT_EQ("system", rules["/system/"]); ASSERT_EQ("data", rules["/data/"]); @@ -260,8 +262,8 @@ TEST(TestParsePoolsetRules, Test) { TEST_F(MDSTest, TestBlockSize) { using ::testing::_; - using ::testing::Return; using ::testing::Invoke; + using ::testing::Return; auto client = std::make_shared(); @@ -269,8 +271,7 @@ TEST_F(MDSTest, TestBlockSize) { { EXPECT_CALL(*client, Get(_, _)) .WillOnce(Return(EtcdErrCode::EtcdKeyNotExist)); - EXPECT_CALL(*client, Put(_, _)) - .WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*client, Put(_, _)).WillOnce(Return(EtcdErrCode::EtcdOK)); ASSERT_TRUE(CheckOrInsertBlockSize(client.get())); } diff --git a/test/mds/topology/test_topology.cpp b/test/mds/topology/test_topology.cpp index c1e878deb4..59c394cda9 100644 --- a/test/mds/topology/test_topology.cpp +++ b/test/mds/topology/test_topology.cpp @@ -22,25 +22,25 @@ #include -#include "test/mds/topology/mock_topology.h" -#include "src/mds/topology/topology.h" -#include "src/mds/topology/topology_item.h" #include "src/common/configuration.h" #include "src/common/namespace_define.h" +#include "src/mds/topology/topology.h" +#include "src/mds/topology/topology_item.h" +#include "test/mds/topology/mock_topology.h" namespace curve { namespace mds { namespace topology { -using ::testing::Return; -using ::testing::_; -using ::testing::Contains; -using ::testing::SetArgPointee; -using ::testing::SaveArg; -using ::testing::DoAll; using ::curve::common::Configuration; using ::curve::common::kDefaultPoolsetId; using ::curve::common::kDefaultPoolsetName; +using ::testing::_; +using ::testing::Contains; +using ::testing::DoAll; +using ::testing::Return; +using ::testing::SaveArg; +using ::testing::SetArgPointee; class TestTopology : public ::testing::Test { protected: @@ -52,13 +52,11 @@ class TestTopology : public ::testing::Test { tokenGenerator_ = std::make_shared(); storage_ = std::make_shared(); topology_ = 
std::make_shared(idGenerator_, - tokenGenerator_, - storage_); + tokenGenerator_, storage_); const std::unordered_map poolsetMap{ {kDefaultPoolsetId, - {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}} - }; + {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}}}; ON_CALL(*storage_, LoadPoolset(_, _)) .WillByDefault(DoAll( @@ -80,128 +78,90 @@ class TestTopology : public ::testing::Test { const std::string& type = "SSD", const std::string& desc = "descPoolset") { Poolset poolset(id, name, type, desc); - EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePoolset(_)).WillOnce(Return(true)); int ret = topology_->AddPoolset(poolset); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - void PrepareAddLogicalPool(PoolIdType id = 0x01, - const std::string &name = "testLogicalPool", - PoolIdType phyPoolId = 0x11, - LogicalPoolType type = PAGEFILE, - const LogicalPool::RedundanceAndPlaceMentPolicy &rap = - LogicalPool::RedundanceAndPlaceMentPolicy(), - const LogicalPool::UserPolicy &policy = LogicalPool::UserPolicy(), - uint64_t createTime = 0x888 - ) { - LogicalPool pool(id, - name, - phyPoolId, - type, - rap, - policy, - createTime, - true, - true); - - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + void PrepareAddLogicalPool( + PoolIdType id = 0x01, const std::string& name = "testLogicalPool", + PoolIdType phyPoolId = 0x11, LogicalPoolType type = PAGEFILE, + const LogicalPool::RedundanceAndPlaceMentPolicy& rap = + LogicalPool::RedundanceAndPlaceMentPolicy(), + const LogicalPool::UserPolicy& policy = LogicalPool::UserPolicy(), + uint64_t createTime = 0x888) { + LogicalPool pool(id, name, phyPoolId, type, rap, policy, createTime, + true, true); + + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } - void PrepareAddPhysicalPool(PoolIdType id = 0x11, - const std::string &name = "testPhysicalPool", - PoolsetIdType pid = 0x61, - const std::string &desc = "descPhysicalPool", - uint64_t diskCapacity = 0) { - PhysicalPool pool(id, - name, - pid, - desc); + const std::string& name = "testPhysicalPool", + PoolsetIdType pid = 0x61, + const std::string& desc = "descPhysicalPool", + uint64_t diskCapacity = 0) { + PhysicalPool pool(id, name, pid, desc); pool.SetDiskCapacity(diskCapacity); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret); } void PrepareAddZone(ZoneIdType id = 0x21, - const std::string &name = "testZone", - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descZone") { + const std::string& name = "testZone", + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descZone") { Zone zone(id, name, physicalPoolId, desc); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } void PrepareAddServer(ServerIdType id = 0x31, - const std::string &hostName = "testServer", - const std::string &internalHostIp = "testInternalIp", - uint32_t internalPort = 0, - const std::string &externalHostIp = "testExternalIp", - uint32_t externalPort = 0, - ZoneIdType zoneId = 0x21, - PoolIdType physicalPoolId = 0x11, - const 
std::string &desc = "descServer") { - Server server(id, - hostName, - internalHostIp, - internalPort, - externalHostIp, - externalPort, - zoneId, - physicalPoolId, - desc); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + const std::string& hostName = "testServer", + const std::string& internalHostIp = "testInternalIp", + uint32_t internalPort = 0, + const std::string& externalHostIp = "testExternalIp", + uint32_t externalPort = 0, ZoneIdType zoneId = 0x21, + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descServer") { + Server server(id, hostName, internalHostIp, internalPort, + externalHostIp, externalPort, zoneId, physicalPoolId, + desc); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } - void PrepareAddChunkServer(ChunkServerIdType id = 0x41, - const std::string &token = "testToken", - const std::string &diskType = "nvme", - ServerIdType serverId = 0x31, - const std::string &hostIp = "testInternalIp", - uint32_t port = 0, - const std::string &diskPath = "/", - uint64_t diskUsed = 512, - uint64_t diskCapacity = 1024) { - ChunkServer cs(id, - token, - diskType, - serverId, - hostIp, - port, - diskPath); - ChunkServerState state; - state.SetDiskCapacity(diskCapacity); - state.SetDiskUsed(diskUsed); - cs.SetChunkServerState(state); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + void PrepareAddChunkServer( + ChunkServerIdType id = 0x41, const std::string& token = "testToken", + const std::string& diskType = "nvme", ServerIdType serverId = 0x31, + const std::string& hostIp = "testInternalIp", uint32_t port = 0, + const std::string& diskPath = "/", uint64_t diskUsed = 512, + uint64_t diskCapacity = 1024) { + ChunkServer cs(id, token, diskType, serverId, hostIp, port, diskPath); + ChunkServerState state; + state.SetDiskCapacity(diskCapacity); + state.SetDiskUsed(diskUsed); + cs.SetChunkServerState(state); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddServer()"; } - void PrepareAddCopySet(CopySetIdType copysetId, - PoolIdType logicalPoolId, - const std::set &members) { - CopySetInfo cs(logicalPoolId, - copysetId); + void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, + const std::set& members) { + CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; @@ -218,16 +178,12 @@ class TestTopology : public ::testing::Test { TEST_F(TestTopology, test_init_success) { std::vector infos; EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, StorageClusterInfo(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)).WillOnce(Return(true)); const std::unordered_map poolsetMap{ - {kDefaultPoolsetId, - {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}} - }; + {kDefaultPoolsetId, {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}}}; std::unordered_map logicalPoolMap_; std::unordered_map physicalPoolMap_; std::unordered_map zoneMap_; @@ -235,40 
+191,33 @@ TEST_F(TestTopology, test_init_success) { std::unordered_map<ChunkServerIdType, ChunkServer> chunkServerMap_; std::map<CopySetKey, CopySetInfo> copySetMap_; - logicalPoolMap_[0x01] = LogicalPool(0x01, "lpool1", 0x11, PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, false, true); + logicalPoolMap_[0x01] = + LogicalPool(0x01, "lpool1", 0x11, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, false, true); physicalPoolMap_[0x11] = PhysicalPool(0x11, "pPool1", 0X61, "des1"); zoneMap_[0x21] = Zone(0x21, "zone1", 0x11, "desc1"); - serverMap_[0x31] = Server(0x31, "server1", "127.0.0.1", 8200, - "127.0.0.1", 8200, 0x21, 0x11, "desc1"); - chunkServerMap_[0x41] = ChunkServer(0x41, "token", "ssd", - 0x31, "127.0.0.1", 8200, "/"); + serverMap_[0x31] = Server(0x31, "server1", "127.0.0.1", 8200, "127.0.0.1", + 8200, 0x21, 0x11, "desc1"); + chunkServerMap_[0x41] = + ChunkServer(0x41, "token", "ssd", 0x31, "127.0.0.1", 8200, "/"); copySetMap_[std::pair<PoolIdType, CopySetIdType>(0x01, 0x51)] = CopySetInfo(0x01, 0x51); EXPECT_CALL(*storage_, LoadPoolset(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(poolsetMap), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(poolsetMap), Return(true))); EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(logicalPoolMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(logicalPoolMap_), Return(true))); EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap_), Return(true))); EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(zoneMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(zoneMap_), Return(true))); EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(serverMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(serverMap_), Return(true))); EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(chunkServerMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(chunkServerMap_), Return(true))); EXPECT_CALL(*storage_, LoadCopySet(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(copySetMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(copySetMap_), Return(true))); EXPECT_CALL(*idGenerator_, initPoolsetIdGenerator(_)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); @@ -278,10 +227,8 @@ TEST_F(TestTopology, test_init_success) { EXPECT_CALL(*idGenerator_, initChunkServerIdGenerator(_)); EXPECT_CALL(*idGenerator_, initCopySetIdGenerator(_)); - EXPECT_CALL(*storage_, DeleteLogicalPool(_)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, DeleteCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteLogicalPool(_)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteCopySet(_)).WillOnce(Return(true)); TopologyOption option; int ret = topology_->Init(option); @@ -291,8 +238,7 @@ TEST_F(TestTopology, test_init_loadClusterFail) { std::vector<ClusterInformation> infos; EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(false))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(false))); TopologyOption option; int ret = topology_->Init(option); @@ -302,11 +248,9 @@ TEST_F(TestTopology, test_init_StorageClusterInfoFail) { std::vector<ClusterInformation> infos; EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos),
Return(true))); - EXPECT_CALL(*storage_, StorageClusterInfo(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)).WillOnce(Return(false)); TopologyOption option; int ret = topology_->Init(option); @@ -318,11 +262,9 @@ TEST_F(TestTopology, test_init_loadLogicalPoolFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(false)); TopologyOption option; int ret = topology_->Init(option); @@ -334,13 +276,10 @@ TEST_F(TestTopology, test_init_LoadPhysicalPoolFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); @@ -354,15 +293,11 @@ TEST_F(TestTopology, test_init_LoadZoneFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -377,16 +312,11 @@ TEST_F(TestTopology, test_init_LoadServerFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(false)); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -402,19 +332,13 @@ TEST_F(TestTopology, test_init_LoadChunkServerFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - 
.WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -431,21 +355,14 @@ TEST_F(TestTopology, test_init_LoadCopysetFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadCopySet(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadCopySet(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -462,18 +379,11 @@ TEST_F(TestTopology, test_AddLogicalPool_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - LogicalPool pool(0x01, - "test1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(0x01, "test1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); @@ -487,15 +397,9 @@ TEST_F(TestTopology, test_AddLogicalPool_IdDuplicated) { PoolIdType id = 0x01; PrepareAddLogicalPool(id, "test1", physicalPoolId); - LogicalPool pool(id, - "test2", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(id, "test2", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); int ret = topology_->AddLogicalPool(pool); @@ -506,18 +410,11 @@ TEST_F(TestTopology, test_AddLogicalPool_StorageFail) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - LogicalPool pool(0x01, - "test1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - 
true, - true); + LogicalPool pool(0x01, "test1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(false)); int ret = topology_->AddLogicalPool(pool); @@ -528,16 +425,9 @@ TEST_F(TestTopology, test_AddLogicalPool_PhysicalPoolNotFound) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - LogicalPool pool(0x01, - "test1", - ++physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); - + LogicalPool pool(0x01, "test1", ++physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); int ret = topology_->AddLogicalPool(pool); @@ -546,26 +436,18 @@ TEST_F(TestTopology, test_AddLogicalPool_PhysicalPoolNotFound) { TEST_F(TestTopology, test_AddPhysicalPool_success) { PrepareAddPoolset(); - PhysicalPool pool(0x11, - "test1", - 0X61, - "desc"); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + PhysicalPool pool(0x11, "test1", 0X61, "desc"); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - TEST_F(TestTopology, test_AddPhysicalPool_IdDuplicated) { PrepareAddPoolset(); PoolIdType id = 0x11; PoolsetIdType pid = 0x61; - PhysicalPool pool(id, - "test1", - pid, - "desc"); + PhysicalPool pool(id, "test1", pid, "desc"); PrepareAddPhysicalPool(id); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeIdDuplicated, ret); @@ -573,12 +455,8 @@ TEST_F(TestTopology, test_AddPhysicalPool_IdDuplicated) { TEST_F(TestTopology, test_AddPhysicalPool_StorageFail) { PrepareAddPoolset(); - PhysicalPool pool(0x11, - "test1", - 0X61, - "desc"); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(false)); + PhysicalPool pool(0x11, "test1", 0X61, "desc"); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(false)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -590,13 +468,9 @@ TEST_F(TestTopology, test_AddZone_success) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); - Zone zone(zoneId, - "testZone", - physicalPoolId, - "desc"); + Zone zone(zoneId, "testZone", physicalPoolId, "desc"); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); @@ -616,10 +490,7 @@ TEST_F(TestTopology, test_AddZone_IdDuplicated) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "test", physicalPoolId); - Zone zone(zoneId, - "testZone", - physicalPoolId, - "desc"); + Zone zone(zoneId, "testZone", physicalPoolId, "desc"); int ret = topology_->AddZone(zone); @@ -631,13 +502,9 @@ TEST_F(TestTopology, test_AddZone_StorageFail) { PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - Zone zone(0x21, - "testZone", - physicalPoolId, - "desc"); + Zone zone(0x21, "testZone", physicalPoolId, "desc"); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(false)); int ret = topology_->AddZone(zone); @@ -649,11 +516,7 @@ TEST_F(TestTopology, test_AddZone_PhysicalPoolNotFound) { PoolIdType physicalPoolId = 0x11; 
ZoneIdType zoneId = 0x21; - Zone zone(zoneId, - "testZone", - physicalPoolId, - "desc"); - + Zone zone(zoneId, "testZone", physicalPoolId, "desc"); int ret = topology_->AddZone(zone); @@ -668,18 +531,10 @@ TEST_F(TestTopology, test_AddServer_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "test", physicalPoolId); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -701,15 +556,8 @@ TEST_F(TestTopology, test_AddServer_IdDuplicated) { PrepareAddZone(zoneId, "test", physicalPoolId); PrepareAddServer(id); - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); @@ -724,46 +572,29 @@ TEST_F(TestTopology, test_AddServer_StorageFail) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "test", physicalPoolId); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(false)); - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } - TEST_F(TestTopology, test_AddServer_ZoneNotFound) { PrepareAddPoolset(); ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeZoneNotFound, ret); } - TEST_F(TestTopology, test_AddChunkServers_success) { PrepareAddPoolset(); ChunkServerIdType csId = 0x41; @@ -773,20 +604,13 @@ TEST_F(TestTopology, test_AddChunkServers_success) { PrepareAddZone(); PrepareAddServer(serverId); - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); ChunkServerState state; state.SetDiskCapacity(1024); state.SetDiskUsed(512); cs.SetChunkServerState(state); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); @@ -812,18 +636,9 @@ TEST_F(TestTopology, test_AddChunkServer_IdDuplicated) { PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token2", - "ssd", - serverId); - - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + PrepareAddChunkServer(csId, "token2", "ssd", serverId); + + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); int ret = topology_->AddChunkServer(cs); @@ -839,16 +654,9 @@ TEST_F(TestTopology, test_AddChunkServer_StorageFail) { PrepareAddZone(); PrepareAddServer(serverId); - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); - EXPECT_CALL(*storage_, 
StorageChunkServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(false)); int ret = topology_->AddChunkServer(cs); @@ -860,13 +668,7 @@ TEST_F(TestTopology, test_AddChunkServer_ServerNotFound) { ChunkServerIdType csId = 0x41; ServerIdType serverId = 0x31; - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); int ret = topology_->AddChunkServer(cs); @@ -880,8 +682,7 @@ TEST_F(TestTopology, test_RemoveLogicalPool_success) { PoolIdType id = 0x01; PrepareAddLogicalPool(id, "name", physicalPoolId); - EXPECT_CALL(*storage_, DeleteLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->RemoveLogicalPool(id); @@ -904,8 +705,7 @@ TEST_F(TestTopology, test_RemoveLogicalPool_StorageFail) { PoolIdType id = 0x01; PrepareAddLogicalPool(id, "name", physicalPoolId); - EXPECT_CALL(*storage_, DeleteLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteLogicalPool(_)).WillOnce(Return(false)); int ret = topology_->RemoveLogicalPool(id); @@ -917,8 +717,7 @@ TEST_F(TestTopology, test_RemovePhysicalPool_success) { PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); - EXPECT_CALL(*storage_, DeletePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeletePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->RemovePhysicalPool(poolId); @@ -939,8 +738,7 @@ TEST_F(TestTopology, test_RemovePhysicalPool_StorageFail) { PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); - EXPECT_CALL(*storage_, DeletePhysicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeletePhysicalPool(_)).WillOnce(Return(false)); int ret = topology_->RemovePhysicalPool(poolId); @@ -952,12 +750,9 @@ TEST_F(TestTopology, test_RemoveZone_success) { ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); - PrepareAddZone(zoneId, - "testZone", - poolId); + PrepareAddZone(zoneId, "testZone", poolId); - EXPECT_CALL(*storage_, DeleteZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteZone(_)).WillOnce(Return(true)); int ret = topology_->RemoveZone(zoneId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -982,8 +777,7 @@ TEST_F(TestTopology, test_RemoveZone_StorageFail) { PrepareAddPhysicalPool(); PrepareAddZone(zoneId); - EXPECT_CALL(*storage_, DeleteZone(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteZone(_)).WillOnce(Return(false)); int ret = topology_->RemoveZone(zoneId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -995,16 +789,9 @@ TEST_F(TestTopology, test_RemoveServer_success) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "testSever", - "ip1", - 0, - "ip2", - 0, - zoneId); + PrepareAddServer(serverId, "testSever", "ip1", 0, "ip2", 0, zoneId); - EXPECT_CALL(*storage_, DeleteServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteServer(_)).WillOnce(Return(true)); int ret = topology_->RemoveServer(serverId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1030,16 +817,9 @@ TEST_F(TestTopology, test_RemoveServer_StorageFail) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "testSever", - "ip1", - 0, - "ip2", - 0, - zoneId); + PrepareAddServer(serverId, "testSever", "ip1", 0, "ip2", 0, zoneId); - EXPECT_CALL(*storage_, DeleteServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, 
DeleteServer(_)).WillOnce(Return(false)); int ret = topology_->RemoveServer(serverId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -1052,18 +832,14 @@ TEST_F(TestTopology, test_RemoveChunkServer_success) { PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId); + PrepareAddChunkServer(csId, "token", "ssd", serverId); - int ret = topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId); + int ret = + topology_->UpdateChunkServerRwState(ChunkServerStatus::RETIRED, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - EXPECT_CALL(*storage_, DeleteChunkServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteChunkServer(_)).WillOnce(Return(true)); ret = topology_->RemoveChunkServer(csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1075,7 +851,6 @@ TEST_F(TestTopology, test_RemoveChunkServer_success) { ASSERT_TRUE(it == csList.end()); } - TEST_F(TestTopology, test_RemoveChunkServer_ChunkSeverNotFound) { ChunkServerIdType csId = 0x41; @@ -1090,19 +865,14 @@ TEST_F(TestTopology, test_RemoveChunkServer_StorageFail) { PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId); + PrepareAddChunkServer(csId, "token", "ssd", serverId); - int ret = topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId); + int ret = + topology_->UpdateChunkServerRwState(ChunkServerStatus::RETIRED, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - - EXPECT_CALL(*storage_, DeleteChunkServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteChunkServer(_)).WillOnce(Return(false)); ret = topology_->RemoveChunkServer(csId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -1113,26 +883,15 @@ TEST_F(TestTopology, UpdateLogicalPool_success) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->UpdateLogicalPool(pool); @@ -1146,15 +905,9 @@ TEST_F(TestTopology, UpdateLogicalPool_success) { TEST_F(TestTopology, UpdateLogicalPool_LogicalPoolNotFound) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); int ret = topology_->UpdateLogicalPool(pool); @@ -1166,26 +919,15 @@ TEST_F(TestTopology, UpdateLogicalPool_StorageFail) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - 
PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(false)); int ret = topology_->UpdateLogicalPool(pool); @@ -1197,24 +939,19 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); LogicalPool pool2; topology_->GetLogicalPool(logicalPoolId, &pool2); ASSERT_EQ(AllocateStatus::ALLOW, pool2.GetStatus()); // update to deny - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); - int ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::DENY, logicalPoolId); + int ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::DENY, + logicalPoolId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1223,11 +960,10 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { ASSERT_EQ(AllocateStatus::DENY, pool3.GetStatus()); // update to allow - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); - ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::ALLOW, logicalPoolId); + ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::ALLOW, + logicalPoolId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1239,18 +975,12 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_LogicalPoolNotFound) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - int ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::ALLOW, logicalPoolId); + int ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::ALLOW, + logicalPoolId); ASSERT_EQ(kTopoErrCodeLogicalPoolNotFound, ret); } @@ -1260,19 +990,14 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_StorageFail) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - 
LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(false)); - int ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::ALLOW, logicalPoolId); + int ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::ALLOW, + logicalPoolId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -1285,8 +1010,7 @@ TEST_F(TestTopology, TestUpdateLogicalPoolScanState) { PrepareAddLogicalPool(lpid, "name", ppid); auto set_state = [&](PoolIdType lpid, bool scanEnable) { - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); auto retCode = topology_->UpdateLogicalPoolScanState(lpid, scanEnable); ASSERT_EQ(retCode, kTopoErrCodeSuccess); }; @@ -1309,14 +1033,12 @@ TEST_F(TestTopology, TestUpdateLogicalPoolScanState) { check_state(lpid, true); // CASE 4: logical pool not found -> set scan state fail - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .Times(0); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).Times(0); auto retCode = topology_->UpdateLogicalPoolScanState(lpid + 1, true); ASSERT_EQ(retCode, kTopoErrCodeLogicalPoolNotFound); // CASE 5: update storage fail -> set scan state fail - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(false)); retCode = topology_->UpdateLogicalPoolScanState(lpid, true); ASSERT_EQ(retCode, kTopoErrCodeStorgeFail); } @@ -1325,18 +1047,11 @@ TEST_F(TestTopology, UpdatePhysicalPool_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PoolsetIdType poolsetId = 0x61; - PrepareAddPhysicalPool(physicalPoolId, - "name1", - poolsetId, - "desc1"); + PrepareAddPhysicalPool(physicalPoolId, "name1", poolsetId, "desc1"); - PhysicalPool newPool(physicalPoolId, - "name1", - poolsetId, - "desc2"); + PhysicalPool newPool(physicalPoolId, "name1", poolsetId, "desc2"); - EXPECT_CALL(*storage_, UpdatePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdatePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->UpdatePhysicalPool(newPool); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1349,69 +1064,45 @@ TEST_F(TestTopology, UpdatePhysicalPool_success) { TEST_F(TestTopology, UpdatePhysicalPool_PhysicalPoolNotFound) { PoolIdType physicalPoolId = 0x11; PoolIdType pid = 0x61; - PhysicalPool newPool(physicalPoolId, - "name1", - pid, - "desc2"); + PhysicalPool newPool(physicalPoolId, "name1", pid, "desc2"); int ret = topology_->UpdatePhysicalPool(newPool); ASSERT_EQ(kTopoErrCodePhysicalPoolNotFound, ret); } - TEST_F(TestTopology, UpdatePhysicalPool_StorageFail) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PoolsetIdType poolsetId = 0x61; - PrepareAddPhysicalPool(physicalPoolId, - "name1", - poolsetId, - "desc1"); + PrepareAddPhysicalPool(physicalPoolId, "name1", poolsetId, "desc1"); - PhysicalPool newPool(physicalPoolId, - "name1", - poolsetId, - "desc2"); + PhysicalPool newPool(physicalPoolId, "name1", poolsetId, "desc2"); - EXPECT_CALL(*storage_, UpdatePhysicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdatePhysicalPool(_)).WillOnce(Return(false)); int ret = topology_->UpdatePhysicalPool(newPool); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } - - TEST_F(TestTopology, 
UpdateZone_success) { PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddZone(zoneId, - "name1", - physicalPoolId, - "desc1"); - - Zone newZone(zoneId, - "name1", - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateZone(_)) - .WillOnce(Return(true)); + PrepareAddZone(zoneId, "name1", physicalPoolId, "desc1"); + + Zone newZone(zoneId, "name1", physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateZone(_)).WillOnce(Return(true)); int ret = topology_->UpdateZone(newZone); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - TEST_F(TestTopology, UpdateZone_ZoneNotFound) { ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; - Zone newZone(zoneId, - "name1", - physicalPoolId, - "desc2"); + Zone newZone(zoneId, "name1", physicalPoolId, "desc2"); int ret = topology_->UpdateZone(newZone); ASSERT_EQ(kTopoErrCodeZoneNotFound, ret); @@ -1422,18 +1113,11 @@ TEST_F(TestTopology, UpdateZone_StorageFail) { ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddZone(zoneId, - "name1", - physicalPoolId, - "desc1"); - - Zone newZone(zoneId, - "name1", - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateZone(_)) - .WillOnce(Return(false)); + PrepareAddZone(zoneId, "name1", physicalPoolId, "desc1"); + + Zone newZone(zoneId, "name1", physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateZone(_)).WillOnce(Return(false)); int ret = topology_->UpdateZone(newZone); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -1445,28 +1129,13 @@ TEST_F(TestTopology, UpdateServer_success) { ServerIdType serverId = 0x31; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc1"); - - Server newServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateServer(_)) - .WillOnce(Return(true)); + PrepareAddServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc1"); + + Server newServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateServer(_)).WillOnce(Return(true)); int ret = topology_->UpdateServer(newServer); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1477,15 +1146,8 @@ TEST_F(TestTopology, UpdateServer_ServerNotFound) { ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; - Server newServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc2"); + Server newServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc2"); int ret = topology_->UpdateServer(newServer); ASSERT_EQ(kTopoErrCodeServerNotFound, ret); @@ -1498,34 +1160,18 @@ TEST_F(TestTopology, UpdateServer_StorageFail) { ServerIdType serverId = 0x31; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc1"); - - Server newServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateServer(_)) - .WillOnce(Return(false)); + PrepareAddServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc1"); + + Server newServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateServer(_)).WillOnce(Return(false)); int ret = topology_->UpdateServer(newServer); 
ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } - TEST_F(TestTopology, UpdateChunkServerTopo_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; @@ -1535,24 +1181,11 @@ TEST_F(TestTopology, UpdateChunkServerTopo_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); - - ChunkServer newCs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/abc"); - - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "ip1", 100, "/"); + + ChunkServer newCs(csId, "token", "ssd", serverId, "ip1", 100, "/abc"); + + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeSuccess, ret); } @@ -1566,28 +1199,15 @@ TEST_F(TestTopology, UpdateChunkServerTopo_UpdateServerSuccess) { ChunkServerIdType csId = 0x41; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); - PrepareAddServer(serverId, "server1", - "ip1", 0, "ip2", 0, zoneId, physicalPoolId); - PrepareAddServer(serverId2, "server2", - "ip3", 0, "ip4", 0, zoneId, physicalPoolId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); - - ChunkServer newCs(csId, - "token", - "ssd", - serverId2, - "ip3", - 100, - "/abc"); - - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + PrepareAddServer(serverId, "server1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId); + PrepareAddServer(serverId2, "server2", "ip3", 0, "ip4", 0, zoneId, + physicalPoolId); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "ip1", 100, "/"); + + ChunkServer newCs(csId, "token", "ssd", serverId2, "ip3", 100, "/abc"); + + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeSuccess, ret); } @@ -1596,13 +1216,7 @@ TEST_F(TestTopology, UpdateChunkServerTopo_ChunkServerNotFound) { ServerIdType serverId = 0x31; ChunkServerIdType csId = 0x41; - ChunkServer newCs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/abc"); + ChunkServer newCs(csId, "token", "ssd", serverId, "ip1", 100, "/abc"); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); @@ -1617,24 +1231,11 @@ TEST_F(TestTopology, UpdateChunkServerTopo_StorageFail) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); - - ChunkServer newCs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/abc"); - - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(false)); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "ip1", 100, "/"); + + ChunkServer newCs(csId, "token", "ssd", serverId, "ip1", 100, "/abc"); + + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(false)); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -1648,11 +1249,7 @@ TEST_F(TestTopology, UpdateChunkServerDiskStatus_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); PhysicalPool pool; ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); @@ -1662,17 +1259,16 @@ TEST_F(TestTopology, 
UpdateChunkServerDiskStatus_success) { csState.SetDiskState(DISKERROR); csState.SetDiskCapacity(100); - int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); + int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(100, pool.GetDiskCapacity()); - // 只刷一次 - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + // Flush only once + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep to wait for the database flush sleep(5); topology_->Stop(); } @@ -1684,7 +1280,7 @@ TEST_F(TestTopology, UpdateChunkServerDiskStatus_ChunkServerNotFound) { csState.SetDiskState(DISKERROR); csState.SetDiskCapacity(100); - int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); + int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); } @@ -1697,22 +1293,17 @@ TEST_F(TestTopology, UpdateChunkServerRwStateToStorage_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); ChunkServerStatus rwState; rwState = ChunkServerStatus::PENDDING; - int ret = topology_->UpdateChunkServerRwState(rwState, csId); + int ret = topology_->UpdateChunkServerRwState(rwState, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - // 只刷一次 - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + // Flush only once + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep to wait for the database flush sleep(5); topology_->Stop(); } @@ -1726,60 +1317,50 @@ TEST_F(TestTopology, UpdateChunkServerRwStateTestPhysicalPoolCapacity_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); PhysicalPool pool; ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // READWRITE -> RETIRED - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::RETIRED, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(0, pool.GetDiskCapacity()); // RETIRED -> PENDDING - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::PENDDING, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::PENDDING, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // PENDDING -> RETIRED - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::RETIRED, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(0, pool.GetDiskCapacity()); // RETIRED -> READWRITE - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::READWRITE, csId));
ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // READWRITE -> PENDDING - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::PENDDING, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::PENDDING, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // PENDDING -> READWRITE - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::READWRITE, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::READWRITE, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); @@ -1790,7 +1371,7 @@ TEST_F(TestTopology, UpdateChunkServerRwState_ChunkServerNotFound) { ChunkServerStatus rwState; rwState = ChunkServerStatus::PENDDING; - int ret = topology_->UpdateChunkServerRwState(rwState, csId); + int ret = topology_->UpdateChunkServerRwState(rwState, csId); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); } @@ -1803,13 +1384,9 @@ TEST_F(TestTopology, UpdateChunkServerStartUpTime_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); uint64_t time = 0x1234567812345678; - int ret = topology_->UpdateChunkServerStartUpTime(time, csId); + int ret = topology_->UpdateChunkServerStartUpTime(time, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); ChunkServer cs; @@ -1819,7 +1396,7 @@ TEST_F(TestTopology, UpdateChunkServerStartUpTime_success) { TEST_F(TestTopology, UpdateChunkServerStartUpTime_ChunkServerNotFound) { ChunkServerIdType csId = 0x41; - int ret = topology_->UpdateChunkServerStartUpTime(1000, csId); + int ret = topology_->UpdateChunkServerStartUpTime(1000, csId); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); } @@ -1831,19 +1408,18 @@ TEST_F(TestTopology, FindLogicalPool_success) { std::string physicalPoolName = "PhysiclPool1"; PrepareAddPhysicalPool(physicalPoolId, physicalPoolName); PrepareAddLogicalPool(logicalPoolId, logicalPoolName, physicalPoolId); - PoolIdType ret = topology_->FindLogicalPool(logicalPoolName, - physicalPoolName); + PoolIdType ret = + topology_->FindLogicalPool(logicalPoolName, physicalPoolName); ASSERT_EQ(logicalPoolId, ret); } TEST_F(TestTopology, FindLogicalPool_LogicalPoolNotFound) { std::string logicalPoolName = "logicalPool1"; std::string physicalPoolName = "PhysiclPool1"; - PoolIdType ret = topology_->FindLogicalPool(logicalPoolName, - physicalPoolName); + PoolIdType ret = + topology_->FindLogicalPool(logicalPoolName, physicalPoolName); - ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindPhysicalPool_success) { @@ -1858,11 +1434,9 @@ TEST_F(TestTopology, FindPhysicalPool_success) { TEST_F(TestTopology, FindPhysicalPool_PhysicalPoolNotFound) { std::string physicalPoolName = "physicalPoolName"; PoolIdType ret = topology_->FindPhysicalPool(physicalPoolName); - ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), ret); } - TEST_F(TestTopology, FindZone_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; @@ -1879,8 +1453,7 @@ TEST_F(TestTopology, FindZone_ZoneNotFound) { std::string physicalPoolName = "physicalPoolName"; std::string zoneName = "zoneName"; ZoneIdType ret =
topology_->FindZone(zoneName, physicalPoolName); - ASSERT_EQ(static_cast<ZoneIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<ZoneIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindZone_success2) { @@ -1900,8 +1473,7 @@ TEST_F(TestTopology, FindZone_ZoneNotFound2) { std::string physicalPoolName = "physicalPoolName"; std::string zoneName = "zoneName"; ZoneIdType ret = topology_->FindZone(zoneName, physicalPoolId); - ASSERT_EQ(static_cast<ZoneIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<ZoneIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindServerByHostName_success) { @@ -1910,8 +1482,7 @@ TEST_F(TestTopology, FindServerByHostName_success) { std::string hostName = "host1"; PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName); + PrepareAddServer(serverId, hostName); ServerIdType ret = topology_->FindServerByHostName(hostName); ASSERT_EQ(serverId, ret); @@ -1920,8 +1491,7 @@ TEST_F(TestTopology, FindServerByHostName_ServerNotFound) { std::string hostName = "host1"; ServerIdType ret = topology_->FindServerByHostName(hostName); - ASSERT_EQ(static_cast<ServerIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<ServerIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindServerByHostIpPort_success) { @@ -1932,12 +1502,7 @@ TEST_F(TestTopology, FindServerByHostIpPort_success) { std::string externalHostIp = "ip2"; PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); ServerIdType ret = topology_->FindServerByHostIpPort(internalHostIp, 0); ASSERT_EQ(serverId, ret); @@ -1954,16 +1519,10 @@ TEST_F(TestTopology, FindSeverByHostIp_ServerNotFound) { std::string externalHostIp = "ip2"; PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); ServerIdType ret = topology_->FindServerByHostIpPort("ip3", 0); - ASSERT_EQ(static_cast<ServerIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<ServerIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindChunkServerNotRetired_success) { @@ -1977,21 +1536,11 @@ TEST_F(TestTopology, FindChunkServerNotRetired_success) { PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/", - port); - - ChunkServerIdType ret = topology_->FindChunkServerNotRetired( - internalHostIp, port); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/", port); + + ChunkServerIdType ret = + topology_->FindChunkServerNotRetired(internalHostIp, port); ASSERT_EQ(csId, ret); } @@ -2006,22 +1555,11 @@ TEST_F(TestTopology, FindChunkServerNotRetired_ChunkServerNotFound) { PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/", - port); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/", port); ChunkServerIdType ret = topology_->FindChunkServerNotRetired("ip3", port); - ASSERT_EQ(static_cast<ChunkServerIdType>( - UNINTIALIZE_ID), ret); + ASSERT_EQ(static_cast<ChunkServerIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, GetLogicalPool_success) { @@
-2089,7 +1627,6 @@ TEST_F(TestTopology, GetServer_success) { ASSERT_EQ(true, ret); } - TEST_F(TestTopology, GetServer_GetServerNotFound) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; @@ -2133,7 +1670,6 @@ TEST_F(TestTopology, GetChunkServer_ChunkServerNotFound) { ASSERT_EQ(false, ret); } - TEST_F(TestTopology, GetChunkServerInCluster_success) { PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; @@ -2371,8 +1907,8 @@ TEST_F(TestTopology, GetChunkServerInLogicalPool_success) { PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "name", physicalPoolId); - PrepareAddServer( - serverId, "name2", "ip1", 0, "ip2", 0, zoneId, physicalPoolId); + PrepareAddServer(serverId, "name2", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId); PrepareAddChunkServer(csId, "token", "ssd", serverId); PrepareAddChunkServer(csId2, "token", "ssd", serverId); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId); @@ -2452,12 +1988,12 @@ TEST_F(TestTopology, AddCopySet_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2470,8 +2006,7 @@ TEST_F(TestTopology, AddCopySet_success) { CopySetInfo csInfo(logicalPoolId, copysetId); csInfo.SetCopySetMembers(replicas); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(csInfo); ASSERT_EQ(kTopoErrCodeSuccess, ret); } @@ -2486,12 +2021,12 @@ TEST_F(TestTopology, AddCopySet_IdDuplicated) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2519,12 +2054,12 @@ TEST_F(TestTopology, AddCopySet_LogicalPoolNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 
0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2551,12 +2086,12 @@ TEST_F(TestTopology, AddCopySet_StorageFail) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2569,8 +2104,7 @@ TEST_F(TestTopology, AddCopySet_StorageFail) { CopySetInfo csInfo(logicalPoolId, copysetId); csInfo.SetCopySetMembers(replicas); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(false)); int ret = topology_->AddCopySet(csInfo); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -2585,12 +2119,12 @@ TEST_F(TestTopology, RemoveCopySet_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2601,8 +2135,7 @@ TEST_F(TestTopology, RemoveCopySet_success) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - EXPECT_CALL(*storage_, DeleteCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteCopySet(_)).WillOnce(Return(true)); int ret = topology_->RemoveCopySet( std::pair<PoolIdType, CopySetIdType>(logicalPoolId, copysetId)); @@ -2620,12 +2153,12 @@ TEST_F(TestTopology, RemoveCopySet_storageFail) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); -
PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2636,8 +2169,7 @@ TEST_F(TestTopology, RemoveCopySet_storageFail) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - EXPECT_CALL(*storage_, DeleteCopySet(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteCopySet(_)).WillOnce(Return(false)); int ret = topology_->RemoveCopySet( std::pair<PoolIdType, CopySetIdType>(logicalPoolId, copysetId)); @@ -2655,12 +2187,12 @@ TEST_F(TestTopology, RemoveCopySet_CopySetNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2687,12 +2219,12 @@ TEST_F(TestTopology, UpdateCopySetTopo_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2716,11 +2248,10 @@ TEST_F(TestTopology, UpdateCopySetTopo_success) { ASSERT_EQ(kTopoErrCodeSuccess, ret); - // 只刷一次 - EXPECT_CALL(*storage_, UpdateCopySet(_)) - .WillOnce(Return(true)); + // Flush only once + EXPECT_CALL(*storage_, UpdateCopySet(_)).WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep to wait for the database flush sleep(5); topology_->Stop(); } @@ -2735,12 +2266,12 @@ TEST_F(TestTopology, UpdateCopySetTopo_CopySetNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0,
"127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2775,12 +2306,12 @@ TEST_F(TestTopology, GetCopySet_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2809,12 +2340,12 @@ TEST_F(TestTopology, GetCopySet_CopysetNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2843,12 +2374,12 @@ TEST_F(TestTopology, GetCopySetsInLogicalPool_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2860,7 +2391,7 @@ TEST_F(TestTopology, GetCopySetsInLogicalPool_success) { PrepareAddCopySet(copysetId, logicalPoolId, replicas); std::vector csList = - 
topology_->GetCopySetsInLogicalPool(logicalPoolId); + topology_->GetCopySetsInLogicalPool(logicalPoolId); ASSERT_EQ(1, csList.size()); } @@ -2874,12 +2405,12 @@ TEST_F(TestTopology, GetCopySetsInCluster_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2890,8 +2421,7 @@ TEST_F(TestTopology, GetCopySetsInCluster_success) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - std::vector csList = - topology_->GetCopySetsInCluster(); + std::vector csList = topology_->GetCopySetsInCluster(); ASSERT_EQ(1, csList.size()); } @@ -2905,12 +2435,12 @@ TEST_F(TestTopology, GetCopySetsInChunkServer_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2921,44 +2451,33 @@ TEST_F(TestTopology, GetCopySetsInChunkServer_success) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - std::vector csList = - topology_->GetCopySetsInChunkServer(0x41); + std::vector csList = topology_->GetCopySetsInChunkServer(0x41); ASSERT_EQ(1, csList.size()); } TEST_F(TestTopology, test_create_default_poolset) { - EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, StorageClusterInfo(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadClusterInfo(_)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)).WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPoolset(_, _)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPoolset(_, _)).WillOnce(Return(true)); Poolset poolset; EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce( - DoAll(SaveArg<0>(&poolset), Return(true))); + .WillOnce(DoAll(SaveArg<0>(&poolset), Return(true))); std::unordered_map physicalPoolMap{ {1, {1, "pool1", UNINTIALIZE_ID, ""}}, {2, {2, "pool2", UNINTIALIZE_ID, ""}}, }; EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap), - SetArgPointee<1>(2), + 
.WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap), SetArgPointee<1>(2), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadCopySet(_, _)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadCopySet(_, _)).WillOnce(Return(true)); int rc = topology_->Init({}); ASSERT_EQ(kTopoErrCodeSuccess, rc); diff --git a/test/mds/topology/test_topology_chunk_allocator.cpp b/test/mds/topology/test_topology_chunk_allocator.cpp index a1ea8aa942..2f3c59e089 100644 --- a/test/mds/topology/test_topology_chunk_allocator.cpp +++ b/test/mds/topology/test_topology_chunk_allocator.cpp @@ -20,30 +20,28 @@ * Author: xuchaojie */ -#include #include +#include #include - -#include "src/mds/topology/topology_chunk_allocator.h" -#include "src/mds/common/mds_define.h" -#include "test/mds/topology/mock_topology.h" -#include "test/mds/mock/mock_topology.h" #include "proto/nameserver2.pb.h" #include "src/common/timeutility.h" +#include "src/mds/common/mds_define.h" +#include "src/mds/topology/topology_chunk_allocator.h" #include "test/mds/mock/mock_alloc_statistic.h" +#include "test/mds/mock/mock_topology.h" +#include "test/mds/topology/mock_topology.h" namespace curve { namespace mds { namespace topology { -using ::testing::Return; using ::testing::_; using ::testing::AnyOf; -using ::testing::SetArgPointee; using ::testing::Invoke; - +using ::testing::Return; +using ::testing::SetArgPointee; class TestTopologyChunkAllocator : public ::testing::Test { protected: @@ -54,21 +52,17 @@ class TestTopologyChunkAllocator : public ::testing::Test { tokenGenerator_ = std::make_shared(); storage_ = std::make_shared(); topology_ = std::make_shared(idGenerator_, - tokenGenerator_, - storage_); + tokenGenerator_, storage_); TopologyOption option; topoStat_ = std::make_shared(topology_); - chunkFilePoolAllocHelp_ = - std::make_shared(); + chunkFilePoolAllocHelp_ = std::make_shared(); chunkFilePoolAllocHelp_->UpdateChunkFilePoolAllocConfig(true, true, 15); option.PoolUsagePercentLimit = 85; option.enableLogicalPoolStatus = true; allocStatistic_ = std::make_shared(); - testObj_ = std::make_shared(topology_, - allocStatistic_, - topoStat_, - chunkFilePoolAllocHelp_, - option); + testObj_ = std::make_shared( + topology_, allocStatistic_, topoStat_, chunkFilePoolAllocHelp_, + option); } virtual void TearDown() { @@ -85,53 +79,37 @@ class TestTopologyChunkAllocator : public ::testing::Test { const std::string& type = "SSD", const std::string& desc = "descPoolset") { Poolset poolset(pid, name, type, desc); - EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePoolset(_)).WillOnce(Return(true)); int ret = topology_->AddPoolset(poolset); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - void PrepareAddLogicalPool(PoolIdType id = 0x01, - const std::string &name = "testLogicalPool", - PoolIdType phyPoolId = 0x11, - LogicalPoolType type = PAGEFILE, - const LogicalPool::RedundanceAndPlaceMentPolicy &rap = - LogicalPool::RedundanceAndPlaceMentPolicy(), - const 
LogicalPool::UserPolicy &policy = LogicalPool::UserPolicy(), - uint64_t createTime = 0x888 - ) { - LogicalPool pool(id, - name, - phyPoolId, - type, - rap, - policy, - createTime, - true, - true); - - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + void PrepareAddLogicalPool( + PoolIdType id = 0x01, const std::string& name = "testLogicalPool", + PoolIdType phyPoolId = 0x11, LogicalPoolType type = PAGEFILE, + const LogicalPool::RedundanceAndPlaceMentPolicy& rap = + LogicalPool::RedundanceAndPlaceMentPolicy(), + const LogicalPool::UserPolicy& policy = LogicalPool::UserPolicy(), + uint64_t createTime = 0x888) { + LogicalPool pool(id, name, phyPoolId, type, rap, policy, createTime, + true, true); + + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } - void PrepareAddPhysicalPool(PoolIdType id = 0x11, - const std::string &name = "testPhysicalPool", - PoolsetIdType pid = 0x61, - const std::string &desc = "descPhysicalPool", - uint64_t diskCapacity = 10240) { - PhysicalPool pool(id, - name, - pid, - desc); + const std::string& name = "testPhysicalPool", + PoolsetIdType pid = 0x61, + const std::string& desc = "descPhysicalPool", + uint64_t diskCapacity = 10240) { + PhysicalPool pool(id, name, pid, desc); pool.SetDiskCapacity(diskCapacity); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) @@ -139,78 +117,56 @@ class TestTopologyChunkAllocator : public ::testing::Test { } void PrepareAddZone(ZoneIdType id = 0x21, - const std::string &name = "testZone", - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descZone") { + const std::string& name = "testZone", + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descZone") { Zone zone(id, name, physicalPoolId, desc); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); - ASSERT_EQ(kTopoErrCodeSuccess, ret) << - "should have PrepareAddPhysicalPool()"; + ASSERT_EQ(kTopoErrCodeSuccess, ret) + << "should have PrepareAddPhysicalPool()"; } void PrepareAddServer(ServerIdType id = 0x31, - const std::string &hostName = "testServer", - const std::string &internalHostIp = "testInternalIp", - const std::string &externalHostIp = "testExternalIp", - ZoneIdType zoneId = 0x21, - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descServer") { - Server server(id, - hostName, - internalHostIp, - 0, - externalHostIp, - 0, - zoneId, - physicalPoolId, - desc); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + const std::string& hostName = "testServer", + const std::string& internalHostIp = "testInternalIp", + const std::string& externalHostIp = "testExternalIp", + ZoneIdType zoneId = 0x21, + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descServer") { + Server server(id, hostName, internalHostIp, 0, externalHostIp, 0, + zoneId, physicalPoolId, desc); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } - void PrepareAddChunkServer(ChunkServerIdType id = 0x41, - const std::string &token = "testToken", - const 
std::string &diskType = "nvme", - ServerIdType serverId = 0x31, - const std::string &hostIp = "testInternalIp", - uint32_t port = 0, - const std::string &diskPath = "/", - uint64_t diskUsed = 512, - uint64_t diskCapacity = 1024) { - ChunkServer cs(id, - token, - diskType, - serverId, - hostIp, - port, - diskPath); - ChunkServerState state; - state.SetDiskCapacity(diskCapacity); - state.SetDiskUsed(diskUsed); - cs.SetChunkServerState(state); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + void PrepareAddChunkServer( + ChunkServerIdType id = 0x41, const std::string& token = "testToken", + const std::string& diskType = "nvme", ServerIdType serverId = 0x31, + const std::string& hostIp = "testInternalIp", uint32_t port = 0, + const std::string& diskPath = "/", uint64_t diskUsed = 512, + uint64_t diskCapacity = 1024) { + ChunkServer cs(id, token, diskType, serverId, hostIp, port, diskPath); + ChunkServerState state; + state.SetDiskCapacity(diskCapacity); + state.SetDiskUsed(diskUsed); + cs.SetChunkServerState(state); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); ChunkServerStat stat; - stat.chunkFilepoolSize = diskCapacity-diskUsed; + stat.chunkFilepoolSize = diskCapacity - diskUsed; topoStat_->UpdateChunkServerStat(id, stat); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddServer()"; } - void PrepareAddCopySet(CopySetIdType copysetId, - PoolIdType logicalPoolId, - const std::set &members, - bool availFlag = true) { - CopySetInfo cs(logicalPoolId, - copysetId); + void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, + const std::set& members, + bool availFlag = true) { + CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); cs.SetAvailableFlag(availFlag); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; @@ -228,7 +184,7 @@ class TestTopologyChunkAllocator : public ::testing::Test { }; TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRandomInSingleLogicalPool_success) { + Test_AllocateChunkRandomInSingleLogicalPool_success) { std::vector infos; PrepareAddPoolset(); @@ -247,7 +203,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -258,12 +214,8 @@ TEST_F(TestTopologyChunkAllocator, EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_TRUE(ret); @@ -275,20 +227,16 @@ TEST_F(TestTopologyChunkAllocator, } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRandomInSingleLogicalPool_logicalPoolNotFound) { + Test_AllocateChunkRandomInSingleLogicalPool_logicalPoolNotFound) { std::vector infos; - bool ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 1, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + 
INODE_PAGEFILE, "testPoolset", 1, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRandomInSingleLogicalPool_shouldfail) { + Test_AllocateChunkRandomInSingleLogicalPool_shouldfail) { std::vector infos; PoolIdType logicalPoolId = 0x01; @@ -304,7 +252,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddServer(0x32, "server2", "127.0.0.1", "127.0.0.1", 0x22, 0x11); PrepareAddServer(0x33, "server3", "127.0.0.1", "127.0.0.1", 0x23, 0x11); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -315,12 +263,8 @@ TEST_F(TestTopologyChunkAllocator, EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_FALSE(ret); @@ -328,12 +272,8 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); - ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_TRUE(ret); @@ -343,18 +283,14 @@ TEST_F(TestTopologyChunkAllocator, topoStat_->UpdateChunkServerStat(0x42, stat); topoStat_->UpdateChunkServerStat(0x43, stat); - ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_GetRemainingSpaceInLogicalPool_UseChunkFilePool) { + Test_GetRemainingSpaceInLogicalPool_UseChunkFilePool) { std::vector infos; PoolIdType logicalPoolId = 0x01; @@ -370,7 +306,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddServer(0x32, "server2", "127.0.0.1", "127.0.0.1", 0x22, 0x11); PrepareAddServer(0x33, "server3", "127.0.0.1", "127.0.0.1", 0x23, 0x11); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -385,16 +321,16 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); std::map enoughsize; - std::vector pools ={0x01}; + std::vector pools = {0x01}; for (int i = 0; i < 10; i++) { - testObj_->GetRemainingSpaceInLogicalPool(pools, - &enoughsize, "testPoolset"); + testObj_->GetRemainingSpaceInLogicalPool(pools, &enoughsize, + "testPoolset"); ASSERT_EQ(enoughsize[logicalPoolId], 1109); } } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_success) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_success) { std::vector infos; PrepareAddPoolset(); @@ -412,7 +348,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -423,16 +359,11 @@ 
TEST_F(TestTopologyChunkAllocator, PrepareAddCopySet(0x54, logicalPoolId, replicas); PrepareAddCopySet(0x55, logicalPoolId, replicas); - EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 3, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 3, 1024, &infos); ASSERT_TRUE(ret); @@ -443,12 +374,8 @@ TEST_F(TestTopologyChunkAllocator, // second time std::vector infos2; - ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 3, - 1024, - &infos2); + ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 3, 1024, &infos2); ASSERT_TRUE(ret); @@ -493,20 +420,16 @@ TEST_F(TestTopologyChunkAllocator, } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolNotFound) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolNotFound) { std::vector infos; - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 1, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 1, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_copysetEmpty) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_copysetEmpty) { std::vector infos; PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; @@ -514,18 +437,14 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddPhysicalPool(physicalPoolId); PrepareAddLogicalPool(logicalPoolId); - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 1, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 1, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolIsDENY) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolIsDENY) { std::vector infos; PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; @@ -542,7 +461,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -553,27 +472,23 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddCopySet(0x54, logicalPoolId, replicas); PrepareAddCopySet(0x55, logicalPoolId, replicas); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); - topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::DENY, logicalPoolId); + topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::DENY, + logicalPoolId); EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 3, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 3, 1024, &infos); ASSERT_FALSE(ret); } TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { - // 2000个copyset分配100000次,每次分配64个chunk + // 
2000个copyset分配100000次,每次分配64个chunk + // 2000 copysets are allocated 100000 times, with 64 chunks allocated each + // time std::vector copySetIds; std::map copySetMap; for (int i = 0; i < 2000; i++) { @@ -584,12 +499,8 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { for (int i = 0; i < 100000; i++) { int chunkNumber = 64; std::vector infos; - bool ret = - AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( - copySetIds, - 1, - chunkNumber, - &infos); + bool ret = AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( + copySetIds, 1, chunkNumber, &infos); ASSERT_TRUE(ret); ASSERT_EQ(chunkNumber, infos.size()); for (int j = 0; j < chunkNumber; j++) { @@ -598,7 +509,7 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { } int minCount = copySetMap[0]; int maxCount = copySetMap[0]; - for (auto &pair : copySetMap) { + for (auto& pair : copySetMap) { if (pair.second > maxCount) { maxCount = pair.second; } @@ -610,10 +521,8 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { double minPercent = static_cast(avg - minCount) / avg; double maxPercent = static_cast(maxCount - avg) / avg; LOG(INFO) << "AllocateChunkRandomInSingleLogicalPool poc" - <<", minCount = " << minCount - <<", maxCount = " << maxCount - << ", avg = " << avg - << ", minPercent = " << minPercent + << ", minCount = " << minCount << ", maxCount = " << maxCount + << ", avg = " << avg << ", minPercent = " << minPercent << ", maxPercent = " << maxPercent; ASSERT_TRUE(minPercent < 0.1); @@ -621,7 +530,8 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { } TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolTps) { - // 2000个copyset分配100000次,每次分配64个chunk + // 2000 copysets are allocated 100000 times, with 64 chunks allocated each + // time std::vector copySetIds; for (int i = 0; i < 2000; i++) { copySetIds.push_back(i); } @@ -632,23 +542,19 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolTps) { int chunkNumber = 64; std::vector infos; AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( - copySetIds, - 1, - chunkNumber, - &infos); + copySetIds, 1, chunkNumber, &infos); } uint64_t stoptime = curve::common::TimeUtility::GetTimeofDayUs(); double usetime = stoptime - startime; - double tps = 1000000.0 * 100000.0/usetime; + double tps = 1000000.0 * 100000.0 / usetime; - std::cout << "TestAllocateChunkRandomInSingleLogicalPool, TPS = " - << tps + std::cout << "TestAllocateChunkRandomInSingleLogicalPool, TPS = " << tps << " * 64 chunk per second."; } TEST(TestAllocateChunkPolicy, - TestAllocateChunkRoundRobinInSingleLogicalPoolSuccess) { + TestAllocateChunkRoundRobinInSingleLogicalPoolSuccess) { std::vector copySetIds; std::map copySetMap; for (int i = 0; i < 20; i++) { @@ -657,13 +563,8 @@ TEST(TestAllocateChunkPolicy, uint32_t nextIndex = 15; int chunkNumber = 10; std::vector infos; - bool ret = - AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( - copySetIds, - 1, - &nextIndex, - chunkNumber, - &infos); + bool ret = AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( + copySetIds, 1, &nextIndex, chunkNumber, &infos); ASSERT_TRUE(ret); ASSERT_EQ(5, nextIndex); ASSERT_EQ(chunkNumber, infos.size()); @@ -680,26 +581,20 @@ TEST(TestAllocateChunkPolicy, } TEST(TestAllocateChunkPolicy, - TestAllocateChunkRoundRobinInSingleLogicalPoolEmpty) { + TestAllocateChunkRoundRobinInSingleLogicalPoolEmpty) { std::vector copySetIds; std::map copySetMap; uint32_t nextIndex = 15; int
chunkNumber = 10; std::vector infos; - bool ret = - AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( - copySetIds, - 1, - &nextIndex, - chunkNumber, - &infos); + bool ret = AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( + copySetIds, 1, &nextIndex, chunkNumber, &infos); ASSERT_FALSE(ret); ASSERT_EQ(15, nextIndex); ASSERT_EQ(0, infos.size()); } -TEST(TestAllocateChunkPolicy, - TestChooseSingleLogicalPoolByWeightPoc) { +TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolByWeightPoc) { std::map poolWeightMap; std::map poolMap; for (int i = 0; i < 5; i++) { @@ -709,8 +604,8 @@ TEST(TestAllocateChunkPolicy, for (int i = 0; i < 100000; i++) { PoolIdType pid; - AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( - poolWeightMap, &pid); + AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight(poolWeightMap, + &pid); poolMap[pid]++; } @@ -719,7 +614,8 @@ TEST(TestAllocateChunkPolicy, ASSERT_TRUE(poolMap[1] < poolMap[2]); ASSERT_TRUE(poolMap[2] < poolMap[3]); ASSERT_TRUE(poolMap[3] < poolMap[4]); - // 5个池大概分布因该是0, 10000,20000,30000,40000 + // The approximate distribution of 5 pools should be 0, 10000, 20000, 30000, + // 40000 LOG(INFO) << "pool0 : " << poolMap[0] << std::endl << "pool1 : " << poolMap[1] << std::endl << "pool2 : " << poolMap[2] << std::endl @@ -727,8 +623,7 @@ TEST(TestAllocateChunkPolicy, << "pool4 : " << poolMap[4] << std::endl; } -TEST(TestAllocateChunkPolicy, - TestChooseSingleLogicalPoolByWeightPoc2) { +TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolByWeightPoc2) { std::map poolMap; poolMap[0] = 100000; poolMap[1] = 90000; @@ -738,12 +633,11 @@ TEST(TestAllocateChunkPolicy, for (int i = 0; i < 100000; i++) { PoolIdType pid; - AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( - poolMap, &pid); + AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight(poolMap, &pid); poolMap[pid] -= 1; } - // 测试是否能逐渐拉平pool之间差距 + // Test to see if it is possible to gradually equalize the gap between pools LOG(INFO) << "pool0 : " << poolMap[0] << std::endl << "pool1 : " << poolMap[1] << std::endl << "pool2 : " << poolMap[2] << std::endl @@ -751,9 +645,8 @@ TEST(TestAllocateChunkPolicy, << "pool4 : " << poolMap[4] << std::endl; } -// 测试能否随机到每个pool -TEST(TestAllocateChunkPolicy, - TestChooseSingleLogicalPoolRandom) { +// Test to see if random allocation to each pool is possible +TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolRandom) { std::vector pools = {1, 2, 3, 4, 5}; std::map allocMap; allocMap[1] = 0; diff --git a/test/mds/topology/test_topology_metric.cpp b/test/mds/topology/test_topology_metric.cpp index 2a38263784..fd1112a4ec 100644 --- a/test/mds/topology/test_topology_metric.cpp +++ b/test/mds/topology/test_topology_metric.cpp @@ -21,24 +21,24 @@ */ #include -#include #include +#include #include "src/mds/topology/topology_metric.h" -#include "test/mds/topology/mock_topology.h" #include "test/mds/mock/mock_alloc_statistic.h" #include "test/mds/mock/mock_topology.h" +#include "test/mds/topology/mock_topology.h" namespace curve { namespace mds { namespace topology { -using ::testing::Return; using ::testing::_; using ::testing::AnyOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; +using ::testing::SetArgPointee; class TestTopologyMetric : public ::testing::Test { public: @@ -48,10 +48,9 @@ class TestTopologyMetric : public ::testing::Test { idGenerator_ = std::make_shared(); tokenGenerator_ = std::make_shared(); storage_ = std::make_shared(); - // 
使用真实的topology + // Using real topology topology_ = std::make_shared(idGenerator_, - tokenGenerator_, - storage_); + tokenGenerator_, storage_); topologyStat_ = std::make_shared(); allocStatistic_ = std::make_shared(); @@ -76,122 +75,87 @@ class TestTopologyMetric : public ::testing::Test { const std::string& type = "SSD", const std::string& desc = "descPoolset") { Poolset poolset(pid, name, type, desc); - EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePoolset(_)).WillOnce(Return(true)); int ret = topology_->AddPoolset(poolset); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - void PrepareAddLogicalPool(PoolIdType id = 0x01, - const std::string &name = "testLogicalPool", - PoolIdType phyPoolId = 0x11, - LogicalPoolType type = PAGEFILE, - const LogicalPool::RedundanceAndPlaceMentPolicy &rap = - LogicalPool::RedundanceAndPlaceMentPolicy(), - const LogicalPool::UserPolicy &policy = LogicalPool::UserPolicy(), - uint64_t createTime = 0x888 - ) { - LogicalPool pool(id, - name, - phyPoolId, - type, - rap, - policy, - createTime, - true, - true); - - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + void PrepareAddLogicalPool( + PoolIdType id = 0x01, const std::string& name = "testLogicalPool", + PoolIdType phyPoolId = 0x11, LogicalPoolType type = PAGEFILE, + const LogicalPool::RedundanceAndPlaceMentPolicy& rap = + LogicalPool::RedundanceAndPlaceMentPolicy(), + const LogicalPool::UserPolicy& policy = LogicalPool::UserPolicy(), + uint64_t createTime = 0x888) { + LogicalPool pool(id, name, phyPoolId, type, rap, policy, createTime, + true, true); + + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } - void PrepareAddPhysicalPool(PoolIdType id = 0x11, - const std::string &name = "testPhysicalPool", - PoolsetIdType pid = 0x61, - const std::string &desc = "descPhysicalPool") { - PhysicalPool pool(id, - name, - pid, - desc); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + const std::string& name = "testPhysicalPool", + PoolsetIdType pid = 0x61, + const std::string& desc = "descPhysicalPool") { + PhysicalPool pool(id, name, pid, desc); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret); } void PrepareAddZone(ZoneIdType id = 0x21, - const std::string &name = "testZone", - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descZone") { + const std::string& name = "testZone", + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descZone") { Zone zone(id, name, physicalPoolId, desc); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); - ASSERT_EQ(kTopoErrCodeSuccess, ret) << - "should have PrepareAddPhysicalPool()"; + ASSERT_EQ(kTopoErrCodeSuccess, ret) + << "should have PrepareAddPhysicalPool()"; } void PrepareAddServer(ServerIdType id = 0x31, - const std::string &hostName = "testServer", - const std::string &internalHostIp = "testInternalIp", - const std::string &externalHostIp = "testExternalIp", - ZoneIdType zoneId = 0x21, - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descServer") { - Server server(id, - hostName, - internalHostIp, - 0, - externalHostIp, - 0, - zoneId, - physicalPoolId, - desc); - 
EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + const std::string& hostName = "testServer", + const std::string& internalHostIp = "testInternalIp", + const std::string& externalHostIp = "testExternalIp", + ZoneIdType zoneId = 0x21, + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descServer") { + Server server(id, hostName, internalHostIp, 0, externalHostIp, 0, + zoneId, physicalPoolId, desc); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } void PrepareAddChunkServer(ChunkServerIdType id = 0x41, - const std::string &token = "testToken", - const std::string &diskType = "nvme", - ServerIdType serverId = 0x31, - const std::string &hostIp = "testInternalIp", - uint32_t port = 0, - const std::string &diskPath = "/") { - ChunkServer cs(id, - token, - diskType, - serverId, - hostIp, - port, - diskPath); - ChunkServerState st; - st.SetDiskCapacity(100 * 1024); - st.SetDiskUsed(10 * 1024); - cs.SetChunkServerState(st); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + const std::string& token = "testToken", + const std::string& diskType = "nvme", + ServerIdType serverId = 0x31, + const std::string& hostIp = "testInternalIp", + uint32_t port = 0, + const std::string& diskPath = "/") { + ChunkServer cs(id, token, diskType, serverId, hostIp, port, diskPath); + ChunkServerState st; + st.SetDiskCapacity(100 * 1024); + st.SetDiskUsed(10 * 1024); + cs.SetChunkServerState(st); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddServer()"; } - void PrepareAddCopySet(CopySetIdType copysetId, - PoolIdType logicalPoolId, - const std::set &members) { - CopySetInfo cs(logicalPoolId, - copysetId); + void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, + const std::set& members) { + CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; @@ -207,7 +171,7 @@ class TestTopologyMetric : public ::testing::Test { std::shared_ptr testObj_; }; -TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { +TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { PoolsetIdType poolsetId = 0x61; PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; @@ -229,14 +193,13 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { rap.pageFileRAP.replicaNum = 3; PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE, rap); + PAGEFILE, rap); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - ChunkServerStat stat1; CopysetStat cstat1; stat1.leaderCount = 1; @@ -258,12 +221,10 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { stat1.copysetStats.push_back(cstat1); EXPECT_CALL(*topologyStat_, GetChunkServerStat(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), Return(true))); EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(20 * 
1024), - Return(true))); + .WillOnce(DoAll(SetArgPointee<1>(20 * 1024), Return(true))); testObj_->UpdateTopologyMetrics(); @@ -283,9 +244,9 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1024, gChunkServerMetrics[0x41]->chunkSizeUsedBytes.get_value()); ASSERT_EQ(1024, gChunkServerMetrics[0x41]->chunkSizeLeftBytes.get_value()); ASSERT_EQ(1024, - gChunkServerMetrics[0x41]->chunkSizeTrashedBytes.get_value()); + gChunkServerMetrics[0x41]->chunkSizeTrashedBytes.get_value()); ASSERT_EQ(1024 * 3, - gChunkServerMetrics[0x41]->chunkSizeTotalBytes.get_value()); + gChunkServerMetrics[0x41]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(2, gChunkServerMetrics[0x42]->scatterWidth.get_value()); ASSERT_EQ(1, gChunkServerMetrics[0x42]->copysetNum.get_value()); @@ -301,9 +262,9 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1024, gChunkServerMetrics[0x42]->chunkSizeUsedBytes.get_value()); ASSERT_EQ(1024, gChunkServerMetrics[0x42]->chunkSizeLeftBytes.get_value()); ASSERT_EQ(1024, - gChunkServerMetrics[0x42]->chunkSizeTrashedBytes.get_value()); + gChunkServerMetrics[0x42]->chunkSizeTrashedBytes.get_value()); ASSERT_EQ(1024 * 3, - gChunkServerMetrics[0x42]->chunkSizeTotalBytes.get_value()); + gChunkServerMetrics[0x42]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(2, gChunkServerMetrics[0x43]->scatterWidth.get_value()); ASSERT_EQ(1, gChunkServerMetrics[0x43]->copysetNum.get_value()); @@ -319,43 +280,75 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1024, gChunkServerMetrics[0x43]->chunkSizeUsedBytes.get_value()); ASSERT_EQ(1024, gChunkServerMetrics[0x43]->chunkSizeLeftBytes.get_value()); ASSERT_EQ(1024, - gChunkServerMetrics[0x43]->chunkSizeTrashedBytes.get_value()); + gChunkServerMetrics[0x43]->chunkSizeTrashedBytes.get_value()); ASSERT_EQ(1024 * 3, - gChunkServerMetrics[0x43]->chunkSizeTotalBytes.get_value()); + gChunkServerMetrics[0x43]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(1, gLogicalPoolMetrics.size()); - ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId]->serverNum.get_value()); //NOLINT - ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId]->chunkServerNum.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNum.get_value()); //NOLINT - ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId]->scatterWidthAvg.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->scatterWidthVariance.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->scatterWidthStandardDeviation.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->scatterWidthRange.get_value()); //NOLINT - ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId]->scatterWidthMin.get_value()); //NOLINT - ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId]->scatterWidthMax.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNumAvg.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->copysetNumVariance.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->copysetNumStandardDeviation.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->copysetNumRange.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNumMin.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNumMax.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumAvg.get_value()); //NOLINT - ASSERT_EQ(0, 
gLogicalPoolMetrics[logicalPoolId]->leaderNumVariance.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumStandardDeviation.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumRange.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumMin.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumMax.get_value()); //NOLINT - ASSERT_EQ(100 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->diskCapacity.get_value()); //NOLINT - ASSERT_EQ(20 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->diskAlloc.get_value()); //NOLINT - ASSERT_EQ(10 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->diskUsed.get_value()); //NOLINT - - ASSERT_EQ(1024 * 3, + ASSERT_EQ( + 3, + gLogicalPoolMetrics[logicalPoolId]->serverNum.get_value()); // NOLINT + ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId] + ->chunkServerNum.get_value()); // NOLINT + ASSERT_EQ( + 1, + gLogicalPoolMetrics[logicalPoolId]->copysetNum.get_value()); // NOLINT + ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthAvg.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthVariance.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthStandardDeviation.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthRange.get_value()); // NOLINT + ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthMin.get_value()); // NOLINT + ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthMax.get_value()); // NOLINT + ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumAvg.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumVariance.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumStandardDeviation.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumRange.get_value()); // NOLINT + ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumMin.get_value()); // NOLINT + ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumMax.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumAvg.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumVariance.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumStandardDeviation.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumRange.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumMin.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumMax.get_value()); // NOLINT + ASSERT_EQ(100 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId] + ->diskCapacity.get_value()); // NOLINT + ASSERT_EQ( + 20 * 1024 * 3, + gLogicalPoolMetrics[logicalPoolId]->diskAlloc.get_value()); // NOLINT + ASSERT_EQ( + 10 * 1024 * 3, + gLogicalPoolMetrics[logicalPoolId]->diskUsed.get_value()); // NOLINT + + ASSERT_EQ( + 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->chunkSizeUsedBytes.get_value()); - ASSERT_EQ(1024 * 3, + ASSERT_EQ( + 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->chunkSizeLeftBytes.get_value()); - ASSERT_EQ(1024 * 3, + ASSERT_EQ( + 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->chunkSizeTrashedBytes.get_value()); - ASSERT_EQ(1024 * 9, + ASSERT_EQ( + 1024 * 9, gLogicalPoolMetrics[logicalPoolId]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(3, 
gLogicalPoolMetrics[logicalPoolId]->readIOPS.get_value()); @@ -372,7 +365,7 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1, gClusterMetrics->copysetNum.get_value()); } -TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { +TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; @@ -396,7 +389,6 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - ChunkServerStat stat1; CopysetStat cstat1; stat1.leaderCount = 1; @@ -414,8 +406,7 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { stat1.copysetStats.push_back(cstat1); EXPECT_CALL(*topologyStat_, GetChunkServerStat(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), Return(true))); testObj_->UpdateTopologyMetrics(); diff --git a/test/resources.list b/test/resources.list index 9be11dbb07..20b047da17 100644 --- a/test/resources.list +++ b/test/resources.list @@ -18,30 +18,30 @@ Used port list: # client - 9101: session service 测试 - 9102: applyindex service 测试 - 9103: snapshot service 测试 - 9104: client端其他测试 - 9105: client workflow测试mds占用 - 9106: client workflow测试chunkserver占用 - 9107: client workflow测试chunkserver占用 - 9108: client workflow测试chunkserver占用 - 9109: request scheduler测试占用 - 9110/9111/9112: TestLibcbdLibcurve测试占用 - 9115/9116/9117: TestLibcurveInterface测试占用 - - 9120: mds 接口测试 - 9121: mds 接口测试 - 9122: mds 接口测试 - 9123: mds 接口测试 - 9130: metric测试 - 9131: metric测试 - 9132: metric测试 - 9140: metric测试 - 9141: metric测试 - 9142: metric测试 - 9150/9151 ChunkserverUnstableTest - 19151/19110/19111/19112 curveClient测试 + 9101: session service tests + 9102: applyindex service tests + 9103: snapshot service tests + 9104: other client tests + 9105: client workflow tests, used by MDS + 9106: client workflow tests, used by chunkserver + 9107: client workflow tests, used by chunkserver + 9108: client workflow tests, used by chunkserver + 9109: used by request scheduler tests + 9110/9111/9112: used by TestLibcbdLibcurve tests + 9115/9116/9117: used by TestLibcurveInterface tests + + 9120: MDS interface tests + 9121: MDS interface tests + 9122: MDS interface tests + 9123: MDS interface tests + 9130: metric tests + 9131: metric tests + 9132: metric tests + 9140: metric tests + 9141: metric tests + 9142: metric tests + 9150/9151: ChunkserverUnstableTest + 19151/19110/19111/19112: curveClient tests client_test_unittest: 21000 diff --git a/test/snapshotcloneserver/test_clone_core.cpp b/test/snapshotcloneserver/test_clone_core.cpp index f57c2d15c0..882905855d 100644 --- a/test/snapshotcloneserver/test_clone_core.cpp +++ b/test/snapshotcloneserver/test_clone_core.cpp @@ -20,25 +20,24 @@ * Author: xuchaojie */ -#include #include +#include +#include "src/common/location_operator.h" +#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/clone/clone_core.h" #include "src/snapshotcloneserver/clone/clone_task.h" -#include "src/common/snapshotclone/snapshotclone_define.h" -#include "src/common/location_operator.h" - #include "test/snapshotcloneserver/mock_snapshot_server.h" using ::curve::common::LocationOperator; -using ::testing::Return; using ::testing::_; -using ::testing::AnyOf; using ::testing::AllOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; +using
::testing::AnyOf; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace snapshotcloneserver { @@ -51,10 +50,8 @@ class TestCloneCoreImpl : public ::testing::Test { virtual ~TestCloneCoreImpl() {} virtual void SetUp() { - snapshotRef_ = - std::make_shared(); - cloneRef_ = - std::make_shared(); + snapshotRef_ = std::make_shared(); + cloneRef_ = std::make_shared(); client_ = std::make_shared(); metaStore_ = std::make_shared(); dataStore_ = std::make_shared(); @@ -65,14 +62,9 @@ class TestCloneCoreImpl : public ::testing::Test { option.recoverChunkConcurrency = 2; option.clientAsyncMethodRetryTimeSec = 1; option.clientAsyncMethodRetryIntervalMs = 500; - core_ = std::make_shared(client_, - metaStore_, - dataStore_, - snapshotRef_, - cloneRef_, - option); - EXPECT_CALL(*client_, Mkdir(_, _)) - .WillOnce(Return(LIBCURVE_ERROR::OK)); + core_ = std::make_shared( + client_, metaStore_, dataStore_, snapshotRef_, cloneRef_, option); + EXPECT_CALL(*client_, Mkdir(_, _)).WillOnce(Return(LIBCURVE_ERROR::OK)); ASSERT_EQ(core_->Init(), 0); } @@ -86,66 +78,47 @@ class TestCloneCoreImpl : public ::testing::Test { } protected: - // 辅助mock函数 + // Auxiliary mock function void MockBuildFileInfoFromSnapshotSuccess( std::shared_ptr task); - void MockBuildFileInfoFromFileSuccess( - std::shared_ptr task); + void MockBuildFileInfoFromFileSuccess(std::shared_ptr task); - void MockCreateCloneFileSuccess( - std::shared_ptr task); + void MockCreateCloneFileSuccess(std::shared_ptr task); - void MockCloneMetaSuccess( - std::shared_ptr task); + void MockCloneMetaSuccess(std::shared_ptr task); - void MockCreateCloneChunkSuccess( - std::shared_ptr task); + void MockCreateCloneChunkSuccess(std::shared_ptr task); - void MockCompleteCloneMetaSuccess( - std::shared_ptr task); + void MockCompleteCloneMetaSuccess(std::shared_ptr task); - void MockRecoverChunkSuccess( - std::shared_ptr task); + void MockRecoverChunkSuccess(std::shared_ptr task); - void MockChangeOwnerSuccess( - std::shared_ptr task); + void MockChangeOwnerSuccess(std::shared_ptr task); - void MockRenameCloneFileSuccess( - std::shared_ptr task); + void MockRenameCloneFileSuccess(std::shared_ptr task); - void MockCompleteCloneFileSuccess( - std::shared_ptr task); + void MockCompleteCloneFileSuccess(std::shared_ptr task); - void MockBuildFileInfoFromSnapshotFail( - std::shared_ptr task); + void MockBuildFileInfoFromSnapshotFail(std::shared_ptr task); - void MockBuildFileInfoFromFileFail( - std::shared_ptr task); + void MockBuildFileInfoFromFileFail(std::shared_ptr task); - void MockCreateCloneFileFail( - std::shared_ptr task); + void MockCreateCloneFileFail(std::shared_ptr task); - void MockCloneMetaFail( - std::shared_ptr task); + void MockCloneMetaFail(std::shared_ptr task); - void MockCreateCloneChunkFail( - std::shared_ptr task); + void MockCreateCloneChunkFail(std::shared_ptr task); - void MockCompleteCloneMetaFail( - std::shared_ptr task); + void MockCompleteCloneMetaFail(std::shared_ptr task); - void MockRecoverChunkFail( - std::shared_ptr task); + void MockRecoverChunkFail(std::shared_ptr task); - void MockChangeOwnerFail( - std::shared_ptr task); + void MockChangeOwnerFail(std::shared_ptr task); - void MockRenameCloneFileFail( - std::shared_ptr task); + void MockRenameCloneFileFail(std::shared_ptr task); - void MockCompleteCloneFileFail( - std::shared_ptr task); + void MockCompleteCloneFileFail(std::shared_ptr task); protected: std::shared_ptr core_; @@ -157,9 +130,8 @@ class 
TestCloneCoreImpl : public ::testing::Test { SnapshotCloneServerOptions option; }; - TEST_F(TestCloneCoreImpl, TestClonePreForSnapSuccess) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -171,16 +143,13 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapSuccess) { SnapshotInfo snap("id1", "user1", "file1", "snap1"); snap.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); - EXPECT_CALL(*metaStore_, AddCloneInfo(_)) - .WillOnce(Return(kErrCodeSuccess)); + EXPECT_CALL(*metaStore_, AddCloneInfo(_)).WillOnce(Return(kErrCodeSuccess)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(1, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -188,35 +157,25 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapSuccess) { } TEST_F(TestCloneCoreImpl, TestClonePreForSnapTaskExist) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; CloneInfo cloneInfoOut; std::vector cloneInfoList; - CloneInfo info1("taskid1", - user, - CloneTaskType::kClone, - source, - destination, - kDefaultPoolset, - 100, - 101, - 0, - CloneFileType::kSnapshot, - lazyFlag, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo info1("taskid1", user, CloneTaskType::kClone, source, destination, + kDefaultPoolset, 100, 101, 0, CloneFileType::kSnapshot, + lazyFlag, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cloneInfoList.push_back(info1); EXPECT_CALL(*metaStore_, GetCloneInfoByFileName(destination, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cloneInfoList), - Return(kErrCodeSuccess))); + .WillOnce( + DoAll(SetArgPointee<1>(cloneInfoList), Return(kErrCodeSuccess))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeTaskExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -224,35 +183,25 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapTaskExist) { } TEST_F(TestCloneCoreImpl, TestClonePreForSnapFailOnFileExist) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; CloneInfo cloneInfoOut; std::vector cloneInfoList; - CloneInfo info1("taskid1", - user, - CloneTaskType::kRecover, - source, - destination, - "", - 100, - 101, - 0, - CloneFileType::kSnapshot, - lazyFlag, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo info1("taskid1", user, CloneTaskType::kRecover, source, + destination, "", 100, 101, 0, CloneFileType::kSnapshot, + lazyFlag, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cloneInfoList.push_back(info1); EXPECT_CALL(*metaStore_, GetCloneInfoByFileName(destination, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cloneInfoList), - Return(kErrCodeSuccess))); + .WillOnce( + 
DoAll(SetArgPointee<1>(cloneInfoList), Return(kErrCodeSuccess))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeFileExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -260,7 +209,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapFailOnFileExist) { } TEST_F(TestCloneCoreImpl, TestClonePreForFileSuccess) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -283,15 +232,13 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSuccess) { fInfo.filestatus = FileStatus::Created; fInfo.poolset = kDefaultPoolset; EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) - .WillOnce(DoAll(SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); - EXPECT_CALL(*metaStore_, AddCloneInfo(_)) - .WillOnce(Return(kErrCodeSuccess)); + EXPECT_CALL(*metaStore_, AddCloneInfo(_)).WillOnce(Return(kErrCodeSuccess)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -299,7 +246,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSuccess) { } TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidSnapshot) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -311,13 +258,11 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidSnapshot) { SnapshotInfo snap("id1", "user2", "file1", "snap1"); snap.SetStatus(Status::pending); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeInvalidSnapshot, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -325,7 +270,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidSnapshot) { } TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidUser) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -336,20 +281,18 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidUser) { SnapshotInfo snap("id1", "user2", "file1", "snap1"); snap.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, 
destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeInvalidUser, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreAddCloneInfoFail) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -360,23 +303,21 @@ TEST_F(TestCloneCoreImpl, TestClonePreAddCloneInfoFail) { SnapshotInfo snap("id1", "user1", "file1", "snap1"); snap.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); EXPECT_CALL(*metaStore_, AddCloneInfo(_)) .WillOnce(Return(kErrCodeInternalError)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeInternalError, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreForFileNotExist) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -390,16 +331,16 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileNotExist) { EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) .WillOnce(Return(-LIBCURVE_ERROR::NOTEXIST)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeFileNotExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreForFileFail) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -413,16 +354,16 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileFail) { EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) .WillOnce(Return(-LIBCURVE_ERROR::FAILED)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeInternalError, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreDestinationExist) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -431,16 +372,16 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationExist) { EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _)) .WillOnce(Return(LIBCURVE_ERROR::OK)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int 
ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeFileExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreDestinationAndTaskExist) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -448,42 +389,30 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationAndTaskExist) { uint64_t destId = 10086; std::vector cloneInfoList; - CloneInfo info1("taskid1", - user, - CloneTaskType::kClone, - source, - destination, - kDefaultPoolset, - 100, - destId, - 0, - CloneFileType::kFile, - lazyFlag, - CloneStep::kRecoverChunk, - CloneStatus::metaInstalled); + CloneInfo info1("taskid1", user, CloneTaskType::kClone, source, destination, + kDefaultPoolset, 100, destId, 0, CloneFileType::kFile, + lazyFlag, CloneStep::kRecoverChunk, + CloneStatus::metaInstalled); cloneInfoList.push_back(info1); EXPECT_CALL(*metaStore_, GetCloneInfoByFileName(destination, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cloneInfoList), - Return(kErrCodeSuccess))); + .WillOnce( + DoAll(SetArgPointee<1>(cloneInfoList), Return(kErrCodeSuccess))); FInfo fInfo; fInfo.id = destId; EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeTaskExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreDestinationExistButInodeidNotEqual) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -491,42 +420,30 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationExistButInodeidNotEqual) { uint64_t destId = 10086; std::vector cloneInfoList; - CloneInfo info1("taskid1", - user, - CloneTaskType::kClone, - source, - destination, - kDefaultPoolset, - 100, - destId, - 0, - CloneFileType::kFile, - lazyFlag, - CloneStep::kRecoverChunk, - CloneStatus::metaInstalled); + CloneInfo info1("taskid1", user, CloneTaskType::kClone, source, destination, + kDefaultPoolset, 100, destId, 0, CloneFileType::kFile, + lazyFlag, CloneStep::kRecoverChunk, + CloneStatus::metaInstalled); cloneInfoList.push_back(info1); EXPECT_CALL(*metaStore_, GetCloneInfoByFileName(destination, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cloneInfoList), - Return(kErrCodeSuccess))); + .WillOnce( + DoAll(SetArgPointee<1>(cloneInfoList), Return(kErrCodeSuccess))); FInfo fInfo; fInfo.id = destId + 1; EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, 
destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeFileExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestRecoverPreDestinationNotExist) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -535,16 +452,16 @@ TEST_F(TestCloneCoreImpl, TestRecoverPreDestinationNotExist) { EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _)) .WillOnce(Return(-LIBCURVE_ERROR::NOTEXIST)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kRecover, "", &cloneInfoOut); + int ret = + core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kRecover, "", &cloneInfoOut); ASSERT_EQ(kErrCodeFileNotExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestRecoverPreForSnapSuccess) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -556,23 +473,20 @@ TEST_F(TestCloneCoreImpl, TestRecoverPreForSnapSuccess) { SnapshotInfo snap("id1", "user1", destination, "snap1"); snap.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); - EXPECT_CALL(*metaStore_, AddCloneInfo(_)) - .WillOnce(Return(kErrCodeSuccess)); + EXPECT_CALL(*metaStore_, AddCloneInfo(_)).WillOnce(Return(kErrCodeSuccess)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kRecover, "", &cloneInfoOut); + int ret = + core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kRecover, "", &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(1, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestRecoverPreForSnapDestNotMatch) { - const UUID &source = "id1"; + const UUID& source = "id1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -584,20 +498,18 @@ TEST_F(TestCloneCoreImpl, TestRecoverPreForSnapDestNotMatch) { SnapshotInfo snap("id1", "user1", "file1", "snap1"); snap.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(source, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snap), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snap), Return(kErrCodeSuccess))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kRecover, "", &cloneInfoOut); + int ret = + core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kRecover, "", &cloneInfoOut); ASSERT_EQ(kErrCodeInvalidSnapshot, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreDestinationFileInternalError) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -606,16 +518,16 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationFileInternalError) { 
EXPECT_CALL(*client_, GetFileInfo(destination, option.mdsRootUser, _)) .WillOnce(Return(-LIBCURVE_ERROR::UNKNOWN)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeInternalError, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreForFileSourceFileStatusInvalid) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -631,16 +543,16 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSourceFileStatusInvalid) { EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeFileStatusInvalid, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, TestClonePreForFileSetCloneFileStatusReturnNotExist) { - const UUID &source = "fi1e1"; + const UUID& source = "fi1e1"; const std::string user = "user1"; const std::string destination = "destination1"; bool lazyFlag = true; @@ -662,29 +574,26 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSetCloneFileStatusReturnNotExist) { fInfo.filename = "file1"; fInfo.filestatus = FileStatus::Created; EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) - .WillOnce(DoAll(SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); - EXPECT_CALL(*metaStore_, AddCloneInfo(_)) - .WillOnce(Return(kErrCodeSuccess)); + EXPECT_CALL(*metaStore_, AddCloneInfo(_)).WillOnce(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, SetCloneFileStatus(source, - FileStatus::BeingCloned, - option.mdsRootUser)) + EXPECT_CALL(*client_, SetCloneFileStatus(source, FileStatus::BeingCloned, + option.mdsRootUser)) .WillOnce(Return(-LIBCURVE_ERROR::NOTEXIST)); - int ret = core_->CloneOrRecoverPre( - source, user, destination, lazyFlag, - CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); + int ret = core_->CloneOrRecoverPre(source, user, destination, lazyFlag, + CloneTaskType::kClone, kDefaultPoolset, + &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(1, core_->GetCloneRef()->GetRef(source)); } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStage1SuccessForCloneBySnapshot) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + HandleCloneOrRecoverTaskStage1SuccessForCloneBySnapshot) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -705,7 +614,7 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - 
HandleCloneOrRecoverTaskStage2SuccessForCloneBySnapshot) { + HandleCloneOrRecoverTaskStage2SuccessForCloneBySnapshot) { CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", kDefaultPoolset, 1, 2, 100, CloneFileType::kSnapshot, true, CloneStep::kRecoverChunk, CloneStatus::cloning); @@ -726,7 +635,7 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskSuccessForCloneBySnapshotNotLazy) { + HandleCloneOrRecoverTaskSuccessForCloneBySnapshotNotLazy) { CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::cloning); @@ -752,9 +661,9 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskFailOnBuildFileInfoFromSnapshot) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + HandleCloneOrRecoverTaskFailOnBuildFileInfoFromSnapshot) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -769,10 +678,9 @@ TEST_F(TestCloneCoreImpl, core_->HandleCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskFailOnGetSnapshotInfo) { - CloneInfo cinfo("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); +TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnGetSnapshotInfo) { + CloneInfo cinfo("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); cinfo.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -792,22 +700,20 @@ TEST_F(TestCloneCoreImpl, uint64_t filelength = 2 * segmentsize; uint64_t time = 100; Status status = Status::done; - SnapshotInfo info(uuid, user, fileName, desc, - seqnum, chunksize, segmentsize, filelength, 0, 0, "default", - time, status); + SnapshotInfo info(uuid, user, fileName, desc, seqnum, chunksize, + segmentsize, filelength, 0, 0, "default", time, status); EXPECT_CALL(*metaStore_, GetSnapshotInfo(_, _)) - .WillRepeatedly(DoAll( - SetArgPointee<1>(info), - Return(kErrCodeInternalError))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(info), Return(kErrCodeInternalError))); core_->HandleCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStage1SuccessForRecoverBySnapshot) { - CloneInfo info("id1", "user1", CloneTaskType::kRecover, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + HandleCloneOrRecoverTaskStage1SuccessForRecoverBySnapshot) { + CloneInfo info("id1", "user1", CloneTaskType::kRecover, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::recovering); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -827,7 +733,7 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStage2SuccessForRecoverBySnapshot) { + HandleCloneOrRecoverTaskStage2SuccessForRecoverBySnapshot) { CloneInfo info("id1", "user1", CloneTaskType::kRecover, "snapid1", "file1", kDefaultPoolset, 1, 2, 100, CloneFileType::kSnapshot, true, CloneStep::kRecoverChunk, CloneStatus::recovering); @@ -847,10 +753,9 @@ TEST_F(TestCloneCoreImpl, core_->HandleCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - 
HandleCloneOrRecoverTaskFailOnCreateCloneFile) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); +TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCreateCloneFile) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -867,8 +772,8 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCloneMeta) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -886,8 +791,8 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCloneMeta) { } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCreateCloneChunk) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -906,8 +811,8 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCreateCloneChunk) { } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCompleteCloneMeta) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -927,8 +832,8 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCompleteCloneMeta) { } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnChangeOwner) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -949,8 +854,8 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnChangeOwner) { } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFileOnRenameCloneFile) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1011,8 +916,7 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCompleteCloneFail) { core_->HandleCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStage1SuccessForCloneByFile) { +TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStage1SuccessForCloneByFile) { CloneInfo info("id1", "user1", CloneTaskType::kClone, 
"snapid1", "file1", kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); @@ -1034,11 +938,10 @@ TEST_F(TestCloneCoreImpl, core_->HandleCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStage2SuccessForCloneByFile) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, 1, 2, 100, CloneFileType::kFile, true, - CloneStep::kRecoverChunk, CloneStatus::cloning); +TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStage2SuccessForCloneByFile) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, 1, 2, 100, CloneFileType::kFile, true, + CloneStep::kRecoverChunk, CloneStatus::cloning); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1056,9 +959,9 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskForCloneByFileFailOnBuildFileInfoFromFile) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kFile, true); + HandleCloneOrRecoverTaskForCloneByFileFailOnBuildFileInfoFromFile) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1074,9 +977,9 @@ TEST_F(TestCloneCoreImpl, } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskForCloneByFileFailOnInvalidSegmentSize) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kFile, true); + HandleCloneOrRecoverTaskForCloneByFileFailOnInvalidSegmentSize) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1095,17 +998,16 @@ TEST_F(TestCloneCoreImpl, fInfo.owner = "user1"; fInfo.filename = "file1"; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); core_->HandleCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskForCloneByFileFailOnInvalidFileLen) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kFile, true); + HandleCloneOrRecoverTaskForCloneByFileFailOnInvalidFileLen) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1124,17 +1026,15 @@ TEST_F(TestCloneCoreImpl, fInfo.owner = "user1"; fInfo.filename = "file1"; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); core_->HandleCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - HandleCloneOrRecoverTaskStepUnknown) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); +TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStepUnknown) { + CloneInfo info("id1", "user1", 
CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); info.SetNextStep(static_cast(8)); auto cloneMetric = std::make_shared("id1"); @@ -1163,14 +1063,12 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotSuccess( uint64_t filelength = 1 * segmentsize; uint64_t time = 100; Status status = Status::done; - SnapshotInfo info(uuid, user, fileName, desc, - seqnum, chunksize, segmentsize, filelength, 0, 0, kDefaultPoolset, - time, status); + SnapshotInfo info(uuid, user, fileName, desc, seqnum, chunksize, + segmentsize, filelength, 0, 0, kDefaultPoolset, time, + status); EXPECT_CALL(*metaStore_, GetSnapshotInfo(_, _)) - .WillRepeatedly(DoAll( - SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); if (CloneTaskType::kRecover == task->GetCloneInfo().GetTaskType()) { FInfo fInfo; @@ -1178,9 +1076,8 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotSuccess( fInfo.seqnum = 100; fInfo.poolset = kDefaultPoolset; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); } ChunkIndexData snapMeta; @@ -1191,18 +1088,15 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotSuccess( snapMeta.PutChunkDataName(chunk2); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snapMeta), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapMeta), Return(kErrCodeSuccess))); FInfo fInfo; fInfo.id = 100; fInfo.seqnum = 100; fInfo.poolset = kDefaultPoolset; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); } void TestCloneCoreImpl::MockBuildFileInfoFromFileSuccess( @@ -1216,9 +1110,8 @@ void TestCloneCoreImpl::MockBuildFileInfoFromFileSuccess( fInfo.owner = "user1"; fInfo.filename = "file1"; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); } void TestCloneCoreImpl::MockCreateCloneFileSuccess( @@ -1226,8 +1119,8 @@ void TestCloneCoreImpl::MockCreateCloneFileSuccess( FInfo fInfoOut; fInfoOut.id = 100; EXPECT_CALL(*client_, CreateCloneFile(_, _, _, _, _, _, _, _, _, _)) - .WillOnce(DoAll(SetArgPointee<9>(fInfoOut), - Return(LIBCURVE_ERROR::OK))); + .WillOnce( + DoAll(SetArgPointee<9>(fInfoOut), Return(LIBCURVE_ERROR::OK))); } void TestCloneCoreImpl::MockCloneMetaSuccess( @@ -1238,33 +1131,25 @@ void TestCloneCoreImpl::MockCloneMetaSuccess( segInfoOut.segmentsize = segmentsize; segInfoOut.chunksize = chunksize; segInfoOut.startoffset = 0; - segInfoOut.chunkvec = {{1, 1, 1}, - {2, 2, 1}}; + segInfoOut.chunkvec = {{1, 1, 1}, {2, 2, 1}}; segInfoOut.lpcpIDInfo.lpid = 1; segInfoOut.lpcpIDInfo.cpidVec = {1, 2}; EXPECT_CALL(*client_, GetOrAllocateSegmentInfo(_, 0, _, _, _)) .WillRepeatedly( - DoAll(SetArgPointee<4>(segInfoOut), - Return(LIBCURVE_ERROR::OK))); + DoAll(SetArgPointee<4>(segInfoOut), Return(LIBCURVE_ERROR::OK))); } void TestCloneCoreImpl::MockCreateCloneChunkSuccess( std::shared_ptr task) { std::string location1, location2; if (CloneFileType::kSnapshot == task->GetCloneInfo().GetFileType()) { - location1 = 
LocationOperator::GenerateS3Location( - "file1-0-1"); - location2 = LocationOperator::GenerateS3Location( - "file1-1-1"); + location1 = LocationOperator::GenerateS3Location("file1-0-1"); + location2 = LocationOperator::GenerateS3Location("file1-1-1"); } else { - location1 = - LocationOperator::GenerateCurveLocation( - task->GetCloneInfo().GetSrc(), - std::stoull("0")); - location2 = - LocationOperator::GenerateCurveLocation( - task->GetCloneInfo().GetSrc(), - std::stoull("1048576")); + location1 = LocationOperator::GenerateCurveLocation( + task->GetCloneInfo().GetSrc(), std::stoull("0")); + location2 = LocationOperator::GenerateCurveLocation( + task->GetCloneInfo().GetSrc(), std::stoull("1048576")); } uint32_t correctSn = 0; @@ -1273,18 +1158,15 @@ void TestCloneCoreImpl::MockCreateCloneChunkSuccess( } else { correctSn = 100; } - EXPECT_CALL(*client_, CreateCloneChunk( - AnyOf(location1, location2), _, _, correctSn, _, _)) + EXPECT_CALL(*client_, CreateCloneChunk(AnyOf(location1, location2), _, _, + correctSn, _, _)) .WillRepeatedly(DoAll( - Invoke([](const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), + Invoke([](const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, uint64_t csn, + uint64_t chunkSize, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), Return(LIBCURVE_ERROR::OK))); } @@ -1297,15 +1179,12 @@ void TestCloneCoreImpl::MockCompleteCloneMetaSuccess( void TestCloneCoreImpl::MockRecoverChunkSuccess( std::shared_ptr task) { EXPECT_CALL(*client_, RecoverChunk(_, _, _, _)) - .WillRepeatedly(DoAll( - Invoke([](const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK), - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK), scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); } void TestCloneCoreImpl::MockChangeOwnerSuccess( @@ -1338,22 +1217,18 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotFail( uint64_t filelength = 2 * segmentsize; uint64_t time = 100; Status status = Status::done; - SnapshotInfo info(uuid, user, fileName, desc, - seqnum, chunksize, segmentsize, filelength, 0, 0, "default", - time, status); + SnapshotInfo info(uuid, user, fileName, desc, seqnum, chunksize, + segmentsize, filelength, 0, 0, "default", time, status); EXPECT_CALL(*metaStore_, GetSnapshotInfo(_, _)) - .WillRepeatedly(DoAll( - SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); if (CloneTaskType::kRecover == task->GetCloneInfo().GetTaskType()) { FInfo fInfo; fInfo.id = 100; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); } EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) @@ -1362,9 +1237,8 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotFail( FInfo fInfo; fInfo.id = 100; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(-LIBCURVE_ERROR::FAILED))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(-LIBCURVE_ERROR::FAILED))); } void 
TestCloneCoreImpl::MockBuildFileInfoFromFileFail( @@ -1378,9 +1252,8 @@ void TestCloneCoreImpl::MockBuildFileInfoFromFileFail( fInfo.owner = "user1"; fInfo.filename = "file1"; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillRepeatedly(DoAll( - SetArgPointee<2>(fInfo), - Return(-LIBCURVE_ERROR::FAILED))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(fInfo), Return(-LIBCURVE_ERROR::FAILED))); } void TestCloneCoreImpl::MockCreateCloneFileFail( @@ -1388,12 +1261,11 @@ void TestCloneCoreImpl::MockCreateCloneFileFail( FInfo fInfoOut; fInfoOut.id = 100; EXPECT_CALL(*client_, CreateCloneFile(_, _, _, _, _, _, _, _, _, _)) - .WillOnce(DoAll(SetArgPointee<9>(fInfoOut), - Return(-LIBCURVE_ERROR::FAILED))); + .WillOnce( + DoAll(SetArgPointee<9>(fInfoOut), Return(-LIBCURVE_ERROR::FAILED))); } -void TestCloneCoreImpl::MockCloneMetaFail( - std::shared_ptr task) { +void TestCloneCoreImpl::MockCloneMetaFail(std::shared_ptr task) { EXPECT_CALL(*client_, GetOrAllocateSegmentInfo(_, _, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::FAILED)); } @@ -1402,15 +1274,12 @@ void TestCloneCoreImpl::MockCreateCloneChunkFail( std::shared_ptr task) { EXPECT_CALL(*client_, CreateCloneChunk(_, _, _, _, _, _)) .WillRepeatedly(DoAll( - Invoke([](const std::string &location, - const ChunkIDInfo &chunkidinfo, - uint64_t sn, - uint64_t csn, - uint64_t chunkSize, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), + Invoke([](const std::string& location, + const ChunkIDInfo& chunkidinfo, uint64_t sn, uint64_t csn, + uint64_t chunkSize, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), Return(-LIBCURVE_ERROR::FAILED))); } @@ -1424,13 +1293,9 @@ void TestCloneCoreImpl::MockRecoverChunkFail( std::shared_ptr task) { EXPECT_CALL(*client_, RecoverChunk(_, _, _, _)) .WillRepeatedly(DoAll( - Invoke([](const ChunkIDInfo &chunkidinfo, - uint64_t offset, - uint64_t len, - SnapCloneClosure* scc){ - scc->Run(); - }), - Return(-LIBCURVE_ERROR::FAILED))); + Invoke([](const ChunkIDInfo& chunkidinfo, uint64_t offset, + uint64_t len, SnapCloneClosure* scc) { scc->Run(); }), + Return(-LIBCURVE_ERROR::FAILED))); } void TestCloneCoreImpl::MockChangeOwnerFail( @@ -1452,7 +1317,7 @@ void TestCloneCoreImpl::MockCompleteCloneFileFail( } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreSuccess) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1462,20 +1327,17 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreSuccess) { cinfo.SetStatus(CloneStatus::error); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(0))); EXPECT_CALL(*metaStore_, UpdateCloneInfo(_)) .WillOnce(Return(kErrCodeSuccess)); - int ret = core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreGetCloneInfoFail) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1485,17 +1347,14 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreGetCloneInfoFail) { cinfo.SetStatus(CloneStatus::error); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(-1))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(-1))); - int ret = 
core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(0, ret); } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreInvalidUser) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1505,17 +1364,14 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreInvalidUser) { cinfo.SetStatus(CloneStatus::error); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(0))); - int ret = core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(kErrCodeInvalidUser, ret); } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreCannotCleanUnFinished) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1525,17 +1381,14 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreCannotCleanUnFinished) { cinfo.SetStatus(CloneStatus::cloning); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(0))); - int ret = core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(kErrCodeCannotCleanCloneUnfinished, ret); } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreTaskExist) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1545,17 +1398,14 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreTaskExist) { cinfo.SetStatus(CloneStatus::errorCleaning); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(0))); - int ret = core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(kErrCodeTaskExist, ret); } TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreUpdateCloneInfoFail) { - const TaskIdType &taskId = "id1"; + const TaskIdType& taskId = "id1"; const std::string user = "user1"; CloneInfo cloneInfoOut; @@ -1565,21 +1415,18 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreUpdateCloneInfoFail) { cinfo.SetStatus(CloneStatus::error); EXPECT_CALL(*metaStore_, GetCloneInfo(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(cinfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(cinfo), Return(0))); EXPECT_CALL(*metaStore_, UpdateCloneInfo(_)) .WillOnce(Return(kErrCodeInternalError)); - int ret = core_->CleanCloneOrRecoverTaskPre( - user, taskId, &cloneInfoOut); + int ret = core_->CleanCloneOrRecoverTaskPre(user, taskId, &cloneInfoOut); ASSERT_EQ(kErrCodeInternalError, ret); } TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskSuccess) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1589,14 +1436,13 @@ TEST_F(TestCloneCoreImpl, 
TestHandleCleanCloneOrRecoverTaskSuccess) { EXPECT_CALL(*client_, DeleteFile(_, _, _)) .WillOnce(Return(LIBCURVE_ERROR::OK)); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); core_->HandleCleanCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskSuccess2) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1606,14 +1452,13 @@ TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskSuccess2) { EXPECT_CALL(*client_, DeleteFile(_, _, _)) .WillOnce(Return(-LIBCURVE_ERROR::NOTEXIST)); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); core_->HandleCleanCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskLazySuccess) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1624,14 +1469,13 @@ TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskLazySuccess) { .Times(1) .WillOnce(Return(LIBCURVE_ERROR::OK)); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); core_->HandleCleanCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskFail1) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1642,43 +1486,36 @@ TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskFail1) { .Times(1) .WillOnce(Return(-LIBCURVE_ERROR::FAILED)); - EXPECT_CALL(*metaStore_, UpdateCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, UpdateCloneInfo(_)).WillOnce(Return(0)); core_->HandleCleanCloneOrRecoverTask(task); } TEST_F(TestCloneCoreImpl, - TestHandleCleanCloneOrRecoverTaskCleanNotErrorSuccess) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); + TestHandleCleanCloneOrRecoverTaskCleanNotErrorSuccess) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::cleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); std::shared_ptr task = std::make_shared(info, cloneMetric, cloneClosure); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); core_->HandleCleanCloneOrRecoverTask(task); } -TEST_F(TestCloneCoreImpl, - TestCheckFileExists) { 
+TEST_F(TestCloneCoreImpl, TestCheckFileExists) { FInfo fInfo; fInfo.id = 100; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(core_->CheckFileExists("filename", 100), kErrCodeFileExist); EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(core_->CheckFileExists("filename", 10), kErrCodeFileNotExist); @@ -1693,36 +1530,31 @@ TEST_F(TestCloneCoreImpl, ASSERT_EQ(core_->CheckFileExists("filename", 100), kErrCodeInternalError); } -TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoSnapDeleteCloneInfoFail) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(-1)); +TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoSnapDeleteCloneInfoFail) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(-1)); snapshotRef_->IncrementSnapshotRef("snapid1"); ASSERT_EQ(core_->HandleDeleteCloneInfo(info), kErrCodeInternalError); ASSERT_EQ(snapshotRef_->GetSnapshotRef("snapid1"), 1); } -TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoSnapSuccess) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); +TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoSnapSuccess) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::metaInstalled); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); snapshotRef_->IncrementSnapshotRef("snapid1"); ASSERT_EQ(core_->HandleDeleteCloneInfo(info), kErrCodeSuccess); ASSERT_EQ(snapshotRef_->GetSnapshotRef("snapid1"), 0); } TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoFileRefReturnMetainstalledNotTo0) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); + TestHandleDeleteCloneInfoFileRefReturnMetainstalledNotTo0) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "source1", "file1", + kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); cloneRef_->IncrementRef("source1"); cloneRef_->IncrementRef("source1"); ASSERT_EQ(cloneRef_->GetRef("source1"), 2); @@ -1730,26 +1562,22 @@ TEST_F(TestCloneCoreImpl, ASSERT_EQ(cloneRef_->GetRef("source1"), 1); } -TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoFileSetStatusFail) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); +TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoFileSetStatusFail) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "source1", "file1", + kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); cloneRef_->IncrementRef("source1"); ASSERT_EQ(cloneRef_->GetRef("source1"), 1); - 
EXPECT_CALL(*client_, SetCloneFileStatus(_, _, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*client_, SetCloneFileStatus(_, _, _)).WillOnce(Return(-1)); ASSERT_EQ(core_->HandleDeleteCloneInfo(info), kErrCodeInternalError); ASSERT_EQ(cloneRef_->GetRef("source1"), 1); } -TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoFileDeleteCloneInfoFail) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); +TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoFileDeleteCloneInfoFail) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "source1", "file1", + kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(-1)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(-1)); cloneRef_->IncrementRef("source1"); ASSERT_EQ(cloneRef_->GetRef("source1"), 1); EXPECT_CALL(*client_, SetCloneFileStatus(_, _, _)) @@ -1758,13 +1586,11 @@ TEST_F(TestCloneCoreImpl, ASSERT_EQ(cloneRef_->GetRef("source1"), 1); } -TEST_F(TestCloneCoreImpl, - TestHandleDeleteCloneInfoFileSuccess) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); +TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoFileSuccess) { + CloneInfo info("id1", "user1", CloneTaskType::kClone, "source1", "file1", + kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); - EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)).WillOnce(Return(0)); cloneRef_->IncrementRef("source1"); ASSERT_EQ(cloneRef_->GetRef("source1"), 1); EXPECT_CALL(*client_, SetCloneFileStatus(_, _, _)) diff --git a/test/snapshotcloneserver/test_curvefs_client.cpp b/test/snapshotcloneserver/test_curvefs_client.cpp index b4e79b17b0..ec27aa8fe7 100644 --- a/test/snapshotcloneserver/test_curvefs_client.cpp +++ b/test/snapshotcloneserver/test_curvefs_client.cpp @@ -20,9 +20,8 @@ * Author: xuchaojie */ - -#include #include +#include #include "src/snapshotcloneserver/common/curvefs_client.h" #include "test/util/config_generator.h" @@ -40,19 +39,14 @@ class TestCurveFsClientImpl : public ::testing::Test { static void SetUpTestCase() { ClientConfigGenerator gentor(kClientConfigPath); - // 把超时时间和重试次数改小,已使得测试尽快完成 + // Reduce the timeouts and retry counts so that the test completes as + // quickly as possible std::vector options = { - {"mds.listen.addr=127.0.0.1:8888", - "mds.registerToMDS=false", - "mds.rpcTimeoutMS=1", - "mds.maxRPCTimeoutMS=1", - "mds.maxRetryMS=1", - "mds.rpcRetryIntervalUS=1", - "metacache.getLeaderTimeOutMS=1", - "metacache.getLeaderRetry=1", - "metacache.rpcRetryIntervalUS=1", - "chunkserver.opRetryIntervalUS=1", - "chunkserver.opMaxRetry=1", + {"mds.listen.addr=127.0.0.1:8888", "mds.registerToMDS=false", + "mds.rpcTimeoutMS=1", "mds.maxRPCTimeoutMS=1", "mds.maxRetryMS=1", + "mds.rpcRetryIntervalUS=1", "metacache.getLeaderTimeOutMS=1", + "metacache.getLeaderRetry=1", "metacache.rpcRetryIntervalUS=1", + "chunkserver.opRetryIntervalUS=1", "chunkserver.opMaxRetry=1", "chunkserver.rpcTimeoutMS=1", "chunkserver.maxRetrySleepIntervalUS=1", "chunkserver.maxRPCTimeoutMS=1"}, @@ -64,8 +58,7 @@ class TestCurveFsClientImpl : public ::testing::Test { virtual void SetUp() { std::shared_ptr snapClient = std::make_shared(); - std::shared_ptr fileClient = - std::make_shared();
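
Most of the reflowed expectations above are instances of a single gmock idiom: DoAll(SetArgPointee<N>(value), Return(code)) fills an output parameter and supplies the return value in one composite action. A minimal, self-contained sketch of that idiom follows; the MetaStore, MockMetaStore, and GetInfo names are hypothetical stand-ins, not types from this patch.

#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>

using ::testing::_;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;

// Hypothetical interface standing in for the mocked stores/clients above.
class MetaStore {
 public:
    virtual ~MetaStore() = default;
    virtual int GetInfo(const std::string& id, int* out) = 0;
};

class MockMetaStore : public MetaStore {
 public:
    MOCK_METHOD(int, GetInfo, (const std::string& id, int* out), (override));
};

TEST(DoAllIdiomSketch, FillsOutParamThenReturns) {
    MockMetaStore store;
    // SetArgPointee<1> writes 42 through argument index 1 (the out-pointer);
    // Return(0) then supplies the call's return value.
    EXPECT_CALL(store, GetInfo(_, _))
        .WillOnce(DoAll(SetArgPointee<1>(42), Return(0)));

    int out = 0;
    ASSERT_EQ(0, store.GetInfo("id1", &out));
    ASSERT_EQ(42, out);
}

SetArgPointee counts arguments from zero, which is why the expectations above pick indices like SetArgPointee<2> for the third parameter of GetFileInfo.
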
client_ = std::make_shared(snapClient, fileClient); clientOption_.configPath = kClientConfigPath; clientOption_.mdsRootUser = "root"; @@ -75,9 +68,7 @@ class TestCurveFsClientImpl : public ::testing::Test { client_->Init(clientOption_); } - virtual void TearDown() { - client_->UnInit(); - } + virtual void TearDown() { client_->UnInit(); } protected: std::shared_ptr client_; @@ -85,9 +76,7 @@ class TestCurveFsClientImpl : public ::testing::Test { }; struct TestClosure : public SnapCloneClosure { - void Run() { - std::unique_ptr selfGuard(this); - } + void Run() { std::unique_ptr selfGuard(this); } }; TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { @@ -111,35 +100,35 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { SegmentInfo segInfo; ret = client_->GetSnapshotSegmentInfo("file1", "user1", 1, 0, &segInfo); ASSERT_LT(ret, 0); - ret = client_->GetSnapshotSegmentInfo( - "file1", clientOption_.mdsRootUser, 1, 0, &segInfo); + ret = client_->GetSnapshotSegmentInfo("file1", clientOption_.mdsRootUser, 1, + 0, &segInfo); ASSERT_LT(ret, 0); ChunkIDInfo cidinfo; FileStatus fstatus; ret = client_->CheckSnapShotStatus("file1", "user1", 1, &fstatus); ASSERT_LT(ret, 0); - ret = client_->CheckSnapShotStatus( - "file1", clientOption_.mdsRootUser, 1, &fstatus); + ret = client_->CheckSnapShotStatus("file1", clientOption_.mdsRootUser, 1, + &fstatus); ASSERT_LT(ret, 0); ChunkInfoDetail chunkInfo; ret = client_->GetChunkInfo(cidinfo, &chunkInfo); ASSERT_LT(ret, 0); - ret = client_->CreateCloneFile( - "source1", "file1", "user1", 1024, 1, 1024, 0, 0, "default", &fInfo); + ret = client_->CreateCloneFile("source1", "file1", "user1", 1024, 1, 1024, + 0, 0, "default", &fInfo); ASSERT_LT(ret, 0); - ret = client_->CreateCloneFile( - "source1", "file1", clientOption_.mdsRootUser, 1024, 1, 1024, - 0, 0, "default", &fInfo); + ret = + client_->CreateCloneFile("source1", "file1", clientOption_.mdsRootUser, + 1024, 1, 1024, 0, 0, "default", &fInfo); ASSERT_LT(ret, 0); - TestClosure *cb = new TestClosure(); + TestClosure* cb = new TestClosure(); ret = client_->CreateCloneChunk("", cidinfo, 1, 2, 1024, cb); ASSERT_EQ(ret, 0); - TestClosure *cb2 = new TestClosure(); + TestClosure* cb2 = new TestClosure(); ret = client_->RecoverChunk(cidinfo, 0, 1024, cb2); ASSERT_EQ(ret, 0); @@ -159,8 +148,9 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { ret = client_->GetFileInfo("file1", clientOption_.mdsRootUser, &fInfo); ASSERT_LT(ret, 0); - // client 对mds接口无限重试,这两个接口死循环,先注释掉 - // ret = client_->GetOrAllocateSegmentInfo( + // The client retries the mds interface infinitely, and these two interfaces + // loop endlessly. 
They stay commented out here. + // ret = client_->GetOrAllocateSegmentInfo( // true, 0, &fInfo, "user1", &segInfo); // ASSERT_LT(ret, 0); // ret = client_->GetOrAllocateSegmentInfo( @@ -169,8 +159,8 @@ ret = client_->RenameCloneFile("user1", 1, 2, "file1", "file2"); ASSERT_LT(ret, 0); - ret = client_->RenameCloneFile( - clientOption_.mdsRootUser, 1, 2, "file1", "file2"); + ret = client_->RenameCloneFile(clientOption_.mdsRootUser, 1, 2, "file1", + "file2"); ASSERT_LT(ret, 0); ret = client_->DeleteFile("file1", "user1", 1); @@ -187,7 +177,5 @@ ASSERT_LT(ret, 0); } - - } // namespace snapshotcloneserver } // namespace curve diff --git a/test/snapshotcloneserver/test_snapshot_core.cpp b/test/snapshotcloneserver/test_snapshot_core.cpp index d4c40963f1..02e363ee1a 100644 --- a/test/snapshotcloneserver/test_snapshot_core.cpp +++ b/test/snapshotcloneserver/test_snapshot_core.cpp @@ -20,26 +20,24 @@ * Author: xuchaojie */ -#include #include +#include -#include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/snapshot/snapshot_core.h" #include "src/snapshotcloneserver/snapshot/snapshot_task.h" - #include "test/snapshotcloneserver/mock_snapshot_server.h" - namespace curve { namespace snapshotcloneserver { -using ::testing::Return; using ::testing::_; -using ::testing::AnyOf; using ::testing::AllOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; +using ::testing::AnyOf; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; +using ::testing::SetArgPointee; class TestSnapshotCoreImpl : public ::testing::Test { public: @@ -47,8 +45,7 @@ class TestSnapshotCoreImpl : public ::testing::Test { virtual ~TestSnapshotCoreImpl() {} virtual void SetUp() { - snapshotRef_ = - std::make_shared(); + snapshotRef_ = std::make_shared(); client_ = std::make_shared(); metaStore_ = std::make_shared(); dataStore_ = std::make_shared(); @@ -60,11 +57,8 @@ class TestSnapshotCoreImpl : public ::testing::Test { option.snapshotCoreThreadNum = 1; option.clientAsyncMethodRetryTimeSec = 1; option.clientAsyncMethodRetryIntervalMs = 500; - core_ = std::make_shared(client_, - metaStore_, - dataStore_, - snapshotRef_, - option); + core_ = std::make_shared( + client_, metaStore_, dataStore_, snapshotRef_, option); ASSERT_EQ(core_->Init(), 0); } @@ -84,7 +78,6 @@ class TestSnapshotCoreImpl : public ::testing::Test { SnapshotCloneServerOptions option; }; - TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreSuccess) { const std::string file = "file"; const std::string user = "user"; @@ -96,18 +89,13 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreSuccess) { sinfo.SetStatus(Status::done); list.push_back(sinfo); EXPECT_CALL(*metaStore_, GetSnapshotList(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(list), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(list), Return(kErrCodeSuccess))); FInfo fInfo; fInfo.filestatus = FileStatus::Created; fInfo.owner = user; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); - EXPECT_CALL(*metaStore_, AddSnapshot(_)) - .WillOnce(Return(kErrCodeSuccess)); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); + EXPECT_CALL(*metaStore_, AddSnapshot(_)).WillOnce(Return(kErrCodeSuccess)); int ret = core_->CreateSnapshotPre(file, user, desc, &info);
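
The CreateCloneChunk, RecoverChunk, and ReadChunkSnapshot mocks in these hunks simulate asynchronous completion: an Invoke action runs the SnapCloneClosure the way the real service would, while Return supplies the synchronous submit result. Below is a self-contained sketch of the pattern; the Closure, Client, and RecoverChunk names are hypothetical stand-ins for SnapCloneClosure and the mocked client.

#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>

using ::testing::_;
using ::testing::DoAll;
using ::testing::Invoke;
using ::testing::Return;

// Hypothetical closure standing in for SnapCloneClosure.
struct Closure {
    int retCode = -1;
    bool ran = false;
    void SetRetCode(int code) { retCode = code; }
    void Run() { ran = true; }
};

class Client {
 public:
    virtual ~Client() = default;
    virtual int RecoverChunk(uint64_t offset, uint64_t len, Closure* done) = 0;
};

class MockClient : public Client {
 public:
    MOCK_METHOD(int, RecoverChunk,
                (uint64_t offset, uint64_t len, Closure* done), (override));
};

TEST(AsyncClosureSketch, CompletesCallbackAndReturns) {
    MockClient client;
    // Invoke completes the callback exactly as the real service would, so the
    // code under test observes a finished asynchronous request; Return(0) is
    // the synchronous "request accepted" result.
    EXPECT_CALL(client, RecoverChunk(_, _, _))
        .WillRepeatedly(DoAll(Invoke([](uint64_t, uint64_t, Closure* done) {
                                  done->SetRetCode(0);
                                  done->Run();
                              }),
                              Return(0)));

    Closure done;
    ASSERT_EQ(0, client.RecoverChunk(0, 4096, &done));
    ASSERT_TRUE(done.ran);
    ASSERT_EQ(0, done.retCode);
}

Completing the closure inside the mock is what keeps expectations like WillRepeatedly safe here: the TestClosure used in the hunk above deletes itself in Run() through a self-owning unique_ptr, so every simulated request is also cleaned up.
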
ASSERT_EQ(kErrCodeSuccess, ret); } @@ -119,16 +107,11 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreTaskExist) { SnapshotInfo info; std::vector list; - SnapshotInfo sinfo("snapid1", - user, - file, - desc); + SnapshotInfo sinfo("snapid1", user, file, desc); sinfo.SetStatus(Status::pending); list.push_back(sinfo); EXPECT_CALL(*metaStore_, GetSnapshotList(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(list), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(list), Return(kErrCodeSuccess))); int ret = core_->CreateSnapshotPre(file, user, desc, &info); ASSERT_EQ(kErrCodeTaskExist, ret); } @@ -144,9 +127,7 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreAddSnapshotFail) { fInfo.filestatus = FileStatus::Created; fInfo.owner = user; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, AddSnapshot(_)) .WillOnce(Return(kErrCodeInternalError)); int ret = core_->CreateSnapshotPre(file, user, desc, &info); @@ -163,9 +144,8 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreFileNotExist) { FInfo fInfo; fInfo.filestatus = FileStatus::Created; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(-LIBCURVE_ERROR::NOTEXIST))); + .WillOnce( + DoAll(SetArgPointee<2>(fInfo), Return(-LIBCURVE_ERROR::NOTEXIST))); int ret = core_->CreateSnapshotPre(file, user, desc, &info); ASSERT_EQ(kErrCodeFileNotExist, ret); } @@ -181,9 +161,8 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreInvalidUser) { fInfo.filestatus = FileStatus::Created; fInfo.owner = "user2"; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(-LIBCURVE_ERROR::AUTHFAIL))); + .WillOnce( + DoAll(SetArgPointee<2>(fInfo), Return(-LIBCURVE_ERROR::AUTHFAIL))); int ret = core_->CreateSnapshotPre(file, user, desc, &info); ASSERT_EQ(kErrCodeInvalidUser, ret); } @@ -198,9 +177,8 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreInternalError) { FInfo fInfo; fInfo.filestatus = FileStatus::Created; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(-LIBCURVE_ERROR::FAILED))); + .WillOnce( + DoAll(SetArgPointee<2>(fInfo), Return(-LIBCURVE_ERROR::FAILED))); int ret = core_->CreateSnapshotPre(file, user, desc, &info); ASSERT_EQ(kErrCodeInternalError, ret); } @@ -216,9 +194,7 @@ TEST_F(TestSnapshotCoreImpl, TestCreateSnapshotPreFailStatusInvalid) { fInfo.filestatus = FileStatus::Cloning; fInfo.owner = user; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) - .WillOnce(DoAll( - SetArgPointee<2>(fInfo), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); int ret = core_->CreateSnapshotPre(file, user, desc, &info); ASSERT_EQ(kErrCodeFileStatusInvalid, ret); } @@ -232,8 +208,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPreSuccess) { SnapshotInfo info(uuid, user, fileName, desc); info.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillOnce(Return(kErrCodeSuccess)); @@ -252,8 +227,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_GetSnapshotInfoNotExist) { SnapshotInfo info(uuid, user, fileName, desc); info.SetStatus(Status::done); EXPECT_CALL(*metaStore_, 
GetSnapshotInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeInternalError))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeInternalError))); SnapshotInfo infoOut; int ret = core_->DeleteSnapshotPre(uuid, user, fileName, &infoOut); @@ -269,8 +243,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_UpdateSnapshotFail) { SnapshotInfo info(uuid, user, fileName, desc); info.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillOnce(Return(kErrCodeInternalError)); @@ -290,8 +263,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_InvalidUser) { SnapshotInfo info(uuid, user2, fileName, desc); info.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); SnapshotInfo infoOut; int ret = core_->DeleteSnapshotPre(uuid, user, fileName, &infoOut); @@ -307,8 +279,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_DeleteSnapshotUnfinished) { SnapshotInfo info(uuid, user, fileName, desc); info.SetStatus(Status::pending); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); SnapshotInfo infoOut; int ret = core_->DeleteSnapshotPre(uuid, user, fileName, &infoOut); @@ -325,8 +296,7 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_FileNameNotMatch) { SnapshotInfo info(uuid, user, fileName2, desc); info.SetStatus(Status::done); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); SnapshotInfo infoOut; int ret = core_->DeleteSnapshotPre(uuid, user, fileName, &infoOut); @@ -342,16 +312,14 @@ TEST_F(TestSnapshotCoreImpl, TestDeleteSnapshotPre_TaskExit) { SnapshotInfo info(uuid, user, fileName, desc); info.SetStatus(Status::deleting); EXPECT_CALL(*metaStore_, GetSnapshotInfo(uuid, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(kErrCodeSuccess))); SnapshotInfo infoOut; int ret = core_->DeleteSnapshotPre(uuid, user, fileName, &infoOut); ASSERT_EQ(kErrCodeTaskExist, ret); } -TEST_F(TestSnapshotCoreImpl, - TestGetFileSnapshotInfoSuccess) { +TEST_F(TestSnapshotCoreImpl, TestGetFileSnapshotInfoSuccess) { std::string file = "file1"; std::vector<SnapshotInfo> info; @@ -362,8 +330,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(kErrCodeSuccess, ret); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskSuccess) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskSuccess) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -378,9 +345,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared<SnapshotTaskInfo>(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -389,10 +354,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) -
.WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -407,10 +370,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -420,29 +381,21 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce(DoAll(SetArgPointee<4>(segInfo2), Return(kErrCodeSuccess))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -464,16 +417,13 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) @@ -481,28 +431,22 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .WillOnce(Return(LIBCURVE_ERROR::OK)); @@ -518,8 +462,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::done, 
task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_CreateSnapshotFail) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTask_CreateSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -533,10 +476,8 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(-LIBCURVE_ERROR::FAILED))); - + .WillOnce( + DoAll(SetArgPointee<2>(seqNum), Return(-LIBCURVE_ERROR::FAILED))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillOnce(Return(kErrCodeSuccess)); @@ -547,8 +488,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_GetSnapshotFail) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTask_GetSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -562,10 +502,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -574,9 +511,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(-LIBCURVE_ERROR::FAILED))); + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(-LIBCURVE_ERROR::FAILED))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillOnce(Return(kErrCodeSuccess)); @@ -587,8 +523,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_UpdateSnapshotFail) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTask_UpdateSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -601,10 +536,7 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -613,10 +545,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeInternalError)); @@ -630,7 +560,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_SecondTimeUpdateSnapshotFail) { + TestHandleCreateSnapshotTask_SecondTimeUpdateSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -643,10 +573,7 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce(DoAll(SetArgPointee<2>(seqNum), 
Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -655,10 +582,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeInternalError)); @@ -672,7 +597,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_GetSnapshotSegmentInfoFail) { + TestHandleCreateSnapshotTask_GetSnapshotSegmentInfoFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -685,12 +610,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -699,21 +620,15 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillOnce(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::FAILED)); core_->HandleCreateSnapshotTask(task); @@ -722,8 +637,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_GetChunkInfoFail) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTask_GetChunkInfoFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -737,10 +651,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -749,10 +660,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -767,10 +676,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -780,25 +687,19 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - 
segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(chunkInfo), - Return(-LIBCURVE_ERROR::FAILED))); + Return(-LIBCURVE_ERROR::FAILED))); core_->HandleCreateSnapshotTask(task); @@ -807,7 +708,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_PutChunkIndexDataFail) { + TestHandleCreateSnapshotTask_PutChunkIndexDataFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -820,12 +721,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -834,10 +731,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -852,10 +747,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -865,29 +758,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); 
EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeInternalError)); @@ -899,7 +785,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_DataChunkTranferInitFail) { + TestHandleCreateSnapshotTask_DataChunkTranferInitFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -912,11 +798,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -925,10 +808,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -943,10 +824,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -956,29 +835,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -989,23 +861,20 @@ TEST_F(TestSnapshotCoreImpl, std::vector snapInfos; SnapshotInfo info2(uuid2, user, fileName, desc2); info.SetSeqNum(seqNum); - info2.SetSeqNum(seqNum - 1); // 上一个快照 + info2.SetSeqNum(seqNum - 1); // Previous snapshot info2.SetStatus(Status::done); snapInfos.push_back(info); snapInfos.push_back(info2); EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - 
SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .WillOnce(Return(kErrCodeInternalError)); @@ -1017,7 +886,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_ReadChunkSnapshotFail) { + TestHandleCreateSnapshotTask_ReadChunkSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -1031,9 +900,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -1042,10 +909,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1060,10 +925,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -1073,29 +936,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1111,35 +967,28 @@ TEST_F(TestSnapshotCoreImpl, snapInfos.push_back(info); snapInfos.push_back(info2); - EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) + EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + 
.WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(1) .WillRepeatedly(Return(kErrCodeSuccess)); EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) - .WillOnce(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(-LIBCURVE_ERROR::FAILED))); + .WillOnce( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(-LIBCURVE_ERROR::FAILED))); core_->HandleCreateSnapshotTask(task); @@ -1148,7 +997,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_DataChunkTranferAddPartFail) { + TestHandleCreateSnapshotTask_DataChunkTranferAddPartFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -1161,11 +1010,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -1174,10 +1020,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1192,10 +1036,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -1205,29 +1047,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1245,34 +1080,26 @@ TEST_F(TestSnapshotCoreImpl, 
EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(1) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .WillRepeatedly(Return(kErrCodeInternalError)); @@ -1287,7 +1114,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_DataChunkTranferCompleteFail) { + TestHandleCreateSnapshotTask_DataChunkTranferCompleteFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -1300,11 +1127,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -1313,10 +1137,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1331,10 +1153,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -1344,29 +1164,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + 
.WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1384,35 +1197,27 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(1) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(2) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(2) @@ -1430,8 +1235,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_DeleteSnapshotFail) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTask_DeleteSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -1444,11 +1248,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -1457,10 +1258,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1475,10 +1274,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -1488,29 +1285,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - 
ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1528,41 +1318,32 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -1577,7 +1358,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnReturnFail) { + TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnReturnFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -1590,11 +1371,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -1603,10 +1381,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, 
CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1621,10 +1397,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -1634,29 +1408,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1674,41 +1441,32 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -1726,7 +1484,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnDeleteError) { + TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnDeleteError) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -1739,11 +1497,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr 
task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -1752,10 +1507,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1770,10 +1523,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -1783,29 +1534,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1823,41 +1567,32 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + 
scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -1875,7 +1610,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnFileStatusError) { + TestHandleCreateSnapshotTask_CheckSnapShotStatusFailOnFileStatusError) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -1888,11 +1623,8 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -1901,10 +1633,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1919,10 +1649,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -1932,29 +1660,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -1972,41 +1693,32 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - 
SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -2025,7 +1737,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskExistIndexDataSuccess) { + TestHandleCreateSnapshotTaskExistIndexDataSuccess) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2043,8 +1755,7 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)).WillOnce(Return(true)); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, seqNum, 0)); @@ -2057,12 +1768,8 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<1>(indexData2), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess))); LogicPoolID lpid1 = 1; CopysetID cpid1 = 1; @@ -2072,10 +1779,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -2085,21 +1790,14 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); UUID uuid2 = "uuid2"; std::string desc2 = "desc2"; @@ -2114,29 +1812,22 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - 
.WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); - + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) @@ -2163,7 +1854,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskChunkSizeNotAlignTokChunkSplitSize) { + TestHandleCreateSnapshotTaskChunkSizeNotAlignTokChunkSplitSize) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2180,8 +1871,7 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)).WillOnce(Return(true)); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, seqNum, 0)); @@ -2194,13 +1884,8 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<1>(indexData2), - Return(kErrCodeSuccess))); - + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess))); LogicPoolID lpid1 = 1; CopysetID cpid1 = 1; @@ -2210,10 +1895,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -2223,21 +1906,14 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); UUID uuid2 = "uuid2"; std::string desc2 = "desc2"; @@ -2252,9 +1928,8 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + 
.WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .Times(1) @@ -2266,8 +1941,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskChunkVecInfoMiss) { +TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskChunkVecInfoMiss) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2284,8 +1958,7 @@ TEST_F(TestSnapshotCoreImpl, std::shared_ptr task = std::make_shared(info, snapshotInfoMetric); - EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)).WillOnce(Return(true)); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, seqNum, 0)); @@ -2298,28 +1971,18 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<1>(indexData2), - Return(kErrCodeSuccess))); - + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess))); SegmentInfo segInfo1; SegmentInfo segInfo2; - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); UUID uuid2 = "uuid2"; std::string desc2 = "desc2"; @@ -2334,9 +1997,8 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .Times(1) @@ -2348,8 +2010,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleDeleteSnapshotTaskSuccess) { +TEST_F(TestSnapshotCoreImpl, TestHandleDeleteSnapshotTaskSuccess) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2374,9 +2035,7 @@ TEST_F(TestSnapshotCoreImpl, snapInfos.push_back(info2); EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData1; indexData1.PutChunkDataName(ChunkDataName(fileName, seqNum, 0)); @@ -2384,15 +2043,10 @@ TEST_F(TestSnapshotCoreImpl, indexData2.PutChunkDataName(ChunkDataName(fileName, 1, 1)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<1>(indexData1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<1>(indexData2), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData1), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess))); - EXPECT_CALL(*dataStore_, ChunkDataExist(_)) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*dataStore_, ChunkDataExist(_)).WillRepeatedly(Return(true)); 
EXPECT_CALL(*dataStore_, DeleteChunkData(_)) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -2412,7 +2066,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleDeleteSnapshotTask_GetChunkIndexDataSecondTimeFail) { + TestHandleDeleteSnapshotTask_GetChunkIndexDataSecondTimeFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2437,9 +2091,7 @@ TEST_F(TestSnapshotCoreImpl, snapInfos.push_back(info2); EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData1; indexData1.PutChunkDataName(ChunkDataName(fileName, seqNum, 0)); @@ -2447,12 +2099,9 @@ TEST_F(TestSnapshotCoreImpl, indexData2.PutChunkDataName(ChunkDataName(fileName, 1, 1)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<1>(indexData1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<1>(indexData2), - Return(kErrCodeInternalError))); + .WillOnce(DoAll(SetArgPointee<1>(indexData1), Return(kErrCodeSuccess))) + .WillOnce( + DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeInternalError))); EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)) .WillRepeatedly(Return(true)); @@ -2466,7 +2115,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleDeleteSnapshotTask_DeleteChunkIndexDataFail) { + TestHandleDeleteSnapshotTask_DeleteChunkIndexDataFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2491,9 +2140,7 @@ TEST_F(TestSnapshotCoreImpl, snapInfos.push_back(info2); EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData1; indexData1.PutChunkDataName(ChunkDataName(fileName, seqNum, 0)); @@ -2501,15 +2148,10 @@ TEST_F(TestSnapshotCoreImpl, indexData2.PutChunkDataName(ChunkDataName(fileName, 1, 1)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<1>(indexData1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<1>(indexData2), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData1), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess))); - EXPECT_CALL(*dataStore_, ChunkDataExist(_)) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*dataStore_, ChunkDataExist(_)).WillRepeatedly(Return(true)); EXPECT_CALL(*dataStore_, DeleteChunkData(_)) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -2528,8 +2170,7 @@ TEST_F(TestSnapshotCoreImpl, ASSERT_EQ(Status::error, task->GetSnapshotInfo().GetStatus()); } -TEST_F(TestSnapshotCoreImpl, - TestHandleDeleteSnapshotTaskDeleteSnapshotFail) { +TEST_F(TestSnapshotCoreImpl, TestHandleDeleteSnapshotTaskDeleteSnapshotFail) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2554,9 +2195,7 @@ TEST_F(TestSnapshotCoreImpl, snapInfos.push_back(info2); EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) - .WillOnce(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData1; indexData1.PutChunkDataName(ChunkDataName(fileName, seqNum, 0)); @@ -2564,15 +2203,10 @@ TEST_F(TestSnapshotCoreImpl, 
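The "...SecondTimeFail" cases in this file lean on another stock idiom worth
spelling out: under .Times(2), two chained WillOnce clauses give each call its
own action, so the first GetChunkIndexData lookup succeeds while the second
reports an internal error (in the real tests each WillOnce also pipes an output
parameter back through SetArgPointee<1>). A hedged sketch with a simplified
signature, not the real datastore API:

    #include <gmock/gmock.h>

    using ::testing::_;
    using ::testing::Return;

    struct MockDataStore {
        MOCK_METHOD1(GetChunkIndexData, int(int chunkIndex));
    };

    void ExpectSecondLookupFails(MockDataStore* store) {
        EXPECT_CALL(*store, GetChunkIndexData(_))
            .Times(2)
            .WillOnce(Return(0))      // first call: success
            .WillOnce(Return(-1));    // second call: internal error
    }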
indexData2.PutChunkDataName(ChunkDataName(fileName, 1, 1)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<1>(indexData1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<1>(indexData2), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData1), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<1>(indexData2), Return(kErrCodeSuccess))); - EXPECT_CALL(*dataStore_, ChunkDataExist(_)) - .WillRepeatedly(Return(true)); + EXPECT_CALL(*dataStore_, ChunkDataExist(_)).WillRepeatedly(Return(true)); EXPECT_CALL(*dataStore_, DeleteChunkData(_)) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -2580,7 +2214,6 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*dataStore_, ChunkIndexDataExist(_)) .WillRepeatedly(Return(true)); - EXPECT_CALL(*dataStore_, DeleteChunkIndexData(_)) .WillOnce(Return(kErrCodeSuccess)); @@ -2609,9 +2242,7 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) { std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -2620,10 +2251,8 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) { snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -2636,10 +2265,8 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) { ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -2649,29 +2276,22 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) { ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -2689,60 +2309,50 @@ 
TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) { EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .Times(2) - .WillOnce(Invoke([task](const std::string &filename, - const std::string &user, - uint64_t seq) -> int { - task->Cancel(); - return kErrCodeSuccess; - })) + .WillOnce(Invoke([task](const std::string& filename, + const std::string& user, uint64_t seq) -> int { + task->Cancel(); + return kErrCodeSuccess; + })) .WillOnce(Return(LIBCURVE_ERROR::OK)); EXPECT_CALL(*client_, CheckSnapShotStatus(_, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST)); - // 进入cancel + // Enter cancel EXPECT_CALL(*dataStore_, ChunkDataExist(_)) .Times(4) .WillRepeatedly(Return(true)); @@ -2764,7 +2374,7 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) { } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskCancelAfterCreateSnapshotOnCurvefs) { + TestHandleCreateSnapshotTaskCancelAfterCreateSnapshotOnCurvefs) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2778,9 +2388,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -2789,19 +2397,17 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Invoke([task](const UUID& uuid, CASFunc cas) { task->Cancel(); return kErrCodeSuccess; })); - // 进入cancel + // Enter cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .WillOnce(Return(LIBCURVE_ERROR::OK)); @@ 
-2818,7 +2424,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskCancelAfterCreateChunkIndexData) { + TestHandleCreateSnapshotTaskCancelAfterCreateChunkIndexData) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2832,9 +2438,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -2843,10 +2447,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -2859,10 +2461,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -2872,40 +2472,32 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) - .WillOnce(Invoke([task](const ChunkIndexDataName &name, - const ChunkIndexData &meta) { - task->Cancel(); - return kErrCodeSuccess; - })); + .WillOnce(Invoke( + [task](const ChunkIndexDataName& name, const ChunkIndexData& meta) { + task->Cancel(); + return kErrCodeSuccess; + })); - - // 进入cancel + // Enter cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .WillOnce(Return(LIBCURVE_ERROR::OK)); @@ -2925,7 +2517,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskCancelFailOnDeleteChunkData) { + TestHandleCreateSnapshotTaskCancelFailOnDeleteChunkData) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -2939,9 +2531,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, 
snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -2950,10 +2540,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -2968,10 +2556,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -2981,29 +2567,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -3021,60 +2600,49 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + 
}), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) - .WillOnce(Invoke([task](const std::string &filename, - const std::string &user, - uint64_t seq) -> int { - task->Cancel(); - return kErrCodeSuccess; - })); + .WillOnce(Invoke([task](const std::string& filename, + const std::string& user, uint64_t seq) -> int { + task->Cancel(); + return kErrCodeSuccess; + })); EXPECT_CALL(*client_, CheckSnapShotStatus(_, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST)); - // 进入cancel - EXPECT_CALL(*dataStore_, ChunkDataExist(_)) - .WillRepeatedly(Return(true)); + // Enter cancel + EXPECT_CALL(*dataStore_, ChunkDataExist(_)).WillRepeatedly(Return(true)); EXPECT_CALL(*dataStore_, DeleteChunkData(_)) .WillRepeatedly(Return(kErrCodeInternalError)); @@ -3086,7 +2654,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskCancelFailOnDeleteChunkIndexData) { + TestHandleCreateSnapshotTaskCancelFailOnDeleteChunkIndexData) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -3100,9 +2668,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -3111,10 +2677,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -3127,10 +2691,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -3140,29 +2702,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, 
GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -3180,58 +2735,48 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) - .WillOnce(Invoke([task](const std::string &filename, - const std::string &user, - uint64_t seq) -> int { - task->Cancel(); - return kErrCodeSuccess; - })); + .WillOnce(Invoke([task](const std::string& filename, + const std::string& user, uint64_t seq) -> int { + task->Cancel(); + return kErrCodeSuccess; + })); EXPECT_CALL(*client_, CheckSnapShotStatus(_, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST)); - // 进入cancel + // Enter cancel EXPECT_CALL(*dataStore_, ChunkDataExist(_)) .Times(4) .WillRepeatedly(Return(true)); @@ -3250,7 +2795,7 @@ TEST_F(TestSnapshotCoreImpl, } TEST_F(TestSnapshotCoreImpl, - TestHandleCreateSnapshotTaskCancelFailOnDeleteSnapshot) { + TestHandleCreateSnapshotTaskCancelFailOnDeleteSnapshot) { UUID uuid = "uuid1"; std::string user = "user1"; std::string fileName = "file1"; @@ -3264,9 +2809,7 @@ TEST_F(TestSnapshotCoreImpl, std::make_shared(info, snapshotInfoMetric); EXPECT_CALL(*client_, CreateSnapshot(fileName, user, _)) - .WillOnce(DoAll( - SetArgPointee<2>(seqNum), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<2>(seqNum), Return(LIBCURVE_ERROR::OK))); FInfo snapInfo; snapInfo.seqnum = 100; @@ -3275,10 +2818,8 @@ TEST_F(TestSnapshotCoreImpl, snapInfo.length = 2 * snapInfo.segmentsize; snapInfo.ctime = 10; EXPECT_CALL(*client_, GetSnapshot(fileName, user, seqNum, _)) - .WillOnce(DoAll( - SetArgPointee<3>(snapInfo), - Return(LIBCURVE_ERROR::OK))); - + .WillOnce( + DoAll(SetArgPointee<3>(snapInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*metaStore_, UpdateSnapshot(_)) .WillRepeatedly(Return(kErrCodeSuccess)); @@ -3291,10 +2832,8 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId2 = 2; SegmentInfo segInfo1; - 
segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId1, lpid1, cpid1)); - segInfo1.chunkvec.push_back( - ChunkIDInfo(chunkId2, lpid2, cpid2)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId1, lpid1, cpid1)); + segInfo1.chunkvec.push_back(ChunkIDInfo(chunkId2, lpid2, cpid2)); LogicPoolID lpid3 = 3; CopysetID cpid3 = 3; @@ -3304,29 +2843,22 @@ TEST_F(TestSnapshotCoreImpl, ChunkID chunkId4 = 4; SegmentInfo segInfo2; - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId3, lpid3, cpid3)); - segInfo2.chunkvec.push_back( - ChunkIDInfo(chunkId4, lpid4, cpid4)); - - EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, - user, - seqNum, - _, - _)) + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId3, lpid3, cpid3)); + segInfo2.chunkvec.push_back(ChunkIDInfo(chunkId4, lpid4, cpid4)); + + EXPECT_CALL(*client_, GetSnapshotSegmentInfo(fileName, user, seqNum, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<4>(segInfo1), - Return(LIBCURVE_ERROR::OK))) - .WillOnce(DoAll(SetArgPointee<4>(segInfo2), - Return(LIBCURVE_ERROR::OK))); + .WillOnce(DoAll(SetArgPointee<4>(segInfo1), Return(LIBCURVE_ERROR::OK))) + .WillOnce( + DoAll(SetArgPointee<4>(segInfo2), Return(LIBCURVE_ERROR::OK))); uint64_t chunkSn = 100; ChunkInfoDetail chunkInfo; chunkInfo.chunkSn.push_back(chunkSn); EXPECT_CALL(*client_, GetChunkInfo(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Return(kErrCodeSuccess)); @@ -3344,60 +2876,50 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*metaStore_, GetSnapshotList(fileName, _)) .Times(2) - .WillRepeatedly(DoAll( - SetArgPointee<1>(snapInfos), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfos), Return(kErrCodeSuccess))); ChunkIndexData indexData; indexData.PutChunkDataName(ChunkDataName(fileName, 1, 0)); EXPECT_CALL(*dataStore_, GetChunkIndexData(_, _)) - .WillOnce(DoAll( - SetArgPointee<1>(indexData), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(indexData), Return(kErrCodeSuccess))); EXPECT_CALL(*dataStore_, DataChunkTranferInit(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*client_, ReadChunkSnapshot(_, _, _, _, _, _)) .Times(8) - .WillRepeatedly(DoAll( - Invoke([](ChunkIDInfo cidinfo, - uint64_t seq, - uint64_t offset, - uint64_t len, - char *buf, - SnapCloneClosure* scc){ - scc->SetRetCode(LIBCURVE_ERROR::OK); - scc->Run(); - }), - Return(LIBCURVE_ERROR::OK))); + .WillRepeatedly( + DoAll(Invoke([](ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, + uint64_t len, char* buf, SnapCloneClosure* scc) { + scc->SetRetCode(LIBCURVE_ERROR::OK); + scc->Run(); + }), + Return(LIBCURVE_ERROR::OK))); EXPECT_CALL(*dataStore_, DataChunkTranferAddPart(_, _, _, _, _)) .Times(8) .WillRepeatedly(Return(kErrCodeSuccess)); - EXPECT_CALL(*dataStore_, DataChunkTranferComplete(_, _)) .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .Times(2) - .WillOnce(Invoke([task](const std::string &filename, - const std::string &user, - uint64_t seq) -> int { - task->Cancel(); - return kErrCodeSuccess; - })) + .WillOnce(Invoke([task](const std::string& filename, + const std::string& user, uint64_t seq) -> int { + task->Cancel(); + return kErrCodeSuccess; + })) .WillOnce(Return(LIBCURVE_ERROR::OK)); EXPECT_CALL(*client_, 
CheckSnapShotStatus(_, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST)); - // 进入cancel + // Enter cancel EXPECT_CALL(*dataStore_, ChunkDataExist(_)) .Times(4) .WillRepeatedly(Return(true)); @@ -3420,4 +2942,3 @@ TEST_F(TestSnapshotCoreImpl, } // namespace snapshotcloneserver } // namespace curve - diff --git a/test/snapshotcloneserver/test_snapshot_service_manager.cpp b/test/snapshotcloneserver/test_snapshot_service_manager.cpp index ba51d90f98..0af03c9315 100644 --- a/test/snapshotcloneserver/test_snapshot_service_manager.cpp +++ b/test/snapshotcloneserver/test_snapshot_service_manager.cpp @@ -20,25 +20,24 @@ * Author: xuchaojie */ -#include #include +#include -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" - -#include "test/snapshotcloneserver/mock_snapshot_server.h" #include "src/common/concurrent/count_down_event.h" +#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/snapshotclone_metric.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" +#include "test/snapshotcloneserver/mock_snapshot_server.h" using curve::common::CountDownEvent; -using ::testing::Return; using ::testing::_; -using ::testing::AnyOf; using ::testing::AllOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; +using ::testing::AnyOf; using ::testing::DoAll; +using ::testing::Invoke; using ::testing::Property; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace snapshotcloneserver { @@ -51,21 +50,16 @@ class TestSnapshotServiceManager : public ::testing::Test { virtual void SetUp() { serverOption_.snapshotPoolThreadNum = 8; serverOption_.snapshotTaskManagerScanIntervalMs = 100; - core_ = - std::make_shared(); - auto metaStore_ = - std::shared_ptr(); + core_ = std::make_shared(); + auto metaStore_ = std::shared_ptr(); snapshotMetric_ = std::make_shared(metaStore_); - std::shared_ptr - taskMgr_ = + std::shared_ptr taskMgr_ = std::make_shared(core_, snapshotMetric_); manager_ = std::make_shared(taskMgr_, core_); - ASSERT_EQ(0, manager_->Init(serverOption_)) - << "manager init fail."; - ASSERT_EQ(0, manager_->Start()) - << "manager start fail."; + ASSERT_EQ(0, manager_->Init(serverOption_)) << "manager init fail."; + ASSERT_EQ(0, manager_->Start()) << "manager start fail."; } virtual void TearDown() { @@ -75,31 +69,22 @@ class TestSnapshotServiceManager : public ::testing::Test { snapshotMetric_ = nullptr; } - void PrepareCreateSnapshot( - const std::string &file, - const std::string &user, - const std::string &desc, - UUID uuid) { + void PrepareCreateSnapshot(const std::string& file, const std::string& user, + const std::string& desc, UUID uuid) { SnapshotInfo info(uuid, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { - task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce(Invoke([&cond1](std::shared_ptr task) { + task->GetSnapshotInfo().SetStatus(Status::done); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, 
ret); cond1.Wait(); @@ -112,8 +97,7 @@ class TestSnapshotServiceManager : public ::testing::Test { SnapshotCloneServerOptions serverOption_; }; -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotSuccess) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotSuccess) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -122,32 +106,25 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -155,8 +132,7 @@ TEST_F(TestSnapshotServiceManager, ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotPreFail) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotPreFail) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -165,21 +141,13 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeInternalError))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeInternalError))); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeInternalError, ret); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotSuccessByTaskExist) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotSuccessByTaskExist) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -188,20 +156,13 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeTaskExist))); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeTaskExist))); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotPushTaskFail) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotPushTaskFail) { const std::string file1 = "file1"; const std::string user1 = "user1"; const std::string desc1 = "snap1"; @@ -209,33 +170,21 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuid1, user1, file1, desc1); EXPECT_CALL(*core_, CreateSnapshotPre(file1, user1, desc1, 
_)) - .WillRepeatedly(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillRepeatedly(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([] (std::shared_ptr task) { - })); + .WillOnce(Invoke([](std::shared_ptr task) {})); UUID uuid; - int ret = manager_->CreateSnapshot( - file1, - user1, - desc1, - &uuid); + int ret = manager_->CreateSnapshot(file1, user1, desc1, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); UUID uuid2; - ret = manager_->CreateSnapshot( - file1, - user1, - desc1, - &uuid2); + ret = manager_->CreateSnapshot(file1, user1, desc1, &uuid2); ASSERT_EQ(kErrCodeInternalError, ret); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(1, snapshotMetric_->snapshotDoing.get_value()); @@ -243,8 +192,7 @@ TEST_F(TestSnapshotServiceManager, ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotMultiThreadSuccess) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotMultiThreadSuccess) { const std::string file1 = "file1"; const std::string file2 = "file2"; const std::string file3 = "file3"; @@ -264,15 +212,9 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, CreateSnapshotPre(_, _, _, _)) .Times(3) - .WillOnce(DoAll( - SetArgPointee<3>(info1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info2), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info3), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info1), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info2), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info3), Return(kErrCodeSuccess))); std::condition_variable cv; std::mutex m; @@ -281,43 +223,28 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) .Times(3) - .WillRepeatedly(Invoke([&cv, &m, &count] ( - std::shared_ptr task) { - task->GetSnapshotInfo().SetStatus(Status::done); - std::unique_lock lk(m); - count++; - task->Finish(); - cv.notify_all(); - })); - - - int ret = manager_->CreateSnapshot( - file1, - user, - desc1, - &uuid); + .WillRepeatedly( + Invoke([&cv, &m, &count](std::shared_ptr task) { + task->GetSnapshotInfo().SetStatus(Status::done); + std::unique_lock lk(m); + count++; + task->Finish(); + cv.notify_all(); + })); + + int ret = manager_->CreateSnapshot(file1, user, desc1, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file2, - user, - desc2, - &uuid); + ret = manager_->CreateSnapshot(file2, user, desc2, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file3, - user, - desc3, - &uuid); + ret = manager_->CreateSnapshot(file3, user, desc3, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - cv.wait(lk, [&count](){return count == 3;}); + cv.wait(lk, [&count]() { return count == 3; }); - - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); ASSERT_EQ(3, snapshotMetric_->snapshotSucceed.get_value()); @@ -325,7 +252,7 @@ 
TEST_F(TestSnapshotServiceManager,
 }
 
 TEST_F(TestSnapshotServiceManager,
-    TestCreateSnapshotMultiThreadSameFileSuccess) {
+       TestCreateSnapshotMultiThreadSameFileSuccess) {
     const std::string file1 = "file1";
     const std::string user = "user1";
     const std::string desc1 = "snap1";
@@ -343,52 +270,32 @@ TEST_F(TestSnapshotServiceManager,
 
     EXPECT_CALL(*core_, CreateSnapshotPre(_, _, _, _))
         .Times(3)
-        .WillOnce(DoAll(
-            SetArgPointee<3>(info1),
-            Return(kErrCodeSuccess)))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(info2),
-            Return(kErrCodeSuccess)))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(info3),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<3>(info1), Return(kErrCodeSuccess)))
+        .WillOnce(DoAll(SetArgPointee<3>(info2), Return(kErrCodeSuccess)))
+        .WillOnce(DoAll(SetArgPointee<3>(info3), Return(kErrCodeSuccess)));
 
     CountDownEvent cond1(3);
 
     EXPECT_CALL(*core_, HandleCreateSnapshotTask(_))
         .Times(3)
-        .WillRepeatedly(Invoke([&cond1] (
-            std::shared_ptr<SnapshotTaskInfo> task) {
-                task->GetSnapshotInfo().SetStatus(Status::done);
-                task->Finish();
-                cond1.Signal();
-            }));
-
-    int ret = manager_->CreateSnapshot(
-        file1,
-        user,
-        desc1,
-        &uuid);
+        .WillRepeatedly(
+            Invoke([&cond1](std::shared_ptr<SnapshotTaskInfo> task) {
+                task->GetSnapshotInfo().SetStatus(Status::done);
+                task->Finish();
+                cond1.Signal();
+            }));
+
+    int ret = manager_->CreateSnapshot(file1, user, desc1, &uuid);
     ASSERT_EQ(kErrCodeSuccess, ret);
 
-    ret = manager_->CreateSnapshot(
-        file1,
-        user,
-        desc2,
-        &uuid);
+    ret = manager_->CreateSnapshot(file1, user, desc2, &uuid);
     ASSERT_EQ(kErrCodeSuccess, ret);
 
-    ret = manager_->CreateSnapshot(
-        file1,
-        user,
-        desc3,
-        &uuid);
+    ret = manager_->CreateSnapshot(file1, user, desc3, &uuid);
     ASSERT_EQ(kErrCodeSuccess, ret);
 
     cond1.Wait();
-    std::this_thread::sleep_for(
-        std::chrono::milliseconds(
-            serverOption_.snapshotTaskManagerScanIntervalMs * 2));
+    std::this_thread::sleep_for(std::chrono::milliseconds(
+        serverOption_.snapshotTaskManagerScanIntervalMs * 2));
 
     ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value());
     ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value());
@@ -408,19 +315,18 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotSuccess) {
         .WillOnce(Return(kErrCodeSuccess));
 
     EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_))
-        .WillOnce(Invoke([&cond1] (std::shared_ptr<SnapshotTaskInfo> task) {
+        .WillOnce(Invoke([&cond1](std::shared_ptr<SnapshotTaskInfo> task) {
             task->GetSnapshotInfo().SetStatus(Status::done);
-                        task->Finish();
-                        cond1.Signal();
-                    }));
+            task->Finish();
+            cond1.Signal();
+        }));
 
     int ret = manager_->DeleteSnapshot(uuid, user, file);
     ASSERT_EQ(kErrCodeSuccess, ret);
 
     cond1.Wait();
-    std::this_thread::sleep_for(
-        std::chrono::milliseconds(
-            serverOption_.snapshotTaskManagerScanIntervalMs * 2));
+    std::this_thread::sleep_for(std::chrono::milliseconds(
+        serverOption_.snapshotTaskManagerScanIntervalMs * 2));
 
     ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value());
     ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value());
@@ -428,7 +334,7 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotSuccess) {
     ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value());
 }
 
-// 删除转cancel用例
+// Test case: a delete request is converted into a cancel
 TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelSuccess) {
     const std::string file = "file1";
     const std::string user = "user1";
@@ -438,30 +344,23 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelSuccess) {
     SnapshotInfo info(uuidOut, user, file, desc);
 
     EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(info),
-            Return(kErrCodeSuccess)));
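A note on the cancellation tests in this file: the shared task pointer is
captured inside a mock action, so the expectation itself flips the task into the
canceled state while the worker thread is still running, and the worker's loop
on IsCanceled() then steers it into the cancel path. Sketched below with
stand-in types (the real code captures a std::shared_ptr<SnapshotTaskInfo> and
returns curve error codes):

    #include <atomic>
    #include <cstdint>
    #include <gmock/gmock.h>
    #include <memory>
    #include <string>

    using ::testing::_;
    using ::testing::Invoke;

    struct Task {                       // stand-in for the snapshot task info
        void Cancel() { canceled = true; }
        bool IsCanceled() const { return canceled; }
        std::atomic<bool> canceled{false};
    };

    struct MockClient {
        MOCK_METHOD3(DeleteSnapshot,
                     int(const std::string& file, const std::string& user,
                         uint64_t seq));
    };

    // The DeleteSnapshot call cancels the captured task; a worker spinning on
    // task->IsCanceled() observes the flag and enters its cancel branch.
    void ExpectCancelOnDelete(MockClient* client, std::shared_ptr<Task> task) {
        EXPECT_CALL(*client, DeleteSnapshot(_, _, _))
            .WillOnce(Invoke([task](const std::string&, const std::string&,
                                    uint64_t) -> int {
                task->Cancel();
                return 0;
            }));
    }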
+ .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] ( - std::shared_ptr task) { - LOG(INFO) << "in HandleCreateSnapshotTask"; - while (1) { - if (task->IsCanceled()) { - break; - } - } - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce(Invoke([&cond1](std::shared_ptr task) { + LOG(INFO) << "in HandleCreateSnapshotTask"; + while (1) { + if (task->IsCanceled()) { + break; + } + } + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); @@ -496,19 +395,18 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelByDeleteSuccess) { .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -516,8 +414,6 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelByDeleteSuccess) { ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } - - TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotPreFail) { const std::string file = "file1"; const std::string user = "user1"; @@ -543,10 +439,10 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotPushTaskFail) { .WillRepeatedly(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - cond1.Signal(); - })); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); @@ -555,9 +451,8 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotPushTaskFail) { ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeInternalError, ret); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(1, snapshotMetric_->snapshotDoing.get_value()); @@ -579,19 +474,18 @@ TEST_F(TestSnapshotServiceManager, TestCreateAndDeleteSnapshotSuccess) { .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 
2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -599,7 +493,6 @@ TEST_F(TestSnapshotServiceManager, TestCreateAndDeleteSnapshotSuccess) { ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } - TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { const std::string file = "file1"; const std::string user = "user1"; @@ -610,29 +503,22 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke( - [&cond1, progress] (std::shared_ptr task) { - task->SetProgress(progress); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, progress](std::shared_ptr task) { + task->SetProgress(progress); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - const std::string file2 = "file2"; const std::string desc2 = "snap2"; UUID uuid2 = "uuid2"; @@ -659,8 +545,7 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { snapInfo.push_back(snap4); EXPECT_CALL(*core_, GetFileSnapshotInfo(file, _)) - .WillOnce(DoAll(SetArgPointee<1>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapInfo), Return(kErrCodeSuccess))); std::vector fileSnapInfo; ret = manager_->GetFileSnapshotInfo(file, user, &fileSnapInfo); @@ -688,8 +573,7 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { ASSERT_EQ(Status::error, s.GetStatus()); ASSERT_EQ(0, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } } @@ -702,8 +586,8 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoFail) { std::vector snapInfo; EXPECT_CALL(*core_, GetFileSnapshotInfo(file, _)) - .WillOnce(DoAll(SetArgPointee<1>(snapInfo), - Return(kErrCodeInternalError))); + .WillOnce( + DoAll(SetArgPointee<1>(snapInfo), Return(kErrCodeInternalError))); std::vector fileSnapInfo; int ret = manager_->GetFileSnapshotInfo(file, user, &fileSnapInfo); @@ -722,8 +606,8 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoFail2) { snapInfo.push_back(snap1); EXPECT_CALL(*core_, GetFileSnapshotInfo(file, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(snapInfo), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfo), Return(kErrCodeSuccess))); std::vector fileSnapInfo; int ret = manager_->GetFileSnapshotInfo(file, user, &fileSnapInfo); @@ -740,29 +624,22 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke( - [&cond1, progress] (std::shared_ptr task) { - task->SetProgress(progress); - cond1.Signal(); - })); - - 
int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, progress](std::shared_ptr task) { + task->SetProgress(progress); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - const std::string file2 = "file2"; const std::string desc2 = "snap2"; UUID uuid2 = "uuid2"; @@ -789,8 +666,7 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { snapInfo.push_back(snap4); EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // empty filter SnapshotFilterCondition filter; @@ -826,14 +702,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::error, s.GetStatus()); ASSERT_EQ(0, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter uuid SnapshotFilterCondition filter2; @@ -852,14 +726,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::pending, s.GetStatus()); ASSERT_EQ(progress, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter by filename SnapshotFilterCondition filter3; @@ -890,14 +762,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::error, s.GetStatus()); ASSERT_EQ(0, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter by status SnapshotFilterCondition filter4; @@ -923,14 +793,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::done, s.GetStatus()); ASSERT_EQ(100, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter by user SnapshotFilterCondition filter5; @@ -949,8 +817,7 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::done, s.GetStatus()); ASSERT_EQ(100, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } } @@ -964,8 +831,8 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterFail) { std::vector snapInfo; EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeInternalError))); + .WillOnce( + DoAll(SetArgPointee<0>(snapInfo), 
Return(kErrCodeInternalError))); SnapshotFilterCondition filter; std::vector fileSnapInfo; @@ -993,32 +860,30 @@ TEST_F(TestSnapshotServiceManager, TestRecoverSnapshotTaskSuccess) { list.push_back(snap2); list.push_back(snap3); EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(list), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(list), Return(kErrCodeSuccess))); CountDownEvent cond1(2); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->RecoverSnapshotTask(); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -1041,15 +906,13 @@ TEST_F(TestSnapshotServiceManager, TestRecoverSnapshotTaskFail) { list.push_back(snap1); list.push_back(snap2); EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(list), - Return(kErrCodeInternalError))); + .WillOnce(DoAll(SetArgPointee<0>(list), Return(kErrCodeInternalError))); int ret = manager_->RecoverSnapshotTask(); ASSERT_EQ(kErrCodeInternalError, ret); } -TEST_F(TestSnapshotServiceManager, - TestCancelSnapshotSuccess) { +TEST_F(TestSnapshotServiceManager, TestCancelSnapshotSuccess) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -1062,31 +925,27 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info2(uuidOut2, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info2), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info2), Return(kErrCodeSuccess))); CountDownEvent cond1(1); CountDownEvent cond2(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1, &cond2] ( - std::shared_ptr task) { - LOG(INFO) << "in mock HandleCreateSnapshotTask"; - while (1) { - if (task->IsCanceled()) { - cond1.Signal(); - break; - } - } - task->Finish(); - cond2.Signal(); - })); - - // 取消排队的快照会调一次 + .WillOnce( + Invoke([&cond1, &cond2](std::shared_ptr task) { + LOG(INFO) << "in mock HandleCreateSnapshotTask"; + while (1) { + if (task->IsCanceled()) { + cond1.Signal(); + break; + } + } + task->Finish(); + cond2.Signal(); + })); + + // Unqueued snapshots will be called once EXPECT_CALL(*core_, HandleCancelUnSchduledSnapshotTask(_)) .WillOnce(Return(kErrCodeSuccess)); @@ -1099,32 +958,20 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, HandleCancelScheduledSnapshotTask(_)) .WillOnce(Invoke(callback)); - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + int ret = manager_->CreateSnapshot(file, user, 
desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); - // 再打一个快照,覆盖排队的情况 - ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid2); + // Take another snapshot to cover the queuing situation + ret = manager_->CreateSnapshot(file, user, desc, &uuid2); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid2, uuidOut2); - // 先取消在排队的快照 - ret = manager_->CancelSnapshot(uuidOut2, - user, - file); + // Cancel queued snapshots first + ret = manager_->CancelSnapshot(uuidOut2, user, file); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CancelSnapshot(uuidOut, - user, - file); + ret = manager_->CancelSnapshot(uuidOut, user, file); ASSERT_EQ(kErrCodeSuccess, ret); @@ -1132,8 +979,7 @@ TEST_F(TestSnapshotServiceManager, cond2.Wait(); } -TEST_F(TestSnapshotServiceManager, - TestCancelSnapshotFailDiffUser) { +TEST_F(TestSnapshotServiceManager, TestCancelSnapshotFailDiffUser) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -1142,41 +988,32 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); CountDownEvent cond2(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1, &cond2] ( - std::shared_ptr task) { - cond2.Wait(); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, &cond2](std::shared_ptr task) { + cond2.Wait(); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); std::string user2 = "user2"; - ret = manager_->CancelSnapshot(uuidOut, - user2, - file); + ret = manager_->CancelSnapshot(uuidOut, user2, file); cond2.Signal(); ASSERT_EQ(kErrCodeInvalidUser, ret); cond1.Wait(); } -TEST_F(TestSnapshotServiceManager, - TestCancelSnapshotFailDiffFile) { +TEST_F(TestSnapshotServiceManager, TestCancelSnapshotFailDiffFile) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -1185,40 +1022,30 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); CountDownEvent cond2(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1, &cond2] ( - std::shared_ptr task) { - cond2.Wait(); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, &cond2](std::shared_ptr task) { + cond2.Wait(); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); std::string file2 = "file2"; - ret = manager_->CancelSnapshot(uuidOut, - user, - file2); + ret = manager_->CancelSnapshot(uuidOut, user, file2); cond2.Signal(); ASSERT_EQ(kErrCodeFileNameNotMatch, ret); cond1.Wait(); } - } // namespace snapshotcloneserver } // namespace curve - diff --git a/test/tools/chunkserver_client_test.cpp b/test/tools/chunkserver_client_test.cpp index 
9af94d01d3..b88d1fab08 100644 --- a/test/tools/chunkserver_client_test.cpp +++ b/test/tools/chunkserver_client_test.cpp @@ -20,14 +20,15 @@ * Author: charisu */ -#include #include "src/tools/chunkserver_client.h" -#include "test/client/fake/mockMDS.h" + +#include + #include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mockMDS.h" -using curve::chunkserver::GetChunkInfoResponse; using curve::chunkserver::CHUNK_OP_STATUS; - +using curve::chunkserver::GetChunkInfoResponse; DECLARE_string(chunkserver_list); namespace brpc { @@ -46,9 +47,7 @@ class ChunkServerClientTest : public ::testing::Test { fakemds.Initialize(); fakemds.CreateFakeChunkservers(false); } - void TearDown() { - fakemds.UnInitialize(); - } + void TearDown() { fakemds.UnInitialize(); } ChunkServerClient client; FakeMDS fakemds; }; @@ -59,37 +58,36 @@ TEST_F(ChunkServerClientTest, Init) { } TEST_F(ChunkServerClientTest, GetRaftStatus) { - std::vector statServices = - fakemds.GetRaftStateService(); - // 正常情况 + std::vector statServices = + fakemds.GetRaftStateService(); + // Normal situation butil::IOBuf iobuf; iobuf.append("test"); statServices[0]->SetBuf(iobuf); ASSERT_EQ(0, client.Init("127.0.0.1:9191")); ASSERT_EQ(0, client.GetRaftStatus(&iobuf)); - // 传入空指针 + // Pass in a null pointer ASSERT_EQ(-1, client.GetRaftStatus(nullptr)); - // RPC失败的情况 + // RPC failure case statServices[0]->SetFailed(true); ASSERT_EQ(-1, client.GetRaftStatus(&iobuf)); } TEST_F(ChunkServerClientTest, CheckChunkServerOnline) { - std::vector chunkServices = fakemds.GetChunkservice(); + std::vector chunkServices = fakemds.GetChunkservice(); brpc::Controller cntl; - std::unique_ptr response( - new GetChunkInfoResponse()); + std::unique_ptr response(new GetChunkInfoResponse()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); std::unique_ptr fakeret( new FakeReturn(&cntl, static_cast(response.get()))); chunkServices[0]->SetGetChunkInfo(fakeret.get()); - // 正常情况 + // Normal situation ASSERT_EQ(0, client.Init("127.0.0.1:9191")); ASSERT_EQ(true, client.CheckChunkServerOnline()); - // RPC失败的情况 + // RPC failure case cntl.SetFailed("fail for test"); ASSERT_EQ(false, client.CheckChunkServerOnline()); } @@ -98,23 +96,23 @@ TEST_F(ChunkServerClientTest, GetCopysetStatus2) { auto copysetServices = fakemds.GetCreateCopysetService(); CopysetStatusRequest request; CopysetStatusResponse response; - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address("127.0.0.1:9191"); request.set_logicpoolid(1); request.set_copysetid(1001); request.set_allocated_peer(peer); request.set_queryhash(true); - // 正常情况 + // Normal situation ASSERT_EQ(0, client.Init("127.0.0.1:9191")); ASSERT_EQ(0, client.GetCopysetStatus(request, &response)); - // 返回码不ok的情况 + // Return code is not OK copysetServices[0]->SetStatus( COPYSET_OP_STATUS::COPYSET_OP_STATUS_COPYSET_NOTEXIST); ASSERT_EQ(-1, client.GetCopysetStatus(request, &response)); - // RPC失败的情况 + // RPC failure case brpc::Controller cntl; std::unique_ptr fakeret(new FakeReturn(&cntl, nullptr)); copysetServices[0]->SetFakeReturn(fakeret.get()); @@ -122,27 +120,26 @@ } TEST_F(ChunkServerClientTest, GetChunkHash) { - std::vector chunkServices = fakemds.GetChunkservice(); + std::vector chunkServices = fakemds.GetChunkservice(); brpc::Controller cntl; - std::unique_ptr response( - new GetChunkHashResponse()); + std::unique_ptr response(new
GetChunkHashResponse()); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); response->set_hash("1234"); std::unique_ptr fakeret( new FakeReturn(&cntl, static_cast(response.get()))); chunkServices[0]->SetGetChunkHash(fakeret.get()); Chunk chunk(1, 100, 1001); - // 正常情况 + // Normal situation ASSERT_EQ(0, client.Init("127.0.0.1:9191")); std::string hash; ASSERT_EQ(0, client.GetChunkHash(chunk, &hash)); ASSERT_EQ("1234", hash); - // RPC失败的情况 + // RPC failure case cntl.SetFailed("fail for test"); ASSERT_EQ(-1, client.GetChunkHash(chunk, &hash)); - // 返回码不为ok + // Return code is not OK response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); ASSERT_EQ(-1, client.GetChunkHash(chunk, &hash)); } diff --git a/test/tools/config/data_check.conf b/test/tools/config/data_check.conf index 7380f75bd5..0f93452c72 100644 --- a/test/tools/config/data_check.conf +++ b/test/tools/config/data_check.conf @@ -15,131 +15,131 @@ # # -# mds一侧配置信息 +# MDS-side configuration # -# mds的地址信息 +# Address of the MDS mds.listen.addr=127.0.0.1:9160 -# 初始化阶段向mds注册开关,默认为开 +# Whether to register with the MDS during initialization; on by default mds.registerToMDS=true -# 与mds通信的超时时间 +# Timeout for communicating with the MDS mds.rpcTimeoutMS=1000 -# 与mds通信最大的超时时间, 指数退避的超时间不能超过这个值 +# Maximum timeout for communicating with the MDS; the exponential-backoff timeout cannot exceed this value mds.maxRPCTimeoutMS=2000 -# 在当前mds上连续重试次数超过该限制就切换 +# Switch to another MDS if the number of consecutive retries on the current one exceeds this limit mds.maxFailedTimesBeforeChangeMDS=5 -# 与MDS一侧保持一个lease时间内多少次续约 +# Number of lease renewals within one lease period with the MDS mds.refreshTimesPerLease=4 -# mds RPC接口每次重试之前需要先睡眠一段时间 +# Sleep for a while before each retry of an MDS RPC mds.rpcRetryIntervalUS=100000 # -################# metacache配置信息 ################ +################# Metacache configuration ################ # -# 获取leader的rpc超时时间 +# RPC timeout for getting the leader metacache.getLeaderTimeOutMS=1000 -# 获取leader的backup request超时时间 +# Backup-request timeout for getting the leader metacache.getLeaderBackupRequestMS=100 -# 获取leader的重试次数 +# Number of retries for getting the leader metacache.getLeaderRetry=3 -# getleader接口每次重试之前需要先睡眠一段时间 +# Sleep for a while before each retry of the getleader interface metacache.rpcRetryIntervalUS=100000 # -############### 调度层的配置信息 ############# +############### Scheduling-layer configuration ############# # -# 调度层队列大小,每个文件对应一个队列 -# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。 +# Scheduling-layer queue size; each file has its own queue +# The queue depth affects the client's overall throughput; the queue holds asynchronous IO tasks. schedule.queueCapacity=4096 -# 队列的执行线程数量 -# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从 -# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候 -# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w -# 性能已经满足需求 +# Number of execution threads for the queue +# An execution thread dequeues an IO task, sends it over the network, and returns for the next task. +# From dequeue to RPC sent typically takes 20us-100us: about 20us in the normal case where no leader lookup is needed during transmission, and around 100us when a leader lookup is required. +# A single thread sustains roughly 100,000 (10w) to 500,000 (50w) tasks per second. This performance level meets the requirements. schedule.threadpoolSize=2 -# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程 -# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回, -# 这样libcurve不占用qemu的线程,不阻塞其异步调用 +# Task queue introduced to isolate the QEMU-side thread, since QEMU has only one IO thread +# When QEMU calls the AIO interface, the call is pushed onto the task queue and returns immediately, +# so libcurve does not occupy QEMU's thread or block its asynchronous calls isolation.taskQueueCapacity=500000 -# 任务队列线程池大小, 默认值为1个线程 +# Task-queue thread pool size; defaults to 1 thread isolation.taskThreadPoolSize=1 # -################ 与chunkserver通信相关配置 ############# +################ Chunkserver communication configuration ############# # -# 读写接口失败的OP之间重试睡眠 +# Sleep interval between retries of failed read/write OPs chunkserver.opRetryIntervalUS=50000 -# 失败的OP重试次数 +# Number of retries for a failed OP chunkserver.opMaxRetry=3 -# 与chunkserver通信的rpc超时时间 +# RPC timeout for communication with chunkserver chunkserver.rpcTimeoutMS=1000 -# 开启基于appliedindex的读,用于性能优化 +# Enable reading based on appliedindex for performance optimization chunkserver.enableAppliedIndexRead=1 -# 下发IO最大的分片KB +# Maximum split size (KB) of issued IO global.fileIOSplitMaxSizeKB=4 -# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立 +# Maximum number of inflight (unreturned) RPCs allowed by libcurve's underlying RPC scheduling; each file's inflight RPCs are counted independently global.fileMaxInFlightRPCNum=2048 -# 重试请求之间睡眠最长时间 -# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间 -# 这个时间最大为maxRetrySleepIntervalUs +# Maximum sleep time between retry requests +# When the network is congested or the chunkserver is overloaded, the sleep time needs to grow; +# it is capped at maxRetrySleepIntervalUs chunkserver.maxRetrySleepIntervalUS=8000000 -# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略 -# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间 -# 这个时间最大为maxTimeoutMS +# Maximum RPC timeout for retry requests; the timeout follows an exponential-backoff strategy +# When timeouts occur due to network congestion, the RPC timeout needs to grow; +# it is capped at maxTimeoutMS chunkserver.maxRPCTimeoutMS=8000
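The two caps above bound an exponential-backoff policy: each retry grows the sleep interval and the RPC timeout until they reach the configured ceilings. A minimal C++ sketch of such a policy, for illustration only (the struct and member names are hypothetical, not libcurve's implementation; the field defaults mirror the options in this file):

#include <algorithm>
#include <cstdint>

struct BackoffPolicy {
    uint64_t baseSleepUs = 50000;    // chunkserver.opRetryIntervalUS
    uint64_t maxSleepUs = 8000000;   // chunkserver.maxRetrySleepIntervalUS
    uint64_t baseTimeoutMs = 1000;   // chunkserver.rpcTimeoutMS
    uint64_t maxTimeoutMs = 8000;    // chunkserver.maxRPCTimeoutMS

    // Double per retry, clamped to the configured ceiling.
    uint64_t SleepUs(uint32_t retry) const {
        return std::min(maxSleepUs, baseSleepUs << std::min(retry, 20u));
    }
    uint64_t TimeoutMs(uint32_t retry) const {
        return std::min(maxTimeoutMs, baseTimeoutMs << std::min(retry, 20u));
    }
};

With these defaults, the sleep saturates at 8 s and the timeout at 8 s after a handful of retries, which matches the intent described in the comments above.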
-# 同一个chunkserver连续超时上限次数 -# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable +# Maximum number of consecutive timeouts for the same chunkserver +# Beyond this value a health check is triggered; if the health check fails, the chunkserver is marked unstable chunkserver.maxStableTimeoutTimes=64 -# chunkserver上rpc连续超时后,健康检查请求的超时间 +# Timeout of the health-check request issued after consecutive RPC timeouts on a chunkserver chunkserver.checkHealthTimeoutMs=100 -# 同一个server上unstable的chunkserver数量超过这个值之后 -# 所有的chunkserver都会标记为unstable +# Once the number of unstable chunkservers on the same server exceeds this value, +# all chunkservers on that server are marked unstable chunkserver.serverStableThreshold=3 -# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候 -# 记为悬挂IO,metric会报警 +# When an RPC has been retried more than maxRetryTimesBeforeConsiderSuspend times, +# it is recorded as suspended IO and the metric raises an alarm chunkserver.maxRetryTimesBeforeConsiderSuspend=20 chunkserver.opRetryIntervalUS=100000 metacache.getLeaderBackupRequestMS=100 # -################# log相关配置 ############### +################# Log related configuration ############### # -# 
log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.logLevel=0 -# 设置log的路径 +# Set the path of the log global.logPath=./runlog/ -# 单元测试情况下 +# In the case of unit testing # logpath=./runlog/ # -############### metric 配置信息 ############# +############### metric configuration information ############# # global.metricDummyServerStartPort=9000 # -# session map文件,存储打开文件的filename到path的映射 +# session map file, storing the mapping from filename to path of the opened file # global.sessionMapPath=./session_map.json diff --git a/test/tools/copyset_check_core_test.cpp b/test/tools/copyset_check_core_test.cpp index 9ef6de55ce..ef085e2548 100644 --- a/test/tools/copyset_check_core_test.cpp +++ b/test/tools/copyset_check_core_test.cpp @@ -20,20 +20,22 @@ * Author: charisu */ -#include #include "src/tools/copyset_check_core.h" -#include "test/tools/mock/mock_mds_client.h" + +#include + #include "test/tools/mock/mock_chunkserver_client.h" +#include "test/tools/mock/mock_mds_client.h" -using ::testing::_; -using ::testing::Return; -using ::testing::DoAll; -using ::testing::SetArgPointee; -using ::testing::An; using curve::mds::topology::ChunkServerStatus; +using curve::mds::topology::CopySetServerInfo; using curve::mds::topology::DiskState; using curve::mds::topology::OnlineState; -using curve::mds::topology::CopySetServerInfo; +using ::testing::_; +using ::testing::An; +using ::testing::DoAll; +using ::testing::Return; +using ::testing::SetArgPointee; DECLARE_uint64(operatorMaxPeriod); DECLARE_bool(checkOperator); @@ -69,9 +71,9 @@ class CopysetCheckCoreTest : public ::testing::Test { } } - void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo *csInfo, - uint64_t csId, bool offline = false, - bool retired = false) { + void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo* csInfo, + uint64_t csId, bool offline = false, + bool retired = false) { csInfo->set_chunkserverid(csId); csInfo->set_disktype("ssd"); csInfo->set_hostip("127.0.0.1"); @@ -93,7 +95,7 @@ class CopysetCheckCoreTest : public ::testing::Test { csInfo->set_diskused(512); } - void GetServerInfoForTest(curve::mds::topology::ServerInfo *serverInfo) { + void GetServerInfoForTest(curve::mds::topology::ServerInfo* serverInfo) { serverInfo->set_serverid(1); serverInfo->set_hostname("localhost"); serverInfo->set_internalip("127.0.0.1"); @@ -108,24 +110,24 @@ class CopysetCheckCoreTest : public ::testing::Test { } void GetIoBufForTest(butil::IOBuf* buf, const std::string& gId, - const std::string& state = "FOLLOWER", - bool noLeader = false, - bool installingSnapshot = false, - bool peersLess = false, - bool gapBig = false, - bool parseErr = false, - bool minOffline = false, - bool majOffline = false) { + const std::string& state = "FOLLOWER", + bool noLeader = false, bool installingSnapshot = false, + bool peersLess = false, bool gapBig = false, + bool parseErr = false, bool minOffline = false, + bool majOffline = false) { butil::IOBufBuilder os; - os << "[" << gId << "]\r\n"; + os << "[" << gId << "]\r\n"; if (peersLess) { os << "peers: \r\n"; } else if (minOffline) { - os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 127.0.0.1:9194:0\r\n"; // NOLINT + os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 " + "127.0.0.1:9194:0\r\n"; // NOLINT } else if (majOffline) { - os << "peers: 127.0.0.1:9191:0 127.0.0.1:9194:0 127.0.0.1:9195:0\r\n"; // NOLINT + os << "peers: 127.0.0.1:9191:0 127.0.0.1:9194:0 " + "127.0.0.1:9195:0\r\n"; // NOLINT } else { - os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 
127.0.0.1:9193:0\r\n"; // NOLINT + os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 " + "127.0.0.1:9193:0\r\n"; // NOLINT } os << "storage: [2581, 2580]\n"; if (parseErr) { @@ -135,7 +137,9 @@ class CopysetCheckCoreTest : public ::testing::Test { } os << "state_machine: Idle\r\n"; if (state == "LEADER") { - os << "state: " << "LEADER" << "\r\n"; + os << "state: " + << "LEADER" + << "\r\n"; os << "replicator_123: next_index="; if (gapBig) { os << "1000"; @@ -150,11 +154,15 @@ class CopysetCheckCoreTest : public ::testing::Test { } os << "hc=4211759 ac=1089 ic=0\r\n"; } else if (state == "FOLLOWER") { - os << "state: " << "FOLLOWER" << "\r\n"; + os << "state: " + << "FOLLOWER" + << "\r\n"; if (noLeader) { - os << "leader: " << "0.0.0.0:0:0\r\n"; + os << "leader: " + << "0.0.0.0:0:0\r\n"; } else { - os << "leader: " << "127.0.0.1:9192:0\r\n"; + os << "leader: " + << "127.0.0.1:9192:0\r\n"; } } else { os << "state: " << state << "\r\n"; @@ -176,7 +184,7 @@ TEST_F(CopysetCheckCoreTest, Init) { ASSERT_EQ(-1, copysetCheck.Init("127.0.0.1:6666")); } -// CheckOneCopyset正常情况 +// CheckOneCopyset normal situation TEST_F(CopysetCheckCoreTest, CheckOneCopysetNormal) { std::vector csLocs; butil::IOBuf followerBuf; @@ -191,17 +199,12 @@ TEST_F(CopysetCheckCoreTest, CheckOneCopysetNormal) { EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySet(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(csLocs), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(6) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<2>(csLocs), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(6).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(6) - .WillOnce(DoAll(SetArgPointee<0>(leaderBuf), - Return(0))) - .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(leaderBuf), Return(0))) + .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf), Return(0))); CopysetCheckCore copysetCheck(mdsClient_, csClient_); ASSERT_EQ(CheckResult::kHealthy, copysetCheck.CheckOneCopyset(1, 100)); butil::IOBuf iobuf; @@ -215,7 +218,7 @@ TEST_F(CopysetCheckCoreTest, CheckOneCopysetNormal) { ASSERT_EQ(iobuf.to_string(), copysetCheck.GetCopysetDetail()); } -// CheckOneCopyset异常情况 +// CheckOneCopyset Exception TEST_F(CopysetCheckCoreTest, CheckOneCopysetError) { std::vector csLocs; butil::IOBuf followerBuf; @@ -231,52 +234,45 @@ TEST_F(CopysetCheckCoreTest, CheckOneCopysetError) { copyset.set_logicalpoolid(1); copyset.set_copysetid(100); - // 1、GetChunkServerListInCopySet失败 + // 1. GetChunkServerListInCopySet failed EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySet(_, _, _)) .Times(1) .WillOnce(Return(-1)); CopysetCheckCore copysetCheck1(mdsClient_, csClient_); ASSERT_EQ(CheckResult::kOtherErr, copysetCheck1.CheckOneCopyset(1, 100)); - // 2、copyset不健康 + // 2. 
Copyset is unhealthy GetIoBufForTest(&followerBuf, "4294967396", "FOLLOWER", true); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySet(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(csLocs), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(3) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<2>(csLocs), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(3).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf), Return(0))); CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(CheckResult::kOtherErr, copysetCheck2.CheckOneCopyset(1, 100)); - // 3、有peer不在线,一个是chunkserver不在线,一个是copyset不在线 + // 3. Some peers are offline: for one the chunkserver is offline, for the + // other the copyset is offline GetIoBufForTest(&followerBuf, "4294967397"); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySet(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(csLocs), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(csLocs), Return(0))); EXPECT_CALL(*csClient_, Init(_)) .Times(4) .WillOnce(Return(-1)) .WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(3) - .WillOnce(DoAll(SetArgPointee<0>(leaderBuf), - Return(0))) - .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(leaderBuf), Return(0))) + .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf), Return(0))); CopysetCheckCore copysetCheck3(mdsClient_, csClient_); ASSERT_EQ(CheckResult::kMajorityPeerNotOnline, - copysetCheck3.CheckOneCopyset(1, 100)); + copysetCheck3.CheckOneCopyset(1, 100)); } - -// CheckCopysetsOnChunkserver正常情况 +// CheckCopysetsOnChunkserver normal situation TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerHealthy) { ChunkServerIdType csId = 1; std::string csAddr = "127.0.0.1:9191"; @@ -297,63 +293,52 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerHealthy) { csServerInfos.emplace_back(csServerInfo); } - // mds返回Chunkserver retired的情况,直接返回0 + // If MDS reports the chunkserver as retired, return 0 directly GetCsInfoForTest(&csInfo, csId, false, "LEADER"); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); CopysetCheckCore copysetCheck1(mdsClient_, csClient_); ASSERT_EQ(0, copysetCheck1.CheckCopysetsOnChunkServer(csId)); ASSERT_DOUBLE_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); expectedRes[kTotal].insert(gId); - // 通过id查询,有一个copyset配置组中没有当前chunkserver,应忽略 + // Query by ID; one copyset's configuration group does not contain the + // current chunkserver, so it should be ignored GetCsInfoForTest(&csInfo, csId); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(4) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(4).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(4) - .WillOnce(DoAll(SetArgPointee<0>(followerBuf1), - Return(0))) - .WillOnce(DoAll(SetArgPointee<0>(leaderBuf), - Return(0))) - .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf1), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(followerBuf1), Return(0))) + .WillOnce(DoAll(SetArgPointee<0>(leaderBuf), Return(0))) + .WillRepeatedly(DoAll(SetArgPointee<0>(followerBuf1), Return(0))); CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(0, copysetCheck2.CheckCopysetsOnChunkServer(csId)); ASSERT_DOUBLE_EQ(0, copysetCheck2.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes()); - // 通过地址查询 + // Query by address EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csAddr, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(1) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(1).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(followerBuf2), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(followerBuf2), Return(0))); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), Return(0))); CopysetCheckCore copysetCheck3(mdsClient_, csClient_); ASSERT_EQ(0, copysetCheck3.CheckCopysetsOnChunkServer(csAddr)); ASSERT_DOUBLE_EQ(0, copysetCheck3.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck3.GetCopysetsRes()); }
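For reference, the status text these tests fabricate mimics the braft node-status dump that CopysetCheckCore parses. Assembled from the os << statements in GetIoBufForTest above, a healthy LEADER entry reads roughly as follows (the "..." parts are elided by the surrounding diff hunks, so they are not reproduced here):

[4294967396]
peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 127.0.0.1:9193:0
storage: [2581, 2580]
...
state_machine: Idle
state: LEADER
replicator_123: next_index=2582 ... hc=4211759 ac=1089 ic=0

The helper's boolean flags toggle individual lines: gapBig swaps next_index=2582 for 1000, peersLess empties the peers line, and a FOLLOWER entry replaces the replicator line with "leader: 127.0.0.1:9192:0" (or "0.0.0.0:0:0" when noLeader is set).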
-// CheckCopysetsOnChunkserver异常情况 +// CheckCopysetsOnChunkserver exception cases TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { ChunkServerIdType csId = 1; std::string csAddr = "127.0.0.1:9191"; @@ -376,7 +361,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { GetIoBufForTest(&followerBuf2, gId, "FOLLOWER", true); std::map> expectedRes; - // 1、GetChunkServerInfo失败的情况 + // 1. GetChunkServerInfo fails CopysetCheckCore copysetCheck1(mdsClient_, csClient_); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) @@ -385,7 +370,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { ASSERT_DOUBLE_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); - // 2、chunkserver发送RPC失败的情况 + // 2.
The situation where chunkserver fails to send RPC std::vector csServerInfos; for (int i = 1; i <= 3; ++i) { CopySetServerInfo csServerInfo; @@ -398,53 +383,43 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { GetCsInfoForTest(&csInfo, csId); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); EXPECT_CALL(*csClient_, Init(_)) .Times(10) .WillOnce(Return(-1)) .WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(9) - .WillOnce(DoAll(SetArgPointee<0>(followerBuf), - Return(0))) - .WillOnce(DoAll(SetArgPointee<0>(followerBuf), - Return(0))) + .WillOnce(DoAll(SetArgPointee<0>(followerBuf), Return(0))) + .WillOnce(DoAll(SetArgPointee<0>(followerBuf), Return(0))) .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<0>(followerBuf), - Return(0))) + .WillOnce(DoAll(SetArgPointee<0>(followerBuf), Return(0))) .WillRepeatedly(Return(-1)); EXPECT_CALL(*mdsClient_, GetCopySetsInChunkServer(csAddr, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(copysets), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(copysets), Return(0))); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), Return(0))); CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck2.CheckCopysetsOnChunkServer(csId)); ASSERT_DOUBLE_EQ(1, copysetCheck2.GetCopysetStatistics().unhealthyRatio); - std::set expectedExcepCs = {csAddr, "127.0.0.1:9493", - "127.0.0.1:9394", "127.0.0.1:9496", - "127.0.0.1:9293", "127.0.0.1:9396", - "127.0.0.1:9499"}; + std::set expectedExcepCs = { + csAddr, "127.0.0.1:9493", "127.0.0.1:9394", "127.0.0.1:9496", + "127.0.0.1:9293", "127.0.0.1:9396", "127.0.0.1:9499"}; ASSERT_EQ(expectedExcepCs, copysetCheck2.GetServiceExceptionChunkServer()); std::set expectedCopysetExcepCs = {"127.0.0.1:9292"}; ASSERT_EQ(expectedCopysetExcepCs, - copysetCheck2.GetCopysetLoadExceptionChunkServer()); + copysetCheck2.GetCopysetLoadExceptionChunkServer()); ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes()); expectedRes.clear(); - // 3、获取chunkserver上的copyset失败的情况 + // 3. Failure in obtaining copyset on chunkserver GetCsInfoForTest(&csInfo, csId); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(1).WillOnce(Return(-1)); EXPECT_CALL(*mdsClient_, GetCopySetsInChunkServer(csAddr, _)) .Times(1) .WillOnce(Return(-1)); @@ -455,22 +430,16 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { ASSERT_EQ(expectedExcepCs, copysetCheck3.GetServiceExceptionChunkServer()); ASSERT_EQ(expectedRes, copysetCheck3.GetCopysetsRes()); - // 4、获取copyset对应的chunkserver列表失败的情况 + // 4. 
Failure in obtaining the chunkserver list corresponding to the copyset GetCsInfoForTest(&csInfo, csId); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*csClient_, GetRaftStatus(_)) - .Times(1) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*csClient_, GetRaftStatus(_)).Times(1).WillOnce(Return(-1)); EXPECT_CALL(*mdsClient_, GetCopySetsInChunkServer(csAddr, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(copysets), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(copysets), Return(0))); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(1) .WillOnce(Return(-1)); @@ -480,18 +449,14 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { ASSERT_EQ(expectedExcepCs, copysetCheck4.GetServiceExceptionChunkServer()); ASSERT_EQ(expectedRes, copysetCheck4.GetCopysetsRes()); - // 检查copyset是否在配置组中时出错 + // Error while checking whether the copyset is in the configuration group EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csAddr, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(1) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(1).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(followerBuf2), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(followerBuf2), Return(0))); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(1) .WillOnce(Return(-1)); @@ -499,10 +464,12 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { ASSERT_EQ(-1, copysetCheck5.CheckCopysetsOnChunkServer(csAddr)); } -// chunkserver上copyset不健康的情况 -// 检查单个server和集群都是复用的CheckCopysetsOnChunkserver -// 所以CheckCopysetsOnChunkserver要测每个不健康的情况,其他的只要测健康和不健康还有不在线的情况就好 -// 具体什么原因不健康不用关心 +// Unhealthy copysets on a chunkserver +// Checking a single server and checking the whole cluster both reuse +// CheckCopysetsOnChunkserver, so CheckCopysetsOnChunkserver must cover every +// unhealthy case; the other checks only need to cover the healthy, unhealthy, +// and offline cases. The specific reason a copyset is unhealthy does not +// matter there TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) { ChunkServerIdType csId = 1; std::string csAddr1 = "127.0.0.1:9194"; @@ -516,110 +483,107 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) { uint64_t gId = 4294967396; std::string groupId; - // 1、首先加入9个健康的copyset + // 1. First, add 9 healthy copysets for (int i = 0; i < 9; ++i) { groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, - false, false, false); + GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, false, + false, false); expectedRes[kTotal].emplace(groupId); os << temp << "\r\n"; } - // 2、加入没有leader的copyset + // 2. Add a copyset without a leader groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "FOLLOWER", true, false, false, - false, false, false); + GetIoBufForTest(&temp, groupId, "FOLLOWER", true, false, false, false, + false, false); expectedRes[kTotal].emplace(groupId); expectedRes[kNoLeader].emplace(groupId); os << temp << "\r\n"; - // 3、加入正在安装快照的copyset + // 3.
Add a copyset that is currently installing snapshots groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "LEADER", false, true, false, - false, false, false); + GetIoBufForTest(&temp, groupId, "LEADER", false, true, false, false, false, + false); expectedRes[kTotal].emplace(groupId); expectedRes[kInstallingSnapshot].emplace(groupId); os << temp << "\r\n"; - // 4、加入peer不足的copyset + // 4. Add a copyset with insufficient peers groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "LEADER", false, false, true, - false, false, false); + GetIoBufForTest(&temp, groupId, "LEADER", false, false, true, false, false, + false); expectedRes[kTotal].emplace(groupId); expectedRes[kPeersNoSufficient].emplace(groupId); os << temp << "\r\n"; - // 5、加入日志差距大的copset + // 5. Add a copyset with a large log gap groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, - true, false, false); + GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, true, false, + false); expectedRes[kTotal].emplace(groupId); expectedRes[kLogIndexGapTooBig].emplace(groupId); os << temp << "\r\n"; - // 6、加入无法解析的copyset,这种情况不会发生,发生了表明程序有bug - // 打印错误信息,但不会加入到unhealthy + // 6. Add a copyset that cannot be parsed. This should never happen; if it + // does, it indicates a program bug + // An error message is printed, but the copyset is not added to unhealthy groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, - false, true, false); + GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, false, true, + false); expectedRes[kTotal].emplace(groupId); os << temp << "\r\n"; - // 7.1、加入少数peer不在线的copyset + // 7.1. Add a copyset with a minority of its peers offline groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, - false, false, true); + GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, false, false, + true); expectedRes[kTotal].emplace(groupId); expectedRes[kMinorityPeerNotOnline].emplace(groupId); os << temp << "\r\n"; - // 7.2、加入大多数peer不在线的copyset + // 7.2. Add a copyset with a majority of its peers offline groupId = std::to_string(gId++); - GetIoBufForTest(&temp, groupId, "FOLLOWER", true, false, false, - false, false, false, true); + GetIoBufForTest(&temp, groupId, "FOLLOWER", true, false, false, false, + false, false, true); expectedRes[kTotal].emplace(groupId); expectedRes[kMajorityPeerNotOnline].emplace(groupId); os << temp << "\r\n"; - // 8、加入CANDIDATE状态的copyset + // 8. Add a copyset in the CANDIDATE state groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "CANDIDATE"); expectedRes[kTotal].emplace(groupId); expectedRes[kNoLeader].emplace(groupId); os << temp << "\r\n"; - // 9、加入TRANSFERRING状态的copyset + // 9. Add a copyset in the TRANSFERRING state groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "TRANSFERRING"); expectedRes[kTotal].emplace(groupId); expectedRes[kNoLeader].emplace(groupId); os << temp << "\r\n"; - // 10、加入ERROR状态的copyset + // 10. Add a copyset in the ERROR state groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "ERROR"); expectedRes[kTotal].emplace(groupId); expectedRes["state ERROR"].emplace(groupId); os << temp << "\r\n"; - // 11、加入SHUTDOWN状态的copyset + // 11.
Add a copyset in SHUTDOWN state groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "SHUTDOWN"); expectedRes[kTotal].emplace(groupId); expectedRes["state SHUTDOWN"].emplace(groupId); os << temp; - // 设置mock对象的返回,8个正常iobuf里面,设置一个的peer不在线,因此unhealthy++ + // Set the return of mock objects. Among the 8 normal iobufs, one peer is + // set to be offline, resulting in unhealthy++ os.move_to(iobuf); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(csInfo), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*csClient_, Init(csAddr1)) - .WillOnce(Return(-1)); - EXPECT_CALL(*csClient_, Init(csAddr2)) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<1>(csInfo), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).WillRepeatedly(Return(0)); + EXPECT_CALL(*csClient_, Init(csAddr1)).WillOnce(Return(-1)); + EXPECT_CALL(*csClient_, Init(csAddr2)).WillOnce(Return(-1)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); std::vector csServerInfos; CopySetServerInfo csServerInfo; GetCsServerInfoForTest(&csServerInfo, 1); @@ -629,10 +593,9 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) { csServerInfos.emplace_back(csServerInfo); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<2>(csServerInfos), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<2>(csServerInfos), Return(0))); - // 检查结果 + // Inspection results std::set expectedExcepCs = {csAddr1, csAddr2}; CopysetCheckCore copysetCheck(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck.CheckCopysetsOnChunkServer(csId)); @@ -641,7 +604,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) { ASSERT_EQ(expectedExcepCs, copysetCheck.GetServiceExceptionChunkServer()); } -// CheckCopysetsOnServer正常情况 +// CheckCopysetsOnServer normal condition TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { ServerIdType serverId = 1; std::string serverIp = "127.0.0.1"; @@ -656,21 +619,17 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { std::string groupId; groupId = std::to_string(gId++); expectedRes[kTotal].emplace(groupId); - GetIoBufForTest(&iobuf, groupId, "LEADER", false, false, false, - false, false, false); + GetIoBufForTest(&iobuf, groupId, "LEADER", false, false, false, false, + false, false); - // 通过id查询 + // Query by ID EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(serverId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(3) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(3).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); CopysetCheckCore copysetCheck1(mdsClient_, csClient_); ASSERT_EQ(0, copysetCheck1.CheckCopysetsOnServer(serverId, &unhealthyCs)); @@ -678,19 +637,15 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { ASSERT_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); - // 通过ip查询 + // Query through IP EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(serverIp, _)) .Times(1) - 
.WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(3) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(3).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); - // 通过ip查询 + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); + // Query through IP CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(0, copysetCheck2.CheckCopysetsOnServer(serverIp, &unhealthyCs)); ASSERT_EQ(0, unhealthyCs.size()); @@ -698,7 +653,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes()); } -// CheckCopysetsOnServer异常情况 +// CheckCopysetsOnServer exception cases TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) { ServerIdType serverId = 1; butil::IOBuf iobuf; @@ -721,7 +676,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) { gIds.emplace(std::to_string(gId)); } - // 1、ListChunkServersOnServer失败的情况 + // 1. ListChunkServersOnServer fails EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(serverId, _)) .Times(1) .WillOnce(Return(-1)); @@ -730,7 +685,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) { ASSERT_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); - // 3、一个chunkserver访问失败,一个chunkserver不健康的情况 + // 3. One chunkserver unreachable and one chunkserver unhealthy GetIoBufForTest(&iobuf, groupId, "LEADER", false, true); expectedRes[kTotal] = gIds; expectedRes[kTotal].emplace(groupId); @@ -738,21 +693,17 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) { expectedRes[kMinorityPeerNotOnline] = gIds; EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(serverId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); EXPECT_CALL(*csClient_, Init(_)) .Times(3) .WillOnce(Return(-1)) .WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); - EXPECT_CALL(*mdsClient_, - GetCopySetsInChunkServer("127.0.0.1:9191", _)) + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); + EXPECT_CALL(*mdsClient_, GetCopySetsInChunkServer("127.0.0.1:9191", _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(copysets), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(copysets), Return(0))); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(1) .WillOnce(Return(-1)); @@ -760,15 +711,14 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) { CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck2.CheckCopysetsOnServer(serverId, &unhealthyCs)); ASSERT_EQ(1, copysetCheck2.GetCopysetStatistics().unhealthyRatio); - std::vector unhealthyCsExpected = - {"127.0.0.1:9191", "127.0.0.1:9192"}; + std::vector unhealthyCsExpected = {"127.0.0.1:9191", + "127.0.0.1:9192"}; ASSERT_EQ(unhealthyCsExpected, unhealthyCs); - std::set expectedExcepCs = - {"127.0.0.1:9191"}; + std::set expectedExcepCs = {"127.0.0.1:9191"}; ASSERT_EQ(expectedExcepCs, copysetCheck2.GetServiceExceptionChunkServer()); } -// CheckCopysetsInCluster正常情况 +// CheckCopysetsInCluster normal situation TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterNormal) { butil::IOBuf iobuf; GetIoBufForTest(&iobuf, "4294967396", "LEADER"); @@ -783,23
+733,17 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterNormal) { EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(servers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(servers), Return(0))); EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(1, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(3) - .WillRepeatedly(Return(0)); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(3).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(0), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(0), Return(0))); std::vector copysetsInMds; CopysetInfo copyset; copyset.set_logicalpoolid(1); @@ -807,8 +751,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterNormal) { copysetsInMds.emplace_back(copyset); EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0))); CopysetCheckCore copysetCheck1(mdsClient_, csClient_); ASSERT_EQ(0, copysetCheck1.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); @@ -826,7 +769,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterError) { GetCsInfoForTest(&chunkserver, 1); std::vector chunkservers = {chunkserver}; - // 1、ListServersInCluster失败 + // 1. ListServersInCluster failed EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) .WillOnce(Return(-1)); @@ -835,89 +778,75 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterError) { ASSERT_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); - // 2、CheckCopysetsOnServer返回不为0 + // 2. CheckCopysetsOnServer returned a non zero value EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(servers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(servers), Return(0))); EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(1, _)) .Times(1) .WillOnce(Return(-1)); std::vector copysetsInMds; EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0))); CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck2.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck2.GetCopysetStatistics().unhealthyRatio); expectedRes[kTotal] = {}; ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes()); - // 3、GetMetric失败 + // 3. 
GetMetric failed expectedRes[kTotal] = {"4294967396"}; EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>(servers), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(servers), Return(0))); EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(1, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(6) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(chunkservers), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(6).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(2) .WillOnce(Return(-1)) - .WillRepeatedly(DoAll(SetArgPointee<1>(10), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(10), Return(0))); CopysetInfo copyset; copyset.set_logicalpoolid(1); copyset.set_copysetid(100); copysetsInMds.emplace_back(copyset); EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), - Return(0))); - // 获取operator失败 + .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0))); + // Failed to obtain operator CopysetCheckCore copysetCheck3(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck3.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck3.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck3.GetCopysetsRes()); - // operator数量大于0 + // The number of operators is greater than 0 CopysetCheckCore copysetCheck4(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck4.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck4.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck4.GetCopysetsRes()); - // 4、比较chunkserver跟mds的copyset失败 + // 4. 
Comparing the copysets between chunkserver and MDS fails EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<0>(servers), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(servers), Return(0))); EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(1, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(9) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(chunkservers), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(9).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(9) - .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), - Return(0))); - // 从获取copyset失败 + .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); + // Failed to obtain copysets from MDS EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(1) .WillRepeatedly(Return(-1)); ASSERT_EQ(-1, copysetCheck4.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck4.GetCopysetStatistics().unhealthyRatio); - // copyset数量不一致 + // Inconsistent number of copysets copysetsInMds.clear(); copyset.set_logicalpoolid(1); copyset.set_copysetid(101); @@ -926,16 +855,14 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterError) { copysetsInMds.emplace_back(copyset); EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0))); ASSERT_EQ(-1, copysetCheck4.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck4.GetCopysetStatistics().unhealthyRatio); - // copyset数量一致,但是内容不一致 + // The number of copysets matches, but the contents differ copysetsInMds.pop_back(); EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0))); ASSERT_EQ(-1, copysetCheck4.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck4.GetCopysetStatistics().unhealthyRatio); } @@ -944,21 +871,17 @@ TEST_F(CopysetCheckCoreTest, CheckOperator) { CopysetCheckCore copysetCheck(mdsClient_, csClient_); std::string opName = "change_peer"; uint64_t checkTime = 3; - // 1、获取metric失败 - EXPECT_CALL(*mdsClient_, GetMetric(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 1. Failed to obtain metric + EXPECT_CALL(*mdsClient_, GetMetric(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetCheck.CheckOperator(opName, checkTime)); - // 2、operator数量不为0 + // 2. The number of operators is not 0 EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(10), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(10), Return(0))); ASSERT_EQ(10, copysetCheck.CheckOperator(opName, checkTime)); - // 3、operator数量为0 + // 3.
The number of operators is 0 EXPECT_CALL(*mdsClient_, GetMetric(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(0), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(0), Return(0))); ASSERT_EQ(0, copysetCheck.CheckOperator(opName, checkTime)); } @@ -969,11 +892,10 @@ TEST_F(CopysetCheckCoreTest, ListMayBrokenVolumes) { GetCsInfoForTest(&chunkserver, 1); chunkservers.emplace_back(chunkserver); } - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*csClient_, Init(_)) .Times(12) .WillOnce(Return(0)) @@ -988,11 +910,10 @@ TEST_F(CopysetCheckCoreTest, ListMayBrokenVolumes) { copyset.set_copysetid(100 + i); copysets.emplace_back(copyset); } - EXPECT_CALL(*mdsClient_, GetCopySetsInChunkServer( - An(), _)) + EXPECT_CALL(*mdsClient_, + GetCopySetsInChunkServer(An(), _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(copysets), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(copysets), Return(0))); std::vector csServerInfos; for (int i = 1; i <= 3; ++i) { CopySetServerInfo csServerInfo; @@ -1001,16 +922,14 @@ TEST_F(CopysetCheckCoreTest, ListMayBrokenVolumes) { } EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySets(_, _, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(csServerInfos), Return(0))); std::vector fileNames = {"file1", "file2"}; std::vector fileNames2; CopysetCheckCore copysetCheck1(mdsClient_, csClient_); EXPECT_CALL(*mdsClient_, ListVolumesOnCopyset(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(fileNames), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(fileNames), Return(0))); ASSERT_EQ(0, copysetCheck1.ListMayBrokenVolumes(&fileNames2)); ASSERT_EQ(fileNames, fileNames2); } diff --git a/test/tools/copyset_check_test.cpp b/test/tools/copyset_check_test.cpp index 01c7e3f4c2..2e034b6d27 100644 --- a/test/tools/copyset_check_test.cpp +++ b/test/tools/copyset_check_test.cpp @@ -20,15 +20,17 @@ * Author: charisu */ -#include #include "src/tools/copyset_check.h" + +#include + #include "src/tools/copyset_check_core.h" #include "test/tools/mock/mock_copyset_check_core.h" using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::ReturnRef; -using ::testing::DoAll; using ::testing::SetArgPointee; DECLARE_bool(detail); @@ -55,26 +57,23 @@ class CopysetCheckTest : public ::testing::Test { core_ = std::make_shared(); FLAGS_detail = true; } - void TearDown() { - core_ = nullptr; - } + void TearDown() { core_ = nullptr; } void GetIoBufForTest(butil::IOBuf* buf, const std::string& gId, - bool isLeader = false, - bool noLeader = false, - bool installingSnapshot = false, - bool peersLess = false, - bool gapBig = false, - bool parseErr = false, - bool peerOffline = false) { + bool isLeader = false, bool noLeader = false, + bool installingSnapshot = false, + bool peersLess = false, bool gapBig = false, + bool parseErr = false, bool peerOffline = false) { butil::IOBufBuilder os; - os << "[" << gId << "]\r\n"; + os << "[" << gId << "]\r\n"; if (peersLess) { os << "peers: \r\n"; } else if (peerOffline) { - os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 127.0.0.1:9194:0\r\n"; // NOLINT + os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 " + "127.0.0.1:9194:0\r\n"; // NOLINT } else { - os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 127.0.0.1:9193:0\r\n"; 
// NOLINT + os << "peers: 127.0.0.1:9191:0 127.0.0.1:9192:0 " + "127.0.0.1:9193:0\r\n"; // NOLINT } os << "storage: [2581, 2580]\n"; if (parseErr) { @@ -84,7 +83,9 @@ class CopysetCheckTest : public ::testing::Test { } os << "state_machine: Idle\r\n"; if (isLeader) { - os << "state: " << "LEADER" << "\r\n"; + os << "state: " + << "LEADER" + << "\r\n"; os << "replicator_123: next_index="; if (gapBig) { os << "1000"; @@ -99,26 +100,31 @@ class CopysetCheckTest : public ::testing::Test { } os << "hc=4211759 ac=1089 ic=0\r\n"; } else { - os << "state: " << "FOLLOWER" << "\r\n"; + os << "state: " + << "FOLLOWER" + << "\r\n"; if (noLeader) { - os << "leader: " << "0.0.0.0:0:0\r\n"; + os << "leader: " + << "0.0.0.0:0:0\r\n"; } else { - os << "leader: " << "127.0.0.1:9192:0\r\n"; + os << "leader: " + << "127.0.0.1:9192:0\r\n"; } } os.move_to(*buf); } - std::map> res1 = - {{"total", {"4294967396", "4294967397"}}}; - std::map> res2 = - {{"total", {"4294967396", "4294967397", "4294967398", - "4294967399", "4294967400", "4294967401"}}, - {"installing snapshot", {"4294967397"}}, - {"no leader", {"4294967398"}}, - {"index gap too big", {"4294967399"}}, - {"peers not sufficient", {"4294967400"}}, - {"peer not online", {"4294967401"}}}; + std::map> res1 = { + {"total", {"4294967396", "4294967397"}}}; + std::map> res2 = { + {"total", + {"4294967396", "4294967397", "4294967398", "4294967399", "4294967400", + "4294967401"}}, + {"installing snapshot", {"4294967397"}}, + {"no leader", {"4294967398"}}, + {"index gap too big", {"4294967399"}}, + {"peers not sufficient", {"4294967400"}}, + {"peer not online", {"4294967401"}}}; std::set serviceExcepCs = {"127.0.0.1:9092"}; std::set copysetExcepCs = {"127.0.0.1:9093"}; std::set emptySet; @@ -143,29 +149,25 @@ TEST_F(CopysetCheckTest, CheckOneCopyset) { copysetCheck.PrintHelp("check-copyset"); butil::IOBuf iobuf; GetIoBufForTest(&iobuf, "4294967396", true); - std::vector peersInCopyset = - {"127.0.0.1:9091", "127.0.0.1:9092", "127.0.0.1:9093"}; + std::vector peersInCopyset = { + "127.0.0.1:9091", "127.0.0.1:9092", "127.0.0.1:9093"}; std::string copysetDetail = iobuf.to_string(); - // Init失败的情况 - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + // Init fails + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetCheck.RunCommand("check-copyset")); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 不支持的命令 + // Unsupported command ASSERT_EQ(-1, copysetCheck.RunCommand("check-nothings")); copysetCheck.PrintHelp("check-nothins"); - // 没有指定逻辑池和copyset的话返回失败 + // If no logical pool and copyset are specified, a failure is returned ASSERT_EQ(-1, copysetCheck.RunCommand("check-copyset")); FLAGS_logicalPoolId = 1; FLAGS_copysetId = 100; copysetCheck.PrintHelp("check-copyset"); - // 健康的情况 + // Healthy situation EXPECT_CALL(*core_, CheckOneCopyset(_, _)) .Times(1) .WillOnce(Return(CheckResult::kHealthy)); @@ -180,7 +182,7 @@ TEST_F(CopysetCheckTest, CheckOneCopyset) { .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-copyset")); - // copyset不健康的情况 + // The copyset is unhealthy EXPECT_CALL(*core_, CheckOneCopyset(_, _)) .Times(1) .WillOnce(Return(CheckResult::kLogIndexGapTooBig)); @@ -199,15 +201,13 @@ TEST_F(CopysetCheckTest, testCheckChunkServer) { CopysetCheck copysetCheck(core_); copysetCheck.PrintHelp("check-chunkserver"); - EXPECT_CALL(*core_,
Init(_)) - .Times(1) - .WillOnce(Return(0)); - // 没有指定chunkserver的话报错 + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); + // Error reported if chunkserver is not specified ASSERT_EQ(-1, copysetCheck.RunCommand("check-chunkserver")); copysetCheck.PrintHelp("check-chunkserver"); - // 健康的情况 - // 通过id查询 + // Healthy situation + // Query by ID FLAGS_chunkserverId = 1; EXPECT_CALL(*core_, CheckCopysetsOnChunkServer(FLAGS_chunkserverId)) .Times(1) @@ -225,11 +225,11 @@ TEST_F(CopysetCheckTest, testCheckChunkServer) { .Times(1) .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-chunkserver")); - // id和地址同时指定,报错 + // Error reported when both ID and address are specified simultaneously FLAGS_chunkserverAddr = "127.0.0.1:8200"; ASSERT_EQ(-1, copysetCheck.RunCommand("check-chunkserver")); FLAGS_chunkserverId = 0; - // 通过地址查询 + // Query by address EXPECT_CALL(*core_, CheckCopysetsOnChunkServer(FLAGS_chunkserverAddr)) .Times(1) .WillOnce(Return(0)); @@ -247,7 +247,7 @@ TEST_F(CopysetCheckTest, testCheckChunkServer) { .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-chunkserver")); - // 不健康的情况 + // Unhealthy situation EXPECT_CALL(*core_, CheckCopysetsOnChunkServer(FLAGS_chunkserverAddr)) .Times(1) .WillOnce(Return(-1)); @@ -269,23 +269,20 @@ TEST_F(CopysetCheckTest, testCheckServer) { CopysetCheck copysetCheck(core_); copysetCheck.PrintHelp("check-server"); - std::vector chunkservers = - {"127.0.0.1:9091", "127.0.0.1:9092", "127.0.0.1:9093"}; - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + std::vector chunkservers = {"127.0.0.1:9091", "127.0.0.1:9092", + "127.0.0.1:9093"}; + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 没有指定server的话报错 + // If no server is specified, an error will be reported ASSERT_EQ(-1, copysetCheck.RunCommand("check-server")); copysetCheck.PrintHelp("check-server"); - // 健康的情况 - // 通过id查询 + // Healthy situation + // Query by ID FLAGS_serverId = 1; EXPECT_CALL(*core_, CheckCopysetsOnServer(FLAGS_serverId, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); EXPECT_CALL(*core_, GetCopysetsRes()) .Times(1) .WillRepeatedly(ReturnRef(res1)); @@ -299,15 +296,14 @@ TEST_F(CopysetCheckTest, testCheckServer) { .Times(1) .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-server")); - // id和ip同时指定,报错 + // Error reported when both ID and IP are specified simultaneously FLAGS_serverIp = "127.0.0.1"; ASSERT_EQ(-1, copysetCheck.RunCommand("check-server")); FLAGS_serverId = 0; - // 通过ip查询 + // Query by IP EXPECT_CALL(*core_, CheckCopysetsOnServer(FLAGS_serverIp, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(chunkservers), Return(0))); EXPECT_CALL(*core_, GetCopysetsRes()) .Times(1) .WillRepeatedly(ReturnRef(res1)); @@ -322,7 +318,7 @@ TEST_F(CopysetCheckTest, testCheckServer) { .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-server")); - // 不健康的情况 + // Unhealthy situation EXPECT_CALL(*core_, CheckCopysetsOnServer(FLAGS_serverIp, _)) .Times(1) .WillOnce(Return(-1)); @@ -344,14 +340,10 @@ TEST_F(CopysetCheckTest, testCheckCluster) { CopysetCheck copysetCheck(core_); copysetCheck.PrintHelp("copysets-status"); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 健康的情况 - EXPECT_CALL(*core_, CheckCopysetsInCluster()) - .Times(1) - .WillOnce(Return(0)); + // Healthy situation + EXPECT_CALL(*core_, CheckCopysetsInCluster()).Times(1).WillOnce(Return(0)); EXPECT_CALL(*core_, GetCopysetsRes()) .Times(1) .WillRepeatedly(ReturnRef(res1)); @@ -366,10 +358,8 @@ TEST_F(CopysetCheckTest, testCheckCluster) { .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand(kCopysetsStatusCmd)); - // 不健康的情况 - EXPECT_CALL(*core_, CheckCopysetsInCluster()) - .Times(1) - .WillOnce(Return(-1)); + // Unhealthy situation + EXPECT_CALL(*core_, CheckCopysetsInCluster()).Times(1).WillOnce(Return(-1)); EXPECT_CALL(*core_, GetCopysetsRes()) .Times(1) .WillRepeatedly(ReturnRef(res2)); @@ -388,14 +378,12 @@ TEST_F(CopysetCheckTest, testCheckOperator) { CopysetCheck copysetCheck(core_); copysetCheck.PrintHelp("check-operator"); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、不支持的operator + // 1. Unsupported operator FLAGS_opName = "no_operator"; ASSERT_EQ(-1, copysetCheck.RunCommand(kCheckOperatorCmd)); - // 2、transfer leader的operator和total的 + // 2. The transfer leader operator and the total EXPECT_CALL(*core_, CheckOperator(_, FLAGS_leaderOpInterval)) .Times(2) .WillOnce(Return(0)) @@ -404,7 +392,7 @@ TEST_F(CopysetCheckTest, testCheckOperator) { ASSERT_EQ(0, copysetCheck.RunCommand(kCheckOperatorCmd)); FLAGS_opName = kTotalOpName; ASSERT_EQ(-1, copysetCheck.RunCommand(kCheckOperatorCmd)); - // 2、其他operator + // 2. Other operators EXPECT_CALL(*core_, CheckOperator(_, FLAGS_opIntervalExceptLeader)) .Times(3) .WillOnce(Return(10)) @@ -420,15 +408,11 @@ TEST_F(CopysetCheckTest, PrintMayBrokenVolumes) { CopysetCheck copysetCheck(core_); copysetCheck.PrintHelp(kListMayBrokenVolumes); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); // fail - EXPECT_CALL(*core_, ListMayBrokenVolumes(_)) - .WillOnce(Return(-1)); + EXPECT_CALL(*core_, ListMayBrokenVolumes(_)).WillOnce(Return(-1)); ASSERT_EQ(-1, copysetCheck.RunCommand(kListMayBrokenVolumes)); - EXPECT_CALL(*core_, ListMayBrokenVolumes(_)) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, ListMayBrokenVolumes(_)).WillOnce(Return(0)); ASSERT_EQ(0, copysetCheck.RunCommand(kListMayBrokenVolumes)); } diff --git a/test/tools/curve_cli_test.cpp b/test/tools/curve_cli_test.cpp index 133d9de42d..0ad6d9cae8 100644 --- a/test/tools/curve_cli_test.cpp +++ b/test/tools/curve_cli_test.cpp @@ -20,22 +20,25 @@ * Author: charisu */ -#include -#include +#include "src/tools/curve_cli.h" + #include +#include #include +#include + #include -#include "src/tools/curve_cli.h" + #include "test/tools/mock/mock_cli_service.h" #include "test/tools/mock/mock_copyset_service.h" #include "test/tools/mock/mock_mds_client.h" using ::testing::_; -using ::testing::Return; -using ::testing::Invoke; +using ::testing::An; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::An; DECLARE_int32(timeout_ms); DECLARE_int32(max_retry); @@ -50,10 +53,8 @@ DECLARE_bool(affirm); namespace curve { namespace tool { -template -void callback(RpcController* controller, - const Req* request, - Resp* response, +template +void callback(RpcController* controller, const Req* request, Resp* response, 
Closure* done) { brpc::ClosureGuard doneGuard(done); } @@ -67,7 +68,7 @@ class CurveCliTest : public ::testing::Test { mockCliService = new MockCliService(); mockCopysetService_ = std::make_shared(); ASSERT_EQ(0, server->AddService(mockCliService, - brpc::SERVER_DOESNT_OWN_SERVICE)); + brpc::SERVER_DOESNT_OWN_SERVICE)); ASSERT_EQ(0, server->AddService(mockCopysetService_.get(), brpc::SERVER_DOESNT_OWN_SERVICE)); ASSERT_EQ(0, server->Start("127.0.0.1:9192", nullptr)); @@ -82,8 +83,8 @@ class CurveCliTest : public ::testing::Test { mockCliService = nullptr; } - brpc::Server *server; - MockCliService *mockCliService; + brpc::Server* server; + MockCliService* mockCliService; std::shared_ptr mockCopysetService_; const std::string conf = "127.0.0.1:9192:0"; const std::string peer = "127.0.0.1:9192:0"; @@ -113,20 +114,20 @@ TEST_F(CurveCliTest, RemovePeer) { curveCli.PrintHelp("remove-peer"); curveCli.PrintHelp("test"); curveCli.RunCommand("test"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("remove-peer")); - // conf为空 + // conf is empty FLAGS_peer = peer; FLAGS_conf = ""; ASSERT_EQ(-1, curveCli.RunCommand("remove-peer")); - // 解析conf失败 + // Failed to parse conf FLAGS_conf = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("remove-peer")); - // 解析peer失败 + // Parsing peer failed FLAGS_conf = conf; FLAGS_peer = "1234"; - // 执行变更成功 + // Successfully executed changes FLAGS_peer = peer; curve::common::Peer* targetPeer = new curve::common::Peer; targetPeer->set_address(peer); @@ -134,32 +135,27 @@ TEST_F(CurveCliTest, RemovePeer) { response.set_allocated_leader(targetPeer); EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetLeaderRequest2 *request, - GetLeaderResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + Invoke([](RpcController* controller, + const GetLeaderRequest2* request, + GetLeaderResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + }))); EXPECT_CALL(*mockCliService, RemovePeer(_, _, _, _)) - .WillOnce(Invoke([](RpcController *controller, - const RemovePeerRequest2 *request, - RemovePeerResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - })); + .WillOnce(Invoke( + [](RpcController* controller, const RemovePeerRequest2* request, + RemovePeerResponse2* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); })); ASSERT_EQ(0, curveCli.RunCommand("remove-peer")); - // 执行变更失败 + // Failed to execute changes EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) - .WillOnce( - Invoke([](RpcController *controller, - const GetLeaderRequest2 *request, - GetLeaderResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillOnce(Invoke([](RpcController* controller, + const GetLeaderRequest2* request, + GetLeaderResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, curveCli.RunCommand("remove-peer")); // TEST CASES: remove broken copyset after remove peer @@ -181,8 +177,8 @@ TEST_F(CurveCliTest, RemovePeer) { EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(getLeaderResp), - Invoke(getLeaderFunc))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(getLeaderResp), Invoke(getLeaderFunc))); 
EXPECT_CALL(*mockCliService, RemovePeer(_, _, _, _)) .Times(3) .WillRepeatedly(Invoke(removePeerFunc)); @@ -210,21 +206,21 @@ TEST_F(CurveCliTest, RemovePeer) { TEST_F(CurveCliTest, TransferLeader) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("transfer-leader"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // conf为空 + // conf is empty FLAGS_peer = peer; FLAGS_conf = ""; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // 解析conf失败 + // Failed to parse conf FLAGS_conf = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // 解析peer失败 + // Parsing peer failed FLAGS_conf = conf; FLAGS_peer = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // 执行变更成功 + // Successfully executed changes FLAGS_peer = peer; curve::common::Peer* targetPeer = new curve::common::Peer; targetPeer->set_address(peer); @@ -232,147 +228,132 @@ TEST_F(CurveCliTest, TransferLeader) { response.set_allocated_leader(targetPeer); EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetLeaderRequest2 *request, - GetLeaderResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + Invoke([](RpcController* controller, + const GetLeaderRequest2* request, + GetLeaderResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + }))); ASSERT_EQ(0, curveCli.RunCommand("transfer-leader")); - // 执行变更失败 + // Failed to execute changes EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) - .WillOnce( - Invoke([](RpcController *controller, - const GetLeaderRequest2 *request, - GetLeaderResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillOnce(Invoke([](RpcController* controller, + const GetLeaderRequest2* request, + GetLeaderResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); } TEST_F(CurveCliTest, ResetPeer) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("reset-peer"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // newConf为空 + // newConf is empty FLAGS_peer = peer; FLAGS_new_conf = ""; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // 解析newConf失败 + // Failed to parse newConf FLAGS_new_conf = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // 解析peer失败 + // Parsing peer failed FLAGS_new_conf = conf; FLAGS_peer = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // newConf有三个副本 + // newConf has three replicas FLAGS_peer = peer; FLAGS_new_conf = "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // newConf不包含peer + // newConf does not contain peer FLAGS_new_conf = "127.0.0.1:8201:0"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // 执行变更成功 + // Successfully executed changes FLAGS_new_conf = conf; EXPECT_CALL(*mockCliService, ResetPeer(_, _, _, _)) - .WillOnce(Invoke([](RpcController *controller, - const ResetPeerRequest2 *request, - ResetPeerResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - })); + .WillOnce(Invoke( + [](RpcController* controller, const ResetPeerRequest2* request, + ResetPeerResponse2* response, + Closure* done) { 
brpc::ClosureGuard doneGuard(done); })); ASSERT_EQ(0, curveCli.RunCommand("reset-peer")); - // 执行变更失败 - EXPECT_CALL(*mockCliService, ResetPeer(_, _, _, _)) - .WillOnce(Invoke([](RpcController *controller, - const ResetPeerRequest2 *request, - ResetPeerResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + // Failed to execute changes + EXPECT_CALL(*mockCliService, ResetPeer(_, _, _, _)) + .WillOnce(Invoke([](RpcController* controller, + const ResetPeerRequest2* request, + ResetPeerResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); } TEST_F(CurveCliTest, DoSnapshot) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("do-snapshot"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("do-snapshot")); - // 解析peer失败 + // Parsing peer failed FLAGS_peer = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("do-snapshot")); - // 执行变更成功 + // Successfully executed changes FLAGS_peer = peer; EXPECT_CALL(*mockCliService, Snapshot(_, _, _, _)) - .WillOnce(Invoke([](RpcController *controller, - const SnapshotRequest2 *request, - SnapshotResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - })); + .WillOnce(Invoke( + [](RpcController* controller, const SnapshotRequest2* request, + SnapshotResponse2* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); })); ASSERT_EQ(0, curveCli.RunCommand("do-snapshot")); - // 执行变更失败 - EXPECT_CALL(*mockCliService, Snapshot(_, _, _, _)) - .WillOnce(Invoke([](RpcController *controller, - const SnapshotRequest2 *request, - SnapshotResponse2 *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + // Failed to execute changes + EXPECT_CALL(*mockCliService, Snapshot(_, _, _, _)) + .WillOnce(Invoke([](RpcController* controller, + const SnapshotRequest2* request, + SnapshotResponse2* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, curveCli.RunCommand("do-snapshot")); } TEST_F(CurveCliTest, DoSnapshotAll) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("do-snapshot-all"); - // 执行变更成功 + // Successfully executed changes std::vector chunkservers; ChunkServerInfo csInfo; csInfo.set_hostip("127.0.0.1"); csInfo.set_port(9192); chunkservers.emplace_back(csInfo); - EXPECT_CALL(*mdsClient_, Init(_)) + EXPECT_CALL(*mdsClient_, Init(_)).Times(2).WillRepeatedly(Return(0)); + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(2) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) - .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*mockCliService, SnapshotAll(_, _, _, _)) .Times(1) - .WillOnce(Invoke([](RpcController *controller, - const SnapshotAllRequest *request, - SnapshotAllResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - })); + .WillOnce(Invoke( + [](RpcController* controller, const SnapshotAllRequest* request, + SnapshotAllResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); })); 
ASSERT_EQ(0, curveCli.RunCommand("do-snapshot-all")); - // 执行变更失败 - EXPECT_CALL(*mockCliService, SnapshotAll(_, _, _, _)) + // Failed to execute changes + EXPECT_CALL(*mockCliService, SnapshotAll(_, _, _, _)) .Times(1) - .WillOnce(Invoke([](RpcController *controller, - const SnapshotAllRequest *request, - SnapshotAllResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillOnce(Invoke([](RpcController* controller, + const SnapshotAllRequest* request, + SnapshotAllResponse* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, curveCli.RunCommand("do-snapshot-all")); } diff --git a/test/tools/curve_meta_tool_test.cpp b/test/tools/curve_meta_tool_test.cpp index 1d493c56f8..a94d54dbb3 100644 --- a/test/tools/curve_meta_tool_test.cpp +++ b/test/tools/curve_meta_tool_test.cpp @@ -20,10 +20,13 @@ * Author: charisu */ +#include "src/tools/curve_meta_tool.h" + #include + #include #include -#include "src/tools/curve_meta_tool.h" + #include "test/fs/mock_local_filesystem.h" namespace curve { @@ -32,8 +35,8 @@ namespace tool { using curve::common::Bitmap; using curve::fs::MockLocalFileSystem; using ::testing::_; -using ::testing::Return; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; @@ -43,12 +46,8 @@ const char chunkFileName[] = "chunk_001"; class CurveMetaToolTest : public ::testing::Test { protected: - void SetUp() { - localFs_ = std::make_shared(); - } - void TearDown() { - localFs_ = nullptr; - } + void SetUp() { localFs_ = std::make_shared(); } + void TearDown() { localFs_ = nullptr; } std::shared_ptr localFs_; }; @@ -65,30 +64,28 @@ TEST_F(CurveMetaToolTest, SupportCommand) { TEST_F(CurveMetaToolTest, PrintChunkMeta) { CurveMetaTool curveMetaTool(localFs_); - // 1、文件不存在 + // 1. The file does not exist EXPECT_CALL(*localFs_, Open(_, _)) .Times(6) .WillOnce(Return(-1)) .WillRepeatedly(Return(1)); - EXPECT_CALL(*localFs_, Close(_)) - .Times(5) - .WillRepeatedly(Return(-1)); + EXPECT_CALL(*localFs_, Close(_)).Times(5).WillRepeatedly(Return(-1)); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); - // 2、读取meta page失败 + // 2. Failed to read meta page EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(2) .WillOnce(Return(-1)) .WillOnce(Return(10)); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); - // 3、解析失败 + // 3. Parsing failed char buf[PAGE_SIZE] = {0}; EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), - Return(PAGE_SIZE))); + Return(PAGE_SIZE))); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); - // 4、普通chunk + // 4. Ordinary chunk ChunkFileMetaPage metaPage; metaPage.version = 1; metaPage.sn = 1; @@ -97,9 +94,9 @@ TEST_F(CurveMetaToolTest, PrintChunkMeta) { EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), - Return(PAGE_SIZE))); + Return(PAGE_SIZE))); ASSERT_EQ(0, curveMetaTool.RunCommand("chunk-meta")); - // 5、克隆chunk + // 5. 
Clone chunk metaPage.location = "test@s3"; uint32_t size = CHUNK_SIZE / PAGE_SIZE; auto bitmap = std::make_shared(size); @@ -110,36 +107,34 @@ TEST_F(CurveMetaToolTest, PrintChunkMeta) { EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), - Return(PAGE_SIZE))); + Return(PAGE_SIZE))); ASSERT_EQ(0, curveMetaTool.RunCommand("chunk-meta")); } TEST_F(CurveMetaToolTest, PrintSnapshotMeta) { CurveMetaTool curveMetaTool(localFs_); - // 1、文件不存在 + // 1. The file does not exist EXPECT_CALL(*localFs_, Open(_, _)) .Times(5) .WillOnce(Return(-1)) .WillRepeatedly(Return(1)); - EXPECT_CALL(*localFs_, Close(_)) - .Times(4) - .WillRepeatedly(Return(-1)); + EXPECT_CALL(*localFs_, Close(_)).Times(4).WillRepeatedly(Return(-1)); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); - // 2、读取meta page失败 + // 2. Failed to read meta page EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(2) .WillOnce(Return(-1)) .WillOnce(Return(10)); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); - // 3、解析失败 + // 3. Parsing failed char buf[PAGE_SIZE] = {0}; EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), - Return(PAGE_SIZE))); + Return(PAGE_SIZE))); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); - // 4、成功chunk + // 4. Successful chunk SnapshotMetaPage metaPage; metaPage.version = 1; metaPage.sn = 1; @@ -153,9 +148,8 @@ TEST_F(CurveMetaToolTest, PrintSnapshotMeta) { EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), - Return(PAGE_SIZE))); + Return(PAGE_SIZE))); ASSERT_EQ(0, curveMetaTool.RunCommand("snapshot-meta")); } } // namespace tool } // namespace curve - diff --git a/test/tools/data_consistency_check_test.cpp b/test/tools/data_consistency_check_test.cpp index 15cd238004..c9641ee9b5 100644 --- a/test/tools/data_consistency_check_test.cpp +++ b/test/tools/data_consistency_check_test.cpp @@ -19,20 +19,20 @@ * File Created: Friday, 28th June 2019 2:29:14 pm * Author: tongguangxun */ +#include +#include #include #include -#include -#include #include "src/tools/consistency_check.h" -#include "test/tools/mock/mock_namespace_tool_core.h" #include "test/tools/mock/mock_chunkserver_client.h" +#include "test/tools/mock/mock_namespace_tool_core.h" DECLARE_bool(check_hash); using ::testing::_; -using ::testing::Return; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; extern uint32_t segment_size; @@ -41,8 +41,7 @@ extern uint32_t chunk_size; class ConsistencyCheckTest : public ::testing::Test { public: void SetUp() { - nameSpaceTool_ = - std::make_shared(); + nameSpaceTool_ = std::make_shared(); csClient_ = std::make_shared(); } @@ -70,8 +69,7 @@ class ConsistencyCheckTest : public ::testing::Test { } void GetCopysetStatusForTest(CopysetStatusResponse* response, - int64_t applyingIndex = 1111, - bool ok = true) { + int64_t applyingIndex = 1111, bool ok = true) { if (ok) { response->set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } else { @@ -110,51 +108,41 @@ TEST_F(ConsistencyCheckTest, Consistency) { CopysetStatusResponse response; GetCopysetStatusForTest(&response); - // 设置期望 - EXPECT_CALL(*nameSpaceTool_, Init(_)) - .Times(2) - .WillRepeatedly(Return(0)); + // Set expectations + EXPECT_CALL(*nameSpaceTool_, Init(_)).Times(2).WillRepeatedly(Return(0)); EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) 
.Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(segments), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(segments), Return(0))); EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(20) - .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(90) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(90).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(60) - .WillRepeatedly(DoAll(SetArgPointee<1>(response), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(response), Return(0))); EXPECT_CALL(*csClient_, GetChunkHash(_, _)) .Times(30) - .WillRepeatedly(DoAll(SetArgPointee<1>("1111"), - Return(0))); - // 1、检查hash + .WillRepeatedly(DoAll(SetArgPointee<1>("1111"), Return(0))); + // 1. Check hash FLAGS_check_hash = true; curve::tool::ConsistencyCheck cfc1(nameSpaceTool_, csClient_); cfc1.PrintHelp("check-consistency"); cfc1.PrintHelp("check-nothing"); ASSERT_EQ(0, cfc1.RunCommand("check-consistency")); - // 2、检查applyIndex + // 2. Check the applyIndex FLAGS_check_hash = false; curve::tool::ConsistencyCheck cfc2(nameSpaceTool_, csClient_); ASSERT_EQ(0, cfc2.RunCommand("check-consistency")); ASSERT_EQ(-1, cfc2.RunCommand("check-nothing")); - // mds返回副本为空的情况 + // The case where MDS returns an empty replica list EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(segments), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(segments), Return(0))); EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(20) - .WillRepeatedly(DoAll(SetArgPointee<2>( - std::vector()), - Return(0))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(std::vector()), Return(0))); ASSERT_EQ(0, cfc2.RunCommand("check-consistency")); FLAGS_check_hash = true; ASSERT_EQ(0, cfc2.RunCommand("check-consistency")); @@ -180,61 +168,45 @@ TEST_F(ConsistencyCheckTest, NotConsistency) { CopysetStatusResponse response3; GetCopysetStatusForTest(&response3, 2222); - // 设置期望 - EXPECT_CALL(*nameSpaceTool_, Init(_)) - .Times(3) - .WillRepeatedly(Return(0)); + // Set expectations + EXPECT_CALL(*nameSpaceTool_, Init(_)).Times(3).WillRepeatedly(Return(0)); EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(segments), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(segments), Return(0))); EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), Return(0))); - // 1、检查hash,apply index一致,hash不一致 + // 1. 
Check hash: the apply index is consistent but the hash is + // inconsistent FLAGS_check_hash = true; - EXPECT_CALL(*csClient_, Init(_)) - .Times(5) - .WillRepeatedly(Return(0)); + EXPECT_CALL(*csClient_, Init(_)).Times(5).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(response1), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(response1), Return(0))); EXPECT_CALL(*csClient_, GetChunkHash(_, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<1>("2222"), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>("1111"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>("2222"), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>("1111"), Return(0))); curve::tool::ConsistencyCheck cfc1(nameSpaceTool_, csClient_); ASSERT_EQ(-1, cfc1.RunCommand("check-consistency")); - // 2、检查hash的时候apply index不一致 - EXPECT_CALL(*csClient_, Init(_)) - .Times(2) - .WillRepeatedly(Return(0)); + // 2. When checking the hash, the apply index is inconsistent + EXPECT_CALL(*csClient_, Init(_)).Times(2).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<1>(response1), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(response3), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(response1), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(response3), Return(0))); curve::tool::ConsistencyCheck cfc2(nameSpaceTool_, csClient_); ASSERT_EQ(-1, cfc2.RunCommand("check-consistency")); - // 3、检查applyIndex + // 3. Check the applyIndex FLAGS_check_hash = false; - EXPECT_CALL(*csClient_, Init(_)) - .Times(2) - .WillRepeatedly(Return(0)); + EXPECT_CALL(*csClient_, Init(_)).Times(2).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(2) - .WillOnce(DoAll(SetArgPointee<1>(response1), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(response3), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(response1), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(response3), Return(0))); curve::tool::ConsistencyCheck cfc3(nameSpaceTool_, csClient_); ASSERT_EQ(-1, cfc3.RunCommand("check-consistency")); } @@ -254,62 +226,47 @@ TEST_F(ConsistencyCheckTest, CheckError) { } FLAGS_check_hash = false; curve::tool::ConsistencyCheck cfc(nameSpaceTool_, csClient_); - // 0、Init失败 - EXPECT_CALL(*nameSpaceTool_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + // 0. Init failed + EXPECT_CALL(*nameSpaceTool_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 1、获取segment失败 - EXPECT_CALL(*nameSpaceTool_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + // 1. Failed to obtain segment + EXPECT_CALL(*nameSpaceTool_, Init(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 2、获取chunkserver list失败 + // 2. Failed to obtain chunkserver list EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) .Times(4) - .WillRepeatedly(DoAll(SetArgPointee<1>(segments), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(segments), Return(0))); EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 3、init 向chunkserverclient init失败 + // 3. Init of the chunkserver client failed EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), - Return(0))); - EXPECT_CALL(*csClient_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), Return(0))); + EXPECT_CALL(*csClient_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 4、从chunkserver获取copyset status失败 - EXPECT_CALL(*csClient_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + // 4. Failed to obtain copyset status from chunkserver + EXPECT_CALL(*csClient_, Init(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 5、从chunkserver获取chunk hash失败 + // 5. Failed to obtain chunk hash from chunkserver FLAGS_check_hash = true; CopysetStatusResponse response1; GetCopysetStatusForTest(&response1); - EXPECT_CALL(*csClient_, Init(_)) - .Times(4) - .WillRepeatedly(Return(0)); + EXPECT_CALL(*csClient_, Init(_)).Times(4).WillRepeatedly(Return(0)); EXPECT_CALL(*csClient_, GetCopysetStatus(_, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<1>(response1), - Return(0))); - EXPECT_CALL(*csClient_, GetChunkHash(_, _)) - .Times(1) - .WillOnce(Return(-1)); + .WillRepeatedly(DoAll(SetArgPointee<1>(response1), Return(0))); + EXPECT_CALL(*csClient_, GetChunkHash(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); } diff --git a/test/tools/etcd_client_test.cpp b/test/tools/etcd_client_test.cpp index b6774425bd..0e7d8a9765 100644 --- a/test/tools/etcd_client_test.cpp +++ b/test/tools/etcd_client_test.cpp @@ -20,11 +20,14 @@ * Author: charisu */ +#include "src/tools/etcd_client.h" + #include -#include //NOLINT + #include //NOLINT #include -#include "src/tools/etcd_client.h" +#include //NOLINT + #include "src/common/timeutility.h" class EtcdClientTest : public ::testing::Test { @@ -36,21 +39,23 @@ class EtcdClientTest : public ::testing::Test { ASSERT_TRUE(false); } else if (0 == etcdPid) { /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, + * as it may cause deadlock!!! 
*/ - ASSERT_EQ(0, - execlp("etcd", "etcd", "--listen-client-urls", - "http://127.0.0.1:2366", "--advertise-client-urls", - "http://127.0.0.1:2366", "--listen-peer-urls", - "http://127.0.0.1:2367", - "--initial-advertise-peer-urls", - "http://127.0.0.1:2367", "--initial-cluster", - "toolEtcdClientTest=http://127.0.0.1:2367", - "--name", "toolEtcdClientTest", nullptr)); + ASSERT_EQ( + 0, + execlp("etcd", "etcd", "--listen-client-urls", + "http://127.0.0.1:2366", "--advertise-client-urls", + "http://127.0.0.1:2366", "--listen-peer-urls", + "http://127.0.0.1:2367", "--initial-advertise-peer-urls", + "http://127.0.0.1:2367", "--initial-cluster", + "toolEtcdClientTest=http://127.0.0.1:2367", "--name", + "toolEtcdClientTest", nullptr)); exit(0); } - // 一定时间内尝试check直到etcd完全起来 + // Keep checking for a certain period of time until etcd is + // completely up curve::tool::EtcdClient client; ASSERT_EQ(0, client.Init("127.0.0.1:2366")); bool running; @@ -59,8 +64,8 @@ class EtcdClientTest : public ::testing::Test { 5) { std::vector leaderAddrVec; std::map onlineState; - ASSERT_EQ(0, - client.GetEtcdClusterStatus(&leaderAddrVec, &onlineState)); + ASSERT_EQ( + 0, client.GetEtcdClusterStatus(&leaderAddrVec, &onlineState)); if (onlineState["127.0.0.1:2366"]) { running = true; break; @@ -81,22 +86,22 @@ class EtcdClientTest : public ::testing::Test { TEST_F(EtcdClientTest, GetEtcdClusterStatus) { curve::tool::EtcdClient client; - // Init失败的情况 + // The case where Init fails ASSERT_EQ(-1, client.Init("")); - // Init成功 + // Init succeeded ASSERT_EQ(0, client.Init(etcdAddr)); std::vector leaderAddrVec; std::map onlineState; - // 正常情况 + // Normal situation ASSERT_EQ(0, client.GetEtcdClusterStatus(&leaderAddrVec, &onlineState)); - std::map expected = { { "127.0.0.1:2366", true }, - { "127.0.0.1:2368", false } }; + std::map expected = {{"127.0.0.1:2366", true}, + {"127.0.0.1:2368", false}}; ASSERT_EQ(expected, onlineState); ASSERT_EQ(1, leaderAddrVec.size()); ASSERT_EQ("127.0.0.1:2366", leaderAddrVec[0]); - // 空指针错误 + // Null pointer error ASSERT_EQ(-1, client.GetEtcdClusterStatus(nullptr, &onlineState)); ASSERT_EQ(-1, client.GetEtcdClusterStatus(&leaderAddrVec, nullptr)); } @@ -105,13 +110,13 @@ TEST_F(EtcdClientTest, GetAndCheckEtcdVersion) { curve::tool::EtcdClient client; ASSERT_EQ(0, client.Init("127.0.0.1:2366")); - // 正常情况 + // Normal situation std::string version; std::vector failedList; ASSERT_EQ(0, client.GetAndCheckEtcdVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 个别etcd获取version失败 + // Some etcd instances failed to get the version ASSERT_EQ(0, client.Init(etcdAddr)); ASSERT_EQ(0, client.GetAndCheckEtcdVersion(&version, &failedList)); ASSERT_EQ(1, failedList.size()); diff --git a/test/tools/mds_client_test.cpp b/test/tools/mds_client_test.cpp index e261d43895..c89d8d7066 100644 --- a/test/tools/mds_client_test.cpp +++ b/test/tools/mds_client_test.cpp @@ -20,13 +20,16 @@ * Author: charisu */ -#include +#include "src/tools/mds_client.h" + #include +#include + #include -#include "src/tools/mds_client.h" + #include "test/tools/mock/mock_namespace_service.h" -#include "test/tools/mock/mock_topology_service.h" #include "test/tools/mock/mock_schedule_service.h" +#include "test/tools/mock/mock_topology_service.h" using curve::mds::schedule::QueryChunkServerRecoverStatusRequest; using curve::mds::schedule::QueryChunkServerRecoverStatusResponse; @@ -68,8 +71,8 @@ namespace tool { const char mdsAddr[] = "127.0.0.1:9191,127.0.0.1:9192"; template -void callback(RpcController *controller, 
const Req *request, Resp *response, - Closure *done) { +void callback(RpcController* controller, const Req* request, Resp* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); } @@ -90,15 +93,15 @@ class ToolMDSClientTest : public ::testing::Test { ASSERT_EQ(0, server->Start("127.0.0.1:9192", nullptr)); brpc::StartDummyServerAt(9193); - // 初始化mds client + // Initialize mds client curve::mds::topology::ListPhysicalPoolResponse response; response.set_statuscode(kTopoErrCodeSuccess); EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const ListPhysicalPoolRequest* request, + ListPhysicalPoolResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.Init(mdsAddr, "9194,9193")); @@ -116,7 +119,7 @@ class ToolMDSClientTest : public ::testing::Test { scheduleService = nullptr; } - void GetFileInfoForTest(uint64_t id, FileInfo *fileInfo) { + void GetFileInfoForTest(uint64_t id, FileInfo* fileInfo) { fileInfo->set_id(id); fileInfo->set_filename("test"); fileInfo->set_parentid(0); @@ -127,11 +130,11 @@ class ToolMDSClientTest : public ::testing::Test { fileInfo->set_ctime(1573546993000000); } - void GetCopysetInfoForTest(CopySetServerInfo *info, int num, + void GetCopysetInfoForTest(CopySetServerInfo* info, int num, uint32_t copysetId = 1) { info->Clear(); for (int i = 0; i < num; ++i) { - curve::common::ChunkServerLocation *csLoc = info->add_cslocs(); + curve::common::ChunkServerLocation* csLoc = info->add_cslocs(); csLoc->set_chunkserverid(i); csLoc->set_hostip("127.0.0.1"); csLoc->set_port(9191 + i); @@ -139,14 +142,14 @@ class ToolMDSClientTest : public ::testing::Test { info->set_copysetid(copysetId); } - void GetSegmentForTest(PageFileSegment *segment) { + void GetSegmentForTest(PageFileSegment* segment) { segment->set_logicalpoolid(1); segment->set_segmentsize(DefaultSegmentSize); segment->set_chunksize(kChunkSize); segment->set_startoffset(0); } - void GetPhysicalPoolInfoForTest(PoolIdType id, PhysicalPoolInfo *pool) { + void GetPhysicalPoolInfoForTest(PoolIdType id, PhysicalPoolInfo* pool) { pool->set_physicalpoolid(id); pool->set_physicalpoolname("testPool"); pool->set_desc("physical pool for test"); @@ -155,7 +158,7 @@ class ToolMDSClientTest : public ::testing::Test { } void GetLogicalPoolForTest(PoolIdType id, - curve::mds::topology::LogicalPoolInfo *lpInfo) { + curve::mds::topology::LogicalPoolInfo* lpInfo) { lpInfo->set_logicalpoolid(id); lpInfo->set_logicalpoolname("defaultLogicalPool"); lpInfo->set_physicalpoolid(1); @@ -167,14 +170,14 @@ class ToolMDSClientTest : public ::testing::Test { lpInfo->set_allocatestatus(AllocateStatus::ALLOW); } - void GetZoneInfoForTest(ZoneIdType id, ZoneInfo *zoneInfo) { + void GetZoneInfoForTest(ZoneIdType id, ZoneInfo* zoneInfo) { zoneInfo->set_zoneid(1); zoneInfo->set_zonename("testZone"); zoneInfo->set_physicalpoolid(1); zoneInfo->set_physicalpoolname("testPool"); } - void GetServerInfoForTest(ServerIdType id, ServerInfo *serverInfo) { + void GetServerInfoForTest(ServerIdType id, ServerInfo* serverInfo) { serverInfo->set_serverid(id); serverInfo->set_hostname("localhost"); serverInfo->set_internalip("127.0.0.1"); @@ -189,7 +192,7 @@ class ToolMDSClientTest : public ::testing::Test { } void GetChunkServerInfoForTest(ChunkServerIdType id, - ChunkServerInfo *csInfo, + 
ChunkServerInfo* csInfo, bool retired = false) { csInfo->set_chunkserverid(id); csInfo->set_disktype("ssd"); @@ -206,10 +209,10 @@ class ToolMDSClientTest : public ::testing::Test { csInfo->set_diskcapacity(1024); csInfo->set_diskused(512); } - brpc::Server *server; - curve::mds::MockNameService *nameService; - curve::mds::topology::MockTopologyService *topoService; - curve::mds::schedule::MockScheduleService *scheduleService; + brpc::Server* server; + curve::mds::MockNameService* nameService; + curve::mds::topology::MockTopologyService* topoService; + curve::mds::schedule::MockScheduleService* scheduleService; MDSClient mdsClient; const uint64_t kChunkSize = 16777216; const uint64_t DefaultSegmentSize = 1024 * 1024 * 1024; @@ -220,9 +223,9 @@ TEST(MDSClientInitTest, Init) { ASSERT_EQ(-1, mdsClient.Init("")); ASSERT_EQ(-1, mdsClient.Init("127.0.0.1")); ASSERT_EQ(-1, mdsClient.Init("127.0.0.1:65536")); - // dummy server非法 + // dummy server is invalid ASSERT_EQ(-1, mdsClient.Init(mdsAddr, "")); - // dummy server与mds不匹配 + // dummy server and mds do not match ASSERT_EQ(-1, mdsClient.Init(mdsAddr, "9091,9092,9093")); } @@ -232,44 +235,44 @@ TEST_F(ToolMDSClientTest, GetFileInfo) { std::string filename = "/test"; curve::mds::FileInfo outFileInfo; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::GetFileInfoRequest *request, - curve::mds::GetFileInfoResponse *response, - Closure *done) { + .WillRepeatedly(Invoke([](RpcController* controller, + const curve::mds::GetFileInfoRequest* request, + curve::mds::GetFileInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetFileInfo(filename, &outFileInfo)); - // 返回码不为OK + // The return code is not OK curve::mds::GetFileInfoResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetFileInfoRequest *request, - curve::mds::GetFileInfoResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetFileInfoRequest* request, + curve::mds::GetFileInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetFileInfo(filename, &outFileInfo)); - // 正常情况 - curve::mds::FileInfo *info = new curve::mds::FileInfo; + // Normal situation + curve::mds::FileInfo* info = new curve::mds::FileInfo; GetFileInfoForTest(1, info); response.set_allocated_fileinfo(info); response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetFileInfoRequest *request, - curve::mds::GetFileInfoResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetFileInfoRequest* request, + curve::mds::GetFileInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetFileInfo(filename, &outFileInfo)); ASSERT_EQ(info->DebugString(), outFileInfo.DebugString()); } @@ -277,33 +280,33 @@ TEST_F(ToolMDSClientTest, 
GetFileInfo) { TEST_F(ToolMDSClientTest, GetAllocatedSize) { uint64_t allocSize; std::string filename = "/test"; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( - [](RpcController *controller, - const curve::mds::GetAllocatedSizeRequest *request, - curve::mds::GetAllocatedSizeResponse *response, Closure *done) { + [](RpcController* controller, + const curve::mds::GetAllocatedSizeRequest* request, + curve::mds::GetAllocatedSizeResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetAllocatedSize(filename, &allocSize)); - // 返回码不为OK + // The return code is not OK curve::mds::GetAllocatedSizeResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetAllocatedSizeRequest *request, - curve::mds::GetAllocatedSizeResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetAllocatedSizeRequest* request, + curve::mds::GetAllocatedSizeResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetAllocatedSize(filename, &allocSize)); - // 正常情况 + // Normal situation response.set_allocatedsize(DefaultSegmentSize * 3); for (int i = 1; i <= 3; ++i) { response.mutable_allocsizemap()->insert( @@ -313,10 +316,10 @@ TEST_F(ToolMDSClientTest, GetAllocatedSize) { EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetAllocatedSizeRequest *request, - curve::mds::GetAllocatedSizeResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetAllocatedSizeRequest* request, + curve::mds::GetAllocatedSizeResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); AllocMap allocMap; ASSERT_EQ(0, mdsClient.GetAllocatedSize(filename, &allocSize, &allocMap)); ASSERT_EQ(DefaultSegmentSize * 3, allocSize); @@ -330,32 +333,32 @@ TEST_F(ToolMDSClientTest, ListDir) { std::string fileName = "/test"; std::vector fileInfoVec; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, ListDir(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::ListDirRequest *request, - curve::mds::ListDirResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::ListDirRequest* request, + curve::mds::ListDirResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListDir(fileName, &fileInfoVec)); - // 返回码不为OK + // The return code is not OK curve::mds::ListDirResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ListDir(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListDirRequest *request, - curve::mds::ListDirResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + 
Invoke([](RpcController* controller, + const curve::mds::ListDirRequest* request, + curve::mds::ListDirResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListDir(fileName, &fileInfoVec)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); for (int i = 0; i < 5; i++) { auto fileInfo = response.add_fileinfo(); @@ -364,10 +367,10 @@ TEST_F(ToolMDSClientTest, ListDir) { EXPECT_CALL(*nameService, ListDir(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListDirRequest *request, - curve::mds::ListDirResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ListDirRequest* request, + curve::mds::ListDirResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListDir(fileName, &fileInfoVec)); for (int i = 0; i < 5; i++) { FileInfo expected; @@ -381,70 +384,70 @@ TEST_F(ToolMDSClientTest, GetSegmentInfo) { curve::mds::PageFileSegment outSegment; uint64_t offset = 0; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::GetOrAllocateSegmentRequest* request, + curve::mds::GetOrAllocateSegmentResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(GetSegmentRes::kOtherError, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // segment不存在 + // segment does not exist curve::mds::GetOrAllocateSegmentResponse response; response.set_statuscode(curve::mds::StatusCode::kSegmentNotAllocated); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetOrAllocateSegmentRequest* request, + curve::mds::GetOrAllocateSegmentResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kSegmentNotAllocated, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // 文件不存在 + // File does not exist response.set_statuscode(curve::mds::StatusCode::kFileNotExists); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetOrAllocateSegmentRequest* request, + curve::mds::GetOrAllocateSegmentResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kFileNotExists, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // 其他错误 + // Other errors response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, 
GetOrAllocateSegment(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetOrAllocateSegmentRequest* request, + curve::mds::GetOrAllocateSegmentResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kOtherError, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // 正常情况 - PageFileSegment *segment = new PageFileSegment(); + // Normal situation + PageFileSegment* segment = new PageFileSegment(); GetSegmentForTest(segment); response.set_statuscode(curve::mds::StatusCode::kOK); response.set_allocated_pagefilesegment(segment); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::GetOrAllocateSegmentRequest* request, + curve::mds::GetOrAllocateSegmentResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kOK, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); ASSERT_EQ(segment->DebugString(), outSegment.DebugString()); @@ -453,41 +456,41 @@ TEST_F(ToolMDSClientTest, GetSegmentInfo) { TEST_F(ToolMDSClientTest, DeleteFile) { std::string fileName = "/test"; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::DeleteFileRequest *request, - curve::mds::DeleteFileResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::DeleteFileRequest* request, + curve::mds::DeleteFileResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.DeleteFile(fileName)); - // 返回码不为OK + // The return code is not OK curve::mds::DeleteFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::DeleteFileRequest *request, - curve::mds::DeleteFileResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::DeleteFileRequest* request, + curve::mds::DeleteFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.DeleteFile(fileName)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::DeleteFileRequest *request, - curve::mds::DeleteFileResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::DeleteFileRequest* request, + curve::mds::DeleteFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); 
ASSERT_EQ(0, mdsClient.DeleteFile(fileName)); } @@ -505,43 +508,41 @@ TEST_F(ToolMDSClientTest, CreateFile) { context.stripeCount = stripeCount; context.poolset = ""; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::CreateFileRequest *request, - curve::mds::CreateFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController* controller, + const curve::mds::CreateFileRequest* request, + curve::mds::CreateFileResponse* response, Closure* done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller* cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.CreateFile(context)); - // 返回码不为OK + // The return code is not OK curve::mds::CreateFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::CreateFileRequest *request, - curve::mds::CreateFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const curve::mds::CreateFileRequest* request, + curve::mds::CreateFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.CreateFile(context)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::CreateFileRequest *request, - curve::mds::CreateFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const curve::mds::CreateFileRequest* request, + curve::mds::CreateFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.CreateFile(context)); } @@ -553,10 +554,10 @@ TEST_F(ToolMDSClientTest, ExtendVolume_success) { EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ExtendFileRequest *request, - curve::mds::ExtendFileResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ExtendFileRequest* request, + curve::mds::ExtendFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ExtendVolume(fileName, length)); } @@ -564,32 +565,32 @@ TEST_F(ToolMDSClientTest, ExtendVolume_Fail) { std::string fileName = "/test"; uint64_t length = 10 * DefaultSegmentSize; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::ExtendFileRequest *request, - curve::mds::ExtendFileResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::ExtendFileRequest* request, + curve::mds::ExtendFileResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + 
brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ExtendVolume(fileName, length)); return; - // 返回码不为OK + // The return code is not OK curve::mds::ExtendFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ExtendFileRequest *request, - curve::mds::ExtendFileResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ExtendFileRequest* request, + curve::mds::ExtendFileResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ExtendVolume(fileName, length)); } @@ -598,35 +599,35 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { CopySetIdType copysetId = 100; std::vector csLocs; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( - [](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, Closure *done) { + [](RpcController* controller, + const GetChunkServerListInCopySetsRequest* request, + GetChunkServerListInCopySetsResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet(logicalPoolId, copysetId, &csLocs)); - // 返回码不为OK + // The return code is not OK GetChunkServerListInCopySetsResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetChunkServerListInCopySetsRequest* request, + GetChunkServerListInCopySetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet(logicalPoolId, copysetId, &csLocs)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); CopySetServerInfo csInfo; GetCopysetInfoForTest(&csInfo, 3, copysetId); @@ -635,10 +636,10 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetChunkServerListInCopySetsRequest* request, + GetChunkServerListInCopySetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetChunkServerListInCopySet(logicalPoolId, copysetId, &csLocs)); ASSERT_EQ(csInfo.cslocs_size(), csLocs.size()); @@ -646,7 +647,7 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { ASSERT_EQ(csInfo.cslocs(i).DebugString(), csLocs[i].DebugString()); } - // 测试获取多个copyset + // Test obtaining multiple copysets std::vector 
expected; response.Clear(); response.set_statuscode(kTopoErrCodeSuccess); @@ -662,10 +663,10 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const GetChunkServerListInCopySetsRequest* request, + GetChunkServerListInCopySetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetChunkServerListInCopySets(logicalPoolId, copysets, &csServerInfos)); ASSERT_EQ(expected.size(), csServerInfos.size()); @@ -677,47 +678,45 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { TEST_F(ToolMDSClientTest, ListPhysicalPoolsInCluster) { std::vector pools; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const ListPhysicalPoolRequest* request, + ListPhysicalPoolResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListPhysicalPoolsInCluster(&pools)); - // 返回码不为OK + // The return code is not OK ListPhysicalPoolResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const ListPhysicalPoolRequest* request, + ListPhysicalPoolResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListPhysicalPoolsInCluster(&pools)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto poolInfo = response.add_physicalpoolinfos(); GetPhysicalPoolInfoForTest(i, poolInfo); } EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const ListPhysicalPoolRequest* request, + ListPhysicalPoolResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListPhysicalPoolsInCluster(&pools)); ASSERT_EQ(3, pools.size()); for (int i = 0; i < 3; ++i) { @@ -731,46 +730,44 @@ TEST_F(ToolMDSClientTest, ListLogicalPoolsInPhysicalPool) { PoolIdType poolId = 1; std::vector pools; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( - [](RpcController *controller, const ListLogicalPoolRequest *request, - ListLogicalPoolResponse *response, Closure *done) { + 
[](RpcController* controller, const ListLogicalPoolRequest* request, + ListLogicalPoolResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); - // 返回码不为OK + // The return code is not OK ListLogicalPoolResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListLogicalPoolRequest *request, - ListLogicalPoolResponse *response, Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const ListLogicalPoolRequest* request, + ListLogicalPoolResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto poolInfo = response.add_logicalpoolinfos(); GetLogicalPoolForTest(i, poolInfo); } EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListLogicalPoolRequest *request, - ListLogicalPoolResponse *response, Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController* controller, + const ListLogicalPoolRequest* request, + ListLogicalPoolResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); ASSERT_EQ(3, pools.size()); for (int i = 0; i < 3; ++i) { @@ -783,33 +780,33 @@ TEST_F(ToolMDSClientTest, ListLogicalPoolsInPhysicalPool) { TEST_F(ToolMDSClientTest, ListZoneInPhysicalPool) { PoolIdType poolId = 1; std::vector zones; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::topology::ListPoolZoneRequest *request, - curve::mds::topology::ListPoolZoneResponse *response, - Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::topology::ListPoolZoneRequest* request, + curve::mds::topology::ListPoolZoneResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListZoneInPhysicalPool(poolId, &zones)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::ListPoolZoneResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListPoolZoneRequest *request, - curve::mds::topology::ListPoolZoneResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::topology::ListPoolZoneRequest* request, + curve::mds::topology::ListPoolZoneResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, 
mdsClient.ListZoneInPhysicalPool(poolId, &zones)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto zoneInfo = response.add_zones(); @@ -818,10 +815,10 @@ TEST_F(ToolMDSClientTest, ListZoneInPhysicalPool) { EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListPoolZoneRequest *request, - curve::mds::topology::ListPoolZoneResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::topology::ListPoolZoneRequest* request, + curve::mds::topology::ListPoolZoneResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListZoneInPhysicalPool(poolId, &zones)); ASSERT_EQ(3, zones.size()); for (int i = 0; i < 3; ++i) { @@ -835,35 +832,35 @@ TEST_F(ToolMDSClientTest, ListServersInZone) { ZoneIdType zoneId; std::vector servers; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListZoneServer(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( - [](RpcController *controller, - const curve::mds::topology::ListZoneServerRequest *request, - curve::mds::topology::ListZoneServerResponse *response, - Closure *done) { + [](RpcController* controller, + const curve::mds::topology::ListZoneServerRequest* request, + curve::mds::topology::ListZoneServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListServersInZone(zoneId, &servers)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::ListZoneServerResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListZoneServer(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::ListZoneServerRequest *request, - curve::mds::topology::ListZoneServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::ListZoneServerRequest* request, + curve::mds::topology::ListZoneServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListServersInZone(zoneId, &servers)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto serverInfo = response.add_serverinfo(); @@ -873,10 +870,10 @@ TEST_F(ToolMDSClientTest, ListServersInZone) { .WillOnce(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::ListZoneServerRequest *request, - curve::mds::topology::ListZoneServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::ListZoneServerRequest* request, + curve::mds::topology::ListZoneServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListServersInZone(zoneId, &servers)); ASSERT_EQ(3, servers.size()); for (int i = 0; i < 3; ++i) { @@ -890,35 +887,36 @@ TEST_F(ToolMDSClientTest, ListChunkServersOnServer) { ServerIdType serverId = 1; std::vector chunkservers; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListChunkServer(_, _, _, _)) .Times(6) 
.WillRepeatedly(Invoke( - [](RpcController *controller, - const curve::mds::topology::ListChunkServerRequest *request, - curve::mds::topology::ListChunkServerResponse *response, - Closure *done) { + [](RpcController* controller, + const curve::mds::topology::ListChunkServerRequest* request, + curve::mds::topology::ListChunkServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::ListChunkServerResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListChunkServer(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::ListChunkServerRequest *request, - curve::mds::topology::ListChunkServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::ListChunkServerRequest* request, + curve::mds::topology::ListChunkServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); - // 正常情况,两个chunkserver正常,一个chunkserver retired + // Under normal circumstances, two chunkservers are normal and one + // chunkserver retired response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto csInfo = response.add_chunkserverinfos(); @@ -928,10 +926,10 @@ TEST_F(ToolMDSClientTest, ListChunkServersOnServer) { .WillOnce(DoAll( SetArgPointee<2>(response), Invoke( - [](RpcController *controller, - const curve::mds::topology::ListChunkServerRequest *request, - curve::mds::topology::ListChunkServerResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + [](RpcController* controller, + const curve::mds::topology::ListChunkServerRequest* request, + curve::mds::topology::ListChunkServerResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); ASSERT_EQ(2, chunkservers.size()); for (int i = 0; i < 2; ++i) { @@ -946,23 +944,23 @@ TEST_F(ToolMDSClientTest, GetChunkServerInfo) { std::string csAddr = "127.0.0.1:8200"; ChunkServerInfo chunkserver; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) .Times(12) .WillRepeatedly(Invoke( - [](RpcController *controller, - const curve::mds::topology::GetChunkServerInfoRequest *request, - curve::mds::topology::GetChunkServerInfoResponse *response, - Closure *done) { + [](RpcController* controller, + const curve::mds::topology::GetChunkServerInfoRequest* request, + curve::mds::topology::GetChunkServerInfoResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csId, &chunkserver)); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::GetChunkServerInfoResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) @@ -970,17 +968,17 @@ 
TEST_F(ToolMDSClientTest, GetChunkServerInfo) {
        .WillRepeatedly(DoAll(
            SetArgPointee<2>(response),
            Invoke(
-               [](RpcController *controller,
-                  const curve::mds::topology::GetChunkServerInfoRequest
-                      *request,
-                  curve::mds::topology::GetChunkServerInfoResponse *response,
-                  Closure *done) { brpc::ClosureGuard doneGuard(done); })));
+               [](RpcController* controller,
+                  const curve::mds::topology::GetChunkServerInfoRequest*
+                      request,
+                  curve::mds::topology::GetChunkServerInfoResponse* response,
+                  Closure* done) { brpc::ClosureGuard doneGuard(done); })));
    ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csId, &chunkserver));
    ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver));
-   // 正常情况
+   // Normal situation
    response.set_statuscode(kTopoErrCodeSuccess);
-   ChunkServerInfo *csInfo = new ChunkServerInfo();
+   ChunkServerInfo* csInfo = new ChunkServerInfo();
    GetChunkServerInfoForTest(1, csInfo);
    response.set_allocated_chunkserverinfo(csInfo);
    EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _))
@@ -988,18 +986,18 @@ TEST_F(ToolMDSClientTest, GetChunkServerInfo) {
        .WillRepeatedly(DoAll(
            SetArgPointee<2>(response),
            Invoke(
-               [](RpcController *controller,
-                  const curve::mds::topology::GetChunkServerInfoRequest
-                      *request,
-                  curve::mds::topology::GetChunkServerInfoResponse *response,
-                  Closure *done) { brpc::ClosureGuard doneGuard(done); })));
+               [](RpcController* controller,
+                  const curve::mds::topology::GetChunkServerInfoRequest*
+                      request,
+                  curve::mds::topology::GetChunkServerInfoResponse* response,
+                  Closure* done) { brpc::ClosureGuard doneGuard(done); })));
    ASSERT_EQ(0, mdsClient.GetChunkServerInfo(csId, &chunkserver));
    ASSERT_EQ(0, mdsClient.GetChunkServerInfo(csAddr, &chunkserver));
    ChunkServerInfo expected;
    GetChunkServerInfoForTest(1, &expected);
    ASSERT_EQ(expected.DebugString(), chunkserver.DebugString());
-   // chunkserver地址不合法的情况
+   // Illegal chunkserver address
    csAddr = "";
    ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver));
    csAddr = "127.0.0.1";
@@ -1013,36 +1011,36 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) {
    std::string csAddr = "127.0.0.1:8200";
    std::vector<CopysetInfo> copysets;
-   // 发送rpc失败
+   // Failed to send rpc
    EXPECT_CALL(*topoService, GetCopySetsInChunkServer(_, _, _, _))
        .Times(12)
        .WillRepeatedly(Invoke(
-           [](RpcController *controller,
-              const GetCopySetsInChunkServerRequest *request,
-              GetCopySetsInChunkServerResponse *response, Closure *done) {
+           [](RpcController* controller,
+              const GetCopySetsInChunkServerRequest* request,
+              GetCopySetsInChunkServerResponse* response, Closure* done) {
                brpc::ClosureGuard doneGuard(done);
-               brpc::Controller *cntl =
-                   dynamic_cast<brpc::Controller *>(controller);
+               brpc::Controller* cntl =
+                   dynamic_cast<brpc::Controller*>(controller);
                cntl->SetFailed("test");
            }));
    ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csId, &copysets));
    ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, &copysets));
-   // 返回码不为OK
+   // The return code is not OK
    GetCopySetsInChunkServerResponse response;
    response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail);
    EXPECT_CALL(*topoService, GetCopySetsInChunkServer(_, _, _, _))
        .Times(2)
        .WillRepeatedly(DoAll(
            SetArgPointee<2>(response),
-           Invoke([](RpcController *controller,
-                     const GetCopySetsInChunkServerRequest *request,
-                     GetCopySetsInChunkServerResponse *response,
-                     Closure *done) { brpc::ClosureGuard doneGuard(done); })));
+           Invoke([](RpcController* controller,
+                     const GetCopySetsInChunkServerRequest* request,
+                     GetCopySetsInChunkServerResponse* response,
+                     Closure* done) { brpc::ClosureGuard doneGuard(done); })));
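Every RPC-failure case in these tests repeats the same recipe: complete the call through a ClosureGuard, then mark the brpc Controller failed. A generic action along the following lines could collapse that boilerplate; this is a hedged sketch only — FailRpcAction is a hypothetical name, not code from this patch, and it assumes C++14 return-type deduction:

// Hypothetical helper: fails any brpc RPC the way the per-test lambdas do.
// It works for every (Request, Response) pair because both are ignored.
template <typename Req, typename Resp>
auto FailRpcAction(const std::string& reason) {
    return [reason](RpcController* controller, const Req* /*request*/,
                    Resp* /*response*/, Closure* done) {
        brpc::ClosureGuard doneGuard(done);  // always complete the RPC
        dynamic_cast<brpc::Controller*>(controller)->SetFailed(reason);
    };
}
// Usage sketch:
//   EXPECT_CALL(*topoService, GetCopySetsInChunkServer(_, _, _, _))
//       .WillRepeatedly(Invoke(
//           FailRpcAction<GetCopySetsInChunkServerRequest,
//                         GetCopySetsInChunkServerResponse>("test")));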
    ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csId, &copysets));
    ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, &copysets));
-   // 正常情况
+   // Normal situation
    response.set_statuscode(kTopoErrCodeSuccess);
    for (int i = 0; i < 5; ++i) {
        auto copysetInfo = response.add_copysetinfos();
@@ -1053,10 +1051,10 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) {
        .Times(2)
        .WillRepeatedly(DoAll(
            SetArgPointee<2>(response),
-           Invoke([](RpcController *controller,
-                     const GetCopySetsInChunkServerRequest *request,
-                     GetCopySetsInChunkServerResponse *response,
-                     Closure *done) { brpc::ClosureGuard doneGuard(done); })));
+           Invoke([](RpcController* controller,
+                     const GetCopySetsInChunkServerRequest* request,
+                     GetCopySetsInChunkServerResponse* response,
+                     Closure* done) { brpc::ClosureGuard doneGuard(done); })));
    ASSERT_EQ(0, mdsClient.GetCopySetsInChunkServer(csId, &copysets));
    ASSERT_EQ(5, copysets.size());
    copysets.clear();
@@ -1066,7 +1064,7 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) {
        ASSERT_EQ(1, copysets[i].logicalpoolid());
        ASSERT_EQ(1000 + i, copysets[i].copysetid());
    }
-   // chunkserver地址不合法的情况
+   // Illegal chunkserver address
    csAddr = "";
    ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, &copysets));
    csAddr = "127.0.0.1";
@@ -1078,34 +1076,34 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) {
 TEST_F(ToolMDSClientTest, GetCopySetsInCluster) {
    std::vector<CopysetInfo> copysets;
-   // 发送rpc失败
+   // Failed to send rpc
    EXPECT_CALL(*topoService, GetCopySetsInCluster(_, _, _, _))
        .Times(6)
        .WillRepeatedly(
-           Invoke([](RpcController *controller,
-                     const GetCopySetsInClusterRequest *request,
-                     GetCopySetsInClusterResponse *response, Closure *done) {
+           Invoke([](RpcController* controller,
+                     const GetCopySetsInClusterRequest* request,
+                     GetCopySetsInClusterResponse* response, Closure* done) {
                brpc::ClosureGuard doneGuard(done);
-               brpc::Controller *cntl =
-                   dynamic_cast<brpc::Controller *>(controller);
+               brpc::Controller* cntl =
+                   dynamic_cast<brpc::Controller*>(controller);
                cntl->SetFailed("test");
            }));
    ASSERT_EQ(-1, mdsClient.GetCopySetsInCluster(&copysets));
-   // 返回码不为OK
+   // The return code is not OK
    GetCopySetsInClusterResponse response;
    response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail);
    EXPECT_CALL(*topoService, GetCopySetsInCluster(_, _, _, _))
        .Times(1)
        .WillRepeatedly(DoAll(
            SetArgPointee<2>(response),
-           Invoke([](RpcController *controller,
-                     const GetCopySetsInClusterRequest *request,
-                     GetCopySetsInClusterResponse *response,
-                     Closure *done) { brpc::ClosureGuard doneGuard(done); })));
+           Invoke([](RpcController* controller,
+                     const GetCopySetsInClusterRequest* request,
+                     GetCopySetsInClusterResponse* response,
+                     Closure* done) { brpc::ClosureGuard doneGuard(done); })));
    ASSERT_EQ(-1, mdsClient.GetCopySetsInCluster(&copysets));
-   // 正常情况
+   // Normal situation
    response.set_statuscode(kTopoErrCodeSuccess);
    for (int i = 0; i < 5; ++i) {
        auto copysetInfo = response.add_copysetinfos();
@@ -1116,10 +1114,10 @@ TEST_F(ToolMDSClientTest, GetCopySetsInCluster) {
        .Times(1)
        .WillRepeatedly(DoAll(
            SetArgPointee<2>(response),
-           Invoke([](RpcController *controller,
-                     const GetCopySetsInClusterRequest *request,
-                     GetCopySetsInClusterResponse *response,
-                     Closure *done) { brpc::ClosureGuard doneGuard(done); })));
+           Invoke([](RpcController* controller,
+                     const GetCopySetsInClusterRequest* request,
+                     GetCopySetsInClusterResponse* response,
+                     Closure* done) { brpc::ClosureGuard doneGuard(done); })));
    ASSERT_EQ(0, mdsClient.GetCopySetsInCluster(&copysets));
    ASSERT_EQ(5, copysets.size());
    copysets.clear();
@@ -1131,11 +1129,11 @@
TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { TEST_F(ToolMDSClientTest, GetCopyset) { auto succCallback = callback; - auto failCallback = [](RpcController *controller, - const GetCopysetRequest *request, - GetCopysetResponse *response, Closure *done) { + auto failCallback = [](RpcController* controller, + const GetCopysetRequest* request, + GetCopysetResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); cntl->SetFailed("fail"); }; @@ -1184,42 +1182,42 @@ TEST_F(ToolMDSClientTest, GetCopyset) { } TEST_F(ToolMDSClientTest, RapidLeaderSchedule) { - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const RapidLeaderScheduleRequst *request, - RapidLeaderScheduleResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const RapidLeaderScheduleRequst* request, + RapidLeaderScheduleResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.RapidLeaderSchedule(1)); - // 返回码不为OK + // The return code is not OK RapidLeaderScheduleResponse response; response.set_statuscode( curve::mds::schedule::kScheduleErrCodeInvalidLogicalPool); EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const RapidLeaderScheduleRequst *request, - RapidLeaderScheduleResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const RapidLeaderScheduleRequst* request, + RapidLeaderScheduleResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.RapidLeaderSchedule(1)); - // 成功 + // Success response.set_statuscode(curve::mds::schedule::kScheduleErrCodeSuccess); EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const RapidLeaderScheduleRequst *request, - RapidLeaderScheduleResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const RapidLeaderScheduleRequst* request, + RapidLeaderScheduleResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.RapidLeaderSchedule(1)); } @@ -1234,13 +1232,13 @@ TEST_F(ToolMDSClientTest, SetLogicalPoolScanState) { // CASE 1: Send rpc failed { - auto failCallback = [](RpcController *controller, - const SetLogicalPoolScanStateRequest *request, - SetLogicalPoolScanStateResponse *response, - Closure *done) { + auto failCallback = [](RpcController* controller, + const SetLogicalPoolScanStateRequest* request, + SetLogicalPoolScanStateResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("fail"); }; EXPECT_CALL(*topoService, SetLogicalPoolScanState(_, _, _, _)) @@ -1267,43 +1265,43 @@ TEST_F(ToolMDSClientTest, SetLogicalPoolScanState) { TEST_F(ToolMDSClientTest, QueryChunkServerRecoverStatus) { std::map statusMap; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*scheduleService, 
QueryChunkServerRecoverStatus(_, _, _, _))
        .Times(6)
        .WillRepeatedly(Invoke(
-           [](RpcController *controller,
-              const QueryChunkServerRecoverStatusRequest *request,
-              QueryChunkServerRecoverStatusResponse *response, Closure *done) {
+           [](RpcController* controller,
+              const QueryChunkServerRecoverStatusRequest* request,
+              QueryChunkServerRecoverStatusResponse* response, Closure* done) {
                brpc::ClosureGuard doneGuard(done);
-               brpc::Controller *cntl =
-                   dynamic_cast<brpc::Controller *>(controller);
+               brpc::Controller* cntl =
+                   dynamic_cast<brpc::Controller*>(controller);
                cntl->SetFailed("test");
            }));
    ASSERT_EQ(-1, mdsClient.QueryChunkServerRecoverStatus(
                      std::vector<ChunkServerIdType>{}, &statusMap));
-   // 1. QueryChunkServerRecoverStatus失败的情况
+   // 1. The case where QueryChunkServerRecoverStatus fails
    QueryChunkServerRecoverStatusResponse response;
    response.set_statuscode(
        curve::mds::schedule::kScheduleErrInvalidQueryChunkserverID);
    EXPECT_CALL(*scheduleService, QueryChunkServerRecoverStatus(_, _, _, _))
        .WillOnce(DoAll(
            SetArgPointee<2>(response),
-           Invoke([](RpcController *controller,
-                     const QueryChunkServerRecoverStatusRequest *request,
-                     QueryChunkServerRecoverStatusResponse *response,
-                     Closure *done) { brpc::ClosureGuard doneGuard(done); })));
+           Invoke([](RpcController* controller,
+                     const QueryChunkServerRecoverStatusRequest* request,
+                     QueryChunkServerRecoverStatusResponse* response,
+                     Closure* done) { brpc::ClosureGuard doneGuard(done); })));
    ASSERT_EQ(-1, mdsClient.QueryChunkServerRecoverStatus(
                      std::vector<ChunkServerIdType>{}, &statusMap));
-   // 2. QueryChunkServerRecoverStatus成功的情况
+   // 2. The case where QueryChunkServerRecoverStatus succeeds
    response.set_statuscode(curve::mds::schedule::kScheduleErrCodeSuccess);
    EXPECT_CALL(*scheduleService, QueryChunkServerRecoverStatus(_, _, _, _))
        .WillOnce(DoAll(
            SetArgPointee<2>(response),
-           Invoke([](RpcController *controller,
-                     const QueryChunkServerRecoverStatusRequest *request,
-                     QueryChunkServerRecoverStatusResponse *response,
-                     Closure *done) { brpc::ClosureGuard doneGuard(done); })));
+           Invoke([](RpcController* controller,
+                     const QueryChunkServerRecoverStatusRequest* request,
+                     QueryChunkServerRecoverStatusResponse* response,
+                     Closure* done) { brpc::ClosureGuard doneGuard(done); })));
    ASSERT_EQ(0, mdsClient.QueryChunkServerRecoverStatus(
                     std::vector<ChunkServerIdType>{}, &statusMap));
 }
@@ -1329,12 +1327,12 @@ TEST_F(ToolMDSClientTest, GetMetric) {
 TEST_F(ToolMDSClientTest, GetCurrentMds) {
    bvar::Status<std::string> value;
    value.expose("mds_status");
-   // 有leader
+   // With a leader
    value.set_value("leader");
    std::vector<std::string> curMds = mdsClient.GetCurrentMds();
    ASSERT_EQ(1, curMds.size());
    ASSERT_EQ("127.0.0.1:9192", curMds[0]);
-   // 没有leader
+   // No leader
    value.set_value("follower");
    ASSERT_TRUE(mdsClient.GetCurrentMds().empty());
 }
@@ -1343,20 +1341,22 @@ TEST_F(ToolMDSClientTest, GetMdsOnlineStatus) {
    bvar::Status<std::string> value;
    value.expose("mds_config_mds_listen_addr");
    std::map<std::string, bool> onlineStatus;
-   // 9180在线,9999不在线
-   value.set_value("{\"conf_name\":\"mds.listen.addr\","
-                   "\"conf_value\":\"127.0.0.1:9192\"}");
+   // 9180 online, 9999 offline
+   value.set_value(
+       "{\"conf_name\":\"mds.listen.addr\","
+       "\"conf_value\":\"127.0.0.1:9192\"}");
    mdsClient.GetMdsOnlineStatus(&onlineStatus);
    std::map<std::string, bool> expected = {{"127.0.0.1:9191", false},
                                            {"127.0.0.1:9192", true}};
    ASSERT_EQ(expected, onlineStatus);
-   // 9180的服务端口不一致
-   value.set_value("{\"conf_name\":\"mds.listen.addr\","
-                   "\"conf_value\":\"127.0.0.1:9188\"}");
+   // The service ports of 9180 are inconsistent
+   value.set_value(
+       "{\"conf_name\":\"mds.listen.addr\","
+       "\"conf_value\":\"127.0.0.1:9188\"}");
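GetMdsOnlineStatus treats an MDS as online only when the conf_value field of this JSON payload matches the address being probed, which is why the test flips the port to 9188 above. A minimal extraction sketch follows; ParseConfValue is hypothetical and uses plain string scanning, whereas the real tool may rely on a JSON library:

#include <string>

// Sketch: pull "conf_value" out of a payload such as
// {"conf_name":"mds.listen.addr","conf_value":"127.0.0.1:9192"}.
// Non-JSON input such as "127.0.0.1::9191" must fail, as the test expects.
bool ParseConfValue(const std::string& json, std::string* value) {
    const std::string key = "\"conf_value\":\"";
    size_t begin = json.find(key);
    if (begin == std::string::npos) return false;
    begin += key.size();
    size_t end = json.find('"', begin);
    if (end == std::string::npos) return false;
    *value = json.substr(begin, end - begin);
    return true;
}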
mdsClient.GetMdsOnlineStatus(&onlineStatus); expected = {{"127.0.0.1:9191", false}, {"127.0.0.1:9192", false}}; ASSERT_EQ(expected, onlineStatus); - // 非json格式 + // Non JSON format value.set_value("127.0.0.1::9191"); mdsClient.GetMdsOnlineStatus(&onlineStatus); expected = {{"127.0.0.1:9191", false}, {"127.0.0.1:9192", false}}; @@ -1366,33 +1366,33 @@ TEST_F(ToolMDSClientTest, GetMdsOnlineStatus) { TEST_F(ToolMDSClientTest, ListClient) { std::vector clientAddrs; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*nameService, ListClient(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::ListClientRequest *request, - curve::mds::ListClientResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::ListClientRequest* request, + curve::mds::ListClientResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.ListClient(&clientAddrs)); - // 返回码不为OK + // The return code is not OK curve::mds::ListClientResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ListClient(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListClientRequest *request, - curve::mds::ListClientResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ListClientRequest* request, + curve::mds::ListClientResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListClient(&clientAddrs)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); for (int i = 0; i < 5; i++) { auto clientInfo = response.add_clientinfos(); @@ -1402,14 +1402,14 @@ TEST_F(ToolMDSClientTest, ListClient) { EXPECT_CALL(*nameService, ListClient(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListClientRequest *request, - curve::mds::ListClientResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ListClientRequest* request, + curve::mds::ListClientResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListClient(&clientAddrs)); ASSERT_EQ(response.clientinfos_size(), clientAddrs.size()); for (int i = 0; i < 5; i++) { - const auto &clientInfo = response.clientinfos(i); + const auto& clientInfo = response.clientinfos(i); std::string expected = clientInfo.ip() + ":" + std::to_string(clientInfo.port()); ASSERT_EQ(expected, clientAddrs[i]); @@ -1424,13 +1424,13 @@ TEST_F(ToolMDSClientTest, ListVolumesOnCopyset) { EXPECT_CALL(*nameService, ListVolumesOnCopysets(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const curve::mds::ListVolumesOnCopysetsRequest *request, - curve::mds::ListVolumesOnCopysetsResponse *response, - Closure *done) { + Invoke([](RpcController* controller, + const curve::mds::ListVolumesOnCopysetsRequest* request, + curve::mds::ListVolumesOnCopysetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, 
mdsClient.ListVolumesOnCopyset(copysets, &fileNames)); @@ -1441,10 +1441,10 @@ TEST_F(ToolMDSClientTest, ListVolumesOnCopyset) { EXPECT_CALL(*nameService, ListVolumesOnCopysets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListVolumesOnCopysetsRequest *request, - curve::mds::ListVolumesOnCopysetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ListVolumesOnCopysetsRequest* request, + curve::mds::ListVolumesOnCopysetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListVolumesOnCopyset(copysets, &fileNames)); // normal @@ -1456,10 +1456,10 @@ TEST_F(ToolMDSClientTest, ListVolumesOnCopyset) { EXPECT_CALL(*nameService, ListVolumesOnCopysets(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListVolumesOnCopysetsRequest *request, - curve::mds::ListVolumesOnCopysetsResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const curve::mds::ListVolumesOnCopysetsRequest* request, + curve::mds::ListVolumesOnCopysetsResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListVolumesOnCopyset(copysets, &fileNames)); ASSERT_EQ(response.filenames_size(), fileNames.size()); for (int i = 0; i < 5; i++) { @@ -1478,12 +1478,12 @@ TEST_F(ToolMDSClientTest, SetCopysetsAvailFlag) { EXPECT_CALL(*topoService, SetCopysetsAvailFlag(_, _, _, _)) .Times(6) .WillRepeatedly( - Invoke([](RpcController *controller, - const SetCopysetsAvailFlagRequest *request, - SetCopysetsAvailFlagResponse *response, Closure *done) { + Invoke([](RpcController* controller, + const SetCopysetsAvailFlagRequest* request, + SetCopysetsAvailFlagResponse* response, Closure* done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller* cntl = + dynamic_cast(controller); cntl->SetFailed("test"); })); ASSERT_EQ(-1, mdsClient.SetCopysetsAvailFlag(copysets, false)); @@ -1494,10 +1494,10 @@ TEST_F(ToolMDSClientTest, SetCopysetsAvailFlag) { EXPECT_CALL(*topoService, SetCopysetsAvailFlag(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const SetCopysetsAvailFlagRequest *request, - SetCopysetsAvailFlagResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const SetCopysetsAvailFlagRequest* request, + SetCopysetsAvailFlagResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.SetCopysetsAvailFlag(copysets, false)); // normal @@ -1505,10 +1505,10 @@ TEST_F(ToolMDSClientTest, SetCopysetsAvailFlag) { EXPECT_CALL(*topoService, SetCopysetsAvailFlag(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const SetCopysetsAvailFlagRequest *request, - SetCopysetsAvailFlagResponse *response, - Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + Invoke([](RpcController* controller, + const SetCopysetsAvailFlagRequest* request, + SetCopysetsAvailFlagResponse* response, + Closure* done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.SetCopysetsAvailFlag(copysets, false)); } @@ -1518,12 +1518,12 @@ TEST_F(ToolMDSClientTest, ListUnAvailCopySets) { EXPECT_CALL(*topoService, 
ListUnAvailCopySets(_, _, _, _))
        .Times(6)
        .WillRepeatedly(
-           Invoke([](RpcController *controller,
-                     const ListUnAvailCopySetsRequest *request,
-                     ListUnAvailCopySetsResponse *response, Closure *done) {
+           Invoke([](RpcController* controller,
+                     const ListUnAvailCopySetsRequest* request,
+                     ListUnAvailCopySetsResponse* response, Closure* done) {
                brpc::ClosureGuard doneGuard(done);
-               brpc::Controller *cntl =
-                   dynamic_cast<brpc::Controller *>(controller);
+               brpc::Controller* cntl =
+                   dynamic_cast<brpc::Controller*>(controller);
                cntl->SetFailed("test");
            }));
    ASSERT_EQ(-1, mdsClient.ListUnAvailCopySets(&copysets));
@@ -1534,10 +1534,10 @@ TEST_F(ToolMDSClientTest, ListUnAvailCopySets) {
    EXPECT_CALL(*topoService, ListUnAvailCopySets(_, _, _, _))
        .WillOnce(DoAll(
            SetArgPointee<2>(response),
-           Invoke([](RpcController *controller,
-                     const ListUnAvailCopySetsRequest *request,
-                     ListUnAvailCopySetsResponse *response,
-                     Closure *done) { brpc::ClosureGuard doneGuard(done); })));
+           Invoke([](RpcController* controller,
+                     const ListUnAvailCopySetsRequest* request,
+                     ListUnAvailCopySetsResponse* response,
+                     Closure* done) { brpc::ClosureGuard doneGuard(done); })));
    ASSERT_EQ(-1, mdsClient.ListUnAvailCopySets(&copysets));
    // normal
@@ -1550,10 +1550,10 @@ TEST_F(ToolMDSClientTest, ListUnAvailCopySets) {
    EXPECT_CALL(*topoService, ListUnAvailCopySets(_, _, _, _))
        .WillOnce(DoAll(
            SetArgPointee<2>(response),
-           Invoke([](RpcController *controller,
-                     const ListUnAvailCopySetsRequest *request,
-                     ListUnAvailCopySetsResponse *response,
-                     Closure *done) { brpc::ClosureGuard doneGuard(done); })));
+           Invoke([](RpcController* controller,
+                     const ListUnAvailCopySetsRequest* request,
+                     ListUnAvailCopySetsResponse* response,
+                     Closure* done) { brpc::ClosureGuard doneGuard(done); })));
    ASSERT_EQ(0, mdsClient.ListUnAvailCopySets(&copysets));
 }
diff --git a/test/tools/metric_client_test.cpp b/test/tools/metric_client_test.cpp
index 30f6c78802..7e41b910f5 100644
--- a/test/tools/metric_client_test.cpp
+++ b/test/tools/metric_client_test.cpp
@@ -20,10 +20,12 @@
  * Author: charisu
  */
-#include
+#include "src/tools/metric_client.h"
+
 #include
+#include
+
 #include
-#include "src/tools/metric_client.h"
 namespace curve {
 namespace tool {
@@ -43,82 +45,71 @@ class MetricClientTest : public ::testing::Test {
        delete server;
        server = nullptr;
    }
-   brpc::Server *server;
+   brpc::Server* server;
 };
 TEST_F(MetricClientTest, GetMetric) {
    MetricClient client;
-   // 正常情况
+   // Normal situation
    std::string metricName = "string_metric";
    bvar::Status<std::string> metric(metricName, "value");
    std::string value;
-   ASSERT_EQ(MetricRet::kOK, client.GetMetric(serverAddr,
-                                              metricName,
-                                              &value));
+   ASSERT_EQ(MetricRet::kOK, client.GetMetric(serverAddr, metricName, &value));
    ASSERT_EQ("value", value);
-   // bvar不存在
-   ASSERT_EQ(MetricRet::kNotFound, client.GetMetric(serverAddr,
-                                                    "not-exist-metric",
-                                                    &value));
-   // 其他错误
-   ASSERT_EQ(MetricRet::kOtherErr, client.GetMetric("127.0.0.1:9191",
-                                                    "not-exist-metric",
-                                                    &value));
+   // Bvar does not exist
+   ASSERT_EQ(MetricRet::kNotFound,
+             client.GetMetric(serverAddr, "not-exist-metric", &value));
+   // Other errors
+   ASSERT_EQ(MetricRet::kOtherErr,
+             client.GetMetric("127.0.0.1:9191", "not-exist-metric", &value));
 }
 TEST_F(MetricClientTest, GetMetricUint) {
    MetricClient client;
-   // 正常情况
+   // Normal situation
    std::string metricName = "uint_metric";
    bvar::Status<uint64_t> metric(metricName, 10);
    uint64_t value;
-   ASSERT_EQ(MetricRet::kOK, client.GetMetricUint(serverAddr,
-                                                  metricName,
-                                                  &value));
+   ASSERT_EQ(MetricRet::kOK,
+             client.GetMetricUint(serverAddr, metricName, &value));
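The three MetricRet values exercised in these tests form the client's whole error contract: kNotFound means the server answered but the bvar is absent, while kOtherErr covers transport and parse failures. A hedged caller-side sketch — ReadUintMetricOrZero is invented for illustration and is not part of this patch:

// Illustration only: distinguish "metric missing" from "endpoint broken",
// the distinction the assertions above pin down.
uint64_t ReadUintMetricOrZero(MetricClient* client,
                              const std::string& serverAddr,
                              const std::string& metricName) {
    uint64_t value = 0;
    MetricRet ret = client->GetMetricUint(serverAddr, metricName, &value);
    if (ret == MetricRet::kNotFound) return 0;  // server up, bvar not exposed
    if (ret == MetricRet::kOtherErr) return 0;  // unreachable or unparsable
    return value;                               // kOK
}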
ASSERT_EQ(10, value); - // bvar不存在 - ASSERT_EQ(MetricRet::kNotFound, client.GetMetricUint(serverAddr, - "not-exist-metric", - &value)); - // 其他错误 - ASSERT_EQ(MetricRet::kOtherErr, client.GetMetricUint("127.0.0.1:9191", - "not-exist-metric", - &value)); - // 解析失败 + // Bvar does not exist + ASSERT_EQ(MetricRet::kNotFound, + client.GetMetricUint(serverAddr, "not-exist-metric", &value)); + // Other errors + ASSERT_EQ( + MetricRet::kOtherErr, + client.GetMetricUint("127.0.0.1:9191", "not-exist-metric", &value)); + // Parsing failed bvar::Status metric2("string_metric", "value"); - ASSERT_EQ(MetricRet::kOtherErr, client.GetMetricUint(serverAddr, - "string_metric", - &value)); + ASSERT_EQ(MetricRet::kOtherErr, + client.GetMetricUint(serverAddr, "string_metric", &value)); } TEST_F(MetricClientTest, GetConfValue) { MetricClient client; - // 正常情况 + // Normal situation std::string metricName = "conf_metric"; bvar::Status conf_metric(metricName, ""); - conf_metric.set_value("{\"conf_name\":\"key\"," - "\"conf_value\":\"value\"}"); + conf_metric.set_value( + "{\"conf_name\":\"key\"," + "\"conf_value\":\"value\"}"); std::string value; - ASSERT_EQ(MetricRet::kOK, client.GetConfValueFromMetric(serverAddr, - metricName, - &value)); + ASSERT_EQ(MetricRet::kOK, + client.GetConfValueFromMetric(serverAddr, metricName, &value)); ASSERT_EQ("value", value); - // bvar不存在 - ASSERT_EQ(MetricRet::kNotFound, client.GetConfValueFromMetric( - serverAddr, - "not-exist-metric", - &value)); - // 其他错误 - ASSERT_EQ(MetricRet::kOtherErr, client.GetConfValueFromMetric( - "127.0.0.1:9191", - "not-exist-metric", - &value)); - // 解析失败 + // Bvar does not exist + ASSERT_EQ( + MetricRet::kNotFound, + client.GetConfValueFromMetric(serverAddr, "not-exist-metric", &value)); + // Other errors + ASSERT_EQ(MetricRet::kOtherErr, + client.GetConfValueFromMetric("127.0.0.1:9191", + "not-exist-metric", &value)); + // Parsing failed conf_metric.set_value("string"); - ASSERT_EQ(MetricRet::kOtherErr, client.GetConfValueFromMetric( - serverAddr, - metricName, - &value)); + ASSERT_EQ(MetricRet::kOtherErr, + client.GetConfValueFromMetric(serverAddr, metricName, &value)); } } // namespace tool diff --git a/test/tools/namespace_tool_core_test.cpp b/test/tools/namespace_tool_core_test.cpp index e1b365b28f..7affe3b1a6 100644 --- a/test/tools/namespace_tool_core_test.cpp +++ b/test/tools/namespace_tool_core_test.cpp @@ -20,18 +20,20 @@ * Author: charisu */ +#include "src/tools/namespace_tool_core.h" + #include + #include "src/common/timeutility.h" -#include "src/tools/namespace_tool_core.h" #include "test/tools/mock/mock_mds_client.h" +using curve::tool::CreateFileContext; +using curve::tool::GetSegmentRes; using ::testing::_; -using ::testing::Return; using ::testing::DoAll; -using ::testing::SetArgPointee; +using ::testing::Return; using ::testing::SaveArg; -using curve::tool::GetSegmentRes; -using curve::tool::CreateFileContext; +using ::testing::SetArgPointee; DECLARE_bool(isTest); DECLARE_string(fileName); @@ -39,12 +41,8 @@ DECLARE_uint64(offset); class NameSpaceToolCoreTest : public ::testing::Test { protected: - void SetUp() { - client_ = std::make_shared(); - } - void TearDown() { - client_ = nullptr; - } + void SetUp() { client_ = std::make_shared(); } + void TearDown() { client_ = nullptr; } void GetFileInfoForTest(FileInfo* fileInfo) { fileInfo->set_id(1); @@ -98,14 +96,11 @@ TEST_F(NameSpaceToolCoreTest, GetFileInfo) { EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(expected), - Return(0))); + 
.WillOnce(DoAll(SetArgPointee<1>(expected), Return(0))); ASSERT_EQ(0, namespaceTool.GetFileInfo(fileName, &fileInfo)); ASSERT_EQ(expected.DebugString(), fileInfo.DebugString()); - EXPECT_CALL(*client_, GetFileInfo(_, _)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*client_, GetFileInfo(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.GetFileInfo(fileName, &fileInfo)); } @@ -122,17 +117,14 @@ TEST_F(NameSpaceToolCoreTest, ListDir) { EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(expected), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(expected), Return(0))); ASSERT_EQ(0, namespaceTool.ListDir(fileName, &files)); ASSERT_EQ(expected.size(), files.size()); for (uint64_t i = 0; i < expected.size(); ++i) { ASSERT_EQ(expected[i].DebugString(), files[i].DebugString()); } - EXPECT_CALL(*client_, ListDir(_, _)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*client_, ListDir(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.ListDir(fileName, &files)); } @@ -140,14 +132,12 @@ TEST_F(NameSpaceToolCoreTest, CreateFile) { curve::tool::NameSpaceToolCore namespaceTool(client_); std::string fileName = "/test"; uint64_t length = 5 * segmentSize; - uint64_t stripeUnit = 32 * 1024 *1024; + uint64_t stripeUnit = 32 * 1024 * 1024; uint64_t stripeCount = 32; std::string pstName = ""; - // 1、正常情况 - EXPECT_CALL(*client_, CreateFile(_)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*client_, CreateFile(_)).Times(1).WillOnce(Return(0)); CreateFileContext context; context.type = curve::mds::FileType::INODE_PAGEFILE; @@ -159,10 +149,8 @@ TEST_F(NameSpaceToolCoreTest, CreateFile) { ASSERT_EQ(0, namespaceTool.CreateFile(context)); - // 2、创建失败 - EXPECT_CALL(*client_, CreateFile(_)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Creation failed + EXPECT_CALL(*client_, CreateFile(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.CreateFile(context)); } @@ -170,16 +158,12 @@ TEST_F(NameSpaceToolCoreTest, ExtendVolume) { curve::tool::NameSpaceToolCore namespaceTool(client_); std::string fileName = "/test"; uint64_t length = 10 * segmentSize; - // 1、正常情况 - EXPECT_CALL(*client_, ExtendVolume(_, _)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*client_, ExtendVolume(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.ExtendVolume(fileName, length)); - // 2、创建失败 - EXPECT_CALL(*client_, ExtendVolume(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Creation failed + EXPECT_CALL(*client_, ExtendVolume(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.ExtendVolume(fileName, length)); } @@ -188,16 +172,12 @@ TEST_F(NameSpaceToolCoreTest, DeleteFile) { std::string fileName = "/test"; bool forceDelete = false; - // 1、正常情况 - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.DeleteFile(fileName, forceDelete)); - // 2、创建失败 - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Creation failed + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.DeleteFile(fileName, forceDelete)); } @@ -213,23 +193,22 @@ TEST_F(NameSpaceToolCoreTest, GetChunkServerListInCopySet) { expected.emplace_back(csLoc); } - // 1、正常情况 + // 1. 
Normal situation EXPECT_CALL(*client_, GetChunkServerListInCopySet(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(expected), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(expected), Return(0))); ASSERT_EQ(0, namespaceTool.GetChunkServerListInCopySet(logicalPoolId, - copysetId, &csLocs)); + copysetId, &csLocs)); ASSERT_EQ(expected.size(), csLocs.size()); for (uint64_t i = 0; i < expected.size(); ++i) { ASSERT_EQ(expected[i].DebugString(), csLocs[i].DebugString()); } - // 2、失败 + // 2. Failure EXPECT_CALL(*client_, GetChunkServerListInCopySet(_, _, _)) .Times(1) .WillOnce(Return(-1)); - ASSERT_EQ(-1, namespaceTool.GetChunkServerListInCopySet(logicalPoolId, - copysetId, &csLocs)); + ASSERT_EQ(-1, namespaceTool.GetChunkServerListInCopySet( + logicalPoolId, copysetId, &csLocs)); } TEST_F(NameSpaceToolCoreTest, CleanRecycleBin) { @@ -274,18 +253,14 @@ TEST_F(NameSpaceToolCoreTest, CleanRecycleBin) { // CASE 1: clean recycle bin success EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(7) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(7).WillRepeatedly(Return(0)); ASSERT_EQ(0, namespaceTool.CleanRecycleBin("/", parseArg("0s"))); // CASE 2: clean recycle bin fail EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); EXPECT_CALL(*client_, DeleteFile(_, _)) .Times(7) .WillOnce(Return(-1)) @@ -293,47 +268,35 @@ TEST_F(NameSpaceToolCoreTest, CleanRecycleBin) { ASSERT_EQ(-1, namespaceTool.CleanRecycleBin("/", parseArg("0s"))); // CASE 3: list dir fail - EXPECT_CALL(*client_, ListDir(_, _)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*client_, ListDir(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.CleanRecycleBin("/", parseArg("0s"))); // CASE 4: clean recycle bin with expireTime is "3s" EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(6) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(6).WillRepeatedly(Return(0)); ASSERT_EQ(0, namespaceTool.CleanRecycleBin("/", parseArg("3s"))); // CASE 5: clean recycle bin with expireTime is "3m" EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(5) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(5).WillRepeatedly(Return(0)); ASSERT_EQ(0, namespaceTool.CleanRecycleBin("/", parseArg("3m"))); // CASE 6: clean recycle bin with expireTime is "3d" EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); - EXPECT_CALL(*client_, DeleteFile(_, _)) - .Times(3) - .WillRepeatedly(Return(0)); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); + EXPECT_CALL(*client_, DeleteFile(_, _)).Times(3).WillRepeatedly(Return(0)); ASSERT_EQ(0, namespaceTool.CleanRecycleBin("/", parseArg("3d"))); // CASE 7: clean recycle bin with different dirname auto cleanByDir = [&](const std::string& dirname, int deleteTimes) { EXPECT_CALL(*client_, ListDir(_, _)) .Times(1) - 
.WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); EXPECT_CALL(*client_, DeleteFile(_, _)) .Times(deleteTimes) @@ -352,10 +315,9 @@ TEST_F(NameSpaceToolCoreTest, CleanRecycleBin) { cleanByDir("/", 7); } - TEST_F(NameSpaceToolCoreTest, GetAllocatedSize) { curve::tool::NameSpaceToolCore namespaceTool(client_); - // 1、正常情况 + // 1. Normal situation uint64_t allocSize; EXPECT_CALL(*client_, GetAllocatedSize(_, _, _)) .Times(1) @@ -374,38 +336,33 @@ TEST_F(NameSpaceToolCoreTest, QueryChunkCopyset) { uint64_t chunkId; std::pair copyset; - // 正常情况 + // Normal situation EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_CALL(*client_, GetSegmentInfo(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(segment), - Return(GetSegmentRes::kOK))); - ASSERT_EQ(0, namespaceTool.QueryChunkCopyset(fileName, offset, - &chunkId, ©set)); + .WillOnce(DoAll(SetArgPointee<2>(segment), Return(GetSegmentRes::kOK))); + ASSERT_EQ(0, namespaceTool.QueryChunkCopyset(fileName, offset, &chunkId, + ©set)); ASSERT_EQ(2001, chunkId); ASSERT_EQ(1, copyset.first); ASSERT_EQ(1001, copyset.second); - // GetFileInfo失败 - EXPECT_CALL(*client_, GetFileInfo(_, _)) - .Times(1) - .WillOnce(Return(-1)); - ASSERT_EQ(-1, namespaceTool.QueryChunkCopyset(fileName, offset, - &chunkId, ©set)); + // GetFileInfo failed + EXPECT_CALL(*client_, GetFileInfo(_, _)).Times(1).WillOnce(Return(-1)); + ASSERT_EQ(-1, namespaceTool.QueryChunkCopyset(fileName, offset, &chunkId, + ©set)); - // GetSegmentInfo失败 + // GetSegmentInfo failed EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_CALL(*client_, GetSegmentInfo(_, _, _)) .Times(1) .WillOnce(Return(GetSegmentRes::kOtherError)); - ASSERT_EQ(-1, namespaceTool.QueryChunkCopyset(fileName, offset, - &chunkId, ©set)); + ASSERT_EQ(-1, namespaceTool.QueryChunkCopyset(fileName, offset, &chunkId, + ©set)); } TEST_F(NameSpaceToolCoreTest, GetFileSegments) { @@ -417,33 +374,29 @@ TEST_F(NameSpaceToolCoreTest, GetFileSegments) { PageFileSegment expected; GetSegmentForTest(&expected); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_CALL(*client_, GetSegmentInfo(_, _, _)) .Times(5) .WillOnce(Return(GetSegmentRes::kSegmentNotAllocated)) - .WillRepeatedly(DoAll(SetArgPointee<2>(expected), - Return(GetSegmentRes::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(expected), Return(GetSegmentRes::kOK))); ASSERT_EQ(0, namespaceTool.GetFileSegments(fileName, &segments)); ASSERT_EQ(4, segments.size()); for (uint64_t i = 0; i < segments.size(); ++i) { ASSERT_EQ(expected.DebugString(), segments[i].DebugString()); } - // 2、GetFileInfo失败的情况 - EXPECT_CALL(*client_, GetFileInfo(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. The situation of GetFileInfo failure + EXPECT_CALL(*client_, GetFileInfo(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.GetFileSegments(fileName, &segments)); - // 3、获取segment失败 + // 3. 
Failed to obtain segment EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(fileInfo), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_CALL(*client_, GetSegmentInfo(_, _, _)) .Times(1) .WillOnce(Return(GetSegmentRes::kOtherError)); @@ -452,11 +405,9 @@ TEST_F(NameSpaceToolCoreTest, GetFileSegments) { TEST_F(NameSpaceToolCoreTest, GetFileSize) { curve::tool::NameSpaceToolCore namespaceTool(client_); - // 1、正常情况 + // 1. Normal situation uint64_t size; - EXPECT_CALL(*client_, GetFileSize(_, _)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*client_, GetFileSize(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.GetFileSize("/test", &size)); } @@ -465,8 +416,7 @@ TEST_F(NameSpaceToolCoreTest, TestUpdateThrottle) { // 1. throttle type is invalid { - EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _)) - .Times(0); + EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _)).Times(0); ASSERT_EQ(-1, namespaceTool.UpdateFileThrottle("/test", "hello", 10000, 0, 0)); @@ -476,11 +426,10 @@ TEST_F(NameSpaceToolCoreTest, TestUpdateThrottle) { { curve::mds::ThrottleParams params; EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _)) - .WillOnce( - DoAll(SaveArg<1>(¶ms), Return(0))); + .WillOnce(DoAll(SaveArg<1>(¶ms), Return(0))); ASSERT_EQ(0, namespaceTool.UpdateFileThrottle("/test", "BPS_TOTAL", - 10000, -1, -1)); + 10000, -1, -1)); ASSERT_EQ(10000, params.limit()); ASSERT_FALSE(params.has_burst()); ASSERT_FALSE(params.has_burstlength()); @@ -489,8 +438,7 @@ TEST_F(NameSpaceToolCoreTest, TestUpdateThrottle) { // 3. burst lower than limit { curve::mds::ThrottleParams params; - EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _)) - .Times(0); + EXPECT_CALL(*client_, UpdateFileThrottleParams(_, _)).Times(0); ASSERT_EQ(-1, namespaceTool.UpdateFileThrottle("/test", "BPS_TOTAL", 10000, 5000, -1)); @@ -504,7 +452,7 @@ TEST_F(NameSpaceToolCoreTest, TestUpdateThrottle) { .WillOnce(DoAll(SaveArg<1>(¶ms), Return(0))); ASSERT_EQ(0, namespaceTool.UpdateFileThrottle("/test", "BPS_TOTAL", - 10000, 50000, -1)); + 10000, 50000, -1)); ASSERT_EQ(10000, params.limit()); ASSERT_EQ(50000, params.burst()); ASSERT_EQ(1, params.burstlength()); @@ -518,7 +466,7 @@ TEST_F(NameSpaceToolCoreTest, TestUpdateThrottle) { .WillOnce(DoAll(SaveArg<1>(¶ms), Return(0))); ASSERT_EQ(0, namespaceTool.UpdateFileThrottle("/test", "BPS_TOTAL", - 10000, 50000, 10)); + 10000, 50000, 10)); ASSERT_EQ(10000, params.limit()); ASSERT_EQ(50000, params.burst()); ASSERT_EQ(10, params.burstlength()); diff --git a/test/tools/namespace_tool_test.cpp b/test/tools/namespace_tool_test.cpp index a8202bda39..526263446f 100644 --- a/test/tools/namespace_tool_test.cpp +++ b/test/tools/namespace_tool_test.cpp @@ -21,13 +21,15 @@ * Copyright (c) 2018 netease */ -#include #include "src/tools/namespace_tool.h" + +#include + #include "test/tools/mock/mock_namespace_tool_core.h" using ::testing::_; -using ::testing::Return; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; DECLARE_bool(isTest); @@ -39,9 +41,7 @@ DECLARE_bool(showAllocMap); class NameSpaceToolTest : public ::testing::Test { protected: - NameSpaceToolTest() { - FLAGS_isTest = true; - } + NameSpaceToolTest() { FLAGS_isTest = true; } void SetUp() { core_ = std::make_shared(); } @@ -106,80 +106,68 @@ TEST_F(NameSpaceToolTest, GetFile) { PageFileSegment segment; GetSegmentForTest(&segment); FLAGS_fileName = "/test/"; - // 0、Init失败 - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); 
+   // 0. Init failed
+   EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(-1));
    ASSERT_EQ(-1, namespaceTool.RunCommand("get"));
-   EXPECT_CALL(*core_, Init(_))
-       .Times(1)
-       .WillOnce(Return(0));
+   EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0));
    ASSERT_EQ(-1, namespaceTool.RunCommand("abc"));
-   // 1、正常情况
+   // 1. Normal situation
    FLAGS_showAllocMap = true;
    EXPECT_CALL(*core_, GetFileInfo(_, _))
        .Times(1)
-       .WillOnce(DoAll(SetArgPointee<1>(fileInfo),
-                       Return(0)));
+       .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
    curve::tool::AllocMap allocMap = {{1, segmentSize}, {2, 9 * segmentSize}};
    EXPECT_CALL(*core_, GetAllocatedSize(_, _, _))
        .Times(1)
        .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize),
-                       SetArgPointee<2>(allocMap),
-                       Return(0)));
+                       SetArgPointee<2>(allocMap), Return(0)));
    ASSERT_EQ(0, namespaceTool.RunCommand("get"));
-   // 2、获取fileInfo失败
-   EXPECT_CALL(*core_, GetFileInfo(_, _))
-       .Times(1)
-       .WillOnce(Return(-1));
+   // 2. Failed to obtain fileInfo
+   EXPECT_CALL(*core_, GetFileInfo(_, _)).Times(1).WillOnce(Return(-1));
    ASSERT_EQ(-1, namespaceTool.RunCommand("get"));
-   // 3、计算大小失败
-   EXPECT_CALL(*core_, GetFileInfo(_, _))
+   // 3. Size calculation failed
+   EXPECT_CALL(*core_, GetFileInfo(_, _))
        .Times(1)
-       .WillOnce(DoAll(SetArgPointee<1>(fileInfo),
-                       Return(0)));
+       .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
    EXPECT_CALL(*core_, GetAllocatedSize(_, _, _))
        .Times(1)
        .WillOnce(Return(-1));
    ASSERT_EQ(-1, namespaceTool.RunCommand("get"));
-   // 4、get的是目录的话还要计算file size
+   // 4. If the target is a directory, the file size should also be calculated
    FileInfo fileInfo2;
    GetFileInfoForTest(&fileInfo2);
    fileInfo2.set_filetype(curve::mds::FileType::INODE_DIRECTORY);
    EXPECT_CALL(*core_, GetFileInfo(_, _))
        .Times(1)
-       .WillOnce(DoAll(SetArgPointee<1>(fileInfo2),
-                       Return(0)));
+       .WillOnce(DoAll(SetArgPointee<1>(fileInfo2), Return(0)));
    EXPECT_CALL(*core_, GetAllocatedSize(_, _, _))
        .Times(1)
-       .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize),
-                       Return(0)));
+       .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), Return(0)));
    EXPECT_CALL(*core_, GetFileSize(_, _))
        .Times(1)
-       .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize),
-                       Return(0)));
+       .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), Return(0)));
    ASSERT_EQ(0, namespaceTool.RunCommand("get"));
-   // 5、指定了-showAllocSize=false的话不计算分配大小
+   // 5. If -showAllocSize=false is specified, the allocation size will not be
+   // calculated
    FLAGS_showAllocSize = false;
    EXPECT_CALL(*core_, GetFileInfo(_, _))
        .Times(1)
-       .WillOnce(DoAll(SetArgPointee<1>(fileInfo),
-                       Return(0)));
+       .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0)));
    ASSERT_EQ(0, namespaceTool.RunCommand("get"));
-   // 6、对目录指定了-showFileSize=false的话不计算文件大小
+   // 6. If -showFileSize=false is specified for the directory, the file size
+   // will not be calculated
    FLAGS_showFileSize = false;
    FLAGS_showAllocSize = false;
    EXPECT_CALL(*core_, GetFileInfo(_, _))
        .Times(1)
-       .WillOnce(DoAll(SetArgPointee<1>(fileInfo2),
-                       Return(0)));
+       .WillOnce(DoAll(SetArgPointee<1>(fileInfo2), Return(0)));
    ASSERT_EQ(0, namespaceTool.RunCommand("get"));
 }
@@ -190,75 +178,66 @@ TEST_F(NameSpaceToolTest, ListDir) {
    curve::tool::NameSpaceTool namespaceTool(core_);
    namespaceTool.PrintHelp("list");
    FileInfo fileInfo;
    GetFileInfoForTest(&fileInfo);
    PageFileSegment segment;
    GetSegmentForTest(&segment);
-   EXPECT_CALL(*core_, Init(_))
-       .Times(1)
-       .WillOnce(Return(0));
+   EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0));
-   // 1、正常情况
+   // 1.
Normal situation std::vector<FileInfo> files; for (uint64_t i = 0; i < 3; ++i) { files.emplace_back(fileInfo); } EXPECT_CALL(*core_, ListDir(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); EXPECT_CALL(*core_, GetAllocatedSize(_, _, _)) .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<1>(10 * segmentSize), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(10 * segmentSize), Return(0))); FLAGS_fileName = "/"; ASSERT_EQ(0, namespaceTool.RunCommand("list")); FLAGS_fileName = "/test/"; ASSERT_EQ(0, namespaceTool.RunCommand("list")); - // 2、listDir失败 - EXPECT_CALL(*core_, ListDir(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. ListDir failed + EXPECT_CALL(*core_, ListDir(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("list")); - // 3、计算大小失败,个别的文件计算大小失败会继续计算,但是返回-1 + // 3. Failed to calculate the size. If size calculation fails for some + // files, the rest are still calculated, but -1 is returned EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); EXPECT_CALL(*core_, GetAllocatedSize(_, _, _)) .Times(3) .WillOnce(Return(-1)) - .WillRepeatedly(DoAll(SetArgPointee<1>(10 * segmentSize), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(10 * segmentSize), Return(0))); ASSERT_EQ(-1, namespaceTool.RunCommand("list")); - // 4、指定了-showAllocSize=false的话不计算分配大小 + // 4. If -showAllocSize=false is specified, the allocation size will not be + // calculated FLAGS_showAllocSize = false; EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("list")); - // 4、list的时候有目录的话计算fileSize + // 5. If there is a directory in the list, calculate fileSize FileInfo fileInfo2; GetFileInfoForTest(&fileInfo2); fileInfo2.set_filetype(curve::mds::FileType::INODE_DIRECTORY); files.emplace_back(fileInfo2); EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); EXPECT_CALL(*core_, GetFileSize(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(10 * segmentSize), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("list")); - // 5、指定了-showFileSize=false的话不计算文件大小 + // 6. If -showFileSize=false is specified, the file size will not be + // calculated FLAGS_showFileSize = false; EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(files), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(files), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("list")); } @@ -272,81 +251,58 @@ TEST_F(NameSpaceToolTest, SegInfo) { segments.emplace_back(segment); } FLAGS_fileName = "/test"; - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*core_, GetFileSegments(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(segments), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(segments), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("seginfo")); - // 2、GetFileSegment失败 - EXPECT_CALL(*core_, GetFileSegments(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2.
GetFileSegment failed + EXPECT_CALL(*core_, GetFileSegments(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("seginfo")); } TEST_F(NameSpaceToolTest, CreateFile) { curve::tool::NameSpaceTool namespaceTool(core_); namespaceTool.PrintHelp("create"); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 - EXPECT_CALL(*core_, CreateFile(_)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*core_, CreateFile(_)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.RunCommand("create")); - // 2、创建失败 - EXPECT_CALL(*core_, CreateFile(_)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Creation failed + EXPECT_CALL(*core_, CreateFile(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("create")); } TEST_F(NameSpaceToolTest, DeleteFile) { curve::tool::NameSpaceTool namespaceTool(core_); namespaceTool.PrintHelp("delete"); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 - EXPECT_CALL(*core_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*core_, DeleteFile(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.RunCommand("delete")); - // 2、创建失败 - EXPECT_CALL(*core_, DeleteFile(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Deletion failed + EXPECT_CALL(*core_, DeleteFile(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("delete")); } TEST_F(NameSpaceToolTest, CleanRecycle) { curve::tool::NameSpaceTool namespaceTool(core_); namespaceTool.PrintHelp("clean-recycle"); - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 - EXPECT_CALL(*core_, CleanRecycleBin(_, _)) - .Times(1) - .WillOnce(Return(0)); + // 1. Normal situation + EXPECT_CALL(*core_, CleanRecycleBin(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.RunCommand("clean-recycle")); - // 2、失败 - EXPECT_CALL(*core_, CleanRecycleBin(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Failure + EXPECT_CALL(*core_, CleanRecycleBin(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("clean-recycle")); } @@ -361,33 +317,28 @@ TEST_F(NameSpaceToolTest, PrintChunkLocation) { } uint64_t chunkId = 2001; std::pair copyset = {1, 101}; - EXPECT_CALL(*core_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*core_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*core_, QueryChunkCopyset(_, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(chunkId), - SetArgPointee<3>(copyset), + .WillOnce(DoAll(SetArgPointee<2>(chunkId), SetArgPointee<3>(copyset), Return(0))); EXPECT_CALL(*core_, GetChunkServerListInCopySet(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(csLocs), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(csLocs), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("chunk-location")); - // 2、QueryChunkCopyset失败 + // 2. QueryChunkCopyset failed EXPECT_CALL(*core_, QueryChunkCopyset(_, _, _, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("chunk-location")); - // 3、GetChunkServerListInCopySet失败 + // 3.
GetChunkServerListInCopySet failed EXPECT_CALL(*core_, QueryChunkCopyset(_, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<2>(chunkId), - SetArgPointee<3>(copyset), + .WillOnce(DoAll(SetArgPointee<2>(chunkId), SetArgPointee<3>(copyset), Return(0))); EXPECT_CALL(*core_, GetChunkServerListInCopySet(_, _, _)) .Times(1) diff --git a/test/tools/raft_log_tool_test.cpp b/test/tools/raft_log_tool_test.cpp index ff70a5ef8b..f026ac064c 100644 --- a/test/tools/raft_log_tool_test.cpp +++ b/test/tools/raft_log_tool_test.cpp @@ -20,16 +20,19 @@ * Author: charisu */ +#include "src/tools/raft_log_tool.h" + #include + #include #include -#include "src/tools/raft_log_tool.h" + #include "test/tools/mock/mock_segment_parser.h" DECLARE_string(fileName); using ::testing::_; -using ::testing::Return; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; namespace curve { namespace tool { class RaftLogToolTest : public ::testing::Test { protected: - void SetUp() { - parser_ = std::make_shared<MockSegmentParser>(); - } - void TearDown() { - parser_ = nullptr; - } + void SetUp() { parser_ = std::make_shared<MockSegmentParser>(); } + void TearDown() { parser_ = nullptr; } std::shared_ptr<MockSegmentParser> parser_; }; TEST_F(RaftLogToolTest, PrintHeaders) { raftLogTool.PrintHelp("chunk-meta"); ASSERT_EQ(-1, raftLogTool.RunCommand("chunk-meta")); - // 文件名格式不对 + // The file name format is incorrect FLAGS_fileName = "illegalfilename"; ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); FLAGS_fileName = "/tmp/illegalfilename"; ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); - // parser初始化失败 + // parser initialization failed FLAGS_fileName = "/tmp/log_inprogress_002"; - EXPECT_CALL(*parser_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*parser_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); - // 解析失败 - EXPECT_CALL(*parser_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + // Parsing failed + EXPECT_CALL(*parser_, Init(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*parser_, GetNextEntryHeader(_)) .Times(1) .WillOnce(Return(false)); .WillOnce(Return(false)); ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); - // 正常情况 - EXPECT_CALL(*parser_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + // Normal situation + EXPECT_CALL(*parser_, Init(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*parser_, GetNextEntryHeader(_)) .Times(3) .WillOnce(Return(true)) @@ -100,4 +93,3 @@ } // namespace tool } // namespace curve - diff --git a/test/tools/segment_parser_test.cpp b/test/tools/segment_parser_test.cpp index 3f9e1f465f..12e6614a9f 100644 --- a/test/tools/segment_parser_test.cpp +++ b/test/tools/segment_parser_test.cpp @@ -21,8 +21,10 @@ */ #include + #include #include + #include "src/tools/raft_log_tool.h" #include "test/fs/mock_local_filesystem.h" namespace curve { namespace tool { using curve::fs::MockLocalFileSystem; using ::testing::_; -using ::testing::Return; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; using ::testing::SetArrayArgument; const uint32_t DATA_LEN = 20; class SetmentParserTest : public ::testing::Test { protected: - void SetUp() { - localFs_ = std::make_shared<MockLocalFileSystem>(); - } - void TearDown() { - localFs_ = nullptr; - } + void SetUp() { localFs_ = std::make_shared<MockLocalFileSystem>(); } + void TearDown() { localFs_ = nullptr; } void PackHeader(const EntryHeader& header, char* buf, bool checkFail
= false) { memset(buf, 0, ENTRY_HEADER_SIZE); - const uint32_t meta_field = (header.type << 24) | - (header.checksum_type << 16); + const uint32_t meta_field = + (header.type << 24) | (header.checksum_type << 16); butil::RawPacker packer(buf); packer.pack64(header.term) - .pack32(meta_field) - .pack32((uint32_t)header.data_len) - .pack32(header.data_checksum); + .pack32(meta_field) + .pack32((uint32_t)header.data_len) + .pack32(header.data_checksum); uint32_t checkSum = braft::murmurhash32(buf, ENTRY_HEADER_SIZE - 4); if (checkFail) { packer.pack32(checkSum + 1); @@ -71,29 +69,23 @@ class SetmentParserTest : public ::testing::Test { TEST_F(SetmentParserTest, Init) { SegmentParser parser(localFs_); - // 1、打开文件失败 + // 1. Failed to open file EXPECT_CALL(*localFs_, Open(_, _)) .Times(3) .WillOnce(Return(-1)) .WillRepeatedly(Return(1)); ASSERT_EQ(-1, parser.Init(fileName)); - // 2、获取文件大小失败 - EXPECT_CALL(*localFs_, Fstat(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. Failed to obtain file size + EXPECT_CALL(*localFs_, Fstat(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, parser.Init(fileName)); - // 3、成功 - EXPECT_CALL(*localFs_, Fstat(_, _)) - .Times(1) - .WillOnce(Return(0)); + // 3. Success + EXPECT_CALL(*localFs_, Fstat(_, _)).Times(1).WillOnce(Return(0)); ASSERT_EQ(0, parser.Init(fileName)); - // 4、反初始化 - EXPECT_CALL(*localFs_, Close(_)) - .Times(1) - .WillOnce(Return(0)); + // 4. De-initialization + EXPECT_CALL(*localFs_, Close(_)).Times(1).WillOnce(Return(0)); parser.UnInit(); } @@ -102,13 +94,10 @@ TEST_F(SetmentParserTest, GetNextEntryHeader) { struct stat stBuf; stBuf.st_size = 88; - EXPECT_CALL(*localFs_, Open(_, _)) - .Times(1) - .WillOnce(Return(1)); + EXPECT_CALL(*localFs_, Open(_, _)).Times(1).WillOnce(Return(1)); EXPECT_CALL(*localFs_, Fstat(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(stBuf), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(stBuf), Return(0))); ASSERT_EQ(0, parser.Init(fileName)); EntryHeader header; @@ -120,30 +109,30 @@ TEST_F(SetmentParserTest, GetNextEntryHeader) { header.data_checksum = 73235795; char header_buf[ENTRY_HEADER_SIZE] = {0}; - // 读出来的数据大小不对 + // The size of the data read out is incorrect EXPECT_CALL(*localFs_, Read(_, _, _, ENTRY_HEADER_SIZE)) .Times(1) .WillOnce(Return(22)); ASSERT_FALSE(parser.GetNextEntryHeader(&header2)); ASSERT_FALSE(parser.SuccessfullyFinished()); - // 校验失败 + // Verification failed PackHeader(header, header_buf, true); EXPECT_CALL(*localFs_, Read(_, _, _, ENTRY_HEADER_SIZE)) .Times(1) - .WillOnce(DoAll(SetArrayArgument<1>(header_buf, - header_buf + ENTRY_HEADER_SIZE), - Return(24))); + .WillOnce(DoAll( + SetArrayArgument<1>(header_buf, header_buf + ENTRY_HEADER_SIZE), + Return(24))); ASSERT_FALSE(parser.GetNextEntryHeader(&header2)); ASSERT_FALSE(parser.SuccessfullyFinished()); - // 正常情况 + // Normal situation PackHeader(header, header_buf); EXPECT_CALL(*localFs_, Read(_, _, _, ENTRY_HEADER_SIZE)) .Times(2) - .WillRepeatedly(DoAll(SetArrayArgument<1>(header_buf, - header_buf + ENTRY_HEADER_SIZE), - Return(24))); + .WillRepeatedly(DoAll( + SetArrayArgument<1>(header_buf, header_buf + ENTRY_HEADER_SIZE), + Return(24))); ASSERT_TRUE(parser.GetNextEntryHeader(&header2)); ASSERT_EQ(header, header2); ASSERT_TRUE(parser.GetNextEntryHeader(&header2)); @@ -155,4 +144,3 @@ TEST_F(SetmentParserTest, GetNextEntryHeader) { } // namespace tool } // namespace curve - diff --git a/test/tools/snapshot_clone_client_test.cpp b/test/tools/snapshot_clone_client_test.cpp index 024a270a69..9a87583dd8 100644 --- 
a/test/tools/snapshot_clone_client_test.cpp +++ b/test/tools/snapshot_clone_client_test.cpp @@ -20,28 +20,27 @@ * Author: charisu */ +#include "src/tools/snapshot_clone_client.h" + #include + #include -#include "src/tools/snapshot_clone_client.h" + #include "test/tools/mock/mock_metric_client.h" using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; namespace curve { namespace tool { class SnapshotCloneClientTest : public ::testing::Test { protected: - void SetUp() { - metricClient_ = std::make_shared<MockMetricClient>(); - } + void SetUp() { metricClient_ = std::make_shared<MockMetricClient>(); } - void TearDown() { - metricClient_ = nullptr; - } + void TearDown() { metricClient_ = nullptr; } std::shared_ptr<MockMetricClient> metricClient_; }; TEST_F(SnapshotCloneClientTest, Init) { // no snapshot clone server ASSERT_EQ(1, client.Init("", "")); ASSERT_EQ(-1, client.Init("127.0.0.1:5555", "")); - // dummy server与mds不匹配 + // Dummy server and mds do not match ASSERT_EQ(-1, client.Init("127.0.0.1:5555", "8081,8082,8083")); ASSERT_EQ(0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", - "9091,9092,9093")); - std::map<std::string, std::string> expected = - {{"127.0.0.1:5555", "127.0.0.1:9091"}, - {"127.0.0.1:5556", "127.0.0.1:9092"}, - {"127.0.0.1:5557", "127.0.0.1:9093"}}; + "9091,9092,9093")); + std::map<std::string, std::string> expected = { + {"127.0.0.1:5555", "127.0.0.1:9091"}, + {"127.0.0.1:5556", "127.0.0.1:9092"}, + {"127.0.0.1:5557", "127.0.0.1:9093"}}; ASSERT_EQ(expected, client.GetDummyServerMap()); } TEST_F(SnapshotCloneClientTest, GetActiveAddr) { - // 正常情况 + // Normal situation SnapshotCloneClient client(metricClient_); - ASSERT_EQ(0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", - "9091")); + ASSERT_EQ( + 0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", "9091")); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(4) - .WillOnce(DoAll(SetArgPointee<2>("active"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("active"), - Return(MetricRet::kOK))) - .WillRepeatedly(DoAll(SetArgPointee<2>("standby"), - Return(MetricRet::kOK))); + .WillOnce(DoAll(SetArgPointee<2>("active"), Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("active"), Return(MetricRet::kOK))) + .WillRepeatedly( + DoAll(SetArgPointee<2>("standby"), Return(MetricRet::kOK))); std::vector<std::string> activeAddr = client.GetActiveAddrs(); ASSERT_EQ(1, activeAddr.size()); ASSERT_EQ("127.0.0.1:5555", activeAddr[0]); - // 有一个dummyserver显示active,服务端口访问失败 + // One dummyserver reports active, but accessing its service port + // failed EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(4) - .WillOnce(DoAll(SetArgPointee<2>("active"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("active"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kOtherErr)) - .WillRepeatedly(DoAll(SetArgPointee<2>("standby"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("standby"), Return(MetricRet::kOK))); activeAddr = client.GetActiveAddrs(); ASSERT_TRUE(activeAddr.empty()); - // 有一个获取metric失败,其他返回standby + // One failed to obtain metric, while the others returned standby EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) .WillOnce(Return(MetricRet::kNotFound)) - .WillRepeatedly(DoAll(SetArgPointee<2>("standby"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("standby"), Return(MetricRet::kOK))); ASSERT_TRUE(client.GetActiveAddrs().empty()); - // 有两个active状态的 + // Two are in the active state EXPECT_CALL(*metricClient_,
GetMetric(_, _, _)) .Times(5) - .WillOnce(DoAll(SetArgPointee<2>("standby"), - Return(MetricRet::kOK))) - .WillRepeatedly(DoAll(SetArgPointee<2>("active"), - Return(MetricRet::kOK))); + .WillOnce(DoAll(SetArgPointee<2>("standby"), Return(MetricRet::kOK))) + .WillRepeatedly( + DoAll(SetArgPointee<2>("active"), Return(MetricRet::kOK))); activeAddr = client.GetActiveAddrs(); ASSERT_EQ(2, activeAddr.size()); ASSERT_EQ("127.0.0.1:5556", activeAddr[0]); @@ -112,15 +108,16 @@ TEST_F(SnapshotCloneClientTest, GetActiveAddr) { TEST_F(SnapshotCloneClientTest, GetOnlineStatus) { SnapshotCloneClient client(metricClient_); - ASSERT_EQ(0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", - "9091")); - // 有一个在线,有一个获取metric失败,有一个listen addr不匹配 + ASSERT_EQ( + 0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", "9091")); + // One online, one failed to obtain metric, and one did not match the listen + // addr EXPECT_CALL(*metricClient_, GetConfValueFromMetric(_, _, _)) .Times(3) - .WillOnce(DoAll(SetArgPointee<2>("127.0.0.1:5555"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("127.0.0.1:5557"), - Return(MetricRet::kOK))) + .WillOnce( + DoAll(SetArgPointee<2>("127.0.0.1:5555"), Return(MetricRet::kOK))) + .WillOnce( + DoAll(SetArgPointee<2>("127.0.0.1:5557"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)); std::map onlineStatus; client.GetOnlineStatus(&onlineStatus); diff --git a/test/tools/status_tool_test.cpp b/test/tools/status_tool_test.cpp index 8b33183220..8dba1a8f94 100644 --- a/test/tools/status_tool_test.cpp +++ b/test/tools/status_tool_test.cpp @@ -19,25 +19,28 @@ * File Created: 2019-11-26 * Author: charisu */ +#include "src/tools/status_tool.h" + #include + #include -#include "src/tools/status_tool.h" -#include "test/tools/mock/mock_namespace_tool_core.h" -#include "test/tools/mock/mock_copyset_check_core.h" + #include "test/tools//mock/mock_mds_client.h" +#include "test/tools/mock/mock_copyset_check_core.h" #include "test/tools/mock/mock_etcd_client.h" -#include "test/tools/mock/mock_version_tool.h" #include "test/tools/mock/mock_metric_client.h" +#include "test/tools/mock/mock_namespace_tool_core.h" #include "test/tools/mock/mock_snapshot_clone_client.h" +#include "test/tools/mock/mock_version_tool.h" +using curve::mds::topology::AllocateStatus; +using curve::mds::topology::LogicalPoolType; using ::testing::_; +using ::testing::An; +using ::testing::DoAll; using ::testing::Return; using ::testing::ReturnRef; -using ::testing::DoAll; using ::testing::SetArgPointee; -using ::testing::An; -using curve::mds::topology::LogicalPoolType; -using curve::mds::topology::AllocateStatus; DECLARE_bool(offline); DECLARE_bool(unhealthy); @@ -76,7 +79,7 @@ class StatusToolTest : public ::testing::Test { pool->set_desc("physical pool for test"); } - void GetLogicalPoolForTest(PoolIdType id, LogicalPoolInfo *lpInfo, + void GetLogicalPoolForTest(PoolIdType id, LogicalPoolInfo* lpInfo, bool getSpace = true) { lpInfo->set_logicalpoolid(id); lpInfo->set_logicalpoolname("defaultLogicalPool"); @@ -89,9 +92,9 @@ class StatusToolTest : public ::testing::Test { lpInfo->set_allocatestatus(AllocateStatus::ALLOW); } - void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo *csInfo, - uint64_t csId, bool offline = false, - bool retired = false) { + void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo* csInfo, + uint64_t csId, bool offline = false, + bool retired = false) { csInfo->set_chunkserverid(csId); csInfo->set_disktype("ssd"); 
csInfo->set_hostip("127.0.0.1"); @@ -113,7 +116,7 @@ class StatusToolTest : public ::testing::Test { csInfo->set_diskused(512); } - void GetServerInfoForTest(curve::mds::topology::ServerInfo *server, + void GetServerInfoForTest(curve::mds::topology::ServerInfo* server, uint64_t id) { server->set_serverid(id); server->set_hostname("localhost"); @@ -137,8 +140,7 @@ class StatusToolTest : public ::testing::Test { }; TEST_F(StatusToolTest, InitAndSupportCommand) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); ASSERT_TRUE(statusTool.SupportCommand("status")); ASSERT_TRUE(statusTool.SupportCommand("space")); @@ -153,10 +155,9 @@ } TEST_F(StatusToolTest, InitFail) { - StatusTool statusTool1(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, - metricClient_, snapshotClient_); - // 1、status命令需要所有的init + StatusTool statusTool1(mdsClient_, etcdClient_, copysetCheck_, versionTool_, + metricClient_, snapshotClient_); + // 1. The status command requires all inits EXPECT_CALL(*mdsClient_, Init(_, _)) .Times(3) .WillOnce(Return(-1)) @@ -169,50 +170,38 @@ .Times(2) .WillOnce(Return(-1)) .WillOnce(Return(0)); - EXPECT_CALL(*snapshotClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*snapshotClient_, Init(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool1.RunCommand("status")); ASSERT_EQ(-1, statusTool1.RunCommand("status")); ASSERT_EQ(-1, statusTool1.RunCommand("status")); ASSERT_EQ(-1, statusTool1.RunCommand("status")); - // 2、etcd-status命令只需要初始化etcdClinet - StatusTool statusTool2(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, - metricClient_, snapshotClient_); - EXPECT_CALL(*etcdClient_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + // 2. The etcd-status command only needs to initialize etcdClient + StatusTool statusTool2(mdsClient_, etcdClient_, copysetCheck_, versionTool_, + metricClient_, snapshotClient_); + EXPECT_CALL(*etcdClient_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool2.RunCommand("etcd-status")); - // 3、space和其他命令不需要初始化etcdClient - StatusTool statusTool3(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, - metricClient_, snapshotClient_); + // 3. Space and other commands do not require initialization of etcdClient + StatusTool statusTool3(mdsClient_, etcdClient_, copysetCheck_, versionTool_, + metricClient_, snapshotClient_); EXPECT_CALL(*mdsClient_, Init(_, _)) .Times(2) .WillOnce(Return(-1)) .WillRepeatedly(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(-1)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool3.RunCommand("space")); ASSERT_EQ(-1, statusTool3.RunCommand("chunkserver-list")); - // 4、snapshot-clone-status只需要snapshot clone - StatusTool statusTool4(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, - metricClient_, snapshotClient_); - EXPECT_CALL(*snapshotClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 4.
snapshot-clone-status only requires snapshot clone + StatusTool statusTool4(mdsClient_, etcdClient_, copysetCheck_, versionTool_, + metricClient_, snapshotClient_); + EXPECT_CALL(*snapshotClient_, Init(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool4.RunCommand("snapshot-clone-status")); } TEST_F(StatusToolTest, SpaceCmd) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); statusTool.PrintHelp("space"); statusTool.PrintHelp("123"); @@ -221,92 +210,70 @@ TEST_F(StatusToolTest, SpaceCmd) { std::vector lgPools; lgPools.emplace_back(lgPool); - // 设置Init的期望 - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + // Set expectations for Init + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(lgPools), Return(0))); EXPECT_CALL(*mdsClient_, GetFileSize(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(4) - .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("space")); ASSERT_EQ(-1, statusTool.RunCommand("123")); - // 2、ListLogicalPoolsInPhysicalPool失败的情况 + // 2. The situation of ListLogicalPoolsInPhysicalPool failure EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("space")); - // 3、获取filesize失败 + // 3. Failed to obtain filesize EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(lgPools), - Return(0))); - EXPECT_CALL(*mdsClient_, GetFileSize(_, _)) - .Times(1) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<0>(lgPools), Return(0))); + EXPECT_CALL(*mdsClient_, GetFileSize(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("space")); - // 4、获取metric失败的情况 + // 4. 
Failed to obtain metric EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(lgPools), Return(0))); EXPECT_CALL(*mdsClient_, GetFileSize(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), - Return(0))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .WillOnce(Return(-1)) - .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), - Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), Return(0))) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("space")); ASSERT_EQ(-1, statusTool.RunCommand("space")); - // 5、获取RecyleBin大小失败的情况 + // 5. Failed to obtain the size of RecycleBin EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(lgPools), Return(0))); EXPECT_CALL(*mdsClient_, GetFileSize(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(4) - .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)) .Times(1) .WillOnce(Return(-1)); @@ -314,33 +281,28 @@ } TEST_F(StatusToolTest, ChunkServerCmd) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); statusTool.PrintHelp("chunkserver-list"); std::vector<ChunkServerInfo> chunkservers; - // 加入5个chunkserver,2个offline + // Add 5 chunkservers, 2 of them offline ChunkServerInfo csInfo; for (uint64_t i = 1; i <= 5; ++i) { GetCsInfoForTest(&csInfo, i, i <= 2); chunkservers.emplace_back(csInfo); } - // 设置Init的期望 - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - - // 正常情况,有一个chunkserver的UnhealthyRatio大于0 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An<std::vector<ChunkServerInfo>*>())) - .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); - EXPECT_CALL(*copysetCheck_, CheckCopysetsOnChunkServer( - An<ChunkServerIdType>())) + // Set expectations for Init + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); + + // Under normal circumstances, there is a chunkserver with an UnhealthyRatio + // greater than 0 + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An<std::vector<ChunkServerInfo>*>())) + .Times(1) + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); + EXPECT_CALL(*copysetCheck_, + CheckCopysetsOnChunkServer(An<ChunkServerIdType>())) .Times(3) .WillRepeatedly(Return(0)); EXPECT_CALL(*copysetCheck_, GetCopysetStatistics()) @@ -349,23 +311,21 @@
TEST_F(StatusToolTest, ChunkServerCmd) { .WillRepeatedly(Return(statistics1)); ASSERT_EQ(0, statusTool.RunCommand("chunkserver-list")); - // 只显示offline的 + // Only display offline ones FLAGS_offline = true; - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An<std::vector<ChunkServerInfo>*>())) + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An<std::vector<ChunkServerInfo>*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("chunkserver-list")); - // 只显示unhealthy ratio大于0的 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An<std::vector<ChunkServerInfo>*>())) + // Show only those with unhealthy ratio greater than 0 + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An<std::vector<ChunkServerInfo>*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); - EXPECT_CALL(*copysetCheck_, CheckCopysetsOnChunkServer( - An<ChunkServerIdType>())) + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); + EXPECT_CALL(*copysetCheck_, + CheckCopysetsOnChunkServer(An<ChunkServerIdType>())) .Times(3) .WillRepeatedly(Return(0)); EXPECT_CALL(*copysetCheck_, GetCopysetStatistics()) @@ -376,21 +336,21 @@ FLAGS_unhealthy = true; ASSERT_EQ(0, statusTool.RunCommand("chunkserver-list")); - // list chunkserver失败 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An<std::vector<ChunkServerInfo>*>())) + // List chunkserver failed + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An<std::vector<ChunkServerInfo>*>())) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("chunkserver-list")); - // FLAGS_checkCSAlive为true的时候,会发送rpc检查chunkserver在线状态 + // When FLAGS_checkCSAlive is true, an RPC will be sent to check the online + // status of the chunkserver FLAGS_checkHealth = false; FLAGS_checkCSAlive = true; - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An<std::vector<ChunkServerInfo>*>())) + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An<std::vector<ChunkServerInfo>*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*copysetCheck_, CheckChunkServerOnline(_)) .Times(5) .WillOnce(Return(false)) @@ -399,8 +359,7 @@ } TEST_F(StatusToolTest, StatusCmdCommon) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); statusTool.PrintHelp("status"); statusTool.PrintHelp("chunkserver-status"); @@ -422,10 +381,9 @@ {"0.0.2", {"127.0.0.1:8002"}}, {"0.0.3", {"127.0.0.1:8003"}}}; ClientVersionMapType clientVersionMap = {{"nebd-server", versionMap}, - {"python", versionMap}, - {"qemu", versionMap}}; - std::vector<std::string> offlineList = {"127.0.0.1:8004", - "127.0.0.1:8005"}; + {"python", versionMap}, + {"qemu", versionMap}}; + std::vector<std::string> offlineList = {"127.0.0.1:8004", "127.0.0.1:8005"}; std::vector<std::string> leaderAddr = {"127.0.0.1:2379"}; std::map<std::string, bool> onlineState = {{"127.0.0.1:2379", true}, {"127.0.0.1:2381", true}, @@ -440,22 +398,14 @@ } chunkservers.emplace(1, chunkserverList); - // 设置Init的期望 - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*etcdClient_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*snapshotClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); + // Set expectations for Init + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); +
EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*etcdClient_, Init(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*snapshotClient_, Init(_, _)).Times(1).WillOnce(Return(0)); - // 正常情况 - // 1、设置cluster的输出 + // Normal situation + // 1. Set the output of the cluster EXPECT_CALL(*copysetCheck_, CheckCopysetsInCluster()) .Times(1) .WillOnce(Return(0)); @@ -464,41 +414,31 @@ TEST_F(StatusToolTest, StatusCmdCommon) { .WillOnce(Return(statistics1)); EXPECT_CALL(*mdsClient_, ListPhysicalPoolsInCluster(_)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<0>(phyPools), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>(phyPools), Return(0))); EXPECT_CALL(*mdsClient_, ListLogicalPoolsInPhysicalPool(_, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<1>(lgPools), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<1>(lgPools), Return(0))); EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(lgPools), Return(0))); EXPECT_CALL(*mdsClient_, GetFileSize(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(150 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(4) - .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), - Return(0))) - .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(300 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(20 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(100 * DefaultSegmentSize), Return(0))) + .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); - // 设置client status的输出 + // Set the output of client status EXPECT_CALL(*versionTool_, GetClientVersion(_)) - .WillOnce(DoAll(SetArgPointee<0>(clientVersionMap), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(clientVersionMap), Return(0))); - // 2、设置MDS status的输出 + // 2. Set the output of MDS status EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(2) .WillRepeatedly(Return(mdsAddr)); @@ -506,25 +446,21 @@ TEST_F(StatusToolTest, StatusCmdCommon) { .Times(2) .WillRepeatedly(SetArgPointee<0>(mdsOnlineStatus)); EXPECT_CALL(*versionTool_, GetAndCheckMdsVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); - // 3、设置etcd status的输出 + // 3. 
Set the output of etcd status EXPECT_CALL(*etcdClient_, GetAndCheckEtcdVersion(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<0>("3.4.1"), - Return(0))); + .WillRepeatedly(DoAll(SetArgPointee<0>("3.4.1"), Return(0))); EXPECT_CALL(*etcdClient_, GetEtcdClusterStatus(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<0>(leaderAddr), - SetArgPointee<1>(onlineState), - Return(0))); + SetArgPointee<1>(onlineState), Return(0))); - // 设置snapshot clone的输出 + // Set the output of snapshot clone std::vector<std::string> activeAddr = {"127.0.0.1:5555"}; EXPECT_CALL(*versionTool_, GetAndCheckSnapshotCloneVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); EXPECT_CALL(*snapshotClient_, GetActiveAddrs()) .Times(2) .WillRepeatedly(Return(activeAddr)); @@ -532,39 +468,36 @@ .Times(2) .WillRepeatedly(SetArgPointee<0>(onlineState)); - // 4、设置chunkserver status的输出 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An<std::map<PoolIdType, std::vector<ChunkServerInfo>>*>())) + // 4. Set the output of chunkserver status + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster( + An<std::map<PoolIdType, std::vector<ChunkServerInfo>>*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*versionTool_, GetAndCheckChunkServerVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); EXPECT_CALL(*metricClient_, GetMetricUint(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>(1000), - Return(MetricRet::kOK))); + .WillRepeatedly(DoAll(SetArgPointee<2>(1000), Return(MetricRet::kOK))); EXPECT_CALL(*copysetCheck_, CheckChunkServerOnline(_)) .Times(3) .WillRepeatedly(Return(true)); ASSERT_EQ(0, statusTool.RunCommand("status")); - // 5、设置chunkserver status的输出 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An<std::map<PoolIdType, std::vector<ChunkServerInfo>>*>())) + // 5. Set the output of chunkserver status + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster( + An<std::map<PoolIdType, std::vector<ChunkServerInfo>>*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*versionTool_, GetAndCheckChunkServerVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); EXPECT_CALL(*copysetCheck_, CheckChunkServerOnline(_)) .Times(3) .WillRepeatedly(Return(true)); ASSERT_EQ(0, statusTool.RunCommand("chunkserver-status")); - // 6、设置mds status的输出 + // 6. Set the output of mds status EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(mdsAddr)); @@ -572,37 +505,26 @@ .Times(1) .WillOnce(SetArgPointee<0>(mdsOnlineStatus)); EXPECT_CALL(*versionTool_, GetAndCheckMdsVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("mds-status")); - // 7、设置etcd status的输出 + // 7.
Set the output of etcd status EXPECT_CALL(*etcdClient_, GetEtcdClusterStatus(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(leaderAddr), - SetArgPointee<1>(onlineState), - Return(0))); + SetArgPointee<1>(onlineState), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("etcd-status")); } TEST_F(StatusToolTest, StatusCmdError) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); - // 设置Init的期望 - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*etcdClient_, Init(_)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*snapshotClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); + // Set expectations for Init + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*etcdClient_, Init(_)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*snapshotClient_, Init(_, _)).Times(1).WillOnce(Return(0)); // 1、cluster unhealthy EXPECT_CALL(*copysetCheck_, CheckCopysetsInCluster()) @@ -611,24 +533,22 @@ TEST_F(StatusToolTest, StatusCmdError) { EXPECT_CALL(*copysetCheck_, GetCopysetStatistics()) .Times(1) .WillOnce(Return(statistics2)); - // 列出物理池失败 + // Failed to list physical pools EXPECT_CALL(*mdsClient_, ListPhysicalPoolsInCluster(_)) .Times(1) .WillRepeatedly(Return(-1)); - // 列出逻辑池失败 + // Failed to list logical pools EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillRepeatedly(Return(-1)); - // 获取client version失败 - EXPECT_CALL(*versionTool_, GetClientVersion(_)) - .WillOnce(Return(-1)); + // Failed to obtain client version + EXPECT_CALL(*versionTool_, GetClientVersion(_)).WillOnce(Return(-1)); - // 2、当前无mds可用 + // 2. Currently, no mds are available std::vector failedList = {"127.0.0.1:6666", "127.0.0.1:6667"}; EXPECT_CALL(*versionTool_, GetAndCheckMdsVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(failedList), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(failedList), Return(0))); std::map mdsOnlineStatus = {{"127.0.0.1:6666", false}, {"127.0.0.1:6667", false}}; EXPECT_CALL(*mdsClient_, GetCurrentMds()) @@ -638,7 +558,7 @@ TEST_F(StatusToolTest, StatusCmdError) { .Times(2) .WillRepeatedly(SetArgPointee<0>(mdsOnlineStatus)); - // 3、GetEtcdClusterStatus失败 + // 3. GetEtcdClusterStatus failed EXPECT_CALL(*etcdClient_, GetAndCheckEtcdVersion(_, _)) .Times(1) .WillOnce(Return(-1)); @@ -646,10 +566,9 @@ TEST_F(StatusToolTest, StatusCmdError) { .Times(2) .WillRepeatedly(Return(-1)); - // 当前无snapshot clone server可用 + // Currently, no snapshot clone server is available EXPECT_CALL(*versionTool_, GetAndCheckSnapshotCloneVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(failedList), - Return(0))); + .WillOnce(DoAll(SetArgPointee<1>(failedList), Return(0))); std::map onlineStatus = {{"127.0.0.1:5555", false}, {"127.0.0.1:5556", false}}; EXPECT_CALL(*snapshotClient_, GetActiveAddrs()) @@ -659,42 +578,42 @@ TEST_F(StatusToolTest, StatusCmdError) { .Times(2) .WillRepeatedly(SetArgPointee<0>(onlineStatus)); - // 4、获取chunkserver version失败并ListChunkServersInCluster失败 + // 4. 
Failed to obtain chunkserver version, and ListChunkServersInCluster failed EXPECT_CALL(*versionTool_, GetAndCheckChunkServerVersion(_, _)) .WillOnce(Return(-1)); - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An<std::map<PoolIdType, std::vector<ChunkServerInfo>>*>())) + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster( + An<std::map<PoolIdType, std::vector<ChunkServerInfo>>*>())) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("status")); - // 获取mds在线状态失败 + // Failed to obtain mds online status EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(std::vector<std::string>())); EXPECT_CALL(*mdsClient_, GetMdsOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(mdsOnlineStatus)); - // 获取mdsversion失败 + // Failed to obtain mdsversion EXPECT_CALL(*versionTool_, GetAndCheckMdsVersion(_, _)) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("mds-status")); - // 个别chunkserver获取version失败 + // Individual chunkservers failed to obtain version EXPECT_CALL(*versionTool_, GetAndCheckChunkServerVersion(_, _)) - .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), - SetArgPointee<1>(failedList), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), SetArgPointee<1>(failedList), + Return(0))); - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An<std::map<PoolIdType, std::vector<ChunkServerInfo>>*>())) + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster( + An<std::map<PoolIdType, std::vector<ChunkServerInfo>>*>())) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("chunkserver-status")); } TEST_F(StatusToolTest, IsClusterHeatlhy) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); std::map<std::string, bool> onlineStatus = {{"127.0.0.1:8001", true}, {"127.0.0.1:8002", true}, @@ -702,55 +621,54 @@ std::map<std::string, bool> onlineStatus2 = {{"127.0.0.1:8001", true}, {"127.0.0.1:8002", false}, {"127.0.0.1:8003", true}}; - // 1、copysets不健康 + // 1. Copysets are unhealthy EXPECT_CALL(*copysetCheck_, CheckCopysetsInCluster()) .Times(1) .WillOnce(Return(-1)); - // 2、没有mds可用 + // 2. No mds available EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(std::vector<std::string>())); - // 3、有mds不在线 + // 3. Some MDSs are not online EXPECT_CALL(*mdsClient_, GetMdsOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus2)); - // 4、获取etcd集群状态失败 + // 4. Failed to obtain the ETCD cluster status EXPECT_CALL(*etcdClient_, GetEtcdClusterStatus(_, _)) .Times(1) .WillOnce(Return(-1)); - // 5、没有snapshot-clone-server可用 + // 5. No snapshot-clone-server available EXPECT_CALL(*snapshotClient_, GetActiveAddrs()) .Times(1) .WillOnce(Return(std::vector<std::string>())); - // 6、有snapshot-clone-server不在线 + // 6. Some snapshot-clone-servers are not online EXPECT_CALL(*snapshotClient_, GetOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus2)); ASSERT_FALSE(statusTool.IsClusterHeatlhy()); - // 1、copyset健康 + // 1. Copysets are healthy EXPECT_CALL(*copysetCheck_, CheckCopysetsInCluster()) .Times(1) .WillOnce(Return(0)); - // 2、超过一个mds在服务 + // 2. More than one mds is in service EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(std::vector<std::string>(2))); - // 3、mds都在线 + // 3. All MDSs are online EXPECT_CALL(*mdsClient_, GetMdsOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus)); - // 4、etcd没有leader且有etcd不在线 + // 4.
ETCD does not have a leader and there are ETCDs that are not online EXPECT_CALL(*etcdClient_, GetEtcdClusterStatus(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(std::vector<std::string>()), - SetArgPointee<1>(onlineStatus2), - Return(0))); - // 5、有多个snapshot-clone-server可用 + SetArgPointee<1>(onlineStatus2), Return(0))); + // 5. Multiple snapshot-clone-servers are available EXPECT_CALL(*snapshotClient_, GetActiveAddrs()) .Times(1) .WillOnce(Return(std::vector<std::string>(2))); - // 9、snapshot-clone-server都在线 + // 6. All snapshot-clone-servers are online EXPECT_CALL(*snapshotClient_, GetOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus)); @@ -758,43 +676,30 @@ } TEST_F(StatusToolTest, ListClientCmd) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); std::vector<std::string> clientAddrs; for (int i = 0; i < 10; ++i) { clientAddrs.emplace_back("127.0.0.1:900" + std::to_string(i)); } - // 成功 + // Success EXPECT_CALL(*mdsClient_, ListClient(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("client-list")); - // 失败 - EXPECT_CALL(*mdsClient_, ListClient(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // Failed + EXPECT_CALL(*mdsClient_, ListClient(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("client-list")); } TEST_F(StatusToolTest, ServerList) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); std::vector<ServerInfo> servers; for (int i = 0; i < 3; ++i) { @@ -802,13 +707,12 @@ TEST_F(StatusToolTest, ServerList) { GetServerInfoForTest(&server, i); servers.emplace_back(server); } - // 成功 + // Success EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(servers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(servers), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("server-list")); - // 失败 + // Failed EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) .WillOnce(Return(-1)); @@ -816,15 +720,10 @@ } TEST_F(StatusToolTest, LogicalPoolList) { - StatusTool statusTool(mdsClient_, etcdClient_, - copysetCheck_, versionTool_, + StatusTool statusTool(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); - EXPECT_CALL(*mdsClient_, Init(_, _)) - .Times(1) - .WillOnce(Return(0)); - EXPECT_CALL(*copysetCheck_, Init(_)) - .Times(1) - .WillOnce(Return(0)); + EXPECT_CALL(*mdsClient_, Init(_, _)).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*copysetCheck_, Init(_)).Times(1).WillOnce(Return(0)); std::vector<LogicalPoolInfo> lgPools; for (int i = 1; i <= 3; ++i) { @@ -832,30 +731,25 @@ TEST_F(StatusToolTest,
LogicalPoolList) { GetLogicalPoolForTest(i, &lgPool); lgPools.emplace_back(lgPool); } - // 成功 + // Success EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(lgPools), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(lgPools), Return(0))); AllocMap allocMap = {{1, DefaultSegmentSize}, {2, DefaultSegmentSize * 20}}; EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(allocMap), - Return(0))); + .WillOnce(DoAll(SetArgPointee<2>(allocMap), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("logical-pool-list")); - // 失败 + // Failed EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("logical-pool-list")); EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(lgPools), - Return(0))); - EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)) - .WillOnce(Return(-1)); + .WillOnce(DoAll(SetArgPointee<0>(lgPools), Return(0))); + EXPECT_CALL(*mdsClient_, GetAllocatedSize(_, _, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("logical-pool-list")); } } // namespace tool } // namespace curve - diff --git a/test/tools/version_tool_test.cpp b/test/tools/version_tool_test.cpp index 64581f73ac..db40892f40 100644 --- a/test/tools/version_tool_test.cpp +++ b/test/tools/version_tool_test.cpp @@ -21,21 +21,23 @@ * Copyright (c) 2018 netease */ -#include #include "src/tools/version_tool.h" + +#include + #include "test/tools/mock/mock_mds_client.h" #include "test/tools/mock/mock_metric_client.h" #include "test/tools/mock/mock_snapshot_clone_client.h" +using curve::mds::topology::ChunkServerStatus; +using curve::mds::topology::DiskState; +using curve::mds::topology::OnlineState; using ::testing::_; +using ::testing::An; +using ::testing::DoAll; using ::testing::Return; using ::testing::ReturnRef; -using ::testing::DoAll; using ::testing::SetArgPointee; -using ::testing::An; -using curve::mds::topology::ChunkServerStatus; -using curve::mds::topology::DiskState; -using curve::mds::topology::OnlineState; namespace curve { namespace tool { @@ -53,8 +55,8 @@ class VersionToolTest : public ::testing::Test { metricClient_ = nullptr; } - void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo *csInfo, - uint64_t csId) { + void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo* csInfo, + uint64_t csId) { csInfo->set_chunkserverid(csId); csInfo->set_disktype("ssd"); csInfo->set_hostip("127.0.0.1"); @@ -73,64 +75,61 @@ class VersionToolTest : public ::testing::Test { TEST_F(VersionToolTest, GetAndCheckMdsVersion) { VersionTool versionTool(mdsClient_, metricClient_, snapshotClient_); - std::map dummyServerMap = - {{"127.0.0.1:6666", "127.0.0.1:6667"}, - {"127.0.0.1:6668", "127.0.0.1:6669"}, - {"127.0.0.1:6670", "127.0.0.1:6671"}}; + std::map dummyServerMap = { + {"127.0.0.1:6666", "127.0.0.1:6667"}, + {"127.0.0.1:6668", "127.0.0.1:6669"}, + {"127.0.0.1:6670", "127.0.0.1:6671"}}; - // 1、正常情况 + // 1. 
Normal situation EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); std::string version; std::vector failedList; ASSERT_EQ(0, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); ASSERT_TRUE(failedList.empty()); - // 2、获取部分mds curve_version失败 + // 2. Obtain partial mds curve_version failed EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) .WillOnce(Return(MetricRet::kOtherErr)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); ASSERT_EQ(0, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); std::vector expectedList = {"127.0.0.1:6667"}; ASSERT_EQ(expectedList, failedList); - // 3、dummyServerMap为空 + // 3. dummyServerMap is empty std::map dummyServerMap2; EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap2)); - EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) - .Times(0); + EXPECT_CALL(*metricClient_, GetMetric(_, _, _)).Times(0); ASSERT_EQ(-1, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 4、version不一致 + // 4. version inconsistency EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)); ASSERT_EQ(-1, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 5、老版本mds + // 5. Old version of mds EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); @@ -151,123 +150,112 @@ TEST_F(VersionToolTest, GetChunkServerVersion) { chunkservers.emplace_back(csInfo); } - // 1、正常情况 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 1. Normal situation + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); std::string version; std::vector failedList; - ASSERT_EQ(0, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + ASSERT_EQ(0, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); ASSERT_TRUE(failedList.empty()); - // 2、ListChunkServersInCluster失败 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 2. 
ListChunkServersInCluster failed + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) .WillOnce(Return(-1)); - ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + ASSERT_EQ(-1, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); - // 3、获取metric失败 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 3. Failed to obtain metric + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) .WillOnce(Return(MetricRet::kOtherErr)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); - ASSERT_EQ(0, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); + ASSERT_EQ(0, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); std::vector expectList = {"127.0.0.1:9191"}; ASSERT_EQ(expectList, failedList); - // 4、chunkserverList为空 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 4. chunkserverList is empty + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(std::vector()), - Return(0))); - EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) - .Times(0); - ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + .WillOnce( + DoAll(SetArgPointee<0>(std::vector()), Return(0))); + EXPECT_CALL(*metricClient_, GetMetric(_, _, _)).Times(0); + ASSERT_EQ(-1, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 5、version不一致 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 5. version inconsistency + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) - .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); - ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); + ASSERT_EQ(-1, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 6、老版本 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 6. 
Old version + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) .WillRepeatedly(Return(MetricRet::kNotFound)); - ASSERT_EQ(0, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + ASSERT_EQ(0, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_EQ("before-0.0.5.2", version); ASSERT_TRUE(failedList.empty()); } TEST_F(VersionToolTest, GetClientVersion) { VersionTool versionTool(mdsClient_, metricClient_, snapshotClient_); - std::vector clientAddrs = - {"127.0.0.1:8000", "127.0.0.1:8001", "127.0.0.1:8002", - "127.0.0.1:8003", "127.0.0.1:8004", "127.0.0.1:8005"}; + std::vector clientAddrs = {"127.0.0.1:8000", "127.0.0.1:8001", + "127.0.0.1:8002", "127.0.0.1:8003", + "127.0.0.1:8004", "127.0.0.1:8005"}; - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*mdsClient_, ListClient(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, kProcessCmdLineMetricName, _)) .Times(6) .WillOnce(Return(MetricRet::kOtherErr)) - .WillOnce(DoAll(SetArgPointee<2>(kProcessQemu), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>(kProcessPython), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>(kProcessOther), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>(kProcessQemu), Return(MetricRet::kOK))) + .WillOnce( + DoAll(SetArgPointee<2>(kProcessPython), Return(MetricRet::kOK))) + .WillOnce( + DoAll(SetArgPointee<2>(kProcessOther), Return(MetricRet::kOK))) .WillRepeatedly(DoAll(SetArgPointee<2>(kProcessNebdServer), - Return(MetricRet::kOK))); + Return(MetricRet::kOK))); EXPECT_CALL(*metricClient_, GetMetric(_, kCurveVersionMetricName, _)) .Times(5) - .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("0.0.5.3"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.5.3"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)) .WillOnce(Return(MetricRet::kNotFound)) - .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), - Return(MetricRet::kOK))); + .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), Return(MetricRet::kOK))); ClientVersionMapType clientVersionMap; ClientVersionMapType expected; VersionMapType versionMap = {{"0.0.5.2", {"127.0.0.1:8004"}}, @@ -282,85 +270,80 @@ TEST_F(VersionToolTest, GetClientVersion) { ASSERT_EQ(0, versionTool.GetClientVersion(&clientVersionMap)); ASSERT_EQ(expected, clientVersionMap); - // 2、ListClient失败 - EXPECT_CALL(*mdsClient_, ListClient(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. ListClient failed + EXPECT_CALL(*mdsClient_, ListClient(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, versionTool.GetClientVersion(&clientVersionMap)); } TEST_F(VersionToolTest, GetAndCheckSnapshotCloneVersion) { VersionTool versionTool(mdsClient_, metricClient_, snapshotClient_); - std::map dummyServerMap = - {{"127.0.0.1:6666", "127.0.0.1:6667"}, - {"127.0.0.1:6668", "127.0.0.1:6669"}, - {"127.0.0.1:6670", "127.0.0.1:6671"}}; + std::map dummyServerMap = { + {"127.0.0.1:6666", "127.0.0.1:6667"}, + {"127.0.0.1:6668", "127.0.0.1:6669"}, + {"127.0.0.1:6670", "127.0.0.1:6671"}}; - // 1、正常情况 + // 1. 
Normal situation EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); std::string version; std::vector failedList; - ASSERT_EQ(0, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + ASSERT_EQ( + 0, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); ASSERT_TRUE(failedList.empty()); - // 2、获取部分curve_version失败 + // 2. Obtain partial curve_version failed EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) .WillOnce(Return(MetricRet::kOtherErr)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); - ASSERT_EQ(0, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); + ASSERT_EQ( + 0, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); std::vector expectedList = {"127.0.0.1:6667"}; ASSERT_EQ(expectedList, failedList); - // 3、dummyServerMap为空 + // 3. dummyServerMap is empty std::map dummyServerMap2; EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap2)); - EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) - .Times(0); - ASSERT_EQ(-1, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + EXPECT_CALL(*metricClient_, GetMetric(_, _, _)).Times(0); + ASSERT_EQ( + -1, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 4、version不一致 + // 4. version inconsistency EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)); - ASSERT_EQ(-1, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + ASSERT_EQ( + -1, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 5、老版本mds + // 5. 
Old version of mds
     EXPECT_CALL(*snapshotClient_, GetDummyServerMap())
         .Times(1)
         .WillOnce(ReturnRef(dummyServerMap));
     EXPECT_CALL(*metricClient_, GetMetric(_, _, _))
         .Times(3)
         .WillRepeatedly(Return(MetricRet::kNotFound));
-    ASSERT_EQ(0, versionTool.GetAndCheckSnapshotCloneVersion(&version,
-                                                             &failedList));
+    ASSERT_EQ(
+        0, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList));
     ASSERT_EQ("before-0.0.5.2", version);
     ASSERT_TRUE(failedList.empty());
 }
diff --git a/test/util/config_generator.h b/test/util/config_generator.h
index f0508e58ca..7ee14f23d0 100644
--- a/test/util/config_generator.h
+++ b/test/util/config_generator.h
@@ -32,14 +32,15 @@ namespace curve {
 
 using curve::common::Configuration;
 
-// 各模块继承该接口,实现自己的初始化配置函数
+// Each module inherits this interface and implements its own configuration
+// initialization function
 class ConfigGenerator {
  public:
     ConfigGenerator() = default;
 
     virtual ~ConfigGenerator() = default;
 
-    virtual bool LoadTemplete(const std::string &defaultConfigPath) {
+    virtual bool LoadTemplete(const std::string& defaultConfigPath) {
         config_.SetConfigPath(defaultConfigPath);
         if (!config_.LoadConfig()) {
             return false;
@@ -47,23 +48,22 @@ class ConfigGenerator {
         return true;
     }
 
-    virtual void SetConfigPath(const std::string &configPath) {
+    virtual void SetConfigPath(const std::string& configPath) {
         configPath_ = configPath;
     }
 
-    // 设置配置项
+    // Set a configuration item
     virtual void SetKV(const std::string& key, const std::string& value) {
         config_.SetValue(key, value);
     }
 
     /**
-     * @brief 批量设置配置项
+     * @brief Set configuration items in batch
      *
-     * @param options 配置项表,形如 "Ip=127.0.0.1"
+     * @param options list of configuration items, each of the form "Ip=127.0.0.1"
      */
-    virtual void SetConfigOptions(
-        const std::vector<std::string> &options) {
-        for (const std::string &op : options) {
+    virtual void SetConfigOptions(const std::vector<std::string>& options) {
+        for (const std::string& op : options) {
             int delimiterPos = op.find("=");
             std::string key = op.substr(0, delimiterPos);
             std::string value = op.substr(delimiterPos + 1);
@@ -71,7 +71,7 @@ class ConfigGenerator {
         }
     }
 
-    // 用于生成配置文件
+    // Used to generate the configuration file
     virtual bool Generate() {
         if (configPath_ != "") {
             config_.SetConfigPath(configPath_);
@@ -80,27 +80,25 @@ class ConfigGenerator {
         return false;
     }
 
-    virtual bool Generate(const std::string &newConfigPath) {
+    virtual bool Generate(const std::string& newConfigPath) {
         configPath_ = newConfigPath;
         return Generate();
     }
 
-    // 删除配置文件
-    virtual int Remove() {
-        return ::remove(configPath_.c_str());
-    }
+    // Delete the configuration file
+    virtual int Remove() { return ::remove(configPath_.c_str()); }
 
  protected:
-    // 配置文件路径
+    // Configuration file path
     std::string configPath_;
-    // 配置器
+    // Configurator
     Configuration config_;
 };
 
 #define DEFAULT_MDS_CONF "conf/mds.conf"
 
 struct MDSConfigGenerator : public ConfigGenerator {
-    explicit MDSConfigGenerator(const std::string &configPath) {
+    explicit MDSConfigGenerator(const std::string& configPath) {
         LoadTemplete(DEFAULT_MDS_CONF);
         SetConfigPath(configPath);
     }
@@ -109,7 +107,7 @@ struct MDSConfigGenerator : public ConfigGenerator {
 #define DEFAULT_CHUNKSERVER_CONF "conf/chunkserver.conf.example"
 
 struct CSConfigGenerator : public ConfigGenerator {
-    explicit CSConfigGenerator(const std::string &configPath) {
+    explicit CSConfigGenerator(const std::string& configPath) {
         LoadTemplete(DEFAULT_CHUNKSERVER_CONF);
         SetConfigPath(configPath);
     }
@@ -118,7 +116,7 @@ struct CSConfigGenerator : public ConfigGenerator {
 #define DEFAULT_CLIENT_CONF "conf/client.conf"
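// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch: how these generators are typically
// driven from a test, using only the interface shown above (the constructor
// loads the default template, SetKV/SetConfigOptions override entries, and
// Generate()/Remove() write and delete the file). The config keys, port
// values, and output path below are hypothetical placeholders, not keys taken
// from conf/mds.conf.
inline void ExampleGenerateMdsConf() {
    MDSConfigGenerator gen("/tmp/mds_ut.conf");      // template: conf/mds.conf
    gen.SetKV("mds.listen.addr", "127.0.0.1:6666");  // hypothetical key
    gen.SetConfigOptions({"mds.dummy.port=6667"});   // "key=value" entries
    if (gen.Generate()) {  // writes the patched template to /tmp/mds_ut.conf
        // ... run the test against the generated file, then clean up:
        gen.Remove();
    }
}
// ---------------------------------------------------------------------------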
struct ClientConfigGenerator : public ConfigGenerator {
-    explicit ClientConfigGenerator(const std::string &configPath) {
+    explicit ClientConfigGenerator(const std::string& configPath) {
         LoadTemplete(DEFAULT_CLIENT_CONF);
         SetConfigPath(configPath);
     }
@@ -127,7 +125,7 @@ struct ClientConfigGenerator : public ConfigGenerator {
 #define DEFAULT_CS_CLIENT_CONF "conf/cs_client.conf"
 
 struct CSClientConfigGenerator : public ConfigGenerator {
-    explicit CSClientConfigGenerator(const std::string &configPath) {
+    explicit CSClientConfigGenerator(const std::string& configPath) {
         LoadTemplete(DEFAULT_CS_CLIENT_CONF);
         SetConfigPath(configPath);
     }
@@ -136,7 +134,7 @@ struct CSClientConfigGenerator : public ConfigGenerator {
 #define DEFAULT_SNAP_CLIENT_CONF "conf/snap_client.conf"
 
 struct SnapClientConfigGenerator : public ConfigGenerator {
-    explicit SnapClientConfigGenerator(const std::string &configPath) {
+    explicit SnapClientConfigGenerator(const std::string& configPath) {
         LoadTemplete(DEFAULT_SNAP_CLIENT_CONF);
         SetConfigPath(configPath);
     }
@@ -145,7 +143,7 @@ struct SnapClientConfigGenerator : public ConfigGenerator {
 #define DEFAULT_S3_CONF "conf/s3.conf"
 
 struct S3ConfigGenerator : public ConfigGenerator {
-    explicit S3ConfigGenerator(const std::string &configPath) {
+    explicit S3ConfigGenerator(const std::string& configPath) {
         LoadTemplete(DEFAULT_S3_CONF);
         SetConfigPath(configPath);
         SetKV("s3.endpoint", "127.0.0.1:9999");
@@ -155,7 +153,7 @@ struct S3ConfigGenerator : public ConfigGenerator {
 #define DEFAULT_SCS_CONF "conf/snapshot_clone_server.conf"
 
 struct SCSConfigGenerator : public ConfigGenerator {
-    explicit SCSConfigGenerator(const std::string &configPath) {
+    explicit SCSConfigGenerator(const std::string& configPath) {
         LoadTemplete(DEFAULT_SCS_CONF);
         SetConfigPath(configPath);
     }
diff --git a/thirdparties/etcdclient/etcdclient.go b/thirdparties/etcdclient/etcdclient.go
index 355e99b162..344517c8f9 100644
--- a/thirdparties/etcdclient/etcdclient.go
+++ b/thirdparties/etcdclient/etcdclient.go
@@ -21,7 +21,7 @@ package main
 
 enum EtcdErrCode
 {
-    // grpc errCode, 具体的含义见:
+    // grpc errCode, for specific meanings, refer to:
     // https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes#ErrGRPCNoSpace
     // https://godoc.org/google.golang.org/grpc/codes#Code
     EtcdOK = 0,
@@ -42,7 +42,7 @@ enum EtcdErrCode
     EtcdDataLoss = 15,
     EtcdUnauthenticated = 16,
 
-    // 自定义错误码
+    // Custom error codes
     EtcdTxnUnkownOp = 17,
     EtcdObjectNotExist = 18,
     EtcdErrObjectType = 19,
@@ -87,6 +87,11 @@ import "C"
 import (
 	"context"
 	"errors"
+	"log"
+	"strings"
+	"sync"
+	"time"
+
 	"go.etcd.io/etcd/clientv3"
 	"go.etcd.io/etcd/clientv3/concurrency"
 	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
@@ -94,10 +99,6 @@ import (
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
-	"log"
-	"strings"
-	"sync"
-	"time"
 )
 
 const (
@@ -203,9 +204,8 @@ func GetErrCode(op string, err error) C.enum_EtcdErrCode {
 	return C.EtcdUnknown
 }
 
-// TODO(lixiaocui): 日志打印看是否需要glog
-//
-//export NewEtcdClientV3
+// TODO(lixiaocui): check whether glog is needed for log printing
+//export NewEtcdClientV3
 func NewEtcdClientV3(conf C.struct_EtcdConf) C.enum_EtcdErrCode {
 	var err error
 	cfg := clientv3.Config{
@@ -282,9 +282,8 @@ func EtcdClientGet(timeout C.int, key *C.char,
 		resp.Header.Revision
 }
 
-// TODO(lixiaocui): list可能需要有长度限制
-//
-//export EtcdClientList
+// TODO(lixiaocui): list may need a length limit
+//export EtcdClientList
 func EtcdClientList(timeout C.int, startKey, endKey *C.char,
 	startLen, endLen C.int) (C.enum_EtcdErrCode, uint64, int64) {
 	goStartKey := C.GoStringN(startKey, startLen)
@@ -437,7 +436,7 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int,
 	goPfx := C.GoStringN(pfx, pfxLen)
 	goLeaderName := C.GoStringN(leaderName, nameLen)
 
-	// 创建带ttl的session
+	// Create a session with TTL
 	var sessionOpts concurrency.SessionOption = concurrency.WithTTL(int(sessionInterSec))
 	session, err := concurrency.NewSession(globalClient, sessionOpts)
 	if err != nil {
@@ -445,7 +444,7 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int,
 		return C.EtcdCampaignInternalErr, 0
 	}
 
-	// 创建election和超时context
+	// Create the election and the timeout context
 	var election *concurrency.Election = concurrency.NewElection(session, goPfx)
 	var ctx context.Context
 	var cancel context.CancelFunc
@@ -460,7 +459,7 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int,
 	wg.Add(2)
 	defer wg.Wait()
 
-	// 监测当前的leader
+	// Observe the current leader
 	obCtx, obCancel := context.WithCancel(context.Background())
 	observer := election.Observe(obCtx)
 	defer obCancel()
@@ -484,7 +483,7 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int,
 		}
 	}()
 
-	// 监测自己key的存活状态
+	// Monitor the liveness of its own key
 	exitSignal := make(chan struct{}, 1)
 	go func() {
 		defer wg.Done()
@@ -502,8 +501,8 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int,
 		}
 	}()
 
-	// 1. Campaign返回nil说明当前mds持有的key版本号最小
-	// 2. Campaign返回时不检测自己持有key的状态,所以返回nil后需要监测session.Done()
+	// 1. Campaign returning nil means the key held by this mds has the smallest version number
+	// 2. Campaign does not check the state of the key it holds when it returns, so after it returns nil we still need to watch session.Done()
 	if err := election.Campaign(ctx, goLeaderName); err == nil {
 		log.Printf("[%s/%x] campaign for leader success",
 			goLeaderName, session.Lease())
diff --git a/tools/curvefsTool.cpp b/tools/curvefsTool.cpp
index 2227257bf3..3d1a726b47 100644
--- a/tools/curvefsTool.cpp
+++ b/tools/curvefsTool.cpp
@@ -29,27 +29,25 @@ using ::curve::common::kDefaultPoolsetName;
 
 DEFINE_string(mds_addr, "127.0.0.1:6666",
-    "mds ip and port list, separated by \",\"");
+              "mds ip and port list, separated by \",\"");
 
-DEFINE_string(op,
-    "",
-    "operation: create_logicalpool, "
-    "create_physicalpool, "
-    "set_chunkserver, "
-    "set_logicalpool");
+DEFINE_string(op, "",
+              "operation: create_logicalpool, "
+              "create_physicalpool, "
+              "set_chunkserver, "
+              "set_logicalpool");
 
 DEFINE_string(cluster_map, "/etc/curve/topo.json", "cluster topology map.");
 
 DEFINE_int32(chunkserver_id, -1, "chunkserver id for set chunkserver status.");
 
 DEFINE_string(chunkserver_status, "readwrite",
-    "chunkserver status: readwrite, pendding.");
+              "chunkserver status: readwrite, pendding.");
 
 DEFINE_uint32(rpcTimeOutMs, 5000u, "rpc time out");
 
 DEFINE_string(confPath, "/etc/curve/tools.conf", "config file path of tools");
 
 DEFINE_uint32(logicalpool_id, -1, "logicalpool id for set logicalpool status.");
 
-DEFINE_string(logicalpool_status, "allow",
-    "logicalpool status: allow, deny.");
+DEFINE_string(logicalpool_status, "allow", "logicalpool status: allow, deny.");
 
 const int kRetCodeCommonErr = -1;
 const int kRetCodeRedirectMds = -2;
@@ -73,1339 +71,1552 @@ const char kAllocStatusDeny[] = "deny";
 const char kPoolsets[] = "poolsets";
 const char kPoolsetName[] = "poolset";
-
 using ::curve::common::SplitString;
 
-namespace curve {
-namespace mds {
-namespace topology {
-
-const std::string CurvefsTools::clusterMapSeprator = " ";  // NOLINT
-
-void UpdateFlagsFromConf(curve::common::Configuration* conf) {
-    //
如果配置文件不存在的话不报错,以命令行为准,这是为了不强依赖配置 - // 如果配置文件存在并且没有指定命令行的话,就以配置文件为准 - if (conf->LoadConfig()) { - google::CommandLineFlagInfo info; - if (GetCommandLineFlagInfo("mds_addr", &info) && info.is_default) { - conf->GetStringValue("mdsAddr", &FLAGS_mds_addr); - LOG(INFO) << "conf: " << FLAGS_mds_addr; - } - } -} - -int CurvefsTools::Init() { - curve::common::Configuration conf; - conf.SetConfigPath(FLAGS_confPath); - UpdateFlagsFromConf(&conf); - SplitString(FLAGS_mds_addr, ",", &mdsAddressStr_); - if (mdsAddressStr_.empty()) { - LOG(ERROR) << "no available mds address."; - return kRetCodeCommonErr; - } - - butil::EndPoint endpt; - for (const auto& addr : mdsAddressStr_) { - if (butil::str2endpoint(addr.c_str(), &endpt) < 0) { - LOG(ERROR) << "Invalid sub mds ip:port provided: " << addr; - return kRetCodeCommonErr; - } - } - mdsAddressIndex_ = -1; - return 0; -} - -int CurvefsTools::TryAnotherMdsAddress() { - if (mdsAddressStr_.size() == 0) { - LOG(ERROR) << "no available mds address."; - return kRetCodeCommonErr; - } - mdsAddressIndex_ = (mdsAddressIndex_ + 1) % mdsAddressStr_.size(); - std::string mdsAddress = mdsAddressStr_[mdsAddressIndex_]; - LOG(INFO) << "try mds address(" << mdsAddressIndex_ - << "): " << mdsAddress; - int ret = channel_.Init(mdsAddress.c_str(), NULL); - if (ret != 0) { - LOG(ERROR) << "Fail to init channel to mdsAddress: " - << mdsAddress; - } - return ret; -} - -int CurvefsTools::DealFailedRet(int ret, std::string operation) { - if (kRetCodeRedirectMds == ret) { - LOG(WARNING) << operation << " fail on mds: " - << mdsAddressStr_[mdsAddressIndex_]; - } else { - LOG(ERROR) << operation << " fail."; - } - return ret; -} - -int CurvefsTools::HandleCreateLogicalPool() { - int ret = ReadClusterMap(); - if (ret < 0) { - return DealFailedRet(ret, "read cluster map"); - } - ret = InitLogicalPoolData(); - if (ret < 0) { - return DealFailedRet(ret, "init logical pool data"); - } - ret = ScanLogicalPool(); - if (ret < 0) { - return DealFailedRet(ret, "scan logical pool"); - } - for (const auto& lgPool : lgPoolDatas) { - TopologyService_Stub stub(&channel_); - - CreateLogicalPoolRequest request; - request.set_logicalpoolname(lgPool.name); - request.set_physicalpoolname(lgPool.physicalPoolName); - request.set_type(lgPool.type); - std::string replicaNumStr = std::to_string(lgPool.replicasNum); - std::string copysetNumStr = std::to_string(lgPool.copysetNum); - std::string zoneNumStr = std::to_string(lgPool.zoneNum); - - std::string rapString = "{\"replicaNum\":" + replicaNumStr - + ", \"copysetNum\":" + copysetNumStr - + ", \"zoneNum\":" + zoneNumStr - + "}"; - - request.set_redundanceandplacementpolicy(rapString); - request.set_userpolicy("{\"aaa\":1}"); - request.set_scatterwidth(lgPool.scatterwidth); - request.set_status(lgPool.status); - - CreateLogicalPoolResponse response; - - brpc::Controller cntl; - cntl.set_max_retry(0); - cntl.set_timeout_ms(-1); - cntl.set_log_id(1); - - LOG(INFO) << "CreateLogicalPool, second request: " - << request.DebugString(); - - stub.CreateLogicalPool(&cntl, &request, &response, nullptr); - if (cntl.Failed()) { - LOG(WARNING) << "send rpc get cntl Failed, error context:" - << cntl.ErrorText(); - return kRetCodeRedirectMds; - } - if (response.statuscode() == kTopoErrCodeSuccess) { - LOG(INFO) << "Received CreateLogicalPool Rpc response success, " - << response.DebugString(); - } else if (response.statuscode() == kTopoErrCodeLogicalPoolExist) { - LOG(INFO) << "Logical pool already exist"; - } else { - LOG(ERROR) << "CreateLogicalPool Rpc response fail. 
" - << "Message is :" - << response.DebugString(); - return response.statuscode(); - } - } - return 0; -} - -int CurvefsTools::ScanLogicalPool() { - // get all logicalpool and compare - // De-duplication - std::set phyPools; - for (const auto& lgPool : lgPoolDatas) { - phyPools.insert(lgPool.physicalPoolName); - } - for (const auto& phyPool : phyPools) { - std::list logicalPoolInfos; - int ret = ListLogicalPool(phyPool, &logicalPoolInfos); - if (ret < 0) { - return ret; - } - for (auto it = logicalPoolInfos.begin(); - it != logicalPoolInfos.end();) { - auto ix = - std::find_if(lgPoolDatas.begin(), lgPoolDatas.end(), - [it](const CurveLogicalPoolData& data) { - return data.name == it->logicalpoolname(); - }); - if (ix != lgPoolDatas.end()) { - lgPoolDatas.erase(ix); - it++; +namespace curve +{ + namespace mds + { + namespace topology + { + + const std::string CurvefsTools::clusterMapSeprator = " "; // NOLINT + + void UpdateFlagsFromConf(curve::common::Configuration *conf) + { + // If the configuration file does not exist, no error will be reported, and + // the command line will prevail. This is to avoid strong dependence on the + // configuration If the configuration file exists and no command line is + // specified, the configuration file shall prevail + if (conf->LoadConfig()) + { + google::CommandLineFlagInfo info; + if (GetCommandLineFlagInfo("mds_addr", &info) && info.is_default) + { + conf->GetStringValue("mdsAddr", &FLAGS_mds_addr); + LOG(INFO) << "conf: " << FLAGS_mds_addr; + } + } } - } - } - return 0; -} - -int CurvefsTools::ListLogicalPool(const std::string& phyPoolName, - std::list *logicalPoolInfos) { - TopologyService_Stub stub(&channel_); - ListLogicalPoolRequest request; - ListLogicalPoolResponse response; - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - request.set_physicalpoolname(phyPoolName); - - LOG(INFO) << "ListLogicalPool send request: " - << request.DebugString(); - stub.ListLogicalPool(&cntl, &request, &response, nullptr); - if (cntl.Failed()) { - return kRetCodeRedirectMds; - } - for (int i = 0; i < response.logicalpoolinfos_size(); i++) { - logicalPoolInfos->push_back( - response.logicalpoolinfos(i)); - } - return 0; -} - -int CurvefsTools::HandleBuildCluster() { - int ret = ReadClusterMap(); - if (ret < 0) { - return DealFailedRet(ret, "read cluster map"); - } - ret = InitPoolsetData(); - if (ret < 0) { - return DealFailedRet(ret, "init poolset data"); - } - ret = InitServerData(); - if (ret < 0) { - return DealFailedRet(ret, "init server data"); - } - ret = ScanCluster(); - if (ret < 0) { - return DealFailedRet(ret, "scan cluster"); - } - ret = ClearServer(); - if (ret < 0) { - return DealFailedRet(ret, "clear server"); - } - ret = ClearZone(); - if (ret < 0) { - return DealFailedRet(ret, "clear zone"); - } - ret = ClearPhysicalPool(); - if (ret < 0) { - return DealFailedRet(ret, "clear physicalpool"); - } - ret = ClearPoolset(); - if (ret < 0) { - return DealFailedRet(ret, "clear poolset"); - } - ret = CreatePoolset(); - if (ret < 0) { - return DealFailedRet(ret, "create Poolset"); - } - ret = CreatePhysicalPool(); - if (ret < 0) { - return DealFailedRet(ret, "create physicalpool"); - } - ret = CreateZone(); - if (ret < 0) { - return DealFailedRet(ret, "create zone"); - } - ret = CreateServer(); - if (ret < 0) { - return DealFailedRet(ret, "create server"); - } - return ret; -} - - -int CurvefsTools::ReadClusterMap() { - std::ifstream fin(FLAGS_cluster_map); - if (fin.is_open()) { - Json::CharReaderBuilder reader; - 
JSONCPP_STRING errs; - bool ok = Json::parseFromStream(reader, fin, &clusterMap_, &errs); - fin.close(); - if (!ok) { - LOG(ERROR) << "Parse cluster map file " << FLAGS_cluster_map - << " fail: " << errs; - return -1; - } - } else { - LOG(ERROR) << "open cluster map file : " - << FLAGS_cluster_map << " fail."; - return -1; - } - return 0; -} -int CurvefsTools::InitPoolsetData() { - if (clusterMap_[kPoolsets].isNull()) { - return 0; - } - - for (const auto& poolset : clusterMap_[kPoolsets]) { - CurvePoolsetData poolsetData; - if (!poolset[kName].isString()) { - LOG(ERROR) <<"poolset name must be string" << poolset[kName]; - return -1; - } - poolsetData.name = poolset[kName].asString(); - - if (!poolset[kType].isString()) { - LOG(ERROR) << "poolset type must be string"; - return -1; - } - poolsetData.type = poolset[kType].asString(); - if (poolsetData.type.empty()) { - LOG(ERROR) << "poolset type must not empty"; - return -1; - } - - poolsetDatas.emplace_back(std::move(poolsetData)); - } - return 0; -} - -int CurvefsTools::InitServerData() { - if (clusterMap_[kServers].isNull()) { - LOG(ERROR) << "No servers in cluster map"; - return -1; - } - for (const auto &server : clusterMap_[kServers]) { - CurveServerData serverData; - if (!server[kName].isString()) { - LOG(ERROR) << "server name must be string"; - return -1; - } - serverData.serverName = server[kName].asString(); - if (!server[kInternalIp].isString()) { - LOG(ERROR) << "server internal ip must be string"; - return -1; - } - serverData.internalIp = server[kInternalIp].asString(); - if (!server[kInternalPort].isUInt()) { - LOG(ERROR) << "server internal port must be uint"; - return -1; - } - serverData.internalPort = server[kInternalPort].asUInt(); - if (!server[kExternalIp].isString()) { - LOG(ERROR) << "server internal port must be string"; - return -1; - } - serverData.externalIp = server[kExternalIp].asString(); - if (!server[kExternalPort].isUInt()) { - LOG(ERROR) << "server internal port must be string"; - return -1; - } - serverData.externalPort = server[kExternalPort].asUInt(); - if (!server[kZone].isString()) { - LOG(ERROR) << "server zone must be string"; - return -1; - } - serverData.zoneName = server[kZone].asString(); - - if (!server[kPhysicalPool].isString()) { - LOG(ERROR) << "server physicalpool must be string"; - return -1; - } - serverData.physicalPoolName = server[kPhysicalPool].asString(); - - if (!server.isMember(kPoolsetName)) { - serverData.poolsetName = kDefaultPoolsetName; - } else if (server[kPoolsetName].isString()) { - serverData.poolsetName = server[kPoolsetName].asString(); - } else { - LOG(ERROR) << "server poolsetName must be string, poolsetName is " - << server[kPoolsetName]; - return -1; - } - serverDatas.emplace_back(std::move(serverData)); - } - return 0; -} - -int CurvefsTools::InitLogicalPoolData() { - if (clusterMap_[kLogicalPools].isNull()) { - LOG(ERROR) << "No servers in cluster map"; - return -1; - } - for (const auto &lgPool : clusterMap_[kLogicalPools]) { - CurveLogicalPoolData lgPoolData; - if (!lgPool[kName].isString()) { - LOG(ERROR) << "logicalpool name must be string"; - return -1; - } - lgPoolData.name = lgPool[kName].asString(); - if (!lgPool[kPhysicalPool].isString()) { - LOG(ERROR) << "logicalpool physicalpool must be string"; - return -1; - } - lgPoolData.physicalPoolName = lgPool[kPhysicalPool].asString(); - if (!lgPool[kType].isInt()) { - LOG(ERROR) << "logicalpool type must be int"; - return -1; - } - lgPoolData.type = static_cast(lgPool[kType].asInt()); - if 
(!lgPool[kReplicasNum].isUInt()) { - LOG(ERROR) << "logicalpool replicasnum must be uint"; - return -1; - } - lgPoolData.replicasNum = lgPool[kReplicasNum].asUInt(); - if (!lgPool[kCopysetNum].isUInt64()) { - LOG(ERROR) << "logicalpool copysetnum must be uint64"; - return -1; - } - lgPoolData.copysetNum = lgPool[kCopysetNum].asUInt64(); - if (!lgPool[kZoneNum].isUInt64()) { - LOG(ERROR) << "logicalpool zonenum must be uint64"; - return -1; - } - lgPoolData.zoneNum = lgPool[kZoneNum].asUInt(); - if (!lgPool[kScatterWidth].isUInt()) { - LOG(ERROR) << "logicalpool scatterwidth must be uint"; - return -1; - } - lgPoolData.scatterwidth = lgPool[kScatterWidth].asUInt(); - if (lgPool[kAllocStatus].isString()) { - if (lgPool[kAllocStatus].asString() == kAllocStatusAllow) { - lgPoolData.status = AllocateStatus::ALLOW; - } else if (lgPool[kAllocStatus].asString() == kAllocStatusDeny) { - lgPoolData.status = AllocateStatus::DENY; - } else { - LOG(ERROR) << "logicalpool status string is invalid!, which is " - << lgPool[kAllocStatus].asString(); - return -1; + int CurvefsTools::Init() + { + curve::common::Configuration conf; + conf.SetConfigPath(FLAGS_confPath); + UpdateFlagsFromConf(&conf); + SplitString(FLAGS_mds_addr, ",", &mdsAddressStr_); + if (mdsAddressStr_.empty()) + { + LOG(ERROR) << "no available mds address."; + return kRetCodeCommonErr; + } + + butil::EndPoint endpt; + for (const auto &addr : mdsAddressStr_) + { + if (butil::str2endpoint(addr.c_str(), &endpt) < 0) + { + LOG(ERROR) << "Invalid sub mds ip:port provided: " << addr; + return kRetCodeCommonErr; + } + } + mdsAddressIndex_ = -1; + return 0; } - } else { - LOG(WARNING) << "logicalpool not set, use default allow"; - lgPoolData.status = AllocateStatus::ALLOW; - } - lgPoolDatas.emplace_back(lgPoolData); - } - return 0; -} - -int CurvefsTools::ListPoolset(std::list* poolsetInfos) { - TopologyService_Stub stub(&channel_); - ListPoolsetRequest request; - ListPoolsetResponse response; - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "ListPoolset send request: " << request.DebugString(); - - stub.ListPoolset(&cntl, &request, &response, nullptr); - - if (cntl.Failed()) { - return kRetCodeRedirectMds; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "ListPoolset Rpc response fail. " - << "Message is :" - << response.DebugString(); - return response.statuscode(); - } else { - LOG(INFO) << "Received ListPoolset Rpc response success, " - << response.DebugString(); - } - - for (int i = 0; i < response.poolsetinfos_size(); i++) { - poolsetInfos->push_back(response.poolsetinfos(i)); - } - return 0; -} - -int CurvefsTools::ListPhysicalPool( - std::list *physicalPoolInfos) { - TopologyService_Stub stub(&channel_); - ListPhysicalPoolRequest request; - ListPhysicalPoolResponse response; - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "ListPhysicalPool send request: " - << request.DebugString(); - - stub.ListPhysicalPool(&cntl, - &request, - &response, - nullptr); - - if (cntl.Failed()) { - return kRetCodeRedirectMds; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "ListPhysicalPool Rpc response fail. 
" - << "Message is :" - << response.DebugString(); - return response.statuscode(); - } else { - LOG(INFO) << "Received ListPhysicalPool Rpc response success, " - << response.DebugString(); - } - for (int i = 0; - i < response.physicalpoolinfos_size(); - i++) { - physicalPoolInfos->push_back( - response.physicalpoolinfos(i)); - } - return 0; -} - -int CurvefsTools::ListPhysicalPoolsInPoolset(PoolsetIdType poolsetid, - std::list *physicalPoolInfos) { - TopologyService_Stub stub(&channel_); - ListPhysicalPoolsInPoolsetRequest request; - ListPhysicalPoolResponse response; - request.add_poolsetid(poolsetid); - - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "ListPhysicalPoolsInPoolset, send request: " - << request.DebugString(); - - stub.ListPhysicalPoolsInPoolset(&cntl, &request, &response, nullptr); - - if (cntl.Failed()) { - return kRetCodeRedirectMds; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "ListPhysicalPoolsInPoolset Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , poolsetid = " - << poolsetid; - return response.statuscode(); - } else { - LOG(INFO) << "Received ListPhyPoolsInPoolset Rpc resp success," - << response.DebugString(); - } - - for (int i = 0; i < response.physicalpoolinfos_size(); i++) { - physicalPoolInfos->push_back(response.physicalpoolinfos(i)); - } - return 0; -} - -int CurvefsTools::AddListPoolZone(PoolIdType poolid, - std::list *zoneInfos) { - TopologyService_Stub stub(&channel_); - ListPoolZoneRequest request; - ListPoolZoneResponse response; - request.set_physicalpoolid(poolid); - - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "ListPoolZone, send request: " - << request.DebugString(); - - stub.ListPoolZone(&cntl, &request, &response, nullptr); - - if (cntl.Failed()) { - return kRetCodeRedirectMds; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "ListPoolZone Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , physicalpoolid = " - << poolid; - return response.statuscode(); - } else { - LOG(INFO) << "Received ListPoolZone Rpc response success, " - << response.DebugString(); - } - - for (int i = 0; i < response.zones_size(); i++) { - zoneInfos->push_back(response.zones(i)); - } - return 0; -} - -int CurvefsTools::AddListZoneServer(ZoneIdType zoneid, - std::list *serverInfos) { - TopologyService_Stub stub(&channel_); - ListZoneServerRequest request; - ListZoneServerResponse response; - request.set_zoneid(zoneid); - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "ListZoneServer, send request: " - << request.DebugString(); - - stub.ListZoneServer(&cntl, &request, &response, nullptr); - - if (cntl.Failed()) { - return kRetCodeRedirectMds; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "ListZoneServer Rpc response fail. 
" - << "Message is :" - << response.DebugString() - << " , zoneid = " - << zoneid; - return response.statuscode(); - } else { - LOG(INFO) << "ListZoneServer Rpc response success, " - << response.DebugString(); - } - - for (int i = 0; i < response.serverinfo_size(); i++) { - serverInfos->push_back(response.serverinfo(i)); - } - return 0; -} - -int CurvefsTools::ScanCluster() { - // get all poolsets and compare - // De-duplication - for (const auto& poolset : poolsetDatas) { - if (std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), - [poolset](const CurvePoolsetData& data) { - return data.name == poolset.name; - }) != poolsetToAdd.end()) { - continue; - } - poolsetToAdd.push_back(poolset); - } - - std::list poolsetInfos; - int ret = ListPoolset(&poolsetInfos); - if (ret < 0) { - return ret; - } - - for (auto it = poolsetInfos.begin(); it != poolsetInfos.end();) { - if (it->poolsetname() == kDefaultPoolsetName) { - ++it; - continue; - } - - auto ix = std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), - [it](const CurvePoolsetData& data) { - return data.name == it->poolsetname(); - }); - if (ix != poolsetToAdd.end()) { - poolsetToAdd.erase(ix); - it++; - } else { - poolsetToDel.push_back(it->poolsetid()); - it = poolsetInfos.erase(it); - } - } - - // get all phsicalpool and compare - // De-duplication - for (auto server : serverDatas) { - if (std::find_if(physicalPoolToAdd.begin(), - physicalPoolToAdd.end(), - [server](CurvePhysicalPoolData& data) { - return data.physicalPoolName == server.physicalPoolName; - }) != physicalPoolToAdd.end()) { - continue; - } - CurvePhysicalPoolData poolData; - poolData.physicalPoolName = server.physicalPoolName; - poolData.poolsetName = server.poolsetName.empty() ? kDefaultPoolsetName - : server.poolsetName; - physicalPoolToAdd.push_back(poolData); - } - - std::list physicalPoolInfos; - for (auto poolsetid : poolsetToDel) { - ret = ListPhysicalPoolsInPoolset(poolsetid, &physicalPoolInfos); - if (ret < 0) { - return ret; - } - } - - for (auto phyPoolinfo : physicalPoolInfos) { - physicalPoolToDel.push_back(phyPoolinfo.physicalpoolid()); - } - - physicalPoolInfos.clear(); - - for (auto it = poolsetInfos.begin(); it != poolsetInfos.end(); it++) { - PoolsetIdType poolsetid = it->poolsetid(); - ret = ListPhysicalPoolsInPoolset(poolsetid, &physicalPoolInfos); - if (ret < 0) { - return ret; - } - } - - for (auto it = physicalPoolInfos.begin(); it != physicalPoolInfos.end();) { - auto ix = std::find_if( - physicalPoolToAdd.begin(), physicalPoolToAdd.end(), - [it](const CurvePhysicalPoolData& data) { - return (data.poolsetName == it->poolsetname()) && - (data.physicalPoolName == it->physicalpoolname()); - }); - if (ix != physicalPoolToAdd.end()) { - physicalPoolToAdd.erase(ix); - it++; - } else { - physicalPoolToDel.push_back(it->physicalpoolid()); - it = physicalPoolInfos.erase(it); - } - } - - // get zone and compare - // De-duplication - for (auto server : serverDatas) { - if (std::find_if(zoneToAdd.begin(), - zoneToAdd.end(), - [server](CurveZoneData& data) { - return (data.physicalPoolName == - server.physicalPoolName) && - (data.zoneName == - server.zoneName); - }) != zoneToAdd.end()) { - continue; - } - CurveZoneData CurveZoneData; - CurveZoneData.physicalPoolName = server.physicalPoolName; - CurveZoneData.zoneName = server.zoneName; - zoneToAdd.push_back(CurveZoneData); - } - - std::list zoneInfos; - for (auto poolid : physicalPoolToDel) { - ret = AddListPoolZone(poolid, &zoneInfos); - if (ret < 0) { - return ret; - } - } - - for (auto zinfo : zoneInfos) 
{ - zoneToDel.push_back(zinfo.zoneid()); - } - - zoneInfos.clear(); - for (auto it = physicalPoolInfos.begin(); - it != physicalPoolInfos.end(); - it++) { - PoolIdType poolid = it->physicalpoolid(); - ret = AddListPoolZone(poolid, &zoneInfos); - if (ret < 0) { - return ret; - } - } - - for (auto it = zoneInfos.begin(); - it != zoneInfos.end();) { - auto ix = std::find_if( - zoneToAdd.begin(), zoneToAdd.end(), - [it](const CurveZoneData& data) { - return (data.physicalPoolName == - it->physicalpoolname()) && - (data.zoneName == - it->zonename()); - }); - if (ix != zoneToAdd.end()) { - zoneToAdd.erase(ix); - it++; - } else { - zoneToDel.push_back(it->zoneid()); - it = zoneInfos.erase(it); - } - } - - // get server and compare - // De-duplication - for (auto server : serverDatas) { - if (std::find_if(serverToAdd.begin(), - serverToAdd.end(), - [server](CurveServerData& data) { - return data.serverName == - server.serverName; - }) != serverToAdd.end()) { - LOG(WARNING) << "WARING! Duplicated Server Name: " - << server.serverName - << " , ignored."; - continue; - } - serverToAdd.push_back(server); - } - - std::list serverInfos; - for (auto zoneid : zoneToDel) { - ret = AddListZoneServer(zoneid, &serverInfos); - if (ret < 0) { - return ret; - } - } - - for (auto sinfo : serverInfos) { - serverToDel.push_back(sinfo.serverid()); - } - - serverInfos.clear(); - for (auto it = zoneInfos.begin(); - it != zoneInfos.end(); - it++) { - ZoneIdType zoneid = it->zoneid(); - ret = AddListZoneServer(zoneid, &serverInfos); - if (ret < 0) { - return ret; - } - } - - for (auto it = serverInfos.begin(); - it != serverInfos.end(); - it++) { - auto ix = - std::find_if( - serverToAdd.begin(), serverToAdd.end(), - [it](const CurveServerData& data) { - return (data.serverName == it->hostname()) && - (data.zoneName == it->zonename()) && - (data.physicalPoolName == it->physicalpoolname()); - }); - if (ix != serverToAdd.end()) { - serverToAdd.erase(ix); - } else { - serverToDel.push_back(it->serverid()); - } - } - - return 0; -} - -int CurvefsTools::CreatePoolset() { - TopologyService_Stub stub(&channel_); - for (const auto& it : poolsetToAdd) { - if (it.name == kDefaultPoolsetName) { - continue; - } - - PoolsetRequest request; - request.set_poolsetname(it.name); - request.set_type(it.type); - request.set_desc(""); - - PoolsetResponse response; - - brpc::Controller cntl; - cntl.set_max_retry(0); - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "CreatePoolset, send request: " - << request.DebugString(); + int CurvefsTools::TryAnotherMdsAddress() + { + if (mdsAddressStr_.size() == 0) + { + LOG(ERROR) << "no available mds address."; + return kRetCodeCommonErr; + } + mdsAddressIndex_ = (mdsAddressIndex_ + 1) % mdsAddressStr_.size(); + std::string mdsAddress = mdsAddressStr_[mdsAddressIndex_]; + LOG(INFO) << "try mds address(" << mdsAddressIndex_ << "): " << mdsAddress; + int ret = channel_.Init(mdsAddress.c_str(), NULL); + if (ret != 0) + { + LOG(ERROR) << "Fail to init channel to mdsAddress: " << mdsAddress; + } + return ret; + } - stub.CreatePoolset(&cntl, &request, &response, nullptr); + int CurvefsTools::DealFailedRet(int ret, std::string operation) + { + if (kRetCodeRedirectMds == ret) + { + LOG(WARNING) << operation + << " fail on mds: " << mdsAddressStr_[mdsAddressIndex_]; + } + else + { + LOG(ERROR) << operation << " fail."; + } + return ret; + } - if (cntl.Failed()) { - LOG(WARNING) << "send rpc get cntl Failed, error context:" - << cntl.ErrorText(); - return kRetCodeRedirectMds; - } 
- if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "CreatePoolset Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , poolsetName =" - << it.name; - return response.statuscode(); - } else { - LOG(INFO) << "Received CreatePoolset response success, " - << response.DebugString(); - } - } - return 0; -} + int CurvefsTools::HandleCreateLogicalPool() + { + int ret = ReadClusterMap(); + if (ret < 0) + { + return DealFailedRet(ret, "read cluster map"); + } + ret = InitLogicalPoolData(); + if (ret < 0) + { + return DealFailedRet(ret, "init logical pool data"); + } + ret = ScanLogicalPool(); + if (ret < 0) + { + return DealFailedRet(ret, "scan logical pool"); + } + for (const auto &lgPool : lgPoolDatas) + { + TopologyService_Stub stub(&channel_); + + CreateLogicalPoolRequest request; + request.set_logicalpoolname(lgPool.name); + request.set_physicalpoolname(lgPool.physicalPoolName); + request.set_type(lgPool.type); + std::string replicaNumStr = std::to_string(lgPool.replicasNum); + std::string copysetNumStr = std::to_string(lgPool.copysetNum); + std::string zoneNumStr = std::to_string(lgPool.zoneNum); + + std::string rapString = "{\"replicaNum\":" + replicaNumStr + + ", \"copysetNum\":" + copysetNumStr + + ", \"zoneNum\":" + zoneNumStr + "}"; + + request.set_redundanceandplacementpolicy(rapString); + request.set_userpolicy("{\"aaa\":1}"); + request.set_scatterwidth(lgPool.scatterwidth); + request.set_status(lgPool.status); + + CreateLogicalPoolResponse response; + + brpc::Controller cntl; + cntl.set_max_retry(0); + cntl.set_timeout_ms(-1); + cntl.set_log_id(1); + + LOG(INFO) << "CreateLogicalPool, second request: " + << request.DebugString(); + + stub.CreateLogicalPool(&cntl, &request, &response, nullptr); + if (cntl.Failed()) + { + LOG(WARNING) << "send rpc get cntl Failed, error context:" + << cntl.ErrorText(); + return kRetCodeRedirectMds; + } + if (response.statuscode() == kTopoErrCodeSuccess) + { + LOG(INFO) << "Received CreateLogicalPool Rpc response success, " + << response.DebugString(); + } + else if (response.statuscode() == kTopoErrCodeLogicalPoolExist) + { + LOG(INFO) << "Logical pool already exist"; + } + else + { + LOG(ERROR) << "CreateLogicalPool Rpc response fail. 
" + << "Message is :" << response.DebugString(); + return response.statuscode(); + } + } + return 0; + } -int CurvefsTools::CreatePhysicalPool() { - TopologyService_Stub stub(&channel_); - for (auto it : physicalPoolToAdd) { - PhysicalPoolRequest request; - request.set_physicalpoolname(it.physicalPoolName); - request.set_desc(""); - request.set_poolsetname(it.poolsetName); + int CurvefsTools::ScanLogicalPool() + { + // get all logicalpool and compare + // De-duplication + std::set phyPools; + for (const auto &lgPool : lgPoolDatas) + { + phyPools.insert(lgPool.physicalPoolName); + } + for (const auto &phyPool : phyPools) + { + std::list logicalPoolInfos; + int ret = ListLogicalPool(phyPool, &logicalPoolInfos); + if (ret < 0) + { + return ret; + } + for (auto it = logicalPoolInfos.begin(); + it != logicalPoolInfos.end();) + { + auto ix = + std::find_if(lgPoolDatas.begin(), lgPoolDatas.end(), + [it](const CurveLogicalPoolData &data) + { + return data.name == it->logicalpoolname(); + }); + if (ix != lgPoolDatas.end()) + { + lgPoolDatas.erase(ix); + it++; + } + } + } + return 0; + } - PhysicalPoolResponse response; + int CurvefsTools::ListLogicalPool( + const std::string &phyPoolName, + std::list *logicalPoolInfos) + { + TopologyService_Stub stub(&channel_); + ListLogicalPoolRequest request; + ListLogicalPoolResponse response; + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + request.set_physicalpoolname(phyPoolName); + + LOG(INFO) << "ListLogicalPool send request: " << request.DebugString(); + stub.ListLogicalPool(&cntl, &request, &response, nullptr); + if (cntl.Failed()) + { + return kRetCodeRedirectMds; + } + for (int i = 0; i < response.logicalpoolinfos_size(); i++) + { + logicalPoolInfos->push_back(response.logicalpoolinfos(i)); + } + return 0; + } - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); + int CurvefsTools::HandleBuildCluster() + { + int ret = ReadClusterMap(); + if (ret < 0) + { + return DealFailedRet(ret, "read cluster map"); + } + ret = InitPoolsetData(); + if (ret < 0) + { + return DealFailedRet(ret, "init poolset data"); + } + ret = InitServerData(); + if (ret < 0) + { + return DealFailedRet(ret, "init server data"); + } + ret = ScanCluster(); + if (ret < 0) + { + return DealFailedRet(ret, "scan cluster"); + } + ret = ClearServer(); + if (ret < 0) + { + return DealFailedRet(ret, "clear server"); + } + ret = ClearZone(); + if (ret < 0) + { + return DealFailedRet(ret, "clear zone"); + } + ret = ClearPhysicalPool(); + if (ret < 0) + { + return DealFailedRet(ret, "clear physicalpool"); + } + ret = ClearPoolset(); + if (ret < 0) + { + return DealFailedRet(ret, "clear poolset"); + } + ret = CreatePoolset(); + if (ret < 0) + { + return DealFailedRet(ret, "create Poolset"); + } + ret = CreatePhysicalPool(); + if (ret < 0) + { + return DealFailedRet(ret, "create physicalpool"); + } + ret = CreateZone(); + if (ret < 0) + { + return DealFailedRet(ret, "create zone"); + } + ret = CreateServer(); + if (ret < 0) + { + return DealFailedRet(ret, "create server"); + } + return ret; + } - LOG(INFO) << "CreatePhysicalPool, send request: " - << request.DebugString(); + int CurvefsTools::ReadClusterMap() + { + std::ifstream fin(FLAGS_cluster_map); + if (fin.is_open()) + { + Json::CharReaderBuilder reader; + JSONCPP_STRING errs; + bool ok = Json::parseFromStream(reader, fin, &clusterMap_, &errs); + fin.close(); + if (!ok) + { + LOG(ERROR) << "Parse cluster map file " << FLAGS_cluster_map + << " fail: " << errs; + 
return -1; + } + } + else + { + LOG(ERROR) << "open cluster map file : " << FLAGS_cluster_map + << " fail."; + return -1; + } + return 0; + } + int CurvefsTools::InitPoolsetData() + { + if (clusterMap_[kPoolsets].isNull()) + { + return 0; + } + + for (const auto &poolset : clusterMap_[kPoolsets]) + { + CurvePoolsetData poolsetData; + if (!poolset[kName].isString()) + { + LOG(ERROR) << "poolset name must be string" << poolset[kName]; + return -1; + } + poolsetData.name = poolset[kName].asString(); + + if (!poolset[kType].isString()) + { + LOG(ERROR) << "poolset type must be string"; + return -1; + } + poolsetData.type = poolset[kType].asString(); + if (poolsetData.type.empty()) + { + LOG(ERROR) << "poolset type must not empty"; + return -1; + } + + poolsetDatas.emplace_back(std::move(poolsetData)); + } + return 0; + } - stub.CreatePhysicalPool(&cntl, &request, &response, nullptr); + int CurvefsTools::InitServerData() + { + if (clusterMap_[kServers].isNull()) + { + LOG(ERROR) << "No servers in cluster map"; + return -1; + } + for (const auto &server : clusterMap_[kServers]) + { + CurveServerData serverData; + if (!server[kName].isString()) + { + LOG(ERROR) << "server name must be string"; + return -1; + } + serverData.serverName = server[kName].asString(); + if (!server[kInternalIp].isString()) + { + LOG(ERROR) << "server internal ip must be string"; + return -1; + } + serverData.internalIp = server[kInternalIp].asString(); + if (!server[kInternalPort].isUInt()) + { + LOG(ERROR) << "server internal port must be uint"; + return -1; + } + serverData.internalPort = server[kInternalPort].asUInt(); + if (!server[kExternalIp].isString()) + { + LOG(ERROR) << "server internal port must be string"; + return -1; + } + serverData.externalIp = server[kExternalIp].asString(); + if (!server[kExternalPort].isUInt()) + { + LOG(ERROR) << "server internal port must be string"; + return -1; + } + serverData.externalPort = server[kExternalPort].asUInt(); + if (!server[kZone].isString()) + { + LOG(ERROR) << "server zone must be string"; + return -1; + } + serverData.zoneName = server[kZone].asString(); + + if (!server[kPhysicalPool].isString()) + { + LOG(ERROR) << "server physicalpool must be string"; + return -1; + } + serverData.physicalPoolName = server[kPhysicalPool].asString(); + + if (!server.isMember(kPoolsetName)) + { + serverData.poolsetName = kDefaultPoolsetName; + } + else if (server[kPoolsetName].isString()) + { + serverData.poolsetName = server[kPoolsetName].asString(); + } + else + { + LOG(ERROR) << "server poolsetName must be string, poolsetName is " + << server[kPoolsetName]; + return -1; + } + + serverDatas.emplace_back(std::move(serverData)); + } + return 0; + } - if (cntl.Failed()) { - LOG(WARNING) << "send rpc get cntl Failed, error context:" - << cntl.ErrorText(); - return kRetCodeRedirectMds; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "CreatePhysicalPool Rpc response fail. 
" - << "Message is :" - << response.DebugString() - << " , physicalPoolName =" - << it.physicalPoolName; - return response.statuscode(); - } else { - LOG(INFO) << "Received CreatePhysicalPool response success, " - << response.DebugString(); - } - } - return 0; -} + int CurvefsTools::InitLogicalPoolData() + { + if (clusterMap_[kLogicalPools].isNull()) + { + LOG(ERROR) << "No servers in cluster map"; + return -1; + } + for (const auto &lgPool : clusterMap_[kLogicalPools]) + { + CurveLogicalPoolData lgPoolData; + if (!lgPool[kName].isString()) + { + LOG(ERROR) << "logicalpool name must be string"; + return -1; + } + lgPoolData.name = lgPool[kName].asString(); + if (!lgPool[kPhysicalPool].isString()) + { + LOG(ERROR) << "logicalpool physicalpool must be string"; + return -1; + } + lgPoolData.physicalPoolName = lgPool[kPhysicalPool].asString(); + if (!lgPool[kType].isInt()) + { + LOG(ERROR) << "logicalpool type must be int"; + return -1; + } + lgPoolData.type = static_cast(lgPool[kType].asInt()); + if (!lgPool[kReplicasNum].isUInt()) + { + LOG(ERROR) << "logicalpool replicasnum must be uint"; + return -1; + } + lgPoolData.replicasNum = lgPool[kReplicasNum].asUInt(); + if (!lgPool[kCopysetNum].isUInt64()) + { + LOG(ERROR) << "logicalpool copysetnum must be uint64"; + return -1; + } + lgPoolData.copysetNum = lgPool[kCopysetNum].asUInt64(); + if (!lgPool[kZoneNum].isUInt64()) + { + LOG(ERROR) << "logicalpool zonenum must be uint64"; + return -1; + } + lgPoolData.zoneNum = lgPool[kZoneNum].asUInt(); + if (!lgPool[kScatterWidth].isUInt()) + { + LOG(ERROR) << "logicalpool scatterwidth must be uint"; + return -1; + } + lgPoolData.scatterwidth = lgPool[kScatterWidth].asUInt(); + if (lgPool[kAllocStatus].isString()) + { + if (lgPool[kAllocStatus].asString() == kAllocStatusAllow) + { + lgPoolData.status = AllocateStatus::ALLOW; + } + else if (lgPool[kAllocStatus].asString() == kAllocStatusDeny) + { + lgPoolData.status = AllocateStatus::DENY; + } + else + { + LOG(ERROR) << "logicalpool status string is invalid!, which is " + << lgPool[kAllocStatus].asString(); + return -1; + } + } + else + { + LOG(WARNING) << "logicalpool not set, use default allow"; + lgPoolData.status = AllocateStatus::ALLOW; + } + lgPoolDatas.emplace_back(lgPoolData); + } + return 0; + } -int CurvefsTools::CreateZone() { - TopologyService_Stub stub(&channel_); - for (auto it : zoneToAdd) { - ZoneRequest request; - request.set_zonename(it.zoneName); - request.set_physicalpoolname(it.physicalPoolName); - request.set_desc(""); - - ZoneResponse response; - - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "CreateZone, send request: " - << request.DebugString(); - - stub.CreateZone(&cntl, &request, &response, nullptr); - - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "CreateZone, errcorde = " - << response.statuscode() - << ", error content:" - << cntl.ErrorText() - << " , zoneName = " - << it.zoneName; - return kRetCodeCommonErr; - } - if (response.statuscode() != 0) { - LOG(ERROR) << "CreateZone Rpc response fail. 
" - << "Message is :" - << response.DebugString() - << " , zoneName = " - << it.zoneName; - return response.statuscode(); - } else { - LOG(INFO) << "Received CreateZone Rpc success, " - << response.DebugString(); - } - } - return 0; -} + int CurvefsTools::ListPoolset(std::list *poolsetInfos) + { + TopologyService_Stub stub(&channel_); + ListPoolsetRequest request; + ListPoolsetResponse response; + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "ListPoolset send request: " << request.DebugString(); + + stub.ListPoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) + { + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "ListPoolset Rpc response fail. " + << "Message is :" << response.DebugString(); + return response.statuscode(); + } + else + { + LOG(INFO) << "Received ListPoolset Rpc response success, " + << response.DebugString(); + } + + for (int i = 0; i < response.poolsetinfos_size(); i++) + { + poolsetInfos->push_back(response.poolsetinfos(i)); + } + return 0; + } -int CurvefsTools::CreateServer() { - TopologyService_Stub stub(&channel_); - for (auto it : serverToAdd) { - ServerRegistRequest request; - request.set_hostname(it.serverName); - request.set_internalip(it.internalIp); - request.set_internalport(it.internalPort); - request.set_externalip(it.externalIp); - request.set_externalport(it.externalPort); - request.set_zonename(it.zoneName); - request.set_physicalpoolname(it.physicalPoolName); - request.set_poolsetname(it.poolsetName); - request.set_desc(""); - - ServerRegistResponse response; - - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "CreateServer, send request: " - << request.DebugString(); - - stub.RegistServer(&cntl, &request, &response, nullptr); - - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "RegistServer, errcorde = " - << response.statuscode() - << ", error content : " - << cntl.ErrorText() - << " , serverName = " - << it.serverName; - return kRetCodeCommonErr; - } - if (response.statuscode() == kTopoErrCodeSuccess) { - LOG(INFO) << "Received RegistServer Rpc response success, " - << response.DebugString(); - } else if (response.statuscode() == kTopoErrCodeIpPortDuplicated) { - LOG(INFO) << "Server already exist"; - } else { - LOG(ERROR) << "RegistServer Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , serverName = " - << it.serverName; - return response.statuscode(); - } - } - return 0; -} + int CurvefsTools::ListPhysicalPool( + std::list *physicalPoolInfos) + { + TopologyService_Stub stub(&channel_); + ListPhysicalPoolRequest request; + ListPhysicalPoolResponse response; + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "ListPhysicalPool send request: " << request.DebugString(); + + stub.ListPhysicalPool(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) + { + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "ListPhysicalPool Rpc response fail. 
" + << "Message is :" << response.DebugString(); + return response.statuscode(); + } + else + { + LOG(INFO) << "Received ListPhysicalPool Rpc response success, " + << response.DebugString(); + } + + for (int i = 0; i < response.physicalpoolinfos_size(); i++) + { + physicalPoolInfos->push_back(response.physicalpoolinfos(i)); + } + return 0; + } -int CurvefsTools::ClearPhysicalPool() { - TopologyService_Stub stub(&channel_); - for (auto it : physicalPoolToDel) { - PhysicalPoolRequest request; - request.set_physicalpoolid(it); - - PhysicalPoolResponse response; - - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "DeletePhysicalPool, send request: " - << request.DebugString(); - - stub.DeletePhysicalPool(&cntl, &request, &response, nullptr); - - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "DeletePhysicalPool, errcorde = " - << response.statuscode() - << ", error content:" - << cntl.ErrorText() - << " , physicalPoolId = " - << it; - return kRetCodeCommonErr; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "DeletePhysicalPool Rpc response fail. " - << "Message is :" - << response.DebugString() - << " , physicalPoolId = " - << it; - return response.statuscode(); - } else { - LOG(INFO) << "Received DeletePhysicalPool Rpc response success, " - << response.statuscode(); - } - } - return 0; -} + int CurvefsTools::ListPhysicalPoolsInPoolset( + PoolsetIdType poolsetid, std::list *physicalPoolInfos) + { + TopologyService_Stub stub(&channel_); + ListPhysicalPoolsInPoolsetRequest request; + ListPhysicalPoolResponse response; + request.add_poolsetid(poolsetid); + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "ListPhysicalPoolsInPoolset, send request: " + << request.DebugString(); + + stub.ListPhysicalPoolsInPoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) + { + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "ListPhysicalPoolsInPoolset Rpc response fail. " + << "Message is :" << response.DebugString() + << " , poolsetid = " << poolsetid; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received ListPhyPoolsInPoolset Rpc resp success," + << response.DebugString(); + } + + for (int i = 0; i < response.physicalpoolinfos_size(); i++) + { + physicalPoolInfos->push_back(response.physicalpoolinfos(i)); + } + return 0; + } -int CurvefsTools::ClearPoolset() { - TopologyService_Stub stub(&channel_); - for (const auto& it : poolsetToDel) { - PoolsetRequest request; - request.set_poolsetid(it); + int CurvefsTools::AddListPoolZone(PoolIdType poolid, + std::list *zoneInfos) + { + TopologyService_Stub stub(&channel_); + ListPoolZoneRequest request; + ListPoolZoneResponse response; + request.set_physicalpoolid(poolid); + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "ListPoolZone, send request: " << request.DebugString(); + + stub.ListPoolZone(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) + { + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "ListPoolZone Rpc response fail. 
" + << "Message is :" << response.DebugString() + << " , physicalpoolid = " << poolid; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received ListPoolZone Rpc response success, " + << response.DebugString(); + } + + for (int i = 0; i < response.zones_size(); i++) + { + zoneInfos->push_back(response.zones(i)); + } + return 0; + } - PoolsetResponse response; + int CurvefsTools::AddListZoneServer(ZoneIdType zoneid, + std::list *serverInfos) + { + TopologyService_Stub stub(&channel_); + ListZoneServerRequest request; + ListZoneServerResponse response; + request.set_zoneid(zoneid); + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "ListZoneServer, send request: " << request.DebugString(); + + stub.ListZoneServer(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) + { + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "ListZoneServer Rpc response fail. " + << "Message is :" << response.DebugString() + << " , zoneid = " << zoneid; + return response.statuscode(); + } + else + { + LOG(INFO) << "ListZoneServer Rpc response success, " + << response.DebugString(); + } + + for (int i = 0; i < response.serverinfo_size(); i++) + { + serverInfos->push_back(response.serverinfo(i)); + } + return 0; + } - brpc::Controller cntl; - cntl.set_max_retry(0); - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); + int CurvefsTools::ScanCluster() + { + // get all poolsets and compare + // De-duplication + for (const auto &poolset : poolsetDatas) + { + if (std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), + [poolset](const CurvePoolsetData &data) + { + return data.name == poolset.name; + }) != poolsetToAdd.end()) + { + continue; + } + poolsetToAdd.push_back(poolset); + } + + std::list poolsetInfos; + int ret = ListPoolset(&poolsetInfos); + if (ret < 0) + { + return ret; + } + + for (auto it = poolsetInfos.begin(); it != poolsetInfos.end();) + { + if (it->poolsetname() == kDefaultPoolsetName) + { + ++it; + continue; + } + + auto ix = std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), + [it](const CurvePoolsetData &data) + { + return data.name == it->poolsetname(); + }); + if (ix != poolsetToAdd.end()) + { + poolsetToAdd.erase(ix); + it++; + } + else + { + poolsetToDel.push_back(it->poolsetid()); + it = poolsetInfos.erase(it); + } + } + + // get all phsicalpool and compare + // De-duplication + for (auto server : serverDatas) + { + if (std::find_if(physicalPoolToAdd.begin(), physicalPoolToAdd.end(), + [server](CurvePhysicalPoolData &data) + { + return data.physicalPoolName == + server.physicalPoolName; + }) != physicalPoolToAdd.end()) + { + continue; + } + CurvePhysicalPoolData poolData; + poolData.physicalPoolName = server.physicalPoolName; + poolData.poolsetName = server.poolsetName.empty() ? 
kDefaultPoolsetName + : server.poolsetName; + physicalPoolToAdd.push_back(poolData); + } + + std::list physicalPoolInfos; + for (auto poolsetid : poolsetToDel) + { + ret = ListPhysicalPoolsInPoolset(poolsetid, &physicalPoolInfos); + if (ret < 0) + { + return ret; + } + } + + for (auto phyPoolinfo : physicalPoolInfos) + { + physicalPoolToDel.push_back(phyPoolinfo.physicalpoolid()); + } + + physicalPoolInfos.clear(); + + for (auto it = poolsetInfos.begin(); it != poolsetInfos.end(); it++) + { + PoolsetIdType poolsetid = it->poolsetid(); + ret = ListPhysicalPoolsInPoolset(poolsetid, &physicalPoolInfos); + if (ret < 0) + { + return ret; + } + } + + for (auto it = physicalPoolInfos.begin(); it != physicalPoolInfos.end();) + { + auto ix = std::find_if( + physicalPoolToAdd.begin(), physicalPoolToAdd.end(), + [it](const CurvePhysicalPoolData &data) + { + return (data.poolsetName == it->poolsetname()) && + (data.physicalPoolName == it->physicalpoolname()); + }); + if (ix != physicalPoolToAdd.end()) + { + physicalPoolToAdd.erase(ix); + it++; + } + else + { + physicalPoolToDel.push_back(it->physicalpoolid()); + it = physicalPoolInfos.erase(it); + } + } + + // get zone and compare + // De-duplication + for (auto server : serverDatas) + { + if (std::find_if(zoneToAdd.begin(), zoneToAdd.end(), + [server](CurveZoneData &data) + { + return (data.physicalPoolName == + server.physicalPoolName) && + (data.zoneName == server.zoneName); + }) != zoneToAdd.end()) + { + continue; + } + CurveZoneData CurveZoneData; + CurveZoneData.physicalPoolName = server.physicalPoolName; + CurveZoneData.zoneName = server.zoneName; + zoneToAdd.push_back(CurveZoneData); + } + + std::list zoneInfos; + for (auto poolid : physicalPoolToDel) + { + ret = AddListPoolZone(poolid, &zoneInfos); + if (ret < 0) + { + return ret; + } + } + + for (auto zinfo : zoneInfos) + { + zoneToDel.push_back(zinfo.zoneid()); + } + + zoneInfos.clear(); + for (auto it = physicalPoolInfos.begin(); it != physicalPoolInfos.end(); + it++) + { + PoolIdType poolid = it->physicalpoolid(); + ret = AddListPoolZone(poolid, &zoneInfos); + if (ret < 0) + { + return ret; + } + } + + for (auto it = zoneInfos.begin(); it != zoneInfos.end();) + { + auto ix = std::find_if( + zoneToAdd.begin(), zoneToAdd.end(), + [it](const CurveZoneData &data) + { + return (data.physicalPoolName == it->physicalpoolname()) && + (data.zoneName == it->zonename()); + }); + if (ix != zoneToAdd.end()) + { + zoneToAdd.erase(ix); + it++; + } + else + { + zoneToDel.push_back(it->zoneid()); + it = zoneInfos.erase(it); + } + } + + // get server and compare + // De-duplication + for (auto server : serverDatas) + { + if (std::find_if(serverToAdd.begin(), serverToAdd.end(), + [server](CurveServerData &data) + { + return data.serverName == server.serverName; + }) != serverToAdd.end()) + { + LOG(WARNING) << "WARING! 
Duplicated Server Name: " + << server.serverName << " , ignored."; + continue; + } + serverToAdd.push_back(server); + } + + std::list serverInfos; + for (auto zoneid : zoneToDel) + { + ret = AddListZoneServer(zoneid, &serverInfos); + if (ret < 0) + { + return ret; + } + } + + for (auto sinfo : serverInfos) + { + serverToDel.push_back(sinfo.serverid()); + } + + serverInfos.clear(); + for (auto it = zoneInfos.begin(); it != zoneInfos.end(); it++) + { + ZoneIdType zoneid = it->zoneid(); + ret = AddListZoneServer(zoneid, &serverInfos); + if (ret < 0) + { + return ret; + } + } + + for (auto it = serverInfos.begin(); it != serverInfos.end(); it++) + { + auto ix = std::find_if( + serverToAdd.begin(), serverToAdd.end(), + [it](const CurveServerData &data) + { + return (data.serverName == it->hostname()) && + (data.zoneName == it->zonename()) && + (data.physicalPoolName == it->physicalpoolname()); + }); + if (ix != serverToAdd.end()) + { + serverToAdd.erase(ix); + } + else + { + serverToDel.push_back(it->serverid()); + } + } + + return 0; + } - LOG(INFO) << "DeletePoolset, send request: " << request.DebugString(); + int CurvefsTools::CreatePoolset() + { + TopologyService_Stub stub(&channel_); + for (const auto &it : poolsetToAdd) + { + if (it.name == kDefaultPoolsetName) + { + continue; + } + + PoolsetRequest request; + request.set_poolsetname(it.name); + request.set_type(it.type); + request.set_desc(""); + + PoolsetResponse response; + + brpc::Controller cntl; + cntl.set_max_retry(0); + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "CreatePoolset, send request: " << request.DebugString(); + + stub.CreatePoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) + { + LOG(WARNING) << "send rpc get cntl Failed, error context:" + << cntl.ErrorText(); + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "CreatePoolset Rpc response fail. " + << "Message is :" << response.DebugString() + << " , poolsetName =" << it.name; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received CreatePoolset response success, " + << response.DebugString(); + } + } + return 0; + } - stub.DeletePoolset(&cntl, &request, &response, nullptr); + int CurvefsTools::CreatePhysicalPool() + { + TopologyService_Stub stub(&channel_); + for (auto it : physicalPoolToAdd) + { + PhysicalPoolRequest request; + request.set_physicalpoolname(it.physicalPoolName); + request.set_desc(""); + request.set_poolsetname(it.poolsetName); + + PhysicalPoolResponse response; + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "CreatePhysicalPool, send request: " + << request.DebugString(); + + stub.CreatePhysicalPool(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) + { + LOG(WARNING) << "send rpc get cntl Failed, error context:" + << cntl.ErrorText(); + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "CreatePhysicalPool Rpc response fail. 
" + << "Message is :" << response.DebugString() + << " , physicalPoolName =" << it.physicalPoolName; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received CreatePhysicalPool response success, " + << response.DebugString(); + } + } + return 0; + } - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "DeletePoolset, errcode = " << response.statuscode() - << ", error content:" << cntl.ErrorText() - << " , PoolsetId = " << it; - return kRetCodeCommonErr; - } else if (response.statuscode() != kTopoErrCodeSuccess && - response.statuscode() != - kTopoErrCodeCannotDeleteDefaultPoolset) { - LOG(ERROR) << "DeletePoolset Rpc response fail. " - << "Message is :" << response.DebugString() - << " , PoolsetId = " << it; - return response.statuscode(); - } else { - LOG(INFO) << "Received DeletePoolset Rpc success, " - << response.DebugString(); - } - } - return 0; -} + int CurvefsTools::CreateZone() + { + TopologyService_Stub stub(&channel_); + for (auto it : zoneToAdd) + { + ZoneRequest request; + request.set_zonename(it.zoneName); + request.set_physicalpoolname(it.physicalPoolName); + request.set_desc(""); + + ZoneResponse response; + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "CreateZone, send request: " << request.DebugString(); + + stub.CreateZone(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || + cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "CreateZone, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , zoneName = " << it.zoneName; + return kRetCodeCommonErr; + } + if (response.statuscode() != 0) + { + LOG(ERROR) << "CreateZone Rpc response fail. " + << "Message is :" << response.DebugString() + << " , zoneName = " << it.zoneName; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received CreateZone Rpc success, " + << response.DebugString(); + } + } + return 0; + } -int CurvefsTools::ClearZone() { - TopologyService_Stub stub(&channel_); - for (auto it : zoneToDel) { - ZoneRequest request; - request.set_zoneid(it); - - ZoneResponse response; - - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "DeleteZone, send request: " - << request.DebugString(); - - stub.DeleteZone(&cntl, &request, &response, nullptr); - - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "DeleteZone, errcorde = " - << response.statuscode() - << ", error content:" - << cntl.ErrorText() - << " , zoneId = " - << it; - return kRetCodeCommonErr; - } else if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "DeleteZone Rpc response fail. 
" - << "Message is :" - << response.DebugString() - << " , zoneId = " - << it; - return response.statuscode(); - } else { - LOG(INFO) << "Received DeleteZone Rpc success, " - << response.DebugString(); - } - } - return 0; -} + int CurvefsTools::CreateServer() + { + TopologyService_Stub stub(&channel_); + for (auto it : serverToAdd) + { + ServerRegistRequest request; + request.set_hostname(it.serverName); + request.set_internalip(it.internalIp); + request.set_internalport(it.internalPort); + request.set_externalip(it.externalIp); + request.set_externalport(it.externalPort); + request.set_zonename(it.zoneName); + request.set_physicalpoolname(it.physicalPoolName); + request.set_poolsetname(it.poolsetName); + request.set_desc(""); + + ServerRegistResponse response; + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "CreateServer, send request: " << request.DebugString(); + + stub.RegistServer(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || + cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "RegistServer, errcorde = " << response.statuscode() + << ", error content : " << cntl.ErrorText() + << " , serverName = " << it.serverName; + return kRetCodeCommonErr; + } + if (response.statuscode() == kTopoErrCodeSuccess) + { + LOG(INFO) << "Received RegistServer Rpc response success, " + << response.DebugString(); + } + else if (response.statuscode() == kTopoErrCodeIpPortDuplicated) + { + LOG(INFO) << "Server already exist"; + } + else + { + LOG(ERROR) << "RegistServer Rpc response fail. " + << "Message is :" << response.DebugString() + << " , serverName = " << it.serverName; + return response.statuscode(); + } + } + return 0; + } -int CurvefsTools::ClearServer() { - TopologyService_Stub stub(&channel_); - for (auto it : serverToDel) { - DeleteServerRequest request; - request.set_serverid(it); - - DeleteServerResponse response; - - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "DeleteServer, send request: " - << request.DebugString(); - - stub.DeleteServer(&cntl, &request, &response, nullptr); - - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "DeleteServer, errcorde = " - << response.statuscode() - << ", error content:" - << cntl.ErrorText() - << " , serverId = " - << it; - return kRetCodeCommonErr; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "DeleteServer Rpc response fail. 
" - << "Message is :" - << response.DebugString() - << " , serverId = " - << it; - return response.statuscode(); - } else { - LOG(INFO) << "Received DeleteServer Rpc response success, " - << response.DebugString(); - } - } - return 0; -} + int CurvefsTools::ClearPhysicalPool() + { + TopologyService_Stub stub(&channel_); + for (auto it : physicalPoolToDel) + { + PhysicalPoolRequest request; + request.set_physicalpoolid(it); + + PhysicalPoolResponse response; + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "DeletePhysicalPool, send request: " + << request.DebugString(); + + stub.DeletePhysicalPool(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || + cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "DeletePhysicalPool, errcorde = " + << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , physicalPoolId = " << it; + return kRetCodeCommonErr; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "DeletePhysicalPool Rpc response fail. " + << "Message is :" << response.DebugString() + << " , physicalPoolId = " << it; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received DeletePhysicalPool Rpc response success, " + << response.statuscode(); + } + } + return 0; + } -int CurvefsTools::SetChunkServer() { - SetChunkServerStatusRequest request; - request.set_chunkserverid(FLAGS_chunkserver_id); - if (FLAGS_chunkserver_status == "pendding") { - request.set_chunkserverstatus(ChunkServerStatus::PENDDING); - } else if (FLAGS_chunkserver_status == "readwrite") { - request.set_chunkserverstatus(ChunkServerStatus::READWRITE); - } else if (FLAGS_chunkserver_status == "retired") { - LOG(ERROR) << "SetChunkServer retired not unsupport!"; - return kRetCodeCommonErr; - } else { - LOG(ERROR) << "SetChunkServer param error, unknown chunkserver status"; - return kRetCodeCommonErr; - } + int CurvefsTools::ClearPoolset() + { + TopologyService_Stub stub(&channel_); + for (const auto &it : poolsetToDel) + { + PoolsetRequest request; + request.set_poolsetid(it); + + PoolsetResponse response; + + brpc::Controller cntl; + cntl.set_max_retry(0); + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "DeletePoolset, send request: " << request.DebugString(); + + stub.DeletePoolset(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || + cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "DeletePoolset, errcode = " << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , PoolsetId = " << it; + return kRetCodeCommonErr; + } + else if (response.statuscode() != kTopoErrCodeSuccess && + response.statuscode() != + kTopoErrCodeCannotDeleteDefaultPoolset) + { + LOG(ERROR) << "DeletePoolset Rpc response fail. 
" + << "Message is :" << response.DebugString() + << " , PoolsetId = " << it; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received DeletePoolset Rpc success, " + << response.DebugString(); + } + } + return 0; + } - SetChunkServerStatusResponse response; - TopologyService_Stub stub(&channel_); - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "SetChunkServerStatusRequest, send request: " - << request.DebugString(); - - stub.SetChunkServer(&cntl, &request, &response, nullptr); - - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "SetChunkServerStatusRequest, errcorde = " - << response.statuscode() - << ", error content:" - << cntl.ErrorText(); - return kRetCodeCommonErr; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "SetChunkServerStatusRequest Rpc response fail. " - << "Message is :" - << response.DebugString(); - return response.statuscode(); - } else { - LOG(INFO) << "Received SetChunkServerStatusRequest Rpc " - << "response success, " - << response.DebugString(); - } - return 0; -} + int CurvefsTools::ClearZone() + { + TopologyService_Stub stub(&channel_); + for (auto it : zoneToDel) + { + ZoneRequest request; + request.set_zoneid(it); + + ZoneResponse response; + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "DeleteZone, send request: " << request.DebugString(); + + stub.DeleteZone(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || + cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "DeleteZone, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , zoneId = " << it; + return kRetCodeCommonErr; + } + else if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "DeleteZone Rpc response fail. 
" + << "Message is :" << response.DebugString() + << " , zoneId = " << it; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received DeleteZone Rpc success, " + << response.DebugString(); + } + } + return 0; + } -int CurvefsTools::ScanPoolset() { - for (const auto& poolset : poolsetDatas) { - if (std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), - [poolset](CurvePoolsetData& data) { - return data.name == poolset.name; - }) != poolsetToAdd.end()) { - continue; - } - // CurvePoolsetData poolsetData; - // poolsetData.name = poolset.; - poolsetToAdd.push_back(poolset); - } - std::list poolsetInfos; - int ret = ListPoolset(&poolsetInfos); - if (ret < 0) { - return ret; - } - for (auto it = poolsetInfos.begin(); it != poolsetInfos.end();) { - auto ix = std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), - [it](const CurvePoolsetData& data) { - return data.name == it->poolsetname(); - }); - if (ix != poolsetToAdd.end()) { - poolsetToAdd.erase(ix); - it++; - } else { - poolsetToDel.push_back(static_cast(it->poolsetid())); - it = poolsetInfos.erase(it); - } - } - return 0; -} + int CurvefsTools::ClearServer() + { + TopologyService_Stub stub(&channel_); + for (auto it : serverToDel) + { + DeleteServerRequest request; + request.set_serverid(it); + + DeleteServerResponse response; + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "DeleteServer, send request: " << request.DebugString(); + + stub.DeleteServer(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || + cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "DeleteServer, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , serverId = " << it; + return kRetCodeCommonErr; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "DeleteServer Rpc response fail. " + << "Message is :" << response.DebugString() + << " , serverId = " << it; + return response.statuscode(); + } + else + { + LOG(INFO) << "Received DeleteServer Rpc response success, " + << response.DebugString(); + } + } + return 0; + } -int CurvefsTools::SetLogicalPool() { - SetLogicalPoolRequest request; - request.set_logicalpoolid(FLAGS_logicalpool_id); - if (FLAGS_logicalpool_status == "allow") { - request.set_status(AllocateStatus::ALLOW); - } else if (FLAGS_logicalpool_status == "deny") { - request.set_status(AllocateStatus::DENY); - } else { - LOG(ERROR) << "SetLogicalPool param error, unknown logicalpool status"; - return kRetCodeCommonErr; - } - SetLogicalPoolResponse response; - TopologyService_Stub stub(&channel_); - brpc::Controller cntl; - cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); - cntl.set_log_id(1); - - LOG(INFO) << "SetLogicalPool, send request: " - << request.DebugString(); - - stub.SetLogicalPool(&cntl, &request, &response, nullptr); - - if (cntl.ErrorCode() == EHOSTDOWN || - cntl.ErrorCode() == brpc::ELOGOFF) { - return kRetCodeRedirectMds; - } else if (cntl.Failed()) { - LOG(ERROR) << "SetLogicalPool, errcorde = " - << response.statuscode() - << ", error content:" - << cntl.ErrorText(); - return kRetCodeCommonErr; - } - if (response.statuscode() != kTopoErrCodeSuccess) { - LOG(ERROR) << "SetLogicalPool Rpc response fail. 
" - << "Message is :" - << response.DebugString(); - return response.statuscode(); - } else { - LOG(INFO) << "Received SetLogicalPool Rpc " - << "response success, " - << response.DebugString(); - } - return 0; -} + int CurvefsTools::SetChunkServer() + { + SetChunkServerStatusRequest request; + request.set_chunkserverid(FLAGS_chunkserver_id); + if (FLAGS_chunkserver_status == "pendding") + { + request.set_chunkserverstatus(ChunkServerStatus::PENDDING); + } + else if (FLAGS_chunkserver_status == "readwrite") + { + request.set_chunkserverstatus(ChunkServerStatus::READWRITE); + } + else if (FLAGS_chunkserver_status == "retired") + { + LOG(ERROR) << "SetChunkServer retired not unsupport!"; + return kRetCodeCommonErr; + } + else + { + LOG(ERROR) << "SetChunkServer param error, unknown chunkserver status"; + return kRetCodeCommonErr; + } + + SetChunkServerStatusResponse response; + TopologyService_Stub stub(&channel_); + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "SetChunkServerStatusRequest, send request: " + << request.DebugString(); + + stub.SetChunkServer(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "SetChunkServerStatusRequest, errcorde = " + << response.statuscode() + << ", error content:" << cntl.ErrorText(); + return kRetCodeCommonErr; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "SetChunkServerStatusRequest Rpc response fail. " + << "Message is :" << response.DebugString(); + return response.statuscode(); + } + else + { + LOG(INFO) << "Received SetChunkServerStatusRequest Rpc " + << "response success, " << response.DebugString(); + } + return 0; + } -} // namespace topology -} // namespace mds -} // namespace curve + int CurvefsTools::ScanPoolset() + { + for (const auto &poolset : poolsetDatas) + { + if (std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), + [poolset](CurvePoolsetData &data) + { + return data.name == poolset.name; + }) != poolsetToAdd.end()) + { + continue; + } + // CurvePoolsetData poolsetData; + // poolsetData.name = poolset.; + poolsetToAdd.push_back(poolset); + } + std::list poolsetInfos; + int ret = ListPoolset(&poolsetInfos); + if (ret < 0) + { + return ret; + } + for (auto it = poolsetInfos.begin(); it != poolsetInfos.end();) + { + auto ix = std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), + [it](const CurvePoolsetData &data) + { + return data.name == it->poolsetname(); + }); + if (ix != poolsetToAdd.end()) + { + poolsetToAdd.erase(ix); + it++; + } + else + { + poolsetToDel.push_back(static_cast(it->poolsetid())); + it = poolsetInfos.erase(it); + } + } + return 0; + } + int CurvefsTools::SetLogicalPool() + { + SetLogicalPoolRequest request; + request.set_logicalpoolid(FLAGS_logicalpool_id); + if (FLAGS_logicalpool_status == "allow") + { + request.set_status(AllocateStatus::ALLOW); + } + else if (FLAGS_logicalpool_status == "deny") + { + request.set_status(AllocateStatus::DENY); + } + else + { + LOG(ERROR) << "SetLogicalPool param error, unknown logicalpool status"; + return kRetCodeCommonErr; + } + SetLogicalPoolResponse response; + TopologyService_Stub stub(&channel_); + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "SetLogicalPool, send request: " << request.DebugString(); + + stub.SetLogicalPool(&cntl, &request, &response, nullptr); + + if 
(cntl.ErrorCode() == EHOSTDOWN || cntl.ErrorCode() == brpc::ELOGOFF) + { + return kRetCodeRedirectMds; + } + else if (cntl.Failed()) + { + LOG(ERROR) << "SetLogicalPool, errcorde = " << response.statuscode() + << ", error content:" << cntl.ErrorText(); + return kRetCodeCommonErr; + } + if (response.statuscode() != kTopoErrCodeSuccess) + { + LOG(ERROR) << "SetLogicalPool Rpc response fail. " + << "Message is :" << response.DebugString(); + return response.statuscode(); + } + else + { + LOG(INFO) << "Received SetLogicalPool Rpc " + << "response success, " << response.DebugString(); + } + return 0; + } + } // namespace topology + } // namespace mds +} // namespace curve -int main(int argc, char **argv) { +int main(int argc, char **argv) +{ google::InitGoogleLogging(argv[0]); google::ParseCommandLineFlags(&argc, &argv, false); int ret = 0; curve::mds::topology::CurvefsTools tools; - if (tools.Init() < 0) { + if (tools.Init() < 0) + { LOG(ERROR) << "curvefsTool init error."; return kRetCodeCommonErr; } int maxTry = tools.GetMaxTry(); int retry = 0; - for (; retry < maxTry; retry++) { + for (; retry < maxTry; retry++) + { ret = tools.TryAnotherMdsAddress(); - if (ret < 0) { + if (ret < 0) + { return kRetCodeCommonErr; } std::string operation = FLAGS_op; - if (operation == "create_logicalpool") { + if (operation == "create_logicalpool") + { ret = tools.HandleCreateLogicalPool(); - } else if (operation == "create_physicalpool") { + } + else if (operation == "create_physicalpool") + { ret = tools.HandleBuildCluster(); - } else if (operation == "set_chunkserver") { + } + else if (operation == "set_chunkserver") + { ret = tools.SetChunkServer(); - } else if (operation == "set_logicalpool") { + } + else if (operation == "set_logicalpool") + { ret = tools.SetLogicalPool(); - } else { + } + else + { LOG(ERROR) << "undefined op."; ret = kRetCodeCommonErr; break; } - if (ret != kRetCodeRedirectMds) { + if (ret != kRetCodeRedirectMds) + { break; } } - if (retry >= maxTry) { + if (retry >= maxTry) + { LOG(ERROR) << "rpc retry times exceed."; return kRetCodeCommonErr; } - if (ret < 0) { + if (ret < 0) + { LOG(ERROR) << "exec fail, ret = " << ret; - } else { + } + else + { LOG(INFO) << "exec success, ret = " << ret; } diff --git a/tools/snaptool/queryclone.py b/tools/snaptool/queryclone.py index a80d746f7a..cde76bc130 100644 --- a/tools/snaptool/queryclone.py +++ b/tools/snaptool/queryclone.py @@ -5,18 +5,21 @@ import common import time -status = ['done', 'cloning', 'recovering', 'cleaning', 'errorCleaning', 'error', 'retrying', 'metaInstalled'] +status = ['done', 'cloning', 'recovering', 'cleaning', + 'errorCleaning', 'error', 'retrying', 'metaInstalled'] filetype = ['file', 'snapshot'] clonestep = ['createCloneFile', 'createCloneMeta', 'createCloneChunk', 'completeCloneMeta', - 'recoverChunk', 'changeOwner', 'renameCloneFile', 'completeCloneFile', 'end'] + 'recoverChunk', 'changeOwner', 'renameCloneFile', 'completeCloneFile', 'end'] tasktype = ["clone", "recover"] islazy = ["notlazy", "lazy"] + def __get_status(args): if args.status: return status.index(args.status) return None + def __get_type(args): if args.clone: return tasktype.index("clone") @@ -24,12 +27,14 @@ def __get_type(args): return tasktype.index("recover") return None + def query_clone_recover(args): - totalCount, records = curltool.get_clone_list_all(args.user, args.src, args.dest, args.taskid, __get_type(args), __get_status(args)) + totalCount, records = curltool.get_clone_list_all( + args.user, args.src, args.dest, args.taskid, 
__get_type(args), __get_status(args)) if totalCount == 0: print "no record found" return - # 提高打印可读性 + # Improving Print Readability for record in records: code = record['TaskStatus'] record['TaskStatus'] = status[code] @@ -42,15 +47,18 @@ def query_clone_recover(args): code = record['IsLazy'] record['IsLazy'] = islazy[code] time_temp = record['Time'] - record['Time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_temp/1000000)) + record['Time'] = time.strftime( + "%Y-%m-%d %H:%M:%S", time.localtime(time_temp/1000000)) notes = {} heads = ['UUID', 'User', 'TaskType', 'Src', 'File', - 'Time', 'FileType', 'IsLazy', 'NextStep', 'TaskStatus', 'Progress'] + 'Time', 'FileType', 'IsLazy', 'NextStep', 'TaskStatus', 'Progress'] common.printTable(heads, records, notes) + def clone_recover_status(args): - totalCount, records = curltool.get_clone_list_all(args.user, args.src, args.dest, None, __get_type(args)) + totalCount, records = curltool.get_clone_list_all( + args.user, args.src, args.dest, None, __get_type(args)) if totalCount == 0: print "no record found" return @@ -64,17 +72,17 @@ def clone_recover_status(args): clone_statistics[status_name].append(record['UUID']) else: clone_statistics[status_name] = [record['UUID']] - else : + else: if recover_statistics.has_key(status_name): recover_statistics[status_name].append(record['UUID']) else: recover_statistics[status_name] = [record['UUID']] if clone_statistics: print "clone status:" - for k,v in clone_statistics.items(): + for k, v in clone_statistics.items(): print("%s : %d" % (k, len(v))) if recover_statistics: print "recover status:" - for k,v in recover_statistics.items(): - print("%s : %d" % (k, len(v))) \ No newline at end of file + for k, v in recover_statistics.items(): + print("%s : %d" % (k, len(v))) diff --git a/ut.sh b/ut.sh index 8a3599f800..d405ccc028 100644 --- a/ut.sh +++ b/ut.sh @@ -86,7 +86,7 @@ get_options() { main() { get_options "$@" - sudo docker run --rm -w /var/lib/jenkins/workspace/curve/curve_multijob/ -v /var/lib/jenkins:/var/lib/jenkins -v $(pwd):/var/lib/jenkins/workspace/curve/curve_multijob/ -v ${HOME}:${HOME} --user $(id -u ${USER}):$(id -g ${USER}) -v /etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro -v /etc/sudoers:/etc/sudoers:ro -v /etc/shadow:/etc/shadow:ro --ulimit core=-1 --privileged opencurvedocker/curve-base:build-$g_os bash util/ut_in_image.sh "$@" + sudo docker run --rm -w /var/lib/jenkins/workspace/curve/curve_multijob/ -v /var/lib/jenkins:/var/lib/jenkins -v $(pwd):/var/lib/jenkins/workspace/curve/curve_multijob/ -e BUILD_NUMBER=$BUILD_NUMBER -v ${HOME}:${HOME} --user $(id -u ${USER}):$(id -g ${USER}) -v /etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro -v /etc/sudoers:/etc/sudoers:ro -v /etc/shadow:/etc/shadow:ro --ulimit core=-1 --privileged opencurvedocker/curve-base:build-$g_os bash util/ut_in_image.sh "$@" } ############################ MAIN() diff --git a/util/ut_in_image.sh b/util/ut_in_image.sh index ee2010fe28..f0650a29f4 100755 --- a/util/ut_in_image.sh +++ b/util/ut_in_image.sh @@ -133,8 +133,8 @@ do now_test=`ps -ef | grep test | grep -v 'test[0-9]' | grep -v grep | awk '{print $8}'` echo "now_test case is "$now_test - for i in `find ${test_bin_dirs} -type f -executable -exec file -i '{}' \; | grep -E 'executable|sharedlib' | grep "charset=binary" | grep -v ".so"|grep test | grep -Ev $exclude_test_names | awk -F":" '{print $1'}`;do a=`cat $i.log | grep "FAILED ]" | wc -l`;if [ $a -gt 0 ];then f1=`cat $i.log | grep "FAILED ]"`;f1_file="${i}.log"; echo "fail test is $i"; 
check=1; fi;done - for i in `find ${test_bin_dirs} -type f -executable -exec file -i '{}' \; | grep -E 'executable|sharedlib' | grep "charset=binary" | grep -v ".so"|grep test | grep -Ev $exclude_test_names | awk -F":" '{print $1'}`;do b=`cat $i.log | grep "Failure" | wc -l`;if [ $b -gt 0 ];then f2=`cat $i.log | grep "Failure"`; f2_file="${i}.log";echo "fail test is $i"; check=1; fi;done + for i in `find ${test_bin_dirs} -type f -executable -exec file -i '{}' \; | grep -E 'executable|sharedlib' | grep "charset=binary" | grep -v ".so"|grep test | grep -Ev $exclude_test_names | awk -F":" '{print $1'}`;do a=`cat $i.log | grep -a "FAILED ]" | wc -l`;if [ $a -gt 0 ];then f1=`cat $i.log | grep -a "FAILED ]"`;f1_file="${i}.log"; echo "fail test is $i"; check=1; fi;done + for i in `find ${test_bin_dirs} -type f -executable -exec file -i '{}' \; | grep -E 'executable|sharedlib' | grep "charset=binary" | grep -v ".so"|grep test | grep -Ev $exclude_test_names | awk -F":" '{print $1'}`;do b=`cat $i.log | grep -a "Failure" | wc -l`;if [ $b -gt 0 ];then f2=`cat $i.log | grep -a "Failure"`; f2_file="${i}.log";echo "fail test is $i"; check=1; fi;done if [ $check -eq 1 ];then echo "=========================test fail,Here is the logs of failed use cases=========================" echo "=========================test fail,Here is the logs of failed use cases=========================" From 917684b4d5addee7fe15b6926063bfbd01a8121e Mon Sep 17 00:00:00 2001 From: koko2pp Date: Mon, 27 Nov 2023 19:45:57 +0800 Subject: [PATCH 8/8] fix the file --- src/kvstorageclient/etcd_client.h | 100 ++++++++++++++---------------- 1 file changed, 48 insertions(+), 52 deletions(-) diff --git a/src/kvstorageclient/etcd_client.h b/src/kvstorageclient/etcd_client.h index b9c2266d83..16aec44e6a 100644 --- a/src/kvstorageclient/etcd_client.h +++ b/src/kvstorageclient/etcd_client.h @@ -24,10 +24,9 @@ #define SRC_KVSTORAGECLIENT_ETCD_CLIENT_H_ #include - #include -#include #include +#include namespace curve { namespace kvstorage { @@ -44,7 +43,7 @@ class KVStorageClient { * * @return error code EtcdErrCode */ - virtual int Put(const std::string& key, const std::string& value) = 0; + virtual int Put(const std::string &key, const std::string &value) = 0; /** * @brief PutRewithRevision store key-value @@ -55,9 +54,8 @@ class KVStorageClient { * * @return error code EtcdErrCode */ - virtual int PutRewithRevision(const std::string& key, - const std::string& value, - int64_t* revision) = 0; + virtual int PutRewithRevision(const std::string &key, + const std::string &value, int64_t *revision) = 0; /** * @brief Get Get the value of the specified key @@ -67,7 +65,7 @@ class KVStorageClient { * * @return error code */ - virtual int Get(const std::string& key, std::string* out) = 0; + virtual int Get(const std::string &key, std::string *out) = 0; /** * @brief List Get all the values ​​between [startKey, endKey) @@ -78,16 +76,15 @@ class KVStorageClient { * * @return error code */ - virtual int List(const std::string& startKey, const std::string& endKey, - std::vector* values) = 0; + virtual int List(const std::string &startKey, const std::string &endKey, + std::vector *values) = 0; /** * @brief List all the key and values between [startKey, endKey) * * @param[in] startKey * @param[in] endKey - * @param[out] out store key/value pairs that key is between [startKey, - * endKey) + * @param[out] out store key/value pairs that key is between [startKey, endKey) * * @return error code */ @@ -101,7 +98,7 @@ class KVStorageClient { * * @return error code */ 
-    virtual int Delete(const std::string& key) = 0;
+    virtual int Delete(const std::string &key) = 0;
 
     /**
      * @brief DeleteRewithRevision Delete the value of the specified key
      *
      * @return error code
      */
-    virtual int DeleteRewithRevision(const std::string& key,
-                                     int64_t* revision) = 0;
+    virtual int DeleteRewithRevision(
+        const std::string &key, int64_t *revision) = 0;
 
     /*
-     * @brief TxnN Operate transactions in the order of ops[0] ops[1] ...,
-     * currently 2 and 3 operations are supported //NOLINT
-     *
-     * @param[in] ops Operation set
-     *
-     * @return error code
-     */
-    virtual int TxnN(const std::vector<Operation>& ops) = 0;
+     * @brief TxnN Operate transactions in the order of ops[0] ops[1] ..., currently 2 and 3 operations are supported //NOLINT
+     *
+     * @param[in] ops Operation set
+     *
+     * @return error code
+     */
+    virtual int TxnN(const std::vector<Operation> &ops) = 0;
 
     /**
      * @brief CompareAndSwap Transaction, to achieve CAS
      *
      * @return error code
      */
-    virtual int CompareAndSwap(const std::string& key, const std::string& preV,
-                               const std::string& target) = 0;
+    virtual int CompareAndSwap(const std::string &key, const std::string &preV,
+                               const std::string &target) = 0;
 };
 
 // encapsulate the c header file of etcd generated by go compilation
 class EtcdClientImp : public KVStorageClient {
  public:
     EtcdClientImp() {}
-    ~EtcdClientImp() { CloseClient(); }
+    ~EtcdClientImp() {
+        CloseClient();
+    }
 
     /**
      * @brief Init init the etcdclient, a global var in go
      */
 
     void CloseClient();
 
-    int Put(const std::string& key, const std::string& value) override;
+    int Put(const std::string &key, const std::string &value) override;
 
-    int PutRewithRevision(const std::string& key, const std::string& value,
-                          int64_t* revision) override;
+    int PutRewithRevision(const std::string &key, const std::string &value,
+                          int64_t *revision) override;
 
-    int Get(const std::string& key, std::string* out) override;
+    int Get(const std::string &key, std::string *out) override;
 
-    int List(const std::string& startKey, const std::string& endKey,
-             std::vector<std::string>* values) override;
+    int List(const std::string &startKey,
+             const std::string &endKey, std::vector<std::string> *values) override;
 
     int List(const std::string& startKey, const std::string& endKey,
-             std::vector<std::map<std::string, std::string>>* out) override;
+             std::vector<std::map<std::string, std::string> >* out) override;
 
-    int Delete(const std::string& key) override;
+    int Delete(const std::string &key) override;
 
-    int DeleteRewithRevision(const std::string& key,
-                             int64_t* revision) override;
+    int DeleteRewithRevision(
+        const std::string &key, int64_t *revision) override;
 
-    int TxnN(const std::vector<Operation>& ops) override;
+    int TxnN(const std::vector<Operation> &ops) override;
 
-    int CompareAndSwap(const std::string& key, const std::string& preV,
-                       const std::string& target) override;
+    int CompareAndSwap(const std::string &key, const std::string &preV,
+                       const std::string &target) override;
 
-    virtual int GetCurrentRevision(int64_t* revision);
+    virtual int GetCurrentRevision(int64_t *revision);
 
     /**
      * @brief ListWithLimitAndRevision
     *        list all the key-value pairs of the specified number and revision
     *        between [startKey, endKey)
     *
     * @param[in] startKey
     * @param[in] endKey
     * @param[in] limit the specified number
     * @param[in] revision get the key <= revision
     * @param[out] values the value vector of all the key-value pairs
     * @param[out] lastKey the last key of the vector
     */
-    virtual int ListWithLimitAndRevision(const std::string& startKey,
-                                         const std::string& endKey,
-                                         int64_t limit, int64_t revision,
-                                         std::vector<std::string>* values,
-                                         std::string* lastKey);
+    virtual int ListWithLimitAndRevision(const std::string &startKey,
+        const std::string &endKey, int64_t limit, int64_t revision,
+        std::vector<std::string> *values, std::string *lastKey);
 
     /**
      * @brief CampaignLeader Leader campaign through etcd, return directly if
     *                            leader when the session expired after
     *                            client offline.
     * @param[in] electionTimeoutMs the timeout,0 will block always
     * @param[out] leaderOid leader's objectId,recorded in objectManager
     *
     * @return EtcdErrCode::EtcdCampaignLeaderSuccess success,others fail
     */
-    virtual int CampaignLeader(const std::string& pfx,
-                               const std::string& leaderName,
-                               uint32_t sessionInterSec,
-                               uint32_t electionTimeoutMs, uint64_t* leaderOid);
+    virtual int CampaignLeader(
+        const std::string &pfx, const std::string &leaderName,
+        uint32_t sessionInterSec, uint32_t electionTimeoutMs,
+        uint64_t *leaderOid);
 
     /**
      * @brief LeaderObserve
     *
     * @return if returned, the session between mds and etcd expired
     */
-    virtual int LeaderObserve(uint64_t leaderOid,
-                              const std::string& leaderName);
+    virtual int LeaderObserve(
+        uint64_t leaderOid, const std::string &leaderName);
 
     /**
      * @brief LeaderResign the leader resigns initiatively, the other peers
     *
     * @return EtcdErrCode::EtcdLeaderResiginSuccess resign success
     *         EtcdErrCode::EtcdLeaderResiginErr resign fail
     */
-    virtual int LeaderResign(uint64_t leaderOid, uint64_t timeoutMs);
+    virtual int  LeaderResign(uint64_t leaderOid, uint64_t timeoutMs);
 
     // for test
     void SetTimeout(int time);

zM6%(~9G%#Y~37q@m zzjU^(-we;DFs=?7UZUUtr$EQy-y3ftk+12MBX&;=?lMkjXyk?W9FMwX)riFv4$(lFVj?&W(Vjk zk0)QL7PCeX^t+B8dONezUr#3^s7FZe#qt=Lp)+IiC%?hXj}(pa+g7(aTO+N%-?JBK z63PxZIQhlm)eLC>vllDof9T=>4CZ1fdpO5MpVJm}2cdTW`cW1@i=_bwkxma0TQfjo zPsvv7_>bhX2)V>hs+8f4e?yOMpR#fb(qSZbmhh^F$177X1VmHQ-yzJ8;;8-YEiTU zQHgnoq%Fxzwrzn8T&a}>IUU(!Uj32BDJRqMb)i{3IN1G%Jl94Nd->66kX@2U%CAm4(D z;p!&7`4*$|>xNirUMRyFxifZQ0^Tw^)8>@tWN;CeH(iq5x2NfS6w!AJb9;lMfwdLC zdSr#kedjN_4)RmKPH9aPDyoMP65TlOko`S>yLZ(BskOE`+amu!f~tr^%H76YuSZ%{&KHs9ze~ zldhX?V_fs%2t!Z+A%j>QFQVXUN8uhMvz$f-4ia0o#C*1G2~S$ude zd?yeO6JBnP>m&G+L&-u1C;O`g_n40U-)Mav^y|jXu_TFRjnQ zf3GzV@ZP$OD{M4YyS|fDAR?oXJF7!Q_(ipFo7>ye{s4{5zN~92q&nMMzzhh9{dAoHAX9{P{&qB2iubGaNFL>)9)ae_wn5}dq3TM60k-+scZ zp+huRXC%W+Z5H`BH9B3Rd&7nNg3R@8jS4e;zjwoJ=x3@D6WKB`qbwvmIO{ZKNne440E8ybgS z75ogj8s&TVDEQN83I)U!suUY-rd1vto)37>n(DZ#V^SF9r{;kHZ>sRJe@SKpL$=rs zHx;REj_SsSfxJBjZK!g*sU=3c_sWVulwBElco{Y#J-tYPT2NUEEs0!EAx^q3+QEM5 z;oe%!E~zc0di0M?0d->>`+q_e1z|dyUYNbVS$2b@&k{VkQe6=I$gREw>uJ1nT!sO2 z-XDY~g$<2e2X3ll3vJT)+PT~#();ni0)%*|KraeL=MYw$0&WloA2-W(iNQh;LN1lE zVnyz!l8eUc&q1#RSxC$fZI6AiyAT6rH|ryT2(!GiKlP{5ZZE9`;b ztoB)o6FalEV)2cp>4}!2vu^Byy5x-UgK$byE(N^*H`I62DbtA|XO825S=5%~-;38T zot#Zy1!Rx{?_ zK4l%F)jQ*DW-?PL^fw`K;Xxz>AMY=(pzV|PbZc|vT$*q5#iT$X!MDGnQZ}~Wv`PV^ zV_xjOicZb1>`+7HMQ`XOl}{0SunZ-2)+`ik6uDe3@SKFlv>n*u3f|UkYc6MQO=K_^ zY?tvAD=4CgA>fiFfU%gUYUN_Qpd#^-ReD;k9qL_y5B1+cx@dRDi1|wMrnI^ z&vGH0r0~%}OcuT>zQJ`gIMNY=3Tpr65Eh^!&bjy=Kb(8J~NWO^f$P=tO~I(utbG6tJl>p=1^ zy(_U(4(`*b&_=itZK!BK!#`4^b)fK_2xGPLKA8lZ3Sbi7!V>BacC#yqMSjtCuixc$ zl3d9*QTN5bM}h10gUI2Dk0WSAI+IqL>c?h1&zqR#iY&|H(+Y)dq!QeMh3E6}uCN+_ zfXR7K|2{Q8r&N!&$$}vart;{|Qi1xFzbwxWBWGkfCP7yzTkw5T3>!~Y;suy!HrK7Si(wi}VBfcdz`aGHw4I)Zx`8#C3J#)qY*#v zKJv~=i#e#ond!%<>zdi(EZv;(y!Ti-H<7r$kG6k`TW894e$5d=kFIW9n}!vV(&1U> zh%I*1vF?Hw$coQ*WOqVruMEjFhwMYy^X*Delhbf^ua-;E2w|^OJyl~4ERZw`Me?$< z=b-8uLzZ|faExB#fI8vE>T=XP*c@H`m{4KSE2zk4iyeb*HILPLH5c-Ar3}HgB*cxO zMxNzhl8pz3rk(3%CdK8VQ3*R3xG(<)49q-~lbAFbgD?)?CeYDG38jcXU~g1&OBM6w z9Yk6)ZG4kkc`)hNbX>;y!eYDfNxv@`S6oO!f{!k*>)B`dz5KydbbFX_;-ko)Bg!cV zHiWe87B@Xr2}~&PZ=ZLuQhGo(5XMqdTky)wNc0!AK1`vM!AzSJ@qV`K{BVA;*r;SL z5yZNS<6U>`&iezMLg1&{KQ(dW$^XUm%+UD6Glj)3@VXJGxv8LwgRA4 zG9lpMX7VUFw^)CYqg(7xoTmb|(Ph`ezscj5$?>HtoCm9lD-V@E_{M%j|1hf-h|-*w zX-|X@^02u~^BDb(3Je=Q{9K;rWdVN5kkq@6Ad(*$TvRxTRMu<4t`+=<)`tDtFb=@kHCIZ1%X-|>)N)FV@?TT!gLFc^PTm-6u~9XL2Q^!&seCcjCA>{^Y_Xu z^tlCtorl44A9Vi+6h8!bO}r|R%hXf;*h;cwcgidkUV$w*Y(8&m0dN#`e3FfkELf*z zQr5wM&52HqbaWKd${~SmutQ2&H7msUi{M~m@QS|x(F!qZMz2 zkOSr>ivsfSb3^mAH6U#~y58AOOEICKY{yeOY8o-6cR!wnhmx3tm&LZ(Ra)wATjJkl zlxno^$s;q^07Tf`f8A7P!E5jM_>WaR*hLc~Z=p$pX*KsNOP$9TC zfCNv;i&NmLlsLR4!_w*O{_L-9Tx!n{?c!uSP*{-{v%2Zhx=1p^P!?ttmP}BI6zb&q z!dB1!92eUe{Wxa4$*7B;; zZZ>#1L8%|}d6FyGTj*Hy&?f2<{O1(pA+OhwHnvW{-@*FL@x*xD`*pT7lDQ=BG^|Z! ze@ei&!MI?*F>bY`kQ3kVD1GToIL5N!cebQKeAuLHPql{=QOaA9tix9N;VNrNLEYnq zy!i}Ycoggt^GlNDfTgeN2?X5Ck{QMlP0esS+;h)}g+wYD(5+1G5&@ z3}m0kvC~oPVkL_;%of}|`*y@?3c|WfJzt@vEti51t=3`XZQucYkTw5j-N_ICDe$!7@EfH zgBHCrHm`l=UOc?-uT{Y4Sgb`(Rna^m_EMr>w;zFJL# zh#2NqYtq|uVOynFp)9MAPIwKD8^w7O9%%9e93Atmo0+FuG`u38regkMiz+rT92zi1 z?OJXeetq!@f7?=R@xmT6F2 zC)am@LXaUDVTO(Dp)sO_8TmPkNQ#{HB+^kNj0#+?+@X1z-oa1#K9IsPw7QNlcN!_! zVOuysNBUd*v*~%H2TygEB@4A-H2EoaF2QWyP{eGE{R9MS{oyKWSl;z)JhS7`nWUO? 
zogQIgOg(y_c$|T>vynfq!@F(-7n-^Kmd^pYVYhcS96IV2Ob&K$qtVCL?(#TgE7O+#~beZtdJ- zKmU3sJF5b!-_*?l$?)r^g7Pqns4)n=hY{hK7EdhKaFVKt;(LyJO9|PHQ6gX*H$e>@ z33-y^^FyForgs%QdgZsT86{ANH)rsNW&X0BL&XFdttfb}=Lp-TH&zV>l0*m;`faPJ zSYa$SV@E?f`UzzO2oXVmlv|IJADY{A2sUnqpILXmv7JnuOzdQ0+qRR5ZQD*Jwr$&X^1k`qx9)xGuJ>oM&grjDcUO1q-Mgwf z%^W-oOjXLnH;9p-3Y+{{=40|$u^xXz)Iwwk{rm9%(8M&Ci3HcOA@Ot1N4x<#JIw3$*{?ocnq3zmynhM@c)=Rv-wXRIQL7^7<1zxAD{vtGP$&SqW`4^e z!^NqJw!j4U6C{dbha8jPDYoS}3=zjSzHQ7@VsozxQT*?Af8d!69c`Uo`%i`N77zjY zmIUaVZg{x-vNG~6_T!E-O(J%fzHrfek16^hYd%$aW_B$2uLCmCky?c_v%fuNo`+i% z*B&+yyajjCG;V9eaqOXC!?wK76O-aw3%M?I-ALH7Ba$@q$o2_`-qIp#Y=AD(#%`C$ zn~R!Qwf6%Cs@Hf)p<-E&|7Z9zwU;EAAvriBiKrDFdf_LCadDTDs#HcD$I<-r2M;d9 zB39PXHq%VpD0OA(@v^8AR3Uw@%`APMF~R3~RKkYC+xq3CZZx@nqa|Q6)bgL+3uaLrk?A^BffE^iR?oS8!y`GXm*Z^7BjUhn%8-aH%`C4>*7Gx!&{K8_e z3x}ui=VJU>K>eBAU#_H20tZ&6gZB(?^32H1oE8+Mtq#Kx_egSHUWLCPL{&3+|7trX zDz8_%K;=-4^h|Ahy3&@HXVM>Pzm4gCzqE;@C!mLR2Sv^A9)p`>jJXC2M7jNOG;QDr zxVg7h&7BbZ(!qiY*_^m{1o;7sbbJhJj+{HxNTEs7#~M<>$n7=0-MgSr;o|t53^4|G zusvu$1)e-4Nc4@QI;8ZxJ^V+CqNx$V8UwruE@}UQXNigW|(5 z3HtB*Rfkul3NzJf;QF+A#SmqdjsXg5Z3*X`Qp5~Y_yA}EXY&72_fEw~X z3`mm0$66-Hi2x%63@+MUJ{h1fz>oarPa?o`{}y1tf6NX^5*Fm&6bKU0LjfKrM5w5u z&sIKDpXv%L7MsBN{QGtH)GIPU_w7kQzt|4DAx!j7eSswD-5DbefwOm>s?8pZ5MY!` z947!g1_#!clW*y`+LVSPt#Q`{-oR0kf~0LyO}KBfAgT#lMYvy>VHYC!W&f|Cp6FtX?v&xhLmQ}u6${|(>%cW)bED28bNv0H2+X>uCbJQIAYHRw4Dx9U_MtCf+TEl|Wj z*|rbc$(Nos(zMp1E2_E1gqxQ`Aelka59Sc*bK&xREiv~i6qW=&j<`LAzQOD1_{D!V zVj=sgO|BklyEDjn4JO-Pht`U)!#y!3o_T#wv?ov%IhV9=ka*L2|5o&PJVu~0#EOdY z5DJ#bvKi!}`NbLPV#vTxuNa6}^~GPVCW=G`EXznrmYPQDHT(|a885&8?ec@$zQj$NF{I0P5j5hV98R}ebyG%~K-!(*-D;M1yl&OQ&8jEjz4>QhJkE4Si>)+KEBF$PFn=Ti zLyjt*UE57>S)8KBe8BS199D=M5<%g)d-2Bh@b2s@>hGSL=}_&uiobtquyYfQdQwr^ zEZ0Fb#79|y%DIt)!@IH;B6S5OXNuAJC4gdS4F!rM_?VAr;z(4iWNG5ciZ z<-N?xL>qvmYwgnyWqY-Ur+>L@%bc{v?GaZIUOf}Wktb(9(A^DV-p~W=^ z`Uou%} zytuWS6rGPwjFU3$o8$nQe=%0H`0*XKP{5!bf(q~aL^%^3 zd(+xS!H>fsic)tm#+>+u3|qt6lI$ifRE@ST)k_i%66o z(IX&$E+NRT`-ySu#BmOVE}}}{uFF=0h|SYl|3TDiMnX_ABcPE5`K!hTHxgUQf}(_R z+U`De5XxumU==EKsXa>P-M^o`!vUePS#R zU@Xh-+XTNi%Mgo8m+(-cL0A0Sj${8cj;6q0E>&-HE)D)kLjZU!QpRHB6s#YeZls0+ zo!ns)cvMX;lULNpy(_2=G}+t?eTWT1+FgiPXlksmSHD)_awj}?o)DTh#_|1?yKo$l z->Y_NU&{OUYNlxW57& zleMwaY^W-0@y=6=RGmk;9I3SVVAkAt3#bkq?HsAL6SwBF<#F7LaQeWVJXBqaL6B4N zB#g|GwqL{+;2%RJ?b~6sL9B$41EO#D@j#ng7kVon3Ty5n?i`=ElO%6@DMg8H&xUl<2 zJlYW4#F*KhCQ)2+uo)LJ@G|?xh|Uj51Uq|=-@ZeoM!-)K>HNV;4Lei?v&gi zM=tzJ#Nt176HG!c*VkrdJ=Q6u@@pMpm9uEK1;$zP*5i>n^t$?r5MW7gSwy{G9a@Y?jzO%s07+En9=TEkSLai>Rs}_HT;F#|nbNs*1!%2= zP(ewL-^%DW>=D9C7&a}(n@=DnK`D#3eim?5Fh>^TEMrFLBu&_0Tf&h&Z!JA`G&I>F za3t@gVg-BVC>(^so0FgyRsqhz_y>uyuTByu=)=s8=MnRmvdnXQUn3=1ObF^|%}WjI zy{Ag}gwT8ykyW8^L`{1UVFKsAO!3H(6B+TIj%U+8$NAMK85j| zd&VZG%gQh4b?fCi>v}a$x{e)lOS2FCT^R$wxQGnR5K(l9u<}{mpwOeSm7(f*-Uc4- zEs#JD;ok2`kMEChwB64;{r;ttV*xs9mjtyGBz~@Fy8Y*8ur+J@=9&Iej)Y=tc8eXF zd{mtw3_ck(cV_Dp;y-RGg~q+@gWmUs42c04@Sh9nXq$aock8%`un_g`0V#W8TO5te z!v!J5$g{_3UJs2tVEvT$0MAut4`f2PUu*a>!-m|F0t7~%;p3^Jb_6PR!SnTC2x$-I zSEQ`--dYfu)2Yp+xn}g99PACG-t1kAKEpn^f#QO|*sSYrg70`YnfgBd0g4cs1#NLR zM-=A*{jV1cWRo*k7x``|XZvWd6IE#6_4V0koU{#M>UAjHN&csRa<=GvWHL#+7vd}Lt#Mf@HJ1?;fMDIzr z!>Mk4+QyRri2Als>h`8pTuLMdg(~a^27rZK5M+VQS=x~~VM$#bEtNe>Z7*1c`sgHzL->c8{$Z^DAE>%xRMJ=-(0oK1=?REx zMKW{$2Q|f%SZH3_03E@@6P{a++dW={1K_Za$PuZq8%MzHG$$i}BI4QR3B<}Ipltvu zywZ@v%oYdK6o26oCc;ntFr*PkIRZef13l3o8BSM9(=jF$pBMn*Bin_ocNF*a(fto> z6d0R~*434+iE>dsI)?Ih`Vq!i=QHH+(U}fU#Tw_E0!z!wo1%+-oYyyv406hum=Z1t z6`6KMd+TV${vSwe$4TZj8F!m;eO8JL=N$CfCPH+H3nJ$>emZSnxhWPL_TiO*`md?1 zKu5f~985xA{%;~?zLsBMP|Hn0PE@5%5D@{r7@7^8(V3o)JI7le|L|ri+~qR(`|va@ 
zi2nybvGPAa)F=E;#8@0DvsTI4onz1k3LxO6hnF(|z&()y0hZh=xi)6d>Z3&$b1ptX zM)n*tA?svEAMhbQAz(hCd<~W7C_T!;p;5h(^!BJnzm;Uh4BBlhnwAj zEDqMtiiZ9%2eK4!1H@)TOK#{w=rjl&NyX$^N@E{H}?#aSPI)R$z ze;Btfl!wt5?%3lx`>g8e)HB;(oX4#bJi(mjx^>mAyX$%tT$V>9eP=hLMyF{2aLxap zRb8Cg@jZ3ZZp}kRQh#tWFI~ZlBL%z*8ZB1=jd(@-$XB5*B9#yAD`~{*p0lI_h)=Az z{MXGNFrdarVZ2YPc$dXaPr3m$fj<3IlJmT;aeL+!>W*O|e_v!>ZEbHYNT3Uux4fqs zPV$mp3IyeK0Ln+Lf7ss~?Oy6QV8f}~c;<(Sg<38(12N$8Z)z(hnB^Xlj_bv6OH$S* zvVaSZ;w$u*(&@^ysr~6cJ9!$5{PbCwZb0M*QcyMGMf*D3MYz5V)E|Dd4a_gK+|ZU}$LkzFE~_6piw%4~e( zSe}bm03b}vO*Vq#!+xql^_HEa5yJKGsi#Pc&c&hYcBx8ujTI#KU+Ghbl^~S?xx`V? z@KM>_CT;fi)Z6m#?Kdaq@eCe1ElK&G-@M$AZ%>2`#}sYFI1C=5K_j=CKG_Q9%{i$z zVH@cP5k5c}L(F|lc*1|FuYV8@e`72_o9jyo7xFT&39t-oC}QC=J~aD zm}fTvsHb<6Ll4UBXn!tEr3Cpmz{E6>h?{&`C@SEyy-gfa1%RT^3m~lcpCuO;_DA<& zVcg>0w}}EvPx})n^jJ0$ntYM;V|avN6n?=dn0l@P{JLrr-kqcUa~;uEK183!mDShT z!lnayWkd;XSpg600v+Fit7+c|U7wtT%Aw92y&UyD^iI+Y8h-QrqN;s*Ch1*h4!Tj| z#ey%~zD8@%?QO0R6uz@cV)pl%uBIEEkDV8#rN^Zn;a-EIq9FYsZ7@HwCwu;&C&#ou z>XMnwhhxP=jz9{;L2=KAmF2x*fg&T8{BwWp%y;L9X-ULQcsIAVJ-=F%^8OY{AWS&! zK7DA2C5hgEgEM-ZK1)-&#K?K2IOM<>g4ct4ScZ& zA$u8!i%JNVc5yqG5si6AA-Pt-1Qd@vsjMS!(nyI<6iIm3GSYwub8_=2 z5@!3l0Ih@ol%A`;h~li-up7I?C;0mx=oGlwFPiRBf$^R>MB;Qt3T=E93t3?g246Dl$20HP9-dv@R=1=7dY7tAtgBXa;2g(PHhtx4t^77+WX< znCO-ghfR}?g5O5Qp#;XusIne&Qs8_>GlDBTQ65Gee5R#oJtI`TR;Xw;vV_8oDVd?X z;{RlVVwrPx-jOAScx~iz@8C66Ld9Z)?G9e6Fv%#pYa1jG!rxJLzGn_6V=EGCe=PHg z(Dzz%2LkMr3+vkU!bu?hyKw=3Mu3loob`jpNHrimw8C!m%V`Lpc*&pQ6FsNM_w`bd z@?ef?^TU#x@?lEreq*!7ZlMIZ(K-Ifq6Lejg>qtV$i_-`jbLV^02F3&j(wO*$4J{! zIJc~Na*~yZKGZ%z#@(=>!O^izHB8Nu52V9g{0HxFB=fI9*Auui{f>KZf(p2>8P(va z&=Q1bL0cbC<$V_Gi(N9(xDVA4lbktSsuQ8m-=~V+Mm`#^6kbjkqh z+ehBmn)ArdL}&Z}mOpgr^P{1Ul=$9RkRCJ?3W|P8^FCfB;hsQeDBM67Yn!ji6SJ;B zB@{t3J*Cs(W#3*_gqcN5AX>R0H8@&2xZt+lJ)T(4V=2A?%!a?f+zIfTTi6N|P)K zQJ~kTMy6Kjwp-iE!>8E>!};ao$!o}sgJk; zecTN2j0#_G1@5Bwz;ZDHfCYoFqF}E6be*B>01F|`uqv7q1&u@b6_?gP+J+=;Q{2(U1)ITAUcSZ?_n0tHifZ? 
zdVqf3cl2j-6v1q~LA@va=1%NjE}}WvC!JvW1YK(_$RpVO3dPf()P_k=)et8Q`+7qc z{t98pvRDW1HpS%FT|EN$AdN6?n_q@1+3);8vS$!f0-`~Uf6Se>lDs;*i=d1j1__5l z%9ZE)Y?VilH=(2SlCgEH`$i$_{#Lv8j$b|p7qTj|0?q92hH{*c%G`3LyZJP3-LrjG z#Xjd#Sev^A(}2vHsD|z+YaN|uB*sYfTOc7Tx5xg>(m_HVnRmPjZ;EW!P^R=E24Tv( zKF}p*bZ@^6$qmjGpanvLfK$^Es0>k_H&cym8&+kwGSFo{4Q0}#Vx)0win={lgL*Zh zDf@-bwwo@p&%h$f*}?@lhpPqx4c8(BP3zJSe?M4dA1TzKbe1Den;IwTGvdzOsOeO8 z)l;FXQ?I6)iUi~UI67t`n+k2ayVZS9Z)0P6OgHqJokMJ8UAFOtnwcQiBYxO2DPq56 zbQq$Ssd<_pczoy(*f~Ne7q!HZp)&kv#%2tVos=4+yWtJbPw?29DmU1d zR1<&`X+qJNUq^wVJyOR*JZup=s+*_6#tIdFbYWmZ2c^XzX1)7!^l}uRU(b`l0L@q) z2EF5ry@|p=ms~6FjI6}2Z@^m3uo>9qOwT}z2 zsPZ=(F}0_#QFfETokF7p70-!xUI3rxpxHojK60i&U(`4tMP;`3s>j^%g_K`ph#Hi+ zZg#i)^?F0|tc;?Z-NhHS z%0G|jXHa1hv6V1*)# z;0{RQH<+tl$xisA9=X#(kQ+BF2w?G9gFaiNf)BeY5KJows3>(~_snT3O_gZbl*;#m z|2XgI`SkQ!H!!*}GaN)eDmOIuylCHm?d$7C| z)CJ>orb+k3+ZIwAlHm1{9q=Su*Use_=>H|92g_L|W+jp=S#ww@r9jwt9jcM280Tx8 z_|n|n!Aw>!P>HGpLbAORMEEi$@0NZ1l$;Np^b}?K|Ge&Q)HVDeq_6q?%0T{#tSQV z$-^6IkwL_m9KZo|QZnxl=7n1*=qU*QCYN)%&B;32F<05tWK^vMLDVw2KBQ3(M@y69 zPGo3-n1Z)p4ddmcS^@e^+fBB9PxS26A z#(NxqYL1qfpbNccQzy`pzTWnHt6;pG)@fb%7I7orZW{{gN}EaeN$Wf69wvUSqR!sJ zjFkOEvqOxaVWdrY=z4Lei#*NlDCU=DiD?JXg*6lY{lTFO+A*aWN$Sf@d(MQ#dtB#* zXI-CZin$#?S;CSO;uZl~`&z)MC^@}b+$6I!8#&FNxNK`2+{idaL(L6OFbQN#k;1V}aMzqHq7-$EYGRc)R)&-kuxCMU}IIK~}0!elpPn|Ok zh_kQHtD?{JrvAH8ClMYfv2~nyaIZA{y~| zn~L=^t%R`N1hJ||ZL#4chySMo?(ZSv*@)b2+!IzBRVQoUCVbn_;7D0Ed z3JbVMMglyFm96t>uXm4wL+lIQAx<=ApBPj@w+$_UueAOTv5sK7D`{e^4dTGrEn zdF;F4N<2$G50TcCED{Wg{n1|ju7CUsQPaO67?FYza8H<(t(XGYu}jsE-Z3!;0&1Z4 z+Am{tZGqVnwS*GY2goG4!$0B%=~>ZmEEZ*ppnXSjc30J0cQ#<>J0BUVT=tJ){RPi} zjElYRe=u17(XtNpeJH_0#O>YOEY)dnZJuw9c7?P)9|Eu+{tJNM^>=Z(`l-%s%DOZH zc8_Q-kuL~(<5AD`kUM}{iRclTDD5>}F)mfpj%Y71QtUFyQgYsmW)WKh7eB@RK-0=- zD((p(-cLCRll3&8jFh-li(HuK(abY#)s6eed2Sk-K}VK*{Ao7nigh?d4@Qz6+r54x zG+F$oNac=XLkvRK-r0OjN8jFO7MwT&)4?&keqbg#?Df1|h-)f|tF6Ma?3`Ed~N^?^tH z0oXYs=~;hp_vcrT2)Igy8ZOP)z@ar|xPSG!*!pxMNv_X8uqdl5)xB;=moiU+Q?MFk zrY-iK#0tFktxfbmA_y@AbG`*>!|UvhO=E(XgKBD-V8Jh!p@U>MketsW z0;KX^8c)-WPwHqG%}aqfMj47?MeEbNvHiqZXlGG+cA4G)EhhD7SeD-IoVuoDM{$$! 
zko#28Owkj7PU{iUBQ$v#*Ce%Knc@P zads~bZ~_8IxV}1`2rOh8jC6DYPglXKzrSl5_)DKt9)$5#sC{Cr1OGIF$-h5Rih-K^ z6|3Q)tD<4e={|PX>_u-$$F?@UUC6ALyUSmy9lXeq(2U~sdi-41>hu$=HxT{?uNjR9TpwN06x>VZ@!hz8aG$=s80Zf&Hsk)|IbGIKxb~q=U;r82g=hJlqyGmn`QgGys}+FpV$bP@zFKm5 z;$t(p3~%Asr@M*h-(8YDS$9H_^elP&Z6avP=ED#tFt8?hdK9^^lV^9U50&6=g7cxh z_9y4{A^oO;^Q>O6wyq^`-h*Xc3i#=+3C+8My6AT@K%VzOYeo(IP+HZ04^lKbGxN%IXMm z2PqkjvO&-G>JH7oh#0eT401{YV4%op$`)ob2;?6_aFy@Ge`azztCtvBT$Re+4j-?v zUw%+(-=%iuSowc_@g#&0xeiMi>BM5B3!2Ezo#QtJZ7&^$3&l|c4 z79EIw{zcHXyBq#$$4V?j#?u3!T(25l*Ms<80kM|0ek}cj*DB8@uh0)(9R4ID%CB1) zPLQo9pkN>dz{VkngXk)xkhJ&r0Sf`c#K^PMAoQ#N*6Xy+qg_**LfA_fFWa1OxuS#^ z$V_}LbOOcW7xQKS+q}K1Eg&?O=51|NsbSx6j|GEoL}y;FI3A#Ylf=)RxG|VaQ?vQp z;b-UxO$iiM#1>Qf<<|jxlMX1(-g$9WquXT&X&a+*CGendcxi98b$fZ8q%cI;j6c~U ztZ>@h&H}$1PXO3CB@}*}1l3PW(wG2AAf4GBV~*nPZRC*J{}8vTyWnzqM412QCWzqPeKrpF$+#t zg86mH!jSAs)jU{n(*uqkA%))%MsHm?Jd{ z$2J0^4twbh@+~A$cm2v<8&B8JvF#t09&gfa_|b4be)|OLRaX;iE>nEGqT&G|9&=rf zHww6ZvSSqg<|jO|XgJVKEOTScPVTQ;mjcYr!}ZY65Sv7h4Xa)j-0#(SD{JezZF75O z$?5RXW4bhP`x>JHRVY#@IZ1o=fgW}c(c@zO(qTN+Q2s}3iO-KpQT$(ct}D}2>7JJ)}kCVpB; zL&#q-f6sw1E*;46*=d|0` zm_51Sx;P*^B27`}s@PNE7ZX^rziY#nfT6 zCL84YYeGs(T&bP;gLp|~3MEvaN=u5Kq``|BYfqLeBx>>YS8Tk;7RS!U;EC2sN`qtR zq>|R1^vhobq152b|ob=y; z^uw6J1&^SMZlwAQIbo=4v-fE5+f#o*Fa%%^1s86ri{Ic#^$)E{!)*mFGMi={)G?b1 zbu?b%x?QXKvJX>ImXKCbznfu*mCW5Iyfa}m5K@L8TP+=K4rPppYMC4q{VIA0J@$#9 z0ze$;m*&_TQn+6j1$G@tkY*CP^BvexMuN>dJQ&-iz8s42bxw{tG2AZu`clNT^b9E} zrCMe5hNsHcFJ=+~uWB?o4Lm)lG3M886-Q(+vnhjtG*LizMu3745(rXol|JlDoJFxg ziGBvm6uzeeDnV&QGwxDf}i(2?6y{@0)ga!%fK@1i8feJc~Kyi_(pf!ho-=w1DS_6rIN;G9cOdc%OIS120#meAo zy21DAZU4*lLR}eldwl=QfrDCi3eUL!RwX z-iiYnUv1fj^%Dl+Co%;IqZCC&3nBiH$~$z<2CMEwJI$^S%;dw_q|s_#cf!_-?$%C- z*rDzN;rE{|k~}V1-#7FfwB9(_2z0G<~F=8DBzodR>hs?(4LFm&Qr{0p+34g8mhSu#g z21jhfe z$)LEbV`^9nbcDDM$G9^&itbF^BX+S zuztpA`EgQp$3w zBQ(y+j1|ZzE4#n+!5rB^g?x{OD^Qj^&IFK?_+|wrMl-vy;iNL0$D$q|k54aOCC-4fc) z;5N;`=H!_Kl56)Z7xFsRZ52vwN?<5ZM(wBnH|>e6qA)BO1K8pRC^3O9U#xW^5%Ke$ zxzHwfsg>0=)R>w(H)~D`_UOd%3i=yZw+vh*W|bNLk&284ap^S`gS=R-07ak9@7&ceBX$G$=Y9RPx(1o&RprZ$a8B+aC zG-WxthGYZoznf70?2<$x;wSOJrS!|Ce&qpp^^G50Ttl&<%X-l!-6moT?Csk4<7O*c z;T0t*P=aKp^qV7G<1f`rVK1=$ZKjqY=!W8E; z+YI~16n43Tj47et8*eKu;&3y2)4R5I1kK!hkPul8CwqoHptZk?eP;Di-l?5>rY5eb zDN>kwgUFP~hNvctB=~$k2*O@!`!8fNAiMUzsO9MTlOqio9MA072&u~O{qa`T3L_)%+6}8_5Gq-krL}SC zvHTU1C7N%$U8~z?0R)Q-l!-Z`#zn{-gVdvg z&d~&Z3)m|fD7O>q77?-Bt6DI2x3VO9Q&4{R&lK`2^;^BEKVK3&=8J*>N43A6%Xf1T zJNk8qnG#cz8~n5p;_|-8OQA3f8^SwQM4dbp$s`Ec+-$4$op~y2ktKzg=p?nU#zI1> zmXUh-wZN9o5&u-XOTLd-{Qy;aX2H5bdVaWw)(Obvob?k|;O7VoyEMtY2pv}-@ARvA zSb20_(dV*LJ zJSk|(ak{eUC!SaH`EP*Wju~LtLQOP0ImC|PZd)=ixstN z+Kp%m&VPZkVszB-_x6jCk>D$jt>grp)zMK(yZuVgRHQC}a*8pE;qj~aR0ukPL05sr zCOYMcaUpq~7_e^eQNf6Yhp|!pnfW%L{NAcAIoMy3fpgtlLG>k~?JtOLyj!?#+@|MQ z(u}cP@lt$BDS`M{KRx`M&s*=czZ8h?n;B{mo;m!Z-@lQTYk23M^!jN-3|T&(a{!7(>On9zlEKRxfbLmau+ZWBTRr) za9~unp$p``giZB{_^pKVtPf&fcMuMRXXixZIuC&hbF*Ld;}1=5z%<*t9_J3ii3Yr4 zD7{bl=K~p-kDVSw%=_8DA1q}nx!vY!jQTM(&aF+v9bvsAFChlp)y@ zu`asPA%^$ip#p!(FTR{;yP)IVZf+PVRxmV9qB%_sq)ttq64F=P}m_{w1|-n7W+zxiUJ)5;(1+n&n95aD4<|JN~%6sLwlh zjRY}#Sj*JEe%^ZO^WZJ@?-s2Mi{9E}KOTL5;0S&ZAAm9<4TXW}eA&B&aj!c}^*9P9 zqOuuNX5ZdZ!l0#|1!RvKYCDh>yxy)8?wY_eV<9n7jt?Ut>mjJK%md;v0+a>_-q5P@ zEQlMhoR|sGczx?!1&(Ow+OzKuDXnhwQ08*LvguDW4m$;Q!5sDXXr{w8y^9YiuEdcV z%??v4D$!qpAn1N>?T+I8X|-&NvT}8jIJzuzfgxqwTn|BK^(=8p0)?2kJ33^!{{Rk6 zh-Eo8t%`!0yX@ILo`03q#Q{hIfB4&}^uj`$N0a)M7F9i6j;rlXWhW+jtBY>PL@T+XHNIl8s543I%)8Y5_S(!zfn3N$nwl>@6f4&L7ZU2~BWI#qs z1OB~v-R2H7Ll}y`&bjWsxBRCeybZL~Ws=vId~+Yj^jB1CAR(zM8Bj-Et-PxokCrWb zM0E0Atl?86O+4`5%Xe^_VNSNy!GUn@9xYi`EPBF*6!_#%3p{ysNxP_OJP*D?hf1mHMB0r^9 
zFMnUP?V270q(c=>1?e}M_%Mf8iJHYEPcFo^aYe{D7FEJIs@PrBg#Vfr9#XND5BY3P zYcrA-kkgyW_67uY0pid?^+(_|sc0+id3kCaANSC=mFvUD$}=UG0I=Q8d1aAulFUUUWM3;2EJBnkmF?3tlP)^+S{ekqXr#~@U9`21i zevuDimf_NTY~||?+Z-e%&f2+Uwqn(QfB0ZSWW~p4ahL{Cgsb~DN}crQPh2?2Xc`v| z_UieuC|sb;cPFBA;TgCd0tCSpL@hmb2?d1?&t%Tv??J?iu0e8jEA4l^jg~y5Li8CP&=M&S& zcesEwghWc$a$wlBV_`JTSH;x$&u=Q!y8E6UKp6AN7#wR3H@W6K&s7l^$_ng{ou&+r z_c8ljP|^zqSA zTB;w@S2Jf>2Q`BSh;>7bk*l9A=$!e#7>!mbP`_!3>5>S>l`@x~Dnc4O$wX3N!=NBcQ zl#b3~GJrY)=djOlXv5gf9D_bSY)G(@7dQw>h%*d^W@IV9=YsOV4Vux$=@-}0xjlGH zjAv-VWj8%m9UJ9{9jaCQ6}Re2Z9Zi8h%Vz{O{QACfinot~0B^&3q*2))pxZdNNkW5ZqfwbLv`-iethCoP;!L z5jeLstx);xJ3yjw(f|@xdf>xXltW@gu2V!izoV{;)r-)SUcCY;%E9Twme+KRUT>;z z*D7^hYPviJLYn5k)L$q%O{&(ouR2e`UB}7T4b?ERoE$t#d-RJ8l(T>6eY|E>%^zZ` zsqWS2=X}Fs&aQch{TKfn@6PJzXBN9&O59!=Hrb+7EB*MCuyU9 zBuR(;yqD`tFW!bD#4w+-fPf@cg8i+D>CmN2H0g_3G4`#CsrKGiO?jOH$Y5Wg>Zr;` zJc;LRr4z&AMX-y`Fgsd)ID~L0B2QDyfdxtN}HCo z^Y60orMnhA-=Zj63=`d-P~szEKnmZuzmeFy-R2IYLm;=Gx0G}M{L54QCho2Fq}$)g z;pGmd9`Q;Dk&$-?)kXb*X_|<_T05KqCE%Js7LNV4`sr$~9_NRSxU0%eY++7ekm$VC zz3ZY$8OW40jKFs5sKv%_-P&&cw;1>J%S6UVd7t9ZRl|IT^?@CY_ZC}RYyQ)oW#P(n zk=@JL;cf0ob%n0WwQGgfeUFnTcyQye>ia)J*)j0++c9f8K1x7#WMcjPA|?!&Uy~h3 z(sHnFGii?oC@zojsUMjR`34~i)&-LaesN^KJmI&^3}yplkC6+SOt;GWEMn4&30nGH z+1LZDt3_ge<=i;#|G*8mh2LYSMAUQ$W3aTlv1kpy$U!r%(uVrN?gI@hv5D^CT8@kN z_+bZOiT&FmJ+w%frtJK4BvPsI1X)68Eg9K@6hYH>yY`*@j^;_OqRE{&KWl=XzRtHl zv-utG!^g*IZ42^_4>pD;c8$b_^QD(aTH{)1p+^xt28qII(=;csU8qoM32QeaUHZJDOIOr-&O>4}2c4&iKd7RR<_Scm4>8 z0r}AL{m>)6EZk;yQjsBXbWzh+i`F6R`R={BSF!Ce6d;_uGH84B)_O|#0gXS4yvc(+ z;qCGJ?rjwH@*0$>yJE3udiyq4sb)K5z|-Zrz;}SWIBHe_@$JjVtM5rgvGFjuN~d|} z@pF5+2Sl88_~#2`rsw@9Pp+|P5-Y6Dk8A&7@OcRyO>Lw9)ntry>T{?H%|Mm9d!sVN|rJij~P5leD6)WFTq zj)09%b^_lP%dp-)$s|1*H7JrIj_od711l?{{YI@2bD=Yh?nQ1?2|3q}*Zhel#OKXP zLQP|?Bo)it<#myA_Wm`?BqlAXbz5k03IF5}qHv`;rPg11)5lYvI;d}Ly5A^0?JXw7 z-~L4fG728XQer-g4dKr=)-W!$d|#*MCaMeN)SypVmQ2FgFD8s>Uh!U}R?3K<=7a*a zOtstg=O?YBF4@aUdrJ?qRGBW$NzV0`3}wd|vX7o}|A())imD^(wso=K?ykYz-7Po- z_uvpbxVyVM3wL)XxVyW%yTe)e_ulQcv+r&9i3irAs^*+j&FW+HuOsrZyOj(bwoDDR zPA%L=?B$V9MSCB?SMfgH@IKz%+wt#gLr)M%B9S=kt3A+UI~YZs6nkfcRwLaX3V6(u zmN#>>qhC$Xra8WyO;3((dB4Oe8Y?R9&pVvIRb$QUhAQotQi3G{-9eW1v|`*v;a%Wq zys%6u4jxK_O!L3GIA;_q&YP4M%&Ou^{kLnuzG0wL4qHXAx0!E=v2peF*>tTQSQ}i& znLh6xh@Tg6oTIpMoJh`Jv4kN`R4;%%Aaa8#n2o8dCwk4FQoW1-hdmxFz>uPJcU1KiOS=`g-v%c zcOl4}Pfc*RL2au>C7jJmc?Q6%tCQ)hZa(R7g!kpQn<5f};QC`>x;NU-`n$NV&tQ%6mYUV_Ac)6IB>(IpOMG+JLW5}^zP{b^eUD2Dm4P; zN=CZ3D~sscGL-ueqmyOLjLGw+%m~P@7;tw=wSCWfN<}BL`4b%{->=li0g;W{mza{| zHlv#?Lqo%7Zi9hJ!W^?jJ}#m;Y_I$0l)q(rj6W%dRXRt=maq1&=wln^#xRlX(30VK zJOb)<+jrKoDXG+hVfs6RL?jmtli>o`ob7o$A=`w(-^CIlYF|t&7*E2t#*lqZQo%$zSgVi@cb?#!i$*hD}&#$I)#p}CjqN4n%1E7 z&H+ug63pbCSqAo)hUeKRV*GC^E)NN)kkLpsyp@LRzXW~i=M6nI#)XjtRvM{Xy<|{E zk0S)55mbwdg0_lc$=dE1{7~YNweBlle^^<#sEpWbgxPjDH(DS8djn{%arX|M?k6G7 zqrLJfUlX*e+vH$&Z-rDAHd|#TqfK2dcN!>)tgnsk*Wd0Y-~9!kDo$1LU6tJo+X5a6 z=)+MXDc_b>E4@L5#QEXe3HK?wNcX6ypPI;Xm50LE10b%4ktY=7Tvd|*u1CXXC}3p z+$u?zf)#yNb`jM2bt|=O%^**i@^)_N^*8uBg=dE3n^9F=Jt_uh9Ff4P(^;99?A+dI z3WdVjYv|ymDT7)!FA2<>6mAEgkyNC(CIwJNAZuHn4A}UHTA2zgiCLR=A0<_^_j>z(bt*?8*!JL!H>++Em##Mw-;p8u#%jN*Ico8$ zl;LEbae$7F1B5eCSdt0;EI*ItuKwm@9BuK7Im=c*H+p2Mw|HYT6g=>Z@z)nOKK04e zLk3tgx@k)YP8$lr~G(w;uJZ*J7gX|!cLLb#rZjn2TJLZTW#^Ac;H<$1Az zpvR)|ZQzO@;69GqiIamH|82yJL0~&ku#mD=BtU}R^pgh&;Bx*lIjqSmq`r-29o?CEfWQ*zWs_{Q)C%!|(w6~1z^C3K3EcW@cJ)N;~t ze}gm_D=eXR;vchAcafmsriIueEI?o6(#h=Xw;5t&8-)QT@jVOWH;S8?zJ>eM#b4mudZOjy*g}P1hw|$tfu*$zh=}72Ct%+?fzO!1|H1l7F!`ASUxhPZs*6+ z7n0e_Gu9yFjiSpg<;_5VFRwje>l^VEH#}9G78dqpQSc!n*Gw-g?QU5x)f3xXt-XYNA4}Lm>}b=!E>*Aq=7-pA%sGqG 
zz!qN0GTnJsBG*u}i&5=uydiZO`Rb?-6WWO_H+mf3S}}X_)~5@wNhYN9asz40%Q}aA zP=JoM92LXjaGv)3U zpzw^YDqdM5H~0HuszZH(zRueQ2V>_0#+?jy3L&MUdFAR4auSiIvxRvszssxx>Cxl0 zDN2>b$Dp`ZoIcJ1V0RC^PglV#d;{0cl>Y43AEFv}Xf%r|ud_(2$w`LkPLsWxV>;Hnm0ONhGz%MpzWIZ8B- zhk46nZ!NHVDzeUL9rd_1E}uk5=kV;oHZC33%)r1za!Rupe4XMGTmRzGA~cvm{x)Rj ziD@Oq$a2>wX6rmG2LvOqXzZ7&{@*1O-C_@E$D4f)_RoBRWaOGX=|}I^a?$<`TveCp zi%Xn{Fd9zQ9|aO?p6PJt19HIL<<2XG}iYZ#jU zRQ9Iap$g+_Y~w4TX`<=8xz%JeZC+9p3=QJM7L4E=4l6BgI=QvTO;dghsg*mNf zK~O4{`|~HuC|(7_od*+kNp4eWE2y$jcm%Y?O5%3z#xwcuL=yWY8xPr`k!j&y^q^6e zJR`^lbMv9!Ai1>bEO(>3-3RQE)(oSxrA>obHr9l_Q#a~3##P^lW7VKW$E*K%mwNrw znpqE+;(w#IEJp#B$R7)}Qv)qelw(6fyChieKW5EamZy@Y1P=*dnn+MTe}8Cz5t?nt zB>_@$a_DHoKlYBk`Mm?d)h`H+3*Y?4%qAljPtzR(e8?17o-HT%*^@I zT5sN={yEk$T0VA%M0^~)^^kwi&bYJOk=F+T;DCP@lJC{|@CfKqCIrI#GAP$ru;) z54138U6`tnax*WU2Tc=7R3xSyCMaq%N*EX zCnbsMGlHK~Z8z_dn&j3WfnCkbt~WBE|J8(R1(e?Gk$UJDzSbsUfD(l1_Ikux(dvoi z@U_E!R2iF;s?_9+YzTl1f|RPEb9`P@B(NXI=CY{nkdx04&@tL;O^u-C1G3792_XCu zfb#^-YP(gB)5i{`n<&5Wv43U$cZvY)1p1#nS}QESSx@0<=2ys;r+g5Q^d$-O!TL3z zBIy;sa#7dzITxYUKk=>d3^a|PBma>6w}57G$2UESC5)C!sF<*+U?uNFdH8_k8=#Bb zk0o#<(p|z!4khJ9-Nq_#j)sH)NXG`+-&u`El8BKOf*(=9G5$o*?0*l!lK>*iApi5r zgly&3-Dxa3WR@!DF1WS2kMpM#C`q<-RIt*q$uUN6V=PrQ=FH0PmXWOkCJv7q17Nfg zYvHgk>+-b!bzJ)Ii1nhaQRV&>nGQ>iD}8)vbfGBFf2B$2 z0)m<95O)~z1Tz&a`HxRQ(0>du;6lZ-YO~Y{n}^`8Hmx8>8efb=sg@7?oM$fI3;-pT7h zX{Am*QXzwW!b<@|@5YvHysKcnew0H>Ei$T{P>klp~_cY%iQe}2$ouGK|!n647y$u{K zeh|$-Vxl-B1iWquMe!#ma>}H+&F&%Rkq0Qf)3YhAJ?*B3kpVIVgb1#;n(y*!hD9AX z={JT>s$>QC#cOn`1wF)WbP#y1&15!)0xl&Ih+5m9+Px1>JBEVDSep2$gl7ieqz5Yr zb0yy^9oPr=%LPq|`;qRAHtZe*oi<+%7K*j^7RZZtT!8aQQUrO+S3#9qUPOn~4n#mh zf??yBS%kKbf|aDDg)nyBKAi{QTx11F;E#9UsoZy{z<9Y4)Z_Cs9C-aE&naGP20?Ka z7&Vfg@lQPYMNNTmrT#kdLNHW!&hl=|5WoqPXo1eW~9h{f$i zz9uKAKSb$t(MI*MB_ljY;ai~P09Dq?yUw45zZ~WdXK>-4w1X9zS7IWP?}x3pA-|-+ zP{JI&P9Op9y|<@xH?Q&(RGe|t5VNz(xR&T@rt_>tW^l*mrXVJVTS3Wz^9-w@yeT?< z1jtEId-B@b!F*=GVSZqQboCOf1cRXD^L(A9Kf`>S9_%p2( z3JH7)=|ZoBb83`TF?g)z^F^AIe z5i@ZZx&Q3tQMJe9lxHEU(f5Fgy)M@pEl~b~?6Lt^;{3_9UcTIgI!@B@59y!tBl(MX z;!D*1r;38wLr%Q6!G1`G9M}64*VUU6`3IgqKLfxg5$|*!$Lf(nNl20v9C)X($rej- zlXR`|KlUHPsN+{+6ZC!~TdyF21P41be!(W)9h*@td!X}`F%2YeX3G3-m`re0&*AST zd#dou*d$jdtdOi(^or|+YXZ)#iCI14K#!ZU?vxTdsah@V8sys%wTtr8(-5n-X`T>H zy*j^ym_*J!o1(VGYm@`~OjKP-3`^}0DMsK>u_3Jzpc|vFp5A&MI@mxuKQgwuc%{j9gIjq{T z;wdEFbpK(h3S5d&>(UG;i#U=EXVF=#bcVCoSVs;a97Z-iCgOw56&Hvl8lp8?@^~x_ zIZf_jPo*ScHwJ1u!9N3{=EQB?T+ZF)uzJe_3zv%vcyNS=MoOl)T(+WkPpu)|M>_I3 z43uCKR}moY;-7CZ`UCAz6_L6Ku3o2oh_+=4F0SC!%Q>5N1*`@1FY#JRQ-&K+L#W$e zA`!#mYX#@3`5wS#ayGmv!LRpDacuGrC7PJrzEOc(cp}qZM+~MG@8z zLx}sFHFiiWdM|!$sPlR3?_4~Y8)Ab>(75@&1hG%uBGoP!-)R$-B0ny9Nbs{1DG%)+ zKbUCk4Bu@tBQmaGH!)B)0d232AlIS5hz3ll+E?-BzrZC4EPecE%fl`EfYFL{!j^a0 zy=V>zyursN_brO$=a6!0QL-=jiM z;ERg0jL_;)%lAL)>Wd0k&S+J2I+~bgkdm5esWds3x984snk}Ef_eL^3+`=g_oEvNS zrB04fbB5#=b9q3X?JnqXz7-1gUz`Fg{W^oIBj&3Uz@gD-loQ%9j$BKsi5xKTX~l+WjUf< zL>TRu?tQ;1(f%p10r*1id>0tejQV@GyCQ{HO8BsaTx5;+{5jcU2^!y{gf1j8xL(1t zyB=)=@`G1YD^mxp>5^jF*62dj)N@fUgDtN=hgPt=iHwe;Bc;xZlOD%A+J7LnzR*D* ztqf89yoTg4G3X-C!k`U1W?9YIh4=uVvxS9F4VSK$l9Pb#bbB2HU;Cr~Y=@LEfCre>Xr z-=%y`CYIy8Z)Vr}-4GF1-Da@>2;H>F@o`s?Hhp#(^oL+aQs0UHIEfsQ4r5N*r;0EU`?BmIysd==&fa)2b7G_=)q{vSR<_ohV%fa{8q z7v6OXs&J{BCOWC|lNN1ak#$s9Q%}uCs<66pdWM&pHZm>YYYK2{B(XOii2UjTc|1O5 zy|$Z>4fQ9DJFZdGi+sK7=F`y##rKLwJ^mzQIbJ9RZJZqp+08`=n7reBqk)>Gd0H_v zY@4{3NT39w0R~-gPi?Fq^{`cLt_{!tV(pkz5fExbd+4rQFud4535-sN zZT3UCQyf5A207F_wCD|++)tW1`Pq%TFj#~h+Q3tBj#aPRBdme zA9>TB&~kH;ibJn#gocR#-bV1%a%nB0%Z7WXx2M_~;x{IwCA-*WZNai_>b0}cE9e;Q zc{8(_#oCmro});xOeT688OO~aE!Xdkswu9<8hwmc}GT>l`fi5y@vD(q!y4wn~GWV=ql=dxY3(0pa%)1zlf$; 
z0X8W|4N5Ohhtzv|UzgI~J;+H96&O6Uhf;4y7EUsRLR-0-R$8Q>{#fmeZg1g?T3n#W z^hAa`cgN~j_tyWx!^#d0(iv>w);5tU_`_tk#zAih^q~Ffg=IS8so2t8_854&kT{_F zcgtro4%i*ryLOTTN&#p0bBBoq*&Tq_Ym*lrI7qf(aIhVPpTBV^SSi|^FtbsFfLs3Z z`ISX@1z|U~D*>vaN@z#Sj29t^sY44y)u07;^#4UTu$uZGSus2T_^%)jj@5P7;NJ|p zi2LV;8~#dj4el}0%JnBjs7|b5n$a0a*2Ju#{U{ zB@u@Ac2opYm@z>MvxSHLfbzJ)?Dd?gU(xeI1QhZ4EWR~DT*%wrRFuJ$3rG>ti4^F{ zkBv()3z*$EG4$W?`!PMhjz$GIQQ<8!q|tG~umZvG#jA}nO%pdG)XLRhIBsXkeGPue z-6mS*wbOKlNj<}6vLbt;0t$yx8-c5?H<0WfudJ}#>Q2O!?okwVlv+C@uwlAh8CrW8 z>Lchu50R_lDP!CQgf@xeF7sXGG|o)zZVWG#i_IDX;T2ye!$97`0@}~1C59uq$h@R; z#lDCmdH*2likh;sBAQVOn`Thc)##pTQ+V|wj$P!;HqW>7<^X8O+n?hk)czeDtQkH( zOr@(Y@f`BkUc44NNR@2&N<+3T9L%wvdPYFT199bV&1Aur?PCzNK{q$bnDsw2@?D;MB=c1 z3|1K0yfkf!p(hk5J4#$ivgr)u`h-Tz<$H6y9H66Hhx5$r;9fNXoIJJ&}11n>mwA_-TLCBkjBcc~H z$O9`lrQRHr5qXH1K=H$=`kG<%(h=Lb%xD;^^cX1nw*jeoEF_WsD4yr1Zx#mf41|17 z+fj-0nZl+U{}C?&5%sL{;1!i$a^`+>q?TJ3Nh&G&%-~jO9?)Uua3Fz{v&41>MkaJL zjD`cSAZ}$k{^8qX*5-Ol2K!~7r!NY^yi#SXHlgJ}X^MPHB8(C-bA21*p>fkfCRr)% z;1s+U!pMGRPRR4v!`v6nYP6Y|JR3fKlpGgp0q**OgBk+s~L?Z=6d(-)}^>yj8;f1f$)%g(tJ)l6t!6$F$A5&eW#(RSkW5yd*X zJ67dF=LZiw@c>=AOr=NY5u$jKkErya9YT~&Wg9Xj{H*4i2Y0kOL{>!qaiKTJtXDeK z_(+w0Tbu0O#5xCAMWkYVUwQhymu#;c(lxfwz$Ex&C?Mz}T-=gb_qczDhZl0~5D2^; z!VUS^)WH^d_y~=oo~h6c{WR396^gG&}s04=&i*-jMW3nhf zuKuy=Ueb6zUWc#me)IEThJ;kw9|2W7As&jOFNd`3b%~= zN6X2`$bUp&VeWU1mbB0lRS440hW{Y|9gQ0jSATII0rxi8N0-l1(;VL9x% z%w=MNo{710QgB{CkFYgAA6AIq>dC}^eJ%K5N!R^(rED>aY+cU;a_>(lnqUe?Un#9% zj2h)~F&2=Q4>5Z~O=s;6SUxN!7=zzm3j2uP`wdK@{dZW;!x;06iF@E915L**eqC2G z8))E7u17*;b&z>+6HZ4^<}d8xS9x*sD|z1UzxoDLT|tNgXTaTT%?l_PF$n+`1|{RH zP2|9VR=B?LAQZ&#$O8x(P_J6Aj`<2_!g8y^lg#j+k+ zLU(o7&a%0HrnCK1!mPzTlG3tv7(eh29>*_%83Pj<9xN2Yt;K=R0J*t;+M3ZEgGo_I zo?~D_M_Hy&%60_U{7_ZQCTk_pKYbVMWEeR6ns&zvGeK?d9jn=*cf39d^y0Ji`3>aDGmCEN~?&2LCizxqCJ{^_eLQ#xS;(WntvLG)yuu88XsJB&n z_je@H9~MLRuJr5?hdMUBDMc`qnb-dOQ)&axJ_kSZ%7vCjuP5iLjNjuR%$GOmw3TFv z8XrzdtSx3}{AEtUSBplWI>YA4bJSBx*IUo`CDbaq?yBE6ySQn!_fZl z+Hta19O#ZHg!if0%=q$5SqzDL)6%n6As3mSk9~V&M~k~mPjUKOYVN9RL#=43_t@!E z>d23%3Zn{6O|3D#&S`jd0a(OClCKrE96;!3K*--;>zdI3xPpLspTL<1oeP5EE6XvM1Impowthu}=MmjG zx|)&@IQOdwe0w*JmWWooSsWwRGGTtNKrMUhX-4%|(($eIr06m?~T5w*Wi2Mymh-LW=pTizuh-o5% z%zcww4=NUmzxVbYLsnq*j5YB&23x>bIo4F~hg5?N4)JMfHM~=I)BshxZFiNO)UmM@ zTeF*;2{iSJXV%co@fzeYT+DUt2I_%MpV|mTmx82$-vlJo@UU!1Wn~BU#f%p8 zS8^!ElR4K4gDbIb!4+Uq@wx)&u`r;#L^S%=rK_>dzCBrTV-sh_s7Rnro@qa{ubjSa zT@aUAa5ewlP8mI5nOsEB&xeu=ixVVFD6f@^MOf|qYOn$h&6ir zyTJQ@#HOmf+?xMF+| zr2fGac)pUPs~-PXY6a$b+?sEBYsQE=X>lXFc~I{!f6R(U8|DX_)Bm}WTlHStOk7!_ zqA33B3>+~`ebGk$SP6nq{&8;Y{4S&{>7)i@3t8(j_ng)zQ29(%Xf^LVUpVm73io2G z`HY7QD+>6@!CC({w63gGL#|t)ElMng@3Y|d*CRvCu$tr6vE@Q`wC|P4TaJ3n4K|2ipM4W44SwthL>dp1mY>@XJox=pdi-ynt}AvPKh113h<1>mTI`a$eepL2+eaDfx~0 zu(Ulp3k(F$hV>P0PIFP<+0y7Ver~!ql=X2((r@;Jtf#Xa|MdPE{8YYV-|_AF!F$eO z#V@R5{r;-hR7im_*vGzU-#ad(YH-?o?=MBEX*(1Np!&N6zplgHPm?wXzpWyHzai*$ zzD$_#lm)-a`PzR1@x@E?`ue%~#_-MRNxe*fZu!uJR6C*qWUC+*$z89>Q{FAs1sh((^#qM>m|3dc4&aV)tg1eeK>=bm6f5 zEt9bm`|0DRrWvd(0n&;8&9GwE^5np5S;4&*bIOEVLukN^#CpmV-yHVg zKnR3yQ)MUlH?(mA4hX~Mq4FUo7#u#%9%yLeir~JJrpNlq&&nQ^x&h zUKE)OZ>6X)-!nl+uE(pfCKL+rC(Q0_uvH8KVN+>QGxMOQUwrL{Y(@hsD{WeNo#Nvc z)~aJML4)g~?px}(tsM3>Q?0OD2AB#A60spyuQgMcGEO~P%-4_%!_`k|)79smGz^oM zj`EV=rO<5S8Qq_UtuR^Hf>%LKys32%;cLJoyN*6loq$FjpMcC0%d1KMtZE(r_} zKLZ}-x$%N0g|WKJ=xe7tpu~@v@eF*EH~8G_wud!=xxaO$eseWmPd#*f66dS>N?sOp z`GctPYQ|5NIH#2_ORuWr!TA2dZJ zNVB(J)_rZ6|JF7U`q{CTr{;t}U|h^H@`VMZ|+{o3L)%9;5H!H!=P znJ1!VDz_@l90}AY!$bIsS@?&WB+Z-gt@m` zO$JjUJgICRnAmh1da<+C{G&A57X}J69_uEXcsJ)YM_+b6`#IXpp`V+i=w-4o>^FS` 
zr-1BOSBGm+9%i6Zt2VEUku^V2JRo2>WOVw8FQPXM>#1BT!QeP--c48v{KM@lIYJKBF}Jt#_DYoc(t# z7B}4kU;%n>rTg0yh~Aa~QhVgNzb)G*+_`-04-HjTWTMXu?73^^v<*qy-ncOZ2=?C; zA*N}=1w5s(IRDVPPF(V%(vEVGR~vSLk+>|z$w^_`yRQsjIrPi6c zSdSnv8p=SIX@1k|U=YqZfo1_u)df_Y|6PI8)Ek1T=|ctQ!)av&(XcL(y2v+w?+jho zgolPd$I0(7+1}@xqqxyx=XVrCo8%7(3sM!7GKHr6rifmyh?aCOm`y-(N-$QX^VCG& zx0xms0G#EXiyAg@haaQS9s-GSY`dQ1w$0qMv1~XEwtVc$-scmewH>6m247)7V$ch> zla9pallSzS3|`SH<>#>SnQ1%UZhki1(S_qs-;{zvRpzl_ysBd9?_cb^Y>OJIEzEEB zTQy2=i=0_ZY%I=G>%Jr;_On^^OuOK@S;$2$h%x z8q}6@+=$@4emRO%J|M{+;Uaz0a;$Ty&Y!mB z!U51<)5e!vi!_q04O`BeU1O)zDNiMpa$C0=H#(kN`{%LMIq`Mj;d@%5It%-u2ue(Z zLBKHf(QTUwywj7oiE4zaL?an6Hip<(ekZ!deX6WSL}P$eVa4$#C0v92kX4yn`m!&4 z_tIfc+K;%yUsm2d9gv`*sWvcQGv?jdq`v0HcGASTtH!(2#sJ(v!6#@oJ06l|2+kcj z8+aG|FAQW=>4YZ&!u(?=m+9^>WigT>)cex~p990%|6euwW)@?*=sj7YDrBjszC&N+ z4U(?!^CR4Aex<*NAeER@nL~fqru9g^@!tNAu32mO44^gohLE+%V;lsb$0g04zKsJ+ z3D=3fEzT_h`3E@gTIZ3OL>7#;CioG@C2IVy;anGbd~suTRW)(J!>??&OTb334ffTh zAfFZaS=*?y`KzznjHh7pWctdCWx2Q)Y-g=Q0z(EeqB=FExCT>QZ*Egs@7jOuj5ekz zRHp5{E?f_{iu{_g&vnM}W=Rn?i!NPhmn4jdD2zIl&v69URbm&iW~9kf;Vwf~dV2}a z_za}7pM6b8umWFjXN(6uMFUgKrwAt%fDyq8I^J((*964OiPI``BxCTSo9~XBRNnpYOU3|oe4 zJ>&n*4}5aAXe)sdQ>1wP3VF1rYe|Hy@-WAw-m%ilr4cMzxJ&R%td7ENB(H0FMQ{~n5TZu@jiQK08Og*zFQJ%|rYwm_i>xau> zC2ID_vV|z*c~a(&517abI&`Yqz$52l0!kvvz`e!ygl6L!raaiP?}X?lr)6 zt8Pf(>qsyJ`BPeI<~*uciHz9+BcPk5zYSB(t0JsOF8S_!SNnQjrDzse!9KQ+87+Pi zl4>s~7z(mro%bzzTkBviaDl?;sl9AFNLS~=Mz^ze+d$yZj3VZaqro2F@!bFHT;m1A zeOc%M6$u1_VkxY+S~{caia*3jjo>$B86@R1xZd~#-kBJDDn{r3sk07U{HwD9iHToS z7e7r)*t`kB0$S{#blxd0Hz-MPG=V)}&=9=^Kp95viHBAUhHj+PyqWJF(s9^**^SG3=5At+K=Kg~P+;QpdUddB`mSoda|H3?g>Akk z!_GoO70Psil*}x00%GAPO-FWNS6~&3VEOwC|BX81%Eo4wZhbP!wVkaZ zI-Sqd#|hvmSjC0KA@Fe7%#|wFFWGkUZgm?b%QZJD2xMMgus@!kr9I9$uWvbdM*^eEmHVPQ8y>0k2<8IZq;QIB z>-ir4=>FNerHa9z>{VlBeFt>5gP89|?16Z@$Vj*pgT))>8u~-i(1b5vY+YVns>z<) z45;;Af`G#*qQJM|VY+-ZBiQ}@m6MaxJ>}!mv9Y;fvv#rhcDcDh{(A9pI?HWysqO$Z zqR#emo=cZZE!JLxy>-&!MsiAw+R&UD;($wzVP`7qT;XVQD}o|Hshuc3{P* z3DSC#y)GbyoPyHz;~IA{*6yJU>#F;3;U=nYCb=`L2(eS()%5qwDH?WBHX&md#a>(O zY2P|t3u8BmkAsfENN?P3ab)+!Ql*Cr?#!e8Uv`JK0MDJ380-nfgT=B!$m(NKXTs&s zZD>8g7So?K<2^A#ju;D;J?JUG~`nb$dXda)8HK_z9znhUPnlTCtrkOlMP- zQfWRhy!Md<;5wMczZ^qdaeiipPj$&I2NR+G>I`wpZh!K>4$RP+)LYB`{y46Avob7w ziy@bg8Dms#L{zsnaQU~Ul^he#7guk>S6bLxK)P=AzDX5jD+}w|8Vsot+YO=;GsuAJ zf{y%$5MJ5HhL&yn_59zSkJF8@Lwq`WTc~nLAio>iYtYo}-g)T=>c}}`7i~xxzwvea zbtkXfa$me{-oYK8(q(t0Jr46vj9X%DjlCQL?s3=SGr@<1?&Su5;e$F*Zv0gBxp!G@ z^*F9Bw+q}P4NJIkJU0j!GjIqzGQ41PZy19%NRZl?gT3nr<5*c<&B&P12bA~LYd4EE zm)e;BYA@k=QJOs+`XTEIYJP>%D?{&sUmRq>N!}L68hBKI=fMMwz|dh&__5(A=~@?= z1RjV4o6_TD`C0!Al>=%|K$e-S_Qc$a$a&VUtEna3K|1nsFaEWp37P~S3tNWci_$2; z)S`U$pt;}e(#3%%X&Dq>gU1o^_FG_#Un0{Kccj(|QDQ%vnF7sr76V)JyMnM1$$-!N zbWP~QE%w#QxCGf%>`HG0ebdkHm|TCbxThbui~5L)H~y&b2uzbk_b=yVK}IaToJ^Ry!Yz@u`ucVi@F#7m z4KH6+n%fno|$ipL8Iso#K^? 
z7sdC^@!pX!X&F#LJAaCUJQFd|q{Ozw$1G|K6d|cYgKXPEF2Mg4*xsTDy!PC2D8tO%Ll#xXi4XeTp>G+c8Xeuy5#p+LAw&*FrDhC8QNPdCHsH$m?oeh#=?EgU@C?2%eik}UtI$14^DSIvY*hP3pG>pRJ2+Ql7<$N$ z$JU5PyF9Pm?5J^qJTjeyDSo7AmNbam+$oc5y{EYQ@`+>DOVB53tnHTqC3zVR9P4YY zb~STKFts0^ZJnx7m$=dW;;6>pV@-!ra>bnJ>0wH~CKv6#)5hoFIu&o^TT&m#M?co_ zLg3ao>QC0@*G_A5P1PeuMI}RCtrO;xx-GbH(kVgc;uQDqW^UA@tlHezBQ)fRX<6-$ z&*jpd7i_q>#km^IO+^*{7~5ZTmM=Xq6BjhrBx3;*Lsqf}?yZRnD-Z69i}4?-?Z1Lo zD^Rj=AwJ8?#TH(G7hbL)RW8vOuL_IKIrdalK`I!wU`N7Or3}cnb{F)FqCIvd;!fFz zQ}C9mPoL3pNvf-Ik{*8x(OjqA&|>lUmd1I7IW}RC6G+M&LXDJgIbUyRHJs&kyt9Ev zKfjP=>MWxm1z(}BZpNek)qBS|?j?JJd9TLfGn3;Ci)LG@+0+5}TaMy1ewy$1X4Y(V zuuWIK%@`&%y7`BqpgKbp$Am%P{a#=4Zsp@*j7r(NS-;L^wHqsH_l&V&L( z)%()t)+m&+JA16EGzWTJ=TWdt*82UvA-JYVxfnoPA)28k@ez8{Vz6D8HT@&_8yvK( z^u&+zg(4QqOX+*4G8{#O8FswPj)%N{^WDXx`CihqNxpc#G5GSeunwY>X7!fmTkV~! z0v!Vabi2cg4_oRyT8kok<~A*FqX>-}!CyVz6-6y?x3pzO#z|!mZKs(iS@g4YRJfJ% zrYGBojqHglh5TAdlMhbK00jE{^p|IP`>P7fLjKo?$8oE!U$%A%Df*op{jnH3CDC!J zWw4b*O9fZ)Lz0IJ)EwUPZO|%1Ke7sDx#!iopQ%LEN(CW-r>xqtq)z>iz1nWJCNm_@ zMep%bG$z*VgIh{1PTHs-f7^kk!kX-)+k_u340pb(X+FezwWP{P&wJdMxxk0(h{*HC zN-2o49{MAmt~}K0R60rR>XVH;IuWgQulq^8LSCMt2hg=pSxH>JO$4$4UqVCRCHg~u zW;)l1C0xzu_I&G=fO-$>u_BYE6-0s|MPG|OAx-&woh@lxOj}6}Wkm)3^SzP7N;XkC zT1Dx3Lzk-l?uVxMXXMr^1{es)b+zAbNDvSq41eH{6+X+1`{h3sRqm6|gaL@bKJ3Ib z5(LfP^l<4dc}RNBRF(56t*z7-6{rrV$>yGP^^UYMakV9%;g&Ba8y~w^JntW(E5b%4 zkynPTHDa99i@6iq2A21K^SuwI^nQHrIYL@xG!1;eoj5d6%)8m!I*2_%J_Ga5KINaiH zBdM_#=n`1YD0Fw!)IiI~D21R&1Wa>EazT71E2K$P>eTIft3jq=2)D2KLIWN$<^->NkLZElrgs z#Lk+HMN_@($@DV6T=B$5`C3UTueDA{;U791flklHVvAnz%~cZp+2o1Js}0UyE3qTI zmkoq_?Owray^xLRF18$tPcKbFwmOcqm52ivkr|EeGx<}+x(;8%jDyDnw^nDXC#olN z3)nt2=_H$h!LVQCf_rS`Pr11!p62Oax)UD?y8;6P*P~UiVt4oa&(_ykn&h-ptP$nJ z`kodM-K2_@wUbk(OS21{6-%azWe=CmxF7PJdgu1f_dh-qwwi(~RKhm4_U!0Z6cPwM z&Tc*=t6%&gOd;%+IQV>ict{OHgHDVtm*?B%^?!BF1M_!*c9d&M1SDVd;?gdw;%%$)^cv3Pa?)SrWXLRh=b#1j* z;{~|L!DB&}r|mn-h;%Vn7-&G9k-YzYT4`;|P&h6eaWa+dsG+vH`)+whf7L~^qrpp7 zUal`1Xx*!CgN4~=c(ZotV-@SZnjk>Ee-=GdpV4t3Xu$M5p$0(Grmd-9XQR6xYG)oV znkbCf5t~#7XiJG+j9+!Ug~sXQ%Ce4+yjXjVk zzQZ@ghtC?RcU(ioWa^3}FGc=5Q#EZX4-|E*pW3?Ahs&3vDhYD3!j}eA4L%1f$ZCQ? zW)MhOk9#=nXx4$J-PF`vFovK-Xi@7Juy=Z2tfuJ=dNH7oy*2W<(y6TS1)`XRX4U=K z5+4D@bMCH1@7M1u(cq9;;bvtGrYFz}u_sSK z)jdMi-pcecvg=R!a)dM65K9ZKGvAt&*qQE0s3$aIFsCE3H;mQ1niG{kY;iG$n)0&U zAMr~Y>TsOoAR)rH5VSaI#F5LsrqZ4%t1l3^TY0wi_!iJ#ef%M>ur}ku$FHp`;9>u| zF<(fZC^IR&S`OZw@u}5AI~UkzxD&w;o|qf@m&^V}|=N zV^v8!8vS-ROxaceT$|-=9XDpg1>W&!s^?%kp;pA7fjcYGn45s`#8Nu&q7C>xJO*^c zS=VKs?qxKYYa3nDNh4O>or6KSWPxsWeOFm>E!WMBc7zOi@jj;zYxESWzbK!oJ$DN9 zf#n`U){X-d1l`uGM3rwbvp*!!ly~62b>PGAK`S_^pR5t36Z+j`Zy{+4R!&0hu$M_b zVdcc}^m5S2C8-;WqNGy34v1CVKww$fkQpuQ^;F88xV5&%o47I(Hk!|%A!zV$XZuWH zO^T`T@3yXJ9OSM|J=?4?}%VJ?UTWNwxKj<9Cv}s9Lm?9=3p4a)&JrKDHtWG6DJ0>~K z7g|{F2$dcK>8<3xdg)ijecC&vt1OI1I~G;yeTvGv6(o}gz(kt#2K=&q z|NJy69=Q+snO%8XsWDe{)Rt0Q44pv-ZWZaZ>8(_GzWkJ-I*0Io*!#=oIC^Lc7jbkzY00wsh-|F^3zYrmGC?dB}t>{I?kk4JFj;D*O0)FHSw0@sXUuN8*Kz*29sFr4kK)KHw zmGCU#Y}a`13g-#$T0U5Bn7p;C*rDRZCd`?YNW?xYf~>IB2bGUhgbgC1geOO{f77p@ z4|50?ct{95bX*wK3(arMF3uK1)_lyFMDp3oGX>Fk8Zi&*LrWjruWn8R=EjBm9%2`G zzbwYergrr;H_Sh?+Vry~leomvhHJ1`2hp=X^v8@oI#3|S%Jyp)-P#*xau@fWtPKAu zarDSyUNEx|-n;wSv9w-ZRQOe$@qLQRZMs?)rZvna{ZPMH6fg-)%JT`>lt!!}^lfsO z!g{oF-0!PBVpJ%mp=)}(m~e5XpLN#0)Vqr+T+>Kjt!&3Mvs=4Fcu{;xzuRro@6|AW zd}q}ES2VMMIEQExoA8P2vk^ly@yqHhh3e_j11n2+rql&XxMpUi8&_}W??O{L^YlFbRO)8@&8{wa8u#S)(GwFk-vkRM>qh-ay+ZTJwpf;S58m z25e7CSJF&);r=AFO9PUMlHddA_-EW}-Ttiv!5r5F9Wx#hy$kU+sl_zv^98O2RO{~? 
zgHjI6cswg{HoG`J22G6Q$oIUTKRj;Te5$_X2$$%ZZOipPhHp(#=0qB`EZy&`+9!KSJ){P z!J?#k)Di92v73l^mysgxT5}6Y-8Vki^s%s#$J4g|+4Mk|6&Y3Zi$KxY`9joJOTO8l zo{y>aaoBnXNdUDKfo#*Um?bJQChylyqxS9PZf0j9aov8Ug|{W7uYk$O7_^3%;6i!r z0(^1Hf{spH(ME?OY1f&#lv|vWGL=`NcnY_d(j`Ik+{0;kDBH?u)s|8S$LQH~v?HUd z`+WLw$6Dj?3kd;R7$G}7U&#eO=eqxb75zt=IE+=UO4m{W2Q!{F8}IF(mrx&N>ET2$XwSzV4~p{>In5=DSMG_xv86T_!R4 zzmc2fP)hk|()uALXDCNyD5-_<_251V49yNMX^CixuR%uT$10IObj^o+RM6!WV6Lpy zY?3lhtAtzLL3^y>RA;!3N4t$wMsRP>%7xgJbvjm;cd}-d0oRBhYqNDNb>S+w3^8>T zMY^j6ueeg^L8>-7j(k*z6HN^gRWf8eTPmH;=p!GA0w)^3^D9YIFApOENBEq>qdZ@3 zp0d{P!ldP@)Z0+y#56JflG5n#iJ7?c39ZX#7-9VIX-)IOv*E$x23p=!gsE_=1i|aw z{RQL%7h(M3P{H|kznSNrqHwy#Zm7Y?x>7r5xrq+q47<;e$stiRCETESb<17rwsg-RNdZD2?C8(A&Bbcc#Iu}%Y zN6c`sH;xlwtbkGB$kDMOt(YhJ`v*t;m0F57fWM?3Y8>Zi^HwmjZx2tE$N!%Lx3F_0@xS)mucLo zlojcP($up;QF&qd$mE`>TEN}Px3WRNg2yZWp@QIWB!84q5(L=1+xKK09f79C;JVC8 zf7K$X^0bb6Z&&$XpUk}K;z|-R)}}?VW)#aBS2Ke{7zrEj>y%rr50Vtx=MSB+@UnOd z$a!dDLKUZg4OahWPw^RA!>iDA>=`e|DjBKdxxFF3iP1 z@1W++#`;^9=(p#L_cEQ2qW>R0;{Rxg|0n?HTyg(j-~acr zG>&-u|0-CJL9_htr7tc_@BcOXf2;ogAAwxhII@>rul)lFqVEF5#YQl!7%)2I!f7IV zzwEgnv(rT^XlINj{%4@SlJObR#!8s)N*k=0DP&_4B~Qv~v(E6G%*~5x%BUKG8ol_%X>kiGEF?kHQk;?^x2%aL5=jqR0n|Fr>2iV^>t?W`F5Gv%@+ zK6Y%Asfs=W=3>nU+H-l^Q~{lB<4DO$>hXB-_MaZsfAxK|Y2kpr`$*ajd5+Q*oz_)G z7*IYi&@8#V@yc@^jKFh?0SgT{#4{)`Ux7;H61lqYcM%5NcB%^K=ZqQX${f-?P?D8^ zfX?%@6GW)lcR1p4-*6$xL66UaPFYUP@WB7Otu8G-5njLpc+%D&!UQ0Do7<6bZem=7SacPcXcCS~GR3@7d!4%{uEI4!e#jCn@ zcm`C}G!*Lp>)@BIhi?bJh#o|>6ATy_8-|!7ndIUAe*e&rrMY?Sij}bFq7&HsuST}g z8uRwO=3YSSpHUh(i(jDUYwsMQ;DeUUIz&|8ObY6yx{t~Y@gzBO=Ed&v9y zIrk_UJ}YiX?g<#!v)A+@H4V+^#6%R?h~ZGQ^V#CXe3>!`+`GECL`O!>FDxX1HqBXJ zV-mRjQto_Z?JX!vtE|f{2CXZjXO1%B^lZl=#1Ixl&|k6p3!zZTSJp67(K``5R?v>H zKR2O7!XOPs8fs&jBRv{$yl%MhSantmD$~lp1vfQ^-2Hh? zm!i*FSamhPN#EtHeA%}3WoPsA^8@$G)u=bw+}!*jbbmZ!*cXg2l_TKu@yZs6bFI#- zWE$)xePc#bfnQn+nh&>9zsD^9b|#q8m-V0M(ibUrfn>(Xnu-Th9C2eno#0&{h3 zCErh@{wO#&INwh^3U@M_O?i;co3WwP{~nA6fJlGEM^Wn-*=r}&L$IVJoDdMPs|r6# zX2p)@wX4(S@_;2onw6C`mBlk;P&2NU0<#-qh_xAEauiaud9Q%{U^^y!4{1(16 zPo{BY;^N*o37ic(5C?2jS`gNyi@%xE%oUykq7SE6v88lDTG8oSHDyU@cYMT%jBL>@ z)eS@X?5Ve+rBc-2Gn!Jd!U5AgPnARxJBDD}PdDSS5I9aTuJZZKUGC;i7lIQSZ2)* z<6{cmik`QMf4md16qd@wM;_cIJbS|NN3=W8A6N)}8nO!Kb#|D<@LLdO!($*+YD!5- zp;F?IadYDa;W(-9;;Z@8mC=OCPi?wIyvIj&612cd*!Z(lKfu4u=5z;1qSCHTAPrIR z&Jn$BGZL#X-L0{@Sf8*!%;w7EmzM+H9xv)mrvM%vdL4j;jSU@q6@Kq`PS;CqclVd4 ztF5!MGgD(@4K1y!jkad_9DW7{hTRd#`2O3sMOahSJjdKHd4brJ6iQ@hPW;KKi63BK z;UI$~=ze4O8Q-17^nU(MMk*er-STqbZbT&LFClh^T_lEPl`J$YB#+bq7VhQ zA=m!jZrm5`ZVV(7=~4yne}6JZ$eR)a_^GEtv*qTiqLKM%DWc7OSez>4+qldo$Bp`k zl)h7!4>mtU6mZxU+`7e+o}5Y{J_Q`poEFI@y%G~QNZ9Tm2Mt)=M6J}qpZ;yyE|XWO%AImqnH{K; zQf=k^a^>Kj3)sjTgOCyX*wc9B&0ok`^%5P(F;8+v)U z*`XyNF*G@e*zpDWnvZ{qiHRk;OzUuQaosDZsG#HOadP2)oi0=alf)46(R{dvi#YPz z?}}b+_S&vI-|P)V;uwO8g#|7uDv6~M7#J1Y|4j5(q$SQG${O)E6x%@hx=wZifM(N) zt9iEGyhvnzoLKrLdKQTjI%}xE<99Ra(mYX>hDc5dXsF*Zv6RS_o^`iQ&oEF?z%#9? 
zx!5_1a2I__i22PW4~|aFHSOjD@stO;U|xWNAj~ey9>rExszd>(BQZ3pYv{L{9U9g~ z=c+~f)J%VowK62?oeR0PyddDI`-JcYT|^4Y$K?H0+7{(f&T{W^5W~%G5KdTcXR!~+ z(wNJL8wi|vZ({1q&|>>|%m$Q(e61j2U;IjzoMw6-LJ>5Bt@drcd08dR8_Gn`tTFEh z7BZBQ=j}RQ98osXEanfWrkY_2&Pn{NljM7-)=^n&8PQmsMn|6(NfhjsmK%ljj&nNE zKCSW)JM0~^6{oGo9`(cpjI59)$-pW}eoHT4h_dN1z+u9s3)4%4Ku_w&61{s_K{t-g z(!!!0tJq%BYOF6r$_XbZ+PIj40MHK)z%tph>^jg!Y%gcW- zFfc?!`i_sy2xFECCDD+OVrET3bv1WlD0W3Gr?Kyo^VOq-6?e)>-hqnqwT8XA=|_`U zXP^^z+}t?TuIJ!Y!O#dDeutVy9L6Rk6%`heaiO&z7bAgzfq)IzJ1#ygj)Ul%1h&&- z7(;~sg+vn{&yJRExY19Q4MR>1Bh>Qwv)Fvhuk>8!u}!)`rqjJ8Qb(mcUj6%n1# zYme!(w75DHf`T1%7uNG_*H3lUuKGA{midwkpL&m{BZA|ie*S2H!EL485K7MO(X}I# z_OUbDPha$ls8*LF=AJdvJWJQ>JsZNFj2o-L$lSDmC`en2vj9{$KQylP%32NzqR%>c zx4|FqU)MW*U~57TjrdX*!B1mKznRHnXbHtHykxm`BIFW4o5BR_er$wes9EUhby(Vd zFeRiy2E!v*`}d&6NXS-PoneYqZAFG@D&Dd>k-{@6ddH42#xfxH-$IOl3(dqRvkM4x zfrgazpX(LBQ-UWI2gj51wI#jmL{J1KB?Sd$#;>R`ub=+GY^9WHI&{|EG(8I zv&P34^z1MeF^~~|$#YqqpReO0;q}`d@6845Q(vxc=GI&9rd8s}Sy))M!k?e5c8AAo zR~k^*e55D;2~Xp>QL-B@-tF6JBpTEQBbG-Nr#m28+>58*s+?xedh=0<3(gHqV;~+>S|l9XMemYOj28~Zhh}qZfI!0 zq}R+P3m_RyqE%N=5j0%i*a-0VXSZ7ZXSr1U?K>o|+x6e^RHht$ui3ddtqwqAN(xpK z#YbesF&aEPypz4X(D$VpJ=@iWlf^16O4;nv-2zFOgEZW^=DU+L^LUk*I$gLsH|EjH}rN&zfpI_`AXxbY%rJ%>Bsi)jc5- z+lT>P*{_hsNmaSdKMD_3&?9JW@1EGBHvTb7H0x3j(u$`U#y4v^?&|1y8-j>Nx!@3% z3;}VQo3>y1|e5sKjPT}N|rH|@}oiy~c1(T|;0 zxNWE2x))B{&si(Vq_&$FTVk@eX8Z!cT>XB!VfJ(X9Je{5c0mqSL1i_Ct98kWEn@c! zfPZCVGhuaW4hh*&q^3?QdeE%2us%-()jg)=SiRPB<a#~YI8 z=le>YjgHuWUWXe)4q1n-7@s71GvG#r!G$u~hf;1iE37*q+X=h0;3X>iUtZ+*sUNV` zc6(kPx&u?154<5ytK#wp?}F;z$zS4)S!U;!sx=%RZj&m<>9_-I3@usoXZHtXaZ~8H z%y;|G@(p*z>@YJn7OO5Y-S7HqusRY;O!nCLAU8MMe0gUlbmP zTn;}0L?j3d=c)r8k0wE20pwru0q&tEV#f}mv6*(K^F)pb1X)_0PU1^SC|^BZZVy~8 zR`dRmWy7Sy8dIW!wuO6FR#tjGU+?Jm1(#J+6v!Cc*hF_)m}Qqn&KrXqad;5#lLW(d zCAoT(*|I9(fynN%HEV;3W}4g7{ST1+alZP{%KGLo?s!^)d$SwdOgQ!|856?Ca1Tg- zEj^mZb|%zZzwazysL@prf5u`%v=Lpu23`(Ox;e$Wv|o%wn-Bzf36#EPK~dSUEx5`M zCi{jba7v>6Qg|e`TU{T7J7m=)K7AEa+9i|B1yzZ-{`Y`j0HW$gHV1-8V~K)t;)DY{NNfvXHdS`?xjh`y@$2STKTX-DG-|! 
zlM4{4J50$JZ3oJuoR9kK|A$|oIhPV1t1tigohr<#fPR#n&L6qZc#w!Y*e2c)O+lsI z43R4ciWs9v6q(y89XBf3=amcgjoHRwd6Un&dObz6lMG}%8!go^9*e@l!#CR)ee|M| zlGrS!bXoyb%VejgxFAQpR53Kz^?dzcf8J@~lx>%+Z^zE%@;WKyYi16HO+JIeQMh?P zI=RF11(g;pOHRa*$Kw$nS58ju^QueVOi)X!4fOt;FD{P8vtQ0OYWu;_9EtkxPRQlN zCn^37JS3*UTbS3mO7uxSj)q5$pBcStl2Bq}c>|6SO#ZII%_arQx7@z(XG`xczuniKb?WblkF-(E zKioe)-rl!6T;K-u9B(yWt#Q}}=yp=A^whX+@n`F_vD!O1S+6xMXQ$QZb$Wh%>b#$< zce!@?T=DNwMEc;O|2dEForO?)U}X(7EfH)bXJ$qnz;3X~g%Ig$ZS7?-GPAGgt*(-~aP{e}E7%VzJ z2CXOW=AmF%U_=gs24OpTl&+@F!MwOKlRb5KH#KuY2GjLuE-g&T@AzLP3!U>jkwL}Q z!Nf_0Qk{t5Be8Qz^8*FH{C*+ig}mhRNwIoGf6MqArYpVhz=#G&NXkmgB8}Mt!UMUZ ziidIw@F)OC?%0@9dqZ$P;#Uswa=xmF*M@gmP-We~1FYi5sjQ|k272tRxn&a=JQLvb zeiKR#-aOlTcDC3i3yA@jXA;|lyrH!_=xPl?u^_I94O5Q1<%;CNeq-N`_=*xW1M!TMN1BqZ26;cEj z5$Dy4M53!iL^z&hrHFn+oL<$6o%SAm7m|C z5`E%F56;iePfx@7R|cc-knlPGf!M_(5RK$?9P4I$wNSBCESmwEN@^{pgCNXM6sOr} zk)*v1A~`_97=w@3hX5#~&1PGoQ5GsV{d`aVY_WD)?19vH9dhzKXBfVdcyj6Xj*h8J zu7G0Hsr|VU`JkUP^z@)31X4iq$p7)M7rJY;GmqV@Yr~ohAtwOFVO1r)rX_CY1Ezg&F%$eG^bwDp6U-xHH~Ab zWhYtNw!L>;p4xch8M`6!HyHK<`cW zcKIoCy5#t`eq~p0^kAP;tLeEkY3~@F)NpSYTEC=4B8ITT&PJ!pyTbrNNqG*DkjA@R z!Caxw>DVV=M%ELn(J3K8x%%}DL=1joJ5#iDK5a-_YqFv7pfD~#3+!r}nJB#%t5H(@ zv%Gl-T-xh=05~Tw(jS#Tw{&jt=WEe7^`;HdBP1S>D%XW42@xHKnqU%G%aBQge@Bwr z<^}-cLn^p-Qh$A zzt^ij1YEx<-q}*k7Kn!hF%2#HeULmnZNtOAZ;DvRkAg3&wOdgq94}TIL0^iuw>JnM zi1~m9Ckk8%gs3;$-Rq6VFdHaR?EfLBkG#L!&9p^*0^8d3S{)$h`^uC{OeWHuL5+n1 zAcKN}UY?&5OG3!NwsRo#A$@5ANd$kpv|svFqglBA*<;Sig((=G9_%0=gQhB9E*lRg zIVmLWYD@WL44olV3L!XjvbOpxCG4fUF&HB z-^>nc$WI9v`ARygxzM%=Xdm7p==lAh60b_vb-$RKz_3v9hNXMX<65n_Jio)(_=NSR zQ(vvHCF=OD(~t5fTd(X#=<)&^IhF11JlJEeoz=Cn0uhES89GjQ9C{^O$zprSO5(Lr z|hO*5?YW0<{x*Y`51_tSP^O&DjzQSiWpvX^tz0}XG*p09XLQ1h7&6zX(*{IzR%Bt~<$a$m8r!YrXYK$z z0A_aI6WC~}sT-}&R{!aXU>Fx2r!L!Wbz%`NS5;Q3yzEgy5;6*hXTRcmeDE?J_dVKMfwSF!MsxB>@8C|GO+KJJHqYeM{mBlggp}@7 zq0v=Ycnyr?%&z0lJWC zqN=}Mhp?rBIhNeXLfcc17gRE%1`x2J31Yu)hG$>xRUz(0&5Cq?l5|c+c6IrzHCYej zO^uCNf)>(U%dy-abruWdv9ZYL-ZmRRkT1u0gFg+*wqyJEFY#qU@NHy!`+GK@M=(5& z|L5Sq00I&J`+BP*Re0`$DX1qiBNdgzmbP>tkUJf@Lvfchdiq~p0OKQCL^r0sZ4XE3m z@fQ`6`abYFIY=)C6EK~$qA9MOyUMH zdlUQOqS~%Lg!>CY=NkC;0Cww@^fXXH%Pgx7}>F6IPjBT*Nd! z>1Bcw>+dTsF9%hi;Y10tv&P|dv(p~|1>dj`m#TGmgc}$Yt-?dyu!D*~=!1~cF4vBt zKq6)k3@jHN*#xYV*q$xw2uT_Hll_GyXY{8XyPQ6be`LxHQISj#+6w2Pw>dz^y+eA# z>tSET&c5Uq>|5eJ&X9tk-0m0^zjIpYiEN*}m+?o%<^a;SDaOzO3by)Ip|K1;8!V5| z?vrwvB)krr-&y~#!AWJ3#tEN1G&#lNes+ymJ8C2#_y2wSc}&qs8)DDk+NPqZKaa0y zSaE6Vn%e-LM3qHCdV5=AR_42>S=#ZtWI_KRM^m#sfINCio_@KjFCTU|5OzLA?#`4c)zHhs86yYK>>`9PRW;q-khWX*MNlg7FDS#2_9AXOplq zeC~xH!GwjG{n5}^>0e%+pBFu?nUBI{<8=~Twx=Q|Cnsa2V?gXs=me$%gNj-^vk5g1 zL9w__?>q5SzskTRLu2N&434+;HjkLx8O>%?1{j(E$o@W1Z$$jwq2b|^qocx8mKFv| z70DW^@7$XUphQ8<*Rh#I>Lzqi5E8Y1>8E*E0&=syYC4t^O~x|*x&N3f?vE8`ar;?u ztFHUOLOcC>f@S#!MpH4IexA)X(gO91M6PCMjmN^HGWkrS_jNEM7F-2=oM_d~9s5ly zPZdc#o-PU4ty10KVev#l5XDCFT(36q5swbWQb2^XF13Hy<0J_C=;-NnjkjW|Q}d+L z7*l}x`UoAUOM|^vrJ@^Jj*U1HUI22?$WL#`*Zcf&` z1R3^dD2l)F{>n6n{ShwAE4(~R@f&#UH1X190&9l8so1siyIgognpOI=E4RmVek! 
z5*mI~OUR>z>kZ<36pL4+5(z`0;n~;l`&vV>(+{__7t>cMN=c_a zvbw0}%28Yh4?G2h5zOrVbc4Mg^|>6xs&)-B7Sw(cGlEMt-C%8gB3zn)>BBd3QcJDq zQe%{~H1~6l{i2p*c)x#+M5D3J`$>Qk9QWfB7PFTv95x6ZCt637TorwN{o@)?G_|pj zO;hLM;)0%@eyiOb4F?pIkLSC&Ac)!3h@~CTzr&^Ou!ul#ju}4wJlTK?*6_rwD6RUL}Nkt@DVUSDL zR6B>jt2Z|d!I<5t;?761tEyyL8;97`jr!-zcY1li!A0ce65lt2v}g5nD*%Q1<5d$2MuiVuK%ycjaQp^gkE*S>=N>30>|^04NX=*p$&HX!)Vp319g}dlrJNy3b6E~ z3->c*?z0XmwIwkEwazpeV^4hAG)`;PVyhe85fTsfkeYK{aWCRPSSRgi3!LK<{tHzKaPZs?E~M}@iSc=h zV``g<3(2oF>G_1{7jUKni>6?FaUl%kbX0rO~-ld z-4?Hez;hs!esCDe#gCDv$(V)u>p`3$gC$9>(<$iF(%a!i+jz#e6TV#ApJ#lk5jLts z!esh_6a%8Bjux)x!8RbV;Pn&Lg#l+Sa2RHl*-?*5(e^$Q#YQG0G0t*oT+4c&r~V;k zgT;TkqrgOWd&#;oE7NqLsJn0~jKl4Gxoy@x z8I9QK{6Q-bA)mC6#N{*T^3dp*lm%?lq&W~<=WcjfHP!4uluNs+&jL98TR;~T;A}ec zgkCh7T}-P|6=f-y^>3QecBfA^JXz0ctYQdBL4VwK=sq(#FhNt7w=D95+q}f*f1)YS zpnaIGIjzc+NrcDe;b7{n-y%qB+n-l20@z%nYns)$OYX?myg&`oaLWGSs%pfnED^-~ zZ=%86L}Nil9V=8|fMk~c$*|Cg3*HZ_TGEOyJO3uPal^vW7t=wu|rqpNyRvZmPAP0RK4R`W>t(&iI;!JSunrrg{1~apm zxLeL0hvAMggSo1;Hsn-UP2u1s?&bH8AVN;8R=2IGV6ia|#amHCKpQ4~Qs-0ZZ$rp4;t&ickDMH_>SuB@C_ugYkp8xrbuGg~` z#gRU-&Vw?UMb*{UhLcX*qqwYWY>h;1QfB!qLa_Z%cuHYAJ0oQJMjBd*4)%6(RjaT4 z35+N?Xw-N!&`sv_9lh=f)IvZX{ls&l>mn-gbq;U zRF$}g$n|Dd45Wf*gUBHODt??WnpV5s=oFU1*Tb8T3FD*yKDa-5KmO zIp6|7szRbgxQ^$G_-zYyO3%wkrfuDmK6kI~w->tG5a|0+i_~!Im2=@OR^f_St)10YdqO+ZxmlEX^u@{4luw* z5TCsHWPQ3{6(Y55cDDZ8J^K21`xxL|FA!>TksVk0*AUw6YdMvl`XFAN zy0i&aZc7YOF({A^Gg4wb^G+nYrgKkLK}hqky7_&@L3AFjN$?~BG_^7()l`&};@4eh zsVmHJG+FH~*!D-)^*e2a5v%w#Y(;F9N9fn4WdBn$stC0B&&`V*l_Y<*SY)>)Z`1 zu(%Di_6$f(hesJLo^+i!`GkRI0pN#>sQJ+Xr!RnqR!bxWcH~PCrrP8ZRNU}uo))S7 zsf!mL(I58mqgFx6_ExVo0t~I8lQufR=!_78WgyiUO)Of2@#oA`BH#@tK@<9Ik~=bW z?c-xUVM(pB?mFwJT=S-55TXRJ2&Dm5=G{^G+y8t(xZu6;rB*8M2Sv2D#mIvlhX^%e z=rIKgr#xR>PX(?J);ezOkFT2)2dmP;FfD14TYdi3IovL6tWo_KrwBPSeZLnaaeZuP z{_6}(;Z891TT%p8o$Y2d2XF94=!+P@Wk-{+o}wME_HpM1LTkb`(w55IAnBc2&xT;T zwegrU^KaSlQj;@5N1ZYrEt@&Y@qe@9zvA-^^x>c?n!7GC@Dqs`U_Uip`DAx0!4<7* zck~*(^}3u60KJ(`qgLvtb&C86dcFVeCa$r3LGsk7Pwja1+p$rjT?A<;1-^qPQ4 z@YWasT=aZ$`{n;gIl;g%AVE6RcK1lt@YhI#b*=m{>si^Gr`2+HZ zK#KUTAJsAcjkB-bxBuE|j~Sj&BWV=fq5yT&-y}0DbxX!*x)_R*86ZBDmHpBSl;R5o ziMMau%zQ_TPy-VkS3xvfWhI?TaL->mPW*eUsF%kJt>3?Yi;9YZWEi@-*&yF5EG(?5 zstP2t%*@mtNmGFcb@wTf(x~twdoBh8`%f|bNjNexGFL3i1Gq13Y>cX~#7h)xXly)j z0%&S#f^^yBTVgR^tvzkdp9YT~t&)YR0xyxL-7MIdR$u3R%a^J18Sj?6Efi z4G141GyN|i0v?w{)xz>^PC3QIN+q8`Mus&;Io-(p_32-pdfklw3B8~3a#{U*VZy0E zemV}b{$_c>Q}sYGWPi!uKd|NWOo{`Npc5;Ai_#_Y1ZNBP3f&bOSUJ{Si2(wzn0mTN zs0-DsBr`i>V<=fTkeZ{?i0c5%+bk;2#NNu)=nb#JDAa%@ptB__Gchq(P);2toC^77 zM99h*WxRW9rY^h z2}XsK_kpO+>63fB<}3FpzWBuK0CCj7ew;Qq@R3oJi`n`+T*M8MOJ@p18VIpKSA~Js zTao+W#-rcy7DmQ)wqw@|5?49^bg586#JBWi*Bcmage~`d?d_~F7Z;bEogEp9`e$%I z8jvoS*m8i3h7rr_uM?W1_z?f@&3kN$`w=2Deme82^AI_|Hdy(jty#LdSrZ10wzmM& z+Sc_Xm@i1|yKPIZ;rRYg1son8263!>KA)a1c|2j}D@uBdl8SS0P2;0`^vfP1r#CDE zxpeoubma%BS)^^O{GI>Qzlta!{NI}s8FSe)-Tqmam5d}%g38(PaUvhBLKtYbtr7iH z3=?aCCakmz%IQl9N*)#S)xMm~HlOz}oTjpRdbs9FJvi9Ay^q(!5AW~)$!vl;DT^BZ z?C-tlO}*vqyj}9E9{zJhyjU2Ym3cqjQg@i_+nel9GzBL_R15jpr*}BL;lQgG1=2LF}bu zy}ASiS_gJfe9C<+s}#l->0%1wn$+P-o@kq^KMpn^=m7S(?5hU_VvW0EJJUyhVUMe<~7uHPOL?>tsH`@(%1nvBI+;3N4Mb^;tv%W1uvQ3CcpU+bqy39 zIJjF#;j4T)yU_tu1mg+PHM0eLnzWj2riad-h9A>1TtDzfX<6xYUR0sXahV^3<8aC} z?n3TJ`L9io^jn?wWpp)M+(GLK8#-t*ehO3NkKbBl#AciGIln}$7|JxvJF6yA;B(fu zs}E(%o<;b;w7s5swI!`(Xrx}?FG~AOl+Tn6&d<+pZhpQ#oLS?gg?LGm6?R}kWX*OSo>bdWLfQkaJ@!yYvxz@~p1KD*z9wm~B9 zlNI;l;E2uk1JYI59Cqu$NSxEtQ}P^W6$WO?4386NDQ3=y0Ya`4Uddu)%Wb2}xiupT zhwLx&;w5BZIl}McXkXi|Nea}s13ejVs|Q`!{X23 zwj;UQKat`7j<>EiGbYkGoc4Q^l3($kftSVg8L!Cq*LIxpMp>M?Vo-nsWUG9;H1<|0 
zL^L^YfOpH;C|**84-AOBe$pkINC-=WTu0r*-a5D|OVhk_MbastU#Az&@xtiga@4j# zjekBB0b9^>;FiO9iB&k za7-3G81trzEIy>;rf2gFnn<<2N0YFB{!E${>bB}uk`KW89YFz`-ACVHo}(vlX0(oi zhJ+>Ke3gftT*H@Lt(NJX@WZg?`Q*e*^1}@bO!&V^kdep!f!&N5BOtykr}b$wVO1ss z5oKkr`Zjhb<}#=587`f=S}Q9%&ccdoy#aKtH<&k+>2Bp1;IpgTQDx`GP6rJI9tJKLzqTvO9YCmJ2oRlbm&*9%~N zmAm0YbYc$eZ=vhe_%6 zGODgzGLcoSDIWimnvOb%3{I{g6vJlg_Nmv<3AeadIAQ%Vjw~sT{M<4!SwT^EnKEUl zAYM7B>#(#OxztmMCjIB2!48d7Zds?RO?vG&10J#cx)$h+(AY8V6)qJ+N$L0l0 z)Dc(BcJ%4#2ePQAnVY+DEmnoO53a1NY;JmAT@pA#0c5tklPGE_VHcL$82Q^;0Mj4a z!yY{-yJO9*T$(Lh=^j&jO3WjnH_v2%`|kBiKJD|a!J_E%Sx!ode$yklL?biS=e&Ck zA)@5$9jHxiW|rta$)F1R^DQX#DGgOxB>HW-p;!g9iC!m4VL1Mj_u)X zc_zb-bh%Z}v_h(zfhy??(=T$$k_YpZ=BmDzB55x#ua5S1@fgCa#KilXJqb{32o8&( zJB~#)%EqLqeI07)S0=H_Jo-`&G3(d@ z>wB})ABu_}!Q@?ilid0;?ChN1>+ye9+;3T(RX0?1n(yvx@?jjxy`iX6!QS1-ZnW|c zpk3f9Dp62Gsoz{`zni7Ch<3W>v=}K~Zx8Ps$sM1beVPS`4c&UchssK#7L%h3;}fim zVV0g)jF=UUI9`W*dyO|#SjXjIySd!JZ=i}es%SQQCG{~eP9UYE0BJ4{yPV>HoFl}r zxBdG!<(3@9>+NY?d*bY{zjqJ3pIM)tM`R*e(GmYTeux^v~vn9h`elcs!iDzuX#w7y*zbLcopSg`L;s zYTTw-*OUMFE5AV<_n-IhXrHT>9Dk*)K8wZ6PyU?3xzLr1Q)4pmRv_@!ZPus$M^Z$w zX{F~o%G2OVC4GdOE%@APV?R}Vv0=B_xeQBr&={@$3XXp%<9c_SKV!}>;pNlb1I|op z+BDhK;dBnFtPfEcAvBLRUA5LOSO$A)n%X_@v^CiHNG+|&%(C)$Ex2!z&X2LpwFVoV zIbJUgE}FQlc9L~hmko0v&hsj_-vz}nSH8NQ$rR)Df87LlG>|92Q$&*Y7dNKcZaKs{lQ5z!kkclI#<|#W|BvG zDJWs)_udEP^aOgDY6`d8ae27WI9*lR9f-51OPXF`hX5)G_|wC0e@IkQsKRdRf&FK` z@XJ$O5zaQOKeB^1FZSk``73kI>YT>P!bHxa?xonbX0V4=Dbn5+14Rd_Iek9*_pn|q zMl&G)KkU6#TO3dHEgT~R2$0}T2=4A4EVvI2!QFMRNgzOQcXxMpf;%&~ySuxb&hP)e z&$&4l=L4L%ot~guXpYp=Do`^@-s0d=9ch{(oLv#V?pTOyD1{@m769C1^2))&rZ z$bl^Wv?t2U(rRB^v4dzWo-_|GpJYptP? zl$`l8X(vrYTx?SHgXku!HXcp>Mm`JZpWA{HPc~e+A8pC(rNhB`=~5bj1-4_~>Yw>B z!_!8!+gNbvyWP_#udTV~leYFJgW}w2r4;i0^11gGB<%OyVqNw57}Re6{36nk*NQpK zwca}BADnAs2<~a?CW9i=Hp>SLNp?WiG~B<22=ilU|CDZZhGKf{z`7 zZvk)}B8dNXDZ<&YjXam)SevBJ=yz$lhrsSgHfp*=6GuFr;s*kW=!}I!2Acxi@IUhV z4ekMQ+L_d)Ia|djhG&)IsnK>aBy&Hs!lK#J>KrVrskG_ z$F-GAcq1;YT2&}sT-@ZLg?T&V#rtr!5>PLGYwQ^u9Q^xNACSh4OZ%?8F|E`{dkd8X z3BU55+UQs`XGQNOEGj8YY{ReJy{yta2ZLkWOzD$+MzC!3B+>A>nO}>+NyU(Z=#_2f z6?!uz%=bD;@BIeg^9S2|_`wX0&1N@|&G@FhqL_Hd_4NiNKoH^}iu2uK%EW4+z&wLC zcPhVFNVb?;vEvLogcj4Dzj!!cHy&|30RZdm4 z7D%jB4NeRB68}i7oL%DX^n(8gS90f2@=Ugtme{TX;%M9u5|={>LhUpZBXb=*b(?H- z)L6GO`0-t^L7A3B(eBa81jf(#6(cTItj#m_=8hE=IlGUZJ~6tl=)=3o5V!&Pv=3wS zVRcN^d4+YZv4T!4q&%S6@Zwnti7y>(5@%@wBO{|&0Dzf^Nb0L)m$vXK&GC)y-LsLVSJ`^B->MkW|NWAxx#a+3VN=2UIN z+&#gNu3-0XTV$B!KW8mIE{T6kbuTEIR8<`XJ5rHeq5Ds4rJA7Cd=;#o!#~hVW^bC= zeu_jGc;V(e+L+APjJ2@fK}HS*qzzi+VFmYb<9lU|t!EAN7y4>4=8un$fFzQkk)m3i zj-I~sJu;rt{^S4@S`lI%oXBCVX=hhaqI3s^uXjDs<#?#)s1^D44N&52ZEan-`s0cU z!(Ysj7DM&OdDYxp%5P2@jIk$RxrB4ssS%>ryTr+nx|>NVe#PEvSluO9hT9mbloD%$ zjYhJAvBete2H6rcH%FOVrjqB47l~05ST-3*racC0I((QEp&*bF-3-M|&q2#xHTs?B zaDPIZM=-@8QxyLeR0ic3D*l)59AA(@SO|-*tyauiP$qu+@FM@RyK67e$Y-~M2qVj8 zaX_F#kZ{E8hh9%v8shF&_F2*3?<7PLIS#~g!M0m+RJJ#(^?>BAV@w3F_ffN9yu6Tj z>u*7$_2$^N{dx87+#IV(vnL37!TGSiYsajsyg2KU?U5EQY`IP}Oyifp6B#3?c|3jZ zPzNS5MiS!}jGUBIgnXo%N+ZFQxGN#IubSU-vGQS}Drk%aFJM!h<#xvCi4#{(s0Wl~a7K*+{wlfortsLjCn_3Qfi)({X5 z{Nwse7QlYJg&%Y@f>jw0eXsh?8acMp+|XcTWTXvbvene;)$iIFHGNmYCHoXaUizMV zT2bu-eRSWRI|gT=*}H|;R@tp&!;FpZp{y{>a_qobsNiSAnOIzuxLwhO2?(`nyZubZYf z{252W6JXarJ=O|09Pb1Q9{3UMH+j3dc}BGbtxqLp2U}0TvX5!&{Cc_%iVOL)xgyu| zvhiuUppHg~t~8}@J{`A8KGAurWZrp#N?m#onPlfr+ZHJ%zQR$b=Zpv;vT-7l`-v?R zLWdQ_BvxHA_pRCey*FFn3kdgSle&xr^6RiEi?5Qo|;sYkZX^ z`t`FndL;cx!-?lhOY`aJx{X%1US3uM)W4jWjX0kDxzyOor2hek286-r=t0k$a%h`@Hs*LL+d%P~cXu+@4;nbT%ywlKF04_rvgX5Lf0s z;S|4s+KE+*J2vL5K#oU%J4dsvU|h<+mXd{=_Sf{Wc%HGCl9b;ebli%h;j3mv*N_OX 
ztDNp^<8LTylr^0`a=?TJRw`zS8XBE(VRaG4uH@X-ZD8wrMMcHC=O^Al){VY!TY#|U zK}k9J`w7sIJr&>V=#iR?rrhmKP$rhzDd1sa$2U6eWZj_myp563tg{MS?*`;HQo4zh ztgI7ra~fYhEjHMT$jaWs!3PJ1g%Pn^Oal8HOafGIqvPfE)ZN_;a2>WbHvxA2@qF#j z^mJXtX_0EN%kg5P+nIsNGaxjjO3h-atgJLE0z&j`Mq3AmbKvp5efw5%ZNAnL(3JqQ z>-mwJogH)J1$g{avwuwe+H5SROU?j>{f6aKkzd}^?HQ@N4k9Arzi5LsrmZ2?bYj%- zCcQ=UEiSHx=Pw~uIC`A2!n`VLYs@sq;g{`fb}bYt8aqVSDt6n$ABqcZM{&MdY9l3v z`kL39QwruDXbujA!lWD7*?y)(q-F^x6!gKGEFA7V2Jf3Zt{t}Lkqg(GOSJ~*6H7r2 ze;FXIB?jcdvgCIKez=CP65^Gls3BzsO&J4!1Io)MnRt z-j4OE=pVXAgX}8@X>to1soBztQkpNH_x8F@*a+7f=CMcMubFLr>WB+99?78V^kUh^ zD$>ml3YBn1=oyn<1ZY@x9Q?GP+=V#*o6ACO|^; zz$*UE`*AA>z?fVpj5$ z157TNL)Kvv5Pw6ZFV4^1?=Q{G(Lr%%T^`x^cnOvLI~zOYls6^f_yHZ*wjL;{kqN)E zut%Ds51`LF?Gk;+sTH#WmB+y|nTZ;6N&7{X*z)5uQ#@dX)zv`hMzA(kG;C<76%rn1 z27(XM;{zYN6UEbxMdFXoOjLUvM4*0--YzM%G`%}_?(fh|Wt;rXpS>-II}QEYWG7j+ z?5WP>xp5qA?IE9=YnnCkL*gbb+u!MC&i?YOdKKLRltO>KFFA-?)yC5df12*)%_s6KV@jGol63K zQ^Ud{$!xsy?kroI!THpPBG@nc;b`8DO>C)`jMDR*Z)P-zdhGMDTO(L7KMsMP6T8^( znKUi6DZ=j@KPk&x1T-*44+XOclb4Rd+NVU! zucT{2R3e2rt#`7?Y8}Tf;0Q8a{8g$0OPnckifxB0I$^SUTf8I57q6J=Pa#XHI@6mN zDale|br3J-O^+RNwpQ1Y(1|1~JS`x5$tT8}hUsxE>V)+Oop_$+Cy+n*#UDDA=?y7J zCo+B!6eHI`eKa!7Pc)2qU%^jco89W%Ii&GYoTcP5bt2s{HT03Q(}CnqOhR*>rl zFkiZRdNM(Wz^2~X+S)@a1MqhN(L{TL(Ap*dxVO5V5J}-(?N8A@NqP&}zmYYtwr+*I zJV!l@K%=uvfdE~=d0!;}g-}~tyL&^C)qQI)mOOmv1whTS#DYUYLKI?X#&jEi@DB$E z2Lpc&U^yEbZvl$sONyACSDAy(+v_91jABS;ot_m6$f1hSuX1!~gj#R}=UW?V5~AiNN^)v?h9YJDpU{DvIu zPQQ^oUM%^$4~NbKOT)c%-Ts>1S^b>4T%Tw6+J%72mu0-LwRlP;7PilCa6S%GjxX0_ zTH6nWS1S|z!L-Y4AGjNMEa09Nm6R48&0FAVYF@s9Dq_1-(|f+%6Q>yC}8+*0DM8P)O7DnVmzvbB$! z@3r4HI^hWCWIlZR7KgLZN>lnouJ2=+FRsm~L!Z290v@aGJ4Z!aZf-u(=<(@{T5Fw_ z!Gc}sSF1C?iD4_3N@8rg6RW7uKHY;~NTY2tzmz8cWSGlg(3Jq(kch+n0Zcqd{aP$I6t-n6?8B$$&vn zL3uf28R5jlgkp0vusScdG=BSq$PNLP$s`VIn&KZCP?q5(%pcZ8?5-KXo3|QIT8zAI z_svms4H<2$OAfVIagv*rCG82k`gd_s7wz3-RCju0m z0^oBaH#ax^1YzRz$qUPX-@h{g=;-JMqm7F4Tlm(}-Z`@ytEw8a*au7o^y;GJ>tjYVI4tj=o2OSF)VM?xZbZO6f%3++3FL2XWJ{1rLF^& zgnVRSNE^$@b{6{bg>eY$SJm9Zn|iR75--O|vM6EK0m9785~LHLJYd$H#1DWKFqq z0yF1>{EyxpyVC?NZx}Q88MpHclQA$h787->ANx`dH@T3@x4gjuKo8ZT6i!@0#brTo%KBBIG0% zr=WHq?D zz=dJbK};Bqy#=Zj)C-8(ztGdm0;zsLB)Pr44G4rNZ@hy#veiMBgDF-xY?VfkzBO@aKD;y|?Jl04GkW-q4$u@`-8Y{1y;%@fv7)c#vY+v|1C!SXqJ*-7FMvmFP31-IMHc@>@jtx~H+Ep* ztYxS29@UUhp?{ibGN)e?AjoME2jtKJ&z+S9 z`wbwj22fC~7B_n!6zu5%BEIQ*TRjXhg#(w%-ACmvokj<8F0R>yg@wUE8g6df+zh}C z;c|Zp=?N2C=&uPhLl}tdB}ObEke6sm`79Pd18{M6HrMJ=9~~X-wvA3H2bd%5jps@m z;%D!8^ktH-A@%3f9P%=D`3m+3c)T20dhSmy(@#`iU~reP2ryzrmwoAz8R(}j?P&k` z<8>gP4D4Inu?>fjPI=U3>CHp`y?2aE==i^N*R$}i{j zU!5X0WTAxf=u2dM>RL&Xa?53*%FP@-oOnkIY$BXz6{Zih^FVkdFnJ z(|}U${&Y=5NJyyOW(hI>o9Z3_YY-3;B2lVn*JB_xal(^r}266gaiZ0S3*Q ze&E>mXkJuMU}S7;FBZSBJ6BCPn6=fOlNS&Wkes{_gh|WC&kt+*ZPK}B2MzkIi`_&! 
[GIT binary patch: base85-encoded binary data omitted (literal 281309 bytes); not human-readable]
zdw zuq3@tgVj#1SQSXWxP2P@l|5z$^53OI?w#5$G8M|hP+jOiV2%5fGJTPo_%eqIo|4gt zM@k|N0inHrs>{5^{YjRc!__tCXX==lxq9x@sO}WQovd9e$gI>ciycV^h^k`0Zm*lW zlljTc-sy6pv)b@$)>P-AlJdskl}=>Q`G%wCrtni~V8S$)r^RFyp>fx}Usm^1D|{%S z1knv@wDn_J?W}vquV~z->sWL2G@a`uK$y7+Y{i&>>5s|-z36op8-K=i#r<;Smjm-piRXjvdruz4n zm}X|8vxgU9xb?w(XWT_8Ds$jr`^0|fv4vOrsqKQ_w~6j1LM1AA8JKKMWxWcf*Df7p zbx8kM9#_FKcjykX?K@!B2u{)0g^+>YQ5}NkM8F9woB>-)XO=urj3^LO9zR3~Iv6`9Q3=c5RAynj)ILAzeMBt_R zAf}^~_)T_&I}F$=X68+=ebmx#X0rJr8bWIA7>x|6aGd zw`Z{K)|(2GJu^}QNq@hyqqd_ZJ~Q;kDc^Zu;CD^aHK-QeF~~nAA$ywY8f@GLv!KL{ zB>Q+3p9$m6gKdcM2EIoqt~z(!=Bcd1uiyI^=znMwniF>)T?R?)aej+?-NY(x2@Z9~ zEHNXet32(g+3wC14fa$Ac3LEsAKc@2kTTqofCM&Tevs^CbrZUq#?k`_!JQMcm#KP{ zt1t4ixSvE$)ze4G_xCZkQM|pqUDm~_R$1*2%N0!O5l6)fbgk5m+8jG<>#Cy2I!Lj% zJjO(%$J$_*5-8H_&?rj2Stx8lyUUjX&`4*6HjJjGdWOnLqrD#wr_@; zVR%{eP4@$r)FB9Z5C49N{05T3e70f1_4Oz4g3$2N@G-Tp)lbnIHn+BdJwsQ;Kh``m z_)|nKKk;Y$(bVQFQZsi_cwxK##a%#wG*7md-VpC5`KVE(A%*d*#ui&uqYl#ow62&s z!e(E~no>Yjj%o#qp73GtuoMwt=>%o-I;5e-84qV8655Ip&wx5ehn9Czk6#sh*PTT_*lAJtbv3);HXGeY3~g z;K#^32lEes(`DuuuE#KePjq(6fDVTp?x4I4-5AfoB@UcrEhE1YX)rMGvz$pkJ%Dp` zXHvH>Qryi%RegjicO11OUEbz9V706vr{hkR{326-d;|VPes{6qeL{^H4=#t7L8xB0h3x|v z(@DTM^CeuR_7-3HsqQfVN7x`|xqgS}NLvNmMY(c|gunxWfo_*orZ)xdE9MYQ@o))=m1q;&rm>9|(%77hsu3spE<`$Q3tlKlrYDEI_;2rQKs)P9+OoP)9!@{QwF`0f6b$Ys%P@1=iPwY>YgIe=#xkO+-)(=102X#Fuy2pn)ctXcvQoNJ zXMNs5q6|Y;*6Voc&1UnBnYPeV?L8Lgkd~PwUaWwlV1{new+}Rm4B`H?LxBDtpKA$T zr^zoDGob~C&csNY5D!v{o|^KN3(gx2_fCQg>y#caP4w+Wy4TRiA`+LR^EqAg8MGdt{Sdl+uxVG72V!vtZZhG0vL<#_wp$+g_2W)o+1R?=$4o zg{y9=-6g{0Fu2L%)!mmdhY9GyJk#QF%GxUr7W5n;Z#HLn@{`bL4R(D&2wtmI!+xRb zMpBn4#~DAeIV({p+kxu*&VcRTvDs$ue3&-)uR*#}j7EuObb$d^Ampd>9jIn&d#p#Y zz`#^4aGOB{yN!h^%%1jGUzpdF>S_jSvV0SjV2B1Y?nY)i+>jCa+_b*j4tmrH{@T4W zUXGpJf4@72U^%4Zb2Nv@5)R~`4}oeztWQ4sXtrkey68vaff)NOaA#WA2W!V!mW0Gg zMXiHNNx=J)Ze5V2^F~8IP=L*rMFDIC;sG=n+WAMPwu%cr_zGVEE5VD*(qf{-QM@m> zDKAC%1{#hP3&@3}@?m0rGJA^dNi&m(ji|y8Y7<>%Meci6Ih93{rh7;Ze6Q>Fp@O1l z$PsOVG?mwSK*5+2uK&ABj$}J$8Pit6bgCUo9wPY;e>-il)>sO<>JP0L?3>AMUM^^-sj^o+DRr8K)Hb$S zK^4bjr=z9E&M;ET*bt2NAO!L&l-<;t|F8&ej@dcgDyoJAoRIBI!@cE2ND?%~BZ7pd zfXHb>0%cbS%H0h{tO^@7-85jaOw%*x6V_sZ8To)_ zu2us1H@U%+Yr}I7S>E)Y%YzutcePPsPo=38fCcR8LXJl~VLKI5rNe$}nyd#E`G1*9 zbbg$<@3lp{#XJ6rfl})H<*)wzMk<;bA@{%+U#=0#7>7f-RC2@*LAbdSJz$s*>1MUj z2d6XZgG1Wqhq;^jR9t%-_Nc5tY7S}JxkI*Z8xPdGDes-{Hki zjZ&*=c3GHTF=r&SlkUrDL`l|y(sulBUy}G~=;{%-?h@Q(y}0NX5={-D#VSLBU_jxG zU`t$gW9o=G!iu<>tz#o|xT*WU=ozckwx6pDB9QveDh}`v&hSQdrcsyRmnyoc4UfoE z9JAw&jX)xn$8`M=JwJ40qCDWgn4mq}PGqO{1QVQQ>wnn+RbYe&?+=~1tm-VNS>ZLZ zKURiJBfuy1u5dZhPZ*V z-SRRPp|xF^-z*rw`2EE8BKJ4+o88(@Vh>ArJ`6v(YV2{*&58?y%@zW6^uK=%3az~z z+YO1()@8msya>ELGc01EHBULyNMVR(S|RO2KI#P9BDnR_=cB#jNpyr2T?P)G8}}^i z-rm?PDs_{rx_N@{IPDfS>NuQ#L2iV2)fgCAV1%l$* zT`uQD(OzN{Dn&Ew3l#q%A@h(TXQ39;^z3_ur&=DSb`602-j-up3vDC8dWLjaqyg`K zipJ!A1}P2wg>$j_&|%!rGCFS*q@?k}Lf>KF=T8yM{)lgXkg9oF<-wm!*7Thulm{yJ zbaGlo;9hB4d9%UyI11G9Dt)0S%PPyPbDZ+?IM1v1oGN1{D3Gpm5A1onc6SxMsj%Zv z7sakY78gBlY_>3`rUH(?o9IM~+z(9r|gCh3%aH(sB^yF2EdQlctA} zIt(bd@dj0tWWt3z-5~UULH;OX8If2L$?|*O*~EY+pF%`c5!BcE^oD2=wKXj)uSw+e zW)KbLX(IWm7P3GZKzB-t8A>|N(XbB)E@-;6#1%18X;YWxBXs`zGZv${ z(M$X*a0wa>!LXD(TBDN7!5!mz3n{$MSCNc&dpG~>42sn5;{P@xnw)L3S*uRDQZxKF}Vk54822Yq?Vs8^bG58>C*Ovj6OMtORZ-WrbWkc zqP0&F^{}t0(yf#~95Siou)U7Jmpg@^2;psU_uTjJhM2jiH?U=~=bUu25x=QMWwpJuUq=^1tvISw~}B@R_qLZOv>yJ zE-X9eu-QqUkP`jTW8*T45~}(bQJI4@5C@fuUw9ks%whuP*}Jv&J#!Z`sqw)Hgx#=z z7us#6&vh-KZf16$?7RSBde7E=mT7n&0ZYX5CmN2BB1A&q800W^D4O*S=jg+V5{Dj~ znIK)h!iiPMoVBoZh>q{@P_nA<98pi~5FE4;=;md!FMIr)P;%0-pngRAQwIv%AtX>E z2*4j5%-~xdCo@fY+SlB*e>4b|9gMqbM`*YAe1YJrcx2U#^{_wK<~|pAlvr(^x=fX^ 
z4s_&u*Uw^>PHotJ?eTY9LP#7N!e7*fUOaH~qy(G~p&4Hw2?bV$>VkEn8l;}ZNcc}s zs7U=36xpIWsXIE**W*(p?4=OYpcP#F-0p!*P8j;(Q8;{RsYs{lR`qyLRw*ngiA|kq zt};Pxnhr=|*6dg@Y_GCT80mt*zM#?w6N z=F6h4m*m_c73V#bdtrz8RF6qHkGG4?i)X>zX1|Z?vUJOq(jA0puYOX6@jDNnq*^*F zuA-IDk9!#ttCwn+;Zp{BhbIweIXs&)v@0ZPG2r+_xjlmmM{tKX&K!3oL#K-FOjt;O zC%6#xd~_h>^|TjtZn3e~!aQsL9JZ^#uXgRDESFaI*V~OcEP76PgK~HWc=%i(4VD^| z!+6m@GE!i<-$mEO!$v`&6ycnI)w)1_Q_EQz%q2tjyMxPyxK*W0&2st|seeS_7D;Df z?mh$vn<+rUtLtGz0c;{74G=Xe1PdFsS^}4`1j7xL;Ao$NW4D>Cgd1t}*+NAnr9yU? zUP#wkjMd}EJ~_Fzzol(V%4b&f432Ej6eJ`WB9vl<2ELEC5EV$*2b;5!u#S*2 zZwa z&2#|2ox)eK6l+v(jHX4>EcQohqj!)W(k+^6Fn5tE?j1$e@mi-6`V;~syJDD1^-Ea~ z@I7o3<$UaCl$kv=G>KtW#cU!fDDGshYMfBt`VYFX$H=VV7O`G+#g2HsKC#?L%xs>V z#sm`9@f%S}aHImYDq$Kc+9+Xl()c2jqjQgBFE&3lP;*Ast3s9h{w0mvCF3SBH9GdN1dq@`^{!*fI&QR*Dr2?ab-W+bmw*^c-